-- Andreas, 2020-06-21, issue #3734, reported 2019-05-01 by gallais
-- User warnings also on constructors and pattern synonyms.
-- {-# OPTIONS -v scope.warning.usage:50 #-}
data A : Set where
s : A → A
a : A
b : A
b = a -- (usage of a, but no warning yet installed)
{-# WARNING_ON_USAGE a "Used a" #-}
_ : A
_ = a -- usage of a
_ : A → A
_ = λ where
a → -- pattern usage of a
a -- usage of a
x → x
_ : A
_ = s b -- (usage of a via b, need not show)
pattern c = s a -- usage of a
{-# WARNING_ON_USAGE c "Used c" #-}
_ : A
_ = c -- usage of c
_ : A → A
_ = λ where
c → -- pattern usage of c
a -- usage of a
x → x
-- Ambiguous constructors
module M where
data D1 : Set where cons : D1
{-# WARNING_ON_USAGE cons "Warning on D1.cons (shouldn't show)" #-}
data D2 : Set where cons : D2
{-# WARNING_ON_USAGE cons "Warning on D2.cons (should show)" #-}
open M
d2 : D2
d2 = cons -- usage of D2.cons
f2 : D2 → Set
f2 cons = A -- pattern usage of D2.cons
-- Ambiguous attachments of warnings shall apply to all disambiguations.
data Amb1 : Set where amb : Amb1
data Amb2 : Set where amb : Amb2
{-# WARNING_ON_USAGE amb "Ambiguous constructor amb was used" #-}
test1 : Amb1
test1 = amb
test2 : Amb2
test2 = amb
|
Require Import Crypto.Arithmetic.PrimeFieldTheorems.
Require Import Crypto.Specific.solinas64_2e338m15_6limbs.Synthesis.
(* TODO : change this to field once field isomorphism happens *)
Definition carry :
{ carry : feBW_loose -> feBW_tight
| forall a, phiBW_tight (carry a) = (phiBW_loose a) }.
Proof.
Set Ltac Profiling.
Time synthesize_carry ().
Show Ltac Profile.
Time Defined.
Print Assumptions carry.
|
[GOAL]
α : Type u_1
r : α → α → Prop
inst✝³ : DecidableRel r
inst✝² : IsTrans α r
inst✝¹ : IsAntisymm α r
inst✝ : IsTotal α r
s : Multiset α
a : α
⊢ a ∈ sort r s ↔ a ∈ s
[PROOFSTEP]
rw [← mem_coe, sort_eq]
|
Formal statement is: lemma uniformly_continuous_on_extension_at_closure: fixes f::"'a::metric_space \<Rightarrow> 'b::complete_space" assumes uc: "uniformly_continuous_on X f" assumes "x \<in> closure X" obtains l where "(f \<longlongrightarrow> l) (at x within X)" Informal statement is: If $f$ is uniformly continuous on $X$, then it has a limit at every point of the closure of $X$. |
import h5py
import numpy as np
import xarray as xr
from pathlib import Path
import brainio_collection
from brainio_base.assemblies import NeuronRecordingAssembly
from brainio_contrib.packaging import package_data_assembly
from mkgu_packaging.dicarlo.kar2018 import filter_neuroids
def load_responses(response_file, additional_coords):
responses = h5py.File(response_file, 'r')
assemblies = []
neuroid_id_offset = 0
for monkey in responses.keys():
spike_rates = responses[monkey]['rates']
assembly = xr.DataArray(spike_rates.value,
coords={**{
'image_num': ('image_id', list(range(spike_rates.shape[0]))),
'neuroid_id': ('neuroid', list(
range(neuroid_id_offset, neuroid_id_offset + spike_rates.shape[1]))),
'region': ('neuroid', ['IT'] * spike_rates.shape[1]),
'monkey': ('neuroid', [monkey] * spike_rates.shape[1]),
'repetition': list(range(spike_rates.shape[2])),
}, **additional_coords},
dims=['image_id', 'neuroid', 'repetition'])
assemblies.append(assembly)
neuroid_id_offset += spike_rates.shape[1]
assembly = xr.concat(assemblies, 'neuroid')
assembly = assembly.stack(presentation=['image_id', 'repetition'])
assembly = NeuronRecordingAssembly(assembly)
assert len(assembly['presentation']) == 640 * 63
assert len(np.unique(assembly['image_id'])) == 640
assert len(assembly.sel(monkey='nano')['neuroid']) == len(assembly.sel(monkey='magneto')['neuroid']) == 288
assert len(assembly['neuroid']) == len(np.unique(assembly['neuroid_id'])) == 288 * 2
# filter noisy electrodes
assembly = filter_neuroids(assembly, threshold=.7)
# add time info
assembly = assembly.expand_dims('time_bin')
assembly['time_bin_start'] = 'time_bin', [70]
assembly['time_bin_end'] = 'time_bin', [170]
assembly = assembly.transpose('presentation', 'neuroid', 'time_bin')
return assembly
def load_stimuli_ids(data_dir):
# these stimuli_ids are SHA1 hashes on generative parameters
stimuli_ids = h5py.File(data_dir / 'hvm640_ids.mat', 'r')
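# each entry of 'hvm640_ids' is an HDF5 object reference to a per-image character array
# (the .mat file is presumably saved in MATLAB's HDF5-based v7.3 format); the comprehension
# below dereferences each entry and joins its characters into a Python string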
stimuli_ids = [''.join(chr(c) for c in stimuli_ids[stimuli_ids['hvm640_ids'].value[0, i]])
for i in range(stimuli_ids['hvm640_ids'].value[0].size)]
# we use the filenames to reference into our packaged StimulusSet ids
stimuli_filenames = h5py.File(data_dir / 'hvm640_names.mat', 'r')
stimuli_filenames = [''.join(chr(c) for c in stimuli_filenames[stimuli_filenames['hvm640_img_names'].value[0, i]])
for i in range(stimuli_filenames['hvm640_img_names'].value[0].size)]
# the stimuli_ids in our packaged StimulusSets are SHA1 hashes on pixels.
# we thus need to reference between those two ids.
packaged_stimuli = brainio_collection.get_stimulus_set('dicarlo.hvm')
reference_table = {row.image_file_name: row.image_id for row in packaged_stimuli.itertuples()}
referenced_ids = [reference_table[filename] for filename in stimuli_filenames]
return {'image_id': ('image_id', referenced_ids),
'image_generative_id': ('image_id', stimuli_ids)}
def main():
data_dir = Path(__file__).parent / 'hvm'
stimuli_ids = load_stimuli_ids(data_dir)
assembly = load_responses(data_dir / 'hvm640_neural.h5', additional_coords=stimuli_ids)
assembly.name = 'dicarlo.Kar2018hvm'
package_data_assembly(assembly, data_assembly_name=assembly.name, stimulus_set_name='dicarlo.hvm',
bucket_name='brainio-dicarlo')
if __name__ == '__main__':
main()
|
data Tree elem = Empty
| Node (Tree elem) elem (Tree elem)
%name Tree tree, tree1
insert : Ord elem => elem -> Tree elem -> Tree elem
insert x Empty = Node Empty x Empty
insert x orig@(Node left val right) = case compare x val of
LT => Node (insert x left) val right
EQ => orig
GT => Node left val (insert x right)
{-data BSTree : Type -> Type where
Empty : Ord elem => BSTree elem
Node : Ord elem => (left : BSTree elem) -> (val : elem) ->
(right : BSTree elem) -> BSTree elem
insert : elem -> BSTree elem -> BSTree elem
insert x Empty = Node Empty x Empty
insert x orig@(Node left val right) = case compare x val of
LT => Node (insert x left) val right
EQ => orig
GT => Node left val (insert x right)
-}
listToTree : Ord a => List a -> Tree a
listToTree [] = Empty
listToTree (x :: xs) = insert x (listToTree xs)
treeToList : Tree a -> List a
treeToList Empty = []
treeToList (Node left x right) = treeToList left ++ [x] ++ treeToList right
data Expr : Type where
Val : Int -> Expr
Add : Expr -> Expr -> Expr
Sub : Expr -> Expr -> Expr
Mult : Expr -> Expr -> Expr
evaluate : Expr -> Int
evaluate (Val x) = x
evaluate (Add x y) = evaluate x + evaluate y
evaluate (Sub x y) = evaluate x - evaluate y
evaluate (Mult x y) = evaluate x * evaluate y
maxMaybe : Ord a => Maybe a -> Maybe a -> Maybe a
maxMaybe Nothing Nothing = Nothing
maxMaybe Nothing (Just x) = Just x
maxMaybe (Just x) Nothing = Just x
maxMaybe (Just x) (Just y) = Just (max x y)
|
# Solow-Model with human capital and distorting taxation
**Importing relevant packages and modules**
```python
import numpy as np
from scipy import optimize
import sympy as sm
import matplotlib.pyplot as plt
# autoreload modules when code is run
%load_ext autoreload
%autoreload 2
#XD FOR SMART UPLOAD
# local modules
# import modelproject if code is used from another notebook or file
```
# Model description
The following Solow model incorporates human capital accumulation as well as distorting taxation.
The model consists of the following equations:
1 $$ N_t=(1-\tau)^{\eta}L_t, \text{ } 0<\eta<1,$$
2 $$H_{t+1}={\tau}w_tN_t+(1-{\delta})H_t $$
3 $$K_{t+1}=s_kY_t+(1-{\delta})K_t $$
4 $$ Y_t=K^\alpha_{t}H^\beta_{t}(A_{t}N_{t})^{1-\alpha-\beta} $$
5 $$H_t=h_{t}N_t $$
6 $$L_{t+1}=(1+n)L_t $$
7 $$A_{t+1}=(1+g)A_t $$
* $K_t$ is capital in period t
* $L_t$ is labor in period t (with a constant growth rate of $n$)
* $H_t$ is human capital in period t
* $A_t$ is technology in period t (with a constant growth rate of $g$)
* $N_t$ is the total workhours supplied in the economy in period t
* $Y_t$ is GDP
The model contains the following parameters:$$(\eta,\tau,s_k,\delta,\alpha,\beta,n,g) $$
Using the equations above, we can find the two transition curves given by:
$$ \tilde{k}_{t+1}=\left(\frac{1}{(1+n)(1+g)}\right)[s_{k}\tilde{k}^{\alpha}_{t}\tilde{h}^{\beta}_{t}(1-\tau)^{\eta(1+\alpha)}+(1-\delta)\tilde{k}_{t}] $$
and
$$ \tilde{h}_{t+1}=\left(\frac{1}{(1+n)(1+g)}\right)[\tau(1-\alpha)\tilde{k}^{\alpha}_{t}\tilde{h}^{\beta}_{t}(1-\tau)^{\eta(1+\alpha)}+(1-\delta)\tilde{h}_{t}] $$
where $\tilde{k}_{t}$ is capital per effective worker, $\tilde{k}_{t}=\frac{K_t}{A_tL_t}$,
and
$\tilde{h}_{t}$ is human capital per effective worker, $\tilde{h}_{t}=\frac{H_t}{A_tL_t}$
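As a quick illustration, the two transition curves can be iterated numerically from an arbitrary starting point until the per-effective-worker stocks settle down. The sketch below does exactly that; the parameter values are placeholders chosen only for the example (not calibrated), and the helper `transition` is ours rather than part of the model project code.
```python
# placeholder parameter values (for illustration only, not calibrated)
alpha, beta, eta = 1 / 3, 1 / 3, 0.1
tau, s_k, delta, n, g = 0.2, 0.2, 0.05, 0.01, 0.02

def transition(k_tilde, h_tilde):
    """One application of the two transition curves stated above."""
    y_tilde = k_tilde**alpha * h_tilde**beta * (1 - tau)**(eta * (1 + alpha))
    factor = 1 / ((1 + n) * (1 + g))
    k_next = factor * (s_k * y_tilde + (1 - delta) * k_tilde)
    h_next = factor * (tau * (1 - alpha) * y_tilde + (1 - delta) * h_tilde)
    return k_next, h_next

# iterate until the stocks stop changing (approximate steady state)
k, h = 1.0, 1.0
for _ in range(100_000):
    k_new, h_new = transition(k, h)
    if abs(k_new - k) < 1e-12 and abs(h_new - h) < 1e-12:
        break
    k, h = k_new, h_new

print(f"approximate steady state: k_tilde = {k:.4f}, h_tilde = {h:.4f}")
```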
```python
```
|
#include <tupleSource.h>
#include <boost/python.hpp>
using namespace boost::python;
void export_tupleSource()
{
class_<TupleSource, bases<FunctorSource>, boost::shared_ptr<TupleSource>, boost::noncopyable>
("TupleSource", init<std::string, TuplePtr>())
.def("class_name", &TupleSource::class_name)
.def("processing", &TupleSource::processing)
.def("flow_code", &TupleSource::flow_code)
;
}
|
from __future__ import print_function
import pandas as pd
import pickle
import numpy as np
from itertools import chain
from collections import OrderedDict
from time import time
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import NMF, LatentDirichletAllocation
class LDA():
def __init__(self, n_topics=10, n_features=5000, max_df=.75, min_df=2, max_iter=5, alpha=None, eta=None):
'''
Topic-model wrapper: a CountVectorizer front end feeding sklearn's LatentDirichletAllocation.
n_topics/n_features set the model size, max_df/min_df filter the vocabulary, and alpha/eta are
the doc-topic and topic-word priors (sklearn defaults are used when they are None).
'''
self.n_topics = n_topics
self.n_features = n_features
self.max_df = max_df
self.min_df = min_df
self.max_iter = max_iter
self.lda = None
self.tf = None
self.topics = None
self.alpha = alpha
self.eta = eta
def vectorizecounts(self, docs):
'''
Build the raw term-count matrix for the given documents and store it in self.tf.
'''
# Use tf (raw term count) features for LDA.
print("Extracting tf features for LDA...")
self.tf_vectorizer = CountVectorizer(max_df=self.max_df, min_df=self.min_df, max_features=self.n_features)
t0 = time()
self.tf = self.tf_vectorizer.fit_transform(docs)
self.n_samples = len(docs)
print("done in %0.3fs." % (time() - t0))
def fitLDA(self):
'''
Fit the LDA model on the term-count matrix computed by vectorizecounts.
'''
print("Fitting LDA models with tf features, n_samples=%d and n_features=%d..."
% (self.n_samples, self.n_features))
self.lda = LatentDirichletAllocation(doc_topic_prior=self.alpha, topic_word_prior=self.eta, n_topics=self.n_topics, max_iter=self.max_iter,
learning_method='online', learning_offset=10.,
random_state=0, n_jobs=6)
t0 = time()
self.topics = self.lda.fit(self.tf)
print("done in %0.3fs." % (time() - t0))
def print_top_words(self, n_top_words):
'''
Print the n_top_words highest-weighted words for each fitted topic.
'''
tf_feature_names = self.tf_vectorizer.get_feature_names()
for topic_idx, topic in enumerate(self.lda.components_):
print("Topic #%d:" % topic_idx)
print(" ".join([tf_feature_names[i]
for i in topic.argsort()[:-n_top_words - 1:-1]]))
print()
def get_topic_content(self, topic):
'''
Parameters
--------------
topic: int
Topic index
Returns
-----------
feature_names : list
Array of words corresponding to the given feature.
topic_content : np.array(n_features)
Topic vector over the feature space
'''
return self.tf_vectorizer.get_feature_names(), self.lda.components_[topic]
def get_doc_topics(self, docs):
# Convert the documents into the already-fitted feature space and infer their topic mixtures
# (transform, not fit_transform: re-fitting here would discard the trained vocabulary and model).
feature_vec = self.tf_vectorizer.transform(docs)
return self.lda.transform(feature_vec)
def LoadLDAModel(path):
with open(path, 'rb') as f:
return pickle.load(f)
def SaveLDAModel(path, lda_model):
# Save the model
with open(path, 'wb') as f:
pickle.dump(lda_model, f) |
%-----------------------------------------------------------------------------
%
% Template for sigplanconf LaTeX Class
%
% Name: sigplanconf-template.tex
%
% Purpose: A template for sigplanconf.cls, which is a LaTeX 2e class
% file for SIGPLAN conference proceedings.
%
% Guide: Refer to "Author's Guide to the ACM SIGPLAN Class,"
% sigplanconf-guide.pdf
%
% Author: Paul C. Anagnostopoulos
% Windfall Software
% 978 371-2316
% [email protected]
%
% Created: 15 February 2005
%
%-----------------------------------------------------------------------------
\documentclass[preprint]{sigplanconf}
% The following \documentclass options may be useful:
% preprint Remove this option only once the paper is in final form.
% 10pt To set in 10-point type instead of 9-point.
% 11pt To set in 11-point type instead of 9-point.
% authoryear To obtain author/year citation style instead of numeric.
\usepackage{amsmath}
% What we've added
\usepackage{fancyvrb}
\usepackage{pifont}
\usepackage{graphicx}
\usepackage{rotating}
\usepackage{multirow}
\usepackage{hyperref}
\usepackage{amsthm}
\usepackage[x11names]{xcolor}
\usepackage{framed}
\usepackage{caption}
\usepackage{subcaption}
\usepackage{comment}
\usepackage{float}
\usepackage{auxhook} % move to our own FPG package
\usepackage{refcount}
\usepackage{enumerate}
\usepackage{lipsum}
%\usepackage{git-repo-version}
%\usepackage{peer-review}
\usepackage{bigfoot} % for verbatim in footnotes
% from http://ctan.mackichan.com/graphics/pgf/contrib/pgf-umlsd/pgf-umlsd.sty,
% from https://www.ctan.org/tex-archive/graphics/pgf/contrib/pgf-umlsd?lang=en
\usepackage[underline=false,roundedcorners=false]{pgf-umlsd}
\usepackage{tikz}
\usetikzlibrary{shapes.symbols}
\usetikzlibrary{patterns}
\usetikzlibrary{decorations.markings}
\usetikzlibrary{decorations.pathmorphing}
\usetikzlibrary{decorations.pathreplacing}
%\usepackage{parskip} % too crude
% from http://tex.stackexchange.com/questions/191572/beginframed-with-background-color
%\colorlet{shadecolor}{LavenderBlush2}
%\colorlet{framecolor}{Red1}
\definecolor{col:sync}{RGB}{120,0,0}
\definecolor{col:async}{RGB}{0,120,0}
\definecolor{col:nt}{RGB}{120,0,0}
\newenvironment{frshaded}{%
\def\FrameCommand{\fboxrule=\FrameRule\fboxsep=\FrameSep \fcolorbox{black}{white}}%
\MakeFramed {\FrameRestore}}%
{\endMakeFramed}
\captionsetup[subfigure]{labelformat=simple}
\renewcommand\thesubfigure{(\alph{subfigure})}
\newcommand{\cursor}{%
\begin{center}
\color{blue}
\marginpar{\vskip 4pt\sc Cursor}
\begin{tabular*}{\linewidth}{c}
\hline
\end{tabular*}
\end{center}
}
\newcommand{\todo}[2][0pt]{%
\marginpar[\raggedleft\vskip #1\tiny\color{red} #2]{\raggedright\vskip #1\tiny\color{red} #2}%
}
\newcommand{\good}[1][-0.7cm]{%
\todo[#1]{\Huge\ding{51}}%
}
\newcommand{\bad}[1][-0.7cm]{%
\todo[#1]{\Huge\ding{55}}%
}
\newcommand{\sectionline}{%
\nointerlineskip \vspace{\baselineskip}%
~\hspace{\fill}\rule{0.5\columnwidth}{.7pt}\hspace{\fill}~%
\par\nointerlineskip \vspace{\baselineskip}
}
\DefineVerbatimEnvironment%
{Code}{Verbatim}
{samepage=true,commandchars=\\\{\},fontsize=\small,xleftmargin=1em}
\DefineVerbatimEnvironment%
{Code*}{Verbatim}
{samepage=true,commandchars=\\\<\>,fontsize=\small,xleftmargin=1em}
\DefineVerbatimEnvironment%
{SmallCode}{Verbatim}
{samepage=true,commandchars=\\\{\},fontsize=\scriptsize,xleftmargin=1em}
% {frame=lines,samepage=true,commandchars=\\\{\}}
\DefineShortVerb{\|}
\SaveVerb{Slash}|\|
\newcommand{\lam}{\UseVerb{Slash}} % for use inside {Code}
\SaveVerb{OpenSB}|[|
\newcommand{\opensb}{\UseVerb{OpenSB}} % for use inside {Code}
\SaveVerb{CloseSB}|]|
\newcommand{\closesb}{\UseVerb{CloseSB}} % for use inside {Code}
\SaveVerb{OpenCB}|{|
\newcommand{\opencb}{\UseVerb{OpenCB}} % for use inside {Code}
\SaveVerb{CloseCB}|}|
\newcommand{\closecb}{\UseVerb{CloseCB}} % for use inside {Code}
\UndefineShortVerb{\|}
\newcommand{\hackage}[2][]{{\tt #2}#1~\cite{hackage:#2}}
\newcommand{\github}[2][]{{\tt #2}#1~\cite{github:#2}}
\newtheorem*{definition}{Definition}
\newtheorem*{myproof}{Proof}
\newtheorem*{mytheorem}{Theorem}
\newcommand{\remotemonad}{remote monad}
\newcommand{\remoteaf}{remote applicative functor}
\newcommand{\IO}{{\tt IO}}
\newcommand{\BlankCanvas}{Blank Canvas}
\newcommand{\Canvas}{{\tt Canvas}}
% This should be a common sty file?
\makeatletter
\newcounter{qfirstline}
\newcounter{qfinalline}
\newcommand{\quotecode}[3]{
\ifnum0<0#2\relax% if start is a number
\setcounter{qfinalline}{#2}
\addtocounter{qfinalline}{#3}
\addtocounter{qfinalline}{-1}
\VerbatimInput[samepage=true,firstline=#2,lastline=\theqfinalline,fontsize=\small,xleftmargin=1em]{#1}%
\else% otherwise we need a reference
\immediate\write\@auxout{\string\coderef{#1 #2}}
\setcounterref{qfirstline}{#1 #2}
\setcounterref{qfinalline}{#1 #2}
\addtocounter{qfinalline}{#3}
\addtocounter{qfinalline}{-1}
\ifnum\theqfirstline=0\relax
\vspace{0.2in}
(#1)
\else%
\VerbatimInput[samepage=true, firstline=\theqfirstline, lastline=\theqfinalline, fontsize=\small, xleftmargin=1em]{#1}%
\fi
% does not work yet
\fi%
}
% was going to add \noident, but its messy
% add the ability to have a coderef
\AddLineBeginAux{%
\string\providecommand\string\coderef[1]{}%
}
\def\codelabel#1#2{\@bsphack
\protected@write\@auxout{}%
{\string\newlabel{#1}{{#2}{\thepage}{}{Doc-Start}{}}}%
\@esphack}
\makeatother
% pull in the code references
\input{coderef.aux}
\begin{document}
\special{papersize=8.5in,11in}
\setlength{\pdfpageheight}{\paperheight}
\setlength{\pdfpagewidth}{\paperwidth}
\conferenceinfo{CONF 'yy}{Month d--d, 20yy, City, ST, Country}
\copyrightyear{20yy}
\copyrightdata{978-1-nnnn-nnnn-n/yy/mm}
\doi{nnnnnnn.nnnnnnn}
% Uncomment one of the following two, if you are not going for the
% traditional copyright transfer agreement.
\toappear{Copyright held by author(s).
This is the referee's version of the paper, for the purposes of peer-review.}
%\exclusivelicense % ACM gets exclusive license to publish,
% you retain copyright
%\permissiontopublish % ACM gets nonexclusive license to publish
% (paid open-access papers,
% short abstracts)
%\titlebanner{Early Draft} % These are ignored unless
%\preprintfooter{Early Draft} % 'preprint' option specified.
\title{High Performance FRP}
%\subtitle{A Foreign Function Interface to the JavaScript Canvas API}
\authorinfo{Ryan Trinkle%\titlenote{Corresponding author}
\and Jeffrey Rosenbluth}%\titlenote
% {Information and Telecommunication Technology Center, University of Kansas}
%{[email protected]}
\maketitle
\begin{abstract}
Abstract
\end{abstract}
\category{D.3.2}{Programming Languages}
{Language Classifications}
[Applicative (functional) languages]
\keywords
FRP, Functional, Reactive.
%Internet of Things.
\section{Introduction}
\begin{Code}
type Event a = Time -> Maybe a
instance Functor Behavior where
fmap f b = \lam t -> f . b \$ t
instance Applicative Behavior where
pure a = const a
f <*> x = \lam t -> (f t) (x t)
instance Monad Behavior where
return = pure
f >>= k = \lam t -> k (f t) t
instance Functor Event where
fmap f e = \lam t -> f <\$> e t
never :: Event a
never = \lam t -> Nothing
\end{Code}
\end{document} |
module Impure.LFRef.Syntax where
open import Prelude
open import Data.Vec hiding ([_]; map)
open import Data.List hiding ([_])
open import Data.List.All hiding (lookup)
data Term : (n : ℕ) → Set
data Type : (n : ℕ) → Set
data Term where
var : ∀ {n} → Fin n → Term n
loc : ∀ {n} → ℕ → Term n
unit : ∀ {n} → Term n
-- constructor application
con : ∀ {n} → (fn : ℕ) → (ts : List (Term n)) → Term n
infixl 30 _·★_
data Exp : ℕ → Set where
-- basic lambda expressions
tm : ∀ {n} → Term n → Exp n
-- function calls
_·★_ : ∀ {n} → (fn : ℕ) → (as : List (Term n)) → Exp n
-- heap manipulation
ref : ∀ {n} → Exp n → Exp n
!_ : ∀ {n} → Exp n → Exp n
_≔_ : ∀ {n} → Exp n → Exp n → Exp n
data SeqExp : ℕ → Set where
lett : ∀ {n} → (x : Exp n) → (e : SeqExp (suc n)) → SeqExp n
ret : ∀ {n} → Exp n → SeqExp n
data Val : Term 0 → Set where
loc : ∀ {i} → Val (loc i)
unit : Val unit
con : ∀ {k ts} → Val (con k ts)
data ExpVal : Exp zero → Set where
tm : ∀ {t} → Val t → ExpVal (tm t)
data SeqExpVal : SeqExp zero → Set where
ret-tm : ∀ {t} → Val t → SeqExpVal (ret (tm t))
-- telescoped contexts/arguments
data Tele : (n m : ℕ) → Set where
ε : ∀ {n} → Tele n 0
_⟶_ : ∀ {m n} → Type n → Tele (suc n) m → Tele n (suc m)
infixl 20 _[_]
data Type where
_[_] : ∀ {n} → ℕ → (ts : List (Term n)) → Type n
Ref : ∀ {n} → (A : Type n) → Type n
Unit : ∀ {n} → Type n
Store : Set
Store = List (∃ Val)
record ConType : Set where
field
m : ℕ
args : Tele 0 m
tp : ℕ
indices : List (Term m)
record Fun : Set where
field
m : ℕ
args : Tele 0 m
returntype : Type m
body : Exp m
record Sig : Set where
field
types : List (∃ (Tele 0))
constructors : List ConType
funs : List Fun
open import Data.Fin.Substitution
module App {T} (l : Lift T Term) where
open Lift l
_tp/_ : ∀ {n n'} → Type n → Sub T n n' → Type n'
_/_ : ∀ {n n'} → Term n → Sub T n n' → Term n'
var x / s = lift $ lookup x s
unit / s = unit
_/_ {n} {n'} (con c ts) s = con c (map/ ts)
where
-- inlined for termination checker..
map/ : List (Term n) → List (Term n')
map/ [] = []
map/ (x ∷ ts₁) = x / s ∷ map/ ts₁
loc x / s = loc x
_tele/_ : ∀ {n m n'} → Tele n m → Sub T n n' → Tele n' m
ε tele/ s = ε
(x ⟶ t) tele/ s = (x tp/ s) ⟶ (t tele/ (s ↑))
_tp/_ {n} {n'} (k [ ts ]) s = k [ map/ ts ]
where
-- inlined for termination checker..
map/ : List (Term n) → List (Term n')
map/ [] = []
map/ (x ∷ ts₁) = x / s ∷ map/ ts₁
(Ref A) tp/ s = Ref (A tp/ s)
Unit tp/ s = Unit
_exp/_ : ∀ {n n'} → Exp n → Sub T n n' → Exp n'
tm x exp/ s = tm (x / s)
_exp/_ {n} {n'} (fn ·★ ts) s = fn ·★ map/ ts
where
-- inlined for termination checker..
map/ : List (Term n) → List (Term n')
map/ [] = []
map/ (x ∷ ts₁) = x / s ∷ map/ ts₁
ref x exp/ s = ref (x exp/ s)
(! x) exp/ s = ! (x exp/ s)
(y ≔ x) exp/ s = (y exp/ s) ≔ (x exp/ s)
_seq/_ : ∀ {n n'} → SeqExp n → Sub T n n' → SeqExp n'
lett x e seq/ s = lett (x exp/ s) (e seq/ (s ↑))
ret e seq/ s = ret (e exp/ s)
open Application (record { _/_ = _/_ }) using (_/✶_)
tmSubst : TermSubst Term
tmSubst = record { var = var; app = App._/_ }
open TermSubst tmSubst hiding (var) public
open App termLift using (_exp/_; _tp/_; _tele/_; _seq/_) public
|
#' Parse data for deep learning model training
#'
#' @importFrom recipes all_predictors
#' @importFrom recipes all_outcomes
#'
#' @param input_data A dataframe containing the input data.
#' @param partitioning_type A character string indicating the desired spatial data partitioning method. Can be "default", "block", "checkerboard1", or "checkerboard2".
#'
#' @return A list containing the scaled training and test predictor tables ("train_tbl", "test_tbl"), the corresponding label vectors ("y_train_vec", "y_test_vec"), and the fitted recipe object ("rec_obj").
#' @examples
#' \dontrun{
#' # download benchmarking data
#' benchmarking_data <- get_benchmarking_data("Lynx lynx",
#' limit = 1500)
#'
#' # transform benchmarking data into a format suitable for deep learning
#' # if you have previously used a partitioning method you should specify it here
#' benchmarking_data_dl <- prepare_dl_data(input_data = benchmarking_data$df_data,
#' partitioning_type = "default")
#'
#' # perform sanity check on the transformed dataset
#' # for the training set
#' head(benchmarking_data_dl$train_tbl)
#' table(benchmarking_data_dl$y_train_vec)
#'
#' # for the test set
#' head(benchmarking_data_dl$test_tbl)
#' table(benchmarking_data_dl$y_test_vec)
#'}
#'@export
prepare_dl_data <- function(input_data, partitioning_type) {
if (partitioning_type %in% c("checkerboard1", "checkerboard2")) {
input_data$grp_checkerboard <- NULL
input_data$label <- as.integer(input_data$label)
# fix coercion error (for plotting)
input_data$label <- ifelse(input_data$label == 2, 1, 0)
}
input_data$grp <- NULL
input_data <- input_data %>%
tidyr::drop_na() %>%
dplyr::select(label, dplyr::everything())
train_test_split <- rsample::initial_split(input_data, prop = 0.8)
train_tbl <- rsample::training(train_test_split)
test_tbl <- rsample::testing(train_test_split)
# create a recipe for centering and scaling
rec_obj <- recipes::recipe(label ~ ., data = train_tbl) %>%
recipes::step_center(all_predictors(),
-all_outcomes()) %>%
recipes::step_scale(all_predictors(), -all_outcomes()) %>%
recipes::prep(data = train_tbl)
# use recipe
x_train_tbl <- recipes::bake(rec_obj, new_data = train_tbl) %>%
dplyr::select(-label)
x_test_tbl <- recipes::bake(rec_obj, new_data = test_tbl) %>%
dplyr::select(-label)
y_train_vec <- train_tbl$label
y_test_vec <- test_tbl$label
result_list <- list(train_tbl = x_train_tbl,
test_tbl = x_test_tbl,
y_train_vec = y_train_vec,
y_test_vec = y_test_vec,
rec_obj = rec_obj)
return(result_list)
}
|
------------------------------------------------------------------------
-- The Agda standard library
--
-- The free monad construction on indexed containers
------------------------------------------------------------------------
module Data.Container.Indexed.FreeMonad where
open import Level
open import Function hiding (const)
open import Category.Monad.Predicate
open import Data.Container.Indexed hiding (_∈_)
open import Data.Container.Indexed.Combinator hiding (id; _∘_)
open import Data.Empty
open import Data.Sum using (inj₁; inj₂)
open import Data.Product
open import Data.W.Indexed
open import Relation.Unary
open import Relation.Unary.PredicateTransformer
------------------------------------------------------------------------
infixl 9 _⋆C_
infix 9 _⋆_
_⋆C_ : ∀ {i o c r} {I : Set i} {O : Set o} →
Container I O c r → Pred O c → Container I O _ _
C ⋆C X = const X ⊎ C
_⋆_ : ∀ {ℓ} {O : Set ℓ} → Container O O ℓ ℓ → Pt O ℓ
C ⋆ X = μ (C ⋆C X)
pattern returnP x = (inj₁ x , _)
pattern doP c k = (inj₂ c , k)
do : ∀ {ℓ} {O : Set ℓ} {C : Container O O ℓ ℓ} {X} →
⟦ C ⟧ (C ⋆ X) ⊆ C ⋆ X
do (c , k) = sup (doP c k)
rawPMonad : ∀ {ℓ} {O : Set ℓ} {C : Container O O ℓ ℓ} →
RawPMonad {ℓ = ℓ} (_⋆_ C)
rawPMonad {C = C} = record
{ return? = return
; _=<?_ = _=<<_
}
where
return : ∀ {X} → X ⊆ C ⋆ X
return x = sup (inj₁ x , ⊥-elim ∘ lower)
_=<<_ : ∀ {X Y} → X ⊆ C ⋆ Y → C ⋆ X ⊆ C ⋆ Y
f =<< sup (returnP x) = f x
f =<< sup (doP c k) = do (c , λ r → f =<< k r)
leaf : ∀ {ℓ} {O : Set ℓ} {C : Container O O ℓ ℓ} {X : Pred O ℓ} →
⟦ C ⟧ X ⊆ C ⋆ X
leaf (c , k) = do (c , return? ∘ k)
where
open RawPMonad rawPMonad
|
State Before: α : Type u_1
β : Type ?u.188274
ι : Type ?u.188277
G : Type ?u.188280
M : Type u_2
N : Type ?u.188286
inst✝¹ : CommMonoid M
inst✝ : CommMonoid N
f : α → M
p : α → Prop
t : Finset α
h : ∀ {x : α}, f x ≠ 1 → (p x ↔ x ∈ t)
⊢ (∏ᶠ (i : α) (_ : p i), f i) = ∏ i in t, f i State After: α : Type u_1
β : Type ?u.188274
ι : Type ?u.188277
G : Type ?u.188280
M : Type u_2
N : Type ?u.188286
inst✝¹ : CommMonoid M
inst✝ : CommMonoid N
f : α → M
p : α → Prop
t : Finset α
h : ∀ {x : α}, f x ≠ 1 → (p x ↔ x ∈ t)
s : Set α := {x | p x}
⊢ (∏ᶠ (i : α) (_ : p i), f i) = ∏ i in t, f i Tactic: set s := { x | p x } State Before: α : Type u_1
β : Type ?u.188274
ι : Type ?u.188277
G : Type ?u.188280
M : Type u_2
N : Type ?u.188286
inst✝¹ : CommMonoid M
inst✝ : CommMonoid N
f : α → M
p : α → Prop
t : Finset α
h : ∀ {x : α}, f x ≠ 1 → (p x ↔ x ∈ t)
s : Set α := {x | p x}
⊢ (∏ᶠ (i : α) (_ : p i), f i) = ∏ i in t, f i State After: α : Type u_1
β : Type ?u.188274
ι : Type ?u.188277
G : Type ?u.188280
M : Type u_2
N : Type ?u.188286
inst✝¹ : CommMonoid M
inst✝ : CommMonoid N
f : α → M
p : α → Prop
t : Finset α
h : ∀ {x : α}, f x ≠ 1 → (p x ↔ x ∈ t)
s : Set α := {x | p x}
this : mulSupport (mulIndicator s f) ⊆ ↑t
⊢ (∏ᶠ (i : α) (_ : p i), f i) = ∏ i in t, f i Tactic: have : mulSupport (s.mulIndicator f) ⊆ t := by
rw [Set.mulSupport_mulIndicator]
intro x hx
exact (h hx.2).1 hx.1 State Before: α : Type u_1
β : Type ?u.188274
ι : Type ?u.188277
G : Type ?u.188280
M : Type u_2
N : Type ?u.188286
inst✝¹ : CommMonoid M
inst✝ : CommMonoid N
f : α → M
p : α → Prop
t : Finset α
h : ∀ {x : α}, f x ≠ 1 → (p x ↔ x ∈ t)
s : Set α := {x | p x}
this : mulSupport (mulIndicator s f) ⊆ ↑t
⊢ (∏ᶠ (i : α) (_ : p i), f i) = ∏ i in t, f i State After: α : Type u_1
β : Type ?u.188274
ι : Type ?u.188277
G : Type ?u.188280
M : Type u_2
N : Type ?u.188286
inst✝¹ : CommMonoid M
inst✝ : CommMonoid N
f : α → M
p : α → Prop
t : Finset α
h : ∀ {x : α}, f x ≠ 1 → (p x ↔ x ∈ t)
s : Set α := {x | p x}
this : mulSupport (mulIndicator s f) ⊆ ↑t
⊢ ∏ i in t, mulIndicator s f i = ∏ i in t, f i Tactic: erw [finprod_mem_def, finprod_eq_prod_of_mulSupport_subset _ this] State Before: α : Type u_1
β : Type ?u.188274
ι : Type ?u.188277
G : Type ?u.188280
M : Type u_2
N : Type ?u.188286
inst✝¹ : CommMonoid M
inst✝ : CommMonoid N
f : α → M
p : α → Prop
t : Finset α
h : ∀ {x : α}, f x ≠ 1 → (p x ↔ x ∈ t)
s : Set α := {x | p x}
this : mulSupport (mulIndicator s f) ⊆ ↑t
⊢ ∏ i in t, mulIndicator s f i = ∏ i in t, f i State After: α : Type u_1
β : Type ?u.188274
ι : Type ?u.188277
G : Type ?u.188280
M : Type u_2
N : Type ?u.188286
inst✝¹ : CommMonoid M
inst✝ : CommMonoid N
f : α → M
p : α → Prop
t : Finset α
h : ∀ {x : α}, f x ≠ 1 → (p x ↔ x ∈ t)
s : Set α := {x | p x}
this : mulSupport (mulIndicator s f) ⊆ ↑t
x : α
hx : x ∈ t
hxs : ¬x ∈ s
⊢ f x = 1 Tactic: refine' Finset.prod_congr rfl fun x hx => mulIndicator_apply_eq_self.2 fun hxs => _ State Before: α : Type u_1
β : Type ?u.188274
ι : Type ?u.188277
G : Type ?u.188280
M : Type u_2
N : Type ?u.188286
inst✝¹ : CommMonoid M
inst✝ : CommMonoid N
f : α → M
p : α → Prop
t : Finset α
h : ∀ {x : α}, f x ≠ 1 → (p x ↔ x ∈ t)
s : Set α := {x | p x}
this : mulSupport (mulIndicator s f) ⊆ ↑t
x : α
hx : x ∈ t
hxs : ¬x ∈ s
⊢ f x = 1 State After: α : Type u_1
β : Type ?u.188274
ι : Type ?u.188277
G : Type ?u.188280
M : Type u_2
N : Type ?u.188286
inst✝¹ : CommMonoid M
inst✝ : CommMonoid N
f : α → M
p : α → Prop
t : Finset α
h : ∀ {x : α}, f x ≠ 1 → (p x ↔ x ∈ t)
s : Set α := {x | p x}
this : mulSupport (mulIndicator s f) ⊆ ↑t
x : α
hx : x ∈ t
hxs : f x ≠ 1
⊢ x ∈ {x | p x} Tactic: contrapose! hxs State Before: α : Type u_1
β : Type ?u.188274
ι : Type ?u.188277
G : Type ?u.188280
M : Type u_2
N : Type ?u.188286
inst✝¹ : CommMonoid M
inst✝ : CommMonoid N
f : α → M
p : α → Prop
t : Finset α
h : ∀ {x : α}, f x ≠ 1 → (p x ↔ x ∈ t)
s : Set α := {x | p x}
this : mulSupport (mulIndicator s f) ⊆ ↑t
x : α
hx : x ∈ t
hxs : f x ≠ 1
⊢ x ∈ {x | p x} State After: no goals Tactic: exact (h hxs).2 hx State Before: α : Type u_1
β : Type ?u.188274
ι : Type ?u.188277
G : Type ?u.188280
M : Type u_2
N : Type ?u.188286
inst✝¹ : CommMonoid M
inst✝ : CommMonoid N
f : α → M
p : α → Prop
t : Finset α
h : ∀ {x : α}, f x ≠ 1 → (p x ↔ x ∈ t)
s : Set α := {x | p x}
⊢ mulSupport (mulIndicator s f) ⊆ ↑t State After: α : Type u_1
β : Type ?u.188274
ι : Type ?u.188277
G : Type ?u.188280
M : Type u_2
N : Type ?u.188286
inst✝¹ : CommMonoid M
inst✝ : CommMonoid N
f : α → M
p : α → Prop
t : Finset α
h : ∀ {x : α}, f x ≠ 1 → (p x ↔ x ∈ t)
s : Set α := {x | p x}
⊢ s ∩ mulSupport f ⊆ ↑t Tactic: rw [Set.mulSupport_mulIndicator] State Before: α : Type u_1
β : Type ?u.188274
ι : Type ?u.188277
G : Type ?u.188280
M : Type u_2
N : Type ?u.188286
inst✝¹ : CommMonoid M
inst✝ : CommMonoid N
f : α → M
p : α → Prop
t : Finset α
h : ∀ {x : α}, f x ≠ 1 → (p x ↔ x ∈ t)
s : Set α := {x | p x}
⊢ s ∩ mulSupport f ⊆ ↑t State After: α : Type u_1
β : Type ?u.188274
ι : Type ?u.188277
G : Type ?u.188280
M : Type u_2
N : Type ?u.188286
inst✝¹ : CommMonoid M
inst✝ : CommMonoid N
f : α → M
p : α → Prop
t : Finset α
h : ∀ {x : α}, f x ≠ 1 → (p x ↔ x ∈ t)
s : Set α := {x | p x}
x : α
hx : x ∈ s ∩ mulSupport f
⊢ x ∈ ↑t Tactic: intro x hx State Before: α : Type u_1
β : Type ?u.188274
ι : Type ?u.188277
G : Type ?u.188280
M : Type u_2
N : Type ?u.188286
inst✝¹ : CommMonoid M
inst✝ : CommMonoid N
f : α → M
p : α → Prop
t : Finset α
h : ∀ {x : α}, f x ≠ 1 → (p x ↔ x ∈ t)
s : Set α := {x | p x}
x : α
hx : x ∈ s ∩ mulSupport f
⊢ x ∈ ↑t State After: no goals Tactic: exact (h hx.2).1 hx.1 |
If $f$ is a holomorphic function on $\mathbb{C}$ and $\lim_{z \to \infty} \frac{1}{f(z)} = l$, then $f$ is a polynomial. |
State Before: R : Type u
S : Type v
T : Type w
a b : R
n : ℕ
inst✝² : CommRing R
p✝ q✝ : R[X]
inst✝¹ : Ring S
inst✝ : Algebra R S
p q : R[X]
hq : Monic q
x : S
hx : ↑(aeval x) q = 0
⊢ ↑(aeval x) (p %ₘ q) = ↑(aeval x) p State After: no goals Tactic: rw [modByMonic_eq_sub_mul_div p hq, _root_.map_sub, _root_.map_mul, hx, MulZeroClass.zero_mul,
sub_zero] |
%% Band-Pass filter visualization
[b, a] = butter(4, [0.5 50] / 100, 'bandpass');
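% butter's band edges are normalized by the Nyquist frequency; the frequency axis built
% below implies Nyquist = 100 Hz (i.e. fs = 200 Hz), so this is a 0.5-50 Hz pass band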
y = filter(b, a, cnt.x);
cnt.x=cnt.x(:,1:34);
cnt.clab=cnt.clab(:,1:34);
f_sz=ceil(length(cnt.x)/2);
f=100*linspace(0,1,f_sz);
f_X=fft(cnt.x);
f_y=fft(y);
subplot(2,1,1)
stem(f,abs(f_X(1:f_sz)));
title('Original signal');
xlabel('frequency');
xlim([0 50]);
ylabel('power');
subplot(2,1,2)
stem(f,abs(f_y(1:f_sz)));
xlim([0 50]);
title('Application of the 0.5-50 Hz bandpass filter')
xlabel('frequency');
ylabel('power'); |
PROGRAM BIG_CSP_GALAXEV
! Computes spectral energy distribution as a function of time for a
! Composite Stellar Population (CSP) by performing a convolution integral
! of the spectral energy distributions for a Single Stellar Population SSP
! with the chosen Star Formation Rate (SFR).
! Modified in Toulouse: 6/8/97 to include both convolution of sed and spectral indices
! Modified version of csp_galaxev to allow for large SFH files entered by user.
! GBA, August 1st, 2013
! Array declarations
include 'jb.dec'
include 'csp.dec'
character ans,name*256,save*256,aux*512,atlas
logical stelib
real w(imw),h(imw)
real tauv,mu_d,tv,fext,fi(40),fc(40),ta(0:jts)
real snr(0:jts),pnr(0:jts), bh(0:jts), sn(0:jts), wd(0:jts), rm(0:jts)
real bol(0:jts),str(0:jts), sf(0:jts),evf(0:jts),gas(0:jts),gal(0:jts),xml(0:jts),xdp(0:jts)
real fxu(0:jts),fxg(0:jts),fxr(0:jts),fxi(0:jts),fxz(0:jts),fxk(0:jts),tofr(0:jts),bolmr(0:jts)
real ufwa,gfwa,rfwa,ifwa,zfwa,kfwa,ufwla,gfwla,rfwla,ifwla,zfwla,kfwla,mwa,mwla
common /w_ages/ fxu,fxg,fxr,fxi,fxz,fxk,ufwa,gfwa,rfwa,ifwa,zfwa,kfwa,ufwla,gfwla,rfwla,ifwla,zfwla,kfwla,mwa,mwla
data ihrd/0/,atlas/' '/
! Check if correct filter file is in use.
j=ifilter()
if (j.eq.0) then
write (6,'(2a)') char(7),'Please assign correct filter file'
write (6,'(2a)') char(7),'Use command: stdfilt'
stop
endif
! Ask for file name
save='_'
1 call copyright(6)
2 l=margo(save)
write (6,'(x,3a,$)') 'BC_GALAXEV SSP sed in file [',save(1:l),'] = '
read (5,'(a)',end=10) name
if (largo(name).eq.0) then
name=save
else
! Check if interpolation in metallicity of SSP is requested
call interpolate_ssp(name)
endif
call s500('k','',5.)
call chaext(name,'ised',mm)
iread=0
if (name.ne.save) iread=1
! lr=index(name,'_lr_')
open (1,file=name,form='unformatted',status='old',err=3)
close (1)
jdef=jjdef(name)
! Ask for dust attenuation parameters
write (6,'(/x,a,$)') 'Include attenuation by dust? Y/[N] '
read (5,'(a)',end=10) ans
if (ans.ne.'y'.and.ans.ne.'Y') then
tauv=0.
mu_d=0.
else
13 write (6,'(/x,a)') '[using simple 2-component model of Charlot & Fall (2000)]'
write (6,'(x,a,$)') 'Enter total effective attenuation optical depth: tau_V [1.0] = '
! read (5,'(f10.0)',err=13,end=10) tauv
read (5,'(a)',end=10) aux
if (largo(aux).eq.0) then
tauv=1.
else
read (aux,*,err=13) tauv
endif
14 write (6,'(x,a,$)') 'Enter fraction of tau_V arising from the ambient ISM: mu [0.3] = '
! read (5,'(f10.0)',err=14,end=10) mu_d
read (5,'(a)',end=10) aux
if (largo(aux).eq.0) then
mu_d=0.3
else
read (aux,*,err=14) mu_d
endif
write (6,'(x,a,f7.4)') '...using tau_V = ',tauv
write (6,'(x,a,f7.4)') ' mu = ',mu_d
endif
! write (501,*) tauv,mu_d
call s500('f','',tauv)
call s500('f','',mu_d)
! Open input sed file (read again, tb modified in case of truncated SFR)
! if (iread.ne.0) then
save=name
close (1)
! Check length of records (needed in Linux)
open (1,file=name,form='unformatted',status='old',err=3)
read (1)
read (1)
read (1,err=55) inl,(h(i),i=1,inl),inx
! write (6,*) inl,inx
imx=1
goto 56
55 imx=0
56 close (1)
open (1,file=name,form='unformatted',status='old',err=3)
! Read basic parameters from SSP input file
read (1) nsteps,(tb(i),i=0,nsteps-1),ml,mu,iseg,
& (xx(i),lm(i),um(i),baux(i),cn(i),cc(i),i=1,iseg),
& totm,totn,avs,jo,tauo,id,tcut,ttt,ttt,ttt,id,id,igw,stelib
if (jo.ne.0) then
write (6,'(x,a$)')'File does not contain an SSP. Proceed Y/[N] ? '
read (5,'(a)',end=10) ans
if (ans.ne.'y'.and.ans.ne.'Y') goto 1
endif
! Read sed from SSP file
write (6,'(2a)') ' Reading file ',name
read (1) inl,(w(i),i=1,inl)
do n=0,nsteps-1
if (imx.gt.0) then
read (1) inl,(fl(i,n),i=1,inl),inx,(fl(i,n),i=inl+1,inl+inx)
else
read (1) inl,(fl(i,n),i=1,inl)
inx=0
endif
! Attenuate by dust if requested (use Charlot & Fall, 2000, ApJ, 539, 718)
if (tauv.gt.0.) then
if (tb(n).le.1.e7) then
tv=tauv
else
tv=mu_d*tauv
endif
do i=1,inl
fl(i,n)=fl(i,n)*fext(w(i),tv)
enddo
endif
enddo
inw=inl+inx
write (6,*) inl,' wavelength points per record'
write (6,*) inx,' spectral index points per record'
write (6,*) inw,' total points per record'
write (6,*) nsteps,' time steps'
read (1,end=4) nsteps,(bflx(i),i=0,nsteps-1)
read (1,end=4) nsteps,(strm(i),i=0,nsteps-1)
read (1,end=4) nsteps,(sf(i),i=0,nsteps-1)
read (1,end=4) nsteps,(evfl(i),i=0,nsteps-1)
read (1,end=4) nsteps,(snbr(i),i=0,nsteps-1)
read (1,end=4) nsteps,(pnbr(i),i=0,nsteps-1)
read (1,end=4) nsteps,(bhtn(i),i=0,nsteps-1)
read (1,end=4) nsteps,(sntn(i),i=0,nsteps-1)
read (1,end=4) nsteps,(wdtn(i),i=0,nsteps-1)
read (1,end=4) nsteps,(rmtm(i),i=0,nsteps-1)
read (1,end=4) nsteps,(tofr(i),i=0,nsteps-1)
read (1,end=4) nsteps,(bolmr(i),i=0,nsteps-1)
read (1,end=4) nsteps,(gasms(i),i=0,nsteps-1)
read (1,end=4) nsteps,(galms(i),i=0,nsteps-1)
read (1,end=4) nsteps,(rml(i),i=0,nsteps-1)
read (1,end=4) nsteps,(rdp(i),i=0,nsteps-1)
do i=0,nsteps-1 ; toff(i) = tofr(i) ; enddo
do i=0,nsteps-1 ; bolms(i) = bolmr(i) ; enddo
4 close (1)
! Store flux in the u, g, r, i, z, and K bands for each SSP age in order
! to compute the flux weighted age of the composite population.
! filter = 120 SDSS Camera u Response Function, airmass = 1.3 (June 2001)
! filter = 121 SDSS Camera g Response Function, airmass = 1.3 (June 2001)
! filter = 122 SDSS Camera r Response Function, airmass = 1.3 (June 2001)
! filter = 123 SDSS Camera i Response Function, airmass = 1.3 (June 2001)
! filter = 124 SDSS Camera z Response Function, airmass = 1.3 (June 2001)
! filter = 57 IR K filter + Palomar 200 IR detectors + atmosphere.57
5 write (6,'(/x,a,$)') 'Compute flux weighted age in the galaxy rest frame at z [0] = '
read (5,'(f12.0)',err=5,end=10) z
! read (5,*,err=5,end=10) z
do n=0,nsteps-1
do i=1,inl
h(i)=fl(i,n)
enddo
! In the observer frame use the value of z entered above
! zu = z
! In the galaxy restframe use z = 0
zu = 0.
fxu(n) = filter_n(120,w,h,inl,zu,kerr)
fxg(n) = filter_n(121,w,h,inl,zu,kerr)
fxr(n) = filter_n(122,w,h,inl,zu,kerr)
fxi(n) = filter_n(123,w,h,inl,zu,kerr)
fxz(n) = filter_n(124,w,h,inl,zu,kerr)
fxk(n) = filter_n( 57,w,h,inl,zu,kerr)
enddo
! endif
! Ask for SFR
call sfr_0_b(1,z)
! Define time steps to compute new sed, for instance:
! tbeg=2.5E9 ; tend= 3.5e9 ; tstp=5.0e6 ; msteps=1+(tend-tbeg)/tstp ; do n=0,msteps-1 ; ta(n)=tbeg+n*tstp ; enddo
! tbeg=2.5E9 ; tend= 3.5e9 ; tstp=1.0e6 ; msteps=1+(tend-tbeg)/tstp ; do n=0,msteps-1 ; ta(n)=tbeg+n*tstp ; enddo
tbeg=0.0E9 ; tend=14.0e9 ; tstp=1.0e6 ; msteps=1+(tend-tbeg)/tstp ; do n=0,msteps-1 ; ta(n)=tbeg+n*tstp ; enddo
write (6,*) msteps,' time steps in new grid, from:',ta(0),' to',ta(msteps-1)
! Expand time scale if required
! if ((io.ne.2.and.tcut.lt.20.E9).or.(io.eq.2.and.tcut.gt.2.E9)) call expand_time_steps
if ((io.ne.2.and.tcut.lt.20.E9).or.(io.eq.2 )) call expand_time_steps
if (io.eq.7) call add_time_steps
! Ask for output file name. Open files.
! Write time scale, IMF, and wavelength scale in output file
lun=-1
ioption=99
call name_sed(lun,jun,kdist,ihrd,ioption,jdef,ldef,ml,mu,name,atlas)
! Compute convolution integral, rest frame colors, and write results for CSP
if (isingle == 0) then
! Use for standard csp models
nstart = 0
nfinal = msteps-1
write (lun) msteps,(ta(i),i=0,msteps-1),ml,mu,iseg,(xx(i),lm(i),um(i),baux(i),cn(i),cc(i),i=1,iseg),totm,totn,avs,io,tau,id,tau,tau,1.,1.,id,id,igw,stelib
else
! Use for Chen et al. SFH model
call s500('r',name,0.)
nfinal=kfinal(tsng)
nstart = nfinal
write (lun) 1,tb(nfinal),ml,mu,iseg,(xx(i),lm(i),um(i),baux(i),cn(i),cc(i),i=1,iseg),totm,totn,avs,io,tau,id,tau,tau,1.,1.,id,id,igw,stelib
endif
write (lun) inl,(w(i),i=1,inl)
do n=nstart,nfinal
age=ta(n)
if (io.gt.0) then
! Perform convolution with chosen SFR
call convolve_tx(h,age,bol(n),str(n),evf(n),snr(n),pnr(n),bh(n),sn(n),wd(n),rm(n),gas(n),gal(n),xml(n),xdp(n))
call file_w_ages(io,name,zu,age,w,h,inl,ufwa,gfwa,rfwa,ifwa,zfwa,kfwa,mwa,ufwla,gfwla,rfwla,ifwla,zfwla,kfwla,mwla)
else
! Interpolate in log t input SSP model at this age
if (age <= tb(0)) then
i=1
a=0.
elseif (age >= tb(nsteps-1)) then
i=nsteps
a=0.
else
call locate(tb,nsteps-1,age,i)
if (tb(i-1) > 0.) then
a = alog10(age/tb(i-1))/alog10(tb(i)/tb(i-1))
else
a = age/tb(i)
endif
endif
b=1.-a
do l=1,inw
h(l) = b*fl(l,i-1) + a*fl(l,i)
enddo
bol(n) = b*bflx(i-1) + a*bflx(i)
str(n) = b*strm(i-1) + a*strm(i)
evf(n) = b*evfl(i-1) + a*evfl(i)
snr(n) = b*snbr(i-1) + a*snbr(i)
pnr(n) = b*pnbr(i-1) + a*pnbr(i)
bh (n) = b*bhtn(i-1) + a*bhtn(i)
sn (n) = b*sntn(i-1) + a*sntn(i)
wd (n) = b*wdtn(i-1) + a*wdtn(i)
rm (n) = b*rmtm(i-1) + a*rmtm(i)
toff(n) = b*tofr(i-1) + a*tofr(i)
bolms(n) = b*bolmr(i-1) + a*bolmr(i)
gas(n) = b*gasms(i-1) + a*gasms(i)
gal(n) = b*galms(i-1) + a*galms(i)
xml(n) = b*rml(i-1) + a*rml(i)
xdp(n) = b*rdp(i-1) + a*rdp(i)
! write (6,*) age,b,i-1,tb(i-1),a,i,tb(i)
! read (5,*)
endif
! Compute galaxy mass
do i=1,n
! sf(n)=sfr(tb(n))
sf(n)=sfr(ta(n)) ! just for new time scale in big_csp_galaxev
enddo
! galmass=gal_mass(io,tb(n),sf(n))
galmass=gal_mass(io,ta(n),sf(n))
! Store sed. Report standard colors and Guy Worthey Spectral indices
if (isingle == 0 .or. n == nfinal) then
write (lun) inl,(h(i),i=1,inl),inx,(h(i),i=inl+1,inl+inx)
call rf_color(io,age,w,h,inl,lun,bol(n),str(n),sf(n),evf(n),snr(n),pnr(n),bh(n),sn(n),wd(n),rm(n),toff(n),bolms(n),gas(n),gal(n),xml(n),xdp(n))
! Guy Worthey Spectral indices
inx2=inx/2
do i=1,inx2
fi(i)=h(inl+i)
fc(i)=h(inl+inx2+i)
enddo
igw=0
! call gw_indices(tb(n),fi,fc,lun,igw)
call gw_indices(ta(n),fi,fc,lun,igw)
endif
! Report percent done
if (n.gt.1) call percent(6,n,nfinal,'BIG_CSP_GALAXEV ' // name(1:nargo(name)))
enddo
! Add time behaviour of various quantities
if (isingle == 0) then
write (lun) msteps,( bol(i),i=0,msteps-1)
write (lun) msteps,( str(i),i=0,msteps-1)
write (lun) msteps,( sf(i),i=0,msteps-1)
write (lun) msteps,( evf(i),i=0,msteps-1)
write (lun) msteps,( snr(i),i=0,msteps-1)
write (lun) msteps,( pnr(i),i=0,msteps-1)
write (lun) msteps,( bh(i),i=0,msteps-1)
write (lun) msteps,( sn(i),i=0,msteps-1)
write (lun) msteps,( wd(i),i=0,msteps-1)
write (lun) msteps,( rm(i),i=0,msteps-1)
write (lun) msteps,( toff(i),i=0,msteps-1)
write (lun) msteps,(bolms(i),i=0,msteps-1)
write (lun) msteps,( gas(i),i=0,msteps-1)
write (lun) msteps,( gal(i),i=0,msteps-1)
write (lun) msteps,( xml(i),i=0,msteps-1)
write (lun) msteps,( xdp(i),i=0,msteps-1)
endif
! Write command file to delete unwanted files
call delete_files(name,inl,1)
call file_w_ages(-1,name,zu,age,w,h,inl,ufwa,gfwa,rfwa,ifwa,zfwa,kfwa,mwa,ufwla,gfwla,rfwla,ifwla,zfwla,kfwla,mwla)
goto 1
3 write (6,'(x,5a)') char(7),'File ',name(1:largo(name)),' not found',char(7)
goto 2
10 end
REAL FUNCTION FEXT(X,TV)
real x,tv,tau
fext=1.
if (tv.eq.0.) return
tau=tv*( (5500./x)**0.7 )
fext=exp(-tau)
return
end
|
State Before: a b n : ℕ
n0 : 0 < n
⊢ a ^ n ∣ b ^ n ↔ a ∣ b State After: a b n : ℕ
n0 : 0 < n
h : a ^ n ∣ b ^ n
⊢ a ∣ b Tactic: refine' ⟨fun h => _, fun h => pow_dvd_pow_of_dvd h _⟩ State Before: a b n : ℕ
n0 : 0 < n
h : a ^ n ∣ b ^ n
⊢ a ∣ b State After: case inl
a b n : ℕ
n0 : 0 < n
h : a ^ n ∣ b ^ n
g0 : gcd a b = 0
⊢ a ∣ b
case inr
a b n : ℕ
n0 : 0 < n
h : a ^ n ∣ b ^ n
g0 : gcd a b > 0
⊢ a ∣ b Tactic: cases' Nat.eq_zero_or_pos (gcd a b) with g0 g0 State Before: case inr
a b n : ℕ
n0 : 0 < n
h : a ^ n ∣ b ^ n
g0 : gcd a b > 0
⊢ a ∣ b State After: case inr.intro.intro.intro.intro.intro.intro
n : ℕ
n0 : 0 < n
g : ℕ
a' b' : ℕ
g0' : 0 < g
co : coprime a' b'
h : (a' * g) ^ n ∣ (b' * g) ^ n
g0 : gcd (a' * g) (b' * g) > 0
⊢ a' * g ∣ b' * g Tactic: rcases exists_coprime' g0 with ⟨g, a', b', g0', co, rfl, rfl⟩ State Before: case inr.intro.intro.intro.intro.intro.intro
n : ℕ
n0 : 0 < n
g : ℕ
a' b' : ℕ
g0' : 0 < g
co : coprime a' b'
h : (a' * g) ^ n ∣ (b' * g) ^ n
g0 : gcd (a' * g) (b' * g) > 0
⊢ a' * g ∣ b' * g State After: case inr.intro.intro.intro.intro.intro.intro
n : ℕ
n0 : 0 < n
g : ℕ
a' b' : ℕ
g0' : 0 < g
co : coprime a' b'
h : a' ^ n * g ^ n ∣ b' ^ n * g ^ n
g0 : gcd (a' * g) (b' * g) > 0
⊢ a' * g ∣ b' * g Tactic: rw [mul_pow, mul_pow] at h State Before: case inr.intro.intro.intro.intro.intro.intro
n : ℕ
n0 : 0 < n
g : ℕ
a' b' : ℕ
g0' : 0 < g
co : coprime a' b'
h : a' ^ n * g ^ n ∣ b' ^ n * g ^ n
g0 : gcd (a' * g) (b' * g) > 0
⊢ a' * g ∣ b' * g State After: case inr.intro.intro.intro.intro.intro.intro
n : ℕ
n0 : 0 < n
g : ℕ
a' b' : ℕ
g0' : 0 < g
co : coprime a' b'
g0 : gcd (a' * g) (b' * g) > 0
h : a' ^ n ∣ b' ^ n
⊢ a' * g ∣ b' * g Tactic: replace h := Nat.dvd_of_mul_dvd_mul_right (pow_pos g0' _) h State Before: case inr.intro.intro.intro.intro.intro.intro
n : ℕ
n0 : 0 < n
g : ℕ
a' b' : ℕ
g0' : 0 < g
co : coprime a' b'
g0 : gcd (a' * g) (b' * g) > 0
h : a' ^ n ∣ b' ^ n
⊢ a' * g ∣ b' * g State After: case inr.intro.intro.intro.intro.intro.intro
n : ℕ
n0 : 0 < n
g : ℕ
a' b' : ℕ
g0' : 0 < g
co : coprime a' b'
g0 : gcd (a' * g) (b' * g) > 0
h : a' ^ n ∣ b' ^ n
this : a' ^ succ 0 ∣ a' ^ n
⊢ a' * g ∣ b' * g Tactic: have := pow_dvd_pow a' n0 State Before: case inr.intro.intro.intro.intro.intro.intro
n : ℕ
n0 : 0 < n
g : ℕ
a' b' : ℕ
g0' : 0 < g
co : coprime a' b'
g0 : gcd (a' * g) (b' * g) > 0
h : a' ^ n ∣ b' ^ n
this : a' ^ succ 0 ∣ a' ^ n
⊢ a' * g ∣ b' * g State After: case inr.intro.intro.intro.intro.intro.intro
n : ℕ
n0 : 0 < n
g : ℕ
a' b' : ℕ
g0' : 0 < g
co : coprime a' b'
g0 : gcd (a' * g) (b' * g) > 0
h : a' ^ n ∣ b' ^ n
this : a' ∣ 1
⊢ a' * g ∣ b' * g Tactic: rw [pow_one, (co.pow n n).eq_one_of_dvd h] at this State Before: case inr.intro.intro.intro.intro.intro.intro
n : ℕ
n0 : 0 < n
g : ℕ
a' b' : ℕ
g0' : 0 < g
co : coprime a' b'
g0 : gcd (a' * g) (b' * g) > 0
h : a' ^ n ∣ b' ^ n
this : a' ∣ 1
⊢ a' * g ∣ b' * g State After: no goals Tactic: simp [eq_one_of_dvd_one this] State Before: case inl
a b n : ℕ
n0 : 0 < n
h : a ^ n ∣ b ^ n
g0 : gcd a b = 0
⊢ a ∣ b State After: no goals Tactic: simp [eq_zero_of_gcd_eq_zero_right g0] |
[STATEMENT]
lemma (in vbrelation) vbrelation_vsubset_vtimes:
assumes "\<D>\<^sub>\<circ> r \<subseteq>\<^sub>\<circ> A" and "\<R>\<^sub>\<circ> r \<subseteq>\<^sub>\<circ> B"
shows "r \<subseteq>\<^sub>\<circ> A \<times>\<^sub>\<circ> B"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. r \<subseteq>\<^sub>\<circ> A \<times>\<^sub>\<circ> B
[PROOF STEP]
proof(intro vsubsetI)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. x \<in>\<^sub>\<circ> r \<Longrightarrow> x \<in>\<^sub>\<circ> A \<times>\<^sub>\<circ> B
[PROOF STEP]
fix x
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. x \<in>\<^sub>\<circ> r \<Longrightarrow> x \<in>\<^sub>\<circ> A \<times>\<^sub>\<circ> B
[PROOF STEP]
assume prems: "x \<in>\<^sub>\<circ> r"
[PROOF STATE]
proof (state)
this:
x \<in>\<^sub>\<circ> r
goal (1 subgoal):
1. \<And>x. x \<in>\<^sub>\<circ> r \<Longrightarrow> x \<in>\<^sub>\<circ> A \<times>\<^sub>\<circ> B
[PROOF STEP]
with vbrelation
[PROOF STATE]
proof (chain)
picking this:
vpairs r = r
x \<in>\<^sub>\<circ> r
[PROOF STEP]
obtain a b where x_def: "x = \<langle>a, b\<rangle>"
[PROOF STATE]
proof (prove)
using this:
vpairs r = r
x \<in>\<^sub>\<circ> r
goal (1 subgoal):
1. (\<And>a b. x = \<langle>a, b\<rangle> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
x = \<langle>a, b\<rangle>
goal (1 subgoal):
1. \<And>x. x \<in>\<^sub>\<circ> r \<Longrightarrow> x \<in>\<^sub>\<circ> A \<times>\<^sub>\<circ> B
[PROOF STEP]
from prems
[PROOF STATE]
proof (chain)
picking this:
x \<in>\<^sub>\<circ> r
[PROOF STEP]
have a: "a \<in>\<^sub>\<circ> \<D>\<^sub>\<circ> r" and b: "b \<in>\<^sub>\<circ> \<R>\<^sub>\<circ> r"
[PROOF STATE]
proof (prove)
using this:
x \<in>\<^sub>\<circ> r
goal (1 subgoal):
1. a \<in>\<^sub>\<circ> \<D>\<^sub>\<circ> r &&& b \<in>\<^sub>\<circ> \<R>\<^sub>\<circ> r
[PROOF STEP]
unfolding x_def
[PROOF STATE]
proof (prove)
using this:
\<langle>a, b\<rangle> \<in>\<^sub>\<circ> r
goal (1 subgoal):
1. a \<in>\<^sub>\<circ> \<D>\<^sub>\<circ> r &&& b \<in>\<^sub>\<circ> \<R>\<^sub>\<circ> r
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
a \<in>\<^sub>\<circ> \<D>\<^sub>\<circ> r
b \<in>\<^sub>\<circ> \<R>\<^sub>\<circ> r
goal (1 subgoal):
1. \<And>x. x \<in>\<^sub>\<circ> r \<Longrightarrow> x \<in>\<^sub>\<circ> A \<times>\<^sub>\<circ> B
[PROOF STEP]
with assms
[PROOF STATE]
proof (chain)
picking this:
\<D>\<^sub>\<circ> r \<subseteq>\<^sub>\<circ> A
\<R>\<^sub>\<circ> r \<subseteq>\<^sub>\<circ> B
a \<in>\<^sub>\<circ> \<D>\<^sub>\<circ> r
b \<in>\<^sub>\<circ> \<R>\<^sub>\<circ> r
[PROOF STEP]
have "a \<in>\<^sub>\<circ> A" and "b \<in>\<^sub>\<circ> B"
[PROOF STATE]
proof (prove)
using this:
\<D>\<^sub>\<circ> r \<subseteq>\<^sub>\<circ> A
\<R>\<^sub>\<circ> r \<subseteq>\<^sub>\<circ> B
a \<in>\<^sub>\<circ> \<D>\<^sub>\<circ> r
b \<in>\<^sub>\<circ> \<R>\<^sub>\<circ> r
goal (1 subgoal):
1. a \<in>\<^sub>\<circ> A &&& b \<in>\<^sub>\<circ> B
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
a \<in>\<^sub>\<circ> A
b \<in>\<^sub>\<circ> B
goal (1 subgoal):
1. \<And>x. x \<in>\<^sub>\<circ> r \<Longrightarrow> x \<in>\<^sub>\<circ> A \<times>\<^sub>\<circ> B
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
a \<in>\<^sub>\<circ> A
b \<in>\<^sub>\<circ> B
[PROOF STEP]
show "x \<in>\<^sub>\<circ> A \<times>\<^sub>\<circ> B"
[PROOF STATE]
proof (prove)
using this:
a \<in>\<^sub>\<circ> A
b \<in>\<^sub>\<circ> B
goal (1 subgoal):
1. x \<in>\<^sub>\<circ> A \<times>\<^sub>\<circ> B
[PROOF STEP]
unfolding x_def
[PROOF STATE]
proof (prove)
using this:
a \<in>\<^sub>\<circ> A
b \<in>\<^sub>\<circ> B
goal (1 subgoal):
1. \<langle>a, b\<rangle> \<in>\<^sub>\<circ> A \<times>\<^sub>\<circ> B
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
x \<in>\<^sub>\<circ> A \<times>\<^sub>\<circ> B
goal:
No subgoals!
[PROOF STEP]
qed |
Require Import Logic.lib.Ensembles_ext.
Require Import Logic.GeneralLogic.Base.
Require Import Logic.GeneralLogic.KripkeModel.
Require Import Logic.GeneralLogic.ProofTheory.BasicSequentCalculus.
Require Import Logic.GeneralLogic.ShallowEmbedded.PredicateAsLang.
Require Import Logic.MinimumLogic.Syntax.
Require Import Logic.MinimumLogic.ProofTheory.Minimum.
Require Import Logic.MinimumLogic.Semantics.Trivial.
Require Import Logic.MinimumLogic.Sound.Sound_Classical_Trivial.
Require Import Logic.PropositionalLogic.Syntax.
Require Import Logic.PropositionalLogic.ProofTheory.Intuitionistic.
Require Import Logic.PropositionalLogic.ProofTheory.Classical.
Require Import Logic.PropositionalLogic.Semantics.Trivial.
Require Import Logic.PropositionalLogic.Sound.Sound_Classical_Trivial.
(* TODO: split part of this file into MinimumLogic folder. *)
Instance Pred_minL (A: Type): MinimumLanguage (Pred_L A) := Build_MinimumLanguage (Pred_L A) Semantics.impp.
Instance Pred_andpL (A: Type): AndLanguage (Pred_L A) := Build_AndLanguage (Pred_L A) Semantics.andp.
Instance Pred_orpL (A: Type): OrLanguage (Pred_L A) := Build_OrLanguage (Pred_L A) Semantics.orp.
Instance Pred_falsepL (A: Type): FalseLanguage (Pred_L A) := Build_FalseLanguage (Pred_L A) Semantics.falsep.
Instance Pred_tminSM (A: Type): TrivialMinimumSemantics (Pred_L A) (Build_Model A) (Pred_SM A).
Proof.
constructor.
intros; apply Same_set_refl.
Qed.
Instance Pred_andpSM (A: Type): AndSemantics (Pred_L A) (Build_Model A) (Pred_SM A).
Proof.
constructor.
+ intros; apply Same_set_refl.
Qed.
Instance Pred_orpSM (A: Type): OrSemantics (Pred_L A) (Build_Model A) (Pred_SM A).
Proof.
constructor.
+ intros; apply Same_set_refl.
Qed.
Instance Pred_falsepSM (A: Type): FalseSemantics (Pred_L A) (Build_Model A) (Pred_SM A).
Proof.
constructor.
+ apply Same_set_refl.
Qed.
Instance Pred_Gamma (A: Type): Provable (Pred_L A) :=
Build_Provable (Pred_L A) (fun x: expr => forall a: A, x a).
Instance Pred_minAX (A: Type): MinimumAxiomatization (Pred_L A) (Pred_Gamma A).
Proof.
constructor.
+ intros x y ? ? m.
pose proof @sound_modus_ponens (Pred_L A) _ (Build_Model A) (Pred_SM A) (Pred_tminSM A) x y.
exact (H1 m (H m) (H0 m)).
+ intros x y.
exact (@sound_axiom1 (Pred_L A) _ (Build_Model A) (Pred_SM A) (Pred_tminSM A) x y).
+ intros x y z.
exact (@sound_axiom2 (Pred_L A) _ (Build_Model A) (Pred_SM A) (Pred_tminSM A) x y z).
Qed.
Instance Pred_andpAX (A: Type): AndAxiomatization (Pred_L A) (Pred_Gamma A).
Proof.
constructor.
+ intros x y.
exact (@sound_andp_intros (Pred_L A) _ _ (Build_Model A) (Pred_SM A) (Pred_tminSM A) (Pred_andpSM A) x y).
+ intros x y.
exact (@sound_andp_elim1 (Pred_L A) _ _ (Build_Model A) (Pred_SM A) (Pred_tminSM A) (Pred_andpSM A) x y).
+ intros x y.
exact (@sound_andp_elim2 (Pred_L A) _ _ (Build_Model A) (Pred_SM A) (Pred_tminSM A) (Pred_andpSM A) x y).
Qed.
Instance Pred_orpAX (A: Type): OrAxiomatization (Pred_L A) (Pred_Gamma A).
Proof.
constructor.
+ intros x y.
exact (@sound_orp_intros1 (Pred_L A) _ _ (Build_Model A) (Pred_SM A) (Pred_tminSM A) (Pred_orpSM A) x y).
+ intros x y.
exact (@sound_orp_intros2 (Pred_L A) _ _ (Build_Model A) (Pred_SM A) (Pred_tminSM A) (Pred_orpSM A) x y).
+ intros x y z.
exact (@sound_orp_elim (Pred_L A) _ _ (Build_Model A) (Pred_SM A) (Pred_tminSM A) (Pred_orpSM A) x y z).
Qed.
Instance Pred_falsepAX (A: Type): FalseAxiomatization (Pred_L A) (Pred_Gamma A).
Proof.
constructor.
+ intros x.
exact (@sound_falsep_elim (Pred_L A) _ _ (Build_Model A) (Pred_SM A) (Pred_tminSM A) (Pred_falsepSM A) x).
Qed.
(*TODO: need to adjust(about classical)
Instance Pred_cpGamma (A: Type): ClassicalPropositionalLogic (Pred_L A) (Pred_Gamma A).
Proof.
constructor.
intros x.
exact (@sound_excluded_middle (Pred_L A) _ _ (Build_Model A) (Pred_SM A) (Pred_tminSM A) (Pred_tpSM A) x).
Qed.
*)
Require Import Logic.GeneralLogic.Semantics.Kripke.
Require Import Logic.MinimumLogic.Semantics.Kripke.
Require Import Logic.MinimumLogic.Semantics.SemanticEquiv.
Require Import Logic.PropositionalLogic.Semantics.Kripke.
Instance Pred_kiSM (A: Type): @KripkeIntuitionisticSemantics (Pred_L A) (Build_Model A) (unit_kMD _) tt eq (Pred_SM A) :=
@eqR_KripkeIntuitionistic _ _ _.
Instance Pred_kminSM (A: Type): @KripkeMinimumSemantics (Pred_L A) (Pred_minL A) (Build_Model A) (unit_kMD _) tt eq (Pred_SM A) :=
@Trivial2Kripke _ _ _ _ (Pred_tminSM A).
Instance Pred_kandpSM (A: Type): @KripkeAndSemantics (Pred_L A) (Pred_andpL A) (Build_Model A) (unit_kMD _) tt (Pred_SM A).
Proof.
constructor.
+ intros; apply Same_set_refl.
Qed.
Instance Pred_korpSM (A: Type): @KripkeOrSemantics (Pred_L A) (Pred_orpL A) (Build_Model A) (unit_kMD _) tt (Pred_SM A).
Proof.
constructor.
+ intros; apply Same_set_refl.
Qed.
Instance Pred_kfalsepSM (A: Type): @KripkeFalseSemantics (Pred_L A) (Pred_falsepL A) (Build_Model A) (unit_kMD _) tt (Pred_SM A).
Proof.
constructor.
+ apply Same_set_refl.
Qed.
|
Require Export SfLib.
(** Assignment 09 *)
(** Due: 2016/05/15 23:59 *)
(* Important:
- You are NOT allowed to use the [admit] tactic.
- You are ALLOWED to use any tactics including:
[tauto], [intuition], [firstorder], [omega].
- Just leave [exact GIVEUP] for those problems that you fail to prove.
*)
Definition GIVEUP {T: Type} : T. Admitted.
Axiom functional_extensionality : forall {X Y: Type} {f g : X -> Y},
(forall (x: X), f x = g x) -> f = g.
Definition state := id -> nat.
Definition empty_state : state :=
fun _ => 0.
Definition update (st : state) (x : id) (n : nat) : state :=
fun x' => if eq_id_dec x x' then n else st x'.
Inductive aexp : Type :=
| ANum : nat -> aexp
| AId : id -> aexp (* <----- NEW *)
| APlus : aexp -> aexp -> aexp
| AMinus : aexp -> aexp -> aexp
| AMult : aexp -> aexp -> aexp.
Tactic Notation "aexp_cases" tactic(first) ident(c) :=
first;
[ Case_aux c "ANum" | Case_aux c "AId" | Case_aux c "APlus"
| Case_aux c "AMinus" | Case_aux c "AMult" ].
(** Defining a few variable names as notational shorthands will make
examples easier to read: *)
Definition X : id := Id 0.
Definition Y : id := Id 1.
Definition Z : id := Id 2.
Opaque X.
Opaque Y.
Opaque Z.
Inductive bexp : Type :=
| BTrue : bexp
| BFalse : bexp
| BEq : aexp -> aexp -> bexp
| BLe : aexp -> aexp -> bexp
| BNot : bexp -> bexp
| BAnd : bexp -> bexp -> bexp.
Tactic Notation "bexp_cases" tactic(first) ident(c) :=
first;
[ Case_aux c "BTrue" | Case_aux c "BFalse" | Case_aux c "BEq"
| Case_aux c "BLe" | Case_aux c "BNot" | Case_aux c "BAnd" ].
Fixpoint aeval (st : state) (a : aexp) : nat :=
match a with
| ANum n => n
| AId x => st x
| APlus a1 a2 => (aeval st a1) + (aeval st a2)
| AMinus a1 a2 => (aeval st a1) - (aeval st a2)
| AMult a1 a2 => (aeval st a1) * (aeval st a2)
end.
Fixpoint beval (st : state) (b : bexp) : bool :=
match b with
| BTrue => true
| BFalse => false
| BEq a1 a2 => beq_nat (aeval st a1) (aeval st a2)
| BLe a1 a2 => ble_nat (aeval st a1) (aeval st a2)
| BNot b1 => negb (beval st b1)
| BAnd b1 b2 => andb (beval st b1) (beval st b2)
end.
Inductive com : Type :=
| CSkip : com
| CAss : id -> aexp -> com
| CSeq : com -> com -> com
| CIf : bexp -> com -> com -> com
| CWhile : bexp -> com -> com.
Tactic Notation "com_cases" tactic(first) ident(c) :=
first;
[ Case_aux c "SKIP" | Case_aux c "::=" | Case_aux c ";;"
| Case_aux c "IFB" | Case_aux c "WHILE" ].
Notation "'SKIP'" :=
CSkip.
Notation "x '::=' a" :=
(CAss x a) (at level 60).
Notation "c1 ;; c2" :=
(CSeq c1 c2) (at level 80, right associativity).
Notation "'WHILE' b 'DO' c 'END'" :=
(CWhile b c) (at level 80, right associativity).
Notation "'IFB' c1 'THEN' c2 'ELSE' c3 'FI'" :=
(CIf c1 c2 c3) (at level 80, right associativity).
Reserved Notation "c1 '/' st '||' st'" (at level 40, st at level 39).
Inductive ceval : com -> state -> state -> Prop :=
| E_Skip : forall st,
SKIP / st || st
| E_Ass : forall st a1 n x,
aeval st a1 = n ->
(x ::= a1) / st || (update st x n)
| E_Seq : forall c1 c2 st st' st'',
c1 / st || st' ->
c2 / st' || st'' ->
(c1 ;; c2) / st || st''
| E_IfTrue : forall st st' b c1 c2,
beval st b = true ->
c1 / st || st' ->
(IFB b THEN c1 ELSE c2 FI) / st || st'
| E_IfFalse : forall st st' b c1 c2,
beval st b = false ->
c2 / st || st' ->
(IFB b THEN c1 ELSE c2 FI) / st || st'
| E_WhileEnd : forall b st c,
beval st b = false ->
(WHILE b DO c END) / st || st
| E_WhileLoop : forall st st' st'' b c,
beval st b = true ->
c / st || st' ->
(WHILE b DO c END) / st' || st'' ->
(WHILE b DO c END) / st || st''
where "c1 '/' st '||' st'" := (ceval c1 st st').
Tactic Notation "ceval_cases" tactic(first) ident(c) :=
first;
[ Case_aux c "E_Skip" | Case_aux c "E_Ass" | Case_aux c "E_Seq"
| Case_aux c "E_IfTrue" | Case_aux c "E_IfFalse"
| Case_aux c "E_WhileEnd" | Case_aux c "E_WhileLoop" ].
Definition aequiv (a1 a2 : aexp) : Prop :=
forall (st:state),
aeval st a1 = aeval st a2.
Definition bequiv (b1 b2 : bexp) : Prop :=
forall (st:state),
beval st b1 = beval st b2.
(** For commands, the situation is a little more subtle. We can't
simply say "two commands are behaviorally equivalent if they
evaluate to the same ending state whenever they are started in the
same initial state," because some commands (in some starting
states) don't terminate in any final state at all! What we need
instead is this: two commands are behaviorally equivalent if, for
any given starting state, they either both diverge or both
terminate in the same final state. A compact way to express this
is "if the first one terminates in a particular state then so does
the second, and vice versa." *)
Definition cequiv (c1 c2 : com) : Prop :=
forall (st st' : state),
(c1 / st || st') <-> (c2 / st || st').
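(* For example, [SKIP;; (X ::= ANum 1)] and [X ::= ANum 1] are behaviorally
   equivalent: from any starting state [st], both terminate in [update st X 1]
   (this is an instance of the [skip_left] equivalence stated below). *)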
Definition atrans_sound (atrans : aexp -> aexp) : Prop :=
forall (a : aexp),
aequiv a (atrans a).
Definition btrans_sound (btrans : bexp -> bexp) : Prop :=
forall (b : bexp),
bequiv b (btrans b).
Definition ctrans_sound (ctrans : com -> com) : Prop :=
forall (c : com),
cequiv c (ctrans c).
(* ####################################################### *)
(** ** Behavioral Equivalence is an Equivalence *)
Lemma refl_aequiv : forall (a : aexp), aequiv a a.
Proof.
intros a st. reflexivity. Qed.
Lemma sym_aequiv : forall (a1 a2 : aexp),
aequiv a1 a2 -> aequiv a2 a1.
Proof.
intros a1 a2 H. intros st. symmetry. apply H. Qed.
Lemma trans_aequiv : forall (a1 a2 a3 : aexp),
aequiv a1 a2 -> aequiv a2 a3 -> aequiv a1 a3.
Proof.
unfold aequiv. intros a1 a2 a3 H12 H23 st.
rewrite (H12 st). rewrite (H23 st). reflexivity. Qed.
Lemma refl_bequiv : forall (b : bexp), bequiv b b.
Proof.
unfold bequiv. intros b st. reflexivity. Qed.
Lemma sym_bequiv : forall (b1 b2 : bexp),
bequiv b1 b2 -> bequiv b2 b1.
Proof.
unfold bequiv. intros b1 b2 H. intros st. symmetry. apply H. Qed.
Lemma trans_bequiv : forall (b1 b2 b3 : bexp),
bequiv b1 b2 -> bequiv b2 b3 -> bequiv b1 b3.
Proof.
unfold bequiv. intros b1 b2 b3 H12 H23 st.
rewrite (H12 st). rewrite (H23 st). reflexivity. Qed.
Lemma refl_cequiv : forall (c : com), cequiv c c.
Proof.
unfold cequiv. intros c st st'. apply iff_refl. Qed.
Lemma sym_cequiv : forall (c1 c2 : com),
cequiv c1 c2 -> cequiv c2 c1.
Proof.
unfold cequiv. intros c1 c2 H st st'.
assert (c1 / st || st' <-> c2 / st || st') as H'.
SCase "Proof of assertion". apply H.
apply iff_sym. assumption.
Qed.
Lemma iff_trans : forall (P1 P2 P3 : Prop),
(P1 <-> P2) -> (P2 <-> P3) -> (P1 <-> P3).
Proof.
intros P1 P2 P3 H12 H23.
inversion H12. inversion H23.
split; intros A.
apply H1. apply H. apply A.
apply H0. apply H2. apply A. Qed.
Lemma trans_cequiv : forall (c1 c2 c3 : com),
cequiv c1 c2 -> cequiv c2 c3 -> cequiv c1 c3.
Proof.
unfold cequiv. intros c1 c2 c3 H12 H23 st st'.
apply iff_trans with (c2 / st || st'). apply H12. apply H23. Qed.
(* ######################################################## *)
(** ** Behavioral Equivalence is a Congruence *)
Theorem CAss_congruence : forall i a1 a1',
aequiv a1 a1' ->
cequiv (CAss i a1) (CAss i a1').
Proof.
intros i a1 a2 Heqv st st'.
split; intros Hceval.
Case "->".
inversion Hceval. subst. apply E_Ass.
rewrite Heqv. reflexivity.
Case "<-".
inversion Hceval. subst. apply E_Ass.
rewrite Heqv. reflexivity. Qed.
(** The congruence property for loops is a little more interesting,
since it requires induction.
_Theorem_: Equivalence is a congruence for [WHILE] -- that is, if
[b1] is equivalent to [b1'] and [c1] is equivalent to [c1'], then
[WHILE b1 DO c1 END] is equivalent to [WHILE b1' DO c1' END].
_Proof_: Suppose [b1] is equivalent to [b1'] and [c1] is
equivalent to [c1']. We must show, for every [st] and [st'], that
[WHILE b1 DO c1 END / st || st'] iff [WHILE b1' DO c1' END / st
|| st']. We consider the two directions separately.
- ([->]) We show that [WHILE b1 DO c1 END / st || st'] implies
[WHILE b1' DO c1' END / st || st'], by induction on a
derivation of [WHILE b1 DO c1 END / st || st']. The only
nontrivial cases are when the final rule in the derivation is
[E_WhileEnd] or [E_WhileLoop].
- [E_WhileEnd]: In this case, the form of the rule gives us
[beval st b1 = false] and [st = st']. But then, since
[b1] and [b1'] are equivalent, we have [beval st b1' =
false], and [E_WhileEnd] applies, giving us [WHILE b1' DO
c1' END / st || st'], as required.
- [E_WhileLoop]: The form of the rule now gives us [beval st
b1 = true], with [c1 / st || st'0] and [WHILE b1 DO c1
END / st'0 || st'] for some state [st'0], with the
induction hypothesis [WHILE b1' DO c1' END / st'0 ||
st'].
Since [c1] and [c1'] are equivalent, we know that [c1' /
st || st'0]. And since [b1] and [b1'] are equivalent, we
have [beval st b1' = true]. Now [E_WhileLoop] applies,
giving us [WHILE b1' DO c1' END / st || st'], as
required.
- ([<-]) Similar. [] *)
Theorem CWhile_congruence : forall b1 b1' c1 c1',
bequiv b1 b1' -> cequiv c1 c1' ->
cequiv (WHILE b1 DO c1 END) (WHILE b1' DO c1' END).
Proof.
(* WORKED IN CLASS *)
unfold bequiv,cequiv.
intros b1 b1' c1 c1' Hb1e Hc1e st st'.
split; intros Hce.
Case "->".
remember (WHILE b1 DO c1 END) as cwhile eqn:Heqcwhile.
induction Hce; inversion Heqcwhile; subst.
SCase "E_WhileEnd".
apply E_WhileEnd. rewrite <- Hb1e. apply H.
SCase "E_WhileLoop".
apply E_WhileLoop with (st' := st').
SSCase "show loop runs". rewrite <- Hb1e. apply H.
SSCase "body execution".
apply (Hc1e st st'). apply Hce1.
SSCase "subsequent loop execution".
apply IHHce2. reflexivity.
Case "<-".
remember (WHILE b1' DO c1' END) as c'while eqn:Heqc'while.
induction Hce; inversion Heqc'while; subst.
SCase "E_WhileEnd".
apply E_WhileEnd. rewrite -> Hb1e. apply H.
SCase "E_WhileLoop".
apply E_WhileLoop with (st' := st').
SSCase "show loop runs". rewrite -> Hb1e. apply H.
SSCase "body execution".
apply (Hc1e st st'). apply Hce1.
SSCase "subsequent loop execution".
apply IHHce2. reflexivity. Qed.
(* ######################################################## *)
(** ** Definitions used in the exercises *)
Definition loop : com :=
WHILE BTrue DO
SKIP
END.
Fixpoint no_whiles (c : com) : bool :=
match c with
| SKIP => true
| _ ::= _ => true
| c1 ;; c2 => andb (no_whiles c1) (no_whiles c2)
| IFB _ THEN ct ELSE cf FI => andb (no_whiles ct) (no_whiles cf)
| WHILE _ DO _ END => false
end.
Fixpoint fold_constants_aexp (a : aexp) : aexp :=
match a with
| ANum n => ANum n
| AId i => AId i
| APlus a1 a2 =>
match (fold_constants_aexp a1, fold_constants_aexp a2) with
| (ANum n1, ANum n2) => ANum (n1 + n2)
| (a1', a2') => APlus a1' a2'
end
| AMinus a1 a2 =>
match (fold_constants_aexp a1, fold_constants_aexp a2) with
| (ANum n1, ANum n2) => ANum (n1 - n2)
| (a1', a2') => AMinus a1' a2'
end
| AMult a1 a2 =>
match (fold_constants_aexp a1, fold_constants_aexp a2) with
| (ANum n1, ANum n2) => ANum (n1 * n2)
| (a1', a2') => AMult a1' a2'
end
end.
Fixpoint fold_constants_bexp (b : bexp) : bexp :=
match b with
| BTrue => BTrue
| BFalse => BFalse
| BEq a1 a2 =>
match (fold_constants_aexp a1, fold_constants_aexp a2) with
| (ANum n1, ANum n2) => if beq_nat n1 n2 then BTrue else BFalse
| (a1', a2') => BEq a1' a2'
end
| BLe a1 a2 =>
match (fold_constants_aexp a1, fold_constants_aexp a2) with
| (ANum n1, ANum n2) => if ble_nat n1 n2 then BTrue else BFalse
| (a1', a2') => BLe a1' a2'
end
| BNot b1 =>
match (fold_constants_bexp b1) with
| BTrue => BFalse
| BFalse => BTrue
| b1' => BNot b1'
end
| BAnd b1 b2 =>
match (fold_constants_bexp b1, fold_constants_bexp b2) with
| (_, BFalse) | (BFalse, _) => BFalse
| (b1', BTrue) | (BTrue, b1') => b1'
| (b1', b2') => BAnd b1' b2'
end
end.
Fixpoint fold_constants_com (c : com) : com :=
match c with
| SKIP =>
SKIP
| i ::= a =>
CAss i (fold_constants_aexp a)
| SKIP ;; c' | c' ;; SKIP => c'
| c1 ;; c2 =>
(fold_constants_com c1) ;; (fold_constants_com c2)
| IFB b THEN c1 ELSE c2 FI =>
match fold_constants_bexp b with
| BTrue => fold_constants_com c1
| BFalse => fold_constants_com c2
| b' => IFB b' THEN fold_constants_com c1
ELSE fold_constants_com c2 FI
end
| WHILE b DO c END =>
match fold_constants_bexp b with
| BTrue => WHILE BTrue DO SKIP END
| BFalse => SKIP
| b' => WHILE b' DO (fold_constants_com c) END
end
end.
Theorem fold_constants_aexp_sound :
atrans_sound fold_constants_aexp.
Proof.
unfold atrans_sound. intros a. unfold aequiv. intros st.
aexp_cases (induction a) Case; simpl;
(* ANum and AId follow immediately *)
try reflexivity;
(* APlus, AMinus, and AMult follow from the IH
and the observation that
aeval st (APlus a1 a2)
= ANum ((aeval st a1) + (aeval st a2))
= aeval st (ANum ((aeval st a1) + (aeval st a2)))
(and similarly for AMinus/minus and AMult/mult) *)
try (destruct (fold_constants_aexp a1);
destruct (fold_constants_aexp a2);
rewrite IHa1; rewrite IHa2; reflexivity). Qed.
Axiom skip_left: forall c,
cequiv
(SKIP;; c)
c.
Axiom skip_right: forall c,
cequiv
(c;; SKIP)
c.
Axiom IFB_true: forall b c1 c2,
bequiv b BTrue ->
cequiv
(IFB b THEN c1 ELSE c2 FI)
c1.
Axiom IFB_false: forall b c1 c2,
bequiv b BFalse ->
cequiv
(IFB b THEN c1 ELSE c2 FI)
c2.
Axiom swap_if_branches: forall b e1 e2,
cequiv
(IFB b THEN e1 ELSE e2 FI)
(IFB BNot b THEN e2 ELSE e1 FI).
Axiom WHILE_true: forall b c,
bequiv b BTrue ->
cequiv
(WHILE b DO c END)
(WHILE BTrue DO SKIP END).
Axiom seq_assoc : forall c1 c2 c3,
cequiv ((c1;;c2);;c3) (c1;;(c2;;c3)).
Axiom assign_aequiv : forall X e,
aequiv (AId X) e ->
cequiv SKIP (X ::= e).
Axiom CSeq_congruence : forall c1 c1' c2 c2',
cequiv c1 c1' -> cequiv c2 c2' ->
cequiv (c1;;c2) (c1';;c2').
Axiom CIf_congruence : forall b b' c1 c1' c2 c2',
bequiv b b' -> cequiv c1 c1' -> cequiv c2 c2' ->
cequiv (IFB b THEN c1 ELSE c2 FI) (IFB b' THEN c1' ELSE c2' FI).
(* ######################################################## *)
(* optimize_0plus *)
Fixpoint optimize_0plus_aexp (e:aexp) : aexp :=
match e with
| ANum n =>
ANum n
| AId i => AId i
| APlus (ANum 0) e' | APlus e' (ANum 0) =>
optimize_0plus_aexp e'
| APlus e1 e2 =>
APlus (optimize_0plus_aexp e1) (optimize_0plus_aexp e2)
| AMinus e1 e2 =>
AMinus (optimize_0plus_aexp e1) (optimize_0plus_aexp e2)
| AMult e1 e2 =>
AMult (optimize_0plus_aexp e1) (optimize_0plus_aexp e2)
end.
Fixpoint optimize_0plus_bexp (b : bexp) : bexp :=
match b with
| BTrue => BTrue
| BFalse => BFalse
| BEq a1 a2 =>
BEq (optimize_0plus_aexp a1) (optimize_0plus_aexp a2)
| BLe a1 a2 =>
BLe (optimize_0plus_aexp a1) (optimize_0plus_aexp a2)
| BNot b1 =>
BNot (optimize_0plus_bexp b1)
| BAnd b1 b2 =>
BAnd (optimize_0plus_bexp b1) (optimize_0plus_bexp b2)
end.
Fixpoint optimize_0plus_com (c : com) : com :=
match c with
| SKIP =>
SKIP
| i ::= a =>
CAss i (optimize_0plus_aexp a)
| c1 ;; c2 =>
(optimize_0plus_com c1) ;; (optimize_0plus_com c2)
| IFB b THEN c1 ELSE c2 FI =>
IFB (optimize_0plus_bexp b)
THEN optimize_0plus_com c1
ELSE optimize_0plus_com c2
FI
| WHILE b DO c END =>
WHILE (optimize_0plus_bexp b) DO
(optimize_0plus_com c)
END
end.
Definition constfold_0plus (c: com) : com :=
optimize_0plus_com (fold_constants_com c).
Eval compute in
constfold_0plus
(X ::= APlus (ANum 4) (ANum 5);;
Y ::= AMinus (AId X) (ANum 3);;
IFB BEq (AMinus (AId X) (AId Y)) (APlus (AId X) (AMinus (ANum 4) (ANum 4))) THEN
SKIP
ELSE
Y ::= ANum 0
FI;;
IFB BLe (ANum 0) (AMinus (ANum 4) (APlus (ANum 2) (ANum 1))) THEN
Y ::= ANum 0
ELSE
SKIP
FI;;
WHILE BEq (AId Y) (ANum 0) DO
X ::= APlus (AId X) (ANum 1)
END).
|
UMPIRES, PLAYERS, COACHES AND ANYONE ELSE ARE WELCOME.
Stage 1: This is designed to give an overview of the basic Laws of the game. It gives the tools to enable a person to umpire matches at the basic recreational level.
Stage 2: This course is designed to give you further knowledge and skills to enable you to become a member of a league panel and is available if you are a member and have attended the Stage 1 Course. It will complement that course by looking beyond the basic laws, build on field craft and match management techniques and introduce the concept of working, as a team, with a colleague.
THE COURSE FEE INCLUDES:- Resource pack & first year membership of ECB ACO (membership is optional and subject to meeting application criteria).
The cost of each Stage is £30, but Stage 1 and 2 taken consecutively will be at a discounted cost of £40.
Please register and pay ECB ACO direct using URL links. |
/* Utility functions for working with GSL data types. */
#include <math.h>
#include <stdio.h>
#include <unistd.h>
#include <gsl/gsl_blas.h>
#include <gsl/gsl_linalg.h>
#include <gsl/gsl_permutation.h>
#include <gsl/gsl_sort_vector.h>
#include <gsl/gsl_spmatrix.h>
#include <gsl/gsl_statistics.h>
#include <hdf5.h>
#include <hdf5_hl.h>
#include <osqp/osqp.h>
#include "qdm.h"
gsl_vector *
qdm_vector_seq(double from, double to, double by)
{
size_t size = fabs(to - from) / by;
gsl_vector *s = gsl_vector_alloc(size + 1);
double value = from;
for (size_t i = 0; i < s->size; i++) {
gsl_vector_set(s, i, value);
value += by;
}
return s;
}
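/* Fill v with v->size evenly spaced values running from `from` to `to` inclusive. */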
void
qdm_vector_set_seq(gsl_vector *v, double from, double to)
{
double by = (to - from) / (double)(v->size - 1);
double value = from;
for (size_t i = 0; i < v->size; i++) {
gsl_vector_set(v, i, value);
value += by;
}
}
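/* Assuming v is sorted in ascending order, return the largest index i >= 1 with
 * v[i] <= needle, or 0 if no such index exists. */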
size_t
qdm_vector_search(const gsl_vector *v, double needle)
{
double x = 0;
size_t i = v->size - 1;
for (; i > 0; i--) {
x = gsl_vector_get(v, i);
if (needle >= x) {
break;
}
}
return i;
}
void
qdm_vector_csv_fwrite(FILE *f, const gsl_vector *v)
{
for (size_t i = 0; i < v->size; i++) {
fprintf(f, "%.17g", gsl_vector_get(v, i));
if (i < v->size - 1) {
fprintf(f, ",");
}
}
fprintf(f, "\n");
}
void
qdm_matrix_csv_fwrite(FILE *f, const gsl_matrix *m)
{
if (m != NULL) {
for (size_t i = 0; i < m->size1; i++) {
gsl_vector_const_view row = gsl_matrix_const_row(m, i);
qdm_vector_csv_fwrite(f, &row.vector);
}
}
}
/* Compute M^T * M */
int
qdm_matrix_tmm(gsl_matrix *m, gsl_matrix *result)
{
return gsl_blas_dgemm(
CblasTrans , CblasNoTrans , 1.0 ,
m , m ,
0.0 , result
);
}
/* Compute det(M^T * M) */
int
qdm_matrix_det_tmm(gsl_matrix *m, double *det)
{
int status = 0;
gsl_matrix *c = gsl_matrix_alloc(m->size2, m->size2);
gsl_permutation *p = gsl_permutation_alloc(c->size1);
status = gsl_blas_dgemm(
CblasTrans , CblasNoTrans , 1.0 ,
m , m ,
0.0 , c
);
if (status != 0) {
goto cleanup;
}
int signum = 0;
status = gsl_linalg_LU_decomp(c, p, &signum);
if (status != 0) {
goto cleanup;
}
*det = gsl_linalg_LU_det(c, signum);
cleanup:
gsl_permutation_free(p);
gsl_matrix_free(c);
return status;
}
/* Create a sorted copy of the vector v. */
gsl_vector *
qdm_vector_sorted(const gsl_vector *v)
{
gsl_vector *s = gsl_vector_alloc(v->size);
gsl_vector_memcpy(s, v);
gsl_sort_vector(s);
return s;
}
gsl_vector *
qdm_vector_quantile(gsl_vector *data, gsl_vector *probs)
{
gsl_vector *sorted = qdm_vector_sorted(data);
gsl_vector *quantiles = gsl_vector_alloc(probs->size);
for (size_t i = 0; i < probs->size; i++) {
double q = gsl_stats_quantile_from_sorted_data(
sorted->data,
sorted->stride,
sorted->size,
gsl_vector_get(probs, i)
);
gsl_vector_set(quantiles, i, q);
}
gsl_vector_free(sorted);
return quantiles;
}
/* Compute the residual sum of squares:
*
* sum((y[i] - f(x[i])) ^ 2)
*/
double
qdm_vector_rss(const gsl_vector *y, const gsl_vector *fx)
{
int status = 0;
double rss = 0.0;
gsl_vector *se = gsl_vector_alloc(y->size);
status = gsl_vector_memcpy(se, y);
if (status != 0) {
goto cleanup;
}
status = gsl_vector_sub(se, fx);
if (status != 0) {
goto cleanup;
}
status = gsl_vector_mul(se, se);
if (status != 0) {
goto cleanup;
}
rss = qdm_vector_sum(se);
cleanup:
gsl_vector_free(se);
return rss;
}
/* Compute the summation of the vector.
*
* This uses the "iterative Kahan-Babuska algorithm" (aka Klein summation) as
* outlined here:
*
* https://en.wikipedia.org/wiki/Kahan_summation_algorithm
*/
double
qdm_vector_sum(gsl_vector *v)
{
double s = 0.0;
double cs = 0.0;
double ccs = 0.0;
for (size_t i = 0; i < v->size; i++) {
double t = s + gsl_vector_get(v, i);
double c;
if (fabs(s) >= fabs(gsl_vector_get(v, i))) {
c = (s - t) + gsl_vector_get(v, i);
} else {
c = (gsl_vector_get(v, i) - t) + s;
}
s = t;
t = cs + c;
double cc;
if (fabs(cs) >= fabs(c)) {
cc = (cs - t) + c;
} else {
cc = (c - t) + cs;
}
cs = t;
ccs = ccs + cc;
}
return s + cs + ccs;
}
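/* Usage sketch (hypothetical, not part of the original sources):
 *
 *   gsl_vector *v = qdm_vector_seq(0.0, 1.0, 0.25);  // [0, 0.25, 0.5, 0.75, 1.0]
 *   double total = qdm_vector_sum(v);                // 2.5
 *   gsl_vector_free(v);
 */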
/* Return the first index with a value greater than the given value, or 0 if none is found. */
size_t
qdm_vector_greater_than(const gsl_vector *v, double value)
{
for (size_t i = 0; i < v->size; i++) {
if (gsl_vector_get(v, i) > value) {
return i;
}
}
return 0;
}
/* Select the elements in the upper triangle of the matrix. All other elements
* will be set to zero.
*/
void
qdm_matrix_select_upper_triangle(gsl_matrix *m)
{
for (size_t i = 0; i < m->size1; i++) {
for (size_t j = 0; j < m->size2; j++) {
if (j < i) {
gsl_matrix_set(m, i, j, 0);
}
}
}
}
int
qdm_matrix_to_csc_matrix(csc **result, gsl_matrix *input)
{
int status = 0;
gsl_spmatrix *sm = gsl_spmatrix_alloc(input->size1, input->size2);
status = gsl_spmatrix_d2sp(sm, input);
if (status != 0) {
goto cleanup_sm;
}
gsl_spmatrix *sm_csc = gsl_spmatrix_compress(sm, GSL_SPMATRIX_CSC);
int m = sm_csc->size1;
int n = sm_csc->size2;
int nzmax = gsl_spmatrix_nnz(sm_csc);
size_t x_size = sizeof(double) * nzmax;
double *x = malloc(x_size);
memcpy(x, sm_csc->data, x_size);
size_t i_size = sizeof(int) * nzmax;
int *i = malloc(i_size);
memcpy(i, sm_csc->i, i_size);
size_t p_size = sizeof(int) * (n + 1);
int *p = malloc(p_size);
memcpy(p, sm_csc->p, p_size);
*result = csc_matrix(
m, // m First dimension (rows)
n, // n Second dimension (columns)
nzmax, // nzmax Maximum number of nonzero elements
x, // x Vector of data (size nzmax)
i, // i Vector of row indices (size nzmax)
p // p Vector of column pointers (size n+1)
);
gsl_spmatrix_free(sm_csc);
cleanup_sm:
gsl_spmatrix_free(sm);
return status;
}
gsl_vector *
qdm_vector_copy(const gsl_vector *src)
{
gsl_vector *dst = gsl_vector_alloc(src->size);
gsl_vector_memcpy(dst, src);
return dst;
}
gsl_matrix *
qdm_matrix_copy(const gsl_matrix *src)
{
gsl_matrix *dst = gsl_matrix_alloc(src->size1, src->size2);
gsl_matrix_memcpy(dst, src);
return dst;
}
int create_hd5(
const char *file_path,
const char *group_path
)
{
int status = 0;
hid_t file = -1;
hid_t group = -1;
hid_t gcpl = -1;
/* Save old error handler */
herr_t (*old_func)(hid_t, void*) = NULL;
void *old_client_data = NULL;
H5Eget_auto(H5E_DEFAULT, &old_func, &old_client_data);
/* Turn off error handling */
H5Eset_auto(H5E_DEFAULT, NULL, NULL);
file = H5Fopen(file_path, H5F_ACC_RDWR, H5P_DEFAULT);
if (file < 0) {
file = H5Fcreate(file_path, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
}
if (file < 0) {
status = file;
goto cleanup;
}
group = H5Gopen(file, group_path, H5P_DEFAULT);
if (group < 0) {
gcpl = H5Pcreate(H5P_LINK_CREATE);
if (gcpl < 0) {
status = gcpl;
goto cleanup;
}
status = H5Pset_create_intermediate_group(gcpl, 1);
if (status < 0) {
goto cleanup;
}
group = H5Gcreate(file, group_path, gcpl, H5P_DEFAULT, H5P_DEFAULT);
if (group < 0) {
status = group;
goto cleanup;
}
}
cleanup:
if (gcpl >= 0) {
H5Pclose(gcpl);
}
if (group >= 0) {
H5Gclose(group);
}
if (file >= 0) {
H5Fclose(file);
}
/* Restore previous error handler */
H5Eset_auto(H5E_DEFAULT, old_func, old_client_data);
if (status < 0) {
H5Eprint(H5E_DEFAULT, stderr);
}
return status;
}
int
qdm_vector_hd5_read(
hid_t id,
const char *name,
gsl_vector **v
)
{
int status = 0;
int rank = 0;
status = H5LTget_dataset_ndims(id, name, &rank);
if (status < 0) {
return status;
}
hsize_t dims[rank];
status = H5LTget_dataset_info(id, name, dims, NULL, NULL);
if (status < 0) {
return status;
}
size_t size = 1;
for (int i = 0; i < rank; i++) {
size *= dims[i];
}
gsl_vector *tmp = gsl_vector_alloc(size);
status = H5LTread_dataset_double(id, name, tmp->data);
if (status < 0) {
gsl_vector_free(tmp);
return status;
}
*v = tmp;
return status;
}
int
qdm_vector_hd5_write(
hid_t id,
const char *name,
const gsl_vector *v
)
{
int status = 0;
hid_t datatype = -1;
hid_t dataspace = -1;
hid_t dataset = -1;
hid_t dcpl = -1;
if (v == NULL) {
goto cleanup;
}
datatype = H5Tcopy(H5T_NATIVE_DOUBLE);
status = H5Tset_order(datatype, H5T_ORDER_LE);
if (status != 0) {
goto cleanup;
}
hsize_t dims[1] = {
v->size,
};
dataspace = H5Screate_simple(1, dims, NULL);
if (dataspace < 0) {
status = dataspace;
goto cleanup;
}
dcpl = H5Pcreate(H5P_DATASET_CREATE);
if (dcpl < 0) {
status = dcpl;
goto cleanup;
}
/* Only enable compression on larger vectors. */
/*
if (v->size > 1024) {
status = H5Pset_deflate(dcpl, 9);
if (status != 0) {
goto cleanup;
}
hsize_t chunk_dims[1] = {
v->size,
};
status = H5Pset_chunk(dcpl, 1, chunk_dims);
if (status != 0) {
goto cleanup;
}
}
*/
dataset = H5Dcreate(id, name, datatype, dataspace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
if (dataset < 0) {
status = dataset;
goto cleanup;
}
status = H5Dwrite(dataset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, v->data);
if (status != 0) {
goto cleanup;
}
cleanup:
if (dcpl >= 0) {
H5Pclose(dcpl);
}
if (dataset >= 0) {
H5Dclose(dataset);
}
if (dataspace >= 0) {
H5Sclose(dataspace);
}
if (datatype >= 0) {
H5Tclose(datatype);
}
H5Oflush(id);
return status;
}
int
qdm_matrix_hd5_read(
hid_t id,
const char *name,
gsl_matrix **m
)
{
int status = 0;
int rank = 0;
status = H5LTget_dataset_ndims(id, name, &rank);
if (status < 0) {
return status;
}
if (rank < 2) {
return -1;
}
hsize_t dims[rank];
status = H5LTget_dataset_info(id, name, dims, NULL, NULL);
if (status < 0) {
return status;
}
size_t size1 = dims[0];
size_t size2 = 1;
for (int i = 1; i < rank; i++) {
size2 *= dims[i];
}
gsl_matrix *tmp = gsl_matrix_alloc(size1, size2);
status = H5LTread_dataset_double(id, name, tmp->data);
if (status < 0) {
gsl_matrix_free(tmp);
return status;
}
*m = tmp;
return status;
}
int
qdm_matrix_hd5_write(
hid_t id,
const char *name,
const gsl_matrix *m
)
{
int status = 0;
hid_t datatype = -1;
hid_t dataspace = -1;
hid_t dataset = -1;
hid_t dcpl = -1;
if (m == NULL) {
goto cleanup;
}
datatype = H5Tcopy(H5T_NATIVE_DOUBLE);
status = H5Tset_order(datatype, H5T_ORDER_LE);
if (status != 0) {
goto cleanup;
}
hsize_t dims[2] = {
m->size1,
m->size2,
};
dataspace = H5Screate_simple(2, dims, NULL);
if (dataspace < 0) {
status = dataspace;
goto cleanup;
}
dcpl = H5Pcreate(H5P_DATASET_CREATE);
if (dcpl < 0) {
status = dcpl;
goto cleanup;
}
/* Only enable compression on larger matrices. */
/*
if (m->size1 * m->size2 > 1024) {
status = H5Pset_deflate(dcpl, 9);
if (status != 0) {
goto cleanup;
}
hsize_t chunk_dims[2] = {
m->size1,
m->size2
};
status = H5Pset_chunk(dcpl, 2, chunk_dims);
if (status != 0) {
goto cleanup;
}
}
*/
dataset = H5Dcreate(id, name, datatype, dataspace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
if (dataset < 0) {
status = dataset;
goto cleanup;
}
status = H5Dwrite(dataset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, m->data);
if (status != 0) {
goto cleanup;
}
cleanup:
if (dcpl >= 0) {
H5Pclose(dcpl);
}
if (dataset >= 0) {
H5Dclose(dataset);
}
if (dataspace >= 0) {
H5Sclose(dataspace);
}
if (datatype >= 0) {
H5Tclose(datatype);
}
H5Oflush(id);
return status;
}
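/* Select the values of `select_column` from the rows of m whose `needle_column`
 * equals `needle_value` (a simple column filter over the matrix rows). */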
gsl_vector *
qdm_matrix_filter(
const gsl_matrix *m,
size_t needle_column,
double needle_value,
size_t select_column
)
{
size_t size = 0;
for (size_t i = 0; i < m->size1; i++) {
if (gsl_matrix_get(m, i, needle_column) == needle_value) {
size++;
}
}
gsl_vector *v = gsl_vector_alloc(size);
size_t j = 0;
for (size_t i = 0; i < m->size1; i++) {
double select_value = gsl_matrix_get(m, i, select_column);
if (gsl_matrix_get(m, i, needle_column) == needle_value) {
gsl_vector_set(v, j, select_value);
j++;
}
}
return v;
}
|
If $f$ is a holomorphic function on a ball $B(z,r)$ and $w \in B(z,r)$, then the $k$th derivative of $f$ at $w$ is equal to the contour integral of $f(u)/(u-w)^{k+1}$ over the circle of radius $r$ centered at $z$. |
module Eval where
open import Coinduction
open import Data.Bool
open import Data.Fin using (Fin)
import Data.Fin as F
open import Data.Product using (Σ; _,_)
import Data.Product as P
open import Data.Stream using (Stream; _∷_)
import Data.Stream as S
open import Data.Vec using (Vec; []; _∷_)
import Data.Vec as V
open import Relation.Binary.PropositionalEquality
open import Types
⟦_⟧ᵗ : Type → Set
⟦ 𝔹 ⟧ᵗ = Bool
⟦ 𝔹⁺ n ⟧ᵗ = Vec Bool n
⟦ ℂ τ ⟧ᵗ = Stream ⟦ τ ⟧ᵗ
⟦ σ ⇒ τ ⟧ᵗ = ⟦ σ ⟧ᵗ → ⟦ τ ⟧ᵗ
⟦ σ × τ ⟧ᵗ = P._×_ ⟦ σ ⟧ᵗ ⟦ τ ⟧ᵗ
data Env : ∀ {n} → Ctx n → Set where
[] : Env []
_∷_ : ∀ {n} {Γ : Ctx n} {τ} → ⟦ τ ⟧ᵗ → Env Γ → Env (τ ∷ Γ)
lookupEnv : ∀ {n} {Γ : Ctx n} (i : Fin n) → Env Γ → ⟦ V.lookup i Γ ⟧ᵗ
lookupEnv F.zero (x ∷ env) = x
lookupEnv (F.suc i) (x ∷ env) = lookupEnv i env
private
runReg : ∀ {σ τ} → (⟦ τ ⟧ᵗ → ⟦ τ ⟧ᵗ P.× ⟦ σ ⟧ᵗ) → ⟦ τ ⟧ᵗ → Stream ⟦ σ ⟧ᵗ
runReg f s with f s
runReg f s | s′ , x = x ∷ ♯ (runReg f s′)
_⟦_⟧ : ∀ {n} {Γ : Ctx n} {τ} → Env Γ → Term Γ τ → ⟦ τ ⟧ᵗ
env ⟦ bitI ⟧ = true
env ⟦ bitO ⟧ = false
env ⟦ [] ⟧ = []
env ⟦ x ∷ xs ⟧ = env ⟦ x ⟧ ∷ env ⟦ xs ⟧
env ⟦ x nand y ⟧ with env ⟦ x ⟧ | env ⟦ y ⟧
env ⟦ x nand y ⟧ | true | true = false
env ⟦ x nand y ⟧ | _ | _ = true
env ⟦ reg xt ft ⟧ = runReg (env ⟦ ft ⟧) (env ⟦ xt ⟧)
env ⟦ pair xt yt ⟧ = (env ⟦ xt ⟧) , (env ⟦ yt ⟧)
env ⟦ latest t ⟧ = S.head (env ⟦ t ⟧)
env ⟦ head t ⟧ = V.head (env ⟦ t ⟧)
env ⟦ tail t ⟧ = V.tail (env ⟦ t ⟧)
env ⟦ var i refl ⟧ = lookupEnv i env
env ⟦ f ∙ x ⟧ = (env ⟦ f ⟧) (env ⟦ x ⟧)
env ⟦ lam t ⟧ = λ x → (x ∷ env) ⟦ t ⟧
|
(*
* Copyright (c) 2020, CleanQ Project - Systems Group, ETH Zurich
* All rights reserved.
*
* This software may be distributed and modified according to the terms of
* the BSD 2-Clause license. Note that NO WARRANTY is provided.
*
* See "LICENSE" for details.
*
* SPDX-License-Identifier: BSD-2-Clause
*)
section "CleanQ Abstract List Model"
text \<open>
We define a first refinement of the abstract model based on sets. We redefine the
transfer sets as lists in order to get the FIFO for the transfer from X to Y and
vice versa. We define the model of this first refinement in the the following
Isabelle theory:
\<close>
theory CleanQ_ListModel
(*<*)
imports Main CleanQ_SetModel CleanQ_Utils
(*>*)
begin
(* ==================================================================================== *)
subsection \<open>State Definition\<close>
(* ==================================================================================== *)
text \<open>
Similar to the abstract set model, we express a system with a single point-to-point
queue between two agents $X$ and $Y$. However, in contrast we now use lists instead
of sets for the transfer between the two agents. The state of the abstract CleanQ List
model is captured in the Isabelle record \verb+CleanQ_List_State+. Like the previous
model, we express the buffers owned by $X$ or $Y$ as sets.
\<^item> lSX: this is the set of buffers owned by X.
\<^item> lSY: this is the set of buffers owned by Y.
\<^item> lTXY: this is a list of buffers in transfer from X to Y.
\<^item> lTYX: this is a list of buffers in transfer from Y to X.
\<close>
record 'a CleanQ_List_State =
lSX :: "'a set"
lSY :: "'a set"
lTXY :: "'a list"
lTYX :: "'a list"
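(* Illustrative example (not part of the original development): a state in which X owns
   buffers 1 and 2, Y owns buffer 3, and buffer 4 is in transfer from X to Y is written
   \<lparr> lSX = {1, 2}, lSY = {3}, lTXY = [4], lTYX = [] \<rparr>. *)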
text \<open>
Like the abstract set model, we do not specify the representation of the buffer
elements. This can be a single, fixed-sized page frame, a variable-sized base-limit
segment, or a set of memory locations.
\<close>
(*<*)
(* Define some global variables to make Simpl/Complex proofs work *)
record 'g CleanQ_List_State_vars =
ListRB_' :: "nat CleanQ_List_State"
ListB_' :: nat
(*>*)
(* ==================================================================================== *)
subsection \<open>State Lifting Function\<close>
(* ==================================================================================== *)
text \<open>
 The CleanQ List model is a data refinement of the CleanQ Set Model. We can define an
 interpretation function that lifts the CleanQ list model state into the CleanQ
 set model state by taking the \verb+set+ of the transfer lists.
\<close>
definition CleanQ_List2Set :: "'a CleanQ_List_State \<Rightarrow> 'a CleanQ_Set_State"
where "CleanQ_List2Set l = \<lparr> SX = lSX l, SY = lSY l,
TXY = set (lTXY l), TYX = set (lTYX l) \<rparr>"
(* ==================================================================================== *)
subsection \<open>CleanQ List Model Invariants\<close>
(* ==================================================================================== *)
text \<open>
We now formulate the invariants I1 and I2 under the CleanQ list model, and add
an additional invariant I3.
\<close>
(* ------------------------------------------------------------------------------------ *)
subsubsection \<open>I1: Constant Union (Image)\<close>
(* ------------------------------------------------------------------------------------ *)
text \<open>
The union of all sets is constant. We formulate this as an image for
\verb+CleanQ_List+ where we take the set of the transfer lists and apply the
union.
\<close>
definition I1_list_img :: "'a CleanQ_List_State \<Rightarrow> 'a set \<Rightarrow> bool"
where "I1_list_img rb K \<longleftrightarrow> ((lSX rb) \<union> (lSY rb) \<union> set (lTXY rb) \<union> set (lTYX rb)) = K"
text \<open>
We can show that the image of the invariant satisfies the original invariant I1 when
we apply the lifting function \verb+CleanQ_List2Set+ to the model state. We prove
this in the following lemma.
\<close>
lemma I1_list_img_lift:
"I1_list_img L K = I1 (CleanQ_List2Set L) K"
unfolding CleanQ_List2Set_def I1_def I1_list_img_def by(simp)
(* ------------------------------------------------------------------------------------ *)
subsubsection \<open>I2: Pairwise Empty (Image)\<close>
(* ------------------------------------------------------------------------------------ *)
text \<open>
All pairwise intersections are empty. Again, we formulate this as an image for
\verb+CleanQ_List+ by taking the set of the transfer lists.
\<close>
definition I2_list_img :: "'a CleanQ_List_State \<Rightarrow> bool"
where "I2_list_img rb \<longleftrightarrow> lSX rb \<inter> lSY rb = {} \<and> lSX rb \<inter> set (lTXY rb) = {} \<and>
lSX rb \<inter> set (lTYX rb) = {} \<and> lSY rb \<inter> set (lTXY rb) = {} \<and>
lSY rb \<inter> set (lTYX rb) = {} \<and> set (lTXY rb) \<inter> set (lTYX rb) = {}"
text \<open>
 Finally, we can show that the image of the invariant I2 is equivalent to the original
 invariant when we lift the CleanQ List state to the CleanQ Set state. We prove this
in the following lemma:
\<close>
lemma I2_list_img_lift:
"I2_list_img L = I2 (CleanQ_List2Set L)"
unfolding CleanQ_List2Set_def I2_def I2_list_img_def by(simp)
(* ------------------------------------------------------------------------------------ *)
subsubsection \<open>I3: Distinct Transfer Lists\<close>
(* ------------------------------------------------------------------------------------ *)
text \<open>
 In contrast to sets, an element can occur in a list more than once. We therefore need
 to rule out that a buffer is present in a transfer list twice. This invariant is
 required for the move from sets to lists: in a set an element occurs at most once,
 while in a list it can occur multiple times, so a list containing the same element
 twice would be mapped to a set containing it only once. To avoid this, the elements
 of the transfer lists need to be distinct.
\<close>
definition I3 :: "'a CleanQ_List_State \<Rightarrow> bool"
where "I3 st_list \<longleftrightarrow> distinct (lTXY st_list) \<and> distinct (lTYX st_list)"
text \<open>
From the invariant I3, we obtain that the cardinality of the sets in the lifted
CleanQ set state is the same as the length of the lists.
\<close>
lemma I3_cardinality :
assumes I3_holds : "I3 L" and lift: "LS = CleanQ_List2Set L"
shows "length (lTXY L) = card (TXY LS) \<and> length (lTYX L) = card (TYX LS)"
using I3_holds lift unfolding CleanQ_List2Set_def
by (metis CleanQ_List2Set_def CleanQ_Set_State.ext_inject CleanQ_Set_State.surjective
I3_def assms(2) distinct_card)
(* ------------------------------------------------------------------------------------ *)
subsubsection \<open>All CleanQ List Invariants\<close>
(* ------------------------------------------------------------------------------------ *)
text \<open>
We combine all invariants for the abstract CleanQ list model and define the unified
predicate \verb+CleanQ_List_Invariants+.
\<close>
definition CleanQ_List_Invariants :: "'a set \<Rightarrow> 'a CleanQ_List_State \<Rightarrow> bool"
where "CleanQ_List_Invariants K rb \<longleftrightarrow> I1_list_img rb K \<and> I2_list_img rb \<and> I3 rb"
lemmas CleanQ_List_Invariants_simp = CleanQ_List_Invariants_def I1_list_img_def
I2_list_img_def I3_def
text \<open>
Finally, we can show that when the CleanQ List invariants are satisfied, this also
satisfies the set invariants.
\<close>
lemma CleanQ_List_Invariants_Set_Invariants:
"CleanQ_List_Invariants K L \<Longrightarrow> CleanQ_Set_Invariants K (CleanQ_List2Set L)"
by (simp add: CleanQ_List_Invariants_def CleanQ_Set_Invariants_def
I1_list_img_lift I2_list_img_lift)
(* ==================================================================================== *)
subsection \<open>State Transition Operations\<close>
(* ==================================================================================== *)
text \<open>
We now formulate the state transition operations in terms of the CleanQ List model
state. Again, the two agents can, independently from each other, perform one of
two operations, \verb+enqueue+ and \verb+dequeue+, which trigger an ownership
transfer of buffer elements.
\<close>
(* ------------------------------------------------------------------------------------ *)
subsubsection \<open>Enqueue Operation\<close>
(* ------------------------------------------------------------------------------------ *)
text \<open>
 The \verb+enqueue+ operation is analogous to the Set operation except that the elements
 are appended to the transfer list instead of being inserted into the transfer set. Note
 that we always insert the element at the end of the list.
\<close>
definition CleanQ_List_enq_x :: "'a \<Rightarrow> 'a CleanQ_List_State \<Rightarrow> 'a CleanQ_List_State"
where "CleanQ_List_enq_x b rb = rb \<lparr> lSX := (lSX rb) - {b}, lTXY := lTXY rb @ [b] \<rparr>"
definition CleanQ_List_enq_y :: "'a \<Rightarrow> 'a CleanQ_List_State \<Rightarrow> 'a CleanQ_List_State"
where "CleanQ_List_enq_y b rb = rb \<lparr> lSY := (lSY rb) - {b}, lTYX := lTYX rb @ [b] \<rparr>"
text \<open>
These definitions are the same as producing a new record:
\<close>
lemma CleanQ_List_enq_x_upd :
"CleanQ_List_enq_x b rb = \<lparr> lSX = (lSX rb) - {b}, lSY = (lSY rb),
lTXY = (lTXY rb) @ [b], lTYX = (lTYX rb) \<rparr>"
by(simp add:CleanQ_List_enq_x_def)
lemma CleanQ_List_enq_y_upd :
"CleanQ_List_enq_y b rb = \<lparr> lSX = (lSX rb), lSY = (lSY rb) - {b},
lTXY = (lTXY rb), lTYX = (lTYX rb) @ [b] \<rparr>"
by(simp add:CleanQ_List_enq_y_def)
text \<open>
We can now show that the result of the \verb+enqueue+ operation is the same as the
\verb+enqueue+ operation on the set model. This is the refinement proof for the
\verb+enqueue+ operation, showing commutative equivalence.
\<close>
lemma CleanQ_List_enq_x_equal :
"CleanQ_List2Set (CleanQ_List_enq_x b rb) = CleanQ_Set_enq_x b (CleanQ_List2Set rb)"
unfolding CleanQ_List2Set_def CleanQ_Set_enq_x_def CleanQ_List_enq_x_def
by(auto)
lemma CleanQ_List_enq_y_equal :
"CleanQ_List2Set (CleanQ_List_enq_y b rb) = CleanQ_Set_enq_y b (CleanQ_List2Set rb)"
unfolding CleanQ_List2Set_def CleanQ_Set_enq_y_def CleanQ_List_enq_y_def
by(auto)
text \<open>
The enqueue operations move buffers around different sets and lists. We define a
few helper lemmas, which allow us to talk about where the buffer ends up.
\<close>
lemma CleanQ_List_enq_x_result :
assumes X_owned: "b \<in> lSX rb" and X_enq: "rb' = CleanQ_List_enq_x b rb"
and I2_holds : "I2_list_img rb"
shows "b \<in> set (lTXY rb') \<and> b \<notin> lSX rb' \<and> b \<notin> lSY rb' \<and> b \<notin> set (lTYX rb')"
using X_owned X_enq I2_holds unfolding CleanQ_List_enq_x_def by(auto simp:I2_list_img_def)
lemma CleanQ_List_enq_y_result :
assumes Y_owned: "b \<in> lSY rb" and X_enq: "rb' = CleanQ_List_enq_y b rb"
and I2_holds : "I2_list_img rb"
shows "b \<in> set (lTYX rb') \<and> b \<notin> lSY rb' \<and> b \<notin> lSX rb' \<and> b \<notin> set (lTXY rb')"
using Y_owned X_enq I2_holds unfolding CleanQ_List_enq_y_def by(auto simp:I2_list_img_def)
text \<open>
 Not only can we say that the buffer is in the set of the list, we can even say that
 it is precisely at the end of it.
\<close>
lemma CleanQ_List_enq_x_result_p :
assumes X_owned: "b \<in> lSX rb" and X_enq: "rb' = CleanQ_List_enq_x b rb"
and I2_holds : "I2_list_img rb"
shows "b = last (lTXY rb') \<and> b \<notin> lSX rb' \<and> b \<notin> lSY rb' \<and> b \<notin> set (lTYX rb')"
using X_owned X_enq I2_holds unfolding CleanQ_List_enq_x_def by(auto simp:I2_list_img_def)
lemma CleanQ_List_enq_y_result_p :
assumes Y_owned: "b \<in> lSY rb" and X_enq: "rb' = CleanQ_List_enq_y b rb"
and I2_holds : "I2_list_img rb"
shows "b = last (lTYX rb') \<and> b \<notin> lSY rb' \<and> b \<notin> lSX rb' \<and> b \<notin> set (lTXY rb')"
using Y_owned X_enq I2_holds unfolding CleanQ_List_enq_y_def by(auto simp:I2_list_img_def)
text \<open>
The two operations \verb+CleanQ_Set_enq_x+ and \verb+CleanQ_Set_enq_y+ transition
the model state. Thus we need to prove that all invariants are preserved. We do this
individually first, then combine them. Note that the proofs are symmetric.
\<close>
lemma CleanQ_List_enq_x_I1 :
assumes I1_holds: "I1_list_img rb K" and X_owned: "b \<in> lSX rb"
shows "I1_list_img (CleanQ_List_enq_x b rb) K"
using I1_holds X_owned unfolding CleanQ_List_enq_x_def I1_list_img_def by auto
lemma CleanQ_List_enq_y_I1 :
assumes I1_holds: "I1_list_img rb K" and X_owned: "b \<in> lSY rb"
shows "I1_list_img (CleanQ_List_enq_y b rb) K"
using I1_holds X_owned unfolding CleanQ_List_enq_y_def I1_list_img_def by auto
lemma CleanQ_List_enq_x_I2 :
assumes I2_holds: "I2_list_img rb" and X_owned: "b \<in> lSX rb"
shows "I2_list_img (CleanQ_List_enq_x b rb)"
using I2_holds X_owned unfolding CleanQ_List_enq_x_def I2_list_img_def by auto
lemma CleanQ_List_enq_y_I2 :
assumes I2_holds: "I2_list_img rb" and X_owned: "b \<in> lSY rb"
shows "I2_list_img (CleanQ_List_enq_y b rb)"
using I2_holds X_owned unfolding CleanQ_List_enq_y_def I2_list_img_def by auto
lemma CleanQ_List_enq_x_I3 :
assumes I2_holds: "I2_list_img rb" and I3_holds: "I3 rb" and X_owned: "b \<in> lSX rb"
shows "I3 (CleanQ_List_enq_x b rb)"
using I2_holds I3_holds X_owned unfolding CleanQ_List_enq_x_def I3_def I2_list_img_def
by auto
lemma CleanQ_List_enq_y_I3 :
assumes I2_holds: "I2_list_img rb" and I3_holds: "I3 rb" and X_owned: "b \<in> lSY rb"
shows "I3 (CleanQ_List_enq_y b rb)"
using I2_holds I3_holds X_owned unfolding CleanQ_List_enq_y_def I3_def I2_list_img_def
by auto
text \<open>
 Invariants I1, I2, and I3 are preserved by \verb+enqueue+ operations, thus we can
 combine them to show that the combined predicate \verb+CleanQ_List_Invariants+
 always holds.
\<close>
lemma CleanQ_List_enq_x_Invariants :
assumes I_holds : "CleanQ_List_Invariants K rb" and X_owned: "b \<in> lSX rb"
shows "CleanQ_List_Invariants K (CleanQ_List_enq_x b rb)"
using assms unfolding CleanQ_List_Invariants_def
by(simp add:CleanQ_List_enq_x_I1 CleanQ_List_enq_x_I2 CleanQ_List_enq_x_I3)
lemma CleanQ_List_enq_y_Invariants :
assumes I_holds : "CleanQ_List_Invariants K rb" and Y_owned: "b \<in> lSY rb"
shows "CleanQ_List_Invariants K (CleanQ_List_enq_y b rb)"
using assms unfolding CleanQ_List_Invariants_def
by(simp add:CleanQ_List_enq_y_I1 CleanQ_List_enq_y_I2 CleanQ_List_enq_y_I3)
text \<open>
Finally, we can show that the invariants of the set model are preserved.
\<close>
lemma CleanQ_List_enq_x_Set_Invariants :
assumes I_holds : "CleanQ_List_Invariants K rb" and X_owned: "b \<in> lSX rb"
and RB_upd: "rb' = CleanQ_List_enq_x b rb"
shows "CleanQ_Set_Invariants K (CleanQ_List2Set rb')"
by (metis CleanQ_List_Invariants_Set_Invariants CleanQ_List_enq_x_Invariants assms)
lemma CleanQ_List_enq_y_Set_Invariants :
assumes I_holds : "CleanQ_List_Invariants K rb" and Y_owned: "b \<in> lSY rb"
and RB_upd: "rb' = CleanQ_List_enq_y b rb"
shows "CleanQ_Set_Invariants K (CleanQ_List2Set rb')"
by (metis CleanQ_List_Invariants_Set_Invariants CleanQ_List_enq_y_Invariants assms)
(* ------------------------------------------------------------------------------------ *)
subsubsection \<open>Dequeue Operation\<close>
(* ------------------------------------------------------------------------------------ *)
text \<open>
 The \verb+dequeue+ operation is analogous to the Set operation except that the elements
 are removed from the transfer list instead of from the transfer set. Note that we
 always remove the element at the front of the list.
\<close>
definition CleanQ_List_deq_x :: "'a CleanQ_List_State \<Rightarrow> 'a CleanQ_List_State"
where "CleanQ_List_deq_x rb = rb \<lparr> lSX := (lSX rb) \<union> {hd (lTYX rb)},
lTYX := tl (lTYX rb) \<rparr>"
definition CleanQ_List_deq_y :: "'a CleanQ_List_State \<Rightarrow> 'a CleanQ_List_State"
where "CleanQ_List_deq_y rb = rb \<lparr> lSY := (lSY rb) \<union> {hd (lTXY rb)},
lTXY := tl (lTXY rb) \<rparr>"
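(* Illustrative example (hypothetical values): dequeueing at X takes the head of the
   Y-to-X transfer list and adds it to X's ownership set, e.g.
   CleanQ_List_deq_x \<lparr> lSX = {2}, lSY = {}, lTXY = [], lTYX = [1, 3] \<rparr>
     = \<lparr> lSX = {1, 2}, lSY = {}, lTXY = [], lTYX = [3] \<rparr>. *)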
text \<open>
 These definitions are the same as producing a new record instead of updating the old one.
\<close>
lemma CleanQ_List_deq_x_upd :
"CleanQ_List_deq_x rb = \<lparr> lSX = (lSX rb) \<union> {hd (lTYX rb)}, lSY = lSY rb,
lTXY = lTXY rb, lTYX = tl (lTYX rb) \<rparr>"
by (simp add: CleanQ_List_deq_x_def)
lemma CleanQ_List_deq_y_upd :
"CleanQ_List_deq_y rb = \<lparr> lSX = lSX rb, lSY = (lSY rb) \<union> {hd (lTXY rb)},
lTXY = tl (lTXY rb), lTYX = lTYX rb \<rparr>"
by(simp add:CleanQ_List_deq_y_def)
text \<open>
We can express the dequeue operation using \verb+drop 1+ and \verb+take 1+ instead of
using head and tail.
\<close>
lemma CleanQ_List_deq_x_drop_take :
"lTYX rb \<noteq> [] \<Longrightarrow> CleanQ_List_deq_x rb = rb \<lparr> lSX := (lSX rb) \<union> set (take 1 (lTYX rb)),
lTYX := drop 1 (lTYX rb) \<rparr>"
unfolding CleanQ_List_deq_x_def
by(auto simp add:list_tail_drop_one list_head_take_one_set2)
lemma CleanQ_List_deq_y_drop_take :
"lTXY rb \<noteq> [] \<Longrightarrow> CleanQ_List_deq_y rb = rb \<lparr> lSY := (lSY rb) \<union> set (take 1 (lTXY rb)),
lTXY := drop 1 (lTXY rb) \<rparr>"
unfolding CleanQ_List_deq_y_def
by(auto simp add:list_tail_drop_one list_head_take_one_set2)
text \<open>
The dequeue operations move buffers around different sets and lists. We define a
few helper lemmas, which allow us to talk about where the buffer ends up.
\<close>
lemma CleanQ_List_deq_x_result :
assumes ne: "lTYX rb \<noteq> []" and X_deq: "rb' = CleanQ_List_deq_x rb"
and I: "CleanQ_List_Invariants K rb" and buf: "b = hd (lTYX rb)"
shows "b \<in> (lSX rb') \<and> b \<notin> lSY rb' \<and> b \<notin> set(lTXY rb') \<and> b \<notin> set (lTYX rb')"
using assms unfolding CleanQ_List_deq_x_def
by(auto simp:CleanQ_List_Invariants_simp list_set_hd_tl_subtract)
lemma CleanQ_List_deq_y_result :
assumes ne: "lTXY rb \<noteq> []" and Y_deq: "rb' = CleanQ_List_deq_y rb"
and I: "CleanQ_List_Invariants K rb" and buf: "b = hd (lTXY rb)"
shows "b \<in> (lSY rb') \<and> b \<notin> lSX rb' \<and> b \<notin> set(lTXY rb') \<and> b \<notin> set (lTYX rb')"
using assms unfolding CleanQ_List_deq_y_def
apply(simp add:CleanQ_List_Invariants_simp list_set_hd_tl_subtract )
by (meson disjoint_iff_not_equal hd_in_set)
text \<open>
 We can now show that the operations have the same outcome when lifted to the
set model.
\<close>
lemma CleanQ_List_deq_x_equal :
assumes ne: "lTYX rb \<noteq> []" and TYX_owned : "b = hd (lTYX rb)" and I3_holds : "I3 rb"
shows "CleanQ_List2Set (CleanQ_List_deq_x rb) = CleanQ_Set_deq_x b (CleanQ_List2Set rb)"
unfolding CleanQ_List2Set_def CleanQ_Set_deq_x_def CleanQ_List_deq_x_def
using assms by(simp add: list_set_hd_tl_subtract I3_def)
lemma CleanQ_List_deq_y_equal :
assumes ne: "lTXY rb \<noteq> []" and TXY_owned : "b = hd (lTXY rb)" and I3_holds : "I3 rb"
shows "CleanQ_List2Set (CleanQ_List_deq_y rb) = CleanQ_Set_deq_y b (CleanQ_List2Set rb)"
unfolding CleanQ_List2Set_def CleanQ_Set_deq_y_def CleanQ_List_deq_y_def
using assms by(simp add: list_set_hd_tl_subtract I3_def)
text \<open>
The two operations \verb+CleanQ_List_deq_x+ and \verb+CleanQ_List_deq_y+ transition
the model state. Thus we need to prove that invariants \verb+I1_list_img+,
\verb+I2_list_img+, and \verb+I3+ are preserved for both of them.
\<close>
lemma CleanQ_List_deq_x_I1 :
assumes I1_holds : "I1_list_img rb K" and TYX_ne: "(lTYX rb) \<noteq> []"
shows "I1_list_img (CleanQ_List_deq_x rb) K"
using TYX_ne I1_holds list_set_hd_tl_union
unfolding CleanQ_List_deq_x_def by(auto simp:I1_list_img_def)
lemma CleanQ_List_deq_y_I1 :
assumes I1_holds : "I1_list_img rb K" and TXY_ne: "(lTXY rb) \<noteq> []"
shows "I1_list_img (CleanQ_List_deq_y rb) K"
using TXY_ne I1_holds list_set_hd_tl_union
unfolding CleanQ_List_deq_y_def by(auto simp:I1_list_img_def)
lemma CleanQ_List_deq_x_I2 :
assumes I2_holds : "I2_list_img rb" and ne: "(lTYX rb) \<noteq> []" and I3_holds: "I3 rb"
shows "I2_list_img (CleanQ_List_deq_x rb)"
using assms unfolding CleanQ_List_deq_x_def I2_list_img_def I3_def
by (auto simp add: list_set_hd_tl_subtract)
lemma CleanQ_List_deq_y_I2 :
assumes I2_holds : "I2_list_img rb" and ne: "(lTXY rb) \<noteq> []" and I3_holds: "I3 rb"
shows "I2_list_img (CleanQ_List_deq_y rb)"
using assms unfolding CleanQ_List_deq_y_def I2_list_img_def I3_def
apply (auto simp: list_set_hd_tl_subtract)
by (metis IntI empty_iff hd_in_set)
lemma CleanQ_List_deq_x_I3 :
assumes I3_holds : "I3 rb"
shows "I3 (CleanQ_List_deq_x rb)"
using assms distinct_tl unfolding CleanQ_List_deq_x_def I3_def by auto
lemma CleanQ_List_deq_y_I3 :
assumes I3_holds : "I3 rb"
shows "I3 (CleanQ_List_deq_y rb)"
using assms distinct_tl unfolding CleanQ_List_deq_y_def I3_def by auto
text \<open>
 Invariants I1, I2, and I3 are preserved by the dequeue operations, thus we can combine
 them to show that the predicate \verb+CleanQ_List_Invariants+ holds.
\<close>
lemma CleanQ_List_deq_x_Invariants :
assumes I_holds : "CleanQ_List_Invariants K rb" and TYX_ne: "[] \<noteq> (lTYX rb)"
shows "CleanQ_List_Invariants K (CleanQ_List_deq_x rb)"
using assms CleanQ_List_deq_x_I1 CleanQ_List_deq_x_I2 CleanQ_List_deq_x_I3
using CleanQ_List_Invariants_def by fastforce
lemma CleanQ_List_deq_y_Invariants :
assumes I_holds : "CleanQ_List_Invariants K rb" and TYX_ne: "[] \<noteq> (lTXY rb)"
shows "CleanQ_List_Invariants K (CleanQ_List_deq_y rb)"
using assms CleanQ_List_deq_y_I1 CleanQ_List_deq_y_I2 CleanQ_List_deq_y_I3
using CleanQ_List_Invariants_def by fastforce
(* ==================================================================================== *)
subsection \<open>Multi-Step State Transition Operations\<close>
(* ==================================================================================== *)
text \<open>
 We now define the \verb+enqueue+ and \verb+dequeue+ operations for multi-step
 state advancement in one instance.
\<close>
(* ------------------------------------------------------------------------------------ *)
subsubsection \<open>Enqueue Operation\<close>
(* ------------------------------------------------------------------------------------ *)
text \<open>
 We first define the \verb+enqueue_n+ operation for both sides. This removes a list
 of buffers from the owning set and appends them to the transfer list.
\<close>
definition CleanQ_List_enq_n_x :: "'a list \<Rightarrow> 'a CleanQ_List_State \<Rightarrow> 'a CleanQ_List_State"
where "CleanQ_List_enq_n_x B rb = rb \<lparr> lSX := (lSX rb) - set B, lTXY := lTXY rb @ B \<rparr>"
definition CleanQ_List_enq_n_y :: "'a list \<Rightarrow> 'a CleanQ_List_State \<Rightarrow> 'a CleanQ_List_State"
where "CleanQ_List_enq_n_y B rb = rb \<lparr> lSY := (lSY rb) - set B, lTYX := lTYX rb @ B \<rparr>"
text \<open>
This can be defined inductively as:
\<close>
lemma CleanQ_List_enq_n_x_ind:
"CleanQ_List_enq_n_x (b # B) rb = CleanQ_List_enq_n_x B (CleanQ_List_enq_x b rb)"
unfolding CleanQ_List_enq_n_x_def CleanQ_List_enq_x_def
by (simp, meson Diff_insert2)
lemma CleanQ_List_enq_n_y_ind:
"CleanQ_List_enq_n_y (b # B) rb = CleanQ_List_enq_n_y B (CleanQ_List_enq_y b rb)"
unfolding CleanQ_List_enq_n_y_def CleanQ_List_enq_y_def
by (simp, meson Diff_insert2)
text \<open>
 We can now show that enqueueing an empty list leaves the state unchanged, and that
 enqueueing a singleton list is the same as the single-element enqueue.
\<close>
lemma CleanQ_List_enq_n_x_0[simp]:
"CleanQ_List_enq_n_x [] rb = rb"
unfolding CleanQ_List_enq_n_x_def by(auto)
lemma CleanQ_List_enq_n_y_0[simp]:
"CleanQ_List_enq_n_y [] rb = rb"
unfolding CleanQ_List_enq_n_y_def by(auto)
lemma CleanQ_List_enq_n_x_1[simp]:
"CleanQ_List_enq_n_x [b] rb = CleanQ_List_enq_x b rb"
unfolding CleanQ_List_enq_n_x_def CleanQ_List_enq_x_def by(auto)
lemma CleanQ_List_enq_n_y_1[simp]:
"CleanQ_List_enq_n_y [b] rb = CleanQ_List_enq_y b rb"
unfolding CleanQ_List_enq_n_y_def CleanQ_List_enq_y_def by(auto)
text \<open>
The multi-step enqueue operations move buffers around different sets and lists.
We define a few helper lemmas, which allow us to talk about where the buffer ends up.
\<close>
lemma CleanQ_List_enq_n_x_result :
assumes X_owned: "\<forall>b \<in> set B. b \<in> lSX rb" and X_enq: "rb' = CleanQ_List_enq_n_x B rb"
and I2_holds : "I2_list_img rb"
shows "\<forall>b \<in> set B. b \<in> set (lTXY rb') \<and> b \<notin> lSX rb' \<and> b \<notin> lSY rb' \<and> b \<notin> set (lTYX rb')"
using X_owned X_enq I2_holds unfolding CleanQ_List_enq_n_x_def
by(auto simp:CleanQ_List_Invariants_simp)
lemma CleanQ_List_enq_n_y_result :
assumes X_owned: "\<forall>b \<in> set B. b \<in> lSY rb" and X_enq: "rb' = CleanQ_List_enq_n_y B rb"
and I2_holds : "I2_list_img rb"
shows "\<forall>b \<in> set B. b \<in> set (lTYX rb') \<and> b \<notin> lSX rb' \<and> b \<notin> lSY rb' \<and> b \<notin> set (lTXY rb')"
using X_owned X_enq I2_holds unfolding CleanQ_List_enq_n_y_def
by(auto simp:CleanQ_List_Invariants_simp)
text \<open>
 We can now show that the outcome of the list \verb+enqueue_n+ operation is the same
as the corresponding set operation.
\<close>
lemma CleanQ_List_enq_n_x_equal :
"CleanQ_List2Set (CleanQ_List_enq_n_x B rb) = CleanQ_Set_enq_n_x (set B) (CleanQ_List2Set rb)"
unfolding CleanQ_List2Set_def CleanQ_Set_enq_n_x_def CleanQ_List_enq_n_x_def
by(auto)
lemma CleanQ_List_enq_n_yx_equal :
"CleanQ_List2Set (CleanQ_List_enq_n_y B rb) = CleanQ_Set_enq_n_y (set B) (CleanQ_List2Set rb)"
unfolding CleanQ_List2Set_def CleanQ_Set_enq_n_y_def CleanQ_List_enq_n_y_def
by(auto)
text \<open>
 We now show that the \verb+enqueue_n+ operations satisfy the list invariants; we show
 each of the invariants I1-I3 individually.
\<close>
lemma CleanQ_List_enq_n_x_I1 :
assumes I1_holds: "I1_list_img rb K" and X_owned: "\<forall> b \<in> set B. b \<in> lSX rb"
shows "I1_list_img (CleanQ_List_enq_n_x B rb) K"
unfolding CleanQ_List_enq_n_x_def using I1_holds X_owned by(auto simp:I1_list_img_def)
lemma CleanQ_List_enq_n_y_I1 :
assumes I1_holds: "I1_list_img rb K" and Y_owned: "\<forall> b \<in> set B. b \<in> lSY rb"
shows "I1_list_img (CleanQ_List_enq_n_y B rb) K"
unfolding CleanQ_List_enq_n_y_def using I1_holds Y_owned by(auto simp:I1_list_img_def)
lemma CleanQ_List_enq_n_x_I2 :
assumes I2_holds: "I2_list_img rb" and X_owned: "\<forall>b \<in> set B. b \<in> lSX rb"
and dist: "distinct B"
shows "I2_list_img (CleanQ_List_enq_n_x B rb)"
unfolding CleanQ_List_enq_n_x_def using assms by(auto simp:I2_list_img_def)
lemma CleanQ_List_enq_n_y_I2 :
assumes I2_holds: "I2_list_img rb" and X_owned: "\<forall>b \<in> set B. b \<in> lSY rb"
and dist: "distinct B"
shows "I2_list_img (CleanQ_List_enq_n_y B rb)"
unfolding CleanQ_List_enq_n_y_def using assms by(auto simp:I2_list_img_def)
lemma CleanQ_List_enq_n_x_I3 :
assumes I2_holds: "I2_list_img rb" and I3_holds: "I3 rb"
and X_owned: "\<forall> b \<in> set B. b \<in> lSX rb" and dist: "distinct B"
shows "I3 (CleanQ_List_enq_n_x B rb)"
unfolding CleanQ_List_enq_n_x_def using assms by(auto simp:CleanQ_List_Invariants_simp)
lemma CleanQ_List_enq_n_y_I3 :
assumes I2_holds: "I2_list_img rb" and I3_holds: "I3 rb"
and Y_owned: "\<forall> b \<in> set B. b \<in> lSY rb" and dist: "distinct B"
shows "I3 (CleanQ_List_enq_n_y B rb)"
unfolding CleanQ_List_enq_n_y_def using assms by(auto simp:CleanQ_List_Invariants_simp)
text \<open>
We can now combine the proofs for invariants I1-I3 and show the complete list invariant.
\<close>
lemma CleanQ_List_enq_n_x_Invariants :
assumes I_holds : "CleanQ_List_Invariants K rb"
and X_owned: "\<forall> b \<in> set B. b \<in> lSX rb" and dist: "distinct B"
shows "CleanQ_List_Invariants K (CleanQ_List_enq_n_x B rb)"
unfolding CleanQ_List_enq_n_x_def
using assms CleanQ_List_enq_n_x_I3 CleanQ_List_enq_n_x_I2 CleanQ_List_enq_n_x_I1
by(auto simp:CleanQ_List_Invariants_simp)
lemma CleanQ_List_enq_n_y_Invariants :
assumes I_holds : "CleanQ_List_Invariants K rb"
and Y_owned: "\<forall> b \<in> set B. b \<in> lSY rb" and dist: "distinct B"
shows "CleanQ_List_Invariants K (CleanQ_List_enq_n_y B rb)"
unfolding CleanQ_List_enq_n_y_def
using assms CleanQ_List_enq_n_y_I3 CleanQ_List_enq_n_y_I2 CleanQ_List_enq_n_y_I1
by(auto simp:CleanQ_List_Invariants_simp)
text \<open>
Finally, we can also show that the set invariants are preserved.
\<close>
lemma CleanQ_List_enq_n_x_Set_Invariants :
assumes I_holds : "CleanQ_List_Invariants K rb"
and X_owned: "\<forall> b \<in> set B. b \<in> lSX rb" and dist: "distinct B"
and RB_upd: "rb' = CleanQ_List_enq_n_x B rb"
shows "CleanQ_Set_Invariants K (CleanQ_List2Set rb')"
using assms CleanQ_List_Invariants_Set_Invariants CleanQ_List_enq_n_x_Invariants
by(metis)
lemma CleanQ_List_enq_n_y_Set_Invariants :
assumes I_holds : "CleanQ_List_Invariants K rb"
and X_owned: "\<forall> b \<in> set B. b \<in> lSY rb" and dist: "distinct B"
and RB_upd: "rb' = CleanQ_List_enq_n_y B rb"
shows "CleanQ_Set_Invariants K (CleanQ_List2Set rb')"
using assms CleanQ_List_Invariants_Set_Invariants CleanQ_List_enq_n_y_Invariants
by(metis)
(* ------------------------------------------------------------------------------------ *)
subsubsection \<open>Dequeue Operation\<close>
(* ------------------------------------------------------------------------------------ *)
text \<open>
The multi-step \verb+dequeue_n+ operation is similar to the single step operation,
with the exception that it takes the first $n$ elements from the transfer list, instead
of just the head.
\<close>
definition CleanQ_List_deq_n_x :: "nat \<Rightarrow> 'a CleanQ_List_State \<Rightarrow> 'a CleanQ_List_State"
where "CleanQ_List_deq_n_x n rb = rb \<lparr> lSX := (lSX rb) \<union> set (take n (lTYX rb)),
lTYX := drop n (lTYX rb) \<rparr>"
definition CleanQ_List_deq_n_y :: "nat \<Rightarrow> 'a CleanQ_List_State \<Rightarrow> 'a CleanQ_List_State"
where "CleanQ_List_deq_n_y n rb = rb \<lparr> lSY := (lSY rb) \<union> set (take n (lTXY rb)),
lTXY := drop n (lTXY rb) \<rparr>"
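(* Illustrative example (hypothetical values):
   CleanQ_List_deq_n_x 2 \<lparr> lSX = {}, lSY = {}, lTXY = [], lTYX = [1, 2, 3] \<rparr>
     = \<lparr> lSX = {1, 2}, lSY = {}, lTXY = [], lTYX = [3] \<rparr>. *)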
text \<open>
 This can be defined inductively as follows. Note that this equivalence only holds if
 there are elements in the list: technically, the order does not matter, but we need to
 make sure that there are enough elements in the list.
\<close>
lemma CleanQ_List_deq_n_x_ind:
"lTYX rb \<noteq> [] \<Longrightarrow> CleanQ_List_deq_n_x (Suc n) rb
= CleanQ_List_deq_n_x n (CleanQ_List_deq_x rb)"
unfolding CleanQ_List_deq_n_x_def CleanQ_List_deq_x_def
by(auto simp add: drop_Suc take_Suc)
lemma CleanQ_List_deq_n_y_ind:
"lTXY rb \<noteq> [] \<Longrightarrow> CleanQ_List_deq_n_y (Suc n) rb
= CleanQ_List_deq_n_y n (CleanQ_List_deq_y rb)"
unfolding CleanQ_List_deq_n_y_def CleanQ_List_deq_y_def
by(auto simp add: drop_Suc take_Suc)
text \<open>
We can now show that dequeuing zero elements does not change the state, and that
dequeuing a single element is the same as the single-element dequeue, provided
the transfer list is not empty.
\<close>
lemma CleanQ_List_deq_n_x_0[simp]:
"CleanQ_List_deq_n_x 0 rb = rb"
unfolding CleanQ_List_deq_n_x_def by(auto)
lemma CleanQ_List_deq_n_y_0[simp]:
"CleanQ_List_deq_n_y 0 rb = rb"
unfolding CleanQ_List_deq_n_y_def by(auto)
lemma CleanQ_List_deq_n_x_1[simp]:
"(lTYX rb \<noteq> []) \<Longrightarrow> CleanQ_List_deq_n_x 1 rb = CleanQ_List_deq_x rb"
by (simp add: CleanQ_List_deq_n_x_ind)
lemma CleanQ_List_deq_n_y_1[simp]:
"(lTXY rb \<noteq> []) \<Longrightarrow> CleanQ_List_deq_n_y 1 rb = CleanQ_List_deq_y rb"
by (simp add: CleanQ_List_deq_n_y_ind)
text \<open>
We can now talk about the effects of the \verb+dequeue_n+ operation with respect to
the ownership sets.
\<close>
lemma CleanQ_List_deq_n_x_equal :
assumes TYX_owned : "B = take n (lTYX rb)" and I3_holds : "I3 rb"
shows "CleanQ_List2Set (CleanQ_List_deq_n_x n rb)
= CleanQ_Set_deq_n_x (set B) (CleanQ_List2Set rb)"
unfolding CleanQ_List2Set_def CleanQ_Set_deq_n_x_def CleanQ_List_deq_n_x_def
by (simp, metis Diff_cancel I3_def I3_holds TYX_owned Un_Diff Un_Diff_Int
list_distinct_drop_take_inter list_take_drop_union sup.commute)
lemma CleanQ_List_deq_n_y_equal :
assumes TYX_owned : "B = take n (lTXY rb)" and I3_holds : "I3 rb"
shows "CleanQ_List2Set (CleanQ_List_deq_n_y n rb)
= CleanQ_Set_deq_n_y (set B) (CleanQ_List2Set rb)"
unfolding CleanQ_List2Set_def CleanQ_Set_deq_n_y_def CleanQ_List_deq_n_y_def
by (simp, metis Diff_cancel I3_def I3_holds TYX_owned Un_Diff Un_Diff_Int
list_distinct_drop_take_inter list_take_drop_union sup.commute)
text \<open>
The \verb+dequeue_n+ operation preserves the invariants I1-I3.
\<close>
lemma CleanQ_List_deq_n_x_I1 :
assumes I1_holds : "I1_list_img rb K"
shows "I1_list_img (CleanQ_List_deq_n_x n rb) K"
using assms unfolding I1_list_img_def CleanQ_List_deq_n_x_def
by (simp, metis list_take_drop_union sup_assoc sup_left_commute)
lemma CleanQ_List_deq_n_y_I1 :
assumes I1_holds : "I1_list_img rb K"
shows "I1_list_img (CleanQ_List_deq_n_y n rb) K"
using assms unfolding I1_list_img_def CleanQ_List_deq_n_y_def
by (simp, metis list_take_drop_union sup_assoc sup_left_commute)
lemma CleanQ_List_deq_n_x_I2 :
assumes I2_holds : "I2_list_img rb" and I3_holds: "I3 rb"
shows "I2_list_img (CleanQ_List_deq_n_x n rb)"
using assms unfolding CleanQ_List_deq_n_x_def I2_list_img_def I3_def
by(simp add:list_drop_set_inter2 list_take_drop_union_inters list_distinct_drop_take_inter
Int_Un_distrib inf.commute list_take_set_inter2)
lemma CleanQ_List_deq_n_y_I2 :
assumes I2_holds : "I2_list_img rb" and I3_holds: "I3 rb"
shows "I2_list_img (CleanQ_List_deq_n_y n rb)"
using assms unfolding CleanQ_List_deq_n_y_def I2_list_img_def I3_def
by(simp add:list_drop_set_inter2 list_take_drop_union_inters list_distinct_drop_take_inter
Int_Un_distrib inf.commute list_take_set_inter2)
lemma CleanQ_List_deq_n_x_I3 :
assumes I3_holds : "I3 rb"
shows "I3 (CleanQ_List_deq_n_x n rb)"
unfolding CleanQ_List_deq_n_x_def using I3_holds distinct_tl by (auto simp:I3_def)
lemma CleanQ_List_deq_n_y_I3 :
assumes I3_holds : "I3 rb"
shows "I3 (CleanQ_List_deq_n_y n rb)"
unfolding CleanQ_List_deq_n_y_def using I3_holds distinct_tl by (auto simp:I3_def)
lemma CleanQ_List_deq_n_x_Invariants :
assumes I_holds : "CleanQ_List_Invariants K rb"
shows "CleanQ_List_Invariants K (CleanQ_List_deq_n_x n rb)"
using assms CleanQ_List_deq_n_x_I1 CleanQ_List_deq_n_x_I2 CleanQ_List_deq_n_x_I3
using CleanQ_List_Invariants_simp by blast
lemma CleanQ_List_deq_n_y_Invariants :
assumes I_holds : "CleanQ_List_Invariants K rb"
shows "CleanQ_List_Invariants K (CleanQ_List_deq_n_y n rb)"
using assms CleanQ_List_deq_n_y_I1 CleanQ_List_deq_n_y_I2 CleanQ_List_deq_n_y_I3
using CleanQ_List_Invariants_simp by blast
(* ==================================================================================== *)
subsection \<open>Strong and Weak Frame Conditions\<close>
(* ==================================================================================== *)
text \<open>
We now define the strong and weak frame conditions for the CleanQ List model. They are
used in the concurrency proofs and specify which changes the other side may make to
the state of the queue system.
\<close>
(* ------------------------------------------------------------------------------------ *)
subsubsection \<open>Strong Frame Condition\<close>
(* ------------------------------------------------------------------------------------ *)
fun CleanQ_List_Frame_Strong :: "'a tuple \<Rightarrow>'a tuple \<Rightarrow> bool"
where "CleanQ_List_Frame_Strong (sx',txy',sy',tyx') (sx,txy,sy,tyx) \<longleftrightarrow>
sx' = sx \<and> txy' = txy \<and> sy' = sy \<and> tyx' = tyx"
lemma CleanQ_Set_Strong_Frame_Implies:
"CleanQ_List_Frame_Strong A B \<Longrightarrow> CleanQ_Set_Frame_Strong A B"
by (smt CleanQ_List_Frame_Strong.elims(2) CleanQ_Set_Frame_Strong.simps)
(* ------------------------------------------------------------------------------------ *)
subsubsection \<open>Weak Frame Condition\<close>
(* ------------------------------------------------------------------------------------ *)
text \<open>
For the concurrent case, we cannot assume that the sets an operation does not
explicitly modify remain unchanged. We therefore have to weaken the frame condition
such that, e.g., when enqueueing from X, the sets TXY, SY and TYX may change through
actions of Y.
\<close>
(*
Note: Those definitions contain two additional clauses
set dtxy \<inter> set (lTXY st) = {} \<and> set dsy \<inter> set (lTXY st) = {}
set dtyx \<inter> set (lTYX st) = {} \<and> set dsx \<inter> set (lTYX st) = {}
which can be obtained by combining the frame conditions with the Invariant
definition CleanQ_List_Frame_Weak_x ::
"'a CleanQ_List_State \<Rightarrow> 'a CleanQ_List_State \<Rightarrow> 'a list \<Rightarrow> 'a list \<Rightarrow> bool"
where "CleanQ_List_Frame_Weak_x st' st dtxy dsy \<longleftrightarrow>
lSX st = lSX st' \<and> lSY st \<union> set dsy = lSY st'\<union> set dtxy \<and>
dtxy @ lTXY st = lTXY st' \<and> lTYX st = lTYX st' @ dsy \<and>
set dsy \<inter> (lSY st) = {} \<and> set dtxy \<inter> set (lTXY st) = {} \<and>
set dsy \<inter> set (lTXY st) = {} \<and> distinct dsy"
definition CleanQ_List_Frame_Weak_y ::
"'a CleanQ_List_State \<Rightarrow> 'a CleanQ_List_State \<Rightarrow> 'a list \<Rightarrow> 'a list \<Rightarrow> bool"
where "CleanQ_List_Frame_Weak_y st' st dtyx dsx \<longleftrightarrow>
lSY st = lSY st' \<and> lSX st \<union> set dsx = lSX st'\<union> set dtyx \<and>
dtyx @ lTYX st = lTYX st' \<and> lTXY st = lTXY st' @ dsx \<and>
set dsx \<inter> (lSX st) = {} \<and> set dtyx \<inter> set (lTYX st) = {} \<and>
set dsx \<inter> set (lTYX st) = {} \<and> distinct dsx "
*)
definition CleanQ_List_Frame_Weak_x ::
"'a CleanQ_List_State \<Rightarrow> 'a CleanQ_List_State \<Rightarrow> 'a list \<Rightarrow> 'a list \<Rightarrow> bool"
where "CleanQ_List_Frame_Weak_x st' st dtxy dsy \<longleftrightarrow>
lSX st = lSX st' \<and> lSY st \<union> set dsy = lSY st'\<union> set dtxy \<and>
dtxy @ lTXY st = lTXY st' \<and> lTYX st = lTYX st' @ dsy \<and>
set dsy \<inter> (lSY st) = {} \<and> distinct dsy"
definition CleanQ_List_Frame_Weak_y ::
"'a CleanQ_List_State \<Rightarrow> 'a CleanQ_List_State \<Rightarrow> 'a list \<Rightarrow> 'a list \<Rightarrow> bool"
where "CleanQ_List_Frame_Weak_y st' st dtyx dsx \<longleftrightarrow>
lSY st = lSY st' \<and> lSX st \<union> set dsx = lSX st'\<union> set dtyx \<and>
dtyx @ lTYX st = lTYX st' \<and> lTXY st = lTXY st' @ dsx \<and>
set dsx \<inter> (lSX st) = {} \<and> distinct dsx "
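(* An illustrative instance (buffer names a, b, c are hypothetical and not part
   of this theory): with st' having lSX = {a}, lTXY = [b, c], lSY = {}, lTYX = []
   and st having lSX = {a}, lTXY = [c], lSY = {}, lTYX = [b], the condition
   CleanQ_List_Frame_Weak_x st' st [b] [b] holds: Y dequeued b from TXY and
   enqueued it again into TYX, while X's ownership set stayed untouched. *)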
text \<open>
From the weak frame conditions and the invariants we can obtain two additional
facts about empty set intersections.
\<close>
lemma CleanQ_List_Frame_Weak_x_emptyset_A:
assumes I: "CleanQ_List_Invariants K st'" and F: "CleanQ_List_Frame_Weak_x st' st A B"
shows "set A \<inter> set (lTXY st) = {}"
using assms unfolding CleanQ_List_Frame_Weak_x_def CleanQ_List_Invariants_def
by (metis I3_def distinct_append)
lemma CleanQ_List_Frame_Weak_x_emptyset_B:
assumes I: "CleanQ_List_Invariants K st'" and F: "CleanQ_List_Frame_Weak_x st' st A B"
shows "set B \<inter> set (lTXY st) = {}"
using assms unfolding CleanQ_List_Frame_Weak_x_def CleanQ_List_Invariants_def
by (metis (no_types, hide_lams) I2_list_img_def I3_def Un_iff disjoint_iff_not_equal
distinct_append set_append)
lemma CleanQ_List_Frame_Weak_y_emptyset_A:
assumes I: "CleanQ_List_Invariants K st'" and F: "CleanQ_List_Frame_Weak_y st' st A B"
shows "set A \<inter> set (lTYX st) = {}"
using assms unfolding CleanQ_List_Frame_Weak_y_def CleanQ_List_Invariants_def
by (metis I3_def distinct_append)
lemma CleanQ_List_Frame_Weak_y_emptyset_B:
assumes I: "CleanQ_List_Invariants K st'" and F: "CleanQ_List_Frame_Weak_y st' st A B"
shows "set B \<inter> set (lTYX st) = {}"
using assms unfolding CleanQ_List_Frame_Weak_y_def CleanQ_List_Invariants_def
by (metis (no_types, hide_lams) I2_list_img_def I3_def Un_iff disjoint_iff_not_equal
distinct_append set_append)
text \<open>
The weak frame condition of the List model implies the weak frame condition of the
set model.
\<close>
lemma CleanQ_List_Frame_Weak_x_implies:
assumes I: "CleanQ_List_Invariants K st'" and F: "CleanQ_List_Frame_Weak_x st' st A B"
shows "CleanQ_Set_Frame_Weak_x (CleanQ_List2Set st') (CleanQ_List2Set st) (set A) (set B)"
using F unfolding CleanQ_Set_Frame_Weak_x_def CleanQ_List2Set_def CleanQ_List_Frame_Weak_x_def
using I F by (simp, metis CleanQ_List_Frame_Weak_x_emptyset_A
CleanQ_List_Frame_Weak_x_emptyset_B Un_commute set_append)
lemma CleanQ_List_Frame_Weak_y_implies:
assumes I: "CleanQ_List_Invariants K st'" and F: "CleanQ_List_Frame_Weak_y st' st A B"
shows "CleanQ_Set_Frame_Weak_y (CleanQ_List2Set st') (CleanQ_List2Set st) (set A) (set B)"
using F unfolding CleanQ_Set_Frame_Weak_y_def CleanQ_List2Set_def CleanQ_List_Frame_Weak_y_def
using I F by (simp, metis CleanQ_List_Frame_Weak_y_emptyset_A
CleanQ_List_Frame_Weak_y_emptyset_B Un_commute set_append)
text \<open>
We can now show that the enqueue and dequeue operations fulfil the weak frame condition.
\<close>
lemma CleanQ_List_Frame_Weak_x_enq:
assumes I: "CleanQ_List_Invariants K st'" and owns: "b \<in> lSX st'"
shows "CleanQ_List_Frame_Weak_y st' (CleanQ_List_enq_x b st') [] [b]"
using assms unfolding CleanQ_List_Frame_Weak_y_def CleanQ_List_enq_x_def
by(auto simp:CleanQ_List_Invariants_simp)
lemma CleanQ_List_Frame_Weak_y_enq:
assumes I: "CleanQ_List_Invariants K st'" and owns: "b \<in> lSY st'"
shows "CleanQ_List_Frame_Weak_x st' (CleanQ_List_enq_y b st') [] [b]"
using assms unfolding CleanQ_List_Frame_Weak_x_def CleanQ_List_enq_y_def
by(auto simp:CleanQ_List_Invariants_simp)
lemma CleanQ_List_Frame_Weak_x_deq:
assumes I: "CleanQ_List_Invariants K st'" and owns: "b = hd (lTYX st') \<and> lTYX st' \<noteq> []"
shows "CleanQ_List_Frame_Weak_y st' (CleanQ_List_deq_x st') [b] []"
using assms unfolding CleanQ_List_Frame_Weak_y_def CleanQ_List_deq_x_def by(auto)
lemma CleanQ_List_Frame_Weak_y_deq:
assumes I: "CleanQ_List_Invariants K st'" and owns: "b = hd (lTXY st') \<and> lTXY st' \<noteq> []"
shows "CleanQ_List_Frame_Weak_x st' (CleanQ_List_deq_y st') [b] []"
using assms unfolding CleanQ_List_Frame_Weak_x_def CleanQ_List_deq_y_def by(auto)
text \<open>
The weak frame condition for an \verb+enqueue+ or \verb+dequeue+ preserves I1.
\<close>
lemma CleanQ_List_enq_x_I1_weak:
assumes I: "CleanQ_List_Invariants K st'"
and frame: "CleanQ_List_Frame_Weak_y st' st [] [b]"
shows "I1_list_img st K"
using assms unfolding I1_def CleanQ_List_Frame_Weak_y_def CleanQ_List_Invariants_def
by(auto simp:CleanQ_List_Invariants_simp)
lemma CleanQ_List_enq_y_I1_weak:
assumes I: "CleanQ_List_Invariants K st'"
and frame: "CleanQ_List_Frame_Weak_x st' st [] [b]"
shows "I1_list_img st K"
using assms unfolding I1_def CleanQ_List_Frame_Weak_x_def CleanQ_List_Invariants_def
by(auto simp:CleanQ_List_Invariants_simp)
lemma CleanQ_List_deq_x_I1_weak:
assumes I: "CleanQ_List_Invariants K st'" and owns: "b = hd (lTYX st')"
and frame: "CleanQ_List_Frame_Weak_y st' st [b] []"
shows "I1_list_img st K"
using assms unfolding I1_def CleanQ_List_Frame_Weak_y_def CleanQ_List_Invariants_def
by (smt I1_list_img_def Un_assoc Un_commute set_append)
lemma CleanQ_List_deq_y_I1_weak:
assumes I: "CleanQ_List_Invariants K st'" and owns: "b = hd (lTXY st')"
and frame: "CleanQ_List_Frame_Weak_x st' st [b] []"
shows "I1_list_img st K"
using assms unfolding I1_def CleanQ_List_Frame_Weak_x_def CleanQ_List_Invariants_def
by (smt I1_list_img_def Un_assoc Un_commute set_append)
text \<open>
The weak frame condition for an \verb+enqueue+ or \verb+dequeue+ preserves I2.
\<close>
lemma CleanQ_List_enq_x_I2_weak:
assumes I: "CleanQ_List_Invariants K st'"
and frame: "CleanQ_List_Frame_Weak_y st' st [] [b]"
shows "I2_list_img st"
using assms unfolding CleanQ_List_Frame_Weak_y_def I2_list_img_def
by(auto simp:CleanQ_List_Invariants_simp)
lemma CleanQ_List_enq_y_I2_weak:
assumes I: "CleanQ_List_Invariants K st'"
and frame: "CleanQ_List_Frame_Weak_x st' st [] [b]"
shows "I2_list_img st"
using assms unfolding CleanQ_List_Frame_Weak_x_def I2_list_img_def
by(auto simp:CleanQ_List_Invariants_simp)
lemma CleanQ_List_deq_x_I2_weak:
assumes I: "CleanQ_List_Invariants K st'" and owns: "b = hd (lTYX st')"
and frame: "CleanQ_List_Frame_Weak_y st' st [b] []"
shows "I2_list_img st"
using assms unfolding CleanQ_List_Frame_Weak_y_def I2_list_img_def
apply(simp add:CleanQ_List_Invariants_simp)
by (metis disjoint_insert(2) distinct.simps(2) list.simps(15))
lemma CleanQ_List_deq_y_I2_weak:
assumes I: "CleanQ_List_Invariants K st'" and owns: "b = hd (lTXY st')"
and frame: "CleanQ_List_Frame_Weak_x st' st [b] []"
shows "I2_list_img st"
using assms unfolding CleanQ_List_Frame_Weak_x_def I2_list_img_def
apply(simp add:CleanQ_List_Invariants_simp)
by (metis disjoint_insert(2) distinct.simps(2) inf_commute list.simps(15))
text \<open>
The weak frame condition for an \verb+enqueue+ or \verb+dequeue+ preserves I3.
\<close>
lemma CleanQ_List_enq_x_I3_weak:
assumes I: "CleanQ_List_Invariants K st'"
and frame: "CleanQ_List_Frame_Weak_y st' st [] [b]"
shows "I3 st"
using assms unfolding CleanQ_List_Frame_Weak_y_def I3_def
by(auto simp add:CleanQ_List_Invariants_simp)
lemma CleanQ_List_enq_y_I3_weak:
assumes I: "CleanQ_List_Invariants K st'"
and frame: "CleanQ_List_Frame_Weak_x st' st [] [b]"
shows "I3 st"
using assms unfolding CleanQ_List_Frame_Weak_x_def I3_def
by(auto simp add:CleanQ_List_Invariants_simp)
lemma CleanQ_List_deq_y_I3_weak:
assumes I: "CleanQ_List_Invariants K st'"
and frame: "CleanQ_List_Frame_Weak_x st' st [b] []"
shows "I3 st"
using assms unfolding CleanQ_List_Frame_Weak_x_def I3_def
apply(simp add:CleanQ_List_Invariants_simp)
by (metis distinct.simps(2))
lemma CleanQ_List_deq_x_I3_weak:
assumes I: "CleanQ_List_Invariants K st'"
and frame: "CleanQ_List_Frame_Weak_y st' st [b] []"
shows "I3 st"
using assms unfolding CleanQ_List_Frame_Weak_y_def I3_def
apply(simp add:CleanQ_List_Invariants_simp)
by (metis distinct.simps(2))
text \<open>
Additionally, we have to define the weak frame condition for the concurrency case,
analogous to the set model.
\<close>
fun frame_list_weak ::
"'a list \<times> 'a set \<times> 'a list \<times> 'a set \<Rightarrow> 'a list \<times> 'a set \<times> 'a list \<times> 'a set \<Rightarrow> bool"
where "frame_list_weak (a',B',c',D') (a,B,c,D) \<longleftrightarrow> (\<exists>\<delta>aB \<delta>Bc.
a' = \<delta>aB @ a \<and>
B' \<union> set \<delta>aB = set \<delta>Bc \<union> B \<and>
c' @ \<delta>Bc = c \<and>
B \<inter> set \<delta>Bc = {} \<and>
distinct \<delta>Bc)
\<and> D' = D"
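(* A concrete witness sketch (buffer names x y z are hypothetical, not part of
   the theory): frame_list_weak ([x, y], {}, [], {z}) ([y], {}, [x], {z}) holds
   with the witnesses \<delta>aB = [x] and \<delta>Bc = [x]; the other side removed x from
   the first transfer list and appended it to the second, while the owning set
   {z} in the last component is unchanged. *)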
lemma frame2_s_w:
"frame_strong (a',B',c',D') (a,B,c,D) \<Longrightarrow> frame_list_weak (a',B',c',D') (a,B,c,D)"
by(auto)
lemma CleanQ_List_frame_weak_equiv_x:
"frame_list_weak (lTXY st', lSY st', lTYX st', lSX st' ) (lTXY st, lSY st, lTYX st, lSX st)
\<longleftrightarrow> (\<exists>\<Delta>AB \<Delta>BC. CleanQ_List_Frame_Weak_x st' st \<Delta>AB \<Delta>BC)"
unfolding frame_list_weak.simps CleanQ_List_Frame_Weak_x_def
by (smt inf_commute sup.commute)
lemma CleanQ_List_frame_weak_equiv_y:
"frame_weak (TYX st', SX st', TXY st', SY st' ) (TYX st, SX st, TXY st, SY st)
\<longleftrightarrow> (\<exists>\<Delta>AB \<Delta>BC. CleanQ_Set_Frame_Weak_y st' st \<Delta>AB \<Delta>BC)"
unfolding frame_weak.simps CleanQ_Set_Frame_Weak_y_def
by (metis Int_commute)
text \<open>The second weak frame condition refines the first.\<close>
lemma frame2_w_1_w:
fixes st st' K
assumes I1: "I1 (CleanQ_List2Set st') K"
and I2: "I2 (CleanQ_List2Set st')"
and I3: "I3 st'"
and frame: "frame_list_weak (lTXY st', lSY st', lTYX st', lSX st') (lTXY st, lSY st, lTYX st, lSX st)"
shows "frame_weak (TXY (CleanQ_List2Set st'), SY (CleanQ_List2Set st'), TYX (CleanQ_List2Set st'), SX (CleanQ_List2Set st'))
(TXY (CleanQ_List2Set st), SY (CleanQ_List2Set st), TYX (CleanQ_List2Set st), SX (CleanQ_List2Set st))"
proof -
from frame obtain \<delta>aB \<delta>Bc where
fA: "lTXY st' = \<delta>aB @ lTXY st" and
fB: "lSY st' \<union> set \<delta>aB = set \<delta>Bc \<union> lSY st" and
fC: "lTYX st' @ \<delta>Bc = lTYX st" and
dBC: "lSY st \<inter> set \<delta>Bc = {}" and
fD: "lSX st' = lSX st"
by(auto)
define \<Delta>AB where "\<Delta>AB = set \<delta>aB"
define \<Delta>BC where "\<Delta>BC = set \<delta>Bc"
from fA \<Delta>AB_def have fA': "set (lTXY st') = \<Delta>AB \<union> set (lTXY st)"
by(simp)
from fB \<Delta>AB_def \<Delta>BC_def have fB': "lSY st' \<union> \<Delta>AB = \<Delta>BC \<union> lSY st"
by(simp)
from fC \<Delta>BC_def have fC': "set (lTYX st') \<union> \<Delta>BC = set (lTYX st)"
by (metis set_append)
from fA I3 \<Delta>AB_def have dAB: "set (lTXY st) \<inter> \<Delta>AB = {}"
by(auto simp: I3_def)
from fB have dAC: "set (lTXY st) \<inter> \<Delta>BC = {}"
proof(rule contrapos_pp)
assume "set (lTXY st) \<inter> \<Delta>BC \<noteq> {}"
then obtain x where xa: "x \<in> set (lTXY st)" and xBC: "x \<in> set \<delta>Bc"
unfolding \<Delta>BC_def by(blast)
from xa fA have "x \<in> set (lTXY st')" by(auto)
with I2 have "x \<notin> (lSY st')"
by (meson I2_list_img_def I2_list_img_lift disjoint_iff_not_equal)
moreover from xa fA I3 have "x \<notin> set \<delta>aB" by(auto simp: I3_def)
ultimately show "(lSY st') \<union> set \<delta>aB \<noteq> set \<delta>Bc \<union> (lSY st)"
using xBC by(auto)
qed
from dBC \<Delta>BC_def fB have dBC': "lSY st \<inter> \<Delta>BC = {}"
by(auto)
from fA' fB' fC' dAB dAC dBC' fD show ?thesis
unfolding frame_weak.simps CleanQ_List2Set_def
apply(simp)
by (metis sup.commute)
qed
lemma CleanQ_List_enq_x_weak_I3 :
fixes st st' K x
assumes I: "CleanQ_List_Invariants K st'"
and frame: "frame_list_weak (lTXY st' @ [x], lSY st', lTYX st', lSX st' - {x}) (lTXY st, lSY st, lTYX st, lSX st)"
and owns: "x \<in> lSX st'"
shows "I3 st"
proof(unfold I3_def, intro conjI)
from frame obtain \<delta>aB \<delta>Bc where
fA: "(lTXY st') @ [x] = \<delta>aB @ (lTXY st)" and
fB: "(lSY st') \<union> set \<delta>aB = set \<delta>Bc \<union> (lSY st)" and
fC: "(lTYX st') @ \<delta>Bc = lTYX st" and
dBC: "(lSY st) \<inter> set \<delta>Bc = {}" and
dsBC: "distinct \<delta>Bc" and
fD: "(lSX st') - {x} = lSX st"
by auto
from I owns have "x \<notin> set (lTXY st')" by(auto simp:CleanQ_List_Invariants_simp)
with I have "distinct ((lTXY st') @ [x])" by(auto simp:CleanQ_List_Invariants_simp)
hence "distinct (\<delta>aB @ (lTXY st))" by(simp add:fA)
thus "distinct (lTXY st)" by(auto)
from fA have "set (\<delta>aB @ (lTXY st)) = set ((lTXY st') @ [x])" by(simp)
hence "set \<delta>aB \<subseteq> set (lTXY st') \<union> {x}" by(auto)
with fB have "set \<delta>Bc \<subseteq> set (lTXY st') \<union> (lSY st') \<union> {x}" by(auto)
with I owns have "set (lTYX st') \<inter> set \<delta>Bc = {}"by(auto simp:CleanQ_List_Invariants_simp)
moreover from I have "distinct (lTYX st')" by(auto simp:CleanQ_List_Invariants_simp)
ultimately show "distinct (lTYX st)"
using dsBC fC[symmetric] by(simp)
qed
lemma CleanQ_List_enq_y_weak_I3 :
fixes st st' K x
assumes I: "CleanQ_List_Invariants K st'"
and frame: "frame_list_weak (lTYX st' @ [x], lSX st', lTXY st', lSY st' - {x}) (lTYX st, lSX st, lTXY st, lSY st)"
and owns: "x \<in> lSY st'"
shows "I3 st"
proof(unfold I3_def, intro conjI)
from frame obtain \<delta>aB \<delta>Bc where
fA: "lTYX st' @ [x] = \<delta>aB @ lTYX st" and
fB: "lSX st' \<union> set \<delta>aB = set \<delta>Bc \<union> lSX st" and
fC: "lTXY st' @ \<delta>Bc = lTXY st" and
dBC: "lSX st \<inter> set \<delta>Bc = {}" and
dsBC: "distinct \<delta>Bc" and
fD: "lSY st' - {x} = lSY st"
by(auto)
from I owns have "x \<notin> set (lTYX st')" by(auto simp:CleanQ_List_Invariants_simp)
with I have "distinct ((lTYX st') @ [x])" by(auto simp:CleanQ_List_Invariants_simp)
hence "distinct (\<delta>aB @ (lTYX st))" by(simp add:fA)
thus "distinct (lTYX st)" by(auto)
from fA have "set (\<delta>aB @ (lTYX st)) = set ((lTYX st') @ [x])" by(simp)
hence "set \<delta>aB \<subseteq> set (lTYX st') \<union> {x}" by(auto)
with fB have "set \<delta>Bc \<subseteq> set (lTYX st') \<union> (lSX st') \<union> {x}" by(auto)
with I owns have "set (lTXY st') \<inter> set \<delta>Bc = {}" by(auto simp:CleanQ_List_Invariants_simp)
moreover from I have "distinct (lTXY st')" by(auto simp:CleanQ_List_Invariants_simp)
ultimately show "distinct (lTXY st)"
using dsBC fC[symmetric]
by(simp)
qed
text \<open>The weak frame condition for a dequeue preserves invariant I3.\<close>
lemma CleanQ_List_deq_x_weak_I3:
fixes st st' K x
assumes I: "CleanQ_List_Invariants K st'"
and frame: "frame_list_weak (lTXY st', lSY st', lTYX st', lSX st' \<union> {x}) (lTXY st, lSY st, lTYX st, lSX st)"
and hd: "lTYX st \<noteq> [] \<and> x = hd (lTYX st)"
shows "I3 st"
using I frame hd unfolding CleanQ_List_Invariants_def apply auto
proof -
fix \<delta>aB :: "'a list" and \<delta>Bc :: "'a list"
assume a1: "I2_list_img st'"
assume a2: "I3 st'"
assume a3: "distinct \<delta>Bc"
assume a4: "lTXY st' = \<delta>aB @ lTXY st"
assume a5: "lTYX st' @ \<delta>Bc = lTYX st"
assume "lSY st \<inter> set \<delta>Bc = {}"
assume a6: "lSY st' \<union> set \<delta>aB = set \<delta>Bc \<union> lSY st"
have f7: "\<And>A. (lSY st' \<union> A) \<inter> set (lTYX st') = A \<inter> set (lTYX st')"
using a1 by (simp add: I2_list_img_def Int_Un_distrib2)
have "\<And>A. set \<delta>aB \<inter> (set (lTYX st') \<inter> (set \<delta>aB \<union> A)) = set \<delta>aB \<inter> set (lTYX st')"
by blast
then have "set \<delta>aB \<inter> set (lTYX st') = {}"
using a4 a1 by (metis (no_types) I2_list_img_def inf_commute inf_sup_absorb set_append sup_bot.left_neutral)
then have "set \<delta>Bc \<inter> set (lTYX st') = {}"
using f7 a6 by blast
then show ?thesis
using a5 a4 a3 a2 by (metis (no_types) I3_def distinct_append inf_commute)
qed
text \<open>The weak frame condition for a dequeue preserves invariant I3.\<close>
lemma CleanQ_List_deq_y_weak_I3:
fixes st st' K x
assumes I: "CleanQ_List_Invariants K st'"
and frame: "frame_list_weak (lTYX st', lSX st', lTXY st', lSY st' \<union> {x}) (lTYX st, lSX st, lTXY st, lSY st)"
and hd: "lTXY st \<noteq> [] \<and> x = hd (lTXY st)"
shows "I3 st"
using I frame hd unfolding CleanQ_List_Invariants_def apply auto
proof -
fix \<delta>aB :: "'a list" and \<delta>Bc :: "'a list"
assume a1: "I2_list_img st'"
assume a2: "I3 st'"
assume a3: "distinct \<delta>Bc"
assume a4: "lTYX st' = \<delta>aB @ lTYX st"
assume a5: "lTXY st' @ \<delta>Bc = lTXY st"
assume a6: "lSX st' \<union> set \<delta>aB = set \<delta>Bc \<union> lSX st"
have f7: "lSX st' \<inter> set (lTXY st') = {}"
using a1 by (metis I2_list_img_def)
have "set (lTXY st') \<inter> set \<delta>aB = {}"
using a4 a1 by (metis (no_types) I2_list_img_def inf_bot_right inf_left_commute inf_sup_absorb set_append)
then have "set \<delta>Bc \<inter> set (lTXY st') = {}"
using f7 a6 by blast
then show ?thesis
using a5 a4 a3 a2 by (metis I3_def distinct_append inf_commute)
qed
end
|
Load LFindLoad.
From lfind Require Import LFind.
From QuickChick Require Import QuickChick.
From adtind Require Import goal33.
Derive Show for natural.
Derive Arbitrary for natural.
Instance Dec_Eq_natural : Dec_Eq natural.
Proof. dec_eq. Qed.
Lemma conj19synthconj1 : forall (lv0 : natural) (lv1 : natural), (@eq natural (plus (mult lv0 lv1) lv1) (plus (mult lv0 lv1) lv1)).
Admitted.
QuickChick conj19synthconj1.
|
require("shinydashboard")
require("plotly")
# source("degreeDist.r")
# source("graphLayout.r")
# source("adjacencyPlot.r")
require("networkD3", quietly = T)
# shinyUI(fluidPage(
# style = "padding-top: 80px;",
header <- dashboardHeader(title = "GraphX")
body <- dashboardBody(fluidRow(
column(
width = 9,
#Distribution
box(
width = NULL,
solidHeader = TRUE,
##end distribution
#Distribution
box(
width = NULL,
solidHeader = TRUE,
conditionalPanel(
"input.plot_degree == true",
selectInput(
inputId = "graph_stats",
label = "Statistics:",
choices = c(
"Degree",
# "Attribute: Edge Weight",
"Betweenness Centrality",
"Closeness Centrality",
"Eigenvector Centrality",
"Bonacich Power Centrality",
"Authority Score"
),
selected = "Degree"
),
# conditionalPanel(
# "input.vertex_conditional == false",
# plotlyOutput(outputId = "density_dist", height = "300px"),
# ) ,
# conditionalPanel(
# "input.vertex_conditional == true",
plotlyOutput(outputId = "vertex_dist", height = "300px"),
sliderInput(
inputId = "bw_adjust_overlay",
label = "Bandwidth adjustment:",
min = 0.2,
max = 10,
value = 1,
step = 0.2
),
# ),
# checkboxInput(
# inputId = "show_histogram",
# label = strong("Show histogram"),
# value = FALSE
# ),
# #show this only if we bin the observations and show it in histogram
# conditionalPanel(
# condition = "input.show_histogram == true",
# sliderInput(
# inputId = "hist_number_bins",
# label = "No. of Bins:",
# min = 1,
# max = 20,
# ticks = F,
# step = 1,
# value = 10
# )
# ),
# checkboxInput(
# inputId = "density",
# label = strong("Show density estimate"),
# value = FALSE
# ),
# # Display this only if the density is shown
# conditionalPanel(
# condition = "input.density == true",
# sliderInput(
# inputId = "bw_adjust",
# label = "Bandwidth adjustment:",
# min = 0.2,
# max = 1,
# value = .5,
# step = 0.2
# )
# ),
checkboxInput(
inputId = "vertex_conditional",
label = strong("Conditonal on Vertex Attribute "),
value = FALSE
),
conditionalPanel(
"input.vertex_conditional == true",
wellPanel(
p("Select Attribute:"),
uiOutput("AttributeSelect"))
)
)
)
),
##end distribution
# adjacency view
box(
width = NULL,
solidHeader = TRUE,
conditionalPanel(
"input.plot_adjacencyy == true",
selectInput(
inputId = "adjacency_mode",
label = "View:",
choices = c("Adjacency", "Laplacian", "Normalized Laplacian", "Diffusion"),
selected = "Adjacency"
),
uiOutput("caption_adjacency"),
fluidRow(
splitLayout(cellWidths = c("50%", "50%"),
plotlyOutput(
outputId = "adjacency_view",
height = 500,
width = 600
),
conditionalPanel(
"input.embedding_sbm == true",
plotlyOutput(
outputId = "p_hat_view",
height = 500,
width = 600
)
)
)
)
,
selectInput(
inputId = "adjacency_sortedby",
label = "Sorted By:",
choices = c("Unsorted", "Degree", "Block"),
selected = "Unsorted"
)
)
),
# end adjacency view
# layout
box(
width = NULL,
solidHeader = TRUE,
conditionalPanel(
"input.plot_layout == true",
forceNetworkOutput(outputId = "graph_layout", height = "500px")
)
),
# end layout
# scree plot
box(
width = NULL,
solidHeader = TRUE,
#scree plot
conditionalPanel(
"input.plot_scree == true",
# numericInput("spectral_d_to_view", "Number of eigenvalues to keep:", 5),
plotlyOutput(outputId = "scree_plot", height = 300),
checkboxInput(
inputId = "elbow_detect",
label = strong("Elbow Detection"),
value = FALSE
)
)
),
#end scree plot
#pairs plot
box(
width = NULL,
solidHeader = TRUE
,
conditionalPanel("input.plot_pairs == true",
uiOutput("pairsplot"))
)
#end pairs plot
),
column(
width = 3,
box(
width = NULL,
fileInput('file1', 'Upload Graph',
accept = c('.graphml')),
checkboxInput("use_demo_graph", "Use Demo Graph", value = TRUE),
checkboxInput(
inputId = "plot_degree",
label = strong("Graph Statistics"),
value = FALSE
),
checkboxInput(
inputId = "plot_adjacencyy",
label = strong("Adjacency Matrix"),
value = FALSE
),
checkboxInput(
inputId = "plot_layout",
label = strong("Graph Layout"),
value = FALSE
),
checkboxInput(
inputId = "plot_scree",
label = strong("Scree Plot"),
value = FALSE
),
conditionalPanel(
"input.plot_scree == true",
wellPanel(
p("Show:"),
checkboxInput(
inputId = "scree_A",
label = strong("Adjacency"),
value = TRUE
),
checkboxInput(
inputId = "scree_L",
label = strong("Laplacian"),
value = TRUE
),
checkboxInput(
inputId = "scree_nL",
label = strong("Normalized Laplacian"),
value = TRUE
)
)
),
checkboxInput(
inputId = "plot_pairs",
label = strong("Pairs Plot"),
value = FALSE
),
conditionalPanel(
"input.plot_pairs == true",
sliderInput(
"eigenvector_range",
"View Eigenvectors (up to 9):",
min = 1,
max = 9#nrow(A)
,
value = c(1, 5),
step = 1
)
# uiOutput("max_ev_range")
)),
# selectInput(
# inputId = "svd_mat",
# label = "Using the spectral decomp of",
# choices = c("Adjacency",
# "Normalized Laplacian",
# "Laplacian"),
# selected = "Normalized Laplacian"
# ),
box(
width = NULL,
checkboxInput(
inputId = "embedding_sbm",
label = strong("Embedding?"),
value = FALSE
),
conditionalPanel(
"input.embedding_sbm == true",
selectInput(
inputId = "embedding_model",
label = "Model",
choices = c("Random Dot Product Model",
"Stochastic Block Model"),
selected = "Stochastic Block Model"
),
sliderInput(
inputId = "embedding_d",
label = "Latent dimension (d):",
min = 1,
max = 50,
ticks = T,
step = 1,
value = 5
),
conditionalPanel(
"input.embedding_model == 'Stochastic Block Model'",
sliderInput(
inputId = "embedding_k",
label = "Blocks (K):",
min = 1,
max = 50,
ticks = T,
step = 1,
value = 5
)
)
),
checkboxInput(
inputId = "use_k_core",
label = strong("Use k-core?"),
value = FALSE
),
conditionalPanel("input.use_k_core == true",
uiOutput("KinKcore"))
# sliderInput(
# inputId = "par_k_cores",
# label = "K:",
# min = 1,
# max = 10,
# ticks = T,
# step = 1,
# value = 10
# )
)
)
))
dashboardPage(header,
dashboardSidebar(disable = TRUE),
body)
|
[STATEMENT]
lemma insert_sort_by_set [simp]:
"set (insert_sort_by cmp l) = set l"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. set (insert_sort_by cmp l) = set l
[PROOF STEP]
by (induct l) auto |
{-# OPTIONS --without-K --safe #-}
-- https://www.cs.bham.ac.uk/~mhe/papers/omniscient-journal-revised.pdf
module Constructive.OmniscienceAlt where
open import Level renaming (zero to lzero; suc to lsuc)
import Data.Bool as 𝔹 using (_≤_)
open import Data.Bool as 𝔹 using (Bool; true; false; T; f≤t; b≤b; _∧_; not; _∨_)
import Data.Bool.Properties as 𝔹ₚ
open import Data.Empty
open import Data.Unit using (tt)
open import Data.Nat
open import Data.Nat.Properties
open import Data.Product as Prod
open import Data.Sum as Sum
open import Function.Base
open import Relation.Binary as B
open import Relation.Binary.PropositionalEquality
open import Relation.Nullary
open import Relation.Nullary.Decidable
import Relation.Unary as U
-- agda-misc
open import Constructive.Axiom
open import Constructive.Axiom.Properties
open import Constructive.Axiom.Properties.Base.Lemma
open import Constructive.Common
open import Constructive.Combinators
ℕ∞ : Set
ℕ∞ = Σ (ℕ → Bool) λ x → ∀ i → T (x i) → T (x (suc i))
fromℕ-C : ℕ → ℕ → Bool
fromℕ-C zero zero = false
fromℕ-C zero (suc n) = true
fromℕ-C (suc m) zero = false
fromℕ-C (suc m) (suc n) = fromℕ-C m n
fromℕ-convergent : ∀ n i → T (fromℕ-C n i) → T (fromℕ-C n (suc i))
fromℕ-convergent zero (suc i) t = tt
fromℕ-convergent (suc n) (suc i) t = fromℕ-convergent n i t
fromℕ : ℕ → ℕ∞
fromℕ n = fromℕ-C n , fromℕ-convergent n
∞ : ℕ∞
∞ = (λ _ → false) , (λ i x → x)
_≈_ : Rel (ℕ → Bool) lzero
α ≈ β = ∀ i → α i ≡ β i
_≉_ : Rel (ℕ → Bool) lzero
α ≉ β = ¬ (α ≈ β)
_#_ : Rel (ℕ → Bool) lzero
α # β = ∃ λ i → α i ≢ β i
#⇒≉ : {α β : ℕ → Bool} → α # β → α ≉ β
#⇒≉ {α} {β} (i , αi≢βi) α≈β = αi≢βi (α≈β i)
≈-refl : {α : ℕ → Bool} → α ≈ α
≈-refl _ = refl
≈-sym : {α β : ℕ → Bool} → α ≈ β → β ≈ α
≈-sym α≈β i = sym (α≈β i)
≈-trans : {α β γ : ℕ → Bool} → α ≈ β → β ≈ γ → α ≈ γ
≈-trans α≈β β≈γ i = trans (α≈β i) (β≈γ i)
_≈∞_ : Rel ℕ∞ lzero
x ≈∞ y = proj₁ x ≈ proj₁ y
_≉∞_ : Rel ℕ∞ lzero
x ≉∞ y = proj₁ x ≉ proj₁ y
_#∞_ : Rel ℕ∞ lzero
x #∞ y = proj₁ x # proj₁ y
-- Proposition 3.1
-- r = ≤-any
≤-any : (ℕ → Bool) → (ℕ → Bool)
≤-any α zero = α 0
≤-any α (suc n) = α (suc n) ∨ ≤-any α n
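-- Illustrative sketch (not part of the development): for an α with
-- α 0 = false, α 1 = false, α 2 = true, α 3 = false, ...
-- ≤-any α yields false, false, true, true, ... ; once α hits true the value
-- stays true, which is the monotonicity shown by ≤-any-convergent below.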
≤-any-idem : ∀ α → ≤-any (≤-any α) ≈ ≤-any α
≤-any-idem α zero = refl
≤-any-idem α (suc n) = begin
(α (suc n) ∨ ≤-any α n) ∨ ≤-any (≤-any α) n
≡⟨ cong ((α (suc n) ∨ ≤-any α n) ∨_) $ ≤-any-idem α n ⟩
(α (suc n) ∨ ≤-any α n) ∨ ≤-any α n
≡⟨ 𝔹ₚ.∨-assoc (α (suc n)) (≤-any α n) (≤-any α n) ⟩
α (suc n) ∨ (≤-any α n ∨ ≤-any α n)
≡⟨ cong (α (suc n) ∨_) $ 𝔹ₚ.∨-idem (≤-any α n) ⟩
α (suc n) ∨ ≤-any α n
∎
where open ≡-Reasoning
private
T-∧-× : ∀ {x y} → T (x ∧ y) → (T x × T y)
T-∧-× {true} {true} t = tt , tt
T-×-∧ : ∀ {x y} → (T x × T y) → T (x ∧ y)
T-×-∧ {true} {true} (tt , tt) = tt
T-∨-introʳ : ∀ {x y} → T y → T (x ∨ y)
T-∨-introʳ {true} {true} t = tt
T-∨-introʳ {false} {true} t = tt
T-∨-introˡ : ∀ {x y} → T x → T (x ∨ y)
T-∨-introˡ {true} {true} t = tt
T-∨-introˡ {true} {false} t = tt
≤-any-convergent : ∀ α i → T (≤-any α i) → T (≤-any α (suc i))
≤-any-convergent α n t = T-∨-introʳ t
≤-any-ℕ∞ : (ℕ → Bool) → ℕ∞
≤-any-ℕ∞ α = ≤-any α , ≤-any-convergent α
≤-any-construct : ∀ α n → T (α n) → T (≤-any α n)
≤-any-construct α zero t = t
≤-any-construct α (suc n) t = T-∨-introˡ t
private
not-injective : ∀ {x y} → not x ≡ not y → x ≡ y
not-injective {false} {false} refl = refl
not-injective {true} {true} refl = refl
x≢y⇒not[x]≡y : ∀ {x y} → x ≢ y → not x ≡ y
x≢y⇒not[x]≡y {false} {false} x≢y = contradiction refl x≢y
x≢y⇒not[x]≡y {false} {true} x≢y = refl
x≢y⇒not[x]≡y {true} {false} x≢y = refl
x≢y⇒not[x]≡y {true} {true} x≢y = contradiction refl x≢y
x≢y⇒x≡not[y] : ∀ {x y} → x ≢ y → x ≡ not y
x≢y⇒x≡not[y] {x} {y} x≢y = subst (_≡ not y) (𝔹ₚ.not-involutive x) $
x≢y⇒not[x]≡y {not x} {not y} (x≢y ∘′ not-injective)
x≡y⇒not[x]≢y : ∀ {x y} → x ≡ y → not x ≢ y
x≡y⇒not[x]≢y {false} {false} p ()
x≡y⇒not[x]≢y {false} {true} () q
x≡y⇒not[x]≢y {true} {false} () q
x≡y⇒not[x]≢y {true} {true} p ()
not[x]≡true→x≢true : ∀ {x} → not x ≡ true → x ≢ true
not[x]≡true→x≢true {false} refl ()
not[x]≡true→x≢true {true} () p
false≢true : false ≢ true
false≢true ()
lpo-Bool⇒∀x→x#∞⊎x≈∞ : LPO-Bool ℕ → ∀ x → (x #∞ ∞) ⊎ (x ≈∞ ∞)
lpo-Bool⇒∀x→x#∞⊎x≈∞ lpo-Bool (α , con) with lpo-Bool α
... | inj₁ (x , αx≡true) = inj₁ (x , λ αx≡false → false≢true (trans (sym αx≡false) αx≡true))
... | inj₂ ¬∃x→αx≡true = inj₂ λ i → x≢y⇒x≡not[y] $′ ¬∃P→∀¬P ¬∃x→αx≡true i
T-to-≡ : ∀ {x} → T x → x ≡ true
T-to-≡ {true} tx = refl
≡-to-T : ∀ {x} → x ≡ true → T x
≡-to-T {true} x≡true = tt
private
T-¬-not : ∀ {x} → ¬ (T x) → T (not x)
T-¬-not {false} n = tt
T-¬-not {true} n = n tt
T-not-¬ : ∀ {x} → T (not x) → ¬ (T x)
T-not-¬ {false} tt ()
T-not-¬ {true} () y
¬T-≤-any : ∀ α x → ¬ T (≤-any α x) → ∃ λ y → ¬ T (α y)
¬T-≤-any α zero ¬T with α 0 | inspect α 0
... | true | [ α0≡true ] = contradiction tt ¬T
... | false | [ α0≡false ] = zero , (λ T[α0] → subst T α0≡false T[α0])
¬T-≤-any α (suc x) ¬T with α (suc x) | inspect α (suc x)
... | true | [ αsn≡true ] = ¬T-≤-any α x ¬T
... | false | [ αsn≡false ] = (suc x) , (λ T[αsn] → subst T αsn≡false T[αsn])
{-
¬T-≤-any′ : ∀ α x → ¬ T (≤-any (not ∘′ α) x) → ∃ λ y → T (α y)
¬T-≤-any′ α x ¬T =
Prod.map₂ (λ nt → subst T (𝔹ₚ.not-involutive _) (T-¬-not nt)) $′
¬T-≤-any (not ∘ α) x ¬T
∀x→x#∞⊎x≈∞⇒lpo-Bool : (∀ x → (x #∞ ∞) ⊎ (x ≈∞ ∞)) → LPO-Bool ℕ
∀x→x#∞⊎x≈∞⇒lpo-Bool ≈∞? P with ≈∞? (≤-any-ℕ∞ (λ n → not (P n)))
... | inj₁ (x , ≤-any[not∘P,x]≢true) =
inj₁ (Prod.map₂ T-to-≡ (¬T-≤-any′ P x (contraposition T-to-≡ ≤-any[not∘P,x]≢true)))
... | inj₂ ∀i→≤-any[not∘P,i]≡true =
inj₂ (∀¬P→¬∃P λ i → not[x]≡true→x≢true (T-to-≡ $ ≤-any-extract (not ∘ P) i $ ≡-to-T (∀i→≤-any[not∘P,i]≡true i)))
-- ≤-any (λ n → not (P n)) x ≡ true → ⊥
-- ≤-any (not ∘ P) x ≡ false
--
-- ---------------------------
-- T (≤-any P x)
-}
|
static const char help[] =
"Solves doubly-nonlinear obstacle problems in 2D. Option prefix dnl_.\n"
"The PDE (interior condition) of such problems has solution u(x,y):\n"
" - div (u^q |grad(u+b)|^{p-2} grad(u+b)) = f\n"
"subject to a obstacle constraint\n"
" u >= psi\n"
"FIXME Includes the steady-state, nonlinear ice sheet problem in 2D in which u=H\n"
"is ice thickness, b is bed elevation, and s = H + b is surface elevation:\n"
" - div (D grad H) - div(W H^{n+2}) = m\n"
"The diffusivity D and pseudo-velocity W (Bueler, 2016) are from the\n"
"nonsliding shallow ice approximation (SIA) flux:\n"
" D = Gamma H^{n+2} |grad H + grad b|^{n-1}\n"
" W = - Gamma |grad H + grad b|^{n-1} grad b\n"
"The climatic mass balance f = m(x,y,H) is from one of two models.\n"
"Constants are n >= 1 and Gamma = 2 A (rho g)^n / (n+2) where A is the ice\n"
"softness. The domain is square (0,L)^2 with zero Dirichlet boundary conditions.\n"
"The equation is discretized by a Q1 structured-grid FVE method (Bueler, 2016).\n"
"Requires SNESVI (-snes_type vinewton{rsls|ssls}) because of constraint;\n"
"defaults to SSLS.\n\n";
/*
1. shows basic success with SSLS but DIVERGES AT LEVEL 4:
mpiexec -n 4 ./ice -ice_verif -snes_converged_reason -snes_grid_sequence LEV
2. consider making CMB model smooth
3. add CMB to dump and create plotting script (.py)
4. using exact init shows convergence depends strongly on eps for fine grids:
for LEV in 1 2 3 4 5; do ./ice -ice_verif -ice_exact_init -snes_converged_reason -ksp_type gmres -pc_type gamg -da_refine $LEV -ice_eps EPS; done
result:
(a) works at all levels if EPS=0.005; last KSP somewhat constant but SNES iters growing
(b) fails on level 3 if EPS=0.003,0.002
5. convergent and nearly optimal GMG in flops *but cheating with exact init*, and *avoiding -snes_grid_sequence* and *significant eps=0.01 regularization*:
for LEV in 1 2 3 4 5 6 7 8; do ./ice -ice_verif -ice_exact_init -snes_converged_reason -ksp_type gmres -pc_type mg -da_refine $LEV -snes_type vinewtonrsls -ice_eps 0.01; done
6. visualizing -snes_grid_sequence:
./ice -ice_verif -snes_grid_sequence 2 -ice_eps 0.005 -snes_converged_reason -snes_monitor_solution draw
(was -snes_grid_sequence bug with periodic BCs? see PETSc issue #300)
8. even seems to work in parallel:
mpiexec -n 4 ./ice -ice_verif -snes_grid_sequence 5 -ice_eps 0.005 -snes_converged_reason -snes_monitor_solution draw
9. same outcome with -ice_exact_init and -da_refine 5
mpiexec -n 4 ./ice -ice_verif -da_refine 5 -ice_eps 0.005 -snes_converged_reason -snes_monitor_solution draw -ice_exact_init
10. unpredictable response to changing -snes_linesearch_type bt|l2|basic (cp seems rarely to work)
*/
/* see comments on runtime stuff in icet/icet.c, the time-dependent version */
#include <petsc.h>
#include "icecmb.h"
typedef struct {
double secpera, // number of seconds in a year
L, // spatial domain is (0,L) x (0,L)
g, // acceleration of gravity
rho_ice, // ice density
n_ice, // Glen exponent for SIA flux term
A_ice, // ice softness
Gamma, // coefficient for SIA flux term
D0, // representative value of diffusivity (used in regularizing D)
eps, // regularization parameter for diffusivity D
delta, // dimensionless regularization for slope in SIA formulas
lambda; // amount of upwinding; lambda=0 is none and lambda=1 is "full"
PetscBool verif, // use dome formulas if true
check_admissible; // check admissibility at start of FormFunctionLocal()
CMBModel *cmb; // defined in cmbmodel.h
} AppCtx;
// compute radius from center of (0,L) x (0,L)
double radialcoord(double x, double y, AppCtx *user) {
const double xc = x - user->L/2.0,
yc = y - user->L/2.0;
return PetscSqrtReal(xc * xc + yc * yc);
}
double DomeCMB(double x, double y, AppCtx *user) {
const double domeR = 750.0e3, // radius of exact ice sheet (m)
domeH0 = 3600.0, // center thickness of exact ice sheet (m)
n = user->n_ice,
pp = 1.0 / n,
CC = user->Gamma * PetscPowReal(domeH0,2.0*n+2.0)
/ PetscPowReal(2.0 * domeR * (1.0-1.0/n),n);
double r, s, tmp1, tmp2;
r = radialcoord(x, y, user);
// avoid singularities at center and margin
if (r < 0.01)
r = 0.01;
if (r > domeR - 0.01)
r = domeR - 0.01;
s = r / domeR;
tmp1 = PetscPowReal(s,pp) + PetscPowReal(1.0-s,pp) - 1.0;
tmp2 = 2.0 * PetscPowReal(s,pp) + PetscPowReal(1.0-s,pp-1.0) * (1.0 - 2.0*s) - 1.0;
return (CC / r) * PetscPowReal(tmp1,n-1.0) * tmp2;
}
PetscErrorCode DomeThicknessLocal(DMDALocalInfo *info, double **aH, AppCtx *user) {
const double domeR = 750.0e3, // radius of exact ice sheet (m)
domeH0 = 3600.0, // center thickness of exact ice sheet (m)
n = user->n_ice,
mm = 1.0 + 1.0 / n,
qq = n / (2.0 * n + 2.0),
CC = domeH0 / PetscPowReal(1.0 - 1.0 / n,qq),
dx = user->L / (double)(info->mx-1),
dy = user->L / (double)(info->my-1);
double x, y, r, s, tmp;
int j, k;
PetscFunctionBeginUser;
for (k=info->ys; k<info->ys+info->ym; k++) {
y = k * dy;
for (j=info->xs; j<info->xs+info->xm; j++) {
x = j * dx;
r = radialcoord(x, y, user);
// avoid singularities at margin and center
if (r > domeR - 0.01)
aH[k][j] = 0.0;
else {
if (r < 0.01)
r = 0.01;
s = r / domeR;
tmp = mm * s - (1.0/n) + PetscPowReal(1.0-s,mm) - PetscPowReal(s,mm);
aH[k][j] = CC * PetscPowReal(tmp,qq);
}
}
}
PetscFunctionReturn(0);
}
extern PetscErrorCode SetFromOptionsAppCtx(AppCtx*);
extern PetscErrorCode FormBedLocal(DMDALocalInfo*, int, double**, AppCtx*);
extern PetscErrorCode FormBounds(SNES,Vec,Vec);
extern PetscErrorCode FormFunctionLocal(DMDALocalInfo*, double**, double**, AppCtx*);
int main(int argc,char **argv) {
PetscErrorCode ierr;
DM da;
SNES snes;
KSP ksp;
Vec H;
AppCtx user;
CMBModel cmb;
PetscBool exact_init = PETSC_FALSE, // initialize using dome exact solution
dump = PETSC_FALSE; // dump state (H,b) in binary file ice_MXxMY.dat after solve
DMDALocalInfo info;
double **aH;
SNESConvergedReason reason;
int snesit,kspit;
PetscInitialize(&argc,&argv,(char*)0,help);
user.secpera = 31556926.0; // number of seconds in a year
user.L = 1800.0e3; // m; compare domeR=750.0e3 radius
user.g = 9.81; // m/s^2
user.rho_ice = 910.0; // kg/m^3
user.n_ice = 3.0; // Glen exponent
user.A_ice = 3.1689e-24; // 1/(Pa^3 s); EISMINT I value
user.D0 = 1.0; // m^2 / s
user.eps = 0.001;
user.delta = 1.0e-4;
user.lambda = 0.25;
user.verif = PETSC_FALSE;
user.check_admissible = PETSC_FALSE;
user.cmb = NULL;
ierr = PetscOptionsBegin(PETSC_COMM_WORLD,"ice_","options to ice","");CHKERRQ(ierr);
ierr = PetscOptionsReal(
"-A", "set value of ice softness A in units Pa-3 s-1",
"ice.c",user.A_ice,&user.A_ice,NULL);CHKERRQ(ierr);
ierr = PetscOptionsBool(
"-check_admissible", "check admissibility of iterate at start of residual evaluation FormFunctionLocal()",
"ice.c",user.check_admissible,&user.check_admissible,NULL);CHKERRQ(ierr);
ierr = PetscOptionsReal(
"-D0", "representative value of diffusivity (used in regularizing D) in units m2 s-1",
"ice.c",user.D0,&user.D0,NULL);CHKERRQ(ierr);
ierr = PetscOptionsReal(
"-delta", "dimensionless regularization for slope in SIA formulas",
"ice.c",user.delta,&user.delta,NULL);CHKERRQ(ierr);
ierr = PetscOptionsBool(
"-dump", "save final state (H, b)",
"ice.c",dump,&dump,NULL);CHKERRQ(ierr);
ierr = PetscOptionsReal(
"-eps", "dimensionless regularization for diffusivity D",
"ice.c",user.eps,&user.eps,NULL);CHKERRQ(ierr);
ierr = PetscOptionsBool(
"-exact_init", "initialize with dome exact solution",
"ice.c",exact_init,&exact_init,NULL);CHKERRQ(ierr);
ierr = PetscOptionsReal(
"-L", "side length of domain in meters",
"ice.c",user.L,&user.L,NULL);CHKERRQ(ierr);
ierr = PetscOptionsReal(
"-lambda", "amount of upwinding; lambda=0 is none and lambda=1 is full",
"ice.c",user.lambda,&user.lambda,NULL);CHKERRQ(ierr);
ierr = PetscOptionsReal(
"-n", "value of Glen exponent n",
"ice.c",user.n_ice,&user.n_ice,NULL);CHKERRQ(ierr);
if (user.n_ice <= 1.0) {
SETERRQ1(PETSC_COMM_WORLD,1,
"ERROR: n = %f not allowed ... n > 1.0 is required\n",user.n_ice);
}
ierr = PetscOptionsReal(
"-rho", "ice density in units kg m3",
"ice.c",user.rho_ice,&user.rho_ice,NULL);CHKERRQ(ierr);
ierr = PetscOptionsBool(
"-verif", "use dome exact solution for verification",
"ice.c",user.verif,&user.verif,NULL);CHKERRQ(ierr);
ierr = PetscOptionsEnd();CHKERRQ(ierr);
// derived constant computed after other ice properties are set
user.Gamma = 2.0 * PetscPowReal(user.rho_ice*user.g,user.n_ice)
* user.A_ice / (user.n_ice+2.0);
ierr = SetFromOptions_CMBModel(&cmb,user.secpera);
user.cmb = &cmb;
// DMDA for the cell-centered grid
ierr = DMDACreate2d(PETSC_COMM_WORLD,
DM_BOUNDARY_NONE,DM_BOUNDARY_NONE,
DMDA_STENCIL_BOX,
5,5, PETSC_DECIDE,PETSC_DECIDE,
1, 1, // dof=1, stencilwidth=1
NULL,NULL,&da);
ierr = DMSetFromOptions(da); CHKERRQ(ierr);
ierr = DMSetUp(da); CHKERRQ(ierr); // this must be called BEFORE SetUniformCoordinates
ierr = DMDASetUniformCoordinates(da,0.0,user.L,0.0,user.L,-1.0,-1.0);CHKERRQ(ierr);
ierr = DMSetApplicationContext(da, &user);CHKERRQ(ierr);
// create and configure the SNES to solve a NCP/VI at each step
ierr = SNESCreate(PETSC_COMM_WORLD,&snes);CHKERRQ(ierr);
ierr = SNESSetDM(snes,da);CHKERRQ(ierr);
ierr = SNESSetApplicationContext(snes,&user);CHKERRQ(ierr);
ierr = DMDASNESSetFunctionLocal(da,INSERT_VALUES,
(DMDASNESFunction)FormFunctionLocal,&user); CHKERRQ(ierr);
ierr = SNESSetType(snes,SNESVINEWTONSSLS); CHKERRQ(ierr);
ierr = SNESVISetComputeVariableBounds(snes,&FormBounds); CHKERRQ(ierr);
ierr = SNESSetFromOptions(snes);CHKERRQ(ierr);
// set up initial iterate
ierr = DMCreateGlobalVector(da,&H);CHKERRQ(ierr);
ierr = PetscObjectSetName((PetscObject)H,"H"); CHKERRQ(ierr);
if (exact_init) {
ierr = DMDAGetLocalInfo(da,&info); CHKERRQ(ierr);
ierr = DMDAVecGetArray(da,H,&aH); CHKERRQ(ierr);
ierr = DomeThicknessLocal(&info,aH,&user); CHKERRQ(ierr);
ierr = DMDAVecRestoreArray(da,H,&aH); CHKERRQ(ierr);
} else {
ierr = VecSet(H,0.0); CHKERRQ(ierr);
}
// solve
ierr = SNESSolve(snes,NULL,H); CHKERRQ(ierr);
ierr = SNESGetConvergedReason(snes,&reason); CHKERRQ(ierr);
if (reason <= 0) {
ierr = PetscPrintf(PETSC_COMM_WORLD,
"WARNING: SNES not converged ... use -snes_converged_reason to check\n"); CHKERRQ(ierr);
}
// get solution & DM on fine grid (which may have changed) after solve
ierr = VecDestroy(&H); CHKERRQ(ierr);
ierr = DMDestroy(&da); CHKERRQ(ierr);
ierr = SNESGetDM(snes,&da); CHKERRQ(ierr); /* do not destroy da */
ierr = DMDAGetLocalInfo(da,&info); CHKERRQ(ierr);
ierr = SNESGetSolution(snes,&H); CHKERRQ(ierr); /* do not destroy H */
ierr = PetscObjectSetName((PetscObject)H,"H"); CHKERRQ(ierr);
// compute performance measures; note utility of reporting last grid,
// last snesit/kspit when doing -snes_grid_sequence
ierr = SNESGetIterationNumber(snes,&snesit); CHKERRQ(ierr); //
ierr = SNESGetKSP(snes,&ksp); CHKERRQ(ierr);
ierr = KSPGetIterationNumber(ksp,&kspit); CHKERRQ(ierr);
ierr = PetscPrintf(PETSC_COMM_WORLD,
"done on %d x %d grid ... SNES iters = %d, last KSP iters = %d\n",
info.mx,info.my,snesit,kspit); CHKERRQ(ierr);
// dump state (H,b) if requested
if (dump) {
char filename[1024];
PetscViewer viewer;
Vec b;
double **ab;
ierr = VecDuplicate(H,&b); CHKERRQ(ierr);
ierr = PetscObjectSetName((PetscObject)b,"b"); CHKERRQ(ierr);
if (user.verif) {
ierr = VecSet(b,0.0); CHKERRQ(ierr);
} else {
ierr = DMDAVecGetArray(da,b,&ab); CHKERRQ(ierr);
ierr = FormBedLocal(&info,0,ab,&user); CHKERRQ(ierr);
ierr = DMDAVecRestoreArray(da,b,&ab); CHKERRQ(ierr);
}
ierr = sprintf(filename,"ice_%dx%d.dat",info.mx,info.my);
ierr = PetscPrintf(PETSC_COMM_WORLD,"writing PETSC binary file %s ...\n",filename); CHKERRQ(ierr);
ierr = PetscViewerBinaryOpen(PETSC_COMM_WORLD,filename,FILE_MODE_WRITE,&viewer); CHKERRQ(ierr);
ierr = VecView(b,viewer); CHKERRQ(ierr);
ierr = VecView(H,viewer); CHKERRQ(ierr);
ierr = PetscViewerDestroy(&viewer); CHKERRQ(ierr);
VecDestroy(&b);
}
// compute error in verification case
if (user.verif) {
Vec Hexact;
double infnorm, onenorm;
ierr = VecDuplicate(H,&Hexact); CHKERRQ(ierr);
ierr = DMDAVecGetArray(da,Hexact,&aH); CHKERRQ(ierr);
ierr = DomeThicknessLocal(&info,aH,&user); CHKERRQ(ierr);
ierr = DMDAVecRestoreArray(da,Hexact,&aH); CHKERRQ(ierr);
ierr = VecAXPY(H,-1.0,Hexact); CHKERRQ(ierr); // H <- H + (-1.0) Hexact
VecDestroy(&Hexact);
ierr = VecNorm(H,NORM_INFINITY,&infnorm); CHKERRQ(ierr);
ierr = VecNorm(H,NORM_1,&onenorm); CHKERRQ(ierr);
ierr = PetscPrintf(PETSC_COMM_WORLD,
"numerical errors: |H-Hexact|_inf = %.3f, |H-Hexact|_average = %.3f\n",
infnorm,onenorm/(double)(info.mx*info.my)); CHKERRQ(ierr);
}
SNESDestroy(&snes);
return PetscFinalize();
}
PetscErrorCode FormBedLocal(DMDALocalInfo *info, int stencilwidth, double **ab, AppCtx *user) {
int j,k,r,s;
const double dx = user->L / (double)(info->mx-1),
dy = user->L / (double)(info->my-1),
Z = PETSC_PI / user->L;
double x, y, b;
// vaguely-random frequencies and coeffs generated by fiddling; see randbed.py
const int nc = 4,
jc[4] = {1, 3, 6, 8},
kc[4] = {1, 3, 4, 7};
const double scalec = 750.0,
C[4][4] = { { 2.00000000, 0.33000000, -0.55020034, 0.54495520},
{ 0.50000000, 0.45014486, 0.60551833, -0.52250644},
{ 0.93812068, 0.32638429, -0.24654812, 0.33887052},
{ 0.17592361, -0.35496741, 0.22694547, -0.05280704} };
PetscFunctionBeginUser;
// go through owned portion of grid and compute b(x,y)
for (k = info->ys-stencilwidth; k < info->ys + info->ym+stencilwidth; k++) {
y = k * dy;
for (j = info->xs-stencilwidth; j < info->xs + info->xm+stencilwidth; j++) {
if (j < 0 || j >= info->mx-1 || k < 0 || k >= info->my-1)
continue;
x = j * dx;
// b(x,y) is sum of a few sines
b = 0.0;
for (r = 0; r < nc; r++) {
for (s = 0; s < nc; s++) {
b += C[r][s] * sin(jc[r] * Z * x) * sin(kc[s] * Z * y);
}
}
ab[k][j] = scalec * b;
}
}
PetscFunctionReturn(0);
}
// for call-back: tell SNESVI (variational inequality) that we want
// 0.0 <= H < +infinity
PetscErrorCode FormBounds(SNES snes, Vec Xl, Vec Xu) {
PetscErrorCode ierr;
PetscFunctionBeginUser;
ierr = VecSet(Xl,0.0); CHKERRQ(ierr);
ierr = VecSet(Xu,PETSC_INFINITY); CHKERRQ(ierr);
PetscFunctionReturn(0);
}
// value of gradient at a point
typedef struct {
double x,y;
} Grad;
/* We factor the SIA flux as
q = - H^{n+2} sigma(|grad s|) grad s
where sigma is the slope-dependent part
sigma(z) = Gamma z^{n-1}.
Also
D = H^{n+2} sigma(|grad s|)
so that q = - D grad s. */
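/* For the default Glen exponent n = 3 this reduces to (illustrative only):
     sigma(|grad s|) = Gamma |grad s|^2   and   D = Gamma H^5 |grad s|^2,
   so the flux q = -D grad s scales with the fifth power of the thickness. */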
static double sigma(Grad gH, Grad gb, const AppCtx *user) {
const double sx = gH.x + gb.x,
sy = gH.y + gb.y,
slopesqr = sx * sx + sy * sy + user->delta * user->delta;
return user->Gamma * PetscPowReal(slopesqr,(user->n_ice-1.0)/2);
}
/* Pseudo-velocity from bed slope: W = - sigma * grad b. */
static Grad W(double sigma, Grad gb) {
Grad W;
W.x = - sigma * gb.x;
W.y = - sigma * gb.y;
return W;
}
/* DCS = diffusivity from the continuation scheme:
D(eps) = (1-eps) sigma H^{n+2} + eps D_0
so D(1)=D_0 and D(0)=sigma H^{n+2}. */
static double DCS(double sigma, double H, const AppCtx *user) {
return (1.0 - user->eps) * sigma * PetscPowReal(PetscAbsReal(H),user->n_ice+2.0)
+ user->eps * user->D0;
}
/* Flux component from the non-sliding SIA on a general bed. */
PetscErrorCode SIAflux(Grad gH, Grad gb, double H, double Hup, PetscBool xdir,
double *D, double *q, const AppCtx *user) {
const double mysig = sigma(gH,gb,user),
myD = DCS(mysig,H,user);
const Grad myW = W(mysig,gb);
PetscFunctionBeginUser;
if (D) {
*D = myD;
}
if (xdir && q) {
*q = - myD * gH.x + myW.x * PetscPowReal(PetscAbsReal(Hup),user->n_ice+2.0);
} else {
*q = - myD * gH.y + myW.y * PetscPowReal(PetscAbsReal(Hup),user->n_ice+2.0);
}
PetscFunctionReturn(0);
}
// gradients of weights for Q^1 interpolant
static const double gx[4] = {-1.0, 1.0, 1.0, -1.0},
gy[4] = {-1.0, -1.0, 1.0, 1.0};
static double fieldatpt(double xi, double eta, double f[4]) {
// weights for Q^1 interpolant
double x[4] = { 1.0-xi, xi, xi, 1.0-xi},
y[4] = {1.0-eta, 1.0-eta, eta, eta};
return x[0] * y[0] * f[0] + x[1] * y[1] * f[1]
+ x[2] * y[2] * f[2] + x[3] * y[3] * f[3];
}
static double fieldatptArray(int u, int v, double xi, double eta, double **f) {
double ff[4] = {f[v][u], f[v][u+1], f[v+1][u+1], f[v+1][u]};
return fieldatpt(xi,eta,ff);
}
static Grad gradfatpt(double xi, double eta, double dx, double dy, double f[4]) {
Grad gradf;
double x[4] = { 1.0-xi, xi, xi, 1.0-xi},
y[4] = {1.0-eta, 1.0-eta, eta, eta};
gradf.x = gx[0] * y[0] * f[0] + gx[1] * y[1] * f[1]
+ gx[2] * y[2] * f[2] + gx[3] * y[3] * f[3];
gradf.y = x[0] *gy[0] * f[0] + x[1] *gy[1] * f[1]
+ x[2] *gy[2] * f[2] + x[3] *gy[3] * f[3];
gradf.x /= dx;
gradf.y /= dy;
return gradf;
}
static Grad gradfatptArray(int u, int v, double xi, double eta, double dx, double dy,
double **f) {
double ff[4] = {f[v][u], f[v][u+1], f[v+1][u+1], f[v+1][u]};
return gradfatpt(xi,eta,dx,dy,ff);
}
// indexing of the 8 quadrature points along the boundary of the control volume in M*
// point s=0,...,7 is in element (j,k) = (j+je[s],k+ke[s])
static const int je[8] = {0, 0, -1, -1, -1, -1, 0, 0},
ke[8] = {0, 0, 0, 0, -1, -1, -1, -1},
ce[8] = {0, 3, 1, 0, 2, 1, 3, 2};
// direction of flux at 4 points in each element
static const PetscBool xdire[4] = {PETSC_TRUE, PETSC_FALSE, PETSC_TRUE, PETSC_FALSE};
// local (element-wise) coords of quadrature points for M*
static const double locx[4] = { 0.5, 0.75, 0.5, 0.25},
locy[4] = { 0.25, 0.5, 0.75, 0.5};
/* FormFunctionLocal = call-back by SNES using DMDA info.
Evaluates residual FF on local process patch:
FF_{j,k} = \int_{\partial V_{j,k}} \mathbf{q} \cdot \mathbf{n}
- m_{j,k} \Delta x \Delta y
where V_{j,k} is the control volume centered at (x_j,y_k).
Regarding indexing locations along the boundary of the control volume where
flux is evaluated, this figure shows the control volume centered at (x_j,y_k)
and the four elements it meets. Quadrature uses 8 points on the boundary of
the control volume, numbered s=0,...,7:
-------------------
| | |
| ..2..|..1.. |
| 3: | :0 |
k |--------- ---------|
| 4: | :7 |
| ..5..|..6.. |
| | |
-------------------
j
Regarding flux-component indexing on the element indexed by (j,k) node,
the value (aqquad[c])[k][j] for c=0,1,2,3 is an x-component at "*" and
a y-component at "%"; note (x_j,y_k) is lower-left corner:
-------------------
| : |
| *2 |
| 3 : 1 |
|....%.... ....%....|
| : |
| *0 |
| : |
@-------------------
(j,k)
*/
PetscErrorCode FormFunctionLocal(DMDALocalInfo *info, double **aHin,
double **FF, AppCtx *user) {
PetscErrorCode ierr;
const double dx = user->L / (double)(info->mx-1),
dy = user->L / (double)(info->my-1);
// coefficients of quadrature evaluations along the boundary of the control volume in M*
const double coeff[8] = {dy/2, dx/2, dx/2, -dy/2, -dy/2, -dx/2, -dx/2, dy/2};
const PetscBool upwind = (user->lambda > 0.0);
const double upmin = (1.0 - user->lambda) * 0.5,
upmax = (1.0 + user->lambda) * 0.5;
int c, j, k, s;
double H, Hup, lxup, lyup, **aqquad[4], **ab, **aH, DSIA_ckj, qSIA_ckj,
M, x, y;
Grad gH, gb;
Vec qquad[4], Hcopy, b;
PetscFunctionBeginUser;
// copy and set boundary conditions to zero
ierr = DMGetLocalVector(info->da, &Hcopy); CHKERRQ(ierr);
ierr = DMDAVecGetArray(info->da,Hcopy,&aH); CHKERRQ(ierr);
for (k = info->ys-1; k <= info->ys + info->ym; k++) {
for (j = info->xs-1; j <= info->xs + info->xm; j++) {
if (j < 0 || j > info->mx-1 || k < 0 || k > info->my-1)
continue;
if (user->check_admissible && aHin[k][j] < 0.0) {
SETERRQ3(PETSC_COMM_WORLD,1,
"ERROR: non-admissible value H[k][j] = %.3e < 0.0 at j,k = %d,%d\n",
aHin[k][j],j,k);
}
if (j == 0 || j == info->mx-1 || k == 0 || k == info->my-1) {
if (j >= info->xs && j < info->xs+info->xm && k >= info->ys && k < info->ys+info->ym)
FF[k][j] = aHin[k][j]; // FIXME scaling?
aH[k][j] = 0.0;
} else
aH[k][j] = aHin[k][j];
}
}
// get bed elevation b(x,y) on this grid
ierr = DMGetLocalVector(info->da, &b); CHKERRQ(ierr);
ierr = DMDAVecGetArray(info->da,b,&ab); CHKERRQ(ierr);
if (user->verif) {
ierr = VecSet(b,0.0); CHKERRQ(ierr);
} else {
ierr = FormBedLocal(info,1,ab,user); CHKERRQ(ierr); // stencil width 1 fills ghost values of b
}
// working space for fluxes; see text for face location of flux evaluation
for (c = 0; c < 4; c++) {
ierr = DMGetLocalVector(info->da, &(qquad[c])); CHKERRQ(ierr);
ierr = DMDAVecGetArray(info->da,qquad[c],&(aqquad[c])); CHKERRQ(ierr);
}
// loop over locally-owned elements, including ghosts, to get fluxes q at
// c = 0,1,2,3 points in element; note start at (xs-1,ys-1)
for (k = info->ys-1; k < info->ys + info->ym; k++) {
for (j = info->xs-1; j < info->xs + info->xm; j++) {
if (j < 0 || j >= info->mx-1 || k < 0 || k >= info->my-1)
continue;
for (c=0; c<4; c++) {
H = fieldatptArray(j,k,locx[c],locy[c],aH);
gH = gradfatptArray(j,k,locx[c],locy[c],dx,dy,aH);
gb = gradfatptArray(j,k,locx[c],locy[c],dx,dy,ab);
if (upwind) {
if (xdire[c] == PETSC_TRUE) {
lxup = (gb.x <= 0.0) ? upmin : upmax;
lyup = locy[c];
} else {
lxup = locx[c];
lyup = (gb.y <= 0.0) ? upmin : upmax;
}
Hup = fieldatptArray(j,k,lxup,lyup,aH);
} else
Hup = H;
ierr = SIAflux(gH,gb,H,Hup,xdire[c],
&DSIA_ckj,&qSIA_ckj,user); CHKERRQ(ierr);
aqquad[c][k][j] = qSIA_ckj;
}
}
}
// loop over nodes, not including ghosts, to get function F(H) from quadrature over
// s = 0,1,...,7 points on boundary of control volume (rectangle) around node
for (k=info->ys; k<info->ys+info->ym; k++) {
for (j=info->xs; j<info->xs+info->xm; j++) {
if (j == 0 || j == info->mx-1 || k == 0 || k == info->my-1)
continue;
// climatic mass balance
if (user->verif) {
x = j * dx;
y = k * dy;
M = DomeCMB(x,y,user);
} else {
M = M_CMBModel(user->cmb,ab[k][j] + aH[k][j]); // s=b+H is surface elevation
}
FF[k][j] = - M * dx * dy;
// now add integral over control volume boundary using two
// quadrature points on each side
for (s=0; s<8; s++)
FF[k][j] += coeff[s] * aqquad[ce[s]][k+ke[s]][j+je[s]];
}
}
// restore working space and bed
for (c = 0; c < 4; c++) {
ierr = DMDAVecRestoreArray(info->da,qquad[c],&(aqquad[c])); CHKERRQ(ierr);
ierr = DMRestoreLocalVector(info->da, &(qquad[c])); CHKERRQ(ierr);
}
ierr = DMDAVecRestoreArray(info->da,Hcopy,&aH); CHKERRQ(ierr);
ierr = DMRestoreLocalVector(info->da, &Hcopy); CHKERRQ(ierr);
ierr = DMDAVecRestoreArray(info->da,b,&ab); CHKERRQ(ierr);
ierr = DMRestoreLocalVector(info->da, &b); CHKERRQ(ierr);
PetscFunctionReturn(0);
}
|
Require Import Coq.Numbers.BinNums.
Require Import compcert.lib.Maps.
Require Import mc_reify.func_defs.
Require Import mc_reify.get_set_reif.
Require Import ExtLib.Tactics.
Require Import floyd.client_lemmas.
Ltac destruct_match H :=
match type of H with
| context [ match ?x with _ => _ end] => destruct x eqn:?
end.
Section tbled.
Variable tbl : SymEnv.functions RType_typ.
Let RSym_sym := RSym_sym tbl.
Existing Instance RSym_sym.
Let RSym_sym_ok : RSymOk RSym_sym := _.
Existing Instance RSym_sym_ok.
Let Expr_expr_fs := Expr_expr_fs tbl.
Existing Instance Expr_expr_fs.
Let Expr_ok_fs := Expr_ok_fs tbl.
Existing Instance Expr_ok_fs.
Existing Instance MA.
Lemma exprD'_App_R_typ (e1 e2 : expr typ func) tus tvs ty1 ty2 v:
exprD' tus tvs ty2 (App e1 e2) = Some v ->
typeof_expr tus tvs e2 = Some ty1 ->
(exists v1 , exprD' tus tvs (tyArr ty1 ty2) e1 = Some v1) /\
(exists v2, exprD' tus tvs ty1 e2 = Some v2).
Proof.
intros.
assert (X := @exprD'_typeof_expr typ _ _ _ _ _ _ _ tus (App e1 e2) tvs
ty2 v). assert (H2 : typeof_expr tus tvs (App e1 e2) = Some ty2).
apply X. intuition. clear X.
change (App e1 e2) with (AppN.apps e1 (e2 :: nil)) in *.
rewrite AppN.typeof_expr_apps in H2; auto with typeclass_instances.
unfold AppN.typeof_apps in H2.
destruct (typeof_expr tus tvs e1) eqn:?; try congruence.
simpl in H2. destruct (typeof_expr tus tvs e2) eqn:?; try congruence.
simpl. destruct t; simpl in H2; try congruence.
destruct (typ_eq_dec t0 t1) eqn : ?; try congruence.
inversion H2; subst; clear H2.
inversion H0; subst; clear H0.
simpl in H.
change (App e1 e2) with (AppN.apps e1 (e2 :: nil)) in *.
clear Heqs.
rewrite AppN.exprD'_apps in H; auto with typeclass_instances.
unfold AppN.apps_sem' in H.
destruct (typeof_expr tus tvs e1) eqn:?; try congruence.
destruct (exprD' tus tvs t e1) eqn :?; try congruence.
inversion Heqo; subst; clear Heqo. split. eexists. apply Heqo2.
simpl in H.
destruct (exprD' tus tvs ty1 e2) eqn:?; try congruence.
eexists; eauto.
Qed.
Lemma exprD'_App_L_typ (e1 e2 : expr typ func) tus tvs ty1 ty2 v:
exprD' tus tvs ty2 (App e1 e2) = Some v ->
typeof_expr tus tvs e1 = Some (tyArr ty1 ty2) ->
(exists v1 , exprD' tus tvs (tyArr ty1 ty2) e1 = Some v1) /\
(exists v2, exprD' tus tvs ty1 e2 = Some v2).
Proof.
intros.
assert (X := @exprD'_typeof_expr typ _ _ _ _ _ _ _ tus (App e1 e2) tvs
ty2 v). assert (H2 : typeof_expr tus tvs (App e1 e2) = Some ty2).
apply X. intuition. clear X.
change (App e1 e2) with (AppN.apps e1 (e2 :: nil)) in *.
rewrite AppN.typeof_expr_apps in H2; auto with typeclass_instances.
unfold AppN.typeof_apps in H2.
destruct (typeof_expr tus tvs e1) eqn:?; try congruence.
simpl in H2. destruct (typeof_expr tus tvs e2) eqn:?; try congruence.
simpl. destruct t; simpl in H2; try congruence.
destruct (typ_eq_dec t0 t1) eqn : ?; try congruence.
inversion H2; subst; clear H2.
inversion H0; subst; clear H0.
simpl in H.
change (App e1 e2) with (AppN.apps e1 (e2 :: nil)) in *.
clear Heqs.
rewrite AppN.exprD'_apps in H; auto with typeclass_instances.
unfold AppN.apps_sem' in H.
destruct (typeof_expr tus tvs e1) eqn:?; try congruence.
destruct (exprD' tus tvs t e1) eqn :?; try congruence.
inversion Heqo; subst; clear Heqo. split. eexists. apply Heqo2.
simpl in H.
destruct (exprD' tus tvs ty1 e2) eqn:?; try congruence.
eexists; eauto.
Qed.
Lemma exprD_typeof_Some : forall tus tvs t (e : expr typ func) val,
exprD' tus tvs t e = Some val -> typeof_expr tus tvs e = Some t.
Proof.
intros.
eapply ExprTac.exprD_typeof_Some; try apply _. eauto.
Qed.
Lemma typeof_app : forall (e1 e2 : expr typ func) t tvs tus,
typeof_expr tus tvs (App e1 e2) = Some t ->
exists t2, typeof_expr tus tvs e1 = Some (tyArr t2 t) /\
typeof_expr tus tvs e2 = Some (t2).
Proof.
intros. change (App e1 e2) with (AppN.apps e1 (e2 :: nil)) in *.
rewrite AppN.typeof_expr_apps in H; auto with typeclass_instances.
unfold AppN.typeof_apps in H.
destruct (typeof_expr tus tvs e1) eqn:?; try congruence.
destruct t0; try (simpl in H; destruct_match H; congruence).
simpl in H.
repeat (destruct_match H; try congruence).
destruct_match Heqo1; try congruence. subst. inversion H.
subst.
eexists. split. reflexivity. auto.
Qed.
Lemma exprD_ex_typs : forall tus tvs t (e1 e2 : expr typ func) v,
exprD' tus tvs t (App e1 e2) = Some v ->
exists t2, typeof_expr tus tvs e1 = Some (tyArr t2 t) /\
typeof_expr tus tvs e2 = Some (t2).
intros.
assert (X := @exprD'_typeof_expr typ _ _ _ _ _ _ _ tus (App e1 e2) tvs
t v).
assert (H2 : typeof_expr tus tvs (App e1 e2) = Some t).
apply X. intuition. clear X.
apply typeof_app; eauto.
Qed.
Lemma exprD'_one_type : forall tus tvs t1 t2 (e : expr typ func) v1 v2,
exprD' tus tvs t1 e = Some v1 ->
exprD' tus tvs t2 e = Some v2 ->
t1 = t2.
Proof.
intros.
apply ExprTac.exprD_typeof_Some in H; try apply _.
eapply ExprTac.exprD_typeof_eq in H0. symmetry.
eauto. apply _. apply _. apply _. auto.
Qed.
End tbled.
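(* The tactics below normalize hypotheses arising from exprD'/typeof_expr
   reasoning: [inv_some] inverts [Some _ = Some _] equalities, [p_exprD]
   rewrites an [exprD'] hypothesis with the [exprD_rw] database and
   case-splits the result, [p_exprD_app] and [p_exprD_inj] specialize this
   to applications and injected symbols, and [solve_exprD] chains them into
   a general-purpose solver. *)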
Ltac inv H := inversion H; first [subst | subst_any]; clear H.
Ltac inv_some :=
repeat
match goal with
| [ H : Some _ = Some _ |- _] => inv H
| [ H : None = None |- _ ] => clear H
end.
Ltac rewrite_in_match :=
repeat
match goal with
| [ H : ?x = _ |- context[match ?x with _ => _ end]] =>
rewrite H
| [ H : ?x = _, H1 : context[match ?x with _ => _ end] |- _] =>
rewrite H in H1
end.
Ltac destruct_match_oneres :=
repeat match goal with
[ H : context[match ?x with _ => _ end] |- _] =>
(destruct x eqn:?; try congruence); [ idtac ]
end.
Ltac progress_match :=
repeat (rewrite_in_match; destruct_match_oneres).
Ltac try_simpl_typeof :=
try
match goal with
| [ |- context [typeof_expr ?tus ?tvs ?e] ] =>
let simpd := eval hnf in (typeof_expr tus tvs e) in
match simpd with
| Some _ => change (typeof_expr tus tvs e) with simpd; cbv beta iota
end
| [ H : context [typeof_expr ?tus ?tvs ?e] |- _ ] =>
let simpd := eval hnf in (typeof_expr tus tvs e) in
match simpd with
| Some _ => change (typeof_expr tus tvs e) with simpd in H; cbv beta iota in H
end
end.
Ltac cautious_simpl :=
repeat (
cbv [Monad.bind Monad.ret OptionMonad.Monad_option
(*Rsym eq_sym Rrefl exprT_App typ2_cast typ2 Typ2_tyArr*)] in *;
try_simpl_typeof).
Ltac p_exprD H1 :=
autorewrite with exprD_rw in H1; try apply _;
cautious_simpl; repeat (progress_match; inv_some).
Ltac cleanup_dups :=
repeat
match goal with
| [ H : ?x = Some _, H2 : ?x = Some _ |- _ ] => rewrite H in H2; inv_some
| [ H : ?x = ?x |- _] => clear H
end.
Ltac remove_no_cast :=
repeat
match goal with
[ H : type_cast ?x ?x = Some _ |- _] => clear H
end.
Ltac subst_rty :=
repeat
match goal with
| [ H : Rty _ _ |- _ ] => unfold Rty in H; inversion H; subst; remove_no_cast;
try clear H
end.
Ltac inv_same_types tbl :=
repeat
match goal with
[ H : exprD' ?tus ?tvs ?t1 ?e = Some ?v1,
H1 : exprD' ?tus ?tvs ?t2 ?e = Some ?v2 |- _] =>
let N := fresh "H" in assert (N := exprD'_one_type tbl tus tvs t1 t2 e v1 v2 H H1); subst; try inv N; cleanup_dups
end.
Ltac p_exprD_app tbl :=
repeat
(match goal with
| [ H : exprD' _ _ _ (App _ _ ) = (*Some*) _ |- _ ] => p_exprD H
| [ H : context [match exprD' _ _ _ (App _ _) with _ => _ end] |- _] =>
p_exprD H
end;
cleanup_dups; subst_rty; inv_same_types tbl).
Ltac solve_funcAs :=
repeat
match goal with
| [ H : context [funcAs _ _] |- _ ] =>
unfold funcAs in H; simpl in H;
repeat (rewrite type_cast_refl in H; try apply _; unfold Rcast, Relim in H; simpl in H)
| [ |- context [funcAs _ _ ] ]=> unfold funcAs; simpl;
repeat (try rewrite type_cast_refl; try apply _; unfold Rcast, Relim; simpl)
end;
repeat (try rewrite type_cast_refl; try apply _; unfold Rcast, Relim; simpl).
Ltac solve_funcAs_f H :=
p_exprD H;
solve_funcAs H;
match type of H with
| match ?f with _ => _ end = _ =>
let eqn := fresh "eqn" in
let v := fresh "v" in
(destruct f as [v | ]; try congruence);
clear H; unfold Rty in v; inversion v; subst; try clear v
end.
Ltac p_exprD_inj tbl :=
repeat (
match goal with
| [ H : exprD' ?tus ?tvs ?t (Inj ?e ) = Some ?val |- _ ] =>
let X := fresh "X" in
(assert (X := exprD_typeof_Some tbl tus tvs t (Inj e) val);
simpl in X; specialize (X H); inv X);
p_exprD H; ( solve_funcAs || fail)
| [ H : context [match exprD' ?tus ?tvs ?t (Inj ?e ) with _ => _ end] |- _ ] =>
p_exprD H; ( solve_funcAs || fail)
| [ H : exprD' ?tus ?tvs ?t (Inj ?e ) = _ |- _ ] =>
p_exprD H; ( solve_funcAs || fail)
end; unfold Rcast in *; cautious_simpl; inv_some; subst_rty);
cleanup_dups; try apply _; inv_some.
Ltac copy H :=
match type of H with
| ?x => assert x by exact H
end.
Ltac pose_exprD' :=
repeat
match goal with
| [ H : typeof_expr ?tus ?tvs ?v = Some ?t |- _ ] =>
match goal with
| [H' : exprD' tus tvs t v = Some _ |- _ ] => fail 1
| _ => match type of H with
| ?x => let X := fresh "H" in
assert x as X by exact H;
rewrite (ExprFacts.exprD'_typeof_expr tus tvs v t) in X;
destruct X
end
end
end.
Ltac pose_types tbl :=
repeat
match goal with
| [ H : exprD' ?tus ?tvs ?ty ?v = Some ?r |- _ ] =>
match goal with
| [H' : typeof_expr tus tvs v = Some ty |- _ ] => fail 1
| _ => let X := fresh "H" in
assert (X := exprD_typeof_Some tbl tus tvs ty v r H)
end
end.
Ltac solve_exprD tbl :=
repeat (
p_exprD_app tbl;
p_exprD_inj tbl;
autorewrite with exprD_rw; cautious_simpl; solve_funcAs;
try solve [auto with typeclass_instances | reflexivity | apply _];
try congruence; pose_types tbl; (*pose_exprD';*) fold func in *;
progress_match;
try (rewrite type_cast_refl in *; apply _;
unfold Rcast, Relim; cautious_simpl);
try solve [unfold exprT_App in *; simpl; eauto];
try apply _).
|
(*
* Copyright 2014, NICTA
*
* This software may be distributed and modified according to the terms of
* the BSD 2-Clause license. Note that NO WARRANTY is provided.
* See "LICENSE_BSD2.txt" for details.
*
* @TAG(NICTA_BSD)
*)
theory NonDetMonadVCG
imports
NonDetMonadLemmas
"wp/WP"
"wp/WPC"
"Strengthen"
begin
(* Wrap up the standard usage pattern of wp/wpc/simp into its own command: *)
method wpsimp uses wp wp_del simp simp_del split split_del cong =
((determ \<open>wp add: wp del: wp_del | wpc |
clarsimp simp: simp simp del: simp_del split: split split del: split_del cong: cong\<close>)+)[1]
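(* Typical usage (illustrative; the rule and definition names below are
   hypothetical): apply (wpsimp wp: my_wp_rule simp: my_def) supplies extra
   wp rules and simp rules to the combined wp/wpc/clarsimp loop. *)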
declare K_def [simp]
section "Satisfiability"
text {*
The dual to validity: an existential instead of a universal
quantifier for the post condition. In refinement, it is
often sufficient to know that there is one state that
satisfies a condition.
*}
definition
exs_valid :: "('a \<Rightarrow> bool) \<Rightarrow> ('a, 'b) nondet_monad \<Rightarrow>
('b \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> bool"
("\<lbrace>_\<rbrace> _ \<exists>\<lbrace>_\<rbrace>")
where
"exs_valid P f Q \<equiv> (\<forall>s. P s \<longrightarrow> (\<exists>(rv, s') \<in> fst (f s). Q rv s'))"
text {* The above for the exception monad *}
definition
ex_exs_validE :: "('a \<Rightarrow> bool) \<Rightarrow> ('a, 'e + 'b) nondet_monad \<Rightarrow>
('b \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> ('e \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> bool"
("\<lbrace>_\<rbrace> _ \<exists>\<lbrace>_\<rbrace>, \<lbrace>_\<rbrace>")
where
"ex_exs_validE P f Q E \<equiv>
exs_valid P f (\<lambda>rv. case rv of Inl e \<Rightarrow> E e | Inr v \<Rightarrow> Q v)"
section "Lemmas"
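(* Illustrative instance of the satisfiability judgement defined above
   (not part of the original development): a deterministic return trivially
   satisfies its exact postcondition. *)
lemma exs_valid_return_example:
  "\<lbrace>\<lambda>_. True\<rbrace> return (0 :: nat) \<exists>\<lbrace>\<lambda>rv _. rv = 0\<rbrace>"
  by (clarsimp simp: exs_valid_def return_def)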
subsection {* Determinism *}
lemma det_set_iff:
"det f \<Longrightarrow> (r \<in> fst (f s)) = (fst (f s) = {r})"
apply (simp add: det_def)
apply (rule iffI)
apply (erule_tac x=s in allE)
apply auto
done
lemma return_det [iff]:
"det (return x)"
by (simp add: det_def return_def)
lemma put_det [iff]:
"det (put s)"
by (simp add: det_def put_def)
lemma get_det [iff]:
"det get"
by (simp add: det_def get_def)
lemma det_gets [iff]:
"det (gets f)"
by (auto simp add: gets_def det_def get_def return_def bind_def)
lemma det_UN:
"det f \<Longrightarrow> (\<Union>x \<in> fst (f s). g x) = (g (THE x. x \<in> fst (f s)))"
unfolding det_def
apply simp
apply (drule spec [of _ s])
apply clarsimp
done
lemma bind_detI [simp, intro!]:
"\<lbrakk> det f; \<forall>x. det (g x) \<rbrakk> \<Longrightarrow> det (f >>= g)"
apply (simp add: bind_def det_def split_def)
apply clarsimp
apply (erule_tac x=s in allE)
apply clarsimp
apply (erule_tac x="a" in allE)
apply (erule_tac x="b" in allE)
apply clarsimp
done
lemma the_run_stateI:
"fst (M s) = {s'} \<Longrightarrow> the_run_state M s = s'"
by (simp add: the_run_state_def)
lemma the_run_state_det:
"\<lbrakk> s' \<in> fst (M s); det M \<rbrakk> \<Longrightarrow> the_run_state M s = s'"
by (simp add: the_run_stateI det_set_iff)
subsection "Lifting and Alternative Basic Definitions"
lemma liftE_liftM: "liftE = liftM Inr"
apply (rule ext)
apply (simp add: liftE_def liftM_def)
done
lemma liftME_liftM: "liftME f = liftM (case_sum Inl (Inr \<circ> f))"
apply (rule ext)
apply (simp add: liftME_def liftM_def bindE_def returnOk_def lift_def)
apply (rule_tac f="bind x" in arg_cong)
apply (rule ext)
apply (case_tac xa)
apply (simp_all add: lift_def throwError_def)
done
lemma liftE_bindE:
"(liftE a) >>=E b = a >>= b"
apply (simp add: liftE_def bindE_def lift_def bind_assoc)
done
lemma liftM_id[simp]: "liftM id = id"
apply (rule ext)
apply (simp add: liftM_def)
done
lemma liftM_bind:
"(liftM t f >>= g) = (f >>= (\<lambda>x. g (t x)))"
by (simp add: liftM_def bind_assoc)
lemma gets_bind_ign: "gets f >>= (\<lambda>x. m) = m"
apply (rule ext)
apply (simp add: bind_def simpler_gets_def)
done
lemma get_bind_apply: "(get >>= f) x = f x x"
by (simp add: get_def bind_def)
lemma exec_get:
"(get >>= m) s = m s s"
by (simp add: get_def bind_def)
lemma bind_eqI:
"\<lbrakk> f = f'; \<And>x. g x = g' x \<rbrakk> \<Longrightarrow> f >>= g = f' >>= g'"
apply (rule ext)
apply (simp add: bind_def)
apply (auto simp: split_def)
done
subsection "Simplification Rules for Lifted And/Or"
lemma pred_andE[elim!]: "\<lbrakk> (A and B) x; \<lbrakk> A x; B x \<rbrakk> \<Longrightarrow> R \<rbrakk> \<Longrightarrow> R"
by(simp add:pred_conj_def)
lemma pred_andI[intro!]: "\<lbrakk> A x; B x \<rbrakk> \<Longrightarrow> (A and B) x"
by(simp add:pred_conj_def)
lemma pred_conj_app[simp]: "(P and Q) x = (P x \<and> Q x)"
by(simp add:pred_conj_def)
lemma bipred_andE[elim!]: "\<lbrakk> (A And B) x y; \<lbrakk> A x y; B x y \<rbrakk> \<Longrightarrow> R \<rbrakk> \<Longrightarrow> R"
by(simp add:bipred_conj_def)
lemma bipred_andI[intro!]: "\<lbrakk> A x y; B x y \<rbrakk> \<Longrightarrow> (A And B) x y"
by (simp add:bipred_conj_def)
lemma bipred_conj_app[simp]: "(P And Q) x = (P x and Q x)"
by(simp add:pred_conj_def bipred_conj_def)
lemma pred_disjE[elim!]: "\<lbrakk> (P or Q) x; P x \<Longrightarrow> R; Q x \<Longrightarrow> R \<rbrakk> \<Longrightarrow> R"
by (fastforce simp: pred_disj_def)
lemma pred_disjI1[intro]: "P x \<Longrightarrow> (P or Q) x"
by (simp add: pred_disj_def)
lemma pred_disjI2[intro]: "Q x \<Longrightarrow> (P or Q) x"
by (simp add: pred_disj_def)
lemma pred_disj_app[simp]: "(P or Q) x = (P x \<or> Q x)"
by auto
lemma bipred_disjI1[intro]: "P x y \<Longrightarrow> (P Or Q) x y"
by (simp add: bipred_disj_def)
lemma bipred_disjI2[intro]: "Q x y \<Longrightarrow> (P Or Q) x y"
by (simp add: bipred_disj_def)
lemma bipred_disj_app[simp]: "(P Or Q) x = (P x or Q x)"
by(simp add:pred_disj_def bipred_disj_def)
lemma pred_notnotD[simp]: "(not not P) = P"
by(simp add:pred_neg_def)
lemma pred_and_true[simp]: "(P and \<top>) = P"
by(simp add:pred_conj_def)
lemma pred_and_true_var[simp]: "(\<top> and P) = P"
by(simp add:pred_conj_def)
lemma pred_and_false[simp]: "(P and \<bottom>) = \<bottom>"
by(simp add:pred_conj_def)
lemma pred_and_false_var[simp]: "(\<bottom> and P) = \<bottom>"
by(simp add:pred_conj_def)
subsection "Hoare Logic Rules"
lemma validE_def2:
"\<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>,\<lbrace>R\<rbrace> \<equiv> \<forall>s. P s \<longrightarrow> (\<forall>(r,s') \<in> fst (f s). case r of Inr b \<Rightarrow> Q b s'
| Inl a \<Rightarrow> R a s')"
by (unfold valid_def validE_def)
lemma seq':
"\<lbrakk> \<lbrace>A\<rbrace> f \<lbrace>B\<rbrace>;
\<forall>x. P x \<longrightarrow> \<lbrace>C\<rbrace> g x \<lbrace>D\<rbrace>;
\<forall>x s. B x s \<longrightarrow> P x \<and> C s \<rbrakk> \<Longrightarrow>
\<lbrace>A\<rbrace> do x \<leftarrow> f; g x od \<lbrace>D\<rbrace>"
apply (clarsimp simp: valid_def bind_def)
apply fastforce
done
lemma seq:
assumes f_valid: "\<lbrace>A\<rbrace> f \<lbrace>B\<rbrace>"
assumes g_valid: "\<And>x. P x \<Longrightarrow> \<lbrace>C\<rbrace> g x \<lbrace>D\<rbrace>"
assumes bind: "\<And>x s. B x s \<Longrightarrow> P x \<and> C s"
shows "\<lbrace>A\<rbrace> do x \<leftarrow> f; g x od \<lbrace>D\<rbrace>"
apply (insert f_valid g_valid bind)
apply (blast intro: seq')
done
lemma seq_ext':
"\<lbrakk> \<lbrace>A\<rbrace> f \<lbrace>B\<rbrace>;
\<forall>x. \<lbrace>B x\<rbrace> g x \<lbrace>C\<rbrace> \<rbrakk> \<Longrightarrow>
\<lbrace>A\<rbrace> do x \<leftarrow> f; g x od \<lbrace>C\<rbrace>"
by (fastforce simp: valid_def bind_def Let_def split_def)
lemma seq_ext:
assumes f_valid: "\<lbrace>A\<rbrace> f \<lbrace>B\<rbrace>"
assumes g_valid: "\<And>x. \<lbrace>B x\<rbrace> g x \<lbrace>C\<rbrace>"
shows "\<lbrace>A\<rbrace> do x \<leftarrow> f; g x od \<lbrace>C\<rbrace>"
apply(insert f_valid g_valid)
apply(blast intro: seq_ext')
done
lemma seqE':
"\<lbrakk> \<lbrace>A\<rbrace> f \<lbrace>B\<rbrace>,\<lbrace>E\<rbrace>;
\<forall>x. \<lbrace>B x\<rbrace> g x \<lbrace>C\<rbrace>,\<lbrace>E\<rbrace> \<rbrakk> \<Longrightarrow>
\<lbrace>A\<rbrace> doE x \<leftarrow> f; g x odE \<lbrace>C\<rbrace>,\<lbrace>E\<rbrace>"
apply(simp add:bindE_def lift_def bind_def Let_def split_def)
apply(clarsimp simp:validE_def2)
apply (fastforce simp add: throwError_def return_def lift_def
split: sum.splits)
done
lemma seqE:
assumes f_valid: "\<lbrace>A\<rbrace> f \<lbrace>B\<rbrace>,\<lbrace>E\<rbrace>"
assumes g_valid: "\<And>x. \<lbrace>B x\<rbrace> g x \<lbrace>C\<rbrace>,\<lbrace>E\<rbrace>"
shows "\<lbrace>A\<rbrace> doE x \<leftarrow> f; g x odE \<lbrace>C\<rbrace>,\<lbrace>E\<rbrace>"
apply(insert f_valid g_valid)
apply(blast intro: seqE')
done
lemma hoare_TrueI: "\<lbrace>P\<rbrace> f \<lbrace>\<lambda>_. \<top>\<rbrace>"
by (simp add: valid_def)
lemma hoareE_TrueI: "\<lbrace>P\<rbrace> f \<lbrace>\<lambda>_. \<top>\<rbrace>, \<lbrace>\<lambda>r. \<top>\<rbrace>"
by (simp add: validE_def valid_def)
lemma hoare_True_E_R [simp]:
"\<lbrace>P\<rbrace> f \<lbrace>\<lambda>r s. True\<rbrace>, -"
by (auto simp add: validE_R_def validE_def valid_def split: sum.splits)
lemma hoare_post_conj [intro!]:
"\<lbrakk> \<lbrace> P \<rbrace> a \<lbrace> Q \<rbrace>; \<lbrace> P \<rbrace> a \<lbrace> R \<rbrace> \<rbrakk> \<Longrightarrow> \<lbrace> P \<rbrace> a \<lbrace> Q And R \<rbrace>"
by (fastforce simp: valid_def split_def bipred_conj_def)
lemma hoare_pre_disj [intro!]:
"\<lbrakk> \<lbrace> P \<rbrace> a \<lbrace> R \<rbrace>; \<lbrace> Q \<rbrace> a \<lbrace> R \<rbrace> \<rbrakk> \<Longrightarrow> \<lbrace> P or Q \<rbrace> a \<lbrace> R \<rbrace>"
by (simp add:valid_def pred_disj_def)
lemma hoare_conj:
"\<lbrakk> \<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>; \<lbrace>P'\<rbrace> f \<lbrace>Q'\<rbrace> \<rbrakk> \<Longrightarrow> \<lbrace>P and P'\<rbrace> f \<lbrace>Q And Q'\<rbrace>"
unfolding valid_def by auto
lemma hoare_post_taut: "\<lbrace> P \<rbrace> a \<lbrace> \<top>\<top> \<rbrace>"
by (simp add:valid_def)
lemma wp_post_taut: "\<lbrace>\<lambda>r. True\<rbrace> f \<lbrace>\<lambda>r s. True\<rbrace>"
by (rule hoare_post_taut)
lemma wp_post_tautE: "\<lbrace>\<lambda>r. True\<rbrace> f \<lbrace>\<lambda>r s. True\<rbrace>,\<lbrace>\<lambda>f s. True\<rbrace>"
proof -
have P: "\<And>r. (case r of Inl a \<Rightarrow> True | _ \<Rightarrow> True) = True"
by (case_tac r, simp_all)
show ?thesis
by (simp add: validE_def P wp_post_taut)
qed
lemma hoare_pre_cont [simp]: "\<lbrace> \<bottom> \<rbrace> a \<lbrace> P \<rbrace>"
by (simp add:valid_def)
subsection {* Strongest Postcondition Rules *}
lemma get_sp:
"\<lbrace>P\<rbrace> get \<lbrace>\<lambda>a s. s = a \<and> P s\<rbrace>"
by(simp add:get_def valid_def)
lemma put_sp:
"\<lbrace>\<top>\<rbrace> put a \<lbrace>\<lambda>_ s. s = a\<rbrace>"
by(simp add:put_def valid_def)
lemma return_sp:
"\<lbrace>P\<rbrace> return a \<lbrace>\<lambda>b s. b = a \<and> P s\<rbrace>"
by(simp add:return_def valid_def)
lemma assert_sp:
"\<lbrace> P \<rbrace> assert Q \<lbrace> \<lambda>r s. P s \<and> Q \<rbrace>"
by (simp add: assert_def fail_def return_def valid_def)
lemma hoare_gets_sp:
"\<lbrace>P\<rbrace> gets f \<lbrace>\<lambda>rv s. rv = f s \<and> P s\<rbrace>"
by (simp add: valid_def simpler_gets_def)
lemma hoare_return_drop_var [iff]: "\<lbrace> Q \<rbrace> return x \<lbrace> \<lambda>r. Q \<rbrace>"
by (simp add:valid_def return_def)
lemma hoare_gets [intro!]: "\<lbrakk> \<And>s. P s \<Longrightarrow> Q (f s) s \<rbrakk> \<Longrightarrow> \<lbrace> P \<rbrace> gets f \<lbrace> Q \<rbrace>"
by (simp add:valid_def gets_def get_def bind_def return_def)
lemma hoare_modifyE_var [intro!]:
"\<lbrakk> \<And>s. P s \<Longrightarrow> Q (f s) \<rbrakk> \<Longrightarrow> \<lbrace> P \<rbrace> modify f \<lbrace> \<lambda>r s. Q s \<rbrace>"
by(simp add: valid_def modify_def put_def get_def bind_def)
lemma hoare_if [intro!]:
"\<lbrakk> P \<Longrightarrow> \<lbrace> Q \<rbrace> a \<lbrace> R \<rbrace>; \<not> P \<Longrightarrow> \<lbrace> Q \<rbrace> b \<lbrace> R \<rbrace> \<rbrakk> \<Longrightarrow>
\<lbrace> Q \<rbrace> if P then a else b \<lbrace> R \<rbrace>"
by (simp add:valid_def)
lemma hoare_pre_subst: "\<lbrakk> A = B; \<lbrace>A\<rbrace> a \<lbrace>C\<rbrace> \<rbrakk> \<Longrightarrow> \<lbrace>B\<rbrace> a \<lbrace>C\<rbrace>"
by(clarsimp simp:valid_def split_def)
lemma hoare_post_subst: "\<lbrakk> B = C; \<lbrace>A\<rbrace> a \<lbrace>B\<rbrace> \<rbrakk> \<Longrightarrow> \<lbrace>A\<rbrace> a \<lbrace>C\<rbrace>"
by(clarsimp simp:valid_def split_def)
lemma hoare_pre_tautI: "\<lbrakk> \<lbrace>A and P\<rbrace> a \<lbrace>B\<rbrace>; \<lbrace>A and not P\<rbrace> a \<lbrace>B\<rbrace> \<rbrakk> \<Longrightarrow> \<lbrace>A\<rbrace> a \<lbrace>B\<rbrace>"
by(fastforce simp:valid_def split_def pred_conj_def pred_neg_def)
lemma hoare_pre_imp: "\<lbrakk> \<And>s. P s \<Longrightarrow> Q s; \<lbrace>Q\<rbrace> a \<lbrace>R\<rbrace> \<rbrakk> \<Longrightarrow> \<lbrace>P\<rbrace> a \<lbrace>R\<rbrace>"
by (fastforce simp add:valid_def)
lemma hoare_post_imp: "\<lbrakk> \<And>r s. Q r s \<Longrightarrow> R r s; \<lbrace>P\<rbrace> a \<lbrace>Q\<rbrace> \<rbrakk> \<Longrightarrow> \<lbrace>P\<rbrace> a \<lbrace>R\<rbrace>"
by(fastforce simp:valid_def split_def)
lemma hoare_post_impErr': "\<lbrakk> \<lbrace>P\<rbrace> a \<lbrace>Q\<rbrace>,\<lbrace>E\<rbrace>;
\<forall>r s. Q r s \<longrightarrow> R r s;
\<forall>e s. E e s \<longrightarrow> F e s \<rbrakk> \<Longrightarrow>
\<lbrace>P\<rbrace> a \<lbrace>R\<rbrace>,\<lbrace>F\<rbrace>"
apply (simp add: validE_def)
apply (rule_tac Q="\<lambda>r s. case r of Inl a \<Rightarrow> E a s | Inr b \<Rightarrow> Q b s" in hoare_post_imp)
apply (case_tac r)
apply simp_all
done
lemma hoare_post_impErr: "\<lbrakk> \<lbrace>P\<rbrace> a \<lbrace>Q\<rbrace>,\<lbrace>E\<rbrace>;
\<And>r s. Q r s \<Longrightarrow> R r s;
\<And>e s. E e s \<Longrightarrow> F e s \<rbrakk> \<Longrightarrow>
\<lbrace>P\<rbrace> a \<lbrace>R\<rbrace>,\<lbrace>F\<rbrace>"
apply (blast intro: hoare_post_impErr')
done
lemma hoare_validE_cases:
"\<lbrakk> \<lbrace> P \<rbrace> f \<lbrace> Q \<rbrace>, \<lbrace> \<lambda>_ _. True \<rbrace>; \<lbrace> P \<rbrace> f \<lbrace> \<lambda>_ _. True \<rbrace>, \<lbrace> R \<rbrace> \<rbrakk>
\<Longrightarrow> \<lbrace> P \<rbrace> f \<lbrace> Q \<rbrace>, \<lbrace> R \<rbrace>"
by (simp add: validE_def valid_def split: sum.splits) blast
lemma hoare_post_imp_dc:
"\<lbrakk>\<lbrace>P\<rbrace> a \<lbrace>\<lambda>r. Q\<rbrace>; \<And>s. Q s \<Longrightarrow> R s\<rbrakk> \<Longrightarrow> \<lbrace>P\<rbrace> a \<lbrace>\<lambda>r. R\<rbrace>,\<lbrace>\<lambda>r. R\<rbrace>"
by (simp add: validE_def valid_def split: sum.splits) blast
lemma hoare_post_imp_dc2:
"\<lbrakk>\<lbrace>P\<rbrace> a \<lbrace>\<lambda>r. Q\<rbrace>; \<And>s. Q s \<Longrightarrow> R s\<rbrakk> \<Longrightarrow> \<lbrace>P\<rbrace> a \<lbrace>\<lambda>r. R\<rbrace>,\<lbrace>\<lambda>r s. True\<rbrace>"
by (simp add: validE_def valid_def split: sum.splits) blast
lemma hoare_post_imp_dc2E:
"\<lbrakk>\<lbrace>P\<rbrace> a \<lbrace>\<lambda>r. Q\<rbrace>; \<And>s. Q s \<Longrightarrow> R s\<rbrakk> \<Longrightarrow> \<lbrace>P\<rbrace> a \<lbrace>\<lambda>r s. True\<rbrace>, \<lbrace>\<lambda>r. R\<rbrace>"
by (simp add: validE_def valid_def split: sum.splits) fast
lemma hoare_post_imp_dc2E_actual:
"\<lbrakk>\<lbrace>P\<rbrace> a \<lbrace>\<lambda>r. R\<rbrace>\<rbrakk> \<Longrightarrow> \<lbrace>P\<rbrace> a \<lbrace>\<lambda>r s. True\<rbrace>, \<lbrace>\<lambda>r. R\<rbrace>"
by (simp add: validE_def valid_def split: sum.splits) fast
lemma hoare_post_imp_dc2_actual:
"\<lbrakk>\<lbrace>P\<rbrace> a \<lbrace>\<lambda>r. R\<rbrace>\<rbrakk> \<Longrightarrow> \<lbrace>P\<rbrace> a \<lbrace>\<lambda>r. R\<rbrace>, \<lbrace>\<lambda>r s. True\<rbrace>"
by (simp add: validE_def valid_def split: sum.splits) fast
lemma hoare_post_impE: "\<lbrakk> \<And>r s. Q r s \<Longrightarrow> R r s; \<lbrace>P\<rbrace> a \<lbrace>Q\<rbrace> \<rbrakk> \<Longrightarrow> \<lbrace>P\<rbrace> a \<lbrace>R\<rbrace>"
by (fastforce simp:valid_def split_def)
lemma hoare_conjD1:
"\<lbrace>P\<rbrace> f \<lbrace>\<lambda>rv. Q rv and R rv\<rbrace> \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>\<lambda>rv. Q rv\<rbrace>"
unfolding valid_def by auto
lemma hoare_conjD2:
"\<lbrace>P\<rbrace> f \<lbrace>\<lambda>rv. Q rv and R rv\<rbrace> \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>\<lambda>rv. R rv\<rbrace>"
unfolding valid_def by auto
lemma hoare_post_disjI1:
"\<lbrace>P\<rbrace> f \<lbrace>\<lambda>rv. Q rv\<rbrace> \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>\<lambda>rv. Q rv or R rv\<rbrace>"
unfolding valid_def by auto
lemma hoare_post_disjI2:
"\<lbrace>P\<rbrace> f \<lbrace>\<lambda>rv. R rv\<rbrace> \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>\<lambda>rv. Q rv or R rv\<rbrace>"
unfolding valid_def by auto
lemma hoare_weaken_pre:
"\<lbrakk>\<lbrace>Q\<rbrace> a \<lbrace>R\<rbrace>; \<And>s. P s \<Longrightarrow> Q s\<rbrakk> \<Longrightarrow> \<lbrace>P\<rbrace> a \<lbrace>R\<rbrace>"
apply (rule hoare_pre_imp)
prefer 2
apply assumption
apply blast
done
lemma hoare_strengthen_post:
"\<lbrakk>\<lbrace>P\<rbrace> a \<lbrace>Q\<rbrace>; \<And>r s. Q r s \<Longrightarrow> R r s\<rbrakk> \<Longrightarrow> \<lbrace>P\<rbrace> a \<lbrace>R\<rbrace>"
apply (rule hoare_post_imp)
prefer 2
apply assumption
apply blast
done
lemma use_valid: "\<lbrakk>(r, s') \<in> fst (f s); \<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>; P s \<rbrakk> \<Longrightarrow> Q r s'"
apply (simp add: valid_def)
apply blast
done
lemma use_validE_norm: "\<lbrakk> (Inr r', s') \<in> fst (B s); \<lbrace> P \<rbrace> B \<lbrace> Q \<rbrace>,\<lbrace> E \<rbrace>; P s \<rbrakk> \<Longrightarrow> Q r' s'"
apply (clarsimp simp: validE_def valid_def)
apply force
done
lemma use_validE_except: "\<lbrakk> (Inl r', s') \<in> fst (B s); \<lbrace> P \<rbrace> B \<lbrace> Q \<rbrace>,\<lbrace> E \<rbrace>; P s \<rbrakk> \<Longrightarrow> E r' s'"
apply (clarsimp simp: validE_def valid_def)
apply force
done
lemma in_inv_by_hoareD:
"\<lbrakk> \<And>P. \<lbrace>P\<rbrace> f \<lbrace>\<lambda>_. P\<rbrace>; (x,s') \<in> fst (f s) \<rbrakk> \<Longrightarrow> s' = s"
by (auto simp add: valid_def) blast
subsection "Satisfiability"
lemma exs_hoare_post_imp: "\<lbrakk>\<And>r s. Q r s \<Longrightarrow> R r s; \<lbrace>P\<rbrace> a \<exists>\<lbrace>Q\<rbrace>\<rbrakk> \<Longrightarrow> \<lbrace>P\<rbrace> a \<exists>\<lbrace>R\<rbrace>"
apply (simp add: exs_valid_def)
apply safe
apply (erule_tac x=s in allE, simp)
apply blast
done
lemma use_exs_valid: "\<lbrakk>\<lbrace>P\<rbrace> f \<exists>\<lbrace>Q\<rbrace>; P s \<rbrakk> \<Longrightarrow> \<exists>(r, s') \<in> fst (f s). Q r s'"
by (simp add: exs_valid_def)
definition "exs_postcondition P f \<equiv> (\<lambda>a b. \<exists>(rv, s)\<in> f a b. P rv s)"
lemma exs_valid_is_triple:
"exs_valid P f Q = triple_judgement P f (exs_postcondition Q (\<lambda>s f. fst (f s)))"
by (simp add: triple_judgement_def exs_postcondition_def exs_valid_def)
lemmas [wp_trip] = exs_valid_is_triple
lemma exs_valid_weaken_pre [wp_comb]:
"\<lbrakk> \<lbrace> P' \<rbrace> f \<exists>\<lbrace> Q \<rbrace>; \<And>s. P s \<Longrightarrow> P' s \<rbrakk> \<Longrightarrow> \<lbrace> P \<rbrace> f \<exists>\<lbrace> Q \<rbrace>"
apply atomize
apply (clarsimp simp: exs_valid_def)
done
lemma exs_valid_chain:
"\<lbrakk> \<lbrace> P \<rbrace> f \<exists>\<lbrace> Q \<rbrace>; \<And>s. R s \<Longrightarrow> P s; \<And>r s. Q r s \<Longrightarrow> S r s \<rbrakk> \<Longrightarrow> \<lbrace> R \<rbrace> f \<exists>\<lbrace> S \<rbrace>"
apply atomize
apply (fastforce simp: exs_valid_def Bex_def)
done
lemma exs_valid_assume_pre:
"\<lbrakk> \<And>s. P s \<Longrightarrow> \<lbrace> P \<rbrace> f \<exists>\<lbrace> Q \<rbrace> \<rbrakk> \<Longrightarrow> \<lbrace> P \<rbrace> f \<exists>\<lbrace> Q \<rbrace>"
apply (fastforce simp: exs_valid_def)
done
lemma exs_valid_bind [wp_split]:
"\<lbrakk> \<And>x. \<lbrace>B x\<rbrace> g x \<exists>\<lbrace>C\<rbrace>; \<lbrace>A\<rbrace> f \<exists>\<lbrace>B\<rbrace> \<rbrakk> \<Longrightarrow> \<lbrace> A \<rbrace> f >>= (\<lambda>x. g x) \<exists>\<lbrace> C \<rbrace>"
apply atomize
apply (clarsimp simp: exs_valid_def bind_def')
apply blast
done
lemma exs_valid_return [wp]:
"\<lbrace> Q v \<rbrace> return v \<exists>\<lbrace> Q \<rbrace>"
by (clarsimp simp: exs_valid_def return_def)
lemma exs_valid_select [wp]:
"\<lbrace> \<lambda>s. \<exists>r \<in> S. Q r s \<rbrace> select S \<exists>\<lbrace> Q \<rbrace>"
by (clarsimp simp: exs_valid_def select_def)
lemma exs_valid_get [wp]:
"\<lbrace> \<lambda>s. Q s s \<rbrace> get \<exists>\<lbrace> Q \<rbrace>"
by (clarsimp simp: exs_valid_def get_def)
lemma exs_valid_gets [wp]:
"\<lbrace> \<lambda>s. Q (f s) s \<rbrace> gets f \<exists>\<lbrace> Q \<rbrace>"
by (clarsimp simp: gets_def) wp
lemma exs_valid_put [wp]:
"\<lbrace> Q v \<rbrace> put v \<exists>\<lbrace> Q \<rbrace>"
by (clarsimp simp: put_def exs_valid_def)
lemma exs_valid_state_assert [wp]:
"\<lbrace> \<lambda>s. Q () s \<and> G s \<rbrace> state_assert G \<exists>\<lbrace> Q \<rbrace>"
by (clarsimp simp: state_assert_def exs_valid_def get_def
assert_def bind_def' return_def)
lemmas exs_valid_guard = exs_valid_state_assert
lemma exs_valid_fail [wp]:
"\<lbrace> \<lambda>_. False \<rbrace> fail \<exists>\<lbrace> Q \<rbrace>"
by (clarsimp simp: fail_def exs_valid_def)
lemma exs_valid_condition [wp]:
"\<lbrakk> \<lbrace> P \<rbrace> L \<exists>\<lbrace> Q \<rbrace>; \<lbrace> P' \<rbrace> R \<exists>\<lbrace> Q \<rbrace> \<rbrakk> \<Longrightarrow>
\<lbrace> \<lambda>s. (C s \<and> P s) \<or> (\<not> C s \<and> P' s) \<rbrace> condition C L R \<exists>\<lbrace> Q \<rbrace>"
by (clarsimp simp: condition_def exs_valid_def split: sum.splits)
subsection "MISC"
lemma hoare_return_simp:
"\<lbrace>P\<rbrace> return x \<lbrace>Q\<rbrace> = (\<forall>s. P s \<longrightarrow> Q x s)"
by (simp add: valid_def return_def)
lemma hoare_gen_asm:
"(P \<Longrightarrow> \<lbrace>P'\<rbrace> f \<lbrace>Q\<rbrace>) \<Longrightarrow> \<lbrace>P' and K P\<rbrace> f \<lbrace>Q\<rbrace>"
by (fastforce simp add: valid_def)
lemma hoare_gen_asm_lk:
"(P \<Longrightarrow> \<lbrace>P'\<rbrace> f \<lbrace>Q\<rbrace>) \<Longrightarrow> \<lbrace>K P and P'\<rbrace> f \<lbrace>Q\<rbrace>"
by (fastforce simp add: valid_def)
lemma hoare_when_wp [wp]:
"\<lbrakk> P \<Longrightarrow> \<lbrace>Q\<rbrace> f \<lbrace>R\<rbrace> \<rbrakk> \<Longrightarrow> \<lbrace>if P then Q else R ()\<rbrace> when P f \<lbrace>R\<rbrace>"
by (clarsimp simp: when_def valid_def return_def)
lemma hoare_conjI:
"\<lbrakk> \<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>; \<lbrace>P\<rbrace> f \<lbrace>R\<rbrace> \<rbrakk> \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>\<lambda>r s. Q r s \<and> R r s\<rbrace>"
unfolding valid_def by blast
lemma hoare_disjI1:
"\<lbrakk> \<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace> \<rbrakk> \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>\<lambda>r s. Q r s \<or> R r s \<rbrace>"
unfolding valid_def by blast
lemma hoare_disjI2:
"\<lbrakk> \<lbrace>P\<rbrace> f \<lbrace>R\<rbrace> \<rbrakk> \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>\<lambda>r s. Q r s \<or> R r s \<rbrace>"
unfolding valid_def by blast
lemma hoare_assume_pre:
"(\<And>s. P s \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>) \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>"
by (auto simp: valid_def)
lemma hoare_returnOk_sp:
"\<lbrace>P\<rbrace> returnOk x \<lbrace>\<lambda>r s. r = x \<and> P s\<rbrace>, \<lbrace>Q\<rbrace>"
by (simp add: valid_def validE_def returnOk_def return_def)
lemma hoare_assume_preE:
"(\<And>s. P s \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>,\<lbrace>R\<rbrace>) \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>,\<lbrace>R\<rbrace>"
by (auto simp: valid_def validE_def)
lemma hoare_allI:
"(\<And>x. \<lbrace>P\<rbrace>f\<lbrace>Q x\<rbrace>) \<Longrightarrow> \<lbrace>P\<rbrace>f\<lbrace>\<lambda>r s. \<forall>x. Q x r s\<rbrace>"
by (simp add: valid_def) blast
lemma validE_allI:
"(\<And>x. \<lbrace>P\<rbrace> f \<lbrace>\<lambda>r s. Q x r s\<rbrace>,\<lbrace>E\<rbrace>) \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>\<lambda>r s. \<forall>x. Q x r s\<rbrace>,\<lbrace>E\<rbrace>"
by (fastforce simp: valid_def validE_def split: sum.splits)
lemma hoare_exI:
"\<lbrace>P\<rbrace> f \<lbrace>Q x\<rbrace> \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>\<lambda>r s. \<exists>x. Q x r s\<rbrace>"
by (simp add: valid_def) blast
lemma hoare_impI:
"(R \<Longrightarrow> \<lbrace>P\<rbrace>f\<lbrace>Q\<rbrace>) \<Longrightarrow> \<lbrace>P\<rbrace>f\<lbrace>\<lambda>r s. R \<longrightarrow> Q r s\<rbrace>"
by (simp add: valid_def) blast
lemma validE_impI:
" \<lbrakk>\<And>E. \<lbrace>P\<rbrace> f \<lbrace>\<lambda>_ _. True\<rbrace>,\<lbrace>E\<rbrace>; (P' \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>,\<lbrace>E\<rbrace>)\<rbrakk> \<Longrightarrow>
\<lbrace>P\<rbrace> f \<lbrace>\<lambda>r s. P' \<longrightarrow> Q r s\<rbrace>, \<lbrace>E\<rbrace>"
by (fastforce simp: validE_def valid_def split: sum.splits)
lemma hoare_case_option_wp:
"\<lbrakk> \<lbrace>P\<rbrace> f None \<lbrace>Q\<rbrace>;
\<And>x. \<lbrace>P' x\<rbrace> f (Some x) \<lbrace>Q' x\<rbrace> \<rbrakk>
\<Longrightarrow> \<lbrace>case_option P P' v\<rbrace> f v \<lbrace>\<lambda>rv. case v of None \<Rightarrow> Q rv | Some x \<Rightarrow> Q' x rv\<rbrace>"
by (cases v) auto
subsection "Reasoning directly about states"
lemma in_throwError:
"((v, s') \<in> fst (throwError e s)) = (v = Inl e \<and> s' = s)"
by (simp add: throwError_def return_def)
lemma in_returnOk:
"((v', s') \<in> fst (returnOk v s)) = (v' = Inr v \<and> s' = s)"
by (simp add: returnOk_def return_def)
lemma in_bind:
"((r,s') \<in> fst ((do x \<leftarrow> f; g x od) s)) =
(\<exists>s'' x. (x, s'') \<in> fst (f s) \<and> (r, s') \<in> fst (g x s''))"
apply (simp add: bind_def split_def)
apply force
done
lemma in_bindE_R:
"((Inr r,s') \<in> fst ((doE x \<leftarrow> f; g x odE) s)) =
(\<exists>s'' x. (Inr x, s'') \<in> fst (f s) \<and> (Inr r, s') \<in> fst (g x s''))"
apply (simp add: bindE_def lift_def split_def bind_def)
apply (clarsimp simp: throwError_def return_def lift_def split: sum.splits)
apply safe
apply (case_tac a)
apply fastforce
apply fastforce
apply force
done
lemma in_bindE_L:
"((Inl r, s') \<in> fst ((doE x \<leftarrow> f; g x odE) s)) \<Longrightarrow>
(\<exists>s'' x. (Inr x, s'') \<in> fst (f s) \<and> (Inl r, s') \<in> fst (g x s'')) \<or> ((Inl r, s') \<in> fst (f s))"
apply (simp add: bindE_def lift_def bind_def)
apply safe
apply (simp add: return_def throwError_def lift_def split_def split: sum.splits if_split_asm)
apply force
done
lemma in_liftE:
"((v, s') \<in> fst (liftE f s)) = (\<exists>v'. v = Inr v' \<and> (v', s') \<in> fst (f s))"
by (force simp add: liftE_def bind_def return_def split_def)
lemma in_whenE: "((v, s') \<in> fst (whenE P f s)) = ((P \<longrightarrow> (v, s') \<in> fst (f s)) \<and>
(\<not>P \<longrightarrow> v = Inr () \<and> s' = s))"
by (simp add: whenE_def in_returnOk)
lemma inl_whenE:
"((Inl x, s') \<in> fst (whenE P f s)) = (P \<and> (Inl x, s') \<in> fst (f s))"
by (auto simp add: in_whenE)
lemma in_fail:
"r \<in> fst (fail s) = False"
by (simp add: fail_def)
lemma in_return:
"(r, s') \<in> fst (return v s) = (r = v \<and> s' = s)"
by (simp add: return_def)
lemma in_assert:
"(r, s') \<in> fst (assert P s) = (P \<and> s' = s)"
by (simp add: assert_def return_def fail_def)
lemma in_assertE:
"(r, s') \<in> fst (assertE P s) = (P \<and> r = Inr () \<and> s' = s)"
by (simp add: assertE_def returnOk_def return_def fail_def)
lemma in_assert_opt:
"(r, s') \<in> fst (assert_opt v s) = (v = Some r \<and> s' = s)"
by (auto simp: assert_opt_def in_fail in_return split: option.splits)
lemma in_get:
"(r, s') \<in> fst (get s) = (r = s \<and> s' = s)"
by (simp add: get_def)
lemma in_gets:
"(r, s') \<in> fst (gets f s) = (r = f s \<and> s' = s)"
by (simp add: simpler_gets_def)
lemma in_put:
"(r, s') \<in> fst (put x s) = (s' = x \<and> r = ())"
by (simp add: put_def)
lemma in_when:
"(v, s') \<in> fst (when P f s) = ((P \<longrightarrow> (v, s') \<in> fst (f s)) \<and> (\<not>P \<longrightarrow> v = () \<and> s' = s))"
by (simp add: when_def in_return)
lemma in_modify:
"(v, s') \<in> fst (modify f s) = (s'=f s \<and> v = ())"
by (simp add: modify_def bind_def get_def put_def)
lemma gets_the_in_monad:
"((v, s') \<in> fst (gets_the f s)) = (s' = s \<and> f s = Some v)"
by (auto simp: gets_the_def in_bind in_gets in_assert_opt split: option.split)
lemma in_alternative:
"(r,s') \<in> fst ((f \<sqinter> g) s) = ((r,s') \<in> fst (f s) \<or> (r,s') \<in> fst (g s))"
by (simp add: alternative_def)
lemmas in_monad = inl_whenE in_whenE in_liftE in_bind in_bindE_L
in_bindE_R in_returnOk in_throwError in_fail
in_assertE in_assert in_return in_assert_opt
in_get in_gets in_put in_when unlessE_whenE
unless_when in_modify gets_the_in_monad
in_alternative
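(* Illustrative example (not part of the original development): with in_monad,
   simp can compute the result set of a small concrete program directly. *)
lemma "((rv, s') \<in> fst ((do x \<leftarrow> gets f; return (Suc x) od) s)) =
         (rv = Suc (f s) \<and> s' = s)"
  by (simp add: in_monad)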
subsection "Non-Failure"
lemma no_failD:
"\<lbrakk> no_fail P m; P s \<rbrakk> \<Longrightarrow> \<not>(snd (m s))"
by (simp add: no_fail_def)
lemma non_fail_modify [wp,simp]:
"no_fail \<top> (modify f)"
by (simp add: no_fail_def modify_def get_def put_def bind_def)
lemma non_fail_gets_simp[simp]:
"no_fail P (gets f)"
unfolding no_fail_def gets_def get_def return_def bind_def
by simp
lemma non_fail_gets:
"no_fail \<top> (gets f)"
by simp
lemma non_fail_select [simp]:
"no_fail \<top> (select S)"
by (simp add: no_fail_def select_def)
lemma no_fail_pre:
"\<lbrakk> no_fail P f; \<And>s. Q s \<Longrightarrow> P s\<rbrakk> \<Longrightarrow> no_fail Q f"
by (simp add: no_fail_def)
lemma no_fail_alt [wp]:
"\<lbrakk> no_fail P f; no_fail Q g \<rbrakk> \<Longrightarrow> no_fail (P and Q) (f OR g)"
by (simp add: no_fail_def alternative_def)
lemma no_fail_return [simp, wp]:
"no_fail \<top> (return x)"
by (simp add: return_def no_fail_def)
lemma no_fail_get [simp, wp]:
"no_fail \<top> get"
by (simp add: get_def no_fail_def)
lemma no_fail_put [simp, wp]:
"no_fail \<top> (put s)"
by (simp add: put_def no_fail_def)
lemma no_fail_when [wp]:
"(P \<Longrightarrow> no_fail Q f) \<Longrightarrow> no_fail (if P then Q else \<top>) (when P f)"
by (simp add: when_def)
lemma no_fail_unless [wp]:
"(\<not>P \<Longrightarrow> no_fail Q f) \<Longrightarrow> no_fail (if P then \<top> else Q) (unless P f)"
by (simp add: unless_def when_def)
lemma no_fail_fail [simp, wp]:
"no_fail \<bottom> fail"
by (simp add: fail_def no_fail_def)
lemmas [wp] = non_fail_gets
lemma no_fail_assert [simp, wp]:
"no_fail (\<lambda>_. P) (assert P)"
by (simp add: assert_def)
lemma no_fail_assert_opt [simp, wp]:
"no_fail (\<lambda>_. P \<noteq> None) (assert_opt P)"
by (simp add: assert_opt_def split: option.splits)
lemma no_fail_case_option [wp]:
assumes f: "no_fail P f"
assumes g: "\<And>x. no_fail (Q x) (g x)"
shows "no_fail (if x = None then P else Q (the x)) (case_option f g x)"
by (clarsimp simp add: f g)
lemma no_fail_if [wp]:
"\<lbrakk> P \<Longrightarrow> no_fail Q f; \<not>P \<Longrightarrow> no_fail R g \<rbrakk> \<Longrightarrow>
no_fail (if P then Q else R) (if P then f else g)"
by simp
lemma no_fail_apply [wp]:
"no_fail P (f (g x)) \<Longrightarrow> no_fail P (f $ g x)"
by simp
lemma no_fail_undefined [simp, wp]:
"no_fail \<bottom> undefined"
by (simp add: no_fail_def)
lemma no_fail_returnOK [simp, wp]:
"no_fail \<top> (returnOk x)"
by (simp add: returnOk_def)
text {* Empty result sets imply failure *}
lemma empty_fail_modify [simp]:
"empty_fail (modify f)"
by (simp add: empty_fail_def simpler_modify_def)
lemma empty_fail_gets [simp]:
"empty_fail (gets f)"
by (simp add: empty_fail_def simpler_gets_def)
lemma empty_failD:
"\<lbrakk> empty_fail m; fst (m s) = {} \<rbrakk> \<Longrightarrow> snd (m s)"
by (simp add: empty_fail_def)
lemma empty_fail_select_f [simp]:
assumes ef: "fst S = {} \<Longrightarrow> snd S"
shows "empty_fail (select_f S)"
by (fastforce simp add: empty_fail_def select_f_def intro: ef)
lemma empty_fail_bind [simp]:
"\<lbrakk> empty_fail a; \<And>x. empty_fail (b x) \<rbrakk> \<Longrightarrow> empty_fail (a >>= b)"
apply (simp add: bind_def empty_fail_def split_def)
apply clarsimp
apply (case_tac "fst (a s) = {}")
apply blast
apply (clarsimp simp: ex_in_conv [symmetric])
done
lemma empty_fail_return [simp]:
"empty_fail (return x)"
by (simp add: empty_fail_def return_def)
lemma empty_fail_mapM [simp]:
assumes m: "\<And>x. empty_fail (m x)"
shows "empty_fail (mapM m xs)"
proof (induct xs)
case Nil
thus ?case by (simp add: mapM_def sequence_def)
next
case Cons
have P: "\<And>m x xs. mapM m (x # xs) = (do y \<leftarrow> m x; ys \<leftarrow> (mapM m xs); return (y # ys) od)"
by (simp add: mapM_def sequence_def Let_def)
from Cons
show ?case by (simp add: P m)
qed
lemma empty_fail [simp]:
"empty_fail fail"
by (simp add: fail_def empty_fail_def)
lemma empty_fail_assert_opt [simp]:
"empty_fail (assert_opt x)"
by (simp add: assert_opt_def split: option.splits)
lemma empty_fail_mk_ef:
"empty_fail (mk_ef o m)"
by (simp add: empty_fail_def mk_ef_def)
subsection "Failure"
lemma fail_wp: "\<lbrace>\<lambda>x. True\<rbrace> fail \<lbrace>Q\<rbrace>"
by (simp add: valid_def fail_def)
lemma failE_wp: "\<lbrace>\<lambda>x. True\<rbrace> fail \<lbrace>Q\<rbrace>,\<lbrace>E\<rbrace>"
by (simp add: validE_def fail_wp)
lemma fail_update [iff]:
"fail (f s) = fail s"
by (simp add: fail_def)
text {* We can prove postconditions using hoare triples *}
lemma post_by_hoare: "\<lbrakk> \<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>; P s; (r, s') \<in> fst (f s) \<rbrakk> \<Longrightarrow> Q r s'"
apply (simp add: valid_def)
apply blast
done
text {* Weakest Precondition Rules *}
lemma hoare_vcg_prop:
"\<lbrace>\<lambda>s. P\<rbrace> f \<lbrace>\<lambda>rv s. P\<rbrace>"
by (simp add: valid_def)
lemma return_wp:
"\<lbrace>P x\<rbrace> return x \<lbrace>P\<rbrace>"
by(simp add:valid_def return_def)
lemma get_wp:
"\<lbrace>\<lambda>s. P s s\<rbrace> get \<lbrace>P\<rbrace>"
by(simp add:valid_def split_def get_def)
lemma gets_wp:
"\<lbrace>\<lambda>s. P (f s) s\<rbrace> gets f \<lbrace>P\<rbrace>"
by(simp add:valid_def split_def gets_def return_def get_def bind_def)
lemma modify_wp:
"\<lbrace>\<lambda>s. P () (f s)\<rbrace> modify f \<lbrace>P\<rbrace>"
by(simp add:valid_def split_def modify_def get_def put_def bind_def)
lemma put_wp:
"\<lbrace>\<lambda>s. P () x\<rbrace> put x \<lbrace>P\<rbrace>"
by(simp add:valid_def put_def)
lemma returnOk_wp:
"\<lbrace>P x\<rbrace> returnOk x \<lbrace>P\<rbrace>,\<lbrace>E\<rbrace>"
by(simp add:validE_def2 returnOk_def return_def)
lemma throwError_wp:
"\<lbrace>E e\<rbrace> throwError e \<lbrace>P\<rbrace>,\<lbrace>E\<rbrace>"
by(simp add:validE_def2 throwError_def return_def)
lemma returnOKE_R_wp : "\<lbrace>P x\<rbrace> returnOk x \<lbrace>P\<rbrace>, -"
by (simp add: validE_R_def validE_def valid_def returnOk_def return_def)
lemma liftE_wp:
"\<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace> \<Longrightarrow> \<lbrace>P\<rbrace> liftE f \<lbrace>Q\<rbrace>,\<lbrace>E\<rbrace>"
by(clarsimp simp:valid_def validE_def2 liftE_def split_def Let_def bind_def return_def)
lemma catch_wp:
"\<lbrakk> \<And>x. \<lbrace>E x\<rbrace> handler x \<lbrace>Q\<rbrace>; \<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>,\<lbrace>E\<rbrace> \<rbrakk> \<Longrightarrow>
\<lbrace>P\<rbrace> catch f handler \<lbrace>Q\<rbrace>"
apply (unfold catch_def valid_def validE_def return_def)
apply (fastforce simp: bind_def split: sum.splits)
done
lemma handleE'_wp:
"\<lbrakk> \<And>x. \<lbrace>F x\<rbrace> handler x \<lbrace>Q\<rbrace>,\<lbrace>E\<rbrace>; \<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>,\<lbrace>F\<rbrace> \<rbrakk> \<Longrightarrow>
\<lbrace>P\<rbrace> f <handle2> handler \<lbrace>Q\<rbrace>,\<lbrace>E\<rbrace>"
apply (unfold handleE'_def valid_def validE_def return_def)
apply (fastforce simp: bind_def split: sum.splits)
done
lemma handleE_wp:
assumes x: "\<And>x. \<lbrace>F x\<rbrace> handler x \<lbrace>Q\<rbrace>,\<lbrace>E\<rbrace>"
assumes y: "\<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>,\<lbrace>F\<rbrace>"
shows "\<lbrace>P\<rbrace> f <handle> handler \<lbrace>Q\<rbrace>,\<lbrace>E\<rbrace>"
by (simp add: handleE_def handleE'_wp [OF x y])
lemma hoare_vcg_if_split:
"\<lbrakk> P \<Longrightarrow> \<lbrace>Q\<rbrace> f \<lbrace>S\<rbrace>; \<not>P \<Longrightarrow> \<lbrace>R\<rbrace> g \<lbrace>S\<rbrace> \<rbrakk> \<Longrightarrow>
\<lbrace>\<lambda>s. (P \<longrightarrow> Q s) \<and> (\<not>P \<longrightarrow> R s)\<rbrace> if P then f else g \<lbrace>S\<rbrace>"
by simp
lemma hoare_vcg_if_splitE:
"\<lbrakk> P \<Longrightarrow> \<lbrace>Q\<rbrace> f \<lbrace>S\<rbrace>,\<lbrace>E\<rbrace>; \<not>P \<Longrightarrow> \<lbrace>R\<rbrace> g \<lbrace>S\<rbrace>,\<lbrace>E\<rbrace> \<rbrakk> \<Longrightarrow>
\<lbrace>\<lambda>s. (P \<longrightarrow> Q s) \<and> (\<not>P \<longrightarrow> R s)\<rbrace> if P then f else g \<lbrace>S\<rbrace>,\<lbrace>E\<rbrace>"
by simp
lemma hoare_liftM_subst: "\<lbrace>P\<rbrace> liftM f m \<lbrace>Q\<rbrace> = \<lbrace>P\<rbrace> m \<lbrace>Q \<circ> f\<rbrace>"
apply (simp add: liftM_def bind_def return_def split_def)
apply (simp add: valid_def Ball_def)
apply (rule_tac f=All in arg_cong)
apply (rule ext)
apply fastforce
done
lemma liftE_validE[simp]: "\<lbrace>P\<rbrace> liftE f \<lbrace>Q\<rbrace>,\<lbrace>E\<rbrace> = \<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>"
apply (simp add: liftE_liftM validE_def hoare_liftM_subst o_def)
done
lemma liftM_wp: "\<lbrace>P\<rbrace> m \<lbrace>Q \<circ> f\<rbrace> \<Longrightarrow> \<lbrace>P\<rbrace> liftM f m \<lbrace>Q\<rbrace>"
by (simp add: hoare_liftM_subst)
lemma hoare_liftME_subst: "\<lbrace>P\<rbrace> liftME f m \<lbrace>Q\<rbrace>,\<lbrace>E\<rbrace> = \<lbrace>P\<rbrace> m \<lbrace>Q \<circ> f\<rbrace>,\<lbrace>E\<rbrace>"
apply (simp add: validE_def liftME_liftM hoare_liftM_subst o_def)
apply (rule_tac f="valid P m" in arg_cong)
apply (rule ext)+
apply (case_tac x, simp_all)
done
lemma liftME_wp: "\<lbrace>P\<rbrace> m \<lbrace>Q \<circ> f\<rbrace>,\<lbrace>E\<rbrace> \<Longrightarrow> \<lbrace>P\<rbrace> liftME f m \<lbrace>Q\<rbrace>,\<lbrace>E\<rbrace>"
by (simp add: hoare_liftME_subst)
(* FIXME: Move *)
lemma o_const_simp[simp]: "(\<lambda>x. C) \<circ> f = (\<lambda>x. C)"
by (simp add: o_def)
lemma hoare_vcg_split_case_option:
"\<lbrakk> \<And>x. x = None \<Longrightarrow> \<lbrace>P x\<rbrace> f x \<lbrace>R x\<rbrace>;
\<And>x y. x = Some y \<Longrightarrow> \<lbrace>Q x y\<rbrace> g x y \<lbrace>R x\<rbrace> \<rbrakk> \<Longrightarrow>
\<lbrace>\<lambda>s. (x = None \<longrightarrow> P x s) \<and>
(\<forall>y. x = Some y \<longrightarrow> Q x y s)\<rbrace>
case x of None \<Rightarrow> f x
| Some y \<Rightarrow> g x y
\<lbrace>R x\<rbrace>"
apply(simp add:valid_def split_def)
apply(case_tac x, simp_all)
done
lemma hoare_vcg_split_case_optionE:
assumes none_case: "\<And>x. x = None \<Longrightarrow> \<lbrace>P x\<rbrace> f x \<lbrace>R x\<rbrace>,\<lbrace>E x\<rbrace>"
assumes some_case: "\<And>x y. x = Some y \<Longrightarrow> \<lbrace>Q x y\<rbrace> g x y \<lbrace>R x\<rbrace>,\<lbrace>E x\<rbrace>"
shows "\<lbrace>\<lambda>s. (x = None \<longrightarrow> P x s) \<and>
(\<forall>y. x = Some y \<longrightarrow> Q x y s)\<rbrace>
case x of None \<Rightarrow> f x
| Some y \<Rightarrow> g x y
\<lbrace>R x\<rbrace>,\<lbrace>E x\<rbrace>"
apply(case_tac x, simp_all)
apply(rule none_case, simp)
apply(rule some_case, simp)
done
lemma hoare_vcg_split_case_sum:
"\<lbrakk> \<And>x a. x = Inl a \<Longrightarrow> \<lbrace>P x a\<rbrace> f x a \<lbrace>R x\<rbrace>;
\<And>x b. x = Inr b \<Longrightarrow> \<lbrace>Q x b\<rbrace> g x b \<lbrace>R x\<rbrace> \<rbrakk> \<Longrightarrow>
\<lbrace>\<lambda>s. (\<forall>a. x = Inl a \<longrightarrow> P x a s) \<and>
(\<forall>b. x = Inr b \<longrightarrow> Q x b s) \<rbrace>
case x of Inl a \<Rightarrow> f x a
| Inr b \<Rightarrow> g x b
\<lbrace>R x\<rbrace>"
apply(simp add:valid_def split_def)
apply(case_tac x, simp_all)
done
lemma hoare_vcg_split_case_sumE:
assumes left_case: "\<And>x a. x = Inl a \<Longrightarrow> \<lbrace>P x a\<rbrace> f x a \<lbrace>R x\<rbrace>"
assumes right_case: "\<And>x b. x = Inr b \<Longrightarrow> \<lbrace>Q x b\<rbrace> g x b \<lbrace>R x\<rbrace>"
shows "\<lbrace>\<lambda>s. (\<forall>a. x = Inl a \<longrightarrow> P x a s) \<and>
(\<forall>b. x = Inr b \<longrightarrow> Q x b s) \<rbrace>
case x of Inl a \<Rightarrow> f x a
| Inr b \<Rightarrow> g x b
\<lbrace>R x\<rbrace>"
apply(case_tac x, simp_all)
apply(rule left_case, simp)
apply(rule right_case, simp)
done
lemma hoare_vcg_precond_imp:
"\<lbrakk> \<lbrace>Q\<rbrace> f \<lbrace>R\<rbrace>; \<And>s. P s \<Longrightarrow> Q s \<rbrakk> \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>R\<rbrace>"
by (fastforce simp add:valid_def)
lemma hoare_vcg_precond_impE:
"\<lbrakk> \<lbrace>Q\<rbrace> f \<lbrace>R\<rbrace>,\<lbrace>E\<rbrace>; \<And>s. P s \<Longrightarrow> Q s \<rbrakk> \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>R\<rbrace>,\<lbrace>E\<rbrace>"
by (fastforce simp add:validE_def2)
lemma hoare_seq_ext:
assumes g_valid: "\<And>x. \<lbrace>B x\<rbrace> g x \<lbrace>C\<rbrace>"
assumes f_valid: "\<lbrace>A\<rbrace> f \<lbrace>B\<rbrace>"
shows "\<lbrace>A\<rbrace> do x \<leftarrow> f; g x od \<lbrace>C\<rbrace>"
apply(insert f_valid g_valid)
apply(blast intro: seq_ext')
done
lemma hoare_vcg_seqE:
assumes g_valid: "\<And>x. \<lbrace>B x\<rbrace> g x \<lbrace>C\<rbrace>,\<lbrace>E\<rbrace>"
assumes f_valid: "\<lbrace>A\<rbrace> f \<lbrace>B\<rbrace>,\<lbrace>E\<rbrace>"
shows "\<lbrace>A\<rbrace> doE x \<leftarrow> f; g x odE \<lbrace>C\<rbrace>,\<lbrace>E\<rbrace>"
apply(insert f_valid g_valid)
apply(blast intro: seqE')
done
lemma hoare_seq_ext_nobind:
"\<lbrakk> \<lbrace>B\<rbrace> g \<lbrace>C\<rbrace>;
\<lbrace>A\<rbrace> f \<lbrace>\<lambda>r s. B s\<rbrace> \<rbrakk> \<Longrightarrow>
\<lbrace>A\<rbrace> do f; g od \<lbrace>C\<rbrace>"
apply (clarsimp simp: valid_def bind_def Let_def split_def)
apply fastforce
done
lemma hoare_seq_ext_nobindE:
"\<lbrakk> \<lbrace>B\<rbrace> g \<lbrace>C\<rbrace>,\<lbrace>E\<rbrace>;
\<lbrace>A\<rbrace> f \<lbrace>\<lambda>r s. B s\<rbrace>,\<lbrace>E\<rbrace> \<rbrakk> \<Longrightarrow>
\<lbrace>A\<rbrace> doE f; g odE \<lbrace>C\<rbrace>,\<lbrace>E\<rbrace>"
apply (clarsimp simp:validE_def)
apply (simp add:bindE_def Let_def split_def bind_def lift_def)
apply (fastforce simp add: valid_def throwError_def return_def lift_def
split: sum.splits)
done
lemma hoare_chain:
"\<lbrakk> \<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>;
\<And>s. R s \<Longrightarrow> P s;
\<And>r s. Q r s \<Longrightarrow> S r s \<rbrakk> \<Longrightarrow>
\<lbrace>R\<rbrace> f \<lbrace>S\<rbrace>"
by(fastforce simp add:valid_def split_def)
lemma validE_weaken:
"\<lbrakk> \<lbrace>P'\<rbrace> A \<lbrace>Q'\<rbrace>,\<lbrace>E'\<rbrace>; \<And>s. P s \<Longrightarrow> P' s; \<And>r s. Q' r s \<Longrightarrow> Q r s; \<And>r s. E' r s \<Longrightarrow> E r s \<rbrakk> \<Longrightarrow> \<lbrace>P\<rbrace> A \<lbrace>Q\<rbrace>,\<lbrace>E\<rbrace>"
by (fastforce simp: validE_def2 split: sum.splits)
lemmas hoare_chainE = validE_weaken
lemma hoare_vcg_handle_elseE:
"\<lbrakk> \<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>,\<lbrace>E\<rbrace>;
\<And>e. \<lbrace>E e\<rbrace> g e \<lbrace>R\<rbrace>,\<lbrace>F\<rbrace>;
\<And>x. \<lbrace>Q x\<rbrace> h x \<lbrace>R\<rbrace>,\<lbrace>F\<rbrace> \<rbrakk> \<Longrightarrow>
\<lbrace>P\<rbrace> f <handle> g <else> h \<lbrace>R\<rbrace>,\<lbrace>F\<rbrace>"
apply (simp add: handle_elseE_def validE_def)
apply (rule seq_ext)
apply assumption
apply (case_tac x, simp_all)
done
lemma alternative_valid:
assumes x: "\<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>"
assumes y: "\<lbrace>P\<rbrace> f' \<lbrace>Q\<rbrace>"
shows "\<lbrace>P\<rbrace> f OR f' \<lbrace>Q\<rbrace>"
apply (simp add: valid_def alternative_def)
apply safe
apply (simp add: post_by_hoare [OF x])
apply (simp add: post_by_hoare [OF y])
done
lemma alternative_wp:
assumes x: "\<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>"
assumes y: "\<lbrace>P'\<rbrace> f' \<lbrace>Q\<rbrace>"
shows "\<lbrace>P and P'\<rbrace> f OR f' \<lbrace>Q\<rbrace>"
apply (rule alternative_valid)
apply (rule hoare_pre_imp [OF _ x], simp)
apply (rule hoare_pre_imp [OF _ y], simp)
done
lemma alternativeE_wp:
assumes x: "\<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>,\<lbrace>E\<rbrace>" and y: "\<lbrace>P'\<rbrace> f' \<lbrace>Q\<rbrace>,\<lbrace>E\<rbrace>"
shows "\<lbrace>P and P'\<rbrace> f OR f' \<lbrace>Q\<rbrace>,\<lbrace>E\<rbrace>"
apply (unfold validE_def)
apply (wp add: x y alternative_wp | simp | fold validE_def)+
done
lemma alternativeE_R_wp:
"\<lbrakk> \<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>,-; \<lbrace>P'\<rbrace> f' \<lbrace>Q\<rbrace>,- \<rbrakk> \<Longrightarrow> \<lbrace>P and P'\<rbrace> f OR f' \<lbrace>Q\<rbrace>,-"
apply (simp add: validE_R_def)
apply (rule alternativeE_wp)
apply assumption+
done
lemma alternative_R_wp:
"\<lbrakk> \<lbrace>P\<rbrace> f -,\<lbrace>Q\<rbrace>; \<lbrace>P'\<rbrace> g -,\<lbrace>Q\<rbrace> \<rbrakk> \<Longrightarrow> \<lbrace>P and P'\<rbrace> f \<sqinter> g -, \<lbrace>Q\<rbrace>"
by (fastforce simp: alternative_def validE_E_def validE_def valid_def)
lemma select_wp: "\<lbrace>\<lambda>s. \<forall>x \<in> S. Q x s\<rbrace> select S \<lbrace>Q\<rbrace>"
by (simp add: select_def valid_def)
lemma select_f_wp:
"\<lbrace>\<lambda>s. \<forall>x\<in>fst S. Q x s\<rbrace> select_f S \<lbrace>Q\<rbrace>"
by (simp add: select_f_def valid_def)
lemma state_select_wp [wp]: "\<lbrace> \<lambda>s. \<forall>t. (s, t) \<in> f \<longrightarrow> P () t \<rbrace> state_select f \<lbrace> P \<rbrace>"
apply (clarsimp simp: state_select_def)
apply (clarsimp simp: valid_def)
done
lemma condition_wp [wp]:
"\<lbrakk> \<lbrace> Q \<rbrace> A \<lbrace> P \<rbrace>; \<lbrace> R \<rbrace> B \<lbrace> P \<rbrace> \<rbrakk> \<Longrightarrow> \<lbrace> \<lambda>s. if C s then Q s else R s \<rbrace> condition C A B \<lbrace> P \<rbrace>"
apply (clarsimp simp: condition_def)
apply (clarsimp simp: valid_def pred_conj_def pred_neg_def split_def)
done
lemma conditionE_wp [wp]:
"\<lbrakk> \<lbrace> P \<rbrace> A \<lbrace> Q \<rbrace>,\<lbrace> R \<rbrace>; \<lbrace> P' \<rbrace> B \<lbrace> Q \<rbrace>,\<lbrace> R \<rbrace> \<rbrakk> \<Longrightarrow> \<lbrace> \<lambda>s. if C s then P s else P' s \<rbrace> condition C A B \<lbrace>Q\<rbrace>,\<lbrace>R\<rbrace>"
apply (clarsimp simp: condition_def)
apply (clarsimp simp: validE_def valid_def)
done
lemma state_assert_wp [wp]: "\<lbrace> \<lambda>s. f s \<longrightarrow> P () s \<rbrace> state_assert f \<lbrace> P \<rbrace>"
apply (clarsimp simp: state_assert_def get_def
assert_def bind_def valid_def return_def fail_def)
done
text {* The weakest precondition handler which works on conjunction *}
lemma hoare_vcg_conj_lift:
assumes x: "\<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>"
assumes y: "\<lbrace>P'\<rbrace> f \<lbrace>Q'\<rbrace>"
shows "\<lbrace>\<lambda>s. P s \<and> P' s\<rbrace> f \<lbrace>\<lambda>rv s. Q rv s \<and> Q' rv s\<rbrace>"
apply (subst bipred_conj_def[symmetric], rule hoare_post_conj)
apply (rule hoare_pre_imp [OF _ x], simp)
apply (rule hoare_pre_imp [OF _ y], simp)
done
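(* Illustrative sketch (informal): together with hoare_pre, this rule lets a
   conjunctive postcondition be proved from separate triples for each conjunct,
   e.g. \<lbrace>P and P'\<rbrace> f \<lbrace>\<lambda>rv s. Q rv s \<and> Q' rv s\<rbrace> follows from
   \<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace> and \<lbrace>P'\<rbrace> f \<lbrace>Q'\<rbrace>. *)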
lemma hoare_vcg_conj_liftE1:
"\<lbrakk> \<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>,-; \<lbrace>P'\<rbrace> f \<lbrace>Q'\<rbrace>,\<lbrace>E\<rbrace> \<rbrakk> \<Longrightarrow>
\<lbrace>P and P'\<rbrace> f \<lbrace>\<lambda>r s. Q r s \<and> Q' r s\<rbrace>,\<lbrace>E\<rbrace>"
unfolding valid_def validE_R_def validE_def
apply (clarsimp simp: split_def split: sum.splits)
apply (erule allE, erule (1) impE)
apply (erule allE, erule (1) impE)
apply (drule (1) bspec)
apply (drule (1) bspec)
apply clarsimp
done
lemma hoare_vcg_disj_lift:
assumes x: "\<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>"
assumes y: "\<lbrace>P'\<rbrace> f \<lbrace>Q'\<rbrace>"
shows "\<lbrace>\<lambda>s. P s \<or> P' s\<rbrace> f \<lbrace>\<lambda>rv s. Q rv s \<or> Q' rv s\<rbrace>"
apply (simp add: valid_def)
apply safe
apply (erule(1) post_by_hoare [OF x])
apply (erule notE)
apply (erule(1) post_by_hoare [OF y])
done
lemma hoare_vcg_const_Ball_lift:
"\<lbrakk> \<And>x. x \<in> S \<Longrightarrow> \<lbrace>P x\<rbrace> f \<lbrace>Q x\<rbrace> \<rbrakk> \<Longrightarrow> \<lbrace>\<lambda>s. \<forall>x\<in>S. P x s\<rbrace> f \<lbrace>\<lambda>rv s. \<forall>x\<in>S. Q x rv s\<rbrace>"
by (fastforce simp: valid_def)
lemma hoare_vcg_const_Ball_lift_R:
"\<lbrakk> \<And>x. x \<in> S \<Longrightarrow> \<lbrace>P x\<rbrace> f \<lbrace>Q x\<rbrace>,- \<rbrakk> \<Longrightarrow>
\<lbrace>\<lambda>s. \<forall>x \<in> S. P x s\<rbrace> f \<lbrace>\<lambda>rv s. \<forall>x \<in> S. Q x rv s\<rbrace>,-"
apply (simp add: validE_R_def validE_def)
apply (rule hoare_strengthen_post)
apply (erule hoare_vcg_const_Ball_lift)
apply (simp split: sum.splits)
done
lemma hoare_vcg_all_lift:
"\<lbrakk> \<And>x. \<lbrace>P x\<rbrace> f \<lbrace>Q x\<rbrace> \<rbrakk> \<Longrightarrow> \<lbrace>\<lambda>s. \<forall>x. P x s\<rbrace> f \<lbrace>\<lambda>rv s. \<forall>x. Q x rv s\<rbrace>"
by (fastforce simp: valid_def)
lemma hoare_vcg_all_lift_R:
"(\<And>x. \<lbrace>P x\<rbrace> f \<lbrace>Q x\<rbrace>, -) \<Longrightarrow> \<lbrace>\<lambda>s. \<forall>x. P x s\<rbrace> f \<lbrace>\<lambda>rv s. \<forall>x. Q x rv s\<rbrace>, -"
by (rule hoare_vcg_const_Ball_lift_R[where S=UNIV, simplified])
lemma hoare_vcg_imp_lift:
"\<lbrakk> \<lbrace>P'\<rbrace> f \<lbrace>\<lambda>rv s. \<not> P rv s\<rbrace>; \<lbrace>Q'\<rbrace> f \<lbrace>Q\<rbrace> \<rbrakk> \<Longrightarrow> \<lbrace>\<lambda>s. P' s \<or> Q' s\<rbrace> f \<lbrace>\<lambda>rv s. P rv s \<longrightarrow> Q rv s\<rbrace>"
apply (simp only: imp_conv_disj)
apply (erule(1) hoare_vcg_disj_lift)
done
lemma hoare_vcg_imp_lift':
"\<lbrakk> \<lbrace>P'\<rbrace> f \<lbrace>\<lambda>rv s. \<not> P rv s\<rbrace>; \<lbrace>Q'\<rbrace> f \<lbrace>Q\<rbrace> \<rbrakk> \<Longrightarrow> \<lbrace>\<lambda>s. \<not> P' s \<longrightarrow> Q' s\<rbrace> f \<lbrace>\<lambda>rv s. P rv s \<longrightarrow> Q rv s\<rbrace>"
apply (simp only: imp_conv_disj)
apply simp
apply (erule (1) hoare_vcg_imp_lift)
done
lemma hoare_absorb_imp:
"\<lbrace> P \<rbrace> f \<lbrace>\<lambda>rv s. Q rv s \<and> R rv s \<rbrace> \<Longrightarrow> \<lbrace> P \<rbrace> f \<lbrace>\<lambda>rv s. Q rv s \<longrightarrow> R rv s \<rbrace>"
by (erule hoare_post_imp[rotated], blast)
lemma hoare_weaken_imp:
"\<lbrakk> \<And>rv s. Q rv s \<Longrightarrow> Q' rv s ; \<lbrace>P\<rbrace> f \<lbrace>\<lambda>rv s. Q' rv s \<longrightarrow> R rv s\<rbrace> \<rbrakk>
\<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>\<lambda>rv s. Q rv s \<longrightarrow> R rv s\<rbrace>"
by (clarsimp simp: NonDetMonad.valid_def split_def)
lemma hoare_vcg_const_imp_lift:
"\<lbrakk> P \<Longrightarrow> \<lbrace>Q\<rbrace> m \<lbrace>R\<rbrace> \<rbrakk> \<Longrightarrow>
\<lbrace>\<lambda>s. P \<longrightarrow> Q s\<rbrace> m \<lbrace>\<lambda>rv s. P \<longrightarrow> R rv s\<rbrace>"
by (cases P, simp_all add: hoare_vcg_prop)
lemma hoare_vcg_const_imp_lift_R:
"(P \<Longrightarrow> \<lbrace>Q\<rbrace> m \<lbrace>R\<rbrace>,-) \<Longrightarrow> \<lbrace>\<lambda>s. P \<longrightarrow> Q s\<rbrace> m \<lbrace>\<lambda>rv s. P \<longrightarrow> R rv s\<rbrace>,-"
by (fastforce simp: validE_R_def validE_def valid_def split_def split: sum.splits)
lemma hoare_weak_lift_imp:
"\<lbrace>P'\<rbrace> f \<lbrace>Q\<rbrace> \<Longrightarrow> \<lbrace>\<lambda>s. P \<longrightarrow> P' s\<rbrace> f \<lbrace>\<lambda>rv s. P \<longrightarrow> Q rv s\<rbrace>"
by (auto simp add: valid_def split_def)
lemma hoare_vcg_weaken_imp:
"\<lbrakk> \<And>rv s. Q rv s \<Longrightarrow> Q' rv s ; \<lbrace> P \<rbrace> f \<lbrace>\<lambda>rv s. Q' rv s \<longrightarrow> R rv s\<rbrace> \<rbrakk>
\<Longrightarrow> \<lbrace> P \<rbrace> f \<lbrace>\<lambda>rv s. Q rv s \<longrightarrow> R rv s\<rbrace>"
by (clarsimp simp: valid_def split_def)
lemma hoare_vcg_ex_lift:
"\<lbrakk> \<And>x. \<lbrace>P x\<rbrace> f \<lbrace>Q x\<rbrace> \<rbrakk> \<Longrightarrow> \<lbrace>\<lambda>s. \<exists>x. P x s\<rbrace> f \<lbrace>\<lambda>rv s. \<exists>x. Q x rv s\<rbrace>"
by (clarsimp simp: valid_def, blast)
lemma hoare_vcg_ex_lift_R1:
"(\<And>x. \<lbrace>P x\<rbrace> f \<lbrace>Q\<rbrace>, -) \<Longrightarrow> \<lbrace>\<lambda>s. \<exists>x. P x s\<rbrace> f \<lbrace>Q\<rbrace>, -"
by (fastforce simp: valid_def validE_R_def validE_def split: sum.splits)
(* for instantiations *)
lemma hoare_triv: "\<lbrace>P\<rbrace>f\<lbrace>Q\<rbrace> \<Longrightarrow> \<lbrace>P\<rbrace>f\<lbrace>Q\<rbrace>" .
lemma hoare_trivE: "\<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>,\<lbrace>E\<rbrace> \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>,\<lbrace>E\<rbrace>" .
lemma hoare_trivE_R: "\<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>,- \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>,-" .
lemma hoare_trivR_R: "\<lbrace>P\<rbrace> f -,\<lbrace>E\<rbrace> \<Longrightarrow> \<lbrace>P\<rbrace> f -,\<lbrace>E\<rbrace>" .
lemma hoare_weaken_preE_E:
"\<lbrakk> \<lbrace>P'\<rbrace> f -,\<lbrace>Q\<rbrace>; \<And>s. P s \<Longrightarrow> P' s \<rbrakk> \<Longrightarrow> \<lbrace>P\<rbrace> f -,\<lbrace>Q\<rbrace>"
by (fastforce simp add: validE_E_def validE_def valid_def)
lemma hoare_vcg_E_conj:
"\<lbrakk> \<lbrace>P\<rbrace> f -,\<lbrace>E\<rbrace>; \<lbrace>P'\<rbrace> f \<lbrace>Q'\<rbrace>,\<lbrace>E'\<rbrace> \<rbrakk>
\<Longrightarrow> \<lbrace>\<lambda>s. P s \<and> P' s\<rbrace> f \<lbrace>Q'\<rbrace>, \<lbrace>\<lambda>rv s. E rv s \<and> E' rv s\<rbrace>"
apply (unfold validE_def validE_E_def)
apply (rule hoare_post_imp [OF _ hoare_vcg_conj_lift], simp_all)
apply (case_tac r, simp_all)
done
lemma hoare_vcg_E_elim:
"\<lbrakk> \<lbrace>P\<rbrace> f -,\<lbrace>E\<rbrace>; \<lbrace>P'\<rbrace> f \<lbrace>Q\<rbrace>,- \<rbrakk>
\<Longrightarrow> \<lbrace>\<lambda>s. P s \<and> P' s\<rbrace> f \<lbrace>Q\<rbrace>,\<lbrace>E\<rbrace>"
by (rule hoare_post_impErr [OF hoare_vcg_E_conj],
(simp add: validE_R_def)+)
lemma hoare_vcg_R_conj:
"\<lbrakk> \<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>,-; \<lbrace>P'\<rbrace> f \<lbrace>Q'\<rbrace>,- \<rbrakk>
\<Longrightarrow> \<lbrace>\<lambda>s. P s \<and> P' s\<rbrace> f \<lbrace>\<lambda>rv s. Q rv s \<and> Q' rv s\<rbrace>,-"
apply (unfold validE_R_def validE_def)
apply (rule hoare_post_imp [OF _ hoare_vcg_conj_lift], simp_all)
apply (case_tac r, simp_all)
done
lemma valid_validE:
"\<lbrace>P\<rbrace> f \<lbrace>\<lambda>rv. Q\<rbrace> \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>\<lambda>rv. Q\<rbrace>,\<lbrace>\<lambda>rv. Q\<rbrace>"
apply (simp add: validE_def)
done
lemma valid_validE2:
"\<lbrakk> \<lbrace>P\<rbrace> f \<lbrace>\<lambda>_. Q'\<rbrace>; \<And>s. Q' s \<Longrightarrow> Q s; \<And>s. Q' s \<Longrightarrow> E s \<rbrakk> \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>\<lambda>_. Q\<rbrace>,\<lbrace>\<lambda>_. E\<rbrace>"
unfolding valid_def validE_def
by (clarsimp split: sum.splits) blast
lemma validE_valid: "\<lbrace>P\<rbrace> f \<lbrace>\<lambda>rv. Q\<rbrace>,\<lbrace>\<lambda>rv. Q\<rbrace> \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>\<lambda>rv. Q\<rbrace>"
apply (unfold validE_def)
apply (rule hoare_post_imp)
defer
apply assumption
apply (case_tac r, simp_all)
done
lemma valid_validE_R:
"\<lbrace>P\<rbrace> f \<lbrace>\<lambda>rv. Q\<rbrace> \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>\<lambda>rv. Q\<rbrace>,-"
by (simp add: validE_R_def hoare_post_impErr [OF valid_validE])
lemma valid_validE_E:
"\<lbrace>P\<rbrace> f \<lbrace>\<lambda>rv. Q\<rbrace> \<Longrightarrow> \<lbrace>P\<rbrace> f -,\<lbrace>\<lambda>rv. Q\<rbrace>"
by (simp add: validE_E_def hoare_post_impErr [OF valid_validE])
lemma validE_validE_R: "\<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>,\<lbrace>\<top>\<top>\<rbrace> \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>,-"
by (simp add: validE_R_def)
lemma validE_R_validE: "\<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>,- \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>,\<lbrace>\<top>\<top>\<rbrace>"
by (simp add: validE_R_def)
lemma hoare_post_imp_R: "\<lbrakk> \<lbrace>P\<rbrace> f \<lbrace>Q'\<rbrace>,-; \<And>r s. Q' r s \<Longrightarrow> Q r s \<rbrakk> \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>,-"
apply (unfold validE_R_def)
apply (rule hoare_post_impErr, simp+)
done
lemma hoare_post_comb_imp_conj:
"\<lbrakk> \<lbrace>P'\<rbrace> f \<lbrace>Q\<rbrace>; \<lbrace>P\<rbrace> f \<lbrace>Q'\<rbrace>; \<And>s. P s \<Longrightarrow> P' s \<rbrakk> \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>\<lambda>rv s. Q rv s \<and> Q' rv s\<rbrace>"
apply (rule hoare_pre_imp)
defer
apply (rule hoare_vcg_conj_lift)
apply assumption+
apply simp
done
lemma hoare_vcg_precond_impE_R: "\<lbrakk> \<lbrace>P'\<rbrace> f \<lbrace>Q\<rbrace>,-; \<And>s. P s \<Longrightarrow> P' s \<rbrakk> \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>,-"
by (unfold validE_R_def, rule hoare_vcg_precond_impE, simp+)
lemma valid_is_triple:
"valid P f Q = triple_judgement P f (postcondition Q (\<lambda>s f. fst (f s)))"
by (simp add: triple_judgement_def valid_def postcondition_def)
lemma validE_is_triple:
"validE P f Q E = triple_judgement P f
(postconditions (postcondition Q (\<lambda>s f. {(rv, s'). (Inr rv, s') \<in> fst (f s)}))
(postcondition E (\<lambda>s f. {(rv, s'). (Inl rv, s') \<in> fst (f s)})))"
apply (simp add: validE_def triple_judgement_def valid_def postcondition_def
postconditions_def split_def split: sum.split)
apply fastforce
done
lemma validE_R_is_triple:
"validE_R P f Q = triple_judgement P f
(postcondition Q (\<lambda>s f. {(rv, s'). (Inr rv, s') \<in> fst (f s)}))"
by (simp add: validE_R_def validE_is_triple postconditions_def postcondition_def)
lemma validE_E_is_triple:
"validE_E P f E = triple_judgement P f
(postcondition E (\<lambda>s f. {(rv, s'). (Inl rv, s') \<in> fst (f s)}))"
by (simp add: validE_E_def validE_is_triple postconditions_def postcondition_def)
lemmas hoare_wp_combs =
hoare_post_comb_imp_conj hoare_vcg_precond_imp hoare_vcg_conj_lift
lemmas hoare_wp_combsE =
hoare_vcg_precond_impE
hoare_vcg_precond_impE_R
validE_validE_R
hoare_vcg_R_conj
hoare_vcg_E_elim
hoare_vcg_E_conj
lemmas hoare_wp_state_combsE =
hoare_vcg_precond_impE[OF valid_validE]
hoare_vcg_precond_impE_R[OF valid_validE_R]
valid_validE_R
hoare_vcg_R_conj[OF valid_validE_R]
hoare_vcg_E_elim[OF valid_validE_E]
hoare_vcg_E_conj[OF valid_validE_E]
lemmas hoare_wp_splits [wp_split] =
hoare_seq_ext hoare_vcg_seqE handleE'_wp handleE_wp
validE_validE_R [OF hoare_vcg_seqE [OF validE_R_validE]]
validE_validE_R [OF handleE'_wp [OF validE_R_validE]]
validE_validE_R [OF handleE_wp [OF validE_R_validE]]
catch_wp hoare_vcg_if_split hoare_vcg_if_splitE
validE_validE_R [OF hoare_vcg_if_splitE [OF validE_R_validE validE_R_validE]]
liftM_wp liftME_wp
validE_validE_R [OF liftME_wp [OF validE_R_validE]]
validE_valid
lemmas [wp_comb] = hoare_wp_state_combsE hoare_wp_combsE hoare_wp_combs
lemmas [wp] = hoare_vcg_prop
wp_post_taut
return_wp
put_wp
get_wp
gets_wp
modify_wp
returnOk_wp
throwError_wp
fail_wp
failE_wp
liftE_wp
select_f_wp
lemmas [wp_trip] = valid_is_triple validE_is_triple validE_E_is_triple validE_R_is_triple
text {* Simplifications on conjunction *}
lemma hoare_post_eq: "\<lbrakk> Q = Q'; \<lbrace>P\<rbrace> f \<lbrace>Q'\<rbrace> \<rbrakk> \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>"
by simp
lemma hoare_post_eqE1: "\<lbrakk> Q = Q'; \<lbrace>P\<rbrace> f \<lbrace>Q'\<rbrace>,\<lbrace>E\<rbrace> \<rbrakk> \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>,\<lbrace>E\<rbrace>"
by simp
lemma hoare_post_eqE2: "\<lbrakk> E = E'; \<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>,\<lbrace>E'\<rbrace> \<rbrakk> \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>,\<lbrace>E\<rbrace>"
by simp
lemma hoare_post_eqE_R: "\<lbrakk> Q = Q'; \<lbrace>P\<rbrace> f \<lbrace>Q'\<rbrace>,- \<rbrakk> \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>,-"
by simp
lemma pred_conj_apply_elim: "(\<lambda>r. Q r and Q' r) = (\<lambda>r s. Q r s \<and> Q' r s)"
by (simp add: pred_conj_def)
lemma pred_conj_conj_elim: "(\<lambda>r s. (Q r and Q' r) s \<and> Q'' r s) = (\<lambda>r s. Q r s \<and> Q' r s \<and> Q'' r s)"
by simp
lemma conj_assoc_apply: "(\<lambda>r s. (Q r s \<and> Q' r s) \<and> Q'' r s) = (\<lambda>r s. Q r s \<and> Q' r s \<and> Q'' r s)"
by simp
lemma all_elim: "(\<lambda>rv s. \<forall>x. P rv s) = P"
by simp
lemma all_conj_elim: "(\<lambda>rv s. (\<forall>x. P rv s) \<and> Q rv s) = (\<lambda>rv s. P rv s \<and> Q rv s)"
by simp
lemmas vcg_rhs_simps = pred_conj_apply_elim pred_conj_conj_elim
conj_assoc_apply all_elim all_conj_elim
lemma if_apply_reduct: "\<lbrace>P\<rbrace> If P' (f x) (g x) \<lbrace>Q\<rbrace> \<Longrightarrow> \<lbrace>P\<rbrace> If P' f g x \<lbrace>Q\<rbrace>"
by (cases P', simp_all)
lemma if_apply_reductE: "\<lbrace>P\<rbrace> If P' (f x) (g x) \<lbrace>Q\<rbrace>,\<lbrace>E\<rbrace> \<Longrightarrow> \<lbrace>P\<rbrace> If P' f g x \<lbrace>Q\<rbrace>,\<lbrace>E\<rbrace>"
by (cases P', simp_all)
lemma if_apply_reductE_R: "\<lbrace>P\<rbrace> If P' (f x) (g x) \<lbrace>Q\<rbrace>,- \<Longrightarrow> \<lbrace>P\<rbrace> If P' f g x \<lbrace>Q\<rbrace>,-"
by (cases P', simp_all)
lemmas hoare_wp_simps [wp_split] =
vcg_rhs_simps [THEN hoare_post_eq] vcg_rhs_simps [THEN hoare_post_eqE1]
vcg_rhs_simps [THEN hoare_post_eqE2] vcg_rhs_simps [THEN hoare_post_eqE_R]
if_apply_reduct if_apply_reductE if_apply_reductE_R TrueI
schematic_goal if_apply_test: "\<lbrace>?Q\<rbrace> (if A then returnOk else K fail) x \<lbrace>P\<rbrace>,\<lbrace>E\<rbrace>"
by wpsimp
lemma hoare_elim_pred_conj:
"\<lbrace>P\<rbrace> f \<lbrace>\<lambda>r s. Q r s \<and> Q' r s\<rbrace> \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>\<lambda>r. Q r and Q' r\<rbrace>"
by (unfold pred_conj_def)
lemma hoare_elim_pred_conjE1:
"\<lbrace>P\<rbrace> f \<lbrace>\<lambda>r s. Q r s \<and> Q' r s\<rbrace>,\<lbrace>E\<rbrace> \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>\<lambda>r. Q r and Q' r\<rbrace>,\<lbrace>E\<rbrace>"
by (unfold pred_conj_def)
lemma hoare_elim_pred_conjE2:
"\<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>, \<lbrace>\<lambda>x s. E x s \<and> E' x s\<rbrace> \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>,\<lbrace>\<lambda>x. E x and E' x\<rbrace>"
by (unfold pred_conj_def)
lemma hoare_elim_pred_conjE_R:
"\<lbrace>P\<rbrace> f \<lbrace>\<lambda>r s. Q r s \<and> Q' r s\<rbrace>,- \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>\<lambda>r. Q r and Q' r\<rbrace>,-"
by (unfold pred_conj_def)
lemmas hoare_wp_pred_conj_elims =
hoare_elim_pred_conj hoare_elim_pred_conjE1
hoare_elim_pred_conjE2 hoare_elim_pred_conjE_R
lemmas hoare_weaken_preE = hoare_vcg_precond_impE
lemmas hoare_pre [wp_pre] =
hoare_weaken_pre
hoare_weaken_preE
hoare_vcg_precond_impE_R
hoare_weaken_preE_E
declare no_fail_pre [wp_pre]
bundle no_pre = hoare_pre [wp_pre del] no_fail_pre [wp_pre del]
text {* Miscellaneous lemmas on hoare triples *}
lemma hoare_vcg_mp:
assumes a: "\<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>"
assumes b: "\<lbrace>P\<rbrace> f \<lbrace>\<lambda>r s. Q r s \<longrightarrow> Q' r s\<rbrace>"
shows "\<lbrace>P\<rbrace> f \<lbrace>Q'\<rbrace>"
using assms
by (auto simp: valid_def split_def)
(* note about this precond stuff: rules get a chance to bind directly
before any of their combined forms. As a result, these precondition
implication rules are only used when needed. *)
lemma hoare_add_post:
assumes r: "\<lbrace>P'\<rbrace> f \<lbrace>Q'\<rbrace>"
assumes impP: "\<And>s. P s \<Longrightarrow> P' s"
assumes impQ: "\<lbrace>P\<rbrace> f \<lbrace>\<lambda>rv s. Q' rv s \<longrightarrow> Q rv s\<rbrace>"
shows "\<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>"
apply (rule hoare_chain)
apply (rule hoare_vcg_conj_lift)
apply (rule r)
apply (rule impQ)
apply simp
apply (erule impP)
apply simp
done
lemma hoare_whenE_wp:
"(P \<Longrightarrow> \<lbrace>Q\<rbrace> f \<lbrace>R\<rbrace>, \<lbrace>E\<rbrace>) \<Longrightarrow> \<lbrace>if P then Q else R ()\<rbrace> whenE P f \<lbrace>R\<rbrace>, \<lbrace>E\<rbrace>"
unfolding whenE_def by clarsimp wp
lemma hoare_gen_asmE:
"(P \<Longrightarrow> \<lbrace>P'\<rbrace> f \<lbrace>Q\<rbrace>,-) \<Longrightarrow> \<lbrace>P' and K P\<rbrace> f \<lbrace>Q\<rbrace>, -"
by (simp add: validE_R_def validE_def valid_def) blast
lemma hoare_list_case:
assumes P1: "\<lbrace>P1\<rbrace> f f1 \<lbrace>Q\<rbrace>"
assumes P2: "\<And>y ys. xs = y#ys \<Longrightarrow> \<lbrace>P2 y ys\<rbrace> f (f2 y ys) \<lbrace>Q\<rbrace>"
shows "\<lbrace>case xs of [] \<Rightarrow> P1 | y#ys \<Rightarrow> P2 y ys\<rbrace>
f (case xs of [] \<Rightarrow> f1 | y#ys \<Rightarrow> f2 y ys)
\<lbrace>Q\<rbrace>"
apply (cases xs; simp)
apply (rule P1)
apply (rule P2)
apply simp
done
lemma hoare_unless_wp:
"(\<not>P \<Longrightarrow> \<lbrace>Q\<rbrace> f \<lbrace>R\<rbrace>) \<Longrightarrow> \<lbrace>if P then R () else Q\<rbrace> unless P f \<lbrace>R\<rbrace>"
unfolding unless_def by wp auto
lemma hoare_use_eq:
assumes x: "\<And>P. \<lbrace>\<lambda>s. P (f s)\<rbrace> m \<lbrace>\<lambda>rv s. P (f s)\<rbrace>"
assumes y: "\<And>f. \<lbrace>\<lambda>s. P f s\<rbrace> m \<lbrace>\<lambda>rv s. Q f s\<rbrace>"
shows "\<lbrace>\<lambda>s. P (f s) s\<rbrace> m \<lbrace>\<lambda>rv s. Q (f s :: 'c :: type) s \<rbrace>"
apply (rule_tac Q="\<lambda>rv s. \<exists>f'. f' = f s \<and> Q f' s" in hoare_post_imp)
apply simp
apply (wpsimp wp: hoare_vcg_ex_lift x y)
done
lemma hoare_return_sp:
"\<lbrace>P\<rbrace> return x \<lbrace>\<lambda>r. P and K (r = x)\<rbrace>"
by (simp add: valid_def return_def)
lemma hoare_fail_any [simp]:
"\<lbrace>P\<rbrace> fail \<lbrace>Q\<rbrace>" by wp
lemma hoare_failE [simp]: "\<lbrace>P\<rbrace> fail \<lbrace>Q\<rbrace>,\<lbrace>E\<rbrace>" by wp
lemma hoare_FalseE [simp]:
"\<lbrace>\<lambda>s. False\<rbrace> f \<lbrace>Q\<rbrace>,\<lbrace>E\<rbrace>"
by (simp add: valid_def validE_def)
lemma hoare_K_bind [wp]:
"\<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace> \<Longrightarrow> \<lbrace>P\<rbrace> K_bind f x \<lbrace>Q\<rbrace>"
by simp
text {* Setting up the precondition case splitter. *}
lemma wpc_helper_valid:
"\<lbrace>Q\<rbrace> g \<lbrace>S\<rbrace> \<Longrightarrow> wpc_helper (P, P') (Q, Q') \<lbrace>P\<rbrace> g \<lbrace>S\<rbrace>"
by (clarsimp simp: wpc_helper_def elim!: hoare_pre)
lemma wpc_helper_validE:
"\<lbrace>Q\<rbrace> f \<lbrace>R\<rbrace>,\<lbrace>E\<rbrace> \<Longrightarrow> wpc_helper (P, P') (Q, Q') \<lbrace>P\<rbrace> f \<lbrace>R\<rbrace>,\<lbrace>E\<rbrace>"
by (clarsimp simp: wpc_helper_def elim!: hoare_pre)
lemma wpc_helper_validE_R:
"\<lbrace>Q\<rbrace> f \<lbrace>R\<rbrace>,- \<Longrightarrow> wpc_helper (P, P') (Q, Q') \<lbrace>P\<rbrace> f \<lbrace>R\<rbrace>,-"
by (clarsimp simp: wpc_helper_def elim!: hoare_pre)
lemma wpc_helper_validR_R:
"\<lbrace>Q\<rbrace> f -,\<lbrace>E\<rbrace> \<Longrightarrow> wpc_helper (P, P') (Q, Q') \<lbrace>P\<rbrace> f -,\<lbrace>E\<rbrace>"
by (clarsimp simp: wpc_helper_def elim!: hoare_pre)
lemma wpc_helper_no_fail_final:
"no_fail Q f \<Longrightarrow> wpc_helper (P, P') (Q, Q') (no_fail P f)"
by (clarsimp simp: wpc_helper_def elim!: no_fail_pre)
lemma wpc_helper_empty_fail_final:
"empty_fail f \<Longrightarrow> wpc_helper (P, P') (Q, Q') (empty_fail f)"
by (clarsimp simp: wpc_helper_def)
lemma wpc_helper_validNF:
"\<lbrace>Q\<rbrace> g \<lbrace>S\<rbrace>! \<Longrightarrow> wpc_helper (P, P') (Q, Q') \<lbrace>P\<rbrace> g \<lbrace>S\<rbrace>!"
apply (clarsimp simp: wpc_helper_def)
by (metis hoare_wp_combs(2) no_fail_pre validNF_def)
wpc_setup "\<lambda>m. \<lbrace>P\<rbrace> m \<lbrace>Q\<rbrace>" wpc_helper_valid
wpc_setup "\<lambda>m. \<lbrace>P\<rbrace> m \<lbrace>Q\<rbrace>,\<lbrace>E\<rbrace>" wpc_helper_validE
wpc_setup "\<lambda>m. \<lbrace>P\<rbrace> m \<lbrace>Q\<rbrace>,-" wpc_helper_validE_R
wpc_setup "\<lambda>m. \<lbrace>P\<rbrace> m -,\<lbrace>E\<rbrace>" wpc_helper_validR_R
wpc_setup "\<lambda>m. no_fail P m" wpc_helper_no_fail_final
wpc_setup "\<lambda>m. empty_fail m" wpc_helper_empty_fail_final
wpc_setup "\<lambda>m. \<lbrace>P\<rbrace> m \<lbrace>Q\<rbrace>!" wpc_helper_validNF
lemma in_liftM:
"((r, s') \<in> fst (liftM t f s)) = (\<exists>r'. (r', s') \<in> fst (f s) \<and> r = t r')"
apply (simp add: liftM_def return_def bind_def)
apply (simp add: Bex_def)
done
(* FIXME: eliminate *)
lemmas handy_liftM_lemma = in_liftM
lemma hoare_fun_app_wp[wp]:
"\<lbrace>P\<rbrace> f' x \<lbrace>Q'\<rbrace> \<Longrightarrow> \<lbrace>P\<rbrace> f' $ x \<lbrace>Q'\<rbrace>"
"\<lbrace>P\<rbrace> f x \<lbrace>Q\<rbrace>,\<lbrace>E\<rbrace> \<Longrightarrow> \<lbrace>P\<rbrace> f $ x \<lbrace>Q\<rbrace>,\<lbrace>E\<rbrace>"
"\<lbrace>P\<rbrace> f x \<lbrace>Q\<rbrace>,- \<Longrightarrow> \<lbrace>P\<rbrace> f $ x \<lbrace>Q\<rbrace>,-"
"\<lbrace>P\<rbrace> f x -,\<lbrace>E\<rbrace> \<Longrightarrow> \<lbrace>P\<rbrace> f $ x -,\<lbrace>E\<rbrace>"
by simp+
lemma hoare_validE_pred_conj:
"\<lbrakk> \<lbrace>P\<rbrace>f\<lbrace>Q\<rbrace>,\<lbrace>E\<rbrace>; \<lbrace>P\<rbrace>f\<lbrace>R\<rbrace>,\<lbrace>E\<rbrace> \<rbrakk> \<Longrightarrow> \<lbrace>P\<rbrace>f\<lbrace>Q And R\<rbrace>,\<lbrace>E\<rbrace>"
unfolding valid_def validE_def by (simp add: split_def split: sum.splits)
lemma hoare_validE_conj:
"\<lbrakk> \<lbrace>P\<rbrace>f\<lbrace>Q\<rbrace>,\<lbrace>E\<rbrace>; \<lbrace>P\<rbrace>f\<lbrace>R\<rbrace>,\<lbrace>E\<rbrace> \<rbrakk> \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>\<lambda>r s. Q r s \<and> R r s\<rbrace>,\<lbrace>E\<rbrace>"
unfolding valid_def validE_def by (simp add: split_def split: sum.splits)
lemma hoare_valid_validE:
"\<lbrace>P\<rbrace>f\<lbrace>\<lambda>r. Q\<rbrace> \<Longrightarrow> \<lbrace>P\<rbrace>f\<lbrace>\<lambda>r. Q\<rbrace>,\<lbrace>\<lambda>r. Q\<rbrace>"
unfolding valid_def validE_def by (simp add: split_def split: sum.splits)
lemma liftE_validE_E [wp]:
"\<lbrace>\<top>\<rbrace> liftE f -, \<lbrace>Q\<rbrace>"
by (clarsimp simp: validE_E_def valid_def)
lemma validE_validE_E [wp_comb]:
"\<lbrace>P\<rbrace> f \<lbrace>\<top>\<top>\<rbrace>, \<lbrace>E\<rbrace> \<Longrightarrow> \<lbrace>P\<rbrace> f -, \<lbrace>E\<rbrace>"
by (simp add: validE_E_def)
lemma validE_E_validE:
"\<lbrace>P\<rbrace> f -, \<lbrace>E\<rbrace> \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>\<top>\<top>\<rbrace>, \<lbrace>E\<rbrace>"
by (simp add: validE_E_def)
(*
* if_validE_E:
*
* \<lbrakk>?P1 \<Longrightarrow> \<lbrace>?Q1\<rbrace> ?f1 -, \<lbrace>?E\<rbrace>; \<not> ?P1 \<Longrightarrow> \<lbrace>?R1\<rbrace> ?g1 -, \<lbrace>?E\<rbrace>\<rbrakk> \<Longrightarrow> \<lbrace>\<lambda>s. (?P1 \<longrightarrow> ?Q1 s) \<and> (\<not> ?P1 \<longrightarrow> ?R1 s)\<rbrace> if ?P1 then ?f1 else ?g1 -, \<lbrace>?E\<rbrace>
*)
lemmas if_validE_E [wp_split] =
validE_validE_E [OF hoare_vcg_if_splitE [OF validE_E_validE validE_E_validE]]
lemma returnOk_E [wp]:
"\<lbrace>\<top>\<rbrace> returnOk r -, \<lbrace>Q\<rbrace>"
by (simp add: validE_E_def) wp
lemma hoare_drop_imp:
"\<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace> \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>\<lambda>r s. R r s \<longrightarrow> Q r s\<rbrace>"
by (auto simp: valid_def)
lemma hoare_drop_impE:
"\<lbrakk>\<lbrace>P\<rbrace> f \<lbrace>\<lambda>r. Q\<rbrace>, \<lbrace>E\<rbrace>\<rbrakk> \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>\<lambda>r s. R r s \<longrightarrow> Q s\<rbrace>, \<lbrace>E\<rbrace>"
by (simp add: validE_weaken)
lemma hoare_drop_impE_R:
"\<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>,- \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>\<lambda>r s. R r s \<longrightarrow> Q r s\<rbrace>, -"
by (auto simp: validE_R_def validE_def valid_def split_def split: sum.splits)
lemma hoare_drop_impE_E:
"\<lbrace>P\<rbrace> f -,\<lbrace>Q\<rbrace> \<Longrightarrow> \<lbrace>P\<rbrace> f -,\<lbrace>\<lambda>r s. R r s \<longrightarrow> Q r s\<rbrace>"
by (auto simp: validE_E_def validE_def valid_def split_def split: sum.splits)
lemmas hoare_drop_imps = hoare_drop_imp hoare_drop_impE_R hoare_drop_impE_E
lemma bind_det_exec:
"fst (a s) = {(r,s')} \<Longrightarrow> fst ((a >>= b) s) = fst (b r s')"
by (simp add: bind_def)
lemma in_bind_det_exec:
"fst (a s) = {(r,s')} \<Longrightarrow> (s'' \<in> fst ((a >>= b) s)) = (s'' \<in> fst (b r s'))"
by (simp add: bind_def)
lemma exec_put:
"(put s' >>= m) s = m () s'"
by (simp add: bind_def put_def)
lemma bind_execI:
"\<lbrakk> (r'',s'') \<in> fst (f s); \<exists>x \<in> fst (g r'' s''). P x \<rbrakk> \<Longrightarrow>
\<exists>x \<in> fst ((f >>= g) s). P x"
by (force simp: in_bind split_def bind_def)
lemma True_E_E [wp]: "\<lbrace>\<top>\<rbrace> f -,\<lbrace>\<top>\<top>\<rbrace>"
by (auto simp: validE_E_def validE_def valid_def split: sum.splits)
(*
* \<lbrakk>\<And>x. \<lbrace>?B1 x\<rbrace> ?g1 x -, \<lbrace>?E\<rbrace>; \<lbrace>?P\<rbrace> ?f1 \<lbrace>?B1\<rbrace>, \<lbrace>?E\<rbrace>\<rbrakk> \<Longrightarrow> \<lbrace>?P\<rbrace> ?f1 >>=E ?g1 -, \<lbrace>?E\<rbrace>
*)
lemmas [wp_split] =
validE_validE_E [OF hoare_vcg_seqE [OF validE_E_validE]]
lemma case_option_wp:
assumes x: "\<And>x. \<lbrace>P x\<rbrace> m x \<lbrace>Q\<rbrace>"
assumes y: "\<lbrace>P'\<rbrace> m' \<lbrace>Q\<rbrace>"
shows "\<lbrace>\<lambda>s. (x = None \<longrightarrow> P' s) \<and> (x \<noteq> None \<longrightarrow> P (the x) s)\<rbrace>
case_option m' m x \<lbrace>Q\<rbrace>"
apply (cases x; simp)
apply (rule y)
apply (rule x)
done
lemma case_option_wpE:
assumes x: "\<And>x. \<lbrace>P x\<rbrace> m x \<lbrace>Q\<rbrace>,\<lbrace>E\<rbrace>"
assumes y: "\<lbrace>P'\<rbrace> m' \<lbrace>Q\<rbrace>,\<lbrace>E\<rbrace>"
shows "\<lbrace>\<lambda>s. (x = None \<longrightarrow> P' s) \<and> (x \<noteq> None \<longrightarrow> P (the x) s)\<rbrace>
case_option m' m x \<lbrace>Q\<rbrace>,\<lbrace>E\<rbrace>"
apply (cases x; simp)
apply (rule y)
apply (rule x)
done
lemma in_bindE:
"(rv, s') \<in> fst ((f >>=E (\<lambda>rv'. g rv')) s) =
((\<exists>ex. rv = Inl ex \<and> (Inl ex, s') \<in> fst (f s)) \<or>
(\<exists>rv' s''. (rv, s') \<in> fst (g rv' s'') \<and> (Inr rv', s'') \<in> fst (f s)))"
apply (rule iffI)
apply (clarsimp simp: bindE_def bind_def)
apply (case_tac a)
apply (clarsimp simp: lift_def throwError_def return_def)
apply (clarsimp simp: lift_def)
apply safe
apply (clarsimp simp: bindE_def bind_def)
apply (erule rev_bexI)
apply (simp add: lift_def throwError_def return_def)
apply (clarsimp simp: bindE_def bind_def)
apply (erule rev_bexI)
apply (simp add: lift_def)
done
(*
* \<lbrace>?P\<rbrace> ?m1 -, \<lbrace>?E\<rbrace> \<Longrightarrow> \<lbrace>?P\<rbrace> liftME ?f1 ?m1 -, \<lbrace>?E\<rbrace>
*)
lemmas [wp_split] = validE_validE_E [OF liftME_wp, simplified, OF validE_E_validE]
lemma assert_A_True[simp]: "assert True = return ()"
by (simp add: assert_def)
lemma assert_wp [wp]: "\<lbrace>\<lambda>s. P \<longrightarrow> Q () s\<rbrace> assert P \<lbrace>Q\<rbrace>"
by (cases P, (simp add: assert_def | wp)+)
lemma list_cases_wp:
assumes a: "\<lbrace>P_A\<rbrace> a \<lbrace>Q\<rbrace>"
assumes b: "\<And>x xs. ts = x#xs \<Longrightarrow> \<lbrace>P_B x xs\<rbrace> b x xs \<lbrace>Q\<rbrace>"
shows "\<lbrace>case_list P_A P_B ts\<rbrace> case ts of [] \<Rightarrow> a | x # xs \<Rightarrow> b x xs \<lbrace>Q\<rbrace>"
by (cases ts, auto simp: a b)
(* FIXME: make wp *)
lemma whenE_throwError_wp:
"\<lbrace>\<lambda>s. \<not>Q \<longrightarrow> P s\<rbrace> whenE Q (throwError e) \<lbrace>\<lambda>rv. P\<rbrace>, -"
unfolding whenE_def by wpsimp
lemma select_throwError_wp:
"\<lbrace>\<lambda>s. \<forall>x\<in>S. Q x s\<rbrace> select S >>= throwError -, \<lbrace>Q\<rbrace>"
by (simp add: bind_def throwError_def return_def select_def validE_E_def
validE_def valid_def)
section "validNF Rules"
subsection "Basic validNF theorems"
lemma validNF [intro?]:
"\<lbrakk> \<lbrace> P \<rbrace> f \<lbrace> Q \<rbrace>; no_fail P f \<rbrakk> \<Longrightarrow> \<lbrace> P \<rbrace> f \<lbrace> Q \<rbrace>!"
by (clarsimp simp: validNF_def)
lemma validNF_valid: "\<lbrakk> \<lbrace> P \<rbrace> f \<lbrace> Q \<rbrace>! \<rbrakk> \<Longrightarrow> \<lbrace> P \<rbrace> f \<lbrace> Q \<rbrace>"
by (clarsimp simp: validNF_def)
lemma validNF_no_fail: "\<lbrakk> \<lbrace> P \<rbrace> f \<lbrace> Q \<rbrace>! \<rbrakk> \<Longrightarrow> no_fail P f"
by (clarsimp simp: validNF_def)
lemma snd_validNF:
"\<lbrakk> \<lbrace> P \<rbrace> f \<lbrace> Q \<rbrace>!; P s \<rbrakk> \<Longrightarrow> \<not> snd (f s)"
by (clarsimp simp: validNF_def no_fail_def)
lemma use_validNF:
"\<lbrakk> (r', s') \<in> fst (f s); \<lbrace> P \<rbrace> f \<lbrace> Q \<rbrace>!; P s \<rbrakk> \<Longrightarrow> Q r' s'"
by (fastforce simp: validNF_def valid_def)
subsection "validNF weakest pre-condition rules"
lemma validNF_return [wp]:
"\<lbrace> P x \<rbrace> return x \<lbrace> P \<rbrace>!"
by (wp validNF)+
lemma validNF_get [wp]:
"\<lbrace> \<lambda>s. P s s \<rbrace> get \<lbrace> P \<rbrace>!"
by (wp validNF)+
lemma validNF_put [wp]:
"\<lbrace> \<lambda>s. P () x \<rbrace> put x \<lbrace> P \<rbrace>!"
by (wp validNF)+
lemma validNF_K_bind [wp]:
"\<lbrace> P \<rbrace> x \<lbrace> Q \<rbrace>! \<Longrightarrow> \<lbrace> P \<rbrace> K_bind x f \<lbrace> Q \<rbrace>!"
by simp
lemma validNF_fail [wp]:
"\<lbrace> \<lambda>s. False \<rbrace> fail \<lbrace> Q \<rbrace>!"
by (clarsimp simp: validNF_def fail_def no_fail_def)
lemma validNF_prop [wp_unsafe]:
"\<lbrakk> no_fail (\<lambda>s. P) f \<rbrakk> \<Longrightarrow> \<lbrace> \<lambda>s. P \<rbrace> f \<lbrace> \<lambda>rv s. P \<rbrace>!"
by (wp validNF)+
lemma validNF_post_conj [intro!]:
"\<lbrakk> \<lbrace> P \<rbrace> a \<lbrace> Q \<rbrace>!; \<lbrace> P \<rbrace> a \<lbrace> R \<rbrace>! \<rbrakk> \<Longrightarrow> \<lbrace> P \<rbrace> a \<lbrace> Q And R \<rbrace>!"
by (clarsimp simp: validNF_def)
lemma no_fail_or:
"\<lbrakk>no_fail P a; no_fail Q a\<rbrakk> \<Longrightarrow> no_fail (P or Q) a"
by (clarsimp simp: no_fail_def)
lemma validNF_pre_disj [intro!]:
"\<lbrakk> \<lbrace> P \<rbrace> a \<lbrace> R \<rbrace>!; \<lbrace> Q \<rbrace> a \<lbrace> R \<rbrace>! \<rbrakk> \<Longrightarrow> \<lbrace> P or Q \<rbrace> a \<lbrace> R \<rbrace>!"
by (rule validNF) (auto dest: validNF_valid validNF_no_fail intro: no_fail_or)
(*
* Set up combination rules for WP, which also requires
* a "wp_trip" rule for validNF.
*)
definition "validNF_property Q s b \<equiv> \<not> snd (b s) \<and> (\<forall>(r', s') \<in> fst (b s). Q r' s')"
lemma validNF_is_triple [wp_trip]:
"validNF P f Q = triple_judgement P f (validNF_property Q)"
apply (clarsimp simp: validNF_def triple_judgement_def validNF_property_def)
apply (auto simp: no_fail_def valid_def)
done
lemma validNF_weaken_pre [wp_comb]:
"\<lbrakk>\<lbrace>Q\<rbrace> a \<lbrace>R\<rbrace>!; \<And>s. P s \<Longrightarrow> Q s\<rbrakk> \<Longrightarrow> \<lbrace>P\<rbrace> a \<lbrace>R\<rbrace>!"
by (metis hoare_pre_imp no_fail_pre validNF_def)
lemma validNF_post_comb_imp_conj:
"\<lbrakk> \<lbrace>P'\<rbrace> f \<lbrace>Q\<rbrace>!; \<lbrace>P\<rbrace> f \<lbrace>Q'\<rbrace>!; \<And>s. P s \<Longrightarrow> P' s \<rbrakk> \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>\<lambda>rv s. Q rv s \<and> Q' rv s\<rbrace>!"
by (fastforce simp: validNF_def valid_def)
lemma validNF_post_comb_conj_L:
"\<lbrakk> \<lbrace>P'\<rbrace> f \<lbrace>Q\<rbrace>!; \<lbrace>P\<rbrace> f \<lbrace>Q'\<rbrace> \<rbrakk> \<Longrightarrow> \<lbrace>\<lambda>s. P s \<and> P' s \<rbrace> f \<lbrace>\<lambda>rv s. Q rv s \<and> Q' rv s\<rbrace>!"
apply (clarsimp simp: validNF_def valid_def no_fail_def)
apply force
done
lemma validNF_post_comb_conj_R:
"\<lbrakk> \<lbrace>P'\<rbrace> f \<lbrace>Q\<rbrace>; \<lbrace>P\<rbrace> f \<lbrace>Q'\<rbrace>! \<rbrakk> \<Longrightarrow> \<lbrace>\<lambda>s. P s \<and> P' s \<rbrace> f \<lbrace>\<lambda>rv s. Q rv s \<and> Q' rv s\<rbrace>!"
apply (clarsimp simp: validNF_def valid_def no_fail_def)
apply force
done
lemma validNF_post_comb_conj:
"\<lbrakk> \<lbrace>P'\<rbrace> f \<lbrace>Q\<rbrace>!; \<lbrace>P\<rbrace> f \<lbrace>Q'\<rbrace>! \<rbrakk> \<Longrightarrow> \<lbrace>\<lambda>s. P s \<and> P' s \<rbrace> f \<lbrace>\<lambda>rv s. Q rv s \<and> Q' rv s\<rbrace>!"
apply (clarsimp simp: validNF_def valid_def no_fail_def)
apply force
done
lemma validNF_if_split [wp_split]:
"\<lbrakk>P \<Longrightarrow> \<lbrace>Q\<rbrace> f \<lbrace>S\<rbrace>!; \<not> P \<Longrightarrow> \<lbrace>R\<rbrace> g \<lbrace>S\<rbrace>!\<rbrakk> \<Longrightarrow> \<lbrace>\<lambda>s. (P \<longrightarrow> Q s) \<and> (\<not> P \<longrightarrow> R s)\<rbrace> if P then f else g \<lbrace>S\<rbrace>!"
by simp
lemma validNF_vcg_conj_lift:
"\<lbrakk> \<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>!; \<lbrace>P'\<rbrace> f \<lbrace>Q'\<rbrace>! \<rbrakk> \<Longrightarrow>
\<lbrace>\<lambda>s. P s \<and> P' s\<rbrace> f \<lbrace>\<lambda>rv s. Q rv s \<and> Q' rv s\<rbrace>!"
apply (subst bipred_conj_def[symmetric], rule validNF_post_conj)
apply (erule validNF_weaken_pre, fastforce)
apply (erule validNF_weaken_pre, fastforce)
done
lemma validNF_vcg_disj_lift:
"\<lbrakk> \<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>!; \<lbrace>P'\<rbrace> f \<lbrace>Q'\<rbrace>! \<rbrakk> \<Longrightarrow>
\<lbrace>\<lambda>s. P s \<or> P' s\<rbrace> f \<lbrace>\<lambda>rv s. Q rv s \<or> Q' rv s\<rbrace>!"
apply (clarsimp simp: validNF_def)
apply safe
apply (auto intro!: hoare_vcg_disj_lift)[1]
apply (clarsimp simp: no_fail_def)
done
lemma validNF_vcg_all_lift [wp]:
"\<lbrakk> \<And>x. \<lbrace>P x\<rbrace> f \<lbrace>Q x\<rbrace>! \<rbrakk> \<Longrightarrow> \<lbrace>\<lambda>s. \<forall>x. P x s\<rbrace> f \<lbrace>\<lambda>rv s. \<forall>x. Q x rv s\<rbrace>!"
apply atomize
apply (rule validNF)
apply (clarsimp simp: validNF_def)
apply (rule hoare_vcg_all_lift)
apply force
apply (clarsimp simp: no_fail_def validNF_def)
done
lemma validNF_bind [wp_split]:
"\<lbrakk> \<And>x. \<lbrace>B x\<rbrace> g x \<lbrace>C\<rbrace>!; \<lbrace>A\<rbrace> f \<lbrace>B\<rbrace>! \<rbrakk> \<Longrightarrow>
\<lbrace>A\<rbrace> do x \<leftarrow> f; g x od \<lbrace>C\<rbrace>!"
apply (rule validNF)
apply (metis validNF_valid hoare_seq_ext)
apply (clarsimp simp: no_fail_def validNF_def bind_def' valid_def)
apply blast
done
lemmas validNF_seq_ext = validNF_bind
subsection "validNF compound rules"
lemma validNF_state_assert [wp]:
"\<lbrace> \<lambda>s. P () s \<and> G s \<rbrace> state_assert G \<lbrace> P \<rbrace>!"
apply (rule validNF)
apply wpsimp
apply (clarsimp simp: no_fail_def state_assert_def
bind_def' assert_def return_def get_def)
done
lemma validNF_modify [wp]:
"\<lbrace> \<lambda>s. P () (f s) \<rbrace> modify f \<lbrace> P \<rbrace>!"
apply (clarsimp simp: modify_def)
apply wp
done
lemma validNF_gets [wp]:
"\<lbrace>\<lambda>s. P (f s) s\<rbrace> gets f \<lbrace>P\<rbrace>!"
apply (clarsimp simp: gets_def)
apply wp
done
lemma validNF_condition [wp]:
"\<lbrakk> \<lbrace> Q \<rbrace> A \<lbrace>P\<rbrace>!; \<lbrace> R \<rbrace> B \<lbrace>P\<rbrace>!\<rbrakk> \<Longrightarrow> \<lbrace>\<lambda>s. if C s then Q s else R s\<rbrace> condition C A B \<lbrace>P\<rbrace>!"
apply rule
apply (drule validNF_valid)+
apply (erule (1) condition_wp)
apply (drule validNF_no_fail)+
apply (clarsimp simp: no_fail_def condition_def)
done
lemma validNF_alt_def:
"validNF P m Q = (\<forall>s. P s \<longrightarrow> ((\<forall>(r', s') \<in> fst (m s). Q r' s') \<and> \<not> snd (m s)))"
by (fastforce simp: validNF_def valid_def no_fail_def)
lemma validNF_assert [wp]:
"\<lbrace> (\<lambda>s. P) and (R ()) \<rbrace> assert P \<lbrace> R \<rbrace>!"
apply (rule validNF)
apply (clarsimp simp: valid_def in_return)
apply (clarsimp simp: no_fail_def return_def)
done
lemma validNF_false_pre:
"\<lbrace> \<lambda>_. False \<rbrace> P \<lbrace> Q \<rbrace>!"
by (clarsimp simp: validNF_def no_fail_def)
lemma validNF_chain:
"\<lbrakk>\<lbrace>P'\<rbrace> a \<lbrace>R'\<rbrace>!; \<And>s. P s \<Longrightarrow> P' s; \<And>r s. R' r s \<Longrightarrow> R r s\<rbrakk> \<Longrightarrow> \<lbrace>P\<rbrace> a \<lbrace>R\<rbrace>!"
by (fastforce simp: validNF_def valid_def no_fail_def Ball_def)
lemma validNF_case_prod [wp]:
"\<lbrakk> \<And>x y. validNF (P x y) (B x y) Q \<rbrakk> \<Longrightarrow> validNF (case_prod P v) (case_prod (\<lambda>x y. B x y) v) Q"
by (metis prod.exhaust split_conv)
lemma validE_NF_case_prod [wp]:
"\<lbrakk> \<And>a b. \<lbrace>P a b\<rbrace> f a b \<lbrace>Q\<rbrace>, \<lbrace>E\<rbrace>! \<rbrakk> \<Longrightarrow>
\<lbrace>case x of (a, b) \<Rightarrow> P a b\<rbrace> case x of (a, b) \<Rightarrow> f a b \<lbrace>Q\<rbrace>, \<lbrace>E\<rbrace>!"
apply (clarsimp simp: validE_NF_alt_def)
apply (erule validNF_case_prod)
done
lemma no_fail_is_validNF_True: "no_fail P s = (\<lbrace> P \<rbrace> s \<lbrace> \<lambda>_ _. True \<rbrace>!)"
by (clarsimp simp: no_fail_def validNF_def valid_def)
subsection "validNF reasoning in the exception monad"
lemma validE_NF [intro?]:
"\<lbrakk> \<lbrace> P \<rbrace> f \<lbrace> Q \<rbrace>,\<lbrace> E \<rbrace>; no_fail P f \<rbrakk> \<Longrightarrow> \<lbrace> P \<rbrace> f \<lbrace> Q \<rbrace>,\<lbrace> E \<rbrace>!"
apply (clarsimp simp: validE_NF_def)
done
lemma validE_NF_valid:
"\<lbrakk> \<lbrace> P \<rbrace> f \<lbrace> Q \<rbrace>,\<lbrace> E \<rbrace>! \<rbrakk> \<Longrightarrow> \<lbrace> P \<rbrace> f \<lbrace> Q \<rbrace>,\<lbrace> E \<rbrace>"
apply (clarsimp simp: validE_NF_def)
done
lemma validE_NF_no_fail:
"\<lbrakk> \<lbrace> P \<rbrace> f \<lbrace> Q \<rbrace>,\<lbrace> E \<rbrace>! \<rbrakk> \<Longrightarrow> no_fail P f"
apply (clarsimp simp: validE_NF_def)
done
lemma validE_NF_weaken_pre [wp_comb]:
"\<lbrakk>\<lbrace>Q\<rbrace> a \<lbrace>R\<rbrace>,\<lbrace>E\<rbrace>!; \<And>s. P s \<Longrightarrow> Q s\<rbrakk> \<Longrightarrow> \<lbrace>P\<rbrace> a \<lbrace>R\<rbrace>,\<lbrace>E\<rbrace>!"
apply (clarsimp simp: validE_NF_alt_def)
apply (erule validNF_weaken_pre)
apply simp
done
lemma validE_NF_post_comb_conj_L:
"\<lbrakk> \<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>, \<lbrace> E \<rbrace>!; \<lbrace>P'\<rbrace> f \<lbrace>Q'\<rbrace>, \<lbrace> \<lambda>_ _. True \<rbrace> \<rbrakk> \<Longrightarrow> \<lbrace>\<lambda>s. P s \<and> P' s \<rbrace> f \<lbrace>\<lambda>rv s. Q rv s \<and> Q' rv s\<rbrace>, \<lbrace> E \<rbrace>!"
apply (clarsimp simp: validE_NF_alt_def validE_def validNF_def
valid_def no_fail_def split: sum.splits)
apply force
done
lemma validE_NF_post_comb_conj_R:
"\<lbrakk> \<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>, \<lbrace> \<lambda>_ _. True \<rbrace>; \<lbrace>P'\<rbrace> f \<lbrace>Q'\<rbrace>, \<lbrace> E \<rbrace>! \<rbrakk> \<Longrightarrow> \<lbrace>\<lambda>s. P s \<and> P' s \<rbrace> f \<lbrace>\<lambda>rv s. Q rv s \<and> Q' rv s\<rbrace>, \<lbrace> E \<rbrace>!"
apply (clarsimp simp: validE_NF_alt_def validE_def validNF_def
valid_def no_fail_def split: sum.splits)
apply force
done
lemma validE_NF_post_comb_conj:
"\<lbrakk> \<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>, \<lbrace> E \<rbrace>!; \<lbrace>P'\<rbrace> f \<lbrace>Q'\<rbrace>, \<lbrace> E \<rbrace>! \<rbrakk> \<Longrightarrow> \<lbrace>\<lambda>s. P s \<and> P' s \<rbrace> f \<lbrace>\<lambda>rv s. Q rv s \<and> Q' rv s\<rbrace>, \<lbrace> E \<rbrace>!"
apply (clarsimp simp: validE_NF_alt_def validE_def validNF_def
valid_def no_fail_def split: sum.splits)
apply force
done
lemma validE_NF_chain:
"\<lbrakk>\<lbrace>P'\<rbrace> a \<lbrace>R'\<rbrace>,\<lbrace>E'\<rbrace>!;
\<And>s. P s \<Longrightarrow> P' s;
\<And>r' s'. R' r' s' \<Longrightarrow> R r' s';
\<And>r'' s''. E' r'' s'' \<Longrightarrow> E r'' s''\<rbrakk> \<Longrightarrow>
\<lbrace>\<lambda>s. P s \<rbrace> a \<lbrace>\<lambda>r' s'. R r' s'\<rbrace>,\<lbrace>\<lambda>r'' s''. E r'' s''\<rbrace>!"
by (fastforce simp: validE_NF_def validE_def2 no_fail_def Ball_def split: sum.splits)
lemma validE_NF_bind_wp [wp]:
"\<lbrakk>\<And>x. \<lbrace>B x\<rbrace> g x \<lbrace>C\<rbrace>, \<lbrace>E\<rbrace>!; \<lbrace>A\<rbrace> f \<lbrace>B\<rbrace>, \<lbrace>E\<rbrace>!\<rbrakk> \<Longrightarrow> \<lbrace>A\<rbrace> f >>=E (\<lambda>x. g x) \<lbrace>C\<rbrace>, \<lbrace>E\<rbrace>!"
apply (unfold validE_NF_alt_def bindE_def)
apply (rule validNF_bind [rotated])
apply assumption
apply (clarsimp simp: lift_def throwError_def split: sum.splits)
apply wpsimp
done
lemma validNF_catch [wp]:
"\<lbrakk>\<And>x. \<lbrace>E x\<rbrace> handler x \<lbrace>Q\<rbrace>!; \<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>, \<lbrace>E\<rbrace>!\<rbrakk> \<Longrightarrow> \<lbrace>P\<rbrace> f <catch> (\<lambda>x. handler x) \<lbrace>Q\<rbrace>!"
apply (unfold validE_NF_alt_def catch_def)
apply (rule validNF_bind [rotated])
apply assumption
apply (clarsimp simp: lift_def throwError_def split: sum.splits)
apply wp
done
lemma validNF_throwError [wp]:
"\<lbrace>E e\<rbrace> throwError e \<lbrace>P\<rbrace>, \<lbrace>E\<rbrace>!"
by (unfold validE_NF_alt_def throwError_def o_def) wpsimp
lemma validNF_returnOk [wp]:
"\<lbrace>P e\<rbrace> returnOk e \<lbrace>P\<rbrace>, \<lbrace>E\<rbrace>!"
by (clarsimp simp: validE_NF_alt_def returnOk_def) wpsimp
lemma validNF_whenE [wp]:
"(P \<Longrightarrow> \<lbrace>Q\<rbrace> f \<lbrace>R\<rbrace>, \<lbrace>E\<rbrace>!) \<Longrightarrow> \<lbrace>if P then Q else R ()\<rbrace> whenE P f \<lbrace>R\<rbrace>, \<lbrace>E\<rbrace>!"
unfolding whenE_def by clarsimp wp
lemma validNF_nobindE [wp]:
"\<lbrakk> \<lbrace>B\<rbrace> g \<lbrace>C\<rbrace>,\<lbrace>E\<rbrace>!;
\<lbrace>A\<rbrace> f \<lbrace>\<lambda>r s. B s\<rbrace>,\<lbrace>E\<rbrace>! \<rbrakk> \<Longrightarrow>
\<lbrace>A\<rbrace> doE f; g odE \<lbrace>C\<rbrace>,\<lbrace>E\<rbrace>!"
by clarsimp wp
(*
 * Set up triple rules for validE_NF so that we can use the
* "wp_comb" attribute.
*)
definition "validE_NF_property Q E s b \<equiv> \<not> snd (b s)
\<and> (\<forall>(r', s') \<in> fst (b s). case r' of Inl x \<Rightarrow> E x s' | Inr x \<Rightarrow> Q x s')"
lemma validE_NF_is_triple [wp_trip]:
"validE_NF P f Q E = triple_judgement P f (validE_NF_property Q E)"
apply (clarsimp simp: validE_NF_def validE_def2 no_fail_def triple_judgement_def
validE_NF_property_def split: sum.splits)
apply blast
done
lemmas [wp_comb] = validE_NF_weaken_pre
lemma validNF_cong:
"\<lbrakk> \<And>s. P s = P' s; \<And>s. P s \<Longrightarrow> m s = m' s;
\<And>r' s' s. \<lbrakk> P s; (r', s') \<in> fst (m s) \<rbrakk> \<Longrightarrow> Q r' s' = Q' r' s' \<rbrakk> \<Longrightarrow>
(\<lbrace> P \<rbrace> m \<lbrace> Q \<rbrace>!) = (\<lbrace> P' \<rbrace> m' \<lbrace> Q' \<rbrace>!)"
by (fastforce simp: validNF_alt_def)
lemma validE_NF_liftE [wp]:
"\<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>! \<Longrightarrow> \<lbrace>P\<rbrace> liftE f \<lbrace>Q\<rbrace>,\<lbrace>E\<rbrace>!"
by (wpsimp simp: validE_NF_alt_def liftE_def)
lemma validE_NF_handleE' [wp]:
"\<lbrakk> \<And>x. \<lbrace>F x\<rbrace> handler x \<lbrace>Q\<rbrace>,\<lbrace>E\<rbrace>!; \<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>,\<lbrace>F\<rbrace>! \<rbrakk> \<Longrightarrow>
\<lbrace>P\<rbrace> f <handle2> (\<lambda>x. handler x) \<lbrace>Q\<rbrace>,\<lbrace>E\<rbrace>!"
apply (unfold validE_NF_alt_def handleE'_def)
apply (rule validNF_bind [rotated])
apply assumption
apply (clarsimp split: sum.splits)
apply wpsimp
done
lemma validE_NF_handleE [wp]:
"\<lbrakk> \<And>x. \<lbrace>F x\<rbrace> handler x \<lbrace>Q\<rbrace>,\<lbrace>E\<rbrace>!; \<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>,\<lbrace>F\<rbrace>! \<rbrakk> \<Longrightarrow>
\<lbrace>P\<rbrace> f <handle> handler \<lbrace>Q\<rbrace>,\<lbrace>E\<rbrace>!"
apply (unfold handleE_def)
apply (metis validE_NF_handleE')
done
lemma validE_NF_condition [wp]:
"\<lbrakk> \<lbrace> Q \<rbrace> A \<lbrace>P\<rbrace>,\<lbrace> E \<rbrace>!; \<lbrace> R \<rbrace> B \<lbrace>P\<rbrace>,\<lbrace> E \<rbrace>!\<rbrakk>
\<Longrightarrow> \<lbrace>\<lambda>s. if C s then Q s else R s\<rbrace> condition C A B \<lbrace>P\<rbrace>,\<lbrace> E \<rbrace>!"
apply rule
apply (drule validE_NF_valid)+
apply wp
apply (drule validE_NF_no_fail)+
apply (clarsimp simp: no_fail_def condition_def)
done
text {* Strengthen setup. *}
context strengthen_implementation begin
lemma strengthen_hoare [strg]:
"(\<And>r s. st F (op \<longrightarrow>) (Q r s) (R r s))
\<Longrightarrow> st F (op \<longrightarrow>) (\<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>) (\<lbrace>P\<rbrace> f \<lbrace>R\<rbrace>)"
by (cases F, auto elim: hoare_strengthen_post)
lemma strengthen_validE_R_cong[strg]:
"(\<And>r s. st F (op \<longrightarrow>) (Q r s) (R r s))
\<Longrightarrow> st F (op \<longrightarrow>) (\<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>, -) (\<lbrace>P\<rbrace> f \<lbrace>R\<rbrace>, -)"
by (cases F, auto intro: hoare_post_imp_R)
lemma strengthen_validE_cong[strg]:
"(\<And>r s. st F (op \<longrightarrow>) (Q r s) (R r s))
\<Longrightarrow> (\<And>r s. st F (op \<longrightarrow>) (S r s) (T r s))
\<Longrightarrow> st F (op \<longrightarrow>) (\<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>, \<lbrace>S\<rbrace>) (\<lbrace>P\<rbrace> f \<lbrace>R\<rbrace>, \<lbrace>T\<rbrace>)"
by (cases F, auto elim: hoare_post_impErr)
lemma strengthen_validE_E_cong[strg]:
"(\<And>r s. st F (op \<longrightarrow>) (S r s) (T r s))
\<Longrightarrow> st F (op \<longrightarrow>) (\<lbrace>P\<rbrace> f -, \<lbrace>S\<rbrace>) (\<lbrace>P\<rbrace> f -, \<lbrace>T\<rbrace>)"
by (cases F, auto elim: hoare_post_impErr simp: validE_E_def)
end
end
|
module Parser.Rule.Source
import public Parser.Lexer.Source
import public Parser.Rule.Common
import public Parser.Support
import Core.TT
import Data.Strings
%default total
public export
Rule : Type -> Type
Rule = Rule Token
public export
SourceEmptyRule : Type -> Type
SourceEmptyRule = EmptyRule Token
export
eoi : SourceEmptyRule ()
eoi
= do nextIs "Expected end of input" (isEOI . val)
pure ()
where
isEOI : Token -> Bool
isEOI EndInput = True
isEOI _ = False
export
constant : Rule Constant
constant
= terminal "Expected constant"
(\x => case x.val of
CharLit c => case getCharLit c of
Nothing => Nothing
Just c' => Just (Ch c')
DoubleLit d => Just (Db d)
IntegerLit i => Just (BI i)
StringLit s => case escape s of
Nothing => Nothing
Just s' => Just (Str s')
Ident "Char" => Just CharType
Ident "Double" => Just DoubleType
Ident "Int" => Just IntType
Ident "Integer" => Just IntegerType
Ident "Bits8" => Just Bits8Type
Ident "Bits16" => Just Bits16Type
Ident "Bits32" => Just Bits32Type
Ident "Bits64" => Just Bits64Type
Ident "String" => Just StringType
_ => Nothing)
documentation' : Rule String
documentation' = terminal "Expected documentation comment"
(\x => case x.val of
DocComment d => Just d
_ => Nothing)
export
documentation : Rule String
documentation = unlines <$> some documentation'
export
intLit : Rule Integer
intLit
= terminal "Expected integer literal"
(\x => case x.val of
IntegerLit i => Just i
_ => Nothing)
export
onOffLit : Rule Bool
onOffLit
= terminal "Expected on or off"
(\x => case x.val of
Ident "on" => Just True
Ident "off" => Just False
_ => Nothing)
export
strLit : Rule String
strLit
= terminal "Expected string literal"
(\x => case x.val of
StringLit s => Just s
_ => Nothing)
export
aDotIdent : Rule String
aDotIdent = terminal "Expected dot+identifier"
(\x => case x.val of
DotIdent s => Just s
_ => Nothing)
export
dotIdent : Rule Name
dotIdent = UN <$> aDotIdent
export
symbol : String -> Rule ()
symbol req
= terminal ("Expected '" ++ req ++ "'")
(\x => case x.val of
Symbol s => if s == req then Just ()
else Nothing
_ => Nothing)
export
keyword : String -> Rule ()
keyword req
= terminal ("Expected '" ++ req ++ "'")
(\x => case x.val of
Keyword s => if s == req then Just ()
else Nothing
_ => Nothing)
export
exactIdent : String -> Rule ()
exactIdent req
= terminal ("Expected " ++ req)
(\x => case x.val of
Ident s => if s == req then Just ()
else Nothing
_ => Nothing)
export
pragma : String -> Rule ()
pragma n =
terminal ("Expected pragma " ++ n)
(\x => case x.val of
Pragma s =>
if s == n
then Just ()
else Nothing
_ => Nothing)
export
operator : Rule Name
operator
= terminal "Expected operator"
(\x => case x.val of
Symbol s =>
if s `elem` reservedSymbols
then Nothing
else Just (UN s)
_ => Nothing)
identPart : Rule String
identPart
= terminal "Expected name"
(\x => case x.val of
Ident str => Just str
_ => Nothing)
export
namespacedIdent : Rule (Maybe Namespace, String)
namespacedIdent
= terminal "Expected namespaced name"
(\x => case x.val of
DotSepIdent ns n => Just (Just ns, n)
Ident i => Just (Nothing, i)
_ => Nothing)
export
namespaceId : Rule Namespace
namespaceId = map (uncurry mkNestedNamespace) namespacedIdent
export
moduleIdent : Rule ModuleIdent
moduleIdent
= terminal "Expected module identifier"
(\x => case x.val of
DotSepIdent ns n => Just (mkModuleIdent (Just ns) n)
Ident i => Just (mkModuleIdent Nothing i)
_ => Nothing)
export
unqualifiedName : Rule String
unqualifiedName = identPart
export
holeName : Rule String
holeName
= terminal "Expected hole name"
(\x => case x.val of
HoleIdent str => Just str
_ => Nothing)
reservedNames : List String
reservedNames
= ["Type", "Int", "Integer", "Bits8", "Bits16", "Bits32", "Bits64",
"String", "Char", "Double", "Lazy", "Inf", "Force", "Delay"]
export
name : Rule Name
name = opNonNS <|> do
nsx <- namespacedIdent
         -- writing (ns, x) <- namespacedIdent leads to an unsolved constraint.
-- I tried to write a minimised test case but could not reproduce the error
-- on a simplified example.
let ns = fst nsx
let x = snd nsx
opNS (mkNestedNamespace ns x) <|> nameNS ns x
where
reserved : String -> Bool
reserved n = n `elem` reservedNames
nameNS : Maybe Namespace -> String -> SourceEmptyRule Name
nameNS ns x =
if reserved x
then fail $ "can't use reserved name " ++ x
else pure $ mkNamespacedName ns x
opNonNS : Rule Name
opNonNS = symbol "(" *> operator <* symbol ")"
opNS : Namespace -> Rule Name
opNS ns = do
symbol ".("
n <- operator
symbol ")"
pure (NS ns n)
export
IndentInfo : Type
IndentInfo = Int
export
init : IndentInfo
init = 0
continueF : SourceEmptyRule () -> (indent : IndentInfo) -> SourceEmptyRule ()
continueF err indent
= do eoi; err
<|> do keyword "where"; err
<|> do col <- Common.column
if col <= indent
then err
else pure ()
||| Fail if this is the end of a block entry or end of file
export
continue : (indent : IndentInfo) -> SourceEmptyRule ()
continue = continueF (fail "Unexpected end of expression")
||| As 'continue' but failing is fatal (i.e. entire parse fails)
export
mustContinue : (indent : IndentInfo) -> Maybe String -> SourceEmptyRule ()
mustContinue indent Nothing
= continueF (fatalError "Unexpected end of expression") indent
mustContinue indent (Just req)
= continueF (fatalError ("Expected '" ++ req ++ "'")) indent
data ValidIndent =
||| In {}, entries can begin in any column
AnyIndent |
||| Entry must begin in a specific column
AtPos Int |
||| Entry can begin in this column or later
AfterPos Int |
||| Block is finished
EndOfBlock
Show ValidIndent where
show AnyIndent = "[any]"
show (AtPos i) = "[col " ++ show i ++ "]"
show (AfterPos i) = "[after " ++ show i ++ "]"
show EndOfBlock = "[EOB]"
checkValid : ValidIndent -> Int -> SourceEmptyRule ()
checkValid AnyIndent c = pure ()
checkValid (AtPos x) c = if c == x
then pure ()
else fail "Invalid indentation"
checkValid (AfterPos x) c = if c >= x
then pure ()
else fail "Invalid indentation"
checkValid EndOfBlock c = fail "End of block"
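-- Informal examples of the rules above: checkValid (AtPos 3) 3 succeeds,
-- checkValid (AtPos 3) 5 fails with "Invalid indentation", and
-- checkValid (AfterPos 3) 5 succeeds because 5 >= 3; AnyIndent accepts any column.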
||| Any token which indicates the end of a statement/block/expression
isTerminator : Token -> Bool
isTerminator (Symbol ",") = True
isTerminator (Symbol "]") = True
isTerminator (Symbol ";") = True
isTerminator (Symbol "}") = True
isTerminator (Symbol ")") = True
isTerminator (Symbol "|") = True
isTerminator (Symbol "**") = True
isTerminator (Keyword "in") = True
isTerminator (Keyword "then") = True
isTerminator (Keyword "else") = True
isTerminator (Keyword "where") = True
isTerminator EndInput = True
isTerminator _ = False
||| Check we're at the end of a block entry, given the start column
||| of the block.
||| It's the end if we have a terminating token, or the next token starts
||| in or before indent. Works by looking ahead but not consuming.
export
atEnd : (indent : IndentInfo) -> SourceEmptyRule ()
atEnd indent
= eoi
<|> do nextIs "Expected end of block" (isTerminator . val)
pure ()
<|> do col <- Common.column
if (col <= indent)
then pure ()
else fail "Not the end of a block entry"
-- Check we're at the end, but only by looking at indentation
export
atEndIndent : (indent : IndentInfo) -> SourceEmptyRule ()
atEndIndent indent
= eoi
<|> do col <- Common.column
if col <= indent
then pure ()
else fail "Not the end of a block entry"
-- Parse a terminator, return where the next block entry
-- must start, given where the current block entry started
terminator : ValidIndent -> Int -> SourceEmptyRule ValidIndent
terminator valid laststart
= do eoi
pure EndOfBlock
<|> do symbol ";"
pure (afterSemi valid)
<|> do col <- column
afterDedent valid col
<|> pure EndOfBlock
where
-- Expected indentation for the next token can either be anything (if
-- we're inside a brace delimited block) or anywhere after the initial
-- column (if we're inside an indentation delimited block)
afterSemi : ValidIndent -> ValidIndent
afterSemi AnyIndent = AnyIndent -- in braces, anything goes
afterSemi (AtPos c) = AfterPos c -- not in braces, after the last start position
afterSemi (AfterPos c) = AfterPos c
afterSemi EndOfBlock = EndOfBlock
-- Expected indentation for the next token can either be anything (if
-- we're inside a brace delimited block) or in exactly the initial column
-- (if we're inside an indentation delimited block)
afterDedent : ValidIndent -> Int -> SourceEmptyRule ValidIndent
afterDedent AnyIndent col
= if col <= laststart
then pure AnyIndent
else fail "Not the end of a block entry"
afterDedent (AfterPos c) col
= if col <= laststart
then pure (AtPos c)
else fail "Not the end of a block entry"
afterDedent (AtPos c) col
= if col <= laststart
then pure (AtPos c)
else fail "Not the end of a block entry"
afterDedent EndOfBlock col = pure EndOfBlock
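  -- Informal example of the two helpers above: inside a brace block (AnyIndent)
  -- a `;` leaves the indentation unconstrained, while in a layout block started
  -- at column c (AtPos c) a `;` relaxes the next entry to AfterPos c.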
-- Parse an entry in a block
blockEntry : ValidIndent -> (IndentInfo -> Rule ty) ->
Rule (ty, ValidIndent)
blockEntry valid rule
= do col <- column
checkValid valid col
p <- rule col
valid' <- terminator valid col
pure (p, valid')
blockEntries : ValidIndent -> (IndentInfo -> Rule ty) ->
SourceEmptyRule (List ty)
blockEntries valid rule
= do eoi; pure []
<|> do res <- blockEntry valid rule
ts <- blockEntries (snd res) rule
pure (fst res :: ts)
<|> pure []
export
block : (IndentInfo -> Rule ty) -> SourceEmptyRule (List ty)
block item
= do symbol "{"
commit
ps <- blockEntries AnyIndent item
symbol "}"
pure ps
<|> do col <- column
blockEntries (AtPos col) item
||| `blockAfter col rule` parses a `rule`-block indented by at
||| least `col` spaces (unless the block is explicitly delimited
||| by curly braces). `rule` is a function of the actual indentation
||| level.
export
blockAfter : Int -> (IndentInfo -> Rule ty) -> SourceEmptyRule (List ty)
blockAfter mincol item
= do symbol "{"
commit
ps <- blockEntries AnyIndent item
symbol "}"
pure ps
<|> do col <- Common.column
if col <= mincol
then pure []
else blockEntries (AtPos col) item
export
blockWithOptHeaderAfter : Int -> (IndentInfo -> Rule hd) -> (IndentInfo -> Rule ty) -> SourceEmptyRule (Maybe hd, List ty)
blockWithOptHeaderAfter {ty} mincol header item
= do symbol "{"
commit
hidt <- optional $ blockEntry AnyIndent header
restOfBlock hidt
<|> do col <- Common.column
if col <= mincol
then pure (Nothing, [])
else do hidt <- optional $ blockEntry (AtPos col) header
ps <- blockEntries (AtPos col) item
pure (map fst hidt, ps)
where
restOfBlock : Maybe (hd, ValidIndent) -> Rule (Maybe hd, List ty)
restOfBlock (Just (h, idt)) = do ps <- blockEntries idt item
symbol "}"
pure (Just h, ps)
restOfBlock Nothing = do ps <- blockEntries AnyIndent item
symbol "}"
pure (Nothing, ps)
export
nonEmptyBlock : (IndentInfo -> Rule ty) -> Rule (List ty)
nonEmptyBlock item
= do symbol "{"
commit
res <- blockEntry AnyIndent item
ps <- blockEntries (snd res) item
symbol "}"
pure (fst res :: ps)
<|> do col <- column
res <- blockEntry (AtPos col) item
ps <- blockEntries (snd res) item
pure (fst res :: ps)
|
From CoqMTL Require Import Control.
(** This module contains proofs that various definitions of monads are
equivalent. More precisely, we prove that these definitions are
equivalent in the sense of the ability to construct an instance of
each of them from the other:
- the main definition used throughout the library (from Control.Monad),
which says that a monad is an [Applicative] functor with [bind],
satisfying the laws [bind_pure_l], [bind_pure_r], [bind_assoc] and
[bind_ap], which relates the [Applicative] and [Monad] structure
- the join-based definition (from Theory.Equivs.MonadJoin), which says
that a monad is an [Applicative] functor with [join] that satisfies
the laws [join_fmap_join], [join_pure], [join_fmap_pure],
[join_fmap_fmap] and [join_ap]
- the Kleisli Triple definition, which is very similar to that of
Theory.Equivs.MonadBind, but has different operation names and
some arguments flipped
As of now, I can't prove that these are equivalent to the compM-based
definition (from Theory.Equivs.MonadComp), which says that a monad is
something that has monadic composition which is associative and has
a neutral element. *)
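(** For orientation, the bind-based laws above read, in the usual
Haskell-style notation (an informal restatement only; the exact CoqMTL
formulations and argument orders may differ):
- [bind_pure_l] : [bind (pure a) f = f a]
- [bind_pure_r] : [bind m pure = m]
- [bind_assoc] : [bind (bind m f) g = bind m (fun x => bind (f x) g)]
and the classical join laws are
- [join_pure] : [join (pure m) = m]
- [join_fmap_pure] : [join (fmap pure m) = m]
- [join_fmap_join] : [join (fmap join m) = join (join m)]
The remaining laws ([bind_ap], [join_ap], [join_fmap_fmap]) tie these
operations to the underlying [Applicative]/[Functor] structure. *)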
(** First we require all the necessary modules and repack them so as to
refer to them by nonconflicting names. *)
From CoqMTL Require MonadJoin.
From CoqMTL Require MonadBind.
From CoqMTL Require MonadComp.
Module Join.
Include MonadJoin.
End Join.
Module Bind.
Include MonadBind.
End Bind.
Module Comp.
Include MonadComp.
End Comp.
(** Each proof consists of two instances, one deriving [Monad] from the
definition at hand and the other one deriving an instance for that
definition from [Monad]. *)
(** * join-based definition *)
#[refine]
#[export]
Instance Join_to_Monad
(M : Type -> Type) (inst : Join.Monad M) : Monad M :=
{
is_applicative := @Join.is_applicative M inst;
bind := @Join.bind M inst
}.
Proof.
1, 2, 4: MonadJoin.mjoin.
apply Join.assoc.
Defined.
#[refine]
#[export]
Instance Monad_to_Join (M : Type -> Type) (inst : Monad M)
: Join.Monad M :=
{
is_applicative := @is_applicative M inst;
join := @join M inst
}.
Proof.
all: intros; unfold join, compose; try ext x.
rewrite bind_assoc, bind_fmap. unfold compose, id. reflexivity.
rewrite bind_pure_l. reflexivity.
rewrite bind_fmap, <- bind_pure_r. f_equal.
rewrite bind_fmap, fmap_bind. f_equal.
rewrite !bind_ap. monad.
Defined.
(** * bind-based definition *)
#[refine]
#[export]
Instance MonadBind_to_Monad
(M : Type -> Type) (inst : Bind.Monad M) : Monad M :=
{
is_applicative := @MonadBind.Applicative_MonadBind M inst;
bind := @MonadBind.bind M inst;
}.
Proof. all: MonadBind.mbind. Defined.
#[refine]
#[export]
Instance Monad_to_MonadBind
(M : Type -> Type) (inst : Monad M) : MonadBind.Monad M :=
{
pure := @pure M inst;
bind := @bind M inst;
}.
Proof. all: monad. Defined.
(** * Kleisli triple *)
From CoqMTL Require Import KleisliTriple.
#[refine]
#[export]
Instance Monad_to_KleisliTriple
(M : Type -> Type) (inst : Monad M) : KleisliTriple M :=
{
eta := @pure M inst;
star := fun A B => flip (@bind M inst A B);
}.
Proof.
all: unfold flip; monad.
Defined.
#[refine]
#[export]
Instance KleisliTriple_to_Monad
(M : Type -> Type) (inst : KleisliTriple M) : Monad M :=
{
is_applicative := Applicative_Kleisli M inst;
bind := @bind_Kleisli M inst;
}.
Proof. all: kleisli. Defined.
(** * compM-based definition *)
#[refine]
#[export]
Instance Monad_to_MonadComp
(M : Type -> Type) (inst : Monad M) : MonadComp.Monad M :=
{
is_applicative := is_applicative;
compM := @compM M inst;
}.
Proof. all: unfold compM; monad. Defined.
(** TODO: MonadComp_to_Monad *) |
theory "Cardinality-Domain"
imports "../Launchbury/HOLCF-Utils"
begin
type_synonym oneShot = "one"
abbreviation notOneShot :: oneShot where "notOneShot \<equiv> ONE"
abbreviation oneShot :: oneShot where "oneShot \<equiv> \<bottom>"
type_synonym two = "oneShot\<^sub>\<bottom>"
abbreviation many :: two where "many \<equiv> up\<cdot>notOneShot"
abbreviation once :: two where "once \<equiv> up\<cdot>oneShot"
abbreviation none :: two where "none \<equiv> \<bottom>"
lemma many_max[simp]: "a \<sqsubseteq> many" by (cases a) auto
lemma two_conj: "c = many \<or> c = once \<or> c = none" by (metis Exh_Up one_neq_iffs(1))
lemma two_cases[case_names many once none]:
obtains "c = many" | "c = once" | "c = none" using two_conj by metis
definition two_pred where "two_pred = (\<Lambda> x. if x \<sqsubseteq> once then \<bottom> else x)"
lemma two_pred_simp: "two_pred\<cdot>c = (if c \<sqsubseteq> once then \<bottom> else c)"
unfolding two_pred_def
apply (rule beta_cfun)
apply (rule cont_if_else_above)
apply (auto elim: below_trans)
done
lemma two_pred_simps[simp]:
"two_pred\<cdot>many = many"
"two_pred\<cdot>once = none"
"two_pred\<cdot>none = none"
by (simp_all add: two_pred_simp)
lemma two_pred_below_arg: "two_pred \<cdot> f \<sqsubseteq> f"
by (auto simp add: two_pred_simp)
lemma two_pred_none: "two_pred\<cdot>c = none \<longleftrightarrow> c \<sqsubseteq> once"
by (auto simp add: two_pred_simp)
definition record_call where "record_call x = (\<Lambda> ce. (\<lambda> y. if x = y then two_pred\<cdot>(ce y) else ce y))"
lemma record_call_simp: "(record_call x \<cdot> f) x' = (if x = x' then two_pred \<cdot> (f x') else f x')"
unfolding record_call_def by auto
lemma record_call[simp]: "(record_call x \<cdot> f) x = two_pred \<cdot> (f x)"
unfolding record_call_simp by auto
lemma record_call_other[simp]: "x' \<noteq> x \<Longrightarrow> (record_call x \<cdot> f) x' = f x'"
unfolding record_call_simp by auto
lemma record_call_below_arg: "record_call x \<cdot> f \<sqsubseteq> f"
unfolding record_call_def
by (auto intro!: fun_belowI two_pred_below_arg)
definition two_add :: "two \<rightarrow> two \<rightarrow> two"
where "two_add = (\<Lambda> x. (\<Lambda> y. if x \<sqsubseteq> \<bottom> then y else (if y \<sqsubseteq> \<bottom> then x else many)))"
lemma two_add_simp: "two_add\<cdot>x\<cdot>y = (if x \<sqsubseteq> \<bottom> then y else (if y \<sqsubseteq> \<bottom> then x else many))"
unfolding two_add_def
apply (subst beta_cfun)
apply (rule cont2cont)
apply (rule cont_if_else_above)
apply (auto elim: below_trans)[1]
apply (rule cont_if_else_above)
apply (auto elim: below_trans)[8]
apply (rule beta_cfun)
apply (rule cont_if_else_above)
apply (auto elim: below_trans)[1]
apply (rule cont_if_else_above)
apply auto
done
lemma two_pred_two_add_once: "c \<sqsubseteq> two_pred\<cdot>(two_add\<cdot>once\<cdot>c)"
by (cases c rule: two_cases) (auto simp add: two_add_simp)
end
|
data Vect : Nat -> Type -> Type where
Nil : Vect Z a
(::) : (x : a) -> (xs : Vect k a) -> Vect (S k) a
Eq e => Eq (Vect n e) where
(==) Nil Nil = True
(==) (x :: xs) (y :: ys) = (x == y) && (xs == ys)
(==) _ _ = False
Foldable (Vect n) where
foldr f acc [] = acc
foldr f acc (x :: xs) = f x (foldr f acc xs)
|
Formal statement is: lemma order_0I: "poly p a \<noteq> 0 \<Longrightarrow> order a p = 0" Informal statement is: If $p(a) \neq 0$, then the order of $a$ in $p$ is $0$. |
"""Modification Gamma Correction."""
import cv2 as cv
import numpy as np
from dfd.datasets.modifications.interfaces import ModificationInterface
class GammaCorrectionModification(ModificationInterface):
"""Modification Gamma Correction."""
def __init__(self, gamma_value: float) -> None:
"""Initialize GammaCorrectionModification.
Args:
gamma_value: gamma value, must be a positive number
"""
self._gamma_value = gamma_value
def perform(self, image: np.ndarray) -> np.ndarray:
"""Perform gamma correction on provided image.
Args:
image: OpenCV image.
Returns:
Image after gamma correction.
"""
rgb_max_value = 255
look_up_table = np.array(
[
int(((i / rgb_max_value) ** (1.0 / self._gamma_value)) * rgb_max_value)
for i in np.arange(0, 256)
]
).astype("uint8")
# apply gamma correction using lookup table
return cv.LUT(image, look_up_table)
def __str__(self) -> str:
return f"gamma_correction_{self._gamma_value}"
|
import category_theory.abelian.diagram_lemmas.four
import for_mathlib.projectives
import for_mathlib.homological_complex
import for_mathlib.snake_lemma2
noncomputable theory
open category_theory
open category_theory.limits
universes v u
namespace short_exact_sequence
variables {C : Type u} [category.{v} C] [abelian C] [enough_projectives C]
variables {D : Type*} [category D] [abelian D]
-- move this
lemma exact_of_epi_comp_kernel.ι_comp_mono {C : Type u} [category.{v} C] [abelian C] {X Y Z W : C}
(g : Y ⟶ Z) (h : Z ⟶ W) (f : X ⟶ kernel g) (i : kernel g ⟶ Y) (hf : epi f) (hh : mono h)
(hi : i = kernel.ι g) : exact (f ≫ i) (g ≫ h) :=
begin
suffices : exact i g,
{ letI := hf, letI := hh,
exact exact_comp_mono (exact_epi_comp this) },
rw [hi],
exact exact_kernel_ι
end
-- move this
lemma biprod_factors (A B : C) [projective A] [projective B]
(E X : C) (f : A ⊞ B ⟶ X) (e : E ⟶ X) [epi e] :
∃ f' : A ⊞ B ⟶ E, f' ≫ e = f :=
⟨biprod.desc
(projective.factor_thru (biprod.inl ≫ f) e)
(projective.factor_thru (biprod.inr ≫ f) e),
by ext; simp only [projective.factor_thru_comp, biprod.inl_desc_assoc, biprod.inr_desc_assoc]⟩
variables (A B : short_exact_sequence C) (f : A ⟶ B)
def horseshoe_base : short_exact_sequence C :=
short_exact_sequence.mk_split (projective.over A.1) (projective.over A.3)
def horseshoe_base_π : horseshoe_base A ⟶ A :=
{ fst := projective.π _,
snd := biprod.desc (projective.π _ ≫ A.f) (projective.factor_thru (projective.π _) A.g),
trd := projective.π _,
sq1' := by { dsimp [horseshoe_base], simp only [biprod.inl_desc], },
sq2' :=
begin
dsimp [horseshoe_base], apply category_theory.limits.biprod.hom_ext',
{ simp only [zero_comp, exact.w_assoc, biprod.inl_desc_assoc, category.assoc,
short_exact_sequence.f_comp_g, comp_zero, exact_inl_snd], },
{ simp only [projective.factor_thru_comp, biprod.inr_snd_assoc, biprod.inr_desc_assoc], }
end }
instance epi_horseshoe_base_π_1 : epi (horseshoe_base_π A).1 :=
show epi (projective.π _), by apply_instance
instance epi_horseshoe_base_π_3 : epi (horseshoe_base_π A).3 :=
show epi (projective.π _), by apply_instance
local attribute [instance] limits.has_zero_object.has_zero
instance epi_horseshoe_base_π_2 : epi (horseshoe_base_π A).2 :=
begin
let φ := horseshoe_base_π A,
have h : φ.3 ≫ (0 : A.3 ⟶ 0) = (0 : _ ⟶ 0) ≫ (0 : 0 ⟶ 0) := by simp,
refine category_theory.abelian.epi_of_epi_of_epi_of_mono φ.sq1' φ.sq2' h _ _ _ _ _ _;
try { rw ← epi_iff_exact_zero_right }; try { apply_instance },
exact A.exact',
end
variables {A B}
def horseshoe_ker [epi f.1] : short_exact_sequence C :=
(snake_input.mk_of_short_exact_sequence_hom _ _ _ f).kernel_sequence _
begin
dsimp [snake_input.mk_of_short_exact_sequence_hom, snake_diagram.mk_of_short_exact_sequence_hom],
rw snake_diagram.mk_functor_map_f1,
exact A.mono',
end
$ is_zero_of_iso_of_zero (is_zero_zero _) (limits.cokernel.of_epi _).symm
@[simp] lemma horseshoe_ker_fst [epi f.1] : (horseshoe_ker f).1 = kernel f.1 := rfl
@[simp] lemma horseshoe_ker_snd [epi f.1] : (horseshoe_ker f).2 = kernel f.2 := rfl
@[simp] lemma horseshoe_ker_trd [epi f.1] : (horseshoe_ker f).3 = kernel f.3 := rfl
def horseshoe_ker_ι [epi f.1] : horseshoe_ker f ⟶ A :=
{ fst := kernel.ι _,
snd := kernel.ι _,
trd := kernel.ι _,
sq1' :=
begin
dsimp [horseshoe_ker, snake_input.kernel_sequence,
snake_input.mk_of_short_exact_sequence_hom, snake_diagram.mk_of_short_exact_sequence_hom],
delta kernel.map,
rw [snake_diagram.mk_functor_map_f0, kernel.lift_ι],
end,
sq2' :=
begin
dsimp [horseshoe_ker, snake_input.kernel_sequence,
snake_input.mk_of_short_exact_sequence_hom, snake_diagram.mk_of_short_exact_sequence_hom],
delta kernel.map,
rw [snake_diagram.mk_functor_map_g0, kernel.lift_ι],
end }
.
lemma horseshoe_ker_ι_fst [epi f.1] : (horseshoe_ker_ι f).1 = kernel.ι f.1 := rfl
lemma horseshoe_ker_ι_snd [epi f.1] : (horseshoe_ker_ι f).2 = kernel.ι f.2 := rfl
lemma horseshoe_ker_ι_trd [epi f.1] : (horseshoe_ker_ι f).3 = kernel.ι f.3 := rfl
variables (A)
lemma horseshoe_ker_ι_comp_base_π :
(horseshoe_ker_ι (horseshoe_base_π A)) ≫ horseshoe_base_π A = 0 :=
begin
dsimp [horseshoe_ker_ι, horseshoe_base_π],
ext1; show kernel.ι _ ≫ _ = 0; exact exact.w exact_kernel_ι,
end
noncomputable
def horseshoe_step (A : short_exact_sequence C) :
ℕ → Σ (X Y Z : short_exact_sequence C) (ι : X ⟶ Y), Y ⟶ Z
| 0 := ⟨horseshoe_ker (horseshoe_base_π A), _, _, horseshoe_ker_ι _, horseshoe_base_π _⟩
| (n+1) :=
⟨horseshoe_ker (horseshoe_base_π (horseshoe_step n).1), _, _, horseshoe_ker_ι _, horseshoe_base_π _⟩
@[reassoc] lemma horseshoe_step_comp_eq_zero :
∀ n, (horseshoe_step A n).2.2.2.1 ≫ (horseshoe_step A n).2.2.2.2 = 0
| 0 := horseshoe_ker_ι_comp_base_π _
| (n+1) := horseshoe_ker_ι_comp_base_π _
lemma step_fst_mono (n : ℕ) : mono (horseshoe_step A n).2.2.2.1.1 :=
begin
cases n,
{ dsimp [horseshoe_step, horseshoe_ker_ι],
apply_instance },
{ dsimp [horseshoe_step],
cases n, --Why do I have to do this again?!
{ rw [horseshoe_ker_ι_fst],
apply_instance },
{ rw [horseshoe_ker_ι_fst],
apply_instance } }
end
lemma step_snd_mono (n : ℕ) : mono (horseshoe_step A n).2.2.2.1.2 :=
begin
cases n,
{ dsimp [horseshoe_step, horseshoe_ker_ι],
apply_instance },
{ dsimp [horseshoe_step],
cases n, --Why do I have to do this again?!
{ rw [horseshoe_ker_ι_snd],
apply_instance },
{ rw [horseshoe_ker_ι_snd],
apply_instance } }
end
lemma step_trd_mono (n : ℕ) : mono (horseshoe_step A n).2.2.2.1.3 :=
begin
cases n,
{ dsimp [horseshoe_step, horseshoe_ker_ι],
apply_instance },
{ dsimp [horseshoe_step],
cases n, --Why do I have to do this again?!
{ rw [horseshoe_ker_ι_trd],
apply_instance },
{ rw [horseshoe_ker_ι_trd],
apply_instance } }
end
def horseshoe_obj (n : ℕ) := (horseshoe_step A n).2.1
def horseshoe_d (n : ℕ) : horseshoe_obj A (n+1) ⟶ horseshoe_obj A n :=
(horseshoe_step A (n+1)).2.2.2.2 ≫ eq_to_hom (by { dsimp [horseshoe_step], refl })
≫ (horseshoe_step A n).2.2.2.1
lemma horseshoe_d_d (n : ℕ) : horseshoe_d A (n+1) ≫ horseshoe_d A n = 0 :=
begin
dsimp [horseshoe_d, horseshoe_ker_ι],
simp only [category.id_comp, category.assoc, comp_zero, zero_comp,
horseshoe_step_comp_eq_zero_assoc],
end
def horseshoe (A : short_exact_sequence C) : chain_complex (short_exact_sequence C) ℕ :=
chain_complex.of (horseshoe_obj A) (horseshoe_d A) (horseshoe_d_d A)
variables (A)
def horseshoe_π : (horseshoe A).X 0 ⟶ A := horseshoe_base_π _
lemma horseshoe_d_π : (horseshoe A).d 1 0 ≫ horseshoe_π A = 0 :=
begin
dsimp [horseshoe],
erw [chain_complex.of_d],
dsimp [horseshoe_d, horseshoe_π, horseshoe_step],
simp only [category.id_comp, category.assoc, comp_zero, zero_comp,
horseshoe_step_comp_eq_zero_assoc, horseshoe_ker_ι_comp_base_π],
end
def horseshoe_to_single₁ :=
(chain_complex.to_single₀_equiv ((homological_complex.Fst C).obj (horseshoe A)) A.1).symm
⟨(short_exact_sequence.Fst C).map (horseshoe_π A),
begin
have := horseshoe_d_π A, apply_fun (λ f, (short_exact_sequence.Fst C).map f) at this,
rwa [functor.map_comp, functor.map_zero] at this,
end⟩
def horseshoe_to_single₂ :=
(chain_complex.to_single₀_equiv ((homological_complex.Snd C).obj (horseshoe A)) A.2).symm
⟨(short_exact_sequence.Snd C).map (horseshoe_π A),
begin
have := horseshoe_d_π A, apply_fun (λ f, (short_exact_sequence.Snd C).map f) at this,
rwa [functor.map_comp, functor.map_zero] at this,
end⟩
def horseshoe_to_single₃ :=
(chain_complex.to_single₀_equiv ((homological_complex.Trd C).obj (horseshoe A)) A.3).symm
⟨(short_exact_sequence.Trd C).map (horseshoe_π A),
begin
have := horseshoe_d_π A, apply_fun (λ f, (short_exact_sequence.Trd C).map f) at this,
rwa [functor.map_comp, functor.map_zero] at this,
end⟩
lemma horseshoe_exact₁ (A : short_exact_sequence C) (n : ℕ) :
exact (((homological_complex.Fst C).obj (horseshoe A)).d (n + 2) (n + 1))
(((homological_complex.Fst C).obj (horseshoe A)).d (n + 1) n) :=
begin
dsimp [horseshoe_to_single₁],
erw [chain_complex.of_d, chain_complex.of_d],
dsimp [horseshoe_d, horseshoe_step],
set f := horseshoe_base_π (horseshoe_step A n).1,
set g := (horseshoe_step A n).2.2.2.1,
cases n;
convert exact_of_epi_comp_kernel.ι_comp_mono f.1 _ (horseshoe_base_π (horseshoe_ker f)).1 _
infer_instance _ _ using 1,
{ simp [step_fst_mono] },
{ simpa },
{ simp [step_fst_mono] },
{ simpa }
end
lemma horseshoe_exact₂ (A : short_exact_sequence C) (n : ℕ) :
exact (((homological_complex.Snd C).obj (horseshoe A)).d (n + 2) (n + 1))
(((homological_complex.Snd C).obj (horseshoe A)).d (n + 1) n) :=
begin
dsimp [horseshoe_to_single₂],
erw [chain_complex.of_d, chain_complex.of_d],
dsimp [horseshoe_d, horseshoe_step],
set f := horseshoe_base_π (horseshoe_step A n).1,
set g := (horseshoe_step A n).2.2.2.1,
cases n;
convert exact_of_epi_comp_kernel.ι_comp_mono f.2 _ (horseshoe_base_π (horseshoe_ker f)).2 _
infer_instance _ _ using 1,
{ simp [step_snd_mono] },
{ simpa },
{ simp [step_snd_mono] },
{ simpa }
end
lemma horseshoe_exact₃ (A : short_exact_sequence C) (n : ℕ) :
exact (((homological_complex.Trd C).obj (horseshoe A)).d (n + 2) (n + 1))
(((homological_complex.Trd C).obj (horseshoe A)).d (n + 1) n) :=
begin
dsimp [horseshoe_to_single₃],
erw [chain_complex.of_d, chain_complex.of_d],
dsimp [horseshoe_d, horseshoe_step],
set f := horseshoe_base_π (horseshoe_step A n).1,
set g := (horseshoe_step A n).2.2.2.1,
cases n;
convert exact_of_epi_comp_kernel.ι_comp_mono f.3 _ (horseshoe_base_π (horseshoe_ker f)).3 _
infer_instance _ _ using 1,
{ simp [step_trd_mono] },
{ simpa },
{ simp [step_trd_mono] },
{ simpa }
end
lemma horseshoe_is_projective_resolution₁ (A : short_exact_sequence C) :
chain_complex.is_projective_resolution
((homological_complex.Fst C).obj (horseshoe A)) A.1 (horseshoe_to_single₁ A) :=
{ projective := by rintro (_|n); { show projective (projective.over _), apply_instance },
exact₀ :=
begin
dsimp [horseshoe_to_single₁, chain_complex.to_single₀_equiv, horseshoe_π],
erw [chain_complex.of_d],
dsimp [horseshoe_d, horseshoe_step],
rw [category.id_comp, ← short_exact_sequence.comp_fst],
refine abelian.pseudoelement.exact_of_pseudo_exact _ _ ⟨λ a , _, λ a ha, _⟩,
{ rw [← abelian.pseudoelement.comp_apply, ← short_exact_sequence.comp_fst, category.assoc,
horseshoe_ker_ι_comp_base_π, comp_zero, short_exact_sequence.hom_zero_fst,
abelian.pseudoelement.zero_apply] },
{ obtain ⟨b, hb⟩ := is_snake_input.exists_of_exact exact_kernel_ι _ ha,
obtain ⟨c, hc⟩ := abelian.pseudoelement.pseudo_surjective_of_epi
(horseshoe_base_π (horseshoe_ker _)).1 b,
refine ⟨c, _⟩,
rw [short_exact_sequence.comp_fst, abelian.pseudoelement.comp_apply, hc, ← hb],
refl }
end,
exact := λ n, horseshoe_exact₁ A n,
epi := show epi (projective.π _), from infer_instance }
lemma horseshoe_is_projective_resolution₂ (A : short_exact_sequence C) :
chain_complex.is_projective_resolution
((homological_complex.Snd C).obj (horseshoe A)) A.2 (horseshoe_to_single₂ A) :=
{ projective := by rintro (_|n); { show projective (projective.over _ ⊞ projective.over _),
apply_instance },
exact₀ :=
begin
dsimp [horseshoe_to_single₂, chain_complex.to_single₀_equiv, horseshoe_π],
erw [chain_complex.of_d],
dsimp [horseshoe_d, horseshoe_step],
rw [category.id_comp, ← short_exact_sequence.comp_snd],
refine abelian.pseudoelement.exact_of_pseudo_exact _ _ ⟨λ a , _, λ a ha, _⟩,
{ rw [← abelian.pseudoelement.comp_apply, ← short_exact_sequence.comp_snd, category.assoc,
horseshoe_ker_ι_comp_base_π, comp_zero, short_exact_sequence.hom_zero_snd,
abelian.pseudoelement.zero_apply] },
{ obtain ⟨b, hb⟩ := is_snake_input.exists_of_exact exact_kernel_ι _ ha,
obtain ⟨c, hc⟩ := abelian.pseudoelement.pseudo_surjective_of_epi
(horseshoe_base_π (horseshoe_ker _)).2 b,
refine ⟨c, _⟩,
rw [short_exact_sequence.comp_snd, abelian.pseudoelement.comp_apply, hc, ← hb],
refl }
end,
exact := λ n, horseshoe_exact₂ A n,
epi := show epi (horseshoe_base_π _).2, from infer_instance }
lemma horseshoe_is_projective_resolution₃ (A : short_exact_sequence C) :
chain_complex.is_projective_resolution
((homological_complex.Trd C).obj (horseshoe A)) A.3 (horseshoe_to_single₃ A) :=
{ projective := by rintro (_|n); { show projective (projective.over _), apply_instance },
exact₀ :=
begin
dsimp [horseshoe_to_single₃, chain_complex.to_single₀_equiv, horseshoe_π],
erw [chain_complex.of_d],
dsimp [horseshoe_d, horseshoe_step],
rw [category.id_comp, ← short_exact_sequence.comp_trd],
refine abelian.pseudoelement.exact_of_pseudo_exact _ _ ⟨λ a , _, λ a ha, _⟩,
{ rw [← abelian.pseudoelement.comp_apply, ← short_exact_sequence.comp_trd, category.assoc,
horseshoe_ker_ι_comp_base_π, comp_zero, short_exact_sequence.hom_zero_trd,
abelian.pseudoelement.zero_apply] },
{ obtain ⟨b, hb⟩ := is_snake_input.exists_of_exact exact_kernel_ι _ ha,
obtain ⟨c, hc⟩ := abelian.pseudoelement.pseudo_surjective_of_epi
(horseshoe_base_π (horseshoe_ker _)).3 b,
refine ⟨c, _⟩,
rw [short_exact_sequence.comp_trd, abelian.pseudoelement.comp_apply, hc, ← hb],
refl }
end,
exact := λ n, horseshoe_exact₃ A n,
epi := show epi (projective.π _), from infer_instance }
.
lemma horseshoe_split (A : short_exact_sequence C) (n : ℕ) :
((horseshoe A).X n).split :=
begin
cases n;
exact ⟨biprod.fst, biprod.inr, biprod.inl_fst, biprod.inr_snd, biprod.inr_fst, biprod.total⟩
end
lemma horseshoe_f_comp_to_single₂_f (A : short_exact_sequence C) (i : ℕ) :
((horseshoe A).X i).f ≫ (horseshoe_to_single₂ A).f i =
(horseshoe_to_single₁ A).f i ≫ ((chain_complex.single₀ C).map A.f).f i :=
begin
cases i,
{ dsimp [horseshoe_to_single₂, horseshoe_to_single₁, horseshoe, horseshoe_obj, horseshoe_step,
horseshoe_base, horseshoe_π, horseshoe_base_π, chain_complex.to_single₀_equiv],
simp },
{ dsimp [horseshoe_to_single₂, horseshoe_to_single₁, horseshoe, horseshoe_obj, horseshoe_step,
horseshoe_base, horseshoe_π, horseshoe_base_π, chain_complex.to_single₀_equiv],
simp }
end
lemma horseshoe_g_comp_to_single₃_f (A : short_exact_sequence C) (i : ℕ) :
((horseshoe A).X i).g ≫ (horseshoe_to_single₃ A).f i =
(horseshoe_to_single₂ A).f i ≫ ((chain_complex.single₀ C).map A.g).f i :=
begin
cases i,
{ dsimp [horseshoe_to_single₂, horseshoe_to_single₃, chain_complex.to_single₀_equiv, horseshoe,
horseshoe_obj, horseshoe_π, horseshoe_step, horseshoe_base, horseshoe_base_π],
ext,
{ simp },
{ simp } },
{ dsimp [horseshoe_to_single₂, horseshoe_to_single₃, horseshoe, horseshoe_obj, horseshoe_step,
horseshoe_base, horseshoe_π, horseshoe_base_π, chain_complex.to_single₀_equiv],
simp }
end
end short_exact_sequence
|
import data.list.basic data.list.perm
import tactic tactic.basic tactic.omega
namespace tree_sort
inductive tree (α: Type*) --[has_lt α] [h : decidable_rel ((<) : α → α → Prop)]
| l {} : tree
| n : tree -> α -> tree -> tree
def tree.has_dec_eq {α: Type*} [e: decidable_eq α]: decidable_eq (tree α)
| tree.l tree.l := is_true rfl
| tree.l (tree.n l x r) := is_false (λ h, tree.no_confusion h)
| (tree.n l x r) tree.l := is_false (λ h, tree.no_confusion h)
| (tree.n l1 x r1) (tree.n l2 y r2) :=
match (e x y) with
| is_true h := (
match ((tree.has_dec_eq l1 l2), (tree.has_dec_eq r1 r2)) with
| (is_true hl, is_true hr) := is_true (begin
apply eq.subst hl,
apply eq.subst hr,
apply eq.subst h, reflexivity,
end)
| (is_false hln, _) := is_false (λ hc, tree.no_confusion hc (λ hl _ _, absurd hl hln))
| (_, is_false hrn) := is_false (λ hc, tree.no_confusion hc (λ _ _ hr, absurd hr hrn))
end
)
| is_false hn := is_false (λ h, tree.no_confusion h (λ _ h _, absurd h hn))
end
.
instance {α} [decidable_eq α] : decidable_eq (tree α) := tree.has_dec_eq
@[simp, reducible]
def tree.ins {α: Type*} (lt : α → α → bool) (x: α): tree α -> tree α
| tree.l := tree.n tree.l x tree.l
| (tree.n t y s) := cond (lt y x) (tree.n t y (tree.ins s)) (tree.n (tree.ins t) y s)
@[simp, reducible]
def tree.flat {α: Type*}: tree α -> list α
| tree.l := []
| (tree.n t x s) := (tree.flat t) ++ (x::(tree.flat s))
@[simp, reducible]
def build {α: Type*} (lt : α → α → bool) (l: list α) :=
(list.foldr (λ x t , tree.ins lt x t) tree.l l)
@[simp, reducible]
def sort {α: Type*} (lt : α → α → bool) (l: list α) :=
tree.flat (build lt l)
@[simp, reducible]
def mem {α} [decidable_eq α] (x: α): tree α -> Prop
| tree.l := false
| (tree.n l y r) := (mem l) ∨ (x = y) ∨ (mem r)
@[simp, reducible]
def subtree {α} [decidable_eq α]: tree α -> tree α -> Prop
| tree.l _ := true
| _ tree.l := false
| t1 t2@(tree.n l2 y r2) := (
if (t1 = t2) then true
else if (t1 = l2) then true
else if (t1 = r2) then true
else false
)
@[simp]
lemma flat_mem {α} [decidable_eq α]: ∀ (x: α) (t: tree α), mem x t -> x ∈ (t.flat) := begin
intros x t h,
induction t, {
tautology,
},
case tree.n: {
simp, simp at h,
tautology,
}
end
@[simp]
lemma mem_ins {α} {lt: α → α → bool} [decidable_eq α] (x y: α) (t: tree α): mem x t -> mem x (tree.ins lt y t) := begin
intro h,
induction t, tautology,
case tree.n: l z r ih_l ih_r {
simp,
cases (lt z y); {
simp, simp at h,
cases h, {
exact (or.inl $ ih_l h) <|> exact (or.inl h),
}, {
cases h,
exact (or.inr (or.inl h)),
exact (or.inr (or.inr h)) <|> exact (or.inr (or.inr $ ih_r h)),
}
},
}
end
@[simp]
lemma ins_mem_id {α} {lt: α → α → bool} [decidable_eq α] (x: α) (t: tree α): mem x (tree.ins lt x t) := begin
induction t, simp,
case tree.n: l y r ih_l ih_r {
simp,
cases (lt y x), {
simp, apply or.inl, assumption,
}, {
simp, apply or.inr, apply or.inr, assumption,
}
}
end
@[simp]
lemma mem_cons {α} {lt: α → α → bool} [decidable_eq α] (x: α) (l: list α): mem x (build lt (x::l)) := by simp.
@[simp]
lemma mem_build {α} {lt: α → α → bool} [decidable_eq α] (x: α) (l: list α): x ∈ l -> mem x (build lt l) := begin
intro h,
induction l, {
tautology,
},
case list.cons: y t ih{
by_cases hxy: x = y, {
rw hxy, simp,
}, {
have: x ∈ t, {
simp at h,
apply or.elim h,
intro, contradiction,
intro, assumption,
},
unfold build,
simp,
rw <-build,
apply mem_ins,
exact (ih this)
}
}
end
@[simp]
lemma flat_nil {α}: (@tree.l α).flat = [] := by simp.
#check list.perm.trans
@[simp]
lemma flat_cons {α} {lt: α → α → bool} [decidable_eq α]:
∀ (x: α) (t: tree α), (tree.ins lt x t).flat ~ x::(t.flat) := begin
intros,
induction t, simp,
case tree.n: tl y tr ih_l ih_r {
simp,
cases (lt y x), {
simp,
apply list.perm_app_left _ ih_l,
}, {
simp,
have: x :: (tree.flat tl ++ y :: tree.flat tr) ~ (tree.flat tl) ++ x::y::(tree.flat tr),
by apply list.perm_middle.symm,
symmetry,
transitivity (tree.flat tl) ++ x::y::(tree.flat tr),
assumption, clear this,
apply list.perm_app_right,
transitivity, {
apply list.perm.swap y x,
}, {
from (list.perm_cons y).mpr ih_r.symm,
},
}
}
end
@[simp]
lemma perm_cons_left {α} [decidable_eq α]:
∀ {a: α} {l1 l2 rhs: list α} (p: l1 ~ l2), (a::l1) ~ rhs ↔ (a::l2) ~ rhs := begin
intros,
apply iff.intro, {
intro h,
have: a::l1 ~ a::l2, by apply (list.perm_cons a).mpr p,
apply this.symm.trans, assumption,
}, {
intro h,
have: a::l1 ~ a::l2, by apply (list.perm_cons a).mpr p,
apply this.trans, assumption,
}
end
@[simp]
lemma perm_subterm {α} [decidable_eq α]:
∀ {l1 l2 ini rhs: list α} (p: l1 ~ l2), (ini++l1) ~ rhs ↔ (ini++l2) ~ rhs := begin
intros,
apply iff.intro, {
intro h,
have: ini++l1 ~ ini++l2, by apply list.perm_app_right ini p,
apply this.symm.trans, assumption,
}, {
intro h,
have: ini++l1 ~ ini++l2, by apply list.perm_app_right ini p,
apply this.trans, assumption,
}
end
@[simp]
lemma flat_ins {α} {lt: α → α → bool} [decidable_eq α]:
∀ (x: α) (t: tree α) (l: list α), l ~ (t.flat) -> (x::l) ~ (tree.ins lt x t).flat := begin
intros x t l h,
induction t, {
simp, simp at h, rw list.perm_nil at h, rw h,
},
case tree.n: tl y tr ih_l ih_r {
simp, cases (lt y x), {
simp, simp at h,
apply ((list.perm_cons x).mpr h).trans,
have: tree.flat (tree.ins lt x tl) ~ x::(tree.flat tl), by apply flat_cons,
apply (list.perm_app_left (y::tr.flat) this).symm.trans,
reflexivity,
}, {
simp, simp at h,
apply ((list.perm_cons x).mpr h).trans,
have h1: tree.flat (tree.ins lt x tr) ~ x::(tree.flat tr), by apply flat_cons,
have: y::tree.flat (tree.ins lt x tr) ~ y::x::(tree.flat tr),
by apply ((list.perm_cons y).mpr h1),
symmetry,
apply (list.perm_app_right tl.flat this).trans, clear this h1,
have: tree.flat tl ++ y :: x :: tree.flat tr ~ tree.flat tl ++ x :: y :: tree.flat tr, {
apply list.perm_app_right tl.flat,
apply list.perm.swap x y,
},
apply this.trans, clear this,
have: x :: (tree.flat tl ++ y :: tree.flat tr) ~ (tree.flat tl ++ x :: y :: tree.flat tr),
by apply list.perm_middle.symm,
apply this.symm.trans, reflexivity,
}
}
end
protected theorem sort_equiv {α} [decidable_eq α] (lt : α → α → bool) (l: list α): l ~ (sort lt l) := begin
induction l, reflexivity,
case list.cons: h t ih {
simp, simp at ih,
apply flat_ins, assumption,
}
end
example: (tree_sort.sort (λ x y, to_bool (x < y)) (list.range 100)).length = 100 := by omega_nat ff
end tree_sort
|
from collections import defaultdict
from numpy import mod, pi
from .qlisp import QLispError, gateName
def call_macro(gate, st):
qubits = st[1]
if isinstance(st[0], str):
args = ()
else:
args = st[0][1:]
try:
yield from gate(qubits, *args)
except:
raise QLispError(f'extend macro {st} error.')
def extend_control_gate(st, scope):
# TODO
gate, qubits = st
if isinstance(gate[1], str):
if gate[1] == 'Z':
return [('CZ', qubits)]
elif gate[1] == 'X':
return [('Cnot', qubits)]
else:
return [st]
else:
return [st]
def extend_macro(qlisp, lib):
for st in qlisp:
if gateName(st) == 'C':
yield from extend_control_gate(st, lib)
else:
gate = lib.getGate(gateName(st))
if gate is None:
yield st
else:
for st in call_macro(gate, st):
yield from extend_macro([st], lib)
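# Registry mapping gate names to their virtual-Z commutation rules (see add_VZ_rule below).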
_VZ_rules = {}
def add_VZ_rule(gateName, rule):
_VZ_rules[gateName] = rule
def remove_VZ_rule(gateName, rule):
del _VZ_rules[gateName]
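# Rule for P (virtual-Z) gates: the angle is absorbed into the tracked phase; no statement is emitted.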
def _VZ_P(st, phaseList):
return [], [mod(phaseList[0] + st[0][1], 2 * pi)]
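# Rule for rfUnitary: a pending virtual-Z phase commutes through by subtracting it from the drive phase phi.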
def _VZ_rfUnitary(st, phaseList):
(_, theta, phi), qubit = st
return [(('rfUnitary', theta, phi - phaseList[0]), qubit)], phaseList
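# Rule for Reset/Measure: any pending virtual-Z phase is irrelevant afterwards and is simply discarded.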
def _VZ_clear(st, phaseList):
return [st], [0] * len(phaseList)
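# Rule for operations that leave pending virtual-Z phases untouched (CZ, I, Barrier, Delay).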
def _VZ_exchangable(st, phaseList):
return [st], phaseList
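# Rule for SWAP/iSWAP: the pending phases of the two qubits are exchanged.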
def _VZ_swap(st, phaseList):
return [st], phaseList[::-1]
add_VZ_rule('P', _VZ_P)
add_VZ_rule('rfUnitary', _VZ_rfUnitary)
add_VZ_rule('Reset', _VZ_clear)
add_VZ_rule('Measure', _VZ_clear)
add_VZ_rule('CZ', _VZ_exchangable)
add_VZ_rule('I', _VZ_exchangable)
add_VZ_rule('Barrier', _VZ_exchangable)
add_VZ_rule('Delay', _VZ_exchangable)
add_VZ_rule('iSWAP', _VZ_swap)
add_VZ_rule('SWAP', _VZ_swap)
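# Look up and apply the rule for this gate; returns (statements to emit, updated pending phases).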
def exchangeRzWithGate(st, phaseList, lib):
gate = gateName(st)
if gate in _VZ_rules:
return _VZ_rules[gate](st, phaseList)
else:
raise Exception('Unknown VZ exchange rule.')
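# Push pending virtual-Z phases (accumulated from P gates) forward through the
# circuit, re-emitting any leftover phase as an explicit P gate per qubit.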
def reduceVirtualZ(qlisp, lib):
hold = defaultdict(lambda: 0)
for st in qlisp:
target = st[1]
if isinstance(target, (int, str)):
target = (target, )
try:
stList, phaseList = exchangeRzWithGate(st,
[hold[q] for q in target],
lib)
yield from stList
for q, p in zip(target, phaseList):
hold[q] = mod(p, 2 * pi)
except:
for q in target:
if hold[q] != 0:
yield (('P', hold[q]), q)
hold[q] = 0
yield st
for q in hold:
if hold[q] != 0:
yield (('P', hold[q]), q)
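# Illustrative sketch (not part of the original module): a leading virtual-Z
# phase is commuted through a following rfUnitary by shifting its phase
# argument and re-appears as a trailing P gate. With the rules above (lib is
# not consulted for these gates):
#
#     circuit = [(('P', pi / 2), 'Q0'), (('rfUnitary', pi, 0), 'Q0')]
#     list(reduceVirtualZ(circuit, lib=None))
#     # -> [(('rfUnitary', pi, -pi / 2), 'Q0'), (('P', pi / 2), 'Q0')]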
|
[STATEMENT]
lemma seq_suble:
assumes sf: "strict_mono (f :: nat \<Rightarrow> nat)"
shows "n \<le> f n"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. n \<le> f n
[PROOF STEP]
proof (induct n)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. 0 \<le> f 0
2. \<And>n. n \<le> f n \<Longrightarrow> Suc n \<le> f (Suc n)
[PROOF STEP]
case 0
[PROOF STATE]
proof (state)
this:
goal (2 subgoals):
1. 0 \<le> f 0
2. \<And>n. n \<le> f n \<Longrightarrow> Suc n \<le> f (Suc n)
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 \<le> f 0
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
0 \<le> f 0
goal (1 subgoal):
1. \<And>n. n \<le> f n \<Longrightarrow> Suc n \<le> f (Suc n)
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>n. n \<le> f n \<Longrightarrow> Suc n \<le> f (Suc n)
[PROOF STEP]
case (Suc n)
[PROOF STATE]
proof (state)
this:
n \<le> f n
goal (1 subgoal):
1. \<And>n. n \<le> f n \<Longrightarrow> Suc n \<le> f (Suc n)
[PROOF STEP]
with sf [unfolded strict_mono_Suc_iff, rule_format, of n]
[PROOF STATE]
proof (chain)
picking this:
f n < f (Suc n)
n \<le> f n
[PROOF STEP]
have "n < f (Suc n)"
[PROOF STATE]
proof (prove)
using this:
f n < f (Suc n)
n \<le> f n
goal (1 subgoal):
1. n < f (Suc n)
[PROOF STEP]
by arith
[PROOF STATE]
proof (state)
this:
n < f (Suc n)
goal (1 subgoal):
1. \<And>n. n \<le> f n \<Longrightarrow> Suc n \<le> f (Suc n)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
n < f (Suc n)
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
n < f (Suc n)
goal (1 subgoal):
1. Suc n \<le> f (Suc n)
[PROOF STEP]
by arith
[PROOF STATE]
proof (state)
this:
Suc n \<le> f (Suc n)
goal:
No subgoals!
[PROOF STEP]
qed |
/-
Copyright (c) 2021 Heather Macbeth. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Heather Macbeth, Eric Wieser
-/
import analysis.normed_space.basic
import analysis.normed_space.pi_Lp
import analysis.inner_product_space.pi_L2
/-!
# Matrices as a normed space
In this file we provide the following non-instances for norms on matrices:
* The elementwise norm:
* `matrix.seminormed_add_comm_group`
* `matrix.normed_add_comm_group`
* `matrix.normed_space`
* The Frobenius norm:
* `matrix.frobenius_seminormed_add_comm_group`
* `matrix.frobenius_normed_add_comm_group`
* `matrix.frobenius_normed_space`
* `matrix.frobenius_normed_ring`
* `matrix.frobenius_normed_algebra`
* The $L^\infty$ operator norm:
* `matrix.linfty_op_seminormed_add_comm_group`
* `matrix.linfty_op_normed_add_comm_group`
* `matrix.linfty_op_normed_space`
* `matrix.linfty_op_non_unital_semi_normed_ring`
* `matrix.linfty_op_semi_normed_ring`
* `matrix.linfty_op_non_unital_normed_ring`
* `matrix.linfty_op_normed_ring`
* `matrix.linfty_op_normed_algebra`
These are not declared as instances because there are several natural choices for defining the norm
of a matrix.
-/
noncomputable theory
open_locale big_operators nnreal matrix
namespace matrix
variables {R l m n α β : Type*} [fintype l] [fintype m] [fintype n]
/-! ### The elementwise supremum norm -/
section linf_linf
section seminormed_add_comm_group
variables [seminormed_add_comm_group α] [seminormed_add_comm_group β]
/-- Seminormed group instance (using sup norm of sup norm) for matrices over a seminormed group. Not
declared as an instance because there are several natural choices for defining the norm of a
matrix. -/
protected def seminormed_add_comm_group : seminormed_add_comm_group (matrix m n α) :=
pi.seminormed_add_comm_group
local attribute [instance] matrix.seminormed_add_comm_group
lemma norm_le_iff {r : ℝ} (hr : 0 ≤ r) {A : matrix m n α} :
‖A‖ ≤ r ↔ ∀ i j, ‖A i j‖ ≤ r :=
by simp [pi_norm_le_iff_of_nonneg hr]
lemma nnnorm_le_iff {r : ℝ≥0} {A : matrix m n α} :
‖A‖₊ ≤ r ↔ ∀ i j, ‖A i j‖₊ ≤ r :=
by simp [pi_nnnorm_le_iff]
lemma norm_lt_iff {r : ℝ} (hr : 0 < r) {A : matrix m n α} :
‖A‖ < r ↔ ∀ i j, ‖A i j‖ < r :=
by simp [pi_norm_lt_iff hr]
lemma nnnorm_lt_iff {r : ℝ≥0} (hr : 0 < r) {A : matrix m n α} :
‖A‖₊ < r ↔ ∀ i j, ‖A i j‖₊ < r :=
by simp [pi_nnnorm_lt_iff hr]
lemma norm_entry_le_entrywise_sup_norm (A : matrix m n α) {i : m} {j : n} :
‖A i j‖ ≤ ‖A‖ :=
(norm_le_pi_norm (A i) j).trans (norm_le_pi_norm A i)
lemma nnnorm_entry_le_entrywise_sup_nnnorm (A : matrix m n α) {i : m} {j : n} :
‖A i j‖₊ ≤ ‖A‖₊ :=
(nnnorm_le_pi_nnnorm (A i) j).trans (nnnorm_le_pi_nnnorm A i)
@[simp] lemma nnnorm_map_eq (A : matrix m n α) (f : α → β) (hf : ∀ a, ‖f a‖₊ = ‖a‖₊) :
‖A.map f‖₊ = ‖A‖₊ :=
by simp_rw [pi.nnnorm_def, matrix.map_apply, hf]
@[simp] lemma norm_map_eq (A : matrix m n α) (f : α → β) (hf : ∀ a, ‖f a‖ = ‖a‖) :
‖A.map f‖ = ‖A‖ :=
(congr_arg (coe : ℝ≥0 → ℝ) $ nnnorm_map_eq A f $ λ a, subtype.ext $ hf a : _)
@[simp] lemma nnnorm_transpose (A : matrix m n α) : ‖Aᵀ‖₊ = ‖A‖₊ :=
by { simp_rw [pi.nnnorm_def], exact finset.sup_comm _ _ _ }
@[simp] lemma norm_transpose (A : matrix m n α) : ‖Aᵀ‖ = ‖A‖ := congr_arg coe $ nnnorm_transpose A
@[simp] lemma nnnorm_conj_transpose [star_add_monoid α] [normed_star_group α] (A : matrix m n α) :
‖Aᴴ‖₊ = ‖A‖₊ :=
(nnnorm_map_eq _ _ nnnorm_star).trans A.nnnorm_transpose
@[simp] lemma norm_conj_transpose [star_add_monoid α] [normed_star_group α] (A : matrix m n α) :
‖Aᴴ‖ = ‖A‖ :=
congr_arg coe $ nnnorm_conj_transpose A
instance [star_add_monoid α] [normed_star_group α] : normed_star_group (matrix m m α) :=
⟨norm_conj_transpose⟩
@[simp] lemma nnnorm_col (v : m → α) : ‖col v‖₊ = ‖v‖₊ := by simp [pi.nnnorm_def]
@[simp] lemma norm_col (v : m → α) : ‖col v‖ = ‖v‖ := congr_arg coe $ nnnorm_col v
@[simp] lemma nnnorm_row (v : n → α) : ‖row v‖₊ = ‖v‖₊ := by simp [pi.nnnorm_def]
@[simp] lemma norm_row (v : n → α) : ‖row v‖ = ‖v‖ := congr_arg coe $ nnnorm_row v
@[simp] lemma nnnorm_diagonal [decidable_eq n] (v : n → α) : ‖diagonal v‖₊ = ‖v‖₊ :=
begin
simp_rw pi.nnnorm_def,
congr' 1 with i : 1,
refine le_antisymm (finset.sup_le $ λ j hj, _) _,
{ obtain rfl | hij := eq_or_ne i j,
{ rw diagonal_apply_eq },
{ rw [diagonal_apply_ne _ hij, nnnorm_zero],
exact zero_le _ }, },
{ refine eq.trans_le _ (finset.le_sup (finset.mem_univ i)),
rw diagonal_apply_eq }
end
@[simp] lemma norm_diagonal [decidable_eq n] (v : n → α) : ‖diagonal v‖ = ‖v‖ :=
congr_arg coe $ nnnorm_diagonal v
/-- Note this is safe as an instance as it carries no data. -/
@[nolint fails_quickly]
instance [nonempty n] [decidable_eq n] [has_one α] [norm_one_class α] :
norm_one_class (matrix n n α) :=
⟨(norm_diagonal _).trans $ norm_one⟩
end seminormed_add_comm_group
/-- Normed group instance (using sup norm of sup norm) for matrices over a normed group. Not
declared as an instance because there are several natural choices for defining the norm of a
matrix. -/
protected def normed_add_comm_group [normed_add_comm_group α] :
normed_add_comm_group (matrix m n α) :=
pi.normed_add_comm_group
section normed_space
local attribute [instance] matrix.seminormed_add_comm_group
variables [normed_field R] [seminormed_add_comm_group α] [normed_space R α]
/-- Normed space instance (using sup norm of sup norm) for matrices over a normed space. Not
declared as an instance because there are several natural choices for defining the norm of a
matrix. -/
protected def normed_space : normed_space R (matrix m n α) :=
pi.normed_space
end normed_space
end linf_linf
/-! ### The $L_\infty$ operator norm
This section defines the matrix norm $\|A\|_\infty = \operatorname{sup}_i (\sum_j \|A_{ij}\|)$.
Note that this is equivalent to the operator norm, considering $A$ as a linear map between two
$L^\infty$ spaces.
-/
section linfty_op
/-- Seminormed group instance (using sup norm of L1 norm) for matrices over a seminormed group. Not
declared as an instance because there are several natural choices for defining the norm of a
matrix. -/
local attribute [instance]
protected def linfty_op_seminormed_add_comm_group [seminormed_add_comm_group α] :
seminormed_add_comm_group (matrix m n α) :=
(by apply_instance : seminormed_add_comm_group (m → pi_Lp 1 (λ j : n, α)))
/-- Normed group instance (using sup norm of L1 norm) for matrices over a normed ring. Not
declared as an instance because there are several natural choices for defining the norm of a
matrix. -/
local attribute [instance]
protected def linfty_op_normed_add_comm_group [normed_add_comm_group α] :
normed_add_comm_group (matrix m n α) :=
(by apply_instance : normed_add_comm_group (m → pi_Lp 1 (λ j : n, α)))
/-- Normed space instance (using sup norm of L1 norm) for matrices over a normed space. Not
declared as an instance because there are several natural choices for defining the norm of a
matrix. -/
local attribute [instance]
protected def linfty_op_normed_space [normed_field R] [seminormed_add_comm_group α]
[normed_space R α] :
normed_space R (matrix m n α) :=
(by apply_instance : normed_space R (m → pi_Lp 1 (λ j : n, α)))
section seminormed_add_comm_group
variables [seminormed_add_comm_group α]
lemma linfty_op_norm_def (A : matrix m n α) :
‖A‖ = ((finset.univ : finset m).sup (λ i : m, ∑ j : n, ‖A i j‖₊) : ℝ≥0) :=
by simp [pi.norm_def, pi_Lp.nnnorm_eq_sum ennreal.one_ne_top]
lemma linfty_op_nnnorm_def (A : matrix m n α) :
‖A‖₊ = (finset.univ : finset m).sup (λ i : m, ∑ j : n, ‖A i j‖₊) :=
subtype.ext $ linfty_op_norm_def A
@[simp] lemma linfty_op_nnnorm_col (v : m → α) :
‖col v‖₊ = ‖v‖₊ :=
begin
rw [linfty_op_nnnorm_def, pi.nnnorm_def],
simp,
end
@[simp] lemma linfty_op_norm_col (v : m → α) :
‖col v‖ = ‖v‖ :=
congr_arg coe $ linfty_op_nnnorm_col v
@[simp] lemma linfty_op_nnnorm_row (v : n → α) :
‖row v‖₊ = ∑ i, ‖v i‖₊ :=
by simp [linfty_op_nnnorm_def]
@[simp] lemma linfty_op_norm_row (v : n → α) :
‖row v‖ = ∑ i, ‖v i‖ :=
(congr_arg coe $ linfty_op_nnnorm_row v).trans $ by simp [nnreal.coe_sum]
@[simp]
lemma linfty_op_nnnorm_diagonal [decidable_eq m] (v : m → α) :
‖diagonal v‖₊ = ‖v‖₊ :=
begin
rw [linfty_op_nnnorm_def, pi.nnnorm_def],
congr' 1 with i : 1,
refine (finset.sum_eq_single_of_mem _ (finset.mem_univ i) $ λ j hj hij, _).trans _,
{ rw [diagonal_apply_ne' _ hij, nnnorm_zero] },
{ rw [diagonal_apply_eq] },
end
@[simp]
lemma linfty_op_norm_diagonal [decidable_eq m] (v : m → α) :
‖diagonal v‖ = ‖v‖ :=
congr_arg coe $ linfty_op_nnnorm_diagonal v
end seminormed_add_comm_group
section non_unital_semi_normed_ring
variables [non_unital_semi_normed_ring α]
lemma linfty_op_nnnorm_mul (A : matrix l m α) (B : matrix m n α) : ‖A ⬝ B‖₊ ≤ ‖A‖₊ * ‖B‖₊ :=
begin
simp_rw [linfty_op_nnnorm_def, matrix.mul_apply],
calc finset.univ.sup (λ i, ∑ k, ‖∑ j, A i j * B j k‖₊)
≤ finset.univ.sup (λ i, ∑ k j, ‖A i j‖₊ * ‖B j k‖₊) :
finset.sup_mono_fun $ λ i hi, finset.sum_le_sum $ λ k hk, nnnorm_sum_le_of_le _ $ λ j hj,
nnnorm_mul_le _ _
... = finset.univ.sup (λ i, ∑ j, (‖A i j‖₊ * ∑ k, ‖B j k‖₊)) :
by simp_rw [@finset.sum_comm _ m n, finset.mul_sum]
... ≤ finset.univ.sup (λ i, ∑ j, ‖A i j‖₊ * finset.univ.sup (λ i, ∑ j, ‖B i j‖₊)) :
finset.sup_mono_fun $ λ i hi, finset.sum_le_sum $ λ j hj,
mul_le_mul_of_nonneg_left (finset.le_sup hj) (zero_le _)
... ≤ finset.univ.sup (λ i, ∑ j, ‖A i j‖₊) * finset.univ.sup (λ i, ∑ j, ‖B i j‖₊) :
by simp_rw [←finset.sum_mul, ←nnreal.finset_sup_mul],
end
lemma linfty_op_norm_mul (A : matrix l m α) (B : matrix m n α) : ‖A ⬝ B‖ ≤ ‖A‖ * ‖B‖ :=
linfty_op_nnnorm_mul _ _
lemma linfty_op_nnnorm_mul_vec (A : matrix l m α) (v : m → α) : ‖A.mul_vec v‖₊ ≤ ‖A‖₊ * ‖v‖₊ :=
begin
rw [←linfty_op_nnnorm_col (A.mul_vec v), ←linfty_op_nnnorm_col v],
exact linfty_op_nnnorm_mul A (col v),
end
lemma linfty_op_norm_mul_vec (A : matrix l m α) (v : m → α) : ‖matrix.mul_vec A v‖ ≤ ‖A‖ * ‖v‖ :=
linfty_op_nnnorm_mul_vec _ _
end non_unital_semi_normed_ring
/-- Seminormed non-unital ring instance (using sup norm of L1 norm) for matrices over a semi normed
non-unital ring. Not declared as an instance because there are several natural choices for defining
the norm of a matrix. -/
local attribute [instance]
protected def linfty_op_non_unital_semi_normed_ring [non_unital_semi_normed_ring α] :
non_unital_semi_normed_ring (matrix n n α) :=
{ norm_mul := linfty_op_norm_mul,
.. matrix.linfty_op_seminormed_add_comm_group,
.. matrix.non_unital_ring }
/-- The `L₁-L∞` norm preserves one on non-empty matrices. Note this is safe as an instance, as it
carries no data. -/
instance linfty_op_norm_one_class [semi_normed_ring α] [norm_one_class α] [decidable_eq n]
[nonempty n] : norm_one_class (matrix n n α) :=
{ norm_one := (linfty_op_norm_diagonal _).trans norm_one }
/-- Seminormed ring instance (using sup norm of L1 norm) for matrices over a semi normed ring. Not
declared as an instance because there are several natural choices for defining the norm of a
matrix. -/
local attribute [instance]
protected def linfty_op_semi_normed_ring [semi_normed_ring α] [decidable_eq n] :
semi_normed_ring (matrix n n α) :=
{ .. matrix.linfty_op_non_unital_semi_normed_ring,
.. matrix.ring }
/-- Normed non-unital ring instance (using sup norm of L1 norm) for matrices over a normed
non-unital ring. Not declared as an instance because there are several natural choices for defining
the norm of a matrix. -/
local attribute [instance]
protected def linfty_op_non_unital_normed_ring [non_unital_normed_ring α] :
non_unital_normed_ring (matrix n n α) :=
{ ..matrix.linfty_op_non_unital_semi_normed_ring }
/-- Normed ring instance (using sup norm of L1 norm) for matrices over a normed ring. Not
declared as an instance because there are several natural choices for defining the norm of a
matrix. -/
local attribute [instance]
protected def linfty_op_normed_ring [normed_ring α] [decidable_eq n] :
normed_ring (matrix n n α) :=
{ ..matrix.linfty_op_semi_normed_ring }
/-- Normed algebra instance (using sup norm of L1 norm) for matrices over a normed algebra. Not
declared as an instance because there are several natural choices for defining the norm of a
matrix. -/
local attribute [instance]
protected def linfty_op_normed_algebra [normed_field R] [semi_normed_ring α] [normed_algebra R α]
[decidable_eq n] :
normed_algebra R (matrix n n α) :=
{ ..matrix.linfty_op_normed_space }
end linfty_op
/-! ### The Frobenius norm
This is defined as $\|A\| = \sqrt{\sum_{i,j} \|A_{ij}\|^2}$.
When the matrix is over the real or complex numbers, this norm is submultiplicative.
-/
section frobenius
open_locale matrix big_operators
/-- Seminormed group instance (using frobenius norm) for matrices over a seminormed group. Not
declared as an instance because there are several natural choices for defining the norm of a
matrix. -/
local attribute [instance]
def frobenius_seminormed_add_comm_group [seminormed_add_comm_group α] :
seminormed_add_comm_group (matrix m n α) :=
(by apply_instance : seminormed_add_comm_group (pi_Lp 2 (λ i : m, pi_Lp 2 (λ j : n, α))))
/-- Normed group instance (using frobenius norm) for matrices over a normed group. Not
declared as an instance because there are several natural choices for defining the norm of a
matrix. -/
local attribute [instance]
def frobenius_normed_add_comm_group [normed_add_comm_group α] :
normed_add_comm_group (matrix m n α) :=
(by apply_instance : normed_add_comm_group (pi_Lp 2 (λ i : m, pi_Lp 2 (λ j : n, α))))
/-- Normed space instance (using frobenius norm) for matrices over a normed space. Not
declared as an instance because there are several natural choices for defining the norm of a
matrix. -/
local attribute [instance]
def frobenius_normed_space [normed_field R] [seminormed_add_comm_group α] [normed_space R α] :
normed_space R (matrix m n α) :=
(by apply_instance : normed_space R (pi_Lp 2 (λ i : m, pi_Lp 2 (λ j : n, α))))
section seminormed_add_comm_group
variables [seminormed_add_comm_group α] [seminormed_add_comm_group β]
lemma frobenius_nnnorm_def (A : matrix m n α) :
‖A‖₊ = (∑ i j, ‖A i j‖₊ ^ (2 : ℝ)) ^ (1/2 : ℝ) :=
by simp_rw [pi_Lp.nnnorm_eq_of_L2, nnreal.sq_sqrt, nnreal.sqrt_eq_rpow, nnreal.rpow_two]
lemma frobenius_norm_def (A : matrix m n α) :
‖A‖ = (∑ i j, ‖A i j‖ ^ (2 : ℝ)) ^ (1/2 : ℝ) :=
(congr_arg coe (frobenius_nnnorm_def A)).trans $ by simp [nnreal.coe_sum]
@[simp] lemma frobenius_nnnorm_map_eq (A : matrix m n α) (f : α → β) (hf : ∀ a, ‖f a‖₊ = ‖a‖₊) :
‖A.map f‖₊ = ‖A‖₊ :=
by simp_rw [frobenius_nnnorm_def, matrix.map_apply, hf]
@[simp] lemma frobenius_norm_map_eq (A : matrix m n α) (f : α → β) (hf : ∀ a, ‖f a‖ = ‖a‖) :
‖A.map f‖ = ‖A‖ :=
(congr_arg (coe : ℝ≥0 → ℝ) $ frobenius_nnnorm_map_eq A f $ λ a, subtype.ext $ hf a : _)
@[simp] lemma frobenius_nnnorm_transpose (A : matrix m n α) : ‖Aᵀ‖₊ = ‖A‖₊ :=
by { rw [frobenius_nnnorm_def, frobenius_nnnorm_def, finset.sum_comm], refl }
@[simp] lemma frobenius_norm_transpose (A : matrix m n α) : ‖Aᵀ‖ = ‖A‖ :=
congr_arg coe $ frobenius_nnnorm_transpose A
@[simp] lemma frobenius_nnnorm_conj_transpose [star_add_monoid α] [normed_star_group α]
(A : matrix m n α) : ‖Aᴴ‖₊ = ‖A‖₊ :=
(frobenius_nnnorm_map_eq _ _ nnnorm_star).trans A.frobenius_nnnorm_transpose
@[simp] lemma frobenius_norm_conj_transpose [star_add_monoid α] [normed_star_group α]
(A : matrix m n α) : ‖Aᴴ‖ = ‖A‖ :=
congr_arg coe $ frobenius_nnnorm_conj_transpose A
instance frobenius_normed_star_group [star_add_monoid α] [normed_star_group α] :
normed_star_group (matrix m m α) :=
⟨frobenius_norm_conj_transpose⟩
@[simp] lemma frobenius_norm_row (v : m → α) : ‖row v‖ = ‖(pi_Lp.equiv 2 _).symm v‖ :=
begin
rw [frobenius_norm_def, fintype.sum_unique, pi_Lp.norm_eq_of_L2, real.sqrt_eq_rpow],
simp only [row_apply, real.rpow_two, pi_Lp.equiv_symm_apply],
end
@[simp] lemma frobenius_nnnorm_row (v : m → α) : ‖row v‖₊ = ‖(pi_Lp.equiv 2 _).symm v‖₊ :=
subtype.ext $ frobenius_norm_row v
@[simp] lemma frobenius_norm_col (v : n → α) : ‖col v‖ = ‖(pi_Lp.equiv 2 _).symm v‖ :=
begin
simp_rw [frobenius_norm_def, fintype.sum_unique, pi_Lp.norm_eq_of_L2, real.sqrt_eq_rpow],
simp only [col_apply, real.rpow_two, pi_Lp.equiv_symm_apply]
end
@[simp] lemma frobenius_nnnorm_col (v : n → α) : ‖col v‖₊ = ‖(pi_Lp.equiv 2 _).symm v‖₊ :=
subtype.ext $ frobenius_norm_col v
@[simp] lemma frobenius_nnnorm_diagonal [decidable_eq n] (v : n → α) :
‖diagonal v‖₊ = ‖(pi_Lp.equiv 2 _).symm v‖₊ :=
begin
simp_rw [frobenius_nnnorm_def, ←finset.sum_product', finset.univ_product_univ,
pi_Lp.nnnorm_eq_of_L2],
let s := (finset.univ : finset n).map ⟨λ i : n, (i, i), λ i j h, congr_arg prod.fst h⟩,
rw ←finset.sum_subset (finset.subset_univ s) (λ i hi his, _),
{ rw [finset.sum_map, nnreal.sqrt_eq_rpow],
dsimp,
simp_rw [diagonal_apply_eq, nnreal.rpow_two] },
{ suffices : i.1 ≠ i.2,
{ rw [diagonal_apply_ne _ this, nnnorm_zero, nnreal.zero_rpow two_ne_zero], },
intro h,
exact finset.mem_map.not.mp his ⟨i.1, finset.mem_univ _, prod.ext rfl h⟩ }
end
@[simp] lemma frobenius_norm_diagonal [decidable_eq n] (v : n → α) :
‖diagonal v‖ = ‖(pi_Lp.equiv 2 _).symm v‖ :=
(congr_arg coe $ frobenius_nnnorm_diagonal v : _).trans rfl
end seminormed_add_comm_group
lemma frobenius_nnnorm_one [decidable_eq n] [seminormed_add_comm_group α] [has_one α] :
‖(1 : matrix n n α)‖₊ = nnreal.sqrt (fintype.card n) * ‖(1 : α)‖₊:=
begin
refine (frobenius_nnnorm_diagonal _).trans _,
simp_rw [pi_Lp.nnnorm_equiv_symm_const ennreal.two_ne_top, nnreal.sqrt_eq_rpow],
simp only [ennreal.to_real_div, ennreal.one_to_real, ennreal.to_real_bit0],
end
section is_R_or_C
variables [is_R_or_C α]
lemma frobenius_nnnorm_mul (A : matrix l m α) (B : matrix m n α) : ‖A ⬝ B‖₊ ≤ ‖A‖₊ * ‖B‖₊ :=
begin
simp_rw [frobenius_nnnorm_def, matrix.mul_apply],
rw [←nnreal.mul_rpow, @finset.sum_comm _ n m, finset.sum_mul_sum, finset.sum_product],
refine nnreal.rpow_le_rpow _ one_half_pos.le,
refine finset.sum_le_sum (λ i hi, finset.sum_le_sum $ λ j hj, _),
rw [← nnreal.rpow_le_rpow_iff one_half_pos, ← nnreal.rpow_mul,
mul_div_cancel' (1 : ℝ) two_ne_zero, nnreal.rpow_one, nnreal.mul_rpow],
dsimp only,
have := @nnnorm_inner_le_nnnorm α _ _ _ _
((pi_Lp.equiv 2 (λ i, α)).symm (λ j, star (A i j)))
((pi_Lp.equiv 2 (λ i, α)).symm (λ k, B k j)),
simpa only [pi_Lp.equiv_symm_apply, pi_Lp.inner_apply,
is_R_or_C.inner_apply, star_ring_end_apply, pi.nnnorm_def, pi_Lp.nnnorm_eq_of_L2,
star_star, nnnorm_star, nnreal.sqrt_eq_rpow, nnreal.rpow_two] using this,
end
lemma frobenius_norm_mul (A : matrix l m α) (B : matrix m n α) : ‖A ⬝ B‖ ≤ ‖A‖ * ‖B‖ :=
frobenius_nnnorm_mul A B
/-- Normed ring instance (using frobenius norm) for matrices over `ℝ` or `ℂ`. Not
declared as an instance because there are several natural choices for defining the norm of a
matrix. -/
local attribute [instance]
def frobenius_normed_ring [decidable_eq m] : normed_ring (matrix m m α) :=
{ norm := has_norm.norm,
norm_mul := frobenius_norm_mul,
..matrix.frobenius_seminormed_add_comm_group }
/-- Normed algebra instance (using frobenius norm) for matrices over `ℝ` or `ℂ`. Not
declared as an instance because there are several natural choices for defining the norm of a
matrix. -/
local attribute [instance]
def frobenius_normed_algebra [decidable_eq m] [normed_field R] [normed_algebra R α] :
normed_algebra R (matrix m m α) :=
{ ..matrix.frobenius_normed_space }
end is_R_or_C
end frobenius
end matrix
|
If $f$ is a continuous function on a convex set $S$, and $f$ is differentiable on the interior of $S$ except for a finite number of points, then the integral of $f$ along any closed path in $S$ is zero. |
[STATEMENT]
lemma nGt_le_conv: "comp x y \<noteq> Gt \<longleftrightarrow> le x y"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (comp x y \<noteq> Gt) = le x y
[PROOF STEP]
unfolding le_of_comp_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (comp x y \<noteq> Gt) = (case comp x y of Gt \<Rightarrow> False | _ \<Rightarrow> True)
[PROOF STEP]
by (cases "comp x y", auto) |
************************************************************************
* M020 - extended multigrid solver
************************************************************************
* The M020 routine (based on M011) is an extended version of the
* multigrid solver that is designed to work
* - without COMMON blocks
* - using less parameters in the call
* - with more influence of the user to the solver by parameters
* To use this solver, the user has to provide a set of callback-routines
* for prolongation, restriction, matrix multiplication, solution of
* linear systems on any level,...
*
* The solver itself does not use any COMMON blocks and makes no use
* of any memory management routines. M020 is designed as a Black-Box
* solver. The intelligence of the solver comes from the callback-
* routines, which implement the real work. The callback routines are
* the ones that know about the structure of the matrices, possible
* FEM solution vectors,...
*
* The behaviour of the solver is defined by a parameter block of
* integer and double variables. This parameter block is also passed
* by the solver to all callback-routines. The first part of the
* parameter block is reserved for the parameters of the solver.
* The caller can add more variables to the end of this parameter
* block with information for the callback routines, e.g. the handles
* to matrices, parameters for the solver components,...
*
* In addition to these two multigrid parameter blocks, a user defined
* integer and double precision data block is passed to the
* callback routines as well. These can be used by the caller to
* control the behaviour of its callback routines, since they allow
* the callback routines to directly access information
* from the caller.
*
* In total, the following arguments have to be passed to M020.
* Here, SZMGRx >= SZM020x are size constants defined by the
* user when creating the structures IPARAM/DPARAM; the user
* may declare these arrays larger so that they carry additional
* information for coarse grid solver(s), smoothers,...
*
* In:
* IPARAM - array [1..SZMGRI] of integer
* Integer parameter structure for M020
* DPARAM - array [1..SZMGRD] of double
* Double precision parameter structure for M020
* IDATA - array [1..*] of integer
* Start address of a user defined array, which is
* passed to all callback routines.
* DDATA - array [1..*] of double
* Start address of a user defined array, which is
* passed to all callback routines.
* DX - array [1..*] of double precision
* Starting address of solution vectors
* DB - array [1..*] of double precision
* Starting address of right-hand-side vectors
* DD - array [1..*] of double precision
* Starting address of temporary vectors
* DAX - SUBROUTINE (DX,DAX,NEQ,A1,A2,IPARAM,DPARAM,IDATA,DDATA)
* Callback-routine, Matrix-vector multiplication.
* Returns: DAX := A1*A*DX+A2*DAX
* DPROL - SUBROUTINE (DX,DFINE,IPARAM,DPARAM,IDATA,DDATA)
* Callback-routine, Prolongation.
* Calculates DFINE := Prolongation(DX) of solution on
* corser level ILEV-1 to current (finer) level ILEV
* DREST - SUBROUTINE (DFINE,DX,IPARAM,DPARAM,IDATA,DDATA)
* Callback-routine, Restriction.
* Calculates DX := Restriction(DFINE) of solution on finer
* level ILEV+1 to current (coarser) level ILEV
* DPRSM - SUBROUTINE (DX,DB,DD,NEQ,NSM,IPARAM,DPARAM,IDATA,DDATA)
* Callback-routine, Pre-smoothing.
* Performs NSM smoothing steps to DX. DD can be used
* as auxiliary vector.
* DPOSM - SUBROUTINE (DX,DB,DD,NEQ,NSM,IPARAM,DPARAM,IDATA,DDATA)
* Callback-routine, Post-smoothing.
* Performs NSM smoothing steps to DX. DD can be used
* as auxiliary vector.
* DBC - SUBROUTINE (DX,NEQ,IPARAM,DPARAM,IDATA,DDATA)
* Callback-routine, Boundary-implementation
* Implements boundary conditions into solution vector
* DSTEP - SUBROUTINE (DX,DD,DB,DSTEPP,IPARAM,DPARAM,IDATA,DDATA)
* Callback-routine, Step-Size control.
* Calculates DSTEPP = step-length parameter.
* = 1, if no step length control is used
* DEX - SUBROUTINE (DX,DB,DD,NEQ,RHO,ITE,IPARAM,DPARAM,IDATA,DDATA)
* Callback-routine, coarse grid solver if more than
* one level is used. The solver should solve up to
* the accuracy the MG-solver should solve to (can be
* taken from DPARAM), but is not forced to do that.
* Returns: DX = A^-1 * DB
* ITE = number of iterations, if iterative
* solver is used; 1 otherwise.
* RHO = convergence rate
* DEXS - SUBROUTINE (DX,DB,DD,NEQ,RHO,ITE,IPARAM,DPARAM,IDATA,DDATA)
* Callback-routine, Coarse grid solver if there's only one
* level; i.e. fallback-routine to standard one-level solver.
* The solver should solve up to the accuracy the MG-solver
* should solve to (can be taken from DPARAM).
* Returns: DX = A^-1 * DB
* ITE = number of iterations, if iterative
* solver is used; 1 otherwise.
* RHO = convergence rate
* DFILT - SUBROUTINE (DX,NEQ,IALGP,IPARAM,DPARAM,IDATA,DDATA)
* Callback-routine, adaptive filtering.
* Performs filtering on different positions of the
* algorithm. Is only used if IPARAM(OFILT)<>0.
*
* The behaviour of the algorithm is determined by the "Input-Variables"
* part in the IPARAM/DPARAM parameter blocks.
*
* The routine uses a couple of vectors for the computation: DX, DB and
* DD. The variables in the parameter list of M020 are pointers
* to a large array containing all these vectors. The starting
* indices of the vectors on the different levels are given by the
* KOFFx offset array in the integer parameters. E.g. the space for
* the solution vectors for level 1,2,3,... are expected to be at
* DX(1+KOFFX(1)), DX(1+KOFFX(2)), DX(1+KOFFX(3)),...
* Concerning the content of the vectors themselves, the caller has to
* initialise the following:
* - The initial start vector, which is expected at DX(1+KOFFX(NLMAX)).
* - The right hand side of the system, which is expected in
* DB(1+KOFFB(NLMAX)).
*
* The output of the algorithm is returned by:
* - the "Output-Variables" block in IPARAM/DPARAM
* - the DX-vector on the finest level, which is a new approximation
* to the solution vector; can be found at DX(1+KOFFX(NLMAX))
*
* The algorithm uses DD (on levels NLMIN..NLMAX) and DX (on level
* NLMIN..NLMAX-1) as temporary vectors during the computation.
* The parameter blocks IPARAM/DPARAM contain further temporary
* variables which are only valid during the computation, and which
* inform the callback-routine about the current status of the
* algorithm (current level,...). All these variables are initialized
* internally. The user has only to specify the input parameters and
* gets the result in the output variables and the solution vector.
*
* The multigrid algorithm stops if
* - the iteration is divergent (both criteria introduced by
* DIVREL and DIVABS are fulfilled)
* - the iteration has been convergent (both criteria introduced by
* EPSREL and EPSABS are fulfilled) and the minimum number of
* iterations is reached
* - the maximum number of iterations is reached
* If NODEFC is given <> 0, the residuals are not checked (except for the
* maximum norm of the initial residuum) and there are always
* NITMAX iterations performed.
************************************************************************
***********************************************************************
* Description of multigrid filtering by DFILT:
*
* In:
* DX - Solution vector; array [1..NEQ] of double
* NEQ - length of solution vector
* IALGP - Position in the algorithm
* 0=undefined - enforce the filtering
* 1=on start of the algorithm on finest level;
* DX is solution vector
* 2=on start of the MG sweep on the current level;
* DX is the first solution vector before the
* sweep starts (0D0-array normally, except for the
* finest level, there it's the result of the previous
* call with IALGP=1)
* 3=before smoothing; DX is solution vector
* 4=after smoothing; DX is solution vector
* 5=before restriction; DX is defect vector
* 6=after restriction; DX is defect vector
* 7=before coarse grid solver;
* DX is RHS vector on coarsest level
* (the result of IALGP=6 !)
* 8=before coarse grid solver;
* DX is start vector on coarsest level (normally filled
* with 0)
* 9=after coarse grid solver;
* DX is calculated solution vector on coarsest level
* 10=before prolongation;
* DX is update-vector on coarser level
* 11=after prolongation; DX is prolongated update vector
* 12=after coarse grid correction, before post smoothing;
* DX is solution vector
* 13=after post-smoothing; DX is solution vector
* 14=after one step of MG; DX is the solution vector on the
* finest level.
* 15=after one step of MG; DX is the last calculated residuum
* IPARAM- array [1..SZMGRI] of integer
* Integer parameter structure of multigrid solver
* DPARAM- array [1..SZMGRD] of double
* Double precision parameter structure of multigrid solver
* IDATA - array [1..*] of integer
* User defined integer array
* DDATA - array [1..*] of double
* User defined double array
*
* Out (to be returned by DFILT):
* DX - updated vector.
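*
* A hypothetical skeleton of such a filtering callback (the routine
* name MYFILT and the chosen operation -- removing the mean value from
* the defect vectors -- are only illustrative assumptions, not part of
* this library):
*
*      SUBROUTINE MYFILT (DX,NEQ,IALGP,IPARAM,DPARAM,IDATA,DDATA)
*      IMPLICIT NONE
*      INTEGER NEQ,IALGP,IPARAM(*),IDATA(*)
*      DOUBLE PRECISION DX(NEQ),DPARAM(*),DDATA(*),DMEAN
*      INTEGER I
*C     Filter only the defect vectors (algorithm positions 5 and 6):
*      IF ((IALGP.EQ.5).OR.(IALGP.EQ.6)) THEN
*        DMEAN = 0D0
*        DO I=1,NEQ
*          DMEAN = DMEAN + DX(I)
*        END DO
*        DMEAN = DMEAN / DBLE(NEQ)
*        DO I=1,NEQ
*          DX(I) = DX(I) - DMEAN
*        END DO
*      END IF
*      END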
***********************************************************************
SUBROUTINE M020 (IPARAM,DPARAM, IDATA,DDATA, DX,DB,DD,
* DAX,DPROL,DREST,DPRSM,DPOSM,
* DEX,DEXS,DBC,DSTEP,DFILT)
IMPLICIT NONE
INCLUDE 'cout.inc'
INCLUDE 'cbasicmg.inc'
INCLUDE 'ssolvers.inc'
INCLUDE 'm020.inc'
DOUBLE PRECISION DX(*),DB(*),DD(*)
INTEGER IPARAM(SZ020I),IDATA(*)
DOUBLE PRECISION DPARAM(SZ020D),DDATA(*)
EXTERNAL DAX,DPROL,DREST,DPRSM,DPOSM,DEX,DEXS,DBC,DSTEP,DFILT
C local variables:
INTEGER IASRLN, ITIM, NLMIN, NLMAX, INRM, ICYCLE, IFILT, NIT0
INTEGER KOFFX(NNLEV), KOFFB(NNLEV), KOFFD(NNLEV), KNEQ(NNLEV)
DOUBLE PRECISION TIMIN(SZ020D),DMAX,DEFOLD,R,DSTEPP
INTEGER I,ILEV,NODEFC,ITE,MTDV,J
C The queue saves the current residual and the
C previous residuals
DOUBLE PRECISION RESQUE(32)
C Length of the queue of last residuals for the computation of
C the asymptotic convergence rate
IASRLN = MAX(MIN(32,IPARAM (OIASRLN)),1)
C Put some variables into local variables for faster access.
C This is typically optimized away by the compiler...
ITIM = IPARAM (OTIM)
INRM = IPARAM (OINRM)
ICYCLE = IPARAM (OICYCLE)
IFILT = IPARAM (OFILT)
NODEFC = IPARAM (ONODEFC)
CALL LCP3(IPARAM(OKOFFX),KOFFX,NNLEV)
CALL LCP3(IPARAM(OKOFFB),KOFFB,NNLEV)
CALL LCP3(IPARAM(OKOFFD),KOFFD,NNLEV)
CALL LCP3(IPARAM(OKNEQ),KNEQ,NNLEV)
C Iteration when the residuum is printed:
MTDV = MAX(1,IPARAM(OMTRMRS))
C minimum number of MG-steps:
NIT0 = MAX(IPARAM(ONITMIN),0)
C If timing information should be computed, initialise the variables
C in the parameter block and measure the start time.
C Timing information is always measured by stopping a start- and an
C end-timestep. We use a temporary array TIMIN for measuring the start
C timings, while after measuring the end-timings, the difference
C of start- and end-time is added to the final timing information
C in the output block.
IF (ITIM.NE.0) THEN
DPARAM(OTMTOT ) = 0D0
DPARAM(OTMMG ) = 0D0
DPARAM(OTMPROL) = 0D0
DPARAM(OTMREST) = 0D0
DPARAM(OTMDEF ) = 0D0
DPARAM(OTMSMTH) = 0D0
DPARAM(OTMCGSL) = 0D0
DPARAM(OTMFILT) = 0D0
DPARAM(OTMBC ) = 0D0
DPARAM(OTMCGC ) = 0D0
CALL LCL1(TIMIN,SZ020D)
CALL GTMAUX (TIMIN,DPARAM,OTMTOT,0)
ENDIF
C initialise output parameters to standard values
IPARAM(OSTATUS) = 0
IPARAM(OITE) = 0
DPARAM(ODEFINI) = 0D0
DPARAM(ODEFFIN) = 0D0
DPARAM(ORHO ) = 0D0
DPARAM(ORHOASM) = 0D0
C Clear queue with old residuals
CALL LCL1(RESQUE,IASRLN)
C Check the cycle
IF (ICYCLE.LT.0) THEN
C Wrong parameters
IPARAM(OSTATUS) = 1
IF (IPARAM(OMSGTRM).GE.1) THEN
WRITE (MTERM,'(A)') 'M020: Invalid cycle'
END IF
GOTO 99999
END IF
NLMIN = IPARAM(ONLMIN)
NLMAX = IPARAM(ONLMAX)
IF (NLMAX.LT.NLMIN.OR.NLMIN.LE.0.OR.NLMAX.GT.NNLEV) THEN
IPARAM(OSTATUS) = 1
IF (IPARAM(OMSGTRM).GE.1) THEN
WRITE (MTERM,'(A)') 'M020: invalid NLMIN/NLMAX'
END IF
GOTO 99999
ENDIF
C The NEQ entry in the solver structure is initialized to the
C number of equations on the finest level. It will stay so till the
C end of the routine, as this information is actually not used by
C this type of solver.
IPARAM(ONEQ) = KNEQ(NLMAX)
C Set the starting address of the auxiliary array for the callback
C routines to DD on finest level. This vector is used in every
C MG sweep only once: To calculate the defect, smooth it and
C restrict it to the coarser level. Afterwards it's no more used.
C So a couple of callback routines can use it for intermediate
C calculations, which saves some memory!
C We set KCBAUX<>0 only if it's safe to use it. At the moment,
C it's not -- set it to 0.
IPARAM(OKCBAUX) = 0
C We start on the maximum level. Set this here, because the callback
C routines might need it.
IPARAM (OILEV) = NLMAX
C Test, if the RHS-vector is 0; in this case the solution is
C also 0 -> special case.
C This is done by checking the maximum norm of the vector.
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMDEF,0)
CALL LLI1(DB(1+KOFFB(NLMAX)),KNEQ(NLMAX),DMAX,I)
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMDEF,1)
IF (DMAX.LE.DPARAM(OVECZER)) THEN
C final defect is 0, as initialised in the output variable above
CALL LCL1(DX(1+KOFFX(NLMAX)),KNEQ(NLMAX))
GOTO 1000
ENDIF
C The same way we check the initial defect for being zero.
C Test if there's only one level. In this case directly activate
C that coarse grid solver, which is responsible if there's only
C one level.
IF (NLMIN.EQ.NLMAX) THEN
IF (IPARAM(OMSGTRM).GT.1) THEN
WRITE (MTERM,'(A)') 'M020: Only one level;'//
* ' switching back to standard solver.'
END IF
C Calculate initial defect
IF (NODEFC.EQ.0) THEN
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMDEF,0)
CALL LCP1(DB(1+KOFFB(NLMAX)),DD(1+KOFFD(NLMAX)),KNEQ(NLMAX))
CALL DAX (DX(1+KOFFX(NLMAX)),DD(1+KOFFD(NLMAX)),
* KNEQ(NLMAX),-1D0,1D0,IPARAM,DPARAM,IDATA,DDATA)
CALL LL21(DD(1+KOFFD(NLMAX)),KNEQ(NLMAX),DPARAM(ODEFINI))
C Scaling for the vector (1111...) to have norm 1 (weighted l2-norm)
IF (INRM.GT.0)
* DPARAM(ODEFFIN) = DPARAM(ODEFINI) / SQRT (DBLE(KNEQ(NLMAX)))
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMDEF,1)
END IF
C activate DAXS coarse grid solver
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMCGSL,0)
IPARAM(OITE) = NIT0
CALL DEXS(DX(1+KOFFX(NLMAX)),DB(1+KOFFB(NLMAX)),
* DD(1+KOFFD(NLMAX)),KNEQ(NLMAX),
* DPARAM(ORHO),IPARAM(OITE),IPARAM,DPARAM,IDATA,DDATA)
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMCGSL,1)
C Calculate final defect
IF (NODEFC.EQ.0) THEN
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMDEF,0)
CALL LCP1(DB(1+KOFFB(NLMAX)),DD(1+KOFFD(NLMAX)),KNEQ(NLMAX))
CALL DAX (DX(1+KOFFX(NLMAX)),DD(1+KOFFD(NLMAX)),
* KNEQ(NLMAX),-1D0,1D0,IPARAM,DPARAM,IDATA,DDATA)
CALL LL21(DD(1+KOFFD(NLMAX)),KNEQ(NLMAX),DPARAM(ODEFFIN))
C Scaling for the vector (1111...) to have norm 1 (weighted l2-norm)
IF (INRM.GT.0)
* DPARAM(ODEFFIN) = DPARAM(ODEFFIN) / SQRT (DBLE(KNEQ(NLMAX)))
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMDEF,1)
C In this case we can't get the asymptotic residual, otherwise
C we would have to change the code of the coarse grid solver
C itself.
DPARAM(ORHOASM) = DPARAM(ORHO)
END IF
C That's it, single-grid solver = complete solver, finished
GOTO 1000
ENDIF
C The KIT/KIT0-variables in the IPARAM block count the current sweep
C on each level, thus realising the F/V/W-cycle without recursion.
C
C KIT0 is initialised with the number of MG-sweeps on every level.
C KIT counts backwards the number of MG-sweeps left on each level.
C An F-cycle is initialised the same way as the W-cycle, but it is
C handled differently later when the KIT()-entries are decreased.
IPARAM(OKIT0+NLMAX-1) = 1
DO I = NLMIN+1,NLMAX-1
IF (ICYCLE.EQ.0) THEN
IPARAM(OKIT0+I-1) = 2
ELSE
IPARAM(OKIT0+I-1) = ICYCLE
ENDIF
END DO
C On start of the algorithm perform the first filtering:
IF (IFILT.NE.0) THEN
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMFILT,0)
CALL DFILT(DX(1+KOFFX(NLMAX)),KNEQ(NLMAX),1,IPARAM,DPARAM,
* IDATA,DDATA)
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMFILT,1)
END IF
IF (NODEFC.EQ.0) THEN
C After the filtering calculate the initial defect:
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMDEF,0)
CALL LCP1(DB(1+KOFFB(NLMAX)),DD(1+KOFFD(NLMAX)),KNEQ(NLMAX))
CALL DAX(DX(1+KOFFX(NLMAX)),DD(1+KOFFD(NLMAX)),
* KNEQ(NLMAX),-1D0,1D0,IPARAM,DPARAM,IDATA,DDATA)
CALL LL21(DD(1+KOFFD(NLMAX)),KNEQ(NLMAX),DPARAM(ODEFINI))
C Scaling for the vector (1111...) to have norm 1 (weighted l2-norm)
IF (INRM.GT.0)
* DPARAM(ODEFINI) = DPARAM(ODEFINI) / SQRT (DBLE(KNEQ(NLMAX)))
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMDEF,1)
C DEFOLD always saves the defect from the last step. The current
C defect is directly calculated into the output block of DPARAM.
C In the first iteration the "previous" defect is initialised
C by the initial defect.
DEFOLD = DPARAM(ODEFINI)
DPARAM(ODEFFIN) = DPARAM(ODEFINI)
C Print out the initial residuum
IF (IPARAM(OMSGTRM).GE.2) THEN
WRITE (MTERM,'(A,I7,A,D25.16)')
* 'M020: Iteration ',0,', !!RES!! = ',DPARAM(ODEFINI)
END IF
C Check if our initial defect is zero. This may happen if
C the filtering routine filters "everything out"!
C In that case we can directly stop our computation.
IF ( (DPARAM(ODEFINI).LT.DPARAM(OVECZER)) ) THEN
C final defect is 0, as initialised in the output variable above
CALL LCL1(DX(1+KOFFX(NLMAX)),KNEQ(NLMAX))
GOTO 1000
END IF
C Initialize the queue of the last residuals with the
C initial defect
DO I=1,IASRLN
RESQUE(I) = DPARAM(ODEFINI)
END DO
END IF
C Start multigrid iteration; perform at most IPARAM(ONITMAX) iterations.
DO ITE = 1, IPARAM(ONITMAX)
IPARAM(OCRITE) = ITE
C Initialize level counts for all levels.
C Transfer the KIT0-array to the KIT-array; it will be decreased
C consecutively, until 0 is reached.
DO I = NLMIN,NLMAX
IPARAM(OKIT+I-1) = IPARAM(OKIT0+I-1)
END DO
C Start on the maximum level
IPARAM (OILEV) = NLMAX
ILEV = NLMAX
C Perform the filtering for the current solution before the MG sweep.
IF (IFILT.NE.0) THEN
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMFILT,0)
CALL DFILT(DX(1+KOFFX(NLMAX)),KNEQ(NLMAX),2,IPARAM,DPARAM,
* IDATA,DDATA)
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMFILT,1)
END IF
C On the maximum level we already built out defect vector. If we are
C on a lower level than NLMAX, perform smoothing+restriction down to the
C lowest level NLMIN.
C
C Crippled WHILE-DO loop to allow decreasing ILEV inside the
C loop body, not at the end.
110 IF (ILEV.NE.NLMIN) THEN
C First perform filtering of the solution before smoothing.
IF (IFILT.NE.0) THEN
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMFILT,0)
CALL DFILT(DX(1+KOFFX(ILEV)),KNEQ(ILEV),3,IPARAM,DPARAM,
* IDATA,DDATA)
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMFILT,1)
END IF
C Perform the pre-smoothing with the current solution vector
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMSMTH,0)
IF (IPARAM(OKPRSM+ILEV-1).GT.0)
* CALL DPRSM(DX(1+KOFFX(ILEV)),DB(1+KOFFB(ILEV)),
* DD(1+KOFFD(ILEV)),KNEQ(ILEV),IPARAM(OKPRSM+ILEV-1),
* IPARAM,DPARAM,IDATA,DDATA)
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMSMTH,1)
C Perform filtering
IF (IFILT.NE.0) THEN
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMFILT,0)
CALL DFILT(DX(1+KOFFX(ILEV)),KNEQ(ILEV),4,IPARAM,DPARAM,
* IDATA,DDATA)
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMFILT,1)
END IF
C Build the defect vector
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMDEF,0)
CALL LCP1(DB(1+KOFFB(ILEV)),DD(1+KOFFD(ILEV)),KNEQ(ILEV))
CALL DAX(DX(1+KOFFX(ILEV)),DD(1+KOFFD(ILEV)),
* KNEQ(ILEV),-1D0,1D0,IPARAM,DPARAM,IDATA,DDATA)
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMDEF,1)
C Filter the defect vector
IF (IFILT.NE.0) THEN
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMFILT,0)
CALL DFILT(DD(1+KOFFD(ILEV)),KNEQ(ILEV),5,IPARAM,DPARAM,
* IDATA,DDATA)
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMFILT,1)
END IF
C Go down one level
ILEV = ILEV-1
IPARAM (OILEV) = ILEV
C Restriction of the defect. The restricted defect is placed
C in DB as the right hand side of the lower level.
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMREST,0)
CALL DREST(DD(1+KOFFD(ILEV+1)),DB(1+KOFFB(ILEV)),
* IPARAM,DPARAM,IDATA,DDATA)
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMREST,1)
C Filter the restricted defect vector
IF (IFILT.NE.0) THEN
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMFILT,0)
CALL DFILT(DB(1+KOFFB(ILEV)),KNEQ(ILEV),6,IPARAM,DPARAM,
* IDATA,DDATA)
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMFILT,1)
END IF
C From now on (i.e. after the first restriction)
C it's safe to use "DD(NLMAX)" as auxiliary vector:
IPARAM(OKCBAUX) = KOFFD(NLMAX)
C Choose zero as initial vector on lower level. Implement boundary
C conditions into the just calculated right hand side.
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMBC,0)
CALL LCL1(DX(1+KOFFX(ILEV)),KNEQ(ILEV))
CALL DBC(DB(1+KOFFB(ILEV)),KNEQ(ILEV),IPARAM,DPARAM,
* IDATA,DDATA)
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMBC,1)
C Perform the filtering on the start solution
IF (IFILT.NE.0) THEN
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMFILT,0)
CALL DFILT(DX(1+KOFFX(ILEV)),KNEQ(ILEV),2,IPARAM,DPARAM,
* IDATA,DDATA)
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMFILT,1)
END IF
C If we are not on the lowest level, repeat the smoothing of
C the solution/restriction of the new defect:
GOTO 110
END IF
C The previous IF/GOTO sweep ensures that we are on the lowest level now.
C In DD there is the defect on the lowest level, DX is filled with zero
C (possibly modified by filtering).
C Possibly filter the coarse grid solution and RHS vector
IF (IFILT.NE.0) THEN
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMFILT,0)
CALL DFILT(DB(1+KOFFB(ILEV)),KNEQ(ILEV),7,IPARAM,DPARAM,
* IDATA,DDATA)
CALL DFILT(DX(1+KOFFX(ILEV)),KNEQ(ILEV),8,IPARAM,DPARAM,
* IDATA,DDATA)
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMFILT,1)
END IF
C Solve the system on lowest level:
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMCGSL,0)
I = NIT0
CALL DEX(DX(1+KOFFX(NLMIN)),DB(1+KOFFB(NLMIN)),
* DD(1+KOFFD(NLMIN)),KNEQ(NLMIN),R,I,IPARAM,DPARAM,
* IDATA,DDATA)
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMCGSL,1)
C Filter the solution on lowest level
IF (IFILT.NE.0) THEN
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMFILT,0)
CALL DFILT(DX(1+KOFFX(ILEV)),KNEQ(ILEV),9,IPARAM,DPARAM,
* IDATA,DDATA)
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMFILT,1)
END IF
C Prolongate the solution vector, perform the coarse grid
C correction and realise the MG-cycles until we have reached
C the fine grid again:
130 IF (ILEV.NE.NLMAX) THEN
C go to the next higher level
ILEV = ILEV+1
IPARAM (OILEV)=ILEV
C When we reach NLMAX, KCBAUX must no longer be used:
IF (ILEV.EQ.NLMAX) THEN
IPARAM(OKCBAUX) = 0
END IF
C First perform filtering to the non-prolongated update vector
IF (IFILT.NE.0) THEN
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMFILT,0)
CALL DFILT(DX(1+KOFFX(ILEV)),KNEQ(ILEV),10,IPARAM,DPARAM,
* IDATA,DDATA)
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMFILT,1)
END IF
C Prolongate the update vector; DPROL returns DD:=PROL(DX)
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMPROL,0)
CALL DPROL(DX(1+KOFFX(ILEV-1)),DD(1+KOFFD(ILEV)),
* IPARAM,DPARAM,IDATA,DDATA)
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMPROL,1)
C implement boundary conditions into the prolongated vector
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMBC,0)
CALL DBC(DD(1+KOFFD(ILEV)),KNEQ(ILEV),IPARAM,DPARAM,
* IDATA,DDATA)
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMBC,1)
C Perform filtering of the prolongated update vector
IF (IFILT.NE.0) THEN
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMFILT,0)
CALL DFILT(DD(1+KOFFD(ILEV)),KNEQ(ILEV),11,IPARAM,DPARAM,
* IDATA,DDATA)
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMFILT,1)
END IF
C Calculate the step length parameter for the coarse grid correction
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMCGC,0)
CALL DSTEP(DX(1+KOFFX(ILEV)),DD(1+KOFFD(ILEV)),
* DB(1+KOFFB(ILEV)),KNEQ(ILEV),DSTEPP,IPARAM,DPARAM,
* IDATA,DDATA)
C Perform the coarse grid correction by adding the coarse grid
C solution (with the calculated step-length parameter) to
C the current solution
CALL LLC1(DD(1+KOFFD(ILEV)),DX(1+KOFFX(ILEV)),KNEQ(ILEV),
* DSTEPP,1D0)
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMCGC,1)
C Perform filtering of the updated solution vector before
C post-smoothing
IF (IFILT.NE.0) THEN
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMFILT,0)
CALL DFILT(DX(1+KOFFX(ILEV)),KNEQ(ILEV),12,IPARAM,DPARAM,
* IDATA,DDATA)
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMFILT,1)
END IF
C Perform the post-smoothing with the current solution vector
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMSMTH,0)
IF (IPARAM(OKPOSM+ILEV-1).GT.0)
* CALL DPOSM(DX(1+KOFFX(ILEV)),DB(1+KOFFB(ILEV)),
* DD(1+KOFFD(ILEV)),KNEQ(ILEV),IPARAM(OKPOSM+ILEV-1),
* IPARAM,DPARAM,IDATA,DDATA)
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMSMTH,1)
C Filter the current solution vector after post-smoothing
IF (IFILT.NE.0) THEN
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMFILT,0)
CALL DFILT(DX(1+KOFFX(ILEV)),KNEQ(ILEV),13,IPARAM,DPARAM,
* IDATA,DDATA)
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMFILT,1)
END IF
C Update the iteration counter(s) for realising the MG-cycle(s).
C Then either jump to 130 to perform the next prolongation or
C jump to 110 to perform the next MG sweep on the current
C level.
C
C Here ICYCLE defines how the KIT()-entry is updated.
C For a W-cycle it is reset to 2 once the sweep on the current
C level is completed; for the F-cycle it is set to 1 so that no
C more than 1 cycle is performed on the current level.
J = OKIT
IPARAM(OKIT+ILEV-1) = IPARAM(OKIT+ILEV-1)-1
IF (IPARAM(OKIT+ILEV-1).LE.0) THEN
IF (ICYCLE.EQ.0) THEN
IPARAM(OKIT+ILEV-1) = 1
ELSE
IPARAM(OKIT+ILEV-1) = IPARAM(OKIT0+ILEV-1)
END IF
GOTO 130
ELSE
GOTO 110
END IF
END IF
C We have (hopefully) successfully performed one MG-sweep, starting
C and ending on the finest level. As we are now on the finest level
C again, we can update our defect vector to test the current
C residuum...
C But first perform some possible filtering with the current
C solution:
IF (IFILT.NE.0) THEN
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMFILT,0)
CALL DFILT(DX(1+KOFFX(ILEV)),KNEQ(ILEV),14,IPARAM,DPARAM,
* IDATA,DDATA)
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMFILT,1)
END IF
IF (NODEFC.EQ.0) THEN
C Calculate the residuum and its norm; the result can be
C found in DD:
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMDEF,0)
CALL LCP1(DB(1+KOFFB(NLMAX)),DD(1+KOFFD(NLMAX)),KNEQ(NLMAX))
CALL DAX(DX(1+KOFFX(NLMAX)),DD(1+KOFFD(NLMAX)),
* KNEQ(NLMAX),-1D0,1D0,IPARAM,DPARAM,IDATA,DDATA)
C Filter the residuum before calculating the norm!
IF (IFILT.NE.0) THEN
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMFILT,0)
CALL DFILT(DD(1+KOFFD(ILEV)),KNEQ(ILEV),15,IPARAM,DPARAM,
* IDATA,DDATA)
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMFILT,1)
END IF
CALL LL21(DD(1+KOFFD(NLMAX)),KNEQ(NLMAX),DPARAM(ODEFFIN))
C Scaling for the vector (1111...) to have norm 1 (weighted l2-norm)
IF (INRM.GT.0)
* DPARAM(ODEFFIN) = DPARAM(ODEFFIN) / SQRT (DBLE(KNEQ(NLMAX)))
IF (ITIM.GT.1) CALL GTMAUX (TIMIN,DPARAM,OTMDEF,1)
C Shift the queue with the last residuals and add the new
C residual to it
DO I=1,IASRLN-1
RESQUE(I) = RESQUE(I+1)
END DO
RESQUE(IASRLN) = DPARAM(ODEFFIN)
C Check if the iteration is diverging - by checking the absolute
C as well as the relative residuum.
C Use "not" instead of "ge" because this handles NaN/Infinity-cases
C more robustly!
IF ( .NOT. ((DPARAM(ODEFFIN).LT.DPARAM(ODIVABS)).AND.
* (DPARAM(ODEFFIN).LT.DPARAM(ODEFINI)*DPARAM(ODIVREL))) )
* THEN
IPARAM(OSTATUS) = 2
GOTO 1000
END IF
C Ok, we are still alive. Check if we are even convergent
IF (ITE.GE.NIT0) THEN
C jump out of the loop if we reached the convergence criterion;
C in standard programming languages I would use a BREAK here :-)
IF ( (DPARAM(OEPSABS).EQ.0D0) .AND.
* (DPARAM(ODEFFIN).LT.DPARAM(ODEFINI)*DPARAM(OEPSREL) ) )
* GOTO 1000
IF ( (DPARAM(ODEFFIN).LT.DPARAM(OEPSABS)).AND.
* (DPARAM(OEPSREL).EQ.0D0) )
* GOTO 1000
IF ( (DPARAM(ODEFFIN).LT.DPARAM(OEPSABS)).AND.
* (DPARAM(ODEFFIN).LT.DPARAM(ODEFINI)*DPARAM(OEPSREL)) )
* GOTO 1000
END IF
IF ((IPARAM(OMSGTRM).GE.2).AND.(MOD(ITE,MTDV).EQ.0)) THEN
WRITE (MTERM,'(A,I7,A,D25.16)')
* 'M020: Iteration ',ITE,', !!RES!! = ',DPARAM(ODEFFIN)
END IF
END IF
C No, not yet convergent - we have to perform the next sweep.
C Save the current defect as "old" defect
DEFOLD = DPARAM(ODEFFIN)
END DO ! ITE
C Ok, the multigrid sweep has finished - either successfully (i.e.
C convergent), or because the maximum number of iterations has
C been reached.
C Remark that at this point, ITE=NITMAX+1 -- but in CRITE the
C correct number of iterations is noted.
1000 CONTINUE
C Finish - either with an error or converged.
C
C Calculation of statistical data is only done if we did not use
C the single-grid solver:
IF (NLMIN.NE.NLMAX) THEN
C Print the last residuum, if we finished before reaching the
C maximum number of iterations. The DO loop has the property
C that ITE=NITMAX+1 if it runs through completely!
IF ((IPARAM(OMSGTRM).GE.2).AND.
* (ITE.GE.1).AND.(ITE.LE.IPARAM(ONITMAX))) THEN
WRITE (MTERM,'(A,I7,A,D25.16)')
* 'M020: Iteration ',ITE,
* ', !!RES!! = ',DPARAM(ODEFFIN)
END IF
C We now gather and print some statistical data, before we close
C this algorithm. From now on we use the real number of iterations,
C which is counted in CRITE:
IPARAM(OITE) = IPARAM(OCRITE)
DPARAM(ORHO) = 0D0
DPARAM(ORHOASM) = 0D0
IF (NODEFC.EQ.0) THEN
C Don't calculate anything if the final residuum is out of
C bounds - would result in NaN's,...
IF (DPARAM(ODEFFIN).LT.1D99) THEN
C Calculate asymptotic convergence rate
IF (RESQUE(1).GE.1D-70) THEN
I = MIN(IPARAM(OITE),IASRLN-1)
DPARAM(ORHOASM) =
* (DPARAM(ODEFFIN)/RESQUE(1))**(1D0/DBLE(I))
END IF
C If the initial defect was zero, the solver immediately
C exits - and so the final residuum is zero and we performed
C no steps; so the resulting multigrid convergence rate stays zero.
C In the other case the multigrid convergence rate computes as
C (final defect/initial defect) ** 1/nit :
IF (DPARAM(ODEFINI).GT.DPARAM(OVECZER)) THEN
DPARAM(ORHO) = (DPARAM(ODEFFIN) / DPARAM(ODEFINI)) **
* (1D0/DBLE(IPARAM(OITE)))
END IF
C If the convergence rate is really > 1, we treat the iteration
C as diverging - the error was getting larger than the original
C one!
IF (DPARAM(ORHO).GT.1D0) THEN
IPARAM(OSTATUS) = 2
END IF
END IF
END IF
END IF
C Print statistical data
IF (NODEFC.EQ.0) THEN
C Don't calculate anything if the final residuum is out of
C bounds - would result in NaN's,...
IF (DPARAM(ODEFFIN).LT.1D99) THEN
IF (IPARAM(OMSGTRM).GE.2) THEN
WRITE (MTERM,'(A)') ''
IF (NLMIN.NE.NLMAX) THEN
WRITE (MTERM,'(A)') 'Multigrid statistics:'
ELSE
WRITE (MTERM,'(A)') 'Single grid solver statistics:'
END IF
WRITE (MTERM,'(A)') ''
WRITE (MTERM,'(A,I5)') 'Iterations : ',
* IPARAM(OITE)
WRITE (MTERM,'(A,D24.12)') '!!INITIAL RES!! : ',
* DPARAM(ODEFINI)
WRITE (MTERM,'(A,D24.12)') '!!RES!! : ',
* DPARAM(ODEFFIN)
IF (DPARAM(ODEFINI).GT.DPARAM(OVECZER)) THEN
WRITE (MTERM,'(A,D24.12)') '!!RES!!/!!INITIAL RES!! : ',
* DPARAM(ODEFFIN) / DPARAM(ODEFINI)
ELSE
WRITE (MTERM,'(A,D24.12)') '!!RES!!/!!INITIAL RES!! : ',
* 0D0
END IF
WRITE (MTERM,'(A)') ''
WRITE (MTERM,'(A,D24.12)') 'Rate of convergence : ',
* DPARAM(ORHO)
WRITE (MTERM,'(A)') ''
END IF
IF (IPARAM(OMSGTRM).EQ.1) THEN
WRITE (MTERM,'(A,I5,A,D24.12)')
* 'Multigrid: Iterations/Rate of convergence: ',
* IPARAM(OITE),' /',DPARAM(ORHO)
END IF
ELSE
C DEF=Infinity; RHO=Infinity, set to 1
DPARAM(ORHO) = 1D0
DPARAM(ORHOASM) = 1D0
END IF
END IF
99999 CONTINUE
C Temporary array no longer available
IPARAM(OKCBAUX) = 0
C Gather some timing information, finish
IF (ITIM.GT.0) THEN
CALL GTMAUX (TIMIN,DPARAM,OTMTOT,1)
DPARAM(OTMMG ) = DPARAM(OTMTOT ) - DPARAM(OTMPROL)
* - DPARAM(OTMREST) - DPARAM(OTMDEF )
* - DPARAM(OTMSMTH) - DPARAM(OTMCGSL)
* - DPARAM(OTMFILT) - DPARAM(OTMBC )
* - DPARAM(OTMCGC )
END IF
END
************************************************************************
* M020 initialization
*
* The following routine can be used to initialise the IPARAM/DPARAM
* array structures with default values for the computation. After
* calling this routine, the user has to do the following initialisations
* before calling M020:
* - initialise NLMIN and NLMAX
* - initialise KOFFX, KOFFB, KOFFD, KNEQ, KPRSM, KPOSM
* - initialise RHS vector and start vector
* - initialise any user-defined variables attached to the structures
* IPARAM/DPARAM
*
* In:
* -
* Out:
* IPARAM - array [1..SZMGRI] of integer
* Integer parameter structure
* DPARAM - array [1..SZMGRD] of double precision
* Double precision parameter structure
************************************************************************
SUBROUTINE INM020 (IPARAM, DPARAM)
IMPLICIT NONE
INCLUDE 'cbasicmg.inc'
INCLUDE 'ssolvers.inc'
INCLUDE 'm020.inc'
INTEGER IPARAM(SZ020I)
DOUBLE PRECISION DPARAM(SZ020D)
C Clear the structures
CALL LCL3 (IPARAM,SZ020I)
CALL LCL1 (DPARAM,SZ020D)
C Initialize standard-parameters:
CALL INGSLV (IPARAM,DPARAM)
C Set the non-zero standard values
IPARAM (OICYCLE) = 0
DPARAM (OSTPMIN) = 1D0
DPARAM (OSTPMAX) = 1D0
C Standard solver tag is 11 for MG
IPARAM (OSLTAG) = 11
END
************************************************************************
* Prepare M020 vectors
*
* This is another preparation routine for the IPARAM/DPARAM array
* structures. It can be called after INM020 to initialise
* the KOFFX/KOFFB/KOFFD/KPRSM/KPOSM subarrays with starting addresses
* of arrays in DWORK and the number of smoothing steps, resp.
*
* PRM020 accepts a couple of handles generated by the dynamic
* memory management. It initialises KOFFX/KOFFB/KOFFD according to
* the starting addresses of the corresponding arrays in DWORK.
* The later call to M020 has then to be made using
* DX=DB=DD=DWORK(1):
*
* CALL M020 (IPARAM,DPARAM,IDATA,DDATA,DWORK(1),DWORK(1),DWORK(1),...)
*
* After calling this routine, the caller must make sure that there's
* no memory deallocation between this function and the call to M020!
* Otherwise the starting addresses of the solution/RHS/aux. arrays
* may not be valid anymore!
*
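* A minimal calling sketch under these conventions (the handle arrays
* LX/LB/LD, the level count NLEV and the callback names YAX,YPROL,...
* are placeholders the caller has to provide; they are not part of
* this library):
*
*      CALL INM020 (IPARAM,DPARAM)
*      IPARAM(ONLMIN) = 1
*      IPARAM(ONLMAX) = NLEV
*      CALL PRM020 (IPARAM,DPARAM,1,NLEV,4,4,LX,LB,LD)
*C     ... initialise KNEQ, the RHS in DWORK(1+KOFFB(NLMAX)) and the
*C     start vector in DWORK(1+KOFFX(NLMAX)) ...
*      CALL M020 (IPARAM,DPARAM,IDATA,DDATA,DWORK(1),DWORK(1),DWORK(1),
*     * YAX,YPROL,YREST,YPRSM,YPOSM,YEX,YEXS,YBC,YSTEP,YFILT)
*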
* IPARAM/DPARAM should be filled with 0 before calling this routine,
* as here only the nonzero parameters are set.
*
* Warning: The routine does not initialize KNEQ!
* This must still be done by the caller!
*
* In:
* NLMIN : minimum level, where the coarse grid solver should solve;
* >= 1.
* NLMAX : maximum level, where the solution should be computed;
* <= NNLEV!
* NPRSM - number of pre-smoothing steps on each level.
* =-1: don't initialise
* NPOSM - number of post-smoothing steps on each level.
* =-1: don't initialise
* LOFFX - array [1..NNLEV] of integer
* Array of handles to the DX-vectors
* LOFFB - array [1..NNLEV] of integer
* Array of handles to the DB-vectors
* LOFFD - array [1..NNLEV] of integer
* Array of handles to the DD-vectors
*
* Out:
* The IPARAM/DPARAM structure will be modified in the following
* variables:
*
* KOFFX - array [1..NNLEV] of integer
* Starting offsets of the DX-vectors relative to DWORK(1)
* KOFFB - array [1..NNLEV] of integer
* Starting offsets of the DB-vectors relative to DWORK(1)
* KOFFD - array [1..NNLEV] of integer
* Starting offsets of the DD-vectors relative to DWORK(1)
*
* If NPRSM <> -1:
* KPRSM - array [1..NNLEV] of integer
* Number of pre-smoothing steps on each level
*
* If NPOSM <> -1:
* KPOSM - array [1..NNLEV] of integer
* Number of post-smoothing steps on each level
************************************************************************
SUBROUTINE PRM020 (IPARAM, DPARAM,NLMIN,NLMAX,
* NPRSM, NPOSM, LOFFX, LOFFB, LOFFD)
IMPLICIT NONE
INCLUDE 'cmem.inc'
INCLUDE 'cbasicmg.inc'
INCLUDE 'ssolvers.inc'
INCLUDE 'm020.inc'
INTEGER IPARAM(SZ020I)
DOUBLE PRECISION DPARAM(SZ020D)
INTEGER LOFFX(NNLEV),LOFFB(NNLEV),LOFFD(NNLEV),NPRSM,NPOSM
INTEGER NLMIN,NLMAX
INTEGER I
IF (NPRSM.GT.0) THEN
DO I=0,NNLEV-1
IPARAM (OKPRSM+I) = NPRSM
END DO
END IF
IF (NPOSM.GT.0) THEN
DO I=0,NNLEV-1
IPARAM (OKPOSM+I) = NPOSM
END DO
END IF
DO I=1,NNLEV
IF ((I.GE.NLMIN).AND.(I.LE.NLMAX)) THEN
IPARAM (OKOFFX+I-1) = L(LOFFX(I))-1
IPARAM (OKOFFB+I-1) = L(LOFFB(I))-1
IPARAM (OKOFFD+I-1) = L(LOFFD(I))-1
ELSE
IPARAM (OKOFFX+I-1) = 0
IPARAM (OKOFFB+I-1) = 0
IPARAM (OKOFFD+I-1) = 0
END IF
END DO
END
|
/**************************************************************************\
|
| Copyright (C) 2009 Marc Stevens
|
| This program is free software: you can redistribute it and/or modify
| it under the terms of the GNU General Public License as published by
| the Free Software Foundation, either version 3 of the License, or
| (at your option) any later version.
|
| This program is distributed in the hope that it will be useful,
| but WITHOUT ANY WARRANTY; without even the implied warranty of
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
| GNU General Public License for more details.
|
| You should have received a copy of the GNU General Public License
| along with this program. If not, see <http://www.gnu.org/licenses/>.
|
\**************************************************************************/
#ifndef HASHCLASH_TYPES_HPP
#define HASHCLASH_TYPES_HPP
#include "config.h"
#include <utility>
#include <algorithm>
#include <vector>
#ifdef __CUDACC__
#pragma message("CUDA compiler detected.")
#define NOSERIALIZATION
#define FUNC_PREFIX __device__ __host__
#include <boost/cstdint.hpp>
#else
#define FUNC_PREFIX
#include <boost/cstdint.hpp>
#endif // __CUDACC__
// Since including serialization routines can be intrusive,
// especially if it is not used at all,
// one can disable it by defining NOSERIALIZATION
#ifndef NOSERIALIZATION
#include <boost/serialization/serialization.hpp>
#endif // NOSERIALIZATION
namespace hashclash {
typedef boost::uint32_t uint32;
typedef boost::uint64_t uint64;
// typedef boost::uint16_t uint16;
// typedef boost::uint8_t uint8;
typedef boost::int8_t int8;
FUNC_PREFIX inline uint32 rotate_right(const uint32 x, const unsigned n)
{ return (x>>n) | (x<<(32-n)); }
FUNC_PREFIX inline uint32 rotate_left(const uint32 x, const unsigned n)
{ return (x<<n) | (x>>(32-n)); }
/**** class triple ****/
template<class Ty1, class Ty2, class Ty3>
struct triple {
typedef triple<Ty1, Ty2, Ty3> MyType;
typedef Ty1 first_type;
typedef Ty2 second_type;
typedef Ty3 third_type;
triple()
: first(Ty1()), second(Ty2()), third(Ty3())
{}
triple(const Ty1& val1, const Ty2& val2, const Ty3& val3)
: first(val1), second(val2), third(val3)
{}
template<class O1, class O2, class O3>
triple(const triple<O1, O2, O3>& r)
: first(r.first), second(r.second), third(r.third)
{}
void swap(MyType& r)
{
std::swap(first, r.first);
std::swap(second, r.second);
std::swap(third, r.third);
}
first_type first;
second_type second;
third_type third;
};
template<class Ty1, class Ty2, class Ty3>
inline triple<Ty1,Ty2,Ty3> make_triple(Ty1 v1, Ty2 v2, Ty3 v3)
{
return triple<Ty1,Ty2,Ty3>(v1,v2,v3);
}
template<class Ty1, class Ty2, class Ty3>
inline bool operator==(const triple<Ty1, Ty2, Ty3>& l, const triple<Ty1, Ty2, Ty3>& r)
{
return l.first==r.first && l.second==r.second && l.third==r.third;
}
template<class Ty1, class Ty2, class Ty3>
inline bool operator!=(const triple<Ty1, Ty2, Ty3>& l, const triple<Ty1, Ty2, Ty3>& r)
{ return !(l == r); }
template<class Ty1, class Ty2, class Ty3>
inline bool operator<(const triple<Ty1, Ty2, Ty3>& l, const triple<Ty1, Ty2, Ty3>& r)
{
return l.first<r.first || (l.first==r.first && (l.second<r.second || (l.second==r.second && l.third<r.third)));
}
template<class Ty1, class Ty2, class Ty3>
inline bool operator>(const triple<Ty1, Ty2, Ty3>& l, const triple<Ty1, Ty2, Ty3>& r)
{ return r<l; }
template<class Ty1, class Ty2, class Ty3>
inline bool operator<=(const triple<Ty1, Ty2, Ty3>& l, const triple<Ty1, Ty2, Ty3>& r)
{ return !(r<l); }
template<class Ty1, class Ty2, class Ty3>
inline bool operator>=(const triple<Ty1, Ty2, Ty3>& l, const triple<Ty1, Ty2, Ty3>& r)
{ return !(l<r); }
template<class Ty1, class Ty2, class Ty3>
inline void swap(triple<Ty1,Ty2,Ty3>& l, triple<Ty1,Ty2,Ty3>& r)
{ l.swap(r); }
template<class Ty>
struct sortindices {
uint32 index;
const Ty* ptr;
sortindices(): index(~0), ptr(0) {}
sortindices(uint32 idx, const Ty& val): index(idx), ptr(&val) {}
bool operator<(const sortindices<Ty>& r) const { return *ptr < *r.ptr; }
bool operator==(const sortindices<Ty>& r) const { return *ptr == *r.ptr; }
};
template<class Ty1, class Ty2>
inline void sortbyindex(std::vector<Ty2>& tosort, const std::vector< sortindices<Ty1> >& indices)
{
std::vector<Ty2> tosorttmp(indices.size());
for (unsigned i = 0; i < indices.size(); ++i)
std::swap(tosorttmp[i], tosort[ indices[i].index ]);
std::swap(tosorttmp, tosort);
}
template<class Ty1, class Ty2>
void friendsort(std::vector<Ty1>& tosort, std::vector<Ty2>& tosortfriend)
{
std::vector< sortindices<Ty1> > indices;
indices.reserve(tosort.size());
for (unsigned i = 0; i < tosort.size(); ++i)
indices.push_back( sortindices<Ty1>(i, tosort[i]) );
std::sort( indices.begin(), indices.end() );
indices.erase( std::unique(indices.begin(), indices.end()), indices.end());
sortbyindex(tosort, indices);
sortbyindex(tosortfriend, indices);
}
} // namespace hashclash
#ifndef NOSERIALIZATION
namespace boost {
namespace serialization {
template<class Archive, class Ty1, class Ty2, class Ty3>
void serialize(Archive& ar, hashclash::triple<Ty1,Ty2,Ty3>& t, const unsigned int file_version)
{
ar & make_nvp("first", t.first);
ar & make_nvp("second", t.second);
ar & make_nvp("third", t.third);
}
}
}
#endif // NOSERIALIZATION
#endif // HASHCLASH_TYPES_HPP
|
import data.nat.prime
import algebra.big_operators
import tactic
example (n : ℕ) : n.succ ≠ 0 := n.succ_ne_zero
example (m n : ℕ) (h : m.succ = n.succ) : m = n := nat.succ.inj h
def fac : ℕ → ℕ
| 0 := 1
| (n + 1) := (n + 1) * fac n
example : fac 0 = 1 := rfl
example : fac 0 = 1 := by rw fac
example : fac 0 = 1 := by simp [fac]
example (n : ℕ) : fac (n + 1) = (n + 1) * fac n := rfl
example (n : ℕ) : fac (n + 1) = (n + 1) * fac n := by rw fac
example (n : ℕ) : fac (n + 1) = (n + 1) * fac n := by simp [fac]
theorem fac_pos (n : ℕ) : 0 < fac n :=
begin
induction n with n ih,
{ from zero_lt_one,},
rw fac,
apply mul_pos,
from n.succ_pos,
from ih,
end
theorem dvd_fac {i n : ℕ} (ipos : 0 < i) (ile : i ≤ n) : i ∣ fac n :=
begin
induction n with n ih,
{ linarith,},
rw fac,
cases nat.of_le_succ ile with h h,
{ apply dvd_mul_of_dvd_right, from ih h,},
rw h,
apply dvd_mul_right,
end
theorem pow_two_le_fac (n : ℕ) : 2^(n-1) ≤ fac n :=
begin
cases n with n,
{ simp [fac],},
induction n with n ih,
{ simp [fac],},
rw fac,
simp, simp at ih,
apply mul_le_mul',
{ rw nat.succ_eq_add_one,
linarith,},
from ih,
end
section
variables {α : Type*} (s : finset ℕ) (f : ℕ → ℕ) (n : ℕ)
#check finset.sum s f
#check finset.prod s f
open_locale big_operators
open finset
example : s.sum f = ∑ x in s, f x := rfl
example : s.prod f = ∏ x in s, f x := rfl
example : (range n).sum f = ∑ x in range n, f x := rfl
example : (range n).prod f = ∏ x in range n, f x := rfl
example (f : ℕ → ℕ) : ∑ x in range 0, f x = 0 :=
finset.sum_range_zero f
example (f : ℕ → ℕ) (n : ℕ): ∑ x in range n.succ, f x = (∑ x in range n, f x) + f n :=
finset.sum_range_succ f n
example (f : ℕ → ℕ) : ∏ x in range 0, f x = 1 :=
finset.prod_range_zero f
example (f : ℕ → ℕ) (n : ℕ): ∏ x in range n.succ, f x = (∏ x in range n, f x) * f n :=
finset.prod_range_succ f n
example (n : ℕ) : fac n = ∏ i in range n, (i + 1) :=
begin
induction n with n ih,
{ simp [fac],},
{ rw [fac, prod_range_succ, ih, mul_comm],},
end
theorem sum_id (n : ℕ) : ∑ i in range (n + 1), i = n * (n + 1) / 2 :=
begin
symmetry, apply nat.div_eq_of_eq_mul_right (by norm_num : 0 < 2),
induction n with n ih,
{ simp,},
{ rw [sum_range_succ, mul_add 2, ←ih, nat.succ_eq_add_one],
ring_nf,}
end
theorem sum_sqr (n : ℕ) : ∑ i in range (n + 1), i^2 = n * (n + 1) * (2 *n + 1) / 6 :=
begin
symmetry, apply nat.div_eq_of_eq_mul_right (by norm_num : 0 < 6),
induction n with n ih,
{ simp,},
{ rw [sum_range_succ, mul_add 6, ←ih, nat.succ_eq_add_one],
ring,},
end
end
section
inductive my_nat
| zero : my_nat
| succ : my_nat → my_nat
namespace my_nat
def add : my_nat → my_nat → my_nat
| x zero := x
| x (succ y) := succ (add x y)
def mul : my_nat → my_nat → my_nat
| x zero := zero
| x (succ y) := add (mul x y) x
def pred : my_nat → my_nat
| zero := zero
| (succ x) := x
def tsub : my_nat → my_nat → my_nat
| n zero := n
| n (succ m) := tsub (n.pred) m
theorem zero_add (n : my_nat) : add zero n = n :=
begin
induction n with n ih,
{ refl },
rw [add, ih]
end
theorem succ_add (m n : my_nat) : add (succ m) n = succ (add m n) :=
begin
induction n with n ih,
{ refl },
rw [add, ih],
refl
end
theorem add_comm (m n : my_nat) : add m n = add n m :=
begin
induction n with n ih,
{ rw zero_add, refl },
rw [add, succ_add, ih]
end
theorem add_assoc (m n k : my_nat) : add (add m n) k = add m (add n k) :=
begin
induction k with k ih,
{ simp only [add],},
{ rw [add, ih, ←add, ←add],}
end
theorem mul_add (m n k : my_nat) : mul m (add n k) = add (mul m n) (mul m k) :=
begin
induction k with k ih,
{ simp only [add, mul],},
{ rw [add, mul, ih, add_assoc, ←mul],},
end
theorem zero_mul (n : my_nat) : mul zero n = zero :=
begin
induction n with n ih,
{ simp [mul]},
{ rw [mul, add, ih],},
end
theorem succ_mul (m n : my_nat) : mul (succ m) n = add (mul m n) n :=
begin
induction n with n ih,
{ simp only [mul, add],},
{ rw [mul, ih, add, mul, add_assoc, ←add, add_comm n m, ←add, ←add_assoc],}
end
theorem mul_comm (m n : my_nat) : mul m n = mul n m :=
begin
induction n with n ih,
{ simp only [mul, zero_mul],},
{ rw [succ_mul, ←ih, mul],},
end
theorem succ_pred_self (n : my_nat) : n.succ.pred = n :=
begin
rw pred,
end
theorem sub_self (n : my_nat) : tsub n n = zero :=
begin
induction n with n ih,
{ simp only [tsub],},
{ rw [tsub, succ_pred_self, ih],}
end
end my_nat
end
|
# Quantum Circuit Learning
**Quantum Circuit Learning** (QCL) is an algorithm for applying quantum computers to machine learning [1].
Just like the VQE (Variational Quantum Eigensolver) covered in the previous section, it is a **quantum-classical hybrid algorithm** designed to run on **NISQ** (Noisy Intermediate-Scale Quantum) devices, i.e. medium-scale quantum computers without error correction.
Experiments on actual NISQ devices have already been performed; in March 2019, a paper on a hardware implementation of QCL by an IBM experimental team [2] was published in Nature and attracted considerable attention.
In the following, the outline of the algorithm and the specific learning procedure are introduced, then an implementation example using the quantum simulator Qulacs is presented.
This notebook is translated from https://dojo.qulacs.org/ja/latest/notebooks/5.1_variational_quantum_eigensolver.html
## Overview of QCL
In recent years, deep learning has been spotlighted in the field of machine learning.
In deep learning, by approximating a complex function using a deep neural network, the relationship between input and output can be learned and predictions can be performed on new data. QCL is a machine learning method that replaces this neural network with a **quantum circuit**, and hence a quantum computer.
By using a quantum circuit, the model can exploit an **exponentially large number of basis functions**, thanks to the superposition principle of quantum mechanics, which improves its expressive power.
Furthermore, it is believed that overfitting can be suppressed automatically thanks to the constraint (unitarity) that the quantum circuit must satisfy. As a result, performance beyond classical machine learning can be expected. (See Reference [1] for details.)
In a neural network, the function is approximated by adjusting the weight parameter *W* of each layer, and the concept is exactly the same in QCL.
That is, the quantum circuit used in QCL contains multiple rotation gates, and the function is approximated by adjusting the rotation angles $\theta$ of these gates. The specific procedure is shown below.
## Learning procedure
1. Prepare training data $\{(x_i,y_i)\}$. ($x_i$ is the input data, $y_i$ is the correct output (teacher data) expected to be predicted from $x_i$)
2. Prepare a circuit called $U_\text{in}(x)$ that is determined by some rule from the input $x$, and create an input state $\{\left|\psi_{\text{in}}(x_i)\right>\}_i=\{U_{\text{in}}(x_i)\left|0\right>\}_i$ with the information of $x_i$ embedded.
3. Apply the gate $U(\theta)$, which depends on the parameter $\theta$, to the input state to obtain the output state $\{\left|\psi_{\text{out}}(x_i,\theta)\right>=U(\theta)\left|\psi_{\text{in}}(x_i)\right>\}_i$.
4. Measure some observable in the output state. (e.g. the $Z$ expectation value of the first qubit: $\left<Z_1\right>=\left<\psi_{\text{out}}\left|Z_1\right|\psi_{\text{out}}\right>$)
5. Set F as an appropriate function (sigmoid, softmax or constant function, etc.), and the output $y(x_i,\theta)$ of the model is F(measurement_i).
6. Calculate the cost function $L(\theta)$ representing the divergence between the correct data $\{y_i\}$ and the output $\{y(x_i,\theta)\}_i$ of the model.
7. Obtain the $\theta=\theta^*$ which minimizes the cost function.
8. Then $y(x,\theta^*)$ is the desired prediction model.
(In the QCL, input data $x$ is first converted to a quantum state using $U_{\text{in}}(x)$, and an output $y$ is obtained there from using a variational quantum circuit $U(\theta)$ and measurement (In the figure, the output is $\left<B(x,\theta)\right>$.) Source: Revised Figure 1 in reference [1].)
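The same loop, written as a compact pseudocode sketch (the helper names below are placeholders only; the concrete Qulacs implementation is developed step by step in the next section):

```python
# Pseudocode sketch of steps 2-7 (placeholder names, not a runnable API):
#
# theta = initial_parameters()
# repeat until converged:                            # step 7
#     y_model = []
#     for x_i in training_inputs:
#         psi_in  = U_in(x_i) applied to |00...0>    # step 2: encode the input
#         psi_out = U(theta) applied to psi_in       # step 3: variational circuit
#         m_i     = expectation(Z_1, psi_out)        # step 4: measurement
#         y_model.append(F(m_i))                     # step 5: output of the model
#     L = cost(y_model, y_train)                     # step 6: cost function
#     theta = update(theta, L)
```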
## Implementation using quantum simulator Qulacs
In the following, fitting of the sine function $y=\sin(\pi x)$ is performed as a demonstration of function approximation.
```python
import numpy as np
import matplotlib.pyplot as plt
from functools import reduce
```
```python
######## Parameter #############
nqubit = 3 ## number of qubit
c_depth = 3 ## depth of circuit
time_step = 0.77 ## elapsed time of time evolution with random Hamiltonian
## randomly take num_x_train points from [x_min, x_max] as teacher data.
x_min = - 1.; x_max = 1.;
num_x_train = 50
## one variable function to learn
func_to_learn = lambda x: np.sin(x*np.pi)
## seed of random number
random_seed = 0
## initialization of random number generator
np.random.seed(random_seed)
```
### Prepare training data
```python
#### Prepare teacher data
x_train = x_min + (x_max - x_min) * np.random.rand(num_x_train)
y_train = func_to_learn(x_train)
# Add noise to pure sine function assuming real data used
mag_noise = 0.05
y_train = y_train + mag_noise * np.random.randn(num_x_train)
plt.plot(x_train, y_train, "o"); plt.show()
```
### Construct the input state
Firstly, create a gate $U_{\text{in}}(x_i)$ for embedding the input value $x_i$ in the initial state $\left|00...0\right>$.
According to reference [1], define $U_{\text{in}}(x_i)=\prod_jR^Z_j(\text{cos}^{-1}x^2)R^Y_j(\text{sin}^{-1}x)$ using rotation gates $R^Y_j(\theta)=e^{i\theta Y_j/2}, R^Z_j(\theta)=e^{i\theta Z_j/2}$.
The input $x_i$ is converted into quantum state $\left|\psi_\text{in}(x_i)\right>=U_{\text{in}}(x_i)\left|00...0\right>$.
```python
## Run this cell when using Google Colaboratory or in a local environment where Qulacs is not installed.
!pip install qulacs
```
Requirement already satisfied: qulacs in /anaconda3/envs/VibSpec/lib/python3.7/site-packages (0.1.9)
```python
# Create intial state
from qulacs import QuantumState, QuantumCircuit
state = QuantumState(nqubit) # Initial state |000>
state.set_zero_state()
print(state.get_vector())
```
[1.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j]
```python
# Function that creats a gate encoding x
def U_in(x):
U = QuantumCircuit(nqubit)
angle_y = np.arcsin(x)
angle_z = np.arccos(x**2)
for i in range(nqubit):
U.add_RY_gate(i, angle_y)
U.add_RZ_gate(i, angle_z)
return U
```
```python
# Test initial state
x = 0.1 # appropriate value
U_in(x).update_quantum_state(state) # calculation of U_in|000>
print(state.get_vector())
```
[-6.93804351e-01+7.14937415e-01j -3.54871219e-02-3.51340074e-02j
-3.54871219e-02-3.51340074e-02j 1.77881430e-03-1.76111422e-03j
-3.54871219e-02-3.51340074e-02j 1.77881430e-03-1.76111422e-03j
1.77881430e-03-1.76111422e-03j 8.73809020e-05+9.00424970e-05j]
### Construct variational quantum circuit $U(\theta)$
Next, a variational quantum circuit $U(\theta)$ to be optimized is created. The procedure is the following three steps:
1. Create a transverse magnetic field Ising Hamiltonian.
2. Create rotation gates.
3. Combine step1 and step2's gates alternatively to make a large variational quantum circuit $U(\theta)$.
#### 1.Create a transverse magnetic field Ising Hamiltonian
The expressiveness of the model is enhanced by performing time evolution under the transverse-field Ising model (introduced in section 4-2), which increases the complexity (entanglement) of the quantum circuit. (This part can be skipped unless you want to know the details.)
The Hamiltonian of the transverse-field Ising model is shown below; it defines the time evolution operator $U_{\text{rand}}=e^{-iHt}$:
\begin{equation}
H=\sum_{j=1}^N a_jX_j+\sum_{j=1}^N\sum_{k=1}^{j-1}J_{jk}Z_jZ_k
\end{equation}
The coefficients $a_j$ and $J_{jk}$ are drawn from the uniform distribution on $[-1,1]$.
```python
## Basic gate
from qulacs.gate import X, Z
I_mat = np.eye(2, dtype=complex)
X_mat = X(0).get_matrix()
Z_mat = Z(0).get_matrix()
```
```python
## Function that creates fullsize gate.
def make_fullgate(list_SiteAndOperator, nqubit):
'''
Take list_SiteAndOperator = [ [i_0, O_0], [i_1, O_1], ...],
Insert Identity into unrelated qubit
make (2**nqubit, 2**nqubit) matrix:
I(0) * ... * O_0(i_0) * ... * O_1(i_1) ...
'''
list_Site = [SiteAndOperator[0] for SiteAndOperator in list_SiteAndOperator]
list_SingleGates = [] ## Arrange 1-qubit gates and reduce with np.kron
cnt = 0
for i in range(nqubit):
if (i in list_Site):
list_SingleGates.append( list_SiteAndOperator[cnt][1] )
cnt += 1
else: ## an empty site is identity
list_SingleGates.append(I_mat)
return reduce(np.kron, list_SingleGates)
```
```python
#### Create time evolution operator by making random magnetic field and random coupling Ising Hamiltonian
ham = np.zeros((2**nqubit,2**nqubit), dtype = complex)
for i in range(nqubit): ## i runs 0 to nqubit-1
Jx = -1. + 2.*np.random.rand() ## random number in -1~1
ham += Jx * make_fullgate( [ [i, X_mat] ], nqubit)
for j in range(i+1, nqubit):
J_ij = -1. + 2.*np.random.rand()
ham += J_ij * make_fullgate ([ [i, Z_mat], [j, Z_mat]], nqubit)
## Create a time evolution operator by diagonalization. H*P = P*D <-> H = P*D*P^dagger
diag, eigen_vecs = np.linalg.eigh(ham)
time_evol_op = np.dot(np.dot(eigen_vecs, np.diag(np.exp(-1j*time_step*diag))), eigen_vecs.T.conj()) # e^-iHT
```
```python
time_evol_op.shape
```
(8, 8)
```python
# Convert to qulacs gate
from qulacs.gate import DenseMatrix
time_evol_gate = DenseMatrix([i for i in range(nqubit)], time_evol_op)
```
#### 2.Create rotation gates, 3.Create $U(\theta)$
Combine the time evolution operator $U_{\text{rand}}$ of the random transverse-field Ising model with the rotation gates acting on the $j$-th qubit ($j=1,2,\dots,n$),
\begin{equation}
U_\text{rot}(\theta_j^{(i)})=R_j^X(\theta_{j1}^{(i)})R_j^Z(\theta_{j2}^{(i)})R_j^X(\theta_{j3}^{(i)})
\end{equation}
to create variational quantum circuit $U(\theta)$:
\begin{equation}
U\Big(\{\theta_j^{(i)}\}_{i,j}\Big)= \prod_{i=1}^d\bigg(\bigg(\prod_{j=1}^nU_\text{rot}(\theta_j^{(i)})\bigg)\cdot U_\text{rand} \bigg)
\end{equation}
Here $i$ is an index labeling the layer of the quantum circuit, and $U_\text{rand}$ and the above rotations are repeated for $d$ layers in total. There are $3\times n \times d$ parameters. The initial value of each $\theta$ is drawn from the uniform distribution on $[0,2\pi]$.
```python
from qulacs import ParametricQuantumCircuit
```
```python
# Assemble output gate U_out & set initial parameter values
U_out = ParametricQuantumCircuit(nqubit)
for d in range(c_depth):
U_out.add_gate(time_evol_gate)
for i in range(nqubit):
angle = 2.0 * np.pi * np.random.rand()
U_out.add_parametric_RX_gate(i,angle)
angle = 2.0 * np.pi * np.random.rand()
U_out.add_parametric_RZ_gate(i,angle)
angle = 2.0 * np.pi * np.random.rand()
U_out.add_parametric_RX_gate(i,angle)
```
```python
# Get the list of initial values of the parameter theta
parameter_count = U_out.get_parameter_count()
theta_init = [U_out.get_parameter(ind) for ind in range(parameter_count)]
```
```python
theta_init
```
[6.007250646127814,
4.046309757767312,
2.663159813474645,
3.810080933381979,
0.12059442161498848,
1.8948504571449056,
4.14799267096281,
1.8226113595664735,
3.88310546309581,
2.6940332019609157,
0.851208649826403,
1.8741631278382846,
3.5811951525261123,
3.7125630518871535,
3.6085919651139333,
4.104181793964002,
4.097285684838374,
2.71068197476515,
5.633168398253273,
2.309459341364396,
2.738620094343915,
5.6041197193647925,
5.065466226710866,
4.4226624059922806,
0.6297441057449945,
5.777279648887616,
4.487710439107831]
For convenience, a function for updating parameter $\theta$ in $U(\theta)$ is created.
```python
# Function that updates parameter theta
def set_U_out(theta):
global U_out
parameter_count = U_out.get_parameter_count()
for i in range(parameter_count):
U_out.set_parameter(i, theta[i])
```
### Measurement
In this demonstration, the output of the model is the expectation value of 0th qubit's Pauli Z on output state $\left|\psi_\text{out}\right>$, that is:
\begin{equation}
y(\theta,x_i)=\left<Z_0\right>=\left<\psi_\text{out}|Z_0|\psi_\text{out}\right>
\end{equation}
```python
# Create observable Z_0
from qulacs import Observable
obs = Observable(nqubit)
obs.add_operator(2.,'Z 0')
# Set the observable to 2*Z.
# The reason for multiplying by 2 here is to expand the value range of the final <Z>.
# In order to cope with any unknown function, this constant also needs to be optimized as one parameter.
```
```python
obs.get_expectation_value(state)
```
1.9899748742132415
### Combine a series of procedures into one function
The procedures up to this point can be combined to define a function that returns the predicted value $y(x_i,\theta)$ of the model from the input $x_i$.
```python
# Function that gives prediction value y(x_i, theta) of the model from input x_i
def qcl_pred(x, U_out):
state = QuantumState(nqubit)
state.set_zero_state()
# Calculate input state
U_in(x).update_quantum_state(state)
# Calculate output state
U_out.update_quantum_state(state)
# Output of the model
res = obs.get_expectation_value(state)
return res
```
### Calculation of cost function
The cost function $L(\theta)$ is the mean squared error (MSE) between the teacher data and the model's predictions.
```python
# Calculate cost function L
def cost_func(theta):
'''
theta: ndarray of length c_depth * nqubit * 3
'''
# update the parameter theta of U_out
# global U_out
set_U_out(theta)
    # evaluate the model on all num_x_train training points
y_pred = [qcl_pred(x, U_out) for x in x_train]
# quadratic loss
L = ((y_pred - y_train)**2).mean()
return L
```
```python
# Value of cost function with initial parameter theta
cost_func(theta_init)
```
1.38892593161935
```python
# Plot of the model with the initial parameter theta
xlist = np.arange(x_min, x_max, 0.02)
y_init = [qcl_pred(x, U_out) for x in xlist]
plt.plot(xlist, y_init)
```
### Learning (optimization by scipy.optimize.minimize)
Preparation is finally finished, so let's start learning. Here, for simplicity, optimization is performed using the Nelder-Mead method, which does not require gradients. When using an optimization method that needs gradients (e.g. the BFGS method), refer to the gradient calculation formulas introduced in Reference [1].
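As a sketch of how such a gradient could be supplied, the following example computes $\partial L/\partial\theta_k$ with a central shift of $\pm\pi/2$, reusing `set_U_out` and `qcl_pred` defined above. It assumes the standard parameter-shift rule holds for the parametric rotation gates used here (see Reference [1] for the exact formulas); the function name `grad_cost_func` is just an illustrative choice, not part of Qulacs.

```python
# Hedged sketch: gradient of the cost function via the parameter-shift rule,
# assuming d<B>/d(theta_k) = ( <B>(theta_k + pi/2) - <B>(theta_k - pi/2) ) / 2
# for the parametric rotation gates used in U_out.
def grad_cost_func(theta):
    theta = np.asarray(theta, dtype=float)
    set_U_out(theta)
    y_pred = np.array([qcl_pred(x, U_out) for x in x_train])
    grad = np.zeros_like(theta)
    for k in range(len(theta)):
        shifted = theta.copy(); shifted[k] += np.pi / 2
        set_U_out(shifted)
        y_plus = np.array([qcl_pred(x, U_out) for x in x_train])
        shifted[k] -= np.pi
        set_U_out(shifted)
        y_minus = np.array([qcl_pred(x, U_out) for x in x_train])
        dy_dtheta = 0.5 * (y_plus - y_minus)                 # parameter-shift rule
        grad[k] = (2.0 * (y_pred - y_train) * dy_dtheta).mean()
    set_U_out(theta)   # restore the original parameters
    return grad

# Possible usage (instead of the Nelder-Mead call below):
# result = minimize(cost_func, theta_init, jac=grad_cost_func, method='BFGS')
```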
```python
from scipy.optimize import minimize
```
```python
%%time
# Learning (takes 14 seconds with the writer's PC)
result = minimize(cost_func, theta_init, method='Nelder-Mead')
```
CPU times: user 11.7 s, sys: 11.7 s, total: 23.4 s
Wall time: 14.2 s
```python
# Value of cost_function after optimization
result.fun
```
0.003987076559624772
```python
# Solution of theta by optimization
theta_opt = result.x
print(theta_opt)
```
[7.17242144 5.4043736 1.27744316 3.09192904 0.13144047 2.13757354
4.58470259 2.01924008 2.96107066 2.91843537 1.0609229 1.70351774
6.41114609 6.25686828 2.41619471 3.69387805 4.07551328 1.47666316
3.4108701 2.28524042 1.75253621 7.44181397 3.20314179 5.11364648
1.2831137 2.07306927 3.75112591]
### Plot results
```python
# Insert optimized theta into U_out
set_U_out(theta_opt)
```
```python
# Plot
plt.figure(figsize=(10, 6))
xlist = np.arange(x_min, x_max, 0.02)
# teacher data
plt.plot(x_train, y_train, "o", label='Teacher')
# Plot of the model with the initial parameter theta
plt.plot(xlist, y_init, '--', label='Initial Model Prediction', c='gray')
# Prediction of the model
y_pred = np.array([qcl_pred(x, U_out) for x in xlist])
plt.plot(xlist, y_pred, label='Final Model Prediction')
plt.legend()
plt.show()
```
It is clear that the approximation of the sin function was successful.
Here we dealt with a very simple task of a one-dimensional function approximation for both input and output, but it can be extended to approximation and classification problems with multidimensional inputs and outputs.
Motivated readers are encouraged to try to classify the [Iris dataset](https://scikit-learn.org/stable/auto_examples/datasets/plot_iris_dataset.html), one of the typical machine learning datasets, in column `5.2c. Application of QCL to Machine Learning`.
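As a starting point, a minimal sketch of preparing the Iris data for such an experiment is shown below; it assumes scikit-learn is installed, and the encoding of the four features into `U_in` and the multi-class readout are left to the reader.
```python
from sklearn.datasets import load_iris
import numpy as np

iris = load_iris()
X, y = iris.data, iris.target  # 150 samples, 4 features, 3 classes
# Rescale each feature into [-1, 1] so it can be fed to an angle-encoding input circuit
X_scaled = 2 * (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0)) - 1
print(X_scaled.shape, np.unique(y))
```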
## Reference
[1] K. Mitarai, M. Negoro, M. Kitagawa, and K. Fujii, “Quantum circuit learning”, [Phys. Rev. A 98, 032309 (2018)](https://journals.aps.org/pra/abstract/10.1103/PhysRevA.98.032309), arXiv:https://arxiv.org/abs/1803.00745
[2] V. Havlicek et al. , “Supervised learning with quantum-enhanced feature spaces”, [Nature 567, 209–212 (2019)](https://www.nature.com/articles/s41586-019-0980-2), arXiv:https://arxiv.org/abs/1804.11326
|
1995 Slayer tribute album Slatanic Slaughter featured three tracks which originally appeared on South of Heaven , with the title track , " Mandatory Suicide " and " Spill the Blood " interpreted by <unk> , Crown of Thorns and Grope respectively . Its 1998 follow up Slatanic Slaughter , Vol . 2 only featured two tracks originally from the album ; namely " Silent Scream " arranged by Vader and " Read Between the Lies " interpreted by Anathema . 1999 's Straight to Hell : A Tribute to Slayer collected four Slayer renditions which originated on the album , with versions of South of Heaven performed by Abaddon ( Venom ) and Electric Hellfire Club , " Mandatory Suicide " cut by Chapter 7 and " Behind the Crooked Cross " adapted by <unk> . 2006 Argentine tribute album Al Sur Del Abismo ( Tributo Argentino A Slayer ) saw <unk> and Climatic Terra also respectively cover " South of Heaven " and " Mandatory Suicide " . Hatebreed covered the song " Ghosts of War " for their 2009 cover album For the Lions . They released a music video for it also . Korn has covered the title track at least twice live , once with Kid Rock on vocals and another using the intro to follow into one of their songs live .
|
From CoqMTL Require Import Control.Monad.Lazy.
From CoqAlgs Require Import Base.
From CoqAlgs Require Import Structures.Ord.
Definition Queue (A : Type) : Type :=
list A * nat * Lazy (list A) * nat * list A.
Definition empty {A : Type} : Queue A :=
([], 0, delay [], 0, []).
Definition isEmpty {A : Type} (q : Queue A) : bool :=
let '(w, _, _, _, _) := q in
match w with
| [] => true
| _ => false
end.
Definition checkw {A : Type} (q : Queue A) : Queue A :=
let '(w, lenf, f, lenr, r) := q in
match w with
| [] => (force f, lenf, f, lenr, r)
| _ => q
end.
Definition queue {A : Type} (q : Queue A) : Queue A :=
let '(w, lenf, f, lenr, r) := q in
(* if @trich_leb natlt lenr lenf *)
if Nat.leb lenr lenf
then checkw q
else
let
f' := force f
in
checkw (f', lenf + lenr, delay (force f ++ rev r), 0, []).
Definition snoc {A : Type} (x : A) (q : Queue A) : Queue A :=
let '(w, lenf, f, lenr, r) := q in
queue (w, lenf, f, 1 + lenr, x :: r).
Definition tl {A : Type} (l : list A) : list A :=
match l with
| [] => []
| _ :: t => t
end.
Definition tail {A : Type} (q : Queue A) : option (Queue A) :=
let '(w, lenf, f, lenr, r) := q in
match w with
| [] => None
| h :: t => Some (queue (t, pred lenf, delay (tl (force f)), lenr, r))
end.
Definition head {A : Type} (q : Queue A) : option A :=
let '(w, lenf, f, lenr, r) := q in
match w with
| [] => None
| h :: _ => Some h
end.
(** The queue invariant. *)
Inductive prefix {A : Type} : list A -> list A -> Prop :=
| prefix_nil :
forall l : list A, prefix [] l
| prefix_cons :
forall (h : A) (t l : list A),
prefix t l -> prefix (h :: t) (h :: l).
#[global] Hint Constructors prefix : core.
Lemma prefix_app :
forall (A : Type) (l1 l2 : list A),
prefix l1 (l1 ++ l2).
Proof.
induction l1 as [| h1 t1]; cbn; auto.
Qed.
Lemma prefix_refl :
forall (A : Type) (l : list A),
prefix l l.
Proof.
induction l; auto.
Qed.
#[global] Hint Resolve prefix_app prefix_refl : core.
Lemma prefix_char :
forall (A : Type) (l1 l2 : list A),
prefix l1 l2 <-> exists suffix : list A, l1 ++ suffix = l2.
Proof.
split.
induction 1; cbn; firstorder eauto. exists x. congruence.
destruct 1 as [suffix <-]. apply prefix_app.
Qed.
Definition isQueue {A : Type} (q : Queue A) : Prop :=
let '(w, lenf, f, lenr, r) := q in
prefix w (force f) /\ lenf = length (force f)
/\ lenr = length r /\ lenr <= lenf.
Lemma empty_isQueue :
forall A : Type, isQueue (@empty A).
Proof. cbn. auto. Qed.
Ltac q := repeat
match goal with
| |- forall q : Queue _, _ =>
let w := fresh "w" in
let lenf := fresh "lenf" in
let f := fresh "f" in
let lenr := fresh "lenr" in
let r := fresh "r" in
destruct q as [[[[w lenf] f] lenr] r]
| |- forall _, _ => intro
end.
Lemma checkw_isQueue :
forall (A : Type) (q : Queue A),
isQueue q -> isQueue (checkw q).
Proof.
unfold isQueue. q.
cbn. destruct w; firstorder.
Qed.
Lemma queue_isQueue :
forall (A : Type) (q : Queue A),
isQueue q -> isQueue (queue q).
Proof.
q. cbn. destruct (Nat.leb lenr lenf).
destruct w; cbn in *; firstorder.
cbn in *. destruct (force f); cbn in *.
rewrite rev_length. firstorder; lia.
rewrite app_length, rev_length. firstorder; lia.
Qed.
Lemma snoc_isQueue :
forall (A : Type) (x : A) (q : Queue A),
isQueue q -> isQueue (snoc x q).
Proof.
q. cbn in *. destruct lenf as [| lenf']; cbn.
firstorder. destruct (force f); cbn in *; try congruence.
firstorder.
rewrite app_length. cbn. rewrite rev_length. lia.
lia.
firstorder. destruct (Nat.leb_spec lenr lenf').
destruct w; cbn; firstorder; lia.
destruct (force f); cbn in *; firstorder; try lia.
rewrite !app_length, rev_length. cbn. lia.
Qed.
Lemma force_delay :
forall (A : Type) (x : A),
force (delay x) = x.
Proof. compute. reflexivity. Qed.
Lemma tail_isQueue :
forall (A : Type) (q q' : Queue A),
tail q = Some q' -> isQueue q -> isQueue q'.
Proof.
q; cbn in *. destruct w.
inv H.
destruct (Nat.leb_spec lenr (pred lenf)).
destruct w; inv H; rewrite force_delay; firstorder.
destruct (force f); cbn in *; subst; auto.
inv H.
inv H. rewrite <- H7 in *. cbn in *. lia.
rewrite !force_delay in *. destruct (force f); cbn in *; inv H.
inv H0. inv H.
destruct l; inv H3; firstorder; try lia; rewrite force_delay; inv H.
rewrite rev_length. cbn. reflexivity.
cbn. rewrite app_length, rev_length. reflexivity.
Qed. |
State Before: X✝ : Type ?u.681883
Y : Type ?u.681886
inst✝² : TopologicalSpace X✝
inst✝¹ : TopologicalSpace Y
x y z : X✝
ι : Type ?u.681901
F : Set X✝
X : Type u_1
inst✝ : TopologicalSpace X
n : ℕ
s : Set X
h : IsPathConnected s
p : Fin (n + 1) → X
hp : ∀ (i : Fin (n + 1)), p i ∈ s
⊢ ∃ γ t, (∀ (t : ↑I), ↑γ t ∈ s) ∧ ∀ (i : Fin (n + 1)), ↑γ (t i) = p i State After: case intro
X✝ : Type ?u.681883
Y : Type ?u.681886
inst✝² : TopologicalSpace X✝
inst✝¹ : TopologicalSpace Y
x y z : X✝
ι : Type ?u.681901
F : Set X✝
X : Type u_1
inst✝ : TopologicalSpace X
n : ℕ
s : Set X
h : IsPathConnected s
p : Fin (n + 1) → X
hp : ∀ (i : Fin (n + 1)), p i ∈ s
γ : Path (p 0) (p ↑n)
hγ : range ↑γ ⊆ s ∧ ∀ (i : Fin (n + 1)), p i ∈ range ↑γ
⊢ ∃ γ t, (∀ (t : ↑I), ↑γ t ∈ s) ∧ ∀ (i : Fin (n + 1)), ↑γ (t i) = p i Tactic: rcases h.exists_path_through_family p hp with ⟨γ, hγ⟩ State Before: case intro
X✝ : Type ?u.681883
Y : Type ?u.681886
inst✝² : TopologicalSpace X✝
inst✝¹ : TopologicalSpace Y
x y z : X✝
ι : Type ?u.681901
F : Set X✝
X : Type u_1
inst✝ : TopologicalSpace X
n : ℕ
s : Set X
h : IsPathConnected s
p : Fin (n + 1) → X
hp : ∀ (i : Fin (n + 1)), p i ∈ s
γ : Path (p 0) (p ↑n)
hγ : range ↑γ ⊆ s ∧ ∀ (i : Fin (n + 1)), p i ∈ range ↑γ
⊢ ∃ γ t, (∀ (t : ↑I), ↑γ t ∈ s) ∧ ∀ (i : Fin (n + 1)), ↑γ (t i) = p i State After: case intro.intro
X✝ : Type ?u.681883
Y : Type ?u.681886
inst✝² : TopologicalSpace X✝
inst✝¹ : TopologicalSpace Y
x y z : X✝
ι : Type ?u.681901
F : Set X✝
X : Type u_1
inst✝ : TopologicalSpace X
n : ℕ
s : Set X
h : IsPathConnected s
p : Fin (n + 1) → X
hp : ∀ (i : Fin (n + 1)), p i ∈ s
γ : Path (p 0) (p ↑n)
h₁ : range ↑γ ⊆ s
h₂ : ∀ (i : Fin (n + 1)), p i ∈ range ↑γ
⊢ ∃ γ t, (∀ (t : ↑I), ↑γ t ∈ s) ∧ ∀ (i : Fin (n + 1)), ↑γ (t i) = p i Tactic: rcases hγ with ⟨h₁, h₂⟩ State Before: case intro.intro
X✝ : Type ?u.681883
Y : Type ?u.681886
inst✝² : TopologicalSpace X✝
inst✝¹ : TopologicalSpace Y
x y z : X✝
ι : Type ?u.681901
F : Set X✝
X : Type u_1
inst✝ : TopologicalSpace X
n : ℕ
s : Set X
h : IsPathConnected s
p : Fin (n + 1) → X
hp : ∀ (i : Fin (n + 1)), p i ∈ s
γ : Path (p 0) (p ↑n)
h₁ : range ↑γ ⊆ s
h₂ : ∀ (i : Fin (n + 1)), p i ∈ range ↑γ
⊢ ∃ γ t, (∀ (t : ↑I), ↑γ t ∈ s) ∧ ∀ (i : Fin (n + 1)), ↑γ (t i) = p i State After: case intro.intro
X✝ : Type ?u.681883
Y : Type ?u.681886
inst✝² : TopologicalSpace X✝
inst✝¹ : TopologicalSpace Y
x y z : X✝
ι : Type ?u.681901
F : Set X✝
X : Type u_1
inst✝ : TopologicalSpace X
n : ℕ
s : Set X
h : IsPathConnected s
p : Fin (n + 1) → X
hp : ∀ (i : Fin (n + 1)), p i ∈ s
γ : Path (p 0) (p ↑n)
h₁ : range ↑γ ⊆ s
h₂ : ∀ (i : Fin (n + 1)), ∃ y, ↑γ y = p i
⊢ ∃ γ t, (∀ (t : ↑I), ↑γ t ∈ s) ∧ ∀ (i : Fin (n + 1)), ↑γ (t i) = p i Tactic: simp only [range, mem_setOf_eq] at h₂ State Before: case intro.intro
X✝ : Type ?u.681883
Y : Type ?u.681886
inst✝² : TopologicalSpace X✝
inst✝¹ : TopologicalSpace Y
x y z : X✝
ι : Type ?u.681901
F : Set X✝
X : Type u_1
inst✝ : TopologicalSpace X
n : ℕ
s : Set X
h : IsPathConnected s
p : Fin (n + 1) → X
hp : ∀ (i : Fin (n + 1)), p i ∈ s
γ : Path (p 0) (p ↑n)
h₁ : range ↑γ ⊆ s
h₂ : ∀ (i : Fin (n + 1)), ∃ y, ↑γ y = p i
⊢ ∃ γ t, (∀ (t : ↑I), ↑γ t ∈ s) ∧ ∀ (i : Fin (n + 1)), ↑γ (t i) = p i State After: case intro.intro
X✝ : Type ?u.681883
Y : Type ?u.681886
inst✝² : TopologicalSpace X✝
inst✝¹ : TopologicalSpace Y
x y z : X✝
ι : Type ?u.681901
F : Set X✝
X : Type u_1
inst✝ : TopologicalSpace X
n : ℕ
s : Set X
h : IsPathConnected s
p : Fin (n + 1) → X
hp : ∀ (i : Fin (n + 1)), p i ∈ s
γ : Path (p 0) (p ↑n)
h₁ : ∀ (y : ↑I), ↑γ y ∈ s
h₂ : ∀ (i : Fin (n + 1)), ∃ y, ↑γ y = p i
⊢ ∃ γ t, (∀ (t : ↑I), ↑γ t ∈ s) ∧ ∀ (i : Fin (n + 1)), ↑γ (t i) = p i Tactic: rw [range_subset_iff] at h₁ State Before: case intro.intro
X✝ : Type ?u.681883
Y : Type ?u.681886
inst✝² : TopologicalSpace X✝
inst✝¹ : TopologicalSpace Y
x y z : X✝
ι : Type ?u.681901
F : Set X✝
X : Type u_1
inst✝ : TopologicalSpace X
n : ℕ
s : Set X
h : IsPathConnected s
p : Fin (n + 1) → X
hp : ∀ (i : Fin (n + 1)), p i ∈ s
γ : Path (p 0) (p ↑n)
h₁ : ∀ (y : ↑I), ↑γ y ∈ s
h₂ : ∀ (i : Fin (n + 1)), ∃ y, ↑γ y = p i
⊢ ∃ γ t, (∀ (t : ↑I), ↑γ t ∈ s) ∧ ∀ (i : Fin (n + 1)), ↑γ (t i) = p i State After: case intro.intro
X✝ : Type ?u.681883
Y : Type ?u.681886
inst✝² : TopologicalSpace X✝
inst✝¹ : TopologicalSpace Y
x y z : X✝
ι : Type ?u.681901
F : Set X✝
X : Type u_1
inst✝ : TopologicalSpace X
n : ℕ
s : Set X
h : IsPathConnected s
p : Fin (n + 1) → X
hp : ∀ (i : Fin (n + 1)), p i ∈ s
γ : Path (p 0) (p ↑n)
h₁ : ∀ (y : ↑I), ↑γ y ∈ s
t : Fin (n + 1) → ↑I
ht : ∀ (i : Fin (n + 1)), ↑γ (t i) = p i
⊢ ∃ γ t, (∀ (t : ↑I), ↑γ t ∈ s) ∧ ∀ (i : Fin (n + 1)), ↑γ (t i) = p i Tactic: choose! t ht using h₂ State Before: case intro.intro
X✝ : Type ?u.681883
Y : Type ?u.681886
inst✝² : TopologicalSpace X✝
inst✝¹ : TopologicalSpace Y
x y z : X✝
ι : Type ?u.681901
F : Set X✝
X : Type u_1
inst✝ : TopologicalSpace X
n : ℕ
s : Set X
h : IsPathConnected s
p : Fin (n + 1) → X
hp : ∀ (i : Fin (n + 1)), p i ∈ s
γ : Path (p 0) (p ↑n)
h₁ : ∀ (y : ↑I), ↑γ y ∈ s
t : Fin (n + 1) → ↑I
ht : ∀ (i : Fin (n + 1)), ↑γ (t i) = p i
⊢ ∃ γ t, (∀ (t : ↑I), ↑γ t ∈ s) ∧ ∀ (i : Fin (n + 1)), ↑γ (t i) = p i State After: no goals Tactic: exact ⟨γ, t, h₁, ht⟩ |
#include <boost/format.hpp>
#include <iostream>
int main()
{
std::cout << boost::format("%1%.%2%.%3%") % 16 % 9 % 2008 << std::endl;
} |
[STATEMENT]
lemma lcp_pref_ext: "u \<le>p v \<Longrightarrow> u \<le>p (u \<cdot> w) \<and>\<^sub>p (v \<cdot> z)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. u \<le>p v \<Longrightarrow> u \<le>p u \<cdot> w \<and>\<^sub>p v \<cdot> z
[PROOF STEP]
using longest_common_prefix_max_prefix prefix_prefix triv_pref
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>?ps \<le>p ?xs; ?ps \<le>p ?ys\<rbrakk> \<Longrightarrow> ?ps \<le>p ?xs \<and>\<^sub>p ?ys
?xs \<le>p ?ys \<Longrightarrow> ?xs \<le>p ?ys \<cdot> ?zs
?r \<le>p ?r \<cdot> ?s
goal (1 subgoal):
1. u \<le>p v \<Longrightarrow> u \<le>p u \<cdot> w \<and>\<^sub>p v \<cdot> z
[PROOF STEP]
by metis |
REBOL [
Title: "Rejoin-with"
Date: 16-7-2003
File: %rejoin-with.r
Author: "Oldes"
Email: [email protected]
Version: 0.0.1
Category: [util file 1]
]
rejoin-with: func[block str /local new][
if empty? block: reduce block [return block]
new: either series? first block [copy first block] [form first block]
block: next block
while [not tail? block] [
insert tail new str
insert tail new first block
block: next block
]
new
]
|
(************************************************************************)
(* v * The Coq Proof Assistant / The Coq Development Team *)
(* <O___,, * INRIA - CNRS - LIX - LRI - PPS - Copyright 1999-2011 *)
(* \VV/ **************************************************************)
(* // * This file is distributed under the terms of the *)
(* * GNU Lesser General Public License Version 2.1 *)
(************************************************************************)
(* $Id: Setoid_ring_theory.v 14641 2011-11-06 11:59:10Z herbelin $ *)
Require Export Bool.
Require Export Setoid.
Set Implicit Arguments.
Section Setoid_rings.
Variable A : Type.
Variable Aequiv : A -> A -> Prop.
Infix Local "==" := Aequiv (at level 70, no associativity).
Variable S : Setoid_Theory A Aequiv.
Add Setoid A Aequiv S as Asetoid.
Variable Aplus : A -> A -> A.
Variable Amult : A -> A -> A.
Variable Aone : A.
Variable Azero : A.
Variable Aopp : A -> A.
Variable Aeq : A -> A -> bool.
Infix "+" := Aplus (at level 50, left associativity).
Infix "*" := Amult (at level 40, left associativity).
Notation "0" := Azero.
Notation "1" := Aone.
Notation "- x" := (Aopp x).
Variable plus_morph :
forall a a0:A, a == a0 -> forall a1 a2:A, a1 == a2 -> a + a1 == a0 + a2.
Variable mult_morph :
forall a a0:A, a == a0 -> forall a1 a2:A, a1 == a2 -> a * a1 == a0 * a2.
Variable opp_morph : forall a a0:A, a == a0 -> - a == - a0.
Add Morphism Aplus : Aplus_ext.
intros; apply plus_morph; assumption.
Qed.
Add Morphism Amult : Amult_ext.
intros; apply mult_morph; assumption.
Qed.
Add Morphism Aopp : Aopp_ext.
exact opp_morph.
Qed.
Section Theory_of_semi_setoid_rings.
Record Semi_Setoid_Ring_Theory : Prop :=
{SSR_plus_comm : forall n m:A, n + m == m + n;
SSR_plus_assoc : forall n m p:A, n + (m + p) == n + m + p;
SSR_mult_comm : forall n m:A, n * m == m * n;
SSR_mult_assoc : forall n m p:A, n * (m * p) == n * m * p;
SSR_plus_zero_left : forall n:A, 0 + n == n;
SSR_mult_one_left : forall n:A, 1 * n == n;
SSR_mult_zero_left : forall n:A, 0 * n == 0;
SSR_distr_left : forall n m p:A, (n + m) * p == n * p + m * p;
SSR_plus_reg_left : forall n m p:A, n + m == n + p -> m == p;
SSR_eq_prop : forall x y:A, Is_true (Aeq x y) -> x == y}.
Variable T : Semi_Setoid_Ring_Theory.
Let plus_comm := SSR_plus_comm T.
Let plus_assoc := SSR_plus_assoc T.
Let mult_comm := SSR_mult_comm T.
Let mult_assoc := SSR_mult_assoc T.
Let plus_zero_left := SSR_plus_zero_left T.
Let mult_one_left := SSR_mult_one_left T.
Let mult_zero_left := SSR_mult_zero_left T.
Let distr_left := SSR_distr_left T.
Let plus_reg_left := SSR_plus_reg_left T.
Let equiv_refl := Seq_refl A Aequiv S.
Let equiv_sym := Seq_sym A Aequiv S.
Let equiv_trans := Seq_trans A Aequiv S.
Hint Resolve plus_comm plus_assoc mult_comm mult_assoc plus_zero_left
mult_one_left mult_zero_left distr_left plus_reg_left
equiv_refl (*equiv_sym*).
Hint Immediate equiv_sym.
(* Lemmas whose form is x=y are also provided in form y=x because
  Auto does not apply symmetry *)
Lemma SSR_mult_assoc2 : forall n m p:A, n * m * p == n * (m * p).
auto. Qed.
Lemma SSR_plus_assoc2 : forall n m p:A, n + m + p == n + (m + p).
auto. Qed.
Lemma SSR_plus_zero_left2 : forall n:A, n == 0 + n.
auto. Qed.
Lemma SSR_mult_one_left2 : forall n:A, n == 1 * n.
auto. Qed.
Lemma SSR_mult_zero_left2 : forall n:A, 0 == 0 * n.
auto. Qed.
Lemma SSR_distr_left2 : forall n m p:A, n * p + m * p == (n + m) * p.
auto. Qed.
Lemma SSR_plus_permute : forall n m p:A, n + (m + p) == m + (n + p).
intros.
rewrite (plus_assoc n m p).
rewrite (plus_comm n m).
rewrite <- (plus_assoc m n p).
trivial.
Qed.
Lemma SSR_mult_permute : forall n m p:A, n * (m * p) == m * (n * p).
intros.
rewrite (mult_assoc n m p).
rewrite (mult_comm n m).
rewrite <- (mult_assoc m n p).
trivial.
Qed.
Hint Resolve SSR_plus_permute SSR_mult_permute.
Lemma SSR_distr_right : forall n m p:A, n * (m + p) == n * m + n * p.
intros.
rewrite (mult_comm n (m + p)).
rewrite (mult_comm n m).
rewrite (mult_comm n p).
auto.
Qed.
Lemma SSR_distr_right2 : forall n m p:A, n * m + n * p == n * (m + p).
intros.
apply equiv_sym.
apply SSR_distr_right.
Qed.
Lemma SSR_mult_zero_right : forall n:A, n * 0 == 0.
intro; rewrite (mult_comm n 0); auto.
Qed.
Lemma SSR_mult_zero_right2 : forall n:A, 0 == n * 0.
intro; rewrite (mult_comm n 0); auto.
Qed.
Lemma SSR_plus_zero_right : forall n:A, n + 0 == n.
intro; rewrite (plus_comm n 0); auto.
Qed.
Lemma SSR_plus_zero_right2 : forall n:A, n == n + 0.
intro; rewrite (plus_comm n 0); auto.
Qed.
Lemma SSR_mult_one_right : forall n:A, n * 1 == n.
intro; rewrite (mult_comm n 1); auto.
Qed.
Lemma SSR_mult_one_right2 : forall n:A, n == n * 1.
intro; rewrite (mult_comm n 1); auto.
Qed.
Lemma SSR_plus_reg_right : forall n m p:A, m + n == p + n -> m == p.
intros n m p; rewrite (plus_comm m n); rewrite (plus_comm p n).
intro; apply plus_reg_left with n; trivial.
Qed.
End Theory_of_semi_setoid_rings.
Section Theory_of_setoid_rings.
Record Setoid_Ring_Theory : Prop :=
{STh_plus_comm : forall n m:A, n + m == m + n;
STh_plus_assoc : forall n m p:A, n + (m + p) == n + m + p;
STh_mult_comm : forall n m:A, n * m == m * n;
STh_mult_assoc : forall n m p:A, n * (m * p) == n * m * p;
STh_plus_zero_left : forall n:A, 0 + n == n;
STh_mult_one_left : forall n:A, 1 * n == n;
STh_opp_def : forall n:A, n + - n == 0;
STh_distr_left : forall n m p:A, (n + m) * p == n * p + m * p;
STh_eq_prop : forall x y:A, Is_true (Aeq x y) -> x == y}.
Variable T : Setoid_Ring_Theory.
Let plus_comm := STh_plus_comm T.
Let plus_assoc := STh_plus_assoc T.
Let mult_comm := STh_mult_comm T.
Let mult_assoc := STh_mult_assoc T.
Let plus_zero_left := STh_plus_zero_left T.
Let mult_one_left := STh_mult_one_left T.
Let opp_def := STh_opp_def T.
Let distr_left := STh_distr_left T.
Let equiv_refl := Seq_refl A Aequiv S.
Let equiv_sym := Seq_sym A Aequiv S.
Let equiv_trans := Seq_trans A Aequiv S.
Hint Resolve plus_comm plus_assoc mult_comm mult_assoc plus_zero_left
mult_one_left opp_def distr_left equiv_refl equiv_sym.
(* Lemmas whose form is x=y are also provided in form y=x because Auto does
  not apply symmetry *)
Lemma STh_mult_assoc2 : forall n m p:A, n * m * p == n * (m * p).
auto. Qed.
Lemma STh_plus_assoc2 : forall n m p:A, n + m + p == n + (m + p).
auto. Qed.
Lemma STh_plus_zero_left2 : forall n:A, n == 0 + n.
auto. Qed.
Lemma STh_mult_one_left2 : forall n:A, n == 1 * n.
auto. Qed.
Lemma STh_distr_left2 : forall n m p:A, n * p + m * p == (n + m) * p.
auto. Qed.
Lemma STh_opp_def2 : forall n:A, 0 == n + - n.
auto. Qed.
Lemma STh_plus_permute : forall n m p:A, n + (m + p) == m + (n + p).
intros.
rewrite (plus_assoc n m p).
rewrite (plus_comm n m).
rewrite <- (plus_assoc m n p).
trivial.
Qed.
Lemma STh_mult_permute : forall n m p:A, n * (m * p) == m * (n * p).
intros.
rewrite (mult_assoc n m p).
rewrite (mult_comm n m).
rewrite <- (mult_assoc m n p).
trivial.
Qed.
Hint Resolve STh_plus_permute STh_mult_permute.
Lemma Saux1 : forall a:A, a + a == a -> a == 0.
intros.
rewrite <- (plus_zero_left a).
rewrite (plus_comm 0 a).
setoid_replace (a + 0) with (a + (a + - a)) by auto.
rewrite (plus_assoc a a (- a)).
rewrite H.
apply opp_def.
Qed.
Lemma STh_mult_zero_left : forall n:A, 0 * n == 0.
intros.
apply Saux1.
rewrite <- (distr_left 0 0 n).
rewrite (plus_zero_left 0).
trivial.
Qed.
Hint Resolve STh_mult_zero_left.
Lemma STh_mult_zero_left2 : forall n:A, 0 == 0 * n.
auto.
Qed.
Lemma Saux2 : forall x y z:A, x + y == 0 -> x + z == 0 -> y == z.
intros.
rewrite <- (plus_zero_left y).
rewrite <- H0.
rewrite <- (plus_assoc x z y).
rewrite (plus_comm z y).
rewrite (plus_assoc x y z).
rewrite H.
auto.
Qed.
Lemma STh_opp_mult_left : forall x y:A, - (x * y) == - x * y.
intros.
apply Saux2 with (x * y); auto.
rewrite <- (distr_left x (- x) y).
rewrite (opp_def x).
auto.
Qed.
Hint Resolve STh_opp_mult_left.
Lemma STh_opp_mult_left2 : forall x y:A, - x * y == - (x * y).
auto.
Qed.
Lemma STh_mult_zero_right : forall n:A, n * 0 == 0.
intro; rewrite (mult_comm n 0); auto.
Qed.
Lemma STh_mult_zero_right2 : forall n:A, 0 == n * 0.
intro; rewrite (mult_comm n 0); auto.
Qed.
Lemma STh_plus_zero_right : forall n:A, n + 0 == n.
intro; rewrite (plus_comm n 0); auto.
Qed.
Lemma STh_plus_zero_right2 : forall n:A, n == n + 0.
intro; rewrite (plus_comm n 0); auto.
Qed.
Lemma STh_mult_one_right : forall n:A, n * 1 == n.
intro; rewrite (mult_comm n 1); auto.
Qed.
Lemma STh_mult_one_right2 : forall n:A, n == n * 1.
intro; rewrite (mult_comm n 1); auto.
Qed.
Lemma STh_opp_mult_right : forall x y:A, - (x * y) == x * - y.
intros.
rewrite (mult_comm x y).
rewrite (mult_comm x (- y)).
auto.
Qed.
Lemma STh_opp_mult_right2 : forall x y:A, x * - y == - (x * y).
intros.
rewrite (mult_comm x y).
rewrite (mult_comm x (- y)).
auto.
Qed.
Lemma STh_plus_opp_opp : forall x y:A, - x + - y == - (x + y).
intros.
apply Saux2 with (x + y); auto.
rewrite (STh_plus_permute (x + y) (- x) (- y)).
rewrite <- (plus_assoc x y (- y)).
rewrite (opp_def y); rewrite (STh_plus_zero_right x).
rewrite (STh_opp_def2 x); trivial.
Qed.
Lemma STh_plus_permute_opp : forall n m p:A, - m + (n + p) == n + (- m + p).
auto.
Qed.
Lemma STh_opp_opp : forall n:A, - - n == n.
intro.
apply Saux2 with (- n); auto.
rewrite (plus_comm (- n) n); auto.
Qed.
Hint Resolve STh_opp_opp.
Lemma STh_opp_opp2 : forall n:A, n == - - n.
auto.
Qed.
Lemma STh_mult_opp_opp : forall x y:A, - x * - y == x * y.
intros.
rewrite (STh_opp_mult_left2 x (- y)).
rewrite (STh_opp_mult_right2 x y).
trivial.
Qed.
Lemma STh_mult_opp_opp2 : forall x y:A, x * y == - x * - y.
intros.
apply equiv_sym.
apply STh_mult_opp_opp.
Qed.
Lemma STh_opp_zero : - 0 == 0.
rewrite <- (plus_zero_left (- 0)).
trivial.
Qed.
Lemma STh_plus_reg_left : forall n m p:A, n + m == n + p -> m == p.
intros.
rewrite <- (plus_zero_left m).
rewrite <- (plus_zero_left p).
rewrite <- (opp_def n).
rewrite (plus_comm n (- n)).
rewrite <- (plus_assoc (- n) n m).
rewrite <- (plus_assoc (- n) n p).
auto.
Qed.
Lemma STh_plus_reg_right : forall n m p:A, m + n == p + n -> m == p.
intros.
apply STh_plus_reg_left with n.
rewrite (plus_comm n m); rewrite (plus_comm n p); assumption.
Qed.
Lemma STh_distr_right : forall n m p:A, n * (m + p) == n * m + n * p.
intros.
rewrite (mult_comm n (m + p)).
rewrite (mult_comm n m).
rewrite (mult_comm n p).
trivial.
Qed.
Lemma STh_distr_right2 : forall n m p:A, n * m + n * p == n * (m + p).
intros.
apply equiv_sym.
apply STh_distr_right.
Qed.
End Theory_of_setoid_rings.
Hint Resolve STh_mult_zero_left STh_plus_reg_left: core.
Unset Implicit Arguments.
Definition Semi_Setoid_Ring_Theory_of :
Setoid_Ring_Theory -> Semi_Setoid_Ring_Theory.
intros until 1; case H.
split; intros; simpl in |- *; eauto.
Defined.
Coercion Semi_Setoid_Ring_Theory_of : Setoid_Ring_Theory >->
Semi_Setoid_Ring_Theory.
Section product_ring.
End product_ring.
Section power_ring.
End power_ring.
End Setoid_rings.
|
dir = joinpath(@__DIR__, "../config")
isdir(dir) || mkdir(dir)
if !isfile(joinpath(dir, "odbc.ini"))
open(joinpath(dir, "odbc.ini"), "w") do io
write(io, "[ODBC Data Sources]\n\n[ODBC]\nTrace=0\nTraceFile=stderr\n")
end
end
if !isfile(joinpath(dir, "odbcinst.ini"))
open(joinpath(dir, "odbcinst.ini"), "w") do io
write(io, "[ODBC Drivers]\n")
end
end
|
"""
Code for finding a line or wavelength range in spectra.
Author: Eric Ford
Created: August 2020
Contact: https://github.com/eford/
"""
""" Return list of all order indices that contain a pixel with wavelength lambda """
function find_orders_with_line(goal::Real,lambda::AbstractArray{T,2}) where T<:Real
order_min(i) = lambda[1,i]
order_max(i) = lambda[end,i]
#=
for i in 1:10
println("# i= ",i," order_min= ",order_min(i)," order_max= ",order_max(i), " goal= ",goal)
end
flush(stdout)
=#
findall(i->order_min(i)<=goal<=order_max(i), 1:size(lambda,2) )
end
""" Return list of all order indices that contain all pixels with wavelengths between goal_lo and goal_hi """
function find_orders_with_line(goal_lo::Real,goal_hi::Real,lambda::AbstractArray{T,2}) where T<:Real
order_min(i) = lambda[1,i]
order_max(i) = lambda[end,i]
findall(i->order_min(i)<=goal_lo && goal_hi<=order_max(i), 1:size(lambda,2) )
end
""" Return list of all order indices that include any wavelengths between goal_lo and goal_hi """
function find_orders_in_range(goal_lo::Real,goal_hi::Real,lambda::AbstractArray{T,2}) where T<:Real
order_min(i) = lambda[1,i]
order_max(i) = lambda[end,i]
findall(i-> (goal_lo<=order_min(i)<=goal_hi) || (goal_lo<=order_max(i)<=goal_hi), 1:size(lambda,2) )
end
# Find indicies for pixels around lines
const Δλoλ_fit_line_default = 5*(1.8*1000/speed_of_light_mps)
const Δλoλ_edge_pad_default = 0*(1.8*1000/speed_of_light_mps)
""" Return a range of columns indices with wavelengths within Δ of line_center """
function find_cols_to_fit(wavelengths::AbstractArray{T,1}, line_center::Real; Δ::Real = Δλoλ_fit_line_default) where T<:Real
@assert Δ >= zero(Δ)
first = findfirst(x->x>=line_center*(1-Δ),wavelengths)
last = findlast(x->x<=line_center*(1+Δ),wavelengths)
if isnothing(first) || isnothing(last) return 0:0 end
if last<first return last:first end
return first:last
end
""" Return a range of columns indices with wavelengths between line_lo and line_hi """
function find_cols_to_fit(wavelengths::AbstractArray{T,1}, line_lo::Real, line_hi::Real; Δ::Real = Δλoλ_edge_pad_default) where T<:Real
@assert line_lo < line_hi
first = findfirst(x->x>=line_lo*(1-Δ),wavelengths)
last = findlast(x->x<=line_hi*(1+Δ),wavelengths)
if isnothing(first) || isnothing(last) return 0:0 end
if last<first return last:first end
return first:last
end
""" Return list of (pixels, order_idx) pairs that contain pixels with desireed wavelengths.
Excludes locations that contain any pixels with var == NaN.
"""
function findall_line end
function findall_line(goal::Real,lambda::AbstractArray{T1,2},var::AbstractArray{T2,2}; Δ::Real = Δλoλ_fit_line_default) where {T1<:Real, T2<:Real}
@assert lambda[1,1] <= goal <= lambda[end,end]
@assert size(lambda) == size(var)
@assert Δ >= zero(Δ)
orders = find_orders_with_line(goal,lambda)
@assert length(orders) >= 1
locs = map(o->(pixels=find_cols_to_fit(lambda[:,o],goal,Δ=Δ),order=o), orders)
locs_good_idx = findall(t->!any(isnan.(var[t[1],t[2]])),locs)
#locs_good_idx = findall(t-> !(first(t.pixels)==0 || last(t.pixels)==0 || t.order==0) && (!any(isnan.(var[t.pixels,t.order]))) ,locs)
if length(locs) != length(locs_good_idx)
locs = locs[locs_good_idx]
end
return locs
end
function findall_line(goal_lo::Real,goal_hi::Real, lambda::AbstractArray{T1,2},var::AbstractArray{T2,2}; Δ::Real = Δλoλ_edge_pad_default, verbose::Bool = false) where {T1<:Real, T2<:Real}
@assert lambda[1,1] <= goal_lo < goal_hi <= lambda[end,end]
orders = find_orders_with_line(goal_lo,goal_hi,lambda)
#if ! (length(orders) >= 1) return end
#=
if verbose
for i in 1:5
println("# i= ",i," min(order)= ",minimum(lambda[:,i])," max(order)= ",maximum(lambda[:,i]), " goal_lo= ",goal_lo, " goal_hi = ",goal_hi)
end
end
flush(stdout)
=#
@assert length(orders) >= 1
locs = map(o->(pixels=find_cols_to_fit(lambda[:,o],goal_lo, goal_hi,Δ=Δ),order=o), orders)
#locs_good_idx = findall(t->!any(isnan.(var[t[1],t[2]])),locs)
locs_good_idx = findall(t-> !(first(t.pixels)==0 || last(t.pixels)==0 || t.order==0) && (!any(isnan.(var[t.pixels,t.order]))) ,locs)
if length(locs) != length(locs_good_idx)
locs = locs[locs_good_idx]
end
return locs
end
function findall_line(goal::Real,lambda::AbstractArray{T1,1},var::AbstractArray{T2,1}; Δ::Real = Δλoλ_fit_line_default) where {T1<:Real, T2<:Real}
@assert lambda[1] <= goal <= lambda[end]
@assert size(lambda) == size(var)
@assert Δ >= zero(Δ)
locs = find_cols_to_fit(lambda,goal,Δ=Δ)
locs_good_idx = findall(t->!any(isnan.(var[t])),locs)
#locs_good_idx = findall(t-> !(first(t.pixels)==0 || last(t.pixels)==0 || t.order==0) && (!any(isnan.(var[t.pixels,t.order]))) ,locs)
if length(locs) != length(locs_good_idx)
locs = locs[locs_good_idx]
end
return locs
end
function findall_line(goal_lo::Real,goal_hi::Real, lambda::AbstractArray{T1,1},var::AbstractArray{T2,1}; Δ::Real = Δλoλ_edge_pad_default, verbose::Bool = false) where {T1<:Real, T2<:Real}
@assert lambda[1] <= goal_lo < goal_hi <= lambda[end]
#= if verbose
for i in 1:5
println("# i= ",i," min(order)= ",minimum(lambda[:,i])," max(order)= ",maximum(lambda[:,i]), " goal_lo= ",goal_lo, " goal_hi = ",goal_hi)
end
end
flush(stdout)
=#
locs = find_cols_to_fit(lambda,goal_lo, goal_hi,Δ=Δ)
locs_good_idx = findall(t-> !any(isnan.(var[t])) ,locs)
#locs_good_idx = findall(t-> !(first(t.pixels)==0 || last(t.pixels)==0 || t.order==0) && (!any(isnan.(var[t.pixels,t.order]))) ,locs)
if length(locs) != length(locs_good_idx)
locs = locs[locs_good_idx]
end
return locs
end
function findall_line(goal::Real,spectra::AS; Δ::Real = Δλoλ_fit_line_default) where {AS<:AbstractSpectra}
findall_line(goal,spectra.λ,spectra.var, Δ=Δ)
end
function findall_line(goal_lo::Real,goal_hi::Real,spectra::AS; Δ::Real = Δλoλ_edge_pad_default) where {AS<:AbstractSpectra}
findall_line(goal_lo,goal_hi,spectra.λ,spectra.var, Δ=Δ)
end
""" Return (pixels, order_idx) pair that contain "best" region of spectra, based on highest SNR. """
function find_line_best end
function find_line_best(goal::Real,lambda::AbstractArray{T1,2},flux::AbstractArray{T2,2},var::AbstractArray{T3,2}; Δ::Real = Δλoλ_fit_line_default) where {T1<:Real, T2<:Real, T3<:Real}
locs = findall_line(goal,lambda,var,Δ=Δ)
if length(locs) == 0 return missing end
#scores = map( t->sum( flux[t[1],t[2]] ./ var[t[1],t[2]])/sum( 1.0 ./ var[t[1],t[2]]), locs)
scores = map( t->calc_snr(flux[t[1],t[2]],var[t[1],t[2]]), locs)
idx_best = findmax(scores)
locs[idx_best[2]]
end
function find_line_best(goal_lo::Real,goal_hi::Real, lambda::AbstractArray{T1,2},flux::AbstractArray{T2,2},var::AbstractArray{T3,2}; Δ::Real = Δλoλ_edge_pad_default) where {T1<:Real, T2<:Real, T3<:Real}
locs = findall_line(goal_lo,goal_hi,lambda,var,Δ=Δ)
if length(locs) == 0
println("=>(",goal_lo, ", ",goal_hi, ") Δ=",Δ)
return missing
end
#scores = map( t->sum( flux[t[1],t[2]] ./ var[t[1],t[2]])/sum( 1.0 ./ var[t[1],t[2]]), locs)
scores = map( t->calc_snr(flux[t[1],t[2]],var[t[1],t[2]]), locs)
idx_best = findmax(scores)
locs[idx_best[2]]
end
function find_line_best(goal::Real,lambda::AbstractArray{T1,1},flux::AbstractArray{T2,1},var::AbstractArray{T3,1}; Δ::Real = Δλoλ_fit_line_default) where {T1<:Real, T2<:Real, T3<:Real}
cols = find_cols_to_fit(lambda,goal, Δ=Δ)
@assert( ( first(cols)==0 && last(cols)==0) || !any(isnan.(var[cols])) )
return cols
#=
locs = findall_line(goal,lambda,var,Δ=Δ)
if length(locs) == 0 return missing end
#scores = map( t->sum( flux[t[1],t[2]] ./ var[t[1],t[2]])/sum( 1.0 ./ var[t[1],t[2]]), locs)
return locs
scores = map( t->calc_snr(flux[t[1],t[2]],var[t[1],t[2]]), locs)
idx_best = findmax(scores)
locs[idx_best[2]]
=#
end
function find_line_best(goal_lo::Real,goal_hi::Real, lambda::AbstractArray{T1,1},flux::AbstractArray{T2,1},var::AbstractArray{T3,1}; Δ::Real = Δλoλ_edge_pad_default) where {T1<:Real, T2<:Real, T3<:Real}
cols = find_cols_to_fit(lambda,goal_lo, goal_hi, Δ=Δ)
@assert( ( first(cols)==0 && last(cols)==0) || !any(isnan.(var[cols])) )
return cols
#=
locs = findall_line(goal_lo,goal_hi,lambda,var,Δ=Δ)
if length(locs) == 0
println("=>(",goal_lo, ", ",goal_hi, ") Δ=",Δ)
return missing
end
return locs
#scores = map( t->sum( flux[t[1],t[2]] ./ var[t[1],t[2]])/sum( 1.0 ./ var[t[1],t[2]]), locs)
scores = map( t->calc_snr(flux[t],var[t]), locs)
idx_best = findmax(scores)
locs[idx_best[2]]
=#
end
function find_line_best(goal::Real,spectra::AS; Δ::Real = Δλoλ_fit_line_default) where {AS<:AbstractSpectra}
find_line_best(goal,spectra.λ,spectra.flux,spectra.var, Δ=Δ)
end
function find_line_best(goal_lo::Real,goal_hi::Real,spectra::AS; Δ::Real = Δλoλ_edge_pad_default) where {AS<:AbstractSpectra}
find_line_best(goal_lo,goal_hi,spectra.λ,spectra.flux,spectra.var, Δ=Δ)
end
""" Find pixels included in a range of wavelengths """
function find_pixels_for_line_in_chunk( chunk::AbstractChunkOfSpectrum, λ_min::Real, λ_max::Real )# ; plan::LineFinderPlan = LineFinderPlan() )
idx_lo = searchsortedfirst(chunk.λ, λ_min, by=x->x>=λ_min)
idx_tmp = searchsortedlast(chunk.λ[idx_lo:end], λ_max, by=x->x<=λ_max, rev=true)
idx_hi = idx_lo + idx_tmp - 1
return idx_lo:idx_hi
end
function find_pixels_for_line_in_chunklist( chunk_list::AbstractChunkList, λ_min::Real, λ_max::Real; verbose::Bool = true )
ch_idx_all = findall(c-> (λ_min <= minimum(chunk_list.data[c].λ)) && (maximum(chunk_list.data[c].λ) <= λ_max) ,1:length(chunk_list))
println("Hello")
#map(c->(chunk_idx=c, pixels=find_pixels_for_line_in_chunk(chunk_list.data[c], λ_min, λ_max) ), ch_idx)
ch_idx = 0
if length(ch_idx_all) > 1
snr_of_chunks_with_line = map(c->RvSpectMLBase.calc_snr(chunk_list.data[c].flux, chunk_list.data[c].var), ch_idx_all)
ch_idx_to_keep = argmax(snr_of_chunks_with_line)
ch_idx = ch_idx_all[ch_idx_to_keep]
if verbose
println(" Found λ=",λ_min,"-",λ_max," in chunks: ", ch_idx_all, " containing ", length.(ch_idx_all), " pixels.")
println(" SNRs = ", snr_of_chunks_with_line)
println(" Keeping chunk #",ch_idx)
end
elseif length(ch_idx_all) == 1
ch_idx = first(ch_idx_all)
if verbose
println(" Found λ=",λ_min,"-",λ_max," in chunk: ", ch_idx, " containing ", length(ch_idx), " pixels.")
snr_of_chunk_with_line = RvSpectMLBase.calc_snr(chunk_list.data[ch_idx].flux, chunk_list.data[ch_idx].var)
println(" SNRs = ", snr_of_chunk_with_line)
end
end
if ch_idx == 0
error("Didn't find λ = " *string(λ_min)*" - " *string(λ_max)* " in chunklist.")
end
return (chunk_idx=ch_idx, pixels=find_pixels_for_line_in_chunk(chunk_list.data[ch_idx], λ_min, λ_max) )
end
function find_pixels_for_line_in_chunklist( chunk_list::AbstractChunkList, λ_min::Real, λ_max::Real, chunk_id::Integer)
return (chunk_idx=chunk_id, pixels=find_pixels_for_line_in_chunk(chunk_list.data[chunk_id], λ_min, λ_max) )
end
""" `is_in_wavelength_range_list_any_order(λ; list )`
Return true if λ is between lambda_lo and lambda_hi for any row in list
"""
function is_in_wavelength_range_list_any_order(λ::Real; list::DataFrame )
@assert hasproperty(list, :lambda_lo)
@assert hasproperty(list, :lambda_hi)
idx = searchsortedfirst(list[:,:lambda_hi], λ)
return idx>size(list,1) || !(list[idx,:lambda_lo]<=λ<=list[idx,:lambda_hi]) ? false : true
end
""" `is_in_wavelength_range_list(λ; order, list )`
Return true if λ is between lambda_lo and lambda_hi for any row in list
"""
function is_in_wavelength_range_list(λ::Real; order::Integer = 0, list::DataFrame )
@assert hasproperty(list, :lambda_lo)
@assert hasproperty(list, :lambda_hi)
if order == 0
return is_in_wavelength_range_list_any_order(λ, list=list)
else
@assert hasproperty(list, :order)
list_for_order = list |> @filter(_.order==order) |> DataFrame
@assert issorted(list_for_order.lambda_hi)
idx = searchsortedfirst(list_for_order[:,:lambda_hi], λ)
return idx>size(list_for_order,1) || !(list_for_order[idx,:lambda_lo]<=λ<=list_for_order[idx,:lambda_hi]) ? false : true
end
end
""" `is_in_wavelength_range_list(λ_lo, λ_hi; list )`
Return true if there is overlap between (λ_lo, λ_hi) and lambda_lo and lambda_hi for any row in list
# TODO: test
"""
function is_in_wavelength_range_list(λ_lo::Real, λ_hi::Real; list::DataFrame )
@assert λ_lo < λ_hi
@assert hasproperty(list, :lambda_lo)
@assert hasproperty(list, :lambda_hi)
idx = searchsortedfirst(list[:,:lambda_hi], λ_lo)
if idx>size(list,1) return false end
if λ_lo<=list[idx,:lambda_hi] && λ_hi>=list[idx,:lambda_lo]
return true
else
return false
end
end
|
module Issue4267.M where
record R : Set₂ where
field
f : Set₁
|
(* generated by Ott 0.10.17 ***locally nameless*** from: PLC.ott *)
Require Import Metatheory.
(** syntax *)
Definition termvar := var.
Inductive term : Set :=
| term_var_b : nat -> term
| term_var_f : termvar -> term
| term_abs : term -> term
| term_app : term -> term -> term.
(* EXPERIMENTAL *)
(** opening up abstractions *)
Fixpoint open_term_wrt_term_rec (k:nat) (e_5:term) (e__6:term) {struct e__6}: term :=
match e__6 with
| (term_var_b nat) =>
match lt_eq_lt_dec nat k with
| inleft (left _) => term_var_b nat
| inleft (right _) => e_5
| inright _ => term_var_b (nat - 1)
end
| (term_var_f x) => term_var_f x
| (term_abs e) => term_abs (open_term_wrt_term_rec (S k) e_5 e)
| (term_app e1 e2) => term_app (open_term_wrt_term_rec k e_5 e1) (open_term_wrt_term_rec k e_5 e2)
end.
Definition open_term_wrt_term e_5 e__6 := open_term_wrt_term_rec 0 e__6 e_5.
(** terms are locally-closed pre-terms *)
(** definitions *)
(* defns LC_term *)
Inductive lc_term : term -> Prop := (* defn lc_term *)
| lc_term_var_f : forall (x:termvar),
(lc_term (term_var_f x))
| lc_term_abs : forall (e:term),
( forall x , lc_term ( open_term_wrt_term e (term_var_f x) ) ) ->
(lc_term (term_abs e))
| lc_term_app : forall (e1 e2:term),
(lc_term e1) ->
(lc_term e2) ->
(lc_term (term_app e1 e2)).
(** free variables *)
Fixpoint fv_term (e_5:term) : vars :=
match e_5 with
| (term_var_b nat) => {}
| (term_var_f x) => {{x}}
| (term_abs e) => (fv_term e)
| (term_app e1 e2) => (fv_term e1) \u (fv_term e2)
end.
(** substitutions *)
Fixpoint subst_term (e_5:term) (x5:termvar) (e__6:term) {struct e__6} : term :=
match e__6 with
| (term_var_b nat) => term_var_b nat
| (term_var_f x) => (if eq_var x x5 then e_5 else (term_var_f x))
| (term_abs e) => term_abs (subst_term e_5 x5 e)
| (term_app e1 e2) => term_app (subst_term e_5 x5 e1) (subst_term e_5 x5 e2)
end.
(** definitions *)
(* defns Jval *)
Inductive pval : term -> Prop := (* defn pval *)
| pval_var : forall (x:termvar),
pval (term_var_f x)
| pval_app : forall (e1 e2:term),
pval e1 ->
val e2 ->
pval ( (term_app e1 e2) )
with val : term -> Prop := (* defn val *)
| val_pval : forall (e:term),
pval e ->
val e
| val_abs : forall (L:vars) (e:term),
( forall x , x \notin L -> val ( open_term_wrt_term e (term_var_f x) ) ) ->
val ( (term_abs e) ) .
(* defns Jred0 *)
Inductive red0 : term -> term -> Prop := (* defn red0 *)
| red0_beta : forall (e1 e2:term),
lc_term (term_abs e1) ->
lc_term e2 ->
red0 (term_app ( (term_abs e1) ) e2) (open_term_wrt_term e1 e2 ) .
(* defns Jred1 *)
Inductive red1 : term -> term -> Prop := (* defn red1 *)
| red1_empty : forall (e e':term),
red0 e e' ->
red1 e e'
| red1_appL : forall (e1 e2 e1':term),
lc_term e2 ->
red1 e1 e1' ->
red1 (term_app e1 e2) (term_app e1' e2)
| red1_appR : forall (e1 e2 e2':term),
lc_term e1 ->
red1 e2 e2' ->
red1 (term_app e1 e2) (term_app e1 e2')
| red1_abs : forall (L:vars) (e e':term),
( forall x , x \notin L -> red1 ( open_term_wrt_term e (term_var_f x) ) ( open_term_wrt_term e' (term_var_f x) ) ) ->
red1 (term_abs e) (term_abs e').
(* defns Jpara *)
Inductive para_red : term -> term -> Prop := (* defn para_red *)
| para_red_var : forall (x:termvar),
para_red (term_var_f x) (term_var_f x)
| para_red_abs : forall (L:vars) (e e':term),
( forall x , x \notin L -> para_red ( open_term_wrt_term e (term_var_f x) ) ( open_term_wrt_term e' (term_var_f x) ) ) ->
para_red (term_abs e) (term_abs e')
| para_red_app1 : forall (e1 e2 e1' e2':term),
para_red e1 e1' ->
para_red e2 e2' ->
para_red (term_app e1 e2) (term_app e1' e2')
| para_red_app2 : forall (L:vars) (e1 e2 e2' e1':term),
( forall x , x \notin L -> para_red ( open_term_wrt_term e1 (term_var_f x) ) (open_term_wrt_term e1' (term_var_f x) )) ->
para_red e2 e2' ->
para_red (term_app ( (term_abs e1) ) e2) (open_term_wrt_term e1' e2' ) .
(* defns Jcan *)
Inductive can : term -> term -> Prop := (* defn can *)
| can_var : forall (x:termvar),
can (term_var_f x) (term_var_f x)
| can_abs : forall (L:vars) (e e':term),
( forall x , x \notin L -> can ( open_term_wrt_term e (term_var_f x) ) ( open_term_wrt_term e' (term_var_f x) ) ) ->
can (term_abs e) (term_abs e')
| can_app1 : forall (e1 e2 e1' e2':term),
(forall e', ( e1 ) <> term_abs e') ->
can e1 e1' ->
can e2 e2' ->
can (term_app e1 e2) (term_app e1' e2')
| can_app2 : forall (L:vars) (e1 e2 e2' e1':term),
( forall x , x \notin L -> can ( open_term_wrt_term e1 (term_var_f x) ) ( open_term_wrt_term e1' (term_var_f x) )) ->
can e2 e2' ->
can (term_app ( (term_abs e1) ) e2) (open_term_wrt_term e1' e2' ) .
(** infrastructure *)
(* additional definitions *)
(* instantiation of tactics *)
Ltac gather_atoms ::=
let A := gather_atoms_with (fun x : vars => x) in
let B := gather_atoms_with (fun x : var => {{ x }}) in
let D1 := gather_atoms_with (fun x => fv_term x) in
constr:(A \u B \u D1).
Hint Constructors pval val red0 red1 para_red can lc_term.
|
State Before: α : Type u_3
E : Type u_2
F : Type ?u.3098262
G : Type ?u.3098265
m m0 : MeasurableSpace α
p : ℝ≥0∞
q : ℝ
μ ν : Measure α
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedAddCommGroup F
inst✝ : NormedAddCommGroup G
β : Type u_1
mβ : MeasurableSpace β
f : α → β
g : β → E
hg : AEStronglyMeasurable g (Measure.map f μ)
hf : AEMeasurable f
⊢ Memℒp g p ↔ Memℒp (g ∘ f) p State After: no goals Tactic: simp [Memℒp, snorm_map_measure hg hf, hg.comp_aemeasurable hf, hg] |
= = = Sales and accolades = = =
|
Require Import Crypto.Arithmetic.PrimeFieldTheorems.
Require Import Crypto.Specific.solinas64_2e256m189_5limbs.Synthesis.
(* TODO : change this to field once field isomorphism happens *)
Definition carry :
{ carry : feBW_loose -> feBW_tight
| forall a, phiBW_tight (carry a) = (phiBW_loose a) }.
Proof.
Set Ltac Profiling.
Time synthesize_carry ().
Show Ltac Profile.
Time Defined.
Print Assumptions carry.
|
\documentclass[letterpaper,12pt,twoside,]{pinp}
%% Some pieces required from the pandoc template
\providecommand{\tightlist}{%
\setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}}
% Use the lineno option to display guide line numbers if required.
% Note that the use of elements such as single-column equations
% may affect the guide line number alignment.
\usepackage[T1]{fontenc}
\usepackage[utf8]{inputenc}
% pinp change: the geometry package layout settings need to be set here, not in pinp.cls
\geometry{layoutsize={0.95588\paperwidth,0.98864\paperheight},%
layouthoffset=0.02206\paperwidth, layoutvoffset=0.00568\paperheight}
\definecolor{pinpblue}{HTML}{185FAF} % imagecolorpicker on blue for new R logo
\definecolor{pnasbluetext}{RGB}{101,0,0} %
\title{Assignment 1 - Exploring Data. Due September 27, 11:59pm 2019}
\author[a]{EPIB607 - Inferential Statistics}
\affil[a]{Fall 2020, McGill University}
\setcounter{secnumdepth}{5}
% Please give the surname of the lead author for the running footer
\leadauthor{Bhatnagar}
% Keywords are not mandatory, but authors are strongly encouraged to provide them. If provided, please include two to five keywords, separated by the pipe symbol, e.g:
\begin{abstract}
All questions are to be answered in an R Markdown document using the
provided template and compiled to a pdf document. You are free to choose
any function from any package to complete the assignment. Concise
answers will be rewarded. Be brief and to the point. Each question is
worth 25 points. Label your graphs appropriately with proper titles and
axis labels. Please submit the compiled pdf report to Crowdmark by
September 27, 2019, 11:59pm. Save your answers to each
question in separate pdf files, and also upload your code. See
\url{https://crowdmark.com/help/} for details.
\end{abstract}
\dates{This version was compiled on \today}
% initially we use doi so keep for backwards compatibility
% new name is doi_footer
\pinpfootercontents{Assignment 1 due Sepetember 27, 2020 by 11:59pm}
\begin{document}
% Optional adjustment to line up main text (after abstract) of first page with line numbers, when using both lineno and twocolumn options.
% You should only change this length when you've finalised the article contents.
\verticaladjustment{-2pt}
\maketitle
\thispagestyle{firststyle}
\ifthenelse{\boolean{shortarticle}}{\ifthenelse{\boolean{singlecolumn}}{\abscontentformatted}{\abscontent}}{}
% If your first paragraph (i.e. with the \dropcap) contains a list environment (quote, quotation, theorem, definition, enumerate, itemize...), the line after the list may have some extra indentation. If this is the case, add \parshape=0 to the end of the list environment.
\hypertarget{template}{%
\section*{Template}\label{template}}
\addcontentsline{toc}{section}{Template}
Please use the \texttt{.Rmd} template for Assignment 1, which is available on
myCourses.
\hypertarget{points-immunogenicity-of-the-chadox1-ncov-19-vaccine-against-sars-cov-2}{%
\section{(25 points) Immunogenicity of the ChAdOx1 nCoV-19 vaccine
against
SARS-CoV-2}\label{points-immunogenicity-of-the-chadox1-ncov-19-vaccine-against-sars-cov-2}}
This questions refers to the Lancet paper \emph{Safety and
immunogenicity of the ChAdOx1 nCoV-19 vaccine against SARS-CoV-2:a
preliminary report of a phase 1/2, single-blind, randomised controlled
trial} by Folegatti et. al (2020) and available in myCourses.
\begin{enumerate}
\def\labelenumi{\alph{enumi})}
\tightlist
\item
(2 points) Consider Figure 3 Panel B: What visual cues (or aesthetics)
are being used? Briefly describe the main takeaways from the entire
Figure 3.
\item
(3 points) Do you think Figure 3 is a good graphic in terms of
conveying its message clearly? Is there anything you would have done
differently? Explain.
\item
(2 points) Consider the data introduced in class which contains
immunity levels (Immunoglobulin G (IgG)) from the convalescent group
and the vaccine groups post 28 days. Note that the IgG levels in the
dataset below are given on the log10 scale. Calculate the median IgG
levels (ELISA units) on the log10 scale for each group.
\end{enumerate}
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{path <-}
\StringTok{"http://www.biostat.mcgill.ca/hanley/statbook/immunogenicityChAdOx1.nCoV-19vaccine.txt"}
\NormalTok{ds <-}\StringTok{ }\KeywordTok{read.table}\NormalTok{(path)}
\KeywordTok{head}\NormalTok{(ds)}
\end{Highlighting}
\end{Shaded}
\begin{ShadedResult}
\begin{verbatim}
# RefIndexCategory IgGResponse.log10.ElisaUnits
# 1 Convalescent 2.56
# 2 Convalescent 2.74
# 3 Convalescent 2.79
# 4 Convalescent 3.32
# 5 Convalescent 3.15
# 6 Convalescent 2.35
\end{verbatim}
\end{ShadedResult}
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{str}\NormalTok{(ds)}
\end{Highlighting}
\end{Shaded}
\begin{ShadedResult}
\begin{verbatim}
# 'data.frame': 307 obs. of 2 variables:
# $ RefIndexCategory : Factor w/ 2 levels "Convalescent",..: 1 1 1 1 1 1 1 1 1 1 ...
# $ IgGResponse.log10.ElisaUnits: num 2.56 2.74 2.79 3.32 3.15 2.35 2.72 2.95 2.42 2.64 ...
\end{verbatim}
\end{ShadedResult}
\begin{enumerate}
\def\labelenumi{\alph{enumi})}
\setcounter{enumi}{4}
\tightlist
\item
(1 point) Are you able to calculate a correlation of IgG levels
between groups? If not, explain why not? If yes, interpret the
correlation.
\item
(4 points) From the medians alone, is there enough evidence to
conclude that the median IgG levels in the convalescent group are
higher than the median IgG levels in the vaccine group (post 28 days)?
Explain.
\item
  (7 points) Use the bootstrap to assess whether there is enough evidence to
suggest that the median IgG levels in the convalescent group are
higher than the median IgG levels in the vaccine group (post 28 days).
\emph{Hint: resample the data with replacement separately in each
group B=1000 times. For each of the B datasets, calculate the median
IgG level and take the difference in medians between the two groups.
Plot the differences in a histogram and calculate the 2.5 and 97.5
percentiles.}
\item
(6 points) The dataset, shown below and available on myCourses, was
extracted (approximately) from Figure 3 Panel A for the ChAdOx1
nCoV-19 (prime) group only. The \texttt{time} column represents the
days since vaccination, and \texttt{igg\_response} are the IgG levels
on the original scale. Create an appropriate figure which shows the
immunity levels as a function of time. You are free to choose the plot
type; the choice of plot should be guided by the message you are
trying to convey. Be sure to label your axes, show units, include a
title and choose an appropriate color palette. Briefly interpret the
plot.
\end{enumerate}
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{DT <-}\StringTok{ }\KeywordTok{read.csv}\NormalTok{(}\StringTok{"prime_igg_response.csv"}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{head}\NormalTok{(DT)}
\end{Highlighting}
\end{Shaded}
\begin{ShadedResult}
\begin{verbatim}
# time igg_response
# 1 0 930.37376
# 2 0 267.80142
# 3 0 241.40290
# 4 0 170.80787
# 5 0 79.79795
# 6 0 67.12348
\end{verbatim}
\end{ShadedResult}
\newpage
\hypertarget{points-covid-19-cases-comparison-between-counties-with-and-without-stay-at-home-orders}{%
\section{(25 points) COVID-19 Cases Comparison Between Counties With and
Without Stay-at-Home
Orders}\label{points-covid-19-cases-comparison-between-counties-with-and-without-stay-at-home-orders}}
This question is based on the JAMA Network Open article \emph{Comparison
of Estimated Rates of Coronavirus Disease 2019 (COVID-19) in Border
Counties in Iowa Without a Stay-at-Home Order and Border Counties in
Illinois With a Stay-at-Home Order} by Lyu and Wehby (2020) and
available in myCourses. The county and state level cumulative incidence
of cases data is provided in the code below. Note: you need to install
the \texttt{covdata} package (which is not on CRAN) using
\texttt{remotes::install\_github("kjhealy/covdata")}.
\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{# remotes::install_github("kjhealy/covdata")}
\KeywordTok{library}\NormalTok{(covdata) }
\KeywordTok{library}\NormalTok{(dplyr); }\KeywordTok{library}\NormalTok{(tidyr); }\KeywordTok{library}\NormalTok{(ggplot2); }\KeywordTok{library}\NormalTok{(readr)}
\CommentTok{# get population data from https://covid19.census.gov/datasets/}
\NormalTok{f <-}\StringTok{ "https://opendata.arcgis.com/datasets/21843f238cbb46b08615fc53e19e0daf_1.csv"}
\NormalTok{pop_county <-}\StringTok{ }\KeywordTok{read_csv}\NormalTok{(}\DataTypeTok{file =}\NormalTok{ f) }\OperatorTok{%>%}
\StringTok{ }\NormalTok{dplyr}\OperatorTok{::}\KeywordTok{rename}\NormalTok{(}\DataTypeTok{fips =}\NormalTok{ GEOID, }\DataTypeTok{population =}\NormalTok{ B01001_001E, }\DataTypeTok{state =}\NormalTok{ State) }\OperatorTok{%>%}
\StringTok{ }\NormalTok{dplyr}\OperatorTok{::}\KeywordTok{select}\NormalTok{(state, fips, population)}
\NormalTok{county_level <-}\StringTok{ }\NormalTok{nytcovcounty }\OperatorTok{%>%}
\StringTok{ }\NormalTok{dplyr}\OperatorTok{::}\KeywordTok{left_join}\NormalTok{(pop_county, }\DataTypeTok{by =} \KeywordTok{c}\NormalTok{(}\StringTok{"state"}\NormalTok{,}\StringTok{"fips"}\NormalTok{)) }\OperatorTok{%>%}
\StringTok{ }\NormalTok{dplyr}\OperatorTok{::}\KeywordTok{mutate}\NormalTok{(}\DataTypeTok{cases.per.10k =}\NormalTok{ cases}\OperatorTok{/}\NormalTok{population }\OperatorTok{*}\StringTok{ }\FloatTok{1e4}\NormalTok{) }\OperatorTok{%>%}
\StringTok{ }\NormalTok{dplyr}\OperatorTok{::}\KeywordTok{filter}\NormalTok{(state }\OperatorTok{%in%}\StringTok{ }\KeywordTok{c}\NormalTok{(}\StringTok{"Iowa"}\NormalTok{,}\StringTok{"Illinois"}\NormalTok{)) }\OperatorTok{%>%}
\StringTok{ }\NormalTok{dplyr}\OperatorTok{::}\KeywordTok{group_by}\NormalTok{(county)}
\NormalTok{pop_state <-}\StringTok{ }\NormalTok{pop_county }\OperatorTok{%>%}
\StringTok{ }\NormalTok{dplyr}\OperatorTok{::}\KeywordTok{group_by}\NormalTok{(state) }\OperatorTok{%>%}
\StringTok{ }\NormalTok{dplyr}\OperatorTok{::}\KeywordTok{summarise}\NormalTok{(}\DataTypeTok{population =} \KeywordTok{sum}\NormalTok{(population, }\DataTypeTok{na.rm =} \OtherTok{TRUE}\NormalTok{))}
\NormalTok{state_level <-}\StringTok{ }\NormalTok{county_level }\OperatorTok{%>%}
\StringTok{ }\NormalTok{dplyr}\OperatorTok{::}\KeywordTok{group_by}\NormalTok{(state, date) }\OperatorTok{%>%}
\StringTok{ }\NormalTok{dplyr}\OperatorTok{::}\KeywordTok{filter}\NormalTok{(date }\OperatorTok{>=}\StringTok{ "2020-03-15"}\NormalTok{) }\OperatorTok{%>%}
\StringTok{ }\NormalTok{dplyr}\OperatorTok{::}\KeywordTok{summarise}\NormalTok{(}\DataTypeTok{cases =} \KeywordTok{sum}\NormalTok{(cases)) }\OperatorTok{%>%}
\StringTok{ }\NormalTok{dplyr}\OperatorTok{::}\KeywordTok{left_join}\NormalTok{(pop_state, }\DataTypeTok{by =} \StringTok{"state"}\NormalTok{) }\OperatorTok{%>%}
\StringTok{ }\NormalTok{dplyr}\OperatorTok{::}\KeywordTok{mutate}\NormalTok{(}\DataTypeTok{cases.per.10k =}\NormalTok{ cases }\OperatorTok{/}\StringTok{ }\NormalTok{population }\OperatorTok{*}\StringTok{ }\FloatTok{1e4}\NormalTok{, }\DataTypeTok{state =} \KeywordTok{factor}\NormalTok{(state),}
\DataTypeTok{time =} \KeywordTok{as.numeric}\NormalTok{(date }\OperatorTok{-}\StringTok{ }\KeywordTok{min}\NormalTok{(date)) }\OperatorTok{+}\StringTok{ }\DecValTok{1}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{head}\NormalTok{(county_level)}
\end{Highlighting}
\end{Shaded}
\begin{ShadedResult}
\begin{verbatim}
# # A tibble: 6 x 8
# # Groups: county [1]
# date county state fips cases deaths population cases.per.10k
# <date> <chr> <chr> <chr> <dbl> <dbl> <dbl> <dbl>
# 1 2020-01-24 Cook Illinois 17031 1 0 5223719 0.00191
# 2 2020-01-25 Cook Illinois 17031 1 0 5223719 0.00191
# 3 2020-01-26 Cook Illinois 17031 1 0 5223719 0.00191
# 4 2020-01-27 Cook Illinois 17031 1 0 5223719 0.00191
# 5 2020-01-28 Cook Illinois 17031 1 0 5223719 0.00191
# 6 2020-01-29 Cook Illinois 17031 1 0 5223719 0.00191
\end{verbatim}
\end{ShadedResult}
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{head}\NormalTok{(state_level)}
\end{Highlighting}
\end{Shaded}
\begin{ShadedResult}
\begin{verbatim}
# # A tibble: 6 x 6
# # Groups: state [1]
# state date cases population cases.per.10k time
# <fct> <date> <dbl> <dbl> <dbl> <dbl>
# 1 Illinois 2020-03-15 94 12821497 0.0733 1
# 2 Illinois 2020-03-16 104 12821497 0.0811 2
# 3 Illinois 2020-03-17 159 12821497 0.124 3
# 4 Illinois 2020-03-18 286 12821497 0.223 4
# 5 Illinois 2020-03-19 420 12821497 0.328 5
# 6 Illinois 2020-03-20 583 12821497 0.455 6
\end{verbatim}
\end{ShadedResult}
\begin{enumerate}
\def\labelenumi{\alph{enumi})}
\tightlist
\item
(6 points) Using the county level dataset provided, reproduce Figure 1
of the paper. Does your Figure agree with theirs? Would county level
curves have been more appropriate to show instead of the state totals?
\item
(5 points) Plot the cumulative incidence curves per 10000 people from
March 21 until the most recent day for which you have data, for each
of the counties used in the paper. Interpret the plot and discuss if
the county level plots still agree with the overall conclusion of the
paper.
\item
(4 points) Case counts are inherently tied to testing capacity. Death
  from COVID-19 does not have this issue, although there are other biases
  such as misclassification and underreporting. Plot the same graph as
in part (b) but for deaths and interpret the plot.
\item
(10 points) Illinois (Democrat-controlled legislature) is surrounded
by states with
\href{https://www.ncsl.org/research/about-state-legislatures/partisan-composition.aspx\#}{Republican-controlled
  legislatures (Iowa, Missouri, Kentucky, Indiana, Wisconsin)}. Do the
data suggest there is a correlation between COVID-19 cases (or deaths)
and which party has legislative control? Explain and justify using
summary statistics and/or figures. Do not overcomplicate this
analysis, i.e., feel free to make simplifying assumptions about
testing.
\end{enumerate}
\newpage
\hypertarget{points-age-structures-of-populations-then-and-now}{%
\section{(25 points) Age-structures of Populations, then and
now}\label{points-age-structures-of-populations-then-and-now}}
The 1911 census of Ireland was taken on April 2nd 1911 and was released
to the public in 1961. Follow
\href{http://www.census.nationalarchives.ie/help/about19011911census.html}{this
link} for further details on the census. James Hanley (JH) has scraped
the data for Dublin, collected the age-frequency distribution by gender
and provided you with a three column .csv file on myCourses called
\texttt{age\_sex\_frequencies\_ireland.csv} which looks like this:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{cens <-}\StringTok{ }\KeywordTok{read.csv}\NormalTok{(}\StringTok{"age_sex_frequencies_ireland.csv"}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{head}\NormalTok{(cens)}
\end{Highlighting}
\end{Shaded}
\begin{ShadedResult}
\begin{verbatim}
# Gender Age Freq
# 1 Male 0 5332
# 2 Male 1 4570
# 3 Male 2 4979
# 4 Male 3 4789
# 5 Male 4 4884
# 6 Male 5 4787
\end{verbatim}
\end{ShadedResult}
The \texttt{Age} column represents the age in 1911. The \texttt{Freq}
column gives the frequency of the number of people for a given age and
\texttt{Gender}. Note that \texttt{Age} is an interval; for example,
\texttt{Age=0} actually represents individuals who are between the ages
of 0 and 1, \texttt{Age=1} are individuals between ages 1 and 2, and so
on.
\begin{enumerate}
\def\labelenumi{\alph{enumi})}
\tightlist
\item
(6 points) What was the earliest year of birth for (i) males and (ii)
females ?
\item
(8 points) Create a suitable visualization of this data and then
comment on any patterns you see and give reasons for these patterns.
Your choice should leverage all the information provided in the data
and be influenced by the message that you are trying to convey. Be
sure to include an informative title and figure caption.
\item
(8 points) Calculate the mean age, the standard deviation (SD), and
the quartiles: \(Q_{25}, Q_{50} (median), Q_{75}\) separately for
males and females.
\item
  (3 points) The original census cards have been scanned and are available
online.
\href{http://www.census.nationalarchives.ie/reels/nai000230598/}{This
one in particular} is quite famous. Why?
\end{enumerate}
\newpage
\hypertarget{points-flint-blood-lead-levels}{%
\section{(25 points) Flint Blood Lead
Levels}\label{points-flint-blood-lead-levels}}
Lead in the environment is persistent, bio-accumulative, and toxic.
Chronic exposure to lead in children is associated with many negative
health outcomes even when the Blood Lead Levels (BLLs) are measured as
low as 1.0-10.0 µg/dL. An analysis of childhood exposure to lead is
described in the article \emph{Blood Lead Levels of Children in Flint,
Michigan: 2006-2016} by Gomez et al.~(2018) available on myCourses.
\begin{enumerate}
\def\labelenumi{\alph{enumi})}
\item
(2 points) Summarize the main findings of the study in 280 characters
or less.
\item
(5 points) Does Figure 1 do a good job of conveying its message?
Explain why or why not.
\item
(6 points) Consider the information presented in Figure 1 and think
about the dataset which would have been used to generate the plot.
What are the rows and what are the columns? What is the dimension of
the dataset? Are the data in tidy format?
\item
(6 points) From the graph, extract the yearly BLL percentages \(\geq\)
5.0 \(\mu\)g/dL, in children 5 years and younger, for Flint and
Michigan. You may read directly off the graph or try using a
\href{https://automeris.io/WebPlotDigitizer/}{WebPlotDigitizer}.
Calculate the yearly change from baseline (2006) percentages
separately for each group. Is there evidence to suggest that the
change from baselines are different for Flint vs.~Michigan? Support
your answer with summary statistics and/or a plot.
\item
(6 points) Figure 2 shows the geometric mean BLL levels over time for
children residing within Flint boundaries. Recreate the plot and add
the simple linear regression line. Calculate the correlation
  coefficient \(r\) and compare it with the linear regression output. Do
the results agree?
\end{enumerate}
%\showmatmethods
\bibliography{pinp}
\bibliographystyle{jss}
\end{document}
|
function obj=MOxUnitTestOutcome(test_,duration)
s=struct();
s.test=test_;
s.duration=duration;
obj=class(s,'MOxUnitTestOutcome');
|
#include <boost/test/tools/old/interface.hpp>
#include <chrono>
#include <thread>
#define BOOST_TEST_MODULE Modeltest
#include <boost/test/unit_test.hpp>
#include "model.hpp"
using namespace cwo;
const std::string eth01 = "<addr1>";
const std::string eth02 = "<addr2>";
const std::string vet01 = "<addr3>";
struct fix {
fix()
{
m.apikey("<api-key>");
m.registerwallet(VET, vet01);
m.registerwallet(ETH, eth01);
m.registerwallet(ETH, eth02);
}
Model m;
};
BOOST_AUTO_TEST_CASE(ctors)
{
Model m;
}
BOOST_FIXTURE_TEST_CASE(registerwallets, fix)
{
std::vector<Wallet*> v = m.walletsof(ETH);
BOOST_CHECK_EQUAL(v[0]->address(), eth01);
BOOST_CHECK_EQUAL(v[1]->address(), eth02);
v = m.walletsof(VET);
BOOST_CHECK_EQUAL(v[0]->address(), vet01);
}
BOOST_FIXTURE_TEST_CASE(unregisterwallets, fix)
{
std::vector<Wallet*> v = m.walletsof(VET);
BOOST_CHECK_EQUAL(v[0]->address(), vet01);
m.unregisterwallet(VET, vet01);
v = m.walletsof(VET);
BOOST_CHECK_EQUAL(v.empty(), true);
/* unregister nonexisting wallet */
m.unregisterwallet(BTC, "0xdeadbeef");
v = m.walletsof(ETH);
BOOST_CHECK_EQUAL(v[0]->address(), eth01);
BOOST_CHECK_EQUAL(v[1]->address(), eth02);
}
BOOST_FIXTURE_TEST_CASE(runinanotherthreadandregisterunregisterwallets, fix)
{
std::thread th([&] () {
m.run();
});
std::this_thread::sleep_for(std::chrono::milliseconds(200));
m.stop();
th.join();
}
BOOST_FIXTURE_TEST_CASE(setinvestmentwhilerunning, fix)
{
std::thread th([&] () {
m.run();
});
std::this_thread::sleep_for(std::chrono::milliseconds(200));
m.investment(ETH, 5300);
m.investment(VET, 350);
m.stop();
th.join();
}
BOOST_FIXTURE_TEST_CASE(updateintervalwhilerunning, fix)
{
std::thread th([&] () {
m.run();
});
m.updateinterval(200);
std::this_thread::sleep_for(std::chrono::milliseconds(200));
m.updateinterval(100);
std::this_thread::sleep_for(std::chrono::milliseconds(200));
m.stop();
th.join();
}
|
While Catholics are sometimes accused of worshiping images, in violation of the first commandment, the Church says this is a misunderstanding. In the Church's opinion, "the honor paid to sacred images is a 'respectful veneration', not the adoration due to God alone". In the 8th century, heated arguments arose over whether religious icons (in this context paintings) were prohibited by the first commandment. The dispute was almost entirely restricted to the Eastern church; the iconoclasts wished to prohibit icons, while the iconodules supported their veneration, a position consistently backed by the Western Church. At the Second Council of Nicaea in 787, the ecumenical council determined that the veneration of icons and statues was not in violation of the commandment and stated "whoever venerates an image venerates the person portrayed in it." At around the time of the controversy over Iconoclasm, the Western church began to use monumental sculpture, which by the Romanesque period became a major feature of Western Christian art, and which has remained part of the Catholic tradition, in contrast to Eastern Christianity, which avoids large religious sculpture. The Catechism, using very traditional arguments, posits that God gave permission for images that symbolize Christian salvation by leaving symbols such as the bronze serpent, and the cherubim on the Ark of the Covenant. It states that "by becoming incarnate, the Son of God introduced a new economy of images".
|
(*******************************************************************************
Title: Auxiliary.v
Authors: Jeremy Avigad, Chris Kapulkin, Peter LeFanu Lumsdaine
Date: 1 March 2013
This file contains lemmas that are required as background in the
Fibration Categories project, but are not supplied by the HoTT
library.
*******************************************************************************)
Require Import HoTT.
Require Export FunextAxiom.
Open Scope path.
Monomorphic Definition UU := Type.
Notation pr1 := projT1.
Notation pr2 := projT2.
Arguments pr1 {_ _} _.
Arguments pr2 {_ _} _.
(* TODO (mid): possible improvements now that we’re over the new library.
- increase usage of the [moveL_Pm] family;
- use [apply (concat H)] and [apply (concatR H)], rather than e.g. [path_via' p. apply H.]
- use [whiskerL], [whiskerR] rather than [apply (ap (concat p))] and [apply (ap (fun q => q @ p))].
*)
(*******************************************************************************
Paths.
*******************************************************************************)
Section Paths.
(* Convenient for the frequent idiom [apply (concat (concat_pp_p))] and variants. *)
Global Arguments concat_pp_p {_ _ _ _ _ _ _ _}.
Global Arguments concat_p_pp {_ _ _ _ _ _ _ _}.
(* The Paulin-Mohring elimination principle for equalities is
very useful, but asymmetric. This is its left-handed counterpart. *)
Definition id_opp_elim {X:Type} (x:X)
{P:forall (y:X) (p:y=x), Type}
: (P x 1)
-> forall (y:X) (p:y=x), P y p.
Proof.
intros P1 y p. destruct p. assumption.
Defined.
(* Note: this proof, while clean, is proof-theoretically overkill — it requires a universe, since inspecting it carefully, it Id-eliminates into a Pi-type with large domain (since P has to be generalized). With a little more work, it can be proved more economically, using just pure M-L TT. *)
Definition path_space (A : Type) := { x:A & { y:A & x = y }}.
(* Useful mainly for the idiom [apply (concatR (expression))]. *)
Definition concatR {A : Type} {x y z : A} (q : y = z) (p : x = y)
:= concat p q.
End Paths.
(* A variant of [path_via] that does not attempt to do anything clever. *)
Ltac path_via' mid :=
apply @concat with (y := mid).
(*******************************************************************************
Lemmas about paths in types of the form [{a : A & {b : B & C a b}}].
*******************************************************************************)
Section TriplePaths.
Lemma triple_path {A : Type} {B : Type} (C : A -> B -> Type)
(u v : {a : A & {b : B & C a b}})
(p : (pr1 u) = (pr1 v))
(q : (pr1 (pr2 u)) = (pr1 (pr2 v)))
(r : (transport (fun x => C x (pr1 (pr2 v))) p
(transport (C (pr1 u)) q (pr2 (pr2 u))))
= (pr2 (pr2 v)))
: u = v.
Proof.
destruct u as [u1 [u2 u3]], v as [v1 [v2 v3]]. simpl in * |- *.
destruct p; destruct q; destruct r. simpl. exact 1.
Defined.
Lemma pr1_triple_path {A : Type} {B : Type} (C : A -> B -> Type)
(u v : {a : A & {b : B & C a b}})
(p : (pr1 u) = (pr1 v))
(q : (pr1 (pr2 u)) = (pr1 (pr2 v)))
(r : (transport (fun x => C x (pr1 (pr2 v))) p
(transport (C (pr1 u)) q (pr2 (pr2 u))))
= (pr2 (pr2 v)))
: ap pr1 (triple_path C u v p q r) = p.
Proof.
destruct u as [u1 [u2 u3]], v as [v1 [v2 v3]]. simpl in * |- *.
destruct p. destruct q. destruct r. simpl. exact 1.
Defined.
Lemma pr2_triple_path {A : Type} {B : Type} (C : A -> B -> Type)
(u v : {a : A & {b : B & C a b}})
(p : (pr1 u) = (pr1 v))
(q : (pr1 (pr2 u)) = (pr1 (pr2 v)))
(r : (transport (fun x => C x (pr1 (pr2 v))) p
(transport (C (pr1 u)) q (pr2 (pr2 u))))
= (pr2 (pr2 v)))
: ap (fun xyz => pr1 (pr2 xyz)) (triple_path C u v p q r) = q.
Proof.
destruct u as [u1 [u2 u3]], v as [v1 [v2 v3]]. simpl in * |- *.
destruct p. destruct q. destruct r. simpl. exact 1.
Defined.
(* TODO (low): triple_path_pr3. (The tricky bit is typing the statement.) *)
End TriplePaths.
(*******************************************************************************
Equivalences.
*******************************************************************************)
(* TODO (low): move this declarations into the section itself, once weird bug is sorted out. *)
Arguments BuildEquiv [A B f] _ : rename.
Section Equivs.
(* TODO (high): move to library. *)
Global Arguments equiv_inv [A B] f {_} x.
(* TODO (low): consider; possibly move to library. *)
Global Arguments equiv_fun [A B] _ _.
Global Arguments equiv_isequiv [A B] e.
Global Arguments isequiv_adjointify [A B f] _ _ _.
(* TODO (med): Unnecessary here; but probably move to library. *)
Global Arguments isequiv_ap [A B] f {_} x y.
(* TODO (high): move to library. *)
Global Arguments isequiv_postcompose {_} A [B C] f {_}.
Global Arguments isequiv_precompose {_} [A B] C f {_}.
(*
TODO (low): also consider/try changing arguments as follows:
Arguments cancelL_isequiv [B C] g {ge} [A] f {fge} : rename.
Arguments cancelR_isequiv [A B] f {fe} [C] g {gfe}.
Also: consider changing the order of their arguments; and *definitely* try making them not instances.
*)
(* Compare to [map_equiv_o_inverse] in old library *)
(* TODO (low): actually, this is an instance of homotopy-naturality, [concat_Ap]. Can it be replaced by that? *)
Lemma ap_inverse_o_equiv {A B : Type} (e : A <~> B)
{x y : B} (p : x = y)
: ap e (ap (e ^-1) p)
= (eisretr e x @ p) @ (eisretr e y)^.
Proof.
destruct p. simpl.
path_via (eisretr e x @ (eisretr e x)^).
symmetry. apply concat_pV.
apply whiskerR, inverse, concat_p1.
Defined.
(* Every type [P] is equivalent to the function type [1 -> P]. *)
Lemma well_pointedness (P : Type) : P <~> (Unit -> P).
Proof.
exists (fun x _ => x).
apply (isequiv_adjointify (fun f => f tt)).
intros f. apply path_forall. intros []. exact 1.
intros x. exact 1.
Defined.
End Equivs.
(*******************************************************************************
HLevels.
*******************************************************************************)
Section HLevels.
(* TODO (low): move to HoTT? *)
Record HSet := {
hset_carrier :> Type;
ishset_hset : IsHSet hset_carrier }.
End HLevels.
(*******************************************************************************
Functional Extensionality.
*******************************************************************************)
Section Funext.
Global Arguments path_forall {_} [_ _ _ _] _.
(** [path_forall] commutes with [inverse]. (This follows purely formally from the fact that its inverse [apD10] does.) *)
Definition path_forall_V {X : Type} {P : X -> Type} {f g : forall x, P x}
(H : forall x, f x = g x)
: path_forall (fun x => (H x)^) = (path_forall H)^.
Proof.
path_via' (path_forall (fun x => (apD10 (path_forall H) x)^)).
apply ap, (@ap _ _ (fun h x => (h x)^)). apply inverse, eisretr.
path_via' (path_forall (apD10 (path_forall H)^)).
apply ap, inverse. apply path_forall; intros x. apply apD10_V.
apply eissect.
Defined.
(** Note: this differs from the library function [apD10_path_forall] essentially only in having [pointwise_paths] unfolded in its type. However, that is enough to make working with it more convenient — in particular, [rewrite] often works with this where it fails with [apD10_path_forall]. *)
Definition apD10_path_forall' {A : Type} {P : A -> Type}
{f g : forall x, P x} (h : f == g) (x:A)
: apD10 (path_forall h) x = h x
:= (apD10_path_forall _ _ h x).
(*TODO (high): consistentize the various ways of using [eisretr], [apD10_path_forall], etc. *)
End Funext.
(*******************************************************************************
Various things that don’t have a clear home.
*******************************************************************************)
Section Varia.
(* TODO (low): move to live with hfibers? Where do they live? *)
Definition hfiber_incl {X Y:Type} (f:X -> Y) (y:Y) : hfiber f y -> X
:= pr1.
Fixpoint iterate {A:Type} (f:A->A) (n:nat)
:= fun x => match n with O => x | S n' => f (iterate f n' x) end.
Fixpoint iterate_dep {A:Type} (f:A->A) (B:A->Type)
(g : forall a:A, B a -> B (f a))
(n:nat)
:= fun (x:A) (y:B x) => match n return (B (iterate f n x)) with
| O => y
| S n' => g _ (iterate_dep _ _ g n' x y) end.
Lemma moveR_I {AA BB : Type} (ff : AA -> BB) {H : IsEquiv ff} (x : AA) (y : BB)
: y = ff x -> ff ^-1 y = x.
Proof.
intros H_eq. path_via (ff ^-1 (ff x)).
apply ap, H_eq. apply eissect.
Defined.
(* Compare [equiv_sigma_contr] in library. *)
Lemma isequiv_sigma_contr {X:Type} {Y:X->Type}
: (forall x:X, Contr (Y x)) -> IsEquiv (@projT1 X Y).
Proof.
intros H. exact (equiv_isequiv (equiv_sigma_contr _)).
Defined.
Lemma isequiv_hfiber_incl_over_hprop {Y X : Type} (X_hprop : IsHProp X)
(f : Y -> X) (x:X)
: IsEquiv (hfiber_incl f x).
Proof.
refine (isequiv_sigma_contr _).
Defined.
End Varia.
Notation "A /\ B" := (A * B).
Notation "A <-> B" := ((A -> B) /\ (B -> A)).
(*
Local Variables:
coq-prog-name: "hoqtop"
End:
*)
|
{--
Copyright 2021 Joel Berkeley
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--}
||| This module contains general library utilities.
module Util
import Data.Vect
||| All numbers from `0` to `n - 1` inclusive, in increasing order.
|||
||| @n The (exclusive) limit of the range.
export
range : (n : Nat) -> Vect n Nat
range Z = []
range (S n) = snoc (range n) n
|
/-
Copyright (c) 2022 Henrik Böving. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Henrik Böving
-/
namespace Cpdt
namespace Chapter8
inductive Ty where
| nat : Ty
| bool : Ty
| prod : Ty → Ty → Ty
inductive Exp : Ty → Type
| nConst : Nat → Exp .nat
| plus : Exp .nat → Exp .nat → Exp .nat
| eq : Exp .nat → Exp .nat → Exp .bool
| bConst : Bool → Exp .bool
| and : Exp .bool → Exp .bool → Exp .bool
| if_ : Exp .bool → Exp α → Exp α → Exp α
| pair : Exp α → Exp β → Exp (.prod α β)
| fst : Exp (.prod α β) → Exp α
| snd : Exp (.prod α β) → Exp β
@[reducible]
def Ty.denote : Ty → Type
| nat => Nat
| bool => Bool
| prod α β => Prod (denote α) (denote β)
def Exp.denote : Exp α → α.denote
| nConst n => n
| plus l r => Nat.add (denote l) (denote r)
| eq l r => (denote l) == (denote r)
| bConst b => b
| and l r => (denote l) && (denote r)
| if_ d l r => if (denote d) then (denote l) else (denote r)
| pair l r => (denote l, denote r)
| fst p => (denote p).fst
| snd p => (denote p).snd
def Exp.pairOut : Exp (.prod α β) → Option (Exp α × Exp β)
| pair l r => some (l, r)
| _ => none
def Exp.cfold : Exp α → Exp α
| nConst n => nConst n
| plus l r =>
let lfold := cfold l
let rfold := cfold r
match lfold, rfold with
| nConst n, nConst m => nConst (n + m)
| _, _ => plus lfold rfold
| eq l r =>
let lfold := cfold l
let rfold := cfold r
match lfold, rfold with
| nConst n, nConst m => bConst (n == m)
| _, _ => eq lfold rfold
| bConst b => bConst b
| and l r =>
let lfold := cfold l
let rfold := cfold r
match lfold, rfold with
| bConst n, bConst m => bConst (n && m)
| _, _ => and lfold rfold
| if_ d l r =>
let dfold := cfold d
let lfold := cfold l
let rfold := cfold r
match dfold with
| bConst true => lfold
| bConst false => rfold
| _ => if_ dfold lfold rfold
| pair l r => pair (cfold l) (cfold r)
| fst p =>
let pfold := cfold p
match pairOut pfold with
| some p => p.fst
| none => fst pfold
| snd p =>
let pfold := cfold p
match pairOut pfold with
| some p => p.snd
| none => snd pfold
theorem Exp.cfold_correct : denote e = denote (cfold e) := by
induction e with simp[denote, cfold]
| plus l r => split <;> simp_all[denote, cfold]
| eq l r => split <;> simp_all[denote, cfold]
| and l r => split <;> simp_all[denote, cfold]
| if_ d l r dih lih rih =>
rw[dih, lih, rih]
cases cfold d with simp[denote]
| bConst b => cases b <;> simp
| pair l r => simp_all[denote]
| fst p ih =>
rw[ih]
cases cfold p <;> simp[pairOut, denote]
| snd p ih =>
rw[ih]
cases cfold p <;> simp[pairOut, denote]
end Chapter8
end Cpdt
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 06 10:09:58 2014
@author: Patrick
"""
'''Workflow: read in the train and test data, filter for length 2, time-shift
with cross-correlation, determine the best sensor combination and regression
method using MSE as the optimisation criterion, save the regressor'''
import os
import sys
import fnmatch
import numpy as np
import random
#Fix the import path, so that we can import our parent folder with all required modules
import os,sys,inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
sys.path.insert(0,parentdir)
from data_analysis import plotData
from data_analysis.bestRegressor import BestRegressor
from sklearn.externals import joblib
from data_analysis.loadtrain import LoadData
from data_analysis.regressor import Regressor
from mpltools import style
from scipy.stats.stats import pearsonr
from sklearn.metrics import mean_squared_error
import pylab as pl
from matplotlib.pyplot import close
try:
close("all")
except:
pass
def forwinormac():
if os.name=="nt":
workdir= os.path.dirname(os.path.abspath(sys.argv[0]))
os.chdir(workdir)
if os.name=="posix":
workdir= os.path.dirname(os.path.abspath(sys.argv[0]))
os.chdir(workdir)
"""Klassen und Arbeitspfad laden"""
forwinormac()
loader=LoadData(sep=",", groundtruth_elements=1,skiprows=1, skipcols=0)
bestRegressor = BestRegressor()
regressor=Regressor()
"""Trainigsdaten laden"""
trainPowerData, trainFeatureData=loader.load_dir('train')
"""Testdaten laden"""
testPowerData, testFeatureData = loader.load_dir('test')
print testPowerData
#print trainPowerData[0:5]
#selection_idx = np.where(testPowerData > 24)
#testPowerData = testPowerData[selection_idx]
#testFeatureData = testFeatureData[selection_idx]
#testPowerData+=26
#trainPowerData+=26
'''
Determine the optimal regression method and the optimal sensor combination according to MSE.
Careful: allSensorCombinations determines the best regressor with the error_func over the whole
series, WITHOUT computing and averaging the error for every XX seconds
'''
error_func = mean_squared_error
#resultArray, resultTuple = bestRegressor.allSensorCombinations\
#(trainFeatureData, trainPowerData,testFeatureData,testPowerData, error_func)
resultTuple = bestRegressor.findGoodRegressor\
(trainFeatureData, trainPowerData,testFeatureData,testPowerData, error_func)
predictedPower = bestRegressor.predictResultArrayEntry\
(resultTuple, trainFeatureData,trainPowerData,testFeatureData)
print("============= Predicted ===============")
print predictedPower
print("============= Real ===============")
print testPowerData
regressor.printResult(resultTuple)
bestRegressor.printError(predictedPower, testPowerData)
bestRegressor.calculateMABSForChunks(predictedPower, testPowerData)
#meanError, std, errorInPercent = bestRegressor.calculateMABSForChunks(predictedPower, testPowerData)
|
module plfa.part1.Midterm where
import Relation.Binary.PropositionalEquality as Eq
open Eq using (_≡_; refl; cong; sym)
-- you can add any import definitions that you need
open import Data.Nat using (ℕ; zero; suc; _+_; _*_; _≤_; _>_; z≤n; s≤s; _≤?_)
open import Data.Nat.Properties using (+-assoc; +-suc; *-suc; +-comm; *-distribˡ-+; *-identityʳ)
open import Relation.Nullary using (yes; no)
open import plfa.part1.Induction using (*-distrib-+; *-zero)
-- used for rewrite
simplify : ∀ {A : Set} (x : A) → x ≡ x
simplify x = refl
sum : ℕ → ℕ
sum 0 = 0
sum n@(suc sn) = sum sn + n
-- Problem 1
-- remove the "postulate" and prove this theorem, which is a version of
-- sum n ≡ n * (n + 1) / 2
---postulate
simple : ∀ (n : ℕ) → (sum n) * 2 ≡ (suc n) * n
simple zero = refl
simple (suc n) rewrite *-distrib-+ (sum n) (suc n) 2
| simple n
| simplify n
| *-suc n n
| +-comm n (n * n)
| sym (+-assoc n (n * n) n)
| +-comm n (n * n) | +-assoc (n * n) n n
| sym (+-suc (n * n) (n + n)) | sym (+-assoc n (n * n) (suc (n + n)))
| +-comm n (n * n)
| sym (+-suc (n * n) n)
| +-assoc (n * n) n (suc (n + n))
| sym (+-suc (n * n) (n + suc (n + n)))
| sym (+-suc n (suc (n + n)))
| sym (+-assoc (n * n) n (suc (suc (n + n))))
| *-suc n 1
| *-identityʳ n = refl
-- Problem 2
-- remove the postulate and implement this function, which gives an Natural
-- number approximation of square root
postulate
sqrt : ℕ → ℕ
-- you can run these test cases
-- _ : sqrt 0 ≡ 0
-- _ = refl
-- _ : sqrt 1 ≡ 1
-- _ = refl
-- _ : sqrt 2 ≡ 1
-- _ = refl
-- _ : sqrt 3 ≡ 1
-- _ = refl
-- _ : sqrt 4 ≡ 2
-- _ = refl
-- _ : sqrt 5 ≡ 2
-- _ = refl
-- _ : sqrt 6 ≡ 2
-- _ = refl
-- _ : sqrt 7 ≡ 2
-- _ = refl
-- _ : sqrt 8 ≡ 2
-- _ = refl
-- _ : sqrt 9 ≡ 3
-- _ = refl
-- _ : sqrt 10 ≡ 3
-- _ = refl
-- _ : sqrt 11 ≡ 3
-- _ = refl
-- _ : sqrt 12 ≡ 3
-- _ = refl
-- _ : sqrt 13 ≡ 3
-- _ = refl
-- _ : sqrt 14 ≡ 3
-- _ = refl
-- _ : sqrt 15 ≡ 3
-- _ = refl
-- _ : sqrt 16 ≡ 4
-- _ = refl
-- _ : sqrt 17 ≡ 4
-- _ = refl
-- _ : sqrt 18 ≡ 4
-- _ = refl
-- _ : sqrt 19 ≡ 4
-- _ = refl
-- _ : sqrt 20 ≡ 4
-- _ = refl
-- _ : sqrt 21 ≡ 4
-- _ = refl
-- _ : sqrt 22 ≡ 4
-- _ = refl
-- _ : sqrt 23 ≡ 4
-- _ = refl
-- _ : sqrt 24 ≡ 4
-- _ = refl
-- _ : sqrt 24 ≡ 4
-- _ = refl
-- _ : sqrt 24 ≡ 4
-- _ = refl
-- _ : sqrt 25 ≡ 5
-- _ = refl
-- _ : sqrt 26 ≡ 5
-- _ = refl
-- _ : sqrt 27 ≡ 5
-- _ = refl
|
\chapter{Architecture}\label{ch:arch}
The application is composed of two components:
\begin{enumerate*}[label=]
\item Server;
\item Client.
\end{enumerate*}
\begin{figure}[p]
\centering
\includegraphics{arch}
\caption{Application architecture.}\label{fig:arch}
\end{figure}
\figref{fig:arch} shows the application's architecture.
\input{arch/server}
\input{arch/client}
\input{arch/dependencies}
|
%% BRENTMIN: Brent's minimization method in one dimension
function [xmin,fmin,funccount,varargout] = ...
brentmin(xlow,xupp,Nitmax,tol,f,nout,varargin)
% code taken from
% § 10.2 Parabolic Interpolation and Brent's Method in One Dimension
% Press, Teukolsky, Vetterling & Flannery
% Numerical Recipes in C, Cambridge University Press, 2002
%
% [xmin,fmin,funccount,varargout] = BRENTMIN(xlow,xupp,Nitmax,tol,f,nout,varargin)
% Given a function f, and given a search interval this routine isolates
% the minimum of fractional precision of about tol using Brent's method.
%
% INPUT
% -----
% xlow,xupp: search interval such that xlow<=xmin<=xupp
% Nitmax: maximum number of function evaluations made by the routine
% tol: fractional precision
% f: [y,varargout{:}] = f(x,varargin{:}) is the function
% nout: no. of outputs of f (in varargout) in addition to the y value
%
% OUTPUT
% ------
% fmin: minimal function value
% xmin: corresponding abscissa-value
% funccount: number of function evaluations made
% varargout: additional outputs of f at optimum
%
% Copyright (c) by Hannes Nickisch 2010-01-10.
if nargin<6, nout = 0; end
varargout = cell(nout,1);
% tolerance is no smaller than machine's floating point precision
tol = max(tol,eps);
% Evaluate endpoints
fa = f(xlow,varargin{:});
fb = f(xupp,varargin{:});
funccount = 2; % number of function evaluations
% Compute the start point
seps = sqrt(eps);
c = 0.5*(3.0 - sqrt(5.0));% golden ratio
a = xlow; b = xupp;
v = a + c*(b-a);
w = v; xf = v;
d = 0.0; e = 0.0;
x = xf; [fx,varargout{:}] = f(x,varargin{:});
funccount = funccount + 1;
fv = fx; fw = fx;
xm = 0.5*(a+b);
tol1 = seps*abs(xf) + tol/3.0;
tol2 = 2.0*tol1;
% Main loop
while ( abs(xf-xm) > (tol2 - 0.5*(b-a)) )
gs = 1;
% Is a parabolic fit possible
if abs(e) > tol1
% Yes, so fit parabola
gs = 0;
r = (xf-w)*(fx-fv);
q = (xf-v)*(fx-fw);
p = (xf-v)*q-(xf-w)*r;
q = 2.0*(q-r);
if q > 0.0, p = -p; end
q = abs(q);
r = e; e = d;
% Is the parabola acceptable
if ( (abs(p)<abs(0.5*q*r)) && (p>q*(a-xf)) && (p<q*(b-xf)) )
% Yes, parabolic interpolation step
d = p/q;
x = xf+d;
% f must not be evaluated too close to ax or bx
if ((x-a) < tol2) || ((b-x) < tol2)
si = sign(xm-xf) + ((xm-xf) == 0);
d = tol1*si;
end
else
% Not acceptable, must do a golden section step
gs=1;
end
end
if gs
% A golden-section step is required
if xf >= xm, e = a-xf; else e = b-xf; end
d = c*e;
end
% The function must not be evaluated too close to xf
si = sign(d) + (d == 0);
x = xf + si * max( abs(d), tol1 );
[fu,varargout{:}] = f(x,varargin{:});
funccount = funccount + 1;
% Update a, b, v, w, x, xm, tol1, tol2
if fu <= fx
if x >= xf, a = xf; else b = xf; end
v = w; fv = fw;
w = xf; fw = fx;
xf = x; fx = fu;
else % fu > fx
if x < xf, a = x; else b = x; end
if ( (fu <= fw) || (w == xf) )
v = w; fv = fw;
w = x; fw = fu;
elseif ( (fu <= fv) || (v == xf) || (v == w) )
v = x; fv = fu;
end
end
xm = 0.5*(a+b);
tol1 = seps*abs(xf) + tol/3.0; tol2 = 2.0*tol1;
if funccount >= Nitmax
% typically we should not get here
% warning(sprintf(['Maximum number of iterations (%d) exceeded:', ...
% 'precision is not guaranteed'],Nitmax))
% fprintf('[%1.3f,%1.3f,%1.3f]\n',xlow,xf,xupp)
break
end
end % while
% check that endpoints are less than the minimum found
if ( (fa < fx) && (fa <= fb) )
xf = xlow; fx = fa;
elseif fb < fx
xf = xupp; fx = fb;
end
fmin = fx;
xmin = xf;
|
\documentclass{homework}
\course{Fluid Mechanics and Operations}{2}
\begin{document}
\section{MSH Problem 2.2}
\textit{\underline{Solution:}}
$dP + g\rho dZ = 0$, \quad $\rho = \displaystyle \frac{PM}{RT}$, \quad $T = 288-0.005Z$
\vspace{1.2ex}
$\Rightarrow dP + \displaystyle \frac{gPM}{R(288-0.005Z)} dZ = 0 \Rightarrow
\int \frac{1}{P} dP = \int -\frac{gM}{R(288-0.005Z)} dZ$
\vspace{1.2ex}
$\Rightarrow \ln \displaystyle \frac{P_b}{P_a} = -\frac{gM}{R} \frac{-1}{0.005} \ln\left\lvert\frac{288-0.005Z}{288}\right\rvert$
\vspace{1.2ex}
$\Rightarrow \ln \displaystyle \frac{1}{2} = \frac{9.8 \cdot 28.8 \cdot 10^{-3}}{8.3145} \frac{1}{0.005} \ln\left\lvert\frac{288-0.005Z}{288}\right\rvert$
\vspace{1.2ex}
$\Rightarrow Z = 5591$m
\section{MSH Problem 2.7}
\textit{\underline{Solution:}}
$r_2 = \displaystyle \frac{0.150}{2} = 0.075$m, \quad $r_B = r_1 = 0.04$m
\vspace{1.2ex}
$V_A = V_B \Rightarrow \pi(r_2^2 - r_i^2) = \pi (r_i^2 - r_B^2)\Rightarrow r_i = 0.060$m
\vspace{1.2ex}
$\rho_B(r_i^2-r_B^2) = \rho_A(r_i^2-r_A^2) \Rightarrow \displaystyle \frac{\rho_B}{\rho_A}(r_i^2 - r_B^2) = r_i^2 - r_A^2$
\begin{displaymath}
\Rightarrow r_A = \sqrt{r_i^2 - \frac{\rho_B}{\rho_A} (r_i^2 - r_B^2)} = \sqrt{0.060^2 - \frac{1020}{1109} (0.060^2 - 0.04^2)} = 0.042 m
\end{displaymath}
\section{}
\textit{\underline{Solution:}}
$P_g = -1.7psi + 62.42\displaystyle \frac{lb}{ft^3} \cdot 1.6 \cdot 1.5 ft \frac{ft^2}{144in^2} = -0.66psi$
\vspace{1.2ex}
$-0.66psi + 62.42 \cdot SG_B \cdot 1.25 \cdot \displaystyle\frac{1}{144} = 0$
\vspace{1.2ex}
$\Rightarrow SG_B=1.22$
\end{document} |
CoInductive stream : Set :=
| Cons : nat -> stream -> stream.
CoFixpoint ones : stream := Cons 1 ones.
|
module DataStore
import Data.Vect
infixr 5 .+.
public export
data Schema = SString | SInt | (.+.) Schema Schema
public export
SchemaType : Schema -> Type
SchemaType SString = String
SchemaType SInt = Int
SchemaType (x .+. y) = (SchemaType x, SchemaType y)
export
record DataStore (schema : Schema) where
constructor MkData
size : Nat
items : Vect size (SchemaType schema)
export
empty : DataStore schema
empty = MkData 0 []
export
addToStore : (value : SchemaType schema) -> (store : DataStore schema) -> DataStore schema
addToStore value (MkData _ items) = MkData _ (value :: items)
public export
data StoreView : DataStore schema -> Type where
SNil : StoreView empty
SAdd : (rec : StoreView store) -> StoreView (addToStore value store)
storeViewHelp : (items : Vect size (SchemaType schema)) -> StoreView (MkData size items)
storeViewHelp [] = SNil
storeViewHelp (val :: xs) = SAdd (storeViewHelp xs)
export
storeView : (store : DataStore schema) -> StoreView store
storeView (MkData size items) = storeViewHelp items
|
[STATEMENT]
lemma lnth_ltl: "\<not> lnull xs \<Longrightarrow> lnth (ltl xs) n = lnth xs (Suc n)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<not> lnull xs \<Longrightarrow> lnth (ltl xs) n = lnth xs (Suc n)
[PROOF STEP]
by(auto simp add: not_lnull_conv) |
#include <boost/program_options.hpp>
#include "framework/settings.h"
namespace po = boost::program_options;
namespace game {
void settings_initialize(int argc, char** argv) {
po::options_description additional_options("Additional options");
additional_options.add_options()
("server-url", po::value<std::string>()->default_value("http://svc.warworlds.codeka.com/"), "The URL we use to log in, find other games, and so on. Usually you won't change the default.")
("listen-port", po::value<std::string>()->default_value("9347"), "The port we listen on. You can specify a range with the syntax aaa-bbb")
("auto-login", po::value<std::string>()->default_value(""), "A string used to automatically log on to the server. The value is obfuscated.")
;
po::options_description keybinding_options("Key bindings");
keybinding_options.add_options()
("bind.pause", po::value<std::string>()->default_value("ESC"))
("bind.chat", po::value<std::string>()->default_value("TAB"))
("bind.select", po::value<std::string>()->default_value("Left-Mouse"))
("bind.deselect", po::value<std::string>()->default_value("Right-Mouse"))
("bind.screenshot", po::value<std::string>()->default_value("Ctrl+S"))
;
po::options_description options;
options.add(additional_options).add(keybinding_options);
fw::settings::initialize(options, argc, argv, "default.conf");
}
}
|
Formal statement is: lemma path_connected_complement_countable: fixes S :: "'a::euclidean_space set" assumes "2 \<le> DIM('a)" "countable S" shows "path_connected(- S)" Informal statement is: If $S$ is a countable set in $\mathbb{R}^n$ with $n \geq 2$, then $\mathbb{R}^n \setminus S$ is path-connected. |
[STATEMENT]
lemma alpha_binp_None:
"qOp delta inp binp #= qOp delta' inp' binp' \<Longrightarrow>
(binp i = None) = (binp' i = None)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. qOp delta inp binp #= qOp delta' inp' binp' \<Longrightarrow> (binp i = None) = (binp' i = None)
[PROOF STEP]
by(auto simp add: sameDom_def) |
Formal statement is: lemma finite_bounded_log2: fixes a::complex assumes "a \<noteq> 0" shows "finite {z. norm z \<le> b \<and> exp(a*z) = w}" Informal statement is: If $a \neq 0$, then the set of all complex numbers $z$ such that $|z| \leq b$ and $e^{az} = w$ is finite. |
corollary\<^marker>\<open>tag unimportant\<close> Zero_neq_One[iff]: "0 \<noteq> One" |
This refers to the borrower's debt ratio and is calculated using a borrower's total of monthly payments due on credit obligations divided by the borrower's gross monthly income. It's expressed as a percentage. See also DEBT RATIO.
Escrow set up to take care of the simultaneous purchase of one property and the sale of another property by the same party.
Financial report or statement in tabular form showing personal or corporate assets, liabilities, and equities as of a specified date.
Mortgage that includes level monthly payments that will fully amortize it over a stated term, but provides for a balloon payment due at the end of an earlier specified term.
Remaining balance of a mortgage that must be paid in a lump sum either at the end of the mortgage term or at the end of a specified earlier period. The amount may represent slightly more than a monthly payment or may be substantial. It occurs because the fixed installment did not fully amortize the mortgage, either accidentally or intentionally.
So, it is a large payment due at the end of a loan contract. Equal to the remaining principal balance plus any interest and charges due.
Federal court proceedings to relieve the debts of an individual or business unable to pay its creditors. See also CHAPTER 7 BK and CHAPTER 13 BK.
1/100th of 1%. For example 5 1/2 basis points equal 5.5/100=0.055% or 0.00055.
Suburban section from which a large number of residents commute to work in an adjoining or nearby metropolitan area.
One in whose favor a trust operates, or in whose behalf the income from a trust estate or trust deed is drawn. Lender on the security of a Note. Also, one who receives funds from a life insurance policy.
Written instructions by a beneficiary under a deed of trust or mortgage stating and demanding the amount necessary for payoff of a lien in full.
Publication issued by the A.M. Best Company, which establishes ratings for hazard insurance carriers by evaluating their assets and liabilities.
Written document that transfers title to personal property.
Written evidence of temporary hazard or title coverage that runs for a limited time and must be replaced by a permanent policy.
Lien on more than one parcel or unit of land, frequently incurred by subdividers or developers who have purchased a single tract of land for the purpose of dividing it into smaller parcels for sale or development. Also known as blanket trust deed.
Also identified as architectural plans, these are provided by a licensed architect, a custom builder or can be provided by a factory-built home supplier. They provide a complete overview of how the project will be constructed, including all necessary architectural and structural issues.
Mortgagor who receives funds in the form of a loan with the obligation of repaying the loan in full with interest, if applicable.
The monthly savings a customer will experience by consolidating/paying off debt with a debt consolidation loan. Monthly savings equals current monthly payments less new monthly payments.
Violation of any legal obligation.
In residential or commercial property, the figure at which occupancy income is equal to all required expenses and debt service.
Loan spanning the gap between the termination of one (generally short-term) loan and the start of another (generally permanent long-term) loan. Also known as gap financing.
Form of second trust that is collateralized by the borrower's present home (which is usually for sale) in a manner that allows the proceeds to be used for closing on a new house before the present home is sold.
Fire and extended coverage insurance for a building under construction. Coverage increases automatically as the building progresses and terminates at completion.
Regulations that control design, construction, and materials used in construction.
Written permission by a local government for the construction of a new building or for making improvements.
Cabinets, ranges, and ovens, or similar features that are part of the structure.
Customers who own their own business. Another word for self-employed.
A license that authorizes a business to operate and is typically required and issued by the city in which the business is located.
Compensation to a business owner or operator for income lost when the business is closed due to a fire or other insured hazard.
Account in which funds are held so that they can be applied as part of the monthly mortgage payment as each payment comes due during the period that an interest rate buydown plan is in effect. |
\documentclass[11pt, oneside]{article}
\usepackage{geometry}
\geometry{letterpaper}
\usepackage[parfill]{parskip} % Activate to begin paragraphs with an empty line rather than an indent
\usepackage{graphicx} % Use pdf, png, jpg, or eps§ with pdflatex; use eps in DVI mode
\usepackage{amssymb}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{amsmath}
\usepackage{bm}
\usepackage{ragged2e}
\usepackage{tabu}
\theoremstyle{definition}
\newtheorem{definition}{Definition}[section]
\title{Econometric Analysis ECMT2160 Exam notes}
\author{Charles Christopher Hyland}
\date{Semester 2 2017}
\begin{document}
\pagenumbering{gobble}
\maketitle
\begin{abstract}
Welcome! Hopefully these notes help you to ace ECMT2160!
\end{abstract}
\newpage
\tableofcontents
\newpage
\pagenumbering{arabic}
\section{Time Series Data}
\subsection{Introduction}
Time series are data collected at fixed intervals and stored chronologically. Therefore, unlike cross-sectional data, the order in which the observations appear matters. From this, we can think of time series data as a \textbf{realisation of random variables indexed by time}.
\begin{center}
\{$Y_t$ \: t = 1,..,T\} = \{$Y_1,..,Y_T$\}
\end{center}
We can refer to this sequence as a \textbf{stochastic process}. Intuitively, we can ever only observe the one realisation of a random variable for time series data since we only live once \#YOLO.
Time series data can incorporate trends over years whilst time series at monthly/quarterly frequencies can contain seasonality in the data.\\
\subsection{Gauss-Markov Assumptions}
OLS on time series can be unbiased but we need quite strict assumptions for this to work. We can use a lot of assumptions that are similar to those for cross-sectional OLS.
\subsubsection{Assumption 1: Linearity}
For a time series process: \{$y_t : t = 1,..,T$\}
\begin{center}
$y_t$ = \textbf{$\beta'x_t$} + $\epsilon_t$
\end{center}
where $x_t$ = ($x_{t1}$,$x_{t2}$,...,$x_{tk}$)' and $\beta$ is the associated set of parameters. We assume that $y_t$ is a linear combination of $\beta$ terms and an error term.
\subsubsection{Assumption 2: No Perfect Collinearity}
Each $x_{tj}$ varies somewhat over time and no explanatory variable is an \textbf{exact linear function of the others}. This rules out \textbf{perfect correlation} between predictors. Multicollinearity, however, does not violate the assumption, although it does cause the variance of the estimators to be high and affects standard inference.
\subsubsection{Assumption 3: Strict Exogeneity}
The conditional expectation of the error term is zero:
E($\epsilon_t|X$) = 0 $\forall$ t
Where X = ($x_1$,...,$x_T$)' and is a matrix of explanatory variables. Anything that causes the results in time t to be correlated with any explanatory variables at any time period is a violation of strict exogeneity assumption. We have the case that there is no correlation or relationship between $\epsilon_t$ and the explanatory variables for all of time.
We can also have a less restrictive form of the assumption of \textbf{weak exogeneity}.
$E(\epsilon_t|x_t,x_{t-1},x_{t-2},...)$ = 0
This only requires the error term to be unrelated to current and past values of the regressors, not to future ones. We can weaken this further to \textbf{contemporaneous exogeneity}, $E(\epsilon_t$$|$$x_t$) = 0, which implies a lack of correlation between the explanatory variables and the error term within that time period only. Contemporaneous exogeneity is sufficient for consistency but not for unbiasedness of OLS.
Under the assumptions of \textit{linearity, no perfect collinearity,} and \textit{strict exogeneity}, the OLS estimator is unbiased.
\begin{equation}
E(\hat{\beta}) = \beta
\end{equation}
We get unbiasedness without restricting the correlation across time in the explanatory variables. Therefore, the $x_t$ terms are allowed to be correlated with each other across time. Furthermore, the error terms $\epsilon_t$ are also allowed to be correlated across time. We don't yet know how precise these estimators are, though. Therefore, we need other assumptions to be satisfied in order to test hypotheses and construct confidence intervals, so that we can derive the estimator's distribution.
With \textbf{contemporaneous exogeneity}, this is sufficient for OLS estimators to be \textbf{consistent}.
\subsubsection{Assumption 4: Homoskedasticity}
The conditional variance of the error term is constant:
\begin{equation}
Var(\epsilon_t|X) = \sigma^2 \quad t = 1,...,T
\end{equation}
Here, we are saying that error terms have constant variance for all of time (strict form).
\subsubsection{Assumption 5: No autocorrelation}
The error terms are uncorrelated over time now.
\begin{equation}
Cov(\epsilon_t,\epsilon_s |X) = 0 \quad \forall t \neq s
\end{equation}
We are saying here that there is no covariance between error terms across all time periods.
With assumptions 4 and 5, we can now derive the \textbf{spherical disturbances} assumption whereby
\begin{equation}
E(\epsilon\epsilon'|X) = \sigma^2I
\end{equation}
where I is the T-dimensional identity matrix. Its size is the number of time periods we are looking at. Since it is an identity matrix, that means we don't have any covariance between error terms across time but we do have variance terms for the error terms.
Note that there are 3 different kinds of correlation in a time series regression that we need to consider.
1) Correlation between $x_{tj}$ and $x_{sj}$. This is the correlation between a predictor and itself across time. This is not really an issue, unless there is a perfect linear relationship.
2) Correlation between $x_{tj}$ and $\epsilon_s$. This violates the strict exogeneity assumption and this leads to OLS being biased as a result. However, it doesn't affect weak exogeneity assumption if the error term is in a different period.
3) Correlation between $\epsilon_t$ and $\epsilon_s$. This violates no autocorrelation assumption and only affects the \textbf{efficiency} of the OLS estimator.
We can have strict exogeneity satisfied while the no autocorrelation assumption is violated, i.e. when ONLY the third (and possibly the first) kind of correlation is present. We can have the predictors uncorrelated with the error terms while the error terms are still correlated with each other.
We can measure the level of \textbf{autocorrelation} between 2 time periods by:
\begin{center}
$\rho = \frac{\sigma_{x_{t}x_{s}}}{\sigma_{x_t}\sigma_{x_s}}$
t $\neq$ s
\end{center}
We look at the covariance between time t and s, divided by the standard deviation of t and s respectively. From this, letting s come before t in time, we can assume that s = t -k, where k = 1,2,...
We can rewrite the equation to get:
\begin{center}
$\rho_k = \frac{\sigma_{x_{t}x_{t-k}}}{\sigma_{x_t}\sigma_{x_{t-k}}}$
t $\neq$ s
\end{center}
We do this because instead of comparing 2 distinct time periods t and s, we can re-express s as t minus a lag k. So if t = 5 and s = 2, we can express s as 5-3 (i.e. t-k) where k = 3. So now we are looking at time period t and its $k^{th}$ lag.
However, note that $\sigma_{x_t}$ and $\sigma_{x_{t-k}}$ both come from the same stochastic process. From the assumption of \textbf{stationarity}, whereby the variance is constant throughout the time series, we have
\begin{center}
$\sigma_{x_t} = \sigma_{x_{t-k}} \equiv \sigma_0$
$\forall$ t
$\sigma_{x_{t}x_{t-k}} = \sigma_{x_{s}x_{s-k}} \equiv \sigma_k$
$\forall$ t,s
\end{center}
Here, the variance is the same for all time and that the covariance is also the same for all time periods too.
We can sub in $\sigma_k$ for the covariance and $\sigma_0$ for each standard deviation in our autocorrelation formula of $\rho = \frac{\sigma_{x_{t}x_{t-k}}}{\sigma_{x_{t}}\sigma_{x_{t-k}}}$.
From this, the autocorrelation of a stationary time series is given from:
\begin{center}
$\rho_k = \frac{\sigma_k}{\sigma_0^2}$
\text{where } $\sigma_k = E[(x_t - \bar{x})(x_{t-k} - \bar{x})]$
and $\quad \sigma_0^2 = E[(x_t - \bar{x})^2]$
\end{center}
Note that we only have 1 $\bar{x}$ since the mean is the same due to stationary data.
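As an illustration (this code is not part of the original notes), a sample version of $\rho_k$ can be computed directly from a series, using a single overall mean exactly as above. The following is a minimal sketch in Python, assuming numpy is available:
\begin{verbatim}
import numpy as np

def sample_autocorr(x, k):
    # rho_k = sigma_k / sigma_0^2, using one overall mean (stationarity)
    x = np.asarray(x, dtype=float)
    xbar = x.mean()
    sigma_k = np.sum((x[k:] - xbar) * (x[:-k] - xbar))
    sigma_0_sq = np.sum((x - xbar) ** 2)
    return sigma_k / sigma_0_sq

# example: autocorrelations of a short made-up series at lags 1 and 2
x = [1.0, 0.8, 0.9, 0.5, 0.3, 0.4, 0.1, -0.2]
print(sample_autocorr(x, 1), sample_autocorr(x, 2))
\end{verbatim}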
\textbf{Gauss-Markov Theorem}: Under assumptions 1 to 5, OLS estimators are the best linear unbiased estimators (BLUE). Recall that we need \textit{strict exogeneity} in assumption 3 for this to hold.
\subsubsection{Assumption 6: Normality}
The $\epsilon_t$ are iid normal random variables with zero mean and variance $\sigma^2$.
\begin{center}
$\epsilon_t$ $\sim$ N(0,$\sigma^2$)
t = 1,2,...,T
\end{center}
The \textit{Gauss-Markov assumptions and the normality assumption} give us the \textbf{Classical Linear Model assumptions} (CLM) for time series. We need these so that we can perform exact inference.
\subsection{Classical Linear Model Assumptions}
From assumptions 1-6, we can now carry out statistical inference procedures for time series data. t statistics are $\sim$ $t_{T-k-1}$ distributions under the null, where k is the number of exogenous variables in our regression. Furthermore, confidence intervals have the specified confidence levels and F-statistics have exact F-distributions.
From this, we have a set of assumptions (the most important being strict exogeneity) under which OLS is unbiased.
When spherical disturbances assumption holds, OLS is BLUE and usual OLS variance formulas apply. However, serial correlation tends to be a problem even when strict exogeneity holds. Furthermore, we can add normality and this leads to exact inference. However, this tends to be highly unrealistic for many datasets.
\newpage
\section{Static and Distributed Lag Models}
First, we define a stationary process as one where probability distributions are stable over time. More formally, the joint distribution of random variables from any set of time periods remains unchanged. So the joint distribution of $\{x_{t_1},x_{t_2},...\}$ is the same as $\{x_{t_1+h},x_{t_2+h},...\}$ for h $\geq$ 1 and therefore the series is \textbf{identically distributed}. We know that seasonal and trending data are already non-stationary from this. The idea of stationarity is different to the idea of \textbf{weak dependence}, which states that $corr(x_t, x_{t+h}) \rightarrow 0$ as $h \rightarrow \infty$. This means that the process is asymptotically uncorrelated. With this, we can also define the idea of \textbf{covariance stationarity}, which requires 3 conditions:
1) E($x_t$) = c, whereby c is a constant.
2) var($x_t$) = k, whereby k is a constant.
3) cov($x_t, x_{t+h}$) = $\rho_h$ whereby the covariance only depends on h, and not t.
Note that stationarity is a \textbf{property of a process, not time series themselves}. These processes generate time series.
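As a quick worked check of these conditions (a standard textbook example, not something from the original notes), take an AR(1) process $x_t = \rho x_{t-1} + e_t$ with $e_t \sim$ iid(0,$\sigma_e^2$), $|\rho| < 1$, and the process started in the distant past. Then
$$
E(x_t) = 0, \quad var(x_t) = \frac{\sigma_e^2}{1-\rho^2}, \quad cov(x_t,x_{t+h}) = \rho^h\frac{\sigma_e^2}{1-\rho^2}.
$$
None of these depend on t (the covariance depends only on h), so all 3 conditions hold and the process is covariance stationary. Since the autocorrelation $\rho^h \rightarrow 0$ as $h \rightarrow \infty$, the process is also weakly dependent.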
Recall there are 3 assumptions we want with time series data: linearity in parameters, exogeneity, and uncorrelated error terms.
\subsection{Static Models}
A static model is only for the current time period and relates two or more time series with each other.
$$
y_t = \alpha_t = \beta_1 x_{1,t} + ... + \beta_k x_{k,t} + \epsilon_t
$$
whereby $\beta_j$, j=1,...,k captures only the \textbf{contemporaneous} relationship between $\{x_j\}$ and \{y\}. Furthermore, $\epsilon_t$ $\sim$ iid(0,$\sigma^2$). $\epsilon_t$ is referred to as \textbf{white noise}. In other words, white noise is a sequence, indexed by time, whose observations are not autocorrelated with each other and are homoskedastic.
Recall that serial correlation is the correlation of a variable with itself over time. Serial correlation in the error terms does not affect the unbiasedness or consistency of least squares. If $E(\epsilon_t | X)$ = 0, then the explanatory variables are strictly exogenous, leading to unbiased OLS. If instead $E(\epsilon_t$ $|$ $x_t)$ = 0, then the explanatory variables are only \textit{contemporaneously exogenous} and therefore OLS is consistent provided that the time series are \textit{weakly dependent}. Weak dependence refers to the fact that $x_t$ and $x_{t+h}$ are almost independent as h $\rightarrow \infty$. This means that as the variables get further apart in time, the correlation between them becomes smaller and smaller.
However, autocorrelated errors (serial correlation) mean that we have issues with statistical inference, even in the case of large samples. Furthermore, measures such as $R^2$ and $\bar{R^2}$ can be invalidated. These goodness of fit measures are useless if the serial correlation arises from a spurious regression, i.e. if the series \{y\} and (some of the) \{x\} have \textit{unit roots}. Briefly, a unit root means that the effects of any shocks do not disappear over time (which is the opposite of weak dependence). However, if the data are \textit{weakly dependent} (in which case there is no unit root, since unit root processes are highly persistent/strongly dependent), then these measures are reliable!
To elaborate more on \textit{unit roots}, we can think of them as a \textbf{stochastic trend} in time series. If we had a time series of:
$$
y_t = c + \alpha_1y_{t-1} + \epsilon_{t}
$$
the coefficient $\alpha_1$ is a root. Ignoring the noise, we expect this process to always converge back to its long-run mean $c/(1-\alpha_1)$ when $|\alpha_1| < 1$. If we set c = 0 and $\alpha_1$ = 0.5, and $y_{t-1}$ was 100, then today it's 50, tomorrow 25, and so on until it gets to 0. Here, we can see that this series converges back towards c. However, if we had a root that is a \textbf{unit}, or in other words, when $\alpha_1 = 1$, the series will never converge back to c. From this, we can see that the time series never recovers back to its expected value and therefore the process is very susceptible to shocks and hard to predict (a short simulation sketch follows the list below). There are 3 ways for autocorrelation to occur:
1) Omitted variable bias.
2) Functional form misspecification.
3) Measurement error in the independent variable.
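To make the unit root discussion above concrete, here is a minimal simulation sketch (illustrative only, not from the original notes; Python with numpy assumed). Both processes start from the same large shock: the $\alpha_1 = 0.5$ series decays back towards zero, while the $\alpha_1 = 1$ (random walk) series does not.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)
T = 200
eps = rng.normal(size=T)

def simulate(alpha, y0=100.0):
    # y_t = alpha * y_{t-1} + eps_t, starting from a large initial shock y0
    y = np.empty(T)
    y[0] = y0
    for t in range(1, T):
        y[t] = alpha * y[t - 1] + eps[t]
    return y

stationary = simulate(0.5)   # mean-reverting: the effect of y0 dies out quickly
unit_root = simulate(1.0)    # random walk: the initial shock never decays
print(stationary[-1], unit_root[-1])
\end{verbatim}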
\subsection{Testing for Serial Correlation}
We can test for serial correlation! We specify simple alternative models that allow the errors to be serially correlated and use the model to test the null that the errors are not serially correlated. From this, we can derive the first-order autocorrelation:
$$
\epsilon_t = \rho \epsilon_{t-1} + v_t
$$
whereby $v_t \sim $ iid(0,$\sigma_v^2)$. $v_t$ is a white noise process so that if $\rho$ = 0, then the error term $\epsilon_t$ is just iid. The error term is a function of its own lag. We don't include an intercept due to the zero conditional mean. We can think of this as asking whether the previous error term has a relationship/effect on the current error term. If it does ($\rho \neq 0$), then there is autocorrelation as the error terms are related (correlated). The null hypothesis is that there is no serial correlation, i.e. $H_0$: $\rho$ = 0. Often, $\rho >$ 0 when there is serial correlation, but we still use a two-sided alternative. In practice, we can't actually observe the error terms $\epsilon_t$, so we instead use the OLS residuals $\hat{\epsilon_t}$. From this, if the explanatory variables are strictly exogenous, we can use a simple t-test. Furthermore, we can actually use the t-test as long as $E(\epsilon_t | x_t, x_{t+1})$ = 0, i.e. the error in a given time period is uncorrelated with the regressors contemporaneously and in the next time period. If we are using just a single lagged residual, we only lose one observation: with 100 observations we can only construct 99 lagged residuals, so we drop the first observation.
Steps to test for serial correlation are (under strict exogeneity):
\textbf{Step 1.} Set up a time series model and run the regression:
$$
y_t = \alpha + \beta_1 x_{1,t} + ... + \beta_k x_{k,t} + \epsilon_t
$$
whereby t=1,...,T
\textbf{Step 2.} Using the residuals from step 1, run the regression
$$
\hat{\epsilon_t} = \rho \hat{\epsilon_{t-1}} + v_t
$$
whereby t=2,...,T. Doesn't matter if we include an intercept or not! They are asymptotically equivalent. $v_t$ is a white noise and iid. If we don't reject $\rho = 0$, then $\hat{\epsilon}_t = v_t$ so therefore $\hat{\epsilon}_t$ are a white noise process.
\textbf{Step 3.} Compute the \textbf{t-statistic} for $\hat{\rho}$ and test $H_0$: $\rho$ = 0. If we reject, there is autocorrelation in the error terms and the usual OLS inference is unreliable.
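A minimal sketch of these three steps in Python (assuming statsmodels is available, and with y and X as placeholders for your own series and regressor matrix) might look like this:
\begin{verbatim}
import numpy as np
import statsmodels.api as sm

# Step 1: original time series regression
res = sm.OLS(y, sm.add_constant(X)).fit()
e = np.asarray(res.resid)

# Step 2: regress the residual on its first lag (we lose one observation)
aux = sm.OLS(e[1:], sm.add_constant(e[:-1])).fit()

# Step 3: t-test of H0: rho = 0
print("rho_hat =", aux.params[1])
print("t-stat  =", aux.tvalues[1], " p-value =", aux.pvalues[1])
\end{verbatim}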
The test mentioned has large-sample justifications and tends to work well. Standard errors are wider with a small sample, so we might not reject even if $\hat{\rho}$ is ``large'' since there is a lot of room for error. Conversely, standard errors are smaller with a large sample, so we are much more likely to reject even if $\hat{\rho}$ is small. This is the distinction between statistical and practical significance: with a large sample size we may simply have detected a small amount of correlation (since with a large sample it is easy to reject things). Note that we must assume homoskedasticity for this test; if there is heteroskedasticity, we use heteroskedasticity-robust t-statistics instead.
From this, we can check for even higher-order autocorrelation.
\textbf{Step 1.} Set up a time series model and run the regression:
$$
y_t = \alpha + \beta_1 x_{1,t} + ... + \beta_k x_{k,t} + \epsilon_t
$$
whereby t=1,...,T
\textbf{Step 2.} Using the residuals from step 1, run the regression
$$
\hat{\epsilon_t} = \rho_1 \hat{\epsilon_{t-1}} + \rho_2 \hat{\epsilon_{t-2}} + ... + \rho_q \hat{\epsilon_{t-q}} + v_t
$$
whereby t = q+1,...,T. Doesn't matter if we include an intercept or not! They are asymptotically equivalent.
\textbf{Step 3.} We can then use an \textbf{F-statistic} to test the joint hypothesis $H_0$: $\rho_1$ = ... = $\rho_q$ = 0 in the usual way. Therefore, we can test multiple lags of the residual at once. If we don't reject the null, then $\hat{\epsilon}_t = v_t$, so the residuals are a white noise process.
We now consider the case whereby the regressors are no longer strictly exogenous.
\subsubsection{Breusch-Godfrey Test for serial correlation}
In the scenario where the regressors are not strictly exogenous, we can no longer run the previous test for serial correlation. Instead, we now need to account for these endogenous regressors when testing for serial correlation, which leads us to the \textbf{Breusch-Godfrey test}. Our estimates of $\rho$ in the auxiliary regression are going to be biased and we need to correct for this. What we can do is include all of the explanatory variables from step 1 in the residual autocorrelation regression in step 2 and then run the F-statistic again. \textbf{Note we include the intercept $\gamma_0$}.
We are attempting to model:
$$
\hat{\epsilon_t} = \rho_1 \hat{\epsilon_{t-1}} + \rho_2 \hat{\epsilon_{t-2}} + ... + \rho_q \hat{\epsilon_{t-q}} + \gamma_1 x_{1,t} + ... \gamma_k x_{k,t} + \gamma_0
$$
We run an F-statistic for the joint hypothesis $H_0: \rho_1=...=\rho_q = 0$. This is known as the \textit{Breusch-Godfrey test for $q^{th}$ order residual autocorrelation}. We have corrected for endogenous variables since, if the predictors are related to the error terms, including them in the test equation accounts for the effects of the regressors as well. We can also include any number of lagged dependent variables to regress on. We can also use robust t-statistics in case of heteroskedasticity. In practice, we don't know how large q is, so we normally just try a few values. We can also use the \textbf{Lagrange multiplier} to test for it, where LM is:
$$
LM = (n-q)R_u^2
$$
where n is the sample size, q is the order of residual autocorrelation we are testing for, and $R_u^2$ is the R-squared from regressing the residual on the lagged residuals and the regressors. Here, $LM \sim \chi_q^2$ and recall that this is an upper one-sided test (we can also correct for heteroskedasticity with this).
An additional note is that if we want to check for seasonal forms of serial correlation, then for quarterly seasonality, we check for:
$$
\hat{\epsilon_t} = \rho_1\hat{\epsilon}_{t-4} + v_t
$$
and then do a t-test on whether $\rho_1 = 0$. If the data were monthly with yearly seasonality, we would instead check lag 12, i.e. regress on $\hat{\epsilon}_{t-12}$.
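As a quick sketch, statsmodels has a ready-made implementation of this test; the snippet below assumes you have already fit an OLS model called \texttt{ols\_res} (as in the earlier sketch) and simply feeds it to the built-in Breusch-Godfrey routine, which automatically re-uses the original regressors:
\begin{verbatim}
from statsmodels.stats.diagnostic import acorr_breusch_godfrey

# Test for serial correlation up to order q, including the original
# regressors in the auxiliary regression (the BG test described above).
q = 4
lm_stat, lm_pval, f_stat, f_pval = acorr_breusch_godfrey(ols_res, nlags=q)
print("LM statistic:", lm_stat, "p-value:", lm_pval)
print("F statistic:", f_stat, "p-value:", f_pval)
\end{verbatim}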
\subsection{Effects of serial correlation}
Recall that OLS parameter is:
$$
\hat{\beta}_1 = \beta_1 + \frac{\sum\limits_{t=1}^nx_t\epsilon_t}{SST_x}
$$
whereby $d_t = x_t$ since $\bar{x} = 0$ (normally it is $d_t = x_t - \bar{x}$). We simply let $d_t = x_t$ for this question then. Recall that the variance of the OLS estimator is then:
$$
Var(\hat{\beta}_1) = Var\left(\frac{\sum\limits_{t=1}^nx_t\epsilon_t}{SST_x}\right)
$$
$$
= \frac{1}{SST_x^2}Var\left(\sum\limits_{t=1}^nx_t\epsilon_t\right)
$$
since $SST_x$ is a constant. Now we ignore the fraction in the front (because I'm lazy). Recall we are conditioning on \textbf{X} so each $x_t$ is a constant.
$$
= Var(\sum\limits_{t=1}^{n}x_t\epsilon_t|\bm{X})
$$
$$
= \sum\limits_{t=1}^{n}x_t^2Var(\epsilon_t) + 2\sum\limits_{t=1}^{n-1}\sum\limits_{j=1}^{n-t}x_tx_{t+j}Cov(\epsilon_t,\epsilon_{t+j})
$$
Recall that $Cov(\epsilon_t,\epsilon_{t+j}) = E(\epsilon_t\epsilon_{t+j})$ since the errors have mean zero.
$$
= \sum\limits_{t=1}^{n}x_t^2Var(\epsilon_t) + 2\sum\limits_{t=1}^{n-1}\sum\limits_{j=1}^{n-t}x_tx_{t+j}E(\epsilon_t\epsilon_{t+j})
$$
Since $\epsilon_t$ follows AR(1) serial correlation with $|\rho|<1$, we have $E(\epsilon_t\epsilon_{t+j}) = \rho^j\sigma^2$.
$$
= \sum\limits_{t=1}^{n}x_t^2Var(\epsilon_t) + 2\sum\limits_{t=1}^{n-1}\sum\limits_{j=1}^{n-t}x_tx_{t+j}\rho^j\sigma^2
$$
$$
= \sum\limits_{t=1}^{n}x_t^2Var(\epsilon_t) + 2\sigma^2\sum\limits_{t=1}^{n-1}\sum\limits_{j=1}^{n-t}x_tx_{t+j}\rho^j
$$
since $\sigma^2$ is a constant. Now recall that $SST_x = \sum\limits_{t=1}^{n}x_t^2$.
$$
= \sigma^2SST_x + 2\sigma^2\sum\limits_{t=1}^{n-1}\sum\limits_{j=1}^{n-t}\rho^jx_tx_{t+j}
$$
Now we add in our earlier fraction in the front $\frac{1}{SST_x^2}$
$$
= \frac{\sigma^2SST_x}{SST_x^2} + \frac{2\sigma^2\sum\limits_{t=1}^{n-1}\sum\limits_{j=1}^{n-t}\rho^jx_tx_{t+j}}{SST_x^2}
$$
$$
Var(\hat{\beta}_1)= \frac{\sigma^2}{SST_x} + \frac{2\sigma^2\sum\limits_{t=1}^{n-1}\sum\limits_{j=1}^{n-t}\rho^jx_tx_{t+j}}{SST_x^2}
$$
where $Var(\epsilon_t) = \sigma^2$ since it is still homoskedastic. The first term is the usual variance of $\hat{\beta}_1$. The second term is the bias. Note that if $\rho>0$ (and the $x_t$ are positively correlated over time, as is typical), then the usual OLS variance formula will underestimate the true variance since it does not take the second term into account. From this, usual OLS inference that assumes no serial correlation (when there actually is) will cause the standard errors to be smaller than what they should be, leading to test statistics that are too big and therefore too many type 1 errors (falsely rejecting the null hypothesis).
\subsection{Correcting Serial Correlation}
Let us suppose we know from the \textit{Breusch-Godfrey test} that our model suffers from serial correlation. We know that the efficiency of OLS estimators is affected. Our standard errors will be off meaning that we can no longer carry out inferences. We also know that goodness of fit is off if the data is not weakly dependent either. Terrible news! From that, we can correct the serial correlation.
Assume we have a linear regression with serially correlated errors and strict exogeneity assumption holds:
$$
a) \quad y_t = \beta_0 + \beta_1 x_t + \epsilon_t
$$
Whereby t=1,...,T and also that:
$$
\epsilon_t = \rho \epsilon_{t-1} + v_t
$$
such that $v_t \sim$ iid(0,$\sigma_v^2$). Here, both $\epsilon_t$ and $v_t$ (our white noise) are assumed to be independent error processes. We can say that $\epsilon_t$ is an AR(1) process but we assume that $|\rho| < 1$ or else some bad stuff is gonna happen since it'll be unstable. We can do some magical algebraic manipulation. Let us then look at t-1:
$$
y_{t-1} = \beta_0 + \beta_1 x_{t-1} + \epsilon_{t-1}
$$
We can multiply though by $\rho$
$$
b) \quad \rho y_{t-1} = \rho \beta_0 + \rho \beta_1 x_{t-1} + \rho \epsilon_{t-1}
$$
$$
a) \quad y_t = \beta_0 + \beta_1 x_t + \epsilon_t
$$
We can now subtract b from a.
$$
y_t - \rho y_{t-1} = (\beta_0 - \rho \beta_0) + (\beta_1 x_t - \rho \beta_1 x_{t-1}) + (\epsilon_t - \rho \epsilon_{t-1})
$$
$$
\tilde{y_t} = (1 - \rho)\beta_0 + \beta_1\tilde{x_t} + \epsilon_t - \rho\epsilon_{t-1}
$$
Since we know that $v_t = \epsilon_t - \rho\epsilon_{t-1}$
$$
\tilde{y_t} = \tilde{\beta_0} + \beta_1 \tilde{x_t} + v_t
$$
whereby t=2,...,T. We have taken the \textit{quasi-difference} of the variables. We define: $\tilde{y_t} = y_t - \rho y_{t-1}$, $\tilde{x_t} = x_t - \rho x_{t-1}$ and $\tilde{\beta_0} = (1 - \rho)\beta_0$. Furthermore, recall $\epsilon_t - \rho \epsilon_{t-1}$ = $v_t$, a white noise process. Now we have corrected for serially correlated errors and have a white noise error!!! Inference is all good now. In fact, we satisfy all the \textit{Gauss-Markov assumptions}. Note that if $\rho = 1$, we would have just differenced (not quasi-differenced), but we assumed that $|\rho| < 1$. However, note that this only works for the observations t=2,...,T, since we cannot difference the first period. Therefore, for the estimators to be BLUE, we need to specify that the first time period is simply:
$$
y_1 = \beta_0 + \beta_1x_1 + \epsilon_1
$$
and all the $v_t$'s are uncorrelated with $\epsilon_1$ so our issue is fine now and we achieved BLUE estimators.
\subsubsection{Cochrane-Orcutt Correcting Serial Correlation (Feasible GLS)}
We can use the \textit{Cochrane-Orcutt} procedure to estimate the model parameters.
\begin{center}
1) Regress $y_t$ on $x_t$ and obtain the residuals $\hat{\epsilon}$
$$
y_t = \beta_0 + \beta_1 x_t + \epsilon_t \quad t=1,...,T
$$
2) Regress $\hat{\epsilon_t}$ on $\hat{\epsilon_{t-1}}$ and obtain $\hat{\rho}$
$$
\hat{\epsilon_t} = \rho \hat{\epsilon_{t-1}} + v_t
$$
3) From quasi-differenced variables (whereby we multiply the lag $y_{t-1}$ by $\rho$ and then difference $y_t$ and $y_{t-1}\rho$), we regress $\tilde{y_t}$ on $\tilde{x_t}$ to obtain parameter estimates
$$
\tilde{y_t} = \tilde{\beta_0} + \beta_1 \tilde{x_t} + \tilde{\epsilon_t}
$$
\end{center}
From this, the usual standard errors, t and F statistics are asymptotically valid. An issue is that using $\hat{\rho}$ instead of $\rho$ causes the estimators to be biased, but it does lead to more efficiency asymptotically (assuming a weakly dependent time series) and consistency. Even if $\tilde{\epsilon_t}$ is not normal, we still have approximately valid t and F-statistics. Usually, the Cochrane-Orcutt procedure is iterative whereby we repeat steps 2 and 3 to derive new parameter estimates until there is no change in the estimate of $\rho$ from successive iterations. To run this command in stata, we would go: $\textit{prais dependentvar independentvar, corc}$. Additionally, we can regress y on x and save the residuals. Then regress the residuals on their lags and save $\hat{\rho}_1$. From this, we go $y - \rho y_{t-1}$ and $x - \rho x_{t-1}$ to generate our new variables. Note that if we included our first observation $y_1 = \beta_0 + \beta_1x_1 + \epsilon_1$, this is known as the \textbf{Prais-Winsten estimation}. Either method should not give us results that differ greatly. More importantly, we may notice that the Cochrane-Orcutt method may result in coefficients that do not differ too much from OLS (\textit{if the variables are I(0)}) BUT the standard errors can be significantly higher to account for the serial correlation (and therefore some variables may now not be statistically significant and also less economically important). \textbf{Note that we cannot compare the $R^2$ of these models with the OLS models since the dependent and independent variables are now different}.
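A minimal sketch of the iterative feasible GLS idea in Python: statsmodels' \texttt{GLSAR} class essentially automates steps 1--3 above, alternating between estimating $\rho$ from the residuals and re-running the quasi-differenced regression (again, \texttt{y} and \texttt{X} are placeholders for your own series, and this is only one possible implementation):
\begin{verbatim}
import statsmodels.api as sm

# GLSAR with an AR(1) error; iterative_fit repeats the
# "estimate rho -> quasi-difference -> re-estimate betas" loop
# until the estimates settle down (Cochrane-Orcutt style).
model = sm.GLSAR(y, sm.add_constant(X), rho=1)   # rho=1 means AR(1) errors
results = model.iterative_fit(maxiter=10)

print(model.rho)          # estimated serial correlation in the errors
print(results.params)     # coefficient estimates after the correction
print(results.bse)        # standard errors (often larger than plain OLS)
\end{verbatim}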
On a final note, we note that sometimes OLS and Cochrane-Orcutt/Prais-Winsten coefficients can vary significantly. This could be because the variables are not related in levels (without differencing) but they are after we quasi-difference them. This leads to an issue since if we are trying to look at static relationships between levels of variables, but they are not I(0), then OLS won't produce consistent estimations. This can arise if the variables have unit roots and doing FGLS (Cochrane-Orcutt) will eliminate these unit roots.
\subsection{Finite Distributed Lag (FDL) Models}
Instead of looking at things in a contemporaneous measure, we can now add in lag effects to see how things in the past affect today and the future! Let's create a variable Z that we think can affect y for up to 2 periods in the future. From this, we derive the \textit{finite distributed lag model} as:
$$
y_t = \alpha_0 + \delta_0 z_t + \delta_1 z_{t-1} + \delta_2 z_{t-2} + \epsilon_t
$$
which includes two lags. For example, we could model:
$$
umbrellassold_t = \alpha_0 + \delta_0 rain_t + \delta_1 rain_{t-1} + \delta_2 rain_{t-2} + \epsilon_t
$$
which tells us that umbrellas sold today is affected by amount of rain today, yesterday, and 2 days ago (since if it's been raining for past few days, people will start giving in and buying their own umbrellas instead of stealing it from poor souls at Fisher library).
We can generalise this by if we want a FDL of order q:
$$
y_t = \alpha_0 + \delta_0 z_t + \delta_1 z_{t-1} + ... + \delta_q z_{t-q} + \epsilon_t
$$
We specify q based on what we believe (or a statistical test suggests) is a good number of lags to include. $\delta_0$ is the \textbf{impact propensity} which tells us the \textit{immediate change} in y when z increases by 1 unit. So if $\delta_0 = 0.5$, then a 1 unit increase in z will increase y by 0.5 units ceteris paribus. As an example, suppose we had the FDL(2) model (with no error terms to keep it simple):
$$
y_{t} = \alpha_0 + \delta_0z_t + \delta_1z_{t-1} + \delta_2z_{t-2}
$$
Mathematically, imagine we had that t=t-1, $z_t$ = 20 units, and then at t=t, we increase it by 1 unit to 21 and then at t=t+1, it drops back down to 20.
$$
y_{t-1} = \alpha_0 + \delta_0 (20) + \delta_1 (20) + \delta_2(20)
$$
$$
y_t = \alpha_0 + \delta_0(20+1) + \delta_1(20) + \delta_2(20)
$$
$$
y_{t+1} = \alpha_0 + \delta_0(20) + \delta_1(20+1) + \delta_2(20)
$$
$$
y_{t+2} = \alpha_0 + \delta_0(20) + \delta_1(20) + \delta_2(20+1)
$$
$$
y_{t+3} = \alpha_0 + \delta_0(20) + \delta_1(20) + \delta_2(20)
$$
Notice that time is increasing by 1 period every equation. We see that the effect of the 1 unit increase propagates throughout the model. So if we take the difference between the first 2 equations (or at time t vs t-1):
$$
= \delta_0(1)
$$
and we can see that at time t, this is the immediate impact. Furthermore, at time t+1, we difference the first and 3rd equation to get:
$$
= \delta_1(1)
$$
which tells us the effect of $z_{t-1}$ on $y_t$, or the change in y \textit{one period} after the \textit{temporary} change in $z_t$. Furthermore, if both z and y are in logarithmic form, the impact propensity is also known as the \textbf{short run (instantaneous) elasticity}. This is similar to the static model we had earlier. If we believe there to be no impact propensity, we can just drop $z_t$ from the regression (or if it is genuinely 0, then $\delta_0$ = 0).
Using the same example as before, we now have that if we increase $z_t$ by 1 unit \textbf{permanently}, we have that:
$$
y_{t-1} = \alpha_0 + \delta_0 (z_t) + \delta_1 (z_{t-1}) + \delta_2(z_{t-2})
$$
$$
y_{t} = \alpha_0 + \delta_0 (z_t+1) + \delta_1 (z_{t-1}) + \delta_2(z_{t-2})
$$
$$
y_{t+1} = \alpha_0 + \delta_0 (z_t+1) + \delta_1 (z_{t-1}+1) + \delta_2(z_{t-2})
$$
$$
y_{t+2} = \alpha_0 + \delta_0 (z_t+1) + \delta_1 (z_{t-1}+1) + \delta_2(z_{t-2}+1)
$$
and if we take $t+i$ as $i \rightarrow \infty$, or into the future, the effect of a permanent increase in $z_t$ is the sum of all the $\delta$'s. The summation of all $\delta$ values is known as the \textbf{long run propensity} such that LRP = $\sum_{j=0}^{q}\delta_j$. The LRP tells us that if we permanently change z at a given time, the LRP is the ceteris paribus change in y after the change in z has passed through all q time periods. So if the minimum wage rose by 1 dollar an hour, then substitute the value of 1 for $z_{t}$ and sum the coefficients. If y and z are both in natural logarithms, then the LRP is known as the \textbf{long run elasticity}. Note we can also include dummy variables to account for certain events.
Furthermore, note that since $z_t$ is serially correlated with itself through time (which is fine and should be expected!), it will be difficult in getting precise estimates of each $\delta$. From this, even if we run a F-test on $\delta_0 =...=\delta_q =0$ and find that it is statistically significant, we still don't know which lag period is significant with $y_t$ (they may be jointly significant but individually insignificant). If $\delta_1 = \delta_2=...=\delta_q = 0$, then it will suggest to us to use a static model since lags are insignificant. \textit{Note that LRP can be used for a single lagged regressor in the case of multiple regressors}. If we had:
$$
y_t = \alpha_0 + \delta_0z_t + \delta_1z_{t-1} + \delta_2z_{t-2}
$$
and we found LRP = $\delta_0 + \delta_1 + \delta_2 = \lambda$, we can then compute a significance test on the LRP and its confidence intervals. We let: $\theta = \delta_0 + \delta_1 + \delta_2$ and re-express as: $\delta_0 = \theta - \delta_1 - \delta_2$. Sub into the above equation to get:
$$
y_t = \alpha_0 + (\theta - \delta_1 - \delta_2)z_t + \delta_1z_{t-1} + \delta_2z_{t-2}
$$
$$
y_t = \alpha_0 + \theta z_t + \delta_1z_{t-1} - \delta_1z_t + \delta_2z_{t-2} - \delta_2z_t
$$
$$
y_t = \alpha_0 + \theta z_t + \delta_1(z_{t-1} - z_t) + \delta_2(z_{t-2} - z_{t})
$$
Regress this final equation to get $\hat{\theta}$ which equals our LRP. Now though, we have a standard error from this regression which we can then use to run a significance test on the long run propensity and compute confidence intervals.
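A rough sketch of this reparameterisation trick in Python (here \texttt{y} and \texttt{z} are placeholder series and the model is the FDL(2) above); the coefficient on $z_t$ in the transformed regression is the LRP $\hat{\theta}$ together with its standard error:
\begin{verbatim}
import numpy as np
import statsmodels.api as sm

z = np.asarray(z); y = np.asarray(y)
zt  = z[2:]        # z_t
zl1 = z[1:-1]      # z_{t-1}
zl2 = z[:-2]       # z_{t-2}
yt  = y[2:]

# Transformed regressors: z_t, (z_{t-1} - z_t), (z_{t-2} - z_t)
Xstar = sm.add_constant(np.column_stack([zt, zl1 - zt, zl2 - zt]))
res = sm.OLS(yt, Xstar).fit()

theta_hat = res.params[1]   # estimated long run propensity
theta_se  = res.bse[1]      # its standard error -> t-test / CI for the LRP
\end{verbatim}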
\subsection{Infinite Distributed Lag (IDL) Models}
IDL models are FDL models but with q = $\infty$. We model the dependent variable as a function of the current and all past values of the independent variable. IDL models are quite unrealistic in the sense that we don't have data that has existed over an infinite time horizon.
$$
y_t = \alpha + \delta_0 z_t + \delta_1 z_{t-1} + ... + \epsilon_t
$$
Even though it's unrealistic, this still gives us good approximations and interesting things to look at! However, we do require that $\delta_j \rightarrow 0$ as $j \rightarrow \infty$. Here, the effect of a $z$ variable on $y_t$ decreases the further back in time we go! (Which makes sense since inflation in 1900 shouldn't affect inflation in 2017.) Therefore, the effects are strongest for more recent and contemporaneous values. Note that this doesn't mean that 2016's effect has to be larger than 2015's, just that when it's really far back, it gets smaller.
From all this, we can interpret IDL models quite similar to FDL models. For a temporary change in z, so at time = -1, z = 0, then at time = 0, we have that z increases by 1 unit and one period later, reverts back to its initial level 0 and stays at 0 for the rest of time. Then, for any period $h \geq 0$, we have that:
$$
y_h = \alpha + \delta_h + \epsilon_h
$$
since all the other terms will have died off. $\delta_h$ here is the temporary change in E($y_h$) given a one unit temporary change in z at time 0. More formally, we have E($y_h$) = $\alpha$ + $\delta_h$ $\forall$ h $\geq$ 0. Like we said earlier, for the IDL to make sense, we need that $\delta_h \rightarrow 0$ as time progresses or $h \rightarrow \infty$ (so the effect dies off). This means that \textbf{there is no long run effect from a temporary change in z}: E($y_h$) $\rightarrow$ $\alpha$ as h $\rightarrow$ $\infty$ since $\delta_h \rightarrow 0$. More generally, $\delta_h$ measures the change in the expected value of y after h periods. After sufficient time, this means we expect the average value of y to return to a constant. We can still consider the long run propensity to be LRP = $\delta_0 + ... + \delta_h$ as h $\rightarrow \infty$. From this, the infinite sum is well defined since $\delta_j$ must converge to 0, and therefore the LRP can be approximated by $\delta_0 + ... + \delta_p$ for a sufficiently large p. Then, for a permanent increase in z, we have that $E(y_h) = \alpha + \delta_0 +...+\delta_h$, or (for large h) the long run propensity plus $\alpha$. We implicitly assume strict exogeneity for this model, whereby even all future values of $z_t$ are uncorrelated with the error term. An issue with this is that it treats future values of $z_t$ as fixed and never changing, which isn't realistic (if $z_t$ was interest rates, these would definitely be changing in 20 years time compared to today). Therefore, we use a weak exogeneity assumption instead (where the error is uncorrelated with current and past z).
\subsection{Geometric Distributed Lag (GDL)/Koyck distributed Lag Models}
We need to place some assumptions on the infinite Distributed lag model so that we can actually estimate it (since currently there are an infinite number of parameters). We can use the \textbf{geometric/Koyck} distributed lag (GDL) model. From this, the new $\delta_j$ depends on two parameters:
$$
\delta_j = \gamma \rho^j
$$
whereby $| \rho |$ $<$ 1, j=0,1,...
We can let $\gamma$ and $\rho$ be positive or negative, but we need $|\rho| < 1$ so that $\delta_j \rightarrow 0$ as the time period $j \rightarrow \infty$. So if $\gamma$ is 1 and $\rho$ = 0.5, then $\delta_1$ = 0.5, $\delta_2$ = 0.25, ... until it converges to 0. This means that something far away in time doesn't have much of an effect. Furthermore, $|\rho|$ has to be less than 1 or else the terms will explode (try the above example whereby we set $\rho$ to 1.5 instead of 0.5; you'll see $\delta_j$ will keep growing). Therefore from this, we can estimate the IDL model provided we set the coefficients $\delta_j$ = $\gamma \rho^j$.
So for the GDL, we have:
$$
y_t = \alpha + \delta_0 z_t + \delta_1 z_{t-1} + ... + \epsilon_t
$$
whereby $\delta_j = \gamma \rho^j$. The \textit{impact propensity} is $\delta_0 = \gamma\rho^0 = \gamma$, so the sign of the impact propensity is determined by the sign of $\gamma$.
$$
y_t = \alpha + \gamma \rho^0 z_t + \gamma \rho^1 z_{t-1} + \gamma \rho^2 z_{t-2} + ... + \epsilon_t
$$
then becomes:
$$
y_t = \alpha + \gamma z_t + \gamma \rho z_{t-1} + \gamma \rho^2 z_{t-2} + ... + \epsilon_t
$$
Note that if $\gamma > 0$ and $\rho>0$, then all lag coefficients are positive. However, if $\rho < 0$, then the lag coefficients will alternate in sign. We can also compute the long-run propensity by using the sum of a geometric series, which is:
$$
1 + \rho + \rho^2 + ...= \frac{1}{1-\rho}
$$
which, when we apply it to $y_t$ to compute the LRP:
$$
LRP = \gamma + \gamma \rho + \gamma \rho^2 + ...= \frac{\gamma}{1-\rho}
$$
It also follows that the LRP will have the same sign as $\gamma$.
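As a quick numeric illustration: if $\gamma = 0.8$ and $\rho = 0.5$, then the lag coefficients are $\delta_0 = 0.8$, $\delta_1 = 0.4$, $\delta_2 = 0.2$, and so on, while the long run propensity is LRP $= \frac{0.8}{1-0.5} = 1.6$. So a permanent one unit increase in z eventually raises y by 1.6 units, even though the immediate impact is only 0.8.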
\subsubsection{Application of Koyck}
If we wanted to actually estimate this model, we first write down our original equation and its lagged version:
$$
y_t = \alpha + \gamma z_t + \gamma \rho z_{t-1} + \gamma \rho^2 z_{t-2} + ... + \epsilon_t
$$
$$
y_{t-1} = \alpha + \gamma z_{t-1} + \gamma \rho z_{t-2} + \gamma \rho^2 z_{t-3} + ... + \epsilon_{t-1}
$$
We then multiply the second equation by $\rho$.
$$
\rho y_{t-1} = \rho\alpha + \rho\gamma z_{t-1} + \gamma \rho^2 z_{t-2} + \gamma \rho^3 z_{t-3} + ... + \rho\epsilon_{t-1}
$$
We then subtract this equation from our original equation and lots of terms will cancel out:
$$
y_t - \rho y_{t-1} = (1 - \rho)\alpha + \gamma z_t + \epsilon_t - \rho\epsilon_{t-1}
$$
Rearrange $\rho y_{t-1}$ and define new variables:
$$
y_t = \alpha_0 + \gamma z_t + \rho y_{t-1} + \epsilon_t - \rho\epsilon_{t-1}
$$
Notice that we now have a model with just one lag to estimate! However, if we run OLS on this, we get inconsistent estimates as $y_{t-1}$ is correlated with $\epsilon_{t-1}$. We can use two-stage least-squares to solve this but that's for another unit :) (Although if you are interested, we can use an instrumental variable such as $z_{t-1}$ for $y_{t-1}$ and estimate the model with that instead).
\subsection{Conclusion}
Static models are quite similar to cross-sectional methods since the data ordering doesn't matter (only contemporaneous values enter). They are quite good at estimating contemporaneous relations between two or more variables. However, they aren't able to capture lagged effects, which is why distributed lag models are useful (although we need to be careful with the order of the data now!). However, we can't use any of these models for forecasting since they don't consider the fact that past values of y can help to forecast y (note that we only used lags of the independent variables and not the dependent variable). Furthermore, they require that future realisations of x be known in order to forecast those y values (which is impossible to obtain, since we would be in the future and already have the y values if we had those future x values...). Conclusively, the models we have explored so far are good in terms of interpreting causal effects but not so much in actual predictive capabilities.
\newpage
\section{Deterministic and Stochastic Trends}
\subsection{Trending Data}
Economic time series tend to change unidirectionally over time. When we see a series grow or shrink, we need to be careful when modelling and interpreting relationships between 2 or more variables. We wish to avoid \textbf{spurious} regressions, whereby we think there is a significant correlation when in fact, there is none. A systematic change in a time series that does not appear to be periodic is known as a trend.
\textbf{Spurious Relationship:} Finding a significant relationship between 2 time series variables when in actual fact there is no relationship between them. One good example could be the number of movies Jennifer Lawrence has appeared in and the size of Australia's GDP. Both are growing between 2000-2017. Therefore if we ran a regression on this, we would actually find a statistically significant relationship between these 2 variables! However, we know that Lawrence has no effect on Australia's GDP no matter how good her movies are. We can in fact attribute this to \textbf{linear trends}: both variables are growing over time, so of course it may appear that there is some correlation going on!
From this, in order to capture true relationships between regressors and dependent variables, we can add a trend variable (t) in order to help control for the case in which any of the variables in the model are linearly trending.
$$
y_t = \beta_0 + \beta_1 x_{1t} + \beta_2 x_{2t} + \gamma t + \epsilon_{t}
$$
Where t = 1,2,...,T
Here, this allows us to control for a linear trend that affects $y_t$ which may relate to trends in $x_{1t}$ and $x_{2t}$. If assumptions of linearity, no perfect collinearity, and strict exogeneity holds, then if we do not include the trend t, this can cause bias in estimating $\beta_1$ and $\beta_2$. If we had all the assumptions of the classical linear models satisfied, then we can apply test statistics and confidence intervals in the usual way.
However, when the dependent variable is trending, we can still have a high R-squared which is very misleading, since the trend inflates the variation in Y and therefore leads to a high $R^2$ even with a time trend included (the time trend is capturing a lot of the variance). Therefore, we need to be careful in interpreting the goodness of fit when the dependent variable is trending. Here, the R-squared captures the share of the variance explained by the model.
The time trend t represents our ignorance about omitted factors causing $y_t$ to trend up or down. However, we should still be happy with it since we can still fit y to a trend.
If you think about it, adding a time trend to MLR model, has a nice interpretation of detrending y and all explanatory variables. We show what it means to instead \textbf{detrend the variables}.
We can detrend variables by regressing each variable on a time trend and then differencing the actual value from the fitted trend. Therefore, first we estimate:
$$
y_t = \hat{\alpha_0} + \hat{\alpha_1}t + \hat{u_t}
$$
and we also estimate:
$$
x_t = \hat{\gamma_0} + \hat{\gamma_1}t + \hat{v_t}
$$
From this, we can obtain the detrended series $y_t^*$ by subtracting the estimated trend component from the original series:
$$
y_t^* = y_t - \hat{\alpha_0} - \hat{\alpha_1}t = \hat{u_t}
$$
and also for the predictor:
$$
\tilde{x_t} = x_t - \hat{\gamma_0} - \hat{\gamma_1}t = \hat{v_t}
$$
This gives us the residuals of each variable. From the Frisch-Waugh theorem, the residuals are the leftover variation that isn't related to the time trend. From this, we can run the final regression:
$$
y_t^{*} = \theta_0 + \theta_1\tilde{x_t} + \epsilon_t
$$
An easier way to think about it: let us regress each of $y_t$, $x_{t,1},x_{t,2},...,x_{t,k}$ on a constant and a time trend t.
$$
y_t = \alpha_0 + \alpha_1t + \epsilon_t
$$ From that, we save the residuals $\dot{y}_t, \dot{x}_{t,1}, \dot{x}_{t,2}, \dots$. We then regress these residuals on each other (where we don't need an intercept since the intercept will just be 0).
$$
\dot{y}_t = \beta_1\dot{x}_{t,1} + \beta_2\dot{x}_{t,2} + e_t
$$
This also yields identical parameter estimates. From this, the coefficients and their statistical significance will be the same as in the model with a linear trend, except the R-squared in the detrended model will typically be lower. Conclusively, the OLS coefficients in the model with a time trend are the same as those from detrending all the variables (whether or not the explanatory variables actually needed detrending) and then fitting a regression using the detrended series.
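A small sketch of detrending in Python (purely illustrative; \texttt{y} and \texttt{x} are placeholder series): we regress each series on a constant and a time trend, keep the residuals, and then regress the detrended series on each other. The slope should match the coefficient from the regression that includes the trend directly.
\begin{verbatim}
import numpy as np
import statsmodels.api as sm

t = np.arange(len(y))                      # time trend 0,1,...,T-1
trend = sm.add_constant(t)

y_detr = sm.OLS(y, trend).fit().resid      # detrended y (residuals)
x_detr = sm.OLS(x, trend).fit().resid      # detrended x

# Regression on detrended series (no intercept needed: residuals have mean 0)
detr_res = sm.OLS(y_detr, x_detr).fit()

# Same slope as including the trend directly in the regression (FWL theorem)
full_res = sm.OLS(y, sm.add_constant(np.column_stack([x, t]))).fit()
print(detr_res.params[0], full_res.params[1])   # these should agree
\end{verbatim}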
We tend to use a linear specification to account for a linear trend.
$$
y_t = \alpha_0 + \alpha_1t + \epsilon_t
$$
where t = 1,...,T and that $\epsilon_t \sim iid(0,\sigma^2)$. From this, the average value of $y_t$ is a linear function of time:
$$
E(y_t) = \alpha_0 + \alpha_1t
$$
whilst $\epsilon_t$ represents the deviations about the trend. Normally, linear trends tend to be a good enough approximation. Furthermore, we can define the change in $y_t$ from period t-1 to t as: $\Delta y_t = y_t - y_{t-1}$. $\Delta$ is the difference operator between periods. From this, under the linear trend representation:
$$
E(\Delta y_t) = E(y_t) - E(y_{t-1}) = [\alpha_0 + \alpha_1t] - [\alpha_0 + \alpha_1(t-1)] = \alpha_1
$$
and therefore $\alpha_1$ is the average per-period change in $y_t$ for all t. So from the original equation $y_t = \beta_0 + \beta_1 x_{1t} + \gamma t + \epsilon_{t}$, the $\gamma$ tells us the change in y over time net of the x's.
Some data are better approximated by exponential trends. Therefore we can use \textbf{exponential trends} whereby for strictly positive variables, we can capture an exponential trend by:
$$
y_t = e^{\beta_0 + \beta_1t + \epsilon_t}
$$
then we can transform this further by taking logarithms on both sides to get:
$$
ln(y_t) = \beta_0 + \beta_1t + \epsilon_t
$$
and now it is the natural logarithm of the variable that follows a linear trend.
We can also define the change in logs as: $\Delta ln(y_t)$ = ln($y_t$) - ln($y_{t-1}$). Then from this, under exponential trend representations:
$$
E[\Delta ln(y_t)] = [\beta_0 + \beta_1t] - [\beta_0 + \beta_1(t-1)] = \beta_1
$$
The log difference is equivalent to the growth rate. Effectively, we are estimating ln($y_t$) = $\beta_0 + \beta_1t + \epsilon_t$. However, recall that the change in logs approximates the growth rate such that:
$$
\beta_1 \approx \frac{\Delta y_t}{y_{t-1}}
$$
Thus, note that with exponential trends, the rate of growth is constant, and $\beta_1$ tells us the average growth rate over time. An important thing we need to consider in forecasting is the seasonality of the data. For annual data, this is not an issue. If 2 variables follow the same seasonality, this can lead to spurious regressions and therefore we can include dummy variables (only 3 for quarterly data, or else we fall into the dummy variable trap) to account for this. Every time we are in a particular season, the dummy variable activates. This is deterministic since we know exactly the seasonal effect depending on the quarter.
\subsection{Stochastic Trends}
In the previous models we looked at, the trend in the time series was deterministic and known. However, an alternative way to represent a trending series is to use a \textbf{random walk} process whereby:
$$
y_t = y_{t-1} + \epsilon_t
$$
whereby t=1,2,.. We can think of random walks as AR(1) models whereby $\rho$ coefficient = 1. We also assume $\epsilon \sim $ white noise and Gaussian. From this, if we back substituted, we get:
$$
y_{t-1} = y_{t-2} + \epsilon_{t-1}
$$
$$
y_t = y_{t-2} + \epsilon_t + \epsilon_{t-1}
$$
and from all this, we eventually get
$$
y_t = y_{0} + \epsilon_1 + ... + \epsilon_t
$$
$$
y_t = \sum\limits_{s=1}^{t}\epsilon_s
$$
where we assume that $y_0$ = 0 since it is the beginning of time, and $\epsilon_t \sim iid(0,\sigma_{\epsilon}^2)$.
Since $y_0$ = 0, we have the following properties:
$$
E(y_t) = E(y_0) = 0
$$
$$
Var(y_t) = \sum\limits_{s=1}^{t}Var(\epsilon_s) = t\sigma_{\epsilon}^2
$$
From this, we can then show that for any t $\geq$ 1, s $\geq$ 0
$$
Cov(y_t,y_{t+s}) = Var(y_t) = \sigma_{\epsilon}^2t
$$
This means that the covariance between the series at time t and at any later time t+s is simply t times the error variance. Since the covariance is a function of time, our series is not \textbf{covariance stationary}. Furthermore, it follows that for any t $\geq$ 1, s $\geq$ 0
$$
Corr(y_t,y_{t+s}) = \frac{Cov(y_t,y_{t+s})}{\sqrt{Var(y_t)Var(y_{t+s})}} = \sqrt{\frac{t}{t+s}}
$$
Therefore, for a sufficiently long time series, this implies that the correlation between $y_t$ and $y_{t+s}$ remains high (close to unity) even as s grows. This is known as a \textbf{highly persistent} or \textbf{strongly dependent} time series, which does not satisfy the law of large numbers or the central limit theorem. Persistent means that the correlation will still persist even over a long time horizon. From this, we can see that $y_t$ does not have correlations that die out fast enough as the time distance increases. The bigger the time period, the bigger the variance we get, and furthermore we now have a sequence of shocks whose effects do not disappear even as time moves on. The effects of shocks do not die out, which violates the assumption of stationarity and covariance stationarity. It matters whether a series is highly persistent or not, since if we are looking at, for example, policy, then policies from 30 years ago can still be having an effect on GDP today. Random walks are a special case of a \textbf{unit root process} since $\rho=1$, which means the series does not converge to a particular value in the long run.
Do not confuse trending with highly persistent. Something can be trending but not be highly persistent. However, it is often the case that a highly persistent series also contains a clear trend, such as a \textbf{random walk with drift}:
$$
y_t = \alpha_0 + y_{t-1} + \epsilon_t
$$
We see that back substitution gets us:
$$
y_t = \alpha_0t + y_0 + \epsilon_1 + \epsilon_2 + ... + \epsilon_t
$$
and then taking expectation gets us:
$$
E(y_t) = \alpha_0t
$$
so that the expected value of $y_t$ is growing over time if $\alpha_0 > 0$. Furthermore, the best prediction of $y_{t+h}$ is $\alpha_0h + y_t$, which is today's value plus the drift $\alpha_0h$.
We note that for a random walk \textit{without} drift, the forecast for t+h is:
$$
E(y_{t+h}) = y_t
$$
so for any point in the future, our best forecast is the value today.
Highly persistent series such as random walks cause serious problems for regression analysis. If we regress y on x, then we can get parameters that are statistically significant even though the series are unrelated. Consider
$$
y_t = \beta_0 + \beta_1x_t + \xi_t
$$
whereby $\{y_t\}$ and $\{x_t\}$ are the random walk processes such that:
$$
y_t = y_{t-1} + \epsilon_t \quad \epsilon \sim iid(0,\sigma_{\epsilon}^2)
$$
$$
x_t = x_{t-1} + \nu_t \quad \nu \sim iid(0,\sigma_{\nu}^2)
$$
We also assume that $Cov(\epsilon_t,\nu_t)$ = 0, which then implies that $\{y_t\}$ is independent of $\{x_t\}$. Since $y_t$ is a random walk, then we have that:
$$
E(y_t) = E[y_0 + \epsilon_1 + \epsilon_2 +...+ \epsilon_t] = 0
$$
and same for:
$$
E(x_t) = E[x_0 + \nu_1 + \nu_2 +...+ \nu_t] = 0
$$
From this, it would have suggested that:
$$
\beta_0 = \beta_1 = 0
$$
which then implies that:
$$
\xi_t = y_t = \epsilon_t + \epsilon_{t-1} + ... + \epsilon_1
$$
whereby $\xi_t$ has a mean of zero, variance grows with t, and is \textit{highly persistent}.
Resultantly, the OLS estimator of $\beta_1$, which is $\hat{\beta}_1$, does not converge to zero, which means that OLS is inconsistent. Practically, as we get more data, $\hat{\beta}_1$ does not get closer to 0 or even converge to a specific value. From this, we will never figure out that $y_t$ is unrelated to $x_t$. With random walks, the problem arises because $\{x_t\}$ has too much temporal correlation for the law of large numbers to hold. An even more serious issue is that the t-statistic for $H_0: \beta_1 = 0$ does not have a t-distribution, even in large samples. Furthermore, the t-statistic rejects the null hypothesis too often and this issue gets worse as the sample size grows (we are more likely to make a type 1 error, rejecting a true null, with more data).
Regressions with 2 or more independent random walks result in the \textit{spurious regression problem} in time series. The two series are independent but they appear to be strongly related since they can just trend in the same manner. From this spurious regression, the $R^2$ will be quite large.
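This is easy to see in a small simulation. The sketch below (just illustrative) generates two completely independent random walks, regresses one on the other, and typically produces a "significant" t-statistic and a sizeable $R^2$ even though there is no true relationship:
\begin{verbatim}
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
T = 500
y = np.cumsum(rng.standard_normal(T))   # random walk 1
x = np.cumsum(rng.standard_normal(T))   # random walk 2, independent of y

res = sm.OLS(y, sm.add_constant(x)).fit()
print(res.tvalues[1])   # t-stat on x: often "significant" despite no relation
print(res.rsquared)     # R-squared: often surprisingly large
\end{verbatim}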
A stochastic process $\{y_t:t=1,2,...\}$ is \textbf{strictly stationary} if for every collection of time indices $t_1 < t_2 < ... < t_m$, the joint distribution of $(y_{t_1},y_{t_2},...,y_{t_m})$ is the same as the joint distribution of $(y_{t_1+h},y_{t_2+h},...,y_{t_m+h})$ $\forall h \geq 1$.
From this, we can define a \textbf{weakly stationary/covariance-stationary process} as a stochastic process $\{y_t:t=1,2,...\}$ with a finite second moment $E(y_t^2) < \infty$ whereby $E(y_t) = \mu$, $Var(y_t) = \sigma^2$, and $Cov(y_t, y_{t+h})$ depends only on h and not t (in other words, the covariance is a function of h, not of t). If a strictly stationary process has a finite second moment (constant variance), then it must be weakly stationary, but the converse is not necessarily true.
From this, a stationary process is \textbf{weakly dependent} if $Cov(y_t,y_{t+h}) \rightarrow 0$ as $h \rightarrow \infty$. In other words, observations become almost independent as h increases. The random walk process does not satisfy this, and is nonstationary as well.
Furthermore, \textbf{trending time series} are also nonstationary (whether they are deterministic or stochastic). However, by detrending or differencing, we can transform nonstationary series into stationary series.
\textbf{Trend-stationary} applies for the case when the trend is deterministic and simply estimating the trend and removing it from the data, then the residuals left will be a stationary series. So as long as we include a trend, everything is fine.
\textbf{Difference stationary}: applies for the case when the trend is stochastic. Differencing the data will yield a stationary series.
Weakly dependent processes are said to be \textbf{integrated of order zero} or I(0). Therefore, we don't need to do anything to them before regressions since they satisfy standard limit theorems. Unit root processes like random walks are integrated of order one or I(1). This means that first differencing them will lead to a \textbf{weakly dependent} series (and often a stationary process). Therefore, a time series that is I(1) is said to be a \textbf{difference-stationary process}, even though this term is misleading as it emphasises the stationarity after differencing rather than the weak dependence. From this, something that is weakly dependent is not necessarily stationary (since the mean and variance aren't necessarily constant) and vice versa. Weak dependence relates to how the covariance of a series should behave asymptotically, whilst weak stationarity just requires the covariance to not be a function of time.
To summarise:
\bigskip
\begin{definition}
A time series is \textbf{strictly stationary} when the joint distribution of $Y_t,Y_{t-1},...,Y_{t-k}$ does not depend on t. The joint density $p(y_t,y_{t-1},...,y_{t-k})$ does not depend on t.
\end{definition}
The joint distribution not depending on time means that the distribution of the values isn't changing due to a time factor. If a time series is stable, it is easier to estimate since the components are not changing over time. This means that the probability distribution function is the same across any index. However, strict stationarity is a very strong assumption, so we use a simpler condition that is sufficient for ARMA models: \textbf{weak stationarity/covariance stationarity}. A weakly stationary series is one whereby the mean is constant, the variance is constant, and its covariance is \textit{not} a function of time. In the literature, stationary refers to weakly stationary unless specified otherwise.
$$
E(Y_t) = \mu
$$
$$
Var(Y_t) = \sigma^2
$$
$$
Cov(Y_t,Y_{t-k}) = f(k) \neq g(t)
$$
Note that strictly stationary implies weakly stationary data. Do not get this confused with \textbf{weakly dependent time series} which states that the correlation for:
$$
Corr(X_t,X_{t+h}) \rightarrow 0 \quad h \rightarrow \infty
$$
This means that $X_t$ becomes less correlated with values that are further away in time. Note that weakly stationary data requires the covariance to be a function of k and not time, whilst weak dependence specifies that this correlation/covariance needs to decrease to 0 as a function of k. A \textbf{strongly dependent/highly persistent} time series is the case in which this does not hold and is the opposite of a weakly dependent series. Also note that a unit root process is not trend stationary (a shock to a trend-stationary series dies out and the series reverts back to its trend, whilst a shock to a unit root process is permanent).
\newpage
\section{Autoregressive Process}
Now we are doing actual time series models by exploiting the serial dependence in the response and predictors. Hence, if a variable is correlated with itself over time, we can regress it on its own past values. This gives us the name \textbf{autoregressive model}. An autoregression is a time series regression in which the dependent AND independent variables belong to the same stochastic process.
$$
y_t = \alpha + \beta y_{t-1} + \epsilon_t
$$
$$
\text{where} \quad \epsilon \sim iid(0,\sigma_{\epsilon}^2)
$$
This is an autoregression of order one, or AR(1). Here, we make the predictor the previous period of the response. The error term is iid and not autocorrelated, unlike the response variable. $\beta$ tells us that this period's response is last period's response scaled by the parameter $\beta$ (plus noise). In reality, there are many other factors affecting $y_t$ that are related over time, and their combined effect is captured through the relationship with the lagged $y$ variable. We can model slow adjustments to the long run equilibrium with the AR(1) model. The time it takes for a shock (error term) to die out and for the series to return to normal depends on the size of $\beta$. This is why $\beta$ can also be known as the \textbf{persistence} parameter.
We also have a \textbf{highly persistent} time series as a result. Suppose we have:
$$
y_t = y_{t-1} + \epsilon_t
$$
Where $\epsilon_t \sim iid(0,\sigma^2)$.
From this, we can now substitute recursively lagged dependent variables.
$$
y_t = \alpha + \beta y_{t-1} + \epsilon_t
$$
Sub in
$$
y_{t-1} = \alpha + \beta y_{t-2} + \epsilon_{t-1}
$$
To now get
$$
y_t = \alpha + \beta(\alpha + \beta y_{t-2} + \epsilon_{t-1}) + \epsilon_t
$$
Sub in
$$
y_{t-2} = \alpha + \beta y_{t-3} + \epsilon_{t-2}
$$
To now get
$$
y_t = \alpha + \beta\alpha + \beta^2 y_{t-2} + \beta \epsilon_{t-1} + \epsilon_t
$$
$$
y_t = \alpha(1 + \beta) + \beta^2 (\alpha + \beta y_{t-3} + \epsilon_{t-2}) + \beta \epsilon_{t-1} + \epsilon_t
$$
From this, we can derive the equation.
$$
y_t = \alpha \sum\limits_{i=0}^{t-1}\beta^i + \beta^t y_0 + \sum\limits_{i=0}^{t-1}\beta^i \epsilon_{t-i}
$$
We assume no \textit{unit root}, so the coefficient satisfies $|\beta|$ $<$ 1; then as t$\rightarrow \infty$, for each of the terms:
$$
\beta^t y_0 \rightarrow 0
$$
So the middle term disappears.
Now for the first term:
$$
\alpha \sum\limits_{i=0}^{t-1}\beta^i
$$
Recall that the sum of infinite geometric series:
$$
\sum\limits_{i=0}^{\infty}x^i = \frac{1}{1-x}
$$
Which means that:
$$
\alpha \sum\limits_{i=0}^{t-1}\beta^i = \alpha \frac{1}{1-\beta} = \frac{\alpha}{1-\beta}
$$
First term of expression is the geometric decay weighted by alpha. For the final term, it is the same:
$$
\sum\limits_{i=0}^{t-1}\beta^i \epsilon_{t-i} = \sum\limits_{i=0}^{\infty}\beta^i \epsilon_{t-i}
$$
From this, we can get this final convergence of:
$$
\frac{\alpha}{1-\beta} + \sum\limits_{i=0}^{\infty}\beta^i \epsilon_{t-i}
$$
If the absolute value of $\beta$ is less than 1, we get a convergent geometric summation. We can take the expectation of this expression: the first term is a constant, so its expectation is itself, and the expectation of each error term is 0. We need the assumption that the data is weakly stationary (which relies on the fact that the mean is constant). We can then show that:
$$
E(y_t) = E[\frac{\alpha}{1-\beta} + \sum\limits_{i=0}^{\infty}\beta^i \epsilon_{t-i}]
$$
$$
E(y_t) = \frac{\alpha}{1-\beta} + \sum\limits_{i=0}^{\infty}\beta^i E(\epsilon_{t-i})
$$
$$
E(y_t) = \frac{\alpha}{1-\beta} = \mu
$$
The variance of a constant is 0, so the first part disappears. For the second part, each $\epsilon$ has constant variance. The variance of the sum is the sum of the variances (since the errors are independent). The variance of each term $\beta^i\epsilon_{t-i}$ is $\sigma^2$ multiplied by $\beta^{2i}$. Summing the geometric series in $\beta^2$ gives the final term, which we denote $\gamma_0$.
We have that:
$$
Var(y_t) = \frac{\sigma_{\epsilon}^2}{1 - \beta^2} = \gamma_0
$$
These are both time-invariant, so they do not depend on time.
As an example, suppose $\beta_1 = 0.5$ for an AR(1) model and $y_t = 1$. Then at t+1:
$y_{t+1} = 0.5y_t = 0.5(1) = 0.5$
$y_{t+2} = 0.5y_{t+1}= 0.5(0.5) = 0.25$
And this will eventually diminish to 0 for t+h, as h $\rightarrow$ $\infty$
If $\beta$ is high, then it'll take longer for this to disappear. If $\beta$ is negative, then the value will be oscillating between positive and negative.
\textbf{Autocovariance}
Suppose we look at the first order autocovariance, $\gamma_1$ = E($y_ty_{t+1}$) (derived the same way as in assignment 2). For the AR(1) model, the first autocorrelation is $\rho_1 = \beta$, $\rho_2 = \beta^2$, and so on as we increase the lags.
Recall that corr($y_t,y_{t+h}$) = $\rho^h$, which means that for:
$$
y_t = \rho y_{t-1} + \epsilon_t
$$
So when $|\rho| < 1$, corr($y_t,y_{t+h}$) $\rightarrow$ 0 as h $\rightarrow$ $\infty$. This means that the AR(1) is weakly dependent and I(0).
R-squared is not useful for choosing the lag order since it always goes up when adding parameters. Adjusted R-squared adds a penalty for extra parameters, but the penalty is often not strong enough. Therefore, we use information criteria (AIC/BIC). The absolute value of a criterion is not meaningful on its own; we should look for the minimum. We can compare information criteria across models. The first term is identical for both criteria; they differ only in the penalty term. AIC tends to be better for relatively small samples. The two criteria may suggest different values of p for an AR(p).
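A quick sketch of lag selection in Python: statsmodels can fit AR(p) models for a range of p and report the order that minimises the chosen information criterion (here \texttt{y} is a placeholder stationary series, and this is just one way to automate the search):
\begin{verbatim}
from statsmodels.tsa.ar_model import AutoReg, ar_select_order

sel = ar_select_order(y, maxlag=8, ic="bic")   # try p = 0,...,8, pick min BIC
print(sel.ar_lags)                             # lags chosen by the criterion

res = AutoReg(y, lags=sel.ar_lags).fit()       # fit the selected AR model
print(res.aic, res.bic)                        # criteria for the fitted model
\end{verbatim}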
Setting $\alpha$ = 0 and $\beta$ = 1 gives us a random walk. So if $\beta$ equals 1, then our unconditional mean/variance formulas no longer hold.
A \textbf{weakly dependent process} is one where the correlation between $x_t$ and $x_{t+h}$ decreases as h $\rightarrow$ $\infty$. Therefore, a \textit{stationary} time series process is weakly dependent if $x_t$ and $x_{t+h}$ are \textbf{almost independent} as h $\rightarrow$ $\infty$. From this, a weakly dependent process is called an \textbf{integrated process of order 0, I(0)}. Weak dependence replaces the assumption of random sampling: in a large sample it plays the same role, letting the usual limit theorems go through. From this, we can use the time series data right away without having to make any alterations to the data.
We have a unit root iff $\rho$ = 1 for an AR(1) model. When $\rho$ = 1, we have a unit root series of:
$$
y_t = \alpha + y_{t-1} + \epsilon_t
$$
Furthermore, if $\alpha$ = 0 and $\rho$ = 1, then $y_t$ follows a \textbf{random walk process}. In both the case of $y_t$ having a unit root or being a random walk, this makes it I(1). We can take differences of both sides now:
$$
y_t - y_{t-1} = \epsilon_t
$$
where $\epsilon \sim iid(0,\sigma^2)$, which means $\epsilon$ is a weakly dependent process. Therefore, $\Delta y_t$ is a weakly dependent process (but not $y_t$).
Testing for a unit root means seeing whether $\beta$ = 1 or not. We have an issue that with non-stationary data the usual t-statistic is no longer reliable. So what we do is subtract $y_{t-1}$ from both sides and then test the $\theta$ parameter on $y_{t-1}$ instead; this is equivalent to testing whether $\beta$ = 1. We need to apply different critical values: we use the Dickey-Fuller distribution instead now. This is only one-sided since we are only ever interested in whether $\beta$ is less than 1. If $\beta$ is greater than 1, then we have an explosive process whereby a shock grows every period. As we increase the number of lags in the autoregression, there are more possible specifications to consider. $H_0$ states that $y_t$ has a unit root when we run the test. Equivalently, we can reformulate $H_0$ as $y_t$ being I(1) versus the alternative of I(0).
$$
y_t = \alpha + \rho y_{t-1} + \epsilon_t
$$
$$
y_t - y_{t-1} = \alpha + (\rho-1)y_{t-1} + \epsilon_t
$$
Where we can define that $\theta = (\rho-1)$ to get:
$$
\Delta y_t = \alpha + \theta y_{t-1} + \epsilon_t
$$
and specify that $H_0$: $\theta$ = 0 vs $H_a$: $\theta < 0$. The null hypothesis says that $y_t \sim I(1)$ whilst the alternative says that $y_t \sim I(0)$. We can no longer use the t-distribution, so we must now use the \textbf{Dickey-Fuller} table.
Regress the transformed equation in the usual way and compare the t-statistic to the Dickey-Fuller critical values. The null hypothesis is that the series has a unit root (is I(1)). If we fail to reject, then we take the first difference (which should be I(0)) and test that again. We can also introduce a trend variable, which changes the critical values (the tables are slightly different). With this model, we could have residuals that are serially correlated, so we use the Augmented Dickey-Fuller test, which adds lagged differences of $y_t$ to the regression.
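A minimal sketch of the (Augmented) Dickey-Fuller test in Python (illustrative only; \texttt{y} is a placeholder series). A small p-value means we reject the unit root null in favour of I(0):
\begin{verbatim}
import numpy as np
from statsmodels.tsa.stattools import adfuller

stat, pval, usedlag, nobs, crit, icbest = adfuller(y, regression="c",
                                                   autolag="AIC")
print("ADF statistic:", stat, "p-value:", pval, "critical values:", crit)

# If we fail to reject, difference the series and test the differences:
dy = np.diff(y)
print(adfuller(dy, regression="c", autolag="AIC")[1])   # p-value for dy
\end{verbatim}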
\newpage
\section{Vector Autoregression and Error Correction}
If we had the case for AR models:
$$
y_t = \alpha + \beta_1y_{t-1} + \beta_2y_{t-2} + \beta_3y_{t-3} + \epsilon_t
$$
The unconditional mean would be arrived at via backward substitution. There is a shortcut to reach it based on the assumption that we are dealing with stationary data: the dependent variable in the long run will have a constant mean. From this, we can come up with:
$$
y^* = E(y_t) = E(y_{t-1}) = ... = E(y_{t-3})
$$
Where we then arrive at $y^*$:
$$
y^* = \alpha + \beta_1y^* + \beta_2y^* + \beta_3y^*
$$
And then rearrange to get:
$$
y^* = \frac{\alpha}{1 - \beta_1 - \beta_2 - \beta_3} = \frac{\alpha}{1-(\beta_1 + \beta_2 + \beta_3)} = \mu
$$
If the sum of the $\beta$'s is close to 1, then the time series is said to be persistent, since if there is a shock we would be seeing its effect for a long time. We only have convergence if the $\beta$'s sum to something less than 1 in absolute value.
For a Vector Autoregressive Process (VAR) we need to use matrices. We assume weakly dependent time series, which means that they are not really persistent and any shocks disappear reasonably quickly. Since both series are weakly dependent, they are integrated of order 0 and therefore we do not suffer from spurious regression. We can think of the dependent variable as a function of the independent variable and LAGS of the independent variable, suggesting a dynamic model. What we can do instead (for an AR(1) error in particular) is:
$$
y_t = \alpha + \beta Z_t + \epsilon_t
$$
and assuming that $\epsilon_t$ is first order autocorrelated so iid assumption is violated and now:
$$
\epsilon_t = \rho \epsilon_{t-1} + V_t
$$
Whereby $V_t \sim$ iid(0,$\sigma$). We can then reparameterize the model by first getting the lag of the model:
$$
y_{t-1} = \alpha + \beta Z_{t-1} + \epsilon_{t-1}
$$
multiply the lag by $\rho$ to get:
$$
\rho y_{t-1} = \rho \alpha + \rho \beta Z_{t-1} + \rho \epsilon_{t-1}
$$
and then subtract the initial equation by this new equation to get:
$$
y_t = \alpha(1-\rho) + \rho y_{t-1} + \beta Z_t - \rho \beta Z_{t-1} + V_t
$$
Our new parameters are now:
$$
y_t = \tilde{\alpha} + \tilde{\gamma} y_{t-1} + \tilde{\beta_0} Z_t + \tilde{\beta_1} Z_{t-1} + V_t
$$
If the error terms are autocorrelated, we can add lags of the dependent variable in order to remove this autocorrelation. However, the parameters are now changed. So if our model should be static, the issue is that the parameters we have constructed are different to those of the static model. We can assume no immediate effect of z on y, which means that $z_t$ disappears. We can also think of a \textit{feedback effect} whereby z can affect y BUT then y can also affect z. We can write 2 equations, one for y and one for z. We have the same variables on the RHS of both equations but the parameters differ: each equation contains a lagged y and a lagged z.
\subsection{VAR}
Going off from the last section, a \textbf{VAR} is like an autoregressive model whereby instead of 1 dependent variable, we now have M \textbf{endogenous} dependent variables (since we now have this feedback effect). The error term in each equation is assumed to be iid \textit{relative} to its own future and past realisations, BUT errors may be correlated between equations. Note that the error term in the y equation differs from the error term in the z equation. The system is bivariate since there are only 2 variables, y and z, and of order 1 since there is only 1 lag. We can have any multivariate AR of order p whereby we have M different equations.
Each $\Pi^{(j)}$ is an $n \times n$ matrix of coefficient parameters on the $j$-th lag, whilst x is the vector of variables. Written out, the first equation is:
$$
x_{1t} = \alpha_1 + \Pi_{11}^{(1)}x_{1t-1} + \Pi_{12}^{(1)}x_{2t-1} + ... + \Pi_{1n}^{(1)}x_{nt-1} + \Pi_{11}^{(2)}x_{1t-2} + \Pi_{12}^{(2)}x_{2t-2} + ... + \Pi_{1n}^{(2)}x_{nt-2} + \epsilon_{1t}
$$
Intercept, its own first lag, the first lag of the 2nd variable, the first lag of the 3rd variable and so on. Then we have the second lag of itself, the second lag of the 2nd variable and so on. The superscript tells us which lag the coefficient belongs to. The error terms have mean zero and a constant covariance matrix. The error terms form a vector whilst the covariance is a matrix.
We can see an example of 1 lag with 2 variables.
The VAR generalises the AR model. Each equation nests an AR model: if we set the effect from all other variables to 0, then we are left with an AR model. We can have n variables and believe that some variables affect the other variables. Therefore, the specification in reduced form gives us a model that does not impose any restrictions ahead of time.
A structural VAR model requires restrictions on the right hand side. We can just use OLS equation by equation, but if the regressors differ across equations then we have to use MLE. Identification of the system matters: with 2 endogenous variables and 2 equations, the direction of causality may be hard to identify, so the structural model allows us to identify the causality via the restrictions made.
The number of equations is decided ahead of time by choosing which variables to include. We need to have enough variables to ensure no omitted variable bias occurs. However, for the number of lags, we apply either the AIC or BIC to determine it: we choose the number of lags with the lowest information criterion. The criterion takes the natural log of the determinant of the residual variance-covariance matrix; the 2nd component is the penalty factor multiplied by the number of estimated parameters and divided by the length of the time series. P is the lag order.
We do a joint test to see if:
$$
\Pi_{12}^{(1)} = \Pi_{12}^{(2)} = 0
$$
to see if a given variable in the system helps cause another variable. This is known as the \textbf{Granger causality} test. Rejecting the null means we have Granger causality. Granger causality differs from usual causality: there could be no structural reason for one variable to cause another, yet we may still find Granger causality. It means that there is some information in a given variable that helps us to predict a realisation of another variable. So Granger causality is predictive by nature and allows our model to fit better. We assume that we have I(0) variables and locate the optimal lag lengths. Then we estimate, say, a bivariate VAR of order 3 and do a joint test. Weak dependence ensures we don't get spurious regression. I(1) variables are not weakly dependent and therefore we have to take differences to ensure they are weakly dependent.
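A rough sketch of how this could look in Python with statsmodels (illustrative; \texttt{data} is assumed to be a DataFrame with two I(0) columns, say \texttt{"y"} and \texttt{"z"}): we let an information criterion pick the lag order, fit the VAR, and then run the Granger causality (joint F) test:
\begin{verbatim}
from statsmodels.tsa.api import VAR

model = VAR(data[["y", "z"]])
res = model.fit(maxlags=8, ic="aic")     # choose p by minimising AIC
print(res.k_ar)                          # selected lag order

# H0: the lags of z are jointly zero in the y equation
# (i.e. z does NOT Granger-cause y)
gc = res.test_causality("y", ["z"], kind="f")
print(gc.summary())
\end{verbatim}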
\subsection{Cointegration}
Now consider if we have I(1) variables, which are therefore non-stationary. We can represent these 2 variables by random walk models. In general, any linear combination of these 2 variables is also an I(1) process. However, there are some cases whereby a particular linear combination of the two I(1) variables is I(0). If this is the case, we say that the 2 variables are \textbf{cointegrated}. Individually they are random walk processes, but there is a long run relationship between the 2 variables and therefore they wander around together. So there can be a stable long-run (possibly trending) relationship between the 2 variables.
We could have the case that in the short run they deviate, but in the long run they move together. The equilibrium relationship is captured by the cointegrating parameter $\beta$. So if we assume that $\beta$ = 1, there is a 1-1 relationship between y and z, and thus $y_t - z_t$ is I(0).
A generic form of the cointegrating relationship can be seen as:
$$
y_t - \beta Z_t - \alpha - \gamma t \sim I(0)
$$
whereby in the simplest case $\alpha$ and $\gamma$ are set to 0.
\subsubsection{Testing Cointegration}
$$
y_t = \alpha + \beta Z_t + \eta_t
$$
If y and Z are cointegrated, then the equilibrium error $\eta_t = y_t - \alpha - \beta Z_t$ must be I(0). So we estimate the static regression by OLS, save the residuals $\hat{\eta}_t$, and test them for stationarity with a Dickey-Fuller-type test (using adjusted critical values). This is the Engle-Granger approach to testing for cointegration.
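As a small sketch, statsmodels has a residual-based cointegration test built in (illustrative only; \texttt{y} and \texttt{z} are placeholder I(1) series). The null hypothesis is "no cointegration", so a small p-value suggests the two series are cointegrated:
\begin{verbatim}
from statsmodels.tsa.stattools import coint

t_stat, p_value, crit_values = coint(y, z)
print("test statistic:", t_stat)
print("p-value:", p_value)
print("critical values (1%, 5%, 10%):", crit_values)
\end{verbatim}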
\subsubsection{Error Correction Model}
$\gamma$ is the adjustment parameter. The larger $\gamma$ is (in absolute terms), the faster the adjustment back to the long run equilibrium. General dynamic models are ones where the response is a function of its own lags and which also allow for feedback effects.
There is a tradeoff for exogeneity, but we now include a feedback effect between the 2 variables. If the variables in our new equation are still in I(1) levels, that is bad for inference, which is why we move to differences plus an error-correction term.
\subsubsection{Vector Error Correction Model}
We now have first difference of variables on the left hand side.
We estimate a static model and collect the residuals. Lag those residuals and include them in our model of first differences. The parameters tend to be normalised with respect to the $\beta$'s.
To summarise, we want to examine relationships between 2 or more variables. Check their order of integration. If I(0), use a VAR in levels. If not, then use a VAR in differences, and carry out a vector error correction model if and only if the variables are cointegrated.
Now consider Granger causality testing in the case of I(x) variables with x$>$0. Recall we can always work with first differences, but if the variables are I(1) we can't use our usual test statistics on the levels. It turns out there is a nice fix to the issue, the \textbf{Toda-Yamamoto} approach. Suppose we have 2 variables y and z that are I(x) where x$>$0; let us suppose y is I(1) and z is I(2). We would like to test the hypothesis of Granger causality (and we can't use the usual VAR in levels). Once we identify the order of integration for each variable, we set m to the maximum order of integration of the 2 variables, so m=2 in this case. Then we proceed as if these 2 variables were I(0): we set up a VAR(p) model by choosing a lag length based on AIC/BIC. Suppose we found VAR(2) is the best one, with 2 lags. The Toda-Yamamoto trick is then to estimate a VAR with p+m lags, but only test the coefficients on the first p lags with the usual Wald test; the extra m lags fix up the distribution of the test statistic.
It is tempting to take differences of the variables until they are I(0) and then test for Granger causality. However, this is bad: we are no longer testing whether one variable affects another in levels, just whether the change in one variable affects the change in another, which may give us a different answer to the question we were after.
The error correction term adds more information. We can think of it as an omitted variable: if two variables are cointegrated but no error correction term is included, the model effectively has an omitted variable. Including the error correction term allows us to model that information.
\newpage
\section{Forecasting}
We can't evaluate a forecast until tomorrow happens. The information set $I_t$ is all the information we know at time $t$. For example, if we are estimating an AR(2), the previous realisations of the random variable are my information set. We use a \textit{loss function} to help decide which model to use.
\subsection{Point Forecast}
The prediction we make is called a \textbf{point forecast}. It is our best guess of the random variable of interest in the next period.
\textbf{Forecast Uncertainty:} Things we can never know, since some part of the future is inherently unpredictable.
\textbf{Model Uncertainty:} All models are wrong but some are useful. Our model will differ from the true model, and our forecasts will therefore contain error from this source. However, we can minimise this. Here the uncertainty is the difference between our model and the true model.
\textbf{Parameter Uncertainty:} We estimate parameters with some uncertainty. The difference here is between the true parameters and our estimated parameters.
Any given loss function should satisfy 3 requirements:
1) If the forecast error is 0, then the loss should be 0.
2) If the forecast error is not 0, then the loss should be greater than 0.
3) If the absolute error in one period is bigger than the absolute error in another period, then the associated loss should also be bigger for that period.
We can solve for the optimal forecast by minimising the expected loss with respect to the forecast, treating $y_{t+1}$ as a random variable.
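As a worked sketch in illustrative notation: under quadratic loss, with forecast error $e_{t+1} = y_{t+1} - \hat{y}_{t+1|t}$, we solve
$$
\min_{\hat{y}_{t+1|t}} \; E\left[ (y_{t+1} - \hat{y}_{t+1|t})^2 \mid I_t \right] \quad \Rightarrow \quad \hat{y}_{t+1|t} = E\left[ y_{t+1} \mid I_t \right]
$$
so the optimal point forecast is the conditional mean of $y_{t+1}$ given the information set.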
We can generate pseudo-forecasting environments. There is a trade-off in the training-test data split: with more training data the in-sample error falls, but we can then overfit to the training data, and we still want enough observations out of sample. The RMSFE assumes a quadratic loss function. In addition to point forecasts, we should also report interval forecasts.
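In illustrative notation, with $P$ out-of-sample forecasts,
$$
\text{RMSFE} = \sqrt{ \frac{1}{P} \sum_{t} \left( y_{t+1} - \hat{y}_{t+1|t} \right)^2 }
$$
which is why it corresponds to a quadratic loss function: large errors are penalised more than proportionally.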
A larger $\beta$ means larger variances for multi-step forecasts. Normally $\beta \leq 1$. The forecast interval will initially widen and eventually stabilise at a level equal to the unconditional variance of the random variable.
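For an AR(1) with parameter $\beta$ (illustrative notation), the $h$-step-ahead forecast error variance is
$$
\operatorname{Var}(e_{t+h}) = \sigma^2 \sum_{j=0}^{h-1} \beta^{2j} \;\longrightarrow\; \frac{\sigma^2}{1-\beta^2} \quad \text{as } h \to \infty \text{ when } |\beta| < 1
$$
which is the unconditional variance; with a unit root ($\beta = 1$) the sum equals $h\sigma^2$ and keeps growing.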
With a unit root, the AR model becomes a random walk in which $y_t$ is a function of $y_{t-1}$. The forecast function is then a flat line: a random walk cannot usefully be forecast, so we just use this period's value $y_t$ as the forecast of tomorrow's value. Additionally, the forecast variance increases with the horizon, so the forecast interval never stabilises and keeps widening.
When comparing forecasts from vector AR and AR models, we are effectively testing for Granger causality: does variable Z also help us predict variable Y?
\end{document}
|
(************************************************************************)
(* v * The Coq Proof Assistant / The Coq Development Team *)
(* <O___,, * INRIA - CNRS - LIX - LRI - PPS - Copyright 1999-2010 *)
(* \VV/ **************************************************************)
(* // * This file is distributed under the terms of the *)
(* * GNU Lesser General Public License Version 2.1 *)
(************************************************************************)
Require Export Field.
Require Export QArith_base.
Require Import NArithRing.
(** * field and ring tactics for rational numbers *)
Definition Qsrt : ring_theory 0 1 Qplus Qmult Qminus Qopp Qeq.
Proof.
constructor.
exact Qplus_0_l.
exact Qplus_comm.
exact Qplus_assoc.
exact Qmult_1_l.
exact Qmult_comm.
exact Qmult_assoc.
exact Qmult_plus_distr_l.
reflexivity.
exact Qplus_opp_r.
Qed.
Definition Qsft : field_theory 0 1 Qplus Qmult Qminus Qopp Qdiv Qinv Qeq.
Proof.
constructor.
exact Qsrt.
discriminate.
reflexivity.
intros p Hp.
rewrite Qmult_comm.
apply Qmult_inv_r.
exact Hp.
Qed.
Lemma Qpower_theory : power_theory 1 Qmult Qeq Z_of_N Qpower.
Proof.
constructor.
intros r [|n];
reflexivity.
Qed.
Ltac isQcst t :=
match t with
| inject_Z ?z => isZcst z
| Qmake ?n ?d =>
match isZcst n with
true => isPcst d
| _ => false
end
| _ => false
end.
Ltac Qcst t :=
match isQcst t with
true => t
| _ => NotConstant
end.
Ltac Qpow_tac t :=
match t with
| Z0 => N0
| Zpos ?n => Ncst (Npos n)
| Z_of_N ?n => Ncst n
| NtoZ ?n => Ncst n
| _ => NotConstant
end.
Add Field Qfield : Qsft
(decidable Qeq_bool_eq,
completeness Qeq_eq_bool,
constants [Qcst],
power_tac Qpower_theory [Qpow_tac]).
(** Example of use: *)
Section Examples.
Let ex1 : forall x y z : Q, (x+y)*z == (x*z)+(y*z).
intros.
ring.
Qed.
Let ex2 : forall x y : Q, x+y == y+x.
intros.
ring.
Qed.
Let ex3 : forall x y z : Q, (x+y)+z == x+(y+z).
intros.
ring.
Qed.
Let ex4 : (inject_Z 1)+(inject_Z 1)==(inject_Z 2).
ring.
Qed.
Let ex5 : 1+1 == 2#1.
ring.
Qed.
Let ex6 : (1#1)+(1#1) == 2#1.
ring.
Qed.
Let ex7 : forall x : Q, x-x== 0.
intro.
ring.
Qed.
Let ex8 : forall x : Q, x^1 == x.
intro.
ring.
Qed.
Let ex9 : forall x : Q, x^0 == 1.
intro.
ring.
Qed.
Let ex10 : forall x y : Q, ~(y==0) -> (x/y)*y == x.
intros.
field.
auto.
Qed.
End Examples.
Lemma Qopp_plus : forall a b, -(a+b) == -a + -b.
Proof.
intros; ring.
Qed.
Lemma Qopp_opp : forall q, - -q==q.
Proof.
intros; ring.
Qed.
|
Spring and summer are soon here again and the sooner we get to picnic weather the better. But are you tired of the same tired old hot dog and chips picnic fare? Well, here are four sites to break out of the same old routines and into some good eating.
Click on the highlighted text above to view Ask Granny’s choice of her favourite Picnic websites. |
Formal statement is: lemma at_to_0: "at a = filtermap (\<lambda>x. x + a) (at 0)" for a :: "'a::real_normed_vector" Informal statement is: The filter at $a$ is the same as the filter at $0$ after translation by $a$. |
## Deprecated. See the remark about the Poisson derivative being always negative.
Require Import Crypto.Specific.Framework.RawCurveParameters.
Require Import Crypto.Util.LetIn.
(***
Modulus : 2^130 - 5
Base: 43 + 1/3
***)
Definition curve : CurveParameters :=
{|
sz := 3%nat;
base := 43 + 1/3;
bitwidth := 64;
s := 2^130;
c := [(1, 5)];
carry_chains := Some [seq 0 (pred 3); [0; 1]]%nat;
a24 := None;
coef_div_modulus := Some 2%nat;
goldilocks := None;
karatsuba := None;
montgomery := false;
freeze := Some true;
ladderstep := false;
mul_code := None;
square_code := None;
upper_bound_of_exponent_loose := None;
upper_bound_of_exponent_tight := None;
allowable_bit_widths := None;
freeze_extra_allowable_bit_widths := None;
modinv_fuel := None
|}.
Ltac extra_prove_mul_eq _ := idtac.
Ltac extra_prove_square_eq _ := idtac.
|
#redirect Users/CarlWMcCabe
|
Golden 1 Credit Union is a Sacramento-based credit union. Credit unions are not-for-profit cooperatives that provide the same services as a bank: checking and savings accounts. They have two ATMs on campus (one at the Memorial Union (MU) and one at the Silo) and an office downtown in Regency Square with two ATMs at Second St and D St.; another in the Nugget Market on East Covell Boulevard and another in the Nugget Market on Mace Boulevard.
Anyone is eligible to join as long as they are a resident of Yolo County or one of many other counties in California. It used to be that only government employees, students and family could get accounts at Golden 1 Credit Union, but they have opened up membership to residents of most counties in California.
Credit unions are insured by the NCUA (National Credit Union Administration). This insurance is just like the FDIC insurance for banks and your money is insured up to $250,000. Your money is just as safe and secure at a credit union as at a bank.
The first box of checks for new student checking accounts is free.
The Free Checking Account comes with no minimum opening balance, no monthly service fee, and a free ATM or debit card.
Golden 1 is a member of the Coop Network, so members can use http://www.coopfs.org/b2chomepage/ Coop Network ATMs without paying a fee. You can also make deposits. This network includes ATMs of Yolo Federal Credit Union YFCU, USE Credit Union USE, Patelco, First US Community Credit Union, and others. The Coop ATM network has 28,000 ATMs across all 50 states so you can bank nationwide. Bank of America for example only has 18,000 ATMs, and their ATMs are limited to only some states.
Golden 1 is also a member of the CU Service Center network. That means that you can get cash or deposit money in person at any participating credit union branch in this network. You can bank in person in all 50 states at over 4,000 credit union branches.
They won't handle checks drawn on Canadian banks, even if it's a USD account, nor can they exchange foreign currency.
You must have a credit history in order to get a debit card/bank card. If you don't have a credit history, you may get an ATM card which can be used to withdraw cash from an ATM, but cannot be used to make purchases at stores or restaurants.
Golden 1 offers free online banking and free online billpay. They also have mobile banking and an app for iPhone and iPad users.
You can scan and deposit checks to your account through the internet from any computer with a scanner. This is available to members who have had a checking account for 1 year.
They have a coin machine where you can deposit coins and the money goes into your account for free.
20050919 01:55:53 nbsp A good option if you prefer to have your money with a nonprofit cooperative over paying fees at a traditional bank. Users/SteveLambert
20051228 01:48:50 nbsp Ive used Golden 1 for my personal banking since Wells Fargo screwed me over in 1995. Theyre not perfect, but Ive always been much happier with Golden 1 than any other bank or credit union Ive used. Users/GrahamFreeman
20060227 15:40:28 nbsp I heard something a while ago about Golden 1 pulling out of the Coop network (which didnt go over well with other credit unions, from what I gather). Users/JulieEickhof
20070321 17:29:33 nbsp Well, theyre still in it, so all is good. I think Golden 1 is the best bank Ive ever used, and their fees are lower and their employees are extremely good. Users/JaimeRaba
20071010 23:12:03 nbsp I just had a horrible experience at Golden 1 Credit Union today. When I signed up for an account they told me that any checks I deposited would be frozen for two weeks, which I think is is a needlessly inconvenient policy. I am in the process of transferring accounts to Golden1 from Bank of America and I decided to hold off for a month until this stupid 30 day hold on new accounts process panned itself out. I went into to cash a deposit refund check from the University Village which had three names on it: my room mate, my cosigner and myself and they told me that I would need to have photo identification of each of the parties involved, no small accomplishment seeing as my cosigner had moved out to Pennsylvania. I managed to get all the signatures and IDs for the check and when I went into to cash the check, the teller told me he needed to go talk to his manager to see if there was anything else he needed to do to put it in my (needlessly frozen) account. I told him to do what he needed to do and five minutes later, there is a line out the door and he is talking with 4 coworkers about how to cash a check. The line was getting restless and so was I... Five minutes to do a simple check deposit?! Also, the clerk said my account would be on hold for two weeks since signing up, and since I signed up 3 weeks ago I thought that I finally hit a bit of good luck. With another teller behind him, I repeated what he said and he angrily told me that he didnt say anything about two weeks, but rather 30 days. After asking why he had just lied to me (I was rather irate at this point) he blatantly told me that he didnt say anything about two weeks, either though his coworker agreed with me.
When all was said and done my checks (one of them for a mere $7.00) are frozen for two weeks simply because I am a new member. To top off the whole ordeal, they did not tell me thank you or any other customary closing courtesy. I told them that if they are in the business of trying to attract members (shareholders) that they were not doing a very good job. Users/BrandonMinow
20071011 10:29:25 nbsp Brandon, as a former bank employee, I must say that these are precautions that every financial institution must take and are pretty standard. Not saying that you are committing fraud but there are a lot of people who do, and holding an item for 2 weeks is the best thing to do for the bank to be able to catch it, as they deal with losses on a daily basis. I know its an inconvenience to you, but the longer your account is open and the more deposits you make, the more cred you will build with the bank and this 2week hold time will go away. Check your fee schedule for more information. Users/CalamityJanie
20080811 12:38:00 nbsp When I signed up for an account here a few weeks ago, my address was somehow entered wrong into the database (misspelled or something similar), so they just didnt send me any mail, and I was never notified or asked to update my address (they did have my phone number and couldve called me). If I hadnt gone in person to ask when my debit card was going to come in, it wouldnt have gotten fixed. Then, they sent me an ATM card instead. I went in to ask for a debit card and they said I dont qualify for one since I dont have much of a credit history (no bad credit, just not enough good credit). When I signed up they didnt say that Id need to qualify for a debit card, and made it sound like Id just get one. After these two serious examples of bad communication, Im closing my account and looking for another credit union.
Edit (10032008): So, over a month ago I went to the Davis branch in person to close my account, and they gave me the balance in cash and told me it was all done. Today I learned that the account was never closed. Im glad Im not putting them in charge of my money any more if they cant even do that.
Users/DanaSullivan
20080812 15:34:15 nbsp This is a pretty standard bank as far as Im concerned. Ive been with Golden Credit since 1995 (although that was with the Clovis, CA branch). I started using the one in Davis when I moved here in 2001 and have rarely had problems. Most of the time things go fine and thats as much as I could ask for a bank that I have a student account through (I originally opened the account in grade school and never changed it). Users/SunjeetBaadkar
20090210 12:26:34 nbsp Ive never had a bad experience with Golden 1, and beyond that Id say Ive had worse experiences with Wells Fargo than the ones listed here. Even if I have a bad experience with Golden 1, I will stay because Wells Fargo has some inane policy regarding their Free checking accounts wherin they transfer money from one account to another once a month... Every month. I use my bank primarily as savings, and also for making large purchases, so I would often have little or no cash in the account they were transfering out of. Golden 1 has no such policy. Users/MasonMurray
20100709 08:16:00 nbsp I have now had a bad experience with them. Sadly, not just one but the same problem continuously for almost a year. I took out a loan to put the deposit on my new apartment until I got the deposit back from my old apartment. I lost my job in the interim, and really ended up needing the money to live off of. I never received bills or payment notices for my loan on time, on at least one occasion not receiving it at all. They called me once or twice, but most of the time it seemed they would call, let the phone ring once then hang up and I would not even have time to answer. My cosigner received over due notices on months when I didnt even receive a due notice. I never got a single over due notice. This was made more frustrating because I went in to the bank to speak with a teller (on three occasions) and was met with I dont know what the problem is, can I help you with anything else?
I doubt that this is the norm, but overall the experience has been better than banking with Wells Fargo, who wanted to charge me $10 to cash a check from their bank. Users/MasonMurray
20100709 10:12:52 nbsp I have been banking with Golden 1 since 2003 and have had good service every time. The tellers are friendly and their online services are easy to use. Users/DagonJones
20101102 17:08:32 nbsp Ive often had to wait close to ten minutes in line to deposit a check. Users/LWolk
20101102 20:50:15 nbsp JUST SO EVERYONE KNOWS: when you go to a bank and they tell you that there is going to be a charge (not that they are charging you) its only because your employer CHOSE to charge you every time you cashed a check at their bank but didnt have an account there. Seriously, I work in a bank, and I couldnt reverse the fee even if I got the manager of the entire effing district involved. ITS YOUR BOSS WHO IS CHARGING YOU, not the bank!!!! So stop whining about it like a little child, grow up, and learn. Users/SheilaMirzai
20110323 21:38:14 nbsp Opening an account with Golden 1 was so easy compared to working with Wells Fargo. I highly recommend banking here. The reason that you will sometimes have to wait in line to cash a check is because they have a lot of happy customers. Users/LucyB
20120315 11:52:20 nbsp The people who work here are always friendly and efficient. Never had a problem. Users/JQuest
20120428 19:06:04 nbsp Miss Britney and(I think her name is) Nancy are two of the sweetest girls you will ever meet. They are very friendly and I get the feeling that they truly want to help people. I did however have an experience with a girl, I think her name was Amy or something. I had a problem with my account and when I asked for some clarification from the manager, she replied. well I am a manager, but I guess you can talk to the other one. I found this an extremley rude comment. You should not talk to cutomers this way, especially if you want to stay in business! And come to find out, Shes not even a manager! Ive heard how she speaks to her fellow coworkers too. NOT okay! Users/newtowiki
20120912 21:41:06 nbsp Signed up for checking and savings accounts today and so far so good. The process took about 30 mintues, the person helping me was fully competent, and it only cost $1. The accounts appear to be totally free unless I overdraft. They have a lot of ebanking features that my old bank didnt offer like photo check depositing, free online bill pay, and a mobile app for banking on my phone. I was able to log into my account instantly and apparently my free debit card and free first check book will be arriving by the weeks end. So far, so good. Users/MikeyCrews
|
If $f$ has a pole at $a$, and $f$ and $g$ agree in a neighborhood of $a$, then $g$ has a pole at $a$. |
[STATEMENT]
lemma UNF_onE:
"UNF_on r A \<Longrightarrow> a \<in> A \<Longrightarrow> (b = c \<Longrightarrow> P) \<Longrightarrow> ((a, b) \<notin> r\<^sup>! \<Longrightarrow> P) \<Longrightarrow> ((a, c) \<notin> r\<^sup>! \<Longrightarrow> P) \<Longrightarrow> P"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>UNF_on r A; a \<in> A; b = c \<Longrightarrow> P; (a, b) \<notin> r\<^sup>! \<Longrightarrow> P; (a, c) \<notin> r\<^sup>! \<Longrightarrow> P\<rbrakk> \<Longrightarrow> P
[PROOF STEP]
unfolding UNF_on_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<forall>a\<in>A. \<forall>b c. (a, b) \<in> r\<^sup>! \<and> (a, c) \<in> r\<^sup>! \<longrightarrow> b = c; a \<in> A; b = c \<Longrightarrow> P; (a, b) \<notin> r\<^sup>! \<Longrightarrow> P; (a, c) \<notin> r\<^sup>! \<Longrightarrow> P\<rbrakk> \<Longrightarrow> P
[PROOF STEP]
by blast |
Formal statement is: lemmas prime_dvd_mult_nat = prime_dvd_mult_iff[where ?'a = nat] Informal statement is: If $p$ is a prime number and $p$ divides the product of two natural numbers $a$ and $b$, then $p$ divides $a$ or $p$ divides $b$. |
\SetAPI{J-C}
\section{IServiceExtendable}
\label{extendable:IServiceExtendable}
\ClearAPI
\javadoc{com.koch.ambeth.service.IServiceExtendable}{IServiceExtendable}
\javadoc{java.lang.Object}{Object}
\TODO
%% GENERATED LISTINGS - DO NOT EDIT
\inputjava{Extension point for instances of \type{Object}}
{jambeth-service/src/main/java/com/koch/ambeth/service/IServiceExtendable.java}
\begin{lstlisting}[style=Java,caption={Example to register to the extension point (Java)}]
IBeanContextFactory bcf = ...
IBeanConfiguration myExtension = bcf.registerBean(...);
bcf.link(myExtension).to(IServiceExtendable.class).with(...);
\end{lstlisting}
\inputcsharp{Extension point for instances of \type{Object}}
{Ambeth.Service/ambeth/service/IServiceExtendable.cs}
\begin{lstlisting}[style=Csharp,caption={Example to register to the extension point (C\#)}]
IBeanContextFactory bcf = ...
IBeanConfiguration myExtension = bcf.RegisterBean(...);
bcf.Link(myExtension).To<IServiceExtendable>().With(...);
\end{lstlisting}
%% GENERATED LISTINGS END
|
/**
*
* @file core_dsetvar.c
*
* PLASMA core_blas kernel
* PLASMA is a software package provided by Univ. of Tennessee,
* Univ. of California Berkeley and Univ. of Colorado Denver
*
* @version 2.6.0
* @author Mark Gates
* @date 2010-11-15
* @generated d Tue Jan 7 11:44:49 2014
*
**/
#include <lapacke.h>
#include "common.h"
/***************************************************************************//**
*
* @ingroup CORE_double
*
* CORE_dsetvar sets a single variable, x := alpha.
*
*******************************************************************************
*
* @param[in] alpha
* Scalar to set x to, passed by pointer so it can depend on runtime value.
*
* @param[out] x
* On exit, x = alpha.
*
******************************************************************************/
#if defined(PLASMA_HAVE_WEAK)
#pragma weak CORE_dsetvar = PCORE_dsetvar
#define CORE_dsetvar PCORE_dsetvar
#endif
void CORE_dsetvar(const double *alpha, double *x)
{
*x = *alpha;
}
|