SUBROUTINE IN_MARK ( marker, mkcolr, iret )
C************************************************************************
C* IN_MARK *
C* *
C* This subroutine decodes the marker string which is in the form: *
C* *
C* color # / marker # / size / width / hw, sw flag *
C* *
C* Note that the hw, sw flag can appear anywhere in the string. *
C* *
C* The marker size is a real number which is a multiplier for the *
C* base marker size. If the size is 0.0, the current size will be *
C* used. If the marker color is 0, no marker will be drawn. If *
C* the marker color is blank, color number 1 will be used. If *
C* the marker number is missing or 0, the current marker number *
C* will be used. The marker type, size and width are set in this *
C* subroutine, while the color is returned so that the program may *
C* set it when actually plotting markers. The GEMPLT package must *
C* be initialized before this subroutine is called. *
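C* *
C* For example (illustrative values only), the input 3/5/1.5/2/HW *
C* is decoded as color 3, marker type 5, size multiplier 1.5, line *
C* width 2, with hardware markers selected. *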
C* *
C* IN_MARK ( MARKER, MKCOLR, IRET ) *
C* *
C* Input parameters: *
C* MARKER CHAR* Marker input *
C* *
C* Output parameters: *
C* MKCOLR INTEGER Marker color *
C* IRET INTEGER Return code *
C* 0 = normal return *
C** *
C* Log: *
C* M. desJardins/GSFC 6/88 *
C* G. Huffman/GSC 1/89 Color .lt. 0 reset to 0 *
C* S. Schotz/GSC 1/90 Added marker width *
C* M. desJardins/GSFC 9/90 Fix default color; call IN_COLR *
C************************************************************************
INCLUDE 'GEMPRM.PRM'
C*
CHARACTER*(*) marker
C*
REAL rmark (4)
CHARACTER ctemp*48, color*24
C------------------------------------------------------------------------
iret = 0
C
C* Check for hw/sw flag in string.
C
CALL ST_LCUC ( marker, ctemp, ier )
CALL ST_RMST ( ctemp, '/HW', ihwpos, ctemp, ier )
CALL ST_RMST ( ctemp, '/SW', iswpos, ctemp, ier )
ipos = ihwpos + iswpos
IF ( ipos .eq. 0 ) THEN
C
C* Check for flag at beginning of string
C
CALL ST_RMST ( ctemp, 'HW/', ihwpos, ctemp, ier )
CALL ST_RMST ( ctemp, 'SW/', iswpos, ctemp, ier )
ipos = ihwpos + iswpos
END IF
C*
IF ( ihwpos .ne. 0 ) THEN
ihwsw = 2
ELSE IF ( iswpos .ne. 0 ) THEN
ihwsw = 1
ELSE
ihwsw = 0
END IF
C
C* Extract color from string.
C
ipos = INDEX ( ctemp, '/' )
IF ( ipos .eq. 0 ) THEN
color = ctemp
ELSE IF ( ipos .eq. 1 ) THEN
color = ' '
ELSE
color = ctemp ( : ipos-1 )
END IF
CALL IN_COLR ( color, 1, mkcolr, ier )
C
C* Get four reals from the string and ignore color.
C
CALL ST_RLST ( ctemp, '/', 0., 4, rmark, n, ier )
C
C* Check on marker size.
C
sizmrk = rmark (3)
C
C* Get marker width
C
imkwid = NINT ( rmark (4) )
C
C* Get marker type.
C
imtype = NINT ( rmark (2) )
C
C* Set marker values.
C
CALL GSMRKR ( imtype, ihwsw, sizmrk, imkwid, ier )
C*
RETURN
END
|
module Prove
-- One can prove theorems with Idris (of course!)
-- Propositional equality is defined as follows:
--
-- data (=) : a -> b -> Type where
-- Refl : x = x
twoPlusTwo : 2 + 2 = 4
twoPlusTwo = Refl
-- The bottom type (the empty type) has no constructors; we can use it to
-- represent the negation of a proposition.
disjoint : {n : Nat} -> (Z = S n) -> Void
disjoint p = replace {P = disjointTy} p ()
where disjointTy : Nat -> Type
disjointTy Z = ()
disjointTy (S _) = Void
-- where the library function `replace' has the type
--
-- replace : (x = y) -> P x -> P y
-- The definition above reduces as follows:
--
-- replace { P = disjointTy } p ()
-- replace { P = disjointTy } (Z = S n) ()
-- (f : (disjointTy Z -> disjointTy (S n))) ()
-- (f : () -> Void) ()
-- Void
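-- As an extra illustration (a sketch, not part of the original notes), `replace'
-- can also be used directly with an explicit motive P, much as in `disjoint'
-- above; this is essentially what the library function `cong' (used below) does:
succCong : {n : Nat} -> {m : Nat} -> (n = m) -> S n = S m
succCong {n} p = replace {P = \x => S n = S x} p Refl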
-- Now we try to prove some simple theorems.
plusReduces : {n:Nat} -> (Z + n = n)
plusReduces = Refl
-- since 0 + n is n by the definition of (+).
plusReduces' : (n:Nat) -> (n + Z = n)
plusReduces' Z = Refl
plusReduces' (S k) = cong (plusReduces' k)
-- where `cong' is a library function with type (a = b) -> (f a = f b),
-- expressing that equality respects function application.
-- To fill the hole we had earlier for the function `parity', we need to prove
-- that (S (S (j + j))) = (S j) + (S j).
parityHelper1 : (j : Nat) -> (S j + S j) = S (S (j + j))
parityHelper1 Z = Refl
-- One way to close this case, using `plusSuccRightSucc' and `sym' from the
-- Prelude: sym (plusSuccRightSucc j (S j)) : j + S (S j) = S (j + S j), and
-- congruence under \k => S (S k) then matches the goal after reduction.
parityHelper1 (S j) = cong {f = \k => S (S k)} (sym (plusSuccRightSucc j (S j)))
|
-- Proof by induction 4: ∀ m n : ℕ, m + n = n + m
-- ================================================
-- ----------------------------------------------------
-- Ex. 1. Let m and n be natural numbers. Prove that
-- m + n = n + m
-- ----------------------------------------------------
import tactic
open nat
variables (m n : ℕ)
-- #check nat.add_zero
-- #check nat.add_succ
-- #check nat.zero_add
-- #check nat.succ_add
-- 1st proof
example : m + n = n + m :=
begin
induction n with n HI,
{ rw nat.add_zero,
rw nat.zero_add, },
{ rw add_succ,
rw HI,
rw succ_add, },
end
-- 2nd proof
example : m + n = n + m :=
begin
induction n with n HI,
{ simp only [nat.add_zero, nat.zero_add] },
{ simp only [add_succ, HI, succ_add] },
end
-- 3rd proof
example : m + n = n + m :=
by induction n;
simp only [*, nat.add_zero, add_succ, succ_add, nat.zero_add]
-- 4th proof
example : m + n = n + m :=
by induction n;
simp [*, add_succ, succ_add]
-- 5th proof
example : m + n = n + m :=
nat.rec_on n
(show m + 0 = 0 + m, from
calc m + 0
= m : by rw nat.add_zero
... = 0 + m : by rw nat.zero_add )
(assume n,
assume HI : m + n = n + m,
show m + n.succ = n.succ + m, from
calc
m + succ n
= succ (m + n) : by rw add_succ
... = succ (n + m) : by rw HI
... = succ n + m : by rw succ_add)
-- 6th proof
example : m + n = n + m :=
nat.rec_on n
(show m + 0 = 0 + m, by rw [nat.zero_add, nat.add_zero])
(assume n,
assume HI : m + n = n + m,
calc
m + succ n = succ (m + n) : rfl
... = succ (n + m) : by rw HI
... = succ n + m : by rw succ_add)
-- 7th proof
example : m + n = n + m :=
nat.rec_on n
(by simp only [nat.zero_add, nat.add_zero])
(λ n HI, by simp only [add_succ, HI, succ_add])
-- 8th proof
example : m + n = n + m :=
nat.rec_on n
(by simp)
(λ n HI, by simp [add_succ, HI, succ_add])
-- 9th proof
example : m + n = n + m :=
-- by library_search
nat.add_comm m n
-- 10th proof
example : m + n = n + m :=
-- by hint
by finish
-- 11th proof
example : m + n = n + m :=
by linarith
-- 12th proof
example : m + n = n + m :=
by nlinarith
-- 13th proof
example : m + n = n + m :=
by ring
-- 14th proof
example : m + n = n + m :=
by omega
-- 15th proof
lemma conmutativa : ∀ m n : ℕ, m + n = n + m
| m 0 := by simp
| m (n+1) := by simp [add_succ, conmutativa m n, succ_add]
-- 16th proof
lemma conmutativa2 : ∀ m n : ℕ, m + n = n + m
| m 0 := by simp only [nat.add_zero, nat.zero_add]
| m (n+1) := by simp only [nat.add_zero, add_succ, conmutativa2 m n, succ_add]
|
Require Import coqutil.Z.Lia.
Require Import coqutil.Word.Naive.
Require Import coqutil.Word.Properties.
Require Coq.Strings.String. Open Scope string_scope.
Require Import Coq.Lists.List. Import ListNotations.
Require Import riscv.Utility.InstructionCoercions. Open Scope ilist_scope.
Require Import riscv.Spec.Machine.
Require Import riscv.Spec.Decode.
Require Import riscv.Spec.PseudoInstructions.
Require Import riscv.Utility.RegisterNames.
Require Import Coq.ZArith.BinInt.
Require Import riscv.Utility.Utility.
Require Import riscv.Platform.Memory.
Require Import riscv.Platform.MinimalCSRsDet.
Require Import riscv.Platform.Run.
Require Import riscv.Utility.Monads.
Require Import riscv.Utility.MonadNotations.
Require Import riscv.Utility.MkMachineWidth.
Require Import riscv.Utility.Encode.
Require Import coqutil.Map.Interface.
Require Import riscv.Utility.Words32Naive.
Require Import riscv.Utility.DefaultMemImpl32.
Require Import coqutil.Map.Z_keyed_SortedListMap.
Require Import riscv.Utility.ExtensibleRecords. Import HnatmapNotations. Open Scope hnatmap_scope.
Require coqutil.Map.SortedList.
Require Import riscv.Examples.SoftmulInsts.
Require Import riscv.Platform.LogInstructionTrace.
(* note: these numbers must fit into a 12-bit immediate, and should be small if we want to simulate
execution inside Coq *)
Definition main_start: Z := 0.
Definition handler_start: Z := 32.
Definition handler_stack_start: Z := 512.
Definition heap_start: Z := 640.
(* mem[heap_start+8] := mem[heap_start] * mem[heap_start+4] *)
Definition main_insts := [[
Lw t1 zero heap_start;
Lw t2 zero (heap_start+4);
Mul t3 t1 t2;
Sw zero t3 (heap_start+8)
]].
Definition input1: Z := 5.
Definition input2: Z := 13.
Definition initial_datamem: Mem := Eval vm_compute in
let m := map.of_tuple (Memory.footprint (word.of_Z handler_stack_start) 300)
(HList.tuple.unfoldn id 300 Byte.x00) in
let m := unchecked_store_bytes 4 m (word.of_Z heap_start) (LittleEndian.split 4 input1) in
unchecked_store_bytes 4 m (word.of_Z (heap_start+4)) (LittleEndian.split 4 input2).
Definition putProgram(m: Mem)(addr: Z)(prog: list Instruction): Mem :=
unchecked_store_byte_list (word.of_Z addr) (RiscvMachine.Z32s_to_bytes (List.map encode prog)) m.
Definition initial_mem: Mem := Eval vm_compute in
putProgram (putProgram initial_datamem main_start main_insts) handler_start handler_insts.
Definition FieldNames: natmap Type := MinimalCSRsDet.Fields (natmap.put nil exectrace ExecTrace).
Definition State: Type := hnatmap FieldNames.
Definition initial_regs :=
map.of_tuple (key:=Z) (HList.tuple.unfoldn (Z.add 1) 31 1) (HList.tuple.unfoldn id 31 (word.of_Z (word:=Naive.word 32) 0)).
Definition initial_state: State := HNil
[regs := initial_regs]
[pc := word.of_Z 0]
[nextPc := word.of_Z 4]
[mem := initial_mem]
[log := nil]
[csrs := map.of_list ((CSRField.MTVecBase, (handler_start/4)) ::
(CSRField.MScratch, handler_stack_start) :: nil)]
[exectrace := nil].
#[global] Instance IsRiscvMachine: RiscvProgram (StateAbortFail State) word :=
AddExecTrace FieldNames (MinimalCSRsDet.IsRiscvMachine FieldNames).
(* success flag * final state *)
Fixpoint run(fuel: nat)(s: State): bool * State :=
match fuel with
| O => (true, s)
| S fuel' => match Run.run1 RV32I s with
| (Some _, s') => run fuel' s'
| (None, s') => (false, s')
end
end.
Definition trace(fuel: nat): bool * ExecTrace :=
match run fuel initial_state with
| (isSuccess, final) => (isSuccess, final[exectrace])
end.
(* fails, but that's expected because it runs past the last instruction of main into unmapped memory
Eval vm_compute in trace 1000. *)
Goal Memory.loadWord initial_state[mem] (word.of_Z (heap_start+8)) = Some (LittleEndian.split 4 0).
Proof. reflexivity. Qed.
Goal Memory.loadWord (snd (run 1000 initial_state))[mem] (word.of_Z (heap_start+8)) =
Some (LittleEndian.split 4 (input1 * input2)).
Proof. vm_compute. reflexivity. Qed.
|
------------------------------------------------------------------------
-- The Agda standard library
--
-- The type for booleans and some operations
------------------------------------------------------------------------
{-# OPTIONS --without-K --safe #-}
module Data.Bool.Base where
open import Data.Unit.Base using (⊤)
open import Data.Empty
open import Level using (Level)
private
variable
a : Level
A : Set a
------------------------------------------------------------------------
-- The boolean type
open import Agda.Builtin.Bool public
------------------------------------------------------------------------
-- Relations
infix 4 _≤_ _<_
data _≤_ : Bool → Bool → Set where
f≤t : false ≤ true
b≤b : ∀ {b} → b ≤ b
data _<_ : Bool → Bool → Set where
f<t : false < true
------------------------------------------------------------------------
-- Boolean operations
infixr 6 _∧_
infixr 5 _∨_ _xor_
not : Bool → Bool
not true = false
not false = true
_∧_ : Bool → Bool → Bool
true ∧ b = b
false ∧ b = false
_∨_ : Bool → Bool → Bool
true ∨ b = true
false ∨ b = b
_xor_ : Bool → Bool → Bool
true xor b = not b
false xor b = b
------------------------------------------------------------------------
-- Other operations
infix 0 if_then_else_
if_then_else_ : Bool → A → A → A
if true then t else f = t
if false then t else f = f
-- A function mapping true to an inhabited type and false to an empty
-- type.
T : Bool → Set
T true = ⊤
T false = ⊥
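-- For example (an illustration only, not part of this module), a value of
-- type T b is evidence that b is true; with `tt` from Data.Unit.Base one
-- could write:
--
--   T-true : T true
--   T-true = tt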
|
import Lean
import Smt.Reconstruction.Certifying.Boolean
import Smt.Reconstruction.Certifying.Pull
namespace Smt.Reconstruction.Certifying
open Lean Elab.Tactic Meta
def congDupOr (i : Nat) (nm : Ident) (last : Bool) : TacticM Syntax :=
match i with
| 0 =>
if last then `(dupOr₂ $nm)
else `(dupOr $nm)
| (i' + 1) => do
let nm' := mkIdent (Name.mkSimple "w")
let r ← congDupOr i' nm' last
let r: Term := ⟨r⟩
`(congOrLeft (fun $nm' => $r) $nm)
-- i: the index fixed in the original list
-- j: the index of li.head! in the original list
def loop (i j n : Nat) (pivot : Expr) (li : List Expr) (nm : Ident) : TacticM Ident :=
match li with
| [] => return nm
| e::es =>
if e == pivot then do
-- step₁: move expr that is equal to the pivot to position i + 1
let step₁ ←
if j > i + 1 then
let fname ← mkIdent <$> mkFreshId
let e ← getTypeFromName nm.getId
let t ← instantiateMVars e
pullToMiddleCore (i + 1) j nm t fname
pure fname
else pure nm
-- step₂: apply congOrLeft i times with dupOr
let step₂: Ident ← do
let last := i + 1 == n - 1
let tactic ← congDupOr i step₁ last
let tactic := ⟨tactic⟩
let fname ← mkIdent <$> mkFreshId
evalTactic (← `(tactic| have $fname := $tactic))
pure fname
loop i j (n - 1) pivot es step₂
else loop i (j + 1) n pivot es nm
def factorCore (type : Expr) (source : Ident) (suffixIdx : Nat) : TacticM Unit :=
withMainContext do
let initialLength := getLength type
let mut li := collectPropsInOrChain' suffixIdx type
let n := li.length
let mut answer := source
for i in List.range n do
li := List.drop i li
match li with
| [] => break
| e::es => do
answer ← loop i (i + 1) (li.length + i) e es answer
let e ← getTypeFromName answer.getId
let t ← instantiateMVars e
let newLength := getLength t
let propsDropped := initialLength - newLength
li := collectPropsInOrChain' (suffixIdx - propsDropped) t
evalTactic (← `(tactic| exact $answer))
syntax (name := factor) "factor" term (",")? (term)? : tactic
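-- Usage (informal sketch): `factor h` factors out all duplicated disjuncts of
-- the or-chain proved by `h`; `factor h, i` additionally passes `i` as the
-- suffix index handed to `factorCore` (see the examples at the end of this file).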
def parseFactor : Syntax → TacticM (Option Nat)
| `(tactic| factor $_) => pure none
| `(tactic| factor $_, $i) => elabTerm i none >>= pure ∘ getNatLit?
| _ => throwError "[factor]: wrong usage"
@[tactic factor] def evalFactor : Tactic := fun stx => do
/- let startTime ← IO.monoMsNow -/
withMainContext do
let e ← elabTerm stx[1] none
let type ← inferType e
let lastSuffix := getLength type - 1
let source := ⟨stx[1]⟩
let sufIdx :=
match (← parseFactor stx) with
| none => lastSuffix
| some i => i
factorCore type source sufIdx
/- let endTime ← IO.monoMsNow -/
/- logInfo m!"[factor] Time taken: {endTime - startTime}ms" -/
example : A ∨ A ∨ A ∨ A ∨ B ∨ A ∨ B ∨ A ∨ C ∨ B ∨ C ∨ B ∨ A → A ∨ B ∨ C :=
by intro h
factor h
example : (A ∨ B ∨ C) ∨ (A ∨ B ∨ C) → A ∨ B ∨ C := by
intro h
factor h, 1
example : (A ∨ B ∨ C) ∨ (E ∨ F) ∨ (A ∨ B ∨ C) ∨ (E ∨ F) → (A ∨ B ∨ C) ∨ (E ∨ F) := by
intro h
factor h, 3
end Smt.Reconstruction.Certifying
|
netmind.db = function( DS, Y=NULL, plotdata=FALSE ) {
netmind.dir = project.datadirectory("bio.snowcrab", "data", "netmind" )
netmind.rawdata.location = file.path( netmind.dir, "archive" )
if (!is.null(Y)) {
iY = which( Y>=1999 ) # no historical data prior to 1998
if (length(iY)==0) return ("No data for specified years")
Y = Y[iY]
}
if(DS =='esonar2netmind.conversion') {
if(is.null(Y) | any(Y < 2014)) stop('This only begins in 2014')
for(y in Y) {
#Changing to convert esonar directly to netmind. -Brent
esonar.raw.location = file.path(project.datadirectory("bio.snowcrab", "data", "esonar", "archive" ), y)
flist = list.files(path=esonar.raw.location, full.names=T, recursive=FALSE)
for(fl in flist){
esonar2netmind(fl)
}
}
}
# -----------------------
if ( DS %in% c("basedata", "metadata", "load") ) {
if (DS=="basedata" ){
flist = list.files(path=netmind.dir, pattern="basedata", full.names=T, recursive=FALSE)
if (!is.null(Y)) {
mm = NULL
for (yy in Y ) {
ll = grep( yy, flist)
if (length(ll)>0 ) mm = c( mm, ll)
}
if (length(mm) > 0 ) flist= flist[mm]
}
out = NULL
for ( i in flist ) {
load( i )
out= rbind( out, basedata )
}
return( out )
}
if (DS=="metadata" ){
flist = list.files(path=netmind.dir, pattern="metadata", full.names=T, recursive=FALSE)
if (!is.null(Y)) {
mm = NULL
for (yy in Y ) {
ll = grep( yy, flist)
if (length(ll)>0 ) mm = c( mm, ll)
}
if (length(mm) > 0 ) flist= flist[mm]
}
out = NULL
for ( i in flist ) {
load( i )
out= rbind( out, metadata )
}
return( out )
}
# default is to "load"
#
if (any( Y < 2004) ) {
print( "Net metrics and bottom contact stats (distance towed) were processed manually by Gulf Region until 2004 ")
print( "and now stored in 'SNTOWS'. This is therefore redundant for historical data and only fills in time of ")
print( "bottom contact, etc for the sake of completeness" )
}
dirlist = list.files(path=netmind.rawdata.location, full.names=T, recursive=T)
# process every data file ... even bad tows .. marginal overhead in order to be complete (sometimes file names are wrong)
nfiles = length(dirlist)
filelist = matrix( NA, ncol=3, nrow=nfiles)
for (f in 1:nfiles) {
yr = netmindDate( fnNetmind=dirlist[f] )
if ( is.null(yr) ) next()
if ( yr %in% Y ) filelist[f,] = c( f, dirlist[f], yr )
}
filelist = filelist[ which( !is.na( filelist[,1] ) ) , ]
set = snowcrab.db( DS="setInitial" ) # UTC
for ( yr in Y ) {
print(yr)
fn.meta = file.path( netmind.dir, paste( "netmind", "metadata", yr, "rdata", sep="." ) )
fn.raw = file.path( netmind.dir, paste( "netmind", "basedata", yr, "rdata", sep="." ) )
fs = filelist[ which( as.numeric(filelist[,3])==yr ) , 2 ]
if (length(fs)==0) next()
basedata = NULL
metadata = NULL
for (f in 1:length(fs) ) {
j = load.netmind.rawdata( fs[f], f=f, set=set ) # variable naming conventions in the past
if (is.null(j)) next()
metadata = rbind( metadata, j$metadata)
basedata = rbind( basedata, j$basedata)
}
save( metadata, file=fn.meta, compress=TRUE )
save( basedata, file=fn.raw, compress=TRUE )
}
# now that it is complete, refresh the set/uid lookup table
netmind.db( DS="set.netmind.lookuptable.redo" )
return ( netmind.dir )
}
# ------------------
if (DS %in% c("stats", "stats.redo" ) ) {
if (DS %in% c("stats") ){
flist = list.files(path=netmind.dir, pattern="stats", full.names=T, recursive=FALSE)
if (!is.null(Y)) { # if Y is declared then subset ... default is to return all
mm = NULL
for (yy in Y ) {
ll = grep( yy, flist)
if (length(ll)>0 ) mm = c( mm, ll)
}
if (length(mm) > 0 ) flist= flist[mm]
}
netmind.stat = NULL
for ( i in flist ) {
load( i )
netmind.stat = rbind( netmind.stat, Stats )
}
netmind.stat$yr = NULL
nm = netmind.db( DS="set.netmind.lookuptable" )
res = merge( nm, netmind.stat, by="netmind_uid", all.x=TRUE, all.y=FALSE, sort=FALSE )
# not really required but just in case missing values cause confusion with rbind
#res$t0 = as.POSIXct( res$t0, origin=lubridate::origin, tz="UTC" )
#res$t1 = as.POSIXct( res$t1, origin=lubridate::origin, tz="UTC" )
#res$dt = difftime( res$t1, res$t0 ) # reset in case time info gets lost with rbind
return (res)
}
# "stats.redo" is the default action
# bring in stats from each data stream and then calculate netmind stats
# bring in minilog and seabird data that has t0, t1 times for start and stop of bottom contact
set = snowcrab.db( DS="setInitial") # UTC
sbStats = seabird.db( DS="stats" )
sbv = c('trip','set', "z", "zsd", "t", "tsd", "n", "t0", "t1", "dt" )
set_sb = merge( set[, c("trip", "set") ], sbStats[,sbv], by=c("trip","set"), all.x=TRUE, all.y=FALSE, sort=FALSE )
mlStats = minilog.db( DS="stats" )
mlv = c('trip','set', "z", "zsd", "t", "tsd", "n", "t0", "t1", "dt" )
set_ml = merge( set[, c("trip", "set") ], mlStats[,mlv], by=c("trip","set"), all.x=TRUE, all.y=FALSE, sort=FALSE )
set = merge( set, set_sb, by=c("trip", "set" ), all.x=TRUE, all.y=FALSE, sort=FALSE, suffixes=c("", ".sb" ) )
set = merge( set, set_ml, by=c("trip", "set" ), all.x=TRUE, all.y=FALSE, sort=FALSE, suffixes=c("", ".ml" ))
# use seabird data as the standard, replace with minilog data where missing
ii = which(!is.finite( set$t0) )
if (length(ii) > 0 ) set$t0[ ii] = set$t0.ml[ii]
ii = which(!is.finite( set$t1) )
if (length(ii) > 0 ) set$t1[ ii] = set$t1.ml[ii]
ii = which(!is.finite( set$z) )
if (length(ii) > 0 ) set$z[ ii] = set$z.ml[ii]
ii = which(!is.finite( set$zsd) )
if (length(ii) > 0 ) set$zsd[ ii] = set$zsd.ml[ii]
ii = which(!is.finite( set$t) )
if (length(ii) > 0 ) set$t[ ii] = set$t.ml[ii]
ii = which(!is.finite( set$tsd) )
if (length(ii) > 0 ) set$tsd[ ii] = set$tsd.ml[ii]
ii = which(!is.finite( set$dt) )
if (length(ii) > 0 ) set$dt[ ii] = set$dt.ml[ii]
tokeep = grep( "\\.ml$", colnames(set), invert=TRUE )
set = set[, tokeep]
set$n = NULL
nm = netmind.db( DS="set.netmind.lookuptable" )
set = merge( set, nm, by=c("trip","set"), all.x=T, all.y=F, sort=F, suffixes=c("", ".netmind") )
# add more data .. t0,t1, dt where missing and width and SA estimates where possible
for ( yr in Y ) {
print(yr)
fn = file.path( netmind.dir, paste( "netmind.stats", yr, "rdata", sep=".") )
Stats = NULL
basedata = netmind.db( DS="basedata", Y=yr )
ii = which( set$yr==yr & !is.na(set$netmind_uid) )
nii = length( ii )
if ( nii== 0 ) next()
rid = set[ ii,]
Stats = NULL
for ( i in 1:nii ){
print(i)
id = rid$netmind_uid[i]
print(rid[i,])
bdi = which( basedata$netmind_uid==id )
if (length(bdi) < 5 ) next()
l = net.configuration( basedata[ bdi ,], t0=rid$t0[i], t1=rid$t1[i], set_timestamp=rid$timestamp[i], yr=yr, plotdata=plotdata )
#if(is.na(l$surfacearea))browser()
l$netmind_uid = id
# not really required but just in case missing values cause confusion with rbind
Stats = rbind( Stats, l )
}
if (is.null(Stats)) next()
Stats$t0 = as.POSIXct(Stats$t0, origin=lubridate::origin, tz="UTC" )
Stats$t1 = as.POSIXct(Stats$t1, origin=lubridate::origin, tz="UTC")
Stats$dt = difftime( Stats$t1, Stats$t0 )
save( Stats, file=fn, compress=TRUE )
}
return ( netmind.dir )
}
# -------------------
if (DS %in% c("set.netmind.lookuptable", "set.netmind.lookuptable.redo") ) {
fn = file.path( netmind.dir, "set.netmind.lookuptable.rdata" )
if (DS=="set.netmind.lookuptable" ) {
B = NULL
if ( file.exists( fn) ) load (fn)
return (B)
}
B = netmind.db( DS="metadata" )
# double check .. should not be necessary .. but in case
uuid = paste( B$trip, B$set, sep="." )
dups = which( duplicated( uuid) )
if (length(dups) > 0 ) {
toremove =NULL
for (i in dups) {
di = which( uuid == uuid[i] )
w <- B[di,]
tdiff = difftime( B$set_timestamp[di], B$netmind_timestamp[di])
oo = which.min( abs( tdiff) )
toremove = c(toremove, di[-oo] )
print("----")
print( "Matching based upon closest time stamps")
print(B[di, ])
print( "Choosing: ")
print(B[di[oo], ])
print("")
}
B = B[-toremove, ]
}
# double check .. should not be necessary .. but in case
B = B[ , c("trip", "set", "netmind_uid" )]
save(B, file=fn, compress=TRUE )
return(fn)
}
}
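# Example calls (a sketch only; they assume the bio.snowcrab data directories
# and the upstream set/seabird/minilog tables are already in place):
#   netmind.db( DS="basedata", Y=2017 )    # raw netmind records for 2017
#   netmind.db( DS="stats", Y=2015:2017 )  # per-set net configuration stats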
|
SUBROUTINE GDMPLT2 ( grid, kx, ky, ix1, iy1, ix2, iy2, ixinc,
+ istag, iyinc, color, positn, rmin, rmax,
+ cint, iret )
C************************************************************************
C* GDMPLT2 *
C* *
C* This subroutine plots grid data for GDMAP. *
C* *
C* GDMPLT2 ( GRID, KX, KY, IX1, IY1, IX2, IY2, IXINC, ISTAG, IYINC, *
C* COLOR, POSITN, RMIN, RMAX, CINT, IRET ) *
C* *
C* Input parameters: *
C* GRID (KX, KY) REAL Grid data *
C* KX INTEGER Number of points in x dir *
C* KY INTEGER Number of points in y dir *
C* IX1 INTEGER First point in x dir *
C* IY1 INTEGER First point in y dir *
C* IX2 INTEGER Last point in x dir *
C* IY2 INTEGER Last point in y dir *
C* IXINC INTEGER Increment in x dir *
C* ISTAG INTEGER Increment for stagger *
C* IYINC INTEGER Increment in y dir *
C* COLOR CHAR* Color *
C* POSITN CHAR* Position number *
C* RMIN REAL Minimum valid value *
C* RMAX REAL Maximum valid value *
C* CINT CHAR* Contour interval string *
C* *
C* Output parameters: *
C* IRET INTEGER Return code *
C* 0 = normal return *
C** *
C* Log: *
C* M. desJardins/GSFC 4/85 *
C* G. Huffman/GSC 1/89 Dont plot colors less than 1 *
C* M. desJardins/GSFC 2/91 Added valid range *
C* S. Jacobs/NMC 9/94 Added staggered plotting *
C************************************************************************
INCLUDE 'GEMPRM.PRM'
C*
REAL grid ( kx, ky )
CHARACTER*(*) color, positn, cint
C*
CHARACTER pstr*10, clbl(LLCLEV)*24
REAL clvl (LLCLEV)
C*
INTEGER iposx (9), iposy (9) , icolor(LLCLEV)
LOGICAL found
DATA iposx / 0, -1, -1, 3, 3, -1, 3, 0, 0 /
DATA iposy / 0, 2, 0, 2, 0, -2, -2, 4, -4 /
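C
C* Note: iposx/iposy hold the x/y character offsets used for position
C* numbers 0 - 8; position 0 uses a zero offset, so the value is plotted
C* at the grid point itself.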
C*
INCLUDE 'ERMISS.FNC'
C------------------------------------------------------------------------
iret = 0
C
C* Get range of data to plot
C
CALL IN_INTC ( cint, rmin, rmax, clvl, nclvl, clbl,
+ rint, cmin, cmax, iret )
C
C* Set the color for the plot.
C
CALL IN_COLR ( color, nclvl, icolor, ier )
C
C* Get the offsets for the position number.
C
CALL ST_ILST ( positn, '/', 0, 1, ipos, n, ier )
IF ( ( ipos .lt. 0 ) .or. ( ipos .gt. 8 ) ) ipos = 0
iyoff = iposy ( ipos + 1 )
ixoff = iposx ( ipos + 1 )
ix = ixoff
C
C* Loop through the grid.
C
ixstrt = ix1
DO j = iy1, iy2, iyinc
fy = FLOAT (j)
C*
DO i = ixstrt, ix2, ixinc
fx = FLOAT (i)
d = grid ( i, j )
IF ( .not. ERMISS (d) ) THEN
jj = 1
found = .false.
DO WHILE ( ( .not. found ) .and.
+ ( jj .le. nclvl ) )
IF ( d .lt. clvl(jj) ) THEN
found = .true.
ELSE
jj = jj + 1
END IF
END DO
C*
IF ( found .and. ( icolor(jj) .ne. 0 ) ) THEN
CALL GSCOLR ( icolor(jj), ier )
id = NINT (d)
CALL ST_INCH ( id, pstr, ier )
CALL ST_LSTR ( pstr, len, ier )
IF ( ( ipos .eq. 0 ) .or. ( ipos .eq. 7 ) .or.
+ ( ipos .eq. 8 ) ) THEN
ix = ixoff - len + 1
ELSE IF ( (ipos.eq.1) .or. (ipos.eq.2) .or.
+ (ipos.eq.5) ) THEN
ix = ixoff - 2 * len
END IF
CALL GQTEXT ( itxfn, itxhw, sztext, itxwid,
+ ibrdr, irrotn, ijust, iret )
CALL GTEXT ( 'G', fx, fy, pstr, 0.0, ix,
+ iyoff, ier )
END IF
END IF
END DO
IF ( ixstrt .eq. ix1 ) THEN
ixstrt = ixstrt + istag
ELSE
ixstrt = ix1
END IF
END DO
C*
RETURN
END
|
/*
@copyright Louis Dionne 2015
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)
*/
#include <boost/hana.hpp>
#include <boost/hana/assert.hpp>
#include <boost/hana/bool.hpp>
#include <boost/hana/concept/comparable.hpp>
#include <boost/hana/concept/foldable.hpp>
#include <boost/hana/functional/always.hpp>
#include <boost/hana/functional/placeholder.hpp>
#include <boost/hana/concept/logical.hpp>
#include <boost/hana/optional.hpp>
#include <boost/hana/concept/searchable.hpp>
#include <laws/base.hpp>
#include <laws/foldable.hpp>
#include <laws/searchable.hpp>
#include <cstddef>
using namespace boost::hana;
template <typename T, std::size_t n>
using array = T[n];
int main() {
// We can't check the laws because builtin arrays can't be passed
// to functions.
//////////////////////////////////////////////////////////////////////////
// Foldable
//////////////////////////////////////////////////////////////////////////
{
int a[] = {1};
int b[] = {1, 2};
int c[] = {1, 2, 3};
int d[] = {1, 2, 3, 4};
// unpack
{
test::_injection<0> f{};
BOOST_HANA_RUNTIME_CHECK(equal(
unpack(a, f),
f(1)
));
BOOST_HANA_RUNTIME_CHECK(equal(
unpack(b, f),
f(1, 2)
));
BOOST_HANA_RUNTIME_CHECK(equal(
unpack(c, f),
f(1, 2, 3)
));
BOOST_HANA_RUNTIME_CHECK(equal(
unpack(d, f),
f(1, 2, 3, 4)
));
}
static_assert(Foldable<int[3]>::value, "");
}
//////////////////////////////////////////////////////////////////////////
// Searchable
//////////////////////////////////////////////////////////////////////////
{
// any_of
{
static_assert(
not_(any_of(array<int, 1>{0}, _ == 1))
, "");
static_assert(
any_of(array<int, 2>{0, 1}, _ == 0)
, "");
static_assert(
any_of(array<int, 2>{0, 1}, _ == 1)
, "");
static_assert(
not_(any_of(array<int, 2>{0, 1}, _ == 2))
, "");
static_assert(
any_of(array<int, 3>{0, 1, 2}, _ == 0)
, "");
static_assert(
any_of(array<int, 3>{0, 1, 2}, _ == 1)
, "");
static_assert(
any_of(array<int, 3>{0, 1, 2}, _ == 2)
, "");
static_assert(
not_(any_of(array<int, 3>{0, 1, 2}, _ == 3))
, "");
}
// find_if
// Note: Because we need the predicate to return a Constant, this
// is not very powerful.
{
static_assert(equal(
find_if(array<int, 1>{0}, always(true_c)),
just(0)
), "");
BOOST_HANA_CONSTANT_CHECK(equal(
find_if(array<int, 1>{0}, always(false_c)),
nothing
));
}
static_assert(Searchable<int[3]>::value, "");
}
}
|
/*!
@file
Defines `boost::hana::sort`.
@copyright Louis Dionne 2013-2017
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)
*/
#ifndef BOOST_HANA_SORT_HPP
#define BOOST_HANA_SORT_HPP
#include <boost/hana/fwd/sort.hpp>
#include <boost/hana/at.hpp>
#include <boost/hana/concept/sequence.hpp>
#include <boost/hana/config.hpp>
#include <boost/hana/core/dispatch.hpp>
#include <boost/hana/core/make.hpp>
#include <boost/hana/detail/nested_by.hpp> // required by fwd decl
#include <boost/hana/length.hpp>
#include <boost/hana/less.hpp>
#include <utility> // std::declval, std::index_sequence
BOOST_HANA_NAMESPACE_BEGIN
//! @cond
template <typename Xs, typename Predicate>
constexpr auto sort_t::operator()(Xs&& xs, Predicate&& pred) const {
using S = typename hana::tag_of<Xs>::type;
using Sort = BOOST_HANA_DISPATCH_IF(sort_impl<S>,
hana::Sequence<S>::value
);
#ifndef BOOST_HANA_CONFIG_DISABLE_CONCEPT_CHECKS
static_assert(hana::Sequence<S>::value,
"hana::sort(xs, predicate) requires 'xs' to be a Sequence");
#endif
return Sort::apply(static_cast<Xs&&>(xs),
static_cast<Predicate&&>(pred));
}
template <typename Xs>
constexpr auto sort_t::operator()(Xs&& xs) const {
using S = typename hana::tag_of<Xs>::type;
using Sort = BOOST_HANA_DISPATCH_IF(sort_impl<S>,
hana::Sequence<S>::value
);
#ifndef BOOST_HANA_CONFIG_DISABLE_CONCEPT_CHECKS
static_assert(hana::Sequence<S>::value,
"hana::sort(xs) requires 'xs' to be a Sequence");
#endif
return Sort::apply(static_cast<Xs&&>(xs));
}
//! @endcond
namespace detail {
template <typename Xs, typename Pred>
struct sort_predicate {
template <std::size_t I, std::size_t J>
using apply = decltype(std::declval<Pred>()(
hana::at_c<I>(std::declval<Xs>()),
hana::at_c<J>(std::declval<Xs>())
));
};
template <typename Pred, std::size_t Insert, bool IsInsertionPoint,
typename Left,
std::size_t ...Right>
struct insert;
// We did not find the insertion point; continue processing elements
// recursively.
template <
typename Pred, std::size_t Insert,
std::size_t ...Left,
std::size_t Right1, std::size_t Right2, std::size_t ...Right
>
struct insert<Pred, Insert, false,
std::index_sequence<Left...>,
Right1, Right2, Right...
> {
using type = typename insert<
Pred, Insert, (bool)Pred::template apply<Insert, Right2>::value,
std::index_sequence<Left..., Right1>,
Right2, Right...
>::type;
};
// We did not find the insertion point, but there is only one element
// left. We insert at the end of the list, and we're done.
template <typename Pred, std::size_t Insert, std::size_t ...Left, std::size_t Last>
struct insert<Pred, Insert, false, std::index_sequence<Left...>, Last> {
using type = std::index_sequence<Left..., Last, Insert>;
};
// We found the insertion point, we're done.
template <typename Pred, std::size_t Insert, std::size_t ...Left, std::size_t ...Right>
struct insert<Pred, Insert, true, std::index_sequence<Left...>, Right...> {
using type = std::index_sequence<Left..., Insert, Right...>;
};
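        // Illustration (not part of the algorithm itself): if the predicate
        // happens to order index I before index J exactly when I < J,
        // inserting 2 into the already-sorted indices <0, 1, 3> unfolds as
        //   insert<Pred, 2, false, index_sequence<>,     0, 1, 3>
        //   insert<Pred, 2, false, index_sequence<0>,    1, 3>
        //   insert<Pred, 2, true,  index_sequence<0, 1>, 3>
        // and yields index_sequence<0, 1, 2, 3>.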
template <typename Pred, typename Result, std::size_t ...T>
struct insertion_sort_impl;
template <typename Pred,
std::size_t Result1, std::size_t ...Result,
std::size_t T, std::size_t ...Ts>
struct insertion_sort_impl<Pred, std::index_sequence<Result1, Result...>, T, Ts...> {
using type = typename insertion_sort_impl<
Pred,
typename insert<
Pred, T, (bool)Pred::template apply<T, Result1>::value,
std::index_sequence<>,
Result1, Result...
>::type,
Ts...
>::type;
};
template <typename Pred, std::size_t T, std::size_t ...Ts>
struct insertion_sort_impl<Pred, std::index_sequence<>, T, Ts...> {
using type = typename insertion_sort_impl<
Pred, std::index_sequence<T>, Ts...
>::type;
};
template <typename Pred, typename Result>
struct insertion_sort_impl<Pred, Result> {
using type = Result;
};
template <typename Pred, typename Indices>
struct sort_helper;
template <typename Pred, std::size_t ...i>
struct sort_helper<Pred, std::index_sequence<i...>> {
using type = typename insertion_sort_impl<
Pred, std::index_sequence<>, i...
>::type;
};
} // end namespace detail
template <typename S, bool condition>
struct sort_impl<S, when<condition>> : default_ {
template <typename Xs, std::size_t ...i>
static constexpr auto apply_impl(Xs&& xs, std::index_sequence<i...>) {
return hana::make<S>(hana::at_c<i>(static_cast<Xs&&>(xs))...);
}
template <typename Xs, typename Pred>
static constexpr auto apply(Xs&& xs, Pred const&) {
constexpr std::size_t Len = decltype(hana::length(xs))::value;
using Indices = typename detail::sort_helper<
detail::sort_predicate<Xs&&, Pred>,
std::make_index_sequence<Len>
>::type;
return apply_impl(static_cast<Xs&&>(xs), Indices{});
}
template <typename Xs>
static constexpr auto apply(Xs&& xs)
{ return sort_impl::apply(static_cast<Xs&&>(xs), hana::less); }
};
BOOST_HANA_NAMESPACE_END
#endif // !BOOST_HANA_SORT_HPP
|
(* Title: HOL/Auth/n_flash_nodata_cub_lemma_on_inv__57.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The n_flash_nodata_cub Protocol Case Study*}
theory n_flash_nodata_cub_lemma_on_inv__57 imports n_flash_nodata_cub_base
begin
section{*All lemmas on causal relation between inv__57 and some rule r*}
lemma n_PI_Remote_GetVsinv__57:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_PI_Remote_Get src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_PI_Remote_Get src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_PI_Remote_GetXVsinv__57:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_PI_Remote_GetX src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_PI_Remote_GetX src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_NakVsinv__57:
assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Nak dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Nak dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(dst=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Nak__part__0Vsinv__57:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Nak__part__0 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Nak__part__0 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Nak__part__1Vsinv__57:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Nak__part__1 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Nak__part__1 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Nak__part__2Vsinv__57:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Nak__part__2 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Nak__part__2 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Get__part__0Vsinv__57:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Get__part__0 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Get__part__0 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Get__part__1Vsinv__57:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Get__part__1 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Get__part__1 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Field (Ident ''Sta'') ''HomeUniMsg'') ''Cmd'')) (Const UNI_PutX)) (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''Pending'')) (Const false))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Put_HeadVsinv__57:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Put_Head N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Put_Head N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_PutVsinv__57:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Put src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Put src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Put_DirtyVsinv__57:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Put_Dirty src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Put_Dirty src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_Get_NakVsinv__57:
assumes a1: "(\<exists> src dst. src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_Get_Nak src dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src dst where a1:"src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_Get_Nak src dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>dst~=p__Inv4)\<or>(src~=p__Inv4\<and>dst=p__Inv4)\<or>(src~=p__Inv4\<and>dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>dst~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_Get_Nak_HomeVsinv__57:
assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_Get_Nak_Home dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Remote_Get_Nak_Home dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(dst=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(dst~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_Get_PutVsinv__57:
assumes a1: "(\<exists> src dst. src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_Get_Put src dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src dst where a1:"src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_Get_Put src dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>dst~=p__Inv4)\<or>(src~=p__Inv4\<and>dst=p__Inv4)\<or>(src~=p__Inv4\<and>dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>dst~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_Get_Put_HomeVsinv__57:
assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_Get_Put_Home dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Remote_Get_Put_Home dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(dst=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(dst~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_Nak__part__0Vsinv__57:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__0 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__0 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_Nak__part__1Vsinv__57:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__1 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__1 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_Nak__part__2Vsinv__57:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__2 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__2 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_GetX__part__0Vsinv__57:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_GetX__part__0 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_GetX__part__0 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_GetX__part__1Vsinv__57:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_GetX__part__1 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_GetX__part__1 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_1Vsinv__57:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_1 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_1 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_2Vsinv__57:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_2 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_2 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_3Vsinv__57:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_3 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_3 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_4Vsinv__57:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_4 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_4 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_5Vsinv__57:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_5 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_5 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_6Vsinv__57:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_6 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_6 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_7__part__0Vsinv__57:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_7__part__0 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_7__part__0 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_7__part__1Vsinv__57:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_7__part__1 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_7__part__1 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_7_NODE_Get__part__0Vsinv__57:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_7_NODE_Get__part__0 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_7_NODE_Get__part__0 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_7_NODE_Get__part__1Vsinv__57:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_7_NODE_Get__part__1 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_7_NODE_Get__part__1 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_8_HomeVsinv__57:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_8_Home N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_8_Home N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_8_Home_NODE_GetVsinv__57:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_8_Home_NODE_Get N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_8_Home_NODE_Get N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_8Vsinv__57:
assumes a1: "(\<exists> src pp. src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_8 N src pp)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src pp where a1:"src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_8 N src pp" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>pp~=p__Inv4)\<or>(src~=p__Inv4\<and>pp=p__Inv4)\<or>(src~=p__Inv4\<and>pp~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>pp~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_8_NODE_GetVsinv__57:
assumes a1: "(\<exists> src pp. src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_8_NODE_Get N src pp)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src pp where a1:"src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_8_NODE_Get N src pp" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>pp~=p__Inv4)\<or>(src~=p__Inv4\<and>pp=p__Inv4)\<or>(src~=p__Inv4\<and>pp~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>pp~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_9__part__0Vsinv__57:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_9__part__0 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_9__part__0 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_9__part__1Vsinv__57:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_9__part__1 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_9__part__1 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_10_HomeVsinv__57:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_10_Home N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_10_Home N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_10Vsinv__57:
assumes a1: "(\<exists> src pp. src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_10 N src pp)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src pp where a1:"src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_10 N src pp" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>pp~=p__Inv4)\<or>(src~=p__Inv4\<and>pp=p__Inv4)\<or>(src~=p__Inv4\<and>pp~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>pp~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_11Vsinv__57:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_11 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_11 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_GetX_NakVsinv__57:
assumes a1: "(\<exists> src dst. src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_GetX_Nak src dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src dst where a1:"src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_GetX_Nak src dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>dst~=p__Inv4)\<or>(src~=p__Inv4\<and>dst=p__Inv4)\<or>(src~=p__Inv4\<and>dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>dst~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_GetX_Nak_HomeVsinv__57:
assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_GetX_Nak_Home dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Remote_GetX_Nak_Home dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(dst=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(dst~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_GetX_PutXVsinv__57:
assumes a1: "(\<exists> src dst. src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_GetX_PutX src dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src dst where a1:"src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_GetX_PutX src dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>dst~=p__Inv4)\<or>(src~=p__Inv4\<and>dst=p__Inv4)\<or>(src~=p__Inv4\<and>dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>dst~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_GetX_PutX_HomeVsinv__57:
assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_GetX_PutX_Home dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Remote_GetX_PutX_Home dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(dst=p__Inv4)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''Cmd'')) (Const UNI_Get)) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''Proc'') p__Inv4) ''CacheState'')) (Const CACHE_E))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(dst~=p__Inv4)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (andForm (eqn (IVar (Field (Field (Ident ''Sta'') ''HomeUniMsg'') ''Cmd'')) (Const UNI_GetX)) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''Cmd'')) (Const UNI_Get))) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''HomeProc'')) (Const false))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_PutVsinv__57:
assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_Put dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Remote_Put dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(dst=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_PutXVsinv__57:
assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_PutX dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Remote_PutX dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(dst=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_PI_Local_Get_GetVsinv__57:
assumes a1: "(r=n_PI_Local_Get_Get )" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "?P1 s"
proof(cut_tac a1 a2 , auto) qed
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_PI_Local_GetX_GetX__part__0Vsinv__57:
assumes a1: "(r=n_PI_Local_GetX_GetX__part__0 )" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "?P1 s"
proof(cut_tac a1 a2 , auto) qed
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_PI_Local_GetX_GetX__part__1Vsinv__57:
assumes a1: "(r=n_PI_Local_GetX_GetX__part__1 )" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "?P1 s"
proof(cut_tac a1 a2 , auto) qed
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_NI_Nak_HomeVsinv__57:
assumes a1: "(r=n_NI_Nak_Home )" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "?P1 s"
proof(cut_tac a1 a2 , auto) qed
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_NI_Local_PutVsinv__57:
assumes a1: "(r=n_NI_Local_Put )" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "?P1 s"
proof(cut_tac a1 a2 , auto) qed
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_NI_Local_PutXAcksDoneVsinv__57:
assumes a1: "(r=n_NI_Local_PutXAcksDone )" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__57 p__Inv4" apply fastforce done
have "?P1 s"
proof(cut_tac a1 a2 , auto) qed
then show "invHoldForRule s f r (invariants N)" by auto
qed
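(* The remaining rules are discharged uniformly by noEffectOnRule: roughly,
each of them leaves the state components constrained by inv__57 untouched,
so the invariant is preserved without a case split on p__Inv4. *)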
lemma n_PI_Local_GetX_PutX__part__0Vsinv__57:
assumes a1: "r=n_PI_Local_GetX_PutX__part__0 " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_WbVsinv__57:
assumes a1: "r=n_NI_Wb " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvAck_3Vsinv__57:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_InvAck_3 N src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvAck_1Vsinv__57:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_InvAck_1 N src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Remote_ReplaceVsinv__57:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_PI_Remote_Replace src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_ReplaceVsinv__57:
assumes a1: "r=n_PI_Local_Replace " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvAck_existsVsinv__57:
assumes a1: "\<exists> src pp. src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_InvAck_exists src pp" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Remote_PutXVsinv__57:
assumes a1: "\<exists> dst. dst\<le>N\<and>r=n_PI_Remote_PutX dst" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvVsinv__57:
assumes a1: "\<exists> dst. dst\<le>N\<and>r=n_NI_Inv dst" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_PutXVsinv__57:
assumes a1: "r=n_PI_Local_PutX " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_Get_PutVsinv__57:
assumes a1: "r=n_PI_Local_Get_Put " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_ShWbVsinv__57:
assumes a1: "r=n_NI_ShWb N " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_GetX_PutX_HeadVld__part__0Vsinv__57:
assumes a1: "r=n_PI_Local_GetX_PutX_HeadVld__part__0 N " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_ReplaceVsinv__57:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_Replace src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_GetX_PutX__part__1Vsinv__57:
assumes a1: "r=n_PI_Local_GetX_PutX__part__1 " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvAck_exists_HomeVsinv__57:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_InvAck_exists_Home src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_Replace_HomeVsinv__57:
assumes a1: "r=n_NI_Replace_Home " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_Nak_ClearVsinv__57:
assumes a1: "r=n_NI_Nak_Clear " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvAck_2Vsinv__57:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_InvAck_2 N src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_GetX_PutX_HeadVld__part__1Vsinv__57:
assumes a1: "r=n_PI_Local_GetX_PutX_HeadVld__part__1 N " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_FAckVsinv__57:
assumes a1: "r=n_NI_FAck " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__57 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
end
|
lemma sgn_scaleR: "sgn (scaleR r x) = scaleR (sgn r) (sgn x)" for x :: "'a::real_normed_vector" |
% $Id$ %
\subsection{Rockblox}
\screenshot{plugins/images/ss-rockblox}{Rockblox}{fig:rockblox}
Rockblox is a Rockbox version of the classic falling blocks game from Russia.
The aim of the game is to make the falling blocks of different shapes
form full rows. Whenever a row is completed, it will be cleared away, and you
gain points. For every ten lines completed, the game level increases, making
the blocks fall faster. If the pile of blocks reaches the ceiling, the game is over.
\begin{btnmap}
\nopt{SANSA_FUZEPLUS_PAD,SAMSUNG_YH92X_PAD}{
\opt{RECORDER_PAD}{\ButtonFOne}
\opt{PLAYER_PAD}{\ButtonStop+\ButtonMenu}
\opt{IRIVER_H100_PAD,IRIVER_H300_PAD}{\ButtonOn}
\opt{IPOD_4G_PAD,IPOD_3G_PAD}{\ButtonSelect+\ButtonPlay}
\opt{IAUDIO_X5_PAD,IRIVER_H10_PAD,GIGABEAT_S_PAD}{\ButtonPlay}
\opt{SANSA_E200_PAD,SANSA_C200_PAD}{\ButtonRec}
\opt{SANSA_CLIP_PAD}{\ButtonHome}
\opt{SANSA_FUZE_PAD}{\ButtonSelect+\ButtonUp}
\opt{GIGABEAT_PAD}{\ButtonA}
\opt{MROBE100_PAD}{\ButtonDisplay}
\opt{ONDIO_PAD}{\ButtonMenu+\ButtonOff}
\opt{COWON_D2_PAD}{\ButtonMenu}
\opt{PBELL_VIBE500_PAD}{\ButtonCancel}
\opt{MPIO_HD300_PAD}{\ButtonRec}
\opt{SAMSUNG_YH820_PAD}{\ButtonRec}
\opt{HAVEREMOTEKEYMAP}{& }
& Restart game\\
}
\opt{PLAYER_PAD,RECORDER_PAD,ONDIO_PAD,IRIVER_H100_PAD,IRIVER_H300_PAD%
,IAUDIO_X5_PAD,SANSA_E200_PAD,SANSA_FUZE_PAD,SANSA_C200_PAD,SANSA_CLIP_PAD%
,GIGABEAT_PAD,GIGABEAT_S_PAD,MROBE100_PAD,IPOD_4G_PAD,IPOD_3G_PAD%
,IRIVER_H10_PAD,PBELL_VIBE500_PAD,SANSA_FUZEPLUS_PAD,SAMSUNG_YH92X_PAD%
,SAMSUNG_YH820_PAD}
{\ButtonLeft}
\opt{COWON_D2_PAD}{\TouchMidLeft}
\opt{MPIO_HD300_PAD}{\ButtonRew}
\opt{HAVEREMOTEKEYMAP}{& }
& Move left\\
\opt{PLAYER_PAD,RECORDER_PAD,ONDIO_PAD,IRIVER_H100_PAD,IRIVER_H300_PAD%
,IAUDIO_X5_PAD,SANSA_E200_PAD,SANSA_FUZE_PAD,SANSA_C200_PAD,SANSA_CLIP_PAD%
,GIGABEAT_PAD,GIGABEAT_S_PAD,MROBE100_PAD,IPOD_4G_PAD,IPOD_3G_PAD%
,IRIVER_H10_PAD,PBELL_VIBE500_PAD,SANSA_FUZEPLUS_PAD,SAMSUNG_YH92X_PAD%
,SAMSUNG_YH820_PAD}
{\ButtonRight}
\opt{COWON_D2_PAD}{\TouchMidRight}
\opt{MPIO_HD300_PAD}{\ButtonFF}
\opt{HAVEREMOTEKEYMAP}{& }
& Move right\\
\opt{PLAYER_PAD}{\ButtonMenu}
\opt{RECORDER_PAD,ONDIO_PAD,IRIVER_H100_PAD,IRIVER_H300_PAD,IAUDIO_X5_PAD%
,SANSA_E200_PAD,SANSA_FUZE_PAD,SANSA_C200_PAD,SANSA_CLIP_PAD,GIGABEAT_PAD%
,GIGABEAT_S_PAD,MROBE100_PAD,PBELL_VIBE500_PAD}
{\ButtonDown}
\opt{SANSA_FUZEPLUS_PAD}{\ButtonSelect}
\opt{IPOD_4G_PAD,IPOD_3G_PAD}{\ButtonPlay}
\opt{IRIVER_H10_PAD}{\ButtonScrollDown}
\opt{COWON_D2_PAD}{\TouchBottomMiddle}
\opt{MPIO_HD300_PAD}{\ButtonEnter}
\opt{SAMSUNG_YH92X_PAD}{\ButtonRew}
\opt{SAMSUNG_YH820_PAD}{\ButtonFF}
\opt{HAVEREMOTEKEYMAP}{& }
& Move down\\
\opt{PLAYER_PAD}{\ButtonOn+\ButtonPlay}
\opt{RECORDER_PAD}{\ButtonPlay}
\opt{ONDIO_PAD}{\ButtonMenu+\ButtonUp}
\opt{IRIVER_H100_PAD,IRIVER_H300_PAD,IAUDIO_X5_PAD}{\ButtonSelect}
\opt{scrollwheel}{\ButtonScrollBack}
\opt{IAUDIO_X5_PAD}{\ButtonPower}
\opt{IRIVER_H10_PAD}{\ButtonRew}
\opt{SANSA_C200_PAD,SANSA_CLIP_PAD}{\ButtonVolDown}
\opt{GIGABEAT_PAD,GIGABEAT_S_PAD}{\ButtonVolUp}
\opt{SANSA_FUZEPLUS_PAD}{\ButtonVolDown{}; \ButtonBottomLeft}
\opt{MROBE100_PAD,PBELL_VIBE500_PAD}{\ButtonMenu}
\opt{COWON_D2_PAD}{\TouchBottomLeft}
\opt{MPIO_HD300_PAD}{\ButtonScrollUp}
\opt{SAMSUNG_YH92X_PAD,SAMSUNG_YH820_PAD}{\ButtonUp}
\opt{HAVEREMOTEKEYMAP}{& }
& Rotate anticlockwise\\
\opt{PLAYER_PAD}{\ButtonPlay}
\opt{RECORDER_PAD,ONDIO_PAD,IRIVER_H100_PAD,IRIVER_H300_PAD,IAUDIO_X5_PAD}
{\ButtonUp}
\opt{IPOD_4G_PAD,IPOD_3G_PAD}{\ButtonScrollFwd{} / \ButtonMenu}
\opt{SANSA_E200_PAD,SANSA_FUZE_PAD}{\ButtonScrollFwd}
\opt{IRIVER_H10_PAD}{\ButtonScrollUp}
\opt{SANSA_C200_PAD,SANSA_CLIP_PAD}{\ButtonVolUp/\ButtonUp}
\opt{GIGABEAT_PAD,GIGABEAT_S_PAD}{\ButtonVolDown}
\opt{MROBE100_PAD,PBELL_VIBE500_PAD}{\ButtonPlay}
\opt{COWON_D2_PAD}{\TouchBottomRight{} / \TouchTopMiddle }
\opt{MPIO_HD300_PAD}{\ButtonScrollDown}
\opt{SANSA_FUZEPLUS_PAD}{\ButtonVolUp{}; \ButtonBottomRight}
\opt{SAMSUNG_YH92X_PAD,SAMSUNG_YH820_PAD}{\ButtonDown}
\opt{HAVEREMOTEKEYMAP}{& }
& Rotate clockwise\\
\opt{PLAYER_PAD,RECORDER_PAD}{\ButtonOn}
\opt{ONDIO_PAD}{\ButtonMenu}
\opt{IRIVER_H100_PAD,IRIVER_H300_PAD}{\ButtonMode}
\opt{IPOD_4G_PAD,IPOD_3G_PAD,SANSA_E200_PAD,SANSA_FUZE_PAD,SANSA_C200_PAD,SANSA_CLIP_PAD%
,GIGABEAT_PAD,GIGABEAT_S_PAD,MROBE100_PAD}{\ButtonSelect}
\opt{IAUDIO_X5_PAD}{\ButtonRec}
\opt{IRIVER_H10_PAD}{\ButtonFF}
\opt{COWON_D2_PAD}{\TouchCenter}
\opt{PBELL_VIBE500_PAD}{\ButtonOK}
\opt{MPIO_HD300_PAD,SAMSUNG_YH92X_PAD,SAMSUNG_YH820_PAD}{\ButtonPlay}
\opt{SANSA_FUZEPLUS_PAD}{\ButtonDown}
\opt{HAVEREMOTEKEYMAP}{& }
& Drop\\
\opt{hold_button}{
\ButtonHold{} switch
\opt{HAVEREMOTEKEYMAP}{& }
& Pause\\
}
\opt{PLAYER_PAD}{\ButtonStop}
\opt{RECORDER_PAD,ONDIO_PAD,IRIVER_H100_PAD,IRIVER_H300_PAD}{\ButtonOff}
\opt{IPOD_4G_PAD,IPOD_3G_PAD}{\ButtonMenu+\ButtonSelect}
\opt{IAUDIO_X5_PAD,IRIVER_H10_PAD,SANSA_E200_PAD,SANSA_C200_PAD,SANSA_CLIP_PAD,GIGABEAT_PAD,MROBE100_PAD}{\ButtonPower}
\opt{SANSA_FUZE_PAD}{Long \ButtonHome}
\opt{GIGABEAT_S_PAD}{\ButtonBack}
\opt{COWON_D2_PAD,SANSA_FUZEPLUS_PAD}{\ButtonPower}
\opt{PBELL_VIBE500_PAD}{\ButtonRec}
\opt{SAMSUNG_YH92X_PAD}{\ButtonFF}
\opt{SAMSUNG_YH820_PAD}{\ButtonRew}
\opt{MPIO_HD300_PAD}{Long \ButtonMenu}
\opt{HAVEREMOTEKEYMAP}{&
\opt{IRIVER_RC_H100_PAD}{\ButtonRCStop}
}
& Quit\\
\end{btnmap}
|
#ifndef BOOST_LEAF_HPP_INCLUDED
#define BOOST_LEAF_HPP_INCLUDED
// Copyright (c) 2018-2020 Emil Dotchevski and Reverge Studios, Inc.
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#include <boost/leaf/capture.hpp>
#include <boost/leaf/common.hpp>
#include <boost/leaf/context.hpp>
#include <boost/leaf/error.hpp>
#include <boost/leaf/exception.hpp>
#include <boost/leaf/handle_errors.hpp>
#include <boost/leaf/on_error.hpp>
#include <boost/leaf/pred.hpp>
#include <boost/leaf/result.hpp>
#endif
|
-- Andreas, 2016-08-08, issue #2132 reported by effectfully
-- Pattern synonyms in lhss of display form definitions
-- {-# OPTIONS -v scope:50 -v tc.decl:10 #-}
open import Common.Equality
data D : Set where
C c : D
g : D → D
pattern C′ = C
{-# DISPLAY C′ = C′ #-}
{-# DISPLAY g C′ = c #-}
-- Since pattern synonyms are now expanded on lhs of DISPLAY,
-- this behaves as
-- {-# DISPLAY C = C′ #-}
-- {-# DISPLAY g C = c #-}
test : C ≡ g C
test = refl
-- Expected error:
-- C′ != c of type D
-- when checking that the expression refl has type C′ ≡ c
|
section \<open>During-execution security\<close>
theory During_Execution
imports Bisim Language_Semantics begin
subsection \<open>Basic setting\<close>
locale PL_Indis = PL tval aval
for
tval :: "'test \<Rightarrow> 'state \<Rightarrow> bool" and
aval :: "'atom \<Rightarrow> 'state \<Rightarrow> 'state"
+
fixes
indis :: "'state rel"
assumes
equiv_indis: "equiv UNIV indis"
(*******************************************)
context PL_Indis
begin
abbreviation indisAbbrev (infix "\<approx>" 50)
where "s1 \<approx> s2 \<equiv> (s1,s2) \<in> indis"
definition indisE (infix "\<approx>e" 50) where
"se1 \<approx>e se2 \<equiv>
case (se1,se2) of
(Inl s1, Inl s2) \<Rightarrow> s1 \<approx> s2
|(Inr err1, Inr err2) \<Rightarrow> err1 = err2"
lemma refl_indis: "refl indis"
and trans_indis: "trans indis"
and sym_indis: "sym indis"
using equiv_indis unfolding equiv_def by auto
lemma indis_refl[intro]: "s \<approx> s"
using refl_indis unfolding refl_on_def by simp
lemma indis_trans: "\<lbrakk>s \<approx> s'; s' \<approx> s''\<rbrakk> \<Longrightarrow> s \<approx> s''"
using trans_indis unfolding trans_def by blast
lemma indis_sym: "s \<approx> s' \<Longrightarrow> s' \<approx> s"
using sym_indis unfolding sym_def by blast
subsection\<open>Compatibility and discreetness\<close>
definition compatTst where
"compatTst tst \<equiv>
\<forall> s t. s \<approx> t \<longrightarrow> tval tst s = tval tst t"
definition compatAtm where
"compatAtm atm \<equiv>
\<forall> s t. s \<approx> t \<longrightarrow> aval atm s \<approx> aval atm t"
(* \<approx>-preservation: *)
definition presAtm where
"presAtm atm \<equiv>
\<forall> s. s \<approx> aval atm s"
coinductive discr where
intro:
"\<lbrakk>\<And> s c' s'. (c,s) \<rightarrow>c (c',s') \<Longrightarrow> s \<approx> s' \<and> discr c';
\<And> s s'. (c,s) \<rightarrow>t s' \<Longrightarrow> s \<approx> s'\<rbrakk>
\<Longrightarrow> discr c"
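(* Reading the coinductive definition above: discr c holds when every
continuation step of c preserves indistinguishability of the state and
reaches a discreet residual command, and every termination step of c
likewise ends in a state indistinguishable from its predecessor. *)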
lemma presAtm_compatAtm[simp]:
assumes "presAtm atm"
shows "compatAtm atm"
using assms unfolding compatAtm_def
by (metis presAtm_def indis_sym indis_trans)
text\<open>Coinduction for discreetness:\<close>
lemma discr_coind:
assumes *: "phi c" and
**: "\<And> c s c' s'. \<lbrakk>phi c; (c,s) \<rightarrow>c (c',s')\<rbrakk> \<Longrightarrow> s \<approx> s' \<and> (phi c' \<or> discr c')" and
***: "\<And> c s s'. \<lbrakk>phi c; (c,s) \<rightarrow>t s'\<rbrakk> \<Longrightarrow> s \<approx> s'"
shows "discr c"
using * apply - apply(erule discr.coinduct) using ** *** by auto
lemma discr_raw_coind:
assumes *: "phi c" and
**: "\<And> c s c' s'. \<lbrakk>phi c; (c,s) \<rightarrow>c (c',s')\<rbrakk> \<Longrightarrow> s \<approx> s' \<and> phi c'" and
***: "\<And> c s s'. \<lbrakk>phi c; (c,s) \<rightarrow>t s'\<rbrakk> \<Longrightarrow> s \<approx> s'"
shows "discr c"
using * apply - apply(erule discr_coind) using ** *** by blast+
text\<open>Discreetness versus transition:\<close>
lemma discr_transC:
assumes *: "discr c" and **: "(c,s) \<rightarrow>c (c',s')"
shows "discr c'"
using * apply - apply(erule discr.cases) using ** by blast
lemma discr_MtransC:
assumes "discr c" and "(c,s) \<rightarrow>*c (c',s')"
shows "discr c'"
proof-
have "(c,s) \<rightarrow>*c (c',s') \<Longrightarrow> discr c \<longrightarrow> discr c'"
apply(erule MtransC_induct2) using discr_transC by blast+
thus ?thesis using assms by blast
qed
lemma discr_transC_indis:
assumes *: "discr c" and **: "(c,s) \<rightarrow>c (c',s')"
shows "s \<approx> s'"
using * apply - apply(erule discr.cases) using ** by blast
lemma discr_MtransC_indis:
assumes "discr c" and "(c,s) \<rightarrow>*c (c',s')"
shows "s \<approx> s'"
proof-
have "(c,s) \<rightarrow>*c (c',s') \<Longrightarrow> discr c \<longrightarrow> s \<approx> s'"
apply(erule MtransC_induct2)
apply (metis indis_refl)
by (metis discr.cases discr_MtransC indis_trans)
thus ?thesis using assms by blast
qed
lemma discr_transT:
assumes *: "discr c" and **: "(c,s) \<rightarrow>t s'"
shows "s \<approx> s'"
using * apply - apply(erule discr.cases) using ** by blast
lemma discr_MtransT:
assumes *: "discr c" and **: "(c,s) \<rightarrow>*t s'"
shows "s \<approx> s'"
proof-
obtain d' t' where
cs: "(c,s) \<rightarrow>*c (d',t')" and d't': "(d',t') \<rightarrow>t s'"
using ** by(rule MtransT_invert2)
hence "s \<approx> t'" using * discr_MtransC_indis by blast
moreover
{have "discr d'" using cs * discr_MtransC by blast
hence "t' \<approx> s'" using d't' discr_transT by blast
}
ultimately show ?thesis using indis_trans by blast
qed
subsection\<open>Terminating-interactive discreetness\<close>
coinductive discr0 where
intro:
"\<lbrakk>\<And> s c' s'. \<lbrakk>mustT c s; (c,s) \<rightarrow>c (c',s')\<rbrakk> \<Longrightarrow> s \<approx> s' \<and> discr0 c';
\<And> s s'. \<lbrakk>mustT c s; (c,s) \<rightarrow>t s'\<rbrakk> \<Longrightarrow> s \<approx> s'\<rbrakk>
\<Longrightarrow> discr0 c"
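(* discr0 relativizes discr to terminating executions: only steps taken from
states satisfying mustT c s (read as: c must terminate from s) are required
to preserve indistinguishability and discreetness of the residual command. *)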
text\<open>Coinduction for 0-discreetness:\<close>
lemma discr0_coind[consumes 1, case_names Cont Term, induct pred: discr0]:
assumes *: "phi c" and
**: "\<And> c s c' s'.
\<lbrakk>mustT c s; phi c; (c,s) \<rightarrow>c (c',s')\<rbrakk> \<Longrightarrow>
s \<approx> s' \<and> (phi c' \<or> discr0 c')" and
***: "\<And> c s s'. \<lbrakk>mustT c s; phi c; (c,s) \<rightarrow>t s'\<rbrakk> \<Longrightarrow> s \<approx> s'"
shows "discr0 c"
using * apply - apply(erule discr0.coinduct) using ** *** by auto
lemma discr0_raw_coind[consumes 1, case_names Cont Term]:
assumes *: "phi c" and
**: "\<And> c s c' s'. \<lbrakk>mustT c s; phi c; (c,s) \<rightarrow>c (c',s')\<rbrakk> \<Longrightarrow> s \<approx> s' \<and> phi c'" and
***: "\<And> c s s'. \<lbrakk>mustT c s; phi c; (c,s) \<rightarrow>t s'\<rbrakk> \<Longrightarrow> s \<approx> s'"
shows "discr0 c"
using * apply - apply(erule discr0_coind) using ** *** by blast+
text\<open>0-Discreetness versus transition:\<close>
lemma discr0_transC:
assumes *: "discr0 c" and **: "mustT c s" "(c,s) \<rightarrow>c (c',s')"
shows "discr0 c'"
using * apply - apply(erule discr0.cases) using ** by blast
lemma discr0_MtransC:
assumes "discr0 c" and "mustT c s" "(c,s) \<rightarrow>*c (c',s')"
shows "discr0 c'"
proof-
have "(c,s) \<rightarrow>*c (c',s') \<Longrightarrow> mustT c s \<and> discr0 c \<longrightarrow> discr0 c'"
apply(erule MtransC_induct2) using discr0_transC mustT_MtransC
by blast+
thus ?thesis using assms by blast
qed
lemma discr0_transC_indis:
assumes *: "discr0 c" and **: "mustT c s" "(c,s) \<rightarrow>c (c',s')"
shows "s \<approx> s'"
using * apply - apply(erule discr0.cases) using ** by blast
lemma discr0_MtransC_indis:
assumes "discr0 c" and "mustT c s" "(c,s) \<rightarrow>*c (c',s')"
shows "s \<approx> s'"
proof-
have "(c,s) \<rightarrow>*c (c',s') \<Longrightarrow> mustT c s \<and> discr0 c \<longrightarrow> s \<approx> s'"
apply(erule MtransC_induct2)
apply (metis indis_refl)
by (metis discr0_MtransC discr0_transC_indis indis_trans mustT_MtransC)
thus ?thesis using assms by blast
qed
lemma discr0_transT:
assumes *: "discr0 c" and **: "mustT c s" "(c,s) \<rightarrow>t s'"
shows "s \<approx> s'"
using * apply - apply(erule discr0.cases) using ** by blast
lemma discr0_MtransT:
assumes *: "discr0 c" and ***: "mustT c s" and **: "(c,s) \<rightarrow>*t s'"
shows "s \<approx> s'"
proof-
obtain d' t' where
cs: "(c,s) \<rightarrow>*c (d',t')" and d't': "(d',t') \<rightarrow>t s'"
using ** by(rule MtransT_invert2)
hence "s \<approx> t'" using * discr0_MtransC_indis *** by blast
moreover
{have "discr0 d'" using cs * discr0_MtransC *** by blast
hence "t' \<approx> s'"
using *** by (metis mustT_MtransC cs d't' discr0_transT)
}
ultimately show ?thesis using indis_trans by blast
qed
lemma discr_discr0[simp]: "discr c \<Longrightarrow> discr0 c"
by (induct rule: discr0_coind)
(metis discr_transC discr_transC_indis discr_transT)+
subsection\<open>Self-isomorphism\<close>
coinductive siso where
intro:
"\<lbrakk>\<And> s c' s'. (c,s) \<rightarrow>c (c',s') \<Longrightarrow> siso c';
\<And> s t c' s'. \<lbrakk>s \<approx> t; (c,s) \<rightarrow>c (c',s')\<rbrakk> \<Longrightarrow> \<exists> t'. s' \<approx> t' \<and> (c,t) \<rightarrow>c (c',t');
\<And> s t s'. \<lbrakk>s \<approx> t; (c,s) \<rightarrow>t s'\<rbrakk> \<Longrightarrow> \<exists> t'. s' \<approx> t' \<and> (c,t) \<rightarrow>t t'\<rbrakk>
\<Longrightarrow> siso c"
text\<open>Coinduction for self-isomorphism:\<close>
lemma siso_coind:
assumes *: "phi c" and
**: "\<And> c s c' s'. \<lbrakk>phi c; (c,s) \<rightarrow>c (c',s')\<rbrakk> \<Longrightarrow> phi c' \<or> siso c'" and
***: "\<And> c s t c' s'. \<lbrakk>phi c; s \<approx> t; (c,s) \<rightarrow>c (c',s')\<rbrakk> \<Longrightarrow> \<exists> t'. s' \<approx> t' \<and> (c,t) \<rightarrow>c (c',t')" and
****: "\<And> c s t s'. \<lbrakk>phi c; s \<approx> t; (c,s) \<rightarrow>t s'\<rbrakk> \<Longrightarrow> \<exists> t'. s' \<approx> t' \<and> (c,t) \<rightarrow>t t'"
shows "siso c"
using * apply - apply(erule siso.coinduct) using ** *** **** by auto
lemma siso_raw_coind:
assumes *: "phi c" and
**: "\<And> c s c' s'. \<lbrakk>phi c; (c,s) \<rightarrow>c (c',s')\<rbrakk> \<Longrightarrow> phi c'" and
***: "\<And> c s t c' s'. \<lbrakk>phi c; s \<approx> t; (c,s) \<rightarrow>c (c',s')\<rbrakk> \<Longrightarrow> \<exists> t'. s' \<approx> t' \<and> (c,t) \<rightarrow>c (c',t')" and
****: "\<And> c s t s'. \<lbrakk>phi c; s \<approx> t; (c,s) \<rightarrow>t s'\<rbrakk> \<Longrightarrow> \<exists> t'. s' \<approx> t' \<and> (c,t) \<rightarrow>t t'"
shows "siso c"
using * apply - apply(erule siso_coind) using ** *** **** by blast+
text\<open>Self-Isomorphism versus transition:\<close>
lemma siso_transC:
assumes *: "siso c" and **: "(c,s) \<rightarrow>c (c',s')"
shows "siso c'"
using * apply - apply(erule siso.cases) using ** by blast
lemma siso_MtransC:
assumes "siso c" and "(c,s) \<rightarrow>*c (c',s')"
shows "siso c'"
proof-
have "(c,s) \<rightarrow>*c (c',s') \<Longrightarrow> siso c \<longrightarrow> siso c'"
apply(erule MtransC_induct2) using siso_transC by blast+
thus ?thesis using assms by blast
qed
lemma siso_transC_indis:
assumes *: "siso c" and **: "(c,s) \<rightarrow>c (c',s')" and ***: "s \<approx> t"
shows "\<exists> t'. s' \<approx> t' \<and> (c,t) \<rightarrow>c (c',t')"
using * apply - apply(erule siso.cases) using ** *** by blast
lemma siso_transT:
assumes *: "siso c" and **: "(c,s) \<rightarrow>t s'" and ***: "s \<approx> t"
shows "\<exists> t'. s' \<approx> t' \<and> (c,t) \<rightarrow>t t'"
using * apply - apply(erule siso.cases) using ** *** by blast
subsection\<open>MustT-interactive self-isomorphism\<close>
coinductive siso0 where
intro:
"\<lbrakk>\<And> s c' s'. \<lbrakk>mustT c s; (c,s) \<rightarrow>c (c',s')\<rbrakk> \<Longrightarrow> siso0 c';
\<And> s t c' s'.
\<lbrakk>mustT c s; mustT c t; s \<approx> t; (c,s) \<rightarrow>c (c',s')\<rbrakk> \<Longrightarrow>
\<exists> t'. s' \<approx> t' \<and> (c,t) \<rightarrow>c (c',t');
\<And> s t s'.
\<lbrakk>mustT c s; mustT c t; s \<approx> t; (c,s) \<rightarrow>t s'\<rbrakk> \<Longrightarrow>
\<exists> t'. s' \<approx> t' \<and> (c,t) \<rightarrow>t t'\<rbrakk>
\<Longrightarrow> siso0 c"
text\<open>Coinduction for self-isomorphism:\<close>
lemma siso0_coind[consumes 1, case_names Indef Cont Term, induct pred: discr0]:
assumes *: "phi c" and
**: "\<And> c s c' s'. \<lbrakk>phi c; mustT c s; (c,s) \<rightarrow>c (c',s')\<rbrakk> \<Longrightarrow> phi c' \<or> siso0 c'" and
***: "\<And> c s t c' s'.
\<lbrakk>phi c; mustT c s; mustT c t; s \<approx> t; (c,s) \<rightarrow>c (c',s')\<rbrakk> \<Longrightarrow>
\<exists> t'. s' \<approx> t' \<and> (c,t) \<rightarrow>c (c',t')" and
****: "\<And> c s t s'.
\<lbrakk>mustT c s; mustT c t; phi c; s \<approx> t; (c,s) \<rightarrow>t s'\<rbrakk> \<Longrightarrow>
\<exists> t'. s' \<approx> t' \<and> (c,t) \<rightarrow>t t'"
shows "siso0 c"
using * apply - apply(erule siso0.coinduct) using ** *** **** by auto
lemma siso0_raw_coind[consumes 1, case_names Indef Cont Term]:
assumes *: "phi c" and
**: "\<And> c s c' s'. \<lbrakk>phi c; mustT c s; (c,s) \<rightarrow>c (c',s')\<rbrakk> \<Longrightarrow> phi c'" and
***: "\<And> c s t c' s'.
\<lbrakk>phi c; mustT c s; mustT c t; s \<approx> t; (c,s) \<rightarrow>c (c',s')\<rbrakk> \<Longrightarrow>
\<exists> t'. s' \<approx> t' \<and> (c,t) \<rightarrow>c (c',t')" and
****: "\<And> c s t s'.
\<lbrakk>phi c; mustT c s; mustT c t; s \<approx> t; (c,s) \<rightarrow>t s'\<rbrakk> \<Longrightarrow>
\<exists> t'. s' \<approx> t' \<and> (c,t) \<rightarrow>t t'"
shows "siso0 c"
using * apply - apply(erule siso0_coind) using ** *** **** by blast+
text\<open>Self-Isomorphism versus transition:\<close>
lemma siso0_transC:
assumes *: "siso0 c" and **: "mustT c s" "(c,s) \<rightarrow>c (c',s')"
shows "siso0 c'"
using * apply - apply(erule siso0.cases) using ** by blast
lemma siso0_MtransC:
assumes "siso0 c" and "mustT c s" and "(c,s) \<rightarrow>*c (c',s')"
shows "siso0 c'"
proof-
have "(c,s) \<rightarrow>*c (c',s') \<Longrightarrow> mustT c s \<and> siso0 c \<longrightarrow> siso0 c'"
apply(erule MtransC_induct2) using siso0_transC mustT_MtransC siso0_transC
by blast+
thus ?thesis using assms by blast
qed
lemma siso0_transC_indis:
assumes *: "siso0 c"
and **: "mustT c s" "mustT c t" "(c,s) \<rightarrow>c (c',s')"
and ***: "s \<approx> t"
shows "\<exists> t'. s' \<approx> t' \<and> (c,t) \<rightarrow>c (c',t')"
using * apply - apply(erule siso0.cases) using ** *** by blast
lemma siso0_transT:
assumes *: "siso0 c"
and **: "mustT c s" "mustT c t" "(c,s) \<rightarrow>t s'"
and ***: "s \<approx> t"
shows "\<exists> t'. s' \<approx> t' \<and> (c,t) \<rightarrow>t t'"
using * apply - apply(erule siso0.cases) using ** *** by blast
subsection\<open>Notions of bisimilarity\<close>
text\<open>Matchers:\<close>
(* Notations and conventions:
   - "<u>_<v>" means "match u by v", where u and v can be:
     C (one continuation step), MC (multiple continuation steps),
     ZOC (zero or one continuation steps),
     T (termination step), MT (multiple steps leading to termination). *)
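(* For example, in the definitions below, matchC_MC theta c d asks that a
continuation step of c taken from indistinguishable states be matched by a
\<rightarrow>*c-run of d ending in an indistinguishable state, with the
residual commands again related by theta; matchT_MT c d matches a
termination step of c by a terminating \<rightarrow>*t-run of d. *)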
definition matchC_C where
"matchC_C theta c d \<equiv>
\<forall> s t c' s'.
s \<approx> t \<and> (c,s) \<rightarrow>c (c',s')
\<longrightarrow>
(\<exists> d' t'. (d,t) \<rightarrow>c (d',t') \<and> s' \<approx> t' \<and> (c',d') \<in> theta)"
definition matchC_ZOC where
"matchC_ZOC theta c d \<equiv>
\<forall> s t c' s'.
s \<approx> t \<and> (c,s) \<rightarrow>c (c',s')
\<longrightarrow>
(s' \<approx> t \<and> (c',d) \<in> theta)
\<or>
(\<exists> d' t'. (d,t) \<rightarrow>c (d',t') \<and> s' \<approx> t' \<and> (c',d') \<in> theta)"
definition matchC_ZO where
"matchC_ZO theta c d \<equiv>
\<forall> s t c' s'.
s \<approx> t \<and> (c,s) \<rightarrow>c (c',s')
\<longrightarrow>
(s' \<approx> t \<and> (c',d) \<in> theta)
\<or>
(\<exists> d' t'. (d,t) \<rightarrow>c (d',t') \<and> s' \<approx> t' \<and> (c',d') \<in> theta)
\<or>
(\<exists> t'. (d,t) \<rightarrow>t t' \<and> s' \<approx> t' \<and> discr c')"
definition matchT_T where
"matchT_T c d \<equiv>
\<forall> s t s'.
s \<approx> t \<and> (c,s) \<rightarrow>t s'
\<longrightarrow>
(\<exists> t'. (d,t) \<rightarrow>t t' \<and> s' \<approx> t')"
definition matchT_ZO where
"matchT_ZO c d \<equiv>
\<forall> s t s'.
s \<approx> t \<and> (c,s) \<rightarrow>t s'
\<longrightarrow>
(s' \<approx> t \<and> discr d)
\<or>
(\<exists> d' t'. (d,t) \<rightarrow>c (d',t') \<and> s' \<approx> t' \<and> discr d')
\<or>
(\<exists> t'. (d,t) \<rightarrow>t t' \<and> s' \<approx> t')"
(* *)
definition matchC_MC where
"matchC_MC theta c d \<equiv>
\<forall> s t c' s'.
s \<approx> t \<and> (c,s) \<rightarrow>c (c',s')
\<longrightarrow>
(\<exists> d' t'. (d,t) \<rightarrow>*c (d',t') \<and> s' \<approx> t' \<and> (c',d') \<in> theta)"
definition matchC_TMC where
"matchC_TMC theta c d \<equiv>
\<forall> s t c' s'.
mustT c s \<and> mustT d t \<and> s \<approx> t \<and> (c,s) \<rightarrow>c (c',s')
\<longrightarrow>
(\<exists> d' t'. (d,t) \<rightarrow>*c (d',t') \<and> s' \<approx> t' \<and> (c',d') \<in> theta)"
definition matchC_M where
"matchC_M theta c d \<equiv>
\<forall> s t c' s'.
s \<approx> t \<and> (c,s) \<rightarrow>c (c',s')
\<longrightarrow>
(\<exists> d' t'. (d,t) \<rightarrow>*c (d',t') \<and> s' \<approx> t' \<and> (c',d') \<in> theta)
\<or>
(\<exists> t'. (d,t) \<rightarrow>*t t' \<and> s' \<approx> t' \<and> discr c')"
definition matchT_MT where
"matchT_MT c d \<equiv>
\<forall> s t s'.
s \<approx> t \<and> (c,s) \<rightarrow>t s'
\<longrightarrow>
(\<exists> t'. (d,t) \<rightarrow>*t t' \<and> s' \<approx> t')"
definition matchT_TMT where
"matchT_TMT c d \<equiv>
\<forall> s t s'.
mustT c s \<and> mustT d t \<and> s \<approx> t \<and> (c,s) \<rightarrow>t s'
\<longrightarrow>
(\<exists> t'. (d,t) \<rightarrow>*t t' \<and> s' \<approx> t')"
definition matchT_M where
"matchT_M c d \<equiv>
\<forall> s t s'.
s \<approx> t \<and> (c,s) \<rightarrow>t s'
\<longrightarrow>
(\<exists> d' t'. (d,t) \<rightarrow>*c (d',t') \<and> s' \<approx> t' \<and> discr d')
\<or>
(\<exists> t'. (d,t) \<rightarrow>*t t' \<and> s' \<approx> t')"
lemmas match_defs =
matchC_C_def
matchC_ZOC_def matchC_ZO_def
matchT_T_def matchT_ZO_def
matchC_MC_def matchC_M_def
matchT_MT_def matchT_M_def
matchC_TMC_def matchT_TMT_def
(* For convenience, indis-symmetric variations of the above definitions: *)
lemma matchC_C_def2:
"matchC_C theta d c =
(\<forall> s t d' t'.
s \<approx> t \<and> (d,t) \<rightarrow>c (d',t')
\<longrightarrow>
(\<exists> c' s'. (c,s) \<rightarrow>c (c',s') \<and> s' \<approx> t' \<and> (d',c') \<in> theta))"
unfolding matchC_C_def using indis_sym by blast
lemma matchC_ZOC_def2:
"matchC_ZOC theta d c=
(\<forall> s t d' t'.
s \<approx> t \<and> (d,t) \<rightarrow>c (d',t')
\<longrightarrow>
(s \<approx> t' \<and> (d',c) \<in> theta)
\<or>
(\<exists> c' s'. (c,s) \<rightarrow>c (c',s') \<and> s' \<approx> t' \<and> (d',c') \<in> theta))"
unfolding matchC_ZOC_def using indis_sym by blast
lemma matchC_ZO_def2:
"matchC_ZO theta d c =
(\<forall> s t d' t'.
s \<approx> t \<and> (d,t) \<rightarrow>c (d',t')
\<longrightarrow>
(s \<approx> t' \<and> (d',c) \<in> theta)
\<or>
(\<exists> c' s'. (c,s) \<rightarrow>c (c',s') \<and> s' \<approx> t' \<and> (d',c') \<in> theta)
\<or>
(\<exists> s'. (c,s) \<rightarrow>t s' \<and> s' \<approx> t' \<and> discr d'))"
unfolding matchC_ZO_def using indis_sym by blast
lemma matchT_T_def2:
"matchT_T d c =
(\<forall> s t t'.
s \<approx> t \<and> (d,t) \<rightarrow>t t'
\<longrightarrow>
(\<exists> s'. (c,s) \<rightarrow>t s' \<and> s' \<approx> t'))"
unfolding matchT_T_def using indis_sym by blast
lemma matchT_ZO_def2:
"matchT_ZO d c =
(\<forall> s t t'.
s \<approx> t \<and> (d,t) \<rightarrow>t t'
\<longrightarrow>
(s \<approx> t' \<and> discr c)
\<or>
(\<exists> c' s'. (c,s) \<rightarrow>c (c',s') \<and> s' \<approx> t' \<and> discr c')
\<or>
(\<exists> s'. (c,s) \<rightarrow>t s' \<and> s' \<approx> t'))"
unfolding matchT_ZO_def using indis_sym by blast
(* *)
lemma matchC_MC_def2:
"matchC_MC theta d c=
(\<forall> s t d' t'.
s \<approx> t \<and> (d,t) \<rightarrow>c (d',t')
\<longrightarrow>
(\<exists> c' s'. (c,s) \<rightarrow>*c (c',s') \<and> s' \<approx> t' \<and> (d',c') \<in> theta))"
unfolding matchC_MC_def using indis_sym by blast
lemma matchC_TMC_def2:
"matchC_TMC theta d c=
(\<forall> s t d' t'.
mustT c s \<and> mustT d t \<and> s \<approx> t \<and> (d,t) \<rightarrow>c (d',t')
\<longrightarrow>
(\<exists> c' s'. (c,s) \<rightarrow>*c (c',s') \<and> s' \<approx> t' \<and> (d',c') \<in> theta))"
unfolding matchC_TMC_def using indis_sym by blast
lemma matchC_M_def2:
"matchC_M theta d c =
(\<forall> s t d' t'.
s \<approx> t \<and> (d,t) \<rightarrow>c (d',t')
\<longrightarrow>
(\<exists> c' s'. (c,s) \<rightarrow>*c (c',s') \<and> s' \<approx> t' \<and> (d',c') \<in> theta)
\<or>
(\<exists> s'. (c,s) \<rightarrow>*t s' \<and> s' \<approx> t' \<and> discr d'))"
unfolding matchC_M_def using indis_sym by blast
lemma matchT_MT_def2:
"matchT_MT d c =
(\<forall> s t t'.
s \<approx> t \<and> (d,t) \<rightarrow>t t'
\<longrightarrow>
(\<exists> s'. (c,s) \<rightarrow>*t s' \<and> s' \<approx> t'))"
unfolding matchT_MT_def using indis_sym by blast
lemma matchT_TMT_def2:
"matchT_TMT d c =
(\<forall> s t t'.
mustT c s \<and> mustT d t \<and> s \<approx> t \<and> (d,t) \<rightarrow>t t'
\<longrightarrow>
(\<exists> s'. (c,s) \<rightarrow>*t s' \<and> s' \<approx> t'))"
unfolding matchT_TMT_def using indis_sym by blast
lemma matchT_M_def2:
"matchT_M d c =
(\<forall> s t t'.
s \<approx> t \<and> (d,t) \<rightarrow>t t'
\<longrightarrow>
(\<exists> c' s'. (c,s) \<rightarrow>*c (c',s') \<and> s' \<approx> t' \<and> discr c')
\<or>
(\<exists> s'. (c,s) \<rightarrow>*t s' \<and> s' \<approx> t'))"
unfolding matchT_M_def using indis_sym by blast
text\<open>Retracts:\<close>
(* Strong retract: *)
definition Sretr where
"Sretr theta \<equiv>
{(c,d).
matchC_C theta c d \<and>
matchT_T c d}"
(* Zero-one retract: *)
definition ZOretr where
"ZOretr theta \<equiv>
{(c,d).
matchC_ZO theta c d \<and>
matchT_ZO c d}"
(* Zero-one termination-sensitive retract: *)
definition ZOretrT where
"ZOretrT theta \<equiv>
{(c,d).
matchC_ZOC theta c d \<and>
matchT_T c d}"
(* Weak retract: *)
definition Wretr where
"Wretr theta \<equiv>
{(c,d).
matchC_M theta c d \<and>
matchT_M c d }"
(* Weak termination-sensitive retract: *)
definition WretrT where
"WretrT theta \<equiv>
{(c,d).
matchC_MC theta c d \<and>
matchT_MT c d}"
(* Weak terminating-interactive termination-sensitive retract: *)
definition RetrT where
"RetrT theta \<equiv>
{(c,d).
matchC_TMC theta c d \<and>
matchT_TMT c d}"
lemmas Retr_defs =
Sretr_def
ZOretr_def ZOretrT_def
Wretr_def WretrT_def
RetrT_def
text\<open>The associated bisimilarity relations:\<close>
definition Sbis where "Sbis \<equiv> bis Sretr"
definition ZObis where "ZObis \<equiv> bis ZOretr"
definition ZObisT where "ZObisT \<equiv> bis ZOretrT"
definition Wbis where "Wbis \<equiv> bis Wretr"
definition WbisT where "WbisT \<equiv> bis WretrT"
definition BisT where "BisT \<equiv> bis RetrT"
lemmas bis_defs =
Sbis_def
ZObis_def ZObisT_def
Wbis_def WbisT_def
BisT_def
abbreviation Sbis_abbrev (infix "\<approx>s" 55) where "c1 \<approx>s c2 \<equiv> (c1,c2) \<in> Sbis"
abbreviation ZObis_abbrev (infix "\<approx>01" 55) where "c1 \<approx>01 c2 \<equiv> (c1,c2) \<in> ZObis"
abbreviation ZObisT_abbrev (infix "\<approx>01T" 55) where "c1 \<approx>01T c2 \<equiv> (c1,c2) \<in> ZObisT"
abbreviation Wbis_abbrev (infix "\<approx>w" 55) where "c1 \<approx>w c2 \<equiv> (c1,c2) \<in> Wbis"
abbreviation WbisT_abbrev (infix "\<approx>wT" 55) where "c1 \<approx>wT c2 \<equiv> (c1,c2) \<in> WbisT"
abbreviation BisT_abbrev (infix "\<approx>T" 55) where "c1 \<approx>T c2 \<equiv> (c1,c2) \<in> BisT"
lemma mono_Retr:
"mono Sretr"
"mono ZOretr" "mono ZOretrT"
"mono Wretr" "mono WretrT"
"mono RetrT"
unfolding mono_def Retr_defs match_defs by blast+
(* Sbis: *)
lemma Sbis_prefix:
"Sbis \<subseteq> Sretr Sbis"
unfolding Sbis_def using mono_Retr bis_prefix by blast
lemma Sbis_sym: "sym Sbis"
unfolding Sbis_def using mono_Retr sym_bis by blast
lemma Sbis_Sym: "c \<approx>s d \<Longrightarrow> d \<approx>s c"
using Sbis_sym unfolding sym_def by blast
lemma Sbis_converse:
"((c,d) \<in> theta^-1 \<union> Sbis) = ((d,c) \<in> theta \<union> Sbis)"
by (metis Sbis_sym converseI converse_Un converse_converse sym_conv_converse_eq)
lemma
Sbis_matchC_C: "\<And> s t. c \<approx>s d \<Longrightarrow> matchC_C Sbis c d"
and
Sbis_matchT_T: "\<And> c d. c \<approx>s d \<Longrightarrow> matchT_T c d"
using Sbis_prefix unfolding Sretr_def by auto
lemmas Sbis_step = Sbis_matchC_C Sbis_matchT_T
lemma
Sbis_matchC_C_rev: "\<And> s t. s \<approx>s t \<Longrightarrow> matchC_C Sbis t s"
and
Sbis_matchT_T_rev: "\<And> s t. s \<approx>s t \<Longrightarrow> matchT_T t s"
using Sbis_step Sbis_sym unfolding sym_def by blast+
lemmas Sbis_step_rev = Sbis_matchC_C_rev Sbis_matchT_T_rev
lemma Sbis_coind:
assumes "sym theta" and "theta \<subseteq> Sretr (theta \<union> Sbis)"
shows "theta \<subseteq> Sbis"
using assms mono_Retr bis_coind
unfolding Sbis_def by blast
lemma Sbis_raw_coind:
assumes "sym theta" and "theta \<subseteq> Sretr theta"
shows "theta \<subseteq> Sbis"
using assms mono_Retr bis_raw_coind
unfolding Sbis_def by blast
lemma Sbis_coind2:
assumes "theta \<subseteq> Sretr (theta \<union> Sbis)" and
"theta ^-1 \<subseteq> Sretr ((theta ^-1) \<union> Sbis)"
shows "theta \<subseteq> Sbis"
using assms mono_Retr bis_coind2
unfolding Sbis_def by blast
lemma Sbis_raw_coind2:
assumes "theta \<subseteq> Sretr theta" and
"theta ^-1 \<subseteq> Sretr (theta ^-1)"
shows "theta \<subseteq> Sbis"
using assms mono_Retr bis_raw_coind2
unfolding Sbis_def by blast
(* ZObis: *)
lemma ZObis_prefix:
"ZObis \<subseteq> ZOretr ZObis"
unfolding ZObis_def using mono_Retr bis_prefix by blast
lemma ZObis_sym: "sym ZObis"
unfolding ZObis_def using mono_Retr sym_bis by blast
lemma ZObis_converse:
"((c,d) \<in> theta^-1 \<union> ZObis) = ((d,c) \<in> theta \<union> ZObis)"
by (metis ZObis_sym converseI converse_Un converse_converse sym_conv_converse_eq)
lemma ZObis_Sym: "s \<approx>01 t \<Longrightarrow> t \<approx>01 s"
using ZObis_sym unfolding sym_def by blast
lemma
ZObis_matchC_ZO: "\<And> s t. s \<approx>01 t \<Longrightarrow> matchC_ZO ZObis s t"
and
ZObis_matchT_ZO: "\<And> s t. s \<approx>01 t \<Longrightarrow> matchT_ZO s t"
using ZObis_prefix unfolding ZOretr_def by auto
lemmas ZObis_step = ZObis_matchC_ZO ZObis_matchT_ZO
lemma
ZObis_matchC_ZO_rev: "\<And> s t. s \<approx>01 t \<Longrightarrow> matchC_ZO ZObis t s"
and
ZObis_matchT_ZO_rev: "\<And> s t. s \<approx>01 t \<Longrightarrow> matchT_ZO t s"
using ZObis_step ZObis_sym unfolding sym_def by blast+
lemmas ZObis_step_rev = ZObis_matchC_ZO_rev ZObis_matchT_ZO_rev
lemma ZObis_coind:
assumes "sym theta" and "theta \<subseteq> ZOretr (theta \<union> ZObis)"
shows "theta \<subseteq> ZObis"
using assms mono_Retr bis_coind
unfolding ZObis_def by blast
lemma ZObis_raw_coind:
assumes "sym theta" and "theta \<subseteq> ZOretr theta"
shows "theta \<subseteq> ZObis"
using assms mono_Retr bis_raw_coind
unfolding ZObis_def by blast
lemma ZObis_coind2:
assumes "theta \<subseteq> ZOretr (theta \<union> ZObis)" and
"theta ^-1 \<subseteq> ZOretr ((theta ^-1) \<union> ZObis)"
shows "theta \<subseteq> ZObis"
using assms mono_Retr bis_coind2
unfolding ZObis_def by blast
lemma ZObis_raw_coind2:
assumes "theta \<subseteq> ZOretr theta" and
"theta ^-1 \<subseteq> ZOretr (theta ^-1)"
shows "theta \<subseteq> ZObis"
using assms mono_Retr bis_raw_coind2
unfolding ZObis_def by blast
(* ZObisT: *)
lemma ZObisT_prefix:
"ZObisT \<subseteq> ZOretrT ZObisT"
unfolding ZObisT_def using mono_Retr bis_prefix by blast
lemma ZObisT_sym: "sym ZObisT"
unfolding ZObisT_def using mono_Retr sym_bis by blast
lemma ZObisT_Sym: "s \<approx>01T t \<Longrightarrow> t \<approx>01T s"
using ZObisT_sym unfolding sym_def by blast
lemma ZObisT_converse:
"((c,d) \<in> theta^-1 \<union> ZObisT) = ((d,c) \<in> theta \<union> ZObisT)"
by (metis ZObisT_sym converseI converse_Un converse_converse sym_conv_converse_eq)
lemma
ZObisT_matchC_ZOC: "\<And> s t. s \<approx>01T t \<Longrightarrow> matchC_ZOC ZObisT s t"
and
ZObisT_matchT_T: "\<And> s t. s \<approx>01T t \<Longrightarrow> matchT_T s t"
using ZObisT_prefix unfolding ZOretrT_def by auto
lemmas ZObisT_step = ZObisT_matchC_ZOC ZObisT_matchT_T
lemma
ZObisT_matchC_ZOC_rev: "\<And> s t. s \<approx>01T t \<Longrightarrow> matchC_ZOC ZObisT t s"
and
ZObisT_matchT_T_rev: "\<And> s t. s \<approx>01T t \<Longrightarrow> matchT_T t s"
using ZObisT_step ZObisT_sym unfolding sym_def by blast+
lemmas ZObisT_step_rev = ZObisT_matchC_ZOC_rev ZObisT_matchT_T_rev
lemma ZObisT_coind:
assumes "sym theta" and "theta \<subseteq> ZOretrT (theta \<union> ZObisT)"
shows "theta \<subseteq> ZObisT"
using assms mono_Retr bis_coind
unfolding ZObisT_def by blast
lemma ZObisT_raw_coind:
assumes "sym theta" and "theta \<subseteq> ZOretrT theta"
shows "theta \<subseteq> ZObisT"
using assms mono_Retr bis_raw_coind
unfolding ZObisT_def by blast
lemma ZObisT_coind2:
assumes "theta \<subseteq> ZOretrT (theta \<union> ZObisT)" and
"theta ^-1 \<subseteq> ZOretrT ((theta ^-1) \<union> ZObisT)"
shows "theta \<subseteq> ZObisT"
using assms mono_Retr bis_coind2
unfolding ZObisT_def by blast
lemma ZObisT_raw_coind2:
assumes "theta \<subseteq> ZOretrT theta" and
"theta ^-1 \<subseteq> ZOretrT (theta ^-1)"
shows "theta \<subseteq> ZObisT"
using assms mono_Retr bis_raw_coind2
unfolding ZObisT_def by blast
(* Wbis: *)
lemma Wbis_prefix:
"Wbis \<subseteq> Wretr Wbis"
unfolding Wbis_def using mono_Retr bis_prefix by blast
lemma Wbis_sym: "sym Wbis"
unfolding Wbis_def using mono_Retr sym_bis by blast
lemma Wbis_converse:
"((c,d) \<in> theta^-1 \<union> Wbis) = ((d,c) \<in> theta \<union> Wbis)"
by (metis Wbis_sym converseI converse_Un converse_converse sym_conv_converse_eq)
lemma Wbis_Sym: "c \<approx>w d \<Longrightarrow> d \<approx>w c"
using Wbis_sym unfolding sym_def by blast
lemma
Wbis_matchC_M: "\<And> c d. c \<approx>w d \<Longrightarrow> matchC_M Wbis c d"
and
Wbis_matchT_M: "\<And> c d. c \<approx>w d \<Longrightarrow> matchT_M c d"
using Wbis_prefix unfolding Wretr_def by auto
lemmas Wbis_step = Wbis_matchC_M Wbis_matchT_M
lemma
Wbis_matchC_M_rev: "\<And> s t. s \<approx>w t \<Longrightarrow> matchC_M Wbis t s"
and
Wbis_matchT_M_rev: "\<And> s t. s \<approx>w t \<Longrightarrow> matchT_M t s"
using Wbis_step Wbis_sym unfolding sym_def by blast+
lemmas Wbis_step_rev = Wbis_matchC_M_rev Wbis_matchT_M_rev
lemma Wbis_coind:
assumes "sym theta" and "theta \<subseteq> Wretr (theta \<union> Wbis)"
shows "theta \<subseteq> Wbis"
using assms mono_Retr bis_coind
unfolding Wbis_def by blast
lemma Wbis_raw_coind:
assumes "sym theta" and "theta \<subseteq> Wretr theta"
shows "theta \<subseteq> Wbis"
using assms mono_Retr bis_raw_coind
unfolding Wbis_def by blast
lemma Wbis_coind2:
assumes "theta \<subseteq> Wretr (theta \<union> Wbis)" and
"theta ^-1 \<subseteq> Wretr ((theta ^-1) \<union> Wbis)"
shows "theta \<subseteq> Wbis"
using assms mono_Retr bis_coind2
unfolding Wbis_def by blast
lemma Wbis_raw_coind2:
assumes "theta \<subseteq> Wretr theta" and
"theta ^-1 \<subseteq> Wretr (theta ^-1)"
shows "theta \<subseteq> Wbis"
using assms mono_Retr bis_raw_coind2
unfolding Wbis_def by blast
(* WbisT: *)
lemma WbisT_prefix:
"WbisT \<subseteq> WretrT WbisT"
unfolding WbisT_def using mono_Retr bis_prefix by blast
lemma WbisT_sym: "sym WbisT"
unfolding WbisT_def using mono_Retr sym_bis by blast
lemma WbisT_Sym: "c \<approx>wT d \<Longrightarrow> d \<approx>wT c"
using WbisT_sym unfolding sym_def by blast
lemma WbisT_converse:
"((c,d) \<in> theta^-1 \<union> WbisT) = ((d,c) \<in> theta \<union> WbisT)"
by (metis WbisT_sym converseI converse_Un converse_converse sym_conv_converse_eq)
lemma
WbisT_matchC_MC: "\<And> c d. c \<approx>wT d \<Longrightarrow> matchC_MC WbisT c d"
and
WbisT_matchT_MT: "\<And> c d. c \<approx>wT d \<Longrightarrow> matchT_MT c d"
using WbisT_prefix unfolding WretrT_def by auto
lemmas WbisT_step = WbisT_matchC_MC WbisT_matchT_MT
lemma
WbisT_matchC_MC_rev: "\<And> s t. s \<approx>wT t \<Longrightarrow> matchC_MC WbisT t s"
and
WbisT_matchT_MT_rev: "\<And> s t. s \<approx>wT t \<Longrightarrow> matchT_MT t s"
using WbisT_step WbisT_sym unfolding sym_def by blast+
lemmas WbisT_step_rev = WbisT_matchC_MC_rev WbisT_matchT_MT_rev
lemma WbisT_coind:
assumes "sym theta" and "theta \<subseteq> WretrT (theta \<union> WbisT)"
shows "theta \<subseteq> WbisT"
using assms mono_Retr bis_coind
unfolding WbisT_def by blast
lemma WbisT_raw_coind:
assumes "sym theta" and "theta \<subseteq> WretrT theta"
shows "theta \<subseteq> WbisT"
using assms mono_Retr bis_raw_coind
unfolding WbisT_def by blast
lemma WbisT_coind2:
assumes "theta \<subseteq> WretrT (theta \<union> WbisT)" and
"theta ^-1 \<subseteq> WretrT ((theta ^-1) \<union> WbisT)"
shows "theta \<subseteq> WbisT"
using assms mono_Retr bis_coind2
unfolding WbisT_def by blast
lemma WbisT_raw_coind2:
assumes "theta \<subseteq> WretrT theta" and
"theta ^-1 \<subseteq> WretrT (theta ^-1)"
shows "theta \<subseteq> WbisT"
using assms mono_Retr bis_raw_coind2
unfolding WbisT_def by blast
lemma WbisT_coinduct[consumes 1, case_names sym cont termi]:
assumes \<phi>: "\<phi> c d"
assumes S: "\<And>c d. \<phi> c d \<Longrightarrow> \<phi> d c"
assumes C: "\<And>c s d t c' s'.
\<lbrakk> \<phi> c d ; s \<approx> t ; (c, s) \<rightarrow>c (c', s') \<rbrakk> \<Longrightarrow> \<exists>d' t'. (d, t) \<rightarrow>*c (d', t') \<and> s' \<approx> t' \<and> (\<phi> c' d' \<or> c' \<approx>wT d')"
assumes T: "\<And>c s d t s'.
\<lbrakk> \<phi> c d ; s \<approx> t ; (c, s) \<rightarrow>t s' \<rbrakk> \<Longrightarrow> \<exists>t'. (d, t) \<rightarrow>*t t' \<and> s' \<approx> t'"
shows "c \<approx>wT d"
proof -
let ?\<theta> = "{(c, d). \<phi> c d}"
have "sym ?\<theta>" by (auto intro!: symI S)
moreover
have "?\<theta> \<subseteq> WretrT (?\<theta> \<union> WbisT)"
using C T by (auto simp: WretrT_def matchC_MC_def matchT_MT_def)
ultimately have "?\<theta> \<subseteq> WbisT"
using WbisT_coind by auto
with \<phi> show ?thesis
by auto
qed
(* BisT: *)
lemma BisT_prefix:
"BisT \<subseteq> RetrT BisT"
unfolding BisT_def using mono_Retr bis_prefix by blast
lemma BisT_sym: "sym BisT"
unfolding BisT_def using mono_Retr sym_bis by blast
lemma BisT_Sym: "c \<approx>T d \<Longrightarrow> d \<approx>T c"
using BisT_sym unfolding sym_def by blast
lemma BisT_converse:
"((c,d) \<in> theta^-1 \<union> BisT) = ((d,c) \<in> theta \<union> BisT)"
by (metis BisT_sym converseI converse_Un converse_converse sym_conv_converse_eq)
lemma
BisT_matchC_TMC: "\<And> c d. c \<approx>T d \<Longrightarrow> matchC_TMC BisT c d"
and
BisT_matchT_TMT: "\<And> c d. c \<approx>T d \<Longrightarrow> matchT_TMT c d"
using BisT_prefix unfolding RetrT_def by auto
lemmas BisT_step = BisT_matchC_TMC BisT_matchT_TMT
lemma
BisT_matchC_TMC_rev: "\<And> c d. c \<approx>T d \<Longrightarrow> matchC_TMC BisT d c"
and
BisT_matchT_TMT_rev: "\<And> c d. c \<approx>T d \<Longrightarrow> matchT_TMT d c"
using BisT_step BisT_sym unfolding sym_def by blast+
lemmas BisT_step_rev = BisT_matchC_TMC_rev BisT_matchT_TMT_rev
lemma BisT_coind:
assumes "sym theta" and "theta \<subseteq> RetrT (theta \<union> BisT)"
shows "theta \<subseteq> BisT"
using assms mono_Retr bis_coind
unfolding BisT_def by blast
lemma BisT_raw_coind:
assumes "sym theta" and "theta \<subseteq> RetrT theta"
shows "theta \<subseteq> BisT"
using assms mono_Retr bis_raw_coind
unfolding BisT_def by blast
lemma BisT_coind2:
assumes "theta \<subseteq> RetrT (theta \<union> BisT)" and
"theta ^-1 \<subseteq> RetrT ((theta ^-1) \<union> BisT)"
shows "theta \<subseteq> BisT"
using assms mono_Retr bis_coind2
unfolding BisT_def by blast
lemma BisT_raw_coind2:
assumes "theta \<subseteq> RetrT theta" and
"theta ^-1 \<subseteq> RetrT (theta ^-1)"
shows "theta \<subseteq> BisT"
using assms mono_Retr bis_raw_coind2
unfolding BisT_def by blast
text\<open>Inclusions between bisimilarities:\<close>
lemma match_imp[simp]:
"\<And> theta c1 c2. matchC_C theta c1 c2 \<Longrightarrow> matchC_ZOC theta c1 c2"
(* *)
"\<And> theta c1 c2. matchC_ZOC theta c1 c2 \<Longrightarrow> matchC_ZO theta c1 c2"
(* *)
"\<And> theta c1 c2. matchC_ZOC theta c1 c2 \<Longrightarrow> matchC_MC theta c1 c2"
(* *)
"\<And> theta c1 c2. matchC_ZO theta c1 c2 \<Longrightarrow> matchC_M theta c1 c2"
(* *)
"\<And> theta c1 c2. matchC_MC theta c1 c2 \<Longrightarrow> matchC_M theta c1 c2"
(* *)
(* *)
"\<And> c1 c2. matchT_T c1 c2 \<Longrightarrow> matchT_ZO c1 c2"
(* *)
"\<And> c1 c2. matchT_T c1 c2 \<Longrightarrow> matchT_MT c1 c2"
(* *)
"\<And> c1 c2. matchT_ZO c1 c2 \<Longrightarrow> matchT_M c1 c2"
(* *)
"\<And> c1 c2. matchT_MT c1 c2 \<Longrightarrow> matchT_M c1 c2"
(* *)
"\<And> theta c1 c2. matchC_MC theta c1 c2 \<Longrightarrow> matchC_TMC theta c1 c2"
(* *)
"\<And> theta c1 c2. matchT_MT c1 c2 \<Longrightarrow> matchT_TMT c1 c2"
unfolding match_defs apply(tactic \<open>mauto_no_simp_tac @{context}\<close>)
apply fastforce apply fastforce
apply (metis MtransC_Refl transC_MtransC)
by force+
lemma Retr_incl:
"\<And>theta. Sretr theta \<subseteq> ZOretrT theta"
(* *)
"\<And>theta. ZOretrT theta \<subseteq> ZOretr theta"
(* *)
"\<And>theta. ZOretrT theta \<subseteq> WretrT theta"
(* *)
"\<And>theta. ZOretr theta \<subseteq> Wretr theta"
(* *)
"\<And>theta. WretrT theta \<subseteq> Wretr theta"
(* *)
"\<And>theta. WretrT theta \<subseteq> RetrT theta"
unfolding Retr_defs by auto
lemma bis_incl:
"Sbis \<subseteq> ZObisT"
(* *)
"ZObisT \<subseteq> ZObis"
(* *)
"ZObisT \<subseteq> WbisT"
(* *)
"ZObis \<subseteq> Wbis"
(* *)
"WbisT \<subseteq> Wbis"
(* *)
"WbisT \<subseteq> BisT"
unfolding bis_defs
using Retr_incl mono_bis mono_Retr by blast+
lemma bis_imp[simp]:
"\<And> c1 c2. c1 \<approx>s c2 \<Longrightarrow> c1 \<approx>01T c2"
(* *)
"\<And> c1 c2. c1 \<approx>01T c2 \<Longrightarrow> c1 \<approx>01 c2"
(* *)
"\<And> c1 c2. c1 \<approx>01T c2 \<Longrightarrow> c1 \<approx>wT c2"
(* *)
"\<And> c1 c2. c1 \<approx>01 c2 \<Longrightarrow> c1 \<approx>w c2"
(* *)
"\<And> c1 c2. c1 \<approx>wT c2 \<Longrightarrow> c1 \<approx>w c2"
(* *)
"\<And> c1 c2. c1 \<approx>wT c2 \<Longrightarrow> c1 \<approx>T c2"
using bis_incl rev_subsetD by auto
text\<open>Self-isomorphism implies strong bisimilarity:\<close>
text\<open>0-Self-isomorphism implies weak T 0-bisimilarity:\<close>
lemma siso0_Sbis[simp]:
assumes "siso0 c"
shows "c \<approx>T c"
proof-
let ?theta = "{(c,c) | c . siso0 c}"
have "?theta \<subseteq> BisT"
proof(rule BisT_raw_coind)
show "sym ?theta" unfolding sym_def by blast
next
show "?theta \<subseteq> RetrT ?theta"
proof clarify
fix c assume c: "siso0 c"
show "(c, c) \<in> RetrT ?theta"
unfolding RetrT_def proof (clarify, intro conjI)
show "matchC_TMC ?theta c c"
unfolding matchC_TMC_def apply simp
by (metis c siso0_transC siso0_transC_indis transC_MtransC)
next
show "matchT_TMT c c"
by (metis c matchT_TMT_def siso0_transT transT_MtransT)
qed
qed
qed
thus ?thesis using assms by blast
qed
end
(* context PL_Indis *)
end
|
Online registrations (PRDE) - Will it really help lower corruption in the registrar's office?
Flipping Houses – Is it practical? Is it profitable?
What factors decide if a particular property will be a good investment in the long run? |
(*
Title: Fusion Laws
Author: Georg Struth
Maintainer: Georg Struth <g.struth at sheffield.ac.uk>
*)
section \<open>Galois Connection and Fixpoint Fusion\<close>
theory Galois_Fusion
imports "Two_KA"
begin
text \<open>This comes from an AFP entry for lattices.\<close>
unbundle "lattice_syntax"
definition adj :: "('a::ord \<Rightarrow> 'b::ord) \<Rightarrow> ('b \<Rightarrow> 'a) \<Rightarrow> bool" (infixl "\<stileturn>" 70) where
"(f \<stileturn> g) = (\<forall>x y. (f x \<le> y) = (x \<le> g y))"
definition "ladj (g::'a::Inf \<Rightarrow> 'b::ord) = (\<lambda>x. \<Sqinter>{y. x \<le> g y})"
definition "radj (f::'a::Sup \<Rightarrow> 'b::ord) = (\<lambda>y. \<Squnion>{x. f x \<le> y})"
lemma adj_iso1: "f \<stileturn> g \<Longrightarrow> mono f"
unfolding adj_def mono_def by (meson dual_order.refl dual_order.trans)
lemma adj_iso2: "f \<stileturn> g \<Longrightarrow> mono g"
unfolding adj_def mono_def by (meson dual_order.refl dual_order.trans)
lemma adj_comp: "f \<stileturn> g \<Longrightarrow> adj h k \<Longrightarrow> (f \<circ> h) \<stileturn> (k \<circ> g)"
by (simp add: adj_def)
lemma adj_cancel1:
fixes f :: "'a::preorder \<Rightarrow> 'b::ord"
shows "f \<stileturn> g \<Longrightarrow> f \<circ> g \<le> id"
by (simp add: adj_def le_funI)
lemma adj_cancel2:
fixes f :: "'a::ord \<Rightarrow> 'b::preorder"
shows "f \<stileturn> g \<Longrightarrow> id \<le> g \<circ> f"
by (simp add: adj_def eq_iff le_funI)
lemma adj_prop:
fixes f :: "'a::preorder \<Rightarrow>'a"
shows "f \<stileturn> g \<Longrightarrow> f \<circ> g \<le> g \<circ> f"
using adj_cancel1 adj_cancel2 order_trans by blast
lemma adj_cancel_eq1:
fixes f :: "'a::preorder \<Rightarrow> 'b::order"
shows "f \<stileturn> g \<Longrightarrow> f \<circ> g \<circ> f = f"
unfolding adj_def comp_def fun_eq_iff by (meson eq_iff order_refl order_trans)
lemma adj_cancel_eq2:
fixes f :: "'a::order \<Rightarrow> 'b::preorder"
shows "f \<stileturn> g \<Longrightarrow> g \<circ> f \<circ> g = g"
unfolding adj_def comp_def fun_eq_iff by (meson eq_iff order_refl order_trans)
lemma adj_idem1:
fixes f :: "'a::preorder \<Rightarrow> 'b::order"
shows "f \<stileturn> g \<Longrightarrow> (f \<circ> g) \<circ> (f \<circ> g) = f \<circ> g"
by (simp add: adj_cancel_eq1 rewriteL_comp_comp)
lemma adj_idem2:
fixes f :: "'a::order \<Rightarrow> 'b::preorder"
shows "f \<stileturn> g \<Longrightarrow> (g \<circ> f) \<circ> (g \<circ> f) = g \<circ> f"
by (simp add: adj_cancel_eq2 rewriteL_comp_comp)
lemma adj_iso3:
fixes f :: "'a::order \<Rightarrow> 'b::order"
shows "f \<stileturn> g \<Longrightarrow> mono (f \<circ> g)"
by (simp add: adj_iso1 adj_iso2 monoD monoI)
lemma adj_iso4:
fixes f :: "'a::order \<Rightarrow> 'b::order"
shows "f \<stileturn> g \<Longrightarrow> mono (g \<circ> f)"
by (simp add: adj_iso1 adj_iso2 monoD monoI)
lemma adj_canc1:
fixes f :: "'a::order \<Rightarrow> 'b::ord"
shows "f \<stileturn> g \<Longrightarrow> ((f \<circ> g) x = (f \<circ> g) y \<longrightarrow> g x = g y)"
unfolding adj_def comp_def by (metis eq_iff)
lemma adj_canc2:
fixes f :: "'a::ord \<Rightarrow> 'b::order"
shows "f \<stileturn> g \<Longrightarrow> ((g \<circ> f) x = (g \<circ> f) y \<longrightarrow> f x = f y)"
unfolding adj_def comp_def by (metis eq_iff)
lemma adj_sur_inv:
fixes f :: "'a::preorder \<Rightarrow> 'b::order"
shows "f \<stileturn> g \<Longrightarrow> ((surj f) = (f \<circ> g = id))"
unfolding adj_def surj_def comp_def by (metis eq_id_iff eq_iff order_refl order_trans)
lemma adj_surj_inj:
fixes f :: "'a::order \<Rightarrow> 'b::order"
shows "f \<stileturn> g \<Longrightarrow> ((surj f) = (inj g))"
unfolding adj_def inj_def surj_def by (metis eq_iff order_trans)
lemma adj_inj_inv:
fixes f :: "'a::preorder \<Rightarrow> 'b::order"
shows "f \<stileturn> g \<Longrightarrow> ((inj f) = (g \<circ> f = id))"
by (metis adj_cancel_eq1 eq_id_iff inj_def o_apply)
lemma adj_inj_surj:
fixes f :: "'a::order \<Rightarrow> 'b::order"
shows "f \<stileturn> g \<Longrightarrow> ((inj f) = (surj g))"
unfolding adj_def inj_def surj_def by (metis eq_iff order_trans)
lemma surj_id_the_inv: "surj f \<Longrightarrow> g \<circ> f = id \<Longrightarrow> g = the_inv f"
by (metis comp_apply id_apply inj_on_id inj_on_imageI2 surj_fun_eq the_inv_f_f)
lemma inj_id_the_inv: "inj f \<Longrightarrow> f \<circ> g = id \<Longrightarrow> f = the_inv g"
by (metis fun.set_map image_inv_f_f inj_imp_surj_inv surj_id surj_id_the_inv)
abbreviation Sup_pres :: "('a::Sup \<Rightarrow> 'b::Sup) \<Rightarrow> bool" where
"Sup_pres f \<equiv> f \<circ> Sup = Sup \<circ> image f"
abbreviation Inf_pres :: "('a::Inf \<Rightarrow> 'b::Inf) \<Rightarrow> bool" where
"Inf_pres f \<equiv> f \<circ> Inf = Inf \<circ> image f"
lemma radj_Inf_pres:
fixes g :: "'b::complete_lattice \<Rightarrow> 'a::complete_lattice"
shows "(\<exists>f. f \<stileturn> g) \<Longrightarrow> Inf_pres g"
apply (rule antisym, simp_all add: le_fun_def adj_def, safe)
apply (meson INF_greatest Inf_lower dual_order.refl dual_order.trans)
by (meson Inf_greatest dual_order.refl le_INF_iff)
lemma ladj_Sup_pres:
fixes f :: "'a::complete_lattice \<Rightarrow> 'b::complete_lattice"
shows "(\<exists>g. f \<stileturn> g) \<Longrightarrow> Sup_pres f"
apply (rule antisym, simp_all add: le_fun_def adj_def, safe)
apply (metis SUP_upper Sup_least)
by (meson SUP_least Sup_upper order_refl order_trans)
lemma radj_adj:
fixes f :: "'a::complete_lattice \<Rightarrow> 'b::complete_lattice"
shows "f \<stileturn> g \<Longrightarrow> g = (radj f)"
unfolding adj_def radj_def by (metis (mono_tags, lifting) cSup_eq_maximum eq_iff mem_Collect_eq)
lemma ladj_adj:
fixes g :: "'b::complete_lattice \<Rightarrow> 'a::complete_lattice"
shows "f \<stileturn> g \<Longrightarrow> f = (ladj g)"
unfolding adj_def ladj_def by (metis (no_types, lifting) cInf_eq_minimum eq_iff mem_Collect_eq)
lemma Inf_subdistl_iso:
fixes f :: "'a::complete_lattice \<Rightarrow> 'b::complete_lattice"
shows "f \<circ> Inf \<le> Inf \<circ> image f \<Longrightarrow> mono f"
unfolding mono_def le_fun_def comp_def by (metis complete_lattice_class.le_INF_iff Inf_atLeast atLeast_iff)
lemma Inf_pres_radj_aux:
fixes g :: "'a::complete_lattice \<Rightarrow> 'b::complete_lattice"
shows "Inf_pres g \<Longrightarrow> (ladj g) \<stileturn> g"
proof-
assume a: "Inf_pres g"
{fix x y
assume b: "ladj g x \<le> y"
hence "g (ladj g x) \<le> g y"
by (simp add: Inf_subdistl_iso a monoD)
hence "\<Sqinter>{g y |y. x \<le> g y} \<le> g y"
by (metis a comp_eq_dest_lhs setcompr_eq_image ladj_def)
hence "x \<le> g y"
using dual_order.trans le_Inf_iff by blast
hence "ladj g x \<le> y \<longrightarrow> x \<le> g y"
by simp}
thus ?thesis
unfolding adj_def ladj_def by (meson CollectI Inf_lower)
qed
lemma Sup_supdistl_iso:
fixes f :: "'a::complete_lattice \<Rightarrow> 'b::complete_lattice"
shows "Sup \<circ> (`) f \<le> f \<circ> Sup \<Longrightarrow> mono f"
unfolding mono_def le_fun_def comp_def by (metis complete_lattice_class.SUP_le_iff Sup_atMost atMost_iff)
lemma Sup_pres_ladj_aux:
fixes f :: "'a::complete_lattice \<Rightarrow> 'b::complete_lattice"
shows "Sup_pres f \<Longrightarrow> f \<stileturn> (radj f)"
proof-
assume a: "Sup_pres f"
{fix x y
assume b: "x \<le> radj f y"
hence "f x \<le> f (radj f y)"
by (simp add: Sup_supdistl_iso a monoD)
hence "f x \<le> \<Squnion>{f x |x. f x \<le> y}"
by (metis a comp_eq_dest_lhs setcompr_eq_image radj_def)
hence "f x \<le> y"
by (smt (verit, ccfv_SIG) Sup_eq_Inf le_Inf_iff mem_Collect_eq)
hence "x \<le> radj f y \<longrightarrow> f x \<le> y"
by simp}
thus ?thesis
unfolding adj_def radj_def by (meson CollectI Sup_upper)
qed
lemma Inf_pres_radj:
fixes g :: "'b::complete_lattice \<Rightarrow> 'a::complete_lattice"
shows "Inf_pres g \<Longrightarrow> (\<exists>f. f \<stileturn> g)"
using Inf_pres_radj_aux by fastforce
lemma Sup_pres_ladj:
fixes f :: "'a::complete_lattice \<Rightarrow> 'b::complete_lattice"
shows "Sup_pres f \<Longrightarrow> (\<exists>g. f \<stileturn> g)"
using Sup_pres_ladj_aux by fastforce
lemma Inf_pres_upper_adj_eq:
fixes g :: "'b::complete_lattice \<Rightarrow> 'a::complete_lattice"
shows "(Inf_pres g) = (\<exists>f. f \<stileturn> g)"
using radj_Inf_pres Inf_pres_radj by blast
lemma Sup_pres_ladj_eq:
fixes f :: "'a::complete_lattice \<Rightarrow> 'b::complete_lattice"
shows "(Sup_pres f) = (\<exists>g. f \<stileturn> g)"
using Sup_pres_ladj ladj_Sup_pres by blast
definition Fix :: "('a \<Rightarrow> 'a) \<Rightarrow> 'a set" where
"Fix f = {x. f x = x}"
lemma lfp_Fix:
fixes f :: "'a::complete_lattice \<Rightarrow> 'a"
shows "mono f \<Longrightarrow> lfp f = \<Sqinter>(Fix f)"
unfolding lfp_def Fix_def
apply (rule antisym)
apply (simp add: Collect_mono Inf_superset_mono)
by (metis (mono_tags) Inf_lower lfp_def lfp_unfold mem_Collect_eq)
lemma gfp_Fix:
fixes f :: "'a::complete_lattice \<Rightarrow> 'a"
shows "mono f \<Longrightarrow> gfp f = \<Squnion>(Fix f)"
unfolding gfp_def Fix_def
apply (rule antisym)
apply (metis (mono_tags, lifting) Sup_mono def_gfp_unfold gfp_upperbound mem_Collect_eq)
by (simp add: Collect_mono Sup_subset_mono)
lemma gfp_little_fusion:
fixes f :: "'a::complete_lattice \<Rightarrow> 'a"
and g :: "'b::complete_lattice \<Rightarrow> 'b"
assumes "mono f"
assumes "h \<circ> f \<le> g \<circ> h"
shows "h (gfp f) \<le> gfp g"
by (metis assms(1) assms(2) comp_apply gfp_unfold gfp_upperbound le_funD)
lemma lfp_little_fusion:
fixes f :: "'a::complete_lattice \<Rightarrow> 'a"
and g :: "'b::complete_lattice \<Rightarrow> 'b"
assumes "mono f"
assumes "g \<circ> h \<le> h \<circ> f"
shows "lfp g \<le> h (lfp f)"
by (metis assms(1) assms(2) comp_apply le_funD lfp_lowerbound lfp_unfold)
lemma gfp_fusion:
fixes f :: "'a::complete_lattice \<Rightarrow> 'a"
and g :: "'b::complete_lattice \<Rightarrow> 'b"
assumes "\<exists>f. f \<stileturn> h"
and "mono f"
and "mono g"
and "h \<circ> f = g \<circ> h"
shows "h (gfp f) = gfp g"
by (smt (verit, ccfv_threshold) adj_def assms(1) assms(2) assms(3) assms(4) comp_eq_elim gfp_eqI gfp_fixpoint gfp_upperbound monoD order_refl)
lemma lfp_fusion:
fixes f :: "'a::complete_lattice \<Rightarrow> 'a"
and g :: "'b::complete_lattice \<Rightarrow> 'b"
assumes "\<exists>f. h \<stileturn> f"
and "mono f"
and "mono g"
and "h \<circ> f = g \<circ> h"
shows "h (lfp f) = lfp g"
by (smt (verit, del_insts) adj_def assms(1) assms(2) assms(3) assms(4) comp_eq_elim dual_order.antisym lfp_lowerbound lfp_unfold monoD order_refl)
lemma gfp_fusion_inf_pres:
fixes f :: "'a::complete_lattice \<Rightarrow> 'a"
and g :: "'b::complete_lattice \<Rightarrow> 'b"
assumes "Inf_pres h"
and "mono f"
and "mono g"
and "h \<circ> f = g \<circ> h"
shows "h (gfp f) = gfp g"
by (simp add: Inf_pres_radj assms gfp_fusion)
lemma k_adju:
fixes k :: "'a::order \<Rightarrow> 'b::complete_lattice"
shows "\<forall>y.\<exists>F. (F::'b \<Rightarrow> 'a \<Rightarrow> 'b) \<stileturn> (\<lambda>k. k y)"
by (force intro!: fun_eq_iff Inf_pres_radj)
lemma k_adju_var: "\<forall>y.\<exists>F.\<forall>x.\<forall>f::'a::order \<Rightarrow> 'b::complete_lattice. (F x \<le> f) = (x \<le> (\<lambda>k. k y) f)"
using k_adju unfolding adj_def by blast
lemma gfp_fusion_var:
fixes F :: "('a::order \<Rightarrow> 'b::complete_lattice) \<Rightarrow> 'a \<Rightarrow> 'b"
and g :: "'b \<Rightarrow> 'b"
assumes "mono F"
and "mono g"
and "\<forall>h. F h x = g (h x)"
shows "gfp F x = gfp g"
by (metis (no_types, opaque_lifting) antisym assms(1) assms(2) assms(3) dual_order.refl gfp_unfold gfp_upperbound k_adju_var monoD)
end
|
lemma tendsto_divide_zero: fixes c :: "'a::real_normed_field" shows "(f \<longlongrightarrow> 0) F \<Longrightarrow> ((\<lambda>x. f x / c) \<longlongrightarrow> 0) F" |
%
% Chapter 2
%
\chapter{Theoretical Background}
\label{chap:theory}
\epigraph{If I have seen further it is by standing on the shoulders of Giants.}{\textit{Isaac Newton}}
\section{Introduction}
In this chapter, we describe the theoretical motivations that drive the searches described in this thesis. We start with a description of the standard model (SM), its particle content and interactions, and the Higgs mechanism. We then talk about the inadequacies of the SM, and the existence of physics beyond the standard model (BSM). We then outline a few BSM models and how they point towards the possible existence of the decays that we search for in this thesis.
\section{The Standard model }
\label{sec:SM}
The SM is the result of human endeavors over centuries to understand what we and the world around us are made of, and to capture those ideas in beautiful mathematical form. Our understanding of the world around us has refined progressively from ancient times, when the best tools of observation we had were our own eyes, to the current day, when we are able to collide the particles that make up matter at unprecedented energies, aided by sophisticated tools like the CMS detector. From the ancient Greeks who pondered philosophical questions about the basic elements of nature, to the discovery of the electron in 1897 by J.~J.~Thomson, to Rutherford's famous gold foil experiment, to the discovery of the neutron by James Chadwick in 1932, each event has been a stepping stone towards our understanding of nature and the formulation of the SM~\cite{th_gun}. During the course of its formulation and after, the SM has accurately explained phenomena already known and predicted the existence of particles that were discovered later. The last of these particles is the Higgs boson (h), discovered in 2012 at CERN by the CMS and ATLAS experiments~\cite{Aad:2012tfa, Chatrchyan:2012ufa, Chatrchyan:2013lba}. The SM is a gauge theory in which three of the four known fundamental forces (the strong, electromagnetic and weak forces, but not gravity) are described by the SU(3)$\times$SU(2)$\times$U(1) symmetry group. This symmetry group describes the transformations under which the SM is invariant. By Noether's theorem, each of these symmetries of the SM Lagrangian corresponds to a conserved quantity: color charge, weak isospin and electric charge. The following sections describe the elementary particles of the SM, the interactions among them and, finally, the spontaneous symmetry breaking mechanism.
\subsection{Elementary particles}
There are two kinds of elementary particles in the SM. They are characterized by the intrinsic angular momentum that they carry, i.e. by their spin. Fermions, which have half-integer spins, form the building blocks of matter. Bosons, which have integer spins, are the force-carriers or mediators of interactions.
\subsubsection{Fermions of SM}
\label{fermions}
The fermions described here are fundamental particles, i.e. they cannot be broken down into further constituents. The space-time evolution of the fermions is described by the Dirac equation and their behavior follows Fermi-Dirac statistics. All fermions are subject to the Pauli exclusion principle. They can be further categorized into two classes depending on their interaction with the strong force. Fermions which do not interact with the strong force are called leptons, and do not carry any color charge. Quarks carry color charge and interact via the strong force. Both leptons and quarks are further classified into three generations. Each lepton generation consists of a lepton and a neutrino, while each quark generation consists of an up-type and a down-type quark. These are outlined in detail below.
Leptons comprise the familiar electron (e) and its heavier cousins -- the muon ($\Pgm$) and the tau lepton ($\Pgt$) -- which carry the same negative electric charge as the electron ($1.6\times10^{-19}\,C$). The heavier leptons $\Pgt$ ($\sim 1.8\,\mathrm{GeV}/c^2$) and $\Pgm$ ($\sim 105.7\,\mathrm{MeV}/c^2$) have short lifetimes of $\sim 2.9\times 10^{-13}\,$s and $\sim 2.2\times 10^{-6}\,$s respectively. They eventually decay into the electron, which is the lightest lepton ($\sim 0.5\,\mathrm{MeV}/c^2$) and is stable, or into lighter hadrons. In the CMS detector, the $\Pgm$ survives long enough to reach the muon systems, and is thus detected with a distinct signature of its own. The $\Pgt$, on the other hand, owing to its extremely short lifetime, can travel only a very short distance ($\lesssim 10\,$mm) before decaying. Thus, only the decay products of tau leptons can be directly detected by CMS. Each charged lepton is associated with an electrically neutral neutrino. They are called the electron neutrino ($\nu_e$), muon neutrino ($\nu_{\mu}$) and tau neutrino ($\nu_{\tau}$). Because neutrinos carry no electric charge, they do not participate in the electromagnetic interaction; the only way they interact is via the weak interaction. This makes neutrinos very difficult to detect. In particular, they pass through the CMS detector effectively without interacting at all, and their presence and the energy they carry can only be inferred from the imbalance in the transverse momentum of the observed particles (see section~\ref{mt_met_recon}). The three generations of leptons are pictorially shown below.
\begin{equation*}
\binom{e^{-}}{\nu_{e}} \;\;\; \binom{\mu^{-}}{\nu_{\mu}} \;\;\; \binom{\tau^{-}}{\nu_{\tau}}
\end{equation*}
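As a rough illustration of why the $\Pgt$ decays so close to the interaction point, its mean decay length can be estimated from the lifetime and mass quoted above (the momentum of 50\,GeV used here is simply a representative value, not a number taken from this analysis):
\begin{equation*}
L = \gamma\beta c\tau_{\Pgt} \approx \frac{p}{m_{\Pgt}}\,c\tau_{\Pgt} \approx \frac{50}{1.8}\times(3\times10^{8}\,\mathrm{m/s})\times(2.9\times10^{-13}\,\mathrm{s}) \approx 2.4\,\mathrm{mm},
\end{equation*}
consistent with the $\lesssim 10$\,mm quoted above and far too short for the $\Pgt$ itself to be detected directly.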
Quarks come in two types: up-type and down-type. The up-type quarks are the up quark (u), charm quark (c) and top quark (t). Their down-type counterparts are the down quark (d), strange quark (s) and bottom quark (b). Each up-type quark carries a positive electric charge of magnitude 2/3 of the elementary charge, while each down-type quark carries a negative electric charge of magnitude 1/3 of the elementary charge. Just like the leptons, each successive generation is heavier, with the third generation, consisting of the top and bottom quarks, being the heaviest. In fact, the top quark was the last of the SM quarks to be discovered, in 1995, and is the most massive particle in the SM ($\sim 173\,\mathrm{GeV}/c^2$). As mentioned above, all quarks carry color charge. Color charge is to the strong force what electric charge is to the electromagnetic force, and it allows quarks to interact via the strong force. Due to a phenomenon called color confinement, quarks aggregate into color-singlet (zero color charge) particles called hadrons. Hadrons are formed either of three (anti-)\,quarks (baryons) or of a quark-antiquark pair (mesons). The proton and neutron are baryons. The proton is made of two up quarks and one down quark. It has a mass of $\sim 938.3\,\mathrm{MeV}/c^2$ and is stable. The neutron is made of one up quark and two down quarks. It has a mass of $\sim 939.5\,\mathrm{MeV}/c^2$ and a free lifetime of $\sim880\,$s. The three generations of quarks are pictorially shown below.
\begin{equation*}
\binom{u}{d} \;\;\; \binom{c}{s} \;\;\; \binom{t}{b}
\end{equation*}
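As a simple check of these charge assignments, using only the quark charges given above, the electric charges of the proton (uud) and the neutron (udd) follow directly:
\begin{equation*}
Q_{p} = \tfrac{2}{3}+\tfrac{2}{3}-\tfrac{1}{3} = +1, \qquad Q_{n} = \tfrac{2}{3}-\tfrac{1}{3}-\tfrac{1}{3} = 0,
\end{equation*}
in units of the elementary charge.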
Each particle described above has an associated anti-particle. Particles (matter) and their anti-particles (anti-matter) are almost identical, except that they have opposite physical charges (electric charge, lepton number, baryon number). For example, the anti-particle of the electron is the positron, which is identical to the electron except that it carries positive electric charge.
\subsubsection{Bosons of SM}
The bosons in the SM are the carriers or mediators of the forces. Their behavior follows Bose-Einstein statistics and they are not constrained by the Pauli exclusion principle. The strong interaction, as its name suggests, is the strongest of the fundamental forces\,(see table~\ref{tab:forces}). The eight gluons mediate the strong interaction between particles with color charge. Photons are the mediators of the next strongest fundamental force, the electromagnetic force. Gluons and photons are massless, electrically neutral and have spin 1. Additionally, gluons carry color charge, in contrast to photons, which carry no color charge. The $\mathrm{W}^+$, $\mathrm{W}^-$ and Z gauge bosons mediate the weak interaction, which is the only interaction capable of changing particle flavor. These bosons also have spin 1. However, unlike the photon and the gluons, they are massive. The W boson has a mass of $\sim 80.4\,\mathrm{GeV}/c^2$ and the Z boson has a mass of $\sim 91.2\,\mathrm{GeV}/c^2$. Finally, the Higgs field, whose quantum is the massive, scalar (spin-0) and electrically neutral Higgs boson, is responsible for giving masses to the W and Z bosons and to the fermions. Table~\ref{tab:forces} shows the relative strength of the fundamental forces and their range.
\begin{table}[hbtp]
\begin{center}
\caption{Relative strengths and ranges of the four fundamental forces, with gravity as the baseline}
\begin{tabular}{c|c|c}
\hline
Interaction & Relative Strength & Range \\
\hline
Strong & $10^{39}$ & $10^{-15}\,m$\\
Electromagnetic & $10^{36}$& $\infty$\\
Weak & $10^{24}$ &$10^{-18}\,m$\\
Gravity & $1$ &$\infty$\\
\hline
\end{tabular}
\label{tab:forces}
\end{center}
\end{table}
A pictorial summary of all particles in the SM, divided into different classes, is shown in Figure~\ref{fig:sm_zoo}.
\begin{figure}[hbtp]
\begin{center}
\includegraphics[width=0.9\textwidth]{plots_and_figures/chapter2/SM_particles.pdf}
\caption{A pictorial summary of particles in the SM. The Higgs boson is shown in yellow. Gauge bosons are shown in red. Leptons and quarks are shown in green and violet respectively~\cite{sm_zoo}.}
\label{fig:sm_zoo}
\end{center}
\end{figure}
\subsection{Theory of interactions in SM}
The SM follows the Lagrangian formalism to describe interaction between the particles. Given the SM is a gauge theory, symmetries of the Lagrangian are central to its understanding~\cite{th_muell}. In a gauge theory, the Lagrangian is invariant under certain (groups of) transformations and each such symmetry is associated with a conservation law (Noether's theorem). The underlying symmetry group that the SM Lagrangian is invariant under is SU(3)$\times$SU(2)$\times$U(1), where the group SU(3) corresponds to the strong interaction while the group SU(2)$\times$U(1) corresponds to the electromagnetic and weak (electroweak) interaction. Each group generator is associated with an underlying vector field, the quanta of which are the gauge bosons (gluons, photons, W and Z) described above. We describe the SM interactions briefly below in order of strength.
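A quick consistency check of this gauge structure, using only the standard fact that SU(N) has $N^{2}-1$ generators, is to count the gauge fields:
\begin{equation*}
\underbrace{(3^{2}-1)}_{\mathrm{SU(3)}} + \underbrace{(2^{2}-1)}_{\mathrm{SU(2)}} + \underbrace{1}_{\mathrm{U(1)}} = 8+3+1 = 12,
\end{equation*}
which matches the eight gluons plus the $\mathrm{W}^{+}$, $\mathrm{W}^{-}$, Z and photon described above.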
\subsubsection{Strong and electroweak interactions}
The theory that describes the strong interaction is called Quantum Chromodynamics (QCD). It is a non-abelian gauge field theory based on SU(3) symmetry. Color charge is the quantity conserved under this symmetry. There are three colors: green (g), red (r) and blue (b). Each color has a corresponding anticolor (negative color). As noted earlier, all quarks and gluons have non-zero color charge. Quarks carry a single color, while each of the eight gluons carries a color and an anticolor charge. Since the theory is non-abelian, the generator matrices (the Gell-Mann matrices) do not commute. The consequence of this is that gluons (unlike photons) can interact with each other.
The theory that was originally formulated to describe the electromagnetic interaction is called Quantum Electrodynamics (QED). It is a gauge field theory based on U(1) symmetry. Electric charge is the quantity conserved under this symmetry, and all particles that interact electromagnetically must carry electric charge. Unlike the gluons, the photon (being electrically neutral) cannot interact with itself. The weak interaction was initially formulated based on the SU(2) symmetry group, with the conserved quantity being weak isospin. The associated gauge bosons are massive and can be electrically neutral (Z) or charged (W). The Z boson mediates flavor-conserving (neutral-current) weak interactions, while the W bosons mediate charged-current interactions that can connect quarks of different generations. In the 1960s Glashow, Salam and Weinberg combined the theories describing the electromagnetic and weak interactions, after realizing that they were different aspects of the same overarching interaction. This is known as electroweak unification, and the electroweak interaction is described by a gauge field theory based on the combined SU(2)$\times$U(1) symmetry group. The conserved quantities, weak isospin (T) and electric charge (Q), are related via:
\begin{equation}
Q = T_{3} + \frac{Y_{W}}{2}
\end{equation}
where $\mathrm{T}_{3}$ is the third component of T and $\mathrm{Y}_{W}$ is a quantum number called the weak hypercharge.
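As a worked example of this relation (the hypercharge assignments below are the standard ones in the convention $Q = T_{3} + Y_{W}/2$ and are quoted here for illustration), consider the first-generation leptons: the left-handed electron has $T_{3}=-1/2$ and $Y_{W}=-1$, its neutrino partner has $T_{3}=+1/2$ and $Y_{W}=-1$, and the right-handed electron has $T_{3}=0$ and $Y_{W}=-2$, so that
\begin{equation*}
Q_{e_{L}} = -\tfrac{1}{2} - \tfrac{1}{2} = -1, \qquad Q_{\nu_{L}} = +\tfrac{1}{2} - \tfrac{1}{2} = 0, \qquad Q_{e_{R}} = 0 - 1 = -1.
\end{equation*}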
The gauge bosons in this theory are divided into a triplet with two electrically charged and one neutral component (corresponding to the W bosons and the Z), and a singlet with no electric charge (corresponding to the photon). However, in order to maintain gauge invariance of the theory, no mass terms are allowed in the Lagrangian. This would require all the gauge bosons (and fermions) to be massless, which is known not to be the case. This broken symmetry (photons being massless while the W and Z bosons are massive) is explained by the Higgs mechanism~\cite{Englert:1964et,Higgs:1964ia,Higgs:1964pj,Guralnik:1964eu}, described in the next section.
\subsubsection{The Higgs mechanism}
In order to explain how massive gauge bosons come about, the idea of electroweak spontaneous symmetry breaking (EWSB) is introduced. The phenomenon by which EWSB is used to give mass to particles is called the Higgs mechanism. Under this mechanism, a new scalar field, $\phi$, called the Higgs field, and an associated potential, V$(\phi)$, are introduced. The field is represented as a doublet and has four degrees of freedom. Three of these four degrees of freedom correspond to the longitudinal polarizations of the massive W and Z bosons. In order for the Higgs field to interact with the W and Z but not the photon, the symmetry has to be broken. The minimum of the potential, i.e. the vacuum or ground state, must be non-zero for this to happen. The parameters of V$(\phi)$ are chosen such that it has a Mexican-hat (sombrero) shape, with infinitely many degenerate non-zero minima. This non-zero minimum is called the vacuum expectation value (vev), which is measured to be 246\,GeV. The direction of symmetry breaking is such that it gives mass to the Z boson but leaves the photon massless. This breaking of symmetry is called spontaneous because there is no particular reason (that we know of) for this direction to have been picked; nature just happened to pick it. The Higgs field gives rise to a new massive scalar particle. This particle is the Higgs boson, and corresponds to the fourth remaining degree of freedom of the scalar doublet mentioned above. The fermions acquire mass via Yukawa interactions with the h, and the strength of the Yukawa coupling of the h to a fermion is proportional to the fermion mass. To summarize, the Higgs mechanism allows the introduction of mass terms for the gauge bosons without breaking the underlying gauge symmetry of the SM Lagrangian. The addition of this field gives rise to another massive particle, the interactions with which give masses to the gauge bosons and fermions. This massive particle is a scalar boson called the Higgs boson, which was discovered in 2012 at CERN by CMS and ATLAS~\cite{Aad:2012tfa, Chatrchyan:2012ufa, Chatrchyan:2013lba}, almost 50 years after it was first predicted to exist. The most recent measurement of the Higgs boson mass by CMS, combining data from Run I and Run II, is $125.35\pm0.15$\,GeV~\cite{HIG-19-004}. Before the LHC, experiments at LEP and the Tevatron searched for the h. It was the last missing piece of the SM, and its discovery concluded one era in particle physics and opened an equally exciting new one.
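For concreteness, the Mexican-hat shape described above is conventionally realized by the potential (this standard parameterization is quoted here for illustration and is not spelled out elsewhere in this text)
\begin{equation*}
V(\phi) = \mu^{2}\,\phi^{\dagger}\phi + \lambda\,(\phi^{\dagger}\phi)^{2}, \qquad \mu^{2}<0,\; \lambda>0,
\end{equation*}
whose minimum lies at a non-zero field value $|\phi| = v/\sqrt{2}$ with $v=\sqrt{-\mu^{2}/\lambda}\approx246$\,GeV, the vacuum expectation value quoted above.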
\subsection{Higgs boson production and decays at the LHC}
There are several different ways in which the Higgs boson can be produced at the LHC~\cite{th_cec}. The LHC collides protons at high energy, and the production modes of the Higgs boson at the LHC, in order of decreasing cross-section, are listed below (a rough event-yield estimate follows the list):
\begin{itemize}
%\setlength%{\linespread}{1.2\baselineskip}
\item \textbf{Gluon-Gluon Fusion (ggH)}: Since gluons are massless, they do not couple directly to the h. This production mode proceeds via a quark loop. The ggH production cross-section at 13 TeV is $\sim48.37\,pb$ at N3LO~\cite{YR4}.
\item \textbf {Vector Boson Fusion (VBF)}: This production mode has the second largest cross section at the LHC. This mode is characterized by two high-momentum quarks in the final state which hadronize to form jets. The VBF production cross-section at 13 TeV center-of-mass energy is $\sim3.77\,pb$ at NNLO.
\item \textbf {Associated Production}: The third largest h production mode at the LHC involves the production of a virtual $W^*/Z^*$ boson that splits into a real W/Z boson and an h. The WH production cross-section is $\sim1.36\,pb$ and the ZH production cross-section is $\sim0.87\,pb$, at NNLO level for a center-of-mass energy of 13 TeV at the LHC.
\item \textbf {ttH Production}: In this production mode, the h is produced along with a pair of top quarks. The production cross-section at 13 TeV center-of-mass energy is $\sim0.50\,pb$ at NLO.
\end{itemize}
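To put these cross-sections in perspective, a back-of-the-envelope estimate of the number of Higgs bosons produced via gluon-gluon fusion in the 36\,$\mathrm{fb}^{-1}$ dataset analyzed in this thesis is simply the product of cross-section and integrated luminosity (detector acceptance and efficiencies are ignored here):
\begin{equation*}
N_{\mathrm{ggH}} \approx \sigma_{\mathrm{ggH}}\times\mathcal{L} = 48.37\,\mathrm{pb}\times36\,\mathrm{fb}^{-1} = (48.37\times10^{3}\,\mathrm{fb})\times(36\,\mathrm{fb}^{-1}) \approx 1.7\times10^{6}.
\end{equation*}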
The Feynman diagrams for the h production modes described above are shown in Figure~\ref{fig:higs_feyn}. The cross-section of each process as a function of center-of-mass energy is shown in Figure~\ref{fig:higs_xs_som}. Figure~\ref{fig:higs_decays} shows the branching fractions of the various SM decays of the Higgs boson as a function of its mass. It is interesting to note that the 2012 discovery was made by combining the channels where the Higgs boson decays into Z$\mathrm{Z}^*$ or into $\gamma\gamma$ (di-photon). Even though the Higgs boson doesn't directly interact with the massless photon, there are loop-order contributions. Although the rate of this channel is small compared to the others, its clean final-state signature made it one of the primary channels for the discovery.
\begin{figure}[hbtp]
\begin{center}
\includegraphics[width=0.95\textwidth]{plots_and_figures/chapter2/higgs_prod.png}
\caption{Feynman diagrams for the Higgs boson production modes at the LHC: (a) gluon-gluon fusion, (b) vector boson fusion, (c) associated production and (d) ttH~\cite{higg_prod}.}
\label{fig:higs_feyn}
\end{center}
\end{figure}
\begin{figure}[hbtp]
\begin{center}
\captionsetup{width=.8\textwidth}
\includegraphics[width=0.9\textwidth]{plots_and_figures/chapter2/higgs_xs.png}
\caption{The SM Higgs boson production cross-section as a function of the center-of-mass energy in proton-proton collisions at the LHC~\cite{higg_prod}.}
\label{fig:higs_xs_som}
\end{center}
\end{figure}
\begin{figure}[hbtp]
\begin{center}
\captionsetup{width=.8\textwidth}
\includegraphics[width=0.9\textwidth]{plots_and_figures/chapter2/higgs_decays.pdf}
\caption{Branching fractions to SM decays of the Higgs boson as function of mass~\cite{hg_decay}.}
\label{fig:higs_decays}
\end{center}
\end{figure}
\section{Inadequacies of the SM}
Despite being a faithful description of nature, the SM is not perfect. There are several motivations that suggest the existence of physics beyond the SM. We outline some of these here.
To start, the SM falls short of being an ideal theory of everything because it doesn't include gravitation. Including the gravitational interaction in the SM has proven to be a difficult challenge: it hasn't yet been possible to incorporate the most successful theory of gravity, General Relativity, and the SM into a single framework. Secondly, neutrinos, which are electrically neutral leptons (see Section~\ref{fermions}), are strictly massless within the SM. However, it has been well established by experiments that neutrinos oscillate (change flavor), which is only possible if neutrinos have mass. The small but finite masses that neutrinos are now known to have do not fit with the SM formulation. Thirdly, cosmological observations point to the existence of types of matter and energy whose origin cannot be explained within the SM. They are referred to as dark matter and dark energy. About 26\% of the universe is known to be made of dark matter and 69\% of dark energy. Thus, particles of the SM form only 5\% of the observable universe. Finally, it is believed that matter and anti-matter were produced in almost equal amounts at the Big Bang. However, the universe is made almost entirely of matter, and there is no mechanism in the SM that explains how we ended up with a matter-dominated universe. Besides the unexplained phenomena outlined above, our understanding of some theoretical features of the SM is inadequate. The SM contains no fewer than 19 free numerical parameters. The values of these parameters are known, but we do not have an understanding of their origins.
To address such shortcomings, many theories have been proposed that modify the SM in such a way that they remain consistent with existing observations, while at the same time trying to address its imperfections. These theories, called BSM (beyond the standard model) theories, predict many outcomes that are otherwise not allowed by the SM. The recently discovered h unlocks a portal to look for these outcomes. As mentioned earlier, the constraint on the branching fraction to non-SM decay modes of the h, derived from a combined study by CMS and ATLAS, is B(non-SM) $<$ 34\% at 95\% confidence level (CL)~\cite{JHEP2016:45}. These limits leave room for a significant contribution from exotic (non-SM) decays in the BSM Higgs sector. One interesting class of processes forbidden in the SM but present in many new physics scenarios is interactions between charged leptons that violate the conservation of lepton flavor. In particular, Lepton Flavor Violating (LFV) decays of the h are allowed by these theories, and could be realized in decays of the (electrically neutral) h into two charged leptons of different flavor. Looking for LFV decays involving charged leptons is also interesting in the light of the neutrino oscillations mentioned earlier, which also violate lepton flavor, a phenomenon that remains unexplained by the SM~\cite{th_kell}.
\section{BSM models with lepton flavor violation}
\label{sec:BSM}
Like all fermions, charged leptons acquire mass from their interaction with the Higgs field, which couples to them via Yukawa couplings. The Yukawa coupling matrix is diagonal in the SM:
\[
Y=\begin{pmatrix}
Y_{ee} & 0 & 0 \\
0 & Y_{\mu\mu} & 0 \\
0 & 0 & Y_{\Pgt\Pgt}
\end{pmatrix}
\]
However, in BSM models the above does not necessarily hold~\cite{Harnik:2012pb}, and off-diagonal Yukawa couplings are possible. In a model containing only the SM Higgs boson as the source of EWSB, an effective field theory approach can be used to introduce off-diagonal couplings~\cite{DiazCruz:1999xe}. If only SM particles (quarks, leptons, gauge and Higgs bosons) are considered to exist up to a certain energy scale, $\Lambda$, the additional heavy fields can be integrated out, leading to an effective field theory. Higher-dimensional operators of dimension 6 then suffice to introduce LFV couplings. Interestingly, dimension-5 operators introduce neutrino oscillations into the SM, but not LFV in the interactions of charged leptons. Dimension-6 operators decouple the values of the fermion couplings to the h from the fermion masses. The Yukawa couplings can then be written as:
\begin{equation}
Y_{ij}=\frac{m_{i}}{v}\delta_{ij}+\frac{v^2}{\sqrt{2}\Lambda^2}\hat{\lambda}_{ij}
\end{equation}
where the $\hat{\lambda}_{ij}$ are coefficients associated with the dimension-6 operators. In the limit $\Lambda\rightarrow\infty$, we recover the SM and the off-diagonal couplings vanish. Thus, LFV couplings can be introduced as long as the mass scale is finite. In BSM models with several sources of EWSB, LFV couplings can be introduced without this restriction. Two-Higgs-doublet models (2HDM) constitute general models of this class, and allow the violation of lepton flavor~\cite{PhysRevLett.38.622}. Supersymmetric models~\cite{Han:2000jz,Arganda:2004bz,Arhrib:2012ax,Arana-Catania:2013xma,Arganda:2015uca,Arganda:2015naa}, such as the Minimal Supersymmetric Standard Model (MSSM) and the Next-to-Minimal Supersymmetric Standard Model (NMSSM), also postulate multiple Higgs bosons and give rise to LFV interactions. Supersymmetric models introduce a bosonic (supersymmetric) partner for every SM fermion and a fermionic partner for every SM boson. These additional particles, if discovered, could be suitable dark matter candidates. Other models that allow LFV interactions include~\cite{HIG-17-001} composite Higgs models~\cite{Agashe:2009di,Azatov:2009na}, which consider the SM h to be a bound state of other BSM particles, partially composite Higgs models such as Randall-Sundrum models~\cite{Perez:2008ee,Casagrande:2008hr,Buras:2009ka}, and several others~\cite{Blanke:2008zb,Giudice:2008uua,AguilarSaavedra:2009mx,Albrecht:2009xr,Goudelis:2011un,McKeen:2012av,Pilaftsis199268,PhysRevD.47.1080,Arganda:2014dta,Ishimori:2010au}.
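The connection between these off-diagonal couplings and an observable decay rate can be made explicit. Neglecting the final-state lepton masses, the LFV partial width takes the standard form used in Ref.~\cite{Harnik:2012pb} (quoted here for illustration rather than derived in this text):
\begin{equation*}
\Gamma(\hmu) = \frac{m_{h}}{8\pi}\left(|Y_{\Pgm\Pgt}|^{2}+|Y_{\Pgt\Pgm}|^{2}\right),
\end{equation*}
so any non-zero off-diagonal coupling translates directly into a non-zero $\hmu$ rate.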
\section{Pre-LHC constraints on LFV couplings}
Indirect low-energy measurements from the pre-LHC era can be used to constrain the $\hmu$ decay. These constraints were derived and summarized in~\cite{Harnik:2012pb}. For example, constraints on the $\Pgt\rightarrow\Pgm\gamma$ transition, which proceeds via a virtual Higgs boson, can be used to constrain the $\hmu$ decay. Feynman diagrams contributing to this process at one-loop level are shown in Figure~\ref{fig:prelhclfv}. Further constraints come from $\Pgt\rightarrow 3\Pgm$ decays and from anomalous magnetic dipole moments, and are shown in the same figure. The constraints on the Yukawa couplings derived from the above measurements can be converted to constraints on $Br(\hmu)$, following the procedure described in Section~\ref{results}. These constraints set an upper limit of $Br(\hmu)\lesssim 10\%$, thus leaving a lot of room to search for this decay. Similar constraints exist for the $\he$ LFV decay, and are set at $Br(\he)\lesssim 10\%$. Indeed, CMS searches for the $\he$ decay have been performed along with the searches for $\hmu$. Finally, it is interesting to note that the LFV decay $\hflep$ is very strongly constrained by $\Pgm\rightarrow\Pe\gamma$ decays, giving a very stringent upper limit of $Br(\hflep)\lesssim 2\times10^{-8}$. Due to such strong existing constraints, this search has not been performed by CMS; instead, the $\hmu$ (this thesis) and $\he$ searches are performed.
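For reference, the conversion from partial width to branching fraction used in such constraints is simply
\begin{equation*}
Br(\hmu) = \frac{\Gamma(\hmu)}{\Gamma(\hmu)+\Gamma_{\mathrm{SM}}},
\end{equation*}
where $\Gamma_{\mathrm{SM}}\approx4.1$\,MeV is the predicted total width of a 125\,GeV SM Higgs boson (a number quoted here for illustration, not taken from the text above), so an upper limit on the branching fraction maps directly onto an upper limit on the off-diagonal Yukawa couplings.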
\begin{figure}[hbtp]
\begin{center}
\includegraphics[width=0.9\textwidth]{plots_and_figures/chapter2/tau_mugamma.png}\\
\includegraphics[width=0.9\textwidth]{plots_and_figures/chapter2/tau_3mu.png}\\
\includegraphics[width=0.6\textwidth]{plots_and_figures/chapter2/dipole.png}
\caption{Diagrams contributing to the flavor violating processes $\Pgt\rightarrow\Pgm\gamma$ (top) and $\Pgt\rightarrow 3\Pgm$ (middle), and to the anomalous magnetic moment of the muon (bottom)~\cite{Harnik:2012pb}.}
\label{fig:prelhclfv}
\end{center}
\end{figure}
\section{Constraints from previous LHC searches}
The first direct search for LFV Higgs boson decays was published by the CMS Collaboration in 2015~\cite{Khachatryan:2015kon}. This search improved the limits listed above by an order of magnitude, to an observed (expected) limit of $\mathcal{B}(\hmu)<1.51\%$ (0.75\%) at 95\% CL. This was followed by another search (2016) which set observed (expected) upper limits on the branching fraction of $\mathcal{B}(\he)<0.69\%$ (0.75\%) at 95\% CL~\cite{HIG-14-040}. Both searches were performed with 19.7\,$\mathrm{fb}^{-1}$ of pp collision data collected at 8\,TeV center-of-mass energy by CMS during Run I of the LHC. The limits from these searches are summarized graphically in Figure~\ref{fig:8tev_limits}. In 2015 and 2017, the ATLAS Collaboration also published results from similar searches performed with data collected by the ATLAS detector~\cite{Aad:2016blu,Aad:2015gha}. The observed (expected) limits were set at $\mathcal{B}(\hmu)<1.43\%$ (1.01\%) and $\mathcal{B}(\he)<1.04\%$ (1.21\%) at 95\% CL.
\begin{figure*}[hbtp]
\begin{center}
\captionsetup{width=.7\textwidth,justification=centering}
\includegraphics[width=0.6\textwidth]{plots_and_figures/chapter2/mutau_limits.jpg}\\
\includegraphics[width=0.6\textwidth]{plots_and_figures/chapter2/etau_limits.jpg}
\caption{Limits from Run I searches performed by CMS for $\hmu$ (top) and $\he$ (bottom)~\cite{Khachatryan:2015kon,HIG-14-040}.}
\label{fig:8tev_limits}
\end{center}
\end{figure*}
The 2015 CMS search for $\hmu$ saw an excess of events with a significance of 2.4\,$\sigma$. Although this excess is not large enough to claim evidence for this decay, it gives us a strong motivation to perform this search with a larger amount of data, which would either confirm the excess or rule it out and set much stricter limits on this process. The dataset collected by the CMS detector in 2016 provides us with such an opportunity. It corresponds to proton-proton collision data at a much higher center-of-mass energy of 13\,TeV. The number of h bosons produced depends on the cross-section, and since the cross-section increases at higher center-of-mass energies (see Figure~\ref{fig:higs_xs_som}), a much larger number of h bosons is produced. Also, the 2016 dataset has a size of 36\,$\mathrm{fb}^{-1}$, which is almost twice the size of the Run I dataset. This thesis describes this search specifically in the channel where the $\Pgt$ decays into an electron, i.e. the $\hmue$ channel.
\section{Motivations for $\Hmue$ search}
As mentioned in Section~\ref{sec:BSM}, many of the BSM models that allow LFV decays of the h predict the existence of additional heavy Higgs bosons. For example, the 2HDM predicts the existence of two heavy neutral Higgs bosons, H (CP-even) and A (CP-odd). According to a theoretical study published in 2016~\cite{PhysRevD.93.055021}, these heavy bosons (henceforth referred to as H) are expected to decay in a lepton flavor violating manner just like their SM counterpart, h. A direct search for $\Hmu$ would thus provide a complementary probe of the BSM models that postulate the existence of such heavy neutral H bosons. In fact, the 2015 CMS search for $\hmu$ was reinterpreted as a search for the $\Hmu$ decay~\cite{Buschmann:2016pb}, and limits on $\sigma(\textrm{gg}\rightarrow \PH)\times\mathcal{B}(\Hmu)$ were set for H bosons in the mass range of 150\,GeV to 300\,GeV. We describe here the first direct search for the $\Hmu$ decay, in the channel where the $\Pgt$ decays into an electron, i.e. the $\Hmue$ channel. Only the primary H production mode (gluon fusion) is considered for this search. This search uses the same dataset as the $\hmu$ search, i.e. 36\,$\mathrm{fb}^{-1}$ of pp collision data at 13 TeV center-of-mass energy collected in 2016, and probes H masses in the range $200<m_H<900$\,GeV.
|
{-# OPTIONS --cubical --no-import-sorts --safe #-}
module Cubical.Experiments.ZCohomologyOld.KcompPrelims where
open import Cubical.ZCohomology.Base
open import Cubical.Homotopy.Connected
open import Cubical.HITs.Hopf
-- open import Cubical.Homotopy.Freudenthal hiding (encode)
open import Cubical.HITs.Sn
open import Cubical.HITs.S1
open import Cubical.HITs.Truncation renaming (elim to trElim ; rec to trRec ; map to trMap)
open import Cubical.Foundations.Prelude
open import Cubical.Foundations.HLevels
open import Cubical.Foundations.Isomorphism
open import Cubical.Foundations.Equiv
open import Cubical.Foundations.Transport
open import Cubical.Foundations.Path
open import Cubical.Foundations.GroupoidLaws
open import Cubical.Foundations.Univalence
open import Cubical.Foundations.Equiv.HalfAdjoint
open import Cubical.Data.Int renaming (_+_ to +Int) hiding (_·_)
open import Cubical.Data.Nat hiding (_·_)
open import Cubical.Data.Unit
open import Cubical.HITs.Susp
open import Cubical.HITs.Nullification
open import Cubical.Data.Prod.Base
open import Cubical.Homotopy.Loopspace
open import Cubical.Data.Bool
open import Cubical.Data.Sum.Base
open import Cubical.Data.Sigma hiding (_×_)
open import Cubical.Foundations.Function
open import Cubical.Foundations.Pointed
open import Cubical.HITs.S3
private
variable
ℓ : Level
A : Type ℓ
{- We want to prove that Kn≃ΩKn+1. For this we use the map ϕ-}
-- Proof of Kₙ ≃ ∥ ΩSⁿ⁺¹ ∥ₙ for $n ≥ 2$
-- Entirely based on Cavallo's proof of Freudenthal in Cubical.Homotopy.Freudenthal
module miniFreudenthal (n : HLevel) where
σ : S₊ (2 + n) → typ (Ω (S₊∙ (3 + n)))
σ a = merid a ∙ merid north ⁻¹
S2+n = S₊ (2 + n)
4n+2 = (2 + n) + (2 + n)
module WC-S (p : north ≡ north) where
P : (a b : S₊ (2 + n)) → Type₀
P a b = σ b ≡ p → hLevelTrunc 4n+2 (fiber (λ x → merid x ∙ merid a ⁻¹) p)
hLevelP : (a b : S₊ (2 + n)) → isOfHLevel 4n+2 (P a b)
hLevelP _ _ = isOfHLevelΠ 4n+2 λ _ → isOfHLevelTrunc 4n+2
leftFun : (a : S₊ (2 + n)) → P a north
leftFun a r = ∣ a , (rCancel' (merid a) ∙ rCancel' (merid north) ⁻¹) ∙ r ∣
rightFun : (b : S₊ (2 + n)) → P north b
rightFun b r = ∣ b , r ∣
funsAgree : leftFun north ≡ rightFun north
funsAgree =
funExt λ r → (λ i → ∣ north , rCancel' (rCancel' (merid north)) i ∙ r ∣)
∙ λ i → ∣ north , lUnit r (~ i) ∣
totalFun : (a b : S2+n) → P a b
totalFun = wedgeConSn (suc n) (suc n) hLevelP rightFun leftFun funsAgree .fst
leftId : (λ x → totalFun x north) ≡ leftFun
leftId x i = wedgeConSn (suc n) (suc n) hLevelP rightFun leftFun funsAgree .snd .snd i x
fwd : (p : north ≡ north) (a : S2+n)
→ hLevelTrunc 4n+2 (fiber σ p)
→ hLevelTrunc 4n+2 (fiber (λ x → merid x ∙ merid a ⁻¹) p)
fwd p a = trRec (isOfHLevelTrunc 4n+2) (uncurry (WC-S.totalFun p a))
fwdnorth : (p : north ≡ north) → fwd p north ≡ idfun _
fwdnorth p = funExt (trElim (λ _ → isOfHLevelPath 4n+2 (isOfHLevelTrunc 4n+2) _ _)
λ p → refl)
isEquivFwd : (p : north ≡ north) (a : S2+n) → isEquiv (fwd p a)
isEquivFwd p =
suspToPropElim (ptSn (suc n))
(λ _ → isPropIsEquiv _)
helper
where
helper : isEquiv (fwd p north)
helper = subst isEquiv (sym (fwdnorth p)) (idIsEquiv _)
interpolate : (a : S2+n)
→ PathP (λ i → S2+n → north ≡ merid a i) (λ x → merid x ∙ merid a ⁻¹) merid
interpolate a i x j = compPath-filler (merid x) (merid a ⁻¹) (~ i) j
Code : (y : Susp S2+n) → north ≡ y → Type₀
Code north p = hLevelTrunc 4n+2 (fiber σ p)
Code south q = hLevelTrunc 4n+2 (fiber merid q)
Code (merid a i) p =
Glue
(hLevelTrunc 4n+2 (fiber (interpolate a i) p))
(λ
{ (i = i0) → _ , (fwd p a , isEquivFwd p a)
; (i = i1) → _ , idEquiv _
})
encodeS : (y : S₊ (3 + n)) (p : north ≡ y) → Code y p
encodeS y = J Code ∣ north , rCancel' (merid north) ∣
encodeMerid : (a : S2+n) → encodeS south (merid a) ≡ ∣ a , refl ∣
encodeMerid a =
cong (transport (λ i → gluePath i))
(funExt⁻ (funExt⁻ (WC-S.leftId refl) a) _ ∙ λ i → ∣ a , lem (rCancel' (merid a)) (rCancel' (merid north)) i ∣)
∙ transport (PathP≡Path gluePath _ _)
(λ i → ∣ a , (λ j k → rCancel-filler' (merid a) i j k) ∣)
where
gluePath : I → Type _
gluePath i = hLevelTrunc 4n+2 (fiber (interpolate a i) (λ j → merid a (i ∧ j)))
lem : ∀ {ℓ} {A : Type ℓ} {x y z : A} (p : x ≡ y) (q : z ≡ y) → (p ∙ q ⁻¹) ∙ q ≡ p
lem p q = assoc p (q ⁻¹) q ⁻¹ ∙ cong (p ∙_) (lCancel q) ∙ rUnit p ⁻¹
contractCodeSouth : (p : north ≡ south) (c : Code south p) → encodeS south p ≡ c
contractCodeSouth p =
trElim
(λ _ → isOfHLevelPath 4n+2 (isOfHLevelTrunc 4n+2) _ _)
(uncurry λ a →
J (λ p r → encodeS south p ≡ ∣ a , r ∣) (encodeMerid a))
isConnectedMerid : isConnectedFun 4n+2 (merid {A = S2+n})
isConnectedMerid p = encodeS south p , contractCodeSouth p
isConnectedσ : isConnectedFun 4n+2 σ
isConnectedσ =
transport (λ i → isConnectedFun 4n+2 (interpolate north (~ i))) isConnectedMerid
isConnectedσ-Sn : (n : ℕ) → isConnectedFun (4 + n) (miniFreudenthal.σ n)
isConnectedσ-Sn n =
isConnectedFunSubtr _ n _
(subst (λ x → isConnectedFun x (miniFreudenthal.σ n))
helper
(miniFreudenthal.isConnectedσ n))
where
helper : 2 + (n + (2 + n)) ≡ n + (4 + n)
helper = cong suc (sym (+-suc n _)) ∙ sym (+-suc n _)
stabSpheres-n≥2 : (n : ℕ) → Iso (hLevelTrunc (4 + n) (S₊ (2 + n)))
(hLevelTrunc (4 + n) (typ (Ω (S₊∙ (3 + n)))))
stabSpheres-n≥2 n = connectedTruncIso (4 + n) (miniFreudenthal.σ n) (isConnectedσ-Sn n)
--
ϕ : (pt a : A) → typ (Ω (Susp A , north))
ϕ pt a = (merid a) ∙ sym (merid pt)
private
Kn→ΩKn+1 : (n : ℕ) → coHomK n → typ (Ω (coHomK-ptd (suc n)))
Kn→ΩKn+1 zero x i = ∣ intLoop x i ∣
Kn→ΩKn+1 (suc zero) = trRec (isOfHLevelTrunc 4 ∣ north ∣ ∣ north ∣)
λ a i → ∣ ϕ base a i ∣
Kn→ΩKn+1 (suc (suc n)) = trRec (isOfHLevelTrunc (2 + (3 + n)) ∣ north ∣ ∣ north ∣)
λ a i → ∣ ϕ north a i ∣
d-map : typ (Ω ((Susp S¹) , north)) → S¹
d-map p = subst HopfSuspS¹ p base
d-mapId : (r : S¹) → d-map (ϕ base r) ≡ r
d-mapId r = substComposite HopfSuspS¹ (merid r) (sym (merid base)) base ∙
rotLemma r
where
rotLemma : (r : S¹) → r · base ≡ r
rotLemma base = refl
rotLemma (loop i) = refl
sphereConnectedSpecCase : isConnected 4 (Susp (Susp S¹))
sphereConnectedSpecCase = sphereConnected 3
d-mapComp : Iso (fiber d-map base) (Path (S₊ 3) north north)
d-mapComp = compIso (IsoΣPathTransportPathΣ {B = HopfSuspS¹} _ _)
(congIso (invIso IsoS³TotalHopf))
is1Connected-dmap : isConnectedFun 3 d-map
is1Connected-dmap = toPropElim (λ _ → isPropIsOfHLevel 0)
(isConnectedRetractFromIso 3 d-mapComp
(isOfHLevelRetractFromIso 0 (invIso (PathIdTruncIso 3))
contrHelper))
where
contrHelper : isContr (Path (∥ Susp (Susp S¹) ∥ 4) ∣ north ∣ ∣ north ∣)
fst contrHelper = refl
snd contrHelper = isOfHLevelPlus {n = 0} 2 (sphereConnected 3) ∣ north ∣ ∣ north ∣ refl
d-Iso : Iso (∥ Path (S₊ 2) north north ∥ 3) (coHomK 1)
d-Iso = connectedTruncIso _ d-map is1Connected-dmap
d-mapId2 : Iso.fun d-Iso ∘ trMap (ϕ base) ≡ idfun (coHomK 1)
d-mapId2 = funExt (trElim (λ _ → isOfHLevelPath 3 (isOfHLevelTrunc 3) _ _) λ a i → ∣ d-mapId a i ∣)
Iso∥ϕ₁∥ : Iso (coHomK 1) (∥ Path (S₊ 2) north north ∥ 3)
Iso∥ϕ₁∥ = composesToId→Iso d-Iso (trMap (ϕ base)) d-mapId2
Iso-Kn-ΩKn+1 : (n : HLevel) → Iso (coHomK n) (typ (Ω (coHomK-ptd (suc n))))
Iso-Kn-ΩKn+1 zero = invIso (compIso (congIso (truncIdempotentIso _ isGroupoidS¹)) ΩS¹IsoInt)
Iso-Kn-ΩKn+1 (suc zero) = compIso Iso∥ϕ₁∥ (invIso (PathIdTruncIso 3))
Iso-Kn-ΩKn+1 (suc (suc n)) = compIso (stabSpheres-n≥2 n)
(invIso (PathIdTruncIso (4 + n)))
where
helper : n + (4 + n) ≡ 2 + (n + (2 + n))
helper = +-suc n (3 + n) ∙ (λ i → suc (+-suc n (2 + n) i))
|
r=0.87
https://sandbox.dams.library.ucdavis.edu/fcrepo/rest/collection/sherry-lehmann/catalogs/d7vp48/media/images/d7vp48-029/svc:tesseract/full/full/0.87/default.jpg Accept:application/hocr+xml
|
There were five main storylines in the run. The first, collected as "Original Sins", deals with John travelling to America to exorcise a demon, <unk>, and investigate a strange cult known as Damnation's Army, crossing paths with a demon called Nergal (from whom he gains demon blood), and having to be responsible for killing an old friend, Gary Lester, and betraying another, called Zed, in the process. The following four issues, "The Devil You Know", finally explain John's failure to save a young girl, Astra, from a demon in Newcastle, an event that left him near insane and incarcerated in an asylum known as <unk>, and still haunted him to the comic's end. He eventually discovers that the demon responsible for this was Nergal, and uses a technological scheme to trap him, and lead him back to hell. It also contains a crossover with Swamp Thing, where Constantine loses his body while the Swamp Thing uses it to procreate.
|
{-# OPTIONS --cubical --safe --postfix-projections #-}
module Function.Injective.Base where
open import Level
open import Path
open import Data.Sigma
Injective : (A → B) → Type _
Injective f = ∀ x y → (f⟨x⟩≡f⟨y⟩ : f x ≡ f y) → x ≡ y
infixr 0 _↣_
_↣_ : Type a → Type b → Type (a ℓ⊔ b)
A ↣ B = Σ[ f ⦂ (A → B) ] × Injective f
refl-↣ : A ↣ A
refl-↣ .fst x = x
refl-↣ .snd x y x≡y = x≡y
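-- A hypothetical usage sketch (not part of the original module): injections
-- compose.  The extra level and type parameters c and C are introduced
-- explicitly here rather than assumed from the ambient variable declarations.
comp-↣ : ∀ {c} {C : Type c} → A ↣ B → B ↣ C → A ↣ C
comp-↣ f g .fst x = g .fst (f .fst x)
comp-↣ f g .snd x y gfx≡gfy = f .snd x y (g .snd (f .fst x) (f .fst y) gfx≡gfy)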
|
If $a \neq 0$, then the fold of the coefficients of the monomial $a x^n$ is the composition of $f(0)$ with itself $n$ times, followed by $f(a)$. |
\documentclass[11pt]{amsbook}
\usepackage{../HBSuerDemir} % ------------------------
\begin{document}
% ++++++++++++++++++++++++++++++++++++++
\hPage{b2p2/312}
% ++++++++++++++++++++++++++++++++++++++
\begin{align*}
4A \frac{\hDif A}{\hDif t} = x(y^2+z^2) \frac{\hDif x}{\hDif t} + y(z^2+x^2) \frac{\hDif y}{\hDif t} + z(x^2+y^2) \frac{\hDif z}{\hDif t}
\end{align*}
When P is at $P_{O}(6, 0, 0)$, then Q is at $Q_{O}(0, 9, 0)$ and R is at $R_{O}(0, 0, 12)$, with area $A = |P_{O}Q_{O}R_{O}| = 9\sqrt{61}$. Then
\begin{align*}
4\cdot 9\sqrt{61}\,\frac{\hDif A}{\hDif t} = 6(225)(2) + 9(180)(3) + 12(117)(4) \\
9\sqrt{61}\,\frac{\hDif A}{\hDif t} = 675 + 27\cdot 45 + 12\cdot 117 \\
\sqrt{61}\,\frac{\hDif A}{\hDif t} = 75 + 135 + 156 = 366 \\
\frac{\hDif A}{\hDif t} = \frac{366}{\sqrt{61}} = 6\sqrt{61}\ \mathrm{unit}^2/\mathrm{sec}
\end{align*}
\subsection{TAYLOR'S FORMULA AND SERIES}
\begin{thm} If $f(x, y)$ has continuous partial derivatives up to order $n+1$ in a neighborhood of $(a, b)\in \upsilon_{f}$, then
\begin{align*}
f(x, y) = f(a, b) + \sum_{k=1}^{n} \frac{1}{k!} \left((x-a) \frac{\partial}{\partial x} + (y-b) \frac{\partial}{\partial y}\right)^k f(x, y) \Big|_{(a, b)} + R_{n+1}
\end{align*}
where the remainder is given by
\begin{align*}
R_{n+1} = \frac{1}{(n+1)!} \left((x-a) \frac{\partial}{\partial x} + (y-b) \frac{\partial}{\partial y}\right)^{n+1} f(x, y) \Big|_{(x^{*}, y^{*})}
\end{align*}
with $(x^{*}, y^{*})$ a point on the open segment $(P_{O}P)$ joining $P_{O}(a, b)$ to $P(x, y)$
\end{thm}
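For instance, keeping only the $k=1$ term gives the familiar linear (tangent plane) approximation
\begin{align*}
f(x, y) \approx f(a, b) + (x-a)\,\frac{\partial f}{\partial x}(a, b) + (y-b)\,\frac{\partial f}{\partial y}(a, b),
\end{align*}
with the error controlled by the remainder $R_{2}$.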
\begin{proof} Since every point of the line segment $[P_{O}P]$ can be represented parametrically as
\begin{align*}
x = a+ht,\qquad y = b+kt\qquad 0\leq t \leq 1,
\end{align*}
\includegraphics[width=0.35\textwidth]{images/b2p2-312-fig01}
The end points of the segment correspond to $t=0$ and $t=1$ (observe that $h$, $k$ are direction numbers of the line segment).
Substituting (2) in $f(x, y)$ gives the function
\end{proof}
\end{document} |
State Before: 𝕜 : Type u_1
E : Type u_2
F : Type ?u.296238
α : Type ?u.296241
β : Type u_3
ι : Type ?u.296247
inst✝⁵ : OrderedSemiring 𝕜
inst✝⁴ : AddCommMonoid E
inst✝³ : AddCommMonoid F
inst✝² : OrderedCancelAddCommMonoid β
inst✝¹ : SMul 𝕜 E
inst✝ : DistribMulAction 𝕜 β
s : Set E
f g : E → β
hf : StrictConvexOn 𝕜 s f
hg : StrictConvexOn 𝕜 s g
x : E
hx : x ∈ s
y : E
hy : y ∈ s
hxy : x ≠ y
a b : 𝕜
ha : 0 < a
hb : 0 < b
hab : a + b = 1
⊢ a • f x + b • f y + (a • g x + b • g y) = a • (f x + g x) + b • (f y + g y) State After: no goals Tactic: rw [smul_add, smul_add, add_add_add_comm] |
[GOAL]
F : Type u
K : Type v
A : Type w
inst✝¹⁰ : CommRing F
inst✝⁹ : Ring K
inst✝⁸ : AddCommGroup A
inst✝⁷ : Algebra F K
inst✝⁶ : Module K A
inst✝⁵ : Module F A
inst✝⁴ : IsScalarTower F K A
inst✝³ : StrongRankCondition F
inst✝² : StrongRankCondition K
inst✝¹ : Module.Free F K
inst✝ : Module.Free K A
⊢ lift (Module.rank F K) * lift (Module.rank K A) = lift (Module.rank F A)
[PROOFSTEP]
obtain ⟨_, b⟩ := Module.Free.exists_basis (R := F) (M := K)
[GOAL]
case intro.mk
F : Type u
K : Type v
A : Type w
inst✝¹⁰ : CommRing F
inst✝⁹ : Ring K
inst✝⁸ : AddCommGroup A
inst✝⁷ : Algebra F K
inst✝⁶ : Module K A
inst✝⁵ : Module F A
inst✝⁴ : IsScalarTower F K A
inst✝³ : StrongRankCondition F
inst✝² : StrongRankCondition K
inst✝¹ : Module.Free F K
inst✝ : Module.Free K A
fst✝ : Type v
b : Basis fst✝ F K
⊢ lift (Module.rank F K) * lift (Module.rank K A) = lift (Module.rank F A)
[PROOFSTEP]
obtain ⟨_, c⟩ := Module.Free.exists_basis (R := K) (M := A)
[GOAL]
case intro.mk.intro.mk
F : Type u
K : Type v
A : Type w
inst✝¹⁰ : CommRing F
inst✝⁹ : Ring K
inst✝⁸ : AddCommGroup A
inst✝⁷ : Algebra F K
inst✝⁶ : Module K A
inst✝⁵ : Module F A
inst✝⁴ : IsScalarTower F K A
inst✝³ : StrongRankCondition F
inst✝² : StrongRankCondition K
inst✝¹ : Module.Free F K
inst✝ : Module.Free K A
fst✝¹ : Type v
b : Basis fst✝¹ F K
fst✝ : Type w
c : Basis fst✝ K A
⊢ lift (Module.rank F K) * lift (Module.rank K A) = lift (Module.rank F A)
[PROOFSTEP]
rw [← (Module.rank F K).lift_id, ← b.mk_eq_rank, ← (Module.rank K A).lift_id, ← c.mk_eq_rank, ← lift_umax.{w, v}, ←
(b.smul c).mk_eq_rank, mk_prod, lift_mul, lift_lift, lift_lift, lift_lift, lift_lift, lift_umax.{v, w}]
[GOAL]
F✝ : Type u
K✝ : Type v
A✝ : Type w
inst✝²¹ : CommRing F✝
inst✝²⁰ : Ring K✝
inst✝¹⁹ : AddCommGroup A✝
inst✝¹⁸ : Algebra F✝ K✝
inst✝¹⁷ : Module K✝ A✝
inst✝¹⁶ : Module F✝ A✝
inst✝¹⁵ : IsScalarTower F✝ K✝ A✝
inst✝¹⁴ : StrongRankCondition F✝
inst✝¹³ : StrongRankCondition K✝
inst✝¹² : Module.Free F✝ K✝
inst✝¹¹ : Module.Free K✝ A✝
F : Type u
K A : Type v
inst✝¹⁰ : CommRing F
inst✝⁹ : Ring K
inst✝⁸ : AddCommGroup A
inst✝⁷ : Algebra F K
inst✝⁶ : Module K A
inst✝⁵ : Module F A
inst✝⁴ : IsScalarTower F K A
inst✝³ : StrongRankCondition F
inst✝² : StrongRankCondition K
inst✝¹ : Module.Free F K
inst✝ : Module.Free K A
⊢ Module.rank F K * Module.rank K A = Module.rank F A
[PROOFSTEP]
convert lift_rank_mul_lift_rank F K A
[GOAL]
case h.e'_2.h.e'_5
F✝ : Type u
K✝ : Type v
A✝ : Type w
inst✝²¹ : CommRing F✝
inst✝²⁰ : Ring K✝
inst✝¹⁹ : AddCommGroup A✝
inst✝¹⁸ : Algebra F✝ K✝
inst✝¹⁷ : Module K✝ A✝
inst✝¹⁶ : Module F✝ A✝
inst✝¹⁵ : IsScalarTower F✝ K✝ A✝
inst✝¹⁴ : StrongRankCondition F✝
inst✝¹³ : StrongRankCondition K✝
inst✝¹² : Module.Free F✝ K✝
inst✝¹¹ : Module.Free K✝ A✝
F : Type u
K A : Type v
inst✝¹⁰ : CommRing F
inst✝⁹ : Ring K
inst✝⁸ : AddCommGroup A
inst✝⁷ : Algebra F K
inst✝⁶ : Module K A
inst✝⁵ : Module F A
inst✝⁴ : IsScalarTower F K A
inst✝³ : StrongRankCondition F
inst✝² : StrongRankCondition K
inst✝¹ : Module.Free F K
inst✝ : Module.Free K A
⊢ Module.rank F K = lift (Module.rank F K)
[PROOFSTEP]
rw [lift_id]
[GOAL]
case h.e'_2.h.e'_6
F✝ : Type u
K✝ : Type v
A✝ : Type w
inst✝²¹ : CommRing F✝
inst✝²⁰ : Ring K✝
inst✝¹⁹ : AddCommGroup A✝
inst✝¹⁸ : Algebra F✝ K✝
inst✝¹⁷ : Module K✝ A✝
inst✝¹⁶ : Module F✝ A✝
inst✝¹⁵ : IsScalarTower F✝ K✝ A✝
inst✝¹⁴ : StrongRankCondition F✝
inst✝¹³ : StrongRankCondition K✝
inst✝¹² : Module.Free F✝ K✝
inst✝¹¹ : Module.Free K✝ A✝
F : Type u
K A : Type v
inst✝¹⁰ : CommRing F
inst✝⁹ : Ring K
inst✝⁸ : AddCommGroup A
inst✝⁷ : Algebra F K
inst✝⁶ : Module K A
inst✝⁵ : Module F A
inst✝⁴ : IsScalarTower F K A
inst✝³ : StrongRankCondition F
inst✝² : StrongRankCondition K
inst✝¹ : Module.Free F K
inst✝ : Module.Free K A
⊢ Module.rank K A = lift (Module.rank K A)
[PROOFSTEP]
rw [lift_id]
[GOAL]
case h.e'_3
F✝ : Type u
K✝ : Type v
A✝ : Type w
inst✝²¹ : CommRing F✝
inst✝²⁰ : Ring K✝
inst✝¹⁹ : AddCommGroup A✝
inst✝¹⁸ : Algebra F✝ K✝
inst✝¹⁷ : Module K✝ A✝
inst✝¹⁶ : Module F✝ A✝
inst✝¹⁵ : IsScalarTower F✝ K✝ A✝
inst✝¹⁴ : StrongRankCondition F✝
inst✝¹³ : StrongRankCondition K✝
inst✝¹² : Module.Free F✝ K✝
inst✝¹¹ : Module.Free K✝ A✝
F : Type u
K A : Type v
inst✝¹⁰ : CommRing F
inst✝⁹ : Ring K
inst✝⁸ : AddCommGroup A
inst✝⁷ : Algebra F K
inst✝⁶ : Module K A
inst✝⁵ : Module F A
inst✝⁴ : IsScalarTower F K A
inst✝³ : StrongRankCondition F
inst✝² : StrongRankCondition K
inst✝¹ : Module.Free F K
inst✝ : Module.Free K A
⊢ Module.rank F A = lift (Module.rank F A)
[PROOFSTEP]
rw [lift_id]
[GOAL]
F : Type u
K : Type v
A : Type w
inst✝¹² : CommRing F
inst✝¹¹ : Ring K
inst✝¹⁰ : AddCommGroup A
inst✝⁹ : Algebra F K
inst✝⁸ : Module K A
inst✝⁷ : Module F A
inst✝⁶ : IsScalarTower F K A
inst✝⁵ : StrongRankCondition F
inst✝⁴ : StrongRankCondition K
inst✝³ : Module.Free F K
inst✝² : Module.Free K A
inst✝¹ : Module.Finite F K
inst✝ : Module.Finite K A
⊢ finrank F K * finrank K A = finrank F A
[PROOFSTEP]
letI := nontrivial_of_invariantBasisNumber F
[GOAL]
F : Type u
K : Type v
A : Type w
inst✝¹² : CommRing F
inst✝¹¹ : Ring K
inst✝¹⁰ : AddCommGroup A
inst✝⁹ : Algebra F K
inst✝⁸ : Module K A
inst✝⁷ : Module F A
inst✝⁶ : IsScalarTower F K A
inst✝⁵ : StrongRankCondition F
inst✝⁴ : StrongRankCondition K
inst✝³ : Module.Free F K
inst✝² : Module.Free K A
inst✝¹ : Module.Finite F K
inst✝ : Module.Finite K A
this : Nontrivial F := nontrivial_of_invariantBasisNumber F
⊢ finrank F K * finrank K A = finrank F A
[PROOFSTEP]
let b := Module.Free.chooseBasis F K
[GOAL]
F : Type u
K : Type v
A : Type w
inst✝¹² : CommRing F
inst✝¹¹ : Ring K
inst✝¹⁰ : AddCommGroup A
inst✝⁹ : Algebra F K
inst✝⁸ : Module K A
inst✝⁷ : Module F A
inst✝⁶ : IsScalarTower F K A
inst✝⁵ : StrongRankCondition F
inst✝⁴ : StrongRankCondition K
inst✝³ : Module.Free F K
inst✝² : Module.Free K A
inst✝¹ : Module.Finite F K
inst✝ : Module.Finite K A
this : Nontrivial F := nontrivial_of_invariantBasisNumber F
b : Basis (Module.Free.ChooseBasisIndex F K) F K := Module.Free.chooseBasis F K
⊢ finrank F K * finrank K A = finrank F A
[PROOFSTEP]
let c := Module.Free.chooseBasis K A
[GOAL]
F : Type u
K : Type v
A : Type w
inst✝¹² : CommRing F
inst✝¹¹ : Ring K
inst✝¹⁰ : AddCommGroup A
inst✝⁹ : Algebra F K
inst✝⁸ : Module K A
inst✝⁷ : Module F A
inst✝⁶ : IsScalarTower F K A
inst✝⁵ : StrongRankCondition F
inst✝⁴ : StrongRankCondition K
inst✝³ : Module.Free F K
inst✝² : Module.Free K A
inst✝¹ : Module.Finite F K
inst✝ : Module.Finite K A
this : Nontrivial F := nontrivial_of_invariantBasisNumber F
b : Basis (Module.Free.ChooseBasisIndex F K) F K := Module.Free.chooseBasis F K
c : Basis (Module.Free.ChooseBasisIndex K A) K A := Module.Free.chooseBasis K A
⊢ finrank F K * finrank K A = finrank F A
[PROOFSTEP]
rw [finrank_eq_card_basis b, finrank_eq_card_basis c, finrank_eq_card_basis (b.smul c), Fintype.card_prod]
[GOAL]
F : Type u
K : Type v
A : Type w
inst✝⁶ : Field F
inst✝⁵ : DivisionRing K
inst✝⁴ : AddCommGroup A
inst✝³ : Algebra F K
inst✝² : Module K A
inst✝¹ : Module F A
inst✝ : IsScalarTower F K A
hf : FiniteDimensional F A
b : Finset A
hb : span F ↑b = ⊤
⊢ restrictScalars F (span K ↑b) = restrictScalars F ⊤
[PROOFSTEP]
rw [Submodule.restrictScalars_top, eq_top_iff, ← hb, Submodule.span_le]
[GOAL]
F : Type u
K : Type v
A : Type w
inst✝⁶ : Field F
inst✝⁵ : DivisionRing K
inst✝⁴ : AddCommGroup A
inst✝³ : Algebra F K
inst✝² : Module K A
inst✝¹ : Module F A
inst✝ : IsScalarTower F K A
hf : FiniteDimensional F A
b : Finset A
hb : span F ↑b = ⊤
⊢ ↑b ⊆ ↑(restrictScalars F (span K ↑b))
[PROOFSTEP]
exact Submodule.subset_span
[GOAL]
F : Type u
K : Type v
A : Type w
inst✝⁷ : Field F
inst✝⁶ : DivisionRing K
inst✝⁵ : AddCommGroup A
inst✝⁴ : Algebra F K
inst✝³ : Module K A
inst✝² : Module F A
inst✝¹ : IsScalarTower F K A
inst✝ : FiniteDimensional F K
⊢ finrank F K * finrank K A = finrank F A
[PROOFSTEP]
by_cases hA : FiniteDimensional K A
[GOAL]
case pos
F : Type u
K : Type v
A : Type w
inst✝⁷ : Field F
inst✝⁶ : DivisionRing K
inst✝⁵ : AddCommGroup A
inst✝⁴ : Algebra F K
inst✝³ : Module K A
inst✝² : Module F A
inst✝¹ : IsScalarTower F K A
inst✝ : FiniteDimensional F K
hA : FiniteDimensional K A
⊢ finrank F K * finrank K A = finrank F A
[PROOFSTEP]
replace hA : FiniteDimensional K A := hA
[GOAL]
case pos
F : Type u
K : Type v
A : Type w
inst✝⁷ : Field F
inst✝⁶ : DivisionRing K
inst✝⁵ : AddCommGroup A
inst✝⁴ : Algebra F K
inst✝³ : Module K A
inst✝² : Module F A
inst✝¹ : IsScalarTower F K A
inst✝ : FiniteDimensional F K
hA : FiniteDimensional K A
⊢ finrank F K * finrank K A = finrank F A
[PROOFSTEP]
rw [finrank_mul_finrank']
[GOAL]
case neg
F : Type u
K : Type v
A : Type w
inst✝⁷ : Field F
inst✝⁶ : DivisionRing K
inst✝⁵ : AddCommGroup A
inst✝⁴ : Algebra F K
inst✝³ : Module K A
inst✝² : Module F A
inst✝¹ : IsScalarTower F K A
inst✝ : FiniteDimensional F K
hA : ¬FiniteDimensional K A
⊢ finrank F K * finrank K A = finrank F A
[PROOFSTEP]
rw [finrank_of_infinite_dimensional hA, mul_zero, finrank_of_infinite_dimensional]
[GOAL]
case neg
F : Type u
K : Type v
A : Type w
inst✝⁷ : Field F
inst✝⁶ : DivisionRing K
inst✝⁵ : AddCommGroup A
inst✝⁴ : Algebra F K
inst✝³ : Module K A
inst✝² : Module F A
inst✝¹ : IsScalarTower F K A
inst✝ : FiniteDimensional F K
hA : ¬FiniteDimensional K A
⊢ ¬FiniteDimensional F A
[PROOFSTEP]
exact mt (@right F K A _ _ _ _ _ _ _) hA
[GOAL]
F : Type u
K✝ : Type v
A✝ : Type w
inst✝⁹ : Field F
inst✝⁸ : DivisionRing K✝
inst✝⁷ : AddCommGroup A✝
inst✝⁶ : Algebra F K✝
inst✝⁵ : Module K✝ A✝
inst✝⁴ : Module F A✝
inst✝³ : IsScalarTower F K✝ A✝
A : Type u_1
inst✝² : Ring A
inst✝¹ : IsDomain A
inst✝ : Algebra F A
hp : Nat.Prime (finrank F A)
K : Subalgebra F A
⊢ K = ⊥ ∨ K = ⊤
[PROOFSTEP]
haveI : FiniteDimensional _ _ := finiteDimensional_of_finrank hp.pos
[GOAL]
F : Type u
K✝ : Type v
A✝ : Type w
inst✝⁹ : Field F
inst✝⁸ : DivisionRing K✝
inst✝⁷ : AddCommGroup A✝
inst✝⁶ : Algebra F K✝
inst✝⁵ : Module K✝ A✝
inst✝⁴ : Module F A✝
inst✝³ : IsScalarTower F K✝ A✝
A : Type u_1
inst✝² : Ring A
inst✝¹ : IsDomain A
inst✝ : Algebra F A
hp : Nat.Prime (finrank F A)
K : Subalgebra F A
this : FiniteDimensional F A
⊢ K = ⊥ ∨ K = ⊤
[PROOFSTEP]
letI := divisionRingOfFiniteDimensional F K
[GOAL]
F : Type u
K✝ : Type v
A✝ : Type w
inst✝⁹ : Field F
inst✝⁸ : DivisionRing K✝
inst✝⁷ : AddCommGroup A✝
inst✝⁶ : Algebra F K✝
inst✝⁵ : Module K✝ A✝
inst✝⁴ : Module F A✝
inst✝³ : IsScalarTower F K✝ A✝
A : Type u_1
inst✝² : Ring A
inst✝¹ : IsDomain A
inst✝ : Algebra F A
hp : Nat.Prime (finrank F A)
K : Subalgebra F A
this✝ : FiniteDimensional F A
this : DivisionRing { x // x ∈ K } := divisionRingOfFiniteDimensional F { x // x ∈ K }
⊢ K = ⊥ ∨ K = ⊤
[PROOFSTEP]
refine' (hp.eq_one_or_self_of_dvd _ ⟨_, (finrank_mul_finrank F K A).symm⟩).imp _ fun h => _
[GOAL]
case refine'_1
F : Type u
K✝ : Type v
A✝ : Type w
inst✝⁹ : Field F
inst✝⁸ : DivisionRing K✝
inst✝⁷ : AddCommGroup A✝
inst✝⁶ : Algebra F K✝
inst✝⁵ : Module K✝ A✝
inst✝⁴ : Module F A✝
inst✝³ : IsScalarTower F K✝ A✝
A : Type u_1
inst✝² : Ring A
inst✝¹ : IsDomain A
inst✝ : Algebra F A
hp : Nat.Prime (finrank F A)
K : Subalgebra F A
this✝ : FiniteDimensional F A
this : DivisionRing { x // x ∈ K } := divisionRingOfFiniteDimensional F { x // x ∈ K }
⊢ finrank F { x // x ∈ K } = 1 → K = ⊥
[PROOFSTEP]
exact Subalgebra.eq_bot_of_finrank_one
[GOAL]
case refine'_2
F : Type u
K✝ : Type v
A✝ : Type w
inst✝⁹ : Field F
inst✝⁸ : DivisionRing K✝
inst✝⁷ : AddCommGroup A✝
inst✝⁶ : Algebra F K✝
inst✝⁵ : Module K✝ A✝
inst✝⁴ : Module F A✝
inst✝³ : IsScalarTower F K✝ A✝
A : Type u_1
inst✝² : Ring A
inst✝¹ : IsDomain A
inst✝ : Algebra F A
hp : Nat.Prime (finrank F A)
K : Subalgebra F A
this✝ : FiniteDimensional F A
this : DivisionRing { x // x ∈ K } := divisionRingOfFiniteDimensional F { x // x ∈ K }
h : finrank F { x // x ∈ K } = finrank F A
⊢ K = ⊤
[PROOFSTEP]
exact Algebra.toSubmodule_eq_top.1 (eq_top_of_finrank_eq <| K.finrank_toSubmodule.trans h)
|
MODULE binary_tree
USE global
IMPLICIT NONE
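  ! This module implements simple (unbalanced) binary search trees used to
  ! cache previously computed loop-integral quantities (IBP/PaVe results and
  ! the associated X/Y/Z and R/S kinematic matrices).  Nodes are keyed by the
  ! number of loop lines, the integer indices, the propagator momenta PCL (or
  ! the PijMatrix of invariants for the *_node2 variants) and the internal
  ! masses M2L; the c-prefixed types are the complex-mass variants.  Keys are
  ! compared with the tolerance-based real_compare/complex_compare functions
  ! defined at the end of the module.
  ! (Descriptive comment added for readability; the node derived types are
  ! expected to be defined in the global module imported above.)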
CONTAINS
SUBROUTINE ibppave_bt_search (item,head,find)
IMPLICIT NONE
TYPE(ibppave_node),POINTER,INTENT(INOUT)::head,item
LOGICAL,INTENT(OUT)::find
TYPE ( ibppave_node ),POINTER::item1
!TYPE ( ibppave_node ), POINTER::item2
INTEGER::i,icomp
find=.FALSE.
NULLIFY(item%parent)
NULLIFY(item%left)
NULLIFY(item%right)
IF(.NOT.ASSOCIATED(head))THEN
!.OR.head%ITERATION.NE.CURRENT_PS)THEN
!CALL free_ibppave_bt(head) ! free the memory of the binary tree
head => item
RETURN
ENDIF
item1 => head
DO
icomp=ibppave_node_compare(item,item1)
IF(icomp.LT.0)THEN
IF(.NOT.ASSOCIATED(item1%left))THEN
!.OR.item1%left%ITERATION&.NE.CURRENT_PS)THEN
!CALL free_ibppave_bt(item1%left)
item1%left => item
item%parent => item1
EXIT
ELSE
item1 => item1%left
ENDIF
ELSEIF(icomp.GT.0)THEN
IF(.NOT.ASSOCIATED(item1%right))THEN
! .OR.item1%right%ITERATION.NE.CURRENT_PS)THEN
!CALL free_ibppave_bt(item1%right)
item1%right => item
item%parent => item1
EXIT
ELSE
item1 => item1%right
ENDIF
ELSE
find=.TRUE.
item%value(1:4)=item1%value(1:4)
item%stable=item1%stable
EXIT
ENDIF
ENDDO
RETURN
END SUBROUTINE ibppave_bt_search
SUBROUTINE cibppave_bt_search (item,head,find)
IMPLICIT NONE
TYPE(cibppave_node),POINTER,INTENT(INOUT)::head,item
LOGICAL,INTENT(OUT)::find
TYPE ( cibppave_node ),POINTER::item1
INTEGER::i,icomp
find=.FALSE.
NULLIFY(item%parent)
NULLIFY(item%left)
NULLIFY(item%right)
IF(.NOT.ASSOCIATED(head))THEN
head => item
RETURN
ENDIF
item1 => head
DO
icomp=cibppave_node_compare(item,item1)
IF(icomp.LT.0)THEN
IF(.NOT.ASSOCIATED(item1%left))THEN
item1%left => item
item%parent => item1
EXIT
ELSE
item1 => item1%left
ENDIF
ELSEIF(icomp.GT.0)THEN
IF(.NOT.ASSOCIATED(item1%right))THEN
item1%right => item
item%parent => item1
EXIT
ELSE
item1 => item1%right
ENDIF
ELSE
find=.TRUE.
item%value(1:4)=item1%value(1:4)
item%stable=item1%stable
EXIT
ENDIF
ENDDO
RETURN
END SUBROUTINE cibppave_bt_search
SUBROUTINE xyzmatrices_bt_search (item,head,find)
IMPLICIT NONE
TYPE(xyzmatrices_node),POINTER,INTENT(INOUT)::head,item
LOGICAL,INTENT(OUT)::find
TYPE ( xyzmatrices_node ),POINTER::item1
INTEGER::i,icomp,nn
find=.FALSE.
NULLIFY(item%parent)
NULLIFY(item%left)
NULLIFY(item%right)
IF(.NOT.ASSOCIATED(head))THEN
head => item
RETURN
ENDIF
item1 => head
DO
icomp=xyzmatrices_node_compare(item,item1)
IF(icomp.LT.0)THEN
IF(.NOT.ASSOCIATED(item1%left))THEN
item1%left => item
item%parent => item1
EXIT
ELSE
item1 => item1%left
ENDIF
ELSEIF(icomp.GT.0)THEN
IF(.NOT.ASSOCIATED(item1%right))THEN
item1%right => item
item%parent => item1
EXIT
ELSE
item1 => item1%right
ENDIF
ELSE
find=.TRUE.
nn=item%NLOOPLINE
!PRINT *, item1%xmatrix(1,1) ! Debug
item%xmatrix(1:nn,1:nn)=item1%xmatrix(1:nn,1:nn)
item%ymatrix(1:nn,1:nn)=item1%ymatrix(1:nn,1:nn)
item%zmatrix(2:nn,2:nn)=item1%zmatrix(2:nn,2:nn)
item%detY=item1%detY
item%detZ=item1%detZ
EXIT
ENDIF
ENDDO
RETURN
END SUBROUTINE xyzmatrices_bt_search
SUBROUTINE cxyzmatrices_bt_search (item,head,find)
IMPLICIT NONE
TYPE(cxyzmatrices_node),POINTER,INTENT(INOUT)::head,item
LOGICAL,INTENT(OUT)::find
TYPE ( cxyzmatrices_node ),POINTER::item1
INTEGER::i,icomp,nn
find=.FALSE.
NULLIFY(item%parent)
NULLIFY(item%left)
NULLIFY(item%right)
IF(.NOT.ASSOCIATED(head))THEN
head => item
RETURN
ENDIF
item1 => head
DO
icomp=cxyzmatrices_node_compare(item,item1)
IF(icomp.LT.0)THEN
IF(.NOT.ASSOCIATED(item1%left))THEN
item1%left => item
item%parent => item1
EXIT
ELSE
item1 => item1%left
ENDIF
ELSEIF(icomp.GT.0)THEN
IF(.NOT.ASSOCIATED(item1%right))THEN
item1%right => item
item%parent => item1
EXIT
ELSE
item1 => item1%right
ENDIF
ELSE
find=.TRUE.
nn=item%NLOOPLINE
item%xmatrix(1:nn,1:nn)=item1%xmatrix(1:nn,1:nn)
item%ymatrix(1:nn,1:nn)=item1%ymatrix(1:nn,1:nn)
item%zmatrix(2:nn,2:nn)=item1%zmatrix(2:nn,2:nn)
item%detY=item1%detY
item%detZ=item1%detZ
EXIT
ENDIF
ENDDO
RETURN
END SUBROUTINE cxyzmatrices_bt_search
SUBROUTINE rsmatrices_bt_search (item,head,find)
IMPLICIT NONE
TYPE(rsmatrices_node),POINTER,INTENT(INOUT)::head,item
LOGICAL,INTENT(OUT)::find
TYPE ( rsmatrices_node ),POINTER::item1
INTEGER::i,icomp,nn
find=.FALSE.
NULLIFY(item%parent)
NULLIFY(item%left)
NULLIFY(item%right)
IF(.NOT.ASSOCIATED(head))THEN
head => item
RETURN
ENDIF
item1 => head
DO
icomp=rsmatrices_node_compare(item,item1)
IF(icomp.LT.0)THEN
IF(.NOT.ASSOCIATED(item1%left))THEN
item1%left => item
item%parent => item1
EXIT
ELSE
item1 => item1%left
ENDIF
ELSEIF(icomp.GT.0)THEN
IF(.NOT.ASSOCIATED(item1%right))THEN
item1%right => item
item%parent => item1
EXIT
ELSE
item1 => item1%right
ENDIF
ELSE
find=.TRUE.
nn=item%NLOOPLINE
item%rmatrix(1:nn,1:nn)=item1%rmatrix(1:nn,1:nn)
item%smatrix(0:nn,0:nn)=item1%smatrix(0:nn,0:nn)
item%detS=item1%detS
item%detR=item1%detR
EXIT
ENDIF
ENDDO
RETURN
END SUBROUTINE rsmatrices_bt_search
SUBROUTINE crsmatrices_bt_search (item,head,find)
IMPLICIT NONE
TYPE(crsmatrices_node),POINTER,INTENT(INOUT)::head,item
LOGICAL,INTENT(OUT)::find
TYPE ( crsmatrices_node ),POINTER::item1
INTEGER::i,icomp,nn
find=.FALSE.
NULLIFY(item%parent)
NULLIFY(item%left)
NULLIFY(item%right)
IF(.NOT.ASSOCIATED(head))THEN
head => item
RETURN
ENDIF
item1 => head
DO
icomp=crsmatrices_node_compare(item,item1)
IF(icomp.LT.0)THEN
IF(.NOT.ASSOCIATED(item1%left))THEN
item1%left => item
item%parent => item1
EXIT
ELSE
item1 => item1%left
ENDIF
ELSEIF(icomp.GT.0)THEN
IF(.NOT.ASSOCIATED(item1%right))THEN
item1%right => item
item%parent => item1
EXIT
ELSE
item1 => item1%right
ENDIF
ELSE
find=.TRUE.
nn=item%NLOOPLINE
item%rmatrix(1:nn,1:nn)=item1%rmatrix(1:nn,1:nn)
item%smatrix(0:nn,0:nn)=item1%smatrix(0:nn,0:nn)
item%detS=item1%detS
item%detR=item1%detR
EXIT
ENDIF
ENDDO
RETURN
END SUBROUTINE crsmatrices_bt_search
SUBROUTINE ibppave_bt_opt_search (head,NLOOPLINE,&
indices,PCL,M2L,item,find)
IMPLICIT NONE
TYPE(ibppave_node),POINTER,INTENT(INOUT)::head
TYPE(ibppave_node),POINTER,INTENT(OUT)::item
INTEGER,INTENT(IN)::NLOOPLINE
INTEGER,DIMENSION(0:NLOOPLINE),INTENT(IN)::indices
REAL(KIND(1d0)),DIMENSION(NLOOPLINE,0:3),INTENT(IN)::PCL
REAL(KIND(1d0)),DIMENSION(NLOOPLINE),INTENT(IN)::M2L
LOGICAL,INTENT(OUT)::find
TYPE ( ibppave_node ),POINTER::item1
!TYPE ( ibppave_node ), POINTER::item2
INTEGER::i,icomp
find=.FALSE.
IF(.NOT.ASSOCIATED(head))THEN
CALL ibppave_node_alloc(NLOOPLINE,indices,PCL,M2L,item)
head => item
IF(.NOT.ASSOCIATED(ibppave_save_array(NLOOPLINE)%ptr))THEN
ibppave_save_array(NLOOPLINE)%ptr => item
ENDIF
RETURN
ENDIF
IF(.NOT.ASSOCIATED(ibppave_save_array(NLOOPLINE)%ptr))THEN
item1 => head
ELSE
item1 => ibppave_save_array(NLOOPLINE)%ptr
ENDIF
DO
icomp=ibppave_node_opt_compare(NLOOPLINE,indices,PCL,M2L,item1)
IF(icomp.LT.0)THEN
IF(.NOT.ASSOCIATED(item1%left))THEN
CALL ibppave_node_alloc(NLOOPLINE,indices,PCL,M2L,item)
item1%left => item
item%parent => item1
IF(.NOT.ASSOCIATED(ibppave_save_array(NLOOPLINE)%ptr))THEN
ibppave_save_array(NLOOPLINE)%ptr => item
ENDIF
EXIT
ELSE
item1 => item1%left
ENDIF
ELSEIF(icomp.GT.0)THEN
IF(.NOT.ASSOCIATED(item1%right))THEN
CALL ibppave_node_alloc(NLOOPLINE,indices,PCL,M2L,item)
item1%right => item
item%parent => item1
IF(.NOT.ASSOCIATED(ibppave_save_array(NLOOPLINE)%ptr))THEN
ibppave_save_array(NLOOPLINE)%ptr => item
ENDIF
EXIT
ELSE
item1 => item1%right
ENDIF
ELSE
find=.TRUE.
item => item1
EXIT
ENDIF
ENDDO
RETURN
END SUBROUTINE ibppave_bt_opt_search
SUBROUTINE ibppave_node_alloc(NLOOPLINE,indices,PCL,M2L,item)
IMPLICIT NONE
TYPE(ibppave_node),POINTER,INTENT(OUT)::item
INTEGER,INTENT(IN)::NLOOPLINE
INTEGER,DIMENSION(0:NLOOPLINE),INTENT(IN)::indices
REAL(KIND(1d0)),DIMENSION(NLOOPLINE,0:3),INTENT(IN)::PCL
REAL(KIND(1d0)),DIMENSION(NLOOPLINE),INTENT(IN)::M2L
ALLOCATE(item)
!item%ITERATION=CURRENT_PS
item%NLOOPLINE=NLOOPLINE
item%stable=.TRUE.
item%indices(0:NLOOPLINE)=indices(0:NLOOPLINE)
item%M2L(1:NLOOPLINE)=M2L(1:NLOOPLINE)
item%PCL(1:NLOOPLINE,0:3)=PCL(1:NLOOPLINE,0:3)
NULLIFY(item%parent)
NULLIFY(item%left)
NULLIFY(item%right)
RETURN
END SUBROUTINE ibppave_node_alloc
SUBROUTINE cibppave_node_alloc(NLOOPLINE,indices,PCL,M2L,item)
IMPLICIT NONE
TYPE(cibppave_node),POINTER,INTENT(OUT)::item
INTEGER,INTENT(IN)::NLOOPLINE
INTEGER,DIMENSION(0:NLOOPLINE),INTENT(IN)::indices
REAL(KIND(1d0)),DIMENSION(NLOOPLINE,0:3),INTENT(IN)::PCL
COMPLEX(KIND(1d0)),DIMENSION(NLOOPLINE),INTENT(IN)::M2L
ALLOCATE(item)
item%NLOOPLINE=NLOOPLINE
item%stable=.TRUE.
item%indices(0:NLOOPLINE)=indices(0:NLOOPLINE)
item%M2L(1:NLOOPLINE)=M2L(1:NLOOPLINE)
item%PCL(1:NLOOPLINE,0:3)=PCL(1:NLOOPLINE,0:3)
NULLIFY(item%parent)
NULLIFY(item%left)
NULLIFY(item%right)
RETURN
END SUBROUTINE cibppave_node_alloc
SUBROUTINE ibppave_node2_alloc(NLOOPLINE,indices,PijMatrix,M2L,item)
IMPLICIT NONE
TYPE(ibppave_node2),POINTER,INTENT(OUT)::item
INTEGER,INTENT(IN)::NLOOPLINE
INTEGER,DIMENSION(0:NLOOPLINE),INTENT(IN)::indices
REAL(KIND(1d0)),DIMENSION(NLOOPLINE,NLOOPLINE),INTENT(IN)::PijMatrix
REAL(KIND(1d0)),DIMENSION(NLOOPLINE),INTENT(IN)::M2L
ALLOCATE(item)
!item%ITERATION=CURRENT_PS
item%NLOOPLINE=NLOOPLINE
item%stable=.TRUE.
item%indices(0:NLOOPLINE)=indices(0:NLOOPLINE)
item%M2L(1:NLOOPLINE)=M2L(1:NLOOPLINE)
item%PijMatrix(1:NLOOPLINE,1:NLOOPLINE)=&
PijMatrix(1:NLOOPLINE,1:NLOOPLINE)
RETURN
END SUBROUTINE ibppave_node2_alloc
SUBROUTINE ibppave_bt_search2(item,head,find)
IMPLICIT NONE
TYPE(ibppave_node2),POINTER,INTENT(INOUT)::head,item
LOGICAL,INTENT(OUT)::find
TYPE ( ibppave_node2 ),POINTER::item1
!TYPE ( ibppave_node2 ), POINTER::item2
INTEGER::i,icomp
find=.FALSE.
NULLIFY(item%parent)
NULLIFY(item%left)
NULLIFY(item%right)
IF(.NOT.ASSOCIATED(head))THEN
head => item
RETURN
ENDIF
item1 => head
DO
icomp=ibppave_node2_compare(item,item1)
IF(icomp.LT.0)THEN
IF(.NOT.ASSOCIATED(item1%left))THEN
item1%left => item
item%parent => item1
EXIT
ELSE
item1 => item1%left
ENDIF
ELSEIF(icomp.GT.0)THEN
IF(.NOT.ASSOCIATED(item1%right))THEN
item1%right => item
item%parent => item1
EXIT
ELSE
item1 => item1%right
ENDIF
ELSE
find=.TRUE.
item%value(1:4)=item1%value(1:4)
item%stable=item1%stable
EXIT
ENDIF
ENDDO
RETURN
END SUBROUTINE ibppave_bt_search2
SUBROUTINE ibppave_bt_insert ( item, head )
IMPLICIT NONE
TYPE ( ibppave_node ),POINTER,INTENT(INOUT)::head
INTEGER::i
TYPE ( ibppave_node ),POINTER,INTENT(IN)::item
TYPE ( ibppave_node ),POINTER::item1
TYPE ( ibppave_node ), POINTER::item2
NULLIFY( item%parent )
NULLIFY( item%left )
NULLIFY( item%right )
!
! In the case of an empty tree.
!
IF(.NOT.ASSOCIATED( head ))THEN
head => item
RETURN
ENDIF
item1 => head
DO
IF(ibppave_node_smaller(item,item1))THEN
IF(.NOT.ASSOCIATED(item1%left ))THEN
item1%left => item
item%parent => item1
EXIT
ELSE
item1 => item1%left
ENDIF
ELSE
IF(.NOT.ASSOCIATED ( item1%right ) )THEN
item1%right => item
item%parent => item1
EXIT
ELSE
item1 => item1%right
ENDIF
ENDIF
ENDDO
RETURN
END SUBROUTINE ibppave_bt_insert
RECURSIVE SUBROUTINE ibppave_bt_print ( head )
IMPLICIT NONE
TYPE ( ibppave_node ),POINTER,INTENT(IN)::head
INTEGER::n,i
IF( ASSOCIATED( head ) )THEN
CALL ibppave_bt_print( head%left )
WRITE(*,*)"======================================================================================"
n=head%NLOOPLINE
WRITE(*,*)n,head%indices(0:n)
WRITE(*,*)head%M2L(1:n)
DO i=1,n
WRITE(*,*)head%PCL(i,0:3)
ENDDO
WRITE(*,*)"value=",head%value(1:4)
WRITE(*,*)"stable=",head%stable
WRITE(*,*)"======================================================================================"
CALL ibppave_bt_print( head%right )
ENDIF
RETURN
END SUBROUTINE ibppave_bt_print
RECURSIVE SUBROUTINE cibppave_bt_print ( head )
IMPLICIT NONE
TYPE ( cibppave_node ),POINTER,INTENT(IN)::head
INTEGER::n,i
IF( ASSOCIATED( head ) )THEN
CALL cibppave_bt_print( head%left )
WRITE(*,*)"======================================================================================"
n=head%NLOOPLINE
WRITE(*,*)n,head%indices(0:n)
WRITE(*,*)head%M2L(1:n)
DO i=1,n
WRITE(*,*)head%PCL(i,0:3)
ENDDO
WRITE(*,*)"value=",head%value(1:4)
WRITE(*,*)"stable=",head%stable
WRITE(*,*)"======================================================================================"
CALL cibppave_bt_print( head%right )
ENDIF
RETURN
END SUBROUTINE cibppave_bt_print
RECURSIVE FUNCTION ibppave_node_count (head) RESULT(res)
IMPLICIT NONE
TYPE(ibppave_node),POINTER,INTENT(IN)::head
INTEGER::res
IF( .NOT.ASSOCIATED(head))THEN
res=0
RETURN
ENDIF
res=1+ibppave_node_count(head%left)&
+ibppave_node_count(head%right)
RETURN
END FUNCTION ibppave_node_count
RECURSIVE FUNCTION cibppave_node_count (head) RESULT(res)
IMPLICIT NONE
TYPE(cibppave_node),POINTER,INTENT(IN)::head
INTEGER::res
IF( .NOT.ASSOCIATED(head))THEN
res=0
RETURN
ENDIF
res=1+cibppave_node_count(head%left)&
+cibppave_node_count(head%right)
RETURN
END FUNCTION cibppave_node_count
RECURSIVE SUBROUTINE ibppave_bt_print2 ( head )
IMPLICIT NONE
TYPE ( ibppave_node2 ),POINTER,INTENT(IN)::head
INTEGER::n,i
IF( ASSOCIATED( head ) )THEN
CALL ibppave_bt_print2( head%left )
WRITE(*,*)"======================================================================================"
n=head%NLOOPLINE
WRITE(*,*)n,head%indices(0:n)
WRITE(*,*)head%M2L(1:n)
DO i=1,n
WRITE(*,*)head%PijMatrix(i,1:n)
ENDDO
WRITE(*,*)"value=",head%value(1:4)
WRITE(*,*)"stable=",head%stable
WRITE(*,*)"======================================================================================"
CALL ibppave_bt_print2( head%right )
ENDIF
RETURN
END SUBROUTINE ibppave_bt_print2
FUNCTION ibppave_node_smaller(item1,item2) RESULT(res)
! .LE.
IMPLICIT NONE
TYPE(ibppave_node),POINTER,INTENT(IN)::item1,item2
LOGICAL::res
INTEGER::icomp
icomp=ibppave_node_compare(item1,item2)
IF(icomp.LE.0)THEN
res=.TRUE.
RETURN
ELSE
res=.FALSE.
RETURN
ENDIF
END FUNCTION ibppave_node_smaller
FUNCTION cibppave_node_smaller(item1,item2) RESULT(res)
! .LE.
IMPLICIT NONE
TYPE(cibppave_node),POINTER,INTENT(IN)::item1,item2
LOGICAL::res
INTEGER::icomp
icomp=cibppave_node_compare(item1,item2)
IF(icomp.LE.0)THEN
res=.TRUE.
RETURN
ELSE
res=.FALSE.
RETURN
ENDIF
END FUNCTION cibppave_node_smaller
FUNCTION ibppave_node_opt_compare(NLOOPLINE,indices,PCL,M2L,item2) RESULT(res)
IMPLICIT NONE
TYPE(ibppave_node),POINTER,INTENT(IN)::item2
INTEGER,INTENT(IN)::NLOOPLINE
INTEGER,DIMENSION(0:NLOOPLINE),INTENT(IN)::indices
REAL(KIND(1d0)),DIMENSION(NLOOPLINE,0:3),INTENT(IN)::PCL
REAL(KIND(1d0)),DIMENSION(NLOOPLINE),INTENT(IN)::M2L
INTEGER::res,n,i,k
REAL(KIND(1d0)),PARAMETER::eps1=1d-6,zthr=1d-4
REAL(KIND(1d0))::temp
INTEGER::icomp
n=NLOOPLINE-item2%NLOOPLINE
IF(n.LT.0)THEN
res=-1
RETURN
ELSEIF(n.GT.0)THEN
res=1
RETURN
ENDIF
DO i=0,NLOOPLINE
k=indices(i)-item2%indices(i)
IF(k.LT.0)THEN
res=-1
RETURN
ELSEIF(k.GT.0)THEN
res=1
RETURN
ENDIF
ENDDO
DO i=1,NLOOPLINE
icomp=real_compare(M2L(i),item2%M2L(i),eps,zthr)
IF(icomp.NE.0)THEN
res=icomp
RETURN
ENDIF
ENDDO
DO i=1,NLOOPLINE
DO k=0,3
icomp=real_compare(PCL(i,k),item2%PCL(i,k),eps,zthr)
IF(icomp.NE.0)THEN
res=icomp
RETURN
ENDIF
ENDDO
ENDDO
res=0
RETURN
END FUNCTION ibppave_node_opt_compare
FUNCTION ibppave_node_compare(item1,item2) RESULT(res)
IMPLICIT NONE
TYPE(ibppave_node),POINTER,INTENT(IN)::item1,item2
INTEGER::res,n,i,k
REAL(KIND(1d0)),PARAMETER::eps1=1d-6,zthr=1d-4
REAL(KIND(1d0))::temp
INTEGER::icomp
n=item1%NLOOPLINE-item2%NLOOPLINE
IF(n.LT.0)THEN
res=-1
RETURN
ELSEIF(n.GT.0)THEN
res=1
RETURN
ENDIF
n=item1%NLOOPLINE
DO i=0,n
k=item1%indices(i)-item2%indices(i)
IF(k.LT.0)THEN
res=-1
RETURN
ELSEIF(k.GT.0)THEN
res=1
RETURN
ENDIF
ENDDO
DO i=1,n
icomp=real_compare(item1%M2L(i),item2%M2L(i),eps,zthr)
IF(icomp.NE.0)THEN
res=icomp
RETURN
ENDIF
ENDDO
DO i=1,n
DO k=0,3
icomp=real_compare(item1%PCL(i,k),item2%PCL(i,k),eps,zthr)
IF(icomp.NE.0)THEN
res=icomp
RETURN
ENDIF
ENDDO
ENDDO
res=0
RETURN
END FUNCTION ibppave_node_compare
FUNCTION cibppave_node_compare(item1,item2) RESULT(res)
IMPLICIT NONE
TYPE(cibppave_node),POINTER,INTENT(IN)::item1,item2
INTEGER::res,n,i,k
REAL(KIND(1d0)),PARAMETER::eps1=1d-6,zthr=1d-4
REAL(KIND(1d0))::temp
INTEGER::icomp
n=item1%NLOOPLINE-item2%NLOOPLINE
IF(n.LT.0)THEN
res=-1
RETURN
ELSEIF(n.GT.0)THEN
res=1
RETURN
ENDIF
n=item1%NLOOPLINE
DO i=0,n
k=item1%indices(i)-item2%indices(i)
IF(k.LT.0)THEN
res=-1
RETURN
ELSEIF(k.GT.0)THEN
res=1
RETURN
ENDIF
ENDDO
DO i=1,n
icomp=complex_compare(item1%M2L(i),item2%M2L(i),eps,zthr)
IF(icomp.NE.0)THEN
res=icomp
RETURN
ENDIF
ENDDO
DO i=1,n
DO k=0,3
icomp=real_compare(item1%PCL(i,k),item2%PCL(i,k),eps,zthr)
IF(icomp.NE.0)THEN
res=icomp
RETURN
ENDIF
ENDDO
ENDDO
res=0
RETURN
END FUNCTION cibppave_node_compare
FUNCTION xyzmatrices_node_compare(item1,item2) RESULT(res)
IMPLICIT NONE
TYPE(xyzmatrices_node),POINTER,INTENT(IN)::item1,item2
INTEGER::res,n,i,k
REAL(KIND(1d0)),PARAMETER::eps1=1d-6,zthr=1d-4
REAL(KIND(1d0))::temp
INTEGER::icomp
n=item1%NLOOPLINE-item2%NLOOPLINE
IF(n.LT.0)THEN
res=-1
RETURN
ELSEIF(n.GT.0)THEN
res=1
RETURN
ENDIF
n=item1%NLOOPLINE
DO i=1,n
icomp=real_compare(item1%M2L(i),item2%M2L(i),eps,zthr)
IF(icomp.NE.0)THEN
res=icomp
RETURN
ENDIF
ENDDO
DO i=1,n
DO k=0,3
icomp=real_compare(item1%PCL(i,k),item2%PCL(i,k),eps,zthr)
IF(icomp.NE.0)THEN
res=icomp
RETURN
ENDIF
ENDDO
ENDDO
res=0
RETURN
END FUNCTION xyzmatrices_node_compare
FUNCTION cxyzmatrices_node_compare(item1,item2) RESULT(res)
IMPLICIT NONE
TYPE(cxyzmatrices_node),POINTER,INTENT(IN)::item1,item2
INTEGER::res,n,i,k
REAL(KIND(1d0)),PARAMETER::eps1=1d-6,zthr=1d-4
REAL(KIND(1d0))::temp
INTEGER::icomp
n=item1%NLOOPLINE-item2%NLOOPLINE
IF(n.LT.0)THEN
res=-1
RETURN
ELSEIF(n.GT.0)THEN
res=1
RETURN
ENDIF
n=item1%NLOOPLINE
DO i=1,n
icomp=complex_compare(item1%M2L(i),item2%M2L(i),eps,zthr)
IF(icomp.NE.0)THEN
res=icomp
RETURN
ENDIF
ENDDO
DO i=1,n
DO k=0,3
icomp=real_compare(item1%PCL(i,k),item2%PCL(i,k),eps,zthr)
IF(icomp.NE.0)THEN
res=icomp
RETURN
ENDIF
ENDDO
ENDDO
res=0
RETURN
END FUNCTION cxyzmatrices_node_compare
FUNCTION rsmatrices_node_compare(item1,item2) RESULT(res)
IMPLICIT NONE
TYPE(rsmatrices_node),POINTER,INTENT(IN)::item1,item2
INTEGER::res,n,i,k
REAL(KIND(1d0)),PARAMETER::eps1=1d-6,zthr=1d-4
REAL(KIND(1d0))::temp
INTEGER::icomp
n=item1%NLOOPLINE-item2%NLOOPLINE
IF(n.LT.0)THEN
res=-1
RETURN
ELSEIF(n.GT.0)THEN
res=1
RETURN
ENDIF
n=item1%NLOOPLINE
DO i=1,n
icomp=real_compare(item1%M2L(i),item2%M2L(i),eps,zthr)
IF(icomp.NE.0)THEN
res=icomp
RETURN
ENDIF
ENDDO
DO i=1,n
DO k=0,3
icomp=real_compare(item1%PCL(i,k),item2%PCL(i,k),eps,zthr)
IF(icomp.NE.0)THEN
res=icomp
RETURN
ENDIF
ENDDO
ENDDO
res=0
RETURN
END FUNCTION rsmatrices_node_compare
FUNCTION crsmatrices_node_compare(item1,item2) RESULT(res)
IMPLICIT NONE
TYPE(crsmatrices_node),POINTER,INTENT(IN)::item1,item2
INTEGER::res,n,i,k
REAL(KIND(1d0)),PARAMETER::eps1=1d-6,zthr=1d-4
REAL(KIND(1d0))::temp
INTEGER::icomp
n=item1%NLOOPLINE-item2%NLOOPLINE
IF(n.LT.0)THEN
res=-1
RETURN
ELSEIF(n.GT.0)THEN
res=1
RETURN
ENDIF
n=item1%NLOOPLINE
DO i=1,n
icomp=complex_compare(item1%M2L(i),item2%M2L(i),eps,zthr)
IF(icomp.NE.0)THEN
res=icomp
RETURN
ENDIF
ENDDO
DO i=1,n
DO k=0,3
icomp=real_compare(item1%PCL(i,k),item2%PCL(i,k),eps,zthr)
IF(icomp.NE.0)THEN
res=icomp
RETURN
ENDIF
ENDDO
ENDDO
res=0
RETURN
END FUNCTION crsmatrices_node_compare
FUNCTION ibppave_node2_compare(item1,item2) RESULT(res)
IMPLICIT NONE
TYPE(ibppave_node2),POINTER,INTENT(IN)::item1,item2
INTEGER::res,n,i,k
REAL(KIND(1d0)),PARAMETER::eps1=1d-6,zthr=1d-4
REAL(KIND(1d0))::temp
INTEGER::icomp
n=item1%NLOOPLINE-item2%NLOOPLINE
IF(n.LT.0)THEN
res=-1
RETURN
ELSEIF(n.GT.0)THEN
res=1
RETURN
ENDIF
n=item1%NLOOPLINE
DO i=0,n
k=item1%indices(i)-item2%indices(i)
IF(k.LT.0)THEN
res=-1
RETURN
ELSEIF(k.GT.0)THEN
res=1
RETURN
ENDIF
ENDDO
DO i=1,n
icomp=real_compare(item1%M2L(i),item2%M2L(i),eps,zthr)
IF(icomp.NE.0)THEN
res=icomp
RETURN
ENDIF
ENDDO
DO i=1,n
DO k=i,n
icomp=real_compare(item1%PijMatrix(i,k),item2%PijMatrix(i,k),eps,zthr)
IF(icomp.NE.0)THEN
res=icomp
RETURN
ENDIF
ENDDO
ENDDO
res=0
RETURN
END FUNCTION ibppave_node2_compare
FUNCTION real_compare(r1,r2,eps,zthr) RESULT(res)
IMPLICIT NONE
REAL(KIND(1d0)),INTENT(IN)::r1,r2,eps,zthr
REAL(KIND(1d0))::maxr,diff
INTEGER::res
maxr=MAX(ABS(r1),ABS(r2))
IF(maxr.LT.zthr)THEN
res=0
RETURN
ENDIF
diff=r1-r2
IF(ABS(diff)/maxr.LT.eps)THEN
res=0
RETURN
ENDIF
IF(diff.GT.0d0)THEN
res=1
RETURN
ELSE
res=-1
RETURN
ENDIF
END FUNCTION real_compare
FUNCTION complex_compare(c1,c2,eps,zthr) RESULT(res)
IMPLICIT NONE
COMPLEX(KIND(1d0)),INTENT(IN)::c1,c2
REAL(KIND(1d0)),INTENT(IN)::eps,zthr
REAL(KIND(1d0))::r1,r2
REAL(KIND(1d0))::maxr,diff
INTEGER::res
r1=DREAL(c1)
r2=DREAL(c2)
res=real_compare(r1,r2,eps,zthr)
IF(res.NE.0)RETURN
r1=DIMAG(c1)
r2=DIMAG(c2)
res=real_compare(r1,r2,eps,zthr)
RETURN
END FUNCTION complex_compare
RECURSIVE SUBROUTINE free_ibppave_bt(head)
IMPLICIT NONE
TYPE(ibppave_node),POINTER,INTENT(INOUT)::head
INTEGER::leaf
leaf=leaf_ibppave_node(head)
IF(leaf.EQ.0)RETURN
IF(leaf.EQ.1)THEN
! it is a leaf
DEALLOCATE(head)
RETURN
ENDIF
! it is not a leaf
IF(ASSOCIATED(head%left))THEN
CALL free_ibppave_bt(head%left)
ENDIF
IF(ASSOCIATED(head%right))THEN
CALL free_ibppave_bt(head%right)
ENDIF
DEALLOCATE(head)
RETURN
END SUBROUTINE free_ibppave_bt
RECURSIVE SUBROUTINE free_cibppave_bt(head)
IMPLICIT NONE
TYPE(cibppave_node),POINTER,INTENT(INOUT)::head
INTEGER::leaf
leaf=leaf_cibppave_node(head)
IF(leaf.EQ.0)RETURN
IF(leaf.EQ.1)THEN
! it is a leaf
DEALLOCATE(head)
RETURN
ENDIF
! it is not a leaf
IF(ASSOCIATED(head%left))THEN
CALL free_cibppave_bt(head%left)
ENDIF
IF(ASSOCIATED(head%right))THEN
CALL free_cibppave_bt(head%right)
ENDIF
DEALLOCATE(head)
RETURN
END SUBROUTINE free_cibppave_bt
RECURSIVE SUBROUTINE free_xyzmatrices_bt(head)
IMPLICIT NONE
TYPE(xyzmatrices_node),POINTER,INTENT(INOUT)::head
INTEGER::leaf
leaf=leaf_xyzmatrices_node(head)
IF(leaf.EQ.0)RETURN
IF(leaf.EQ.1)THEN
! it is a leaf
DEALLOCATE(head)
RETURN
ENDIF
! it is not a leaf
IF(ASSOCIATED(head%left))THEN
CALL free_xyzmatrices_bt(head%left)
ENDIF
IF(ASSOCIATED(head%right))THEN
CALL free_xyzmatrices_bt(head%right)
ENDIF
DEALLOCATE(head)
RETURN
END SUBROUTINE free_xyzmatrices_bt
RECURSIVE SUBROUTINE free_cxyzmatrices_bt(head)
IMPLICIT NONE
TYPE(cxyzmatrices_node),POINTER,INTENT(INOUT)::head
INTEGER::leaf
leaf=leaf_cxyzmatrices_node(head)
IF(leaf.EQ.0)RETURN
IF(leaf.EQ.1)THEN
! it is a leaf
DEALLOCATE(head)
RETURN
ENDIF
! it is not a leaf
IF(ASSOCIATED(head%left))THEN
CALL free_cxyzmatrices_bt(head%left)
ENDIF
IF(ASSOCIATED(head%right))THEN
CALL free_cxyzmatrices_bt(head%right)
ENDIF
DEALLOCATE(head)
RETURN
END SUBROUTINE free_cxyzmatrices_bt
RECURSIVE SUBROUTINE free_rsmatrices_bt(head)
IMPLICIT NONE
TYPE(rsmatrices_node),POINTER,INTENT(INOUT)::head
INTEGER::leaf
leaf=leaf_rsmatrices_node(head)
IF(leaf.EQ.0)RETURN
IF(leaf.EQ.1)THEN
! it is a leaf
DEALLOCATE(head)
RETURN
ENDIF
! it is not a leaf
IF(ASSOCIATED(head%left))THEN
CALL free_rsmatrices_bt(head%left)
ENDIF
IF(ASSOCIATED(head%right))THEN
CALL free_rsmatrices_bt(head%right)
ENDIF
DEALLOCATE(head)
RETURN
END SUBROUTINE free_rsmatrices_bt
RECURSIVE SUBROUTINE free_crsmatrices_bt(head)
IMPLICIT NONE
TYPE(crsmatrices_node),POINTER,INTENT(INOUT)::head
INTEGER::leaf
leaf=leaf_crsmatrices_node(head)
IF(leaf.EQ.0)RETURN
IF(leaf.EQ.1)THEN
! it is a leaf
DEALLOCATE(head)
RETURN
ENDIF
! it is not a leaf
IF(ASSOCIATED(head%left))THEN
CALL free_crsmatrices_bt(head%left)
ENDIF
IF(ASSOCIATED(head%right))THEN
CALL free_crsmatrices_bt(head%right)
ENDIF
DEALLOCATE(head)
RETURN
END SUBROUTINE free_crsmatrices_bt
RECURSIVE SUBROUTINE free_ibppave_bt2(head)
IMPLICIT NONE
TYPE(ibppave_node2),POINTER,INTENT(INOUT)::head
INTEGER::leaf
leaf=leaf_ibppave_node2(head)
IF(leaf.EQ.0)RETURN
IF(leaf.EQ.1)THEN
! it is a leaf
DEALLOCATE(head)
RETURN
ENDIF
! it is not a leaf
IF(ASSOCIATED(head%left))THEN
CALL free_ibppave_bt2(head%left)
ENDIF
IF(ASSOCIATED(head%right))THEN
CALL free_ibppave_bt2(head%right)
ENDIF
DEALLOCATE(head)
RETURN
END SUBROUTINE free_ibppave_bt2
FUNCTION leaf_ibppave_node(head)
! 0: it is not associated
! 1: it is associated and a leaf
! -1: it is associated and not a leaf
IMPLICIT NONE
TYPE(ibppave_node),POINTER,INTENT(IN)::head
INTEGER::leaf_ibppave_node
IF(.NOT.ASSOCIATED(head))THEN
leaf_ibppave_node=0
ELSEIF(.NOT.ASSOCIATED(head%left).AND.&
.NOT.ASSOCIATED(head%right))THEN
leaf_ibppave_node=1
ELSE
leaf_ibppave_node=-1
ENDIF
RETURN
END FUNCTION leaf_ibppave_node
FUNCTION leaf_cibppave_node(head)
! 0: it is not associated
! 1: it is associated and a leaf
! -1: it is associated and not a leaf
IMPLICIT NONE
TYPE(cibppave_node),POINTER,INTENT(IN)::head
INTEGER::leaf_cibppave_node
IF(.NOT.ASSOCIATED(head))THEN
leaf_cibppave_node=0
ELSEIF(.NOT.ASSOCIATED(head%left).AND.&
.NOT.ASSOCIATED(head%right))THEN
leaf_cibppave_node=1
ELSE
leaf_cibppave_node=-1
ENDIF
RETURN
END FUNCTION leaf_cibppave_node
FUNCTION leaf_xyzmatrices_node(head)
! 0: it is not associated
! 1: it is associated and a leaf
! -1: it is associated and not a leaf
IMPLICIT NONE
TYPE(xyzmatrices_node),POINTER,INTENT(IN)::head
INTEGER::leaf_xyzmatrices_node
IF(.NOT.ASSOCIATED(head))THEN
leaf_xyzmatrices_node=0
ELSEIF(.NOT.ASSOCIATED(head%left).AND.&
.NOT.ASSOCIATED(head%right))THEN
leaf_xyzmatrices_node=1
ELSE
leaf_xyzmatrices_node=-1
ENDIF
RETURN
END FUNCTION leaf_xyzmatrices_node
FUNCTION leaf_cxyzmatrices_node(head)
! 0: it is not associated
! 1: it is associated and a leaf
! -1: it is associated and not a leaf
IMPLICIT NONE
TYPE(cxyzmatrices_node),POINTER,INTENT(IN)::head
INTEGER::leaf_cxyzmatrices_node
IF(.NOT.ASSOCIATED(head))THEN
leaf_cxyzmatrices_node=0
ELSEIF(.NOT.ASSOCIATED(head%left).AND.&
.NOT.ASSOCIATED(head%right))THEN
leaf_cxyzmatrices_node=1
ELSE
leaf_cxyzmatrices_node=-1
ENDIF
RETURN
END FUNCTION leaf_cxyzmatrices_node
FUNCTION leaf_rsmatrices_node(head)
! 0: it is not associated
! 1: it is associated and a leaf
! -1: it is associated and not a leaf
IMPLICIT NONE
TYPE(rsmatrices_node),POINTER,INTENT(IN)::head
INTEGER::leaf_rsmatrices_node
IF(.NOT.ASSOCIATED(head))THEN
leaf_rsmatrices_node=0
ELSEIF(.NOT.ASSOCIATED(head%left).AND.&
.NOT.ASSOCIATED(head%right))THEN
leaf_rsmatrices_node=1
ELSE
leaf_rsmatrices_node=-1
ENDIF
RETURN
END FUNCTION leaf_rsmatrices_node
FUNCTION leaf_crsmatrices_node(head)
! 0: it is not associated
! 1: it is associated and a leaf
! -1: it is associated and not a leaf
IMPLICIT NONE
TYPE(crsmatrices_node),POINTER,INTENT(IN)::head
INTEGER::leaf_crsmatrices_node
IF(.NOT.ASSOCIATED(head))THEN
leaf_crsmatrices_node=0
ELSEIF(.NOT.ASSOCIATED(head%left).AND.&
.NOT.ASSOCIATED(head%right))THEN
leaf_crsmatrices_node=1
ELSE
leaf_crsmatrices_node=-1
ENDIF
RETURN
END FUNCTION leaf_crsmatrices_node
FUNCTION leaf_ibppave_node2(head)
! 0: it is not associated
! 1: it is associated and a leaf
! -1: it is associated and not a leaf
IMPLICIT NONE
TYPE(ibppave_node2),POINTER,INTENT(IN)::head
INTEGER::leaf_ibppave_node2
IF(.NOT.ASSOCIATED(head))THEN
leaf_ibppave_node2=0
ELSEIF(.NOT.ASSOCIATED(head%left).AND.&
.NOT.ASSOCIATED(head%right))THEN
leaf_ibppave_node2=1
ELSE
leaf_ibppave_node2=-1
ENDIF
RETURN
END FUNCTION leaf_ibppave_node2
SUBROUTINE free_ibppave_save
IMPLICIT NONE
CALL free_ibppave_bt(ibp_save)
CALL free_ibppave_bt(pave_save)
CALL free_ibppave_bt(shiftpaveden_save)
CALL free_cibppave_bt(cibp_save)
CALL free_cibppave_bt(cpave_save)
CALL free_cibppave_bt(cshiftpaveden_save)
END SUBROUTINE free_ibppave_save
SUBROUTINE free_ibppave_save2
IMPLICIT NONE
CALL free_ibppave_bt2(ibp_save2)
CALL free_ibppave_bt2(pave_save2)
CALL free_ibppave_bt2(shiftpaveden_save2)
END SUBROUTINE free_ibppave_save2
END MODULE binary_tree
|
module Sized.Parrot where
open import Data.Product
open import Data.String.Base
open import SizedIO.IOObject
open import SizedIO.Base
open import SizedIO.Console hiding (main)
open import SizedIO.ConsoleObject
open import NativeIO
open import Sized.SimpleCell hiding (program; main)
open import Size
record Wrap A : Set where
constructor wrap
field unwrap : A
parrotI = cellJ (Wrap String)
ParrotC : (i : Size) → Set
ParrotC i = ConsoleObject i parrotI
-- but reusing cellC from SimpleCell, as the interface is identical!
-- class Parrot implements Cell {
-- Cell cell;
-- Parrot (Cell c) { cell = c; }
-- public String get() {
-- return "(" + cell.get() + ") is what parrot got";
-- }
-- public void put (String s) {
-- cell.put("parrot puts (" + s + ")");
-- }
-- }
-- parrotP is constructor for the consoleObject for interface (cellI String)
parrotP : ∀{i} (c : CellC i) → ParrotC i
(method (parrotP c) get) =
method c get >>= λ { (s , c') →
return (wrap ("(" ++ s ++ ") is what parrot got") , parrotP c' ) }
(method (parrotP c) (put (wrap s))) =
method c (put ("parrot puts (" ++ s ++ ")")) >>= λ { (_ , c') →
return (_ , parrotP c') }
-- public static void main (String[] args) {
-- Parrot c = new Parrot(new SimpleCell("Start"));
-- System.out.println(c.get());
-- c.put(args[1]);
-- System.out.println(c.get());
-- }
-- }
program : String → IOConsole ∞ Unit
program arg =
let c₀ = parrotP (cellP "Start") in
method c₀ get >>= λ{ (wrap s , c₁) →
exec1 (putStrLn s) >>
method c₁ (put (wrap arg)) >>= λ{ (_ , c₂) →
method c₂ get >>= λ{ (wrap s' , c₃) →
exec1 (putStrLn s') }}}
main : NativeIO Unit
main = translateIOConsole (program "hello")
-- -}
|
‘As I Lay Dying’ is a popular group, especially among American teens. The band's frontman, Tim Lambesis, has posted $2 million bail in a murder-for-hire case. The 32-year-old singer was arrested earlier this month after police alleged he tried to hire a hit man to kill his estranged wife.
The hit man turned out to be an undercover police officer, and Lambesis was arrested after prosecutors claimed he gave the officer an envelope that contained $1,000, personal information and a photo of his wife.
Lambesis's lawyer, Thomas Warwick, is committing to an unusual defense: he claims Lambesis's judgment was impaired by steroid use. The singer is rumored to be a serious bodybuilder and a spokesman for nutritional supplements. He is due back in court on June 10th; if convicted, he could face up to nine years in prison.
(* Title: Models of Kleene Algebra
Author: Alasdair Armstrong, Georg Struth, Tjark Weber
Maintainer: Georg Struth <g.struth at sheffield.ac.uk>
Tjark Weber <tjark.weber at it.uu.se>
*)
section \<open>Models of Kleene Algebras\<close>
theory Kleene_Algebra_Models
imports Kleene_Algebra Dioid_Models
begin
text \<open>We now show that most of the models considered for dioids are
also Kleene algebras. Some of the dioid models cannot be expanded, for
instance max-plus and min-plus semirings, but we do not formalise this
fact. We also currently do not show that formal power series and
matrices form Kleene algebras.
The interpretation proofs for some of the following models are quite
similar. One could, perhaps, abstract out common reasoning in the
future.\<close>
subsection \<open>Preliminary Lemmas\<close>
text \<open>We first prove two induction-style statements for dioids that
are useful for establishing the full induction laws. In the future
these will live in a theory file on finite sums for Kleene
algebras.\<close>
context dioid_one_zero
begin
lemma power_inductl: "z + x \<cdot> y \<le> y \<Longrightarrow> (x ^ n) \<cdot> z \<le> y"
proof (induct n)
case 0 show ?case
using "0.prems" by auto
next
case Suc thus ?case
by (auto, metis mult.assoc mult_isol order_trans)
qed
lemma power_inductr: "z + y \<cdot> x \<le> y \<Longrightarrow> z \<cdot> (x ^ n) \<le> y"
proof (induct n)
case 0 show ?case
using "0.prems" by auto
next
case Suc
{
fix n
assume "z + y \<cdot> x \<le> y \<Longrightarrow> z \<cdot> x ^ n \<le> y"
and "z + y \<cdot> x \<le> y"
hence "z \<cdot> x ^ n \<le> y"
by auto
also have "z \<cdot> x ^ Suc n = z \<cdot> x \<cdot> x ^ n"
by (metis mult.assoc power_Suc)
moreover have "... = (z \<cdot> x ^ n) \<cdot> x"
by (metis mult.assoc power_commutes)
moreover have "... \<le> y \<cdot> x"
by (metis calculation(1) mult_isor)
moreover have "... \<le> y"
using \<open>z + y \<cdot> x \<le> y\<close> by auto
ultimately have "z \<cdot> x ^ Suc n \<le> y" by auto
}
thus ?case
by (metis Suc)
qed
end (* dioid_one_zero *)
subsection \<open>The Powerset Kleene Algebra over a Monoid\<close>
text \<open>We now show that the powerset dioid forms a Kleene
algebra. The Kleene star is defined as in language theory.\<close>
lemma Un_0_Suc: "(\<Union>n. f n) = f 0 \<union> (\<Union>n. f (Suc n))"
by auto (metis not0_implies_Suc)
instantiation set :: (monoid_mult) kleene_algebra
begin
definition star_def: "X\<^sup>\<star> = (\<Union>n. X ^ n)"
lemma star_elim: "x \<in> X\<^sup>\<star> \<longleftrightarrow> (\<exists>k. x \<in> X ^ k)"
by (simp add: star_def)
lemma star_contl: "X \<cdot> Y\<^sup>\<star> = (\<Union>n. X \<cdot> Y ^ n)"
by (auto simp add: star_elim c_prod_def)
lemma star_contr: "X\<^sup>\<star> \<cdot> Y = (\<Union>n. X ^ n \<cdot> Y)"
by (auto simp add: star_elim c_prod_def)
instance
proof
fix X Y Z :: "'a set"
show "1 + X \<cdot> X\<^sup>\<star> \<subseteq> X\<^sup>\<star>"
proof -
have "1 + X \<cdot> X\<^sup>\<star> = (X ^ 0) \<union> (\<Union>n. X ^ (Suc n))"
by (auto simp add: star_def c_prod_def plus_set_def one_set_def)
also have "... = (\<Union>n. X ^ n)"
by (metis Un_0_Suc)
also have "... = X\<^sup>\<star>"
by (simp only: star_def)
finally show ?thesis
by (metis subset_refl)
qed
next
fix X Y Z :: "'a set"
assume hyp: "Z + X \<cdot> Y \<subseteq> Y"
show "X\<^sup>\<star> \<cdot> Z \<subseteq> Y"
by (simp add: star_contr SUP_le_iff) (meson hyp dioid_one_zero_class.power_inductl)
next
fix X Y Z :: "'a set"
assume hyp: "Z + Y \<cdot> X \<subseteq> Y"
show "Z \<cdot> X\<^sup>\<star> \<subseteq> Y"
by (simp add: star_contl SUP_le_iff) (meson dioid_one_zero_class.power_inductr hyp)
qed
end (* instantiation *)
subsection \<open>Language Kleene Algebras\<close>
text \<open>We now specialise this fact to languages.\<close>
interpretation lan_kleene_algebra: kleene_algebra "(+)" "(\<cdot>)" "1::'a lan" "0" "(\<subseteq>)" "(\<subset>)" star ..
subsection \<open>Regular Languages\<close>
text \<open>{\ldots} and further to regular languages. For the sake of
simplicity we just copy in the axiomatisation of regular expressions
by Krauss and Nipkow~\cite{krauss12regular}.\<close>
datatype 'a rexp =
Zero
| One
| Atom 'a
| Plus "'a rexp" "'a rexp"
| Times "'a rexp" "'a rexp"
| Star "'a rexp"
text \<open>The interpretation map that induces regular languages as the
images of regular expressions in the set of languages has also been
adapted from there.\<close>
fun lang :: "'a rexp \<Rightarrow> 'a lan" where
"lang Zero = 0" \<comment> \<open>{}\<close>
| "lang One = 1" \<comment> \<open>{[]}\<close>
| "lang (Atom a) = {[a]}"
| "lang (Plus x y) = lang x + lang y"
| "lang (Times x y) = lang x \<cdot> lang y"
| "lang (Star x) = (lang x)\<^sup>\<star>"
typedef 'a reg_lan = "range lang :: 'a lan set"
by auto
setup_lifting type_definition_reg_lan
instantiation reg_lan :: (type) kleene_algebra
begin
lift_definition star_reg_lan :: "'a reg_lan \<Rightarrow> 'a reg_lan"
is star
by (metis (hide_lams, no_types) image_iff lang.simps(6) rangeI)
lift_definition zero_reg_lan :: "'a reg_lan"
is 0
by (metis lang.simps(1) rangeI)
lift_definition one_reg_lan :: "'a reg_lan"
is 1
by (metis lang.simps(2) rangeI)
lift_definition less_eq_reg_lan :: "'a reg_lan \<Rightarrow> 'a reg_lan \<Rightarrow> bool"
is less_eq .
lift_definition less_reg_lan :: "'a reg_lan \<Rightarrow> 'a reg_lan \<Rightarrow> bool"
is less .
lift_definition plus_reg_lan :: "'a reg_lan \<Rightarrow> 'a reg_lan \<Rightarrow> 'a reg_lan"
is plus
by (metis (hide_lams, no_types) image_iff lang.simps(4) rangeI)
lift_definition times_reg_lan :: "'a reg_lan \<Rightarrow> 'a reg_lan \<Rightarrow> 'a reg_lan"
is times
by (metis (hide_lams, no_types) image_iff lang.simps(5) rangeI)
instance
proof
fix x y z :: "'a reg_lan"
show "x + y + z = x + (y + z)"
by transfer (metis join_semilattice_class.add_assoc')
show "x + y = y + x"
by transfer (metis join_semilattice_class.add_comm)
show "x \<cdot> y \<cdot> z = x \<cdot> (y \<cdot> z)"
by transfer (metis semigroup_mult_class.mult.assoc)
show "(x + y) \<cdot> z = x \<cdot> z + y \<cdot> z"
by transfer (metis semiring_class.distrib_right)
show "1 \<cdot> x = x"
by transfer (metis monoid_mult_class.mult_1_left)
show "x \<cdot> 1 = x"
by transfer (metis monoid_mult_class.mult_1_right)
show "0 + x = x"
by transfer (metis join_semilattice_zero_class.add_zero_l)
show "0 \<cdot> x = 0"
by transfer (metis ab_near_semiring_one_zerol_class.annil)
show "x \<cdot> 0 = 0"
by transfer (metis ab_near_semiring_one_zero_class.annir)
show "x \<le> y \<longleftrightarrow> x + y = y"
by transfer (metis plus_ord_class.less_eq_def)
show "x < y \<longleftrightarrow> x \<le> y \<and> x \<noteq> y"
by transfer (metis plus_ord_class.less_def)
show "x + x = x"
by transfer (metis join_semilattice_class.add_idem)
show "x \<cdot> (y + z) = x \<cdot> y + x \<cdot> z"
by transfer (metis semiring_class.distrib_left)
show "z \<cdot> x \<le> z \<cdot> (x + y)"
by transfer (metis pre_dioid_class.subdistl)
show "1 + x \<cdot> x\<^sup>\<star> \<le> x\<^sup>\<star>"
by transfer (metis star_unfoldl)
show "z + x \<cdot> y \<le> y \<Longrightarrow> x\<^sup>\<star> \<cdot> z \<le> y"
by transfer (metis star_inductl)
show "z + y \<cdot> x \<le> y \<Longrightarrow> z \<cdot> x\<^sup>\<star> \<le> y"
by transfer (metis star_inductr)
qed
end (* instantiation *)
interpretation reg_lan_kleene_algebra: kleene_algebra "(+)" "(\<cdot>)" "1::'a reg_lan" 0 "(\<le>)" "(<)" star ..
subsection \<open>Relation Kleene Algebras\<close>
text \<open>We now show that binary relations form Kleene algebras. While
we could have used the reflexive transitive closure operation as the
Kleene star, we prefer the equivalent definition of the star as the
sum of powers. This essentially allows us to copy previous proofs.\<close>
lemma power_is_relpow: "rel_dioid.power X n = X ^^ n"
proof (induct n)
case 0 show ?case
by (metis rel_dioid.power_0 relpow.simps(1))
case Suc thus ?case
by (metis rel_dioid.power_Suc2 relpow.simps(2))
qed
lemma rel_star_def: "X^* = (\<Union>n. rel_dioid.power X n)"
by (simp add: power_is_relpow rtrancl_is_UN_relpow)
lemma rel_star_contl: "X O Y^* = (\<Union>n. X O rel_dioid.power Y n)"
by (metis rel_star_def relcomp_UNION_distrib)
lemma rel_star_contr: "X^* O Y = (\<Union>n. (rel_dioid.power X n) O Y)"
by (metis rel_star_def relcomp_UNION_distrib2)
interpretation rel_kleene_algebra: kleene_algebra "(\<union>)" "(O)" Id "{}" "(\<subseteq>)" "(\<subset>)" rtrancl
proof
fix x y z :: "'a rel"
show "Id \<union> x O x\<^sup>* \<subseteq> x\<^sup>*"
by (metis order_refl r_comp_rtrancl_eq rtrancl_unfold)
next
fix x y z :: "'a rel"
assume "z \<union> x O y \<subseteq> y"
thus "x\<^sup>* O z \<subseteq> y"
by (simp only: rel_star_contr, metis (lifting) SUP_le_iff rel_dioid.power_inductl)
next
fix x y z :: "'a rel"
assume "z \<union> y O x \<subseteq> y"
thus "z O x\<^sup>* \<subseteq> y"
by (simp only: rel_star_contl, metis (lifting) SUP_le_iff rel_dioid.power_inductr)
qed
subsection \<open>Trace Kleene Algebras\<close>
text \<open>Again, the proof that sets of traces form Kleene algebras
follows the same schema.\<close>
definition t_star :: "('p, 'a) trace set \<Rightarrow> ('p, 'a) trace set" where
"t_star X \<equiv> \<Union>n. trace_dioid.power X n"
lemma t_star_elim: "x \<in> t_star X \<longleftrightarrow> (\<exists>n. x \<in> trace_dioid.power X n)"
by (simp add: t_star_def)
lemma t_star_contl: "t_prod X (t_star Y) = (\<Union>n. t_prod X (trace_dioid.power Y n))"
by (auto simp add: t_star_elim t_prod_def)
lemma t_star_contr: "t_prod (t_star X) Y = (\<Union>n. t_prod (trace_dioid.power X n) Y)"
by (auto simp add: t_star_elim t_prod_def)
interpretation trace_kleene_algebra: kleene_algebra "(\<union>)" t_prod t_one t_zero "(\<subseteq>)" "(\<subset>)" t_star
proof
fix X Y Z :: "('a, 'b) trace set"
show "t_one \<union> t_prod X (t_star X) \<subseteq> t_star X"
proof -
have "t_one \<union> t_prod X (t_star X) = (trace_dioid.power X 0) \<union> (\<Union>n. trace_dioid.power X (Suc n))"
by (auto simp add: t_star_def t_prod_def)
also have "... = (\<Union>n. trace_dioid.power X n)"
by (metis Un_0_Suc)
also have "... = t_star X"
by (metis t_star_def)
finally show ?thesis
by (metis subset_refl)
qed
show "Z \<union> t_prod X Y \<subseteq> Y \<Longrightarrow> t_prod (t_star X) Z \<subseteq> Y"
by (simp only: ball_UNIV t_star_contr SUP_le_iff) (metis trace_dioid.power_inductl)
show "Z \<union> t_prod Y X \<subseteq> Y \<Longrightarrow> t_prod Z (t_star X) \<subseteq> Y"
by (simp only: ball_UNIV t_star_contl SUP_le_iff) (metis trace_dioid.power_inductr)
qed
subsection \<open>Path Kleene Algebras\<close>
text \<open>We start with paths that include the empty path.\<close>
definition p_star :: "'a path set \<Rightarrow> 'a path set" where
"p_star X \<equiv> \<Union>n. path_dioid.power X n"
lemma p_star_elim: "x \<in> p_star X \<longleftrightarrow> (\<exists>n. x \<in> path_dioid.power X n)"
by (simp add: p_star_def)
lemma p_star_contl: "p_prod X (p_star Y) = (\<Union>n. p_prod X (path_dioid.power Y n))"
apply (auto simp add: p_prod_def p_star_elim)
apply (metis p_fusion.simps(1))
apply metis
apply (metis p_fusion.simps(1) p_star_elim)
apply (metis p_star_elim)
done
lemma p_star_contr: "p_prod (p_star X) Y = (\<Union>n. p_prod (path_dioid.power X n) Y)"
apply (auto simp add: p_prod_def p_star_elim)
apply (metis p_fusion.simps(1))
apply metis
apply (metis p_fusion.simps(1) p_star_elim)
apply (metis p_star_elim)
done
interpretation path_kleene_algebra: kleene_algebra "(\<union>)" p_prod p_one "{}" "(\<subseteq>)" "(\<subset>)" p_star
proof
fix X Y Z :: "'a path set"
show "p_one \<union> p_prod X (p_star X) \<subseteq> p_star X"
proof -
have "p_one \<union> p_prod X (p_star X) = (path_dioid.power X 0) \<union> (\<Union>n. path_dioid.power X (Suc n))"
by (auto simp add: p_star_def p_prod_def)
also have "... = (\<Union>n. path_dioid.power X n)"
by (metis Un_0_Suc)
also have "... = p_star X"
by (metis p_star_def)
finally show ?thesis
by (metis subset_refl)
qed
show "Z \<union> p_prod X Y \<subseteq> Y \<Longrightarrow> p_prod (p_star X) Z \<subseteq> Y"
by (simp only: ball_UNIV p_star_contr SUP_le_iff) (metis path_dioid.power_inductl)
show "Z \<union> p_prod Y X \<subseteq> Y \<Longrightarrow> p_prod Z (p_star X) \<subseteq> Y"
by (simp only: ball_UNIV p_star_contl SUP_le_iff) (metis path_dioid.power_inductr)
qed
text \<open>We now consider a notion of paths that does not include the
empty path.\<close>
definition pp_star :: "'a ppath set \<Rightarrow> 'a ppath set" where
"pp_star X \<equiv> \<Union>n. ppath_dioid.power X n"
lemma pp_star_elim: "x \<in> pp_star X \<longleftrightarrow> (\<exists>n. x \<in> ppath_dioid.power X n)"
by (simp add: pp_star_def)
lemma pp_star_contl: "pp_prod X (pp_star Y) = (\<Union>n. pp_prod X (ppath_dioid.power Y n))"
by (auto simp add: pp_prod_def pp_star_elim)
lemma pp_star_contr: "pp_prod (pp_star X) Y = (\<Union>n. pp_prod (ppath_dioid.power X n) Y)"
by (auto simp add: pp_prod_def pp_star_elim)
interpretation ppath_kleene_algebra: kleene_algebra "(\<union>)" pp_prod pp_one "{}" "(\<subseteq>)" "(\<subset>)" pp_star
proof
fix X Y Z :: "'a ppath set"
show "pp_one \<union> pp_prod X (pp_star X) \<subseteq> pp_star X"
proof -
have "pp_one \<union> pp_prod X (pp_star X) = (ppath_dioid.power X 0) \<union> (\<Union>n. ppath_dioid.power X (Suc n))"
by (auto simp add: pp_star_def pp_prod_def)
also have "... = (\<Union>n. ppath_dioid.power X n)"
by (metis Un_0_Suc)
also have "... = pp_star X"
by (metis pp_star_def)
finally show ?thesis
by (metis subset_refl)
qed
show "Z \<union> pp_prod X Y \<subseteq> Y \<Longrightarrow> pp_prod (pp_star X) Z \<subseteq> Y"
by (simp only: ball_UNIV pp_star_contr SUP_le_iff) (metis ppath_dioid.power_inductl)
show "Z \<union> pp_prod Y X \<subseteq> Y \<Longrightarrow> pp_prod Z (pp_star X) \<subseteq> Y"
by (simp only: ball_UNIV pp_star_contl SUP_le_iff) (metis ppath_dioid.power_inductr)
qed
subsection \<open>The Distributive Lattice Kleene Algebra\<close>
text \<open>In the case of bounded distributive lattices, the star maps
all elements to the maximal element.\<close>
definition (in bounded_distributive_lattice) bdl_star :: "'a \<Rightarrow> 'a" where
"bdl_star x = top"
sublocale bounded_distributive_lattice \<subseteq> kleene_algebra sup inf top bot less_eq less bdl_star
proof
fix x y z :: 'a
show "sup top (inf x (bdl_star x)) \<le> bdl_star x"
by (simp add: bdl_star_def)
show "sup z (inf x y) \<le> y \<Longrightarrow> inf (bdl_star x) z \<le> y"
by (simp add: bdl_star_def)
show "sup z (inf y x) \<le> y \<Longrightarrow> inf z (bdl_star x) \<le> y"
by (simp add: bdl_star_def)
qed
subsection \<open>The Min-Plus Kleene Algebra\<close>
text \<open>One cannot define a Kleene star for max-plus and min-plus
algebras that range over the real numbers. Here we define the star for
a min-plus algebra restricted to natural numbers and~$+\infty$. The
resulting Kleene algebra is commutative. Similar variants can be
obtained for max-plus algebras and other algebras ranging over the
positive or negative integers.\<close>
instantiation pnat :: commutative_kleene_algebra
begin
definition star_pnat where
"x\<^sup>\<star> \<equiv> (1::pnat)"
instance
proof
fix x y z :: pnat
show "1 + x \<cdot> x\<^sup>\<star> \<le> x\<^sup>\<star>"
by (metis star_pnat_def zero_pnat_top)
show "z + x \<cdot> y \<le> y \<Longrightarrow> x\<^sup>\<star> \<cdot> z \<le> y"
by (simp add: star_pnat_def)
show "z + y \<cdot> x \<le> y \<Longrightarrow> z \<cdot> x\<^sup>\<star> \<le> y"
by (simp add: star_pnat_def)
show "x \<cdot> y = y \<cdot> x"
unfolding times_pnat_def by (cases x, cases y, simp_all)
qed
end (* instantiation *)
end
|
theory Pure_HOL
imports
Pure
"HOL-Eisbach.Eisbach"
"HOL-Eisbach.Eisbach_Tools"
begin
text \<open>Extend the Pure logic itself with the usual HOL connectives.\<close>
section \<open>Constants, quantifiers, and connectives\<close>
abbreviation Equal (infix "=" 2)
where "(x = y) \<equiv> (x \<equiv> y)"
definition "True \<equiv> (\<And>P :: prop. P = P)"
definition "False \<equiv> (\<And>P :: prop. P = True)"
definition All :: "('a \<Rightarrow> prop) \<Rightarrow> prop" (binder "\<forall>" [0] 0)
where "(\<forall>x. PROP P x) \<equiv> (\<And>x. PROP P x)"
definition Ex :: "('a \<Rightarrow> prop) \<Rightarrow> prop" (binder "\<exists>" [0] 0)
where "(\<exists>x. PROP P x) \<equiv> (\<And>Q::prop. (\<forall>x. PROP P x \<Longrightarrow> PROP Q) \<Longrightarrow> PROP Q)"
definition And (infix "\<and>" 4)
where "P \<and> Q \<equiv> (PROP P &&& PROP Q)"
definition Ex1 :: "('a \<Rightarrow> prop) \<Rightarrow> prop" (binder "\<exists>!" [0] 0)
where "(\<exists>!x. PROP P x) \<equiv> (\<exists>x. PROP P x \<and> (\<forall>y. PROP P y \<Longrightarrow> y = x))"
definition Or (infix "\<or>" 3)
where "P \<or> Q \<equiv> (\<And>R::prop. \<lbrakk>PROP P \<Longrightarrow> PROP R; PROP Q \<Longrightarrow> PROP R\<rbrakk> \<Longrightarrow> PROP R)"
definition Not ("(\<not> _)" [5] 6)
where "\<not>P \<equiv> (PROP P \<Longrightarrow> PROP False)"
definition Iff (infix "\<Longleftrightarrow>" 0)
where "(P \<Longleftrightarrow> Q) \<equiv> (PROP P \<Longrightarrow> PROP Q) \<and> (PROP Q \<Longrightarrow> PROP P)"
section \<open>Methods\<close>
text \<open>We set up the basic logical solver following the Eisbach user manual.\<close>
ML_file "~~/src/Tools/misc_legacy.ML"
ML_file "~~/src/Tools/IsaPlanner/isand.ML"
ML_file "~~/src/Tools/IsaPlanner/rw_inst.ML"
ML_file "~~/src/Tools/IsaPlanner/zipper.ML"
ML_file "~~/src/Tools/eqsubst.ML"
\<comment> \<open>Import the @{method subst} method, used for substituting definitional equalities.\<close>
named_theorems intros
named_theorems elims
named_theorems dests
named_theorems simps
named_theorems subst
named_theorems refine
method logic declares intros elims dests subst refine = (
assumption | unfold All_def | fold All_def |
rule intros | erule elims | drule dests | frule dests |
subst subst | subst (asm) subst |
(erule refine; solves \<open>logic\<close>)
)+
method_setup move =
\<open>Scan.succeed (METHOD o (ALLGOALS oo Method.insert_tac))\<close> "Move facts into the goal statements"
\<comment>\<open>Strangely, the move method can solve iterated modus ponens by moving the rule and all premises into the goal state, followed by a call to qed.\<close>
method mp = move
section \<open>Basic axioms and rules\<close>
lemmas mp = meta_mp
lemma TrueI [intros]: "PROP True"
unfolding True_def .
lemmas tautology = TrueI
lemma FalseE [dest, dests]: "PROP False \<Longrightarrow> PROP P"
unfolding False_def proof -
assume "\<And>P. P = True"
hence "PROP P = True" by (rule meta_spec)
thus "PROP P" using tautology by simp
qed
lemmas explosion = FalseE
method explosion = (rule explosion)
lemmas AllE = meta_spec[folded All_def]
lemmas AllE2 = meta_allE[folded All_def]
method instantiate for pred :: "'a \<Rightarrow> prop" and tm :: 'a = (elim AllE[where ?P=pred and ?x=tm])
lemma ExI:
assumes "PROP P x"
shows "\<exists>x. PROP P x"
unfolding Ex_def All_def proof -
fix Q :: "prop"
assume "\<And>x. PROP P x \<Longrightarrow> PROP Q"
with assms show "PROP Q" by mp
qed
lemma ExE:
assumes "\<exists>x. PROP P x" and "\<forall>x. PROP P x \<Longrightarrow> PROP Q"
shows "PROP Q"
proof -
have "(\<forall>x. PROP P x \<Longrightarrow> PROP Q) \<Longrightarrow> PROP Q"
using
AllE[where ?P="\<lambda>x. ((\<forall>y. PROP P y \<Longrightarrow> PROP x) \<Longrightarrow> PROP x)" and ?x="PROP Q"]
assms(1)[unfolded Ex_def]
by mp
thus "PROP Q" using assms(2) by mp
qed
lemma AndI [intro, intros]:
assumes "PROP P" and "PROP Q"
shows "PROP P \<and> PROP Q"
unfolding And_def by fact+
lemma AndD1 [dest, dests]:
assumes "PROP P \<and> PROP Q"
shows "PROP P"
using assms unfolding And_def by (rule conjunctionD1)
lemma AndD2 [dest, dests]:
assumes "PROP P \<and> PROP Q"
shows "PROP Q"
using assms unfolding And_def by (rule conjunctionD2)
lemma AndE [elim, elims]:
assumes "PROP P \<and> PROP Q" and "PROP P \<Longrightarrow> PROP Q \<Longrightarrow> PROP R"
shows "PROP R"
proof -
from \<open>PROP P \<and> PROP Q\<close> have "PROP P" and "PROP Q" by logic
thus "PROP R" using assms by mp
qed
lemma OrI1:
assumes "PROP P"
shows "PROP P \<or> PROP Q"
unfolding Or_def using assms by move logic
lemma OrI2:
assumes "PROP Q"
shows "PROP P \<or> PROP Q"
unfolding Or_def using assms by move logic
lemma OrE [elim, elims]:
assumes "PROP P \<or> PROP Q" and "PROP P \<Longrightarrow> PROP R" and "PROP Q \<Longrightarrow> PROP R"
shows "PROP R"
apply (rule AllE[where ?P="\<lambda>R. (\<lbrakk>PROP P \<Longrightarrow> PROP R; PROP Q \<Longrightarrow> PROP R\<rbrakk> \<Longrightarrow> PROP R)"])
by (unfold All_def, fold Or_def) fact+
lemma NotI [intro, intros]:
assumes "PROP P \<Longrightarrow> PROP False"
shows "\<not>PROP P"
unfolding Not_def by fact
lemma NotE [refine]:
assumes "\<not>PROP P" and "PROP P"
shows "PROP Q"
using assms unfolding Not_def by mp explosion
lemmas contradiction = NotE
method contradiction = (rule contradiction | rule contradiction[rotated])
lemma IffI [intro, intros]:
assumes "PROP P \<Longrightarrow> PROP Q" and "PROP Q \<Longrightarrow> PROP P"
shows "PROP P \<Longleftrightarrow> PROP Q"
unfolding Iff_def by (rule AndI) fact+
lemma IffD1 [dest, dests]:
assumes "PROP P \<Longleftrightarrow> PROP Q"
shows "PROP P \<Longrightarrow> PROP Q"
using assms unfolding Iff_def by (rule AndD1)
lemma IffD2 [dest, dests]:
assumes "PROP P \<Longleftrightarrow> PROP Q"
shows "PROP Q \<Longrightarrow> PROP P"
using assms unfolding Iff_def by (rule AndD2)
lemmas forward_imp = IffD1
lemmas backward_imp = IffD2
text \<open>Logical equivalence is the equality relation on propositions.\<close>
axiomatization where
Iff_imp_Equal [intros]: "(PROP P \<Longleftrightarrow> PROP Q) \<Longrightarrow> (PROP P = PROP Q)"
lemma Iff_iff_Equal:
"(PROP P \<Longleftrightarrow> PROP Q) \<Longleftrightarrow> (PROP P = PROP Q)"
by logic
lemma Iff_is_Equal [simps]:
"(PROP P \<Longleftrightarrow> PROP Q) = (PROP P = PROP Q)"
by logic
section \<open>Classical logic\<close>
axiomatization where LEM: "PROP P \<or> \<not>PROP P"
text \<open>Set up disjunctive case analysis:\<close>
thm OrE[OF LEM, where ?P="PROP Z"]
method cases for cs :: "prop" = (rule OrE[OF LEM[where ?P="PROP cs"]])
lemma double_negE:
assumes "\<not>\<not>PROP P"
shows "PROP P"
proof (cases "PROP P")
show "PROP P \<Longrightarrow> PROP P" .
show "\<not>PROP P \<Longrightarrow> PROP P" by (explosion, fold Not_def, fact)
qed
lemma de_Morgan_And:
"\<not>(PROP A \<and> PROP B) = \<not>PROP A \<or> \<not>PROP B"
apply logic
apply (cases "PROP A")
apply (cases "PROP B")
apply logic
apply (erule OrI1 OrI2)+
apply logic
done
lemma de_Morgan_Or:
"\<not>(PROP A \<or> PROP B) = \<not>PROP A \<and> \<not>PROP B"
apply logic defer
apply logic defer
apply logic
proof -
assume *: "\<not>(PROP A \<or> PROP B)"
{ assume "PROP A"
hence "PROP A \<or> PROP B" by (rule OrI1)
thus "PROP False" using * by contradiction }
{ assume "PROP B"
hence "PROP A \<or> PROP B" by (rule OrI2)
thus "PROP False" using * by contradiction }
qed
end
|
The distribution of $-X$ is the same as the distribution of $X$. |
open import Function using (_∘_)
open import Data.Fin as Fin using (Fin; toℕ)
open import Data.Nat as Nat using (ℕ; suc; zero)
open import Data.Nat.Show using () renaming (show to showℕ)
open import Data.String
open import Data.Vec using (Vec; []; _∷_)
open import Relation.Nullary using (Dec; yes; no)
open import Relation.Binary.PropositionalEquality as PropEq using (_≡_; refl)
module Unification.Show
(Sym : ℕ → Set)
(showSym : ∀ {k} (s : Sym k) → String)
(decEqSym : ∀ {k} (f g : Sym k) → Dec (f ≡ g)) where
import Unification
module UI = Unification Sym decEqSym
open UI hiding (_++_)
showFin : ∀ {n} → Fin n → String
showFin {n} x = (showℕ (toℕ x)) ++ "/" ++ (showℕ n)
mutual
showTerm : ∀ {n} → Term n → String
showTerm (var x) = "?" ++ showFin x
showTerm (con {zero} s []) = showSym s
showTerm (con {suc k} s ts) = showSym s ++ "(" ++ showTermArgs ts ++ ")"
showTermArgs : ∀ {n k} → Vec (Term n) k → String
showTermArgs [] = ""
showTermArgs (t ∷ []) = showTerm t
showTermArgs (t₁ ∷ t₂ ∷ ts) = showTerm t₁ ++ " , " ++ showTermArgs (t₂ ∷ ts)
showSubst : ∀ {m n} → Subst m n → String
showSubst s = "{" ++ showSubst' s ++ "}"
where
showFor : ∀ {n} (x : Fin (suc n)) (t : Term n) → String
showFor x t = "?" ++ showFin x ++ " → " ++ showTerm t
showSubst' : ∀ {m n} → Subst m n → String
showSubst' nil = ""
showSubst' (snoc nil t x)
= "?" ++ showFin x ++ " → " ++ showTerm t
showSubst' (snoc (snoc s t₂ x₂) t₁ x₁)
= showFor x₁ t₁ ++ " , " ++ showSubst' (snoc s t₂ x₂)
|
[STATEMENT]
lemma mk_BaseDim_neq [simp]: "x \<noteq> y \<Longrightarrow> mk_BaseDim x \<noteq> mk_BaseDim y"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x \<noteq> y \<Longrightarrow> mk_BaseDim x \<noteq> mk_BaseDim y
[PROOF STEP]
by (auto simp add: mk_BaseDim_def fun_eq_iff) |
```python
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
import numpy as np
import sympy as sym
```
```python
def f(X, n, q, b, c):
    """Evaluate the quadratic form x^T Q x + b^T x + c at every row of X."""
    Z = np.zeros(len(X))
    for i in range(len(X)):
        for j in range(int(n)):
            for k in range(int(n)):
                Z[i] += q[j][k]*X[i][j]*X[i][k]
        for j in range(int(n)):
            Z[i] += b[j]*X[i][j]
        Z[i] += float(c[0])   # constant term
    return Z
```
```python
def f_mesh(X, Y, q, b, c):
    """Vectorised version of f for meshgrid inputs, used for plotting."""
    Z = q[0][0]*X*X + q[0][1]*X*Y + q[1][0]*Y*X + q[1][1]*Y*Y + b[0]*X + b[1]*Y + c
    return Z
```
```python
def f2(X, Y, n, q, b, c):
Z = q[0][0]*X*X + q[0][1]*X*Y + q[1][0]*Y*X + q[1][1]*Y*Y + b[0]*X + b[1]*Y + int(c[0])
return Z
```
```python
def gd_with_momentum(X_new, X1, Y1, Z1, n, q, b, c, alpha=0.10, beta=0.8, precision=0.0001, max_iter=200):
    """Minimise f2 with gradient descent plus momentum, starting from (4.9, 4.9)."""
    X_old = np.zeros((1, 2))
    X_new = np.zeros((1, 2))
    dfr = np.zeros((1, 2))            # gradient at the current point
    X_new[0][0] = 4.9
    X_new[0][1] = 4.9
    i = 0
    Xs = np.zeros((max_iter, 2))      # history of iterates
    Ys = np.zeros(max_iter)           # history of function values
    V = np.zeros((max_iter + 1, 2))   # momentum terms (exponential average of gradients)
    x, y = sym.symbols('x y')
    df1 = sym.diff(f2(x, y, n, q, b, c), x)   # symbolic partial derivative in x
    df2 = sym.diff(f2(x, y, n, q, b, c), y)   # symbolic partial derivative in y
    # Iterate until every coordinate moves by less than `precision`
    # or the iteration budget is exhausted.
    while np.all(abs(X_new - X_old) > precision) and max_iter > i:
        Xs[i] = X_new
        Ys[i] = f2(X_new[0][0], X_new[0][1], n, q, b, c)
        X_old = X_new
        dfr[0][0] = df1.evalf(subs={x: X_old[0][0], y: X_old[0][1]})
        dfr[0][1] = df2.evalf(subs={x: X_old[0][0], y: X_old[0][1]})
        i += 1
        V[i] = beta * V[i - 1] + (1 - beta) * dfr   # momentum update
        X_new = X_new - alpha * V[i]                # descent step along the momentum direction
        # print("V: {}, dfr: {}, X_new: {}".format(V[i], dfr, X_new))
        alpha *= 0.99                               # slowly decay the step size
    print("Finished with {} step".format(i))
    if (i < max_iter):
        Xs[i] = X_new
        Ys[i] = f2(X_new[0][0], X_new[0][1], n, q, b, c)
        for j in range(max_iter - 1, i, -1):        # trim the unused tail of the history
            Xs = np.delete(Xs, j, axis=0)
            Ys = np.delete(Ys, j, axis=0)
    return Xs, Ys
```
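The update rule above is the standard momentum recurrence: keep an exponential average of the gradients, $v_{k+1} = \beta v_k + (1-\beta)\nabla f(x_k)$, and step against it, $x_{k+1} = x_k - \alpha v_{k+1}$. Below is a minimal, self-contained sketch of the same recurrence on a one-dimensional quadratic; the function $f(x)=x^2$, the starting point and the iteration count are illustrative choices, not values used elsewhere in this notebook.
```python
def momentum_gd_1d(grad, x0, alpha=0.1, beta=0.8, iters=200):
    """Minimal momentum descent: v <- beta*v + (1-beta)*grad(x), x <- x - alpha*v."""
    x, v = x0, 0.0
    for _ in range(iters):
        v = beta * v + (1 - beta) * grad(x)
        x = x - alpha * v
    return x

# f(x) = x**2 has gradient 2*x and minimiser x = 0.
print(momentum_gd_1d(lambda x: 2.0 * x, x0=4.9))   # converges towards 0
```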
```python
# Note: n, q, b and c are read in the input cell further down; run that cell first.
X1 = np.arange(-5, 5, 0.1)
Y1 = np.arange(-5, 5, 0.1)
Z1 = np.zeros(len(X1))
X_new = np.zeros((100,2))
for i in range(len(X1)):
X_new[i][0] = X1[i]
X_new[i][1] = Y1[i]
Z1 = f(X_new, n, q, b, c)
x_list, y_list = gd_with_momentum(X_new, X1, Y1, Z1, n, q, b, c)
```
Finished with 200 step
```python
n = input("Enter power of your function: ")
q = []
for i in range(int(n)):
q.append(input("Enter the function's coefficient matrix q's row q[{}]: ".format(i)).split())
b = input("Enter the function's coefficient b matrix: ").split()
c = input("Enter the function's constant c: ")
for i in range(int(n)):
q[i] = list(map(float, q[i]))
b = list(map(float, b))
c = list(map(float, c))
```
Enter power of your function: 2
Enter the function's coefficient matrix q's row q[0]: 1 0
Enter the function's coefficient matrix q's row q[1]: 0 2
Enter the function's coefficient b matrix: 0 0
Enter the function's constant c: 0
```python
X1, Y1 = np.meshgrid(X1, Y1)
Z1 = f_mesh(X1, Y1, q, b, c)
X, Y = zip(*x_list)
Z = y_list
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 10))
cs = ax.contour(X1, Y1, Z1)
plt.clabel(cs, inline=1, fontsize=10)
colors = ['b', 'g', 'm', 'c', 'orange']
# Draw an arrow between consecutive iterates to trace the descent path.
for j in range(1, len(X)):
    ax.annotate('', xy=(X[j], Y[j]), xytext=(X[j-1], Y[j-1]),
                arrowprops={'arrowstyle': '->', 'color': 'r', 'lw': 1},
                va='center', ha='center')
ax.scatter(X, Y, s=40, lw=0)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_title('Minimizing function')
```
```python
fig = plt.figure()
ax = plt.axes(projection='3d')
ax.plot_surface(X1, Y1, Z1, rstride=1, cstride=1,
cmap='viridis', edgecolor='none')
ax.set_title('surface')
plt.show()
```
```python
plt.figure()
cs = plt.contour(X1, Y1, Z1)
plt.clabel(cs, inline=1, fontsize=10)
plt.title('Two-dimensional contour plot')
plt.show()
```
```python
```
|
(*|
################################################################################
Retrieving constraints from GADT to ensure exhaustion of pattern matching in Coq
################################################################################
:Link: https://stackoverflow.com/q/56525092
|*)
(*|
Question
********
Let's define two helper types:
|*)
Inductive AB : Set := A | B.
Inductive XY : Set := X | Y.
(*| Then two other types that depend on ``XY`` and ``AB`` |*)
Inductive Wrapped : AB -> XY -> Set :=
| W : forall (ab : AB) (xy : XY), Wrapped ab xy
| WW : forall (ab : AB), Wrapped ab (match ab with A => X | B => Y end).
Inductive Wrapper : XY -> Set :=
WrapW : forall (xy : XY), Wrapped A xy -> Wrapper xy.
(*|
Note the ``WW`` constructor – it can only be a value of the types
``Wrapped A X`` and ``Wrapped B Y``.
Now I would like to pattern match on ``Wrapper Y``:
.. coq:: unfold fails
|*)
Fail Definition test (wr : Wrapper Y) : nat :=
match wr with
| WrapW Y w =>
match w with
| W A Y => 27
end
end.
(*|
Why does this happen? ``Wrapper`` forces the contained ``Wrapped`` to be
the ``A`` version, the type signature forces ``Y``, and the ``WW``
constructor forbids being ``A`` and ``Y`` simultaneously. I don't
understand why this case is even being considered, while I am forced to
handle it, which seems to be impossible.
How can I work around this situation?
|*)
(*|
Answer (HTNW)
*************
Let's simplify:
|*)
Inductive MyTy : Set -> Type :=
MkMyTy : forall (A : Set), A -> MyTy A.
Fail Definition extract (m : MyTy nat) : nat :=
match m with MkMyTy _ x => S x end. (* .unfold .fails *)
(*|
This is because I said
.. code-block:: coq
Inductive MyTy : Set -> Type
This made the first argument to ``MyTy`` an index of ``MyTy``, as
opposed to a parameter. An inductive type with a parameter may look
like this:
|*)
Inductive list (A : Type) : Type :=
| nil : list A
| cons : A -> list A -> list A.
(*|
Parameters are named on the left of the ``:``, and are not
``forall``-d in the definition of each constructor. (They are still
present in the constructors' types outside of the definition: ``cons :
forall (A : Type), A -> list A -> list A``.) If I make the ``Set`` a
parameter of ``MyTy``, then ``extract`` can be defined:
|*)
Reset Initial. (* .none *)
Inductive MyTy (A : Set) : Type :=
MkMyTy : A -> MyTy A.
Definition extract (m : MyTy nat) : nat :=
match m with MkMyTy _ x => S x end.
(*|
The reason for this is that, on the inside, a ``match`` *ignores*
anything you know about the indices of the scrutinee from the outside.
(Or, rather, the underlying ``match`` expression in Gallina ignores
the indices. When you write a ``match`` in the source code, Coq tries
to convert it into the primitive form while incorporating information
from the indices, but it often fails.) The fact that ``m : MyTy nat``
in the first version of ``extract`` simply did not matter. Instead,
the match gave me ``S : Set`` (the name was automatically chosen by
Coq) and ``x : S``, as per the constructor ``MkMyTy``, with no mention
of ``nat``. Meanwhile, because ``MyTy`` has a parameter in the second
version, I actually get ``x : nat``. The ``_`` is really a placeholder
this time; it is mandatory to write it as ``_``, because there's
nothing to match, and you can ``Set Asymmetric Patterns`` to make it
disappear.
The reason we distinguish between parameters and indices is because
parameters have a lot of restrictions—most notably, if ``I`` is an
inductive type with parameters, then the parameters must appear as
variables in the return type of each constructor:
.. coq:: fails
|*)
Fail Inductive F (A : Set) : Set := MkF : list A -> F (list A).
(* ^--------^
BAD: must appear as F A *)
(*|
In your problem, we should make parameters where we can. E.g. the
``match wr with Wrap Y w => _ end`` bit is wrong, because the ``XY``
argument to ``Wrapper`` is an index, so the fact that ``wr : Wrapper
Y`` is ignored; you would need to handle the ``Wrap X w`` case too.
Coq hasn't gotten around to telling you that.
|*)
Reset Wrapped. (* .none *)
Inductive Wrapped (ab : AB) : XY -> Set :=
| W : forall (xy : XY), Wrapped ab xy
| WW : Wrapped ab (match ab with A => X | B => Y end).
Inductive Wrapper (xy : XY) : Set := WrapW : Wrapped A xy -> Wrapper xy.
(*| And now your ``test`` compiles (almost): |*)
Definition test (wr : Wrapper Y) : nat :=
match wr with
| WrapW _ w => (* mandatory _ *)
match w with
| W _ Y => 27 (* mandatory _ *)
end
end.
(*|
because having the parameters gives Coq enough information for its
``match``-elaboration to use information from ``Wrapped``'s index. If
you issue ``Print test.``, you can see that there's a bit of
hoop-jumping to pass information about the index ``Y`` through the
primitive ``match``\ s which would otherwise ignore it. `See the
reference manual for more information.
<https://coq.inria.fr/refman/addendum/extended-pattern-matching.html>`__
|*)
(*|
Answer (radrow)
***************
The solution turned out to be simple but tricky:
.. coq:: none
|*)
Reset Initial.
Inductive AB : Set := A | B.
Inductive XY : Set := X | Y.
Inductive Wrapped : AB -> XY -> Set :=
| W : forall (ab : AB) (xy : XY), Wrapped ab xy
| WW : forall (ab : AB), Wrapped ab (match ab with A => X | B => Y end).
Inductive Wrapper : XY -> Set :=
WrapW : forall (xy : XY), Wrapped A xy -> Wrapper xy.
(*||*)
Definition test (wr : Wrapper Y): nat.
refine (match wr with
| WrapW Y w =>
match w in Wrapped ab xy return ab = A -> xy = Y -> nat with
| W A Y => fun _ _ => 27
| _ => fun _ _ => _
end eq_refl eq_refl
end);
[ | | destruct a]; congruence.
Defined.
(*|
The issue was that Coq didn't infer the invariants needed to realize
that the ``WW`` case is impossible, so I had to give it an explicit
proof of that.
In this solution I changed ``match`` to return a function that takes
two proofs and brings them to the context of our actual result:
- ``ab`` is apparently ``A``
- ``xy`` is apparently ``Y``
I covered the real cases ignoring these assumptions, and deferred the
"bad" cases to be proven false later, which turned out to be trivial. I
was forced to pass the ``eq_refl``\ s manually, but it works and does
not look that bad.
|*)
|
module datatype-util where
open import constants
open import ctxt
open import syntax-util
open import general-util
open import type-util
open import cedille-types
open import subst
open import rename
open import free-vars
{-# TERMINATING #-}
decompose-arrows : ctxt → type → params × type
decompose-arrows Γ (TpAbs me x atk T) =
let x' = fresh-var-new Γ x in
case decompose-arrows (ctxt-var-decl x' Γ) (rename-var Γ x x' T) of λ where
(ps , T') → Param me x' atk :: ps , T'
decompose-arrows Γ T = [] , T
decompose-ctr-type : ctxt → type → type × params × 𝕃 tmtp
decompose-ctr-type Γ T with decompose-arrows Γ T
...| ps , Tᵣ with decompose-tpapps Tᵣ
...| Tₕ , as = Tₕ , ps , as
{-# TERMINATING #-}
kind-to-indices : ctxt → kind → indices
kind-to-indices Γ (KdAbs x atk k) =
let x' = fresh-var-new Γ x in
Index x' atk :: kind-to-indices (ctxt-var-decl x' Γ) (rename-var Γ x x' k)
kind-to-indices Γ _ = []
rename-indices-h : ctxt → renamectxt → indices → 𝕃 tmtp → indices
rename-indices-h Γ ρ (Index x atk :: is) (ty :: tys) =
Index x' atk' ::
rename-indices-h (ctxt-var-decl x' Γ) (renamectxt-insert ρ x x') is tys
where
x' = fresh-var-renamectxt Γ ρ (maybe-else x id (is-var-unqual ty))
atk' = subst-renamectxt Γ ρ -tk atk
rename-indices-h Γ ρ (Index x atk :: is) [] =
let x' = fresh-var-renamectxt Γ ρ x in
Index x' (subst-renamectxt Γ ρ -tk atk) ::
rename-indices-h (ctxt-var-decl x' Γ) (renamectxt-insert ρ x x') is []
rename-indices-h _ _ [] _ = []
rename-indices : ctxt → indices → 𝕃 tmtp → indices
rename-indices Γ = rename-indices-h Γ empty-renamectxt
positivity : Set
positivity = 𝔹 × 𝔹 -- occurs positively × occurs negatively
pattern occurs-nil = ff , ff
pattern occurs-pos = tt , ff
pattern occurs-neg = ff , tt
pattern occurs-all = tt , tt
--positivity-inc : positivity → positivity
--positivity-dec : positivity → positivity
positivity-neg : positivity → positivity
positivity-add : positivity → positivity → positivity
--positivity-inc = map-fst λ _ → tt
--positivity-dec = map-snd λ _ → tt
positivity-neg = uncurry $ flip _,_
positivity-add (+ₘ , -ₘ) (+ₙ , -ₙ) = (+ₘ || +ₙ) , (-ₘ || -ₙ)
-- just tt = negative occurrence; just ff = not in the return type; nothing = okay
module positivity (x : var) where
open import conversion
not-free : ∀ {ed} → ⟦ ed ⟧ → maybe 𝔹
not-free = maybe-map (λ _ → tt) ∘' maybe-if ∘' is-free-in x
if-free : ∀ {ed} → ⟦ ed ⟧ → positivity
if-free t with is-free-in x t
...| f = f , f
if-free-args : args → positivity
if-free-args as with stringset-contains (free-vars-args as) x
...| f = f , f
hnf' : ∀ {ed} → ctxt → ⟦ ed ⟧ → ⟦ ed ⟧
hnf' Γ T = hnf Γ unfold-no-defs T
mtt = maybe-else tt id
mff = maybe-else ff id
posₒ = fst
negₒ = snd
occurs : positivity → maybe 𝔹
occurs p = maybe-if (negₒ p) >> just tt
{-# TERMINATING #-}
arrs+ : ctxt → type → maybe 𝔹
type+ : ctxt → type → positivity
kind+ : ctxt → kind → positivity
tpkd+ : ctxt → tpkd → positivity
tpapp+ : ctxt → type → positivity
arrs+ Γ (TpAbs me x' atk T) =
let Γ' = ctxt-var-decl x' Γ in
occurs (tpkd+ Γ $ hnf' Γ -tk atk) maybe-or arrs+ Γ' (hnf' Γ' T)
arrs+ Γ (TpApp T tT) = occurs (tpapp+ Γ $ hnf' Γ (TpApp T tT))
--arrs+ Γ T maybe-or (not-free -tT' tT)
arrs+ Γ (TpLam x' atk T) =
let Γ' = ctxt-var-decl x' Γ in
occurs (tpkd+ Γ $ hnf' Γ -tk atk) maybe-or arrs+ Γ' (hnf' Γ' T)
arrs+ Γ (TpVar x') = maybe-if (~ x =string x') >> just ff
arrs+ Γ T = just ff
type+ Γ (TpAbs me x' atk T) =
let Γ' = ctxt-var-decl x' Γ in
positivity-add (positivity-neg $ tpkd+ Γ $ hnf' Γ -tk atk) (type+ Γ' $ hnf' Γ' T)
type+ Γ (TpIota x' T T') =
let Γ' = ctxt-var-decl x' Γ in
positivity-add (type+ Γ $ hnf' Γ T) (type+ Γ' $ hnf' Γ' T')
type+ Γ (TpApp T tT) = tpapp+ Γ $ hnf' Γ $ TpApp T tT
type+ Γ (TpEq tₗ tᵣ) = occurs-nil
type+ Γ (TpHole _) = occurs-nil
type+ Γ (TpLam x' atk T)=
let Γ' = ctxt-var-decl x' Γ in
positivity-add (positivity-neg $ tpkd+ Γ $ hnf' Γ -tk atk) (type+ Γ' (hnf' Γ' T))
type+ Γ (TpVar x') = x =string x' , ff
tpapp+ Γ T with decompose-tpapps T
...| TpVar x' , as =
let f = if-free-args (tmtps-to-args NotErased as) in
if x =string x'
then positivity-add occurs-pos f
else maybe-else' (data-lookup Γ x' as) f
λ {(mk-data-info x'' xₒ'' asₚ asᵢ ps kᵢ k cs csₚₛ eds gds) →
type+ Γ (hnf' Γ $ TpAbs tt x'' (Tkk k) $ foldr (uncurry λ cₓ cₜ → TpAbs ff ignored-var (Tkt cₜ)) (TpVar x'') (inst-ctrs Γ ps asₚ cs))}
...| _ , _ = if-free T
kind+ Γ (KdAbs x' atk k) =
let Γ' = ctxt-var-decl x' Γ in
positivity-add (positivity-neg $ tpkd+ Γ $ hnf' Γ -tk atk) (kind+ Γ' k)
kind+ Γ _ = occurs-nil
tpkd+ Γ (Tkt T) = type+ Γ (hnf' Γ T)
tpkd+ Γ (Tkk k) = kind+ Γ k
ctr-positive : ctxt → type → maybe 𝔹
ctr-positive Γ = arrs+ Γ ∘ hnf' Γ
|
<table>
<tr align=left><td>Text provided under a Creative Commons Attribution license, CC-BY. All code is made available under the FSF-approved MIT license. (c) Kyle T. Mandli</td></tr>
</table>
Note: This material largely follows the text "Numerical Linear Algebra" by Trefethen and Bau (SIAM, 1997) and is meant as a guide and supplement to the material presented there.
```python
from __future__ import print_function
%matplotlib inline
import numpy
import matplotlib.pyplot as plt
```
# Conditioning and Stability
Once an approximation to a linear system is constructed the next question is how much trust can we put in the approximation? Since the true solution is not known, one of the few tools we have is to ask how well the approximation matches the original equation. In other words, we seek a solution to a system,
$$
\vec{f}(\vec{x}) = \vec{b}.
$$
We do not have $\vec{x}$ but instead have an approximation, $\hat{x}$, and we hope that
$$
\vec{f}(\hat{x}) \approx \vec{b}.
$$
In this section the question we explore is how to bound the relative error $\frac{||\vec{x}-\hat{x}||}{||\vec{x}||}$ given the matrix $A$.
This leads to the notion of conditioning. Conditioning is the behavior of a problem when its data are perturbed a small amount; it is a mathematical (analytic) property of the original system of equations. Stability, on the other hand, is concerned with how the algorithm used to obtain an approximation behaves under the perturbations introduced by finite-precision arithmetic.
## Conditioning and Condition Numbers
A **well-conditioned** problem is one where a small perturbation to the original problem leads to only small changes in the solution.
Formally we can think of a function $f$ which maps $x$ to $y$
$$
f(x) = y \quad \text{or} \quad f: X \rightarrow Y.
$$
Let $x \in X$ where we perturb $x$ with $\delta x$ and we ask how the result $y$ changes:
$$
||f(x) - f(x + \delta x)|| \leq C ||x - (x+\delta x)||
$$
for some constant $C$, possibly dependent on $\delta x$, depending on the type of conditioning we are considering.
### Absolute Condition Number
If we let $\delta x$ be the small perturbation to the input and $\delta f = f(x + \delta x) - f(x)$ be the resulting change in the output, the **absolute condition number** $\hat{\!\kappa}$ can be defined as
$$
\hat{\!\kappa} = \sup_{\delta x} \frac{||\delta f||}{||\delta x||}
$$
for most problems (assuming $\delta f$ and $\delta x$ are both infinitesimal).
When $f$ is differentiable we can evaluate the condition number via the Jacobian. Recall that the derivative of a vector-valued function can be written in the form of a Jacobian matrix $J(x)$ where
$$
[J(x)]_{ij} = \frac{\partial f_i}{\partial x_j}(x).
$$
This allows us to write the infinitesimal $\delta f$ as
$$
\delta f \approx J(x) \delta x
$$
with equality when $||\delta x|| \rightarrow 0$. Then we can write the condition number as
$$
\hat{\!\kappa} = ||J(x)||
$$
where the norm is the one induced by the spaces $X$ and $Y$.
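As a quick numerical sanity check (the function, point and step size below are illustrative choices, not part of the derivation above), we can compare the finite-difference ratio $||\delta f||/||\delta x||$ for $f(x) = \sqrt{x}$ against the Jacobian value $|f'(x)| = 1/(2\sqrt{x})$:
```python
# Approximate the absolute condition number of f(x) = sqrt(x) at a point
# by comparing a finite-difference ratio with |f'(x)|.
func = lambda x: numpy.sqrt(x)
x0 = 2.0
delta_x = 1.0e-8
ratio = abs(func(x0 + delta_x) - func(x0)) / delta_x   # ||delta f|| / ||delta x||
print(ratio, 1.0 / (2.0 * numpy.sqrt(x0)))             # both approximately 0.3536
```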
### Relative Condition Number
The **relative condition number** is defined similarly and is related to the distinction between absolute and relative error defined previously. With the same caveats as before it can be defined as
$$
\kappa = \sup_{\delta x} \left( \frac{\frac{||\delta f||}{||f(x)||}}{\frac{||\delta x||}{||x||}} \right).
$$
Again if $f$ is differentiable we can use the Jacobian $J(x)$ to evaluate the relative condition number as
$$
\kappa = \frac{||J(x)||}{||f(x)|| ~/ ~||x||}.
$$
#### Examples
Calculate the following relative condition numbers of the following problems.
$\sqrt{x}$ for $x > 0$.
$$
f(x) = \sqrt{x}, \quad J(x) = f'(x) = \frac{1}{2 \sqrt{x}} \\
\kappa = \frac{||J(x)||}{||f(x)|| / ||x||} = \frac{1}{2 \sqrt{x}} \frac{x}{\sqrt{x}} = \frac{1}{2}
$$
Calculate the relative condition number of the scalar function $f(x) = x_1 - x_2$ of the vector $\vec{x} = (x_1, x_2)^T \in \mathbb R^2$, using the $\ell_\infty$ norm.
$$
f(x) = x_1 - x_2, \quad J(x) = \left [ \frac{\partial f}{\partial x_1}, \frac{\partial f}{\partial x_2}\right ] = [1, -1] \\
\kappa = \frac{||J(x)||_\infty}{||f(x)||_\infty / ||x||_\infty} = \frac{2 \max_{i=1,2} |x_i|}{|x_1 - x_2|}
$$
where
$$
||J||_\infty = 2.
$$
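The second example says that subtraction is ill-conditioned whenever $x_1 \approx x_2$, which is the usual explanation of catastrophic cancellation. A quick check with some illustrative numbers:
```python
# Relative condition number of f(x) = x_1 - x_2 in the infinity norm,
# evaluated at a point where the two components nearly cancel.
x1, x2 = 1.000001, 1.000000
kappa = 2.0 * max(abs(x1), abs(x2)) / abs(x1 - x2)
print(kappa)   # roughly 2e6: input perturbations are amplified about a
               # million-fold in the relative sense
```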
The condition number of a function was discussed in general terms above. Now, the more specific case of a linear function, a matrix-vector multiplication, is examined. Here we let $\vec{f}(\vec{x})=Ax$ and determine the condition number by perturbing $x$.
We begin with the definition above,
$$\begin{aligned}
\kappa &= \sup_{\delta x} \left ( \frac{||A (\vec{x}+\delta x) - A \vec{x}||}{||A\vec{x}||} \frac{||\vec{x}||}{||\delta x||}\right ), \\
&= \sup_{\delta x} \frac{ ||A \delta x||}{||\delta x||} \frac{||\vec{x}||}{||A\vec{x}||}, \\
&= ||A|| \frac{||\vec{x}||}{||A \vec{x}||},
\end{aligned}$$
where $\delta x$ is a vector.
If $A$ has an inverse, then we note that
$$
\begin{align}
\vec{x} &= A^{-1}A \vec{x}, \\
\Rightarrow ||\vec{x}|| &= || A^{-1}A \vec{x} ||, \\
&\leq ||A^{-1}|| || A \vec{x} ||,
\end{align}
$$
which implies that
$$
\frac{||x||}{||A x||} \leq ||A^{-1}||.
$$
We can now bound the condition number for a matrix by
$$
\kappa \leq ||A|| ||A^{-1}||.
$$
### Condition Number of a Matrix
The condition number of a matrix is defined by the product
$$
\kappa(A) = ||A||~||A^{-1}||.
$$
where here we are thinking about the matrix rather than a problem. If $\kappa$ is small then $A$ is said to be **well-conditioned**. If $A$ is singular we assign $\kappa(A) = \infty$ as the matrix's condition number.
When we are considering the $\ell_2$ norm we can write the condition number as
$$
\kappa(A) = \sqrt{\rho(A^\ast A)} ~ \sqrt{\rho((A^\ast A)^{-1})} = \frac{\sqrt{\max |\lambda|}}{\sqrt{\min |\lambda|}},
$$
where $\lambda$ ranges over the eigenvalues of $A^\ast A$.
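For a concrete matrix (the $2 \times 2$ example below is an arbitrary illustration), the product $||A||_2~||A^{-1}||_2$, the eigenvalue formula above and `numpy.linalg.cond` all give the same value:
```python
A = numpy.array([[1.0, 2.0],
                 [3.0, 4.0]])
norm_product = numpy.linalg.norm(A, 2) * numpy.linalg.norm(numpy.linalg.inv(A), 2)
eigenvalues = numpy.linalg.eigvalsh(numpy.dot(A.T, A))   # eigenvalues of A^* A
eig_formula = numpy.sqrt(eigenvalues.max() / eigenvalues.min())
print(norm_product, eig_formula, numpy.linalg.cond(A, 2))   # all approximately 14.93
```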
### Condition Number of a System of Equations
Another way to think about the conditioning of the problem we looked at before is to treat the matrix $A$ itself as an input to the problem. Consider then the system of equations $A\vec{x} = \vec{b}$ where we will perturb both $A$ and $\vec{x}$, resulting in
$$
(A + \delta A)(\vec{x} + \delta x) = \vec{b}.
$$
Assuming we solve the problem exactly, we know that $A\vec{x} = \vec{b}$ and that the product of infinitesimals $\delta A \, \delta x$ is smaller than the other terms, so the above expression can be approximated by
$$
\begin{aligned}
(A + \delta A)(\vec{x} + \delta x) &= \vec{b}, \\
A\vec{x} + \delta A \, \vec{x} + A \, \delta x + \delta A \, \delta x &= \vec{b}, \\
\delta A \, \vec{x} + A \, \delta x & = 0.
\end{aligned}
$$
Solving for $\delta x$ leads to
$$
\delta x = -A^{-1} \delta A \vec{x}
$$
implying
$$
||\delta x|| \leq ||A^{-1}|| ~ ||\delta A|| ~ ||\vec{x}||
$$
and therefore
$$
\frac{\frac{||\delta x||}{||\vec{x}||}}{\frac{||\delta A||}{||A||}} \leq ||A^{-1}||~||A|| = \kappa(A).
$$
We can also say the following regarding the condition number of a system of equations then
**Theorem:** Let $\vec{b}$ be fixed and consider the problem of computing $\vec{x}$ in $A\vec{x} = \vec{b}$ where $A$ is square and non-singular. The condition number of this problem with respect to perturbations in $A$ is the condition number of the matrix $\kappa(A)$.
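We can watch this bound in action by perturbing $A$ with a small random $\delta A$ and comparing the observed relative change in $\vec{x}$ with $\kappa(A)~||\delta A|| / ||A||$; the matrix, right-hand side and perturbation size below are illustrative choices.
```python
numpy.random.seed(0)
A = numpy.array([[1.0, 2.0],
                 [3.0, 4.0]])
b = numpy.array([1.0, 1.0])
x = numpy.linalg.solve(A, b)

delta_A = 1.0e-8 * numpy.random.randn(2, 2)
x_perturbed = numpy.linalg.solve(A + delta_A, b)

observed = numpy.linalg.norm(x_perturbed - x) / numpy.linalg.norm(x)
bound = numpy.linalg.cond(A, 2) * numpy.linalg.norm(delta_A, 2) / numpy.linalg.norm(A, 2)
print(observed, bound)   # the observed change stays below kappa * ||dA|| / ||A|| (to first order)
```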
## Stability
We now return to the consideration of the fact that we are interested not only in the well-conditioning of a mathematical problem but in how we might solve it on a finite precision machine. In some sense conditioning describes how well we can solve a problem in exact arithmetic and stability how well we can solve the problem in finite arithmetic.
### Accuracy and Stability
As we have defined before we will consider **absolute error** as
$$
||F(x) - f(x)||
$$
where $F(x)$ is the approximation to the true solution $f(x)$. Similarly we can define **relative error** as
$$
\frac{||F(x) - f(x)||}{||f(x)||}.
$$
In the ideal case we would like the relative error to be $\mathcal{O}(\epsilon_{\text{machine}})$.
#### Forwards Stability
A **forward stable** algorithm for $x \in X$ has
$$
\frac{||F(x) - f(x)||}{||f(x)||} = \mathcal{O}(\epsilon_{\text{machine}})
$$
In other words
> A forward stable algorithm gives almost the right answer to exactly the right question.
#### Backwards Stability
A stronger notion of stability can also be defined which is satisfied by many approaches in numerical linear algebra. We say that an algorithm $F$ is **backward stable** if for $x \in X$ we have
$$
F(x) = f(\hat{\!x})
$$
for some $\hat{\!x}$ with
$$
\frac{||\hat{\!x} - x||}{||x||} = \mathcal{O}(\epsilon_{\text{machine}}).
$$
In other words
> A backward stable algorithm gives exactly the right answer to nearly the right question.
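A tiny illustration of this idea (the particular numbers are arbitrary): floating-point addition is backward stable, because the computed sum $s = \text{fl}(x + y)$ equals $(x + y)(1 + \delta) = x(1+\delta) + y(1+\delta)$ for some $|\delta| \leq \epsilon_{\text{machine}}$, i.e. it is the exact sum of slightly perturbed inputs. We can measure $\delta$ exactly with rational arithmetic:
```python
from fractions import Fraction

x, y = 0.1, 0.7
s = x + y                                # computed in double precision
exact = Fraction(x) + Fraction(y)        # exact sum of the numbers actually stored
delta = (Fraction(s) - exact) / exact    # s = (x + y)(1 + delta)
print(abs(float(delta)), numpy.finfo(float).eps)   # |delta| is below machine epsilon
```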
Combining these ideas along with the idea that we should not expect to be able to accurately compute the solution to a poorly conditioned problem we can form the mixed forward-backward sense of stability as for $x \in X$ if
$$
\frac{||F(x) - f(\hat{\!x})||}{||f(\hat{\!x})||} = \mathcal{O}(\epsilon_{\text{machine}})
$$
for some $\hat{\!x}$ with
$$
\frac{||\hat{\!x} - x||}{||x||} = \mathcal{O}(\epsilon_{\text{machine}}).
$$
In other words
> A stable algorithm gives nearly the right answer to nearly the right question.
An important aspect of the above statement is that we can not necessarily guarantee an accurate result. If the condition number $\kappa(x)$ is small we would expect that a stable algorithm would give us an accurate result (by definition). This is reflected in the following theorem.
**Theorem:** Suppose a backward stable algorithm is applied to solve a problem $f: X \rightarrow Y$ with condition number $\kappa$ on a finite precision machine, then the relative error satisfies
$$
\frac{||F(x) - f(x)||}{||f(x)||} = \mathcal{O}(\kappa(x) ~ \epsilon_{\text{machine}}).
$$
**Proof:** By backward stability there is an $\hat{\!x}$ with $F(x) = f(\hat{\!x})$ and $\frac{||\hat{\!x} - x||}{||x||} = \mathcal{O}(\epsilon_{\text{machine}})$. By the definition of the condition number of the problem we can then write
$$
\frac{||F(x) - f(x)||}{||f(x)||} = \frac{||f(\hat{\!x}) - f(x)||}{||f(x)||} \leq (\kappa(x) + \mathcal{O}(\epsilon_{\text{machine}}))\frac{||\hat{\!x} - x||}{||x||}.
$$
Combining this with the bound on $||\hat{\!x} - x|| / ||x||$ from backwards stability we arrive at the statement of the theorem.
**Backward Error Analysis** - Process of using the condition number of the problem and stability of the algorithm to determine the error.
**Forward Error Analysis** - Considers the accrual of error at each step of an algorithm given slightly perturbed input.
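In practice, a common way to carry out a backward error analysis for $A\vec{x} = \vec{b}$ is to compute the normwise backward error $||\vec{b} - A\hat{\!x}|| \,/\, (||A||~||\hat{\!x}||)$ of a computed solution; values near $\epsilon_{\text{machine}}$ indicate the solver behaved in a backward stable way on that input. The random test problem below is only an illustration.
```python
numpy.random.seed(1)
A = numpy.random.randn(100, 100)
b = numpy.random.randn(100)
x_hat = numpy.linalg.solve(A, b)
backward_error = (numpy.linalg.norm(b - numpy.dot(A, x_hat))
                  / (numpy.linalg.norm(A, 2) * numpy.linalg.norm(x_hat)))
print(backward_error, numpy.finfo(float).eps)   # a small multiple of machine epsilon
```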
### Stability of $A\vec{x} = \vec{b}$ using Householder Triangularization
As an example let's consider the conditioning and stability of solving $A\vec{x} = \vec{b}$. Here we will use a $QR$ factorization approach to solve $A\vec{x} = \vec{b}$, with the factorization computed by Householder triangularization. First off let's discuss the $QR$ factorization itself.
**Theorem:** Let the $QR$ factorization $A = QR$ of a matrix $A \in \mathbb C^{m \times n}$ be computed using a Householder triangularization approach on a finite precision machine, then
$$
\hat{\!Q} \cdot \hat{\!R} = A + \delta A \quad \frac{||\delta A||}{||A||} = \mathcal{O}(\epsilon_{\text{machine}})
$$
for some $\delta A \in \mathbb C^{m \times n}$ where $\hat{\!Q}$ and $\hat{\!R}$ are the finite arithmetic versions of $Q$ and $R$. Householder triangularization is therefore backward stable.
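We can observe this with `numpy.linalg.qr`, which is based on Householder reflections (LAPACK's `geqrf`): both the reconstruction error $||\hat{\!Q}\hat{\!R} - A||/||A||$ and the loss of orthogonality $||\hat{\!Q}^T\hat{\!Q} - I||$ sit at the level of machine epsilon. The random test matrix is an illustrative choice.
```python
numpy.random.seed(2)
A = numpy.random.randn(200, 200)
Q, R = numpy.linalg.qr(A)
reconstruction = numpy.linalg.norm(numpy.dot(Q, R) - A) / numpy.linalg.norm(A)
orthogonality = numpy.linalg.norm(numpy.dot(Q.T, Q) - numpy.eye(200))
print(reconstruction, orthogonality, numpy.finfo(float).eps)   # all small multiples of eps
```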
#### Solving $A\vec{x} = \vec{b}$ with $QR$ Factorization
So Householder triangularization is backwards stable, but we also know that this does not guarantee accuracy if the problem itself is ill-conditioned. Is backward stability enough to guarantee accurate results if we use it to solve $A\vec{x} = \vec{b}$, for instance? It turns out that the backward stability of the $QR$ factorization, combined with that of the remaining steps, is enough to guarantee the backward stability of the larger algorithm.
Consider the steps to solving $A \vec{x} = \vec{b}$ using $QR$ factorization:
1. Compute the $QR$ factorization of $A$
1. Multiply the vector $\vec{b}$ by $Q^\ast$ so that $\vec{y} = Q^\ast \vec{b}$.
1. Solve using backward-substitution the triangular system $R \vec{x} = \vec{y}$ or $\vec{x} = R^{-1} \vec{y}$.
We know that step (1) is backward stable; what about step (2), the matrix-vector multiplication? We can write the backward stability estimate as
$$
(\hat{\!Q} + \delta Q) \cdot \hat{\!y} = b \quad \text{with} \quad ||\delta Q|| = \mathcal{O}(\epsilon_{\text{machine}})
$$
where we have inverted the matrix $\hat{\!Q}$ since it is unitary. Since this equation holds exactly, the matrix-vector multiplication is also backward stable: the computed $\hat{\!y}$ is the exact result of applying a slightly perturbed matrix to $\vec{b}$.
Step (3) is backward substitution (or the computation of $R^{-1}$). Writing the backwards stability estimate we have
$$
(\hat{\!R} + \delta R) \cdot \hat{\!x} = \hat{\!y} \quad \text{with} \quad \frac{||\delta R||}{||\hat{\!R}||} = \mathcal{O}(\epsilon_{\text{machine}})
$$
demonstrating that the results $\hat{\!x}$ is the exact solution to a slight perturbation of the original problem.
These results lead to the following two theorems:
**Theorem:** Using $QR$ factorization to solve $A\vec{x}=\vec{b}$ as described above is backward stable, satisfying
$$
(A + \Delta A) ~ \hat{\!x} = \vec{b}, \quad \frac{||\Delta A||}{||A||} = \mathcal{O}(\epsilon_{\text{machine}})
$$
for some $\Delta A \in \mathbb C^{m \times n}$.
**Theorem:** The solution $\hat{x}$ computed by the above algorithm satisfies
$$
\frac{||\hat{\!x} - \vec{x}||}{||\vec{x}||} = \mathcal{O}(\kappa(A) ~ \epsilon_{\text{machine}}).
$$
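Putting the last two results together: the algorithm is backward stable, so the forward error is governed by the conditioning of the problem. The Hilbert matrices used below are a standard family of increasingly ill-conditioned test matrices, and the experiment is only an illustration; for a well-conditioned $A$ the computed solution is accurate to nearly machine precision, while for an ill-conditioned $A$ the error grows roughly like $\kappa(A)\,\epsilon_{\text{machine}}$.
```python
def solve_qr(A, b):
    """Solve A x = b via QR factorization: factor, apply Q^T, solve the triangular system."""
    Q, R = numpy.linalg.qr(A)
    # numpy's general solver is used for the triangular system here;
    # scipy.linalg.solve_triangular would exploit the structure of R.
    return numpy.linalg.solve(R, numpy.dot(Q.T, b))

for n in [4, 8, 12]:
    i, j = numpy.indices((n, n))
    A = 1.0 / (i + j + 1.0)            # Hilbert matrix of size n
    x_true = numpy.ones(n)
    b = numpy.dot(A, x_true)
    x_hat = solve_qr(A, b)
    forward_error = numpy.linalg.norm(x_hat - x_true) / numpy.linalg.norm(x_true)
    print(n, forward_error, numpy.linalg.cond(A, 2) * numpy.finfo(float).eps)
```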
|
The United Church, Hyde : What Is Christianity?
But many people rejected Jesus. They thought they’d be happier making their own rules and living outside his kingdom. This rejection of the King is something we all do. Jesus called it wrongdoing.
On the cross, Jesus was cut off from God’s friendship and goodness. He chose to experience hell – so that we don’t have to. The sinless King died to take the punishment wrongdoing deserves.
Repent means to turn our lives around, to live with Jesus in charge instead of ourselves. Believe means to trust that Jesus has done everything we need to give us a place in his kingdom.
Jesus offers all this to anyone and everyone who turns their life around and believes.
(*
* Copyright 2014, General Dynamics C4 Systems
*
* SPDX-License-Identifier: GPL-2.0-only
*)
theory ArchArch_AI
imports "../Arch_AI"
begin
context Arch begin global_naming ARM
definition
"valid_aci aci \<equiv> case aci of MakePool frame slot parent base \<Rightarrow>
\<lambda>s. cte_wp_at (\<lambda>c. c = cap.NullCap) slot s \<and> real_cte_at slot s \<and>
ex_cte_cap_wp_to is_cnode_cap slot s \<and>
slot \<noteq> parent \<and>
cte_wp_at (\<lambda>cap. \<exists>idx. cap = cap.UntypedCap False frame pageBits idx ) parent s \<and>
descendants_of parent (cdt s) = {} \<and>
is_aligned base asid_low_bits \<and> base \<le> 2^asid_bits - 1 \<and>
arm_asid_table (arch_state s) (asid_high_bits_of base) = None"
lemma safe_parent_strg:
"cte_wp_at (\<lambda>cap. cap = UntypedCap False frame pageBits idx) p s \<and>
descendants_of p (cdt s) = {} \<and>
valid_objs s
\<longrightarrow>
cte_wp_at (safe_parent_for (cdt s) p
(ArchObjectCap (ASIDPoolCap frame base)))
p s"
apply (clarsimp simp: cte_wp_at_caps_of_state safe_parent_for_def is_physical_def arch_is_physical_def)
apply (rule is_aligned_no_overflow)
apply (drule (1) caps_of_state_valid_cap)
apply (clarsimp simp: valid_cap_def cap_aligned_def)
done
(* 32-bit instance of Detype_AI.range_cover_full *)
lemma range_cover_full:
"\<lbrakk>is_aligned ptr sz;sz<word_bits\<rbrakk> \<Longrightarrow> range_cover (ptr::word32) sz sz (Suc 0)"
by (clarsimp simp:range_cover_def unat_eq_0 le_mask_iff[symmetric] word_and_le1 word_bits_def)
definition
valid_arch_inv :: "arch_invocation \<Rightarrow> 'z::state_ext state \<Rightarrow> bool"
where
"valid_arch_inv ai \<equiv> case ai of
InvokePageTable pti \<Rightarrow>
valid_pti pti
| InvokePageDirectory pdi \<Rightarrow>
valid_pdi pdi
| InvokePage pgi \<Rightarrow>
valid_page_inv pgi
| InvokeASIDControl aci \<Rightarrow>
valid_aci aci
| InvokeASIDPool ap \<Rightarrow>
valid_apinv ap"
lemma check_vp_wpR [wp]:
"\<lbrace>\<lambda>s. vmsz_aligned w sz \<longrightarrow> P () s\<rbrace>
check_vp_alignment sz w \<lbrace>P\<rbrace>, -"
apply (simp add: check_vp_alignment_def unlessE_whenE cong: vmpage_size.case_cong)
apply (rule hoare_pre)
apply (wp hoare_whenE_wp|wpc)+
apply (simp add: vmsz_aligned_def)
done
lemma check_vp_inv: "\<lbrace>P\<rbrace> check_vp_alignment sz w \<lbrace>\<lambda>_. P\<rbrace>"
apply (simp add: check_vp_alignment_def unlessE_whenE cong: vmpage_size.case_cong)
apply (rule hoare_pre)
apply (wp hoare_whenE_wp|wpc)+
apply simp
done
lemma p2_low_bits_max:
"(2 ^ asid_low_bits - 1) = (max_word :: 10 word)"
by (simp add: asid_low_bits_def max_word_def)
lemma dom_ucast_eq:
"(- dom (\<lambda>a::asid_low_index. p (ucast a::machine_word)) \<inter> {x. ucast x + y \<noteq> 0} = {}) =
(- dom p \<inter> {x. x \<le> 2 ^ asid_low_bits - 1 \<and> x + y \<noteq> 0} = {})"
apply safe
apply clarsimp
apply (rule ccontr)
apply (erule_tac x="ucast x" in in_emptyE)
apply (clarsimp simp: p2_low_bits_max)
apply (rule conjI)
apply (clarsimp simp: ucast_ucast_mask)
apply (subst (asm) less_mask_eq)
apply (rule word_less_sub_le [THEN iffD1])
apply (simp add: word_bits_def)
apply (simp add: asid_low_bits_def)
apply simp
apply (clarsimp simp: ucast_ucast_mask)
apply (subst (asm) less_mask_eq)
apply (rule word_less_sub_le [THEN iffD1])
apply (simp add: word_bits_def)
apply (simp add: asid_low_bits_def)
apply simp
apply (clarsimp simp: p2_low_bits_max)
apply (rule ccontr)
apply simp
apply (erule_tac x="ucast x" in in_emptyE)
apply clarsimp
apply (rule conjI, blast)
apply (rule word_less_sub_1)
apply (rule order_less_le_trans)
apply (rule ucast_less, simp)
apply (simp add: asid_low_bits_def)
done
lemma asid_high_bits_max_word:
"(2 ^ asid_high_bits - 1 :: 7 word) = max_word"
by (simp add: asid_high_bits_def max_word_def)
lemma dom_ucast_eq_7:
"(- dom (\<lambda>a::7 \<comment> \<open>asid_high_bits\<close> word. p (ucast a::word32)) \<inter> {x. x \<le> 2 ^ asid_high_bits - 1} = {}) =
(- dom p \<inter> {x. x \<le> 2 ^ asid_high_bits - 1} = {})"
apply safe
apply clarsimp
apply (rule ccontr)
apply (erule_tac x="ucast x" in in_emptyE)
apply (clarsimp simp: asid_high_bits_max_word)
apply (clarsimp simp: ucast_ucast_mask)
apply (subst (asm) less_mask_eq)
apply (rule word_less_sub_le [THEN iffD1])
apply (simp add: word_bits_def)
apply (simp add: asid_high_bits_def)
apply simp
apply (clarsimp simp: asid_high_bits_max_word)
apply (rule ccontr)
apply simp
apply (erule_tac x="ucast x" in in_emptyE)
apply clarsimp
apply (rule conjI, blast)
apply (rule word_less_sub_1)
apply (rule order_less_le_trans)
apply (rule ucast_less, simp)
apply (simp add: asid_high_bits_def)
done
lemma ucast_fst_hd_assocs:
"- dom (\<lambda>x. pool (ucast (x::asid_low_index)::machine_word)) \<inter> {x. ucast x + (w::machine_word) \<noteq> 0} \<noteq> {}
\<Longrightarrow>
fst (hd [(x, y)\<leftarrow>assocs pool . x \<le> 2 ^ asid_low_bits - 1 \<and> x + w \<noteq> 0 \<and> y = None]) =
ucast (fst (hd [(x, y)\<leftarrow>assocs (\<lambda>a::asid_low_index. pool (ucast a)) .
x \<le> 2 ^ asid_low_bits - 1 \<and>
ucast x + w \<noteq> 0 \<and> y = None]))"
apply (simp add: ucast_assocs[unfolded o_def])
apply (simp add: filter_map split_def)
apply (simp cong: conj_cong add: ucast_ucast_len)
apply (simp add: asid_low_bits_def minus_one_norm)
apply (simp add: ord_le_eq_trans [OF word_n1_ge])
apply (simp add: word_le_make_less)
apply (subgoal_tac "P" for P) (* cut_tac but more awesome *)
apply (subst hd_map, assumption)
apply simp
apply (rule sym, rule ucast_ucast_len)
apply (drule hd_in_set)
apply simp
apply (simp add: assocs_empty_dom_comp null_def split_def)
apply (simp add: ucast_assocs[unfolded o_def] filter_map split_def)
apply (simp cong: conj_cong add: ucast_ucast_len)
done
crunch typ_at [wp]: perform_page_table_invocation, perform_page_invocation,
perform_asid_pool_invocation, perform_page_directory_invocation "\<lambda>s. P (typ_at T p s)"
(wp: crunch_wps)
lemmas perform_page_table_invocation_typ_ats [wp] =
abs_typ_at_lifts [OF perform_page_table_invocation_typ_at]
lemmas perform_page_directory_invocation_typ_ats [wp] =
abs_typ_at_lifts [OF perform_page_directory_invocation_typ_at]
lemmas perform_page_invocation_typ_ats [wp] =
abs_typ_at_lifts [OF perform_page_invocation_typ_at]
lemmas perform_asid_pool_invocation_typ_ats [wp] =
abs_typ_at_lifts [OF perform_asid_pool_invocation_typ_at]
lemma perform_asid_control_invocation_tcb_at:
"\<lbrace>invs and valid_aci aci and st_tcb_at active p and
K (\<forall>w a b c. aci = asid_control_invocation.MakePool w a b c \<longrightarrow> w \<noteq> p)\<rbrace>
perform_asid_control_invocation aci
\<lbrace>\<lambda>rv. tcb_at p\<rbrace>"
apply (simp add: perform_asid_control_invocation_def)
apply (cases aci)
apply clarsimp
apply (wp |simp)+
apply (wp obj_at_delete_objects retype_region_obj_at_other2 hoare_vcg_const_imp_lift|assumption)+
apply (intro impI conjI)
apply (clarsimp simp: retype_addrs_def obj_bits_api_def default_arch_object_def image_def ptr_add_def)
apply (clarsimp simp: st_tcb_at_tcb_at)+
apply (frule st_tcb_ex_cap)
apply fastforce
apply (clarsimp split: Structures_A.thread_state.splits)
apply auto[1]
apply (clarsimp simp: ex_nonz_cap_to_def valid_aci_def)
apply (frule invs_untyped_children)
apply (clarsimp simp:cte_wp_at_caps_of_state)
apply (erule_tac ptr="(aa,ba)" in untyped_children_in_mdbE[where P="\<lambda>c. t \<in> zobj_refs c" for t])
apply (simp add: cte_wp_at_caps_of_state)
apply simp
apply (simp add:cte_wp_at_caps_of_state)
apply fastforce
apply (clarsimp simp: zobj_refs_to_obj_refs)
apply (erule(1) in_empty_interE)
apply (clarsimp simp:page_bits_def)
apply simp
done
lemma ucast_asid_high_btis_of_le [simp]:
"ucast (asid_high_bits_of w) \<le> (2 ^ asid_high_bits - 1 :: word32)"
apply (simp add: asid_high_bits_of_def)
apply (rule word_less_sub_1)
apply (rule order_less_le_trans)
apply (rule ucast_less)
apply simp
apply (simp add: asid_high_bits_def)
done
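(* Performing a valid arch invocation preserves tcb_at for the active thread tptr. *)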
lemma invoke_arch_tcb:
"\<lbrace>invs and valid_arch_inv ai and st_tcb_at active tptr\<rbrace>
arch_perform_invocation ai
\<lbrace>\<lambda>rv. tcb_at tptr\<rbrace>"
apply (simp add: arch_perform_invocation_def)
apply (cases ai, simp_all)
apply (wp, clarsimp simp: st_tcb_at_tcb_at)+
defer
apply (wp, clarsimp simp: st_tcb_at_tcb_at)
apply (wp perform_asid_control_invocation_tcb_at)
apply (clarsimp simp add: valid_arch_inv_def)
apply (clarsimp simp: valid_aci_def)
apply (frule st_tcb_ex_cap)
apply fastforce
apply (clarsimp split: Structures_A.thread_state.splits)
apply auto[1]
apply (clarsimp simp: ex_nonz_cap_to_def)
apply (frule invs_untyped_children)
apply (clarsimp simp:cte_wp_at_caps_of_state)
apply (erule_tac ptr="(aa,ba)" in untyped_children_in_mdbE[where P="\<lambda>c. t \<in> zobj_refs c" for t])
apply (simp add: cte_wp_at_caps_of_state)+
apply fastforce
apply (clarsimp simp: zobj_refs_to_obj_refs cte_wp_at_caps_of_state)
apply (drule_tac p="(aa,ba)" in caps_of_state_valid_cap, fastforce)
apply (clarsimp simp: valid_cap_def cap_aligned_def)
apply (drule_tac x=tptr in base_member_set, simp)
apply (simp add: pageBits_def field_simps del: atLeastAtMost_iff)
apply (metis (no_types) orthD1 x_power_minus_1)
apply simp
done
end
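(* The asid_update locale: s' is s with an empty ASID pool ap installed at a previously unused slot asid of the ASID table. Its lemmas describe how vs_lookup and the main arch invariants behave under this update. *)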
locale asid_update = Arch +
fixes ap asid s s'
assumes ko: "ko_at (ArchObj (ASIDPool Map.empty)) ap s"
assumes empty: "arm_asid_table (arch_state s) asid = None"
defines "s' \<equiv> s\<lparr>arch_state := arch_state s\<lparr>arm_asid_table := arm_asid_table (arch_state s)(asid \<mapsto> ap)\<rparr>\<rparr>"
begin
lemma vs_lookup1' [simp]:
"vs_lookup1 s' = vs_lookup1 s"
by (simp add: vs_lookup1_def s'_def)
lemma vs_lookup_pages1' [simp]:
"vs_lookup_pages1 s' = vs_lookup_pages1 s"
by (simp add: vs_lookup_pages1_def s'_def)
lemma vs_asid_refs' [simp]:
"vs_asid_refs (arm_asid_table (arch_state s')) =
vs_asid_refs (arm_asid_table (arch_state s)) \<union> {([VSRef (ucast asid) None], ap)}"
apply (simp add: s'_def)
apply (rule set_eqI)
apply (rule iffI)
apply (auto simp: vs_asid_refs_def split: if_split_asm)[1]
apply clarsimp
apply (erule disjE)
apply (auto simp: vs_asid_refs_def)[1]
apply (subst (asm) vs_asid_refs_def)
apply (clarsimp dest!: graph_ofD)
apply (rule vs_asid_refsI)
apply (clarsimp simp: empty)
done
lemma vs_lookup':
"vs_lookup s' = vs_lookup s \<union> {([VSRef (ucast asid) None], ap)}"
using ko
apply (simp add: vs_lookup_def)
apply (rule rtrancl_insert)
apply (clarsimp simp: vs_lookup1_def obj_at_def vs_refs_def)
done
lemma vs_lookup_pages':
"vs_lookup_pages s' = vs_lookup_pages s \<union> {([VSRef (ucast asid) None], ap)}"
using ko
apply (simp add: vs_lookup_pages_def)
apply (rule rtrancl_insert)
apply (clarsimp simp: vs_lookup_pages1_def obj_at_def vs_refs_pages_def)
done
lemma obj_at [simp]:
"obj_at P p s' = obj_at P p s"
by (simp add: s'_def)
lemma vs_lookup_neq: "\<lbrakk>(rs \<rhd> p) s' ; p \<noteq> ap\<rbrakk> \<Longrightarrow> (rs \<rhd> p) s"
by (clarsimp simp: vs_lookup')
lemma vspace_objs':
"valid_vspace_objs s \<Longrightarrow> valid_vspace_objs s'"
using ko
apply (clarsimp simp: valid_vspace_objs_def)
apply (erule_tac x=p in allE)
apply (case_tac "p = ap";
case_tac ao;
fastforce simp: obj_at_def s'_def
intro: vs_lookup_neq)
done
lemma global_objs':
"valid_global_objs s \<Longrightarrow> valid_global_objs s'"
apply (clarsimp simp: valid_global_objs_def valid_ao_at_def second_level_tables_def)
apply (auto simp: s'_def)
done
lemma caps_of_state_s':
"caps_of_state s' = caps_of_state s"
by (rule caps_of_state_pspace, simp add: s'_def)
lemma valid_vs_lookup':
"\<lbrakk> valid_vs_lookup s;
\<exists>ptr cap. caps_of_state s ptr = Some cap
\<and> ap \<in> obj_refs cap \<and> vs_cap_ref cap = Some [VSRef (ucast asid) None] \<rbrakk>
\<Longrightarrow> valid_vs_lookup s'"
by (clarsimp simp: valid_vs_lookup_def caps_of_state_s' vs_lookup_pages')
lemma valid_table_caps':
"\<lbrakk> valid_table_caps s \<rbrakk>
\<Longrightarrow> valid_table_caps s'"
apply (simp add: valid_table_caps_def caps_of_state_s' second_level_tables_def)
apply (simp add: s'_def)
done
lemma valid_arch_caps:
"\<lbrakk> valid_arch_caps s;
\<exists>ptr cap. caps_of_state s ptr = Some cap
\<and> ap \<in> obj_refs cap \<and> vs_cap_ref cap = Some [VSRef (ucast asid) None] \<rbrakk>
\<Longrightarrow> valid_arch_caps s'"
by (simp add: valid_arch_caps_def caps_of_state_s'
valid_table_caps' valid_vs_lookup')
lemma valid_asid_map':
"valid_asid_map s \<Longrightarrow> valid_asid_map s'"
using empty
apply (clarsimp simp: valid_asid_map_def s'_def)
apply (drule bspec, blast)
apply (clarsimp simp: vspace_at_asid_def)
apply (drule vs_lookup_2ConsD)
apply clarsimp
apply (erule vs_lookup_atE)
apply (drule vs_lookup1D)
apply clarsimp
apply (rule vs_lookupI[rotated])
apply (rule r_into_rtrancl)
apply (rule vs_lookup1I)
apply (fastforce simp: obj_at_def)
apply assumption
apply simp
apply (clarsimp simp: vs_asid_refs_def graph_of_def)
apply fastforce
done
end
context Arch begin global_naming ARM
lemma valid_arch_state_strg:
"valid_arch_state s \<and> ap \<notin> ran (arm_asid_table (arch_state s)) \<and> asid_pool_at ap s \<longrightarrow>
valid_arch_state (s\<lparr>arch_state := arch_state s\<lparr>arm_asid_table := arm_asid_table (arch_state s)(asid \<mapsto> ap)\<rparr>\<rparr>)"
apply (clarsimp simp: valid_arch_state_def)
apply (clarsimp simp: valid_asid_table_def ran_def)
apply (fastforce intro!: inj_on_fun_updI)
done
lemma valid_vs_lookup_at_upd_strg:
"valid_vs_lookup s \<and>
ko_at (ArchObj (ASIDPool Map.empty)) ap s \<and>
arm_asid_table (arch_state s) asid = None \<and>
(\<exists>ptr cap. caps_of_state s ptr = Some cap \<and> ap \<in> obj_refs cap \<and>
vs_cap_ref cap = Some [VSRef (ucast asid) None])
\<longrightarrow>
valid_vs_lookup (s\<lparr>arch_state := arch_state s\<lparr>arm_asid_table := arm_asid_table (arch_state s)(asid \<mapsto> ap)\<rparr>\<rparr>)"
apply clarsimp
apply (subgoal_tac "asid_update ap asid s")
prefer 2
apply unfold_locales[1]
apply assumption+
apply (erule (1) asid_update.valid_vs_lookup')
apply fastforce
done
lemma retype_region_ap:
"\<lbrace>\<top>\<rbrace>
retype_region ap 1 0 (ArchObject ASIDPoolObj) dev
\<lbrace>\<lambda>_. ko_at (ArchObj (ASIDPool Map.empty)) ap\<rbrace>"
apply (rule hoare_post_imp)
prefer 2
apply (rule retype_region_obj_at)
apply simp
apply simp
apply (clarsimp simp: retype_addrs_def obj_bits_api_def default_arch_object_def)
apply (clarsimp simp: obj_at_def default_object_def default_arch_object_def)
done
lemma retype_region_ap':
"\<lbrace>\<top>\<rbrace> retype_region ap 1 0 (ArchObject ASIDPoolObj) dev \<lbrace>\<lambda>rv. asid_pool_at ap\<rbrace>"
apply (rule hoare_strengthen_post, rule retype_region_ap)
apply (clarsimp simp: a_type_def elim!: obj_at_weakenE)
done
lemma no_cap_to_obj_with_diff_ref_null_filter:
"no_cap_to_obj_with_diff_ref cap S
= (\<lambda>s. \<forall>c \<in> ran (null_filter (caps_of_state s) |` (- S)).
obj_refs c = obj_refs cap
\<longrightarrow> table_cap_ref c = table_cap_ref cap)"
apply (simp add: no_cap_to_obj_with_diff_ref_def
ball_ran_eq cte_wp_at_caps_of_state)
apply (simp add: Ball_def)
apply (intro iff_allI ext)
apply (simp add: restrict_map_def null_filter_def)
apply (auto dest!: obj_ref_none_no_asid[rule_format]
simp: table_cap_ref_def)
done
lemma retype_region_no_cap_to_obj:
"\<lbrace>valid_pspace and valid_mdb
and caps_overlap_reserved {ptr..ptr + 2 ^ obj_bits_api ty us - 1}
and caps_no_overlap ptr sz
and pspace_no_overlap_range_cover ptr sz
and no_cap_to_obj_with_diff_ref cap S
and (\<lambda>s. \<exists>slot. cte_wp_at (\<lambda>c. up_aligned_area ptr sz \<subseteq> cap_range c \<and> cap_is_device c = dev) slot s)
and K (ty = Structures_A.CapTableObject \<longrightarrow> 0 < us)
and K (range_cover ptr sz (obj_bits_api ty us) 1) \<rbrace>
retype_region ptr 1 us ty dev
\<lbrace>\<lambda>rv. no_cap_to_obj_with_diff_ref cap S\<rbrace>"
apply (rule hoare_gen_asm)+
apply (simp add: no_cap_to_obj_with_diff_ref_null_filter)
apply (wp retype_region_caps_of | simp)+
apply fastforce
done
lemma valid_table_caps_asid_upd [iff]:
"valid_table_caps (s\<lparr>arch_state := (arm_asid_table_update f (arch_state s))\<rparr>) =
valid_table_caps s"
by (simp add: valid_table_caps_def second_level_tables_def)
lemma vs_asid_ref_upd:
"([VSRef (ucast (asid_high_bits_of asid')) None] \<rhd> ap')
(s\<lparr>arch_state := arch_state s\<lparr>arm_asid_table := arm_asid_table (arch_state s)(asid_high_bits_of asid \<mapsto> ap)\<rparr>\<rparr>)
= (if asid_high_bits_of asid' = asid_high_bits_of asid
then ap' = ap
else ([VSRef (ucast (asid_high_bits_of asid')) None] \<rhd> ap') s)"
by (fastforce intro: vs_lookup_atI elim: vs_lookup_atE)
lemma vs_asid_ref_eq:
"([VSRef (ucast asid) None] \<rhd> ap) s
= (arm_asid_table (arch_state s) asid = Some ap)"
by (fastforce elim: vs_lookup_atE intro: vs_lookup_atI)
lemma set_cap_reachable_pg_cap:
"\<lbrace>\<lambda>s. P (reachable_pg_cap cap s)\<rbrace> set_cap x y \<lbrace>\<lambda>_ s. P (reachable_pg_cap cap s)\<rbrace>"
by (unfold reachable_pg_cap_def, wp hoare_vcg_ex_lift set_cap.vs_lookup_pages)
lemma cap_insert_simple_arch_caps_ap:
"\<lbrace>valid_arch_caps and (\<lambda>s. cte_wp_at (safe_parent_for (cdt s) src cap) src s)
and no_cap_to_obj_with_diff_ref cap {dest}
and (\<lambda>s. arm_asid_table (arch_state s) (asid_high_bits_of asid) = None)
and ko_at (ArchObj (ASIDPool Map.empty)) ap
and K (cap = ArchObjectCap (ASIDPoolCap ap asid)) \<rbrace>
cap_insert cap src dest
\<lbrace>\<lambda>rv s. valid_arch_caps (s\<lparr>arch_state := arch_state s
\<lparr>arm_asid_table := arm_asid_table (arch_state s)(asid_high_bits_of asid \<mapsto> ap)\<rparr>\<rparr>)\<rbrace>"
apply (simp add: cap_insert_def update_cdt_def set_cdt_def valid_arch_caps_def
set_untyped_cap_as_full_def bind_assoc)
apply (strengthen valid_vs_lookup_at_upd_strg)
apply (wp get_cap_wp set_cap_valid_vs_lookup set_cap_arch_obj
set_cap_valid_table_caps hoare_vcg_all_lift
| simp split del: if_split)+
apply (rule_tac P = "cte_wp_at ((=) src_cap) src" in set_cap_orth)
apply (wp hoare_vcg_imp_lift hoare_vcg_ball_lift set_free_index_final_cap
hoare_vcg_disj_lift set_cap_reachable_pg_cap set_cap.vs_lookup_pages
| clarsimp)+
apply (wp set_cap_arch_obj set_cap_valid_table_caps hoare_vcg_ball_lift
get_cap_wp static_imp_wp)+
apply (clarsimp simp: cte_wp_at_caps_of_state is_cap_simps)
apply (rule conjI)
apply (clarsimp simp: vs_cap_ref_def)
apply (rule_tac x="fst dest" in exI)
apply (rule_tac x="snd dest" in exI)
apply simp
apply (rule conjI)
apply (simp add: unique_table_caps_def is_cap_simps)
apply (subst unique_table_refs_def)
apply (intro allI impI)
apply (simp split: if_split_asm)
apply (simp add: no_cap_to_obj_with_diff_ref_def cte_wp_at_caps_of_state)
apply (simp add: no_cap_to_obj_with_diff_ref_def cte_wp_at_caps_of_state)
apply (erule (3) unique_table_refsD)
done
lemma valid_asid_map_asid_upd_strg:
"valid_asid_map s \<and>
ko_at (ArchObj (ASIDPool Map.empty)) ap s \<and>
arm_asid_table (arch_state s) asid = None \<longrightarrow>
valid_asid_map (s\<lparr>arch_state := arch_state s\<lparr>arm_asid_table := arm_asid_table (arch_state s)(asid \<mapsto> ap)\<rparr>\<rparr>)"
apply clarsimp
apply (subgoal_tac "asid_update ap asid s")
prefer 2
apply unfold_locales[1]
apply assumption+
apply (erule (1) asid_update.valid_asid_map')
done
lemma valid_vspace_objs_asid_upd_strg:
"valid_vspace_objs s \<and>
ko_at (ArchObj (ASIDPool Map.empty)) ap s \<and>
arm_asid_table (arch_state s) asid = None \<longrightarrow>
valid_vspace_objs (s\<lparr>arch_state := arch_state s\<lparr>arm_asid_table := arm_asid_table (arch_state s)(asid \<mapsto> ap)\<rparr>\<rparr>)"
apply clarsimp
apply (subgoal_tac "asid_update ap asid s")
prefer 2
apply unfold_locales[1]
apply assumption+
apply (erule (1) asid_update.vspace_objs')
done
lemma valid_global_objs_asid_upd_strg:
"valid_global_objs s \<and>
ko_at (ArchObj (arch_kernel_obj.ASIDPool Map.empty)) ap s \<and>
arm_asid_table (arch_state s) asid = None \<longrightarrow>
valid_global_objs (s\<lparr>arch_state := arch_state s\<lparr>arm_asid_table := arm_asid_table (arch_state s)(asid \<mapsto> ap)\<rparr>\<rparr>)"
by clarsimp
lemma safe_parent_cap_is_device:
"safe_parent_for m p cap pcap \<Longrightarrow> cap_is_device cap = cap_is_device pcap"
by (simp add: safe_parent_for_def)
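(* Inserting an ASIDPoolCap for a fresh, empty pool (with a safe parent at src) preserves invs of the state obtained by then installing the pool in the ASID table. *)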
lemma cap_insert_ap_invs:
"\<lbrace>invs and valid_cap cap and tcb_cap_valid cap dest and
ex_cte_cap_wp_to (appropriate_cte_cap cap) dest and
cte_wp_at (\<lambda>c. c = cap.NullCap) dest and
no_cap_to_obj_with_diff_ref cap {dest} and
(\<lambda>s. cte_wp_at (safe_parent_for (cdt s) src cap) src s) and
K (cap = cap.ArchObjectCap (arch_cap.ASIDPoolCap ap asid)) and
(\<lambda>s. \<forall>irq \<in> cap_irqs cap. irq_issued irq s) and
ko_at (ArchObj (arch_kernel_obj.ASIDPool Map.empty)) ap and
(\<lambda>s. ap \<notin> ran (arm_asid_table (arch_state s)) \<and>
arm_asid_table (arch_state s) (asid_high_bits_of asid) = None)\<rbrace>
cap_insert cap src dest
\<lbrace>\<lambda>rv s. invs (s\<lparr>arch_state := arch_state s
\<lparr>arm_asid_table := (arm_asid_table \<circ> arch_state) s(asid_high_bits_of asid \<mapsto> ap)\<rparr>\<rparr>)\<rbrace>"
apply (simp add: invs_def valid_state_def valid_pspace_def)
apply (strengthen valid_arch_state_strg
valid_asid_map_asid_upd_strg valid_vspace_objs_asid_upd_strg )
apply (simp cong: conj_cong)
apply (rule hoare_pre)
apply (wp cap_insert_simple_mdb cap_insert_iflive
cap_insert_zombies cap_insert_ifunsafe cap_insert_vspace_objs
cap_insert_valid_global_refs cap_insert_idle
valid_irq_node_typ cap_insert_simple_arch_caps_ap)
apply (clarsimp simp: is_simple_cap_def cte_wp_at_caps_of_state is_cap_simps)
apply (frule safe_parent_cap_is_device)
apply (drule safe_parent_cap_range)
apply (simp add: cap_range_def)
apply (rule conjI)
prefer 2
apply (clarsimp simp: obj_at_def a_type_def)
apply (clarsimp simp: cte_wp_at_caps_of_state)
apply (drule_tac p="(a,b)" in caps_of_state_valid_cap, fastforce)
apply (auto simp: obj_at_def is_tcb_def is_cap_table_def
valid_cap_def [where c="cap.Zombie a b x" for a b x]
dest: obj_ref_is_tcb obj_ref_is_cap_table split: option.splits)
done
lemma max_index_upd_no_cap_to:
"\<lbrace>\<lambda>s. no_cap_to_obj_with_diff_ref cap {slot} s \<and>
cte_wp_at ((=) ucap) cref s \<and> is_untyped_cap ucap\<rbrace>
set_cap (max_free_index_update ucap) cref
\<lbrace>\<lambda>rv s. no_cap_to_obj_with_diff_ref cap {slot} s \<rbrace>"
apply (clarsimp simp:no_cap_to_obj_with_diff_ref_def)
apply (wp hoare_vcg_ball_lift set_cap_cte_wp_at_neg)
apply (clarsimp simp:cte_wp_at_caps_of_state free_index_update_def is_cap_simps)
apply (drule_tac x = cref in bspec)
apply clarsimp
apply (clarsimp simp:table_cap_ref_def)
done
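(* perform_asid_control_invocation preserves st_tcb_at P for threads whose state is neither inactive nor idle. *)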
lemma perform_asid_control_invocation_st_tcb_at:
"\<lbrace>st_tcb_at (P and (Not \<circ> inactive) and (Not \<circ> idle)) t
and ct_active and invs and valid_aci aci\<rbrace>
perform_asid_control_invocation aci
\<lbrace>\<lambda>y. st_tcb_at P t\<rbrace>"
including no_pre
supply
is_aligned_neg_mask_eq[simp del]
is_aligned_neg_mask_weaken[simp del]
apply (clarsimp simp: perform_asid_control_invocation_def split: asid_control_invocation.splits)
apply (rename_tac word1 a b aa ba word2)
apply (wp hoare_vcg_const_imp_lift retype_region_st_tcb_at set_cap_no_overlap|simp)+
apply (strengthen invs_valid_objs invs_psp_aligned)
apply (clarsimp simp:conj_comms)
apply (wp max_index_upd_invs_simple get_cap_wp)+
apply (rule hoare_name_pre_state)
apply (subgoal_tac "is_aligned word1 page_bits")
prefer 2
apply (clarsimp simp: valid_aci_def cte_wp_at_caps_of_state)
apply (drule(1) caps_of_state_valid[rotated])+
apply (simp add:valid_cap_simps cap_aligned_def page_bits_def)
apply (subst delete_objects_rewrite)
apply (simp add:page_bits_def word_bits_def pageBits_def word_size_bits_def)+
apply (simp add:is_aligned_neg_mask_eq)
apply (rule hoare_pre, wp)
apply (clarsimp simp: valid_aci_def)
apply (frule intvl_range_conv)
apply (simp add:word_bits_def page_bits_def pageBits_def)
apply (clarsimp simp:detype_clear_um_independent page_bits_def is_aligned_neg_mask_eq)
apply (rule conjI)
apply (clarsimp simp:cte_wp_at_caps_of_state)
apply (rule pspace_no_overlap_detype)
apply (rule caps_of_state_valid_cap)
apply (simp add:page_bits_def)+
apply (simp add:invs_valid_objs invs_psp_aligned)+
apply (rule conjI)
apply (erule pred_tcb_weakenE, simp)
apply (rule conjI)
apply (frule st_tcb_ex_cap)
apply clarsimp
apply (clarsimp split: Structures_A.thread_state.splits)
apply (clarsimp simp: ex_nonz_cap_to_def)
apply (frule invs_untyped_children)
apply (clarsimp simp:cte_wp_at_caps_of_state)
apply (erule_tac ptr="(aa,ba)" in untyped_children_in_mdbE[where P="\<lambda>c. t \<in> zobj_refs c" for t])
apply (simp add: cte_wp_at_caps_of_state)+
apply fastforce
apply (clarsimp simp: zobj_refs_to_obj_refs)
apply (fastforce simp:page_bits_def)
apply simp
apply (clarsimp simp:obj_bits_api_def arch_kobj_size_def cte_wp_at_caps_of_state
default_arch_object_def empty_descendants_range_in)
apply (frule_tac cap = "(cap.UntypedCap False word1 pageBits idx)"
in detype_invariants[rotated 3],clarsimp+)
apply (simp add:cte_wp_at_caps_of_state
empty_descendants_range_in descendants_range_def2)+
apply (thin_tac "x = Some cap.NullCap" for x)+
apply (drule(1) caps_of_state_valid_cap[OF _ invs_valid_objs])
apply (intro conjI)
apply (clarsimp simp:valid_cap_def cap_aligned_def range_cover_full
invs_psp_aligned invs_valid_objs page_bits_def)
apply (erule pspace_no_overlap_detype)
apply (auto simp:page_bits_def detype_clear_um_independent)
done
lemma set_cap_idx_up_aligned_area:
"\<lbrace>K (\<exists>idx. pcap = UntypedCap dev ptr pageBits idx) and cte_wp_at ((=) pcap) slot
and valid_objs\<rbrace> set_cap (max_free_index_update pcap) slot
\<lbrace>\<lambda>rv s. (\<exists>slot. cte_wp_at (\<lambda>c. up_aligned_area ptr pageBits \<subseteq> cap_range c \<and> cap_is_device c = dev) slot s)\<rbrace>"
apply (rule hoare_pre)
apply (wp hoare_vcg_ex_lift set_cap_cte_wp_at)
apply (rule_tac x = slot in exI)
apply clarsimp
apply (frule(1) cte_wp_valid_cap)
apply (clarsimp simp: cte_wp_at_caps_of_state is_aligned_neg_mask_eq
p_assoc_help valid_cap_def valid_untyped_def cap_aligned_def)
done
primrec(nonexhaustive) get_untyped_cap_idx :: "cap \<Rightarrow> nat"
where "get_untyped_cap_idx (UntypedCap dev ref sz idx) = idx"
lemma aci_invs':
assumes Q_ignores_arch[simp]: "\<And>f s. Q (arch_state_update f s) = Q s"
assumes Q_ignore_machine_state[simp]: "\<And>f s. Q (machine_state_update f s) = Q s"
assumes Q_detype[simp]: "\<And>f s. Q (detype f s) = Q s"
assumes cap_insert_Q: "\<And>cap src dest. \<lbrace>Q and invs and K (src \<noteq> dest)\<rbrace>
cap_insert cap src dest
\<lbrace>\<lambda>_.Q\<rbrace>"
assumes retype_region_Q[wp]:"\<And>a b c d e. \<lbrace>Q\<rbrace> retype_region a b c d e \<lbrace>\<lambda>_.Q\<rbrace>"
assumes set_cap_Q[wp]: "\<And>a b. \<lbrace>Q\<rbrace> set_cap a b \<lbrace>\<lambda>_.Q\<rbrace>"
shows
"\<lbrace>invs and Q and ct_active and valid_aci aci\<rbrace> perform_asid_control_invocation aci \<lbrace>\<lambda>y s. invs s \<and> Q s\<rbrace>"
proof -
have cap_insert_invsQ:
"\<And>cap src dest ap asid.
\<lbrace>Q and (invs and valid_cap cap and tcb_cap_valid cap dest and
ex_cte_cap_wp_to (appropriate_cte_cap cap) dest and
cte_wp_at (\<lambda>c. c = NullCap) dest and
no_cap_to_obj_with_diff_ref cap {dest} and
(\<lambda>s. cte_wp_at (safe_parent_for (cdt s) src cap) src s) and
K (cap = ArchObjectCap (ASIDPoolCap ap asid)) and
(\<lambda>s. \<forall>irq\<in>cap_irqs cap. irq_issued irq s) and
ko_at (ArchObj (ASIDPool Map.empty)) ap and
(\<lambda>s. ap \<notin> ran (arm_asid_table (arch_state s)) \<and>
arm_asid_table (arch_state s) (asid_high_bits_of asid) = None))\<rbrace>
cap_insert cap src dest
\<lbrace>\<lambda>rv s.
invs
(s\<lparr>arch_state := arch_state s
\<lparr>arm_asid_table := (arm_asid_table \<circ> arch_state) s
(asid_high_bits_of asid \<mapsto> ap)\<rparr>\<rparr>) \<and>
Q
(s\<lparr>arch_state := arch_state s
\<lparr>arm_asid_table := (arm_asid_table \<circ> arch_state) s
(asid_high_bits_of asid \<mapsto> ap)\<rparr>\<rparr>)\<rbrace>"
apply (wp cap_insert_ap_invs)
apply simp
apply (rule hoare_pre)
apply (rule cap_insert_Q)
apply (auto simp: cte_wp_at_caps_of_state)
done
show ?thesis
apply (clarsimp simp: perform_asid_control_invocation_def valid_aci_def
split: asid_control_invocation.splits)
apply (rename_tac word1 a b aa ba word2)
apply (rule hoare_pre)
apply (wp hoare_vcg_const_imp_lift)
apply (wp cap_insert_invsQ hoare_vcg_ex_lift
| simp)+
apply (simp add: valid_cap_def |
strengthen real_cte_tcb_valid safe_parent_strg
invs_vobjs_strgs
ex_cte_cap_to_cnode_always_appropriate_strg)+
apply (wp hoare_vcg_const_imp_lift set_free_index_invs
retype_region_plain_invs[where sz = pageBits]
retype_cte_wp_at[where sz = pageBits] hoare_vcg_ex_lift
retype_region_obj_at_other3[where P="is_cap_table n" and sz = pageBits for n]
retype_region_ex_cte_cap_to[where sz = pageBits]
retype_region_ap[simplified]
retype_region_ap'[simplified]
retype_region_no_cap_to_obj[where sz = pageBits,simplified]
| simp del: split_paired_Ex)+
apply (strengthen invs_valid_objs invs_psp_aligned
invs_mdb invs_valid_pspace
exI[where x="case aci of MakePool frame slot parent base \<Rightarrow> parent"]
exI[where x="case aci of MakePool frame slot parent base \<Rightarrow> parent",
simplified]
caps_region_kernel_window_imp[where
p = "case aci of MakePool frame slot parent base \<Rightarrow> parent"]
invs_cap_refs_in_kernel_window)+
apply (wp set_cap_caps_no_overlap set_cap_no_overlap get_cap_wp
max_index_upd_caps_overlap_reserved max_index_upd_invs_simple
set_cap_cte_cap_wp_to set_cap_cte_wp_at max_index_upd_no_cap_to
| simp split del: if_split | wp (once) hoare_vcg_ex_lift)+
apply (rule_tac P = "is_aligned word1 page_bits" in hoare_gen_asm)
apply (subst delete_objects_rewrite)
apply (simp add:page_bits_def pageBits_def word_size_bits_def)
apply (simp add:page_bits_def pageBits_def word_bits_def)
apply (simp add:is_aligned_neg_mask_eq)
apply wp
apply (clarsimp simp: cte_wp_at_caps_of_state if_option_Some
Misc_Arithmetic.if_bool_simps
split del: if_split)
apply (strengthen refl)
apply (frule_tac cap = "(cap.UntypedCap False word1 pageBits idx)"
in detype_invariants[rotated 3],clarsimp+)
apply (simp add:cte_wp_at_caps_of_state)+
apply (simp add:descendants_range_def2 empty_descendants_range_in)
apply (simp add:invs_mdb invs_valid_pspace invs_psp_aligned invs_valid_objs)
apply (clarsimp dest!:caps_of_state_cteD)
apply (frule(1) unsafe_protected[where p=t and p'=t for t])
apply (simp add:empty_descendants_range_in)+
apply fastforce
apply clarsimp
apply (frule_tac p = "(aa,ba)" in cte_wp_valid_cap)
apply fastforce
apply (clarsimp simp: detype_clear_um_independent obj_bits_api_def arch_kobj_size_def
default_arch_object_def conj_comms)
apply (rule conjI)
apply (clarsimp simp:valid_cap_simps cap_aligned_def page_bits_def not_le)
apply clarsimp
apply (simp add:empty_descendants_range_in)
apply (frule valid_cap_aligned)
apply (clarsimp simp: cap_aligned_def is_aligned_neg_mask_eq)
apply (subst caps_no_overlap_detype[OF descendants_range_caps_no_overlapI],
assumption, simp add: is_aligned_neg_mask_eq,
simp add: empty_descendants_range_in)
apply (frule pspace_no_overlap_detype, clarify+)
apply (frule intvl_range_conv[where bits = pageBits])
apply (simp add:pageBits_def word_bits_def)
apply (simp add:is_aligned_neg_mask_eq)
apply (clarsimp simp:is_aligned_neg_mask_eq page_bits_def)
apply (frule(1) ex_cte_cap_protects)
apply (simp add:empty_descendants_range_in)
apply fastforce
apply (rule subset_refl)
apply fastforce
apply (clarsimp simp: field_simps)
apply (intro conjI impI,
simp_all add:free_index_of_def valid_cap_simps valid_untyped_def
empty_descendants_range_in range_cover_full clear_um_def max_free_index_def,
(clarsimp simp:valid_untyped_def valid_cap_simps)+)[1]
apply (erule(1) cap_to_protected)
apply (simp add:empty_descendants_range_in descendants_range_def2)+
apply clarsimp
apply (drule invs_arch_state)+
apply (clarsimp simp: valid_arch_state_def valid_asid_table_def)
apply (drule (1) bspec)+
apply clarsimp
apply (erule notE, erule is_aligned_no_overflow)
apply (clarsimp simp: no_cap_to_obj_with_diff_ref_def)
apply (thin_tac "cte_wp_at ((=) cap.NullCap) p s" for p s)
apply (subst(asm) eq_commute,
erule(1) untyped_children_in_mdbE[where cap="cap.UntypedCap dev p bits idx" for dev p bits idx,
simplified, rotated])
apply (simp add: is_aligned_no_overflow)
apply simp
apply clarsimp
done
qed
lemmas aci_invs[wp] = aci_invs'[where Q=\<top>,simplified hoare_post_taut, OF refl refl refl TrueI TrueI TrueI,simplified]
lemma invoke_arch_invs[wp]:
"\<lbrace>invs and ct_active and valid_arch_inv ai\<rbrace>
arch_perform_invocation ai
\<lbrace>\<lambda>rv. invs\<rbrace>"
apply (cases ai, simp_all add: valid_arch_inv_def arch_perform_invocation_def)
apply (wp|simp)+
done
lemma sts_empty_pde [wp]:
"\<lbrace>empty_pde_at p\<rbrace> set_thread_state t st \<lbrace>\<lambda>rv. empty_pde_at p\<rbrace>"
apply (simp add: empty_pde_at_def)
apply (rule hoare_pre)
apply (wp hoare_vcg_ex_lift set_thread_state_ko)
apply (clarsimp simp: is_tcb_def)
done
lemma sts_pd_at_asid [wp]:
"\<lbrace>vspace_at_asid asid pd\<rbrace> set_thread_state t st \<lbrace>\<lambda>rv. vspace_at_asid asid pd\<rbrace>"
apply (simp add: vspace_at_asid_def)
apply wp
done
lemma sts_same_refs_inv[wp]:
"\<lbrace>\<lambda>s. same_refs m cap s\<rbrace> set_thread_state t st \<lbrace>\<lambda>rv s. same_refs m cap s\<rbrace>"
by (cases m, (clarsimp simp: same_refs_def, wp)+)
lemma sts_valid_slots_inv[wp]:
"\<lbrace>valid_slots m\<rbrace> set_thread_state t st \<lbrace>\<lambda>rv. valid_slots m\<rbrace>"
by (cases m, (clarsimp simp: valid_slots_def, wp hoare_vcg_ball_lift sts.vs_lookup sts_typ_ats)+)
lemma sts_valid_page_inv[wp]:
"\<lbrace>valid_page_inv page_invocation\<rbrace> set_thread_state t st \<lbrace>\<lambda>rv. valid_page_inv page_invocation\<rbrace>"
apply (cases page_invocation)
by (wpsimp wp: hoare_vcg_ex_lift sts_typ_ats hoare_vcg_disj_lift
simp: valid_page_inv_def same_refs_def
| wps)+
lemma sts_valid_pdi_inv[wp]:
"\<lbrace>valid_pdi page_directory_invocation\<rbrace> set_thread_state t st \<lbrace>\<lambda>rv. valid_pdi page_directory_invocation\<rbrace>"
apply (cases page_directory_invocation)
apply (wp | simp add: valid_pdi_def)+
done
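(* set_thread_state does not affect the validity of a pending arch invocation. *)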
lemma sts_valid_arch_inv:
"\<lbrace>valid_arch_inv ai\<rbrace> set_thread_state t st \<lbrace>\<lambda>rv. valid_arch_inv ai\<rbrace>"
apply (cases ai, simp_all add: valid_arch_inv_def)
apply (rename_tac page_table_invocation)
apply (case_tac page_table_invocation, simp_all add: valid_pti_def)[1]
apply ((wp valid_pde_lift set_thread_state_valid_cap
hoare_vcg_all_lift hoare_vcg_const_imp_lift
hoare_vcg_ex_lift set_thread_state_ko
sts_typ_ats set_thread_state_cte_wp_at
| clarsimp simp: is_tcb_def)+)[4]
apply (rename_tac asid_control_invocation)
apply (case_tac asid_control_invocation)
apply (clarsimp simp: valid_aci_def cte_wp_at_caps_of_state)
apply (rule hoare_pre, wp hoare_vcg_ex_lift cap_table_at_typ_at)
apply clarsimp
apply (clarsimp simp: valid_apinv_def split: asid_pool_invocation.splits)
apply (rule hoare_pre)
apply (wp hoare_vcg_ex_lift set_thread_state_ko)
apply (clarsimp simp: is_tcb_def)
done
crunch inv[wp]: ensure_safe_mapping "P"
(wp: mapME_x_inv_wp)
(* the induct rule matches the wrong parameters first -> crunch blows up *)
lemma create_mapping_entries_inv [wp]:
"\<lbrace>P\<rbrace> create_mapping_entries base vptr vmsz R A pd \<lbrace>\<lambda>_. P\<rbrace>"
by (induct vmsz; wpsimp wp: lookup_pt_slot_inv)
crunch_ignore (add: select_ext)
crunch inv [wp]: arch_decode_invocation "P"
(wp: crunch_wps select_wp select_ext_weak_wp simp: crunch_simps)
lemma create_mappings_empty [wp]:
"\<lbrace>\<top>\<rbrace> create_mapping_entries base vptr vmsz R A pd \<lbrace>\<lambda>m s. empty_refs m\<rbrace>, -"
by (cases vmsz; wpsimp simp: pde_ref_def empty_refs_def)
lemma empty_pde_atI:
"\<lbrakk> ko_at (ArchObj (PageDirectory pd)) (p && ~~ mask pd_bits) s;
pd (ucast (p && mask pd_bits >> 2)) = InvalidPDE \<rbrakk> \<Longrightarrow>
empty_pde_at p s"
by (fastforce simp add: empty_pde_at_def)
declare lookup_slot_for_cnode_op_cap_to [wp]
lemma shiftr_irrelevant:
"x < 2 ^ asid_low_bits \<Longrightarrow> is_aligned (y :: word32) asid_low_bits \<Longrightarrow>
x + y >> asid_low_bits = y >> asid_low_bits"
apply (subst word_plus_and_or_coroll)
apply (rule word_eqI)
apply (clarsimp simp: is_aligned_nth)
apply (drule(1) nth_bounded)
apply (simp add: asid_low_bits_def word_bits_def)
apply simp
apply (rule word_eqI)
apply (simp add: nth_shiftr)
apply safe
apply (drule(1) nth_bounded)
apply (simp add: asid_low_bits_def word_bits_def)
apply simp
done
lemma map_up_enum_0x3C:
"is_aligned (r::32 word) 6 \<Longrightarrow> map (\<lambda>x. x + r) [0 , 4 .e. 0x3C] = [r, r + 4 .e. r + 0x3C]"
apply (simp add: upto_enum_step_def upto_enum_def not_less)
apply (drule is_aligned_no_overflow')
apply simp
apply (erule word_plus_mono_right2)
apply simp
done
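(* After create_mapping_entries there is some cte whose capability is a parent for the returned entries (parent_for_refs). *)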
lemma create_mapping_entries_parent_for_refs:
"\<lbrace>invs and \<exists>\<rhd> pd and page_directory_at pd
and K (is_aligned pd pd_bits) and K (vmsz_aligned vptr pgsz)
and K (vptr < kernel_base)\<rbrace>
create_mapping_entries ptr vptr pgsz
rights attribs pd
\<lbrace>\<lambda>rv s. \<exists>a b. cte_wp_at (parent_for_refs rv) (a, b) s\<rbrace>, -"
apply (rule hoare_gen_asmE)+
apply (cases pgsz, simp_all add: vmsz_aligned_def largePagePTE_offsets_def
superSectionPDE_offsets_def)
apply (rule hoare_pre)
apply wp
apply (rule hoare_post_imp_R, rule lookup_pt_slot_cap_to)
apply (elim exEI)
apply (clarsimp simp: cte_wp_at_caps_of_state parent_for_refs_def)
apply simp
apply (rule hoare_pre)
apply wp
apply (rule hoare_post_imp_R)
apply (rule lookup_pt_slot_cap_to_multiple1)
apply (elim conjE exEI cte_wp_at_weakenE)
apply (clarsimp simp: cte_wp_at_caps_of_state parent_for_refs_def
subset_iff p_0x3C_shift map_up_enum_0x3C)
apply simp
apply (rule hoare_pre, wp)
apply (clarsimp dest!:vs_lookup_pages_vs_lookupI)
apply (drule valid_vs_lookupD, clarsimp)
apply (simp, elim exEI)
apply (clarsimp simp: cte_wp_at_caps_of_state parent_for_refs_def
lookup_pd_slot_def Let_def)
apply (subst pd_shifting, simp add: pd_bits_def pageBits_def)
apply (clarsimp simp: vs_cap_ref_def
split: cap.split_asm arch_cap.split_asm option.split_asm)
apply (auto simp: valid_cap_def obj_at_def is_cap_simps cap_asid_def
dest!: caps_of_state_valid_cap split:if_splits)[3]
apply (frule(1) caps_of_state_valid)
apply (clarsimp simp:valid_cap_def obj_at_def)
apply (simp add:is_cap_simps)
apply (rule hoare_pre, wp)
apply (clarsimp dest!:vs_lookup_pages_vs_lookupI)
apply (drule valid_vs_lookupD, clarsimp)
apply (simp, elim exEI)
apply (clarsimp simp: cte_wp_at_caps_of_state parent_for_refs_def)
apply (rule conjI)
apply (simp add: subset_eq)
apply (clarsimp simp: lookup_pd_slot_add_eq)
apply (clarsimp simp: vs_cap_ref_def
split: cap.split_asm arch_cap.split_asm option.split_asm)
apply (auto simp: valid_cap_def obj_at_def is_cap_simps cap_asid_def
dest!: caps_of_state_valid_cap split:if_splits)[3]
apply (frule(1) caps_of_state_valid)
apply (clarsimp simp:valid_cap_def obj_at_def)
apply (simp add:is_cap_simps)
done
lemma find_pd_for_asid_shifting_voodoo:
"\<lbrace>pspace_aligned and valid_vspace_objs\<rbrace>
find_pd_for_asid asid
\<lbrace>\<lambda>rv s. v >> 20 = rv + (v >> 20 << 2) && mask pd_bits >> 2\<rbrace>,-"
apply (rule hoare_post_imp_R,
rule find_pd_for_asid_aligned_pd)
apply (subst pd_shifting_dual, simp)
apply (rule word_eqI)
apply (simp add: nth_shiftr nth_shiftl word_size)
apply safe
apply (drule test_bit_size)
apply (simp add: word_size)
done
lemma find_pd_for_asid_ref_offset_voodoo:
"\<lbrace>pspace_aligned and valid_vspace_objs and
K (ref = [VSRef (asid && mask asid_low_bits) (Some AASIDPool),
VSRef (ucast (asid_high_bits_of asid)) None])\<rbrace>
find_pd_for_asid asid
\<lbrace>\<lambda>rv. (ref \<rhd> (rv + (v >> 20 << 2) && ~~ mask pd_bits))\<rbrace>,-"
apply (rule hoare_gen_asmE)
apply (rule_tac Q'="\<lambda>rv s. is_aligned rv 14 \<and> (ref \<rhd> rv) s"
in hoare_post_imp_R)
apply (simp add: ucast_ucast_mask
mask_asid_low_bits_ucast_ucast)
apply (fold asid_low_bits_def)
apply (rule hoare_pre, wp find_pd_for_asid_lookup_ref)
apply simp
apply (simp add: pd_shifting)
done
declare asid_high_bits_of_shift [simp]
declare mask_shift [simp]
declare word_less_sub_le [simp del]
declare ptrFormPAddr_addFromPPtr [simp]
(* FIXME: move *)
lemma valid_mask_vm_rights[simp]:
"mask_vm_rights V R \<in> valid_vm_rights"
by (simp add: mask_vm_rights_def)
lemma vs_lookup_and_unique_refs:
"\<lbrakk>(ref \<rhd> p) s; caps_of_state s cptr = Some cap; table_cap_ref cap = Some ref';
p \<in> obj_refs cap; valid_vs_lookup s; unique_table_refs (caps_of_state s)\<rbrakk>
\<Longrightarrow> ref = ref'"
apply (frule_tac ref=ref in valid_vs_lookupD[OF vs_lookup_pages_vs_lookupI], assumption)
apply clarsimp
apply (frule_tac cap'=capa in unique_table_refsD)
apply simp+
apply (case_tac capa, simp_all)
apply ((case_tac cap, simp_all)+)[6]
apply (clarsimp simp add: table_cap_ref_def vs_cap_ref_def split: cap.splits arch_cap.splits option.splits)
done
lemma valid_global_ptsD2:
"\<lbrakk>r \<in> set (arm_global_pts (arch_state s)); valid_global_pts s\<rbrakk>
\<Longrightarrow> \<exists>pt. ko_at (ArchObj (PageTable pt)) r s"
apply (clarsimp simp: valid_global_pts_def)
apply (drule (1) bspec)
apply (clarsimp simp: obj_at_def)
done
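(* The entries created for mapping a PageCap record exactly the references stored in that cap (same_refs), for every page size. *)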
lemma create_mapping_entries_same_refs:
"\<lbrace>valid_arch_state and valid_vspace_objs and valid_vs_lookup and (\<lambda>s. unique_table_refs (caps_of_state s))
and pspace_aligned and valid_objs and valid_kernel_mappings and \<exists>\<rhd> pd and
(\<lambda>s. \<exists>pd_cap pd_cptr. cte_wp_at ((=) pd_cap) pd_cptr s
\<and> pd_cap = cap.ArchObjectCap (arch_cap.PageDirectoryCap pd (Some asid))) and
page_directory_at pd and K (vaddr < kernel_base \<and> (cap = (cap.ArchObjectCap (arch_cap.PageCap dev p rights' pgsz (Some (asid, vaddr))))))\<rbrace>
create_mapping_entries (addrFromPPtr p) vaddr pgsz rights attribs pd
\<lbrace>\<lambda>rv s. same_refs rv cap s\<rbrace>,-"
apply (rule hoare_gen_asmE)
apply (cases pgsz, simp_all add: lookup_pt_slot_def
largePagePTE_offsets_def superSectionPDE_offsets_def)
apply (wp get_pde_wp | wpc)+
apply (clarsimp simp: lookup_pd_slot_def)
apply (frule (1) pd_aligned)
apply (simp add:pd_shifting vaddr_segment_nonsense2 pt_bits_def)
apply (frule (2) valid_vspace_objsD[rotated], simp)
apply (drule bspec, simp, erule kernel_base_kernel_mapping_slots)
apply (simp, drule (1) pt_aligned)
apply (clarsimp simp: same_refs_def vs_cap_ref_def split: option.splits)
apply (simp add: vaddr_segment_nonsense4 shiftl_shiftr_id
less_trans[OF and_mask_less'[where n=8, unfolded mask_def, simplified]]
word_bits_def
vaddr_segment_nonsense3)
apply (rule conjI, simp add: mask_def)
apply (clarsimp simp: cte_wp_at_caps_of_state
mask_cap_def cap_rights_update_def)
apply (frule (1) vs_lookup_and_unique_refs)
apply (simp_all add: table_cap_ref_def obj_refs_def)[4]
apply (frule_tac p=pd and p'="ptrFromPAddr x" in vs_lookup_step)
apply (clarsimp simp: vs_lookup1_def)
apply (rule exI, erule conjI)
apply (rule exI[where x="VSRef (vaddr >> 20) (Some APageDirectory)"])
apply (rule conjI, rule refl)
apply (simp add: vs_refs_def)
apply (rule_tac x="(ucast (vaddr >> 20), ptrFromPAddr x)" in image_eqI)
apply (simp add: ucast_ucast_len[OF shiftr_less_t2n'] mask_32_max_word graph_of_def)
apply (clarsimp simp:graph_of_def)
apply (frule kernel_base_kernel_mapping_slots, simp add: pde_ref_def)
apply simp
apply (drule (1) ref_is_unique)
apply (rule not_kernel_slot_not_global_pt[simplified second_level_tables_def, rotated])
apply (erule kernel_base_kernel_mapping_slots)
apply (simp add: obj_at_def)
apply (simp_all add: pde_ref_def valid_arch_state_def valid_objs_caps)[8]
apply (wp get_pde_wp | wpc)+
apply (clarsimp simp: lookup_pd_slot_def)
apply (frule (1) pd_aligned)
apply (simp add:pd_shifting vaddr_segment_nonsense2)
apply (frule (2) valid_vspace_objsD[rotated], simp)
apply (drule bspec, simp, erule kernel_base_kernel_mapping_slots)
apply (simp, drule (1) pt_aligned)
apply (clarsimp simp: same_refs_def vs_cap_ref_def upto_enum_step_def upto_enum_word upt_conv_Cons)
apply (simp add: vaddr_segment_nonsense4 shiftl_shiftr_id
less_trans[OF and_mask_less'[where n=8, unfolded mask_def, simplified]]
word_bits_def
vaddr_segment_nonsense3)
apply (rule conjI, simp add: mask_def)
apply (clarsimp simp: cte_wp_at_caps_of_state
mask_cap_def cap_rights_update_def)
apply (frule (1) vs_lookup_and_unique_refs)
apply (simp_all add: table_cap_ref_def obj_refs_def)[4]
apply (frule_tac p=pd and p'="ptrFromPAddr x" in vs_lookup_step)
apply (clarsimp simp: vs_lookup1_def)
apply (rule exI, erule conjI)
apply (rule exI[where x="VSRef (vaddr >> 20) (Some APageDirectory)"])
apply (rule conjI, rule refl)
apply (simp add: vs_refs_def)
apply (rule_tac x="(ucast (vaddr >> 20), ptrFromPAddr x)" in image_eqI)
apply (simp add: ucast_ucast_len[OF shiftr_less_t2n'] graph_of_def)
apply (clarsimp simp: graph_of_def)
apply (frule kernel_base_kernel_mapping_slots, simp add: pde_ref_def)
apply simp
apply (drule (1) ref_is_unique)
apply (rule not_kernel_slot_not_global_pt[simplified second_level_tables_def, rotated])
apply (erule kernel_base_kernel_mapping_slots)
apply (simp add: obj_at_def)
apply (simp_all add: pde_ref_def valid_arch_state_def valid_objs_caps)[8]
apply (wp get_pde_wp returnOKE_R_wp | wpc)+
apply (clarsimp simp: lookup_pd_slot_def)
apply (frule (1) pd_aligned)
apply (clarsimp simp: same_refs_def vs_cap_ref_def pde_ref_pages_def)
apply (simp add: vaddr_segment_nonsense vaddr_segment_nonsense2)
apply (clarsimp simp: cte_wp_at_caps_of_state
mask_cap_def cap_rights_update_def)
apply (frule (1) vs_lookup_and_unique_refs)
apply (simp_all add: table_cap_ref_def obj_refs_def)[4]
apply (drule (1) ref_is_unique)
apply (clarsimp simp: valid_arch_state_def obj_at_def dest!: valid_global_ptsD2)
apply (simp_all add: valid_arch_state_def valid_objs_caps)[6]
apply (wp returnOKE_R_wp | wpc)+
apply (clarsimp simp: lookup_pd_slot_def)
apply (frule (1) pd_aligned)
apply (clarsimp simp: same_refs_def vs_cap_ref_def pde_ref_pages_def upto_enum_step_def upto_enum_word upt_conv_Cons)
apply (simp add: vaddr_segment_nonsense vaddr_segment_nonsense2)
apply (clarsimp simp: cte_wp_at_caps_of_state
mask_cap_def cap_rights_update_def)
apply (frule (1) vs_lookup_and_unique_refs)
apply (simp_all add: table_cap_ref_def obj_refs_def)[4]
apply (drule (1) ref_is_unique)
apply (clarsimp dest!: valid_global_ptsD2 simp: obj_at_def a_type_def valid_arch_state_def)
apply (simp_all add: valid_arch_state_def valid_objs_caps)
done
lemma create_mapping_entries_same_refs_ex:
"\<lbrace>valid_arch_state and valid_vspace_objs and valid_vs_lookup and (\<lambda>s. unique_table_refs (caps_of_state s))
and pspace_aligned and valid_objs and valid_kernel_mappings and \<exists>\<rhd> pd and
(\<lambda>s. \<exists>dev pd_cap pd_cptr asid rights'. cte_wp_at ((=) pd_cap) pd_cptr s
\<and> pd_cap = cap.ArchObjectCap (arch_cap.PageDirectoryCap pd (Some asid))
\<and> page_directory_at pd s \<and> vaddr < kernel_base \<and> (cap = (cap.ArchObjectCap (arch_cap.PageCap dev p rights' pgsz (Some (asid, vaddr))))))\<rbrace>
create_mapping_entries (addrFromPPtr p) vaddr pgsz rights attribs pd
\<lbrace>\<lambda>rv s. same_refs rv cap s\<rbrace>,-"
apply (clarsimp simp: validE_R_def validE_def valid_def split: sum.split)
apply (erule use_validE_R[OF _ create_mapping_entries_same_refs])
apply fastforce
done
lemma cte_wp_at_page_cap_weaken:
"cte_wp_at ((=) (ArchObjectCap (PageCap dev word seta vmpage_size None))) slot s \<Longrightarrow>
cte_wp_at (\<lambda>a. \<exists>dev p R sz m. a = ArchObjectCap (PageCap dev p R sz m)) slot s"
by (clarsimp simp: cte_wp_at_def mask_cap_def cap_rights_update_def)
lemma find_pd_for_asid_lookup_pd_wp:
"\<lbrace> \<lambda>s. valid_vspace_objs s \<and> (\<forall>pd. vspace_at_asid asid pd s \<and> page_directory_at pd s
\<and> (\<exists>\<rhd> pd) s \<longrightarrow> Q pd s) \<rbrace> find_pd_for_asid asid \<lbrace> Q \<rbrace>, -"
apply (rule hoare_post_imp_R)
apply (rule hoare_vcg_conj_lift_R[OF find_pd_for_asid_page_directory])
apply (rule hoare_vcg_conj_lift_R[OF find_pd_for_asid_lookup, simplified])
apply (rule hoare_vcg_conj_lift_R[OF find_pd_for_asid_pd_at_asid, simplified])
apply (wp (once) find_pd_for_asid_inv)
apply auto
done
lemma aligned_sum_less_kernel_base:
"vmsz_aligned p sz
\<Longrightarrow> (p + 2 ^ pageBitsForSize sz - 1 < kernel_base) = (p < kernel_base)"
apply (rule iffI)
apply (rule le_less_trans)
apply (rule is_aligned_no_overflow)
apply (simp add: vmsz_aligned_def)
apply simp
apply (simp add:field_simps[symmetric])
apply (erule gap_between_aligned)
apply (simp add: vmsz_aligned_def)+
apply (case_tac sz,simp_all add:kernel_base_def is_aligned_def)+
done
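(* Decode well-formedness: arch_decode_invocation only returns invocations satisfying valid_arch_inv. *)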
lemma arch_decode_inv_wf[wp]:
"\<lbrace>invs and valid_cap (cap.ArchObjectCap arch_cap) and
cte_wp_at ((=) (cap.ArchObjectCap arch_cap)) slot and
(\<lambda>s. \<forall>x \<in> set excaps. cte_wp_at ((=) (fst x)) (snd x) s)\<rbrace>
arch_decode_invocation label args cap_index slot arch_cap excaps
\<lbrace>valid_arch_inv\<rbrace>,-"
apply (cases arch_cap)
apply (rename_tac word1 word2)
apply (simp add: arch_decode_invocation_def Let_def split_def cong: if_cong split del: if_split)
apply (rule hoare_pre)
apply ((wp whenE_throwError_wp check_vp_wpR ensure_empty_stronger select_wp select_ext_weak_wp|
wpc|
simp add: valid_arch_inv_def valid_apinv_def)+)[1]
apply (simp add: valid_arch_inv_def valid_apinv_def)
apply (intro allI impI ballI)
apply (elim conjE exE)
apply simp
apply (clarsimp simp: dom_def neq_Nil_conv)
apply (thin_tac "Ball S P" for S P)+
apply (clarsimp simp: valid_cap_def)
apply (rule conjI)
apply (clarsimp simp: obj_at_def)
apply (subgoal_tac "ucast (ucast xa + word2) = xa")
apply simp
apply (simp add: is_aligned_nth)
apply (subst word_plus_and_or_coroll)
apply (rule word_eqI)
apply (clarsimp simp: word_size word_bits_def nth_ucast)
apply (drule test_bit_size)
apply (simp add: word_size asid_low_bits_def)
apply (rule word_eqI)
apply (clarsimp simp: word_size word_bits_def nth_ucast)
apply (auto simp: asid_low_bits_def)[1]
apply (rule conjI)
apply (clarsimp simp add: cte_wp_at_caps_of_state)
apply (rename_tac c c')
apply (frule_tac cap="(ArchObjectCap (PageDirectoryCap xb None))" in caps_of_state_valid,
assumption)
apply (clarsimp simp: is_pd_cap_def cap_rights_update_def
acap_rights_update_def)
apply (clarsimp simp: word_neq_0_conv)
apply (rule conjI)
apply (subst field_simps, erule is_aligned_add_less_t2n)
apply (simp add: asid_low_bits_def)
apply (rule ucast_less[where 'b=10, simplified], simp)
apply (simp add: asid_low_bits_def asid_bits_def)
apply (simp add: asid_bits_def)
apply (drule vs_lookup_atI)
apply (subst asid_high_bits_of_add_ucast, assumption)
apply assumption
apply (simp add: arch_decode_invocation_def Let_def split_def
cong: if_cong split del: if_split)
apply (rule hoare_pre)
apply ((wp whenE_throwError_wp check_vp_wpR ensure_empty_stronger|
wpc|
simp add: valid_arch_inv_def valid_aci_def is_aligned_shiftl_self)+)[1]
apply (rule_tac Q'=
"\<lambda>rv. real_cte_at rv and
ex_cte_cap_wp_to is_cnode_cap rv and
(\<lambda>s. descendants_of (snd (excaps!0)) (cdt s) = {}) and
cte_wp_at (\<lambda>c. \<exists>idx. c = (cap.UntypedCap False frame pageBits idx)) (snd (excaps!0)) and
(\<lambda>s. arm_asid_table (arch_state s) free = None)"
in hoare_post_imp_R)
apply (simp add: lookup_target_slot_def)
apply wp
apply (clarsimp simp: cte_wp_at_def)
apply (rule conjI, clarsimp)
apply (rule shiftl_less_t2n)
apply (rule order_less_le_trans, rule ucast_less, simp)
apply (simp add: asid_bits_def asid_low_bits_def)
apply (simp add: asid_bits_def)
apply (simp split del: if_split)
apply (wp ensure_no_children_sp select_ext_weak_wp select_wp whenE_throwError_wp | wpc | simp add: K_bind_def)+
apply clarsimp
apply (rule conjI, fastforce)
apply (cases excaps, simp)
apply (case_tac list, simp)
apply clarsimp
apply (rule conjI)
apply (drule cte_wp_at_norm, clarsimp, drule cte_wp_valid_cap, fastforce)+
apply assumption
apply (rule conjI)
apply clarsimp
apply (simp add: ex_cte_cap_wp_to_def)
apply (rule_tac x=ac in exI)
apply (rule_tac x=ba in exI)
apply (clarsimp simp add: cte_wp_at_caps_of_state)+
\<comment> \<open>PageCap\<close>
apply (simp add: arch_decode_invocation_def Let_def split_def cong: if_cong split del: if_split)
supply if_split[split del]
apply (cases "invocation_type label = ArchInvocationLabel ARMPageMap")
apply (rename_tac dev word rights vmpage_size option)
apply (rule hoare_pre)
apply (wp whenE_throwError_wp check_vp_wpR hoare_vcg_const_imp_lift_R
create_mapping_entries_parent_for_refs find_pd_for_asid_pd_at_asid
create_mapping_entries_valid_slots create_mapping_entries_same_refs_ex
find_pd_for_asid_lookup_pd_wp hoare_vcg_disj_lift_R
hoare_vcg_ex_lift_R
(* these two lift rules must go last; the list has overlapping rules *)
| wpc
| simp add: valid_arch_inv_def valid_page_inv_def is_pg_cap_def
cte_wp_at_caps_of_state[where P="\<lambda>c. same_refs rv c s" for rv s])+
apply (clarsimp simp: neq_Nil_conv invs_vspace_objs)
apply (frule(1) caps_of_state_valid)
apply (clarsimp simp: cte_wp_at_caps_of_state mask_cap_def)
apply (rule conjI[rotated]; clarsimp split: if_splits simp: invs_vspace_objs)
apply (auto, auto simp: cte_wp_at_caps_of_state invs_def valid_state_def
valid_cap_simps is_arch_update_def
is_arch_cap_def cap_master_cap_simps
vmsz_aligned_def vs_cap_ref_def
cap_aligned_def data_at_def
le_mask_iff_lt_2n[where 'a=32, folded word_bits_def, THEN iffD1]
ord_eq_le_trans[OF pd_bits_14]
linorder_not_le aligned_sum_less_kernel_base
elim: is_aligned_weaken split: vmpage_size.split
split: if_splits
intro!: is_aligned_addrFromPPtr is_aligned_addrFromPPtr_n
pbfs_atleast_pageBits)[2]
apply (cases "invocation_type label = ArchInvocationLabel ARMPageUnmap")
apply simp
apply (rule hoare_pre, wp)
apply (clarsimp simp: valid_arch_inv_def valid_page_inv_def)
apply (thin_tac "Ball S P" for S P)
apply (clarsimp split: option.split)
apply (clarsimp simp: valid_cap_def cap_aligned_def)
apply (simp add: valid_unmap_def)
apply (fastforce simp: vmsz_aligned_def elim: is_aligned_weaken intro!: pbfs_atleast_pageBits)
apply (cases "isPageFlushLabel (invocation_type label)")
apply (rule hoare_pre)
apply simp
apply (wp whenE_throwError_wp static_imp_wp hoare_drop_imps)
apply (simp add: valid_arch_inv_def valid_page_inv_def)
apply (wp find_pd_for_asid_pd_at_asid | wpc)+
apply (clarsimp simp: valid_cap_def mask_def)
apply (cases "invocation_type label = ArchInvocationLabel ARMPageGetAddress")
apply simp
apply (rule hoare_pre, wp)
apply (clarsimp simp: valid_arch_inv_def valid_page_inv_def)
apply simp
apply (rule hoare_pre, wp)
apply simp
apply (simp add: arch_decode_invocation_def Let_def split_def
is_final_cap_def
cong: if_cong)
apply (rename_tac word option)
apply (rule hoare_pre)
apply ((wp whenE_throwError_wp check_vp_wpR get_master_pde_wp hoare_vcg_all_lift_R
| wpc
| simp add: valid_arch_inv_def valid_pti_def unlessE_whenE vs_cap_ref_def split: if_splits
| rule_tac x="fst p" in hoare_imp_eq_substR
| wp (once) hoare_vcg_ex_lift_R)+)[1]
apply (rule_tac Q'="\<lambda>a b. ko_at (ArchObj (PageDirectory pd))
(a + (args ! 0 >> 20 << 2) && ~~ mask pd_bits) b \<longrightarrow>
pd (ucast (a + (args ! 0 >> 20 << 2) && mask pd_bits >> 2)) =
InvalidPDE \<longrightarrow> L word option p pd a b" for L in hoare_post_imp_R[rotated])
apply (intro impI)
apply (erule impE)
apply clarsimp
apply (erule impE)
apply (clarsimp split: pde.splits)
apply assumption
apply ((wp whenE_throwError_wp hoare_vcg_all_lift_R
find_pd_for_asid_lookup_slot [unfolded lookup_pd_slot_def Let_def]
find_pd_for_asid_ref_offset_voodoo find_pd_for_asid_shifting_voodoo
find_pd_for_asid_inv
| wpc
| simp add: valid_arch_inv_def valid_pti_def unlessE_whenE empty_pde_atI
vs_cap_ref_def if_apply_def2
| wp (once) hoare_drop_imps hoare_vcg_ex_lift_R)+)[6]
apply (clarsimp simp: is_cap_simps if_apply_def2)
apply (rule conjI)
apply clarsimp
apply (rule conjI, fastforce)
apply (rule conjI, fastforce)
apply (clarsimp simp: neq_Nil_conv)
apply (thin_tac "Ball S P" for S P)
apply (rule conjI)
apply (clarsimp simp: valid_cap_def cap_aligned_def
pt_bits_def pageBits_def is_aligned_addrFromPPtr_n)
apply (rule conjI)
apply (clarsimp simp: valid_cap_def cap_aligned_def)
apply (rule conjI)
apply (drule cte_wp_at_norm, clarsimp, drule cte_wp_valid_cap, fastforce)+
apply (clarsimp simp add: cap_rights_update_def acap_rights_update_def)
apply (clarsimp simp: valid_cap_def cap_aligned_def pt_bits_def pageBits_def linorder_not_le
order_le_less_trans[OF word_and_le2])
apply (rule conjI)
apply (clarsimp simp add: cte_wp_at_caps_of_state)
apply (drule (1) caps_of_state_valid[rotated])
apply clarsimp
apply (clarsimp simp: cap_master_cap_def is_arch_update_def)
apply (clarsimp simp: cap_asid_def cap_rights_update_def acap_rights_update_def is_cap_simps
split: option.split)
apply (rule conjI, fastforce)
apply (rule conjI, fastforce)
apply (clarsimp simp: pde_ref_def)
apply (frule invs_pd_caps)
apply (clarsimp simp: cte_wp_at_caps_of_state)
apply (frule (1) caps_of_state_valid[rotated])
apply (clarsimp simp: cap_rights_update_def acap_rights_update_def valid_cap_def)
apply (drule (2) valid_table_caps_ptD)
apply (rule conjI, fastforce simp:)+
apply (clarsimp simp: kernel_vsrefs_def)
apply (simp add: linorder_not_le, drule word_le_minus_one_leq)
apply (drule le_shiftr[where n=20], drule(1) order_trans)
apply (simp add: kernel_base_def)
apply (simp add: valid_arch_inv_def valid_pti_def)
apply (clarsimp simp: cte_wp_at_def is_cap_simps)
apply (simp add: arch_decode_invocation_def Let_def)
apply (cases "isPDFlushLabel (invocation_type label)")
apply simp
apply (rule hoare_pre)
apply (wpsimp wp: whenE_throwError_wp static_imp_wp hoare_drop_imp get_master_pte_wp
get_master_pde_wp whenE_throwError_wp
simp: resolve_vaddr_def valid_arch_inv_def valid_pdi_def)
apply (rule_tac Q'="\<lambda>pd' s. vspace_at_asid x2 pd' s \<and> x2 \<le> mask asid_bits \<and> x2 \<noteq> 0"
in hoare_post_imp_R)
apply wpsimp+
apply (wpsimp wp: throwError_validE_R simp: valid_cap_def mask_def)+
done
declare word_less_sub_le [simp]
crunch pred_tcb_at: perform_page_table_invocation, perform_page_invocation,
perform_asid_pool_invocation, perform_page_directory_invocation "pred_tcb_at proj P t"
(wp: crunch_wps simp: crunch_simps)
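(* Combining the per-invocation results: arch_perform_invocation preserves st_tcb_at P for non-inactive, non-idle threads. *)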
lemma arch_pinv_st_tcb_at:
"\<lbrace>invs and valid_arch_inv ai and ct_active and
st_tcb_at (P and (Not \<circ> inactive) and (Not \<circ> idle)) t\<rbrace>
arch_perform_invocation ai
\<lbrace>\<lambda>rv. st_tcb_at P t\<rbrace>"
apply (cases ai, simp_all add: arch_perform_invocation_def valid_arch_inv_def)
apply (wp perform_page_table_invocation_pred_tcb_at,
fastforce elim!: pred_tcb_weakenE)
apply (wp perform_page_directory_invocation_pred_tcb_at, fastforce elim: pred_tcb_weakenE)
apply (wp perform_page_invocation_pred_tcb_at, fastforce elim!: pred_tcb_weakenE)
apply (wp perform_asid_control_invocation_st_tcb_at,
fastforce elim!: pred_tcb_weakenE)
apply (wp perform_asid_pool_invocation_pred_tcb_at,
fastforce elim!: pred_tcb_weakenE)
done
end
context begin interpretation Arch .
requalify_consts
valid_arch_inv
requalify_facts
invoke_arch_tcb
invoke_arch_invs
sts_valid_arch_inv
arch_decode_inv_wf
arch_pinv_st_tcb_at
end
declare invoke_arch_invs[wp]
declare arch_decode_inv_wf[wp]
end
|
[GOAL]
C : Type u_1
inst✝⁴ : Category.{u_2, u_1} C
inst✝³ : Preadditive C
inst✝² : HasKernels C
X Y : C
inst✝¹ : Simple X
inst✝ : Simple Y
f : X ⟶ Y
I : IsIso f
⊢ f ≠ 0
[PROOFSTEP]
intro h
[GOAL]
C : Type u_1
inst✝⁴ : Category.{u_2, u_1} C
inst✝³ : Preadditive C
inst✝² : HasKernels C
X Y : C
inst✝¹ : Simple X
inst✝ : Simple Y
f : X ⟶ Y
I : IsIso f
h : f = 0
⊢ False
[PROOFSTEP]
apply id_nonzero X
[GOAL]
C : Type u_1
inst✝⁴ : Category.{u_2, u_1} C
inst✝³ : Preadditive C
inst✝² : HasKernels C
X Y : C
inst✝¹ : Simple X
inst✝ : Simple Y
f : X ⟶ Y
I : IsIso f
h : f = 0
⊢ 𝟙 X = 0
[PROOFSTEP]
simp only [← IsIso.hom_inv_id f, h, zero_comp]
[GOAL]
C : Type u_1
inst✝³ : Category.{?u.3173, u_1} C
inst✝² : Preadditive C
inst✝¹ : HasKernels C
X : C
inst✝ : Simple X
⊢ DivisionRing (End X)
[PROOFSTEP]
classical exact
{
(inferInstance :
Ring (End X)) with
inv := fun f =>
if h : f = 0 then 0
else
haveI := isIso_of_hom_simple h
inv f
exists_pair_ne := ⟨𝟙 X, 0, id_nonzero _⟩
inv_zero := dif_pos rfl
mul_inv_cancel := fun f h => by
dsimp
rw [dif_neg h]
haveI := isIso_of_hom_simple h
exact IsIso.inv_hom_id f }
[GOAL]
C : Type u_1
inst✝³ : Category.{?u.3173, u_1} C
inst✝² : Preadditive C
inst✝¹ : HasKernels C
X : C
inst✝ : Simple X
⊢ DivisionRing (End X)
[PROOFSTEP]
exact
{
(inferInstance :
Ring (End X)) with
inv := fun f =>
if h : f = 0 then 0
else
haveI := isIso_of_hom_simple h
inv f
exists_pair_ne := ⟨𝟙 X, 0, id_nonzero _⟩
inv_zero := dif_pos rfl
mul_inv_cancel := fun f h => by
dsimp
rw [dif_neg h]
haveI := isIso_of_hom_simple h
exact IsIso.inv_hom_id f }
[GOAL]
C : Type u_1
inst✝³ : Category.{?u.4991, u_1} C
inst✝² : Preadditive C
inst✝¹ : HasKernels C
X : C
inst✝ : Simple X
src✝ : Ring (End X) := inferInstance
f : End X
h : f ≠ 0
⊢ f * f⁻¹ = 1
[PROOFSTEP]
dsimp
[GOAL]
C : Type u_1
inst✝³ : Category.{?u.4991, u_1} C
inst✝² : Preadditive C
inst✝¹ : HasKernels C
X : C
inst✝ : Simple X
src✝ : Ring (End X) := inferInstance
f : End X
h : f ≠ 0
⊢ (if h : f = 0 then 0 else inv f) ≫ f = 𝟙 X
[PROOFSTEP]
rw [dif_neg h]
[GOAL]
C : Type u_1
inst✝³ : Category.{?u.4991, u_1} C
inst✝² : Preadditive C
inst✝¹ : HasKernels C
X : C
inst✝ : Simple X
src✝ : Ring (End X) := inferInstance
f : End X
h : f ≠ 0
⊢ inv f ≫ f = 𝟙 X
[PROOFSTEP]
haveI := isIso_of_hom_simple h
[GOAL]
C : Type u_1
inst✝³ : Category.{?u.4991, u_1} C
inst✝² : Preadditive C
inst✝¹ : HasKernels C
X : C
inst✝ : Simple X
src✝ : Ring (End X) := inferInstance
f : End X
h : f ≠ 0
this : IsIso f
⊢ inv f ≫ f = 𝟙 X
[PROOFSTEP]
exact IsIso.inv_hom_id f
[GOAL]
C : Type u_1
inst✝⁶ : Category.{u_3, u_1} C
inst✝⁵ : Preadditive C
𝕜 : Type u_2
inst✝⁴ : DivisionRing 𝕜
inst✝³ : HasKernels C
inst✝² : Linear 𝕜 C
X Y : C
inst✝¹ : Simple X
inst✝ : Simple Y
h : (X ≅ Y) → False
f : X ⟶ Y
⊢ f = 0
[PROOFSTEP]
have p := not_congr (isIso_iff_nonzero f)
[GOAL]
C : Type u_1
inst✝⁶ : Category.{u_3, u_1} C
inst✝⁵ : Preadditive C
𝕜 : Type u_2
inst✝⁴ : DivisionRing 𝕜
inst✝³ : HasKernels C
inst✝² : Linear 𝕜 C
X Y : C
inst✝¹ : Simple X
inst✝ : Simple Y
h : (X ≅ Y) → False
f : X ⟶ Y
p : ¬IsIso f ↔ ¬f ≠ 0
⊢ f = 0
[PROOFSTEP]
simp only [Classical.not_not, Ne.def] at p
[GOAL]
C : Type u_1
inst✝⁶ : Category.{u_3, u_1} C
inst✝⁵ : Preadditive C
𝕜 : Type u_2
inst✝⁴ : DivisionRing 𝕜
inst✝³ : HasKernels C
inst✝² : Linear 𝕜 C
X Y : C
inst✝¹ : Simple X
inst✝ : Simple Y
h : (X ≅ Y) → False
f : X ⟶ Y
p : ¬IsIso f ↔ f = 0
⊢ f = 0
[PROOFSTEP]
refine' p.mp fun _ => h (asIso f)
[GOAL]
C : Type u_1
inst✝⁴ : Category.{u_3, u_1} C
inst✝³ : Preadditive C
𝕜 : Type u_2
inst✝² : Field 𝕜
inst✝¹ : IsAlgClosed 𝕜
inst✝ : Linear 𝕜 C
X : C
isIso_iff_nonzero : ∀ (f : X ⟶ X), IsIso f ↔ f ≠ 0
I : FiniteDimensional 𝕜 (X ⟶ X)
⊢ finrank 𝕜 (X ⟶ X) = 1
[PROOFSTEP]
have id_nonzero := (isIso_iff_nonzero (𝟙 X)).mp (by infer_instance)
[GOAL]
C : Type u_1
inst✝⁴ : Category.{u_3, u_1} C
inst✝³ : Preadditive C
𝕜 : Type u_2
inst✝² : Field 𝕜
inst✝¹ : IsAlgClosed 𝕜
inst✝ : Linear 𝕜 C
X : C
isIso_iff_nonzero : ∀ (f : X ⟶ X), IsIso f ↔ f ≠ 0
I : FiniteDimensional 𝕜 (X ⟶ X)
⊢ IsIso (𝟙 X)
[PROOFSTEP]
infer_instance
[GOAL]
C : Type u_1
inst✝⁴ : Category.{u_3, u_1} C
inst✝³ : Preadditive C
𝕜 : Type u_2
inst✝² : Field 𝕜
inst✝¹ : IsAlgClosed 𝕜
inst✝ : Linear 𝕜 C
X : C
isIso_iff_nonzero : ∀ (f : X ⟶ X), IsIso f ↔ f ≠ 0
I : FiniteDimensional 𝕜 (X ⟶ X)
id_nonzero : 𝟙 X ≠ 0
⊢ finrank 𝕜 (X ⟶ X) = 1
[PROOFSTEP]
refine' finrank_eq_one (𝟙 X) id_nonzero _
[GOAL]
C : Type u_1
inst✝⁴ : Category.{u_3, u_1} C
inst✝³ : Preadditive C
𝕜 : Type u_2
inst✝² : Field 𝕜
inst✝¹ : IsAlgClosed 𝕜
inst✝ : Linear 𝕜 C
X : C
isIso_iff_nonzero : ∀ (f : X ⟶ X), IsIso f ↔ f ≠ 0
I : FiniteDimensional 𝕜 (X ⟶ X)
id_nonzero : 𝟙 X ≠ 0
⊢ ∀ (w : X ⟶ X), ∃ c, c • 𝟙 X = w
[PROOFSTEP]
intro f
[GOAL]
C : Type u_1
inst✝⁴ : Category.{u_3, u_1} C
inst✝³ : Preadditive C
𝕜 : Type u_2
inst✝² : Field 𝕜
inst✝¹ : IsAlgClosed 𝕜
inst✝ : Linear 𝕜 C
X : C
isIso_iff_nonzero : ∀ (f : X ⟶ X), IsIso f ↔ f ≠ 0
I : FiniteDimensional 𝕜 (X ⟶ X)
id_nonzero : 𝟙 X ≠ 0
f : X ⟶ X
⊢ ∃ c, c • 𝟙 X = f
[PROOFSTEP]
have : Nontrivial (End X) := nontrivial_of_ne _ _ id_nonzero
[GOAL]
C : Type u_1
inst✝⁴ : Category.{u_3, u_1} C
inst✝³ : Preadditive C
𝕜 : Type u_2
inst✝² : Field 𝕜
inst✝¹ : IsAlgClosed 𝕜
inst✝ : Linear 𝕜 C
X : C
isIso_iff_nonzero : ∀ (f : X ⟶ X), IsIso f ↔ f ≠ 0
I : FiniteDimensional 𝕜 (X ⟶ X)
id_nonzero : 𝟙 X ≠ 0
f : X ⟶ X
this : Nontrivial (End X)
⊢ ∃ c, c • 𝟙 X = f
[PROOFSTEP]
have : FiniteDimensional 𝕜 (End X) := I
[GOAL]
C : Type u_1
inst✝⁴ : Category.{u_3, u_1} C
inst✝³ : Preadditive C
𝕜 : Type u_2
inst✝² : Field 𝕜
inst✝¹ : IsAlgClosed 𝕜
inst✝ : Linear 𝕜 C
X : C
isIso_iff_nonzero : ∀ (f : X ⟶ X), IsIso f ↔ f ≠ 0
I : FiniteDimensional 𝕜 (X ⟶ X)
id_nonzero : 𝟙 X ≠ 0
f : X ⟶ X
this✝ : Nontrivial (End X)
this : FiniteDimensional 𝕜 (End X)
⊢ ∃ c, c • 𝟙 X = f
[PROOFSTEP]
obtain ⟨c, nu⟩ := spectrum.nonempty_of_isAlgClosed_of_finiteDimensional 𝕜 (End.of f)
[GOAL]
case intro
C : Type u_1
inst✝⁴ : Category.{u_3, u_1} C
inst✝³ : Preadditive C
𝕜 : Type u_2
inst✝² : Field 𝕜
inst✝¹ : IsAlgClosed 𝕜
inst✝ : Linear 𝕜 C
X : C
isIso_iff_nonzero : ∀ (f : X ⟶ X), IsIso f ↔ f ≠ 0
I : FiniteDimensional 𝕜 (X ⟶ X)
id_nonzero : 𝟙 X ≠ 0
f : X ⟶ X
this✝ : Nontrivial (End X)
this : FiniteDimensional 𝕜 (End X)
c : 𝕜
nu : c ∈ spectrum 𝕜 (End.of f)
⊢ ∃ c, c • 𝟙 X = f
[PROOFSTEP]
use c
[GOAL]
case h
C : Type u_1
inst✝⁴ : Category.{u_3, u_1} C
inst✝³ : Preadditive C
𝕜 : Type u_2
inst✝² : Field 𝕜
inst✝¹ : IsAlgClosed 𝕜
inst✝ : Linear 𝕜 C
X : C
isIso_iff_nonzero : ∀ (f : X ⟶ X), IsIso f ↔ f ≠ 0
I : FiniteDimensional 𝕜 (X ⟶ X)
id_nonzero : 𝟙 X ≠ 0
f : X ⟶ X
this✝ : Nontrivial (End X)
this : FiniteDimensional 𝕜 (End X)
c : 𝕜
nu : c ∈ spectrum 𝕜 (End.of f)
⊢ c • 𝟙 X = f
[PROOFSTEP]
rw [spectrum.mem_iff, IsUnit.sub_iff, isUnit_iff_isIso, isIso_iff_nonzero, Ne.def, Classical.not_not, sub_eq_zero,
Algebra.algebraMap_eq_smul_one] at nu
[GOAL]
case h
C : Type u_1
inst✝⁴ : Category.{u_3, u_1} C
inst✝³ : Preadditive C
𝕜 : Type u_2
inst✝² : Field 𝕜
inst✝¹ : IsAlgClosed 𝕜
inst✝ : Linear 𝕜 C
X : C
isIso_iff_nonzero : ∀ (f : X ⟶ X), IsIso f ↔ f ≠ 0
I : FiniteDimensional 𝕜 (X ⟶ X)
id_nonzero : 𝟙 X ≠ 0
f : X ⟶ X
this✝ : Nontrivial (End X)
this : FiniteDimensional 𝕜 (End X)
c : 𝕜
nu : End.of f = c • 1
⊢ c • 𝟙 X = f
[PROOFSTEP]
exact nu.symm
[GOAL]
C : Type u_1
inst✝⁶ : Category.{?u.16907, u_1} C
inst✝⁵ : Preadditive C
𝕜 : Type u_2
inst✝⁴ : Field 𝕜
inst✝³ : IsAlgClosed 𝕜
inst✝² : Linear 𝕜 C
inst✝¹ : HasKernels C
X : C
inst✝ : Simple X
I : FiniteDimensional 𝕜 (X ⟶ X)
⊢ Field (End X)
[PROOFSTEP]
classical exact
{ (inferInstance : DivisionRing (End X)) with
mul_comm := fun f g => by
obtain ⟨c, rfl⟩ := endomorphism_simple_eq_smul_id 𝕜 f
obtain ⟨d, rfl⟩ := endomorphism_simple_eq_smul_id 𝕜 g
simp [← mul_smul, mul_comm c d] }
[GOAL]
C : Type u_1
inst✝⁶ : Category.{?u.16907, u_1} C
inst✝⁵ : Preadditive C
𝕜 : Type u_2
inst✝⁴ : Field 𝕜
inst✝³ : IsAlgClosed 𝕜
inst✝² : Linear 𝕜 C
inst✝¹ : HasKernels C
X : C
inst✝ : Simple X
I : FiniteDimensional 𝕜 (X ⟶ X)
⊢ Field (End X)
[PROOFSTEP]
exact
{ (inferInstance : DivisionRing (End X)) with
mul_comm := fun f g => by
obtain ⟨c, rfl⟩ := endomorphism_simple_eq_smul_id 𝕜 f
obtain ⟨d, rfl⟩ := endomorphism_simple_eq_smul_id 𝕜 g
simp [← mul_smul, mul_comm c d] }
[GOAL]
C : Type u_1
inst✝⁶ : Category.{?u.16907, u_1} C
inst✝⁵ : Preadditive C
𝕜 : Type u_2
inst✝⁴ : Field 𝕜
inst✝³ : IsAlgClosed 𝕜
inst✝² : Linear 𝕜 C
inst✝¹ : HasKernels C
X : C
inst✝ : Simple X
I : FiniteDimensional 𝕜 (X ⟶ X)
src✝ : DivisionRing (End X) := inferInstance
f g : End X
⊢ f * g = g * f
[PROOFSTEP]
obtain ⟨c, rfl⟩ := endomorphism_simple_eq_smul_id 𝕜 f
[GOAL]
case intro
C : Type u_1
inst✝⁶ : Category.{?u.16907, u_1} C
inst✝⁵ : Preadditive C
𝕜 : Type u_2
inst✝⁴ : Field 𝕜
inst✝³ : IsAlgClosed 𝕜
inst✝² : Linear 𝕜 C
inst✝¹ : HasKernels C
X : C
inst✝ : Simple X
I : FiniteDimensional 𝕜 (X ⟶ X)
src✝ : DivisionRing (End X) := inferInstance
g : End X
c : 𝕜
⊢ c • 𝟙 X * g = g * c • 𝟙 X
[PROOFSTEP]
obtain ⟨d, rfl⟩ := endomorphism_simple_eq_smul_id 𝕜 g
[GOAL]
case intro.intro
C : Type u_1
inst✝⁶ : Category.{?u.16907, u_1} C
inst✝⁵ : Preadditive C
𝕜 : Type u_2
inst✝⁴ : Field 𝕜
inst✝³ : IsAlgClosed 𝕜
inst✝² : Linear 𝕜 C
inst✝¹ : HasKernels C
X : C
inst✝ : Simple X
I : FiniteDimensional 𝕜 (X ⟶ X)
src✝ : DivisionRing (End X) := inferInstance
c d : 𝕜
⊢ c • 𝟙 X * d • 𝟙 X = d • 𝟙 X * c • 𝟙 X
[PROOFSTEP]
simp [← mul_smul, mul_comm c d]
[GOAL]
C : Type u_1
inst✝⁸ : Category.{u_3, u_1} C
inst✝⁷ : Preadditive C
𝕜 : Type u_2
inst✝⁶ : Field 𝕜
inst✝⁵ : IsAlgClosed 𝕜
inst✝⁴ : Linear 𝕜 C
inst✝³ : HasKernels C
X Y : C
inst✝² : FiniteDimensional 𝕜 (X ⟶ X)
inst✝¹ : Simple X
inst✝ : Simple Y
⊢ finrank 𝕜 (X ⟶ Y) ≤ 1
[PROOFSTEP]
obtain (h | h) := subsingleton_or_nontrivial (X ⟶ Y)
[GOAL]
case inl
C : Type u_1
inst✝⁸ : Category.{u_3, u_1} C
inst✝⁷ : Preadditive C
𝕜 : Type u_2
inst✝⁶ : Field 𝕜
inst✝⁵ : IsAlgClosed 𝕜
inst✝⁴ : Linear 𝕜 C
inst✝³ : HasKernels C
X Y : C
inst✝² : FiniteDimensional 𝕜 (X ⟶ X)
inst✝¹ : Simple X
inst✝ : Simple Y
h : Subsingleton (X ⟶ Y)
⊢ finrank 𝕜 (X ⟶ Y) ≤ 1
[PROOFSTEP]
rw [finrank_zero_of_subsingleton]
[GOAL]
case inl
C : Type u_1
inst✝⁸ : Category.{u_3, u_1} C
inst✝⁷ : Preadditive C
𝕜 : Type u_2
inst✝⁶ : Field 𝕜
inst✝⁵ : IsAlgClosed 𝕜
inst✝⁴ : Linear 𝕜 C
inst✝³ : HasKernels C
X Y : C
inst✝² : FiniteDimensional 𝕜 (X ⟶ X)
inst✝¹ : Simple X
inst✝ : Simple Y
h : Subsingleton (X ⟶ Y)
⊢ 0 ≤ 1
[PROOFSTEP]
exact zero_le_one
[GOAL]
case inr
C : Type u_1
inst✝⁸ : Category.{u_3, u_1} C
inst✝⁷ : Preadditive C
𝕜 : Type u_2
inst✝⁶ : Field 𝕜
inst✝⁵ : IsAlgClosed 𝕜
inst✝⁴ : Linear 𝕜 C
inst✝³ : HasKernels C
X Y : C
inst✝² : FiniteDimensional 𝕜 (X ⟶ X)
inst✝¹ : Simple X
inst✝ : Simple Y
h : Nontrivial (X ⟶ Y)
⊢ finrank 𝕜 (X ⟶ Y) ≤ 1
[PROOFSTEP]
obtain ⟨f, nz⟩ := (nontrivial_iff_exists_ne 0).mp h
[GOAL]
case inr.intro
C : Type u_1
inst✝⁸ : Category.{u_3, u_1} C
inst✝⁷ : Preadditive C
𝕜 : Type u_2
inst✝⁶ : Field 𝕜
inst✝⁵ : IsAlgClosed 𝕜
inst✝⁴ : Linear 𝕜 C
inst✝³ : HasKernels C
X Y : C
inst✝² : FiniteDimensional 𝕜 (X ⟶ X)
inst✝¹ : Simple X
inst✝ : Simple Y
h : Nontrivial (X ⟶ Y)
f : X ⟶ Y
nz : f ≠ 0
⊢ finrank 𝕜 (X ⟶ Y) ≤ 1
[PROOFSTEP]
haveI fi := (isIso_iff_nonzero f).mpr nz
[GOAL]
case inr.intro
C : Type u_1
inst✝⁸ : Category.{u_3, u_1} C
inst✝⁷ : Preadditive C
𝕜 : Type u_2
inst✝⁶ : Field 𝕜
inst✝⁵ : IsAlgClosed 𝕜
inst✝⁴ : Linear 𝕜 C
inst✝³ : HasKernels C
X Y : C
inst✝² : FiniteDimensional 𝕜 (X ⟶ X)
inst✝¹ : Simple X
inst✝ : Simple Y
h : Nontrivial (X ⟶ Y)
f : X ⟶ Y
nz : f ≠ 0
fi : IsIso f
⊢ finrank 𝕜 (X ⟶ Y) ≤ 1
[PROOFSTEP]
refine' finrank_le_one f _
[GOAL]
case inr.intro
C : Type u_1
inst✝⁸ : Category.{u_3, u_1} C
inst✝⁷ : Preadditive C
𝕜 : Type u_2
inst✝⁶ : Field 𝕜
inst✝⁵ : IsAlgClosed 𝕜
inst✝⁴ : Linear 𝕜 C
inst✝³ : HasKernels C
X Y : C
inst✝² : FiniteDimensional 𝕜 (X ⟶ X)
inst✝¹ : Simple X
inst✝ : Simple Y
h : Nontrivial (X ⟶ Y)
f : X ⟶ Y
nz : f ≠ 0
fi : IsIso f
⊢ ∀ (w : X ⟶ Y), ∃ c, c • f = w
[PROOFSTEP]
intro g
[GOAL]
case inr.intro
C : Type u_1
inst✝⁸ : Category.{u_3, u_1} C
inst✝⁷ : Preadditive C
𝕜 : Type u_2
inst✝⁶ : Field 𝕜
inst✝⁵ : IsAlgClosed 𝕜
inst✝⁴ : Linear 𝕜 C
inst✝³ : HasKernels C
X Y : C
inst✝² : FiniteDimensional 𝕜 (X ⟶ X)
inst✝¹ : Simple X
inst✝ : Simple Y
h : Nontrivial (X ⟶ Y)
f : X ⟶ Y
nz : f ≠ 0
fi : IsIso f
g : X ⟶ Y
⊢ ∃ c, c • f = g
[PROOFSTEP]
obtain ⟨c, w⟩ := endomorphism_simple_eq_smul_id 𝕜 (g ≫ inv f)
[GOAL]
case inr.intro.intro
C : Type u_1
inst✝⁸ : Category.{u_3, u_1} C
inst✝⁷ : Preadditive C
𝕜 : Type u_2
inst✝⁶ : Field 𝕜
inst✝⁵ : IsAlgClosed 𝕜
inst✝⁴ : Linear 𝕜 C
inst✝³ : HasKernels C
X Y : C
inst✝² : FiniteDimensional 𝕜 (X ⟶ X)
inst✝¹ : Simple X
inst✝ : Simple Y
h : Nontrivial (X ⟶ Y)
f : X ⟶ Y
nz : f ≠ 0
fi : IsIso f
g : X ⟶ Y
c : 𝕜
w : c • 𝟙 X = g ≫ inv f
⊢ ∃ c, c • f = g
[PROOFSTEP]
exact ⟨c, by simpa using w =≫ f⟩
[GOAL]
C : Type u_1
inst✝⁸ : Category.{u_3, u_1} C
inst✝⁷ : Preadditive C
𝕜 : Type u_2
inst✝⁶ : Field 𝕜
inst✝⁵ : IsAlgClosed 𝕜
inst✝⁴ : Linear 𝕜 C
inst✝³ : HasKernels C
X Y : C
inst✝² : FiniteDimensional 𝕜 (X ⟶ X)
inst✝¹ : Simple X
inst✝ : Simple Y
h : Nontrivial (X ⟶ Y)
f : X ⟶ Y
nz : f ≠ 0
fi : IsIso f
g : X ⟶ Y
c : 𝕜
w : c • 𝟙 X = g ≫ inv f
⊢ c • f = g
[PROOFSTEP]
simpa using w =≫ f
[GOAL]
C : Type u_1
inst✝⁹ : Category.{u_3, u_1} C
inst✝⁸ : Preadditive C
𝕜 : Type u_2
inst✝⁷ : Field 𝕜
inst✝⁶ : IsAlgClosed 𝕜
inst✝⁵ : Linear 𝕜 C
inst✝⁴ : HasKernels C
X Y : C
inst✝³ : FiniteDimensional 𝕜 (X ⟶ X)
inst✝² : FiniteDimensional 𝕜 (X ⟶ Y)
inst✝¹ : Simple X
inst✝ : Simple Y
⊢ finrank 𝕜 (X ⟶ Y) = 1 ↔ Nonempty (X ≅ Y)
[PROOFSTEP]
fconstructor
[GOAL]
case mp
C : Type u_1
inst✝⁹ : Category.{u_3, u_1} C
inst✝⁸ : Preadditive C
𝕜 : Type u_2
inst✝⁷ : Field 𝕜
inst✝⁶ : IsAlgClosed 𝕜
inst✝⁵ : Linear 𝕜 C
inst✝⁴ : HasKernels C
X Y : C
inst✝³ : FiniteDimensional 𝕜 (X ⟶ X)
inst✝² : FiniteDimensional 𝕜 (X ⟶ Y)
inst✝¹ : Simple X
inst✝ : Simple Y
⊢ finrank 𝕜 (X ⟶ Y) = 1 → Nonempty (X ≅ Y)
[PROOFSTEP]
intro h
[GOAL]
case mp
C : Type u_1
inst✝⁹ : Category.{u_3, u_1} C
inst✝⁸ : Preadditive C
𝕜 : Type u_2
inst✝⁷ : Field 𝕜
inst✝⁶ : IsAlgClosed 𝕜
inst✝⁵ : Linear 𝕜 C
inst✝⁴ : HasKernels C
X Y : C
inst✝³ : FiniteDimensional 𝕜 (X ⟶ X)
inst✝² : FiniteDimensional 𝕜 (X ⟶ Y)
inst✝¹ : Simple X
inst✝ : Simple Y
h : finrank 𝕜 (X ⟶ Y) = 1
⊢ Nonempty (X ≅ Y)
[PROOFSTEP]
rw [finrank_eq_one_iff'] at h
[GOAL]
case mp
C : Type u_1
inst✝⁹ : Category.{u_3, u_1} C
inst✝⁸ : Preadditive C
𝕜 : Type u_2
inst✝⁷ : Field 𝕜
inst✝⁶ : IsAlgClosed 𝕜
inst✝⁵ : Linear 𝕜 C
inst✝⁴ : HasKernels C
X Y : C
inst✝³ : FiniteDimensional 𝕜 (X ⟶ X)
inst✝² : FiniteDimensional 𝕜 (X ⟶ Y)
inst✝¹ : Simple X
inst✝ : Simple Y
h : ∃ v _n, ∀ (w : X ⟶ Y), ∃ c, c • v = w
⊢ Nonempty (X ≅ Y)
[PROOFSTEP]
obtain ⟨f, nz, -⟩ := h
[GOAL]
case mp.intro.intro
C : Type u_1
inst✝⁹ : Category.{u_3, u_1} C
inst✝⁸ : Preadditive C
𝕜 : Type u_2
inst✝⁷ : Field 𝕜
inst✝⁶ : IsAlgClosed 𝕜
inst✝⁵ : Linear 𝕜 C
inst✝⁴ : HasKernels C
X Y : C
inst✝³ : FiniteDimensional 𝕜 (X ⟶ X)
inst✝² : FiniteDimensional 𝕜 (X ⟶ Y)
inst✝¹ : Simple X
inst✝ : Simple Y
f : X ⟶ Y
nz : f ≠ 0
⊢ Nonempty (X ≅ Y)
[PROOFSTEP]
rw [← isIso_iff_nonzero] at nz
[GOAL]
case mp.intro.intro
C : Type u_1
inst✝⁹ : Category.{u_3, u_1} C
inst✝⁸ : Preadditive C
𝕜 : Type u_2
inst✝⁷ : Field 𝕜
inst✝⁶ : IsAlgClosed 𝕜
inst✝⁵ : Linear 𝕜 C
inst✝⁴ : HasKernels C
X Y : C
inst✝³ : FiniteDimensional 𝕜 (X ⟶ X)
inst✝² : FiniteDimensional 𝕜 (X ⟶ Y)
inst✝¹ : Simple X
inst✝ : Simple Y
f : X ⟶ Y
nz : IsIso f
⊢ Nonempty (X ≅ Y)
[PROOFSTEP]
exact ⟨asIso f⟩
[GOAL]
case mpr
C : Type u_1
inst✝⁹ : Category.{u_3, u_1} C
inst✝⁸ : Preadditive C
𝕜 : Type u_2
inst✝⁷ : Field 𝕜
inst✝⁶ : IsAlgClosed 𝕜
inst✝⁵ : Linear 𝕜 C
inst✝⁴ : HasKernels C
X Y : C
inst✝³ : FiniteDimensional 𝕜 (X ⟶ X)
inst✝² : FiniteDimensional 𝕜 (X ⟶ Y)
inst✝¹ : Simple X
inst✝ : Simple Y
⊢ Nonempty (X ≅ Y) → finrank 𝕜 (X ⟶ Y) = 1
[PROOFSTEP]
rintro ⟨f⟩
[GOAL]
case mpr.intro
C : Type u_1
inst✝⁹ : Category.{u_3, u_1} C
inst✝⁸ : Preadditive C
𝕜 : Type u_2
inst✝⁷ : Field 𝕜
inst✝⁶ : IsAlgClosed 𝕜
inst✝⁵ : Linear 𝕜 C
inst✝⁴ : HasKernels C
X Y : C
inst✝³ : FiniteDimensional 𝕜 (X ⟶ X)
inst✝² : FiniteDimensional 𝕜 (X ⟶ Y)
inst✝¹ : Simple X
inst✝ : Simple Y
f : X ≅ Y
⊢ finrank 𝕜 (X ⟶ Y) = 1
[PROOFSTEP]
have le_one := finrank_hom_simple_simple_le_one 𝕜 X Y
[GOAL]
case mpr.intro
C : Type u_1
inst✝⁹ : Category.{u_3, u_1} C
inst✝⁸ : Preadditive C
𝕜 : Type u_2
inst✝⁷ : Field 𝕜
inst✝⁶ : IsAlgClosed 𝕜
inst✝⁵ : Linear 𝕜 C
inst✝⁴ : HasKernels C
X Y : C
inst✝³ : FiniteDimensional 𝕜 (X ⟶ X)
inst✝² : FiniteDimensional 𝕜 (X ⟶ Y)
inst✝¹ : Simple X
inst✝ : Simple Y
f : X ≅ Y
le_one : finrank 𝕜 (X ⟶ Y) ≤ 1
⊢ finrank 𝕜 (X ⟶ Y) = 1
[PROOFSTEP]
have zero_lt : 0 < finrank 𝕜 (X ⟶ Y) :=
finrank_pos_iff_exists_ne_zero.mpr ⟨f.hom, (isIso_iff_nonzero f.hom).mp inferInstance⟩
[GOAL]
case mpr.intro
C : Type u_1
inst✝⁹ : Category.{u_3, u_1} C
inst✝⁸ : Preadditive C
𝕜 : Type u_2
inst✝⁷ : Field 𝕜
inst✝⁶ : IsAlgClosed 𝕜
inst✝⁵ : Linear 𝕜 C
inst✝⁴ : HasKernels C
X Y : C
inst✝³ : FiniteDimensional 𝕜 (X ⟶ X)
inst✝² : FiniteDimensional 𝕜 (X ⟶ Y)
inst✝¹ : Simple X
inst✝ : Simple Y
f : X ≅ Y
le_one : finrank 𝕜 (X ⟶ Y) ≤ 1
zero_lt : 0 < finrank 𝕜 (X ⟶ Y)
⊢ finrank 𝕜 (X ⟶ Y) = 1
[PROOFSTEP]
linarith
[GOAL]
C : Type u_1
inst✝⁹ : Category.{u_3, u_1} C
inst✝⁸ : Preadditive C
𝕜 : Type u_2
inst✝⁷ : Field 𝕜
inst✝⁶ : IsAlgClosed 𝕜
inst✝⁵ : Linear 𝕜 C
inst✝⁴ : HasKernels C
X Y : C
inst✝³ : FiniteDimensional 𝕜 (X ⟶ X)
inst✝² : FiniteDimensional 𝕜 (X ⟶ Y)
inst✝¹ : Simple X
inst✝ : Simple Y
⊢ finrank 𝕜 (X ⟶ Y) = 0 ↔ IsEmpty (X ≅ Y)
[PROOFSTEP]
rw [← not_nonempty_iff, ← not_congr (finrank_hom_simple_simple_eq_one_iff 𝕜 X Y)]
[GOAL]
C : Type u_1
inst✝⁹ : Category.{u_3, u_1} C
inst✝⁸ : Preadditive C
𝕜 : Type u_2
inst✝⁷ : Field 𝕜
inst✝⁶ : IsAlgClosed 𝕜
inst✝⁵ : Linear 𝕜 C
inst✝⁴ : HasKernels C
X Y : C
inst✝³ : FiniteDimensional 𝕜 (X ⟶ X)
inst✝² : FiniteDimensional 𝕜 (X ⟶ Y)
inst✝¹ : Simple X
inst✝ : Simple Y
⊢ finrank 𝕜 (X ⟶ Y) = 0 ↔ ¬finrank 𝕜 (X ⟶ Y) = 1
[PROOFSTEP]
refine' ⟨fun h => by rw [h]; simp, fun h => _⟩
[GOAL]
C : Type u_1
inst✝⁹ : Category.{u_3, u_1} C
inst✝⁸ : Preadditive C
𝕜 : Type u_2
inst✝⁷ : Field 𝕜
inst✝⁶ : IsAlgClosed 𝕜
inst✝⁵ : Linear 𝕜 C
inst✝⁴ : HasKernels C
X Y : C
inst✝³ : FiniteDimensional 𝕜 (X ⟶ X)
inst✝² : FiniteDimensional 𝕜 (X ⟶ Y)
inst✝¹ : Simple X
inst✝ : Simple Y
h : finrank 𝕜 (X ⟶ Y) = 0
⊢ ¬finrank 𝕜 (X ⟶ Y) = 1
[PROOFSTEP]
rw [h]
[GOAL]
C : Type u_1
inst✝⁹ : Category.{u_3, u_1} C
inst✝⁸ : Preadditive C
𝕜 : Type u_2
inst✝⁷ : Field 𝕜
inst✝⁶ : IsAlgClosed 𝕜
inst✝⁵ : Linear 𝕜 C
inst✝⁴ : HasKernels C
X Y : C
inst✝³ : FiniteDimensional 𝕜 (X ⟶ X)
inst✝² : FiniteDimensional 𝕜 (X ⟶ Y)
inst✝¹ : Simple X
inst✝ : Simple Y
h : finrank 𝕜 (X ⟶ Y) = 0
⊢ ¬0 = 1
[PROOFSTEP]
simp
[GOAL]
C : Type u_1
inst✝⁹ : Category.{u_3, u_1} C
inst✝⁸ : Preadditive C
𝕜 : Type u_2
inst✝⁷ : Field 𝕜
inst✝⁶ : IsAlgClosed 𝕜
inst✝⁵ : Linear 𝕜 C
inst✝⁴ : HasKernels C
X Y : C
inst✝³ : FiniteDimensional 𝕜 (X ⟶ X)
inst✝² : FiniteDimensional 𝕜 (X ⟶ Y)
inst✝¹ : Simple X
inst✝ : Simple Y
h : ¬finrank 𝕜 (X ⟶ Y) = 1
⊢ finrank 𝕜 (X ⟶ Y) = 0
[PROOFSTEP]
have := finrank_hom_simple_simple_le_one 𝕜 X Y
[GOAL]
C : Type u_1
inst✝⁹ : Category.{u_3, u_1} C
inst✝⁸ : Preadditive C
𝕜 : Type u_2
inst✝⁷ : Field 𝕜
inst✝⁶ : IsAlgClosed 𝕜
inst✝⁵ : Linear 𝕜 C
inst✝⁴ : HasKernels C
X Y : C
inst✝³ : FiniteDimensional 𝕜 (X ⟶ X)
inst✝² : FiniteDimensional 𝕜 (X ⟶ Y)
inst✝¹ : Simple X
inst✝ : Simple Y
h : ¬finrank 𝕜 (X ⟶ Y) = 1
this : finrank 𝕜 (X ⟶ Y) ≤ 1
⊢ finrank 𝕜 (X ⟶ Y) = 0
[PROOFSTEP]
interval_cases finrank 𝕜 (X ⟶ Y)
[GOAL]
case «0»
C : Type u_1
inst✝⁹ : Category.{u_3, u_1} C
inst✝⁸ : Preadditive C
𝕜 : Type u_2
inst✝⁷ : Field 𝕜
inst✝⁶ : IsAlgClosed 𝕜
inst✝⁵ : Linear 𝕜 C
inst✝⁴ : HasKernels C
X Y : C
inst✝³ : FiniteDimensional 𝕜 (X ⟶ X)
inst✝² : FiniteDimensional 𝕜 (X ⟶ Y)
inst✝¹ : Simple X
inst✝ : Simple Y
h : ¬0 = 1
this : 0 ≤ 1
⊢ 0 = 0
[PROOFSTEP]
rfl
[GOAL]
case «1»
C : Type u_1
inst✝⁹ : Category.{u_3, u_1} C
inst✝⁸ : Preadditive C
𝕜 : Type u_2
inst✝⁷ : Field 𝕜
inst✝⁶ : IsAlgClosed 𝕜
inst✝⁵ : Linear 𝕜 C
inst✝⁴ : HasKernels C
X Y : C
inst✝³ : FiniteDimensional 𝕜 (X ⟶ X)
inst✝² : FiniteDimensional 𝕜 (X ⟶ Y)
inst✝¹ : Simple X
inst✝ : Simple Y
h : ¬1 = 1
this : 1 ≤ 1
⊢ 1 = 0
[PROOFSTEP]
exact False.elim (h rfl)
[GOAL]
C : Type u_1
inst✝⁸ : Category.{u_3, u_1} C
inst✝⁷ : Preadditive C
𝕜 : Type u_2
inst✝⁶ : Field 𝕜
inst✝⁵ : IsAlgClosed 𝕜
inst✝⁴ : Linear 𝕜 C
inst✝³ : HasKernels C
X Y : C
inst✝² : ∀ (X Y : C), FiniteDimensional 𝕜 (X ⟶ Y)
inst✝¹ : Simple X
inst✝ : Simple Y
⊢ finrank 𝕜 (X ⟶ Y) = if Nonempty (X ≅ Y) then 1 else 0
[PROOFSTEP]
split_ifs with h
[GOAL]
case pos
C : Type u_1
inst✝⁸ : Category.{u_3, u_1} C
inst✝⁷ : Preadditive C
𝕜 : Type u_2
inst✝⁶ : Field 𝕜
inst✝⁵ : IsAlgClosed 𝕜
inst✝⁴ : Linear 𝕜 C
inst✝³ : HasKernels C
X Y : C
inst✝² : ∀ (X Y : C), FiniteDimensional 𝕜 (X ⟶ Y)
inst✝¹ : Simple X
inst✝ : Simple Y
h : Nonempty (X ≅ Y)
⊢ finrank 𝕜 (X ⟶ Y) = 1
case neg
C : Type u_1
inst✝⁸ : Category.{u_3, u_1} C
inst✝⁷ : Preadditive C
𝕜 : Type u_2
inst✝⁶ : Field 𝕜
inst✝⁵ : IsAlgClosed 𝕜
inst✝⁴ : Linear 𝕜 C
inst✝³ : HasKernels C
X Y : C
inst✝² : ∀ (X Y : C), FiniteDimensional 𝕜 (X ⟶ Y)
inst✝¹ : Simple X
inst✝ : Simple Y
h : ¬Nonempty (X ≅ Y)
⊢ finrank 𝕜 (X ⟶ Y) = 0
[PROOFSTEP]
exact (finrank_hom_simple_simple_eq_one_iff 𝕜 X Y).2 h
[GOAL]
case neg
C : Type u_1
inst✝⁸ : Category.{u_3, u_1} C
inst✝⁷ : Preadditive C
𝕜 : Type u_2
inst✝⁶ : Field 𝕜
inst✝⁵ : IsAlgClosed 𝕜
inst✝⁴ : Linear 𝕜 C
inst✝³ : HasKernels C
X Y : C
inst✝² : ∀ (X Y : C), FiniteDimensional 𝕜 (X ⟶ Y)
inst✝¹ : Simple X
inst✝ : Simple Y
h : ¬Nonempty (X ≅ Y)
⊢ finrank 𝕜 (X ⟶ Y) = 0
[PROOFSTEP]
exact (finrank_hom_simple_simple_eq_zero_iff 𝕜 X Y).2 (not_nonempty_iff.mp h)
|
"""
"""
import numpy as np
from abc import ABC, abstractmethod
from copy import deepcopy
from typing import List, Optional, Type, Union
from .. import OperationGraph
from ..operations import *
from ..visitors import OperationCounter
from ..transformers import DropPrefix
from ...utils import get_subclasses
class _Layer(type):
def __new__(self, name, bases, namespace, **kwargs):
if name == "Layer":
return super().__new__(self, name, bases, namespace, **kwargs)
if "OP_PATTERN" not in namespace:
raise TypeError(f"Layer {name} must specify `OP_PATTERN`")
op_pattern = namespace["OP_PATTERN"]
if (
op_pattern is not None
and not isinstance(op_pattern, OperationPattern)
and (
not isinstance(op_pattern, type)
or not issubclass(op_pattern, Operation)
)
):
raise TypeError("`OP_PATTERN` must be an operation pattern")
return super().__new__(self, name, bases, namespace, **kwargs)
@property
def OP_PATTERN(self) -> Union[Type[Operation], OperationPattern, None]:
return self.__dict__["OP_PATTERN"]
class LayerMatch:
def __init__(self, layer, input_op_graph):
self.layer = layer
self.input_op_graph = input_op_graph
class Layer(metaclass=_Layer):
@classmethod
@abstractmethod
def from_operation_graph(cls, operation_graph):
raise NotImplementedError()
@classmethod
def match(
cls: Type["Layer"],
operation_graph: OperationGraph,
layer_types: Optional[List[Type["Layer"]]] = None,
) -> Optional[LayerMatch]:
if cls is Layer and layer_types is None:
layer_types = list(get_subclasses(cls))
elif cls is not Layer:
if layer_types is not None:
raise TypeError(
"match() got an unexpected keyword argument 'layer_types'"
)
layer_types = [cls]
best_match: Optional[List[Operation]] = None
best_op_count = float("inf")
best_layer_type = Layer
assert layer_types is not None
        for layer_type in layer_types:
            if layer_type.OP_PATTERN is None:
                continue
            matches = layer_type.OP_PATTERN.match(operation_graph.output_operations)
            for match in matches:
                # Count the operations covered by this candidate match; the
                # visitor accumulates the count over all matched operations.
                op_count = 0
                visitor = OperationCounter()
                for op in match:
                    op_count = visitor.visit(op)
                # Prefer the candidate match with the smallest operation count.
                if op_count < best_op_count:
                    best_match = match
                    best_op_count = op_count
                    best_layer_type = layer_type
if best_match is None:
return None
input_op_graph = OperationGraph(best_match)
op_graph = OperationGraph(operation_graph.walk(DropPrefix(input_op_graph)))
return LayerMatch(
best_layer_type.from_operation_graph(op_graph), input_op_graph
)
class InputLayer(Layer):
OP_PATTERN = Input
def __init__(self, shape, dtype):
self.shape = tuple(shape)
self.dtype = dtype
@classmethod
def from_operation_graph(cls, operation_graph):
shape = tuple(
d if d >= 0 else 1 for d in operation_graph.output_operations[0].shape
)
dtype = operation_graph.output_operations[0].dtype
return cls(shape, dtype)
class FullyConnected(Layer):
OP_PATTERN = (
(((Transpose | None) >> (Flatten | Reshape)) | None)
>> (Gemm | (MatMul >> Add))
>> (Activation | None)
)
def __init__(self, weights, bias, activation=None, w_permutation=None):
self.weights = weights
self.bias = bias
self.activation = activation
self.w_permutation = (
w_permutation
if w_permutation is not None
else np.arange(self.weights.shape[0])
)
@classmethod
def from_operation_graph(cls, operation_graph):
# check activation type
op = operation_graph.output_operations
assert len(op) == 1
op = op[0]
activation = None
if isinstance(op, (Relu, Sigmoid, Tanh)):
activation = op.__class__.__name__.lower()
op = op.inputs
assert len(op) == 1
op = op[0]
elif not isinstance(op, (Gemm, Add)):
raise ValueError(
"Expected operation of type (Gemm | Add | Activation), but got %s"
% op.__class__.__name__
)
# get weights and biases
weights = None
bias = None
if isinstance(op, Gemm):
if op.alpha != 1.0 or op.beta != 1.0:
raise ValueError("Scaling not supported in Fully Connected layers.")
if not isinstance(op.a, Operation):
raise ValueError(
"Constant input tensors are not supported for GeMM "
"in Fully Connected layers."
)
if op.transpose_a:
raise ValueError(
"Transposing input to Fully Connected layer is not supported."
)
if isinstance(op.b, Operation):
raise ValueError(
"Multiple input tensors are not supported for GeMM "
"in Fully Connected layers."
)
weights = op.b
if op.transpose_b:
weights = weights.T
if isinstance(op.c, Operation):
raise ValueError(
"Variable input tensors are not supported for GeMM bias "
"in Fully Connected layers."
)
bias = op.c
elif isinstance(op, Add):
if not isinstance(op.a, Operation):
raise ValueError(
"Constant input tensors are not supported for Add "
"in Fully Connected layers."
)
bias = op.b
op = op.a
if not isinstance(op.a, Operation):
raise ValueError(
"Constant input tensors are not supported for MatMul "
"in Fully Connected layers."
)
weights = op.b
else:
raise ValueError(
"Expected type (Gemm | (MatMul >> Add)), but got %s"
% op.__class__.__name__
)
op = op.inputs
assert len(op) == 1
op = op[0]
if isinstance(op, Input):
return cls(weights, bias, activation=activation)
if not isinstance(op, (Flatten, Reshape)):
raise ValueError(
"Expected type (None | (Transpose >> (Flatten | Reshape))), but got %s"
% op.__class__.__name__
)
op = op.inputs
if len(op) > 1:
return cls(weights, bias, activation=activation)
assert len(op) == 1
op = op[0]
if isinstance(op, Input):
return cls(weights, bias, activation=activation)
elif isinstance(op, Transpose):
if not isinstance(op.x, Input):
raise ValueError("Expected Transpose to be applied to Input.")
permutation = np.asarray(op.permutation)
undo_permutation = permutation[permutation]
input_shape = np.asarray(op.x.shape)[permutation]
weights_permutation = (
                np.arange(np.prod(input_shape))
.reshape(input_shape)
.transpose(undo_permutation)
.flatten()
)
else:
raise ValueError(
"Expected type Transpose, but got %s" % op.__class__.__name__
)
return cls(
weights, bias, activation=activation, w_permutation=weights_permutation
)
class Convolutional(Layer):
OP_PATTERN = Conv >> (Activation | None)
def __init__(
self, weights, bias, activation=None, kernel_shape=None, strides=1, pads=0
):
self.weights = weights
self.bias = bias
self.activation = activation
self.kernel_shape = kernel_shape
if self.kernel_shape is None:
self.kernel_shape = self.weights.shape[2:]
self.strides = strides
self.pads = pads
@classmethod
def from_operation_graph(cls, operation_graph):
op = operation_graph.output_operations
assert len(op) == 1
op = op[0]
# check activation type
activation = None
if isinstance(op, (Relu, Sigmoid, Tanh)):
activation = op.__class__.__name__.lower()
op = op.inputs
assert len(op) == 1
op = op[0]
elif not isinstance(op, Conv):
raise ValueError(
"Expected operation of type (Conv | Activation), but got %s"
% op.__class__.__name__
)
# get weights, biases, and configuration
weights = None
bias = None
kernel_shape = None
pads = None
strides = None
if isinstance(op, Conv):
if np.any(op.dilations != 1):
raise ValueError(
"Dilation is currently not supported in Convolutional layers."
)
if op.group != 1:
raise ValueError(
"Grouping is currently not supported in Convolutional layers."
)
if not isinstance(op.x, Operation):
raise ValueError(
"Constant input tensors are not supported for Conv "
"in Convolutional layers."
)
weights = op.w
bias = op.b
kernel_shape = op.kernel_shape
strides = op.strides
pads = op.pads
else:
raise ValueError("Expected type Conv, but got %s" % op.__class__.__name__)
return cls(
weights,
bias,
activation=activation,
kernel_shape=kernel_shape,
strides=strides,
pads=pads,
)
|
[STATEMENT]
lemma sint_dist:
fixes x y ::word
assumes "x \<noteq> y"
shows "sint x \<noteq> sint y"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. sint x \<noteq> sint y
[PROOF STEP]
by (simp add: assms) |
Load LFindLoad.
From lfind Require Import LFind.
From QuickChick Require Import QuickChick.
From adtind Require Import goal33.
Derive Show for natural.
Derive Arbitrary for natural.
Instance Dec_Eq_natural : Dec_Eq natural.
Proof. dec_eq. Qed.
Lemma conj23synthconj3 : forall (lv0 : natural) (lv1 : natural) (lv2 : natural) (lv3 : natural), (@eq natural (Succ (plus (mult lv0 lv1) lv2)) (plus lv3 (plus lv1 (Succ Zero)))).
Admitted.
QuickChick conj23synthconj3.
|
#!/bin/python
import json
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import normalize
import pandas as pd
import nltk
import time
import os
import sys
from sklearn.base import BaseEstimator
import numpy as np
from data_io import *
A = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
B = 'BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB'
class Tfidf(BaseEstimator):
    def __init__(self, max_df=3.0, ngram_range=(1, 1), a=True):
        # Only store the constructor arguments here; the vectorizer itself is
        # built in fit() so that parameters set later via set_params (as
        # GridSearchCV does) are actually used.
        self.max_df = max_df
        self.ngram_range = ngram_range
        self.a = a
# def set_params(self, **params):
# self.tfidf.set_params(**params)
    def fit(self, X, y):
        if self.a:
            print(A)
        else:
            print(B)
        self.tfidf = TfidfVectorizer(max_df=self.max_df, ngram_range=self.ngram_range)
        self.tfidf.fit(X, y)
        return self
def transform(self, X):
return self.tfidf.transform(X)
if __name__ == "__main__":
labeled = load_data()
param_grid = {
'tfidf__max_df': [1.0],
'tfidf__ngram_range': [[1, 1]],
"lr__class_weight": [
None
],
'tfidf__a': [True, False]
}
pipe = Pipeline([
('tfidf', Tfidf()),
('lr', LogisticRegression(n_jobs=-1))
])
gs = GridSearchCV(pipe, param_grid=param_grid, n_jobs=1, verbose=4, cv=5)
gs.fit(np.array(labeled.data), labeled.y)
df_results = pd.DataFrame(gs.cv_results_)
print(df_results)
|
Theorem DeMorgan1 : forall P Q : Prop, ~P \/ ~Q -> ~(P /\ Q).
Proof.
intros.
intro.
destruct H as [ H1 | H2 ].
absurd P.
assumption.
apply H0.
absurd Q.
assumption.
apply H0.
Qed.
Theorem DeMorgan2 : forall P Q : Prop, ~P /\ ~Q -> ~(P \/ Q).
Proof.
intros.
intro.
destruct H0 as [ H1 | H2 ].
absurd P.
apply H.
assumption.
absurd Q.
apply H.
assumption.
Qed.
Theorem DeMorgan3 : forall P Q : Prop, ~(P \/ Q) -> ~P /\ ~Q.
Proof.
intros.
split.
intro.
absurd (P \/ Q).
assumption.
left.
assumption.
intro.
absurd (P \/ Q).
assumption.
right.
assumption.
Qed.
|
open import Formalization.PredicateLogic.Signature
module Formalization.PredicateLogic.Syntax (𝔏 : Signature) where
open Signature(𝔏)
open import Data.ListSized
import Lvl
open import Functional using (_∘_ ; _∘₂_ ; swap)
open import Numeral.Finite
open import Numeral.Natural
open import Sets.PredicateSet using (PredSet)
open import Type
private variable ℓ : Lvl.Level
private variable args vars : ℕ
data Term (vars : ℕ) : Type{ℓₒ} where
var : 𝕟(vars) → Term(vars) -- Variables
func : Obj(args) → List(Term(vars))(args) → Term(vars) -- Constants/functions
-- Formulas.
-- Inductive definition of the grammatical elements of the language of predicate logic.
data Formula : ℕ → Type{ℓₚ Lvl.⊔ ℓₒ} where
_$_ : Prop(args) → List(Term(vars))(args) → Formula(vars) -- Relations
⊤ : Formula(vars) -- Tautology (Top / True)
⊥ : Formula(vars) -- Contradiction (Bottom / False)
_∧_ : Formula(vars) → Formula(vars) → Formula(vars) -- Conjunction (And)
_∨_ : Formula(vars) → Formula(vars) → Formula(vars) -- Disjunction (Or)
_⟶_ : Formula(vars) → Formula(vars) → Formula(vars) -- Implication
Ɐ : Formula(𝐒(vars)) → Formula(vars)
∃ : Formula(𝐒(vars)) → Formula(vars)
-- A sentence is a formula with no variables occurring.
Sentence = Formula(𝟎)
infix 1011 _$_
infixr 1005 _∧_
infixr 1004 _∨_
infixr 1000 _⟶_
-- Negation
¬_ : Formula(vars) → Formula(vars)
¬_ = _⟶ ⊥
-- Double negation
¬¬_ : Formula(vars) → Formula(vars)
¬¬_ = (¬_) ∘ (¬_)
-- Reverse implication
_⟵_ : Formula(vars) → Formula(vars) → Formula(vars)
_⟵_ = swap(_⟶_)
-- Equivalence
_⟷_ : Formula(vars) → Formula(vars) → Formula(vars)
p ⟷ q = (p ⟵ q) ∧ (p ⟶ q)
-- (Nor)
_⊽_ : Formula(vars) → Formula(vars) → Formula(vars)
_⊽_ = (¬_) ∘₂ (_∨_)
-- (Nand)
_⊼_ : Formula(vars) → Formula(vars) → Formula(vars)
_⊼_ = (¬_) ∘₂ (_∧_)
-- (Exclusive or / Xor)
_⊻_ : Formula(vars) → Formula(vars) → Formula(vars)
_⊻_ = (¬_) ∘₂ (_⟷_)
infix 1010 ¬_ ¬¬_
infixl 1000 _⟵_ _⟷_
Ɐ₊ : Formula(vars) → Sentence
Ɐ₊{𝟎} φ = φ
Ɐ₊{𝐒 v} φ = Ɐ₊{v} (Ɐ φ)
∃₊ : Formula(vars) → Sentence
∃₊{𝟎} φ = φ
∃₊{𝐒 v} φ = ∃₊{v} (∃ φ)
|
The Davis Youth Soccer League was a soccer league for youth. Colloquially, it was known as club soccer or CYSA, and it was more selective and competitive than Davis's other youth soccer league (AYSO). To join the league, youth had to try out, be selected by a league coach for their age bracket, and make the commitments required in terms of cost, game play and parental involvement. Games and practices were held at the DYSL Soccer Fields on County Road 105.
In 2010 they became the Davis Legacy Soccer Club.
|
SUBROUTINE IS_CNRG ( report, lenr, maxs, trep, nlen, iret )
C************************************************************************
C* IS_CNRG *
C* *
C* This subroutine removes geographical reference points and replaces *
C* slashes with spaces in a Canadian SIGMET area string. *
C* *
C* IS_CNRG ( REPORT, LENR, MAXS, TREP, NLEN, IRET ) *
C* *
C* Input parameters: *
C* REPORT CHAR* Canadian SIGMET area string *
C* LENR INTEGER Length of string report *
C* MAXS INTEGER Maximum length of string *
C* *
C* Output parameters: *
C* TREP CHAR* New Canadian SIGMET area string *
C* NLEN INTEGER New length of string trep *
C* IRET INTEGER Return code *
C* 0 = normal return *
C* 1 = no slashes *
C* *
C** *
C* Log: *
C* F. J. Yen/NCEP 10/03 Created *
C* F. J. Yen/NCEP 12/03 Return 1 when no slashes in area string.*
C************************************************************************
INCLUDE 'GEMPRM.PRM'
C*
CHARACTER*(*) report, trep
C*
C------------------------------------------------------------------------
iret = 0
C
nslsh = INDEX ( report(:lenr), '/' )
IF ( nslsh .eq. 0 ) THEN
nlen = lenr
trep(:nlen) = report(:lenr)
iret = 1
RETURN
END IF
io = nslsh + 1
nlen = 0
DO WHILE ( io .lt. lenr )
nlen = nlen + 1
trep ( nlen:nlen ) = ' '
C
C* Copy the lat/lon field up to the next slash
C
DO WHILE ( (report (io:io) .ne. '/') .and. (io .le. lenr) )
nlen = nlen + 1
trep ( nlen:nlen ) = report (io:io )
io = io + 1
END DO
C
C* Skip over the geographical reference and slashes
C
nlen = nlen + 1
trep ( nlen:nlen ) = ' '
io = io + 1
DO WHILE ( (report (io:io) .ne. '/') .and. (io .le. lenr) )
io = io + 1
END DO
io = io + 1
END DO
C*
RETURN
END
|
library(nlme)
library(ipw)
library(tidyverse)
#Multilevel panel regression with cross-lagged effects will examine the associations among psychological and behavioral factors and health outcomes over time for HIV positive patients with and without diabetes.
load('data/cleanedData.RData')
table(duplicated(dat[,c('StudyId','time')]))
## do people switch sites?
dat%>%group_by(StudyId)%>%summarize(nsite=n_distinct(Site,na.rm=TRUE))%>%xtabs(~nsite,.)
site <- dat%>%group_by(StudyId)%>%summarize(Site=na.omit(Site)[1])
## function to combine records from the same day
comb <- function(x){
if(all(is.na(x))) return(NA)
x <- na.omit(x)
if(length(x)==1) return(x)
if(n_distinct(x)==1) return(x[1])
if(!is.numeric(x)) return(x[1])
mean(x)
}
dat <- dat%>%filter(!is.na(Date))%>%select(-Date,-BirthYear,-Site)%>%group_by(StudyId,time)%>%summarize_all(comb)
#### effect of dep on adhere controlling for previous cd4, substance abuse, dep
dat$cd4Sqrt <- sqrt(dat$cd4)
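## A hedged sketch of the cross-lagged multilevel model described in the comments
## above. The column names `adhere` and `dep` are taken from the comment but are
## not constructed in this script, so they are assumptions; `StudyId`, `time`,
## and `cd4Sqrt` come from the code above. Kept commented out so the script
## still runs as-is.
# datLag <- dat %>%
#   group_by(StudyId) %>%
#   arrange(time, .by_group = TRUE) %>%
#   mutate(dep_prev = lag(dep), cd4_prev = lag(cd4Sqrt)) %>%
#   ungroup()
# m1 <- lme(adhere ~ dep_prev + cd4_prev,
#           random = ~ 1 | StudyId,
#           data = datLag, na.action = na.omit)
# summary(m1)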
|
\section{Introduction}
Glass is a material that features prominently in criminal investigations. When a suspect is apprehended for a crime involving shattered glass, it is standard procedure to submit particles from their clothing to a forensic science laboratory, in order to determine whether or not evidentiary material is present. However, even when glass particles are detected, it often remains unclear whether those particles are connected to the crime. On this basis, the goal of this project was to build a machine learning model that is able to classify different types of glass fragments based on their elemental composition and refractive index (RI).
% we could add something along the lines:
% the goal of this paper is to report the main findings % project, and has this and this section. but not really necessary i feel
% The goal of this report is to examine different classification methods for the purpose of classifying different type of glass. In the report, we first describe given data-set. We then proceed to the introduction of selected classification methods and description of their implementation. Finally, we conclude the report by discussing performance of the chosen classification methods. |
section \<open> Collections from Z Toolkit \<close>
theory Collections_Z
imports "Shallow-Expressions.Collections" "Z_Toolkit.Relation_Lib"
begin
subsection \<open> Partial Function Collection Lens \<close>
definition pfun_collection_lens :: "'a \<Rightarrow> 'b \<Longrightarrow> 'a \<Zpfun> 'b" where
[lens_defs]: "pfun_collection_lens = pfun_lens"
adhoc_overloading collection_lens pfun_collection_lens
lemma pfun_collection_lens_mwb [simp]: "mwb_lens (pfun_collection_lens e)"
by (simp add: pfun_collection_lens_def)
lemma source_pfun_collection_lens: "\<S>\<^bsub>pfun_collection_lens i\<^esub> = {f. i \<in> pdom(f)}"
by (auto simp add: lens_defs lens_source_def, metis pfun_upd_ext)
lemma defined_pfun_collection_lens [simp, code_unfold]:
"\<lbrakk> vwb_lens x; $x \<sharp> (e)\<^sub>e \<rbrakk> \<Longrightarrow> \<^bold>D(x[e]) = (e \<in> pdom($x))\<^sub>e"
by (simp add: lens_defined_def src_dyn_lens unrest source_ns_alpha source_pfun_collection_lens)
(simp add: lens_defs wb_lens.source_UNIV)
subsection \<open> Finite Function Collection Lens \<close>
definition ffun_collection_lens :: "'a \<Rightarrow> 'b \<Longrightarrow> 'a \<Zffun> 'b" where
[lens_defs]: "ffun_collection_lens = ffun_lens"
adhoc_overloading collection_lens ffun_collection_lens
lemma ffun_collection_lens_mwb [simp]: "mwb_lens (ffun_collection_lens e)"
by (simp add: ffun_collection_lens_def)
lemma source_ffun_collection_lens: "\<S>\<^bsub>ffun_collection_lens i\<^esub> = {f. i \<in> fdom(f)}"
by (auto simp add: lens_defs lens_source_def, metis ffun_upd_ext)
lemma defined_ffun_collection_lens [simp, code_unfold]:
"\<lbrakk> vwb_lens x; $x \<sharp> (e)\<^sub>e \<rbrakk> \<Longrightarrow> \<^bold>D(x[e]) = (e \<in> fdom($x))\<^sub>e"
by (simp add: lens_defined_def src_dyn_lens unrest source_ns_alpha source_ffun_collection_lens)
(simp add: lens_defs wb_lens.source_UNIV)
end |
(* Title: Capped Omega Algebras
Author: Walter Guttmann
Maintainer: Walter Guttmann <walter.guttmann at canterbury.ac.nz>
*)
section \<open>Capped Omega Algebras\<close>
theory Capped_Omega_Algebras
imports Omega_Algebras
begin
class capped_omega =
fixes capped_omega :: "'a \<Rightarrow> 'a \<Rightarrow> 'a" ("_\<^sup>\<omega>\<^sub>_" [100,100] 100)
class capped_omega_algebra = bounded_left_zero_kleene_algebra + bounded_distrib_lattice + capped_omega +
assumes capped_omega_unfold: "y\<^sup>\<omega>\<^sub>v = y * y\<^sup>\<omega>\<^sub>v \<sqinter> v"
assumes capped_omega_induct: "x \<le> (y * x \<squnion> z) \<sqinter> v \<longrightarrow> x \<le> y\<^sup>\<omega>\<^sub>v \<squnion> y\<^sup>\<star> * z"
text \<open>AACP Theorem 6.1\<close>
notation
top ("\<top>")
sublocale capped_omega_algebra < capped: bounded_left_zero_omega_algebra where omega = "(\<lambda>y . y\<^sup>\<omega>\<^sub>\<top>)"
apply unfold_locales
apply (metis capped_omega_unfold inf_top_right)
by (simp add: capped_omega_induct sup_commute)
context capped_omega_algebra
begin
text \<open>AACP Theorem 6.2\<close>
lemma capped_omega_below_omega:
"y\<^sup>\<omega>\<^sub>v \<le> y\<^sup>\<omega>\<^sub>\<top>"
using capped.omega_induct_mult capped_omega_unfold order.eq_iff by force
text \<open>AACP Theorem 6.3\<close>
lemma capped_omega_below:
"y\<^sup>\<omega>\<^sub>v \<le> v"
using capped_omega_unfold order.eq_iff by force
text \<open>AACP Theorem 6.4\<close>
lemma capped_omega_one:
"1\<^sup>\<omega>\<^sub>v = v"
proof -
have "v \<le> (1 * v \<squnion> bot) \<sqinter> v"
by simp
hence "v \<le> 1\<^sup>\<omega>\<^sub>v \<squnion> 1\<^sup>\<star> * bot"
by (simp add: capped_omega_induct)
also have "... = 1\<^sup>\<omega>\<^sub>v"
by (simp add: star_one)
finally show ?thesis
by (simp add: capped_omega_below order.antisym)
qed
text \<open>AACP Theorem 6.5\<close>
lemma capped_omega_zero:
"bot\<^sup>\<omega>\<^sub>v = bot"
by (metis capped_omega_below_omega bot_unique capped.omega_bot)
lemma star_below_cap:
"y \<le> u \<Longrightarrow> z \<le> v \<Longrightarrow> u * v \<le> v \<Longrightarrow> y\<^sup>\<star> * z \<le> v"
by (metis le_sup_iff order.trans mult_left_isotone star_left_induct)
lemma capped_fix:
assumes "y \<le> u"
and "z \<le> v"
and "u * v \<le> v"
shows "(y * (y\<^sup>\<omega>\<^sub>v \<squnion> y\<^sup>\<star> * z) \<squnion> z) \<sqinter> v = y\<^sup>\<omega>\<^sub>v \<squnion> y\<^sup>\<star> * z"
proof -
have "(y * (y\<^sup>\<omega>\<^sub>v \<squnion> y\<^sup>\<star> * z) \<squnion> z) \<sqinter> v = (y * y\<^sup>\<omega>\<^sub>v \<squnion> y\<^sup>\<star> * z) \<sqinter> v"
by (simp add: mult_left_dist_sup star.circ_loop_fixpoint sup_assoc)
also have "... = (y * y\<^sup>\<omega>\<^sub>v \<sqinter> v) \<squnion> (y\<^sup>\<star> * z \<sqinter> v)"
by (simp add: inf_sup_distrib2)
also have "... = y\<^sup>\<omega>\<^sub>v \<squnion> y\<^sup>\<star> * z"
using assms capped_omega_unfold le_iff_inf star_below_cap by auto
finally show ?thesis
.
qed
lemma capped_fixpoint:
"y \<le> u \<Longrightarrow> z \<le> v \<Longrightarrow> u * v \<le> v \<Longrightarrow> is_fixpoint (\<lambda>x . (y * x \<squnion> z) \<sqinter> v) (y\<^sup>\<omega>\<^sub>v \<squnion> y\<^sup>\<star> * z)"
by (simp add: capped_fix is_fixpoint_def)
lemma capped_greatest_fixpoint:
"y \<le> u \<Longrightarrow> z \<le> v \<Longrightarrow> u * v \<le> v \<Longrightarrow> is_greatest_fixpoint (\<lambda>x . (y * x \<squnion> z) \<sqinter> v) (y\<^sup>\<omega>\<^sub>v \<squnion> y\<^sup>\<star> * z)"
by (smt capped_fix order_refl capped_omega_induct is_greatest_fixpoint_def)
lemma capped_postfixpoint:
"y \<le> u \<Longrightarrow> z \<le> v \<Longrightarrow> u * v \<le> v \<Longrightarrow> is_postfixpoint (\<lambda>x . (y * x \<squnion> z) \<sqinter> v) (y\<^sup>\<omega>\<^sub>v \<squnion> y\<^sup>\<star> * z)"
using capped_fix inf.eq_refl is_postfixpoint_def by auto
lemma capped_greatest_postfixpoint:
"y \<le> u \<Longrightarrow> z \<le> v \<Longrightarrow> u * v \<le> v \<Longrightarrow> is_greatest_postfixpoint (\<lambda>x . (y * x \<squnion> z) \<sqinter> v) (y\<^sup>\<omega>\<^sub>v \<squnion> y\<^sup>\<star> * z)"
by (smt capped_fix order_refl capped_omega_induct is_greatest_postfixpoint_def)
text \<open>AACP Theorem 6.6\<close>
lemma capped_nu:
"y \<le> u \<Longrightarrow> z \<le> v \<Longrightarrow> u * v \<le> v \<Longrightarrow> \<nu>(\<lambda>x . (y * x \<squnion> z) \<sqinter> v) = y\<^sup>\<omega>\<^sub>v \<squnion> y\<^sup>\<star> * z"
by (metis capped_greatest_fixpoint greatest_fixpoint_same)
lemma capped_pnu:
"y \<le> u \<Longrightarrow> z \<le> v \<Longrightarrow> u * v \<le> v \<Longrightarrow> p\<nu>(\<lambda>x . (y * x \<squnion> z) \<sqinter> v) = y\<^sup>\<omega>\<^sub>v \<squnion> y\<^sup>\<star> * z"
by (metis capped_greatest_postfixpoint greatest_postfixpoint_same)
text \<open>AACP Theorem 6.7\<close>
lemma unfold_capped_omega:
"y \<le> u \<Longrightarrow> u * v \<le> v \<Longrightarrow> y * y\<^sup>\<omega>\<^sub>v = y\<^sup>\<omega>\<^sub>v"
by (smt (verit, ccfv_SIG) capped_omega_below capped_omega_unfold inf.order_lesseq_imp le_iff_inf mult_isotone)
text \<open>AACP Theorem 6.8\<close>
lemma star_mult_capped_omega:
assumes "y \<le> u"
and "u * v \<le> v"
shows "y\<^sup>\<star> * y\<^sup>\<omega>\<^sub>v = y\<^sup>\<omega>\<^sub>v"
proof -
have "y * y\<^sup>\<omega>\<^sub>v = y\<^sup>\<omega>\<^sub>v"
using assms unfold_capped_omega by auto
hence "y\<^sup>\<star> * y\<^sup>\<omega>\<^sub>v \<le> y\<^sup>\<omega>\<^sub>v"
by (simp add: star_left_induct_mult)
thus ?thesis
by (metis sup_ge2 order.antisym star.circ_loop_fixpoint)
qed
text \<open>AACP Theorem 6.9\<close>
lemma star_zero_below_capped_omega_zero:
assumes "y \<le> u"
and "u * v \<le> v"
shows "y\<^sup>\<star> * bot \<le> y\<^sup>\<omega>\<^sub>v * bot"
proof -
have "y * y\<^sup>\<omega>\<^sub>v \<le> v"
using assms capped_omega_below unfold_capped_omega by auto
hence "y * y\<^sup>\<omega>\<^sub>v = y\<^sup>\<omega>\<^sub>v"
using assms unfold_capped_omega by auto
thus ?thesis
by (metis bot_least eq_refl mult_assoc star_below_cap)
qed
lemma star_zero_below_capped_omega:
"y \<le> u \<Longrightarrow> u * v \<le> v \<Longrightarrow> y\<^sup>\<star> * bot \<le> y\<^sup>\<omega>\<^sub>v"
by (simp add: star_loop_least_fixpoint unfold_capped_omega)
lemma capped_omega_induct_meet_zero:
"x \<le> y * x \<sqinter> v \<Longrightarrow> x \<le> y\<^sup>\<omega>\<^sub>v \<squnion> y\<^sup>\<star> * bot"
by (simp add: capped_omega_induct)
text \<open>AACP Theorem 6.10\<close>
lemma capped_omega_induct_meet:
"y \<le> u \<Longrightarrow> u * v \<le> v \<Longrightarrow> x \<le> y * x \<sqinter> v \<Longrightarrow> x \<le> y\<^sup>\<omega>\<^sub>v"
by (metis capped_omega_induct_meet_zero sup_commute le_iff_sup star_zero_below_capped_omega)
lemma capped_omega_induct_equal:
"x = (y * x \<squnion> z) \<sqinter> v \<Longrightarrow> x \<le> y\<^sup>\<omega>\<^sub>v \<squnion> y\<^sup>\<star> * z"
using capped_omega_induct inf.le_iff_sup by auto
text \<open>AACP Theorem 6.11\<close>
lemma capped_meet_nu:
assumes "y \<le> u"
and "u * v \<le> v"
shows "\<nu>(\<lambda>x . y * x \<sqinter> v) = y\<^sup>\<omega>\<^sub>v"
proof -
have "y\<^sup>\<omega>\<^sub>v \<squnion> y\<^sup>\<star> * bot = y\<^sup>\<omega>\<^sub>v"
by (smt assms star_zero_below_capped_omega le_iff_sup sup_commute)
hence "\<nu>(\<lambda>x . (y * x \<squnion> bot) \<sqinter> v) = y\<^sup>\<omega>\<^sub>v"
by (metis assms capped_nu bot_least)
thus ?thesis
by simp
qed
lemma capped_meet_pnu:
assumes "y \<le> u"
and "u * v \<le> v"
shows "p\<nu>(\<lambda>x . y * x \<sqinter> v) = y\<^sup>\<omega>\<^sub>v"
proof -
have "y\<^sup>\<omega>\<^sub>v \<squnion> y\<^sup>\<star> * bot = y\<^sup>\<omega>\<^sub>v"
by (smt assms star_zero_below_capped_omega le_iff_sup sup_commute)
hence "p\<nu>(\<lambda>x . (y * x \<squnion> bot) \<sqinter> v) = y\<^sup>\<omega>\<^sub>v"
by (metis assms capped_pnu bot_least)
thus ?thesis
by simp
qed
text \<open>AACP Theorem 6.12\<close>
lemma capped_omega_isotone:
"y \<le> u \<Longrightarrow> u * v \<le> v \<Longrightarrow> t \<le> y \<Longrightarrow> t\<^sup>\<omega>\<^sub>v \<le> y\<^sup>\<omega>\<^sub>v"
by (metis capped_omega_induct_meet capped_omega_unfold le_iff_sup inf.sup_left_isotone mult_right_sub_dist_sup_left)
text \<open>AACP Theorem 6.13\<close>
lemma capped_omega_simulation:
assumes "y \<le> u"
and "s \<le> u"
and "u * v \<le> v"
and "s * t \<le> y * s"
shows "s * t\<^sup>\<omega>\<^sub>v \<le> y\<^sup>\<omega>\<^sub>v"
proof -
have "s * t\<^sup>\<omega>\<^sub>v \<le> s * t * t\<^sup>\<omega>\<^sub>v \<sqinter> s * v"
by (metis capped_omega_below capped_omega_unfold inf.boundedI inf.cobounded1 mult_right_isotone mult_assoc)
also have "... \<le> s * t * t\<^sup>\<omega>\<^sub>v \<sqinter> v"
using assms(2,3) inf.order_lesseq_imp inf.sup_right_isotone mult_left_isotone by blast
also have "... \<le> y * s * t\<^sup>\<omega>\<^sub>v \<sqinter> v"
using assms(4) inf.sup_left_isotone mult_left_isotone by auto
finally show ?thesis
using assms(1,3) capped_omega_induct_meet mult_assoc by auto
qed
lemma capped_omega_slide_sub:
assumes "s \<le> u"
and "y \<le> u"
and "u * u \<le> u"
and "u * v \<le> v"
shows "s * (y * s)\<^sup>\<omega>\<^sub>v \<le> (s * y)\<^sup>\<omega>\<^sub>v"
proof -
have "s * y \<le> u"
by (meson assms(1-3) mult_isotone order_trans)
thus ?thesis
using assms(1,4) capped_omega_simulation mult_assoc by auto
qed
text \<open>AACP Theorem 6.14\<close>
lemma capped_omega_slide:
"s \<le> u \<Longrightarrow> y \<le> u \<Longrightarrow> u * u \<le> u \<Longrightarrow> u * v \<le> v \<Longrightarrow> s * (y * s)\<^sup>\<omega>\<^sub>v = (s * y)\<^sup>\<omega>\<^sub>v"
by (smt (verit) order.antisym mult_assoc mult_right_isotone capped_omega_unfold capped_omega_slide_sub inf.sup_ge1 order_trans)
lemma capped_omega_sub_dist:
"s \<le> u \<Longrightarrow> y \<le> u \<Longrightarrow> u * v \<le> v \<Longrightarrow> s\<^sup>\<omega>\<^sub>v \<le> (s \<squnion> y)\<^sup>\<omega>\<^sub>v"
by (simp add: capped_omega_isotone)
text \<open>AACP Theorem 6.15\<close>
lemma capped_omega_simulation_2:
assumes "s \<le> u"
and "y \<le> u"
and "u * u \<le> u"
and "u * v \<le> v"
and "y * s \<le> s * y"
shows "(s * y)\<^sup>\<omega>\<^sub>v \<le> s\<^sup>\<omega>\<^sub>v"
proof -
have 1: "s * y \<le> u"
using assms(1-3) inf.order_lesseq_imp mult_isotone by blast
have 2: "s * (s * y)\<^sup>\<omega>\<^sub>v \<le> v"
by (meson assms(1,4) capped_omega_below order.trans mult_isotone)
have "(s * y)\<^sup>\<omega>\<^sub>v = s * (y * s)\<^sup>\<omega>\<^sub>v"
using assms(1-4) capped_omega_slide by auto
also have "... \<le> s * (s * y)\<^sup>\<omega>\<^sub>v"
using 1 assms(4,5) capped_omega_isotone mult_right_isotone by blast
also have "... = s * (s * y)\<^sup>\<omega>\<^sub>v \<sqinter> v"
using 2 inf.order_iff by auto
finally show ?thesis
using assms(1,4) capped_omega_induct_meet by blast
qed
text \<open>AACP Theorem 6.16\<close>
lemma left_plus_capped_omega:
assumes "y \<le> u"
and "u * u \<le> u"
and "u * v \<le> v"
shows "(y * y\<^sup>\<star>)\<^sup>\<omega>\<^sub>v = y\<^sup>\<omega>\<^sub>v"
proof -
have 1: "y * y\<^sup>\<star> \<le> u"
by (metis assms(1,2) star_plus star_below_cap)
hence "y * y\<^sup>\<star> * (y * y\<^sup>\<star>)\<^sup>\<omega>\<^sub>v \<le> v"
using assms(3) capped_omega_below unfold_capped_omega by auto
hence "y * y\<^sup>\<star> * (y * y\<^sup>\<star>)\<^sup>\<omega>\<^sub>v = (y * y\<^sup>\<star>)\<^sup>\<omega>\<^sub>v"
using 1 assms(3) unfold_capped_omega by blast
hence "(y * y\<^sup>\<star>)\<^sup>\<omega>\<^sub>v \<le> y\<^sup>\<omega>\<^sub>v"
using 1 by (smt assms(1,3) capped_omega_simulation mult_assoc mult_semi_associative star.circ_transitive_equal star_simulation_right_equal)
thus ?thesis
using 1 by (meson assms(3) capped_omega_isotone order.antisym star.circ_mult_increasing)
qed
text \<open>AACP Theorem 6.17\<close>
lemma capped_omega_sub_vector:
assumes "z \<le> v"
and "y \<le> u"
and "u * v \<le> v"
shows "y\<^sup>\<omega>\<^sub>u * z \<le> y\<^sup>\<omega>\<^sub>v"
proof -
have "y\<^sup>\<omega>\<^sub>u * z \<le> y * y\<^sup>\<omega>\<^sub>u * z \<sqinter> u * z"
by (metis capped_omega_below capped_omega_unfold eq_refl inf.boundedI inf.cobounded1 mult_isotone)
also have "... \<le> y * y\<^sup>\<omega>\<^sub>u * z \<sqinter> v"
by (metis assms(1,3) inf.sup_left_isotone inf_commute mult_right_isotone order_trans)
finally show ?thesis
using assms(2,3) capped_omega_induct_meet mult_assoc by auto
qed
text \<open>AACP Theorem 6.18\<close>
lemma capped_omega_omega:
"y \<le> u \<Longrightarrow> u * v \<le> v \<Longrightarrow> (y\<^sup>\<omega>\<^sub>u)\<^sup>\<omega>\<^sub>v \<le> y\<^sup>\<omega>\<^sub>v"
by (metis capped_omega_below capped_omega_sub_vector unfold_capped_omega)
end
end
|
function [l1, l2, l3]=dtiEigenvaluesFromWestinShapes(cl, cp, vol, method)
%V is the volume of original tensor
%solution
%Method specifies whether the Westin shapes aligned are computed with
%simple ("new") denominator, l1, or original definition (old) denominator,
%l1+l2+l3
if ~exist('method', 'var')
method='westinShapes_l1';
end
switch method
case 'westinShapes_l1'
%westin shapes are new(simplified) versions, NOT the ones computed by dtiComputeWestinShapes:
%cl=(l1-l2)/l1;
%cp=(l2-l3)/l1;
%cs=l3/l1;
l1_sol(:, 1)=-((-3/pi).^(1/3).*vol.^(1/3))./(2^(2/3).*((-1+cl).*(-1+cl+cp)).^(1/3));
l3_sol(:, 1)=(-1+cl+cp).*l1_sol(:, 1);
l1_sol(:, 2)=(3/pi)^(1/3)./(2^(2/3).*(((-1+cl).*(-1+cl+cp))./vol).^(1/3));
l3_sol(:, 2)=-(-1+cl+cp).*l1_sol(:, 2);
l1_sol(:, 3)=-((-1)^(2/3).*(3/pi)^(1/3))./(2^(2/3).*(((-1+cl).*(-1+cl+cp))./vol).^(1/3));
l3_sol(:, 3)=(-1+cl+cp).*l1_sol(:, 3);
case 'westinShapes_lsum'
%westin shapes as those computed by dtiComputeWestinShapes:
%cl=(l1-l2)/(l1+l2+l3);
%cp=(l2-l3)/(l1+l2+l3);
%cs=l3/(l1+l2+l3);
%I am not sure whether the results produced by this method make sense -- at
%least when plotted in barycentric coordinates. If you want to use it,
%check the code.
l1_sol(:, 1)=-((3/pi).^(1/3).*(-(2+4*cl+cp).^2.*vol).^(1/3))./(2*((-2+2*cl-cp).*(-1+cl+cp)).^(1/3));
l3_sol(:, 1)=-2*l1_sol(1).*(-1+cl+cp)./(2+4*cl+cp);
l1_sol(:, 2)=((3/pi).^(1/3))./(2*(((-2+2*cl-cp).*(-1+cl+cp))./((2+4*cl+cp).^2.*vol)).^(1/3));
l3_sol(:, 2)=-2*l1_sol(2).*(-1+cl+cp)./(2+4*cl+cp);
l1_sol(:, 3)=((-1).^(2/3).*(3/pi).^(1/3))./(2*(((-2+2*cl-cp).*(-1+cl+cp))./((2+4*cl+cp).^2.*vol)).^(1/3));
l3_sol(:, 3)=-2*l1_sol(3).*(-1+cl+cp)./(2+4*cl+cp);
otherwise
fprintf('Enter either "westinShapes_lsum" or "westinShapes_l1" for method'); return;
end
%The three solutions are only different in that some of them are not real! The second one is usually good enough.
solN=1;
while(~isreal(l1_sol(:, solN)) || ~isreal(l3_sol(:, solN)))
solN=solN+1;
end
l1=l1_sol(:, solN);
l3=l3_sol(:, solN);
l2=vol./(l1.*l3.*4.*pi./3);
l2(isnan(l2))=0;
end
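% A hedged usage sketch (round-trip check under the 'westinShapes_l1' convention,
% where cl=(l1-l2)/l1, cp=(l2-l3)/l1 and vol is the tensor ellipsoid volume
% 4/3*pi*l1*l2*l3). All values below are illustrative, not from this file.
% l = [3 2 1]; vol = 4*pi/3*prod(l);
% cl = (l(1)-l(2))/l(1); cp = (l(2)-l(3))/l(1);
% [l1, l2, l3] = dtiEigenvaluesFromWestinShapes(cl, cp, vol, 'westinShapes_l1');
% % Expected to recover approximately l1=3, l2=2, l3=1.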
|
If $S$ is a topological space and $v$ is an open subset of $S$, then for every $x \in v$, there exists an open connected subset $u$ of $S$ such that $x \in u \subseteq v$. |
-- notes-05-friday.agda
open import mylib
-- Π-types = dependent function types
Π : (A : Set)(B : A → Set) → Set
Π A B = (x : A) → B x
syntax Π A (λ x → P) = Π[ x ∈ A ] P
-- Σ-types = dependent pair type
record Σ(A : Set)(B : A → Set) : Set where
constructor _,_
field
proj₁ : A
proj₂ : B proj₁
open Σ
syntax Σ A (λ x → P) = Σ[ x ∈ A ] P
List' : Set → Set
List' A = Σ[ n ∈ ℕ ] Vec A n
List'' : Set → Set -- ex: show that this is isomorphic to lists
List'' A = Σ[ n ∈ ℕ ] Fin n → A
{-
Container representation of lists.
Set of shapes S = ℕ.
Family of positions P : ℕ → Set, P = Fin.
All strictly positive types can be represented as containers ( ~ polynomial
functors) !
-}
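-- A hedged sketch of one direction of the isomorphism exercise above (the helper
-- names `lookupVec` and `tabulate` are assumptions, not taken from mylib):
-- to'' : List' A → List'' A
-- to'' (n , xs) = n , (λ i → lookupVec xs i)
-- from'' : List'' A → List' A
-- from'' (n , f) = n , tabulate f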
{-
A → B = Π[ _ ∈ A ] B
A × B = Σ[ _ ∈ A ] B
-}
_⊎'_ : Set → Set → Set
A ⊎' B = Σ[ b ∈ Bool ] F b
where F : Bool → Set
F true = A
F false = B
_×'_ : Set → Set → Set -- exercise: show that this equivalent to ⊎
A ×' B = Π[ b ∈ Bool ] F b
where F : Bool → Set
F true = A
F false = B
{-
"Propositions as types": for predicate logic
P : A → prop = dependent type
-}
All : (A : Set)(P : A → prop) → prop
All A P = Π[ x ∈ A ] P x
Ex : (A : Set)(P : A → prop) → prop
Ex A P = Σ[ x ∈ A ] P x
syntax All A (λ x → P) = ∀[ x ∈ A ] P
infix 0 All
syntax Ex A (λ x → P) = ∃[ x ∈ A ] P
infix 0 Ex
variable PP QQ : A → prop
taut : (∀[ x ∈ A ] PP x ⇒ Q) ⇔ (∃[ x ∈ A ] PP x) ⇒ Q
proj₁ taut f (a , pa) = f a pa
proj₂ taut g a pa = g (a , pa)
data _≡_ : A → A → prop where
refl : {a : A} → a ≡ a
infix 4 _≡_
-- inductive definition of equality: _≡_ is an equivalence relation
sym : (a b : A) → a ≡ b → b ≡ a
sym a .a refl = refl
trans : {a b c : A} → a ≡ b → b ≡ c → a ≡ c
trans refl q = q
cong : {a b : A}(f : A → B) → a ≡ b → f a ≡ f b
cong f refl = refl
{-
Proving that + is associative
_+_ : ℕ → ℕ → ℕ
zero + n = n
suc m + n = suc (m + n)
-}
assoc : (i j k : ℕ) → (i + j) + k ≡ i + (j + k)
assoc zero j k = refl
assoc (suc i) j k = cong suc (assoc i j k)
-- proof by induction = pattern matching + recursion
ind : (P : ℕ → Set)
→ P 0
→ ((n : ℕ) → P n → P (suc n))
→ (n : ℕ) → P n
ind P z s zero = z
ind P z s (suc n) = s n (ind P z s n)
-- eliminator for ℕ = induction/dependent recursion
{-
data _≡_ : A → A → prop where
refl : {a : A} → a ≡ a
What is ind for equality?
-}
ind≡ : (P : (a b : A) → a ≡ b → prop)
(r : (a : A) → P a a refl)
→ (a b : A)(p : a ≡ b) → P a b p
ind≡ P r a .a refl = r a
-- Ex. derive sym, trans, cong from ind≡ (= J)
uip : (a b : A)(p q : a ≡ b) → p ≡ q
uip = ind≡ (λ a b p → (q : a ≡ b) → p ≡ q) λ a q →
{!!}
--uip refl refl = refl
{-
Is uip derivable from J ? Hofmann & Streicher: groupoid model of type theory.
Restricted version of HoTT (infinity groupoid model of TT); observed: version
of univalence in this theory. Voevodsky formulated HoTT, which supports full
univalence.
-}
|
import language.unitb.parser
open list -- unitb.parser
machine foo
variables x, y
invariants
bar: x ∪ y ⊆ x
initialization
x := ∅, y := ∅
events
move
when grd1 : ∅ ∈ x
then end
add_x begin x := y end
swap begin x,y := y,x end
proofs
bar :=
begin
simp [x_1,y_1], admit,
end,
end
#print foo.correctness
-- #print prefix foo.event.swap
-- #print prefix foo.event.move
#print foo.event.swap.step'
#print foo.event.add_x.step'
#print foo.event.move.step'
#print foo.event.spec
#print foo.init'
#print foo.spec
example (s : foo.state) (J : foo.inv s) : true :=
begin
induction s,
dunfold foo.inv at J,
induction J,
admit
end
example (k : foo.state) : true :=
begin
induction k,
admit
end
|
// Copyright 2015-2020 Tim Kouters (Falcons)
// SPDX-License-Identifier: Apache-2.0
/*
* cRefboxSignalTypes.hpp
*
* Created on: Sep 8, 2015
* Author: Ivo Matthijssen
*/
#ifndef CREFBOXSIGNALTYPES_HPP_
#define CREFBOXSIGNALTYPES_HPP_
#include <string>
#include <map>
#include <boost/assign/list_of.hpp>
enum class refboxSignalEnum
{
INVALID = 0,
STOP,
HALT,
CANCEL,
READY,
START,
RESTART,
FIRST_HALF,
HALF_TIME,
SECOND_HALF,
END_GAME,
KICKOFF_OWN,
KICKOFF_OPP,
FREE_KICK_OWN,
FREE_KICK_OPP,
GOAL_KICK_OWN,
GOAL_KICK_OPP,
THROWIN_KICK_OWN,
THROWIN_KICK_OPP,
CORNER_KICK_OWN,
CORNER_KICK_OPP,
PENALTY_KICK_OWN,
PENALTY_KICK_OPP,
GOAL_OWN,
GOAL_OPP,
SUBGOAL_OWN,
SUBGOAL_OPP,
DUMMY,
DROPPED_BALL,
PARK,
SUBSTITUTION_OWN,
SIZE_OF_ENUM
};
static std::map<std::string, refboxSignalEnum> refboxSignalMapping = boost::assign::map_list_of
("INVALID", refboxSignalEnum::INVALID)
("STOP", refboxSignalEnum::STOP)
("HALT", refboxSignalEnum::HALT)
("CANCEL", refboxSignalEnum::CANCEL)
("READY", refboxSignalEnum::READY)
("START", refboxSignalEnum::START)
("RESTART", refboxSignalEnum::RESTART)
("FIRST_HALF", refboxSignalEnum::FIRST_HALF)
("HALF_TIME", refboxSignalEnum::HALF_TIME)
("SECOND_HALF", refboxSignalEnum::SECOND_HALF)
("END_GAME", refboxSignalEnum::END_GAME)
("KICKOFF_OWN", refboxSignalEnum::KICKOFF_OWN)
("KICKOFF_OPP", refboxSignalEnum::KICKOFF_OPP)
("FREEKICK_OWN", refboxSignalEnum::FREE_KICK_OWN)
("FREEKICK_OPP", refboxSignalEnum::FREE_KICK_OPP)
("GOALKICK_OWN", refboxSignalEnum::GOAL_KICK_OWN)
("GOALKICK_OPP", refboxSignalEnum::GOAL_KICK_OPP)
("THROWIN_OWN", refboxSignalEnum::THROWIN_KICK_OWN)
("THROWIN_OPP", refboxSignalEnum::THROWIN_KICK_OPP)
("CORNER_OWN", refboxSignalEnum::CORNER_KICK_OWN)
("CORNER_OPP", refboxSignalEnum::CORNER_KICK_OPP)
("PENALTY_OWN", refboxSignalEnum::PENALTY_KICK_OWN)
("PENALTY_OPP", refboxSignalEnum::PENALTY_KICK_OPP)
("GOAL_OWN", refboxSignalEnum::GOAL_OWN)
("GOAL_OPP", refboxSignalEnum::GOAL_OPP)
("SUBGOAL_OWN", refboxSignalEnum::SUBGOAL_OWN)
("SUBGOAL_OPP", refboxSignalEnum::SUBGOAL_OPP)
("DUMMY", refboxSignalEnum::DUMMY)
("DROPPED_BALL", refboxSignalEnum::DROPPED_BALL)
("PARK", refboxSignalEnum::PARK)
("SUBSTITUTION_OWN", refboxSignalEnum::SUBSTITUTION_OWN)
// note SUBSTITUTION_OPP is filtered at refboxRelay to prevent it interfering with SUBSTITUTION_OWN
// because only the last command would end up here in teamplay...
// if this is ever needed in teamplay (?), then refboxRelay and the communication to teamplay
// must be made robust for rapid bursts of different refbox commands at once
;
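// Example (hypothetical usage, not part of this header): translating an incoming
// refbox command string, falling back to INVALID for unknown commands.
//
// refboxSignalEnum toSignal(const std::string& cmd)
// {
//     auto it = refboxSignalMapping.find(cmd);
//     return (it != refboxSignalMapping.end()) ? it->second
//                                               : refboxSignalEnum::INVALID;
// }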
#endif /* CREFBOXSIGNALTYPES_HPP_ */
|
import .love08_operational_semantics_demo
/- # LoVe Exercise 8: Operational Semantics -/
set_option pp.beta true
set_option pp.generalized_field_notation false
namespace LoVe
/- ## Question 1: Program Equivalence
For this question, we introduce the notion of program equivalence: `S₁ ≈ S₂`. -/
def big_step_equiv (S₁ S₂ : stmt) : Prop :=
∀s t, (S₁, s) ⟹ t ↔ (S₂, s) ⟹ t
infix ` ≈ ` := big_step_equiv
/- Program equivalence is an equivalence relation, i.e., it is reflexive,
symmetric, and transitive. -/
@[refl] lemma big_step_equiv.refl {S : stmt} :
S ≈ S :=
fix s t,
show (S, s) ⟹ t ↔ (S, s) ⟹ t, from
by refl
@[symm] lemma big_step_equiv.symm {S₁ S₂ : stmt}:
S₁ ≈ S₂ → S₂ ≈ S₁ :=
assume h : S₁ ≈ S₂,
fix s t,
show (S₂, s) ⟹ t ↔ (S₁, s) ⟹ t, from
iff.symm (h s t)
@[trans] lemma big_step_equiv.trans {S₁ S₂ S₃ : stmt} (h₁₂ : S₁ ≈ S₂)
(h₂₃ : S₂ ≈ S₃) :
S₁ ≈ S₃ :=
fix s t,
show (S₁, s) ⟹ t ↔ (S₃, s) ⟹ t, from
iff.trans (h₁₂ s t) (h₂₃ s t)
/- 1.1. Prove the following program equivalences. -/
lemma big_step_equiv.skip_assign_id {x} :
stmt.assign x (λs, s x) ≈ stmt.skip :=
sorry
lemma big_step_equiv.seq_skip_left {S : stmt} :
stmt.skip ;; S ≈ S :=
sorry
lemma big_step_equiv.seq_skip_right {S : stmt} :
S ;; stmt.skip ≈ S :=
sorry
lemma big_step_equiv.ite_seq_while {b} {S : stmt} :
stmt.ite b (S ;; stmt.while b S) stmt.skip ≈ stmt.while b S :=
sorry
/- 1.2. Program equivalence can be used to replace subprograms by other
subprograms with the same semantics. Prove the following so-called congruence
rules: -/
lemma big_step_equiv.seq_congr {S₁ S₂ T₁ T₂ : stmt} (hS : S₁ ≈ S₂)
(hT : T₁ ≈ T₂) :
S₁ ;; T₁ ≈ S₂ ;; T₂ :=
sorry
lemma big_step_equiv.ite_congr {b} {S₁ S₂ T₁ T₂ : stmt} (hS : S₁ ≈ S₂)
(hT : T₁ ≈ T₂) :
stmt.ite b S₁ T₁ ≈ stmt.ite b S₂ T₂ :=
sorry
/- 1.3 (**optional**): Prove one more congruence rule. This one is more
difficult. -/
lemma denote_equiv.while_congr {b} {S₁ S₂ : stmt} (hS : S₁ ≈ S₂) :
stmt.while b S₁ ≈ stmt.while b S₂ :=
sorry
/- ## Question 2: Guarded Command Language (GCL)
In 1976, E. W. Dijkstra introduced the guarded command language, a
minimalistic imperative language with built-in nondeterminism. A grammar for one
of its variants is given below:
S ::= x := e -- assignment
| assert b -- assertion
| S ; S -- sequential composition
| S | ⋯ | S -- nondeterministic choice
| loop S -- nondeterministic iteration
Assignment and sequential composition are as in the WHILE language. The other
statements have the following semantics:
* `assert b` aborts if `b` evaluates to false; otherwise, the command is a
no-op.
* `S | ⋯ | S` chooses any of the branches and executes it, ignoring the other
branches.
* `loop S` executes `S` any number of times.
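For intuition, starting from a state where `x = 0`, the program `loop (x := x + 1)`
may stop after any number of iterations, so any natural number is a possible final
value of `x`.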
In Lean, GCL is captured by the following inductive type: -/
namespace gcl
inductive stmt (σ : Type) : Type
| assign : string → (σ → ℕ) → stmt
| assert : (σ → Prop) → stmt
| seq : stmt → stmt → stmt
| choice : list stmt → stmt
| loop : stmt → stmt
infixr ` ;; ` : 90 := stmt.seq
/- The parameter `σ` abstracts over the state type. It is necessary as a
workaround for a Lean bug.
The big-step semantics is defined as follows: -/
inductive big_step : (stmt state × state) → state → Prop
| assign {x a s} :
big_step (stmt.assign x a, s) (s{x ↦ a s})
| assert {b : state → Prop} {s} (hcond : b s) :
big_step (stmt.assert b, s) s
| seq {S T s t u} (h₁ : big_step (S, s) t) (h₂ : big_step (T, t) u) :
big_step (S ;; T, s) u
| choice {Ss s t} (i) (hless : i < list.length Ss)
(hbody : big_step (list.nth_le Ss i hless, s) t) :
big_step (stmt.choice Ss, s) t
| loop_base {S s} :
big_step (stmt.loop S, s) s
| loop_step {S s u} (t) (hbody : big_step (S, s) t)
(hrest : big_step (stmt.loop S, t) u) :
big_step (stmt.loop S, s) u
infix ` ⟹ ` : 110 := big_step
/- 2.1. Prove the following inversion rules, as we did in the lecture for the
WHILE language. -/
@[simp] lemma big_step_assign_iff {x a s t} :
(stmt.assign x a, s) ⟹ t ↔ t = s{x ↦ a s} :=
sorry
@[simp] lemma big_step_assert {b s t} :
(stmt.assert b, s) ⟹ t ↔ t = s ∧ b s :=
sorry
@[simp] lemma big_step_seq_iff {S₁ S₂ s t} :
(stmt.seq S₁ S₂, s) ⟹ t ↔ (∃u, (S₁, s) ⟹ u ∧ (S₂, u) ⟹ t) :=
sorry
lemma big_step_loop {S s u} :
(stmt.loop S, s) ⟹ u ↔
(s = u ∨ (∃t, (S, s) ⟹ t ∧ (stmt.loop S, t) ⟹ u)) :=
sorry
@[simp] lemma big_step_choice {Ss s t} :
(stmt.choice Ss, s) ⟹ t ↔
(∃(i : ℕ) (hless : i < list.length Ss),
(list.nth_le Ss i hless, s) ⟹ t) :=
sorry
end gcl
/- 2.2. Complete the translation below of a deterministic program to a GCL
program, by filling in the `sorry` placeholders below. -/
def gcl_of : stmt → gcl.stmt state
| stmt.skip := gcl.stmt.assert (λ_, true)
| (stmt.assign x a) :=
sorry
| (S ;; T) :=
sorry
| (stmt.ite b S T) :=
sorry
| (stmt.while b S) :=
sorry
/- 2.3. In the definition of `gcl_of` above, `skip` is translated to
`assert (λ_, true)`. Looking at the big-step semantics of both constructs, we
can convince ourselves that it makes sense. Can you think of other correct ways
to define the `skip` case? -/
-- enter your answer here
end LoVe
|
"""
A flavor of TOML for more general data-serialization,
including support for 'null' values, infinities, and NaN's.
Also supports JSON3-esque serialization of data structures, using `StructTypes`.
"""
module WOML
using Dates
using TOML, StructTypes
include("toml-type.jl")
include("unions.jl")
include("converter.jl")
"
Parses a TOML string into the given type.
The type defaults to `Dict{String, Any}`, same as the original TOML package.
"
@inline read(toml::AbstractString, T...) = read(IOBuffer(toml, write=false), T...)
function read(io::IO, T = Dict{String, Any})
struct_type = StructTypes.StructType(T)
end
export CM_Read, CM_Write
end # module
|
[STATEMENT]
lemma nxt_run_distinct:
"distinct (run (nxt \<Sigma> \<Delta> q\<^sub>0) (init q\<^sub>0) w n)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. distinct (run (nxt \<Sigma> \<Delta> q\<^sub>0) (init q\<^sub>0) w n)
[PROOF STEP]
by (cases n; simp del: remdups_fwd.simps; metis (no_types) remdups_fwd_distinct) |
"""Tests for module bregman on OT with bregman projections """
# Author: Remi Flamary <[email protected]>
#
# License: MIT License
import numpy as np
import ot
def test_sinkhorn():
# test sinkhorn
n = 100
rng = np.random.RandomState(0)
x = rng.randn(n, 2)
u = ot.utils.unif(n)
M = ot.dist(x, x)
G = ot.sinkhorn(u, u, M, 1, stopThr=1e-10)
# check constraints
np.testing.assert_allclose(
u, G.sum(1), atol=1e-05) # cf convergence sinkhorn
np.testing.assert_allclose(
u, G.sum(0), atol=1e-05) # cf convergence sinkhorn
def test_sinkhorn_empty():
# test sinkhorn
n = 100
rng = np.random.RandomState(0)
x = rng.randn(n, 2)
u = ot.utils.unif(n)
M = ot.dist(x, x)
G, log = ot.sinkhorn([], [], M, 1, stopThr=1e-10, verbose=True, log=True)
# check constraints
np.testing.assert_allclose(u, G.sum(1), atol=1e-05)
np.testing.assert_allclose(u, G.sum(0), atol=1e-05)
G, log = ot.sinkhorn([], [], M, 1, stopThr=1e-10,
method='sinkhorn_stabilized', verbose=True, log=True)
# check constraints
np.testing.assert_allclose(u, G.sum(1), atol=1e-05)
np.testing.assert_allclose(u, G.sum(0), atol=1e-05)
G, log = ot.sinkhorn(
[], [], M, 1, stopThr=1e-10, method='sinkhorn_epsilon_scaling',
verbose=True, log=True)
# check constraints
np.testing.assert_allclose(u, G.sum(1), atol=1e-05)
np.testing.assert_allclose(u, G.sum(0), atol=1e-05)
def test_sinkhorn_variants():
# test sinkhorn
n = 100
rng = np.random.RandomState(0)
x = rng.randn(n, 2)
u = ot.utils.unif(n)
M = ot.dist(x, x)
G0 = ot.sinkhorn(u, u, M, 1, method='sinkhorn', stopThr=1e-10)
Gs = ot.sinkhorn(u, u, M, 1, method='sinkhorn_stabilized', stopThr=1e-10)
Ges = ot.sinkhorn(
u, u, M, 1, method='sinkhorn_epsilon_scaling', stopThr=1e-10)
Gerr = ot.sinkhorn(u, u, M, 1, method='do_not_exists', stopThr=1e-10)
# check values
np.testing.assert_allclose(G0, Gs, atol=1e-05)
np.testing.assert_allclose(G0, Ges, atol=1e-05)
np.testing.assert_allclose(G0, Gerr)
def test_bary():
n_bins = 100 # nb bins
# Gaussian distributions
a1 = ot.datasets.get_1D_gauss(n_bins, m=30, s=10) # m= mean, s= std
a2 = ot.datasets.get_1D_gauss(n_bins, m=40, s=10)
# creating matrix A containing all distributions
A = np.vstack((a1, a2)).T
# loss matrix + normalization
M = ot.utils.dist0(n_bins)
M /= M.max()
alpha = 0.5 # 0<=alpha<=1
weights = np.array([1 - alpha, alpha])
# wasserstein
reg = 1e-3
bary_wass = ot.bregman.barycenter(A, M, reg, weights)
np.testing.assert_allclose(1, np.sum(bary_wass))
ot.bregman.barycenter(A, M, reg, log=True, verbose=True)
def test_unmix():
n_bins = 50 # nb bins
# Gaussian distributions
a1 = ot.datasets.get_1D_gauss(n_bins, m=20, s=10) # m= mean, s= std
a2 = ot.datasets.get_1D_gauss(n_bins, m=40, s=10)
a = ot.datasets.get_1D_gauss(n_bins, m=30, s=10)
# creating matrix A containing all distributions
D = np.vstack((a1, a2)).T
# loss matrix + normalization
M = ot.utils.dist0(n_bins)
M /= M.max()
M0 = ot.utils.dist0(2)
M0 /= M0.max()
h0 = ot.unif(2)
# wasserstein
reg = 1e-3
um = ot.bregman.unmix(a, D, M, M0, h0, reg, 1, alpha=0.01,)
np.testing.assert_allclose(1, np.sum(um), rtol=1e-03, atol=1e-03)
np.testing.assert_allclose([0.5, 0.5], um, rtol=1e-03, atol=1e-03)
ot.bregman.unmix(a, D, M, M0, h0, reg,
1, alpha=0.01, log=True, verbose=True)
|
In providing instrumentation for the record, Kurstin used all of his instruments, such as a Mellotron and a Chamberlin, taping them from a distance to simulate the Wall of Sound, a recording technique originally developed by Phil Spector that was popular in the early 1960s. He enlisted Clarkson to provide all the background vocals herself. Clarkson, who grew up singing in a chorus, was pleased with the aspect, saying, "Blending is something I knew how to do from childhood. Sometimes I'd have to do an alto instead of a soprano because they needed a bigger sound. But I've never had to do anything like this before — doing all my backup vocals, essentially being my own choir." Together, they began to record in May 2013 and continued through the summer of that year, beginning by recording "White Christmas" with Clarkson in the vocal booth and with Kurstin on a piano. She commented, "The production is all him. I would be just like 'Hey, can we make this more jazz? Hey, can we make this more bluesy.' And he just, like Harry Potter, made this happen. It's so weird."
|
Townsend mainly uses Open C tuning for both six and seven string guitar. He now also uses Open B tuning and Open B flat tuning (Open C tuning tuned a half and a whole step down respectively) on his six string guitars. Townsend's technique varies from fingerpicking, power chords and <unk> to sweep-picked arpeggios and tapping techniques. He is also known for his heavy use of reverb and delay effects. He has expressed that he has no taste for shred guitar, saying that "Musically it doesn't do anything for me" and that he only solos when he thinks that he can within the context of the song.
|
function dat = N_class_loss(algo,dat)
x = dat.X;
y = dat.Y; % compare against the labels (using dat.X here made the loss identically zero)
% [1:length(x)]';
lss = x-y;
lss(lss ~= 0) = 1;
lss = sum(lss)/length(x);
dat=data([get_name(dat) ' -> N_class_loss=' num2str(lss,4) ],[],lss);
|
library("car")
library("MASS")
library("RJSONIO")
data <- read.csv("people.csv")
write(toJSON(sapply(data[-(1:5)],mean)), file="mean.json")
write(toJSON(sapply(data[-(1:5)],sd)), file="sd.json")
standardized = as.data.frame(scale(data[-(1:5)]))
ranking = prcomp(standardized)
screeplot(ranking, type="lines")
write(toJSON(ranking$rotation), file="pca.json")
write.csv(ranking$rotation, file="pca.csv") |
If $s$ is a compact set and $t$ is a closed set with $s \cap t = \emptyset$, then there exists a positive real number $d$ such that for all $x \in s$ and $y \in t$, we have $d \leq \|x - y\|$. |
// Copyright Twitch Interactive, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: MIT
#pragma once
#include <gsl/span>
#include <vector>
namespace Twitch::Utility {
class OutputByteStream {
public:
void reserve(size_t size)
{
_data.reserve(size);
}
template<typename ValueType, typename ArgType>
void write(const ArgType &arg)
{
size_t startSize = _data.size();
size_t valueSize = sizeof(ValueType);
ValueType value = static_cast<ValueType>(arg);
_data.resize(startSize + valueSize);
memcpy_s(_data.data() + startSize, _data.size() - startSize, &value, valueSize);
}
void writeBytes(gsl::span<const uint8_t> bytes)
{
size_t startSize = _data.size();
_data.resize(startSize + bytes.size());
memcpy_s(_data.data() + startSize, _data.size() - startSize, bytes.data(), bytes.size());
}
std::vector<uint8_t> consume()
{
return std::move(_data);
}
private:
std::vector<uint8_t> _data;
};
class InputByteStream {
public:
InputByteStream(gsl::span<const uint8_t> data)
: _data(std::move(data))
{
}
template<typename ValueType>
ValueType read()
{
ValueType value;
size_t valueSize = sizeof(ValueType);
memcpy_s(&value, sizeof(value), _data.data() + _currentPosition, valueSize);
_currentPosition += valueSize;
return value;
}
void readBytes(gsl::span<uint8_t> bytes)
{
memcpy_s(bytes.data(), bytes.size(), _data.data() + _currentPosition, bytes.size());
_currentPosition += bytes.size();
}
gsl::span<const uint8_t> readBytesView(size_t size)
{
gsl::span<const uint8_t> bytesView = _data.subspan(_currentPosition, size);
_currentPosition += size;
return bytesView;
}
size_t remainingSize() const
{
return _data.size() - _currentPosition;
}
private:
gsl::span<const uint8_t> _data;
size_t _currentPosition = 0;
};
} // namespace Twitch::Utility
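// Usage sketch (illustrative only, not part of the original header):
//
//   Twitch::Utility::OutputByteStream out;
//   out.write<uint32_t>(42);                    // serialize a 32-bit value
//   std::vector<uint8_t> bytes = out.consume(); // take ownership of the buffer
//
//   Twitch::Utility::InputByteStream in(bytes);
//   uint32_t value = in.read<uint32_t>();       // value == 42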
|
=== Bloody Sunday ===
|
{-# LANGUAGE FlexibleContexts #-}
module Network (initNet, feedForward, sgd, forwardPass, backprop, updateMiniBatch) where
import Control.Monad
import Data.List.Split
import Numeric.LinearAlgebra as L
import MnistLoader
import Aux
feedForward :: Network -> Matrix Float -> Matrix Float
feedForward n a = foldl f a $ zip (weights n) (biases n)
where
f a' (w, b) = sigmoid ((w L.<> a') + b)
sgd :: Network
-> [(Matrix Float, Matrix Float)] -- Training data
-> [(Matrix Float, Matrix Float)] -- Evaluation data
-> Int -- Number of epochs
-> Int -- Current epoch
-> Int -- Mini batch size
-> Float -- Eta
-> IO Network
sgd n exemplars testData maxEpoch epoch batchSize eta = do
exemplars' <- shuffle exemplars
let batches = chunksOf batchSize exemplars'
let newNet = foldl (\accNet batch -> updateMiniBatch accNet batch eta) n batches
let correct = evaluate newNet testData
putStrLn $ "Epoch " ++ (show (epoch + 1)) ++ " Eval: " ++ (show correct) ++ " / " ++ (show $ length testData)
if epoch == maxEpoch
then return newNet
else sgd newNet exemplars' testData maxEpoch (epoch + 1) batchSize eta
updateMiniBatch :: Network -> [(Matrix Float, Matrix Float)] -> Float -> Network
updateMiniBatch n l@((x, y) : rest) eta = let
initAcc = backprop n x y
nablaSums = foldl f initAcc rest
nabla_b = map fst nablaSums
nabla_w = map snd nablaSums
in
Network {
sizes = sizes n
, weights = zipWith wavg (weights n) nabla_w
, biases = zipWith wavg (biases n) nabla_b
}
where
f nablaSum (x, y) = let
bp = backprop n x y
in
zipWith add nablaSum bp
add (nb, nw) (nb', nw') = (nb+nb', nw+nw')
wavg :: Matrix Float -> Matrix Float -> Matrix Float
wavg m1 m2 = let
nt = (1><1) [fromIntegral $ length l]
eta' = (1><1) [eta]
in m1 - (eta' / nt) * m2
-- Network -> x -> y -> (nabla_b, nabla_w)
backprop :: Network -> Matrix Float -> Matrix Float -> [(Matrix Float, Matrix Float)]
backprop n x y = let
-- Reversed list of forward passes
((a:a':as), (z:zs)) = forwardPass n x
revW = foldl (\acc w -> w:acc) [] (weights n)
fp = zip as zs
zippedFp = zip fp revW
-- Handle last layer manually
delta = (cost_derivative a y) * (sigmoid' z)
nabla_b = delta
nabla_w = delta L.<> (tr' a')
res = foldl f [(nabla_b, nabla_w)] zippedFp
in
res
where
f (acc@((delta, _):_)) ((a, z), w) = let
sp = sigmoid' z
delta' = ((tr' w) L.<> delta) * sp
nabla_b = delta'
nabla_w = delta' L.<> (tr' a)
in
(nabla_b, nabla_w) : acc
-- Network -> activation -> (activations, z vectors)
forwardPass :: Network -> Matrix Float -> ([Matrix Float], [Matrix Float])
forwardPass n a = let
pairs = zip (weights n) (biases n)
fp = foldl f [(a, a)] pairs
in
(map fst fp, init $ map snd fp)
where
f (l@((a, _):_)) (w, b) = let
z = (w L.<> a) + b
activation = sigmoid z
in (activation, z):l
evaluate :: Network -> [(Matrix Float, Matrix Float)] -> Int
evaluate net exemplars = sum $ map f exemplars
where
f (x, y) = let
approx = argMax $ feedForward net x
actual = argMax y
in
if approx == actual
then 1
else 0
cost_derivative output_activations y = output_activations - y
-- Miscellaneous
sigmoid z = 1.0 / (1.0 + (exp $ -z))
sigmoid' :: Matrix Float -> Matrix Float
sigmoid' z = (sigmoid z) * (1 - (sigmoid z))
argMax :: Matrix Float -> Int
argMax m = L.maxIndex $ L.flatten m
-- assuming row-major order
data Network = Network { sizes :: [Int]
, weights :: [Matrix Float]
, biases :: [Matrix Float]
} deriving (Show)
initNet :: [Int] -> IO (Network)
initNet sizes = do
ws <- genWeights sizes
bs <- genBiases sizes
return Network { sizes = sizes
, weights = ws
, biases = bs
}
where
genWeights [s, s'] =
do
m <- L.randn s' s
return [L.cmap realToFrac m]
genWeights (s:s':ss) =
do
rest <- genWeights (s' : ss)
m <- L.randn s' s
return (L.cmap realToFrac m : rest)
genWeights _ = error "Not enough layers"
genBiases [s, s'] =
do
m <- L.randn s' 1
return $ [L.cmap realToFrac m]
genBiases (s:s':ss) =
do
rest <- genBiases (s' : ss)
m <- L.randn s' 1
return (L.cmap realToFrac m : rest)
genBiases _ = error "Not enough layers" |
[STATEMENT]
lemma bmod_onel: "\<otimes> \<rho> f (\<delta>::'a \<Rightarrow> 'b::unital_quantale) x = f x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<otimes> \<rho> f \<delta> x = f x
[PROOF STEP]
apply (simp add: bmod_comp_def pid_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<Squnion>{uu_. \<exists>y z. (z \<in> \<xi> \<longrightarrow> uu_ = f y \<and> \<rho> x y z) \<and> (z \<notin> \<xi> \<longrightarrow> uu_ = \<bottom> \<and> \<rho> x y z)} = f x
[PROOF STEP]
apply (rule antisym)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<Squnion>{uu_. \<exists>y z. (z \<in> \<xi> \<longrightarrow> uu_ = f y \<and> \<rho> x y z) \<and> (z \<notin> \<xi> \<longrightarrow> uu_ = \<bottom> \<and> \<rho> x y z)} \<le> f x
2. f x \<le> \<Squnion>{uu_. \<exists>y z. (z \<in> \<xi> \<longrightarrow> uu_ = f y \<and> \<rho> x y z) \<and> (z \<notin> \<xi> \<longrightarrow> uu_ = \<bottom> \<and> \<rho> x y z)}
[PROOF STEP]
apply (rule Sup_least, safe)
[PROOF STATE]
proof (prove)
goal (4 subgoals):
1. \<And>xa y z. \<lbrakk>z \<notin> \<xi>; \<rho> x y z\<rbrakk> \<Longrightarrow> \<bottom> \<le> f x
2. \<And>xa y z. \<lbrakk>\<rho> x y z; \<not> f y \<le> f x; z \<in> \<xi>\<rbrakk> \<Longrightarrow> False
3. \<And>xa y z. \<lbrakk>\<rho> x y z; f y = \<bottom>; \<rho> x y z\<rbrakk> \<Longrightarrow> f y \<le> f x
4. f x \<le> \<Squnion>{uu_. \<exists>y z. (z \<in> \<xi> \<longrightarrow> uu_ = f y \<and> \<rho> x y z) \<and> (z \<notin> \<xi> \<longrightarrow> uu_ = \<bottom> \<and> \<rho> x y z)}
[PROOF STEP]
apply (simp add: bres_galois)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<And>xa y z. \<lbrakk>\<rho> x y z; \<not> f y \<le> f x; z \<in> \<xi>\<rbrakk> \<Longrightarrow> False
2. \<And>xa y z. \<lbrakk>\<rho> x y z; f y = \<bottom>; \<rho> x y z\<rbrakk> \<Longrightarrow> f y \<le> f x
3. f x \<le> \<Squnion>{uu_. \<exists>y z. (z \<in> \<xi> \<longrightarrow> uu_ = f y \<and> \<rho> x y z) \<and> (z \<notin> \<xi> \<longrightarrow> uu_ = \<bottom> \<and> \<rho> x y z)}
[PROOF STEP]
using unitr_eq
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>?e \<in> \<xi>; \<rho> ?x ?y ?e\<rbrakk> \<Longrightarrow> ?x = ?y
goal (3 subgoals):
1. \<And>xa y z. \<lbrakk>\<rho> x y z; \<not> f y \<le> f x; z \<in> \<xi>\<rbrakk> \<Longrightarrow> False
2. \<And>xa y z. \<lbrakk>\<rho> x y z; f y = \<bottom>; \<rho> x y z\<rbrakk> \<Longrightarrow> f y \<le> f x
3. f x \<le> \<Squnion>{uu_. \<exists>y z. (z \<in> \<xi> \<longrightarrow> uu_ = f y \<and> \<rho> x y z) \<and> (z \<notin> \<xi> \<longrightarrow> uu_ = \<bottom> \<and> \<rho> x y z)}
[PROOF STEP]
apply fastforce
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>xa y z. \<lbrakk>\<rho> x y z; f y = \<bottom>; \<rho> x y z\<rbrakk> \<Longrightarrow> f y \<le> f x
2. f x \<le> \<Squnion>{uu_. \<exists>y z. (z \<in> \<xi> \<longrightarrow> uu_ = f y \<and> \<rho> x y z) \<and> (z \<notin> \<xi> \<longrightarrow> uu_ = \<bottom> \<and> \<rho> x y z)}
[PROOF STEP]
apply (metis bot.extremum)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. f x \<le> \<Squnion>{uu_. \<exists>y z. (z \<in> \<xi> \<longrightarrow> uu_ = f y \<and> \<rho> x y z) \<and> (z \<notin> \<xi> \<longrightarrow> uu_ = \<bottom> \<and> \<rho> x y z)}
[PROOF STEP]
by (metis (mono_tags, lifting) Sup_upper mem_Collect_eq unitr_ex) |
module Number where
import Data.Complex --Complex
import Data.Ratio --Rational
data LispNumber = Integer Integer -- Stores a Haskell Integer
| Real Double | Rational Rational
| Complex (Complex Double) deriving(Eq)
instance Show LispNumber where show = showNumber
instance Num LispNumber where
(Integer a) + (Integer b) = Integer $ a + b
(Integer a) + (Real b) = Real $ fromInteger a + b
(Integer a) + (Rational b) = Rational $ fromInteger a + b
(Integer a) + (Complex b) = Complex $ fromInteger a + b
(Real a) + (Real b) = Real $ a + b
(Real a) + (Integer b) = Real $ a + fromInteger b
(Real a) + (Rational b) = Real $ a + fromRational b
--(Real a) + (Complex b) = Complex $
(Rational a) + (Rational b) = Rational $ a + b
(Complex a) + (Complex b) = Complex $ a + b
(Integer a) - (Integer b) = Integer $ a - b
(Integer a) - (Real b) = Real $ fromInteger a - b
(Integer a) - (Rational b) = Rational $ fromInteger a - b
(Integer a) - (Complex b) = Complex $ fromInteger a - b
(Real a) - (Real b) = Real $ a - b
(Rational a) - (Rational b) = Rational $ a - b
(Complex a) - (Complex b) = Complex $ a - b
(Integer a) * (Integer b) = Integer $ a * b
(Real a) * (Real b) = Real $ a * b
(Rational a) * (Rational b) = Rational $ a * b
(Complex a) * (Complex b) = Complex $ a * b
abs (Integer a) = Integer $ abs a
abs (Real a) = Real $ abs a
abs (Rational a) = Rational $ abs a
abs (Complex a) = Complex $ abs a
fromInteger = Integer
signum (Integer a) = Integer $ signum a
signum (Real a) = Real $ signum a
signum (Rational a) = Rational $ signum a
signum (Complex a) = Complex $ signum a
instance Fractional LispNumber where
(Integer a) / (Integer b) = Rational $ a % b
(Real a) / (Real b) = Real $ a / b
(Complex a) / (Complex b) = Complex $ a / b
(Rational a) / (Rational b) = Rational $ a / b
fromRational = Rational
instance Ord LispNumber where
compare (Integer a) (Integer b) = compare a b
compare (Integer a) (Rational b) = compare (fromIntegral a) b
compare (Integer a) (Real b) = compare (fromIntegral a) b
compare (Integer a) (Complex b) = error "Cant order complex numbers"
compare (Rational b) (Integer a)= compare b (fromIntegral a)
compare (Real b) (Integer a)= compare b (fromIntegral a)
compare (Complex b) (Integer a) = error "Cant order complex numbers"
compare (Real a) (Real b) = compare a b
compare (Rational a) (Rational b) = compare a b
compare (Complex _) (Complex _) = error "Cant order complex numbers"
showNumber :: LispNumber -> String
showNumber (Integer a) = show a
showNumber (Complex a) = show a
showNumber (Rational a) = show (numerator a) ++ "/" ++ show (denominator a)
showNumber (Real a) = show a
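-- Usage sketch (illustrative, not part of the original module):
--   showNumber (Integer 3 + Rational (1 % 2))   -- "7/2"
--   showNumber (Integer 1 / Integer 4)          -- "1/4"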
|
/*
CandyPoker
https://github.com/sweeterthancandy/CandyPoker
MIT License
Copyright (c) 2019 Gerry Candy
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "ps/base/cards.h"
#include "ps/support/command.h"
#include "ps/detail/print.h"
#include "ps/eval/class_cache.h"
#include "ps/eval/holdem_class_vector_cache.h"
#include "app/pretty_printer.h"
#include "app/serialization_util.h"
#include "ps/detail/graph.h"
#include "ps/sim/computer.h"
#include "ps/sim/game_tree.h"
#include "ps/sim/computer_factory.h"
#include "ps/sim/_extra.h"
#include "ps/sim/solver.h"
#include <boost/property_tree/ptree.hpp>
#include <boost/property_tree/json_parser.hpp>
namespace bpt = boost::property_tree;
#include <numeric>
#include <boost/timer/timer.hpp>
namespace ps{
namespace sim{
struct SimpleNumericArguments{
friend std::ostream& operator<<(std::ostream& ostr, SimpleNumericArguments const& self){
ostr << "factor = " << self.factor;
ostr << ", stride = " << self.stride;
ostr << ", clamp_epsilon = " << self.clamp_epsilon;
ostr << ", delta = " << self.delta;
return ostr;
}
double factor{0.05};
size_t stride{20};
double clamp_epsilon{1e-6};
double delta{0.0};
void EmitDescriptions(SolverDecl::ArgumentVisitor& V)const{
V.DeclArgument("factor" , factor,
"used for taking linear product, larger is faster, too large is unstable");
V.DeclArgument("stride" , stride,
"used for how many iterations before checking stoppage condition, "
"larger is faster, too large is too slow");
V.DeclArgument("clamp-epsilon" , clamp_epsilon,
"used for clamping close to mixed strategies to non-mixed, "
"too small gives slower convergence");
V.DeclArgument("delta" , delta, "forcing parameter");
}
void Read(bpt::ptree const& args){
factor = args.get<double>("factor");
stride = args.get<size_t>("stride");
clamp_epsilon = args.get<double>("clamp-epsilon");
delta = args.get<double>("delta");
}
};
struct SimpleNumeric : Solver{
struct Controller{
virtual ~Controller(){}
using ApplyReturnType = boost::optional<
boost::optional<
StateType
>
>;
virtual ApplyReturnType Apply(
size_t loop_count,
std::shared_ptr<GameTree> gt, GraphColouring<AggregateComputer> AG,
SimpleNumericArguments& args, Solution const& solution)=0;
};
struct ProfileController : Controller{
virtual ApplyReturnType Apply(
size_t loop_count,
std::shared_ptr<GameTree> gt, GraphColouring<AggregateComputer> AG,
SimpleNumericArguments& args, Solution const& solution)override
{
if( loop_count != 0 ){
PS_LOG(trace) << "Loop took " << timer_.format();
}
timer_.start();
return {};
}
private:
boost::timer::cpu_timer timer_;
};
SimpleNumeric( SimpleNumericArguments const& args)
: args_{args}
{}
virtual boost::optional<StateType> Execute(SolverContext& ctx,
std::shared_ptr<GameTree> const& gt,
GraphColouring<AggregateComputer> const& AG,
StateType const& S0)override
{
auto S = S0;
auto solution0 = Solution::MakeWithDeps(gt, AG, S0);
for(auto& ctrl : controllers_ ){
auto opt_opt = ctrl->Apply(0, gt, AG, args_, solution0);
if( opt_opt ){
PS_LOG(trace) << "skipping solver";
return *opt_opt;
}
}
for(size_t loop_count{1};;++loop_count){
for(size_t inner=0;inner!=args_.stride;++inner){
auto S_counter = computation_kernel::CounterStrategy(gt, AG, S, args_.delta);
computation_kernel::InplaceLinearCombination(S, S_counter, 1 - args_.factor );
}
computation_kernel::InplaceClamp(S, args_.clamp_epsilon);
auto solution = Solution::MakeWithDeps(gt, AG, S);
for(auto& ctrl : controllers_ ){
auto opt_opt = ctrl->Apply(loop_count, gt, AG, args_, solution);
if( opt_opt ){
return *opt_opt;
}
}
}
}
void AddController(std::shared_ptr<Controller> ctrl){
controllers_.push_back(ctrl);
}
virtual std::string StringDescription()const override{
std::stringstream sstr;
sstr << "SimpleNumeric{" << args_ << "}";
return sstr.str();
}
private:
SimpleNumericArguments args_;
std::vector<std::shared_ptr<Controller> > controllers_;
};
struct NumericSeqDecl : SolverDecl{
struct NumericSeqArguments : SimpleNumericArguments{
using ImplType = SimpleNumericArguments;
size_t ttl{10};
std::string sequence_type{"special"};
size_t min_level{0};
void EmitDescriptions(SolverDecl::ArgumentVisitor& V)const{
ImplType::EmitDescriptions(V);
V.DeclArgument("sequence-type", sequence_type, "how a sequence of solutions is choosen");
V.DeclArgument("ttl" , ttl , "time to live of sequence");
V.DeclArgument("min-level" , min_level, "minimum level; this can be used to allow a large factor");
}
void Read(bpt::ptree const& args){
ImplType::Read(args);
ttl = args.get<size_t>("ttl");
sequence_type = args.get<std::string>("sequence-type");
min_level = args.get<size_t>("min-level");
}
};
struct ConstantSequenceController : SimpleNumeric::Controller{
ConstantSequenceController(SequenceConsumer const& seq, size_t ttl, size_t min_level)
: seq_{seq}
, ttl_{ttl}
, min_level_{min_level}
{}
virtual ApplyReturnType Apply(
size_t loop_count,
std::shared_ptr<GameTree> gt, GraphColouring<AggregateComputer> AG,
SimpleNumericArguments& args, Solution const& solution)override
{
switch( seq_.Consume(solution) ){
case SequenceConsumer::Ctrl_Rejected:
++count_;
break;
case SequenceConsumer::Ctrl_Accepted:
count_ = 0;
break;
case SequenceConsumer::Ctrl_Perfect:
return seq_.AsOptState();
}
if( count_ >= ttl_){
return seq_.AsOptState();
}
if( auto sol = seq_.AsOptSolution() ){
if(sol->Level < min_level_ || sol->Level == 0){
return boost::optional<StateType>{sol->S};
}
}
return {};
}
private:
SequenceConsumer seq_;
size_t ttl_;
size_t min_level_;
size_t count_{0};
};
virtual void Accept(ArgumentVisitor& V)const override{
using namespace std::string_literals;
NumericSeqArguments proto;
proto.EmitDescriptions(V);
}
virtual std::shared_ptr<Solver> Make( bpt::ptree const& args)const override
{
NumericSeqArguments sargs;
sargs.Read(args);
static std::unordered_map<
std::string,
std::function<bool(Solution const&, Solution const&)>
> comps = {
{ "level-sequence", [](auto const& head, auto const& candidate){ return head.Level < candidate.Level; } },
{ "total-sequence", [](auto const& head, auto const& candidate){ return head.Total < candidate.Total; } },
{ "norm-sequence" , [](auto const& head, auto const& candidate){ return head.Norm < candidate.Norm; } } ,
{ "special" , [](auto const& head, auto const& candidate){ return head < candidate; } }
};
auto comp = comps.find(sargs.sequence_type);
if( comp == comps.end() ){
BOOST_THROW_EXCEPTION(std::domain_error("no such sequence type of " + sargs.sequence_type ));
}
auto ctrl = std::make_shared<ConstantSequenceController>( SequenceConsumer( comp->second ), sargs.ttl, sargs.min_level );
auto solver = std::make_shared<SimpleNumeric>(sargs);
solver->AddController(ctrl);
return solver;
}
};
static SolverRegister<NumericSeqDecl> NumericSeqRec("numeric-sequence");
struct TrailSolutionDecl : SolverDecl{
struct TrailSolutionArguments : SimpleNumericArguments{
using ImplType = SimpleNumericArguments;
size_t level{10};
TrailSolutionArguments(){
factor = 0.1;
clamp_epsilon = 1e-4;
}
void EmitDescriptions(SolverDecl::ArgumentVisitor& V)const{
ImplType::EmitDescriptions(V);
V.DeclArgument("level", level, "level to take the first solution of");
}
void Read(bpt::ptree const& args){
ImplType::Read(args);
level = args.get<size_t>("level");
}
};
// this just returns the first solution which satisfies cond(.)
//
// The idea is that we don't worry about non-convergence here
struct TakeFirstController : SimpleNumeric::Controller{
using te_cond_type = std::function<bool(Solution const&)>;
explicit TakeFirstController(te_cond_type const& cond)
: cond_{cond}
{}
virtual ApplyReturnType Apply(
size_t loop_count,
std::shared_ptr<GameTree> gt, GraphColouring<AggregateComputer> AG,
SimpleNumericArguments& args, Solution const& solution)override
{
if( cond_(solution) ){
boost::optional<StateType> ret{solution.S};
return ret;
}
return {};
}
private:
te_cond_type cond_;
};
// just for debugging: prints the sequence but is otherwise unused.
// The idea is to use this with TakeFirstController
struct SequencePrinterController : SimpleNumeric::Controller{
virtual ApplyReturnType Apply(
size_t loop_count,
std::shared_ptr<GameTree> gt, GraphColouring<AggregateComputer> AG,
SimpleNumericArguments& args, Solution const& solution)override
{
seq_.Consume(solution);
return {};
}
private:
SequenceConsumer seq_;
};
virtual void Accept(ArgumentVisitor& V)const override{
TrailSolutionArguments proto;
proto.EmitDescriptions(V);
}
virtual std::shared_ptr<Solver> Make(bpt::ptree const& args)const override
{
TrailSolutionArguments sargs;
sargs.Read(args);
auto solver = std::make_shared<SimpleNumeric>(sargs);
solver->AddController(std::make_shared<SequencePrinterController>());
solver->AddController(std::make_shared<SimpleNumeric::ProfileController>());
solver->AddController(std::make_shared<TakeFirstController>([lvl=sargs.level](auto const& sol){ return sol.Level <= lvl; }));
return solver;
}
};
static SolverRegister<TrailSolutionDecl> TrailSolutionReg("trail-solution");
} // end namespace sim
} // end namespace ps
|
SETTING UP YOUR MINDSET FOR SUCCESS: YOUR MINDSET IS THE BACKBONE OF YOUR SUCCESS.
Have you ever wondered why some people are successful? Why the rich get richer? The answer is that they have focus and the right mind-set, and they commit to continuous learning and dedication to achieve the success they set out in their goals, vision, mission and purpose. In our world today, focusing on changing your mind-set sounds time-consuming. However, it is important to keep the right mind-set; otherwise, stepping out of your comfort zone or learning from your failures may not be possible either.
Humans have different thinking patterns, which are mostly produced by our habit loops. They are a large part of what makes you and me. Our identities are born from the convergence of these patterns. They create our subjective experience.
So once we change our mind-set and begin to see things from the inner circle, the outer world begins to respond automatically.
First of all we need to understand that each of us faces different challenges at different times in different ways based both on our biology and our unique cultural upbringing. No two people think exactly the same way because no two people have lived exactly the same life.
· Understanding our programmed mind is the first step toward success.
As Zat Rana, a writer on Medium, states: "How we think affects everything from our ability to solve problems, to how we understand meaning, value, and purpose." Real change doesn't just happen; it takes a critical change in our mindset and overcoming challenges.
· Overcome fear. Recognize that fear causes limitations in your life. It's easy to get attached to the thought that things are okay and that this is just the way it is. Successful people don't take things the way they are; they take up the fight and go beyond 'okay' standards. You are not your fears. You are the awareness that experiences them.
· Believing in yourself. Do you know who you are? What are your strengths and weaknesses? Knowing that you can do something and believing that you have what it takes is success in the making. Once you change your mind-set, opportunities come to you, but if you don't change, then you will keep chasing opportunities.
· Choose your company wisely. The saying goes, 'show me your friends and I will tell you who you are.' Successful people surround themselves with mentors, business gurus, and people of similar influence. Your circle of friendships, relationships, and associations should enable you to grow and lead you to success; otherwise, to be successful, sometimes we need to let go of those who are not contributing significantly to our lives.
· Create your vision and mission. Have a huge vision bigger than yourself so it makes you uncomfortable and sleepless until you achieve it. This is why they say rich men don't sleep. When your vision does not let you sleep, you ought to do what needs to be done to achieve it.
· Challenge accepted / adventure into the unknown. The ability to move out of fear and from the known into the unknown is the power to create what you want and wealth in abundance. The main secret to success is knowing what you want and being willing to pay the price it takes. It's taking one step at a time.
· Open mindedness and Willingness to change.
· Conscious and Unconscious mind. Create harmony between your conflicting conscious and unconscious mind; this can be done by visualizing and continuously meditating on your goals and vision on a daily basis so they become part of what you see, smell, feel, touch and talk about.
Open mindedness and willingness to change mean a mind free from limiting ideas that is willing to change and open to welcoming new intuitions and ideologies. How we think affects everything from our ability to solve problems to how we understand the values, logic, meaning and purpose of things.
In the same way we form habits of action relating to our environment, we also form habits of thought when it comes to how we think about the world. We are all born into a reality in which — at first, at least — we can't even distinguish our own separateness from the world.
With time, however, we start to recognize patterns around us, and we internalize these patterns like habits so that we can reuse them in the future. Usually, if a pattern persists in our mental habits, it means that it is valuable in some sense. But this is only the case if we apply that pattern to the right information.
One of the reasons it’s so hard to change our minds about things is that our brains are stuck in these mental habit loops, which tend to look at information from a singular point of view. Our brains have learned something in one context, so they mistakenly apply it to others, mixing up the triggers that lead to routine thoughts.
But the good news is that we're all capable of overpowering these habit loops. It's very productive to have these habits operating as the default mode, but to think well, we must be aware of their limitations and not let them restrict us.
True success stories are built only after you resolve the things that are affecting your life: the problems you are facing, your weak points, and the negative talk about yourself.
The journey to a successful life is not a straight line; it's a bumpy road full of difficulties and setbacks. The average person would give up along the way, but a champion would stay resilient. It's the mindset that makes a true difference. The fact is that the more obstacles you overcome, the more resilient you become.
Ken Chennault, CEO of American Express, has a nice tip for everyone who wants to become successful: “Dedicate yourself to a core set of values. Without them, you will never be able to find personal fulfilment, and you will never be able to lead effectively.”
c) Invest in yourself by getting mentors, reading books and getting training.
d) Mastermind with the leaders. Trust in yourself.
e) Start the journey taking one step at a time through learning, doing and teaching.
Success is about setting your mind-set and making changes based on responsibility and action. It is overcoming fear and moving from the known into the unknown. It is about overcoming failure as a person.
https://thesixfigurementors.com/?id=susanakello. Learn how to set up your mind for success and become a successful career person.
Sharing is caring. What is your mind-set change story, and how can you help someone who is stuck in the same state of fear? Please share your story.
# Double pendulum with torsion damper
> Application of Kane's method assisted with SymPy.
- toc: false
- branch: master
- badges: false
- comments: false
- author: Erik R. Gomez
- categories: [Dynamics, Kane's method]
Let us consider the double pendulum depicted in the figure below. A point mass is concentrated at the end of each massless rod, at $\mathcal{P}$ and $\mathcal{Q}$. The massless rods are of length $l$ and gravity acts in the $-\mathbf{\hat{n}}_{y}$ direction. A torsional spring acts between the first and the second pendulum at point $\mathcal{P}$, with stiffness $\sigma$ and viscous damping coefficient $\delta$.
Recall Kane's equations:
$$f_{r}^* = f_{r}$$
$$\sum_{j=1}^{N} \left[ \dot{\mathbf{p}}^{B_j} \cdot \mathbf{v}_{r}^{B_j} ~+ \dot{\mathbf{L}}^{B_j} \cdot \boldsymbol{\omega}_{r}^{B_j}\right] = \sum_{j=1}^{N} \left[ \mathbf{F}^{B_j} ~ \cdot \mathbf{v}_{r}^{B_j} ~ + \mathbf{T}^{B_j} \cdot \boldsymbol{\omega}^{B_j}_r \right]$$
Let's set-up the reference frames and parameters corresponding to the figure in SymPy mechanics:
```python
import sympy as sp
import sympy.physics.mechanics as me
me.init_vprinting()
q1, q2 = me.dynamicsymbols('q1, q2') # Generalized coordinates as function of time
qd1, qd2 = me.dynamicsymbols('q1, q2', level=1) # Time-derivative of generalized coordinates
u1, u2 = me.dynamicsymbols('u1, u2') # Generalized speeds (function of time)
l, m, g = sp.symbols('l, m, g') # Constants
sig, delta = sp.symbols('sigma, delta') # Force parameters
```
Orient the reference frames:
```python
N = me.ReferenceFrame('N') # Inertial frame
A = N.orientnew('A', 'Axis', (q1, N.z)) # Rod A frame
B = A.orientnew('B', 'Axis', (q2, N.z)) # Rod B frame
```
## Kinematics
Rotational motion:
$$\boldsymbol{\omega}^{A} = \dot{q}_1 \mathbf{\hat{n}_z}, \qquad \boldsymbol{\omega}^{B} = (\dot{q}_1 + \dot{q}_2) \mathbf{\hat{n}_{z}}$$
```python
A.ang_vel_in(N).express(N)
```
```python
B.ang_vel_in(N).express(N)
```
### Choose generalized speeds as:
$$u_1 = \dot{q}_1, \qquad u_2 = \dot{q}_1 + \dot{q}_2$$
so
$$\boldsymbol{\omega}^{A} = u_1 \mathbf{\hat{n}_z}, \qquad \boldsymbol{\omega}^{B} = u_2 \mathbf{\hat{n}_{z}}$$.
```python
kde = [u1 - qd1, u2 - (qd1 + qd2)] # Kinematical differential equation
```
### Differentiation of vector:
$$\frac{^{N}d \mathbf{r}}{d t} = \frac{^{A}d \mathbf{r}}{d t} + ^{N}\omega^{A} \times \mathbf{r} $$
or by the two-point formula
$$^{N}\mathbf{v}^{\mathcal{P}} = ^{A}\mathbf{v}^{\mathcal{O}} + ^{N}\boldsymbol{\omega}^{A} \times \mathbf{r}_{\mathcal{OP}}.$$
### Calculate the velocities of $\mathcal{P}$ and $\mathcal{Q}$:
Position vectors:
$$\mathbf{r}^{\mathcal{OP}} = - l \mathbf{\hat{a}_y}$$
$$\mathbf{r}^{\mathcal{OQ}} = - l \mathbf{\hat{a}_y} - l \mathbf{\hat{b}_y}$$
Velocities:
$$\mathbf{v}^{\mathcal{P}} = l u_1\mathbf{\hat{a}_x}$$
$$\mathbf{v}^{\mathcal{Q}} = l u_1\mathbf{\hat{a}_x} + l u_2\mathbf{\hat{b}_x}$$
In Sympy Mechanics:
```python
O = me.Point('O') # Origo
O.set_vel(N, 0)
P = O.locatenew('P', ( -l * A.y )) # Point @ end of rod A
Ap = me.Particle('Ap', P, m) # Define mass
P.v2pt_theory(O, N, A) # Set velocity
```
```python
Q = P.locatenew('Q', ( -l * B.y)) # Point @ end of rod B
Bp = me.Particle('Bp', Q, m)
Q.v2pt_theory(P, N, B) # Set velocity
```
Accelerations are needed in Kane's method:
$$\mathbf{a}^{\mathcal{P}} = l \dot{u}_{1}\mathbf{\hat{a}_x} + l u_{1}^{2}\mathbf{\hat{a}_y}$$
$$\mathbf{a}^{\mathcal{Q}} = l \dot{u}_{1}\mathbf{\hat{a}_x} + l u_{1}^{2}\mathbf{\hat{a}_y} + l \dot{u}_{2}\mathbf{\hat{b}_x} + l u_{2}^{2}\mathbf{\hat{b}_y}$$
```python
P.acc(N)
```
```python
Q.acc(N)
```
## Inertial forces
Let us calculate the inertial forces before applying forces.
$$f^*_r= \sum_{j=1}^{N} \left[ \dot{\mathbf{p}}^{B_j} \cdot \mathbf{v}_{r}^{B_j} ~+ \dot{\mathbf{L}}^{B_j} \cdot \boldsymbol{\omega}_{r}^{B_j}\right]$$
We have
$$\mathbf{v}^{\mathcal{P}} = l u_1\mathbf{\hat{a}_x}, \qquad \mathbf{v}^{\mathcal{Q}} = l u_1\mathbf{\hat{a}_x} + l u_2\mathbf{\hat{b}_x}$$
### Partial velocities become
$$\mathbf{v}^{\mathcal{P}}_1 = \frac{\partial{} \mathbf{v}^{\mathcal{P}}}{\partial u_1} = l \mathbf{\hat{a}_x}, \qquad
\mathbf{v}^{\mathcal{Q}}_1 = \frac{\partial{} \mathbf{v}^{\mathcal{Q}}}{\partial u_1} = l \mathbf{\hat{a}_x},
$$
$$\mathbf{v}^{\mathcal{P}}_2 = \frac{\partial{} \mathbf{v}^{\mathcal{P}}}{\partial u_2} = \mathbf{0}, \qquad
\mathbf{v}^{\mathcal{Q}}_2 = \frac{\partial{} \mathbf{v}^{\mathcal{Q}}}{\partial u_2} = l \mathbf{\hat{b}_x}
$$
### Time derivative of momentum
$$\mathbf{\dot{p}}^{\mathcal{P}} = m \left(l \dot{u}_{1}\mathbf{\hat{a}_x} + l u_{1}^{2}\mathbf{\hat{a}_y} \right), \quad \mathbf{\dot{p}}^{\mathcal{Q}} = m \left(l \dot{u}_{1}\mathbf{\hat{a}_x} + l u_{1}^{2}\mathbf{\hat{a}_y} + l \dot{u}_{2}\mathbf{\hat{b}_x} + l u_{2}^{2}\mathbf{\hat{b}_y}\right)$$
### Evaluate the sum
$$f^*_1 = m \left(l \dot{u}_{1}\mathbf{\hat{a}_x} + l u_{1}^{2}\mathbf{\hat{a}_y} \right) \cdot l \mathbf{\hat{a}_x} + m \left(l \dot{u}_{1}\mathbf{\hat{a}_x} + l u_{1}^{2}\mathbf{\hat{a}_y} + l \dot{u}_{2}\mathbf{\hat{b}_x} + l u_{2}^{2}\mathbf{\hat{b}_y}\right)\cdot l \mathbf{\hat{a}_x} = 2 m l^{2} \dot{u}_{1} + m l^{2}\dot{u}_{2} \operatorname{cos}\left(q_{2}\right)- m l^{2} u^{2}_{2} \operatorname{sin}\left(q_{2}\right)$$
$$f^*_2 = m \left(l \dot{u}_{1}\mathbf{\hat{a}_x} + l u_{1}^{2}\mathbf{\hat{a}_y} \right) \cdot \mathbf{0} + m \left(l \dot{u}_{1}\mathbf{\hat{a}_x} + l u_{1}^{2}\mathbf{\hat{a}_y} + l \dot{u}_{2}\mathbf{\hat{b}_x} + l u_{2}^{2}\mathbf{\hat{b}_y}\right)\cdot l \mathbf{\hat{b}_x} = m l^{2}\dot{u}_{1} \operatorname{cos}\left(q_{2}\right) + m l^{2} u^{2}_{1} \operatorname{sin}\left(q_{2}\right) + m l^{2} \dot{u}_{2}$$
```python
KM = me.KanesMethod(N, [q1, q2], [u1, u2], kd_eqs=kde)
fr, fstar = KM.kanes_equations([Ap, Bp], None)
sp.Eq(-fstar,fr)
```
## Forces
$$ f_{r} =\sum_{j=1}^{N} \left[ \mathbf{F}^{B_j} ~ \cdot \mathbf{v}_{r}^{B_j} ~ + \mathbf{T}^{B_j} \cdot \boldsymbol{\omega}^{B_j}_r \right]$$
Recall
$$\boldsymbol{\omega}^{A} = u_1 \mathbf{\hat{n}_z}, \qquad \boldsymbol{\omega}^{B} = u_2 \mathbf{\hat{n}_{z}}$$
### Partial angular velocities
$$\boldsymbol{\omega}^{A}_1 = \mathbf{\hat{n}_z}, \quad \boldsymbol{\omega}^{B}_1 = \mathbf{0},$$
$$\boldsymbol{\omega}^{A}_2 = \mathbf{0}, \quad \boldsymbol{\omega}^{B}_2 = \mathbf{\hat{n}_z}.$$
### Torques
$$\mathbf{T}^{A} = \left(\sigma q_2 + \delta \dot{q}_2 \right) \mathbf{\hat{n}_z} = \left(\sigma q_2 + \delta \left(u_2 -u_1\right) \right) \mathbf{\hat{n}_z} $$
$$\mathbf{T}^{B} = -\mathbf{T}^{A} $$
### Gravity
$$\mathbf{F}^{\mathcal{P}} = - g m \mathbf{\hat{n}_y}$$
$$\mathbf{F}^{\mathcal{Q}} = - g m \mathbf{\hat{n}_y}$$
### Evaluate the sum
Torques
$$ f_{1}^T = \mathbf{T}^{A} \cdot \mathbf{\hat{n}_z} + \mathbf{T}^{B} \cdot \mathbf{0} = \sigma q_2 + \delta \left(u_2 -u_1\right) $$
$$ f_{2}^T = \mathbf{T}^{A} \cdot \mathbf{0} + \mathbf{T}^{B} \cdot \mathbf{\hat{n}_z} = -\sigma q_2 - \delta \left(u_2 -u_1\right) $$
Gravity
$$ f_{1}^F = - g m \mathbf{\hat{n}_y} \cdot l \mathbf{\hat{a}_x} - g m \mathbf{\hat{n}_y}\cdot l \mathbf{\hat{a}_x} = - 2 g l m \operatorname{sin}\left(q_{1}\right)$$
$$ f_{2}^F = - g m \mathbf{\hat{n}_y} \cdot \mathbf{0} - g m \mathbf{\hat{n}_y}\cdot l \mathbf{\hat{b}_x} = - g l m \operatorname{sin}\left(q_{1} + q_{2}\right) $$
```python
# Torque
Ta = (sig * q2 + delta * qd2) * N.z
# Gravity force
Fg = -g*m*N.y
# Apply forces on frames and points
forces = [(A, Ta), (B, -Ta), (P, Fg), (Q, Fg)]
```
### Generate EOM
```python
KM = me.KanesMethod(N, [q1, q2], [u1, u2], kd_eqs=kde) # Kane's method instance
fr, fstar = KM.kanes_equations([Ap, Bp], forces) # "Evaluate the sum"
sp.Eq(-fstar,fr) # Output EOM
```
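As a quick illustrative check, the same equations can also be read off in the implicit form $M(q)\,\dot{u} = f(q, u)$ from the `mass_matrix` and `forcing` attributes that `KanesMethod` exposes once `kanes_equations` has been called (from here, `KM.rhs()` would give the explicit state derivatives for a numerical integrator):
```python
# Implicit form M(q) * u' = f(q, u) assembled by KanesMethod above.
Mmat = KM.mass_matrix.applyfunc(sp.trigsimp)   # 2x2 mass matrix multiplying (u1', u2')
frhs = KM.forcing.applyfunc(sp.trigsimp)       # applied forces plus velocity-dependent inertia terms
sp.Eq(Mmat * sp.Matrix(me.dynamicsymbols('u1, u2', level=1)), frhs)
```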
## Different generalized speeds
```python
# Test different kde.
kde1 = [u1 - qd1, u2 - qd2] # Equivalent to Lagrange
KM = me.KanesMethod(N, [q1, q2], [u1, u2], kd_eqs=kde1)
fr, fstar = KM.kanes_equations([Ap, Bp], forces)
sp.Eq(-fstar,fr)
```
```python
```
|
function f=burger(u,der)
% Burgers flux: if the second argument is absent, return f(u) = u.^2/2; otherwise return the derivative f'(u) = u.
if nargin<2
f=0.5*u.^2;
else
f=u;
end; |
(* generated by Ott 0.30, locally-nameless lngen from: ../ott/rules.ott *)
Require Import Bool.
Require Import Metalib.Metatheory.
Require Import List.
(** syntax *)
Definition i : Set := nat.
Definition b : Set := bool.
Inductive typ : Set := (*r types *)
| t_int : typ (*r int *)
| t_arrow (A:typ) (B:typ) (*r function types *)
| t_dyn : typ (*r dynamic type *).
Inductive st : Set := (*r input type or projection label *)
| st_ty (A:typ).
Inductive term : Set := (*r terms *)
| trm_var_b (_:nat) (*r variables *)
| trm_var_f (x:var) (*r variables *)
| trm_lit (i5:i) (*r lit *)
| trm_abs (A:typ) (t:term) (*r abstractions *)
| trm_app (t1:term) (t2:term) (*r applications *)
| trm_cast (t:term) (p:var) (b:bool) (A:typ) (B:typ) (*r annotation *)
| trm_add : term (*r addition *)
| trm_addl (i5:i) (*r addl *).
Definition ctx : Set := list ( atom * typ ).
Definition ls : Set := list st.
(* EXPERIMENTAL *)
(** auxiliary functions on the new list types *)
(** library functions *)
(** subrules *)
(** arities *)
(** opening up abstractions *)
Fixpoint open_term_wrt_term_rec (k:nat) (t_5:term) (t__6:term) {struct t__6}: term :=
match t__6 with
| (trm_var_b nat) =>
match lt_eq_lt_dec nat k with
| inleft (left _) => trm_var_b nat
| inleft (right _) => t_5
| inright _ => trm_var_b (nat - 1)
end
| (trm_var_f x) => trm_var_f x
| (trm_lit i5) => trm_lit i5
| (trm_abs A t) => trm_abs A (open_term_wrt_term_rec (S k) t_5 t)
| (trm_app t1 t2) => trm_app (open_term_wrt_term_rec k t_5 t1) (open_term_wrt_term_rec k t_5 t2)
| (trm_cast t p b A B) => trm_cast (open_term_wrt_term_rec k t_5 t) p b A B
| trm_add => trm_add
| (trm_addl i5) => trm_addl i5
end.
Definition open_term_wrt_term t_5 t__6 := open_term_wrt_term_rec 0 t__6 t_5.
(** terms are locally-closed pre-terms *)
(** definitions *)
(* defns LC_term *)
Inductive lc_term : term -> Prop := (* defn lc_term *)
| lc_trm_var_f : forall (x:var),
(lc_term (trm_var_f x))
| lc_trm_lit : forall (i5:i),
(lc_term (trm_lit i5))
| lc_trm_abs : forall (A:typ) (t:term),
( forall x , lc_term ( open_term_wrt_term t (trm_var_f x) ) ) ->
(lc_term (trm_abs A t))
| lc_trm_app : forall (t1 t2:term),
(lc_term t1) ->
(lc_term t2) ->
(lc_term (trm_app t1 t2))
| lc_trm_cast : forall (t:term) (A B:typ) b (p:var),
(lc_term t) ->
(lc_term (trm_cast t p b A B))
| lc_trm_add :
(lc_term trm_add)
| lc_trm_addl : forall (i5:i),
(lc_term (trm_addl i5)).
(** free variables *)
Fixpoint fv_term (t_5:term) : vars :=
match t_5 with
| (trm_var_b nat) => {}
| (trm_var_f x) => {{x}}
| (trm_lit i5) => {}
| (trm_abs A t) => (fv_term t)
| (trm_app t1 t2) => (fv_term t1) \u (fv_term t2)
| (trm_cast t p b A B) => (fv_term t)
| trm_add => {}
| (trm_addl i5) => {}
end.
(** substitutions *)
Fixpoint subst_term (t_5:term) (x5:var) (t__6:term) {struct t__6} : term :=
match t__6 with
| (trm_var_b nat) => trm_var_b nat
| (trm_var_f x) => (if eq_var x x5 then t_5 else (trm_var_f x))
| (trm_lit i5) => trm_lit i5
| (trm_abs A t) => trm_abs A (subst_term t_5 x5 t)
| (trm_app t1 t2) => trm_app (subst_term t_5 x5 t1) (subst_term t_5 x5 t2)
| (trm_cast t p b A B) => trm_cast (subst_term t_5 x5 t) p b A B
| trm_add => trm_add
| (trm_addl i5) => trm_addl i5
end.
(** definitions *)
(* defns Grounds *)
Inductive Ground : typ -> Prop := (* defn Ground *)
| Ground_lit :
Ground t_int
| Ground_dyn :
Ground (t_arrow t_dyn t_dyn) .
(* defns Values *)
Inductive valueb : term -> Prop := (* defn valueb *)
| valueb_lit : forall (i5:i),
valueb (trm_lit i5)
| valueb_add :
valueb trm_add
| valueb_addl : forall (i5:i),
valueb ( (trm_addl i5) )
| valueb_anno : forall (A:typ) (t:term),
lc_term (trm_abs A t) ->
valueb (trm_abs A t)
| valueb_fanno : forall (v:term) (A B C D:typ) (p:var) b,
valueb v ->
valueb ( (trm_cast v p b (t_arrow A B) (t_arrow C D) ) )
| valueb_dyn : forall (v:term) (B:typ) (p:var) b,
valueb v ->
Ground B ->
valueb ( (trm_cast v p b B t_dyn) ) .
(* defns Consistency *)
Inductive sim : typ -> typ -> Prop := (* defn sim *)
| S_i :
sim t_int t_int
| S_arr : forall (A B C D:typ),
sim C A ->
sim B D ->
sim (t_arrow A B) (t_arrow C D)
| S_dynl : forall (A:typ),
sim t_dyn A
| S_dynr : forall (A:typ),
sim A t_dyn.
(* defns Btyping *)
Inductive Btyping : ctx -> term -> typ -> Prop := (* defn Btyping *)
| Btyp_lit : forall (G:ctx) (i5:i),
uniq G ->
Btyping G (trm_lit i5) t_int
| Btyp_var : forall (G:ctx) (x:var) (A:typ),
uniq G ->
binds x A G ->
Btyping G (trm_var_f x) A
| Btyp_abs : forall (L:vars) (G:ctx) (A:typ) (t:term) (B:typ),
( forall x , x \notin L -> Btyping (cons ( x , A ) G ) ( open_term_wrt_term t (trm_var_f x) ) B ) ->
Btyping G (trm_abs A t) (t_arrow A B)
| Btyp_app : forall (G:ctx) (t1 t2:term) (B A:typ),
Btyping G t1 (t_arrow A B) ->
Btyping G t2 A ->
Btyping G (trm_app t1 t2) B
| Btyp_add : forall (G:ctx),
uniq G ->
Btyping G trm_add (t_arrow t_int (t_arrow t_int t_int) )
| Btyp_addl : forall (G:ctx) (i1:i),
uniq G ->
Btyping G (trm_addl i1) (t_arrow t_int t_int)
| Btyp_cast : forall (G:ctx) (t:term) (A B:typ) (p:var) b,
Btyping G t A ->
sim A B ->
Btyping G ( (trm_cast t p b A B) ) B.
Inductive ctx_itemb : Type :=
| appCtxLb : term -> ctx_itemb
| appCtxRb : term -> ctx_itemb
| castCtxb : var -> bool-> typ -> typ -> ctx_itemb.
Inductive wellformedb : ctx_itemb -> Prop :=
| wf_appCtxLb : forall (e : term),
lc_term e ->
wellformedb (appCtxLb e)
| wf_appCtxRb : forall (v : term),
valueb v ->
wellformedb (appCtxRb v)
| wf_castCtxb : forall (A B: typ) p b,
wellformedb (castCtxb p b A B).
Definition fillb (E : ctx_itemb) (e : term) : term :=
match E with
| appCtxLb e2 => trm_app e e2
| appCtxRb v1 => trm_app v1 e
| castCtxb p b A B => trm_cast e p b A B
end.
Inductive resb : Type :=
| t_term : term -> resb
| t_blame : var -> bool -> resb.
(* defns Semantics *)
Inductive bstep : term -> resb -> Prop := (* defn step *)
| do_stepb E e1 e2 :
wellformedb E ->
bstep e1 ( t_term e2 ) ->
bstep (fillb E e1) (t_term (fillb E e2))
| blame_stepb E e1 p b:
wellformedb E ->
bstep e1 (t_blame p b) ->
bstep (fillb E e1) (t_blame p b)
| bStep_beta : forall (A:typ) (t v:term),
lc_term (trm_abs A t) ->
valueb v ->
bstep (trm_app ( (trm_abs A t) ) v) (t_term (open_term_wrt_term t v ) )
| bStep_lit : forall (i5:i) (p:var) b,
bstep (trm_cast (trm_lit i5) p b t_int t_int) (t_term (trm_lit i5))
| bStep_abeta : forall (v1:term) (A B C D:typ) (v2:term) (p:var) b,
valueb ( (trm_cast v1 p b (t_arrow A B) (t_arrow C D)) ) ->
valueb v2 ->
bstep (trm_app ( (trm_cast v1 p b (t_arrow A B) (t_arrow C D)) ) v2) (t_term (trm_cast ( (trm_app v1 ( (trm_cast v2 p (negb b) C A) ) ) ) p b B D))
| bStep_dd : forall (v:term) (p:var) b,
valueb v ->
bstep ( (trm_cast v p b t_dyn t_dyn) ) (t_term v)
| bStep_anyd : forall (v:term) (A:typ) (p:var) b,
valueb v ->
not ( A = t_dyn ) ->
not ( A = (t_arrow t_dyn t_dyn) ) ->
sim A (t_arrow t_dyn t_dyn) ->
bstep ( (trm_cast v p b A t_dyn) ) (t_term (trm_cast ( (trm_cast v p b A (t_arrow t_dyn t_dyn)) ) p b (t_arrow t_dyn t_dyn) t_dyn) )
(* | bStep_dyna : forall (v:term) (A:typ),
valueb v ->
not ( A = t_dyn ) ->
not ( A = (t_arrow t_dyn t_dyn) ) ->
sim A (t_arrow t_dyn t_dyn) ->
bstep ( (trm_cast v t_dyn A) ) (t_term (trm_cast ( (trm_cast v t_dyn (t_arrow t_dyn t_dyn)) ) (t_arrow t_dyn t_dyn) A) ) *)
| bStep_dyna : forall (v:term) (A:typ) (p:var) b ,
valueb v ->
not ( A = t_dyn ) ->
not ( A = (t_arrow t_dyn t_dyn) ) ->
sim A (t_arrow t_dyn t_dyn) ->
bstep ( (trm_cast v p b t_dyn A) ) (t_term (trm_cast (trm_cast v p b t_dyn (t_arrow t_dyn t_dyn)) p b (t_arrow t_dyn t_dyn) A) )
| bStep_vany : forall (v:term) (A:typ) (p:var) (q:var) b1 b2,
Ground A ->
valueb v ->
bstep ( (trm_cast ( (trm_cast v q b1 A t_dyn) ) p b2 t_dyn A) ) (t_term v)
| bStep_vanyp : forall (v:term) (A B:typ) (p:var) (q:var) b1 b2,
Ground A ->
Ground B ->
not (A = B) ->
valueb v ->
bstep ( (trm_cast ( (trm_cast v q b1 A t_dyn) ) p b2 t_dyn B) ) (t_blame p b2)
| bStep_add : forall (i1:i),
bstep (trm_app trm_add (trm_lit i1)) (t_term (trm_addl i1))
| bStep_addl : forall (i1 i2:i),
bstep (trm_app ( (trm_addl i1) ) (trm_lit i2)) (t_term (trm_lit ( i1 + i2 ))).
(*
(* defns Semantics *)
Inductive bstep : term -> resb -> Prop := (* defn step *)
| do_stepb E e1 e2 :
wellformedb E ->
bstep e1 ( t_term e2 ) ->
bstep (fillb E e1) (t_term (fillb E e2))
| blame_stepb E e1 :
wellformedb E ->
bstep e1 (t_blame) ->
bstep (fillb E e1) (t_blame)
| bStep_beta : forall (A:typ) (t v:term),
lc_term (trm_abs A t) ->
valueb v ->
bstep (trm_app ( (trm_abs A t) ) v) (t_term (open_term_wrt_term t v ) )
| bStep_lit : forall (i5:i),
bstep (trm_cast (trm_lit i5) t_int t_int) (t_term (trm_lit i5))
| bStep_abeta : forall (v1:term) (A B C D:typ) (v2:term),
valueb ( (trm_cast v1 (t_arrow A B) (t_arrow C D)) ) ->
valueb v2 ->
bstep (trm_app ( (trm_cast v1 (t_arrow A B) (t_arrow C D)) ) v2) (t_term (trm_cast ( (trm_app v1 ( (trm_cast v2 C A) ) ) ) B D))
| bStep_dd : forall (v:term),
valueb v ->
bstep ( (trm_cast v t_dyn t_dyn) ) (t_term v)
| bStep_anyd : forall (v:term) (A:typ),
valueb v ->
not ( A = t_dyn ) ->
not ( A = (t_arrow t_dyn t_dyn) ) ->
sim A (t_arrow t_dyn t_dyn) ->
bstep ( (trm_cast v A t_dyn) ) (t_term (trm_cast ( (trm_cast v A (t_arrow t_dyn t_dyn)) ) (t_arrow t_dyn t_dyn) t_dyn) )
| bStep_dyna : forall (v:term) (A:typ),
valueb v ->
not ( A = t_dyn ) ->
not ( A = (t_arrow t_dyn t_dyn) ) ->
sim A (t_arrow t_dyn t_dyn) ->
bstep ( (trm_cast v t_dyn A) ) (t_term (trm_cast ( (trm_cast v t_dyn (t_arrow t_dyn t_dyn)) ) (t_arrow t_dyn t_dyn) A) )
| bStep_vany : forall (v:term) (A:typ),
Ground A ->
valueb v ->
bstep ( (trm_cast ( (trm_cast v A t_dyn) ) t_dyn A) ) (t_term v)
| bStep_vanyp : forall (v:term) (A B:typ),
Ground A ->
Ground B ->
not (A = B) ->
valueb v ->
bstep ( (trm_cast ( (trm_cast v A t_dyn) ) t_dyn B) ) t_blame
| bStep_add : forall (i1:i),
bstep (trm_app trm_add (trm_lit i1)) (t_term (trm_addl i1))
| bStep_addl : forall (i1 i2:i),
bstep (trm_app ( (trm_addl i1) ) (trm_lit i2)) (t_term (trm_lit ( i1 + i2 ))). *)
(* defns Semantics *)
(* Inductive bstep : term -> resb -> Prop := (* defn step *)
| do_stepb E e1 e2 :
wellformedb E ->
bstep e1 ( t_term e2 ) ->
bstep (fillb E e1) (t_term (fillb E e2))
| blame_stepb E e1 p b:
wellformedb E ->
bstep e1 (t_blame p b) ->
bstep (fillb E e1) (t_blame p b)
| bStep_beta : forall (A:typ) (t v:term),
lc_term (trm_abs A t) ->
valueb v ->
bstep (trm_app ( (trm_abs A t) ) v) (t_term (open_term_wrt_term t v ) )
| bStep_lit : forall (i5:i) (p:var) b,
bstep (trm_cast (trm_lit i5) p b t_int t_int) (t_term (trm_lit i5))
| bStep_abeta : forall (v1:term) (A B C D:typ) (v2:term) (p:var) b,
valueb ( (trm_cast v1 p b (t_arrow A B) (t_arrow C D)) ) ->
valueb v2 ->
bstep (trm_app ( (trm_cast v1 p b (t_arrow A B) (t_arrow C D)) ) v2) (t_term (trm_cast ( (trm_app v1 ( (trm_cast v2 p (negb b) C A) ) ) ) p b B D))
| bStep_dd : forall (v:term) (p:var) b,
valueb v ->
bstep ( (trm_cast v p b t_dyn t_dyn) ) (t_term v)
| bStep_anyd : forall (v:term) (A:typ) (p:var) b,
valueb v ->
not ( A = t_dyn ) ->
not ( A = (t_arrow t_dyn t_dyn) ) ->
sim A (t_arrow t_dyn t_dyn) ->
bstep ( (trm_cast v p b A t_dyn) ) (t_term (trm_cast ( (trm_cast v p b A (t_arrow t_dyn t_dyn)) ) p b (t_arrow t_dyn t_dyn) t_dyn) )
| bStep_dyna : forall (v:term) (A:typ) (p:var) b p0 b0 ,
valueb v ->
not ( A = t_dyn ) ->
not ( A = (t_arrow t_dyn t_dyn) ) ->
sim A (t_arrow t_dyn t_dyn) ->
bstep ( (trm_cast (trm_cast v p0 b0 (t_arrow t_dyn t_dyn) t_dyn) p b t_dyn A) ) (t_term (trm_cast v p b (t_arrow t_dyn t_dyn) A) )
| bStep_vany : forall (v:term) (A:typ) (p:var) (q:var) b1 b2,
Ground A ->
valueb v ->
bstep ( (trm_cast ( (trm_cast v q b1 A t_dyn) ) p b2 t_dyn A) ) (t_term v)
| bStep_vanyp : forall (v:term) (A B:typ) (p:var) (q:var) b1 b2,
Ground A ->
not (sim A B) ->
valueb v ->
bstep ( (trm_cast ( (trm_cast v q b1 A t_dyn) ) p b2 t_dyn B) ) (t_blame p b2)
| bStep_add : forall (i1:i),
bstep (trm_app trm_add (trm_lit i1)) (t_term (trm_addl i1))
| bStep_addl : forall (i1 i2:i),
bstep (trm_app ( (trm_addl i1) ) (trm_lit i2)) (t_term (trm_lit ( i1 + i2 ))). *)
Inductive bsteps : term -> resb -> Prop :=
| bstep_refl e:
bsteps e (t_term e)
| bstep_n e t' e':
bstep e (t_term e') ->
bsteps e' (t_term t') ->
bsteps e (t_term t')
| bstep_nb e e' p b:
bstep e (t_term e') ->
bsteps e' (t_blame p b) ->
bsteps e (t_blame p b)
| bstep_b e p b:
bstep e (t_blame p b) ->
bsteps e (t_blame p b).
Inductive bbsteps : term -> resb -> nat -> Prop :=
| bbstep_refl e:
bbsteps e (t_term e) 0
| bbstep_n e t' e': forall n,
bstep e (t_term e') ->
bbsteps e' (t_term t') n ->
bbsteps e (t_term t') (1+n)
| bbstep_nb e e' p b:forall n,
bstep e (t_term e') ->
bbsteps e' (t_blame p b) n ->
bbsteps e (t_blame p b) (1+n)
| bbstep_b e p b:
bstep e (t_blame p b) ->
bbsteps e (t_blame p b) 1.
(** infrastructure *)
Hint Constructors Ground valueb sim bbsteps wellformedb bstep bsteps Btyping lc_term : core.
|
State Before: V : Type u_1
inst✝¹ : NormedAddCommGroup V
inst✝ : InnerProductSpace ℝ V
x y : V
h : inner x y = 0
⊢ angle x (x + y) ≤ π / 2 State After: V : Type u_1
inst✝¹ : NormedAddCommGroup V
inst✝ : InnerProductSpace ℝ V
x y : V
h : inner x y = 0
⊢ 0 ≤ ‖x‖ / ‖x + y‖ Tactic: rw [angle_add_eq_arccos_of_inner_eq_zero h, Real.arccos_le_pi_div_two] State Before: V : Type u_1
inst✝¹ : NormedAddCommGroup V
inst✝ : InnerProductSpace ℝ V
x y : V
h : inner x y = 0
⊢ 0 ≤ ‖x‖ / ‖x + y‖ State After: no goals Tactic: exact div_nonneg (norm_nonneg _) (norm_nonneg _) |
\chapter{Hypothesis testing} % Casella berger, S373
\section{Introduction}
This chapter explains hypothesis testing and p-values. A hypothesis is defined as below.
\begin{defn}
A hypothesis is a statement about a population parameter.
\end{defn}
Furthermore, one tests two hypotheses against each other. A definition of this is as follows.
\begin{defn}
The two complementary hypotheses in a hypothesis testing problem are called the null hypothesis and the alternative hypothesis. They are denoted by $H_0$ and $H_1$, respectively.
\end{defn}
To perform a hypothesis test one needs to calculate a test statistic from the data. For a one-sided test, the data is considered inconsistent with the null model for large values of the test statistic. For a two-sided test, the data can also be inconsistent with the null hypothesis for small values of the test statistic. This leads to the calculation of p-values.
\section{P-values} % s397
A p-value can give the result of a hypothesis test. The following definition and theorem can be found in \cite{casella2002statistical}. First we define a valid p-value.
\begin{defn}
\label{def:valpval}
A p-value $p(\boldsymbol{X})$ is a test statistic satisfying $0 \leq p(\boldsymbol{x}) \leq 1$ for every sample point $\boldsymbol{x}$. Small values of $p(\boldsymbol{X})$ give evidence that $H_1$ is true. A p-value is valid if, for every $\theta \in \Theta_0$ and every $0 \leq \alpha \leq 1$,
\begin{equation}
P_\theta (p(\boldsymbol{X}) \leq \alpha) \leq \alpha.
\end{equation}
\label{defn:validpvalue}
\end{defn}
Then a p-value is as follows.
\begin{theorem}
\label{th:pvalue}
Let $W(\boldsymbol{X})$ be a test statistic such that large values of $W$ give evidence that $H_1$ is true. For each sample point $\boldsymbol{x}$, define
\begin{equation*}
p(\boldsymbol{x}) = \sup_{\theta \in \Theta_0} P_\theta (W(\boldsymbol{X}) \geq W(\boldsymbol{x})).
\end{equation*}
Then, $p(\boldsymbol{X})$ is a valid p-value.
\end{theorem}
Here $\Theta_0$ is the subset of the parameter space corresponding to the null model.
P-values can also be defined by using sufficient statistics. A p-value is then defined as
\begin{equation}
p(\boldsymbol{x}) = P(W(\boldsymbol{X}) \geq W(\boldsymbol{x}) | S(\boldsymbol{X}) = S(\boldsymbol{x})),
\label{eq:pvalue}
\end{equation}
where $S(\boldsymbol{x})$ is a sufficient statistic under the null hypothesis.
By this definition the p-value given a sufficient statistic is valid, as shown below:
\begin{equation*}
P_\theta(p(\boldsymbol{X}) \leq \alpha ) = \sum_x P(p(\boldsymbol{X}) \leq \alpha | S(\boldsymbol{X}) =s) P_\theta (S(\boldsymbol{X})=s) \leq \sum_s \alpha P_\theta (S(\boldsymbol{X})=s) = \alpha.
\end{equation*}
This result is for a discrete $S(\boldsymbol{X})$. However, for the continuous case one can replace the sums with integrals.
\\
\\
For a two-sided hypothesis test the p-value is found by the equation below,
\begin{equation*}
p(\boldsymbol{x}) = 2(\min(P(W(\boldsymbol{X}) \geq W(\boldsymbol{x})), P(W(\boldsymbol{X}) \leq W(\boldsymbol{x})))).
\end{equation*}
\\ % http://www.math.ntnu.no/~bo/Fordypning/2015/Marius/NHPP-Iran-paper.pdf
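As a small illustration of this two-sided construction, the Python sketch below computes such a p-value when the null distribution function of a continuous test statistic is available as a callable. This is only a sketch (the computations in this thesis are carried out in R), and the availability of such a distribution function is an assumption made here for illustration.
\begin{verbatim}
def two_sided_p_value(cdf, w_obs):
    # cdf: null cumulative distribution function of the (continuous) statistic W
    lower = cdf(w_obs)          # P(W <= w_obs)
    upper = 1.0 - cdf(w_obs)    # P(W >= w_obs) for a continuous statistic
    return min(1.0, 2.0 * min(lower, upper))
\end{verbatim}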
\section{Test statistics}
\label{sec:teststat}
As mentioned earlier, to calculate the p-value one needs a test statistic. A test statistic is typically a function which takes a sample as input and returns a measure of a certain attribute. For our data we will use several types of test statistics. The test statistics are applied to the transformed times, as has also been done in \cite{lindqvist2011monte}. The transformed times have a uniform distribution between 0 and 1. This is because if the times $t_1,...,t_n$ are from a NHPP, then $\Lambda(t_1),...,\Lambda(t_n)$ are from a homogeneous Poisson process with intensity 1. The transformed times are defined as below.
\begin{equation}
V_j = \frac{\Lambda(t_j)}{\Lambda(\tau)}.
\label{eq:transtimes}
\end{equation}
How to find the transformed times for our NHPP is discussed in section \ref{sec:teststatpvalue}.
The different test statistics used will be introduced below. Here $\hat{V}_j$ are the estimated versions of $V_j$ obtained by using estimated parameters in $\Lambda(\cdot)$.
\subsection{Greenwood statistic}
The Greenwood statistic is a two-sided statistic; the null hypothesis is rejected for both small and large values.
\begin{equation*}
G = \sum_{j=1}^{n+1} (\hat{V_j} - \hat{V}_{j-1})^2
\end{equation*}
\subsection{Laplace statistic}
This is also a two-sided statistic, where the null hypothesis is rejected for both small and large values.
\begin{equation*}
L = \sqrt{\frac{12}{n}} \sum_{j=1}^{n+1} \left( \hat{V_j} - \frac{1}{2} \right)
\end{equation*}
\subsection{Modified Cramer von Mises statistic}
\begin{equation*}
W^2 = \sum_{j=1}^{n} \left[ \hat{V}_j - \frac{(2j-1)}{2n} \right]^2 + \frac{1}{12n}
\end{equation*}
\subsection{Modified Kolmogorov-Smirnov statistic}
\begin{equation*}
D = max[D^+ , D^-]
\end{equation*}
\begin{equation*}
D^+ = \max_{1 \leq j \leq n} \left( \frac{j}{n} - \hat{V}_j \right)
\end{equation*}
\begin{equation*}
D^- = \max_{1 \leq j \leq n} \left( \hat{V}_j - \frac{(j-1)}{n} \right)
\end{equation*}
\\
\\
Both the Modified Cramer von Mises statistic and Modified Kolmogorov-Smirnov statistic reject the null hypothesis for large values of the statistic.
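For illustration, the sketch below computes these four statistics from the estimated transformed times. It is written in Python purely to mirror the formulas above (the analysis in this thesis is carried out in R), and it assumes that the estimated transformed times $\hat{V}_1,\dots,\hat{V}_n$ are supplied as a sorted array, that the Greenwood statistic is augmented with $\hat{V}_0 = 0$ and $\hat{V}_{n+1} = 1$, and that the Laplace sum is taken over the $n$ observed transformed times.
\begin{verbatim}
import numpy as np

def nhpp_test_statistics(v):
    # v: sorted estimated transformed times V_1, ..., V_n in (0, 1)
    v = np.sort(np.asarray(v, dtype=float))
    n = len(v)
    j = np.arange(1, n + 1)
    # Greenwood: squared gaps of 0, V_1, ..., V_n, 1
    gaps = np.diff(np.concatenate(([0.0], v, [1.0])))
    greenwood = np.sum(gaps ** 2)
    # Laplace (sum over the n observed transformed times)
    laplace = np.sqrt(12.0 / n) * np.sum(v - 0.5)
    # Modified Cramer-von Mises
    cvm = np.sum((v - (2.0 * j - 1.0) / (2.0 * n)) ** 2) + 1.0 / (12.0 * n)
    # Modified Kolmogorov-Smirnov
    d_plus = np.max(j / n - v)
    d_minus = np.max(v - (j - 1.0) / n)
    ks = max(d_plus, d_minus)
    return greenwood, laplace, cvm, ks
\end{verbatim}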
\section{Test statistics and p-values in NHPP}
\label{sec:teststatpvalue}
To be able to use the test statistics in section \ref{sec:teststat} we need to find the transformed times as defined in equation \ref{eq:transtimes}. To do this the parameters of the rate function need to be estimated. These estimates can be found by using maximum likelihood estimators as defined in chapter \ref{chap:like}. From equation \ref{eq:loglike} we see that finding the maximum likelihood estimates might not be trivial because of the term $\Lambda(\tau)$. However, the maximum likelihood estimates can be found numerically in the R programming language by using the built-in function \texttt{optim}. Furthermore, the integral in equation \ref{eq:largelambda} can also be solved numerically. Hence the transformed times and test statistics can be calculated.
\\
\\
To calculate the p-value in equation \ref{eq:pvalue} we refer to \cite{iranNHPP}. From this paper we have that the p-value can be estimated by
\begin{equation*}
\hat{p} = \#\{W^* \geq W_{obs}\}/M,
\end{equation*}
where $W^*$ is a test statistic for a sample, $W_{obs}$ is the observed test statistic from the original data and $M$ is the number of samples.
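A minimal Python sketch of this estimator is given below (illustrative only; the computations in this thesis are done in R). It assumes user-supplied functions that simulate one data set under the fitted null model and compute the test statistic for a data set.
\begin{verbatim}
import numpy as np

def estimate_p_value(w_obs, simulate_null, statistic, M=10000):
    # simulate_null(): returns one simulated data set under the null model
    # statistic(data): returns the test statistic of a data set
    w_star = np.array([statistic(simulate_null()) for _ in range(M)])
    return np.mean(w_star >= w_obs)   # #{W* >= W_obs} / M
\end{verbatim}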
|
import numpy as np
import mathutils
import itertools
class Environment:
def __init__(self, dimension, num_peaks, initial_angle):
self.H = np.zeros((dimension, num_peaks))
self.W = np.zeros((dimension, num_peaks))
self.C = np.zeros((dimension, num_peaks))
self.S = initial_angle  # initial rotation angle
self.timeStep = 0
class RMPB:
def __init__(self):
#CONSTANTS
# search space boundaries
self.X_MIN = -25.
self.X_MAX = 25.
#search space dimension
self.DIM = 2
# height
self.H_MIN = 30.
self.H_MAX = 70.
self.H_SEV = 5.
# width
self.W_MIN = 1.
self.W_MAX = 13.
self.W_SEV = 0.5
# angle
self.S_MIN = -np.pi
self.S_MAX = np.pi
self.S_SEV = 1.0
# initial angle for rotation
self.INITIAL_ANGLE = 0.
# chaotic constant
self.A = 3.67
# gamma
self.GAMMA = 0.04
# gamma max
self.GAMMA_MAX = 0.1
# period
self.PERIOD = 12
# noisy severity
self.NOISY_SEV = 0.8
self.RAND_SEED = 12345
## Factors subject of experimentation
#number of peaks for each dimension
self.num_peaks = 5
#number of environment to learn
self.learning_period = 20
# time windows : number of future environments for R estimation
self.time_windows = 2
# number of function evaluations before a change
self.computational_budget = 2500
# number of changes for each simulation (run) : number of environments
self.num_changes = 100
# change type experimented for each simulation (run)
self.change_type = 1
## Internal attributes
# environments (corresponding to the initial and
# those obtained after a change)
self.environments = []
self.curr_env = 0
self.C_change = self.rotate_position
self.P_change = self.ct_small_step
self.ss = []
self.minimize = False
def init(self):
self.rnd = np.random.RandomState(self.RAND_SEED)
if(self.change_type == 1):
self.P_change = self.ct_small_step
elif(self.change_type == 2):
self.P_change = self.ct_large_step
elif(self.change_type == 3):
self.P_change = self.ct_random
elif(self.change_type == 4):
self.P_change = self.ct_chaotic
self.C_change = self.ct_dummy
elif(self.change_type == 5):
self.P_change = self.ct_recurrent
elif(self.change_type == 6):
self.P_change = self.ct_recurrent_with_noise
self.curr_env = 0
#self.rnd = np.random.RandomState(self.RAND_SEED)
# initilizing the environments
self.build_environments()
def build_environments(self):
self.environments = []
self.ss = []
#initial environment without change
env0 = Environment(self.DIM, self.num_peaks, self.INITIAL_ANGLE)
env0.C = self.rnd.uniform(low = self.X_MIN, high = self.X_MAX, size = (self.DIM, self.num_peaks))
env0.H = self.rnd.uniform(low = self.H_MIN, high = self.H_MAX, size = (self.DIM, self.num_peaks))
env0.W = self.rnd.uniform(low = self.W_MIN, high = self.W_MAX, size = (self.DIM, self.num_peaks))
env0.timeStep = 0
self.environments.append(env0)
self.ss.append(env0.S)
#generate the rest of the environments from env0
for i in range(1, self.num_changes + self.time_windows):
env = Environment(dimension=self.DIM, num_peaks=self.num_peaks, initial_angle=self.INITIAL_ANGLE)
env.timeStep = i
self.environments.append(env)
self.P_change(i)
self.C_change(i)
self.ss.append(env.S)
def evaluate(self, x):
return self.eval_env(x, self.curr_env)
def evaluate_vect(self, x):
return np.apply_along_axis(self.evaluate, 1, x)
def eval_env(self, x, env_id):
env = self.environments[env_id]
all_peaks = env.H - env.W * np.abs(env.C - np.tile(x, (self.num_peaks, 1)).transpose())
max_peaks = np.max(all_peaks, axis=1)
return np.mean(max_peaks)
def true_robusteness_eval(self, x):
result = [self.eval_env(x, env_id) for env_id in range(self.curr_env, self.curr_env + self.time_windows - 1)]
return self.robustness_definition(result)
def true_robusteness_eval_vect(self, x):
return np.apply_along_axis(self.true_robusteness_eval, 1, x)
def robustness_definition(self, vect_f):
return np.mean(vect_f)
def change(self):
self.curr_env += 1
def rotate_position(self, env_id):
env = self.environments[env_id]
prev_env = self.environments[env.timeStep-1]
#Rotation matrix
c, s = np.cos(env.S), np.sin(env.S)
rot_mat = np.array(((c, -s), (s, c)))
def apply_rotation(col_vect):
return np.dot(col_vect, rot_mat)
env.C = np.apply_along_axis(apply_rotation, 0, prev_env.C)
env.C = np.clip(env.C, self.X_MIN, self.X_MAX)
def ct_small_step(self, env_id):
def change(prev_data, min_val, max_val, sev, gamma, rnd_val):
result = prev_data + gamma * (max_val - min_val) * sev * (2* rnd_val - 1)
return result.clip(min_val, max_val)
env = self.environments[env_id]
prev_env = self.environments[env.timeStep-1]
env.H = change(prev_env.H, self.H_MIN, self.H_MAX, self.H_SEV, self.GAMMA, self.rnd.uniform(size=prev_env.H.shape))
env.W = change(prev_env.W, self.W_MIN, self.W_MAX, self.W_SEV, self.GAMMA, self.rnd.uniform(size=prev_env.W.shape))
env.S = change(np.array([prev_env.S]), self.S_MIN, self.S_MAX, self.S_SEV, self.GAMMA, self.rnd.uniform(size=(1,)))
env.S = env.S[0]
def ct_large_step(self, env_id):
def change(prev_data, min_val, max_val, sev, gamma, rnd_val):
result = 2 * rnd_val - 1
result = prev_data + (max_val-min_val)*(gamma * mathutils.sign(result) + (self.GAMMA_MAX - gamma)* result) * sev
return result.clip(min_val, max_val)
env = self.environments[env_id]
prev_env = self.environments[env.timeStep-1]
env.H = change(prev_env.H, self.H_MIN, self.H_MAX, self.H_SEV, self.GAMMA, self.rnd.uniform(size=prev_env.H.shape))
env.W = change(prev_env.W, self.W_MIN, self.W_MAX, self.W_SEV, self.GAMMA, self.rnd.uniform(size=prev_env.W.shape))
env.S = change(np.array([prev_env.S]), self.S_MIN, self.S_MAX, self.S_SEV, self.GAMMA, self.rnd.uniform(size=(1,)))
env.S = env.S[0]
def ct_random(self, env_id):
def change(prev_data, min_val, max_val, sev, rnd_val):
result = prev_data * rnd_val * sev
return result.clip(min_val, max_val)
env = self.environments[env_id]
prev_env = self.environments[env.timeStep-1]
env.H = change(prev_env.H, self.H_MIN, self.H_MAX, self.H_SEV, self.rnd.normal(size=prev_env.H.shape))
env.W = change(prev_env.W, self.W_MIN, self.W_MAX, self.W_SEV, self.rnd.normal(size=prev_env.W.shape))
env.S = change(np.array([prev_env.S]), self.S_MIN, self.S_MAX, self.S_SEV, self.rnd.normal(size=(1,)))
env.S = env.S[0]
def ct_dummy(self, env_id):
pass
def ct_chaotic(self, env_id):
def change(prev_data, min_val, max_val):
result = min_val * self.A * (prev_data - min_val) * (1 - (prev_data - min_val)/(max_val-min_val))
return result.clip(min_val, max_val)
env = self.environments[env_id]
prev_env = self.environments[env.timeStep-1]
env.H = change(prev_env.H, self.H_MIN, self.H_MAX)
env.W = change(prev_env.W, self.W_MIN, self.W_MAX)
env.C = change(prev_env.C, self.X_MIN, self.X_MAX)
#env.S = env.S[0]
def ct_recurrent(self, env_id):
def change(prev_data, min_val, max_val, angle):
result = min_val + (max_val-min_val) *(np.sin(2*(np.pi*env_id)/self.PERIOD + angle) + 1)/2.;
return result.clip(min_val, max_val)
env = self.environments[env_id]
prev_env = self.environments[env.timeStep-1]
angles = np.array([x + y for x in range(self.DIM) for y in range(self.num_peaks)])
angles = self.PERIOD * angles/(self.DIM + self.num_peaks)
angles = np.reshape(angles, (self.DIM, self.num_peaks))
env.H = change(prev_env.H, self.H_MIN, self.H_MAX, angles)
env.W = change(prev_env.W, self.W_MIN, self.W_MAX, angles)
env.S = 2*np.pi/self.PERIOD
def ct_recurrent_with_noise(self, env_id):
def change(prev_data, min_val, max_val, angle, rnd_val):
result = min_val + (max_val-min_val) *(np.sin(2*(np.pi*env_id)/self.PERIOD + angle) + 1)/2.;
result = result + self.NOISY_SEV*rnd_val
return result.clip(min_val, max_val)
env = self.environments[env_id]
prev_env = self.environments[env.timeStep-1]
angles = np.array([x + y for x in range(self.DIM) for y in range(self.num_peaks)])
angles = self.PERIOD * angles/(self.DIM + self.num_peaks)
angles = np.reshape(angles, (self.DIM, self.num_peaks))
env.H = change(prev_env.H, self.H_MIN, self.H_MAX, angles, self.rnd.normal(size=prev_env.H.shape))
env.W = change(prev_env.W, self.W_MIN, self.W_MAX, angles, self.rnd.normal(size=prev_env.W.shape))
env.S = 2*np.pi/self.PERIOD
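# Usage sketch (illustrative): evaluate a candidate solution in the current
# environment, estimate its robustness over the time window, then advance
# to the next environment.
#
#   problem = RMPB()
#   problem.init()
#   x = np.zeros(problem.DIM)
#   fitness = problem.evaluate(x)
#   robustness = problem.true_robusteness_eval(x)
#   problem.change()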
|
Formal statement is: lemma homeomorphism_of_subsets: "\<lbrakk>homeomorphism S T f g; S' \<subseteq> S; T'' \<subseteq> T; f ` S' = T'\<rbrakk> \<Longrightarrow> homeomorphism S' T' f g" Informal statement is: If $f$ is a homeomorphism from $S$ to $T$, and $S'$ and $T'$ are subsets of $S$ and $T$ respectively, such that $f(S') = T'$, then $f$ is a homeomorphism from $S'$ to $T'$. |
/*
* TS Elements
* Copyright 2015-2018 M. Newhouse
* Released under the MIT license.
*/
#pragma once
#include "control.hpp"
#include <boost/container/small_vector.hpp>
#include <boost/range/iterator_range.hpp>
#include <map>
namespace ts
{
namespace controls
{
// The KeyMapping class maps a key code to zero or more control/slot combinations.
// Keys must be lookuppable (is that a word? I think not) in an efficient manner.
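// Usage sketch (illustrative only; `key` and `control` are assumed to be
// obtained from the game's input configuration, and `handle` is a
// hypothetical dispatch function):
//
//   KeyMapping<int> mapping;
//   mapping.define_control(key, control, /*slot=*/0);
//   for (const auto& entry : mapping.controls_by_key(key))
//       handle(entry.control, entry.slot);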
template <typename KeyCode>
class KeyMapping
{
public:
struct Entry
{
Control control;
std::uint32_t slot;
};
using entry_range = boost::iterator_range<const Entry*>;
entry_range controls_by_key(KeyCode key_code) const;
void define_control(KeyCode key_code, Control control, std::uint32_t slot);
private:
std::map<KeyCode, boost::container::small_vector<Entry, 16>> key_mapping_;
};
template <typename KeyCode>
typename KeyMapping<KeyCode>::entry_range KeyMapping<KeyCode>::controls_by_key(KeyCode key_code) const
{
auto it = key_mapping_.find(key_code);
if (it == key_mapping_.end()) return entry_range(nullptr, nullptr);
return entry_range(it->second.data(), it->second.data() + it->second.size());
}
template <typename KeyCode>
void KeyMapping<KeyCode>::define_control(KeyCode key_code, Control control, std::uint32_t slot)
{
key_mapping_[key_code].push_back({ control, slot });
}
}
}
|
-- Idris2
import System
import System.Concurrency
-- Test `conditionWaitTimeout` times out m of n threads for 1 main and n child
-- threads
main : IO ()
main =
let
n = 5
m = 3
in
do cvMutex <- makeMutex
cv <- makeCondition
-- spawn n-m infinitely patient children
waiting <- for [1..(n - m)] $ \_ => fork $
do mutexAcquire cvMutex
conditionWait cv cvMutex
putStrLn "Woke up despite no timeout (SHOULDN'T HAPPEN)"
mutexRelease cvMutex
-- spawn m impatient children
impatients <- for [1..m] $ \_ => fork $
do mutexAcquire cvMutex
conditionWaitTimeout cv cvMutex 1000000
putStrLn "Where are you mother?"
mutexRelease cvMutex
sleep m
putStrLn "Sorry I'm late children! Weren't there more of you?..."
for impatients $ \t => threadWait t
sleep 1
|
Formal statement is: lemma continuous_on_components: fixes f :: "'a::topological_space \<Rightarrow> 'b::topological_space" assumes "locally connected S " "\<And>C. C \<in> components S \<Longrightarrow> continuous_on C f" shows "continuous_on S f" Informal statement is: If $f$ is continuous on each component of a locally connected space $S$, then $f$ is continuous on $S$. |
-- |
-- Module : Southpaw.Picasso.Render
-- Description : Vector rendering in 2D with Cairo
-- Copyright : (c) Jonatan H Sundqvist, 2015
-- License : MIT
-- Maintainer : Jonatan H Sundqvist
-- Stability : experimental|stable
-- Portability : POSIX (not sure)
--
-- Created September 1 2015
-- TODO | - Rename (eg. Draw, Shapes) (?)
-- - Text, typography (find good library)
-- - Debug versions (cf. Occlusion.Render)
-- - Use BoundingBox (?)
-- SPEC | -
-- -
--------------------------------------------------------------------------------------------------------------------------------------------
-- GHC directives
--------------------------------------------------------------------------------------------------------------------------------------------
{-# LANGUAGE TupleSections #-}
--------------------------------------------------------------------------------------------------------------------------------------------
-- API
--------------------------------------------------------------------------------------------------------------------------------------------
module Southpaw.Picasso.Render where
--------------------------------------------------------------------------------------------------------------------------------------------
-- We'll need these
--------------------------------------------------------------------------------------------------------------------------------------------
import Data.Complex
import Control.Monad (forM, forM_, liftM, liftM2, void)
import Control.Monad.IO.Class
import qualified Graphics.Rendering.Cairo as Cairo
-- import qualified Graphics.Rendering.Cairo.Internal.Surfaces.Image as Image
import qualified Southpaw.Picasso.Palette as Palette
import qualified Southpaw.Picasso.Shapes as Shapes
import Southpaw.Picasso.RenderUtils
import Southpaw.Math.Constants
import Southpaw.Cartesian.Plane.Utilities
--------------------------------------------------------------------------------------------------------------------------------------------
-- Functions
--------------------------------------------------------------------------------------------------------------------------------------------
-- General ---------------------------------------------------------------------------------------------------------------------------------
-- |
-- TODO: Rename (eg. plot)
-- TODO: Generalise (not just circles)
trail :: Palette.Colour Double -> [Complex Double] -> Cairo.Render ()
trail fill trail = forM_ trail $ \dot -> do
choose fill
circle dot 3
-- |
grid :: Int -> Int -> Double -> Cairo.Render ()
grid cols rows size = do
-- TODO: Figure out how to use fill AND stroke
Cairo.setLineWidth 4
gridM_ cols rows $ \ cl rw -> tilePath cl rw >> Cairo.fill --
gridM_ cols rows $ \ cl rw -> tilePath cl rw >> Cairo.stroke --
where
chooseColour cl rw = if (cl `mod` 2) == (rw `mod` 2) then 0.3 else 0.75 -- TODO: This should be a utility function
tilePath cl rw = Cairo.rectangle (fromIntegral cl*size) (fromIntegral rw*size) size size >> Cairo.setSourceRGBA 0.22 0.81 (chooseColour cl rw) 0.32
-- Primitives ------------------------------------------------------------------------------------------------------------------------------
-- |
line :: Complex Double -> Complex Double -> Cairo.Render ()
line (fr:+om) (t:+o) = Cairo.moveTo fr om >> Cairo.lineTo t o
-- | Renders a path of connected lines
-- TODO: Options for colour, width, closed/open, etc.
linepath :: [Complex Double] -> Cairo.Render ()
linepath [] = return ()
linepath (e:dge) = void $ vectorise Cairo.moveTo e >> forM dge (vectorise Cairo.lineTo)
-- |
-- TODO: Support asymmetrical crosshairs (?)
crosshairs :: Complex Double -> Complex Double -> Cairo.Render ()
crosshairs centre size = do
line (centre - (hdx:+0)) (centre + (hdx:+0))
line (centre - (0:+hdy)) (centre + (0:+hdy))
where
(hdx:+hdy) = 0.5*size
-- Shapes ----------------------------------------------------------------------------------------------------------------------------------
-- |
-- TODO: Extract argument conversion logic (centre/size vectors to unpacked left-top/dx/dy)
rectangle :: Complex Double -> Complex Double -> Cairo.Render ()
rectangle (cx:+cy) (dx:+dy) = Cairo.rectangle (cx-dx/2) (cy-dy/2) dx dy
-- | A rectangle with an 'anchor' (in normalised coordinates) that is relative to the centre
-- TODO: Should anchor point be relative to centre or topleft corner, use normalised or absolute coords (?)
-- TODO: Less confusing terminology...
-- TODO: Refactor
-- TODO: Test
anchoredRectangle :: Complex Double -> Complex Double -> Complex Double -> Cairo.Render ()
anchoredRectangle p size anchor = rectangle (p-dotwise (*) (anchor+(0.5:+0.5)) size) size
-- |
-- TODO: Add arguments for colour, stroke, etc.
-- TODO: Maybe it'd be better if we stuck to the normal pattern of path-config-action that Cairo follows
-- TODO: Make polymorphic
polygon :: Integral int => int -> Double -> Complex Double -> (Double, Double, Double, Double) -> Bool -> Cairo.Render ()
polygon sides radius origin (r,g,b,a) filled = do
-- TODO: Refine 'wrap-around logic'
Cairo.moveTo fx fy
forM_ rest $ \(x:+y) -> Cairo.lineTo x y
Cairo.setSourceRGBA r g b a
Cairo.setLineWidth 12
if filled
then Cairo.fill
else Cairo.stroke
where
((fx:+fy):rest) = Shapes.polygon sides radius origin ++ [fx:+fy]
-- |
-- TODO: Options for fill/stroke, colour, width, etc.
circle :: Complex Double -> Double -> Cairo.Render ()
circle (cx:+cy) radius = do
Cairo.arc cx cy radius 0 τ
-- Cairo.fill
-- Composite -------------------------------------------------------------------------------------------------------------------------------
-- |
arrow :: Complex Double -> Complex Double -> Double -> Double -> Double -> Cairo.Render ()
arrow from to sl sw hw = do
let (first:rest) = closePath $ Shapes.arrow from to sl sw hw
vectorise Cairo.moveTo first
forM_ rest $ vectorise Cairo.lineTo
-- |
-- Ugh, I hate underscores so much
-- TODO: Make polymorphic
circlearc :: Int -> Complex Double -> Double -> Double -> Double -> Double -> Cairo.Render ()
circlearc
count -- Number of small circles
(ox:+oy) -- Centre of the 'arc' (pixels?)
spread -- Radius of the 'arc'
radius -- Radius of the small circles
begin -- Start angle of the arc
extent = forM_ [1..count] $ \ n -> do
let n' = fromIntegral n
let θ = begin + n'*extent/fromIntegral count
Cairo.arc (ox - spread*cos θ) (oy - spread*sin θ) radius 0 τ
Cairo.setSourceRGBA (0.5 * (1 + sin θ)) (0.1*n') (1/n') 0.95
Cairo.fill
-- |
bezier :: Complex Double -> Complex Double -> Complex Double -> Cairo.Render ()
bezier (x1:+y1) (x2:+y2) (x3:+y3) = Cairo.curveTo x1 y1 x2 y2 x3 y3
-- |
-- TODO: Generic rounded polygon
roundrect :: Complex Double -> Complex Double -> Double -> Cairo.Render ()
roundrect centre@(cx:+cy) size@(dx:+dy) radius = forM_ (zip [real, imag, real, imag] [(-dx):+(-dy), (dx):+(-dy), (dx):+(dy), (-dx):+(dy)]) $ \(dir, delta@(dx':+dy')) -> do
-- TODO: Finish refactoring
--
-- let dir = (signum (dx*dy))
-- Line segment
vectorise Cairo.moveTo (centre + delta + dir radius)
vectorise Cairo.lineTo (centre - flipx size/2 - real radius)
-- Curve
-- -- First line segment
-- vectorise Cairo.moveTo (centre - size/2 + real radius)
-- vectorise Cairo.lineTo (centre - flipx size/2 - real radius)
-- -- Curve
-- let (cx':+cy') = (centre - flipx size/2 + ((-radius):+radius)) in Cairo.arc cx' cy' radius (3*π/2) (4*π/2)
-- -- Second line segment
-- vectorise Cairo.moveTo (centre - flipx size/2 + imag radius)
-- vectorise Cairo.lineTo (centre + size/2 - imag radius)
-- -- Curve
-- let (cx':+cy') = (centre + size/2 - (radius:+radius)) in Cairo.arc cx' cy' radius 0 (π/2)
-- -- Third line segment
-- vectorise Cairo.moveTo (centre + size/2 - real radius)
-- vectorise Cairo.lineTo (centre - flipy size/2 + real radius)
-- -- Curve
-- let (cx':+cy') = (centre - flipy size/2 + flipy (radius:+radius)) in Cairo.arc cx' cy' radius (π/2) π
-- -- Fourth line segment
-- vectorise Cairo.moveTo (centre - flipy size/2 - imag radius)
-- vectorise Cairo.lineTo (centre - size/2 + imag radius)
-- -- Curve
-- let (cx':+cy') = (centre - size/2 + (radius:+radius)) in Cairo.arc cx' cy' radius π (3*π/2)
-- Images ----------------------------------------------------------------------------------------------------------------------------------
-- |
-- TODO: Factor out clip logic, document properly (cf. other anchored functions in this module)
-- TODO: Wrapper for images (?)
-- TODO: Factor out clip area (?)
anchoredImageWithClip :: (Complex Double -> Complex Double -> Cairo.Render ()) -> Complex Double -> Complex Double -> Cairo.Surface -> Cairo.Render ()
anchoredImageWithClip clip p anchor im = do
size <- imageSurfaceSize im
centre <- return $ p - dotwise (*) anchor size
clip centre size
Cairo.clip
vectorise Cairo.translate $ (centre - size*0.5)
Cairo.setSourceSurface im 0 0
-- vectorise Cairo.moveTo (centre - size*0.5)
Cairo.paint
vectorise Cairo.translate $ negate (centre - size*0.5)
Cairo.resetClip
-- |
anchoredImage :: Complex Double -> Complex Double -> Cairo.Surface -> Cairo.Render ()
anchoredImage p anchor im = anchoredImageWithClip rectangle p anchor im
-- |
imageWithClip :: (Complex Double -> Complex Double -> Cairo.Render ()) -> Complex Double -> Cairo.Surface -> Cairo.Render ()
imageWithClip clip centre im = anchoredImageWithClip clip centre (0.0:+0.0) im
-- |
image :: Complex Double -> Cairo.Surface -> Cairo.Render ()
image p im = anchoredImageWithClip rectangle p (0.0:+0.0) im
-- Typography ------------------------------------------------------------------------------------------------------------------------------
-- | Renders the given string (with an arbitrary function 'draw'). The position of the centre of the bounding box is
-- given by the pin point 'p' and an 'anchor' (whose coordinates are normalised with respect to the text bounds).
-- TODO: Extract anchoring logic
-- TODO: Define anchor constants (eg. left:+top, centre:+bottom) (?)
anchoredText :: Complex Double -> Complex Double -> (String -> Cairo.Render a) -> String -> Cairo.Render a
anchoredText p anchor draw text = do
extents <- textsize text
vectorise Cairo.moveTo $ p - dotwise (*) (anchor + (0.5:+0.5)) extents
draw text
-- |
centredText :: Complex Double -> (String -> Cairo.Render a) -> String -> Cairo.Render a
centredText centre draw text = anchoredText centre (0.0:+0.0) draw text
|
[STATEMENT]
lemma (in metric_space) tendsto_iff: "(f \<longlongrightarrow> l) F \<longleftrightarrow> (\<forall>e>0. eventually (\<lambda>x. dist (f x) l < e) F)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (f \<longlongrightarrow> l) F = (\<forall>e>0. \<forall>\<^sub>F x in F. dist (f x) l < e)
[PROOF STEP]
unfolding nhds_metric filterlim_INF filterlim_principal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<forall>b\<in>{0<..}. \<forall>\<^sub>F x in F. f x \<in> {y. dist y l < b}) = (\<forall>e>0. \<forall>\<^sub>F x in F. dist (f x) l < e)
[PROOF STEP]
by auto |
theory count
imports Main
begin
datatype nat = zero | s nat
datatype lst = nil | cons nat lst
inductive leq :: "nat \<Rightarrow> nat \<Rightarrow> bool" where
zero: "leq zero n" |
step: "leq n m \<Longrightarrow> leq (s n) (s m)"
fun leq_fn :: "nat \<Rightarrow> nat \<Rightarrow> bool" where
"leq_fn zero n = True" |
"leq_fn (s m) zero = False" |
"leq_fn (s m) (s n) = leq_fn m n"
declare leq.intros[simp,intro]
fun length :: "lst \<Rightarrow> nat" where
"length nil = zero" |
"length (cons x xs) = s (length xs)"
fun count :: "(nat \<Rightarrow> bool) \<Rightarrow> lst \<Rightarrow> nat" where
"count p nil = zero" |
"count p (cons y ys) = (if (p y) then s (count p ys) else count p ys)"
lemma leq_s_right: "\<And>n. \<And>m. leq n m \<Longrightarrow> leq n (s m)"
proof -
fix n
show "\<And>m. leq n m \<Longrightarrow> leq n (s m)" proof(induct n)
case zero
then show ?case by simp
next
case (s n)
then show ?case by (metis nat.inject leq.simps)
qed
qed
lemma leq_fn_s_right: "\<And>n. \<And>m. leq_fn n m \<Longrightarrow> leq_fn n (s m)"
proof -
fix n
show "\<And>m. leq_fn n m \<Longrightarrow> leq_fn n (s m)" proof(induct n)
case zero
then show ?case by simp
next
case (s n)
then show ?case by (metis leq_fn.elims(2) leq_fn.simps(1) leq_fn.simps(3))
qed
qed
theorem "\<And>p. \<And>xs. leq (count p xs) (length xs)" proof -
fix xs
show "\<And>p. leq (count p xs) (length xs)" proof(induct xs)
case nil
then show ?case by simp
next
case (cons y ys)
then show ?case by (simp add: leq_s_right)
qed
qed
theorem "\<And>xs. \<And>x. leq_fn (count x xs) (length xs)" proof -
fix xs
show "\<And>x. leq_fn (count x xs) (length xs)" proof(induct xs)
case nil
then show ?case by simp
next
case (cons y ys)
then show ?case by (simp add: leq_fn_s_right)
qed
qed
end |
# Copyright (c) 2018-2021, Carnegie Mellon University
# See LICENSE for details
#P Permutations
#P ------------
#P
#P Under different circumstances different objects are called permutations,
#P even in the context of linear algrebra.
#P
#P Following list describes these objects and their representation in SPIRAL:
#P
#P 1. Regular permutations
#P (a) in cycle notation : (1,2)(3,4)
#P (b) as lists : [2,1,4,3]
#P (c) as index space mapping function: i -> i+1 mod N
#P
#P 2. Parametrized permutation classes
#P [For example stride permutations L(size,str) | size mod str = 0]
#P Represented as a construction function, which returns an index mapping
#P function - (c) above.
#P
#P Conversions: ListPerm (b) <- (a) [gap.perm]
#P PermList (a) <- (b) [gap.perm]
#P PermFunc (a) <- (c) [spiral.spl.perm]
#P ListPermFunc (b) <- (c) [spiral.spl.perm]
#P
#P -------------------
#F PermFunc(<func>, <size>) . . . . . convert perm. function into explicit GAP perm.
#F
#F PermFunc converts a 0-based permutation function used in SPLs, into an explicit
#F GAP permutation. Recall that GAP permutations are 1-based, and are in cycle
#F representation.
#F
PermFunc := (func, size) ->
PermList( List([0..size-1], func) + 1 );
#F ListPermFunc(<func>, <size>) . . . . convert perm. function into explicit list perm.
#F
#F ListPermFunc converts a 0-based permutation function used in SPLs, into an explicit
#F 1-based list permutation. List permutations can be converted to GAP permutations
#F using PermList. Alternatively PermFunc can be used, which returns a GAP permutation.
#F
ListPermFunc := (func, size) ->
List([0..size-1], func) + 1;
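#F Example (illustrative): for the 0-based cyclic shift i -> (i+1) mod 4,
#F    PermFunc(i -> (i+1) mod 4, 4);       # the GAP permutation (1,2,3,4)
#F    ListPermFunc(i -> (i+1) mod 4, 4);   # the 1-based list [ 2, 3, 4, 1 ]
#F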
# ==========================================================================
# FuncClass
#
# Base class for symbolic functions
# ==========================================================================
Class(FuncClass, BaseMat, Function, rec(
#-----------------------------------------------------------------------
# Must be implemented in subclasses
#-----------------------------------------------------------------------
lambda := self >> Error("not implemented"),
domain := self >> Error("not implemented"),
range := self >> Error("not implemented"),
#-----------------------------------------------------------------------
_perm := true,
isReal := True,
isPermutation := self >> false,
perm := self >> Checked(self.isPermutation(), PermFunc(x->self.lambda().at(x), self.range())),
#-----------------------------------------------------------------------
print := (self,i,is) >> Print(
self.name, "(", PrintCS(self.params), ")", When(self.transposed, ".transpose()")),
#-----------------------------------------------------------------------
equals := (self, other) >> ObjId(other) = ObjId(self) and self.params=other.params,
dims := self >> [self.range(), self.domain()],
advdims := self >> [ [[ self.range() ]], [[ self.domain() ]] ],
# size along each dimension, for a multidimensional range
# for compatibility with ClassSPL, we wrap this into another list
# (list of outputs, since ClassSPL can have >1 output)
advrange := self >> [[ self.range() ]],
# size along each dimension, for a multidimensional domain
# for compatibility with ClassSPL, we wrap this into another list
# (list of outputs, since ClassSPL can have >1 output)
advdomain := self >> [[ self.domain() ]],
# dimensionality of range (1-d, 2-d, etc)
advrangeDim := self >> Length(self.advrange()[1]),
# dimensionality of domain (1-d, 2-d, etc)
advdomainDim := self >> Length(self.advdomain()[1]),
#-----------------------------------------------------------------------
toAMat := self >> Gath(self).toAMat(),
#-----------------------------------------------------------------------
arithmeticCost := (self, costMul, costAddMul) >> costMul(0) - costMul(0),
#-----------------------------------------------------------------------
transpose := self >> CopyFields(self, rec(transposed := not self.transposed )),
#-----------------------------------------------------------------------
conjTranspose := self >> self.transpose(),
#-----------------------------------------------------------------------
normalizedArithCost := self >> 0,
#-----------------------------------------------------------------------
free := self >> Union(List(self.params, FreeVars)),
#-----------------------------------------------------------------------
# Rewrite rules support
#
from_rChildren := (self, rch) >> CopyFields(ApplyFunc(ObjId(self), rch),
rec(transposed:=self.transposed)),
# NOTE: self.transposed not exposed
rChildren := self >> self.params,
rSetChild := meth(self, n, newChild)
self.params[n] := newChild;
# self.canonizeParams(); ??
self.dimensions := self.dims();
end,
# ----------------------------------------------------------------------
checkParams := meth(self, params)
local nargs, nump;
nargs := NumArgs(self.def);
nump := Length(params);
if nargs <> -1 and nargs <> nump then
Error(self.name, " needs ", NumArgs(self.def), " parameters: ",
ParamsFunc(self.def), "\n");
fi;
end,
#-----------------------------------------------------------------------
canonizeParams := meth(self, params)
local A, nump;
nump := Length(params);
if IsBound(self.abbrevs) then
for A in self.abbrevs do
if NumArgs(A) = -1 or NumArgs(A) = nump then
return ApplyFunc(A, params);
fi;
od;
return params;
else
return params;
fi;
end,
#-----------------------------------------------------------------------
__call__ := meth(arg)
local result, self, params, lkup, h;
self := arg[1];
params := arg{[2..Length(arg)]};
params := self.canonizeParams(params);
self.checkParams(params);
h := self.hash;
if h<>false then
lkup := h.objLookup(self, params);
if lkup[1] <> false then return lkup[1]; fi;
fi;
result := SPL(WithBases(self, rec(params := params, transposed := false)));
result := Inherit(result, ApplyFunc(result.def, params));
result.dimensions := result.dims();
if h<>false then return h.objAdd(result, lkup[2]);
else return result;
fi;
end,
# obsolete
# checkInverse := self >> Checked(self.isPermutation(),
# PermFunc(self.direct, self.range()) * PermFunc(self.inverse, self.domain())),
));
Class(PermClass, FuncClass, rec(
isPermutation := self >> true,
toAMat := self >> Gath(self).toAMat(),
equals := (self, other) >> ObjId(other) = ObjId(self) and self.params=other.params
));
|
Require Import CodeProofDeps.
Require Import Ident.
Require Import Constants.
Require Import RData.
Require Import EventReplay.
Require Import MoverTypes.
Require Import CommonLib.
Require Import AbsAccessor.Spec.
Require Import RealmSyncHandlerAux.Spec.
Require Import RealmSyncHandlerAux.Layer.
Require Import RealmSyncHandler.Code.handle_sysreg_access_trap.
Require Import RealmSyncHandler.LowSpecs.handle_sysreg_access_trap.
Local Open Scope Z_scope.
Section CodeProof.
Context `{real_params: RealParams}.
Context {memb} `{Hmemx: Mem.MemoryModelX memb}.
Context `{Hmwd: UseMemWithData memb}.
Let mem := mwd (cdata RData).
Context `{Hstencil: Stencil}.
Context `{make_program_ops: !MakeProgramOps Clight.function type Clight.fundef type}.
Context `{Hmake_program: !MakeProgram Clight.function type Clight.fundef type}.
Let L : compatlayer (cdata RData) :=
_assert_cond ↦ gensem assert_cond_spec
⊕ _handle_id_sysreg_trap ↦ gensem handle_id_sysreg_trap_spec
⊕ _handle_timer_sysreg_trap ↦ gensem handle_timer_sysreg_trap_spec
⊕ _handle_icc_el1_sysreg_trap ↦ gensem handle_icc_el1_sysreg_trap_spec
.
Local Instance: ExternalCallsOps mem := CompatExternalCalls.compatlayer_extcall_ops L.
Local Instance: CompilerConfigOps mem := CompatExternalCalls.compatlayer_compiler_config_ops L.
Section BodyProof.
Context `{Hwb: WritableBlockOps}.
Variable (sc: stencil).
Variables (ge: genv) (STENCIL_MATCHES: stencil_matches sc ge).
Variable b_assert_cond: block.
Hypothesis h_assert_cond_s : Genv.find_symbol ge _assert_cond = Some b_assert_cond.
Hypothesis h_assert_cond_p : Genv.find_funct_ptr ge b_assert_cond
= Some (External (EF_external _assert_cond
(signature_of_type (Tcons tuint Tnil) tvoid cc_default))
(Tcons tuint Tnil) tvoid cc_default).
Local Opaque assert_cond_spec.
Variable b_handle_id_sysreg_trap: block.
Hypothesis h_handle_id_sysreg_trap_s : Genv.find_symbol ge _handle_id_sysreg_trap = Some b_handle_id_sysreg_trap.
Hypothesis h_handle_id_sysreg_trap_p : Genv.find_funct_ptr ge b_handle_id_sysreg_trap
= Some (External (EF_external _handle_id_sysreg_trap
(signature_of_type (Tcons Tptr (Tcons tulong Tnil)) tvoid cc_default))
(Tcons Tptr (Tcons tulong Tnil)) tvoid cc_default).
Local Opaque handle_id_sysreg_trap_spec.
Variable b_handle_timer_sysreg_trap: block.
Hypothesis h_handle_timer_sysreg_trap_s : Genv.find_symbol ge _handle_timer_sysreg_trap = Some b_handle_timer_sysreg_trap.
Hypothesis h_handle_timer_sysreg_trap_p : Genv.find_funct_ptr ge b_handle_timer_sysreg_trap
= Some (External (EF_external _handle_timer_sysreg_trap
(signature_of_type (Tcons Tptr (Tcons tulong Tnil)) tvoid cc_default))
(Tcons Tptr (Tcons tulong Tnil)) tvoid cc_default).
Local Opaque handle_timer_sysreg_trap_spec.
Variable b_handle_icc_el1_sysreg_trap: block.
Hypothesis h_handle_icc_el1_sysreg_trap_s : Genv.find_symbol ge _handle_icc_el1_sysreg_trap = Some b_handle_icc_el1_sysreg_trap.
Hypothesis h_handle_icc_el1_sysreg_trap_p : Genv.find_funct_ptr ge b_handle_icc_el1_sysreg_trap
= Some (External (EF_external _handle_icc_el1_sysreg_trap
(signature_of_type (Tcons Tptr (Tcons tulong Tnil)) tvoid cc_default))
(Tcons Tptr (Tcons tulong Tnil)) tvoid cc_default).
Local Opaque handle_icc_el1_sysreg_trap_spec.
Lemma handle_sysreg_access_trap_body_correct:
forall m d d' env le rec_base rec_offset esr
(Henv: env = PTree.empty _)
(Hinv: high_level_invariant d)
(HPTrec: PTree.get _rec le = Some (Vptr rec_base (Int.repr rec_offset)))
(HPTesr: PTree.get _esr le = Some (Vlong esr))
(Hspec: handle_sysreg_access_trap_spec0 (rec_base, rec_offset) (VZ64 (Int64.unsigned esr)) d = Some d'),
exists le', (exec_stmt ge env le ((m, d): mem) handle_sysreg_access_trap_body E0 le' (m, d') Out_normal).
Proof.
solve_code_proof Hspec handle_sysreg_access_trap_body; eexists; solve_proof_low.
Qed.
End BodyProof.
End CodeProof.
|
% SPDX-FileCopyrightText: © 2021 Martin Michlmayr <[email protected]>
% SPDX-License-Identifier: CC-BY-4.0
\setchapterimage[9.5cm]{images/birds}
\chapter{Conflict resolution}
\labch{conflict-resolution}
Open collaboration is a wonderful way to develop software, but conflicts are bound to happen at some point in any project of significant size. There can be disagreements on technical decisions, the direction a project is heading, or other aspects of the project.
A solid governance structure helps to reduce and resolve conflicts by providing clear policies and decision-making processes. The culture of a project also has a big impact, especially if it encourages friendly collaboration.
Organizations can also run into conflicts, including conflicts of interest, especially in trade associations that have corporate members with different commercial interests.
Organizations employ a number of mechanisms to prevent conflicts in the first place. \href{https://doi.org/10.1007/978-3-030-33742-1_11}{Florian Weikert, Dirk Riehle, and Ann Barcomb} studied several FOSS foundations and identified five major areas of conflict prevention:
\begin{itemize}
\itemsep 1em
\item Screening processes: new members and projects are only accepted when they pass a screening process. Such a process can identify common interests and the motivation of potential members. The technical, cultural, and strategic fit of projects can be assessed. Some organizations use an incubation process, and new projects can only `graduate' once they meet certain criteria.
\item Governance structures and rules: foundations have formal governance structures and rules that are codified in their by-laws. They can include:
\begin{itemize}
\itemsep 0.50em
\item Transparent affiliations: contributors have to declare their corporate affiliations in public to highlight potential bias.
\item Decoupling money from control: the financial funding of the organization is separate from the technical decision-making.
\item Representation limits: some organizations limit the number of people from the same company who can serve on the board of directors.
\item Independent entities: key staff of the foundation, such as the executive director, may not be employees or consultants of a member company.
\end{itemize}
\item Explicit strategies: organizations protect their culture and values through a number of explicit strategies, such as monitoring the behavior of participants.
\item Common interests: even though member companies often compete in the market place, common interests allow them to collaborate. These common interests can be technical (e.g. a focus on technical merit over corporate agendas) or related to business (e.g. building a common platform against a dominant competitor).
\item Culture and values: shared values prevent bad behavior and contribute to success. Important values include openness, transparency, equality, and neutrality.
\end{itemize}
\begin{kaobox}[frametitle=Culture and values: Python]
The Python community expects its members to be ``open, considerate, and respectful''. Behaviors that \href{https://www.python.org/psf/conduct/}{reinforce these values include}:
\begin{itemize}
\item Focusing on what is best for the community
\item Acknowledging time and effort
\item Being respectful of differing viewpoints and experiences
\item Showing empathy towards other community members
\item Gracefully accepting constructive criticism
\end{itemize}
\end{kaobox}
|
[STATEMENT]
lemma Sum2:
fixes Q :: pi
and a :: name
and x :: name
and Q' :: pi
and P :: pi
and \<alpha> :: freeRes
shows "Q \<Longrightarrow>a<\<nu>x> \<prec> Q' \<Longrightarrow> P \<oplus> Q \<Longrightarrow>a<\<nu>x> \<prec> Q'"
and "Q \<Longrightarrow>\<alpha> \<prec> Q' \<Longrightarrow> P \<oplus> Q \<Longrightarrow>\<alpha> \<prec> Q'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (Q \<Longrightarrow>a<\<nu>x> \<prec> Q' \<Longrightarrow> P \<oplus> Q \<Longrightarrow>a<\<nu>x> \<prec> Q') &&& (Q \<Longrightarrow>\<alpha> \<prec> Q' \<Longrightarrow> P \<oplus> Q \<Longrightarrow>\<alpha> \<prec> Q')
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. Q \<Longrightarrow>a<\<nu>x> \<prec> Q' \<Longrightarrow> P \<oplus> Q \<Longrightarrow>a<\<nu>x> \<prec> Q'
2. Q \<Longrightarrow>\<alpha> \<prec> Q' \<Longrightarrow> P \<oplus> Q \<Longrightarrow>\<alpha> \<prec> Q'
[PROOF STEP]
assume "Q \<Longrightarrow>a<\<nu>x> \<prec> Q'"
[PROOF STATE]
proof (state)
this:
Q \<Longrightarrow>a<\<nu>x> \<prec> Q'
goal (2 subgoals):
1. Q \<Longrightarrow>a<\<nu>x> \<prec> Q' \<Longrightarrow> P \<oplus> Q \<Longrightarrow>a<\<nu>x> \<prec> Q'
2. Q \<Longrightarrow>\<alpha> \<prec> Q' \<Longrightarrow> P \<oplus> Q \<Longrightarrow>\<alpha> \<prec> Q'
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
Q \<Longrightarrow>a<\<nu>x> \<prec> Q'
[PROOF STEP]
obtain Q'' Q''' where QChain: "Q \<Longrightarrow>\<^sub>\<tau> Q'''"
and Q'''Trans: "Q''' \<longmapsto>a<\<nu>x> \<prec> Q''"
and Q''Chain: "Q'' \<Longrightarrow>\<^sub>\<tau> Q'"
[PROOF STATE]
proof (prove)
using this:
Q \<Longrightarrow>a<\<nu>x> \<prec> Q'
goal (1 subgoal):
1. (\<And>Q''' Q''. \<lbrakk>Q \<Longrightarrow>\<^sub>\<tau> Q'''; Q''' \<longmapsto> a<\<nu>x> \<prec> Q''; Q'' \<Longrightarrow>\<^sub>\<tau> Q'\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by(force dest: transitionE)
[PROOF STATE]
proof (state)
this:
Q \<Longrightarrow>\<^sub>\<tau> Q'''
Q''' \<longmapsto> a<\<nu>x> \<prec> Q''
Q'' \<Longrightarrow>\<^sub>\<tau> Q'
goal (2 subgoals):
1. Q \<Longrightarrow>a<\<nu>x> \<prec> Q' \<Longrightarrow> P \<oplus> Q \<Longrightarrow>a<\<nu>x> \<prec> Q'
2. Q \<Longrightarrow>\<alpha> \<prec> Q' \<Longrightarrow> P \<oplus> Q \<Longrightarrow>\<alpha> \<prec> Q'
[PROOF STEP]
show "P \<oplus> Q \<Longrightarrow>a<\<nu>x> \<prec> Q'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. P \<oplus> Q \<Longrightarrow>a<\<nu>x> \<prec> Q'
[PROOF STEP]
proof(cases "Q = Q'''")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. Q = Q''' \<Longrightarrow> P \<oplus> Q \<Longrightarrow>a<\<nu>x> \<prec> Q'
2. Q \<noteq> Q''' \<Longrightarrow> P \<oplus> Q \<Longrightarrow>a<\<nu>x> \<prec> Q'
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
Q = Q'''
goal (2 subgoals):
1. Q = Q''' \<Longrightarrow> P \<oplus> Q \<Longrightarrow>a<\<nu>x> \<prec> Q'
2. Q \<noteq> Q''' \<Longrightarrow> P \<oplus> Q \<Longrightarrow>a<\<nu>x> \<prec> Q'
[PROOF STEP]
have "P \<oplus> Q \<Longrightarrow>\<^sub>\<tau> P \<oplus> Q"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. P \<oplus> Q \<Longrightarrow>\<^sub>\<tau> P \<oplus> Q
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
P \<oplus> Q \<Longrightarrow>\<^sub>\<tau> P \<oplus> Q
goal (2 subgoals):
1. Q = Q''' \<Longrightarrow> P \<oplus> Q \<Longrightarrow>a<\<nu>x> \<prec> Q'
2. Q \<noteq> Q''' \<Longrightarrow> P \<oplus> Q \<Longrightarrow>a<\<nu>x> \<prec> Q'
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
P \<oplus> Q \<Longrightarrow>\<^sub>\<tau> P \<oplus> Q
goal (2 subgoals):
1. Q = Q''' \<Longrightarrow> P \<oplus> Q \<Longrightarrow>a<\<nu>x> \<prec> Q'
2. Q \<noteq> Q''' \<Longrightarrow> P \<oplus> Q \<Longrightarrow>a<\<nu>x> \<prec> Q'
[PROOF STEP]
from Q'''Trans \<open>Q = Q'''\<close>
[PROOF STATE]
proof (chain)
picking this:
Q''' \<longmapsto> a<\<nu>x> \<prec> Q''
Q = Q'''
[PROOF STEP]
have "P \<oplus> Q \<longmapsto>a<\<nu>x> \<prec> Q''"
[PROOF STATE]
proof (prove)
using this:
Q''' \<longmapsto> a<\<nu>x> \<prec> Q''
Q = Q'''
goal (1 subgoal):
1. P \<oplus> Q \<longmapsto> a<\<nu>x> \<prec> Q''
[PROOF STEP]
by(blast intro: Sum2)
[PROOF STATE]
proof (state)
this:
P \<oplus> Q \<longmapsto> a<\<nu>x> \<prec> Q''
goal (2 subgoals):
1. Q = Q''' \<Longrightarrow> P \<oplus> Q \<Longrightarrow>a<\<nu>x> \<prec> Q'
2. Q \<noteq> Q''' \<Longrightarrow> P \<oplus> Q \<Longrightarrow>a<\<nu>x> \<prec> Q'
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
P \<oplus> Q \<Longrightarrow>\<^sub>\<tau> P \<oplus> Q
P \<oplus> Q \<longmapsto> a<\<nu>x> \<prec> Q''
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
P \<oplus> Q \<Longrightarrow>\<^sub>\<tau> P \<oplus> Q
P \<oplus> Q \<longmapsto> a<\<nu>x> \<prec> Q''
goal (1 subgoal):
1. P \<oplus> Q \<Longrightarrow>a<\<nu>x> \<prec> Q'
[PROOF STEP]
using Q''Chain
[PROOF STATE]
proof (prove)
using this:
P \<oplus> Q \<Longrightarrow>\<^sub>\<tau> P \<oplus> Q
P \<oplus> Q \<longmapsto> a<\<nu>x> \<prec> Q''
Q'' \<Longrightarrow>\<^sub>\<tau> Q'
goal (1 subgoal):
1. P \<oplus> Q \<Longrightarrow>a<\<nu>x> \<prec> Q'
[PROOF STEP]
by(rule transitionI)
[PROOF STATE]
proof (state)
this:
P \<oplus> Q \<Longrightarrow>a<\<nu>x> \<prec> Q'
goal (1 subgoal):
1. Q \<noteq> Q''' \<Longrightarrow> P \<oplus> Q \<Longrightarrow>a<\<nu>x> \<prec> Q'
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. Q \<noteq> Q''' \<Longrightarrow> P \<oplus> Q \<Longrightarrow>a<\<nu>x> \<prec> Q'
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
Q \<noteq> Q'''
goal (1 subgoal):
1. Q \<noteq> Q''' \<Longrightarrow> P \<oplus> Q \<Longrightarrow>a<\<nu>x> \<prec> Q'
[PROOF STEP]
from QChain \<open>Q \<noteq> Q'''\<close>
[PROOF STATE]
proof (chain)
picking this:
Q \<Longrightarrow>\<^sub>\<tau> Q'''
Q \<noteq> Q'''
[PROOF STEP]
have "P \<oplus> Q \<Longrightarrow>\<^sub>\<tau> Q'''"
[PROOF STATE]
proof (prove)
using this:
Q \<Longrightarrow>\<^sub>\<tau> Q'''
Q \<noteq> Q'''
goal (1 subgoal):
1. P \<oplus> Q \<Longrightarrow>\<^sub>\<tau> Q'''
[PROOF STEP]
by(rule sum2Chain)
[PROOF STATE]
proof (state)
this:
P \<oplus> Q \<Longrightarrow>\<^sub>\<tau> Q'''
goal (1 subgoal):
1. Q \<noteq> Q''' \<Longrightarrow> P \<oplus> Q \<Longrightarrow>a<\<nu>x> \<prec> Q'
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
P \<oplus> Q \<Longrightarrow>\<^sub>\<tau> Q'''
goal (1 subgoal):
1. P \<oplus> Q \<Longrightarrow>a<\<nu>x> \<prec> Q'
[PROOF STEP]
using Q'''Trans Q''Chain
[PROOF STATE]
proof (prove)
using this:
P \<oplus> Q \<Longrightarrow>\<^sub>\<tau> Q'''
Q''' \<longmapsto> a<\<nu>x> \<prec> Q''
Q'' \<Longrightarrow>\<^sub>\<tau> Q'
goal (1 subgoal):
1. P \<oplus> Q \<Longrightarrow>a<\<nu>x> \<prec> Q'
[PROOF STEP]
by(rule transitionI)
[PROOF STATE]
proof (state)
this:
P \<oplus> Q \<Longrightarrow>a<\<nu>x> \<prec> Q'
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
P \<oplus> Q \<Longrightarrow>a<\<nu>x> \<prec> Q'
goal (1 subgoal):
1. Q \<Longrightarrow>\<alpha> \<prec> Q' \<Longrightarrow> P \<oplus> Q \<Longrightarrow>\<alpha> \<prec> Q'
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. Q \<Longrightarrow>\<alpha> \<prec> Q' \<Longrightarrow> P \<oplus> Q \<Longrightarrow>\<alpha> \<prec> Q'
[PROOF STEP]
assume "Q \<Longrightarrow>\<alpha> \<prec> Q'"
[PROOF STATE]
proof (state)
this:
Q \<Longrightarrow>\<alpha> \<prec> Q'
goal (1 subgoal):
1. Q \<Longrightarrow>\<alpha> \<prec> Q' \<Longrightarrow> P \<oplus> Q \<Longrightarrow>\<alpha> \<prec> Q'
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
Q \<Longrightarrow>\<alpha> \<prec> Q'
[PROOF STEP]
obtain Q'' Q''' where QChain: "Q \<Longrightarrow>\<^sub>\<tau> Q'''"
and Q'''Trans: "Q''' \<longmapsto>\<alpha> \<prec> Q''"
and Q''Chain: "Q'' \<Longrightarrow>\<^sub>\<tau> Q'"
[PROOF STATE]
proof (prove)
using this:
Q \<Longrightarrow>\<alpha> \<prec> Q'
goal (1 subgoal):
1. (\<And>Q''' Q''. \<lbrakk>Q \<Longrightarrow>\<^sub>\<tau> Q'''; Q''' \<longmapsto> \<alpha> \<prec> Q''; Q'' \<Longrightarrow>\<^sub>\<tau> Q'\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by(force dest: transitionE)
[PROOF STATE]
proof (state)
this:
Q \<Longrightarrow>\<^sub>\<tau> Q'''
Q''' \<longmapsto> \<alpha> \<prec> Q''
Q'' \<Longrightarrow>\<^sub>\<tau> Q'
goal (1 subgoal):
1. Q \<Longrightarrow>\<alpha> \<prec> Q' \<Longrightarrow> P \<oplus> Q \<Longrightarrow>\<alpha> \<prec> Q'
[PROOF STEP]
show "P \<oplus> Q \<Longrightarrow>\<alpha> \<prec> Q'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. P \<oplus> Q \<Longrightarrow>\<alpha> \<prec> Q'
[PROOF STEP]
proof(cases "Q = Q'''")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. Q = Q''' \<Longrightarrow> P \<oplus> Q \<Longrightarrow>\<alpha> \<prec> Q'
2. Q \<noteq> Q''' \<Longrightarrow> P \<oplus> Q \<Longrightarrow>\<alpha> \<prec> Q'
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
Q = Q'''
goal (2 subgoals):
1. Q = Q''' \<Longrightarrow> P \<oplus> Q \<Longrightarrow>\<alpha> \<prec> Q'
2. Q \<noteq> Q''' \<Longrightarrow> P \<oplus> Q \<Longrightarrow>\<alpha> \<prec> Q'
[PROOF STEP]
have "P \<oplus> Q \<Longrightarrow>\<^sub>\<tau> P \<oplus> Q"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. P \<oplus> Q \<Longrightarrow>\<^sub>\<tau> P \<oplus> Q
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
P \<oplus> Q \<Longrightarrow>\<^sub>\<tau> P \<oplus> Q
goal (2 subgoals):
1. Q = Q''' \<Longrightarrow> P \<oplus> Q \<Longrightarrow>\<alpha> \<prec> Q'
2. Q \<noteq> Q''' \<Longrightarrow> P \<oplus> Q \<Longrightarrow>\<alpha> \<prec> Q'
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
P \<oplus> Q \<Longrightarrow>\<^sub>\<tau> P \<oplus> Q
goal (2 subgoals):
1. Q = Q''' \<Longrightarrow> P \<oplus> Q \<Longrightarrow>\<alpha> \<prec> Q'
2. Q \<noteq> Q''' \<Longrightarrow> P \<oplus> Q \<Longrightarrow>\<alpha> \<prec> Q'
[PROOF STEP]
from Q'''Trans \<open>Q = Q'''\<close>
[PROOF STATE]
proof (chain)
picking this:
Q''' \<longmapsto> \<alpha> \<prec> Q''
Q = Q'''
[PROOF STEP]
have "P \<oplus> Q \<longmapsto>\<alpha> \<prec> Q''"
[PROOF STATE]
proof (prove)
using this:
Q''' \<longmapsto> \<alpha> \<prec> Q''
Q = Q'''
goal (1 subgoal):
1. P \<oplus> Q \<longmapsto> \<alpha> \<prec> Q''
[PROOF STEP]
by(blast intro: Sum2)
[PROOF STATE]
proof (state)
this:
P \<oplus> Q \<longmapsto> \<alpha> \<prec> Q''
goal (2 subgoals):
1. Q = Q''' \<Longrightarrow> P \<oplus> Q \<Longrightarrow>\<alpha> \<prec> Q'
2. Q \<noteq> Q''' \<Longrightarrow> P \<oplus> Q \<Longrightarrow>\<alpha> \<prec> Q'
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
P \<oplus> Q \<Longrightarrow>\<^sub>\<tau> P \<oplus> Q
P \<oplus> Q \<longmapsto> \<alpha> \<prec> Q''
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
P \<oplus> Q \<Longrightarrow>\<^sub>\<tau> P \<oplus> Q
P \<oplus> Q \<longmapsto> \<alpha> \<prec> Q''
goal (1 subgoal):
1. P \<oplus> Q \<Longrightarrow>\<alpha> \<prec> Q'
[PROOF STEP]
using Q''Chain
[PROOF STATE]
proof (prove)
using this:
P \<oplus> Q \<Longrightarrow>\<^sub>\<tau> P \<oplus> Q
P \<oplus> Q \<longmapsto> \<alpha> \<prec> Q''
Q'' \<Longrightarrow>\<^sub>\<tau> Q'
goal (1 subgoal):
1. P \<oplus> Q \<Longrightarrow>\<alpha> \<prec> Q'
[PROOF STEP]
by(rule transitionI)
[PROOF STATE]
proof (state)
this:
P \<oplus> Q \<Longrightarrow>\<alpha> \<prec> Q'
goal (1 subgoal):
1. Q \<noteq> Q''' \<Longrightarrow> P \<oplus> Q \<Longrightarrow>\<alpha> \<prec> Q'
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. Q \<noteq> Q''' \<Longrightarrow> P \<oplus> Q \<Longrightarrow>\<alpha> \<prec> Q'
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
Q \<noteq> Q'''
goal (1 subgoal):
1. Q \<noteq> Q''' \<Longrightarrow> P \<oplus> Q \<Longrightarrow>\<alpha> \<prec> Q'
[PROOF STEP]
from QChain \<open>Q \<noteq> Q'''\<close>
[PROOF STATE]
proof (chain)
picking this:
Q \<Longrightarrow>\<^sub>\<tau> Q'''
Q \<noteq> Q'''
[PROOF STEP]
have "P \<oplus> Q \<Longrightarrow>\<^sub>\<tau> Q'''"
[PROOF STATE]
proof (prove)
using this:
Q \<Longrightarrow>\<^sub>\<tau> Q'''
Q \<noteq> Q'''
goal (1 subgoal):
1. P \<oplus> Q \<Longrightarrow>\<^sub>\<tau> Q'''
[PROOF STEP]
by(rule sum2Chain)
[PROOF STATE]
proof (state)
this:
P \<oplus> Q \<Longrightarrow>\<^sub>\<tau> Q'''
goal (1 subgoal):
1. Q \<noteq> Q''' \<Longrightarrow> P \<oplus> Q \<Longrightarrow>\<alpha> \<prec> Q'
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
P \<oplus> Q \<Longrightarrow>\<^sub>\<tau> Q'''
goal (1 subgoal):
1. P \<oplus> Q \<Longrightarrow>\<alpha> \<prec> Q'
[PROOF STEP]
using Q'''Trans Q''Chain
[PROOF STATE]
proof (prove)
using this:
P \<oplus> Q \<Longrightarrow>\<^sub>\<tau> Q'''
Q''' \<longmapsto> \<alpha> \<prec> Q''
Q'' \<Longrightarrow>\<^sub>\<tau> Q'
goal (1 subgoal):
1. P \<oplus> Q \<Longrightarrow>\<alpha> \<prec> Q'
[PROOF STEP]
by(rule transitionI)
[PROOF STATE]
proof (state)
this:
P \<oplus> Q \<Longrightarrow>\<alpha> \<prec> Q'
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
P \<oplus> Q \<Longrightarrow>\<alpha> \<prec> Q'
goal:
No subgoals!
[PROOF STEP]
qed |
! 4670 Numerical Analysis
! Homework Two Redo
! From Notes on Bisection Method:
! f needs to be continuous
! 1. Start with a, b so that f(a), f(b) have opposite signs
! 2. Find the middle M = (a+b)/2 and calculate f(M)
! 3. If f(a) and f(M) have opposite signs
! set b = M
! else set a = M
! end if
! Loop back to #2
module secret
end module secret
program WebberHomework2Question1
use secret ! uses secret module
implicit none
real :: a
real :: b
real :: f
real :: m
real :: mid
real :: tolerance
integer :: i
  integer :: findSign ! "sign" is already a Fortran intrinsic
                      ! function, so this helper uses another name
a = -10.0d0
b = 10.0d0
  tolerance = 1.0e-10 ! convergence tolerance for the bracketing interval
do i = 1, 10
m = mid(a, b) ! find the middle and calculate f(M)
    if (abs(b - a) < tolerance) then ! if the bracketing interval is smaller than the tolerance
print*, "Middle = ", m ! prints the middle
stop
else if (f(m) == 0) then ! else if f(m) equals zero then
print*, "Middle = ", m ! prints the middle
stop
else ! if neither case is true
print*, "f(a, b) = (", a, ",", b, ") and m = ", m
end if
if ((findSign(f(a)) * findSign(f(m))) < 0) then ! if f(a) && f(M) have opposite signs
b = m ! it sets b to M
else
      a = m ! else it sets a to M
end if
end do
stop
end program WebberHomework2Question1
real function f(x) ! main function for the problem
implicit none
real :: x
f = ((x**3)-30) ! equation for f = x^3 - 30
return
end
real function mid(a,b) ! function to find midpoint
implicit none
real :: a
real :: b
  mid = a + ((b - a) / 2) ! midpoint formula
return
end
integer function findSign(x) ! function to determine if negative
implicit none ! or positive
real :: x
if (x < 0) then ! if less than zero sign is -1
findSign = -1
else if (x == 0) then ! else if x is zero sign is 0
findSign = 0
else ! if x is greater than zero sign is 1
findSign = 1
end if
return
end
|
##### Copyright 2021 The Cirq Developers
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Quantum circuits on Rigetti devices
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://quantumai.google/cirq/tutorials/rigetti/getting_started">View on QuantumAI</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/quantumlib/Cirq/blob/master/docs/tutorials/rigetti/getting_started.ipynb">Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/quantumlib/Cirq/blob/master/docs/tutorials/rigetti/getting_started.ipynb">View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/Cirq/docs/tutorials/rigetti/getting_started.ipynb">Download notebook</a>
</td>
</table>
```
try:
import cirq
except ImportError:
print("installing cirq...")
!pip install --quiet cirq
print("installed cirq.")
import cirq
```
    installing cirq...
    installed cirq.
Running this notebook requires the pyQuil QVM and Compiler. If you are running on Google Colab or a Debian-based Linux machine, you can run the cell below to install them if necessary. Otherwise, see [these instructions](https://pyquil-docs.rigetti.com/en/stable/start.html#downloading-the-qvm-and-compiler){:.external} for installation.
```
! [ -z "$(which qvm)" ] &&\
apt update &&\
apt install jq &&\
export LATEST_FOREST_SDK_VERSION=$(curl -s https://downloads.rigetti.com/qcs-sdk/versions | jq -r '.versions[0].sdk') &&\
curl -f https://downloads.rigetti.com/qcs-sdk/forest-sdk-$LATEST_FOREST_SDK_VERSION-linux-deb.tar.bz2 -o $PWD/forest-sdk-$LATEST_FOREST_SDK_VERSION-linux-deb.tar.bz2 &&\
tar -xf forest-sdk-$LATEST_FOREST_SDK_VERSION-linux-deb.tar.bz2 &&\
./forest-sdk-$LATEST_FOREST_SDK_VERSION-linux-deb/forest-sdk-$LATEST_FOREST_SDK_VERSION-linux-deb.run &&\
quilc --version &&\
qvm --version
```
1.23.0 [e6c0939]
1.17.1 [cf3f91f]
Next, run the pyQuil QVM and Compiler if they are not already running on their default ports.
```
import subprocess
subprocess.Popen(["qvm", "--quiet", "-S"])
subprocess.Popen(["quilc", "--quiet", "-R"])
```
<subprocess.Popen at 0x7fd6121add10>
# Running a Bell state circuit
To demonstrate the basic functionality of the Cirq Rigetti integration, we begin constructing a basic Bell state circuit.
```
bell_circuit = cirq.Circuit()
qubits = cirq.LineQubit.range(2)
bell_circuit.append(cirq.H(qubits[0]))
bell_circuit.append(cirq.CNOT(qubits[0], qubits[1]))
bell_circuit.append(cirq.measure(qubits[0], qubits[1], key='m'))
print(bell_circuit)
```
0: ───H───@───M('m')───
│ │
1: ───────X───M────────
Next, we'll import `RigettiQCSService` and list available quantum processors.
```
from cirq_rigetti import RigettiQCSService
quantum_processors = RigettiQCSService.list_quantum_processors().quantum_processors
processors_list = [quantum_processor.id for quantum_processor in quantum_processors]
print(processors_list)
```
['Aspen-11', 'Aspen-M-1']
For now, we'll instantiate the `RigettiQCSService` as a pyQuil [Quantum Virtual Machine](https://docs.rigetti.com/qcs/references/qvm){:.external} based on the topology of one of the available Rigetti quantum processors. At the time of this writing, `Aspen-11` is available.
Note that, in addition to the quantum processors listed above, you can also instantiate the `RigettiQCSService` with the name of any virtual device the pyQuil QVM supports. See the documentation for pyQuil [get_qc](https://pyquil-docs.rigetti.com/en/stable/apidocs/pyquil.api.html#pyquil.api.get_qc){:.external} for more information.
```
from cirq_rigetti import circuit_transformers, circuit_sweep_executors, get_rigetti_qcs_service
SERVICE_NAME = processors_list[0]
print(SERVICE_NAME)
service = get_rigetti_qcs_service(SERVICE_NAME, as_qvm=True, noisy=False)
result = service.run(bell_circuit, repetitions=1000)
print(result.histogram(key='m'))
```
Aspen-11
Counter({3: 529, 0: 471})
We'll use the built-in `cirq.plot_state_histogram` to visually verify the results of our Bell state.
```
cirq.plot_state_histogram(result.histogram(key='m'))
```
As expected, we see states 0 (i.e. '00') and 3 (i.e. '11') as the dominant results.
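As noted earlier, you are not limited to the processors returned by `RigettiQCSService.list_quantum_processors`. The sketch below targets a generic pyQuil virtual device instead; the `9q-square` device name is an assumption about pyQuil's built-in QVM topologies and may differ in your installation.
```
# Hypothetical sketch: run the same Bell circuit against a generic pyQuil QVM
# topology rather than a named Rigetti processor. The "9q-square" name is an
# assumption about pyQuil's built-in virtual devices.
generic_service = get_rigetti_qcs_service("9q-square", as_qvm=True, noisy=False)
generic_result = generic_service.run(bell_circuit, repetitions=1000)
print(generic_result.histogram(key='m'))
```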
You may initialize both the `RigettiQCSService` and `RigettiQCSSampler` with execution functions from the `cirq_rigetti.circuit_sweep_executors` module and transformation functions from the `cirq_rigetti.circuit_transformers` module.
You may invoke these functions with arguments for controlling your circuit execution at a more fine-grained level. For instance, you may want to add [Pragma statements](https://pyquil-docs.rigetti.com/en/stable/compiler.html#region-specific-compiler-features-through-pragma){:.external} to set the [initial rewiring](https://pyquil-docs.rigetti.com/en/stable/compiler.html#initial-rewiring){:.external} strategy, invoke [active qubit reset](https://arxiv.org/abs/2001.04449){:.external} prior to execution, or explicitly address physical qubits on the quantum computer.
```
from pyquil.quilbase import Reset, Pragma
def hook(program, measurement_id_map):
program._instructions.insert(0, Reset())
program._instructions.insert(1, Pragma('INITIAL_REWIRING', freeform_string='GREEDY'))
print(program)
return program, measurement_id_map
# assign qubits explicitly to hardware or virtual machine qubits.
qubit_id_map = {
qubits[0]: 4,
qubits[1]: 5,
}
executor = circuit_sweep_executors.with_quilc_compilation_and_cirq_parameter_resolution
transformer = circuit_transformers.build(qubit_id_map=qubit_id_map, qubits=qubits, post_transformation_hooks=[hook])
service = get_rigetti_qcs_service(SERVICE_NAME, as_qvm=True, executor=executor, transformer=transformer)
result = service.run(bell_circuit, repetitions=1000)
cirq.plot_state_histogram(result.histogram(key='m'))
```
# Running a parameterized circuit
Of course, you may be running a parameterized circuit and want to leverage the [Quil compiler's](https://pyquil-docs.rigetti.com/en/stable/compiler.html){:.external} support for parametric compilation. This affords a speedup in execution times, as the Cirq Rigetti integration will only compile the circuit once for an entire parameter sweep.
We start by initializing the `RigettiQCSSampler` and specifying a circuit sweep executor that supports parametric compilation. Note that this class accepts the same `executor` and `transformer` types as `RigettiQCSService`.
```
from cirq_rigetti import get_rigetti_qcs_sampler
executor = circuit_sweep_executors.with_quilc_parametric_compilation
sampler = get_rigetti_qcs_sampler(SERVICE_NAME, as_qvm=True, executor=executor)
```
Next, we will initialize a parameterized circuit in Cirq along with a set of parameter values.
```
import sympy
qubit = cirq.LineQubit.range(1)[0]
circuit = cirq.Circuit(
cirq.H(qubit)**sympy.Symbol('t'),
cirq.measure(qubit, key='m'))
params = cirq.Linspace('t', start=0, stop=4, length=5)
print(circuit)
```
0: ───H^t───M('m')───
In much the same way that we invoke `RigettiQCSService.run`, we invoke `RigettiQCSSampler.run_sweep` with our parameters here.
```
import pandas as pd
results = sampler.run_sweep(circuit, params, repetitions=10)
data = {f't={result.params["t"]}': [measurement[0] for measurement in result.measurements['m']] for result in results}
pd.DataFrame(data)
```
|   | t=0.0 | t=1.0 | t=2.0 | t=3.0 | t=4.0 |
|---|-------|-------|-------|-------|-------|
| 0 | 0 | 0 | 0 | 1 | 0 |
| 1 | 0 | 1 | 0 | 0 | 0 |
| 2 | 0 | 0 | 0 | 1 | 0 |
| 3 | 0 | 0 | 0 | 0 | 0 |
| 4 | 0 | 0 | 0 | 0 | 0 |
| 5 | 0 | 0 | 0 | 1 | 0 |
| 6 | 0 | 1 | 0 | 0 | 0 |
| 7 | 0 | 1 | 0 | 1 | 0 |
| 8 | 0 | 0 | 0 | 1 | 0 |
| 9 | 0 | 0 | 0 | 0 | 0 |
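For a quick sanity check of the sweep, you can summarize the `data` dictionary built in the previous cell. This is a minimal sketch using only values already computed: with `H**t`, the `t=1.0` and `t=3.0` columns should be roughly 50/50, while `t=0.0`, `t=2.0` and `t=4.0` should stay at 0.
```
# Fraction of |1> outcomes for each swept value of t, computed from the
# `data` dictionary built in the previous cell.
for key, bits in data.items():
    print(key, sum(int(b) for b in bits) / len(bits))
```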
|
% $Id$ %
\subsection{Calculator}
\screenshot{plugins/images/ss-calculator}{Calculator}{img:calculator}
This is a simple scientific calculator for use on the \dap. It works like a
standard calculator. Pressing the ``1st'' and ``2nd'' buttons toggles between
the other available math functions.
\begin{btnmap}
\opt{RECORDER_PAD,ONDIO_PAD,IRIVER_H100_PAD,IRIVER_H300_PAD,IPOD_4G_PAD%
,IPOD_3G_PAD,IAUDIO_X5_PAD,SANSA_E200_PAD,SANSA_C200_PAD,SANSA_CLIP_PAD,GIGABEAT_PAD%
,GIGABEAT_S_PAD,MROBE100_PAD,IRIVER_H10_PAD,SANSA_FUZE_PAD,PBELL_VIBE500_PAD%
,SANSA_FUZEPLUS_PAD,SAMSUNG_YH92X_PAD,SAMSUNG_YH820_PAD}
{\ButtonLeft{} / \ButtonRight{} /}
\opt{RECORDER_PAD,ONDIO_PAD,IRIVER_H100_PAD,IRIVER_H300_PAD,IAUDIO_X5_PAD%
,SANSA_E200_PAD,SANSA_C200_PAD,SANSA_CLIP_PAD,GIGABEAT_PAD,GIGABEAT_S_PAD,MROBE100_PAD%
,SANSA_FUZE_PAD,PBELL_VIBE500_PAD,SANSA_FUZEPLUS_PAD,SAMSUNG_YH92X_PAD,SAMSUNG_YH820_PAD}
{\ButtonUp{} / \ButtonDown}
\opt{SANSA_E200_PAD,SANSA_FUZE_PAD}{/}
\opt{scrollwheel}{\ButtonScrollFwd{} / \ButtonScrollBack}
\opt{IRIVER_H10_PAD}{\ButtonScrollUp{} / \ButtonScrollDown}
\opt{MPIO_HD300_PAD}{\ButtonRew / \ButtonFF / \ButtonScrollUp / \ButtonScrollDown}
\opt{MPIO_HD200_PAD}{\ButtonRew / \ButtonFF}
\opt{COWON_D2_PAD}{\TouchMidLeft{} / \TouchMidRight / \TouchTopMiddle / \TouchBottomMiddle}
\opt{HAVEREMOTEKEYMAP}{& }
& Move around the keypad\\
%
\opt{RECORDER_PAD}{\ButtonPlay}
\opt{ONDIO_PAD}{\ButtonMenu}
\opt{IRIVER_H100_PAD,IRIVER_H300_PAD,IPOD_4G_PAD,IPOD_3G_PAD,IAUDIO_X5_PAD%
,SANSA_E200_PAD,SANSA_C200_PAD,SANSA_CLIP_PAD,GIGABEAT_PAD,GIGABEAT_S_PAD,MROBE100_PAD%
,SANSA_FUZE_PAD,SANSA_FUZEPLUS_PAD}
{\ButtonSelect}
\opt{IRIVER_H10_PAD,SAMSUNG_YH92X_PAD,SAMSUNG_YH820_PAD}{\ButtonPlay}
\opt{COWON_D2_PAD}{\TouchCenter}
\opt{PBELL_VIBE500_PAD}{\ButtonOK}
\opt{MPIO_HD200_PAD}{\ButtonFunc}
\opt{MPIO_HD300_PAD}{\ButtonEnter}
\opt{HAVEREMOTEKEYMAP}{& }
& Select a button\\
%
\nopt{ONDIO_PAD,IPOD_4G_PAD,IPOD_3G_PAD}{
\opt{RECORDER_PAD}{\ButtonFOne}
\opt{IRIVER_H100_PAD,IRIVER_H300_PAD,IAUDIO_X5_PAD,SANSA_E200_PAD%
,SANSA_C200_PAD,MPIO_HD200_PAD}{\ButtonRec}
\opt{SANSA_CLIP_PAD,SANSA_FUZE_PAD}{\ButtonHome}
\opt{IRIVER_H10_PAD,SAMSUNG_YH92X_PAD,SAMSUNG_YH820_PAD}{\ButtonRew}
\opt{GIGABEAT_PAD}{\ButtonA}
\opt{GIGABEAT_S_PAD}{\ButtonPlay}
\opt{MROBE100_PAD}{\ButtonDisplay}
\opt{COWON_D2_PAD,MPIO_HD300_PAD}{\ButtonMenu}
\opt{PBELL_VIBE500_PAD}{\ButtonCancel}
\opt{SANSA_FUZEPLUS_PAD}{\ButtonBack}
\opt{HAVEREMOTEKEYMAP}{& }
& Delete last entered digit or clear after calculation\\
%
}
\opt{RECORDER_PAD,IRIVER_H100_PAD,IRIVER_H300_PAD,SAMSUNG_YH820_PAD}{
\opt{RECORDER_PAD}{\ButtonFTwo}
\opt{IRIVER_H100_PAD,IRIVER_H300_PAD}{\ButtonMode}
\opt{SAMSUNG_YH820_PAD}{\ButtonRec}
\opt{HAVEREMOTEKEYMAP}{& }
& Cycle through the 4 basic operators\\
%
}
\opt{RECORDER_PAD}{\ButtonFThree}
\opt{ONDIO_PAD}{Long \ButtonMenu}
\opt{IRIVER_H100_PAD,IRIVER_H300_PAD}{\ButtonOn}
\opt{IPOD_4G_PAD,IPOD_3G_PAD,IRIVER_H10_PAD,IAUDIO_X5_PAD
    ,PBELL_VIBE500_PAD,MPIO_HD200_PAD,MPIO_HD300_PAD,SANSA_FUZEPLUS_PAD}
{\ButtonPlay}
\opt{SANSA_E200_PAD,SANSA_C200_PAD,SANSA_CLIP_PAD,SANSA_FUZE_PAD}{Long \ButtonSelect}
\opt{GIGABEAT_PAD,GIGABEAT_S_PAD,MROBE100_PAD}{\ButtonMenu}
\opt{COWON_D2_PAD}{\TouchBottomRight}
\opt{SAMSUNG_YH92X_PAD,SAMSUNG_YH820_PAD}{\ButtonFF}
\opt{HAVEREMOTEKEYMAP}{& }
& Calculate\\
%
\opt{RECORDER_PAD,ONDIO_PAD,IRIVER_H100_PAD,IRIVER_H300_PAD}{\ButtonOff}
\opt{IPOD_4G_PAD,IPOD_3G_PAD}{\ButtonMenu}
\opt{IAUDIO_X5_PAD,IRIVER_H10_PAD,SANSA_E200_PAD,SANSA_C200_PAD,SANSA_CLIP_PAD,GIGABEAT_PAD%
,MROBE100_PAD,COWON_D2_PAD}{\ButtonPower}
\opt{SANSA_FUZE_PAD}{Long \ButtonHome}
\opt{GIGABEAT_S_PAD}{\ButtonBack}
\opt{PBELL_VIBE500_PAD}{\ButtonRec}
\opt{SAMSUNG_YH92X_PAD,SAMSUNG_YH820_PAD}{Long \ButtonRew}
\opt{MPIO_HD200_PAD}{\ButtonRec + \ButtonPlay}
\opt{MPIO_HD300_PAD}{Long \ButtonMenu}
\opt{SANSA_FUZEPLUS_PAD}{Long \ButtonBack}
\opt{HAVEREMOTEKEYMAP}{&
\opt{IRIVER_RC_H100_PAD}{\ButtonRCStop}
}
& Quit\\
\end{btnmap}
|
(*
* Copyright 2014, NICTA
*
* This software may be distributed and modified according to the terms of
* the GNU General Public License version 2. Note that NO WARRANTY is provided.
* See "LICENSE_GPLv2.txt" for details.
*
* @TAG(NICTA_GPL)
*)
(*
* Proofs regarding the State Translation.
*)
theory StateTranslationProofs_DR
imports StateTranslation_D
begin
context begin interpretation Arch . (*FIXME: arch_split*)
declare transform_current_domain_def [simp]
lemma asid_high_bits [simp]:
"Types_D.asid_high_bits = asid_high_bits"
by (simp add:Types_D.asid_high_bits_def asid_high_bits_def)
lemma asid_low_bits [simp]:
"Types_D.asid_low_bits = asid_low_bits"
by (simp add:Types_D.asid_low_bits_def asid_low_bits_def)
lemma asid_bits [simp]:
"Types_D.asid_bits = asid_bits"
by (simp add:Types_D.asid_bits_def asid_bits_def)
lemma get_obj_simps [simp]:
"get_obj (s\<lparr>cur_thread := a\<rparr>) = get_obj s"
apply (rule ext)
apply (clarsimp simp: get_obj_def)
done
lemma transform_objects_simps [simp]:
"transform_objects (s\<lparr>cur_thread := a\<rparr>) = transform_objects s"
apply (rule ext)
apply (clarsimp simp: transform_objects_def transform_object_def)
done
lemma transform_cdt_simps [simp]:
"transform_cdt (s\<lparr>cur_thread := a\<rparr>) = transform_cdt s"
apply (rule ext)+
apply (clarsimp simp: transform_cdt_def split_def)
done
(* Aggressive simp rules, to be used explicitly *)
abbreviation
"update_machine ms s \<equiv> machine_state_update (\<lambda>_. ms) s"
abbreviation
"update_kheap kh s \<equiv> kheap_update (\<lambda>_. kh) s"
abbreviation
"tcb_set_mi tcb msg \<equiv> tcb \<lparr>tcb_context := (tcb_context tcb)(msg_info_register := msg)\<rparr>"
abbreviation
"update_tcb_cxt_badge msg tcb\<equiv> tcb \<lparr>tcb_context := (tcb_context tcb)(badge_register := msg)\<rparr>"
abbreviation
"update_tcb_state state tcb \<equiv> tcb \<lparr>tcb_state := state\<rparr>"
abbreviation
"update_tcb_boundntfn ntfn_opt tcb \<equiv> tcb \<lparr>tcb_bound_notification := ntfn_opt\<rparr>"
abbreviation
"dupdate_cdl_object ptr obj s \<equiv> cdl_objects_update (\<lambda>_. cdl_objects s(ptr \<mapsto> obj)) s"
abbreviation
"dupdate_tcb_intent intent tcb\<equiv> tcb \<lparr>cdl_tcb_intent := intent\<rparr>"
lemma update_kheap_triv[simp]:
"kheap s y = Some obj\<Longrightarrow> update_kheap ((kheap s)(y \<mapsto> obj)) s = s"
apply (case_tac s,clarsimp)
apply (rule ext,clarsimp)
done
lemma msg_registers_lt_msg_max_length [simp]:
"length msg_registers < msg_max_length"
by (simp add: msgRegisters_unfold msg_max_length_def)
lemma get_tcb_mrs_update_state :
"get_tcb_mrs ms (tcb_state_update f tcb) = get_tcb_mrs ms tcb"
by (clarsimp simp:get_tcb_mrs_def Suc_le_eq get_tcb_message_info_def get_ipc_buffer_words_def)
lemma msg_info_badge_register_no_overlap:
"badge_register \<noteq> msg_info_register"
by (clarsimp simp:badge_register_def msg_info_register_def
ARM.badgeRegister_def
ARM.msgInfoRegister_def)
lemma badge_cap_register_overlap:
"badge_register = cap_register"
by (clarsimp simp:badge_register_def cap_register_def
ARM.badgeRegister_def
ARM.capRegister_def)
lemma cap_msg_info_register_no_overlap:
"cap_register \<noteq> msg_info_register"
by (clarsimp simp:msg_info_register_def cap_register_def
ARM.msgInfoRegister_def
ARM.capRegister_def)
lemmas register_overlap_check = msg_info_badge_register_no_overlap
cap_msg_info_register_no_overlap
badge_cap_register_overlap
lemma transform_full_intent_cong:
"\<lbrakk>ms = ms'; ptr = ptr';
arch_tcb_context_get (tcb_arch tcb) = arch_tcb_context_get (tcb_arch tcb');
tcb_ipc_buffer tcb = tcb_ipc_buffer tcb'; tcb_ipcframe tcb = tcb_ipcframe tcb'\<rbrakk>
\<Longrightarrow> transform_full_intent ms ptr tcb = transform_full_intent ms' ptr' tcb'"
by (simp add: transform_full_intent_def get_tcb_message_info_def get_tcb_mrs_def Suc_le_eq get_ipc_buffer_words_def)
lemma caps_of_state_eq_lift:
"\<forall>cap. cte_wp_at (op=cap) p s = cte_wp_at (op=cap) p s' \<Longrightarrow> caps_of_state s p = caps_of_state s' p"
apply (simp add:cte_wp_at_def caps_of_state_def)
done
lemma caps_of_state_irrelavent_simp:
"ref \<noteq> epptr \<Longrightarrow> caps_of_state (update_kheap (kh(epptr \<mapsto> obj)) s) (ref, cref) = caps_of_state (update_kheap kh s) (ref, cref)"
apply (rule caps_of_state_eq_lift)
apply (clarsimp simp: cte_wp_at_cases)
done
(* This doesn't satisfy the obvious transformation into capDL because of
pagetables etc. *)
fun
caps_of_object :: "kernel_object \<Rightarrow> (bool list \<rightharpoonup> cap)"
where
"caps_of_object (Structures_A.CNode sz c) = (if well_formed_cnode_n sz c then c else empty)"
| "caps_of_object (Structures_A.TCB t) = (\<lambda>n. option_map (\<lambda>(f, _). f t) (tcb_cap_cases n))"
| "caps_of_object _ = empty"
lemma caps_of_state_def2:
"caps_of_state s = (\<lambda>ptr. case (option_map caps_of_object (kheap s (fst ptr))) of
None \<Rightarrow> None
| Some f \<Rightarrow> f (snd ptr))"
unfolding caps_of_state_def get_cap_def tcb_cnode_map_def
apply (rule ext)
apply (clarsimp simp add: split_def get_object_def bind_def gets_def get_def return_def assert_def fail_def)
apply (case_tac y, simp_all add: bind_def assert_def return_def assert_opt_def fail_def tcb_cap_cases_def
split: option.splits)
done
lemma caps_of_state_update_same_caps:
assumes kh: "kh ptr = Some ko"
and coo: "caps_of_object ko' = caps_of_object ko"
shows "caps_of_state (update_kheap (kh(ptr \<mapsto> ko')) s) = caps_of_state (update_kheap kh s)"
using kh coo
apply -
apply (rule ext)
apply (clarsimp simp add: caps_of_state_def2)
done
lemma caps_of_state_update_tcb:
"\<lbrakk> kh thread = Some (TCB tcb);
(tcb_ctable tcb) = (tcb_ctable (f tcb));
(tcb_vtable tcb) = (tcb_vtable (f tcb));
(tcb_reply tcb) = (tcb_reply (f tcb));
(tcb_caller tcb) = (tcb_caller (f tcb));
(tcb_ipcframe tcb) = (tcb_ipcframe (f tcb)) \<rbrakk>
\<Longrightarrow>
caps_of_state (update_kheap (kh(thread \<mapsto> (TCB (f tcb)))) s) =
caps_of_state (update_kheap kh s)"
apply (erule caps_of_state_update_same_caps)
apply (rule ext)
apply (simp add: tcb_cap_cases_def split: if_split)
done
lemmas caps_of_state_upds = caps_of_state_update_tcb caps_of_state_update_same_caps
lemma transform_cdt_kheap_update [simp]:
"transform_cdt (kheap_update f s) = transform_cdt s"
by (clarsimp simp: transform_cdt_def)
lemma transform_cdt_update_machine [simp]:
"transform_cdt (update_machine ms s) = transform_cdt s "
by (clarsimp simp: transform_cdt_def)
lemma transform_cdt_update_original_cap [simp]:
"transform_cdt (b\<lparr>is_original_cap := x\<rparr>) = transform_cdt b"
by (clarsimp simp: transform_cdt_def)
lemma transform_asid_table_kheap_update [simp]:
"transform_asid_table (kheap_update f s) = transform_asid_table s"
by (clarsimp simp: transform_asid_table_def)
lemma transform_asid_table_update_machine [simp]:
"transform_asid_table (update_machine ms s) = transform_asid_table s "
by (clarsimp simp: transform_asid_table_def)
lemma transform_asid_table_update_original_cap [simp]:
"transform_asid_table (b\<lparr>is_original_cap := x\<rparr>) = transform_asid_table b"
by (clarsimp simp: transform_asid_table_def)
lemma transform_objects_update_kheap_same_caps:
"\<lbrakk> kh ptr = Some ko; caps_of_object ko' = caps_of_object ko; a_type ko' = a_type ko\<rbrakk> \<Longrightarrow>
transform_objects (update_kheap (kh(ptr \<mapsto> ko')) s) =
(if ptr = idle_thread s then
transform_objects (update_kheap kh s)
else
(transform_objects (update_kheap kh s))(ptr \<mapsto> transform_object (machine_state s) ptr (ekheap s ptr) ko'))"
unfolding transform_objects_def
apply (rule ext)
apply (simp add: map_option_case restrict_map_def map_add_def )
done
lemma transform_objects_update_same:
"\<lbrakk> kheap s ptr = Some ko; transform_object (machine_state s) ptr (ekheap s ptr) ko = ko'; ptr \<noteq> idle_thread s \<rbrakk>
\<Longrightarrow> (transform_objects s)(ptr \<mapsto> ko') = transform_objects s"
unfolding transform_objects_def
by (rule ext) (simp)
text {* Facts about map_lift_over *}
lemma map_lift_over_eq_Some:
"(map_lift_over f m x = Some y)
= (\<exists>x' y'. x = f x' \<and> y = f y' \<and> inj_on f (dom m \<union> ran m)
\<and> m x' = Some y')"
proof -
have P: "inj_on f (dom m \<union> ran m) \<longrightarrow> inj_on f (dom m)"
by (auto elim: subset_inj_on)
have Q: "\<And>x y. \<lbrakk> m x = Some y; inj_on f (dom m \<union> ran m) \<rbrakk>
\<Longrightarrow> inv_into (dom m) f (f x) = x"
using P
by (blast intro: inv_into_f_f)
show ?thesis
by (auto simp add: map_lift_over_def Q)
qed
lemma map_lift_over_eq_None:
"(map_lift_over f m x = None)
= (inj_on f (dom m \<union> ran m) \<longrightarrow>
(\<forall>x'. x = f x' \<longrightarrow> m x' = None))"
proof -
have P: "inj_on f (dom m \<union> ran m) \<Longrightarrow> inj_on f (dom m)"
by (auto elim: subset_inj_on)
show ?thesis
by (auto simp add: map_lift_over_def P[THEN inv_into_f_f] domI
inj_on_eq_iff[where f=f]
| rule ccontr[where P="v = None" for v])+
qed
lemma map_lift_over_f_eq:
"inj_on f ({x} \<union> dom m \<union> ran m) \<Longrightarrow>
(map_lift_over f m (f x) = v) = (v = map_option f (m x))"
apply (cases v, simp_all add: map_lift_over_eq_None map_lift_over_eq_Some)
apply (auto simp: option_map_def split: option.split)
done
lemma map_lift_over_eq_cases[unfolded map_lift_over_eq_None map_lift_over_eq_Some]:
"(map_lift_over f m x = v)
= (case v of None \<Rightarrow> map_lift_over f m x = None
| Some z \<Rightarrow> map_lift_over f m x = Some z)"
by (simp split: option.split)
lemma map_lift_over_upd:
assumes inj_f: "inj_on f ({x} \<union> set_option y \<union> dom m \<union> ran m)"
shows "(map_lift_over f (m(x := y)))
= ((map_lift_over f m) (f x := map_option f y))"
proof -
have Q: "inj_on f (dom m \<union> ran m)"
"inj_on f (insert x (dom m \<union> ran (m(x := y))))"
"inj_on f (dom m)"
"inj_on f (insert x (dom m))"
"inj_on f (dom m - {x} \<union> ran (m(x := None)))"
"inj_on f (dom m - {x})"
apply (safe intro!: subset_inj_on[OF inj_f])
apply (auto simp: ran_def split: if_split_asm)
done
show ?thesis
apply (simp add: map_lift_over_def Q del: inj_on_insert)
apply (safe intro!: ext)
apply (simp_all add: Q[THEN inv_into_f_f] domI
cong del: imp_cong)
apply (auto simp add: Q[THEN inv_into_f_f] domI
inj_on_eq_iff[OF inj_f] ranI
simp del: inj_on_insert)
done
qed
lemma map_lift_over_if_eq_twice:
assumes inj_f: "inj_on f (dom m \<union> ran m \<union> {y, y'} \<union> set_option z \<union> set_option z')"
shows
"map_lift_over f (\<lambda>x. if m x = Some y then z else if m x = Some y' then z' else m x)
= (\<lambda>x. if map_lift_over f m x = Some (f y) then map_option f z
else if map_lift_over f m x = Some (f y') then map_option f z'
else map_lift_over f m x)"
(is "map_lift_over f ?ifeq = ?rhs")
proof -
from inj_f
have 1: "inj_on f (dom m \<union> ran m)" "inj_on f (dom m)"
by (auto simp: inj_on_Un)
have "dom ?ifeq \<subseteq> dom m"
by (auto split: if_split_asm)
with inj_f
have 2: "inj_on f (dom ?ifeq)"
by (auto elim!: subset_inj_on)
have "dom ?ifeq \<union> ran ?ifeq \<subseteq> dom m \<union> ran m \<union> set_option z \<union> set_option z'"
by (auto simp: ran_def)
with inj_f
have "inj_on f (dom ?ifeq \<union> ran ?ifeq)"
by (auto elim!: subset_inj_on)
note Q = 1 2 this
note if_split[split del]
show ?thesis
apply (simp add: map_lift_over_def Q)
apply (rule ext)
apply (case_tac "x \<in> f ` dom ?ifeq")
apply clarsimp
apply (subst if_P, fastforce split: if_split_asm)+
apply (simp add: Q[THEN inv_into_f_f] domI ranI inj_on_eq_iff[OF inj_f]
split: if_split_asm)
apply (subst if_not_P, simp, rule allI, fastforce)+
apply (auto simp: option_map_def Q[THEN inv_into_f_f] domI ranI
inj_on_eq_iff[OF inj_f]
split: if_split option.split)
done
qed
lemma map_lift_over_if_eq:
assumes inj_f: "inj_on f (dom m \<union> ran m \<union> {y} \<union> set_option z)"
shows
"map_lift_over f (\<lambda>x. if m x = Some y then z else m x)
= (\<lambda>x. if map_lift_over f m x = Some (f y) then map_option f z
else map_lift_over f m x)"
using inj_f map_lift_over_if_eq_twice[where f=f and m=m and y=y and z=z and y'=y and z'=z]
apply (simp del: inj_on_insert)
done
end
end
|
(*
(C) Copyright Andreas Viktor Hess, DTU, 2018-2020
All Rights Reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products
derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*)
(* Title: Stateful_Strands.thy
Author: Andreas Viktor Hess, DTU
*)
section \<open>Stateful Strands\<close>
theory Stateful_Strands
imports Strands_and_Constraints
begin
subsection \<open>Stateful Constraints\<close>
datatype (funs\<^sub>s\<^sub>s\<^sub>t\<^sub>p: 'a, vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p: 'b) stateful_strand_step =
Send (the_msg: "('a,'b) term") ("send\<langle>_\<rangle>" 80)
| Receive (the_msg: "('a,'b) term") ("receive\<langle>_\<rangle>" 80)
| Equality (the_check: poscheckvariant) (the_lhs: "('a,'b) term") (the_rhs: "('a,'b) term")
("\<langle>_: _ \<doteq> _\<rangle>" [80,80])
| Insert (the_elem_term: "('a,'b) term") (the_set_term: "('a,'b) term") ("insert\<langle>_,_\<rangle>" 80)
| Delete (the_elem_term: "('a,'b) term") (the_set_term: "('a,'b) term") ("delete\<langle>_,_\<rangle>" 80)
| InSet (the_check: poscheckvariant) (the_elem_term: "('a,'b) term") (the_set_term: "('a,'b) term")
("\<langle>_: _ \<in> _\<rangle>" [80,80])
| NegChecks (bvars\<^sub>s\<^sub>s\<^sub>t\<^sub>p: "'b list")
(the_eqs: "(('a,'b) term \<times> ('a,'b) term) list")
(the_ins: "(('a,'b) term \<times> ('a,'b) term) list")
("\<forall>_\<langle>\<or>\<noteq>: _ \<or>\<notin>: _\<rangle>" [80,80])
where
"bvars\<^sub>s\<^sub>s\<^sub>t\<^sub>p (Send _) = []"
| "bvars\<^sub>s\<^sub>s\<^sub>t\<^sub>p (Receive _) = []"
| "bvars\<^sub>s\<^sub>s\<^sub>t\<^sub>p (Equality _ _ _) = []"
| "bvars\<^sub>s\<^sub>s\<^sub>t\<^sub>p (Insert _ _) = []"
| "bvars\<^sub>s\<^sub>s\<^sub>t\<^sub>p (Delete _ _) = []"
| "bvars\<^sub>s\<^sub>s\<^sub>t\<^sub>p (InSet _ _ _) = []"
type_synonym ('a,'b) stateful_strand = "('a,'b) stateful_strand_step list"
type_synonym ('a,'b) dbstatelist = "(('a,'b) term \<times> ('a,'b) term) list"
type_synonym ('a,'b) dbstate = "(('a,'b) term \<times> ('a,'b) term) set"
abbreviation
"is_Assignment x \<equiv> (is_Equality x \<or> is_InSet x) \<and> the_check x = Assign"
abbreviation
"is_Check x \<equiv> ((is_Equality x \<or> is_InSet x) \<and> the_check x = Check) \<or> is_NegChecks x"
abbreviation
"is_Update x \<equiv> is_Insert x \<or> is_Delete x"
abbreviation InSet_select ("select\<langle>_,_\<rangle>") where "select\<langle>t,s\<rangle> \<equiv> InSet Assign t s"
abbreviation InSet_check ("\<langle>_ in _\<rangle>") where "\<langle>t in s\<rangle> \<equiv> InSet Check t s"
abbreviation Equality_assign ("\<langle>_ := _\<rangle>") where "\<langle>t := s\<rangle> \<equiv> Equality Assign t s"
abbreviation Equality_check ("\<langle>_ == _\<rangle>") where "\<langle>t == s\<rangle> \<equiv> Equality Check t s"
abbreviation NegChecks_Inequality1 ("\<langle>_ != _\<rangle>") where
"\<langle>t != s\<rangle> \<equiv> NegChecks [] [(t,s)] []"
abbreviation NegChecks_Inequality2 ("\<forall>_\<langle>_ != _\<rangle>") where
"\<forall>x\<langle>t != s\<rangle> \<equiv> NegChecks [x] [(t,s)] []"
abbreviation NegChecks_Inequality3 ("\<forall>_,_\<langle>_ != _\<rangle>") where
"\<forall>x,y\<langle>t != s\<rangle> \<equiv> NegChecks [x,y] [(t,s)] []"
abbreviation NegChecks_Inequality4 ("\<forall>_,_,_\<langle>_ != _\<rangle>") where
"\<forall>x,y,z\<langle>t != s\<rangle> \<equiv> NegChecks [x,y,z] [(t,s)] []"
abbreviation NegChecks_NotInSet1 ("\<langle>_ not in _\<rangle>") where
"\<langle>t not in s\<rangle> \<equiv> NegChecks [] [] [(t,s)]"
abbreviation NegChecks_NotInSet2 ("\<forall>_\<langle>_ not in _\<rangle>") where
"\<forall>x\<langle>t not in s\<rangle> \<equiv> NegChecks [x] [] [(t,s)]"
abbreviation NegChecks_NotInSet3 ("\<forall>_,_\<langle>_ not in _\<rangle>") where
"\<forall>x,y\<langle>t not in s\<rangle> \<equiv> NegChecks [x,y] [] [(t,s)]"
abbreviation NegChecks_NotInSet4 ("\<forall>_,_,_\<langle>_ not in _\<rangle>") where
"\<forall>x,y,z\<langle>t not in s\<rangle> \<equiv> NegChecks [x,y,z] [] [(t,s)]"
fun trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p where
"trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p (Send t) = {t}"
| "trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p (Receive t) = {t}"
| "trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p (Equality _ t t') = {t,t'}"
| "trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p (Insert t t') = {t,t'}"
| "trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p (Delete t t') = {t,t'}"
| "trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p (InSet _ t t') = {t,t'}"
| "trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p (NegChecks _ F F') = trms\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s F \<union> trms\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s F'"
definition trms\<^sub>s\<^sub>s\<^sub>t where "trms\<^sub>s\<^sub>s\<^sub>t S \<equiv> \<Union>(trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p ` set S)"
declare trms\<^sub>s\<^sub>s\<^sub>t_def[simp]
fun trms_list\<^sub>s\<^sub>s\<^sub>t\<^sub>p where
"trms_list\<^sub>s\<^sub>s\<^sub>t\<^sub>p (Send t) = [t]"
| "trms_list\<^sub>s\<^sub>s\<^sub>t\<^sub>p (Receive t) = [t]"
| "trms_list\<^sub>s\<^sub>s\<^sub>t\<^sub>p (Equality _ t t') = [t,t']"
| "trms_list\<^sub>s\<^sub>s\<^sub>t\<^sub>p (Insert t t') = [t,t']"
| "trms_list\<^sub>s\<^sub>s\<^sub>t\<^sub>p (Delete t t') = [t,t']"
| "trms_list\<^sub>s\<^sub>s\<^sub>t\<^sub>p (InSet _ t t') = [t,t']"
| "trms_list\<^sub>s\<^sub>s\<^sub>t\<^sub>p (NegChecks _ F F') = concat (map (\<lambda>(t,t'). [t,t']) (F@F'))"
definition trms_list\<^sub>s\<^sub>s\<^sub>t where "trms_list\<^sub>s\<^sub>s\<^sub>t S \<equiv> remdups (concat (map trms_list\<^sub>s\<^sub>s\<^sub>t\<^sub>p S))"
definition ik\<^sub>s\<^sub>s\<^sub>t where "ik\<^sub>s\<^sub>s\<^sub>t A \<equiv> {t. Receive t \<in> set A}"
definition bvars\<^sub>s\<^sub>s\<^sub>t::"('a,'b) stateful_strand \<Rightarrow> 'b set" where
"bvars\<^sub>s\<^sub>s\<^sub>t S \<equiv> \<Union>(set (map (set \<circ> bvars\<^sub>s\<^sub>s\<^sub>t\<^sub>p) S))"
fun fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p::"('a,'b) stateful_strand_step \<Rightarrow> 'b set" where
"fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p (Send t) = fv t"
| "fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p (Receive t) = fv t"
| "fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p (Equality _ t t') = fv t \<union> fv t'"
| "fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p (Insert t t') = fv t \<union> fv t'"
| "fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p (Delete t t') = fv t \<union> fv t'"
| "fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p (InSet _ t t') = fv t \<union> fv t'"
| "fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p (NegChecks X F F') = fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s F \<union> fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s F' - set X"
definition fv\<^sub>s\<^sub>s\<^sub>t::"('a,'b) stateful_strand \<Rightarrow> 'b set" where
"fv\<^sub>s\<^sub>s\<^sub>t S \<equiv> \<Union>(set (map fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p S))"
fun fv_list\<^sub>s\<^sub>s\<^sub>t\<^sub>p where
"fv_list\<^sub>s\<^sub>s\<^sub>t\<^sub>p (send\<langle>t\<rangle>) = fv_list t"
| "fv_list\<^sub>s\<^sub>s\<^sub>t\<^sub>p (receive\<langle>t\<rangle>) = fv_list t"
| "fv_list\<^sub>s\<^sub>s\<^sub>t\<^sub>p (\<langle>_: t \<doteq> s\<rangle>) = fv_list t@fv_list s"
| "fv_list\<^sub>s\<^sub>s\<^sub>t\<^sub>p (insert\<langle>t,s\<rangle>) = fv_list t@fv_list s"
| "fv_list\<^sub>s\<^sub>s\<^sub>t\<^sub>p (delete\<langle>t,s\<rangle>) = fv_list t@fv_list s"
| "fv_list\<^sub>s\<^sub>s\<^sub>t\<^sub>p (\<langle>_: t \<in> s\<rangle>) = fv_list t@fv_list s"
| "fv_list\<^sub>s\<^sub>s\<^sub>t\<^sub>p (\<forall>X\<langle>\<or>\<noteq>: F \<or>\<notin>: F'\<rangle>) = filter (\<lambda>x. x \<notin> set X) (fv_list\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s (F@F'))"
definition fv_list\<^sub>s\<^sub>s\<^sub>t where
"fv_list\<^sub>s\<^sub>s\<^sub>t S \<equiv> remdups (concat (map fv_list\<^sub>s\<^sub>s\<^sub>t\<^sub>p S))"
declare bvars\<^sub>s\<^sub>s\<^sub>t_def[simp]
declare fv\<^sub>s\<^sub>s\<^sub>t_def[simp]
definition vars\<^sub>s\<^sub>s\<^sub>t::"('a,'b) stateful_strand \<Rightarrow> 'b set" where
"vars\<^sub>s\<^sub>s\<^sub>t S \<equiv> \<Union>(set (map vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p S))"
abbreviation wfrestrictedvars\<^sub>s\<^sub>s\<^sub>t\<^sub>p::"('a,'b) stateful_strand_step \<Rightarrow> 'b set" where
"wfrestrictedvars\<^sub>s\<^sub>s\<^sub>t\<^sub>p x \<equiv>
case x of
NegChecks _ _ _ \<Rightarrow> {}
| Equality Check _ _ \<Rightarrow> {}
| InSet Check _ _ \<Rightarrow> {}
| Delete _ _ \<Rightarrow> {}
| _ \<Rightarrow> vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p x"
definition wfrestrictedvars\<^sub>s\<^sub>s\<^sub>t::"('a,'b) stateful_strand \<Rightarrow> 'b set" where
"wfrestrictedvars\<^sub>s\<^sub>s\<^sub>t S \<equiv> \<Union>(set (map wfrestrictedvars\<^sub>s\<^sub>s\<^sub>t\<^sub>p S))"
abbreviation wfvarsoccs\<^sub>s\<^sub>s\<^sub>t\<^sub>p where
"wfvarsoccs\<^sub>s\<^sub>s\<^sub>t\<^sub>p x \<equiv>
case x of
Send t \<Rightarrow> fv t
| Equality Assign s t \<Rightarrow> fv s
| InSet Assign s t \<Rightarrow> fv s \<union> fv t
| _ \<Rightarrow> {}"
definition wfvarsoccs\<^sub>s\<^sub>s\<^sub>t where
"wfvarsoccs\<^sub>s\<^sub>s\<^sub>t S \<equiv> \<Union>(set (map wfvarsoccs\<^sub>s\<^sub>s\<^sub>t\<^sub>p S))"
fun wf'\<^sub>s\<^sub>s\<^sub>t::"'b set \<Rightarrow> ('a,'b) stateful_strand \<Rightarrow> bool" where
"wf'\<^sub>s\<^sub>s\<^sub>t V [] = True"
| "wf'\<^sub>s\<^sub>s\<^sub>t V (Receive t#S) = (fv t \<subseteq> V \<and> wf'\<^sub>s\<^sub>s\<^sub>t V S)"
| "wf'\<^sub>s\<^sub>s\<^sub>t V (Send t#S) = wf'\<^sub>s\<^sub>s\<^sub>t (V \<union> fv t) S"
| "wf'\<^sub>s\<^sub>s\<^sub>t V (Equality Assign t t'#S) = (fv t' \<subseteq> V \<and> wf'\<^sub>s\<^sub>s\<^sub>t (V \<union> fv t) S)"
| "wf'\<^sub>s\<^sub>s\<^sub>t V (Equality Check _ _#S) = wf'\<^sub>s\<^sub>s\<^sub>t V S"
| "wf'\<^sub>s\<^sub>s\<^sub>t V (Insert t s#S) = (fv t \<subseteq> V \<and> fv s \<subseteq> V \<and> wf'\<^sub>s\<^sub>s\<^sub>t V S)"
| "wf'\<^sub>s\<^sub>s\<^sub>t V (Delete _ _#S) = wf'\<^sub>s\<^sub>s\<^sub>t V S"
| "wf'\<^sub>s\<^sub>s\<^sub>t V (InSet Assign t s#S) = wf'\<^sub>s\<^sub>s\<^sub>t (V \<union> fv t \<union> fv s) S"
| "wf'\<^sub>s\<^sub>s\<^sub>t V (InSet Check _ _#S) = wf'\<^sub>s\<^sub>s\<^sub>t V S"
| "wf'\<^sub>s\<^sub>s\<^sub>t V (NegChecks _ _ _#S) = wf'\<^sub>s\<^sub>s\<^sub>t V S"
abbreviation "wf\<^sub>s\<^sub>s\<^sub>t S \<equiv> wf'\<^sub>s\<^sub>s\<^sub>t {} S \<and> fv\<^sub>s\<^sub>s\<^sub>t S \<inter> bvars\<^sub>s\<^sub>s\<^sub>t S = {}"
fun subst_apply_stateful_strand_step::
"('a,'b) stateful_strand_step \<Rightarrow> ('a,'b) subst \<Rightarrow> ('a,'b) stateful_strand_step"
(infix "\<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p" 51) where
"send\<langle>t\<rangle> \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta> = send\<langle>t \<cdot> \<theta>\<rangle>"
| "receive\<langle>t\<rangle> \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta> = receive\<langle>t \<cdot> \<theta>\<rangle>"
| "\<langle>a: t \<doteq> s\<rangle> \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta> = \<langle>a: (t \<cdot> \<theta>) \<doteq> (s \<cdot> \<theta>)\<rangle>"
| "\<langle>a: t \<in> s\<rangle> \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta> = \<langle>a: (t \<cdot> \<theta>) \<in> (s \<cdot> \<theta>)\<rangle>"
| "insert\<langle>t,s\<rangle> \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta> = insert\<langle>t \<cdot> \<theta>, s \<cdot> \<theta>\<rangle>"
| "delete\<langle>t,s\<rangle> \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta> = delete\<langle>t \<cdot> \<theta>, s \<cdot> \<theta>\<rangle>"
| "\<forall>X\<langle>\<or>\<noteq>: F \<or>\<notin>: G\<rangle> \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta> = \<forall>X\<langle>\<or>\<noteq>: (F \<cdot>\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s rm_vars (set X) \<theta>) \<or>\<notin>: (G \<cdot>\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s rm_vars (set X) \<theta>)\<rangle>"
definition subst_apply_stateful_strand::
"('a,'b) stateful_strand \<Rightarrow> ('a,'b) subst \<Rightarrow> ('a,'b) stateful_strand"
(infix "\<cdot>\<^sub>s\<^sub>s\<^sub>t" 51) where
"S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<theta> \<equiv> map (\<lambda>x. x \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta>) S"
fun dbupd\<^sub>s\<^sub>s\<^sub>t::"('f,'v) stateful_strand \<Rightarrow> ('f,'v) subst \<Rightarrow> ('f,'v) dbstate \<Rightarrow> ('f,'v) dbstate"
where
"dbupd\<^sub>s\<^sub>s\<^sub>t [] I D = D"
| "dbupd\<^sub>s\<^sub>s\<^sub>t (Insert t s#A) I D = dbupd\<^sub>s\<^sub>s\<^sub>t A I (insert ((t,s) \<cdot>\<^sub>p I) D)"
| "dbupd\<^sub>s\<^sub>s\<^sub>t (Delete t s#A) I D = dbupd\<^sub>s\<^sub>s\<^sub>t A I (D - {((t,s) \<cdot>\<^sub>p I)})"
| "dbupd\<^sub>s\<^sub>s\<^sub>t (_#A) I D = dbupd\<^sub>s\<^sub>s\<^sub>t A I D"
fun db'\<^sub>s\<^sub>s\<^sub>t::"('f,'v) stateful_strand \<Rightarrow> ('f,'v) subst \<Rightarrow> ('f,'v) dbstatelist \<Rightarrow> ('f,'v) dbstatelist"
where
"db'\<^sub>s\<^sub>s\<^sub>t [] I D = D"
| "db'\<^sub>s\<^sub>s\<^sub>t (Insert t s#A) I D = db'\<^sub>s\<^sub>s\<^sub>t A I (List.insert ((t,s) \<cdot>\<^sub>p I) D)"
| "db'\<^sub>s\<^sub>s\<^sub>t (Delete t s#A) I D = db'\<^sub>s\<^sub>s\<^sub>t A I (List.removeAll ((t,s) \<cdot>\<^sub>p I) D)"
| "db'\<^sub>s\<^sub>s\<^sub>t (_#A) I D = db'\<^sub>s\<^sub>s\<^sub>t A I D"
definition db\<^sub>s\<^sub>s\<^sub>t where
"db\<^sub>s\<^sub>s\<^sub>t S I \<equiv> db'\<^sub>s\<^sub>s\<^sub>t S I []"
fun setops\<^sub>s\<^sub>s\<^sub>t\<^sub>p where
"setops\<^sub>s\<^sub>s\<^sub>t\<^sub>p (Insert t s) = {(t,s)}"
| "setops\<^sub>s\<^sub>s\<^sub>t\<^sub>p (Delete t s) = {(t,s)}"
| "setops\<^sub>s\<^sub>s\<^sub>t\<^sub>p (InSet _ t s) = {(t,s)}"
| "setops\<^sub>s\<^sub>s\<^sub>t\<^sub>p (NegChecks _ _ F') = set F'"
| "setops\<^sub>s\<^sub>s\<^sub>t\<^sub>p _ = {}"
text \<open>The set-operations of a stateful strand\<close>
definition setops\<^sub>s\<^sub>s\<^sub>t where
"setops\<^sub>s\<^sub>s\<^sub>t S \<equiv> \<Union>(setops\<^sub>s\<^sub>s\<^sub>t\<^sub>p ` set S)"
fun setops_list\<^sub>s\<^sub>s\<^sub>t\<^sub>p where
"setops_list\<^sub>s\<^sub>s\<^sub>t\<^sub>p (Insert t s) = [(t,s)]"
| "setops_list\<^sub>s\<^sub>s\<^sub>t\<^sub>p (Delete t s) = [(t,s)]"
| "setops_list\<^sub>s\<^sub>s\<^sub>t\<^sub>p (InSet _ t s) = [(t,s)]"
| "setops_list\<^sub>s\<^sub>s\<^sub>t\<^sub>p (NegChecks _ _ F') = F'"
| "setops_list\<^sub>s\<^sub>s\<^sub>t\<^sub>p _ = []"
text \<open>The set-operations of a stateful strand (list variant)\<close>
definition setops_list\<^sub>s\<^sub>s\<^sub>t where
"setops_list\<^sub>s\<^sub>s\<^sub>t S \<equiv> remdups (concat (map setops_list\<^sub>s\<^sub>s\<^sub>t\<^sub>p S))"
subsection \<open>Small Lemmata\<close>
lemma trms_list\<^sub>s\<^sub>s\<^sub>t_is_trms\<^sub>s\<^sub>s\<^sub>t: "trms\<^sub>s\<^sub>s\<^sub>t S = set (trms_list\<^sub>s\<^sub>s\<^sub>t S)"
unfolding trms\<^sub>s\<^sub>s\<^sub>t_def trms_list\<^sub>s\<^sub>s\<^sub>t_def
proof (induction S)
case (Cons x S) thus ?case by (cases x) auto
qed simp
lemma setops_list\<^sub>s\<^sub>s\<^sub>t_is_setops\<^sub>s\<^sub>s\<^sub>t: "setops\<^sub>s\<^sub>s\<^sub>t S = set (setops_list\<^sub>s\<^sub>s\<^sub>t S)"
unfolding setops\<^sub>s\<^sub>s\<^sub>t_def setops_list\<^sub>s\<^sub>s\<^sub>t_def
proof (induction S)
case (Cons x S) thus ?case by (cases x) auto
qed simp
lemma fv_list\<^sub>s\<^sub>s\<^sub>t\<^sub>p_is_fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p: "fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p a = set (fv_list\<^sub>s\<^sub>s\<^sub>t\<^sub>p a)"
proof (cases a)
case (NegChecks X F G) thus ?thesis
using fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s_append[of F G] fv_list\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s_append[of F G]
fv_list\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s_is_fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s[of "F@G"]
by auto
qed (simp_all add: fv_list\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s_is_fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s fv_list_is_fv)
lemma fv_list\<^sub>s\<^sub>s\<^sub>t_is_fv\<^sub>s\<^sub>s\<^sub>t: "fv\<^sub>s\<^sub>s\<^sub>t S = set (fv_list\<^sub>s\<^sub>s\<^sub>t S)"
unfolding fv\<^sub>s\<^sub>s\<^sub>t_def fv_list\<^sub>s\<^sub>s\<^sub>t_def by (induct S) (simp_all add: fv_list\<^sub>s\<^sub>s\<^sub>t\<^sub>p_is_fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p)
lemma trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p_finite[simp]: "finite (trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p x)"
by (cases x) auto
lemma trms\<^sub>s\<^sub>s\<^sub>t_finite[simp]: "finite (trms\<^sub>s\<^sub>s\<^sub>t S)"
using trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p_finite unfolding trms\<^sub>s\<^sub>s\<^sub>t_def by (induct S) auto
lemma vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p_finite[simp]: "finite (vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p x)"
by (cases x) auto
lemma vars\<^sub>s\<^sub>s\<^sub>t_finite[simp]: "finite (vars\<^sub>s\<^sub>s\<^sub>t S)"
using vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p_finite unfolding vars\<^sub>s\<^sub>s\<^sub>t_def by (induct S) auto
lemma fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p_finite[simp]: "finite (fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p x)"
by (cases x) auto
lemma fv\<^sub>s\<^sub>s\<^sub>t_finite[simp]: "finite (fv\<^sub>s\<^sub>s\<^sub>t S)"
using fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p_finite unfolding fv\<^sub>s\<^sub>s\<^sub>t_def by (induct S) auto
lemma bvars\<^sub>s\<^sub>s\<^sub>t\<^sub>p_finite[simp]: "finite (set (bvars\<^sub>s\<^sub>s\<^sub>t\<^sub>p x))"
by (rule finite_set)
lemma bvars\<^sub>s\<^sub>s\<^sub>t_finite[simp]: "finite (bvars\<^sub>s\<^sub>s\<^sub>t S)"
using bvars\<^sub>s\<^sub>s\<^sub>t\<^sub>p_finite unfolding bvars\<^sub>s\<^sub>s\<^sub>t_def by (induct S) auto
lemma subst_sst_nil[simp]: "[] \<cdot>\<^sub>s\<^sub>s\<^sub>t \<delta> = []"
by (simp add: subst_apply_stateful_strand_def)
lemma db\<^sub>s\<^sub>s\<^sub>t_nil[simp]: "db\<^sub>s\<^sub>s\<^sub>t [] \<I> = []"
by (simp add: db\<^sub>s\<^sub>s\<^sub>t_def)
lemma ik\<^sub>s\<^sub>s\<^sub>t_nil[simp]: "ik\<^sub>s\<^sub>s\<^sub>t [] = {}"
by (simp add: ik\<^sub>s\<^sub>s\<^sub>t_def)
lemma ik\<^sub>s\<^sub>s\<^sub>t_append[simp]: "ik\<^sub>s\<^sub>s\<^sub>t (A@B) = ik\<^sub>s\<^sub>s\<^sub>t A \<union> ik\<^sub>s\<^sub>s\<^sub>t B"
by (auto simp add: ik\<^sub>s\<^sub>s\<^sub>t_def)
lemma db\<^sub>s\<^sub>s\<^sub>t_set_is_dbupd\<^sub>s\<^sub>s\<^sub>t: "set (db'\<^sub>s\<^sub>s\<^sub>t A I D) = dbupd\<^sub>s\<^sub>s\<^sub>t A I (set D)" (is "?A = ?B")
proof
show "?A \<subseteq> ?B"
proof
fix t s show "(t,s) \<in> ?A \<Longrightarrow> (t,s) \<in> ?B" by (induct rule: db'\<^sub>s\<^sub>s\<^sub>t.induct) auto
qed
show "?B \<subseteq> ?A"
proof
fix t s show "(t,s) \<in> ?B \<Longrightarrow> (t,s) \<in> ?A" by (induct arbitrary: D rule: dbupd\<^sub>s\<^sub>s\<^sub>t.induct) auto
qed
qed
lemma dbupd\<^sub>s\<^sub>s\<^sub>t_no_upd:
assumes "\<forall>a \<in> set A. \<not>is_Insert a \<and> \<not>is_Delete a"
shows "dbupd\<^sub>s\<^sub>s\<^sub>t A I D = D"
using assms
proof (induction A)
case (Cons a A) thus ?case by (cases a) auto
qed simp
lemma db\<^sub>s\<^sub>s\<^sub>t_no_upd:
assumes "\<forall>a \<in> set A. \<not>is_Insert a \<and> \<not>is_Delete a"
shows "db'\<^sub>s\<^sub>s\<^sub>t A I D = D"
using assms
proof (induction A)
case (Cons a A) thus ?case by (cases a) auto
qed simp
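
text \<open>
  A concrete instance (an added illustration; it only uses the defining equations of
  db'\<^sub>s\<^sub>s\<^sub>t): a strand consisting solely of message transmissions leaves any database
  unchanged.
\<close>
lemma "db'\<^sub>s\<^sub>s\<^sub>t [send\<langle>t\<rangle>, receive\<langle>s\<rangle>] I D = D" (* added illustrative check *)
by simp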
lemma db\<^sub>s\<^sub>s\<^sub>t_no_upd_append:
assumes "\<forall>b \<in> set B. \<not>is_Insert b \<and> \<not>is_Delete b"
shows "db'\<^sub>s\<^sub>s\<^sub>t A = db'\<^sub>s\<^sub>s\<^sub>t (A@B)"
using assms
proof (induction A)
case Nil thus ?case by (simp add: db\<^sub>s\<^sub>s\<^sub>t_no_upd)
next
case (Cons a A) thus ?case by (cases a) simp_all
qed
lemma db\<^sub>s\<^sub>s\<^sub>t_append:
"db'\<^sub>s\<^sub>s\<^sub>t (A@B) I D = db'\<^sub>s\<^sub>s\<^sub>t B I (db'\<^sub>s\<^sub>s\<^sub>t A I D)"
proof (induction A arbitrary: D)
case (Cons a A) thus ?case by (cases a) auto
qed simp
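
text \<open>
  By the preceding append lemma the database can be computed incrementally (an added
  illustration): appending a single insert step merely inserts the corresponding
  instantiated entry into the database computed so far.
\<close>
lemma "db'\<^sub>s\<^sub>s\<^sub>t (A@[insert\<langle>t,s\<rangle>]) I D = List.insert ((t,s) \<cdot>\<^sub>p I) (db'\<^sub>s\<^sub>s\<^sub>t A I D)"
  (* added illustrative check *)
by (simp add: db\<^sub>s\<^sub>s\<^sub>t_append)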
lemma db\<^sub>s\<^sub>s\<^sub>t_in_cases:
assumes "(t,s) \<in> set (db'\<^sub>s\<^sub>s\<^sub>t A I D)"
shows "(t,s) \<in> set D \<or> (\<exists>t' s'. insert\<langle>t',s'\<rangle> \<in> set A \<and> t = t' \<cdot> I \<and> s = s' \<cdot> I)"
using assms
proof (induction A arbitrary: D)
case (Cons a A) thus ?case by (cases a) fastforce+
qed simp
lemma db\<^sub>s\<^sub>s\<^sub>t_in_cases':
assumes "(t,s) \<in> set (db'\<^sub>s\<^sub>s\<^sub>t A I D)"
and "(t,s) \<notin> set D"
shows "\<exists>B C t' s'. A = B@insert\<langle>t',s'\<rangle>#C \<and> t = t' \<cdot> I \<and> s = s' \<cdot> I \<and>
(\<forall>t'' s''. delete\<langle>t'',s''\<rangle> \<in> set C \<longrightarrow> t \<noteq> t'' \<cdot> I \<or> s \<noteq> s'' \<cdot> I)"
using assms(1)
proof (induction A rule: List.rev_induct)
case (snoc a A)
note * = snoc db\<^sub>s\<^sub>s\<^sub>t_append[of A "[a]" I D]
thus ?case
proof (cases a)
case (Insert t' s')
thus ?thesis using * by (cases "(t,s) \<in> set (db'\<^sub>s\<^sub>s\<^sub>t A I D)") force+
next
case (Delete t' s')
hence **: "t \<noteq> t' \<cdot> I \<or> s \<noteq> s' \<cdot> I" using * by simp
have "(t,s) \<in> set (db'\<^sub>s\<^sub>s\<^sub>t A I D)" using * Delete by force
then obtain B C u v where B:
"A = B@insert\<langle>u,v\<rangle>#C" "t = u \<cdot> I" "s = v \<cdot> I"
"\<forall>t' s'. delete\<langle>t',s'\<rangle> \<in> set C \<longrightarrow> t \<noteq> t' \<cdot> I \<or> s \<noteq> s' \<cdot> I"
using snoc.IH by moura
have "A@[a] = B@insert\<langle>u,v\<rangle>#(C@[a])"
"\<forall>t' s'. delete\<langle>t',s'\<rangle> \<in> set (C@[a]) \<longrightarrow> t \<noteq> t' \<cdot> I \<or> s \<noteq> s' \<cdot> I"
using B(1,4) Delete ** by auto
thus ?thesis using B(2,3) by blast
qed force+
qed (simp add: assms(2))
lemma db\<^sub>s\<^sub>s\<^sub>t_filter:
"db'\<^sub>s\<^sub>s\<^sub>t A I D = db'\<^sub>s\<^sub>s\<^sub>t (filter is_Update A) I D"
by (induct A I D rule: db'\<^sub>s\<^sub>s\<^sub>t.induct) simp_all
lemma subst_sst_cons: "a#A \<cdot>\<^sub>s\<^sub>s\<^sub>t \<delta> = (a \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<delta>)#(A \<cdot>\<^sub>s\<^sub>s\<^sub>t \<delta>)"
by (simp add: subst_apply_stateful_strand_def)
lemma subst_sst_snoc: "A@[a] \<cdot>\<^sub>s\<^sub>s\<^sub>t \<delta> = (A \<cdot>\<^sub>s\<^sub>s\<^sub>t \<delta>)@[a \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<delta>]"
by (simp add: subst_apply_stateful_strand_def)
lemma subst_sst_append[simp]: "A@B \<cdot>\<^sub>s\<^sub>s\<^sub>t \<delta> = (A \<cdot>\<^sub>s\<^sub>s\<^sub>t \<delta>)@(B \<cdot>\<^sub>s\<^sub>s\<^sub>t \<delta>)"
by (simp add: subst_apply_stateful_strand_def)
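
text \<open>
  Substitution application thus acts pointwise on the steps of a strand (an added
  illustration): applying a substitution to a send followed by a receive yields the
  instantiated send followed by the instantiated receive.
\<close>
lemma "[send\<langle>t\<rangle>, receive\<langle>s\<rangle>] \<cdot>\<^sub>s\<^sub>s\<^sub>t \<delta> = [send\<langle>t \<cdot> \<delta>\<rangle>, receive\<langle>s \<cdot> \<delta>\<rangle>]"
  (* added illustrative check *)
by (simp add: subst_apply_stateful_strand_def)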
lemma sst_vars_append_subset:
"fv\<^sub>s\<^sub>s\<^sub>t A \<subseteq> fv\<^sub>s\<^sub>s\<^sub>t (A@B)" "bvars\<^sub>s\<^sub>s\<^sub>t A \<subseteq> bvars\<^sub>s\<^sub>s\<^sub>t (A@B)"
"fv\<^sub>s\<^sub>s\<^sub>t B \<subseteq> fv\<^sub>s\<^sub>s\<^sub>t (A@B)" "bvars\<^sub>s\<^sub>s\<^sub>t B \<subseteq> bvars\<^sub>s\<^sub>s\<^sub>t (A@B)"
by auto
lemma sst_vars_disj_cons[simp]: "fv\<^sub>s\<^sub>s\<^sub>t (a#A) \<inter> bvars\<^sub>s\<^sub>s\<^sub>t (a#A) = {} \<Longrightarrow> fv\<^sub>s\<^sub>s\<^sub>t A \<inter> bvars\<^sub>s\<^sub>s\<^sub>t A = {}"
unfolding fv\<^sub>s\<^sub>s\<^sub>t_def bvars\<^sub>s\<^sub>s\<^sub>t_def by auto
lemma fv\<^sub>s\<^sub>s\<^sub>t_cons_subset[simp]: "fv\<^sub>s\<^sub>s\<^sub>t A \<subseteq> fv\<^sub>s\<^sub>s\<^sub>t (a#A)"
by auto
lemma fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p_subst_cases[simp]:
"fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p (send\<langle>t\<rangle> \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta>) = fv (t \<cdot> \<theta>)"
"fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p (receive\<langle>t\<rangle> \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta>) = fv (t \<cdot> \<theta>)"
"fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p (\<langle>c: t \<doteq> s\<rangle> \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta>) = fv (t \<cdot> \<theta>) \<union> fv (s \<cdot> \<theta>)"
"fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p (insert\<langle>t,s\<rangle> \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta>) = fv (t \<cdot> \<theta>) \<union> fv (s \<cdot> \<theta>)"
"fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p (delete\<langle>t,s\<rangle> \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta>) = fv (t \<cdot> \<theta>) \<union> fv (s \<cdot> \<theta>)"
"fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p (\<langle>c: t \<in> s\<rangle> \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta>) = fv (t \<cdot> \<theta>) \<union> fv (s \<cdot> \<theta>)"
"fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p (\<forall>X\<langle>\<or>\<noteq>: F \<or>\<notin>: G\<rangle> \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta>) =
fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s (F \<cdot>\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s rm_vars (set X) \<theta>) \<union> fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s (G \<cdot>\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s rm_vars (set X) \<theta>) - set X"
by simp_all
lemma vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p_cases[simp]:
"vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p (send\<langle>t\<rangle>) = fv t"
"vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p (receive\<langle>t\<rangle>) = fv t"
"vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p (\<langle>c: t \<doteq> s\<rangle>) = fv t \<union> fv s"
"vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p (insert\<langle>t,s\<rangle>) = fv t \<union> fv s"
"vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p (delete\<langle>t,s\<rangle>) = fv t \<union> fv s"
"vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p (\<langle>c: t \<in> s\<rangle>) = fv t \<union> fv s"
"vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p (\<forall>X\<langle>\<or>\<noteq>: F \<or>\<notin>: G\<rangle>) = fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s F \<union> fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s G \<union> set X" (is ?A)
"vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p (\<forall>X\<langle>\<or>\<noteq>: [(t,s)] \<or>\<notin>: []\<rangle>) = fv t \<union> fv s \<union> set X" (is ?B)
"vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p (\<forall>X\<langle>\<or>\<noteq>: [] \<or>\<notin>: [(t,s)]\<rangle>) = fv t \<union> fv s \<union> set X" (is ?C)
proof
show ?A ?B ?C by auto
qed simp_all
lemma vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p_subst_cases[simp]:
"vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p (send\<langle>t\<rangle> \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta>) = fv (t \<cdot> \<theta>)"
"vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p (receive\<langle>t\<rangle> \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta>) = fv (t \<cdot> \<theta>)"
"vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p (\<langle>c: t \<doteq> s\<rangle> \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta>) = fv (t \<cdot> \<theta>) \<union> fv (s \<cdot> \<theta>)"
"vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p (insert\<langle>t,s\<rangle> \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta>) = fv (t \<cdot> \<theta>) \<union> fv (s \<cdot> \<theta>)"
"vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p (delete\<langle>t,s\<rangle> \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta>) = fv (t \<cdot> \<theta>) \<union> fv (s \<cdot> \<theta>)"
"vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p (\<langle>c: t \<in> s\<rangle> \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta>) = fv (t \<cdot> \<theta>) \<union> fv (s \<cdot> \<theta>)"
"vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p (\<forall>X\<langle>\<or>\<noteq>: F \<or>\<notin>: G\<rangle> \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta>) =
fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s (F \<cdot>\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s rm_vars (set X) \<theta>) \<union> fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s (G \<cdot>\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s rm_vars (set X) \<theta>) \<union> set X" (is ?A)
"vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p (\<forall>X\<langle>\<or>\<noteq>: [(t,s)] \<or>\<notin>: []\<rangle> \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta>) =
fv (t \<cdot> rm_vars (set X) \<theta>) \<union> fv (s \<cdot> rm_vars (set X) \<theta>) \<union> set X" (is ?B)
"vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p (\<forall>X\<langle>\<or>\<noteq>: [] \<or>\<notin>: [(t,s)]\<rangle> \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta>) =
fv (t \<cdot> rm_vars (set X) \<theta>) \<union> fv (s \<cdot> rm_vars (set X) \<theta>) \<union> set X" (is ?C)
proof
show ?A ?B ?C by auto
qed simp_all
lemma bvars\<^sub>s\<^sub>s\<^sub>t_cons_subset: "bvars\<^sub>s\<^sub>s\<^sub>t A \<subseteq> bvars\<^sub>s\<^sub>s\<^sub>t (a#A)"
by auto
lemma bvars\<^sub>s\<^sub>s\<^sub>t\<^sub>p_subst: "bvars\<^sub>s\<^sub>s\<^sub>t\<^sub>p (a \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<delta>) = bvars\<^sub>s\<^sub>s\<^sub>t\<^sub>p a"
by (cases a) auto
lemma bvars\<^sub>s\<^sub>s\<^sub>t_subst: "bvars\<^sub>s\<^sub>s\<^sub>t (A \<cdot>\<^sub>s\<^sub>s\<^sub>t \<delta>) = bvars\<^sub>s\<^sub>s\<^sub>t A"
using bvars\<^sub>s\<^sub>s\<^sub>t\<^sub>p_subst[of _ \<delta>]
by (induct A) (simp_all add: subst_apply_stateful_strand_def)
lemma bvars\<^sub>s\<^sub>s\<^sub>t\<^sub>p_set_cases[simp]:
"set (bvars\<^sub>s\<^sub>s\<^sub>t\<^sub>p (send\<langle>t\<rangle>)) = {}"
"set (bvars\<^sub>s\<^sub>s\<^sub>t\<^sub>p (receive\<langle>t\<rangle>)) = {}"
"set (bvars\<^sub>s\<^sub>s\<^sub>t\<^sub>p (\<langle>c: t \<doteq> s\<rangle>)) = {}"
"set (bvars\<^sub>s\<^sub>s\<^sub>t\<^sub>p (insert\<langle>t,s\<rangle>)) = {}"
"set (bvars\<^sub>s\<^sub>s\<^sub>t\<^sub>p (delete\<langle>t,s\<rangle>)) = {}"
"set (bvars\<^sub>s\<^sub>s\<^sub>t\<^sub>p (\<langle>c: t \<in> s\<rangle>)) = {}"
"set (bvars\<^sub>s\<^sub>s\<^sub>t\<^sub>p (\<forall>X\<langle>\<or>\<noteq>: F \<or>\<notin>: G\<rangle>)) = set X"
by simp_all
lemma bvars\<^sub>s\<^sub>s\<^sub>t_NegChecks: "bvars\<^sub>s\<^sub>s\<^sub>t A = bvars\<^sub>s\<^sub>s\<^sub>t (filter is_NegChecks A)"
proof (induction A)
case (Cons a A) thus ?case by (cases a) fastforce+
qed simp
lemma vars\<^sub>s\<^sub>s\<^sub>t_append[simp]: "vars\<^sub>s\<^sub>s\<^sub>t (A@B) = vars\<^sub>s\<^sub>s\<^sub>t A \<union> vars\<^sub>s\<^sub>s\<^sub>t B"
by (simp add: vars\<^sub>s\<^sub>s\<^sub>t_def)
lemma vars\<^sub>s\<^sub>s\<^sub>t_Nil[simp]: "vars\<^sub>s\<^sub>s\<^sub>t [] = {}"
by (simp add: vars\<^sub>s\<^sub>s\<^sub>t_def)
lemma vars\<^sub>s\<^sub>s\<^sub>t_Cons: "vars\<^sub>s\<^sub>s\<^sub>t (a#A) = vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p a \<union> vars\<^sub>s\<^sub>s\<^sub>t A"
by (simp add: vars\<^sub>s\<^sub>s\<^sub>t_def)
lemma fv\<^sub>s\<^sub>s\<^sub>t_Cons: "fv\<^sub>s\<^sub>s\<^sub>t (a#A) = fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p a \<union> fv\<^sub>s\<^sub>s\<^sub>t A"
unfolding fv\<^sub>s\<^sub>s\<^sub>t_def by simp
lemma bvars\<^sub>s\<^sub>s\<^sub>t_Cons: "bvars\<^sub>s\<^sub>s\<^sub>t (a#A) = set (bvars\<^sub>s\<^sub>s\<^sub>t\<^sub>p a) \<union> bvars\<^sub>s\<^sub>s\<^sub>t A"
unfolding bvars\<^sub>s\<^sub>s\<^sub>t_def by auto
lemma vars\<^sub>s\<^sub>s\<^sub>t_Cons'[simp]:
"vars\<^sub>s\<^sub>s\<^sub>t (send\<langle>t\<rangle>#A) = vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p (send\<langle>t\<rangle>) \<union> vars\<^sub>s\<^sub>s\<^sub>t A"
"vars\<^sub>s\<^sub>s\<^sub>t (receive\<langle>t\<rangle>#A) = vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p (receive\<langle>t\<rangle>) \<union> vars\<^sub>s\<^sub>s\<^sub>t A"
"vars\<^sub>s\<^sub>s\<^sub>t (\<langle>a: t \<doteq> s\<rangle>#A) = vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p (\<langle>a: t \<doteq> s\<rangle>) \<union> vars\<^sub>s\<^sub>s\<^sub>t A"
"vars\<^sub>s\<^sub>s\<^sub>t (insert\<langle>t,s\<rangle>#A) = vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p (insert\<langle>t,s\<rangle>) \<union> vars\<^sub>s\<^sub>s\<^sub>t A"
"vars\<^sub>s\<^sub>s\<^sub>t (delete\<langle>t,s\<rangle>#A) = vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p (delete\<langle>t,s\<rangle>) \<union> vars\<^sub>s\<^sub>s\<^sub>t A"
"vars\<^sub>s\<^sub>s\<^sub>t (\<langle>a: t \<in> s\<rangle>#A) = vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p (\<langle>a: t \<in> s\<rangle>) \<union> vars\<^sub>s\<^sub>s\<^sub>t A"
"vars\<^sub>s\<^sub>s\<^sub>t (\<forall>X\<langle>\<or>\<noteq>: F \<or>\<notin>: G\<rangle>#A) = vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p (\<forall>X\<langle>\<or>\<noteq>: F \<or>\<notin>: G\<rangle>) \<union> vars\<^sub>s\<^sub>s\<^sub>t A"
by (simp_all add: vars\<^sub>s\<^sub>s\<^sub>t_def)
lemma vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p_is_fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p_bvars\<^sub>s\<^sub>s\<^sub>t\<^sub>p:
fixes x::"('a,'b) stateful_strand_step"
shows "vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p x = fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p x \<union> set (bvars\<^sub>s\<^sub>s\<^sub>t\<^sub>p x)"
proof (cases x)
case (NegChecks X F G) thus ?thesis by (induct F) force+
qed simp_all
lemma vars\<^sub>s\<^sub>s\<^sub>t_is_fv\<^sub>s\<^sub>s\<^sub>t_bvars\<^sub>s\<^sub>s\<^sub>t:
fixes S::"('a,'b) stateful_strand"
shows "vars\<^sub>s\<^sub>s\<^sub>t S = fv\<^sub>s\<^sub>s\<^sub>t S \<union> bvars\<^sub>s\<^sub>s\<^sub>t S"
proof (induction S)
case (Cons x S) thus ?case
using vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p_is_fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p_bvars\<^sub>s\<^sub>s\<^sub>t\<^sub>p[of x]
by (auto simp add: vars\<^sub>s\<^sub>s\<^sub>t_def)
qed simp
lemma vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p_NegCheck[simp]:
"vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p (\<forall>X\<langle>\<or>\<noteq>: F \<or>\<notin>: G\<rangle>) = set X \<union> fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s F \<union> fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s G"
by (simp_all add: sup_commute sup_left_commute vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p_is_fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p_bvars\<^sub>s\<^sub>s\<^sub>t\<^sub>p)
lemma bvars\<^sub>s\<^sub>s\<^sub>t\<^sub>p_NegCheck[simp]:
"bvars\<^sub>s\<^sub>s\<^sub>t\<^sub>p (\<forall>X\<langle>\<or>\<noteq>: F \<or>\<notin>: G\<rangle>) = X"
"set (bvars\<^sub>s\<^sub>s\<^sub>t\<^sub>p (\<forall>[]\<langle>\<or>\<noteq>: F \<or>\<notin>: G\<rangle>)) = {}"
by simp_all
lemma fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p_NegCheck[simp]:
"fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p (\<forall>X\<langle>\<or>\<noteq>: F \<or>\<notin>: G\<rangle>) = fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s F \<union> fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s G - set X"
"fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p (\<forall>[]\<langle>\<or>\<noteq>: F \<or>\<notin>: G\<rangle>) = fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s F \<union> fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s G"
"fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p (\<langle>t != s\<rangle>) = fv t \<union> fv s"
"fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p (\<langle>t not in s\<rangle>) = fv t \<union> fv s"
by simp_all
lemma fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p_is_subterm_trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p:
assumes "x \<in> fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p a"
shows "Var x \<in> subterms\<^sub>s\<^sub>e\<^sub>t (trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p a)"
using assms var_is_subterm
proof (cases a)
case (NegChecks X F F')
hence "x \<in> fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s F \<union> fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s F' - set X" using assms by simp
thus ?thesis using NegChecks var_is_subterm by fastforce
qed force+
lemma fv\<^sub>s\<^sub>s\<^sub>t_is_subterm_trms\<^sub>s\<^sub>s\<^sub>t: "x \<in> fv\<^sub>s\<^sub>s\<^sub>t A \<Longrightarrow> Var x \<in> subterms\<^sub>s\<^sub>e\<^sub>t (trms\<^sub>s\<^sub>s\<^sub>t A)"
proof (induction A)
case (Cons a A) thus ?case using fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p_is_subterm_trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p by (cases "x \<in> fv\<^sub>s\<^sub>s\<^sub>t A") auto
qed simp
lemma var_subterm_trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p_is_vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p:
assumes "Var x \<in> subterms\<^sub>s\<^sub>e\<^sub>t (trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p a)"
shows "x \<in> vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p a"
using assms vars_iff_subtermeq
proof (cases a)
case (NegChecks X F F')
hence "Var x \<in> subterms\<^sub>s\<^sub>e\<^sub>t (trms\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s F \<union> trms\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s F')" using assms by simp
thus ?thesis using NegChecks vars_iff_subtermeq by force
qed force+
lemma var_subterm_trms\<^sub>s\<^sub>s\<^sub>t_is_vars\<^sub>s\<^sub>s\<^sub>t: "Var x \<in> subterms\<^sub>s\<^sub>e\<^sub>t (trms\<^sub>s\<^sub>s\<^sub>t A) \<Longrightarrow> x \<in> vars\<^sub>s\<^sub>s\<^sub>t A"
proof (induction A)
case (Cons a A)
show ?case
proof (cases "Var x \<in> subterms\<^sub>s\<^sub>e\<^sub>t (trms\<^sub>s\<^sub>s\<^sub>t A)")
case True thus ?thesis using Cons.IH by (simp add: vars\<^sub>s\<^sub>s\<^sub>t_def)
next
case False thus ?thesis
using Cons.prems var_subterm_trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p_is_vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p
by (fastforce simp add: vars\<^sub>s\<^sub>s\<^sub>t_def)
qed
qed simp
lemma var_trms\<^sub>s\<^sub>s\<^sub>t_is_vars\<^sub>s\<^sub>s\<^sub>t: "Var x \<in> trms\<^sub>s\<^sub>s\<^sub>t A \<Longrightarrow> x \<in> vars\<^sub>s\<^sub>s\<^sub>t A"
by (meson var_subterm_trms\<^sub>s\<^sub>s\<^sub>t_is_vars\<^sub>s\<^sub>s\<^sub>t UN_I term.order_refl)
lemma ik\<^sub>s\<^sub>s\<^sub>t_trms\<^sub>s\<^sub>s\<^sub>t_subset: "ik\<^sub>s\<^sub>s\<^sub>t A \<subseteq> trms\<^sub>s\<^sub>s\<^sub>t A"
by (force simp add: ik\<^sub>s\<^sub>s\<^sub>t_def)
lemma var_subterm_ik\<^sub>s\<^sub>s\<^sub>t_is_vars\<^sub>s\<^sub>s\<^sub>t: "Var x \<in> subterms\<^sub>s\<^sub>e\<^sub>t (ik\<^sub>s\<^sub>s\<^sub>t A) \<Longrightarrow> x \<in> vars\<^sub>s\<^sub>s\<^sub>t A"
using var_subterm_trms\<^sub>s\<^sub>s\<^sub>t_is_vars\<^sub>s\<^sub>s\<^sub>t ik\<^sub>s\<^sub>s\<^sub>t_trms\<^sub>s\<^sub>s\<^sub>t_subset by fast
lemma fv_trms\<^sub>s\<^sub>s\<^sub>t_subset:
"fv\<^sub>s\<^sub>e\<^sub>t (trms\<^sub>s\<^sub>s\<^sub>t S) \<subseteq> vars\<^sub>s\<^sub>s\<^sub>t S"
"fv\<^sub>s\<^sub>s\<^sub>t S \<subseteq> fv\<^sub>s\<^sub>e\<^sub>t (trms\<^sub>s\<^sub>s\<^sub>t S)"
proof (induction S)
case (Cons x S)
have *: "fv\<^sub>s\<^sub>e\<^sub>t (trms\<^sub>s\<^sub>s\<^sub>t (x#S)) = fv\<^sub>s\<^sub>e\<^sub>t (trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p x) \<union> fv\<^sub>s\<^sub>e\<^sub>t (trms\<^sub>s\<^sub>s\<^sub>t S)"
"fv\<^sub>s\<^sub>s\<^sub>t (x#S) = fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p x \<union> fv\<^sub>s\<^sub>s\<^sub>t S" "vars\<^sub>s\<^sub>s\<^sub>t (x#S) = vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p x \<union> vars\<^sub>s\<^sub>s\<^sub>t S"
unfolding trms\<^sub>s\<^sub>s\<^sub>t_def fv\<^sub>s\<^sub>s\<^sub>t_def vars\<^sub>s\<^sub>s\<^sub>t_def
by auto
{ case 1
show ?case using Cons.IH(1)
proof (cases x)
case (NegChecks X F G)
hence "trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p x = trms\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s F \<union> trms\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s G"
"vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p x = fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s F \<union> fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s G \<union> set X"
by (simp, meson vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p_cases(7))
hence "fv\<^sub>s\<^sub>e\<^sub>t (trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p x) \<subseteq> vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p x"
using fv_trms\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s_is_fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s[of F] fv_trms\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s_is_fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s[of G]
by auto
thus ?thesis
using Cons.IH(1) *(1,3)
by blast
qed auto
}
{ case 2
show ?case using Cons.IH(2)
proof (cases x)
case (NegChecks X F G)
hence "trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p x = trms\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s F \<union> trms\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s G"
"fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p x = (fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s F \<union> fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s G) - set X"
by auto
hence "fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p x \<subseteq> fv\<^sub>s\<^sub>e\<^sub>t (trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p x)"
using fv_trms\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s_is_fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s[of F] fv_trms\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s_is_fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s[of G]
by auto
thus ?thesis
using Cons.IH(2) *(1,2)
by blast
qed auto
}
qed simp_all
lemma fv_ik_subset_fv_sst'[simp]: "fv\<^sub>s\<^sub>e\<^sub>t (ik\<^sub>s\<^sub>s\<^sub>t S) \<subseteq> fv\<^sub>s\<^sub>s\<^sub>t S"
unfolding ik\<^sub>s\<^sub>s\<^sub>t_def by (induct S) auto
lemma fv_ik_subset_vars_sst'[simp]: "fv\<^sub>s\<^sub>e\<^sub>t (ik\<^sub>s\<^sub>s\<^sub>t S) \<subseteq> vars\<^sub>s\<^sub>s\<^sub>t S"
using fv_ik_subset_fv_sst' fv_trms\<^sub>s\<^sub>s\<^sub>t_subset by fast
lemma ik\<^sub>s\<^sub>s\<^sub>t_var_is_fv: "Var x \<in> subterms\<^sub>s\<^sub>e\<^sub>t (ik\<^sub>s\<^sub>s\<^sub>t A) \<Longrightarrow> x \<in> fv\<^sub>s\<^sub>s\<^sub>t A"
by (meson fv_ik_subset_fv_sst'[of A] fv_subset_subterms subsetCE term.set_intros(3))
lemma vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p_subst_cases':
assumes x: "x \<in> vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p (s \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta>)"
shows "x \<in> vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p s \<or> x \<in> fv\<^sub>s\<^sub>e\<^sub>t (\<theta> ` vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p s)"
using x vars_term_subst[of _ \<theta>] vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p_cases(1,2,3,4,5,6) vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p_subst_cases(1,2)[of _ \<theta>]
vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p_subst_cases(3,6)[of _ _ _ \<theta>] vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p_subst_cases(4,5)[of _ _ \<theta>]
proof (cases s)
case (NegChecks X F G)
let ?\<theta>' = "rm_vars (set X) \<theta>"
have "x \<in> fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s (F \<cdot>\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s ?\<theta>') \<or> x \<in> fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s (G \<cdot>\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s ?\<theta>') \<or> x \<in> set X"
using vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p_subst_cases(7)[of X F G \<theta>] x NegChecks by simp
hence "x \<in> fv\<^sub>s\<^sub>e\<^sub>t (?\<theta>' ` fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s F) \<or> x \<in> fv\<^sub>s\<^sub>e\<^sub>t (?\<theta>' ` fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s G) \<or> x \<in> set X"
using fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s_subst[of _ ?\<theta>'] by blast
hence "x \<in> fv\<^sub>s\<^sub>e\<^sub>t (\<theta> ` fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s F) \<or> x \<in> fv\<^sub>s\<^sub>e\<^sub>t (\<theta> ` fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s G) \<or> x \<in> set X"
using rm_vars_fv\<^sub>s\<^sub>e\<^sub>t_subst by fast
thus ?thesis
using NegChecks vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p_cases(7)[of X F G]
by auto
qed simp_all
lemma vars\<^sub>s\<^sub>s\<^sub>t_subst_cases:
assumes "x \<in> vars\<^sub>s\<^sub>s\<^sub>t (S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<theta>)"
shows "x \<in> vars\<^sub>s\<^sub>s\<^sub>t S \<or> x \<in> fv\<^sub>s\<^sub>e\<^sub>t (\<theta> ` vars\<^sub>s\<^sub>s\<^sub>t S)"
using assms
proof (induction S)
case (Cons s S) thus ?case
proof (cases "x \<in> vars\<^sub>s\<^sub>s\<^sub>t (S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<theta>)")
case False
note * = subst_sst_cons[of s S \<theta>] vars\<^sub>s\<^sub>s\<^sub>t_Cons[of "s \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta>" "S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<theta>"] vars\<^sub>s\<^sub>s\<^sub>t_Cons[of s S]
have **: "x \<in> vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p (s \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta>)" using Cons.prems False * by simp
show ?thesis using vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p_subst_cases'[OF **] * by auto
qed (auto simp add: vars\<^sub>s\<^sub>s\<^sub>t_def)
qed simp
lemma subset_subst_pairs_diff_exists:
fixes \<I>::"('a,'b) subst" and D D'::"('a,'b) dbstate"
shows "\<exists>Di. Di \<subseteq> D \<and> Di \<cdot>\<^sub>p\<^sub>s\<^sub>e\<^sub>t \<I> = (D \<cdot>\<^sub>p\<^sub>s\<^sub>e\<^sub>t \<I>) - D'"
by (metis (no_types, lifting) Diff_subset subset_image_iff)
lemma subset_subst_pairs_diff_exists':
fixes \<I>::"('a,'b) subst" and D::"('a,'b) dbstate"
assumes "finite D"
shows "\<exists>Di. Di \<subseteq> D \<and> Di \<cdot>\<^sub>p\<^sub>s\<^sub>e\<^sub>t \<I> \<subseteq> {d \<cdot>\<^sub>p \<I>} \<and> d \<cdot>\<^sub>p \<I> \<notin> (D - Di) \<cdot>\<^sub>p\<^sub>s\<^sub>e\<^sub>t \<I>"
using assms
proof (induction D rule: finite_induct)
case (insert d' D)
then obtain Di where IH: "Di \<subseteq> D" "Di \<cdot>\<^sub>p\<^sub>s\<^sub>e\<^sub>t \<I> \<subseteq> {d \<cdot>\<^sub>p \<I>}" "d \<cdot>\<^sub>p \<I> \<notin> (D - Di) \<cdot>\<^sub>p\<^sub>s\<^sub>e\<^sub>t \<I>" by moura
show ?case
proof (cases "d' \<cdot>\<^sub>p \<I> = d \<cdot>\<^sub>p \<I>")
case True
hence "insert d' Di \<subseteq> insert d' D" "insert d' Di \<cdot>\<^sub>p\<^sub>s\<^sub>e\<^sub>t \<I> \<subseteq> {d \<cdot>\<^sub>p \<I>}"
"d \<cdot>\<^sub>p \<I> \<notin> (insert d' D - insert d' Di) \<cdot>\<^sub>p\<^sub>s\<^sub>e\<^sub>t \<I>"
using IH by auto
thus ?thesis by metis
next
case False
hence "Di \<subseteq> insert d' D" "Di \<cdot>\<^sub>p\<^sub>s\<^sub>e\<^sub>t \<I> \<subseteq> {d \<cdot>\<^sub>p \<I>}"
"d \<cdot>\<^sub>p \<I> \<notin> (insert d' D - Di) \<cdot>\<^sub>p\<^sub>s\<^sub>e\<^sub>t \<I>"
using IH by auto
thus ?thesis by metis
qed
qed simp
lemma stateful_strand_step_subst_inI[intro]:
"send\<langle>t\<rangle> \<in> set A \<Longrightarrow> send\<langle>t \<cdot> \<theta>\<rangle> \<in> set (A \<cdot>\<^sub>s\<^sub>s\<^sub>t \<theta>)"
"receive\<langle>t\<rangle> \<in> set A \<Longrightarrow> receive\<langle>t \<cdot> \<theta>\<rangle> \<in> set (A \<cdot>\<^sub>s\<^sub>s\<^sub>t \<theta>)"
"\<langle>c: t \<doteq> s\<rangle> \<in> set A \<Longrightarrow> \<langle>c: (t \<cdot> \<theta>) \<doteq> (s \<cdot> \<theta>)\<rangle> \<in> set (A \<cdot>\<^sub>s\<^sub>s\<^sub>t \<theta>)"
"insert\<langle>t, s\<rangle> \<in> set A \<Longrightarrow> insert\<langle>t \<cdot> \<theta>, s \<cdot> \<theta>\<rangle> \<in> set (A \<cdot>\<^sub>s\<^sub>s\<^sub>t \<theta>)"
"delete\<langle>t, s\<rangle> \<in> set A \<Longrightarrow> delete\<langle>t \<cdot> \<theta>, s \<cdot> \<theta>\<rangle> \<in> set (A \<cdot>\<^sub>s\<^sub>s\<^sub>t \<theta>)"
"\<langle>c: t \<in> s\<rangle> \<in> set A \<Longrightarrow> \<langle>c: (t \<cdot> \<theta>) \<in> (s \<cdot> \<theta>)\<rangle> \<in> set (A \<cdot>\<^sub>s\<^sub>s\<^sub>t \<theta>)"
"\<forall>X\<langle>\<or>\<noteq>: F \<or>\<notin>: G\<rangle> \<in> set A
\<Longrightarrow> \<forall>X\<langle>\<or>\<noteq>: (F \<cdot>\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s rm_vars (set X) \<theta>) \<or>\<notin>: (G \<cdot>\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s rm_vars (set X) \<theta>)\<rangle> \<in> set (A \<cdot>\<^sub>s\<^sub>s\<^sub>t \<theta>)"
"\<langle>t != s\<rangle> \<in> set A \<Longrightarrow> \<langle>t \<cdot> \<theta> != s \<cdot> \<theta>\<rangle> \<in> set (A \<cdot>\<^sub>s\<^sub>s\<^sub>t \<theta>)"
"\<langle>t not in s\<rangle> \<in> set A \<Longrightarrow> \<langle>t \<cdot> \<theta> not in s \<cdot> \<theta>\<rangle> \<in> set (A \<cdot>\<^sub>s\<^sub>s\<^sub>t \<theta>)"
proof (induction A)
case (Cons a A)
note * = subst_sst_cons[of a A \<theta>]
{ case 1 thus ?case using Cons.IH(1) * by (cases a) auto }
{ case 2 thus ?case using Cons.IH(2) * by (cases a) auto }
{ case 3 thus ?case using Cons.IH(3) * by (cases a) auto }
{ case 4 thus ?case using Cons.IH(4) * by (cases a) auto }
{ case 5 thus ?case using Cons.IH(5) * by (cases a) auto }
{ case 6 thus ?case using Cons.IH(6) * by (cases a) auto }
{ case 7 thus ?case using Cons.IH(7) * by (cases a) auto }
{ case 8 thus ?case using Cons.IH(8) * by (cases a) auto }
{ case 9 thus ?case using Cons.IH(9) * by (cases a) auto }
qed simp_all
lemma stateful_strand_step_cases_subst:
"is_Send a = is_Send (a \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta>)"
"is_Receive a = is_Receive (a \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta>)"
"is_Equality a = is_Equality (a \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta>)"
"is_Insert a = is_Insert (a \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta>)"
"is_Delete a = is_Delete (a \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta>)"
"is_InSet a = is_InSet (a \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta>)"
"is_NegChecks a = is_NegChecks (a \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta>)"
"is_Assignment a = is_Assignment (a \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta>)"
"is_Check a = is_Check (a \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta>)"
"is_Update a = is_Update (a \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta>)"
by (cases a; simp_all)+
lemma stateful_strand_step_subst_inv_cases:
"send\<langle>t\<rangle> \<in> set (S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<sigma>) \<Longrightarrow> \<exists>t'. t = t' \<cdot> \<sigma> \<and> send\<langle>t'\<rangle> \<in> set S"
"receive\<langle>t\<rangle> \<in> set (S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<sigma>) \<Longrightarrow> \<exists>t'. t = t' \<cdot> \<sigma> \<and> receive\<langle>t'\<rangle> \<in> set S"
"\<langle>c: t \<doteq> s\<rangle> \<in> set (S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<sigma>) \<Longrightarrow> \<exists>t' s'. t = t' \<cdot> \<sigma> \<and> s = s' \<cdot> \<sigma> \<and> \<langle>c: t' \<doteq> s'\<rangle> \<in> set S"
"insert\<langle>t,s\<rangle> \<in> set (S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<sigma>) \<Longrightarrow> \<exists>t' s'. t = t' \<cdot> \<sigma> \<and> s = s' \<cdot> \<sigma> \<and> insert\<langle>t',s'\<rangle> \<in> set S"
"delete\<langle>t,s\<rangle> \<in> set (S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<sigma>) \<Longrightarrow> \<exists>t' s'. t = t' \<cdot> \<sigma> \<and> s = s' \<cdot> \<sigma> \<and> delete\<langle>t',s'\<rangle> \<in> set S"
"\<langle>c: t \<in> s\<rangle> \<in> set (S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<sigma>) \<Longrightarrow> \<exists>t' s'. t = t' \<cdot> \<sigma> \<and> s = s' \<cdot> \<sigma> \<and> \<langle>c: t' \<in> s'\<rangle> \<in> set S"
"\<forall>X\<langle>\<or>\<noteq>: F \<or>\<notin>: G\<rangle> \<in> set (S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<sigma>) \<Longrightarrow>
\<exists>F' G'. F = F' \<cdot>\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s rm_vars (set X) \<sigma> \<and> G = G' \<cdot>\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s rm_vars (set X) \<sigma> \<and>
\<forall>X\<langle>\<or>\<noteq>: F' \<or>\<notin>: G'\<rangle> \<in> set S"
proof (induction S)
case (Cons a S)
have *: "x \<in> set (S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<sigma>)"
when "x \<in> set (a#S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<sigma>)" "x \<noteq> a \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<sigma>" for x
using that by (simp add: subst_apply_stateful_strand_def)
{ case 1 thus ?case using Cons.IH(1)[OF *] by (cases a) auto }
{ case 2 thus ?case using Cons.IH(2)[OF *] by (cases a) auto }
{ case 3 thus ?case using Cons.IH(3)[OF *] by (cases a) auto }
{ case 4 thus ?case using Cons.IH(4)[OF *] by (cases a) auto }
{ case 5 thus ?case using Cons.IH(5)[OF *] by (cases a) auto }
{ case 6 thus ?case using Cons.IH(6)[OF *] by (cases a) auto }
{ case 7 thus ?case using Cons.IH(7)[OF *] by (cases a) auto }
qed simp_all
lemma stateful_strand_step_fv_subset_cases:
"send\<langle>t\<rangle> \<in> set S \<Longrightarrow> fv t \<subseteq> fv\<^sub>s\<^sub>s\<^sub>t S"
"receive\<langle>t\<rangle> \<in> set S \<Longrightarrow> fv t \<subseteq> fv\<^sub>s\<^sub>s\<^sub>t S"
"\<langle>c: t \<doteq> s\<rangle> \<in> set S \<Longrightarrow> fv t \<union> fv s \<subseteq> fv\<^sub>s\<^sub>s\<^sub>t S"
"insert\<langle>t,s\<rangle> \<in> set S \<Longrightarrow> fv t \<union> fv s \<subseteq> fv\<^sub>s\<^sub>s\<^sub>t S"
"delete\<langle>t,s\<rangle> \<in> set S \<Longrightarrow> fv t \<union> fv s \<subseteq> fv\<^sub>s\<^sub>s\<^sub>t S"
"\<langle>c: t \<in> s\<rangle> \<in> set S \<Longrightarrow> fv t \<union> fv s \<subseteq> fv\<^sub>s\<^sub>s\<^sub>t S"
"\<forall>X\<langle>\<or>\<noteq>: F \<or>\<notin>: G\<rangle> \<in> set S \<Longrightarrow> fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s F \<union> fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s G - set X \<subseteq> fv\<^sub>s\<^sub>s\<^sub>t S"
proof (induction S)
case (Cons a S)
{ case 1 thus ?case using Cons.IH(1) by auto }
{ case 2 thus ?case using Cons.IH(2) by auto }
{ case 3 thus ?case using Cons.IH(3) by auto }
{ case 4 thus ?case using Cons.IH(4) by auto }
{ case 5 thus ?case using Cons.IH(5) by auto }
{ case 6 thus ?case using Cons.IH(6) by auto }
{ case 7 thus ?case using Cons.IH(7) by fastforce }
qed simp_all
lemma trms\<^sub>s\<^sub>s\<^sub>t_nil[simp]:
"trms\<^sub>s\<^sub>s\<^sub>t [] = {}"
unfolding trms\<^sub>s\<^sub>s\<^sub>t_def by simp
lemma trms\<^sub>s\<^sub>s\<^sub>t_mono:
"set M \<subseteq> set N \<Longrightarrow> trms\<^sub>s\<^sub>s\<^sub>t M \<subseteq> trms\<^sub>s\<^sub>s\<^sub>t N"
by auto
lemma trms\<^sub>s\<^sub>s\<^sub>t_in:
assumes "t \<in> trms\<^sub>s\<^sub>s\<^sub>t S"
shows "\<exists>a \<in> set S. t \<in> trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p a"
using assms unfolding trms\<^sub>s\<^sub>s\<^sub>t_def by simp
lemma trms\<^sub>s\<^sub>s\<^sub>t_cons: "trms\<^sub>s\<^sub>s\<^sub>t (a#A) = trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p a \<union> trms\<^sub>s\<^sub>s\<^sub>t A"
unfolding trms\<^sub>s\<^sub>s\<^sub>t_def by force
lemma trms\<^sub>s\<^sub>s\<^sub>t_append[simp]: "trms\<^sub>s\<^sub>s\<^sub>t (A@B) = trms\<^sub>s\<^sub>s\<^sub>t A \<union> trms\<^sub>s\<^sub>s\<^sub>t B"
unfolding trms\<^sub>s\<^sub>s\<^sub>t_def by force
lemma trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p_subst:
assumes "set (bvars\<^sub>s\<^sub>s\<^sub>t\<^sub>p a) \<inter> subst_domain \<theta> = {}"
shows "trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p (a \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta>) = trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p a \<cdot>\<^sub>s\<^sub>e\<^sub>t \<theta>"
proof (cases a)
case (NegChecks X F G)
hence "rm_vars (set X) \<theta> = \<theta>" using assms rm_vars_apply'[of \<theta> "set X"] by auto
hence "trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p (a \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta>) = trms\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s (F \<cdot>\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s \<theta>) \<union> trms\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s (G \<cdot>\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s \<theta>)"
"trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p a \<cdot>\<^sub>s\<^sub>e\<^sub>t \<theta> = (trms\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s F \<cdot>\<^sub>s\<^sub>e\<^sub>t \<theta>) \<union> (trms\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s G \<cdot>\<^sub>s\<^sub>e\<^sub>t \<theta>)"
using NegChecks image_Un by simp_all
thus ?thesis by (metis trms\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s_subst)
qed simp_all
lemma trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p_subst':
assumes "\<not>is_NegChecks a"
shows "trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p (a \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta>) = trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p a \<cdot>\<^sub>s\<^sub>e\<^sub>t \<theta>"
using assms by (cases a) simp_all
lemma trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p_subst'':
fixes t::"('a,'b) term" and \<delta>::"('a,'b) subst"
assumes "t \<in> trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p (b \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<delta>)"
shows "\<exists>s \<in> trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p b. t = s \<cdot> rm_vars (set (bvars\<^sub>s\<^sub>s\<^sub>t\<^sub>p b)) \<delta>"
proof (cases "is_NegChecks b")
case True
then obtain X F G where *: "b = NegChecks X F G" by (cases b) moura+
thus ?thesis using assms trms\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s_subst[of _ "rm_vars (set X) \<delta>"] by auto
next
case False
hence "trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p (b \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<delta>) = trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p b \<cdot>\<^sub>s\<^sub>e\<^sub>t rm_vars (set (bvars\<^sub>s\<^sub>s\<^sub>t\<^sub>p b)) \<delta>"
using trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p_subst' bvars\<^sub>s\<^sub>s\<^sub>t\<^sub>p_NegChecks
by fastforce
thus ?thesis using assms by fast
qed
lemma trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p_subst''':
fixes t::"('a,'b) term" and \<delta> \<theta>::"('a,'b) subst"
assumes "t \<in> trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p (b \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<delta>) \<cdot>\<^sub>s\<^sub>e\<^sub>t \<theta>"
shows "\<exists>s \<in> trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p b. t = s \<cdot> rm_vars (set (bvars\<^sub>s\<^sub>s\<^sub>t\<^sub>p b)) \<delta> \<circ>\<^sub>s \<theta>"
proof -
obtain s where s: "s \<in> trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p (b \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<delta>)" "t = s \<cdot> \<theta>" using assms by moura
show ?thesis using trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p_subst''[OF s(1)] s(2) by auto
qed
lemma trms\<^sub>s\<^sub>s\<^sub>t_subst:
assumes "bvars\<^sub>s\<^sub>s\<^sub>t S \<inter> subst_domain \<theta> = {}"
shows "trms\<^sub>s\<^sub>s\<^sub>t (S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<theta>) = trms\<^sub>s\<^sub>s\<^sub>t S \<cdot>\<^sub>s\<^sub>e\<^sub>t \<theta>"
using assms
proof (induction S)
case (Cons a S)
hence IH: "trms\<^sub>s\<^sub>s\<^sub>t (S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<theta>) = trms\<^sub>s\<^sub>s\<^sub>t S \<cdot>\<^sub>s\<^sub>e\<^sub>t \<theta>" and *: "set (bvars\<^sub>s\<^sub>s\<^sub>t\<^sub>p a) \<inter> subst_domain \<theta> = {}"
by auto
show ?case using trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p_subst[OF *] IH by (auto simp add: subst_apply_stateful_strand_def)
qed simp
lemma trms\<^sub>s\<^sub>s\<^sub>t_subst_cons:
"trms\<^sub>s\<^sub>s\<^sub>t (a#A \<cdot>\<^sub>s\<^sub>s\<^sub>t \<delta>) = trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p (a \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<delta>) \<union> trms\<^sub>s\<^sub>s\<^sub>t (A \<cdot>\<^sub>s\<^sub>s\<^sub>t \<delta>)"
using subst_sst_cons[of a A \<delta>] trms\<^sub>s\<^sub>s\<^sub>t_cons[of a A] trms\<^sub>s\<^sub>s\<^sub>t_append by simp
lemma (in intruder_model) wf\<^sub>t\<^sub>r\<^sub>m\<^sub>s_trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p_subst:
assumes "wf\<^sub>t\<^sub>r\<^sub>m\<^sub>s (trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p a \<cdot>\<^sub>s\<^sub>e\<^sub>t \<delta>)"
shows "wf\<^sub>t\<^sub>r\<^sub>m\<^sub>s (trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p (a \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<delta>))"
using assms
proof (cases a)
case (NegChecks X F G)
hence *: "trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p (a \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<delta>) =
(trms\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s (F \<cdot>\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s rm_vars (set X) \<delta>)) \<union> (trms\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s (G \<cdot>\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s rm_vars (set X) \<delta>))"
by simp
have "trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p a \<cdot>\<^sub>s\<^sub>e\<^sub>t \<delta> = (trms\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s F \<cdot>\<^sub>s\<^sub>e\<^sub>t \<delta>) \<union> (trms\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s G \<cdot>\<^sub>s\<^sub>e\<^sub>t \<delta>)"
using NegChecks image_Un by simp
hence "wf\<^sub>t\<^sub>r\<^sub>m\<^sub>s (trms\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s F \<cdot>\<^sub>s\<^sub>e\<^sub>t \<delta>)" "wf\<^sub>t\<^sub>r\<^sub>m\<^sub>s (trms\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s G \<cdot>\<^sub>s\<^sub>e\<^sub>t \<delta>)" using * assms by auto
hence "wf\<^sub>t\<^sub>r\<^sub>m\<^sub>s (trms\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s F \<cdot>\<^sub>s\<^sub>e\<^sub>t rm_vars (set X) \<delta>)"
"wf\<^sub>t\<^sub>r\<^sub>m\<^sub>s (trms\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s G \<cdot>\<^sub>s\<^sub>e\<^sub>t rm_vars (set X) \<delta>)"
using wf_trms_subst_rm_vars[of \<delta> "trms\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s F" "set X"]
wf_trms_subst_rm_vars[of \<delta> "trms\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s G" "set X"]
by fast+
thus ?thesis
using * trms\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s_subst[of _ "rm_vars (set X) \<delta>"]
by auto
qed auto
lemma trms\<^sub>s\<^sub>s\<^sub>t_fv_vars\<^sub>s\<^sub>s\<^sub>t_subset: "t \<in> trms\<^sub>s\<^sub>s\<^sub>t A \<Longrightarrow> fv t \<subseteq> vars\<^sub>s\<^sub>s\<^sub>t A"
proof (induction A)
case (Cons a A) thus ?case by (cases a) auto
qed simp
lemma trms\<^sub>s\<^sub>s\<^sub>t_fv_subst_subset:
assumes "t \<in> trms\<^sub>s\<^sub>s\<^sub>t S" "subst_domain \<theta> \<inter> bvars\<^sub>s\<^sub>s\<^sub>t S = {}"
shows "fv (t \<cdot> \<theta>) \<subseteq> vars\<^sub>s\<^sub>s\<^sub>t (S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<theta>)"
using assms
proof (induction S)
case (Cons s S) show ?case
proof (cases "t \<in> trms\<^sub>s\<^sub>s\<^sub>t S")
case True
hence "fv (t \<cdot> \<theta>) \<subseteq> vars\<^sub>s\<^sub>s\<^sub>t (S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<theta>)" using Cons.IH Cons.prems by auto
thus ?thesis using subst_sst_cons[of s S \<theta>] unfolding vars\<^sub>s\<^sub>s\<^sub>t_def by auto
next
case False
hence *: "t \<in> trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p s" "subst_domain \<theta> \<inter> set (bvars\<^sub>s\<^sub>s\<^sub>t\<^sub>p s) = {}" using Cons.prems by auto
hence "fv (t \<cdot> \<theta>) \<subseteq> vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p (s \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta>)"
proof (cases s)
case (NegChecks X F G)
hence **: "t \<in> trms\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s F \<or> t \<in> trms\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s G" using *(1) by auto
have ***: "rm_vars (set X) \<theta> = \<theta>" using *(2) NegChecks rm_vars_apply' by auto
have "fv (t \<cdot> \<theta>) \<subseteq> fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s (F \<cdot>\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s rm_vars (set X) \<theta>) \<union> fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s (G \<cdot>\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s rm_vars (set X) \<theta>)"
using ** *** trms\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s_fv_subst_subset[of t _ \<theta>] by auto
thus ?thesis using *(2) using NegChecks vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p_subst_cases(7)[of X F G \<theta>] by blast
qed auto
thus ?thesis using subst_sst_cons[of s S \<theta>] unfolding vars\<^sub>s\<^sub>s\<^sub>t_def by auto
qed
qed simp
lemma trms\<^sub>s\<^sub>s\<^sub>t_fv_subst_subset':
assumes "t \<in> subterms\<^sub>s\<^sub>e\<^sub>t (trms\<^sub>s\<^sub>s\<^sub>t S)" "fv t \<inter> bvars\<^sub>s\<^sub>s\<^sub>t S = {}" "fv (t \<cdot> \<theta>) \<inter> bvars\<^sub>s\<^sub>s\<^sub>t S = {}"
shows "fv (t \<cdot> \<theta>) \<subseteq> fv\<^sub>s\<^sub>s\<^sub>t (S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<theta>)"
using assms
proof (induction S)
case (Cons s S) show ?case
proof (cases "t \<in> subterms\<^sub>s\<^sub>e\<^sub>t (trms\<^sub>s\<^sub>s\<^sub>t S)")
case True
hence "fv (t \<cdot> \<theta>) \<subseteq> fv\<^sub>s\<^sub>s\<^sub>t (S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<theta>)" using Cons.IH Cons.prems by auto
thus ?thesis using subst_sst_cons[of s S \<theta>] unfolding vars\<^sub>s\<^sub>s\<^sub>t_def by auto
next
case False
hence 0: "t \<in> subterms\<^sub>s\<^sub>e\<^sub>t (trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p s)" "fv t \<inter> set (bvars\<^sub>s\<^sub>s\<^sub>t\<^sub>p s) = {}"
"fv (t \<cdot> \<theta>) \<inter> set (bvars\<^sub>s\<^sub>s\<^sub>t\<^sub>p s) = {}"
using Cons.prems by auto
note 1 = UN_Un UN_insert fv\<^sub>s\<^sub>e\<^sub>t.simps subst_apply_fv_subset subst_apply_fv_unfold
subst_apply_term_empty sup_bot.comm_neutral fv_subterms_set fv_subset[OF 0(1)]
note 2 = subst_apply_fv_union
have "fv (t \<cdot> \<theta>) \<subseteq> fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p (s \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta>)"
proof (cases s)
case (NegChecks X F G)
hence 3: "t \<in> subterms\<^sub>s\<^sub>e\<^sub>t (trms\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s F) \<or> t \<in> subterms\<^sub>s\<^sub>e\<^sub>t (trms\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s G)" using 0(1) by auto
have "t \<cdot> rm_vars (set X) \<theta> = t \<cdot> \<theta>" using 0(2) NegChecks rm_vars_ident[of t] by auto
hence "fv (t \<cdot> \<theta>) \<subseteq> fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s (F \<cdot>\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s rm_vars (set X) \<theta>) \<union> fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s (G \<cdot>\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s rm_vars (set X) \<theta>)"
using 3 trms\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s_fv_subst_subset'[of t _ "rm_vars (set X) \<theta>"] by fastforce
thus ?thesis using 0(2,3) NegChecks fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p_subst_cases(7)[of X F G \<theta>] by auto
qed (metis (no_types, lifting) 1 trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p.simps(1) fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p_subst_cases(1),
metis (no_types, lifting) 1 trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p.simps(2) fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p_subst_cases(2),
metis (no_types, lifting) 1 2 trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p.simps(3) fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p_subst_cases(3),
metis (no_types, lifting) 1 2 trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p.simps(4) fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p_subst_cases(4),
metis (no_types, lifting) 1 2 trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p.simps(5) fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p_subst_cases(5),
metis (no_types, lifting) 1 2 trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p.simps(6) fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p_subst_cases(6))
thus ?thesis using subst_sst_cons[of s S \<theta>] unfolding fv\<^sub>s\<^sub>s\<^sub>t_def by auto
qed
qed simp
lemma trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p_funs_term_cases:
assumes "t \<in> trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p (s \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta>)" "f \<in> funs_term t"
shows "(\<exists>u \<in> trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p s. f \<in> funs_term u) \<or> (\<exists>x \<in> fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p s. f \<in> funs_term (\<theta> x))"
using assms
proof (cases s)
case (NegChecks X F G)
hence "t \<in> trms\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s (F \<cdot>\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s rm_vars (set X) \<theta>) \<or> t \<in> trms\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s (G \<cdot>\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s rm_vars (set X) \<theta>)"
using assms(1) by auto
hence "(\<exists>u\<in>trms\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s F. f \<in> funs_term u) \<or> (\<exists>x\<in>fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s F. f \<in> funs_term (rm_vars (set X) \<theta> x)) \<or>
(\<exists>u\<in>trms\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s G. f \<in> funs_term u) \<or> (\<exists>x\<in>fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s G. f \<in> funs_term (rm_vars (set X) \<theta> x))"
using trms\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s_funs_term_cases[OF _ assms(2), of _ "rm_vars (set X) \<theta>"] by meson
hence "(\<exists>u \<in> trms\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s F \<union> trms\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s G. f \<in> funs_term u) \<or>
(\<exists>x \<in> fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s F \<union> fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s G. f \<in> funs_term (rm_vars (set X) \<theta> x))"
by blast
thus ?thesis
proof
assume "\<exists>x \<in> fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s F \<union> fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s G. f \<in> funs_term (rm_vars (set X) \<theta> x)"
then obtain x where x: "x \<in> fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s F \<union> fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s G" "f \<in> funs_term (rm_vars (set X) \<theta> x)"
by auto
hence "x \<notin> set X" "rm_vars (set X) \<theta> x = \<theta> x" by auto
thus ?thesis using x by (auto simp add: assms NegChecks)
qed (auto simp add: assms NegChecks)
qed (use assms funs_term_subst[of _ \<theta>] in auto)
lemma trms\<^sub>s\<^sub>s\<^sub>t_funs_term_cases:
assumes "t \<in> trms\<^sub>s\<^sub>s\<^sub>t (S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<theta>)" "f \<in> funs_term t"
shows "(\<exists>u \<in> trms\<^sub>s\<^sub>s\<^sub>t S. f \<in> funs_term u) \<or> (\<exists>x \<in> fv\<^sub>s\<^sub>s\<^sub>t S. f \<in> funs_term (\<theta> x))"
using assms(1)
proof (induction S)
case (Cons s S) thus ?case
proof (cases "t \<in> trms\<^sub>s\<^sub>s\<^sub>t (S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<theta>)")
case False
hence "t \<in> trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p (s \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta>)" using Cons.prems(1) subst_sst_cons[of s S \<theta>] trms\<^sub>s\<^sub>s\<^sub>t_cons by auto
thus ?thesis using trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p_funs_term_cases[OF _ assms(2)] by fastforce
qed auto
qed simp
lemma fv\<^sub>s\<^sub>s\<^sub>t_is_subterm_trms\<^sub>s\<^sub>s\<^sub>t_subst:
assumes "x \<in> fv\<^sub>s\<^sub>s\<^sub>t T"
and "bvars\<^sub>s\<^sub>s\<^sub>t T \<inter> subst_domain \<theta> = {}"
shows "\<theta> x \<in> subterms\<^sub>s\<^sub>e\<^sub>t (trms\<^sub>s\<^sub>s\<^sub>t (T \<cdot>\<^sub>s\<^sub>s\<^sub>t \<theta>))"
using trms\<^sub>s\<^sub>s\<^sub>t_subst[OF assms(2)] subterms_subst_subset'[of \<theta> "trms\<^sub>s\<^sub>s\<^sub>t T"]
fv\<^sub>s\<^sub>s\<^sub>t_is_subterm_trms\<^sub>s\<^sub>s\<^sub>t[OF assms(1)]
by (metis (no_types, lifting) image_iff subset_iff subst_apply_term.simps(1))
lemma fv\<^sub>s\<^sub>s\<^sub>t_subst_fv_subset:
assumes "x \<in> fv\<^sub>s\<^sub>s\<^sub>t S" "x \<notin> bvars\<^sub>s\<^sub>s\<^sub>t S" "fv (\<theta> x) \<inter> bvars\<^sub>s\<^sub>s\<^sub>t S = {}"
shows "fv (\<theta> x) \<subseteq> fv\<^sub>s\<^sub>s\<^sub>t (S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<theta>)"
using assms
proof (induction S)
case (Cons a S)
note 1 = fv_subst_subset[of _ _ \<theta>]
note 2 = subst_apply_fv_union subst_apply_fv_unfold[of _ \<theta>] fv_subset image_eqI
note 3 = fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p_subst_cases
note 4 = fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p.simps
from Cons show ?case
proof (cases "x \<in> fv\<^sub>s\<^sub>s\<^sub>t S")
case False
hence 5: "x \<in> fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p a" " fv (\<theta> x) \<inter> set (bvars\<^sub>s\<^sub>s\<^sub>t\<^sub>p a) = {}" "x \<notin> set (bvars\<^sub>s\<^sub>s\<^sub>t\<^sub>p a)"
using Cons.prems by auto
hence "fv (\<theta> x) \<subseteq> fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p (a \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta>)"
proof (cases a)
case (NegChecks X F G)
let ?\<delta> = "rm_vars (set X) \<theta>"
have *: "x \<in> fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s F \<union> fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s G" using NegChecks 5(1) by auto
have **: "fv (\<theta> x) \<inter> set X = {}" using NegChecks 5(2) by simp
have ***: "\<theta> x = ?\<delta> x" using NegChecks 5(3) by auto
have "fv (\<theta> x) \<subseteq> fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s (F \<cdot>\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s ?\<delta>) \<union> fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s (G \<cdot>\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s ?\<delta>)"
using fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s_subst_fv_subset[of x _ ?\<delta>] * *** by auto
thus ?thesis using NegChecks ** by auto
qed (metis (full_types) 1 5(1) 3(1) 4(1), metis (full_types) 1 5(1) 3(2) 4(2),
metis (full_types) 2 5(1) 3(3) 4(3), metis (full_types) 2 5(1) 3(4) 4(4),
metis (full_types) 2 5(1) 3(5) 4(5), metis (full_types) 2 5(1) 3(6) 4(6))
thus ?thesis by (auto simp add: subst_sst_cons[of a S \<theta>])
qed (auto simp add: subst_sst_cons[of a S \<theta>])
qed simp
lemma (in intruder_model) wf\<^sub>t\<^sub>r\<^sub>m\<^sub>s_trms\<^sub>s\<^sub>s\<^sub>t_subst:
assumes "wf\<^sub>t\<^sub>r\<^sub>m\<^sub>s (trms\<^sub>s\<^sub>s\<^sub>t A \<cdot>\<^sub>s\<^sub>e\<^sub>t \<delta>)"
shows "wf\<^sub>t\<^sub>r\<^sub>m\<^sub>s (trms\<^sub>s\<^sub>s\<^sub>t (A \<cdot>\<^sub>s\<^sub>s\<^sub>t \<delta>))"
using assms
proof (induction A)
case (Cons a A)
hence IH: "wf\<^sub>t\<^sub>r\<^sub>m\<^sub>s (trms\<^sub>s\<^sub>s\<^sub>t (A \<cdot>\<^sub>s\<^sub>s\<^sub>t \<delta>))" and *: "wf\<^sub>t\<^sub>r\<^sub>m\<^sub>s (trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p a \<cdot>\<^sub>s\<^sub>e\<^sub>t \<delta>)" by auto
have "wf\<^sub>t\<^sub>r\<^sub>m\<^sub>s (trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p (a \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<delta>))" by (rule wf\<^sub>t\<^sub>r\<^sub>m\<^sub>s_trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p_subst[OF *])
thus ?case using IH trms\<^sub>s\<^sub>s\<^sub>t_subst_cons[of a A \<delta>] by blast
qed simp
lemma fv\<^sub>s\<^sub>s\<^sub>t_subst_obtain_var:
assumes "x \<in> fv\<^sub>s\<^sub>s\<^sub>t (S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<delta>)"
shows "\<exists>y \<in> fv\<^sub>s\<^sub>s\<^sub>t S. x \<in> fv (\<delta> y)"
using assms
proof (induction S)
case (Cons s S)
hence "x \<in> fv\<^sub>s\<^sub>s\<^sub>t (S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<delta>) \<Longrightarrow> \<exists>y \<in> fv\<^sub>s\<^sub>s\<^sub>t S. x \<in> fv (\<delta> y)"
using bvars\<^sub>s\<^sub>s\<^sub>t_cons_subset[of S s]
by blast
thus ?case
proof (cases "x \<in> fv\<^sub>s\<^sub>s\<^sub>t (S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<delta>)")
case False
hence *: "x \<in> fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p (s \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<delta>)"
using Cons.prems(1) subst_sst_cons[of s S \<delta>]
by fastforce
have "\<exists>y \<in> fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p s. x \<in> fv (\<delta> y)"
proof (cases s)
case (NegChecks X F G)
hence "x \<in> fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s (F \<cdot>\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s rm_vars (set X) \<delta>) \<or> x \<in> fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s (G \<cdot>\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s rm_vars (set X) \<delta>)"
and **: "x \<notin> set X"
using * by simp_all
then obtain y where y: "y \<in> fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s F \<or> y \<in> fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s G" "x \<in> fv ((rm_vars (set X) \<delta>) y)"
using fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s_subst_obtain_var[of _ _ "rm_vars (set X) \<delta>"]
by blast
have "y \<notin> set X"
proof
assume y_in: "y \<in> set X"
hence "(rm_vars (set X) \<delta>) y = Var y" by auto
hence "x = y" using y(2) by simp
thus False using ** y_in by metis
qed
thus ?thesis using NegChecks y by auto
qed (use * fv_subst_obtain_var in force)+
thus ?thesis by auto
qed auto
qed simp
lemma fv\<^sub>s\<^sub>s\<^sub>t_subst_subset_range_vars_if_subset_domain:
assumes "fv\<^sub>s\<^sub>s\<^sub>t S \<subseteq> subst_domain \<sigma>"
shows "fv\<^sub>s\<^sub>s\<^sub>t (S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<sigma>) \<subseteq> range_vars \<sigma>"
using assms fv\<^sub>s\<^sub>s\<^sub>t_subst_obtain_var[of _ S \<sigma>] subst_dom_vars_in_subst[of _ \<sigma>] subst_fv_imgI[of \<sigma>]
by (metis (no_types) in_mono subsetI)
lemma fv\<^sub>s\<^sub>s\<^sub>t_in_fv_trms\<^sub>s\<^sub>s\<^sub>t: "x \<in> fv\<^sub>s\<^sub>s\<^sub>t S \<Longrightarrow> x \<in> fv\<^sub>s\<^sub>e\<^sub>t (trms\<^sub>s\<^sub>s\<^sub>t S)"
proof (induction S)
case (Cons s S) thus ?case
proof (cases "x \<in> fv\<^sub>s\<^sub>s\<^sub>t S")
case False
hence *: "x \<in> fv\<^sub>s\<^sub>s\<^sub>t\<^sub>p s" using Cons.prems by simp
hence "x \<in> fv\<^sub>s\<^sub>e\<^sub>t (trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p s)"
proof (cases s)
case (NegChecks X F G)
hence "x \<in> fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s F \<or> x \<in> fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s G" using * by simp_all
thus ?thesis using * fv\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s_in_fv_trms\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s[of x] NegChecks by auto
qed auto
thus ?thesis by simp
qed simp
qed simp
lemma stateful_strand_step_subst_comp:
assumes "range_vars \<delta> \<inter> set (bvars\<^sub>s\<^sub>s\<^sub>t\<^sub>p x) = {}"
shows "x \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<delta> \<circ>\<^sub>s \<theta> = (x \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<delta>) \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta>"
proof (cases x)
case (NegChecks X F G)
hence *: "range_vars \<delta> \<inter> set X = {}" using assms by simp
have "H \<cdot>\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s rm_vars (set X) (\<delta> \<circ>\<^sub>s \<theta>) = (H \<cdot>\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s rm_vars (set X) \<delta>) \<cdot>\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s rm_vars (set X) \<theta>" for H
using pairs_subst_comp rm_vars_comp[OF *] by (induct H) (auto simp add: subst_apply_pairs_def)
thus ?thesis using NegChecks by simp
qed simp_all
lemma stateful_strand_subst_comp:
assumes "range_vars \<delta> \<inter> bvars\<^sub>s\<^sub>s\<^sub>t S = {}"
shows "S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<delta> \<circ>\<^sub>s \<theta> = (S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<delta>) \<cdot>\<^sub>s\<^sub>s\<^sub>t \<theta>"
using assms
proof (induction S)
case (Cons s S)
hence IH: "S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<delta> \<circ>\<^sub>s \<theta> = (S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<delta>) \<cdot>\<^sub>s\<^sub>s\<^sub>t \<theta>" using Cons by auto
have "s \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<delta> \<circ>\<^sub>s \<theta> = (s \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<delta>) \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta>"
using Cons.prems stateful_strand_step_subst_comp[of \<delta> s \<theta>]
unfolding range_vars_alt_def by auto
thus ?case using IH by (simp add: subst_apply_stateful_strand_def)
qed simp
lemma subst_apply_bvars_disj_NegChecks:
assumes "set X \<inter> subst_domain \<theta> = {}"
shows "NegChecks X F G \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta> = NegChecks X (F \<cdot>\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s \<theta>) (G \<cdot>\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s \<theta>)"
proof -
have "rm_vars (set X) \<theta> = \<theta>" using assms rm_vars_apply'[of \<theta> "set X"] by auto
thus ?thesis by simp
qed
lemma subst_apply_NegChecks_no_bvars[simp]:
"\<forall>[]\<langle>\<or>\<noteq>: F \<or>\<notin>: F'\<rangle> \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta> = \<forall>[]\<langle>\<or>\<noteq>: (F \<cdot>\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s \<theta>) \<or>\<notin>: (F' \<cdot>\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s \<theta>)\<rangle>"
"\<forall>[]\<langle>\<or>\<noteq>: [] \<or>\<notin>: F'\<rangle> \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta> = \<forall>[]\<langle>\<or>\<noteq>: [] \<or>\<notin>: (F' \<cdot>\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s \<theta>)\<rangle>"
"\<forall>[]\<langle>\<or>\<noteq>: F \<or>\<notin>: []\<rangle> \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta> = \<forall>[]\<langle>\<or>\<noteq>: (F \<cdot>\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s \<theta>) \<or>\<notin>: []\<rangle>"
"\<forall>[]\<langle>\<or>\<noteq>: [] \<or>\<notin>: [(t,s)]\<rangle> \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta> = \<forall>[]\<langle>\<or>\<noteq>: [] \<or>\<notin>: ([(t \<cdot> \<theta>,s \<cdot> \<theta>)])\<rangle>" (is ?A)
"\<forall>[]\<langle>\<or>\<noteq>: [(t,s)] \<or>\<notin>: []\<rangle> \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta> = \<forall>[]\<langle>\<or>\<noteq>: ([(t \<cdot> \<theta>,s \<cdot> \<theta>)]) \<or>\<notin>: []\<rangle>" (is ?B)
by simp_all
lemma setops\<^sub>s\<^sub>s\<^sub>t_mono:
"set M \<subseteq> set N \<Longrightarrow> setops\<^sub>s\<^sub>s\<^sub>t M \<subseteq> setops\<^sub>s\<^sub>s\<^sub>t N"
by (auto simp add: setops\<^sub>s\<^sub>s\<^sub>t_def)
lemma setops\<^sub>s\<^sub>s\<^sub>t_nil[simp]: "setops\<^sub>s\<^sub>s\<^sub>t [] = {}"
by (simp add: setops\<^sub>s\<^sub>s\<^sub>t_def)
lemma setops\<^sub>s\<^sub>s\<^sub>t_cons[simp]: "setops\<^sub>s\<^sub>s\<^sub>t (a#A) = setops\<^sub>s\<^sub>s\<^sub>t\<^sub>p a \<union> setops\<^sub>s\<^sub>s\<^sub>t A"
by (simp add: setops\<^sub>s\<^sub>s\<^sub>t_def)
lemma setops\<^sub>s\<^sub>s\<^sub>t_cons_subset[simp]: "setops\<^sub>s\<^sub>s\<^sub>t A \<subseteq> setops\<^sub>s\<^sub>s\<^sub>t (a#A)"
using setops\<^sub>s\<^sub>s\<^sub>t_cons[of a A] by blast
lemma setops\<^sub>s\<^sub>s\<^sub>t_append: "setops\<^sub>s\<^sub>s\<^sub>t (A@B) = setops\<^sub>s\<^sub>s\<^sub>t A \<union> setops\<^sub>s\<^sub>s\<^sub>t B"
proof (induction A)
case (Cons a A) thus ?case by (cases a) (auto simp add: setops\<^sub>s\<^sub>s\<^sub>t_def)
qed (simp add: setops\<^sub>s\<^sub>s\<^sub>t_def)
lemma setops\<^sub>s\<^sub>s\<^sub>t\<^sub>p_member_iff:
"(t,s) \<in> setops\<^sub>s\<^sub>s\<^sub>t\<^sub>p x \<longleftrightarrow>
(x = Insert t s \<or> x = Delete t s \<or> (\<exists>ac. x = InSet ac t s) \<or>
(\<exists>X F F'. x = NegChecks X F F' \<and> (t,s) \<in> set F'))"
by (cases x) auto
lemma setops\<^sub>s\<^sub>s\<^sub>t_member_iff:
"(t,s) \<in> setops\<^sub>s\<^sub>s\<^sub>t A \<longleftrightarrow>
(Insert t s \<in> set A \<or> Delete t s \<in> set A \<or> (\<exists>ac. InSet ac t s \<in> set A) \<or>
(\<exists>X F F'. NegChecks X F F' \<in> set A \<and> (t,s) \<in> set F'))"
(is "?P \<longleftrightarrow> ?Q")
proof (induction A)
case (Cons a A) thus ?case
proof (cases "(t, s) \<in> setops\<^sub>s\<^sub>s\<^sub>t\<^sub>p a")
case True thus ?thesis using setops\<^sub>s\<^sub>s\<^sub>t\<^sub>p_member_iff[of t s a] by auto
qed auto
qed simp
lemma setops\<^sub>s\<^sub>s\<^sub>t\<^sub>p_subst:
assumes "set (bvars\<^sub>s\<^sub>s\<^sub>t\<^sub>p a) \<inter> subst_domain \<theta> = {}"
shows "setops\<^sub>s\<^sub>s\<^sub>t\<^sub>p (a \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta>) = setops\<^sub>s\<^sub>s\<^sub>t\<^sub>p a \<cdot>\<^sub>p\<^sub>s\<^sub>e\<^sub>t \<theta>"
proof (cases a)
case (NegChecks X F G)
hence "rm_vars (set X) \<theta> = \<theta>" using assms rm_vars_apply'[of \<theta> "set X"] by auto
hence "setops\<^sub>s\<^sub>s\<^sub>t\<^sub>p (a \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta>) = set (G \<cdot>\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s \<theta>)"
"setops\<^sub>s\<^sub>s\<^sub>t\<^sub>p a \<cdot>\<^sub>p\<^sub>s\<^sub>e\<^sub>t \<theta> = set G \<cdot>\<^sub>p\<^sub>s\<^sub>e\<^sub>t \<theta>"
using NegChecks image_Un by simp_all
thus ?thesis by (simp add: subst_apply_pairs_def)
qed simp_all
lemma setops\<^sub>s\<^sub>s\<^sub>t\<^sub>p_subst':
assumes "\<not>is_NegChecks a"
shows "setops\<^sub>s\<^sub>s\<^sub>t\<^sub>p (a \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<theta>) = setops\<^sub>s\<^sub>s\<^sub>t\<^sub>p a \<cdot>\<^sub>p\<^sub>s\<^sub>e\<^sub>t \<theta>"
using assms by (cases a) auto
lemma setops\<^sub>s\<^sub>s\<^sub>t_subst:
assumes "bvars\<^sub>s\<^sub>s\<^sub>t S \<inter> subst_domain \<theta> = {}"
shows "setops\<^sub>s\<^sub>s\<^sub>t (S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<theta>) = setops\<^sub>s\<^sub>s\<^sub>t S \<cdot>\<^sub>p\<^sub>s\<^sub>e\<^sub>t \<theta>"
using assms
proof (induction S)
case (Cons a S)
have "bvars\<^sub>s\<^sub>s\<^sub>t S \<inter> subst_domain \<theta> = {}" and *: "set (bvars\<^sub>s\<^sub>s\<^sub>t\<^sub>p a) \<inter> subst_domain \<theta> = {}"
using Cons.prems by auto
hence IH: "setops\<^sub>s\<^sub>s\<^sub>t (S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<theta>) = setops\<^sub>s\<^sub>s\<^sub>t S \<cdot>\<^sub>p\<^sub>s\<^sub>e\<^sub>t \<theta>"
using Cons.IH by auto
show ?case
using setops\<^sub>s\<^sub>s\<^sub>t\<^sub>p_subst[OF *] IH unfolding setops\<^sub>s\<^sub>s\<^sub>t_def
by (auto simp add: subst_apply_stateful_strand_def)
qed (simp add: setops\<^sub>s\<^sub>s\<^sub>t_def)
lemma setops\<^sub>s\<^sub>s\<^sub>t_subst':
fixes p::"('a,'b) term \<times> ('a,'b) term" and \<delta>::"('a,'b) subst"
assumes "p \<in> setops\<^sub>s\<^sub>s\<^sub>t (S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<delta>)"
shows "\<exists>s \<in> setops\<^sub>s\<^sub>s\<^sub>t S. \<exists>X. set X \<subseteq> bvars\<^sub>s\<^sub>s\<^sub>t S \<and> p = s \<cdot>\<^sub>p rm_vars (set X) \<delta>"
using assms
proof (induction S)
case (Cons a S)
note 0 = setops\<^sub>s\<^sub>s\<^sub>t_cons[of a S] bvars\<^sub>s\<^sub>s\<^sub>t_Cons[of a S]
note 1 = setops\<^sub>s\<^sub>s\<^sub>t_cons[of "a \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<delta>" "S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<delta>"] subst_sst_cons[of a S \<delta>]
have "p \<in> setops\<^sub>s\<^sub>s\<^sub>t (S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<delta>) \<or> p \<in> setops\<^sub>s\<^sub>s\<^sub>t\<^sub>p (a \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<delta>)" using Cons.prems 1 by auto
thus ?case
proof
assume *: "p \<in> setops\<^sub>s\<^sub>s\<^sub>t\<^sub>p (a \<cdot>\<^sub>s\<^sub>s\<^sub>t\<^sub>p \<delta>)"
show ?thesis using setops\<^sub>s\<^sub>s\<^sub>t\<^sub>p_subst''[OF *] 0 by blast
next
assume *: "p \<in> setops\<^sub>s\<^sub>s\<^sub>t (S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<delta>)"
show ?thesis using Cons.IH[OF *] 0 by blast
qed
qed simp
subsection \<open>Stateful Constraint Semantics\<close>
context intruder_model
begin
definition negchecks_model where
"negchecks_model (\<I>::('a,'b) subst) (D::('a,'b) dbstate) X F G \<equiv>
(\<forall>\<delta>. subst_domain \<delta> = set X \<and> ground (subst_range \<delta>) \<longrightarrow>
(list_ex (\<lambda>f. fst f \<cdot> (\<delta> \<circ>\<^sub>s \<I>) \<noteq> snd f \<cdot> (\<delta> \<circ>\<^sub>s \<I>)) F \<or>
list_ex (\<lambda>f. f \<cdot>\<^sub>p (\<delta> \<circ>\<^sub>s \<I>) \<notin> D) G))"
fun strand_sem_stateful::
"('fun,'var) terms \<Rightarrow> ('fun,'var) dbstate \<Rightarrow> ('fun,'var) stateful_strand \<Rightarrow> ('fun,'var) subst \<Rightarrow> bool"
("\<lbrakk>_; _; _\<rbrakk>\<^sub>s")
where
"\<lbrakk>M; D; []\<rbrakk>\<^sub>s = (\<lambda>\<I>. True)"
| "\<lbrakk>M; D; Send t#S\<rbrakk>\<^sub>s = (\<lambda>\<I>. M \<turnstile> t \<cdot> \<I> \<and> \<lbrakk>M; D; S\<rbrakk>\<^sub>s \<I>)"
| "\<lbrakk>M; D; Receive t#S\<rbrakk>\<^sub>s = (\<lambda>\<I>. \<lbrakk>insert (t \<cdot> \<I>) M; D; S\<rbrakk>\<^sub>s \<I>)"
| "\<lbrakk>M; D; Equality _ t t'#S\<rbrakk>\<^sub>s = (\<lambda>\<I>. t \<cdot> \<I> = t' \<cdot> \<I> \<and> \<lbrakk>M; D; S\<rbrakk>\<^sub>s \<I>)"
| "\<lbrakk>M; D; Insert t s#S\<rbrakk>\<^sub>s = (\<lambda>\<I>. \<lbrakk>M; insert ((t,s) \<cdot>\<^sub>p \<I>) D; S\<rbrakk>\<^sub>s \<I>)"
| "\<lbrakk>M; D; Delete t s#S\<rbrakk>\<^sub>s = (\<lambda>\<I>. \<lbrakk>M; D - {(t,s) \<cdot>\<^sub>p \<I>}; S\<rbrakk>\<^sub>s \<I>)"
| "\<lbrakk>M; D; InSet _ t s#S\<rbrakk>\<^sub>s = (\<lambda>\<I>. (t,s) \<cdot>\<^sub>p \<I> \<in> D \<and> \<lbrakk>M; D; S\<rbrakk>\<^sub>s \<I>)"
| "\<lbrakk>M; D; NegChecks X F F'#S\<rbrakk>\<^sub>s = (\<lambda>\<I>. negchecks_model \<I> D X F F' \<and> \<lbrakk>M; D; S\<rbrakk>\<^sub>s \<I>)"
lemmas strand_sem_stateful_induct =
strand_sem_stateful.induct[case_names Nil ConsSnd ConsRcv ConsEq
ConsIns ConsDel ConsIn ConsNegChecks]
abbreviation constr_sem_stateful (infix "\<Turnstile>\<^sub>s" 91) where "\<I> \<Turnstile>\<^sub>s A \<equiv> \<lbrakk>{}; {}; A\<rbrakk>\<^sub>s \<I>"
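(* Two small sanity checks of the semantics, given purely as illustration: the empty constraint
   is satisfied by any interpretation, and inserting a pair makes a subsequent positive
   set-membership check on the same pair succeed. *)
lemma "\<I> \<Turnstile>\<^sub>s []"
  by simp

lemma "\<I> \<Turnstile>\<^sub>s [Insert t s, InSet ac t s]"
  by simp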
lemma stateful_strand_sem_NegChecks_no_bvars:
"\<lbrakk>M; D; [\<langle>t not in s\<rangle>]\<rbrakk>\<^sub>s \<I> \<Longrightarrow> (t \<cdot> \<I>, s \<cdot> \<I>) \<notin> D"
"\<lbrakk>M; D; [\<langle>t != s\<rangle>]\<rbrakk>\<^sub>s \<I> \<Longrightarrow> t \<cdot> \<I> \<noteq> s \<cdot> \<I>"
by (simp_all add: negchecks_model_def empty_dom_iff_empty_subst)
lemma strand_sem_ik_mono_stateful:
"\<lbrakk>M; D; A\<rbrakk>\<^sub>s \<I> \<Longrightarrow> \<lbrakk>M \<union> M'; D; A\<rbrakk>\<^sub>s \<I>"
using ideduct_mono by (induct A arbitrary: M M' D rule: strand_sem_stateful.induct) force+
lemma strand_sem_append_stateful:
"\<lbrakk>M; D; A@B\<rbrakk>\<^sub>s \<I> \<longleftrightarrow> \<lbrakk>M; D; A\<rbrakk>\<^sub>s \<I> \<and> \<lbrakk>M \<union> (ik\<^sub>s\<^sub>s\<^sub>t A \<cdot>\<^sub>s\<^sub>e\<^sub>t \<I>); dbupd\<^sub>s\<^sub>s\<^sub>t A \<I> D; B\<rbrakk>\<^sub>s \<I>"
(is "?P \<longleftrightarrow> ?Q \<and> ?R")
proof -
have 1: "?P \<Longrightarrow> ?Q" by (induct A rule: strand_sem_stateful.induct) auto
have 2: "?P \<Longrightarrow> ?R"
proof (induction A arbitrary: M D B)
case (Cons a A) thus ?case
proof (cases a)
case (Receive t)
have "insert (t \<cdot> \<I>) (M \<union> (ik\<^sub>s\<^sub>s\<^sub>t A \<cdot>\<^sub>s\<^sub>e\<^sub>t \<I>)) = M \<union> (ik\<^sub>s\<^sub>s\<^sub>t (a#A) \<cdot>\<^sub>s\<^sub>e\<^sub>t \<I>)"
"dbupd\<^sub>s\<^sub>s\<^sub>t A \<I> D = dbupd\<^sub>s\<^sub>s\<^sub>t (a#A) \<I> D"
using Receive by (auto simp add: ik\<^sub>s\<^sub>s\<^sub>t_def)
thus ?thesis using Cons Receive by force
qed (auto simp add: ik\<^sub>s\<^sub>s\<^sub>t_def)
qed (simp add: ik\<^sub>s\<^sub>s\<^sub>t_def)
have 3: "?Q \<Longrightarrow> ?R \<Longrightarrow> ?P"
proof (induction A arbitrary: M D)
case (Cons a A) thus ?case
proof (cases a)
case (Receive t)
have "insert (t \<cdot> \<I>) (M \<union> (ik\<^sub>s\<^sub>s\<^sub>t A \<cdot>\<^sub>s\<^sub>e\<^sub>t \<I>)) = M \<union> (ik\<^sub>s\<^sub>s\<^sub>t (a#A) \<cdot>\<^sub>s\<^sub>e\<^sub>t \<I>)"
"dbupd\<^sub>s\<^sub>s\<^sub>t A \<I> D = dbupd\<^sub>s\<^sub>s\<^sub>t (a#A) \<I> D"
using Receive by (auto simp add: ik\<^sub>s\<^sub>s\<^sub>t_def)
thus ?thesis using Cons Receive by simp
qed (auto simp add: ik\<^sub>s\<^sub>s\<^sub>t_def)
qed (simp add: ik\<^sub>s\<^sub>s\<^sub>t_def)
show ?thesis by (metis 1 2 3)
qed
lemma negchecks_model_db_subset:
fixes F F'::"(('a,'b) term \<times> ('a,'b) term) list"
assumes "D' \<subseteq> D"
and "negchecks_model \<I> D X F F'"
shows "negchecks_model \<I> D' X F F'"
proof -
have "list_ex (\<lambda>f. f \<cdot>\<^sub>p \<delta> \<circ>\<^sub>s \<I> \<notin> D') F'"
when "list_ex (\<lambda>f. f \<cdot>\<^sub>p \<delta> \<circ>\<^sub>s \<I> \<notin> D) F'"
for \<delta>::"('a,'b) subst"
using Bex_set[of F' "\<lambda>f. f \<cdot>\<^sub>p \<delta> \<circ>\<^sub>s \<I> \<notin> D'"]
Bex_set[of F' "\<lambda>f. f \<cdot>\<^sub>p \<delta> \<circ>\<^sub>s \<I> \<notin> D"]
that assms(1)
by blast
thus ?thesis using assms(2) by (auto simp add: negchecks_model_def)
qed
lemma negchecks_model_db_supset:
fixes F F'::"(('a,'b) term \<times> ('a,'b) term) list"
assumes "D' \<subseteq> D"
and "\<forall>f \<in> set F'. \<forall>\<delta>. subst_domain \<delta> = set X \<and> ground (subst_range \<delta>) \<longrightarrow> f \<cdot>\<^sub>p (\<delta> \<circ>\<^sub>s \<I>) \<notin> D - D'"
and "negchecks_model \<I> D' X F F'"
shows "negchecks_model \<I> D X F F'"
proof -
have "list_ex (\<lambda>f. f \<cdot>\<^sub>p \<delta> \<circ>\<^sub>s \<I> \<notin> D) F'"
when "list_ex (\<lambda>f. f \<cdot>\<^sub>p \<delta> \<circ>\<^sub>s \<I> \<notin> D') F'" "subst_domain \<delta> = set X \<and> ground (subst_range \<delta>)"
for \<delta>::"('a,'b) subst"
using Bex_set[of F' "\<lambda>f. f \<cdot>\<^sub>p \<delta> \<circ>\<^sub>s \<I> \<notin> D'"]
Bex_set[of F' "\<lambda>f. f \<cdot>\<^sub>p \<delta> \<circ>\<^sub>s \<I> \<notin> D"]
that assms(1,2)
by blast
thus ?thesis using assms(3) by (auto simp add: negchecks_model_def)
qed
lemma negchecks_model_subst:
fixes F F'::"(('a,'b) term \<times> ('a,'b) term) list"
assumes "(subst_domain \<delta> \<union> range_vars \<delta>) \<inter> set X = {}"
shows "negchecks_model (\<delta> \<circ>\<^sub>s \<theta>) D X F F' \<longleftrightarrow> negchecks_model \<theta> D X (F \<cdot>\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s \<delta>) (F' \<cdot>\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s \<delta>)"
proof -
have 0: "\<sigma> \<circ>\<^sub>s (\<delta> \<circ>\<^sub>s \<theta>) = \<delta> \<circ>\<^sub>s (\<sigma> \<circ>\<^sub>s \<theta>)"
when \<sigma>: "subst_domain \<sigma> = set X" "ground (subst_range \<sigma>)" for \<sigma>
by (metis (no_types, lifting) \<sigma> subst_compose_assoc assms(1) inf_sup_aci(1)
subst_comp_eq_if_disjoint_vars sup_inf_absorb range_vars_alt_def)
{ fix \<sigma>::"('a,'b) subst" and t t'
assume \<sigma>: "subst_domain \<sigma> = set X" "ground (subst_range \<sigma>)"
and *: "list_ex (\<lambda>f. fst f \<cdot> (\<sigma> \<circ>\<^sub>s (\<delta> \<circ>\<^sub>s \<theta>)) \<noteq> snd f \<cdot> (\<sigma> \<circ>\<^sub>s (\<delta> \<circ>\<^sub>s \<theta>))) F"
obtain f where f: "f \<in> set F" "fst f \<cdot> \<sigma> \<circ>\<^sub>s (\<delta> \<circ>\<^sub>s \<theta>) \<noteq> snd f \<cdot> \<sigma> \<circ>\<^sub>s (\<delta> \<circ>\<^sub>s \<theta>)"
using * by (induct F) auto
hence "(fst f \<cdot> \<delta>) \<cdot> \<sigma> \<circ>\<^sub>s \<theta> \<noteq> (snd f \<cdot> \<delta>) \<cdot> \<sigma> \<circ>\<^sub>s \<theta>" using 0[OF \<sigma>] by simp
moreover have "(fst f \<cdot> \<delta>, snd f \<cdot> \<delta>) \<in> set (F \<cdot>\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s \<delta>)"
using f(1) by (auto simp add: subst_apply_pairs_def)
ultimately have "list_ex (\<lambda>f. fst f \<cdot> (\<sigma> \<circ>\<^sub>s \<theta>) \<noteq> snd f \<cdot> (\<sigma> \<circ>\<^sub>s \<theta>)) (F \<cdot>\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s \<delta>)"
using f(1) Bex_set by fastforce
} moreover {
fix \<sigma>::"('a,'b) subst" and t t'
assume \<sigma>: "subst_domain \<sigma> = set X" "ground (subst_range \<sigma>)"
and *: "list_ex (\<lambda>f. f \<cdot>\<^sub>p \<sigma> \<circ>\<^sub>s (\<delta> \<circ>\<^sub>s \<theta>) \<notin> D) F'"
obtain f where f: "f \<in> set F'" "f \<cdot>\<^sub>p \<sigma> \<circ>\<^sub>s (\<delta> \<circ>\<^sub>s \<theta>) \<notin> D"
using * by (induct F') auto
hence "f \<cdot>\<^sub>p \<delta> \<cdot>\<^sub>p \<sigma> \<circ>\<^sub>s \<theta> \<notin> D" using 0[OF \<sigma>] by (metis subst_pair_compose)
moreover have "f \<cdot>\<^sub>p \<delta> \<in> set (F' \<cdot>\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s \<delta>)"
using f(1) by (auto simp add: subst_apply_pairs_def)
ultimately have "list_ex (\<lambda>f. f \<cdot>\<^sub>p \<sigma> \<circ>\<^sub>s \<theta> \<notin> D) (F' \<cdot>\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s \<delta>)"
using f(1) Bex_set by fastforce
} moreover {
fix \<sigma>::"('a,'b) subst" and t t'
assume \<sigma>: "subst_domain \<sigma> = set X" "ground (subst_range \<sigma>)"
and *: "list_ex (\<lambda>f. fst f \<cdot> (\<sigma> \<circ>\<^sub>s \<theta>) \<noteq> snd f \<cdot> (\<sigma> \<circ>\<^sub>s \<theta>)) (F \<cdot>\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s \<delta>)"
obtain f where f: "f \<in> set (F \<cdot>\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s \<delta>)" "fst f \<cdot> \<sigma> \<circ>\<^sub>s \<theta> \<noteq> snd f \<cdot> \<sigma> \<circ>\<^sub>s \<theta>"
using * by (induct F) (auto simp add: subst_apply_pairs_def)
then obtain g where g: "g \<in> set F" "f = g \<cdot>\<^sub>p \<delta>" by (auto simp add: subst_apply_pairs_def)
have "fst g \<cdot> \<sigma> \<circ>\<^sub>s (\<delta> \<circ>\<^sub>s \<theta>) \<noteq> snd g \<cdot> \<sigma> \<circ>\<^sub>s (\<delta> \<circ>\<^sub>s \<theta>)"
using f(2) g 0[OF \<sigma>] by (simp add: prod.case_eq_if)
hence "list_ex (\<lambda>f. fst f \<cdot> (\<sigma> \<circ>\<^sub>s (\<delta> \<circ>\<^sub>s \<theta>)) \<noteq> snd f \<cdot> (\<sigma> \<circ>\<^sub>s (\<delta> \<circ>\<^sub>s \<theta>))) F"
using g Bex_set by fastforce
} moreover {
fix \<sigma>::"('a,'b) subst" and t t'
assume \<sigma>: "subst_domain \<sigma> = set X" "ground (subst_range \<sigma>)"
and *: "list_ex (\<lambda>f. f \<cdot>\<^sub>p (\<sigma> \<circ>\<^sub>s \<theta>) \<notin> D) (F' \<cdot>\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s \<delta>)"
obtain f where f: "f \<in> set (F' \<cdot>\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s \<delta>)" "f \<cdot>\<^sub>p \<sigma> \<circ>\<^sub>s \<theta> \<notin> D"
using * by (induct F') (auto simp add: subst_apply_pairs_def)
then obtain g where g: "g \<in> set F'" "f = g \<cdot>\<^sub>p \<delta>" by (auto simp add: subst_apply_pairs_def)
have "g \<cdot>\<^sub>p \<sigma> \<circ>\<^sub>s (\<delta> \<circ>\<^sub>s \<theta>) \<notin> D"
using f(2) g 0[OF \<sigma>] by (simp add: prod.case_eq_if)
hence "list_ex (\<lambda>f. f \<cdot>\<^sub>p (\<sigma> \<circ>\<^sub>s (\<delta> \<circ>\<^sub>s \<theta>)) \<notin> D) F'"
using g Bex_set by fastforce
} ultimately show ?thesis using assms unfolding negchecks_model_def by blast
qed
lemma strand_sem_subst_stateful:
fixes \<delta>::"('fun,'var) subst"
assumes "(subst_domain \<delta> \<union> range_vars \<delta>) \<inter> bvars\<^sub>s\<^sub>s\<^sub>t S = {}"
shows "\<lbrakk>M; D; S\<rbrakk>\<^sub>s (\<delta> \<circ>\<^sub>s \<theta>) \<longleftrightarrow> \<lbrakk>M; D; S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<delta>\<rbrakk>\<^sub>s \<theta>"
proof
note [simp] = subst_sst_cons[of _ _ \<delta>] subst_subst_compose[of _ \<delta> \<theta>]
have "(subst_domain \<delta> \<union> range_vars \<delta>) \<inter> (subst_domain \<gamma> \<union> range_vars \<gamma>) = {}"
when \<delta>: "(subst_domain \<delta> \<union> range_vars \<delta>) \<inter> set X = {}"
and \<gamma>: "subst_domain \<gamma> = set X" "ground (subst_range \<gamma>)"
for X and \<gamma>::"('fun,'var) subst"
using \<delta> \<gamma> unfolding range_vars_alt_def by auto
hence 0: "\<gamma> \<circ>\<^sub>s \<delta> = \<delta> \<circ>\<^sub>s \<gamma>"
when \<delta>: "(subst_domain \<delta> \<union> range_vars \<delta>) \<inter> set X = {}"
and \<gamma>: "subst_domain \<gamma> = set X" "ground (subst_range \<gamma>)"
for \<gamma> X
by (metis \<delta> \<gamma> subst_comp_eq_if_disjoint_vars)
show "\<lbrakk>M; D; S\<rbrakk>\<^sub>s (\<delta> \<circ>\<^sub>s \<theta>) \<Longrightarrow> \<lbrakk>M; D; S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<delta>\<rbrakk>\<^sub>s \<theta>" using assms
proof (induction S arbitrary: M D rule: strand_sem_stateful_induct)
case (ConsNegChecks M D X F F' S)
hence *: "\<lbrakk>M; D; S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<delta>\<rbrakk>\<^sub>s \<theta>" and **: "(subst_domain \<delta> \<union> range_vars \<delta>) \<inter> set X = {}"
unfolding bvars\<^sub>s\<^sub>s\<^sub>t_def negchecks_model_def by (force, auto)
have "negchecks_model (\<delta> \<circ>\<^sub>s \<theta>) D X F F'" using ConsNegChecks by auto
hence "negchecks_model \<theta> D X (F \<cdot>\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s \<delta>) (F' \<cdot>\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s \<delta>)"
using 0[OF **] negchecks_model_subst[OF **] by blast
moreover have "rm_vars (set X) \<delta> = \<delta>" using ConsNegChecks.prems(2) by force
ultimately show ?case using * by auto
qed simp_all
show "\<lbrakk>M; D; S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<delta>\<rbrakk>\<^sub>s \<theta> \<Longrightarrow> \<lbrakk>M; D; S\<rbrakk>\<^sub>s (\<delta> \<circ>\<^sub>s \<theta>)" using assms
proof (induction S arbitrary: M D rule: strand_sem_stateful_induct)
case (ConsNegChecks M D X F F' S)
have \<delta>: "rm_vars (set X) \<delta> = \<delta>" using ConsNegChecks.prems(2) by force
hence *: "\<lbrakk>M; D; S\<rbrakk>\<^sub>s (\<delta> \<circ>\<^sub>s \<theta>)" and **: "(subst_domain \<delta> \<union> range_vars \<delta>) \<inter> set X = {}"
using ConsNegChecks unfolding bvars\<^sub>s\<^sub>s\<^sub>t_def negchecks_model_def by auto
have "negchecks_model \<theta> D X (F \<cdot>\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s \<delta>) (F' \<cdot>\<^sub>p\<^sub>a\<^sub>i\<^sub>r\<^sub>s \<delta>)"
using ConsNegChecks.prems(1) \<delta> by (auto simp add: subst_compose_assoc negchecks_model_def)
hence "negchecks_model (\<delta> \<circ>\<^sub>s \<theta>) D X F F'"
using 0[OF **] negchecks_model_subst[OF **] by blast
thus ?case using * by auto
qed simp_all
qed
end
subsection \<open>Well-Formedness Lemmata\<close>
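(* The lemmas below relate the well-formedness predicate wf'\<^sub>s\<^sub>s\<^sub>t to prefixes and suffixes of
   strands, to appending strands, and to substitution application. *)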
lemma wfvarsocc\<^sub>s\<^sub>s\<^sub>t_subset_wfrestrictedvars\<^sub>s\<^sub>s\<^sub>t[simp]:
"wfvarsoccs\<^sub>s\<^sub>s\<^sub>t S \<subseteq> wfrestrictedvars\<^sub>s\<^sub>s\<^sub>t S"
by (induction S)
(auto simp add: wfrestrictedvars\<^sub>s\<^sub>s\<^sub>t_def wfvarsoccs\<^sub>s\<^sub>s\<^sub>t_def
split: stateful_strand_step.split poscheckvariant.split)
lemma wfvarsoccs\<^sub>s\<^sub>s\<^sub>t_append: "wfvarsoccs\<^sub>s\<^sub>s\<^sub>t (S@S') = wfvarsoccs\<^sub>s\<^sub>s\<^sub>t S \<union> wfvarsoccs\<^sub>s\<^sub>s\<^sub>t S'"
by (simp add: wfvarsoccs\<^sub>s\<^sub>s\<^sub>t_def)
lemma wfrestrictedvars\<^sub>s\<^sub>s\<^sub>t_union[simp]:
"wfrestrictedvars\<^sub>s\<^sub>s\<^sub>t (S@T) = wfrestrictedvars\<^sub>s\<^sub>s\<^sub>t S \<union> wfrestrictedvars\<^sub>s\<^sub>s\<^sub>t T"
by (simp add: wfrestrictedvars\<^sub>s\<^sub>s\<^sub>t_def)
lemma wfrestrictedvars\<^sub>s\<^sub>s\<^sub>t_singleton:
"wfrestrictedvars\<^sub>s\<^sub>s\<^sub>t [s] = wfrestrictedvars\<^sub>s\<^sub>s\<^sub>t\<^sub>p s"
by (simp add: wfrestrictedvars\<^sub>s\<^sub>s\<^sub>t_def)
lemma wf\<^sub>s\<^sub>s\<^sub>t_prefix[dest]: "wf'\<^sub>s\<^sub>s\<^sub>t V (S@S') \<Longrightarrow> wf'\<^sub>s\<^sub>s\<^sub>t V S"
by (induct S rule: wf'\<^sub>s\<^sub>s\<^sub>t.induct) auto
lemma wf\<^sub>s\<^sub>s\<^sub>t_vars_mono: "wf'\<^sub>s\<^sub>s\<^sub>t V S \<Longrightarrow> wf'\<^sub>s\<^sub>s\<^sub>t (V \<union> W) S"
proof (induction S arbitrary: V)
case (Cons x S) thus ?case
proof (cases x)
case (Send t)
hence "wf'\<^sub>s\<^sub>s\<^sub>t (V \<union> fv t \<union> W) S" using Cons.prems(1) Cons.IH by simp
thus ?thesis using Send by (simp add: sup_commute sup_left_commute)
next
case (Equality a t t')
show ?thesis
proof (cases a)
case Assign
hence "wf'\<^sub>s\<^sub>s\<^sub>t (V \<union> fv t \<union> W) S" "fv t' \<subseteq> V \<union> W" using Equality Cons.prems(1) Cons.IH by auto
thus ?thesis using Equality Assign by (simp add: sup_commute sup_left_commute)
next
case Check thus ?thesis using Equality Cons by auto
qed
next
case (InSet a t t')
show ?thesis
proof (cases a)
case Assign
hence "wf'\<^sub>s\<^sub>s\<^sub>t (V \<union> fv t \<union> fv t' \<union> W) S" using InSet Cons.prems(1) Cons.IH by auto
thus ?thesis using InSet Assign by (simp add: sup_commute sup_left_commute)
next
case Check thus ?thesis using InSet Cons by auto
qed
qed auto
qed simp
lemma wf\<^sub>s\<^sub>s\<^sub>tI[intro]: "wfrestrictedvars\<^sub>s\<^sub>s\<^sub>t S \<subseteq> V \<Longrightarrow> wf'\<^sub>s\<^sub>s\<^sub>t V S"
proof (induction S)
case (Cons x S) thus ?case
proof (cases x)
case (Send t)
hence "wf'\<^sub>s\<^sub>s\<^sub>t V S" "V \<union> fv t = V"
using Cons
unfolding wfrestrictedvars\<^sub>s\<^sub>s\<^sub>t_def
by auto
thus ?thesis using Send by simp
next
case (Equality a t t')
show ?thesis
proof (cases a)
case Assign
hence "wf'\<^sub>s\<^sub>s\<^sub>t V S" "fv t' \<subseteq> V"
using Equality Cons
unfolding wfrestrictedvars\<^sub>s\<^sub>s\<^sub>t_def
by auto
thus ?thesis using wf\<^sub>s\<^sub>s\<^sub>t_vars_mono Equality Assign by simp
next
case Check
thus ?thesis
using Equality Cons
unfolding wfrestrictedvars\<^sub>s\<^sub>s\<^sub>t_def
by auto
qed
next
case (InSet a t t')
show ?thesis
proof (cases a)
case Assign
hence "wf'\<^sub>s\<^sub>s\<^sub>t V S" "fv t \<union> fv t' \<subseteq> V"
using InSet Cons
unfolding wfrestrictedvars\<^sub>s\<^sub>s\<^sub>t_def
by auto
thus ?thesis using wf\<^sub>s\<^sub>s\<^sub>t_vars_mono InSet Assign by (simp add: Un_assoc)
next
case Check
thus ?thesis
using InSet Cons
unfolding wfrestrictedvars\<^sub>s\<^sub>s\<^sub>t_def
by auto
qed
qed (simp_all add: wfrestrictedvars\<^sub>s\<^sub>s\<^sub>t_def)
qed (simp add: wfrestrictedvars\<^sub>s\<^sub>s\<^sub>t_def)
lemma wf\<^sub>s\<^sub>s\<^sub>tI'[intro]:
assumes "\<Union>((\<lambda>x. case x of
Receive t \<Rightarrow> fv t
| Equality Assign _ t' \<Rightarrow> fv t'
| Insert t t' \<Rightarrow> fv t \<union> fv t'
| _ \<Rightarrow> {}) ` set S) \<subseteq> V"
shows "wf'\<^sub>s\<^sub>s\<^sub>t V S"
using assms
proof (induction S)
case (Cons x S) thus ?case
proof (cases x)
case (Equality a t t')
thus ?thesis using Cons by (cases a) (auto simp add: wf\<^sub>s\<^sub>s\<^sub>t_vars_mono)
next
case (InSet a t t')
thus ?thesis using Cons by (cases a) (auto simp add: wf\<^sub>s\<^sub>s\<^sub>t_vars_mono Un_assoc)
qed (simp_all add: wf\<^sub>s\<^sub>s\<^sub>t_vars_mono)
qed simp
lemma wf\<^sub>s\<^sub>s\<^sub>t_append_exec: "wf'\<^sub>s\<^sub>s\<^sub>t V (S@S') \<Longrightarrow> wf'\<^sub>s\<^sub>s\<^sub>t (V \<union> wfvarsoccs\<^sub>s\<^sub>s\<^sub>t S) S'"
proof (induction S arbitrary: V)
case (Cons x S V) thus ?case
proof (cases x)
case (Send t)
hence "wf'\<^sub>s\<^sub>s\<^sub>t (V \<union> fv t \<union> wfvarsoccs\<^sub>s\<^sub>s\<^sub>t S) S'" using Cons.prems Cons.IH by simp
thus ?thesis using Send unfolding wfvarsoccs\<^sub>s\<^sub>s\<^sub>t_def by (auto simp add: sup_assoc)
next
case (Equality a t t') show ?thesis
proof (cases a)
case Assign
hence "wf'\<^sub>s\<^sub>s\<^sub>t (V \<union> fv t \<union> wfvarsoccs\<^sub>s\<^sub>s\<^sub>t S) S'" using Equality Cons.prems Cons.IH by auto
thus ?thesis using Equality Assign unfolding wfvarsoccs\<^sub>s\<^sub>s\<^sub>t_def by (auto simp add: sup_assoc)
next
case Check
hence "wf'\<^sub>s\<^sub>s\<^sub>t (V \<union> wfvarsoccs\<^sub>s\<^sub>s\<^sub>t S) S'" using Equality Cons.prems Cons.IH by auto
thus ?thesis using Equality Check unfolding wfvarsoccs\<^sub>s\<^sub>s\<^sub>t_def by (auto simp add: sup_assoc)
qed
next
case (InSet a t t') show ?thesis
proof (cases a)
case Assign
hence "wf'\<^sub>s\<^sub>s\<^sub>t (V \<union> fv t \<union> fv t' \<union> wfvarsoccs\<^sub>s\<^sub>s\<^sub>t S) S'" using InSet Cons.prems Cons.IH by auto
thus ?thesis using InSet Assign unfolding wfvarsoccs\<^sub>s\<^sub>s\<^sub>t_def by (auto simp add: sup_assoc)
next
case Check
hence "wf'\<^sub>s\<^sub>s\<^sub>t (V \<union> wfvarsoccs\<^sub>s\<^sub>s\<^sub>t S) S'" using InSet Cons.prems Cons.IH by auto
thus ?thesis using InSet Check unfolding wfvarsoccs\<^sub>s\<^sub>s\<^sub>t_def by (auto simp add: sup_assoc)
qed
qed (auto simp add: wfvarsoccs\<^sub>s\<^sub>s\<^sub>t_def)
qed (simp add: wfvarsoccs\<^sub>s\<^sub>s\<^sub>t_def)
lemma wf\<^sub>s\<^sub>s\<^sub>t_append:
"wf'\<^sub>s\<^sub>s\<^sub>t X S \<Longrightarrow> wf'\<^sub>s\<^sub>s\<^sub>t Y T \<Longrightarrow> wf'\<^sub>s\<^sub>s\<^sub>t (X \<union> Y) (S@T)"
proof (induction X S rule: wf'\<^sub>s\<^sub>s\<^sub>t.induct)
case 1 thus ?case by (metis wf\<^sub>s\<^sub>s\<^sub>t_vars_mono Un_commute append_Nil)
next
case 3 thus ?case by (metis append_Cons Un_commute Un_assoc wf'\<^sub>s\<^sub>s\<^sub>t.simps(3))
next
case (4 V t t' S)
hence *: "fv t' \<subseteq> V" and "wf'\<^sub>s\<^sub>s\<^sub>t (V \<union> fv t \<union> Y) (S @ T)" by simp_all
hence "wf'\<^sub>s\<^sub>s\<^sub>t (V \<union> Y \<union> fv t) (S @ T)" by (metis Un_commute Un_assoc)
thus ?case using * by auto
next
case (8 V t t' S)
hence "wf'\<^sub>s\<^sub>s\<^sub>t (V \<union> fv t \<union> fv t' \<union> Y) (S @ T)" by simp_all
hence "wf'\<^sub>s\<^sub>s\<^sub>t (V \<union> Y \<union> fv t \<union> fv t') (S @ T)" by (metis Un_commute Un_assoc)
thus ?case by auto
qed auto
lemma wf\<^sub>s\<^sub>s\<^sub>t_append_suffix:
"wf'\<^sub>s\<^sub>s\<^sub>t V S \<Longrightarrow> wfrestrictedvars\<^sub>s\<^sub>s\<^sub>t S' \<subseteq> wfrestrictedvars\<^sub>s\<^sub>s\<^sub>t S \<union> V \<Longrightarrow> wf'\<^sub>s\<^sub>s\<^sub>t V (S@S')"
proof (induction V S rule: wf'\<^sub>s\<^sub>s\<^sub>t.induct)
case (2 V t S)
hence *: "fv t \<subseteq> V" "wf'\<^sub>s\<^sub>s\<^sub>t V S" by simp_all
hence "wfrestrictedvars\<^sub>s\<^sub>s\<^sub>t S' \<subseteq> wfrestrictedvars\<^sub>s\<^sub>s\<^sub>t S \<union> V"
using "2.prems"(2) unfolding wfrestrictedvars\<^sub>s\<^sub>s\<^sub>t_def by auto
thus ?case using "2.IH" * by simp
next
case (3 V t S)
hence *: "wf'\<^sub>s\<^sub>s\<^sub>t (V \<union> fv t) S" by simp_all
hence "wfrestrictedvars\<^sub>s\<^sub>s\<^sub>t S' \<subseteq> wfrestrictedvars\<^sub>s\<^sub>s\<^sub>t S \<union> (V \<union> fv t)"
using "3.prems"(2) unfolding wfrestrictedvars\<^sub>s\<^sub>s\<^sub>t_def by auto
thus ?case using "3.IH" * by simp
next
case (4 V t t' S)
hence *: "fv t' \<subseteq> V" "wf'\<^sub>s\<^sub>s\<^sub>t (V \<union> fv t) S" by simp_all
moreover have "vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p (\<langle>t := t'\<rangle>) = fv t \<union> fv t'"
by simp
moreover have "wfrestrictedvars\<^sub>s\<^sub>s\<^sub>t (\<langle>t := t'\<rangle>#S) = fv t \<union> fv t' \<union> wfrestrictedvars\<^sub>s\<^sub>s\<^sub>t S"
unfolding wfrestrictedvars\<^sub>s\<^sub>s\<^sub>t_def by auto
ultimately have "wfrestrictedvars\<^sub>s\<^sub>s\<^sub>t S' \<subseteq> wfrestrictedvars\<^sub>s\<^sub>s\<^sub>t S \<union> (V \<union> fv t)"
using "4.prems"(2) by blast
thus ?case using "4.IH" * by simp
next
case (6 V t t' S)
hence *: "fv t \<union> fv t' \<subseteq> V" "wf'\<^sub>s\<^sub>s\<^sub>t V S" by simp_all
moreover have "vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p (insert\<langle>t,t'\<rangle>) = fv t \<union> fv t'"
by simp
moreover have "wfrestrictedvars\<^sub>s\<^sub>s\<^sub>t (insert\<langle>t,t'\<rangle>#S) = fv t \<union> fv t' \<union> wfrestrictedvars\<^sub>s\<^sub>s\<^sub>t S"
unfolding wfrestrictedvars\<^sub>s\<^sub>s\<^sub>t_def by auto
ultimately have "wfrestrictedvars\<^sub>s\<^sub>s\<^sub>t S' \<subseteq> wfrestrictedvars\<^sub>s\<^sub>s\<^sub>t S \<union> V"
using "6.prems"(2) by blast
thus ?case using "6.IH" * by simp
next
case (8 V t t' S)
hence *: "wf'\<^sub>s\<^sub>s\<^sub>t (V \<union> fv t \<union> fv t') S" by simp_all
moreover have "vars\<^sub>s\<^sub>s\<^sub>t\<^sub>p (select\<langle>t,t'\<rangle>) = fv t \<union> fv t'"
by simp
moreover have "wfrestrictedvars\<^sub>s\<^sub>s\<^sub>t (select\<langle>t,t'\<rangle>#S) = fv t \<union> fv t' \<union> wfrestrictedvars\<^sub>s\<^sub>s\<^sub>t S"
unfolding wfrestrictedvars\<^sub>s\<^sub>s\<^sub>t_def by auto
ultimately have "wfrestrictedvars\<^sub>s\<^sub>s\<^sub>t S' \<subseteq> wfrestrictedvars\<^sub>s\<^sub>s\<^sub>t S \<union> (V \<union> fv t \<union> fv t')"
using "8.prems"(2) by blast
thus ?case using "8.IH" * by simp
qed (simp_all add: wf\<^sub>s\<^sub>s\<^sub>tI wfrestrictedvars\<^sub>s\<^sub>s\<^sub>t_def)
lemma wf\<^sub>s\<^sub>s\<^sub>t_append_suffix':
assumes "wf'\<^sub>s\<^sub>s\<^sub>t V S"
and "\<Union>((\<lambda>x. case x of
Receive t \<Rightarrow> fv t
| Equality Assign _ t' \<Rightarrow> fv t'
| Insert t t' \<Rightarrow> fv t \<union> fv t'
| _ \<Rightarrow> {}) ` set S') \<subseteq> wfvarsoccs\<^sub>s\<^sub>s\<^sub>t S \<union> V"
shows "wf'\<^sub>s\<^sub>s\<^sub>t V (S@S')"
using assms
by (induction V S rule: wf'\<^sub>s\<^sub>s\<^sub>t.induct)
(auto simp add: wf\<^sub>s\<^sub>s\<^sub>tI' wf\<^sub>s\<^sub>s\<^sub>t_vars_mono wfvarsoccs\<^sub>s\<^sub>s\<^sub>t_def)
lemma wf\<^sub>s\<^sub>s\<^sub>t_subst_apply:
"wf'\<^sub>s\<^sub>s\<^sub>t V S \<Longrightarrow> wf'\<^sub>s\<^sub>s\<^sub>t (fv\<^sub>s\<^sub>e\<^sub>t (\<delta> ` V)) (S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<delta>)"
proof (induction S arbitrary: V rule: wf'\<^sub>s\<^sub>s\<^sub>t.induct)
case (2 V t S)
hence "wf'\<^sub>s\<^sub>s\<^sub>t V S" "fv t \<subseteq> V" by simp_all
hence "wf'\<^sub>s\<^sub>s\<^sub>t (fv\<^sub>s\<^sub>e\<^sub>t (\<delta> ` V)) (S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<delta>)" "fv (t \<cdot> \<delta>) \<subseteq> fv\<^sub>s\<^sub>e\<^sub>t (\<delta> ` V)"
using "2.IH" subst_apply_fv_subset by simp_all
thus ?case by (simp add: subst_apply_stateful_strand_def)
next
case (3 V t S)
hence "wf'\<^sub>s\<^sub>s\<^sub>t (V \<union> fv t) S" by simp
hence "wf'\<^sub>s\<^sub>s\<^sub>t (fv\<^sub>s\<^sub>e\<^sub>t (\<delta> ` (V \<union> fv t))) (S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<delta>)" using "3.IH" by metis
hence "wf'\<^sub>s\<^sub>s\<^sub>t (fv\<^sub>s\<^sub>e\<^sub>t (\<delta> ` V) \<union> fv (t \<cdot> \<delta>)) (S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<delta>)" by (metis subst_apply_fv_union)
thus ?case by (simp add: subst_apply_stateful_strand_def)
next
case (4 V t t' S)
hence "wf'\<^sub>s\<^sub>s\<^sub>t (V \<union> fv t) S" "fv t' \<subseteq> V" by auto
hence "wf'\<^sub>s\<^sub>s\<^sub>t (fv\<^sub>s\<^sub>e\<^sub>t (\<delta> ` (V \<union> fv t))) (S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<delta>)" and *: "fv (t' \<cdot> \<delta>) \<subseteq> fv\<^sub>s\<^sub>e\<^sub>t (\<delta> ` V)"
using "4.IH" subst_apply_fv_subset by force+
hence "wf'\<^sub>s\<^sub>s\<^sub>t (fv\<^sub>s\<^sub>e\<^sub>t (\<delta> ` V) \<union> fv (t \<cdot> \<delta>)) (S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<delta>)" by (metis subst_apply_fv_union)
thus ?case using * by (simp add: subst_apply_stateful_strand_def)
next
case (6 V t t' S)
hence "wf'\<^sub>s\<^sub>s\<^sub>t V S" "fv t \<union> fv t' \<subseteq> V" by auto
hence "wf'\<^sub>s\<^sub>s\<^sub>t (fv\<^sub>s\<^sub>e\<^sub>t (\<delta> ` V)) (S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<delta>)" "fv (t \<cdot> \<delta>) \<subseteq> fv\<^sub>s\<^sub>e\<^sub>t (\<delta> ` V)" "fv (t' \<cdot> \<delta>) \<subseteq> fv\<^sub>s\<^sub>e\<^sub>t (\<delta> ` V)"
using "6.IH" subst_apply_fv_subset by force+
thus ?case by (simp add: sup_assoc subst_apply_stateful_strand_def)
next
case (8 V t t' S)
hence "wf'\<^sub>s\<^sub>s\<^sub>t (V \<union> fv t \<union> fv t') S" by auto
hence "wf'\<^sub>s\<^sub>s\<^sub>t (fv\<^sub>s\<^sub>e\<^sub>t (\<delta> ` (V \<union> fv t \<union> fv t'))) (S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<delta>)"
using "8.IH" subst_apply_fv_subset by force
hence "wf'\<^sub>s\<^sub>s\<^sub>t (fv\<^sub>s\<^sub>e\<^sub>t (\<delta> ` V) \<union> fv (t \<cdot> \<delta>) \<union> fv (t' \<cdot> \<delta>)) (S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<delta>)" by (metis subst_apply_fv_union)
thus ?case by (simp add: subst_apply_stateful_strand_def)
qed (auto simp add: subst_apply_stateful_strand_def)
end
|
No Milkweed, No Monarchs: Why Asclepias Plants Are So Very Important!
Although it won’t be official until mid-March, when the Mexican government releases the winter’s population count, ‘unofficial’ reports are anticipating a rather small migrating population. These unofficial reports come from the El Rosario Sanctuary in Mexico (see the video example from 2016 at the end of this post), a site that can sometimes be the winter home for over 50% of the entire monarch population in Mexico. Reports and photos show butterflies densely covering approximately 18 trees. That’s good news, but last year’s reports indicated that around 50 trees were covered.
That’s not good news, but it is a call to action: PLANT MORE MILKWEED!
Humans are a fickle bunch, and try as I might, they are hard to figure. Say one thing, mean another. Say one thing, find out it wasn’t true. Commit to one thing, then change course. Not saying I’m any different, but the dependability of nature (well, maybe not the weather) is something that brings me a lot of comfort. Cycles that repeat; you sort of come to depend on them. Geese come south, geese go north. Jenny wrens nesting in the same clay pots they used the year before. My mother-in-law’s daffodils emerging in late February, as they have every year since she planted them 30+ years ago. Even the sturgeon have decided the river is clean enough to make a comeback. We brought them to the brink of extinction, then we decided to bring them back. See what I mean? Fickle.
The monarch migration is the longest known insect migration on earth.