(* Title: HOL/Auth/n_german_lemma_on_inv__29.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
(*header{*The n_german Protocol Case Study*}*)
theory n_german_lemma_on_inv__29 imports n_german_base
begin
section{*All lemmas on causal relation between inv__29 and some rule r*}
lemma n_SendInv__part__0Vsinv__29:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendInv__part__0 i)" and
a2: "(\<exists> p__Inv0 p__Inv2. p__Inv0\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv2\<and>f=inv__29 p__Inv0 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendInv__part__0 i" apply fastforce done
from a2 obtain p__Inv0 p__Inv2 where a2:"p__Inv0\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv2\<and>f=inv__29 p__Inv0 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i=p__Inv0)\<or>(i~=p__Inv0\<and>i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i=p__Inv0)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv0\<and>i~=p__Inv2)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendInv__part__1Vsinv__29:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendInv__part__1 i)" and
a2: "(\<exists> p__Inv0 p__Inv2. p__Inv0\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv2\<and>f=inv__29 p__Inv0 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendInv__part__1 i" apply fastforce done
from a2 obtain p__Inv0 p__Inv2 where a2:"p__Inv0\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv2\<and>f=inv__29 p__Inv0 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i=p__Inv0)\<or>(i~=p__Inv0\<and>i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i=p__Inv0)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv0\<and>i~=p__Inv2)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendInvAckVsinv__29:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendInvAck i)" and
a2: "(\<exists> p__Inv0 p__Inv2. p__Inv0\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv2\<and>f=inv__29 p__Inv0 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendInvAck i" apply fastforce done
from a2 obtain p__Inv0 p__Inv2 where a2:"p__Inv0\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv2\<and>f=inv__29 p__Inv0 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i=p__Inv0)\<or>(i~=p__Inv0\<and>i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i=p__Inv0)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Para (Ident ''Chan2'') p__Inv2) ''Cmd'')) (Const GntE)) (eqn (IVar (Field (Para (Ident ''Chan2'') p__Inv0) ''Cmd'')) (Const Inv))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv0\<and>i~=p__Inv2)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_RecvInvAckVsinv__29:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvInvAck i)" and
a2: "(\<exists> p__Inv0 p__Inv2. p__Inv0\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv2\<and>f=inv__29 p__Inv0 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvInvAck i" apply fastforce done
from a2 obtain p__Inv0 p__Inv2 where a2:"p__Inv0\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv2\<and>f=inv__29 p__Inv0 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i=p__Inv0)\<or>(i~=p__Inv0\<and>i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i=p__Inv0)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv0\<and>i~=p__Inv2)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendGntSVsinv__29:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendGntS i)" and
a2: "(\<exists> p__Inv0 p__Inv2. p__Inv0\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv2\<and>f=inv__29 p__Inv0 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendGntS i" apply fastforce done
from a2 obtain p__Inv0 p__Inv2 where a2:"p__Inv0\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv2\<and>f=inv__29 p__Inv0 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i=p__Inv0)\<or>(i~=p__Inv0\<and>i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i=p__Inv0)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv0\<and>i~=p__Inv2)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendGntEVsinv__29:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendGntE N i)" and
a2: "(\<exists> p__Inv0 p__Inv2. p__Inv0\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv2\<and>f=inv__29 p__Inv0 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendGntE N i" apply fastforce done
from a2 obtain p__Inv0 p__Inv2 where a2:"p__Inv0\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv2\<and>f=inv__29 p__Inv0 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i=p__Inv0)\<or>(i~=p__Inv0\<and>i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Para (Ident ''ShrSet'') p__Inv0)) (Const false)) (eqn (IVar (Field (Para (Ident ''Chan3'') p__Inv0) ''Cmd'')) (Const InvAck))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i=p__Inv0)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv0\<and>i~=p__Inv2)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_RecvGntSVsinv__29:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntS i)" and
a2: "(\<exists> p__Inv0 p__Inv2. p__Inv0\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv2\<and>f=inv__29 p__Inv0 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvGntS i" apply fastforce done
from a2 obtain p__Inv0 p__Inv2 where a2:"p__Inv0\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv2\<and>f=inv__29 p__Inv0 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i=p__Inv0)\<or>(i~=p__Inv0\<and>i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i=p__Inv0)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv0\<and>i~=p__Inv2)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_RecvGntEVsinv__29:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntE i)" and
a2: "(\<exists> p__Inv0 p__Inv2. p__Inv0\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv2\<and>f=inv__29 p__Inv0 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvGntE i" apply fastforce done
from a2 obtain p__Inv0 p__Inv2 where a2:"p__Inv0\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv2\<and>f=inv__29 p__Inv0 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i=p__Inv0)\<or>(i~=p__Inv0\<and>i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i=p__Inv0)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv0\<and>i~=p__Inv2)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendReqE__part__1Vsinv__29:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendReqE__part__1 i" and
a2: "(\<exists> p__Inv0 p__Inv2. p__Inv0\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv2\<and>f=inv__29 p__Inv0 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_StoreVsinv__29:
assumes a1: "\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d" and
a2: "(\<exists> p__Inv0 p__Inv2. p__Inv0\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv2\<and>f=inv__29 p__Inv0 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_RecvReqEVsinv__29:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_RecvReqE N i" and
a2: "(\<exists> p__Inv0 p__Inv2. p__Inv0\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv2\<and>f=inv__29 p__Inv0 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendReqE__part__0Vsinv__29:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendReqE__part__0 i" and
a2: "(\<exists> p__Inv0 p__Inv2. p__Inv0\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv2\<and>f=inv__29 p__Inv0 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendReqSVsinv__29:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendReqS i" and
a2: "(\<exists> p__Inv0 p__Inv2. p__Inv0\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv2\<and>f=inv__29 p__Inv0 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_RecvReqSVsinv__29:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_RecvReqS N i" and
a2: "(\<exists> p__Inv0 p__Inv2. p__Inv0\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv2\<and>f=inv__29 p__Inv0 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
end
|
State Before: α : Type u
β : Type v
inst✝¹ : TopologicalSpace α
inst✝ : T1Space α
x : α
s : Set α
⊢ {x}ᶜ ∈ 𝓝ˢ s ↔ ¬x ∈ s
State After: no goals
Tactic: rw [isOpen_compl_singleton.mem_nhdsSet, subset_compl_singleton_iff]
|
[STATEMENT]
lemma "(not ((C ** (D ** E)) and ((A \<longrightarrow>* (not (not (B \<longrightarrow>* not (D ** (E ** C))) ** A))) **
(B and (A ** sep_true)))))
(h::'a::heap_sep_algebra)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<not> ((C \<and>* D \<and>* E) h \<and> ((A \<longrightarrow>* (\<lambda>s. \<not> ((\<lambda>s. \<not> (B \<longrightarrow>* (\<lambda>s. \<not> (D \<and>* E \<and>* C) s)) s) \<and>* A) s)) \<and>* (\<lambda>s. B s \<and> (A \<and>* (\<lambda>s. True)) s)) h)
[PROOF STEP]
by separata
|
||| SipHash24, ported from the C [reference implementation](https://github.com/veorq/SipHash/blob/master/siphash.c)
||| and the Rust implementation https://doc.rust-lang.org/src/core/hash/sip.rs.html
module Data.Hashable.SipHashV2
import Data.Buffer
%default total
%inline
CROUNDS : Nat
CROUNDS = 2
%inline
DROUNDS : Nat
DROUNDS = 4
record HashState where
constructor MkST
v0 : Bits64
v1 : Bits64
v2 : Bits64
v3 : Bits64
initHashState : HashState
initHashState = MkST
{ v0 = 0x736f6d6570736575
, v1 = 0x646f72616e646f6d
, v2 = 0x6c7967656e657261
, v3 = 0x7465646279746573
}
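-- Note: v0..v3 above are the standard SipHash initialization constants
-- ("somepseudorandomlygeneratedbytes"). Keyed SipHash XORs a 128-bit key
-- (k0, k1) into them; since nothing is XORed in here, this module hashes
-- with a fixed all-zero key.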
record PartialHash where
constructor MkPartial
tail : Bits64
index : Bits64
length : Bits64
hst : HashState
initPartial : PartialHash
initPartial = MkPartial 0 0 0 initHashState
%inline
shl : Bits64 -> Bits64 -> Bits64
shl = prim__shl_Bits64
%inline
shr : Bits64 -> Bits64 -> Bits64
shr = prim__shr_Bits64
%inline
bor : Bits64 -> Bits64 -> Bits64
bor = prim__or_Bits64
%inline
xor : Bits64 -> Bits64 -> Bits64
xor = prim__xor_Bits64
%inline
and : Bits64 -> Bits64 -> Bits64
and = prim__and_Bits64
%inline
shr16 : Bits16 -> Bits16 -> Bits16
shr16 = prim__shr_Bits16
%inline
shr32 : Bits32 -> Bits32 -> Bits32
shr32 = prim__shr_Bits32
%inline
andInt : Int -> Int -> Int
andInt = prim__and_Int
%inline
%spec b
rotl : Bits64 -> (b : Bits64) -> Bits64
rotl x b = (x `shl` b) `bor` (x `shr` (64 - b))
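-- rotl is only instantiated with b in {13, 16, 17, 21, 32}, so the
-- complementary shift (64 - b) is always in range; b = 0 would need care.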
-- Round the length up to the smallest multiple of 8 that is >= the given number
%inline
fullSize : Int -> Int
fullSize x =
  let left = x `andInt` 7
  in if left == 0
        then x
        else x - left + 8
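-- e.g. fullSize 13 == 16, fullSize 16 == 16, fullSize 0 == 0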
compress : HashState -> HashState
compress (MkST v0 v1 v2 v3) =
let v0 = v0 + v1;
v1 = v1 `rotl` 13;
v1 = v1 `xor` v0;
v0 = v0 `rotl` 32;
v2 = v2 + v3;
v3 = v3 `rotl` 16;
v3 = v3 `xor` v2;
v0 = v0 + v3;
v3 = v3 `rotl` 21;
v3 = v3 `xor` v0;
v2 = v2 + v1;
v1 = v1 `rotl` 17;
v1 = v1 `xor` v2;
v2 = v2 `rotl` 32;
in MkST v0 v1 v2 v3
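-- compress is one SIPROUND of the C reference implementation: the same
-- add/rotate/xor sequence with rotation counts 13, 32, 16, 21, 17, 32.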
%inline
%spec rounds, f
repeat : (rounds : Nat) -> (f : a -> a) -> a -> a
repeat Z f x = x
repeat (S k) f x = repeat k f (f x)
%inline
hashBits64 : HashState -> Bits64 -> HashState
hashBits64 (MkST v0 v1 v2 v3) m =
let v3 = v3 `xor` m
MkST v0 v1 v2 v3 = repeat CROUNDS compress $ MkST v0 v1 v2 v3
v0 = v0 `xor` m
in MkST v0 v1 v2 v3
export
finish : PartialHash -> Bits64
finish (MkPartial tail index length hst) =
let MkST v0 v1 v2 v3 = hst
rest = ((length `and` 0xff) `shl` 56) `bor` tail
v3 = v3 `xor` rest
MkST v0 v1 v2 v3 = repeat CROUNDS compress $ MkST v0 v1 v2 v3
v0 = v0 `xor` rest
v2 = v2 `xor` 0xff
MkST v0 v1 v2 v3 = repeat DROUNDS compress $ MkST v0 v1 v2 v3
in (v0 `xor` v1) `xor` (v2 `xor` v3)
export
addBits8 : PartialHash -> Bits8 -> PartialHash
addBits8 (MkPartial tail index len hs) x = case index of
-- finished a set of tail
56 => MkPartial 0 0 (len + 1) $ hashBits64 hs (tail `bor` (cast x `shl` 56))
-- add byte to partial
_ => MkPartial (tail `bor` (cast x `shl` index)) (index + 8) (len + 1) hs
export
addBits16 : PartialHash -> Bits16 -> PartialHash
addBits16 ph x = addBits8 (addBits8 ph $ cast x) (cast $ x `shr16` 8)
export
addBits32 : PartialHash -> Bits32 -> PartialHash
addBits32 ph x = addBits16 (addBits16 ph $ cast x) (cast $ x `shr32` 16)
export
addBits64 : PartialHash -> Bits64 -> PartialHash
addBits64 (MkPartial tail index length hst) x = MkPartial tail index (length + 8) $ hashBits64 hst x
hashBuffer' : PartialHash -> Buffer -> Int -> Int -> IO PartialHash
hashBuffer' ph buf len idx = if idx >= len
then pure ph
else do
word <- getBits64 buf idx
let ph = addBits64 ph word
hashBuffer' ph buf len (assert_smaller idx $ idx + 8)
hashBuffer : PartialHash -> Buffer -> IO PartialHash
hashBuffer ph buf = do
rawLen <- rawSize buf
let len = fullSize rawLen
Just buf' <- resizeBuffer buf len
    | Nothing => hashBuffer' ph buf (rawLen `andInt` (-8)) 0 -- fall back to hashing only whole 64-bit words
hashBuffer' ph buf' len 0
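-- A minimal usage sketch (module-internal, since initPartial is not exported):
--
--   twoWordHash : Bits64
--   twoWordHash = finish (addBits64 (addBits64 initPartial 0x0123456789abcdef) 42)
--
-- Note that addBits64 feeds a whole word into the state and leaves any pending
-- tail bytes untouched, so it assumes word-aligned input (as in hashBuffer').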
|
{-# OPTIONS --cubical --no-import-sorts --guardedness --safe #-}
module Cubical.Codata.M.AsLimit.Coalg where
open import Cubical.Codata.M.AsLimit.Coalg.Base public
|
import argparse
import configparser
import boto3
import pandas as pd
import numpy as np
import cv2
import os
import pickle
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten
from matplotlib import pyplot as plt
import CPR_utils as util
def prepare():
# Create dictionary for alphabets and related numbers
alphabets_dic = {0: 'A', 1: 'B', 2: 'C', 3: 'D', 4: 'E', 5: 'F', 6: 'G', 7: 'H', 8: 'I', 9: 'J',
10: 'K', 11: 'L', 12: 'M', 13: 'N', 14: 'O', 15: 'P', 16: 'Q', 17: 'R', 18: 'S', 19: 'T',
20: 'U', 21: 'V', 22: 'W', 23: 'X', 24: 'Y', 25: 'Z', 26: '0', 27: '1', 28: '2', 29:'3',
30: '4', 31: '5', 32: '6', 33: '7', 34: '8', 35: '9'}
alphabets = ['0','1','2','3','4','5','6','7','8','9','A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']
dataset_classes = []
for cls in alphabets:
dataset_classes.append([cls])
label_list = []
for l in labels:
label_list.append([l])
# One hot encoding format for output
    # recent scikit-learn removed the categorical_features argument, so pass only handle_unknown
    ohe = OneHotEncoder(handle_unknown='ignore')
ohe.fit(dataset_classes)
labels_ohe = ohe.transform(label_list).toarray()
return labels_ohe
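# With the default categories='auto', OneHotEncoder sorts the fitted classes,
# so the ten digits precede the letters and e.g. 'A' becomes a length-36
# row vector with a single 1 at index 10.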
def build_model():
# CNN model
model = Sequential()
model.add(Conv2D(32, (3, 3), padding='same', activation='relu', input_shape=(28,28,1)))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(36, activation='softmax'))
print(model.summary())
return model
def visualization():
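    # Note: newer tf.keras records these metrics under 'accuracy'/'val_accuracy';
    # the 'acc'/'val_acc' keys below assume an older standalone Keras.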
plt.figure(figsize=[8, 6])
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.figure(figsize=[8, 6])
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Please provide Amazon credentials')
parser.add_argument('--conf', default="../conf/config.cfg", help='the path of config.cfg')
args = parser.parse_args()
config = configparser.ConfigParser()
config.read(args.conf)
aws_access_key_id = config['AWS_access_credentials']['aws_access_key_id']
aws_secret_access_key = config['AWS_access_credentials']['aws_secret_access_key']
client = boto3.client('s3', aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key)
#load data
data_path = 'data.pickle'
labels_path = 'labels.pickle'
    # boto3's download_file expects a destination filename; stream into the open
    # handles with download_fileobj instead, and keep the paths consistent
    with open('../tmp/data.pickle', 'wb') as f:
        client.download_fileobj(config['buckets']['chardata'], data_path, f)
    with open('../tmp/labels.pickle', 'wb') as f:
        client.download_fileobj(config['buckets']['chardata'], labels_path, f)
    with open('../tmp/data.pickle', 'rb') as d:
        data = pickle.load(d)
    with open('../tmp/labels.pickle', 'rb') as l:
        labels = pickle.load(l)
labels_ohe = prepare()
data = np.array(data)
labels = np.array(labels)
# Split the data
X_train, X_test, y_train, y_test = train_test_split(data, labels_ohe, test_size=0.20, random_state=42)
    # infer the row counts rather than hard-coding them for one dataset size
    X_train = X_train.reshape(X_train.shape[0], 28, 28, 1)
    X_test = X_test.reshape(X_test.shape[0], 28, 28, 1)
print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape)
model = build_model()
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=20, batch_size=64)
model_save_path = '../conf/cnn_classifier.h5'
model.save(model_save_path)
client.upload_file(model_save_path, config['buckets']['models'], os.path.basename(model_save_path))
print("model has been saved and uploaded!")
|
using GeometricBase
using GeometricIntegrators.Config
using GeometricIntegrators.Discontinuities
using GeometricIntegrators.Integrators
using GeometricIntegrators.Utils
using GeometricProblems.HarmonicOscillator
using Test
using CompactBasisFunctions
using QuadratureRules
using GeometricProblems.HarmonicOscillator: Δt, nt, refx, refq, refp
iode = harmonic_oscillator_iode()
QGau4 = GaussLegendreQuadrature(4)
BGau4 = Lagrange(nodes(QGau4))
### CGVI Integrators ###
cgint = IntegratorCGVI(iode, BGau4, QGau4, Δt)
cgsol = integrate(iode, cgint, nt)
@test relative_maximum_error(cgsol.q, refx) < 1E-7
### DGVI Integrators ###
dgint = IntegratorDGVI(iode, BGau4, QGau4, Δt)
dgsol = integrate(iode, dgint, nt)
@test relative_maximum_error(dgsol.q, refx) < 1E-7
dgint = IntegratorDGVIP0(iode, BGau4, QGau4, Δt)
dgsol = integrate(iode, dgint, nt)
@test relative_maximum_error(dgsol.q, refx) < 1E-7
dgint = IntegratorDGVIP1(iode, BGau4, QGau4, Δt)
dgsol = integrate(iode, dgint, nt)
@test relative_maximum_error(dgsol.q, refx) < 1E-7
dgint = IntegratorDGVIEXP(iode, BGau4, QGau4, Δt)
dgsol = integrate(iode, dgint, nt)
@test relative_maximum_error(dgsol.q, refx) < 1E-7
dgint = IntegratorDGVIPI(iode, BGau4, QGau4, Discontinuity(PathIntegralLinear(), LobattoLegendreQuadrature(2)), Δt)
dgsol = integrate(iode, dgint, nt)
@test relative_maximum_error(dgsol.q, refx) < 1E-7
|
// Implementation of the interface in uint8_image.h.
#include <errno.h>
#include <float.h>
#include <limits.h>
#include <math.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <assert.h>
#include <glib.h>
#if GLIB_CHECK_VERSION (2, 6, 0)
# include <glib/gstdio.h>
#endif
#include <gsl/gsl_spline.h>
#include <gsl/gsl_histogram.h>
#include <gsl/gsl_math.h>
#include "asf_jpeg.h"
#include "uint8_image.h"
#include "asf.h"
#ifndef linux
#ifndef darwin
#ifndef win32
static double
round (double arg)
{
return floor (arg + 0.5);
}
#endif // #ifndef win32
#endif // #ifndef darwin
#endif // #ifndef linux
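// Note: unlike C99 round(), the fallback above rounds halfway cases toward
// +infinity (e.g. -2.5 maps to -2, not -3); that is adequate here, since
// round() is only applied to non-negative values in this file.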
#include "asf_glib.h"
// Default cache size to use is 16 megabytes.
static const size_t default_cache_size = 16 * 1048576;
// This class wide data element keeps track of the number of temporary
// tile files opened by the current process, in order to give them
// unique names.
static unsigned long current_tile_file_number = 0;
// We need to ensure that multiple threads trying to create their own
// images concurrently don't end up with the same temporary file
// names.
//G_LOCK_DEFINE_STATIC (current_tile_file_number);
#ifndef win32
// We don't want to let multiple threads twiddle the signal block mask
// concurrently, or we might end up with the wrong set of signals
// blocked. This lock is used to guarantee this can't happen (see the
// usage for a better explanation).
G_LOCK_DEFINE_STATIC (signal_block_activity);
#endif
// Return a FILE pointer referring to a new, already unlinked file in a
// location which hopefully has enough free space to serve as a block
// cache.
static FILE *
initialize_tile_cache_file (GString **tile_file_name)
{
// Create the temporary tile oriented storage file. This gets
// filled in in different ways depending on which creation routine
// we are using.
g_assert(*tile_file_name == NULL);
*tile_file_name = g_string_new ("");
// Here we do a slightly weird thing: if the current directory is
// writable, we create a temporary file in the current directory.
// We do this because the temporary file could well be pretty big
// and /tmp often maps to a small file system. The idea is that the
// directory the user is in is more likely to have the extra space
// required to hold the temporary file. Of course, if they have
// been carefully calculating their space requirements, they may be
// disappointed. We use a weird name that no sane user would ever
// use for one of their files, we hope.
//G_LOCK (current_tile_file_number);
g_assert (sizeof (long) >= sizeof (pid_t));
g_string_append_printf (*tile_file_name,
".uint8_image_tile_file_%ld_%lu",
(long) getpid (),
current_tile_file_number);
// This hard coded limit on the current number used to uniqueify
// file names limits us to creating no more than ULONG_MAX instances
// during a process.
g_assert (current_tile_file_number < ULONG_MAX);
current_tile_file_number++;
//G_UNLOCK (current_tile_file_number);
#ifndef win32
// We block signals while we create and unlink this file, so we
// don't end up leaving a huge temporary file somewhere.
// Theoretically, two parallel instantiations of image could end up
// in a race condition which would result in all signals ending up
// blocked after both were done with this section, so we consider
// this section critical and protect it with a lock.
G_LOCK (signal_block_activity);
sigset_t all_signals, old_set;
int return_code = sigfillset (&all_signals);
g_assert (return_code == 0);
return_code = sigprocmask (SIG_SETMASK, &all_signals, &old_set);
#endif
// FIXME?: It might be faster to use file descriptor based I/O
// everywhere, or at least for the big transfers. I'm not sure its
// worth the trouble though.
FILE *tile_file = fopen_tmp_file ((*tile_file_name)->str, "w+b");
if ( tile_file == NULL ) {
if ( errno != EACCES ) {
g_warning ("couldn't create file in current directory, and it wasn't"
"just a permissions problem");
}
else {
// Couldn't open in current directory, so try using tmpfile,
// which opens the file in the standardish place for the system.
// See the comment above about why opening in /tmp or the like
// is potentially bad.
tile_file = tmpfile ();
g_assert (tile_file != NULL);
}
}
else {
#ifndef win32
return_code = unlink_tmp_file ((*tile_file_name)->str);
g_assert (return_code == 0);
#endif
}
g_assert (tile_file != NULL);
#ifndef win32
return_code = sigprocmask (SIG_SETMASK, &old_set, NULL);
G_UNLOCK (signal_block_activity);
#endif
return tile_file;
}
// This routine does the work common to several of the different
// creation routines. Basically, it does everything but fill in the
// contents of the disk tile store.
static UInt8Image *
initialize_uint8_image_structure (ssize_t size_x, ssize_t size_y)
{
// Allocate instance memory.
UInt8Image *self = g_new0 (UInt8Image, 1);
// Validate and remember image size.
g_assert (size_x > 0 && size_y > 0);
self->size_x = size_x;
self->size_y = size_y;
// Greater of size_x and size_y.
size_t largest_dimension = (size_x > size_y ? size_x : size_y);
// If we can fit the entire image in a single square tile, then we
// want just a single big tile and we won't need to bother with the
// cache file since it won't ever be used, so we do things slightly
// differently. FIXME: it would be slightly better to also detect
// and specially handle the case where we have long narrow images
// that can fit in a single strip of tiles in the cache.
if ( largest_dimension * largest_dimension * sizeof (uint8_t)
<= default_cache_size ) {
self->cache_space = (largest_dimension * largest_dimension
* sizeof (uint8_t));
self->cache_area = self->cache_space / sizeof (uint8_t);
self->tile_size = largest_dimension;
self->cache_size_in_tiles = 1;
self->tile_count_x = 1;
self->tile_count_y = 1;
self->tile_count = 1;
self->tile_area = self->tile_size * self->tile_size;
self->cache = g_new (uint8_t, self->cache_area);
self->tile_addresses = g_new0 (uint8_t *, self->tile_count);
g_assert (NULL == 0x0); // Ensure g_new0 effectively sets to NULL.
// The tile queue shouldn't ever be needed in this case.
self->tile_queue = NULL;
// The tile file shouldn't ever be needed, so we set it to NULL to
// indicate this to a few other methods that use it directly, and
// to hopefully ensure that it triggers an exception if it is
// used.
self->tile_file = NULL;
return self;
}
// The default cache size compiled into the class.
self->cache_space = default_cache_size;
// Memory cache space, in pixels.
g_assert (self->cache_space % sizeof (uint8_t) == 0);
self->cache_area = self->cache_space / sizeof (uint8_t);
// How small do our tiles have to be on a side to fit two full rows
// of them in the memory cache? This is slightly tricky. In order
// to provide the services promised in the interface, we need to
// solve
//
// 2 * pow (t, 2) * ceil ((double)largest_dimension / t)
// <= self->cache_area
//
// for tile size t. I don't know the closed form solution if there
// is one, so toss out the ceil() and solve the easier
//
// 2 * pow (t, 2) * ((double)largest_dimension / t) <= self->cache_area
//
// and then decrement t iteratively until things work.
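//
// For example, with the default 16 MB cache (cache_area == 16777216
// pixels) and largest_dimension == 30000, the initial guess is
// t == 16777216 / 60000 == 279, which gives 2 * 279^2 * 108 == 16813656
// > 16777216; one decrement to t == 278 (2 * 278^2 * 108 == 16693344)
// satisfies the constraint.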
self->tile_size = self->cache_area / (2 * largest_dimension);
while ( (2 * self->tile_size * self->tile_size
* ceil ((double) largest_dimension / self->tile_size))
> self->cache_area ) {
self->tile_size--;
}
// Area of tiles, in pixels.
self->tile_area = (size_t) (self->tile_size * self->tile_size);
// Number of tiles which will fit in image cache.
self->cache_size_in_tiles = self->cache_area / self->tile_area;
// Can we fit at least as much as we intended in the cache?
g_assert (self->cache_size_in_tiles
>= 2 * (size_t) ceil ((double) largest_dimension
/ self->tile_size));
// Number of tiles image has been split into in x and y directions.
self->tile_count_x = (size_t) ceil ((double) self->size_x / self->tile_size);
self->tile_count_y = (size_t) ceil ((double) self->size_y / self->tile_size);
// Total number of image tiles image has been split into.
self->tile_count = self->tile_count_x * self->tile_count_y;
// We want to be able to pack a tile number into a pointer later, so
// we need it to fit into an integer.
g_assert (self->tile_count < INT_MAX);
// Did all that math work?
g_assert (self->tile_size * self->cache_size_in_tiles / 2
>= largest_dimension);
g_assert (self->cache_size_in_tiles * self->tile_area <= self->cache_area);
// Allocate memory for the in-memory cache.
self->cache = g_new (uint8_t, self->cache_area);
// Do we want to do mlock() here maybe?
// The addresses in the cache of the starts of each of the tiles.
// This array contains flattened tile addresses in the same way that
// image memory normally uses flattened pixel addresses, e.g. the
// address of tile x = 2, y = 4 is stored at self->tile_addresses[4
// * self->tile_count_x + 2]. If a tile isn't in the cache, the
// address is NULL (meaning it will have to be loaded).
self->tile_addresses = g_new0 (uint8_t *, self->tile_count);
g_assert (NULL == 0x0); // Ensure g_new0 effectively sets to NULL.
// Create a queue in order to keep track of which tile was loaded
// longest ago.
self->tile_queue = g_queue_new ();
// Get a new empty tile cache file pointer.
self->tile_file_name = NULL;
self->tile_file = initialize_tile_cache_file ( &(self->tile_file_name) );
return self;
}
UInt8Image *
uint8_image_thaw (FILE *file_pointer)
{
FILE *fp = file_pointer; // Convenience alias.
g_assert (file_pointer != NULL);
UInt8Image *self = g_new (UInt8Image, 1);
size_t read_count = fread (&(self->size_x), sizeof (size_t), 1, fp);
g_assert (read_count == 1);
read_count = fread (&(self->size_y), sizeof (size_t), 1, fp);
g_assert (read_count == 1);
read_count = fread (&(self->cache_space), sizeof (size_t), 1, fp);
g_assert (read_count == 1);
read_count = fread (&(self->cache_area), sizeof (size_t), 1, fp);
g_assert (read_count == 1);
read_count = fread (&(self->tile_size), sizeof (size_t), 1, fp);
g_assert (read_count == 1);
read_count = fread (&(self->cache_size_in_tiles), sizeof (size_t), 1, fp);
g_assert (read_count == 1);
read_count = fread (&(self->tile_count_x), sizeof (size_t), 1, fp);
g_assert (read_count == 1);
read_count = fread (&(self->tile_count_y), sizeof (size_t), 1, fp);
g_assert (read_count == 1);
read_count = fread (&(self->tile_count), sizeof (size_t), 1, fp);
g_assert (read_count == 1);
read_count = fread (&(self->tile_area), sizeof (size_t), 1, fp);
g_assert (read_count == 1);
// The cache isn't serialized -- it's a bit of a pain and probably
// almost never worth it.
self->cache = g_new (uint8_t, self->cache_area);
self->tile_addresses = g_new0 (uint8_t *, self->tile_count);
// We don't actually keep the tile queue in the serialized instance,
// but if the serialized pointer to it is NULL, we know we aren't
// using a tile cache file (i.e. the whole image fits in the memory
// cache).
read_count = fread (&(self->tile_queue), sizeof (GQueue *), 1, fp);
g_assert (read_count == 1);
// If there was no cache file...
if ( self->tile_queue == NULL ) {
// The tile_file structure field should also be NULL.
self->tile_file = NULL;
// we restore the file directly into the first and only tile (see
// the end of the uint8_image_new method).
self->tile_addresses[0] = self->cache;
read_count = fread (self->tile_addresses[0], sizeof (uint8_t),
self->tile_area, fp);
g_assert (read_count == self->tile_area);
}
// otherwise, an empty tile queue needs to be initialized, and the
// remainder of the serialized version is the tile block cache.
else {
self->tile_queue = g_queue_new ();
self->tile_file_name = NULL;
self->tile_file = initialize_tile_cache_file ( &(self->tile_file_name) );
uint8_t *buffer = g_new (uint8_t, self->tile_area);
size_t ii;
for ( ii = 0 ; ii < self->tile_count ; ii++ ) {
read_count = fread (buffer, sizeof (uint8_t), self->tile_area, fp);
g_assert (read_count == self->tile_area);
size_t write_count = fwrite (buffer, sizeof (uint8_t), self->tile_area,
self->tile_file);
if ( write_count < self->tile_area ) {
if ( feof (self->tile_file) ) {
fprintf (stderr,
"Premature end of file while trying to thaw UInt8Image "
"instance\n");
}
else {
g_assert (ferror (self->tile_file));
fprintf (stderr,
"Error writing tile cache file for UInt8Image instance "
"during thaw: %s\n", strerror (errno));
}
exit (EXIT_FAILURE);
}
g_assert (write_count == self->tile_area);
}
g_free (buffer);
}
return self;
}
UInt8Image *
uint8_image_new (ssize_t size_x, ssize_t size_y)
{
g_assert (size_x > 0 && size_y > 0);
UInt8Image *self = initialize_uint8_image_structure (size_x, size_y);
// If we need a tile file for an image of this size, prepare it.
if ( self->tile_file != NULL ) {
// The total width or height of all the tiles is probably greater
// than the width or height of the image itself.
size_t total_width = self->tile_count_x * self->tile_size;
size_t total_height = self->tile_count_y * self->tile_size;
// Fill the file full of zeros. FIXME: there is almost certainly
// a faster way to ensure that we have the disk space we need.
uint8_t *zero_line = g_new0 (uint8_t, total_width);
g_assert (0 == 0x0); // Ensure the g_new0 did what we think.
// We don't have to write in tile order because it's all zeros anyway.
size_t ii;
for ( ii = 0 ; ii < total_height ; ii++ ) {
size_t write_count = fwrite (zero_line, sizeof (uint8_t), total_width,
self->tile_file);
// If we wrote less than expected,
if ( write_count < total_width ) {
// it must have been a write error (probably no space left),
g_assert (ferror (self->tile_file));
// so print an error message,
fprintf (stderr,
"Error writing tile cache file for UInt8Image instance: "
"%s\n", strerror (errno));
// and exit.
exit (EXIT_FAILURE);
}
}
// Done with the line of zeros.
g_free (zero_line);
}
// Everything fits in the cache (at the moment this means everything
// fits in the first tile, which is a bit of a FIXME), so just put
// it there.
else {
self->tile_addresses[0] = self->cache;
size_t ii, jj;
for ( ii = 0 ; ii < self->tile_size ; ii++ ) {
for ( jj = 0 ; jj < self->tile_size ; jj++ ) {
self->tile_addresses[0][ii * self->tile_size + jj] = 0;
}
}
}
return self;
}
UInt8Image *
uint8_image_new_with_value (ssize_t size_x, ssize_t size_y, uint8_t value)
{
// Carefully clone-and-modified over from float_image.c, but not
// tested yet.
g_assert_not_reached ();
g_assert (size_x > 0 && size_y > 0);
UInt8Image *self = initialize_uint8_image_structure (size_x, size_y);
// If we need a tile file for an image of this size, prepare it.
if ( self->tile_file != NULL ) {
// The total width or height of all the tiles is probably greater
// than the width or height of the image itself.
size_t total_width = self->tile_count_x * self->tile_size;
size_t total_height = self->tile_count_y * self->tile_size;
// Fill the file full of the given value.
uint8_t *value_line = g_new (uint8_t, total_width);
size_t ii;
for ( ii = 0 ; ii < total_width ; ii++ ) {
value_line[ii] = value;
}
// We don't have to write in tile order because the values are all
// the same anyway.
for ( ii = 0 ; ii < total_height ; ii++ ) {
size_t write_count = fwrite (value_line, sizeof (uint8_t), total_width,
self->tile_file);
// If we wrote less than expected,
if ( write_count < total_width ) {
// it must have been a write error (probably no space left),
g_assert (ferror (self->tile_file));
// so print an error message,
fprintf (stderr,
"Error writing tile cache file for UInt8Image instance: "
"%s\n", strerror (errno));
// and exit.
exit (EXIT_FAILURE);
}
}
// Done with the line of values.
g_free (value_line);
}
// Everything fits in the cache (at the moment this means everything
// fits in the first tile, which is a bit of a FIXME), so just put
// it there.
else {
self->tile_addresses[0] = self->cache;
size_t ii, jj;
for ( ii = 0 ; ii < self->tile_size ; ii++ ) {
for ( jj = 0 ; jj < self->tile_size ; jj++ ) {
self->tile_addresses[0][ii * self->tile_size + jj] = value;
}
}
}
return self;
}
UInt8Image *
uint8_image_new_from_memory (ssize_t size_x, ssize_t size_y, uint8_t *buffer)
{
g_assert (size_x > 0 && size_y > 0);
g_assert_not_reached (); // Stubbed out for now.
// Compiler reassurance.
size_x = size_x;
size_y = size_y;
buffer = buffer;
return NULL;
}
// Bilinear interpolation for a point delta_x, delta_y from the lower
// left corner between values ul (upper left), ur (upper right), etc.
// The corners are considered to be corners of a unit square.
static float
bilinear_interpolate (double delta_x, double delta_y, float ul, float ur,
float ll, float lr)
{
float lv = ll + (lr - ll) * delta_x; // Lower value.
float uv = ul + (ur - ul) * delta_x; // Upper value.
return lv + (uv - lv) * delta_y;
}
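// Sanity check: at the center of the unit square (delta_x == delta_y == 0.5)
// this reduces to the average (ul + ur + ll + lr) / 4.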
UInt8Image *
uint8_image_copy (UInt8Image *model)
{
// This method should be totally fine: it is a trivial clone-and-modify
// of the corresponding method in float_image.c, but since it's untested
// it is disabled for the moment.
g_assert_not_reached ();
// FIXME: this could obviously be optimized a lot by copying the
// existing tile file, etc.
UInt8Image *self = uint8_image_new (model->size_x, model->size_y);
size_t ii, jj;
for ( ii = 0 ; ii < self->size_y ; ii++ ) {
for ( jj = 0 ; jj < self->size_x ; jj++ ) {
uint8_image_set_pixel (self, jj, ii,
uint8_image_get_pixel (model, jj, ii));
}
}
return self;
}
UInt8Image *
uint8_image_new_from_model_scaled (UInt8Image *model, ssize_t scale_factor)
{
g_assert (model->size_x > 0 && model->size_y > 0);
g_assert (scale_factor > 0);
g_assert (scale_factor % 2 == 1);
UInt8Image *self
= uint8_image_new (round ((double) model->size_x / scale_factor),
round ((double) model->size_y / scale_factor));
// This method hasn't yet made the jump from FloatImage to here.
// The implementation should ultimately follow the one in FloatImage
// to achieve the interface description given in uint8_image.h.
g_assert_not_reached ();
return self;
}
UInt8Image *
uint8_image_new_subimage (UInt8Image *model, ssize_t x, ssize_t y,
ssize_t size_x, ssize_t size_y)
{
// Upper left corner must be in model.
g_assert (x >= 0 && y >= 0);
// Size of image to be created must be strictly positive.
g_assert (size_x >= 1 && size_y >= 1);
// Given model must be big enough to allow a subimage of the
// requested size to fit.
g_assert (model->size_x <= SSIZE_MAX && model->size_y <= SSIZE_MAX);
g_assert (x + size_x <= (ssize_t) model->size_x);
g_assert (y + size_y <= (ssize_t) model->size_y);
UInt8Image *self = uint8_image_new (size_x, size_y);
// Copy the image pixels from the model.
ssize_t ii, jj;
for ( ii = 0 ; ii < (ssize_t) self->size_x ; ii++ ) {
for ( jj = 0 ; jj < (ssize_t) self->size_y ; jj++ ) {
uint8_t pv = uint8_image_get_pixel (model, x + ii, y + jj);
uint8_image_set_pixel (self, ii, jj, pv);
}
}
return self;
}
// Return true iff file is size or larger.
static gboolean
is_large_enough (const char *file, off_t size)
{
struct stat stat_buffer;
#if GLIB_CHECK_VERSION(2, 6, 0)
int return_code = g_stat (file, &stat_buffer);
if ( return_code != 0 ) {
g_error ("Couldn't g_stat file %s: %s", file, strerror (errno));
}
#else
int return_code = stat (file, &stat_buffer);
if ( return_code != 0 ) {
g_error ("Couldn't stat file %s: %s", file, strerror (errno));
}
#endif
return stat_buffer.st_size >= size;
}
UInt8Image *
uint8_image_new_from_file (ssize_t size_x, ssize_t size_y, const char *file,
off_t offset)
{
g_assert (size_x > 0 && size_y > 0);
// Check in advance if the source file looks big enough (we will
// still need to check return codes as we read() data, of course).
g_assert (is_large_enough (file, offset + ((off_t) size_x * size_y
* sizeof (uint8_t))));
// Open the file to read data from.
FILE *fp = fopen (file, "rb");
// FIXME: we need some error handling and propagation here.
g_assert (fp != NULL);
UInt8Image *self = uint8_image_new_from_file_pointer (size_x, size_y, fp,
offset);
// Close file we read image from.
int return_code = fclose (fp);
g_assert (return_code == 0);
return self;
}
// Return true iff the file referred to by file_pointer is at least size bytes long.
static gboolean
file_pointed_to_larger_than (FILE *file_pointer, off_t size)
{
struct stat stat_buffer;
int return_code = fstat (fileno (file_pointer), &stat_buffer);
g_assert (return_code == 0);
return stat_buffer.st_size >= size;
}
UInt8Image *
uint8_image_new_from_file_pointer (ssize_t size_x, ssize_t size_y,
FILE *file_pointer, off_t offset)
{
g_assert (size_x > 0 && size_y > 0);
// Check in advance if the source file looks big enough (we will
// still need to check return codes as we read() data, of course).
g_assert (file_pointed_to_larger_than (file_pointer,
offset + ((off_t) size_x * size_y
* sizeof (uint8_t))));
UInt8Image *self = initialize_uint8_image_structure (size_x, size_y);
FILE *fp = file_pointer; // Convenience alias.
// Seek to the indicated offset in the file.
int return_code = FSEEK64 (fp, offset, SEEK_CUR);
g_assert (return_code == 0);
// If we need a tile file for an image of this size, we will load
// the data straight into it.
if ( self->tile_file != NULL ) {
// We will read the input image data in horizontal strips one tile
// high. Note that we probably won't be able to entirely fill the
// last tiles in each dimension with real data, since the image
// sizes rarely divide evenly by the numbers of tiles. So we fill
// it with zeros instead. The data off the edges of the image
// should never be accessed directly anyway.
// Some data for doing zero fill. If the tiles are bigger than
// the image itself, we need to make the available zero fill the
// size of the tile instead of the size of the image.
g_assert (self->tile_size <= SSIZE_MAX);
uint8_t *zero_line = g_new0 (uint8_t, (size_x > (ssize_t) self->tile_size ?
(size_t) size_x : self->tile_size));
g_assert (0 == 0x0);
// Buffer capable of holding a full strip.
uint8_t *buffer = g_new (uint8_t, self->tile_size * self->size_x);
// Reorganize data into tiles in tile oriented disk file.
size_t ii = 0;
for ( ii = 0 ; ii < self->tile_count_y ; ii++ ) {
// The "effective_height" of the strip is the portion of the
// strip for which data actually exists. If the effective
// height is less than self->tile_size, we will have to add some
// junk to fill up the extra part of the tile (which should
// never be accessed).
size_t effective_height;
if ( ii < self->tile_count_y - 1
|| self->size_y % self->tile_size == 0 ) {
effective_height = self->tile_size;
}
else {
effective_height = self->size_y % self->tile_size;
}
// Total area of the current strip.
size_t strip_area = effective_height * self->size_x;
// Read one strip of tiles worth of data from the file.
size_t read_count = fread (buffer, sizeof (uint8_t), strip_area, fp);
g_assert (read_count == strip_area);
// Write data from the strip into the tile store.
size_t jj;
for ( jj = 0 ; jj < self->tile_count_x ; jj++ ) {
// This is roughly analogous to effective_height.
size_t effective_width;
if ( jj < self->tile_count_x - 1
|| self->size_x % self->tile_size == 0) {
effective_width = self->tile_size;
}
else {
effective_width = self->size_x % self->tile_size;
}
size_t write_count; // For return of fwrite() calls.
size_t kk;
for ( kk = 0 ; kk < effective_height ; kk++ ) {
write_count
= fwrite (buffer + kk * self->size_x + jj * self->tile_size,
sizeof (uint8_t), effective_width, self->tile_file);
// If we wrote less than expected,
if ( write_count < effective_width ) {
// it must have been a write error (probably no space left),
g_assert (ferror (self->tile_file));
// so print an error message,
fprintf (stderr,
"Error writing tile cache file for UInt8Image "
"instance: %s\n", strerror (errno));
// and exit.
exit (EXIT_FAILURE);
}
if ( effective_width < self->tile_size ) {
// Amount we have left to write to fill out the last tile.
size_t edge_width = self->tile_size - effective_width;
write_count = fwrite (zero_line, sizeof (uint8_t), edge_width,
self->tile_file);
// If we wrote less than expected,
if ( write_count < edge_width ) {
// it must have been a write error (probably no space left),
g_assert (ferror (self->tile_file));
// so print an error message,
fprintf (stderr,
"Error writing tile cache file for UInt8Image "
"instance: %s\n", strerror (errno));
// and exit.
exit (EXIT_FAILURE);
}
}
}
// Finish writing the bottom of the tile for which there is no
// image data (should only happen if we are on the last strip of
// tiles).
for ( ; kk < self->tile_size ; kk++ ) {
g_assert (ii == self->tile_count_y - 1);
write_count = fwrite (zero_line, sizeof (uint8_t), self->tile_size,
self->tile_file);
// If we wrote less than expected,
if ( write_count < self->tile_size ) {
// it must have been a write error (probably no space left),
g_assert (ferror (self->tile_file));
// so print an error message,
fprintf (stderr,
"Error writing tile cache file for UInt8Image "
"instance: %s\n", strerror (errno));
// and exit.
exit (EXIT_FAILURE);
}
}
}
}
// Did we write the correct total amount of data?
g_assert (FTELL64 (self->tile_file)
== ((off_t) self->tile_area * self->tile_count
* sizeof (uint8_t)));
// Free temporary buffers.
g_free (buffer);
g_free (zero_line);
}
// Everything fits in the cache (at the moment this means everything
// fits in the first tile, which is a bit of a FIXME), so just put
// it there.
else {
self->tile_addresses[0] = self->cache;
size_t ii;
for ( ii = 0 ; ii < self->size_y ; ii++ ) {
// Address where the current row of pixels should end up.
uint8_t *row_address = self->tile_addresses[0] + ii * self->tile_size;
// Read the data.
size_t read_count = fread (row_address, sizeof (uint8_t), self->size_x,
fp);
g_assert (read_count == self->size_x);
}
}
return self;
}
UInt8Image *
uint8_image_new_from_file_scaled (ssize_t size_x, ssize_t size_y,
ssize_t original_size_x,
ssize_t original_size_y,
const char *file, off_t offset)
{
// This method has been carefully translated from the corresponding
// method in float_image.c, but it hasn't been tested yet.
g_assert_not_reached ();
g_assert (size_x > 0 && size_y > 0);
g_assert (original_size_x > 0 && original_size_y > 0);
// Image can only be scaled down with this routine, not up.
g_assert (size_x < original_size_x);
g_assert (size_y < original_size_y);
// Check in advance if the source file looks big enough (we will
// still need to check return codes as we read() data, of course).
g_assert (is_large_enough (file,
offset + ((off_t) original_size_x
* original_size_y
* sizeof (uint8_t))));
// Find the stride that we need to use in each dimension to evenly
// cover the original image space.
double stride_x = (size_x == 1 ? 0.0
: (double) (original_size_x - 1) / (size_x - 1));
double stride_y = (size_y == 1 ? 0.0
: (double) (original_size_y - 1) / (size_y - 1));
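// For example, reducing a 100 pixel dimension to 10 samples gives a stride
// of 99.0 / 9 == 11.0, so samples land on original indices 0, 11, ..., 99,
// pinning the first and last samples to the image edges.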
// Open the file to read data from.
FILE *fp = fopen (file, "rb");
// FIXME: we need some error handling and propagation here.
g_assert (fp != NULL);
// We will do a row at a time to save some possibly expensive
// seeking. So here we have an entire row worth of upper lefts,
// upper rights, etc.
uint8_t *uls = g_new (uint8_t, size_x);
uint8_t *urs = g_new (uint8_t, size_x);
uint8_t *lls = g_new (uint8_t, size_x);
uint8_t *lrs = g_new (uint8_t, size_x);
// Results of rounded bilinear interpolation for the current row.
uint8_t *interpolated_values = g_new (uint8_t, size_x);
// We will write the reduced resolution version of the image into a
// temporary file so we can leverage the new_from_file method and
// avoid trying to stick the whole reduced resolution image in
// memory.
FILE *reduced_image = tmpfile ();
ssize_t ii, jj;
for ( ii = 0 ; ii < size_y ; ii++ ) {
size_t read_count; // For fread calls.
int return_code; // For FSEEK64 calls.
// Input image y index of row above row of interest.
ssize_t in_ray = floor (ii * stride_y);
// Due to the vagaries of floating point arithmetic, we might run
// past the index of our last pixel by a little bit, so we correct.
if ( in_ray >= original_size_y - 1 ) {
// We better not be much over the last index though.
g_assert (in_ray < original_size_y);
// The index should be an integer, so floor should fix us up.
in_ray = floor (in_ray);
g_assert (in_ray == original_size_y - 1);
}
g_assert (in_ray < original_size_y);
// Input image y index of row below row of interest. If we would
// be off the image, we just take the last row a second time, and
// let the interpolation work things out.
ssize_t in_rby;
if ( in_ray == original_size_y - 1 ) {
in_rby = in_ray;
}
else {
in_rby = in_ray + 1;
}
// Fetch the row above.
for ( jj = 0 ; jj < size_x ; jj++ ) {
// Input image indices of current upper left corner pixel.
ssize_t in_ul_x = floor (jj * stride_x);
// Watch for floating point inexactness (see comment above).
if ( G_UNLIKELY (in_ul_x >= original_size_x - 1) ) {
g_assert (in_ul_x < original_size_x);
in_ul_x = floor (in_ul_x);
g_assert (in_ul_x == original_size_x - 1);
}
g_assert (in_ul_x < original_size_x);
size_t in_ul_y = in_ray;
off_t sample_offset
= offset + sizeof (uint8_t) * ((off_t) in_ul_y * original_size_x
+ in_ul_x);
return_code = FSEEK64 (fp, sample_offset, SEEK_SET);
g_assert (return_code == 0);
read_count = fread (&(uls[jj]), sizeof (uint8_t), 1, fp);
g_assert (read_count == 1);
// If the upper left pixel was the last pixel in the input image,
if ( in_ul_x == original_size_x - 1 ) {
// just treat it as the upper right as well,
urs[jj] = uls[jj];
}
// otherwise read the next pixel as the upper right pixel.
else {
read_count = fread (&(urs[jj]), sizeof (uint8_t), 1, fp);
g_assert (read_count == 1);
}
}
// Fetch the row below.
for ( jj = 0 ; jj < size_x ; jj++ ) {
// Input image indices of the lower left corner pixel.
ssize_t in_ll_x = floor (jj * stride_x);
// Watch for floating point inexactness (see comment above).
if ( G_UNLIKELY (in_ll_x >= original_size_x - 1) ) {
g_assert (in_ll_x < original_size_x);
in_ll_x = floor (in_ll_x);
g_assert (in_ll_x == original_size_x - 1);
}
g_assert (in_ll_x < original_size_x);
size_t in_ll_y = in_rby;
off_t sample_offset
= offset + sizeof (uint8_t) * ((off_t) in_ll_y * original_size_x
+ in_ll_x);
return_code = FSEEK64 (fp, sample_offset, SEEK_SET);
g_assert (return_code == 0);
read_count = fread (&(lls[jj]), sizeof (uint8_t), 1, fp);
g_assert (read_count == 1);
// If the lower left pixel was the last pixel in the input image,
if ( in_ll_x == original_size_x - 1 ) {
// just treat it as the lower right as well,
lrs[jj] = lls[jj];
}
// otherwise read the next pixel as the lower right pixel.
else {
read_count = fread (&(lrs[jj]), sizeof (uint8_t), 1, fp);
g_assert (read_count == 1);
}
}
// Perform the interpolation.
for ( jj = 0 ; jj < size_x ; jj++ ) {
double delta_x = stride_x * jj - floor (stride_x * jj);
double delta_y = -(stride_y * ii - floor (stride_y * ii));
float interpolated_value = bilinear_interpolate (delta_x, delta_y,
uls[jj], urs[jj],
lls[jj], lrs[jj]);
g_assert (interpolated_value >= 0);
g_assert (interpolated_value <= UINT8_MAX);
interpolated_values[jj] = round (interpolated_value);
}
size_t write_count = fwrite (interpolated_values, sizeof (uint8_t), size_x,
reduced_image);
g_assert (write_count == (size_t) size_x);
}
// We are done with the temporary buffers.
g_free (interpolated_values);
g_free (lrs);
g_free (lls);
g_free (urs);
g_free (uls);
// Reposition to the beginning of the temporary file to fit with
// operation of new_from_file_pointer method.
int return_code = FSEEK64 (reduced_image, (off_t) 0, SEEK_SET);
g_assert (return_code == 0);
// Slurp the scaled file back in as an instance.
UInt8Image *self
= uint8_image_new_from_file_pointer (size_x, size_y, reduced_image,
(off_t) 0);
// Now that we have an instantiated version of the image we are done
// with this temporary file.
return_code = fclose (reduced_image);
g_assert (return_code == 0);
return self;
}
// Returns a new UInt8Image, for the image corresponding to the given metadata.
UInt8Image *
uint8_image_new_from_metadata(meta_parameters *meta, const char *file)
{
return uint8_image_band_new_from_metadata(meta, 0, file);
}
// Returns a new UInt8Image, for the image band corresponding to the
// given metadata.
UInt8Image *
uint8_image_band_new_from_metadata(meta_parameters *meta,
int band, const char *file)
{
int nl = meta->general->line_count;
int ns = meta->general->sample_count;
FILE * fp = FOPEN(file, "rb");
UInt8Image * bi = uint8_image_new(ns, nl);
int i,j;
unsigned char *buf = MALLOC(sizeof(unsigned char)*ns);
for (i = 0; i < nl; ++i) {
get_byte_line(fp, meta, i+band*nl, buf);
for (j = 0; j < ns; ++j)
uint8_image_set_pixel(bi, j, i, buf[j]);
}
free(buf);
fclose(fp);
return bi;
}
// Copy the contents of tile with flattened offset tile_offset from
// the memory cache to the disk file. It's probably easiest to
// understand this function by looking at how it's used.
static void
cached_tile_to_disk (UInt8Image *self, size_t tile_offset)
{
// If we aren't using a tile file, this operation doesn't make
// sense.
g_assert (self->tile_file != NULL);
// We must have a legitimate tile_offset.
g_assert (tile_offset < self->tile_count);
// The tile we are trying to copy from cache to disk must be loaded
// in the cache for this operation to make sense.
g_assert (self->tile_addresses[tile_offset] != NULL);
int return_code
= FSEEK64 (self->tile_file,
(off_t) tile_offset * self->tile_area * sizeof (uint8_t),
SEEK_SET);
g_assert (return_code == 0);
size_t write_count = fwrite (self->tile_addresses[tile_offset],
sizeof (uint8_t), self->tile_area,
self->tile_file);
g_assert (write_count == self->tile_area);
}
// Return true iff tile (x, y) is already loaded into the memory cache.
static gboolean
tile_is_loaded (UInt8Image *self, ssize_t x, ssize_t y)
{
g_assert ( x >= 0 && (size_t) x < self->tile_count_x
&& y >= 0 && (size_t) y < self->tile_count_y );
size_t tile_offset = self->tile_count_x * y + x;
return self->tile_addresses[tile_offset] != NULL;
}
// Load (currently unloaded) tile (x, y) from disk cache into memory
// cache, possibly displacing the oldest tile already loaded, updating
// the load order queue, and returning the address of the tile loaded.
static uint8_t *
load_tile (UInt8Image *self, ssize_t x, ssize_t y)
{
// Make sure we haven't screwed up somehow and not created a tile
// file when in fact we should have.
g_assert (self->tile_file != NULL);
g_assert (!tile_is_loaded (self, x, y));
// Address into which tile gets loaded (to be returned).
uint8_t *tile_address;
// Offset of tile in flattened array.
size_t tile_offset = self->tile_count_x * y + x;
// We have to check and see if we have to displace an already loaded
// tile or not.
if ( self->tile_queue->length == self->cache_size_in_tiles ) {
// Displace tile loaded longest ago.
size_t oldest_tile
= GPOINTER_TO_INT (g_queue_pop_tail (self->tile_queue));
cached_tile_to_disk (self, oldest_tile);
tile_address = self->tile_addresses[oldest_tile];
self->tile_addresses[oldest_tile] = NULL;
}
else {
// Load tile into first free slot.
tile_address = self->cache + self->tile_queue->length * self->tile_area;
}
// Put the new tile address into the index, and put the index into
// the load order queue.
self->tile_addresses[tile_offset] = tile_address;
// Stash in queue by converting to a pointer (so it must fit in an int).
g_assert (tile_offset < INT_MAX);
g_queue_push_head (self->tile_queue,
GINT_TO_POINTER ((int) tile_offset));
// Load the tile data.
int return_code
= FSEEK64 (self->tile_file,
(off_t) tile_offset * self->tile_area * sizeof (uint8_t),
SEEK_SET);
g_assert (return_code == 0);
clearerr (self->tile_file);
size_t read_count = fread (tile_address, sizeof (uint8_t), self->tile_area,
self->tile_file);
if ( read_count < self->tile_area ) {
if ( ferror (self->tile_file) ) {
perror ("error reading tile cache file");
g_assert_not_reached ();
}
if ( feof (self->tile_file) ) {
fprintf (stderr,
"nothing left to read in tile cache file at offset %lld\n",
FTELL64 (self->tile_file));
g_assert_not_reached ();
}
}
g_assert (read_count == self->tile_area);
return tile_address;
}
uint8_t
uint8_image_get_pixel (UInt8Image *self, ssize_t x, ssize_t y)
{
// Are we at a valid image pixel?
g_assert (x >= 0 && (size_t) x < self->size_x);
g_assert (y >= 0 && (size_t) y < self->size_y);
// Get the pixel coordinates, including tile and pixel-in-tile.
g_assert (sizeof (long int) >= sizeof (size_t));
ldiv_t pc_x = ldiv (x, self->tile_size), pc_y = ldiv (y, self->tile_size);
// Offset of the tile containing pixel (x, y), with tiles indexed the
// way pixels normally are.
size_t tile_offset = self->tile_count_x * pc_y.quot + pc_x.quot;
// Address of data for tile containing pixel of interest (may still
// have to be loaded from disk cache).
uint8_t *tile_address = self->tile_addresses[tile_offset];
// Load the tile containing the pixel of interest if necessary.
if ( G_UNLIKELY (tile_address == NULL) ) {
tile_address = load_tile (self, pc_x.quot, pc_y.quot);
}
// Return pixel of interest.
return tile_address[self->tile_size * pc_y.rem + pc_x.rem];
}
void
uint8_image_set_pixel (UInt8Image *self, ssize_t x, ssize_t y, uint8_t value)
{
// Are we at a valid image pixel?
g_assert (self != NULL);
g_assert (x >= 0 && (size_t) x < self->size_x);
g_assert (y >= 0 && (size_t) y < self->size_y);
// Get the pixel coordinates, including tile and pixel-in-tile.
g_assert (sizeof (long int) >= sizeof (size_t));
ldiv_t pc_x = ldiv (x, self->tile_size), pc_y = ldiv (y, self->tile_size);
// Offset of the tile containing pixel (x, y), with tiles indexed the
// way pixels normally are.
size_t tile_offset = self->tile_count_x * pc_y.quot + pc_x.quot;
// Address of data for tile containing pixel of interest (may still
// have to be loaded from disk cache).
uint8_t *tile_address = self->tile_addresses[tile_offset];
// Load the tile containing the pixel of interest if necessary.
if ( G_UNLIKELY (tile_address == NULL) ) {
tile_address = load_tile (self, pc_x.quot, pc_y.quot);
}
// Set pixel of interest.
tile_address[self->tile_size * pc_y.rem + pc_x.rem] = value;
}
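// A small sketch (not in the original source) of the get/set pixel
// pair: fill a new 4x4 image with a checkerboard pattern and read one
// value back.  Both accessors go through the tile cache transparently.
static void
example_checkerboard (void)
{
  UInt8Image *img = uint8_image_new (4, 4);
  ssize_t x, y;
  for ( y = 0 ; y < 4 ; y++ ) {
    for ( x = 0 ; x < 4 ; x++ ) {
      uint8_image_set_pixel (img, x, y, (x + y) % 2 == 0 ? 0 : UINT8_MAX);
    }
  }
  g_assert (uint8_image_get_pixel (img, 1, 0) == UINT8_MAX);
  uint8_image_free (img);
}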
void
uint8_image_get_region (UInt8Image *self, ssize_t x, ssize_t y, ssize_t size_x,
ssize_t size_y, uint8_t *buffer)
{
g_assert (size_x >= 0);
g_assert (x >= 0);
g_assert ((size_t) x + (size_t) size_x - 1 < self->size_x);
g_assert (size_y >= 0);
g_assert (y >= 0);
g_assert ((size_t) y + (size_t) size_y - 1 < self->size_y);
ssize_t ii, jj; // Index variables.
for ( ii = 0 ; ii < size_y ; ii++ ) {
for ( jj = 0 ; jj < size_x ; jj++ ) {
// We are essentially returning a subimage from the big image.
// These are the indices in the big image (self) of the current
// pixel.
size_t ix = x + jj, iy = y + ii;
buffer[ii * size_x + jj] = uint8_image_get_pixel (self, ix, iy);
}
}
}
void
uint8_image_set_region (UInt8Image *self, size_t x, size_t y, size_t size_x,
size_t size_y, uint8_t *buffer)
{
g_assert_not_reached (); // Stubbed out for now.
self = self; x = x; y = y; size_x = size_x; size_y = size_y; buffer = buffer;
}
void
uint8_image_get_row (UInt8Image *self, size_t row, uint8_t *buffer)
{
uint8_image_get_region (self, 0, row, self->size_x, 1, buffer);
}
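// Sketch (not in the original source): compute the mean of one row
// using the row-at-a-time accessor, which avoids a per-pixel function
// call for every sample in the line.
static double
example_row_mean (UInt8Image *img, size_t row)
{
  uint8_t *buf = g_new (uint8_t, img->size_x);
  double sum = 0.0;
  size_t jj;
  uint8_image_get_row (img, row, buf);
  for ( jj = 0 ; jj < img->size_x ; jj++ ) {
    sum += buf[jj];
  }
  g_free (buf);
  return sum / img->size_x;
}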
uint8_t
uint8_image_get_pixel_with_reflection (UInt8Image *self, ssize_t x, ssize_t y)
{
// Carefully cloned and modified from float_image.c, but not yet
// tested.
//g_assert_not_reached ();
// Reflect at image edges as advertised.
if ( x < 0 ) {
x = -x;
}
else if ( (size_t) x >= self->size_x ) {
x = self->size_x - 2 - (x - self->size_x);
}
if ( y < 0 ) {
y = -y;
}
else if ( (size_t) y >= self->size_y ) {
y = self->size_y - 2 - (y - self->size_y);
}
return uint8_image_get_pixel (self, x, y);
}
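// Sketch (not in the original source): average a 3x3 neighborhood that
// may extend past the image edges, relying on the reflection behavior
// of the accessor above (this is exactly the situation apply_kernel
// faces at corner pixels).
static double
example_reflected_neighborhood_mean (UInt8Image *img, ssize_t x, ssize_t y)
{
  double sum = 0.0;
  ssize_t dx, dy;
  for ( dy = -1 ; dy <= 1 ; dy++ ) {
    for ( dx = -1 ; dx <= 1 ; dx++ ) {
      sum += uint8_image_get_pixel_with_reflection (img, x + dx, y + dy);
    }
  }
  return sum / 9.0;
}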
void
uint8_image_statistics (UInt8Image *self, uint8_t *min, uint8_t *max,
double *mean, double *standard_deviation,
gboolean use_mask_value, uint8_t mask_value)
{
// Carefully cloned and modified from float_image.c, but not yet
// tested.
//g_assert_not_reached ();
// Minimum and maximum sample values as integers.
int imin = INT_MAX, imax = INT_MIN;
// Buffer for one row of samples.
uint8_t *row_buffer = g_new (uint8_t, self->size_x);
*mean = 0.0;
double s = 0.0;
size_t sample_count = 0; // Samples considered so far.
size_t ii, jj;
// If there is a mask value we are supposed to ignore,
if ( use_mask_value ) {
// iterate over all pixels, skipping pixels equal to mask value.
for ( ii = 0 ; ii < self->size_y ; ii++ ) {
asfPercentMeter((double)ii/(double)(self->size_y));
uint8_image_get_row (self, ii, row_buffer);
for ( jj = 0 ; jj < self->size_x ; jj++ ) {
uint8_t cs = row_buffer[jj]; // Current sample.
if ( cs == mask_value ) {
continue;
}
if ( G_UNLIKELY (cs < imin) ) { imin = cs; }
if ( G_UNLIKELY (cs > imax) ) { imax = cs; }
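        // Welford-style incremental update: *mean tracks the running
        // mean, and s accumulates the sum of squared deviations from it
        // (used for the standard deviation at the end).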
double old_mean = *mean;
*mean += (cs - *mean) / (sample_count + 1);
s += (cs - old_mean) * (cs - *mean);
sample_count++;
}
}
asfPercentMeter(1.0);
}
else {
// There is no mask value to ignore, so we do the same as the
// above loop, but without the possible continue statement.
for ( ii = 0 ; ii < self->size_y ; ii++ ) {
asfPercentMeter((double)ii/(double)(self->size_y));
uint8_image_get_row (self, ii, row_buffer);
for ( jj = 0 ; jj < self->size_x ; jj++ ) {
uint8_t cs = row_buffer[jj]; // Current sample.
if ( G_UNLIKELY (cs < imin) ) { imin = cs; }
if ( G_UNLIKELY (cs > imax) ) { imax = cs; }
double old_mean = *mean;
*mean += (cs - *mean) / (sample_count + 1);
s += (cs - old_mean) * (cs - *mean);
sample_count++;
}
}
asfPercentMeter(1.0);
}
g_free (row_buffer);
// Verify the new extrema have been found.
g_assert (imin != INT_MAX);
g_assert (imax != INT_MIN);
// The new extrema had better be in the range supported by uint8_t.
g_assert (imin >= 0);
g_assert (imax <= UINT8_MAX);
*min = imin;
*max = imax;
*standard_deviation = sqrt (s / (sample_count - 1));
}
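// Sketch (not in the original source): gather exact statistics for an
// image when no mask value is in play.
static void
example_print_statistics (UInt8Image *img)
{
  uint8_t min, max;
  double mean, sdev;
  uint8_image_statistics (img, &min, &max, &mean, &sdev, FALSE, 0);
  fprintf (stderr, "min=%d max=%d mean=%g sdev=%g\n", min, max, mean, sdev);
}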
int
uint8_image_band_statistics (UInt8Image *self, meta_stats *stats,
int line_count, int band_no,
gboolean use_mask_value, uint8_t mask_value)
{
// Carefully cloned and modified from float_image.c, but not yet
// tested.
//g_assert_not_reached ();
// Minimum and maximum sample values as integers.
int imin = INT_MAX, imax = INT_MIN;
// Buffer for one row of samples.
uint8_t *row_buffer = g_new (uint8_t, self->size_x);
stats->mean = 0.0;
double s = 0.0;
size_t sample_count = 0; // Samples considered so far.
size_t ii, jj;
// If there is a mask value we are supposed to ignore,
if ( use_mask_value ) {
// iterate over all pixels, skipping pixels equal to mask value.
for ( ii = (band_no * line_count); // 0-ordered band number times lines is offset into image
ii < (band_no+1) * line_count && ii < self->size_y;
ii++ )
{
asfPercentMeter( (double)(ii - band_no*line_count)/(double)line_count );
uint8_image_get_row (self, ii, row_buffer);
for ( jj = 0 ; jj < self->size_x ; jj++ ) {
uint8_t cs = row_buffer[jj]; // Current sample.
if ( cs == mask_value ) {
continue;
}
if ( G_UNLIKELY (cs < imin) ) { imin = cs; }
if ( G_UNLIKELY (cs > imax) ) { imax = cs; }
double old_mean = stats->mean;
stats->mean += (cs - stats->mean) / (sample_count + 1);
s += (cs - old_mean) * (cs - stats->mean);
sample_count++;
}
}
asfPercentMeter(1.0);
}
else {
// There is no mask value to ignore, so we do the same as the
// above loop, but without the possible continue statement.
for ( ii = (band_no * line_count); // 0-ordered band number times lines is offset into image
ii < (band_no+1) * line_count && ii < self->size_y;
ii++ )
{
asfPercentMeter( (double)(ii - band_no*line_count)/(double)line_count );
uint8_image_get_row (self, ii, row_buffer);
for ( jj = 0 ; jj < self->size_x ; jj++ ) {
uint8_t cs = row_buffer[jj]; // Current sample.
if ( G_UNLIKELY (cs < imin) ) { imin = cs; }
if ( G_UNLIKELY (cs > imax) ) { imax = cs; }
double old_mean = stats->mean;
stats->mean += (cs - stats->mean) / (sample_count + 1);
s += (cs - old_mean) * (cs - stats->mean);
sample_count++;
}
}
asfPercentMeter(1.0);
}
g_free (row_buffer);
// Verify the new extrema have been found.
if (imin == INT_MAX || imax == INT_MIN)
return 1;
// The new extrema had better be in the range supported by uint8_t.
if (imin < 0 || imax > UINT8_MAX)
return 1;
stats->min = imin;
stats->max = imax;
stats->std_deviation = sqrt (s / (sample_count - 1));
return 0;
}
void
uint8_image_statistics_with_mask_interval (UInt8Image *self, uint8_t *min,
uint8_t *max, double *mean,
double *standard_deviation,
uint8_t interval_start,
uint8_t interval_end)
{
// This method is a trivial clone-and-modify of
// float_image_statistics, but it is totally untested at the moment.
g_assert_not_reached ();
// Minimum and maximum sample values as integers.
int imin = INT_MAX, imax = INT_MIN;
// Buffer for one row of samples.
uint8_t *row_buffer = g_new (uint8_t, self->size_x);
*mean = 0.0;
double s = 0.0;
size_t sample_count = 0; // Samples considered so far.
size_t ii, jj;
for ( ii = 0 ; ii < self->size_y ; ii++ ) {
uint8_image_get_row (self, ii, row_buffer);
for ( jj = 0 ; jj < self->size_x ; jj++ ) {
uint8_t cs = row_buffer[jj]; // Current sample.
// If in the mask interval, do not consider this pixel any
// further.
if ( cs >= interval_start && cs <= interval_end ) {
continue;
}
if ( G_UNLIKELY (cs < imin) ) { imin = cs; }
if ( G_UNLIKELY (cs > imax) ) { imax = cs; }
double old_mean = *mean;
*mean += (cs - *mean) / (sample_count + 1);
s += (cs - old_mean) * (cs - *mean);
sample_count++;
}
}
g_free (row_buffer);
// Verify the new extrema have been found.
g_assert (imin != INT_MAX);
g_assert (imax != INT_MIN);
// The new extrema had better be in the range supported by uint8_t.
g_assert (imin >= 0);
g_assert (imax <= UINT8_MAX);
*min = imin;
*max = imax;
*standard_deviation = sqrt (s / (sample_count - 1));
}
void
uint8_image_approximate_statistics (UInt8Image *self, size_t stride,
double *mean, double *standard_deviation,
gboolean use_mask_value,
uint8_t mask_value)
{
// Rows and columns of samples that fit in image given stride
// stride.
size_t sample_columns = ceil ((double) self->size_x / stride);
size_t sample_rows = ceil ((double) self->size_y / stride);
// Total number of samples.
size_t sample_count = sample_columns * sample_rows;
// Create an image holding the sample values.
UInt8Image *sample_image = uint8_image_new (sample_columns, sample_rows);
// Load the sample values.
size_t current_sample = 0;
size_t ii;
for ( ii = 0 ; ii < sample_columns ; ii++ ) {
size_t jj;
for ( jj = 0 ; jj < sample_rows ; jj++ ) {
uint8_t sample = uint8_image_get_pixel (self, ii * stride, jj * stride);
uint8_image_set_pixel (sample_image, ii, jj, sample);
current_sample++;
}
}
// Ensure that we got the right number of samples in our image.
g_assert (current_sample == sample_count);
// Compute the exact statistics of the sampled version of the image.
// The _statistics method wants to compute min and max, so we let
// it, even though we don't do anything with them (since they are
// inaccurate).
uint8_t min, max;
uint8_image_statistics (sample_image, &min, &max, mean, standard_deviation,
use_mask_value, mask_value);
uint8_image_free (sample_image);
}
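// Sketch (not in the original source): approximate statistics by
// visiting every 10th pixel in each direction, trading some accuracy
// for a roughly 100x reduction in pixels examined.
static void
example_approximate_statistics (UInt8Image *img)
{
  double mean, sdev;
  uint8_image_approximate_statistics (img, 10, &mean, &sdev, FALSE, 0);
  fprintf (stderr, "approximate mean=%g sdev=%g\n", mean, sdev);
}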
void
uint8_image_approximate_statistics_with_mask_interval
(UInt8Image *self, size_t stride, double *mean, double *standard_deviation,
uint8_t interval_start, uint8_t interval_end)
{
// This method is a trivial clone-and-modify of
// float_image_approximate_statistics, but it is totally untested at
// the moment.
g_assert_not_reached ();
// Rows and columns of samples that fit in image given stride
// stride.
size_t sample_columns = ceil ((double) self->size_x / stride);
size_t sample_rows = ceil ((double) self->size_y / stride);
// Total number of samples.
size_t sample_count = sample_columns * sample_rows;
// Create an image holding the sample values.
UInt8Image *sample_image = uint8_image_new (sample_columns, sample_rows);
// Load the sample values.
size_t current_sample = 0;
size_t ii;
for ( ii = 0 ; ii < sample_columns ; ii++ ) {
size_t jj;
for ( jj = 0 ; jj < sample_rows ; jj++ ) {
uint8_t sample = uint8_image_get_pixel (self, ii * stride, jj * stride);
uint8_image_set_pixel (sample_image, ii, jj, sample);
current_sample++;
}
}
// Ensure that we got the right number of samples in our image.
g_assert (current_sample == sample_count);
// Compute the exact statistics of the sampled version of the image.
// The _statistics method wants to compute min and max, so we let
// it, even though we don't do anything with them (since they are
// inaccurate).
uint8_t min, max;
uint8_image_statistics_with_mask_interval (sample_image, &min, &max,
mean, standard_deviation,
interval_start, interval_end);
uint8_image_free (sample_image);
}
gsl_histogram *
uint8_image_gsl_histogram (UInt8Image *self, double min, double max,
size_t bin_count)
{
// Carefully cloned and modified from float_image.c, but not yet
// tested.
g_assert_not_reached ();
// Initialize the histogram.
gsl_histogram *histogram = gsl_histogram_alloc (bin_count);
gsl_histogram_set_ranges_uniform (histogram, min, max);
// Buffer for one row of samples.
uint8_t *row_buffer = g_new (uint8_t, self->size_x);
// Populate the histogram over every sample in the image.
size_t ii, jj;
for (ii = 0 ; ii < self->size_y ; ii++ ) {
uint8_image_get_row (self, ii, row_buffer);
for ( jj = 0 ; jj < self->size_x ; jj++ ) {
gsl_histogram_increment (histogram, row_buffer[jj]);
}
}
g_free (row_buffer);
return histogram;
}
double
uint8_image_apply_kernel (UInt8Image *self, ssize_t x, ssize_t y,
gsl_matrix *kern)
{
// Carefully cloned and modified from float_image.c, but not yet
// tested.
g_assert_not_reached ();
g_assert (x >= 0 && (size_t) x < self->size_x);
g_assert (y >= 0 && (size_t) y < self->size_y);
g_assert (kern->size2 % 2 == 1);
g_assert (kern->size2 == kern->size1);
size_t ks = kern->size2; // Kernel size.
double sum = 0; // Result.
size_t ii;
for ( ii = 0 ; ii < kern->size1 ; ii++ ) {
ssize_t iy = y - ks / 2 + ii; // Current image y pixel index.
size_t jj;
for ( jj = 0 ; jj < kern->size2 ; jj++ ) {
ssize_t ix = x - ks / 2 + jj; // Current image x pixel index
sum += (gsl_matrix_get (kern, jj, ii)
* uint8_image_get_pixel_with_reflection (self, ix, iy));
}
}
return sum;
}
double
uint8_image_sample (UInt8Image *self, double x, double y,
uint8_image_sample_method_t sample_method)
{
g_assert (x >= 0.0 && x <= (double) self->size_x - 1.0);
g_assert (y >= 0.0 && y <= (double) self->size_y - 1.0);
switch ( sample_method ) {
case UINT8_IMAGE_SAMPLE_METHOD_NEAREST_NEIGHBOR:
return uint8_image_get_pixel (self, round (x), round (y));
break;
case UINT8_IMAGE_SAMPLE_METHOD_BILINEAR:
{
// Indices of the points we are interpolating between (x below, y
// below, etc., where "below" is meant in the numerical sense, not
// the image orientation sense).
size_t xb = floor (x), yb = floor (y), xa = ceil (x), ya = ceil (y);
size_t ts = self->tile_size; // Convenience alias.
// Offset of xb, yb, etc. relative to tiles they lie in.
size_t xbto = xb % ts, ybto = yb % ts, xato = xa % ts, yato = ya % ts;
// Values of points we are interpolating between.
uint8_t ul, ur, ll, lr;
// If the points we are interpolating between don't span a tile
// edge, we load them straight from tile memory to save some time.
if ( G_LIKELY ( xbto != ts - 1 && xato != 0
&& ybto != ts - 1 && yato != 0) ) {
// The tile indices.
size_t tx = xb / ts, ty = yb / ts;
// Tile offset in flattened list of tile addresses.
size_t tile_offset = ty * self->tile_count_x + tx;
uint8_t *tile_address = self->tile_addresses[tile_offset];
if ( G_UNLIKELY (tile_address == NULL) ) {
tile_address = load_tile (self, tx, ty);
}
ul = tile_address[ybto * self->tile_size + xbto];
ur = tile_address[ybto * self->tile_size + xato];
ll = tile_address[yato * self->tile_size + xbto];
lr = tile_address[yato * self->tile_size + xato];
}
else {
// We are spanning a tile edge, so we just get the pixels
// using the inefficient but easy get_pixel method.
ul = uint8_image_get_pixel (self, floor (x), floor (y));
ur = uint8_image_get_pixel (self, ceil (x), floor (y));
ll = uint8_image_get_pixel (self, floor (x), ceil (y));
lr = uint8_image_get_pixel (self, ceil (x), ceil (y));
}
// Upper and lower values interpolated in the x direction.
double ux = ul + (ur - ul) * (x - floor (x));
double lx = ll + (lr - ll) * (x - floor (x));
return ux + (lx - ux) * (y - floor (y));
}
break;
case UINT8_IMAGE_SAMPLE_METHOD_BICUBIC:
{
// Should never be here... bicubic resampling can result in negative
// values and should not be used for resampling unsigned values.
//g_assert_not_reached ();
asfPrintError ("BICUBIC resampling for BYTE data is not supported.\n");
static gboolean first_time_through = TRUE;
// Splines in the x direction, and their lookup accelerators.
static double *x_indicies;
static double *values;
static gsl_spline **xss;
static gsl_interp_accel **xias;
// Spline between splines in the y direction, and lookup accelerator.
static double *y_spline_indicies;
static double *y_spline_values;
static gsl_spline *ys;
static gsl_interp_accel *yia;
// All these splines have size 4.
const size_t ss = 4;
size_t ii; // Index variable.
if ( first_time_through ) {
// Allocate memory for the splines in the x direction.
x_indicies = g_new (double, ss);
values = g_new (double, ss);
xss = g_new (gsl_spline *, ss);
xias = g_new (gsl_interp_accel *, ss);
for ( ii = 0 ; ii < ss ; ii++ ) {
xss[ii] = gsl_spline_alloc (gsl_interp_cspline, ss);
xias[ii] = gsl_interp_accel_alloc ();
}
// Allocate memory for the spline in the y direction.
y_spline_indicies = g_new (double, ss);
y_spline_values = g_new (double, ss);
ys = gsl_spline_alloc (gsl_interp_cspline, ss);
yia = gsl_interp_accel_alloc ();
first_time_through = FALSE;
}
// Get the values for the nearest 16 points.
size_t jj; // Index variable.
for ( ii = 0 ; ii < ss ; ii++ ) {
for ( jj = 0 ; jj < ss ; jj++ ) {
x_indicies[jj] = floor (x) - 1 + jj;
values[jj]
= uint8_image_get_pixel_with_reflection (self, x_indicies[jj],
floor (y) - 1 + ii);
}
gsl_spline_init (xss[ii], x_indicies, values, ss);
}
// Set up the spline that runs in the y direction.
for ( ii = 0 ; ii < ss ; ii++ ) {
y_spline_indicies[ii] = floor (y) - 1 + ii;
y_spline_values[ii] = gsl_spline_eval (xss[ii], x, xias[ii]);
}
gsl_spline_init (ys, y_spline_indicies, y_spline_values, ss);
double ret_val = gsl_spline_eval (ys, y, yia);
// NOTE: bicubic resampling can return negative values when the byte
// values are close to zero and the spline fit goes negative in the
// neighborhood of the pixel.
/*
if (ret_val > 255.0 || ret_val < 0.0) {
asfPrintWarning("Bicubic resampling of BYTE data returned out of range value (%f).\n"
"...Continuing, but negative values will be forced to zero, and values above 255\n"
" will be forced to 255\n",
ret_val);
if (ret_val > 265.0) {
asfPrintError("Bicubic resampling of BYTE data returned a value (%f) too far above 255.0 to\n"
"cap to 255.0\n", ret_val);
}
if (ret_val < -10.0) {
asfPrintError("Bicubic resampling of BYTE data returned a value (%f) too far below 0.0 to\n"
"cap to 0.0\n", ret_val);
}
ret_val = ret_val < 0.0 ? 0.0 : ret_val;
ret_val = ret_val > 255.0 ? 255.0 : ret_val;
}
*/
return ret_val;
}
break;
default:
g_assert_not_reached ();
return -42; // Reassure the compiler.
}
}
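// Sketch (not in the original source): sample at a non-integral point
// with bilinear interpolation (assumes the image is at least 3x4
// pixels).  For a point (x, y) between pixels the result is
// ux + (lx - ux) * (y - floor (y)), where ux and lx are the values
// interpolated along x on the rows above and below, as computed in the
// method above.
static double
example_bilinear_sample (UInt8Image *img)
{
  return uint8_image_sample (img, 1.5, 2.25,
                             UINT8_IMAGE_SAMPLE_METHOD_BILINEAR);
}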
gboolean
uint8_image_equals (UInt8Image *self, UInt8Image *other)
{
// Compare image sizes.
if ( self->size_x != other->size_x ) {
return FALSE;
}
if ( self->size_y != other->size_y ) {
return FALSE;
}
// Compare image pixels.
size_t ii, jj;
for ( ii = 0 ; ii < self->size_y ; ii++ ) {
for ( jj = 0 ; jj < self->size_x ; jj++ ) {
if ( G_UNLIKELY (uint8_image_get_pixel (self, jj, ii)
!= uint8_image_get_pixel (other, jj, ii)) ) {
return FALSE;
}
}
}
return TRUE;
}
// Flip an image about a horizontal line through the center of the image
void
uint8_image_flip_y(UInt8Image *self)
{
size_t ii, jj;
for (jj = 0; jj < self->size_y / 2; ++jj) {
asfLineMeter(2 * jj + 1, self->size_y);
size_t jj2 = self->size_y - 1 - jj;
for (ii = 0; ii < self->size_x; ++ii) {
uint8 a = uint8_image_get_pixel(self, ii, jj);
uint8 b = uint8_image_get_pixel(self, ii, jj2);
uint8_image_set_pixel(self, ii, jj, b);
uint8_image_set_pixel(self, ii, jj2, a);
}
}
asfLineMeter(1, 1);
}
// Flip an image about a vertical line through the center of the image
void
uint8_image_flip_x(UInt8Image *self)
{
size_t ii, jj;
for (ii = 0; ii < self->size_x / 2; ++ii) {
asfLineMeter(2 * ii + 1, self->size_x);
size_t ii2 = self->size_x - 1 - ii;
for (jj = 0; jj < self->size_y; ++jj) {
uint8 a = uint8_image_get_pixel(self, ii, jj);
uint8 b = uint8_image_get_pixel(self, ii2, jj);
uint8_image_set_pixel(self, ii, jj, b);
uint8_image_set_pixel(self, ii2, jj, a);
}
}
asfLineMeter(1, 1);
}
// Bring the tile cache file on the disk fully into sync with the
// latest image data stored in the memory cache.
static void
synchronize_tile_file_with_memory_cache (UInt8Image *self)
{
// If we aren't using a tile file, this operation doesn't make
// sense.
g_assert (self->tile_file != NULL);
guint ii;
for ( ii = 0 ; ii < self->tile_queue->length ; ii++ ) {
size_t tile_offset = GPOINTER_TO_INT (g_queue_peek_nth (self->tile_queue,
ii));
cached_tile_to_disk (self, tile_offset);
}
}
void
uint8_image_freeze (UInt8Image *self, FILE *file_pointer)
{
FILE *fp = file_pointer; // Convenience alias.
g_assert (file_pointer != NULL);
size_t write_count = fwrite (&(self->size_x), sizeof (size_t), 1, fp);
g_assert (write_count == 1);
write_count = fwrite (&(self->size_y), sizeof (size_t), 1, fp);
g_assert (write_count == 1);
write_count = fwrite (&(self->cache_space), sizeof (size_t), 1, fp);
g_assert (write_count == 1);
write_count = fwrite (&(self->cache_area), sizeof (size_t), 1, fp);
g_assert (write_count == 1);
write_count = fwrite (&(self->tile_size), sizeof (size_t), 1, fp);
g_assert (write_count == 1);
write_count = fwrite (&(self->cache_size_in_tiles), sizeof (size_t), 1, fp);
g_assert (write_count == 1);
write_count = fwrite (&(self->tile_count_x), sizeof (size_t), 1, fp);
g_assert (write_count == 1);
write_count = fwrite (&(self->tile_count_y), sizeof (size_t), 1, fp);
g_assert (write_count == 1);
write_count = fwrite (&(self->tile_count), sizeof (size_t), 1, fp);
g_assert (write_count == 1);
write_count = fwrite (&(self->tile_area), sizeof (size_t), 1, fp);
g_assert (write_count == 1);
// We don't bother serializing the cache -- it's a pain to keep track
// of and probably almost never worth it.
// We write the tile queue pointer out, so that when we later thaw the
// serialized version, we can tell whether a cache file was in use (if
// it wasn't, tile_queue will be NULL).
write_count = fwrite (&(self->tile_queue), sizeof (GQueue *), 1, fp);
g_assert (write_count == 1);
// If there was no cache file...
if ( self->tile_queue == NULL ) {
// We store the contents of the first tile and are done.
write_count = fwrite (self->tile_addresses[0], sizeof (uint8_t),
self->tile_area, fp);
if ( write_count < self->tile_area ) {
if ( ferror (fp) ) {
fprintf (stderr, "Error writing serialized UInt8Image instance during "
"freeze: %s\n", strerror (errno));
exit (EXIT_FAILURE);
}
}
g_assert (write_count == self->tile_area);
}
// Otherwise, the in-memory cache needs to be copied into the tile
// file and the tile file saved in the serialized version of self.
else {
synchronize_tile_file_with_memory_cache (self);
uint8_t *buffer = g_new (uint8_t, self->tile_area);
size_t ii;
off_t tmp = FTELL64 (self->tile_file);
int return_code = FSEEK64 (self->tile_file, 0, SEEK_SET);
g_assert (return_code == 0);
for ( ii = 0 ; ii < self->tile_count ; ii++ ) {
size_t read_count = fread (buffer, sizeof (uint8_t), self->tile_area,
self->tile_file);
g_assert (read_count == self->tile_area);
write_count = fwrite (buffer, sizeof (uint8_t), self->tile_area, fp);
g_assert (write_count == self->tile_area);
}
return_code = FSEEK64 (self->tile_file, tmp, SEEK_SET);
g_assert (return_code == 0);
g_free (buffer);
}
}
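// Sketch (not in the original source): serialize an image into a
// temporary stream.  A corresponding thaw routine (not shown in this
// file) would be expected to reconstitute the instance from the same
// stream.
static void
example_freeze_to_tmpfile (UInt8Image *img)
{
  FILE *fp = tmpfile ();
  g_assert (fp != NULL);
  uint8_image_freeze (img, fp);
  int return_code = fclose (fp);
  g_assert (return_code == 0);
}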
int
uint8_image_band_store(UInt8Image *self, const char *file,
meta_parameters *meta, int append_flag)
{
// Give status
if (meta->general->band_count == 1)
asfPrintStatus("\n\nStoring image ...\n");
else
asfPrintStatus("\n\nStoring band ...\n");
// Open the file to write to.
FILE *fp = fopen (file, append_flag ? "ab" : "wb");
// FIXME: we need some error handling and propagation here.
g_assert (fp != NULL);
// We will write the image data in horizontal strips, one line at a time.
uint8_t *line_buffer = g_new (uint8_t, self->size_x);
// Sanity check
if (meta->general->line_count != (int)self->size_y ||
meta->general->sample_count != (int)self->size_x)
{
asfPrintError("Inconsistency between metadata and image!\n"
"Metadata says: %dx%d LxS, image has %dx%d\n"
"Possibly did not write metadata before storing image.\n",
meta->general->line_count, meta->general->sample_count,
self->size_y, self->size_x);
}
// Write the image data to the file one line at a time.
int ii;
for ( ii = 0 ; ii < (int)self->size_y ; ii++ ) {
uint8_image_get_row (self, ii, line_buffer);
size_t write_count =
fwrite(line_buffer, sizeof(uint8_t), self->size_x, fp);
if (write_count < self->size_x) {
  // It must have been a write error (no space left, possibly),
  g_assert(ferror(fp));
  // so print an error message
  fprintf(stderr, "Error writing file %s: %s\n", file, strerror(errno));
  // and exit.
  exit (EXIT_FAILURE);
}
g_assert(write_count == self->size_x);
}
// Done with the line buffer.
g_free (line_buffer);
// Close file being written.
int return_code = fclose (fp);
g_assert (return_code == 0);
// Return success code.
return 0;
}
int
uint8_image_store (UInt8Image *self, const char *file)
{
meta_parameters *meta;
meta = meta_read(file);
int ret = uint8_image_band_store(self, file, meta, 0);
meta_free(meta);
return ret;
}
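// Sketch (not in the original source): store an image next to its
// already-written metadata.  The file name is a hypothetical
// placeholder; note that uint8_image_store reads the metadata back to
// sanity-check the dimensions, so the metadata must be written first.
static void
example_store (UInt8Image *img)
{
  int return_code = uint8_image_store (img, "example.img");
  g_assert (return_code == 0);
}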
int
uint8_image_export_as_jpeg (UInt8Image *self, const char *file,
size_t max_dimension, gboolean use_mask_value,
uint8_t mask_value)
{
// Carefully cloned and modified from float_image.c, but not yet
// tested.
g_assert_not_reached ();
size_t scale_factor; // Scale factor to use for output image.
if ( self->size_x > self->size_y ) {
scale_factor = ceil ((double) self->size_x / max_dimension);
}
else {
scale_factor = ceil ((double) self->size_y / max_dimension);
}
// We want the scale factor to be odd, so that we can easily use a
// standard kernel to average things.
if ( scale_factor % 2 == 0 ) {
scale_factor++;
}
// Output JPEG x and y dimensions.
size_t osx = self->size_x / scale_factor;
size_t osy = self->size_y / scale_factor;
// Number of pixels in output image.
size_t pixel_count = osx * osy;
// Pixels of the output image.
unsigned char *pixels = g_new (unsigned char, pixel_count);
JSAMPLE test_jsample; // For verifying properties of JSAMPLE type.
/* Here are some very funky checks to try to ensure that the JSAMPLE
really is the type we expect, so we can scale properly. */
g_assert (sizeof (unsigned char) == 1);
g_assert (sizeof (unsigned char) == sizeof (JSAMPLE));
test_jsample = 0;
test_jsample--;
g_assert (test_jsample == UCHAR_MAX);
// Stuff needed by libjpeg.
struct jpeg_compress_struct cinfo;
struct jpeg_error_mgr jerr;
cinfo.err = jpeg_std_error (&jerr);
jpeg_create_compress (&cinfo);
// Open output file.
FILE *fp = fopen (file, "wb");
if ( fp == NULL ) { perror ("error opening file"); }
// FIXME: we need some error handling and propagation here.
g_assert (fp != NULL);
// Connect jpeg output to the output file to be used.
jpeg_stdio_dest (&cinfo, fp);
// Set image parameters that libjpeg needs to know about.
cinfo.image_width = osx;
cinfo.image_height = osy;
cinfo.input_components = 1; // Grey scale => 1 color component / pixel.
cinfo.in_color_space = JCS_GRAYSCALE;
jpeg_set_defaults (&cinfo); // Use default compression parameters.
// Reassure libjpeg that we will be writing a complete JPEG file.
jpeg_start_compress (&cinfo, TRUE);
// As advertised, we will average pixels together.
g_assert (scale_factor % 2 != 0);
size_t kernel_size = scale_factor;
gsl_matrix *averaging_kernel
= gsl_matrix_alloc (kernel_size, kernel_size);
double kernel_value = 1.0 / ((double)kernel_size * kernel_size);
size_t ii, jj; // Index values.
for ( ii = 0 ; ii < averaging_kernel->size1 ; ii++ ) {
for ( jj = 0 ; jj < averaging_kernel->size2 ; jj++ ) {
gsl_matrix_set (averaging_kernel, ii, jj, kernel_value);
}
}
// Sample input image, putting scaled results into output image.
size_t sample_stride = scale_factor;
for ( ii = 0 ; ii < osy ; ii++ ) {
for ( jj = 0 ; jj < osx ; jj++ ) {
// Input image average pixel value.
double ival = uint8_image_apply_kernel (self, jj * sample_stride,
ii * sample_stride,
averaging_kernel);
// Set output value.
int32_t oval = round (ival);
// In case floating point arithmetic weirdness gets us into
// trouble, we correct.
if ( oval < 0 ) {
oval = 0;
}
else if ( oval > UINT8_MAX ) {
oval = UINT8_MAX;
}
pixels[ii * osx + jj] = oval;
}
}
// Write the jpeg, one row at a time.
const int rows_to_write = 1;
JSAMPROW *row_pointer = g_new (JSAMPROW, rows_to_write);
while ( cinfo.next_scanline < cinfo.image_height ) {
int rows_written;
row_pointer[0] = &(pixels[cinfo.next_scanline * osx]);
rows_written = jpeg_write_scanlines (&cinfo, row_pointer, rows_to_write);
g_assert (rows_written == rows_to_write);
}
g_free (row_pointer);
// Finish compression and close the JPEG.
jpeg_finish_compress (&cinfo);
int return_code = fclose (fp);
g_assert (return_code == 0);
jpeg_destroy_compress (&cinfo);
g_free (pixels);
return 0; // Return success indicator.
}
size_t
uint8_image_get_cache_size (UInt8Image *self)
{
g_assert_not_reached (); // Stubbed out for now.
// Compiler reassurance.
self = self;
return 0;
}
void
uint8_image_set_cache_size (UInt8Image *self, size_t size)
{
g_assert_not_reached (); // Stubbed out for now.
// Compiler reassurance.
self = self; size = size;
}
void
uint8_image_free (UInt8Image *self)
{
// Close the tile file (closing shouldn't remove it, since it's
// already unlinked), if we were ever using one.
if ( self->tile_file != NULL ) {
int return_code = fclose (self->tile_file);
g_assert (return_code == 0);
}
// Deallocate dynamic memory.
g_free (self->tile_addresses);
// If we didn't need a tile file, we also won't have a tile queue.
if ( self->tile_queue != NULL ) {
g_queue_free (self->tile_queue);
}
g_free (self->cache);
if (self->tile_file_name) {
#ifdef win32
    // On win32 the temporary file couldn't be unlinked while open, so
    // remove it now.
    unlink_tmp_file(self->tile_file_name->str);
#endif
    g_string_free(self->tile_file_name, TRUE);
  }
g_free (self);
}
|
module Interpolation
using ..Utils
export Interpolator, HermiteInterpolation
include("interpolation/interpolation.jl")
include("interpolation/hermite_interpolation.jl")
end
|
// This file is auto-generated, don't edit it. Thanks.
#ifndef ALIBABACLOUD_COMPUTENEST20210601_H_
#define ALIBABACLOUD_COMPUTENEST20210601_H_
#include <alibabacloud/open_api.hpp>
#include <boost/any.hpp>
#include <boost/throw_exception.hpp>
#include <darabonba/core.hpp>
#include <darabonba/util.hpp>
#include <iostream>
#include <map>
#include <vector>
using namespace std;
namespace Alibabacloud_ComputeNest20210601 {
class ContinueDeployServiceInstanceRequest : public Darabonba::Model {
public:
shared_ptr<string> clientToken{};
shared_ptr<string> parameters{};
shared_ptr<string> regionId{};
shared_ptr<string> serviceInstanceId{};
ContinueDeployServiceInstanceRequest() {}
explicit ContinueDeployServiceInstanceRequest(const std::map<string, boost::any> &config) : Darabonba::Model(config) {
fromMap(config);
};
void validate() override {}
map<string, boost::any> toMap() override {
map<string, boost::any> res;
if (clientToken) {
res["ClientToken"] = boost::any(*clientToken);
}
if (parameters) {
res["Parameters"] = boost::any(*parameters);
}
if (regionId) {
res["RegionId"] = boost::any(*regionId);
}
if (serviceInstanceId) {
res["ServiceInstanceId"] = boost::any(*serviceInstanceId);
}
return res;
}
void fromMap(map<string, boost::any> m) override {
if (m.find("ClientToken") != m.end() && !m["ClientToken"].empty()) {
clientToken = make_shared<string>(boost::any_cast<string>(m["ClientToken"]));
}
if (m.find("Parameters") != m.end() && !m["Parameters"].empty()) {
parameters = make_shared<string>(boost::any_cast<string>(m["Parameters"]));
}
if (m.find("RegionId") != m.end() && !m["RegionId"].empty()) {
regionId = make_shared<string>(boost::any_cast<string>(m["RegionId"]));
}
if (m.find("ServiceInstanceId") != m.end() && !m["ServiceInstanceId"].empty()) {
serviceInstanceId = make_shared<string>(boost::any_cast<string>(m["ServiceInstanceId"]));
}
}
virtual ~ContinueDeployServiceInstanceRequest() = default;
};
class ContinueDeployServiceInstanceResponseBody : public Darabonba::Model {
public:
shared_ptr<string> requestId{};
ContinueDeployServiceInstanceResponseBody() {}
explicit ContinueDeployServiceInstanceResponseBody(const std::map<string, boost::any> &config) : Darabonba::Model(config) {
fromMap(config);
};
void validate() override {}
map<string, boost::any> toMap() override {
map<string, boost::any> res;
if (requestId) {
res["RequestId"] = boost::any(*requestId);
}
return res;
}
void fromMap(map<string, boost::any> m) override {
if (m.find("RequestId") != m.end() && !m["RequestId"].empty()) {
requestId = make_shared<string>(boost::any_cast<string>(m["RequestId"]));
}
}
virtual ~ContinueDeployServiceInstanceResponseBody() = default;
};
class ContinueDeployServiceInstanceResponse : public Darabonba::Model {
public:
shared_ptr<map<string, string>> headers{};
shared_ptr<ContinueDeployServiceInstanceResponseBody> body{};
ContinueDeployServiceInstanceResponse() {}
explicit ContinueDeployServiceInstanceResponse(const std::map<string, boost::any> &config) : Darabonba::Model(config) {
fromMap(config);
};
void validate() override {
if (!headers) {
BOOST_THROW_EXCEPTION(boost::enable_error_info(std::runtime_error("headers is required.")));
}
if (!body) {
BOOST_THROW_EXCEPTION(boost::enable_error_info(std::runtime_error("body is required.")));
}
}
map<string, boost::any> toMap() override {
map<string, boost::any> res;
if (headers) {
res["headers"] = boost::any(*headers);
}
if (body) {
res["body"] = body ? boost::any(body->toMap()) : boost::any(map<string,boost::any>({}));
}
return res;
}
void fromMap(map<string, boost::any> m) override {
if (m.find("headers") != m.end() && !m["headers"].empty()) {
map<string, string> map1 = boost::any_cast<map<string, string>>(m["headers"]);
map<string, string> toMap1;
for (auto item:map1) {
toMap1[item.first] = item.second;
}
headers = make_shared<map<string, string>>(toMap1);
}
if (m.find("body") != m.end() && !m["body"].empty()) {
if (typeid(map<string, boost::any>) == m["body"].type()) {
ContinueDeployServiceInstanceResponseBody model1;
model1.fromMap(boost::any_cast<map<string, boost::any>>(m["body"]));
body = make_shared<ContinueDeployServiceInstanceResponseBody>(model1);
}
}
}
virtual ~ContinueDeployServiceInstanceResponse() = default;
};
class CreateServiceInstanceRequestOperationMetadata : public Darabonba::Model {
public:
shared_ptr<string> endTime{};
shared_ptr<string> resources{};
shared_ptr<string> serviceInstanceId{};
shared_ptr<string> startTime{};
CreateServiceInstanceRequestOperationMetadata() {}
explicit CreateServiceInstanceRequestOperationMetadata(const std::map<string, boost::any> &config) : Darabonba::Model(config) {
fromMap(config);
};
void validate() override {}
map<string, boost::any> toMap() override {
map<string, boost::any> res;
if (endTime) {
res["EndTime"] = boost::any(*endTime);
}
if (resources) {
res["Resources"] = boost::any(*resources);
}
if (serviceInstanceId) {
res["ServiceInstanceId"] = boost::any(*serviceInstanceId);
}
if (startTime) {
res["StartTime"] = boost::any(*startTime);
}
return res;
}
void fromMap(map<string, boost::any> m) override {
if (m.find("EndTime") != m.end() && !m["EndTime"].empty()) {
endTime = make_shared<string>(boost::any_cast<string>(m["EndTime"]));
}
if (m.find("Resources") != m.end() && !m["Resources"].empty()) {
resources = make_shared<string>(boost::any_cast<string>(m["Resources"]));
}
if (m.find("ServiceInstanceId") != m.end() && !m["ServiceInstanceId"].empty()) {
serviceInstanceId = make_shared<string>(boost::any_cast<string>(m["ServiceInstanceId"]));
}
if (m.find("StartTime") != m.end() && !m["StartTime"].empty()) {
startTime = make_shared<string>(boost::any_cast<string>(m["StartTime"]));
}
}
virtual ~CreateServiceInstanceRequestOperationMetadata() = default;
};
class CreateServiceInstanceRequestRequestTag : public Darabonba::Model {
public:
shared_ptr<string> key{};
shared_ptr<string> value{};
CreateServiceInstanceRequestRequestTag() {}
explicit CreateServiceInstanceRequestRequestTag(const std::map<string, boost::any> &config) : Darabonba::Model(config) {
fromMap(config);
};
void validate() override {}
map<string, boost::any> toMap() override {
map<string, boost::any> res;
if (key) {
res["Key"] = boost::any(*key);
}
if (value) {
res["Value"] = boost::any(*value);
}
return res;
}
void fromMap(map<string, boost::any> m) override {
if (m.find("Key") != m.end() && !m["Key"].empty()) {
key = make_shared<string>(boost::any_cast<string>(m["Key"]));
}
if (m.find("Value") != m.end() && !m["Value"].empty()) {
value = make_shared<string>(boost::any_cast<string>(m["Value"]));
}
}
virtual ~CreateServiceInstanceRequestRequestTag() = default;
};
class CreateServiceInstanceRequest : public Darabonba::Model {
public:
shared_ptr<string> clientToken{};
shared_ptr<bool> enableAccountOps{};
shared_ptr<bool> enableInstanceOps{};
shared_ptr<CreateServiceInstanceRequestOperationMetadata> operationMetadata{};
shared_ptr<map<string, boost::any>> parameters{};
shared_ptr<string> regionId{};
shared_ptr<vector<CreateServiceInstanceRequestRequestTag>> requestTag{};
shared_ptr<string> resourceGroupId{};
shared_ptr<string> serviceId{};
shared_ptr<string> serviceVersion{};
shared_ptr<string> templateName{};
CreateServiceInstanceRequest() {}
explicit CreateServiceInstanceRequest(const std::map<string, boost::any> &config) : Darabonba::Model(config) {
fromMap(config);
};
void validate() override {}
map<string, boost::any> toMap() override {
map<string, boost::any> res;
if (clientToken) {
res["ClientToken"] = boost::any(*clientToken);
}
if (enableAccountOps) {
res["EnableAccountOps"] = boost::any(*enableAccountOps);
}
if (enableInstanceOps) {
res["EnableInstanceOps"] = boost::any(*enableInstanceOps);
}
if (operationMetadata) {
res["OperationMetadata"] = operationMetadata ? boost::any(operationMetadata->toMap()) : boost::any(map<string,boost::any>({}));
}
if (parameters) {
res["Parameters"] = boost::any(*parameters);
}
if (regionId) {
res["RegionId"] = boost::any(*regionId);
}
if (requestTag) {
vector<boost::any> temp1;
for(auto item1:*requestTag){
temp1.push_back(boost::any(item1.toMap()));
}
res["RequestTag"] = boost::any(temp1);
}
if (resourceGroupId) {
res["ResourceGroupId"] = boost::any(*resourceGroupId);
}
if (serviceId) {
res["ServiceId"] = boost::any(*serviceId);
}
if (serviceVersion) {
res["ServiceVersion"] = boost::any(*serviceVersion);
}
if (templateName) {
res["TemplateName"] = boost::any(*templateName);
}
return res;
}
void fromMap(map<string, boost::any> m) override {
if (m.find("ClientToken") != m.end() && !m["ClientToken"].empty()) {
clientToken = make_shared<string>(boost::any_cast<string>(m["ClientToken"]));
}
if (m.find("EnableAccountOps") != m.end() && !m["EnableAccountOps"].empty()) {
enableAccountOps = make_shared<bool>(boost::any_cast<bool>(m["EnableAccountOps"]));
}
if (m.find("EnableInstanceOps") != m.end() && !m["EnableInstanceOps"].empty()) {
enableInstanceOps = make_shared<bool>(boost::any_cast<bool>(m["EnableInstanceOps"]));
}
if (m.find("OperationMetadata") != m.end() && !m["OperationMetadata"].empty()) {
if (typeid(map<string, boost::any>) == m["OperationMetadata"].type()) {
CreateServiceInstanceRequestOperationMetadata model1;
model1.fromMap(boost::any_cast<map<string, boost::any>>(m["OperationMetadata"]));
operationMetadata = make_shared<CreateServiceInstanceRequestOperationMetadata>(model1);
}
}
if (m.find("Parameters") != m.end() && !m["Parameters"].empty()) {
map<string, boost::any> map1 = boost::any_cast<map<string, boost::any>>(m["Parameters"]);
map<string, boost::any> toMap1;
for (auto item:map1) {
toMap1[item.first] = item.second;
}
parameters = make_shared<map<string, boost::any>>(toMap1);
}
if (m.find("RegionId") != m.end() && !m["RegionId"].empty()) {
regionId = make_shared<string>(boost::any_cast<string>(m["RegionId"]));
}
if (m.find("RequestTag") != m.end() && !m["RequestTag"].empty()) {
if (typeid(vector<boost::any>) == m["RequestTag"].type()) {
vector<CreateServiceInstanceRequestRequestTag> expect1;
for(auto item1:boost::any_cast<vector<boost::any>>(m["RequestTag"])){
if (typeid(map<string, boost::any>) == item1.type()) {
CreateServiceInstanceRequestRequestTag model2;
model2.fromMap(boost::any_cast<map<string, boost::any>>(item1));
expect1.push_back(model2);
}
}
requestTag = make_shared<vector<CreateServiceInstanceRequestRequestTag>>(expect1);
}
}
if (m.find("ResourceGroupId") != m.end() && !m["ResourceGroupId"].empty()) {
resourceGroupId = make_shared<string>(boost::any_cast<string>(m["ResourceGroupId"]));
}
if (m.find("ServiceId") != m.end() && !m["ServiceId"].empty()) {
serviceId = make_shared<string>(boost::any_cast<string>(m["ServiceId"]));
}
if (m.find("ServiceVersion") != m.end() && !m["ServiceVersion"].empty()) {
serviceVersion = make_shared<string>(boost::any_cast<string>(m["ServiceVersion"]));
}
if (m.find("TemplateName") != m.end() && !m["TemplateName"].empty()) {
templateName = make_shared<string>(boost::any_cast<string>(m["TemplateName"]));
}
}
virtual ~CreateServiceInstanceRequest() = default;
};
class CreateServiceInstanceShrinkRequestOperationMetadata : public Darabonba::Model {
public:
shared_ptr<string> endTime{};
shared_ptr<string> resources{};
shared_ptr<string> serviceInstanceId{};
shared_ptr<string> startTime{};
CreateServiceInstanceShrinkRequestOperationMetadata() {}
explicit CreateServiceInstanceShrinkRequestOperationMetadata(const std::map<string, boost::any> &config) : Darabonba::Model(config) {
fromMap(config);
};
void validate() override {}
map<string, boost::any> toMap() override {
map<string, boost::any> res;
if (endTime) {
res["EndTime"] = boost::any(*endTime);
}
if (resources) {
res["Resources"] = boost::any(*resources);
}
if (serviceInstanceId) {
res["ServiceInstanceId"] = boost::any(*serviceInstanceId);
}
if (startTime) {
res["StartTime"] = boost::any(*startTime);
}
return res;
}
void fromMap(map<string, boost::any> m) override {
if (m.find("EndTime") != m.end() && !m["EndTime"].empty()) {
endTime = make_shared<string>(boost::any_cast<string>(m["EndTime"]));
}
if (m.find("Resources") != m.end() && !m["Resources"].empty()) {
resources = make_shared<string>(boost::any_cast<string>(m["Resources"]));
}
if (m.find("ServiceInstanceId") != m.end() && !m["ServiceInstanceId"].empty()) {
serviceInstanceId = make_shared<string>(boost::any_cast<string>(m["ServiceInstanceId"]));
}
if (m.find("StartTime") != m.end() && !m["StartTime"].empty()) {
startTime = make_shared<string>(boost::any_cast<string>(m["StartTime"]));
}
}
virtual ~CreateServiceInstanceShrinkRequestOperationMetadata() = default;
};
class CreateServiceInstanceShrinkRequestRequestTag : public Darabonba::Model {
public:
shared_ptr<string> key{};
shared_ptr<string> value{};
CreateServiceInstanceShrinkRequestRequestTag() {}
explicit CreateServiceInstanceShrinkRequestRequestTag(const std::map<string, boost::any> &config) : Darabonba::Model(config) {
fromMap(config);
};
void validate() override {}
map<string, boost::any> toMap() override {
map<string, boost::any> res;
if (key) {
res["Key"] = boost::any(*key);
}
if (value) {
res["Value"] = boost::any(*value);
}
return res;
}
void fromMap(map<string, boost::any> m) override {
if (m.find("Key") != m.end() && !m["Key"].empty()) {
key = make_shared<string>(boost::any_cast<string>(m["Key"]));
}
if (m.find("Value") != m.end() && !m["Value"].empty()) {
value = make_shared<string>(boost::any_cast<string>(m["Value"]));
}
}
virtual ~CreateServiceInstanceShrinkRequestRequestTag() = default;
};
class CreateServiceInstanceShrinkRequest : public Darabonba::Model {
public:
shared_ptr<string> clientToken{};
shared_ptr<bool> enableAccountOps{};
shared_ptr<bool> enableInstanceOps{};
shared_ptr<CreateServiceInstanceShrinkRequestOperationMetadata> operationMetadata{};
shared_ptr<string> parametersShrink{};
shared_ptr<string> regionId{};
shared_ptr<vector<CreateServiceInstanceShrinkRequestRequestTag>> requestTag{};
shared_ptr<string> resourceGroupId{};
shared_ptr<string> serviceId{};
shared_ptr<string> serviceVersion{};
shared_ptr<string> templateName{};
CreateServiceInstanceShrinkRequest() {}
explicit CreateServiceInstanceShrinkRequest(const std::map<string, boost::any> &config) : Darabonba::Model(config) {
fromMap(config);
};
void validate() override {}
map<string, boost::any> toMap() override {
map<string, boost::any> res;
if (clientToken) {
res["ClientToken"] = boost::any(*clientToken);
}
if (enableAccountOps) {
res["EnableAccountOps"] = boost::any(*enableAccountOps);
}
if (enableInstanceOps) {
res["EnableInstanceOps"] = boost::any(*enableInstanceOps);
}
if (operationMetadata) {
res["OperationMetadata"] = operationMetadata ? boost::any(operationMetadata->toMap()) : boost::any(map<string,boost::any>({}));
}
if (parametersShrink) {
res["Parameters"] = boost::any(*parametersShrink);
}
if (regionId) {
res["RegionId"] = boost::any(*regionId);
}
if (requestTag) {
vector<boost::any> temp1;
for(auto item1:*requestTag){
temp1.push_back(boost::any(item1.toMap()));
}
res["RequestTag"] = boost::any(temp1);
}
if (resourceGroupId) {
res["ResourceGroupId"] = boost::any(*resourceGroupId);
}
if (serviceId) {
res["ServiceId"] = boost::any(*serviceId);
}
if (serviceVersion) {
res["ServiceVersion"] = boost::any(*serviceVersion);
}
if (templateName) {
res["TemplateName"] = boost::any(*templateName);
}
return res;
}
void fromMap(map<string, boost::any> m) override {
if (m.find("ClientToken") != m.end() && !m["ClientToken"].empty()) {
clientToken = make_shared<string>(boost::any_cast<string>(m["ClientToken"]));
}
if (m.find("EnableAccountOps") != m.end() && !m["EnableAccountOps"].empty()) {
enableAccountOps = make_shared<bool>(boost::any_cast<bool>(m["EnableAccountOps"]));
}
if (m.find("EnableInstanceOps") != m.end() && !m["EnableInstanceOps"].empty()) {
enableInstanceOps = make_shared<bool>(boost::any_cast<bool>(m["EnableInstanceOps"]));
}
if (m.find("OperationMetadata") != m.end() && !m["OperationMetadata"].empty()) {
if (typeid(map<string, boost::any>) == m["OperationMetadata"].type()) {
CreateServiceInstanceShrinkRequestOperationMetadata model1;
model1.fromMap(boost::any_cast<map<string, boost::any>>(m["OperationMetadata"]));
operationMetadata = make_shared<CreateServiceInstanceShrinkRequestOperationMetadata>(model1);
}
}
if (m.find("Parameters") != m.end() && !m["Parameters"].empty()) {
parametersShrink = make_shared<string>(boost::any_cast<string>(m["Parameters"]));
}
if (m.find("RegionId") != m.end() && !m["RegionId"].empty()) {
regionId = make_shared<string>(boost::any_cast<string>(m["RegionId"]));
}
if (m.find("RequestTag") != m.end() && !m["RequestTag"].empty()) {
if (typeid(vector<boost::any>) == m["RequestTag"].type()) {
vector<CreateServiceInstanceShrinkRequestRequestTag> expect1;
for(auto item1:boost::any_cast<vector<boost::any>>(m["RequestTag"])){
if (typeid(map<string, boost::any>) == item1.type()) {
CreateServiceInstanceShrinkRequestRequestTag model2;
model2.fromMap(boost::any_cast<map<string, boost::any>>(item1));
expect1.push_back(model2);
}
}
requestTag = make_shared<vector<CreateServiceInstanceShrinkRequestRequestTag>>(expect1);
}
}
if (m.find("ResourceGroupId") != m.end() && !m["ResourceGroupId"].empty()) {
resourceGroupId = make_shared<string>(boost::any_cast<string>(m["ResourceGroupId"]));
}
if (m.find("ServiceId") != m.end() && !m["ServiceId"].empty()) {
serviceId = make_shared<string>(boost::any_cast<string>(m["ServiceId"]));
}
if (m.find("ServiceVersion") != m.end() && !m["ServiceVersion"].empty()) {
serviceVersion = make_shared<string>(boost::any_cast<string>(m["ServiceVersion"]));
}
if (m.find("TemplateName") != m.end() && !m["TemplateName"].empty()) {
templateName = make_shared<string>(boost::any_cast<string>(m["TemplateName"]));
}
}
virtual ~CreateServiceInstanceShrinkRequest() = default;
};
class CreateServiceInstanceResponseBody : public Darabonba::Model {
public:
shared_ptr<string> requestId{};
shared_ptr<string> serviceInstanceId{};
shared_ptr<string> status{};
CreateServiceInstanceResponseBody() {}
explicit CreateServiceInstanceResponseBody(const std::map<string, boost::any> &config) : Darabonba::Model(config) {
fromMap(config);
};
void validate() override {}
map<string, boost::any> toMap() override {
map<string, boost::any> res;
if (requestId) {
res["RequestId"] = boost::any(*requestId);
}
if (serviceInstanceId) {
res["ServiceInstanceId"] = boost::any(*serviceInstanceId);
}
if (status) {
res["Status"] = boost::any(*status);
}
return res;
}
void fromMap(map<string, boost::any> m) override {
if (m.find("RequestId") != m.end() && !m["RequestId"].empty()) {
requestId = make_shared<string>(boost::any_cast<string>(m["RequestId"]));
}
if (m.find("ServiceInstanceId") != m.end() && !m["ServiceInstanceId"].empty()) {
serviceInstanceId = make_shared<string>(boost::any_cast<string>(m["ServiceInstanceId"]));
}
if (m.find("Status") != m.end() && !m["Status"].empty()) {
status = make_shared<string>(boost::any_cast<string>(m["Status"]));
}
}
virtual ~CreateServiceInstanceResponseBody() = default;
};
class CreateServiceInstanceResponse : public Darabonba::Model {
public:
shared_ptr<map<string, string>> headers{};
shared_ptr<CreateServiceInstanceResponseBody> body{};
CreateServiceInstanceResponse() {}
explicit CreateServiceInstanceResponse(const std::map<string, boost::any> &config) : Darabonba::Model(config) {
fromMap(config);
};
void validate() override {
if (!headers) {
BOOST_THROW_EXCEPTION(boost::enable_error_info(std::runtime_error("headers is required.")));
}
if (!body) {
BOOST_THROW_EXCEPTION(boost::enable_error_info(std::runtime_error("body is required.")));
}
}
map<string, boost::any> toMap() override {
map<string, boost::any> res;
if (headers) {
res["headers"] = boost::any(*headers);
}
if (body) {
res["body"] = body ? boost::any(body->toMap()) : boost::any(map<string,boost::any>({}));
}
return res;
}
void fromMap(map<string, boost::any> m) override {
if (m.find("headers") != m.end() && !m["headers"].empty()) {
map<string, string> map1 = boost::any_cast<map<string, string>>(m["headers"]);
map<string, string> toMap1;
for (auto item:map1) {
toMap1[item.first] = item.second;
}
headers = make_shared<map<string, string>>(toMap1);
}
if (m.find("body") != m.end() && !m["body"].empty()) {
if (typeid(map<string, boost::any>) == m["body"].type()) {
CreateServiceInstanceResponseBody model1;
model1.fromMap(boost::any_cast<map<string, boost::any>>(m["body"]));
body = make_shared<CreateServiceInstanceResponseBody>(model1);
}
}
}
virtual ~CreateServiceInstanceResponse() = default;
};
class DeleteServiceInstancesRequest : public Darabonba::Model {
public:
shared_ptr<string> clientToken{};
shared_ptr<string> regionId{};
shared_ptr<vector<string>> serviceInstanceId{};
DeleteServiceInstancesRequest() {}
explicit DeleteServiceInstancesRequest(const std::map<string, boost::any> &config) : Darabonba::Model(config) {
fromMap(config);
};
void validate() override {}
map<string, boost::any> toMap() override {
map<string, boost::any> res;
if (clientToken) {
res["ClientToken"] = boost::any(*clientToken);
}
if (regionId) {
res["RegionId"] = boost::any(*regionId);
}
if (serviceInstanceId) {
res["ServiceInstanceId"] = boost::any(*serviceInstanceId);
}
return res;
}
void fromMap(map<string, boost::any> m) override {
if (m.find("ClientToken") != m.end() && !m["ClientToken"].empty()) {
clientToken = make_shared<string>(boost::any_cast<string>(m["ClientToken"]));
}
if (m.find("RegionId") != m.end() && !m["RegionId"].empty()) {
regionId = make_shared<string>(boost::any_cast<string>(m["RegionId"]));
}
if (m.find("ServiceInstanceId") != m.end() && !m["ServiceInstanceId"].empty()) {
vector<string> toVec1;
if (typeid(vector<boost::any>) == m["ServiceInstanceId"].type()) {
vector<boost::any> vec1 = boost::any_cast<vector<boost::any>>(m["ServiceInstanceId"]);
for (auto item:vec1) {
toVec1.push_back(boost::any_cast<string>(item));
}
}
serviceInstanceId = make_shared<vector<string>>(toVec1);
}
}
virtual ~DeleteServiceInstancesRequest() = default;
};
class DeleteServiceInstancesResponseBody : public Darabonba::Model {
public:
shared_ptr<string> requestId{};
DeleteServiceInstancesResponseBody() {}
explicit DeleteServiceInstancesResponseBody(const std::map<string, boost::any> &config) : Darabonba::Model(config) {
fromMap(config);
};
void validate() override {}
map<string, boost::any> toMap() override {
map<string, boost::any> res;
if (requestId) {
res["RequestId"] = boost::any(*requestId);
}
return res;
}
void fromMap(map<string, boost::any> m) override {
if (m.find("RequestId") != m.end() && !m["RequestId"].empty()) {
requestId = make_shared<string>(boost::any_cast<string>(m["RequestId"]));
}
}
virtual ~DeleteServiceInstancesResponseBody() = default;
};
class DeleteServiceInstancesResponse : public Darabonba::Model {
public:
shared_ptr<map<string, string>> headers{};
shared_ptr<DeleteServiceInstancesResponseBody> body{};
DeleteServiceInstancesResponse() {}
explicit DeleteServiceInstancesResponse(const std::map<string, boost::any> &config) : Darabonba::Model(config) {
fromMap(config);
};
void validate() override {
if (!headers) {
BOOST_THROW_EXCEPTION(boost::enable_error_info(std::runtime_error("headers is required.")));
}
if (!body) {
BOOST_THROW_EXCEPTION(boost::enable_error_info(std::runtime_error("body is required.")));
}
}
map<string, boost::any> toMap() override {
map<string, boost::any> res;
if (headers) {
res["headers"] = boost::any(*headers);
}
if (body) {
res["body"] = body ? boost::any(body->toMap()) : boost::any(map<string,boost::any>({}));
}
return res;
}
void fromMap(map<string, boost::any> m) override {
if (m.find("headers") != m.end() && !m["headers"].empty()) {
map<string, string> map1 = boost::any_cast<map<string, string>>(m["headers"]);
map<string, string> toMap1;
for (auto item:map1) {
toMap1[item.first] = item.second;
}
headers = make_shared<map<string, string>>(toMap1);
}
if (m.find("body") != m.end() && !m["body"].empty()) {
if (typeid(map<string, boost::any>) == m["body"].type()) {
DeleteServiceInstancesResponseBody model1;
model1.fromMap(boost::any_cast<map<string, boost::any>>(m["body"]));
body = make_shared<DeleteServiceInstancesResponseBody>(model1);
}
}
}
virtual ~DeleteServiceInstancesResponse() = default;
};
class GetServiceInstanceRequest : public Darabonba::Model {
public:
shared_ptr<string> regionId{};
shared_ptr<string> serviceInstanceId{};
GetServiceInstanceRequest() {}
explicit GetServiceInstanceRequest(const std::map<string, boost::any> &config) : Darabonba::Model(config) {
fromMap(config);
};
void validate() override {}
map<string, boost::any> toMap() override {
map<string, boost::any> res;
if (regionId) {
res["RegionId"] = boost::any(*regionId);
}
if (serviceInstanceId) {
res["ServiceInstanceId"] = boost::any(*serviceInstanceId);
}
return res;
}
void fromMap(map<string, boost::any> m) override {
if (m.find("RegionId") != m.end() && !m["RegionId"].empty()) {
regionId = make_shared<string>(boost::any_cast<string>(m["RegionId"]));
}
if (m.find("ServiceInstanceId") != m.end() && !m["ServiceInstanceId"].empty()) {
serviceInstanceId = make_shared<string>(boost::any_cast<string>(m["ServiceInstanceId"]));
}
}
virtual ~GetServiceInstanceRequest() = default;
};
class GetServiceInstanceResponseBodyServiceServiceInfos : public Darabonba::Model {
public:
shared_ptr<string> image{};
shared_ptr<string> locale{};
shared_ptr<string> name{};
shared_ptr<string> shortDescription{};
GetServiceInstanceResponseBodyServiceServiceInfos() {}
explicit GetServiceInstanceResponseBodyServiceServiceInfos(const std::map<string, boost::any> &config) : Darabonba::Model(config) {
fromMap(config);
};
void validate() override {}
map<string, boost::any> toMap() override {
map<string, boost::any> res;
if (image) {
res["Image"] = boost::any(*image);
}
if (locale) {
res["Locale"] = boost::any(*locale);
}
if (name) {
res["Name"] = boost::any(*name);
}
if (shortDescription) {
res["ShortDescription"] = boost::any(*shortDescription);
}
return res;
}
void fromMap(map<string, boost::any> m) override {
if (m.find("Image") != m.end() && !m["Image"].empty()) {
image = make_shared<string>(boost::any_cast<string>(m["Image"]));
}
if (m.find("Locale") != m.end() && !m["Locale"].empty()) {
locale = make_shared<string>(boost::any_cast<string>(m["Locale"]));
}
if (m.find("Name") != m.end() && !m["Name"].empty()) {
name = make_shared<string>(boost::any_cast<string>(m["Name"]));
}
if (m.find("ShortDescription") != m.end() && !m["ShortDescription"].empty()) {
shortDescription = make_shared<string>(boost::any_cast<string>(m["ShortDescription"]));
}
}
virtual ~GetServiceInstanceResponseBodyServiceServiceInfos() = default;
};
class GetServiceInstanceResponseBodyService : public Darabonba::Model {
public:
shared_ptr<string> deployMetadata{};
shared_ptr<string> deployType{};
shared_ptr<string> publishTime{};
shared_ptr<string> serviceId{};
shared_ptr<vector<GetServiceInstanceResponseBodyServiceServiceInfos>> serviceInfos{};
shared_ptr<string> serviceType{};
shared_ptr<string> status{};
shared_ptr<string> supplierName{};
shared_ptr<string> supplierUrl{};
shared_ptr<string> version{};
GetServiceInstanceResponseBodyService() {}
explicit GetServiceInstanceResponseBodyService(const std::map<string, boost::any> &config) : Darabonba::Model(config) {
fromMap(config);
};
void validate() override {}
map<string, boost::any> toMap() override {
map<string, boost::any> res;
if (deployMetadata) {
res["DeployMetadata"] = boost::any(*deployMetadata);
}
if (deployType) {
res["DeployType"] = boost::any(*deployType);
}
if (publishTime) {
res["PublishTime"] = boost::any(*publishTime);
}
if (serviceId) {
res["ServiceId"] = boost::any(*serviceId);
}
if (serviceInfos) {
vector<boost::any> temp1;
for(auto item1:*serviceInfos){
temp1.push_back(boost::any(item1.toMap()));
}
res["ServiceInfos"] = boost::any(temp1);
}
if (serviceType) {
res["ServiceType"] = boost::any(*serviceType);
}
if (status) {
res["Status"] = boost::any(*status);
}
if (supplierName) {
res["SupplierName"] = boost::any(*supplierName);
}
if (supplierUrl) {
res["SupplierUrl"] = boost::any(*supplierUrl);
}
if (version) {
res["Version"] = boost::any(*version);
}
return res;
}
void fromMap(map<string, boost::any> m) override {
if (m.find("DeployMetadata") != m.end() && !m["DeployMetadata"].empty()) {
deployMetadata = make_shared<string>(boost::any_cast<string>(m["DeployMetadata"]));
}
if (m.find("DeployType") != m.end() && !m["DeployType"].empty()) {
deployType = make_shared<string>(boost::any_cast<string>(m["DeployType"]));
}
if (m.find("PublishTime") != m.end() && !m["PublishTime"].empty()) {
publishTime = make_shared<string>(boost::any_cast<string>(m["PublishTime"]));
}
if (m.find("ServiceId") != m.end() && !m["ServiceId"].empty()) {
serviceId = make_shared<string>(boost::any_cast<string>(m["ServiceId"]));
}
if (m.find("ServiceInfos") != m.end() && !m["ServiceInfos"].empty()) {
if (typeid(vector<boost::any>) == m["ServiceInfos"].type()) {
vector<GetServiceInstanceResponseBodyServiceServiceInfos> expect1;
for(auto item1:boost::any_cast<vector<boost::any>>(m["ServiceInfos"])){
if (typeid(map<string, boost::any>) == item1.type()) {
GetServiceInstanceResponseBodyServiceServiceInfos model2;
model2.fromMap(boost::any_cast<map<string, boost::any>>(item1));
expect1.push_back(model2);
}
}
serviceInfos = make_shared<vector<GetServiceInstanceResponseBodyServiceServiceInfos>>(expect1);
}
}
if (m.find("ServiceType") != m.end() && !m["ServiceType"].empty()) {
serviceType = make_shared<string>(boost::any_cast<string>(m["ServiceType"]));
}
if (m.find("Status") != m.end() && !m["Status"].empty()) {
status = make_shared<string>(boost::any_cast<string>(m["Status"]));
}
if (m.find("SupplierName") != m.end() && !m["SupplierName"].empty()) {
supplierName = make_shared<string>(boost::any_cast<string>(m["SupplierName"]));
}
if (m.find("SupplierUrl") != m.end() && !m["SupplierUrl"].empty()) {
supplierUrl = make_shared<string>(boost::any_cast<string>(m["SupplierUrl"]));
}
if (m.find("Version") != m.end() && !m["Version"].empty()) {
version = make_shared<string>(boost::any_cast<string>(m["Version"]));
}
}
virtual ~GetServiceInstanceResponseBodyService() = default;
};
class GetServiceInstanceResponseBody : public Darabonba::Model {
public:
shared_ptr<string> createTime{};
shared_ptr<bool> enableInstanceOps{};
shared_ptr<bool> isOperated{};
shared_ptr<string> operatedServiceInstanceId{};
shared_ptr<string> operationEndTime{};
shared_ptr<string> operationStartTime{};
shared_ptr<string> outputs{};
shared_ptr<string> parameters{};
shared_ptr<long> progress{};
shared_ptr<string> requestId{};
shared_ptr<string> resources{};
shared_ptr<GetServiceInstanceResponseBodyService> service{};
shared_ptr<string> serviceInstanceId{};
shared_ptr<string> status{};
shared_ptr<string> statusDetail{};
shared_ptr<string> templateName{};
shared_ptr<string> updateTime{};
GetServiceInstanceResponseBody() {}
explicit GetServiceInstanceResponseBody(const std::map<string, boost::any> &config) : Darabonba::Model(config) {
fromMap(config);
};
void validate() override {}
map<string, boost::any> toMap() override {
map<string, boost::any> res;
if (createTime) {
res["CreateTime"] = boost::any(*createTime);
}
if (enableInstanceOps) {
res["EnableInstanceOps"] = boost::any(*enableInstanceOps);
}
if (isOperated) {
res["IsOperated"] = boost::any(*isOperated);
}
if (operatedServiceInstanceId) {
res["OperatedServiceInstanceId"] = boost::any(*operatedServiceInstanceId);
}
if (operationEndTime) {
res["OperationEndTime"] = boost::any(*operationEndTime);
}
if (operationStartTime) {
res["OperationStartTime"] = boost::any(*operationStartTime);
}
if (outputs) {
res["Outputs"] = boost::any(*outputs);
}
if (parameters) {
res["Parameters"] = boost::any(*parameters);
}
if (progress) {
res["Progress"] = boost::any(*progress);
}
if (requestId) {
res["RequestId"] = boost::any(*requestId);
}
if (resources) {
res["Resources"] = boost::any(*resources);
}
if (service) {
res["Service"] = service ? boost::any(service->toMap()) : boost::any(map<string,boost::any>({}));
}
if (serviceInstanceId) {
res["ServiceInstanceId"] = boost::any(*serviceInstanceId);
}
if (status) {
res["Status"] = boost::any(*status);
}
if (statusDetail) {
res["StatusDetail"] = boost::any(*statusDetail);
}
if (templateName) {
res["TemplateName"] = boost::any(*templateName);
}
if (updateTime) {
res["UpdateTime"] = boost::any(*updateTime);
}
return res;
}
void fromMap(map<string, boost::any> m) override {
if (m.find("CreateTime") != m.end() && !m["CreateTime"].empty()) {
createTime = make_shared<string>(boost::any_cast<string>(m["CreateTime"]));
}
if (m.find("EnableInstanceOps") != m.end() && !m["EnableInstanceOps"].empty()) {
enableInstanceOps = make_shared<bool>(boost::any_cast<bool>(m["EnableInstanceOps"]));
}
if (m.find("IsOperated") != m.end() && !m["IsOperated"].empty()) {
isOperated = make_shared<bool>(boost::any_cast<bool>(m["IsOperated"]));
}
if (m.find("OperatedServiceInstanceId") != m.end() && !m["OperatedServiceInstanceId"].empty()) {
operatedServiceInstanceId = make_shared<string>(boost::any_cast<string>(m["OperatedServiceInstanceId"]));
}
if (m.find("OperationEndTime") != m.end() && !m["OperationEndTime"].empty()) {
operationEndTime = make_shared<string>(boost::any_cast<string>(m["OperationEndTime"]));
}
if (m.find("OperationStartTime") != m.end() && !m["OperationStartTime"].empty()) {
operationStartTime = make_shared<string>(boost::any_cast<string>(m["OperationStartTime"]));
}
if (m.find("Outputs") != m.end() && !m["Outputs"].empty()) {
outputs = make_shared<string>(boost::any_cast<string>(m["Outputs"]));
}
if (m.find("Parameters") != m.end() && !m["Parameters"].empty()) {
parameters = make_shared<string>(boost::any_cast<string>(m["Parameters"]));
}
if (m.find("Progress") != m.end() && !m["Progress"].empty()) {
progress = make_shared<long>(boost::any_cast<long>(m["Progress"]));
}
if (m.find("RequestId") != m.end() && !m["RequestId"].empty()) {
requestId = make_shared<string>(boost::any_cast<string>(m["RequestId"]));
}
if (m.find("Resources") != m.end() && !m["Resources"].empty()) {
resources = make_shared<string>(boost::any_cast<string>(m["Resources"]));
}
if (m.find("Service") != m.end() && !m["Service"].empty()) {
if (typeid(map<string, boost::any>) == m["Service"].type()) {
GetServiceInstanceResponseBodyService model1;
model1.fromMap(boost::any_cast<map<string, boost::any>>(m["Service"]));
service = make_shared<GetServiceInstanceResponseBodyService>(model1);
}
}
if (m.find("ServiceInstanceId") != m.end() && !m["ServiceInstanceId"].empty()) {
serviceInstanceId = make_shared<string>(boost::any_cast<string>(m["ServiceInstanceId"]));
}
if (m.find("Status") != m.end() && !m["Status"].empty()) {
status = make_shared<string>(boost::any_cast<string>(m["Status"]));
}
if (m.find("StatusDetail") != m.end() && !m["StatusDetail"].empty()) {
statusDetail = make_shared<string>(boost::any_cast<string>(m["StatusDetail"]));
}
if (m.find("TemplateName") != m.end() && !m["TemplateName"].empty()) {
templateName = make_shared<string>(boost::any_cast<string>(m["TemplateName"]));
}
if (m.find("UpdateTime") != m.end() && !m["UpdateTime"].empty()) {
updateTime = make_shared<string>(boost::any_cast<string>(m["UpdateTime"]));
}
}
virtual ~GetServiceInstanceResponseBody() = default;
};
class GetServiceInstanceResponse : public Darabonba::Model {
public:
shared_ptr<map<string, string>> headers{};
shared_ptr<GetServiceInstanceResponseBody> body{};
GetServiceInstanceResponse() {}
explicit GetServiceInstanceResponse(const std::map<string, boost::any> &config) : Darabonba::Model(config) {
fromMap(config);
};
void validate() override {
if (!headers) {
BOOST_THROW_EXCEPTION(boost::enable_error_info(std::runtime_error("headers is required.")));
}
if (!body) {
BOOST_THROW_EXCEPTION(boost::enable_error_info(std::runtime_error("body is required.")));
}
}
map<string, boost::any> toMap() override {
map<string, boost::any> res;
if (headers) {
res["headers"] = boost::any(*headers);
}
if (body) {
res["body"] = body ? boost::any(body->toMap()) : boost::any(map<string,boost::any>({}));
}
return res;
}
void fromMap(map<string, boost::any> m) override {
if (m.find("headers") != m.end() && !m["headers"].empty()) {
map<string, string> map1 = boost::any_cast<map<string, string>>(m["headers"]);
map<string, string> toMap1;
for (auto item:map1) {
toMap1[item.first] = item.second;
}
headers = make_shared<map<string, string>>(toMap1);
}
if (m.find("body") != m.end() && !m["body"].empty()) {
if (typeid(map<string, boost::any>) == m["body"].type()) {
GetServiceInstanceResponseBody model1;
model1.fromMap(boost::any_cast<map<string, boost::any>>(m["body"]));
body = make_shared<GetServiceInstanceResponseBody>(model1);
}
}
}
virtual ~GetServiceInstanceResponse() = default;
};
class ListServiceInstanceLogsRequest : public Darabonba::Model {
public:
shared_ptr<string> maxResults{};
shared_ptr<string> nextToken{};
shared_ptr<string> regionId{};
shared_ptr<string> serviceInstanceId{};
ListServiceInstanceLogsRequest() {}
explicit ListServiceInstanceLogsRequest(const std::map<string, boost::any> &config) : Darabonba::Model(config) {
fromMap(config);
};
void validate() override {}
map<string, boost::any> toMap() override {
map<string, boost::any> res;
if (maxResults) {
res["MaxResults"] = boost::any(*maxResults);
}
if (nextToken) {
res["NextToken"] = boost::any(*nextToken);
}
if (regionId) {
res["RegionId"] = boost::any(*regionId);
}
if (serviceInstanceId) {
res["ServiceInstanceId"] = boost::any(*serviceInstanceId);
}
return res;
}
void fromMap(map<string, boost::any> m) override {
if (m.find("MaxResults") != m.end() && !m["MaxResults"].empty()) {
maxResults = make_shared<string>(boost::any_cast<string>(m["MaxResults"]));
}
if (m.find("NextToken") != m.end() && !m["NextToken"].empty()) {
nextToken = make_shared<string>(boost::any_cast<string>(m["NextToken"]));
}
if (m.find("RegionId") != m.end() && !m["RegionId"].empty()) {
regionId = make_shared<string>(boost::any_cast<string>(m["RegionId"]));
}
if (m.find("ServiceInstanceId") != m.end() && !m["ServiceInstanceId"].empty()) {
serviceInstanceId = make_shared<string>(boost::any_cast<string>(m["ServiceInstanceId"]));
}
}
virtual ~ListServiceInstanceLogsRequest() = default;
};
class ListServiceInstanceLogsResponseBodyServiceInstancesLogs : public Darabonba::Model {
public:
shared_ptr<string> content{};
shared_ptr<string> phase{};
shared_ptr<string> serviceInstanceId{};
shared_ptr<string> source{};
shared_ptr<string> timestamp{};
ListServiceInstanceLogsResponseBodyServiceInstancesLogs() {}
explicit ListServiceInstanceLogsResponseBodyServiceInstancesLogs(const std::map<string, boost::any> &config) : Darabonba::Model(config) {
fromMap(config);
};
void validate() override {}
map<string, boost::any> toMap() override {
map<string, boost::any> res;
if (content) {
res["Content"] = boost::any(*content);
}
if (phase) {
res["Phase"] = boost::any(*phase);
}
if (serviceInstanceId) {
res["ServiceInstanceId"] = boost::any(*serviceInstanceId);
}
if (source) {
res["Source"] = boost::any(*source);
}
if (timestamp) {
res["Timestamp"] = boost::any(*timestamp);
}
return res;
}
void fromMap(map<string, boost::any> m) override {
if (m.find("Content") != m.end() && !m["Content"].empty()) {
content = make_shared<string>(boost::any_cast<string>(m["Content"]));
}
if (m.find("Phase") != m.end() && !m["Phase"].empty()) {
phase = make_shared<string>(boost::any_cast<string>(m["Phase"]));
}
if (m.find("ServiceInstanceId") != m.end() && !m["ServiceInstanceId"].empty()) {
serviceInstanceId = make_shared<string>(boost::any_cast<string>(m["ServiceInstanceId"]));
}
if (m.find("Source") != m.end() && !m["Source"].empty()) {
source = make_shared<string>(boost::any_cast<string>(m["Source"]));
}
if (m.find("Timestamp") != m.end() && !m["Timestamp"].empty()) {
timestamp = make_shared<string>(boost::any_cast<string>(m["Timestamp"]));
}
}
virtual ~ListServiceInstanceLogsResponseBodyServiceInstancesLogs() = default;
};
class ListServiceInstanceLogsResponseBody : public Darabonba::Model {
public:
shared_ptr<string> maxResults{};
shared_ptr<string> nextToken{};
shared_ptr<string> requestId{};
shared_ptr<vector<ListServiceInstanceLogsResponseBodyServiceInstancesLogs>> serviceInstancesLogs{};
ListServiceInstanceLogsResponseBody() {}
explicit ListServiceInstanceLogsResponseBody(const std::map<string, boost::any> &config) : Darabonba::Model(config) {
fromMap(config);
};
void validate() override {}
map<string, boost::any> toMap() override {
map<string, boost::any> res;
if (maxResults) {
res["MaxResults"] = boost::any(*maxResults);
}
if (nextToken) {
res["NextToken"] = boost::any(*nextToken);
}
if (requestId) {
res["RequestId"] = boost::any(*requestId);
}
if (serviceInstancesLogs) {
vector<boost::any> temp1;
for(auto item1:*serviceInstancesLogs){
temp1.push_back(boost::any(item1.toMap()));
}
res["ServiceInstancesLogs"] = boost::any(temp1);
}
return res;
}
void fromMap(map<string, boost::any> m) override {
if (m.find("MaxResults") != m.end() && !m["MaxResults"].empty()) {
maxResults = make_shared<string>(boost::any_cast<string>(m["MaxResults"]));
}
if (m.find("NextToken") != m.end() && !m["NextToken"].empty()) {
nextToken = make_shared<string>(boost::any_cast<string>(m["NextToken"]));
}
if (m.find("RequestId") != m.end() && !m["RequestId"].empty()) {
requestId = make_shared<string>(boost::any_cast<string>(m["RequestId"]));
}
if (m.find("ServiceInstancesLogs") != m.end() && !m["ServiceInstancesLogs"].empty()) {
if (typeid(vector<boost::any>) == m["ServiceInstancesLogs"].type()) {
vector<ListServiceInstanceLogsResponseBodyServiceInstancesLogs> expect1;
for(auto item1:boost::any_cast<vector<boost::any>>(m["ServiceInstancesLogs"])){
if (typeid(map<string, boost::any>) == item1.type()) {
ListServiceInstanceLogsResponseBodyServiceInstancesLogs model2;
model2.fromMap(boost::any_cast<map<string, boost::any>>(item1));
expect1.push_back(model2);
}
}
serviceInstancesLogs = make_shared<vector<ListServiceInstanceLogsResponseBodyServiceInstancesLogs>>(expect1);
}
}
}
virtual ~ListServiceInstanceLogsResponseBody() = default;
};
class ListServiceInstanceLogsResponse : public Darabonba::Model {
public:
shared_ptr<map<string, string>> headers{};
shared_ptr<ListServiceInstanceLogsResponseBody> body{};
ListServiceInstanceLogsResponse() {}
explicit ListServiceInstanceLogsResponse(const std::map<string, boost::any> &config) : Darabonba::Model(config) {
fromMap(config);
};
void validate() override {
if (!headers) {
BOOST_THROW_EXCEPTION(boost::enable_error_info(std::runtime_error("headers is required.")));
}
if (!body) {
BOOST_THROW_EXCEPTION(boost::enable_error_info(std::runtime_error("body is required.")));
}
}
map<string, boost::any> toMap() override {
map<string, boost::any> res;
if (headers) {
res["headers"] = boost::any(*headers);
}
if (body) {
res["body"] = body ? boost::any(body->toMap()) : boost::any(map<string,boost::any>({}));
}
return res;
}
void fromMap(map<string, boost::any> m) override {
if (m.find("headers") != m.end() && !m["headers"].empty()) {
map<string, string> map1 = boost::any_cast<map<string, string>>(m["headers"]);
map<string, string> toMap1;
for (auto item:map1) {
toMap1[item.first] = item.second;
}
headers = make_shared<map<string, string>>(toMap1);
}
if (m.find("body") != m.end() && !m["body"].empty()) {
if (typeid(map<string, boost::any>) == m["body"].type()) {
ListServiceInstanceLogsResponseBody model1;
model1.fromMap(boost::any_cast<map<string, boost::any>>(m["body"]));
body = make_shared<ListServiceInstanceLogsResponseBody>(model1);
}
}
}
virtual ~ListServiceInstanceLogsResponse() = default;
};
class ListServiceInstanceResourcesRequest : public Darabonba::Model {
public:
shared_ptr<string> maxResults{};
shared_ptr<string> nextToken{};
shared_ptr<string> serviceInstanceId{};
ListServiceInstanceResourcesRequest() {}
explicit ListServiceInstanceResourcesRequest(const std::map<string, boost::any> &config) : Darabonba::Model(config) {
fromMap(config);
};
void validate() override {}
map<string, boost::any> toMap() override {
map<string, boost::any> res;
if (maxResults) {
res["MaxResults"] = boost::any(*maxResults);
}
if (nextToken) {
res["NextToken"] = boost::any(*nextToken);
}
if (serviceInstanceId) {
res["ServiceInstanceId"] = boost::any(*serviceInstanceId);
}
return res;
}
void fromMap(map<string, boost::any> m) override {
if (m.find("MaxResults") != m.end() && !m["MaxResults"].empty()) {
maxResults = make_shared<string>(boost::any_cast<string>(m["MaxResults"]));
}
if (m.find("NextToken") != m.end() && !m["NextToken"].empty()) {
nextToken = make_shared<string>(boost::any_cast<string>(m["NextToken"]));
}
if (m.find("ServiceInstanceId") != m.end() && !m["ServiceInstanceId"].empty()) {
serviceInstanceId = make_shared<string>(boost::any_cast<string>(m["ServiceInstanceId"]));
}
}
virtual ~ListServiceInstanceResourcesRequest() = default;
};
class ListServiceInstanceResourcesResponseBodyResources : public Darabonba::Model {
public:
shared_ptr<string> resourceARN{};
ListServiceInstanceResourcesResponseBodyResources() {}
explicit ListServiceInstanceResourcesResponseBodyResources(const std::map<string, boost::any> &config) : Darabonba::Model(config) {
fromMap(config);
};
void validate() override {}
map<string, boost::any> toMap() override {
map<string, boost::any> res;
if (resourceARN) {
res["ResourceARN"] = boost::any(*resourceARN);
}
return res;
}
void fromMap(map<string, boost::any> m) override {
if (m.find("ResourceARN") != m.end() && !m["ResourceARN"].empty()) {
resourceARN = make_shared<string>(boost::any_cast<string>(m["ResourceARN"]));
}
}
virtual ~ListServiceInstanceResourcesResponseBodyResources() = default;
};
class ListServiceInstanceResourcesResponseBody : public Darabonba::Model {
public:
shared_ptr<string> maxResults{};
shared_ptr<string> nextToken{};
shared_ptr<string> requestId{};
shared_ptr<vector<ListServiceInstanceResourcesResponseBodyResources>> resources{};
ListServiceInstanceResourcesResponseBody() {}
explicit ListServiceInstanceResourcesResponseBody(const std::map<string, boost::any> &config) : Darabonba::Model(config) {
fromMap(config);
};
void validate() override {}
map<string, boost::any> toMap() override {
map<string, boost::any> res;
if (maxResults) {
res["MaxResults"] = boost::any(*maxResults);
}
if (nextToken) {
res["NextToken"] = boost::any(*nextToken);
}
if (requestId) {
res["RequestId"] = boost::any(*requestId);
}
if (resources) {
vector<boost::any> temp1;
for(auto item1:*resources){
temp1.push_back(boost::any(item1.toMap()));
}
res["Resources"] = boost::any(temp1);
}
return res;
}
void fromMap(map<string, boost::any> m) override {
if (m.find("MaxResults") != m.end() && !m["MaxResults"].empty()) {
maxResults = make_shared<string>(boost::any_cast<string>(m["MaxResults"]));
}
if (m.find("NextToken") != m.end() && !m["NextToken"].empty()) {
nextToken = make_shared<string>(boost::any_cast<string>(m["NextToken"]));
}
if (m.find("RequestId") != m.end() && !m["RequestId"].empty()) {
requestId = make_shared<string>(boost::any_cast<string>(m["RequestId"]));
}
if (m.find("Resources") != m.end() && !m["Resources"].empty()) {
if (typeid(vector<boost::any>) == m["Resources"].type()) {
vector<ListServiceInstanceResourcesResponseBodyResources> expect1;
for(auto item1:boost::any_cast<vector<boost::any>>(m["Resources"])){
if (typeid(map<string, boost::any>) == item1.type()) {
ListServiceInstanceResourcesResponseBodyResources model2;
model2.fromMap(boost::any_cast<map<string, boost::any>>(item1));
expect1.push_back(model2);
}
}
resources = make_shared<vector<ListServiceInstanceResourcesResponseBodyResources>>(expect1);
}
}
}
virtual ~ListServiceInstanceResourcesResponseBody() = default;
};
class ListServiceInstanceResourcesResponse : public Darabonba::Model {
public:
shared_ptr<map<string, string>> headers{};
shared_ptr<ListServiceInstanceResourcesResponseBody> body{};
ListServiceInstanceResourcesResponse() {}
explicit ListServiceInstanceResourcesResponse(const std::map<string, boost::any> &config) : Darabonba::Model(config) {
fromMap(config);
};
void validate() override {
if (!headers) {
BOOST_THROW_EXCEPTION(boost::enable_error_info(std::runtime_error("headers is required.")));
}
if (!body) {
BOOST_THROW_EXCEPTION(boost::enable_error_info(std::runtime_error("body is required.")));
}
}
map<string, boost::any> toMap() override {
map<string, boost::any> res;
if (headers) {
res["headers"] = boost::any(*headers);
}
if (body) {
res["body"] = body ? boost::any(body->toMap()) : boost::any(map<string,boost::any>({}));
}
return res;
}
void fromMap(map<string, boost::any> m) override {
if (m.find("headers") != m.end() && !m["headers"].empty()) {
map<string, string> map1 = boost::any_cast<map<string, string>>(m["headers"]);
map<string, string> toMap1;
for (auto item:map1) {
toMap1[item.first] = item.second;
}
headers = make_shared<map<string, string>>(toMap1);
}
if (m.find("body") != m.end() && !m["body"].empty()) {
if (typeid(map<string, boost::any>) == m["body"].type()) {
ListServiceInstanceResourcesResponseBody model1;
model1.fromMap(boost::any_cast<map<string, boost::any>>(m["body"]));
body = make_shared<ListServiceInstanceResourcesResponseBody>(model1);
}
}
}
virtual ~ListServiceInstanceResourcesResponse() = default;
};
class ListServiceInstancesRequestFilter : public Darabonba::Model {
public:
shared_ptr<string> name{};
shared_ptr<vector<string>> value{};
ListServiceInstancesRequestFilter() {}
explicit ListServiceInstancesRequestFilter(const std::map<string, boost::any> &config) : Darabonba::Model(config) {
fromMap(config);
};
void validate() override {}
map<string, boost::any> toMap() override {
map<string, boost::any> res;
if (name) {
res["Name"] = boost::any(*name);
}
if (value) {
res["Value"] = boost::any(*value);
}
return res;
}
void fromMap(map<string, boost::any> m) override {
if (m.find("Name") != m.end() && !m["Name"].empty()) {
name = make_shared<string>(boost::any_cast<string>(m["Name"]));
}
if (m.find("Value") != m.end() && !m["Value"].empty()) {
vector<string> toVec1;
if (typeid(vector<boost::any>) == m["Value"].type()) {
vector<boost::any> vec1 = boost::any_cast<vector<boost::any>>(m["Value"]);
for (auto item:vec1) {
toVec1.push_back(boost::any_cast<string>(item));
}
}
value = make_shared<vector<string>>(toVec1);
}
}
virtual ~ListServiceInstancesRequestFilter() = default;
};
class ListServiceInstancesRequestRequestTag : public Darabonba::Model {
public:
shared_ptr<string> key{};
shared_ptr<string> value{};
ListServiceInstancesRequestRequestTag() {}
explicit ListServiceInstancesRequestRequestTag(const std::map<string, boost::any> &config) : Darabonba::Model(config) {
fromMap(config);
};
void validate() override {}
map<string, boost::any> toMap() override {
map<string, boost::any> res;
if (key) {
res["Key"] = boost::any(*key);
}
if (value) {
res["Value"] = boost::any(*value);
}
return res;
}
void fromMap(map<string, boost::any> m) override {
if (m.find("Key") != m.end() && !m["Key"].empty()) {
key = make_shared<string>(boost::any_cast<string>(m["Key"]));
}
if (m.find("Value") != m.end() && !m["Value"].empty()) {
value = make_shared<string>(boost::any_cast<string>(m["Value"]));
}
}
virtual ~ListServiceInstancesRequestRequestTag() = default;
};
class ListServiceInstancesRequest : public Darabonba::Model {
public:
shared_ptr<vector<ListServiceInstancesRequestFilter>> filter{};
shared_ptr<string> maxResults{};
shared_ptr<string> nextToken{};
shared_ptr<string> regionId{};
shared_ptr<vector<ListServiceInstancesRequestRequestTag>> requestTag{};
ListServiceInstancesRequest() {}
explicit ListServiceInstancesRequest(const std::map<string, boost::any> &config) : Darabonba::Model(config) {
fromMap(config);
};
void validate() override {}
map<string, boost::any> toMap() override {
map<string, boost::any> res;
if (filter) {
vector<boost::any> temp1;
for(auto item1:*filter){
temp1.push_back(boost::any(item1.toMap()));
}
res["Filter"] = boost::any(temp1);
}
if (maxResults) {
res["MaxResults"] = boost::any(*maxResults);
}
if (nextToken) {
res["NextToken"] = boost::any(*nextToken);
}
if (regionId) {
res["RegionId"] = boost::any(*regionId);
}
if (requestTag) {
vector<boost::any> temp1;
for(auto item1:*requestTag){
temp1.push_back(boost::any(item1.toMap()));
}
res["RequestTag"] = boost::any(temp1);
}
return res;
}
void fromMap(map<string, boost::any> m) override {
if (m.find("Filter") != m.end() && !m["Filter"].empty()) {
if (typeid(vector<boost::any>) == m["Filter"].type()) {
vector<ListServiceInstancesRequestFilter> expect1;
for(auto item1:boost::any_cast<vector<boost::any>>(m["Filter"])){
if (typeid(map<string, boost::any>) == item1.type()) {
ListServiceInstancesRequestFilter model2;
model2.fromMap(boost::any_cast<map<string, boost::any>>(item1));
expect1.push_back(model2);
}
}
filter = make_shared<vector<ListServiceInstancesRequestFilter>>(expect1);
}
}
if (m.find("MaxResults") != m.end() && !m["MaxResults"].empty()) {
maxResults = make_shared<string>(boost::any_cast<string>(m["MaxResults"]));
}
if (m.find("NextToken") != m.end() && !m["NextToken"].empty()) {
nextToken = make_shared<string>(boost::any_cast<string>(m["NextToken"]));
}
if (m.find("RegionId") != m.end() && !m["RegionId"].empty()) {
regionId = make_shared<string>(boost::any_cast<string>(m["RegionId"]));
}
if (m.find("RequestTag") != m.end() && !m["RequestTag"].empty()) {
if (typeid(vector<boost::any>) == m["RequestTag"].type()) {
vector<ListServiceInstancesRequestRequestTag> expect1;
for(auto item1:boost::any_cast<vector<boost::any>>(m["RequestTag"])){
if (typeid(map<string, boost::any>) == item1.type()) {
ListServiceInstancesRequestRequestTag model2;
model2.fromMap(boost::any_cast<map<string, boost::any>>(item1));
expect1.push_back(model2);
}
}
requestTag = make_shared<vector<ListServiceInstancesRequestRequestTag>>(expect1);
}
}
}
virtual ~ListServiceInstancesRequest() = default;
};
class ListServiceInstancesResponseBodyServiceInstancesServiceServiceInfos : public Darabonba::Model {
public:
shared_ptr<string> image{};
shared_ptr<string> locale{};
shared_ptr<string> name{};
shared_ptr<string> shortDescription{};
ListServiceInstancesResponseBodyServiceInstancesServiceServiceInfos() {}
explicit ListServiceInstancesResponseBodyServiceInstancesServiceServiceInfos(const std::map<string, boost::any> &config) : Darabonba::Model(config) {
fromMap(config);
};
void validate() override {}
map<string, boost::any> toMap() override {
map<string, boost::any> res;
if (image) {
res["Image"] = boost::any(*image);
}
if (locale) {
res["Locale"] = boost::any(*locale);
}
if (name) {
res["Name"] = boost::any(*name);
}
if (shortDescription) {
res["ShortDescription"] = boost::any(*shortDescription);
}
return res;
}
void fromMap(map<string, boost::any> m) override {
if (m.find("Image") != m.end() && !m["Image"].empty()) {
image = make_shared<string>(boost::any_cast<string>(m["Image"]));
}
if (m.find("Locale") != m.end() && !m["Locale"].empty()) {
locale = make_shared<string>(boost::any_cast<string>(m["Locale"]));
}
if (m.find("Name") != m.end() && !m["Name"].empty()) {
name = make_shared<string>(boost::any_cast<string>(m["Name"]));
}
if (m.find("ShortDescription") != m.end() && !m["ShortDescription"].empty()) {
shortDescription = make_shared<string>(boost::any_cast<string>(m["ShortDescription"]));
}
}
virtual ~ListServiceInstancesResponseBodyServiceInstancesServiceServiceInfos() = default;
};
class ListServiceInstancesResponseBodyServiceInstancesService : public Darabonba::Model {
public:
shared_ptr<string> deployType{};
shared_ptr<string> publishTime{};
shared_ptr<string> serviceId{};
shared_ptr<vector<ListServiceInstancesResponseBodyServiceInstancesServiceServiceInfos>> serviceInfos{};
shared_ptr<string> serviceType{};
shared_ptr<string> status{};
shared_ptr<string> supplierName{};
shared_ptr<string> supplierUrl{};
shared_ptr<string> version{};
ListServiceInstancesResponseBodyServiceInstancesService() {}
explicit ListServiceInstancesResponseBodyServiceInstancesService(const std::map<string, boost::any> &config) : Darabonba::Model(config) {
fromMap(config);
};
void validate() override {}
map<string, boost::any> toMap() override {
map<string, boost::any> res;
if (deployType) {
res["DeployType"] = boost::any(*deployType);
}
if (publishTime) {
res["PublishTime"] = boost::any(*publishTime);
}
if (serviceId) {
res["ServiceId"] = boost::any(*serviceId);
}
if (serviceInfos) {
vector<boost::any> temp1;
for(auto item1:*serviceInfos){
temp1.push_back(boost::any(item1.toMap()));
}
res["ServiceInfos"] = boost::any(temp1);
}
if (serviceType) {
res["ServiceType"] = boost::any(*serviceType);
}
if (status) {
res["Status"] = boost::any(*status);
}
if (supplierName) {
res["SupplierName"] = boost::any(*supplierName);
}
if (supplierUrl) {
res["SupplierUrl"] = boost::any(*supplierUrl);
}
if (version) {
res["Version"] = boost::any(*version);
}
return res;
}
void fromMap(map<string, boost::any> m) override {
if (m.find("DeployType") != m.end() && !m["DeployType"].empty()) {
deployType = make_shared<string>(boost::any_cast<string>(m["DeployType"]));
}
if (m.find("PublishTime") != m.end() && !m["PublishTime"].empty()) {
publishTime = make_shared<string>(boost::any_cast<string>(m["PublishTime"]));
}
if (m.find("ServiceId") != m.end() && !m["ServiceId"].empty()) {
serviceId = make_shared<string>(boost::any_cast<string>(m["ServiceId"]));
}
if (m.find("ServiceInfos") != m.end() && !m["ServiceInfos"].empty()) {
if (typeid(vector<boost::any>) == m["ServiceInfos"].type()) {
vector<ListServiceInstancesResponseBodyServiceInstancesServiceServiceInfos> expect1;
for(auto item1:boost::any_cast<vector<boost::any>>(m["ServiceInfos"])){
if (typeid(map<string, boost::any>) == item1.type()) {
ListServiceInstancesResponseBodyServiceInstancesServiceServiceInfos model2;
model2.fromMap(boost::any_cast<map<string, boost::any>>(item1));
expect1.push_back(model2);
}
}
serviceInfos = make_shared<vector<ListServiceInstancesResponseBodyServiceInstancesServiceServiceInfos>>(expect1);
}
}
if (m.find("ServiceType") != m.end() && !m["ServiceType"].empty()) {
serviceType = make_shared<string>(boost::any_cast<string>(m["ServiceType"]));
}
if (m.find("Status") != m.end() && !m["Status"].empty()) {
status = make_shared<string>(boost::any_cast<string>(m["Status"]));
}
if (m.find("SupplierName") != m.end() && !m["SupplierName"].empty()) {
supplierName = make_shared<string>(boost::any_cast<string>(m["SupplierName"]));
}
if (m.find("SupplierUrl") != m.end() && !m["SupplierUrl"].empty()) {
supplierUrl = make_shared<string>(boost::any_cast<string>(m["SupplierUrl"]));
}
if (m.find("Version") != m.end() && !m["Version"].empty()) {
version = make_shared<string>(boost::any_cast<string>(m["Version"]));
}
}
virtual ~ListServiceInstancesResponseBodyServiceInstancesService() = default;
};
class ListServiceInstancesResponseBodyServiceInstances : public Darabonba::Model {
public:
shared_ptr<string> createTime{};
shared_ptr<bool> enableInstanceOps{};
shared_ptr<string> operatedServiceInstanceId{};
shared_ptr<string> operationEndTime{};
shared_ptr<string> operationStartTime{};
shared_ptr<string> outputs{};
shared_ptr<string> parameters{};
shared_ptr<long> progress{};
shared_ptr<string> resources{};
shared_ptr<ListServiceInstancesResponseBodyServiceInstancesService> service{};
shared_ptr<string> serviceInstanceId{};
shared_ptr<string> status{};
shared_ptr<string> statusDetail{};
shared_ptr<string> templateName{};
shared_ptr<string> updateTime{};
ListServiceInstancesResponseBodyServiceInstances() {}
explicit ListServiceInstancesResponseBodyServiceInstances(const std::map<string, boost::any> &config) : Darabonba::Model(config) {
fromMap(config);
};
void validate() override {}
map<string, boost::any> toMap() override {
map<string, boost::any> res;
if (createTime) {
res["CreateTime"] = boost::any(*createTime);
}
if (enableInstanceOps) {
res["EnableInstanceOps"] = boost::any(*enableInstanceOps);
}
if (operatedServiceInstanceId) {
res["OperatedServiceInstanceId"] = boost::any(*operatedServiceInstanceId);
}
if (operationEndTime) {
res["OperationEndTime"] = boost::any(*operationEndTime);
}
if (operationStartTime) {
res["OperationStartTime"] = boost::any(*operationStartTime);
}
if (outputs) {
res["Outputs"] = boost::any(*outputs);
}
if (parameters) {
res["Parameters"] = boost::any(*parameters);
}
if (progress) {
res["Progress"] = boost::any(*progress);
}
if (resources) {
res["Resources"] = boost::any(*resources);
}
if (service) {
res["Service"] = service ? boost::any(service->toMap()) : boost::any(map<string,boost::any>({}));
}
if (serviceInstanceId) {
res["ServiceInstanceId"] = boost::any(*serviceInstanceId);
}
if (status) {
res["Status"] = boost::any(*status);
}
if (statusDetail) {
res["StatusDetail"] = boost::any(*statusDetail);
}
if (templateName) {
res["TemplateName"] = boost::any(*templateName);
}
if (updateTime) {
res["UpdateTime"] = boost::any(*updateTime);
}
return res;
}
void fromMap(map<string, boost::any> m) override {
if (m.find("CreateTime") != m.end() && !m["CreateTime"].empty()) {
createTime = make_shared<string>(boost::any_cast<string>(m["CreateTime"]));
}
if (m.find("EnableInstanceOps") != m.end() && !m["EnableInstanceOps"].empty()) {
enableInstanceOps = make_shared<bool>(boost::any_cast<bool>(m["EnableInstanceOps"]));
}
if (m.find("OperatedServiceInstanceId") != m.end() && !m["OperatedServiceInstanceId"].empty()) {
operatedServiceInstanceId = make_shared<string>(boost::any_cast<string>(m["OperatedServiceInstanceId"]));
}
if (m.find("OperationEndTime") != m.end() && !m["OperationEndTime"].empty()) {
operationEndTime = make_shared<string>(boost::any_cast<string>(m["OperationEndTime"]));
}
if (m.find("OperationStartTime") != m.end() && !m["OperationStartTime"].empty()) {
operationStartTime = make_shared<string>(boost::any_cast<string>(m["OperationStartTime"]));
}
if (m.find("Outputs") != m.end() && !m["Outputs"].empty()) {
outputs = make_shared<string>(boost::any_cast<string>(m["Outputs"]));
}
if (m.find("Parameters") != m.end() && !m["Parameters"].empty()) {
parameters = make_shared<string>(boost::any_cast<string>(m["Parameters"]));
}
if (m.find("Progress") != m.end() && !m["Progress"].empty()) {
progress = make_shared<long>(boost::any_cast<long>(m["Progress"]));
}
if (m.find("Resources") != m.end() && !m["Resources"].empty()) {
resources = make_shared<string>(boost::any_cast<string>(m["Resources"]));
}
if (m.find("Service") != m.end() && !m["Service"].empty()) {
if (typeid(map<string, boost::any>) == m["Service"].type()) {
ListServiceInstancesResponseBodyServiceInstancesService model1;
model1.fromMap(boost::any_cast<map<string, boost::any>>(m["Service"]));
service = make_shared<ListServiceInstancesResponseBodyServiceInstancesService>(model1);
}
}
if (m.find("ServiceInstanceId") != m.end() && !m["ServiceInstanceId"].empty()) {
serviceInstanceId = make_shared<string>(boost::any_cast<string>(m["ServiceInstanceId"]));
}
if (m.find("Status") != m.end() && !m["Status"].empty()) {
status = make_shared<string>(boost::any_cast<string>(m["Status"]));
}
if (m.find("StatusDetail") != m.end() && !m["StatusDetail"].empty()) {
statusDetail = make_shared<string>(boost::any_cast<string>(m["StatusDetail"]));
}
if (m.find("TemplateName") != m.end() && !m["TemplateName"].empty()) {
templateName = make_shared<string>(boost::any_cast<string>(m["TemplateName"]));
}
if (m.find("UpdateTime") != m.end() && !m["UpdateTime"].empty()) {
updateTime = make_shared<string>(boost::any_cast<string>(m["UpdateTime"]));
}
}
virtual ~ListServiceInstancesResponseBodyServiceInstances() = default;
};
class ListServiceInstancesResponseBody : public Darabonba::Model {
public:
shared_ptr<string> maxResults{};
shared_ptr<string> nextToken{};
shared_ptr<string> requestId{};
shared_ptr<vector<ListServiceInstancesResponseBodyServiceInstances>> serviceInstances{};
shared_ptr<long> totalCount{};
ListServiceInstancesResponseBody() {}
explicit ListServiceInstancesResponseBody(const std::map<string, boost::any> &config) : Darabonba::Model(config) {
fromMap(config);
};
void validate() override {}
map<string, boost::any> toMap() override {
map<string, boost::any> res;
if (maxResults) {
res["MaxResults"] = boost::any(*maxResults);
}
if (nextToken) {
res["NextToken"] = boost::any(*nextToken);
}
if (requestId) {
res["RequestId"] = boost::any(*requestId);
}
if (serviceInstances) {
vector<boost::any> temp1;
for(auto item1:*serviceInstances){
temp1.push_back(boost::any(item1.toMap()));
}
res["ServiceInstances"] = boost::any(temp1);
}
if (totalCount) {
res["TotalCount"] = boost::any(*totalCount);
}
return res;
}
void fromMap(map<string, boost::any> m) override {
if (m.find("MaxResults") != m.end() && !m["MaxResults"].empty()) {
maxResults = make_shared<string>(boost::any_cast<string>(m["MaxResults"]));
}
if (m.find("NextToken") != m.end() && !m["NextToken"].empty()) {
nextToken = make_shared<string>(boost::any_cast<string>(m["NextToken"]));
}
if (m.find("RequestId") != m.end() && !m["RequestId"].empty()) {
requestId = make_shared<string>(boost::any_cast<string>(m["RequestId"]));
}
if (m.find("ServiceInstances") != m.end() && !m["ServiceInstances"].empty()) {
if (typeid(vector<boost::any>) == m["ServiceInstances"].type()) {
vector<ListServiceInstancesResponseBodyServiceInstances> expect1;
for(auto item1:boost::any_cast<vector<boost::any>>(m["ServiceInstances"])){
if (typeid(map<string, boost::any>) == item1.type()) {
ListServiceInstancesResponseBodyServiceInstances model2;
model2.fromMap(boost::any_cast<map<string, boost::any>>(item1));
expect1.push_back(model2);
}
}
serviceInstances = make_shared<vector<ListServiceInstancesResponseBodyServiceInstances>>(expect1);
}
}
if (m.find("TotalCount") != m.end() && !m["TotalCount"].empty()) {
totalCount = make_shared<long>(boost::any_cast<long>(m["TotalCount"]));
}
}
virtual ~ListServiceInstancesResponseBody() = default;
};
class ListServiceInstancesResponse : public Darabonba::Model {
public:
shared_ptr<map<string, string>> headers{};
shared_ptr<ListServiceInstancesResponseBody> body{};
ListServiceInstancesResponse() {}
explicit ListServiceInstancesResponse(const std::map<string, boost::any> &config) : Darabonba::Model(config) {
fromMap(config);
};
void validate() override {
if (!headers) {
BOOST_THROW_EXCEPTION(boost::enable_error_info(std::runtime_error("headers is required.")));
}
if (!body) {
BOOST_THROW_EXCEPTION(boost::enable_error_info(std::runtime_error("body is required.")));
}
}
map<string, boost::any> toMap() override {
map<string, boost::any> res;
if (headers) {
res["headers"] = boost::any(*headers);
}
if (body) {
res["body"] = body ? boost::any(body->toMap()) : boost::any(map<string,boost::any>({}));
}
return res;
}
void fromMap(map<string, boost::any> m) override {
if (m.find("headers") != m.end() && !m["headers"].empty()) {
map<string, string> map1 = boost::any_cast<map<string, string>>(m["headers"]);
map<string, string> toMap1;
for (auto item:map1) {
toMap1[item.first] = item.second;
}
headers = make_shared<map<string, string>>(toMap1);
}
if (m.find("body") != m.end() && !m["body"].empty()) {
if (typeid(map<string, boost::any>) == m["body"].type()) {
ListServiceInstancesResponseBody model1;
model1.fromMap(boost::any_cast<map<string, boost::any>>(m["body"]));
body = make_shared<ListServiceInstancesResponseBody>(model1);
}
}
}
virtual ~ListServiceInstancesResponse() = default;
};
class Client : Alibabacloud_OpenApi::Client {
public:
explicit Client(const shared_ptr<Alibabacloud_OpenApi::Config>& config);
string getEndpoint(shared_ptr<string> productId,
shared_ptr<string> regionId,
shared_ptr<string> endpointRule,
shared_ptr<string> network,
shared_ptr<string> suffix,
shared_ptr<map<string, string>> endpointMap,
shared_ptr<string> endpoint);
ContinueDeployServiceInstanceResponse continueDeployServiceInstanceWithOptions(shared_ptr<ContinueDeployServiceInstanceRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime);
ContinueDeployServiceInstanceResponse continueDeployServiceInstance(shared_ptr<ContinueDeployServiceInstanceRequest> request);
CreateServiceInstanceResponse createServiceInstanceWithOptions(shared_ptr<CreateServiceInstanceRequest> tmpReq, shared_ptr<Darabonba_Util::RuntimeOptions> runtime);
CreateServiceInstanceResponse createServiceInstance(shared_ptr<CreateServiceInstanceRequest> request);
DeleteServiceInstancesResponse deleteServiceInstancesWithOptions(shared_ptr<DeleteServiceInstancesRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime);
DeleteServiceInstancesResponse deleteServiceInstances(shared_ptr<DeleteServiceInstancesRequest> request);
GetServiceInstanceResponse getServiceInstanceWithOptions(shared_ptr<GetServiceInstanceRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime);
GetServiceInstanceResponse getServiceInstance(shared_ptr<GetServiceInstanceRequest> request);
ListServiceInstanceLogsResponse listServiceInstanceLogsWithOptions(shared_ptr<ListServiceInstanceLogsRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime);
ListServiceInstanceLogsResponse listServiceInstanceLogs(shared_ptr<ListServiceInstanceLogsRequest> request);
ListServiceInstanceResourcesResponse listServiceInstanceResourcesWithOptions(shared_ptr<ListServiceInstanceResourcesRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime);
ListServiceInstanceResourcesResponse listServiceInstanceResources(shared_ptr<ListServiceInstanceResourcesRequest> request);
ListServiceInstancesResponse listServiceInstancesWithOptions(shared_ptr<ListServiceInstancesRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime);
ListServiceInstancesResponse listServiceInstances(shared_ptr<ListServiceInstancesRequest> request);
virtual ~Client() = default;
};
} // namespace Alibabacloud_ComputeNest20210601
#endif
|
%Headers
--
-- Additional methods for the action class not provided in the template
--
/.
//
// The Lexer contains an array of characters as the input stream to be parsed.
// There are methods to retrieve and classify characters.
// The lexer "token" is implemented simply as the index of the next character in the array.
// The Lexer extends the abstract class LpgLexStream with an implementation of the abstract
// method getKind. The template defines the Lexer class and the lexer() method.
// A driver creates the action class, "Lexer", passing an Option object to the constructor.
//
$kw_lexer_class *kwLexer = nullptr;
bool printTokens = false;
static const int ECLIPSE_TAB_VALUE = 4;
int* getKeywordKinds() { return kwLexer->getKeywordKinds(); }
/**
* @deprecated function replaced by {@link #reset(shared_ptr_wstring content, const std::wstring& filename)}
*/
void initialize(shared_ptr_wstring content, const std::wstring& filename)
{
reset(content, filename);
}
void makeToken(int left_token, int right_token, int kind)
{
lexStream->makeToken(left_token, right_token, kind);
}
void makeToken(int kind)
{
int startOffset = getLeftSpan(),
endOffset = getRightSpan();
lexStream->makeToken(startOffset, endOffset, kind);
if (printTokens) printValue(startOffset, endOffset);
}
void makeComment(int kind)
{
int startOffset = getLeftSpan(),
endOffset = getRightSpan();
lexStream->getIPrsStream()->makeAdjunct(startOffset, endOffset, kind);
}
void skipToken()
{
if (printTokens) printValue(getLeftSpan(), getRightSpan());
}
void checkForKeyWord()
{
int startOffset = getLeftSpan(),
endOffset = getRightSpan(),
kwKind = kwLexer->lexer(startOffset, endOffset);
lexStream->makeToken(startOffset, endOffset, kwKind);
if (printTokens) printValue(startOffset, endOffset);
}
//
// This flavor of checkForKeyWord is necessary when the default kind
// (which is returned when the keyword filter doesn't match) is something
// other than _IDENTIFIER.
//
void checkForKeyWord(int defaultKind)
{
int startOffset = getLeftSpan(),
endOffset = getRightSpan(),
kwKind = kwLexer->lexer(startOffset, endOffset);
if (kwKind == $_IDENTIFIER)
kwKind = defaultKind;
lexStream->makeToken(startOffset, endOffset, kwKind);
if (printTokens) printValue(startOffset, endOffset);
}
void printValue(int startOffset, int endOffset)
{
auto input = lexStream->getInputChars().data();
std::wstring s(input + startOffset, input + endOffset + 1);
std::wcout << s << std::endl;
}
//
//
//
struct $super_stream_class :public LpgLexStream
{
inline static int tokenKind[] =
{
$sym_type::$prefix$CtlCharNotWS$suffix$, // 000 0x00
$sym_type::$prefix$CtlCharNotWS$suffix$, // 001 0x01
$sym_type::$prefix$CtlCharNotWS$suffix$, // 002 0x02
$sym_type::$prefix$CtlCharNotWS$suffix$, // 003 0x03
$sym_type::$prefix$CtlCharNotWS$suffix$, // 004 0x04
$sym_type::$prefix$CtlCharNotWS$suffix$, // 005 0x05
$sym_type::$prefix$CtlCharNotWS$suffix$, // 006 0x06
$sym_type::$prefix$CtlCharNotWS$suffix$, // 007 0x07
$sym_type::$prefix$CtlCharNotWS$suffix$, // 008 0x08
$sym_type::$prefix$HT$suffix$, // 009 0x09
$sym_type::$prefix$LF$suffix$, // 010 0x0A
$sym_type::$prefix$CtlCharNotWS$suffix$, // 011 0x0B
$sym_type::$prefix$FF$suffix$, // 012 0x0C
$sym_type::$prefix$CR$suffix$, // 013 0x0D
$sym_type::$prefix$CtlCharNotWS$suffix$, // 014 0x0E
$sym_type::$prefix$CtlCharNotWS$suffix$, // 015 0x0F
$sym_type::$prefix$CtlCharNotWS$suffix$, // 016 0x10
$sym_type::$prefix$CtlCharNotWS$suffix$, // 017 0x11
$sym_type::$prefix$CtlCharNotWS$suffix$, // 018 0x12
$sym_type::$prefix$CtlCharNotWS$suffix$, // 019 0x13
$sym_type::$prefix$CtlCharNotWS$suffix$, // 020 0x14
$sym_type::$prefix$CtlCharNotWS$suffix$, // 021 0x15
$sym_type::$prefix$CtlCharNotWS$suffix$, // 022 0x16
$sym_type::$prefix$CtlCharNotWS$suffix$, // 023 0x17
$sym_type::$prefix$CtlCharNotWS$suffix$, // 024 0x18
$sym_type::$prefix$CtlCharNotWS$suffix$, // 025 0x19
$sym_type::$prefix$CtlCharNotWS$suffix$, // 026 0x1A
$sym_type::$prefix$CtlCharNotWS$suffix$, // 027 0x1B
$sym_type::$prefix$CtlCharNotWS$suffix$, // 028 0x1C
$sym_type::$prefix$CtlCharNotWS$suffix$, // 029 0x1D
$sym_type::$prefix$CtlCharNotWS$suffix$, // 030 0x1E
$sym_type::$prefix$CtlCharNotWS$suffix$, // 031 0x1F
$sym_type::$prefix$Space$suffix$, // 032 0x20
$sym_type::$prefix$Exclamation$suffix$, // 033 0x21
$sym_type::$prefix$DoubleQuote$suffix$, // 034 0x22
$sym_type::$prefix$Sharp$suffix$, // 035 0x23
$sym_type::$prefix$DollarSign$suffix$, // 036 0x24
$sym_type::$prefix$Percent$suffix$, // 037 0x25
$sym_type::$prefix$Ampersand$suffix$, // 038 0x26
$sym_type::$prefix$SingleQuote$suffix$, // 039 0x27
$sym_type::$prefix$LeftParen$suffix$, // 040 0x28
$sym_type::$prefix$RightParen$suffix$, // 041 0x29
$sym_type::$prefix$Star$suffix$, // 042 0x2A
$sym_type::$prefix$Plus$suffix$, // 043 0x2B
$sym_type::$prefix$Comma$suffix$, // 044 0x2C
$sym_type::$prefix$Minus$suffix$, // 045 0x2D
$sym_type::$prefix$Dot$suffix$, // 046 0x2E
$sym_type::$prefix$Slash$suffix$, // 047 0x2F
$sym_type::$prefix$0$suffix$, // 048 0x30
$sym_type::$prefix$1$suffix$, // 049 0x31
$sym_type::$prefix$2$suffix$, // 050 0x32
$sym_type::$prefix$3$suffix$, // 051 0x33
$sym_type::$prefix$4$suffix$, // 052 0x34
$sym_type::$prefix$5$suffix$, // 053 0x35
$sym_type::$prefix$6$suffix$, // 054 0x36
$sym_type::$prefix$7$suffix$, // 055 0x37
$sym_type::$prefix$8$suffix$, // 056 0x38
$sym_type::$prefix$9$suffix$, // 057 0x39
$sym_type::$prefix$Colon$suffix$, // 058 0x3A
$sym_type::$prefix$SemiColon$suffix$, // 059 0x3B
$sym_type::$prefix$LessThan$suffix$, // 060 0x3C
$sym_type::$prefix$Equal$suffix$, // 061 0x3D
$sym_type::$prefix$GreaterThan$suffix$, // 062 0x3E
$sym_type::$prefix$QuestionMark$suffix$, // 063 0x3F
$sym_type::$prefix$AtSign$suffix$, // 064 0x40
$sym_type::$prefix$A$suffix$, // 065 0x41
$sym_type::$prefix$B$suffix$, // 066 0x42
$sym_type::$prefix$C$suffix$, // 067 0x43
$sym_type::$prefix$D$suffix$, // 068 0x44
$sym_type::$prefix$E$suffix$, // 069 0x45
$sym_type::$prefix$F$suffix$, // 070 0x46
$sym_type::$prefix$G$suffix$, // 071 0x47
$sym_type::$prefix$H$suffix$, // 072 0x48
$sym_type::$prefix$I$suffix$, // 073 0x49
$sym_type::$prefix$J$suffix$, // 074 0x4A
$sym_type::$prefix$K$suffix$, // 075 0x4B
$sym_type::$prefix$L$suffix$, // 076 0x4C
$sym_type::$prefix$M$suffix$, // 077 0x4D
$sym_type::$prefix$N$suffix$, // 078 0x4E
$sym_type::$prefix$O$suffix$, // 079 0x4F
$sym_type::$prefix$P$suffix$, // 080 0x50
$sym_type::$prefix$Q$suffix$, // 081 0x51
$sym_type::$prefix$R$suffix$, // 082 0x52
$sym_type::$prefix$S$suffix$, // 083 0x53
$sym_type::$prefix$T$suffix$, // 084 0x54
$sym_type::$prefix$U$suffix$, // 085 0x55
$sym_type::$prefix$V$suffix$, // 086 0x56
$sym_type::$prefix$W$suffix$, // 087 0x57
$sym_type::$prefix$X$suffix$, // 088 0x58
$sym_type::$prefix$Y$suffix$, // 089 0x59
$sym_type::$prefix$Z$suffix$, // 090 0x5A
$sym_type::$prefix$LeftBracket$suffix$, // 091 0x5B
$sym_type::$prefix$BackSlash$suffix$, // 092 0x5C
$sym_type::$prefix$RightBracket$suffix$, // 093 0x5D
$sym_type::$prefix$Caret$suffix$, // 094 0x5E
$sym_type::$prefix$_$suffix$, // 095 0x5F
$sym_type::$prefix$BackQuote$suffix$, // 096 0x60
$sym_type::$prefix$a$suffix$, // 097 0x61
$sym_type::$prefix$b$suffix$, // 098 0x62
$sym_type::$prefix$c$suffix$, // 099 0x63
$sym_type::$prefix$d$suffix$, // 100 0x64
$sym_type::$prefix$e$suffix$, // 101 0x65
$sym_type::$prefix$f$suffix$, // 102 0x66
$sym_type::$prefix$g$suffix$, // 103 0x67
$sym_type::$prefix$h$suffix$, // 104 0x68
$sym_type::$prefix$i$suffix$, // 105 0x69
$sym_type::$prefix$j$suffix$, // 106 0x6A
$sym_type::$prefix$k$suffix$, // 107 0x6B
$sym_type::$prefix$l$suffix$, // 108 0x6C
$sym_type::$prefix$m$suffix$, // 109 0x6D
$sym_type::$prefix$n$suffix$, // 110 0x6E
$sym_type::$prefix$o$suffix$, // 111 0x6F
$sym_type::$prefix$p$suffix$, // 112 0x70
$sym_type::$prefix$q$suffix$, // 113 0x71
$sym_type::$prefix$r$suffix$, // 114 0x72
$sym_type::$prefix$s$suffix$, // 115 0x73
$sym_type::$prefix$t$suffix$, // 116 0x74
$sym_type::$prefix$u$suffix$, // 117 0x75
$sym_type::$prefix$v$suffix$, // 118 0x76
$sym_type::$prefix$w$suffix$, // 119 0x77
$sym_type::$prefix$x$suffix$, // 120 0x78
$sym_type::$prefix$y$suffix$, // 121 0x79
$sym_type::$prefix$z$suffix$, // 122 0x7A
$sym_type::$prefix$LeftBrace$suffix$, // 123 0x7B
$sym_type::$prefix$VerticalBar$suffix$, // 124 0x7C
$sym_type::$prefix$RightBrace$suffix$, // 125 0x7D
$sym_type::$prefix$Tilde$suffix$, // 126 0x7E
$sym_type::$prefix$AfterASCII$suffix$, // for all chars in range 128..65534
$sym_type::$prefix$EOF$suffix$ // for '\uffff' or 65535
};
int getKind(int i) // Classify character at ith location
{
int c = (i >= getStreamLength() ? 0xffff : getCharValue(i));
return (c < 128 // ASCII Character
? tokenKind[c]
: c == 0xffff
? $sym_type::$prefix$EOF$suffix$
: $sym_type::$prefix$AfterASCII$suffix$);
}
std::vector<std::wstring> orderedExportedSymbols() { return $exp_type::orderedTerminalSymbols; }
$super_stream_class(const std::wstring& filename, int tab):LpgLexStream(filename, tab)
{
}
$super_stream_class(shared_ptr_wstring input_chars, const std::wstring& filename, int tab):LpgLexStream(input_chars, filename, tab)
{
}
$super_stream_class(shared_ptr_wstring input_chars, const std::wstring& filename):LpgLexStream(input_chars, filename, 1)
{
}
};
./
%End
|
{-# OPTIONS --safe #-}
module Cubical.Data.Graph.Examples where
open import Cubical.Foundations.Prelude
open import Cubical.Foundations.Function
open import Cubical.Foundations.Isomorphism
open import Cubical.Data.Empty
open import Cubical.Data.Unit renaming (Unit to ⊤)
open import Cubical.Data.Nat
open import Cubical.Data.SumFin
open import Cubical.Relation.Nullary
open import Cubical.Data.Sum
open import Cubical.Data.Sigma
open import Cubical.Data.Graph.Base
-- Some small graphs of common shape
⇒⇐ : Graph ℓ-zero ℓ-zero
Node ⇒⇐ = Fin 3
Edge ⇒⇐ fzero (fsuc fzero) = ⊤
Edge ⇒⇐ (fsuc (fsuc fzero)) (fsuc fzero) = ⊤
Edge ⇒⇐ _ _ = ⊥
⇐⇒ : Graph ℓ-zero ℓ-zero
Node ⇐⇒ = Fin 3
Edge ⇐⇒ (fsuc fzero) fzero = ⊤
Edge ⇐⇒ (fsuc fzero) (fsuc (fsuc fzero)) = ⊤
Edge ⇐⇒ _ _ = ⊥
-- parallel pair graph
⇉ : Graph ℓ-zero ℓ-zero
Node ⇉ = Fin 2
Edge ⇉ fzero (fsuc fzero) = Fin 2
Edge ⇉ _ _ = ⊥
-- The graph ω = 0 → 1 → 2 → ···
data Adj : ℕ → ℕ → Type₀ where
adj : ∀ n → Adj n (suc n)
areAdj : ∀ m n → Dec (Adj m n)
areAdj zero zero = no λ ()
areAdj zero (suc zero) = yes (adj zero)
areAdj zero (suc (suc n)) = no λ ()
areAdj (suc m) zero = no λ ()
areAdj (suc m) (suc n) = mapDec (λ { (adj .m) → adj (suc m) })
(λ { ¬a (adj .(suc m)) → ¬a (adj m) })
(areAdj m n)
ωGr : Graph ℓ-zero ℓ-zero
Node ωGr = ℕ
Edge ωGr m n with areAdj m n
... | yes _ = ⊤ -- if n ≡ (suc m)
... | no _ = ⊥ -- otherwise
record ωDiag ℓ : Type (ℓ-suc ℓ) where
field
ωNode : ℕ → Type ℓ
ωEdge : ∀ n → ωNode n → ωNode (suc n)
asDiag : Diag ℓ ωGr
asDiag $ n = ωNode n
_<$>_ asDiag {m} {n} f with areAdj m n
asDiag <$> tt | yes (adj m) = ωEdge m
-- The finite connected subgraphs of ω: 𝟘,𝟙,𝟚,𝟛,...
data AdjFin : ∀ {k} → Fin k → Fin k → Type₀ where
adj : ∀ {k} (n : Fin k) → AdjFin (finj n) (fsuc n)
adj-fsuc : ∀ {k} {m n : Fin k} → AdjFin (fsuc m) (fsuc n) → AdjFin m n
adj-fsuc {suc k} {.(finj n)} {fsuc n} (adj .(fsuc n)) = adj n
areAdjFin : ∀ {k} (m n : Fin k) → Dec (AdjFin m n)
areAdjFin {suc k} fzero fzero = no λ ()
areAdjFin {suc (suc k)} fzero (fsuc fzero) = yes (adj fzero)
areAdjFin {suc (suc k)} fzero (fsuc (fsuc n)) = no λ ()
areAdjFin {suc k} (fsuc m) fzero = no λ ()
areAdjFin {suc k} (fsuc m) (fsuc n) = mapDec (λ { (adj m) → adj (fsuc m) })
(λ { ¬a a → ¬a (adj-fsuc a) })
(areAdjFin {k} m n)
[_]Gr : ℕ → Graph ℓ-zero ℓ-zero
Node [ k ]Gr = Fin k
Edge [ k ]Gr m n with areAdjFin m n
... | yes _ = ⊤ -- if n ≡ (suc m)
... | no _ = ⊥ -- otherwise
𝟘Gr 𝟙Gr 𝟚Gr 𝟛Gr : Graph ℓ-zero ℓ-zero
𝟘Gr = [ 0 ]Gr; 𝟙Gr = [ 1 ]Gr; 𝟚Gr = [ 2 ]Gr; 𝟛Gr = [ 3 ]Gr
record [_]Diag ℓ (k : ℕ) : Type (ℓ-suc ℓ) where
field
[]Node : Fin (suc k) → Type ℓ
[]Edge : ∀ (n : Fin k) → []Node (finj n) → []Node (fsuc n)
asDiag : Diag ℓ [ suc k ]Gr
asDiag $ n = []Node n
_<$>_ asDiag {m} {n} f with areAdjFin m n
_<$>_ asDiag {.(finj n)} {fsuc n} f | yes (adj .n) = []Edge n
-- Disjoint union of graphs
module _ {ℓv ℓe ℓv' ℓe'} where

  _⊎Gr_ : ∀ (G : Graph ℓv ℓe) (G' : Graph ℓv' ℓe') → Graph (ℓ-max ℓv ℓv') (ℓ-max ℓe ℓe')
  Node (G ⊎Gr G') = Node G ⊎ Node G'
  Edge (G ⊎Gr G') (inl x) (inl y) = Lift {j = ℓe'} (Edge G x y)
  Edge (G ⊎Gr G') (inr x) (inr y) = Lift {j = ℓe } (Edge G' x y)
  Edge (G ⊎Gr G') _ _ = Lift ⊥

  record ⊎Diag ℓ (G : Graph ℓv ℓe) (G' : Graph ℓv' ℓe')
      : Type (ℓ-max (ℓ-suc ℓ) (ℓ-max (ℓ-max ℓv ℓv') (ℓ-max ℓe ℓe'))) where
    field
      ⊎Node : Node G ⊎ Node G' → Type ℓ
      ⊎Edgel : ∀ {x y} → Edge G x y → ⊎Node (inl x) → ⊎Node (inl y)
      ⊎Edger : ∀ {x y} → Edge G' x y → ⊎Node (inr x) → ⊎Node (inr y)

    asDiag : Diag ℓ (G ⊎Gr G')
    asDiag $ x = ⊎Node x
    _<$>_ asDiag {inl x} {inl y} f = ⊎Edgel (lower f)
    _<$>_ asDiag {inr x} {inr y} f = ⊎Edger (lower f)
-- Cartesian product of graphs
module _ {ℓv ℓe ℓv' ℓe'} where

  -- We need decidable equality in order to define the cartesian product
  DecGraph : ∀ ℓv ℓe → Type (ℓ-suc (ℓ-max ℓv ℓe))
  DecGraph ℓv ℓe = Σ[ G ∈ Graph ℓv ℓe ] Discrete (Node G)

  _×Gr_ : (G : DecGraph ℓv ℓe) (G' : DecGraph ℓv' ℓe') → Graph (ℓ-max ℓv ℓv') (ℓ-max ℓe ℓe')
  Node (G ×Gr G') = Node (fst G) × Node (fst G')
  Edge (G ×Gr G') (x , x') (y , y') with snd G x y | snd G' x' y'
  ... | yes _ | yes _ = Edge (fst G) x y ⊎ Edge (fst G') x' y'
  ... | yes _ | no _  = Lift {j = ℓe } (Edge (fst G') x' y')
  ... | no _  | yes _ = Lift {j = ℓe'} (Edge (fst G) x y)
  ... | no _  | no _  = Lift ⊥

  record ×Diag ℓ (G : DecGraph ℓv ℓe) (G' : DecGraph ℓv' ℓe')
      : Type (ℓ-max (ℓ-suc ℓ) (ℓ-max (ℓ-max ℓv ℓv') (ℓ-max ℓe ℓe'))) where
    field
      ×Node : Node (fst G) × Node (fst G') → Type ℓ
      ×Edge₁ : ∀ {x y} (f : Edge (fst G) x y) (x' : Node (fst G')) → ×Node (x , x') → ×Node (y , x')
      ×Edge₂ : ∀ (x : Node (fst G)) {x' y'} (f : Edge (fst G') x' y') → ×Node (x , x') → ×Node (x , y')

    asDiag : Diag ℓ (G ×Gr G')
    asDiag $ x = ×Node x
    _<$>_ asDiag {x , x'} {y , y'} f with snd G x y | snd G' x' y'
    _<$>_ asDiag {x , x'} {y , y'} (inl f) | yes _ | yes p' = subst _ p' (×Edge₁ f x')
    _<$>_ asDiag {x , x'} {y , y'} (inr f) | yes p | yes _  = subst _ p  (×Edge₂ x f)
    _<$>_ asDiag {x , x'} {y , y'} f | yes p | no _  = subst _ p  (×Edge₂ x (lower f))
    _<$>_ asDiag {x , x'} {y , y'} f | no _  | yes p' = subst _ p' (×Edge₁ (lower f) x')
|
Feel free to enquire about our wide range of Business and Commercial loans for your needs.
Find the best loan to suit your unique needs with the best rates & features.
Take advantage of our long-standing experience with all types of loans, even the unorthodox ones.
What's the right Motor Vehicle Loan for you?
|
<a href="https://colab.research.google.com/github/mishagrol/Seminar_Sobol/blob/master/Seminar_Soil_Sensitivity.ipynb" target="_parent"></a>
# Introduction to Digital agro
## Crop simulation models
____
### Mikhail Gasanov
E-mail: Mikhail.Gasanov[a]skoltech.ru
tg:@misha_grol
## Clone utils and files from GitHub
```python
# !git clone https://github.com/mishagrol/Seminar_Sobol.git
# !cp -r ./Seminar_Sobol/* .
```
# How to start with PCSE/WOFOST model
_____
### Documentation: [PCSE](https://pcse.readthedocs.io/)
```python
%matplotlib inline
import sys, os
import pcse
import pandas as pd
import matplotlib
import yaml
matplotlib.style.use("ggplot")
import matplotlib.pyplot as plt
print("This notebook was built with:")
print("python version: %s " % sys.version)
print("PCSE version: %s" % pcse.__version__)
```
This notebook was built with:
python version: 3.6.11 | packaged by conda-forge | (default, Aug 5 2020, 20:19:23)
[GCC Clang 10.0.1 ]
PCSE version: 5.4.2
```python
import warnings
warnings.filterwarnings("ignore")
```
```python
wofostPP = pcse.start_wofost(mode="wlp")
```
You have just successfully initialized a PCSE/WOFOST object in the Python interpreter, which is in its initial state and waiting to do some simulation. We can now advance the model state for example with 1 day:
```python
wofostPP.run()
```
Advancing the crop simulation by only one day is often not very useful, so the number of days to simulate can be specified as well:
```python
wofostPP.run(days=10)
```
## Getting information about state and rate variables
Retrieving information about the calculated model states or rates can be done with the `get_variable()` method on a PCSE object. For example, to retrieve the leaf area index value in the current model state you can do:
### Leaf Area Index
```python
# Leaf Area Index at this date
print(wofostPP.day)
print('LAI', wofostPP.get_variable('LAI'))
```
2000-01-12
LAI 0.2870809817505803
The `get_variable()` method can retrieve any state or rate variable that is defined somewhere in the model.
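For example, the crop development stage (DVS, a standard WOFOST state variable) can be read the same way; a minimal sketch:
```python
# Read another state variable from the current model state
print('DVS', wofostPP.get_variable('DVS'))
```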
Finally, we can finish the crop season by letting it run until the model terminates because the crop reaches maturity or the harvest date:
```python
wofostPP.run_till_terminate()
```
## Retrieving and displaying WOFOST output
We can retrieve the results of the simulation at each time step using `get_output()`. In Python terms this returns a list of dictionaries, one dictionary for each time step of the simulation results. Each dictionary contains the key:value pairs of the state or rate variables that were stored at that time step.
```python
output = wofostPP.get_output()
```
The most convenient way to handle the output from WOFOST is to use the `pandas` module to convert it into a dataframe. Pandas DataFrames can be converted to a variety of formats including Excel, CSV or database tables.
```python
df_crop = pd.DataFrame(output).set_index('day')
```
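For instance, the daily results can then be written straight to a file (the filename here is arbitrary):
```python
# Export the daily simulation results to CSV (use .to_excel for Excel)
df_crop.to_csv("wofost_output.csv")
```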
```python
summary_output = wofostPP.get_summary_output()
msg = "Reached maturity at {DOM} with total biomass {TAGP:.1f} kg/ha, " \
"a yield of {TWSO:.1f} kg/ha with a maximum LAI of {LAIMAX:.2f}."
for crop_cycle in summary_output:
    print(msg.format(**crop_cycle))
```
Reached maturity at 2000-05-31 with total biomass 15261.8 kg/ha, a yield of 7179.8 kg/ha with a maximum LAI of 6.13.
```python
fig, (axis1, axis2) = plt.subplots(nrows=1, ncols=2, figsize=(16,8))
df_crop.LAI.plot(ax=axis1, label="LAI", color='k')
df_crop.TAGP.plot(ax=axis2, label="Total biomass")
df_crop.TWSO.plot(ax=axis2, label="Yield")
axis1.set_title("Leaf Area Index")
axis2.set_title("Crop biomass")
fig.autofmt_xdate()
r = fig.legend()
```
# Running PCSE/WOFOST with custom input data
This Jupyter notebook will show you how to read inputs from files for running PCSE/WOFOST.
thanks to **Allard de Wit**
**Prerequisites for running this notebook**
Several packages need to be installed for running PCSE/WOFOST:
1. `PCSE` and its dependencies. See the [PCSE user guide](http://pcse.readthedocs.io/en/stable/installing.html) for more information;
2. The `pandas` module for processing and storing WOFOST output;
3. The `matplotlib` module for generating charts
## Reading model parameters
### Crop parameters
```python
data_dir = 'data/'
```
```python
from pcse.fileinput import CABOFileReader
cropfile = os.path.join(data_dir, 'crop', 'SUG0601.crop')
cropdata = CABOFileReader(cropfile)
```
```python
#potato
from pcse.fileinput import CABOFileReader
cropfile_potato = os.path.join(data_dir, 'crop', 'POT701.CAB')
cropdata_potato = CABOFileReader(cropfile_potato)
```
```python
# Number of parameters for our crop
len(cropdata_potato)
```
63
### Soil parameters
The soildata dictionary provides the parameter name/value pairs related to the soil type and soil physical properties. The number of parameters is variable depending on the soil water balance type that is used for the simulation. For this example, we will use the water balance for freely draining soils and use the soil file for medium fine sand: `ec3.soil`. This file is also taken from the soil files in the [WOFOST Control Centre](http://www.wageningenur.nl/wofost).
```python
soilfile = os.path.join(data_dir, 'soil', 'ec3.soil')
soildata = CABOFileReader(soilfile)
print(soildata)
```
** $Id: ec3.new 1.2 1997/09/18 17:33:54 LEM release $
**
** SOIL DATA FILE for use with WOFOST Version 5.0, June 1990
**
** EC3-medium fine
------------------------------------
SMW: 0.104 <class 'float'>
SMFCF: 0.3 <class 'float'>
SM0: 0.41 <class 'float'>
CRAIRC: 0.06 <class 'float'>
K0: 25.586 <class 'float'>
SOPE: 1.47 <class 'float'>
KSUB: 1.47 <class 'float'>
SPADS: 0.1 <class 'float'>
SPODS: 0.03 <class 'float'>
SPASS: 0.2 <class 'float'>
SPOSS: 0.05 <class 'float'>
DEFLIM: -0.3 <class 'float'>
RDMSOL: 120 <class 'int'>
SOLNAM: EC3-medium fine <class 'str'>
SMTAB: [-1.0, 0.41, 1.0, 0.398, 1.3, 0.389, 1.491, 0.38, 2.0, 0.34, 2.4, 0.287, 2.7, 0.241, 3.4, 0.148, 4.204, 0.104, 6.0, 0.09] <class 'list'>
CONTAB: [0.0, 1.408, 1.0, 0.167, 1.3, -0.215, 1.491, -0.638, 1.7, -0.854, 2.0, -1.155, 2.4, -1.796, 2.7, -2.26, 3.0, -2.745, 3.4, -3.357, 3.7, -3.824, 4.0, -4.276, 4.204, -4.678] <class 'list'>
### Site parameters
The site parameters provide ancillary parameters that are not related to the crop or the soil. Examples are the initial conditions of the water balance such as the initial soil moisture content (WAV) and the initial and maximum surface storage (SSI, SSMAX). Also the atmospheric $CO_{2}$
concentration is a typical site parameter. For the moment, we can define these parameters directly on the Python commandline as a simple python dictionary. However, it is more convenient to use the `WOFOST71SiteDataProvider` that documents the site parameters and provides sensible defaults:
```python
from pcse.util import WOFOST71SiteDataProvider
sitedata = WOFOST71SiteDataProvider(WAV=100, CO2=360)
print(sitedata)
```
{'IFUNRN': 0, 'NOTINF': 0, 'SSI': 0.0, 'SSMAX': 0.0, 'WAV': 100.0, 'SMLIM': 0.4, 'CO2': 360.0}
### Packaging all parameters
Finally, we need to pack the different sets of parameters into one variable using the `ParameterProvider`. This is needed because PCSE expects one variable that contains all parameter values. Using this approach has the additional advantage that parameter value can be easily overridden in case of running multiple simulations with slightly different parameter values:
```python
from pcse.base import ParameterProvider
parameters = ParameterProvider(cropdata=cropdata, soildata=soildata, sitedata=sitedata)
```
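Parameter overrides can then be applied without rebuilding the inputs; a minimal sketch (assuming the `set_override`/`clear_override` methods of the PCSE `ParameterProvider`):
```python
# Override a single parameter for a scenario run, then restore the defaults
parameters.set_override("RDMSOL", 100.)  # cap the maximum soil rooting depth
# ... run a simulation with the modified parameters ...
parameters.clear_override()              # back to the original values
```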
## Agromanagement
The agromanagement inputs provide the start date of the agricultural campaign, the start_date/start_type of the crop simulation, the end_date/end_type of the crop simulation and the maximum duration of the crop simulation. The latter is included to avoid unrealistically long simulations, for example as a result of a too-high temperature sum requirement.
The agromanagement inputs are defined with a special syntax called [YAML](http://yaml.org/), which makes it easy to create the more complex structures needed for defining the agromanagement. The agromanagement file for sugar beet in Wageningen, `sugarbeet_calendar.agro`, can be read with the `YAMLAgroManagementReader`:
```python
from pcse.fileinput import YAMLAgroManagementReader
#crop rotation for Moscow region
agromanagement_file = os.path.join(data_dir, 'agro', 'sugarbeet_calendar_Moscow_short.agro')
#agromanagement_file = os.path.join(data_dir, 'agro', 'sugarbeet_calendar.agro')
agromanagement = YAMLAgroManagementReader(agromanagement_file)
print(agromanagement)
```
!!python/object/new:pcse.fileinput.yaml_agro_loader.YAMLAgroManagementReader
listitems:
- 2019-06-01:
    CropCalendar:
      crop_end_date: 2019-10-15
      crop_end_type: harvest
      crop_name: sugar-beet
      crop_start_date: 2019-06-02
      crop_start_type: emergence
      max_duration: 300
      variety_name: sugar-beet-601
    StateEvents: null
    TimedEvents:
    - comment: All fertilizer amounts in kg/ha
      event_signal: apply_npk
      events_table:
      - 2019-06-22:
          K_amount: 122
          N_amount: 128
          P_amount: 25
      name: Timed N/P/K application table
We can create a crop rotation in the model
```python
K_kg = 60
P_kg = 60
N_kg = 120
year=2017
yaml_agro = f"""
- {year}-05-01:
    CropCalendar:
        crop_name: 'sugar-beet'
        variety_name: 'sugar-beet-601'
        crop_start_date: {year}-05-20
        crop_start_type: sowing
        crop_end_date:
        crop_end_type: maturity
        max_duration: 250
    TimedEvents:
    -   event_signal: apply_npk
        name: Timed N/P/K application table
        comment: All fertilizer amounts in kg/ha
        events_table:
        - {year}-06-22: {{N_amount : {N_kg}, P_amount: {P_kg}, K_amount: {K_kg}}}
    StateEvents: null
"""
agromanagement = yaml.safe_load(yaml_agro)
print(yaml_agro)
#crop_end_date: {year_date_1}-11-15
```
- 2017-05-01:
    CropCalendar:
        crop_name: 'sugar-beet'
        variety_name: 'sugar-beet-601'
        crop_start_date: 2017-05-20
        crop_start_type: sowing
        crop_end_date:
        crop_end_type: maturity
        max_duration: 250
    TimedEvents:
    -   event_signal: apply_npk
        name: Timed N/P/K application table
        comment: All fertilizer amounts in kg/ha
        events_table:
        - 2017-06-22: {N_amount : 120, P_amount: 60, K_amount: 60}
    StateEvents: null
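The parsed result is a plain Python structure: a list with one dictionary per campaign, keyed by the campaign start date (a `datetime.date`). A small sketch of poking into it:
```python
# Inspect the parsed agromanagement structure
campaign_start = list(agromanagement[0].keys())[0]
print(agromanagement[0][campaign_start]['CropCalendar']['crop_name'])  # 'sugar-beet'
```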
## Daily weather observations
Daily weather variables are needed for running the simulation. There are several data providers in PCSE for reading weather data, see the section on [weather data providers](http://pcse.readthedocs.io/en/stable/reference_guide.html#weather-data-providers) to get an overview.
For this example we will use weather data from an excel file which provides daily weather data for Wageningen for the period 2004 to 2008. We will read the data from the file using the ExcelWeatherDataProvider:
### NASA Weather Data Provider from NASA [DataBase](https://power.larc.nasa.gov/)
```python
#NASA Weather system
#Sometimes it does not work (server), upload excel file
from pcse.db import NASAPowerWeatherDataProvider
```
```python
weather = NASAPowerWeatherDataProvider(51, 5, force_update=True)
```
```python
print(weather)
```
Weather data provided by: NASAPowerWeatherDataProvider
--------Description---------
NASA/POWER SRB/FLASHFlux/MERRA2/GEOS 5.12.4 (FP-IT) 0.5 x 0.5 Degree Daily Averaged Data
----Site characteristics----
Elevation: 38.5
Latitude: 51.000
Longitude: 5.000
Data available for 1983-07-01 - 2021-02-07
Number of missing days: 6
### Problems with missing days (~1-5 %)
```python
def weather_loader(latitude, longitude):
    path = './data/meteo/'
    # API request to NASA database
    weather = NASAPowerWeatherDataProvider(latitude, longitude, force_update=True)
    # Print done if downloaded
    print('____DONE_____', 'latitude', latitude, 'longitude', longitude, '____')
    # export pcse.weather format to pandas df
    df_weather = pd.DataFrame(weather.export())
    #print('initial number of days:', len(df_weather))
    # create full range of dates
    r = pd.date_range(start=df_weather.DAY.min(), end=df_weather.DAY.max())
    # extend range of dates
    full_range_weather = df_weather.set_index('DAY').reindex(r).rename_axis('DAY').reset_index()
    missing_days = (full_range_weather.isna()).sum().sum()
    print('num_of_missing_days', missing_days)
    # fill gaps with the forward-fill method in pandas
    filled_weather = full_range_weather.fillna(method='ffill', axis=0)
    # keep only the columns expected by the CSV weather format
    filled_weather = filled_weather[['DAY', 'IRRAD', 'TMIN', 'TMAX', 'VAP', 'WIND', 'RAIN']]
    filled_weather['SNOWDEPTH'] = 'NaN'
    filled_weather[['IRRAD']] = filled_weather[['IRRAD']]/1000.
    filled_weather[['VAP']] = filled_weather[['VAP']]/10.
    filled_weather.DAY = filled_weather.DAY.dt.strftime('%Y%m%d')
    # fill the CSV header template with the site metadata
    with open(path + "pattern.csv", "r") as f:
        text = f.read()
    text = text.replace("1111", str(weather.longitude))
    text = text.replace("2222", str(weather.latitude))
    text = text.replace("3333", str(weather.elevation))
    text = text.replace("4444", str(weather.angstA))
    text = text.replace("5555", str(weather.angstB))
    path_to_save_csv_file = path + f'NASA_weather_latitude_{latitude}_longitude_{longitude}.csv'
    with open(path_to_save_csv_file, "w") as x:
        x.write(text)
    # append the weather records to the header and save to csv
    filled_weather.to_csv(path_to_save_csv_file, mode='a', header=False, index=False)
    # LOAD WEATHER as csv file
    weather = pcse.fileinput.CSVWeatherDataProvider(path_to_save_csv_file)
    return weather
```
```python
weather = weather_loader(55,55)
```
____DONE_____ latitude 55 longitude 55 ____
num_of_missing_days 195
```python
df_weather = pd.DataFrame(weather.export())
```
```python
fig, (ax1,ax2) = plt.subplots(nrows=1, ncols=2, figsize=(16,4))
df_weather.set_index('DAY')['ET0'][-365:].plot(ax=ax1, label='ET0')
df_weather.set_index('DAY')['TMAX'][-365:].plot(ax=ax2, label='T MAX')
ax1.set_title('ET 0')
ax2.set_title('T°C Max')
```
## Importing, initializing and running a PCSE model
Internally, PCSE uses a simulation engine to run a crop simulation. This engine takes a configuration file that specifies the components for the crop, the soil and the agromanagement that need to be used for the simulation. So any PCSE model can be started by importing the engine and initializing it with a given configuration file and the corresponding parameters, weather data and agromanagement.
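As a rough illustration, starting the generic engine directly might look like the sketch below (the `Engine` signature and the `"Wofost71_WLP_FD.conf"` configuration name are assumptions here, not verified against your PCSE version):
```python
# Sketch: the generic simulation engine with an explicit configuration file
from pcse.engine import Engine
engine = Engine(parameters, weather, agromanagement, config="Wofost71_WLP_FD.conf")
engine.run_till_terminate()
```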
However, as many users of PCSE only need a particular configuration (for example the WOFOST model for potential production), preconfigured Engines are provided in `pcse.models`. For the sugarbeet example we will import the WOFOST model for water-limited simulation under freely draining soil conditions:
```python
from pcse.models import Wofost71_WLP_FD
wofsim = Wofost71_WLP_FD(parameters, weather, agromanagement)
wofsim.run_till_terminate()
df_results = pd.DataFrame(wofsim.get_output())
df_results = df_results.set_index("day")
df_results.tail()
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>DVS</th>
<th>LAI</th>
<th>TAGP</th>
<th>TWSO</th>
<th>TWLV</th>
<th>TWST</th>
<th>TWRT</th>
<th>TRA</th>
<th>RD</th>
<th>SM</th>
<th>WWLOW</th>
</tr>
<tr>
<th>day</th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>2018-01-21</th>
<td>1.638268</td>
<td>4.361832</td>
<td>13001.94206</td>
<td>8070.729387</td>
<td>2295.014434</td>
<td>2636.198239</td>
<td>2072.527759</td>
<td>0.014248</td>
<td>120.0</td>
<td>0.154055</td>
<td>18.486627</td>
</tr>
<tr>
<th>2018-01-22</th>
<td>1.638268</td>
<td>4.361832</td>
<td>13001.94206</td>
<td>8070.729387</td>
<td>2295.014434</td>
<td>2636.198239</td>
<td>2072.527759</td>
<td>0.023385</td>
<td>120.0</td>
<td>0.153960</td>
<td>18.475179</td>
</tr>
<tr>
<th>2018-01-23</th>
<td>1.638268</td>
<td>4.361832</td>
<td>13001.94206</td>
<td>8070.729387</td>
<td>2295.014434</td>
<td>2636.198239</td>
<td>2072.527759</td>
<td>0.023202</td>
<td>120.0</td>
<td>0.154166</td>
<td>18.499908</td>
</tr>
<tr>
<th>2018-01-24</th>
<td>1.638268</td>
<td>4.361832</td>
<td>13001.94206</td>
<td>8070.729387</td>
<td>2295.014434</td>
<td>2636.198239</td>
<td>2072.527759</td>
<td>0.028208</td>
<td>120.0</td>
<td>0.154058</td>
<td>18.486978</td>
</tr>
<tr>
<th>2018-01-25</th>
<td>1.638268</td>
<td>4.361832</td>
<td>13001.94206</td>
<td>8070.729387</td>
<td>2295.014434</td>
<td>2636.198239</td>
<td>2072.527759</td>
<td>0.026663</td>
<td>120.0</td>
<td>0.153811</td>
<td>18.457348</td>
</tr>
</tbody>
</table>
</div>
We can then run the simulation and retrieve the time series of daily simulation output using the get_output() method on the WOFOST object. Finally, we convert the simulation results to a pandas dataframe:
```python
summary_output = wofsim.get_summary_output()
```
```python
wofsim.get_summary_output()
```
[{'DVS': 1.6382678571428573,
'LAIMAX': 4.361831916594967,
'TAGP': 13001.942059594281,
'TWSO': 8070.729386700372,
'TWLV': 2295.0144341030245,
'TWST': 2636.198238790885,
'TWRT': 2072.5277589248244,
'CTRAT': 17.5463787829338,
'RD': 120.0,
'DOS': datetime.date(2017, 5, 20),
'DOE': datetime.date(2017, 6, 1),
'DOA': datetime.date(2017, 7, 23),
'DOM': None,
'DOH': None,
'DOV': None}]
```python
msg = "Reached maturity at {DOM} with total biomass {TAGP} kg/ha "\
"and a yield of {TWSO} kg/ha."
print(msg.format(**summary_output[0]))
```
Reached maturity at None with total biomass 13001.942059594281 kg/ha and a yield of 8070.729386700372 kg/ha.
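Note that `DOM` is `None` in the summary above: the crop never reached maturity before the campaign ended, so the message printed by the template is misleading. A small sketch of a safer report:
```python
# Guard against DOM being None (maturity not reached within the campaign)
crop = summary_output[0]
finished = crop['DOM'] if crop['DOM'] is not None else "end of campaign (maturity not reached)"
print(f"Finished at {finished} with total biomass {crop['TAGP']:.1f} kg/ha "
      f"and a yield of {crop['TWSO']:.1f} kg/ha.")
```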
# Sensitivity analysis of models
___
```python
# !pip install SALib
```
Collecting SALib
  Downloading https://files.pythonhosted.org/packages/f7/33/cee4d64f7c40f33c08cf5ef5c9b1fb5e51f194b5deceefb5567112800b70/SALib-1.3.11.tar.gz (856kB)
     |████████████████████████████████| 860kB 3.3MB/s
Requirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from SALib) (1.18.5)
Requirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from SALib) (1.4.1)
Requirement already satisfied: matplotlib in /usr/local/lib/python3.6/dist-packages (from SALib) (3.2.2)
Requirement already satisfied: pandas in /usr/local/lib/python3.6/dist-packages (from SALib) (1.0.5)
Requirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib->SALib) (2.8.1)
Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib->SALib) (2.4.7)
Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.6/dist-packages (from matplotlib->SALib) (0.10.0)
Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib->SALib) (1.2.0)
Requirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.6/dist-packages (from pandas->SALib) (2018.9)
Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.6/dist-packages (from python-dateutil>=2.1->matplotlib->SALib) (1.15.0)
Building wheels for collected packages: SALib
  Building wheel for SALib (setup.py) ... done
Created wheel for SALib: filename=SALib-1.3.11-py2.py3-none-any.whl size=729665 sha256=93e720543725a20b1f31d6b4dcd5681574a59bb5c49e364a65b935ee9a92a0b0
Stored in directory: /root/.cache/pip/wheels/62/ed/f9/a0b98754ffb2191b98324b96cbbeb1bd5d9598b39ab996b429
Successfully built SALib
Installing collected packages: SALib
Successfully installed SALib-1.3.11
## Sobol’ Sequences versus Random numbers and regular grid
```python
from SALib.sample import saltelli
from SALib.analyze import sobol
from SALib.test_functions import Ishigami
import numpy as np
```
__Docs [SALib](https://salib.readthedocs.io/en/latest/#)__
In this example, we will perform a Sobol’ sensitivity analysis of the _Ishigami_ function, shown below. The _Ishigami_ function is commonly used to test uncertainty and sensitivity analysis methods because it exhibits strong nonlinearity and nonmonotonicity.
$f(x)=\sin \left(x_{1}\right)+ \text{a}\, \operatorname{sin}^{2}\left(x_{2}\right)+ \text{b}\, x_{3}^{4} \sin \left(x_{1}\right)$
```python
problem = {
'num_vars': 3,
'names': ['x1', 'x2', 'x3'],
'bounds': [[-np.pi, np.pi]]*3
}
```
```python
# Generate samples
param_values = saltelli.sample(problem, 10, calc_second_order=True)
param_values.shape
```
(80, 3)
Here, `param_values` is a NumPy matrix. If we run `param_values.shape`, we see that the matrix is **80 by 3**: the Saltelli sampler generated 80 samples. The Saltelli sampler generates $N(2D+2)$ samples, where in this example $N$ is 10 (the argument we supplied) and $D$ is 3 (the number of model inputs). The keyword argument `calc_second_order=False` will exclude second-order indices, resulting in a smaller sample matrix with $N(D+2)$ rows instead.
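A quick way to convince yourself of these sample counts (a small check, assuming the same `problem` definition as above):
```python
# N*(2D+2) rows with second-order indices, N*(D+2) without
N, D = 10, problem['num_vars']
assert saltelli.sample(problem, N, calc_second_order=True).shape == (N * (2 * D + 2), D)   # (80, 3)
assert saltelli.sample(problem, N, calc_second_order=False).shape == (N * (D + 2), D)      # (50, 3)
```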
```python
# Run model (example)
Y = Ishigami.evaluate(param_values)
# Perform analysis
Si = sobol.analyze(problem, Y, print_to_console=True)
# Returns a dictionary with keys 'S1', 'S1_conf', 'ST', and 'ST_conf'
# (first and total-order indices with bootstrap confidence intervals)
T_Si, first_Si, (idx, second_Si) = sobol.Si_to_pandas_dict(Si)
```
Parameter S1 S1_conf ST ST_conf
x1 -0.203434 0.660158 1.345837 2.206907
x2 0.838328 0.650453 0.716922 0.567090
x3 -0.592087 0.390549 0.459872 0.420236
Parameter_1 Parameter_2 S2 S2_conf
x1 x2 0.208665 1.159449
x1 x3 1.165212 0.917190
x2 x3 0.268558 0.759311
Consider the model output as
\begin{eqnarray*}
Y=f(X)=f\left(X_{1}, \ldots, X_{p}\right),
\end{eqnarray*}
where $f$ is, in our case, the agro-model simulator, $X$ are the $p$ varied input parameters and $Y$ is the predicted output. Following Sobol's technique, we represent the multivariate random function $f$ using the Hoeffding decomposition:
\begin{equation}
f(X_1,\dots,X_p) = f_0 + \sum_i^p f_i + \sum_i^p\sum_{j>i}^p f_{ij} + \dots + f_{1\dots p},
\end{equation}
where $f_0$ is a constant term, $f_i = f_i(X_i)$ denotes main effects, $f_{ij} = f_{ij}(X_i, X_j)$ and others describe higher-order interactions. These terms can be written as
\begin{equation*}
\begin{split}
f_0 &= E(Y),\\
f_i &= E_{X_{\sim i}}(Y | X_i) - E(Y),\\
f_{ij} &= E_{X_{\sim ij}}(Y | X_i, X_j) - f_i - f_j - f_0,\\
\dots
\end{split}
\end{equation*}
where $E$ is mathematical expectation and $X_{\sim i}$ denotes all parameters except $i^\text{th}$. Under the assumption that the input parameters are independent, total variance $V(Y)$ of the crop yield can be decomposed as follows:
\begin{equation*}
V(Y) = \sum_i^p V_i + \sum_i^p\sum_{j>i}^p V_{ij} + \dots + V_{12\dots p},
\end{equation*}
where partial variances are
\begin{equation*}
\begin{split}
V_i &= V[f_i(X_i)] = V_{X_i}\left[E_{X_{\sim i}}(Y | X_i)\right],\\
V_{ij} &= V[f_{ij}(X_i,X_j)] = V_{X_iX_j}\left[E_{X_{\sim ij}}(Y | X_i, X_j)\right] - V_i - V_j,\\
\dots
\end{split}
\end{equation*}
## Sobol index (first order, second order, total index)
This way, sensitivity indices (SI) can be introduced as
\begin{equation}
\Large
S_i = \frac{V_i}{V(Y)},~S_{ij} = \frac{V_{ij}}{V(Y)},~\dots
\end{equation}
In order to incorporate all of the interactions for a particular parameter, one can compute the total effect index:
\begin{equation}
S_{T_i} = \frac{E_{X_{\sim i}}\left[V_{X_i}(Y|X_{\sim i})\right]}{V(Y)} = 1 - \frac{V_{X_{\sim i}}\left[E_{X_i}(Y | X_{\sim i})\right]}{V(Y)}
\end{equation}
From this assumption we can conclude:
\begin{equation}
\Large
0 \leq S_i \leq S_{T_i} \leq 1
\end{equation}
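For the Ishigami function these indices are known in closed form, which makes it a convenient benchmark. A short sketch using the standard analytic variance decomposition (with the SALib defaults $a=7$, $b=0.1$):
```python
import numpy as np

# Closed-form variance terms for the Ishigami function
a, b = 7.0, 0.1
V1  = 0.5 * (1 + b * np.pi**4 / 5) ** 2   # main effect of x1
V2  = a**2 / 8                            # main effect of x2
V13 = b**2 * np.pi**8 * 8 / 225           # x1-x3 interaction (x3 has no main effect)
V   = V1 + V2 + V13

print("S1  =", V1 / V)          # ≈ 0.314
print("S2  =", V2 / V)          # ≈ 0.442
print("S3  =", 0.0)
print("ST1 =", (V1 + V13) / V)  # ≈ 0.558
```
Comparing these targets with the SALib estimates above shows how far off the estimates are with only $N=10$ base samples.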
More -
* [Wiki](https://en.wikipedia.org/wiki/Sobol_sequence)
* [Habr](https://habr.com/ru/post/440892/)
* Feature selection [Skoltech ML 2020](https://github.com/adasegroup/ML2020_lectures/blob/master/lecture9/Lecture_9_Model_Feature_Selection_Sensitivity.pdf)
# Sensitivity analysis of WOFOST model
## Install modules
```python
from SALib.sample import saltelli
from SALib.analyze import sobol
from SALib.test_functions import Ishigami
import numpy as np
import pandas as pd
```
## Parameters
```python
NPK = {
'num_vars':3,
'names':['N_kg', 'P_kg', 'K_kg'],
'bounds':[[30., 60.],
[60., 90.],
[100., 130.]]
}
```
```python
Soil_parameters = {
'num_vars':5,
'names':['SMV', 'SMFCF', 'SM0', 'CRAIRC', 'K0'],
'bounds':[[0.7, 1.3],
[0.1, 0.5],
[0.2, 0.6],
[0.04, 0.08],
[22.5, 27.5]]}
```
## Generate input parameters
```python
param_values = saltelli.sample(Soil_parameters, 10)
```
$n = N \times (D \times 2 +2)$
```python
param_values.shape
```
(120, 5)
## Loop for yield prediction
```python
from pcse.fileinput import YAMLAgroManagementReader
agromanagement_file = os.path.join(data_dir, 'agro', './sugarbeet_calendar.agro')
agromanagement = YAMLAgroManagementReader(agromanagement_file)
#print(agromanagement)
Soil_parameters = {
'num_vars':5,
'names':['SMV', 'SMFCF', 'SM0', 'CRAIRC', 'K0'],
'bounds':[[0.7, 1.3],
[0.1, 0.5],
[0.2, 0.6],
[0.04, 0.08],
[22.5, 27.5]]}
param_values = saltelli.sample(Soil_parameters, N=10, calc_second_order=True)
```
Soil parameters in [PCSE model](https://pcse.readthedocs.io/en/stable/code.html?highlight=K0#pcse.soil.WaterbalanceFD)
```python
def sensitivity_soil(soil_parameters):
    SMV, SMFCF, SM0, CRAIRC, K0 = soil_parameters
    soildata['SMV'] = SMV
    soildata['SMFCF'] = SMFCF
    soildata['SM0'] = SM0
    soildata['CRAIRC'] = CRAIRC
    soildata['K0'] = K0
    parameters = ParameterProvider(cropdata=cropdata, soildata=soildata, sitedata=sitedata)
    # 'weather' is the provider returned by weather_loader above
    # (the original cell referenced an undefined 'wdp' here)
    wofsim = Wofost71_WLP_FD(parameters, weather, agromanagement)
    wofsim.run_till_terminate()
    #df_results = pd.DataFrame(wofsim.get_output())
    #df_results = df_results.set_index("day")
    #df_results.tail()
    summary_output = wofsim.get_summary_output()
    yield_list.append(summary_output[0]['TWSO'])
```
```python
%%time
yield_list = []
param_values = saltelli.sample(Soil_parameters, 10, calc_second_order=True)
for step in range(len(param_values)):
    sensitivity_soil(param_values[step])
    print(param_values[step])
```
[ 0.83183594 0.13867188 0.40742187 0.06707031 23.90136719]
[ 1.24433594 0.13867188 0.40742187 0.06707031 23.90136719]
[ 0.83183594 0.11835938 0.40742187 0.06707031 23.90136719]
[ 0.83183594 0.13867188 0.55976563 0.06707031 23.90136719]
[ 0.83183594 0.13867188 0.40742187 0.06003906 23.90136719]
[ 0.83183594 0.13867188 0.40742187 0.06707031 22.84667969]
[ 0.83183594 0.11835938 0.55976563 0.06003906 22.84667969]
[ 1.24433594 0.13867188 0.55976563 0.06003906 22.84667969]
[ 1.24433594 0.11835938 0.40742187 0.06003906 22.84667969]
[ 1.24433594 0.11835938 0.55976563 0.06707031 22.84667969]
[ 1.24433594 0.11835938 0.55976563 0.06003906 23.90136719]
[ 1.24433594 0.11835938 0.55976563 0.06003906 22.84667969]
[ 1.13183594 0.33867188 0.20742188 0.04707031 26.40136719]
[ 0.94433594 0.33867188 0.20742188 0.04707031 26.40136719]
[ 1.13183594 0.31835938 0.20742188 0.04707031 26.40136719]
[ 1.13183594 0.33867188 0.35976562 0.04707031 26.40136719]
[ 1.13183594 0.33867188 0.20742188 0.04003906 26.40136719]
[ 1.13183594 0.33867188 0.20742188 0.04707031 25.34667969]
[ 1.13183594 0.31835938 0.35976562 0.04003906 25.34667969]
[ 0.94433594 0.33867188 0.35976562 0.04003906 25.34667969]
[ 0.94433594 0.31835938 0.20742188 0.04003906 25.34667969]
[ 0.94433594 0.31835938 0.35976562 0.04707031 25.34667969]
[ 0.94433594 0.31835938 0.35976562 0.04003906 26.40136719]
[ 0.94433594 0.31835938 0.35976562 0.04003906 25.34667969]
[ 1.28183594 0.23867188 0.50742187 0.07707031 25.15136719]
[ 0.79433594 0.23867188 0.50742187 0.07707031 25.15136719]
[ 1.28183594 0.21835938 0.50742187 0.07707031 25.15136719]
[ 1.28183594 0.23867188 0.25976562 0.07707031 25.15136719]
[ 1.28183594 0.23867188 0.50742187 0.05003906 25.15136719]
[ 1.28183594 0.23867188 0.50742187 0.07707031 26.59667969]
[ 1.28183594 0.21835938 0.25976562 0.05003906 26.59667969]
[ 0.79433594 0.23867188 0.25976562 0.05003906 26.59667969]
[ 0.79433594 0.21835938 0.50742187 0.05003906 26.59667969]
[ 0.79433594 0.21835938 0.25976562 0.07707031 26.59667969]
[ 0.79433594 0.21835938 0.25976562 0.05003906 25.15136719]
[ 0.79433594 0.21835938 0.25976562 0.05003906 26.59667969]
[ 0.98183594 0.43867188 0.30742188 0.05707031 22.65136719]
[ 1.09433594 0.43867188 0.30742188 0.05707031 22.65136719]
[ 0.98183594 0.41835937 0.30742188 0.05707031 22.65136719]
[ 0.98183594 0.43867188 0.45976563 0.05707031 22.65136719]
[ 0.98183594 0.43867188 0.30742188 0.07003906 22.65136719]
[ 0.98183594 0.43867188 0.30742188 0.05707031 24.09667969]
[ 0.98183594 0.41835937 0.45976563 0.07003906 24.09667969]
[ 1.09433594 0.43867188 0.45976563 0.07003906 24.09667969]
[ 1.09433594 0.41835937 0.30742188 0.07003906 24.09667969]
[ 1.09433594 0.41835937 0.45976563 0.05707031 24.09667969]
[ 1.09433594 0.41835937 0.45976563 0.07003906 22.65136719]
[ 1.09433594 0.41835937 0.45976563 0.07003906 24.09667969]
[ 0.90683594 0.28867188 0.25742188 0.05207031 23.27636719]
[ 1.16933594 0.28867188 0.25742188 0.05207031 23.27636719]
[ 0.90683594 0.26835938 0.25742188 0.05207031 23.27636719]
[ 0.90683594 0.28867188 0.20976563 0.05207031 23.27636719]
[ 0.90683594 0.28867188 0.25742188 0.05503906 23.27636719]
[ 0.90683594 0.28867188 0.25742188 0.05207031 25.97167969]
[ 0.90683594 0.26835938 0.20976563 0.05503906 25.97167969]
[ 1.16933594 0.28867188 0.20976563 0.05503906 25.97167969]
[ 1.16933594 0.26835938 0.25742188 0.05503906 25.97167969]
[ 1.16933594 0.26835938 0.20976563 0.05207031 25.97167969]
[ 1.16933594 0.26835938 0.20976563 0.05503906 23.27636719]
[ 1.16933594 0.26835938 0.20976563 0.05503906 25.97167969]
[ 1.20683594 0.48867187 0.45742187 0.07207031 25.77636719]
[ 0.86933594 0.48867187 0.45742187 0.07207031 25.77636719]
[ 1.20683594 0.46835938 0.45742187 0.07207031 25.77636719]
[ 1.20683594 0.48867187 0.40976563 0.07207031 25.77636719]
[ 1.20683594 0.48867187 0.45742187 0.07503906 25.77636719]
[ 1.20683594 0.48867187 0.45742187 0.07207031 23.47167969]
[ 1.20683594 0.46835938 0.40976563 0.07503906 23.47167969]
[ 0.86933594 0.48867187 0.40976563 0.07503906 23.47167969]
[ 0.86933594 0.46835938 0.45742187 0.07503906 23.47167969]
[ 0.86933594 0.46835938 0.40976563 0.07207031 23.47167969]
[ 0.86933594 0.46835938 0.40976563 0.07503906 25.77636719]
[ 0.86933594 0.46835938 0.40976563 0.07503906 23.47167969]
[ 1.05683594 0.18867188 0.35742188 0.04207031 27.02636719]
[ 0.71933594 0.18867188 0.35742188 0.04207031 27.02636719]
[ 1.05683594 0.16835938 0.35742188 0.04207031 27.02636719]
[ 1.05683594 0.18867188 0.50976562 0.04207031 27.02636719]
[ 1.05683594 0.18867188 0.35742188 0.06503906 27.02636719]
[ 1.05683594 0.18867188 0.35742188 0.04207031 24.72167969]
[ 1.05683594 0.16835938 0.50976562 0.06503906 24.72167969]
[ 0.71933594 0.18867188 0.50976562 0.06503906 24.72167969]
[ 0.71933594 0.16835938 0.35742188 0.06503906 24.72167969]
[ 0.71933594 0.16835938 0.50976562 0.04207031 24.72167969]
[ 0.71933594 0.16835938 0.50976562 0.06503906 27.02636719]
[ 0.71933594 0.16835938 0.50976562 0.06503906 24.72167969]
[ 0.75683594 0.38867188 0.55742187 0.06207031 24.52636719]
[ 1.01933594 0.38867188 0.55742187 0.06207031 24.52636719]
[ 0.75683594 0.36835938 0.55742187 0.06207031 24.52636719]
[ 0.75683594 0.38867188 0.30976562 0.06207031 24.52636719]
[ 0.75683594 0.38867188 0.55742187 0.04503906 24.52636719]
[ 0.75683594 0.38867188 0.55742187 0.06207031 27.22167969]
[ 0.75683594 0.36835938 0.30976562 0.04503906 27.22167969]
[ 1.01933594 0.38867188 0.30976562 0.04503906 27.22167969]
[ 1.01933594 0.36835938 0.55742187 0.04503906 27.22167969]
[ 1.01933594 0.36835938 0.30976562 0.06207031 27.22167969]
[ 1.01933594 0.36835938 0.30976562 0.04503906 24.52636719]
[ 1.01933594 0.36835938 0.30976562 0.04503906 27.22167969]
[ 0.73808594 0.17617188 0.21992188 0.05832031 25.62011719]
[ 0.85058594 0.17617188 0.21992188 0.05832031 25.62011719]
[ 0.73808594 0.48085937 0.21992188 0.05832031 25.62011719]
[ 0.73808594 0.17617188 0.52226562 0.05832031 25.62011719]
[ 0.73808594 0.17617188 0.21992188 0.04128906 25.62011719]
[ 0.73808594 0.17617188 0.21992188 0.05832031 24.56542969]
[ 0.73808594 0.48085937 0.52226562 0.04128906 24.56542969]
[ 0.85058594 0.17617188 0.52226562 0.04128906 24.56542969]
[ 0.85058594 0.48085937 0.21992188 0.04128906 24.56542969]
[ 0.85058594 0.48085937 0.52226562 0.05832031 24.56542969]
[ 0.85058594 0.48085937 0.52226562 0.04128906 25.62011719]
[ 0.85058594 0.48085937 0.52226562 0.04128906 24.56542969]
[ 1.03808594 0.37617188 0.41992188 0.07832031 23.12011719]
[ 1.15058594 0.37617188 0.41992188 0.07832031 23.12011719]
[ 1.03808594 0.28085938 0.41992188 0.07832031 23.12011719]
[ 1.03808594 0.37617188 0.32226562 0.07832031 23.12011719]
[ 1.03808594 0.37617188 0.41992188 0.06128906 23.12011719]
[ 1.03808594 0.37617188 0.41992188 0.07832031 27.06542969]
[ 1.03808594 0.28085938 0.32226562 0.06128906 27.06542969]
[ 1.15058594 0.37617188 0.32226562 0.06128906 27.06542969]
[ 1.15058594 0.28085938 0.41992188 0.06128906 27.06542969]
[ 1.15058594 0.28085938 0.32226562 0.07832031 27.06542969]
[ 1.15058594 0.28085938 0.32226562 0.06128906 23.12011719]
[ 1.15058594 0.28085938 0.32226562 0.06128906 27.06542969]
CPU times: user 1min 21s, sys: 229 ms, total: 1min 21s
Wall time: 1min 21s
```python
np_yield = np.array(yield_list)
Si = sobol.analyze(Soil_parameters, np_yield, print_to_console=False)
```
```python
Si_dict = dict(Si)
Si_df = pd.DataFrame({'Si': Si_dict['S1'], 'ST': Si_dict['ST'],
                      'Si_conf': Si_dict['S1_conf'], 'ST_conf': Si_dict['ST_conf']},
                     index=['SMV', 'SMFCF', 'SM0', 'CRAIRC', 'K0'])
Si_df
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Si</th>
<th>ST</th>
<th>Si_conf</th>
<th>ST_conf</th>
</tr>
</thead>
<tbody>
<tr>
<th>SMV</th>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000000</td>
</tr>
<tr>
<th>SMFCF</th>
<td>-0.077417</td>
<td>0.284878</td>
<td>0.839456</td>
<td>0.638290</td>
</tr>
<tr>
<th>SM0</th>
<td>0.267504</td>
<td>0.381437</td>
<td>0.693409</td>
<td>0.688329</td>
</tr>
<tr>
<th>CRAIRC</th>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000000</td>
</tr>
<tr>
<th>K0</th>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000000</td>
</tr>
</tbody>
</table>
</div>
Is it ok?
\begin{equation}
\Large
0 \leq S_i \leq S_{T_i} \leq 1
\end{equation}
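Not quite: a negative $S_1$ (SMFCF) violates the constraint, and the confidence intervals are far larger than the estimates, both symptoms of the tiny sample ($N=10$). Note also that the soil file printed earlier lists `SMW`, not `SMV`, so the `SMV` override may never reach the water balance at all, which would explain its exactly-zero indices (worth checking). A small sketch to flag violations automatically:
```python
# Flag rows that violate 0 <= S1 <= ST <= 1 (a symptom of too few samples)
violations = Si_df[(Si_df['Si'] < 0) | (Si_df['Si'] > Si_df['ST']) | (Si_df['ST'] > 1)]
print(violations)
```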
### For 5 years
```python
def sensitivity_weather(year):
    K_kg = 60
    P_kg = 60
    N_kg = 120
    year_date = year
    print(year_date)
    yaml_agro = f"""
- {year_date}-06-01:
    CropCalendar:
        crop_name: 'sugar-beet'
        variety_name: 'sugar-beet-601'
        crop_start_date: {year_date}-06-02
        crop_start_type: emergence
        crop_end_date: {year_date}-10-15
        crop_end_type: harvest
        max_duration: 300
    TimedEvents:
    -   event_signal: apply_npk
        name: Timed N/P/K application table
        comment: All fertilizer amounts in kg/ha
        events_table:
        - {year_date}-06-22: {{N_amount : {N_kg}, P_amount: {P_kg}, K_amount: {K_kg}}}
    StateEvents: null
"""
    agromanagement = yaml.safe_load(yaml_agro)
    parameters = ParameterProvider(cropdata=cropdata, soildata=soildata, sitedata=sitedata)
    # 'moscow_weather' is assumed to be a weather provider for the Moscow region,
    # e.g. moscow_weather = weather_loader(55.75, 37.62)
    wofsim = Wofost71_WLP_FD(parameters, moscow_weather, agromanagement)
    wofsim.run_till_terminate()
    summary_output = wofsim.get_summary_output()
    yield_list_weather.append(summary_output[0]['TWSO'])
```
## Visualizing simulation results
Finally, we can generate some figures of WOFOST variables such as the development (DVS), total biomass (TAGP), leaf area index (LAI) and root-zone soil moisture (SM) using the MatPlotLib plotting package:
```python
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(12,10))
for var, ax in zip(["DVS", "TWSO", "LAI", "SM"], axes.flatten()):
    ax.plot_date(df_results.index, df_results[var], 'b-')
    ax.set_title(var)
fig.autofmt_xdate()
```
# Visualization for sensitivity analysis
Plots by [Water programming group](https://waterprogramming.wordpress.com/2019/08/27/a-python-implementation-of-grouped-radial-convergence-plots-to-visualize-sobol-sensitivity-analysis-results/)
How to repeat: [Repo of SampleVIS](https://github.com/charlesrouge/SampleVis)
```python
import numpy as np
from SALib.analyze import sobol
from SALib.sample import saltelli
from fishery import fish_game
import matplotlib.pyplot as plt
import itertools
import math
```
### Why number of samples is important?
```python
# Set up dictionary with system parameters
problem = {
'num_vars': 6,
'names': ['a', 'b', 'c','h',
'K','m'],
'bounds': [[ 0.002, 2],
[0.005, 1],
[0.2, 1],
[0.001, 1],
[100, 5000],
[0.1, 1.5]]
}
# Array with n's to use
nsamples = np.arange(50, 500, 50)
# Arrays to store the index estimates
S1_estimates = np.zeros([problem['num_vars'],len(nsamples)])
ST_estimates = np.zeros([problem['num_vars'],len(nsamples)])
# Loop through all n values, create sample, evaluate model and estimate S1 & ST
for i in range(len(nsamples)):
    print('n= ' + str(nsamples[i]))
    # Generate samples
    sampleset = saltelli.sample(problem, nsamples[i], calc_second_order=False)
    # Run model for all samples
    output = [fish_game(*sampleset[j,:]) for j in range(len(sampleset))]
    # Perform analysis
    results = sobol.analyze(problem, np.asarray(output), calc_second_order=False, print_to_console=False)
    # Store estimates
    ST_estimates[:,i] = results['ST']
    S1_estimates[:,i] = results['S1']
np.save('ST_estimates.npy', ST_estimates)
np.save('S1_estimates.npy', S1_estimates)
S1_estimates = np.load('S1_estimates.npy')
ST_estimates = np.load('ST_estimates.npy')
# Generate figure showing evolution of indices
fig = plt.figure(figsize=(18,9))
ax1 = fig.add_subplot(1,2,1)
handles = []
for j in range(problem['num_vars']):
    handles += ax1.plot(nsamples, S1_estimates[j,:], linewidth=5)
ax1.set_title('Evolution of S1 index estimates', fontsize=20)
ax1.set_ylabel('S1', fontsize=18)
ax1.set_xlabel('Number of samples (n)', fontsize=18)
ax1.tick_params(axis='both', which='major', labelsize=14)
ax2 = fig.add_subplot(1,2,2)
for j in range(problem['num_vars']):
    ax2.plot(nsamples, ST_estimates[j,:], linewidth=5)
ax2.set_title('Evolution of ST index estimates', fontsize=20)
ax2.set_ylabel('ST', fontsize=18)
ax2.tick_params(axis='both', which='major', labelsize=14)
ax2.set_xlabel('Number of samples (n)', fontsize=18)
fig.legend(handles, problem['names'], loc = 'right', fontsize=11)
plt.show()
#plt.savefig('indexevolution.png')
# Calculate parameter rankings
S1_ranks = np.zeros_like(S1_estimates)
ST_ranks = np.zeros_like(ST_estimates)
for i in range(len(nsamples)):
    orderS1 = np.argsort(S1_estimates[:,i])
    orderST = np.argsort(ST_estimates[:,i])
    S1_ranks[:,i] = orderS1.argsort()
    ST_ranks[:,i] = orderST.argsort()
# Generate figure showing evolution of ranks
fig = plt.figure(figsize=(18,9))
ax1 = fig.add_subplot(1,2,1)
handles = []
for j in range(problem['num_vars']):
handles += ax1.plot(nsamples, S1_ranks[j,:], linewidth=3)
ax1.set_title('Parameter ranking based on S1', fontsize=20)
ax1.set_ylabel('S1', fontsize=18)
ax1.set_xlabel('Number of samples (n)', fontsize=18)
ax1.set_yticklabels(np.arange(problem['num_vars']+1, 0, -1))
ax1.tick_params(axis='both', which='major', labelsize=14)
ax2 = fig.add_subplot(1,2,2)
for j in range(problem['num_vars']):
    ax2.plot(nsamples, ST_ranks[j,:], linewidth=3)
ax2.set_title('Parameter ranking based on ST', fontsize=20)
ax2.set_ylabel('ST', fontsize=18)
ax2.set_yticklabels(np.arange(problem['num_vars']+1, 0, -1))
ax2.tick_params(axis='both', which='major', labelsize=14)
ax2.set_xlabel('Number of samples (n)', fontsize=18)
fig.legend(handles, problem['names'], loc = 'right', fontsize=14)
#plt.show()
#plt.savefig('rankingevolution.png')
```
## Radial plot for SA
```python
import numpy as np
import itertools
import matplotlib.pyplot as plt
import seaborn as sns
import math
from numpy import genfromtxt
import matplotlib.patches as mpatches
sns.set_style('whitegrid', {'axes.linewidth': 0, 'axes.edgecolor': 'white'})
```
## Plot function
```python
def is_significant(value, confidence_interval, threshold="conf"):
    if threshold == "conf":
        return value - abs(confidence_interval) > 0
    else:
        return value - abs(float(threshold)) > 0
def grouped_radial(SAresults, parameters, radSc=2.0, scaling=1, widthSc=0.5, STthick=1, varNameMult=1.3, colors=None, groups=None, gpNameMult=1.5, threshold="conf"):
    # Derived from https://github.com/calvinwhealton/SensitivityAnalysisPlots
    fig, ax = plt.subplots(1, 1)
    color_map = {}

    # initialize parameters and colors
    if groups is None:
        if colors is None:
            colors = ["k"]
        for i, parameter in enumerate(parameters):
            color_map[parameter] = colors[i % len(colors)]
    else:
        if colors is None:
            colors = sns.color_palette("deep", max(3, len(groups)))
        for i, key in enumerate(groups.keys()):
            #parameters.extend(groups[key])
            for parameter in groups[key]:
                color_map[parameter] = colors[i % len(colors)]

    n = len(parameters)
    angles = radSc*math.pi*np.arange(0, n)/n
    x = radSc*np.cos(angles)
    y = radSc*np.sin(angles)

    # plot second-order indices
    for i, j in itertools.combinations(range(n), 2):
        #key1 = parameters[i]
        #key2 = parameters[j]
        if is_significant(SAresults["S2"][i][j], SAresults["S2_conf"][i][j], threshold):
            angle = math.atan((y[j]-y[i])/(x[j]-x[i]))
            if y[j]-y[i] < 0:
                angle += math.pi
            line_hw = scaling*(max(0, SAresults["S2"][i][j])**widthSc)/2
            coords = np.empty((4, 2))
            coords[0, 0] = x[i] - line_hw*math.sin(angle)
            coords[1, 0] = x[i] + line_hw*math.sin(angle)
            coords[2, 0] = x[j] + line_hw*math.sin(angle)
            coords[3, 0] = x[j] - line_hw*math.sin(angle)
            coords[0, 1] = y[i] + line_hw*math.cos(angle)
            coords[1, 1] = y[i] - line_hw*math.cos(angle)
            coords[2, 1] = y[j] - line_hw*math.cos(angle)
            coords[3, 1] = y[j] + line_hw*math.cos(angle)
            ax.add_artist(plt.Polygon(coords, color="0.75"))

    # plot total order indices
    for i, key in enumerate(parameters):
        if is_significant(SAresults["ST"][i], SAresults["ST_conf"][i], threshold):
            ax.add_artist(plt.Circle((x[i], y[i]), scaling*(SAresults["ST"][i]**widthSc)/2, color='w'))
            ax.add_artist(plt.Circle((x[i], y[i]), scaling*(SAresults["ST"][i]**widthSc)/2, lw=STthick, color='0.4', fill=False))

    # plot first-order indices
    for i, key in enumerate(parameters):
        if is_significant(SAresults["S1"][i], SAresults["S1_conf"][i], threshold):
            ax.add_artist(plt.Circle((x[i], y[i]), scaling*(SAresults["S1"][i]**widthSc)/2, color='0.4'))

    # add labels
    for i, key in enumerate(parameters):
        ax.text(varNameMult*x[i], varNameMult*y[i], key, ha='center', va='center',
                rotation=angles[i]*360/(2*math.pi) - 90,
                color=color_map[key])

    if groups is not None:
        for i, group in enumerate(groups.keys()):
            print(group)
            group_angle = np.mean([angles[j] for j in range(n) if parameters[j] in groups[group]])
            ax.text(gpNameMult*radSc*math.cos(group_angle), gpNameMult*radSc*math.sin(group_angle), group, ha='center', va='center',
                    rotation=group_angle*360/(2*math.pi) - 90,
                    color=colors[i % len(colors)])

    ax.set_facecolor('white')
    ax.set_xticks([])
    ax.set_yticks([])
    plt.axis('equal')
    plt.axis([-2*radSc, 2*radSc, -2*radSc, 2*radSc])
    #plt.show()
    return fig
```
## Range of soil parameters
```python
problem = {
'num_vars':6,
'names':['SOC', 'Sand', 'Clay', 'pH', 'CN', 'BD'],
'bounds':[[2.58, 6.20],
[0.01, 0.30],
[0.01, 0.30],
[4.6, 6.9],
[10.9, 12.4],
[900, 1350]]
}
```
```python
#names for csv files
list_of_csv=['soybean-000-2015.csv', 'sugar-beet-2011.csv', 'sugar-beet-2017.csv',
'spring-barley-2012.csv', 'sugar-beet-2014.csv']
list_of_names=['soybean-000-2015', 'sugar-beet-2011', 'sugar-beet-2017',
               'spring-barley-2012', 'sugar-beet-2014']
list_of_totals=['total_SI_'+x for x in list_of_names]
list_of_first=['first_SI_'+x for x in list_of_names]
list_of_second=['second_SI_'+x for x in list_of_names]
list_of_SI=['SI_'+x for x in list_of_names]
```
```python
for j, i in enumerate(list_of_csv):
    all_data_csv = genfromtxt('./'+str(i), delimiter=',')
    output = all_data_csv[:,2]
    print(i)
    list_of_SI[j] = sobol.analyze(problem, output, calc_second_order=True, conf_level=0.95, print_to_console=False)
```
soybean-000-2015.csv
sugar-beet-2011.csv
sugar-beet-2017.csv
spring-barley-2012.csv
sugar-beet-2014.csv
```python
groups={"Soil physics" : ["Sand", "Clay", "BD"],
"Soil chemistry" : ["pH", "SOC", "CN"]}
fig = grouped_radial(list_of_SI[4], ['BD', 'Sand', 'Clay', 'pH', 'CN', 'SOC'], groups=groups, threshold=0.001)
red_patch = mpatches.Patch(color='red', label='The red data')
plt.title(list_of_names[4], loc='left')
plt.show()
```
## Homework
__[Tasks](https://skoltech-my.sharepoint.com/:w:/g/personal/mikhail_gasanov_skoltech_ru/EeTPQxbrzVdPqnSENKYyoTUBay1RDYgMMW3GO3qFT2ge5g?e=4hk45V)__
Useful resources:
__SA and UQ__
1) [Rhodium project](https://github.com/Project-Platypus/Rhodium.git)
2) [SALib](https://github.com/SALib/SALib)
__Model__
3) [PCSE](https://pcse.readthedocs.io/en/stable/index.html)
4) How to install PCSE at local machine
`conda env create -f` [py3_pcse.yml](https://github.com/mishagrol/Seminar_Sobol/blob/master/py3_pcse.yml)
Any questions -
Telegram - `@misha_grol`
Part 1 – Crop Yield Prediction (PCSE, MONICA)
You can use the seminars’ colab:
“https://colab.research.google.com/drive/1j4AHD8KkTRThPuNsQzQFWYSJtptQ6bUA”
1) Assess the yield of one of the crops for the Moscow region over several years (potatoes, sugar beets for 2-3 years)
Crop - (https://github.com/mishagrol/Seminar_Sobol/tree/master/data/crop)
Soil - (https://github.com/mishagrol/Seminar_Sobol/tree/master/data/soil)
Weather - NASAdataprovider in PCSE (https://pcse.readthedocs.io/en/stable/code.html?highlight=NASA#pcse.db.NASAPowerWeatherDataProvider)
Agromanagement - (https://github.com/mishagrol/Seminar_Sobol/blob/master/data/agro/sugarbeet_calendar_Moscow_short.agro)
Part 2 – Sensitivity Analysis (SALib)
1) Perform sensitivity analysis of one of the model blocks (crop, soil, agromanagement *) with SALib. You can choose one of the methods that you consider necessary (Sobol, FAST, ...).
Generate samples – In report provide the size of the resulting matrix and the sample size (N)
Conduct parameter sensitivity analysis - In report provide S1 and ST indices.
2) Generate plots (Hist, Radial convergence plot, etc.)
*3) Estimate the required number of simulations to obtain reliable values of the sensitivity indices. Try to estimate the sample size at the confidence interval of the sensitivity indices.
* Please note that working with discrete data can cause certain difficulties.
# Agro Hack https://agro-code.ru/
### Bonus
__Morris method__
Generate a sample using the Method of Morris
Three variants of Morris' sampling for elementary effects are supported:
- Vanilla Morris
- Optimised trajectories when ``optimal_trajectories=True`` (using
Campolongo's enhancements from 2007 and optionally Ruano's enhancement
from 2012; ``local_optimization=True``)
- Groups with optimised trajectories when ``optimal_trajectories=True`` and
the problem definition specifies groups (note that ``local_optimization``
must be ``False``)
At present, optimised trajectories are implemented using either a brute-force
approach, which can be very slow (especially if you require more than four
trajectories), or a local method, which is much faster. Both methods now
support working with groups of factors.
Note that the number of factors makes little difference,
but the ratio between the number of optimal trajectories and the sample size
results in an exponentially increasing number of scores that must be
computed to find the optimal combination of trajectories. We suggest going
no higher than 4 trajectories from a pool of 100 samples with the brute-force approach.
With local_optimization = True (which is the default),
it is possible to go higher than the previously suggested 4 from 100.
```python
import sys
from SALib.analyze import morris
from SALib.sample.morris import sample
from SALib.test_functions import Sobol_G
from SALib.util import read_param_file
from SALib.plotting.morris import horizontal_bar_plot, covariance_plot, \
sample_histograms
import matplotlib.pyplot as plt
#sys.path.append('../..')
# Read the parameter range file and generate samples
#problem = read_param_file('/Users/mikhailgasanov/Documents/GIT/SALib/src/SALib/test_functions/params/Sobol_G.txt')
# or define manually without a parameter file:
problem = {
'num_vars': 8,
'names': ['x1', 'x2', 'x3', 'x4', 'x5', 'x6', 'x7', 'x8'],
'groups': None,
'bounds': [[0.0, 1.0],
[0.0, 1.0],
[0.0, 1.0],
[0.0, 1.0],
[0.0, 1.0],
[0.0, 1.0],
[0.0, 1.0],
[0.0, 1.0]]
}
# Files with a 4th column for "group name" will be detected automatically, e.g.
# param_file = '../../src/SALib/test_functions/params/Ishigami_groups.txt'
# Generate samples
param_values = sample(problem, N=1000, num_levels=4,
optimal_trajectories=None)
# To use optimized trajectories (brute force method),
# give an integer value for optimal_trajectories
# Run the "model" -- this will happen offline for external models
Y = Sobol_G.evaluate(param_values)
# Perform the sensitivity analysis using the model output
# Specify which column of the output file to analyze (zero-indexed)
Si = morris.analyze(problem, param_values, Y, conf_level=0.95,
print_to_console=True,
num_levels=4, num_resamples=100)
# Returns a dictionary with keys 'mu', 'mu_star', 'sigma', and 'mu_star_conf'
# e.g. Si['mu_star'] contains the mu* value for each parameter, in the
# same order as the parameter file
fig, (ax1, ax2) = plt.subplots(1, 2)
horizontal_bar_plot(ax1, Si, {}, sortby='mu_star', unit=r"tCO$_2$/year")
covariance_plot(ax2, Si, {}, unit=r"tCO$_2$/year")
fig2 = plt.figure()
sample_histograms(fig2, param_values, problem, {'color': 'y'})
plt.show()
```
|
State Before: A : Type u_3
B : Type u_1
B' : Type ?u.27580
inst✝⁷ : CommRing A
inst✝⁶ : Ring B
inst✝⁵ : Ring B'
inst✝⁴ : Algebra A B
inst✝³ : Algebra A B'
x : B
inst✝² : Nontrivial B
R : Type u_2
inst✝¹ : Semiring R
inst✝ : Nontrivial R
f : A →+* R
⊢ map f (minpoly A x) ≠ 1
State After: case pos
A : Type u_3
B : Type u_1
B' : Type ?u.27580
inst✝⁷ : CommRing A
inst✝⁶ : Ring B
inst✝⁵ : Ring B'
inst✝⁴ : Algebra A B
inst✝³ : Algebra A B'
x : B
inst✝² : Nontrivial B
R : Type u_2
inst✝¹ : Semiring R
inst✝ : Nontrivial R
f : A →+* R
hx : IsIntegral A x
⊢ map f (minpoly A x) ≠ 1
case neg
A : Type u_3
B : Type u_1
B' : Type ?u.27580
inst✝⁷ : CommRing A
inst✝⁶ : Ring B
inst✝⁵ : Ring B'
inst✝⁴ : Algebra A B
inst✝³ : Algebra A B'
x : B
inst✝² : Nontrivial B
R : Type u_2
inst✝¹ : Semiring R
inst✝ : Nontrivial R
f : A →+* R
hx : ¬IsIntegral A x
⊢ map f (minpoly A x) ≠ 1
Tactic: by_cases hx : IsIntegral A x
State Before: case pos
A : Type u_3
B : Type u_1
B' : Type ?u.27580
inst✝⁷ : CommRing A
inst✝⁶ : Ring B
inst✝⁵ : Ring B'
inst✝⁴ : Algebra A B
inst✝³ : Algebra A B'
x : B
inst✝² : Nontrivial B
R : Type u_2
inst✝¹ : Semiring R
inst✝ : Nontrivial R
f : A →+* R
hx : IsIntegral A x
⊢ map f (minpoly A x) ≠ 1
State After: no goals
Tactic: exact mt ((monic hx).eq_one_of_map_eq_one f) (ne_one A x)
State Before: case neg
A : Type u_3
B : Type u_1
B' : Type ?u.27580
inst✝⁷ : CommRing A
inst✝⁶ : Ring B
inst✝⁵ : Ring B'
inst✝⁴ : Algebra A B
inst✝³ : Algebra A B'
x : B
inst✝² : Nontrivial B
R : Type u_2
inst✝¹ : Semiring R
inst✝ : Nontrivial R
f : A →+* R
hx : ¬IsIntegral A x
⊢ map f (minpoly A x) ≠ 1
State After: case neg
A : Type u_3
B : Type u_1
B' : Type ?u.27580
inst✝⁷ : CommRing A
inst✝⁶ : Ring B
inst✝⁵ : Ring B'
inst✝⁴ : Algebra A B
inst✝³ : Algebra A B'
x : B
inst✝² : Nontrivial B
R : Type u_2
inst✝¹ : Semiring R
inst✝ : Nontrivial R
f : A →+* R
hx : ¬IsIntegral A x
⊢ 0 ≠ 1
Tactic: rw [eq_zero hx, Polynomial.map_zero]
State Before: case neg
A : Type u_3
B : Type u_1
B' : Type ?u.27580
inst✝⁷ : CommRing A
inst✝⁶ : Ring B
inst✝⁵ : Ring B'
inst✝⁴ : Algebra A B
inst✝³ : Algebra A B'
x : B
inst✝² : Nontrivial B
R : Type u_2
inst✝¹ : Semiring R
inst✝ : Nontrivial R
f : A →+* R
hx : ¬IsIntegral A x
⊢ 0 ≠ 1
State After: no goals
Tactic: exact zero_ne_one
|
Frederick Reines was born in Paterson, New Jersey, one of four children of Gussie (Cohen) and Israel Reines. His parents were Jewish emigrants from the same town in Russia, but only met in New York City, where they were later married. He had an older sister, Paula, who became a doctor, and two older brothers, David and William, who became lawyers. He said that his "early education was strongly influenced" by his studious siblings. He was the great-nephew of the Rabbi Yitzchak Yaacov Reines, the founder of Mizrachi, a religious Zionist movement.
|
Require Import BenB.
Require Import BenB2.
(* ====================================================================== *)
(*
Title:
======
Authors:
Mart Brennenraedts, s1078038
Daan Weessies, s1063758
Wessel van der Lans, s1084461
Mees Ephraim, s1085936
*)
(* ====================================================================== *)
(*
[
This file has to be a valid script, meaning that it
can be executed by Coq.
Therefore, explanations in natural language have to be between
comment markers.
In this project template, text within square brackets (within
comment markers) is intended to clarify what needs to be
written where.
In the final version, we expect that all these blocks have been
replaced by (your) proper content.
]
*)
(*
Abstract:
=========
[
Explain whether you managed to prove the correctness theorem.
And how did that go: did you have to change a lot compared to
the original model as it was before you started with the proof,
or could you use your formalization without many modifications?
]
*)
(*
Focus:
Modeling Goal:
==============
Verification model
Fragment of reality:
====================
[ ... ]
Perspective:
============
[ ... ]
*)
(*
Abstractions or simplifications:
================================
[
Depending on the chosen focus, you may simplify certain aspects of
your artifact.
If you are modeling some kind of home automation system, it is not
unreasonable to assume that the net power is constant, although this
is not exactly the case in reality. However, if you are modeling an
artifact that protects against high peaks of power, these fluctuations
should be part of the model.
Write down explicitly which assumptions you have made to simplify
the artifact.
]
*)
(* ====================================================================== *)
(* Domain model *)
Definition Time := R.
(* meaning: time in seconds, in reals *)
Definition Temp := R.
(* meaning: temperature in degrees Celsius, in reals *)
(* Inputs and outputs of the SmartFridge *)
Variable PowerIn: Time -> Prop.
(* power is supplied to the smartfridge's powersupply *)
Variable FridgeDoorROpen: Time -> Prop.
(* the right fridge door is opened *)
Variable FridgeDoorLOpen: Time -> Prop.
(* the left fridge door is opened *)
Variable FreezerDoorROpen: Time -> Prop.
(* the right freezer door is opened *)
Variable FreezerDoorLOpen: Time -> Prop.
(* the left freezer door is opened *)
Variable ColdWater: Time -> Prop.
(* cold water is dispensed by the dispenser *)
Variable CubedIce: Time -> Prop.
(* ice cubes are dispensed by the dispenser *)
Variable CrushedIce: Time -> Prop.
(* crushed ice is dispensed by the dispenser *)
Variable WaterIn: Time -> Prop.
(* water is supplied to the IceMakerTray and to the WaterCooler *)
Variable FridgeLightOn: Time -> Prop.
(* the light inside the fridge is emitting *)
Variable WaterButtonIn: Time -> Prop.
(* The option to receive cold water is selected on the ControlPanel *)
Variable CrushedIceButtonIn: Time -> Prop.
(* The option to receive crushed ice is selected on the ControlPanel *)
Variable CubedIceButtonIn: Time -> Prop.
(* The option to receive cubed ice is selected on the ControlPanel *)
(* Internal components for the SmartFridge *)
Variable Pow1: Time -> Prop.
(* Power flows between the Powersupply and the FridgePowerHub *)
Variable Pow2: Time -> Prop.
(* Power flows between the Powersupply and the FreezerPowerHub *)
(* Internal IceMaker variables *)
Variable PowIceMakerTray: Time -> Prop.
(* Power is flowing between the IceMakerPowerHub and the IceMakerTray *)
Variable ReservoirFull: Time -> Prop.
(* The ice reservoir is full *)
Variable PowIceReservoir: Time -> Prop.
(* Power is flowing between the ice reservoir and the icemaker powerhub *)
Variable CrushIce: Time -> Prop.
(* There is ice that needs to be crushed*)
Variable FillReservoir: Time -> Prop.
(* There is room in the IceReservoir*)
Variable DispenseIce: Time -> Prop.
(* The dispenser requires ice *)
Variable PowIceCrusher: Time -> Prop.
(* Power flows between the IceCrusher and the IceMakerPowerHub *)
Variable DispenseWater: Time -> Prop.
(* The dispenser requires water from the WaterCooler *)
Variable PowWaterCooler: Time -> Prop.
(* Power flows between the WaterCooler and the IceMakerPowerHub *)
Variable DispenseCrushedIce: Time -> Prop.
(* The dispenser requires crushed ice *)
(* FridgeDoorL variables *)
Variable PowIceMaker: Time -> Prop.
(* Power flows between the FridgeDoorLPowerHub and IceMaker *)
Variable PowControlPanel: Time -> Prop.
(* power is supplied to the ControlPanelPowerHub if and only if power is supplied to the FridgeDoorLPowerHub *)
Variable ColdWaterSignal: Time -> Prop.
(* WaterButton is being pressed *)
Variable CrushedIceSignal: Time -> Prop.
(* CrushedIceButton is being pressed *)
Variable CubedIceSignal: Time -> Prop.
(* CubedIceButton is being pressed *)
Variable PowFridgeDoorSensorL: Time -> Prop.
(* The Sensor in FridgeDoorL is supplied with power *)
(* Internal components of ControlPanel *)
Variable PowWaterButton: Time -> Prop.
(* power is supplied to the button for selecting water if and only if power is supplied to the ControlPanelPowerHub *)
Variable PowCrushedIceButton: Time -> Prop.
(* power is supplied to the button for selecting crushed ice if and only if power is supplied to the ControlPanelPowerHub *)
Variable PowCubedIceButton: Time -> Prop.
(* power is supplied to the button for selecting cubed ice if and only if power is supplied to the ControlPanelPowerHub *)
(* Internal components of the freezer *)
Variable FreezerDoorSignalL: Time -> Prop.
(* The sensor in the freezerdoor senses that the left door is open *)
Variable FreezerDoorSignalR: Time -> Prop.
(* The sensor in the freezerdoor senses that the right door is open *)
Variable FreezerTempTooHigh: Time -> Prop.
(* The temperature near the freezer sensor is higher than MaximumFreezerTemp *)
Variable PowF: Time -> Prop.
(* Power is flowing between the FreezerPowerHub and FreezerDoorL *)
Variable PowG: Time -> Prop.
(* Power is flowing between the FreezerPowerHub and FreezerCooler *)
Variable PowH: Time -> Prop.
(* Power is flowing between the FreezerPowerHub and FreezerLight *)
Variable PowI: Time -> Prop.
(* Power is flowing between the FreezerPowerHub and FreezerTempSensor *)
Variable PowJ: Time -> Prop.
(* Power is flowing between the FreezerPowerHub and FreezerDoorR *)
Variable FreezerCoolerOn: Time -> Prop.
(* The freezer is actively cooling *)
(* Internal components of the fridge *)
Variable FridgeDoorSignalL: Time -> Prop.
(* The sensor in the Fridgedoor senses that the left door is open *)
Variable FridgeDoorSignalR: Time -> Prop.
(* The sensor in the Fridgedoor senses that the right door is open *)
Variable FridgeTempTooHigh: Time -> Prop.
(* The temperature near the Fridge sensor is higher than MaximumFridgeTemp *)
Variable PowA: Time -> Prop.
(* Power is flowing between the FridgePowerHub and FridgeDoorL *)
Variable PowB: Time -> Prop.
(* Power is flowing between the FridgePowerHub and FridgeCooler *)
Variable PowC: Time -> Prop.
(* Power is flowing between the FridgePowerHub and FridgeLight *)
Variable PowD: Time -> Prop.
(* Power is flowing between the FridgePowerHub and FridgeTempSensor *)
Variable PowE: Time -> Prop.
(* Power is flowing between the FridgePowerHub and FridgeDoorR *)
Variable FridgeCoolerOn: Time -> Prop.
(* The fridge is actively cooling *)
(* Internal components of FreezerDoorL *)
Variable PowFreezerDoorSensorL: Time -> Prop.
(* power is supplied to FreezerDoorSensorL if and only if power is supplied to FreezerDoorLPowerhub *)
(* Internal components of FreezerDoorR*)
Variable PowFreezerDoorSensorR: Time -> Prop.
(* power is supplied to FreezerDoorSensorR if and only if power is supplied to FreezerDoorRPowerHub *)
(* Internal components of FridgeDoorR *)
Variable PowFridgeDoorSensorR: Time -> Prop.
(* power is supplied to FridgeDoorSensorR if and only if power is supplied to FridgeDoorRPowerhub *)
(* Constants (including their meaning) *)
Definition MaximumFridgeTemp := 4.
(* The minimum temperature at which the FridgeTempSensor gives off the FridgeTempTooHigh signal *)
Definition MaximumFreezerTemp := -5.
(* The minimum temperature at which the FreezerTempSensor gives off the FreezerTempTooHigh signal *)
(* Functions *)
Variable TimeToTemp: Time -> Temp.
(* Predicates (including their meaning and measurements) *)
(* ====================================================================== *)
(* Auxiliary predicates (including their meaning) *)
(*
[
At this place within this template you may define as many
auxiliary predicates as you want, but do not forget to include
their meaning.
]
*)
(* ====================================================================== *)
(* Components *)
(*
[
For each component you have to specify the following information:
OUTSIDE comment markers:
- The 'Definition' to be read by Coq, in a readable layout that
matches the mathematical structure of the formula.
WITHIN comment markers:
- The specification of the component in natural language. Obviously,
this specification should be consistent with the formula used
by Coq.
- If appropriate, a short explanation in natural language about
the choices that have been made.
]
*)
(* Specifications of devices in ControlPanel *)
Definition WaterButton :=
forall t: Time,
WaterButtonIn t
/\
PowWaterButton t
<->
ColdWaterSignal t
.
(* meaning: if and only if the button to select cold water is pressed and power is supplied to the button to select cold water, then the signal for cold water is sent *)
Definition CrushedIceButton :=
forall t: Time,
CrushedIceButtonIn t
/\
PowCrushedIceButton t
<->
CrushedIceSignal t
.
(* meaning: if and only if the button to select crushed ice is pressed and power is supplied to the button to select crushed ice, then the signal for crushed ice is sent *)
Definition CubedIceButton :=
forall t: Time,
CubedIceButtonIn t
/\
PowCubedIceButton t
<->
CubedIceSignal t
.
(* meaning: if and only if the button to select cubed ice is pressed and power is supplied to the button to select cubed ice, then the signal for cubed ice is sent *)
Definition ControlPanelPowerHub :=
forall t: Time,
PowControlPanel t
<->
(
PowWaterButton t
/\
PowCrushedIceButton t
/\
PowCubedIceButton t
)
.
(* meaning: if and only if power is supplied to the control panel then power is supplied to the water button, crushed ice button, and cubed ice button *)
(* specification of devices in FridgeDoorL *)
Definition FridgeDoorLPowerHub :=
forall t: Time,
PowIceMaker t
/\
PowControlPanel t
/\
PowFridgeDoorSensorL t
<->
PowA t
.
(* meaning: power is supplied to PowIceMaker, PowControlPanel and PowFridgeDoorSensorL if and only if power is supplied to PowA *)
Definition FridgeDoorSensorL :=
forall t: Time,
FridgeDoorLOpen t
/\
PowFridgeDoorSensorL t
<->
FridgeDoorSignalL t
.
(* meaning: the signal FridgeDoorSignalL is sent if and only if FridgeDoorLOpen and PowFridgeDoorSensorL are received *)
(* Specifications of devices in IceMaker *)
Definition IceMakerTray :=
forall t: Time,
WaterIn t
/\
PowIceMakerTray t
/\
~ReservoirFull t
<->
FillReservoir t
.
(* meaning: iff there's water pressure on the water line, and power is being supplied to the IceMakerTray, and the IceReservoir is not full, then the IceReservoir is being filled *)
Definition Watercooler :=
forall t: Time,
PowWaterCooler t
/\
WaterIn t
/\
ColdWaterSignal t
<->
DispenseWater t
.
(* meaning: iff power is being supplied to the WaterCooler, and there is water pressure on the water line, and the WaterCooler is receiving a cold water signal from WaterButton, then water flows from the WaterCooler to the Dispenser *)
Definition IceReservoir :=
forall t: Time,
(
PowIceReservoir t
/\
CrushedIceSignal t
<->
CrushIce t
)
/\
(
PowIceReservoir t
/\
CubedIceSignal t
<->
CubedIce t
)
.
(* meaning: iff power is supplied to the IceReservoir and the crushed ice signal is received, then there is ice to be crushed; and iff power is supplied to the IceReservoir and the cubed ice signal is received, then ice cubes are dispensed. The parentheses are required, since <-> is not associative. *)
(* Specifications of devices in FridgeDoorR *)
Definition FridgeDoorSensorR :=
forall t: Time,
FridgeDoorROpen t
/\
PowFridgeDoorSensorR t
<->
FridgeDoorSignalR t
.
(* meaning: if and only if power is supplied to the sensor in the right-side fridge door and the right-side fridge door is opened, then a signal is sent from the right-side fridge door *)
Definition FridgeDoorRPowerHub :=
forall t: Time,
PowE t
<->
PowFridgeDoorSensorR t
.
(* meaning: iff power is being supplied to FridgeDoorRPowerHub, then it is supplying power to FridgeDoorSensorR *)
(* Specifications of devices in FreezerDoorR *)
Definition FreezerDoorSensorR :=
forall t: Time,
FreezerDoorROpen t
/\
PowFreezerDoorSensorR t
<->
FreezerDoorSignalR t
.
(* meaning: if and only if power is supplied to the sensor in the right-side freezer door and the right-side freezer door is opened, then a signal is sent from the right-side freezer door *)
Definition FreezerDoorRPowerHub :=
forall t: Time,
PowJ t
<->
PowFreezerDoorSensorR t
.
(* meaning: iff power is supplied to PowJ, then power is supplied to the right-side freezer door sensor *)
(* Specifications of devices in FreezerDoorL *)
Definition FreezerDoorSensorL :=
forall t: Time,
FreezerDoorLOpen t
/\
PowFreezerDoorSensorL t
<->
FreezerDoorSignalL t
.
(* meaning: iff power is supplied to the sensor in the left-side freezer door and the left-side freezer door is opened, then a signal is sent from the left-side freezer door *)
Definition FreezerDoorLPowerhub :=
forall t: Time,
PowF t
<->
PowFreezerDoorSensorL t
.
(* meaning: iff power is supplied to PowF, then power is supplied to the left-side freezer door sensor *)
(* Specifications of devices in SmartFridge *)
Definition Powersupply :=
forall t: Time,
PowerIn t
<->
Pow1 t
/\
Pow2 t
.
(* Meaning: DC power is supplied to FridgePowerhub and FreezerPowerhub iff AC power is supplied to the Powersupply *)
(* Specifications of devices in Fridge *)
Definition FridgeLight :=
forall t: Time,
(
FridgeDoorSignalL t
\/
FridgeDoorSignalR t
)
/\
PowC t
<->
FridgeLightOn t
.
(* Meaning: the light of the fridge is turned on if and only if it receives power through PowC and one or more of its doors is open. The parentheses around the disjunction are needed because /\ binds more strongly than \/ *)
Definition FridgeCooler :=
forall t: Time,
FridgeTempTooHigh t
/\
PowB t
<->
FridgeCoolerOn t
.
(* Meaning: when both power is delivered through PowB and the FridgeTempTooHigh signal is received, then the cooler will turn on *)
Definition FridgeTempSensor :=
forall t: Time,
TimeToTemp t > MaximumFridgeTemp
/\
PowD t
<->
FridgeTempTooHigh t
.
(* Meaning: for any moment in time, if and only if the temperature is greater than MaximumFridgeTemp and PowD supplies power, then the signal FridgeTempTooHigh is sent *)
Definition FridgePowerHub :=
forall t: Time,
Pow1 t
<->
PowA t
/\
PowB t
/\
PowC t
/\
PowD t
/\
PowE t
.
(* Meaning: if and only if power is supplied to Pow1, then power is supplied to PowA, PowB, PowC, PowD and PowE *)
(* Specifications of devices in Freezer *)
Definition FreezerPowerhub :=
forall t: Time,
Pow2 t
<->
PowF t
/\
PowG t
/\
PowH t
/\
PowI t
/\
PowJ t
.
(* meaning: iff power is supplied to Pow2, then power is supplied to PowF, PowG, PowH, PowI, and PowJ *)
Definition FreezerCooler :=
forall t: Time,
PowG t
/\
FreezerTempTooHigh t
<->
FreezerCoolerOn t
.
(* meaning: iff power is supplied to PowG and the temperature inside the freezer is too high, then the cooler inside the freezer will turn on *)
Definition FreezerTempSensor :=
forall t: Time,
TimeToTemp t > MaximumFreezerTemp
/\
PowI t
<->
FreezerTempTooHigh t
.
(* meaning: for any moment in time, if and only if the temperature is greater than MaximumFreezerTemp and PowI supplies power, then the signal FreezerTempTooHigh is sent *)
(* ====================================================================== *)
(* Specification of the overall system *)
(*
[
Here you have to specify:
OUTSIDE comment markers:
- The 'Definition' to be read by Coq, in a readable layout that
matches the mathematical structure of the formula.
WITHIN comment markers:
- The specification of the overall system in natural language.
Obviously, this specification should be consistent with the
formula used by Coq.
- If appropriate, a short explanation in natural language about
the choices that have been made.
]
*)
(* ====================================================================== *)
(* Extras *)
(*
[
It is very likely that you do not need any extras!
However, if it turns out during your proof that you have to prove
several times (almost) the same, then you may define a 'Lemma' at
this place, followed by its proof. And in the proof of the correctness
theorem, you may apply this lemma several times.
Note that it is always allowed to add lemmas to this script!
Sometimes it happens that Coq has troubles with 'trivial' properties
of numbers, that cannot be solve easily using 'lin_solve'.
In such situations, you may contact your supervisor and discuss
whether this may be solved by adding an 'Axiom', which can also be
applied later on within the proof of the correctness theorem.
]
*)
(* Correctness theorem *)
(* ====================================================================== *)
(*
[
Write down your correctness theorem in the usual notation:
Theorem CorTheorem:
Component1 /\ Component2 /\ ... /\ ComponentN -> SpecOfTheOverallSystem.
Note that as long as you don't know what natural deduction is
and you cannot start with the proof yet, you should keep this
theorem within comment markers, otherwise you will get a red cross
for stating a theorem without a proof.
For the final version you obviously have to remove these comment
markers and provide a real proof!
Note that even if your proof is correct, you won't be able to
get a green check mark, but only an orange flag, for technical
reasons. But that is no problem.
]
*)
(*
Theorem CorTheorem:
*)
|
<a href="https://colab.research.google.com/github/derek-shing/DS-Unit-2-Sprint-3-Advanced-Regression/blob/master/LS_DS2_234_Ridge_Regression.ipynb" target="_parent"></a>
# Lambda School Data Science - Ridge Regression
Regularize your way to a better tomorrow.
# Lecture
Data science depends on math, and math is generally focused on situations where:
1. a solution exists,
2. the solution is unique,
3. the solution's behavior changes continuously with the initial conditions.
These are known as [well-posed problems](https://en.wikipedia.org/wiki/Well-posed_problem), and are the sorts of assumptions so core in traditional techniques that it is easy to forget about them. But they do matter, as there can be exceptions:
1. no solution - e.g. no $x$ such that $Ax = b$
2. multiple solutions - e.g. several $x_1, x_2, ...$ such that $Ax = b$
3. "chaotic" systems - situations where small changes in initial conditions interact and reverberate in essentially unpredictable ways - for instance, the difficulty in longterm predictions of weather (N.B. not the same thing as longterm predictions of *climate*) - you can think of this as models that fail to generalize well, because they overfit on the training data (the initial conditions)
Problems suffering from the above are called ill-posed problems. Relating to linear algebra and systems of equations, the only truly well-posed problems are those with a single unique solution.
Think for a moment - what would a plot of such a system of equations look like if there was no solution? If there were multiple solutions? And how would that generalize to higher dimensions?
# Well-Posed problems in Linear Algebra
A lot of what you covered with linear regression was about getting matrices into the right shape for them to be solvable in this sense. But some matrices just won't submit to this, and other problems may technically "fit" linear regression but still be violating the above assumptions in subtle ways.
[Overfitting](https://en.wikipedia.org/wiki/Overfitting) is in some ways a special case of this - an overfit model uses more features/parameters than is "justified" by the data (essentially by the *dimensionality* of the data, as measured by $n$ the number of observations). As the number of features approaches the number of observations, linear regression still "works", but it starts giving fairly perverse results. In particular, it results in a model that fails to *generalize* - and so the core goal of prediction and explanatory power is undermined.
How is this related to well and ill-posed problems? It's not clearly a no solution or multiple solution case, but it does fall in the third category - overfitting results in fitting to the "noise" in the data, which means the particulars of one random sample or another (different initial conditions) will result in dramatically different models.
## Two Equations with Two Unknowns (well-posed)
\begin{align}
x-y = -1
\end{align}
\begin{align}
3x+y = 9
\end{align}
\begin{align}
\begin{bmatrix}
1 & -1 \\
3 & 1
\end{bmatrix}
\begin{bmatrix}
x \\
y
\end{bmatrix}
=
\begin{bmatrix}
-1 \\
9
\end{bmatrix}
\end{align}
```
import numpy as np
A = np.array([[1, -1], [3, 1]])
b = [[-1],[9]]
solution = np.linalg.solve(A, b)
print(solution)
x = solution[0][0]
y = solution[1][0]
print('\n')
print("x:", x)
print("y:", y)
```
[[2.]
[3.]]
x: 2.0
y: 3.0
## Two Equations with Three Unknowns (not well-posed)
\begin{align}
x-y+z = -1
\end{align}
\begin{align}
3x+y-2z = 9
\end{align}
\begin{align}
\begin{bmatrix}
1 & -1 & 1 \\
3 & 1 & -2
\end{bmatrix}
\begin{bmatrix}
x \\
y \\
z
\end{bmatrix}
=
\begin{bmatrix}
-1 \\
9
\end{bmatrix}
\end{align}
```
import numpy as np

A = np.array([[1, -1, 1], [3, 1, -2]])
b = [[-1],[9]]

# A is 2x3 rather than square, so np.linalg.solve raises a LinAlgError -
# there is no unique solution for it to return
solution = np.linalg.solve(A, b)
print(solution)
```
You can reduce these formulas, but there is no single solution: there are infinitely many, in which at least one of the variables must be expressed as a function of the others.
Example:
<https://www.youtube.com/watch?v=tGPSEXVYw_o>
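To make this concrete, here is a small illustrative sketch (ours, not from the original lesson): `np.linalg.lstsq` will happily return *one* particular solution - the minimum-norm one - even though infinitely many exist for this underdetermined system.

```
import numpy as np

A = np.array([[1, -1, 1], [3, 1, -2]])
b = np.array([-1, 9])

# lstsq doesn't require A to be square; for this consistent but
# underdetermined system it returns the minimum-norm solution
solution, residuals, rank, sv = np.linalg.lstsq(A, b, rcond=None)
print(solution)  # one of infinitely many (x, y, z) satisfying both equations
```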
# Generalization in Machine Learning
The goal of machine learning is to end up with a model that can predict well on new data that it has never seen before. This is sometimes called "out of sample accuracy". This is what we are simulating when we do a train-test split. We allow our model to fit to the training dataset and then we test its ability to generalize to new data by evaluating its accuracy on a test dataset. We want models that will be usable on new data indefinitely, so that we can train them once and then reap the rewards of accurate predictions for a long time to come.
## Underfitting
An underfit model will not perform well on the test data and will also not generalize to new data. Because of this, we can usually detect it easily (it just performs poorly in all situations). Because it's easy to identify we either remedy it quickly or move onto new methods.
```
X = np.array([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20])
y = np.array([10,9,8,7,6,5,4,3,2,1,1,2,3,4,5,6,7,8,9,10])
import matplotlib.pyplot as plt
plt.scatter(X,y)
plt.show()
```
```
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=42)
```
```
plt.scatter(X_train, y_train)
plt.show()
```
```
plt.scatter(X_test, y_test)
plt.show()
```
```
from sklearn.linear_model import LinearRegression
X_train = X_train.reshape(-1, 1)
model = LinearRegression().fit(X_train, y_train)
model.score(X_train, y_train)
beta_0 = model.intercept_
beta_1 = model.coef_[0]
print("Slope Coefficient: ", beta_1)
print("\nIntercept Value: ", beta_0)
plt.scatter(X_train, y_train)
y_hat = [beta_1*x + beta_0 for x in X]
plt.plot(X, y_hat)  # X here, not x - the comprehension variable doesn't leak out
plt.show()
```
```
plt.scatter(X_test, y_test)
y_hat = [beta_1*x + beta_0 for x in X]
plt.plot(X, y_hat)
plt.show()
```
## Overfitting
Let's explore the problem of overfitting (and a possible remedy - ridge regression) in the context of some housing data.
```
import pandas as pd
from sklearn.datasets import load_boston
from sklearn.preprocessing import scale
# Load and Scale the Data
boston = load_boston()
boston.data = scale(boston.data) # Very helpful for regularization!
# Put it in a dataframe
df = pd.DataFrame(boston.data, columns=boston.feature_names)
df['Price'] = boston.target
df.head()
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>CRIM</th>
<th>ZN</th>
<th>INDUS</th>
<th>CHAS</th>
<th>NOX</th>
<th>RM</th>
<th>AGE</th>
<th>DIS</th>
<th>RAD</th>
<th>TAX</th>
<th>PTRATIO</th>
<th>B</th>
<th>LSTAT</th>
<th>Price</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>-0.419782</td>
<td>0.284830</td>
<td>-1.287909</td>
<td>-0.272599</td>
<td>-0.144217</td>
<td>0.413672</td>
<td>-0.120013</td>
<td>0.140214</td>
<td>-0.982843</td>
<td>-0.666608</td>
<td>-1.459000</td>
<td>0.441052</td>
<td>-1.075562</td>
<td>24.0</td>
</tr>
<tr>
<th>1</th>
<td>-0.417339</td>
<td>-0.487722</td>
<td>-0.593381</td>
<td>-0.272599</td>
<td>-0.740262</td>
<td>0.194274</td>
<td>0.367166</td>
<td>0.557160</td>
<td>-0.867883</td>
<td>-0.987329</td>
<td>-0.303094</td>
<td>0.441052</td>
<td>-0.492439</td>
<td>21.6</td>
</tr>
<tr>
<th>2</th>
<td>-0.417342</td>
<td>-0.487722</td>
<td>-0.593381</td>
<td>-0.272599</td>
<td>-0.740262</td>
<td>1.282714</td>
<td>-0.265812</td>
<td>0.557160</td>
<td>-0.867883</td>
<td>-0.987329</td>
<td>-0.303094</td>
<td>0.396427</td>
<td>-1.208727</td>
<td>34.7</td>
</tr>
<tr>
<th>3</th>
<td>-0.416750</td>
<td>-0.487722</td>
<td>-1.306878</td>
<td>-0.272599</td>
<td>-0.835284</td>
<td>1.016303</td>
<td>-0.809889</td>
<td>1.077737</td>
<td>-0.752922</td>
<td>-1.106115</td>
<td>0.113032</td>
<td>0.416163</td>
<td>-1.361517</td>
<td>33.4</td>
</tr>
<tr>
<th>4</th>
<td>-0.412482</td>
<td>-0.487722</td>
<td>-1.306878</td>
<td>-0.272599</td>
<td>-0.835284</td>
<td>1.228577</td>
<td>-0.511180</td>
<td>1.077737</td>
<td>-0.752922</td>
<td>-1.106115</td>
<td>0.113032</td>
<td>0.441052</td>
<td>-1.026501</td>
<td>36.2</td>
</tr>
</tbody>
</table>
</div>
## preprocessing.scale(x) does the same thing as preprocessing.StandardScaler()
The difference is that `scale(x)` is a function (lowercase naming convention), while `StandardScaler()` is a class (uppercase naming convention) with some extra functionality; they will both scale our data equally well.
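A quick sanity check (a sketch we've added, not part of the original lesson) that the two really do produce identical output:

```
import numpy as np
from sklearn.preprocessing import scale, StandardScaler

X_demo = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]])
# The function and the class standardize columns to the same values
print(np.allclose(scale(X_demo), StandardScaler().fit_transform(X_demo)))  # True
```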
```
?scale
```
```
from sklearn.preprocessing import StandardScaler
# Load and scale the data
boston = load_boston()
scaler = StandardScaler()
boston.data = scaler.fit_transform(boston.data)
# Put it in a dataframe
df = pd.DataFrame(boston.data, columns=boston.feature_names)
df['Price'] = boston.target
df.head()
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>CRIM</th>
<th>ZN</th>
<th>INDUS</th>
<th>CHAS</th>
<th>NOX</th>
<th>RM</th>
<th>AGE</th>
<th>DIS</th>
<th>RAD</th>
<th>TAX</th>
<th>PTRATIO</th>
<th>B</th>
<th>LSTAT</th>
<th>Price</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>-0.419782</td>
<td>0.284830</td>
<td>-1.287909</td>
<td>-0.272599</td>
<td>-0.144217</td>
<td>0.413672</td>
<td>-0.120013</td>
<td>0.140214</td>
<td>-0.982843</td>
<td>-0.666608</td>
<td>-1.459000</td>
<td>0.441052</td>
<td>-1.075562</td>
<td>24.0</td>
</tr>
<tr>
<th>1</th>
<td>-0.417339</td>
<td>-0.487722</td>
<td>-0.593381</td>
<td>-0.272599</td>
<td>-0.740262</td>
<td>0.194274</td>
<td>0.367166</td>
<td>0.557160</td>
<td>-0.867883</td>
<td>-0.987329</td>
<td>-0.303094</td>
<td>0.441052</td>
<td>-0.492439</td>
<td>21.6</td>
</tr>
<tr>
<th>2</th>
<td>-0.417342</td>
<td>-0.487722</td>
<td>-0.593381</td>
<td>-0.272599</td>
<td>-0.740262</td>
<td>1.282714</td>
<td>-0.265812</td>
<td>0.557160</td>
<td>-0.867883</td>
<td>-0.987329</td>
<td>-0.303094</td>
<td>0.396427</td>
<td>-1.208727</td>
<td>34.7</td>
</tr>
<tr>
<th>3</th>
<td>-0.416750</td>
<td>-0.487722</td>
<td>-1.306878</td>
<td>-0.272599</td>
<td>-0.835284</td>
<td>1.016303</td>
<td>-0.809889</td>
<td>1.077737</td>
<td>-0.752922</td>
<td>-1.106115</td>
<td>0.113032</td>
<td>0.416163</td>
<td>-1.361517</td>
<td>33.4</td>
</tr>
<tr>
<th>4</th>
<td>-0.412482</td>
<td>-0.487722</td>
<td>-1.306878</td>
<td>-0.272599</td>
<td>-0.835284</td>
<td>1.228577</td>
<td>-0.511180</td>
<td>1.077737</td>
<td>-0.752922</td>
<td>-1.106115</td>
<td>0.113032</td>
<td>0.441052</td>
<td>-1.026501</td>
<td>36.2</td>
</tr>
</tbody>
</table>
</div>
```
df.shape
```
(506, 14)
## OLS Baseline Model
```
# Let's try good old least squares!
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
X = df.drop('Price', axis='columns')
y = df.Price
lin_reg = LinearRegression().fit(X, y)
mean_squared_error(y, lin_reg.predict(X))
```
21.894831181729206
That seems like a pretty good score, but...
Chances are this doesn't generalize very well. You can verify this by splitting the data to properly test model validity.
```
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
lin_reg_split = LinearRegression().fit(X_train, y_train)
print(mean_squared_error(y, lin_reg_split.predict(X)))
print(mean_squared_error(y_test, lin_reg_split.predict(X_test)))
```
22.347018673376052
26.273991426429014
Oops! 💥 - You have overfitting if you are fitting well to training data, but not generalizing well to test data.
### What can we do?
- Use fewer features - sure, but it can be a lot of work to figure out *which* features, and (in cases like this) there may not be any good reason to really favor some features over another.
- Get more data! This is actually a pretty good approach in tech, since apps generate lots of data all the time (and we made this situation by artificially constraining our data). But for case studies, existing data, etc. it won't work.
- **Regularize!**
## Regularization just means "add bias"
OK, there's a bit more to it than that. But that's the core intuition - the problem is the model working "too well", so fix it by making it harder for the model!
It may sound strange - a technique that is purposefully "worse" - but in certain situations, it can really get results.
What's bias? In the context of statistics and machine learning, bias is when a predictive model fails to identify relationships between features and the output. In a word, bias is *underfitting*.
We want to add bias to the model because of the [bias-variance tradeoff](https://en.wikipedia.org/wiki/Bias%E2%80%93variance_tradeoff) - variance is the sensitivity of a model to the random noise in its training data (i.e. *overfitting*), and bias and variance are naturally (inversely) related: with regards to the overall generalization error (predictive accuracy on unseen data), decreasing one tends to increase the other.
Visually, the result looks like the classic regularization picture: an overfit (blue) curve chasing every point, and a smoother regularized (green) curve. The blue line is overfit, using more dimensions than are needed to explain the data, so much of its movement is based on noise and won't generalize well. The green line still fits the data, but is less susceptible to the noise - depending on how exactly we parameterize "noise" we may throw out actual correlation, but if we balance it right we keep that signal and greatly improve generalizability.
### Look carefully at the above plot and think of ways you can quantify the difference between the blue and green lines...
```
# Now with regularization via ridge regression
from sklearn.linear_model import Ridge
ridge_reg = Ridge().fit(X, y)
mean_squared_error(y, ridge_reg.predict(X))
```
21.895862166800143
```
# The score is a bit worse than OLS - but that's expected (we're adding bias)
# Let's try a split, starting with alpha=0 (which reduces Ridge to plain OLS)
ridge_reg_split = Ridge(alpha=0).fit(X_train, y_train)
mean_squared_error(y_test, ridge_reg_split.predict(X_test))
```
26.273991426429053
```
# Identical to the OLS test MSE, as expected - alpha=0 *is* OLS
# Can we improve on it? We just went with defaults, but as always there's plenty of parameters
help(Ridge)
```
Help on class Ridge in module sklearn.linear_model.ridge:
class Ridge(_BaseRidge, sklearn.base.RegressorMixin)
| Linear least squares with l2 regularization.
|
| Minimizes the objective function::
|
| ||y - Xw||^2_2 + alpha * ||w||^2_2
|
| This model solves a regression model where the loss function is
| the linear least squares function and regularization is given by
| the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
| This estimator has built-in support for multi-variate regression
| (i.e., when y is a 2d-array of shape [n_samples, n_targets]).
|
| Read more in the :ref:`User Guide <ridge_regression>`.
|
| Parameters
| ----------
| alpha : {float, array-like}, shape (n_targets)
| Regularization strength; must be a positive float. Regularization
| improves the conditioning of the problem and reduces the variance of
| the estimates. Larger values specify stronger regularization.
| Alpha corresponds to ``C^-1`` in other linear models such as
| LogisticRegression or LinearSVC. If an array is passed, penalties are
| assumed to be specific to the targets. Hence they must correspond in
| number.
|
| fit_intercept : boolean
| Whether to calculate the intercept for this model. If set
| to false, no intercept will be used in calculations
| (e.g. data is expected to be already centered).
|
| normalize : boolean, optional, default False
| This parameter is ignored when ``fit_intercept`` is set to False.
| If True, the regressors X will be normalized before regression by
| subtracting the mean and dividing by the l2-norm.
| If you wish to standardize, please use
| :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
| on an estimator with ``normalize=False``.
|
| copy_X : boolean, optional, default True
| If True, X will be copied; else, it may be overwritten.
|
| max_iter : int, optional
| Maximum number of iterations for conjugate gradient solver.
| For 'sparse_cg' and 'lsqr' solvers, the default value is determined
| by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.
|
| tol : float
| Precision of the solution.
|
| solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag', 'saga'}
| Solver to use in the computational routines:
|
| - 'auto' chooses the solver automatically based on the type of data.
|
| - 'svd' uses a Singular Value Decomposition of X to compute the Ridge
| coefficients. More stable for singular matrices than
| 'cholesky'.
|
| - 'cholesky' uses the standard scipy.linalg.solve function to
| obtain a closed-form solution.
|
| - 'sparse_cg' uses the conjugate gradient solver as found in
| scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
| more appropriate than 'cholesky' for large-scale data
| (possibility to set `tol` and `max_iter`).
|
| - 'lsqr' uses the dedicated regularized least-squares routine
| scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative
| procedure.
|
| - 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses
| its improved, unbiased version named SAGA. Both methods also use an
| iterative procedure, and are often faster than other solvers when
| both n_samples and n_features are large. Note that 'sag' and
| 'saga' fast convergence is only guaranteed on features with
| approximately the same scale. You can preprocess the data with a
| scaler from sklearn.preprocessing.
|
| All last five solvers support both dense and sparse data. However,
| only 'sag' and 'saga' supports sparse input when `fit_intercept` is
| True.
|
| .. versionadded:: 0.17
| Stochastic Average Gradient descent solver.
| .. versionadded:: 0.19
| SAGA solver.
|
| random_state : int, RandomState instance or None, optional, default None
| The seed of the pseudo random number generator to use when shuffling
| the data. If int, random_state is the seed used by the random number
| generator; If RandomState instance, random_state is the random number
| generator; If None, the random number generator is the RandomState
| instance used by `np.random`. Used when ``solver`` == 'sag'.
|
| .. versionadded:: 0.17
| *random_state* to support Stochastic Average Gradient.
|
| Attributes
| ----------
| coef_ : array, shape (n_features,) or (n_targets, n_features)
| Weight vector(s).
|
| intercept_ : float | array, shape = (n_targets,)
| Independent term in decision function. Set to 0.0 if
| ``fit_intercept = False``.
|
| n_iter_ : array or None, shape (n_targets,)
| Actual number of iterations for each target. Available only for
| sag and lsqr solvers. Other solvers will return None.
|
| .. versionadded:: 0.17
|
| See also
| --------
| RidgeClassifier : Ridge classifier
| RidgeCV : Ridge regression with built-in cross validation
| :class:`sklearn.kernel_ridge.KernelRidge` : Kernel ridge regression
| combines ridge regression with the kernel trick
|
| Examples
| --------
| >>> from sklearn.linear_model import Ridge
| >>> import numpy as np
| >>> n_samples, n_features = 10, 5
| >>> np.random.seed(0)
| >>> y = np.random.randn(n_samples)
| >>> X = np.random.randn(n_samples, n_features)
| >>> clf = Ridge(alpha=1.0)
| >>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
| Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
| normalize=False, random_state=None, solver='auto', tol=0.001)
|
| Method resolution order:
| Ridge
| _BaseRidge
| abc.NewBase
| sklearn.linear_model.base.LinearModel
| abc.NewBase
| sklearn.base.BaseEstimator
| sklearn.base.RegressorMixin
| builtins.object
|
| Methods defined here:
|
| __init__(self, alpha=1.0, fit_intercept=True, normalize=False, copy_X=True, max_iter=None, tol=0.001, solver='auto', random_state=None)
| Initialize self. See help(type(self)) for accurate signature.
|
| fit(self, X, y, sample_weight=None)
| Fit Ridge regression model
|
| Parameters
| ----------
| X : {array-like, sparse matrix}, shape = [n_samples, n_features]
| Training data
|
| y : array-like, shape = [n_samples] or [n_samples, n_targets]
| Target values
|
| sample_weight : float or numpy array of shape [n_samples]
| Individual weights for each sample
|
| Returns
| -------
| self : returns an instance of self.
|
| ----------------------------------------------------------------------
| Data and other attributes defined here:
|
| __abstractmethods__ = frozenset()
|
| ----------------------------------------------------------------------
| Methods inherited from sklearn.linear_model.base.LinearModel:
|
| predict(self, X)
| Predict using the linear model
|
| Parameters
| ----------
| X : array_like or sparse matrix, shape (n_samples, n_features)
| Samples.
|
| Returns
| -------
| C : array, shape (n_samples,)
| Returns predicted values.
|
| ----------------------------------------------------------------------
| Methods inherited from sklearn.base.BaseEstimator:
|
| __getstate__(self)
|
| __repr__(self)
| Return repr(self).
|
| __setstate__(self, state)
|
| get_params(self, deep=True)
| Get parameters for this estimator.
|
| Parameters
| ----------
| deep : boolean, optional
| If True, will return the parameters for this estimator and
| contained subobjects that are estimators.
|
| Returns
| -------
| params : mapping of string to any
| Parameter names mapped to their values.
|
| set_params(self, **params)
| Set the parameters of this estimator.
|
| The method works on simple estimators as well as on nested objects
| (such as pipelines). The latter have parameters of the form
| ``<component>__<parameter>`` so that it's possible to update each
| component of a nested object.
|
| Returns
| -------
| self
|
| ----------------------------------------------------------------------
| Data descriptors inherited from sklearn.base.BaseEstimator:
|
| __dict__
| dictionary for instance variables (if defined)
|
| __weakref__
| list of weak references to the object (if defined)
|
| ----------------------------------------------------------------------
| Methods inherited from sklearn.base.RegressorMixin:
|
| score(self, X, y, sample_weight=None)
| Returns the coefficient of determination R^2 of the prediction.
|
| The coefficient R^2 is defined as (1 - u/v), where u is the residual
| sum of squares ((y_true - y_pred) ** 2).sum() and v is the total
| sum of squares ((y_true - y_true.mean()) ** 2).sum().
| The best possible score is 1.0 and it can be negative (because the
| model can be arbitrarily worse). A constant model that always
| predicts the expected value of y, disregarding the input features,
| would get a R^2 score of 0.0.
|
| Parameters
| ----------
| X : array-like, shape = (n_samples, n_features)
| Test samples. For some estimators this may be a
| precomputed kernel matrix instead, shape = (n_samples,
| n_samples_fitted], where n_samples_fitted is the number of
| samples used in the fitting for the estimator.
|
| y : array-like, shape = (n_samples) or (n_samples, n_outputs)
| True values for X.
|
| sample_weight : array-like, shape = [n_samples], optional
| Sample weights.
|
| Returns
| -------
| score : float
| R^2 of self.predict(X) wrt. y.
How to tune alpha? For now, let's loop and try values.
(For longterm/stretch/next week, check out [cross-validation](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.RidgeCV.html#sklearn.linear_model.RidgeCV).)
```
alphas = []
mses = []
for alpha in range(0, 200, 1):
ridge_reg_split = Ridge(alpha=alpha).fit(X_train, y_train)
mse = mean_squared_error(y_test, ridge_reg_split.predict(X_test))
print(alpha, mse)
alphas.append(alpha)
mses.append(mse)
```
0 26.273991426429053
1 26.192201358877668
2 26.118328007697226
3 26.051117952293595
4 25.989569283205444
5 25.93287356811407
6 25.880372753122625
7 25.831526788692837
8 25.785889053385123
9 25.743087513207442
10 25.702810145277628
11 25.66479356379559
12 25.628814073392263
13 25.59468057863174
14 25.56222892458644
15 25.53131734932431
16 25.50182280665663
17 25.473637974726547
18 25.44666880864133
19 25.420832527348388
20 25.396055949160893
21 25.372274108781504
22 25.349429102822004
23 25.327469121742823
24 25.30634763462536
25 25.286022699825878
26 25.266456379775086
27 25.24761424230921
28 25.229464934192976
29 25.211979815108453
30 25.19513264248022
31 25.178899299197408
32 25.163257557659424
33 25.14818687468406
34 25.13366821272317
35 25.11968388357409
36 25.106217411385522
37 25.093253412260974
38 25.080777488180427
39 25.068776133307583
40 25.057236651039766
41 25.046147080399017
42 25.035496130566347
43 25.02527312253186
44 25.015467936977434
45 25.006070967630855
46 24.997073079433843
47 24.98846557095439
48 24.980240140548986
49 24.972388855844812
50 24.96490412616673
51 24.95777867758141
52 24.951005530271846
53 24.944577977990345
54 24.9384895693689
55 24.9327340908919
56 24.92730555135946
57 24.92219816768905
58 24.917406351921144
59 24.912924699309215
60 24.9087479773882
61 24.904871115926827
62 24.901289197679855
63 24.897997449864803
64 24.8949912362963
65 24.892266050117623
66 24.889817507075655
67 24.88764133929069
68 24.885733389477625
69 24.88408960557926
70 24.882706035776263
71 24.88157882384208
72 24.880704204813682
73 24.880078500952195
74 24.879698117969724
75 24.879559541500758
76 24.87965933379892
77 24.879994130641087
78 24.8805606384229
79 24.881355631430836
80 24.882375949277577
81 24.88361849448833
82 24.88508023022692
83 24.886758178151386
84 24.888649416389928
85 24.89075107762813
86 24.89306034730016
87 24.895574461876226
88 24.898290707239912
89 24.90120641714914
90 24.90431897177517
91 24.907625796314402
92 24.911124359668285
93 24.914812173186736
94 24.918686789471128
95 24.92274580123304
96 24.92698684020521
97 24.93140757610152
98 24.93600571562298
99 24.94077900150688
100 24.945725211616683
101 24.950842158070053
102 24.95612768640294
103 24.96157967476758
104 24.96719603316249
105 24.972974702692664
106 24.97891365485829
107 24.985010890870456
108 24.99126444099231
109 24.99767236390434
110 25.004232746092597
111 25.010943701258537
112 25.017803369749362
113 25.02480991800798
114 25.0319615380414
115 25.03925644690685
116 25.04669288621445
117 25.05426912164612
118 25.061983442489353
119 25.069834161185728
120 25.077819612893087
121 25.085938155060855
122 25.09418816701804
123 25.102568049573183
124 25.111076224625787
125 25.119711134788766
126 25.128471243021377
127 25.137355032272303
128 25.14636100513223
129 25.155487683495902
130 25.164733608232837
131 25.174097338866744
132 25.183577453263027
133 25.193172547324206
134 25.2028812346929
135 25.212702146462046
136 25.222633930892243
137 25.232675253135735
138 25.24282479496694
139 25.25308125451928
140 25.26344334602802
141 25.273909799578966
142 25.284479360862818
143 25.29515079093497
144 25.305922865980495
145 25.31679437708437
146 25.32776413000649
147 25.338830944961526
148 25.349993656403374
149 25.361251112814
150 25.37260217649681
151 25.384045723373994
152 25.39558064278813
153 25.407205837307533
154 25.418920222535693
155 25.430722726924202
156 25.442612291589445
157 25.45458787013271
158 25.466648428463827
159 25.478792944627976
160 25.491020408635883
161 25.50332982229701
162 25.515720199055906
163 25.528190563831558
164 25.540739952859465
165 25.5533674135368
166 25.56607200427009
167 25.578852794325684
168 25.591708863682904
169 25.604639302889613
170 25.617643212920317
171 25.63071970503678
172 25.643867900650903
173 25.657086931189966
174 25.670375937964163
175 25.68373407203625
176 25.697160494093474
177 25.71065437432154
178 25.724214892280617
179 25.737841236783435
180 25.751532605775324
181 25.765288206216116
182 25.779107253964067
183 25.792988973661497
184 25.8069325986223
185 25.820937370721257
186 25.83500254028498
187 25.849127365984653
188 25.863311114730404
189 25.87755306156723
190 25.891852489572667
191 25.906208689755893
192 25.920620960958406
193 25.93508860975623
194 25.949610950363557
195 25.964187304537848
196 25.97881700148633
197 25.99349937777395
198 26.008233777232597
199 26.023019550871716
```
from matplotlib.pyplot import scatter
scatter(alphas, mses);
```
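To read the best value off the sweep programmatically (a small addition of ours to the loop above):

```
# alphas and mses were collected in the loop above
best = int(np.argmin(mses))
print(alphas[best], mses[best])  # alpha=75 gives the lowest test MSE (~24.88)
```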
## What's the intuition? What are we doing?
The `alpha` parameter corresponds to the weight being given to the extra penalty being calculated by [Tikhonov regularization](https://en.wikipedia.org/wiki/Tikhonov_regularization) (this parameter is sometimes referred to as $\lambda$ in the context of ridge regression).
Normal linear regression (OLS) minimizes the **sum of square error of the residuals**.
Ridge regression minimizes the **sum of square error of the residuals** *AND* **the squared slope of the fit model, times the alpha parameter**.
This is why the MSE for the first model in the for loop (`alpha=0`) is the same as the MSE for linear regression - it's the same model!
As `alpha` is increased, we give more and more penalty to a steep slope. In two or three dimensions this is fairly easy to visualize - beyond, think of it as penalizing coefficient size. Each coefficient represents the slope of an individual dimension (feature) of the model, so ridge regression is just squaring and summing those.
So while `alpha=0` reduces to OLS, as `alpha` approaches infinity eventually the penalty gets so extreme that the model will always output every coefficient as 0 (any non-zero coefficient resulting in a penalty that outweighs whatever improvement in the residuals), and just fit a flat model with intercept at the mean of the dependent variable.
Of course, what we want is somewhere in-between these extremes. Intuitively, what we want to do is apply an appropriate "cost" or penalty to the model for fitting parameters, much like adjusted $R^2$ takes into account the cost of adding complexity to a model. What exactly is an appropriate penalty will vary, so you'll have to put on your model comparison hat and give it a go!
PS - scaling the data helps, as that way this cost is consistent and can be added uniformly across features, and it is simpler to search for the `alpha` parameter.
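To make the objective concrete, here is a minimal sketch (our own helper, not a scikit-learn function) of the cost ridge regression minimizes - the residual sum of squares plus `alpha` times the sum of squared coefficients (the intercept is not penalized):

```
import numpy as np

def ridge_cost(X, y, coef, intercept, alpha):
    # ||y - Xw - b||^2 + alpha * ||w||^2
    residuals = y - (X @ coef + intercept)
    return np.sum(residuals ** 2) + alpha * np.sum(coef ** 2)

# e.g. evaluate a fitted model's total cost at a given alpha:
# ridge_cost(X_train, y_train, ridge_reg_split.coef_, ridge_reg_split.intercept_, 75)
```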
### Bonus - magic! ✨
Ridge regression doesn't just reduce overfitting and help with the third aspect of well-posed problems (poor generalizability). It can also fix the first two (no unique solution)!
```
df_tiny = df.sample(10, random_state=27)
print(df_tiny.shape)
X = df_tiny.drop('Price', axis='columns')
y = df_tiny.Price

lin_reg = LinearRegression().fit(X, y)
lin_reg.score(X, y)  # A "perfect" R^2 - more features (13) than observations (10)
# NOTE - True OLS would 💥 here (the normal equations have no unique solution)
# scikit protects us from an actual error, but still gives a poor model
```
(10, 14)
1.0
```
ridge_reg = Ridge().fit(X, y)
ridge_reg.score(X, y) # More plausible (not "perfect")
```
0.9760119331942763
```
# Using our earlier test split
mean_squared_error(y_test, lin_reg.predict(X_test))
```
103.04429449784261
```
# Ridge generalizes *way* better (and we've not even tuned alpha)
mean_squared_error(y_test, ridge_reg.predict(X_test))
```
41.79869373639458
```
# e.g. (w1^2 + w2^2 + ...) * alpha is the extra penalty from Ridge,
# where the w's are the fitted coefficients (not the feature values)
```
```
from sklearn.linear_model import RidgeCV
ridgecv = RidgeCV(alphas=[1e-3, 1e-2, 1e-1, 1, 10, 100, 1000]).fit(X, y)
ridgecv.score(X, y)
mean_squared_error(y_test, ridgecv.predict(X_test))
```
35.425045824319774
## And a bit more math
The regularization used by Ridge Regression is also known as **$L^2$ regularization**, due to the squaring of the slopes being summed. This corresponds to [$L^2$ space](https://en.wikipedia.org/wiki/Square-integrable_function), a metric space of square-integrable functions that generally measure what we intuitively think of as "distance" (at least, on a plane) - what is referred to as Euclidean distance.
The other famous norm is $L^1$, also known as [taxicab geometry](https://en.wikipedia.org/wiki/Taxicab_geometry), because it follows the "grid" to measure distance like a car driving around city blocks (rather than going directly like $L^2$). When referred to as a distance this is called "Manhattan distance", and can be used for regularization (see [LASSO](https://en.wikipedia.org/wiki/Lasso_%28statistics%29), which [uses the $L^1$ norm](https://www.quora.com/What-is-the-difference-between-L1-and-L2-regularization-How-does-it-solve-the-problem-of-overfitting-Which-regularizer-to-use-and-when)).
All this comes down to - regularization means increasing model bias by "watering down" coefficients with a penalty typically based on some sort of distance metric, and thus reducing variance (overfitting the model to the noise in the data). It gives us another lever to try and another tool for our toolchest!
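As a quick illustration of that practical difference (the alpha values here are arbitrary, chosen only for demonstration): $L^1$ regularization tends to drive coefficients exactly to zero, while $L^2$ merely shrinks them.

```
from sklearn.linear_model import Lasso, Ridge

lasso = Lasso(alpha=0.5).fit(X_train, y_train)
ridge = Ridge(alpha=75).fit(X_train, y_train)
print("Lasso coefficients exactly zero:", (lasso.coef_ == 0).sum())
print("Ridge coefficients exactly zero:", (ridge.coef_ == 0).sum())
```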
## Putting it all together - one last example
The official scikit-learn documentation has many excellent examples - [this one](https://scikit-learn.org/stable/auto_examples/linear_model/plot_ols_ridge_variance.html#sphx-glr-auto-examples-linear-model-plot-ols-ridge-variance-py) illustrates how ridge regression effectively reduces the variance, again by increasing the bias, penalizing coefficients to reduce the effectiveness of features (but also the impact of noise).
```
Due to the few points in each dimension and the straight line that linear regression uses to follow these points as well as it can, noise on the observations will cause great variance as shown in the first plot. Every line’s slope can vary quite a bit for each prediction due to the noise induced in the observations.
Ridge regression is basically minimizing a penalised version of the least-squared function. The penalising shrinks the value of the regression coefficients. Despite the few data points in each dimension, the slope of the prediction is much more stable and the variance in the line itself is greatly reduced, in comparison to that of the standard linear regression
```
```
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
X_train = np.c_[.5, 1].T
y_train = [.5, 1]
X_test = np.c_[0, 2].T
np.random.seed(0)
classifiers = dict(ols=linear_model.LinearRegression(),
ridge=linear_model.Ridge(alpha=.1))
for name, clf in classifiers.items():
fig, ax = plt.subplots(figsize=(4, 3))
for _ in range(6):
this_X = .1 * np.random.normal(size=(2, 1)) + X_train
clf.fit(this_X, y_train)
ax.plot(X_test, clf.predict(X_test), color='gray')
ax.scatter(this_X, y_train, s=3, c='gray', marker='o', zorder=10)
clf.fit(X_train, y_train)
ax.plot(X_test, clf.predict(X_test), linewidth=2, color='blue')
ax.scatter(X_train, y_train, s=30, c='red', marker='+', zorder=10)
ax.set_title(name)
ax.set_xlim(0, 2)
ax.set_ylim((0, 1.6))
ax.set_xlabel('X')
ax.set_ylabel('y')
fig.tight_layout()
plt.show()
```
Between the first and the second graph, we have decreased the slope (penalized, or "watered-down", coefficients), and we have much less variance between our lines.
# Assignment
Following is data describing characteristics of blog posts, with a target feature of how many comments will be posted in the following 24 hours.
https://archive.ics.uci.edu/ml/datasets/BlogFeedback
Investigate - you can try both linear and ridge. You can also sample to smaller data size and see if that makes ridge more important. Don't forget to scale!
Focus on the training data, but if you want to load and compare to any of the test data files you can also do that.
Note - Ridge may not be that fundamentally superior in this case. That's OK! It's still good to practice both, and see if you can find parameters or sample sizes where ridge does generalize and perform better.
When you've fit models to your satisfaction, answer the following question:
```
Did you find cases where Ridge performed better? If so, describe (alpha parameter, sample size, any other relevant info/processing). If not, what do you think that tells you about the data?
```
You can create whatever plots, tables, or other results support your argument. In this case, your target audience is a fellow data scientist, *not* a layperson, so feel free to dig in!
```
# TODO - write some code!
from google.colab import drive
drive.mount('/content/gdrive')
```
Mounted at /content/gdrive
```
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RidgeCV
from sklearn.linear_model import Ridge
from sklearn.preprocessing import scale
from sklearn.metrics import mean_squared_error
```
```
import pandas as pd
df = pd.read_csv('/content/gdrive/My Drive/blogData_train.csv', header=None)
```
```
df.head()
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>0</th>
<th>1</th>
<th>2</th>
<th>3</th>
<th>4</th>
<th>5</th>
<th>6</th>
<th>7</th>
<th>8</th>
<th>9</th>
<th>...</th>
<th>271</th>
<th>272</th>
<th>273</th>
<th>274</th>
<th>275</th>
<th>276</th>
<th>277</th>
<th>278</th>
<th>279</th>
<th>280</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>40.30467</td>
<td>53.845657</td>
<td>0.0</td>
<td>401.0</td>
<td>15.0</td>
<td>15.52416</td>
<td>32.44188</td>
<td>0.0</td>
<td>377.0</td>
<td>3.0</td>
<td>...</td>
<td>0.0</td>
<td>1.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>1.0</td>
</tr>
<tr>
<th>1</th>
<td>40.30467</td>
<td>53.845657</td>
<td>0.0</td>
<td>401.0</td>
<td>15.0</td>
<td>15.52416</td>
<td>32.44188</td>
<td>0.0</td>
<td>377.0</td>
<td>3.0</td>
<td>...</td>
<td>1.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
</tr>
<tr>
<th>2</th>
<td>40.30467</td>
<td>53.845657</td>
<td>0.0</td>
<td>401.0</td>
<td>15.0</td>
<td>15.52416</td>
<td>32.44188</td>
<td>0.0</td>
<td>377.0</td>
<td>3.0</td>
<td>...</td>
<td>1.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
</tr>
<tr>
<th>3</th>
<td>40.30467</td>
<td>53.845657</td>
<td>0.0</td>
<td>401.0</td>
<td>15.0</td>
<td>15.52416</td>
<td>32.44188</td>
<td>0.0</td>
<td>377.0</td>
<td>3.0</td>
<td>...</td>
<td>0.0</td>
<td>1.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>1.0</td>
</tr>
<tr>
<th>4</th>
<td>40.30467</td>
<td>53.845657</td>
<td>0.0</td>
<td>401.0</td>
<td>15.0</td>
<td>15.52416</td>
<td>32.44188</td>
<td>0.0</td>
<td>377.0</td>
<td>3.0</td>
<td>...</td>
<td>0.0</td>
<td>1.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>27.0</td>
</tr>
</tbody>
</table>
<p>5 rows × 281 columns</p>
</div>
```
df.isnull().sum()
```
```
# Note: this scales every column, including the target (column 280)
df = scale(df)
```
```
df
```
```
df = pd.DataFrame(df)
```
```
df
```
```
y = df[280]
```

```
X = df.drop(280, axis='columns')
```

```
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=45)
```
```
X.head()
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>0</th>
<th>1</th>
<th>2</th>
<th>3</th>
<th>4</th>
<th>5</th>
<th>6</th>
<th>7</th>
<th>8</th>
<th>9</th>
<th>...</th>
<th>270</th>
<th>271</th>
<th>272</th>
<th>273</th>
<th>274</th>
<th>275</th>
<th>276</th>
<th>277</th>
<th>278</th>
<th>279</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>40.30467</td>
<td>53.845657</td>
<td>0.0</td>
<td>401.0</td>
<td>15.0</td>
<td>15.52416</td>
<td>32.44188</td>
<td>0.0</td>
<td>377.0</td>
<td>3.0</td>
<td>...</td>
<td>0.0</td>
<td>0.0</td>
<td>1.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
</tr>
<tr>
<th>1</th>
<td>40.30467</td>
<td>53.845657</td>
<td>0.0</td>
<td>401.0</td>
<td>15.0</td>
<td>15.52416</td>
<td>32.44188</td>
<td>0.0</td>
<td>377.0</td>
<td>3.0</td>
<td>...</td>
<td>0.0</td>
<td>1.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
</tr>
<tr>
<th>2</th>
<td>40.30467</td>
<td>53.845657</td>
<td>0.0</td>
<td>401.0</td>
<td>15.0</td>
<td>15.52416</td>
<td>32.44188</td>
<td>0.0</td>
<td>377.0</td>
<td>3.0</td>
<td>...</td>
<td>0.0</td>
<td>1.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
</tr>
<tr>
<th>3</th>
<td>40.30467</td>
<td>53.845657</td>
<td>0.0</td>
<td>401.0</td>
<td>15.0</td>
<td>15.52416</td>
<td>32.44188</td>
<td>0.0</td>
<td>377.0</td>
<td>3.0</td>
<td>...</td>
<td>0.0</td>
<td>0.0</td>
<td>1.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
</tr>
<tr>
<th>4</th>
<td>40.30467</td>
<td>53.845657</td>
<td>0.0</td>
<td>401.0</td>
<td>15.0</td>
<td>15.52416</td>
<td>32.44188</td>
<td>0.0</td>
<td>377.0</td>
<td>3.0</td>
<td>...</td>
<td>0.0</td>
<td>0.0</td>
<td>1.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
</tr>
</tbody>
</table>
<p>5 rows × 280 columns</p>
</div>
```
lin_reg_split = LinearRegression().fit(X_train, y_train)
print(mean_squared_error(y, lin_reg_split.predict(X)))            # MSE on the full (scaled) data
print(mean_squared_error(y_test, lin_reg_split.predict(X_test)))  # MSE on the held-out test split
```
0.6367428134255578
0.602491653804951
```
ridge = Ridge(300).fit(X_train, y_train)
print(mean_squared_error(y_train, ridge.predict(X_train)))
print(mean_squared_error(y_test, ridge.predict(X_test)))
```
0.6502917208779181
0.6001996018140513
```
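# RidgeCV picks the best-performing alpha from the candidate list via cross-validation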
ridge_cv = RidgeCV([.001,.01,1,10,100,10000]).fit(X_train, y_train)
print(mean_squared_error(y_train, ridge_cv.predict(X_train)))
print(mean_squared_error(y_test, ridge_cv.predict(X_test)))
```
0.6535553660154282
0.5988194186410885
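To see which penalty strength the search actually selected, the fitted `RidgeCV` object exposes it as the `alpha_` attribute:
```
ridge_cv.alpha_
```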
# Resources and stretch goals
Resources:
- https://www.quora.com/What-is-regularization-in-machine-learning
- https://blogs.sas.com/content/subconsciousmusings/2017/07/06/how-to-use-regularization-to-prevent-model-overfitting/
- https://machinelearningmastery.com/introduction-to-regularization-to-reduce-overfitting-and-improve-generalization-error/
- https://towardsdatascience.com/ridge-and-lasso-regression-a-complete-guide-with-python-scikit-learn-e20e34bcbf0b
- https://stats.stackexchange.com/questions/111017/question-about-standardizing-in-ridge-regression#111022
Stretch goals:
- Revisit past data you've fit OLS models to, and see if there's an `alpha` such that ridge regression results in a model with lower MSE on a train/test split
- Yes, Ridge can be applied to classification! Check out [sklearn.linear_model.RidgeClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.RidgeClassifier.html#sklearn.linear_model.RidgeClassifier), and try it on a problem you previously approached with a different classifier (note - scikit-learn's LogisticRegression also applies an $L^2$ penalty by default, so the difference won't be as dramatic)
- Implement your own function to calculate the full cost that ridge regression is optimizing (the sum of squared residuals plus `alpha` times the sum of squared coefficients) - this alone won't fit a model, but you can use it to verify the cost of trained models, and to check that the coefficients from the equivalent OLS fit (without regularization) can incur a higher cost; see the sketch below
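One possible starting point for the last stretch goal - a minimal sketch assuming a fitted scikit-learn linear model with `coef_` and `predict` (the name `ridge_cost` and its signature are illustrative, not part of any library), and following scikit-learn's convention of leaving the intercept unpenalized:
```
import numpy as np

def ridge_cost(model, X, y, alpha):
    """Sum of squared residuals plus alpha times the sum of squared coefficients."""
    residuals = np.asarray(y) - model.predict(X)
    penalty = alpha * np.sum(np.asarray(model.coef_) ** 2)  # intercept is not penalized
    return np.sum(residuals ** 2) + penalty
```
For a fixed `alpha`, a fitted `Ridge` model should achieve a cost here no higher than the plain OLS coefficients evaluated at that same `alpha`, since this expression is exactly the objective `Ridge` minimizes.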
|
Require Import Crypto.Arithmetic.PrimeFieldTheorems.
Require Import Crypto.Specific.solinas32_2e321m9_13limbs.Synthesis.
(* TODO : change this to field once field isomorphism happens *)
Definition freeze :
{ freeze : feBW_tight -> feBW_limbwidths
| forall a, phiBW_limbwidths (freeze a) = phiBW_tight a }.
Proof.
Set Ltac Profiling.
Time synthesize_freeze ().
Show Ltac Profile.
Time Defined.
Print Assumptions freeze.
|
r=0.80
https://sandbox.dams.library.ucdavis.edu/fcrepo/rest/collection/sherry-lehmann/catalogs/d7js34/media/images/d7js34-023/svc:tesseract/full/full/0.80/default.jpg Accept:application/hocr+xml
|
theory Cell_Decomp_Theorem_Helpers
imports Denef_Lemma_2_4 Denef_Lemma_2_3 Algebras_of_Cells
begin
locale common_decomp_proof_context = denef_I + denef_II
locale common_refinement_locale = common_decomp_proof_context +
fixes \<C> A c a1 a2 I f m
assumes f_closed: "f \<in> carrier (UP (SA m))"
assumes f_deg: "deg (SA m) f \<le> (Suc d)"
assumes \<C>_def: "\<C> = Cond m A c a1 a2 I"
assumes \<C>_cond: "is_cell_condition \<C>"
assumes f_taylor_cfs: "\<And> i. (taylor_expansion (SA m) c f i = \<zero>\<^bsub>SA m\<^esub>) \<or>
(taylor_expansion (SA m) c f i \<in> Units (SA m))"
(**************************************************************************************************)
(**************************************************************************************************)
section\<open>Partitions by Zero Sets\<close>
(**************************************************************************************************)
(**************************************************************************************************)
context padic_fields
begin
definition zero_set_partition where
"zero_set_partition m Fs = atoms_of (gen_boolean_algebra (carrier (Q\<^sub>p\<^bsup>m\<^esup>)) (SA_zero_set m ` Fs))"
lemma nonzero_set_as_diff:
"SA_nonzero_set m f = (carrier (Q\<^sub>p\<^bsup>m\<^esup>)) - (SA_zero_set m f)"
unfolding SA_nonzero_set_def SA_zero_set_def by auto
lemma zero_set_partition_semialg:
assumes "Fs \<subseteq> carrier (SA m)"
assumes "finite Fs"
assumes "a \<in> zero_set_partition m Fs"
shows "is_semialgebraic m a"
proof-
have 0: "(SA_zero_set m ` Fs) \<subseteq> semialg_sets m"
apply(rule subsetI)
using SA_zero_set_is_semialg assms unfolding SA_zero_set_def is_semialgebraic_def by auto
have "zero_set_partition m Fs \<subseteq> semialg_sets m"
unfolding semialg_sets_def zero_set_partition_def
apply(rule atoms_of_gen_boolean_algebra, rule gen_boolean_algebra_subalgebra)
using 0 unfolding semialg_sets_def apply blast
apply(rule gen_boolean_algebra_finite) using assms by auto
thus ?thesis using assms
using is_semialgebraicI by auto
qed
lemma partition_by_zero_sets:
assumes "finite Fs"
assumes "Fs \<subseteq> carrier (SA m)"
assumes "a \<in> zero_set_partition m Fs"
assumes "f \<in> Fs"
shows "(\<forall> x \<in> a. f x = \<zero>) \<or> (\<forall> x \<in> a. f x \<noteq> \<zero>)"
proof(cases "a \<subseteq> SA_zero_set m f")
case True
then show ?thesis unfolding SA_zero_set_def by auto
next
case False
have F0: "SA_zero_set m f \<in> (gen_boolean_algebra (carrier (Q\<^sub>p\<^bsup>m\<^esup>)) (SA_zero_set m ` Fs))"
apply(rule generator_closed)
using assms unfolding SA_zero_set_def by auto
then have F1: "a \<inter> SA_zero_set m f = {}"
using assms atoms_are_minimal[of a _ "SA_zero_set m f"] False
unfolding zero_set_partition_def by blast
have "a \<subseteq> SA_nonzero_set m f"
unfolding nonzero_set_as_diff
apply(intro atom_in_comp[of "SA_zero_set m ` Fs" _ _ "SA_zero_set m f"]
F1 F0)
using assms unfolding zero_set_partition_def by auto
thus ?thesis unfolding SA_nonzero_set_def by auto
qed
lemma of_gen_boolean_algebra_un:
"\<Union> (gen_boolean_algebra S Xs) = S"
using gen_boolean_algebra_subset[of _ S Xs]
gen_boolean_algebra.universe[of S Xs]
by auto
lemma gen_boolean_algebra_atom_un:
assumes "finite Xs"
assumes "Y \<in> gen_boolean_algebra S Xs"
shows "Y = \<Union> {a \<in> atoms_of (gen_boolean_algebra S Xs). a \<subseteq> Y}"
by(intro gen_boolean_algebra_elem_uni_of_atoms[of "gen_boolean_algebra S Xs" S]
gen_boolean_algebra_finite assms,
unfold of_gen_boolean_algebra_un gen_boolean_algebra_idempotent, auto simp: assms)
lemma gen_boolean_algebra_atoms_cover:
assumes "finite Xs"
shows "S = \<Union> (atoms_of (gen_boolean_algebra S Xs))"
using assms gen_boolean_algebra_atom_un[of Xs S S]
by (simp add: atoms_of_covers' of_gen_boolean_algebra_un)
lemma induced_partition:
assumes "Xs partitions S"
assumes "Y \<subseteq> S"
shows "(\<inter>) Y ` Xs partitions Y"
apply(intro is_partitionI disjointI)
using assms is_partitionE disjointE
apply (smt (verit, best) Sup_upper boolean_algebra_cancel.inf1 image_iff inf.absorb_iff1
inf_Sup inf_bot_right)
using assms by (metis inf.orderE inf_Sup is_partitionE(2))
lemma partition_by_zero_sets_covers:
assumes "finite Fs"
shows "carrier (Q\<^sub>p\<^bsup>m\<^esup>) = \<Union> (zero_set_partition m Fs)"
unfolding zero_set_partition_def
apply(rule gen_boolean_algebra_atoms_cover)
using assms by blast
lemma partition_by_zero_sets_disjoint:
assumes "finite Fs"
shows "disjoint (zero_set_partition m Fs)"
apply(rule disjointI)
using assms unfolding zero_set_partition_def
by (simp add: atoms_of_disjoint)
lemma partition_by_zero_sets_partitions:
assumes "finite Fs"
shows "(zero_set_partition m Fs) partitions (carrier (Q\<^sub>p\<^bsup>m\<^esup>))"
apply(rule is_partitionI)
using partition_by_zero_sets_covers partition_by_zero_sets_disjoint assms by auto
definition poly_cfs_car_part where
"poly_cfs_car_part m f = zero_set_partition m (f ` {..deg (SA m) f})"
lemma poly_cfs_car_part_semialg:
assumes "f \<in> carrier (UP (SA m))"
assumes "a \<in> poly_cfs_car_part m f"
shows "is_semialgebraic m a"
apply(rule zero_set_partition_semialg[of "f ` {..deg (SA m) f}"])
using assms cfs_closed poly_cfs_car_part_def by auto
lemma poly_cfs_car_part_memE:
assumes "f \<in> carrier (UP (SA m))"
assumes "a \<in> poly_cfs_car_part m f"
shows "(\<forall> x \<in> a. f i x = \<zero>) \<or> (\<forall> x \<in> a. f i x \<noteq> \<zero>)"
proof(cases "i > deg (SA m) f")
case True
then have T0: "f i = \<zero>\<^bsub>SA m\<^esub>"
using assms UPSA.deg_leE by blast
have "a \<subseteq> carrier (Q\<^sub>p\<^bsup>m\<^esup>)"
using assms poly_cfs_car_part_semialg is_semialgebraic_closed by presburger
then have T1: "(\<forall>x\<in>a. \<zero>\<^bsub>SA m\<^esub> x = \<zero>)"
using SA_zeroE by auto
show ?thesis
unfolding T0 using T1 by auto
next
case False
show ?thesis
apply(intro partition_by_zero_sets[of "f ` {..deg (SA m) f}" m])
using False assms cfs_closed poly_cfs_car_part_def by auto
qed
lemma poly_cfs_car_part_finite:
"finite (poly_cfs_car_part m f)"
unfolding poly_cfs_car_part_def zero_set_partition_def
apply(rule atoms_finite)
by auto
lemma poly_cfs_car_part_covers:
"carrier (Q\<^sub>p\<^bsup>m\<^esup>) = \<Union> (poly_cfs_car_part m f)"
using gen_boolean_algebra_elem_uni_of_atoms
unfolding poly_cfs_car_part_def zero_set_partition_def
using partition_by_zero_sets_covers zero_set_partition_def by force
definition poly_cfs_part where
"poly_cfs_part m f A = ((\<inter>) A ` (poly_cfs_car_part m f)) - {{}}"
lemma poly_cfs_part_subset:
"\<And> a. a \<in> poly_cfs_part m f A \<Longrightarrow> a \<subseteq> A"
unfolding poly_cfs_part_def by auto
lemma partition_minus_empty:
assumes "As partitions A"
shows "(As - {{}}) partitions A"
apply(rule is_partitionI)
using assms is_partitionE disjointE disjointI apply fastforce
using assms is_partitionE(2) by auto[1]
lemma poly_cfs_part_partitions:
assumes "A \<subseteq> carrier (Q\<^sub>p\<^bsup>m\<^esup>)"
shows "(poly_cfs_part m f A) partitions A"
unfolding poly_cfs_part_def poly_cfs_car_part_def
apply(intro partition_minus_empty)
apply(intro induced_partition[of _ "carrier (Q\<^sub>p\<^bsup>m\<^esup>)"] )
by(rule partition_by_zero_sets_partitions, auto simp: assms)
lemma poly_cfs_part_finite:
"finite (poly_cfs_part m f A)"
unfolding poly_cfs_part_def using poly_cfs_car_part_finite by auto
lemma poly_cfs_part_memE:
assumes "f \<in> carrier (UP (SA m))"
assumes "a \<in> poly_cfs_part m f A"
shows "(\<forall> x \<in> a. f i x = \<zero>) \<or> (\<forall> x \<in> a. f i x \<noteq> \<zero>)"
using poly_cfs_car_part_memE[of f m _ i] assms
unfolding poly_cfs_part_def by auto
lemma poly_cfs_part_semialg:
assumes "is_semialgebraic m A"
assumes "f \<in> carrier (UP (SA m))"
assumes "a \<in> poly_cfs_part m f A"
shows "is_semialgebraic m a"
proof-
obtain a' where a'_def: "a' \<in> poly_cfs_car_part m f \<and> a = A \<inter> a'"
using assms poly_cfs_part_def by auto
thus ?thesis
using assms poly_cfs_part_def intersection_is_semialg poly_cfs_car_part_semialg
by auto
qed
definition poly_unit_replacement where
"poly_unit_replacement m f a = (\<lambda> i::nat. if (\<forall> x \<in> a \<inter> (carrier (Q\<^sub>p\<^bsup>m\<^esup>)). f i x = \<zero>) then \<zero>\<^bsub>SA m\<^esub>
else to_fun_unit m (f i))"
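text\<open>Each coefficient of $f$ that vanishes identically on $a$ is replaced by the zero function
of $SA(m)$, and every other coefficient is replaced by a unit of $SA(m)$; on a piece $a$ of a
coefficient partition, the replacement polynomial agrees with $f$ pointwise, as verified below.\<close>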
lemma poly_unit_replacement_dichotomy:
assumes "f \<in> carrier (UP (SA m))"
assumes "is_semialgebraic m a"
shows "\<And>i. poly_unit_replacement m f a i = \<zero>\<^bsub>SA m\<^esub> \<or> poly_unit_replacement m f a i \<in> Units (SA m) "
unfolding poly_unit_replacement_def using assms to_fun_unit_is_unit cfs_closed by auto
lemma poly_unit_replacement_cfs_closed:
assumes "f \<in> carrier (UP (SA m))"
shows "poly_unit_replacement m f a i \<in> carrier (SA m)"
apply(cases "\<forall> x \<in> a \<inter> (carrier (Q\<^sub>p\<^bsup>m\<^esup>)). f i x = \<zero>", unfold poly_unit_replacement_def)
using assms to_fun_unit_closed[of "f i" m] cfs_closed[of f m i] by auto
lemma poly_unit_replacement_above_deg:
assumes "f \<in> carrier (UP (SA m))"
assumes "i > deg (SA m) f"
shows "poly_unit_replacement m f a i = \<zero>\<^bsub>SA m\<^esub>"
proof-
have "f i = \<zero>\<^bsub>SA m\<^esub>"
using assms UPSA.deg_leE by blast
hence "\<forall>x\<in>a \<inter> carrier (Q\<^sub>p\<^bsup>m\<^esup>). f i x = \<zero>"
using SA_zeroE by auto
thus ?thesis
unfolding poly_unit_replacement_def by auto
qed
lemma poly_unit_replacement_closed:
assumes "f \<in> carrier (UP (SA m))"
shows "poly_unit_replacement m f a \<in> carrier (UP (SA m))"
apply(intro UP_car_memI[of "deg (SA m) f"])
apply(intro poly_unit_replacement_above_deg assms, auto)
by(rule poly_unit_replacement_cfs_closed, rule assms)
lemma poly_unit_replacement_cfs1:
assumes "f \<in> carrier (UP (SA m))"
assumes "(\<forall> x \<in> a. f i x = \<zero>)"
shows "poly_unit_replacement m f a i = \<zero>\<^bsub>SA m\<^esub>"
using assms
unfolding poly_unit_replacement_def by auto
lemma poly_unit_replacement_deg:
assumes "f \<in> carrier (UP (SA m))"
shows "deg (SA m) (poly_unit_replacement m f a) \<le> deg (SA m) f"
apply(rule deg_leqI)
apply (simp add: assms poly_unit_replacement_closed)
by (simp add: UPSA.deg_leqI assms padic_fields.poly_unit_replacement_closed padic_fields_axioms poly_unit_replacement_above_deg)
lemma poly_unit_replacement_cfs2:
assumes "f \<in> carrier (UP (SA m))"
assumes "(\<forall> x \<in> a. f i x \<noteq> \<zero>)"
assumes "is_semialgebraic m a"
assumes "a \<noteq> {}"
shows "poly_unit_replacement m f a i = (to_fun_unit m (f i))"
proof-
have "\<not> (\<forall>x\<in>a \<inter> carrier (Q\<^sub>p\<^bsup>m\<^esup>). f i x = \<zero>)"
using assms is_semialgebraic_closed by blast
thus ?thesis
unfolding poly_unit_replacement_def by auto
qed
lemma poly_unit_replacement_on_cfs_part:
assumes "is_semialgebraic m A"
assumes "f \<in> carrier (UP (SA m))"
assumes "a \<in> poly_cfs_part m f A"
shows "poly_unit_replacement m f a \<in> carrier (UP (SA m))"
"\<And>x. x \<in> a \<Longrightarrow> poly_unit_replacement m f a i x = f i x"
"\<And>x. x \<in> a \<Longrightarrow> SA_poly_to_Qp_poly m x f =
SA_poly_to_Qp_poly m x (poly_unit_replacement m f a)"
proof-
have a_closed: "a \<subseteq> carrier (Q\<^sub>p\<^bsup>m\<^esup>)"
using assms poly_cfs_part_subset is_semialgebraic_closed by blast
have a_nonempty: "a \<noteq> {}"
using assms unfolding poly_cfs_part_def by auto
show 1: "poly_unit_replacement m f a \<in> carrier (UP (SA m))"
using assms by (simp add: poly_unit_replacement_closed)
show 2: "\<And>x i. x \<in> a \<Longrightarrow> poly_unit_replacement m f a i x = f i x"
proof- fix x i assume A: "x \<in> a"
show "poly_unit_replacement m f a i x = f i x"
proof(cases "(\<forall> x \<in> a. f i x = \<zero>)")
case True
show ?thesis
using a_closed assms A True poly_unit_replacement_def SA_zeroE
by (simp add: Set.basic_monos(7))
next
case False
hence "(\<forall> x \<in> a. f i x \<noteq> \<zero>)"
using assms by (meson poly_cfs_part_memE)
have "\<not> (\<forall> x \<in> a \<inter> carrier (Q\<^sub>p\<^bsup>m\<^esup>). f i x = \<zero>)"
using a_nonempty a_closed by (simp add: False Int_absorb2)
hence "poly_unit_replacement m f a i = to_fun_unit m (f i)"
unfolding poly_unit_replacement_def by auto
then show ?thesis
using a_closed assms poly_unit_replacement_cfs2 to_fun_unit_eq[of "f i" m]
A UPSA.UP_car_memE(1) \<open>\<forall>x\<in>a. f i x \<noteq> \<zero>\<close> by auto
qed
qed
show 3: "\<And>x. x \<in> a \<Longrightarrow> SA_poly_to_Qp_poly m x f =
SA_poly_to_Qp_poly m x (poly_unit_replacement m f a)"
proof-
fix x assume A: "x \<in> a"
show "SA_poly_to_Qp_poly m x f =
SA_poly_to_Qp_poly m x (poly_unit_replacement m f a)"
proof(rule ext) fix j
have 30: "SA_poly_to_Qp_poly m x f j = f j x"
using SA_poly_to_Qp_poly_coeff[of _ m f j] A assms(2) local.a_closed
by auto
have 31: "SA_poly_to_Qp_poly m x (poly_unit_replacement m f a) j =
(poly_unit_replacement m f a) j x"
using a_closed assms 1 2 SA_poly_to_Qp_poly_coeff[of _ m f j] A
SA_poly_to_Qp_poly_coeff[of _ m "poly_unit_replacement m f a" j]
by blast
show "SA_poly_to_Qp_poly m x f j =
SA_poly_to_Qp_poly m x (poly_unit_replacement m f a) j"
unfolding 30 31 using 1 2[of x j] A by auto
qed
qed
qed
lemma(in UP_cring) taylor_expansion_inv:
assumes "f \<in> carrier (UP R)"
assumes "c \<in> carrier R"
shows "f = taylor_expansion R (\<ominus>c) (taylor_expansion R c f)"
"f = taylor_expansion R c (taylor_expansion R (\<ominus>c) f)"
proof-
have 0: "\<And> c. c \<in> carrier R \<Longrightarrow> f = taylor_expansion R (\<ominus>c) (taylor_expansion R c f)"
proof-
fix x c assume A: "c \<in> carrier R"
have 0: "X_poly_minus R c = X_poly_plus R (\<ominus> c)"
by (simp add: A UP_cring.X_minus_plus assms(2) is_UP_cring)
have 1: "f = Cring_Poly.compose R (taylor c f) (X_poly_minus R c)"
using A taylor_id[of c f] assms P_def by fastforce
show "f = taylor_expansion R (\<ominus>c) (taylor_expansion R c f)"
using 1 0 A
unfolding taylor_expansion_def taylor_def by auto
qed
show "f = taylor_expansion R (\<ominus>c) (taylor_expansion R c f)"
by(intro 0 assms)
show "f = taylor_expansion R c (taylor_expansion R (\<ominus>c) f)"
using 0[of "\<ominus> c"] assms by auto
qed
lemma(in UP_cring) taylor_expansion_closed:
assumes "f \<in> carrier (UP R)"
assumes "c \<in> carrier R"
shows "taylor_expansion R c f \<in> carrier (UP R)"
using assms taylor_closed[of f c]
unfolding P_def taylor_def by auto
lemma poly_unit_replacement_deg_lemma:
assumes "is_semialgebraic m A"
assumes "f \<in> carrier (UP (SA m))"
assumes "c \<in> carrier (SA m)"
assumes "a \<in> poly_cfs_part m (taylor_expansion (SA m) c f) A"
assumes "g = taylor_expansion (SA m) (\<ominus>\<^bsub>SA m\<^esub> c)
(poly_unit_replacement m (taylor_expansion (SA m) c f) a)"
shows "deg (SA m) g \<le> deg (SA m) f"
proof-
have 0: "deg (SA m) f = deg (SA m) (taylor_expansion (SA m) c f)"
using assms UPSA.taylor_def UPSA.taylor_deg by presburger
have 1: "deg (SA m) f \<ge> deg (SA m) (poly_unit_replacement m (taylor_expansion (SA m) c f) a)"
unfolding 0 using assms
by (meson UPSA.taylor_expansion_closed poly_unit_replacement_deg)
thus ?thesis
unfolding assms using 1 assms UPSA.taylor_deg UPSA.taylor_def UPSA.taylor_deg unfolding UPSA.taylor_def
using R.cring_simprules(3) UPSA.taylor_expansion_closed padic_fields.poly_unit_replacement_closed padic_fields_axioms by presburger
qed
lemma poly_unit_replacement_on_cfs_part_taylor:
assumes "is_semialgebraic m A"
assumes "f \<in> carrier (UP (SA m))"
assumes "c \<in> carrier (SA m)"
assumes "a \<in> poly_cfs_part m (taylor_expansion (SA m) c f) A"
assumes "g = taylor_expansion (SA m) (\<ominus>\<^bsub>SA m\<^esub> c)
(poly_unit_replacement m (taylor_expansion (SA m) c f) a)"
shows "g \<in> carrier (UP (SA m))"
"\<And>x i . x \<in> a \<Longrightarrow> g i x = f i x"
"\<And> x i. x \<in> a \<Longrightarrow> UPSA.pderiv m g i x = UPSA.pderiv m f i x"
"\<And>x. x \<in> a \<Longrightarrow> SA_poly_to_Qp_poly m x g = SA_poly_to_Qp_poly m x f"
"\<And>x. x \<in> a \<Longrightarrow> SA_poly_to_Qp_poly m x (UPSA.pderiv m g) =
SA_poly_to_Qp_poly m x (UPSA.pderiv m f)"
"\<And> i. taylor_expansion (SA m) c g i = \<zero>\<^bsub>SA m\<^esub> \<or>
taylor_expansion (SA m) c g i \<in> Units (SA m)"
proof-
obtain h where h_def: "h = (poly_unit_replacement m (taylor_expansion (SA m) c f) a)"
by blast
show g_closed: "g \<in> carrier (UP (SA m))"
unfolding assms apply(intro taylor_expansion_closed poly_unit_replacement_closed)
using assms by auto
have taylor: "taylor_expansion (SA m) c f \<in> carrier (UP (SA m))"
using assms UPSA.taylor_closed UPSA.taylor_def by force
have a_sub: "a \<subseteq> carrier (Q\<^sub>p\<^bsup>m\<^esup>)"
using assms poly_cfs_part_subset[of a m "taylor_expansion (SA m) c f" A] taylor
is_semialgebraic_closed by auto
have h_props: "h \<in> carrier (UP (SA m))"
"\<And>x i. x \<in> a \<Longrightarrow> h i x = taylor_expansion (SA m) c f i x"
"\<And>x i. x \<in> a \<Longrightarrow> SA_poly_to_Qp_poly m x (taylor_expansion (SA m) c f) =
SA_poly_to_Qp_poly m x h"
proof-
show "h \<in> carrier (UP (SA m))"
unfolding h_def
by(intro poly_unit_replacement_on_cfs_part[of m A "taylor_expansion (SA m) c f" a]
taylor assms)
show " \<And>x i. x \<in> a \<Longrightarrow> h i x = taylor_expansion (SA m) c f i x"
unfolding h_def
by(intro poly_unit_replacement_on_cfs_part[of m A "taylor_expansion (SA m) c f" a]
taylor assms, auto)
show "\<And>x i. x \<in> a \<Longrightarrow> SA_poly_to_Qp_poly m x (taylor_expansion (SA m) c f) = SA_poly_to_Qp_poly m x h"
unfolding h_def
by(intro poly_unit_replacement_on_cfs_part[of m A "taylor_expansion (SA m) c f" a]
taylor assms, auto)
qed
show 1: "\<And>x. x \<in> a \<Longrightarrow> SA_poly_to_Qp_poly m x g = SA_poly_to_Qp_poly m x f"
proof- fix x assume A: "x \<in> a"
have x_closed: "x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)"
using a_sub A by auto
have 0: "SA_poly_to_Qp_poly m x (taylor_expansion (SA m) c f) =
SA_poly_to_Qp_poly m x h"
using h_props A by auto
have 1: "f = taylor_expansion (SA m) (\<ominus>\<^bsub>SA m\<^esub> c) (taylor_expansion (SA m) c f)"
by(intro taylor_expansion_inv assms)
have 2: "SA_poly_to_Qp_poly m x
(taylor_expansion (SA m) (\<ominus>\<^bsub>SA m\<^esub> c) (taylor_expansion (SA m) c f)) =
taylor_expansion Q\<^sub>p ((\<ominus>\<^bsub>SA m\<^esub> c) x) (SA_poly_to_Qp_poly m x (taylor_expansion (SA m) c f))"
apply(intro SA_poly_to_Qp_poly_taylor_poly taylor)
using assms apply auto[1]
using a_sub A by auto
hence 3: "SA_poly_to_Qp_poly m x f = taylor_expansion Q\<^sub>p ((\<ominus>\<^bsub>SA m\<^esub> c) x) (SA_poly_to_Qp_poly m x (taylor_expansion (SA m) c f))"
using 1 by auto
have 4: "SA_poly_to_Qp_poly m x g = taylor_expansion Q\<^sub>p ((\<ominus>\<^bsub>SA m\<^esub> c) x) (SA_poly_to_Qp_poly m x
(poly_unit_replacement m (taylor_expansion (SA m) c f) a))"
unfolding assms
apply(intro SA_poly_to_Qp_poly_taylor_poly x_closed)
using h_props assms unfolding h_def by auto
have 5: " (SA_poly_to_Qp_poly m x (taylor_expansion (SA m) c f)) = (SA_poly_to_Qp_poly m x
(poly_unit_replacement m (taylor_expansion (SA m) c f) a))"
using A h_props h_def by auto
show "SA_poly_to_Qp_poly m x g = SA_poly_to_Qp_poly m x f "
unfolding 3 4 5 by auto
qed
show 2: "\<And>x i. x \<in> a \<Longrightarrow> g i x = f i x"
proof- fix i x assume A: "x \<in> a"
then have x_closed: "x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)"
using a_sub by auto
show "g i x = f i x"
using 1[of x] g_closed assms(2) x_closed A
SA_poly_to_Qp_poly_coeff[of x m f i]
SA_poly_to_Qp_poly_coeff[of x m g i]
by auto
qed
have h_eq: "h = taylor_expansion (SA m) c g"
unfolding h_def assms apply(rule taylor_expansion_inv)
using assms h_props h_def by auto
show "\<And>i. taylor_expansion (SA m) c g i = \<zero>\<^bsub>SA m\<^esub> \<or> taylor_expansion (SA m) c g i \<in> Units (SA m)"
using assms taylor_closed h_def h_eq poly_cfs_part_semialg
poly_unit_replacement_dichotomy padic_fields_axioms taylor by presburger
have derivs_closed: "UPSA.pderiv m g \<in> carrier (UP (SA m))"
"UPSA.pderiv m f \<in> carrier (UP (SA m))"
by(auto simp: UPSA.pderiv_closed g_closed assms(2))
show 3: "\<And>x i. x \<in> a \<Longrightarrow> UPSA.pderiv m g i x = UPSA.pderiv m f i x"
proof- fix i x assume A: "x \<in> a"
then have x_closed: "x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)"
using a_sub by auto
have p: "g (Suc i) x = f (Suc i) x"
by(intro A 2)
have q: "UPSA.pderiv m g i = [Suc i] \<cdot>\<^bsub>SA m\<^esub> g (Suc i)"
"UPSA.pderiv m f i = [Suc i] \<cdot>\<^bsub>SA m\<^esub> f (Suc i)"
using g_closed assms(2) x_closed A derivs_closed
UPSA.pderiv_cfs[of g m i] UPSA.pderiv_cfs[of f m i] by auto
have r: "([Suc i] \<cdot>\<^bsub>SA m\<^esub> g (Suc i)) x = [Suc i] \<cdot> g (Suc i) x"
"([Suc i] \<cdot>\<^bsub>SA m\<^esub> f (Suc i)) x = [Suc i] \<cdot> f (Suc i) x"
using p x_closed cfs_closed[of f m "Suc i"] SA_add_pow_apply[of "g (Suc i)" m x "Suc i"]
cfs_closed[of g m "Suc i"] SA_add_pow_apply[of "f (Suc i)" m x "Suc i"]
g_closed assms by auto
show "UPSA.pderiv m g i x = UPSA.pderiv m f i x"
unfolding p q r by auto
qed
show "\<And>x. x \<in> a \<Longrightarrow>
SA_poly_to_Qp_poly m x (UPSA.pderiv m g) = SA_poly_to_Qp_poly m x (UPSA.pderiv m f)"
proof fix x i
assume A: "x \<in> a"
then have x_closed: "x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)"
using a_sub by auto
have p: "UPSA.pderiv m g i x = UPSA.pderiv m f i x"
using A 3 by auto
show "SA_poly_to_Qp_poly m x (UPSA.pderiv m g) i =
SA_poly_to_Qp_poly m x (UPSA.pderiv m f) i"
using SA_poly_to_Qp_poly_coeff[of x m "UPSA.pderiv m g" i]
SA_poly_to_Qp_poly_coeff[of x m "UPSA.pderiv m f" i]
derivs_closed x_closed
unfolding p by auto
qed
qed
definition decomp_by_cfs where
"decomp_by_cfs m f \<C> = (\<lambda> C. refine_fibres \<C> C) ` poly_cfs_part m f (fibre_set \<C>)"
lemma decomp_by_cfs_is_decomp:
assumes "f \<in> carrier (UP (SA m))"
assumes "is_cell_condition \<C>"
assumes "arity \<C> = m"
shows "is_cell_decomp m (decomp_by_cfs m f \<C>) (condition_to_set \<C>)"
proof-
obtain C c a1 a2 I where params: "\<C> = Cond m C c a1 a2 I"
using assms arity.simps by (meson equal_CondI)
have C_semialg: "is_semialgebraic m C"
using assms params is_cell_conditionE by smt
have C_closed: "C \<subseteq> carrier (Q\<^sub>p\<^bsup>m\<^esup>)"
using C_semialg is_semialgebraic_closed by auto
have sa: "\<And> x. x \<in> poly_cfs_part m f C \<Longrightarrow> is_semialgebraic m x"
by(rule poly_cfs_part_semialg[of _ C f], intro C_semialg, rule assms, auto)
show ?thesis
unfolding decomp_by_cfs_def
apply(intro partition_to_cell_decomp[of \<C> m C c a1 a2 I] assms params)
unfolding params fibre_set.simps
apply(intro poly_cfs_part_partitions C_closed)
unfolding are_semialgebraic_def using sa apply blast
by(rule poly_cfs_part_finite)
qed
lemma decomp_by_cfs_params:
assumes "\<B> \<in> (decomp_by_cfs m f (Cond m C c a1 a2 I))"
shows "center \<B> = c"
"l_bound \<B> = a1"
"u_bound \<B> = a2"
"boundary_condition \<B> = I"
using assms unfolding decomp_by_cfs_def refine_fibres_def by auto
end
(**************************************************************************************************)
(**************************************************************************************************)
subsection\<open>Cell Decomposition Properties are Hereditary (up to Common Centers)\<close>
(**************************************************************************************************)
(**************************************************************************************************)
context common_decomp_proof_context
begin
lemma SA_poly_ubounded_mono:
assumes "SA_poly_ubounded p m f c A N"
assumes "B \<subseteq> A"
shows "SA_poly_ubounded p m f c B N"
using assms
proof -
have f1: "\<forall>R Ra rs. (\<not> R \<subseteq> Ra \<or> (rs::((nat \<Rightarrow> int) \<times> (nat \<Rightarrow> int)) set list) \<notin> R) \<or> rs \<in> Ra"
by blast
have f2: "A \<subseteq> carrier (Frac (padic_int p)\<^bsup>Suc m\<^esup>)"
by (meson SA_poly_ubounded.A_closed assms(1))
have "\<forall>i n f fa R ia. SA_poly_ubounded_axioms i n f fa R ia = (f \<in> carrier (UP (padic_fields.SA i n)) \<and> fa \<in> carrier (padic_fields.SA i n) \<and> R \<subseteq> carrier (Frac (padic_int i)\<^bsup>Suc n\<^esup>) \<and> (\<forall>na rs r. r # rs \<notin> R \<or> padic_fields.val i (UP_cring.to_fun (Frac (padic_int i)) (padic_fields.SA_poly_to_Qp_poly i n rs f) r) \<le> padic_fields.val i (UP_cring.to_fun (Frac (padic_int i)) (UP_cring.taylor_term (Frac (padic_int i)) (fa rs) (padic_fields.SA_poly_to_Qp_poly i n rs f) na) r) + eint ia))"
using SA_poly_ubounded_axioms_def by presburger
then have "SA_poly_ubounded_axioms p m f c B N"
using f2 f1 SA_poly_ubounded.P_closed SA_poly_ubounded.c_closed SA_poly_ubounded.ubounded assms(1) assms(2) by force
then show ?thesis
by (simp add: SA_poly_ubounded.intro padic_fields_axioms)
qed
end
(**************************************************************************************************)
(**************************************************************************************************)
subsection\<open>Reducing the Proof to the Set $A_0$ and its Complement\<close>
(**************************************************************************************************)
(**************************************************************************************************)
context common_refinement_locale
begin
lemma \<C>_memE:
assumes "x \<in> condition_to_set \<C>"
shows "tl x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)"
"hd x \<in> carrier Q\<^sub>p"
"tl x \<in> A"
using assms unfolding \<C>_def condition_to_set.simps cell_def mem_Collect_eq
apply (meson cartesian_power_tail)
apply (metis Qp_pow_ConsE(2) assms cell_condition_set_memE(1) common_refinement_locale.\<C>_cond common_refinement_locale.\<C>_def common_refinement_locale_axioms)
using \<C>_cond \<C>_def assms condition_to_set_memE(1) by presburger
lemma c_closed: "c \<in> carrier (SA m)"
using \<C>_cond is_cell_conditionE(2) unfolding \<C>_def
by blast
lemma a1_closed: "a1 \<in> carrier (SA m)"
using \<C>_cond unfolding \<C>_def
by fastforce
lemma a2_closed: "a2 \<in> carrier (SA m)"
using \<C>_cond unfolding \<C>_def
by (meson is_cell_conditionE''(7))
lemma A_semialg: "is_semialgebraic m A"
using \<C>_cond unfolding \<C>_def
by simp
text\<open>To match the text, and for brevity, we give a name to the Taylor coefficients of $f$
expanded at $c$.\<close>
definition a where
"a = taylor_expansion (SA m) c f"
lemma a_closed:
"a \<in> carrier (UP (SA m))"
unfolding a_def
by (metis c_closed UPSA.taylor_def UP_cring.taylor_closed UP_cring_def f_closed padic_fields.SA_is_cring padic_fields_axioms)
lemma a_cfs_closed: "a i \<in> carrier (SA m)"
by (meson UPSA.UP_car_memE(1) local.a_closed)
lemma a_deg:
"deg (SA m) a = deg (SA m) f"
unfolding a_def
using c_closed UPSA.taylor_def UPSA.taylor_deg f_closed by force
lemma a_eval:
assumes "x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)"
shows "a i x \<in> carrier Q\<^sub>p"
by(intro SA_car_closed[of _ m] a_cfs_closed assms)
text\<open>The set of indices for the nonzero Taylor coefficients:\<close>
definition inds where
"inds = {i. a i \<in> Units (SA m) }"
lemma inds_bounded:
"i \<in> inds \<Longrightarrow> i \<le> deg (SA m) f"
unfolding inds_def mem_Collect_eq
by (metis SA_units_not_zero UPSA.deg_eqI a_deg le_cases local.a_closed)
lemma inds_bounded':
"i \<in> inds \<Longrightarrow> i \<le> Suc d"
by (meson f_deg inds_bounded le_trans)
lemma inds_finite:
"finite inds"
by (meson finite_nat_set_iff_bounded_le inds_bounded)
lemma inds_memE:
"i \<in> inds \<Longrightarrow> a i \<in> Units (SA m)"
using inds_def by blast
lemma inds_non_memE:
"i \<notin> inds \<Longrightarrow> x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>) \<Longrightarrow> a i x = \<zero>"
by (metis SA_zeroE a_def f_taylor_cfs inds_def mem_Collect_eq)
definition ind_pairs where
"ind_pairs = {(i, j) \<in> inds \<times> inds. i \<noteq> j}"
lemma finite_ind_pairs: "finite (ind_pairs)"
apply(rule finite_subset[of ind_pairs "inds \<times>inds"])
unfolding ind_pairs_def apply blast
using inds_finite by blast
lemma a_quotient_closed:
"\<And>i j. i \<in> inds \<Longrightarrow> j \<in> inds \<Longrightarrow> (a j) \<otimes>\<^bsub>SA m\<^esub> inv \<^bsub>SA m\<^esub> (a i) \<in> carrier (SA m)"
using inds_memE by blast
lemma a_quotient_unit: "\<And>i j. i \<in> inds \<Longrightarrow> j \<in> inds \<Longrightarrow> (a j) \<otimes>\<^bsub>SA m\<^esub> inv \<^bsub>SA m\<^esub> (a i) \<in> Units (SA m)"
using inds_memE by blast
lemma f_eval_formula: "\<And>x t. x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>) \<Longrightarrow> t \<in> carrier Q\<^sub>p \<Longrightarrow>
SA_poly_to_SA_fun m f (t#x) = (\<Oplus>i\<in>inds. (a i x)\<otimes>(t \<ominus> c x)[^]i)"
proof-
fix x t assume a: "x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)" "t \<in> carrier Q\<^sub>p"
have 0: "SA_poly_to_SA_fun m f (t#x) = (\<Oplus>i\<in>{..deg (SA m) f}. (a i (tl (t#x)))\<otimes>(hd (t#x) \<ominus> c (tl (t#x)))[^]i)"
unfolding a_def
apply(rule SA_poly_to_SA_fun_taylor_expansion)
apply (simp add: f_closed)
apply (simp add: c_closed)
by (simp add: Qp_pow_ConsI a(1) a(2))
show "SA_poly_to_SA_fun m f (t # x) = (\<Oplus>i\<in>inds. a i x \<otimes> (t \<ominus> c x) [^] i)"
unfolding 0 list_tl list_hd apply(rule Qp.finsum_mono_neutral_cong)
apply(rule , intro Qp.ring_simprules Qp.nat_pow_closed a SA_car_closed[of _ m] a_cfs_closed c_closed, simp)
using inds_non_memE Qp.l_null Qp.minus_closed Qp.nat_pow_closed SA_car_closed a(1) a(2) c_closed apply presburger
by (simp add: inds_bounded subset_eq)
qed
lemma \<C>_mem_tl:
"\<And> x. x \<in> condition_to_set \<C> \<Longrightarrow> tl x \<in>A"
by (metis cell_memE(2) condition_to_set.simps common_refinement_locale.\<C>_def common_refinement_locale_axioms)
lemma \<C>_mem_hd:
"\<And> x. x \<in> condition_to_set \<C> \<Longrightarrow> hd x \<in> carrier Q\<^sub>p"
by (metis Qp_pow_ConsE(2) \<C>_def cell_condition_set_memE(1) common_refinement_locale.\<C>_cond common_refinement_locale_axioms)
lemma f_eval_formula': "\<And>x. x \<in> condition_to_set \<C> \<Longrightarrow> SA_poly_to_SA_fun m f x = (\<Oplus>i\<in>inds. (a i (tl x))\<otimes>((hd x) \<ominus> c (tl x))[^]i)"
proof-
fix x assume A: "x \<in> condition_to_set \<C>"
have 0: "x = hd x # tl x"
using A
by (metis \<C>_def cartesian_power_car_memE cell_memE(1) condition_to_set.simps list.exhaust_sel list.size(3) nat.simps(3))
have " SA_poly_to_SA_fun m f (hd x # tl x) = (\<Oplus>i\<in>inds. (a i (tl x))\<otimes>((hd x) \<ominus> c (tl x))[^]i)"
apply(rule f_eval_formula)
using A unfolding \<C>_def
apply (simp add: cartesian_power_tail cell_memE(1))
by (simp add: A \<C>_mem_hd)
thus " SA_poly_to_SA_fun m f x = (\<Oplus>i\<in>inds. a i (tl x) \<otimes> (lead_coeff x \<ominus> c (tl x)) [^] i)"
using 0 by auto
qed
end
locale one_val_point_decomp = common_refinement_locale +
fixes B\<^sub>0 Ls As Fs
assumes subset: "B\<^sub>0 \<subseteq> condition_to_set \<C>"
assumes Ls_finite: "finite Ls"
assumes nonempty: "Ls \<noteq> {}"
assumes semialg: "\<And>l. l \<in> Ls \<Longrightarrow> Fs l \<in> carrier (SA m)"
assumes semialg_fibres: "\<And> l. l \<in> Ls \<Longrightarrow> is_semialgebraic m (As l)"
assumes covers: "B\<^sub>0 = (\<Union>l \<in> Ls. condition_to_set (Cond m (As l) c (Fs l) (Fs l) closed_interval))"
context one_val_point_decomp
begin
lemma is_cell:
"l \<in> Ls \<Longrightarrow> is_cell_condition (Cond m (As l) c (Fs l) (Fs l) closed_interval)"
apply(rule is_cell_conditionI')
using semialg_fibres c_closed semialg is_convex_condition_def by auto
lemma one_val_point_decomposable:
"one_val_point_c_decomposable m c (Fs ` Ls) (carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>))
(\<Union>l \<in> Ls. condition_to_set (Cond m (As l) c (Fs l) (Fs l) closed_interval))"
apply(rule finite_union_one_val_point_c_decomposable)
using c_closed apply blast
using Ls_finite apply blast
using nonempty apply blast
using semialg apply blast
proof(rule subsetI)
fix x assume A: "x \<in> (\<lambda>l. condition_to_set (Cond m (As l) c (Fs l) (Fs l) closed_interval)) ` Ls"
then obtain l where l_def: "l \<in> Ls" "x = condition_to_set (Cond m (As l) c (Fs l) (Fs l) closed_interval)"
by blast
have 00: "Cond m (As l) c (Fs l) (Fs l) closed_interval \<in> c_cells_at_one_val_point m c (Fs ` Ls) (carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>))"
unfolding c_cells_at_one_val_point_def mem_Collect_eq condition_to_set.simps
arity.simps center.simps u_bound.simps l_bound.simps boundary_condition.simps cell_def
using is_cell l_def by auto
thus "x \<in> condition_to_set ` c_cells_at_one_val_point m c (Fs ` Ls) (carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>))"
using l_def by blast
qed
definition first_decomp where
"first_decomp = (SOME S'. S' \<noteq> {} \<and>
is_cell_decomp m S' B\<^sub>0 \<and>
S' \<subseteq> c_cells_at_one_val_point m c (Fs ` Ls) (carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>)))"
lemma first_decomp_prop:
"first_decomp \<noteq> {} \<and>
is_cell_decomp m first_decomp B\<^sub>0 \<and>
first_decomp \<subseteq> c_cells_at_one_val_point m c (Fs ` Ls) (carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>))"
proof-
obtain S' where S'_def: "S' \<noteq> {} \<and>
is_cell_decomp m S' B\<^sub>0 \<and>
S' \<subseteq> c_cells_at_one_val_point m c (Fs ` Ls) (carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>))"
using one_val_point_decomposable nonempty semialg
one_val_point_c_decomposable_nonempty[of c m "(Fs ` Ls)" "carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>)"
B\<^sub>0]
c_closed unfolding covers by blast
show ?thesis unfolding first_decomp_def using S'_def SomeE by smt
qed
lemma bounds:
assumes "C \<in> first_decomp"
shows "u_bound C \<in> Fs ` Ls"
using first_decomp_prop assms
unfolding c_cells_at_one_val_point_def by auto
lemma decomp:
"\<exists>S'. (is_cell_decomp m S' B\<^sub>0 \<and>
(\<forall>B\<in>S'. (\<exists> \<phi>. \<phi> \<in> (Fs ` Ls) \<and>
center B = c \<and> l_bound B = \<phi> \<and> u_bound B = \<phi> \<and>
boundary_condition B = closed_interval)))"
using first_decomp_prop unfolding c_cells_at_one_val_point_def
by (smt (verit, best) in_mono mem_Collect_eq)
end
context common_refinement_locale
begin
text\<open>This is just the set that Denef also calls $A_0$. The proof proceeds by showing that both $A_0$ and its complement within the cell can be decomposed as desired.\<close>
definition A\<^sub>0 where
"A\<^sub>0 = {x \<in> condition_to_set \<C>. (\<forall> i \<in> inds. (\<forall> j \<in> inds. i < j \<longrightarrow> val (a i (tl x)) \<noteq> val (a j (tl x) \<otimes> (hd x \<ominus> c (tl x))[^](j- i))))}"
lemma A\<^sub>0_closed: "A\<^sub>0 \<subseteq> carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>)"
unfolding A\<^sub>0_def using condition_to_set.simps[of m A c a1 a2 I] unfolding \<C>_def cell_def
by blast
definition ordered_ind_pairs where
"ordered_ind_pairs = {(i,j) \<in> ind_pairs. i < j}"
lemma ordered_ind_pairs_unit:
assumes "i \<in> inds"
assumes "j \<in> inds"
assumes "i < j"
shows "\<exists>\<eta>\<in>Units (SA m). \<forall>x\<in>carrier (Q\<^sub>p\<^bsup>m\<^esup>).
(int j - int i) * ord (\<eta> x) + ord ((a i \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a j) x) mod (int j - int i) =
ord ((a i \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a j) x) "
proof-
have 0: "(int j - int i) = int (j - i)"
using assms by auto
show ?thesis
unfolding 0
apply(rule denef_lemma_2_4_floor[of d])
apply (simp add: denef_II_axioms)
apply (simp add: Suc_leI assms(3))
using assms(2) diff_le_self inds_bounded' order.trans apply blast
using a_quotient_unit assms(1) assms(2) by presburger
qed
lemma ordered_ind_pairs_unit':
"\<And>ps. ps \<in> ordered_ind_pairs \<Longrightarrow>
\<exists>\<phi> \<in> Units (SA m).
(\<forall>x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>). (int (snd ps) - int (fst ps))*ord (\<phi> x)
+ ord ((a (fst ps) \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a (snd ps)) x)
mod (int (snd ps) - int (fst ps))
= ord (((a (fst ps)) \<otimes>\<^bsub>SA m\<^esub> inv \<^bsub>SA m\<^esub> a (snd ps) ) x))"
proof-
fix ps assume A: "ps \<in> ordered_ind_pairs"
obtain i j where ij_def: "ps = (i,j)"
using A unfolding ordered_ind_pairs_def mem_Collect_eq by auto
have i_closed: "i \<in> inds"
using A unfolding ij_def mem_Collect_eq ordered_ind_pairs_def ind_pairs_def by auto
have j_closed: "j \<in> inds"
using A unfolding ij_def mem_Collect_eq ordered_ind_pairs_def ind_pairs_def by auto
have le: "i < j"
using A unfolding ij_def mem_Collect_eq ordered_ind_pairs_def ind_pairs_def by auto
show "\<exists>\<phi> \<in> Units (SA m).
(\<forall>x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>). (int (snd ps) - int (fst ps))*ord (\<phi> x)
+ ord ((a (fst ps) \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a (snd ps)) x) mod (int (snd ps) - (fst ps))
= ord (((a (fst ps)) \<otimes>\<^bsub>SA m\<^esub> inv \<^bsub>SA m\<^esub> a (snd ps) ) x))"
unfolding ij_def fst_conv snd_conv
by(intro ordered_ind_pairs_unit i_closed j_closed le)
qed
lemma ordered_ind_pairs_memE:
assumes "ps \<in> ordered_ind_pairs"
shows "fst ps \<in> inds"
"snd ps \<in> inds"
"fst ps < snd ps"
using assms unfolding ordered_ind_pairs_def ind_pairs_def mem_Collect_eq by auto
lemma ordered_ind_pairs_finite:
"finite ordered_ind_pairs"
unfolding ordered_ind_pairs_def ind_pairs_def using inds_finite
by (metis (no_types, lifting) Collect_case_prod_mono case_prodD finite_ind_pairs ind_pairs_def
mem_Collect_eq predicate2I rev_finite_subset)
lemma semialg_ineq_set:
assumes "(i,j) \<in> ordered_ind_pairs"
assumes "F = {x \<in> carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>). val (a i (tl x)) \<noteq>
val (a j (tl x) \<otimes> (hd x \<ominus> c (tl x))[^](j- i))}"
shows "is_semialgebraic (Suc m) F"
proof-
have i_in: "i \<in> inds"
using assms ordered_ind_pairs_def ind_pairs_def by force
have j_in: "j \<in>inds"
using assms ordered_ind_pairs_def ind_pairs_def by force
have i_leq_j: "i < j"
using assms unfolding ordered_ind_pairs_def by blast
obtain Ai where Ai_def: "Ai = (\<lambda>x\<in>carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>). a i (tl x))"
by blast
obtain Aj where Aj_def: "Aj = (\<lambda>x\<in>carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>). a j (tl x))"
by blast
obtain C where C_def: "C = (\<lambda>x\<in>carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>). c (tl x))"
by blast
obtain Hd where Hd_def: "Hd = (\<lambda>x\<in>carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>). hd x)"
by blast
have Hd_closed: "Hd \<in> carrier (SA (Suc m))"
using hd_is_semialg_function[of "Suc m"]
unfolding Hd_def using restrict_in_SA_car by blast
have Ai_closed: "Ai \<in> carrier (SA (Suc m))"
unfolding Ai_def apply(rule tl_comp_in_SA)
using a_cfs_closed by blast
have Aj_closed: "Aj \<in> carrier (SA (Suc m))"
unfolding Aj_def apply(rule tl_comp_in_SA)
using a_cfs_closed by blast
have C_closed: "C \<in> carrier (SA (Suc m))"
unfolding C_def apply(rule tl_comp_in_SA)
using c_closed by blast
obtain G where G_def: "G = Aj \<otimes>\<^bsub>SA (Suc m)\<^esub> (Hd \<ominus>\<^bsub>SA (Suc m)\<^esub> C)[^]\<^bsub>SA (Suc m)\<^esub>(j-i)"
by blast
have G_closed: "G \<in> carrier (SA (Suc m))"
unfolding G_def by(rule R.ring_simprules, rule Aj_closed, rule R.nat_pow_closed,
rule R.minus_closed, rule Hd_closed, rule C_closed)
have G_eval_1: "\<And>x. x \<in> carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>) \<Longrightarrow> G x = (Aj x \<otimes> (Hd x \<ominus> C x)[^](j- i))"
unfolding G_def using Aj_closed Hd_closed C_closed SA_minus_eval SA_mult SA_nat_pow by presburger
have G_eval_2: "\<And>x. x \<in> carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>) \<Longrightarrow> G x = (a j (tl x) \<otimes> (hd x \<ominus> c (tl x))[^](j- i))"
using G_eval_1 restrict_apply unfolding Aj_def Hd_def C_def by (smt restrict_apply)
have 2: "F = {x \<in> carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>). val (Ai x) \<noteq> val (G x)}"
apply(rule equalityI')
unfolding assms mem_Collect_eq apply(rule conjI, blast)
proof-
fix x assume A: "x \<in> carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>) \<and> val (a i (tl x)) \<noteq> val (a j (tl x) \<otimes> (lead_coeff x \<ominus> c (tl x)) [^] (j - i))"
have 00: "Ai x = a i (tl x)"
using A restrict_apply unfolding Ai_def by metis
show "val (Ai x) \<noteq> val (G x)"
unfolding 00 using G_eval_2[of x] A by smt
next
show "\<And>x. x \<in> carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>) \<and> val (Ai x) \<noteq> val (G x) \<Longrightarrow>
x \<in> carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>) \<and> val (a i (tl x)) \<noteq> val (a j (tl x) \<otimes> (lead_coeff x \<ominus> c (tl x)) [^] (j - i))"
apply(rule conjI, blast)
proof-
fix x assume A: "x \<in> carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>) \<and> val (Ai x) \<noteq> val (G x)"
have 00: "Ai x = a i (tl x)"
unfolding Ai_def using A restrict_apply by smt
have 01: " G x = (a j (tl x) \<otimes> (hd x \<ominus> c (tl x))[^](j- i))"
apply(rule G_eval_2) using A by blast
show "val (a i (tl x)) \<noteq> val (a j (tl x) \<otimes> (lead_coeff x \<ominus> c (tl x)) [^] (j - i))"
using A unfolding 00 01 by blast
qed
qed
have 3: "F = carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>) - {x \<in> carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>). val (Ai x) = val (G x)}"
unfolding 2 by blast
show "is_semialgebraic (Suc m) F"
unfolding 3 apply(rule diff_is_semialgebraic, rule carrier_is_semialgebraic)
by(rule semialg_val_eq_set_is_semialg, rule Ai_closed, rule G_closed)
qed
definition term_ineq_set where
"term_ineq_set ps = {x \<in> carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>). val (a (fst ps) (tl x)) \<noteq>
val (a (snd ps) (tl x) \<otimes> (hd x \<ominus> c (tl x))[^]((snd ps)- (fst ps)))}"
lemma term_ineq_set_semialg:
assumes "ps \<in> ordered_ind_pairs"
shows "is_semialgebraic (Suc m) (term_ineq_set ps)"
proof-
obtain i j where ij_def: "i \<in> inds \<and> j \<in> inds \<and> i< j" "ps = (i,j)"
using assms unfolding ordered_ind_pairs_def ind_pairs_def by blast
show ?thesis
using semialg_ineq_set[of i j "term_ineq_set ps"] assms ij_def
unfolding ij_def ordered_ind_pairs_def term_ineq_set_def fst_conv snd_conv
by auto
qed
lemma A\<^sub>0_as_intersection: "A\<^sub>0 = condition_to_set \<C> \<inter> \<Inter> (term_ineq_set ` ordered_ind_pairs)"
proof(rule equalityI')
show "\<And>x. x \<in> A\<^sub>0 \<Longrightarrow> x \<in> condition_to_set \<C> \<inter> \<Inter> (term_ineq_set ` ordered_ind_pairs)"
proof(rule IntI)
show " \<And>x. x \<in> A\<^sub>0 \<Longrightarrow> x \<in> condition_to_set \<C>"
unfolding A\<^sub>0_def by blast
show "\<And>x. x \<in> A\<^sub>0 \<Longrightarrow> x \<in> \<Inter> (term_ineq_set ` ordered_ind_pairs)"
proof fix x ps assume A: "x \<in> A\<^sub>0" "ps \<in> ordered_ind_pairs"
obtain i j where ij_def: "i \<in> inds \<and> j \<in> inds \<and> i< j \<and> ps = (i,j)"
using A(2) unfolding ordered_ind_pairs_def ind_pairs_def by blast
have ps_eq: "ps = (i,j)"
using ij_def by blast
have 0: "term_ineq_set ps = {x \<in> carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>). val (a i (tl x)) \<noteq> val (a j (tl x) \<otimes> (lead_coeff x \<ominus> c (tl x)) [^] (j - i))}"
unfolding ps_eq term_ineq_set_def by auto
show "x \<in> term_ineq_set ps"
using A\<^sub>0_closed ij_def unfolding A\<^sub>0_def unfolding 0 mem_Collect_eq
using A(1) A\<^sub>0_def by blast
qed
qed
show "\<And>x. x \<in> condition_to_set \<C> \<inter> \<Inter> (term_ineq_set ` ordered_ind_pairs) \<Longrightarrow> x \<in> A\<^sub>0"
proof- fix x assume A: "x \<in> condition_to_set \<C> \<inter>\<Inter> (term_ineq_set ` ordered_ind_pairs)"
show " x \<in> A\<^sub>0"
unfolding A\<^sub>0_def mem_Collect_eq
apply(rule conjI) using A apply blast
proof fix i assume i_inds: "i \<in> inds"
show "\<forall>j\<in>inds. i < j \<longrightarrow> val (a i (tl x)) \<noteq> val (a j (tl x) \<otimes> (lead_coeff x \<ominus> c (tl x)) [^] (j - i))"
proof fix j assume j_inds: "j \<in> inds"
show " i < j \<longrightarrow> val (a i (tl x)) \<noteq> val (a j (tl x) \<otimes> (lead_coeff x \<ominus> c (tl x)) [^] (j - i))"
proof assume le: "i < j"
have ordered_ind_pairs_el: "(i,j) \<in> ordered_ind_pairs"
unfolding ordered_ind_pairs_def ind_pairs_def using i_inds j_inds le by blast
show "val (a i (tl x)) \<noteq> val (a j (tl x) \<otimes> (lead_coeff x \<ominus> c (tl x)) [^] (j - i))"
using A ordered_ind_pairs_el fst_conv snd_conv unfolding term_ineq_set_def by auto
qed
qed
qed
qed
qed
lemma A\<^sub>0_semialg: "is_semialgebraic (Suc m) A\<^sub>0"
unfolding A\<^sub>0_as_intersection
apply(cases "ordered_ind_pairs = {}")
apply auto[1] apply(intro condition_to_set_is_semialg[of \<C> m] \<C>_cond)
using \<C>_def arity.simps apply blast
apply(rule intersection_is_semialg, rule condition_to_set_is_semialg, rule \<C>_cond)
unfolding \<C>_def arity.simps apply blast
apply(rule finite_intersection_is_semialg, rule ordered_ind_pairs_finite, blast)
using term_ineq_set_semialg unfolding is_semialgebraic_def by blast
lemma A\<^sub>0_closures:
assumes "t#x \<in> A\<^sub>0"
assumes "i \<in> inds"
assumes "j \<in> inds"
shows "a i x \<in> Units Q\<^sub>p"
"a j x \<in> Units Q\<^sub>p"
"t \<ominus> c x \<in> carrier Q\<^sub>p"
"t \<ominus> c x \<noteq> \<zero> \<Longrightarrow> t \<ominus> c x \<in> Units Q\<^sub>p"
proof-
have t_closed: "t \<in> carrier Q\<^sub>p"
using assms A\<^sub>0_closed cartesian_power_head by force
have x_closed: "x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)"
using assms A\<^sub>0_closed cartesian_power_tail by fastforce
show 0: "a i x \<in> Units Q\<^sub>p"
"a j x \<in> Units Q\<^sub>p"
"(t \<ominus> c x) \<in> carrier Q\<^sub>p"
unfolding Units_eq_nonzero nonzero_def mem_Collect_eq
using x_closed inds_memE SA_Units_memE' a_eval assms apply auto[1]
using x_closed inds_memE SA_Units_memE' a_eval assms apply auto[1]
using t_closed x_closed assms Qp.ring_simprules(4) c_closed SA_car_closed by auto[1]
show "t \<ominus> c x \<noteq> \<zero> \<Longrightarrow> t \<ominus> c x \<in> Units Q\<^sub>p"
using 0 assms Qp.nonzero_memI Units_eq_nonzero by presburger
qed
lemma A\<^sub>0_memE:
assumes "t#x \<in> A\<^sub>0"
assumes "i \<in> inds"
assumes "j \<in> inds"
assumes "i < j"
assumes "t \<ominus> c x \<noteq> \<zero>"
shows "val (a i x \<otimes> (t \<ominus> c x)[^]i) \<noteq> val (a j x \<otimes> (t \<ominus> c x)[^]j)"
"ord (a i x \<otimes> (t \<ominus> c x)[^]i) \<noteq> ord (a j x \<otimes> (t \<ominus> c x)[^]j)"
proof-
have units: "a i x \<in> Units Q\<^sub>p"
"a j x \<in> Units Q\<^sub>p"
"(t \<ominus> c x) \<in> Units Q\<^sub>p"
using assms A\<^sub>0_closures by auto
have 0: "val (a i x) \<noteq> val (a j x \<otimes> (t \<ominus> c x) [^] (j - i))"
using assms unfolding A\<^sub>0_def mem_Collect_eq list_tl list_hd by auto
hence 1: "ord (a i x) \<noteq> ord (a j x \<otimes> (t \<ominus> c x) [^] (j - i))"
using units unfolding val_def
by (simp add: Qp.Units_pow_closed Qp.ring_in_Units_imp_not_zero)
have 2: "ord (a j x \<otimes> (t \<ominus> c x) [^] (j - i)) = ord (a j x) + (int j - int i)* ord (t \<ominus> c x)"
using units assms Qp.Units_pow_closed Units_eq_nonzero nonzero_nat_pow_ord ord_mult
by force
hence 3: "ord (a i x) + int i*ord(t \<ominus> c x) \<noteq> ord (a j x) + int j* ord (t \<ominus> c x)"
using 1 2 int_distrib(3) by force
thus "ord (a i x \<otimes> (t \<ominus> c x) [^] i) \<noteq> ord (a j x \<otimes> (t \<ominus> c x) [^] j)"
using units Qp.Units_pow_closed Units_eq_nonzero nonzero_nat_pow_ord ord_mult by auto
thus "val (a i x \<otimes> (t \<ominus> c x) [^] i) \<noteq> val (a j x \<otimes> (t \<ominus> c x) [^] j)"
unfolding val_def using units by auto
qed
lemma A\<^sub>0_memE':
assumes "t#x \<in> A\<^sub>0"
assumes "i \<in> inds"
assumes "j \<in> inds"
assumes "i < j"
assumes "t \<ominus> c x = \<zero>"
shows "i = 0 \<Longrightarrow> val (a i x \<otimes> (t \<ominus> c x)[^]i) \<noteq> val (a j x \<otimes> (t \<ominus> c x)[^]j)"
"i > 0 \<Longrightarrow> val (a i x \<otimes> (t \<ominus> c x)[^]i) = \<infinity>"
"val (a j x \<otimes> (t \<ominus> c x)[^]j) = \<infinity>"
proof-
have t_closed: "t \<in> carrier Q\<^sub>p"
using assms A\<^sub>0_closed cartesian_power_head by force
have x_closed: "x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)"
using assms A\<^sub>0_closed cartesian_power_tail by fastforce
have units: "a i x \<in> Units Q\<^sub>p"
"a j x \<in> Units Q\<^sub>p"
using assms A\<^sub>0_closures by auto
show 0: "val (a j x \<otimes> (t \<ominus> c x)[^]j) = \<infinity>"
using assms units unfolding assms(5) val_def
by (simp add: Qp.Units_closed Qp.nat_pow_zero)
show "i > 0 \<Longrightarrow> val (a i x \<otimes> (t \<ominus> c x)[^]i) = \<infinity>"
using assms units unfolding assms(5) val_def
by (simp add: Qp.Units_closed Qp.nat_pow_zero)
show "i = 0 \<Longrightarrow> val (a i x \<otimes> (t \<ominus> c x)[^]i) \<noteq> val (a j x \<otimes> (t \<ominus> c x)[^]j)"
unfolding 0 using units
by (metis (no_types, lifting) Group.nat_pow_0 Qp.Units_not_right_zero_divisor
Qp.nat_pow_closed Qp.zero_closed Qp.zero_not_one eint.distinct(2) val_def)
qed
text\<open>This lemma formalizes equation (3) from Denef's proof of this result.\<close>
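text\<open>In symbols: for $t \# x \in A_0$, provided some Taylor coefficient does not vanish,
\[ \mathrm{val}\big(f(t \# x)\big) = \min_{i} \mathrm{val}\big(a_i(x)\,(t - c(x))^{i}\big), \]
the minimum ranging over the indices with nonvanishing coefficients; on $A_0$ the valuation of
$f$ is therefore computed by a single minimal term of its Taylor expansion.\<close>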
lemma val_f_on_A\<^sub>0: "\<And>x. x \<in> A\<^sub>0 \<Longrightarrow> inds \<noteq> {} \<Longrightarrow>
val (SA_poly_to_SA_fun m f x) = (MIN i\<in>inds. (val ( (a i (tl x))\<otimes>((hd x) \<ominus> c (tl x))[^]i)))"
proof-
fix xs assume A0: "xs \<in> A\<^sub>0" "inds \<noteq> {} "
obtain t x where tx_def: "xs = t#x"
using A0 A\<^sub>0_closed Qp_pow_ConsE
by (metis (mono_tags, lifting) Suc_n_not_n cartesian_power_car_memE list.exhaust_sel list.sel(2) subset_iff)
have t_x_closed: "t \<in> carrier Q\<^sub>p" "x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)"
using A0 A\<^sub>0_closed Qp_pow_ConsE unfolding tx_def apply force
using A0 A\<^sub>0_closed Qp_pow_ConsE unfolding tx_def by force
have diff_closed: "t \<ominus> c x \<in> carrier Q\<^sub>p"
using t_x_closed Qp.ring_simprules c_closed SA_car_closed by auto
have 100: "SA_poly_to_SA_fun m f (t#x) = (\<Oplus>i\<in>inds. (a i x)\<otimes>(t \<ominus> c x)[^]i)"
by(rule f_eval_formula, auto simp: t_x_closed)
have 101: "(\<lambda>i. a i x \<otimes> (t \<ominus> c x) [^] i) \<in> inds \<rightarrow> carrier Q\<^sub>p"
using diff_closed t_x_closed by (simp add: a_eval)
show "val (SA_poly_to_SA_fun m f xs) = (MIN i\<in>inds. val (a i (tl xs) \<otimes> (hd xs \<ominus> c (tl xs)) [^] i))"
unfolding tx_def list_tl list_hd
proof(cases "(t \<ominus> c x) = \<zero>")
case True
have T0: "\<And>i. i \<noteq> 0 \<Longrightarrow> val (a i x \<otimes> (t \<ominus> c x) [^] i) = \<infinity>"
using Qp.nat_pow_zero Qp.r_null True a_eval local.val_zero t_x_closed(2) by presburger
then have T1: "\<And>i. i \<in> inds \<Longrightarrow> val (a i x \<otimes> (t \<ominus> c x) [^] i) \<ge> val (a 0 x \<otimes> (t \<ominus> c x) [^] (0::nat))"
by (metis basic_trans_rules(20) eint_ord_code(3) notin_closed)
have T2: "\<And>i. i \<noteq> 0 \<Longrightarrow> a i x \<otimes> (t \<ominus> c x) [^] i = \<zero>"
using Qp.nat_pow_zero Qp.r_null True a_eval t_x_closed(2) by presburger
show "val (SA_poly_to_SA_fun m f (t # x)) = (MIN i\<in>inds. val (a i x \<otimes> (t \<ominus> c x) [^] i))"
proof(cases "(0::nat) \<in> inds")
case True
have T00: " (a 0 x \<otimes> (t \<ominus> c x) [^] (0::nat)) \<in> carrier Q\<^sub>p"
by (simp add: a_eval t_x_closed(2))
have T01: "\<And>i. i \<in> inds \<Longrightarrow> a i x \<otimes> (t \<ominus> c x) [^] i \<in> carrier (Q\<^sub>p)"
using T00 T2
by (metis Qp.zero_closed)
have T02: "inds = insert 0 (inds - {0})"
using True by blast
have "(\<Oplus>i\<in>insert 0 (inds - {0}). a i x \<otimes> (t \<ominus> c x) [^] i) =
a(0::nat) x \<otimes> (t \<ominus> c x)[^] (0::nat) \<oplus> (\<Oplus>i\<in>inds-{(0::nat)}. a i x \<otimes> (t \<ominus> c x) [^] i)"
apply(rule Qp.finsum_insert[of "inds-{0}" "0::nat" "(\<lambda> i. a i x \<otimes> (t \<ominus> c x) [^] i)"])
using inds_finite apply blast
apply blast
using "101" apply blast
using T00 by blast
hence T03: "(SA_poly_to_SA_fun m f (t#x)) = (a 0 x \<otimes> (t \<ominus> c x) [^] (0::nat)) \<oplus>
(\<Oplus>i\<in>inds - {0}. a i x \<otimes> (t \<ominus> c x) [^] i)"
using T02 unfolding 100 by auto
have T04: "(MIN i\<in>inds. val (a i x \<otimes> (t \<ominus> c x) [^] i)) =
val (a 0 x \<otimes> (t \<ominus> c x) [^] (0::nat))"
apply(rule Min_eqI )
using inds_finite apply blast
using T1 apply blast
using True by blast
show "val (SA_poly_to_SA_fun m f (t#x)) = (MIN i\<in>inds. val (a i x \<otimes> (t \<ominus> c x) [^] i))"
using T2 Qp.finsum_zero unfolding T03 T04 tx_def
by (smt (verit, best) DiffD2 Qp.add.finprod_one_eqI Qp.r_zero T00 insertI1)
next
case False
then have F0: "\<And>i. i \<in> inds \<Longrightarrow> (a i x \<otimes> (t \<ominus> c x) [^] i) = \<zero>"
using T2 by metis
hence F1: "(SA_poly_to_SA_fun m f (t#x)) = \<zero>"
unfolding 100 using Qp.finsum_zero by (smt Qp.add.finprod_one_eqI Qp.r_zero singletonI)
have F2: " (MIN i\<in>inds. val (a i x \<otimes> (t \<ominus> c x) [^] i)) = \<infinity>"
apply(rule Min_eqI)
using inds_finite apply blast
using F0 True local.val_zero apply force
using A0(2) F0 local.val_zero by fastforce
show "val (SA_poly_to_SA_fun m f (t # x)) = (MIN i\<in>inds. val (a i x \<otimes> (t \<ominus> c x) [^] i))"
unfolding F1 F2 val_zero by blast
qed
next
case False
show "val (SA_poly_to_SA_fun m f (t # x)) = (MIN i\<in>inds. val (a i x \<otimes> (t \<ominus> c x) [^] i))"
unfolding 100
proof(rule finsum_val_ultrametric_diff')
show "(\<lambda>i. a i x \<otimes> (t \<ominus> c x) [^] i) \<in> inds \<rightarrow> carrier Q\<^sub>p"
using 101 by blast
show "finite inds"
using inds_def inds_finite by blast
show " inds \<noteq> {}"
by (simp add: A0(2))
show "\<And>i b. i \<in> inds \<Longrightarrow>
b \<in> inds \<Longrightarrow> i \<noteq> b \<Longrightarrow> val (a i x \<otimes> (t \<ominus> c x) [^] i) \<noteq> val (a b x \<otimes> (t \<ominus> c x) [^] b)"
proof- fix i b assume A: "i \<in> inds" "b \<in> inds" "i \<noteq> b"
show "val (a i x \<otimes> (t \<ominus> c x) [^] i) \<noteq> val (a b x \<otimes> (t \<ominus> c x) [^] b)"
apply(cases "i < b")
using A A\<^sub>0_memE[of t x i b] A\<^sub>0_memE[of t x b i] A0 False unfolding tx_def by auto
qed
qed
qed
qed
text\<open>This lemma formalizes the statement from Denef's proof that ``The cells contained in $A \setminus A_0$ have the form \[
B = \{ (x,t) \mid x \in C \text{ and ord}(t - c(x)) = \text{ord}(\theta(x)) \}, ...
\]" \<close>
definition \<Theta> where \<Theta>_def: "\<Theta> = (\<lambda>ps. (SOME \<phi> .\<phi> \<in> Units (SA m) \<and>
(\<forall>x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>). (int (snd ps) - int (fst ps))*ord (\<phi> x)
+ ord ((a (fst ps) \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a (snd ps)) x)
mod (int (snd ps) - int (fst ps))
= ord (((a (fst ps)) \<otimes>\<^bsub>SA m\<^esub> inv \<^bsub>SA m\<^esub> a (snd ps) ) x))))"
lemma \<Theta>_unit: "\<And>ps. ps \<in> ordered_ind_pairs \<Longrightarrow> \<Theta> ps \<in> Units (SA m)"
"\<And>ps. ps \<in> ordered_ind_pairs \<Longrightarrow>
(\<forall>x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>). (int (snd ps) - int (fst ps))*ord (\<Theta> ps x)
+ ord ((a (fst ps) \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a (snd ps)) x)
mod (int (snd ps) - int (fst ps))
= ord (((a (fst ps)) \<otimes>\<^bsub>SA m\<^esub> inv \<^bsub>SA m\<^esub> a (snd ps) ) x))"
proof-
fix ps assume A: "ps \<in> ordered_ind_pairs"
then obtain i j where ij_def: "ps = (i,j)"
using bezw.cases by blast
have F10010: "(i,j) \<in> ordered_ind_pairs"
using A unfolding ordered_ind_pairs_def mem_Collect_eq ij_def
by metis
obtain \<phi> where \<phi>_def: "\<phi>\<in>Units (SA m) \<and> (\<forall>x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>). (int (snd ps) - int (fst ps))*ord (\<phi> x)
+ ord ((a (fst ps) \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a (snd ps)) x)
mod (int (snd ps) - int (fst ps))
= ord (((a (fst ps)) \<otimes>\<^bsub>SA m\<^esub> inv \<^bsub>SA m\<^esub> a (snd ps) ) x))"
using F10010 F_def ordered_ind_pairs_unit'[of "(i,j)"] ij_def by blast
have a: "\<Theta> ps \<in> Units (SA m) \<and> (\<forall>x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>). (int (snd ps) - int (fst ps))*ord (\<Theta> ps x)
+ ord ((a (fst ps) \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a (snd ps)) x)
mod (int (snd ps) - int (fst ps))
= ord (((a (fst ps)) \<otimes>\<^bsub>SA m\<^esub> inv \<^bsub>SA m\<^esub> a (snd ps) ) x))"
apply(rule SomeE[of "\<Theta> ps" _ \<phi> ])
using F10010 \<phi>_def SomeE unfolding \<Theta>_def ij_def by auto
show "\<Theta> ps \<in> Units (SA m)"
"(\<forall>x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>). (int (snd ps) - int (fst ps))*ord (\<Theta> ps x)
+ ord ((a (fst ps) \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a (snd ps)) x)
mod (int (snd ps) - int (fst ps))
= ord (((a (fst ps)) \<otimes>\<^bsub>SA m\<^esub> inv \<^bsub>SA m\<^esub> a (snd ps) ) x))"
using a unfolding ij_def by auto
qed
lemma \<Theta>_ord: "\<And>i j x. (i, j) \<in> ordered_ind_pairs \<Longrightarrow> x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>) \<Longrightarrow>
(int j - int i)*ord ((\<Theta> (i,j)) x)
+ ord ((a i \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a j) x) mod (int j - int i)
= ord (((a i) \<otimes>\<^bsub>SA m\<^esub> inv \<^bsub>SA m\<^esub> a j ) x)"
proof-
fix i j x assume F10010: "(i, j) \<in> ordered_ind_pairs" "x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)"
have "\<exists>\<eta>\<in>Units (SA m). \<forall>x\<in>carrier (Q\<^sub>p\<^bsup>m\<^esup>).
(int j - int i) * ord (\<eta> x) + ord ((a i \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a j) x) mod (int j - int i) =
ord ((a i \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a j) x) "
apply(rule ordered_ind_pairs_unit)
using ordered_ind_pairs_unit[of i j ] F10010(1)
unfolding ordered_ind_pairs_def ind_pairs_def mem_Collect_eq by auto
then obtain \<phi> where \<phi>_def: "\<phi>\<in>Units (SA m) \<and>
( \<forall>x\<in>carrier (Q\<^sub>p\<^bsup>m\<^esup>).
(int j - int i) * ord (\<phi> x) + ord ((a i \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a j) x) mod (int j - int i) =
ord ((a i \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a j) x))"
by blast
have a:"(\<Theta> (i,j))\<in>Units (SA m) \<and>
( \<forall>x\<in>carrier (Q\<^sub>p\<^bsup>m\<^esup>).
(int j - int i) * ord ((\<Theta> (i,j)) x) + ord ((a i \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a j) x) mod (int j - int i) =
ord ((a i \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a j) x))"
apply(rule SomeE[of "\<Theta> (i,j)" _ \<phi>])
using F10010 unfolding \<Theta>_def fst_conv snd_conv apply blast
using \<phi>_def by auto
have 000: "snd (case (i, j) of (x, y) \<Rightarrow> (x, int y)) = j"
by auto
have 001: "fst (case (i, j) of (x, xa) \<Rightarrow> (int x, xa)) = i"
by auto
have 002: "(\<Theta> (i,j)) \<in>Units (SA m) \<and>
(\<forall>x\<in>carrier (Q\<^sub>p\<^bsup>m\<^esup>).
(int j - int i) * ord ((\<Theta> (i,j)) x) + ord ((a i \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a j) x) mod (int j - int i) =
ord ((a i \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a j) x))"
using a unfolding 000 001 fst_conv snd_conv by auto
show "(int j - int i) * ord (\<Theta>(i,j) x) + ord ((a i \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a j) x) mod (int j - int i) =
ord ((a i \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a j) x)"
using 002 F10010 by auto
qed
definition A\<^sub>0_comp_fibre_cover where
"A\<^sub>0_comp_fibre_cover ps =
{x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>). int (snd ps - fst ps)* ord ((\<Theta> ps) x) =
ord (((a (fst ps)) \<otimes>\<^bsub>SA m\<^esub> inv \<^bsub>SA m\<^esub> a (snd ps) ) x) }
\<inter> {x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>). val (\<Theta> ps x) \<in> I (val (a1 x)) (val (a2 x))}
\<inter> A "
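text\<open>Informally, $x$ lies in \<open>A\<^sub>0_comp_fibre_cover ps\<close> for $ps = (i,j)$ precisely when $x \in A$, the value $\mathrm{val}(\Theta(i,j)(x))$ lies in the interval determined by the bounds \<open>a1 x\<close> and \<open>a2 x\<close>, and $(j - i)\,\mathrm{ord}(\Theta(i,j)(x)) = \mathrm{ord}\big(\tfrac{a_i}{a_j}(x)\big)$; the last condition says that $j - i$ divides $\mathrm{ord}\big(\tfrac{a_i}{a_j}(x)\big)$ with quotient $\mathrm{ord}(\Theta(i,j)(x))$.\<close>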
lemma A\<^sub>0_comp_fibre_cover_semialg:
assumes "ps \<in> ordered_ind_pairs"
shows "is_semialgebraic m (A\<^sub>0_comp_fibre_cover ps)"
proof-
obtain i j where ij_def: "ps = (i,j)"
using assms unfolding ordered_ind_pairs_def ind_pairs_def by auto
obtain G where G_def: "G = {x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>).
int (snd ps - fst ps)* ord ((\<Theta> ps) x) = ord (((a (fst ps)) \<otimes>\<^bsub>SA m\<^esub> inv \<^bsub>SA m\<^esub> a (snd ps) ) x) }"
by blast
have 0: "is_semialgebraic m G"
proof-
have 0: "(a (fst ps)) \<otimes>\<^bsub>SA m\<^esub> inv \<^bsub>SA m\<^esub> a (snd ps) \<in> carrier (SA m)"
using inds_memE[of "fst ps"] inds_memE[of "snd ps"] assms ordered_ind_pairs_memE
by auto
have 1: "snd ps - fst ps > 0"
using assms ordered_ind_pairs_memE[of ps] by linarith
have 2: "\<And>x. x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>) \<Longrightarrow> ((a (fst ps)) \<otimes>\<^bsub>SA m\<^esub> inv \<^bsub>SA m\<^esub> a (snd ps) ) x \<noteq>\<zero>"
by(intro SA_Units_memE'[of _ m] a_quotient_unit ordered_ind_pairs_memE assms, auto )
have 3: "\<And>x. x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>) \<Longrightarrow> (a (fst ps) \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a (snd ps)) x \<in> carrier Q\<^sub>p \<and> (a (fst ps) \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a (snd ps)) x \<noteq> \<zero>"
using 2 0 SA_car_memE by blast
have 4: " {x \<in> carrier (Q\<^sub>p\<^bsup>m + 0\<^esup>).
(a (fst ps) \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a (snd ps)) x \<in> nonzero Q\<^sub>p \<and> ord ((a (fst ps) \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a (snd ps)) x) mod int (snd ps - fst ps) = 0} =
{x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>). ord (((a (fst ps)) \<otimes>\<^bsub>SA m\<^esub> inv \<^bsub>SA m\<^esub> a (snd ps) ) x) mod (snd ps - fst ps) = 0}"
apply(rule equalityI')
unfolding mem_Collect_eq nonzero_def apply (metis add_cancel_left_right)
using 3 add_cancel_left_right[of m 0] by metis
have 5: "is_semialgebraic m {x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>). int (snd ps - fst ps)* ord ((\<Theta> ps) x) = ord (((a (fst ps)) \<otimes>\<^bsub>SA m\<^esub> inv \<^bsub>SA m\<^esub> a (snd ps) ) x) }"
proof-
have 50: "\<And>x. x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>) \<Longrightarrow> (\<Theta> ps x) \<in> nonzero Q\<^sub>p"
using \<Theta>_unit assms SA_Units_memE' SA_Units_closed SA_car_memE unfolding nonzero_def
by (metis (mono_tags, lifting) function_ring_car_closed mem_Collect_eq)
have 51: "\<And>x. x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>) \<Longrightarrow> (int (snd ps - fst ps))* ord ((\<Theta> ps) x) = ord ((\<Theta> ps [^] \<^bsub>SA m\<^esub> (snd ps - fst ps)) x)"
proof- fix x assume AAA: "x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)"
have 510: "(\<Theta> ps [^] \<^bsub>SA m\<^esub> (snd ps - fst ps)) x = (\<Theta> ps x [^] (snd ps - fst ps))"
using \<Theta>_unit AAA SA_Units_memE SA_Units_closed by (meson SA_nat_pow)
show "int (snd ps - fst ps) * ord (\<Theta> ps x) = ord ((\<Theta> ps [^]\<^bsub>SA m\<^esub> (snd ps - fst ps)) x)"
using 50 unfolding 510 using AAA nonzero_nat_pow_ord by presburger
qed
have 52: "{x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>). (snd ps - fst ps)* ord ((\<Theta> ps) x) = ord (((a (fst ps)) \<otimes>\<^bsub>SA m\<^esub> inv \<^bsub>SA m\<^esub> a (snd ps) ) x) }
= {x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>).ord ((\<Theta> ps [^] \<^bsub>SA m\<^esub> (snd ps - fst ps)) x) = ord (((a (fst ps)) \<otimes>\<^bsub>SA m\<^esub> inv \<^bsub>SA m\<^esub> a (snd ps) ) x) }"
apply(rule equalityI') unfolding mem_Collect_eq using 51 50
apply (metis SA_nat_pow mult_of_nat_commute)
using 51 50
by presburger
have 53: "\<And>x. x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>) \<Longrightarrow> (\<Theta> ps [^] \<^bsub>SA m\<^esub> (snd ps - fst ps)) x \<in> nonzero Q\<^sub>p"
using 50 \<Theta>_unit Qp_nat_pow_nonzero SA_nat_pow by presburger
have 54: "\<And>x. x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>) \<Longrightarrow> (((a (fst ps)) \<otimes>\<^bsub>SA m\<^esub> inv \<^bsub>SA m\<^esub> a (snd ps) ) x) \<in> nonzero Q\<^sub>p"
using inds_memE by (meson "3" not_nonzero_Qp)
have 55: "{x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>). (snd ps - fst ps)* ord ((\<Theta> ps) x) = ord (((a (fst ps)) \<otimes>\<^bsub>SA m\<^esub> inv \<^bsub>SA m\<^esub> a (snd ps) ) x) }
= {x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>). val ((\<Theta> ps [^] \<^bsub>SA m\<^esub> (snd ps - fst ps)) x) = val (((a (fst ps)) \<otimes>\<^bsub>SA m\<^esub> inv \<^bsub>SA m\<^esub> a (snd ps) ) x) }"
unfolding 52 apply(rule equalityI')
unfolding mem_Collect_eq using inds_memE 50
apply (metis "3" Qp.nonzero_memE(1) Qp.nonzero_memE(2) Qp_nat_pow_nonzero SA_nat_pow val_ord')
using 53 54 unfolding val_def nonzero_def mem_Collect_eq
by (meson eint.simps(1))
have 56: "(\<Theta> ps [^]\<^bsub>SA m\<^esub> (snd ps - fst ps)) \<in> carrier (SA m)"
using assms \<Theta>_unit by blast
have 57: "(a (fst ps) \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a (snd ps)) \<in> carrier (SA m)"
using inds_memE ordered_ind_pairs_memE[of ps] assms by auto
show "is_semialgebraic m {x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>). int (snd ps - fst ps) * ord (\<Theta> ps x) = ord ((a (fst ps) \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a (snd ps)) x)}"
unfolding 55 using 56 57 semialg_val_eq_set_is_semialg by blast
qed
thus ?thesis unfolding G_def by auto
qed
obtain G' where G'_def: "G' = {x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>). val (\<Theta> ps x) \<in> I (val (a1 x)) (val (a2 x))} \<inter> G"
by blast
have 1: "is_semialgebraic m {x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>). val (\<Theta> ps x) \<in> I (val (a1 x)) (val (a2 x))}"
apply(rule cell_cond_semialg)
using \<C>_def \<C>_cond is_cell_conditionE(5) apply blast
using \<Theta>_unit(1) assms SA_Units_closed apply auto[1]
using \<C>_cond unfolding \<C>_def by auto
show ?thesis
unfolding A\<^sub>0_comp_fibre_cover_def
apply(intro intersection_is_semialg)
using 1 A_semialg 0 unfolding G_def by auto
qed
lemma A\<^sub>0_comp_fibre_cover_covers:
"condition_to_set \<C> - A\<^sub>0 =
(\<Union> ps \<in> ordered_ind_pairs. condition_to_set
(Cond m (A\<^sub>0_comp_fibre_cover ps) c (\<Theta> ps) (\<Theta> ps) closed_interval))"
proof(rule equalityI')
fix xs assume A: "xs \<in> condition_to_set \<C> - A\<^sub>0"
then obtain ps where ps_def: "ps \<in> ordered_ind_pairs" "xs \<notin> term_ineq_set ps"
unfolding Diff_iff Int_iff A\<^sub>0_as_intersection Inter_iff by auto
obtain i j where ij_def: "i \<in> inds" "j \<in> inds" "i < j" "ps = (i,j)"
using ps_def unfolding ordered_ind_pairs_def ind_pairs_def by auto
have xs_closed: "xs \<in> carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>)"
using A unfolding condition_to_set.simps \<C>_def cell_def by auto
obtain t x where tx_def: "xs = t#x" "t \<in> carrier Q\<^sub>p" "x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)"
using xs_closed
by (metis Qp_pow_ConsE(1) Qp_pow_ConsE(2) cartesian_power_car_memE
list.exhaust_sel list.size(3) nat.distinct(2))
have 0: "val (a i x) = val (a j x \<otimes> (t \<ominus> c x) [^] (j - i))"
using xs_closed ps_def(2)
unfolding ij_def term_ineq_set_def fst_conv snd_conv mem_Collect_eq tx_def list_tl list_hd
by auto
have 1: "a i x \<in> Units Q\<^sub>p" "a j x \<in> Units Q\<^sub>p"
using tx_def ij_def SA_Units_nonzero inds_memE
unfolding Units_eq_nonzero by auto
have 2: "t \<ominus> c x \<in> Units Q\<^sub>p"
using tx_def 0 1 val_zero Qp.Units_closed Qp.nat_pow_zero Qp.nonzero_memE(2)
Units_eq_nonzero ij_def(3) c_closed SA_car_closed
unfolding Units_eq_nonzero nonzero_def mem_Collect_eq
by (metis (no_types, opaque_lifting) Qp.cring_simprules(27) Qp.cring_simprules(4)
Qp.pow_zero eint.simps(3) val_def zero_less_diff)
have closures: "a i x \<in> carrier Q\<^sub>p" "a j x \<in> carrier Q\<^sub>p" "t \<ominus> c x \<in> carrier Q\<^sub>p"
"(t \<ominus> c x) [^](j-i) \<in> carrier Q\<^sub>p" "\<Theta> ps x \<in> Units Q\<^sub>p"
proof-
show 0: "a i x \<in> carrier Q\<^sub>p" "a j x \<in> carrier Q\<^sub>p" "t \<ominus> c x \<in> carrier Q\<^sub>p"
"(t \<ominus> c x) [^](j-i) \<in> carrier Q\<^sub>p"
using 1 using 2 by auto
show "\<Theta> ps x \<in> Units Q\<^sub>p"
by (metis SA_Units_nonzero Units_eq_nonzero \<Theta>_unit(1) ps_def(1) tx_def(3))
qed
have 3: "ord (a i x) = ord (a j x \<otimes> (t \<ominus> c x) [^] (j - i))"
using 0 val_ord 1 2 Units_eq_nonzero
by (simp add: Qp.Units_closed equal_val_imp_equal_ord(1))
have 4: "ord (a i x) = ord (a j x) + (j - i)*ord (t \<ominus> c x)"
by (metis 1 2 3 Qp.Units_pow_closed Units_nonzero_Qp int_pow_int int_pow_ord ord_mult)
have 5: "ord ((a i \<otimes>\<^bsub>SA m\<^esub> inv \<^bsub>SA m\<^esub> a j) x) = (j - i)*ord (t \<ominus> c x)"
using 4 tx_def 1 SA_div_eval Units_eq_nonzero a_cfs_closed ij_def(2) inds_memE ord_fract
by force
have 6: "ord ((a i \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a j) x) mod (int j - int i) = 0"
using 5 ij_def by (simp add: of_nat_diff)
have 7: " (int j - int i) * ord (\<Theta> (i, j) x) +
ord ((a i \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a j) x) mod (int j - int i) =
ord ((a i \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a j) x)"
using \<Theta>_unit[of "(i,j)"] tx_def ij_def(4) ps_def(1) by auto
have 8: "ord (t \<ominus> c x) = ord (\<Theta> ps x)"
using 0 7 unfolding 6 unfolding 5 using ij_def
by (simp add: int_ops(6))
hence 9: "val (\<Theta> ps x) = val (t \<ominus> c x)"
using 7 closures 2 Units_eq_nonzero by force
have 10: "x \<in> A\<^sub>0_comp_fibre_cover ps"
unfolding A\<^sub>0_comp_fibre_cover_def mem_Collect_eq Int_iff 9
unfolding fst_conv snd_conv ij_def
using tx_def 7 ij_def A unfolding 6 \<C>_def condition_to_set.simps cell_def mem_Collect_eq
tx_def Diff_iff
by (simp add: "5" "8")
have "xs \<in> condition_to_set (Cond m (A\<^sub>0_comp_fibre_cover ps) c (\<Theta> ps) (\<Theta> ps) closed_interval)"
unfolding condition_to_set.simps
by(intro cell_memI xs_closed, unfold tx_def list_tl list_hd closed_interval_def, intro 10,
auto simp: 9)
thus "xs \<in> (\<Union>ps\<in>ordered_ind_pairs.
condition_to_set (Cond m (A\<^sub>0_comp_fibre_cover ps) c (\<Theta> ps) (\<Theta> ps) closed_interval))"
using ps_def by auto
next
fix xs assume A: "xs \<in> (\<Union>ps\<in>ordered_ind_pairs.
condition_to_set (Cond m (A\<^sub>0_comp_fibre_cover ps) c (\<Theta> ps) (\<Theta> ps) closed_interval))"
then obtain ps where ps_def: "ps \<in> ordered_ind_pairs"
"xs \<in> condition_to_set (Cond m (A\<^sub>0_comp_fibre_cover ps) c (\<Theta> ps) (\<Theta> ps) closed_interval)"
by auto
obtain i j where ij_def: "i \<in> inds" "j \<in> inds" "i < j" "ps = (i,j)"
using ps_def unfolding ordered_ind_pairs_def ind_pairs_def by auto
have xs_closed: "xs \<in> carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>)"
using ps_def unfolding condition_to_set.simps \<C>_def cell_def by auto
obtain t x where tx_def: "xs = t#x" "t \<in> carrier Q\<^sub>p" "x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)"
using xs_closed
by (metis Qp_pow_ConsE(1) Qp_pow_ConsE(2) cartesian_power_car_memE
list.exhaust_sel list.size(3) nat.distinct(2))
have props: "val (t \<ominus> c x) = val (\<Theta> ps x)" "x \<in> A\<^sub>0_comp_fibre_cover ps"
using ps_def
unfolding tx_def condition_to_set.simps cell_def mem_Collect_eq list_tl
list_hd closed_interval_def by auto
have closures: "\<Theta> ps x \<in> Units Q\<^sub>p" "t \<ominus> c x \<in> carrier Q\<^sub>p" "t \<ominus> c x \<in> Units Q\<^sub>p"
"a i x \<in> Units Q\<^sub>p" "a j x \<in> Units Q\<^sub>p"
proof-
show 0: "a i x \<in> Units Q\<^sub>p" "a j x \<in> Units Q\<^sub>p"
using ij_def
apply (metis SA_Units_nonzero Units_eq_nonzero inds_memE tx_def(3))
using ij_def
by (metis SA_Units_nonzero Units_eq_nonzero inds_memE tx_def(3))
show 1: "\<Theta> ps x \<in> Units Q\<^sub>p"
by (metis SA_Units_nonzero Units_eq_nonzero \<Theta>_unit(1) ps_def(1) tx_def(3))
show 2: "t \<ominus> c x \<in> carrier Q\<^sub>p"
using tx_def c_closed Qp.cring_simprules(4) SA_car_closed by presburger
show 3: "t \<ominus> c x \<in> Units Q\<^sub>p"
using 1 2 props val_zero
by (metis Units_eq_nonzero equal_val_imp_equal_ord(2))
qed
have 1: "int (j - i) * ord (\<Theta> (i, j) x) = ord ((a i \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a j) x)"
using props
unfolding A\<^sub>0_comp_fibre_cover_def mem_Collect_eq ij_def snd_conv fst_conv Int_iff by auto
hence 2: "int (j - i) * ord (t \<ominus> c x) = ord (a i x) - ord (a j x)"
using props 1 closures
by (metis SA_div_eval Units_eq_nonzero a_cfs_closed equal_val_imp_equal_ord(1) ij_def(2)
ij_def(4) inds_memE ord_fract tx_def(3))
hence 3: "val (a i x) = val (a j x \<otimes> (t \<ominus> c x) [^] (j - i))"
using ij_def closures val_ord Qp.Units_m_closed Qp.nat_pow_nonzero Units_eq_nonzero
nonzero_nat_pow_ord ord_mult by auto
have 4: "xs \<notin> A\<^sub>0"
using props ij_def 3
unfolding A\<^sub>0_comp_fibre_cover_def mem_Collect_eq ij_def snd_conv fst_conv Int_iff A\<^sub>0_def
by (metis list.sel(1) list.sel(3) tx_def(1))
have 5: "xs \<in> condition_to_set \<C>"
unfolding \<C>_def condition_to_set.simps
apply(intro cell_memI xs_closed, unfold tx_def list_tl list_hd)
using props unfolding A\<^sub>0_comp_fibre_cover_def Int_iff mem_Collect_eq props by auto
show "xs \<in> condition_to_set \<C> - A\<^sub>0"
using 4 5 by auto
qed
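text\<open>The key computation in the forward inclusion (steps \<open>3\<close> to \<open>8\<close> above): off the term-inequality set for $(i,j)$ we have $\mathrm{val}(a_i(x)) = \mathrm{val}(a_j(x)(t - c(x))^{j - i})$, hence
\[
\mathrm{ord}\big(\tfrac{a_i}{a_j}(x)\big) = (j - i)\,\mathrm{ord}(t - c(x)),
\]
so the remainder modulo $j - i$ vanishes and the defining property of $\Theta$ forces $\mathrm{ord}(t - c(x)) = \mathrm{ord}(\Theta(i,j)(x))$, i.e. $\mathrm{val}(t - c(x)) = \mathrm{val}(\Theta(i,j)(x))$.\<close>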
lemma A\<^sub>0_comp_decomp:
"\<exists>S'. (is_cell_decomp m S' (condition_to_set \<C> - A\<^sub>0) \<and>
(\<forall>B\<in>S'. (\<exists> \<phi>. \<phi> \<in> Units (SA m) \<and>
center B = c \<and> l_bound B = \<phi> \<and> u_bound B = \<phi> \<and>
boundary_condition B = closed_interval)))"
proof(cases "ordered_ind_pairs = {}")
case True
hence 0: "(condition_to_set \<C> - A\<^sub>0) = {}"
unfolding A\<^sub>0_as_intersection True by auto
have "is_cell_decomp m {} (condition_to_set \<C> - A\<^sub>0)"
unfolding 0 is_partition_def disjoint_def is_cell_decomp_def by auto
thus ?thesis by blast
next
case False
interpret one_val_point_decomp _ _ _ _ _ _ _ _ _ _ _ _ _ "condition_to_set \<C> - A\<^sub>0"
ordered_ind_pairs A\<^sub>0_comp_fibre_cover
\<Theta>
apply(intro one_val_point_decomp.intro one_val_point_decomp_axioms.intro
common_refinement_locale_axioms A\<^sub>0_comp_fibre_cover_semialg ordered_ind_pairs_finite
False)
apply auto[1]
using \<Theta>_unit unfolding A\<^sub>0_comp_fibre_cover_covers Q\<^sub>p_def Z\<^sub>p_def \<iota>_def
by auto
show ?thesis
using decomp \<Theta>_unit(1)
by (metis (no_types, opaque_lifting) image_iff)
qed
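text\<open>So every cell in this decomposition of $\mathcal{C} \setminus A_0$ has center $c$ and equal upper and lower bound given by a single semialgebraic unit $\varphi$, with closed-interval boundary condition; that is, it is cut out by $\mathrm{val}(t - c(x)) = \mathrm{val}(\varphi(x))$, which is the shape of the cells $B$ in Denef's proof quoted above.\<close>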
definition A\<^sub>0_comp_decomp where
"A\<^sub>0_comp_decomp = (SOME S'. (is_cell_decomp m S' (condition_to_set \<C> - A\<^sub>0) \<and>
(\<forall>B\<in>S'. (\<exists> \<phi>. \<phi> \<in> Units (SA m) \<and> center B = c \<and>
l_bound B = \<phi> \<and> u_bound B = \<phi> \<and> boundary_condition B = closed_interval))))"
lemma A\<^sub>0_comp_decompE:
"(is_cell_decomp m A\<^sub>0_comp_decomp (condition_to_set \<C> - A\<^sub>0) \<and>
(\<forall>B \<in> A\<^sub>0_comp_decomp.
(\<exists> \<phi>. \<phi> \<in> Units (SA m) \<and>
center B = c \<and> l_bound B = \<phi> \<and> u_bound B = \<phi> \<and>
boundary_condition B = closed_interval)))"
proof-
obtain S' where S'_def: "(is_cell_decomp m S' (condition_to_set \<C> - A\<^sub>0) \<and>
(\<forall>B\<in>S'. (\<exists> \<phi>. \<phi> \<in> Units (SA m) \<and> center B = c \<and>
l_bound B = \<phi> \<and> u_bound B = \<phi> \<and> boundary_condition B = closed_interval)))"
using A\<^sub>0_comp_decomp by blast
show ?thesis apply(rule SomeE[of "A\<^sub>0_comp_decomp" _ S'])
unfolding A\<^sub>0_comp_decomp_def apply blast
by(rule S'_def)
qed
text\<open>That $A_0$ can be decomposed as desired is relatively easy to show:\<close>
lemma A\<^sub>0_decomp:
assumes "inds \<noteq> {}"
shows "\<exists>S. is_cell_decomp m S A\<^sub>0 \<and>
(\<forall>B\<in>S. center B = c \<and>
(\<exists>N. SA_poly_ubounded p m f (center B) (condition_to_set B) N))"
proof-
have 0: "\<exists>S. is_cell_decomp m S A\<^sub>0 \<and> (\<forall>B\<in>S. center B = c)"
proof-
have 0: "\<exists>S. is_cell_decomp m S (condition_to_set \<C> - (condition_to_set \<C> - A\<^sub>0)) \<and> (\<forall>A\<in>S. center A = c)"
apply(rule cell_decomp_same_center[of \<C> m A c a1 a2 I "condition_to_set \<C> - A\<^sub>0"])
apply (simp add: \<C>_cond)
using \<C>_def apply blast
apply blast
using A\<^sub>0_comp_decompE
by auto
have 1: "(condition_to_set \<C> - (condition_to_set \<C> - A\<^sub>0)) = A\<^sub>0"
using A\<^sub>0_def by auto
show ?thesis
using "0" "1" by auto
qed
then obtain S where S_def: "is_cell_decomp m S A\<^sub>0 \<and> (\<forall>B\<in>S. center B = c)"
by blast
have "(\<forall>B\<in>S. \<exists>N. SA_poly_ubounded p m f (center B) (condition_to_set B) N)"
proof
fix B
assume A: "B \<in> S"
have center_B: "center B = c"
using A S_def by blast
show "\<exists>N. SA_poly_ubounded p m f (center B) (condition_to_set B) N"
apply(rule exI, rule SA_poly_uboundedI[of _ _ _ _ 0])
using f_closed apply blast
unfolding center_B
apply (simp add: c_closed)
using A S_def
apply (metis is_cellI is_cell_decompE(3) is_cell_decompE(4) is_cell_subset)
proof-
fix x t i assume B': " t # x \<in> condition_to_set B"
then have P0: "t # x \<in> A\<^sub>0"
using A S_def is_cell_decompE
by (meson in_mono is_cell_decomp_subset)
hence P1: " val (SA_poly_to_SA_fun m f (t # x)) = (MIN i\<in>inds. (val ( (a i x)\<otimes>(t \<ominus> c x)[^]i)))"
using val_f_on_A\<^sub>0[of "t#x"] assms P0
unfolding list_tl list_hd by auto
have t_closed: "t \<in> carrier Q\<^sub>p"
using P0 A\<^sub>0_def cartesian_power_head
by (metis (no_types, lifting) B' cell_memE(1) condition_to_set.simps list_hd padic_fields.condition_decomp' padic_fields_axioms)
have x_closed: "x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)"
using P0 A\<^sub>0_def cartesian_power_tail[of "t#x" Q\<^sub>p m]
by (metis (no_types, lifting) A\<^sub>0_closed list_tl subsetD)
have x_closed': "x \<in> A"
using P0 A\<^sub>0_def \<C>_memE(3) by fastforce
have P2: "val (SA_poly_to_Qp_poly m x f \<bullet> t) = (MIN i\<in>inds. (val ( (a i x)\<otimes>(t \<ominus> c x)[^]i)))"
using P1 SA_poly_to_SA_fun_formula[of f m x t] A x_closed t_closed
using f_closed by force
have P3: "i \<in> inds \<Longrightarrow> val (SA_poly_to_Qp_poly m x f \<bullet> t) \<le> (val ( (a i x)\<otimes>(t \<ominus> c x)[^]i))"
apply(rule MinE''[of inds])
using inds_finite apply blast
apply blast
using P2 apply blast
by blast
have P4: "UPQ.taylor_term (c x) (SA_poly_to_Qp_poly m x f) i \<bullet> t =
taylor_expansion Q\<^sub>p (c x) (SA_poly_to_Qp_poly m x f) i \<otimes> (t \<ominus> c x) [^] i"
using A UPQ.to_fun_taylor_term[of "SA_poly_to_Qp_poly m x f" t "c x" i]
SA_poly_to_Qp_poly_closed[of x m f] t_closed c_closed x_closed
SA_car_memE(3)[of c m]
unfolding UPQ.taylor_def
using f_closed by blast
have P5: "taylor_expansion (SA m) c f i x = taylor_expansion Q\<^sub>p (c x) (SA_poly_to_Qp_poly m x f) i"
using SA_poly_to_Qp_poly_taylor_cfs[of f m x c i] c_closed x_closed f_closed by blast
have P6: "i \<in> inds \<Longrightarrow> (UPQ.taylor_term (c x) (SA_poly_to_Qp_poly m x f) i \<bullet> t) = (a i x)\<otimes>(t \<ominus> c x)[^]i"
using a_eval a_def x_closed' unfolding P4 P5 a_def
by auto
have P7: "i \<in> inds \<Longrightarrow> val (SA_poly_to_Qp_poly m x f \<bullet> t) \<le> val (UPQ.taylor_term (c x) (SA_poly_to_Qp_poly m x f) i \<bullet> t)"
using P6 unfolding UPQ.taylor_term_def
using P3 by presburger
have P8: "i \<notin> inds \<Longrightarrow> UPQ.taylor_term (c x) (SA_poly_to_Qp_poly m x f) i \<bullet> t = \<zero>"
using x_closed' inds_memE c_closed x_closed t_closed SA_car_memE(3)[of c m]
unfolding P4 P5 a_def
by (metis P5 Qp.cring_simprules(26) Qp.cring_simprules(4) Qp.nat_pow_closed SA_car_closed a_def inds_non_memE)
have P9: "i \<notin> inds \<Longrightarrow> val (UPQ.taylor_term (c x) (SA_poly_to_Qp_poly m x f) i \<bullet> t) = \<infinity>"
using val_zero unfolding P8 by blast
show "val (SA_poly_to_Qp_poly m x f \<bullet> t)
\<le> val (UPQ.taylor_term (c x) (SA_poly_to_Qp_poly m x f) i \<bullet> t) + eint 0"
apply(cases "i \<in> inds")
using P7 apply (metis add.right_neutral eint_defs(1))
unfolding P9
by (metis add.right_neutral eint_defs(1) eint_ord_code(3))
qed
qed
thus ?thesis using S_def by auto
qed
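text\<open>Note that the constant $N = 0$ suffices in the boundedness claim: on $A_0$ the valuation of $f(t,x)$ equals the minimum of the valuations of the Taylor terms $a_i(x)(t - c(x))^i$, so it is bounded above by the valuation of each individual term, which is exactly the \<open>SA_poly_ubounded\<close> condition with bound $0$.\<close>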
end
locale A\<^sub>0_refinement = common_refinement_locale +
fixes B b b1 b2 J
assumes B_cell: "is_cell_condition B"
assumes B_eq: "B = Cond m b c b1 b2 J"
assumes B_subset: "condition_to_set B \<subseteq> A\<^sub>0"
context A\<^sub>0_refinement
begin
text\<open>We wish to decompose the set $A_0$ into finer cells so that on each cell there is a single fixed index $i_0$ for which $\mathrm{val}(a_{i_0}(x)(t - c(x))^{i_0})$ is minimal. This was easy to do on
the complement of $A_0$, because there this value did not depend on $t$; here it will take some
extra work. We assume we have already obtained a cell in a decomposition of $A_0$, and
we further decompose this cell until we have our desired property.\<close>
definition refinement_functions where
"refinement_functions = insert \<zero>\<^bsub>SA m\<^esub> (\<Theta> ` ordered_ind_pairs)"
definition refined_decomp where
"refined_decomp = (SOME S. is_cell_decomp m S (condition_to_set (Cond m b c b1 b2 J)) \<and>
(\<forall>C\<in>S. center C = c \<and>
(\<forall>f\<in>refinement_functions.
\<forall>g\<in>refinement_functions.
\<forall>I. is_convex_condition I \<longrightarrow>
condition_to_set C \<subseteq> condition_to_set (Cond m b c f g I) \<or>
condition_to_set C \<inter> condition_to_set (Cond m b c f g I) = {})))"
lemma refined_decomp_prop:
"is_cell_decomp m refined_decomp (condition_to_set (Cond m b c b1 b2 J)) \<and>
(\<forall>C\<in> refined_decomp. center C = c \<and>
(\<forall>f\<in>refinement_functions.
\<forall>g\<in>refinement_functions.
\<forall>I. is_convex_condition I \<longrightarrow>
condition_to_set C \<subseteq> condition_to_set (Cond m b c f g I) \<or>
condition_to_set C \<inter> condition_to_set (Cond m b c f g I) = {}))"
proof-
have 0: "finite refinement_functions"
proof-
have 0: "refinement_functions \<subseteq> insert \<zero>\<^bsub>SA m\<^esub> (\<Theta> ` ind_pairs)"
unfolding refinement_functions_def ordered_ind_pairs_def
by auto
have "finite (insert \<zero>\<^bsub>SA m\<^esub> (\<Theta> ` ind_pairs))"
using finite_ind_pairs by auto
thus ?thesis using 0 finite_subset by blast
qed
have 1: "refinement_functions \<subseteq> carrier (SA m)"
unfolding refinement_functions_def
using \<Theta>_unit by blast
have 0: " \<exists>S. is_cell_decomp m S (condition_to_set (Cond m b c b1 b2 J)) \<and>
(\<forall>C\<in>S. center C = c \<and>
(\<forall>f\<in>refinement_functions.
\<forall>g\<in>refinement_functions.
\<forall>I. is_convex_condition I \<longrightarrow>
condition_to_set C \<subseteq> condition_to_set (Cond m b c f g I) \<or>
condition_to_set C \<inter> condition_to_set (Cond m b c f g I) = {}))"
using 0 1 semialg_boundary_cell_decomp[of refinement_functions m B b c b1 b2 J]
refinement_functions_def B_eq B_cell by auto
then obtain S where S_def: "is_cell_decomp m S (condition_to_set (Cond m b c b1 b2 J)) \<and>
(\<forall>C\<in>S. center C = c \<and>
(\<forall>f\<in>refinement_functions.
\<forall>g\<in>refinement_functions.
\<forall>I. is_convex_condition I \<longrightarrow>
condition_to_set C \<subseteq> condition_to_set (Cond m b c f g I) \<or>
condition_to_set C \<inter> condition_to_set (Cond m b c f g I) = {}))"
by blast
thus ?thesis using refined_decomp_def SomeE[of refined_decomp _ S]
by blast
qed
lemma refined_decomp_subset:
assumes "\<B> \<in> refined_decomp"
shows "condition_to_set \<B> \<subseteq> condition_to_set B"
using assms is_cell_decomp_subset[of m refined_decomp "condition_to_set B" \<B>] refined_decomp_prop
unfolding B_eq by auto
lemma refined_decomp_closure:
assumes "\<B> \<in> refined_decomp"
assumes "t#x \<in> condition_to_set \<B>"
shows "t \<in> carrier Q\<^sub>p"
"x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)"
"t \<ominus> c x \<in> carrier Q\<^sub>p"
proof-
show "t \<in> carrier Q\<^sub>p"
"x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)"
using assms B_cell refined_decomp_subset Qp_pow_ConsE[of "t#x" m]
unfolding B_eq condition_to_set.simps cell_def mem_Collect_eq list_tl list_hd
by auto
thus "t \<ominus> c x \<in> carrier Q\<^sub>p"
using Qp.cring_simprules(4) SA_car_closed c_closed by presburger
qed
lemma refined_decomp_static_order1:
assumes "\<B> \<in> refined_decomp"
assumes "t#x \<in> condition_to_set \<B>"
assumes "(i,j) \<in> ordered_ind_pairs"
shows "\<And>s y. s#y \<in> condition_to_set \<B>
\<Longrightarrow> val (t \<ominus> c x) \<le> val (\<Theta>(i,j) x) \<Longrightarrow> val (s \<ominus> c y) \<le> val (\<Theta>(i,j) y)"
"\<And>s y. s#y \<in> condition_to_set \<B>
\<Longrightarrow> val (t \<ominus> c x) < val (\<Theta>(i,j) x) \<Longrightarrow> val (s \<ominus> c y) < val (\<Theta>(i,j) y)"
proof-
fix s y assume a: "s#y \<in> condition_to_set \<B>"
have sy_in: "s#y \<in> condition_to_set B"
using a assms refined_decomp_prop is_cell_decomp_subset B_eq by blast
have s_closed: "s \<in> carrier Q\<^sub>p"
using assms B_cell sy_in
unfolding B_eq condition_to_set.simps cell_def mem_Collect_eq
by (metis Qp_pow_ConsE(2) list.sel(1))
have y_closed: "y \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)"
using assms B_cell sy_in
unfolding B_eq condition_to_set.simps cell_def mem_Collect_eq
by (metis Qp_pow_ConsE(1) list.sel(3))
have tx_in: "t#x \<in> condition_to_set B"
using assms refined_decomp_prop is_cell_decomp_subset B_eq by blast
have t_closed: "t \<in> carrier Q\<^sub>p"
using assms refined_decomp_closure by auto
have x_closed: "x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)"
using assms refined_decomp_closure by auto
have i_le_j: "i < j"
using assms ordered_ind_pairs_def by auto
have in_inds: "i \<in> inds" "j \<in> inds"
using assms ordered_ind_pairs_def ind_pairs_def by auto
have F0: "t#x \<in> A\<^sub>0"
using tx_in assms B_subset by blast
show "val (\<Theta>(i,j) x) \<ge> val (t \<ominus> c x) \<Longrightarrow> val (\<Theta>(i,j) y) \<ge> val (s \<ominus> c y)"
proof-
assume A: "val (\<Theta>(i,j) x) \<ge> val (t \<ominus> c x)"
then have 0: "t#x \<in> condition_to_set (Cond m b c (\<Theta>(i,j)) (\<Theta>(i,j)) closed_ray)"
unfolding condition_to_set.simps cell_def mem_Collect_eq list_tl list_hd closed_ray_def
using B_cell B_eq Qp_pow_ConsI padic_fields.condition_to_set_memE'(1) padic_fields_axioms
t_closed tx_in x_closed by auto
hence 1: "condition_to_set \<B> \<subseteq> condition_to_set (Cond m b c (\<Theta>(i,j)) (\<Theta>(i,j)) closed_ray)"
using assms refined_decomp_prop unfolding refinement_functions_def is_convex_condition_def
by blast
hence "s#y \<in> condition_to_set (Cond m b c (\<Theta>(i,j)) (\<Theta>(i,j)) closed_ray)"
using a by auto
thus "val (\<Theta>(i,j) y) \<ge> val (s \<ominus> c y)"
unfolding condition_to_set.simps cell_def mem_Collect_eq list_tl list_hd closed_ray_def
by auto
qed
show "val (\<Theta>(i,j) x) > val (t \<ominus> c x) \<Longrightarrow> val (\<Theta>(i,j) y) > val (s \<ominus> c y)"
proof-
assume A: "val (\<Theta>(i,j) x) > val (t \<ominus> c x)"
then have 0: "t#x \<in> condition_to_set (Cond m b c (\<Theta>(i,j)) (\<Theta>(i,j)) open_ray)"
unfolding condition_to_set.simps cell_def mem_Collect_eq list_tl list_hd
using A\<^sub>0_closed B_cell B_eq F0 condition_to_set_memE'(1) open_ray_memI tx_in by auto
hence 1: "condition_to_set \<B> \<subseteq> condition_to_set (Cond m b c (\<Theta>(i,j)) (\<Theta>(i,j)) open_ray)"
using assms refined_decomp_prop unfolding refinement_functions_def is_convex_condition_def
by blast
hence "s#y \<in> condition_to_set (Cond m b c (\<Theta>(i,j)) (\<Theta>(i,j)) open_ray)"
using a by auto
thus "val (\<Theta>(i,j) y) > val (s \<ominus> c y)"
unfolding condition_to_set.simps cell_def mem_Collect_eq list_tl list_hd open_ray_def
by auto
qed
qed
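text\<open>In words: after refining along the boundaries of the refinement functions, whether $\mathrm{val}(t - c(x)) \le \mathrm{val}(\Theta(i,j)(x))$ holds, strictly or weakly, depends only on the cell $\mathcal{B}$ and not on the particular point $(t,x)$; the next lemma records the reversed directions.\<close>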
lemma refined_decomp_static_order2:
assumes "\<B> \<in> refined_decomp"
assumes "t#x \<in> condition_to_set \<B>"
assumes "(i,j) \<in> ordered_ind_pairs"
shows "\<And>s y. s#y \<in> condition_to_set \<B>
\<Longrightarrow> val (t \<ominus> c x) \<ge> val (\<Theta>(i,j) x) \<Longrightarrow> val (s \<ominus> c y) \<ge> val (\<Theta>(i,j) y)"
"\<And>s y. s#y \<in> condition_to_set \<B>
\<Longrightarrow> val (t \<ominus> c x) > val (\<Theta>(i,j) x) \<Longrightarrow> val (s \<ominus> c y) > val (\<Theta>(i,j) y)"
proof-
fix s y assume A: "s#y \<in> condition_to_set \<B> "
have 0: "val (s \<ominus> c y) < val (\<Theta> (i, j) y) \<Longrightarrow> val (t \<ominus> c x) < val (\<Theta> (i, j) x)"
using A assms refined_decomp_static_order1(2)[of \<B> s y i j t x] by auto
have 1: "val (s \<ominus> c y) \<le> val (\<Theta> (i, j) y) \<Longrightarrow> val (t \<ominus> c x) \<le> val (\<Theta> (i, j) x)"
using A assms refined_decomp_static_order1(1)[of \<B> s y i j t x] by auto
have 2: "\<And> x y::eint. x < y \<longleftrightarrow> \<not> y \<le> x"
by auto
show "val (t \<ominus> c x) \<ge> val (\<Theta>(i,j) x) \<Longrightarrow> val (s \<ominus> c y) \<ge> val (\<Theta>(i,j) y)"
using 0 unfolding 2 by auto
show "val (t \<ominus> c x) > val (\<Theta>(i,j) x) \<Longrightarrow> val (s \<ominus> c y) > val (\<Theta>(i,j) y)"
using 1 unfolding 2 by auto
qed
lemma val_in_B_zero:
assumes "\<B> \<in> refined_decomp"
assumes "t#x \<in> condition_to_set \<B>"
assumes "(i,j) \<in> ordered_ind_pairs"
assumes "t \<ominus> c x = \<zero>"
shows "\<And>s y. s#y \<in> condition_to_set \<B> \<Longrightarrow> s \<ominus> c y = \<zero>"
proof-
fix s y assume A: "s#y \<in> condition_to_set \<B>"
have sy_in: "s#y \<in> condition_to_set B"
using A assms refined_decomp_prop is_cell_decomp_subset B_eq by blast
have s_closed: "s \<in> carrier Q\<^sub>p"
using assms B_cell sy_in
unfolding B_eq condition_to_set.simps cell_def mem_Collect_eq
by (metis Qp_pow_ConsE(2) list.sel(1))
have y_closed: "y \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)"
using assms B_cell sy_in
unfolding B_eq condition_to_set.simps cell_def mem_Collect_eq
by (metis Qp_pow_ConsE(1) list.sel(3))
have tx_in: "t#x \<in> condition_to_set B"
using assms refined_decomp_prop is_cell_decomp_subset B_eq by blast
have t_closed: "t \<in> carrier Q\<^sub>p"
using assms refined_decomp_closure by auto
have x_closed: "x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)"
using assms refined_decomp_closure by auto
have F0: "t#x \<in> A\<^sub>0"
using tx_in assms B_subset by blast
have "t#x \<in> condition_to_set (Cond m b c \<zero>\<^bsub>SA m\<^esub> \<zero>\<^bsub>SA m\<^esub> closed_interval)"
unfolding condition_to_set.simps cell_def mem_Collect_eq list_tl list_hd
closed_interval_def assms val_def
using A\<^sub>0_closed B_cell B_eq SA_zeroE F0 padic_fields.condition_to_set_memE'(1)
padic_fields_axioms tx_in x_closed by auto
then have "condition_to_set \<B> \<subseteq>condition_to_set (Cond m b c \<zero>\<^bsub>SA m\<^esub> \<zero>\<^bsub>SA m\<^esub> closed_interval)"
using assms refined_decomp_prop
unfolding is_convex_condition_def refinement_functions_def by blast
hence "s#y \<in> condition_to_set (Cond m b c \<zero>\<^bsub>SA m\<^esub> \<zero>\<^bsub>SA m\<^esub> closed_interval)"
using A by auto
thus "s \<ominus> c y = \<zero>"
unfolding condition_to_set.simps cell_def mem_Collect_eq list_tl list_hd closed_interval_def
val_def
by (smt (verit, best) Extended_Int.infinity_ileE SA_zeroE y_closed)
qed
lemma val_in_B_nonzero:
assumes "\<B> \<in> refined_decomp"
assumes "t#x \<in> condition_to_set \<B>"
assumes "(i,j) \<in> ordered_ind_pairs"
assumes "t \<ominus> c x \<noteq> \<zero>"
shows "\<And>s y. s#y \<in> condition_to_set \<B> \<Longrightarrow> s \<ominus> c y \<noteq> \<zero>"
proof-
fix s y assume A: "s#y \<in> condition_to_set \<B>"
have sy_in: "s#y \<in> condition_to_set B"
using A assms refined_decomp_prop is_cell_decomp_subset B_eq by blast
have s_closed: "s \<in> carrier Q\<^sub>p"
using assms B_cell sy_in
unfolding B_eq condition_to_set.simps cell_def mem_Collect_eq
by (metis Qp_pow_ConsE(2) list.sel(1))
have y_closed: "y \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)"
using assms B_cell sy_in
unfolding B_eq condition_to_set.simps cell_def mem_Collect_eq
by (metis Qp_pow_ConsE(1) list.sel(3))
have tx_in: "t#x \<in> condition_to_set B"
using assms refined_decomp_prop is_cell_decomp_subset B_eq by blast
have t_closed: "t \<in> carrier Q\<^sub>p"
using assms refined_decomp_closure by auto
have x_closed: "x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)"
using assms refined_decomp_closure by auto
have F0: "t#x \<in> A\<^sub>0"
using tx_in assms B_subset by blast
have "t#x \<notin> condition_to_set (Cond m b c \<zero>\<^bsub>SA m\<^esub> \<zero>\<^bsub>SA m\<^esub> closed_interval)"
unfolding condition_to_set.simps cell_def mem_Collect_eq list_tl list_hd
closed_interval_def val_def
using A\<^sub>0_closed B_cell B_eq SA_zeroE F0 padic_fields.condition_to_set_memE'(1)
padic_fields_axioms assms tx_in x_closed by auto
then have "condition_to_set \<B> \<inter> condition_to_set (Cond m b c \<zero>\<^bsub>SA m\<^esub> \<zero>\<^bsub>SA m\<^esub> closed_interval) = {}"
using assms refined_decomp_prop
unfolding is_convex_condition_def refinement_functions_def by blast
hence "s#y \<notin> condition_to_set (Cond m b c \<zero>\<^bsub>SA m\<^esub> \<zero>\<^bsub>SA m\<^esub> closed_interval)"
using A by auto
thus "s \<ominus> c y \<noteq> \<zero>"
unfolding condition_to_set.simps cell_def mem_Collect_eq list_tl list_hd closed_interval_def
by (metis A assms(1) assms(2) assms(3) assms(4) val_in_B_zero)
qed
lemma ineq_equivalence:
assumes "\<alpha> \<in> Units Q\<^sub>p"
assumes "\<beta> \<in> Units Q\<^sub>p"
assumes "x \<in> Units Q\<^sub>p"
shows "val (\<alpha> \<otimes> x[^](i::nat)) < val (\<beta> \<otimes> x[^](j::nat))
\<Longrightarrow> ord \<alpha> - ord \<beta> < (int j- int i)*ord x"
"ord \<alpha> - ord \<beta> < (int j- int i)*ord x
\<Longrightarrow> val (\<alpha> \<otimes> x[^](i::nat)) < val (\<beta> \<otimes> x[^](j::nat))"
"val (\<alpha> \<otimes> x[^](i::nat)) > val (\<beta> \<otimes> x[^](j::nat))
\<Longrightarrow> ord \<alpha> - ord \<beta> > (int j- int i)*ord x"
"ord \<alpha> - ord \<beta> > (int j- int i)*ord x
\<Longrightarrow> val (\<alpha> \<otimes> x[^](i::nat)) > val (\<beta> \<otimes> x[^](j::nat))"
by(auto simp: Qp.Units_pow_closed Units_nonzero_Qp assms(1) assms(2) assms(3)
int_distrib(3) nonzero_nat_pow_ord ord_mult)
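text\<open>These four statements are instances of one elementary computation: for units all valuations are finite and $\mathrm{val} = \mathrm{ord}$, so
\[
\mathrm{val}(\alpha x^i) < \mathrm{val}(\beta x^j) \iff \mathrm{ord}(\alpha) + i\,\mathrm{ord}(x) < \mathrm{ord}(\beta) + j\,\mathrm{ord}(x) \iff \mathrm{ord}(\alpha) - \mathrm{ord}(\beta) < (j - i)\,\mathrm{ord}(x),
\]
using $\mathrm{ord}(uv) = \mathrm{ord}(u) + \mathrm{ord}(v)$ and $\mathrm{ord}(x^n) = n\,\mathrm{ord}(x)$ for nonzero elements.\<close>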
lemma val_ineq_theta_ineq1:
assumes "\<B> \<in> refined_decomp"
assumes "t#x \<in> condition_to_set \<B>"
assumes "(i,j) \<in> ordered_ind_pairs"
assumes "t \<ominus> c x \<noteq> \<zero>"
shows "val ((a i x)\<otimes>(t \<ominus> c x)[^]i) < val ((a j x)\<otimes>(t \<ominus> c x)[^]j)
\<Longrightarrow> val (t \<ominus> c x) > val (\<Theta>(i,j) x)"
"val ((a i x)\<otimes>(t \<ominus> c x)[^]i) > val ((a j x)\<otimes>(t \<ominus> c x)[^]j)
\<Longrightarrow> val (\<Theta> (i, j) x) \<ge> val (t \<ominus> c x)"
"val (t \<ominus> c x) > val (\<Theta> (i, j) x) \<Longrightarrow>
val (a i x \<otimes> (t \<ominus> c x) [^] i) < val (a j x \<otimes> (t \<ominus> c x) [^] j)"
"val (t \<ominus> c x) \<le> val (\<Theta>(i,j) x) \<Longrightarrow>
val ((a i x)\<otimes>(t \<ominus> c x)[^]i) > val ((a j x)\<otimes>(t \<ominus> c x)[^]j)"
proof-
have tx_in: "t#x \<in> condition_to_set B"
using assms refined_decomp_prop is_cell_decomp_subset B_eq by blast
have inA0: "t#x \<in> A\<^sub>0"
using tx_in assms B_subset by blast
have t_closed: "t \<in> carrier Q\<^sub>p"
using assms refined_decomp_closure by auto
have x_closed: "x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)"
using assms refined_decomp_closure by auto
have i_le_j: "i < j"
using assms ordered_ind_pairs_def by auto
have in_inds: "i \<in> inds" "j \<in> inds"
using assms ordered_ind_pairs_def ind_pairs_def by auto
have 0: "(int j - int i) * ord (\<Theta>(i,j) x)
+ (ord (a i x) - ord (a j x)) mod (int j - int i)
= ord (a i x) - ord (a j x)"
using x_closed \<Theta>_ord[of i j x] assms
by (metis (mono_tags, opaque_lifting) SA_Units_nonzero SA_div_eval
a_cfs_closed in_inds(1) in_inds(2) inds_memE ord_fract)
have units: "a i x \<in> Units Q\<^sub>p" "a j x \<in> Units Q\<^sub>p" "t \<ominus> c x \<in> Units Q\<^sub>p"
using i_le_j in_inds A\<^sub>0_closures assms inA0 by auto
have diff_pos: "(int j - int i) > 0"
using i_le_j by auto
have mod_pos: "(ord (a i x) - ord (a j x)) mod (int j - int i) \<ge> 0"
using assms by (simp add: i_le_j)
have ineq: "val (a i x \<otimes> (t \<ominus> c x) [^] i) \<noteq> val (a j x \<otimes> (t \<ominus> c x) [^] j)"
"ord (a i x \<otimes> (t \<ominus> c x) [^] i) \<noteq> ord (a j x \<otimes> (t \<ominus> c x) [^] j)"
using inA0 assms in_inds i_le_j A\<^sub>0_memE[of t x i j] by auto
show g1: "val (a i x \<otimes> (t \<ominus> c x) [^] i) < val (a j x \<otimes> (t \<ominus> c x) [^] j) \<Longrightarrow>
val (\<Theta> (i, j) x) < val (t \<ominus> c x)"
proof-
assume A: "val (a i x \<otimes> (t \<ominus> c x) [^] i) < val (a j x \<otimes> (t \<ominus> c x) [^] j)"
have 1: "ord (a i x) - ord (a j x) < (int j- int i)*ord (t \<ominus> c x)"
by(rule ineq_equivalence, auto simp: units A)
hence 2: "(int j - int i) * ord (\<Theta>(i,j) x) < (int j - int i)* ord(t \<ominus> c x)"
using mod_pos 1 0 by auto
hence 3: "ord (\<Theta>(i,j) x) < ord(t \<ominus> c x)"
by (simp add: i_le_j)
thus "val (\<Theta> (i, j) x) < val (t \<ominus> c x)"
by (metis (mono_tags, lifting) SA_Units_memE' \<Theta>_unit assms(3) assms(4) eint_ord_simps(2)
val_def x_closed)
qed
show g2: "val ((a i x)\<otimes>(t \<ominus> c x)[^]i) > val ((a j x)\<otimes>(t \<ominus> c x)[^]j)
\<Longrightarrow> val (\<Theta> (i, j) x) \<ge> val (t \<ominus> c x)"
proof-
assume A: "val (a i x \<otimes> (t \<ominus> c x) [^] i) > val (a j x \<otimes> (t \<ominus> c x) [^] j)"
have 1: "ord (a i x) - ord (a j x) > (int j- int i)*ord (t \<ominus> c x)"
by(rule ineq_equivalence, auto simp: units A)
hence 2: "(int j - int i) * ord (\<Theta>(i,j) x) + (ord (a i x) - ord (a j x)) mod (int j - int i)
> (int j - int i)* ord(t \<ominus> c x)"
using mod_pos 1 0 by auto
hence 3: "(ord (a i x) - ord (a j x)) mod (int j - int i)
> (int j - int i)*( ord(t \<ominus> c x) - ord (\<Theta>(i,j) x))"
by (smt (verit, ccfv_SIG) nat_distrib(2))
have 4: "( ord(t \<ominus> c x) - ord (\<Theta>(i,j) x)) \<le> 0"
proof-
have R: "\<And> m a b::int. m > 0 \<Longrightarrow> a mod m > m*b \<Longrightarrow> b \<le> 0"
by (smt (verit, ccfv_SIG) Euclidean_Division.pos_mod_bound mod_mult_self1_is_0 mod_pos_pos_trivial mult_less_cancel_right mult_sign_intros(1))
show ?thesis
apply(rule R[of "(int j - int i)" _ "(ord (a i x) - ord (a j x)) mod (int j - int i)"])
using i_le_j 3 by auto
qed
hence 3: "ord (\<Theta>(i,j) x) \<ge> ord(t \<ominus> c x)"
by (simp add: i_le_j)
thus "val (\<Theta> (i, j) x) \<ge> val (t \<ominus> c x)"
using Units_eq_nonzero eint_ord_simps(1) eint_ord_simps(3) units(3) val_def val_ord
by presburger
qed
have "val (t \<ominus> c x) > val (\<Theta> (i, j) x) \<Longrightarrow>
val (a i x \<otimes> (t \<ominus> c x) [^] i) \<le> val (a j x \<otimes> (t \<ominus> c x) [^] j)"
using g2 notin_closed by blast
thus g3: "val (t \<ominus> c x) > val (\<Theta> (i, j) x) \<Longrightarrow>
val (a i x \<otimes> (t \<ominus> c x) [^] i) < val (a j x \<otimes> (t \<ominus> c x) [^] j)"
using g2 ineq by auto
have "val (t \<ominus> c x) \<le> val (\<Theta> (i, j) x) \<Longrightarrow>
val (a j x \<otimes> (t \<ominus> c x) [^] j) \<le> val (a i x \<otimes> (t \<ominus> c x) [^] i)"
using g1 notin_closed by blast
thus g4: "val (t \<ominus> c x) \<le> val (\<Theta> (i, j) x) \<Longrightarrow>
val (a j x \<otimes> (t \<ominus> c x) [^] j) < val (a i x \<otimes> (t \<ominus> c x) [^] i)"
using g1 ineq by auto
qed
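text\<open>Summarizing the trichotomy: for an ordered pair $(i,j)$ and $t - c(x) \ne 0$ on $A_0$, the two term valuations are never equal, and which is smaller is decided by comparing $\mathrm{val}(t - c(x))$ with $\mathrm{val}(\Theta(i,j)(x))$: strict inequality $\mathrm{val}(t - c(x)) > \mathrm{val}(\Theta(i,j)(x))$ makes the $i$-th term strictly smaller, while $\mathrm{val}(t - c(x)) \le \mathrm{val}(\Theta(i,j)(x))$ makes the $j$-th term strictly smaller.\<close>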
lemma val_in_B0:
assumes "\<B> \<in> refined_decomp"
assumes "t#x \<in> condition_to_set \<B>"
assumes "(i,j) \<in> ordered_ind_pairs"
assumes "t \<ominus> c x \<noteq> \<zero>"
assumes "val (t \<ominus> c x) = val (\<Theta> (i,j) x)"
shows "\<And>s y. s#y \<in> condition_to_set \<B> \<Longrightarrow>
val (s \<ominus> c y) = val (\<Theta> (i,j) y)"
proof-
fix s y assume A: "s#y \<in> condition_to_set \<B>"
have sy_in: "s#y \<in> condition_to_set B"
using A assms refined_decomp_prop is_cell_decomp_subset B_eq by blast
have s_closed: "s \<in> carrier Q\<^sub>p"
using assms B_cell sy_in
unfolding B_eq condition_to_set.simps cell_def mem_Collect_eq
by (metis Qp_pow_ConsE(2) list.sel(1))
have y_closed: "y \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)"
using assms B_cell sy_in
unfolding B_eq condition_to_set.simps cell_def mem_Collect_eq
by (metis Qp_pow_ConsE(1) list.sel(3))
have tx_in: "t#x \<in> condition_to_set B"
using assms refined_decomp_prop is_cell_decomp_subset B_eq by blast
have t_closed: "t \<in> carrier Q\<^sub>p"
using assms refined_decomp_closure by auto
have x_closed: "x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)"
using assms refined_decomp_closure by auto
have i_le_j: "i < j"
using assms ordered_ind_pairs_def by auto
have in_inds: "i \<in> inds" "j \<in> inds"
using assms ordered_ind_pairs_def ind_pairs_def by auto
have F0: "t#x \<in> A\<^sub>0"
using tx_in assms B_subset by blast
have F1: "t#x \<in> condition_to_set (Cond m b c (\<Theta>(i,j)) (\<Theta>(i,j)) closed_interval)"
using assms tx_in
unfolding B_eq condition_to_set.simps cell_def mem_Collect_eq list_tl list_hd closed_interval_def
by (simp add: SA_zeroE val_def x_closed)
hence "condition_to_set \<B> \<subseteq> condition_to_set (Cond m b c (\<Theta>(i,j)) (\<Theta>(i,j)) closed_interval)"
using assms refined_decomp_prop
unfolding is_convex_condition_def refinement_functions_def
by blast
hence F3: "s#y \<in> condition_to_set (Cond m b c (\<Theta>(i,j)) (\<Theta>(i,j)) closed_interval)"
using A by auto
thus "val (s \<ominus> c y) = val (\<Theta> (i, j) y)"
unfolding condition_to_set.simps cell_def mem_Collect_eq list_tl list_hd closed_interval_def
by auto
qed
lemma val_in_B1:
assumes "\<B> \<in> refined_decomp"
assumes "t#x \<in> condition_to_set \<B>"
assumes "(i,j) \<in> ordered_ind_pairs"
assumes "t \<ominus> c x \<noteq> \<zero>"
assumes "val ((a i x)\<otimes>(t \<ominus> c x)[^]i) < val ((a j x)\<otimes>(t \<ominus> c x)[^]j)"
shows "\<And>s y. s#y \<in> condition_to_set \<B> \<Longrightarrow>
val ((a i y)\<otimes>(s \<ominus> c y)[^]i) < val ((a j y)\<otimes>(s \<ominus> c y)[^]j)"
proof-
fix s y assume A: "s#y \<in> condition_to_set \<B>"
have sy_in: "s#y \<in> condition_to_set B"
using A assms refined_decomp_prop is_cell_decomp_subset B_eq by blast
have s_closed: "s \<in> carrier Q\<^sub>p"
using assms B_cell sy_in
unfolding B_eq condition_to_set.simps cell_def mem_Collect_eq
by (metis Qp_pow_ConsE(2) list.sel(1))
have y_closed: "y \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)"
using assms B_cell sy_in
unfolding B_eq condition_to_set.simps cell_def mem_Collect_eq
by (metis Qp_pow_ConsE(1) list.sel(3))
have tx_in: "t#x \<in> condition_to_set B"
using assms refined_decomp_prop is_cell_decomp_subset B_eq by blast
have t_closed: "t \<in> carrier Q\<^sub>p"
using assms refined_decomp_closure by auto
have x_closed: "x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)"
using assms refined_decomp_closure by auto
have i_le_j: "i < j"
using assms ordered_ind_pairs_def by auto
have in_inds: "i \<in> inds" "j \<in> inds"
using assms ordered_ind_pairs_def ind_pairs_def by auto
have F0: "t#x \<in> A\<^sub>0"
using tx_in assms B_subset by blast
have F1: "val (t \<ominus> c x) > val (\<Theta>(i,j) x)"
using val_ineq_theta_ineq1 assms by auto
have F2: "t#x \<in> condition_to_set (Cond m b c (\<Theta>(i,j)) \<zero>\<^bsub>SA m\<^esub> left_closed_interval)"
using assms tx_in F1
unfolding B_eq condition_to_set.simps cell_def mem_Collect_eq list_tl list_hd left_closed_interval_def
by (simp add: SA_zeroE val_def x_closed)
hence "condition_to_set \<B> \<subseteq>condition_to_set (Cond m b c (\<Theta>(i,j)) \<zero>\<^bsub>SA m\<^esub> left_closed_interval)"
using assms refined_decomp_prop
unfolding is_convex_condition_def refinement_functions_def
by blast
hence F3: "s#y \<in> condition_to_set (Cond m b c (\<Theta>(i,j)) \<zero>\<^bsub>SA m\<^esub> left_closed_interval)"
using A by auto
hence F4: "val (s \<ominus> c y) \<ge> val (\<Theta>(i,j) y)"
unfolding condition_to_set.simps cell_def mem_Collect_eq list_tl list_hd left_closed_interval_def
by auto
have F5: "s \<ominus> c y \<noteq> \<zero>"
using F3 unfolding condition_to_set.simps cell_def mem_Collect_eq list_tl list_hd left_closed_interval_def
using local.val_zero by force
have F6: "val (s \<ominus> c y) \<noteq> val (\<Theta>(i,j) y)"
using val_in_B0[of \<B> s y i j t x] assms A F1
by (metis F5 basic_trans_rules(20))
hence F7: "val (s \<ominus> c y) > val (\<Theta>(i,j) y)"
using F4 F6 by auto
show "val (a i y \<otimes> (s \<ominus> c y) [^] i) < val (a j y \<otimes> (s \<ominus> c y) [^] j)"
apply(rule val_ineq_theta_ineq1[of \<B>])
using assms A F7 F5 by auto
qed
lemma val_in_B2:
assumes "\<B> \<in> refined_decomp"
assumes "t#x \<in> condition_to_set \<B>"
assumes "(i,j) \<in> ordered_ind_pairs"
assumes "t \<ominus> c x \<noteq> \<zero>"
assumes "val ((a i x)\<otimes>(t \<ominus> c x)[^]i) > val ((a j x)\<otimes>(t \<ominus> c x)[^]j)"
shows "\<And>s y. s#y \<in> condition_to_set \<B> \<Longrightarrow>
val ((a i y)\<otimes>(s \<ominus> c y)[^]i) > val ((a j y)\<otimes>(s \<ominus> c y)[^]j)"
proof-
fix s y assume A: "s#y \<in> condition_to_set \<B>"
have sy_in: "s#y \<in> condition_to_set B"
using A assms refined_decomp_prop is_cell_decomp_subset B_eq by blast
have s_closed: "s \<in> carrier Q\<^sub>p"
using assms B_cell sy_in
unfolding B_eq condition_to_set.simps cell_def mem_Collect_eq
by (metis Qp_pow_ConsE(2) list.sel(1))
have y_closed: "y \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)"
using assms B_cell sy_in
unfolding B_eq condition_to_set.simps cell_def mem_Collect_eq
by (metis Qp_pow_ConsE(1) list.sel(3))
have tx_in: "t#x \<in> condition_to_set B"
using assms refined_decomp_prop is_cell_decomp_subset B_eq by blast
have t_closed: "t \<in> carrier Q\<^sub>p"
using assms refined_decomp_closure by auto
have x_closed: "x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)"
using assms refined_decomp_closure by auto
have i_le_j: "i < j"
using assms ordered_ind_pairs_def by auto
have in_inds: "i \<in> inds" "j \<in> inds"
using assms ordered_ind_pairs_def ind_pairs_def by auto
have F0: "t#x \<in> A\<^sub>0" "s#y \<in> A\<^sub>0"
using tx_in sy_in assms B_subset by auto
have F1: "val (a j y \<otimes> (s \<ominus> c y) [^] j) \<noteq> val (a i y \<otimes> (s \<ominus> c y) [^] i)"
"val (a j x \<otimes> (t \<ominus> c x) [^] j) \<noteq> val (a i x \<otimes> (t \<ominus> c x) [^] i)"
using F0 A\<^sub>0_memE assms
apply (metis A i_le_j in_inds(1) in_inds(2) val_in_B_nonzero)
using F0 A\<^sub>0_memE assms by auto
show "val (a j y \<otimes> (s \<ominus> c y) [^] j) < val (a i y \<otimes> (s \<ominus> c y) [^] i)"
using val_in_B1[of \<B> s y i j t x] F1 assms
by (metis A basic_trans_rules(20) notin_closed val_in_B_zero val_ineq_theta_ineq1(3) val_ineq_theta_ineq1(4))
qed
lemma pre_val_in_B:
assumes "\<B> \<in> refined_decomp"
assumes "(i,j) \<in> ordered_ind_pairs"
shows "\<And>s y t x . t#x \<in> condition_to_set \<B> \<Longrightarrow> s#y \<in> condition_to_set \<B> \<Longrightarrow>
val ((a i x)\<otimes>(t \<ominus> c x)[^]i) > val ((a j x)\<otimes>(t \<ominus> c x)[^]j)
\<longleftrightarrow> val ((a i y)\<otimes>(s \<ominus> c y)[^]i) > val ((a j y)\<otimes>(s \<ominus> c y)[^]j) "
proof-
fix s y t x
assume A: " t#x \<in> condition_to_set \<B>" "s#y \<in> condition_to_set \<B>"
have 0: "condition_to_set \<B> \<subseteq> condition_to_set B"
using assms B_eq is_cell_decomp_subset refined_decomp_prop by blast
have units: "a j x \<in> Units Q\<^sub>p" "a i x \<in> Units Q\<^sub>p" "a i y \<in> Units Q\<^sub>p" "a j y \<in> Units Q\<^sub>p"
using A\<^sub>0_closures A B_subset assms 0
unfolding ordered_ind_pairs_def ind_pairs_def by auto
have 1: "i \<in> inds" "j \<in> inds" "i < j"
using assms unfolding ordered_ind_pairs_def ind_pairs_def by auto
show "val ((a i x)\<otimes>(t \<ominus> c x)[^]i) > val ((a j x)\<otimes>(t \<ominus> c x)[^]j)
\<longleftrightarrow> val ((a i y)\<otimes>(s \<ominus> c y)[^]i) > val ((a j y)\<otimes>(s \<ominus> c y)[^]j) "
proof(cases "t \<ominus> c x = \<zero>")
case True
then have T0: "s \<ominus> c y = \<zero>"
using A val_in_B_zero[of \<B> t x i j] assms by auto
have j_pos: "j > 0"
using assms ordered_ind_pairs_def by auto
hence T1: "(a j y \<otimes> \<zero> [^] j) = \<zero>" "(a j x \<otimes> \<zero> [^] j) = \<zero>"
using assms units Qp.Units_closed Qp.pow_zero Qp.r_null
by auto
show ?thesis
unfolding T1 val_def True
using T0 T1(1) eint_ord_code(6) by presburger
next
case False
then have F0: "s \<ominus> c y \<noteq> \<zero>"
using A val_in_B_nonzero[of \<B> t x i j] assms by auto
have F1: "val ((a i x)\<otimes>(t \<ominus> c x)[^]i) \<noteq> val ((a j x)\<otimes>(t \<ominus> c x)[^]j)"
"val ((a i y)\<otimes>(s \<ominus> c y)[^]i) \<noteq> val ((a j y)\<otimes>(s \<ominus> c y)[^]j)"
using A\<^sub>0_memE[of _ _ i j] assms A 1
apply (meson 0 B_subset False subset_iff)
using A\<^sub>0_memE[of _ _ i j] assms A 1
by (meson "0" B_subset F0 basic_trans_rules(31))
show ?thesis
proof
show 0: "val (a j x \<otimes> (t \<ominus> c x) [^] j) < val (a i x \<otimes> (t \<ominus> c x) [^] i) \<Longrightarrow>
val (a j y \<otimes> (s \<ominus> c y) [^] j) < val (a i y \<otimes> (s \<ominus> c y) [^] i)"
apply(rule val_in_B2[of \<B> t x])
using assms A False by auto
show 1: "val (a j y \<otimes> (s \<ominus> c y) [^] j) < val (a i y \<otimes> (s \<ominus> c y) [^] i) \<Longrightarrow>
val (a j x \<otimes> (t \<ominus> c x) [^] j) < val (a i x \<otimes> (t \<ominus> c x) [^] i)"
using assms F0 A val_in_B2[of \<B> s y i j] 0 F1 by blast
qed
qed
qed
lemma val_in_B:
assumes "\<B> \<in> refined_decomp"
assumes "i \<in> inds"
assumes "j \<in> inds"
assumes "i \<noteq> j"
shows "\<And>s y t x . t#x \<in> condition_to_set \<B> \<Longrightarrow> s#y \<in> condition_to_set \<B> \<Longrightarrow>
val ((a i x)\<otimes>(t \<ominus> c x)[^]i) > val ((a j x)\<otimes>(t \<ominus> c x)[^]j)
\<longleftrightarrow> val ((a i y)\<otimes>(s \<ominus> c y)[^]i) > val ((a j y)\<otimes>(s \<ominus> c y)[^]j)"
"\<And>t x . t#x \<in> condition_to_set \<B> \<Longrightarrow> t \<ominus> c x \<noteq> \<zero> \<Longrightarrow>
val ((a i x)\<otimes>(t \<ominus> c x)[^]i) \<noteq> val ((a j x)\<otimes>(t \<ominus> c x)[^]j)"
"\<And>s y t x . t#x \<in> condition_to_set \<B> \<Longrightarrow> s#y \<in> condition_to_set \<B> \<Longrightarrow>
t \<ominus> c x = \<zero>
\<longleftrightarrow> s \<ominus> c y = \<zero>"
proof-
have sub: "condition_to_set \<B> \<subseteq> A\<^sub>0"
using assms B_eq B_subset is_cell_decomp_subset refined_decomp_prop by blast
show "\<And>t x . t#x \<in> condition_to_set \<B> \<Longrightarrow> t \<ominus> c x \<noteq> \<zero> \<Longrightarrow>
val ((a i x)\<otimes>(t \<ominus> c x)[^]i) \<noteq> val ((a j x)\<otimes>(t \<ominus> c x)[^]j)"
proof(cases "i < j")
case True
show "\<And>t x . t#x \<in> condition_to_set \<B> \<Longrightarrow> t \<ominus> c x \<noteq> \<zero> \<Longrightarrow>
val ((a i x)\<otimes>(t \<ominus> c x)[^]i) \<noteq> val ((a j x)\<otimes>(t \<ominus> c x)[^]j)"
using A\<^sub>0_memE(1)[of _ _ i j] sub assms True by auto
next
case False
show "\<And>t x . t#x \<in> condition_to_set \<B> \<Longrightarrow> t \<ominus> c x \<noteq> \<zero> \<Longrightarrow>
val ((a i x)\<otimes>(t \<ominus> c x)[^]i) \<noteq> val ((a j x)\<otimes>(t \<ominus> c x)[^]j)"
using A\<^sub>0_memE(1)[of _ _ j i] sub assms False
by (smt (z3) nat_neq_iff subset_iff)
qed
next
show "\<And>s y t x.
t # x \<in> condition_to_set \<B> \<Longrightarrow>
s # y \<in> condition_to_set \<B> \<Longrightarrow>
(val (a j x \<otimes> (t \<ominus> c x) [^] j) < val (a i x \<otimes> (t \<ominus> c x) [^] i)) =
(val (a j y \<otimes> (s \<ominus> c y) [^] j) < val (a i y \<otimes> (s \<ominus> c y) [^] i))"
proof(cases "i < j")
case True
then have "(i,j) \<in> ordered_ind_pairs"
unfolding ordered_ind_pairs_def ind_pairs_def using assms by auto
thus "\<And>s y t x . t#x \<in> condition_to_set \<B> \<Longrightarrow> s#y \<in> condition_to_set \<B> \<Longrightarrow>
val ((a i x)\<otimes>(t \<ominus> c x)[^]i) > val ((a j x)\<otimes>(t \<ominus> c x)[^]j)
\<longleftrightarrow> val ((a i y)\<otimes>(s \<ominus> c y)[^]i) > val ((a j y)\<otimes>(s \<ominus> c y)[^]j) "
using assms pre_val_in_B by metis
next
case False
then have ind: "(j,i) \<in> ordered_ind_pairs"
unfolding ordered_ind_pairs_def ind_pairs_def using assms by auto
hence F0: "\<And>s y t x . t#x \<in> condition_to_set \<B> \<Longrightarrow> s#y \<in> condition_to_set \<B> \<Longrightarrow>
val ((a j x)\<otimes>(t \<ominus> c x)[^]j) > val ((a i x)\<otimes>(t \<ominus> c x)[^]i)
\<longleftrightarrow> val ((a j y)\<otimes>(s \<ominus> c y)[^]j) > val ((a i y)\<otimes>(s \<ominus> c y)[^]i) "
using assms pre_val_in_B by metis
fix t x s y assume A: " t # x \<in> condition_to_set \<B>" " s # y \<in> condition_to_set \<B>"
have inA0: "t#x \<in> A\<^sub>0" "s#y \<in> A\<^sub>0"
using A B_subset assms is_cell_decomp_subset refined_decomp_prop B_eq basic_trans_rules(31)
apply metis
using A B_subset assms is_cell_decomp_subset refined_decomp_prop B_eq basic_trans_rules(31)
by metis
have units: "a j x \<in> Units Q\<^sub>p" "a i x \<in> Units Q\<^sub>p" "a i y \<in> Units Q\<^sub>p" "a j y \<in> Units Q\<^sub>p"
using A\<^sub>0_closures(1,2) A inA0 B_subset assms A ind
unfolding ordered_ind_pairs_def ind_pairs_def by auto
show "(val (a j x \<otimes> (t \<ominus> c x) [^] j) < val (a i x \<otimes> (t \<ominus> c x) [^] i)) =
(val (a j y \<otimes> (s \<ominus> c y) [^] j) < val (a i y \<otimes> (s \<ominus> c y) [^] i))"
proof(cases "t \<ominus> c x = \<zero>")
case T: True
then have T0: "s \<ominus> c y = \<zero>"
using ind assms A val_in_B_zero[of \<B> t x j i s y] by auto
have i_pos: "i > 0"
using ind assms ordered_ind_pairs_def by auto
hence T1: "(a i y \<otimes> \<zero> [^] i) = \<zero>" "(a i x \<otimes> \<zero> [^] i) = \<zero>"
using assms A units Qp.Units_closed Qp.pow_zero Qp.r_null by auto
hence T2: "val (a i y \<otimes> \<zero> [^] i) = \<infinity>" "val (a i x \<otimes> \<zero> [^] i) = \<infinity>"
using val_def by auto
show ?thesis
using units
unfolding T0 T2 T
by (metis (no_types, opaque_lifting) Qp.Units_closed Qp.Units_not_right_zero_divisor
Qp.cring_simprules(2) Qp.cring_simprules(27) Qp.nat_pow_closed eint.distinct(2)
eint_ord_simps(4) val_def)
next
case F: False
then have 0: "s \<ominus> c y \<noteq> \<zero>"
using ind assms A val_in_B_nonzero[of \<B> t x j i s y] by auto
have 1: "val ((a i x)\<otimes>(t \<ominus> c x)[^]i) \<noteq> val ((a j x)\<otimes>(t \<ominus> c x)[^]j)"
"val ((a i y)\<otimes>(s \<ominus> c y)[^]i) \<noteq> val ((a j y)\<otimes>(s \<ominus> c y)[^]j)"
using False F 0 inA0 assms A\<^sub>0_memE[of s y j i] A\<^sub>0_memE[of t x j i] by auto
then show ?thesis
using A F0[of t x s y] by auto
qed
qed
next
have "\<And>s y t x. t # x \<in> condition_to_set \<B> \<Longrightarrow> s # y \<in> condition_to_set \<B>
\<Longrightarrow> (t \<ominus> c x = \<zero>) \<Longrightarrow> (s \<ominus> c y = \<zero>)"
proof-
fix t x s y assume A: " t # x \<in> condition_to_set \<B>" " s # y \<in> condition_to_set \<B>"
show "(t \<ominus> c x = \<zero>) \<Longrightarrow> (s \<ominus> c y = \<zero>)"
proof-
assume B: "t \<ominus> c x = \<zero>"
then have "t#x\<in> condition_to_set (Cond m b c \<zero>\<^bsub>SA m\<^esub> \<zero>\<^bsub>SA m\<^esub> closed_interval)"
unfolding condition_to_set.simps cell_def mem_Collect_eq list_tl list_hd closed_interval_def
using A
by (metis (mono_tags, lifting) A\<^sub>0_closed B_cell B_eq B_subset SA_zeroE assms(1)
cartesian_power_tail eint_ord_simps(3) list.sel(3) local.val_zero
padic_fields.condition_to_set_memE'(1) padic_fields.is_cell_decomp_subset
padic_fields_axioms refined_decomp_prop subset_iff)
hence "condition_to_set \<B> \<subseteq> condition_to_set (Cond m b c \<zero>\<^bsub>SA m\<^esub> \<zero>\<^bsub>SA m\<^esub> closed_interval)"
using assms A refined_decomp_prop
unfolding is_convex_condition_def refinement_functions_def
by blast
hence "s#y\<in> condition_to_set (Cond m b c \<zero>\<^bsub>SA m\<^esub> \<zero>\<^bsub>SA m\<^esub> closed_interval)"
using A by auto
thus "s \<ominus> c y = \<zero>"
unfolding condition_to_set.simps cell_def mem_Collect_eq list_tl list_hd closed_interval_def
by (metis Qp.cring_simprules(4) Qp_pow_ConsE(2) SA_car_closed SA_zeroE c_closed
cartesian_power_tail list.sel(1) list.sel(3) val_ineq)
qed
qed
thus "\<And>s y t x.
t # x \<in> condition_to_set \<B> \<Longrightarrow> s # y \<in> condition_to_set \<B> \<Longrightarrow>
(t \<ominus> c x = \<zero>) = (s \<ominus> c y = \<zero>)"
by metis
qed
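text \<open>Combining the three parts of \<open>val_in_B\<close>, the non-strict comparison of the values
$\mathrm{val}(a_i(x)(t \ominus c(x))^{i})$ and $\mathrm{val}(a_j(x)(t \ominus c(x))^{j})$ is also
decided uniformly on each cell of the refined decomposition; in this sense the ordering of the
terms is static on the cell.\<close>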
lemma static_order:
assumes "\<B> \<in> refined_decomp"
assumes "i \<in> inds"
assumes "j \<in> inds"
shows "\<And>s y t x . t#x \<in> condition_to_set \<B> \<Longrightarrow> s#y \<in> condition_to_set \<B> \<Longrightarrow>
val ((a i x)\<otimes>(t \<ominus> c x)[^]i) \<ge> val ((a j x)\<otimes>(t \<ominus> c x)[^]j)
\<longleftrightarrow> val ((a i y)\<otimes>(s \<ominus> c y)[^]i) \<ge> val ((a j y)\<otimes>(s \<ominus> c y)[^]j)"
proof(cases "i = j")
case True
then show "\<And>s y t x . t#x \<in> condition_to_set \<B> \<Longrightarrow> s#y \<in> condition_to_set \<B> \<Longrightarrow>
val ((a i x)\<otimes>(t \<ominus> c x)[^]i) \<ge> val ((a j x)\<otimes>(t \<ominus> c x)[^]j)
\<longleftrightarrow> val ((a i y)\<otimes>(s \<ominus> c y)[^]i) \<ge> val ((a j y)\<otimes>(s \<ominus> c y)[^]j)"
by auto
next
case ne: False
fix s y t x assume A: "t#x \<in> condition_to_set \<B>" "s#y \<in> condition_to_set \<B>"
show "val ((a i x)\<otimes>(t \<ominus> c x)[^]i) \<ge> val ((a j x)\<otimes>(t \<ominus> c x)[^]j)
\<longleftrightarrow> val ((a i y)\<otimes>(s \<ominus> c y)[^]i) \<ge> val ((a j y)\<otimes>(s \<ominus> c y)[^]j)"
proof(cases "t \<ominus> c x = \<zero>")
case True
then have T0: "s \<ominus> c y = \<zero>"
using A assms val_in_B(3)[of \<B> i j t x s y] ne by auto
show ?thesis
unfolding T0 True
apply(cases "i = 0")
apply (smt (z3) A(1) A(2) A\<^sub>0_memE'(3) B_eq B_subset ne T0 True assms(1)
assms(2) assms(3) bot_nat_0.not_eq_extremum eint_ord_simps(4) notin_closed
padic_fields.is_cell_decomp_subset padic_fields_axioms refined_decomp_prop
subset_iff val_in_B(1))
apply(cases "j = 0")
apply (smt (verit) A(1) A(2) A\<^sub>0_memE'(3) B_eq B_subset T0 True assms(1) assms(2)
assms(3) basic_trans_rules(31) bot_nat_0.not_eq_extremum eint_ord_simps(3)
padic_fields.is_cell_decomp_subset padic_fields_axioms refined_decomp_prop)
by (smt (z3) A(1) A(2) A\<^sub>0_closures(2) B_eq B_subset Qp.Units_closed
Qp.cring_simprules(27) Qp.nat_pow_zero assms(1) assms(2) assms(3)
padic_fields.is_cell_decomp_subset padic_fields_axioms refined_decomp_prop subset_iff)
next
case False
have F0: "val ((a i x)\<otimes>(t \<ominus> c x)[^]i) > val ((a j x)\<otimes>(t \<ominus> c x)[^]j)
\<longleftrightarrow> val ((a i y)\<otimes>(s \<ominus> c y)[^]i) > val ((a j y)\<otimes>(s \<ominus> c y)[^]j)"
using A assms val_in_B(1)[of \<B> i j t x s y] by auto
have F1: "val ((a i x)\<otimes>(t \<ominus> c x)[^]i) \<noteq> val ((a j x)\<otimes>(t \<ominus> c x)[^]j)"
using ne False A assms val_in_B(2)[of \<B> i j t x] by auto
have F2: "val ((a i y)\<otimes>(s \<ominus> c y)[^]i) \<noteq> val ((a j y)\<otimes>(s \<ominus> c y)[^]j)"
using False A ne assms val_in_B(3)[of \<B> i j t x s y] val_in_B(2)[of \<B> i j s y] by auto
show ?thesis
using F1 F2 F0 by auto
qed
qed
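text \<open>Since the ordering of the terms is static, each cell admits one index \<open>i\<^sub>0\<close>
whose term has minimal value at every point of the cell simultaneously: minimise at one sample
point and transport the comparisons via \<open>static_order\<close>.\<close>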
lemma exists_uniform_i0:
assumes "\<B> \<in> refined_decomp"
assumes "inds \<noteq> {}"
shows "\<exists>i\<^sub>0 \<in> inds . (\<forall>j. \<forall>t. \<forall>x. t#x \<in> condition_to_set \<B>
\<longrightarrow> val ((a i\<^sub>0 x)\<otimes>(t \<ominus> c x)[^]i\<^sub>0) \<le> val ((a j x)\<otimes>(t \<ominus> c x)[^]j))"
proof(cases "condition_to_set \<B> = {}")
case True
then show ?thesis using assms by blast
next
case False
have R: "\<And> xs. xs \<in> carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>) \<Longrightarrow> \<exists> t x. xs = t#x"
proof-
have "\<And> xs. xs \<in> carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>) \<Longrightarrow> length xs > 0"
by (simp add: cartesian_power_car_memE)
thus "\<And> xs. xs \<in> carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>) \<Longrightarrow> \<exists> t x. xs = t#x"
by (meson cartesian_power_car_memE length_Suc_conv)
qed
have bsub: "condition_to_set \<B> \<subseteq> carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>)"
using assms refined_decomp_prop
by (metis is_cellI is_cell_decompE(3) is_cell_decompE(4) is_cell_subset)
then obtain t x where tx_def: "t#x \<in> condition_to_set \<B>"
using False R by blast
have "\<exists>i\<^sub>0 \<in> inds. val ((a i\<^sub>0 x)\<otimes>(t \<ominus> c x)[^]i\<^sub>0) = (MIN i\<in>inds. (val ( (a i x)\<otimes>(t \<ominus> c x)[^]i)))"
using assms Min_in inds_finite
by (smt (verit, best) finite_imageI imageE image_is_empty)
then obtain i\<^sub>0 where i\<^sub>0_def: "i\<^sub>0 \<in> inds \<and>
val ((a i\<^sub>0 x)\<otimes>(t \<ominus> c x)[^]i\<^sub>0) = (MIN i\<in>inds. (val ( (a i x)\<otimes>(t \<ominus> c x)[^]i)))"
by blast
have i\<^sub>0_in: "i\<^sub>0 \<in> inds"
using i\<^sub>0_def by auto
have i\<^sub>0_min: "\<And> j. j \<in> inds \<Longrightarrow> val ((a i\<^sub>0 x)\<otimes>(t \<ominus> c x)[^]i\<^sub>0) \<le> val ( (a j x)\<otimes>(t \<ominus> c x)[^]j)"
using inds_finite i\<^sub>0_in i\<^sub>0_def by auto
have d: "\<forall>j .
\<forall>s y. s # y \<in> condition_to_set \<B> \<longrightarrow>
val (a i\<^sub>0 y \<otimes> (s \<ominus> c y) [^] i\<^sub>0) \<le> val (a j y \<otimes> (s \<ominus> c y) [^] j)"
proof-
have d0: "\<And> j s y. j \<notin> inds \<Longrightarrow> s # y \<in> condition_to_set \<B> \<Longrightarrow>
val (a i\<^sub>0 y \<otimes> (s \<ominus> c y) [^] i\<^sub>0) \<le> val (a j y \<otimes> (s \<ominus> c y) [^] j)"
proof-
fix j s y assume A: " j \<notin> inds" "s # y \<in> condition_to_set \<B>"
have diff: "s \<ominus> c y \<in> carrier Q\<^sub>p"
using A
by (metis (no_types, lifting) Qp.cring_simprules(4) Qp_pow_ConsE(2) SA_car_closed bsub
c_closed cartesian_power_tail list.sel(1) list.sel(3) subsetD)
have y_closed: "y \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)"
using A bsub cartesian_power_tail by fastforce
have zero: "a j y = \<zero>"
using A y_closed inds_non_memE[of j y] y_closed by auto
have inf: "val (a j y \<otimes> (s \<ominus> c y) [^] j) = \<infinity>"
using diff unfolding zero val_def by auto
show "val (a i\<^sub>0 y \<otimes> (s \<ominus> c y) [^] i\<^sub>0) \<le> val (a j y \<otimes> (s \<ominus> c y) [^] j)"
unfolding inf by auto
qed
have d1: "\<And> j s y. j \<in> inds \<Longrightarrow> s # y \<in> condition_to_set \<B> \<Longrightarrow>
val (a i\<^sub>0 y \<otimes> (s \<ominus> c y) [^] i\<^sub>0) \<le> val (a j y \<otimes> (s \<ominus> c y) [^] j)"
using assms i\<^sub>0_min tx_def i\<^sub>0_in static_order[of \<B> _ i\<^sub>0] i\<^sub>0_in by smt
show ?thesis
using d0 d1 by smt
qed
show ?thesis
by(rule bexI[of _ i\<^sub>0], rule d, rule i\<^sub>0_in)
qed
lemma exists_uniform_i:
assumes "\<B> \<in> refined_decomp"
shows "\<exists>i\<^sub>0 . (\<forall>j. \<forall>t. \<forall>x. t#x \<in> condition_to_set \<B>
\<longrightarrow> val ((a i\<^sub>0 x)\<otimes>(t \<ominus> c x)[^]i\<^sub>0) \<le> val ((a j x)\<otimes>(t \<ominus> c x)[^]j))"
proof(cases "inds = {}")
case True
have "\<And> j t x. t#x \<in> condition_to_set \<B> \<Longrightarrow> val ((a j x)\<otimes>(t \<ominus> c x)[^]j) = \<infinity>"
proof-
fix j t x assume A: "t#x \<in> condition_to_set \<B>"
have 0: "t \<ominus> c x \<in> carrier Q\<^sub>p" "x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)" "t \<in> carrier Q\<^sub>p"
using A assms refined_decomp_closure by auto
have 1: "a j x = \<zero>"
using A True inds_non_memE 0 by auto
show "val (a j x \<otimes> (t \<ominus> c x) [^] j) = \<infinity> "
using 0 unfolding 1 val_def by auto
qed
thus ?thesis by auto
next
case False
then show ?thesis using assms exists_uniform_i0 by blast
qed
end
context common_refinement_locale
begin
definition has_minimal_i where
"has_minimal_i \<B> = (\<exists>i\<^sub>0 . (\<forall>j. \<forall>t. \<forall>x. t#x \<in> condition_to_set \<B>
\<longrightarrow> val ((a i\<^sub>0 x)\<otimes>(t \<ominus> c x)[^]i\<^sub>0) \<le> val ((a j x)\<otimes>(t \<ominus> c x)[^]j)))"
text\<open>This lemma statement is long-winded because we need to simultaneously extract a piece of
information relevant to the proof of cell decomposition theorem $I$ as well as one relevant
to theorem $II$.\<close>
lemma A\<^sub>0_comp_minimal_i_decomp:
assumes "inds \<noteq> {}"
shows "\<exists> S. is_cell_decomp m S (condition_to_set \<C> - A\<^sub>0) \<and>
(\<forall> \<B> \<in> S. has_minimal_i \<B> \<and>
(\<exists> \<phi> i\<^sub>0. \<phi> \<in> Units (SA m) \<and> center \<B> = c \<and> l_bound \<B> = \<phi> \<and>
u_bound \<B> = \<phi> \<and> boundary_condition \<B> = closed_interval \<and>
(\<forall>j. \<forall>t. \<forall>x.
t#x \<in> condition_to_set \<B> \<longrightarrow>
val ((a i\<^sub>0 x)\<otimes>(\<phi> x)[^]i\<^sub>0) \<le> val ((a j x)\<otimes>(\<phi> x)[^]j))))"
proof-
obtain S where S_def: "(is_cell_decomp m S (condition_to_set \<C> - A\<^sub>0) \<and>
(\<forall>B\<in>S. (\<exists> \<phi>. \<phi> \<in> Units (SA m) \<and> center B = c \<and> l_bound B = \<phi> \<and>
u_bound B = \<phi> \<and> boundary_condition B = closed_interval)))"
using A\<^sub>0_comp_decomp by blast
show ?thesis
apply(rule refine_each_cell[of _ S])
using S_def apply blast
proof-
fix B assume A: "B \<in> S"
obtain b where b_def: "b = fibre_set B"
by blast
obtain \<phi> where \<phi>_def: " \<phi> \<in> Units (SA m) \<and> center B = c \<and> l_bound B = \<phi> \<and> u_bound B = \<phi> \<and>
boundary_condition B = closed_interval"
using A S_def by blast
have B_eq: "B = Cond m b c \<phi> \<phi> closed_interval"
using A \<phi>_def b_def condition_decomp' S_def is_cell_decompE(4)
by metis
have \<phi>_closed: "\<phi> \<in> carrier (SA m)"
using \<phi>_def SA_Units_closed by blast
have \<phi>_nonzero: "\<And>x. x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>) \<Longrightarrow> \<phi> x \<noteq> \<zero>"
using \<phi>_def SA_Units_memE' by blast
have \<phi>_nonzero': "\<And>x. x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>) \<Longrightarrow> \<phi> x \<in> nonzero Q\<^sub>p"
using \<phi>_closed \<phi>_nonzero SA_car_memE(3) unfolding nonzero_def by blast
have B_cell_cond: "is_cell_condition B"
using A S_def is_cell_decompE by meson
have B0_semialg: "is_semialgebraic m b"
using B_eq B_cell_cond is_cell_conditionE by blast
obtain H where H_def: "H = (\<lambda>i. a i \<otimes>\<^bsub>SA m\<^esub>\<phi>[^]\<^bsub>SA m\<^esub> i)"
by blast
have H_closed: "\<And> i. i \<in> inds \<Longrightarrow> H i \<in> carrier (SA m)"
unfolding H_def using inds_memE \<phi>_closed SA_Units_closed[of _ m]
by blast
have H_unit: "\<And>i. i \<in> inds \<Longrightarrow> H i \<in> Units (SA m)"
unfolding H_def using inds_memE \<phi>_def R.Units_pow_closed by blast
have H_eval: "\<And>x i. i \<in> inds \<Longrightarrow> x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>) \<Longrightarrow> H i x = a i x \<otimes> (\<phi> x [^] i)"
unfolding H_def using \<phi>_closed inds_memE a_closed SA_mult SA_nat_pow by presburger
have H_nonzero: "\<And>x i. i \<in> inds \<Longrightarrow> x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>) \<Longrightarrow> H i x \<noteq> \<zero>"
using H_unit SA_Units_memE' by blast
have H_nonzero': "\<And>x i. i \<in> inds \<Longrightarrow> x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>) \<Longrightarrow> H i x \<in> nonzero Q\<^sub>p"
unfolding nonzero_def mem_Collect_eq using SA_car_memE(3) H_closed H_nonzero by blast
have H_ord: "\<And>x i. i \<in> inds \<Longrightarrow> x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>) \<Longrightarrow> ord (H i x) = ord (a i x) + i*ord(\<phi> x)"
using H_eval \<phi>_nonzero inds_memE ord_mult nonzero_nat_pow_ord
by (metis Qp_nat_pow_nonzero SA_Units_nonzero \<phi>_nonzero')
have H_val: "\<And>x i. i \<in> inds \<Longrightarrow> x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>) \<Longrightarrow> val (H i x) = val (a i x) + val (\<phi> x [^] i)"
using \<phi>_nonzero inds_memE H_eval val_mult
by (metis Qp_nat_pow_nonzero SA_Units_nonzero \<phi>_nonzero' val_mult0)
have b_semialg: "is_semialgebraic m b"
using b_def B0_semialg by linarith
have "\<exists>Bs. finite Bs \<and> Bs partitions b \<and> (\<forall>b\<in>Bs. is_semialgebraic m b \<and> static_order_type (H ` inds) b)"
apply(rule static_order_type_decomp[of "H ` inds" m b])
using inds_finite apply blast
using H_unit apply blast
using b_semialg by auto
then obtain Bs0 where Bs0_def:
"finite Bs0 \<and> Bs0 partitions b \<and> (\<forall>b\<in>Bs0. is_semialgebraic m b \<and>
static_order_type (H ` inds) b)"
by blast
obtain Bs where Bs_def: "Bs = Bs0 - {{}}"
by blast
have Bs_finite: "finite Bs"
using Bs_def Bs0_def by blast
have Bs_semialg: "\<And>b. b \<in> Bs \<Longrightarrow> is_semialgebraic m b"
using Bs_def Bs0_def by blast
have Bs_partitions: "Bs partitions b"
unfolding Bs_def apply(rule is_partitionI)
using Bs0_def is_partitionE Generated_Boolean_Algebra.disjoint_def apply fastforce
using Bs0_def is_partitionE(2)[of Bs0 b] by auto
have Bs_covers: "\<Union> Bs = b"
using Bs_partitions is_partitionE[of Bs b] by auto
have Bs_static_order_type: "\<And>b'. b' \<in> Bs \<Longrightarrow> static_order_type (H ` inds) b'"
using Bs_def Bs0_def by auto
have B_vals: "\<And>x. x \<in> condition_to_set B \<Longrightarrow> val (hd x \<ominus> c (tl x)) = val (\<phi> (tl x))"
apply(rule basic_trans_rules(24))
unfolding B_eq condition_to_set.simps cell_def mem_Collect_eq closed_interval_def
apply blast by blast
obtain S' where S'_def: "S' = refine_fibres B ` Bs"
by blast
have S'_decomp: "is_cell_decomp m S' (condition_to_set B)"
apply(unfold S'_def, rule partition_to_cell_decomp[of B m b c \<phi> \<phi> closed_interval] )
unfolding are_semialgebraic_def
using B_cell_cond B_eq Bs_partitions Bs_finite Bs_semialg by auto
have "(\<forall>\<B>\<in>S'. has_minimal_i \<B> \<and>
(\<exists>\<phi> i\<^sub>0. \<phi> \<in> Units (SA m) \<and>
center \<B> = c \<and>
l_bound \<B> = \<phi> \<and> u_bound \<B> = \<phi> \<and> boundary_condition \<B> = closed_interval \<and>
(\<forall>j. \<forall>t. \<forall>x.
t#x \<in> condition_to_set \<B> \<longrightarrow>
val ((a i\<^sub>0 x)\<otimes>(\<phi> x)[^]i\<^sub>0) \<le> val ((a j x)\<otimes>(\<phi> x)[^]j))))"
proof
fix \<B> assume a: "\<B> \<in> S'"
obtain b0 where b0_def: "b0 = fibre_set \<B>"
by blast
have b0_in: "b0 \<in> Bs"
using b0_def a unfolding S'_def refine_fibres_def by auto
have \<phi>_fact: " \<phi> \<in> Units (SA m) \<and> center \<B> = c \<and> l_bound \<B> = \<phi> \<and> u_bound \<B> = \<phi> \<and>
boundary_condition \<B> = closed_interval"
using a S'_def \<phi>_def unfolding B_eq refine_fibres_def
by auto
show "has_minimal_i \<B> \<and> (\<exists>\<phi> i\<^sub>0. \<phi> \<in> Units (SA m) \<and>
center \<B> = c \<and>
l_bound \<B> = \<phi> \<and> u_bound \<B> = \<phi> \<and> boundary_condition \<B> = closed_interval \<and>
(\<forall>j. \<forall>t. \<forall>x.
t#x \<in> condition_to_set \<B> \<longrightarrow>
val ((a i\<^sub>0 x)\<otimes>(\<phi> x)[^]i\<^sub>0) \<le> val ((a j x)\<otimes>(\<phi> x)[^]j)))"
proof(cases "condition_to_set \<B> = {}")
case True
show ?thesis
using \<phi>_fact unfolding has_minimal_i_def True by auto
next
case False
obtain xs where xs_def: "xs \<in> condition_to_set \<B>"
using False by blast
have xs_closed: "xs \<in> carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>)"
by (meson xs_def a S'_decomp is_cell_decompE is_cell_decomp_subset subset_iff)
obtain t x where tx_def: "xs = t#x"
by (metis xs_closed Suc_length_conv cartesian_power_car_memE)
have x_closed: "x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)"
using tx_def xs_closed Qp_pow_ConsE(1) by force
have x_in_b0: "x \<in> b0"
using xs_def b0_def unfolding tx_def
by (metis cell_formula(2) condition_decomp' condition_to_set.simps)
have ex: "\<exists>i\<^sub>0 \<in> inds. val ((a i\<^sub>0 x)\<otimes>(\<phi> x)[^]i\<^sub>0) =
(MIN i\<in>inds. (val ((a i x)\<otimes>(\<phi> x)[^]i)))"
by (smt (verit, best) assms Min_in inds_finite finite_imageI imageE image_is_empty)
then obtain i\<^sub>0 where i\<^sub>0_def: "i\<^sub>0 \<in> inds \<and> val ((a i\<^sub>0 x)\<otimes>(\<phi> x)[^]i\<^sub>0) =
(MIN i\<in>inds. (val ((a i x)\<otimes>(\<phi> x)[^]i)))"
by blast
have i\<^sub>0_ineq: "\<And> j. j \<in> inds \<Longrightarrow> val ((a i\<^sub>0 x)\<otimes>(\<phi> x)[^]i\<^sub>0) \<le> val ((a j x)\<otimes>(\<phi> x)[^]j)"
proof- fix j assume inds: "j \<in> inds"
show " val (a i\<^sub>0 x \<otimes> \<phi> x [^] i\<^sub>0) \<le> val (a j x \<otimes> \<phi> x [^] j)"
using inds i\<^sub>0_def MinE inds_finite by auto
qed
have i\<^sub>0_ineq': "\<And> j s y. s#y \<in> condition_to_set \<B> \<Longrightarrow>
val ((a i\<^sub>0 y)\<otimes>(s \<ominus> c y)[^]i\<^sub>0) \<le> val ((a j y)\<otimes>(s \<ominus> c y)[^]j)"
"\<And> j s y. s#y \<in> condition_to_set \<B> \<Longrightarrow>
val ((a i\<^sub>0 y)\<otimes>(\<phi> y)[^]i\<^sub>0) \<le> val ((a j y)\<otimes>(\<phi> y)[^]j)"
proof-
fix j s y assume b: " s#y \<in> condition_to_set \<B>"
have sy_closed: "s#y \<in> carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>)"
by (meson b a S'_decomp is_cell_decompE is_cell_decomp_subset subset_iff)
have y_closed: "y \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)"
using sy_closed Qp_pow_ConsE(1) by force
have y_in_b0: "y \<in> b0"
by (metis b b0_def cell_formula(2) condition_decomp' condition_to_set.simps)
have diff: "s \<ominus> c y \<in> carrier Q\<^sub>p"
using y_closed b
by (metis Qp.cring_simprules(4) Qp_pow_ConsE(2) SA_car_closed list.sel(1)
sy_closed common_refinement_locale.c_closed common_refinement_locale_axioms)
have phiy: "\<phi> y \<in> carrier Q\<^sub>p"
using y_closed SA_car_closed \<phi>_closed by auto
have "val (a i\<^sub>0 y \<otimes> (s \<ominus> c y) [^] i\<^sub>0) \<le> val (a j y \<otimes> (s \<ominus> c y) [^] j) \<and>
val ((a i\<^sub>0 y)\<otimes>(\<phi> y)[^]i\<^sub>0) \<le> val ((a j y)\<otimes>(\<phi> y)[^]j)"
proof(cases "j \<in> inds")
case True
have 0: "val (H i\<^sub>0 x) \<le> val (H j x)"
unfolding H_def using True i\<^sub>0_ineq[of j] x_closed
using H_def H_eval i\<^sub>0_def by fastforce
hence i\<^sub>0_inds: "i\<^sub>0 \<in> inds"
using i\<^sub>0_def True x_closed inds_non_memE[of i\<^sub>0 x] unfolding H_def
by force
hence 1: "val (H i\<^sub>0 y) \<le> val (H j y)"
using Bs_static_order_type[of b0] b0_in i\<^sub>0_inds True
by (smt (z3) 0 basic_trans_rules(20) image_eqI notin_closed
static_order_type_def x_in_b0 y_in_b0)
have 2: "val (s \<ominus> c y) = val (\<phi> y)"
using B_vals[of "t#x"] B_vals[of "s#y"] b xs_def a S'_decomp
unfolding list_tl list_hd tx_def
by (meson basic_trans_rules(31) is_cell_decomp_subset)
have 3: "H i\<^sub>0 y = (a i\<^sub>0 y)\<otimes>(\<phi> y)[^]i\<^sub>0" "H j y = (a j y)\<otimes>(\<phi> y)[^]j"
using H_eval i\<^sub>0_inds y_closed True by auto
have un: "\<phi> y \<in> Units Q\<^sub>p"
using y_closed Units_eq_nonzero \<phi>_nonzero' by blast
show "val (a i\<^sub>0 y \<otimes> (s \<ominus> c y) [^] i\<^sub>0) \<le> val (a j y \<otimes> (s \<ominus> c y) [^] j) \<and>
val ((a i\<^sub>0 y)\<otimes>(\<phi> y)[^]i\<^sub>0) \<le> val ((a j y)\<otimes>(\<phi> y)[^]j)"
using 1 2 diff H_val H_ord un
by (smt (verit, ccfv_SIG) "3"(1) "3"(2) Qp.nat_pow_closed True Units_eq_nonzero
a_eval equal_val_imp_equal_ord(2) i\<^sub>0_inds val_mult val_of_power y_closed)
next
case False
have F1: "a j y = \<zero>"
using False inds_non_memE y_closed by auto
show "val (a i\<^sub>0 y \<otimes> (s \<ominus> c y) [^] i\<^sub>0) \<le> val (a j y \<otimes> (s \<ominus> c y) [^] j) \<and>
val ((a i\<^sub>0 y)\<otimes>(\<phi> y)[^]i\<^sub>0) \<le> val ((a j y)\<otimes>(\<phi> y)[^]j)"
using diff phiy unfolding F1 val_def by auto
qed
thus "val (a i\<^sub>0 y \<otimes> (s \<ominus> c y) [^] i\<^sub>0) \<le> val (a j y \<otimes> (s \<ominus> c y) [^] j)"
" val ((a i\<^sub>0 y)\<otimes>(\<phi> y)[^]i\<^sub>0) \<le> val ((a j y)\<otimes>(\<phi> y)[^]j)"
by auto
qed
thus "has_minimal_i \<B> \<and>
(\<exists>\<phi> i\<^sub>0. \<phi> \<in> Units (SA m) \<and> center \<B> = c \<and> l_bound \<B> = \<phi> \<and> u_bound \<B> = \<phi> \<and>
boundary_condition \<B> = closed_interval \<and>
(\<forall>j t x. t # x \<in> condition_to_set \<B> \<longrightarrow>
val (a i\<^sub>0 x \<otimes> \<phi> x [^] i\<^sub>0) \<le> val (a j x \<otimes> \<phi> x [^] j)))"
by (metis \<phi>_fact has_minimal_i_def)
qed
qed
thus "\<exists>S. is_cell_decomp m S (condition_to_set B) \<and>
(\<forall>\<B>\<in>S. has_minimal_i \<B> \<and>
(\<exists>\<phi> i\<^sub>0. \<phi> \<in> Units (SA m) \<and>
center \<B> = c \<and>
l_bound \<B> = \<phi> \<and> u_bound \<B> = \<phi> \<and> boundary_condition \<B> = closed_interval \<and>
(\<forall>j. \<forall>t. \<forall>x.
t#x \<in> condition_to_set \<B> \<longrightarrow>
val ((a i\<^sub>0 x)\<otimes>(\<phi> x)[^]i\<^sub>0) \<le> val ((a j x)\<otimes>(\<phi> x)[^]j))))"
using S'_decomp by auto
qed
qed
lemma A\<^sub>0_minimal_i_decomp:
assumes "inds \<noteq> {}"
shows "\<exists> S. is_cell_decomp m S A\<^sub>0 \<and> (\<forall> \<B> \<in> S. center \<B> = c \<and> has_minimal_i \<B>)"
proof-
obtain S where S_def: " is_cell_decomp m S A\<^sub>0 \<and>
(\<forall>B\<in>S. center B = c \<and>
(\<exists>N. SA_poly_ubounded p m f (center B) (condition_to_set B) N))"
using A\<^sub>0_decomp assms by auto
show ?thesis
proof(rule refine_each_cell[of m S])
show " is_cell_decomp m S A\<^sub>0"
using S_def by auto
fix B assume A: "B \<in> S"
have B_center: "center B = c"
using S_def A by auto
have sub: "condition_to_set B \<subseteq> A\<^sub>0"
using A S_def is_cell_decomp_subset[of m S A\<^sub>0] by auto
have cell: "is_cell_condition B"
using A S_def is_cell_decompE by auto
obtain b b1 b2 J where params: "B = Cond m b c b1 b2 J"
using A S_def B_center condition_decomp' is_cell_decompE(4) by blast
have 0: "A\<^sub>0_refinement p d \<C> A c a1 a2 I f m B b b1 b2 J"
using sub cell params
by (meson A\<^sub>0_refinement.intro A\<^sub>0_refinement_axioms.intro common_refinement_locale_axioms)
show "\<exists>S. is_cell_decomp m S (condition_to_set B) \<and> (\<forall>\<B>\<in>S. center \<B> = c \<and> has_minimal_i \<B>)"
using 0 A\<^sub>0_refinement.exists_uniform_i Q\<^sub>p_def Z\<^sub>p_def A\<^sub>0_refinement.refined_decomp_prop
by (smt (z3) params common_refinement_locale.has_minimal_i_def common_refinement_locale_axioms)
qed
qed
lemma \<C>_comp_minimal_i_decomp:
shows "\<exists> S. is_cell_decomp m S (condition_to_set \<C>) \<and> (\<forall> \<B> \<in> S. center \<B> = c \<and> has_minimal_i \<B>)"
proof-
have A: "is_cell_decomp m {\<C>} (condition_to_set \<C>)"
using \<C>_cond \<C>_def arity.simps condition_to_set_cell_decomp by blast
show ?thesis
proof(cases "inds = {}")
case True
have "\<And> t x j. t # x \<in> condition_to_set \<C> ==>
val (a j x \<otimes> (t \<ominus> c x) [^] j) = \<infinity>"
proof-
fix t x j
assume A: "t#x \<in> condition_to_set \<C>"
have 0: "a j x = \<zero>"
using inds_non_memE A unfolding True
by (metis \<C>_memE(1) empty_iff list.sel(3))
have 1: "(t \<ominus> c x) \<in> carrier Q\<^sub>p"
using A
by (metis Qp.cring_simprules(4) SA_car_closed \<C>_cond \<C>_def \<C>_mem_hd cartesian_power_tail
cell_condition_set_memE(1) list.sel(1) list.sel(3) common_refinement_locale.c_closed common_refinement_locale_axioms)
show "val (a j x \<otimes> (t \<ominus> c x) [^] j) = \<infinity>"
using 1 unfolding 0 val_def by auto
qed
hence "has_minimal_i \<C>"
unfolding has_minimal_i_def by auto
thus "\<exists>S. is_cell_decomp m S (condition_to_set \<C>) \<and> (\<forall>\<B>\<in>S. center \<B> = c \<and> has_minimal_i \<B>)"
using A \<C>_def center.simps by auto
next
case False
show ?thesis
proof(rule binary_refinement[of _ "{\<C>}"], rule A)
have "is_semialgebraic (Suc m) A\<^sub>0 \<and>
A\<^sub>0 \<subseteq> condition_to_set \<C> \<and>
(\<exists>S. is_cell_decomp m S A\<^sub>0 \<and> (\<forall>\<B>\<in>S. center \<B> = c \<and> has_minimal_i \<B>)) \<and>
(\<exists>S. is_cell_decomp m S (condition_to_set \<C> - A\<^sub>0) \<and>
(\<forall>\<B>\<in>S. center \<B> = c \<and> has_minimal_i \<B>))"
using A\<^sub>0_semialg A\<^sub>0_def A\<^sub>0_minimal_i_decomp A\<^sub>0_comp_minimal_i_decomp False by auto
thus "\<And>C. C \<in> {\<C>} \<Longrightarrow>
\<exists>C0. is_semialgebraic (Suc m) C0 \<and>
C0 \<subseteq> condition_to_set C \<and>
(\<exists>S. is_cell_decomp m S C0 \<and> (\<forall>\<B>\<in>S. center \<B> = c \<and> has_minimal_i \<B>)) \<and>
(\<exists>S. is_cell_decomp m S (condition_to_set C - C0) \<and>
(\<forall>\<B>\<in>S. center \<B> = c \<and> has_minimal_i \<B>))" by auto
qed
qed
qed
end
end
|
#pragma once
#include <gsl/gsl>
#include <memory>
struct SpeexResamplerState_;
typedef struct SpeexResamplerState_ SpeexResamplerState;
namespace Halley
{
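// Result of one resampling call, counted in samples: how much of the source
// span was consumed and how much of the destination span was filled.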
struct AudioResamplerResult
{
size_t nRead;
size_t nWritten;
};
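// Streaming sample-rate converter between the two rates given to the
// constructor, backed by the Speex resampler (the opaque
// SpeexResamplerState declared above).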
class AudioResampler
{
public:
AudioResampler(int from, int to, int nChannels, float quality = 1.0f);
~AudioResampler();
AudioResamplerResult resample(gsl::span<const float> src, gsl::span<float> dst, size_t channel);
AudioResamplerResult resampleInterleaved(gsl::span<const float> src, gsl::span<float> dst);
AudioResamplerResult resampleInterleaved(gsl::span<const short> src, gsl::span<short> dst);
AudioResamplerResult resampleNoninterleaved(gsl::span<const float> src, gsl::span<float> dst, const size_t numChannels);
size_t numOutputSamples(size_t numInputSamples) const;
private:
std::unique_ptr<SpeexResamplerState, void(*)(SpeexResamplerState*)> resampler;
size_t nChannels;
int from;
int to;
};
}
|
/-
unit tests suggested by Kevin Buzzard
-/
import data.complex.basic -- ℕ, ℤ, ℚ, ℝ, ℂ
import norm_cast
constants (an bn cn dn : ℕ) (az bz cz dz : ℤ) (aq bq cq dq : ℚ)
constants (ar br cr dr : ℝ) (ac bc cc dc : ℂ)
example : (an : ℤ) = bn → an = bn := λ h, by exact_mod_cast h -- by simp
example : an = bn → (an : ℤ) = bn := λ h, by exact_mod_cast h -- by simp
example : az = bz ↔ (az : ℚ) = bz := by norm_cast -- by simp
example : (aq : ℝ) = br ↔ (aq : ℂ) = br := by norm_cast
example : (an : ℚ) = bz ↔ (an : ℂ) = bz := by norm_cast
example : (((an : ℤ) : ℚ) : ℝ) = bq ↔ ((an : ℚ) : ℂ) = (bq : ℝ) :=
by norm_cast
example : (an : ℤ) < bn ↔ an < bn := by norm_cast -- by simp
example : (an : ℚ) < bz ↔ (an : ℝ) < bz := by norm_cast
example : ((an : ℤ) : ℝ) < bq ↔ (an : ℚ) < bq := by norm_cast
example : (an : ℤ) ≠ (bn : ℤ) ↔ an ≠ bn := by norm_cast -- by simp
-- zero and one cause special problems
example : 0 < (bq : ℝ) ↔ 0 < bq := by norm_cast -- by simp
example : az > (1 : ℕ) ↔ az > 1 := by norm_cast -- by simp
example : az > (0 : ℕ) ↔ az > 0 := by norm_cast -- by simp
example : (an : ℤ) ≠ 0 ↔ an ≠ 0 := by norm_cast -- by simp
example : aq < (1 : ℕ) ↔ (aq : ℝ) < (1 : ℤ) := by norm_cast
example : (an : ℤ) + bn = (an + bn : ℕ) := by norm_cast -- by simp
example : (an : ℂ) + bq = ((an + bq) : ℚ) := by norm_cast -- by simp
example : (((an : ℤ) : ℚ) : ℝ) + bn = (an + (bn : ℤ)) := by norm_cast -- by simp
example : (((((an : ℚ) : ℝ) * bq) + (cq : ℝ) ^ dn) : ℂ) = (an : ℂ) * (bq : ℝ) + cq ^ dn :=
by norm_cast -- by simp
example : ((an : ℤ) : ℝ) < bq ∧ (cr : ℂ) ^ 2 = dz ↔ (an : ℚ) < bq ∧ ((cr ^ 2) : ℂ) = dz :=
by norm_cast
example : (an : ℤ) = 1 → an = 1 := λ h, by exact_mod_cast h
example : (an : ℤ) < 5 → an < 5 := λ h, by exact_mod_cast h
example : an < 5 → (an : ℤ) < 5 := λ h, by exact_mod_cast h
example : (an + 5) < 10 → (an : ℤ) + 5 < 10 := λ h, by exact_mod_cast h
example : (an : ℤ) + 5 < 10 → (an + 5) < 10 := λ h, by exact_mod_cast h
example : ((an + 5 : ℕ) : ℤ) < 10 → an + 5 < 10 := λ h, by exact_mod_cast h
example : an + 5 < 10 → ((an + 5 : ℕ) : ℤ) < 10 := λ h, by exact_mod_cast h
example (h : (cz : ℚ) = az / bz) : (cz : ℝ) = az / bz :=
by rw_mod_cast [← rat.cast_coe_int az, h]
example (h : bn ≤ an) : an - bn = 1 ↔ (an - bn : ℤ) = 1 :=
by norm_cast
|
lemma closure_approachable_le: fixes S :: "'a::metric_space set" shows "x \<in> closure S \<longleftrightarrow> (\<forall>e>0. \<exists>y\<in>S. dist y x \<le> e)"
|
/*
ODE: a program to get optime Runge-Kutta and multi-steps methods.
Copyright 2011-2019, Javier Burguete Tolosa.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY Javier Burguete Tolosa ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
SHALL Javier Burguete Tolosa OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* \file rk_6_2.c
* \brief Source file to optimize Runge-Kutta 6 steps 2nd order methods.
* \author Javier Burguete Tolosa.
* \copyright Copyright 2011-2019.
*/
#define _GNU_SOURCE
#include <string.h>
#include <math.h>
#include <libxml/parser.h>
#include <glib.h>
#include <libintl.h>
#include <gsl/gsl_rng.h>
#include "config.h"
#include "utils.h"
#include "optimize.h"
#include "rk.h"
#include "rk_6_2.h"
#define DEBUG_RK_6_2 0 ///< macro to debug.
/**
* Function to obtain the coefficients of a 6 steps 2nd order Runge-Kutta
* method.
*/
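/*
 * Sketch of the order conditions involved (our reading of the code, not part
 * of the original documentation): with weights b_{6i} and nodes t_i, second
 * order accuracy amounts to
 *   sum_i b_{6i} = 1   and   sum_i b_{6i} t_i = 1/2.
 * All nodes and internal coefficients are drawn from the random data; only
 * b61 is solved from the second condition, and rk_b_6 presumably completes
 * the first column of the tableau so that the consistency condition holds.
 */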
int
rk_tb_6_2 (Optimize * optimize) ///< Optimize struct.
{
long double *tb, *r;
#if DEBUG_RK_6_2
fprintf (stderr, "rk_tb_6_2: start\n");
#endif
tb = optimize->coefficient;
r = optimize->random_data;
t6 (tb) = 1.L;
t1 (tb) = r[0];
t2 (tb) = r[1];
b21 (tb) = r[2];
t3 (tb) = r[3];
b31 (tb) = r[4];
b32 (tb) = r[5];
t4 (tb) = r[6];
b41 (tb) = r[7];
b42 (tb) = r[8];
b43 (tb) = r[9];
t5 (tb) = r[10];
b51 (tb) = r[11];
b52 (tb) = r[12];
b53 (tb) = r[13];
b54 (tb) = r[14];
b62 (tb) = r[15];
b63 (tb) = r[16];
b64 (tb) = r[17];
b65 (tb) = r[18];
b61 (tb) = (0.5L - b62 (tb) * t2 (tb) - b63 (tb) * t3 (tb)
- b64 (tb) * t4 (tb) - b65 (tb) * t5 (tb)) / t1 (tb);
if (isnan (b61 (tb)))
return 0;
rk_b_6 (tb);
#if DEBUG_RK_6_2
rk_print_tb (optimize, "rk_tb_6_2", stderr);
fprintf (stderr, "rk_tb_6_2: end\n");
#endif
return 1;
}
/**
 * Function to obtain the coefficients of a 6 steps 2nd order, 3rd order in
 * equations depending only on time, Runge-Kutta method.
 */
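/*
 * Sketch of the additional condition (our reading): for equations depending
 * only on time, third order additionally requires
 *   sum_i b_{6i} t_i^2 = 1/3.
 * Solving this together with sum_i b_{6i} t_i = 1/2 for the pair (b64, b65)
 * by eliminating b65 yields exactly the two formulas used below.
 */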
int
rk_tb_6_2t (Optimize * optimize) ///< Optimize struct.
{
long double *tb, *r;
#if DEBUG_RK_6_2
fprintf (stderr, "rk_tb_6_2t: start\n");
#endif
tb = optimize->coefficient;
r = optimize->random_data;
t6 (tb) = 1.L;
t1 (tb) = r[0];
t2 (tb) = r[1];
b21 (tb) = r[2];
t3 (tb) = r[3];
b31 (tb) = r[4];
b32 (tb) = r[5];
t4 (tb) = r[6];
b41 (tb) = r[7];
b42 (tb) = r[8];
b43 (tb) = r[9];
t5 (tb) = r[10];
b51 (tb) = r[11];
b52 (tb) = r[12];
b53 (tb) = r[13];
b54 (tb) = r[14];
b61 (tb) = r[15];
b62 (tb) = r[16];
b63 (tb) = r[17];
b64 (tb) = (1.L / 3.L - 0.5L * t5 (tb)
- b61 (tb) * t1 (tb) * (t1 (tb) - t5 (tb))
- b62 (tb) * t2 (tb) * (t2 (tb) - t5 (tb))
- b63 (tb) * t3 (tb) * (t3 (tb) - t5 (tb)))
/ (t4 (tb) * (t4 (tb) - t5 (tb)));
if (isnan (b64 (tb)))
return 0;
b65 (tb) = (0.5L - b61 (tb) * t1 (tb) - b62 (tb) * t2 (tb)
- b63 (tb) * t3 (tb) - b64 (tb) * t4 (tb)) / t5 (tb);
if (isnan (b65 (tb)))
return 0;
rk_b_6 (tb);
#if DEBUG_RK_6_2
rk_print_tb (optimize, "rk_tb_6_2t", stderr);
fprintf (stderr, "rk_tb_6_2t: end\n");
#endif
return 1;
}
/**
* Function to obtain the coefficients of a 6 steps 1st-2nd order Runge-Kutta
* pair.
*/
int
rk_tb_6_2p (Optimize * optimize) ///< Optimize struct.
{
long double *tb;
#if DEBUG_RK_6_2
fprintf (stderr, "rk_tb_6_2p: start\n");
#endif
if (!rk_tb_6_2 (optimize))
return 0;
tb = optimize->coefficient;
e61 (tb) = e62 (tb) = e63 (tb) = e64 (tb) = 0.L;
rk_e_6 (tb);
#if DEBUG_RK_6_2
fprintf (stderr, "rk_tb_6_2p: end\n");
#endif
return 1;
}
/**
 * Function to obtain the coefficients of a 6 steps 1st-2nd order, 1st-3rd order
 * in equations depending only on time, Runge-Kutta pair.
 */
int
rk_tb_6_2tp (Optimize * optimize) ///< Optimize struct.
{
long double *tb;
#if DEBUG_RK_6_2
fprintf (stderr, "rk_tb_6_2tp: start\n");
#endif
if (!rk_tb_6_2t (optimize))
return 0;
tb = optimize->coefficient;
e61 (tb) = e62 (tb) = e63 (tb) = e64 (tb) = 0.L;
rk_e_6 (tb);
#if DEBUG_RK_6_2
fprintf (stderr, "rk_tb_6_2tp: end\n");
#endif
return 1;
}
/**
* Function to calculate the objective function of a 6 steps 2nd order
* Runge-Kutta method.
*
* \return objective function value.
*/
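/*
 * Shape of the objective (read off from the code): any negative b
 * coefficient is penalized by returning 40 plus the accumulated negativity;
 * otherwise the value is 30 plus the largest node (at least 1), and in the
 * strong-stability case it is capped by the optimum found by rk_bucle_ac.
 */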
long double
rk_objective_tb_6_2 (RK * rk) ///< RK struct.
{
long double *tb;
long double o;
#if DEBUG_RK_6_2
fprintf (stderr, "rk_objective_tb_6_2: start\n");
#endif
tb = rk->tb->coefficient;
o = fminl (0.L, b20 (tb));
if (b30 (tb) < 0.L)
o += b30 (tb);
if (b40 (tb) < 0.L)
o += b40 (tb);
if (b50 (tb) < 0.L)
o += b50 (tb);
if (b60 (tb) < 0.L)
o += b60 (tb);
if (b61 (tb) < 0.L)
o += b61 (tb);
if (o < 0.L)
{
o = 40.L - o;
goto end;
}
o = 30.L
+ fmaxl (1.L,
fmaxl (t1 (tb),
fmaxl (t2 (tb),
fmaxl (t3 (tb), fmaxl (t4 (tb), t5 (tb))))));
if (rk->strong)
{
rk_bucle_ac (rk);
o = fminl (o, *rk->ac0->optimal);
}
end:
#if DEBUG_RK_6_2
fprintf (stderr, "rk_objective_tb_6_2: optimal=%Lg\n", o);
fprintf (stderr, "rk_objective_tb_6_2: end\n");
#endif
return o;
}
/**
* Function to calculate the objective function of a 6 steps 2nd order, third
* order in equations depending only on time, Runge-Kutta method.
*
* \return objective function value.
*/
long double
rk_objective_tb_6_2t (RK * rk) ///< RK struct.
{
long double *tb;
long double o;
#if DEBUG_RK_6_2
fprintf (stderr, "rk_objective_tb_6_2t: start\n");
#endif
tb = rk->tb->coefficient;
#if DEBUG_RK_6_2
rk_print_tb (rk->tb, "rk_objective_tb_6_2t", stderr);
#endif
o = fminl (0.L, b20 (tb));
if (b30 (tb) < 0.L)
o += b30 (tb);
if (b40 (tb) < 0.L)
o += b40 (tb);
if (b50 (tb) < 0.L)
o += b50 (tb);
if (b60 (tb) < 0.L)
o += b60 (tb);
if (b64 (tb) < 0.L)
o += b64 (tb);
if (b65 (tb) < 0.L)
o += b65 (tb);
if (o < 0.L)
{
o = 40.L - o;
goto end;
}
o = 30.L
+ fmaxl (1.L,
fmaxl (t1 (tb),
fmaxl (t2 (tb),
fmaxl (t3 (tb), fmaxl (t4 (tb), t5 (tb))))));
if (rk->strong)
{
rk_bucle_ac (rk);
o = fminl (o, *rk->ac0->optimal);
}
end:
#if DEBUG_RK_6_2
fprintf (stderr, "rk_objective_tb_6_2t: optimal=%Lg\n", o);
fprintf (stderr, "rk_objective_tb_6_2t: end\n");
#endif
return o;
}
|
(*
File: Finite_And_Cyclic_Groups.thy
Author: Joseph Thommes, TU München; Manuel Eberl, TU München
*)
section \<open>Finite and cyclic groups\<close>
theory Finite_And_Cyclic_Groups
imports Group_Hom Generated_Groups_Extend General_Auxiliary
begin
subsection \<open>Finite groups\<close>
text \<open>We define the notion of finite groups and prove some trivial facts about them.\<close>
locale finite_group = group +
assumes fin[simp]: "finite (carrier G)"
(* Manuel Eberl *)
lemma (in finite_group) ord_pos:
assumes "x \<in> carrier G"
shows "ord x > 0"
using ord_ge_1[of x] assms by auto
lemma (in finite_group) order_gt_0 [simp,intro]: "order G > 0"
by (subst order_gt_0_iff_finite) auto
lemma (in finite_group) finite_ord_conv_Least:
assumes "x \<in> carrier G"
shows "ord x = (LEAST n::nat. 0 < n \<and> x [^] n = \<one>)"
using pow_order_eq_1 order_gt_0_iff_finite ord_conv_Least assms by auto
lemma (in finite_group) non_trivial_group_ord_gr_1:
assumes "carrier G \<noteq> {\<one>}"
shows "\<exists>e \<in> carrier G. ord e > 1"
proof -
from one_closed obtain e where e: "e \<noteq> \<one>" "e \<in> carrier G" using assms carrier_not_empty by blast
thus ?thesis using ord_eq_1[of e] le_neq_implies_less ord_ge_1 by fastforce
qed
(* Manuel Eberl *)
lemma (in finite_group) max_order_elem:
obtains a where "a \<in> carrier G" "\<forall>x \<in> carrier G. ord x \<le> ord a"
proof -
have "\<exists>x. x \<in> carrier G \<and> (\<forall>y. y \<in> carrier G \<longrightarrow> ord y \<le> ord x)"
proof (rule ex_has_greatest_nat[of _ \<one> _ "order G + 1"], safe)
show "\<one> \<in> carrier G"
by auto
next
fix x assume "x \<in> carrier G"
hence "ord x \<le> order G"
by (intro ord_le_group_order fin)
also have "\<dots> < order G + 1"
by simp
finally show "ord x < order G + 1" .
qed
thus ?thesis using that by blast
qed
lemma (in finite_group) iso_imp_finite:
assumes "G \<cong> H" "group H"
shows "finite_group H"
proof -
interpret H: group H by fact
show ?thesis
proof(unfold_locales)
show "finite (carrier H)" using iso_same_card[OF assms(1)]
by (metis card_gt_0_iff order_def order_gt_0)
qed
qed
lemma (in finite_group) finite_FactGroup:
assumes "H \<lhd> G"
shows "finite_group (G Mod H)"
proof -
interpret H: normal H G by fact
interpret Mod: group "G Mod H" using H.factorgroup_is_group .
show ?thesis
by (unfold_locales, unfold FactGroup_def RCOSETS_def, simp)
qed
lemma (in finite_group) bigger_subgroup_is_group:
assumes "subgroup H G" "card H \<ge> order G"
shows "H = carrier G"
using subgroup.subset fin assms by (metis card_seteq order_def)
text \<open>All generated subgroups of a finite group are obviously also finite.\<close>
lemma (in finite_group) finite_generate:
assumes "A \<subseteq> carrier G"
shows "finite (generate G A)"
using generate_incl[of A] rev_finite_subset[of "carrier G" "generate G A"] assms by simp
text \<open>We also provide an induction rule for finite groups inspired by Manuel Eberl's AFP entry
"Dirichlet L-Functions and Dirichlet's Theorem" and the contained theory "Group\_Adjoin". A property
that is true for a subgroup generated by some set and stays true when adjoining an element is also
true for the whole group.\<close>
lemma (in finite_group) generate_induct[consumes 1, case_names base adjoin]:
assumes "A0 \<subseteq> carrier G"
assumes "A0 \<subseteq> carrier G \<Longrightarrow> P (G\<lparr>carrier := generate G A0\<rparr>)"
assumes "\<And>a A. \<lbrakk>A \<subseteq> carrier G; a \<in> carrier G - generate G A; A0 \<subseteq> A;
P (G\<lparr>carrier := generate G A\<rparr>)\<rbrakk> \<Longrightarrow> P (G\<lparr>carrier := generate G (A \<union> {a})\<rparr>)"
shows "P G"
proof -
define A where A: "A = carrier G"
hence gA: "generate G A = carrier G"
using generate_incl[of "carrier G"] generate_sincl[of "carrier G"] by simp
hence "finite A" using fin A by argo
moreover have "A0 \<subseteq> A" using assms(1) A by argo
moreover have "A \<subseteq> carrier G" using A by simp
moreover have "generate G A0 \<subseteq> generate G A" using gA generate_incl[OF assms(1)] by argo
ultimately have "P (G\<lparr>carrier := generate G A\<rparr>)" using assms(2, 3)
proof (induction "A" taking: card rule: measure_induct_rule)
case (less A)
then show ?case
proof(cases "generate G A0 = generate G A")
case True
thus ?thesis using less by force
next
case gA0: False
with less(3) have s: "A0 \<subset> A" by blast
then obtain a where a: "a \<in> A - A0" by blast
have P1: "P (G\<lparr>carrier := generate G (A - {a})\<rparr>)"
proof(rule less(1))
show "card (A - {a}) < card A" using a less(2) by (meson DiffD1 card_Diff1_less)
show "A0 \<subseteq> A - {a}" using a s by blast
thus "generate G A0 \<subseteq> generate G (A - {a})" using mono_generate by presburger
qed (use less a s in auto)
show ?thesis
proof (cases "generate G A = generate G (A - {a})")
case True
then show ?thesis using P1 by simp
next
case False
have "a \<in> carrier G - generate G (A - {a})"
proof -
have "a \<notin> generate G (A - {a})"
proof
assume a2: "a \<in> generate G (A - {a})"
have "generate G (A - {a}) = generate G A"
proof (rule equalityI)
show "generate G (A - {a}) \<subseteq> generate G A" using mono_generate by auto
show "generate G A \<subseteq> generate G (A - {a})"
proof(subst (2) generate_idem[symmetric])
show "generate G A \<subseteq> generate G (generate G (A - {a}))"
by (intro mono_generate, use generate_sincl[of "A - {a}"] a2 in blast)
qed (use less in auto)
qed
with False show False by argo
qed
with a less show ?thesis by fast
qed
from less(7)[OF _ this _ P1] less(4) s a have "P (G\<lparr>carrier := generate G (A - {a} \<union> {a})\<rparr>)"
by blast
moreover have "A - {a} \<union> {a} = A" using a by blast
ultimately show ?thesis by auto
qed
qed
qed
with gA show ?thesis by simp
qed
subsection \<open>Finite abelian groups\<close>
text \<open>Another trivial locale: the finite abelian group with some trivial facts.\<close>
locale finite_comm_group = finite_group + comm_group
lemma (in finite_comm_group) iso_imp_finite_comm:
assumes "G \<cong> H" "group H"
shows "finite_comm_group H"
proof -
interpret H: group H by fact
interpret H: comm_group H by (intro iso_imp_comm_group[OF assms(1)], unfold_locales)
interpret H: finite_group H by (intro iso_imp_finite[OF assms(1)], unfold_locales)
show ?thesis by unfold_locales
qed
lemma (in finite_comm_group) finite_comm_FactGroup:
assumes "subgroup H G"
shows "finite_comm_group (G Mod H)"
unfolding finite_comm_group_def
proof(safe)
show "finite_group (G Mod H)" using finite_FactGroup[OF subgroup_imp_normal[OF assms]] .
show "comm_group (G Mod H)" by (simp add: abelian_FactGroup assms)
qed
(* Manuel Eberl *)
lemma (in finite_comm_group) subgroup_imp_finite_comm_group:
assumes "subgroup H G"
shows "finite_comm_group (G\<lparr>carrier := H\<rparr>)"
proof -
interpret G': group "G\<lparr>carrier := H\<rparr>" by (intro subgroup_imp_group) fact+
interpret H: subgroup H G by fact
show ?thesis by standard (use finite_subset[OF H.subset] in \<open>auto simp: m_comm\<close>)
qed
subsection \<open>Cyclic groups\<close>
text \<open>Now, the central notion of a cyclic group is introduced: a group generated
by a single element.\<close>
locale cyclic_group = group +
fixes gen :: "'a"
assumes gen_closed[intro, simp]: "gen \<in> carrier G"
assumes generator: "carrier G = generate G {gen}"
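text \<open>In other words, $\mathrm{carrier}\ G = \{\mathit{gen}^{k} \mid k \in \mathbb{Z}\}$, as the
first lemma below makes explicit. Standard examples are $(\mathbb{Z},+)$, infinite cyclic with
generator $1$, and the additive group $\mathbb{Z}/n\mathbb{Z}$, cyclic of order $n$ with
generator $1$; the latter reappears below as \<open>integer_mod_group\<close>.\<close>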
lemma (in cyclic_group) elem_is_gen_pow:
assumes "x \<in> carrier G"
shows "\<exists>n :: int. x = gen [^] n"
proof -
from generator have x_g:"x \<in> generate G {gen}" using assms by fast
with generate_pow[of gen] show ?thesis using gen_closed by blast
qed
text \<open>Every cyclic group is commutative/abelian.\<close>
sublocale cyclic_group \<subseteq> comm_group
proof(unfold_locales)
fix x y
assume "x \<in> carrier G" "y \<in> carrier G"
then obtain a b where ab:"x = gen [^] (a::int)" "y = gen [^] (b::int)"
using elem_is_gen_pow by presburger
then have "x \<otimes> y = gen [^] (a + b)" by (simp add: int_pow_mult)
also have "\<dots> = y \<otimes> x" using ab int_pow_mult
by (metis add.commute gen_closed)
finally show "x \<otimes> y = y \<otimes> x" .
qed
text \<open>Some trivial intro rules for showing that a group is cyclic.\<close>
lemma (in group) cyclic_groupI0:
assumes "a \<in> carrier G" "carrier G = generate G {a}"
shows "cyclic_group G a"
using assms by (unfold_locales; auto)
lemma (in group) cyclic_groupI1:
assumes "a \<in> carrier G" "carrier G \<subseteq> generate G {a}"
shows "cyclic_group G a"
using assms by (unfold_locales, use generate_incl[of "{a}"] in auto)
lemma (in group) cyclic_groupI2:
assumes "a \<in> carrier G"
shows "cyclic_group (G\<lparr>carrier := generate G {a}\<rparr>) a"
proof (intro group.cyclic_groupI0)
show "group (G\<lparr>carrier := generate G {a}\<rparr>)"
by (intro subgroup.subgroup_is_group group.generate_is_subgroup, use assms in simp_all)
show "a \<in> carrier (G\<lparr>carrier := generate G {a}\<rparr>)" using generate.incl[of a "{a}"] by auto
show "carrier (G\<lparr>carrier := generate G {a}\<rparr>) = generate (G\<lparr>carrier := generate G {a}\<rparr>) {a}"
using assms
by (simp add: generate_consistent generate.incl group.generate_is_subgroup)
qed
text \<open>The order of the generating element is always the same as the group order.\<close>
lemma (in cyclic_group) ord_gen_is_group_order:
shows "ord gen = order G"
proof (cases "finite (carrier G)")
case True
with generator show "ord gen = order G"
using generate_pow_card[of gen] order_def[of G] gen_closed by simp
next
case False
thus ?thesis
using generate_pow_card generator order_def[of G] card_eq_0_iff[of "carrier G"] by force
qed
text \<open>In the case of a finite group, it is sufficient to have one element whose order equals the
group order to know that the group is cyclic.\<close>
lemma (in finite_group) element_ord_generates_cyclic:
assumes "a \<in> carrier G" "ord a = order G"
shows "cyclic_group G a"
proof (unfold_locales)
show "a \<in> carrier G" using assms(1) by simp
show "carrier G = generate G {a}"
using assms bigger_subgroup_is_group[OF generate_is_subgroup]
by (metis empty_subsetI fin generate_pow_card insert_subset ord_le_group_order)
qed
text \<open>Another useful fact is that a group of prime order is also cyclic.\<close>
lemma (in group) prime_order_group_is_cyc:
assumes "Factorial_Ring.prime (order G)"
obtains g where "cyclic_group G g"
proof (unfold_locales)
obtain p where order_p: "order G = p" and p_prime: "Factorial_Ring.prime p" using assms by blast
then have "card (carrier G) \<ge> 2" by (simp add: order_def prime_ge_2_nat)
then obtain a where a_in: "a \<in> carrier G" and a_not_one: "a \<noteq> \<one>" using one_unique
by (metis (no_types, lifting) card_2_iff' obtain_subset_with_card_n subset_iff)
interpret fin: finite_group G
using assms order_gt_0_iff_finite unfolding order_def by unfold_locales auto
have "ord a dvd p" using a_in order_p ord_dvd_group_order by blast
hence "ord a = p" using prime_nat_iff[of p] p_prime ord_eq_1 a_in a_not_one by blast
then interpret cyclic_group G a
using fin.element_ord_generates_cyclic order_p a_in by simp
show ?thesis using that cyclic_group_axioms .
qed
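text \<open>For example, any group of order $2$, $3$, $5$ or $7$ is cyclic: by Lagrange the order of a
non-identity element divides the prime group order and is not $1$, so such an element already
generates the whole group. This is precisely the argument of the preceding proof.\<close>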
text \<open>What follows is an induction principle for cyclic groups: a predicate holds for all elements
of the group if it holds for all natural powers of the generating element and is preserved under
taking inverses (since every element of the group arises in one of these two ways).\<close>
(* Manuel Eberl *)
lemma (in cyclic_group) generator_induct [consumes 1, case_names generate inv]:
assumes x: "x \<in> carrier G"
assumes IH1: "\<And>n::nat. P (gen [^] n)"
assumes IH2: "\<And>x. x \<in> carrier G \<Longrightarrow> P x \<Longrightarrow> P (inv x)"
shows "P x"
proof -
from x obtain n :: int where n: "x = gen [^] n"
using elem_is_gen_pow[of x] by auto
show ?thesis
proof (cases "n \<ge> 0")
case True
have "P (gen [^] nat n)"
by (rule IH1)
with True n show ?thesis by simp
next
case False
have "P (inv (gen [^] nat (-n)))"
by (intro IH1 IH2) auto
also have "gen [^] nat (-n) = gen [^] (-n)"
using False by simp
also have "inv \<dots> = x"
using n by (simp add: int_pow_neg)
finally show ?thesis .
qed
qed
subsection \<open>Finite cyclic groups\<close>
text \<open>Additionally, the notion of the finite cyclic group is introduced.\<close>
locale finite_cyclic_group = finite_group + cyclic_group
sublocale finite_cyclic_group \<subseteq> finite_comm_group
by unfold_locales
lemma (in finite_cyclic_group) ord_gen_gt_zero:
"ord gen > 0"
using ord_ge_1[OF fin gen_closed] by simp
text \<open>In order to prove something about an element of a finite cyclic group, it suffices to show
the property for the neutral element (or, alternatively, for the generator) and then inductively
for the elements obtained by multiplying with the generator.\<close>
lemma (in finite_cyclic_group) generator_induct0 [consumes 1, case_names one step]:
assumes x: "x \<in> carrier G"
assumes IH1: "P \<one>"
assumes IH2: "\<And>x. \<lbrakk>x \<in> carrier G; P x\<rbrakk> \<Longrightarrow> P (x \<otimes> gen)"
shows "P x"
proof -
from ord_gen_gt_zero generate_nat_pow[OF _ gen_closed] obtain n::nat where n: "x = gen [^] n"
using generator x by blast
thus ?thesis by (induction n arbitrary: x, use assms in auto)
qed
lemma (in finite_cyclic_group) generator_induct1 [consumes 1, case_names gen step]:
assumes x: "x \<in> carrier G"
assumes IH1: "P gen"
assumes IH2: "\<And>x. \<lbrakk>x \<in> carrier G; P x\<rbrakk> \<Longrightarrow> P (x \<otimes> gen)"
shows "P x"
proof(rule generator_induct0[OF x])
show "\<And>x. \<lbrakk>x \<in> carrier G; P x\<rbrakk> \<Longrightarrow> P (x \<otimes> gen)" using IH2 by blast
have "P x" if "n > 0" "x = gen [^] n" for n::nat and x using that
by (induction n arbitrary: x; use assms in fastforce)
from this[OF ord_pos[OF gen_closed] pow_ord_eq_1[OF gen_closed, symmetric]] show "P \<one>" .
qed
subsection \<open>\<open>get_exp\<close> - discrete logarithm\<close>
text \<open>What now follows is the discrete logarithm for groups. It is used several times throughout
this entry, first of all to show that two cyclic groups of the same order are isomorphic.\<close>
definition (in group) get_exp where
"get_exp g = (\<lambda>a. SOME k::int. a = g [^] k)"
text \<open>For each element with itself as the base, the discrete logarithm indeed does what is
expected. This is not the strongest possible statement, but it is sufficient for our needs.\<close>
lemma (in group) get_exp_self_fulfills:
assumes "a \<in> carrier G"
shows "a = a [^] get_exp a a"
proof -
have "a = a [^] (1::int)" using assms by auto
moreover have "a [^] (1::int) = a [^] (SOME x::int. a [^] (1::int) = a [^] x)"
by (intro someI_ex[of "\<lambda>x::int. a [^] (1::int) = a [^] x"]; blast)
ultimately show ?thesis unfolding get_exp_def by simp
qed
lemma (in group) get_exp_self:
assumes "a \<in> carrier G"
shows "get_exp a a mod ord a = (1::int) mod ord a"
by (intro pow_eq_int_mod[OF assms], use get_exp_self_fulfills[OF assms] assms in auto)
text \<open>For cyclic groups, the discrete logarithm "works" for every element.\<close>
lemma (in cyclic_group) get_exp_fulfills:
assumes "a \<in> carrier G"
shows "a = gen [^] get_exp gen a"
proof -
from elem_is_gen_pow[OF assms] obtain k::int where k: "a = gen [^] k" by blast
moreover have "gen [^] k = gen [^] (SOME x::int. gen [^] k = gen [^] x)"
by(intro someI_ex[of "\<lambda>x::int. gen [^] k = gen [^] x"]; blast)
ultimately show ?thesis unfolding get_exp_def by blast
qed
lemma (in cyclic_group) get_exp_non_zero:
assumes"b \<in> carrier G" "b \<noteq> \<one>"
shows "get_exp gen b \<noteq> 0"
using assms get_exp_fulfills[OF assms(1)] by auto
text \<open>One well-known logarithmic identity.\<close>
lemma (in cyclic_group) get_exp_mult_mod:
assumes "a \<in> carrier G" "b \<in> carrier G"
shows "get_exp gen (a \<otimes> b) mod (ord gen) = (get_exp gen a + get_exp gen b) mod (ord gen)"
proof (intro pow_eq_int_mod[OF gen_closed])
from get_exp_fulfills[of "a \<otimes> b"] have "gen [^] get_exp gen (a \<otimes> b) = a \<otimes> b" using assms by simp
moreover have "gen [^] (get_exp gen a + get_exp gen b) = a \<otimes> b"
proof -
have "gen [^] (get_exp gen a + get_exp gen b) = gen [^] (get_exp gen a) \<otimes> gen [^] (get_exp gen b)"
using int_pow_mult by blast
with get_exp_fulfills assms show ?thesis by simp
qed
ultimately show "gen [^] get_exp gen (a \<otimes> b) = gen [^] (get_exp gen a + get_exp gen b)" by simp
qed
text \<open>We now show that, given elements $a$ and $b$ of the same order, every function from the
group generated by $a$ to the group generated by $b$ that maps each power $a^k$ to $b^k$ is in
fact an isomorphism between these two groups.\<close>
lemma (in group) iso_cyclic_groups_generate:
assumes "a \<in> carrier G" "b \<in> carrier H" "group.ord G a = group.ord H b" "group H"
shows "{f. \<forall>k \<in> (UNIV::int set). f (a [^] k) = b [^]\<^bsub>H\<^esub> k}
\<subseteq> iso (G\<lparr>carrier := generate G {a}\<rparr>) (H\<lparr>carrier := generate H {b}\<rparr>)"
proof
interpret H: group H by fact
let ?A = "G\<lparr>carrier := generate G {a}\<rparr>"
let ?B = "H\<lparr>carrier := generate H {b}\<rparr>"
interpret A: cyclic_group ?A a by (intro group.cyclic_groupI2; use assms(1) in simp)
interpret B: cyclic_group ?B b by (intro group.cyclic_groupI2; use assms(2) in simp)
have sA: "subgroup (generate G {a}) G" by (intro generate_is_subgroup, use assms(1) in simp)
have sB: "subgroup (generate H {b}) H" by (intro H.generate_is_subgroup, use assms(2) in simp)
fix x
assume x: "x \<in> {f. \<forall>k\<in>(UNIV::int set). f (a [^] k) = b [^]\<^bsub>H\<^esub> k}"
have hom: "x \<in> hom ?A ?B"
proof (intro homI)
fix c
assume c: "c \<in> carrier ?A"
from A.elem_is_gen_pow[OF this] obtain k::int where k: "c = a [^] k"
using int_pow_consistent[OF sA generate.incl[of a]] by auto
with x have "x c = b [^]\<^bsub>H\<^esub> k" by blast
thus "x c \<in> carrier ?B"
using B.int_pow_closed H.int_pow_consistent[OF sB] generate.incl[of b "{b}" H] by simp
fix d
assume d: "d \<in> carrier ?A"
from A.elem_is_gen_pow[OF this] obtain l::int where l: "d = a [^] l"
using int_pow_consistent[OF sA generate.incl[of a]] by auto
with k have "c \<otimes> d = a [^] (k + l)" by (simp add: int_pow_mult assms(1))
with x have "x (c \<otimes>\<^bsub>?A\<^esub> d) = b [^]\<^bsub>H\<^esub> (k + l)" by simp
also have "\<dots> = b [^]\<^bsub>H\<^esub> k \<otimes>\<^bsub>H\<^esub> b [^]\<^bsub>H\<^esub> l" by (simp add: H.int_pow_mult assms(2))
finally show "x (c \<otimes>\<^bsub>?A\<^esub> d) = x c \<otimes>\<^bsub>?B\<^esub> x d" using x k l by simp
qed
then interpret xgh: group_hom ?A ?B x unfolding group_hom_def group_hom_axioms_def by blast
have "kernel ?A ?B x = {\<one>}"
proof(intro equalityI)
show "{\<one>} \<subseteq> kernel ?A ?B x" using xgh.one_in_kernel by auto
have "c = \<one>" if "c \<in> kernel ?A ?B x" for c
proof -
from that have c: "c \<in> carrier ?A" unfolding kernel_def by blast
from A.elem_is_gen_pow[OF this] obtain k::int where k: "c = a [^] k"
using int_pow_consistent[OF sA generate.incl[of a]] by auto
moreover have "x c = \<one>\<^bsub>H\<^esub>" using that x unfolding kernel_def by auto
ultimately have "\<one>\<^bsub>H\<^esub> = b [^]\<^bsub>H\<^esub> k" using x by simp
with assms(3) have "a [^] k = \<one>"
using int_pow_eq_id[OF assms(1), of k] H.int_pow_eq_id[OF assms(2), of k] by simp
thus "c = \<one>" using k by blast
qed
thus "kernel ?A ?B x \<subseteq> {\<one>}" by blast
qed
moreover have "carrier ?B \<subseteq> x ` carrier ?A"
proof
fix c
assume c: "c \<in> carrier ?B"
from B.elem_is_gen_pow[OF this] obtain k::int where k: "c = b [^]\<^bsub>H\<^esub> k"
using H.int_pow_consistent[OF sB generate.incl[of b]] by auto
then have "x (a [^] k) = c" using x by blast
moreover have "a [^] k \<in> carrier ?A"
using int_pow_consistent[OF sA generate.incl[of a]] A.int_pow_closed generate.incl[of a]
by fastforce
ultimately show "c \<in> x ` carrier ?A" by blast
qed
ultimately show "x \<in> iso ?A ?B" using hom xgh.iso_iff unfolding kernel_def by auto
qed
text \<open>This is then used to derive the isomorphism of two cyclic groups of the same order as a
direct consequence.\<close>
lemma (in cyclic_group) iso_cyclic_groups_same_order:
assumes "cyclic_group H h" "order G = order H"
shows "G \<cong> H"
proof(intro is_isoI)
interpret H: cyclic_group H h by fact
define f where "f = (\<lambda>a. h [^]\<^bsub>H\<^esub> get_exp gen a)"
from assms(2) have o: "ord gen = H.ord h" using ord_gen_is_group_order H.ord_gen_is_group_order
by simp
have "\<forall>k \<in> (UNIV::int set). f (gen [^] k) = h [^]\<^bsub>H\<^esub> k"
proof
fix k
assume k: "k \<in> (UNIV::int set)"
have "gen [^] k = gen [^] (SOME x::int. gen [^] k = gen [^] x)"
by(intro someI_ex[of "\<lambda>x::int. gen [^] k = gen [^] x"]; blast)
moreover have "(SOME x::int. gen [^] k = gen [^] x) = (SOME x::int. h [^]\<^bsub>H\<^esub> k = h [^]\<^bsub>H\<^esub> x)"
proof -
have "gen [^] k = gen [^] x \<longleftrightarrow> h [^]\<^bsub>H\<^esub> k = h [^]\<^bsub>H\<^esub> x" for x::int
by (simp add: o group.int_pow_eq)
thus ?thesis by simp
qed
moreover have "h [^]\<^bsub>H\<^esub> k = h [^]\<^bsub>H\<^esub> (SOME x::int. h [^]\<^bsub>H\<^esub> k = h [^]\<^bsub>H\<^esub> x)"
by(intro someI_ex[of "\<lambda>x::int. h [^]\<^bsub>H\<^esub> k = h [^]\<^bsub>H\<^esub> x"]; blast)
ultimately show "f (gen [^] k) = h [^]\<^bsub>H\<^esub> k" unfolding f_def get_exp_def by metis
qed
thus "f \<in> iso G H"
using iso_cyclic_groups_generate[OF gen_closed H.gen_closed o H.is_group]
by (auto simp flip: generator H.generator)
qed
subsection \<open>Integer modular groups\<close>
text \<open>We show that \<open>integer_mod_group\<close> (written as \<open>Z n\<close>) is in fact a cyclic group.
For $n \neq 1$ it is generated by $1$, and for $n = 1$ by $0$.\<close>
notation integer_mod_group ("Z")
lemma Zn_neq1_cyclic_group:
assumes "n \<noteq> 1"
shows "cyclic_group (Z n) 1"
proof(unfold cyclic_group_def cyclic_group_axioms_def, safe)
show "group (Z n)" using group_integer_mod_group .
then interpret group "Z n" .
show oc: "1 \<in> carrier (Z n)"
unfolding integer_mod_group_def integer_group_def using assms by force
show "x \<in> generate (Z n) {1}" if "x \<in> carrier (Z n)" for x
using generate_pow[OF oc] that int_pow_integer_mod_group solve_equation subgroup_self
by fastforce
show "x \<in> carrier (Z n)" if "x \<in> generate (Z n) {1}" for x using generate_incl[of "{1}"] that oc
by fast
qed
lemma Z1_cyclic_group: "cyclic_group (Z 1) 0"
proof(unfold cyclic_group_def cyclic_group_axioms_def, safe)
show "group (Z 1)" using group_integer_mod_group .
then interpret group "Z 1" .
show "0 \<in> carrier (Z 1)" unfolding integer_mod_group_def by simp
thus "x \<in> carrier (Z 1)" if "x \<in> generate (Z 1) {0}" for x using generate_incl[of "{0}"] that
by fast
show "x \<in> generate (Z 1) {0}" if "x \<in> carrier (Z 1)" for x
proof -
from that have "x = 0" unfolding integer_mod_group_def by auto
with generate.one[of "Z 1" "{0}"] show "x \<in> generate (Z 1) {0}" unfolding integer_mod_group_def
by simp
qed
qed
lemma Zn_cyclic_group:
obtains x where "cyclic_group (Z n) x"
using Z1_cyclic_group Zn_neq1_cyclic_group by metis
text \<open>Moreover, its order is just $n$.\<close>
lemma Zn_order: "order (Z n) = n"
by (unfold integer_mod_group_def integer_group_def order_def, auto)
text \<open>Consequently, \<open>Z n\<close> is isomorphic to any cyclic group of order $n$.\<close>
lemma (in cyclic_group) Zn_iso:
assumes "order G = n"
shows "G \<cong> Z n"
using Zn_order Zn_cyclic_group iso_cyclic_groups_same_order assms by metis
no_notation integer_mod_group ("Z")
end
|
\section{Plots}
\label{sec:outstreams_plot}
New plots specific to particular applications can be added through \xmlNode{OutStreams}
\xmlNode{Plot} plugins.
These plotting plugins should inherit from the \texttt{PlotPlugin} base class defined in
\begin{lstlisting}[language=bash]
raven/framework/PluginBaseClasses/OutStreamPlotPlugin.py
\end{lstlisting}
which sets up the plotting tool to be found when RAVEN runs.
A good example of the \texttt{PlotPlugin} can be found in the RAVEN ExamplePlugin, found at
\begin{lstlisting}[language=bash]
raven/plugins/ExamplePlugin/src/CorrelationPlot.py
\end{lstlisting}
There are a few methods that can or must be implemented for new plotting strategies.
%
%
\subsection{\texttt{run} method, required}
The \texttt{run} method is the primary execution method for the PlotPlugin; it is where the data
handling and plotting mechanics are executed. Note that \texttt{run} does not receive any inputs;
typically, the source of the data to be plotted is identified in the \texttt{initialize} method.
The \texttt{run} method can perform many actions, including data manipulation, creation of
\texttt{matplotlib} figures and axes, saving figures to file, and so forth. It should not return
anything.
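For illustration, a minimal \texttt{run} might look like the following sketch (not taken from the
RAVEN source; the \texttt{self.\_source} attribute and the \texttt{asDataset} accessor are
assumptions of this example):
\begin{lstlisting}[language=python]
def run(self):
  """
    Main execution: assemble the data and produce the plot.
    @ In, None
    @ Out, None
  """
  import matplotlib.pyplot as plt
  # illustrative only: self._source is assumed to have been set in
  # initialize, and asDataset() is a hypothetical data accessor
  data = self._source.asDataset()
  fig, ax = plt.subplots()
  ax.plot(data['x'], data['y'])
  fig.savefig('my_plot.png')
\end{lstlisting}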
%
%
\subsection{Constructor, optional}
As the constructor for Python classes, the \texttt{\_\_init\_\_} method should be extended to define
any instance variables used in the plugin class. If no instance variables are used, it may be omitted.
Any call to \texttt{\_\_init\_\_} must include a call to the parent's constructor, such as
\begin{lstlisting}[language=python]
def __init__(self):
""" ... """
super().__init__()
\end{lstlisting}
This ensures access to the basic RAVEN functionality required to use the plugin.
%
%
\subsection{Input Handling methods, optional}
The class method \texttt{getInputSpecification} and the corresponding instance method
\texttt{handleInput} determine which user inputs RAVEN accepts and how those input
values are stored in the plugin. Acceptable inputs are defined in \texttt{getInputSpecification}
and then read in during \texttt{handleInput}. Both of these methods require a call to \texttt{super}
to function as expected.
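As a hedged sketch of this pattern (the \texttt{InputData} and \texttt{InputTypes} helper names
below are assumptions about the input-specification API, not verified signatures):
\begin{lstlisting}[language=python]
@classmethod
def getInputSpecification(cls):
  """ ... """
  # assumes RAVEN's InputData/InputTypes modules are importable here
  spec = super().getInputSpecification()
  # assumed helper: declare a <source> child node carrying a string
  spec.addSub(InputData.parameterInputFactory('source',
              contentType=InputTypes.StringType))
  return spec

def handleInput(self, spec):
  """ ... """
  super().handleInput(spec)
  # store the requested DataObject name for use in initialize
  self._sourceName = spec.findFirst('source').value
\end{lstlisting}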
%
%
\subsection{Initialization, optional}
The aptly-named \texttt{initialize} method is used at the start of every RAVEN \xmlNode{Step} to prepare
for execution. A common task for this method is to find the source of the data to plot. To make this
process easier, RAVEN provides a \texttt{self.findSource} method that can search the input dictionary
provided to \texttt{initialize} and find a \xmlNode{DataObject} by string name.
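A minimal sketch (only \texttt{self.findSource} is taken from the description above; the argument
and attribute names and the error handling are this example's own):
\begin{lstlisting}[language=python]
def initialize(self, stepEntities):
  """ ... """
  super().initialize(stepEntities)
  # find the DataObject whose name was read during handleInput
  src = self.findSource(self._sourceName, stepEntities)
  if src is None:
    raise IOError(f'Source DataObject "{self._sourceName}" was not found!')
  self._source = src
\end{lstlisting}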
%
%
|
! Check constraint induced by an array declaration
subroutine trust02(n, a)
real a(n)
j = n
return
end
|
[STATEMENT]
lemma mdeg_gt_0_if_nempty: "xs \<noteq> {||} \<Longrightarrow> max_deg (Node r xs) > 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. xs \<noteq> {||} \<Longrightarrow> 0 < max_deg (Node r xs)
[PROOF STEP]
using fcard_fempty
[PROOF STATE]
proof (prove)
using this:
fcard {||} = 0
goal (1 subgoal):
1. xs \<noteq> {||} \<Longrightarrow> 0 < max_deg (Node r xs)
[PROOF STEP]
by auto
|
module Web.Internal.DomPrim
import JS
import Web.Internal.Types
--------------------------------------------------------------------------------
-- Interfaces
--------------------------------------------------------------------------------
namespace AbortController
export
%foreign "browser:lambda:()=> new AbortController()"
prim__new : PrimIO AbortController
export
%foreign "browser:lambda:x=>x.signal"
prim__signal : AbortController -> PrimIO AbortSignal
export
%foreign "browser:lambda:x=>x.abort()"
prim__abort : AbortController -> PrimIO ()
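-- Illustrative usage sketch (not part of the generated bindings): the raw
-- primitives are `PrimIO` actions and can be lifted into any `HasIO`
-- monad with `primIO` from the Idris2 prelude.
abortExample : HasIO io => io ()
abortExample = do
  ctrl <- primIO AbortController.prim__new
  primIO $ AbortController.prim__abort ctrl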
namespace AbortSignal
export
%foreign "browser:lambda:x=>x.abort()"
prim__abort : PrimIO AbortSignal
export
%foreign "browser:lambda:x=>x.aborted"
prim__aborted : AbortSignal -> PrimIO Boolean
export
%foreign "browser:lambda:x=>x.onabort"
prim__onabort : AbortSignal -> PrimIO (Nullable EventHandlerNonNull)
export
%foreign "browser:lambda:(x,v)=>{x.onabort = v}"
prim__setOnabort : AbortSignal -> Nullable EventHandlerNonNull -> PrimIO ()
namespace AbstractRange
export
%foreign "browser:lambda:x=>x.collapsed"
prim__collapsed : AbstractRange -> PrimIO Boolean
export
%foreign "browser:lambda:x=>x.endContainer"
prim__endContainer : AbstractRange -> PrimIO Node
export
%foreign "browser:lambda:x=>x.endOffset"
prim__endOffset : AbstractRange -> PrimIO Bits32
export
%foreign "browser:lambda:x=>x.startContainer"
prim__startContainer : AbstractRange -> PrimIO Node
export
%foreign "browser:lambda:x=>x.startOffset"
prim__startOffset : AbstractRange -> PrimIO Bits32
namespace Attr
export
%foreign "browser:lambda:x=>x.localName"
prim__localName : Attr -> PrimIO String
export
%foreign "browser:lambda:x=>x.name"
prim__name : Attr -> PrimIO String
export
%foreign "browser:lambda:x=>x.namespaceURI"
prim__namespaceURI : Attr -> PrimIO (Nullable String)
export
%foreign "browser:lambda:x=>x.ownerElement"
prim__ownerElement : Attr -> PrimIO (Nullable Element)
export
%foreign "browser:lambda:x=>x.prefix"
prim__prefix : Attr -> PrimIO (Nullable String)
export
%foreign "browser:lambda:x=>x.specified"
prim__specified : Attr -> PrimIO Boolean
export
%foreign "browser:lambda:x=>x.value"
prim__value : Attr -> PrimIO String
export
%foreign "browser:lambda:(x,v)=>{x.value = v}"
prim__setValue : Attr -> String -> PrimIO ()
namespace CharacterData
export
%foreign "browser:lambda:x=>x.data"
prim__data : CharacterData -> PrimIO String
export
%foreign "browser:lambda:(x,v)=>{x.data = v}"
prim__setData : CharacterData -> String -> PrimIO ()
export
%foreign "browser:lambda:x=>x.length"
prim__length : CharacterData -> PrimIO Bits32
export
%foreign "browser:lambda:(x,a)=>x.appendData(a)"
prim__appendData : CharacterData -> String -> PrimIO ()
export
%foreign "browser:lambda:(x,a,b)=>x.deleteData(a,b)"
prim__deleteData : CharacterData -> Bits32 -> Bits32 -> PrimIO ()
export
%foreign "browser:lambda:(x,a,b)=>x.insertData(a,b)"
prim__insertData : CharacterData -> Bits32 -> String -> PrimIO ()
export
%foreign "browser:lambda:(x,a,b,c)=>x.replaceData(a,b,c)"
prim__replaceData : CharacterData -> Bits32 -> Bits32 -> String -> PrimIO ()
export
%foreign "browser:lambda:(x,a,b)=>x.substringData(a,b)"
prim__substringData : CharacterData -> Bits32 -> Bits32 -> PrimIO String
namespace Comment
export
%foreign "browser:lambda:(a)=> new Comment(a)"
prim__new : UndefOr String -> PrimIO Comment
namespace CustomEvent
export
%foreign "browser:lambda:(a,b)=> new CustomEvent(a,b)"
prim__new : String -> UndefOr CustomEventInit -> PrimIO CustomEvent
export
%foreign "browser:lambda:x=>x.detail"
prim__detail : CustomEvent -> PrimIO AnyPtr
export
%foreign "browser:lambda:(x,a,b,c,d)=>x.initCustomEvent(a,b,c,d)"
prim__initCustomEvent : CustomEvent
-> String
-> UndefOr Boolean
-> UndefOr Boolean
-> UndefOr AnyPtr
-> PrimIO ()
namespace DOMImplementation
export
%foreign "browser:lambda:(x,a,b,c)=>x.createDocument(a,b,c)"
prim__createDocument : DOMImplementation
-> Nullable String
-> String
-> UndefOr (Nullable DocumentType)
-> PrimIO XMLDocument
export
%foreign "browser:lambda:(x,a,b,c)=>x.createDocumentType(a,b,c)"
prim__createDocumentType : DOMImplementation
-> String
-> String
-> String
-> PrimIO DocumentType
export
%foreign "browser:lambda:(x,a)=>x.createHTMLDocument(a)"
prim__createHTMLDocument : DOMImplementation
-> UndefOr String
-> PrimIO Document
export
%foreign "browser:lambda:x=>x.hasFeature()"
prim__hasFeature : DOMImplementation -> PrimIO Boolean
namespace DOMTokenList
export
%foreign "browser:lambda:x=>x.length"
prim__length : DOMTokenList -> PrimIO Bits32
export
%foreign "browser:lambda:x=>x.value"
prim__value : DOMTokenList -> PrimIO String
export
%foreign "browser:lambda:(x,v)=>{x.value = v}"
prim__setValue : DOMTokenList -> String -> PrimIO ()
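  -- Variadic DOM methods receive an `IO (Array String)` whose result is
  -- forced on the JS side (`va()`) and spread into the call.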
export
%foreign "browser:lambda:(x,va)=>x.add(...va())"
prim__add : DOMTokenList -> IO (Array String) -> PrimIO ()
export
%foreign "browser:lambda:(x,a)=>x.contains(a)"
prim__contains : DOMTokenList -> String -> PrimIO Boolean
export
%foreign "browser:lambda:(x,a)=>x.item(a)"
prim__item : DOMTokenList -> Bits32 -> PrimIO (Nullable String)
export
%foreign "browser:lambda:(x,va)=>x.remove(...va())"
prim__remove : DOMTokenList -> IO (Array String) -> PrimIO ()
export
%foreign "browser:lambda:(x,a,b)=>x.replace(a,b)"
prim__replace : DOMTokenList -> String -> String -> PrimIO Boolean
export
%foreign "browser:lambda:(x,a)=>x.supports(a)"
prim__supports : DOMTokenList -> String -> PrimIO Boolean
export
%foreign "browser:lambda:(x,a,b)=>x.toggle(a,b)"
prim__toggle : DOMTokenList -> String -> UndefOr Boolean -> PrimIO Boolean
namespace Document
export
%foreign "browser:lambda:()=> new Document()"
prim__new : PrimIO Document
export
%foreign "browser:lambda:(o,x)=>o[x]"
prim__get : Document -> String -> PrimIO Object
export
%foreign "browser:lambda:x=>x.URL"
prim__URL : Document -> PrimIO String
export
%foreign "browser:lambda:x=>x.alinkColor"
prim__alinkColor : Document -> PrimIO String
export
%foreign "browser:lambda:(x,v)=>{x.alinkColor = v}"
prim__setAlinkColor : Document -> String -> PrimIO ()
export
%foreign "browser:lambda:x=>x.all"
prim__all : Document -> PrimIO HTMLAllCollection
export
%foreign "browser:lambda:x=>x.anchors"
prim__anchors : Document -> PrimIO HTMLCollection
export
%foreign "browser:lambda:x=>x.applets"
prim__applets : Document -> PrimIO HTMLCollection
export
%foreign "browser:lambda:x=>x.bgColor"
prim__bgColor : Document -> PrimIO String
export
%foreign "browser:lambda:(x,v)=>{x.bgColor = v}"
prim__setBgColor : Document -> String -> PrimIO ()
export
%foreign "browser:lambda:x=>x.body"
prim__body : Document -> PrimIO (Nullable HTMLElement)
export
%foreign "browser:lambda:(x,v)=>{x.body = v}"
prim__setBody : Document -> Nullable HTMLElement -> PrimIO ()
export
%foreign "browser:lambda:x=>x.characterSet"
prim__characterSet : Document -> PrimIO String
export
%foreign "browser:lambda:x=>x.charset"
prim__charset : Document -> PrimIO String
export
%foreign "browser:lambda:x=>x.compatMode"
prim__compatMode : Document -> PrimIO String
export
%foreign "browser:lambda:x=>x.contentType"
prim__contentType : Document -> PrimIO String
export
%foreign "browser:lambda:x=>x.cookie"
prim__cookie : Document -> PrimIO String
export
%foreign "browser:lambda:(x,v)=>{x.cookie = v}"
prim__setCookie : Document -> String -> PrimIO ()
export
%foreign "browser:lambda:x=>x.currentScript"
prim__currentScript : Document
-> PrimIO (Nullable (Union2 HTMLScriptElement
SVGScriptElement))
export
%foreign "browser:lambda:x=>x.defaultView"
prim__defaultView : Document -> PrimIO (Nullable WindowProxy)
export
%foreign "browser:lambda:x=>x.designMode"
prim__designMode : Document -> PrimIO String
export
%foreign "browser:lambda:(x,v)=>{x.designMode = v}"
prim__setDesignMode : Document -> String -> PrimIO ()
export
%foreign "browser:lambda:x=>x.dir"
prim__dir : Document -> PrimIO String
export
%foreign "browser:lambda:(x,v)=>{x.dir = v}"
prim__setDir : Document -> String -> PrimIO ()
export
%foreign "browser:lambda:x=>x.doctype"
prim__doctype : Document -> PrimIO (Nullable DocumentType)
export
%foreign "browser:lambda:x=>x.documentElement"
prim__documentElement : Document -> PrimIO (Nullable Element)
export
%foreign "browser:lambda:x=>x.documentURI"
prim__documentURI : Document -> PrimIO String
export
%foreign "browser:lambda:x=>x.domain"
prim__domain : Document -> PrimIO String
export
%foreign "browser:lambda:(x,v)=>{x.domain = v}"
prim__setDomain : Document -> String -> PrimIO ()
export
%foreign "browser:lambda:x=>x.embeds"
prim__embeds : Document -> PrimIO HTMLCollection
export
%foreign "browser:lambda:x=>x.fgColor"
prim__fgColor : Document -> PrimIO String
export
%foreign "browser:lambda:(x,v)=>{x.fgColor = v}"
prim__setFgColor : Document -> String -> PrimIO ()
export
%foreign "browser:lambda:x=>x.forms"
prim__forms : Document -> PrimIO HTMLCollection
export
%foreign "browser:lambda:x=>x.head"
prim__head : Document -> PrimIO (Nullable HTMLHeadElement)
export
%foreign "browser:lambda:x=>x.hidden"
prim__hidden : Document -> PrimIO Boolean
export
%foreign "browser:lambda:x=>x.images"
prim__images : Document -> PrimIO HTMLCollection
export
%foreign "browser:lambda:x=>x.implementation"
prim__implementation : Document -> PrimIO DOMImplementation
export
%foreign "browser:lambda:x=>x.inputEncoding"
prim__inputEncoding : Document -> PrimIO String
export
%foreign "browser:lambda:x=>x.lastModified"
prim__lastModified : Document -> PrimIO String
export
%foreign "browser:lambda:x=>x.linkColor"
prim__linkColor : Document -> PrimIO String
export
%foreign "browser:lambda:(x,v)=>{x.linkColor = v}"
prim__setLinkColor : Document -> String -> PrimIO ()
export
%foreign "browser:lambda:x=>x.links"
prim__links : Document -> PrimIO HTMLCollection
export
%foreign "browser:lambda:x=>x.location"
prim__location : Document -> PrimIO (Nullable Location)
export
%foreign "browser:lambda:x=>x.onreadystatechange"
prim__onreadystatechange : Document -> PrimIO (Nullable EventHandlerNonNull)
export
%foreign "browser:lambda:(x,v)=>{x.onreadystatechange = v}"
prim__setOnreadystatechange : Document
-> Nullable EventHandlerNonNull
-> PrimIO ()
export
%foreign "browser:lambda:x=>x.onvisibilitychange"
prim__onvisibilitychange : Document -> PrimIO (Nullable EventHandlerNonNull)
export
%foreign "browser:lambda:(x,v)=>{x.onvisibilitychange = v}"
prim__setOnvisibilitychange : Document
-> Nullable EventHandlerNonNull
-> PrimIO ()
export
%foreign "browser:lambda:x=>x.plugins"
prim__plugins : Document -> PrimIO HTMLCollection
export
%foreign "browser:lambda:x=>x.readyState"
prim__readyState : Document -> PrimIO String
export
%foreign "browser:lambda:x=>x.referrer"
prim__referrer : Document -> PrimIO String
export
%foreign "browser:lambda:x=>x.rootElement"
prim__rootElement : Document -> PrimIO (Nullable SVGSVGElement)
export
%foreign "browser:lambda:x=>x.scripts"
prim__scripts : Document -> PrimIO HTMLCollection
export
%foreign "browser:lambda:x=>x.timeline"
prim__timeline : Document -> PrimIO DocumentTimeline
export
%foreign "browser:lambda:x=>x.title"
prim__title : Document -> PrimIO String
export
%foreign "browser:lambda:(x,v)=>{x.title = v}"
prim__setTitle : Document -> String -> PrimIO ()
export
%foreign "browser:lambda:x=>x.visibilityState"
prim__visibilityState : Document -> PrimIO String
export
%foreign "browser:lambda:x=>x.vlinkColor"
prim__vlinkColor : Document -> PrimIO String
export
%foreign "browser:lambda:(x,v)=>{x.vlinkColor = v}"
prim__setVlinkColor : Document -> String -> PrimIO ()
export
%foreign "browser:lambda:(x,a)=>x.adoptNode(a)"
prim__adoptNode : Document -> Node -> PrimIO Node
export
%foreign "browser:lambda:x=>x.captureEvents()"
prim__captureEvents : Document -> PrimIO ()
export
%foreign "browser:lambda:x=>x.clear()"
prim__clear : Document -> PrimIO ()
export
%foreign "browser:lambda:x=>x.close()"
prim__close : Document -> PrimIO ()
export
%foreign "browser:lambda:(x,a)=>x.createAttribute(a)"
prim__createAttribute : Document -> String -> PrimIO Attr
export
%foreign "browser:lambda:(x,a,b)=>x.createAttributeNS(a,b)"
prim__createAttributeNS : Document -> Nullable String -> String -> PrimIO Attr
export
%foreign "browser:lambda:(x,a)=>x.createCDATASection(a)"
prim__createCDATASection : Document -> String -> PrimIO CDATASection
export
%foreign "browser:lambda:(x,a)=>x.createComment(a)"
prim__createComment : Document -> String -> PrimIO Comment
export
%foreign "browser:lambda:x=>x.createDocumentFragment()"
prim__createDocumentFragment : Document -> PrimIO DocumentFragment
export
%foreign "browser:lambda:(x,a,b)=>x.createElement(a,b)"
prim__createElement : Document
-> String
-> UndefOr (Union2 String ElementCreationOptions)
-> PrimIO Element
export
%foreign "browser:lambda:(x,a,b,c)=>x.createElementNS(a,b,c)"
prim__createElementNS : Document
-> Nullable String
-> String
-> UndefOr (Union2 String ElementCreationOptions)
-> PrimIO Element
export
%foreign "browser:lambda:(x,a)=>x.createEvent(a)"
prim__createEvent : Document -> String -> PrimIO Event
export
%foreign "browser:lambda:(x,a,b,c)=>x.createNodeIterator(a,b,c)"
prim__createNodeIterator : Document
-> Node
-> UndefOr Bits32
-> UndefOr (Nullable NodeFilter)
-> PrimIO NodeIterator
export
%foreign "browser:lambda:(x,a,b)=>x.createProcessingInstruction(a,b)"
prim__createProcessingInstruction : Document
-> String
-> String
-> PrimIO ProcessingInstruction
export
%foreign "browser:lambda:x=>x.createRange()"
prim__createRange : Document -> PrimIO Range
export
%foreign "browser:lambda:(x,a)=>x.createTextNode(a)"
prim__createTextNode : Document -> String -> PrimIO Text
export
%foreign "browser:lambda:(x,a,b,c)=>x.createTreeWalker(a,b,c)"
prim__createTreeWalker : Document
-> Node
-> UndefOr Bits32
-> UndefOr (Nullable NodeFilter)
-> PrimIO TreeWalker
export
%foreign "browser:lambda:(x,a,b,c)=>x.execCommand(a,b,c)"
prim__execCommand : Document
-> String
-> UndefOr Boolean
-> UndefOr String
-> PrimIO Boolean
export
%foreign "browser:lambda:x=>x.getAnimations()"
prim__getAnimations : Document -> PrimIO (Array Animation)
export
%foreign "browser:lambda:(x,a)=>x.getElementsByClassName(a)"
prim__getElementsByClassName : Document -> String -> PrimIO HTMLCollection
export
%foreign "browser:lambda:(x,a)=>x.getElementsByName(a)"
prim__getElementsByName : Document -> String -> PrimIO NodeList
export
%foreign "browser:lambda:(x,a)=>x.getElementsByTagName(a)"
prim__getElementsByTagName : Document -> String -> PrimIO HTMLCollection
export
%foreign "browser:lambda:(x,a,b)=>x.getElementsByTagNameNS(a,b)"
prim__getElementsByTagNameNS : Document
-> Nullable String
-> String
-> PrimIO HTMLCollection
export
%foreign "browser:lambda:x=>x.hasFocus()"
prim__hasFocus : Document -> PrimIO Boolean
export
%foreign "browser:lambda:(x,a,b)=>x.importNode(a,b)"
prim__importNode : Document -> Node -> UndefOr Boolean -> PrimIO Node
export
%foreign "browser:lambda:(x,a,b)=>x.open(a,b)"
prim__open : Document -> UndefOr String -> UndefOr String -> PrimIO Document
export
%foreign "browser:lambda:(x,a,b,c)=>x.open(a,b,c)"
prim__open1 : Document
-> String
-> String
-> String
-> PrimIO (Nullable WindowProxy)
export
%foreign "browser:lambda:(x,a)=>x.queryCommandEnabled(a)"
prim__queryCommandEnabled : Document -> String -> PrimIO Boolean
export
%foreign "browser:lambda:(x,a)=>x.queryCommandIndeterm(a)"
prim__queryCommandIndeterm : Document -> String -> PrimIO Boolean
export
%foreign "browser:lambda:(x,a)=>x.queryCommandState(a)"
prim__queryCommandState : Document -> String -> PrimIO Boolean
export
%foreign "browser:lambda:(x,a)=>x.queryCommandSupported(a)"
prim__queryCommandSupported : Document -> String -> PrimIO Boolean
export
%foreign "browser:lambda:(x,a)=>x.queryCommandValue(a)"
prim__queryCommandValue : Document -> String -> PrimIO String
export
%foreign "browser:lambda:x=>x.releaseEvents()"
prim__releaseEvents : Document -> PrimIO ()
export
%foreign "browser:lambda:(x,va)=>x.write(...va())"
prim__write : Document -> IO (Array String) -> PrimIO ()
export
%foreign "browser:lambda:(x,va)=>x.writeln(...va())"
prim__writeln : Document -> IO (Array String) -> PrimIO ()
namespace DocumentFragment
export
%foreign "browser:lambda:()=> new DocumentFragment()"
prim__new : PrimIO DocumentFragment
namespace DocumentType
export
%foreign "browser:lambda:x=>x.name"
prim__name : DocumentType -> PrimIO String
export
%foreign "browser:lambda:x=>x.publicId"
prim__publicId : DocumentType -> PrimIO String
export
%foreign "browser:lambda:x=>x.systemId"
prim__systemId : DocumentType -> PrimIO String
namespace Element
export
%foreign "browser:lambda:x=>x.attributes"
prim__attributes : Element -> PrimIO NamedNodeMap
export
%foreign "browser:lambda:x=>x.classList"
prim__classList : Element -> PrimIO DOMTokenList
export
%foreign "browser:lambda:x=>x.className"
prim__className : Element -> PrimIO String
export
%foreign "browser:lambda:(x,v)=>{x.className = v}"
prim__setClassName : Element -> String -> PrimIO ()
export
%foreign "browser:lambda:x=>x.id"
prim__id : Element -> PrimIO String
export
%foreign "browser:lambda:(x,v)=>{x.id = v}"
prim__setId : Element -> String -> PrimIO ()
export
%foreign "browser:lambda:x=>x.localName"
prim__localName : Element -> PrimIO String
export
%foreign "browser:lambda:x=>x.namespaceURI"
prim__namespaceURI : Element -> PrimIO (Nullable String)
export
%foreign "browser:lambda:x=>x.outerHTML"
prim__outerHTML : Element -> PrimIO String
export
%foreign "browser:lambda:(x,v)=>{x.outerHTML = v}"
prim__setOuterHTML : Element -> String -> PrimIO ()
export
%foreign "browser:lambda:x=>x.prefix"
prim__prefix : Element -> PrimIO (Nullable String)
export
%foreign "browser:lambda:x=>x.shadowRoot"
prim__shadowRoot : Element -> PrimIO (Nullable ShadowRoot)
export
%foreign "browser:lambda:x=>x.slot"
prim__slot : Element -> PrimIO String
export
%foreign "browser:lambda:(x,v)=>{x.slot = v}"
prim__setSlot : Element -> String -> PrimIO ()
export
%foreign "browser:lambda:x=>x.tagName"
prim__tagName : Element -> PrimIO String
export
%foreign "browser:lambda:(x,a)=>x.attachShadow(a)"
prim__attachShadow : Element -> ShadowRootInit -> PrimIO ShadowRoot
export
%foreign "browser:lambda:(x,a)=>x.closest(a)"
prim__closest : Element -> String -> PrimIO (Nullable Element)
export
%foreign "browser:lambda:(x,a)=>x.getAttribute(a)"
prim__getAttribute : Element -> String -> PrimIO (Nullable String)
export
%foreign "browser:lambda:(x,a,b)=>x.getAttributeNS(a,b)"
prim__getAttributeNS : Element
-> Nullable String
-> String
-> PrimIO (Nullable String)
export
%foreign "browser:lambda:x=>x.getAttributeNames()"
prim__getAttributeNames : Element -> PrimIO (Array String)
export
%foreign "browser:lambda:(x,a)=>x.getAttributeNode(a)"
prim__getAttributeNode : Element -> String -> PrimIO (Nullable Attr)
export
%foreign "browser:lambda:(x,a,b)=>x.getAttributeNodeNS(a,b)"
prim__getAttributeNodeNS : Element
-> Nullable String
-> String
-> PrimIO (Nullable Attr)
export
%foreign "browser:lambda:(x,a)=>x.getElementsByClassName(a)"
prim__getElementsByClassName : Element -> String -> PrimIO HTMLCollection
export
%foreign "browser:lambda:(x,a)=>x.getElementsByTagName(a)"
prim__getElementsByTagName : Element -> String -> PrimIO HTMLCollection
export
%foreign "browser:lambda:(x,a,b)=>x.getElementsByTagNameNS(a,b)"
prim__getElementsByTagNameNS : Element
-> Nullable String
-> String
-> PrimIO HTMLCollection
export
%foreign "browser:lambda:(x,a)=>x.hasAttribute(a)"
prim__hasAttribute : Element -> String -> PrimIO Boolean
export
%foreign "browser:lambda:(x,a,b)=>x.hasAttributeNS(a,b)"
prim__hasAttributeNS : Element -> Nullable String -> String -> PrimIO Boolean
export
%foreign "browser:lambda:x=>x.hasAttributes()"
prim__hasAttributes : Element -> PrimIO Boolean
export
%foreign "browser:lambda:(x,a,b)=>x.insertAdjacentElement(a,b)"
prim__insertAdjacentElement : Element
-> String
-> Element
-> PrimIO (Nullable Element)
export
%foreign "browser:lambda:(x,a,b)=>x.insertAdjacentHTML(a,b)"
prim__insertAdjacentHTML : Element -> String -> String -> PrimIO ()
export
%foreign "browser:lambda:(x,a,b)=>x.insertAdjacentText(a,b)"
prim__insertAdjacentText : Element -> String -> String -> PrimIO ()
export
%foreign "browser:lambda:(x,a)=>x.matches(a)"
prim__matches : Element -> String -> PrimIO Boolean
export
%foreign "browser:lambda:(x,a)=>x.pseudo(a)"
prim__pseudo : Element -> String -> PrimIO (Nullable CSSPseudoElement)
export
%foreign "browser:lambda:(x,a)=>x.removeAttribute(a)"
prim__removeAttribute : Element -> String -> PrimIO ()
export
%foreign "browser:lambda:(x,a,b)=>x.removeAttributeNS(a,b)"
prim__removeAttributeNS : Element -> Nullable String -> String -> PrimIO ()
export
%foreign "browser:lambda:(x,a)=>x.removeAttributeNode(a)"
prim__removeAttributeNode : Element -> Attr -> PrimIO Attr
export
%foreign "browser:lambda:(x,a,b)=>x.setAttribute(a,b)"
prim__setAttribute : Element -> String -> String -> PrimIO ()
export
%foreign "browser:lambda:(x,a,b,c)=>x.setAttributeNS(a,b,c)"
prim__setAttributeNS : Element
-> Nullable String
-> String
-> String
-> PrimIO ()
export
%foreign "browser:lambda:(x,a)=>x.setAttributeNode(a)"
prim__setAttributeNode : Element -> Attr -> PrimIO (Nullable Attr)
export
%foreign "browser:lambda:(x,a)=>x.setAttributeNodeNS(a)"
prim__setAttributeNodeNS : Element -> Attr -> PrimIO (Nullable Attr)
export
%foreign "browser:lambda:(x,a,b)=>x.toggleAttribute(a,b)"
prim__toggleAttribute : Element -> String -> UndefOr Boolean -> PrimIO Boolean
export
%foreign "browser:lambda:(x,a)=>x.webkitMatchesSelector(a)"
prim__webkitMatchesSelector : Element -> String -> PrimIO Boolean
namespace Event
export
%foreign "browser:lambda:(a,b)=> new Event(a,b)"
prim__new : String -> UndefOr EventInit -> PrimIO Event
export
%foreign "browser:lambda:x=>x.bubbles"
prim__bubbles : Event -> PrimIO Boolean
export
%foreign "browser:lambda:x=>x.cancelBubble"
prim__cancelBubble : Event -> PrimIO Boolean
export
%foreign "browser:lambda:(x,v)=>{x.cancelBubble = v}"
prim__setCancelBubble : Event -> Boolean -> PrimIO ()
export
%foreign "browser:lambda:x=>x.cancelable"
prim__cancelable : Event -> PrimIO Boolean
export
%foreign "browser:lambda:x=>x.composed"
prim__composed : Event -> PrimIO Boolean
export
%foreign "browser:lambda:x=>x.currentTarget"
prim__currentTarget : Event -> PrimIO (Nullable EventTarget)
export
%foreign "browser:lambda:x=>x.defaultPrevented"
prim__defaultPrevented : Event -> PrimIO Boolean
export
%foreign "browser:lambda:x=>x.eventPhase"
prim__eventPhase : Event -> PrimIO Bits16
export
%foreign "browser:lambda:x=>x.isTrusted"
prim__isTrusted : Event -> PrimIO Boolean
export
%foreign "browser:lambda:x=>x.returnValue"
prim__returnValue : Event -> PrimIO Boolean
export
%foreign "browser:lambda:(x,v)=>{x.returnValue = v}"
prim__setReturnValue : Event -> Boolean -> PrimIO ()
export
%foreign "browser:lambda:x=>x.srcElement"
prim__srcElement : Event -> PrimIO (Nullable EventTarget)
export
%foreign "browser:lambda:x=>x.target"
prim__target : Event -> PrimIO (Nullable EventTarget)
export
%foreign "browser:lambda:x=>x.timeStamp"
prim__timeStamp : Event -> PrimIO Double
export
%foreign "browser:lambda:x=>x.type"
prim__type : Event -> PrimIO String
export
%foreign "browser:lambda:x=>x.composedPath()"
prim__composedPath : Event -> PrimIO (Array EventTarget)
export
%foreign "browser:lambda:(x,a,b,c)=>x.initEvent(a,b,c)"
prim__initEvent : Event
-> String
-> UndefOr Boolean
-> UndefOr Boolean
-> PrimIO ()
export
%foreign "browser:lambda:x=>x.preventDefault()"
prim__preventDefault : Event -> PrimIO ()
export
%foreign "browser:lambda:x=>x.stopImmediatePropagation()"
prim__stopImmediatePropagation : Event -> PrimIO ()
export
%foreign "browser:lambda:x=>x.stopPropagation()"
prim__stopPropagation : Event -> PrimIO ()
namespace EventTarget
export
%foreign "browser:lambda:()=> new EventTarget()"
prim__new : PrimIO EventTarget
export
%foreign "browser:lambda:(x,a,b,c)=>x.addEventListener(a,b,c)"
prim__addEventListener : EventTarget
-> String
-> Nullable EventListener
-> UndefOr (Union2 AddEventListenerOptions Boolean)
-> PrimIO ()
export
%foreign "browser:lambda:(x,a)=>x.dispatchEvent(a)"
prim__dispatchEvent : EventTarget -> Event -> PrimIO Boolean
export
%foreign "browser:lambda:(x,a,b,c)=>x.removeEventListener(a,b,c)"
prim__removeEventListener : EventTarget
-> String
-> Nullable EventListener
-> UndefOr (Union2 EventListenerOptions Boolean)
-> PrimIO ()
namespace HTMLCollection
export
%foreign "browser:lambda:x=>x.length"
prim__length : HTMLCollection -> PrimIO Bits32
export
%foreign "browser:lambda:(x,a)=>x.item(a)"
prim__item : HTMLCollection -> Bits32 -> PrimIO (Nullable Element)
export
%foreign "browser:lambda:(x,a)=>x.namedItem(a)"
prim__namedItem : HTMLCollection -> String -> PrimIO (Nullable Element)
namespace MutationObserver
export
%foreign "browser:lambda:(a)=> new MutationObserver(a)"
prim__new : MutationCallback -> PrimIO MutationObserver
export
%foreign "browser:lambda:x=>x.disconnect()"
prim__disconnect : MutationObserver -> PrimIO ()
export
%foreign "browser:lambda:(x,a,b)=>x.observe(a,b)"
prim__observe : MutationObserver
-> Node
-> UndefOr MutationObserverInit
-> PrimIO ()
export
%foreign "browser:lambda:x=>x.takeRecords()"
prim__takeRecords : MutationObserver -> PrimIO (Array MutationRecord)
namespace MutationRecord
export
%foreign "browser:lambda:x=>x.addedNodes"
prim__addedNodes : MutationRecord -> PrimIO NodeList
export
%foreign "browser:lambda:x=>x.attributeName"
prim__attributeName : MutationRecord -> PrimIO (Nullable String)
export
%foreign "browser:lambda:x=>x.attributeNamespace"
prim__attributeNamespace : MutationRecord -> PrimIO (Nullable String)
export
%foreign "browser:lambda:x=>x.nextSibling"
prim__nextSibling : MutationRecord -> PrimIO (Nullable Node)
export
%foreign "browser:lambda:x=>x.oldValue"
prim__oldValue : MutationRecord -> PrimIO (Nullable String)
export
%foreign "browser:lambda:x=>x.previousSibling"
prim__previousSibling : MutationRecord -> PrimIO (Nullable Node)
export
%foreign "browser:lambda:x=>x.removedNodes"
prim__removedNodes : MutationRecord -> PrimIO NodeList
export
%foreign "browser:lambda:x=>x.target"
prim__target : MutationRecord -> PrimIO Node
export
%foreign "browser:lambda:x=>x.type"
prim__type : MutationRecord -> PrimIO String
namespace NamedNodeMap
export
%foreign "browser:lambda:x=>x.length"
prim__length : NamedNodeMap -> PrimIO Bits32
export
%foreign "browser:lambda:(x,a,b)=>x.getNamedItemNS(a,b)"
prim__getNamedItemNS : NamedNodeMap
-> Nullable String
-> String
-> PrimIO (Nullable Attr)
export
%foreign "browser:lambda:(x,a)=>x.getNamedItem(a)"
prim__getNamedItem : NamedNodeMap -> String -> PrimIO (Nullable Attr)
export
%foreign "browser:lambda:(x,a)=>x.item(a)"
prim__item : NamedNodeMap -> Bits32 -> PrimIO (Nullable Attr)
export
%foreign "browser:lambda:(x,a,b)=>x.removeNamedItemNS(a,b)"
prim__removeNamedItemNS : NamedNodeMap
-> Nullable String
-> String
-> PrimIO Attr
export
%foreign "browser:lambda:(x,a)=>x.removeNamedItem(a)"
prim__removeNamedItem : NamedNodeMap -> String -> PrimIO Attr
export
%foreign "browser:lambda:(x,a)=>x.setNamedItemNS(a)"
prim__setNamedItemNS : NamedNodeMap -> Attr -> PrimIO (Nullable Attr)
export
%foreign "browser:lambda:(x,a)=>x.setNamedItem(a)"
prim__setNamedItem : NamedNodeMap -> Attr -> PrimIO (Nullable Attr)
namespace Node
export
%foreign "browser:lambda:x=>x.baseURI"
prim__baseURI : Node -> PrimIO String
export
%foreign "browser:lambda:x=>x.childNodes"
prim__childNodes : Node -> PrimIO NodeList
export
%foreign "browser:lambda:x=>x.firstChild"
prim__firstChild : Node -> PrimIO (Nullable Node)
export
%foreign "browser:lambda:x=>x.isConnected"
prim__isConnected : Node -> PrimIO Boolean
export
%foreign "browser:lambda:x=>x.lastChild"
prim__lastChild : Node -> PrimIO (Nullable Node)
export
%foreign "browser:lambda:x=>x.nextSibling"
prim__nextSibling : Node -> PrimIO (Nullable Node)
export
%foreign "browser:lambda:x=>x.nodeName"
prim__nodeName : Node -> PrimIO String
export
%foreign "browser:lambda:x=>x.nodeType"
prim__nodeType : Node -> PrimIO Bits16
export
%foreign "browser:lambda:x=>x.nodeValue"
prim__nodeValue : Node -> PrimIO (Nullable String)
export
%foreign "browser:lambda:(x,v)=>{x.nodeValue = v}"
prim__setNodeValue : Node -> Nullable String -> PrimIO ()
export
%foreign "browser:lambda:x=>x.ownerDocument"
prim__ownerDocument : Node -> PrimIO (Nullable Document)
export
%foreign "browser:lambda:x=>x.parentElement"
prim__parentElement : Node -> PrimIO (Nullable Element)
export
%foreign "browser:lambda:x=>x.parentNode"
prim__parentNode : Node -> PrimIO (Nullable Node)
export
%foreign "browser:lambda:x=>x.previousSibling"
prim__previousSibling : Node -> PrimIO (Nullable Node)
export
%foreign "browser:lambda:x=>x.textContent"
prim__textContent : Node -> PrimIO (Nullable String)
export
%foreign "browser:lambda:(x,v)=>{x.textContent = v}"
prim__setTextContent : Node -> Nullable String -> PrimIO ()
export
%foreign "browser:lambda:(x,a)=>x.appendChild(a)"
prim__appendChild : Node -> Node -> PrimIO Node
export
%foreign "browser:lambda:(x,a)=>x.cloneNode(a)"
prim__cloneNode : Node -> UndefOr Boolean -> PrimIO Node
export
%foreign "browser:lambda:(x,a)=>x.compareDocumentPosition(a)"
prim__compareDocumentPosition : Node -> Node -> PrimIO Bits16
export
%foreign "browser:lambda:(x,a)=>x.contains(a)"
prim__contains : Node -> Nullable Node -> PrimIO Boolean
export
%foreign "browser:lambda:(x,a)=>x.getRootNode(a)"
prim__getRootNode : Node -> UndefOr GetRootNodeOptions -> PrimIO Node
export
%foreign "browser:lambda:x=>x.hasChildNodes()"
prim__hasChildNodes : Node -> PrimIO Boolean
export
%foreign "browser:lambda:(x,a,b)=>x.insertBefore(a,b)"
prim__insertBefore : Node -> Node -> Nullable Node -> PrimIO Node
export
%foreign "browser:lambda:(x,a)=>x.isDefaultNamespace(a)"
prim__isDefaultNamespace : Node -> Nullable String -> PrimIO Boolean
export
%foreign "browser:lambda:(x,a)=>x.isEqualNode(a)"
prim__isEqualNode : Node -> Nullable Node -> PrimIO Boolean
export
%foreign "browser:lambda:(x,a)=>x.isSameNode(a)"
prim__isSameNode : Node -> Nullable Node -> PrimIO Boolean
export
%foreign "browser:lambda:(x,a)=>x.lookupNamespaceURI(a)"
prim__lookupNamespaceURI : Node -> Nullable String -> PrimIO (Nullable String)
export
%foreign "browser:lambda:(x,a)=>x.lookupPrefix(a)"
prim__lookupPrefix : Node -> Nullable String -> PrimIO (Nullable String)
export
%foreign "browser:lambda:x=>x.normalize()"
prim__normalize : Node -> PrimIO ()
export
%foreign "browser:lambda:(x,a)=>x.removeChild(a)"
prim__removeChild : Node -> Node -> PrimIO Node
export
%foreign "browser:lambda:(x,a,b)=>x.replaceChild(a,b)"
prim__replaceChild : Node -> Node -> Node -> PrimIO Node
namespace NodeIterator
export
%foreign "browser:lambda:x=>x.filter"
prim__filter : NodeIterator -> PrimIO (Nullable NodeFilter)
export
%foreign "browser:lambda:x=>x.pointerBeforeReferenceNode"
prim__pointerBeforeReferenceNode : NodeIterator -> PrimIO Boolean
export
%foreign "browser:lambda:x=>x.referenceNode"
prim__referenceNode : NodeIterator -> PrimIO Node
export
%foreign "browser:lambda:x=>x.root"
prim__root : NodeIterator -> PrimIO Node
export
%foreign "browser:lambda:x=>x.whatToShow"
prim__whatToShow : NodeIterator -> PrimIO Bits32
export
%foreign "browser:lambda:x=>x.detach()"
prim__detach : NodeIterator -> PrimIO ()
export
%foreign "browser:lambda:x=>x.nextNode()"
prim__nextNode : NodeIterator -> PrimIO (Nullable Node)
export
%foreign "browser:lambda:x=>x.previousNode()"
prim__previousNode : NodeIterator -> PrimIO (Nullable Node)
namespace NodeList
export
%foreign "browser:lambda:x=>x.length"
prim__length : NodeList -> PrimIO Bits32
export
%foreign "browser:lambda:(x,a)=>x.item(a)"
prim__item : NodeList -> Bits32 -> PrimIO (Nullable Node)
namespace Performance
export
%foreign "browser:lambda:x=>x.timeOrigin"
prim__timeOrigin : Performance -> PrimIO Double
export
%foreign "browser:lambda:x=>x.now()"
prim__now : Performance -> PrimIO Double
export
%foreign "browser:lambda:x=>x.toJSON()"
prim__toJSON : Performance -> PrimIO Object
namespace ProcessingInstruction
export
%foreign "browser:lambda:x=>x.target"
prim__target : ProcessingInstruction -> PrimIO String
namespace Range
export
%foreign "browser:lambda:()=> new Range()"
prim__new : PrimIO Range
export
%foreign "browser:lambda:x=>x.commonAncestorContainer"
prim__commonAncestorContainer : Range -> PrimIO Node
export
%foreign "browser:lambda:x=>x.cloneContents()"
prim__cloneContents : Range -> PrimIO DocumentFragment
export
%foreign "browser:lambda:x=>x.cloneRange()"
prim__cloneRange : Range -> PrimIO Range
export
%foreign "browser:lambda:(x,a)=>x.collapse(a)"
prim__collapse : Range -> UndefOr Boolean -> PrimIO ()
export
%foreign "browser:lambda:(x,a,b)=>x.compareBoundaryPoints(a,b)"
prim__compareBoundaryPoints : Range -> Bits16 -> Range -> PrimIO Int16
export
%foreign "browser:lambda:(x,a,b)=>x.comparePoint(a,b)"
prim__comparePoint : Range -> Node -> Bits32 -> PrimIO Int16
export
%foreign "browser:lambda:(x,a)=>x.createContextualFragment(a)"
prim__createContextualFragment : Range -> String -> PrimIO DocumentFragment
export
%foreign "browser:lambda:x=>x.deleteContents()"
prim__deleteContents : Range -> PrimIO ()
export
%foreign "browser:lambda:x=>x.detach()"
prim__detach : Range -> PrimIO ()
export
%foreign "browser:lambda:x=>x.extractContents()"
prim__extractContents : Range -> PrimIO DocumentFragment
export
%foreign "browser:lambda:(x,a)=>x.insertNode(a)"
prim__insertNode : Range -> Node -> PrimIO ()
export
%foreign "browser:lambda:(x,a)=>x.intersectsNode(a)"
prim__intersectsNode : Range -> Node -> PrimIO Boolean
export
%foreign "browser:lambda:(x,a,b)=>x.isPointInRange(a,b)"
prim__isPointInRange : Range -> Node -> Bits32 -> PrimIO Boolean
export
%foreign "browser:lambda:(x,a)=>x.selectNodeContents(a)"
prim__selectNodeContents : Range -> Node -> PrimIO ()
export
%foreign "browser:lambda:(x,a)=>x.selectNode(a)"
prim__selectNode : Range -> Node -> PrimIO ()
export
%foreign "browser:lambda:(x,a)=>x.setEndAfter(a)"
prim__setEndAfter : Range -> Node -> PrimIO ()
export
%foreign "browser:lambda:(x,a)=>x.setEndBefore(a)"
prim__setEndBefore : Range -> Node -> PrimIO ()
export
%foreign "browser:lambda:(x,a,b)=>x.setEnd(a,b)"
prim__setEnd : Range -> Node -> Bits32 -> PrimIO ()
export
%foreign "browser:lambda:(x,a)=>x.setStartAfter(a)"
prim__setStartAfter : Range -> Node -> PrimIO ()
export
%foreign "browser:lambda:(x,a)=>x.setStartBefore(a)"
prim__setStartBefore : Range -> Node -> PrimIO ()
export
%foreign "browser:lambda:(x,a,b)=>x.setStart(a,b)"
prim__setStart : Range -> Node -> Bits32 -> PrimIO ()
export
%foreign "browser:lambda:(x,a)=>x.surroundContents(a)"
prim__surroundContents : Range -> Node -> PrimIO ()
export
%foreign "browser:lambda:x=>x.toString()"
prim__toString : Range -> PrimIO String
namespace ShadowRoot
export
%foreign "browser:lambda:x=>x.host"
prim__host : ShadowRoot -> PrimIO Element
export
%foreign "browser:lambda:x=>x.mode"
prim__mode : ShadowRoot -> PrimIO String
export
%foreign "browser:lambda:x=>x.onslotchange"
prim__onslotchange : ShadowRoot -> PrimIO (Nullable EventHandlerNonNull)
export
%foreign "browser:lambda:(x,v)=>{x.onslotchange = v}"
prim__setOnslotchange : ShadowRoot
-> Nullable EventHandlerNonNull
-> PrimIO ()
namespace StaticRange
export
%foreign "browser:lambda:(a)=> new StaticRange(a)"
prim__new : StaticRangeInit -> PrimIO StaticRange
namespace Text
export
%foreign "browser:lambda:(a)=> new Text(a)"
prim__new : UndefOr String -> PrimIO Text
export
%foreign "browser:lambda:x=>x.wholeText"
prim__wholeText : Text -> PrimIO String
export
%foreign "browser:lambda:(x,a)=>x.splitText(a)"
prim__splitText : Text -> Bits32 -> PrimIO Text
namespace TreeWalker
export
%foreign "browser:lambda:x=>x.currentNode"
prim__currentNode : TreeWalker -> PrimIO Node
export
%foreign "browser:lambda:(x,v)=>{x.currentNode = v}"
prim__setCurrentNode : TreeWalker -> Node -> PrimIO ()
export
%foreign "browser:lambda:x=>x.filter"
prim__filter : TreeWalker -> PrimIO (Nullable NodeFilter)
export
%foreign "browser:lambda:x=>x.root"
prim__root : TreeWalker -> PrimIO Node
export
%foreign "browser:lambda:x=>x.whatToShow"
prim__whatToShow : TreeWalker -> PrimIO Bits32
export
%foreign "browser:lambda:x=>x.firstChild()"
prim__firstChild : TreeWalker -> PrimIO (Nullable Node)
export
%foreign "browser:lambda:x=>x.lastChild()"
prim__lastChild : TreeWalker -> PrimIO (Nullable Node)
export
%foreign "browser:lambda:x=>x.nextNode()"
prim__nextNode : TreeWalker -> PrimIO (Nullable Node)
export
%foreign "browser:lambda:x=>x.nextSibling()"
prim__nextSibling : TreeWalker -> PrimIO (Nullable Node)
export
%foreign "browser:lambda:x=>x.parentNode()"
prim__parentNode : TreeWalker -> PrimIO (Nullable Node)
export
%foreign "browser:lambda:x=>x.previousNode()"
prim__previousNode : TreeWalker -> PrimIO (Nullable Node)
export
%foreign "browser:lambda:x=>x.previousSibling()"
prim__previousSibling : TreeWalker -> PrimIO (Nullable Node)
namespace XMLSerializer
export
%foreign "browser:lambda:()=> new XMLSerializer()"
prim__new : PrimIO XMLSerializer
export
%foreign "browser:lambda:(x,a)=>x.serializeToString(a)"
prim__serializeToString : XMLSerializer -> Node -> PrimIO String
namespace XPathEvaluator
export
%foreign "browser:lambda:()=> new XPathEvaluator()"
prim__new : PrimIO XPathEvaluator
namespace XPathExpression
export
%foreign "browser:lambda:(x,a,b,c)=>x.evaluate(a,b,c)"
prim__evaluate : XPathExpression
-> Node
-> UndefOr Bits16
-> UndefOr (Nullable XPathResult)
-> PrimIO XPathResult
namespace XPathResult
export
%foreign "browser:lambda:x=>x.booleanValue"
prim__booleanValue : XPathResult -> PrimIO Boolean
export
%foreign "browser:lambda:x=>x.invalidIteratorState"
prim__invalidIteratorState : XPathResult -> PrimIO Boolean
export
%foreign "browser:lambda:x=>x.numberValue"
prim__numberValue : XPathResult -> PrimIO Double
export
%foreign "browser:lambda:x=>x.resultType"
prim__resultType : XPathResult -> PrimIO Bits16
export
%foreign "browser:lambda:x=>x.singleNodeValue"
prim__singleNodeValue : XPathResult -> PrimIO (Nullable Node)
export
%foreign "browser:lambda:x=>x.snapshotLength"
prim__snapshotLength : XPathResult -> PrimIO Bits32
export
%foreign "browser:lambda:x=>x.stringValue"
prim__stringValue : XPathResult -> PrimIO String
export
%foreign "browser:lambda:x=>x.iterateNext()"
prim__iterateNext : XPathResult -> PrimIO (Nullable Node)
export
%foreign "browser:lambda:(x,a)=>x.snapshotItem(a)"
prim__snapshotItem : XPathResult -> Bits32 -> PrimIO (Nullable Node)
--------------------------------------------------------------------------------
-- Mixins
--------------------------------------------------------------------------------
namespace ChildNode
export
%foreign "browser:lambda:(x,va)=>x.after(...va())"
prim__after : ChildNode -> IO (Array (Union2 Node String)) -> PrimIO ()
export
%foreign "browser:lambda:(x,va)=>x.before(...va())"
prim__before : ChildNode -> IO (Array (Union2 Node String)) -> PrimIO ()
export
%foreign "browser:lambda:x=>x.remove()"
prim__remove : ChildNode -> PrimIO ()
export
%foreign "browser:lambda:(x,va)=>x.replaceWith(...va())"
prim__replaceWith : ChildNode -> IO (Array (Union2 Node String)) -> PrimIO ()
namespace DocumentOrShadowRoot
export
%foreign "browser:lambda:x=>x.styleSheets"
prim__styleSheets : DocumentOrShadowRoot -> PrimIO StyleSheetList
namespace InnerHTML
export
%foreign "browser:lambda:x=>x.innerHTML"
prim__innerHTML : InnerHTML -> PrimIO String
export
%foreign "browser:lambda:(x,v)=>{x.innerHTML = v}"
prim__setInnerHTML : InnerHTML -> String -> PrimIO ()
namespace NonDocumentTypeChildNode
export
%foreign "browser:lambda:x=>x.nextElementSibling"
prim__nextElementSibling : NonDocumentTypeChildNode
-> PrimIO (Nullable Element)
export
%foreign "browser:lambda:x=>x.previousElementSibling"
prim__previousElementSibling : NonDocumentTypeChildNode
-> PrimIO (Nullable Element)
namespace NonElementParentNode
export
%foreign "browser:lambda:(x,a)=>x.getElementById(a)"
prim__getElementById : NonElementParentNode
-> String
-> PrimIO (Nullable Element)
namespace ParentNode
export
%foreign "browser:lambda:x=>x.childElementCount"
prim__childElementCount : ParentNode -> PrimIO Bits32
export
%foreign "browser:lambda:x=>x.children"
prim__children : ParentNode -> PrimIO HTMLCollection
export
%foreign "browser:lambda:x=>x.firstElementChild"
prim__firstElementChild : ParentNode -> PrimIO (Nullable Element)
export
%foreign "browser:lambda:x=>x.lastElementChild"
prim__lastElementChild : ParentNode -> PrimIO (Nullable Element)
export
%foreign "browser:lambda:(x,va)=>x.append(...va())"
prim__append : ParentNode -> IO (Array (Union2 Node String)) -> PrimIO ()
export
%foreign "browser:lambda:(x,va)=>x.prepend(...va())"
prim__prepend : ParentNode -> IO (Array (Union2 Node String)) -> PrimIO ()
export
%foreign "browser:lambda:(x,a)=>x.querySelectorAll(a)"
prim__querySelectorAll : ParentNode -> String -> PrimIO NodeList
export
%foreign "browser:lambda:(x,a)=>x.querySelector(a)"
prim__querySelector : ParentNode -> String -> PrimIO (Nullable Element)
export
%foreign "browser:lambda:(x,va)=>x.replaceChildren(...va())"
prim__replaceChildren : ParentNode
-> IO (Array (Union2 Node String))
-> PrimIO ()
namespace Slottable
export
%foreign "browser:lambda:x=>x.assignedSlot"
prim__assignedSlot : Slottable -> PrimIO (Nullable HTMLSlotElement)
namespace XPathEvaluatorBase
export
%foreign "browser:lambda:(x,a,b)=>x.createExpression(a,b)"
prim__createExpression : XPathEvaluatorBase
-> String
-> UndefOr (Nullable XPathNSResolver)
-> PrimIO XPathExpression
export
%foreign "browser:lambda:(x,a)=>x.createNSResolver(a)"
prim__createNSResolver : XPathEvaluatorBase -> Node -> PrimIO XPathNSResolver
export
%foreign "browser:lambda:(x,a,b,c,d,e)=>x.evaluate(a,b,c,d,e)"
prim__evaluate : XPathEvaluatorBase
-> String
-> Node
-> UndefOr (Nullable XPathNSResolver)
-> UndefOr Bits16
-> UndefOr (Nullable XPathResult)
-> PrimIO XPathResult
--------------------------------------------------------------------------------
-- Dictionaries
--------------------------------------------------------------------------------
namespace AddEventListenerOptions
export
%foreign "browser:lambda:(a,b,c)=> {passive: a,once: b,signal: c}"
prim__new : UndefOr Boolean
-> UndefOr Boolean
-> UndefOr AbortSignal
-> PrimIO AddEventListenerOptions
export
%foreign "browser:lambda:x=>x.once"
prim__once : AddEventListenerOptions -> PrimIO (UndefOr Boolean)
export
%foreign "browser:lambda:(x,v)=>{x.once = v}"
prim__setOnce : AddEventListenerOptions -> UndefOr Boolean -> PrimIO ()
export
%foreign "browser:lambda:x=>x.passive"
prim__passive : AddEventListenerOptions -> PrimIO (UndefOr Boolean)
export
%foreign "browser:lambda:(x,v)=>{x.passive = v}"
prim__setPassive : AddEventListenerOptions -> UndefOr Boolean -> PrimIO ()
export
%foreign "browser:lambda:x=>x.signal"
prim__signal : AddEventListenerOptions -> PrimIO (UndefOr AbortSignal)
export
%foreign "browser:lambda:(x,v)=>{x.signal = v}"
prim__setSignal : AddEventListenerOptions -> UndefOr AbortSignal -> PrimIO ()
namespace CustomEventInit
export
%foreign "browser:lambda:(a)=> {detail: a}"
prim__new : UndefOr AnyPtr -> PrimIO CustomEventInit
export
%foreign "browser:lambda:x=>x.detail"
prim__detail : CustomEventInit -> PrimIO (UndefOr AnyPtr)
export
%foreign "browser:lambda:(x,v)=>{x.detail = v}"
prim__setDetail : CustomEventInit -> UndefOr AnyPtr -> PrimIO ()
namespace ElementCreationOptions
export
%foreign "browser:lambda:(a)=> {is: a}"
prim__new : UndefOr String -> PrimIO ElementCreationOptions
export
%foreign "browser:lambda:x=>x.is"
prim__is : ElementCreationOptions -> PrimIO (UndefOr String)
export
%foreign "browser:lambda:(x,v)=>{x.is = v}"
prim__setIs : ElementCreationOptions -> UndefOr String -> PrimIO ()
namespace EventInit
export
%foreign "browser:lambda:(a,b,c)=> {bubbles: a,cancelable: b,composed: c}"
prim__new : UndefOr Boolean
-> UndefOr Boolean
-> UndefOr Boolean
-> PrimIO EventInit
export
%foreign "browser:lambda:x=>x.bubbles"
prim__bubbles : EventInit -> PrimIO (UndefOr Boolean)
export
%foreign "browser:lambda:(x,v)=>{x.bubbles = v}"
prim__setBubbles : EventInit -> UndefOr Boolean -> PrimIO ()
export
%foreign "browser:lambda:x=>x.cancelable"
prim__cancelable : EventInit -> PrimIO (UndefOr Boolean)
export
%foreign "browser:lambda:(x,v)=>{x.cancelable = v}"
prim__setCancelable : EventInit -> UndefOr Boolean -> PrimIO ()
export
%foreign "browser:lambda:x=>x.composed"
prim__composed : EventInit -> PrimIO (UndefOr Boolean)
export
%foreign "browser:lambda:(x,v)=>{x.composed = v}"
prim__setComposed : EventInit -> UndefOr Boolean -> PrimIO ()
namespace EventListenerOptions
export
%foreign "browser:lambda:(a)=> {capture: a}"
prim__new : UndefOr Boolean -> PrimIO EventListenerOptions
export
%foreign "browser:lambda:x=>x.capture"
prim__capture : EventListenerOptions -> PrimIO (UndefOr Boolean)
export
%foreign "browser:lambda:(x,v)=>{x.capture = v}"
prim__setCapture : EventListenerOptions -> UndefOr Boolean -> PrimIO ()
namespace GetRootNodeOptions
export
%foreign "browser:lambda:(a)=> {composed: a}"
prim__new : UndefOr Boolean -> PrimIO GetRootNodeOptions
export
%foreign "browser:lambda:x=>x.composed"
prim__composed : GetRootNodeOptions -> PrimIO (UndefOr Boolean)
export
%foreign "browser:lambda:(x,v)=>{x.composed = v}"
prim__setComposed : GetRootNodeOptions -> UndefOr Boolean -> PrimIO ()
namespace MutationObserverInit
export
%foreign "browser:lambda:(a,b,c,d,e,f,g)=> {childList: a,attributes: b,characterData: c,subtree: d,attributeOldValue: e,characterDataOldValue: f,attributeFilter: g}"
prim__new : UndefOr Boolean
-> UndefOr Boolean
-> UndefOr Boolean
-> UndefOr Boolean
-> UndefOr Boolean
-> UndefOr Boolean
-> UndefOr (Array String)
-> PrimIO MutationObserverInit
export
%foreign "browser:lambda:x=>x.attributeFilter"
prim__attributeFilter : MutationObserverInit
-> PrimIO (UndefOr (Array String))
export
%foreign "browser:lambda:(x,v)=>{x.attributeFilter = v}"
prim__setAttributeFilter : MutationObserverInit
-> UndefOr (Array String)
-> PrimIO ()
export
%foreign "browser:lambda:x=>x.attributeOldValue"
prim__attributeOldValue : MutationObserverInit -> PrimIO (UndefOr Boolean)
export
%foreign "browser:lambda:(x,v)=>{x.attributeOldValue = v}"
prim__setAttributeOldValue : MutationObserverInit
-> UndefOr Boolean
-> PrimIO ()
export
%foreign "browser:lambda:x=>x.attributes"
prim__attributes : MutationObserverInit -> PrimIO (UndefOr Boolean)
export
%foreign "browser:lambda:(x,v)=>{x.attributes = v}"
prim__setAttributes : MutationObserverInit -> UndefOr Boolean -> PrimIO ()
export
%foreign "browser:lambda:x=>x.characterData"
prim__characterData : MutationObserverInit -> PrimIO (UndefOr Boolean)
export
%foreign "browser:lambda:(x,v)=>{x.characterData = v}"
prim__setCharacterData : MutationObserverInit -> UndefOr Boolean -> PrimIO ()
export
%foreign "browser:lambda:x=>x.characterDataOldValue"
prim__characterDataOldValue : MutationObserverInit -> PrimIO (UndefOr Boolean)
export
%foreign "browser:lambda:(x,v)=>{x.characterDataOldValue = v}"
prim__setCharacterDataOldValue : MutationObserverInit
-> UndefOr Boolean
-> PrimIO ()
export
%foreign "browser:lambda:x=>x.childList"
prim__childList : MutationObserverInit -> PrimIO (UndefOr Boolean)
export
%foreign "browser:lambda:(x,v)=>{x.childList = v}"
prim__setChildList : MutationObserverInit -> UndefOr Boolean -> PrimIO ()
export
%foreign "browser:lambda:x=>x.subtree"
prim__subtree : MutationObserverInit -> PrimIO (UndefOr Boolean)
export
%foreign "browser:lambda:(x,v)=>{x.subtree = v}"
prim__setSubtree : MutationObserverInit -> UndefOr Boolean -> PrimIO ()
namespace ShadowRootInit
export
%foreign "browser:lambda:(a,b)=> {mode: a,delegatesFocus: b}"
prim__new : String -> UndefOr Boolean -> PrimIO ShadowRootInit
export
%foreign "browser:lambda:x=>x.delegatesFocus"
prim__delegatesFocus : ShadowRootInit -> PrimIO (UndefOr Boolean)
export
%foreign "browser:lambda:(x,v)=>{x.delegatesFocus = v}"
prim__setDelegatesFocus : ShadowRootInit -> UndefOr Boolean -> PrimIO ()
export
%foreign "browser:lambda:x=>x.mode"
prim__mode : ShadowRootInit -> PrimIO String
export
%foreign "browser:lambda:(x,v)=>{x.mode = v}"
prim__setMode : ShadowRootInit -> String -> PrimIO ()
namespace StaticRangeInit
export
%foreign "browser:lambda:(a,b,c,d)=> {startContainer: a,startOffset: b,endContainer: c,endOffset: d}"
prim__new : Node -> Bits32 -> Node -> Bits32 -> PrimIO StaticRangeInit
export
%foreign "browser:lambda:x=>x.endContainer"
prim__endContainer : StaticRangeInit -> PrimIO Node
export
%foreign "browser:lambda:(x,v)=>{x.endContainer = v}"
prim__setEndContainer : StaticRangeInit -> Node -> PrimIO ()
export
%foreign "browser:lambda:x=>x.endOffset"
prim__endOffset : StaticRangeInit -> PrimIO Bits32
export
%foreign "browser:lambda:(x,v)=>{x.endOffset = v}"
prim__setEndOffset : StaticRangeInit -> Bits32 -> PrimIO ()
export
%foreign "browser:lambda:x=>x.startContainer"
prim__startContainer : StaticRangeInit -> PrimIO Node
export
%foreign "browser:lambda:(x,v)=>{x.startContainer = v}"
prim__setStartContainer : StaticRangeInit -> Node -> PrimIO ()
export
%foreign "browser:lambda:x=>x.startOffset"
prim__startOffset : StaticRangeInit -> PrimIO Bits32
export
%foreign "browser:lambda:(x,v)=>{x.startOffset = v}"
prim__setStartOffset : StaticRangeInit -> Bits32 -> PrimIO ()
--------------------------------------------------------------------------------
-- Callbacks
--------------------------------------------------------------------------------
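-- The conversions below wrap an Idris callback as a plain JavaScript
-- function. In a pattern like `x=>(a)=>x(a)()`, `x(a)` builds the IO
-- action and the trailing `()` runs it, so the browser sees an ordinary
-- eagerly-executing callback.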
namespace EventListener
export
%foreign "browser:lambda:x=>(a)=>x(a)()"
prim__toEventListener : ( Event -> IO () ) -> PrimIO EventListener
namespace MutationCallback
export
%foreign "browser:lambda:x=>(a,b)=>x(a,b)()"
prim__toMutationCallback : ( Array MutationRecord
-> MutationObserver
-> IO ()
)
-> PrimIO MutationCallback
namespace NodeFilter
export
%foreign "browser:lambda:x=>(a)=>x(a)()"
prim__toNodeFilter : ( Node -> IO Bits16 ) -> PrimIO NodeFilter
namespace XPathNSResolver
export
%foreign "browser:lambda:x=>(a)=>x(a)()"
prim__toXPathNSResolver : ( Nullable String -> IO (Nullable String) )
-> PrimIO XPathNSResolver
|
[STATEMENT]
lemma is_empty_min_max:
"\<not> is_empty_rep (l1,h1) \<Longrightarrow> \<not> is_empty_rep (l2, h2) \<Longrightarrow> \<not> is_empty_rep (min l1 l2, max h1 h2)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<not> is_empty_rep (l1, h1); \<not> is_empty_rep (l2, h2)\<rbrakk> \<Longrightarrow> \<not> is_empty_rep (min l1 l2, max h1 h2)
[PROOF STEP]
by(auto simp add: is_empty_rep_def max_def min_def split: if_splits)
|
||| Properties of factorial functions
module Data.Nat.Fact
import Data.Nat
%default total
||| Recursive definition of factorial.
factRec : Nat -> Nat
factRec Z = 1
factRec (S k) = (S k) * factRec k
||| Tail-recursive accumulator for factItr.
factAcc : Nat -> Nat -> Nat
factAcc Z acc = acc
factAcc (S k) acc = factAcc k $ (S k) * acc
||| Iterative definition of factorial.
factItr : Nat -> Nat
factItr n = factAcc n 1
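-- For illustration, the accumulator threads the running product:
-- factItr 3 = factAcc 3 1 = factAcc 2 3 = factAcc 1 6 = factAcc 0 6 = 6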
----------------------------------------
||| Multiplicand-shuffling lemma.
multShuffle : (a, b, c : Nat) -> a * (b * c) = b * (a * c)
multShuffle a b c =
rewrite multAssociative a b c in
rewrite multCommutative a b in
sym $ multAssociative b a c
||| Multiplication of the accumulator.
factAccMult : (a, b, c : Nat) ->
a * factAcc b c = factAcc b (a * c)
factAccMult _ Z _ = Refl
factAccMult a (S k) c =
rewrite factAccMult a k (S k * c) in
rewrite multShuffle a (S k) c in
Refl
||| Addition of accumulators.
factAccPlus : (a, b, c : Nat) ->
factAcc a b + factAcc a c = factAcc a (b + c)
factAccPlus Z _ _ = Refl
factAccPlus (S k) b c =
rewrite factAccPlus k (S k * b) (S k * c) in
rewrite sym $ multDistributesOverPlusRight (S k) b c in
Refl
||| The recursive and iterative definitions are equivalent.
factRecItr : (n : Nat) -> factRec n = factItr n
factRecItr Z = Refl
factRecItr (S k) =
rewrite factRecItr k in
rewrite factAccMult k k 1 in
rewrite multOneRightNeutral k in
factAccPlus k 1 k
|
(* *********************************************************************)
(* *)
(* The Compcert verified compiler *)
(* *)
(* Xavier Leroy, INRIA Paris-Rocquencourt *)
(* *)
(* Copyright Institut National de Recherche en Informatique et en *)
(* Automatique. All rights reserved. This file is distributed *)
(* under the terms of the INRIA Non-Commercial License Agreement. *)
(* *)
(* *********************************************************************)
(** Translation from Mach to PPC. *)
Require Import Coqlib.
Require Import Maps.
Require Import Errors.
Require Import AST.
Require Import Integers.
Require Import Floats.
Require Import Values.
Require Import Memory.
Require Import Globalenvs.
Require Import Op.
Require Import Locations.
Require Import Mach.
Require Import Asm.
(** Decomposition of integer constants. As noted in file [Asm],
immediate arguments to PowerPC instructions must fit into 16 bits,
and are interpreted after zero extension, sign extension, or
left shift by 16 bits, depending on the instruction. Integer
constants that do not fit must be synthesized using two
processor instructions. The following functions decompose
arbitrary 32-bit integers into two 16-bit halves (high and low
halves). They satisfy the following properties:
- [low_u n] is an unsigned 16-bit integer;
- [low_s n] is a signed 16-bit integer;
- [(high_u n) << 16 | low_u n] equals [n];
- [(high_s n) << 16 + low_s n] equals [n].
*)
Definition low_u (n: int) := Int.and n (Int.repr 65535).
Definition high_u (n: int) := Int.shru n (Int.repr 16).
Definition low_s (n: int) := Int.sign_ext 16 n.
Definition high_s (n: int) := Int.shru (Int.sub n (low_s n)) (Int.repr 16).
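(** For example, with [n = 0x12345678]: [low_u n = 0x5678],
  [high_u n = 0x1234], [low_s n = 0x5678] (positive, since its top bit
  is clear), and [high_s n = 0x1234], so both reconstruction equations
  above hold. Had the low half been [0x8000] or more, [low_s n] would
  be negative and [high_s n] would exceed [high_u n] by one. *)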
(** Smart constructors for arithmetic operations involving
a 32-bit integer constant. Depending on whether the
constant fits in 16 bits or not, one or several instructions
are generated as required to perform the operation
and prepended to the given instruction sequence [k]. *)
Definition loadimm (r: ireg) (n: int) (k: code) :=
if Int.eq (high_s n) Int.zero then
Paddi r GPR0 (Cint n) :: k
else if Int.eq (low_s n) Int.zero then
Paddis r GPR0 (Cint (high_s n)) :: k
else
Paddis r GPR0 (Cint (high_u n)) ::
Pori r r (Cint (low_u n)) :: k.
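(** For instance, [loadimm r (0x00012345)] has both [high_s] and [low_s]
  nonzero, so it falls into the third case, producing
  [Paddis r GPR0 (Cint 0x0001)] followed by [Pori r r (Cint 0x2345)]. *)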
Definition addimm (r1 r2: ireg) (n: int) (k: code) :=
if Int.eq (high_s n) Int.zero then
Paddi r1 r2 (Cint n) :: k
else if Int.eq (low_s n) Int.zero then
Paddis r1 r2 (Cint (high_s n)) :: k
else
Paddis r1 r2 (Cint (high_s n)) ::
Paddi r1 r1 (Cint (low_s n)) :: k.
Definition andimm_base (r1 r2: ireg) (n: int) (k: code) :=
if Int.eq (high_u n) Int.zero then
Pandi_ r1 r2 (Cint n) :: k
else if Int.eq (low_u n) Int.zero then
Pandis_ r1 r2 (Cint (high_u n)) :: k
else
loadimm GPR0 n (Pand_ r1 r2 GPR0 :: k).
Definition andimm (r1 r2: ireg) (n: int) (k: code) :=
if is_rlw_mask n then
Prlwinm r1 r2 Int.zero n :: k
else
andimm_base r1 r2 n k.
Definition orimm (r1 r2: ireg) (n: int) (k: code) :=
if Int.eq (high_u n) Int.zero then
Pori r1 r2 (Cint n) :: k
else if Int.eq (low_u n) Int.zero then
Poris r1 r2 (Cint (high_u n)) :: k
else
Poris r1 r2 (Cint (high_u n)) ::
Pori r1 r1 (Cint (low_u n)) :: k.
Definition xorimm (r1 r2: ireg) (n: int) (k: code) :=
if Int.eq (high_u n) Int.zero then
Pxori r1 r2 (Cint n) :: k
else if Int.eq (low_u n) Int.zero then
Pxoris r1 r2 (Cint (high_u n)) :: k
else
Pxoris r1 r2 (Cint (high_u n)) ::
Pxori r1 r1 (Cint (low_u n)) :: k.
Definition rolm (r1 r2: ireg) (amount mask: int) (k: code) :=
if is_rlw_mask mask then
Prlwinm r1 r2 amount mask :: k
else
Prlwinm r1 r2 amount Int.mone :: andimm_base r1 r1 mask k.
(** Accessing slots in the stack frame. *)
Definition loadind (base: ireg) (ofs: int) (ty: typ) (dst: mreg) (k: code) :=
if Int.eq (high_s ofs) Int.zero then
match ty with
| Tint => Plwz (ireg_of dst) (Cint ofs) base :: k
| Tfloat => Plfd (freg_of dst) (Cint ofs) base :: k
end
else
loadimm GPR0 ofs
(match ty with
| Tint => Plwzx (ireg_of dst) base GPR0 :: k
| Tfloat => Plfdx (freg_of dst) base GPR0 :: k
end).
Definition storeind (src: mreg) (base: ireg) (ofs: int) (ty: typ) (k: code) :=
if Int.eq (high_s ofs) Int.zero then
match ty with
| Tint => Pstw (ireg_of src) (Cint ofs) base :: k
| Tfloat => Pstfd (freg_of src) (Cint ofs) base :: k
end
else
loadimm GPR0 ofs
(match ty with
| Tint => Pstwx (ireg_of src) base GPR0 :: k
| Tfloat => Pstfdx (freg_of src) base GPR0 :: k
end).
(** Constructor for a floating-point comparison. The PowerPC has
a single [fcmpu] instruction to compare floats, which sets
bits 0, 1 and 2 of the condition register to reflect ``less'',
``greater'' and ``equal'' conditions, respectively.
The ``less or equal'' and ``greater or equal'' conditions must be
synthesized by a [cror] instruction that computes the logical ``or''
of the corresponding two conditions. *)
Definition floatcomp (cmp: comparison) (r1 r2: freg) (k: code) :=
Pfcmpu r1 r2 ::
match cmp with
| Cle => Pcror CRbit_3 CRbit_2 CRbit_0 :: k
| Cge => Pcror CRbit_3 CRbit_2 CRbit_1 :: k
| _ => k
end.
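(** E.g. [floatcomp Cle r1 r2 k] expands to [Pfcmpu r1 r2] followed by
  [Pcror CRbit_3 CRbit_2 CRbit_0], i.e. bit 3 receives ``equal or less'',
  which [crbit_for_fcmp Cle] then selects. *)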
(** Translation of a condition. Prepends to [k] the instructions
that evaluate the condition and leave its boolean result in one of
the bits of the condition register. The bit in question is
determined by the [crbit_for_cond] function. *)
Definition transl_cond
(cond: condition) (args: list mreg) (k: code) :=
match cond, args with
| Ccomp c, a1 :: a2 :: nil =>
Pcmpw (ireg_of a1) (ireg_of a2) :: k
| Ccompu c, a1 :: a2 :: nil =>
Pcmplw (ireg_of a1) (ireg_of a2) :: k
| Ccompimm c n, a1 :: nil =>
if Int.eq (high_s n) Int.zero then
Pcmpwi (ireg_of a1) (Cint n) :: k
else
loadimm GPR0 n (Pcmpw (ireg_of a1) GPR0 :: k)
| Ccompuimm c n, a1 :: nil =>
if Int.eq (high_u n) Int.zero then
Pcmplwi (ireg_of a1) (Cint n) :: k
else
loadimm GPR0 n (Pcmplw (ireg_of a1) GPR0 :: k)
| Ccompf cmp, a1 :: a2 :: nil =>
floatcomp cmp (freg_of a1) (freg_of a2) k
| Cnotcompf cmp, a1 :: a2 :: nil =>
floatcomp cmp (freg_of a1) (freg_of a2) k
| Cmaskzero n, a1 :: nil =>
andimm_base GPR0 (ireg_of a1) n k
| Cmasknotzero n, a1 :: nil =>
andimm_base GPR0 (ireg_of a1) n k
| _, _ =>
k (**r never happens for well-typed code *)
end.
(* CRbit_0 = Less
CRbit_1 = Greater
CRbit_2 = Equal
CRbit_3 = Other *)
Definition crbit_for_icmp (cmp: comparison) :=
match cmp with
| Ceq => (CRbit_2, true)
| Cne => (CRbit_2, false)
| Clt => (CRbit_0, true)
| Cle => (CRbit_1, false)
| Cgt => (CRbit_1, true)
| Cge => (CRbit_0, false)
end.
Definition crbit_for_fcmp (cmp: comparison) :=
match cmp with
| Ceq => (CRbit_2, true)
| Cne => (CRbit_2, false)
| Clt => (CRbit_0, true)
| Cle => (CRbit_3, true)
| Cgt => (CRbit_1, true)
| Cge => (CRbit_3, true)
end.
Definition crbit_for_cond (cond: condition) :=
match cond with
| Ccomp cmp => crbit_for_icmp cmp
| Ccompu cmp => crbit_for_icmp cmp
| Ccompimm cmp n => crbit_for_icmp cmp
| Ccompuimm cmp n => crbit_for_icmp cmp
| Ccompf cmp => crbit_for_fcmp cmp
| Cnotcompf cmp => let p := crbit_for_fcmp cmp in (fst p, negb (snd p))
| Cmaskzero n => (CRbit_2, true)
| Cmasknotzero n => (CRbit_2, false)
end.
(** Recognition of comparisons [>= 0] and [< 0]. *)
Inductive condition_class: condition -> list mreg -> Type :=
| condition_eq0:
forall n r, n = Int.zero -> condition_class (Ccompimm Ceq n) (r :: nil)
| condition_ne0:
forall n r, n = Int.zero -> condition_class (Ccompimm Cne n) (r :: nil)
| condition_ge0:
forall n r, n = Int.zero -> condition_class (Ccompimm Cge n) (r :: nil)
| condition_lt0:
forall n r, n = Int.zero -> condition_class (Ccompimm Clt n) (r :: nil)
| condition_default:
forall c rl, condition_class c rl.
Definition classify_condition (c: condition) (args: list mreg): condition_class c args :=
match c as z1, args as z2 return condition_class z1 z2 with
| Ccompimm Ceq n, r :: nil =>
match Int.eq_dec n Int.zero with
| left EQ => condition_eq0 n r EQ
| right _ => condition_default (Ccompimm Ceq n) (r :: nil)
end
| Ccompimm Cne n, r :: nil =>
match Int.eq_dec n Int.zero with
| left EQ => condition_ne0 n r EQ
| right _ => condition_default (Ccompimm Cne n) (r :: nil)
end
| Ccompimm Cge n, r :: nil =>
match Int.eq_dec n Int.zero with
| left EQ => condition_ge0 n r EQ
| right _ => condition_default (Ccompimm Cge n) (r :: nil)
end
| Ccompimm Clt n, r :: nil =>
match Int.eq_dec n Int.zero with
| left EQ => condition_lt0 n r EQ
| right _ => condition_default (Ccompimm Clt n) (r :: nil)
end
| x, y =>
condition_default x y
end.
(** Translation of a condition operator. The generated code sets
the [r] target register to 0 or 1 depending on the truth value of the
condition. *)
Definition transl_cond_op
(cond: condition) (args: list mreg) (r: mreg) (k: code) :=
match classify_condition cond args with
| condition_eq0 _ a _ =>
Psubfic GPR0 (ireg_of a) (Cint Int.zero) ::
Padde (ireg_of r) GPR0 (ireg_of a) :: k
| condition_ne0 _ a _ =>
Paddic GPR0 (ireg_of a) (Cint Int.mone) ::
Psubfe (ireg_of r) GPR0 (ireg_of a) :: k
| condition_ge0 _ a _ =>
Prlwinm (ireg_of r) (ireg_of a) Int.one Int.one ::
Pxori (ireg_of r) (ireg_of r) (Cint Int.one) :: k
| condition_lt0 _ a _ =>
Prlwinm (ireg_of r) (ireg_of a) Int.one Int.one :: k
| condition_default _ _ =>
let p := crbit_for_cond cond in
transl_cond cond args
(Pmfcrbit (ireg_of r) (fst p) ::
if snd p
then k
else Pxori (ireg_of r) (ireg_of r) (Cint Int.one) :: k)
end.
(** Translation of the arithmetic operation [r <- op(args)].
The corresponding instructions are prepended to [k]. *)
Definition transl_op
(op: operation) (args: list mreg) (r: mreg) (k: code) :=
match op, args with
| Omove, a1 :: nil =>
match mreg_type a1 with
| Tint => Pmr (ireg_of r) (ireg_of a1) :: k
| Tfloat => Pfmr (freg_of r) (freg_of a1) :: k
end
| Ointconst n, nil =>
loadimm (ireg_of r) n k
| Ofloatconst f, nil =>
Plfi (freg_of r) f :: k
| Oaddrsymbol s ofs, nil =>
if symbol_is_small_data s ofs then
Paddi (ireg_of r) GPR0 (Csymbol_sda s ofs) :: k
else
Paddis GPR12 GPR0 (Csymbol_high s ofs) ::
Paddi (ireg_of r) GPR12 (Csymbol_low s ofs) :: k
| Oaddrstack n, nil =>
addimm (ireg_of r) GPR1 n k
| Ocast8signed, a1 :: nil =>
Pextsb (ireg_of r) (ireg_of a1) :: k
| Ocast16signed, a1 :: nil =>
Pextsh (ireg_of r) (ireg_of a1) :: k
| Oadd, a1 :: a2 :: nil =>
Padd (ireg_of r) (ireg_of a1) (ireg_of a2) :: k
| Oaddimm n, a1 :: nil =>
addimm (ireg_of r) (ireg_of a1) n k
| Osub, a1 :: a2 :: nil =>
Psubfc (ireg_of r) (ireg_of a2) (ireg_of a1) :: k
| Osubimm n, a1 :: nil =>
if Int.eq (high_s n) Int.zero then
Psubfic (ireg_of r) (ireg_of a1) (Cint n) :: k
else
loadimm GPR0 n (Psubfc (ireg_of r) (ireg_of a1) GPR0 :: k)
| Omul, a1 :: a2 :: nil =>
Pmullw (ireg_of r) (ireg_of a1) (ireg_of a2) :: k
| Omulimm n, a1 :: nil =>
if Int.eq (high_s n) Int.zero then
Pmulli (ireg_of r) (ireg_of a1) (Cint n) :: k
else
loadimm GPR0 n (Pmullw (ireg_of r) (ireg_of a1) GPR0 :: k)
| Odiv, a1 :: a2 :: nil =>
Pdivw (ireg_of r) (ireg_of a1) (ireg_of a2) :: k
| Odivu, a1 :: a2 :: nil =>
Pdivwu (ireg_of r) (ireg_of a1) (ireg_of a2) :: k
| Oand, a1 :: a2 :: nil =>
Pand_ (ireg_of r) (ireg_of a1) (ireg_of a2) :: k
| Oandimm n, a1 :: nil =>
andimm (ireg_of r) (ireg_of a1) n k
| Oor, a1 :: a2 :: nil =>
Por (ireg_of r) (ireg_of a1) (ireg_of a2) :: k
| Oorimm n, a1 :: nil =>
orimm (ireg_of r) (ireg_of a1) n k
| Oxor, a1 :: a2 :: nil =>
Pxor (ireg_of r) (ireg_of a1) (ireg_of a2) :: k
| Oxorimm n, a1 :: nil =>
xorimm (ireg_of r) (ireg_of a1) n k
| Onot, a1 :: nil =>
Pnor (ireg_of r) (ireg_of a1) (ireg_of a1) :: k
| Onand, a1 :: a2 :: nil =>
Pnand (ireg_of r) (ireg_of a1) (ireg_of a2) :: k
| Onor, a1 :: a2 :: nil =>
Pnor (ireg_of r) (ireg_of a1) (ireg_of a2) :: k
| Onxor, a1 :: a2 :: nil =>
Peqv (ireg_of r) (ireg_of a1) (ireg_of a2) :: k
| Oandc, a1 :: a2 :: nil =>
Pandc (ireg_of r) (ireg_of a1) (ireg_of a2) :: k
| Oorc, a1 :: a2 :: nil =>
Porc (ireg_of r) (ireg_of a1) (ireg_of a2) :: k
| Oshl, a1 :: a2 :: nil =>
Pslw (ireg_of r) (ireg_of a1) (ireg_of a2) :: k
| Oshr, a1 :: a2 :: nil =>
Psraw (ireg_of r) (ireg_of a1) (ireg_of a2) :: k
| Oshrimm n, a1 :: nil =>
Psrawi (ireg_of r) (ireg_of a1) n :: k
| Oshrximm n, a1 :: nil =>
Psrawi (ireg_of r) (ireg_of a1) n ::
Paddze (ireg_of r) (ireg_of r) :: k
| Oshru, a1 :: a2 :: nil =>
Psrw (ireg_of r) (ireg_of a1) (ireg_of a2) :: k
| Orolm amount mask, a1 :: nil =>
rolm (ireg_of r) (ireg_of a1) amount mask k
| Oroli amount mask, a1 :: a2 :: nil =>
if mreg_eq a1 r then (**r should always be true *)
Prlwimi (ireg_of r) (ireg_of a2) amount mask :: k
else
Pmr GPR0 (ireg_of a1) ::
Prlwimi GPR0 (ireg_of a2) amount mask ::
Pmr (ireg_of r) GPR0 :: k
| Onegf, a1 :: nil =>
Pfneg (freg_of r) (freg_of a1) :: k
| Oabsf, a1 :: nil =>
Pfabs (freg_of r) (freg_of a1) :: k
| Oaddf, a1 :: a2 :: nil =>
Pfadd (freg_of r) (freg_of a1) (freg_of a2) :: k
| Osubf, a1 :: a2 :: nil =>
Pfsub (freg_of r) (freg_of a1) (freg_of a2) :: k
| Omulf, a1 :: a2 :: nil =>
Pfmul (freg_of r) (freg_of a1) (freg_of a2) :: k
| Odivf, a1 :: a2 :: nil =>
Pfdiv (freg_of r) (freg_of a1) (freg_of a2) :: k
| Osingleoffloat, a1 :: nil =>
Pfrsp (freg_of r) (freg_of a1) :: k
| Ointoffloat, a1 :: nil =>
Pfcti (ireg_of r) (freg_of a1) :: k
| Ofloatofwords, a1 :: a2 :: nil =>
Pfmake (freg_of r) (ireg_of a1) (ireg_of a2) :: k
| Ocmp cmp, _ =>
transl_cond_op cmp args r k
| _, _ =>
k (**r never happens for well-typed code *)
end.
(** Common code to translate [Mload] and [Mstore] instructions. *)
Definition int_temp_for (r: mreg) :=
if mreg_eq r IT2 then GPR11 else GPR12.
Definition transl_load_store
(mk1: constant -> ireg -> instruction)
(mk2: ireg -> ireg -> instruction)
(addr: addressing) (args: list mreg)
(temp: ireg) (k: code) :=
match addr, args with
| Aindexed ofs, a1 :: nil =>
if Int.eq (high_s ofs) Int.zero then
mk1 (Cint ofs) (ireg_of a1) :: k
else
Paddis temp (ireg_of a1) (Cint (high_s ofs)) ::
mk1 (Cint (low_s ofs)) temp :: k
| Aindexed2, a1 :: a2 :: nil =>
mk2 (ireg_of a1) (ireg_of a2) :: k
| Aglobal symb ofs, nil =>
if symbol_is_small_data symb ofs then
mk1 (Csymbol_sda symb ofs) GPR0 :: k
else
Paddis temp GPR0 (Csymbol_high symb ofs) ::
mk1 (Csymbol_low symb ofs) temp :: k
| Abased symb ofs, a1 :: nil =>
Paddis temp (ireg_of a1) (Csymbol_high symb ofs) ::
mk1 (Csymbol_low symb ofs) temp :: k
| Ainstack ofs, nil =>
if Int.eq (high_s ofs) Int.zero then
mk1 (Cint ofs) GPR1 :: k
else
Paddis temp GPR1 (Cint (high_s ofs)) ::
mk1 (Cint (low_s ofs)) temp :: k
| _, _ =>
(* should not happen *) k
end.
(** Translation of arguments to annotations *)
Definition transl_annot_param (p: Mach.annot_param) : Asm.annot_param :=
match p with
| Mach.APreg r => APreg (preg_of r)
| Mach.APstack chunk ofs => APstack chunk ofs
end.
(** Translation of a Mach instruction. *)
Definition transl_instr (f: Mach.function) (i: Mach.instruction) (k: code) :=
match i with
| Mgetstack ofs ty dst =>
loadind GPR1 ofs ty dst k
| Msetstack src ofs ty =>
storeind src GPR1 ofs ty k
| Mgetparam ofs ty dst =>
Plwz GPR11 (Cint f.(fn_link_ofs)) GPR1 :: loadind GPR11 ofs ty dst k
| Mop op args res =>
transl_op op args res k
| Mload chunk addr args dst =>
match chunk with
| Mint8signed =>
transl_load_store
(Plbz (ireg_of dst)) (Plbzx (ireg_of dst)) addr args GPR12
(Pextsb (ireg_of dst) (ireg_of dst) :: k)
| Mint8unsigned =>
transl_load_store
(Plbz (ireg_of dst)) (Plbzx (ireg_of dst)) addr args GPR12 k
| Mint16signed =>
transl_load_store
(Plha (ireg_of dst)) (Plhax (ireg_of dst)) addr args GPR12 k
| Mint16unsigned =>
transl_load_store
(Plhz (ireg_of dst)) (Plhzx (ireg_of dst)) addr args GPR12 k
| Mint32 =>
transl_load_store
(Plwz (ireg_of dst)) (Plwzx (ireg_of dst)) addr args GPR12 k
| Mfloat32 =>
transl_load_store
(Plfs (freg_of dst)) (Plfsx (freg_of dst)) addr args GPR12 k
| Mfloat64 | Mfloat64al32 =>
transl_load_store
(Plfd (freg_of dst)) (Plfdx (freg_of dst)) addr args GPR12 k
end
| Mstore chunk addr args src =>
let temp := int_temp_for src in
match chunk with
| Mint8signed =>
transl_load_store
(Pstb (ireg_of src)) (Pstbx (ireg_of src)) addr args temp k
| Mint8unsigned =>
transl_load_store
(Pstb (ireg_of src)) (Pstbx (ireg_of src)) addr args temp k
| Mint16signed =>
transl_load_store
(Psth (ireg_of src)) (Psthx (ireg_of src)) addr args temp k
| Mint16unsigned =>
transl_load_store
(Psth (ireg_of src)) (Psthx (ireg_of src)) addr args temp k
| Mint32 =>
transl_load_store
(Pstw (ireg_of src)) (Pstwx (ireg_of src)) addr args temp k
| Mfloat32 =>
transl_load_store
(Pstfs (freg_of src)) (Pstfsx (freg_of src)) addr args temp k
| Mfloat64 | Mfloat64al32 =>
transl_load_store
(Pstfd (freg_of src)) (Pstfdx (freg_of src)) addr args temp k
end
| Mcall sig (inl r) =>
Pmtctr (ireg_of r) :: Pbctrl :: k
| Mcall sig (inr symb) =>
Pbl symb :: k
| Mtailcall sig (inl r) =>
Pmtctr (ireg_of r) ::
Plwz GPR0 (Cint f.(fn_retaddr_ofs)) GPR1 ::
Pmtlr GPR0 ::
Pfreeframe f.(fn_stacksize) f.(fn_link_ofs) ::
Pbctr :: k
| Mtailcall sig (inr symb) =>
Plwz GPR0 (Cint f.(fn_retaddr_ofs)) GPR1 ::
Pmtlr GPR0 ::
Pfreeframe f.(fn_stacksize) f.(fn_link_ofs) ::
Pbs symb :: k
| Mbuiltin ef args res =>
Pbuiltin ef (map preg_of args) (preg_of res) :: k
| Mannot ef args =>
Pannot ef (map transl_annot_param args) :: k
| Mlabel lbl =>
Plabel lbl :: k
| Mgoto lbl =>
Pb lbl :: k
| Mcond cond args lbl =>
let p := crbit_for_cond cond in
transl_cond cond args
(if (snd p) then Pbt (fst p) lbl :: k else Pbf (fst p) lbl :: k)
| Mjumptable arg tbl =>
Prlwinm GPR12 (ireg_of arg) (Int.repr 2) (Int.repr (-4)) ::
Pbtbl GPR12 tbl :: k
| Mreturn =>
Plwz GPR0 (Cint f.(fn_retaddr_ofs)) GPR1 ::
Pmtlr GPR0 ::
Pfreeframe f.(fn_stacksize) f.(fn_link_ofs) ::
Pblr :: k
end.
Definition transl_code (f: Mach.function) (il: list Mach.instruction) :=
List.fold_right (transl_instr f) nil il.
(** Translation of a whole function. Note that we must check
that the generated code contains less than [2^32] instructions,
otherwise the offset part of the [PC] code pointer could wrap
around, leading to incorrect executions. *)
Definition transl_function (f: Mach.function) :=
Pallocframe f.(fn_stacksize) f.(fn_link_ofs) ::
Pmflr GPR0 ::
Pstw GPR0 (Cint f.(fn_retaddr_ofs)) GPR1 ::
transl_code f f.(fn_code).
Open Local Scope string_scope.
Definition transf_function (f: Mach.function) : res Asm.code :=
let c := transl_function f in
if zlt Int.max_unsigned (list_length_z c)
then Errors.Error (msg "code size exceeded")
else Errors.OK c.
Definition transf_fundef (f: Mach.fundef) : res Asm.fundef :=
transf_partial_fundef transf_function f.
Definition transf_program (p: Mach.program) : res Asm.program :=
transform_partial_program transf_fundef p.
|
%High-speed MATLAB codes
%This program uses CRC-SCL with a small list size as a "filter": under the
%same noise realization, if CRC-SCL with a smaller L is correct, then CRC-SCL
%with a larger L is also correct. You may not believe this conjecture, but
%you can give it a try; it holds with high probability (almost 1).
%This program has another accelerator: if CRC-SCL is correct at a lower SNR,
%then the same CRC-SCL is also correct at a higher SNR. Again, you may not
%believe this conjecture, but you can give it a try; it holds with high
%probability (almost 1).
%I know the above shortcuts sound dangerous, but they are safe under the
%following four conditions:
%1. The same codeword.
%2. The same AWGN noise realization with distribution N(0, 1).
%3. CRC must be used (plain SCL alone is not permitted).
%4. The same code construction over the whole SNR range.
%This program satisfies all four conditions. You can verify the BLER
%performance by comparison with existing results.
%Since MATLAB is not good at recursive functions (too many parameters to be
%passed), I dropped the well-known recursiveCalcP() and recursiveCalcB()
%proposed by I. Tal and use plain 'for' loops instead. You may argue that an
%object-oriented (OO) style could be used, but OO in MATLAB is also slow.
%Besides, the algorithms of the following papers are provided:
%How to Construct Polar Codes
%Fast Successive-Cancellation Decoding of Polar Codes: Identification and Decoding of New Nodes
%beta-expansion: A Theoretical Framework for Fast and Recursive Construction of Polar Codes
%I implemented the above three algorithms myself, so their correctness is not
%guaranteed.
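%A minimal sketch of the escalation loop implied by the "filter" idea above
%(illustrative names only; the actual logic lives inside simulation()):
% for l = list_vec
%     u_hat = scl_decode(llr, l, frozen_bits); %hypothetical decoder call
%     if crc_pass(u_hat, det) %smaller L already correct: larger L is skipped
%         break
%     end
% end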
clear
addpath('GA/')
% addpath('HowToConstructPolarCode/')
addpath('NodeProcess/')
% addpath('BECconstruction/')
% addpath('PolarizaedChannelsPartialOrder/')
%adding the above folders takes around 2 seconds
design_epsilon = 0.32;
crc_length = 16;
[gen, det, g] = get_crc_objective(crc_length);
n = 10;
N = 2^n;
K = N/2 + crc_length;
ebno_vec = [2 2.5]; %row vec, you can write it like [1 1.5 2 2.5 3]
list_vec = [1 16]; %row vec, you can write it like [1 4 16 32 ...]. The first element is always 1 for acceleration purposes. The remaining elements are powers of two.
max_runs = 1e7;
max_err = 100;
resolution = 1e4; %results are reported every max_runs/resolution runs.
[bler, ber] = simulation(N, K, design_epsilon, max_runs, max_err, resolution, ebno_vec, list_vec, gen, det, g, crc_length);
|
(* Definition of Salsa according to Bernstein's paper
"The Salsa20 family of stream ciphers", http://cr.yp.to/snuffle/salsafamily-20071225.pdf*)
Require Import compcert.lib.Coqlib.
Require Import Coq.Strings.String.
Require Import msl.Extensionality.
Require Import List. Import ListNotations.
Require Import compcert.lib.Integers.
Require Import sha.functional_prog.
Require Import tweetnacl20140427.Salsa20.
Require Import tweetnacl20140427.tweetNaclBase. (*for bind, combinelist*)
Definition Step1 (x:list int): option (list int):=
match x with [x0; x1; x2; x3; x4; x5; x6; x7; x8; x9; x10; x11; x12; x13; x14; x15]
=>
let y3 := Int.xor x3 (Int.rol (Int.add x15 x11) (Int.repr 7)) in
let y4 := Int.xor x4 (Int.rol (Int.add x0 x12) (Int.repr 7)) in
let y9 := Int.xor x9 (Int.rol (Int.add x5 x1) (Int.repr 7)) in
let y14 := Int.xor x14 (Int.rol (Int.add x10 x6) (Int.repr 7)) in
Some [x0; x1; x2; y3;
y4; x5; x6; x7;
x8; y9; x10; x11;
x12; x13; y14; x15]
| _ => None
end.
Definition test1_in := map x2i
([ "61707865"; "04030201"; "08070605"; "0c0b0a09";
"100f0e0d"; "3320646e"; "01040103"; "06020905";
"00000007"; "00000000"; "79622d32"; "14131211";
"18171615"; "1c1b1a19"; "201f1e1d"; "6b206574"])%string.
Definition test1_out1 := map x2i ([
"61707865"; "04030201"; "08070605"; "95b0c8b6";
"d3c83331"; "3320646e"; "01040103"; "06020905";
"00000007"; "91b3379b"; "79622d32"; "14131211";
"18171615"; "1c1b1a19"; "130804a0"; "6b206574"])%string.
Goal Step1 test1_in = Some(test1_out1). reflexivity. Qed.
Definition Step2 (x:list int): option (list int):=
match x with [x0; x1; x2; x3; x4; x5; x6; x7; x8; x9; x10; x11; x12; x13; x14; x15]
=>
let y2 := Int.xor x2 (Int.rol (Int.add x10 x14) (Int.repr 9)) in
let y7 := Int.xor x7 (Int.rol (Int.add x15 x3) (Int.repr 9)) in
let y8 := Int.xor x8 (Int.rol (Int.add x0 x4) (Int.repr 9)) in
let y13 := Int.xor x13 (Int.rol (Int.add x5 x9) (Int.repr 9)) in
Some [x0; x1; y2; x3;
x4; x5; x6; y7;
y8; x9; x10; x11;
x12; y13; x14; x15]
| _ => None
end.
Definition test1_out2 := map x2i ([
"61707865"; "04030201"; "dc64a31d"; "95b0c8b6";
"d3c83331"; "3320646e"; "01040103"; "a45e5d04";
"71572c6d"; "91b3379b"; "79622d32"; "14131211";
"18171615"; "bb230990"; "130804a0"; "6b206574"])%string.
Goal Step2 test1_out1 = Some(test1_out2). reflexivity. Qed.
Definition Step3 (x:list int): option (list int):=
match x with [x0; x1; x2; x3; x4; x5; x6; x7; x8; x9; x10; x11; x12; x13; x14; x15]
=>
let y1 := Int.xor x1 (Int.rol (Int.add x9 x13) (Int.repr 13)) in
let y6 := Int.xor x6 (Int.rol (Int.add x14 x2) (Int.repr 13)) in
let y11 := Int.xor x11 (Int.rol (Int.add x3 x7) (Int.repr 13)) in
let y12 := Int.xor x12 (Int.rol (Int.add x4 x8) (Int.repr 13)) in
Some [x0; y1; x2; x3;
x4; x5; y6; x7;
x8; x9; x10; y11;
y12; x13; x14; x15]
| _ => None
end.
Definition test1_out3 := map x2i ([
"61707865"; "cc266b9b"; "dc64a31d"; "95b0c8b6";
"d3c83331"; "3320646e"; "95f3bcee"; "a45e5d04";
"71572c6d"; "91b3379b"; "79622d32"; "f0a45550";
"f3e4deb6"; "bb230990"; "130804a0"; "6b206574"])%string.
Goal Step3 test1_out2 = Some(test1_out3). reflexivity. Qed.
Definition Step4 (x:list int): option (list int):=
match x with [x0; x1; x2; x3; x4; x5; x6; x7; x8; x9; x10; x11; x12; x13; x14; x15]
=>
let y0 := Int.xor x0 (Int.rol (Int.add x8 x12) (Int.repr 18)) in
let y5 := Int.xor x5 (Int.rol (Int.add x13 x1) (Int.repr 18)) in
let y10 := Int.xor x10 (Int.rol (Int.add x2 x6) (Int.repr 18)) in
let y15 := Int.xor x15 (Int.rol (Int.add x7 x11) (Int.repr 18)) in
Some [y0; x1; x2; x3;
x4; y5; x6; x7;
x8; x9; y10; x11;
x12; x13; x14; y15]
| _ => None
end.
Definition test1_out4 := map x2i ([
"4dfdec95"; "cc266b9b"; "dc64a31d"; "95b0c8b6";
"d3c83331"; "e78e794b"; "95f3bcee"; "a45e5d04";
"71572c6d"; "91b3379b"; "f94fe453"; "f0a45550";
"f3e4deb6"; "bb230990"; "130804a0"; "a272317e"])%string.
Goal Step4 test1_out3 = Some(test1_out4). reflexivity. Qed.
Definition transp (x:list int): option (list int):=
match x with [x0; x1; x2; x3; x4; x5; x6; x7; x8; x9; x10; x11; x12; x13; x14; x15]
=> Some [x0; x4; x8; x12;
x1; x5; x9; x13;
x2; x6; x10; x14;
x3; x7; x11; x15]
| _ => None
end.
Definition test1_out := map x2i ([
"4dfdec95"; "d3c83331"; "71572c6d"; "f3e4deb6";
"cc266b9b"; "e78e794b"; "91b3379b"; "bb230990";
"dc64a31d"; "95f3bcee"; "f94fe453"; "130804a0";
"95b0c8b6"; "a45e5d04"; "f0a45550"; "a272317e"])%string.
Goal transp test1_out4 = Some(test1_out). reflexivity. Qed.
Definition snuffleStep x :=
bind (Step1 x) (fun y => bind (Step2 y) (fun z => bind (Step3 z) Step4)).
Definition snuffleRound x := bind (snuffleStep x) transp.
Goal snuffleRound test1_in = Some test1_out. reflexivity. Qed.
Definition test1_round2 := map x2i ([
"ba2409b1"; "1b7cce6a"; "29115dcf"; "5037e027";
"37b75378"; "348d94c8"; "3ea582b3"; "c3a9a148";
"825bfcb9"; "226ae9eb"; "63dd7748"; "7129a215";
"4effd1ec"; "5f25dc72"; "a6c3d164"; "152a26d8"])%string.
Goal snuffleRound test1_out = Some(test1_round2). reflexivity. Qed.
Fixpoint Snuffle n x :=
match n with O => Some x | S m => bind (Snuffle m x) snuffleRound end.
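(* [Snuffle n] iterates [snuffleRound] n times; the Goals below check one,
   two, and twenty rounds against the intermediate states listed in
   Bernstein's paper. *)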
Goal Snuffle 1 = snuffleRound. extensionality. reflexivity. Qed.
Goal Snuffle 2 test1_in = Some(test1_round2). reflexivity. Qed.
Definition test1_round20 := map x2i ([
"58318d3e"; "0292df4f"; "a28d8215"; "a1aca723";
"697a34c7"; "f2f00ba8"; "63e9b0a1"; "27250e3a";
"b1c7f1f3"; "62066edc"; "66d3ccf1"; "b0365cf3";
"091ad09e"; "64f0c40f"; "d60d95ea"; "00be78c9"])%string.
Goal Snuffle 20 test1_in = Some(test1_round20). reflexivity. Qed.
Lemma snuffleRound_length r l:
snuffleRound r = Some l -> length l = 16%nat /\ length r = 16%nat.
Proof. intros.
destruct r; simpl in H. inv H.
destruct r; simpl in H. inv H.
destruct r; simpl in H. inv H.
destruct r; simpl in H. inv H.
destruct r; simpl in H. inv H.
destruct r; simpl in H. inv H.
destruct r; simpl in H. inv H.
destruct r; simpl in H. inv H.
destruct r; simpl in H. inv H.
destruct r; simpl in H. inv H.
destruct r; simpl in H. inv H.
destruct r; simpl in H. inv H.
destruct r; simpl in H. inv H.
destruct r; simpl in H. inv H.
destruct r; simpl in H. inv H.
destruct r; simpl in H. inv H.
destruct r; simpl in H; inv H.
split; trivial.
Qed.
Lemma snuffleRound_Zlength r l (R: snuffleRound r = Some l):
Zlength l = 16 /\ Zlength r = 16.
Proof.
do 2 rewrite Zlength_correct.
destruct (snuffleRound_length _ _ R) as [A B].
rewrite A, B; split; reflexivity.
Qed.
Lemma Snuffle_length r: forall i l, Snuffle i r = Some l -> length r = 16%nat -> length l = 16%nat.
Proof. induction i; simpl; intros. inv H; trivial.
remember (Snuffle i r) as s. symmetry in Heqs; destruct s; inv H.
apply snuffleRound_length in H2. apply H2.
Qed.
Lemma Snuffle_Zlength r i l (SN:Snuffle i r = Some l)
(R:Zlength r = 16): Zlength l= 16.
Proof.
rewrite Zlength_correct.
rewrite (Snuffle_length _ _ _ SN); trivial.
apply sublist.Zlength_length in R; trivial.
Qed.
Opaque Snuffle.
|
module mod_potential
use,intrinsic :: ISO_FORTRAN_ENV, only : REAL64
use mod_particle
implicit none
private
public :: assignment (=)
public :: tension,pressure
!
type potential
real(REAL64) :: u = 0D0
real(REAL64) :: f(3) = 0D0
end type potential
!
type,extends(potential) :: tension
real(REAL64) :: c = 1D0
real(REAL64) :: core = 1D0
real(REAL64) :: r = 0D0
contains
procedure :: calc => tension_calc
end type tension
!
type,extends(potential) :: pressure
real(REAL64) :: rho = 1D0
real(REAL64) :: gra = 1D0
real(REAL64) :: h = 0D0
real(REAL64) :: vert(3) = [0D0,0D0,1D0]
contains
procedure :: calc => pressure_calc
end type pressure
!
interface assignment (=)
module procedure tension_assign,pressure_assign
end interface assignment (=)
!
contains
!
pure elemental subroutine tension_assign(l,r)
class(tension),intent(inout) :: l
class(tension),intent(in) :: r
l%u = r%u
l%f = r%f
l%c = r%c
l%core = r%core
l%r = r%r
end subroutine tension_assign
!
pure elemental subroutine pressure_assign(l,r)
class(pressure),intent(inout) :: l
class(pressure),intent(in) :: r
l%u = r%u
l%f = r%f
l%rho = r%rho
l%gra = r%gra
l%h = r%h
l%vert = r%vert
end subroutine pressure_assign
!
pure elemental subroutine tension_calc(this,p0,p1)
class(tension),intent(inout) :: this
class(particle),intent(in) :: p0,p1
real(REAL64) :: r1
!
this%f(:) = p1%q(:) - p0%q(:)
this%u = SQRT( DOT_PRODUCT( this%f, this%f ) )
r1 = 1D0 / this%u
this%f(:) = this%f(:) * r1
this%u = this%u - this%r
r1 = this%r * r1
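! soft-core correction: here r1 = r/|d|; the branch below turns it into
! 1 - (r/|d|)**16 when the separation |d| is below the rest length r
! (a steep 16th-power penalty) and into zero otherwise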
if(r1>1D0)then
r1 = r1 * r1
r1 = r1 * r1
r1 = r1 * r1
r1 = r1 * r1
r1 = 1D0 - r1
else
r1 = 0D0
endif
!
this%u = this%u + this%core * r1
this%f(:) = this%u * this%f(:)
!
end subroutine tension_calc
!
pure elemental subroutine pressure_calc(this,p0,p1,p2,p3)
class(pressure),intent(inout) :: this
class(particle),intent(in) :: p0,p1,p2,p3
real(REAL64),parameter :: r24 = 1D0 / (6D0*4D0)
real(REAL64) :: a(3),b(3),s
!
! 3-2
! | |
! 0-1
!
this%u = this%h + this%h
this%u = this%u + this%u
!
a(:) = p0%q(:) + p1%q(:) + p2%q(:) + p3%q(:)
this%u = this%u - DOT_PRODUCT( this%vert(:), a(:) )
!
this%u = this%u * r24
!
if(this%u<0D0)then
this%u = 0D0 ; this%f(:) = 0D0 ; RETURN
endif
!
a(:) = p2%q(:) - p0%q(:)
b(:) = p1%q(:) - p0%q(:)
s = SQRT( 1D0 - DOT_PRODUCT(a,b)**2 / ( DOT_PRODUCT(a,a) * DOT_PRODUCT(b,b) ) )
b(:) = p3%q(:) - p0%q(:)
s = SQRT( 1D0 - DOT_PRODUCT(a,b)**2 / ( DOT_PRODUCT(a,a) * DOT_PRODUCT(b,b) ) ) + s
!
b(:) = p1%q(:) - p3%q(:)
!
this%f(1) = a(2) * b(3) - a(3) * b(2)
this%f(2) = a(3) * b(1) - a(1) * b(3)
this%f(3) = a(1) * b(2) - a(2) * b(1)
!
this%f(:) = this%f(:) / SQRT( DOT_PRODUCT(this%f(:),this%f(:)) )
!
this%u = this%rho * this%gra * this%u * s ! rho*g*h*s
this%f(:) = this%u * this%f(:)
!
end subroutine pressure_calc
!
end module mod_potential
|
#include <stdio.h>
#include <stdlib.h>
#include <gsl/gsl_rng.h>
/* gcc gsl_mersenne_twister.c -lgsl -lcblas */
int main(int argc, char **argv){
const gsl_rng_type * T;
gsl_rng * r;
int i, n_points;
double x;
if(argc < 2){
fprintf(stderr, "usage: %s n_points\n", argv[0]);
return 1;
}
T = gsl_rng_mt19937;
r = gsl_rng_alloc (T);
n_points = atoi(argv[1]);
for(i=0;i<n_points;i++){
x = gsl_rng_uniform(r);
fprintf(stdout, "%f\n", x);
}
gsl_rng_free(r); /* release the generator */
return 0;
}
|
lemma topological_basis: "topological_basis B \<longleftrightarrow> (\<forall>x. open x \<longleftrightarrow> (\<exists>B'. B' \<subseteq> B \<and> \<Union>B' = x))"
|
function [M_mean,n]=meanfilt3(M,IND,k,v)
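%Computes, for each voxel index in IND, the mean of M over a spherical
%neighbourhood of radius k (with voxel size v), ignoring NaNs; n is the
%number of valid voxels used for each entry.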
%Creating spherical mask environment with radius k
[kI,kJ,kK]=cart2im(k,k,k,v);
kI=round(kI); kJ=round(kJ); kK=round(kK);
[MASK_J,MASK_I,MASK_K]=meshgrid(-kJ:kJ,-kI:kI,-kK:kK);
% [MASK_X,MASK_Y,MASK_Z]=im2cart(MASK_I,MASK_J,MASK_K,v);
MASK_X=MASK_J.*v(2); MASK_Y=MASK_I.*v(1); MASK_Z=MASK_K.*v(3);
R=sqrt(MASK_X.^2 + MASK_Y.^2 + MASK_Z.^2);
Lv=R<=k;
MASK_I=MASK_I(Lv); MASK_J=MASK_J(Lv); MASK_K=MASK_K(Lv);
%Getting mask indices
[IND_mask]=maskfind(M,IND(:),MASK_I(:),MASK_J(:),MASK_K(:));
L_valid=IND_mask>0;
INT_valid=M(IND_mask(L_valid));
INT_mask=nan(size(IND_mask));
INT_mask(L_valid)=INT_valid;
%Calculating mean, ignoring NaN's
M_mean=gnanmean(INT_mask,2);
%Calculating number of elements used in mean calculation
n=sum(~isnan(INT_mask),2);
% %Creating spherical mask environment
% k=k+iseven(k);
% k_offset=round(k/2)-1;
% [MASK_J,MASK_I,MASK_K]=meshgrid(-k_offset:k_offset);
% R=sqrt(MASK_J.^2 + MASK_I.^2 + MASK_K.^2);
% Lv=R<=k_offset;
% MASK_I=MASK_I(Lv); MASK_J=MASK_J(Lv); MASK_K=MASK_K(Lv);
%
% %Getting mask indices
% [IND_mask]=maskfind(M,IND(:),MASK_I(:),MASK_J(:),MASK_K(:));
% L_valid=IND_mask>0;
% INT_valid=M(IND_mask(L_valid));
% INT_mask=nan(size(IND_mask));
% INT_mask(L_valid)=INT_valid;
%
% %Calculating median, ignoring NaN's
% M_median=nanmedian(INT_mask,2);
%
% %Calculating number of elements used in median calculation
% n=sum(~isnan(INT_mask),2);
end
%%
% _*GIBBON footer text*_
%
% License: <https://github.com/gibbonCode/GIBBON/blob/master/LICENSE>
%
% GIBBON: The Geometry and Image-based Bioengineering add-On. A toolbox for
% image segmentation, image-based modeling, meshing, and finite element
% analysis.
%
% Copyright (C) 2006-2022 Kevin Mattheus Moerman and the GIBBON contributors
%
% This program is free software: you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation, either version 3 of the License, or
% (at your option) any later version.
%
% This program is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with this program. If not, see <http://www.gnu.org/licenses/>.
|
/-
Copyright (c) 2023 Kevin Buzzard. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author : Kevin Buzzard
-/
import tactic
import combinatorics.simple_graph.acyclic -- trees and forests
/-
# Trees and forests
A *forest* is a graph with no cycles. A *tree* is a connected forest.
Here's how to do this in Lean. Let `G` be a graph with vertex set `V`.
-/
variables (V : Type) (G : simple_graph V)
-- Here's how to say "G is a forest"
example : Prop := G.is_acyclic
-- It's defined to mean "For all `v : V`, every walk from `v` to `v` is not a cycle. "
example : G.is_acyclic ↔ ∀ (v : V) (p : G.walk v v), ¬ p.is_cycle :=
begin
refl,
end
-- Here's how to say "G is a tree"
example : Prop := G.is_tree
example : G.is_tree ↔ G.connected ∧ G.is_acyclic:=
begin
exact G.is_tree_iff,
end
-- Here are some harder theorems from the library. Recall that a *path* is a walk
-- with no repeated vertices.
-- A graph is acyclic iff for all `v w : V`, there's at most one path from `v` to `w`.
example : G.is_acyclic ↔ ∀ (v w : V) (p q : G.path v w), p = q :=
simple_graph.is_acyclic_iff_path_unique
-- A graph is a tree iff `V` is nonempty and for all `v w : V`,
-- there's exactly one path from `v` to `w`.
example : G.is_tree ↔ nonempty V ∧ ∀ v w : V, ∃! (p : G.walk v w), p.is_path :=
simple_graph.is_tree_iff_exists_unique_path
-- If you want a logic puzzle, rephrase this in terms of `G.path`
example : G.is_tree ↔ nonempty V ∧ ∀ v w : V, ∃! (p : G.path v w), true :=
begin
rw simple_graph.is_tree_iff_exists_unique_path,
apply and_congr iff.rfl,
apply forall_congr, intro v,
apply forall_congr, intro w,
split,
{ rintro ⟨p,hp,hp2⟩,
refine ⟨⟨p, hp⟩, true.intro, _⟩,
rintro ⟨q, hq⟩ -,
ext,
exact hp2 _ hq, },
{ rintro ⟨⟨p, hp⟩, -, h2⟩,
refine ⟨p, hp, λ q hq, _⟩,
specialize h2 ⟨q, hq⟩ true.intro,
cases h2,
refl, },
end
/-
If you want a hard graph theory puzzle, prove that in a finite tree,
1 + the number of edges equals the number of vertices.
I don't think this is in the library and it would be a neat project.
Because induction on the size of `V` will be messy (it will involve
-- changing `V` and then moving between graphs on different types)
I think that the best way to do this would be to prove that for
an acyclic graph on a fixed `V`, #connected components + #edges = #vertices,
by induction on number of edges.
Note: the solution to this is not in the solutions!
-/
open_locale classical
example (V : Type) [fintype V] (G : simple_graph V) (hG : G.is_tree) :
1 + finset.card (G.edge_finset) = fintype.card V :=
sorry
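-- A sketch of the suggested intermediate lemma (the `fintype` instance on
-- connected components is assumed explicitly here to sidestep instance
-- search; the proof is again left as `sorry`):
example (V : Type) [fintype V] (G : simple_graph V)
  [fintype G.connected_component] (hG : G.is_acyclic) :
  fintype.card G.connected_component + finset.card (G.edge_finset) =
    fintype.card V :=
sorry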
|
[STATEMENT]
lemma bigstep_progress: "(c, s) \<Rightarrow> p \<Down> t \<Longrightarrow> p > 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (c, s) \<Rightarrow> p \<Down> t \<Longrightarrow> 0 < p
[PROOF STEP]
apply(induct rule: big_step_t.induct, auto)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
!=============================================================================!
!
! These routines generate the factored LHS for the Navier-Stokes Equations
! using 2nd order differencing
!
!=============================================================================!
subroutine lhs1( mat, Ah, Dh, Vh11, bc, dtl, v)
!
! Form the LHS for the \xi direction
!
!=============================================================================!
use global
use buff_mod
implicit none
real :: mat(ny*nx,ndof,ndof,3), v(ny*nx,ndof)
real :: Ah(ny*nx,ndof,ndof), Dh(ny*nx,ndof,ndof), Vh11(ny*nx,6)
real :: bc(ny*ndof*ndof*14)
real :: dtl(ny*nx)
!.... first derivative operator
real :: a1, a2, a3
integer :: lrec, ier, istat
integer :: idof, jdof
!=============================================================================!
!.... second-order stencil
a1 = -alfa * pt5 / dxi
a2 = alfa * zero
a3 = alfa * pt5 / dxi
!.... \hat{A} term
do idof = 1, ndof
do jdof = 1, ndof
mat(:,idof,jdof,1) = a1 * dtl(:) * Ah(:,idof,jdof)
mat(:,idof,jdof,2) = a2 * dtl(:) * Ah(:,idof,jdof)
mat(:,idof,jdof,3) = a3 * dtl(:) * Ah(:,idof,jdof)
end do
end do
!.... \hat{D} term
do idof = 1, ndof
do jdof = 1, ndof
mat(:,idof,jdof,2) = mat(:,idof,jdof,2) + &
alfa * dtl(:) * Dh(:,idof,jdof)
end do
end do
!.... \hat{V}_{\xi\xi} term and I term
call lhs1l( mat, Vh11, dtl, buff )
!.... apply boundary treatment to the LHS
call lhsbt1( mat, Ah, Dh, Vh11, bc, dtl )
!.... apply boundary conditions to the LHS
call lhsbc1( mat, bc, v, vm )
return
end
!=============================================================================!
subroutine lhs1l( mat, Vh11, dtl, buff )
!
! Form the LHS for the \xi direction
!
!=============================================================================!
use global
implicit none
real :: mat(ny*nx,ndof,ndof,3), buff(ny*nx)
real :: dtl(ny*nx)
real :: Vh11(ny*nx,6)
!.... second derivative operator
real :: b1, b2, b3
real :: eps_i
!=============================================================================!
!.... second-order stencil
b1 = alfa / dxi**2
b2 = -alfa * two / dxi**2
b3 = alfa / dxi**2
!.... \hat{V}_{\xi\xi} term
mat(:,2,2,1) = mat(:,2,2,1) - b1 * dtl(:) * Vh11(:,1)
mat(:,2,3,1) = mat(:,2,3,1) - b1 * dtl(:) * Vh11(:,5)
mat(:,3,2,1) = mat(:,3,2,1) - b1 * dtl(:) * Vh11(:,6)
mat(:,3,3,1) = mat(:,3,3,1) - b1 * dtl(:) * Vh11(:,2)
mat(:,4,4,1) = mat(:,4,4,1) - b1 * dtl(:) * Vh11(:,3)
mat(:,5,5,1) = mat(:,5,5,1) - b1 * dtl(:) * Vh11(:,4)
mat(:,2,2,2) = mat(:,2,2,2) - b2 * dtl(:) * Vh11(:,1)
mat(:,2,3,2) = mat(:,2,3,2) - b2 * dtl(:) * Vh11(:,5)
mat(:,3,2,2) = mat(:,3,2,2) - b2 * dtl(:) * Vh11(:,6)
mat(:,3,3,2) = mat(:,3,3,2) - b2 * dtl(:) * Vh11(:,2)
mat(:,4,4,2) = mat(:,4,4,2) - b2 * dtl(:) * Vh11(:,3)
mat(:,5,5,2) = mat(:,5,5,2) - b2 * dtl(:) * Vh11(:,4)
mat(:,2,2,3) = mat(:,2,2,3) - b3 * dtl(:) * Vh11(:,1)
mat(:,2,3,3) = mat(:,2,3,3) - b3 * dtl(:) * Vh11(:,5)
mat(:,3,2,3) = mat(:,3,2,3) - b3 * dtl(:) * Vh11(:,6)
mat(:,3,3,3) = mat(:,3,3,3) - b3 * dtl(:) * Vh11(:,2)
mat(:,4,4,3) = mat(:,4,4,3) - b3 * dtl(:) * Vh11(:,3)
mat(:,5,5,3) = mat(:,5,5,3) - b3 * dtl(:) * Vh11(:,4)
!.... I term
mat(:,1,1,2) = mat(:,1,1,2) + one
mat(:,2,2,2) = mat(:,2,2,2) + one
mat(:,3,3,2) = mat(:,3,3,2) + one
mat(:,4,4,2) = mat(:,4,4,2) + one
mat(:,5,5,2) = mat(:,5,5,2) + one
!.... implicit damping term
if (eps_e .ne. zero) then
eps_i = four * eps_e
b1 = -alfa * eps_i
b2 = alfa * two * eps_i
b3 = -alfa * eps_i
mat(:,1,1,1) = mat(:,1,1,1) + b1 * dtl(:) * buff
mat(:,2,2,1) = mat(:,2,2,1) + b1 * dtl(:) * buff
mat(:,3,3,1) = mat(:,3,3,1) + b1 * dtl(:) * buff
mat(:,4,4,1) = mat(:,4,4,1) + b1 * dtl(:) * buff
mat(:,5,5,1) = mat(:,5,5,1) + b1 * dtl(:) * buff
mat(:,1,1,2) = mat(:,1,1,2) + b2 * dtl(:) * buff
mat(:,2,2,2) = mat(:,2,2,2) + b2 * dtl(:) * buff
mat(:,3,3,2) = mat(:,3,3,2) + b2 * dtl(:) * buff
mat(:,4,4,2) = mat(:,4,4,2) + b2 * dtl(:) * buff
mat(:,5,5,2) = mat(:,5,5,2) + b2 * dtl(:) * buff
mat(:,1,1,3) = mat(:,1,1,3) + b3 * dtl(:) * buff
mat(:,2,2,3) = mat(:,2,2,3) + b3 * dtl(:) * buff
mat(:,3,3,3) = mat(:,3,3,3) + b3 * dtl(:) * buff
mat(:,4,4,3) = mat(:,4,4,3) + b3 * dtl(:) * buff
mat(:,5,5,3) = mat(:,5,5,3) + b3 * dtl(:) * buff
end if
return
end
!=============================================================================!
subroutine lhs2( mat, Bh, Dh, Vh22, bc, dtl, v)
!
! Form the LHS for the \eta direction
!
!=============================================================================!
use global
use buff_mod
implicit none
real :: mat(ny*nx,ndof,ndof,3), v(ny*nx,ndof)
real :: Bh(ny*nx,ndof,ndof), Dh(ny*nx,ndof,ndof), Vh22(ny*nx,6)
real :: bc(nx*ndof*ndof*14)
real :: dtl(ny*nx)
!.... first derivative operator
real :: a1, a2, a3
integer :: lrec, ier, istat
integer :: idof, jdof
!=============================================================================!
!.... second-order stencil
a1 = -alfa * pt5 / deta
a2 = alfa * zero
a3 = alfa * pt5 / deta
!.... \hat{B} term
do idof = 1, ndof
do jdof = 1, ndof
mat(:,idof,jdof,1) = a1 * dtl(:) * Bh(:,idof,jdof)
mat(:,idof,jdof,2) = a2 * dtl(:) * Bh(:,idof,jdof)
mat(:,idof,jdof,3) = a3 * dtl(:) * Bh(:,idof,jdof)
end do
end do
!.... \hat{V}_{\eta\eta} and I terms
call lhs2l( mat, Vh22, dtl, buff )
!.... apply boundary treatment to the LHS
call lhsbt2( mat, Bh, Dh, Vh22, bc, spg, spg2, dtl )
!.... apply boundary conditions to the LHS
call lhsbc2( mat, bc, v )
return
end
!=============================================================================!
subroutine lhs2l( mat, Vh22, dtl, buff )
!
! Form the LHS for the \eta direction
!
!=============================================================================!
use global
implicit none
real :: mat(ny*nx,ndof,ndof,3), buff(ny*nx)
real :: dtl(ny*nx)
real :: Vh22(ny*nx,6)
!.... second derivative operator
real :: b1, b2, b3
integer :: lrec, ier, istat
real :: eps_i
!=============================================================================!
!.... second-order stencil
b1 = alfa / deta**2
b2 = -alfa * two / deta**2
b3 = alfa / deta**2
!.... \hat{V}_{\eta\eta} term
mat(:,2,2,1) = mat(:,2,2,1) - b1 * dtl(:) * Vh22(:,1)
mat(:,2,3,1) = mat(:,2,3,1) - b1 * dtl(:) * Vh22(:,5)
mat(:,3,2,1) = mat(:,3,2,1) - b1 * dtl(:) * Vh22(:,6)
mat(:,3,3,1) = mat(:,3,3,1) - b1 * dtl(:) * Vh22(:,2)
mat(:,4,4,1) = mat(:,4,4,1) - b1 * dtl(:) * Vh22(:,3)
mat(:,5,5,1) = mat(:,5,5,1) - b1 * dtl(:) * Vh22(:,4)
mat(:,2,2,2) = mat(:,2,2,2) - b2 * dtl(:) * Vh22(:,1)
mat(:,2,3,2) = mat(:,2,3,2) - b2 * dtl(:) * Vh22(:,5)
mat(:,3,2,2) = mat(:,3,2,2) - b2 * dtl(:) * Vh22(:,6)
mat(:,3,3,2) = mat(:,3,3,2) - b2 * dtl(:) * Vh22(:,2)
mat(:,4,4,2) = mat(:,4,4,2) - b2 * dtl(:) * Vh22(:,3)
mat(:,5,5,2) = mat(:,5,5,2) - b2 * dtl(:) * Vh22(:,4)
mat(:,2,2,3) = mat(:,2,2,3) - b3 * dtl(:) * Vh22(:,1)
mat(:,2,3,3) = mat(:,2,3,3) - b3 * dtl(:) * Vh22(:,5)
mat(:,3,2,3) = mat(:,3,2,3) - b3 * dtl(:) * Vh22(:,6)
mat(:,3,3,3) = mat(:,3,3,3) - b3 * dtl(:) * Vh22(:,2)
mat(:,4,4,3) = mat(:,4,4,3) - b3 * dtl(:) * Vh22(:,3)
mat(:,5,5,3) = mat(:,5,5,3) - b3 * dtl(:) * Vh22(:,4)
!.... I term
mat(:,1,1,2) = mat(:,1,1,2) + one
mat(:,2,2,2) = mat(:,2,2,2) + one
mat(:,3,3,2) = mat(:,3,3,2) + one
mat(:,4,4,2) = mat(:,4,4,2) + one
mat(:,5,5,2) = mat(:,5,5,2) + one
!.... implicit damping term
if (eps_e .ne. zero) then
eps_i = four * eps_e
b1 = -alfa * eps_i
b2 = alfa * two * eps_i
b3 = -alfa * eps_i
mat(:,1,1,1) = mat(:,1,1,1) + b1 * dtl(:) * buff
mat(:,2,2,1) = mat(:,2,2,1) + b1 * dtl(:) * buff
mat(:,3,3,1) = mat(:,3,3,1) + b1 * dtl(:) * buff
mat(:,4,4,1) = mat(:,4,4,1) + b1 * dtl(:) * buff
mat(:,5,5,1) = mat(:,5,5,1) + b1 * dtl(:) * buff
mat(:,1,1,2) = mat(:,1,1,2) + b2 * dtl(:) * buff
mat(:,2,2,2) = mat(:,2,2,2) + b2 * dtl(:) * buff
mat(:,3,3,2) = mat(:,3,3,2) + b2 * dtl(:) * buff
mat(:,4,4,2) = mat(:,4,4,2) + b2 * dtl(:) * buff
mat(:,5,5,2) = mat(:,5,5,2) + b2 * dtl(:) * buff
mat(:,1,1,3) = mat(:,1,1,3) + b3 * dtl(:) * buff
mat(:,2,2,3) = mat(:,2,2,3) + b3 * dtl(:) * buff
mat(:,3,3,3) = mat(:,3,3,3) + b3 * dtl(:) * buff
mat(:,4,4,3) = mat(:,4,4,3) + b3 * dtl(:) * buff
mat(:,5,5,3) = mat(:,5,5,3) + b3 * dtl(:) * buff
end if
!.... sponge
if (ispg .eq. 1) then
mat(:,1,1,2) = mat(:,1,1,2) + alfa * dtl(:) * spg(:)
mat(:,2,2,2) = mat(:,2,2,2) + alfa * dtl(:) * spg(:)
mat(:,3,3,2) = mat(:,3,3,2) + alfa * dtl(:) * spg(:)
mat(:,4,4,2) = mat(:,4,4,2) + alfa * dtl(:) * spg(:)
mat(:,5,5,2) = mat(:,5,5,2) + alfa * dtl(:) * spg(:)
else if (ispg .ge. 2) then
mat(:,1,1,2) = mat(:,1,1,2) + alfa * dtl(:) * ( spg(:) + spg2(:) )
mat(:,2,2,2) = mat(:,2,2,2) + alfa * dtl(:) * ( spg(:) + spg2(:) )
mat(:,3,3,2) = mat(:,3,3,2) + alfa * dtl(:) * ( spg(:) + spg2(:) )
mat(:,4,4,2) = mat(:,4,4,2) + alfa * dtl(:) * ( spg(:) + spg2(:) )
mat(:,5,5,2) = mat(:,5,5,2) + alfa * dtl(:) * ( spg(:) + spg2(:) )
end if
return
end
|
section \<open>Challenge 1.A\<close>
theory Challenge1A
imports Main
begin
text \<open>Problem definition:
\<^url>\<open>https://ethz.ch/content/dam/ethz/special-interest/infk/chair-program-method/pm/documents/Verify%20This/Challenges%202019/ghc_sort.pdf\<close>\<close>
subsection \<open>Implementation\<close>
text \<open>We phrase the algorithm as a functional program.
Instead of a list of indexes for segment boundaries,
we return a list of lists, containing the segments.\<close>
text \<open>We start with auxiliary functions to take the longest
increasing/decreasing sequence from the start of the list
\<close>
fun take_incr :: "int list \<Rightarrow> _" where
"take_incr [] = []"
| "take_incr [x] = [x]"
| "take_incr (x#y#xs) = (if x<y then x#take_incr (y#xs) else [x])"
fun take_decr :: "int list \<Rightarrow> _" where
"take_decr [] = []"
| "take_decr [x] = [x]"
| "take_decr (x#y#xs) = (if x\<ge>y then x#take_decr (y#xs) else [x])"
fun take where
"take [] = []"
| "take [x] = [x]"
| "take (x#y#xs) = (if x<y then take_incr (x#y#xs) else take_decr (x#y#xs))"
definition "take2 xs \<equiv> let l=take xs in (l,drop (length l) xs)"
\<comment> \<open>Splits off a longest increasing/decreasing sequence from the list\<close>
text \<open>The main algorithm then iterates until the whole input list is split\<close>
function cuts where
"cuts xs = (if xs=[] then [] else let (c,xs) = take2 xs in c#cuts xs)"
by pat_completeness auto
subsection \<open>Termination\<close>
text \<open>First, we show termination. This will give us induction and proper unfolding lemmas.\<close>
lemma take_non_empty:
"take xs \<noteq> []" if "xs \<noteq> []"
using that
apply (cases xs)
apply clarsimp
subgoal for x ys
apply (cases ys)
apply auto
done
done
termination
apply (relation "measure length")
apply (auto simp: take2_def Let_def)
using take_non_empty
apply auto
done
declare cuts.simps[simp del]
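text \<open>A quick sanity check (assuming the default code-generation setup):
  the list below splits into @{text "[[1, 2, 3], [2, 1], [5]]"}, i.e. an
  increasing, a decreasing, and a singleton segment.\<close>
value "cuts [1, 2, 3, 2, 1, 5 :: int]"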
subsection \<open>Correctness\<close>
subsubsection \<open>Property 1: The Exact Sequence is Covered\<close>
lemma tdconc: "\<exists>ys. xs = take_decr xs @ ys"
apply (induction xs rule: take_decr.induct)
apply auto
done
lemma ticonc: "\<exists>ys. xs = take_incr xs @ ys"
apply (induction xs rule: take_incr.induct)
apply auto
done
lemma take_conc: "\<exists>ys. xs = take xs@ys"
using tdconc ticonc
apply (cases xs rule: take.cases)
by auto
theorem concat_cuts: "concat (cuts xs) = xs"
apply (induction xs rule: cuts.induct)
apply (subst cuts.simps)
apply (auto simp: take2_def Let_def)
by (metis append_eq_conv_conj take_conc)
subsubsection \<open>Property 2: Monotonicity\<close>
text \<open>We define constants to specify increasing/decreasing sequences.\<close>
fun incr where
"incr [] \<longleftrightarrow> True"
| "incr [_] \<longleftrightarrow> True"
| "incr (x#y#xs) \<longleftrightarrow> x<y \<and> incr (y#xs)"
fun decr where
"decr [] \<longleftrightarrow> True"
| "decr [_] \<longleftrightarrow> True"
| "decr (x#y#xs) \<longleftrightarrow> x\<ge>y \<and> decr (y#xs)"
lemma tki: "incr (take_incr xs)"
apply (induction xs rule: take_incr.induct)
apply auto
apply (case_tac xs)
apply auto
done
lemma tkd: "decr (take_decr xs)"
apply (induction xs rule: take_decr.induct)
apply auto
apply (case_tac xs)
apply auto
done
lemma icod: "incr (take xs) \<or> decr (take xs)"
apply (cases xs rule: take.cases)
apply (auto simp: tki tkd simp del: take_incr.simps take_decr.simps)
done
theorem cuts_incr_decr: "\<forall>c\<in>set (cuts xs). incr c \<or> decr c"
apply (induction xs rule: cuts.induct)
apply (subst cuts.simps)
apply (auto simp: take2_def Let_def)
using icod by blast
subsubsection \<open>Property 3: Maximality\<close>
text \<open>Specification of a cut that consists of maximal segments:
The segments are non-empty, and for every two neighbouring segments,
the first value of the second segment cannot be used to continue the first segment:
\<close>
fun maxi where
"maxi [] \<longleftrightarrow> True"
| "maxi [c] \<longleftrightarrow> c\<noteq>[]"
| "maxi (c1#c2#cs) \<longleftrightarrow> (c1\<noteq>[] \<and> c2\<noteq>[] \<and> maxi (c2#cs) \<and> (
incr c1 \<and> \<not>(last c1 < hd c2)
\<or> decr c1 \<and> \<not>(last c1 \<ge> hd c2)
))"
text \<open>Obviously, our specification implies that there are no
empty segments\<close>
lemma maxi_imp_non_empty: "maxi xs \<Longrightarrow> []\<notin>set xs"
by (induction xs rule: maxi.induct) auto
lemma tdconc': "xs\<noteq>[] \<Longrightarrow>
\<exists>ys. xs = take_decr xs @ ys \<and> (ys\<noteq>[]
\<longrightarrow> \<not>(last (take_decr xs) \<ge> hd ys))"
apply (induction xs rule: take_decr.induct)
apply auto
apply (case_tac xs) apply (auto split: if_splits)
done
lemma ticonc': "xs\<noteq>[] \<Longrightarrow> \<exists>ys. xs = take_incr xs @ ys \<and> (ys\<noteq>[] \<longrightarrow> \<not>(last (take_incr xs) < hd ys))"
apply (induction xs rule: take_incr.induct)
apply auto
apply (case_tac xs) apply (auto split: if_splits)
done
lemma take_conc': "xs\<noteq>[] \<Longrightarrow> \<exists>ys. xs = take xs@ys \<and> (ys\<noteq>[] \<longrightarrow> (
take xs=take_incr xs \<and> \<not>(last (take_incr xs) < hd ys)
\<or> take xs=take_decr xs \<and> \<not>(last (take_decr xs) \<ge> hd ys)
))"
using tdconc' ticonc'
apply (cases xs rule: take.cases)
by auto
lemma take_decr_non_empty:
"take_decr xs \<noteq> []" if "xs \<noteq> []"
using that
apply (cases xs)
apply auto
subgoal for x ys
apply (cases ys)
apply (auto split: if_split_asm)
done
done
lemma take_incr_non_empty:
"take_incr xs \<noteq> []" if "xs \<noteq> []"
using that
apply (cases xs)
apply auto
subgoal for x ys
apply (cases ys)
apply (auto split: if_split_asm)
done
done
lemma take_conc'': "xs\<noteq>[] \<Longrightarrow> \<exists>ys. xs = take xs@ys \<and> (ys\<noteq>[] \<longrightarrow> (
incr (take xs) \<and> \<not>(last (take xs) < hd ys)
\<or> decr (take xs) \<and> \<not>(last (take xs) \<ge> hd ys)
))"
using tdconc' ticonc' tki tkd
apply (cases xs rule: take.cases)
apply auto
apply (auto simp add: take_incr_non_empty)
apply (simp add: take_decr_non_empty)
apply (metis list.distinct(1) take_incr.simps(3))
by (smt list.simps(3) take_decr.simps(3))
lemma inv_cuts: "cuts xs = c#cs \<Longrightarrow> \<exists>ys. c=take xs \<and> xs=c@ys \<and> cs = cuts ys"
apply (subst (asm) cuts.simps)
apply (cases xs rule: cuts.cases)
apply (auto split: if_splits simp: take2_def Let_def)
by (metis append_eq_conv_conj take_conc)
theorem maximal_cuts: "maxi (cuts xs)"
apply (induction "cuts xs" arbitrary: xs rule: maxi.induct)
subgoal by auto
subgoal for c xs
apply (drule sym; simp)
apply (subst (asm) cuts.simps)
apply (auto split: if_splits prod.splits simp: take2_def Let_def take_non_empty)
done
subgoal for c1 c2 cs xs
apply (drule sym)
apply simp
apply (drule inv_cuts; clarsimp)
apply auto
subgoal by (metis cuts.simps list.distinct(1) take_non_empty)
subgoal by (metis append.left_neutral inv_cuts not_Cons_self)
subgoal using icod by blast
subgoal by (metis
Nil_is_append_conv cuts.simps hd_append2 inv_cuts list.distinct(1)
same_append_eq take_conc'' take_non_empty)
subgoal by (metis
append_is_Nil_conv cuts.simps hd_append2 inv_cuts list.distinct(1)
same_append_eq take_conc'' take_non_empty)
done
done
subsubsection \<open>Equivalent Formulation Over Indexes\<close>
text \<open>After the competition, we got the comment that a specification of
monotonic sequences via indexes might be more readable.
We show that our functional specification is equivalent to a
specification over indexes.\<close>
fun ii_induction where
"ii_induction [] = ()"
| "ii_induction [_] = ()"
| "ii_induction (_#y#xs) = ii_induction (y#xs)"
locale cnvSpec =
fixes fP P
assumes [simp]: "fP [] \<longleftrightarrow> True"
assumes [simp]: "fP [x] \<longleftrightarrow> True"
assumes [simp]: "fP (a#b#xs) \<longleftrightarrow> P a b \<and> fP (b#xs)"
begin
lemma idx_spec: "fP xs \<longleftrightarrow> (\<forall>i<length xs - 1. P (xs!i) (xs!Suc i))"
apply (induction xs rule: ii_induction.induct)
using less_Suc_eq_0_disj
by auto
end
locale cnvSpec' =
fixes fP P P'
assumes [simp]: "fP [] \<longleftrightarrow> True"
assumes [simp]: "fP [x] \<longleftrightarrow> P' x"
assumes [simp]: "fP (a#b#xs) \<longleftrightarrow> P' a \<and> P' b \<and> P a b \<and> fP (b#xs)"
begin
lemma idx_spec: "fP xs \<longleftrightarrow> (\<forall>i<length xs. P' (xs!i)) \<and> (\<forall>i<length xs - 1. P (xs!i) (xs!Suc i))"
apply (induction xs rule: ii_induction.induct)
apply auto []
apply auto []
apply clarsimp
by (smt less_Suc_eq_0_disj nth_Cons_0 nth_Cons_Suc)
end
interpretation INCR: cnvSpec incr "(<)"
apply unfold_locales by auto
interpretation DECR: cnvSpec decr "(\<ge>)"
apply unfold_locales by auto
interpretation MAXI: cnvSpec' maxi "\<lambda>c1 c2. ( (
incr c1 \<and> \<not>(last c1 < hd c2)
\<or> decr c1 \<and> \<not>(last c1 \<ge> hd c2)
))"
"\<lambda>x. x \<noteq> []"
apply unfold_locales by auto
lemma incr_by_idx: "incr xs = (\<forall>i<length xs - 1. xs ! i < xs ! Suc i)"
by (rule INCR.idx_spec)
lemma decr_by_idx: "decr xs = (\<forall>i<length xs - 1. xs ! i \<ge> xs ! Suc i)"
by (rule DECR.idx_spec)
lemma maxi_by_idx: "maxi xs \<longleftrightarrow>
(\<forall>i<length xs. xs ! i \<noteq> []) \<and>
(\<forall>i<length xs - 1.
incr (xs ! i) \<and> \<not> last (xs ! i) < hd (xs ! Suc i)
\<or> decr (xs ! i) \<and> \<not> hd (xs ! Suc i) \<le> last (xs ! i)
)"
by (rule MAXI.idx_spec)
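text \<open>A small illustration (added for readability, not part of the original
development): for xs = [[1,5],[3,2]] the index characterisation above holds,
since [1,5] is increasing and cannot be extended by the head 3 of the next
chunk, and [3,2] is decreasing.\<close>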
theorem all_correct:
"concat (cuts xs) = xs"
"\<forall>c\<in>set (cuts xs). incr c \<or> decr c"
"maxi (cuts xs)"
"[] \<notin> set (cuts xs)"
using cuts_incr_decr concat_cuts maximal_cuts
maxi_imp_non_empty[OF maximal_cuts]
by auto
end
|
If $x$ is a real number, then $\sqrt{x}$ is a real number. (Read with Isabelle/HOL's total `sqrt` function on the reals, so no nonnegativity assumption on $x$ is needed.)
|
Formal statement is: lemma sigma_sets_sigma_sets_eq: "M \<subseteq> Pow S \<Longrightarrow> sigma_sets S (sigma_sets S M) = sigma_sets S M" Informal statement is: If $M$ is a subset of the power set of $S$, then the $\sigma$-algebra generated by $M$ is equal to the $\sigma$-algebra generated by the $\sigma$-algebra generated by $M$.
|
from abc import ABC, abstractmethod
from warnings import warn
import numpy as np
from scipy.interpolate import CubicSpline
from astropy.units import Quantity
from astropy.nddata import StdDevUncertainty, VarianceUncertainty, InverseVariance
from ..spectra import Spectrum1D
__all__ = ['ResamplerBase', 'FluxConservingResampler',
'LinearInterpolatedResampler', 'SplineInterpolatedResampler']
class ResamplerBase(ABC):
"""
    Base class for resample classes. The algorithms and needs of the different
    resamplers vary quite a bit, so this class is relatively sparse.
    The init parameter here is not yet hooked up to the rest of the code; it is
    included to show how we intend to use it in the future.
"""
def __init__(self, bin_edges='nan_fill'):
self.bin_edges = bin_edges
@abstractmethod
def __call__(self, orig_spectrum, fin_lamb):
"""
Return the resulting `~specutils.Spectrum1D` of the resampling.
"""
return NotImplemented
@abstractmethod
def resample1d(self, orig_spectrum, fin_lamb):
"""
Workhorse method that will return the resampled Spectrum1D
object.
"""
return NotImplemented
@staticmethod
def _calc_bin_edges(x):
"""
Calculate the bin edge values of an input dispersion axis. Input values
are assumed to be the center of the bins.
        todo: this should live in the main spectrum object, but we're still
        figuring out the details of that implementation, so it is left here
        for now.
Parameters
----------
x : ndarray
The input dispersion axis values.
Returns
-------
edges : ndarray
            Calculated bin edges, including the left-most and right-most bin edges.
"""
inside_edges = (x[1:] + x[:-1]) / 2
edges = np.insert(inside_edges, 0, 2 * x[0] - inside_edges[0])
edges = np.append(edges, 2 * x[-1] - inside_edges[-1])
return edges
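# A quick worked example of ``_calc_bin_edges`` (illustrative only): for the
# bin centers x = [2, 4, 12, 16, 20], the interior edges are the midpoints
# [3, 8, 14, 18], and the outer edges mirror the first/last centers about the
# nearest interior edge, giving edges = [1, 3, 8, 14, 18, 22].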
class FluxConservingResampler(ResamplerBase):
"""
This resampling algorithm conserves overall integrated flux (as opposed to
flux density).
Algorithm based on the equations documented in the following paper:
https://ui.adsabs.harvard.edu/abs/2017arXiv170505165C/abstract
Examples
--------
To resample an input spectrum to a user specified dispersion grid using
a flux conserving algorithm:
>>> import numpy as np
>>> import astropy.units as u
>>> from specutils import Spectrum1D
>>> from specutils.manipulation import FluxConservingResampler
>>> input_spectra = Spectrum1D(
... flux=np.array([1, 3, 7, 6, 20]) * u.mJy,
... spectral_axis=np.array([2, 4, 12, 16, 20]) * u.nm)
>>> resample_grid = np.array([1, 5, 9, 13, 14, 17, 21, 22, 23])
>>> fluxc_resample = FluxConservingResampler()
>>> output_spectrum1D = fluxc_resample(input_spectra, resample_grid) # doctest: +IGNORE_OUTPUT
"""
def __call__(self, orig_spectrum, fin_lamb):
"""
Return the resulting `~specutils.Spectrum1D` of the resampling.
"""
return self.resample1d(orig_spectrum, fin_lamb)
def _resample_matrix(self, orig_lamb, fin_lamb):
"""
Create a re-sampling matrix to be used in re-sampling spectra in a way
that conserves flux. This code was heavily influenced by Nick Earl's
resample rough draft: nmearl@0ff6ef1.
Parameters
----------
orig_lamb : ndarray
The original dispersion array.
fin_lamb : ndarray
The desired dispersion array.
Returns
-------
resample_mat : ndarray
            An (N_fin_lamb, M_orig_lamb) matrix.
"""
# Lower bin and upper bin edges
orig_edges = self._calc_bin_edges(orig_lamb)
fin_edges = self._calc_bin_edges(fin_lamb)
# I could get rid of these alias variables,
# but it does add readability
orig_low = orig_edges[:-1]
fin_low = fin_edges[:-1]
orig_upp = orig_edges[1:]
fin_upp = fin_edges[1:]
# Here's the real work in figuring out the bin overlaps
# i.e., contribution of each original bin to the resampled bin
l_inf = np.where(orig_low > fin_low[:, np.newaxis],
orig_low, fin_low[:, np.newaxis])
l_sup = np.where(orig_upp < fin_upp[:, np.newaxis],
orig_upp, fin_upp[:, np.newaxis])
resamp_mat = (l_sup - l_inf).clip(0)
resamp_mat *= (orig_upp - orig_low)
# set bins that don't overlap 100% with original bins
# to zero by checking edges, and applying generated mask
left_clip = np.where(fin_edges[:-1] - orig_edges[0] < 0, 0, 1)
right_clip = np.where(orig_edges[-1] - fin_edges[1:] < 0, 0, 1)
keep_overlapping_matrix = left_clip * right_clip
resamp_mat *= keep_overlapping_matrix[:, np.newaxis]
return resamp_mat
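    # Illustrative example (not from the original source): with original bin
    # edges [0, 1, 2, 3] and target bin edges [0, 1.5, 3], the overlap lengths
    # (l_sup - l_inf).clip(0) come out to [[1, 0.5, 0], [0, 0.5, 1]], i.e. the
    # first target bin takes all of original bin 1 and half of original bin 2.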
def resample1d(self, orig_spectrum, fin_lamb):
"""
Create a re-sampling matrix to be used in re-sampling spectra in a way
that conserves flux. If an uncertainty is present in the input spectra
it will be propagated through to the final resampled output spectra
as an InverseVariance uncertainty.
Parameters
----------
orig_spectrum : `~specutils.Spectrum1D`
The original 1D spectrum.
fin_lamb : ndarray
The desired dispersion array.
Returns
-------
resample_spectrum : `~specutils.Spectrum1D`
An output spectrum containing the resampled `~specutils.Spectrum1D`
"""
# Check if units on original spectrum and new wavelength (if defined)
# match
if isinstance(fin_lamb, Quantity):
if orig_spectrum.spectral_axis_unit != fin_lamb.unit:
                raise ValueError("Original spectrum dispersion grid and new "
                                 "dispersion grid must have the same units.")
# todo: Would be good to return uncertainty in type it was provided?
# todo: add in weighting options
        # Get the provided uncertainty into variance; default to None when no
        # uncertainty (or an unrecognized uncertainty type) is attached.
        pixel_uncer = None
        if orig_spectrum.uncertainty is not None:
            if isinstance(orig_spectrum.uncertainty, StdDevUncertainty):
                pixel_uncer = np.square(orig_spectrum.uncertainty.array)
            elif isinstance(orig_spectrum.uncertainty, VarianceUncertainty):
                pixel_uncer = orig_spectrum.uncertainty.array
            elif isinstance(orig_spectrum.uncertainty, InverseVariance):
                pixel_uncer = np.reciprocal(orig_spectrum.uncertainty.array)
# todo: Current code doesn't like the inputs being quantity objects, may
# want to look into this more in the future
resample_grid = self._resample_matrix(np.array(orig_spectrum.spectral_axis),
np.array(fin_lamb))
# Now for some broadcasting magic to handle multi dimensional flux inputs
# Essentially this part is inserting length one dimensions as fillers
# For example, if we have a (5,6,10) input flux, and an output grid
# of 3, flux will be broadcast to (5,6,1,10) and resample_grid will
# Be broadcast to (1,1,3,10). The sum then reduces down the 10, the
# original dispersion grid, leaving 3, the new dispersion grid, as
# the last index.
new_flux_shape = list(orig_spectrum.flux.shape)
new_flux_shape.insert(-1, 1)
in_flux = orig_spectrum.flux.reshape(new_flux_shape)
ones = [1] * len(orig_spectrum.flux.shape[:-1])
new_shape_resample_grid = ones + list(resample_grid.shape)
resample_grid = resample_grid.reshape(new_shape_resample_grid)
# Calculate final flux
out_flux = np.sum(in_flux * resample_grid, axis=-1) / np.sum(
resample_grid, axis=-1)
# Calculate output uncertainty
if pixel_uncer is not None:
pixel_uncer = pixel_uncer.reshape(new_flux_shape)
out_variance = np.sum(pixel_uncer * resample_grid**2, axis=-1) / np.sum(
resample_grid**2, axis=-1)
out_uncertainty = InverseVariance(np.reciprocal(out_variance))
else:
out_uncertainty = None
# todo: for now, use the units from the pre-resampled
# spectra, although if a unit is defined for fin_lamb and it doesn't
# match the input spectrum it won't work right, will have to think
# more about how to handle that... could convert before and after
# calculation, which is probably easiest. Matrix math algorithm is
# geometry based, so won't work to just let quantity math handle it.
resampled_spectrum = Spectrum1D(flux=out_flux,
spectral_axis=np.array(fin_lamb) * orig_spectrum.spectral_axis_unit,
uncertainty=out_uncertainty)
return resampled_spectrum
class LinearInterpolatedResampler(ResamplerBase):
"""
Resample a spectrum onto a new ``spectral_axis`` using linear interpolation.
Examples
--------
To resample an input spectrum to a user specified dispersion grid using
linear interpolation:
>>> import numpy as np
>>> import astropy.units as u
>>> from specutils import Spectrum1D
>>> from specutils.manipulation import LinearInterpolatedResampler
>>> input_spectra = Spectrum1D(
... flux=np.array([1, 3, 7, 6, 20]) * u.mJy,
... spectral_axis=np.array([2, 4, 12, 16, 20]) * u.nm)
>>> resample_grid = np.array([1, 5, 9, 13, 14, 17, 21, 22, 23])
>>> fluxc_resample = LinearInterpolatedResampler()
>>> output_spectrum1D = fluxc_resample(input_spectra, resample_grid) # doctest: +IGNORE_OUTPUT
"""
def __init__(self, bin_edges='nan_fill'):
super().__init__(bin_edges)
def __call__(self, orig_spectrum, fin_lamb):
"""
Return the resulting `~specutils.Spectrum1D` of the resampling.
"""
return self.resample1d(orig_spectrum, fin_lamb)
def _interpolation(self, orig_dispersion, flux, fin_lamb):
"""
        Use the specified interpolation to calculate the resampled
        flux.
Parameters
----------
orig_dispersion : ndarray
The original dispersion array.
flux: ndarray
The flux array from the input Spectrum1D
fin_lamb : ndarray
The desired dispersion array.
Returns
-------
resample_flux : ndarray
The resampled flux array generated from the interpolation.
"""
return np.interp(fin_lamb, orig_dispersion, flux, left=np.nan, right=np.nan)
def resample1d(self, orig_spectrum, fin_lamb):
"""
Call interpolation, repackage new spectra
Parameters
----------
orig_spectrum : `~specutils.Spectrum1D`
The original 1D spectrum.
fin_lamb : ndarray
The desired dispersion array.
Returns
-------
resample_spectrum : `~specutils.Spectrum1D`
An output spectrum containing the resampled `~specutils.Spectrum1D`
"""
        if orig_spectrum.uncertainty is not None:
            warn("Linear interpolation currently does not propagate uncertainties")
out_flux = self._interpolation(orig_spectrum.spectral_axis, orig_spectrum.flux,
fin_lamb)
# todo: for now, use the units from the pre-resampled
# spectra, although if a unit is defined for fin_lamb and it doesn't
# match the input spectrum it won't work right, will have to think
# more about how to handle that... could convert before and after
# calculation, which is probably easiest. Matrix math algorithm is
# geometry based, so won't work to just let quantity math handle it.
# todo: handle uncertainties for interpolated cases.
resampled_spectrum = Spectrum1D(flux=out_flux * orig_spectrum.flux.unit,
spectral_axis=np.array(fin_lamb) * orig_spectrum.spectral_axis_unit)
return resampled_spectrum
class SplineInterpolatedResampler(ResamplerBase):
"""
    This resampling algorithm uses a cubic spline interpolator. In the future
this can be expanded to use splines of different degrees.
Examples
--------
To resample an input spectrum to a user specified dispersion grid using
a cubic spline interpolator:
>>> import numpy as np
>>> import astropy.units as u
>>> from specutils import Spectrum1D
>>> from specutils.manipulation import SplineInterpolatedResampler
>>> input_spectra = Spectrum1D(
... flux=np.array([1, 3, 7, 6, 20]) * u.mJy,
... spectral_axis=np.array([2, 4, 12, 16, 20]) * u.nm)
>>> resample_grid = np.array([1, 5, 9, 13, 14, 17, 21, 22, 23])
>>> fluxc_resample = SplineInterpolatedResampler()
>>> output_spectrum1D = fluxc_resample(input_spectra, resample_grid) # doctest: +IGNORE_OUTPUT
"""
def __init__(self, bin_edges='nan_fill'):
super().__init__(bin_edges)
def __call__(self, orig_spectrum, fin_lamb):
"""
Return the resulting `~specutils.Spectrum1D` of the resampling.
"""
return self.resample1d(orig_spectrum, fin_lamb)
def _interpolation(self, orig_dispersion, flux, fin_lamb):
"""
        Use the specified interpolation to calculate the resampled
        flux.
Parameters
----------
orig_dispersion : ndarray
The original dispersion array.
flux: ndarray
The flux array from the input Spectrum1D
fin_lamb : ndarray
The desired dispersion array.
Returns
-------
resample_flux : ndarray
The resampled flux array generated from the interpolation.
"""
cubic_spline = CubicSpline(orig_dispersion, flux, extrapolate=False)
return cubic_spline(fin_lamb)
def resample1d(self, orig_spectrum, fin_lamb):
"""
Call interpolation, repackage new spectra
Parameters
----------
orig_spectrum : `~specutils.Spectrum1D`
The original 1D spectrum.
fin_lamb : ndarray
The desired dispersion array.
Returns
-------
resample_spectrum : `~specutils.Spectrum1D`
An output spectrum containing the resampled `~specutils.Spectrum1D`
"""
out_flux = self._interpolation(orig_spectrum.spectral_axis, orig_spectrum.flux,
fin_lamb)
# todo: for now, use the units from the pre-resampled
# spectra, although if a unit is defined for fin_lamb and it doesn't
# match the input spectrum it won't work right, will have to think
# more about how to handle that... could convert before and after
# calculation, which is probably easiest. Matrix math algorithm is
# geometry based, so won't work to just let quantity math handle it.
# todo: handle uncertainties for interpolated cases.
resampled_spectrum = Spectrum1D(flux=out_flux * orig_spectrum.flux.unit,
spectral_axis=np.array(fin_lamb) * orig_spectrum.spectral_axis_unit)
return resampled_spectrum
|
/*
* Copyright Andrey Semashev 2007 - 2013.
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*/
/*!
* \file default_sink.hpp
* \author Andrey Semashev
* \date 08.01.2012
*
* \brief This header is the Boost.Log library implementation, see the library documentation
* at http://www.boost.org/doc/libs/release/libs/log/doc/html/index.html.
*/
#ifndef BOOST_LOG_DEFAULT_SINK_HPP_INCLUDED_
#define BOOST_LOG_DEFAULT_SINK_HPP_INCLUDED_
#include <boost/log/detail/config.hpp>
#include <boost/log/sinks/sink.hpp>
#include <boost/log/attributes/attribute_name.hpp>
#include <boost/log/attributes/value_extraction.hpp>
#include <boost/log/attributes/value_visitation.hpp>
#include <boost/log/attributes/fallback_policy.hpp>
#include <boost/log/expressions/message.hpp>
#include <boost/log/trivial.hpp>
#if !defined(BOOST_LOG_NO_THREADS)
#include <boost/thread/mutex.hpp>
#endif
#include <boost/log/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
BOOST_LOG_OPEN_NAMESPACE
namespace sinks {
namespace aux {
//! The default sink to be used when no sinks are registered in the logging core
class default_sink :
public sink
{
private:
#if !defined(BOOST_LOG_NO_THREADS)
typedef mutex mutex_type;
mutex_type m_mutex;
#endif
attribute_name const m_severity_name, m_message_name;
value_extractor< boost::log::trivial::severity_level, fallback_to_default< boost::log::trivial::severity_level > > const m_severity_extractor;
value_visitor_invoker< expressions::tag::message::value_type > m_message_visitor;
public:
default_sink();
~default_sink();
bool will_consume(attribute_value_set const&);
void consume(record_view const& rec);
void flush();
};
} // namespace aux
} // namespace sinks
BOOST_LOG_CLOSE_NAMESPACE // namespace log
} // namespace boost
#include <boost/log/detail/footer.hpp>
#endif // BOOST_LOG_DEFAULT_SINK_HPP_INCLUDED_
|
import .love01_definitions_and_statements_demo
/- # LoVe Demo 3: Forward Proofs
When developing a proof, often it makes sense to work __forward__: to start with
what we already know and proceed step by step towards our goal. Lean's
structured proofs and raw proof terms are two styles that support forward
reasoning. -/
set_option pp.beta true
set_option pp.generalized_field_notation false
namespace LoVe
namespace forward_proofs
/- ## Structured Constructs
Structured proofs are syntactic sugar sprinkled on top of Lean's
__proof terms__.
The simplest kind of structured proof is the name of a lemma, possibly with
arguments. -/
lemma add_comm (m n : ℕ) :
add m n = add n m :=
sorry
lemma add_comm_zero_left (n : ℕ) :
add 0 n = add n 0 :=
add_comm 0 n
lemma add_comm_zero_left₂ (n : ℕ) :
add 0 n = add n 0 :=
by exact add_comm 0 n
/- `fix` and `assume` move `∀`-quantified variables and assumptions from the
goal into the local context. They can be seen as structured versions of the
`intros` tactic.
`show` repeats the goal to prove. It is useful as documentation or to rephrase
the goal (up to computation). -/
lemma fst_of_two_props :
∀a b : Prop, a → b → a :=
fix a b : Prop,
assume ha : a,
assume hb : b,
show a, from
ha
lemma fst_of_two_props₂ (a b : Prop) (ha : a) (hb : b) :
a :=
show a, from
begin
exact ha
end
lemma fst_of_two_props₃ (a b : Prop) (ha : a) (hb : b) :
a :=
ha
/- `have` proves an intermediate lemma, which can refer to the local context. -/
lemma prop_comp (a b c : Prop) (hab : a → b) (hbc : b → c) :
a → c :=
assume ha : a,
have hb : b :=
hab ha,
have hc : c :=
hbc hb,
show c, from
hc
lemma prop_comp₂ (a b c : Prop) (hab : a → b) (hbc : b → c) :
a → c :=
assume ha : a,
show c, from
hbc (hab ha)
/- ## Forward Reasoning about Connectives and Quantifiers -/
lemma and_swap (a b : Prop) :
a ∧ b → b ∧ a :=
assume hab : a ∧ b,
have ha : a :=
and.elim_left hab,
have hb : b :=
and.elim_right hab,
show b ∧ a, from
and.intro hb ha
lemma or_swap (a b : Prop) :
a ∨ b → b ∨ a :=
assume hab : a ∨ b,
show b ∨ a, from
or.elim hab
(assume ha : a,
show b ∨ a, from
or.intro_right b ha)
(assume hb : b,
show b ∨ a, from
or.intro_left a hb)
def double (n : ℕ) : ℕ :=
n + n
lemma nat_exists_double_iden :
∃n : ℕ, double n = n :=
exists.intro 0
(show double 0 = 0, from
by refl)
lemma nat_exists_double_iden₂ :
∃n : ℕ, double n = n :=
exists.intro 0 (by refl)
lemma modus_ponens (a b : Prop) :
(a → b) → a → b :=
assume hab : a → b,
assume ha : a,
show b, from
hab ha
lemma not_not_intro (a : Prop) :
a → ¬¬ a :=
assume ha : a,
assume hna : ¬ a,
show false, from
hna ha
lemma forall.one_point {α : Type} (t : α) (p : α → Prop) :
(∀x, x = t → p x) ↔ p t :=
iff.intro
(assume hall : ∀x, x = t → p x,
show p t, from
begin
apply hall t,
refl
end)
(assume hp : p t,
fix x,
assume heq : x = t,
show p x, from
begin
rw heq,
exact hp
end)
lemma beast_666 (beast : ℕ) :
(∀n, n = 666 → beast ≥ n) ↔ beast ≥ 666 :=
forall.one_point _ _
#print beast_666
lemma exists.one_point {α : Type} (t : α) (p : α → Prop) :
(∃x : α, x = t ∧ p x) ↔ p t :=
iff.intro
(assume hex : ∃x, x = t ∧ p x,
show p t, from
exists.elim hex
(fix x,
assume hand : x = t ∧ p x,
show p t, from
by cc))
(assume hp : p t,
show ∃x : α, x = t ∧ p x, from
exists.intro t
(show t = t ∧ p t, from
by cc))
/- ## Calculational Proofs
In informal mathematics, we often use transitive chains of equalities,
inequalities, or equivalences (e.g., `a ≥ b ≥ c`). In Lean, such calculational
proofs are supported by `calc`.
Syntax:
calc _term₀_
_op₁_ _term₁_ :
_proof₁_
... _op₂_ _term₂_ :
_proof₂_
⋮
... _opN_ _termN_ :
_proofN_ -/
lemma two_mul_example (m n : ℕ) :
2 * m + n = m + n + m :=
calc 2 * m + n
= (m + m) + n :
by rw two_mul
... = m + n + m :
by cc
/- `calc` saves some repetition, some `have` labels, and some transitive
reasoning: -/
lemma two_mul_example₂ (m n : ℕ) :
2 * m + n = m + n + m :=
have h₁ : 2 * m + n = (m + m) + n :=
by rw two_mul,
have h₂ : (m + m) + n = m + n + m :=
by cc,
show _, from
eq.trans h₁ h₂
/- ## Forward Reasoning with Tactics
The `have`, `let`, and `calc` structured proof commands are also available as a
tactic. Even in tactic mode, it can be useful to state intermediate results and
definitions in a forward fashion.
Observe that the syntax for the tactic `let` is slightly different than for the
structured proof command `let`, with `,` instead of `in`. -/
lemma prop_comp₃ (a b c : Prop) (hab : a → b) (hbc : b → c) :
a → c :=
begin
intro ha,
have hb : b :=
hab ha,
let c' := c,
have hc : c' :=
hbc hb,
exact hc
end
/- ## Dependent Types
Dependent types are the defining feature of the dependent type theory family of
logics.
Consider a function `pick` that takes a number `n : ℕ` and returns a number
between 0 and `n`. Conceptually, `pick` has a dependent type, namely
`(n : ℕ) → {i : ℕ // i ≤ n}`
We can think of this type as a `ℕ`-indexed family, where each member's type may
depend on the index:
`pick n : {i : ℕ // i ≤ n}`
But a type may also depend on another type, e.g., `list` (or `λα, list α`) and
`λα, α → α`.
A term may depend on a type, e.g., `λα, λx : α, x` (a polymorphic identity
function).
Of course, a term may also depend on a term.
Unless otherwise specified, a __dependent type__ means a type depending on a
term. This is what we mean when we say that simple type theory does not support
dependent types.
In summary, there are four cases for `λx, t` in the calculus of inductive
constructions (cf. Barendregt's `λ`-cube):
Body (`t`) | | Argument (`x`) | Description
---------- | ------------ | -------------- | ------------------------------
A term | depending on | a term | Simply typed `λ`-expression
A type | depending on | a term | Dependent type (strictly speaking)
A term | depending on | a type | Polymorphic term
A type | depending on | a type | Type constructor
Revised typing rules:
C ⊢ t : (x : σ) → τ[x] C ⊢ u : σ
———————————————————————————————————— App'
C ⊢ t u : τ[u]
C, x : σ ⊢ t : τ[x]
———————————————————————————————— Lam'
C ⊢ (λx : σ, t) : (x : σ) → τ[x]
These two rules degenerate to `App` and `Lam` if `x` does not occur in `τ[x]`.
Example of `App'`:
⊢ pick : (x : ℕ) → {y : ℕ // y ≤ x} ⊢ 5 : ℕ
——————————————————————————————————————————————— App'
⊢ pick 5 : {y : ℕ // y ≤ 5}
Example of `Lam'`:
α : Type, x : α ⊢ x : α
——————————————————————————————— Lam or Lam'
α : Type ⊢ (λx : α, x) : α → α
————————————————————————————————————————————— Lam'
⊢ (λα : Type, λx : α, x) : (α : Type) → α → α
Regrettably, the intuitive syntax `(x : σ) → τ` is not available in Lean.
Instead, we must write `∀x : σ, τ` to specify a dependent type.
Aliases:
`σ → τ` := `∀_ : σ, τ`
`Π` := `∀`
## The Curry–Howard Correspondence
`→` is used both as the implication symbol and as the type constructor of
functions. Similarly, `∀` is used both as a quantifier and in dependent types.
The two pairs of concepts not only look the same, they are the same, by the PAT
principle:
* PAT = propositions as types;
* PAT = proofs as terms.
This is also called the Curry–Howard correspondence.
Types:
* `σ → τ` is the type of total functions from `σ` to `τ`;
* `∀x : σ, τ[x]` is the dependent function type from `x : σ` to `τ[x]`.
Propositions:
* `P → Q` can be read as "`P` implies `Q`", or as the type of functions mapping
proofs of `P` to proofs of `Q`.
* `∀x : σ, Q[x]` can be read as "for all `x`, `Q[x]`", or as the type of
functions mapping values `x` of type `σ` to proofs of `Q[x]`.
Terms:
* A constant is a term.
* A variable is a term.
* `t u` is the application of function `t` to value `u`.
* `λx, t[x]` is a function mapping `x` to `t[x]`.
Proofs:
* A lemma or hypothesis name is a proof.
* `H t`, which instantiates the leading parameter or quantifier of proof `H`'s
statement with term `t`, is a proof.
* `H G`, which discharges the leading assumption of `H`'s statement with
proof `G`, is a proof.
* `λh : P, H[h]` is a proof of `P → Q`, assuming `H[h]` is a proof of `Q`
for `h : P`.
* `λx : σ, H[x]` is a proof of `∀x : σ, Q[x]`, assuming `H[x]` is a proof of
`Q[x]` for `x : σ`. -/
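/- As a concrete illustration (an addition, not part of the original demo): one
possible inhabitant of the dependent type `(n : ℕ) → {i : ℕ // i ≤ n}` discussed
above always picks 0, with `nat.zero_le n` discharging the proof obligation: -/
def pick (n : ℕ) : {i : ℕ // i ≤ n} :=
⟨0, nat.zero_le n⟩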
lemma and_swap₃ (a b : Prop) :
a ∧ b → b ∧ a :=
λhab : a ∧ b, and.intro (and.elim_right hab) (and.elim_left hab)
lemma and_swap₄ (a b : Prop) :
a ∧ b → b ∧ a :=
begin
intro hab,
apply and.intro,
apply and.elim_right,
exact hab,
apply and.elim_left,
exact hab
end
/- Tactical proofs are reduced to proof terms. -/
#print and_swap₃
#print and_swap₄
end forward_proofs
/- ## Induction by Pattern Matching
By the Curry–Howard correspondence, a proof by induction is the same as a
recursively specified proof term. Thus, as alternative to the `induction'`
tactic, induction can also be done by pattern matching:
* the induction hypothesis is then available under the name of the lemma we are
proving;
* well-foundedness of the argument is often proved automatically. -/
#check reverse
lemma reverse_append {α : Type} :
∀xs ys : list α, reverse (xs ++ ys) = reverse ys ++ reverse xs
| [] ys := by simp [reverse]
| (x :: xs) ys := by simp [reverse, reverse_append xs]
lemma reverse_append₂ {α : Type} (xs ys : list α) :
reverse (xs ++ ys) = reverse ys ++ reverse xs :=
begin
induction' xs,
{ simp [reverse] },
{ simp [reverse, ih] }
end
lemma reverse_reverse {α : Type} :
∀xs : list α, reverse (reverse xs) = xs
| [] := by refl
| (x :: xs) :=
by simp [reverse, reverse_append, reverse_reverse xs]
end LoVe
|
#include <Eigen/Core>
#include <algorithm>
#include <ceres/rotation.h>
#include <glog/logging.h>
#include "sfm/twoview_info.h"
namespace GraphSfM {
void SwapCameras(TwoViewInfo* twoview_info)
{
// Invert the translation.
Eigen::Vector3d neg_of_new_position;
ceres::AngleAxisRotatePoint(twoview_info->rotation_2.data(),
twoview_info->position_2.data(),
neg_of_new_position.data());
twoview_info->position_2 = -neg_of_new_position;
// Invert the rotation.
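    // For an angle-axis vector, negation flips the rotation axis while keeping
    // the angle, so this yields the inverse rotation.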
twoview_info->rotation_2 *= -1.0;
}
} // namespace GraphSfM
|
[STATEMENT]
lemma comp_tensor_op: "(tensor_op a b) o\<^sub>C\<^sub>L (tensor_op c d) = tensor_op (a o\<^sub>C\<^sub>L c) (b o\<^sub>C\<^sub>L d)"
for a :: "'e::finite ell2 \<Rightarrow>\<^sub>C\<^sub>L 'c::finite ell2" and b :: "'f::finite ell2 \<Rightarrow>\<^sub>C\<^sub>L 'd::finite ell2" and
c :: "'a::finite ell2 \<Rightarrow>\<^sub>C\<^sub>L 'e ell2" and d :: "'b::finite ell2 \<Rightarrow>\<^sub>C\<^sub>L 'f ell2"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. a \<otimes>\<^sub>o b o\<^sub>C\<^sub>L c \<otimes>\<^sub>o d = (a o\<^sub>C\<^sub>L c) \<otimes>\<^sub>o (b o\<^sub>C\<^sub>L d)
[PROOF STEP]
apply (rule equal_ket)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x. (a \<otimes>\<^sub>o b o\<^sub>C\<^sub>L c \<otimes>\<^sub>o d) *\<^sub>V ket x = ((a o\<^sub>C\<^sub>L c) \<otimes>\<^sub>o (b o\<^sub>C\<^sub>L d)) *\<^sub>V ket x
[PROOF STEP]
apply (rename_tac ij, case_tac ij, rename_tac i j, hypsubst_thin)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>ij i j. (a \<otimes>\<^sub>o b o\<^sub>C\<^sub>L c \<otimes>\<^sub>o d) *\<^sub>V ket (i, j) = ((a o\<^sub>C\<^sub>L c) \<otimes>\<^sub>o (b o\<^sub>C\<^sub>L d)) *\<^sub>V ket (i, j)
[PROOF STEP]
by (simp flip: tensor_ell2_ket add: tensor_op_ell2 cblinfun_apply_cblinfun_compose)
|
Debates of the Senate (Hansard)
1st Session, 36th Parliament,
Volume 137, Issue 122
Thursday, March 18, 1999
The Honourable Gildas L. Molgat, Speaker
International Day for the Elimination of Racial Discrimination
International Year of Older Persons
Canada Customs and Revenue Agency Bill
The Role of the Judicial Council
Comments in the Media-Notice of Inquiry
The International Criminal Tribunal for Crimes Committed in the Former Yugoslavia and in Rwanda
The Position of Madam Justice Louise Arbour-Notice of Inquiry
The International Position in the Field of Communications
The Budget Deficits of the Canadian Forces-Request for Answers to Questions on the Order Paper
Improving the State of Democracy in East Timor-The Role of the Government
Religious Freedom in Tibet Under Chinese Occupation-The Position of the Government
The Compensation Plan for Merchant Navy Veterans-The Position of the Government
The Minister's Statements Concerning Productivity Problems-The Position of the Government
Delayed Answer to an Oral Question
Tabling of Answers to Questions on the Order Paper
Amending Bill-Third Reading
The War Veterans Allowance Act
The Department of Veterans Affairs Act
The Veterans Review and Appeal Board Act
Amending Bill-Second Reading
Foreign Publishers Advertising Services Bill
Second Reading-Debate Adjourned
The Finance Minister's Statement-Inquiry-Debate Continued
The Report of the Foreign Affairs Committee-Inquiry-Debate Continued
Thursday, March 18, 1999
The sitting opened at 2 p.m., the Speaker in the chair.
Today, the Honourable Gerald-A.
Beaudoin was made a Knight of the Ordre de la Pleiade.
Today is also the first day of the Year of the Canadian Francophonie.
The visit was greatly appreciated on both sides.
They all left us with a thought.
We would like to share with you a few of the messages we heard last night.
The Honourable Gerald J. Comeau:
Let us stop saying French Canada and English Canada.
That is a myth.
There are proud francophones from one end of Canada to the other.
Honourable senators, happy Year of the Canadian Francophonie!
International Day for the Elimination of Racial Discrimination
The Honourable Donald H. Oliver:
The Honourable Mary Alice Butts:
Honourable senators, we must listen to Canadians.
More than 320 youth teams have already registered.
The Honourable Consiglio Di Nino:
Honourable senators, I would like to make a few comments on this most important day.
I will begin with a brief review of the issue.
The declaration came into force on October 24, 1970.
Canada's support for the new convention surprised no one.
The demographic shift was largely attributable to changes in immigration.
In 1971, they represented 29 per cent of the population.
Speaking of the Bill of Rights, Mr. Diefenbaker said:
His Honour the Speaker:
His Honour the Speaker:
Is leave granted, honourable senators?
Thank you very much, honourable senators.
The Right Honourable John Diefenbaker said, his voice and his jowls trembling:
His Honour the Speaker:
However, I give the floor to another senator.
International Year of Older Persons
The Honourable Marisa Ferretti Barth:
It concerns the specific needs of seniors from Canada's cultural communities.
The Honourable Noel A. Kinsella (Acting Deputy Leader of the Opposition):
Honourable senators, I ask leave to make another statement.
His Honour the Speaker:
Is leave granted, honourable senators?
Canada Customs and Revenue Agency Bill
Thursday, March 18, 1999
The Standing Senate Committee on National Finance has the honour to present its
His Honour the Speaker:
Honourable senators, when shall this bill be read the third time?
The Honourable Sharon Carstairs (Deputy Leader of the Government):
His Honour the Speaker:
Is leave granted, honourable senators?
The Honourable John Lynch-Staunton (Leader of the Opposition):
That the committee be authorized to sit during sittings and adjournments of the Senate;
That the committee table its final report no later than October 29, 1999.
The Honourable Eymard G. Corbin:
The Role of the Judicial Council
Comments in the Media-Notice of Inquiry
The Honourable Anne C. Cools:
(d) on the question of public statements by judges in the media;
The International Criminal Tribunal for Crimes Committed in the Former Yugoslavia and in Rwanda
The Position of Madam Justice Louise Arbour-Notice of Inquiry
The Honourable Anne C. Cools:
The International Position in the Field of Communications
Leave having been granted to revert to Notices of Motions:
The Honourable J. Michael Forrestall:
Honourable senators, I give notice that on Tuesday, March 23, 1999, I will move:
The Honourable Noel A. Kinsella (Acting Deputy Leader of the Opposition):
Honourable senators, I have a question for the Leader of the Government in the Senate.
The Honourable B. Alasdair Graham (Leader of the Government):
Honourable senators, I will obviously have to make inquiries before answering that question.
If the GST applies to this kind of contract, it has certainly been applied.
Thank you for your question.
Honourable senators, I will be very happy to do so.
In fact, the thought came to mind immediately.
The Honourable Consiglio Di Nino:
The Honourable J. Michael Forrestall:
The Honourable B. Alasdair Graham (Leader of the Government):
The Budget Deficits of the Canadian Forces-Request for Answers to Questions on the Order Paper
The Honourable J. Michael Forrestall:
Honourable senators, I have had questions on the Order Paper since October 21, 1997.
Why all these revisions?
How much longer must we wait for an answer to a very simple question?
The Honourable B. Alasdair Graham (Leader of the Government):
Honourable senators, the government is not the least bit embarrassed.
I will certainly look further into the matter today, as soon as the Senate adjourns.
The Honourable A. Raynell Andreychuk:
The Honourable B. Alasdair Graham (Leader of the Government):
Canada takes the whole question of its relations with Cuba very seriously.
We believe it is better to maintain our engagement.
Honourable senators, Canada maintains a constructive dialogue with Cuba.
Until now, we have taken the Cuban statements at face value.
The Human Rights Commission will be considering these questions shortly.
Improving the State of Democracy in East Timor-The Role of the Government
The Honourable Consiglio Di Nino:
Perhaps that is a good omen for Asia.
The Honourable B. Alasdair Graham (Leader of the Government):
Honourable senators, Canada has played a leading role.
They made vigorous representations about the situation in East Timor.
Religious Freedom in Tibet Under Chinese Occupation-The Position of the Government
The Honourable Consiglio Di Nino:
The Honourable B. Alasdair Graham (Leader of the Government):
Honourable senators, I take note of the question.
The Compensation Plan for Merchant Navy Veterans-The Position of the Government
The Honourable Mabel M. DeWare:
The Honourable B. Alasdair Graham (Leader of the Government):
Honourable senators, I assume my colleague is referring to the Minister of Veterans Affairs.
The Minister's Statements Concerning Productivity Problems-The Position of the Government
The Honourable Donald H. Oliver:
The Honourable B. Alasdair Graham (Leader of the Government):
We inherited a crushing unemployment rate and an outrageous deficit.
We have begun to cut taxes.
We have invested more money in research, development and education.
We have invested more money in health care.
The plant employs 157 Cape Breton workers.
Could he answer these questions directly?
Delayed Answer to an Oral Question
The Honourable Sharon Carstairs (Deputy Leader of the Government):
(Response to a question raised by the Honourable Noel A. Kinsella on March 3, 1999)
Tabling of Answers to Questions on the Order Paper
Treasury Board-The Number of Public Service Bonuses Paid to Public Servants
His Honour the Speaker:
On behalf of all honourable senators, I welcome you to the Senate.
Amending Bill-Third Reading
These reviews confirmed the validity of the act's basic principles.
They all contributed greatly to the development of this improved bill.
Stakeholders praised the process by which this legislation was developed.
They particularly appreciated the opportunity to voice all their concerns.
His Honour the Speaker:
Is it your pleasure, honourable senators, to adopt the motion?
(Motion agreed to and bill read third time and passed.)
The War Veterans Allowance Act
The Department of Veterans Affairs Act
The Veterans Review and Appeal Board Act
Amending Bill-Second Reading
The wounded, in body and in spirit, number in the thousands.
The average age of those who remain will soon reach 80.
Allow me now to speak to some of the details of the bill.
Former prisoners of war will also benefit from the passage of this bill.
We are pleased to offer these benefits to former prisoners of war.
However, there are no minor changes for those who are directly affected.
The Honourable Orville H. Phillips:
What a commendable effort!
This is therefore a very important step for me.
At the time, it was a process that took considerable time.
I believe that act of faith was justified.
Now it can be done regardless of its size.
I will say more about that a little later.
I will have questions on this at committee stage.
Once again, we will probably get the answer to that question in committee.
They said that was the reason behind their exclusion.
I would like to make two suggestions, honourable senators.
The first would be to pay an annuity to surviving merchant navy sailors.
The two groups would thus be on an equal footing.
The first point concerns the partial exclusion of members of the Ferry Command.
The board is asking for three amendments.
Discretion will be needed in this regard.
The amendment will allow the chair to delegate these decisions to a member of the board.
In my opinion, the proposed amendments are bureaucratic in nature.
This provision in fact gives him the benefit of the doubt.
His Honour the Speaker:
(Motion agreed to and bill read second time.)
His Honour the Speaker:
Honourable senators, when shall this bill be read the third time?
Foreign Publishers Advertising Services Bill
Second Reading-Debate Adjourned
- Honourable senators, I would like to begin with a bit of history.
It was printed by John Howe, father of the great and famous Joseph Howe.
Honourable senators, that magazine was read by some 200 subscribers.
Unfortunately, it was forced to fold after only three years.
Magazines are a rather particular phenomenon.
... the thread that binds the fibres of our country.
Honourable senators, periodicals today meet our expectations.
Periodicals make room for great stories and small ones alike.
Some of our most distinguished writers honed their craft in Canadian periodicals.
Canadian writers and publishers are among the most renowned in the world.
Honourable senators, we have today in Canada a dynamic, though fragile, periodicals industry.
In 1956, some 661 periodicals were published in Canada.
Today, more than 1,000 publishers produce over 1,500 Canadian periodicals.
More than 65 per cent of all periodicals distributed in Canada are Canadian.
Bill C-55 would counter this threat.
The bill before us would change nothing in that respect.
The free flow of ideas between states is important and enriching.
That should and will remain unchanged.
Honourable senators, the magazine industry in Canada is not very profitable.
Indeed, nearly half of Canadian magazines make no profit.
At best, it is a very difficult sector.
That is exactly what Bill C-55 would do.
It would not prohibit the sale of advertising services aimed at other markets.
The law would be enforced in several ways.
This fine would be in addition to all other penalties.
The bill gives them protection.
The problem lay in the means chosen to protect that identity.
The Government of Canada complied with the WTO decision.
Obviously, they also comply with the obligations imposed by NAFTA.
It provides severe penalties for foreign publishers who break the law.
The Reform Party was the only one to oppose it.
The Honourable John B. Stewart:
Senator Stewart's question calls for a supplementary.
Is that offer still open?
Honourable senators, as I speak, the negotiations are suspended.
I know that last week negotiations were continuing at a very high level.
There are none, to my knowledge.
Who is responsible for these negotiations?
That is correct, I believe.
Who is in charge?
The Minister of Canadian Heritage is responsible for the bill.
Obviously, the legislation has certain repercussions for the Department of Trade.
Who gave the negotiators their mandate?
It is fairly obvious that they are following the directions of the two ministers.
I want to stress that Canada has played by the rules.
The Honourable John Lynch-Staunton (Leader of the Opposition):
Honourable senators, I have a supplementary question concerning that last exchange.
That is indeed the case as I speak.
Otherwise, why would he be here?
I cannot confirm that, because no decision of that nature has been made.
The Honourable Noel A. Kinsella (Acting Deputy Leader of the Opposition):
Am I to understand that the government has made up its mind?
Is that the government's decision?
Is the opposition not listening?
The government will not bend?
That is their prerogative.
That could be wonderful.
Honourable senators, I do not know whether it would be all that wonderful.
Are we going to give in to the Americans yet again?
If that is what you find wonderful, then you are probably right.
Why negotiate if we are satisfied with the bill?
What is there to negotiate?
This is indeed the chamber of sober second thought.
Give us something to reflect on.
Listen carefully, honourable senators.
Those are the principles behind the bill.
Canada fully respects its trade obligations.
There is no doubt about our position.
We are trying to understand the government's position.
I would like to know whether the principle of your bill is non-negotiable.
Honourable senators, let us imagine for a moment that the bill is passed.
I suppose it would come into force four to six weeks later.
Many versions of this bill have been introduced.
It has played by the rules.
But those representatives said that is not how things would go.
Time Warner is throwing its full weight around.
What effect could they have under trade law?
We cannot always let ourselves be pushed around.
Let us continue our work and support this legislation.
That is certainly a guarantee no one can give.
That is my first point.
Is that not a serious assumption?
If I were asked that question, I would answer no.
His Honour the Speaker:
In fact, the question must be addressed to the Leader of the Government.
Presumably it will be referred to committee next week.
I expect the bill to follow the normal course.
Honourable senators, I would like some clarification.
Senator Murray asked whether the bill would be passed before Easter.
The answer was that there was no urgency.
Why is there no urgency?
Negotiations are currently under way, or could resume.
What are they about?
Those negotiations are being held behind closed doors.
The Finance Minister's Statement-Inquiry-Debate Continued
This in fact amounts to a surtax on workers and corporations.
These premiums concern a particular group.
Not everyone pays employment insurance.
Farmers and the self-employed do not pay premiums.
This progressive tax treats Canadians differently from one another.
Those who pay employment insurance premiums are also treated differently.
That is not very good for Canada.
It has been talked about for many years.
Ottawa is well aware of the potential risk, according to the OECD.
He asked: Can you blame them?
According to him, when government gets too greedy, people find other solutions.
It is a big problem, and it is getting worse.
The good people are leaving the country.
He spoke of property taxes.
He appealed.
The road surtax levied on his team is $2.1 million.
It is no wonder businesses are moving elsewhere.
In the 1970s, people wanted to leave Britain.
I do not blame these people for leaving Canada.
They do so because staying here would cost them millions of dollars more.
It is not that they do not want to live here.
Times are hard for people.
Because it is.
The TSN network has more cultural influence in Canada than the CBC.
It broadcasts junior-level and university games.
It is TSN, not the CBC, that gives us culture.
Junior hockey would never air on the CBC, but it airs on TSN.
Frankly, honourable senators, no Canadian will miss the CBC.
The Honourable John B. Stewart:
I have three questions to ask him, in fact.
Let me begin with the least important of the three.
Honourable senators, the savings rate in the United States is double ours.
Let me move on to my second question, honourable senators.
Let us stick to federal taxes.
For example, does he want to abolish the equalization program?
I come from Nova Scotia, and that province depends on equalization.
I suppose we would have to eliminate that, along with support for post-secondary education?
I will confine myself to these three programs.
What is the other side of the coin?
I was also of the view that the public service could be cut considerably.
It goes into people's pockets.
Is that explained by the spin-off effects you mention?
People are voting with their feet.
They are leaving the country.
This situation concerns parliamentarians, parents and people all across the country.
Why are people leaving the country?
I do not believe so.
His Honour the Speaker:
Honourable Senator Stewart, we are getting into debate.
Honourable senators, I would like to ask another question.
It is inevitable that difficulties will arise.
I did not hear the senator speak of this.
However, in his analysis, he should take this very important aspect into account.
Thanks to the Senate's indulgence, I was able to speak a few minutes longer.
I could have dealt with all these problems.
I am not an economist.
Manitoba, where taxes are lower, gained 20,000 jobs.
The Honourable Pierre De Bane:
This movement goes both ways.
He argued that the senator's province does not benefit from equalization payments.
There are other values that hold our society together.
I love this country, and that is why I am speaking today.
I do not wish to divide the country.
However, my thinking differs slightly.
The Liberals seem to speak as if the money belonged to the government.
That is entirely unfair.
The Honourable Nicholas W. Taylor:
My honourable colleague is quite happy with this situation.
Honourable senators, I am lost.
Senator Taylor, we will soon have to head west.
The Honourable Jean-Maurice Simard:
If only he were right!
Never has a federal budget been so lacking in transparency as this one.
His Honour the Speaker:
Is leave granted, honourable senators?
The province's demographic conditions are not expected to improve significantly in the short term.
What are these factors?
The first factor hindering economic growth and development is the tax system.
Yet the tax system is hostile to risk.
Let us first examine the role of inflation in the tax system.
Even low inflation rates help fill the government's coffers.
This partial indexing is a veritable gold mine for the federal government.
That is true, but the government is not telling the whole story.
I have read up thoroughly on this question.
One witness summed it up well:
The government is wrong when it says workers depend on employment insurance.
It is together that we will find these solutions.
The current government has not honoured that commitment.
Through its budget cuts, it has imposed an enormous burden on the Atlantic provinces.
In a sense, the Minister of Finance has become the perfect bureaucrat.
In fact, they spend because they have funds to spend.
This year, he is doing it again.
What matters is their effect on the bottom line of the budget.
These one-time measures represent more than $5 billion a year on average.
Indeed, this government has made choices.
By their nature, government budgets are political documents.
Chronic indebtedness has happily become a thing of the past.
(On motion of Senator Carstairs, for Senator Graham, debate adjourned.)
The Report of the Foreign Affairs Committee-Inquiry-Debate Continued
The Honourable Mabel M. DeWare:
A senator wishes to speak on this.
This inquiry stands in the name of Senator Andreychuk.
His Honour the Speaker:
Is it your pleasure, honourable senators, to adopt the motion?
After deleting those two paragraphs, I will move the motion.
His Honour the Speaker:
His Honour the Speaker:
Is it your pleasure, honourable senators, to adopt the motion?
(The Senate adjourned until Tuesday, March 23, 1999, at 2 p.m.)
|
lemma tendsto_compose_eventually: "g \<midarrow>l\<rightarrow> m \<Longrightarrow> (f \<longlongrightarrow> l) F \<Longrightarrow> eventually (\<lambda>x. f x \<noteq> l) F \<Longrightarrow> ((\<lambda>x. g (f x)) \<longlongrightarrow> m) F"
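Informal statement is: If $g$ tends to $m$ at the point $l$, $f$ tends to $l$ along a filter $F$, and eventually $f(x) \neq l$ along $F$, then the composition $g \circ f$ tends to $m$ along $F$.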
|
/-
Copyright (c) 2020 Anne Baanen. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Anne Baanen
-/
import ring_theory.algebraic
import ring_theory.localization
/-!
# Ideals over/under ideals
This file concerns ideals lying over other ideals.
Let `f : R →+* S` be a ring homomorphism (typically a ring extension), `I` an ideal of `R` and
`J` an ideal of `S`. We say `J` lies over `I` (and `I` under `J`) if `I` is the `f`-preimage of `J`.
This is expressed here by writing `I = J.comap f`.
## Implementation notes
The proofs of the `comap_ne_bot` and `comap_lt_comap` families use an approach
specific for their situation: we construct an element in `I.comap f` from the
coefficients of a minimal polynomial.
Once mathlib has more material on the localization at a prime ideal, the results
can be proven using more general going-up/going-down theory.
-/
variables {R : Type*} [comm_ring R]
namespace ideal
open polynomial
open submodule
section comm_ring
variables {S : Type*} [comm_ring S] {f : R →+* S} {I J : ideal S}
lemma coeff_zero_mem_comap_of_root_mem_of_eval_mem {r : S} (hr : r ∈ I) {p : polynomial R}
(hp : p.eval₂ f r ∈ I) : p.coeff 0 ∈ I.comap f :=
begin
rw [←p.div_X_mul_X_add, eval₂_add, eval₂_C, eval₂_mul, eval₂_X] at hp,
refine mem_comap.mpr ((I.add_mem_iff_right _).mp hp),
exact I.mul_mem_left _ hr
end
lemma coeff_zero_mem_comap_of_root_mem {r : S} (hr : r ∈ I) {p : polynomial R}
(hp : p.eval₂ f r = 0) : p.coeff 0 ∈ I.comap f :=
coeff_zero_mem_comap_of_root_mem_of_eval_mem hr (hp.symm ▸ I.zero_mem)
lemma exists_coeff_ne_zero_mem_comap_of_non_zero_divisor_root_mem {r : S}
(r_non_zero_divisor : ∀ {x}, x * r = 0 → x = 0) (hr : r ∈ I)
{p : polynomial R} : ∀ (p_ne_zero : p ≠ 0) (hp : p.eval₂ f r = 0),
∃ i, p.coeff i ≠ 0 ∧ p.coeff i ∈ I.comap f :=
begin
refine p.rec_on_horner _ _ _,
{ intro h, contradiction },
{ intros p a coeff_eq_zero a_ne_zero ih p_ne_zero hp,
refine ⟨0, _, coeff_zero_mem_comap_of_root_mem hr hp⟩,
simp [coeff_eq_zero, a_ne_zero] },
{ intros p p_nonzero ih mul_nonzero hp,
rw [eval₂_mul, eval₂_X] at hp,
obtain ⟨i, hi, mem⟩ := ih p_nonzero (r_non_zero_divisor hp),
refine ⟨i + 1, _, _⟩; simp [hi, mem] }
end
/-- Let `P` be an ideal in `R[x]`. The map
`R[x]/P → (R / (P ∩ R))[x] / (P / (P ∩ R))`
is injective.
-/
lemma injective_quotient_le_comap_map (P : ideal (polynomial R)) :
function.injective ((map (map_ring_hom (quotient.mk (P.comap C))) P).quotient_map
(map_ring_hom (quotient.mk (P.comap C))) le_comap_map) :=
begin
refine quotient_map_injective' (le_of_eq _),
rw comap_map_of_surjective
(map_ring_hom (quotient.mk (P.comap C))) (map_surjective _ quotient.mk_surjective),
refine le_antisymm (sup_le le_rfl _) (le_sup_left_of_le le_rfl),
refine λ p hp, polynomial_mem_ideal_of_coeff_mem_ideal P p (λ n, quotient.eq_zero_iff_mem.mp _),
simpa only [coeff_map, coe_map_ring_hom] using ext_iff.mp (ideal.mem_bot.mp (mem_comap.mp hp)) n,
end
/--
The identity in this lemma asserts that the "obvious" square
```
R → (R / (P ∩ R))
↓ ↓
R[x] / P → (R / (P ∩ R))[x] / (P / (P ∩ R))
```
commutes. It is used, for instance, in the proof of `quotient_mk_comp_C_is_integral_of_jacobson`,
in the file `ring_theory/jacobson`.
-/
lemma quotient_mk_maps_eq (P : ideal (polynomial R)) :
((quotient.mk (map (map_ring_hom (quotient.mk (P.comap C))) P)).comp C).comp
(quotient.mk (P.comap C)) =
((map (map_ring_hom (quotient.mk (P.comap C))) P).quotient_map
(map_ring_hom (quotient.mk (P.comap C))) le_comap_map).comp ((quotient.mk P).comp C) :=
begin
refine ring_hom.ext (λ x, _),
repeat { rw [ring_hom.coe_comp, function.comp_app] },
rw [quotient_map_mk, coe_map_ring_hom, map_C],
end
/--
This technical lemma asserts the existence of a polynomial `p` in an ideal `P ⊂ R[x]`
that is non-zero in the quotient `(R / (P ∩ R))[x]`. The assumptions are equivalent to
`P ≠ 0` and `P ∩ R = (0)`.
-/
lemma exists_nonzero_mem_of_ne_bot {P : ideal (polynomial R)}
(Pb : P ≠ ⊥) (hP : ∀ (x : R), C x ∈ P → x = 0) :
∃ p : polynomial R, p ∈ P ∧ (polynomial.map (quotient.mk (P.comap C)) p) ≠ 0 :=
begin
obtain ⟨m, hm⟩ := submodule.nonzero_mem_of_bot_lt (bot_lt_iff_ne_bot.mpr Pb),
refine ⟨m, submodule.coe_mem m, λ pp0, hm (submodule.coe_eq_zero.mp _)⟩,
refine (is_add_group_hom.injective_iff (polynomial.map (quotient.mk (P.comap C)))).mp _ _ pp0,
refine map_injective _ ((quotient.mk (P.comap C)).injective_iff_ker_eq_bot.mpr _),
rw [mk_ker],
exact (submodule.eq_bot_iff _).mpr (λ x hx, hP x (mem_comap.mp hx)),
end
end comm_ring
section integral_domain
variables {S : Type*} [integral_domain S] {f : R →+* S} {I J : ideal S}
lemma exists_coeff_ne_zero_mem_comap_of_root_mem {r : S} (r_ne_zero : r ≠ 0) (hr : r ∈ I)
{p : polynomial R} : ∀ (p_ne_zero : p ≠ 0) (hp : p.eval₂ f r = 0),
∃ i, p.coeff i ≠ 0 ∧ p.coeff i ∈ I.comap f :=
exists_coeff_ne_zero_mem_comap_of_non_zero_divisor_root_mem
(λ _ h, or.resolve_right (mul_eq_zero.mp h) r_ne_zero) hr
lemma exists_coeff_mem_comap_sdiff_comap_of_root_mem_sdiff
[is_prime I] (hIJ : I ≤ J) {r : S} (hr : r ∈ (J : set S) \ I)
{p : polynomial R} (p_ne_zero : p.map (quotient.mk (I.comap f)) ≠ 0) (hpI : p.eval₂ f r ∈ I) :
∃ i, p.coeff i ∈ (J.comap f : set R) \ (I.comap f) :=
begin
obtain ⟨hrJ, hrI⟩ := hr,
have rbar_ne_zero : quotient.mk I r ≠ 0 := mt (quotient.mk_eq_zero I).mp hrI,
have rbar_mem_J : quotient.mk I r ∈ J.map (quotient.mk I) := mem_map_of_mem hrJ,
have quotient_f : ∀ x ∈ I.comap f, (quotient.mk I).comp f x = 0,
{ simp [quotient.eq_zero_iff_mem] },
have rbar_root : (p.map (quotient.mk (I.comap f))).eval₂
(quotient.lift (I.comap f) _ quotient_f)
(quotient.mk I r) = 0,
{ convert quotient.eq_zero_iff_mem.mpr hpI,
exact trans (eval₂_map _ _ _) (hom_eval₂ p f (quotient.mk I) r).symm },
obtain ⟨i, ne_zero, mem⟩ :=
exists_coeff_ne_zero_mem_comap_of_root_mem rbar_ne_zero rbar_mem_J p_ne_zero rbar_root,
rw coeff_map at ne_zero mem,
refine ⟨i, (mem_quotient_iff_mem hIJ).mp _, mt _ ne_zero⟩,
{ simpa using mem },
simp [quotient.eq_zero_iff_mem],
end
lemma comap_ne_bot_of_root_mem {r : S} (r_ne_zero : r ≠ 0) (hr : r ∈ I)
{p : polynomial R} (p_ne_zero : p ≠ 0) (hp : p.eval₂ f r = 0) :
I.comap f ≠ ⊥ :=
λ h, let ⟨i, hi, mem⟩ := exists_coeff_ne_zero_mem_comap_of_root_mem r_ne_zero hr p_ne_zero hp in
absurd (mem_bot.mp (eq_bot_iff.mp h mem)) hi
lemma comap_lt_comap_of_root_mem_sdiff [I.is_prime] (hIJ : I ≤ J)
{r : S} (hr : r ∈ (J : set S) \ I)
{p : polynomial R} (p_ne_zero : p.map (quotient.mk (I.comap f)) ≠ 0) (hp : p.eval₂ f r ∈ I) :
I.comap f < J.comap f :=
let ⟨i, hJ, hI⟩ := exists_coeff_mem_comap_sdiff_comap_of_root_mem_sdiff hIJ hr p_ne_zero hp
in set_like.lt_iff_le_and_exists.mpr ⟨comap_mono hIJ, p.coeff i, hJ, hI⟩
variables [algebra R S]
lemma comap_ne_bot_of_algebraic_mem {x : S}
(x_ne_zero : x ≠ 0) (x_mem : x ∈ I) (hx : is_algebraic R x) : I.comap (algebra_map R S) ≠ ⊥ :=
let ⟨p, p_ne_zero, hp⟩ := hx
in comap_ne_bot_of_root_mem x_ne_zero x_mem p_ne_zero hp
lemma comap_ne_bot_of_integral_mem [nontrivial R] {x : S}
(x_ne_zero : x ≠ 0) (x_mem : x ∈ I) (hx : is_integral R x) : I.comap (algebra_map R S) ≠ ⊥ :=
comap_ne_bot_of_algebraic_mem x_ne_zero x_mem (hx.is_algebraic R)
lemma eq_bot_of_comap_eq_bot [nontrivial R] (hRS : algebra.is_integral R S)
(hI : I.comap (algebra_map R S) = ⊥) : I = ⊥ :=
begin
refine eq_bot_iff.2 (λ x hx, _),
by_cases hx0 : x = 0,
{ exact hx0.symm ▸ ideal.zero_mem ⊥ },
{ exact absurd hI (comap_ne_bot_of_integral_mem hx0 hx (hRS x)) }
end
lemma mem_of_one_mem (h : (1 : S) ∈ I) (x) : x ∈ I :=
(I.eq_top_iff_one.mpr h).symm ▸ mem_top
lemma comap_lt_comap_of_integral_mem_sdiff [hI : I.is_prime] (hIJ : I ≤ J)
{x : S} (mem : x ∈ (J : set S) \ I) (integral : is_integral R x) :
I.comap (algebra_map R S) < J.comap (algebra_map _ _) :=
begin
obtain ⟨p, p_monic, hpx⟩ := integral,
refine comap_lt_comap_of_root_mem_sdiff hIJ mem _ _,
swap,
{ apply map_monic_ne_zero p_monic,
apply quotient.nontrivial,
apply mt comap_eq_top_iff.mp,
apply hI.1 },
convert I.zero_mem
end
lemma is_maximal_of_is_integral_of_is_maximal_comap
(hRS : algebra.is_integral R S) (I : ideal S) [I.is_prime]
(hI : is_maximal (I.comap (algebra_map R S))) : is_maximal I :=
⟨⟨mt comap_eq_top_iff.mpr hI.1.1,
λ J I_lt_J, let ⟨I_le_J, x, hxJ, hxI⟩ := set_like.lt_iff_le_and_exists.mp I_lt_J in
comap_eq_top_iff.1 $ hI.1.2 _ (comap_lt_comap_of_integral_mem_sdiff I_le_J ⟨hxJ, hxI⟩ (hRS x))⟩⟩
lemma is_maximal_of_is_integral_of_is_maximal_comap' {R S : Type*} [comm_ring R] [integral_domain S]
(f : R →+* S) (hf : f.is_integral) (I : ideal S) [hI' : I.is_prime]
(hI : is_maximal (I.comap f)) : is_maximal I :=
@is_maximal_of_is_integral_of_is_maximal_comap R _ S _ f.to_algebra hf I hI' hI
lemma is_maximal_comap_of_is_integral_of_is_maximal (hRS : algebra.is_integral R S)
(I : ideal S) [hI : I.is_maximal] : is_maximal (I.comap (algebra_map R S)) :=
begin
refine quotient.maximal_of_is_field _ _,
haveI : is_prime (I.comap (algebra_map R S)) := comap_is_prime _ _,
exact is_field_of_is_integral_of_is_field (is_integral_quotient_of_is_integral hRS)
algebra_map_quotient_injective (by rwa ← quotient.maximal_ideal_iff_is_field_quotient),
end
lemma is_maximal_comap_of_is_integral_of_is_maximal' {R S : Type*} [comm_ring R] [integral_domain S]
(f : R →+* S) (hf : f.is_integral) (I : ideal S) (hI : I.is_maximal) : is_maximal (I.comap f) :=
@is_maximal_comap_of_is_integral_of_is_maximal R _ S _ f.to_algebra hf I hI
lemma integral_closure.comap_ne_bot [nontrivial R] {I : ideal (integral_closure R S)}
(I_ne_bot : I ≠ ⊥) : I.comap (algebra_map R (integral_closure R S)) ≠ ⊥ :=
let ⟨x, x_mem, x_ne_zero⟩ := I.ne_bot_iff.mp I_ne_bot in
comap_ne_bot_of_integral_mem x_ne_zero x_mem (integral_closure.is_integral x)
lemma integral_closure.eq_bot_of_comap_eq_bot [nontrivial R] {I : ideal (integral_closure R S)} :
I.comap (algebra_map R (integral_closure R S)) = ⊥ → I = ⊥ :=
imp_of_not_imp_not _ _ integral_closure.comap_ne_bot
lemma integral_closure.comap_lt_comap {I J : ideal (integral_closure R S)} [I.is_prime]
(I_lt_J : I < J) :
I.comap (algebra_map R (integral_closure R S)) < J.comap (algebra_map _ _) :=
let ⟨I_le_J, x, hxJ, hxI⟩ := set_like.lt_iff_le_and_exists.mp I_lt_J in
comap_lt_comap_of_integral_mem_sdiff I_le_J ⟨hxJ, hxI⟩ (integral_closure.is_integral x)
lemma integral_closure.is_maximal_of_is_maximal_comap
(I : ideal (integral_closure R S)) [I.is_prime]
(hI : is_maximal (I.comap (algebra_map R (integral_closure R S)))) : is_maximal I :=
is_maximal_of_is_integral_of_is_maximal_comap (λ x, integral_closure.is_integral x) I hI
/-- `comap (algebra_map R S)` is a surjection from the prime spec of `S` to the prime spec of `R`.
`hP : (algebra_map R S).ker ≤ P` is a slight generalization of the extension being injective -/
lemma exists_ideal_over_prime_of_is_integral' (H : algebra.is_integral R S)
(P : ideal R) [is_prime P] (hP : (algebra_map R S).ker ≤ P) :
∃ (Q : ideal S), is_prime Q ∧ Q.comap (algebra_map R S) = P :=
begin
have hP0 : (0 : S) ∉ algebra.algebra_map_submonoid S P.prime_compl,
{ rintro ⟨x, ⟨hx, x0⟩⟩,
exact absurd (hP x0) hx },
let Rₚ := localization P.prime_compl,
let f := localization.of P.prime_compl,
let Sₚ := localization (algebra.algebra_map_submonoid S P.prime_compl),
let g := localization.of (algebra.algebra_map_submonoid S P.prime_compl),
letI : integral_domain (localization (algebra.algebra_map_submonoid S P.prime_compl)) :=
localization_map.integral_domain_localization (le_non_zero_divisors_of_domain hP0),
obtain ⟨Qₚ : ideal Sₚ, Qₚ_maximal⟩ := exists_maximal Sₚ,
haveI Qₚ_max : is_maximal (comap _ Qₚ) := @is_maximal_comap_of_is_integral_of_is_maximal Rₚ _ Sₚ _
(localization_algebra P.prime_compl f g)
(is_integral_localization f g H) _ Qₚ_maximal,
refine ⟨comap g.to_map Qₚ, ⟨comap_is_prime g.to_map Qₚ, _⟩⟩,
convert localization.at_prime.comap_maximal_ideal,
rw [comap_comap, ← local_ring.eq_maximal_ideal Qₚ_max, ← f.map_comp _],
refl
end
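/- Proof shape, informally: localize `R` at `P` and `S` at the image of
`P.prime_compl`; the localization `Sₚ` is an integral extension of the local ring
`Rₚ`, so any maximal ideal `Qₚ` of `Sₚ` contracts to a maximal ideal of `Rₚ`,
necessarily its unique maximal ideal, whose contraction to `R` is `P`. Pulling
`Qₚ` back along `g` then yields the required prime `Q` of `S` lying over `P`. -/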
/-- More general going-up theorem than `exists_ideal_over_prime_of_is_integral'`.
TODO: Version of going-up theorem with arbitrary length chains (by induction on this)?
Not sure how best to write an ascending chain in Lean -/
theorem exists_ideal_over_prime_of_is_integral (H : algebra.is_integral R S)
(P : ideal R) [is_prime P] (I : ideal S) [is_prime I] (hIP : I.comap (algebra_map R S) ≤ P) :
∃ Q ≥ I, is_prime Q ∧ Q.comap (algebra_map R S) = P :=
begin
obtain ⟨Q' : ideal I.quotient, ⟨Q'_prime, hQ'⟩⟩ := @exists_ideal_over_prime_of_is_integral'
(I.comap (algebra_map R S)).quotient _ I.quotient _
ideal.quotient_algebra
(is_integral_quotient_of_is_integral H)
(map (quotient.mk (I.comap (algebra_map R S))) P)
(map_is_prime_of_surjective quotient.mk_surjective (by simp [hIP]))
(le_trans
(le_of_eq ((ring_hom.injective_iff_ker_eq_bot _).1 algebra_map_quotient_injective))
bot_le),
haveI := Q'_prime,
refine ⟨Q'.comap _, le_trans (le_of_eq mk_ker.symm) (ker_le_comap _), ⟨comap_is_prime _ Q', _⟩⟩,
rw comap_comap,
refine trans _ (trans (congr_arg (comap (quotient.mk (comap (algebra_map R S) I))) hQ') _),
{ simpa [comap_comap] },
{ refine trans (comap_map_of_surjective _ quotient.mk_surjective _) (sup_eq_left.2 _),
simpa [← ring_hom.ker_eq_comap_bot] using hIP},
end
/-- `comap (algebra_map R S)` is a surjection from the max spec of `S` to max spec of `R`.
`hP : (algebra_map R S).ker ≤ P` is a slight generalization of the extension being injective -/
lemma exists_ideal_over_maximal_of_is_integral (H : algebra.is_integral R S)
(P : ideal R) [P_max : is_maximal P] (hP : (algebra_map R S).ker ≤ P) :
∃ (Q : ideal S), is_maximal Q ∧ Q.comap (algebra_map R S) = P :=
begin
obtain ⟨Q, ⟨Q_prime, hQ⟩⟩ := exists_ideal_over_prime_of_is_integral' H P hP,
haveI : Q.is_prime := Q_prime,
exact ⟨Q, is_maximal_of_is_integral_of_is_maximal_comap H _ (hQ.symm ▸ P_max), hQ⟩,
end
end integral_domain
end ideal
|
theory L2
imports RTC
begin
section \<open>Syntax\<close>
text \<open>Lambda terms are represented using De Bruijn indices\<close>
type_synonym loc = nat
datatype type =
Unit |
Num |
Bool |
Fn type type (infix "\<rightarrow>" 70)
datatype type_loc = Numref
datatype binop = Plus (".+") | Geq (".\<ge>")
datatype exp =
Skip ("skip") |
Number int ("#_" [100] 100) |
Boolean bool ("$_" [100] 100) |
Binop exp binop exp ("_ _. _" [65, 1000, 65] 65) |
Seq exp exp (infixr ";" 65) |
Cond exp exp exp ("if _ then _ else _ fi" [50, 50, 50] 65) |
While exp exp ("while _ do _ od" [50, 50] 65) |
Deref loc ("!l_" [100] 100) |
Assign loc exp ("l_ := _" [0, 65] 65) |
Var nat ("`_" [100] 100) |
App exp exp ("_\<^sup>._" [65, 65] 65) |
Abs type exp ("fn _ \<Rightarrow> _" [50, 65] 65)
abbreviation true :: exp where "true \<equiv> $True"
abbreviation false :: exp where "false \<equiv> $False"
section \<open>Substitution\<close>
fun lift :: "exp \<Rightarrow> nat \<Rightarrow> nat \<Rightarrow> exp" ("_\<up>\<^sub>_\<^sup>_" [50, 55, 55] 50) where
"skip\<up>\<^sub>n\<^sup>k = skip" |
"#m\<up>\<^sub>n\<^sup>k = #m" |
"$b\<up>\<^sub>n\<^sup>k = $b" |
"`i\<up>\<^sub>n\<^sup>k = `(if i < k then i else i+n)" |
"(e1 bop. e2)\<up>\<^sub>n\<^sup>k= (e1\<up>\<^sub>n\<^sup>k) bop. (e2\<up>\<^sub>n\<^sup>k)" |
"(e1; e2)\<up>\<^sub>n\<^sup>k = (e1\<up>\<^sub>n\<^sup>k); (e2\<up>\<^sub>n\<^sup>k)" |
"(if e1 then e2 else e3 fi)\<up>\<^sub>n\<^sup>k =
if e1\<up>\<^sub>n\<^sup>k then e2\<up>\<^sub>n\<^sup>k else e3\<up>\<^sub>n\<^sup>k fi" |
"(while e1 do e2 od)\<up>\<^sub>n\<^sup>k =
while e1\<up>\<^sub>n\<^sup>k do e2\<up>\<^sub>n\<^sup>k od" |
"!l(i)\<up>\<^sub>n\<^sup>k = !l(i)" |
"(l(i):=e)\<up>\<^sub>n\<^sup>k = l(i):=(e\<up>\<^sub>n\<^sup>k)" |
"(e1\<^sup>.e2)\<up>\<^sub>n\<^sup>k = (e1\<up>\<^sub>n\<^sup>k)\<^sup>.(e2\<up>\<^sub>n\<^sup>k)" |
"(fn T \<Rightarrow> e)\<up>\<^sub>n\<^sup>k = fn T \<Rightarrow> (e\<up>\<^sub>n\<^sup>k+1)"
fun subst :: "exp \<Rightarrow> nat \<Rightarrow> exp \<Rightarrow> exp" ("_[_::=_]" [65, 50, 50] 65) where
"skip[k::=N] = skip" |
"#n[k::=N] = #n" |
"$b[k::=N] = $b" |
"`i[k::=N] = (if i < k then `i else
if i = k then N else
`(i-1))" |
"(e1 bop. e2)[k::=N] = (e1[k::=N]) bop. (e2[k::=N])" |
"(e1;e2)[k::=N] = (e1[k::=N]);(e2[k::=N])" |
"if e1 then e2 else e3 fi [k::=N] =
if e1[k::=N] then e2[k::=N] else e3[k::=N] fi" |
"while e1 do e2 od [k::=N] = while e1[k::=N] do e2[k::=N] od" |
"!l(i)[k::=N] = !l(i)" |
"(l(i):=e)[k::=N] = l(i):=(e[k::=N])" |
"(e1\<^sup>.e2)[k::=N] = (e1[k::=N])\<^sup>.(e2[k::=N])" |
"(fn T \<Rightarrow> e)[k::=N] = fn T \<Rightarrow> (e[k+1 ::= N\<up>\<^sub>1\<^sup>0])"
fun closed_at :: "exp \<Rightarrow> nat \<Rightarrow> bool" where
"closed_at skip _ = True" |
"closed_at (#_) _ = True" |
"closed_at ($_) _ = True" |
"closed_at (`x) n = (x < n)" |
"closed_at (e1 bop. e2) n= (closed_at e1 n \<and> closed_at e2 n)" |
"closed_at (e1;e2) n = (closed_at e1 n \<and> closed_at e2 n)" |
"closed_at (if e1 then e2 else e3 fi) n =
(closed_at e1 n \<and> closed_at e2 n \<and> closed_at e3 n)" |
"closed_at (while e1 do e2 od) n = (closed_at e1 n \<and> closed_at e2 n)" |
"closed_at (!l_) _ = True" |
"closed_at (l(i):=e) n = closed_at e n" |
"closed_at (fn _ \<Rightarrow> e) n = closed_at e (n+1)" |
"closed_at (e1\<^sup>.e2) n = (closed_at e1 n \<and> closed_at e2 n)"
abbreviation closed :: "exp \<Rightarrow> bool" where
"closed e \<equiv> closed_at e 0"
section \<open>Operational semantics\<close>
fun is_value :: "exp \<Rightarrow> bool" where
"is_value skip = True" |
"is_value (#_) = True" |
"is_value ($_) = True" |
"is_value (fn _ \<Rightarrow> _) = True" |
"is_value _ = False"
type_synonym store = "loc \<Rightarrow> int option"
inductive sem :: "exp \<times> store \<Rightarrow> exp \<times> store \<Rightarrow> bool" (infix "\<Rightarrow>" 50) where
"(skip; e2, s) \<Rightarrow> (e2, s)" |
"(#n1 .+. #n2, s) \<Rightarrow> (#(n1 + n2), s)" |
"(#n1 .\<ge>. #n2, s) \<Rightarrow> ($(n1 \<ge> n2), s)" |
"(e1, s) \<Rightarrow> (e1', s') \<Longrightarrow> (e1 bop. e2, s) \<Rightarrow> (e1' bop. e2, s')" |
"is_value v \<Longrightarrow> (e2, s) \<Rightarrow> (e2', s') \<Longrightarrow> (v bop. e2, s) \<Rightarrow> (v bop. e2', s')" |
"(e1, s) \<Rightarrow> (e1', s') \<Longrightarrow> (e1; e2, s) \<Rightarrow> (e1'; e2, s')" |
"(if true then e2 else e3 fi, s) \<Rightarrow> (e2, s)" |
"(if false then e2 else e3 fi, s) \<Rightarrow> (e3, s)" |
"(e1, s) \<Rightarrow> (e1', s') \<Longrightarrow> (if e1 then e2 else e3 fi, s) \<Rightarrow>
(if e1' then e2 else e3 fi, s')" |
"(while e1 do e2 od, s) \<Rightarrow> (if e1 then (e2; while e1 do e2 od) else skip fi, s)" |
"i \<in> dom s \<Longrightarrow> s i = Some n \<Longrightarrow> (!li, s) \<Rightarrow> (#n, s)" |
"i \<in> dom s \<Longrightarrow> (l(i) := #n, s) \<Rightarrow> (skip, s(i \<mapsto> n))" |
"(e, s) \<Rightarrow> (e', s') \<Longrightarrow> (l(i) := e, s) \<Rightarrow> (l(i) := e', s')" |
"(e1, s) \<Rightarrow> (e1', s') \<Longrightarrow> (e1\<^sup>.e2, s) \<Rightarrow> (e1'\<^sup>.e2, s')" |
"is_value v \<Longrightarrow> (e2, s) \<Rightarrow> (e2', s') \<Longrightarrow> (v\<^sup>.e2, s) \<Rightarrow> (v\<^sup>.e2', s')" |
"is_value v \<Longrightarrow> ((fn T \<Rightarrow> e)\<^sup>.v, s) \<Rightarrow> (e[0 ::= v], s)"
declare sem.intros[intro!]
inductive_cases sem_elims [elim!]:
"(skip, s) \<Rightarrow> (e', s')"
"(#x, s) \<Rightarrow> (e', s')"
"($x, s) \<Rightarrow> (e', s')"
"(e1 .+. e2, s) \<Rightarrow> (e', s')"
"(e1 .\<ge>. e2, s) \<Rightarrow> (e', s')"
"(e1 bop. e2, s) \<Rightarrow> (e', s')"
"(`x, s) \<Rightarrow> (e', s')"
"(e1; e2, s) \<Rightarrow> (e', s')"
"(if e1 then e2 else e3 fi, s) \<Rightarrow> (e', s')"
"(while e1 do e2 od, s) \<Rightarrow> (e', s')"
"(!l(i), s) \<Rightarrow> (e', s')"
"(l(i) := e, s) \<Rightarrow> (e', s')"
"(fn T \<Rightarrow> e, s) \<Rightarrow> (e', s')"
"(e1\<^sup>.e2, s) \<Rightarrow> (e', s')"
abbreviation sem_rtc :: "exp \<times> store \<Rightarrow> exp \<times> store \<Rightarrow> bool" (infix "\<Rightarrow>\<^sup>*" 50) where
"\<sigma> \<Rightarrow>\<^sup>* \<sigma>' \<equiv> rtc sem \<sigma> \<sigma>'"
section \<open>Type environment\<close>
definition
shift :: "(nat \<Rightarrow> 'a) \<Rightarrow> nat \<Rightarrow> 'a \<Rightarrow> nat \<Rightarrow> 'a" ("_\<langle>_:_\<rangle>" [90, 0, 0] 91) where
"\<Gamma>\<langle>i:a\<rangle> = (\<lambda>j. if j < i then \<Gamma> j else if j = i then a else \<Gamma> (j - 1))"
lemma shift_gt [simp]: "j < i \<Longrightarrow> (\<Gamma>\<langle>i:T\<rangle>) j = \<Gamma> j"
by (simp add: shift_def)
lemma shift_lt [simp]: "i < j \<Longrightarrow> (\<Gamma>\<langle>i:T\<rangle>) j = \<Gamma> (j - 1)"
by (simp add: shift_def)
lemma shift_commute [simp]: "\<Gamma>\<langle>i:U\<rangle>\<langle>0:T\<rangle> = \<Gamma>\<langle>0:T\<rangle>\<langle>Suc i:U\<rangle>"
by (rule ext) (simp_all add: shift_def split: nat.split, force)
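text \<open>At the inserted index itself the shifted environment returns the new
type; this direct consequence of the definition complements the two lemmas
above:\<close>
lemma shift_eq: "(\<Gamma>\<langle>i:T\<rangle>) i = T"
  by (simp add: shift_def)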
section \<open>Typing\<close>
type_synonym type_env = "(nat \<Rightarrow> type) \<times> (nat \<Rightarrow> type_loc option)"
inductive typing :: "(nat \<Rightarrow> type) \<Rightarrow> (nat \<Rightarrow> type_loc option) \<Rightarrow> exp \<Rightarrow> type \<Rightarrow> bool" ("_, _ \<turnstile> _ : _" [50, 50, 50] 50) where
"\<Gamma>, \<Delta> \<turnstile> skip : Unit" |
"\<Gamma>, \<Delta> \<turnstile> #n : Num" |
"\<Gamma>, \<Delta> \<turnstile> $b : Bool" |
"\<Gamma>, \<Delta> \<turnstile> e1 : Num \<Longrightarrow> \<Gamma>, \<Delta> \<turnstile> e2 : Num \<Longrightarrow> \<Gamma>, \<Delta> \<turnstile> e1 .+. e2 : Num" |
"\<Gamma>, \<Delta> \<turnstile> e1 : Num \<Longrightarrow> \<Gamma>, \<Delta> \<turnstile> e2 : Num \<Longrightarrow> \<Gamma>, \<Delta> \<turnstile> e1 .\<ge>. e2 : Bool" |
"\<Gamma>, \<Delta> \<turnstile> e1 : Unit \<Longrightarrow> \<Gamma>, \<Delta> \<turnstile> e2 : T \<Longrightarrow> \<Gamma>, \<Delta> \<turnstile> e1; e2 : T" |
"\<Gamma>, \<Delta> \<turnstile> e1 : Bool \<Longrightarrow> \<Gamma>, \<Delta> \<turnstile> e2 : T \<Longrightarrow> \<Gamma>, \<Delta> \<turnstile> e3 : T \<Longrightarrow> \<Gamma>, \<Delta> \<turnstile> if e1 then e2 else e3 fi : T" |
"\<Gamma>, \<Delta> \<turnstile> e1 : Bool \<Longrightarrow> \<Gamma>, \<Delta> \<turnstile> e2 : Unit \<Longrightarrow> \<Gamma>, \<Delta> \<turnstile> while e1 do e2 od : Unit" |
"\<Delta> i = Some Numref \<Longrightarrow> \<Gamma>, \<Delta> \<turnstile> !l(i) : Num" |
"\<Delta> i = Some Numref \<Longrightarrow> \<Gamma>, \<Delta> \<turnstile> e : Num \<Longrightarrow> \<Gamma>, \<Delta> \<turnstile> l(i) := e : Unit" |
"\<Gamma> n = T \<Longrightarrow> \<Gamma>, \<Delta> \<turnstile> `n : T" |
"\<Gamma>\<langle>0:T\<rangle>, \<Delta> \<turnstile> e : T' \<Longrightarrow> \<Gamma>, \<Delta> \<turnstile> fn T \<Rightarrow> e : T \<rightarrow> T'" |
"\<Gamma>, \<Delta> \<turnstile> e1 : T \<rightarrow> T' \<Longrightarrow> \<Gamma>, \<Delta> \<turnstile> e2 : T \<Longrightarrow> \<Gamma>, \<Delta> \<turnstile>e1\<^sup>.e2 : T'"
declare typing.intros[intro!]
inductive_cases typing_elims[elim!]:
"\<Gamma>, \<Delta> \<turnstile> skip : T"
"\<Gamma>, \<Delta> \<turnstile> #x : T"
"\<Gamma>, \<Delta> \<turnstile> $x : T"
"\<Gamma>, \<Delta> \<turnstile> `x : T"
"\<Gamma>, \<Delta> \<turnstile> e1 .+. e2 : T"
"\<Gamma>, \<Delta> \<turnstile> e1 .\<ge>. e2 : T"
"\<Gamma>, \<Delta> \<turnstile> e1; e2 : T"
"\<Gamma>, \<Delta> \<turnstile> if e1 then e2 else e3 fi : T"
"\<Gamma>, \<Delta> \<turnstile> while e1 do e2 od : T"
"\<Gamma>, \<Delta> \<turnstile> !l(i) : T"
"\<Gamma>, \<Delta> \<turnstile> l(i) := e : T"
"\<Gamma>, \<Delta> \<turnstile> fn T \<Rightarrow> e : T'"
"\<Gamma>, \<Delta> \<turnstile> e1\<^sup>.e2: T"
section \<open>Let constructor\<close>
definition LetVal :: "type \<Rightarrow> exp \<Rightarrow> exp \<Rightarrow> exp" ("let val _ = _ in _ end" [50, 65, 65] 65) where
"LetVal T e1 e2 \<equiv> App (Abs T e2) e1"
lemma type_let: "\<Gamma>, \<Delta> \<turnstile> e1 : T \<Longrightarrow> \<Gamma>\<langle>0:T\<rangle>, \<Delta> \<turnstile> e2 : T' \<Longrightarrow> \<Gamma>, \<Delta> \<turnstile> let val T = e1 in e2 end : T'"
by (auto simp: LetVal_def)
lemma sem_let1: "(e1, s) \<Rightarrow> (e1', s') \<Longrightarrow> (let val T = e1 in e2 end, s) \<Rightarrow> (let val T = e1' in e2 end, s')"
by (auto simp: LetVal_def)
lemma sem_let2: "is_value v \<Longrightarrow> (let val T = v in e end, s) \<Rightarrow> (e[0 ::= v], s)"
by (auto simp: LetVal_def)
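text \<open>For example, a let whose bound expression is already a value
beta-reduces in a single step:\<close>
lemma sem_let_example: "(let val Num = #1 in `0 end, s) \<Rightarrow> (#1, s)"
  using sem_let2[of "#1" Num "`0" s] by simp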
section \<open>Properties about L2\<close>
lemma subst_appI: "is_value v \<Longrightarrow> e2 = e[0 ::= v] \<Longrightarrow> ((fn T \<Rightarrow> e)\<^sup>.v, s) \<Rightarrow> (e2, s)"
by auto
lemma [dest]: "is_value e \<Longrightarrow> \<forall>s. \<not> (\<exists>e' s'. (e, s) \<Rightarrow> (e', s'))"
by (induct e, auto)
theorem determinacy:
assumes "(e, s) \<Rightarrow> (e1, s1)" "(e, s) \<Rightarrow> (e2, s2)"
shows "(e1, s1) = (e2, s2)"
using assms by (induction arbitrary: e2 rule: sem.induct; (blast | clarsimp))
lemma lift_up: "e\<up>\<^sub>n\<^sup>k = e \<Longrightarrow> lift e n (Suc k) = e"
by (induct arbitrary: n k rule: lift.induct) auto
lemma shift_lift1 [intro!]: "\<Gamma>, \<Delta> \<turnstile> e : T \<Longrightarrow> \<Gamma>\<langle>i:U\<rangle>, \<Delta> \<turnstile> e\<up>\<^sub>1\<^sup>i : T"
by (induct arbitrary: i rule: typing.induct) auto
theorem subst_lemma [intro]:
assumes "\<Gamma>, \<Delta> \<turnstile> e : T" "\<Gamma>', \<Delta> \<turnstile> e' : T'" "\<Gamma> = \<Gamma>'\<langle>i:T'\<rangle>"
shows "\<Gamma>', \<Delta> \<turnstile> e[i ::= e'] : T"
using assms by (induct arbitrary: \<Gamma>' i e' rule: typing.induct) force+
lemma preservation:
assumes "\<Gamma>, \<Delta> \<turnstile> e : T" "(e, s) \<Rightarrow> (e', s')"
shows "\<Gamma>, \<Delta> \<turnstile> e' : T"
using assms by (induction arbitrary: e' rule: typing.induct) (erule sem_elims; blast)+
lemma preserv_dom:
assumes "\<Gamma>, \<Delta> \<turnstile> e : T" "(e, s) \<Rightarrow> (e', s')" "dom \<Delta> \<subseteq> dom s"
shows "dom \<Delta> \<subseteq> dom s'"
using assms by (induction arbitrary: e' s rule: typing.induct) ((erule sem_elims, simp) | blast)+
corollary pres_rtc:
assumes "(e, s) \<Rightarrow>\<^sup>* (e', s')" "\<Gamma>, \<Delta> \<turnstile> e : T" "dom \<Delta> \<subseteq> dom s"
shows "\<Gamma>, \<Delta> \<turnstile> e' : T" "dom \<Delta> \<subseteq> dom s'"
using assms by (induction rule: rtc_induct, simp+, (metis preservation preserv_dom)+)
lemma [dest]: "\<Gamma>, \<Delta> \<turnstile> e : Num \<Longrightarrow> is_value e \<Longrightarrow> \<exists>n. e = #n"
by (induction e) auto
lemma [dest]: "\<Gamma>, \<Delta> \<turnstile> e : Bool \<Longrightarrow> is_value e \<Longrightarrow> \<exists>n. e = Boolean n"
by (induction e) auto
lemma [dest]: "\<Gamma>, \<Delta> \<turnstile> e : Unit \<Longrightarrow> is_value e \<Longrightarrow> e = skip"
by (induction e) auto
lemma [dest]: "\<Gamma>, \<Delta> \<turnstile> e : T \<rightarrow> T' \<Longrightarrow> is_value e \<Longrightarrow> \<exists>e'. e = fn T \<Rightarrow> e'"
by (induct e, auto)
lemma [dest]: "\<Gamma>, \<Delta> \<turnstile> e1 : Bool \<Longrightarrow> is_value e1 \<Longrightarrow> \<exists>e' s'. (if e1 then e2 else e3 fi, s) \<Rightarrow> (e', s')"
by (induct e1, auto) (case_tac x, auto)
lemma progress:
assumes "\<Gamma>, \<Delta> \<turnstile> e : T" "closed e" "dom \<Delta> \<subseteq> dom s"
shows "is_value e \<or> (\<exists>e' s'. (e, s) \<Rightarrow> (e', s'))"
using assms by (induction arbitrary: T rule: typing.induct) (blast | simp)+
corollary safety:
assumes "\<Gamma>, \<Delta> \<turnstile> e : T" "(e, s) \<Rightarrow>\<^sup>* (e', s')"
"closed e'" "dom \<Delta> \<subseteq> dom s"
shows "is_value e' \<or> (\<exists>e'' s''. (e', s') \<Rightarrow> (e'', s''))"
by (metis assms pres_rtc progress)
theorem uniqueness:
assumes "\<Gamma>, \<Delta> \<turnstile> e : T" "\<Gamma>, \<Delta> \<turnstile> e : T'"
shows "T = T'"
using assms by (induction arbitrary: T' rule: typing.induct; blast)
end
|
function L=wfbtlength(Ls,wt,varargin);
%WFBTLENGTH WFBT length from signal
% Usage: L=wfbtlength(Ls,wt);
%
% `wfbtlength(Ls,wt)` returns the length of a Wavelet system that is long
% enough to expand a signal of length *Ls*. Please see the help on
% |wfbt| for an explanation of the parameter *wt*.
%
% If the returned length is longer than the signal length, the signal
% will be zero-padded by |wfbt| to length *L*.
%
% In addition, the function accepts flags defining boundary extension
% technique as in |wfbt|. The returned length can be longer than the
% signal length only in case of `'per'` (periodic extension).
%
% See also: wfbt, fwt
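%
%   Example (an illustrative sketch; assumes the LTFAT toolbox is on the path
%   and a 6-level full wavelet tree, with a hypothetical signal length of 4000):
%
%     wt = wfbtinit({'db10',6,'full'});
%     L = wfbtlength(4000,wt,'per');
%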
% AUTHOR: Zdenek Prusa
complainif_notposint(Ls,'Ls','WFBTLENGTH');
definput.import = {'fwt'};
[flags,kv]=ltfatarghelper({},definput,varargin);
% Initialize the wavelet filters structure
if ~isstruct(wt)
wt = wfbtinit(wt);
end
if(flags.do_per)
a = treeSub(wt);
L = filterbanklength(Ls,a);
else
L = Ls;
end
|
{- IdrisWeb Session System
Makes use of effects library to allow for persistent sessions.
Attacks to protect against:
- Fixation
- Session ID *MUST* be regenerated when the user logs in.
- Brute force
- Big numbers, cryptographically secure and random
- Sniffing (also todo: SSL)
- Only allow sessions to be sent over an SSL connection
-}
module IdrisWeb.Session.Session
import IdrisWeb.DB.SQLite.SQLiteNew
import Effects
import IdrisWeb.Common.Random.RandC
import SimpleParser
%access public
-- SessionID should be some long-ish random string (hash?)
SessionID : Type
SessionID = String
private
DB_NAME : String
DB_NAME = "/tmp/sessions.db"
-- I think in this circumstance, tagged data types
-- would be better, since we're not passing directly
-- to a function but rather just providing other
-- functions with data.
data SessionDataType = SInt Int
| SString String
| SBool Bool
| SNull
total
showSerialisedVal : SessionDataType -> (String, String)
showSerialisedVal (SInt i) = ("SInt", show i)
showSerialisedVal (SString s) = ("SString", s)
showSerialisedVal (SBool b) = ("SBool", show b)
showSerialisedVal (SNull) = ("SNull", "")
-- Given a serialised value from the DB, construct
-- the appropriate data type.
-- TODO: there is probably a better way than storing the
-- type as a string in the DB; an enum would likely be cleaner
--total
deserialiseVal : String -> String -> Maybe SessionDataType
deserialiseVal tystr s =
if tystr == "SInt" then case parse int s of
Left err => Nothing
Right (i, _) => Just $ SInt i
else if tystr == "SString" then Just $ SString s
else if tystr == "SBool" then case parse bool s of
Left err => Nothing
Right (b, _) => Just $ SBool b
else if tystr == "SNull" then Just SNull
else Nothing
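-- For instance (illustrative): deserialiseVal "SInt" "42" yields Just (SInt 42),
-- whereas an unknown tag, e.g. deserialiseVal "SFloat" "1.0", yields Nothing.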
-- SerialisedSession is a list of 3-tuples of <Key, Value, Type>.
SerialisedSessionEntry : Type
SerialisedSessionEntry = (String, String, String)
public -- this really shouldn't be public, TODO: change
SerialisedSession : Type
SerialisedSession = List SerialisedSessionEntry
-- SessionData is the user-facing data type, containing the session names and variables
public
SessionData : Type
SessionData = List (String, SessionDataType)
{-
deserialiseSession : SerialisedSession -> Maybe SessionData
deserialiseSession ss = sequence $ map (\(key, val, ty) => case (deserialiseVal ty val) of
Just dat => Just (key, dat)
Nothing => Nothing) ss
-}
deserialiseSession : SerialisedSession -> Maybe SessionData
deserialiseSession ss = traverse (\(key, val, ty) => case (deserialiseVal ty val) of
Just dat => Just (key, dat)
Nothing => Nothing) ss
-- showSerialisedVal : (String, String)
serialiseSession : SessionData -> SerialisedSession
serialiseSession sd = map (\(key, sdt) => let (tystr, valstr) = showSerialisedVal sdt in
(key, valstr, tystr)) sd
-- Retrieves session data as a list of (key, value, type) string triples.
-- We marshal these back to the required types in a later function.
collectResults : EffM IO [SQLITE (Either (SQLiteExecuting InvalidRow) (SQLiteExecuting ValidRow))]
[SQLITE (SQLiteExecuting InvalidRow)]
SerialisedSession
collectResults =
if_valid then do
key <- getColumnText 0
val <- getColumnText 1
ty <- getColumnText 2
step_result <- nextRow
xs <- collectResults
Effects.pure $ (key, val, ty) :: xs
else Effects.pure []
retrieveSessionData : SessionID -> Eff IO [SQLITE ()] (Either QueryError SerialisedSession)
retrieveSessionData s_id = do
conn_res <- openDB DB_NAME
if_valid then do
let sql = "SELECT key, val, ty FROM `sessiondata` WHERE `session_key` = ?"
ps_res <- prepareStatement sql
if_valid then do
bindText 1 s_id
bind_res <- finishBind
if_valid then do
executeStatement
results <- collectResults
finaliseInvalid
closeDB
Effects.pure $ Right results
else do
let be = getBindError bind_res
cleanupBindFail
Effects.pure $ Left be
else do
cleanupPSFail
Effects.pure . Left $ getQueryError ps_res
else
Effects.pure . Left $ getQueryError conn_res
--removeSessionData : SessionID -> Eff IO [SQLITE ()]
getInsertArg : SerialisedSession -> String
getInsertArg [] = ""
-- no comma needed at the end
getInsertArg ((key, val, ty) :: []) = "(\"" ++ key ++ "\", \"" ++ val ++ "\", \"" ++ ty ++ "\")"
getInsertArg ((key, val, ty) :: xs) = "(\"" ++ key ++ "\", \"" ++ val ++ "\", \"" ++ ty ++ "\")" ++ ", " ++ (getInsertArg xs)
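-- e.g. getInsertArg [("uid", "42", "SInt"), ("name", "bob", "SString")] evaluates to
-- ("uid", "42", "SInt"), ("name", "bob", "SString")
-- Note the values are spliced into the SQL text rather than bound; the
-- prepared-statement path in storeSessionRow below is the injection-safe route.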
storeSessionRow : SessionID -> SerialisedSessionEntry -> Eff IO [SQLITE ()] (Either QueryError ())
storeSessionRow s_id (key, val, ty) = do
conn_res <- openDB DB_NAME
if_valid then do
let insert_sql = "INSERT INTO `sessiondata` (`session_key`, `key`, `val`, `ty`) VALUES (?, ?, ?, ?)"
ps_res <- prepareStatement insert_sql
if_valid then do
-- Bind the arguments to the prepared statement
bindText 1 s_id
bindText 2 key
bindText 3 val
bindText 4 ty
bind_res <- finishBind
if_valid then do
executeStatement
finalise
closeDB
Effects.pure $ Right ()
else do
let be = getBindError bind_res
cleanupBindFail
Effects.pure $ Left be
else do
cleanupPSFail
Effects.pure . Left $ getQueryError ps_res
else
Effects.pure . Left $ getQueryError conn_res
storeSessionData : SessionID -> SerialisedSession -> Eff IO [SQLITE ()] (Either QueryError ())
storeSessionData s_id [] = Effects.pure $ Right ()
storeSessionData s_id (sr :: srs) = do res <- storeSessionRow s_id sr
case res of
Left err => Effects.pure $ Left err
Right () => storeSessionData s_id srs
removeSession : SessionID -> Eff IO [SQLITE ()] (Either QueryError ())
removeSession s_id = do
conn_res <- openDB DB_NAME
if_valid then do
let delete_sql = "DELETE FROM `sessiondata` WHERE `session_key` = ?"
ps_res <- prepareStatement delete_sql
if_valid then do
bindText 1 s_id
bind_res <- finishBind
if_valid then do
executeStatement
finalise
closeDB
Effects.pure $ Right ()
else do
let be = getBindError bind_res
cleanupBindFail
Effects.pure $ Left be
else do
cleanupPSFail
Effects.pure . Left $ getQueryError ps_res
else
Effects.pure . Left $ getQueryError conn_res
-- Remove then store
updateSessionData : SessionID -> SessionData -> Eff IO [SQLITE ()] (Either QueryError ())
updateSessionData s_id sd = do
del_res <- removeSession s_id
case del_res of
Left err => Effects.pure $ Left err
Right () => do store_res <- storeSessionData s_id (serialiseSession sd)
case store_res of
Left err' => Effects.pure $ Left err'
Right () => Effects.pure $ Right ()
getSession : SessionID -> IO (Maybe SessionData)
getSession s_id = do db_res <- run [()] (retrieveSessionData s_id)
case db_res of
Left err => pure Nothing
Right ss => pure $ deserialiseSession ss
{- Session effect:
We should be able to create, update and delete sessions.
We should only be able to update and delete valid sessions.
We should only be able to create sessions when we don't have an active session.
We really should only be able to populate a session after authentication,
and only after generating a fresh session ID (in order to prevent session
fixation attacks, though how to enforce that here is an open question).
-}
data SessionStep = SessionUninitialised
| SessionInitialised
public
data SessionRes : SessionStep -> Type where
InvalidSession : SessionRes s
ValidSession : SessionID -> SessionData -> SessionRes s
data Session : Effect where
-- Load a session from the database, given a session ID.
LoadSession : SessionID -> Session (SessionRes SessionUninitialised) (SessionRes SessionInitialised) (Maybe SessionData)
-- Updates the in-memory representation of the session
UpdateSession : SessionData -> Session (SessionRes SessionInitialised) (SessionRes SessionInitialised) ()
-- Given a session data set, creates a new session
CreateSession : SessionData -> Session (SessionRes SessionUninitialised) (SessionRes SessionInitialised) (Maybe SessionID)
-- Delete the current session
DeleteSession : Session (SessionRes SessionInitialised) (SessionRes SessionUninitialised) Bool -- Hmmm... Error handling? How?
-- Updates the DB with the new session data, discards the in-memory resources
WriteToDB : Session (SessionRes SessionInitialised) (SessionRes SessionUninitialised) Bool
-- Discards changes to the current session, disposes of resources
DiscardSessionChanges : Session (SessionRes SessionInitialised) (SessionRes SessionUninitialised) ()
GetSessionID : Session (SessionRes SessionInitialised) (SessionRes SessionInitialised) (Maybe SessionID)
GetSessionData : Session (SessionRes SessionInitialised) (SessionRes SessionInitialised) (Maybe SessionData)
SESSION : Type -> EFFECT
SESSION t = MkEff t Session
loadSession : SessionID -> EffM m [SESSION (SessionRes SessionUninitialised)]
[SESSION (SessionRes SessionInitialised)]
(Maybe SessionData)
loadSession s_id = (LoadSession s_id)
updateSession : SessionData -> Eff m [SESSION (SessionRes SessionInitialised)] ()
updateSession sd = (UpdateSession sd)
createSession : SessionData -> EffM m [SESSION (SessionRes SessionUninitialised)]
[SESSION (SessionRes SessionInitialised)]
(Maybe SessionID)
createSession sd = (CreateSession sd)
deleteSession : EffM m [SESSION (SessionRes SessionInitialised)]
[SESSION (SessionRes SessionUninitialised)]
Bool
deleteSession = DeleteSession
writeSessionToDB : EffM m [SESSION (SessionRes SessionInitialised)]
[SESSION (SessionRes SessionUninitialised)]
Bool
writeSessionToDB = WriteToDB
discardSession : EffM m [SESSION (SessionRes SessionInitialised)]
[SESSION (SessionRes SessionUninitialised)]
()
discardSession = DiscardSessionChanges
getSessionID : Eff m [SESSION (SessionRes SessionInitialised)]
(Maybe SessionID)
getSessionID = GetSessionID
getSessionData : Eff m [SESSION (SessionRes SessionInitialised)]
(Maybe SessionData)
getSessionData = GetSessionData
instance Handler Session IO where
-- Grab the session from the DB given the session key.
-- If it exists, construct the resource and return the data.
-- If not, return nothing, and reflect the invalidity in the resource.
-- This should never happen
handle (ValidSession _ _) (LoadSession _) k = k InvalidSession Nothing
handle InvalidSession (LoadSession s_id) k = do
maybe_session <- getSession s_id
case maybe_session of
Just s_data => k (ValidSession s_id s_data) (Just s_data)
Nothing => k InvalidSession Nothing
-- Update the in-memory representation of the session.
handle (ValidSession s_id s_dat) (UpdateSession s_dat') k =
k (ValidSession s_id s_dat') ()
-- If we're trying to update an invalid session, just let it fall
-- through.
handle (InvalidSession) (UpdateSession _) k =
k (InvalidSession) ()
-- Delete a session from the database, and dispose of our resources.
handle (ValidSession s_id _) DeleteSession k = do
delete_res <- run [()] (removeSession s_id)
case delete_res of
Left err => k InvalidSession False
Right () => k InvalidSession True
handle (InvalidSession) DeleteSession k = k InvalidSession False
-- Writes a session to the DB, and disposes of the in-memory resources
handle (ValidSession s_id s_dat) WriteToDB k = do
update_res <- run [()] (updateSessionData s_id s_dat)
case update_res of
Left err => do putStrLn (show err)
k InvalidSession False
Right () => k InvalidSession True
handle InvalidSession WriteToDB k = k InvalidSession False
-- Simply discard the resource without doing any writes
handle (ValidSession _ _) DiscardSessionChanges k = k InvalidSession ()
handle (InvalidSession) DiscardSessionChanges k = k InvalidSession ()
handle (ValidSession _ _) (CreateSession _) k = k InvalidSession Nothing
-- Creates a new session.
-- BIG TODO: This random number generator is extremely rudimentary, and not
-- secure enough for actual use.
-- We've also got no guarantee that the IDs generated will be unique...
-- This can be fixed by keeping a persistent counter in the session
-- DB, which we increment each time, and hash alongside the random number.
-- While OK for a quick prototype, this *REALLY* must be fixed.
handle InvalidSession (CreateSession sd) k = do
rand_id <- getRandom 1000000000 21474836476 -- FIXME: This is a pathetic level of entropy...
let s_id = show rand_id -- Some hash function would be here, typically
store_res <- run [()] (storeSessionData s_id (serialiseSession sd))
case store_res of
Left err' => k InvalidSession Nothing
Right () => k (ValidSession s_id sd) (Just s_id)
handle (ValidSession s_id s_dat) GetSessionID k = k (ValidSession s_id s_dat) (Just s_id)
handle (ValidSession s_id s_dat) GetSessionData k = k (ValidSession s_id s_dat) (Just s_dat)
handle InvalidSession GetSessionID k = k InvalidSession Nothing
handle InvalidSession GetSessionData k = k InvalidSession Nothing
|
Require Import propInv5.
Local Open Scope Z.
Definition _timer1 :=36000.
Definition _timer0 := _timer1-1.
Definition _logvar := ON.
Definition _hands0 :=
(make (of_Z (_timer1+1)) ON).
Definition _hands1 := _hands0.[of_Z _timer1 <- _logvar].
Definition _dryer0 := make (of_Z (_timer1+1)) ON.
Definition _dryer1 := _dryer0.[of_Z _timer1 <- _dryer0.[of_Z (_timer1-1)]].
Definition _ctrlState0 := (make (of_Z (_timer1+1)) ctrlDrying).
Definition _ctrlState1 := _ctrlState0.[of_Z _timer1 <- _ctrlState0.[of_Z (_timer1-1)]].
Definition _ctrlTimer0 :=0.
Definition _ctrlTimer1 := _ctrlTimer0 + 1.
Definition _dryer2 := _dryer1.[of_Z _timer1 <- ON].
Definition _ctrlState2 := _ctrlState1.[of_Z _timer1 <- ctrlDrying].
Definition _ctrlTimer2 := 0.
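(* Witness summary (restating the definitions above): with _timer1 = 36000 the
   arrays are constant. _hands1 and _dryer1 overwrite index _timer1 with a value
   that is already ON (e.g. _dryer0.[of_Z (_timer1-1)] = ON by get_make), so every
   entry of _hands1 and _dryer1 is ON, and likewise every entry of _ctrlState2 is
   ctrlDrying. *)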
Theorem proof5_5 :
exists hands0 hands1 dryer0 dryer1 ctrlState0 ctrlState1 ctrlTimer0 ctrlTimer1 timer0 timer1 ctrlState2 ctrlTimer2
(logvar : bool),
~((startnewloop hands0 hands1 dryer0 dryer1 ctrlState0 ctrlState1 ctrlTimer0 ctrlTimer1 timer0 timer1 logvar) /\
ctrlState1.[of_Z timer1]<>ctrlWaiting /\ ctrlState1.[of_Z timer1]=ctrlDrying /\ hands1.[of_Z timer1]=ON /\ ctrlTimer2=0 /\
ctrlState2=ctrlState1.[of_Z timer1 <- ctrlDrying] /\ ctrlTimer2<10
->
(inv hands1 dryer1 ctrlState2 ctrlTimer2 timer1)).
Proof.
exists _hands0.
exists _hands1.
exists _dryer0.
exists _dryer1.
exists _ctrlState0.
exists _ctrlState1.
exists _ctrlTimer0.
exists _ctrlTimer1.
exists _timer0.
exists _timer1.
exists _ctrlState2.
exists _ctrlTimer2.
exists _logvar.
unfold not.
intros.
assert (Hcond :
(startnewloop _hands0 _hands1 _dryer0 _dryer1 _ctrlState0 _ctrlState1 _ctrlTimer0 _ctrlTimer1 _timer0 _timer1 _logvar) /\
_ctrlState1.[of_Z _timer1]<>ctrlWaiting /\ _ctrlState1.[of_Z _timer1]=ctrlDrying /\ _hands1.[of_Z _timer1]=ON /\
_ctrlTimer2=0 /\ _ctrlState2=_ctrlState1.[of_Z _timer1 <- ctrlDrying] /\ _ctrlTimer2<10).
split.
split.
split.
(*proving propInv for timer0*)
unfold propInv5.
intros.
unfold _timer0 in H0.
unfold _timer1 in H0.
elimtype False.
auto with zarith.
(*proving extraInv for timer0*)
split.
reflexivity.
split.
reflexivity.
split.
reflexivity.
split.
reflexivity.
split.
reflexivity.
split.
reflexivity.
split.
unfold _timer0.
unfold _timer1.
auto with zarith.
split.
unfold _ctrlTimer0.
auto with zarith.
split.
intros.
right.
unfold _ctrlState0.
apply get_make.
intros.
split.
auto with zarith.
split.
unfold _hands0.
apply get_make.
split.
unfold _dryer0.
apply get_make.
intros.
elimtype False.
unfold _ctrlTimer0 in H1.
auto with zarith.
split.
symmetry.
apply Zeq_plus_swap.
reflexivity.
repeat split.
repeat split.
unfold _ctrlState1.
rewrite get_set_same.
unfold _ctrlState0.
rewrite get_make.
discriminate.
auto.
unfold _ctrlState1.
rewrite get_set_same.
unfold _ctrlState0.
rewrite get_make.
reflexivity.
auto.
(*proving the negated requirement*)
apply H in Hcond.
inversion_clear Hcond.
unfold propInv5 in H0.
specialize (H0 (_timer1-(36000-1))).
assert (Hpremise : (0<(_timer1-(36000-1)) /\ (_timer1-(36000-1))<=(_timer1-(36000-1)) /\
_dryer1.[of_Z (_timer1-(36000-1))]=ON)).
unfold _timer1.
auto with zarith.
apply H0 in Hpremise.
elim Hpremise.
intros.
inversion_clear H2.
inversion_clear H4.
inversion_clear H5.
replace _dryer1.[of_Z x] with _dryer0.[of_Z x] in H6.
unfold _dryer0 in H6.
rewrite get_make in H6.
contradict H6.
discriminate.
auto.
Qed.
|
/*
microsoft-oms-auditd-plugin
Copyright (c) Microsoft Corporation
All rights reserved.
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the ""Software""), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#define BOOST_TEST_DYN_LINK
#define BOOST_TEST_MODULE "ExecveConverterTests"
#include <boost/test/unit_test.hpp>
#include "Logger.h"
#include "RawEventAccumulator.h"
#include "StringUtils.h"
#include "ExecveConverter.h"
#include "TestEventQueue.h"
#include <fstream>
#include <stdexcept>
#include <iostream>
extern "C" {
#include <sys/types.h>
#include <unistd.h>
#include <stdlib.h>
};
class RawEventQueue: public IEventBuilderAllocator {
public:
explicit RawEventQueue(std::vector<std::string>& cmdlines): _buffer(), _size(0), _cmdlines(cmdlines) {}
bool Allocate(void** data, size_t size) override {
if (_size != size) {
_size = size;
}
if (_buffer.size() < _size) {
_buffer.resize(_size);
}
*data = _buffer.data();
return true;
}
int Commit() override {
Event event(_buffer.data(), _size);
std::vector<EventRecord> recs;
for (auto& rec : event) {
if (rec.RecordType() == static_cast<uint32_t>(RecordType::EXECVE)) {
recs.emplace_back(rec);
}
}
_converter.Convert(recs, _cmdline);
_cmdlines.emplace_back(_cmdline);
_size = 0;
return 1;
}
bool Rollback() override {
_size = 0;
return true;
}
private:
std::vector<uint8_t> _buffer;
size_t _size;
std::vector<std::string>& _cmdlines;
ExecveConverter _converter;
std::string _cmdline;
};
class TestData {
public:
std::string test_name;
std::vector<std::string> event_records;
std::string cmdline;
};
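// Note on the test vectors below: multi-part EXECVE arguments arrive hex encoded,
// so a2[0]=3031 decodes to "01" (0x30 = '0', 0x31 = '1') and a2[1]=323334 to "234";
// a 5-byte argument split across two records reassembles to "01234". The "<m...n>"
// markers in the expected cmdlines stand in for arguments or bytes that were lost.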
std::vector<TestData> test_data = {
{
"one-arg",
{
R"event(type=EXECVE msg=audit(1.001:1): argc=1 a0="arg1")event",
},
R"cmdline(arg1)cmdline"
},
{
"two-arg",
{
R"event(type=EXECVE msg=audit(1.001:2): argc=2 a0="arg1" a1="arg2")event",
},
R"cmdline(arg1 arg2)cmdline"
},
{
"missing-arg",
{
R"event(type=EXECVE msg=audit(1.001:3): argc=5 a0="arg1" a1="arg2")event",
R"event(type=EXECVE msg=audit(1.001:3): a4="arg5")event",
},
R"cmdline(arg1 arg2 <2...3> arg5)cmdline"
},
{
"multi-part-arg",
{
R"event(type=EXECVE msg=audit(1.001:4): argc=4 a0="arg1" a1="arg2" a2_len=5 a2[0]=3031)event",
R"event(type=EXECVE msg=audit(1.001:4): a2[1]=323334 a3="arg4")event",
},
R"cmdline(arg1 arg2 01234 arg4)cmdline"
},
{
"multi-part-arg-at-end",
{
R"event(type=EXECVE msg=audit(1.001:4): argc=3 a0="arg1" a1="arg2" a2_len=5 a2[0]=3031)event",
R"event(type=EXECVE msg=audit(1.001:4): a2[1]=323334)event",
},
R"cmdline(arg1 arg2 01234)cmdline"
},
{
"missing-arg-piece-beginning",
{
R"event(type=EXECVE msg=audit(1.001:5): argc=4 a0="arg1" a1="arg2" a2_len=5)event",
R"event(type=EXECVE msg=audit(1.001:5): a2[1]=323334 a3="arg4")event",
},
R"cmdline(arg1 arg2 <...>234 arg4)cmdline"
},
{
"missing-arg-piece-middle",
{
R"event(type=EXECVE msg=audit(1.001:6): argc=4 a0="arg1" a1="arg2" a2_len=5 a2[0]=3031)event",
R"event(type=EXECVE msg=audit(1.001:6): a2[2]=3334 a3="arg4")event",
},
R"cmdline(arg1 arg2 01<...>34 arg4)cmdline"
},
{
"missing-arg-piece-end",
{
R"event(type=EXECVE msg=audit(1.001:7): argc=4 a0="arg1" a1="arg2" a2_len=5 a2[0]=3031)event",
R"event(type=EXECVE msg=audit(1.001:7): a3="arg4")event",
},
R"cmdline(arg1 arg2 01<...> arg4)cmdline"
},
{
"missing-arg-piece-end-at-end",
{
R"event(type=EXECVE msg=audit(1.001:7): argc=4 a0="arg1" a1="arg2" a2_len=5 a2[0]=3031)event",
},
R"cmdline(arg1 arg2 01<...>)cmdline"
},
{
"multi-part-len-only",
{
R"event(type=EXECVE msg=audit(1.001:7): argc=4 a0="arg1" a1="arg2" a2_len=5)event",
R"event(type=EXECVE msg=audit(1.001:7): a3="arg4")event",
},
R"cmdline(arg1 arg2 <2...2> arg4)cmdline"
},
{
"multi-part-missing-len",
{
R"event(type=EXECVE msg=audit(1.001:7): argc=4 a0="arg1" a1="arg2" a2[0]=3031)event",
R"event(type=EXECVE msg=audit(1.001:7): a2[1]=323334 a3="arg4")event",
},
R"cmdline(arg1 arg2 <2...2> arg4)cmdline"
},
};
BOOST_AUTO_TEST_CASE( basic_test ) {
std::vector<std::string> actual_cmdlines;
auto prioritizer = DefaultPrioritizer::Create(0);
auto raw_queue = new RawEventQueue(actual_cmdlines);
auto raw_allocator = std::shared_ptr<IEventBuilderAllocator>(raw_queue);
auto raw_builder = std::make_shared<EventBuilder>(raw_allocator, prioritizer);
auto metrics_queue = new TestEventQueue();
auto metrics_allocator = std::shared_ptr<IEventBuilderAllocator>(metrics_queue);
auto metrics_builder = std::make_shared<EventBuilder>(metrics_allocator, prioritizer);
auto metrics = std::make_shared<Metrics>("test", metrics_builder);
RawEventAccumulator accumulator(raw_builder, metrics);
for (auto& test : test_data) {
for (auto& line: test.event_records) {
std::unique_ptr<RawEventRecord> record = std::make_unique<RawEventRecord>();
std::memcpy(record->Data(), line.data(), line.size());
if (record->Parse(RecordType::UNKNOWN, line.size())) {
accumulator.AddRecord(std::move(record));
} else {
Logger::Warn("Received unparsable event data: %s", line.c_str());
}
}
accumulator.Flush(0);
}
BOOST_REQUIRE_EQUAL(test_data.size(), actual_cmdlines.size());
for (size_t idx = 0; idx < test_data.size(); ++idx) {
BOOST_REQUIRE_MESSAGE(test_data[idx].cmdline == actual_cmdlines[idx], "Test [" << test_data[idx].test_name << "] failed: \nExpected: " << test_data[idx].cmdline << "\nGot: " << actual_cmdlines[idx]);
}
}
|
function poly = edgeToPolyline(edge, N)
%EDGETOPOLYLINE Convert an edge to a polyline with a given number of segments
%
% POLY = edgeToPolyline(EDGE, N)
%
% Example
% edge = [10 20 60 40];
% poly = edgeToPolyline(edge, 10);
% drawEdge(edge, 'lineWidth', 2);
% hold on
% drawPoint(poly);
% axis equal;
%
% See also
% edges2d, drawEdge, drawPolyline
%
% ------
% Author: David Legland
% e-mail: [email protected]
% Created: 2011-11-25, using Matlab 7.9.0.529 (R2009b)
% Copyright 2011 INRA - Cepia Software Platform.
if N < 1
    error('number of segments must be at least 1');
end
if length(edge) == 4
% case of planar edges
p1 = edge(1:2);
p2 = edge(3:4);
poly = [linspace(p1(1), p2(1), N+1)' linspace(p1(2), p2(2), N+1)'];
else
% case of 3D edges
p1 = edge(1:3);
p2 = edge(4:6);
poly = [...
linspace(p1(1), p2(1), N+1)' ...
linspace(p1(2), p2(2), N+1)' ...
linspace(p1(3), p2(3), N+1)'];
end
|
(* Title: HOL/HOLCF/IOA/NTP/Impl.thy
Author: Tobias Nipkow & Konrad Slind
*)
section \<open>The implementation\<close>
theory Impl
imports Sender Receiver Abschannel
begin
type_synonym 'm impl_state
= "'m sender_state * 'm receiver_state * 'm packet multiset * bool multiset"
(* sender_state * receiver_state * srch_state * rsch_state *)
definition
impl_ioa :: "('m action, 'm impl_state)ioa" where
impl_def: "impl_ioa == (sender_ioa \<parallel> receiver_ioa \<parallel> srch_ioa \<parallel> rsch_ioa)"
definition sen :: "'m impl_state => 'm sender_state" where "sen = fst"
definition rec :: "'m impl_state => 'm receiver_state" where "rec = fst \<circ> snd"
definition srch :: "'m impl_state => 'm packet multiset" where "srch = fst \<circ> snd \<circ> snd"
definition rsch :: "'m impl_state => bool multiset" where "rsch = snd \<circ> snd \<circ> snd"
definition
hdr_sum :: "'m packet multiset => bool => nat" where
"hdr_sum M b == countm M (%pkt. hdr(pkt) = b)"
(* Lemma 5.1 *)
definition
"inv1(s) \<equiv>
(\<forall>b. count (rsent(rec s)) b = count (srcvd(sen s)) b + count (rsch s) b)
\<and> (\<forall>b. count (ssent(sen s)) b
= hdr_sum (rrcvd(rec s)) b + hdr_sum (srch s) b)"
(* Lemma 5.2 *)
definition
"inv2(s) ==
(rbit(rec(s)) = sbit(sen(s)) &
ssending(sen(s)) &
count (rsent(rec s)) (~sbit(sen s)) <= count (ssent(sen s)) (~sbit(sen s)) &
count (ssent(sen s)) (~sbit(sen s)) <= count (rsent(rec s)) (sbit(sen s)))
|
(rbit(rec(s)) = (~sbit(sen(s))) &
rsending(rec(s)) &
count (ssent(sen s)) (~sbit(sen s)) <= count (rsent(rec s)) (sbit(sen s)) &
count (rsent(rec s)) (sbit(sen s)) <= count (ssent(sen s)) (sbit(sen s)))"
(* Lemma 5.3 *)
definition
"inv3(s) \<equiv>
rbit(rec(s)) = sbit(sen(s))
\<longrightarrow> (\<forall>m. sq(sen(s))=[] | m \<noteq> hd(sq(sen(s)))
\<longrightarrow> count (rrcvd(rec s)) (sbit(sen(s)),m)
+ count (srch s) (sbit(sen(s)),m)
\<le> count (rsent(rec s)) (~sbit(sen s)))"
(* Lemma 5.4 *)
definition "inv4(s) == rbit(rec(s)) = (~sbit(sen(s))) --> sq(sen(s)) ~= []"
subsection \<open>Invariants\<close>
declare le_SucI [simp]
lemmas impl_ioas =
impl_def sender_ioa_def receiver_ioa_def srch_ioa_thm [THEN eq_reflection]
rsch_ioa_thm [THEN eq_reflection]
lemmas "transitions" =
sender_trans_def receiver_trans_def srch_trans_def rsch_trans_def
lemmas [simp] =
ioa_triple_proj starts_of_par trans_of_par4 in_sender_asig
in_receiver_asig in_srch_asig in_rsch_asig
declare let_weak_cong [cong]
lemma [simp]:
"a\<in>actions(sender_asig)
\<or> a\<in>actions(receiver_asig)
\<or> a\<in>actions(srch_asig)
\<or> a\<in>actions(rsch_asig)"
by (induct a) simp_all
declare split_paired_All [simp del]
(* Three simpsets of different sizes
----------------------------------------------
1) simpset() does not unfold the transition relations
2) ss unfolds transition relations
3) rename_ss unfolds transitions and the abstract channel *)
ML \<open>
val ss = simpset_of (\<^context> addsimps @{thms "transitions"});
val rename_ss = simpset_of (put_simpset ss \<^context> addsimps @{thms unfold_renaming});
fun tac ctxt =
asm_simp_tac (put_simpset ss ctxt
|> Simplifier.add_cong @{thm conj_cong} |> Splitter.add_split @{thm if_split})
fun tac_ren ctxt =
asm_simp_tac (put_simpset rename_ss ctxt
|> Simplifier.add_cong @{thm conj_cong} |> Splitter.add_split @{thm if_split})
\<close>
subsubsection \<open>Invariant 1\<close>
lemma raw_inv1: "invariant impl_ioa inv1"
apply (unfold impl_ioas)
apply (rule invariantI)
apply (simp add: inv1_def hdr_sum_def srcvd_def ssent_def rsent_def rrcvd_def)
apply (simp (no_asm) del: trans_of_par4 add: imp_conjR inv1_def)
txt \<open>Split proof in two\<close>
apply (rule conjI)
(* First half *)
apply (simp add: Impl.inv1_def split del: if_split)
apply (induct_tac a)
apply (tactic "EVERY1[tac \<^context>, tac \<^context>, tac \<^context>, tac \<^context>]")
apply (tactic "tac \<^context> 1")
apply (tactic "tac_ren \<^context> 1")
txt \<open>5 + 1\<close>
apply (tactic "tac \<^context> 1")
apply (tactic "tac_ren \<^context> 1")
txt \<open>4 + 1\<close>
apply (tactic \<open>EVERY1[tac \<^context>, tac \<^context>, tac \<^context>, tac \<^context>]\<close>)
txt \<open>Now the other half\<close>
apply (simp add: Impl.inv1_def split del: if_split)
apply (induct_tac a)
apply (tactic "EVERY1 [tac \<^context>, tac \<^context>]")
txt \<open>detour 1\<close>
apply (tactic "tac \<^context> 1")
apply (tactic "tac_ren \<^context> 1")
apply (rule impI)
apply (erule conjE)+
apply (simp (no_asm_simp) add: hdr_sum_def Multiset.count_def Multiset.countm_nonempty_def
split: if_split)
txt \<open>detour 2\<close>
apply (tactic "tac \<^context> 1")
apply (tactic "tac_ren \<^context> 1")
apply (rule impI)
apply (erule conjE)+
apply (simp add: Impl.hdr_sum_def Multiset.count_def Multiset.countm_nonempty_def
Multiset.delm_nonempty_def split: if_split)
apply (rule allI)
apply (rule conjI)
apply (rule impI)
apply hypsubst
apply (rule pred_suc [THEN iffD1])
apply (drule less_le_trans)
apply (cut_tac eq_packet_imp_eq_hdr [unfolded Packet.hdr_def, THEN countm_props])
apply assumption
apply assumption
apply (rule countm_done_delm [THEN mp, symmetric])
apply (rule refl)
apply (simp (no_asm_simp) add: Multiset.count_def)
apply (rule impI)
apply (simp add: neg_flip)
apply hypsubst
apply (rule countm_spurious_delm)
apply (simp (no_asm))
apply (tactic "EVERY1 [tac \<^context>, tac \<^context>, tac \<^context>,
tac \<^context>, tac \<^context>, tac \<^context>]")
done
subsubsection \<open>INVARIANT 2\<close>
lemma raw_inv2: "invariant impl_ioa inv2"
apply (rule invariantI1)
txt \<open>Base case\<close>
apply (simp add: inv2_def receiver_projections sender_projections impl_ioas)
apply (simp (no_asm_simp) add: impl_ioas split del: if_split)
apply (induct_tac "a")
txt \<open>10 cases. First 4 are simple, since state doesn't change\<close>
ML_prf \<open>val tac2 = asm_full_simp_tac (put_simpset ss \<^context> addsimps [@{thm inv2_def}])\<close>
txt \<open>10 - 7\<close>
apply (tactic "EVERY1 [tac2,tac2,tac2,tac2]")
txt \<open>6\<close>
apply (tactic \<open>forward_tac \<^context> [rewrite_rule \<^context> [@{thm Impl.inv1_def}]
(@{thm raw_inv1} RS @{thm invariantE}) RS conjunct1] 1\<close>)
txt \<open>6 - 5\<close>
apply (tactic "EVERY1 [tac2,tac2]")
txt \<open>4\<close>
apply (tactic \<open>forward_tac \<^context> [rewrite_rule \<^context> [@{thm Impl.inv1_def}]
(@{thm raw_inv1} RS @{thm invariantE}) RS conjunct1] 1\<close>)
apply (tactic "tac2 1")
txt \<open>3\<close>
apply (tactic \<open>forward_tac \<^context> [rewrite_rule \<^context> [@{thm Impl.inv1_def}]
(@{thm raw_inv1} RS @{thm invariantE})] 1\<close>)
apply (tactic "tac2 1")
apply (tactic \<open>fold_goals_tac \<^context> [rewrite_rule \<^context> [@{thm Packet.hdr_def}]
(@{thm Impl.hdr_sum_def})]\<close>)
apply arith
txt \<open>2\<close>
apply (tactic "tac2 1")
apply (tactic \<open>forward_tac \<^context> [rewrite_rule \<^context> [@{thm Impl.inv1_def}]
(@{thm raw_inv1} RS @{thm invariantE}) RS conjunct1] 1\<close>)
apply (intro strip)
apply (erule conjE)+
apply simp
txt \<open>1\<close>
apply (tactic "tac2 1")
apply (tactic \<open>forward_tac \<^context> [rewrite_rule \<^context> [@{thm Impl.inv1_def}]
(@{thm raw_inv1} RS @{thm invariantE}) RS conjunct2] 1\<close>)
apply (intro strip)
apply (erule conjE)+
apply (tactic \<open>fold_goals_tac \<^context>
[rewrite_rule \<^context> [@{thm Packet.hdr_def}] (@{thm Impl.hdr_sum_def})]\<close>)
apply simp
done
subsubsection \<open>INVARIANT 3\<close>
lemma raw_inv3: "invariant impl_ioa inv3"
apply (rule invariantI)
txt \<open>Base case\<close>
apply (simp add: Impl.inv3_def receiver_projections sender_projections impl_ioas)
apply (simp (no_asm_simp) add: impl_ioas split del: if_split)
apply (induct_tac "a")
ML_prf \<open>val tac3 = asm_full_simp_tac (put_simpset ss \<^context> addsimps [@{thm inv3_def}])\<close>
txt \<open>10 - 8\<close>
apply (tactic "EVERY1[tac3,tac3,tac3]")
apply (tactic "tac_ren \<^context> 1")
apply (intro strip, (erule conjE)+)
apply hypsubst
apply (erule exE)
apply simp
txt \<open>7\<close>
apply (tactic "tac3 1")
apply (tactic "tac_ren \<^context> 1")
apply force
txt \<open>6 - 3\<close>
apply (tactic "EVERY1[tac3,tac3,tac3,tac3]")
txt \<open>2\<close>
apply (tactic "asm_full_simp_tac (put_simpset ss \<^context>) 1")
apply (simp (no_asm) add: inv3_def)
apply (intro strip, (erule conjE)+)
apply (rule imp_disjL [THEN iffD1])
apply (rule impI)
apply (tactic \<open>forward_tac \<^context> [rewrite_rule \<^context> [@{thm Impl.inv2_def}]
(@{thm raw_inv2} RS @{thm invariantE})] 1\<close>)
apply simp
apply (erule conjE)+
apply (rule_tac j = "count (ssent (sen s)) (~sbit (sen s))" and
k = "count (rsent (rec s)) (sbit (sen s))" in le_trans)
apply (tactic \<open>forward_tac \<^context> [rewrite_rule \<^context> [@{thm inv1_def}]
(@{thm raw_inv1} RS @{thm invariantE}) RS conjunct2] 1\<close>)
apply (simp add: hdr_sum_def Multiset.count_def)
apply (rule add_le_mono)
apply (rule countm_props)
apply (simp (no_asm))
apply (rule countm_props)
apply (simp (no_asm))
apply assumption
txt \<open>1\<close>
apply (tactic "tac3 1")
apply (intro strip, (erule conjE)+)
apply (rule imp_disjL [THEN iffD1])
apply (rule impI)
apply (tactic \<open>forward_tac \<^context> [rewrite_rule \<^context> [@{thm Impl.inv2_def}]
(@{thm raw_inv2} RS @{thm invariantE})] 1\<close>)
apply simp
done
subsubsection \<open>INVARIANT 4\<close>
lemma raw_inv4: "invariant impl_ioa inv4"
apply (rule invariantI)
txt \<open>Base case\<close>
apply (simp add: Impl.inv4_def receiver_projections sender_projections impl_ioas)
apply (simp (no_asm_simp) add: impl_ioas split del: if_split)
apply (induct_tac "a")
ML_prf \<open>val tac4 = asm_full_simp_tac (put_simpset ss \<^context> addsimps [@{thm inv4_def}])\<close>
txt \<open>10 - 2\<close>
apply (tactic "EVERY1[tac4,tac4,tac4,tac4,tac4,tac4,tac4,tac4,tac4]")
txt \<open>2 b\<close>
apply (intro strip, (erule conjE)+)
apply (tactic \<open>forward_tac \<^context> [rewrite_rule \<^context> [@{thm Impl.inv2_def}]
(@{thm raw_inv2} RS @{thm invariantE})] 1\<close>)
apply simp
txt \<open>1\<close>
apply (tactic "tac4 1")
apply (intro strip, (erule conjE)+)
apply (rule ccontr)
apply (tactic \<open>forward_tac \<^context> [rewrite_rule \<^context> [@{thm Impl.inv2_def}]
(@{thm raw_inv2} RS @{thm invariantE})] 1\<close>)
apply (tactic \<open>forward_tac \<^context> [rewrite_rule \<^context> [@{thm Impl.inv3_def}]
(@{thm raw_inv3} RS @{thm invariantE})] 1\<close>)
apply simp
apply (rename_tac m, erule_tac x = "m" in allE)
apply simp
done
text \<open>rebind them\<close>
lemmas inv1 = raw_inv1 [THEN invariantE, unfolded inv1_def]
and inv2 = raw_inv2 [THEN invariantE, unfolded inv2_def]
and inv3 = raw_inv3 [THEN invariantE, unfolded inv3_def]
and inv4 = raw_inv4 [THEN invariantE, unfolded inv4_def]
end
|
$-\mathbb{R} = \mathbb{R}$
|
import tactic combinatorics.simple_graph.connectivity
import graph_theory.path graph_theory.pushforward graph_theory.contraction
open classical function
namespace simple_graph
variables {V V' : Type*} [decidable_eq V] [decidable_eq V'] {f : V → V'}
variables {G G' : simple_graph V} {x y z u v w a b c : V}
structure Walk (G : simple_graph V) := {a b : V} (p : G.walk a b)
namespace Walk
variables {e : G.dart} {p q : G.Walk} {hep : e.snd = p.a} {hpq : p.b = q.a}
def nil (a : V) : G.Walk := ⟨(walk.nil : G.walk a a)⟩
@[simp] lemma nil_a : (nil a : G.Walk).a = a := rfl
@[simp] lemma nil_b : (nil b : G.Walk).b = b := rfl
def cons (e : G.dart) (p : G.Walk) (h : e.snd = p.a) : G.Walk :=
by { let h' := e.is_adj, rw h at h', exact ⟨p.p.cons h'⟩ }
def step (e : G.dart) : G.Walk := cons e (nil e.snd) rfl
def rec₀ {motive : G.Walk → Sort*} :
(Π u, motive (Walk.nil u)) →
(Π e p h, motive p → motive (cons e p h)) →
Π p, motive p :=
λ h_nil h_cons ⟨p⟩, walk.rec_on p h_nil $ λ u v w h p, h_cons ⟨⟨_,_⟩,h⟩ ⟨p⟩ rfl
@[simp] lemma rec_nil {motive h_nil h_cons} :
@rec₀ V _ G motive h_nil h_cons (nil a) = h_nil a := rfl
@[simp] lemma rec_cons {motive h_nil h_cons h} :
@rec₀ V _ G motive h_nil h_cons (cons e p h) =
h_cons e p h (rec₀ h_nil h_cons p) :=
begin
rcases e with ⟨⟨u,v⟩,e⟩, rcases p with ⟨a,b,p⟩, dsimp only at h, subst v, refl
end
@[simp] lemma cons_a : (cons e p hep).a = e.fst := rfl
@[simp] lemma cons_b : (cons e p hep).b = p.b := rfl
def range (p : G.Walk) : finset V :=
p.p.support.to_finset
@[simp] lemma range_cons : (cons e p hep).range = {e.fst} ∪ p.range :=
by simpa only [range, cons, walk.support_cons, list.to_finset_cons]
@[simp] lemma range_step : (step e).range = {e.fst, e.snd} :=
by simpa only [range, step, cons, walk.support_cons, list.to_finset_cons]
@[simp] lemma range_nonempty : p.range.nonempty :=
begin
refine rec₀ _ _ p,
{ intro u, use u, simp [range] },
{ intros e p h q, use e.fst, simp }
end
def init : G.Walk → finset V :=
rec₀ (λ v, ∅) (λ e p h q, {e.fst} ∪ q)
@[simp] lemma init_cons : (cons e p hep).init = {e.fst} ∪ p.init := rec_cons
lemma range_eq_init_union_last : p.range = p.init ∪ {p.b} :=
by { refine rec₀ _ _ p, { intro u, refl }, { rintro e p h q, simp [q] } }
def tail : G.Walk → finset V :=
rec₀ (λ v, ∅) (λ e p h q, p.range)
@[simp] lemma tail_cons : (cons e p hep).tail = p.range := rec_cons
lemma range_eq_start_union_tail : p.range = {p.a} ∪ p.tail :=
by { refine rec₀ _ _ p, { intro, refl }, { intros, simp [*] } }
def edges : G.Walk → finset G.dart :=
rec₀ (λ v, ∅) (λ e p h q, {e} ∪ q)
@[simp] lemma edges_cons : (cons e p hep).edges = {e} ∪ p.edges := rec_cons
lemma first_edge : e ∈ (cons e p hep).edges := by simp
@[simp] lemma range_a : (nil a : G.Walk).range = {a} := rfl
@[simp] lemma start_mem_range : p.a ∈ p.range :=
by { refine rec₀ _ _ p; simp }
@[simp] lemma end_mem_range : p.b ∈ p.range :=
by { refine rec₀ _ _ p, simp, rintro e p h q, simp, right, exact q }
lemma range_eq_support : p.range = p.p.support.to_finset :=
begin
refine rec₀ _ _ p,
{ intro u, refl },
{ intros e p h q, rw [range_cons,q], ext, simpa }
end
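-- Concatenation of two walks whose endpoints match (`p.b = q.a`).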
def append_aux (p q : G.Walk) (hpq : p.b = q.a) : {w : G.Walk // w.a = p.a ∧ w.b = q.b} :=
begin
rcases p with ⟨a,b,p⟩, rcases q with ⟨c,d,q⟩, simp only at hpq, subst c,
refine ⟨⟨p ++ q⟩, rfl, rfl⟩,
end
def append (p q : G.Walk) (hpq : p.b = q.a) : G.Walk :=
(append_aux p q hpq).val
@[simp] lemma append_a : (append p q hpq).a = p.a :=
(append_aux p q hpq).prop.1
@[simp] lemma append_b : (append p q hpq).b = q.b :=
(append_aux p q hpq).prop.2
@[simp] lemma append_nil_left {haq : a = q.a} : append (nil a) q haq = q :=
by { subst haq, rcases q with ⟨a,b,q⟩, refl }
@[simp] lemma append_cons :
append (cons e p hep) q hpq = cons e (append p q hpq) (by simp [hep]) :=
begin
rcases e with ⟨⟨u,v⟩,e⟩, rcases p with ⟨a,b,p⟩, rcases q with ⟨c,d,q⟩,
simp at hep hpq, substs a b, refl
end
@[simp] lemma range_append : (append p q hpq).range = p.range ∪ q.range :=
begin
revert p, refine rec₀ _ _, simp,
intros e p h q hpq, simp at hpq, specialize @q hpq, simp, rw ←q, refl
end
lemma mem_append : z ∈ (append p q hpq).p.support ↔ z ∈ p.p.support ∨ z ∈ q.p.support :=
begin
rcases p with ⟨a,b,p⟩, rcases q with ⟨d,c,q⟩, simp at hpq, subst d,
rw [append, append_aux], simp only [walk.mem_support_append_iff]
end
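-- Pushforward of walks along `f`: an edge whose endpoints are identified by `f`
-- collapses to the trivial walk, otherwise it maps to a single step in `map f G`.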
def push_step_aux (f : V → V') (e : G.dart) :
{w : (map f G).Walk // w.a = f e.fst ∧ w.b = f e.snd} :=
begin
by_cases f e.fst = f e.snd,
exact ⟨Walk.nil (f e.fst), rfl, h⟩,
exact ⟨Walk.step ⟨⟨_,_⟩,⟨h,e.fst,e.snd,e.is_adj,rfl,rfl⟩⟩, rfl, rfl⟩
end
def push_step (f : V → V') (e : G.dart) : (map f G).Walk :=
(push_step_aux f e).val
@[simp] lemma push_step_a : (push_step f e).a = f e.fst :=
(push_step_aux f e).prop.1
@[simp] lemma push_step_b : (push_step f e).b = f e.snd :=
(push_step_aux f e).prop.2
def push_Walk_aux (f : V → V') (p : G.Walk) :
{w : (map f G).Walk // w.a = f p.a ∧ w.b = f p.b} :=
begin
refine rec₀ _ _ p,
{ intro u, exact ⟨Walk.nil (f u), rfl, rfl⟩ },
{ intros e p h q, simp only [cons_a, cons_b],
let ee := push_step f e,
let ww := ee.append q.1 (by { rw [q.2.1,←h], exact push_step_b }),
refine ⟨ww, _, _⟩, simp,
rw [←q.2.2], exact (ee.append_aux q.1 (by { rw [q.2.1,←h], exact push_step_b })).2.2 }
end
def push_Walk (f : V → V') (p : G.Walk) : (map f G).Walk :=
(push_Walk_aux f p).val
@[simp] lemma push_Walk_a : (push_Walk f p).a = f p.a :=
(push_Walk_aux f p).prop.1
@[simp] lemma push_Walk_b : (push_Walk f p).b = f p.b :=
(push_Walk_aux f p).prop.2
@[simp] lemma push_nil : push_Walk f (@Walk.nil _ _ G a) = Walk.nil (f a) := rfl
lemma push_cons (f : V → V') (e : G.dart) (p : G.Walk) (h : e.snd = p.a) :
push_Walk f (p.cons e h) = Walk.append (push_step f e) (push_Walk f p) (by simp [h]) :=
by { rcases p with ⟨a,b,p⟩, rcases e with ⟨⟨u,v⟩,e⟩, simp at h, subst a, refl }
lemma push_cons_eq (f : V → V') (e : G.dart) (p : G.Walk) (h : e.snd = p.a) (h' : f e.fst = f e.snd) :
push_Walk f (p.cons e h) = push_Walk f p :=
begin
have : push_step f e = Walk.nil (f e.fst) := by simp [push_step,push_step_aux,h'],
rw [push_cons], simp only [this], exact append_nil_left
end
lemma push_cons_ne (f : V → V') (e : G.dart) (p : G.Walk) (h : e.snd = p.a) (h' : f e.fst ≠ f e.snd) :
push_Walk f (p.cons e h) = Walk.cons ⟨⟨_,_⟩,⟨h',e.fst,e.snd,e.is_adj,rfl,rfl⟩⟩ (push_Walk f p) (by simp [h]) :=
begin
have : push_step f e = Walk.step ⟨⟨_,_⟩,⟨h',e.fst,e.snd,e.is_adj,rfl,rfl⟩⟩ :=
by simp [push_step,push_step_aux,h'],
rw [push_cons], simp [this,step]
end
lemma push_append (f : V → V') (p q : G.Walk) (hpq : p.b = q.a) :
push_Walk f (Walk.append p q hpq) =
Walk.append (push_Walk f p) (push_Walk f q) (by simp [hpq]) :=
begin
revert p, refine rec₀ (by simp) _,
intros e p h ih hpq, by_cases h' : f e.fst = f e.snd,
{ have h₁ := push_cons_eq f e p h h',
have h₂ := push_cons_eq f e (Walk.append p q hpq) (h.trans append_a.symm) h',
simp only [h₁, h₂, ih, append_cons] },
{ have h₁ := push_cons_ne f e p h h',
have h₂ := push_cons_ne f e (Walk.append p q hpq) (h.trans append_a.symm) h',
simpa only [h₁, h₂, ih, append_cons] }
end
lemma push_eq_nil (f : V → V') (w : V') (p : G.Walk) (hp : ∀ z : V, z ∈ p.p.support → f z = w) :
push_Walk f p = Walk.nil w :=
begin
revert p, refine rec₀ _ _,
{ intros, specialize hp u (by simp [Walk.nil]), simp [hp] },
{ intros e p h ih hp,
have h₁ : f e.fst = w := by { apply hp, left, refl },
have h₂ : f e.snd = w := by { apply hp, right, rw h, exact p.p.start_mem_support },
rw push_cons_eq f e p h (h₁.trans h₂.symm),
apply ih, intros z hz, apply hp, right, exact hz }
end
@[simp] lemma push_step_range : (push_step f e).range = {f e.fst, f e.snd} :=
by { by_cases f e.fst = f e.snd; simp [push_step, push_step_aux, h] }
lemma push_range : (push_Walk f p).range = finset.image f p.range :=
begin
refine rec₀ _ _ p, simp, rintro e p h q,
rw [push_cons,range_cons,range_append,q,finset.image_union,push_step_range],
ext, split; intro h',
{ rw finset.mem_union at h' ⊢, cases h', simp at h', cases h', left, subst a, simp,
right, subst a, rw h, apply finset.mem_image_of_mem, exact start_mem_range,
right, exact h' },
{ rw finset.mem_union at h' ⊢, cases h', simp at h', subst a, left, simp, right,
exact h' }
end
variables {hf : adapted f G} {p' : (map f G).Walk} {hx : f x = p'.a} {hy : f y = p'.b}
noncomputable def pull_Walk_aux (f : V → V') (hf : adapted f G) (p' : (map f G).Walk) (x y : V)
(hx : f x = p'.a) (hy : f y = p'.b) :
{w : G.Walk // w.a = x ∧ w.b = y ∧ push_Walk f w = p'} :=
begin
revert p' x y, refine rec₀ _ _,
{ rintros u x y hx hy, simp at hx hy, subst hy, choose p h₃ using hf hx,
refine ⟨⟨p⟩,rfl,rfl,_⟩, apply push_eq_nil, exact h₃ },
{ rintros ⟨⟨u,v⟩,⟨huv,ee⟩⟩ p h ih x y hx hy,
choose xx yy h₂ h₃ h₄ using ee, substs h₃ h₄, choose p₁ h₆ using hf hx,
obtain p₂ := ih yy y (h) hy,
let pp := Walk.append ⟨p₁⟩ (p₂.val.cons ⟨⟨_,_⟩,h₂⟩ p₂.2.1.symm) rfl,
refine ⟨pp, rfl, p₂.2.2.1, _⟩,
have h₇ := push_eq_nil f (f xx) ⟨p₁⟩ h₆,
simp [pp,push_append,h₇],
have h₈ := push_cons_ne f ⟨⟨_,_⟩,h₂⟩ p₂.val p₂.2.1.symm huv, refine h₈.trans _,
congr, exact p₂.2.2.2 }
end
noncomputable def pull_Walk (f : V → V') (hf : adapted f G) (p' : (map f G).Walk) (x y : V)
(hx : f x = p'.a) (hy : f y = p'.b) : G.Walk :=
(pull_Walk_aux f hf p' x y hx hy).val
lemma pull_Walk_a : (pull_Walk f hf p' x y hx hy).a = x :=
(pull_Walk_aux f hf p' x y hx hy).prop.1
lemma pull_Walk_b : (pull_Walk f hf p' x y hx hy).b = y :=
(pull_Walk_aux f hf p' x y hx hy).prop.2.1
lemma pull_Walk_push : push_Walk f (pull_Walk f hf p' x y hx hy) = p' :=
(pull_Walk_aux f hf p' x y hx hy).prop.2.2
def transportable_to (G' : simple_graph V) (p : G.Walk) : Prop :=
∀ e : G.dart, e ∈ p.edges → G'.adj e.fst e.snd
lemma transportable_to_of_le (G_le : G ≤ G') : p.transportable_to G' :=
begin
refine rec₀ _ _ p,
{ rintro u e h, simp [edges] at h, contradiction },
{ rintro e p h q e' h', simp at h', cases h', rw h', exact G_le e.is_adj, exact q e' h' }
end
def transport (p : G.Walk) (hp : transportable_to G' p) :
{q : G'.Walk // q.a = p.a ∧ q.b = p.b ∧ q.range = p.range ∧ q.init = p.init ∧ q.tail = p.tail} :=
begin
revert p, refine rec₀ _ _,
{ rintro a hp, exact ⟨nil a, rfl, rfl, rfl, rfl, rfl⟩ },
{ rintro e p h ih hp,
have : transportable_to G' p :=
by { rintro e he, apply hp, rw [edges_cons,finset.mem_union], right, exact he },
specialize ih this, rcases ih with ⟨q,hq⟩, rw ←hq.1 at h,
exact ⟨cons ⟨⟨_,_⟩,hp e first_edge⟩ q h, by simp [hq]⟩ }
end
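-- `until p X hX` is the initial segment of `p` up to the first vertex in `X`:
-- it starts at `p.a`, ends in `X`, and its `init` avoids `X`.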
noncomputable def until (p : G.Walk) (X : finset V) (hX : (p.range ∩ X).nonempty) :
{q : G.Walk // q.a = p.a ∧ q.b ∈ X ∧
q.range ⊆ p.range ∧ q.init ∩ X = ∅ ∧ q.init ⊆ p.init ∧ q.tail ⊆ p.tail} :=
begin
revert p, refine rec₀ _ _,
{ rintro u hu, choose z hz using hu, simp at hz, cases hz with hz₁ hz₂, subst z,
exact ⟨nil u, rfl, hz₂, by refl, rfl, by refl, by refl⟩ },
{ rintro e p h₁ ih h₂, by_cases e.fst ∈ X,
{ exact ⟨nil e.fst, rfl, h, by simp, rfl, by simp [init], by simp [tail]⟩ },
{ simp at h₂, choose z hz using h₂, simp at hz, cases hz with hz₁ hz₂,
have : z ≠ e.fst := by { intro h, rw h at hz₂, contradiction },
simp [this] at hz₁,
have : z ∈ p.range ∩ X := finset.mem_inter.mpr ⟨hz₁,hz₂⟩,
specialize ih ⟨z,this⟩, rcases ih with ⟨q,hq₁,hq₂,hq₃,hq₄,hq₅,hq₆⟩,
rw ←hq₁ at h₁,
refine ⟨cons e q h₁, rfl, hq₂, _, _, _, by simp [hq₃]⟩,
{ simp, apply finset.union_subset_union, refl, exact hq₃ },
{ simp [finset.inter_distrib_right,hq₄,h] },
{ simp, apply finset.union_subset_union, refl, exact hq₅ }
}
}
end
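-- `after p X hX` is the final segment of `p` from the last visit to `X`:
-- it starts in `X`, ends at `p.b`, and its `tail` avoids `X`.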
noncomputable def after (p : G.Walk) (X : finset V) (hX : (p.range ∩ X).nonempty) :
{q : G.Walk // q.a ∈ X ∧ q.b = p.b ∧
q.range ⊆ p.range ∧ q.init ⊆ p.init ∧ q.tail ⊆ p.tail ∧ q.tail ∩ X = ∅} :=
begin
revert p, refine rec₀ _ _,
{ rintro u hu,
exact ⟨nil u, finset.singleton_inter_nonempty.mp hu, rfl, by refl, by refl, by refl, rfl⟩ },
{ rintro e p h₁ ih h₂, by_cases (p.range ∩ X).nonempty,
{ rcases ih h with ⟨q, hq₁, hq₂, hq₃, hq₄, hq₅, hq₆⟩,
refine ⟨q, hq₁, hq₂, _, _, _, hq₆⟩,
{ simp, apply hq₃.trans, apply finset.subset_union_right },
{ simp, apply hq₄.trans, apply finset.subset_union_right },
{ simp, apply hq₅.trans, rw range_eq_start_union_tail, apply finset.subset_union_right }
},
{ refine ⟨cons e p h₁, _, rfl, by refl, _⟩,
{ simp at h₂ ⊢, rcases h₂ with ⟨z,hz⟩, simp at hz, cases hz with hz₁ hz₂,
cases hz₁, subst z, exact hz₂, exfalso, apply h, use z, simp, exact ⟨hz₁,hz₂⟩ },
{ simp at h ⊢, exact h } } }
end
def reverse (p : G.Walk) : G.Walk := ⟨p.p.reverse⟩
@[simp] lemma reverse_a : (reverse p).a = p.b := by simp only [reverse]
@[simp] lemma reverse_b : (reverse p).b = p.a := by simp only [reverse]
@[simp] lemma reverse_range : (reverse p).range = p.range :=
by simp only [reverse, range, walk.support_reverse, list.to_finset_reverse]
end Walk
end simple_graph
|
Following the April 2010 destruction of the Deepwater Horizon while drilling the Macondo well, and the resulting oil spill, all appraisal activities at 33 wells under exploration in the Gulf of Mexico, including Tiber, were placed on hold. At least two rigs that might otherwise have been used for developing Tiber are also in use on the relief wells for the ruptured well.
|
Music Hall’s RDR-1 table-top radio is a reviewer’s companion. When I was hard at work editing and typing reviews or handling industry correspondence, the Music Hall radio was there, not simply playing crystal-clear FM music from a classical station with a clarity none of my other radios has achieved; it is also equipped with Radio Data System (RDS) technology that displays station call signs and music titles, such as “BEETHOVEN EROICA IN E, OP 55”. Maybe the radio stations will begin to include information on performers before long.
The RDR-1 has a 7-watt, 3-inch speaker that occupies about 3/8 of the front panel on the left, while 2/8 of the panel on the right carries a large TUNING dial, with a smaller but more protruding VOLUME dial beneath it. The large LCD display between the speaker and the dials is joined by two rows of shiny chrome buttons below it. The little buttons in the first row control the adjustable, four-level brightness of the LCD display, BAND/AUX selection, and the M1 through M5 PRESET STATION slots. The second row of little buttons controls SCAN/MONO, DISPLAY of miscellaneous RDS text, SET, AF (alternate frequency for the tuned station), CT, and finally traffic news (an SUV symbol).
The Music Hall FM clock radio has an internal antenna that pulls in stations my other radios couldn’t get, and it also has a rear switch for external antenna reception via the included cabling. Tuning is by PLL synthesis via the large rotary knob above the VOLUME knob, and the tuning action is precise and smooth, a welcome break from the ubiquitous, maddening push-button variety. I used the M1 to M5 PRESET STATION buttons beneath the large, lighted LCD display.
A rear port behind the speaker makes it very clear what this little radio intends to do, and its ability to do it must be heard to be believed. Position this little warrior against a wall, adjust the bass to the highest level, and you will want to crank it up and rock. Even with classical music.
The one feature that would make any radio perfect is DVR-like functionality that records ongoing programming and allows rewind, so that when the radio host makes an announcement while I am working, I could rewind a few seconds and capture what he or she was saying. The addition of a CD player would make this little warrior more triumphant, although I must concede that radio listening through this Music Hall is so enjoyable that I am not sure I want to miss a moment of the revitalized FM experience.
A credit-card-like remote replicates all the control functions of the buttons on the RDR-1. I get it: Roy Hall must have thought about naming this little darling R2D2… This incredibly chic-looking little radio not only sounds very balanced for its size; with its maple veneer and cool aluminum trim, it also adds a much-needed chicness and youthful energy to my daily routine. It’s amazing how differently I feel around my workplace in the company of the RDR-1. Music Hall offers a dark wood trim on this little gadget, but I prefer the lightheartedness of the maple-and-silver touch.
Because I turn on the RDR-1 whenever I’m at work, I now listen to it more than to my reference audio system. The sleek-looking, décor-friendly, space-saving little RDR-1 has accomplished the amazing feat of becoming an indispensable element of my daily activities.
Will you please tell me if this radio has a port for headphones? Nighttime listening!
According to the owner’s manual page 3 (accessed here: http://musichallaudio.com/pdfs/music-hall-audio-rdr-1-radio-manual.pdf), the unit has an “earphone jack” on the face plate.
Hope this three-years-later reply helps!
|
module Main
main : IO ()
main = do
putStr "Enter thy name: "
x <- getLine
putStrLn ("Howdy " ++ x)
printLen : IO ()
printLen = getLine >>= putStr . show . length
printLonger : IO ()
printLonger = do
putStr "First string: "
a <- getLine
putStr "2nd string: "
b <- getLine
print $ if length a > length b then length a else length b
pL : IO ()
pL = putStr "1st: " >>= \_ => getLine >>= \a => putStr "2nd: " >>= \_ => getLine >>= \b => print $ if length a > length b then length a else length b
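-- Example session (hypothetical, assuming an Idris 1 REPL with this file loaded):
--   *Main> :exec main
--   Enter thy name: Ada
--   Howdy Ada
--   *Main> :exec printLonger
--   First string: foo
--   2nd string: quux
--   4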
|
function [MATreordered,MATindices,MATcost] = reorderMAT(MAT,H,cost)
%REORDERMAT Reorder matrix for visualization
%
% [MATreordered,MATindices,MATcost] = reorderMAT(MAT,H,cost);
%
% This function reorders the connectivity matrix in order to place more
% edges closer to the diagonal. This often helps in displaying community
% structure, clusters, etc.
%
% Inputs:  MAT,  connection matrix
%          H,    number of reordering attempts
%          cost, 'line' or 'circ', for shape of lattice
%                (linear or ring lattice)
%
% Outputs: MATreordered, reordered connection matrix
%          MATindices,   reordered indices
%          MATcost,      cost of reordered matrix
%
%
% Olaf Sporns, Indiana University
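%
% Example (hypothetical data): reorder a random binary network using the
% linear-lattice cost profile.
%   W = double(rand(20) > 0.8);
%   [Wr, idx, c] = reorderMAT(W, 5000, 'line');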
N = length(MAT);
diagMAT = diag(diag(MAT));
MAT = MAT-diagMAT;
% generate cost function
if strcmp(cost,'line')
profil = fliplr(normpdf(1:N,0,N/2));
end;
if strcmp(cost,'circ')
profil = fliplr(normpdf(1:N,N/2,N/4));
end;
COST = toeplitz(profil,profil);
% initialize lowCOST
lowMATcost = sum(sum(COST.*MAT));
% keep track of starting configuration
MATstart = MAT;
starta = 1:N;
% reorder
for h=1:H
a = 1:N;
% choose two positions at random and flip them
r = randperm(N);
a(r(1)) = r(2);
a(r(2)) = r(1);
MATcostnew = sum(sum(MAT(a,a).*COST));
if (MATcostnew < lowMATcost)
MAT = MAT(a,a);
r2 = starta(r(2));
r1 = starta(r(1));
starta(r(1)) = r2;
starta(r(2)) = r1;
lowMATcost = MATcostnew;
end;
end; % h
MATreordered = MATstart(starta,starta) + diagMAT(starta,starta);
MATindices = starta;
MATcost = lowMATcost;
|
section {*I\_kparser\_HF*}
theory
I_kparser_HF
imports
I_kparser_base
begin
record ('stack, 'event) parserHF_conf =
parserHF_conf_fixed :: "'event list"
parserHF_conf_history :: "'event list"
parserHF_conf_stack :: "'stack list"
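(* A parserHF configuration records the still-fixed lookahead, the produced
   history, and the current stack; the set below restricts each component to
   the parser's alphabets and ties the history to the fixed input. *)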
definition parserHF_configurations :: "
('stack, 'event, 'marker) parser
\<Rightarrow> ('stack, 'event) parserHF_conf set"
where
"parserHF_configurations G \<equiv>
{\<lparr>parserHF_conf_fixed = f,
parserHF_conf_history=h,
parserHF_conf_stack = l\<rparr>
| f h l.
set l \<subseteq> parser_nonterms G
\<and> set f \<subseteq> parser_events G
\<and> set h \<subseteq> parser_events G
\<and> parser_bottom G \<notin> set h
\<and> suffix h (butlast_if_match f (parser_bottom G))
\<and> (parser_bottom G \<notin> set f
\<or> (\<exists>w. f = w @ [parser_bottom G]
\<and> parser_bottom G \<notin> set w))}"
definition parserHF_configurations_ALT :: "
('stack, 'event, 'marker) parser
\<Rightarrow> ('stack, 'event) parserHF_conf set"
where
"parserHF_configurations_ALT G \<equiv>
{\<lparr>parserHF_conf_fixed = f,
parserHF_conf_history=h,
parserHF_conf_stack = l\<rparr>
| f h l.
set l \<subseteq> parser_nonterms G
\<and> set h \<subseteq> parser_events G - {parser_bottom G}
\<and> suffix h (butlast_if_match f (parser_bottom G))
\<and> f \<in> parser_fixed_schedulers G}"
lemma parserHF_configurations_ALT_vs_parserHF_configurations: "
valid_parser G
\<Longrightarrow> parserHF_configurations_ALT G = parserHF_configurations G"
apply(simp add: parserHF_configurations_ALT_def parserHF_configurations_def)
apply(rule antisym)
prefer 2
apply(clarsimp)
apply(rename_tac f h l)(*strict*)
apply(case_tac "parser_bottom G \<in> set f")
apply(rename_tac f h l)(*strict*)
apply(simp add: parser_schedulers_def)
apply(clarsimp)
apply(rename_tac h l w)(*strict*)
apply(rule conjI)
apply(rename_tac h l w)(*strict*)
apply(force)
apply(rename_tac h l w)(*strict*)
apply(simp add: parser_fixed_schedulers_def prefix_closure_def)
apply(rule_tac x="w @ [parser_bottom G]" in exI)
apply(rule conjI)
apply(rename_tac h l w)(*strict*)
prefer 2
apply(simp add: prefix_def)
apply(rename_tac h l w)(*strict*)
apply(simp add: parser_schedulers_def)
apply(rename_tac f h l)(*strict*)
apply(rule conjI)
apply(rename_tac f h l)(*strict*)
apply(force)
apply(rename_tac f h l)(*strict*)
apply(simp add: parser_fixed_schedulers_def)
apply(simp add: prefix_closure_def parser_schedulers_def)
apply(rule_tac
x="f@ [parser_bottom G]"
in exI)
apply(simp add: prefix_def)
apply(clarsimp)
apply(rename_tac f h l)(*strict*)
apply(simp add: parser_fixed_schedulers_def)
apply(simp add: prefix_closure_def parser_schedulers_def)
apply(clarsimp)
apply(rename_tac f h l va)(*strict*)
apply(simp add: prefix_def)
apply(clarsimp)
apply(rename_tac f h l va c)(*strict*)
apply(rule conjI)
apply(rename_tac f h l va c)(*strict*)
apply(simp add: valid_parser_def)
apply(clarsimp)
apply(rename_tac f h l va c x)(*strict*)
apply(subgoal_tac "x\<in> set(va @ [parser_bottom G])")
apply(rename_tac f h l va c x)(*strict*)
prefer 2
apply(rule_tac
A="set f"
in set_mp)
apply(rename_tac f h l va c x)(*strict*)
apply(rule_tac
t="va @ [parser_bottom G]"
and s="f @ c"
in ssubst)
apply(rename_tac f h l va c x)(*strict*)
apply(force)
apply(rename_tac f h l va c x)(*strict*)
apply(simp (no_asm))
apply(rename_tac f h l va c x)(*strict*)
apply(force)
apply(rename_tac f h l va c x)(*strict*)
apply(force)
apply(rename_tac f h l va c)(*strict*)
apply(rule conjI)
apply(rename_tac f h l va c)(*strict*)
apply(force)
apply(rename_tac f h l va c)(*strict*)
apply(rule conjI)
apply(rename_tac f h l va c)(*strict*)
apply(force)
apply(rename_tac f h l va c)(*strict*)
apply(clarsimp)
apply(rule_tac
xs="c"
in rev_cases)
apply(rename_tac f h l va c)(*strict*)
apply(clarsimp)
apply(rename_tac f h l va c ys y)(*strict*)
apply(clarsimp)
done
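(* A step pops rule_lpop from the stack top and pushes rule_lpush, extends the
   history by the input read beyond the fixed lookahead, and replaces the
   consumed part of the fixed input by rule_rpush. *)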
definition parserHF_step_relation :: "
('stack, 'event, 'marker) parser
\<Rightarrow> ('stack, 'event) parserHF_conf
\<Rightarrow> ('stack, 'event) parser_step_label
\<Rightarrow> ('stack, 'event) parserHF_conf
\<Rightarrow> bool"
where
"parserHF_step_relation G c1 p c2 \<equiv>
p \<in> parser_rules G \<and> ((\<exists>x. parserHF_conf_stack c1=x@(rule_lpop p) \<and> parserHF_conf_stack c2=x@(rule_lpush p)) \<and> (parserHF_conf_history c2=parserHF_conf_history c1@(drop (length(parserHF_conf_fixed c1)) (butlast_if_match (rule_rpop p) (parser_bottom G))) \<and> parserHF_conf_fixed c2 = (rule_rpush p) @ (drop (length (rule_rpop p)) (parserHF_conf_fixed c1)) \<and> (prefix (rule_rpop p) (parserHF_conf_fixed c1) \<or> prefix (parserHF_conf_fixed c1) (rule_rpop p))))"
definition parserHF_step_relation_ALT :: "
('stack, 'event, 'marker) parser
\<Rightarrow> ('stack, 'event) parserHF_conf
\<Rightarrow> ('stack, 'event) parser_step_label
\<Rightarrow> ('stack, 'event) parserHF_conf
\<Rightarrow> bool"
where
"parserHF_step_relation_ALT G c1 e c2 \<equiv>
e \<in> parser_rules G
\<and> (\<exists>x. parserHF_conf_stack c1 = x @ rule_lpop e
\<and> parserHF_conf_stack c2 = x @ rule_lpush e)
\<and> parserHF_conf_history c2
= parserHF_conf_history c1
@ drop
(length(parserHF_conf_fixed c1))
(butlast_if_match (rule_rpop e) (parser_bottom G))
\<and> parserHF_conf_fixed c2
= rule_rpush e
@ drop
(length (rule_rpop e))
(parserHF_conf_fixed c1)
\<and> (prefix (rule_rpop e) (parserHF_conf_fixed c1)
\<or> prefix (parserHF_conf_fixed c1) (rule_rpop e))"
lemma parserHF_step_relation_ALT_vs_parserHF_step_relation: "
parserHF_step_relation_ALT M c1 p c2 = parserHF_step_relation M c1 p c2"
apply(simp add: parserHF_step_relation_ALT_def parserHF_step_relation_def)
done
(* 1a, 1b, 1c: not fixed before AND not fixed afterwards *)
definition parserHF_step_relation_ALT2_1a :: "
('stack, 'event, 'marker) parser
\<Rightarrow> ('stack, 'event) parserHF_conf
\<Rightarrow> ('stack, 'event) parser_step_label
\<Rightarrow> ('stack, 'event) parserHF_conf
\<Rightarrow> bool"
where
"parserHF_step_relation_ALT2_1a G c1 e c2 \<equiv>
(\<exists>v1 v2 v3 h s s1 q1 s2 q2.
c1 = \<lparr>parserHF_conf_fixed = v1, parserHF_conf_history = h, parserHF_conf_stack = s @ s1 @ [q1]\<rparr>
\<and> e = \<lparr>rule_lpop = s1 @ [q1], rule_rpop = v1 @ v2 @ v3, rule_lpush = s2 @ [q2], rule_rpush = v3\<rparr>
\<and> c2 = \<lparr>parserHF_conf_fixed = v3, parserHF_conf_history = h @ v2 @ v3, parserHF_conf_stack = s @ s2 @ [q2]\<rparr>
\<and> parser_bottom G \<notin> set (v1 @ v2 @ v3))"
definition parserHF_step_relation_ALT2_1b :: "
('stack, 'event, 'marker) parser
\<Rightarrow> ('stack, 'event) parserHF_conf
\<Rightarrow> ('stack, 'event) parser_step_label
\<Rightarrow> ('stack, 'event) parserHF_conf
\<Rightarrow> bool"
where
"parserHF_step_relation_ALT2_1b G c1 e c2 \<equiv>
(\<exists>v1 v2 v3 h s s1 q1 s2 q2.
c1 = \<lparr>parserHF_conf_fixed = v1 @ v2, parserHF_conf_history = h, parserHF_conf_stack = s @ s1 @ [q1]\<rparr>
\<and> e = \<lparr>rule_lpop = s1 @ [q1], rule_rpop = v1 @ v2 @ v3, rule_lpush = s2 @ [q2], rule_rpush = v2 @ v3\<rparr>
\<and> c2 = \<lparr>parserHF_conf_fixed = v2 @ v3, parserHF_conf_history = h @ v3, parserHF_conf_stack = s @ s2 @ [q2]\<rparr>
\<and> parser_bottom G \<notin> set (v1 @ v2 @ v3))"
definition parserHF_step_relation_ALT2_1c :: "
('stack, 'event, 'marker) parser
\<Rightarrow> ('stack, 'event) parserHF_conf
\<Rightarrow> ('stack, 'event) parser_step_label
\<Rightarrow> ('stack, 'event) parserHF_conf
\<Rightarrow> bool"
where
"parserHF_step_relation_ALT2_1c G c1 e c2 \<equiv>
(\<exists>v1 v2 v3 h s s1 q1 s2 q2.
c1 = \<lparr>parserHF_conf_fixed = v1 @ v2 @ v3, parserHF_conf_history = h, parserHF_conf_stack = s @ s1 @ [q1]\<rparr>
\<and> e = \<lparr>rule_lpop = s1 @ [q1], rule_rpop = v1 @ v2, rule_lpush = s2 @ [q2], rule_rpush = v2\<rparr>
\<and> c2 = \<lparr>parserHF_conf_fixed = v2 @ v3, parserHF_conf_history = h, parserHF_conf_stack = s @ s2 @ [q2]\<rparr>
\<and> parser_bottom G \<notin> set (v1 @ v2 @ v3))"
(* 2a, 2b: not fixed before but fixed afterwards *)
definition parserHF_step_relation_ALT2_2a :: "
('stack, 'event, 'marker) parser
\<Rightarrow> ('stack, 'event) parserHF_conf
\<Rightarrow> ('stack, 'event) parser_step_label
\<Rightarrow> ('stack, 'event) parserHF_conf
\<Rightarrow> bool"
where
"parserHF_step_relation_ALT2_2a G c1 e c2 \<equiv>
(\<exists>v1 v2 v3 h s s1 q1 s2 q2.
c1 = \<lparr>parserHF_conf_fixed = v1, parserHF_conf_history = h, parserHF_conf_stack = s @ s1 @ [q1]\<rparr>
\<and> e = \<lparr>rule_lpop = s1 @ [q1], rule_rpop = v1 @ v2 @ v3 @ [parser_bottom G], rule_lpush = s2 @ [q2], rule_rpush = v3 @ [parser_bottom G]\<rparr>
\<and> c2 = \<lparr>parserHF_conf_fixed = v3 @ [parser_bottom G], parserHF_conf_history = h @ v2 @ v3, parserHF_conf_stack = s @ s2 @ [q2]\<rparr>
\<and> parser_bottom G \<notin> set (v1 @ v2 @ v3))"
definition parserHF_step_relation_ALT2_2b :: "
('stack, 'event, 'marker) parser
\<Rightarrow> ('stack, 'event) parserHF_conf
\<Rightarrow> ('stack, 'event) parser_step_label
\<Rightarrow> ('stack, 'event) parserHF_conf
\<Rightarrow> bool"
where
"parserHF_step_relation_ALT2_2b G c1 e c2 \<equiv>
(\<exists>v1 v2 v3 h s s1 q1 s2 q2.
c1 = \<lparr>parserHF_conf_fixed = v1 @ v2, parserHF_conf_history = h, parserHF_conf_stack = s @ s1 @ [q1]\<rparr>
\<and> e = \<lparr>rule_lpop = s1 @ [q1], rule_rpop = v1 @ v2 @ v3 @ [parser_bottom G], rule_lpush = s2 @ [q2], rule_rpush = v2 @ v3 @ [parser_bottom G]\<rparr>
\<and> c2 = \<lparr>parserHF_conf_fixed = v2 @ v3 @ [parser_bottom G], parserHF_conf_history = h @ v3, parserHF_conf_stack = s @ s2 @ [q2]\<rparr>
\<and> parser_bottom G \<notin> set (v1 @ v2 @ v3))"
(* 3a, 3b: fixed before *)
definition parserHF_step_relation_ALT2_3a :: "
('stack, 'event, 'marker) parser
\<Rightarrow> ('stack, 'event) parserHF_conf
\<Rightarrow> ('stack, 'event) parser_step_label
\<Rightarrow> ('stack, 'event) parserHF_conf
\<Rightarrow> bool"
where
"parserHF_step_relation_ALT2_3a G c1 e c2 \<equiv>
(\<exists>v1 v2 v3 h s s1 q1 s2 q2.
c1 = \<lparr>parserHF_conf_fixed = v1 @ v2 @ v3 @ [parser_bottom G], parserHF_conf_history = h, parserHF_conf_stack = s @ s1 @ [q1]\<rparr>
\<and> e = \<lparr>rule_lpop = s1 @ [q1], rule_rpop = v1 @ v2, rule_lpush = s2 @ [q2], rule_rpush = v2\<rparr>
\<and> c2 = \<lparr>parserHF_conf_fixed = v2 @ v3 @ [parser_bottom G], parserHF_conf_history = h, parserHF_conf_stack = s @ s2 @ [q2]\<rparr>
\<and> parser_bottom G \<notin> set (v1 @ v2 @ v3))"
definition parserHF_step_relation_ALT2_3b :: "
('stack, 'event, 'marker) parser
\<Rightarrow> ('stack, 'event) parserHF_conf
\<Rightarrow> ('stack, 'event) parser_step_label
\<Rightarrow> ('stack, 'event) parserHF_conf
\<Rightarrow> bool"
where
"parserHF_step_relation_ALT2_3b G c1 e c2 \<equiv>
(\<exists>v1 v2 h s s1 q1 s2 q2.
c1 = \<lparr>parserHF_conf_fixed = v1 @ v2 @ [parser_bottom G], parserHF_conf_history = h, parserHF_conf_stack = s @ s1 @ [q1]\<rparr>
\<and> e = \<lparr>rule_lpop = s1 @ [q1], rule_rpop = v1 @ v2 @ [parser_bottom G], rule_lpush = s2 @ [q2], rule_rpush = v2 @ [parser_bottom G]\<rparr>
\<and> c2 = \<lparr>parserHF_conf_fixed = v2 @ [parser_bottom G], parserHF_conf_history = h, parserHF_conf_stack = s @ s2 @ [q2]\<rparr>
\<and> parser_bottom G \<notin> set (v1 @ v2))"
definition parserHF_step_relation_ALT2 :: "
('stack, 'event, 'marker) parser
\<Rightarrow> ('stack, 'event) parserHF_conf
\<Rightarrow> ('stack, 'event) parser_step_label
\<Rightarrow> ('stack, 'event) parserHF_conf
\<Rightarrow> bool"
where
"parserHF_step_relation_ALT2 G c1 e c2 \<equiv>
e \<in> parser_rules G
\<and> (
parserHF_step_relation_ALT2_1a G c1 e c2
\<or> parserHF_step_relation_ALT2_1b G c1 e c2
\<or> parserHF_step_relation_ALT2_1c G c1 e c2
\<or> parserHF_step_relation_ALT2_2a G c1 e c2
\<or> parserHF_step_relation_ALT2_2b G c1 e c2
\<or> parserHF_step_relation_ALT2_3a G c1 e c2
\<or> parserHF_step_relation_ALT2_3b G c1 e c2
)"
lemma parserHF_step_relation_ALT2_1a_intro: "
e \<in> parser_rules G \<Longrightarrow> parserHF_step_relation_ALT2_1a G c1 e c2 \<Longrightarrow> parserHF_step_relation_ALT2 G c1 e c2"
apply(simp add: parserHF_step_relation_ALT2_def)
done
lemma parserHF_step_relation_ALT2_2b_intro: "
e \<in> parser_rules G \<Longrightarrow> parserHF_step_relation_ALT2_2b G c1 e c2 \<Longrightarrow> parserHF_step_relation_ALT2 G c1 e c2"
apply(simp add: parserHF_step_relation_ALT2_def)
done
lemma parserHF_step_relation_ALT2_1b_intro: "
e \<in> parser_rules G \<Longrightarrow> parserHF_step_relation_ALT2_1b G c1 e c2 \<Longrightarrow> parserHF_step_relation_ALT2 G c1 e c2"
apply(simp add: parserHF_step_relation_ALT2_def)
done
lemma parserHF_step_relation_ALT2_1c_intro: "
e \<in> parser_rules G \<Longrightarrow> parserHF_step_relation_ALT2_1c G c1 e c2 \<Longrightarrow> parserHF_step_relation_ALT2 G c1 e c2"
apply(simp add: parserHF_step_relation_ALT2_def)
done
lemma parserHF_step_relation_ALT2_2a_intro: "
e \<in> parser_rules G \<Longrightarrow> parserHF_step_relation_ALT2_2a G c1 e c2 \<Longrightarrow> parserHF_step_relation_ALT2 G c1 e c2"
apply(simp add: parserHF_step_relation_ALT2_def)
done
lemma parserHF_step_relation_ALT2_3b_intro: "
e \<in> parser_rules G \<Longrightarrow> parserHF_step_relation_ALT2_3b G c1 e c2 \<Longrightarrow> parserHF_step_relation_ALT2 G c1 e c2"
apply(simp add: parserHF_step_relation_ALT2_def)
done
lemma parserHF_step_relation_ALT2_3a_intro: "
e \<in> parser_rules G \<Longrightarrow> parserHF_step_relation_ALT2_3a G c1 e c2 \<Longrightarrow> parserHF_step_relation_ALT2 G c1 e c2"
apply(simp add: parserHF_step_relation_ALT2_def)
done
lemma parserHF_step_relation_ALT2__vs__parserHF_step_relation_ALT: "
valid_parser G
\<Longrightarrow> c1 \<in> parserHF_configurations G
\<Longrightarrow> parserHF_step_relation_ALT2 G c1 e c2 = parserHF_step_relation_ALT G c1 e c2"
apply(rule antisym)
apply(simp add: parserHF_step_relation_ALT2_def parserHF_step_relation_ALT_def)
apply(clarsimp)
apply(erule disjE)
apply(simp add: parserHF_step_relation_ALT2_1a_def)
apply(clarsimp)
apply (metis append_Nil2 butlast_if_match_direct2_prime butlast_if_match_pull_out drop_butlast_if_match_distrib prefix_append)
apply(erule disjE)
apply(simp add: parserHF_step_relation_ALT2_1b_def)
apply(clarsimp)
apply (metis append_assoc butlast_if_match_direct2_prime drop_butlast_if_match_distrib length_append prefix_append)
apply(erule disjE)
apply(simp add: parserHF_step_relation_ALT2_1c_def)
apply(clarsimp)
apply (metis append_assoc butlast_if_match_direct2_prime drop_length_append length_append not_set_append prefix_append)
apply(erule disjE)
apply(simp add: parserHF_step_relation_ALT2_2a_def)
apply(clarsimp)
apply (metis append_assoc butlast_if_match_direct drop_butlast_if_match_distrib length_append prefix_append)
apply(erule disjE)
apply(simp add: parserHF_step_relation_ALT2_2b_def)
apply(clarsimp)
apply (metis append_assoc butlast_if_match_direct drop_butlast_if_match_distrib length_append prefix_append)
apply(erule disjE)
apply(simp add: parserHF_step_relation_ALT2_3a_def)
apply(clarsimp)
apply(rule conjI)
apply (metis append_assoc append_self_conv butlast_if_match_direct2_prime butlast_if_match_pull_out drop_length_append le_SucI length_append)
apply (metis append_assoc prefix_def)
apply(simp add: parserHF_step_relation_ALT2_3b_def)
apply(clarsimp)
apply(rule conjI)
apply (metis add_Suc_right butlast_if_match_length_le length_Suc length_append)
apply(simp add: prefix_def)
apply(clarsimp)
apply(simp add: parserHF_step_relation_ALT_def)
apply(clarsimp)
apply(subgoal_tac "valid_parser_step_label G e")
prefer 2
apply (metis valid_parser_def)
apply(case_tac c1)
apply(rename_tac f1 h1 s1)
apply(case_tac c2)
apply(rename_tac f2 h2 s2)
apply(case_tac e)
apply(rename_tac lpop rpop lpush rpush)
apply(clarsimp)
apply(erule disjE)
apply(simp add: prefix_def)
apply(clarsimp)
apply(subgoal_tac "drop (length rpop + length c) (butlast_if_match rpop (parser_bottom G)) = []")
prefer 2
apply (metis drop_entire_butlast_if_match drop_length_append length_append)
apply(clarsimp)
apply(thin_tac "length (butlast_if_match rpop (parser_bottom G)) \<le> length rpop + length c")
apply(simp add: valid_parser_step_label_def)
apply(clarsimp)
apply(rule_tac xs="lpop" in rev_cases)
apply(force)
apply(clarsimp)
apply(rule_tac xs="lpush" in rev_cases)
apply(force)
apply(clarsimp)
apply(case_tac "\<exists>w'. kPrefix k (w @ [parser_bottom G]) @ c = w'@[parser_bottom G]")
prefer 2
apply(clarsimp)
apply(case_tac "k \<le> length w")
prefer 2
apply(simp add: parserHF_configurations_def)
apply(simp add: kPrefix_def)
apply(simp add: kPrefix_def)
apply(rule parserHF_step_relation_ALT2_1c_intro)
apply(force)
apply(simp add: parserHF_step_relation_ALT2_1c_def)
apply(rule_tac x="xb" in exI)
apply(clarsimp)
apply(rule conjI)
apply (metis append_same_eq append_take_drop_id drop_prefix_closureise set_take_subset subsetD triv_compl)
apply(rule conjI)
apply (metis (no_types, hide_lams) Diff_iff Un_iff all_not_in_conv empty_subsetI insert_subset set_append set_take_subset subsetD)
apply(simp add: parserHF_configurations_def)
apply(clarsimp)
apply(rule_tac xs="c" in rev_cases)
prefer 2
apply(clarsimp)
apply(rule parserHF_step_relation_ALT2_3a_intro)
apply(force)
apply(rule_tac t="kPrefix k (w @ [parser_bottom G])" and s="xb @ rpush" in ssubst)
apply(force)
apply(simp add: parserHF_step_relation_ALT2_3a_def)
apply(rule_tac x="xb" in exI)
apply(clarsimp)
apply(case_tac "k \<le> length w")
apply(simp add: parserHF_configurations_def)
apply(simp add: kPrefix_def)
apply (metis Un_iff set_append)
apply(simp add: kPrefix_def)
apply(clarsimp)
apply(simp add: parserHF_configurations_def)
apply(clarsimp)
apply(case_tac "k \<le> length w")
apply(simp add: kPrefix_def)
apply (metis Un_iff insertCI set_append set_simps(2) set_take_subset subset_trans triv_compl)
apply(simp add: kPrefix_def)
apply(clarsimp)
apply(rule parserHF_step_relation_ALT2_3b_intro)
apply(force)
apply(simp add: parserHF_step_relation_ALT2_3b_def)
apply (metis triv_compl)
apply(simp add: prefix_def)
apply(clarsimp)
apply(subgoal_tac "drop (length f1) (butlast_if_match (f1 @ c) (parser_bottom G)) = butlast_if_match c (parser_bottom G)")
prefer 2
apply (metis drop_butlast_if_match_distrib)
apply(clarsimp)
apply(thin_tac "drop (length f1) (butlast_if_match (f1 @ c) (parser_bottom G)) =
butlast_if_match c (parser_bottom G)")
apply(case_tac "\<exists>c'. c = c' @[parser_bottom G]")
apply(clarsimp)
apply(subgoal_tac "butlast_if_match (c' @ [parser_bottom G]) (parser_bottom G) = c'")
prefer 2
apply (metis butlast_if_match_direct)
apply(clarsimp)
apply(thin_tac "butlast_if_match (c' @ [parser_bottom G]) (parser_bottom G) = c'")
apply(simp add: valid_parser_step_label_def)
apply(clarsimp)
apply(rule_tac xs="lpop" in rev_cases)
apply(force)
apply(clarsimp)
apply(rule_tac xs="lpush" in rev_cases)
apply(force)
apply(clarsimp)
apply(subgoal_tac "f1@c'=xb@xc")
prefer 2
apply (metis append1_eq_conv append_assoc)
apply(subgoal_tac "prefix f1 xb \<or> prefix xb f1")
prefer 2
apply (metis mutual_prefix_prefix)
apply(erule disjE)
apply(simp add: prefix_def)
apply(clarsimp)
apply(case_tac "k \<le> length w")
apply(simp add: kPrefix_def)
apply(subgoal_tac "parser_bottom G \<in> set w")
apply(force)
apply(rule_tac A="set (take k w)" in set_mp)
apply(rule set_take_subset)
apply(rule_tac A="set (f1 @ c @ xc @ [parser_bottom G])" in set_mp)
apply(force)
apply(simp (no_asm))
apply(simp add: kPrefix_def)
apply(clarsimp)
apply(rule parserHF_step_relation_ALT2_2a_intro)
apply(force)
apply(rename_tac s v1 h k v3 s1 q1 s2 q2 v2)
apply(simp add: parserHF_step_relation_ALT2_2a_def)
apply(force)
apply(simp add: prefix_def)
apply(clarsimp)
apply(case_tac "k \<le> length w")
apply(simp add: kPrefix_def)
apply(subgoal_tac "parser_bottom G \<in> set w")
apply(force)
apply(rule_tac A="set (take k w)" in set_mp)
apply(rule set_take_subset)
apply(rule_tac A="set (xb @ c @ c' @ [parser_bottom G])" in set_mp)
apply(force)
apply(simp (no_asm))
apply(simp add: kPrefix_def)
apply(clarsimp)
apply(rename_tac s h v3 k v1 s1 q1 s2 q2 v2)
apply(rule parserHF_step_relation_ALT2_2b_intro)
apply(force)
apply(simp add: parserHF_step_relation_ALT2_2b_def)
apply (metis triv_compl)
apply(clarsimp)
apply(rule_tac xs="c" in rev_cases)
apply(clarsimp)
apply(simp add: valid_parser_step_label_def)
apply(clarsimp)
apply(rule_tac xs="lpop" in rev_cases)
apply(force)
apply(clarsimp)
apply(rule_tac xs="lpush" in rev_cases)
apply(force)
apply(clarsimp)
apply(simp add: butlast_if_match_def)
apply(case_tac "k \<le> length w")
apply(simp add: kPrefix_def)
apply(subgoal_tac "\<exists>w'. w =xb@rpush @w' \<and> length (xb@rpush) = k")
prefer 2
apply(rule_tac x="drop k w" in exI)
apply(rule conjI)
apply (metis append_assoc append_take_drop_id)
apply (metis take_all_length)
apply(rule_tac t="take k w" and s="xb@rpush" in ssubst)
apply(force)
apply(thin_tac "xb @ rpush = take k w")
apply(clarsimp)
apply(rule parserHF_step_relation_ALT2_1c_intro)
apply(force)
apply(simp add: parserHF_step_relation_ALT2_1c_def)
apply (metis triv_compl)
apply(simp add: kPrefix_def)
apply(clarsimp)
apply(rename_tac s h k v1 s1 q1 s2 q2 v2)
apply(rule parserHF_step_relation_ALT2_3b_intro)
apply(force)
apply(simp add: parserHF_step_relation_ALT2_3b_def)
apply (metis triv_compl)
apply(clarsimp)
apply(subgoal_tac "butlast_if_match (ys @ [y]) (parser_bottom G) =ys@[y]")
prefer 2
apply (metis butlast_if_match_direct2)
apply(clarsimp)
apply(simp add: valid_parser_step_label_def)
apply(clarsimp)
apply(thin_tac "butlast_if_match (ys @ [y]) (parser_bottom G) = ys @ [y]")
apply(case_tac "k \<le> length w")
prefer 2
apply(simp add: kPrefix_def)
apply(simp add: kPrefix_def)
apply(subgoal_tac "\<exists>w'. w =f1 @ ys @ [y] @w' \<and> length (f1 @ ys @ [y]) = k")
prefer 2
apply(rule_tac x="drop k w" in exI)
apply(rule conjI)
apply (metis append_assoc append_take_drop_id)
apply (metis take_all_length)
apply(rule_tac t="take k w" and s="f1 @ ys @ [y]" in ssubst)
apply(force)
apply(thin_tac "f1 @ ys @ [y] = take k w")
apply(clarsimp)
apply(rule_tac xs="lpop" in rev_cases)
apply(force)
apply(clarsimp)
apply(rule_tac xs="lpush" in rev_cases)
apply(force)
apply(clarsimp)
apply(subgoal_tac "prefix f1 xb \<or> prefix xb f1")
prefer 2
apply (metis mutual_prefix_prefix)
apply(erule disjE)
apply(simp add: prefix_def)
apply(clarsimp)
apply(rule_tac xs="rpush" in rev_cases)
apply(clarsimp)
apply(rule parserHF_step_relation_ALT2_1a_intro)
apply(force)
apply(simp add: parserHF_step_relation_ALT2_1a_def)
apply(force)
apply(clarsimp)
apply(rule parserHF_step_relation_ALT2_1a_intro)
apply(force)
apply(simp add: parserHF_step_relation_ALT2_1a_def)
apply(force)
apply(simp add: prefix_def)
apply(clarsimp)
apply(rename_tac s h v3a v3b v1 w' s1 q1 s2 q2 v2)
apply(rule parserHF_step_relation_ALT2_1b_intro)
apply(force)
apply(simp add: parserHF_step_relation_ALT2_1b_def)
apply(force)
done
definition parserHF_initial_configurations :: "
('stack, 'event, 'marker) parser
\<Rightarrow> ('stack, 'event) parserHF_conf set"
where
"parserHF_initial_configurations G \<equiv>
{c. parserHF_conf_history c = []
\<and> parserHF_conf_fixed c = []
\<and> parserHF_conf_stack c = [parser_initial G]}
\<inter> parserHF_configurations G"
definition parserHF_marking_configurations :: "
('stack, 'event, 'marker) parser
\<Rightarrow> ('stack, 'event) parserHF_conf set"
where
"parserHF_marking_configurations G \<equiv>
{c. \<exists>f \<in> parser_marking G. \<exists>w.
parserHF_conf_stack c = w @ [f]
\<and> parserHF_conf_fixed c \<in> {[], [parser_bottom G]}}
\<inter> parserHF_configurations G"
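(* Acceptance condition: the derivation reaches a marking configuration whose
   history never changes in any later step. *)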
definition parserHF_marking_condition :: "
('stack, 'event, 'marker) parser
\<Rightarrow> (('stack, 'event) parser_step_label, ('stack, 'event) parserHF_conf)derivation
\<Rightarrow> bool"
where
"parserHF_marking_condition G d \<equiv>
\<exists>i e c.
d i = Some (pair e c)
\<and> c \<in> parserHF_marking_configurations G
\<and> (\<forall>j e' c'.
j > i
\<and> d j = Some(pair e' c')
\<longrightarrow> parserHF_conf_history c = parserHF_conf_history c')"
definition parserHF_marked_effect :: "
('stack, 'event, 'marker) parser
\<Rightarrow> (('stack, 'event) parser_step_label, ('stack, 'event) parserHF_conf)derivation
\<Rightarrow> 'event list set"
where
"parserHF_marked_effect G d \<equiv>
{w. \<exists>i e c.
d i = Some (pair e c)
\<and> w = parserHF_conf_history c
\<and> c \<in> parserHF_marking_configurations G}"
definition parserHF_unmarked_effect :: "
('stack, 'event, 'marker) parser
\<Rightarrow> (('stack, 'event) parser_step_label, ('stack, 'event) parserHF_conf)derivation
\<Rightarrow> 'event list set"
where
"parserHF_unmarked_effect G d \<equiv>
{w. \<exists>i e c.
d i = Some (pair e c)
\<and> parserHF_conf_history c = w}"
definition parserHF_get_destinations :: "
('stack, 'event, 'marker) parser
\<Rightarrow> (('stack, 'event) parser_step_label, ('stack, 'event) parserHF_conf)derivation_configuration
\<Rightarrow> ('stack, 'event) parser_destinations set"
where
"parserHF_get_destinations G der_conf \<equiv>
case der_conf of pair e c \<Rightarrow>
state ` set (parserHF_conf_stack c)
\<union> (case e of None \<Rightarrow> {} | Some e' \<Rightarrow> {rule e'})"
lemma parserHF_inst_AX_initial_configuration_belongs: "
(\<forall>G. valid_parser G \<longrightarrow> parserHF_initial_configurations G \<subseteq> parserHF_configurations G)"
apply(clarsimp)
apply(rename_tac G x)(*strict*)
apply(simp add: parserHF_initial_configurations_def)
done
lemma parserHF_inst_AX_step_relation_preserves_belongs: "
(\<forall>G. valid_parser G \<longrightarrow> (\<forall>c1 e c2. parserHF_step_relation G c1 e c2 \<longrightarrow> c1 \<in> parserHF_configurations G \<longrightarrow> e \<in> parser_step_labels G \<and> c2 \<in> parserHF_configurations G))"
apply(clarsimp)
apply(rename_tac G c1 e c2)(*strict*)
apply(rule context_conjI)
apply(rename_tac G c1 e c2)(*strict*)
apply(simp add: parser_step_labels_def parserHF_step_relation_def)
apply(rename_tac G c1 e c2)(*strict*)
apply(simp add: parserHF_configurations_def)
apply(clarsimp)
apply(rename_tac G e c2 f h l)(*strict*)
apply(simp add: parserHF_step_relation_def)
apply(clarsimp)
apply(rename_tac G e c2 f h x)(*strict*)
apply(case_tac c2)
apply(rename_tac G e c2 f h x parserHF_conf_fixeda parserHF_conf_historya parserHF_conf_stacka)(*strict*)
apply(clarsimp)
apply(rename_tac G e f h x)(*strict*)
apply(simp add: parser_step_labels_def)
apply(subgoal_tac "valid_parser_step_label G e")
apply(rename_tac G e f h x)(*strict*)
prefer 2
apply(simp add: valid_parser_def)
apply(rename_tac G e f h x)(*strict*)
apply(rule context_conjI)
apply(rename_tac G e f h x)(*strict*)
apply(simp add: valid_parser_step_label_def)
apply(rename_tac G e f h x)(*strict*)
apply(subgoal_tac "set (rule_rpush e) \<subseteq> parser_events G")
apply(rename_tac G e f h x)(*strict*)
prefer 2
apply(simp add: valid_parser_step_label_def)
apply(rename_tac G e f h x)(*strict*)
apply(clarsimp)
apply(rule context_conjI)
apply(rename_tac G e f h x)(*strict*)
apply(metis set_drop_subset subset_trans)
apply(rename_tac G e f h x)(*strict*)
apply(subgoal_tac "set (rule_rpop e) \<subseteq> parser_events G")
apply(rename_tac G e f h x)(*strict*)
prefer 2
apply(simp add: valid_parser_step_label_def)
apply(clarsimp)
apply(rename_tac G e f h x xa k w xc)(*strict*)
apply(rule_tac
A="set (kPrefix k (w @ [parser_bottom G]))"
in set_mp)
apply(rename_tac G e f h x xa k w xc)(*strict*)
prefer 2
apply(force)
apply(rename_tac G e f h x xa k w xc)(*strict*)
apply(rule set_kPrefix_subset)
apply(clarsimp)
apply(simp add: valid_parser_def)
apply(rule_tac
B="parser_events G - {parser_bottom G}"
in subset_trans)
apply(rename_tac G e f h x xa k w xc)(*strict*)
apply(force)
apply(rename_tac G e f h x xa k w xc)(*strict*)
apply(force)
apply(rename_tac G e f h x)(*strict*)
apply(subgoal_tac "set (drop (length f) (butlast_if_match (rule_rpop e) (parser_bottom G))) \<subseteq> set (rule_rpop e)")
apply(rename_tac G e f h x)(*strict*)
prefer 2
apply(rule_tac
B="set(butlast_if_match (rule_rpop e) (parser_bottom G))"
in subset_trans)
apply(rename_tac G e f h x)(*strict*)
apply(rule set_drop_subset)
apply(rename_tac G e f h x)(*strict*)
apply(rule_tac
B="set(rule_rpop e)"
in subset_trans)
apply(rename_tac G e f h x)(*strict*)
apply(rule set_butlast_if_match_is_subset)
apply(rename_tac G e f h x)(*strict*)
apply(force)
apply(rename_tac G e f h x)(*strict*)
apply(rule context_conjI)
apply(rename_tac G e f h x)(*strict*)
apply(force)
apply(rename_tac G e f h x)(*strict*)
apply(rule context_conjI)
apply(rename_tac G e f h x)(*strict*)
apply(simp add: valid_parser_step_label_def)
apply(case_tac "(\<exists>x. x @ rule_rpush e = rule_rpop e)")
apply(rename_tac G e f h x)(*strict*)
apply(clarsimp)
apply(rename_tac G e f h x xa k w)(*strict*)
apply(simp add: kPrefix_def)
apply(clarsimp)
apply(case_tac "k-length w")
apply(rename_tac G e f h x xa k w)(*strict*)
apply(clarsimp)
apply(subgoal_tac "butlast_if_match (take k w) (parser_bottom G) = take k w")
apply(rename_tac G e f h x xa k w)(*strict*)
apply(clarsimp)
apply(subgoal_tac "parser_bottom G \<notin> set (drop (length f) (take k w))")
apply(rename_tac G e f h x xa k w)(*strict*)
apply(force)
apply(rename_tac G e f h x xa k w)(*strict*)
apply(rule_tac
B="set(take k w)"
in nset_mp)
apply(rename_tac G e f h x xa k w)(*strict*)
apply(rule set_drop_subset)
apply(rename_tac G e f h x xa k w)(*strict*)
apply(rule_tac
B="set w"
in nset_mp)
apply(rename_tac G e f h x xa k w)(*strict*)
apply(rule set_take_subset)
apply(rename_tac G e f h x xa k w)(*strict*)
apply(rule_tac
A="parser_events G"
in not_in_diff)
apply(force)
apply(rename_tac G e f h x xa k w)(*strict*)
apply(rule butlast_if_match_direct2_prime)
apply(rule_tac
B="set w"
in nset_mp)
apply(rename_tac G e f h x xa k w)(*strict*)
apply(rule set_take_subset)
apply(rename_tac G e f h x xa k w)(*strict*)
apply(rule_tac
A="parser_events G"
in not_in_diff)
apply(force)
apply(rename_tac G e f h x xa k w nat)(*strict*)
apply(clarsimp)
apply(rename_tac G e f h x xa k w nat xb)(*strict*)
apply(subgoal_tac "butlast_if_match (w @ [parser_bottom G]) (parser_bottom G) = w")
apply(rename_tac G e f h x xa k w nat xb)(*strict*)
apply(clarsimp)
apply(subgoal_tac "parser_bottom G \<notin> set (drop (length f) w)")
apply(rename_tac G e f h x xa k w nat xb)(*strict*)
apply(force)
apply(rename_tac G e f h x xa k w nat xb)(*strict*)
apply(rule_tac
B="set w"
in nset_mp)
apply(rename_tac G e f h x xa k w nat xb)(*strict*)
apply(rule set_drop_subset)
apply(rename_tac G e f h x xa k w nat xb)(*strict*)
apply(rule_tac
A="parser_events G"
in not_in_diff)
apply(force)
apply(rename_tac G e f h x xa k w nat xb)(*strict*)
apply(rule butlast_if_match_direct)
apply(force)
apply(rename_tac G e f h x)(*strict*)
apply(clarsimp)
apply(rename_tac G e f h x)(*strict*)
apply(rule propSym)
apply(subgoal_tac "\<exists>w. rule_rpop e = w @ (rule_rpush e)")
apply(rename_tac G e f h x)(*strict*)
prefer 2
apply(simp add: valid_parser_step_label_def)
apply(clarsimp)
apply(rename_tac G e f h x k w xb)(*strict*)
apply(simp add: kPrefix_def)
apply(rule_tac
x="xb"
in exI)
apply(force)
apply(rename_tac G e f h x)(*strict*)
apply(clarsimp)
apply(rename_tac G e f h x w)(*strict*)
apply(rule context_conjI)
apply(rename_tac G e f h x w)(*strict*)
apply(clarsimp)
apply(erule disjE)
apply(rename_tac G e f h x w)(*strict*)
apply(simp add: prefix_def)
apply(clarsimp)
apply(rename_tac G e h x w c wa)(*strict*)
apply(subgoal_tac "prefix w wa \<or> prefix wa w")
apply(rename_tac G e h x w c wa)(*strict*)
apply(erule disjE)
apply(rename_tac G e h x w c wa)(*strict*)
apply(simp add: prefix_def)
apply(clarsimp)
apply(rename_tac G e h x w c wa)(*strict*)
apply(simp add: prefix_def)
apply(clarsimp)
apply(rename_tac G e h x c wa ca)(*strict*)
apply(case_tac ca)
apply(rename_tac G e h x c wa ca)(*strict*)
apply(clarsimp)
apply(rename_tac G e h x c wa ca a list)(*strict*)
apply(clarsimp)
apply(rename_tac G e h x w c wa)(*strict*)
apply(rule mutual_prefix_prefix)
apply(force)
apply(rename_tac G e f h x w)(*strict*)
apply(simp add: prefix_def)
apply(clarsimp)
apply(rename_tac G e f h x w c)(*strict*)
apply(subgoal_tac "drop (length w + length (rule_rpush e)) f = []")
apply(rename_tac G e f h x w c)(*strict*)
apply(clarsimp)
apply(simp add: valid_parser_step_label_def)
apply(clarsimp)
apply(rename_tac G e f h x w c k wa)(*strict*)
apply(case_tac e)
apply(rename_tac G e f h x w c k wa rule_lpopa rule_rpopa rule_lpusha rule_rpusha)(*strict*)
apply(clarsimp)
apply(rename_tac G f h x w c k wa rule_lpop rule_lpush rule_rpush)(*strict*)
apply(simp add: kPrefix_def)
apply(case_tac "k-length wa")
apply(rename_tac G f h x w c k wa rule_lpop rule_lpush rule_rpush)(*strict*)
apply(clarsimp)
apply(subgoal_tac "parser_bottom G \<notin> set rule_rpush")
apply(rename_tac G f h x w c k wa rule_lpop rule_lpush rule_rpush)(*strict*)
apply(force)
apply(rename_tac G f h x w c k wa rule_lpop rule_lpush rule_rpush)(*strict*)
apply(rule_tac
B="set(w @ rule_rpush)"
in nset_mp)
apply(rename_tac G f h x w c k wa rule_lpop rule_lpush rule_rpush)(*strict*)
apply(simp (no_asm))
apply(rename_tac G f h x w c k wa rule_lpop rule_lpush rule_rpush)(*strict*)
apply(rule_tac
B="set(take k wa)"
in nset_mp)
apply(rename_tac G f h x w c k wa rule_lpop rule_lpush rule_rpush)(*strict*)
apply(simp (no_asm_simp))
apply(rename_tac G f h x w c k wa rule_lpop rule_lpush rule_rpush)(*strict*)
apply(rule_tac
B="set wa"
in nset_mp)
apply(rename_tac G f h x w c k wa rule_lpop rule_lpush rule_rpush)(*strict*)
apply(rule set_take_subset)
apply(rename_tac G f h x w c k wa rule_lpop rule_lpush rule_rpush)(*strict*)
apply(rule_tac
A="parser_events G"
in not_in_diff)
apply(force)
apply(rename_tac G f h x w c k wa rule_lpop rule_lpush rule_rpush nat)(*strict*)
apply(clarsimp)
apply(rename_tac G f h x w c k rule_lpop rule_lpush nat xa)(*strict*)
apply(case_tac c)
apply(rename_tac G f h x w c k rule_lpop rule_lpush nat xa)(*strict*)
apply(clarsimp)
apply(rename_tac G f h x w c k rule_lpop rule_lpush nat xa a list)(*strict*)
apply(subgoal_tac "\<exists>w' x'. c = w' @ [x']")
apply(rename_tac G f h x w c k rule_lpop rule_lpush nat xa a list)(*strict*)
prefer 2
apply(rule NonEmptyListHasTailElem)
apply(force)
apply(rename_tac G f h x w c k rule_lpop rule_lpush nat xa a list)(*strict*)
apply(thin_tac "c=a#list")
apply(clarsimp)
apply(rename_tac G f h x w k rule_lpop rule_lpush nat xa w')(*strict*)
apply(subgoal_tac "parser_bottom G \<notin> set xa")
apply(rename_tac G f h x w k rule_lpop rule_lpush nat xa w')(*strict*)
apply(force)
apply(rename_tac G f h x w k rule_lpop rule_lpush nat xa w')(*strict*)
apply(rule_tac
A="parser_events G"
in not_in_diff)
apply(force)
apply(rename_tac G e f h x w c)(*strict*)
apply(rule drop_all)
apply(rule_tac
j="length(w@rule_rpush e)"
in le_trans)
apply(rename_tac G e f h x w c)(*strict*)
apply(rule_tac
t="w@rule_rpush e"
and s="f@c"
in ssubst)
apply(rename_tac G e f h x w c)(*strict*)
apply(force)
apply(rename_tac G e f h x w c)(*strict*)
apply(simp (no_asm))
apply(rename_tac G e f h x w c)(*strict*)
apply(force)
apply(rename_tac G e f h x w)(*strict*)
apply(rule context_conjI)
apply(rename_tac G e f h x w)(*strict*)
apply(clarsimp)
apply(subgoal_tac "parser_bottom G \<in> set f")
apply(rename_tac G e f h x w)(*strict*)
apply(clarsimp)
apply(rename_tac G e h x w wa)(*strict*)
apply(case_tac "length w + length (rule_rpush e) - length wa")
apply(rename_tac G e h x w wa)(*strict*)
apply(clarsimp)
apply(subgoal_tac "parser_bottom G \<in> set wa")
apply(rename_tac G e h x w wa)(*strict*)
apply(force)
apply(rename_tac G e h x w wa)(*strict*)
apply(rule_tac
A="set (drop (length w + length (rule_rpush e)) wa)"
in set_mp)
apply(rename_tac G e h x w wa)(*strict*)
apply(rule set_drop_subset)
apply(rename_tac G e h x w wa)(*strict*)
apply(force)
apply(rename_tac G e h x w wa nat)(*strict*)
apply(clarsimp)
apply(rename_tac G e f h x w)(*strict*)
apply(rule_tac
A="set (drop (length w + length (rule_rpush e)) f)"
in set_mp)
apply(rename_tac G e f h x w)(*strict*)
apply(rule set_drop_subset)
apply(rename_tac G e f h x w)(*strict*)
apply(force)
apply(rename_tac G e f h x w)(*strict*)
apply(simp add: suffix_def)
apply(clarsimp)
apply(rename_tac G e f x w c)(*strict*)
apply(case_tac "parser_bottom G \<in> set f")
apply(rename_tac G e f x w c)(*strict*)
apply(clarsimp)
apply(rename_tac G e x w c wa)(*strict*)
apply(rule_tac
t="butlast_if_match (wa @ [parser_bottom G]) (parser_bottom G)"
and s="wa"
in ssubst)
apply(rename_tac G e x w c wa)(*strict*)
apply(rule butlast_if_match_direct)
apply(force)
apply(rename_tac G e x w c wa)(*strict*)
apply(case_tac "length w + length (rule_rpush e) - length wa")
apply(rename_tac G e x w c wa)(*strict*)
apply(clarsimp)
apply(rule_tac
t="butlast_if_match (rule_rpush e @ drop (length w + length (rule_rpush e)) wa @ [parser_bottom G]) (parser_bottom G)"
and s="rule_rpush e @ drop (length w + length (rule_rpush e)) wa"
in ssubst)
apply(rename_tac G e x w c wa)(*strict*)
apply(rule butlast_if_match_direct)
apply(force)
apply(rename_tac G e x w c wa)(*strict*)
apply(erule disjE)
apply(rename_tac G e x w c wa)(*strict*)
apply(simp add: prefix_def)
apply(clarsimp)
apply(rename_tac G e x w c wa ca)(*strict*)
apply(rule_tac
t="drop (Suc (length wa)) (butlast_if_match (w @ rule_rpush e) (parser_bottom G))"
and s="[]"
in ssubst)
apply(rename_tac G e x w c wa ca)(*strict*)
apply(rule drop_all)
apply(rule_tac
j="length(w@rule_rpush e)"
in le_trans)
apply(rename_tac G e x w c wa ca)(*strict*)
apply(rule butlast_if_match_length_le)
apply(rename_tac G e x w c wa ca)(*strict*)
apply(force)
apply(rename_tac G e x w c wa ca)(*strict*)
apply(clarsimp)
apply(case_tac ca)
apply(rename_tac G e x w c wa ca)(*strict*)
apply(clarsimp)
apply(rename_tac G e x w c wa)(*strict*)
apply(rule_tac
t="drop (length w + length (rule_rpush e)) wa"
and s="[]"
in ssubst)
apply(rename_tac G e x w c wa)(*strict*)
apply(rule drop_all)
apply(rule_tac
j="length(w@rule_rpush e)"
in le_trans)
apply(rename_tac G e x w c wa)(*strict*)
apply(force)
apply(rename_tac G e x w c wa)(*strict*)
apply(simp (no_asm))
apply(rename_tac G e x w c wa)(*strict*)
apply(clarsimp)
apply(case_tac "rule_rpush e")
apply(rename_tac G e x w c wa)(*strict*)
apply(clarsimp)
apply(rename_tac G e x w c wa a list)(*strict*)
apply(subgoal_tac "\<exists>w' x'. rule_rpush e = w' @ [x']")
apply(rename_tac G e x w c wa a list)(*strict*)
prefer 2
apply(rule NonEmptyListHasTailElem)
apply(force)
apply(rename_tac G e x w c wa a list)(*strict*)
apply(thin_tac "rule_rpush e=a#list")
apply(clarsimp)
apply(rename_tac G e x w c wa ca a list)(*strict*)
apply(subgoal_tac "\<exists>w' x'. ca = w' @ [x']")
apply(rename_tac G e x w c wa ca a list)(*strict*)
prefer 2
apply(rule NonEmptyListHasTailElem)
apply(force)
apply(rename_tac G e x w c wa ca a list)(*strict*)
apply(thin_tac "ca=a#list")
apply(clarsimp)
apply(rename_tac G e x w c wa)(*strict*)
apply(simp add: prefix_def)
apply(clarsimp)
apply(rename_tac G e x w c wa ca)(*strict*)
apply(subgoal_tac "prefix (wa@[parser_bottom G]) w \<or> prefix w (wa@[parser_bottom G])")
apply(rename_tac G e x w c wa ca)(*strict*)
prefer 2
apply(rule mutual_prefix_prefix)
apply(force)
apply(rename_tac G e x w c wa ca)(*strict*)
apply(erule disjE)
apply(rename_tac G e x w c wa ca)(*strict*)
apply(simp add: prefix_def)
apply(clarsimp)
apply(rename_tac G e x w c wa ca)(*strict*)
apply(simp add: prefix_def)
apply(clarsimp)
apply(rename_tac G e x w c wa ca cb)(*strict*)
apply(case_tac cb)
apply(rename_tac G e x w c wa ca cb)(*strict*)
apply(clarsimp)
apply(rename_tac G e x w c wa ca cb a list)(*strict*)
apply(subgoal_tac "\<exists>w' x'. cb = w' @ [x']")
apply(rename_tac G e x w c wa ca cb a list)(*strict*)
prefer 2
apply(rule NonEmptyListHasTailElem)
apply(force)
apply(rename_tac G e x w c wa ca cb a list)(*strict*)
apply(thin_tac "cb=a#list")
apply(clarsimp)
apply(rename_tac G e x w c ca w')(*strict*)
apply(subgoal_tac "parser_bottom G \<in> set (rule_rpush e)")
apply(rename_tac G e x w c ca w')(*strict*)
apply(force)
apply(rename_tac G e x w c ca w')(*strict*)
apply(rule_tac
t="rule_rpush e"
and s="w' @ parser_bottom G # ca"
in ssubst)
apply(rename_tac G e x w c ca w')(*strict*)
apply(force)
apply(rename_tac G e x w c ca w')(*strict*)
apply(simp (no_asm))
apply(rename_tac G e x w c wa nat)(*strict*)
apply(clarsimp)
apply(case_tac "parser_bottom G \<in> set (rule_rpush e)")
apply(rename_tac G e x w c wa nat)(*strict*)
apply(clarsimp)
apply(rename_tac G e x w c wa nat wb)(*strict*)
apply(rule_tac
t="butlast_if_match (wb @ [parser_bottom G]) (parser_bottom G)"
and s="wb"
in ssubst)
apply(rename_tac G e x w c wa nat wb)(*strict*)
apply(rule butlast_if_match_direct)
apply(force)
apply(rename_tac G e x w c wa nat wb)(*strict*)
apply(rule_tac
t="butlast_if_match (w @ wb @ [parser_bottom G]) (parser_bottom G)"
and s="w@wb"
in ssubst)
apply(rename_tac G e x w c wa nat wb)(*strict*)
apply(rule butlast_if_match_direct)
apply(force)
apply(rename_tac G e x w c wa nat wb)(*strict*)
apply(erule disjE)
apply(rename_tac G e x w c wa nat wb)(*strict*)
apply(simp add: prefix_def)
apply(clarsimp)
apply(rename_tac G e x w c wa nat wb ca)(*strict*)
apply(case_tac ca)
apply(rename_tac G e x w c wa nat wb ca)(*strict*)
apply(clarsimp)
apply(rename_tac G e x w c wa nat wb ca a list)(*strict*)
apply(subgoal_tac "\<exists>w' x'. ca = w' @ [x']")
apply(rename_tac G e x w c wa nat wb ca a list)(*strict*)
prefer 2
apply(rule NonEmptyListHasTailElem)
apply(force)
apply(rename_tac G e x w c wa nat wb ca a list)(*strict*)
apply(thin_tac "ca=a#list")
apply(clarsimp)
apply(rename_tac G e x w c wa nat wb)(*strict*)
apply(simp add: prefix_def)
apply(clarsimp)
apply(rename_tac G e x w c wa nat wb ca)(*strict*)
apply(case_tac ca)
apply(rename_tac G e x w c wa nat wb ca)(*strict*)
apply(clarsimp)
apply(rename_tac G e x w c wa nat wb ca a list)(*strict*)
apply(subgoal_tac "\<exists>w' x'. ca = w' @ [x']")
apply(rename_tac G e x w c wa nat wb ca a list)(*strict*)
prefer 2
apply(rule NonEmptyListHasTailElem)
apply(force)
apply(rename_tac G e x w c wa nat wb ca a list)(*strict*)
apply(thin_tac "ca=a#list")
apply(clarsimp)
apply(rename_tac G e x w c wa nat wb w')(*strict*)
apply(subgoal_tac "prefix (wa@[parser_bottom G]) w \<or> prefix w (wa@[parser_bottom G])")
apply(rename_tac G e x w c wa nat wb w')(*strict*)
prefer 2
apply(rule mutual_prefix_prefix)
apply(force)
apply(rename_tac G e x w c wa nat wb w')(*strict*)
apply(erule disjE)
apply(rename_tac G e x w c wa nat wb w')(*strict*)
apply(simp add: prefix_def)
apply(clarsimp)
apply(rename_tac G e x w c wa nat wb w')(*strict*)
apply(simp add: prefix_def)
apply(clarsimp)
apply(rename_tac G e x w c wa nat wb w' ca)(*strict*)
apply(case_tac ca)
apply(rename_tac G e x w c wa nat wb w' ca)(*strict*)
apply(clarsimp)
apply(rename_tac G e x w c wa nat wb w' ca a list)(*strict*)
apply(subgoal_tac "\<exists>w' x'. ca = w' @ [x']")
apply(rename_tac G e x w c wa nat wb w' ca a list)(*strict*)
prefer 2
apply(rule NonEmptyListHasTailElem)
apply(force)
apply(rename_tac G e x w c wa nat wb w' ca a list)(*strict*)
apply(thin_tac "ca=a#list")
apply(clarsimp)
apply(rename_tac G e x w c wa nat)(*strict*)
apply(rule_tac
t="butlast_if_match (rule_rpush e) (parser_bottom G)"
and s="rule_rpush e"
in ssubst)
apply(rename_tac G e x w c wa nat)(*strict*)
apply(rule butlast_if_match_direct2_prime)
apply(force)
apply(rename_tac G e x w c wa nat)(*strict*)
apply(case_tac "rule_rpush e")
apply(rename_tac G e x w c wa nat)(*strict*)
apply(clarsimp)
apply(rename_tac G e x w c wa nat a list)(*strict*)
apply(subgoal_tac "\<exists>w' x'. rule_rpush e = w' @ [x']")
apply(rename_tac G e x w c wa nat a list)(*strict*)
prefer 2
apply(rule NonEmptyListHasTailElem)
apply(force)
apply(rename_tac G e x w c wa nat a list)(*strict*)
apply(thin_tac "rule_rpush e=a#list")
apply(clarsimp)
apply(rename_tac G e x w c wa nat w' x')(*strict*)
apply(rule_tac
t="butlast_if_match (w @ w' @ [x']) (parser_bottom G)"
and s="w @ w' @ [x']"
in ssubst)
apply(rename_tac G e x w c wa nat w' x')(*strict*)
apply(rule butlast_if_match_direct2)
apply(rename_tac G e x w c wa nat w' x')(*strict*)
apply(force)
apply(rename_tac G e x w c wa nat w' x')(*strict*)
apply(force)
apply(rename_tac G e x w c wa nat w' x')(*strict*)
apply(clarsimp)
apply(subgoal_tac "nat+length wa=length w + length w'")
apply(rename_tac G e x w c wa nat w' x')(*strict*)
prefer 2
apply(force)
apply(rename_tac G e x w c wa nat w' x')(*strict*)
apply(erule disjE)
apply(rename_tac G e x w c wa nat w' x')(*strict*)
prefer 2
apply(simp add: prefix_def)
apply(clarsimp)
apply(rename_tac G e x w c wa nat w' x' ca)(*strict*)
apply(subgoal_tac "prefix (wa@[parser_bottom G]) w \<or> prefix w (wa@[parser_bottom G])")
apply(rename_tac G e x w c wa nat w' x' ca)(*strict*)
prefer 2
apply(rule mutual_prefix_prefix)
apply(force)
apply(rename_tac G e x w c wa nat w' x' ca)(*strict*)
apply(erule disjE)
apply(rename_tac G e x w c wa nat w' x' ca)(*strict*)
apply(simp add: prefix_def)
apply(clarsimp)
apply(rename_tac G e x w c wa nat w' x' ca)(*strict*)
apply(simp add: prefix_def)
apply(clarsimp)
apply(rename_tac G e x w c wa nat w' x' ca cb)(*strict*)
apply(case_tac cb)
apply(rename_tac G e x w c wa nat w' x' ca cb)(*strict*)
apply(clarsimp)
apply(rename_tac G e x w c wa nat w' x' ca cb a list)(*strict*)
apply(subgoal_tac "\<exists>w' x'. cb = w' @ [x']")
apply(rename_tac G e x w c wa nat w' x' ca cb a list)(*strict*)
prefer 2
apply(rule NonEmptyListHasTailElem)
apply(force)
apply(rename_tac G e x w c wa nat w' x' ca cb a list)(*strict*)
apply(thin_tac "cb=a#list")
apply(clarsimp)
apply(rename_tac G e x w c nat w' x' ca w'a)(*strict*)
apply(case_tac ca)
apply(rename_tac G e x w c nat w' x' ca w'a)(*strict*)
apply(clarsimp)
apply(rename_tac G e x w c nat w' x' ca w'a a list)(*strict*)
apply(subgoal_tac "\<exists>w' x'. ca = w' @ [x']")
apply(rename_tac G e x w c nat w' x' ca w'a a list)(*strict*)
prefer 2
apply(rule NonEmptyListHasTailElem)
apply(force)
apply(rename_tac G e x w c nat w' x' ca w'a a list)(*strict*)
apply(thin_tac "ca=a#list")
apply(clarsimp)
apply(rename_tac G e x w c wa nat w' x')(*strict*)
apply(simp add: prefix_def)
apply(clarsimp)
apply(rename_tac G e x w c wa nat w' x' ca)(*strict*)
apply(case_tac ca)
apply(rename_tac G e x w c wa nat w' x' ca)(*strict*)
apply(clarsimp)
apply(rename_tac G e x w c wa nat w' x' ca a list)(*strict*)
apply(subgoal_tac "\<exists>w' x'. ca = w' @ [x']")
apply(rename_tac G e x w c wa nat w' x' ca a list)(*strict*)
prefer 2
apply(rule NonEmptyListHasTailElem)
apply(force)
apply(rename_tac G e x w c wa nat w' x' ca a list)(*strict*)
apply(thin_tac "ca=a#list")
apply(clarsimp)
apply(rename_tac G e f x w c)(*strict*)
apply(rule_tac
t="butlast_if_match f (parser_bottom G)"
and s="f"
in ssubst)
apply(rename_tac G e f x w c)(*strict*)
apply(rule butlast_if_match_direct2_prime)
apply(force)
apply(rename_tac G e f x w c)(*strict*)
apply(clarsimp)
apply(erule disjE)
apply(rename_tac G e f x w c)(*strict*)
apply(simp add: prefix_def)
apply(clarsimp)
apply(rename_tac G e x w c ca)(*strict*)
apply(case_tac ca)
apply(rename_tac G e x w c ca)(*strict*)
apply(clarsimp)
apply(rename_tac G e x w c)(*strict*)
apply(case_tac "rule_rpush e")
apply(rename_tac G e x w c)(*strict*)
apply(clarsimp)
apply(rename_tac G e x c)(*strict*)
apply(rule_tac
t="butlast_if_match [] (parser_bottom G)"
and s="[]"
in ssubst)
apply(rename_tac G e x c)(*strict*)
apply(rule butlast_if_match_direct2_prime)
apply(force)
apply(rename_tac G e x c)(*strict*)
apply(clarsimp)
apply(rename_tac G e x w c a list)(*strict*)
apply(subgoal_tac "\<exists>w' x'. rule_rpush e = w' @ [x']")
apply(rename_tac G e x w c a list)(*strict*)
prefer 2
apply(rule NonEmptyListHasTailElem)
apply(force)
apply(rename_tac G e x w c a list)(*strict*)
apply(thin_tac "rule_rpush e=a#list")
apply(clarsimp)
apply(rename_tac G e x w c w' x')(*strict*)
apply(rule_tac
t="butlast_if_match (w' @ [x']) (parser_bottom G)"
and s="w'@[x']"
in ssubst)
apply(rename_tac G e x w c w' x')(*strict*)
apply(rule butlast_if_match_direct2)
apply(rename_tac G e x w c w' x')(*strict*)
apply(force)
apply(rename_tac G e x w c w' x')(*strict*)
apply(force)
apply(rename_tac G e x w c w' x')(*strict*)
apply(rule_tac
t="butlast_if_match (w@w' @ [x']) (parser_bottom G)"
and s="w@w'@[x']"
in ssubst)
apply(rename_tac G e x w c w' x')(*strict*)
apply(rule butlast_if_match_direct2)
apply(rename_tac G e x w c w' x')(*strict*)
apply(force)
apply(rename_tac G e x w c w' x')(*strict*)
apply(force)
apply(rename_tac G e x w c w' x')(*strict*)
apply(clarsimp)
apply(rename_tac G e x w c ca a list)(*strict*)
apply(subgoal_tac "\<exists>w' x'. ca = w' @ [x']")
apply(rename_tac G e x w c ca a list)(*strict*)
prefer 2
apply(rule NonEmptyListHasTailElem)
apply(force)
apply(rename_tac G e x w c ca a list)(*strict*)
apply(thin_tac "ca=a#list")
apply(clarsimp)
apply(rename_tac G e x w c w' x')(*strict*)
apply(rule_tac
t="butlast_if_match (rule_rpush e @ w' @ [x']) (parser_bottom G)"
and s="(rule_rpush e @ w' @ [x'])"
in ssubst)
apply(rename_tac G e x w c w' x')(*strict*)
apply(rule butlast_if_match_direct2)
apply(rename_tac G e x w c w' x')(*strict*)
apply(force)
apply(rename_tac G e x w c w' x')(*strict*)
apply(force)
apply(rename_tac G e x w c w' x')(*strict*)
apply(rule_tac
t="drop (Suc (length w + (length (rule_rpush e) + length w'))) (butlast_if_match (w @ rule_rpush e) (parser_bottom G))"
and s="[]"
in ssubst)
apply(rename_tac G e x w c w' x')(*strict*)
apply(rule drop_all)
apply(rule_tac
j="length ((w @ rule_rpush e))"
in le_trans)
apply(rename_tac G e x w c w' x')(*strict*)
apply(rule butlast_if_match_length_le)
apply(rename_tac G e x w c w' x')(*strict*)
apply(force)
apply(rename_tac G e x w c w' x')(*strict*)
apply(force)
apply(rename_tac G e f x w c)(*strict*)
apply(simp add: prefix_def)
apply(clarsimp)
apply(rename_tac G e f x w c ca)(*strict*)
apply(rule_tac
t="drop (length w + length (rule_rpush e)) f"
and s="[]"
in ssubst)
apply(rename_tac G e f x w c ca)(*strict*)
apply(rule drop_all)
apply(rule_tac
t="length w + length (rule_rpush e)"
and s="length (w @ rule_rpush e)"
in ssubst)
apply(rename_tac G e f x w c ca)(*strict*)
apply(force)
apply(rename_tac G e f x w c ca)(*strict*)
apply(rule_tac
t="w @ rule_rpush e"
and s="f@ca"
in ssubst)
apply(rename_tac G e f x w c ca)(*strict*)
apply(force)
apply(rename_tac G e f x w c ca)(*strict*)
apply(simp (no_asm))
apply(rename_tac G e f x w c ca)(*strict*)
apply(clarsimp)
apply(rule_tac
t="drop (length f) (butlast_if_match (w @ rule_rpush e) (parser_bottom G))"
and s="butlast_if_match ca (parser_bottom G)"
in ssubst)
apply(rename_tac G e f x w c ca)(*strict*)
apply(rule_tac
t="w @ rule_rpush e"
and s="f@ca"
in ssubst)
apply(rename_tac G e f x w c ca)(*strict*)
apply(force)
apply(rename_tac G e f x w c ca)(*strict*)
apply(rule drop_butlast_if_match_distrib)
apply(rename_tac G e f x w c ca)(*strict*)
apply(case_tac ca)
apply(rename_tac G e f x w c ca)(*strict*)
apply(clarsimp)
apply(rename_tac G e x w c)(*strict*)
apply(rule_tac
t="butlast_if_match [] (parser_bottom G)"
and s="[]"
in ssubst)
apply(rename_tac G e x w c)(*strict*)
apply(rule butlast_if_match_direct2_prime)
apply(force)
apply(rename_tac G e x w c)(*strict*)
apply(clarsimp)
apply(case_tac "rule_rpush e")
apply(rename_tac G e x w c)(*strict*)
apply(clarsimp)
apply(rename_tac G e x c)(*strict*)
apply(rule_tac
t="butlast_if_match [] (parser_bottom G)"
and s="[]"
in ssubst)
apply(rename_tac G e x c)(*strict*)
apply(rule butlast_if_match_direct2_prime)
apply(force)
apply(rename_tac G e x c)(*strict*)
apply(clarsimp)
apply(rename_tac G e x w c a list)(*strict*)
apply(subgoal_tac "\<exists>w' x'. rule_rpush e = w' @ [x']")
apply(rename_tac G e x w c a list)(*strict*)
prefer 2
apply(rule NonEmptyListHasTailElem)
apply(force)
apply(rename_tac G e x w c a list)(*strict*)
apply(thin_tac "rule_rpush e=a#list")
apply(clarsimp)
apply(rename_tac G e x w c w' x')(*strict*)
apply(rule_tac
t="butlast_if_match (w' @ [x']) (parser_bottom G)"
and s="w'@[x']"
in ssubst)
apply(rename_tac G e x w c w' x')(*strict*)
apply(rule butlast_if_match_direct2)
apply(rename_tac G e x w c w' x')(*strict*)
apply(force)
apply(rename_tac G e x w c w' x')(*strict*)
apply(force)
apply(rename_tac G e x w c w' x')(*strict*)
apply(force)
apply(rename_tac G e f x w c ca a list)(*strict*)
apply(subgoal_tac "\<exists>w' x'. ca = w' @ [x']")
apply(rename_tac G e f x w c ca a list)(*strict*)
prefer 2
apply(rule NonEmptyListHasTailElem)
apply(force)
apply(rename_tac G e f x w c ca a list)(*strict*)
apply(thin_tac "ca=a#list")
apply(clarsimp)
apply(rename_tac G e f x w c w' x')(*strict*)
apply(case_tac "rule_rpush e")
apply(rename_tac G e f x w c w' x')(*strict*)
apply(clarsimp)
apply(rename_tac G e f x c w' x')(*strict*)
apply(rule_tac
t="butlast_if_match [] (parser_bottom G)"
and s="[]"
in ssubst)
apply(rename_tac G e f x c w' x')(*strict*)
apply(rule butlast_if_match_direct2_prime)
apply(force)
apply(rename_tac G e f x c w' x')(*strict*)
apply(clarsimp)
apply(rename_tac G e f x w c w' x' a list)(*strict*)
apply(subgoal_tac "\<exists>w' x'. rule_rpush e = w' @ [x']")
apply(rename_tac G e f x w c w' x' a list)(*strict*)
prefer 2
apply(rule NonEmptyListHasTailElem)
apply(force)
apply(rename_tac G e f x w c w' x' a list)(*strict*)
apply(thin_tac "rule_rpush e=a#list")
apply(clarsimp)
apply(rename_tac G e f x w c w' w'a x'a)(*strict*)
apply(rule_tac
x="c@w"
in exI)
apply(clarsimp)
apply(rule_tac
t="f @ butlast_if_match (w' @ [x'a]) (parser_bottom G)"
and s="butlast_if_match (f @ w' @ [x'a]) (parser_bottom G)"
in ssubst)
apply(rename_tac G e f x w c w' w'a x'a)(*strict*)
apply (metis butlast_if_match_pull_out_prime)
apply(rename_tac G e f x w c w' w'a x'a)(*strict*)
apply(rule_tac
t="w @ butlast_if_match (w'a @ [x'a]) (parser_bottom G)"
and s="butlast_if_match (w @ w'a @ [x'a]) (parser_bottom G)"
in ssubst)
apply(rename_tac G e f x w c w' w'a x'a)(*strict*)
apply (metis butlast_if_match_pull_out dropPrecise drop_Nil not_Cons_self)
apply(rename_tac G e f x w c w' w'a x'a)(*strict*)
apply(rule_tac
t="f @ w' @ [x'a]"
and s="(f @ w')@ [x'a]"
in ssubst)
apply(rename_tac G e f x w c w' w'a x'a)(*strict*)
apply(force)
apply(rename_tac G e f x w c w' w'a x'a)(*strict*)
apply(rule_tac
t="f @ w'"
and s="w @ w'a"
in ssubst)
apply(rename_tac G e f x w c w' w'a x'a)(*strict*)
apply(force)
apply(rename_tac G e f x w c w' w'a x'a)(*strict*)
apply(force)
done
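(* The interpretations below discharge, step by step, the axioms of the
   loc_autHF locale hierarchy for the history-and-fixed-scheduler parser
   semantics; each interpretation reuses the parserHF_inst_* lemmas
   proved in between. *)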
interpretation "parserHF" : loc_autHF_0
(* TSstructure *)
"valid_parser"
(* configurations *)
"parserHF_configurations"
(* initial_configurations *)
"parserHF_initial_configurations"
(* step_labels *)
"parser_step_labels"
(* step_relation *)
"parserHF_step_relation"
(* effects *)
"parser_markers"
(* marking_condition *)
"parserHF_marking_condition"
(* marked_effect *)
"parserHF_marked_effect"
(* unmarked_effect *)
"parserHF_unmarked_effect"
(* destinations *)
"parser_destinations"
(* get_destinations *)
"parserHF_get_destinations"
apply(simp add: LOCALE_DEFS parser_interpretations)
apply(simp add: parserHF_inst_AX_initial_configuration_belongs parserHF_inst_AX_step_relation_preserves_belongs)
done
lemma parserHF_inst_AX_effect_inclusion1: "
(\<forall>M f. parserHF_marking_condition M f \<longrightarrow> parserHF_marked_effect M f \<subseteq> parserHF_unmarked_effect M f)"
apply(clarsimp)
apply(rename_tac M f x)(*strict*)
apply(simp add: parserHF_unmarked_effect_def parserHF_marked_effect_def)
apply(clarsimp)
apply(rename_tac M f i e c)(*strict*)
apply(rule_tac
x="i"
in exI)
apply(rule_tac
x="e"
in exI)
apply(rule_tac
x="c"
in exI)
apply(clarsimp)
done
lemma parserHF_inst_lang_sound: "
(\<forall>M. valid_parser M \<longrightarrow> parserHF.unmarked_language M \<subseteq> parser_markers M)"
apply(clarsimp)
apply(rename_tac M x)(*strict*)
apply(simp add: parserHF.unmarked_language_def parserHF_unmarked_effect_def parser_markers_def)
apply(clarsimp)
apply(rename_tac M xa d i e c)(*strict*)
apply(subgoal_tac "c \<in> parserHF_configurations M")
apply(rename_tac M xa d i e c)(*strict*)
apply(simp add: parserHF_configurations_def)
apply(clarsimp)
apply(rename_tac M xa d i e f h l)(*strict*)
apply(force)
apply(rename_tac M xa d i e c)(*strict*)
apply(rule parserHF.belongs_configurations)
apply(rename_tac M xa d i e c)(*strict*)
apply(rule parserHF.derivation_initial_belongs)
apply(rename_tac M xa d i e c)(*strict*)
apply(force)
apply(rename_tac M xa d i e c)(*strict*)
apply(force)
apply(rename_tac M xa d i e c)(*strict*)
apply(force)
done
lemma parserHF_inst_AX_marking_condition_implies_existence_of_effect: "
(\<forall>M. valid_parser M \<longrightarrow> (\<forall>f. parserHF.derivation_initial M f \<longrightarrow> parserHF_marking_condition M f \<longrightarrow> parserHF_marked_effect M f \<noteq> {}))"
apply(simp add: parserHF_marking_condition_def parserHF_marked_effect_def)
apply(clarsimp)
apply(rename_tac M f i e c)(*strict*)
apply(force)
done
lemma parserHF_inst_AX_unmarked_effect_persists: "
(\<forall>G. valid_parser G \<longrightarrow>
(\<forall>d. ATS.derivation_initial parserHF_initial_configurations
parserHF_step_relation G d \<longrightarrow>
(\<forall>n. parserHF_unmarked_effect G (derivation_take d n)
\<subseteq> parserHF_unmarked_effect G d)))"
apply(clarsimp)
apply(rename_tac G d n xa)(*strict*)
apply(simp add: parserHF_unmarked_effect_def derivation_take_def)
apply(clarsimp)
apply(rename_tac G d n i e c)(*strict*)
apply(rule_tac
x="i"
in exI)
apply(rule_tac
x="e"
in exI)
apply(rule_tac
x="c"
in exI)
apply(clarsimp)
apply(case_tac "i\<le>n")
apply(rename_tac G d n i e c)(*strict*)
apply(force)
apply(rename_tac G d n i e c)(*strict*)
apply(force)
done
lemma parserHF_inst_ATS_axioms: "
ATS_Language_axioms valid_parser parserHF_initial_configurations
parserHF_step_relation parser_markers parserHF_marking_condition
parserHF_marked_effect parserHF_unmarked_effect"
apply(simp add: ATS_Language_axioms_def)
apply(simp add: parserHF_inst_AX_effect_inclusion1 parserHF_inst_lang_sound parserHF_inst_AX_marking_condition_implies_existence_of_effect parserHF_inst_AX_unmarked_effect_persists )
done
interpretation "parserHF" : loc_autHF_1
(* TSstructure *)
"valid_parser"
(* configurations *)
"parserHF_configurations"
(* initial_configurations *)
"parserHF_initial_configurations"
(* step_labels *)
"parser_step_labels"
(* step_relation *)
"parserHF_step_relation"
(* effects *)
"parser_markers"
(* marking_condition *)
"parserHF_marking_condition"
(* marked_effect *)
"parserHF_marked_effect"
(* unmarked_effect *)
"parserHF_unmarked_effect"
(* destinations *)
"parser_destinations"
(* get_destinations *)
"parserHF_get_destinations"
apply(simp add: LOCALE_DEFS parser_interpretations)
apply(simp add: parserHF_inst_AX_initial_configuration_belongs parserHF_inst_AX_step_relation_preserves_belongs )
apply(simp add: parserHF_inst_ATS_axioms )
done
definition parserHF_set_history :: "
('stack, 'event) parserHF_conf
\<Rightarrow> 'event list
\<Rightarrow> ('stack, 'event) parserHF_conf"
where
"parserHF_set_history c h \<equiv>
c \<lparr>parserHF_conf_history := h\<rparr>"
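(* A minimal sanity check for the definition above. This lemma is an
   illustrative addition, not part of the original proof chain; it should
   follow from plain record-update simplification: setting the history
   twice keeps only the second update. *)
lemma parserHF_set_history_overwrite: "
parserHF_set_history (parserHF_set_history c h1) h2 = parserHF_set_history c h2"
apply(simp add: parserHF_set_history_def)
done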
lemma parserHF_inst_AX_initial_history_empty: "
(\<forall>G. valid_parser G \<longrightarrow> (\<forall>c. c \<in> parserHF_initial_configurations G \<longrightarrow> parserHF_conf_history c = []))"
apply(simp add: parserHF_initial_configurations_def)
done
lemma parserHF_inst_AX_steps_extend_history: "
(\<forall>G. valid_parser G \<longrightarrow> (\<forall>c. c \<in> parserHF_configurations G \<longrightarrow> (\<forall>e c'. parserHF_step_relation G c e c' \<longrightarrow> (\<exists>hf\<in> parser_markers G. parserHF_conf_history c' = parserHF_conf_history c @ hf))))"
apply(clarsimp)
apply(rename_tac G c e c')(*strict*)
apply(subgoal_tac "SSe \<in> parser_step_labels SSG \<and> SSc2 \<in> parserHF_configurations SSG" for SSe SSc2 SSG)
apply(rename_tac G c e c')(*strict*)
prefer 2
apply(rule parserHF.AX_step_relation_preserves_belongs)
apply(rename_tac G c e c')(*strict*)
apply(force)
apply(rename_tac G c e c')(*strict*)
apply(force)
apply(rename_tac G c e c')(*strict*)
apply(force)
apply(rename_tac G c e c')(*strict*)
apply(clarsimp)
apply(simp add: parserHF_step_relation_def parser_markers_def parser_step_labels_def)
apply(clarsimp)
apply(rename_tac G c e c' x xa)(*strict*)
apply(subgoal_tac "valid_parser_step_label G e")
apply(rename_tac G c e c' x xa)(*strict*)
prefer 2
apply(simp add: valid_parser_def)
apply(rename_tac G c e c' x xa)(*strict*)
apply(simp add: valid_parser_step_label_def)
apply(clarsimp)
apply(rename_tac G c e c' x xa k w xc)(*strict*)
apply(rule_tac
A="set (drop (length (parserHF_conf_fixed c)) (butlast_if_match (kPrefix k (w @ [parser_bottom G])) (parser_bottom G)))"
in set_mp)
apply(rename_tac G c e c' x xa k w xc)(*strict*)
apply(rule_tac
B="set ((butlast_if_match (kPrefix k (w @ [parser_bottom G])) (parser_bottom G)))"
in subset_trans)
apply(rename_tac G c e c' x xa k w xc)(*strict*)
apply(rule set_drop_subset)
apply(rename_tac G c e c' x xa k w xc)(*strict*)
apply(rule_tac
B="set (((kPrefix k (w @ [parser_bottom G]))))"
in subset_trans)
apply(rename_tac G c e c' x xa k w xc)(*strict*)
apply(rule set_butlast_if_match_is_subset)
apply(rename_tac G c e c' x xa k w xc)(*strict*)
apply(rule set_kPrefix_subset)
apply(clarsimp)
apply(simp add: valid_parser_def)
apply(blast)
apply(rename_tac G c e c' x xa k w xc)(*strict*)
apply(force)
done
lemma parserHF_inst_AX_empty_history_is_history: "
(\<forall>G. valid_parser G \<longrightarrow> [] \<in> parser_markers G)"
apply(simp add: parser_markers_def)
done
lemma parserHF_inst_AX_set_get_history: "
(\<forall>G. valid_parser G \<longrightarrow> (\<forall>c. c \<in> parserHF_configurations G \<longrightarrow> parserHF_set_history c (parserHF_conf_history c) = c))"
apply(clarsimp)
apply(rename_tac G c)(*strict*)
apply(simp add: parserHF_set_history_def)
done
lemma parserHF_inst_AX_get_set_history: "
(\<forall>G. valid_parser G \<longrightarrow> (\<forall>c. c \<in> parserHF_configurations G \<longrightarrow> (\<forall>h. h \<in> parser_markers G \<longrightarrow> parserHF_conf_history (parserHF_set_history c h) = h)))"
apply(clarsimp)
apply(rename_tac G c h)(*strict*)
apply(simp add: parserHF_set_history_def)
done
lemma parserHF_inst_AX_join_history_fragments_closed: "
(\<forall>G. valid_parser G \<longrightarrow> (\<forall>hf1. hf1 \<in> parser_markers G \<longrightarrow> (\<forall>hf2. hf2 \<in> parser_markers G \<longrightarrow> hf1 @ hf2 \<in> parser_markers G)))"
apply(clarsimp)
apply(rename_tac G hf1 hf2)(*strict*)
apply(simp add: parser_markers_def)
done
lemma parserHF_inst_AX_get_history_closed: "
(\<forall>G. valid_parser G \<longrightarrow> (\<forall>c. c \<in> parserHF_configurations G \<longrightarrow> parserHF_conf_history c \<in> parser_markers G))"
apply(clarsimp)
apply(rename_tac G c)(*strict*)
apply(simp add: parser_markers_def parserHF_configurations_def)
apply(clarsimp)
apply(rename_tac G x f h l)(*strict*)
apply(force)
done
lemma parserHF_inst_AX_mutual_prefix: "
(\<forall>G. valid_parser G \<longrightarrow> (\<forall>hf1. hf1 \<in> parser_markers G \<longrightarrow> (\<forall>hf2. hf2 \<in> parser_markers G \<longrightarrow> (\<forall>hf3. hf3 \<in> parser_markers G \<longrightarrow> (\<forall>hf4. hf4 \<in> parser_markers G \<longrightarrow> hf1 @ hf2 = hf3 @ hf4 \<longrightarrow> (\<exists>hf\<in> parser_markers G. hf1 @ hf = hf3) \<or> (\<exists>hf\<in> parser_markers G. hf3 @ hf = hf1))))))"
apply(clarsimp)
apply(rename_tac G hf1 hf2 hf3 hf4)(*strict*)
apply(simp add: parser_markers_def parserHF_configurations_def)
apply(subgoal_tac "prefix hf1 hf3 \<or> prefix hf3 hf1")
apply(rename_tac G hf1 hf2 hf3 hf4)(*strict*)
prefer 2
apply(rule mutual_prefix_prefix)
apply(force)
apply(rename_tac G hf1 hf2 hf3 hf4)(*strict*)
apply(erule disjE)
apply(rename_tac G hf1 hf2 hf3 hf4)(*strict*)
apply(simp add: prefix_def)
apply(clarsimp)
apply(rename_tac G hf1 hf2 hf3 hf4)(*strict*)
apply(simp add: prefix_def)
apply(clarsimp)
apply(rename_tac G hf2 hf3 c)(*strict*)
apply(force)
done
lemma parserHF_inst_ATS_History_axioms: "
ATS_History_axioms valid_parser parserHF_configurations
parserHF_initial_configurations parserHF_step_relation parser_markers
parser_markers parser_empty_history parser_empty_history_fragment
parserHF_set_history (@) (@) parserHF_conf_history"
apply(simp add: ATS_History_axioms_def)
apply(simp add: parserHF_inst_AX_initial_history_empty parserHF_inst_AX_steps_extend_history parserHF_inst_AX_empty_history_is_history parserHF_inst_AX_set_get_history parserHF_inst_AX_get_set_history parserHF_inst_AX_join_history_fragments_closed parserHF_inst_AX_get_history_closed parserHF_inst_AX_mutual_prefix )
done
interpretation "parserHF" : loc_autHF_2
(* TSstructure *)
"valid_parser"
(* configurations *)
"parserHF_configurations"
(* initial_configurations *)
"parserHF_initial_configurations"
(* step_labels *)
"parser_step_labels"
(* step_relation *)
"parserHF_step_relation"
(* effects *)
"parser_markers"
(* marking_condition *)
"parserHF_marking_condition"
(* marked_effect *)
"parserHF_marked_effect"
(* unmarked_effect *)
"parserHF_unmarked_effect"
(* destinations *)
"parser_destinations"
(* get_destinations *)
"parserHF_get_destinations"
(* histories *)
"parser_markers"
(* history_fragments *)
"parser_markers"
(* empty_history *)
"parser_empty_history"
(* empty_history_fragment *)
"parser_empty_history_fragment"
(* set_history *)
"parserHF_set_history"
(* extend_history *)
"append"
(* join_history_fragments *)
"append"
(* get_history *)
"parserHF_conf_history"
apply(simp add: LOCALE_DEFS parser_interpretations)
apply(simp add: parserHF_inst_AX_initial_configuration_belongs parserHF_inst_AX_step_relation_preserves_belongs )
apply(simp add: parserHF_inst_ATS_axioms parserHF_inst_ATS_History_axioms )
done
lemma parserHF_inst_lang_finite: "
(\<forall>G. valid_parser G \<longrightarrow> parserHF.finite_marked_language G = parserHF.marked_language G)"
apply(clarsimp)
apply(rename_tac G)(*strict*)
apply(simp add: parserHF.finite_marked_language_def parserHF.marked_language_def)
apply(rule order_antisym)
apply(rename_tac G)(*strict*)
apply(clarsimp)
apply(rename_tac G x d n)(*strict*)
apply(rule_tac
x="d"
in exI)
apply(clarsimp)
apply(simp add: parserHF.derivation_initial_def)
apply(rename_tac G)(*strict*)
apply(clarsimp)
apply(rename_tac G x d)(*strict*)
apply(simp add: parserHF_marked_effect_def)
apply(clarsimp)
apply(rename_tac G d i e c)(*strict*)
apply(rule_tac
x="derivation_take d i"
in exI)
apply(rule context_conjI)
apply(rename_tac G d i e c)(*strict*)
apply(rule parserHF.derivation_take_preserves_derivation_initial)
apply(force)
apply(rename_tac G d i e c)(*strict*)
apply(rule conjI)
apply(rename_tac G d i e c)(*strict*)
apply(rule_tac
x="i"
in exI)
apply(rule_tac
x="e"
in exI)
apply(rule_tac
x="c"
in exI)
apply(clarsimp)
apply(simp add: derivation_take_def)
apply(rename_tac G d i e c)(*strict*)
apply(rule conjI)
apply(rename_tac G d i e c)(*strict*)
apply(simp add: parserHF_marking_condition_def)
apply(rule_tac
x="i"
in exI)
apply(rule_tac
x="e"
in exI)
apply(rule_tac
x="c"
in exI)
apply(clarsimp)
apply(rename_tac G d i e c ia ea ca)(*strict*)
apply(simp add: derivation_take_def)
apply(rename_tac G d i e c)(*strict*)
apply(rule_tac
x="i"
in exI)
apply(rule maximum_of_domain_derivation_take)
apply(force)
done
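(* The corresponding finite/unrestricted coincidence for the unmarked
   language is proved next, by the same derivation_take argument as in
   the marked case. *)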
lemma parserHF_inst_AX_unmarked_language_finite: "
(\<forall>G. valid_parser G \<longrightarrow> parserHF.finite_unmarked_language G = parserHF.unmarked_language G)"
apply(clarsimp)
apply(rename_tac G)(*strict*)
apply(simp add: parserHF.finite_unmarked_language_def parserHF.unmarked_language_def)
apply(rule order_antisym)
apply(rename_tac G)(*strict*)
apply(clarsimp)
apply(rename_tac G x d n)(*strict*)
apply(rule_tac
x="d"
in exI)
apply(clarsimp)
apply(simp add: parserHF.derivation_initial_def)
apply(rename_tac G)(*strict*)
apply(clarsimp)
apply(rename_tac G x d)(*strict*)
apply(simp add: parserHF_unmarked_effect_def)
apply(clarsimp)
apply(rename_tac G d i e c)(*strict*)
apply(rule_tac
x="derivation_take d i"
in exI)
apply(rule context_conjI)
apply(rename_tac G d i e c)(*strict*)
apply(rule parserHF.derivation_take_preserves_derivation_initial)
apply(force)
apply(rename_tac G d i e c)(*strict*)
apply(rule conjI)
apply(rename_tac G d i e c)(*strict*)
apply(rule_tac
x="i"
in exI)
apply(rule_tac
x="e"
in exI)
apply(rule_tac
x="c"
in exI)
apply(clarsimp)
apply(simp add: derivation_take_def)
apply(rename_tac G d i e c)(*strict*)
apply(rule_tac
x="i"
in exI)
apply(rule maximum_of_domain_derivation_take)
apply(force)
done
definition parserHF_get_fixed_scheduler_DB :: "
('stack, 'event, 'marker) parser
\<Rightarrow> (('stack, 'event) parser_step_label, ('stack, 'event) parserHF_conf) derivation
\<Rightarrow> nat
\<Rightarrow> 'event list"
where
"parserHF_get_fixed_scheduler_DB G d n \<equiv>
parserHF_conf_fixed (the (get_configuration (d n)))"
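(* An illustrative special case, added as a hedged sketch (not part of
   the original development): on the singleton derivation der1 c, the
   derivation-based fixed scheduler at position 0 is simply the fixed
   component of c. *)
lemma parserHF_get_fixed_scheduler_DB_der1: "
parserHF_get_fixed_scheduler_DB G (der1 c) 0 = parserHF_conf_fixed c"
apply(simp add: parserHF_get_fixed_scheduler_DB_def der1_def get_configuration_def)
done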
lemma parserHF_inst_AX_fixed_scheduler_extendable_translates_backwards: "
(\<forall>G. valid_parser G \<longrightarrow> (\<forall>c1. c1 \<in> parserHF_configurations G \<longrightarrow> (\<forall>e c2. parserHF_step_relation G c1 e c2 \<longrightarrow> \<not> parserHF_conf_fixed c2 \<sqsupseteq> [parser_bottom G] \<longrightarrow> \<not> parserHF_conf_fixed c1 \<sqsupseteq> [parser_bottom G])))"
apply(clarsimp)
apply(rename_tac G c1 e c2)(*strict*)
apply(subgoal_tac "valid_parser_step_label G e")
apply(rename_tac G c1 e c2)(*strict*)
prefer 2
apply(simp add: parserHF_step_relation_def valid_parser_def)
apply(rename_tac G c1 e c2)(*strict*)
apply(subgoal_tac "c2 \<in> parserHF_configurations G")
apply(rename_tac G c1 e c2)(*strict*)
prefer 2
apply(rule parserHF.AX_step_relation_preserves_belongsC)
apply(rename_tac G c1 e c2)(*strict*)
apply(force)
apply(rename_tac G c1 e c2)(*strict*)
apply(force)
apply(rename_tac G c1 e c2)(*strict*)
apply(force)
apply(rename_tac G c1 e c2)(*strict*)
apply(simp add: valid_parser_step_label_def suffix_def parserHF_configurations_def parserHF_step_relation_def prefix_def)
apply(clarsimp)
apply(rename_tac G e c k w xa ca cb xb)(*strict*)
apply(case_tac "length (kPrefix k (w @ [parser_bottom G])) - length c")
apply(rename_tac G e c k w xa ca cb xb)(*strict*)
apply(clarsimp)
apply(rename_tac G e c k w xa ca cb xb nat)(*strict*)
apply(clarsimp)
apply(simp add: kPrefix_def)
apply(case_tac "k-length w")
apply(rename_tac G e c k w xa ca cb xb nat)(*strict*)
prefer 2
apply(rename_tac G e c k w xa ca cb xb nat nata)(*strict*)
apply(clarsimp)
apply(rename_tac G e c k w xa ca cb xb nat nata x)(*strict*)
apply(erule_tac
x="x"
in allE)
apply(force)
apply(rename_tac G e c k w xa ca cb xb nat)(*strict*)
apply(clarsimp)
apply(subgoal_tac "min (length w) k = k")
apply(rename_tac G e c k w xa ca cb xb nat)(*strict*)
prefer 2
apply(force)
apply(rename_tac G e c k w xa ca cb xb nat)(*strict*)
apply(clarsimp)
apply(thin_tac "min (length w) k = k")
apply(erule disjE)
apply(rename_tac G e c k w xa ca cb xb nat)(*strict*)
apply(clarsimp)
apply(rename_tac G e c k w xa ca cb xb nat cc)(*strict*)
apply(case_tac cc)
apply(rename_tac G e c k w xa ca cb xb nat cc)(*strict*)
apply(clarsimp)
apply(rename_tac G e c k w xa ca cb xb nat x)(*strict*)
apply(erule_tac
x="x"
in allE)
apply(force)
apply(rename_tac G e c k w xa ca cb xb nat cc a list)(*strict*)
apply(subgoal_tac "\<exists>w' x'. cc = w' @ [x']")
apply(rename_tac G e c k w xa ca cb xb nat cc a list)(*strict*)
prefer 2
apply(rule NonEmptyListHasTailElem)
apply(force)
apply(rename_tac G e c k w xa ca cb xb nat cc a list)(*strict*)
apply(thin_tac "cc = a # list")
apply(clarsimp)
apply(rename_tac G e c k w xa ca cb xb nat)(*strict*)
apply(clarsimp)
apply(rename_tac G e c k w xa ca cb xb nat cc)(*strict*)
apply(subgoal_tac "parser_bottom G \<in> set w")
apply(rename_tac G e c k w xa ca cb xb nat cc)(*strict*)
apply(subgoal_tac "parser_bottom G \<notin> set w")
apply(rename_tac G e c k w xa ca cb xb nat cc)(*strict*)
apply(force)
apply(rename_tac G e c k w xa ca cb xb nat cc)(*strict*)
apply(rule_tac
A="parser_events G"
in not_in_diff)
apply(force)
apply(rename_tac G e c k w xa ca cb xb nat cc)(*strict*)
apply(rule_tac
A="set(take k w)"
in set_mp)
apply(rename_tac G e c k w xa ca cb xb nat cc)(*strict*)
apply(rule set_take_subset)
apply(rename_tac G e c k w xa ca cb xb nat cc)(*strict*)
apply(rule_tac
t="take k w"
and s="c @ [parser_bottom G] @ cc"
in ssubst)
apply(rename_tac G e c k w xa ca cb xb nat cc)(*strict*)
apply(force)
apply(rename_tac G e c k w xa ca cb xb nat cc)(*strict*)
apply(simp (no_asm))
done
lemma parserHF_inst_AX_get_fixed_scheduler_DB_in_fixed_schedulers: "
(\<forall>G. valid_parser G \<longrightarrow> (\<forall>d. parserHF.derivation G d \<longrightarrow> parserHF.belongs G d \<longrightarrow> (\<forall>n. (\<exists>y. d n = Some y) \<longrightarrow> parserHF_get_fixed_scheduler_DB G d n \<in> parser_fixed_schedulers G)))"
apply(clarsimp)
apply(rename_tac G d n y)(*strict*)
apply(simp add: parserHF_get_fixed_scheduler_DB_def)
apply(simp add: parser_fixed_schedulers_def)
apply(case_tac y)
apply(rename_tac G d n y option b)(*strict*)
apply(clarsimp)
apply(rename_tac G d n option b)(*strict*)
apply(simp add: get_configuration_def)
apply(subgoal_tac "b \<in> parserHF_configurations G")
apply(rename_tac G d n option b)(*strict*)
prefer 2
apply (metis parserHF.belongs_configurations)
apply(rename_tac G d n option b)(*strict*)
apply(simp add: parserHF_configurations_def parser_schedulers_def prefix_closure_def prefix_def)
apply(clarsimp)
apply(rename_tac G d n option f h l)(*strict*)
apply(case_tac "parser_bottom G \<in> set f")
apply(rename_tac G d n option f h l)(*strict*)
apply(clarsimp)
apply(rename_tac G d n option h l w)(*strict*)
apply(rule_tac
x="w @ [parser_bottom G]"
in exI)
apply(clarsimp)
apply(rename_tac G d n option f h l)(*strict*)
apply(clarsimp)
apply(rule_tac
x="f @ [parser_bottom G]"
in exI)
apply(clarsimp)
done
lemma parserHF_inst_AX_get_fixed_scheduler_DB_restrict: "
\<forall>G. valid_parser G \<longrightarrow> (\<forall>x n. x \<le> n \<longrightarrow> (\<forall>d1. parserHF.derivation G d1 \<longrightarrow> (\<forall>d2. parserHF_get_fixed_scheduler_DB G (derivation_append d1 d2 n) x = parserHF_get_fixed_scheduler_DB G d1 x)))"
apply(clarsimp)
apply(rename_tac G x n d1 d2)(*strict*)
apply(simp add: derivation_append_def parserHF_get_fixed_scheduler_DB_def)
done
lemma parserHF_inst_ATS_SchedF_SB_axioms: "
ATS_SchedF_SB_axioms valid_parser parserHF_configurations
parserHF_step_relation parser_fixed_scheduler_extendable
parserHF_conf_fixed"
apply(simp add: ATS_SchedF_SB_axioms_def)
apply(simp add: parserHF_inst_AX_fixed_scheduler_extendable_translates_backwards )
done
interpretation "parserHF" : loc_autHF_3
(* TSstructure *)
"valid_parser"
(* configurations *)
"parserHF_configurations"
(* initial_configurations *)
"parserHF_initial_configurations"
(* step_labels *)
"parser_step_labels"
(* step_relation *)
"parserHF_step_relation"
(* effects *)
"parser_markers"
(* marking_condition *)
"parserHF_marking_condition"
(* marked_effect *)
"parserHF_marked_effect"
(* unmarked_effect *)
"parserHF_unmarked_effect"
(* destinations *)
"parser_destinations"
(* get_destinations *)
"parserHF_get_destinations"
(* histories *)
"parser_markers"
(* history_fragments *)
"parser_markers"
(* empty_history *)
"parser_empty_history"
(* empty_history_fragment *)
"parser_empty_history_fragment"
(* set_history *)
"parserHF_set_history"
(* extend_history *)
"append"
(* join_history_fragments *)
"append"
(* get_history *)
"parserHF_conf_history"
(* fixed_schedulers *)
"parser_fixed_schedulers"
(* empty_fixed_scheduler *)
"parser_empty_fixed_scheduler"
(* fixed_scheduler_extendable *)
"parser_fixed_scheduler_extendable"
(* get_fixed_scheduler *)
"parserHF_conf_fixed"
apply(simp add: LOCALE_DEFS parser_interpretations)
apply(simp add: parserHF_inst_AX_initial_configuration_belongs parserHF_inst_AX_step_relation_preserves_belongs )
apply(simp add: parserHF_inst_ATS_axioms parserHF_inst_ATS_History_axioms parserHF_inst_ATS_SchedF_SB_axioms )
done
lemma parserHF_inst_AX_schedF_db_extendable_translates_backwards: "
(\<forall>G. valid_parser G \<longrightarrow> (\<forall>d1. parserHF.derivation G d1 \<longrightarrow> parserHF.belongs G d1 \<longrightarrow> (\<forall>n x. (\<exists>y. d1 (n + x) = Some y) \<longrightarrow> \<not> parserHF_get_fixed_scheduler_DB G d1 (n + x) \<sqsupseteq> [parser_bottom G] \<longrightarrow> \<not> parserHF_get_fixed_scheduler_DB G d1 n \<sqsupseteq> [parser_bottom G])))"
apply(clarsimp)
apply(rename_tac G d1 n x y)(*strict*)
apply(simp add: parserHF_get_fixed_scheduler_DB_def)
apply(subgoal_tac "\<exists>e c. d1 n= Some (pair e c)")
apply(rename_tac G d1 n x y)(*strict*)
apply(clarsimp)
apply(rename_tac G d1 n x y e c)(*strict*)
apply(simp add: get_configuration_def)
apply(case_tac y)
apply(rename_tac G d1 n x y e c option b)(*strict*)
apply(clarsimp)
apply(rename_tac G d1 n x e c option b)(*strict*)
apply(subgoal_tac "\<not> parserHF_conf_fixed c \<sqsupseteq> [parser_bottom G]")
apply(rename_tac G d1 n x e c option b)(*strict*)
prefer 2
apply(fold parser_fixed_scheduler_extendable_def)
apply(rule parserHF.fixed_scheduler_extendable_translates_backwards_lift)
apply(rename_tac G d1 n x e c option b)(*strict*)
apply(force)
apply(rename_tac G d1 n x e c option b)(*strict*)
apply(force)
apply(rename_tac G d1 n x e c option b)(*strict*)
apply (metis parserHF.belongs_configurations)
apply(rename_tac G d1 n x e c option b)(*strict*)
apply(force)
apply(rename_tac G d1 n x e c option b)(*strict*)
apply(force)
apply(rename_tac G d1 n x e c option b)(*strict*)
apply(force)
apply(rename_tac G d1 n x e c option b)(*strict*)
apply(force)
apply(rename_tac G d1 n x e c option b)(*strict*)
apply(force)
apply(rename_tac G d1 n x y)(*strict*)
apply(rule_tac
m="n+x"
in parserHF.pre_some_position_is_some_position)
apply(rename_tac G d1 n x y)(*strict*)
apply(force)
apply(rename_tac G d1 n x y)(*strict*)
apply(force)
apply(rename_tac G d1 n x y)(*strict*)
apply(force)
done
lemma parserHF_inst_AX_state_based_vs_derivation_based_get_fixed_scheduler: "
\<forall>G. valid_parser G \<longrightarrow> (\<forall>d. parserHF.derivation_initial G d \<longrightarrow> (\<forall>n e c. d n = Some (pair e c) \<longrightarrow> parserHF_get_fixed_scheduler_DB G d n = parserHF_conf_fixed c))"
apply(clarsimp)
apply(rename_tac G d n e c)(*strict*)
apply(simp add: parserHF_get_fixed_scheduler_DB_def)
apply(simp add: get_configuration_def)
done
lemma parserHF_inst_AX_history_no_mod_after_nonextendable_fixed_sched: "
\<forall>G. valid_parser G \<longrightarrow> (\<forall>c. parserHF_conf_fixed c \<sqsupseteq> [parser_bottom G] \<longrightarrow> c \<in> parserHF_configurations G \<longrightarrow> (\<forall>e c'. parserHF_step_relation G c e c' \<longrightarrow> parserHF_conf_history c = parserHF_conf_history c'))"
apply(clarsimp)
apply(rename_tac G c e c')(*strict*)
apply(simp add: parserHF_step_relation_def)
apply(clarsimp)
apply(rename_tac G c e c' x)(*strict*)
apply(simp add: prefix_def suffix_def)
apply(clarsimp)
apply(rename_tac G c e c' x ca)(*strict*)
apply(erule disjE)
apply(rename_tac G c e c' x ca)(*strict*)
apply(clarsimp)
apply(rename_tac G c e c' x ca cb)(*strict*)
apply (metis append_length_inc drop_entire_butlast_if_match drop_eq_Nil length_Suc)
apply(rename_tac G c e c' x ca)(*strict*)
apply(clarsimp)
apply(rename_tac G c e c' x ca cb)(*strict*)
apply(subgoal_tac "cb=[]")
apply(rename_tac G c e c' x ca cb)(*strict*)
apply(clarsimp)
apply(rename_tac G c e c' x ca)(*strict*)
apply (metis butlast_if_match_length_le length_Suc)
apply(rename_tac G c e c' x ca cb)(*strict*)
apply(subgoal_tac "valid_parser_step_label G e")
apply(rename_tac G c e c' x ca cb)(*strict*)
prefer 2
apply(simp add: valid_parser_def)
apply(rename_tac G c e c' x ca cb)(*strict*)
apply(simp add: valid_parser_step_label_def)
apply(clarsimp)
apply(rename_tac G c e c' x ca cb k w xb)(*strict*)
apply(simp add: kPrefix_def)
apply(case_tac "k-length w")
apply(rename_tac G c e c' x ca cb k w xb)(*strict*)
apply(clarsimp)
apply(subgoal_tac "parser_bottom G \<in> set w")
apply(rename_tac G c e c' x ca cb k w xb)(*strict*)
apply(force)
apply(rename_tac G c e c' x ca cb k w xb)(*strict*)
apply (metis Cons_eq_appendI append_Nil append_eq_appendI append_self_conv butlast_if_match_direct butlast_if_match_direct2_prime in_set_takeD kPrefix_def list.simps(2) take_append_prime)
apply(rename_tac G c e c' x ca cb k w xb nat)(*strict*)
apply(clarsimp)
apply(rename_tac G c e c' x ca cb k w xb nat xa)(*strict*)
apply(case_tac cb)
apply(rename_tac G c e c' x ca cb k w xb nat xa)(*strict*)
apply(force)
apply(rename_tac G c e c' x ca cb k w xb nat xa a list)(*strict*)
apply(subgoal_tac "\<exists>w' x'. cb = w' @ [x']")
apply(rename_tac G c e c' x ca cb k w xb nat xa a list)(*strict*)
prefer 2
apply(rule NonEmptyListHasTailElem)
apply(force)
apply(rename_tac G c e c' x ca cb k w xb nat xa a list)(*strict*)
apply(thin_tac "cb=a#list")
apply(clarsimp)
done
lemma parserHF_inst_ATS_SchedF_DB_axioms: "
ATS_SchedF_DB_axioms valid_parser parserHF_configurations
parser_step_labels parserHF_step_relation parser_fixed_schedulers
parser_fixed_scheduler_extendable parserHF_get_fixed_scheduler_DB"
apply(simp add: ATS_SchedF_DB_axioms_def)
apply(simp add: parserHF_inst_AX_get_fixed_scheduler_DB_in_fixed_schedulers parserHF_inst_AX_fixed_scheduler_extendable_translates_backwards parserHF_inst_AX_get_fixed_scheduler_DB_restrict parserHF_inst_AX_schedF_db_extendable_translates_backwards )
done
lemma parserHF_inst_ATS_SchedF_SDB_axioms: "
ATS_SchedF_SDB_axioms valid_parser parserHF_initial_configurations parserHF_step_relation parserHF_conf_fixed parserHF_get_fixed_scheduler_DB"
apply(simp add: ATS_SchedF_SDB_axioms_def)
apply(simp add: parserHF_inst_AX_state_based_vs_derivation_based_get_fixed_scheduler )
done
lemma parserHF_inst_ATS_determHIST_SB_axioms: "
ATS_determHIST_SB_axioms valid_parser parserHF_configurations
parserHF_step_relation parserHF_conf_history
parser_fixed_scheduler_extendable parserHF_conf_fixed"
apply(simp add: ATS_determHIST_SB_axioms_def)
apply(simp add: parserHF_inst_AX_history_no_mod_after_nonextendable_fixed_sched )
done
interpretation "parserHF" : loc_autHF_6
(* TSstructure *)
"valid_parser"
(* configurations *)
"parserHF_configurations"
(* initial_configurations *)
"parserHF_initial_configurations"
(* step_labels *)
"parser_step_labels"
(* step_relation *)
"parserHF_step_relation"
(* effects *)
"parser_markers"
(* marking_condition *)
"parserHF_marking_condition"
(* marked_effect *)
"parserHF_marked_effect"
(* unmarked_effect *)
"parserHF_unmarked_effect"
(* destinations *)
"parser_destinations"
(* get_destinations *)
"parserHF_get_destinations"
(* histories *)
"parser_markers"
(* history_fragments *)
"parser_markers"
(* empty_history *)
"parser_empty_history"
(* empty_history_fragment *)
"parser_empty_history_fragment"
(* set_history *)
"parserHF_set_history"
(* extend_history *)
"append"
(* join_history_fragments *)
"append"
(* get_history *)
"parserHF_conf_history"
(* fixed_schedulers *)
"parser_fixed_schedulers"
(* empty_fixed_scheduler *)
"parser_empty_fixed_scheduler"
(* fixed_scheduler_extendable *)
"parser_fixed_scheduler_extendable"
(* get_fixed_scheduler *)
"parserHF_conf_fixed"
(* get_fixed_scheduler_DB *)
"parserHF_get_fixed_scheduler_DB"
apply(simp add: LOCALE_DEFS parser_interpretations)
apply(simp add: parserHF_inst_AX_initial_configuration_belongs parserHF_inst_AX_step_relation_preserves_belongs )
apply(simp add: parserHF_inst_ATS_axioms parserHF_inst_ATS_History_axioms parserHF_inst_ATS_SchedF_SB_axioms parserHF_inst_ATS_SchedF_DB_axioms parserHF_inst_ATS_SchedF_SDB_axioms parserHF_inst_ATS_determHIST_SB_axioms )
done
lemma parserHF_inst_ATS_Language_by_Finite_Derivations_axioms: "
ATS_Language_by_Finite_Derivations_axioms valid_parser
parserHF_initial_configurations parserHF_step_relation
parserHF_marking_condition parserHF_marked_effect
parserHF_unmarked_effect"
apply(simp add: ATS_Language_by_Finite_Derivations_axioms_def)
apply(simp add: parserHF_inst_lang_finite parserHF_inst_AX_unmarked_language_finite )
done
interpretation "parserHF" : loc_autHF_7
(* TSstructure *)
"valid_parser"
(* configurations *)
"parserHF_configurations"
(* initial_configurations *)
"parserHF_initial_configurations"
(* step_labels *)
"parser_step_labels"
(* step_relation *)
"parserHF_step_relation"
(* effects *)
"parser_markers"
(* marking_condition *)
"parserHF_marking_condition"
(* marked_effect *)
"parserHF_marked_effect"
(* unmarked_effect *)
"parserHF_unmarked_effect"
(* destinations *)
"parser_destinations"
(* get_destinations *)
"parserHF_get_destinations"
(* histories *)
"parser_markers"
(* history_fragments *)
"parser_markers"
(* empty_history *)
"parser_empty_history"
(* empty_history_fragment *)
"parser_empty_history_fragment"
(* set_history *)
"parserHF_set_history"
(* extend_history *)
"append"
(* join_history_fragments *)
"append"
(* get_history *)
"parserHF_conf_history"
(* fixed_schedulers *)
"parser_fixed_schedulers"
(* empty_fixed_scheduler *)
"parser_empty_fixed_scheduler"
(* fixed_scheduler_extendable *)
"parser_fixed_scheduler_extendable"
(* get_fixed_scheduler *)
"parserHF_conf_fixed"
(* get_fixed_scheduler_DB *)
"parserHF_get_fixed_scheduler_DB"
apply(simp add: LOCALE_DEFS parser_interpretations)
apply(simp add: parserHF_inst_AX_initial_configuration_belongs parserHF_inst_AX_step_relation_preserves_belongs )
apply(simp add: parserHF_inst_ATS_axioms parserHF_inst_ATS_History_axioms parserHF_inst_ATS_SchedF_SB_axioms parserHF_inst_ATS_SchedF_DB_axioms parserHF_inst_ATS_SchedF_SDB_axioms parserHF_inst_ATS_determHIST_SB_axioms parserHF_inst_ATS_Language_by_Finite_Derivations_axioms )
done
lemma parserHF_inst_AX_is_forward_target_deterministic_correspond_SB: "
\<forall>G. valid_parser G \<longrightarrow> parserHF.is_forward_target_deterministic_accessible G = ATS_determHIST_SB.is_forward_target_deterministicHist_SB_long parserHF_initial_configurations parserHF_step_relation parser_markers (@) (@) parserHF_conf_history parser_fixed_scheduler_extendable parserHF_conf_fixed G"
apply(clarsimp)
apply(rename_tac G)(*strict*)
apply(rule order_antisym)
apply(rename_tac G)(*strict*)
apply(clarsimp)
apply(rule parserHF.is_forward_target_deterministic_accessible_implies_is_forward_target_deterministicHist_SB_long)
apply(rename_tac G)(*strict*)
apply(force)
apply(rename_tac G)(*strict*)
apply(force)
apply(rename_tac G)(*strict*)
apply(clarsimp)
apply(simp add: parserHF.is_forward_target_deterministic_accessible_def)
apply(simp add: parserHF.is_forward_target_deterministicHist_SB_long_def)
apply(clarsimp)
apply(rename_tac G c c1 c2 e)(*strict*)
apply(erule_tac
x="c"
in ballE)
apply(rename_tac G c c1 c2 e)(*strict*)
prefer 2
apply(force)
apply(rename_tac G c c1 c2 e)(*strict*)
apply(erule_tac
x="c1"
in allE)
apply(erule_tac
x="c2"
in allE)
apply(clarsimp)
apply(erule_tac
x="e"
in allE)
apply(clarsimp)
apply(simp add: parserHF_step_relation_def)
apply(clarsimp)
done
lemma parserHF_inst_ATS_HistoryCT_SB_axioms: "
ATS_HistoryCT_SB_axioms valid_parser parserHF_initial_configurations parserHF_step_relation parser_markers (@) (@) parserHF_conf_history parser_fixed_scheduler_extendable parserHF_conf_fixed"
apply(simp add: ATS_HistoryCT_SB_axioms_def)
apply(simp add: parserHF_inst_AX_is_forward_target_deterministic_correspond_SB )
done
interpretation "parserHF" : loc_autHF_8
(* TSstructure *)
"valid_parser"
(* configurations *)
"parserHF_configurations"
(* initial_configurations *)
"parserHF_initial_configurations"
(* step_labels *)
"parser_step_labels"
(* step_relation *)
"parserHF_step_relation"
(* effects *)
"parser_markers"
(* marking_condition *)
"parserHF_marking_condition"
(* marked_effect *)
"parserHF_marked_effect"
(* unmarked_effect *)
"parserHF_unmarked_effect"
(* destinations *)
"parser_destinations"
(* get_destinations *)
"parserHF_get_destinations"
(* histories *)
"parser_markers"
(* history_fragments *)
"parser_markers"
(* empty_history *)
"parser_empty_history"
(* empty_history_fragment *)
"parser_empty_history_fragment"
(* set_history *)
"parserHF_set_history"
(* extend_history *)
"append"
(* join_history_fragments *)
"append"
(* get_history *)
"parserHF_conf_history"
(* fixed_schedulers *)
"parser_fixed_schedulers"
(* empty_fixed_scheduler *)
"parser_empty_fixed_scheduler"
(* fixed_scheduler_extendable *)
"parser_fixed_scheduler_extendable"
(* get_fixed_scheduler *)
"parserHF_conf_fixed"
(* get_fixed_scheduler_DB *)
"parserHF_get_fixed_scheduler_DB"
apply(simp add: LOCALE_DEFS parser_interpretations)
apply(simp add: parserHF_inst_AX_initial_configuration_belongs parserHF_inst_AX_step_relation_preserves_belongs )
apply(simp add: parserHF_inst_ATS_axioms parserHF_inst_ATS_History_axioms parserHF_inst_ATS_SchedF_SB_axioms parserHF_inst_ATS_SchedF_DB_axioms parserHF_inst_ATS_SchedF_SDB_axioms parserHF_inst_ATS_determHIST_SB_axioms parserHF_inst_ATS_Language_by_Finite_Derivations_axioms parserHF_inst_ATS_HistoryCT_SB_axioms )
done
lemma parserHF_inst_AX_is_forward_target_deterministic_correspond_DB: "
\<forall>G. valid_parser G \<longrightarrow> parserHF.is_forward_target_deterministic_accessible G = ATS_determHIST_DB.is_forward_target_deterministicHist_DB_long parserHF_initial_configurations parserHF_step_relation parser_markers (@) (@) parserHF_conf_history parser_fixed_scheduler_extendable parserHF_get_fixed_scheduler_DB G"
apply(clarsimp)
apply(rename_tac G)(*strict*)
apply(rule order_antisym)
apply(rename_tac G)(*strict*)
apply(clarsimp)
apply(rule parserHF.is_forward_target_deterministic_accessible_implies_is_forward_target_deterministicHist_DB_long)
apply(rename_tac G)(*strict*)
apply(force)
apply(rename_tac G)(*strict*)
apply(force)
apply(rename_tac G)(*strict*)
apply(clarsimp)
apply(simp add: parserHF.is_forward_target_deterministic_accessible_def)
apply(simp add: parserHF.is_forward_target_deterministicHist_DB_long_def)
apply(clarsimp)
apply(rename_tac G c c1 c2 e)(*strict*)
apply(simp add: parserHF_step_relation_def)
apply(clarsimp)
done
lemma parserHF_inst_ATS_HistoryCT_DB_axioms: "
ATS_HistoryCT_DB_axioms valid_parser parserHF_initial_configurations parserHF_step_relation parser_markers (@) (@) parserHF_conf_history parser_fixed_scheduler_extendable parserHF_get_fixed_scheduler_DB"
apply(simp add: ATS_HistoryCT_DB_axioms_def)
apply(simp add: parserHF_inst_AX_is_forward_target_deterministic_correspond_DB )
done
interpretation "parserHF" : loc_autHF_9
(* TSstructure *)
"valid_parser"
(* configurations *)
"parserHF_configurations"
(* initial_configurations *)
"parserHF_initial_configurations"
(* step_labels *)
"parser_step_labels"
(* step_relation *)
"parserHF_step_relation"
(* effects *)
"parser_markers"
(* marking_condition *)
"parserHF_marking_condition"
(* marked_effect *)
"parserHF_marked_effect"
(* unmarked_effect *)
"parserHF_unmarked_effect"
(* destinations *)
"parser_destinations"
(* get_destinations *)
"parserHF_get_destinations"
(* histories *)
"parser_markers"
(* history_fragments *)
"parser_markers"
(* empty_history *)
"parser_empty_history"
(* empty_history_fragment *)
"parser_empty_history_fragment"
(* set_history *)
"parserHF_set_history"
(* extend_history *)
"append"
(* join_history_fragments *)
"append"
(* get_history *)
"parserHF_conf_history"
(* fixed_schedulers *)
"parser_fixed_schedulers"
(* empty_fixed_scheduler *)
"parser_empty_fixed_scheduler"
(* fixed_scheduler_extendable *)
"parser_fixed_scheduler_extendable"
(* get_fixed_scheduler *)
"parserHF_conf_fixed"
(* get_fixed_scheduler_DB *)
"parserHF_get_fixed_scheduler_DB"
apply(simp add: LOCALE_DEFS parser_interpretations)
apply(simp add: parserHF_inst_AX_initial_configuration_belongs parserHF_inst_AX_step_relation_preserves_belongs )
apply(simp add: parserHF_inst_ATS_axioms parserHF_inst_ATS_History_axioms parserHF_inst_ATS_SchedF_SB_axioms parserHF_inst_ATS_SchedF_DB_axioms parserHF_inst_ATS_SchedF_SDB_axioms parserHF_inst_ATS_determHIST_SB_axioms parserHF_inst_ATS_Language_by_Finite_Derivations_axioms parserHF_inst_ATS_HistoryCT_SB_axioms parserHF_inst_ATS_HistoryCT_DB_axioms )
done
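(* The lemma below links the language-level and branching-level
   nonblockingness notions: for a history-deterministic parser (in the
   state-based sense) whose unmarked language is nonblocking with respect
   to its marked language, the restricted branching nonblockingness
   property holds as well. *)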
lemma parserHF_inst_AX_BF_BraSBRest_DetHSB_LaOp: "
\<forall>M. valid_parser M \<longrightarrow>
ATS_determHIST_SB.is_forward_deterministicHist_SB
parserHF_initial_configurations parserHF_step_relation
parser_markers (@) (@) parserHF_conf_history
parser_fixed_scheduler_extendable parserHF_conf_fixed M \<longrightarrow>
nonblockingness_language
(ATS_Language0.unmarked_language parserHF_initial_configurations
parserHF_step_relation parserHF_unmarked_effect M)
(ATS_Language0.marked_language parserHF_initial_configurations
parserHF_step_relation parserHF_marking_condition
parserHF_marked_effect M) \<longrightarrow>
ATS_SchedF_SB.Nonblockingness_branching_restricted parserHF_configurations
parserHF_initial_configurations parser_step_labels
parserHF_step_relation parserHF_marking_condition
parser_fixed_scheduler_extendable parserHF_conf_fixed M"
apply(clarsimp)
apply(rename_tac M)(*strict*)
apply(simp add: parserHF.Nonblockingness_branching_restricted_def)
apply(clarsimp)
apply(rename_tac M dh n)(*strict*)
apply(subgoal_tac "\<exists>e c. dh n= Some (pair e c)")
apply(rename_tac M dh n)(*strict*)
prefer 2
apply(rule_tac
M="M"
in parserHF.some_position_has_details_before_max_dom)
apply(rename_tac M dh n)(*strict*)
apply (metis parserHF.derivation_initial_is_derivation)
apply(rename_tac M dh n)(*strict*)
apply(force)
apply(rename_tac M dh n)(*strict*)
apply(force)
apply(rename_tac M dh n)(*strict*)
apply(clarsimp)
apply(rename_tac M dh n e c)(*strict*)
apply(simp add: get_configuration_def)
apply(subgoal_tac "parserHF_conf_history c \<in> prefix_closure (parserHF.marked_language M)")
apply(rename_tac M dh n e c)(*strict*)
prefer 2
apply(simp add: nonblockingness_language_def)
apply(rename_tac M dh n e c)(*strict*)
apply(rule_tac
A=" parserHF.unmarked_language M"
in set_mp)
apply(rename_tac M dh n e c)(*strict*)
apply(force)
apply(rename_tac M dh n e c)(*strict*)
apply(thin_tac " parserHF.unmarked_language M \<subseteq> (prefix_closure (parserHF.marked_language M))")
apply(rename_tac M dh n e c)(*strict*)
apply(simp add: parserHF.unmarked_language_def)
apply(rule_tac
x="dh"
in exI)
apply(clarsimp)
apply(simp add: parserHF.derivation_initial_def)
apply(simp add: parserHF_unmarked_effect_def)
apply(clarsimp)
apply(force)
apply(rename_tac M dh n e c)(*strict*)
apply(thin_tac "nonblockingness_language (parserHF.unmarked_language M) (parserHF.marked_language M)")
apply(rename_tac M dh n e c)(*strict*)
apply(simp add: prefix_closure_def parserHF.marked_language_def prefix_def)
apply(clarsimp)
apply(rename_tac M dh n e c d ca)(*strict*)
apply(simp add: parserHF_marked_effect_def)
apply(clarsimp)
apply(rename_tac M dh n e c d ca i ea cb)(*strict*)
apply(subgoal_tac "\<exists>i e c. d i = Some (pair e c) \<and> c \<in> parserHF_marking_configurations M \<and> (\<forall>j e' c'. i < j \<and> d j = Some (pair e' c') \<longrightarrow> parserHF_conf_history c = parserHF_conf_history c' )")
apply(rename_tac M dh n e c d ca i ea cb)(*strict*)
prefer 2
apply(simp add: parserHF_marking_condition_def)
apply(rename_tac M dh n e c d ca i ea cb)(*strict*)
apply(thin_tac "parserHF_marking_condition M d")
apply(clarsimp)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc)(*strict*)
apply(subgoal_tac "dh 0 = d 0")
apply(rename_tac M dh n e c d ca i ea cb ia eb cc)(*strict*)
prefer 2
apply(simp add: parserHF.derivation_initial_def)
apply(case_tac "d 0")
apply(rename_tac M dh n e c d ca i ea cb ia eb cc)(*strict*)
apply(clarsimp)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc a)(*strict*)
apply(clarsimp)
apply(case_tac a)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc a option b)(*strict*)
apply(clarsimp)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc b)(*strict*)
apply(case_tac "dh 0")
apply(rename_tac M dh n e c d ca i ea cb ia eb cc b)(*strict*)
apply(clarsimp)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc b a)(*strict*)
apply(clarsimp)
apply(case_tac a)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc b a option ba)(*strict*)
apply(clarsimp)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc b ba)(*strict*)
apply(simp add: parserHF_initial_configurations_def)
(*
Choice of the continuation derivation dc, by case analysis on the
relative positions of i, ia and n:
ia \<le> i \<le> n : d@i ~ d@ia; d@ia ~~ dh@ia \<Longrightarrow> dh@n ~ dh@ia; dc = []
ia \<le> n \<le> i : d@i ~ d@ia; d@ia ~~ dh@ia \<Longrightarrow> dh@n ~ dh@ia; dc = []
n \<le> ia \<le> i : d@ia ~ d@ia \<Longrightarrow> dc = d (n\<dots>ia)
i \<le> ia \<le> n : d@ia ~ dh@ia \<Longrightarrow> dh@n ~ dh@ia; dc = []
i \<le> n \<le> ia : d@n ~ dh@n; dc = d (n\<dots>ia)
n \<le> i \<le> ia : d@n ~ dh@n; dc = d (n\<dots>ia)
In summary, the two cases that matter are:
ia \<le> n \<Longrightarrow> dc = []
n \<le> ia \<Longrightarrow> dc = d (n\<dots>ia)
*)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc)(*strict*)
apply(case_tac "ia\<le>n")
apply(rename_tac M dh n e c d ca i ea cb ia eb cc)(*strict*)
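   (* Case ia \<le> n: following the case notes above, the bridging derivation dc
      can be empty; it is realized here as the one-element derivation der1 c. *)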
apply(rule_tac
x="der1 c"
in exI)
apply(rule_tac
t="derivation_append dh (der1 c) n"
and s="dh"
in ssubst)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc)(*strict*)
apply(rule ext)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc x)(*strict*)
apply(simp add: derivation_append_def)
apply(clarsimp)
apply(simp add: der1_def)
apply(case_tac "dh x")
apply(rename_tac M dh n e c d ca i ea cb ia eb cc x)(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc x a)(*strict*)
apply(rule_tac
m="x"
and d="dh"
in parserHF.no_some_beyond_maximum_of_domain)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc x a)(*strict*)
apply(simp add: parserHF.derivation_initial_def)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc x a)(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc x a)(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc x a)(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc)(*strict*)
apply(rule conjI)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc)(*strict*)
apply(rule parserHF.der1_is_derivation)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc)(*strict*)
apply(rule conjI)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc)(*strict*)
apply(rule parserHF.der1_belongs)
apply(rule parserHF.belongs_configurations)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc)(*strict*)
apply(rule parserHF.derivation_initial_belongs)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc)(*strict*)
apply (metis)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc)(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc)(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc)(*strict*)
apply(rule conjI)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc)(*strict*)
apply(rule_tac
x="0"
in exI)
apply(rule der1_maximum_of_domain)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc)(*strict*)
apply(rule conjI)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc)(*strict*)
apply(simp add: derivation_append_fit_def der1_def)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc)(*strict*)
apply(simp add: parserHF_marking_condition_def)
(*
where does it accept continuously?
ia \<le> i \<le> n : d@i ~ d@ia; d@ia ~~ dh@ia \<Longrightarrow> d@ia=dh@ia; @ia @i
ia \<le> n \<le> i : d@i ~ d@ia; d@ia ~~ dh@ia \<Longrightarrow> dh@n ~ dh@ia; @ia
i \<le> ia \<le> n : d@ia ~ dh@ia \<Longrightarrow> dh@n ~ dh@ia; @ia
*)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc)(*strict*)
apply(rule_tac
x="ia"
in exI)
apply(subgoal_tac "dh ia = d ia")
apply(rename_tac M dh n e c d ca i ea cb ia eb cc)(*strict*)
prefer 2
apply(subgoal_tac "\<exists>e c. dh ia = Some (pair e c)")
apply(rename_tac M dh n e c d ca i ea cb ia eb cc)(*strict*)
prefer 2
apply(rule parserHF.some_position_has_details_before_max_dom)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc)(*strict*)
apply(simp add: parserHF.derivation_initial_def)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc)(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc)(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc)(*strict*)
apply(erule exE)+
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(subgoal_tac "\<exists>ca'. parserHF_conf_history c @ ca' = parserHF_conf_history cc")
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
prefer 2
apply(case_tac "i\<le>ia")
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
prefer 2
apply(erule_tac
x="i"
in allE)
apply(clarsimp)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(subgoal_tac "\<exists>ca'. parserHF_conf_history cb @ ca' = parserHF_conf_history cc")
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(clarsimp)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca')(*strict*)
apply(rule_tac
x="ca @ ca'"
in exI)
apply(rule_tac
t="parserHF_conf_history cc"
and s="parserHF_conf_history cb @ ca'"
in ssubst)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca')(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca')(*strict*)
apply(rule_tac
t="parserHF_conf_history cb"
and s="parserHF_conf_history c @ ca"
in ssubst)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca')(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca')(*strict*)
apply(simp (no_asm))
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(subgoal_tac "\<exists>h\<in> parser_markers M. parserHF_conf_history cc = parserHF_conf_history cb @ h")
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
prefer 2
apply(rule_tac
d="d"
and n="i"
and m="ia-i"
in parserHF.steps_extend_history_derivation)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(simp add: valid_bounded_parser_def)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(simp add: parserHF.derivation_initial_def)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(simp add: parserHF_marking_configurations_def)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(simp add: get_configuration_def)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(simp add: get_configuration_def)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(clarsimp)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(erule exE)+
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca')(*strict*)
apply(rule_tac
?d1.0="dh"
and n="ia"
and m="ia"
and ?d2.0="d"
and x="0"
and y="0"
in parserHF.is_forward_deterministicHist_derivations_coincide)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca')(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca')(*strict*)
apply(simp add: parserHF.derivation_initial_def)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca')(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca')(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca')(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca')(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca')(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca')(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca')(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca')(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca')(*strict*)
apply(simp add: get_configuration_def)
apply(subgoal_tac "\<exists>h\<in> parser_markers M. parserHF_conf_history c = parserHF_conf_history cd @ h")
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca')(*strict*)
prefer 2
apply(rule_tac
d="dh"
and n="ia"
and m="n-ia"
in parserHF.steps_extend_history_derivation)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca')(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca')(*strict*)
apply(simp add: parserHF.derivation_initial_def)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca')(*strict*)
apply(simp add: parserHF_marking_configurations_def)
apply(rule parserHF.belongs_configurations)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca')(*strict*)
apply(rule parserHF.derivation_initial_belongs)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca')(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca')(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca')(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca')(*strict*)
apply(simp add: get_configuration_def)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca')(*strict*)
apply(simp add: get_configuration_def)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca')(*strict*)
apply(erule bexE)+
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca' h)(*strict*)
apply(subgoal_tac "cb \<in> parserHF_configurations M")
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca' h)(*strict*)
prefer 2
apply(rule_tac
d="d"
in parserHF.belongs_configurations)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca' h)(*strict*)
apply(rule parserHF.derivation_initial_belongs)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca' h)(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca' h)(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca' h)(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca' h)(*strict*)
apply(case_tac "ia\<le>i")
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca' h)(*strict*)
apply(subgoal_tac "parserHF_conf_history cc = parserHF_conf_history cb")
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca' h)(*strict*)
prefer 2
apply(case_tac "ia<i")
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca' h)(*strict*)
apply(erule_tac
x="i"
in allE)
apply(clarsimp)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca' h)(*strict*)
apply(clarsimp)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca' h)(*strict*)
apply(clarsimp)
apply(rule_tac
x="h@ca"
in bexI)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca' h)(*strict*)
apply(clarsimp)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca' h)(*strict*)
apply(simp add: parser_markers_def)
apply(simp add: parserHF_configurations_def)
apply(clarsimp)
apply(rename_tac M dh n e c d ca i ea ia eb cc ec "cd" h x f l)(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca' h)(*strict*)
apply(subgoal_tac "\<exists>h\<in> parser_markers M. parserHF_conf_history cc = parserHF_conf_history cb @ h")
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca' h)(*strict*)
prefer 2
apply(rule_tac
d="d"
and n="i"
and m="ia-i"
in parserHF.steps_extend_history_derivation)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca' h)(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca' h)(*strict*)
apply(simp add: parserHF.derivation_initial_def)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca' h)(*strict*)
apply(simp add: parserHF_marking_configurations_def)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca' h)(*strict*)
apply(simp add: get_configuration_def)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca' h)(*strict*)
apply(simp add: get_configuration_def)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca' h)(*strict*)
apply(clarsimp)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca' h ha)(*strict*)
apply(rule_tac
x="h @ ca @ ha"
in bexI)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca' h ha)(*strict*)
apply(clarsimp)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca' h ha)(*strict*)
apply(simp add: parser_markers_def)
apply(simp add: parserHF_configurations_def)
apply(clarsimp)
apply(rename_tac M dh n e c d ca i ea ia eb cc ec "cd" h ha x f l)(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca')(*strict*)
apply(simp add: get_configuration_def)
apply(fold parser_fixed_scheduler_extendable_def)
apply(rule_tac
?d="dh"
in parserHF.fixed_scheduler_extendable_translates_backwards_lift)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca')(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca')(*strict*)
apply (metis parserHF.derivation_initial_is_derivation)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca')(*strict*)
apply (metis parserHF.belongs_configurations parserHF.derivation_initial_belongs)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca')(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca')(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca')(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" ca')(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc)(*strict*)
apply(subgoal_tac "\<exists>h\<in> parser_markers M. parserHF_conf_history c = parserHF_conf_history cc @ h")
apply(rename_tac M dh n e c d ca i ea cb ia eb cc)(*strict*)
prefer 2
apply(rule_tac
d="dh"
and n="ia"
and m="n-ia"
in parserHF.steps_extend_history_derivation)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc)(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc)(*strict*)
apply(simp add: parserHF.derivation_initial_def)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc)(*strict*)
apply(simp add: parserHF_marking_configurations_def)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc)(*strict*)
apply(simp add: get_configuration_def)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc)(*strict*)
apply(simp add: get_configuration_def)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc)(*strict*)
apply(erule bexE)+
apply(rename_tac M dh n e c d ca i ea cb ia eb cc h)(*strict*)
apply(subgoal_tac "h@ca=[]")
apply(rename_tac M dh n e c d ca i ea cb ia eb cc h)(*strict*)
prefer 2
apply(case_tac "ia\<le>i")
apply(rename_tac M dh n e c d ca i ea cb ia eb cc h)(*strict*)
apply(case_tac "ia<i")
apply(rename_tac M dh n e c d ca i ea cb ia eb cc h)(*strict*)
apply(erule_tac
x="i"
in allE)
apply(clarsimp)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc h)(*strict*)
apply(clarsimp)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc h)(*strict*)
apply(subgoal_tac "\<exists>h\<in> parser_markers M. parserHF_conf_history cc = parserHF_conf_history cb @ h")
apply(rename_tac M dh n e c d ca i ea cb ia eb cc h)(*strict*)
prefer 2
apply(rule_tac
d="d"
and n="i"
and m="ia-i"
in parserHF.steps_extend_history_derivation)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc h)(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc h)(*strict*)
apply(simp add: parserHF.derivation_initial_def)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc h)(*strict*)
apply(simp add: parserHF_marking_configurations_def)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc h)(*strict*)
apply(simp add: get_configuration_def)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc h)(*strict*)
apply(simp add: get_configuration_def)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc h)(*strict*)
apply(clarsimp)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc h)(*strict*)
apply(clarsimp)
apply(rename_tac M dh n e c d i ea cb ia eb cc j e' c')(*strict*)
apply(erule_tac
x="j"
in allE)
apply(clarsimp)
apply(subgoal_tac "j\<le>n")
apply(rename_tac M dh n e c d i ea cb ia eb cc j e' c')(*strict*)
prefer 2
apply(rule_tac
d="dh"
in parserHF.allPreMaxDomSome_prime)
apply(rename_tac M dh n e c d i ea cb ia eb cc j e' c')(*strict*)
apply(simp add: parserHF.derivation_initial_def)
apply(force)
apply(rename_tac M dh n e c d i ea cb ia eb cc j e' c')(*strict*)
apply(force)
apply(rename_tac M dh n e c d i ea cb ia eb cc j e' c')(*strict*)
apply(force)
apply(rename_tac M dh n e c d i ea cb ia eb cc j e' c')(*strict*)
apply(subgoal_tac "\<exists>h\<in> parser_markers M. parserHF_conf_history c = parserHF_conf_history c' @ h")
apply(rename_tac M dh n e c d i ea cb ia eb cc j e' c')(*strict*)
prefer 2
apply(rule_tac
d="dh"
and n="j"
and m="n-j"
in parserHF.steps_extend_history_derivation)
apply(rename_tac M dh n e c d i ea cb ia eb cc j e' c')(*strict*)
apply(force)
apply(rename_tac M dh n e c d i ea cb ia eb cc j e' c')(*strict*)
apply(simp add: parserHF.derivation_initial_def)
apply(rename_tac M dh n e c d i ea cb ia eb cc j e' c')(*strict*)
apply(simp add: parserHF_marking_configurations_def)
apply(rule parserHF.belongs_configurations)
apply(rename_tac M dh n e c d i ea cb ia eb cc j e' c')(*strict*)
apply(rule parserHF.derivation_initial_belongs)
apply(rename_tac M dh n e c d i ea cb ia eb cc j e' c')(*strict*)
apply(force)
apply(rename_tac M dh n e c d i ea cb ia eb cc j e' c')(*strict*)
apply(force)
apply(rename_tac M dh n e c d i ea cb ia eb cc j e' c')(*strict*)
apply(force)
apply(rename_tac M dh n e c d i ea cb ia eb cc j e' c')(*strict*)
apply(simp add: get_configuration_def)
apply(rename_tac M dh n e c d i ea cb ia eb cc j e' c')(*strict*)
apply(simp add: get_configuration_def)
apply(rename_tac M dh n e c d i ea cb ia eb cc j e' c')(*strict*)
apply(subgoal_tac "\<exists>h\<in> parser_markers M. parserHF_conf_history c' = parserHF_conf_history cc @ h")
apply(rename_tac M dh n e c d i ea cb ia eb cc j e' c')(*strict*)
prefer 2
apply(rule_tac
d="dh"
and n="ia"
and m="j-ia"
in parserHF.steps_extend_history_derivation)
apply(rename_tac M dh n e c d i ea cb ia eb cc j e' c')(*strict*)
apply(force)
apply(rename_tac M dh n e c d i ea cb ia eb cc j e' c')(*strict*)
apply(simp add: parserHF.derivation_initial_def)
apply(rename_tac M dh n e c d i ea cb ia eb cc j e' c')(*strict*)
apply(simp add: parserHF_marking_configurations_def)
apply(rename_tac M dh n e c d i ea cb ia eb cc j e' c')(*strict*)
apply(simp add: get_configuration_def)
apply(rename_tac M dh n e c d i ea cb ia eb cc j e' c')(*strict*)
apply(simp add: get_configuration_def)
apply(rename_tac M dh n e c d i ea cb ia eb cc j e' c')(*strict*)
apply(case_tac "d j")
apply(rename_tac M dh n e c d i ea cb ia eb cc j e' c')(*strict*)
apply(clarify)
apply(rename_tac M dh n e c d i ea cb ia eb cc j e' c' h ha)(*strict*)
apply(force)
apply(rename_tac M dh n e c d i ea cb ia eb cc j e' c' a)(*strict*)
apply(clarify)
apply(rename_tac M dh n e c d i ea cb ia eb cc j e' c' a h ha)(*strict*)
apply(case_tac a)
apply(rename_tac M dh n e c d i ea cb ia eb cc j e' c' a h ha option b)(*strict*)
apply(clarsimp)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc)(*strict*)
apply(subgoal_tac "ia>n")
apply(rename_tac M dh n e c d ca i ea cb ia eb cc)(*strict*)
prefer 2
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc)(*strict*)
apply(thin_tac "\<not> ia \<le> n")
apply(subgoal_tac "\<exists>e c. d n = Some (pair e c)")
apply(rename_tac M dh n e c d ca i ea cb ia eb cc)(*strict*)
prefer 2
apply(rule_tac
m="ia"
in parserHF.pre_some_position_is_some_position)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc)(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc)(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc)(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc)(*strict*)
apply(clarsimp)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(subgoal_tac "cd \<in> parserHF_configurations M")
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
prefer 2
apply(rule_tac
d="d"
in parserHF.belongs_configurations)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(rule parserHF.derivation_initial_belongs)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(subgoal_tac "c \<in> parserHF_configurations M")
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
prefer 2
apply(rule_tac
d="dh"
in parserHF.belongs_configurations)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(rule parserHF.derivation_initial_belongs)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(subgoal_tac "d n = dh n")
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
prefer 2
apply(rule sym)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(rule_tac
?d1.0="dh"
and n="n"
and m="ia"
and ?d2.0="d"
and x="0"
and y="0"
in parserHF.is_forward_deterministicHist_derivations_coincide)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(simp add: parserHF.derivation_initial_def)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(simp add: parserHF.derivation_initial_def)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(case_tac "d 0")
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(clarsimp)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" a)(*strict*)
apply(clarsimp)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(case_tac "dh 0")
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(clarsimp)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" a)(*strict*)
apply(clarsimp)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
prefer 2
apply(simp add: get_configuration_def)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(simp add: get_configuration_def)
apply(case_tac "ia<i")
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(erule_tac
x="i"
in allE)
apply(clarsimp)
apply(rule_tac
x="ca"
in bexI)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(subgoal_tac "cb \<in> parserHF_configurations M")
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(subgoal_tac "cc \<in> parserHF_configurations M")
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(simp add: parserHF_configurations_def parser_markers_def)
apply(clarsimp)
apply(rename_tac M dh n e d ca i ea ia eb ec x f fa fb fc h ha l la lb lc)(*strict*)
apply(rule_tac
A="set ca"
in set_mp)
apply(rename_tac M dh n e d ca i ea ia eb ec x f fa fb fc h ha l la lb lc)(*strict*)
apply(force)
apply(rename_tac M dh n e d ca i ea ia eb ec x f fa fb fc h ha l la lb lc)(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply (metis parserHF.belongs_configurations parserHF.derivation_initial_belongs)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply (metis parserHF.belongs_configurations parserHF.derivation_initial_belongs)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(subgoal_tac "i\<le>ia")
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
prefer 2
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(clarsimp)
apply(subgoal_tac "\<exists>hf\<in> parser_markers M. parserHF_conf_history cc = parserHF_conf_history cb @ hf")
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
prefer 2
apply(rule_tac
d="d"
and n="i"
and m="ia-i"
in parserHF.steps_extend_history_derivation)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply (metis parserHF.belongs_configurations parserHF.derivation_initial_belongs)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(simp add: get_configuration_def)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(simp add: get_configuration_def)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(clarsimp)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" hf)(*strict*)
apply(rule_tac
x="ca@hf"
in bexI)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" hf)(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" hf)(*strict*)
apply(subgoal_tac "cb \<in> parserHF_configurations M")
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" hf)(*strict*)
apply(subgoal_tac "cc \<in> parserHF_configurations M")
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" hf)(*strict*)
apply(simp add: parserHF_configurations_def parser_markers_def)
apply(clarsimp)
apply(rename_tac M dh n e d ca i ea ia eb ec hf x f fa fb fc h ha l la lb lc)(*strict*)
apply(rule_tac
A="set ca"
in set_mp)
apply(rename_tac M dh n e d ca i ea ia eb ec hf x f fa fb fc h ha l la lb lc)(*strict*)
apply(force)
apply(rename_tac M dh n e d ca i ea ia eb ec hf x f fa fb fc h ha l la lb lc)(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" hf)(*strict*)
apply (metis parserHF.belongs_configurations parserHF.derivation_initial_belongs)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd" hf)(*strict*)
apply (metis parserHF.belongs_configurations parserHF.derivation_initial_belongs)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
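  (* Case n < ia: following the case notes above, dc is the segment of d between
     n and ia, realized as derivation_drop (derivation_take d ia) n. *)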
apply(rule_tac
x="derivation_drop (derivation_take d ia) n"
in exI)
apply(rule conjI)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(rule_tac
m="ia-n"
in parserHF.derivation_drop_preserves_derivation_prime)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(rule parserHF.derivation_take_preserves_derivation)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(simp add: derivation_take_def)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(rule conjI)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(rule parserHF.derivation_drop_preserves_belongs)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(rule parserHF.derivation_take_preserves_derivation)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(rule parserHF.derivation_take_preserves_belongs)
apply(rule parserHF.derivation_initial_belongs)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(force)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(simp add: derivation_take_def)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(rule conjI)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(rule_tac
x="ia-n"
in exI)
apply(simp add: maximum_of_domain_def derivation_drop_def derivation_take_def)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(rule conjI)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(simp add: derivation_append_fit_def derivation_drop_def derivation_take_def)
apply(rename_tac M dh n e c d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(simp add: parserHF_marking_condition_def)
apply(clarsimp)
apply(rename_tac M dh n d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(rule_tac
x="ia"
in exI)
apply(rule_tac
x="eb"
in exI)
apply(rule_tac
x="cc"
in exI)
apply(clarsimp)
apply(rule conjI)
apply(rename_tac M dh n d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(simp add: derivation_append_def derivation_drop_def derivation_take_def)
apply(rename_tac M dh n d ca i ea cb ia eb cc ec "cd")(*strict*)
apply(clarsimp)
apply(rename_tac M dh n d ca i ea cb ia eb cc ec "cd" j e' c')(*strict*)
apply(simp add: derivation_append_def derivation_drop_def derivation_take_def)
done
lemma parserHF_inst_BF_BraSBRest_DetHSB_LaOp_axioms: "
BF_BraSBRest_DetHSB_LaOp_axioms valid_parser parserHF_configurations
parserHF_initial_configurations parser_step_labels
parserHF_step_relation parserHF_marking_condition
parserHF_marked_effect parserHF_unmarked_effect parser_markers (@)
(@) parserHF_conf_history parser_fixed_scheduler_extendable
parserHF_conf_fixed"
apply(simp add: BF_BraSBRest_DetHSB_LaOp_axioms_def)
apply(rule parserHF_inst_AX_BF_BraSBRest_DetHSB_LaOp)
done
lemma parserHF_inst_BF_BraSBRest_DetHDB_LaOp_axioms: "
BF_BraSBRest_DetHDB_LaOp_axioms valid_parser parserHF_configurations
parserHF_initial_configurations parser_step_labels
parserHF_step_relation parserHF_marking_condition
parserHF_marked_effect parserHF_unmarked_effect parser_markers (@)
(@) parserHF_conf_history parser_fixed_scheduler_extendable
parserHF_get_fixed_scheduler_DB parserHF_conf_fixed"
apply(simp add: BF_BraSBRest_DetHDB_LaOp_axioms_def)
apply(clarsimp)
apply(rename_tac M)(*strict*)
apply(subgoal_tac "parserHF.is_forward_deterministicHist_SB M")
apply(rename_tac M)(*strict*)
apply (metis parserHF_inst_AX_BF_BraSBRest_DetHSB_LaOp)
apply(rename_tac M)(*strict*)
apply(thin_tac "nonblockingness_language (parserHF.unmarked_language M) (parserHF.marked_language M)")
apply(rename_tac M)(*strict*)
apply (metis parserHF.is_forward_deterministic_correspond_DB_SB)
done
lemma parserHF_inst_BF_BraDBRest_DetHSB_LaOp_axioms: "
BF_BraDBRest_DetHSB_LaOp_axioms valid_parser parserHF_configurations
parserHF_initial_configurations parser_step_labels
parserHF_step_relation parserHF_marking_condition
parserHF_marked_effect parserHF_unmarked_effect parser_markers (@)
(@) parserHF_conf_history parser_fixed_scheduler_extendable
parserHF_conf_fixed parserHF_get_fixed_scheduler_DB"
apply(simp add: BF_BraDBRest_DetHSB_LaOp_axioms_def)
apply(clarsimp)
apply(rename_tac M)(*strict*)
apply(rule_tac
t="ATS_SchedF_DB.Nonblockingness_branching_restricted_DB
parserHF_configurations parserHF_initial_configurations
parser_step_labels parserHF_step_relation
parserHF_marking_condition parser_fixed_scheduler_extendable
parserHF_get_fixed_scheduler_DB M"
and s="parserHF.Nonblockingness_branching_restricted M"
in subst)
apply(rename_tac M)(*strict*)
apply(rule parserHF.Nonblockingness_branching_SB_DB_restricted)
apply(force)
apply(rename_tac M)(*strict*)
apply(subgoal_tac "BF_BraSBRest_DetHDB_LaOp_axioms valid_parser parserHF_configurations
parserHF_initial_configurations parser_step_labels
parserHF_step_relation parserHF_marking_condition
parserHF_marked_effect parserHF_unmarked_effect parser_markers (@)
(@) parserHF_conf_history parser_fixed_scheduler_extendable
parserHF_get_fixed_scheduler_DB parserHF_conf_fixed")
apply(rename_tac M)(*strict*)
apply(simp add: BF_BraSBRest_DetHDB_LaOp_axioms_def)
apply(erule_tac
x="M"
in allE)
apply(erule impE)
apply(rename_tac M)(*strict*)
apply(force)
apply(rename_tac M)(*strict*)
apply(erule impE)
apply(rename_tac M)(*strict*)
apply (metis parserHF.is_forward_deterministic_correspond_DB_SB)
apply(rename_tac M)(*strict*)
apply(erule impE)
apply(rename_tac M)(*strict*)
apply(force)
apply(rename_tac M)(*strict*)
apply(force)
apply(rename_tac M)(*strict*)
apply(rule parserHF_inst_BF_BraSBRest_DetHDB_LaOp_axioms)
done
lemma parserHF_inst_BF_BraDBRest_DetHDB_LaOp_axioms: "
BF_BraDBRest_DetHDB_LaOp_axioms valid_parser parserHF_configurations
parserHF_initial_configurations parser_step_labels
parserHF_step_relation parserHF_marking_condition
parserHF_marked_effect parserHF_unmarked_effect parser_markers (@)
(@) parserHF_conf_history parser_fixed_scheduler_extendable
parserHF_get_fixed_scheduler_DB"
apply(simp add: BF_BraDBRest_DetHDB_LaOp_axioms_def)
apply(clarsimp)
apply(rename_tac M)(*strict*)
apply(subgoal_tac "BF_BraDBRest_DetHSB_LaOp_axioms valid_parser parserHF_configurations
parserHF_initial_configurations parser_step_labels
parserHF_step_relation parserHF_marking_condition
parserHF_marked_effect parserHF_unmarked_effect parser_markers (@)
(@) parserHF_conf_history parser_fixed_scheduler_extendable
parserHF_conf_fixed parserHF_get_fixed_scheduler_DB")
apply(rename_tac M)(*strict*)
apply(simp add: BF_BraDBRest_DetHSB_LaOp_axioms_def)
apply(erule_tac
x="M"
in allE)
apply(erule impE)
apply(rename_tac M)(*strict*)
apply(force)
apply(rename_tac M)(*strict*)
apply(erule impE)
apply(rename_tac M)(*strict*)
apply (metis parserHF.is_forward_deterministic_correspond_DB_SB)
apply(rename_tac M)(*strict*)
apply(erule impE)
apply(rename_tac M)(*strict*)
apply(force)
apply(rename_tac M)(*strict*)
apply(force)
apply(rename_tac M)(*strict*)
apply(rule parserHF_inst_BF_BraDBRest_DetHSB_LaOp_axioms)
done
lemma parserHF_inst_BF_Bra_OpLa_axioms: "
BF_Bra_OpLa_axioms valid_parser parserHF_configurations
parserHF_initial_configurations parser_step_labels
parserHF_step_relation parserHF_marking_condition
parserHF_marked_effect parserHF_unmarked_effect"
apply(simp add: BF_Bra_OpLa_axioms_def)
apply(clarsimp)
apply(rename_tac M)(*strict*)
apply(simp add: nonblockingness_language_def)
apply(clarsimp)
apply(rename_tac M xa)(*strict*)
apply(simp add: prefix_closure_def prefix_def)
apply(simp add: parserHF.unmarked_language_def)
apply(clarsimp)
apply(rename_tac M xa d)(*strict*)
apply(simp add: parserHF.Nonblockingness_branching_def)
apply(simp add: parserHF_unmarked_effect_def)
apply(clarsimp)
apply(rename_tac M d i e c)(*strict*)
apply(erule_tac
x="derivation_take d i"
in allE)
apply(erule impE)
apply(rename_tac M d i e c)(*strict*)
apply(rename_tac M d i e c)(*strict*)
apply(rule parserHF.derivation_take_preserves_derivation_initial)
apply(force)
apply(rename_tac M d i e c)(*strict*)
apply(erule_tac
x="i"
in allE)
apply(erule impE)
apply(rename_tac M d i e c)(*strict*)
apply(rule maximum_of_domain_derivation_take)
apply(force)
apply(rename_tac M d i e c)(*strict*)
apply(clarsimp)
apply(rename_tac M d i e c dc x)(*strict*)
apply(simp add: parserHF_marking_condition_def)
apply(clarsimp)
apply(rename_tac M d i e c dc x ia ea ca)(*strict*)
apply(rule_tac
x="parserHF_conf_history ca"
in exI)
apply(rule conjI)
apply(rename_tac M d i e c dc x ia ea ca)(*strict*)
apply(simp add: parserHF.marked_language_def)
apply(rule_tac
x="derivation_append (derivation_take d i) dc i"
in exI)
apply(rule context_conjI)
apply(rename_tac M d i e c dc x ia ea ca)(*strict*)
apply(rule parserHF.derivation_append_preserves_derivation_initial)
apply(rename_tac M d i e c dc x ia ea ca)(*strict*)
apply(force)
apply(rename_tac M d i e c dc x ia ea ca)(*strict*)
apply(rule parserHF.derivation_take_preserves_derivation_initial)
apply(force)
apply(rename_tac M d i e c dc x ia ea ca)(*strict*)
apply(rule parserHF.derivation_append_preserves_derivation)
apply(rename_tac M d i e c dc x ia ea ca)(*strict*)
apply(rule parserHF.derivation_take_preserves_derivation)
apply(force)
apply(rename_tac M d i e c dc x ia ea ca)(*strict*)
apply(force)
apply(rename_tac M d i e c dc x ia ea ca)(*strict*)
apply(simp add: derivation_take_def)
apply(simp add: derivation_append_fit_def)
apply(case_tac "dc 0")
apply(rename_tac M d i e c dc x ia ea ca)(*strict*)
apply(force)
apply(rename_tac M d i e c dc x ia ea ca a)(*strict*)
apply(clarsimp)
apply(case_tac a)
apply(rename_tac M d i e c dc x ia ea ca a option b)(*strict*)
apply(clarsimp)
apply(rename_tac M d i e c dc x ia ea ca option b)(*strict*)
apply(case_tac option)
apply(rename_tac M d i e c dc x ia ea ca option b)(*strict*)
apply(clarsimp)
apply(rename_tac M d i e c dc x ia ea ca option b a)(*strict*)
apply(clarsimp)
apply(rename_tac M d i e c dc x ia ea ca)(*strict*)
apply(rule conjI)
apply(rename_tac M d i e c dc x ia ea ca)(*strict*)
apply(simp add: parserHF_marked_effect_def)
apply(rule_tac
x="ia"
in exI)
apply(rule_tac
x="ea"
in exI)
apply(rule_tac
x="ca"
in exI)
apply(clarsimp)
apply(rename_tac M d i e c dc x ia ea ca)(*strict*)
apply(simp add: parserHF.derivation_initial_def)
apply(simp add: parserHF_marking_condition_def)
apply(rule_tac
x="ia"
in exI)
apply(rule_tac
x="ea"
in exI)
apply(rule_tac
x="ca"
in exI)
apply(clarsimp)
apply(rename_tac M d i e c dc x ia ea ca)(*strict*)
apply(case_tac "ia<i")
apply(rename_tac M d i e c dc x ia ea ca)(*strict*)
apply(rule_tac
x="[]"
in exI)
apply(clarsimp)
apply(erule_tac
x="i"
in allE)
apply(erule_tac
x="e"
in allE)
apply(erule_tac
x="c"
in allE)
apply(clarsimp)
apply(simp add: derivation_append_def derivation_take_def)
apply(rename_tac M d i e c dc x ia ea ca)(*strict*)
apply(subgoal_tac "\<exists>h\<in> parser_markers M. parserHF_conf_history SSc' = parserHF_conf_history SSc @ h" for SSc' SSc)
apply(rename_tac M d i e c dc x ia ea ca)(*strict*)
prefer 2
apply(rule_tac
d="derivation_append (derivation_take d i) dc i"
and n="i"
and m="ia-i"
in parserHF.steps_extend_history_derivation)
apply(rename_tac M d i e c dc x ia ea ca)(*strict*)
apply(force)
apply(rename_tac M d i e c dc x ia ea ca)(*strict*)
apply(simp add: parserHF.derivation_initial_def)
apply(rule parserHF.derivation_append_preserves_derivation)
apply(rename_tac M d i e c dc x ia ea ca)(*strict*)
apply(rule parserHF.derivation_take_preserves_derivation)
apply(force)
apply(rename_tac M d i e c dc x ia ea ca)(*strict*)
apply(force)
apply(rename_tac M d i e c dc x ia ea ca)(*strict*)
apply(simp add: derivation_take_def derivation_append_fit_def)
apply(case_tac "dc 0")
apply(rename_tac M d i e c dc x ia ea ca)(*strict*)
apply(force)
apply(rename_tac M d i e c dc x ia ea ca a)(*strict*)
apply(clarsimp)
apply(case_tac a)
apply(rename_tac M d i e c dc x ia ea ca a option b)(*strict*)
apply(clarsimp)
apply(rename_tac M d i e c dc x ia ea ca option b)(*strict*)
apply(case_tac option)
apply(rename_tac M d i e c dc x ia ea ca option b)(*strict*)
apply(clarsimp)
apply(rename_tac M d i e c dc x ia ea ca option b a)(*strict*)
apply(clarsimp)
apply(rename_tac M d i e c dc x ia ea ca)(*strict*)
prefer 2
apply(simp add: get_configuration_def derivation_append_def derivation_take_def)
apply(rename_tac M d i e c dc x ia ea ca)(*strict*)
apply(rule parserHF.belongs_configurations)
apply(rename_tac M d i e c dc x ia ea ca)(*strict*)
apply(rule parserHF.derivation_initial_belongs)
apply(rename_tac M d i e c dc x ia ea ca)(*strict*)
apply(force)
apply(rename_tac M d i e c dc x ia ea ca)(*strict*)
apply(force)
apply(rename_tac M d i e c dc x ia ea ca)(*strict*)
apply(force)
apply(rename_tac M d i e c dc x ia ea ca)(*strict*)
apply(simp add: get_configuration_def derivation_append_def derivation_take_def)
apply(rename_tac M d i e c dc x ia ea ca)(*strict*)
apply(force)
done
interpretation "parserHF" : loc_autHF_10
(* TSstructure *)
"valid_parser"
(* configurations *)
"parserHF_configurations"
(* initial_configurations *)
"parserHF_initial_configurations"
(* step_labels *)
"parser_step_labels"
(* step_relation *)
"parserHF_step_relation"
(* effects *)
"parser_markers"
(* marking_condition *)
"parserHF_marking_condition"
(* marked_effect *)
"parserHF_marked_effect"
(* unmarked_effect *)
"parserHF_unmarked_effect"
(* destinations *)
"parser_destinations"
(* get_destinations *)
"parserHF_get_destinations"
(* histories *)
"parser_markers"
(* history_fragments *)
"parser_markers"
(* empty_history *)
"parser_empty_history"
(* empty_history_fragment *)
"parser_empty_history_fragment"
(* set_history *)
"parserHF_set_history"
(* extend_history *)
"append"
(* join_history_fragments *)
"append"
(* get_history *)
"parserHF_conf_history"
(* fixed_schedulers *)
"parser_fixed_schedulers"
(* empty_fixed_scheduler *)
"parser_empty_fixed_scheduler"
(* fixed_scheduler_extendable *)
"parser_fixed_scheduler_extendable"
(* get_fixed_scheduler *)
"parserHF_conf_fixed"
(* get_fixed_scheduler_DB *)
"parserHF_get_fixed_scheduler_DB"
apply(simp add: LOCALE_DEFS parser_interpretations)
apply(simp add: parserHF_inst_AX_initial_configuration_belongs parserHF_inst_AX_step_relation_preserves_belongs )
apply(simp add: parserHF_inst_ATS_axioms parserHF_inst_ATS_History_axioms parserHF_inst_ATS_SchedF_SB_axioms parserHF_inst_ATS_SchedF_DB_axioms parserHF_inst_ATS_SchedF_SDB_axioms parserHF_inst_ATS_determHIST_SB_axioms parserHF_inst_ATS_Language_by_Finite_Derivations_axioms parserHF_inst_ATS_HistoryCT_SB_axioms parserHF_inst_ATS_HistoryCT_DB_axioms parserHF_inst_BF_BraSBRest_DetHDB_LaOp_axioms parserHF_inst_BF_BraSBRest_DetHSB_LaOp_axioms parserHF_inst_BF_BraDBRest_DetHSB_LaOp_axioms parserHF_inst_BF_BraDBRest_DetHDB_LaOp_axioms parserHF_inst_BF_Bra_OpLa_axioms )
done
lemma parserHF_history_prefix_makes_prefix: "
w1 \<in> parser_markers G
\<Longrightarrow> ATS_History.history_fragment_prefixes parser_markers (@) G w1 \<subseteq> ATS_History.history_fragment_prefixes parser_markers (@) G w2
\<Longrightarrow> w1 \<sqsubseteq> w2"
apply(simp add: parserHF.history_fragment_prefixes_def)
apply(simp add: prefix_def)
apply(subgoal_tac "w1 \<in> {hf' \<in> parser_markers G. \<exists>hf''\<in> parser_markers G. hf' @ hf'' = w1}")
prefer 2
apply(clarsimp)
apply(simp add: parser_markers_def)
apply(subgoal_tac "w1 \<in> {hf' \<in> parser_markers G. \<exists>hf''\<in> parser_markers G. hf' @ hf'' = w2}")
prefer 2
apply(force)
apply(thin_tac "{hf' \<in> parser_markers G. \<exists>hf''\<in> parser_markers G. hf' @ hf'' = w1} \<subseteq> {hf' \<in> parser_markers G. \<exists>hf''\<in> parser_markers G. hf' @ hf'' = w2}")
apply(thin_tac "w1 \<in> {hf' \<in> parser_markers G. \<exists>hf''\<in> parser_markers G. hf' @ hf'' = w1}")
apply(force)
done
lemma parserHF_history_prefix_makes_prefix_mutual: "
w1 \<in> parser_markers G
\<Longrightarrow> w2 \<in> parser_markers G
\<Longrightarrow> ATS_History.history_fragment_prefixes parser_markers (@) G w1 \<subseteq> ATS_History.history_fragment_prefixes parser_markers (@) G w2 \<or> ATS_History.history_fragment_prefixes parser_markers (@) G w2 \<subseteq> ATS_History.history_fragment_prefixes parser_markers (@) G w1
\<Longrightarrow> prefix w1 w2 \<or> prefix w2 w1"
apply(erule disjE)
apply(rule disjI1)
apply(rule parserHF_history_prefix_makes_prefix)
apply(force)
apply(force)
apply(rule disjI2)
apply(rule parserHF_history_prefix_makes_prefix)
apply(force)
apply(force)
done
lemma parserHF_is_forward_target_deterministicHist_DB_long: "
valid_parser G
\<Longrightarrow> parserHF.is_forward_target_deterministicHist_DB_long G"
apply(simp add: parserHF.is_forward_target_deterministicHist_DB_long_def)
apply(clarsimp)
apply(rename_tac c d n c1 c2 e w1 w2)(*strict*)
apply(simp add: parserHF_step_relation_def)
apply(clarsimp)
done
lemmas parserHF_interpretations =
parser_interpretations
parserHF_inst_AX_initial_configuration_belongs
parserHF_inst_AX_step_relation_preserves_belongs
parserHF_inst_ATS_axioms
parserHF_inst_ATS_History_axioms
parserHF_inst_ATS_SchedF_SB_axioms
parserHF_inst_ATS_SchedF_DB_axioms
parserHF_inst_ATS_SchedF_SDB_axioms
parserHF_inst_ATS_determHIST_SB_axioms
parserHF_inst_ATS_Language_by_Finite_Derivations_axioms
parserHF_inst_ATS_HistoryCT_SB_axioms
parserHF_inst_ATS_HistoryCT_DB_axioms
parserHF_inst_BF_BraSBRest_DetHDB_LaOp_axioms
parserHF_inst_BF_BraSBRest_DetHSB_LaOp_axioms
parserHF_inst_BF_BraDBRest_DetHSB_LaOp_axioms
parserHF_inst_BF_BraDBRest_DetHDB_LaOp_axioms
parserHF_inst_BF_Bra_OpLa_axioms
end
|
From Test Require Import tactic.
Section FOFProblem.
Variable Universe : Set.
Variable UniverseElement : Universe.
Variable wd_ : Universe -> Universe -> Prop.
Variable col_ : Universe -> Universe -> Universe -> Prop.
Variable col_swap1_1 : (forall A B C : Universe, (col_ A B C -> col_ B A C)).
Variable col_swap2_2 : (forall A B C : Universe, (col_ A B C -> col_ B C A)).
Variable col_triv_3 : (forall A B : Universe, col_ A B B).
Variable wd_swap_4 : (forall A B : Universe, (wd_ A B -> wd_ B A)).
Variable col_trans_5 : (forall P Q A B C : Universe, ((wd_ P Q /\ (col_ P Q A /\ (col_ P Q B /\ col_ P Q C))) -> col_ A B C)).
Theorem pipo_6 : (forall O E Eprime A B C Bprime Aprime : Universe, ((wd_ A O /\ (wd_ B O /\ (wd_ C O /\ (wd_ O E /\ (wd_ O Eprime /\ (wd_ E Eprime /\ (wd_ A Aprime /\ (col_ O E A /\ (col_ O E B /\ (col_ O E C /\ (col_ O Eprime Bprime /\ col_ O Eprime Aprime))))))))))) -> col_ O B A)).
Proof.
time tac.
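(* One plausible route for the automated tactic: instantiate col_trans_5 with
   P := O and Q := E, using the hypotheses col_ O E A and col_ O E B, and
   obtaining col_ O E O from col_triv_3 together with the swap lemmas. *)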
Qed.
End FOFProblem.
|
Joel posted this at the end of his response to my “Argument from Evolution” article.
There it was slightly off topic, and I thought it warranted a full response in a separate space.
First, nobody (sensible) teaches morality from the ritual law of the Old Testament. You cannot impugn the moral law by citing examples of the ritual law about what is ceremonially clean or unclean, and that has no moral dimension to it. Second, nobody teaches morality from the false characterisation of God you give. God is an omni-benevolent, morally perfect being according to scripture and you’d do well to discard the false picture of the Christian view of God you have.
Here is my response, point-by-point, to the Humanist ethic communicated in the Amsterdam Declaration 2002. There is much here that the Christian can find in common with the humanist, but I think the question is which ethic (Christian or Humanist) provides a better account of our shared understanding of moral duties, values and accountabilities. Also, insofar as Humanism implies naturalism, humanism is deeply incoherent, as I shall show.
The ‘long tradition’ stretches all the way back to 1952, the first World Humanist Congress. The facts are, however, that the ‘many great thinkers and artists’ humanism can legitimately claim are drowned out by the influence of Christianity. In thought and art, Christianity undeniably far outweighs any paltry offering humanism makes. Whether humanism can sustain the practice of science is a discussion for another time, but the thought that Humanism gave rise to science itself is laughable. Only Christianity provides an epistemological foundation for scientists seeking to make sense of the universe, and almost every major field of science was founded by a Christian, working specifically from a Christian worldview.
Just consider these few scientists who were Christians: Isaac Newton, the father of modern physics; William Turner, the father of English botany; Johannes Kepler, who discovered the planetary laws of motion; Galileo Galilei, the father of modern astronomy; Rene Descartes, philosopher and mathematician; Blaise Pascal, physicist and mathematician who defended the scientific method; Robert Boyle, the first modern chemist; Louis Pasteur, inventor of the pasteurization method; Gregor Mendel, the father of modern genetics; Lord Kelvin, important in thermodynamics; Max Planck, the founder of quantum mechanics; and the list goes on.
Humanism provides no metaethical foundation for its ethical system. Why is a metaethical foundation necessary? One is apt to ask why the human has worth, dignity and autonomy. To finally rest the foundations of a morality on the worth of a human is ad hoc, especially after the humanist’s naturalistic view of evolution makes men into mere animals. Evolution is the great leveller. What’s so special about humans on naturalism? We’re just fortunate sacks of molecules in motion that have survived against the odds by tooth and claw.
On Christian theism humans are created by God in His image. This gives us inalienable rights, guarantees the right of personal freedom of choice, and lends deep significance and meaning to life. Moreover, God expressed our worth in His eyes when He showed His love by giving His only Son as a sacrifice to pay our sin-debt and conquer death on our behalf. He spared not His only Son for us.
You see how Christianity gives a substantiated reason for its assertions of worth and dignity, but humanism cannot?
This self-affirmation is astonishingly presumptuous. There is no argument here: only assertions and declarations of belief, more akin to blind faith than science and reason.
Still, the Christian can agree that human thought and action are for solving the world’s problems and that the application of science and free inquiry should promote human welfare. We can agree to use science creatively and not destructively, but we’re not likely to condemn the scientist who researches dynamite to pull down an old building safely, or to minimise collateral damage during justified warfare.
On the Christian view God gave humans a mind to think and engage with the world as it is. On naturalism the mind is a physiological response to stimuli, socio-cultural pressures and evolutionary development. It is therefore tuned for survival and not for the apprehension of truth or rationality. It is hard to see why humanism is rational given naturalism.
There are a few questions that must be asked, such as: who determines the ‘human values’ that temper the application of science and technology? Is it Hitler, Hefner, the Humanist or the Holy Spirit? Is it science itself, and if so doesn’t it work out that science proposes both the means and the ends? If so, was Hitler rational at the time to propose and carry out his ‘Final Solution’? After all, it was in accord with the evolutionary science being propounded in his day, was supposedly for the betterment of human welfare, and was the human value system then in vogue. At Nuremberg it was quickly realised that, to condemn these Nazi war criminals, there needed to be a standard that stood above human and societal values, and the only values they could find to do that were rooted in God.
The need for such a transcendent absolute, or law above the law, can be illustrated by what happened at the Nuremberg Trials of World War II criminals. Those accused appealed to the fact that they were only obeying the laws of their own culture, and that they were not legally responsible to any other. Faced with this argument, Robert H. Jackson, Chief Counsel for the United States, appealed to permanent values and moral standards that transcended life-styles, particular societies, and individual nations. While he was not necessarily appealing to biblical norms in this trial, the situation illustrates the need for a transcendent basis for moral values. For example, God’s commandment against murder was not just for the Jew. It transcends culture, and it transcends generations. Murder is as wrong today as it was in the Old Testament.
Christian ethics escape this problem of cultural relativity because it is based upon the nature of God. Good is what God wills in accordance with His nature (see Mark 10:18). God provides the moral patterns which apply to all human behavior.
Also, who is it that diagnoses the ‘world’s problems’? Is it the humanist? The smartest? The most popular? The strongest? The Bible says that the major problem with this world is sin, and there is little hope for man’s efforts to rectify that problem. Sin (often defined as failure to meet God’s perfect standard, or imperfection, or breaking God’s law) is symptomatic system-wide, and the evidence for that is clear. Only a divine solution and intervention can save us from that ultimate problem.
On Christianity the solutions to the world’s problems lie in human thought and action as well as divine intervention. God also determines mostly to use people as His agents on earth. Woe to the humanist if God exists and he/she rejects divine intervention.
Human rights are declared to be universal rights. That is, they stand above all nations’ laws, for all times and all places, for all people. This statement is like eating white-froth if you consider the next fundamental’s (4) claim to be undogmatic, imposing no creed upon its adherents. Christianity, however, provides something substantive for the table. Universal human rights were developed by the founding fathers of America from their understanding of the scriptures. In Christopher Hitchens’s words, Thomas Jefferson was a deist with atheistic tendencies. However, when it came to finding a ground for unalienable Rights, he pointed to the sky and said “We hold these truths to be self-evident, that all men are created equal, that they are endowed by their Creator with certain unalienable Rights, that among these are Life, Liberty and the pursuit of Happiness.” The abolition of slavery was a practical out-working of this same understanding from scripture: that all men are created equal. (The Bible even gives a justification for democracy, but at the moment I’m not prepared to support that contention.) Both groups of people were smart enough to recognise that if human rights are given to a human by another human, they can be taken away again. If human rights are given by God, then no man can take them away. They become inalienable and truly universal.
Humanism lacks a model of what it means to be the fullest development of a human being. On Christianity it is clear the model is Jesus. On Humanism it can only be subjective and relative. What if the fullest possible development of the human being is Hitler? You might say that he did not support democracy, but then you’d be forgetting that Hitler was the legitimate, democratically elected official of that nation. You might say that Hitler was wrong because the humanist ethic is based upon understanding and support for others, but then you’d be forgetting that Hitler deeply cared for Germany, and that to carry out his atrocious acts all he needed to do was create a culture that dehumanised Jews, Blacks, homosexuals, the handicapped, etc.
How do you decry the wicked man who says he is only becoming ‘the fullest possible development of what it means to be human’ if he has radically changed what it means to be human? Humanism lacks a definition of what it means to be human, but Christianity has a ready anthropological definition grounded in its own basic theology.
If a person is responsible to society, then what happens when society tells you to do something that is objectively wrong, like slaughtering Jews wholesale (Nazi Germany), forcing husbands to watch as their pregnant wives are split open by sabres so their unborn children fall to the ground to be crushed underfoot (Saddam Hussein’s Iraq), or taking unwanted newborns and dashing them on rocks (ancient Greeks)? The list of examples is appalling in its length and brutality, but it is already clear that responsibility to society is an insufficient ethic to build a world on. There needs to be some transcendent standard above society and humanity. Christianity provides that by revealing a morally perfect, transcendent God as the standard.
“Humanism is undogmatic, imposing no creed upon its adherents.” This is self-referentially incoherent. It is dogmatic in being undogmatic. It is thus really rich when it concludes that humanism is committed to education free from indoctrination. Even if it is possible to educate people free from indoctrination while operating within a worldview, this statement is as double-handed as it gets. Humanists are experts at indoctrination. You need only look at our current education system here in NZ. An example follows in the next section.
Humanism of course excludes itself from the dogmatic religious crowd, and seeks to fulfil the widespread demand for an alternative to dogmatic religion. It will do this by supplying people with another dogmatic religion (if not a religion, then an ethical framework) and imposing it on others.
For example, take the belief that ‘morality is an intrinsic part of human nature.’ This means that humans are essentially and basically good. This is taught all throughout the education system and is one tenet of humanist indoctrination. Is it true? I leave it for you, but I think the Bible gives a far more realistic account of the state of the human heart; see Jeremiah 17:9 and Romans 3:9-19.
Christianity recognises that reliable knowledge of the world and ourselves also arises through revelation from God. If reliable knowledge arises from observation, evaluation and revision, then it’s not really reliable, is it?
The purpose underlying most (if not all) creative expression is communication. Art is a vehicle for a message. When you start to value the form, and not the message that lies behind the form, then art becomes mere mindless entertainment; a distraction from personal development rather than an aid. Is fulfilment reduced on humanism to amusement? Take from art its purpose and society will transform into a mindless mass that is far too easily manipulated.
Christianity affirms the value of art and artistic expression by imbuing the artist with purpose, answering the deep existential questions of life; by affirming the artist is created in the image of God and is therefore a creative agent; by supplying the artist with a message, inspiration and talent; and by infusing the world waiting to be captured and mirrored by great works of art with a sense of the sublime. Naturalism, on the other hand, finds beauty an awkward notion. It is difficult to see why an ape’s brain would appreciate the aesthetic pleasure of a morning sunrise, the star-filled sky, or the frozen waterfall.
If human existence transcends the death of the body, then obviously humanism is not for everyone everywhere. Humanism become bankrupt if this life is not all there is or if there is a God. Besides this, based upon the refutation of points 1 through 6 it is not obvious humanism does supply an ethical and rational means of addressing the challenges of our times.
|
function rs = norm1(s, low, upp)
%tstoolbox/@signal/norm1
% Syntax:
% * rs=norm1(s) => low=0 , upp=1
% * rs=norm1(s, low) => upp=1
% * rs=norm1(s, low, upp)
%
% Scale and move signal values to be within [low,upp].
%
% Copyright 1997-2001 DPI Goettingen, License http://www.physik3.gwdg.de/tstool/gpl.txt
narginchk(1,3);
if nargin < 2
low = 0;
end
if nargin < 3
upp = 1;
end
c = norm1(s.core, low, upp);
rs = signal(c,s);
rs = addhistory(rs, ['(norm1) Transformed signal to be within [' ...
num2str(low) ',' num2str(upp) ']']);
rs = addcommandlines(rs, 's = norm1(s', low, upp);
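For readers outside MATLAB, the transformation norm1 applies is a plain affine min-max rescale; the following is a minimal standalone Python sketch (a hypothetical helper, not part of tstoolbox):

import numpy as np

def norm1(s, low=0.0, upp=1.0):
    """Affinely rescale the values of s so they lie within [low, upp]."""
    s = np.asarray(s, dtype=float)
    smin, smax = s.min(), s.max()
    if smax == smin:
        # Degenerate constant signal: the scale factor is undefined; pin to low.
        return np.full_like(s, low)
    return low + (s - smin) * (upp - low) / (smax - smin)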
|
Sarnia has one remaining museum within its city limits: "Stones 'N Bones", which houses over 6,000 exhibits. The collection includes rocks, artifacts, fossils, and bones from all over the world. A previous museum, the Discovery House Museum, has been converted into a hospice. This historic house, built between 1869 and 1875, is recognised as a testament to Victorian Era construction.
|
[GOAL]
α✝ : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝³ : NormedField α✝
inst✝² : SeminormedAddCommGroup β
α : Type u_5
inst✝¹ : NormedField α
inst✝ : NormedSpace α β
n : ℤ
x : β
⊢ ‖n • x‖ = ‖↑n‖ * ‖x‖
[PROOFSTEP]
rw [← norm_smul, ← Int.smul_one_eq_coe, smul_assoc, one_smul]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝² : NormedField α
inst✝¹ : SeminormedAddCommGroup β
inst✝ : NormedSpace ℝ β
x : β
⊢ ‖x‖⁻¹ • x ∈ closedBall 0 1
[PROOFSTEP]
simp only [mem_closedBall_zero_iff, norm_smul, norm_inv, norm_norm, ← _root_.div_eq_inv_mul, div_self_le_one]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝² : NormedField α
inst✝¹ : SeminormedAddCommGroup β
inst✝ : NormedSpace ℝ β
t : ℝ
ht : 0 ≤ t
x : β
⊢ ‖t • x‖ = t * ‖x‖
[PROOFSTEP]
rw [norm_smul, Real.norm_eq_abs, abs_of_nonneg ht]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁵ : NormedField α
inst✝⁴ : SeminormedAddCommGroup β
E : Type u_5
inst✝³ : SeminormedAddCommGroup E
inst✝² : NormedSpace α E
F : Type u_6
inst✝¹ : SeminormedAddCommGroup F
inst✝ : NormedSpace α F
c : α
x : E
ε : ℝ
h : 0 < ε
⊢ ‖c • (id x - x)‖ = 0
[PROOFSTEP]
simp
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁶ : NormedField α
inst✝⁵ : SeminormedAddCommGroup β
E : Type u_5
inst✝⁴ : SeminormedAddCommGroup E
inst✝³ : NormedSpace α E
F : Type u_6
inst✝² : SeminormedAddCommGroup F
inst✝¹ : NormedSpace α F
inst✝ : NormedSpace ℝ E
x : E
r : ℝ
hr : r ≠ 0
⊢ closure (ball x r) = closedBall x r
[PROOFSTEP]
refine' Subset.antisymm closure_ball_subset_closedBall fun y hy => _
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁶ : NormedField α
inst✝⁵ : SeminormedAddCommGroup β
E : Type u_5
inst✝⁴ : SeminormedAddCommGroup E
inst✝³ : NormedSpace α E
F : Type u_6
inst✝² : SeminormedAddCommGroup F
inst✝¹ : NormedSpace α F
inst✝ : NormedSpace ℝ E
x : E
r : ℝ
hr : r ≠ 0
y : E
hy : y ∈ closedBall x r
⊢ y ∈ closure (ball x r)
[PROOFSTEP]
have : ContinuousWithinAt (fun c : ℝ => c • (y - x) + x) (Ico 0 1) 1 :=
((continuous_id.smul continuous_const).add continuous_const).continuousWithinAt
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁶ : NormedField α
inst✝⁵ : SeminormedAddCommGroup β
E : Type u_5
inst✝⁴ : SeminormedAddCommGroup E
inst✝³ : NormedSpace α E
F : Type u_6
inst✝² : SeminormedAddCommGroup F
inst✝¹ : NormedSpace α F
inst✝ : NormedSpace ℝ E
x : E
r : ℝ
hr : r ≠ 0
y : E
hy : y ∈ closedBall x r
this : ContinuousWithinAt (fun c => c • (y - x) + x) (Ico 0 1) 1
⊢ y ∈ closure (ball x r)
[PROOFSTEP]
convert this.mem_closure _ _
[GOAL]
case h.e'_4
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁶ : NormedField α
inst✝⁵ : SeminormedAddCommGroup β
E : Type u_5
inst✝⁴ : SeminormedAddCommGroup E
inst✝³ : NormedSpace α E
F : Type u_6
inst✝² : SeminormedAddCommGroup F
inst✝¹ : NormedSpace α F
inst✝ : NormedSpace ℝ E
x : E
r : ℝ
hr : r ≠ 0
y : E
hy : y ∈ closedBall x r
this : ContinuousWithinAt (fun c => c • (y - x) + x) (Ico 0 1) 1
⊢ y = 1 • (y - x) + x
[PROOFSTEP]
rw [one_smul, sub_add_cancel]
[GOAL]
case convert_2
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁶ : NormedField α
inst✝⁵ : SeminormedAddCommGroup β
E : Type u_5
inst✝⁴ : SeminormedAddCommGroup E
inst✝³ : NormedSpace α E
F : Type u_6
inst✝² : SeminormedAddCommGroup F
inst✝¹ : NormedSpace α F
inst✝ : NormedSpace ℝ E
x : E
r : ℝ
hr : r ≠ 0
y : E
hy : y ∈ closedBall x r
this : ContinuousWithinAt (fun c => c • (y - x) + x) (Ico 0 1) 1
⊢ 1 ∈ closure (Ico 0 1)
[PROOFSTEP]
simp [closure_Ico zero_ne_one, zero_le_one]
[GOAL]
case convert_3
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁶ : NormedField α
inst✝⁵ : SeminormedAddCommGroup β
E : Type u_5
inst✝⁴ : SeminormedAddCommGroup E
inst✝³ : NormedSpace α E
F : Type u_6
inst✝² : SeminormedAddCommGroup F
inst✝¹ : NormedSpace α F
inst✝ : NormedSpace ℝ E
x : E
r : ℝ
hr : r ≠ 0
y : E
hy : y ∈ closedBall x r
this : ContinuousWithinAt (fun c => c • (y - x) + x) (Ico 0 1) 1
⊢ MapsTo (fun c => c • (y - x) + x) (Ico 0 1) (ball x r)
[PROOFSTEP]
rintro c ⟨hc0, hc1⟩
[GOAL]
case convert_3.intro
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁶ : NormedField α
inst✝⁵ : SeminormedAddCommGroup β
E : Type u_5
inst✝⁴ : SeminormedAddCommGroup E
inst✝³ : NormedSpace α E
F : Type u_6
inst✝² : SeminormedAddCommGroup F
inst✝¹ : NormedSpace α F
inst✝ : NormedSpace ℝ E
x : E
r : ℝ
hr : r ≠ 0
y : E
hy : y ∈ closedBall x r
this : ContinuousWithinAt (fun c => c • (y - x) + x) (Ico 0 1) 1
c : ℝ
hc0 : 0 ≤ c
hc1 : c < 1
⊢ (fun c => c • (y - x) + x) c ∈ ball x r
[PROOFSTEP]
rw [mem_ball, dist_eq_norm, add_sub_cancel, norm_smul, Real.norm_eq_abs, abs_of_nonneg hc0, mul_comm, ← mul_one r]
[GOAL]
case convert_3.intro
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁶ : NormedField α
inst✝⁵ : SeminormedAddCommGroup β
E : Type u_5
inst✝⁴ : SeminormedAddCommGroup E
inst✝³ : NormedSpace α E
F : Type u_6
inst✝² : SeminormedAddCommGroup F
inst✝¹ : NormedSpace α F
inst✝ : NormedSpace ℝ E
x : E
r : ℝ
hr : r ≠ 0
y : E
hy : y ∈ closedBall x r
this : ContinuousWithinAt (fun c => c • (y - x) + x) (Ico 0 1) 1
c : ℝ
hc0 : 0 ≤ c
hc1 : c < 1
⊢ ‖y - x‖ * c < r * 1
[PROOFSTEP]
rw [mem_closedBall, dist_eq_norm] at hy
[GOAL]
case convert_3.intro
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁶ : NormedField α
inst✝⁵ : SeminormedAddCommGroup β
E : Type u_5
inst✝⁴ : SeminormedAddCommGroup E
inst✝³ : NormedSpace α E
F : Type u_6
inst✝² : SeminormedAddCommGroup F
inst✝¹ : NormedSpace α F
inst✝ : NormedSpace ℝ E
x : E
r : ℝ
hr : r ≠ 0
y : E
hy : ‖y - x‖ ≤ r
this : ContinuousWithinAt (fun c => c • (y - x) + x) (Ico 0 1) 1
c : ℝ
hc0 : 0 ≤ c
hc1 : c < 1
⊢ ‖y - x‖ * c < r * 1
[PROOFSTEP]
replace hr : 0 < r := ((norm_nonneg _).trans hy).lt_of_ne hr.symm
[GOAL]
case convert_3.intro
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁶ : NormedField α
inst✝⁵ : SeminormedAddCommGroup β
E : Type u_5
inst✝⁴ : SeminormedAddCommGroup E
inst✝³ : NormedSpace α E
F : Type u_6
inst✝² : SeminormedAddCommGroup F
inst✝¹ : NormedSpace α F
inst✝ : NormedSpace ℝ E
x : E
r : ℝ
y : E
hy : ‖y - x‖ ≤ r
this : ContinuousWithinAt (fun c => c • (y - x) + x) (Ico 0 1) 1
c : ℝ
hc0 : 0 ≤ c
hc1 : c < 1
hr : 0 < r
⊢ ‖y - x‖ * c < r * 1
[PROOFSTEP]
apply mul_lt_mul'
[GOAL]
case convert_3.intro.hac
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁶ : NormedField α
inst✝⁵ : SeminormedAddCommGroup β
E : Type u_5
inst✝⁴ : SeminormedAddCommGroup E
inst✝³ : NormedSpace α E
F : Type u_6
inst✝² : SeminormedAddCommGroup F
inst✝¹ : NormedSpace α F
inst✝ : NormedSpace ℝ E
x : E
r : ℝ
y : E
hy : ‖y - x‖ ≤ r
this : ContinuousWithinAt (fun c => c • (y - x) + x) (Ico 0 1) 1
c : ℝ
hc0 : 0 ≤ c
hc1 : c < 1
hr : 0 < r
⊢ ‖y - x‖ ≤ r
[PROOFSTEP]
assumption
[GOAL]
case convert_3.intro.hbd
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁶ : NormedField α
inst✝⁵ : SeminormedAddCommGroup β
E : Type u_5
inst✝⁴ : SeminormedAddCommGroup E
inst✝³ : NormedSpace α E
F : Type u_6
inst✝² : SeminormedAddCommGroup F
inst✝¹ : NormedSpace α F
inst✝ : NormedSpace ℝ E
x : E
r : ℝ
y : E
hy : ‖y - x‖ ≤ r
this : ContinuousWithinAt (fun c => c • (y - x) + x) (Ico 0 1) 1
c : ℝ
hc0 : 0 ≤ c
hc1 : c < 1
hr : 0 < r
⊢ c < 1
[PROOFSTEP]
assumption
[GOAL]
case convert_3.intro.hb
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁶ : NormedField α
inst✝⁵ : SeminormedAddCommGroup β
E : Type u_5
inst✝⁴ : SeminormedAddCommGroup E
inst✝³ : NormedSpace α E
F : Type u_6
inst✝² : SeminormedAddCommGroup F
inst✝¹ : NormedSpace α F
inst✝ : NormedSpace ℝ E
x : E
r : ℝ
y : E
hy : ‖y - x‖ ≤ r
this : ContinuousWithinAt (fun c => c • (y - x) + x) (Ico 0 1) 1
c : ℝ
hc0 : 0 ≤ c
hc1 : c < 1
hr : 0 < r
⊢ 0 ≤ c
[PROOFSTEP]
assumption
[GOAL]
case convert_3.intro.hc
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁶ : NormedField α
inst✝⁵ : SeminormedAddCommGroup β
E : Type u_5
inst✝⁴ : SeminormedAddCommGroup E
inst✝³ : NormedSpace α E
F : Type u_6
inst✝² : SeminormedAddCommGroup F
inst✝¹ : NormedSpace α F
inst✝ : NormedSpace ℝ E
x : E
r : ℝ
y : E
hy : ‖y - x‖ ≤ r
this : ContinuousWithinAt (fun c => c • (y - x) + x) (Ico 0 1) 1
c : ℝ
hc0 : 0 ≤ c
hc1 : c < 1
hr : 0 < r
⊢ 0 < r
[PROOFSTEP]
assumption
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁶ : NormedField α
inst✝⁵ : SeminormedAddCommGroup β
E : Type u_5
inst✝⁴ : SeminormedAddCommGroup E
inst✝³ : NormedSpace α E
F : Type u_6
inst✝² : SeminormedAddCommGroup F
inst✝¹ : NormedSpace α F
inst✝ : NormedSpace ℝ E
x : E
r : ℝ
hr : r ≠ 0
⊢ frontier (ball x r) = sphere x r
[PROOFSTEP]
rw [frontier, closure_ball x hr, isOpen_ball.interior_eq, closedBall_diff_ball]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁶ : NormedField α
inst✝⁵ : SeminormedAddCommGroup β
E : Type u_5
inst✝⁴ : SeminormedAddCommGroup E
inst✝³ : NormedSpace α E
F : Type u_6
inst✝² : SeminormedAddCommGroup F
inst✝¹ : NormedSpace α F
inst✝ : NormedSpace ℝ E
x : E
r : ℝ
hr : r ≠ 0
⊢ interior (closedBall x r) = ball x r
[PROOFSTEP]
cases' hr.lt_or_lt with hr hr
[GOAL]
case inl
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁶ : NormedField α
inst✝⁵ : SeminormedAddCommGroup β
E : Type u_5
inst✝⁴ : SeminormedAddCommGroup E
inst✝³ : NormedSpace α E
F : Type u_6
inst✝² : SeminormedAddCommGroup F
inst✝¹ : NormedSpace α F
inst✝ : NormedSpace ℝ E
x : E
r : ℝ
hr✝ : r ≠ 0
hr : r < 0
⊢ interior (closedBall x r) = ball x r
[PROOFSTEP]
rw [closedBall_eq_empty.2 hr, ball_eq_empty.2 hr.le, interior_empty]
[GOAL]
case inr
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁶ : NormedField α
inst✝⁵ : SeminormedAddCommGroup β
E : Type u_5
inst✝⁴ : SeminormedAddCommGroup E
inst✝³ : NormedSpace α E
F : Type u_6
inst✝² : SeminormedAddCommGroup F
inst✝¹ : NormedSpace α F
inst✝ : NormedSpace ℝ E
x : E
r : ℝ
hr✝ : r ≠ 0
hr : 0 < r
⊢ interior (closedBall x r) = ball x r
[PROOFSTEP]
refine' Subset.antisymm _ ball_subset_interior_closedBall
[GOAL]
case inr
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁶ : NormedField α
inst✝⁵ : SeminormedAddCommGroup β
E : Type u_5
inst✝⁴ : SeminormedAddCommGroup E
inst✝³ : NormedSpace α E
F : Type u_6
inst✝² : SeminormedAddCommGroup F
inst✝¹ : NormedSpace α F
inst✝ : NormedSpace ℝ E
x : E
r : ℝ
hr✝ : r ≠ 0
hr : 0 < r
⊢ interior (closedBall x r) ⊆ ball x r
[PROOFSTEP]
intro y hy
[GOAL]
case inr
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁶ : NormedField α
inst✝⁵ : SeminormedAddCommGroup β
E : Type u_5
inst✝⁴ : SeminormedAddCommGroup E
inst✝³ : NormedSpace α E
F : Type u_6
inst✝² : SeminormedAddCommGroup F
inst✝¹ : NormedSpace α F
inst✝ : NormedSpace ℝ E
x : E
r : ℝ
hr✝ : r ≠ 0
hr : 0 < r
y : E
hy : y ∈ interior (closedBall x r)
⊢ y ∈ ball x r
[PROOFSTEP]
rcases(mem_closedBall.1 <| interior_subset hy).lt_or_eq with (hr | rfl)
[GOAL]
case inr.inl
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁶ : NormedField α
inst✝⁵ : SeminormedAddCommGroup β
E : Type u_5
inst✝⁴ : SeminormedAddCommGroup E
inst✝³ : NormedSpace α E
F : Type u_6
inst✝² : SeminormedAddCommGroup F
inst✝¹ : NormedSpace α F
inst✝ : NormedSpace ℝ E
x : E
r : ℝ
hr✝¹ : r ≠ 0
hr✝ : 0 < r
y : E
hy : y ∈ interior (closedBall x r)
hr : dist y x < r
⊢ y ∈ ball x r
[PROOFSTEP]
exact hr
[GOAL]
case inr.inr
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁶ : NormedField α
inst✝⁵ : SeminormedAddCommGroup β
E : Type u_5
inst✝⁴ : SeminormedAddCommGroup E
inst✝³ : NormedSpace α E
F : Type u_6
inst✝² : SeminormedAddCommGroup F
inst✝¹ : NormedSpace α F
inst✝ : NormedSpace ℝ E
x y : E
hr✝ : dist y x ≠ 0
hr : 0 < dist y x
hy : y ∈ interior (closedBall x (dist y x))
⊢ y ∈ ball x (dist y x)
[PROOFSTEP]
set f : ℝ → E := fun c : ℝ => c • (y - x) + x
[GOAL]
case inr.inr
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁶ : NormedField α
inst✝⁵ : SeminormedAddCommGroup β
E : Type u_5
inst✝⁴ : SeminormedAddCommGroup E
inst✝³ : NormedSpace α E
F : Type u_6
inst✝² : SeminormedAddCommGroup F
inst✝¹ : NormedSpace α F
inst✝ : NormedSpace ℝ E
x y : E
hr✝ : dist y x ≠ 0
hr : 0 < dist y x
hy : y ∈ interior (closedBall x (dist y x))
f : ℝ → E := fun c => c • (y - x) + x
⊢ y ∈ ball x (dist y x)
[PROOFSTEP]
suffices f ⁻¹' closedBall x (dist y x) ⊆ Icc (-1) 1
by
have hfc : Continuous f := (continuous_id.smul continuous_const).add continuous_const
have hf1 : (1 : ℝ) ∈ f ⁻¹' interior (closedBall x <| dist y x) := by simpa
have h1 : (1 : ℝ) ∈ interior (Icc (-1 : ℝ) 1) :=
interior_mono this (preimage_interior_subset_interior_preimage hfc hf1)
contrapose h1
simp
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁶ : NormedField α
inst✝⁵ : SeminormedAddCommGroup β
E : Type u_5
inst✝⁴ : SeminormedAddCommGroup E
inst✝³ : NormedSpace α E
F : Type u_6
inst✝² : SeminormedAddCommGroup F
inst✝¹ : NormedSpace α F
inst✝ : NormedSpace ℝ E
x y : E
hr✝ : dist y x ≠ 0
hr : 0 < dist y x
hy : y ∈ interior (closedBall x (dist y x))
f : ℝ → E := fun c => c • (y - x) + x
this : f ⁻¹' closedBall x (dist y x) ⊆ Icc (-1) 1
⊢ y ∈ ball x (dist y x)
[PROOFSTEP]
have hfc : Continuous f := (continuous_id.smul continuous_const).add continuous_const
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁶ : NormedField α
inst✝⁵ : SeminormedAddCommGroup β
E : Type u_5
inst✝⁴ : SeminormedAddCommGroup E
inst✝³ : NormedSpace α E
F : Type u_6
inst✝² : SeminormedAddCommGroup F
inst✝¹ : NormedSpace α F
inst✝ : NormedSpace ℝ E
x y : E
hr✝ : dist y x ≠ 0
hr : 0 < dist y x
hy : y ∈ interior (closedBall x (dist y x))
f : ℝ → E := fun c => c • (y - x) + x
this : f ⁻¹' closedBall x (dist y x) ⊆ Icc (-1) 1
hfc : Continuous f
⊢ y ∈ ball x (dist y x)
[PROOFSTEP]
have hf1 : (1 : ℝ) ∈ f ⁻¹' interior (closedBall x <| dist y x) := by simpa
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁶ : NormedField α
inst✝⁵ : SeminormedAddCommGroup β
E : Type u_5
inst✝⁴ : SeminormedAddCommGroup E
inst✝³ : NormedSpace α E
F : Type u_6
inst✝² : SeminormedAddCommGroup F
inst✝¹ : NormedSpace α F
inst✝ : NormedSpace ℝ E
x y : E
hr✝ : dist y x ≠ 0
hr : 0 < dist y x
hy : y ∈ interior (closedBall x (dist y x))
f : ℝ → E := fun c => c • (y - x) + x
this : f ⁻¹' closedBall x (dist y x) ⊆ Icc (-1) 1
hfc : Continuous f
⊢ 1 ∈ f ⁻¹' interior (closedBall x (dist y x))
[PROOFSTEP]
simpa
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁶ : NormedField α
inst✝⁵ : SeminormedAddCommGroup β
E : Type u_5
inst✝⁴ : SeminormedAddCommGroup E
inst✝³ : NormedSpace α E
F : Type u_6
inst✝² : SeminormedAddCommGroup F
inst✝¹ : NormedSpace α F
inst✝ : NormedSpace ℝ E
x y : E
hr✝ : dist y x ≠ 0
hr : 0 < dist y x
hy : y ∈ interior (closedBall x (dist y x))
f : ℝ → E := fun c => c • (y - x) + x
this : f ⁻¹' closedBall x (dist y x) ⊆ Icc (-1) 1
hfc : Continuous f
hf1 : 1 ∈ f ⁻¹' interior (closedBall x (dist y x))
⊢ y ∈ ball x (dist y x)
[PROOFSTEP]
have h1 : (1 : ℝ) ∈ interior (Icc (-1 : ℝ) 1) := interior_mono this (preimage_interior_subset_interior_preimage hfc hf1)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁶ : NormedField α
inst✝⁵ : SeminormedAddCommGroup β
E : Type u_5
inst✝⁴ : SeminormedAddCommGroup E
inst✝³ : NormedSpace α E
F : Type u_6
inst✝² : SeminormedAddCommGroup F
inst✝¹ : NormedSpace α F
inst✝ : NormedSpace ℝ E
x y : E
hr✝ : dist y x ≠ 0
hr : 0 < dist y x
hy : y ∈ interior (closedBall x (dist y x))
f : ℝ → E := fun c => c • (y - x) + x
this : f ⁻¹' closedBall x (dist y x) ⊆ Icc (-1) 1
hfc : Continuous f
hf1 : 1 ∈ f ⁻¹' interior (closedBall x (dist y x))
h1 : 1 ∈ interior (Icc (-1) 1)
⊢ y ∈ ball x (dist y x)
[PROOFSTEP]
contrapose h1
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁶ : NormedField α
inst✝⁵ : SeminormedAddCommGroup β
E : Type u_5
inst✝⁴ : SeminormedAddCommGroup E
inst✝³ : NormedSpace α E
F : Type u_6
inst✝² : SeminormedAddCommGroup F
inst✝¹ : NormedSpace α F
inst✝ : NormedSpace ℝ E
x y : E
hr✝ : dist y x ≠ 0
hr : 0 < dist y x
hy : y ∈ interior (closedBall x (dist y x))
f : ℝ → E := fun c => c • (y - x) + x
this : f ⁻¹' closedBall x (dist y x) ⊆ Icc (-1) 1
hfc : Continuous f
hf1 : 1 ∈ f ⁻¹' interior (closedBall x (dist y x))
h1 : ¬y ∈ ball x (dist y x)
⊢ ¬1 ∈ interior (Icc (-1) 1)
[PROOFSTEP]
simp
[GOAL]
case inr.inr
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁶ : NormedField α
inst✝⁵ : SeminormedAddCommGroup β
E : Type u_5
inst✝⁴ : SeminormedAddCommGroup E
inst✝³ : NormedSpace α E
F : Type u_6
inst✝² : SeminormedAddCommGroup F
inst✝¹ : NormedSpace α F
inst✝ : NormedSpace ℝ E
x y : E
hr✝ : dist y x ≠ 0
hr : 0 < dist y x
hy : y ∈ interior (closedBall x (dist y x))
f : ℝ → E := fun c => c • (y - x) + x
⊢ f ⁻¹' closedBall x (dist y x) ⊆ Icc (-1) 1
[PROOFSTEP]
intro c hc
[GOAL]
case inr.inr
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁶ : NormedField α
inst✝⁵ : SeminormedAddCommGroup β
E : Type u_5
inst✝⁴ : SeminormedAddCommGroup E
inst✝³ : NormedSpace α E
F : Type u_6
inst✝² : SeminormedAddCommGroup F
inst✝¹ : NormedSpace α F
inst✝ : NormedSpace ℝ E
x y : E
hr✝ : dist y x ≠ 0
hr : 0 < dist y x
hy : y ∈ interior (closedBall x (dist y x))
f : ℝ → E := fun c => c • (y - x) + x
c : ℝ
hc : c ∈ f ⁻¹' closedBall x (dist y x)
⊢ c ∈ Icc (-1) 1
[PROOFSTEP]
rw [mem_Icc, ← abs_le, ← Real.norm_eq_abs, ← mul_le_mul_right hr]
[GOAL]
case inr.inr
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁶ : NormedField α
inst✝⁵ : SeminormedAddCommGroup β
E : Type u_5
inst✝⁴ : SeminormedAddCommGroup E
inst✝³ : NormedSpace α E
F : Type u_6
inst✝² : SeminormedAddCommGroup F
inst✝¹ : NormedSpace α F
inst✝ : NormedSpace ℝ E
x y : E
hr✝ : dist y x ≠ 0
hr : 0 < dist y x
hy : y ∈ interior (closedBall x (dist y x))
f : ℝ → E := fun c => c • (y - x) + x
c : ℝ
hc : c ∈ f ⁻¹' closedBall x (dist y x)
⊢ ‖c‖ * dist y x ≤ 1 * dist y x
[PROOFSTEP]
simpa [dist_eq_norm, norm_smul] using hc
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁶ : NormedField α
inst✝⁵ : SeminormedAddCommGroup β
E : Type u_5
inst✝⁴ : SeminormedAddCommGroup E
inst✝³ : NormedSpace α E
F : Type u_6
inst✝² : SeminormedAddCommGroup F
inst✝¹ : NormedSpace α F
inst✝ : NormedSpace ℝ E
x : E
r : ℝ
hr : r ≠ 0
⊢ frontier (closedBall x r) = sphere x r
[PROOFSTEP]
rw [frontier, closure_closedBall, interior_closedBall x hr, closedBall_diff_ball]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁶ : NormedField α
inst✝⁵ : SeminormedAddCommGroup β
E : Type u_5
inst✝⁴ : SeminormedAddCommGroup E
inst✝³ : NormedSpace α E
F : Type u_6
inst✝² : SeminormedAddCommGroup F
inst✝¹ : NormedSpace α F
inst✝ : NormedSpace ℝ E
x : E
r : ℝ
hr : r ≠ 0
⊢ interior (sphere x r) = ∅
[PROOFSTEP]
rw [← frontier_closedBall x hr, interior_frontier isClosed_ball]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁶ : NormedField α
inst✝⁵ : SeminormedAddCommGroup β
E : Type u_5
inst✝⁴ : SeminormedAddCommGroup E
inst✝³ : NormedSpace α E
F : Type u_6
inst✝² : SeminormedAddCommGroup F
inst✝¹ : NormedSpace α F
inst✝ : NormedSpace ℝ E
x : E
r : ℝ
hr : r ≠ 0
⊢ frontier (sphere x r) = sphere x r
[PROOFSTEP]
rw [isClosed_sphere.frontier_eq, interior_sphere x hr, diff_empty]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁷ : NormedField α
inst✝⁶ : SeminormedAddCommGroup β
E✝ : Type u_5
inst✝⁵ : SeminormedAddCommGroup E✝
inst✝⁴ : NormedSpace α E✝
F : Type u_6
inst✝³ : SeminormedAddCommGroup F
inst✝² : NormedSpace α F
E : Type u_7
inst✝¹ : NormedAddCommGroup E
inst✝ : NormedSpace ℚ E
e : E
⊢ DiscreteTopology { x // x ∈ AddSubgroup.zmultiples e }
[PROOFSTEP]
rcases eq_or_ne e 0 with (rfl | he)
[GOAL]
case inl
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁷ : NormedField α
inst✝⁶ : SeminormedAddCommGroup β
E✝ : Type u_5
inst✝⁵ : SeminormedAddCommGroup E✝
inst✝⁴ : NormedSpace α E✝
F : Type u_6
inst✝³ : SeminormedAddCommGroup F
inst✝² : NormedSpace α F
E : Type u_7
inst✝¹ : NormedAddCommGroup E
inst✝ : NormedSpace ℚ E
⊢ DiscreteTopology { x // x ∈ AddSubgroup.zmultiples 0 }
[PROOFSTEP]
rw [AddSubgroup.zmultiples_zero_eq_bot]
[GOAL]
case inl
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁷ : NormedField α
inst✝⁶ : SeminormedAddCommGroup β
E✝ : Type u_5
inst✝⁵ : SeminormedAddCommGroup E✝
inst✝⁴ : NormedSpace α E✝
F : Type u_6
inst✝³ : SeminormedAddCommGroup F
inst✝² : NormedSpace α F
E : Type u_7
inst✝¹ : NormedAddCommGroup E
inst✝ : NormedSpace ℚ E
⊢ DiscreteTopology { x // x ∈ ⊥ }
[PROOFSTEP]
refine Subsingleton.discreteTopology (α := ↑(⊥ : Subspace ℚ E))
[GOAL]
case inr
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁷ : NormedField α
inst✝⁶ : SeminormedAddCommGroup β
E✝ : Type u_5
inst✝⁵ : SeminormedAddCommGroup E✝
inst✝⁴ : NormedSpace α E✝
F : Type u_6
inst✝³ : SeminormedAddCommGroup F
inst✝² : NormedSpace α F
E : Type u_7
inst✝¹ : NormedAddCommGroup E
inst✝ : NormedSpace ℚ E
e : E
he : e ≠ 0
⊢ DiscreteTopology { x // x ∈ AddSubgroup.zmultiples e }
[PROOFSTEP]
rw [discreteTopology_iff_open_singleton_zero, isOpen_induced_iff]
[GOAL]
case inr
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁷ : NormedField α
inst✝⁶ : SeminormedAddCommGroup β
E✝ : Type u_5
inst✝⁵ : SeminormedAddCommGroup E✝
inst✝⁴ : NormedSpace α E✝
F : Type u_6
inst✝³ : SeminormedAddCommGroup F
inst✝² : NormedSpace α F
E : Type u_7
inst✝¹ : NormedAddCommGroup E
inst✝ : NormedSpace ℚ E
e : E
he : e ≠ 0
⊢ ∃ t, IsOpen t ∧ Subtype.val ⁻¹' t = {0}
[PROOFSTEP]
refine' ⟨Metric.ball 0 ‖e‖, Metric.isOpen_ball, _⟩
[GOAL]
case inr
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁷ : NormedField α
inst✝⁶ : SeminormedAddCommGroup β
E✝ : Type u_5
inst✝⁵ : SeminormedAddCommGroup E✝
inst✝⁴ : NormedSpace α E✝
F : Type u_6
inst✝³ : SeminormedAddCommGroup F
inst✝² : NormedSpace α F
E : Type u_7
inst✝¹ : NormedAddCommGroup E
inst✝ : NormedSpace ℚ E
e : E
he : e ≠ 0
⊢ Subtype.val ⁻¹' ball 0 ‖e‖ = {0}
[PROOFSTEP]
ext ⟨x, hx⟩
[GOAL]
case inr.h.mk
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁷ : NormedField α
inst✝⁶ : SeminormedAddCommGroup β
E✝ : Type u_5
inst✝⁵ : SeminormedAddCommGroup E✝
inst✝⁴ : NormedSpace α E✝
F : Type u_6
inst✝³ : SeminormedAddCommGroup F
inst✝² : NormedSpace α F
E : Type u_7
inst✝¹ : NormedAddCommGroup E
inst✝ : NormedSpace ℚ E
e : E
he : e ≠ 0
x : E
hx : x ∈ AddSubgroup.zmultiples e
⊢ { val := x, property := hx } ∈ Subtype.val ⁻¹' ball 0 ‖e‖ ↔ { val := x, property := hx } ∈ {0}
[PROOFSTEP]
obtain ⟨k, rfl⟩ := AddSubgroup.mem_zmultiples_iff.mp hx
[GOAL]
case inr.h.mk.intro
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁷ : NormedField α
inst✝⁶ : SeminormedAddCommGroup β
E✝ : Type u_5
inst✝⁵ : SeminormedAddCommGroup E✝
inst✝⁴ : NormedSpace α E✝
F : Type u_6
inst✝³ : SeminormedAddCommGroup F
inst✝² : NormedSpace α F
E : Type u_7
inst✝¹ : NormedAddCommGroup E
inst✝ : NormedSpace ℚ E
e : E
he : e ≠ 0
k : ℤ
hx : k • e ∈ AddSubgroup.zmultiples e
⊢ { val := k • e, property := hx } ∈ Subtype.val ⁻¹' ball 0 ‖e‖ ↔ { val := k • e, property := hx } ∈ {0}
[PROOFSTEP]
rw [mem_preimage, mem_ball_zero_iff, AddSubgroup.coe_mk, mem_singleton_iff, Subtype.ext_iff, AddSubgroup.coe_mk,
AddSubgroup.coe_zero, norm_zsmul ℚ k e, Int.norm_cast_rat, Int.norm_eq_abs,
mul_lt_iff_lt_one_left (norm_pos_iff.mpr he), ← @Int.cast_one ℝ _, Int.cast_lt, Int.abs_lt_one_iff, smul_eq_zero,
or_iff_left he]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁵ : NormedField α
inst✝⁴ : SeminormedAddCommGroup β
E : Type u_5
inst✝³ : SeminormedAddCommGroup E
inst✝² : NormedSpace α E
F : Type u_6
inst✝¹ : SeminormedAddCommGroup F
inst✝ : NormedSpace α F
src✝¹ : SeminormedAddCommGroup (E × F) := seminormedAddCommGroup
src✝ : Module α (E × F) := instModule
s : α
x : E × F
⊢ ‖s • x‖ ≤ ‖s‖ * ‖x‖
[PROOFSTEP]
simp only [norm_smul, Prod.norm_def, Prod.smul_snd, Prod.smul_fst, mul_max_of_nonneg, norm_nonneg, le_rfl]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁸ : NormedField α
inst✝⁷ : SeminormedAddCommGroup β
E✝ : Type u_5
inst✝⁶ : SeminormedAddCommGroup E✝
inst✝⁵ : NormedSpace α E✝
F : Type u_6
inst✝⁴ : SeminormedAddCommGroup F
inst✝³ : NormedSpace α F
E : ι → Type u_7
inst✝² : Fintype ι
inst✝¹ : (i : ι) → SeminormedAddCommGroup (E i)
inst✝ : (i : ι) → NormedSpace α (E i)
a : α
f : (i : ι) → E i
⊢ ‖a • f‖ ≤ ‖a‖ * ‖f‖
[PROOFSTEP]
simp_rw [← coe_nnnorm, ← NNReal.coe_mul, NNReal.coe_le_coe, Pi.nnnorm_def, NNReal.mul_finset_sup]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁸ : NormedField α
inst✝⁷ : SeminormedAddCommGroup β
E✝ : Type u_5
inst✝⁶ : SeminormedAddCommGroup E✝
inst✝⁵ : NormedSpace α E✝
F : Type u_6
inst✝⁴ : SeminormedAddCommGroup F
inst✝³ : NormedSpace α F
E : ι → Type u_7
inst✝² : Fintype ι
inst✝¹ : (i : ι) → SeminormedAddCommGroup (E i)
inst✝ : (i : ι) → NormedSpace α (E i)
a : α
f : (i : ι) → E i
⊢ (Finset.sup Finset.univ fun b => ‖(a • f) b‖₊) ≤ Finset.sup Finset.univ fun a_1 => ‖a‖₊ * ‖f a_1‖₊
[PROOFSTEP]
exact Finset.sup_mono_fun fun _ _ => norm_smul_le a _
[GOAL]
α✝ : Type u_1
β✝ : Type u_2
γ✝ : Type u_3
ι : Type u_4
F : Type u_5
α : Type u_6
β : Type u_7
γ : Type u_8
inst✝⁵ : NormedField α
inst✝⁴ : AddCommGroup β
inst✝³ : Module α β
inst✝² : SeminormedAddCommGroup γ
inst✝¹ : NormedSpace α γ
inst✝ : LinearMapClass F α β γ
f : F
⊢ NormedSpace α β
[PROOFSTEP]
refine @NormedSpace.mk (α := α) (β := β) _ ?_ ?_ ?_
[GOAL]
case refine_1
α✝ : Type u_1
β✝ : Type u_2
γ✝ : Type u_3
ι : Type u_4
F : Type u_5
α : Type u_6
β : Type u_7
γ : Type u_8
inst✝⁵ : NormedField α
inst✝⁴ : AddCommGroup β
inst✝³ : Module α β
inst✝² : SeminormedAddCommGroup γ
inst✝¹ : NormedSpace α γ
inst✝ : LinearMapClass F α β γ
f : F
⊢ Module α β
[PROOFSTEP]
infer_instance
[GOAL]
case refine_2
α✝ : Type u_1
β✝ : Type u_2
γ✝ : Type u_3
ι : Type u_4
F : Type u_5
α : Type u_6
β : Type u_7
γ : Type u_8
inst✝⁵ : NormedField α
inst✝⁴ : AddCommGroup β
inst✝³ : Module α β
inst✝² : SeminormedAddCommGroup γ
inst✝¹ : NormedSpace α γ
inst✝ : LinearMapClass F α β γ
f : F
⊢ ∀ (a : α) (b : β), ‖a • b‖ ≤ ‖a‖ * ‖b‖
[PROOFSTEP]
intro a b
[GOAL]
case refine_2
α✝ : Type u_1
β✝ : Type u_2
γ✝ : Type u_3
ι : Type u_4
F : Type u_5
α : Type u_6
β : Type u_7
γ : Type u_8
inst✝⁵ : NormedField α
inst✝⁴ : AddCommGroup β
inst✝³ : Module α β
inst✝² : SeminormedAddCommGroup γ
inst✝¹ : NormedSpace α γ
inst✝ : LinearMapClass F α β γ
f : F
a : α
b : β
⊢ ‖a • b‖ ≤ ‖a‖ * ‖b‖
[PROOFSTEP]
change ‖(⇑f) (a • b)‖ ≤ ‖a‖ * ‖(⇑f) b‖
[GOAL]
case refine_2
α✝ : Type u_1
β✝ : Type u_2
γ✝ : Type u_3
ι : Type u_4
F : Type u_5
α : Type u_6
β : Type u_7
γ : Type u_8
inst✝⁵ : NormedField α
inst✝⁴ : AddCommGroup β
inst✝³ : Module α β
inst✝² : SeminormedAddCommGroup γ
inst✝¹ : NormedSpace α γ
inst✝ : LinearMapClass F α β γ
f : F
a : α
b : β
⊢ ‖↑f (a • b)‖ ≤ ‖a‖ * ‖↑f b‖
[PROOFSTEP]
exact (map_smul f a b).symm ▸ norm_smul_le a (f b)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁶ : NormedField α
E : Type u_5
inst✝⁵ : NormedAddCommGroup E
inst✝⁴ : NormedSpace α E
F : Type u_6
inst✝³ : NormedAddCommGroup F
inst✝² : NormedSpace α F
inst✝¹ : NormedSpace ℝ E
inst✝ : Nontrivial E
c : ℝ
hc : 0 ≤ c
⊢ ∃ x, ‖x‖ = c
[PROOFSTEP]
rcases exists_ne (0 : E) with ⟨x, hx⟩
[GOAL]
case intro
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁶ : NormedField α
E : Type u_5
inst✝⁵ : NormedAddCommGroup E
inst✝⁴ : NormedSpace α E
F : Type u_6
inst✝³ : NormedAddCommGroup F
inst✝² : NormedSpace α F
inst✝¹ : NormedSpace ℝ E
inst✝ : Nontrivial E
c : ℝ
hc : 0 ≤ c
x : E
hx : x ≠ 0
⊢ ∃ x, ‖x‖ = c
[PROOFSTEP]
rw [← norm_ne_zero_iff] at hx
[GOAL]
case intro
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁶ : NormedField α
E : Type u_5
inst✝⁵ : NormedAddCommGroup E
inst✝⁴ : NormedSpace α E
F : Type u_6
inst✝³ : NormedAddCommGroup F
inst✝² : NormedSpace α F
inst✝¹ : NormedSpace ℝ E
inst✝ : Nontrivial E
c : ℝ
hc : 0 ≤ c
x : E
hx : ‖x‖ ≠ 0
⊢ ∃ x, ‖x‖ = c
[PROOFSTEP]
use c • ‖x‖⁻¹ • x
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁶ : NormedField α
E : Type u_5
inst✝⁵ : NormedAddCommGroup E
inst✝⁴ : NormedSpace α E
F : Type u_6
inst✝³ : NormedAddCommGroup F
inst✝² : NormedSpace α F
inst✝¹ : NormedSpace ℝ E
inst✝ : Nontrivial E
c : ℝ
hc : 0 ≤ c
x : E
hx : ‖x‖ ≠ 0
⊢ ‖c • ‖x‖⁻¹ • x‖ = c
[PROOFSTEP]
simp [norm_smul, Real.norm_of_nonneg hc, abs_of_nonneg hc, inv_mul_cancel hx]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁶ : NormedField α
E : Type u_5
inst✝⁵ : NormedAddCommGroup E
inst✝⁴ : NormedSpace α E
F : Type u_6
inst✝³ : NormedAddCommGroup F
inst✝² : NormedSpace α F
inst✝¹ : NormedSpace ℝ E
inst✝ : Nontrivial E
x : E
r : ℝ
⊢ interior (closedBall x r) = ball x r
[PROOFSTEP]
rcases eq_or_ne r 0 with (rfl | hr)
[GOAL]
case inl
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁶ : NormedField α
E : Type u_5
inst✝⁵ : NormedAddCommGroup E
inst✝⁴ : NormedSpace α E
F : Type u_6
inst✝³ : NormedAddCommGroup F
inst✝² : NormedSpace α F
inst✝¹ : NormedSpace ℝ E
inst✝ : Nontrivial E
x : E
⊢ interior (closedBall x 0) = ball x 0
[PROOFSTEP]
rw [closedBall_zero, ball_zero, interior_singleton]
[GOAL]
case inr
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁶ : NormedField α
E : Type u_5
inst✝⁵ : NormedAddCommGroup E
inst✝⁴ : NormedSpace α E
F : Type u_6
inst✝³ : NormedAddCommGroup F
inst✝² : NormedSpace α F
inst✝¹ : NormedSpace ℝ E
inst✝ : Nontrivial E
x : E
r : ℝ
hr : r ≠ 0
⊢ interior (closedBall x r) = ball x r
[PROOFSTEP]
exact interior_closedBall x hr
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁶ : NormedField α
E : Type u_5
inst✝⁵ : NormedAddCommGroup E
inst✝⁴ : NormedSpace α E
F : Type u_6
inst✝³ : NormedAddCommGroup F
inst✝² : NormedSpace α F
inst✝¹ : NormedSpace ℝ E
inst✝ : Nontrivial E
x : E
r : ℝ
⊢ frontier (closedBall x r) = sphere x r
[PROOFSTEP]
rw [frontier, closure_closedBall, interior_closedBall' x r, closedBall_diff_ball]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁶ : NormedField α
E : Type u_5
inst✝⁵ : NormedAddCommGroup E
inst✝⁴ : NormedSpace α E
F : Type u_6
inst✝³ : NormedAddCommGroup F
inst✝² : NormedSpace α F
inst✝¹ : NormedSpace ℝ E
inst✝ : Nontrivial E
x : E
r : ℝ
⊢ interior (sphere x r) = ∅
[PROOFSTEP]
rw [← frontier_closedBall' x, interior_frontier isClosed_ball]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁶ : NormedField α
E : Type u_5
inst✝⁵ : NormedAddCommGroup E
inst✝⁴ : NormedSpace α E
F : Type u_6
inst✝³ : NormedAddCommGroup F
inst✝² : NormedSpace α F
inst✝¹ : NormedSpace ℝ E
inst✝ : Nontrivial E
x : E
r : ℝ
⊢ frontier (sphere x r) = sphere x r
[PROOFSTEP]
rw [isClosed_sphere.frontier_eq, interior_sphere' x, diff_empty]
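In conventional notation, the ball/sphere facts established in the goals above say that, for a real normed space (with r ≠ 0 or nontriviality of the space, as in the hypotheses of each trace), writing B, B̄ and S for the open ball, closed ball and sphere:

\begin{align*}
\overline{B(x,r)} &= \bar{B}(x,r), & \operatorname{int}\bar{B}(x,r) &= B(x,r), \\
\partial B(x,r) &= S(x,r), & \partial\bar{B}(x,r) &= S(x,r), \\
\operatorname{int} S(x,r) &= \varnothing, & \partial S(x,r) &= S(x,r).
\end{align*}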
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
𝕜 : Type u_5
E : Type u_6
inst✝³ : NontriviallyNormedField 𝕜
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace 𝕜 E
inst✝ : Nontrivial E
c : ℝ
⊢ ∃ x, c < ‖x‖
[PROOFSTEP]
rcases exists_ne (0 : E) with ⟨x, hx⟩
[GOAL]
case intro
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
𝕜 : Type u_5
E : Type u_6
inst✝³ : NontriviallyNormedField 𝕜
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace 𝕜 E
inst✝ : Nontrivial E
c : ℝ
x : E
hx : x ≠ 0
⊢ ∃ x, c < ‖x‖
[PROOFSTEP]
rcases NormedField.exists_lt_norm 𝕜 (c / ‖x‖) with ⟨r, hr⟩
[GOAL]
case intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
𝕜 : Type u_5
E : Type u_6
inst✝³ : NontriviallyNormedField 𝕜
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace 𝕜 E
inst✝ : Nontrivial E
c : ℝ
x : E
hx : x ≠ 0
r : 𝕜
hr : c / ‖x‖ < ‖r‖
⊢ ∃ x, c < ‖x‖
[PROOFSTEP]
use r • x
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
𝕜 : Type u_5
E : Type u_6
inst✝³ : NontriviallyNormedField 𝕜
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace 𝕜 E
inst✝ : Nontrivial E
c : ℝ
x : E
hx : x ≠ 0
r : 𝕜
hr : c / ‖x‖ < ‖r‖
⊢ c < ‖r • x‖
[PROOFSTEP]
rwa [norm_smul, ← _root_.div_lt_iff]
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
𝕜 : Type u_5
E : Type u_6
inst✝³ : NontriviallyNormedField 𝕜
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace 𝕜 E
inst✝ : Nontrivial E
c : ℝ
x : E
hx : x ≠ 0
r : 𝕜
hr : c / ‖x‖ < ‖r‖
⊢ 0 < ‖x‖
[PROOFSTEP]
rwa [norm_pos_iff]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
𝕜 : Type u_5
𝕜'✝ : Type u_6
inst✝⁴ : NormedField 𝕜
inst✝³ : SeminormedRing 𝕜'✝
inst✝² : NormedAlgebra 𝕜 𝕜'✝
𝕜' : Type ?u.294395
inst✝¹ : NormedRing 𝕜'
inst✝ : NormedAlgebra 𝕜 𝕜'
⊢ NormedSpace 𝕜 𝕜'
[PROOFSTEP]
infer_instance
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
𝕜 : Type u_5
𝕜' : Type u_6
inst✝² : NormedField 𝕜
inst✝¹ : SeminormedRing 𝕜'
inst✝ : NormedAlgebra 𝕜 𝕜'
x : 𝕜
⊢ ‖↑(algebraMap 𝕜 𝕜') x‖ = ‖x‖ * ‖1‖
[PROOFSTEP]
rw [Algebra.algebraMap_eq_smul_one]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
𝕜 : Type u_5
𝕜' : Type u_6
inst✝² : NormedField 𝕜
inst✝¹ : SeminormedRing 𝕜'
inst✝ : NormedAlgebra 𝕜 𝕜'
x : 𝕜
⊢ ‖x • 1‖ = ‖x‖ * ‖1‖
[PROOFSTEP]
exact norm_smul _ _
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
𝕜 : Type u_5
𝕜' : Type u_6
inst✝³ : NormedField 𝕜
inst✝² : SeminormedRing 𝕜'
inst✝¹ : NormedAlgebra 𝕜 𝕜'
inst✝ : NormOneClass 𝕜'
x : 𝕜
⊢ ‖↑(algebraMap 𝕜 𝕜') x‖ = ‖x‖
[PROOFSTEP]
rw [norm_algebraMap, norm_one, mul_one]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
𝕜 : Type u_5
𝕜' : Type u_6
inst✝³ : NormedField 𝕜
inst✝² : SeminormedRing 𝕜'
inst✝¹ : NormedAlgebra 𝕜 𝕜'
inst✝ : NormOneClass 𝕜'
⊢ Isometry ↑(algebraMap 𝕜 𝕜')
[PROOFSTEP]
refine' Isometry.of_dist_eq fun x y => _
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
𝕜 : Type u_5
𝕜' : Type u_6
inst✝³ : NormedField 𝕜
inst✝² : SeminormedRing 𝕜'
inst✝¹ : NormedAlgebra 𝕜 𝕜'
inst✝ : NormOneClass 𝕜'
x y : 𝕜
⊢ dist (↑(algebraMap 𝕜 𝕜') x) (↑(algebraMap 𝕜 𝕜') y) = dist x y
[PROOFSTEP]
rw [dist_eq_norm, dist_eq_norm, ← RingHom.map_sub, norm_algebraMap']
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
𝕜✝ : Type u_5
𝕜' : Type u_6
inst✝⁵ : NormedField 𝕜✝
inst✝⁴ : SeminormedRing 𝕜'
inst✝³ : NormedAlgebra 𝕜✝ 𝕜'
𝕜 : Type ?u.479633
inst✝² : NormedDivisionRing 𝕜
inst✝¹ : CharZero 𝕜
inst✝ : NormedAlgebra ℝ 𝕜
q : ℚ
x : 𝕜
⊢ ‖q • x‖ ≤ ‖q‖ * ‖x‖
[PROOFSTEP]
rw [← smul_one_smul ℝ q x, Rat.smul_one_eq_coe, norm_smul, Rat.norm_cast_real]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
𝕜 : Type u_5
𝕜' : Type u_6
inst✝² : NormedField 𝕜
inst✝¹ : SeminormedRing 𝕜'
inst✝ : NormedAlgebra 𝕜 𝕜'
q : 𝕜
x✝ : PUnit
⊢ ‖q • x✝‖ ≤ ‖q‖ * ‖x✝‖
[PROOFSTEP]
simp only [norm_eq_zero, mul_zero, le_refl]
[GOAL]
α✝ : Type u_1
β✝ : Type u_2
γ✝ : Type u_3
ι : Type u_4
F : Type u_5
α : Type u_6
β : Type u_7
γ : Type u_8
inst✝⁵ : NormedField α
inst✝⁴ : Ring β
inst✝³ : Algebra α β
inst✝² : SeminormedRing γ
inst✝¹ : NormedAlgebra α γ
inst✝ : NonUnitalAlgHomClass F α β γ
f : F
⊢ NormedAlgebra α β
[PROOFSTEP]
refine @NormedAlgebra.mk (𝕜 := α) (𝕜' := β) _ ?_ ?_ ?_
[GOAL]
case refine_1
α✝ : Type u_1
β✝ : Type u_2
γ✝ : Type u_3
ι : Type u_4
F : Type u_5
α : Type u_6
β : Type u_7
γ : Type u_8
inst✝⁵ : NormedField α
inst✝⁴ : Ring β
inst✝³ : Algebra α β
inst✝² : SeminormedRing γ
inst✝¹ : NormedAlgebra α γ
inst✝ : NonUnitalAlgHomClass F α β γ
f : F
⊢ Algebra α β
[PROOFSTEP]
infer_instance
[GOAL]
case refine_2
α✝ : Type u_1
β✝ : Type u_2
γ✝ : Type u_3
ι : Type u_4
F : Type u_5
α : Type u_6
β : Type u_7
γ : Type u_8
inst✝⁵ : NormedField α
inst✝⁴ : Ring β
inst✝³ : Algebra α β
inst✝² : SeminormedRing γ
inst✝¹ : NormedAlgebra α γ
inst✝ : NonUnitalAlgHomClass F α β γ
f : F
⊢ ∀ (r : α) (x : β), ‖r • x‖ ≤ ‖r‖ * ‖x‖
[PROOFSTEP]
intro a b
[GOAL]
case refine_2
α✝ : Type u_1
β✝ : Type u_2
γ✝ : Type u_3
ι : Type u_4
F : Type u_5
α : Type u_6
β : Type u_7
γ : Type u_8
inst✝⁵ : NormedField α
inst✝⁴ : Ring β
inst✝³ : Algebra α β
inst✝² : SeminormedRing γ
inst✝¹ : NormedAlgebra α γ
inst✝ : NonUnitalAlgHomClass F α β γ
f : F
a : α
b : β
⊢ ‖a • b‖ ≤ ‖a‖ * ‖b‖
[PROOFSTEP]
change ‖(⇑f) (a • b)‖ ≤ ‖a‖ * ‖(⇑f) b‖
[GOAL]
case refine_2
α✝ : Type u_1
β✝ : Type u_2
γ✝ : Type u_3
ι : Type u_4
F : Type u_5
α : Type u_6
β : Type u_7
γ : Type u_8
inst✝⁵ : NormedField α
inst✝⁴ : Ring β
inst✝³ : Algebra α β
inst✝² : SeminormedRing γ
inst✝¹ : NormedAlgebra α γ
inst✝ : NonUnitalAlgHomClass F α β γ
f : F
a : α
b : β
⊢ ‖↑f (a • b)‖ ≤ ‖a‖ * ‖↑f b‖
[PROOFSTEP]
exact (map_smul f a b).symm ▸ norm_smul_le a (f b)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
𝕜 : Type u_5
𝕜' : Type u_6
inst✝⁴ : NormedField 𝕜
inst✝³ : NormedField 𝕜'
inst✝² : NormedAlgebra 𝕜 𝕜'
E : Type u_7
inst✝¹ : SeminormedAddCommGroup E
inst✝ : NormedSpace 𝕜' E
src✝ : Module 𝕜 (RestrictScalars 𝕜 𝕜' E) := module 𝕜 𝕜' E
c : 𝕜
x : RestrictScalars 𝕜 𝕜' E
⊢ ‖↑(algebraMap 𝕜 𝕜') c‖ * ‖x‖ = ‖c‖ * ‖x‖
[PROOFSTEP]
rw [norm_algebraMap']
|
/**
*
* @file core_dgbtype2cb.c
*
* PLASMA core_blas kernel
* PLASMA is a software package provided by Univ. of Tennessee,
* Univ. of California Berkeley and Univ. of Colorado Denver
*
* @version 2.6.0
* @author Azzam Haidar
* @date 2012-12-15
* @generated d Tue Jan 7 11:44:50 2014
*
**/
#include <lapacke.h>
#include "common.h"
//#define AU(m,n) &(A[(m) + LDA*(n)])
//#define AL(m,n) &(A[(m) + LDA*(n)])
#define AL(m_, n_) (A + NB + LDA * (n_) + ((m_)-(n_)))
#define AU(m_, n_) (A + NB + LDA * (n_) + ((m_)-(n_)+NB))
#define VQ(m) (VQ + (m))
#define VP(m) (VP + (m))
#define TAUQ(m) (TAUQ + (m))
#define TAUP(m) (TAUP + (m))
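/*
 * Layout note (inferred from the macros above rather than stated in the
 * original source): A is stored column-major with leading dimension LDA,
 * so AL(m,n) addresses the lower-band element (m,n) at row NB + (m-n) of
 * column n, and AU(m,n) addresses the corresponding upper-band element NB
 * rows further down. VQ/VP and TAUQ/TAUP are plain linear offsets.
 */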
/***************************************************************************//**
*
* @ingroup CORE_double
*
 * CORE_dgbtype2cb is a kernel that operates on a region (triangle) of data
 * bounded by st and ed. This kernel applies the right update remaining from
 * the type 1 kernel; that update creates a bulge, so this kernel eliminates
 * the first column of the created bulge and performs the corresponding left
 * update.
 *
 * All details are available in the technical report or the SC11 paper.
* Azzam Haidar, Hatem Ltaief, and Jack Dongarra. 2011.
* Parallel reduction to condensed forms for symmetric eigenvalue problems
* using aggregated fine-grained and memory-aware kernels. In Proceedings
* of 2011 International Conference for High Performance Computing,
* Networking, Storage and Analysis (SC '11). ACM, New York, NY, USA, ,
* Article 8 , 11 pages.
* http://doi.acm.org/10.1145/2063384.2063394
*
*******************************************************************************
*
* @param[in] N
* The order of the matrix A.
*
* @param[in] NB
* The size of the band.
*
* @param[in, out] A
* A pointer to the matrix A of size (3*NB+1)-by-N.
*
* @param[in] LDA
* The leading dimension of the matrix A. LDA >= max(1,3*NB+1)
*
 * @param[in, out] VQ, VP
 *          double arrays, dimension N if only eigenvalues are requested,
 *          or (LDV*blkcnt*Vblksiz) if eigenvectors are requested.
 *          The Householder reflectors of the previous type 1 kernel are
 *          used here to continue the update; new ones are then generated
 *          to eliminate the bulge and stored in these arrays.
 *
 * @param[in, out] TAUQ, TAUP
 *          double arrays, dimension (N).
 *          The scalar factors of the Householder reflectors of the
 *          previous type 1 kernel are used here to continue the update;
 *          new ones are then generated to eliminate the bulge and stored
 *          in these arrays.
*
 * @param[in] st
 *          The start index where this kernel will operate.
 *
 * @param[in] ed
 *          The end index where this kernel will operate.
*
 * @param[in] sweep
 *          The sweep number that is eliminated. It serves to calculate the
 *          position where the Vs and Ts are stored.
 *
 * @param[in] Vblksiz
 *          Constant corresponding to the blocking used when applying the Vs.
 *          It serves to calculate the position where the Vs and Ts are
 *          stored.
 *
 * @param[in] WANTZ
 *          Constant indicating whether only eigenvalues are requested, or
 *          both eigenvalues and eigenvectors.
*
* @param[in] WORK
* Workspace of size nb.
*
*******************************************************************************
*
* @return
* \retval PLASMA_SUCCESS successful exit
* \retval <0 if -i, the i-th argument had an illegal value
*
******************************************************************************/
/***************************************************************************
* TYPE 2-BAND-bidiag Lower/Upper columnwise-Householder
***************************************************************************/
void
CORE_dgbtype2cb(PLASMA_enum uplo, int N, int NB,
double *A, int LDA,
double *VQ, double *TAUQ,
double *VP, double *TAUP,
int st, int ed, int sweep, int Vblksiz, int WANTZ,
double *WORK)
{
double ctmp;
int i, J1, J2, len, lem, LDX;
int blkid, vpos, taupos, tpos;
LDX = LDA-1;
J1 = ed+1;
J2 = min(ed+NB,N-1);
lem = ed-st+1;
len = J2-J1+1;
if( uplo == PlasmaUpper ) {
/* ========================
* UPPER CASE
* ========================*/
if( len > 0 ) {
if( WANTZ == 0 ) {
vpos = ((sweep+1)%2)*N + st;
taupos = ((sweep+1)%2)*N + st;
} else {
findVTpos(N, NB, Vblksiz, sweep, st,
&vpos, &taupos, &tpos, &blkid);
}
/* Apply remaining left update coming from type1/3_upper */
ctmp = (*TAUQ(taupos));
LAPACKE_dlarfx_work(LAPACK_COL_MAJOR, lapack_const(PlasmaLeft),
lem, len, VQ(vpos), ctmp, AU(st, J1), LDX, WORK);
}
if( len > 1 ) {
if( WANTZ == 0 ) {
vpos = ((sweep+1)%2)*N + J1;
taupos = ((sweep+1)%2)*N + J1;
} else {
findVTpos(N,NB,Vblksiz,sweep,J1, &vpos, &taupos, &tpos, &blkid);
}
/* Remove the top row of the created bulge */
*VP(vpos) = 1.;
for(i=1; i<len; i++){
*VP(vpos+i) = (*AU(st, J1+i));
*AU(st, J1+i) = 0.;
}
/* Eliminate the row at st */
ctmp = (*AU(st, J1));
LAPACKE_dlarfg_work(len, &ctmp, VP(vpos+1), 1, TAUP(taupos) );
*AU(st, J1) = ctmp;
/*
* Apply Right on A(J1:J2,st+1:ed).
* We decrease lem because we start at row st+1 instead of st;
* row st is the row that has been removed.
*/
lem = lem-1;
ctmp = *TAUP(taupos);
LAPACKE_dlarfx_work(LAPACK_COL_MAJOR, lapack_const(PlasmaRight),
lem, len, VP(vpos), ctmp, AU(st+1, J1), LDX, WORK);
}
}else{
/* ========================
* LOWER CASE
* ========================*/
if( len > 0 ) {
if( WANTZ == 0 ) {
vpos = ((sweep+1)%2)*N + st;
taupos = ((sweep+1)%2)*N + st;
} else {
findVTpos(N, NB, Vblksiz, sweep, st,
&vpos, &taupos, &tpos, &blkid);
}
/* Apply remaining right update coming from type1/3_lower */
ctmp = (*TAUP(taupos));
LAPACKE_dlarfx_work(LAPACK_COL_MAJOR, lapack_const(PlasmaRight),
len, lem, VP(vpos), ctmp, AL(J1, st), LDX, WORK);
}
if( len > 1 ) {
if( WANTZ == 0 ) {
vpos = ((sweep+1)%2)*N + J1;
taupos = ((sweep+1)%2)*N + J1;
} else {
findVTpos(N,NB,Vblksiz,sweep,J1, &vpos, &taupos, &tpos, &blkid);
}
/* Remove the first column of the created bulge */
*VQ(vpos) = 1.;
memcpy(VQ(vpos+1), AL(J1+1, st), (len-1)*sizeof(double));
memset(AL(J1+1, st), 0, (len-1)*sizeof(double));
/* Eliminate the col at st */
LAPACKE_dlarfg_work(len, AL(J1, st), VQ(vpos+1), 1, TAUQ(taupos) );
/*
* Apply left on A(J1:J2,st+1:ed).
* We decrease lem because we start at col st+1 instead of st;
* col st is the col that has been removed.
*/
lem = lem-1;
ctmp = (*TAUQ(taupos));
LAPACKE_dlarfx_work(LAPACK_COL_MAJOR, lapack_const(PlasmaLeft),
len, lem, VQ(vpos), ctmp, AL(J1, st+1), LDX, WORK);
}
}
/* end of uplo case */
return;
}
/***************************************************************************/
#undef AU
#undef AL
#undef VQ
#undef VP
#undef TAUQ
#undef TAUP
|
#include <boost/thread/thread_functors.hpp>
|
data Vect : Nat -> Type -> Type where
Nil : Vect Z a
(::) : a -> Vect k a -> Vect (S k) a
%name Vect xs, ys, zs
{- 1 -}
headUnequal : DecEq a => {xs : Vect n a} -> {ys : Vect n a} ->
(contra : (x = y) -> Void) -> (x :: xs) = (y :: ys) -> Void
headUnequal contra Refl = contra Refl
tailUnequal : DecEq a => {xs : Vect n a} -> {ys : Vect n a} ->
(contra : (xs = ys) -> Void) -> (x :: xs) = (y :: ys) -> Void
tailUnequal contra Refl = contra Refl
{- 2 -}
DecEq a => DecEq (Vect n a) where
decEq [] [] = Yes Refl
decEq (x :: xs) (y :: ys) = case decEq x y of
No contra => No (headUnequal contra)
Yes Refl => case decEq xs ys of
Yes Refl => Yes Refl
No contra => No (tailUnequal contra)
|
subroutine calc_gradient(t0,IPP)
use global, only: uu,vv,dic,djc,dkc,dif,djf,dkf,pi2c,pj2c,pk2c,pi2f,pj2f,grad,npts,dxg_r,dyg_r
implicit none
integer*8, intent(in) :: IPP,t0
real*8 :: tmp0,tmp1,dx,dy,dr
integer*8:: i,j,k,ip
!$OMP PARALLEL DO PRIVATE(ip,i,j,k,dx,dy,dr,tmp0,tmp1) SHARED(IPP,t0,dic,djc,dkc,uu,vv,grad,dxg_r,dyg_r) SCHEDULE(dynamic)
do ip=1,npts
dx=dif(ip,IPP)
dy=djc(ip,IPP)
dr=dkc(ip,IPP)
!du/dx
i=pi2f(ip,IPP)
j=pj2c(ip,IPP)
k=pk2c(ip,IPP)
call interp_bilinear(dy,dr,uu(i,j:j+1,k:k+1,t0),tmp0)
call interp_bilinear(dy,dr,uu(i+1,j:j+1,k:k+1,t0),tmp1)
grad(ip,1,IPP)=(tmp1-tmp0)*dxg_r(i,j)
!du/dy
call interp_bilinear(dx,dr,uu(i:i+1,j,k:k+1,t0),tmp0)
call interp_bilinear(dx,dr,uu(i:i+1,j+1,k:k+1,t0),tmp1)
grad(ip,2,IPP)=(tmp1-tmp0)*dyg_r(i,j)
!dv/dx
i=pi2c(ip,IPP)
j=pj2f(ip,IPP)
dy=djf(ip,IPP)
dx=dic(ip,IPP)
call interp_bilinear(dy,dr,vv(i,j:j+1,k:k+1,t0),tmp0)
call interp_bilinear(dy,dr,vv(i+1,j:j+1,k:k+1,t0),tmp1)
grad(ip,3,IPP)=(tmp1-tmp0)*dxg_r(i,j)
!dv/dy
call interp_bilinear(dx,dr,vv(i:i+1,j,k:k+1,t0),tmp0)
call interp_bilinear(dx,dr,vv(i:i+1,j+1,k:k+1,t0),tmp1)
grad(ip,4,IPP)=(tmp1-tmp0)*dyg_r(i,j)
enddo
!$OMP END PARALLEL DO
end subroutine calc_gradient
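The helper interp_bilinear is not shown in this excerpt. The calls above pass two fractional offsets and a 2x2 stencil, so it presumably implements standard bilinear interpolation; a minimal Python sketch under that assumption:

def interp_bilinear(dx, dy, f):
    """Bilinearly interpolate a 2x2 stencil f at fractional offsets dx, dy in [0, 1]."""
    return ((1 - dx) * (1 - dy) * f[0][0]
            + (1 - dx) * dy * f[0][1]
            + dx * (1 - dy) * f[1][0]
            + dx * dy * f[1][1])

# Example: the value at the centre of a unit cell is the mean of its corners:
# interp_bilinear(0.5, 0.5, [[0, 1], [2, 3]]) == 1.5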
|
open import Nat
open import Prelude
open import List
open import core
open import judgemental-erase
open import checks
module constructability where
-- we construct expressions and types by induction on their
-- structure. for each sub term, we call the relevant theorem, then
-- assemble the results with carefully chosen lists of actions that allow
-- us to call the appropriate zipper lemmas and maintain well-typedness
-- at every stage of the construction.
--
-- the proof term at each stage except subsumption is, morally, just the
-- mapping of the fragment of the action semantics used by the constructs
-- in the list in the current formation context into the list monoid.
-- construction of types
construct-type : (t : τ̇) → Σ[ L ∈ List action ] runtype (▹ ⦇-⦈ ◃) L (▹ t ◃)
construct-type num = [ construct num ] , DoType TMConNum DoRefl
construct-type ⦇-⦈ = [ del ] , DoType TMDel DoRefl
construct-type (t1 ==> t2) with construct-type t1 | construct-type t2
... | (l1 , ih1) | (l2 , ih2) = l1 ++ construct arrow :: l2 ++ [ move parent ] ,
runtype++ ih1
(DoType TMConArrow
(runtype++ (ziplem-tmarr2 ih2)
(DoType TMArrParent2 DoRefl)))
mutual
-- construction of expressions in synthetic positions
construct-synth : {Γ : ·ctx} {t : τ̇} {e : ė} → (Γ ⊢ e => t) →
Σ[ L ∈ List action ]
runsynth Γ ▹ ⦇-⦈ ◃ ⦇-⦈ L ▹ e ◃ t
-- the three base cases
construct-synth (SVar x) = [ construct (var _) ] , DoSynth (SAConVar x) DoRefl
construct-synth SNum = [ construct (numlit _) ] , DoSynth SAConNumlit DoRefl
construct-synth SEHole = [ del ] , DoSynth SADel DoRefl
-- the inductive cases
construct-synth {t = t} (SAsc x) with construct-type t | construct-ana x
... | (l1 , ih1) | (l2 , ih2) = construct asc :: (l1 ++ move parent :: move (child 1) :: (l2 ++ [ move parent ])) ,
DoSynth SAConAsc
(runsynth++ (ziplem-asc2 ETTop ETTop ih1)
(DoSynth (SAMove EMAscParent2)
(DoSynth (SAMove EMAscChild1)
(runsynth++ (ziplem-asc1 ih2)
(DoSynth (SAMove EMAscParent1) DoRefl)))))
construct-synth (SAp e1 m e2) with construct-synth e1 | construct-ana e2
... | (l1 , ih1) | (l2 , ih2) = l1 ++ construct ap :: (l2 ++ [ move parent ]) ,
runsynth++ ih1
(DoSynth (SAConApArr m)
(runsynth++ (ziplem-ap2 e1 m ih2)
(DoSynth (SAMove EMApParent2) DoRefl)))
construct-synth (SPlus e1 e2 ) with construct-ana e1 | construct-ana e2
... | (l1 , ih1) | (l2 , ih2) = construct plus :: (l2 ++ move parent :: move (child 1) :: (l1 ++ [ move parent ])) ,
DoSynth (SAConPlus1 TCHole2)
(runsynth++ (ziplem-plus2 ih2)
(DoSynth (SAMove EMPlusParent2)
(DoSynth (SAMove EMPlusChild1)
(runsynth++ (ziplem-plus1 ih1)
(DoSynth (SAMove EMPlusParent1) DoRefl)))))
construct-synth (SNEHole wt) with construct-synth wt
... | (l , ih) = l ++ construct nehole :: move parent :: [] ,
runsynth++ ih
(DoSynth SAConNEHole (DoSynth (SAMove EMNEHoleParent) DoRefl))
-- construction of expressions in analytic positions
construct-ana : {Γ : ·ctx} {t : τ̇} {e : ė} → (Γ ⊢ e <= t) →
Σ[ L ∈ List action ]
runana Γ ▹ ⦇-⦈ ◃ L ▹ e ◃ t
construct-ana (ASubsume x c) with construct-synth x
... | (l , ih) = construct nehole :: l ++ (move parent :: finish :: []) ,
DoAna (AASubsume EETop SEHole SAConNEHole TCHole1)
(runana++ (ziplem-nehole-b SEHole c ih)
(DoAna (AAMove EMNEHoleParent)
(DoAna (AAFinish (ASubsume x c)) DoRefl)))
construct-ana (ALam a m e) with construct-ana e
... | (l , ih) = construct (lam _) :: (l ++ [ move parent ]) ,
DoAna (AAConLam1 a m)
(runana++ (ziplem-lam a m ih)
(DoAna (AAMove EMLamParent) DoRefl))
|
function [g]= spm_gx_adem_write(x,v,a,P)
% returns the prediction for a two-joint arm (proprioception and vision)
% FORMAT [g]= spm_gx_adem_write(x,v,a,P)
%
% x - hidden states:
% x(1) - joint angle
% x(2) - joint angle
% x(3) - angular velocity
% x(4) - angular velocity
% v - causal states:
% v(1) - exogenous force (x)
% v(2) - exogenous force (y)
% a - action
% P - parameters
%
% g - sensations:
% g(1) - joint angle (proprioception)
% g(2) - joint angle (proprioception)
% g(3) - arm location (visual)
% g(4) - arm location (visual)
%
% As for spm_dem_reach but with no visual target
%__________________________________________________________________________
% Copyright (C) 2008 Wellcome Trust Centre for Neuroimaging
% Karl Friston
% $Id: spm_gx_adem_write.m 3901 2010-05-27 16:14:36Z karl $
% evaluate positions
%--------------------------------------------------------------------------
J = spm_dem_reach_x2J(x);
% stretch (angular) and visual (positional) information about motor plant
%==========================================================================
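% (The visual channels below are, presumably, the elbow position J{1} and the
% finger position J{1} + J{2}, as computed by spm_dem_reach_x2J.)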
g = [x; J{1}; J{1} + J{2}];
|
from functools import wraps
import numpy as np
from skimage import measure
from typing import Callable, List, Tuple, Union
import autoarray as aa
from autoconf.dictable import Dictable
from autogalaxy.util.shear_field import ShearYX2D
from autogalaxy.util.shear_field import ShearYX2DIrregular
def precompute_jacobian(func):
@wraps(func)
def wrapper(lensing_obj, grid, jacobian=None):
if jacobian is None:
jacobian = lensing_obj.jacobian_from(grid=grid)
return func(lensing_obj, grid, jacobian)
return wrapper
def evaluation_grid(func):
@wraps(func)
def wrapper(
lensing_obj, grid, pixel_scale: Union[Tuple[float, float], float] = 0.05
):
if hasattr(grid, "is_evaluation_grid"):
if grid.is_evaluation_grid:
return func(lensing_obj, grid, pixel_scale)
pixel_scale_ratio = grid.pixel_scale / pixel_scale
zoom_shape_native = grid.mask.zoom_shape_native
shape_native = (
int(pixel_scale_ratio * zoom_shape_native[0]),
int(pixel_scale_ratio * zoom_shape_native[1]),
)
grid = aa.Grid2D.uniform(
shape_native=shape_native,
pixel_scales=(pixel_scale, pixel_scale),
origin=grid.mask.zoom_offset_scaled,
)
grid.is_evaluation_grid = True
return func(lensing_obj, grid, pixel_scale)
return wrapper
class OperateDeflections(Dictable):
"""
Packages methods which manipulate the 2D deflection angle map returned from the `deflections_yx_2d_from` function
of a mass object (e.g. a `MassProfile`, `Galaxy`, `Plane`).
The majority of methods compute, from the 2D deflection angle map, lensing quantities like a 2D
shear field, magnification map or the Einstein radius.
The methods in `CalcLens` are passed to the mass object to provide a concise API.
Parameters
----------
deflections_yx_2d_from
The function which returns the mass object's 2D deflection angles.
"""
def deflections_yx_2d_from(self, grid: aa.type.Grid2DLike):
raise NotImplementedError
def __eq__(self, other):
return self.__dict__ == other.__dict__ and self.__class__ is other.__class__
@precompute_jacobian
def tangential_eigen_value_from(self, grid, jacobian=None) -> aa.Array2D:
"""
Returns the tangential eigen values of the lensing Jacobian, which are given by the expression:
`tangential_eigen_value = 1 - convergence - shear`
Parameters
----------
grid
The 2D grid of (y,x) arc-second coordinates the deflection angles and tangential eigen values are computed
on.
jacobian
A precomputed lensing jacobian, which is passed throughout the `CalcLens` functions for efficiency.
"""
convergence = self.convergence_2d_via_jacobian_from(
grid=grid, jacobian=jacobian
)
shear_yx = self.shear_yx_2d_via_jacobian_from(grid=grid, jacobian=jacobian)
return aa.Array2D(array=1 - convergence - shear_yx.magnitudes, mask=grid.mask)
@precompute_jacobian
def radial_eigen_value_from(self, grid, jacobian=None) -> aa.Array2D:
"""
Returns the radial eigen values of the lensing Jacobian, which are given by the expression:
`radial_eigen_value = 1 - convergence + shear`
Parameters
----------
grid
The 2D grid of (y,x) arc-second coordinates the deflection angles and radial eigen values are computed on.
jacobian
A precomputed lensing jacobian, which is passed throughout the `CalcLens` functions for efficiency.
"""
convergence = self.convergence_2d_via_jacobian_from(
grid=grid, jacobian=jacobian
)
shear = self.shear_yx_2d_via_jacobian_from(grid=grid, jacobian=jacobian)
return aa.Array2D(array=1 - convergence + shear.magnitudes, mask=grid.mask)
def magnification_2d_from(self, grid) -> aa.Array2D:
"""
Returns the 2D magnification map of the lensing object, which is computed as the inverse of the determinant of the
jacobian.
Parameters
----------
grid
The 2D grid of (y,x) arc-second coordinates the deflection angles and magnification map are computed on.
"""
jacobian = self.jacobian_from(grid=grid)
det_jacobian = jacobian[0][0] * jacobian[1][1] - jacobian[0][1] * jacobian[1][0]
return aa.Array2D(array=1 / det_jacobian, mask=grid.mask)
def hessian_from(self, grid, buffer: float = 0.01, deflections_func=None) -> Tuple:
"""
Returns the Hessian of the lensing object, where the Hessian is the matrix of second partial derivatives of the
potential (see equation 55 https://www.tau.ac.il/~lab3/MICROLENSING/JeruLect.pdf):
`hessian_{i,j} = d^2 / dtheta_i dtheta_j`
The Hessian is computed by evaluating the 2D deflection angles around every (y,x) coordinate on the input 2D
grid map in four directions (positive y, negative y, positive x, negative x), exploiting how the deflection
angles are the derivative of the potential.
By evaluating the deflection angles around each grid coordinate, the Hessian can be computed on either
uniform or irregular 2D grids of (y,x). This can be slower, because 4x more deflection angle calculations
are required; however, it is more flexible and is therefore used throughout **PyAutoLens** by default.
The Hessian is returned as a 4-entry tuple, which reflects its structure as a 2x2 matrix.
Parameters
----------
grid
The 2D grid of (y,x) arc-second coordinates the deflection angles and Hessian are computed on.
buffer
The spacing in the y and x directions around each grid coordinate where deflection angles are computed and
used to estimate the derivative.
"""
if deflections_func is None:
deflections_func = self.deflections_yx_2d_from
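# Central finite differences: shifting each (y,x) coordinate by +/- buffer
# along one axis and differencing the resulting deflection angles
# approximates d(alpha_i)/d(theta_j), the second derivatives of the potential.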
grid_shift_y_up = np.zeros(grid.shape)
grid_shift_y_up[:, 0] = grid[:, 0] + buffer
grid_shift_y_up[:, 1] = grid[:, 1]
grid_shift_y_down = np.zeros(grid.shape)
grid_shift_y_down[:, 0] = grid[:, 0] - buffer
grid_shift_y_down[:, 1] = grid[:, 1]
grid_shift_x_left = np.zeros(grid.shape)
grid_shift_x_left[:, 0] = grid[:, 0]
grid_shift_x_left[:, 1] = grid[:, 1] - buffer
grid_shift_x_right = np.zeros(grid.shape)
grid_shift_x_right[:, 0] = grid[:, 0]
grid_shift_x_right[:, 1] = grid[:, 1] + buffer
deflections_up = deflections_func(grid=grid_shift_y_up)
deflections_down = deflections_func(grid=grid_shift_y_down)
deflections_left = deflections_func(grid=grid_shift_x_left)
deflections_right = deflections_func(grid=grid_shift_x_right)
hessian_yy = 0.5 * (deflections_up[:, 0] - deflections_down[:, 0]) / buffer
hessian_xy = 0.5 * (deflections_up[:, 1] - deflections_down[:, 1]) / buffer
hessian_yx = 0.5 * (deflections_right[:, 0] - deflections_left[:, 0]) / buffer
hessian_xx = 0.5 * (deflections_right[:, 1] - deflections_left[:, 1]) / buffer
return hessian_yy, hessian_xy, hessian_yx, hessian_xx
def convergence_2d_via_hessian_from(
self, grid, buffer: float = 0.01
) -> aa.ValuesIrregular:
"""
Returns the convergence of the lensing object, which is computed from the 2D deflection angle map via the
Hessian using the expression (see equation 56 https://www.tau.ac.il/~lab3/MICROLENSING/JeruLect.pdf):
`convergence = 0.5 * (hessian_{0,0} + hessian_{1,1}) = 0.5 * (hessian_xx + hessian_yy)`
By going via the Hessian, the convergence can be calculated at any (y,x) coordinate, therefore using either a
2D uniform or irregular grid.
This calculation of the convergence is independent of analytic calculations defined within `MassProfile` objects
and can therefore be used as a cross-check.
Parameters
----------
grid
The 2D grid of (y,x) arc-second coordinates the deflection angles and Hessian are computed on.
buffer
The spacing in the y and x directions around each grid coordinate where deflection angles are computed and
used to estimate the derivative.
"""
hessian_yy, hessian_xy, hessian_yx, hessian_xx = self.hessian_from(
grid=grid, buffer=buffer
)
return grid.values_from(array_slim=0.5 * (hessian_yy + hessian_xx))
def shear_yx_2d_via_hessian_from(
self, grid, buffer: float = 0.01
) -> ShearYX2DIrregular:
"""
Returns the 2D (y,x) shear vectors of the lensing object, which are computed from the 2D deflection angle map
via the Hessian using the expressions (see equation 57 https://www.tau.ac.il/~lab3/MICROLENSING/JeruLect.pdf):
`shear_y = hessian_{1,0} = hessian_{0,1} = hessian_yx = hessian_xy`
`shear_x = 0.5 * (hessian_{0,0} - hessian_{1,1}) = 0.5 * (hessian_xx - hessian_yy)`
By going via the Hessian, the shear vectors can be calculated at any (y,x) coordinate, therefore using either a
2D uniform or irregular grid.
This calculation of the shear vectors is independent of analytic calculations defined within `MassProfile`
objects and can therefore be used as a cross-check.
Parameters
----------
grid
The 2D grid of (y,x) arc-second coordinates the deflection angles and Hessian are computed on.
buffer
The spacing in the y and x directions around each grid coordinate where deflection angles are computed and
used to estimate the derivative.
"""
hessian_yy, hessian_xy, hessian_yx, hessian_xx = self.hessian_from(
grid=grid, buffer=buffer
)
shear_yx_2d = np.zeros(shape=(grid.sub_shape_slim, 2))
shear_yx_2d[:, 0] = hessian_xy
shear_yx_2d[:, 1] = 0.5 * (hessian_xx - hessian_yy)
return ShearYX2DIrregular(vectors=shear_yx_2d, grid=grid)
def magnification_2d_via_hessian_from(
self, grid, buffer: float = 0.01, deflections_func=None
) -> aa.ValuesIrregular:
"""
Returns the 2D magnification map of the lensing object, which is computed from the 2D deflection angle map
via the Hessian using the expressions (see equation 60 https://www.tau.ac.il/~lab3/MICROLENSING/JeruLect.pdf):
`magnification = 1.0 / det(Jacobian) = 1.0 / abs((1.0 - convergence)**2.0 - shear**2.0)`
`magnification = 1.0 / [(1.0 - hessian_{0,0}) * (1.0 - hessian_{1,1}) - hessian_{0,1} * hessian_{1,0}]`
`magnification = 1.0 / [(1.0 - hessian_xx) * (1.0 - hessian_yy) - hessian_xy * hessian_yx]`
By going via the Hessian, the magnification can be calculated at any (y,x) coordinate, therefore using either a
2D uniform or irregular grid.
This calculation of the magnification is independent of calculations using the Jacobian and can therefore be
used as a cross-check.
Parameters
----------
grid
The 2D grid of (y,x) arc-second coordinates the deflection angles and magnification map are computed on.
buffer
The spacing in the y and x directions around each grid coordinate where deflection angles are computed and
used to estimate the derivative.
"""
hessian_yy, hessian_xy, hessian_yx, hessian_xx = self.hessian_from(
grid=grid, buffer=buffer, deflections_func=deflections_func
)
det_A = (1 - hessian_xx) * (1 - hessian_yy) - hessian_xy * hessian_yx
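# For a symmetric Hessian this determinant reduces to the familiar
# (1.0 - convergence)**2.0 - shear**2.0 expression quoted in the docstring.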
return grid.values_from(array_slim=1.0 / det_A)
@evaluation_grid
def tangential_critical_curve_from(
self, grid, pixel_scale: Union[Tuple[float, float], float] = 0.05
) -> aa.Grid2DIrregular:
"""
Returns the tangential critical curve of the lensing object, which is computed as follows:
1) Compute the tangential eigen values for every coordinate on the input grid via the Jacobian.
2) Find contours of all values in the tangential eigen values that are zero using a marching squares algorithm.
Due to the use of a marching squares algorithm that requires the zero values of the tangential eigen values to
be computed, critical curves can only be calculated using the Jacobian and a uniform 2D grid.
Parameters
----------
grid
The 2D grid of (y,x) arc-second coordinates the deflection angles and tangential eigen values are computed
on.
pixel_scale
If input, the `evaluation_grid` decorator creates the 2D grid at this resolution, therefore enabling the
critical curve to be computed more accurately using a higher resolution grid.
"""
tangential_eigen_values = self.tangential_eigen_value_from(grid=grid)
tangential_critical_curve_indices = measure.find_contours(
tangential_eigen_values.native, 0
)
if len(tangential_critical_curve_indices) == 0:
return []
tangential_critical_curve = grid.mask.grid_scaled_for_marching_squares_from(
grid_pixels_1d=tangential_critical_curve_indices[0],
shape_native=tangential_eigen_values.sub_shape_native,
)
try:
return aa.Grid2DIrregular(tangential_critical_curve)
except IndexError:
return []
@evaluation_grid
def radial_critical_curve_from(
self, grid, pixel_scale: Union[Tuple[float, float], float] = 0.05
) -> aa.Grid2DIrregular:
"""
Returns the radial critical curve of the lensing object, which is computed as follows:
1) Compute the radial eigen values for every coordinate on the input grid via the Jacobian.
2) Find contours of all values in the radial eigen values that are zero using a marching squares algorithm.
Due to the use of a marching squares algorithm that requires the zero values of the radial eigen values to
be computed, this critical curve can only be calculated using the Jacobian and a uniform 2D grid.
Parameters
----------
grid
The 2D grid of (y,x) arc-second coordinates the deflection angles and radial eigen values are computed
on.
pixel_scale
If input, the `evaluation_grid` decorator creates the 2D grid at this resolution, therefore enabling the
critical curve to be computed more accurately using a higher resolution grid.
"""
radial_eigen_values = self.radial_eigen_value_from(grid=grid)
radial_critical_curve_indices = measure.find_contours(
radial_eigen_values.native, 0
)
if len(radial_critical_curve_indices) == 0:
return []
radial_critical_curve = grid.mask.grid_scaled_for_marching_squares_from(
grid_pixels_1d=radial_critical_curve_indices[0],
shape_native=radial_eigen_values.sub_shape_native,
)
try:
return aa.Grid2DIrregular(radial_critical_curve)
except IndexError:
return []
@evaluation_grid
def critical_curves_from(
self, grid, pixel_scale: Union[Tuple[float, float], float] = 0.05
) -> List[aa.Grid2DIrregular]:
"""
Returns both the tangential and radial critical curves of the lensing object as a two entry list of
irregular 2D grids.
The calculation of each critical curve is described in the functions `tangential_critical_curve_from()` and
`radial_critical_curve_from()`.
Due to the use of a marching squares algorithm used in each function, critical curves can only be calculated
using the Jacobian and a uniform 2D grid.
Parameters
----------
grid
The 2D grid of (y,x) arc-second coordinates the deflection angles used to calculate the critical curves are
computed on.
pixel_scale
If input, the `evaluation_grid` decorator creates the 2D grid at this resolution, therefore enabling the
critical curve to be computed more accurately using a higher resolution grid.
"""
try:
return aa.Grid2DIrregular(
[
self.tangential_critical_curve_from(
grid=grid, pixel_scale=pixel_scale
),
self.radial_critical_curve_from(grid=grid, pixel_scale=pixel_scale),
]
)
except (IndexError, ValueError):
return []
@evaluation_grid
def tangential_caustic_from(
self, grid, pixel_scale: Union[Tuple[float, float], float] = 0.05
) -> aa.Grid2DIrregular:
"""
Returns the tangential caustic of the lensing object, which is computed as follows:
1) Compute the tangential eigen values for every coordinate on the input grid via the Jacobian.
2) Find contours of all values in the tangential eigen values that are zero using a marching squares algorithm.
3) Compute the lensing object's deflection angles at the (y,x) coordinates of this tangential critical curve
contour and ray-trace it to the source-plane, therefore forming the tangential caustic.
Due to the use of a marching squares algorithm that requires the zero values of the tangential eigen values to
be computed, caustics can only be calculated using the Jacobian and a uniform 2D grid.
Parameters
----------
grid
The 2D grid of (y,x) arc-second coordinates the deflection angles and tangential eigen values are computed
on.
pixel_scale
If input, the `evaluation_grid` decorator creates the 2D grid at this resolution, therefore enabling the
caustic to be computed more accurately using a higher resolution grid.
"""
tangential_critical_curve = self.tangential_critical_curve_from(
grid=grid, pixel_scale=pixel_scale
)
if len(tangential_critical_curve) == 0:
return []
deflections_critical_curve = self.deflections_yx_2d_from(
grid=tangential_critical_curve
)
return tangential_critical_curve - deflections_critical_curve
@evaluation_grid
def radial_caustic_from(
self, grid, pixel_scale: Union[Tuple[float, float], float] = 0.05
) -> aa.Grid2DIrregular:
"""
Returns the radial caustic of the lensing object, which is computed as follows:
1) Compute the radial eigen values for every coordinate on the input grid via the Jacobian.
2) Find contours of all values in the radial eigen values that are zero using a marching squares algorithm.
3) Compute the lensing object's deflection angles at the (y,x) coordinates of this radial critical curve
contour and ray-trace it to the source-plane, therefore forming the radial caustic.
Due to the use of a marching squares algorithm that requires the zero values of the radial eigen values to
be computed, this caustic can only be calculated using the Jacobian and a uniform 2D grid.
Parameters
----------
grid
The 2D grid of (y,x) arc-second coordinates the deflection angles and radial eigen values are computed
on.
pixel_scale
If input, the `evaluation_grid` decorator creates the 2D grid at this resolution, therefore enabling the
caustic to be computed more accurately using a higher resolution grid.
"""
radial_critical_curve = self.radial_critical_curve_from(
grid=grid, pixel_scale=pixel_scale
)
if len(radial_critical_curve) == 0:
return []
deflections_critical_curve = self.deflections_yx_2d_from(
grid=radial_critical_curve
)
return radial_critical_curve - deflections_critical_curve
@evaluation_grid
def caustics_from(
self, grid, pixel_scale: Union[Tuple[float, float], float] = 0.05
) -> List[aa.Grid2DIrregular]:
"""
Returns both the tangential and radial caustics of the lensing object as a two entry list of
irregular 2D grids.
The calculation of each caustic is described in the functions `tangential_caustic_from()` and
`radial_caustic_from()`.
Due to the use of a marching squares algorithm used in each function, caustics can only be calculated
using the Jacobian and a uniform 2D grid.
Parameters
----------
grid
The 2D grid of (y,x) arc-second coordinates the deflection angles used to calculate the caustics are
computed on.
pixel_scale
If input, the `evaluation_grid` decorator creates the 2D grid at this resolution, therefore enabling the
caustic to be computed more accurately using a higher resolution grid.
"""
try:
return aa.Grid2DIrregular(
[
self.tangential_caustic_from(grid=grid, pixel_scale=pixel_scale),
self.radial_caustic_from(grid=grid, pixel_scale=pixel_scale),
]
)
except (IndexError, ValueError):
return []
@evaluation_grid
def area_within_tangential_critical_curve_from(
self, grid, pixel_scale: Union[Tuple[float, float], float] = 0.05
) -> float:
"""
Returns the surface area within the tangential critical curve, the calculation of which is described in the
function `tangential_critical_curve_from()`.
The area is computed via a line integral.
Due to the use of a marching squares algorithm to estimate the critical curve, this function can only use the
Jacobian and a uniform 2D grid.
Parameters
----------
grid
The 2D grid of (y,x) arc-second coordinates the deflection angles used to calculate the tangential critical
curve are computed on.
pixel_scale
If input, the `evaluation_grid` decorator creates the 2D grid at this resolution, therefore enabling the
critical curve to be computed more accurately using a higher resolution grid.
"""
tangential_critical_curve = self.tangential_critical_curve_from(
grid=grid, pixel_scale=pixel_scale
)
x, y = tangential_critical_curve[:, 0], tangential_critical_curve[:, 1]
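# Shoelace formula: the enclosed area as a discrete line integral (Green's
# theorem). The (y,x) grid ordering swaps the variable labels, but the
# absolute value leaves the computed area unchanged.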
return np.abs(0.5 * np.sum(y[:-1] * np.diff(x) - x[:-1] * np.diff(y)))
@evaluation_grid
def einstein_radius_from(
self, grid, pixel_scale: Union[Tuple[float, float], float] = 0.05
):
"""
Returns the Einstein radius, which is defined as the radius of the circle which contains the same area as the
area within the tangential critical curve.
This definition is sometimes referred to as the "effective Einstein radius" in the literature and is commonly
adopted in studies, for example the SLACS series of papers.
The calculation of the tangential critical curve and its area is described in the functions
`tangential_critical_curve_from()` and `area_within_tangential_critical_curve_from()`.
Due to the use of a marching squares algorithm to estimate the critical curve, this function can only use the
Jacobian and a uniform 2D grid.
Parameters
----------
grid
The 2D grid of (y,x) arc-second coordinates the deflection angles used to calculate the tangential critical
curve are computed on.
pixel_scale
If input, the `evaluation_grid` decorator creates the 2D grid at this resolution, therefore enabling the
critical curve to be computed more accurately using a higher resolution grid.
"""
try:
return np.sqrt(
self.area_within_tangential_critical_curve_from(
grid=grid, pixel_scale=pixel_scale
)
/ np.pi
)
except TypeError:
raise TypeError("The grid input was unable to estimate the Einstein Radius")
@evaluation_grid
def einstein_mass_angular_from(
self, grid, pixel_scale: Union[Tuple[float, float], float] = 0.05
):
"""
Returns the angular Einstein Mass, which is defined as:
`einstein_mass = pi * einstein_radius ** 2.0`
where the Einstein radius is the radius of the circle which contains the same area as the area within the
tangential critical curve.
The Einstein mass is returned in units of arcsecond**2.0 and requires division by the lensing critical surface
density \sigma_cr to be converted to physical units like solar masses (see `autogalaxy.util.cosmology_util`).
This definition of Einstein radius (and therefore mass) is sometimes referred to as the "effective Einstein
radius" in the literature and is commonly adopted in studies, for example the SLACS series of papers.
The calculation of the Einstein radius is described in the function `einstein_radius_from()`.
Due to the use of a marching squares algorithm to estimate the critical curve, this function can only use the
Jacobian and a uniform 2D grid.
Parameters
----------
grid
The 2D grid of (y,x) arc-second coordinates the deflection angles used to calculate the tangential critical
curve are computed on.
pixel_scale
If input, the `evaluation_grid` decorator creates the 2D grid at this resolution, therefore enabling the
critical curve to be computed more accurately using a higher resolution grid.
"""
return np.pi * (
self.einstein_radius_from(grid=grid, pixel_scale=pixel_scale) ** 2
)
def jacobian_from(self, grid):
"""
Returns the Jacobian of the lensing object, which is computed by taking the gradient of the 2D deflection
angle map along the y and x directions via `np.gradient`.
By using the `np.gradient` method the Jacobian can therefore only be computed using uniform 2D grids of (y,x)
coordinates, and does not support irregular grids. For this reason, calculations by default use the Hessian,
which is slower to compute because more deflection angle calculations are necessary but more flexible in
general.
The Jacobian is returned as a list of lists, which reflects its structure as a 2x2 matrix.
Parameters
----------
grid
The 2D grid of (y,x) arc-second coordinates the deflection angles and Jacobian are computed on.
"""
deflections = self.deflections_yx_2d_from(grid=grid)
# TODO : Can probably make this work on irregular grid? Is there any point?
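# Convention sketch: the Jacobian is A = I - d(alpha)/d(theta), with
# np.gradient differentiating along axis=0 (y) or axis=1 (x) of the native
# 2D arrays and using the grid's own coordinates as the sample spacing.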
a11 = aa.Array2D.manual_mask(
array=1.0
- np.gradient(deflections.native[:, :, 1], grid.native[0, :, 1], axis=1),
mask=grid.mask,
)
a12 = aa.Array2D.manual_mask(
array=-1.0
* np.gradient(deflections.native[:, :, 1], grid.native[:, 0, 0], axis=0),
mask=grid.mask,
)
a21 = aa.Array2D.manual_mask(
array=-1.0
* np.gradient(deflections.native[:, :, 0], grid.native[0, :, 1], axis=1),
mask=grid.mask,
)
a22 = aa.Array2D.manual_mask(
array=1
- np.gradient(deflections.native[:, :, 0], grid.native[:, 0, 0], axis=0),
mask=grid.mask,
)
return [[a11, a12], [a21, a22]]
@precompute_jacobian
def convergence_2d_via_jacobian_from(self, grid, jacobian=None) -> aa.Array2D:
"""
Returns the convergence of the lensing object, which is computed from the 2D deflection angle map via the
Jacobian using the expression (see equation 58 https://www.tau.ac.il/~lab3/MICROLENSING/JeruLect.pdf):
`convergence = 1.0 - 0.5 * (jacobian_{0,0} + jacobian_{1,1}) = 1.0 - 0.5 * (jacobian_xx + jacobian_yy)`
By going via the Jacobian, the convergence must be calculated using a 2D uniform grid.
This calculation of the convergence is independent of analytic calculations defined within `MassProfile`
objects and the calculation via the Hessian. It can therefore be used as a cross-check.
Parameters
----------
grid
The 2D grid of (y,x) arc-second coordinates the deflection angles and Jacobian are computed on.
jacobian
A precomputed lensing jacobian, which is passed throughout the `CalcLens` functions for efficiency.
"""
convergence = 1 - 0.5 * (jacobian[0][0] + jacobian[1][1])
return aa.Array2D(array=convergence, mask=grid.mask)
@precompute_jacobian
def shear_yx_2d_via_jacobian_from(
self, grid, jacobian=None
) -> Union[ShearYX2D, ShearYX2DIrregular]:
"""
Returns the 2D (y,x) shear vectors of the lensing object, which are computed from the 2D deflection angle map
via the Jacobian using the expression (see equation 58 https://www.tau.ac.il/~lab3/MICROLENSING/JeruLect.pdf):
`shear_y = -0.5 * (jacobian_{0,1} + jacobian_{1,0}) = -0.5 * (jacobian_yx + jacobian_xy)`
`shear_x = 0.5 * (jacobian_{1,1} - jacobian_{0,0}) = 0.5 * (jacobian_yy - jacobian_xx)`
By going via the Jacobian, the shear vectors must be calculated using a 2D uniform grid.
This calculation of the shear vectors is independent of analytic calculations defined within `MassProfile`
objects and the calculation via the Hessian. It can therefore be used as a cross-check.
Parameters
----------
grid
The 2D grid of (y,x) arc-second coordinates the deflection angles and Jacobian are computed on.
jacobian
A precomputed lensing jacobian, which is passed throughout the `CalcLens` functions for efficiency.
"""
shear_yx_2d = np.zeros(shape=(grid.sub_shape_slim, 2))
shear_yx_2d[:, 0] = -0.5 * (jacobian[0][1] + jacobian[1][0])
shear_yx_2d[:, 1] = 0.5 * (jacobian[1][1] - jacobian[0][0])
if isinstance(grid, aa.Grid2DIrregular):
return ShearYX2DIrregular(vectors=shear_yx_2d, grid=grid)
return ShearYX2D(vectors=shear_yx_2d, grid=grid, mask=grid.mask)
|
\chapter{Rust}
\label{ch:rust}
Rust is intended to be a practical language. Its developers understand that each perspective has
advantages and drawbacks, and they try to pick the best tool for each job, rather than
dogmatically taking a stance that everything should be done in one way. Naive, paranoid,
and suspicious interfaces all have compelling use cases.
Rust uses naive interfaces to provide low-level interfaces for users to extend
the language and its libraries with. Very little of the standard library uses
the kind of magic that an external library can't provide, and we are frequently
evaluating ways to remove any remaining magic. This is necessary to compete in
the space of C and C++, where abandoning standard libraries altogether is
common.
But these naive interfaces aren't intended to be the common case in Rust. Everyday
usage is expected to be safe. As such, we also need to provide paranoid and
suspicious interfaces. Everyday usage is, however, also expected to provide fairly
low-level control and excellent performance. Garbage collection, for instance,
should be exceptional. Certainly, nothing in the standard library expects
garbage collection to be used. Rust therefore needs some way for efficient and
safe low-level interfaces to be built on top of unsafe ones.
Part of Rust's solution to this problem is simply using our favourite solutions for
each specific problem: static types, runtime bounds checks, no nulls, wrapping
arithmetic, and so on. However these solutions are ad hoc and tailored to the simpler
trust problems. For the hard trust problems, Rust has one very large holistic solution,
and it's what separates it from most other languages: ownership.
Rust's ownership model has two major aspects: controlling where and when data lives;
and controlling where and when mutation can occur. These aspects are governed by
three major features: affine types, regions, and privacy.
\section{Affine Types}
At a base level, Rust manages data ownership with an \emph{affine type system}. The
literature often describes affine types as being usable \emph{at most once}
\cite{pierce2005advanced}, but from
the perspective of ownership, affine typing means values are \emph{uniquely owned}
(there is no semantic distinction here, only a matter of perspective).
To C++ developers, affine typing can be understood as a stricter version of
\emph{move semantics}.
If a variable stores a collection, passing it to a function by-value, or
assigning it to another variable, transfers ownership of the value to the new
location. The new location gains access to the value, and the old location loses
access. Whoever owns the value knows that it's the only location
in existence that can possibly talk about the contents of the collection.
This allows the owner of the collection to soundly trust the
collection; any properties it observes and wishes to rely on will not be changed
by some other piece of code without its permission. Perhaps more importantly,
the owner of the collection knows that it can do whatever it pleases with the
collection without interfering with anyone else -- no one else is trusting it.
A simple example:
\begin{minted}{rust}
fn main() {
// A growable array
let data = Vec::new();
// transfer ownership of `data` to `data2`
let data2 = data;
// `data` is now statically inaccessible,
// and logically uninitialized
// transfer ownership of `data2` to `consume`
consume(data2)
// `data2` is now statically inaccessible,
// and logically uninitialized
}
fn consume(mut data3: Vec<u32>) {
// Mutating the collection is known to be safe, because
// `data3` knows it's the only one who can access it.
data3.push(1);
}
\end{minted}
The greatest of these rights is destruction: when a variable goes out of scope,
it destroys its value forever. This can mean simply forgetting the value,
or it can mean executing the type's destructor. In the case of a collection,
this would presumably recursively destroy all contained values, and free all of
its allocations.
Affine types are primarily useful for eliminating the \emph{use-after} family of bugs.
If only one location ever has access to a value, and a value is only invalidated
when that one location disappears, then it's trivially true that one cannot use
an invalidated value. For this reason, the most obvious applications of affine
typing are with various forms of transient resources: threads, connections,
files, allocations, and so on.
However it turns out that a surprising number of problems can be reduced to a
use-after problem. For instance, many APIs require some sequence of steps to
be executed in a certain order. This can be encoded quite easily using affine
types. Functions can produce a ``proof of work'' by returning a type that only
they have permission to produce. Similarly, functions can \emph{require} a proof
of work by consuming such a type:
\begin{minted}{rust}
fn first() -> First;
fn second(First) -> Second;
fn third(Second) -> Third;
fn alternative(First) -> Alternative;
\end{minted}
We can therefore use affine types to model valid control flow and statically
ensure correct usage. \emph{Session types} are the logical extreme of this
technique, where programs effectively ``write themselves'' due to their type
constraints. Munksgaard and Jespersen \cite{munksgaard2015practical} have an
excellent analysis of session typing in Rust, so we won't dwell on this topic.
It should be noted that affine typing isn't mandatory in Rust. Unique ownership
doesn't make sense or simply isn't important for many types like booleans and
integers. Such types can opt into \emph{copy semantics}. Copy types behave like any
other value with one simple caveat: when they're moved, the old copy of the
value is still valid.
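A small sketch of the difference:
\begin{minted}{rust}
fn main() {
    // `u32` has copy semantics: assignment duplicates the value,
    // and the original variable remains usable.
    let a: u32 = 5;
    let b = a;
    println!("{} {}", a, b);

    // `Vec<u32>` is affine: assignment moves it, and the old
    // location becomes statically inaccessible.
    let v = vec![1, 2, 3];
    let w = v;
    // println!("{:?}", v); // compiler error: use of moved value
    println!("{:?}", w);
}
\end{minted}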
Copy semantics can have surprising consequences though. For instance, it may be
reasonable for a random number generator to be copyable, as its internal
state is generally just some integers. It then becomes possible to
\emph{accidentally} copy the generator, causing the same number to be yielded
repeatedly. For this reason, some types which \emph{could} be copied safely don't opt
into copy semantics. In this case, affine typing is used as a lint against what
is likely, but not necessarily, a mistake.
\section{Borrows and Regions}
Affine types are all well and good for some problems, but if that's all Rust had,
it would be a huge pain in the neck. In particular, it's very common to want
to \emph{borrow} a value. In the case of a unique borrow, affine types can encode
this fine: you simply pass the borrowed value in, and then return it back. This
is \emph{borrow threading}.
Threading is, at its best, just annoying to do. In particular, it must be written
out in the types, and performed explicitly in the code. With only affine types,
any process that borrows some data and has an actual return value requires
all of the data to be mixed in with the return value. Say we'd like to write
something like:
\begin{minted}{rust}
fn main() {
let input = get_input();
let pattern = get_pattern();
if matches(&input, &pattern) {
println!("input {} matches {}", input, pattern);
}
}
fn matches(input: &Data, pattern: &Pattern) -> bool {
// ...
return found_match;
}
\end{minted}
with only affine types we'd get something like:
\begin{minted}{rust}
fn main() {
let input = get_input();
let pattern = get_pattern();
// Need to recapture all the data we loaned
let (matches, input, pattern) = matches(input, pattern);
if matches {
println!("input {} matches {}", input, pattern);
}
}
fn matches(input: Data, pattern: Pattern)
-> (bool, Data, Pattern)
{
// ...
return (found_match, input, pattern);
}
\end{minted}
Affine types \emph{really} hit a wall when data wants to be \emph{shared}. If
several pieces of code wish to concurrently read some data, we have a serious
issue. One solution is to simply \emph{copy} the data to all the consumers. If each
has their own unique copy to work with, everyone's happy.
However, even if we're ignoring the performance aspect of this strategy (which
is non-trivial), it may simply not make sense. If the underlying resource to
share is truly affine, then there may be \emph{no} way to copy the data in a
semantic-preserving way. For instance, one cannot just blindly copy a file
handle, as each holder of the handle could then close it while the others are
trying to use it.
At the end of the day, having only values everywhere is just dang \emph{impractical}.
Rust is a practical language, so it uses a tried and true solution: pointers! Unfortunately,
pointers make everything more complicated and difficult. Affine types ``solved''
use-after errors for us, but pointers bring them back and make them \emph{far} worse.
The fact that data has been moved or destroyed says nothing of the state of
pointers to it. As C has demonstrated since its inception, pointers are all too
happy to let us view data that might be destroyed or otherwise invalid.
Garbage collection solves this problem for allocations, but does nothing to
prevent trying to use an otherwise invalidated value, such as a closed file.
Rust's solution to this problem is its most exotic tool: regions \cite{swamy2006safe}.
Like affine types, regions are something well-established in both theory and
implementation, but with little mainstream usage. Although Rust primarily cribs
them from Cyclone, they were first described by Tofte and Talpin \cite{tofte1997region}
and used in MLKit. That said, Cyclone's version of regions
is most immediately recognizable to a Rust programmer.
The idea of a region system is that pointers are associated with the region of
the program that they're valid for, and the compiler ensures that pointers don't
escape their region. This is done entirely at compile time, and has no runtime
component.
For Rust, these regions correspond to lexical scopes, which are roughly
pairs of matching braces. The restriction to lexical scopes is not fundamental,
and was simply easier to implement for the 1.0 release. It is however sufficient
for most purposes. Rust calls these regions \emph{lifetimes}.
At a base level, all a region system does is statically track what pointers are
outstanding during any given piece of code. By combining this information with
other static analysis it's possible to completely eliminate several classes of error
that are traditionally relegated to garbage collection. For ownership, region analysis allows us
to statically identify when a value is moved or destroyed while being pointed to,
and produce an error to that effect:
\begin{minted}{rust}
fn main() {
// Gets a dangling pointer
let data = compute_it(&0);
println!("{}", data);
}
fn compute_it(input: &u32) -> &u32 {
let data = input + 1;
// Returning a pointer to a local variable
return &data;
}
\end{minted}
\begin{minted}{text}
<anon>:11:13: 11:17 error: `data` does not live long enough
<anon>:11 return &data;
^~~~
<anon>:8:36: 12:2 note: reference must be valid for the
anonymous lifetime #1 defined on the block at 8:35...
<anon>: 8 fn compute_it(input: &u32) -> &u32 {
<anon>: 9 let data = input + 1;
<anon>:10 // Returning a pointer to a local variable
<anon>:11 return &data;
<anon>:12 }
<anon>:9:26: 12:2 note: ...but borrowed value is only valid
for the block suffix following statement 0 at 9:25
<anon>: 9 let data = input + 1;
<anon>:10 // Returning a pointer to a local variable
<anon>:11 return &data;
<anon>:12 }
\end{minted}
On its own, this is pretty great: no dangling pointers without the need for
garbage collection! But when combined with affine types,
we get something even more powerful than garbage collection. For instance, if
you close a file in a garbage collected language, there is nothing to prevent
old users of the file from continuing to work with it. One must guard for
this at runtime. In Rust, this is simply not a concern:
it's statically impossible. Closing the file destroys it, and that means all
pointers must be gone. At least in simple cases like this, we've enabled
pointers to be used without having to worry about a use-after.
Unfortunately, this doesn't solve problems like iterator invalidation. When an
iterator is invalidated, the collection it was pointing to wasn't \emph{destroyed},
it was just changed. In order to handle iterator invalidation, we require something
more than checking for moves.
The most extreme solution is to simply forbid internal pointers. Only allow
pointers to borrow variables on the stack, and everything else has to be copied
out. Then we never have to worry about pointers being invalidated. Unfortunately,
this would be a very limiting system. It would make composition of affine types useless,
because you could never access the components without destroying the aggregate.
It also doesn't really solve the problem the way we wanted. Most iterators we'd
be interested in providing would become inexpressible under this model. For
instance, one couldn't yield interior pointers from an iterator. Depending on
the details, any kind of tree iterator may be completely impractical.
Another extreme solution would be to forbid mutation of data. Mutations can be emulated
by creating a new object with the necessary changes, so this is in principle
possible. However this suffers from similar issues as borrow threading: it's
really annoying, and would also make it difficult to obtain the same
control as C(++).
Yet another way is to treat all pointers into a collection as pointers \emph{to}
the collection, and forbid mutation through pointers. All mutating operations
could require by-value (and therefore unique) access, which could be done with borrow-threading.
This is unfortunate because we were trying to avoid borrow-threading by introducing
pointers in the first place, but at least we could share data immutably,
which is a definite win.
Rust basically takes this last approach, but in order to avoid the annoying pain of
threading borrows, it includes two different \emph{kinds} of pointer:
\emph{mutable references} and \emph{shared references} denoted \mintinline
{rust}{&mut} and \mintinline{rust}{&} respectively. Shared references are
exactly as we described: they can be
freely aliased, but only allow you to read the data they point to. On the other
hand, mutable references must be unique, but enable mutation of the data
they point to. This means that taking a mutable reference to some data is like moving
it, but then having the compiler automatically insert all the boiler-plate
to move it back into place when the mutable reference is gone (the
compiler does not actually move the data around when you take a mutable
reference).
Let's look at some simple examples:
\begin{minted}{rust}
let mut data = 0;
{
// Allowed to take multiple shared references
let data_ref1 = &data;
let data_ref2 = &data;
// Allowed to read through them,
// and still read the value directly
println!("{} {} {}", data_ref1, data_ref2, data);
// Not allowed to mutate through them (compiler error)
// *data_ref1 += 1;
}
{
// Allowed to take one mutable reference
let data_mut = &mut data;
// Allowed to read or write through it
println!("{}", data_mut);
*data_mut += 1;
// Allowed to move the mutable reference to someone else
let data_the_second = data_mut;
// Not allowed to get an aliasing shared reference
// (compiler error)
// let data_ref = &data;
// Not allowed to get an aliasing mutable reference
// (compiler error)
// let data_mut2 = &mut data;
// Not allowed to directly access data anymore
// (compiler error)
// println!("{}", data);
// But can use the new location fine
println!("{}", data_the_second);
}
// All borrows out of scope, allowed to access data again
data += 1;
println!("{}", data);
\end{minted}
\section{Mutable XOR Shared}
This is Rust's most critical perspective on ownership: mutation is mutually
exclusive with sharing. In order to get the most out of this perspective, Rust
doesn't allow mutability to be declared at the type level. That is, a struct's
field cannot be declared to be constant. Instead, the mutability of a value is
\emph{inherited} from how it's accessed: as long as you have something by-value or
by-mutable-reference, you can mutate it.
This stands in contrast to the perspective that mutation is something to be
avoided completely. As we've seen, mutation can cause serious problems. This
has led some to conclude that mutation should be avoided as much
as possible. Never mutating anything does indeed satisfy Rust's requirement
that sharing and mutating be exclusive, but in a vacuous way (mutating never
occurs). Rust takes a more permissive stance: mutate all you want as long
as you're not sharing. The Rust developers have found that this eliminates
most of the problems that mutation causes in practice.
In particular, this statically eliminates iterator invalidation. For
instance, consider the following program:
\begin{minted}{rust}
fn main() {
let mut data = vec![1, 2, 3, 4, 5, 6];
for x in &data {
data.push(2 * x);
}
}
\end{minted}
What exactly the programmer intended here is unclear, and what exactly
would happen if this were allowed to compile is even more unclear.
Thankfully, in Rust we don't \emph{need} to wonder what the programmer meant or
what will happen when this is run, because it doesn't compile:
\begin{minted}{text}
<anon>:4:9: 4:13 error: cannot borrow `data` as mutable
because it is also borrowed as immutable
<anon>:4 data.push(2 * x);
^~~~
<anon>:3:15: 3:19 note: previous borrow of `data` occurs
here; the immutable borrow prevents subsequent moves
or mutable borrows of `data` until the borrow ends
<anon>:3 for x in &data {
^~~~
<anon>:5:6: 5:6 note: previous borrow ends here
<anon>:3 for x in &data {
<anon>:4 data.push(2 * x);
<anon>:5 }
^
\end{minted}
This strategy also nicely generalizes to a concurrent context. Recall that a data race is
defined to occur when two threads access a piece of data in an unsynchronized
way, and one is writing. This is exactly aliasing and mutation, which is
forbidden by Rust's scheme. As such, everything in Rust is thread-safe by default.
Of course, perfectly good concurrent algorithms and data structures are rife with aliasing and
mutability. Mutexes exist \emph{precisely} to enable aliasing and mutation in a
controlled manner. As a result, although inherited mutability is the default way to do things in
Rust, it is not the only way. A few key types provide \emph{interior mutability},
which enables their data to be mutated through shared references as long as some
runtime mechanism ensures that access is properly restricted. The most
obvious example of this is exactly the standard library's Mutex type, which
allows an \mintinline{rust}{&Mutex<T>} to become an \mintinline{rust}{&mut T} by
acquiring its lock:
\begin{minted}{rust}
use std::sync::Mutex;
fn main() {
// A Mutex owns the data it guards. In this case,
// an integer. Note that `data` is not declared
// to be mutable, which normally would make it
// impossible to update the value of the integer.
let data = Mutex::new(0);
{
// Acquire the lock
let mut handle = data.lock().unwrap();
// A handle behaves like an `&mut` to the data
*handle += 1;
// But when it goes out of scope here,
// it releases the lock.
}
}
\end{minted}
Why is this interface sound? First and foremost, any attempt to acquire the
lock will block if it's already acquired. This ensures that only one handle
exists at any given time. However, we must also guarantee that the handle doesn't
outlive the mutex, and the pointer we get out of the handle doesn't outlive the
handle. These problems are handled by ownership. Affinity and region analysis
ensures that the pointers and handles aren't duplicated or allowed to outlive
the type they refer to.
However the entire reason we care about Mutexes is for sharing across threads.
This means there is one additional problem we must worry about: the shared
data not being thread-safe. As we have noted, almost everything in Rust is
actually thread-safe by default precisely because of ownership, but two things
can potentially break this: borrows, and interior mutability. Borrows aren't
trivially safe to share between two threads because they're based around
sequential scopes. They don't really make sense with concurrent executions.
Interior mutability isn't thread-safe because it's precisely sharing and mutation.
A Mutex provides interior mutability in an inherently thread-safe way, but not
all types do. In particular, the Cell and RefCell types \emph{aren't} thread-safe.
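As a sketch, RefCell offers the same lock-like interface as Mutex, but enforces
uniqueness with a runtime borrow flag rather than an actual lock, which is why
it is only sound on a single thread:
\begin{minted}{rust}
use std::cell::RefCell;

fn main() {
    let data = RefCell::new(0);
    {
        // A dynamically checked mutable borrow through a shared reference.
        let mut handle = data.borrow_mut();
        *handle += 1;
        // A second, overlapping `borrow_mut()` here would panic at runtime.
    }
    println!("{}", data.borrow());
}
\end{minted}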
So how do we ensure that these problematic types aren't shared across threads?
For borrows, there's actually a way to declare that a type is expected to not
contain borrows, so anything that can pass data to another thread requires that.
However the interior mutability problem requires a completely different solution:
Traits.
Traits are Rust's version of an interface. Rust actually captures thread-safety
as traits that types can implement, called Send and Sync. If a type can be moved
to another thread safely, then it is Send. If a type can be shared between two
threads safely, then it is Sync. These traits are automatically derived
compositionally; if you consist entirely of Send types, then you are Send. This
works because of affinity and ownership. We know that if we own something,
then we are the only ones who can access it. So if the owner is only accessed in
a thread-safe way, then everything it owns is also accessed in a thread-safe way.
Very few types are thread-unsafe, so almost everything is Send and Sync. However
some types are specifically thread-safe even though they're based on parts that
aren't. For instance, Mutex itself is based on parts that aren't thread-safe, but
it is of course thread-safe as long as it contains thread-safe data. As such,
types can manually claim to be Send or Sync. Of course, it's possible to
make this claim incorrectly, so how is this safe to expose?
It's not safe. In fact, it's explicitly unsafe to implement these interfaces.
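As a sketch, with a hypothetical handle type wrapping a raw pointer:
\begin{minted}{rust}
// Raw pointers are neither Send nor Sync, so neither is this type
// by default.
struct MyHandle {
    ptr: *mut u8,
}

// The programmer asserts thread-safety; the compiler cannot verify
// the claim, which is why the impls must be marked `unsafe`.
unsafe impl Send for MyHandle {}
unsafe impl Sync for MyHandle {}
\end{minted}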
\section{Unsafe Rust}
Most languages are considered memory-safe. However with few exceptions, this
isn't actually true. In fact, basically \emph{every} language has unsafe bits.
The most fundamental of these is quite
simple: talking to C. C is the lingua-franca of the programming world. All
major operating systems and many major libraries primarily expose a C interface.
Any language that wants to integrate with these systems must therefore learn
how to interface with C. Because C is \emph{definitely} unsafe and can do just
about anything to a program, these languages then become transitively unsafe.
For instance a C library could pass an otherwise safe language a dangling
pointer, and there's no way for the safe language to defend against this.
See for instance, Python's ctypes module and Java's JNI framework.
Rust is no different, but it embraces this reality a little more than most
other languages. Rust is actually \emph{two} languages: Safe Rust, and Unsafe Rust.
Safe Rust is the Rust we have been focusing on for the most part. It
is intended to be completely safe with one exception: it can talk to
Unsafe Rust. Unsafe Rust, on the other hand, is definitely not a safe language.
In addition to being able to talk to C (like any safe language), it enables the
programmer to work with several constructs that would be easily unsound in
Safe Rust. Most notably, for us, it allows Send and Sync to be implemented.
However Unsafe Rust is most commonly used because it includes raw C-like pointers
which are nullable and untracked.
At first glance, Unsafe Rust appears to completely undermine Rust's claims about
safety, but we argue that it in fact \emph{improves} its safety story. In most safe
languages, if one needs to do something very low level (for performance, correctness,
or any other reason) the general solution to this is ``use C''. This has several
downsides.
First, there's a cognitive overhead. Such an application now has
its logic spread across two completely different languages with different
semantics, runtimes, and behaviors. If the safe language is what a development
team primarily works in, it's unlikely that a significant percentage of the team
is qualified to actively maintain the C components. Second, it incurs non-trivial
runtime overhead. Data must often be reformatted at the language boundary, and
this boundary is usually an opaque box for either language's optimizer.
Finally, falling back to C is simply a \emph{huge} jump in unsafety, from ``totally
safe'' to ``pervasively unsafe''.
Unsafe Rust largely avoids these issues with one simple fact:
it's just a superset of Safe Rust. Lifetimes, Affine Types, and everything else
that helps you write good Rust programs are still working exactly as before.
You're just allowed to do a few extra things that are unsafe. As a result,
there's no unnecessary runtime or semantic overhead for using Unsafe Rust.
Of course one \emph{does} need to understand how to manually uphold Safe Rust's
various guarantees when using Unsafe Rust's extra parts, and this isn't trivial.
However this is still a better situation than using C, because the unsafety is
generally much more modular. For instance, if you use Unsafe Rust to index into
an array in an unchecked manner, you don't suddenly need to worry about the
array being null, dangling, or containing uninitialized memory. All you need to
worry about is if the index is actually in bounds. You know everything else is
still normal.
In addition, Unsafe Rust doesn't require any kind of complicated foreign
function interface. It can be written inline with Safe Rust on demand. Rust's
only requirement is that you write the word ``unsafe'' \emph{somewhere} to indicate
that you understand that what you're doing is unsafe. Since unsafety is
explicitly denoted in this manner, it also enables it to be detected and linted
against if desired.
Rust's standard library (which is written entirely in Rust) makes copious use of
Unsafe Rust internally. Most fundamentally, Unsafe Rust is necessary to provide
various operating system APIs because those are written in C, and only Unsafe
Rust can talk to C. However Unsafe Rust is also used in various places to
implement core abstractions like mutexes and growable arrays.
It's important to note that the fact that these APIs use unsafe code is entirely
an implementation detail to those using the standard library. All the unsafety
is wrapped up in \emph{safe abstractions}. These abstractions serve two masters: the
consumer of the API, and the producer of the API. The benefit to consumers of an
API is fairly straight-forward: they can rest easy knowing that if something
terrible happens, it wasn't their fault. For producers of the API, these safe
abstractions mark a clear boundary for the unsafety they need to worry about.
Unsafe code can be quite difficult because it often relies on stateful
invariants. For instance, the capacity of a growable array is a piece of state
that unsafe code must trust. In order to be sound, these safe abstractions need
to rely on the final element of ownership: \emph{privacy}.
Privacy in Rust is much the same as in most other languages. Fields and functions may be marked
as public or private, and only code that is within some boundary may access anything that is
marked private.
Returning to our example, the capacity of a growable array is marked as private.
Since the abstraction boundary is often exactly the privacy boundary in Rust,
end users of a growable array are therefore prevented from directly manipulating
the capacity. Within the array's privacy boundary, this state can be arbitrarily
manipulated, but this is a closed set of code to audit and verify. The code within
the privacy boundary can therefore trust that the capacity field is only updated
by a small set of trusted code.
The rest of this thesis focuses primarily on these safe abstractions. A good safe abstraction
must have many properties:
\begin{enumerate}
\item Safety: Using the abstraction inappropriately cannot violate Rust's safety guarantees.
\item Efficiency: Ideally, an abstraction is \emph{zero cost}, meaning it is as efficient
at the task it is designed to solve as an unabstracted solution (with a decent
optimizing compiler).
\item Usability: A good abstraction should be more convenient and easy to understand than
the code it's wrapping.
\end{enumerate}
It would be \emph{excellent} if the implementation was also completely safe, but we
do not consider this a critical requirement, as Rust's standard library demonstrates.
It should be noted that Rust's reliance on safe abstractions is, in some sense,
unfortunate. For one, it makes reasoning about the performance characteristics
of a program much more difficult, as it relies on a sufficiently
smart compiler to tear away these abstractions. This in turn means Rust's unoptimized
performance is in a rather atrocious state. It's not uncommon for a newcomer to
the language to express shock that a Rust program is several times slower than
an equivalent Python program, only to learn that enabling optimizations makes
the Rust program several times \emph{faster} than the Python program (and indeed,
as fast as one would expect from the equivalent C++).
However it is our opinion that this is simply fundamental to providing a
programming environment that is safe, efficient, and usable.
|
# Lecture 10: Expectation Continued
## A Proof of Linearity (discrete case)
Let $T = X + Y$, and show that $\mathbb{E}(T) = \mathbb{E}(X) + \mathbb{E}(Y)$.
We will also show that $\mathbb{E}(cX) = c \mathbb{E}(X)$.
In general, we'd like to be in a position where
\begin{align}
\sum_{t} t P(T=t) \stackrel{?}{=} \sum_{x} x P(X=x) + \sum_{y} y P(Y=y)
\end{align}
so, let's try attacking this from the l.h.s.
Considering the image above of a discrete r.v. in Pebble World, note that
\begin{align}
\mathbb{E}(X) &= \sum_{x} x P(X=x) & &\text{grouping the pebbles per X value; weighted average} \\
&= \sum_{s}X(s)P(\{s\}) & &\text{ungrouped; sum each pebble separately} \\
\\
\\
\Rightarrow \mathbb{E}(T) &= \sum_{s} (X+Y)(s)P(\{s\}) \\
&= \sum_{s}X(s)P(\{s\}) + \sum_{s}Y(s)P(\{s\}) \\
&= \sum_{x} x P(X=x) + \sum_{y} y P(Y=y) \\
&= \mathbb{E}(X) + \mathbb{E}(Y) ~~~~ \blacksquare \\
\\
\\
\Rightarrow \mathbb{E}(cX) &= \sum_{x} cx P(X=x) \\
&= c \sum_{x} x P(X=x) \\
&= c \mathbb{E}(X) ~~~~ \blacksquare
\end{align}
----
## Negative Binomial Distribution
### Description
A misnomer: this distribution is actually non-negative, and not binomial, either.
The Negative Binomial is a generalization of the Geometric distribution, where we have a series of independent $Bern(p)$ trials and we want to know # failures before the $r^{\text{th}}$ success.
We can codify this using a bit string:
\begin{align}
& \text{1000100100001001} & \text{0 denotes failure, 1 denotes success} & \\
& r = 5 \\
& n = 11 & \text{failures}
\end{align}
Note that the very last bit position is, of course, a success.
Note also that we can permute the preceding $r-1$ successes amongst the $n+r-1$ slots that come before that final $r^{\text{th}}$ success.
### Notation
$X \sim NB(r,p)$
### Parameters
* $r$ - the total number of successes before we stop counting
* $p$ - probability of success
### Probability mass function
\begin{align}
P(X=n) &= \binom{n+r-1}{r-1} p^r (1-p)^n & &\text{for } n = 0,1,2,\dots\\
&= \binom{n+r-1}{n} p^r (1-p)^n & &\text{equivalently}\\
\end{align}
### Expected value
Let $X_j$ be the # failures between the $(j-1)^{\text{st}}$ and $j^{\text{th}}$ successes. Then we could write
\begin{align}
\mathbb{E}(X) &= \mathbb{E}(X_1 + X_2 + \dots + X_r) \\
&= \mathbb{E}(X_1) + \mathbb{E}(X_2) + \dots + \mathbb{E}(X_r) & &\text{by Linearity} \\
&= r \mathbb{E}(X_1) & &\text{by symmetry} \\
&= r \frac{q}{p} & &\text{where } q = 1 - p ~~~~ \blacksquare
\end{align}
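The value $\mathbb{E}(X_1) = \frac{q}{p}$ is just the Geometric expectation; as a quick sketch, it follows by differentiating the geometric series:
\begin{align}
\mathbb{E}(X_1) &= \sum_{k=0}^{\infty} k \, q^k p \\
&= pq \sum_{k=1}^{\infty} k \, q^{k-1} \\
&= pq ~ \frac{d}{dq} \left( \frac{1}{1-q} \right) \\
&= \frac{pq}{(1-q)^2} = \frac{q}{p}
\end{align}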
----
## Revisiting the Geometric: the First Success Distribution
$X \sim FS(p)$ is the geometric distribution that counts the trials until first success, *including that first success*.
Let $Y = X - 1$.
Then $Y \sim Geom(p)$
The expected value of $FS(p)$ is
\begin{align}
\mathbb{E}(X) &= \mathbb{E}(Y) + 1 \\
&= \frac{q}{p} + 1 \\
&= \boxed{\frac{1}{p}}
\end{align}
----
## Putnam Problem
Consider a random permutation of $1, 2, 3, \dots , n$, where $n \ge 2$.
Find the expected # of local maxima. For example, given the permutation $\boxed{3} ~~ 2 ~~ 1 ~~ 4 ~~ \boxed{7} ~~ 5 ~~ \boxed{6}$ we have 3 local maxima:
- $\boxed{3} \gt 2$
- $4 \lt \boxed{7} \gt 5$
- $5 \lt \boxed{6}$
Now, there are 2 kinds of cases we need to consider:
- non-edge case: $4 ~~ \boxed{7} ~~ 5$ has probability of $\frac{1}{3}$ that the largest number is in the middle position
- edge case: in both left-edge $\boxed{3} ~~ 2$ and right-edge $5 ~~ \boxed{6}$, the probability that the larger number is in the edge position is $\frac{1}{2}$
Let $I_j$ be the indicator r.v. of position $j$ having a local maximum, $1 \le j \le n$.
Using Linearity, we can say that the expected number of local maxima is given by
\begin{align}
\mathbb{E}\left(\sum_{j=1}^{n} I_j\right) &= \mathbb{E}(I_1 + I_2 + \dots + I_n) \\
&= \mathbb{E}(I_1) + \mathbb{E}(I_2) + \dots + \mathbb{E}(I_n) & &\text{by Linearity} \\
&= (n-2) \cdot \frac{1}{3} + 2 \cdot \frac{1}{2} \\
&= \boxed{\frac{n+1}{3}}
\end{align}
Idiot-checking this, we have:
\begin{align}
\mathbb{E}(I_{n=2}) &= \frac{2+1}{3} & &\text{... case where } n=2 \\
&= 1 \\
\\
\\
\mathbb{E}(I_{n=\infty}) &= \frac{\infty+1}{3} & &\text{... case where } n= \infty \\
&= \infty \\
\end{align}
----
## St. Petersburg Paradox
Consider a game of chance involving a fair coin. We will flip the coin until the very first heads shows (the first success distribution).
- If heads shows on the very first flip, you get $\$2$.
- If the first heads shows on the second flip, you get $\$4$.
- If the first heads shows on the third flip, you get $\$8$.
So you will get $\$2^n$ if the first heads shows up on the $n^\text{th}$ trial, including the heads flip.
_How much would you be willing to play this game?_
Let's tackle this by thinking about the expected number of $\$\$\$$ we stand to make.
Given $Y = 2^n$, find $\mathbb{E}(Y)$:
\begin{align}
\mathbb{E}(Y) &= \sum_{k=1}^\infty 2^k \frac{1}{2^{k-1}} ~ \frac{1}{2}\\
&= \sum_{k=1}^\infty 2^k \frac{1}{2^k}\\
&= \sum_{k=1}^\infty 1 = \infty \\
\\
\\
\mathbb{E}(Y_{k=40}) &= \sum_{k=1}^{40} 1 \\
&= 40
\end{align}
So, the "paradox" here is that even if we capped the payout to $2^{40} \approx \$1000000000$, Linearity shows us we would only pay $40. It is very hard to grasp this, but the truth is that if you were offered this game at any price, you should take it.
----
|
setwd("~/pCloudDrive/Profissional/ciencia-de-dados/codenation.dev/desafio_nota_do_enem")
getwd()
library(dplyr) # Filtering
library(psych) # Scatterplot Matrix
library(e1071) # SVM
library(rpart) # Decision trees (CART)
library(readr) # Write CSV files
library(tidyverse) # drop_na()
library(readxl) # Import xls files
library(randomForest) # Random forest model
# ****************************************************
# *** DATA ***
# *** ***
# ****************************************************
# Step 1 - Collecting the data
# To return to the normal state, with accuracy of 93.49, just keep the first 3 lines here before the other processing
inicio <- Sys.time() # start the timer used to measure the prediction step
# Predict the missing essay grades
source("reconstruir_dados_com_na.R", encoding = "UTF-8")
fim <- Sys.time()
tempo_para_prever_outras_notas <- fim - inicio
tempo_para_prever_outras_notas
notateste <- read.csv("test.csv")
PIB_2016 <- read_excel("PIB-2016.xls")
variaveis_descriminantes <- c("NU_NOTA_MT", "NU_NOTA_CN", "NU_NOTA_CH", "NU_NOTA_LC", "NU_NOTA_REDACAO", "Q006", "TP_ESCOLA", "Q047", "CO_UF_RESIDENCIA", "TP_SEXO", "NU_NOTA_COMP2", "Q001", "Q024", "NU_NOTA_COMP3", "NU_NOTA_COMP5", "NU_NOTA_COMP4", "NU_IDADE", "Q027", "NU_NOTA_COMP1", "SG_UF_RESIDENCIA")
nota <- nota[,variaveis_descriminantes]
colnames(nota)
# ****************************************************
# *** ADD ***
# *** NEW VARIABLES ***
# ****************************************************
# average GDP (PIB) per state
PIB_2016 = PIB_2016 %>% filter(Ano == 2016)
PIB_2016 = PIB_2016[c(names(PIB_2016)[5], names(PIB_2016)[42])]
MEDIA_PIB_2016 = aggregate(PIB_2016$`Produto Interno Bruto per capita\n(R$ 1,00)`,
by=list(PIB_2016$`Sigla da Unidade da Federação`),
FUN = mean)
colnames(MEDIA_PIB_2016) = c("SG_UF_RESIDENCIA","media")
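# aggregate() groups the GDP-per-capita rows by state abbreviation and averages
# them, yielding one mean value per state to merge into the exam data.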
# Create regions from the states
glimpse(notateste$SG_UF_RESIDENCIA)
table(notateste$SG_UF_RESIDENCIA)
x <- c(notateste$SG_UF_RESIDENCIA)
lookup <- c( AC = "NORTE", AL = "NORDESTE", AM = "NORTE", AP = "NORTE", BA = "NORDESTE", CE = "NORDESTE", DF = "CENTRO-OESTE", ES = "SUDESTE", GO = "CENTRO-OESTE", MA = "NORDESTE", MG = "SUDESTE", MS = "CENTRO-OESTE", MT = "CENTRO-OESTE", PA = "NORTE", PB = "NORDESTE", PE = "NORDESTE", PI = "NORDESTE", PR = "SUL", RJ = "SUDESTE", RN = "NORDESTE", RO = "NORTE", RR = "NORTE", RS = "SUL", SC = "SUL", SE = "NORDESTE", SP = "SUDESTE", TO = "NORTE")
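# A named vector acts as a lookup table: indexing it with the vector of state
# codes returns the matching region names, one per row, in the same order.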
uniao = lookup[x]
regioes = unname(uniao)
table(regioes)
notateste$regioes = regioes
table(notateste$regioes)
names(table(notateste$regioes))
# Transform the two datasets to have the same variables
notateste = merge(notateste,MEDIA_PIB_2016)
nomes <- names(notateste)
# Create regions from the states
glimpse(nota$SG_UF_RESIDENCIA)
table(nota$SG_UF_RESIDENCIA)
x <- c(nota$SG_UF_RESIDENCIA)
lookup <- c( AC = "NORTE", AL = "NORDESTE", AM = "NORTE", AP = "NORTE", BA = "NORDESTE", CE = "NORDESTE", DF = "CENTRO-OESTE", ES = "SUDESTE", GO = "CENTRO-OESTE", MA = "NORDESTE", MG = "SUDESTE", MS = "CENTRO-OESTE", MT = "CENTRO-OESTE", PA = "NORTE", PB = "NORDESTE", PE = "NORDESTE", PI = "NORDESTE", PR = "SUL", RJ = "SUDESTE", RN = "NORDESTE", RO = "NORTE", RR = "NORTE", RS = "SUL", SC = "SUL", SE = "NORDESTE", SP = "SUDESTE", TO = "NORTE")
uniao = lookup[x]
regioes = unname(uniao)
table(regioes)
nota$regioes = regioes
table(nota$regioes)
names(table(nota$regioes))
# Make both datasets share the same variables
nota = merge(nota, MEDIA_PIB_2016)
# ****************************************************
# ***                 TRAINING                     ***
# ***                                              ***
# ****************************************************
# Keeping only the data available for training
treinando = nota
# ****************************************************
# ***              NUMERIC FORMAT                  ***
# ****************************************************
glimpse(treinando)
# Convert to numeric
treinando$SG_UF_RESIDENCIA = as.numeric(treinando$SG_UF_RESIDENCIA)
treinando$TP_SEXO = as.numeric(treinando$TP_SEXO)
treinando$Q001 = as.numeric(treinando$Q001)
treinando$Q006 = as.numeric(treinando$Q006)
treinando$Q024 = as.numeric(treinando$Q024)
treinando$Q047 = as.numeric(treinando$Q047)
treinando$regioes = as.factor(treinando$regioes)
treinando$regioes = as.numeric(treinando$regioes)
# drop the SG_UF_RESIDENCIA column
treinando = treinando[c(-1)]
nota = treinando
glimpse(nota)
# ****************************************************
# ***                    TEST                      ***
# ***                                              ***
# ****************************************************
# Create a backup variable
testando = notateste
testando$Q027 = as.numeric(testando$Q027)
testando$TP_DEPENDENCIA_ADM_ESC = NULL
testando$TP_ENSINO = NULL
## CLEANING: remove NA values from the table
testando = testando %>% drop_na()
any(is.na(testando))
# Convert variables to numeric
testando$SG_UF_RESIDENCIA = as.numeric(testando$SG_UF_RESIDENCIA)
testando$TP_SEXO = as.numeric(testando$TP_SEXO)
testando$Q001 = as.numeric(testando$Q001)
testando$Q002 = as.numeric(testando$Q002)
testando$Q006 = as.numeric(testando$Q006)
testando$Q024 = as.numeric(testando$Q024)
testando$Q025 = as.numeric(testando$Q025)
testando$Q026 = as.numeric(testando$Q026)
testando$Q047 = as.numeric(testando$Q047)
testando$CO_PROVA_CN = as.numeric(testando$CO_PROVA_CN)
testando$CO_PROVA_CH = as.numeric(testando$CO_PROVA_CH)
testando$CO_PROVA_LC = as.numeric(testando$CO_PROVA_LC)
testando$CO_PROVA_MT = as.numeric(testando$CO_PROVA_MT)
testando$regioes = as.factor(testando$regioes)
testando$regioes = as.numeric(testando$regioes)
notateste = testando
testando$NU_INSCRICAO = NULL
# Check that the data is numeric before feeding it to the algorithm
glimpse(testando)
# ****************************************************
# ***                 ML MODELS                    ***
# ***                                              ***
# ****************************************************
## Multiple linear regression
# Step 3: Training the model (using the training data)
modelo <- lm(NU_NOTA_MT ~ ., data = treinando)
# Inspect the coefficients
modelo
previsao1 <- predict(modelo, testando)
View(previsao1)
#treinando$Prev = previsao1
# Step 4: Evaluating model performance
# More details about the model
summary(modelo)
####### -> 46%
# Remember: correlation does not imply causation
#### RANDOM FOREST (decision trees) ####
trainset = treinando
testset = testando
# Baseline model (single decision tree via rpart)
modelo_rf_v1 = rpart(NU_NOTA_MT ~ ., data = trainset, control = rpart.control( cp = .000999999999999999))
summary(modelo_rf_v1)
# Improved model - ideal 600|40
model <- randomForest(NU_NOTA_MT ~
NU_NOTA_CN +
NU_NOTA_CH +
NU_NOTA_LC +
NU_NOTA_REDACAO +
Q006 +
TP_ESCOLA +
Q047 +
CO_UF_RESIDENCIA +
TP_SEXO +
NU_NOTA_COMP2 +
Q001 +
Q024 +
NU_NOTA_COMP3 +
NU_NOTA_COMP5 +
NU_NOTA_COMP4 +
NU_IDADE +
Q027 +
NU_NOTA_COMP1
,
data = trainset,
ntree = 600,
nodesize = 50)
# Predictions on the test data
tree_pred = predict(model, testset)
enviotree = data.frame(notateste$NU_INSCRICAO)
colnames(enviotree) = c("NU_INSCRICAO")
enviotree$NU_NOTA_MT = tree_pred
write.csv(enviotree, "answer.csv", row.names = FALSE)
# 93.51 %
# 93.49 % with variable selection --> NU_NOTA_MT ~ NU_NOTA_CN + NU_NOTA_CH + NU_NOTA_LC + NU_NOTA_REDACAO + Q006 + TP_ESCOLA + Q047 + CO_UF_RESIDENCIA + TP_SEXO + NU_NOTA_COMP2 + media + Q001 + Q024 + NU_NOTA_COMP3 + NU_NOTA_COMP5 + NU_NOTA_COMP4 + NU_IDADE + Q027 + NU_NOTA_COMP1 + regioes
# 93.55 % with data reconstruction
##FILTER SELECTION
modelo <- randomForest(NU_NOTA_MT ~ . ,
data = trainset,
ntree = 600,
nodesize =30,
importance = TRUE)
importance(modelo, scale = TRUE)
# Most relevant variables: feature importance
importancia_pred <- as.data.frame(importance(modelo, scale = TRUE))
importancia_pred <- rownames_to_column(importancia_pred, var = "variable")
p1 <- ggplot(data = importancia_pred, aes(x = reorder(variable, `%IncMSE`),
y = `%IncMSE`,
fill = `%IncMSE`)) +
labs(x = "variable", title = "MSE reduction") +
geom_col() +
coord_flip() +
theme_bw() +
theme(legend.position = "bottom")
p2 <- ggplot(data = importancia_pred, aes(x = reorder(variable, IncNodePurity),
y = IncNodePurity,
fill = IncNodePurity)) +
labs(x = "variable", title = "Node purity reduction") +
geom_col() +
coord_flip() +
theme_bw() +
theme(legend.position = "bottom")
library(cowplot)
cowplot::plot_grid(p1, p2)
|
/-
Copyright (c) 2020 Markus Himmel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Markus Himmel, Scott Morrison
-/
import category_theory.limits.shapes.kernels
/-!
# The abelian image and coimage.
In an abelian category we usually want the image of a morphism `f` to be defined as
`kernel (cokernel.π f)`, and the coimage to be defined as `cokernel (kernel.ι f)`.
We make these definitions here, as `abelian.image f` and `abelian.coimage f`
(without assuming the category is actually abelian),
and later relate these to the usual categorical notions when in an abelian category.
There is a canonical morphism `coimage_image_comparison : abelian.coimage f ⟶ abelian.image f`.
Later we show that this is always an isomorphism in an abelian category,
and conversely a category with (co)kernels and finite products in which this morphism
is always an isomorphism is an abelian category.
-/
noncomputable theory
universes v u
open category_theory
open category_theory.limits
namespace category_theory.abelian
variables {C : Type u} [category.{v} C] [has_zero_morphisms C] [has_kernels C] [has_cokernels C]
variables {P Q : C} (f : P ⟶ Q)
section image
/-- The kernel of the cokernel of `f` is called the (abelian) image of `f`. -/
protected abbreviation image : C := kernel (cokernel.π f)
/-- The inclusion of the image into the codomain. -/
protected abbreviation image.ι : abelian.image f ⟶ Q :=
kernel.ι (cokernel.π f)
/-- There is a canonical epimorphism `p : P ⟶ image f` for every `f`. -/
protected abbreviation factor_thru_image : P ⟶ abelian.image f :=
kernel.lift (cokernel.π f) f $ cokernel.condition f
/-- `f` factors through its image via the canonical morphism `p`. -/
@[simp, reassoc] protected lemma image.fac :
abelian.factor_thru_image f ≫ image.ι f = f :=
kernel.lift_ι _ _ _
instance mono_factor_thru_image [mono f] : mono (abelian.factor_thru_image f) :=
mono_of_mono_fac $ image.fac f
end image
section coimage
/-- The cokernel of the kernel of `f` is called the (abelian) coimage of `f`. -/
protected abbreviation coimage : C := cokernel (kernel.ι f)
/-- The projection onto the coimage. -/
protected abbreviation coimage.π : P ⟶ abelian.coimage f :=
cokernel.π (kernel.ι f)
/-- There is a canonical monomorphism `i : coimage f ⟶ Q`. -/
protected abbreviation factor_thru_coimage : abelian.coimage f ⟶ Q :=
cokernel.desc (kernel.ι f) f $ kernel.condition f
/-- `f` factors through its coimage via the canonical morphism `p`. -/
protected lemma coimage.fac : coimage.π f ≫ abelian.factor_thru_coimage f = f :=
cokernel.π_desc _ _ _
instance epi_factor_thru_coimage [epi f] : epi (abelian.factor_thru_coimage f) :=
epi_of_epi_fac $ coimage.fac f
end coimage
/--
The canonical map from the abelian coimage to the abelian image.
In any abelian category this is an isomorphism.
Conversely, any additive category with kernels and cokernels and
in which this is always an isomorphism, is abelian.
See https://stacks.math.columbia.edu/tag/0107
-/
def coimage_image_comparison : abelian.coimage f ⟶ abelian.image f :=
cokernel.desc (kernel.ι f) (kernel.lift (cokernel.π f) f (by simp)) $ (by { ext, simp, })
/--
An alternative formulation of the canonical map from the abelian coimage to the abelian image.
-/
def coimage_image_comparison' : abelian.coimage f ⟶ abelian.image f :=
kernel.lift (cokernel.π f) (cokernel.desc (kernel.ι f) f (by simp)) (by { ext, simp, })
lemma coimage_image_comparison_eq_coimage_image_comparison' :
coimage_image_comparison f = coimage_image_comparison' f :=
by { ext, simp [coimage_image_comparison, coimage_image_comparison'], }
@[simp, reassoc]
lemma coimage_image_factorisation :
coimage.π f ≫ coimage_image_comparison f ≫ image.ι f = f :=
by simp [coimage_image_comparison]
end category_theory.abelian
|
```python
from IPython.core.display import HTML
from IPython.display import Image
HTML("""
<style>
.output_png {
display: table-cell;
text-align: center;
vertical-align: middle;
}
</style>
""")
```
# *Electric Circuits I*
## Week 1 - Conventions for applying Kirchhoff's laws in circuit analysis
### Case 1
```python
Image("./figures/J1C1.png", width=500)
```
#### Kirchhoff's voltage law (KVL)
Around any closed loop of the circuit, $\sum_k v_k = 0$
`Arbitrary convention (1): while traversing the loop, pick one sign (+ or -) for voltage rises and the opposite sign for voltage drops in the KVL sum.`
Hence, assigning the sign (-) to voltage rises and (+) to voltage drops, applying KVL to the circuit shown above gives:
$$
\begin{align}
-10 + v_1 + v_2 &= 0\\
-v_2 + v_3 + v_4 &= 0
\end{align}
$$
#### Kirchhoff's current law (KCL)
At any node of the circuit, $\sum_k i_k = 0$
`Arbitrary convention (2): for the node in question, pick one sign (+ or -) for currents entering the node and the opposite sign for currents leaving it in the KCL sum.`
Or, to avoid sign-swap mistakes, simply set
`the sum of the currents entering the node equal to the sum of the currents leaving it.`
$$
\begin{align}
i_1 &= i_2 + i_3\\
i_3 &= -0.5~A
\end{align}
$$
#### Ohm's law (+ passive sign convention)
`Passive convention (3): any expression relating the voltage and current of an ideal two-terminal element must be written according to the passive sign convention.`
The passive sign convention states that:
1. If the reference direction chosen for the current coincides with the voltage drop at the reference polarity ($+ \rightarrow -$), *any expression involving $v$ and $i$* for that element must be written with a **positive sign**.
2. If the reference direction chosen for the current coincides with the voltage rise at the reference polarity ($+ \leftarrow -$), *any expression involving $v$ and $i$* for that element must be written with a **negative sign**.
Ohm's law expresses the relation between voltage, current, and resistance in an ideal resistor, so its expressions must obey the passive sign convention.
We can therefore write the following equations for the circuit above.
$$
\begin{align}
v_1 &= 10i_1\\
v_2 &= 50i_2\\
v_3 &= 20i_3
\end{align}
$$
Hence:
$$
\begin{align}
-10 + 10i_1 + 50i_2 &= 0\\
-50i_2 -10 + v_4 &= 0\\
i_1 - i_2 &= -0.5
\end{align}
$$
Rearranging the equations:
$$
\begin{align}
10i_1 + 50i_2 &= 10\\
-50i_2 + v_4 &= 10\\
i_1 - i_2 &= -0.5
\end{align}
$$
### Solving the equations
```python
import sympy as sp
import numpy as np
```
```python
# define the N unknown variables
i1, i2, v4 = sp.symbols('i1, i2, v4')
# define the system of N equations (filled in from the rearranged system above)
eq1 = sp.Eq(10*i1 + 50*i2, 10)
eq2 = sp.Eq(-50*i2 + v4, 10)
eq3 = sp.Eq(i1 - i2, -0.5)
# solve the system
soluc = sp.solve((eq1, eq2, eq3), dict=True)
i1 = np.array([sol[i1] for sol in soluc])
i2 = np.array([sol[i2] for sol in soluc])
v4 = np.array([sol[v4] for sol in soluc])
i3 = -0.5
print('System solution:\n\n i1 = %.2f A,\n i2 = %.2f A,\n i3 = %.2f A,\n v4 = %.2f V.' %(i1, i2, i3, v4))
```
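As a cross-check (an addition of ours, not part of the original exercise), the same rearranged linear system can be solved directly with `numpy`:

```python
import numpy as np

# Rows encode: 10*i1 + 50*i2 = 10;  -50*i2 + v4 = 10;  i1 - i2 = -0.5
A = np.array([[10.0,  50.0, 0.0],
              [ 0.0, -50.0, 1.0],
              [ 1.0,  -1.0, 0.0]])
b = np.array([10.0, 10.0, -0.5])
i1_chk, i2_chk, v4_chk = np.linalg.solve(A, b)
print(i1_chk, i2_chk, v4_chk)  # should match the sympy solution above
```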
#### Computing the powers
(The blank expressions below are filled in from the Ohm's-law relations derived above; the two sign choices marked "assumed" depend on the reference directions in the figure.)
```python
# Ohm's law expressions (passive sign convention)
v1 = 10*i1
v2 = 50*i2
v3 = 20*i3
# power expressions (passive sign convention)
p10V = -10*i1  # the source supplies power: current exits its + terminal (assumed from the figure)
p1 = v1*i1
p2 = v2*i2
p3 = v3*i3
p4 = v4*i3     # element 4 carries i3 (assumed from the figure)
print('Powers:\n\n p10V = %.2f W\n p1 = %.2f W,\n p2 = %.2f W,\n p3 = %.2f W,\n p4 = %.2f W\n' %(p10V, p1, p2, p3, p4))
```
```python
# compute the sum of the powers (should be zero by conservation of energy)
print('Sum of the powers: %.2f W\n' %(p10V+p1+p2+p3+p4))
```
Circuit simulation: https://tinyurl.com/yfbwd4vz
### Case 2
```python
Image("./figures/J1C2.png", width=500)
```
```python
# define the N unknown variables
i1, i2, v4 = sp.symbols('i1, i2, v4')
# define the system of N equations (left blank in the original; fill in from the Case 2 circuit)
eq1 = sp.Eq( )
eq2 = sp.Eq( )
eq3 = sp.Eq( )
# solve the system
soluc = sp.solve((eq1, eq2, eq3), dict=True)
i1 = np.array([sol[i1] for sol in soluc])
i2 = np.array([sol[i2] for sol in soluc])
v4 = np.array([sol[v4] for sol in soluc])
i3 = 0.5
print('System solution:\n\n i1 = %.2f A,\n i2 = %.2f A,\n i3 = %.2f A,\n v4 = %.2f V.' %(i1, i2, i3, v4))
```
```python
# Ohm's law expressions (passive sign convention) - left blank in the original
v1 =
v2 =
v3 =
# power expressions (passive sign convention) - left blank in the original
p10V =
p1 =
p2 =
p3 =
p4 =
print('Powers:\n\n p10V = %.2f W\n p1 = %.2f W,\n p2 = %.2f W,\n p3 = %.2f W,\n p4 = %.2f W\n' %(p10V, p1, p2, p3, p4))
```
### Case 3
```python
Image("./figures/J1C3.png", width=500)
```
```python
# define the N unknown variables
i1, i2, v4 = sp.symbols('i1, i2, v4')
# define the system of N equations (left blank in the original; fill in from the Case 3 circuit)
eq1 = sp.Eq( )
eq2 = sp.Eq( )
eq3 = sp.Eq( )
# solve the system
soluc = sp.solve((eq1, eq2, eq3), dict=True)
i1 = np.array([sol[i1] for sol in soluc])
i2 = np.array([sol[i2] for sol in soluc])
v4 = np.array([sol[v4] for sol in soluc])
i3 = 0.5
print('System solution:\n\n i1 = %.2f A,\n i2 = %.2f A,\n i3 = %.2f A,\n v4 = %.2f V.' %(i1, i2, i3, v4))
```
```python
# Ohm's law expressions (passive sign convention) - left blank in the original
v1 =
v2 =
v3 =
# power expressions (passive sign convention) - left blank in the original
p10V =
p1 =
p2 =
p3 =
p4 =
print('Powers:\n\n p10V = %.2f W\n p1 = %.2f W,\n p2 = %.2f W,\n p3 = %.2f W,\n p4 = %.2f W\n' %(p10V, p1, p2, p3, p4))
```
|
State Before: α : Type u
β : Type v
γ : Type ?u.206424
δ : Type ?u.206427
ε : Type ?u.206430
ζ : Type ?u.206433
ι : Type u_1
π : ι → Type u_2
κ : Type ?u.206444
inst✝¹ : TopologicalSpace α
inst✝ : (i : ι) → TopologicalSpace (π i)
f✝ : α → (i : ι) → π i
f : β → (i : ι) → π i
g : (i : ι) → π i
u : Filter β
⊢ Tendsto f u (𝓝 g) ↔ ∀ (x : ι), Tendsto (fun i => f i x) u (𝓝 (g x)) State After: no goals
Tactic: rw [nhds_pi, Filter.tendsto_pi]
|
\section{Evaluation}
Case studies and evaluation go here.
\begin{itemize}
\item Trivial Example- Two kinds of caching
\item Process Scheduler
\item Bookstore
\item More?
\end{itemize}
|
#!/usr/bin/env python
"""
tentopy.py
Linear algebra tools for orthogonal tensor decompositions.
The power method algorithm is based on the robust tensor power method, described
in Algorithm 1 of "Tensor Decompositions for Learning Latent Variable Models"
by Anandkumar et al.
"""
import numpy as np
import math
def reconstruct(W, X3, L=25, N=20):
    """ Reconstruct the eigenvalues and eigenvectors corresponding to the
    probability distributions.

    inputs:
    W: the whitening matrix of M2
    X3: the whitened third-order moment tensor M3(W, W, W)
    L, N: power-method iteration counts (see eig)

    outputs:
    eigenvalues
    eigenvectors
    """
    evecs, evals = eig(X3, L, N)
    evals = evals.flatten()
    evals_rec = 1. / np.array(evals)**2
    evecs_rec = [np.linalg.solve(W.T, e * evecs[k, :]) for k, e in enumerate(evals)]
    # now in reverse order
    return evals_rec[::-1], np.array(evecs_rec[::-1])
def whiten(M2, M3):
    r""" Form the pseudo-whitening matrix of M2 and apply to M3 to form \tilde{M3}.
    The pseudo-whitening matrix is formed by a thresholded eigenvalue
    decomposition. If M2 = UDU^T, then form [D']_i = max(abs([D]_i), \epsilon).

    inputs:
    M2: the second-order moment matrix
    M3: the third-order moment matrix

    outputs:
    W: the pseudo-whitening matrix
    \tilde{M3}: M3(W, W, W)
    """
    evals, evecs = np.linalg.eig(M2)
    wp = np.diag([1 / math.sqrt(max(abs(w), 10e-12)) for w in evals])
    W = np.dot(evecs, wp)
    # now apply W in all directions to M3
    # TODO: replace the nested loops with numpy routines (see whiten_einsum below)
    N1 = W.shape[1]
    N2 = M3.shape[0]
    X3 = tensor_outer(np.zeros(N1), 3)
    for i1 in range(N1):
        for i2 in range(N1):
            for i3 in range(N1):
                for j1 in range(N2):
                    for j2 in range(N2):
                        for j3 in range(N2):
                            X3[i1, i2, i3] += M3[j1, j2, j3] * W[j1, i1] * W[j2, i2] * W[j3, i3]
    return W, X3
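
def whiten_einsum(M2, M3):
    r""" Sketch (our addition, not in the original module): the same
    pseudo-whitening as whiten() above, but with the triple contraction done
    in a single np.einsum call instead of six nested Python loops. Assumes M3
    is cubic (N2 x N2 x N2), matching whiten()'s usage.
    """
    evals, evecs = np.linalg.eig(M2)
    wp = np.diag([1 / math.sqrt(max(abs(w), 10e-12)) for w in evals])
    W = np.dot(evecs, wp)
    # X3[a, b, c] = sum_{j,k,l} M3[j, k, l] * W[j, a] * W[k, b] * W[l, c]
    X3 = np.einsum('jkl,ja,kb,lc->abc', M3, W, W, W)
    return W, X3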
def approx_eval(T, v):
    """ Compute the approximate eigenvalue corresponding to an approximate
    eigenvector.

    inputs:
    T: a super-symmetric tensor
    v: an approximate eigenvector

    output:
    approximate eigenvalue T(v, v, v)
    """
    return np.dot(np.tensordot(T, np.outer(v, v)), v)
def tensor_outer(v, n):
    r""" Compute a rank-1 order-n (n > 1) tensor by repeated outer products.

    input: v the basis vector
    output: \otimes^n v
    """
    T = np.outer(v, v)
    for i in range(n - 2):
        T = np.outer(v, T)
    return T.reshape([len(v)] * n)
def power_method(T, L, N, norm_type=2):
    """ Main power method driver. Computes the (eigenvector, eigenvalue) pair
    of a tensor corresponding to the largest eigenvalue.

    inputs:
    T: a super-symmetric tensor
    L: number of inner iterations of power method to perform
    N: number of iterations per inner iteration

    outputs:
    a tuple:
    (eigenvector for the approximate largest eigenvalue,
    approximate largest eigenvalue, deflated tensor)
    """
    k = T.shape[0]
    n = len(T.shape)
    thetas = []

    def inner_iter(N, theta):
        for t in range(N):
            next_iter = np.tensordot(T, np.outer(theta, theta))
            theta = next_iter / np.linalg.norm(next_iter, norm_type)
        return theta.T

    for tau in range(L):
        # Choose a starting vector uniformly at random from the unit ball
        v = np.random.randn(k)
        theta_0 = v / np.linalg.norm(v, n)
        theta_0 = theta_0.reshape((k, 1))
        thetas.append(inner_iter(N, theta_0))
    ind = np.argmax([approx_eval(T, theta) for theta in thetas])
    theta_hat = inner_iter(N, thetas[ind])
    lambda_hat = approx_eval(T, theta_hat)
    rank1_approx = lambda_hat * tensor_outer(theta_hat, n)
    return theta_hat, lambda_hat, T - rank1_approx
def eig(T, L=10, N=10, norm_type=2):
    """ Compute the eigen-decomposition of a super-symmetric tensor.

    inputs:
    T: a super-symmetric tensor
    L: number of inner iterations of power method to perform
    N: number of iterations per inner iteration

    outputs:
    a tuple of eigenvectors and eigenvalues
    """
    if sum([d == T.shape[0] for d in T.shape]) != len(T.shape):
        raise Exception('Each tensor dimension must be the same')
    k = T.shape[0]
    evecs = []
    evals = []
    def_T = T
    for i in range(k):
        evec, ev, def_T = power_method(def_T, L, N, norm_type)
        evecs.append(list(evec))
        evals.append(ev)
    return np.array(evecs), np.array(evals)
if __name__ == '__main__':
    N = 20
    T = tensor_outer(np.zeros(N), 3)
    for j in range(N):
        T[j][j][j] = j * N + 1
    print(eig(T, norm_type=1))
|
\documentclass[10pt]{article}
\usepackage[utf8]{inputenc}
\usepackage{multirow}
\usepackage{amsmath,mathtools}
\usepackage{tcolorbox}
\newcommand{\mbf}[1]{\mathbf{#1}}
\newcommand{\tbf}[1]{\textbf{#1}}
\newcommand{\dsum}[3]{$\sum^{#1}_{#2}{#3}$}
\newcommand{\dint}[3]{\int^{#1}_{#2}{#3}}
\newcommand{\tit}[1]{\textit{#1}}
\newcommand{\fn}[1]{\footnote{#1}}
\newcommand{\de}[2]{\frac{d{#1}}{d{#2}}}
\newcommand{\ch}[2]{\Gamma^{#1}_{#2}}
\newcommand{\chris}{\ch{\mu}{\alpha \beta}=\frac{1}{2}g^{\mu \lambda}(\p_{\alpha} g_{\beta \lambda}+\p_\beta g_{\alpha \lambda} - \p_\lambda g_{\alpha \beta})}
\newcommand{\p}{\partial}
\newcommand{\pe}[2]{\frac{\partial{#1}}{\partial{#2}}}
\newcommand{\n}{\nonumber}
\newcommand{\cbox}{tcolorbox}
\newcommand{\cc}[1]{\left({#1}\right)}
\newcommand{\rr}[1]{\left[{#1}\right]}
\newcommand{\vd}[1]{\dot{\vec{#1}}}
\newcommand{\tx}[1]{\text{#1}}
\begin{document}
\title{List Of Phoenix Sources}
\author{Divesh Jain}
\maketitle
\newpage
\section{List Of Sources}
Below I present the list of sources obtained by unifying data from DB1 (\texttt{www.galaxyclusters.com}) and DB2, shown in bold font (\texttt{https://arxiv.org/pdf/1808.04057.pdf}).
\begin{itemize}
\item The list is organized in ascending order of redshift.
\item The first column gives the designation of the source; the second column gives the redshift as provided in DB1; the third column gives the redshift from DB2.
\item Sources with a starred designation are listed as candidates in DB1.
\end{itemize}
\begin{center}
\begin{tabular}{|c|c|c|c|c|}
\hline
\tbf{Designation} & Redshift 1 (z) & Redshift 2 & Frequency (MHz) & Surface brightness (mJy)\\
\hline
\multirow{4}{*}{AS753*} & 0.0130 & \tbf{0.014} & 2378 & 100\\
&&&330&8500\\
&&&1398&460\\
&&&843&1300\\
\hline
\multirow{7}{*}{A4038} & 0.0303 & \tbf{0.02819}&843&170$\pm$30\\
&&&80&19000$\pm$2700\\
&&&160&4300$\pm$500\\
&&&327&1440$\pm$150\\
&&&1400&61$\pm$3\\
&&&408&910$\pm$110\\
&&&30&32000$\pm$7000\\
\hline
\tbf{A2063}&&\tbf{0.0349}\\
\hline
\tbf{A548b-NW} &&\tbf{0.0424}\\
\hline
\tbf{A548b-N} &&\tbf{0.0424}\\
\hline
\multirow{11}{*}{A85}& 0.0557 & \tbf{0.0551}&843&200$\pm$30\\
&&&16&93000$\pm$24000\\
&&&80&34000$\pm$3700\\
&&&2700&10\\
&&&300&2739\\
&&&1400&43$\pm$3\\
&&&30&93000$\pm$13000\\
&&&408&1540$\pm$250\\
&&&1425&40.9$\pm$2.3\\
&&&160&8330$\pm$700\\
&&&327&3200$\pm$320\\
\hline
\multirow{11}{*}{A133*} & 0.0603&&4900&4$\pm$0.3\\
&&&2700&29$\pm$16\\
&&&1400&168$\pm$6\\
&&&843&530$\pm$60\\
&&&408&2620$\pm$250\\
&&&160&10900$\pm$1200\\
&&&80&35500$\pm$4300\\
&&&30&46000$\pm$13000\\
&&&330&3267.2$\pm$7.7\\
&&&1400&136.8$\pm$0.2\\
&&&327&2820$\pm$280\\
\hline
\multirow{2}{*}{A725*}& & 0.0900&1400&6$\pm$1\\
&&&327&76$\pm$9\\
\hline
\end{tabular}
\end{center}
\begin{center}
\begin{tabular}{|c|c|c|c|c|}
\hline
\tbf{Designation} & Redshift 1 (z) & Redshift 2 & Frequency (MHz) & Surface brightness (mJy)\\
\hline
\multirow{10}{*}{A13*} & 0.0943 & \tbf{0.0943}&160&2800$\pm$600\\
&&&1425&35.5$\pm$1.7\\
&&&80&6000$\pm$1200\\
&&&843&90$\pm$10\\
&&&160&2800$\pm$600\\
&&&1400&34$\pm$0\\
&&&408&490$\pm$80\\
&&&1400&31$\pm$0\\
&&&1400&30$\pm$3\\
&&&327&630$\pm$60\\
\hline
\multirow{2}{*}{A2048} &0.0980& \tbf{0.0972}&325&559$\pm$61\\
&&&1425&18.9$\pm$4.3\\
\hline
\multirow{3}{*}{A2443} & 0.1080 & \tbf{0.1080}&1425&6.5$\pm$0.5\\
&&&74&5310$\pm$175\\
&&&325&406$\pm$69\\
\hline
\multirow{6}{*}{A1033*} & 0.1220&&1341&53.9$\pm$7.3\\
&&&1465&45.8$\pm$1.3\\
&&&365&380$\pm$0\\
&&&1422&46.9$\pm$7.6\\
&&&1385&51.2$\pm$1.5\\
&&&608&220$\pm$0\\
\hline
\tbf{A1664} & &\tbf{0.1283}\\
\hline
\multirow{2}{*}{24P73} & 0.1500 & &1400&12$\pm$3\\
&&&325 &307$\pm$33\\
\hline
\end{tabular}
\end{center}
\tbf{Comments:}
\begin{itemize}
\item \tit{Although the galaxy cluster database classifies the above as phoenix sources, a few of them carry a special mention of `candidates' in the surface-brightness column, for which no description is given.}\\
\item \tit{The redshift is measured from SDSS data. Why is there a discrepancy in redshift for some of the phoenix sources? How important is redshift for us?}
\end{itemize}
\end{document}
|
[STATEMENT]
lemma continuous_Lambert_W [continuous_intros]:
assumes "continuous F f" "f (Lim F (\<lambda>x. x)) > -exp (-1) \<or> eventually (\<lambda>x. f x \<ge> -exp (-1)) F"
shows "continuous F (\<lambda>x. Lambert_W (f x))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. continuous F (\<lambda>x. Lambert_W (f x))
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
continuous F f
- exp (- 1) < f (Lim F (\<lambda>x. x)) \<or> (\<forall>\<^sub>F x in F. - exp (- 1) \<le> f x)
goal (1 subgoal):
1. continuous F (\<lambda>x. Lambert_W (f x))
[PROOF STEP]
unfolding continuous_def
[PROOF STATE]
proof (prove)
using this:
(f \<longlongrightarrow> f (Lim F (\<lambda>x. x))) F
- exp (- 1) < f (Lim F (\<lambda>x. x)) \<or> (\<forall>\<^sub>F x in F. - exp (- 1) \<le> f x)
goal (1 subgoal):
1. ((\<lambda>x. Lambert_W (f x)) \<longlongrightarrow> Lambert_W (f (Lim F (\<lambda>x. x)))) F
[PROOF STEP]
by (intro tendsto_Lambert_W) auto
|
function x = haar ( n, x )
%*****************************************************************************80
%
%% HAAR performs a Haar transform.
%
% Licensing:
%
% This code is distributed under the GNU LGPL license.
%
% Modified:
%
% 16 March 2011
%
% Author:
%
% Ken Beauchamp
%
% Reference:
%
% Ken Beauchamp,
% Walsh functions and their applications,
% Academic Press, 1975,
% ISBN: 0-12-084050-2,
% LC: QA404.5.B33.
%
% Parameters:
%
% Input, integer N, the number of items in X.
% N must be a power of 2.
%
% Input, real X(N), the data to be transformed.
%
% Output, real X(N), the transformed data.
%
  k = i4_log_2 ( n );

  for i = 1 : k
    l = k + 1 - i;
    l2 = 2^( l - 1 );
    y(1:2*l2) = x(1:2*l2);
    for j = 1 : l2
      l3 = l2 + j;
      jj = 2 * j - 1;
      x(j) = y(jj) + y(jj+1);
      x(l3) = y(jj) - y(jj+1);
    end
  end

  return
end
|
## Copyright (c) 2018-2021, Carnegie Mellon University
## See LICENSE for details
Declare(PrunedMDPRDFT);
Declare(PrunedIMDPRDFT);
# Same as (floor(n_t/2)+1)*2.
# RClength := (n) -> (n + 1) mod 2 + n + 1;
RClength := (n) -> PRDFT1(n).dims()[1];
IJmatrix := (n) -> DirectSum(I(1), J(n-1));
tensorIJmatrix := (l) -> When(Length(l) > 0,
Tensor(IJmatrix(l[1]), tensorIJmatrix(Drop(l, 1))),
Diag([1, -1]));
# Distinct(list) means the elements of list are distinct.
Distinct := (l) -> (Length(l) = Length(Set(l)));
# pairup takes two lists of the same length,
# returns list of ordered pairs with elements of first and second.
pairup := (l1, l2) -> When(Length(l1)=0,
[],
Concat([ [l1[1], l2[1]] ],
pairup(Drop(l1, 1),
Drop(l2, 1))));
#F PrunedMDPRDFT(<dims>, <pat>, [<exp>=1])
#F Pruned multi-dimensional PRDFT (packed real DFT) non-terminal
#F dims = [ <n_1>, ..., <n_t> ] list of (positive) dimensions
#F pat = [ <l_1>, ..., <l_t> ] lists l_i having distinct elements in 0..n_i
#F exp = root of unity exponent scaling (see DFT for exact definition)
#F
#F Definition : multidimensional matrix of size M x N, where
#F M = n_1*...*n_{t-1}*(floor(n_t/2)+1)*2
#F N = Length(l_1)*..*Length(l_t)
#F This matrix has real components, for an operator with
#F N real inputs,
#F M real outputs for interleaved real and imaginary components.
#F
#F Example (direct)  : PrunedMDPRDFT([4,5,6], [[0..3], [2..4], [5, 3, 1]])
#F Example (inverse) : PrunedMDPRDFT([4,5,6], [[0..3], [2..4], [5, 3, 1]], -1)
#F
#F In last dimension, t: real-to-complex DFT;
#F then dimensions t-1, ..., 1: complex-to-complex DFT.
#F
# To verify that components of m are real:
# Im(MatSPL(m)) = MatSPL(ApplyFunc(O, m.dims()));
Class(PrunedMDPRDFT, TaggedNonTerminal, rec(
a_lengths := self >> self.params[1],
a_pat := self >> self.params[2],
a_exp := self >> self.params[3],
abbrevs := [
(L, pat) -> Checked(IsList(L),
ForAll(L, IsPosInt),
ForAll(L, e->e > 1),
IsList(pat),
Length(pat) = Length(L),
ForAll(pat, IsList),
# Elements of pat[d] are distinct and in 0..L[d]-1.
ForAll(pat, Distinct),
Minimum(List(pat, Minimum)) >= 0,
ForAll(L - List(pat, Maximum), IsPosInt),
[ L, pat, 1 ]),
(L, pat, k) -> Checked(IsList(L),
ForAll(L, IsPosInt),
ForAll(L, e->e > 1),
IsList(pat),
Length(pat) = Length(L),
ForAll(pat, IsList),
ForAll(pat, Distinct),
# Elements of pat[d] are distinct and in 0..L[d]-1.
Minimum(List(pat, Minimum)) >= 0,
ForAll(L - List(pat, Maximum), IsPosInt),
IsInt(k),
Gcd(Product(L), k) = 1,
[ L, pat, k mod Product(L) ])
],
# dims() has 2 components, counting outputs and inputs:
# dims()[1] is product of all but last dimension, and RClength on last.
# dims()[2] is product of all lengths of pattern components.
dims := self >> let(a_lengths := self.a_lengths(),
a_pat := self.a_pat(),
[Product(DropLast(a_lengths, 1)) *
RClength(Last(a_lengths)),
Product(List(a_pat, Length))
]),
# Just pad pat[d] with zeroes to fill 0..lengths[d]-1,
# and then call MDPRDFT. But would that be too simple?
# It's the terminate function, not a Rule, so maybe OK?
terminate := self >> let(
a_lengths := self.a_lengths(),
a_pats := self.a_pat(),
a_exp := self.a_exp(),
mdprdft := MDPRDFT(a_lengths, a_exp),
lenpatpairs := pairup(a_lengths, a_pats),
lv := List(lenpatpairs, lp->HStack(List(lp[2], e->Scat(fBase(lp[1], e))))),
tlv := mdprdft * Tensor(lv),
tlv),
transpose := self >> PrunedIMDPRDFT(self.a_lengths(), self.a_pat(), -self.a_exp()),
isReal := True,
normalizedArithCost := (self) >> let(n := Product(self.a_lengths()),
IntDouble(2 * n * d_log(n) / d_log(2)) )
));
#F PrunedIMDPRDFT(<dims>, <pat>, [<exp>=1])
#F Pruned multi-dimensional inverse PRDFT (packed real DFT) non-terminal
#F dims = [ <n_1>, ..., <n_t> ] list of (positive) dimensions
#F pat = [ <l_1>, ..., <l_t> ] lists l_i having distinct elements in 0..n_i
#F exp = root of unity exponent scaling (see DFT for exact definition)
#F
#F Definition : multidimensional matrix of size N x M, where
#F N = Length(l_1)*..*Length(l_t)
#F M = n_1*...*n_{t-1}*(floor(n_t/2)+1)*2
#F This matrix has real components, for an operator with
#F M real inputs for interleaved real and imaginary components,
#F N real outputs.
#F
#F Example (direct)  : PrunedIMDPRDFT([4,5,6], [[0..3], [2..4], [5, 3, 1]])
#F Example (inverse) : PrunedIMDPRDFT([4,5,6], [[0..3], [2..4], [5, 3, 1]], -1)
#F
#F In dimensions 1, ..., t-1: complex-to-complex DFT;
#F then in last dimension, t: complex-to-real DFT.
#F
Class(PrunedIMDPRDFT, TaggedNonTerminal, rec(
a_lengths := self >> self.params[1],
a_pat := self >> self.params[2],
a_exp := self >> self.params[3],
abbrevs := [
(L, pat) -> Checked(IsList(L),
ForAll(L, IsPosInt),
ForAll(L, e->e > 1),
IsList(pat),
Length(pat) = Length(L),
ForAll(pat, IsList),
# Elements of pat[d] are distinct and in 0..L[d]-1.
ForAll(pat, Distinct),
Minimum(List(pat, Minimum)) >= 0,
ForAll(L - List(pat, Maximum), IsPosInt),
[ L, pat, 1 ]),
(L, pat, k) -> Checked(IsList(L),
ForAll(L, IsPosInt),
ForAll(L, e->e > 1),
IsList(pat),
Length(pat) = Length(L),
ForAll(pat, IsList),
ForAll(pat, Distinct),
# Elements of pat[d] are distinct and in 0..L[d]-1.
Minimum(List(pat, Minimum)) >= 0,
ForAll(L - List(pat, Maximum), IsPosInt),
IsInt(k),
Gcd(Product(L), k) = 1,
[ L, pat, k mod Product(L) ])
],
# dims() has 2 components, counting outputs and inputs:
# dims()[1] is product of all lengths of pattern components.
# dims()[2] is product of all but last dimension, and RClength on last.
dims := self >> let(a_lengths := self.a_lengths(),
a_pat := self.a_pat(),
[Product(List(a_pat, Length)),
Product(DropLast(a_lengths, 1)) *
RClength(Last(a_lengths))
]),
# Just call IMDPRDFT and then prune in each dimension according to pat.
# It's the terminate function, not a Rule, so maybe OK?
terminate := self >> let(
a_lengths := self.a_lengths(),
a_pats := self.a_pat(),
a_exp := self.a_exp(),
imdprdft := IMDPRDFT(a_lengths, a_exp),
lenpatpairs := pairup(a_lengths, a_pats),
lv := List(lenpatpairs, lp->VStack(List(lp[2], e->Gath(fBase(lp[1], e))))),
tlv := Tensor(lv) * imdprdft,
tlv),
transpose := self >> PrunedMDPRDFT(self.a_lengths(), self.a_pat(), -self.a_exp()),
isReal := True,
normalizedArithCost := (self) >> let(n := Product(self.a_lengths()),
IntDouble(2 * n * d_log(n) / d_log(2)) )
));
|
(* In this file we formalize the definition of a certain monoidal structure on a displayed category and show that the total category is a monoidal category whenever the base category is monoidal and the displayed category carries this monoidal structure.
The data of a displayed monoidal category consists of:
- A (base) category C.
- A displayed category D over C.
- A displayed tensor DT which consists of:
- D_x → D_y → D_{x ⊗_{T} y} : a → (b → a⊗_{{DT}} b).
- (a -->[f] a') → (b -->[g] b') → ((a ⊗_{{DT}} b) -->[f ⊗^{T} g] (a' ⊗_{{DT}} b')) : f' → g' → (f' ⊗^{{DT}} g').
- A term i : D I, called the displayed unit.
- A natural transformation dlu : (i ⊗_{{DT}} (-)) -->[lu_x] (-) with naturality condition:
- (id_i ⊗^{{DT}} f') ;; dlu_b = dlu_a ;; f'
where the equality is dependent over the naturality condition of lu w.r.t. f, i.e. we have to transport.
- A natural transformation dru : ((-) ⊗_{{DT}} i) -->[ru_x] (-) with naturality condition:
- (f' ⊗^{{DT}} id_i) ;; dru_b = dru_a ;; f'
where the equality is dependent over the naturality condition of ru w.r.t. f, i.e. we have to transport.
- A natural transformation dα : ((-)⊗(-))⊗(-) -->[α_{x,y,z}] (-)⊗((-)⊗(-)) with naturality condition:
- dα_{a,b,c} ;; (f' ⊗^{{TD}} (g' ⊗^{{TD}} h')) = (f'⊗g')⊗h' ;; dα_{a',b',c'}
And the properties of a displayed monoidal category are given by:
- Displayed triangle identity:
- dα_{a,i,b} ;; (id_a ⊗ dlu_b) = dru_a ⊗ id_b.
- Displayed pentagon_identity:
-
*)
Require Import UniMath.Foundations.All.
Require Import UniMath.CategoryTheory.Core.Categories.
Require Import UniMath.CategoryTheory.Core.Functors.
Require Import UniMath.CategoryTheory.Core.Isos.
Require Import UniMath.CategoryTheory.Monoidal.AlternativeDefinitions.MonoidalCategoriesCurried.
Require Import UniMath.CategoryTheory.DisplayedCats.Core.
Require Import UniMath.CategoryTheory.DisplayedCats.Functors.
Require Import UniMath.CategoryTheory.DisplayedCats.Constructions.
Require Import UniMath.CategoryTheory.DisplayedCats.Isos.
Local Open Scope cat.
Local Open Scope mor_disp_scope.
Section displayedmonoidalcategories.
Context (C : category) (D : disp_cat C) (T : tensor_data C) (I : C) (α : associator_data T) (lu : leftunitor_data T I) (ru : rightunitor_data T I) (tid : tensorfunctor_id T) (tcomp : tensorfunctor_comp T) (αnat : associator_naturality α) (αiso : associator_is_natiso α) (lunat : leftunitor_naturality lu) (luiso : leftunitor_is_natiso lu) (runat : rightunitor_naturality ru) (ruiso : rightunitor_is_natiso ru) (tri : triangle_identity lu ru α) (pen : pentagon_identity α).
Definition displayedtensor_data : UU :=
∑ (dt : ∏ (x y : C), (D x) → (D y) -> (D (x ⊗_{T} y))),
∏ (x x' y y' : C), ∏ (f : C⟦x,x'⟧) (g : C⟦y,y'⟧), ∏ (a : D x) (a' : D x') (b : D y) (b' : D y'),
(a -->[f] a') -> (b -->[g] b') -> ((dt x y a b)-->[f ⊗^{T} g] (dt x' y' a' b')).
Definition displayedtensoronobjects_from_displayedtensordata (dtd : displayedtensor_data)
: ∏ (x y : C), (D x) → (D y) -> (D (x ⊗_{T} y)) := pr1 dtd.
Notation "a ⊗_{{ dtd }} b" := (displayedtensoronobjects_from_displayedtensordata dtd _ _ a b) (at level 31).
Definition displayedtensoronmorphisms_from_displayedtensordata (dtd : displayedtensor_data) :
∏ (x x' y y' : C) (f : C ⟦ x, x' ⟧) (g : C ⟦ y, y' ⟧) (a : D x) (a' : D x') (b : D y) (b' : D y'),
(a -->[ f] a') -> (b -->[ g] b') -> ((a ⊗_{{dtd}} b) -->[ f ⊗^{ T} g ] (a' ⊗_{{ dtd}} b'))
:= pr2 dtd.
Notation "f' ⊗^{{ dtd }} g'" := (displayedtensoronmorphisms_from_displayedtensordata dtd _ _ _ _ _ _ _ _ _ _ f' g' ) (at level 31).
Definition displayedassociator_data (dtd : displayedtensor_data) : UU :=
∏ (x y z : C), ∏ (a : D x) (b : D y) (c : D z),
((a ⊗_{{dtd}} b) ⊗_{{dtd}} c) -->[(α x y z)] (a ⊗_{{dtd}} (b ⊗_{{dtd}} c)).
Definition displayedleftunitor_data (dtd : displayedtensor_data) (i : D I) : UU
:= ∏ (x : C), ∏ (a : D x), ((i ⊗_{{dtd}} a)-->[(lu x)] a).
Definition displayedrightunitor_data (dtd : displayedtensor_data) (i : D I) : UU
:= ∏ (x : C), ∏ (a : D x), ((a ⊗_{{dtd}} i)-->[(ru x)] a).
Definition displayedmonoidalcat_data : UU :=
∑ dtd : displayedtensor_data, ∑ i : D I,
(displayedleftunitor_data dtd i) × (displayedrightunitor_data dtd i) × (displayedassociator_data dtd).
Definition displayedtensordata_from_dispmoncatdata (DMD : displayedmonoidalcat_data) : displayedtensor_data := pr1 DMD.
Coercion displayedtensordata_from_dispmoncatdata : displayedmonoidalcat_data >-> displayedtensor_data.
Definition displayedunit_from_dispmoncatdata (DMD : displayedmonoidalcat_data) : D I := pr1 (pr2 DMD).
Coercion displayedunit_from_dispmoncatdata : displayedmonoidalcat_data >-> ob_disp.
Definition displayedleftunitordata_from_dispmoncatdata (DMD : displayedmonoidalcat_data) : displayedleftunitor_data DMD DMD := pr1 (pr2 (pr2 DMD)).
Coercion displayedleftunitordata_from_dispmoncatdata : displayedmonoidalcat_data >-> displayedleftunitor_data.
Definition displayedrightunitordata_from_dispmoncatdata (DMD : displayedmonoidalcat_data) : displayedrightunitor_data DMD DMD := pr1 (pr2 (pr2 (pr2 DMD))).
Coercion displayedrightunitordata_from_dispmoncatdata : displayedmonoidalcat_data >-> displayedrightunitor_data.
Definition displayedassociatordata_from_dispmoncatdata (DMD : displayedmonoidalcat_data) : displayedassociator_data DMD := pr2 (pr2 (pr2 (pr2 DMD))).
Coercion displayedassociatordata_from_dispmoncatdata : displayedmonoidalcat_data >-> displayedassociator_data.
(** PROPERTIES **)
Definition displayedtensor_id (dtd : displayedtensor_data)
:= ∏ (x y : C), ∏ (a : D x) (b : D y), ((id_disp a) ⊗^{{dtd}} (id_disp b)) = transportb _ (tid x y) (id_disp (a ⊗_{{dtd}} b)).
Definition displayedtensor_comp (dtd : displayedtensor_data)
:= ∏ (x y x' y' x'' y'': C), ∏ (a : D x) (b : D y) (a' : D x') (b' : D y') (a'' : D x'') (b'' : D y''),
∏ (f1 : C⟦x, x'⟧) (g1 : C⟦y,y'⟧) (f2 : C⟦x',x''⟧) (g2 : C⟦y',y''⟧) (f1' : a -->[f1] a') (g1' : b -->[g1] b') (f2' : a' -->[f2] a'') (g2' : b' -->[g2] b''), ((f1'⊗^{{dtd}} g1') ;; (f2'⊗^{{dtd}} g2')) = transportb _ (tcomp x y x' y' x'' y'' f1 f2 g1 g2) ((f1';;f2') ⊗^{{dtd}} (g1';;g2')).
Definition displayedassociator_naturality {dtd : displayedtensor_data} (dα : displayedassociator_data dtd) : UU :=
∏ (x x' y y' z z' : C), ∏ (a : D x) (a' : D x') (b : D y) (b' : D y') (c : D z) (c' : D z'),
∏ (f : C⟦x,x'⟧) (g : C⟦y,y'⟧) (h : C⟦z,z'⟧), ∏ (f' : a-->[f] a') (g' : b -->[g] b') (h' : c -->[h] c'),
((dα x y z a b c) ;; (f' ⊗^{{dtd}} (g' ⊗^{{dtd}} h'))) = transportb _ (αnat _ _ _ _ _ _ f g h) (((f' ⊗^{{dtd}} g') ⊗^{{dtd}} h') ;; dα _ _ _ a' b' c').
Definition displayedassociator_is_nat_iso {dtd : displayedtensor_data} (dα : displayedassociator_data dtd) : UU :=
∏ (x y z : C), ∏ (a : D x) (b : D y) (c : D z), is_z_iso_disp ((α x y z),,(αiso x y z)) (dα x y z a b c).
Definition displayedleftunitor_naturality {i : D I} {dtd : displayedtensor_data} (dlud : displayedleftunitor_data dtd i) : UU :=
∏ (x y : C), ∏ (a : D x) (b : D y) (f : C⟦x,y⟧) (f' : a -->[f] b),
(dlud x a) ;; f' = transportb _ (lunat x y f) (((id_disp i) ⊗^{{dtd}} f') ;; (dlud y b)).
Definition displayedleftunitor_is_nat_iso {i : D I} {dtd : displayedtensor_data} (dlu : displayedleftunitor_data dtd i) : UU :=
∏ (x : C), ∏ (a : D x), is_z_iso_disp (lu x,, luiso x) (dlu x a).
Definition displayedrightunitor_naturality {dtd : displayedtensor_data} {i : D I} (drud : displayedrightunitor_data dtd i) : UU :=
∏ (x y : C), ∏ (a : D x) (b : D y) (f : C⟦x,y⟧) (f' : a -->[f] b),
((drud x a) ;; f') = transportb _ (runat x y f) (( f' ⊗^{{dtd}} (id_disp i)) ;; (drud y b)).
Definition displayedrightunitor_is_nat_iso {i : D I} {dtd : displayedtensor_data} (dru : displayedrightunitor_data dtd i) : UU :=
∏ (x : C), ∏ (a : D x), is_z_iso_disp (ru x,, ruiso x) (dru x a).
Definition displayedtriangle_identity {dtd : displayedtensor_data} {i : D I} (dlud : displayedleftunitor_data dtd i) (drud : displayedrightunitor_data dtd i) (dα : displayedassociator_data dtd) := ∏ (x y : C), ∏ (a : D x) (b : D y),
((dα x I y a i b) ;; ((id_disp a) ⊗^{{dtd}} dlud y b )) = transportb _ (tri x y) ((drud x a) ⊗^{{dtd}} id_disp b).
Definition displayedpentagon_identity {dtd : displayedtensor_data} (dα : displayedassociator_data dtd) : UU
:= ∏ (w x y z: C), ∏ (e : D w) (a : D x) (b : D y) (c : D z),
(((dα _ _ _ e a b) ⊗^{{dtd}} (id_disp c)) ;; (dα _ _ _ e (a ⊗_{{dtd}} b) c) ;; ((id_disp e) ⊗^{{dtd}} (dα _ _ _ a b c))) = transportb _ (pen w x y z) ((dα (w ⊗_{T} x) y z (e ⊗_{{dtd}} a) b c) ;; (dα w x (y ⊗_{T} z) e a (b ⊗_{{dtd}} c))).
Definition displayedmonoidal_laws (DMD : displayedmonoidalcat_data) : UU :=
(displayedtensor_id DMD) × (displayedtensor_comp DMD) ×
(displayedassociator_naturality DMD) × (displayedassociator_is_nat_iso DMD) ×
(displayedleftunitor_naturality DMD) × (displayedleftunitor_is_nat_iso DMD) ×
(displayedrightunitor_naturality DMD) × (displayedrightunitor_is_nat_iso DMD) ×
(displayedtriangle_identity DMD DMD DMD) × (displayedpentagon_identity DMD).
Definition displayedtensorid_from_monoidallaws {DMD : displayedmonoidalcat_data} (DML : displayedmonoidal_laws DMD) : displayedtensor_id DMD := pr1 DML.
Coercion displayedtensorid_from_monoidallaws : displayedmonoidal_laws >-> displayedtensor_id.
Definition displayedtensorcomp_from_monoidallaws {DMD : displayedmonoidalcat_data} (DML : displayedmonoidal_laws DMD) : displayedtensor_comp DMD := pr1 (pr2 DML).
Coercion displayedtensorcomp_from_monoidallaws : displayedmonoidal_laws >-> displayedtensor_comp.
Definition displayedassociatornaturality_from_monoidallaws {DMD : displayedmonoidalcat_data} (DML : displayedmonoidal_laws DMD) : displayedassociator_naturality DMD := pr1 (pr2 (pr2 DML)).
Coercion displayedassociatornaturality_from_monoidallaws : displayedmonoidal_laws >-> displayedassociator_naturality.
Definition displayedassociatorisiso_from_monoidallaws {DMD : displayedmonoidalcat_data} (DML : displayedmonoidal_laws DMD) : displayedassociator_is_nat_iso DMD := pr1 (pr2 (pr2 (pr2 DML))).
Coercion displayedassociatorisiso_from_monoidallaws : displayedmonoidal_laws >-> displayedassociator_is_nat_iso.
Definition displayedleftunitornaturality_from_monoidallaws {DMD : displayedmonoidalcat_data} (DML : displayedmonoidal_laws DMD) : displayedleftunitor_naturality DMD := pr1 (pr2 (pr2 (pr2 (pr2 DML)))).
Coercion displayedleftunitornaturality_from_monoidallaws : displayedmonoidal_laws >-> displayedleftunitor_naturality.
Definition displayedleftunitorisiso_from_monoidallaws {DMD : displayedmonoidalcat_data} (DML : displayedmonoidal_laws DMD) : displayedleftunitor_is_nat_iso DMD := pr1 (pr2 (pr2 (pr2 (pr2 (pr2 DML))))).
Coercion displayedleftunitorisiso_from_monoidallaws : displayedmonoidal_laws >-> displayedleftunitor_is_nat_iso.
Definition displayedrightunitornaturality_from_monoidallaws{DMD : displayedmonoidalcat_data} (DML : displayedmonoidal_laws DMD) : displayedrightunitor_naturality DMD := pr1 (pr2 (pr2 (pr2 (pr2 (pr2 (pr2 DML)))))).
Coercion displayedrightunitornaturality_from_monoidallaws : displayedmonoidal_laws >-> displayedrightunitor_naturality.
Definition displayedrightunitorisiso_from_monoidallaws {DMD : displayedmonoidalcat_data} (DML : displayedmonoidal_laws DMD) : displayedrightunitor_is_nat_iso DMD := pr1 (pr2 (pr2 (pr2 (pr2 (pr2 (pr2 (pr2 DML))))))).
Coercion displayedrightunitorisiso_from_monoidallaws : displayedmonoidal_laws >-> displayedrightunitor_is_nat_iso.
Definition displayedtriangleidentity_from_monoidallaws {DMD : displayedmonoidalcat_data} (DML : displayedmonoidal_laws DMD) : displayedtriangle_identity DMD DMD DMD := pr1 (pr2 (pr2 (pr2 (pr2 (pr2 (pr2 (pr2 (pr2 DML)))))))).
Coercion displayedtriangleidentity_from_monoidallaws : displayedmonoidal_laws >-> displayedtriangle_identity.
Definition displayedpentagonidentity_from_monoidallaws {DMD : displayedmonoidalcat_data} (DML : displayedmonoidal_laws DMD) : displayedpentagon_identity DMD := pr2 (pr2 (pr2 (pr2 (pr2 (pr2 (pr2 (pr2 (pr2 DML)))))))).
Coercion displayedpentagonidentity_from_monoidallaws : displayedmonoidal_laws >-> displayedpentagon_identity.
End displayedmonoidalcategories.
|
[STATEMENT]
lemma when_apply_cong[fundef_cong]:
"\<lbrakk> C = C'; s = s'; C' \<Longrightarrow> m s' = m' s' \<rbrakk> \<Longrightarrow> whenE C m s = whenE C' m' s'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>C = C'; s = s'; C' \<Longrightarrow> m s' = m' s'\<rbrakk> \<Longrightarrow> whenE C m s = whenE C' m' s'
[PROOF STEP]
by (simp add: whenE_def)
|
%default total
data InfIO : Type where
Do : IO a -> (a -> Inf InfIO) -> InfIO
(>>=) : IO a -> (a -> Inf InfIO) -> InfIO
(>>=) = Do
greet : InfIO
greet = do putStr "Enter your name: "
           name <- getLine
           putStrLn ("Hello " ++ name)
           greet
|
lemma uniformly_continuous_on_diff[continuous_intros]: fixes f :: "'a::metric_space \<Rightarrow> 'b::real_normed_vector" assumes "uniformly_continuous_on s f" and "uniformly_continuous_on s g" shows "uniformly_continuous_on s (\<lambda>x. f x - g x)"
|
Load LFindLoad.
From lfind Require Import LFind.
From QuickChick Require Import QuickChick.
From adtind Require Import goal33.
Derive Show for natural.
Derive Arbitrary for natural.
Instance Dec_Eq_natural : Dec_Eq natural.
Proof. dec_eq. Qed.
Lemma conj15synthconj6 : forall (lv0 : natural) (lv1 : natural), (@eq natural (mult (Succ lv0) (Succ lv1)) (plus (mult lv1 (Succ lv0)) (Succ lv0))).
Admitted.
QuickChick conj15synthconj6.
|
module CanEscape
import Language.Reflection
%language ElabReflection
-- Show that actually an erased value can escape through an elaboration script
0 n : Nat
n = 3
elabScript : Elab Nat
elabScript = check !(quote n)
M : Nat
M = %runElab elabScript
mIs3 : M = 3
mIs3 = Refl
|
/-
Copyright (c) 2021 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura
-/
prelude
import Init.Data.Nat.Div
namespace Nat
private def gcdF (x : Nat) : (∀ x₁, x₁ < x → Nat → Nat) → Nat → Nat :=
  match x with
  | 0      => fun _ y => y
  | succ x => fun f y => f (y % succ x) (mod_lt _ (zero_lt_succ _)) (succ x)

@[extern "lean_nat_gcd"]
def gcd (a b : @& Nat) : Nat :=
  WellFounded.fix (measure id).wf gcdF a b

@[simp] theorem gcd_zero_left (y : Nat) : gcd 0 y = y :=
  rfl

theorem gcd_succ (x y : Nat) : gcd (succ x) y = gcd (y % succ x) (succ x) :=
  rfl

@[simp] theorem gcd_one_left (n : Nat) : gcd 1 n = 1 := by
  rw [gcd_succ, mod_one]
  rfl

@[simp] theorem gcd_zero_right (n : Nat) : gcd n 0 = n := by
  cases n <;> simp [gcd_succ]

@[simp] theorem gcd_self (n : Nat) : gcd n n = n := by
  cases n <;> simp [gcd_succ]

end Nat
|
Require Import CertiGraph.dijkstra.dijkstra_env.
Require Export CertiGraph.graph.MathAdjMatGraph.
Local Open Scope logic.
Local Open Scope Z_scope.
Section MathDijkGraph.
Context {size : Z}.
Context {inf : Z}.
Context {V_EqDec : EqDec V eq}.
Context {E_EqDec : EqDec E eq}.
(* Here is the LabeledGraph *)
Definition DijkLG := AdjMatLG.
(* The soundness condition *)
Class SoundDijk (g: DijkLG) :=
{
basic:
(* first, we can take AdjMat's soundness wholesale *)
@SoundAdjMat size inf g;
veb:
(* from the AdjMat soundness above we already know
e is representable,
but for Dijkstra we need a further constraint.
*)
forall e,
evalid g e ->
0 <= elabel g e <= (Int.max_signed / size) - 1;
cts: (* cost_to_self *)
forall v, vvalid g v -> elabel g (v, v) = 0;
sfr: (* size is further restricted *)
size * 4 <= Int.max_signed;
(* because sizeof tint = 4 *)
ifr: (* inf is further restricted *)
(Int.max_signed / size) - 1 < inf <= Int.max_signed - (Int.max_signed / size) + 1
}.
(* And here is the GeneralGraph that we will use *)
Definition DijkGG := (GeneralGraph V E DV DE DG (fun g => SoundDijk g)).
(* Some handy coercions: *)
Identity Coercion AdjMatLG_DijkLG: DijkLG >-> AdjMatLG.
Identity Coercion LabeledGraph_AdjMatLG: AdjMatLG >-> LabeledGraph.
(* We can drag out the soundness condition *)
Definition SoundDijk_DijkGG (g: DijkGG) := (@sound_gg _ _ _ _ _ _ _ _ g).
(* We can always drag out SoundAdjMat *)
Definition SoundAdjMat_DijkGG (g: DijkGG) :=
@basic g (SoundDijk_DijkGG g).
(* A DijkGG can be weakened into an AdjMatGG *)
Definition AdjMatGG_DijkGG (g: DijkGG) : AdjMatGG :=
Build_GeneralGraph DV DE DG SoundAdjMat g (SoundAdjMat_DijkGG g).
Coercion AdjMatGG_DijkGG: DijkGG >-> AdjMatGG.
(* Great! So now when we want to access an AdjMat
plugin, we can simply use the AdjMat getter
and pass it a DijkGG. The coercion will be seamless.
*)
(* For the four Dijkstra-specific plugins, we create getters: *)
Definition valid_edge_bounds (g: DijkGG) :=
@veb g (SoundDijk_DijkGG g).
Definition cost_to_self (g: DijkGG) :=
@cts g (SoundDijk_DijkGG g).
Definition size_further_restricted (g: DijkGG) :=
@sfr g (SoundDijk_DijkGG g).
Definition inf_further_restricted (g: DijkGG) :=
@ifr g (SoundDijk_DijkGG g).
Lemma inf_bounds:
forall (g: DijkGG),
0 < inf < Int.max_signed.
Proof.
intros.
apply (inf_representable g).
Qed.
(* And now some lemmas that come from soundness plugins. *)
Lemma edge_cost_pos:
forall (g: DijkGG) e,
0 <= elabel g e.
Proof.
intros.
pose proof (valid_edge_bounds g e).
pose proof (invalid_edge_weight g e).
destruct (@evalid_dec _ _ _ _ g (finGraph g) e).
- apply H; trivial.
- rewrite H0 in n.
replace (elabel g e) with inf by trivial.
pose proof (@inf_representable _ _ g). lia.
Qed.
Lemma div_pos_le:
forall a b,
0 <= a ->
0 < b ->
a / b <= a.
Proof.
intros.
rewrite <- (Z2Nat.id a); trivial.
rewrite <- (Z2Nat.id b); [|lia].
remember (Z.to_nat a) as n1.
remember (Z.to_nat b) as n2.
rewrite <- div_Zdiv by lia.
apply inj_le.
replace n1 with (Nat.div n1 1) at 2.
2: apply Nat.div_1_r.
apply Nat.div_le_compat_l.
lia.
Qed.
Lemma edge_representable:
forall (g: DijkGG) e,
Int.min_signed <= elabel g e <= Int.max_signed.
Proof.
intros.
pose proof (valid_edge_bounds g e).
pose proof (invalid_edge_weight g e).
pose proof (edge_cost_pos g e).
destruct (@evalid_dec _ _ _ _ g (finGraph g) e).
- specialize (H e0).
split; trivial. rep_lia.
apply Z.le_trans with (m := (Int.max_signed / size) - 1); trivial.
apply H.
pose proof (size_representable g).
apply Z.le_trans with (m := Int.max_signed / size).
lia.
apply div_pos_le; lia.
- rewrite H0 in n.
replace (elabel g e) with inf by trivial.
pose proof (inf_representable g).
split; rep_lia.
Qed.
Lemma strong_evalid_dijk:
forall (g: DijkGG) a b,
vvalid g a ->
vvalid g b ->
elabel g (a, b) < inf ->
strong_evalid g (a,b).
Proof.
intros.
split3;
[rewrite (evalid_meaning g) |
rewrite (edge_src_fst g) |
rewrite (edge_dst_snd g)]; trivial.
split; trivial.
apply edge_representable.
Qed.
End MathDijkGraph.
|