\chapter{Introduction to PHP}
PHP is a server-side scripting language designed for web development but also used as a general-purpose programming language. As of January 2013, PHP was installed on more than 240 million websites (39 percent of those sampled) and 2.1 million web servers. Originally created by Rasmus Lerdorf in 1994, the reference implementation of PHP (powered by the Zend Engine) is now produced by The PHP Group. While PHP originally stood for Personal Home Page, it now stands for PHP: Hypertext Preprocessor, which is a recursive backronym.
PHP code can be simply mixed with HTML code, or it can be used in combination with various templating engines and web frameworks. PHP code is usually processed by a PHP interpreter, which is usually implemented as a web server's native module or a Common Gateway Interface (CGI) executable. After the PHP code is interpreted and executed, the web server sends the resulting output to its client, usually in the form of a part of the generated web page; for example, PHP code can generate a web page's HTML code, an image, or some other data. PHP has also evolved to include a command-line interface (CLI) capability and can be used in standalone graphical applications.
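To make the embedding concrete, here is a minimal sketch of PHP mixed with HTML (the file name \texttt{hello.php} is our own illustration, not taken from the course material):
\begin{verbatim}
<!-- hello.php: served by a web server with the PHP module enabled -->
<html>
  <body>
    <?php
      // This block runs on the server; only its output is sent to the client.
      echo "<p>Hello, the server time is " . date("H:i") . ".</p>";
    ?>
  </body>
</html>
\end{verbatim}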
The standard PHP interpreter, powered by the Zend Engine, is free software released under the PHP License. PHP has been widely ported and can be deployed on most web servers on almost every operating system and platform, free of charge.
Despite its popularity, no written specification or standard existed for the PHP language until 2014, leaving the canonical PHP interpreter as a de facto standard. Since 2014, there is ongoing work on creating a formal PHP specification.
\section{PHP introduction}
Since we assume you are familiar with other programming languages, we will only go over the basics of PHP. For this, we refer to the PowerPoint presentation \url{Intro_php.pptx}.
|
import algebra.big_operators
import data.finset.slice
import data.rat
import tactic
open finset nat
open_locale big_operators
namespace finset
namespace bbsetpair2
--Bollobás set-pair theorem
--A and B are the families of finite sets indexed by I
--sp is the intersection condition the family satisfies
--spi is a shortcut for the disjointness of A i and B i
--U is the universe of elements appearing in any of the sets
-- it has size u
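-- The main result below (bollobas_sp') is that den S ≤ 1, i.e.
-- ∑ i in I, 1 / ((A i).card + (B i).card).choose ((B i).card) ≤ 1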
@[ext] structure setpair :=
(A : ℕ → finset ℕ) (B : ℕ → finset ℕ) (I : finset ℕ)
(sp : ∀i∈ I,∀j∈ I, ((A i)∩(B j)).nonempty ↔ i ≠ j)
(spi : ∀i∈ I, disjoint (A i) (B i))
(U : finset ℕ) (Ug : U = I.bUnion(λ i, (A i) ∪ (B i )))
(u : ℕ) (ug: u= card U)
--- we take the trivial system with I=∅
instance : inhabited setpair :={
default:= {A:=λ _,∅, B:=λ _,∅,I:=∅,
sp:=begin tauto, end,
spi:=begin tauto, end,
U:=∅, Ug:=begin tauto, end,
u:=0, ug:=begin tauto,end,}}
-- density of a setpair system
@[simp]def den (S : setpair) :ℚ := ∑ i in S.I, ((1 : ℚ)/(card(S.A i)+card(S.B i)).choose (card (S.B i)))
-- helper - if A i and B j are disjoint then i=j
lemma sp' {S : setpair} {i j : ℕ}: i∈ S.I → j∈S.I → (disjoint (S.A i) (S.B j) → i = j):=
begin
intros hi hj hd,
by_contra,
have ne: ((S.A i)∩(S.B j)).nonempty,
apply (S.sp i hi j hj).mpr h,
rw disjoint_iff_inter_eq_empty at hd,
simp only [ * , not_nonempty_empty] at *,
end
-- the erase constructor - gives a new setpair without element x
-- we discard all i such that B i contains x and then erase x from any A i containing x
@[simp]def er (S : setpair) (x : ℕ) : setpair :={
A:=λ n, erase (S.A n) x,
B:=S.B,
I:=S.I.filter(λi, x∉(S.B i)),
sp:=begin
intros i hi j hj,
rw [mem_filter] at *,
suffices same: ((S.A i).erase x ∩ S.B j) = (S.A i) ∩ (S.B j),{
rw same, exact S.sp i hi.1 j hj.1,}, cases hj, cases hi, dsimp at *,
ext1, simp only [mem_inter, mem_erase, ne.def, and.congr_left_iff, and_iff_right_iff_imp] at *,
intros m n p, cases p,solve_by_elim,
end,
U:= (S.I.filter(λi, x∉(S.B i))).bUnion(λ i, (((S.A i).erase x) ∪ (S.B i ))),
Ug:=rfl,
u:=card((S.I.filter(λi, x∉(S.B i))).bUnion(λ i, (((S.A i).erase x) ∪ (S.B i )))),
ug:=rfl,
spi:=begin
intros i hi,
rw [mem_filter] at *,
apply disjoint_of_subset_left _ (S.spi i hi.1), apply erase_subset _,end,
}
--- the sets are in the universe...
lemma univ_ss {S : setpair} {i : ℕ} : i ∈ S.I → (S.A i ∪ S.B i)⊆ S.U:=
begin
intro hi, intros x, rw [setpair.Ug,mem_bUnion],tauto,
end
-- applying er y gives a smaller universe if y was in the universe.
lemma er_univ_lt (S : setpair) {y : ℕ} : y ∈ S.U → (er S y).u < S.u :=
begin
intro hy,
have ss: (er S y).U ⊆ S.U,{
simp [er,S.Ug], intros i hi hy, intros x hx, rw [mem_bUnion], use i ,
split, exact hi, rw [mem_union] at *, cases hx with ha hb,
left, apply mem_of_mem_erase ha,right, exact hb,},
have ey: y∉ (er S y).U, {simp [er,S.Ug]},
simp [setpair.ug],
apply card_lt_card,
apply (ssubset_iff_of_subset ss).mpr _,
use [y, hy, ey],
end
-- pairs are disjoint so sizes add.
lemma card_pair {S : setpair} {i : ℕ} : i ∈ S.I → card((S.A i ∪ S.B i)) = card(S.A i) + card(S.B i):=λ hi, card_union_eq (S.spi i hi)
-- U is partitioned into each pair and their complement
lemma card_U {S :setpair} {i : ℕ} : i ∈ S.I → card(S.U\(S.A i ∪ S.B i)) + card(S.A i) + card(S.B i) = card(S.U):=
begin
intros hi, rw [add_assoc, ← card_pair hi],
apply card_sdiff_add_card_eq_card (univ_ss hi),
end
lemma card_Udiv (S :setpair) :∀i∈ S.I, ((card(S.A i) + card(S.B i)).choose (card(S.B i)):ℚ)⁻¹*(card S.U)=
((card(S.A i) + card(S.B i)).choose (card (S.B i)):ℚ)⁻¹*((card(S.U\(S.A i ∪ S.B i)) + card(S.A i) + card(S.B i))) :=
begin
intros i hi, rw ← card_U hi, norm_cast,
end
--- want to work mainly with non-trivial setpairs so assume every A i is non-empty
def triv_sp (S : setpair) : Prop := (∃i∈S.I, card(S.A i) =0)
--- trivial sp has at most one pair.
lemma triv_imp_I1 {S :setpair} (h: triv_sp S) : card S.I ≤ 1:=
begin
by_contra h',
obtain ⟨a,ha,b,hb,ne⟩:=one_lt_card.mp (by linarith: 1< S.I.card),
obtain ⟨x,hx⟩:=(S.sp a ha b hb).mpr ne,
obtain ⟨i,h⟩:=h,
rw card_eq_zero at h,
cases h with hi hie,
rw mem_inter at hx,
have ia: i=a, {
apply sp' hi ha _, rw hie, simp only [disjoint_empty_left],
},
have ib: i=b, {
apply sp' hi hb _, rw hie, simp only [disjoint_empty_left],
},
rw [←ia,←ib] at ne, tauto,
end
-- non-trivial means each A i is non-empty so has at least one element
lemma ntriv_sp {S : setpair} (ht: ¬triv_sp S): ∀i∈S.I, 1 ≤ card(S.A i):=
begin
rw triv_sp at ht,push_neg at ht, intros i hi,
exact one_le_iff_ne_zero.mpr (ht i hi),
end
-- making the casting of the densities slightly less painful
lemma binhelp {a b c d: ℕ } (h: (a+b)*c=d*a) (hc: 0 < c) (hd: 0 < d): (↑d:ℚ)⁻¹*(a+b)=(↑c:ℚ)⁻¹*a:=
begin
have qh:((a:ℚ)+(b:ℚ))*(c:ℚ)=(d:ℚ)*(a:ℚ), norm_cast, exact h,
have dnz: (d:ℚ)≠ 0,
{ simp only [ne.def, cast_eq_zero], linarith, },
have cnz: (c:ℚ)≠ 0,
{ simp only [ne.def, cast_eq_zero], linarith, },
rw ← div_eq_inv_mul,rw ← div_eq_inv_mul,
rw div_eq_iff dnz, rw mul_comm, rw mul_div,
rw mul_comm,
symmetry,
rw div_eq_iff cnz, symmetry, rw mul_comm (↑a), exact qh,
end
-- the key fact we need for binomials -- really should have done this in the nats.
lemma binom_frac (a b : ℕ) (h:0 < a) :((a+b).choose(b):ℚ)⁻¹*(a+b)=((a-1+b).choose(b):ℚ)⁻¹*a:=
begin
have a1: a= (a -1).succ,{
rw succ_eq_add_one, linarith,},
have ab: a+b= (a-1+b).succ,{
rw [succ_eq_add_one, add_assoc,add_comm b 1,← add_assoc,←succ_eq_add_one,← a1],},
have ch: (a-1+b).succ* (a-1+b).choose (a-1)= (a-1+b).succ.choose((a-1).succ)*((a-1).succ),
apply succ_mul_choose_eq (a-1+b) (a-1),
rw [←ab,← a1] at ch,
have abn: (a-1+b)-(a-1)=b:=add_tsub_cancel_left (a-1) b,
have abs : a+b-a=b:=add_tsub_cancel_left a b,
have aln: a-1 ≤ a-1+b:=(by linarith),
have aln2: a≤ a+b:=(by linarith),
have f1: (a-1+b).choose((a-1+b)-(a-1))=(a-1+b).choose(a-1):=choose_symm aln,
rw abn at f1,
have f2: (a+b).choose(a+b-a)=(a+b).choose(a):=choose_symm aln2,
rw abs at f2,
rw [←f1, ← f2] at ch,
-- clear_except ch,
rw [mul_comm],
norm_cast,
rw [mul_comm], simp [ch],
have ap :(a-1+b).choose b > 0,{
apply choose_pos (by linarith: b≤ a-1+b),
},
have bp:(a+b).choose b >0,{
apply choose_pos (by linarith:b≤a+b),},
apply binhelp ch ap bp,
end
lemma den_rhs_1 {S : setpair} : ∑ i in S.I, ((card(S.A i) + card(S.B i)).choose (card (S.B i)):ℚ)⁻¹*card(S.U\(S.A i ∪ S.B i)) +
∑ i in S.I, ((card(S.A i) + card(S.B i)).choose (card (S.B i)):ℚ)⁻¹*(card(S.A i) + card(S.B i))
= den S * S.u :=
begin
simp only [den, one_div], rw [setpair.ug],rw sum_mul, rw ← sum_add_distrib,
apply sum_congr _ _, refl,
intros i hi, rw ← mul_add, norm_cast, rw ← add_assoc,
convert card_Udiv S i hi, exact card_U hi, norm_cast, exact (card_U hi).symm,
end
lemma den_rhs_2 {S : setpair} (ht: ¬triv_sp S) : ∑ i in S.I, ((card(S.A i) + card(S.B i)).choose (card (S.B i)):ℚ)⁻¹*(card(S.A i) + card(S.B i)) =
∑ i in S.I, ((card(S.A i)-1 + card(S.B i)).choose (card (S.B i)):ℚ)⁻¹*(card(S.A i))
:=
begin
apply sum_congr _ _, refl,
intros i hi, rw binom_frac _ _ ((ntriv_sp ht) i hi),
end
lemma den_help_1 {S : setpair} : ∀i, ∀y∈(S.U\((S.A i)∪(S.B i))), (S.A i).card +(S.B i).card = ((S.A i).erase y).card + (S.B i).card:=
begin
intros i y hy, simp only [add_left_inj], rw card_erase_eq_ite,split_ifs,
simp only [*, mem_sdiff, mem_union, true_or, not_true, and_false] at *,
end
lemma den_help_2 {S : setpair} : ∀i, ∀y∈(S.A i), (S.A i).card - 1 +(S.B i).card = ((S.A i).erase y).card + (S.B i).card:=
begin
intros i y hy, simp only [add_left_inj], rw card_erase_of_mem hy,
end
lemma den_rhs_3 {S : setpair} : ∀ i∈S.I , ((card(S.A i) + card(S.B i)).choose (card (S.B i)):ℚ)⁻¹*(card(S.U\((S.A i)∪(S.B i)))) =
∑y in (S.U\((S.A i)∪(S.B i))), ((((S.A i).erase y).card + card(S.B i)).choose ((S.B i).card):ℚ)⁻¹:=
begin
intros i hi, rw card_eq_sum_ones (S.U\((S.A i)∪(S.B i))),
push_cast, rw zero_add, rw mul_sum, rw mul_one,
rw sum_congr _ _, refl,
intros y hy,rw (den_help_1 i y hy),
end
lemma den_rhs_4 {S : setpair} : ∀ i∈S.I , ((card(S.A i) - 1 + card(S.B i)).choose (card (S.B i)):ℚ)⁻¹*(card(S.A i)) =
∑y in (S.A i), ((((S.A i).erase y).card + card(S.B i)).choose ((S.B i).card):ℚ)⁻¹:=
begin
intros i hi, nth_rewrite 1 card_eq_sum_ones (S.A i),
push_cast, rw zero_add, rw mul_sum, rw mul_one,
rw sum_congr _ _, refl,
intros y hy,rw (den_help_2 i y hy),
end
lemma den_rhs_5 {S : setpair} : ∑ i in S.I , ((card(S.A i) + card(S.B i)).choose (card (S.B i)):ℚ)⁻¹*(card(S.U\((S.A i)∪(S.B i)))) =
∑ i in S.I, (∑y in (S.U\((S.A i)∪(S.B i))), ((((S.A i).erase y).card + card(S.B i)).choose ((S.B i).card):ℚ)⁻¹):=
begin
apply sum_congr (rfl) (den_rhs_3),
end
lemma den_rhs_6 {S : setpair} : ∑ i in S.I, ((card(S.A i) - 1 + card(S.B i)).choose (card (S.B i)):ℚ)⁻¹*(card(S.A i)) =
∑ i in S.I, (∑y in (S.A i), ((((S.A i).erase y).card + card(S.B i)).choose ((S.B i).card):ℚ)⁻¹):=
begin
apply sum_congr (rfl) (den_rhs_4),
end
lemma disj_den {S : setpair} : ∀i, disjoint (S.U\((S.A i)∪(S.B i))) (S.A i):=
begin
intros i,
apply disjoint_of_subset_right (subset_union_left (S.A i) (S.B i)),
exact sdiff_disjoint,
end
lemma sp_sdiff (S : setpair) : ∀i∈S.I, (S.U\((S.A i)∪(S.B i)))∪ (S.A i) = (S.U\(S.B i)) :=
begin
intros i hi,
have he: (S.A i)∪ (S.B i)⊆ S.U:=univ_ss hi,
have heA: (S.A i)⊆ S.U:=subset_trans (subset_union_left (S.A i) (S.B i)) he,
have ab: disjoint (S.A i) (S.B i):=S.spi i hi,
rw sdiff_union_distrib, ext x,split, simp only [mem_union, mem_inter, mem_sdiff] at *,
intro h,
rcases h, exact h.2, split, exact heA h, intro hb,
exact ab (mem_inter.mpr ⟨h,hb⟩),
intros h, rw [mem_union,mem_inter,mem_sdiff],
by_cases hA: x∈ (S.A i) ,
right, exact hA, left,split, simp only [*, mem_sdiff, not_false_iff, and_self] at *, exact h,
end
lemma den_rhs_7 {S : setpair} : ∀i∈ S.I, (∑y in (S.U\((S.A i)∪(S.B i))), ((((S.A i).erase y).card + card(S.B i)).choose ((S.B i).card):ℚ)⁻¹)
+(∑y in (S.A i), ((((S.A i).erase y).card + card(S.B i)).choose ((S.B i).card):ℚ)⁻¹)=(∑y in (S.U\(S.B i)), ((((S.A i).erase y).card + card(S.B i)).choose ((S.B i).card):ℚ)⁻¹)
:=
begin
intros i hi, rw [← sp_sdiff S i hi, sum_union (disj_den i )],
end
lemma den_rhs_8 {S : setpair} : ∑ i in S.I, ((∑y in (S.U\((S.A i)∪(S.B i))), ((((S.A i).erase y).card + card(S.B i)).choose ((S.B i).card):ℚ)⁻¹)
+(∑y in (S.A i), ((((S.A i).erase y).card + card(S.B i)).choose ((S.B i).card):ℚ)⁻¹))=∑ i in S.I,(∑y in (S.U\(S.B i)), ((((S.A i).erase y).card + card(S.B i)).choose ((S.B i).card):ℚ)⁻¹)
:=
begin
apply sum_congr (rfl) (den_rhs_7),
end
lemma doublecount {A B : finset ℕ} {f: ℕ → ℕ → ℚ} {p: ℕ → ℕ → Prop} [decidable_rel p] :
∑ a in A, ∑ b in filter (λ i , p a i) B, f a b = ∑ b in B, ∑ a in filter (λ i, p i b) A, f a b:=
begin
have inL: ∀a∈A,∑ b in filter (λ i, p a i) B, (f a b)= ∑ b in B, ite (p a b) (f a b) 0,{
intros a ha, rw sum_filter,},
have inL2: ∑a in A, ∑ b in filter (λ i, p a i) B, (f a b)= ∑ a in A, ∑ b in B, ite (p a b) (f a b) 0, {
apply sum_congr, refl, exact inL,},
have inR: ∀b∈B,∑ a in filter (λ i, p i b) A, (f a b)= ∑ a in A, ite (p a b) (f a b) 0,{
intros b hb, rw sum_filter,},
have inR2: ∑b in B, ∑ a in filter (λ i, p i b) A, (f a b)= ∑ b in B, ∑ a in A, ite (p a b) (f a b) 0, {
apply sum_congr, refl, exact inR,},
rw [inL2,inR2,sum_comm],
end
lemma doublecount' {A B : finset ℕ} {f g: ℕ → ℕ → ℚ} {a b : ℕ} {p: ℕ → ℕ → Prop} [decidable_rel p] : (∀a∈ A,∀b∈B , p a b → f a b = g a b)
→ ∑ a in A, ∑ b in filter (λ i , p a i) B, f a b = ∑ b in B, ∑ a in filter (λ i, p i b) A, g a b:=
begin
intros h, rw doublecount, apply sum_congr, refl, intros b hb, apply sum_congr, refl,
intro x, rw mem_filter, intro ha, exact h x ha.1 b hb ha.2,
end
lemma dc_4 {S : setpair} {f: ℕ → ℕ → ℚ} : ∑ i in S.I, ∑ y in S.U\(S.B i), f y i = ∑ i in S.I, ∑ y in filter (λ x, x∉(S.B i)) S.U, f y i:=
begin
have H: ∀ i∈S.I, S.U\S.B i = filter (λ x, x∉ S.B i) S.U,{
intros i hi, ext x,rw [mem_sdiff,mem_filter],},
have H1: ∀ i∈S.I,∑ y in S.U\(S.B i), f y i = ∑ y in filter (λ x, x∉(S.B i)) S.U, f y i,{
intros i hi, apply sum_congr , exact H i hi, intros x hx,refl,},
apply sum_congr, refl,exact H1,
end
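-- Key double-counting identity: summing the density of the erased systems
-- over all elements of the universe gives den S * u.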
lemma den_double {S : setpair} (ht: ¬triv_sp S) : (∑ y in S.U, den (er S y))= den S * S.u :=
begin
rw ← den_rhs_1, simp only [den, er, one_div],
rw [den_rhs_2 ht, den_rhs_5 , den_rhs_6 , ← sum_add_distrib],
rw [den_rhs_8,dc_4,doublecount],
end
lemma trival_mem {S : setpair} (h: ∃i∈S.I, (S.A i) = ∅) : card S.I ≤ 1 :=
begin
by_contra h',
obtain ⟨a,ha,b,hb,ne⟩:=one_lt_card.mp (by linarith: 1< S.I.card),
obtain ⟨x,hx⟩:=(S.sp a ha b hb).mpr ne,
obtain ⟨i, hi, inem⟩:=h,
rw mem_inter at hx,
have ha' : disjoint (S.A i) (S.B a),
rw inem, simp only [disjoint_empty_left],
have hb' : disjoint (S.A i) (S.B b),
rw inem, simp only [disjoint_empty_left],
have ia: i=a:=sp' hi ha ha',
have ib: i=b:=sp' hi hb hb',
rw ia at ib, tauto,
end
lemma trival_univ {S : setpair} (h: S.u = 0) : card S.I ≤ 1 :=
begin
by_contra h',
obtain ⟨a,ha,b,hb,ne⟩:=one_lt_card.mp (by linarith: 1< S.I.card),
obtain ⟨x,hx⟩:=(S.sp a ha b hb).mpr ne,
have xinU: x∈ S.U,{
simp only [mem_inter, setpair.Ug, mem_bUnion, mem_union, exists_prop, not_le, _root_.ne.def] at *,
use [a,ha,hx.1], },
rw [setpair.ug,card_eq_zero] at h,
have :S.U.nonempty:=⟨x,xinU⟩,
rw h at this, apply not_nonempty_empty this,
end
lemma den_triv {S : setpair} (h: triv_sp S) :den S≤ 1:=
begin
have I1: card(S.I) ≤ 1:=triv_imp_I1 h,
have Ic: card S.I ∈ Icc 0 1, {
simp only [mem_Icc, zero_le, true_and,I1],},
fin_cases Ic,
{-- empty sum is zero
have : den S =0,{
rw card_eq_zero at Ic, rw [den, Ic],
apply sum_empty,
},linarith,
},
{--- only have I={a} so sum over singleton
rw card_eq_one at Ic, rw [den],
cases Ic with a ha, rw ha,
rw sum_singleton,
set d:=((S.A a).card + (S.B a).card).choose ((S.B a).card),
have nch: 0< d :=choose_pos (le_add_self),
have d1: 1 ≤ d:= by linarith,
clear_except d1,
rw one_div, apply inv_le_one _, rwa one_le_cast,
},
end
lemma emp_U_den {S : setpair} (h: S.u = 0): den S ≤ 1 :=
begin
have Ic: card S.I ∈ Icc 0 1, {
simp only [mem_Icc, zero_le, true_and, trival_univ h],},
fin_cases Ic,
{-- empty sum is zero
have : den S =0,{
rw card_eq_zero at Ic, rw [den, Ic],
apply sum_empty,
},linarith,
},
{--- only have I={a} so sum over singleton
rw card_eq_one at Ic, rw [den],
cases Ic with a ha, rw ha,
rw sum_singleton,
set d:=((S.A a).card + (S.B a).card).choose ((S.B a).card),
have nch: 0< d :=choose_pos (le_add_self),
have d1: 1 ≤ d:= by linarith,
clear_except d1,
rw one_div, apply inv_le_one _, rwa one_le_cast,
},
end
theorem bollobas_sp {S : setpair} {n : ℕ}: S.u=n → den S ≤ 1 :=
begin
revert S,
--- should really do induction on S.wA so n= 0 → I.nonempty → (∃i∈S.I, (S.A i) = ∅)
induction n using nat.strong_induction_on with n hn,
intros S hs,
cases nat.eq_zero_or_pos n with Uem,
{rw ← hs at Uem, exact emp_U_den Uem,},
by_cases ht: triv_sp S,
{--- do case where there is an empty set in A or no sets at all!
exact den_triv ht, },
{ rw ← hs at h,
apply (mul_le_iff_le_one_left (cast_pos.mpr h)).mp,
have e: (∑ y in S.U, den (er S y)) = den S * S.u,{
exact den_double ht,},
rw ← e,
have dc: ∀y∈ S.U, (er S y).u< S.u,{
intros y hy, exact er_univ_lt S hy,},
have dd: ∀y∈ S.U, den(er S y)≤ (1:ℚ),{
intros y hy, rw hs at dc,
apply hn ((er S y).u) (dc y hy),refl,},
{ rw setpair.ug,
rw card_eq_sum_ones,
convert sum_le_sum dd, norm_cast,},
exact rat.nontrivial, },
end
theorem bollobas_sp' {S : setpair} : den S ≤ 1 :=
begin
set n:ℕ:=S.u with h, exact bollobas_sp h,
end
end bbsetpair2
end finset
|
The following are scrapbook layouts that you can view online and get ideas from for your own scrapbooking: layouts, paper piecing, using wire, scrapbook album cover ideas, graphics ideas, and more.
If you are looking just for free blank scrapbook layouts to print and use in your own scrapbooking, visit our Free Scrapbooking Layouts page.
A list of links to creative page layouts, graphics and memory book ideas.
This gallery now contains 24446 entries in 88 different categories. Browse through the categories, search by keyword or layout type, or just preview the latest entries.
Have you been wanting to try your hand at paper tearing? This pattern gives you a chance to give it a tear - the torn edge gives terrific dimension to the flag!
Lots more scrapbook layouts organized in categories.
Again, many pages posted. These are organized into categories - birthday, Christmas, etc.
Great for using with your scrapbook paper piecing. Tons of fun patterns and shapes!
|
-- Proof: insertion sort computes a permutation of the input list
module sortnat where
open import bool
open import eq
open import nat
open import list
open import nondet
open import nondet-thms
-- non-deterministic insert.
--This returns a non-deterministic list so that it can be combined with the result of perm
--
--implementation in Curry:
-- ndinsert x [] = [x]
-- ndinsert x (y:ys) = (x : y : ys) ? (y : ndinsert x ys)
ndinsert : {A : Set} → A → 𝕃 A → ND (𝕃 A)
ndinsert x [] = Val ( x :: [])
ndinsert x (y :: ys) = (Val ( x :: y :: ys ))
?? ((_::_ y) $* (ndinsert x ys))
--non-deterministic permutation
--this is identical to the curry code (except for the Val constructor)
perm : {A : Set} → (𝕃 A) → ND (𝕃 A)
perm [] = Val []
perm (x :: xs) = (ndinsert x) *$* (perm xs)
--insert a value into a sorted list.
--this is identical to curry or haskell code.
--
--note that the structure here is identical to ndinsert
insert : ℕ → 𝕃 ℕ → 𝕃 ℕ
insert x [] = x :: []
insert x (y :: ys) = if x < y then (x :: y :: ys)
else (y :: insert x ys)
--simple insertion sort
--again this is identical to curry or haskell
--also, note that the structure is identical to perm
sort : 𝕃 ℕ → 𝕃 ℕ
sort [] = []
sort (x :: xs) = insert x (sort xs)
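--for example, sort (3 :: 1 :: 2 :: []) evaluates to 1 :: 2 :: 3 :: []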
--If introduction rule for non-deterministic values.
--if x and y are both possible values of nx then
--∀ c. if c then x else y will give us either x or y, so it must be a possible value of nx.
--
ifIntro : {A : Set} → (x : A) → (y : A) → (nx : ND A)
→ x ∈ nx → y ∈ nx → (c : 𝔹) → (if c then x else y) ∈ nx
ifIntro x y nx p q tt = p
ifIntro x y nx p q ff = q
---------------------------------------------------------------------------
--
-- this should prove that if xs ∈ nxs then, insert x xs ∈ ndinsert x nxs
-- parameters:
-- x : the value we are inserting into the list
-- xs : the list
--
--returns: insert x xs ∈ ndinsert x xs
-- a proof that inserting a value in a list is ok with non-deterministic lists
insert=ndinsert : (y : ℕ) → (xs : 𝕃 ℕ) → (insert y xs) ∈ (ndinsert y xs)
-- the first case is simple: inserting into an empty list is trivial
insert=ndinsert y [] = ndrefl
--The recursive case is the interesting one.
--The list we insert an element into has the form (x :: xs)
--At this point we have two possible cases.
--Either y is smaller than every element in (x :: xs), in which case it's inserted at the front,
--or y is larger than x, in which case it's inserted somewhere in xs.
--Since both of these cases are covered by ndinsert we can invoke the ifIntro lemma, to say that we don't care which case it is.
--
--variables:
-- step : one step of insert
-- l : the left hand side of insert y xs (the then branch)
-- r : the right hand side of insert y xs (the else branch)
-- nr : a non-deterministic r
-- (Val l) : a non-deterministic l (but since ndinsert only has a deterministic value on the left it's not very interesting)
-- rec : The recursive call. If y isn't inserted into the front, then we need to find it.
-- l∈step : a proof that l is a possible value for step
-- r∈step : a proof that r is a possible value for step
insert=ndinsert y (x :: xs) = ifIntro l r step l∈step r∈step (y < x)
where step = ndinsert y (x :: xs)
l = (y :: x :: xs)
r = x :: insert y xs
nl = Val l
nr = (_::_ x) $* (ndinsert y xs)
rec = ∈-$* (_::_ x) (insert y xs) (ndinsert y xs)
(insert=ndinsert y xs)
l∈step = left nl nr ndrefl
r∈step = right nl nr rec
---------------------------------------------------------------------------
-- Main theorem: Sorting a list preserves permutations
-- all of the work is really done by insert=ndinsert
sortPerm : (xs : 𝕃 ℕ) → sort xs ∈ perm xs
sortPerm [] = ndrefl
sortPerm (x :: xs) = ∈-*$* (sort xs) (perm xs) (insert x) (ndinsert x)
(sortPerm xs) (insert=ndinsert x (sort xs))
|
Formal statement is: lemma has_derivative_inverse_strong: fixes f :: "'n::euclidean_space \<Rightarrow> 'n" assumes "open S" and "x \<in> S" and contf: "continuous_on S f" and gf: "\<And>x. x \<in> S \<Longrightarrow> g (f x) = x" and derf: "(f has_derivative f') (at x)" and id: "f' \<circ> g' = id" shows "(g has_derivative g') (at (f x))" Informal statement is: Suppose $f$ is a continuous function on an open set $S$ with $g(f(x)) = x$ for all $x \in S$. If $f$ has derivative $f'$ at $x \in S$ and $f' \circ g' = \mathrm{id}$, then $g$ has derivative $g'$ at $f(x)$.
|
function kern = rbfwhiteKernExpandParam(kern, params)
% RBFWHITEKERNEXPANDPARAM Create kernel structure from RBF-WHITE kernel's
% parameters.
% FORMAT
% DESC returns an RBF-WHITE kernel structure filled with the parameters in
% the given vector. This is used as a helper function to enable parameters
% to be optimised in, for example, the NETLAB optimisation functions.
% ARG kern : the kernel structure in which the parameters are to be
% placed.
% ARG params : vector of parameters which are to be placed in the
% kernel structure.
% RETURN kern : kernel structure with the given parameters in the
% relevant locations.
%
% SEEALSO : rbfwhiteKernParamInit, rbfwhiteKernExtractParam, kernExpandParam
%
% COPYRIGHT : David Luengo, 2009
% KERN
kern.inverseWidth = params(1);
kern.variance = params(2);
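% Usage sketch (our own illustration; assumes kern was initialised by
% rbfwhiteKernParamInit):
%   kern = rbfwhiteKernExpandParam(kern, [invWidth sigma2]);
%   % now kern.inverseWidth == invWidth and kern.variance == sigma2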
|
If $f$ is convex on $S$ and $c \geq 0$, then $c f$ is convex on $S$.
|
(* Title: JinjaThreads/MM/JMM_J_Typesafe.thy
Author: Andreas Lochbihler
*)
section \<open>JMM type safety for source code\<close>
theory JMM_J_Typesafe imports
JMM_Typesafe2
DRF_J
begin
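text \<open>
  The main theorems below (J_legal_typesafe and J_weakly_legal_typesafe) show
  that the (weakly) legal executions of a well-formed source program coincide
  for the plain JMM heap and the heap with type-checked reads.
\<close>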
locale J_allocated_heap_conf' =
h: J_heap_conf
addr2thread_id thread_id2addr
spurious_wakeups
empty_heap allocate "\<lambda>_. typeof_addr" heap_read heap_write hconf
P
+
h: J_allocated_heap
addr2thread_id thread_id2addr
spurious_wakeups
empty_heap allocate "\<lambda>_. typeof_addr" heap_read heap_write
allocated
P
+
heap''
addr2thread_id thread_id2addr
spurious_wakeups
empty_heap allocate typeof_addr heap_read heap_write
P
for addr2thread_id :: "('addr :: addr) \<Rightarrow> 'thread_id"
and thread_id2addr :: "'thread_id \<Rightarrow> 'addr"
and spurious_wakeups :: bool
and empty_heap :: "'heap"
and allocate :: "'heap \<Rightarrow> htype \<Rightarrow> ('heap \<times> 'addr) set"
and typeof_addr :: "'addr \<rightharpoonup> htype"
and heap_read :: "'heap \<Rightarrow> 'addr \<Rightarrow> addr_loc \<Rightarrow> 'addr val \<Rightarrow> bool"
and heap_write :: "'heap \<Rightarrow> 'addr \<Rightarrow> addr_loc \<Rightarrow> 'addr val \<Rightarrow> 'heap \<Rightarrow> bool"
and hconf :: "'heap \<Rightarrow> bool"
and allocated :: "'heap \<Rightarrow> 'addr set"
and P :: "'addr J_prog"
sublocale J_allocated_heap_conf' < h: J_allocated_heap_conf
addr2thread_id thread_id2addr
spurious_wakeups
empty_heap allocate "\<lambda>_. typeof_addr" heap_read heap_write hconf allocated
P
by(unfold_locales)
context J_allocated_heap_conf' begin
lemma red_New_type_match:
"\<lbrakk> h.red' P t e s ta e' s'; NewHeapElem ad CTn \<in> set \<lbrace>ta\<rbrace>\<^bsub>o\<^esub>; typeof_addr ad \<noteq> None \<rbrakk>
\<Longrightarrow> typeof_addr ad = \<lfloor>CTn\<rfloor>"
and reds_New_type_match:
"\<lbrakk> h.reds' P t es s ta es' s'; NewHeapElem ad CTn \<in> set \<lbrace>ta\<rbrace>\<^bsub>o\<^esub>; typeof_addr ad \<noteq> None \<rbrakk>
\<Longrightarrow> typeof_addr ad = \<lfloor>CTn\<rfloor>"
by(induct rule: h.red_reds.inducts)(auto dest: allocate_typeof_addr_SomeD red_external_New_type_match)
lemma mred_known_addrs_typing':
assumes wf: "wf_J_prog P"
and ok: "h.start_heap_ok"
shows "known_addrs_typing' addr2thread_id thread_id2addr empty_heap allocate typeof_addr heap_write allocated h.J_known_addrs final_expr (h.mred P) (\<lambda>t x h. \<exists>ET. h.sconf_type_ok ET t x h) P"
proof -
interpret known_addrs_typing
addr2thread_id thread_id2addr
spurious_wakeups
empty_heap allocate "\<lambda>_. typeof_addr" heap_read heap_write
allocated h.J_known_addrs
final_expr "h.mred P" "\<lambda>t x h. \<exists>ET. h.sconf_type_ok ET t x h"
P
using assms by(rule h.mred_known_addrs_typing)
show ?thesis by unfold_locales(auto dest: red_New_type_match)
qed
lemma J_legal_read_value_typeable:
assumes wf: "wf_J_prog P"
and wf_start: "h.wf_start_state P C M vs"
and legal: "weakly_legal_execution P (h.J_\<E> P C M vs status) (E, ws)"
and a: "enat a < llength E"
and read: "action_obs E a = NormalAction (ReadMem ad al v)"
shows "\<exists>T. P \<turnstile> ad@al : T \<and> P \<turnstile> v :\<le> T"
proof -
note wf
moreover from wf_start have "h.start_heap_ok" by cases
moreover from wf wf_start
have "ts_ok (\<lambda>t x h. \<exists>ET. h.sconf_type_ok ET t x h) (thr (h.J_start_state P C M vs)) h.start_heap"
by(rule h.J_start_state_sconf_type_ok)
moreover from wf have "wf_syscls P" by(rule wf_prog_wf_syscls)
ultimately show ?thesis using legal a read
by(rule known_addrs_typing'.weakly_legal_read_value_typeable[OF mred_known_addrs_typing'])
qed
end
subsection \<open>Specific part for JMM implementation 2\<close>
abbreviation jmm_J_\<E>
:: "addr J_prog \<Rightarrow> cname \<Rightarrow> mname \<Rightarrow> addr val list \<Rightarrow> status \<Rightarrow> (addr \<times> (addr, addr) obs_event action) llist set"
where
"jmm_J_\<E> P \<equiv>
J_heap_base.J_\<E> addr2thread_id thread_id2addr jmm_spurious_wakeups jmm_empty jmm_allocate (jmm_typeof_addr P) jmm_heap_read jmm_heap_write P"
abbreviation jmm'_J_\<E>
:: "addr J_prog \<Rightarrow> cname \<Rightarrow> mname \<Rightarrow> addr val list \<Rightarrow> status \<Rightarrow> (addr \<times> (addr, addr) obs_event action) llist set"
where
"jmm'_J_\<E> P \<equiv>
J_heap_base.J_\<E> addr2thread_id thread_id2addr jmm_spurious_wakeups jmm_empty jmm_allocate (jmm_typeof_addr P) (jmm_heap_read_typed P) jmm_heap_write P"
lemma jmm_J_heap_conf:
"J_heap_conf addr2thread_id thread_id2addr jmm_empty jmm_allocate (jmm_typeof_addr P) jmm_heap_write jmm_hconf P"
by(unfold_locales)
lemma jmm_J_allocated_heap_conf: "J_allocated_heap_conf addr2thread_id thread_id2addr jmm_empty jmm_allocate (jmm_typeof_addr P) jmm_heap_write jmm_hconf jmm_allocated P"
by(unfold_locales)
lemma jmm_J_allocated_heap_conf':
"J_allocated_heap_conf' addr2thread_id thread_id2addr jmm_empty jmm_allocate (jmm_typeof_addr' P) jmm_heap_write jmm_hconf jmm_allocated P"
apply(rule J_allocated_heap_conf'.intro)
apply(unfold jmm_typeof_addr'_conv_jmm_typeof_addr)
apply(unfold_locales)
done
lemma red_heap_read_typedD:
"J_heap_base.red' addr2thread_id thread_id2addr spurious_wakeups empty_heap allocate (\<lambda>_ :: 'heap. typeof_addr) (heap_base.heap_read_typed (\<lambda>_ :: 'heap. typeof_addr) heap_read P) heap_write P t e s ta e' s' \<longleftrightarrow>
J_heap_base.red' addr2thread_id thread_id2addr spurious_wakeups empty_heap allocate (\<lambda>_ :: 'heap. typeof_addr) heap_read heap_write P t e s ta e' s' \<and>
(\<forall>ad al v T. ReadMem ad al v \<in> set \<lbrace>ta\<rbrace>\<^bsub>o\<^esub> \<longrightarrow> heap_base'.addr_loc_type TYPE('heap) typeof_addr P ad al T \<longrightarrow> heap_base'.conf TYPE('heap) typeof_addr P v T)"
(is "?lhs1 \<longleftrightarrow> ?rhs1a \<and> ?rhs1b")
and reds_heap_read_typedD:
"J_heap_base.reds' addr2thread_id thread_id2addr spurious_wakeups empty_heap allocate (\<lambda>_ :: 'heap. typeof_addr) (heap_base.heap_read_typed (\<lambda>_ :: 'heap. typeof_addr) heap_read P) heap_write P t es s ta es' s' \<longleftrightarrow>
J_heap_base.reds' addr2thread_id thread_id2addr spurious_wakeups empty_heap allocate (\<lambda>_ :: 'heap. typeof_addr) heap_read heap_write P t es s ta es' s' \<and>
(\<forall>ad al v T. ReadMem ad al v \<in> set \<lbrace>ta\<rbrace>\<^bsub>o\<^esub> \<longrightarrow> heap_base'.addr_loc_type TYPE('heap) typeof_addr P ad al T \<longrightarrow> heap_base'.conf TYPE('heap) typeof_addr P v T)"
(is "?lhs2 \<longleftrightarrow> ?rhs2a \<and> ?rhs2b")
proof -
have "(?lhs1 \<longrightarrow> ?rhs1a \<and> ?rhs1b) \<and> (?lhs2 \<longrightarrow> ?rhs2a \<and> ?rhs2b)"
apply(induct rule: J_heap_base.red_reds.induct)
prefer 50 (* RedCallExternal *)
apply(subst (asm) red_external_heap_read_typed)
apply(fastforce intro!: J_heap_base.red_reds.RedCallExternal simp add: convert_extTA_def)
prefer 49 (* RedCall *)
apply(fastforce dest: J_heap_base.red_reds.RedCall)
apply(auto intro: J_heap_base.red_reds.intros dest: heap_base.heap_read_typed_into_heap_read heap_base.heap_read_typed_typed dest: heap_base'.addr_loc_type_conv_addr_loc_type[THEN fun_cong, THEN fun_cong, THEN fun_cong, THEN iffD2] heap_base'.conf_conv_conf[THEN fun_cong, THEN fun_cong, THEN iffD1])
done
moreover have "(?rhs1a \<longrightarrow> ?rhs1b \<longrightarrow> ?lhs1) \<and> (?rhs2a \<longrightarrow> ?rhs2b \<longrightarrow> ?lhs2)"
apply(induct rule: J_heap_base.red_reds.induct)
prefer 50 (* RedCallExternal *)
apply simp
apply(intro strip)
apply(erule (1) J_heap_base.red_reds.RedCallExternal)
apply(subst red_external_heap_read_typed, erule conjI)
apply(blast+)[4]
prefer 49 (* RedCall *)
apply(fastforce dest: J_heap_base.red_reds.RedCall)
apply(auto intro: J_heap_base.red_reds.intros intro!: heap_base.heap_read_typedI dest: heap_base'.addr_loc_type_conv_addr_loc_type[THEN fun_cong, THEN fun_cong, THEN fun_cong, THEN iffD1] intro: heap_base'.conf_conv_conf[THEN fun_cong, THEN fun_cong, THEN iffD2])
done
ultimately show "?lhs1 \<longleftrightarrow> ?rhs1a \<and> ?rhs1b" "?lhs2 \<longleftrightarrow> ?rhs2a \<and> ?rhs2b" by blast+
qed
lemma if_mred_heap_read_typedD:
"multithreaded_base.init_fin final_expr (J_heap_base.mred addr2thread_id thread_id2addr spurious_wakeups empty_heap allocate (\<lambda>_ :: 'heap. typeof_addr) (heap_base.heap_read_typed (\<lambda>_ :: 'heap. typeof_addr) heap_read P) heap_write P) t xh ta x'h' \<longleftrightarrow>
if_heap_read_typed final_expr (J_heap_base.mred addr2thread_id thread_id2addr spurious_wakeups empty_heap allocate (\<lambda>_ :: 'heap. typeof_addr) heap_read heap_write P) typeof_addr P t xh ta x'h'"
unfolding multithreaded_base.init_fin.simps
by(subst red_heap_read_typedD) fastforce
lemma J_\<E>_heap_read_typedI:
"\<lbrakk> E \<in> J_heap_base.J_\<E> addr2thread_id thread_id2addr spurious_wakeups empty_heap allocate (\<lambda>_ :: 'heap. typeof_addr) heap_read heap_write P C M vs status;
\<And>ad al v T. \<lbrakk> NormalAction (ReadMem ad al v) \<in> snd ` lset E; heap_base'.addr_loc_type TYPE('heap) typeof_addr P ad al T \<rbrakk> \<Longrightarrow> heap_base'.conf TYPE('heap) typeof_addr P v T \<rbrakk>
\<Longrightarrow> E \<in> J_heap_base.J_\<E> addr2thread_id thread_id2addr spurious_wakeups empty_heap allocate (\<lambda>_ :: 'heap. typeof_addr) (heap_base.heap_read_typed (\<lambda>_ :: 'heap. typeof_addr) heap_read P) heap_write P C M vs status"
apply(erule imageE, hypsubst)
apply(rule imageI)
apply(erule multithreaded_base.\<E>.cases, hypsubst)
apply(rule multithreaded_base.\<E>.intros)
apply(subst if_mred_heap_read_typedD[abs_def])
apply(erule if_mthr_Runs_heap_read_typedI)
apply(auto simp add: image_Un lset_lmap[symmetric] lmap_lconcat llist.map_comp o_def split_def simp del: lset_lmap)
done
lemma jmm'_redI:
"\<lbrakk> J_heap_base.red' addr2thread_id thread_id2addr spurious_wakeups empty_heap allocate typeof_addr jmm_heap_read jmm_heap_write P t e s ta e' s';
final_thread.actions_ok (final_thread.init_fin_final final_expr) S t ta \<rbrakk>
\<Longrightarrow> \<exists>ta e' s'. J_heap_base.red' addr2thread_id thread_id2addr spurious_wakeups empty_heap allocate typeof_addr (heap_base.heap_read_typed typeof_addr jmm_heap_read P) jmm_heap_write P t e s ta e' s' \<and> final_thread.actions_ok (final_thread.init_fin_final final_expr) S t ta"
(is "\<lbrakk> ?red'; ?aok \<rbrakk> \<Longrightarrow> ?concl")
and jmm'_redsI:
"\<lbrakk> J_heap_base.reds' addr2thread_id thread_id2addr spurious_wakeups empty_heap allocate typeof_addr jmm_heap_read jmm_heap_write P t es s ta es' s';
final_thread.actions_ok (final_thread.init_fin_final final_expr) S t ta \<rbrakk>
\<Longrightarrow> \<exists>ta es' s'. J_heap_base.reds' addr2thread_id thread_id2addr spurious_wakeups empty_heap allocate typeof_addr (heap_base.heap_read_typed typeof_addr jmm_heap_read P) jmm_heap_write P t es s ta es' s' \<and>
final_thread.actions_ok (final_thread.init_fin_final final_expr) S t ta"
(is "\<lbrakk> ?reds'; ?aoks \<rbrakk> \<Longrightarrow> ?concls")
proof -
note [simp del] = split_paired_Ex
and [simp add] = final_thread.actions_ok_iff heap_base.THE_addr_loc_type heap_base.defval_conf
and [intro] = jmm_heap_read_typed_default_val
let ?v = "\<lambda>h a al. default_val (THE T. heap_base.addr_loc_type typeof_addr P h a al T)"
have "(?red' \<longrightarrow> ?aok \<longrightarrow> ?concl) \<and> (?reds' \<longrightarrow> ?aoks \<longrightarrow> ?concls)"
proof(induct rule: J_heap_base.red_reds.induct)
case (23 h a T n i v l) (* RedAAcc *)
thus ?case by(auto 4 6 intro: J_heap_base.red_reds.RedAAcc[where v="?v h a (ACell (nat (sint i)))"])
next
case (35 h a D F v l) (* RedFAcc *)
thus ?case by(auto 4 5 intro: J_heap_base.red_reds.RedFAcc[where v="?v h a (CField D F)"])
next
case RedCASSucceed: (45 h a D F v v' h') (* RedCASSucceed *)
thus ?case
proof(cases "v = ?v h a (CField D F)")
case True
with RedCASSucceed show ?thesis
by(fastforce intro: J_heap_base.red_reds.RedCASSucceed[where v="?v h a (CField D F)"])
next
case False
with RedCASSucceed show ?thesis
by(fastforce intro: J_heap_base.red_reds.RedCASFail[where v''="?v h a (CField D F)"])
qed
next
case RedCASFail: (46 h a D F v'' v v' l)
thus ?case
proof(cases "v = ?v h a (CField D F)")
case True
with RedCASFail show ?thesis
by(fastforce intro: J_heap_base.red_reds.RedCASSucceed[where v="?v h a (CField D F)"] jmm_heap_write.intros)
next
case False
with RedCASFail show ?thesis
by(fastforce intro: J_heap_base.red_reds.RedCASFail[where v''="?v h a (CField D F)"])
qed
next
case (50 s a hU M Ts T D vs ta va h' ta' e' s') (* RedCallExternal *)
thus ?case
apply clarify
apply(drule jmm'_red_externalI, simp)
apply(auto 4 4 intro: J_heap_base.red_reds.RedCallExternal)
done
next
case (52 e h l V vo ta e' h' l' T) (* BlockRed *)
thus ?case
by(clarify)(iprover intro: J_heap_base.red_reds.BlockRed)
qed(blast intro: J_heap_base.red_reds.intros)+
thus "\<lbrakk> ?red'; ?aok \<rbrakk> \<Longrightarrow> ?concl" and "\<lbrakk> ?reds'; ?aoks \<rbrakk> \<Longrightarrow> ?concls" by blast+
qed
lemma if_mred_heap_read_not_stuck:
"\<lbrakk> multithreaded_base.init_fin final_expr (J_heap_base.mred addr2thread_id thread_id2addr spurious_wakeups empty_heap allocate typeof_addr jmm_heap_read jmm_heap_write P) t xh ta x'h';
final_thread.actions_ok (final_thread.init_fin_final final_expr) s t ta \<rbrakk>
\<Longrightarrow>
\<exists>ta x'h'. multithreaded_base.init_fin final_expr (J_heap_base.mred addr2thread_id thread_id2addr spurious_wakeups empty_heap allocate typeof_addr (heap_base.heap_read_typed typeof_addr jmm_heap_read P) jmm_heap_write P) t xh ta x'h' \<and> final_thread.actions_ok (final_thread.init_fin_final final_expr) s t ta"
apply(erule multithreaded_base.init_fin.cases)
apply hypsubst
apply clarify
apply(drule jmm'_redI)
apply(simp add: final_thread.actions_ok_iff)
apply clarify
apply(subst (2) split_paired_Ex)
apply(subst (2) split_paired_Ex)
apply(subst (2) split_paired_Ex)
apply(rule exI conjI)+
apply(rule multithreaded_base.init_fin.intros)
apply(simp)
apply(simp add: final_thread.actions_ok_iff)
apply(blast intro: multithreaded_base.init_fin.intros)
apply(blast intro: multithreaded_base.init_fin.intros)
done
lemma if_mredT_heap_read_not_stuck:
"multithreaded_base.redT (final_thread.init_fin_final final_expr) (multithreaded_base.init_fin final_expr (J_heap_base.mred addr2thread_id thread_id2addr spurious_wakeups empty_heap allocate typeof_addr jmm_heap_read jmm_heap_write P)) convert_RA' s tta s'
\<Longrightarrow> \<exists>tta s'. multithreaded_base.redT (final_thread.init_fin_final final_expr) (multithreaded_base.init_fin final_expr (J_heap_base.mred addr2thread_id thread_id2addr spurious_wakeups empty_heap allocate typeof_addr (heap_base.heap_read_typed typeof_addr jmm_heap_read P) jmm_heap_write P)) convert_RA' s tta s'"
apply(erule multithreaded_base.redT.cases)
apply hypsubst
apply(drule (1) if_mred_heap_read_not_stuck)
apply(erule exE)+
apply(rename_tac ta' x'h')
apply(insert redT_updWs_total)
apply(erule_tac x="t" in meta_allE)
apply(erule_tac x="wset s" in meta_allE)
apply(erule_tac x="\<lbrace>ta'\<rbrace>\<^bsub>w\<^esub>" in meta_allE)
apply clarsimp
apply(rule exI)+
apply(auto intro!: multithreaded_base.redT.intros)[1]
apply hypsubst
apply(rule exI conjI)+
apply(rule multithreaded_base.redT.redT_acquire)
apply assumption+
done
lemma J_\<E>_heap_read_typedD:
"E \<in> J_heap_base.J_\<E> addr2thread_id thread_id2addr spurious_wakeups empty_heap allocate (\<lambda>_. typeof_addr) (heap_base.heap_read_typed (\<lambda>_. typeof_addr) jmm_heap_read P) jmm_heap_write P C M vs status
\<Longrightarrow> E \<in> J_heap_base.J_\<E> addr2thread_id thread_id2addr spurious_wakeups empty_heap allocate (\<lambda>_. typeof_addr) jmm_heap_read jmm_heap_write P C M vs status"
apply(erule imageE, hypsubst)
apply(rule imageI)
apply(erule multithreaded_base.\<E>.cases, hypsubst)
apply(rule multithreaded_base.\<E>.intros)
apply(subst (asm) if_mred_heap_read_typedD[abs_def])
apply(erule if_mthr_Runs_heap_read_typedD)
apply(erule if_mredT_heap_read_not_stuck[where typeof_addr="\<lambda>_. typeof_addr", unfolded if_mred_heap_read_typedD[abs_def]])
done
lemma J_\<E>_typesafe_subset: "jmm'_J_\<E> P C M vs status \<subseteq> jmm_J_\<E> P C M vs status"
unfolding jmm_typeof_addr_def[abs_def]
by(rule subsetI)(erule J_\<E>_heap_read_typedD)
lemma J_legal_typesafe1:
assumes wfP: "wf_J_prog P"
and ok: "jmm_wf_start_state P C M vs"
and legal: "legal_execution P (jmm_J_\<E> P C M vs status) (E, ws)"
shows "legal_execution P (jmm'_J_\<E> P C M vs status) (E, ws)"
proof -
let ?\<E> = "jmm_J_\<E> P C M vs status"
let ?\<E>' = "jmm'_J_\<E> P C M vs status"
from legal obtain J
where justified: "P \<turnstile> (E, ws) justified_by J"
and range: "range (justifying_exec \<circ> J) \<subseteq> ?\<E>"
and E: "E \<in> ?\<E>" and wf: "P \<turnstile> (E, ws) \<surd>" by(auto simp add: gen_legal_execution.simps)
let ?J = "J(0 := \<lparr>committed = {}, justifying_exec = justifying_exec (J 1), justifying_ws = justifying_ws (J 1), action_translation = id\<rparr>)"
from wfP have wf_sys: "wf_syscls P" by(rule wf_prog_wf_syscls)
from justified have "P \<turnstile> (justifying_exec (J 1), justifying_ws (J 1)) \<surd>"
by(simp add: justification_well_formed_def)
with justified have "P \<turnstile> (E, ws) justified_by ?J" by(rule drop_0th_justifying_exec)
moreover have "range (justifying_exec \<circ> ?J) \<subseteq> ?\<E>'"
proof
fix \<xi>
assume "\<xi> \<in> range (justifying_exec \<circ> ?J)"
then obtain n where "\<xi> = justifying_exec (?J n)" by auto
then obtain n where \<xi>: "\<xi> = justifying_exec (J n)" and n: "n > 0" by(auto split: if_split_asm)
from range \<xi> have "\<xi> \<in> ?\<E>" by auto
thus "\<xi> \<in> ?\<E>'" unfolding jmm_typeof_addr'_conv_jmm_type_addr[symmetric, abs_def]
proof(rule J_\<E>_heap_read_typedI)
fix ad al v T
assume read: "NormalAction (ReadMem ad al v) \<in> snd ` lset \<xi>"
and adal: "P \<turnstile>jmm ad@al : T"
from read obtain a where a: "enat a < llength \<xi>" "action_obs \<xi> a = NormalAction (ReadMem ad al v)"
unfolding lset_conv_lnth by(auto simp add: action_obs_def)
with J_allocated_heap_conf'.mred_known_addrs_typing'[OF jmm_J_allocated_heap_conf' wfP jmm_start_heap_ok]
J_heap_conf.J_start_state_sconf_type_ok[OF jmm_J_heap_conf wfP ok]
wf_sys is_justified_by_imp_is_weakly_justified_by[OF justified wf] range n
have "\<exists>T. P \<turnstile>jmm ad@al : T \<and> P \<turnstile>jmm v :\<le> T"
unfolding jmm_typeof_addr'_conv_jmm_type_addr[symmetric, abs_def] \<xi>
by(rule known_addrs_typing'.read_value_typeable_justifying)
thus "P \<turnstile>jmm v :\<le> T" using adal
by(auto dest: jmm.addr_loc_type_fun[unfolded jmm_typeof_addr_conv_jmm_typeof_addr', unfolded heap_base'.addr_loc_type_conv_addr_loc_type])
qed
qed
moreover from E have "E \<in> ?\<E>'"
unfolding jmm_typeof_addr'_conv_jmm_type_addr[symmetric, abs_def]
proof(rule J_\<E>_heap_read_typedI)
fix ad al v T
assume read: "NormalAction (ReadMem ad al v) \<in> snd ` lset E"
and adal: "P \<turnstile>jmm ad@al : T"
from read obtain a where a: "enat a < llength E" "action_obs E a = NormalAction (ReadMem ad al v)"
unfolding lset_conv_lnth by(auto simp add: action_obs_def)
with jmm_J_allocated_heap_conf' wfP ok legal_imp_weakly_legal_execution[OF legal]
have "\<exists>T. P \<turnstile>jmm ad@al : T \<and> P \<turnstile>jmm v :\<le> T"
unfolding jmm_typeof_addr'_conv_jmm_type_addr[symmetric, abs_def]
by(rule J_allocated_heap_conf'.J_legal_read_value_typeable)
thus "P \<turnstile>jmm v :\<le> T" using adal
by(auto dest: jmm.addr_loc_type_fun[unfolded jmm_typeof_addr_conv_jmm_typeof_addr', unfolded heap_base'.addr_loc_type_conv_addr_loc_type])
qed
ultimately show ?thesis using wf unfolding gen_legal_execution.simps by blast
qed
lemma J_weakly_legal_typesafe1:
assumes wfP: "wf_J_prog P"
and ok: "jmm_wf_start_state P C M vs"
and legal: "weakly_legal_execution P (jmm_J_\<E> P C M vs status) (E, ws)"
shows "weakly_legal_execution P (jmm'_J_\<E> P C M vs status) (E, ws)"
proof -
let ?\<E> = "jmm_J_\<E> P C M vs status"
let ?\<E>' = "jmm'_J_\<E> P C M vs status"
from legal obtain J
where justified: "P \<turnstile> (E, ws) weakly_justified_by J"
and range: "range (justifying_exec \<circ> J) \<subseteq> ?\<E>"
and E: "E \<in> ?\<E>" and wf: "P \<turnstile> (E, ws) \<surd>" by(auto simp add: gen_legal_execution.simps)
let ?J = "J(0 := \<lparr>committed = {}, justifying_exec = justifying_exec (J 1), justifying_ws = justifying_ws (J 1), action_translation = id\<rparr>)"
from wfP have wf_sys: "wf_syscls P" by(rule wf_prog_wf_syscls)
from justified have "P \<turnstile> (justifying_exec (J 1), justifying_ws (J 1)) \<surd>"
by(simp add: justification_well_formed_def)
with justified have "P \<turnstile> (E, ws) weakly_justified_by ?J" by(rule drop_0th_weakly_justifying_exec)
moreover have "range (justifying_exec \<circ> ?J) \<subseteq> ?\<E>'"
proof
fix \<xi>
assume "\<xi> \<in> range (justifying_exec \<circ> ?J)"
then obtain n where "\<xi> = justifying_exec (?J n)" by auto
then obtain n where \<xi>: "\<xi> = justifying_exec (J n)" and n: "n > 0" by(auto split: if_split_asm)
from range \<xi> have "\<xi> \<in> ?\<E>" by auto
thus "\<xi> \<in> ?\<E>'" unfolding jmm_typeof_addr'_conv_jmm_type_addr[symmetric, abs_def]
proof(rule J_\<E>_heap_read_typedI)
fix ad al v T
assume read: "NormalAction (ReadMem ad al v) \<in> snd ` lset \<xi>"
and adal: "P \<turnstile>jmm ad@al : T"
from read obtain a where a: "enat a < llength \<xi>" "action_obs \<xi> a = NormalAction (ReadMem ad al v)"
unfolding lset_conv_lnth by(auto simp add: action_obs_def)
with J_allocated_heap_conf'.mred_known_addrs_typing'[OF jmm_J_allocated_heap_conf' wfP jmm_start_heap_ok]
J_heap_conf.J_start_state_sconf_type_ok[OF jmm_J_heap_conf wfP ok]
wf_sys justified range n
have "\<exists>T. P \<turnstile>jmm ad@al : T \<and> P \<turnstile>jmm v :\<le> T"
unfolding jmm_typeof_addr'_conv_jmm_type_addr[symmetric, abs_def] \<xi>
by(rule known_addrs_typing'.read_value_typeable_justifying)
thus "P \<turnstile>jmm v :\<le> T" using adal
by(auto dest: jmm.addr_loc_type_fun[unfolded jmm_typeof_addr_conv_jmm_typeof_addr', unfolded heap_base'.addr_loc_type_conv_addr_loc_type])
qed
qed
moreover from E have "E \<in> ?\<E>'"
unfolding jmm_typeof_addr'_conv_jmm_type_addr[symmetric, abs_def]
proof(rule J_\<E>_heap_read_typedI)
fix ad al v T
assume read: "NormalAction (ReadMem ad al v) \<in> snd ` lset E"
and adal: "P \<turnstile>jmm ad@al : T"
from read obtain a where a: "enat a < llength E" "action_obs E a = NormalAction (ReadMem ad al v)"
unfolding lset_conv_lnth by(auto simp add: action_obs_def)
with jmm_J_allocated_heap_conf' wfP ok legal
have "\<exists>T. P \<turnstile>jmm ad@al : T \<and> P \<turnstile>jmm v :\<le> T"
unfolding jmm_typeof_addr'_conv_jmm_type_addr[symmetric, abs_def]
by(rule J_allocated_heap_conf'.J_legal_read_value_typeable)
thus "P \<turnstile>jmm v :\<le> T" using adal
by(auto dest: jmm.addr_loc_type_fun[unfolded jmm_typeof_addr_conv_jmm_typeof_addr', unfolded heap_base'.addr_loc_type_conv_addr_loc_type])
qed
ultimately show ?thesis using wf unfolding gen_legal_execution.simps by blast
qed
lemma J_legal_typesafe2:
assumes legal: "legal_execution P (jmm'_J_\<E> P C M vs status) (E, ws)"
shows "legal_execution P (jmm_J_\<E> P C M vs status) (E, ws)"
proof -
let ?\<E> = "jmm_J_\<E> P C M vs status"
let ?\<E>' = "jmm'_J_\<E> P C M vs status"
from legal obtain J
where justified: "P \<turnstile> (E, ws) justified_by J"
and range: "range (justifying_exec \<circ> J) \<subseteq> ?\<E>'"
and E: "E \<in> ?\<E>'" and wf: "P \<turnstile> (E, ws) \<surd>" by(auto simp add: gen_legal_execution.simps)
from range E have "range (justifying_exec \<circ> J) \<subseteq> ?\<E>" "E \<in> ?\<E>"
using J_\<E>_typesafe_subset[of P status C M vs] by blast+
with justified wf
show ?thesis by(auto simp add: gen_legal_execution.simps)
qed
lemma J_weakly_legal_typesafe2:
assumes legal: "weakly_legal_execution P (jmm'_J_\<E> P C M vs status) (E, ws)"
shows "weakly_legal_execution P (jmm_J_\<E> P C M vs status) (E, ws)"
proof -
let ?\<E> = "jmm_J_\<E> P C M vs status"
let ?\<E>' = "jmm'_J_\<E> P C M vs status"
from legal obtain J
where justified: "P \<turnstile> (E, ws) weakly_justified_by J"
and range: "range (justifying_exec \<circ> J) \<subseteq> ?\<E>'"
and E: "E \<in> ?\<E>'" and wf: "P \<turnstile> (E, ws) \<surd>" by(auto simp add: gen_legal_execution.simps)
from range E have "range (justifying_exec \<circ> J) \<subseteq> ?\<E>" "E \<in> ?\<E>"
using J_\<E>_typesafe_subset[of P status C M vs] by blast+
with justified wf
show ?thesis by(auto simp add: gen_legal_execution.simps)
qed
theorem J_weakly_legal_typesafe:
assumes "wf_J_prog P"
and "jmm_wf_start_state P C M vs"
shows "weakly_legal_execution P (jmm_J_\<E> P C M vs status) = weakly_legal_execution P (jmm'_J_\<E> P C M vs status)"
apply(rule ext iffI)+
apply(clarify, erule J_weakly_legal_typesafe1[OF assms])
apply(clarify, erule J_weakly_legal_typesafe2)
done
theorem J_legal_typesafe:
assumes "wf_J_prog P"
and "jmm_wf_start_state P C M vs"
shows "legal_execution P (jmm_J_\<E> P C M vs status) = legal_execution P (jmm'_J_\<E> P C M vs status)"
apply(rule ext iffI)+
apply(clarify, erule J_legal_typesafe1[OF assms])
apply(clarify, erule J_legal_typesafe2)
done
end
|
module TestCandidate
import Wordle
import Test: @test, @test_throws
c = Wordle.Candidate("alone")
@test c.chars === ('a', 'l', 'o', 'n', 'e')
@test String(c) === "alone"
# Too short
@test_throws Exception Wordle.Candidate("true")
# Not all lowercase
@test_throws Exception Wordle.Candidate("Alone")
# Not all ASCII
@test_throws Exception Wordle.Candidate("señor")
end # module
|
function acq = acqimiqr_vbmc(Xs,vp,gp,optimState,fmu,fs2,fbar,vtot)
%ACQIMIQR_VBMC Integrated median interquantile range acquisition function.
u = 0.6745; % norminv(0.75)
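% Note (our own explanatory comment): for f ~ N(m,s^2), the 25th/75th
% percentiles of exp(f) are exp(m - u*s) and exp(m + u*s), so the log of the
% interquantile range exp(m + u*s) - exp(m - u*s) is
% m + u*s + log1p(-exp(-2*u*s)); the expressions below are pieces of this.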
if isempty(Xs)
% Return acquisition function info struct
acq.importance_sampling = true;
acq.importance_sampling_vp = false;
acq.log_flag = true;
return;
elseif ischar(Xs)
switch lower(Xs)
case 'islogf1'
% Importance sampling log base proposal (shared part)
acq = fmu;
case 'islogf2'
% Importance sampling log base proposal (added part)
% (Full log base proposal is fixed + added)
fs = sqrt(fs2);
acq = u*fs + log1p(-exp(-2*u*fs));
case 'islogf'
% Importance sampling log base proposal distribution
fs = sqrt(fs2);
acq = fmu + u*fs + log1p(-exp(-2*u*fs));
end
return;
end
% Different importance sampling inputs for different GP hyperparameters?
multipleinputs_flag = size(optimState.ActiveImportanceSampling.Xa,3) > 1;
% Xs is in *transformed* coordinates
[Nx,D] = size(Xs);
Ns = size(fmu,2);
Na = size(optimState.ActiveImportanceSampling.Xa,1);
% Estimate observation noise at test points from nearest neighbor
[~,pos] = min(sq_dist(bsxfun(@rdivide,Xs,optimState.gplengthscale),gp.X_rescaled),[],2);
sn2 = gp.sn2new(pos);
% sn2 = min(sn2,1e4);
ys2 = fs2 + sn2; % Predictive variance at test points
if multipleinputs_flag
Xa = zeros(Na,D);
else
Xa = optimState.ActiveImportanceSampling.Xa;
end
acq = zeros(Nx,Ns);
%% Compute integrated acquisition function via importance sampling
for s = 1:Ns
hyp = gp.post(s).hyp;
L = gp.post(s).L;
Lchol = gp.post(s).Lchol;
sn2_eff = 1/gp.post(s).sW(1)^2;
if multipleinputs_flag
Xa(:,:) = optimState.ActiveImportanceSampling.Xa(:,:,s);
end
% Compute cross-kernel matrix Ks_mat
if gp.covfun(1) == 1 % Hard-coded SE-ard for speed
ell = exp(hyp(1:D))';
sf2 = exp(2*hyp(D+1));
Ks_mat = sq_dist(gp.X*diag(1./ell),Xs*diag(1./ell));
Ks_mat = sf2 * exp(-Ks_mat/2);
Ka_mat = sq_dist(Xa*diag(1./ell),Xs*diag(1./ell));
Ka_mat = sf2 * exp(-Ka_mat/2);
%Kax_mat = sq_dist(Xa*diag(1./ell),gp.X*diag(1./ell));
%Kax_mat = sf2 * exp(-Kax_mat/2);
Kax_mat(:,:) = optimState.ActiveImportanceSampling.Kax_mat(:,:,s);
else
error('Other covariance functions not supported yet.');
end
if Lchol
C = Ka_mat' - Ks_mat'*(L\(L'\Kax_mat'))/sn2_eff;
else
C = Ka_mat' + Ks_mat'*(L*Kax_mat');
end
tau2 = bsxfun(@rdivide,C.^2,ys2(:,s));
s_pred = sqrt(max(bsxfun(@minus,optimState.ActiveImportanceSampling.fs2a(:,s)',tau2),0));
lnw = optimState.ActiveImportanceSampling.lnw(s,:);
zz = bsxfun(@plus,lnw,u*s_pred + log1p(-exp(-2*u*s_pred)));
lnmax = max(zz,[],2);
acq(:,s) = log(sum(exp(bsxfun(@minus,zz,lnmax)),2)) + lnmax;
end
if Ns > 1
M = max(acq,[],2);
acq = M + log(sum(exp(bsxfun(@minus,acq,M)),2)/Ns);
end
end
%SQ_DIST Compute matrix of all pairwise squared distances between two sets
% of vectors, stored in the columns of the two matrices, a (of size n-by-D)
% and b (of size m-by-D).
function C = sq_dist(a,b)
n = size(a,1);
m = size(b,1);
mu = (m/(n+m))*mean(b,1) + (n/(n+m))*mean(a,1);
a = bsxfun(@minus,a,mu); b = bsxfun(@minus,b,mu);
C = bsxfun(@plus,sum(a.*a,2),bsxfun(@minus,sum(b.*b,2)',2*a*b'));
C = max(C,0);
end
|
##
## Copyright (C) 2015 Dato, Inc.
## All rights reserved.
##
## This software may be modified and distributed under the terms
## of the BSD license. See the LICENSE file for details.
##
## runit.sf.r
.runThisTest <- Sys.getenv("RunAllDatoCoreTests") == "yes"
if (.runThisTest) {
test.sf <- function() {
df <- data.frame(
a = c(1, 2, 3),
b = c("x", "y", "z"),
c = c(0.1, 0.2, 0.3),
stringsAsFactors = FALSE
)
sf <- as.sframe(df)
df2 <- as.data.frame(sf)
checkEquals(df, df2)
vec <- c(1:15)
sa <- as.sarray(vec)
vec2 <- as.vector(sa)
checkEquals(class(sa)[1], "sarray")
checkEquals(vec, vec2)
checkEquals(sa[10], 10)
}
}
|
#necessary library
library(data.table) #fread
library(lubridate)
library(reshape2)
library(dplyr)
library(scales)
library(stringr)
library(tidyr) # spread function
#memory.limit(size=900000) #Windows-specific #JO
YEARLIST = c("19")
MONTHLIST = c("01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11", "12")
DISTANCE_FILEPATH = "../../data/tidy/vehicle-trajectory-computation/"
COMPUTATION_FILEPATH = "../../data/tidy/"
NUM_SAMPLE_DAYS = 10
NUM_SPEED_BINS = 6
NUM_ACCEL_BINS = 6
# aggregrate_trajectory_table
sample_day_trajectories = function(year, month, num_sample_days, seed_val){
assign("dg",fread(paste0(DISTANCE_FILEPATH, paste(paste("green", "trajectory", year, month, sep = "-", collapse = ""), ".csv", sep=""))))
assign("dh",fread(paste0(DISTANCE_FILEPATH, paste(paste("heavy", "trajectory", year, month, sep = "-", collapse = ""), ".csv", sep=""))))
dg$lineid = 4
dg = subset(dg, select = c(trxtime, year, month, day, lineid, lat, lon , speed_kph , accel_mps2 , interval_seconds , dist_meters , vehicleid))
dh = subset(dh, select = c(trxtime, year, month, day, lineid, lat, lon , speed_kph , accel_mps2 , interval_seconds , dist_meters , vehicleid))
# dg = dg[, c(trxtime, year, month, day, lineid, lat, lon , speed_kph , accel_mps2 , interval_seconds , dist_meters , vehicleid)]
# dh = dh[, c(trxtime, year, month, day, lineid, lat, lon , speed_kph , accel_mps2 , interval_seconds , dist_meters , vehicleid)]
DT = rbind(dg, dh)
if (month %in% c("04", "06", "09", "11")) { # months with 30 days
num_days = 30
} else if (month == "02") { # February (2019 is not a leap year)
num_days = 28
} else {
num_days = 31
}
DT <- DT[, day:=as.integer(day)]
set.seed(seed_val)
day_subset = sample(seq(num_days), num_sample_days)
setkey(DT, day)
DT = DT[day %in% day_subset]
print(paste("Year:", year, "; Month:", month, "; Days sampled:"))
print(unique(DT$day))
remove(dg)
remove(dh)
return(DT)
}
# Unit conversion
convert_units = function(df){
df$hour = hour(df$trxtime)
df$speed_mph = df$speed_kph*0.621371 #kph to mph
df$distance_mile = df$dist_meters*0.000621371 #convert from meters to mile
df$time_hr = df$interval_seconds/3600.0 #convert from seconds to hour
return(df)
}
# Calculate the speed bins
get_speed_cutpoints <- function (DT, num_bins, test = FALSE) {
print("Computing speed cutpoints")
probabilities = seq(0, 1, 1/num_bins)
cutpoints <- quantile(DT$speed_mph, probabilities, na.rm=TRUE)
df <- data.frame(cutpoints)
print("Done")
return(df)
}
# Calculate the acceleration quantiles
get_acceleration_cutpoints <- function (DT, num_bins, test = FALSE) {
print("Computing acceleration cutpoints")
probabilities = seq(0, 1, 1/num_bins)
cutpoints <- quantile(DT$accel_mps2, probabilities, na.rm=TRUE) #read in the list from a saved file of cutpoints
df <- data.frame(cutpoints)
print("Done")
return(df)
}
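# Example (illustrative; assumes a data.table DT with a speed_mph column):
#   cutpoints <- get_speed_cutpoints(DT, NUM_SPEED_BINS)
#   # -> a one-column data.frame of NUM_SPEED_BINS + 1 quantiles (0%, ..., 100%)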
main <- function (num_speed_bins, num_accel_bins, num_days_to_sample, year_list, month_list, line_number) {
DT = data.table()
SEEDLIST = c(111, 222, 333, 444, 555, 666, 777, 888, 999, 101010, 111111, 121212) #different seed for each month
seed_counter = 1
for (y in year_list) {
for (m in month_list) {
interval_DT = sample_day_trajectories(y, m, num_days_to_sample, SEEDLIST[seed_counter])
DT = rbindlist( list(DT, interval_DT) ) # obtain sampled df from all observations
##DT = DT(DT$lineid==line_number) - jimi/ Zhuo - complete
seed_counter = seed_counter + 1
remove(interval_DT)
}
}
DT = convert_units(DT)
speed_cutpoints = get_speed_cutpoints(DT, num_speed_bins)
acceleration_cutpoints = get_acceleration_cutpoints(DT, num_accel_bins)
## ZHUO - update file names
write.csv(speed_cutpoints, file.path(paste0(COMPUTATION_FILEPATH, paste0(paste("speed", y, "cutpoints", "bins" , num_speed_bins, sep = "-", collapse = ""), ".csv"))))
write.csv(acceleration_cutpoints, file.path(paste0(COMPUTATION_FILEPATH, paste0(paste("acceleration", y, "cutpoints", "bins" , num_accel_bins, sep = "-", collapse = ""), ".csv"))))
remove(DT)
}
for (LINE_NUMBER in c(1,2,3,4)) {
main(NUM_SPEED_BINS, NUM_ACCEL_BINS, NUM_SAMPLE_DAYS, YEARLIST, MONTHLIST, LINE_NUMBER)
}
|
/* linalg/choleskyc.c
*
* Copyright (C) 2007 Patrick Alken
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <config.h>
#include <gsl/gsl_matrix.h>
#include <gsl/gsl_vector.h>
#include <gsl/gsl_linalg.h>
#include <gsl/gsl_math.h>
#include <gsl/gsl_complex.h>
#include <gsl/gsl_complex_math.h>
#include <gsl/gsl_blas.h>
#include <gsl/gsl_errno.h>
/*
* This module contains routines related to the Cholesky decomposition
* of a complex Hermitian positive definite matrix.
*/
static void cholesky_complex_conj_vector(gsl_vector_complex *v);
/*
gsl_linalg_complex_cholesky_decomp()
Perform the Cholesky decomposition on a Hermitian positive definite
matrix. See Golub & Van Loan, "Matrix Computations" (3rd ed),
algorithm 4.2.2.
Inputs: A - (input/output) complex positive definite matrix
Return: success or error
The lower triangle of A is overwritten with the Cholesky decomposition
*/
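/*
 * Sketch of the underlying recurrence (for reference; the loop below follows
 * Golub & Van Loan, algorithm 4.2.2, column by column):
 *
 *   L(j,j) = sqrt( A(j,j) - sum_{k<j} |L(j,k)|^2 )
 *   L(i,j) = ( A(i,j) - sum_{k<j} L(i,k) * conj(L(j,k)) ) / L(j,j),  i > j
 *
 * The zdotc and zgemv calls implement the two sums over the partially
 * overwritten lower triangle of A.
 */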
int
gsl_linalg_complex_cholesky_decomp(gsl_matrix_complex *A)
{
const size_t N = A->size1;
if (N != A->size2)
{
GSL_ERROR("cholesky decomposition requires square matrix", GSL_ENOTSQR);
}
else
{
size_t i, j;
gsl_complex z;
double ajj;
for (j = 0; j < N; ++j)
{
z = gsl_matrix_complex_get(A, j, j);
ajj = GSL_REAL(z);
if (j > 0)
{
gsl_vector_complex_const_view aj =
gsl_matrix_complex_const_subrow(A, j, 0, j);
gsl_blas_zdotc(&aj.vector, &aj.vector, &z);
ajj -= GSL_REAL(z);
}
if (ajj <= 0.0)
{
GSL_ERROR("matrix is not positive definite", GSL_EDOM);
}
ajj = sqrt(ajj);
GSL_SET_COMPLEX(&z, ajj, 0.0);
gsl_matrix_complex_set(A, j, j, z);
if (j < N - 1)
{
gsl_vector_complex_view av =
gsl_matrix_complex_subcolumn(A, j, j + 1, N - j - 1);
if (j > 0)
{
gsl_vector_complex_view aj =
gsl_matrix_complex_subrow(A, j, 0, j);
gsl_matrix_complex_view am =
gsl_matrix_complex_submatrix(A, j + 1, 0, N - j - 1, j);
cholesky_complex_conj_vector(&aj.vector);
gsl_blas_zgemv(CblasNoTrans,
GSL_COMPLEX_NEGONE,
&am.matrix,
&aj.vector,
GSL_COMPLEX_ONE,
&av.vector);
cholesky_complex_conj_vector(&aj.vector);
}
gsl_blas_zdscal(1.0 / ajj, &av.vector);
}
}
/* Now store L^H in upper triangle */
for (i = 1; i < N; ++i)
{
for (j = 0; j < i; ++j)
{
z = gsl_matrix_complex_get(A, i, j);
gsl_matrix_complex_set(A, j, i, gsl_complex_conjugate(z));
}
}
return GSL_SUCCESS;
}
} /* gsl_linalg_complex_cholesky_decomp() */
/*
gsl_linalg_complex_cholesky_solve()
Solve A x = b where A is in cholesky form
*/
int
gsl_linalg_complex_cholesky_solve (const gsl_matrix_complex * cholesky,
const gsl_vector_complex * b,
gsl_vector_complex * x)
{
if (cholesky->size1 != cholesky->size2)
{
GSL_ERROR ("cholesky matrix must be square", GSL_ENOTSQR);
}
else if (cholesky->size1 != b->size)
{
GSL_ERROR ("matrix size must match b size", GSL_EBADLEN);
}
else if (cholesky->size2 != x->size)
{
GSL_ERROR ("matrix size must match solution size", GSL_EBADLEN);
}
else
{
gsl_vector_complex_memcpy (x, b);
/* solve for y using forward-substitution, L y = b */
gsl_blas_ztrsv (CblasLower, CblasNoTrans, CblasNonUnit, cholesky, x);
/* perform back-substitution, L^H x = y */
gsl_blas_ztrsv (CblasLower, CblasConjTrans, CblasNonUnit, cholesky, x);
return GSL_SUCCESS;
}
} /* gsl_linalg_complex_cholesky_solve() */
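/*
 * Minimal usage sketch (assumes A is a Hermitian positive definite N-by-N
 * matrix and b an N-vector, both already filled in by the caller):
 *
 *   gsl_linalg_complex_cholesky_decomp(A);      |* A now holds L and L^H *|
 *   gsl_linalg_complex_cholesky_solve(A, b, x); |* x solves A x = b *|
 *
 * The return value of decomp() should be checked for GSL_EDOM when positive
 * definiteness of A is not known a priori.
 */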
/*
gsl_linalg_complex_cholesky_svx()
Solve A x = b in place where A is in cholesky form
*/
int
gsl_linalg_complex_cholesky_svx (const gsl_matrix_complex * cholesky,
gsl_vector_complex * x)
{
if (cholesky->size1 != cholesky->size2)
{
GSL_ERROR ("cholesky matrix must be square", GSL_ENOTSQR);
}
else if (cholesky->size2 != x->size)
{
GSL_ERROR ("matrix size must match solution size", GSL_EBADLEN);
}
else
{
/* solve for y using forward-substitution, L y = b */
gsl_blas_ztrsv (CblasLower, CblasNoTrans, CblasNonUnit, cholesky, x);
/* perform back-substitution, L^H x = y */
gsl_blas_ztrsv (CblasLower, CblasConjTrans, CblasNonUnit, cholesky, x);
return GSL_SUCCESS;
}
} /* gsl_linalg_complex_cholesky_svx() */
/********************************************
* INTERNAL ROUTINES *
********************************************/
static void
cholesky_complex_conj_vector(gsl_vector_complex *v)
{
size_t i;
for (i = 0; i < v->size; ++i)
{
gsl_complex z = gsl_vector_complex_get(v, i);
gsl_vector_complex_set(v, i, gsl_complex_conjugate(z));
}
} /* cholesky_complex_conj_vector() */
|
open import Prelude
open import Reflection renaming (Term to AgTerm; Type to AgType)
open import Data.String using (String)
open import RW.Language.RTerm
open import RW.Language.RTermUtils
open import RW.Language.FinTerm
open import RW.Language.GoalGuesser 1
open import RW.Strategy
module RW.RW (db : TStratDB) where
open import RW.Utils.Monads
open import RW.Utils.Error
open Monad {{...}}
open IsError {{...}}
------------------
-- Housekeeping --
------------------
-- We need to bring our instances into scope explicitly,
-- to make Agda happy.
private
instance
ErrErr = IsError-StratErr
ErrMonad = MonadError
unarg : {A : Set} → Arg A → A
unarg (arg _ x) = x
-- We need to translate types to FinTerms, so we know how many variables
-- we're expecting to guess from instantiation.
Ag2RTypeFin : AgType → ∃ FinTerm
Ag2RTypeFin = R2FinType ∘ lift-ivar ∘ η ∘ Ag2RType
-- TODO: fix the duality: "number of ivar's lifted to ovar's vs. parameters we need to guess"
make-RWData : Name → AgTerm → List (Arg AgType) → Err StratErr RWData
make-RWData act goal ctx with η (Ag2RTerm goal) | Ag2RTypeFin (type act) | map (Ag2RType ∘ unarg) ctx
...| g' | tyℕ , ty | ctx' with forceBinary g' | forceBinary (typeResult ty)
...| just g | just a = return (rw-data g tyℕ a ctx')
...| just _ | nothing = throwError (Custom "Something strange happened with the action")
...| nothing | just _ = throwError (Custom "Something strange happened with the goal")
...| nothing | nothing = throwError (Custom "My brain just exploded.")
-- Given a goal and a list of actions to apply to such goal, return
-- a list l₁ ⋯ lₙ such that ∀ 0 < i < n . tyᵢ : p1 lᵢ → p1 lᵢ₊₁
Ag2RTypeFin* : RTerm ⊥ → List AgType → Maybe (List (RBinApp ⊥ × ∃ (RBinApp ∘ Fin)))
Ag2RTypeFin* (rapp n (g₁ ∷ g₂ ∷ [])) tys
= mapM (return ∘ Ag2RTypeFin) tys
>>= mapM (λ v → forceBinary (typeResult (p2 v)) >>= (return ∘ (_,_ $ p1 v)))
>>= λ tys' → (divideGoal (n , g₁ , g₂) tys' >>= assemble)
>>= λ gs → return (zip gs tys')
where
assemble : {A : Set} → List (RTerm A) → Maybe (List (RBinApp A))
assemble (x1 ∷ x2 ∷ []) = just ((n , x1 , x2) ∷ [])
assemble (x1 ∷ x2 ∷ l) = assemble (x2 ∷ l) >>= return ∘ (_∷_ (n , x1 , x2))
assemble _ = nothing
Ag2RTypeFin* _ _ = nothing
-- Produces a list of RWData, one for each 'guessed' step.
make-RWData* : List Name → AgTerm → List (Arg AgType) → Err StratErr (List RWData)
make-RWData* acts goal ctx with Ag2RTerm goal | map type acts | map (Ag2RType ∘ unarg) ctx
...| g' | tys | ctx' with Ag2RTypeFin* g' tys
...| nothing = throwError (Custom "Are you sure you can apply those steps?")
...| just r = i2 (map (λ x → rw-data (p1 x) (p1 (p2 x)) (p2 (p2 x)) ctx') r)
postulate
RW-error : ∀{a}{A : Set a} → String → A
RWerr : Name → RWData → Err StratErr (RWData × UData × RTerm ⊥)
RWerr act d
= runUStrats d
>>= λ u → runTStrats db d act u
>>= λ v → return (d , u , v)
-- A variant with less information, more suitable to be map'ed.
RWerr-less : Name → RWData → Err StratErr (RTerm ⊥)
RWerr-less act d = RWerr act d >>= return ∘ p2 ∘ p2
----------------
-- By Tactics --
----------------
-- Standard debugging version.
by' : Name → List (Arg AgType) → AgTerm → (RWData × UData × RTerm ⊥)
by' act ctx goal with runErr (make-RWData act goal ctx >>= RWerr act)
...| i1 err = RW-error err
...| i2 term = term
-- This function is only being used to pass the context
-- given by the 'tactic' keyword around.
by : Name → List (Arg AgType) → AgTerm → AgTerm
by act ctx goal = R2AgTerm ∘ p2 ∘ p2 $ (by' act ctx goal)
-- Handling multiple actions, naive way.
-- by+ is pretty much foldM (<|>) error (by ⋯),
-- where (<|>) is the usual alternative from Error Monad.
by+ : List Name → List (Arg AgType) → AgTerm → AgTerm
by+ [] _ _ = RW-error "No suitable action"
by+ (a ∷ as) ctx goal with runErr (make-RWData a goal ctx >>= RWerr a)
...| i1 _ = by+ as ctx goal
...| i2 t = R2AgTerm ∘ p2 ∘ p2 $ t
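-- join-tr folds the proof terms of the individual steps by repeatedly
-- applying tr, which is expected to name a transitivity-like lemma;
-- the empty case returns ivar 0 as a fallback.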
join-tr : Name → List (RTerm ⊥) → RTerm ⊥
join-tr _ [] = ivar 0
join-tr tr (x ∷ l) = foldr (λ h r → rapp (rdef tr) (r ∷ h ∷ [])) x l
-- Handling multiple goals.
by*-err : Name → List Name → List (Arg AgType) → AgTerm → Err StratErr AgTerm
by*-err tr acts ctx goal
= make-RWData* acts goal ctx
>>= λ l → mapM (uncurry RWerr-less) (zip acts l)
>>= return ∘ R2AgTerm ∘ join-tr tr
where
unzip : {A B : Set} → List (A × B) → List A × List B
unzip [] = [] , []
unzip ((a , b) ∷ l) with unzip l
...| la , lb = a ∷ la , b ∷ lb
by*-tactic : Set
by*-tactic = List Name → List (Arg AgType) → AgTerm → AgTerm
by* : Name → by*-tactic
by* tr acts ctx goal with runErr (by*-err tr acts ctx goal)
...| i1 err = RW-error err
...| i2 res = res
------------------------------
-- Adding Tries to the cake --
------------------------------
-- The proper way to handle multiple actions.
open import RW.Data.RTrie
module Auto
(bt : RTrie) -- which trie to use,
(newHd : RTermName → RTermName) -- given the goal head, how to build the head for the action.
where
open import RW.Language.RTermTrie
our-strategy : RTermName → Name → UData → Err StratErr (RTerm ⊥)
our-strategy goal
= maybe
TStrat.how
(const $ const $ i1 no-strat)
$ filter-db db
where
no-strat : StratErr
no-strat = NoTStrat goal (newHd goal)
filter-db : TStratDB → Maybe TStrat
filter-db [] = nothing
filter-db (s ∷ ss) with TStrat.when s goal (newHd goal)
...| false = filter-db ss
...| true = just s
auto-internal : List (Arg AgType) → AgTerm → Err StratErr AgTerm
auto-internal _ goal with forceBinary $ Ag2RTerm goal
...| nothing = i1 $ Custom "non-binary goal"
...| just (hd , g₁ , g₂)
= let
options = search-action (newHd hd) (hd , g₁ , g₂) bt
strat = uncurry $ our-strategy hd
err     = Custom "No option was successful"
in try-all strat err options >>= return ∘ R2AgTerm
auto : List (Arg AgType) → AgTerm → AgTerm
auto ctx goal with runErr (auto-internal ctx goal)
...| i1 err = RW-error err
...| i2 r = r
|
#' Get document revisions.
#'
#' @export
#' @template all
#' @template return
#' @param dbname Database name
#' @param docid Document ID
#' @param simplify (logical) Simplify to character vector of revision ids.
#' If `FALSE`, gives back availability info too. Default: `TRUE`
#' @examples \dontrun{
#' user <- Sys.getenv("COUCHDB_TEST_USER")
#' pwd <- Sys.getenv("COUCHDB_TEST_PWD")
#' (x <- Cushion$new(user=user, pwd=pwd))
#'
#' if ("sofadb" %in% db_list(x)) {
#' db_delete(x, dbname = "sofadb")
#' }
#' db_create(x, dbname = "sofadb")
#'
#' doc1 <- '{"name": "drink", "beer": "IPA", "score": 5}'
#' doc_create(x, dbname="sofadb", doc1, docid="abeer")
#' doc_create(x, dbname="sofadb", doc1, docid="morebeer", as='json')
#'
#' db_revisions(x, dbname="sofadb", docid="abeer")
#' db_revisions(x, dbname="sofadb", docid="abeer", simplify=FALSE)
#' db_revisions(x, dbname="sofadb", docid="abeer", as='json')
#' db_revisions(x, dbname="sofadb", docid="abeer", simplify=FALSE, as='json')
#' }
db_revisions <- function(cushion, dbname, docid, simplify=TRUE,
as='list', ...) {
check_cushion(cushion)
call_ <- sprintf("%s/%s/%s", cushion$make_url(), dbname, docid)
tmp <- sofa_GET(call_, as = "list", query = list(revs_info = 'true'),
cushion$get_headers(), cushion$get_auth(), ...)
revs <- if (simplify) {
vapply(tmp$`_revs_info`, "[[", "", "rev")
} else {
tmp$`_revs_info`
}
if (as == 'json') jsonlite::toJSON(revs) else revs
}
|
After performing in a number of heavy metal bands in high school, Townsend was discovered by a record label in 1993 and was asked to perform lead vocals on Steve Vai's album Sex & Religion. After recording and touring with Vai, Townsend was discouraged by what he found in the music industry, and vented his anger on the solo album Heavy as a Really Heavy Thing released under the pseudonym Strapping Young Lad. He soon assembled a band under the name, and released the critically acclaimed City in 1997. Since then, he has released three more studio albums with Strapping Young Lad, along with solo material released under his own independent record label, HevyDevy Records.
|
##
## Zero-inflated Poissons predicting number of gifts and amount of farm help received
##
## Supplementary Table 1
##
source("init.r")
library(pscl)
library(igraph)
library(MuMIn)
##########################################################################################
## Calculate in-degrees on various networks
##
prepend_ego = function(x) paste0("Ego.", x) # helper function to add "Ego." to start of column names
nodes.hh = hh %>%
rename_all(prepend_ego)
# remove village 6
nodes.hh = subset(nodes.hh, Ego.VillageID != 6)
##
## Individual gifts
##
dat = subset(hh.dyads, TotalGifts.ind > 0 & Ego.VillageID != 6, select=c(Ego.HH, Alter.HH, TotalGifts.ind))
g.gifts_ind = graph.data.frame(dat, vertices=nodes.hh, directed=T)
# calculate in degree and put into houses dataframe
deg.in = degree(g.gifts_ind, mode="in")
nodes.hh$gift.deg.in = deg.in[ as.character(nodes.hh$Ego.HH) ]
##
## Farm help
##
dat = subset(hh.dyads, HelpObserved > 0 & Ego.VillageID != 6, select=c(Ego.HH, Alter.HH))
g.farm = graph.data.frame(dat, vertices=nodes.hh, directed=T)
# calculate in degree and put into houses dataframe
deg.in = degree(g.farm, mode="in")
nodes.hh$farm.deg.in = deg.in[ as.character(nodes.hh$Ego.HH) ]
##########################################################################################
## What predicts in-degree for farm work and gifts
##
d.hh = nodes.hh %>%
dplyr::select(Ego.HH, farm.deg.in, gift.deg.in, Ego.Size, Ego.WealthRank, Ego.VillageID, Ego.Sex, Ego.zhubo1) %>%
distinct() %>%
na.omit()
# zero-inflated poissons
# null
m.null = zeroinfl(farm.deg.in ~ 1, data=d.hh)
m.gift.null = zeroinfl(gift.deg.in ~ 1, data=d.hh)
# control
m.control = zeroinfl(farm.deg.in ~ Ego.Size + Ego.WealthRank + Ego.Sex + factor(Ego.VillageID), data=d.hh)
m.gift.control = zeroinfl(gift.deg.in ~ Ego.Size + Ego.WealthRank + Ego.Sex + factor(Ego.VillageID), data=d.hh)
# full
m.zhubo = zeroinfl(farm.deg.in ~ factor(Ego.zhubo1) + Ego.Size + Ego.WealthRank + Ego.Sex + factor(Ego.VillageID), data=d.hh)
m.gift.zhubo = zeroinfl(gift.deg.in ~ factor(Ego.zhubo1) + Ego.Size + Ego.WealthRank + Ego.Sex + factor(Ego.VillageID), data=d.hh)
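# Note: with a one-part formula, pscl::zeroinfl() uses the same regressors for
# the count and the zero-inflation components; a two-part formula such as
# zeroinfl(farm.deg.in ~ Ego.Size | Ego.WealthRank, data = d.hh) would model
# them separately (shown for illustration only).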
#################################################################################
## model selection
##
##
## farm work
##
best.models = model.sel(m.null, m.control, m.zhubo)
top.models = mget(row.names(subset(best.models, delta<2)))
best.avg = model.avg(top.models, revised.var=T, beta=F)
# summary(best.avg)
# model-averaged coefficients with CIs (exponentiate for incidence risk ratios)
zip = cbind( data.frame((coef(best.avg, full=T))), (confint(best.avg, full=T, digits=3)) )
names(zip) = c("Farm work B", "Farm work lwr", "Farm work upr")
round(zip, 3)
write.csv(best.models, file="in-degree - candidate models - farm.csv", row.names=T)
##
## gifts
##
best.models = model.sel(m.gift.null, m.gift.control, m.gift.zhubo)
top.models = mget(row.names(subset(best.models, delta<2)))
best.avg = model.avg(top.models, revised.var=T, beta=F)
# summary(best.avg)
# model-averaged coefficients with CIs (exponentiate for incidence risk ratios)
zip = cbind(zip, data.frame((coef(best.avg, full=T))), (confint(best.avg, full=T, digits=3)) )
names(zip)[4:6] = c("Gifts B", "Gifts lwr", "Gifts upr")
write.csv(best.models, file="in-degree - candidate models - gift.csv", row.names=T)
write.csv(zip, file="zip - parameter estimates.csv", row.names=T)
|
/-
Copyright (c) 2016 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.meta.tactic
import Mathlib.Lean3Lib.init.meta.attribute
import Mathlib.Lean3Lib.init.meta.constructor_tactic
import Mathlib.Lean3Lib.init.meta.relation_tactics
import Mathlib.Lean3Lib.init.meta.occurrences
import Mathlib.Lean3Lib.init.data.option.basic
universes l
namespace Mathlib
def simp.default_max_steps : ℕ :=
  10000000
/-- Prefix the given `attr_name` with `"simp_attr"`. -/
/-- Simp lemmas are used by the "simplifier" family of tactics.
`simp_lemmas` is essentially a pair of tables `rb_map (expr_type × name) (priority_list simp_lemma)`.
One of the tables is for congruences and one is for everything else.
An individual simp lemma is:
- A kind which can be `Refl`, `Simp` or `Congr`.
- A pair of `expr`s `l ~> r`. The rb map is indexed by the name of `get_app_fn(l)`.
- A proof that `l = r` or `l ↔ r`.
- A list of the metavariables that must be filled before the proof can be applied.
- A priority number
-/
/-- Make a new table of simp lemmas -/
/-- Merge the simp_lemma tables. -/
/-- Remove the given lemmas from the table. Use the names of the lemmas. -/
/-- Makes the default simp_lemmas table which is composed of all lemmas tagged with `simp`. -/
/-- Add a simplification lemma by an expression `p`. Some conditions on `p` must hold for it to be added, see list below.
If your lemma is not being added, you can see the reasons by setting `set_option trace.simp_lemmas true`.
- `p` must have the type `Π (h₁ : _) ... (hₙ : _), LHS ~ RHS` for some reflexive, transitive relation (usually `=`).
- Any of the hypotheses `hᵢ` should either be present in `LHS` or otherwise a `Prop` or a typeclass instance.
- `LHS` should not occur within `RHS`.
- `LHS` should not occur within a hypothesis `hᵢ`.
-/
/-- Add a simplification lemma by its declaration name. See `simp_lemmas.add` for more information. -/
/-- Adds a congruence simp lemma to simp_lemmas.
A congruence simp lemma is a lemma that breaks the simplification down into separate problems.
For example, to simplify `a ∧ b` to `c ∧ d`, we should try to simp `a` to `c` and `b` to `d`.
For examples of congruence simp lemmas look for lemmas with the `@[congr]` attribute.
```lean
lemma if_simp_congr ... (h_c : b ↔ c) (h_t : x = u) (h_e : y = v) : ite b x y = ite c u v := ...
lemma imp_congr_right (h : a → (b ↔ c)) : (a → b) ↔ (a → c) := ...
lemma and_congr (h₁ : a ↔ c) (h₂ : b ↔ d) : (a ∧ b) ↔ (c ∧ d) := ...
```
-/
/-- Add expressions to a set of simp lemmas using `simp_lemmas.add`.
This is the new version of `simp_lemmas.append`,
which also allows you to set the `symm` flag.
-/
/-- Add expressions to a set of simp lemmas using `simp_lemmas.add`.
This is the backwards-compatibility version of `simp_lemmas.append_with_symm`,
and sets all `symm` flags to `ff`.
-/
/-- `simp_lemmas.rewrite s e prove R` apply a simplification lemma from 's'
- 'e' is the expression to be "simplified"
- 'prove' is used to discharge proof obligations.
- 'r' is the equivalence relation being used (e.g., 'eq', 'iff')
- 'md' is the transparency; how aggressively should the simplifier perform reductions.
Result (new_e, pr) is the new expression 'new_e' and a proof (pr : e R new_e) -/
/-- `simp_lemmas.drewrite s e` tries to rewrite 'e' using only refl lemmas in 's' -/
namespace tactic
/- Remark: `transform` should not change the target. -/
/-- Revert a local constant, change its type using `transform`. -/
/-- `get_eqn_lemmas_for deps d` returns the automatically generated equational lemmas for definition d.
If deps is tt, then lemmas for automatically generated auxiliary declarations used to define d are also included. -/
structure dsimp_config where
md : transparency
max_steps : ℕ
canonize_instances : Bool
single_pass : Bool
fail_if_unchanged : Bool
eta : Bool
zeta : Bool
beta : Bool
proj : Bool
iota : Bool
unfold_reducible : Bool
memoize : Bool
end tactic
/-- (Definitional) Simplify the given expression using *only* reflexivity equality lemmas from the given set of lemmas.
The resulting expression is definitionally equal to the input.
The list `u` contains definitions to be delta-reduced, and projections to be reduced. -/
namespace tactic
/- Remark: the configuration parameters `cfg.md` and `cfg.eta` are ignored by this tactic. -/
/- Remark: we use transparency.instances by default to make sure that we
can unfold projections of type classes. Example:
(@has_add.add nat nat.has_add a b)
-/
/-- Tries to unfold `e` if it is a constant or a constant application.
Remark: this is not a recursive procedure. -/
structure dunfold_config extends dsimp_config where
/- Remark: in principle, dunfold can be implemented on top of dsimp. We don't do it for
performance reasons. -/
structure delta_config where
max_steps : ℕ
visit_instances : Bool
/-- Delta reduce the given constant names -/
structure unfold_proj_config extends dsimp_config where
/-- If `e` is a projection application, try to unfold it, otherwise fail. -/
structure simp_config where
max_steps : ℕ
contextual : Bool
lift_eq : Bool
canonize_instances : Bool
canonize_proofs : Bool
use_axioms : Bool
zeta : Bool
beta : Bool
eta : Bool
proj : Bool
iota : Bool
iota_eqn : Bool
constructor_eq : Bool
single_pass : Bool
fail_if_unchanged : Bool
memoize : Bool
trace_lemmas : Bool
/--
`simplify s e cfg r prove` simplify `e` using `s` using bottom-up traversal.
`discharger` is a tactic for discharging new subgoals created by the simplifier.
If it fails, the simplifier tries to discharge the subgoal by simplifying it to `true`.
The parameter `to_unfold` specifies definitions that should be delta-reduced,
and projection applications that should be unfolded.
-/
/--
`ext_simplify_core a c s discharger pre post r e`:
- `a : α` - initial user data
- `c : simp_config` - simp configuration options
- `s : simp_lemmas` - the set of simp_lemmas to use. Remark: the simplification lemmas are not applied automatically like in the simplify tactic. The caller must use them at pre/post.
- `discharger : α → tactic α` - tactic for discharging hypotheses in conditional rewriting rules. The argument 'α' is the current user data.
- `pre a s r p e` is invoked before visiting the children of subterm 'e'.
+ arguments:
- `a` is the current user data
- `s` is the updated set of lemmas if 'contextual' is `tt`,
- `r` is the simplification relation being used,
- `p` is the "parent" expression (if there is one).
- `e` is the current subexpression in question.
+ if it succeeds the result is `(new_a, new_e, new_pr, flag)` where
- `new_a` is the new value for the user data
- `new_e` is a new expression s.t. `r e new_e`
- `new_pr` is a proof for `r e new_e`, If it is none, the proof is assumed to be by reflexivity
- `flag` if tt `new_e` children should be visited, and `post` invoked.
- `(post a s r p e)` is invoked after visiting the children of subterm `e`,
The output is similar to `(pre a s r p e)`, but the 'flag' indicates whether the new expression should be revisited or not.
- `r` is the simplification relation. Usually `=` or `↔`.
- `e` is the input expression to be simplified.
The method returns `(a,e,pr)` where
- `a` is the final user data
- `e` is the new expression
- `pr` is the proof that the given expression equals the input expression.
Note that `ext_simplify_core` will succeed even if `pre` and `post` fail, as failures are used to indicate that the method should move on to the next subterm.
If it is desirable to propagate errors from `pre`, they can be propagated through the "user data".
An easy way to do this is to call `tactic.capture (do ...)` in the parts of `pre`/`post` where errors matter, and then use `tactic.unwrap a` on the result.
Additionally, `ext_simplify_core` does not propagate changes made to the tactic state by `pre` and `post`.
If it is desirable to propagate changes to the tactic state in addition to errors, use `tactic.resume` instead of `tactic.unwrap`.
-/
structure simp_intros_config extends simp_config where
use_hyps : Bool
end Mathlib
|
-- Shadowing is allowed.
module Shadow where
module M (A : Set) where
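-- The clause variable A below shadows the module parameter A.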
id : Set -> Set
id A = A
|
section "Skew Binomial Heaps"
theory SkewBinomialHeap
imports Main "HOL-Library.Multiset" "Eval_Base.Eval_Base"
begin
text \<open>Skew Binomial Queues as specified by Brodal and Okasaki \cite{BrOk96}
are a data structure for priority queues with worst-case $O(1)$ {\em findMin},
{\em insert}, and {\em meld} operations, and a worst-case logarithmic
{\em deleteMin} operation.
They are derived from priority queues in three steps:
\begin{enumerate}
\item Skew binomial trees are used to eliminate the possibility of
cascading links during insert operations. This reduces the complexity
of an insert operation to $O(1)$.
\item The current minimal element is cached. This approach, known as
{\em global root}, reduces the cost of a {\em findMin}-operation to
O(1).
\item By allowing skew binomial queues to contain skew binomial queues,
the cost for meld-operations is reduced to $O(1)$. This approach
is known as {\em data-structural bootstrapping}.
\end{enumerate}
In this theory, we combine Steps~2 and 3, i.e. we first implement skew binomial
queues, and then bootstrap them. The bootstrapping implicitly introduces a
global root, such that we also get a constant time findMin operation.
\<close>
locale SkewBinomialHeapStruc_loc
begin
subsection "Datatype"
datatype ('e, 'a) SkewBinomialTree =
Node (val: 'e) (prio: "'a::linorder") (rank: nat) (children: "('e , 'a) SkewBinomialTree list")
type_synonym ('e, 'a) SkewBinomialQueue = "('e, 'a::linorder) SkewBinomialTree list"
subsubsection "Abstraction to Multisets"
text \<open>Returns a multiset with all (element, priority) pairs from a queue\<close>
fun tree_to_multiset
:: "('e, 'a::linorder) SkewBinomialTree \<Rightarrow> ('e \<times> 'a) multiset"
and queue_to_multiset
:: "('e, 'a::linorder) SkewBinomialQueue \<Rightarrow> ('e \<times> 'a) multiset" where
"tree_to_multiset (Node e a r ts) = {#(e,a)#} + queue_to_multiset ts" |
"queue_to_multiset [] = {#}" |
"queue_to_multiset (t#q) = tree_to_multiset t + queue_to_multiset q"
lemma ttm_children: "tree_to_multiset t =
{#(val t,prio t)#} + queue_to_multiset (children t)"
by (cases t) auto
(*lemma qtm_cons[simp]: "queue_to_multiset (t#q)
= queue_to_multiset q + tree_to_multiset t"
apply(induct q arbitrary: t)
apply simp
apply(auto simp add: union_ac)
done*)
lemma qtm_conc[simp]: "queue_to_multiset (q@q')
= queue_to_multiset q + queue_to_multiset q'"
apply2 (induct q) by(auto simp add: union_ac)
subsubsection "Invariant"
text \<open>Link two trees of rank $r$ to a new tree of rank $r+1$\<close>
fun link :: "('e, 'a::linorder) SkewBinomialTree \<Rightarrow> ('e, 'a) SkewBinomialTree \<Rightarrow>
('e, 'a) SkewBinomialTree" where
"link (Node e1 a1 r1 ts1) (Node e2 a2 r2 ts2) =
(if a1\<le>a2
then (Node e1 a1 (Suc r1) ((Node e2 a2 r2 ts2)#ts1))
else (Node e2 a2 (Suc r2) ((Node e1 a1 r1 ts1)#ts2)))"
text \<open>Link two trees of rank $r$ and a new element to a new tree of
rank $r+1$\<close>
fun skewlink :: "'e \<Rightarrow> 'a::linorder \<Rightarrow> ('e, 'a) SkewBinomialTree \<Rightarrow>
('e, 'a) SkewBinomialTree \<Rightarrow> ('e, 'a) SkewBinomialTree" where
"skewlink e a t t' = (if a \<le> (prio t) \<and> a \<le> (prio t')
then (Node e a (Suc (rank t)) [t,t'])
else (if (prio t) \<le> (prio t')
then
Node (val t) (prio t) (Suc (rank t)) (Node e a 0 [] # t' # children t)
else
Node (val t') (prio t') (Suc (rank t')) (Node e a 0 [] # t # children t')))"
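text \<open>For illustration: skew-linking a new element $(e,a)$ with two rank-$0$
  singleton trees $t$ and $t'$ yields, if $a$ is minimal, the rank-$1$ tree
  \<open>Node e a 1 [t, t']\<close>; otherwise the root with the smaller priority is kept
  on top and the new element is added as an extra rank-$0$ child.\<close>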
text \<open>
The invariant for trees states that a tree labeled rank $0$ has no children,
and a tree labeled rank $r + 1$ is the result of an ordinary link or
a skew link of two trees with rank $r$.\<close>
function tree_invar :: "('e, 'a::linorder) SkewBinomialTree \<Rightarrow> bool" where
"tree_invar (Node e a 0 ts) = (ts = [])" |
"tree_invar (Node e a (Suc r) ts) = (\<exists> e1 a1 ts1 e2 a2 ts2 e' a'.
tree_invar (Node e1 a1 r ts1) \<and> tree_invar (Node e2 a2 r ts2) \<and>
((Node e a (Suc r) ts) = link (Node e1 a1 r ts1) (Node e2 a2 r ts2) \<or>
(Node e a (Suc r) ts) = skewlink e' a' (Node e1 a1 r ts1) (Node e2 a2 r ts2)))"
by pat_completeness auto
termination
apply(relation "measure rank")
apply auto
done
text \<open>A heap satisfies the invariant, if all contained trees satisfy the
invariant, the ranks of the trees in the heap are distinct, except that the
first two trees may have same rank, and the ranks are ordered in ascending
order.\<close>
text \<open>First part: All trees inside the queue satisfy the invariant.\<close>
definition queue_invar :: "('e, 'a::linorder) SkewBinomialQueue \<Rightarrow> bool" where
"queue_invar q \<equiv> (\<forall>t \<in> set q. tree_invar t)"
lemma queue_invar_simps[simp]:
"queue_invar []"
"queue_invar (t#q) \<longleftrightarrow> tree_invar t \<and> queue_invar q"
"queue_invar (q@q') \<longleftrightarrow> queue_invar q \<and> queue_invar q'"
"queue_invar q \<Longrightarrow> t\<in>set q \<Longrightarrow> tree_invar t"
unfolding queue_invar_def by auto
text \<open>Second part: The ranks of the trees in the heap are distinct,
except that the first two trees may have same rank, and the ranks are
ordered in ascending order.\<close>
text \<open>For tail of queue\<close>
fun rank_invar :: "('e, 'a::linorder) SkewBinomialQueue \<Rightarrow> bool" where
"rank_invar [] = True" |
"rank_invar [t] = True" |
"rank_invar (t # t' # bq) = (rank t < rank t' \<and> rank_invar (t' # bq))"
text \<open>For whole queue: First two elements may have same rank\<close>
fun rank_skew_invar :: "('e, 'a::linorder) SkewBinomialQueue \<Rightarrow> bool" where
"rank_skew_invar [] = True" |
"rank_skew_invar [t] = True" |
"rank_skew_invar (t # t' # bq) = ((rank t \<le> rank t') \<and> rank_invar (t' # bq))"
definition tail_invar :: "('e, 'a::linorder) SkewBinomialQueue \<Rightarrow> bool" where
"tail_invar bq = (queue_invar bq \<and> rank_invar bq)"
definition invar :: "('e, 'a::linorder) SkewBinomialQueue \<Rightarrow> bool" where
"invar bq = (queue_invar bq \<and> rank_skew_invar bq)"
lemma invar_empty[simp]:
"invar []"
"tail_invar []"
unfolding invar_def tail_invar_def by auto
lemma invar_tail_invar:
"invar (t # bq) \<Longrightarrow> tail_invar bq"
unfolding invar_def tail_invar_def
by (cases bq) simp_all
lemma link_mset[simp]: "tree_to_multiset (link t1 t2)
= tree_to_multiset t1 +tree_to_multiset t2"
by (cases t1, cases t2, auto simp add:union_ac)
lemma link_tree_invar: "\<lbrakk>tree_invar t1; tree_invar t2; rank t1 = rank t2\<rbrakk> \<Longrightarrow>
tree_invar (link t1 t2)"
by (cases t1, cases t2, simp, blast)
lemma skewlink_mset[simp]: "tree_to_multiset (skewlink e a t1 t2)
= {# (e,a) #} + tree_to_multiset t1 + tree_to_multiset t2"
by (cases t1, cases t2, auto simp add:union_ac)
lemma skewlink_tree_invar: "\<lbrakk>tree_invar t1; tree_invar t2; rank t1 = rank t2\<rbrakk> \<Longrightarrow>
tree_invar (skewlink e a t1 t2)"
by (cases t1, cases t2, simp, blast)
lemma rank_link: "rank t = rank t' \<Longrightarrow> rank (link t t') = rank t + 1"
apply (cases t)
apply (cases t')
apply(auto)
done
lemma rank_skew_rank_invar: "rank_skew_invar (t # bq) \<Longrightarrow> rank_invar bq"
by (cases bq) simp_all
lemma rank_invar_rank_skew:
assumes "rank_invar q"
shows "rank_skew_invar q"
proof (cases q)
case Nil
then show ?thesis by simp
next
case (Cons _ list)
with assms show ?thesis
by (cases list) simp_all
qed
lemma rank_invar_cons_up:
"\<lbrakk>rank_invar (t # bq); rank t' < rank t\<rbrakk> \<Longrightarrow> rank_invar (t' # t # bq)"
by simp
lemma rank_skew_cons_up:
"\<lbrakk>rank_invar (t # bq); rank t' \<le> rank t\<rbrakk> \<Longrightarrow> rank_skew_invar (t' # t # bq)"
by simp
lemma rank_invar_cons_down: "rank_invar (t # bq) \<Longrightarrow> rank_invar bq"
by (cases bq) simp_all
lemma rank_invar_hd_cons:
"\<lbrakk>rank_invar bq; rank t < rank (hd bq)\<rbrakk> \<Longrightarrow> rank_invar (t # bq)"
apply(cases bq)
apply(auto)
done
lemma tail_invar_cons_up:
"\<lbrakk>tail_invar (t # bq); rank t' < rank t; tree_invar t'\<rbrakk>
\<Longrightarrow> tail_invar (t' # t # bq)"
unfolding tail_invar_def
apply (cases bq)
apply simp_all
done
lemma tail_invar_cons_up_invar:
"\<lbrakk>tail_invar (t # bq); rank t' \<le> rank t; tree_invar t'\<rbrakk> \<Longrightarrow> invar (t' # t # bq)"
by (cases bq) (simp_all add: invar_def tail_invar_def)
lemma tail_invar_cons_down:
"tail_invar (t # bq) \<Longrightarrow> tail_invar bq"
unfolding tail_invar_def
by (cases bq) simp_all
lemma tail_invar_app_single:
"\<lbrakk>tail_invar bq; \<forall>t \<in> set bq. rank t < rank t'; tree_invar t'\<rbrakk>
\<Longrightarrow> tail_invar (bq @ [t'])"
proof2 (induct bq)
case Nil
then show ?case by (simp add: tail_invar_def)
next
case (Cons a bq)
from \<open>tail_invar (a # bq)\<close> have "tail_invar bq"
by (rule tail_invar_cons_down)
with Cons have "tail_invar (bq @ [t'])" by simp
with Cons show ?case
by (cases bq) (simp_all add: tail_invar_cons_up tail_invar_def)
qed
lemma invar_app_single:
"\<lbrakk>invar bq; \<forall>t \<in> set bq. rank t < rank t'; tree_invar t'\<rbrakk>
\<Longrightarrow> invar (bq @ [t'])"
proof2 (induct bq)
case Nil
then show ?case by (simp add: invar_def)
next
case (Cons a bq)
show ?case
proof (cases bq)
case Nil
with Cons show ?thesis by (simp add: invar_def)
next
case Cons': (Cons ta qa)
from Cons(2) have a1: "tail_invar bq" by (rule invar_tail_invar)
from Cons(3) have a2: "\<forall>t\<in>set bq. rank t < rank t'" by simp
from a1 a2 Cons(4) tail_invar_app_single[of "bq" "t'"]
have "tail_invar (bq @ [t'])" by simp
with Cons Cons' show ?thesis
by (simp_all add: tail_invar_cons_up_invar invar_def tail_invar_def)
qed
qed
lemma invar_children:
assumes "tree_invar ((Node e a r ts)::(('e, 'a::linorder) SkewBinomialTree))"
shows "queue_invar ts" using assms
proof2 (induct r arbitrary: e a ts)
case 0
then show ?case by simp
next
case (Suc r)
from Suc(2) obtain e1 a1 ts1 e2 a2 ts2 e' a' where
inv_t1: "tree_invar (Node e1 a1 r ts1)" and
inv_t2: "tree_invar (Node e2 a2 r ts2)" and
link_or_skew:
"((Node e a (Suc r) ts) = link (Node e1 a1 r ts1) (Node e2 a2 r ts2)
\<or> (Node e a (Suc r) ts)
= skewlink e' a' (Node e1 a1 r ts1) (Node e2 a2 r ts2))"
by (simp only: tree_invar.simps) blast
from Suc(1)[OF inv_t1] inv_t2
have case1: "queue_invar ((Node e2 a2 r ts2) # ts1)" by simp
from Suc(1)[OF inv_t2] inv_t1
have case2: "queue_invar ((Node e1 a1 r ts1) # ts2)" by simp
show ?case
proof (cases "(Node e a (Suc r) ts) = link (Node e1 a1 r ts1) (Node e2 a2 r ts2)")
case True
hence "ts =
(if a1 \<le> a2
then (Node e2 a2 r ts2) # ts1
else (Node e1 a1 r ts1) # ts2)" by auto
with case1 case2 show ?thesis by simp
next
case False
with link_or_skew
have "Node e a (Suc r) ts =
skewlink e' a' (Node e1 a1 r ts1) (Node e2 a2 r ts2)" by simp
hence "ts =
(if a' \<le> a1 \<and> a' \<le> a2
then [(Node e1 a1 r ts1),(Node e2 a2 r ts2)]
else (if a1 \<le> a2
then (Node e' a' 0 []) # (Node e2 a2 r ts2) # ts1
else (Node e' a' 0 []) # (Node e1 a1 r ts1) # ts2))" by auto
with case1 case2 show ?thesis by simp
qed
qed
subsubsection "Heap Order"
fun heap_ordered :: "('e, 'a::linorder) SkewBinomialTree \<Rightarrow> bool" where
"heap_ordered (Node e a r ts)
= (\<forall>x \<in> set_mset (queue_to_multiset ts). a \<le> snd x)"
text \<open>The invariant for trees implies heap order.\<close>
lemma tree_invar_heap_ordered:
fixes t :: "('e, 'a::linorder) SkewBinomialTree"
assumes "tree_invar t"
shows "heap_ordered t"
proof (cases t)
case (Node e a nat list)
with assms show ?thesis
proof2 (induct nat arbitrary: t e a list)
case 0
then show ?case by simp
next
case (Suc nat)
from Suc(2,3) obtain t1 e1 a1 ts1 t2 e2 a2 ts2 e' a' where
inv_t1: "tree_invar t1" and
inv_t2: "tree_invar t2" and
link_or_skew: "t = link t1 t2 \<or> t = skewlink e' a' t1 t2" and
eq_t1[simp]: "t1 = (Node e1 a1 nat ts1)" and
eq_t2[simp]: "t2 = (Node e2 a2 nat ts2)"
by (simp only: tree_invar.simps) blast
note heap_t1 = Suc(1)[OF inv_t1 eq_t1]
note heap_t2 = Suc(1)[OF inv_t2 eq_t2]
from link_or_skew heap_t1 heap_t2 show ?case
by (cases "t = link t1 t2") auto
qed
qed
(***********************************************************)
(***********************************************************)
subsubsection "Height and Length"
text \<open>
Although complexity of HOL-functions cannot be expressed within
HOL, we can express the height and length of a binomial heap.
By showing that both, height and length, are logarithmic in the number
of contained elements, we give strong evidence that our functions have
logarithmic complexity in the number of elements.
\<close>
text \<open>Height of a tree and queue\<close>
fun height_tree :: "('e, ('a::linorder)) SkewBinomialTree \<Rightarrow> nat" and
height_queue :: "('e, ('a::linorder)) SkewBinomialQueue \<Rightarrow> nat"
where
"height_tree (Node e a r ts) = height_queue ts" |
"height_queue [] = 0" |
"height_queue (t # ts) = max (Suc (height_tree t)) (height_queue ts)"
lemma link_length: "size (tree_to_multiset (link t1 t2)) =
size (tree_to_multiset t1) + size (tree_to_multiset t2)"
apply(cases t1)
apply(cases t2)
apply simp
done
lemma tree_rank_estimate_upper:
"tree_invar (Node e a r ts) \<Longrightarrow>
size (tree_to_multiset (Node e a r ts)) \<le> (2::nat)^(Suc r) - 1"
proof2 (induct r arbitrary: e a ts)
case 0
then show ?case by simp
next
case (Suc r)
from Suc(2) obtain e1 a1 ts1 e2 a2 ts2 e' a' where
link:
"(Node e a (Suc r) ts) = link (Node e1 a1 r ts1) (Node e2 a2 r ts2) \<or>
(Node e a (Suc r) ts) = skewlink e' a' (Node e1 a1 r ts1) (Node e2 a2 r ts2)"
and inv1: "tree_invar (Node e1 a1 r ts1)"
and inv2: "tree_invar (Node e2 a2 r ts2)"
by simp blast
note iv1 = Suc(1)[OF inv1]
note iv2 = Suc(1)[OF inv2]
have "(2::nat)^r - 1 + (2::nat)^r - 1 \<le> (2::nat)^(Suc r) - 1" by simp
with link Suc show ?case
apply (cases "Node e a (Suc r) ts = link (Node e1 a1 r ts1) (Node e2 a2 r ts2)")
using iv1 iv2 apply (simp del: link.simps)
using iv1 iv2 apply (simp del: skewlink.simps)
done
qed
lemma tree_rank_estimate_lower:
"tree_invar (Node e a r ts) \<Longrightarrow>
size (tree_to_multiset (Node e a r ts)) \<ge> (2::nat)^r"
proof2 (induct r arbitrary: e a ts)
case 0
then show ?case by simp
next
case (Suc r)
from Suc(2) obtain e1 a1 ts1 e2 a2 ts2 e' a' where
link:
"(Node e a (Suc r) ts) = link (Node e1 a1 r ts1) (Node e2 a2 r ts2) \<or>
(Node e a (Suc r) ts) = skewlink e' a' (Node e1 a1 r ts1) (Node e2 a2 r ts2)"
and inv1: "tree_invar (Node e1 a1 r ts1)"
and inv2: "tree_invar (Node e2 a2 r ts2)"
by simp blast
note iv1 = Suc(1)[OF inv1]
note iv2 = Suc(1)[OF inv2]
have "(2::nat)^r - 1 + (2::nat)^r - 1 \<le> (2::nat)^(Suc r) - 1" by simp
with link Suc show ?case
apply (cases "Node e a (Suc r) ts = link (Node e1 a1 r ts1) (Node e2 a2 r ts2)")
using iv1 iv2 apply (simp del: link.simps)
using iv1 iv2 apply (simp del: skewlink.simps)
done
qed
lemma tree_rank_height:
"tree_invar (Node e a r ts) \<Longrightarrow> height_tree (Node e a r ts) = r"
proof2 (induct r arbitrary: e a ts)
case 0
then show ?case by simp
next
case (Suc r)
from Suc(2) obtain e1 a1 ts1 e2 a2 ts2 e' a' where
link:
"(Node e a (Suc r) ts) = link (Node e1 a1 r ts1) (Node e2 a2 r ts2) \<or>
(Node e a (Suc r) ts) = skewlink e' a' (Node e1 a1 r ts1) (Node e2 a2 r ts2)"
and inv1: "tree_invar (Node e1 a1 r ts1)"
and inv2: "tree_invar (Node e2 a2 r ts2)"
by simp blast
note iv1 = Suc(1)[OF inv1]
note iv2 = Suc(1)[OF inv2]
from Suc(2) link show ?case
apply (cases "Node e a (Suc r) ts = link (Node e1 a1 r ts1) (Node e2 a2 r ts2)")
apply (cases "a1 \<le> a2")
using iv1 iv2 apply simp
using iv1 iv2 apply simp
apply (cases "a' \<le> a1 \<and> a' \<le> a2")
apply (simp only: height_tree.simps)
using iv1 iv2 apply simp
apply (cases "a1 \<le> a2")
using iv1 iv2
apply (simp del: tree_invar.simps link.simps)
using iv1 iv2
apply (simp del: tree_invar.simps link.simps)
done
qed
text \<open>A skew binomial tree of height $h$ contains at most $2^{h+1} - 1$
elements\<close>
theorem tree_height_estimate_upper:
"tree_invar t \<Longrightarrow>
size (tree_to_multiset t) \<le> (2::nat)^(Suc (height_tree t)) - 1"
apply (cases t, simp only:)
apply (frule tree_rank_estimate_upper)
apply (frule tree_rank_height)
apply (simp only: )
done
text \<open>A skew binomial tree of height $h$ contains at least $2^{h}$ elements\<close>
theorem tree_height_estimate_lower:
"tree_invar t \<Longrightarrow> size (tree_to_multiset t) \<ge> (2::nat)^(height_tree t)"
apply (cases t, simp only:)
apply (frule tree_rank_estimate_lower)
apply (frule tree_rank_height)
apply (simp only: )
done
lemma size_mset_tree_upper: "tree_invar t \<Longrightarrow>
size (tree_to_multiset t) \<le> (2::nat)^(Suc (rank t)) - (1::nat)"
apply (cases t)
by (simp only: tree_rank_estimate_upper SkewBinomialTree.sel(3))
lemma size_mset_tree_lower: "tree_invar t \<Longrightarrow>
size (tree_to_multiset t) \<ge> (2::nat)^(rank t)"
apply (cases t)
by (simp only: tree_rank_estimate_lower SkewBinomialTree.sel(3))
lemma invar_butlast: "invar (bq @ [t]) \<Longrightarrow> invar bq"
unfolding invar_def
apply2 (induct bq)
apply simp
apply (case_tac bq)
apply simp
apply (case_tac list)
by simp_all
lemma invar_last_max:
"invar ((b#b'#bq) @ [m]) \<Longrightarrow> \<forall> t \<in> set (b'#bq). rank t < rank m"
unfolding invar_def
apply2 (induct bq) apply simp apply (case_tac bq) apply simp by simp
lemma invar_last_max': "invar ((b#b'#bq) @ [m]) \<Longrightarrow> rank b \<le> rank b'"
unfolding invar_def by simp
lemma invar_length: "invar bq \<Longrightarrow> length bq \<le> Suc (Suc (rank (last bq)))"
proof2 (induct bq rule: rev_induct)
case Nil thus ?case by simp
next
case (snoc x xs)
show ?case proof (cases xs)
case Nil thus ?thesis by simp
next
case [simp]: (Cons xxs xx)
note Cons' = Cons
thus ?thesis
proof (cases xx)
case Nil with snoc.prems Cons show ?thesis by simp
next
case (Cons xxxs xxx)
from snoc.hyps[OF invar_butlast[OF snoc.prems]] have
IH: "length xs \<le> Suc (Suc (rank (last xs)))" .
also from invar_last_max[OF snoc.prems[unfolded Cons' Cons]]
invar_last_max'[OF snoc.prems[unfolded Cons' Cons]]
last_in_set[of xs] Cons have
"Suc (rank (last xs)) \<le> rank (last (xs @ [x]))" by auto
finally show ?thesis by simp
qed
qed
qed
lemma size_queue_sum_list:
"size (queue_to_multiset bq) = sum_list (map (size \<circ> tree_to_multiset) bq)"
apply2 (induct bq) by simp_all
text \<open>
A skew binomial heap of length $l$ contains at least $2^{l-1} - 1$ elements.
\<close>
theorem queue_length_estimate_lower:
"invar bq \<Longrightarrow> (size (queue_to_multiset bq)) \<ge> 2^(length bq - 1) - 1"
proof2 (induct bq rule: rev_induct)
case Nil thus ?case by simp
next
case (snoc x xs) thus ?case
proof (cases xs)
case Nil thus ?thesis by simp
next
case [simp]: (Cons xx xxs)
from snoc.hyps[OF invar_butlast[OF snoc.prems]]
have IH: "2 ^ (length xs - 1) \<le> Suc (size (queue_to_multiset xs))" by simp
have size_q:
"size (queue_to_multiset (xs @ [x])) =
size (queue_to_multiset xs) + size (tree_to_multiset x)"
by (simp add: size_queue_sum_list)
moreover
from snoc.prems have inv_x: "tree_invar x" by (simp add: invar_def)
from size_mset_tree_lower[OF this]
have "2 ^ (rank x) \<le> size (tree_to_multiset x)" .
ultimately have
eq: "size (queue_to_multiset xs) + (2::nat)^(rank x) \<le>
size (queue_to_multiset (xs @ [x]))" by simp
from invar_length[OF snoc.prems] have "length xs \<le> (rank x + 1)" by simp
hence snd: "(2::nat) ^ (length xs - 1) \<le> (2::nat) ^ ((rank x))"
by (simp del: power.simps)
have
"(2::nat) ^ (length (xs @ [x]) - 1) =
(2::nat) ^ (length xs - 1) + (2::nat) ^ (length xs - 1)"
by auto
with IH have
"2 ^ (length (xs @ [x]) - 1) \<le>
Suc (size (queue_to_multiset xs)) + 2 ^ (length xs - 1)"
by simp
with snd have "2 ^ (length (xs @ [x]) - 1) \<le>
Suc (size (queue_to_multiset xs)) + 2 ^ rank x"
by arith
with eq show ?thesis by simp
qed
qed
subsection "Operations"
subsubsection "Empty Tree"
lemma empty_correct: "q=Nil \<longleftrightarrow> queue_to_multiset q = {#}"
apply (cases q)
apply simp
apply (case_tac a)
apply auto
done
subsubsection "Insert"
text \<open>Inserts a tree into the queue; when two trees of the same rank meet,
they are linked and the result is recursively inserted. This is the same
definition as for binomial queues and is used for melding.\<close>
fun ins :: "('e, 'a::linorder) SkewBinomialTree \<Rightarrow> ('e, 'a) SkewBinomialQueue \<Rightarrow>
('e, 'a) SkewBinomialQueue" where
"ins t [] = [t]" |
"ins t' (t # bq) =
(if (rank t') < (rank t)
then t' # t # bq
else (if (rank t) < (rank t')
then t # (ins t' bq)
else ins (link t' t) bq))"
text \<open>Insert an element with priority into a queue using skewlinks.\<close>
fun insert :: "'e \<Rightarrow> 'a::linorder \<Rightarrow> ('e, 'a) SkewBinomialQueue \<Rightarrow>
('e, 'a) SkewBinomialQueue" where
"insert e a [] = [Node e a 0 []]" |
"insert e a [t] = [Node e a 0 [],t]" |
"insert e a (t # t' # bq) =
(if rank t \<noteq> rank t'
then (Node e a 0 []) # t # t' # bq
else (skewlink e a t t') # bq)"
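text \<open>For illustration: if the first two trees of the queue have equal rank,
  \<open>insert\<close> performs a single skew link and stops; otherwise it merely prepends
  a rank-$0$ singleton. Either way only constantly many operations are
  performed, which is exactly the point of skew links.\<close>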
lemma insert_mset: "queue_invar q \<Longrightarrow>
queue_to_multiset (insert e a q) =
queue_to_multiset q + {# (e,a) #}"
apply2 (induct q rule: insert.induct) by (auto simp add: union_ac ttm_children)
lemma ins_queue_invar: "\<lbrakk>tree_invar t; queue_invar q\<rbrakk> \<Longrightarrow> queue_invar (ins t q)"
proof2 (induct q arbitrary: t)
case Nil
then show ?case by simp
next
case (Cons a q)
note iv = Cons(1)
from Cons(2,3) show ?case
apply (cases "rank t < rank a")
apply simp
apply (cases "rank t = rank a")
defer
using iv[of "t"] apply simp
proof goal_cases
case prems: 1
from prems(2) have inv_a: "tree_invar a" by simp
from prems(2) have inv_q: "queue_invar q" by simp
note inv_link = link_tree_invar[OF prems(1) inv_a prems(4)]
from iv[OF inv_link inv_q] prems(4) show ?case by simp
qed
qed
lemma insert_queue_invar: "queue_invar q \<Longrightarrow> queue_invar (insert e a q)"
proof2 (induct q rule: insert.induct)
case 1
then show ?case by simp
next
case 2
then show ?case by simp
next
case (3 e a t t' bq)
show ?case
proof (cases "rank t = rank t'")
case False
with 3 show ?thesis by simp
next
case True
from 3 have inv_t: "tree_invar t" by simp
from 3 have inv_t': "tree_invar t'" by simp
from 3 skewlink_tree_invar[OF inv_t inv_t' True, of e a] True
show ?thesis by simp
qed
qed
lemma rank_ins2:
"rank_invar bq \<Longrightarrow>
rank t \<le> rank (hd (ins t bq))
\<or> (rank (hd (ins t bq)) = rank (hd bq) \<and> bq \<noteq> [])"
apply2 (induct bq arbitrary: t)
apply auto
proof goal_cases
case prems: (1 a bq t)
hence r: "rank (link t a) = rank a + 1" by (simp add: rank_link)
with prems and prems(1)[of "(link t a)"] show ?case
apply (cases bq)
apply auto
done
qed
lemma insert_rank_invar: "rank_skew_invar q \<Longrightarrow> rank_skew_invar (insert e a q)"
proof (cases q, simp)
fix t q'
assume "rank_skew_invar q" "q = t # q'"
thus "rank_skew_invar (insert e a q)"
proof (cases "q'", (auto intro: gr0I)[1])
fix t' q''
assume "rank_skew_invar q" "q = t # q'" "q' = t' # q''"
thus "rank_skew_invar (insert e a q)"
apply(cases "rank t = rank t'") defer
apply (auto intro: gr0I)[1]
apply (simp del: skewlink.simps)
proof goal_cases
case prems: 1
with rank_invar_cons_down[of "t'" "q'"] have "rank_invar q'" by simp
show ?case
proof (cases q'')
case Nil
then show ?thesis by simp
next
case (Cons t'' q''')
with prems have "rank t' < rank t''" by simp
with prems have "rank (skewlink e a t t') \<le> rank t''" by simp
with prems Cons rank_skew_cons_up[of "t''" "q'''" "skewlink e a t t'"]
show ?thesis by simp
qed
qed
qed
qed
lemma insert_invar: "invar q \<Longrightarrow> invar (insert e a q)"
unfolding invar_def
using insert_queue_invar[of q] insert_rank_invar[of q]
by simp
theorem insert_correct:
assumes I: "invar q"
shows
"invar (insert e a q)"
"queue_to_multiset (insert e a q) = queue_to_multiset q + {# (e,a) #}"
using insert_mset[of q] insert_invar[of q] I
unfolding invar_def by simp_all
subsubsection "meld"
text \<open>Remove duplicate tree ranks by inserting the first tree of the
queue into the rest of the queue.\<close>
fun uniqify
:: "('e, 'a::linorder) SkewBinomialQueue \<Rightarrow> ('e, 'a) SkewBinomialQueue"
where
"uniqify [] = []" |
"uniqify (t#bq) = ins t bq"
text \<open>Meld two uniquified queues using the same definition as for
binomial queues.\<close>
fun meldUniq
:: "('e, 'a::linorder) SkewBinomialQueue \<Rightarrow> ('e,'a) SkewBinomialQueue \<Rightarrow>
('e, 'a) SkewBinomialQueue" where
"meldUniq [] bq = bq" |
"meldUniq bq [] = bq" |
"meldUniq (t1#bq1) (t2#bq2) = (if rank t1 < rank t2
then t1 # (meldUniq bq1 (t2#bq2))
else (if rank t2 < rank t1
then t2 # (meldUniq (t1#bq1) bq2)
else ins (link t1 t2) (meldUniq bq1 bq2)))"
text \<open>Meld two queues using above functions.\<close>
definition meld
:: "('e, 'a::linorder) SkewBinomialQueue \<Rightarrow> ('e, 'a) SkewBinomialQueue \<Rightarrow>
('e, 'a) SkewBinomialQueue" where
"meld bq1 bq2 = meldUniq (uniqify bq1) (uniqify bq2)"
lemma invar_uniqify: "queue_invar q \<Longrightarrow> queue_invar (uniqify q)"
apply(cases q, simp)
apply(auto simp add: ins_queue_invar)
done
lemma invar_meldUniq: "\<lbrakk>queue_invar q; queue_invar q'\<rbrakk> \<Longrightarrow> queue_invar (meldUniq q q')"
proof2 (induct q q' rule: meldUniq.induct)
case 1
then show ?case by simp
next
case 2
then show ?case by simp
next
case (3 t1 bq1 t2 bq2)
consider (lt) "rank t1 < rank t2" | (gt) "rank t1 > rank t2" | (eq) "rank t1 = rank t2"
by atomize_elim auto
then show ?case
proof cases
case t1t2: lt
from 3(4) have inv_bq1: "queue_invar bq1" by simp
from 3(4) have inv_t1: "tree_invar t1" by simp
from 3(1)[OF t1t2 inv_bq1 3(5)] inv_t1 t1t2
show ?thesis by simp
next
case t1t2: gt
from 3(5) have inv_bq2: "queue_invar bq2" by simp
from 3(5) have inv_t2: "tree_invar t2" by simp
from t1t2 have "\<not> rank t1 < rank t2" by simp
from 3(2) [OF this t1t2 3(4) inv_bq2] inv_t2 t1t2
show ?thesis by simp
next
case t1t2: eq
from 3(4) have inv_bq1: "queue_invar bq1" by simp
from 3(4) have inv_t1: "tree_invar t1" by simp
from 3(5) have inv_bq2: "queue_invar bq2" by simp
from 3(5) have inv_t2: "tree_invar t2" by simp
note inv_link = link_tree_invar[OF inv_t1 inv_t2 t1t2]
from t1t2 have "\<not> rank t1 < rank t2" "\<not> rank t2 < rank t1" by auto
note inv_meld = 3(3)[OF this inv_bq1 inv_bq2]
from ins_queue_invar[OF inv_link inv_meld] t1t2
show ?thesis by simp
qed
qed
lemma meld_queue_invar:
assumes "queue_invar q"
and "queue_invar q'"
shows "queue_invar (meld q q')"
proof -
note inv_uniq_q = invar_uniqify[OF assms(1)]
note inv_uniq_q' = invar_uniqify[OF assms(2)]
note inv_meldUniq = invar_meldUniq[OF inv_uniq_q inv_uniq_q']
thus ?thesis by (simp add: meld_def)
qed
lemma uniqify_mset: "queue_invar q \<Longrightarrow> queue_to_multiset q = queue_to_multiset (uniqify q)"
apply (cases q)
apply simp
apply (simp add: ins_mset)
done
lemma meldUniq_mset: "\<lbrakk>queue_invar q; queue_invar q'\<rbrakk> \<Longrightarrow>
queue_to_multiset (meldUniq q q') =
queue_to_multiset q + queue_to_multiset q'"
apply2(induct q q' rule: meldUniq.induct)
by(auto simp: ins_mset link_tree_invar invar_meldUniq union_ac)
lemma meld_mset:
"\<lbrakk> queue_invar q; queue_invar q' \<rbrakk> \<Longrightarrow>
queue_to_multiset (meld q q') = queue_to_multiset q + queue_to_multiset q'"
by (simp add: meld_def meldUniq_mset invar_uniqify uniqify_mset[symmetric])
text \<open>Ins operation satisfies rank invariant, see binomial queues\<close>
lemma rank_ins: "rank_invar bq \<Longrightarrow> rank_invar (ins t bq)"
proof2 (induct bq arbitrary: t)
case Nil
then show ?case by simp
next
case (Cons a bq)
then show ?case
apply auto
proof goal_cases
case prems: 1
hence inv: "rank_invar (ins t bq)" by (cases bq) simp_all
from prems have hd: "bq \<noteq> [] \<Longrightarrow> rank a < rank (hd bq)" by (cases bq) auto
from prems have "rank t \<le> rank (hd (ins t bq))
\<or> (rank (hd (ins t bq)) = rank (hd bq) \<and> bq \<noteq> [])"
by (metis rank_ins2 rank_invar_cons_down)
with prems have "rank a < rank (hd (ins t bq))
\<or> (rank (hd (ins t bq)) = rank (hd bq) \<and> bq \<noteq> [])" by auto
with prems and inv and hd show ?case
by (auto simp add: rank_invar_hd_cons)
next
case prems: 2
hence inv: "rank_invar bq" by (cases bq) simp_all
with prems and prems(1)[of "(link t a)"] show ?case by simp
qed
qed
lemma rank_uniqify:
assumes "rank_skew_invar q"
shows "rank_invar (uniqify q)"
proof (cases q)
case Nil
then show ?thesis by simp
next
case (Cons a list)
with rank_skew_rank_invar[of "a" "list"] rank_ins[of "list" "a"] assms
show ?thesis by simp
qed
lemma rank_ins_min: "rank_invar bq \<Longrightarrow> rank (hd (ins t bq)) \<ge> min (rank t) (rank (hd bq))"
proof2 (induct bq arbitrary: t)
case Nil
then show ?case by simp
next
case (Cons a bq)
then show ?case
apply auto
proof goal_cases
case prems: 1
hence inv: "rank_invar bq" by (cases bq) simp_all
from prems have r: "rank (link t a) = rank a + 1" by (simp add: rank_link)
with prems and inv and prems(1)[of "(link t a)"] show ?case
by (cases bq) auto
qed
qed
lemma rank_invar_not_empty_hd: "\<lbrakk>rank_invar (t # bq); bq \<noteq> []\<rbrakk> \<Longrightarrow> rank t < rank (hd bq)"
apply2 (induct bq arbitrary: t) by auto
lemma rank_invar_meldUniq_strong:
"\<lbrakk>rank_invar bq1; rank_invar bq2\<rbrakk> \<Longrightarrow>
rank_invar (meldUniq bq1 bq2)
\<and> rank (hd (meldUniq bq1 bq2)) \<ge> min (rank (hd bq1)) (rank (hd bq2))"
proof2 (induct bq1 bq2 rule: meldUniq.induct)
case 1
then show ?case by simp
next
case 2
then show ?case by simp
next
case (3 t1 bq1 t2 bq2)
from 3 have inv1: "rank_invar bq1" by (cases bq1) simp_all
from 3 have inv2: "rank_invar bq2" by (cases bq2) simp_all
from inv1 and inv2 and 3 show ?case
apply auto
proof goal_cases
let ?t = "t2"
let ?bq = "bq2"
let ?meldUniq = "rank t2 < rank (hd (meldUniq (t1 # bq1) bq2))"
case prems: 1
hence "?bq \<noteq> [] \<Longrightarrow> rank ?t < rank (hd ?bq)"
by (simp add: rank_invar_not_empty_hd)
with prems have ne: "?bq \<noteq> [] \<Longrightarrow> ?meldUniq" by simp
from prems have "?bq = [] \<Longrightarrow> ?meldUniq" by simp
with ne have "?meldUniq" by (cases "?bq = []")
with prems show ?case by (simp add: rank_invar_hd_cons)
next \<comment> \<open>analog\<close>
let ?t = "t1"
let ?bq = "bq1"
let ?meldUniq = "rank t1 < rank (hd (meldUniq bq1 (t2 # bq2)))"
case prems: 2
hence "?bq \<noteq> [] \<Longrightarrow> rank ?t < rank (hd ?bq)"
by (simp add: rank_invar_not_empty_hd)
with prems have ne: "?bq \<noteq> [] \<Longrightarrow> ?meldUniq" by simp
from prems have "?bq = [] \<Longrightarrow> ?meldUniq" by simp
with ne have "?meldUniq" by (cases "?bq = []")
with prems show ?case by (simp add: rank_invar_hd_cons)
next
case 3
thus ?case by (simp add: rank_ins)
next
case prems: 4 (* From here on it gets ugly *)
then have r: "rank (link t1 t2) = rank t2 + 1" by (simp add: rank_link)
have m: "meldUniq bq1 [] = bq1" by (cases bq1) auto
from inv1 and inv2 and prems have
mm: "min (rank (hd bq1)) (rank (hd bq2)) \<le> rank (hd (meldUniq bq1 bq2))"
by simp
from \<open>rank_invar (t1 # bq1)\<close> have "bq1 \<noteq> [] \<Longrightarrow> rank t1 < rank (hd bq1)"
by (simp add: rank_invar_not_empty_hd)
with prems have r1: "bq1 \<noteq> [] \<Longrightarrow> rank t2 < rank (hd bq1)" by simp
from \<open>rank_invar (t2 # bq2)\<close> have r2: "bq2 \<noteq> [] \<Longrightarrow> rank t2 < rank (hd bq2)"
by (simp add: rank_invar_not_empty_hd)
from inv1 r r1 rank_ins_min[of bq1 "(link t1 t2)"] have
abc1: "bq1 \<noteq> [] \<Longrightarrow> rank t2 \<le> rank (hd (ins (link t1 t2) bq1))" by simp
from inv2 r r2 rank_ins_min[of bq2 "(link t1 t2)"] have
abc2: "bq2 \<noteq> [] \<Longrightarrow> rank t2 \<le> rank (hd (ins (link t1 t2) bq2))" by simp
from r1 r2 mm have
"\<lbrakk>bq1 \<noteq> []; bq2 \<noteq> []\<rbrakk> \<Longrightarrow> rank t2 < rank (hd (meldUniq bq1 bq2))"
by (simp)
with \<open>rank_invar (meldUniq bq1 bq2)\<close> r
rank_ins_min[of "meldUniq bq1 bq2" "link t1 t2"]
have "\<lbrakk>bq1 \<noteq> []; bq2 \<noteq> []\<rbrakk> \<Longrightarrow>
rank t2 < rank (hd (ins (link t1 t2) (meldUniq bq1 bq2)))"
by simp
with inv1 and inv2 and r m r1 show ?case
apply(cases "bq2 = []")
apply(cases "bq1 = []")
apply(simp)
apply(auto simp add: abc1)
apply(cases "bq1 = []")
apply(simp)
apply(auto simp add: abc2)
done
qed
qed
lemma rank_meldUniq:
"\<lbrakk>rank_invar bq1; rank_invar bq2\<rbrakk> \<Longrightarrow> rank_invar (meldUniq bq1 bq2)"
by (simp only: rank_invar_meldUniq_strong)
lemma rank_meld:
"\<lbrakk>rank_skew_invar q1; rank_skew_invar q2\<rbrakk> \<Longrightarrow> rank_skew_invar (meld q1 q2)"
by (simp only: meld_def rank_meldUniq rank_uniqify rank_invar_rank_skew)
theorem meld_invar:
"\<lbrakk>invar bq1; invar bq2\<rbrakk>
\<Longrightarrow> invar (meld bq1 bq2)"
by (metis meld_queue_invar rank_meld invar_def)
theorem meld_correct:
assumes I: "invar q" "invar q'"
shows
"invar (meld q q')"
"queue_to_multiset (meld q q') = queue_to_multiset q + queue_to_multiset q'"
using meld_invar[of q q'] meld_mset[of q q'] I
unfolding invar_def by simp_all
subsubsection "Find Minimal Element"
text \<open>Find the tree containing the minimal element.\<close>
fun getMinTree :: "('e, 'a::linorder) SkewBinomialQueue \<Rightarrow>
('e, 'a) SkewBinomialTree" where
"getMinTree [t] = t" |
"getMinTree (t#bq) =
(if prio t \<le> prio (getMinTree bq)
then t
else (getMinTree bq))"
text \<open>Find the minimal element in the queue.\<close>
definition findMin :: "('e, 'a::linorder) SkewBinomialQueue \<Rightarrow> ('e \<times> 'a)" where
"findMin bq = (let min = getMinTree bq in (val min, prio min))"
lemma mintree_exists: "(bq \<noteq> []) = (getMinTree bq \<in> set bq)"
proof2 (induct bq)
case Nil
then show ?case by simp
next
case (Cons _ bq)
then show ?case by (cases bq) simp_all
qed
lemma treehead_in_multiset:
"t \<in> set bq \<Longrightarrow> (val t, prio t) \<in># (queue_to_multiset bq)"
apply2 (induct bq) by (simp, cases t, auto)
lemma heap_ordered_single:
"heap_ordered t = (\<forall>x \<in> set_mset (tree_to_multiset t). prio t \<le> snd x)"
by (cases t) auto
lemma getMinTree_cons:
"prio (getMinTree (y # x # xs)) \<le> prio (getMinTree (x # xs))"
apply2 (induct xs rule: getMinTree.induct) by simp_all
lemma getMinTree_min_tree: "t \<in> set bq \<Longrightarrow> prio (getMinTree bq) \<le> prio t"
apply2 (induct bq arbitrary: t rule: getMinTree.induct) by (simp, fastforce, simp)
lemma getMinTree_min_prio:
assumes "queue_invar bq"
and "y \<in> set_mset (queue_to_multiset bq)"
shows "prio (getMinTree bq) \<le> snd y"
proof -
from assms have "bq \<noteq> []" by (cases bq) simp_all
with assms have "\<exists>t \<in> set bq. (y \<in> set_mset (tree_to_multiset t))"
proof2 (induct bq)
case Nil
then show ?case by simp
next
case (Cons a bq)
then show ?case
apply (cases "y \<in> set_mset (tree_to_multiset a)")
apply simp
apply (cases bq)
apply simp_all
done
qed
from this obtain t where O:
"t \<in> set bq"
"y \<in> set_mset ((tree_to_multiset t))" by blast
obtain e a r ts where [simp]: "t = (Node e a r ts)" by (cases t) blast
from O assms(1) have inv: "tree_invar t" by simp
from tree_invar_heap_ordered[OF inv] heap_ordered.simps[of e a r ts] O
have "prio t \<le> snd y" by auto
with getMinTree_min_tree[OF O(1)] show ?thesis by simp
qed
lemma findMin_mset:
assumes I: "queue_invar q"
assumes NE: "q\<noteq>Nil"
shows "findMin q \<in># queue_to_multiset q"
"\<forall>y\<in>set_mset (queue_to_multiset q). snd (findMin q) \<le> snd y"
proof -
from NE have "getMinTree q \<in> set q" by (simp only: mintree_exists)
thus "findMin q \<in># queue_to_multiset q"
by (simp add: treehead_in_multiset findMin_def Let_def)
show "\<forall>y\<in>set_mset (queue_to_multiset q). snd (findMin q) \<le> snd y"
by (simp add: getMinTree_min_prio findMin_def Let_def NE I)
qed
theorem findMin_correct:
assumes I: "invar q"
assumes NE: "q\<noteq>Nil"
shows "findMin q \<in># queue_to_multiset q"
"\<forall>y\<in>set_mset (queue_to_multiset q). snd (findMin q) \<le> snd y"
using I NE findMin_mset
unfolding invar_def by auto
subsubsection "Delete Minimal Element"
text \<open>Insert the roots of a given queue into another queue.\<close>
fun insertList ::
"('e, 'a::linorder) SkewBinomialQueue \<Rightarrow> ('e, 'a) SkewBinomialQueue \<Rightarrow>
('e, 'a) SkewBinomialQueue" where
"insertList [] tbq = tbq" |
"insertList (t#bq) tbq = insertList bq (insert (val t) (prio t) tbq)"
text \<open>Remove the first tree whose root has priority $a$.\<close>
fun remove1Prio :: "'a \<Rightarrow> ('e, 'a::linorder) SkewBinomialQueue \<Rightarrow>
('e, 'a) SkewBinomialQueue" where
"remove1Prio a [] = []" |
"remove1Prio a (t#bq) =
(if (prio t) = a then bq else t # (remove1Prio a bq))"
lemma remove1Prio_remove1[simp]:
"remove1Prio (prio (getMinTree bq)) bq = remove1 (getMinTree bq) bq"
proof2 (induct bq)
case Nil thus ?case by simp
next
case (Cons t bq)
note iv = Cons
thus ?case
proof (cases "t = getMinTree (t # bq)")
case True
with iv show ?thesis by simp
next
case False
hence ne: "bq \<noteq> []" by auto
with False have down: "getMinTree (t # bq) = getMinTree bq"
apply2 (induct bq rule: getMinTree.induct) by auto
from ne False have "prio t \<noteq> prio (getMinTree bq)"
apply2 (induct bq rule: getMinTree.induct) by auto
with down iv False ne show ?thesis by simp
qed
qed
text \<open>Return the queue without the minimal element found by findMin\<close>
definition deleteMin :: "('e, 'a::linorder) SkewBinomialQueue \<Rightarrow>
('e, 'a) SkewBinomialQueue" where
"deleteMin bq = (let min = getMinTree bq in insertList
(filter (\<lambda> t. rank t = 0) (children min))
(meld (rev (filter (\<lambda> t. rank t > 0) (children min)))
(remove1Prio (prio min) bq)))"
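(* deleteMin proceeds in three steps: remove the minimal tree from the queue,
   meld the reversed positive-rank children of that tree into the remaining
   queue (children are stored in decreasing rank order, queues in increasing
   order), and re-insert the rank-0 children as fresh singleton elements. *)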
lemma invar_remove1: "queue_invar q \<Longrightarrow> queue_invar (remove1 t q)"
by (unfold queue_invar_def) (auto)
lemma mset_rev: "queue_to_multiset (rev q) = queue_to_multiset q"
apply2 (induct q) by(auto simp add: union_ac)
lemma in_set_subset: "t \<in> set q \<Longrightarrow> tree_to_multiset t \<subseteq># queue_to_multiset q"
proof2 (induct q)
case Nil
then show ?case by simp
next
case (Cons a q)
show ?case
proof (cases "t = a")
case True
then show ?thesis by simp
next
case False
with Cons have t_in_q: "t \<in> set q" by simp
have "queue_to_multiset q \<subseteq># queue_to_multiset (a # q)"
by simp
from subset_mset.order_trans[OF Cons(1)[OF t_in_q] this] show ?thesis .
qed
qed
lemma mset_remove1: "t \<in> set q \<Longrightarrow>
queue_to_multiset (remove1 t q) =
queue_to_multiset q - tree_to_multiset t"
apply2 (induct q) by (auto simp: in_set_subset)
lemma invar_children':
assumes "tree_invar t"
shows "queue_invar (children t)"
proof (cases t)
case (Node e a nat list)
with assms have inv: "tree_invar (Node e a nat list)" by simp
from Node invar_children[OF inv] show ?thesis by simp
qed
lemma invar_filter: "queue_invar q \<Longrightarrow> queue_invar (filter f q)"
by (unfold queue_invar_def) simp
lemma deleteMin_queue_invar:
"\<lbrakk>queue_invar q; queue_to_multiset q \<noteq> {#}\<rbrakk> \<Longrightarrow>
queue_invar (deleteMin q)"
unfolding deleteMin_def Let_def
proof goal_cases
case prems: 1
from prems(2) have q_ne: "q \<noteq> []" by auto
with prems(1) mintree_exists[of q]
have inv_min: "tree_invar (getMinTree q)" by simp
note inv_rem = invar_remove1[OF prems(1), of "getMinTree q"]
note inv_children = invar_children'[OF inv_min]
note inv_filter = invar_filter[OF inv_children, of "\<lambda>t. 0 < rank t"]
note inv_rev = iffD2[OF invar_rev inv_filter]
note inv_meld = meld_queue_invar[OF inv_rev inv_rem]
note inv_ins =
insertList_queue_invar[OF inv_meld,
of "[t\<leftarrow>children (getMinTree q). rank t = 0]"]
then show ?case by simp
qed
lemma mset_children: "queue_to_multiset (children t) =
tree_to_multiset t - {# (val t, prio t) #}"
by(cases t, auto)
lemma mset_insertList:
"\<lbrakk>\<forall>t \<in> set ts. rank t = 0 \<and> children t = [] ; queue_invar q\<rbrakk> \<Longrightarrow>
queue_to_multiset (insertList ts q) =
queue_to_multiset ts + queue_to_multiset q"
proof2 (induct ts arbitrary: q)
case Nil
then show ?case by simp
next
case (Cons a ts)
from Cons(2) have ball_ts: "\<forall>t\<in>set ts. rank t = 0 \<and> children t = []" by simp
note inv_insert = insert_queue_invar[OF Cons(3), of "val a" "prio a"]
note iv = Cons(1)[OF ball_ts inv_insert]
from Cons(2) have mset_a: "tree_to_multiset a = {# (val a, prio a)#}"
by (cases a) simp
note insert_mset[OF Cons(3), of "val a" "prio a"]
with mset_a iv show ?case by (simp add: union_ac)
qed
lemma mset_filter: "(queue_to_multiset [t\<leftarrow>q . rank t = 0]) +
queue_to_multiset [t\<leftarrow>q . 0 < rank t] =
queue_to_multiset q"
apply2 (induct q) by (auto simp add: union_ac)
lemma deleteMin_mset:
assumes "queue_invar q"
and "queue_to_multiset q \<noteq> {#}"
shows "queue_to_multiset (deleteMin q) = queue_to_multiset q - {# (findMin q) #}"
proof -
from assms(2) have q_ne: "q \<noteq> []" by auto
with mintree_exists[of q]
have min_in_q: "getMinTree q \<in> set q" by simp
with assms(1) have inv_min: "tree_invar (getMinTree q)" by simp
note inv_rem = invar_remove1[OF assms(1), of "getMinTree q"]
note inv_children = invar_children'[OF inv_min]
note inv_filter = invar_filter[OF inv_children, of "\<lambda>t. 0 < rank t"]
note inv_rev = iffD2[OF invar_rev inv_filter]
note inv_meld = meld_queue_invar[OF inv_rev inv_rem]
note mset_rem = mset_remove1[OF min_in_q]
note mset_rev = mset_rev[of "[t\<leftarrow>children (getMinTree q). 0 < rank t]"]
note mset_meld = meld_mset[OF inv_rev inv_rem]
note mset_children = mset_children[of "getMinTree q"]
thm mset_insertList[of "[t\<leftarrow>children (getMinTree q) .
rank t = 0]"]
have "\<lbrakk>tree_invar t; rank t = 0\<rbrakk> \<Longrightarrow> children t = []" for t
by (cases t) simp
with inv_children
have ball_min: "\<forall>t\<in>set [t\<leftarrow>children (getMinTree q). rank t = 0].
rank t = 0 \<and> children t = []" by (unfold queue_invar_def) auto
note mset_insertList = mset_insertList[OF ball_min inv_meld]
note mset_filter = mset_filter[of "children (getMinTree q)"]
let ?Q = "queue_to_multiset q"
let ?MT = "tree_to_multiset (getMinTree q)"
from q_ne have head_subset_min:
"{# (val (getMinTree q), prio (getMinTree q)) #} \<subseteq># ?MT"
by(cases "getMinTree q") simp
note min_subset_q = in_set_subset[OF min_in_q]
from mset_insertList mset_meld mset_rev mset_rem mset_filter mset_children
multiset_diff_union_assoc[OF head_subset_min, of "?Q - ?MT"]
mset_subset_eq_multiset_union_diff_commute[OF min_subset_q, of "?MT"]
show ?thesis
by (auto simp add: deleteMin_def Let_def union_ac findMin_def)
qed
lemma rank_insertList: "rank_skew_invar q \<Longrightarrow> rank_skew_invar (insertList ts q)"
apply2 (induct ts arbitrary: q) by (simp_all add: insert_rank_invar)
lemma insertList_invar: "invar q \<Longrightarrow> invar (insertList ts q)"
proof2 (induct ts arbitrary: q)
case Nil
then show ?case by simp
next
case (Cons a q)
show ?case
apply (unfold insertList.simps)
proof goal_cases
case 1
from Cons(2) insert_rank_invar[of "q" "val a" "prio a"]
have a1: "rank_skew_invar (insert (val a) (prio a) q)"
by (simp add: invar_def)
from Cons(2) insert_queue_invar[of "q" "val a" "prio a"]
have a2: "queue_invar (insert (val a) (prio a) q)" by (simp add: invar_def)
from a1 a2 have "invar (insert (val a) (prio a) q)" by (simp add: invar_def)
with Cons(1)[of "(insert (val a) (prio a) q)"] show ?case .
qed
qed
lemma children_rank_less:
assumes "tree_invar t"
shows "\<forall>t' \<in> set (children t). rank t' < rank t"
proof (cases t)
case (Node e a nat list)
with assms show ?thesis
proof2 (induct nat arbitrary: t e a list)
case 0
then show ?case by simp
next
case (Suc nat)
then obtain e1 a1 ts1 e2 a2 ts2 e' a' where
O: "tree_invar (Node e1 a1 nat ts1)" "tree_invar (Node e2 a2 nat ts2)"
"t = link (Node e1 a1 nat ts1) (Node e2 a2 nat ts2)
\<or> t = skewlink e' a' (Node e1 a1 nat ts1) (Node e2 a2 nat ts2)"
by (simp only: tree_invar.simps) blast
hence ch_id:
"children t = (if a1 \<le> a2 then (Node e2 a2 nat ts2)#ts1
else (Node e1 a1 nat ts1)#ts2) \<or>
children t =
(if a' \<le> a1 \<and> a' \<le> a2 then [(Node e1 a1 nat ts1), (Node e2 a2 nat ts2)]
else (if a1 \<le> a2 then (Node e' a' 0 []) # (Node e2 a2 nat ts2) # ts1
else (Node e' a' 0 []) # (Node e1 a1 nat ts1) # ts2))"
by auto
from O Suc(1)[of "Node e1 a1 nat ts1" "e1" "a1" "ts1"]
have p1: "\<forall>t'\<in>set ((Node e2 a2 nat ts2) # ts1). rank t' < Suc nat" by auto
from O Suc(1)[of "Node e2 a2 nat ts2" "e2" "a2" "ts2"]
have p2: "\<forall>t'\<in>set ((Node e1 a1 nat ts1) # ts2). rank t' < Suc nat" by auto
from O have
p3: "\<forall>t' \<in> set [(Node e1 a1 nat ts1), (Node e2 a2 nat ts2)].
rank t' < Suc nat" by simp
from O Suc(1)[of "Node e1 a1 nat ts1" "e1" "a1" "ts1"]
have
p4: "\<forall>t' \<in> set ((Node e' a' 0 []) # (Node e2 a2 nat ts2) # ts1).
rank t' < Suc nat" by auto
from O Suc(1)[of "Node e2 a2 nat ts2" "e2" "a2" "ts2"]
have p5:
"\<forall>t' \<in> set ((Node e' a' 0 []) # (Node e1 a1 nat ts1) # ts2).
rank t' < Suc nat" by auto
from Suc(3) p1 p2 p3 p4 p5 ch_id show ?case
by(cases "children t = (if a1 \<le> a2 then Node e2 a2 nat ts2 # ts1
else Node e1 a1 nat ts1 # ts2)") simp_all
qed
qed
lemma strong_rev_children:
assumes "tree_invar t"
shows "invar (rev [t \<leftarrow> children t. 0 < rank t])"
proof (cases t)
case (Node e a nat list)
with assms show ?thesis
proof2 (induct "nat" arbitrary: t e a list)
case 0
then show ?case by (simp add: invar_def)
next
case (Suc nat)
show ?case
proof (cases "nat")
case 0
with Suc obtain e1 a1 e2 a2 e' a' where
O: "tree_invar (Node e1 a1 0 [])" "tree_invar (Node e2 a2 0 [])"
"t = link (Node e1 a1 0 []) (Node e2 a2 0 [])
\<or> t = skewlink e' a' (Node e1 a1 0 []) (Node e2 a2 0 [])"
by (simp only: tree_invar.simps) blast
hence "[t \<leftarrow> children t. 0 < rank t] = []" by auto
then show ?thesis by (simp add: invar_def)
next
case Suc': (Suc n)
from Suc obtain e1 a1 ts1 e2 a2 ts2 e' a' where
O: "tree_invar (Node e1 a1 nat ts1)" "tree_invar (Node e2 a2 nat ts2)"
"t = link (Node e1 a1 nat ts1) (Node e2 a2 nat ts2)
\<or> t = skewlink e' a' (Node e1 a1 nat ts1) (Node e2 a2 nat ts2)"
by (simp only: tree_invar.simps) blast
hence ch_id:
"children t = (if a1 \<le> a2 then
(Node e2 a2 nat ts2)#ts1
else (Node e1 a1 nat ts1)#ts2)
\<or>
children t = (if a' \<le> a1 \<and> a' \<le> a2 then
[(Node e1 a1 nat ts1), (Node e2 a2 nat ts2)]
else (if a1 \<le> a2 then
(Node e' a' 0 []) # (Node e2 a2 nat ts2) # ts1
else (Node e' a' 0 []) # (Node e1 a1 nat ts1) # ts2))"
by auto
from O Suc(1)[of "Node e1 a1 nat ts1" "e1" "a1" "ts1"] have
rev_ts1: "invar (rev [t \<leftarrow> ts1. 0 < rank t])" by simp
from O children_rank_less[of "Node e1 a1 nat ts1"] have
"\<forall>t\<in>set (rev [t \<leftarrow> ts1. 0 < rank t]). rank t < rank (Node e2 a2 nat ts2)"
by simp
with O rev_ts1
invar_app_single[of "rev [t \<leftarrow> ts1. 0 < rank t]"
"Node e2 a2 nat ts2"]
have
"invar (rev ((Node e2 a2 nat ts2) # [t \<leftarrow> ts1. 0 < rank t]))"
by simp
with Suc' have p1: "invar (rev [t \<leftarrow> ((Node e2 a2 nat ts2) # ts1). 0 < rank t])"
by simp
from O Suc(1)[of "Node e2 a2 nat ts2" "e2" "a2" "ts2"]
have rev_ts2: "invar (rev [t \<leftarrow> ts2. 0 < rank t])" by simp
from O children_rank_less[of "Node e2 a2 nat ts2"]
have "\<forall>t\<in>set (rev [t \<leftarrow> ts2. 0 < rank t]).
rank t < rank (Node e1 a1 nat ts1)" by simp
with O rev_ts2 invar_app_single[of "rev [t \<leftarrow> ts2. 0 < rank t]"
"Node e1 a1 nat ts1"]
have "invar (rev [t \<leftarrow> ts2. 0 < rank t] @ [Node e1 a1 nat ts1])"
by simp
with Suc' have p2: "invar (rev [t \<leftarrow> ((Node e1 a1 nat ts1) # ts2). 0 < rank t])"
by simp
from O(1-2)
have p3: "invar (rev (filter (\<lambda> t. 0 < rank t)
[(Node e1 a1 nat ts1), (Node e2 a2 nat ts2)]))"
by (simp add: invar_def)
from p1 have p4: "invar (rev
[t \<leftarrow> ((Node e' a' 0 []) # (Node e2 a2 nat ts2) # ts1). 0 < rank t])"
by simp
from p2 have p5: "invar (rev
[t \<leftarrow> ((Node e' a' 0 []) # (Node e1 a1 nat ts1) # ts2). 0 < rank t])"
by simp
from p1 p2 p3 p4 p5 ch_id show
"invar (rev [t\<leftarrow>children t . 0 < rank t])"
by (cases "children t = (if a1 \<le> a2 then (Node e2 a2 nat ts2)#ts1
else (Node e1 a1 nat ts1)#ts2)") metis+
qed
qed
qed
lemma first_less: "rank_invar (t # bq) \<Longrightarrow> \<forall>t' \<in> set bq. rank t < rank t'"
apply2(induct bq arbitrary: t)
apply (simp)
apply (metis List.set_simps(2) insert_iff not_le_imp_less
not_less_iff_gr_or_eq order_less_le_trans rank_invar.simps(3)
rank_invar_cons_down)
done
lemma first_less_eq:
"rank_skew_invar (t # bq) \<Longrightarrow> \<forall>t' \<in> set bq. rank t \<le> rank t'"
apply2(induct bq arbitrary: t)
apply (simp)
apply (metis List.set_simps(2) insert_iff le_trans
rank_invar_rank_skew rank_skew_invar.simps(3) rank_skew_rank_invar)
done
lemma remove1_tail_invar: "tail_invar bq \<Longrightarrow> tail_invar (remove1 t bq)"
proof2 (induct bq arbitrary: t)
case Nil
then show ?case by simp
next
case (Cons a bq)
show ?case
proof (cases "t = a")
case True
from Cons(2) have "tail_invar bq" by (rule tail_invar_cons_down)
with True show ?thesis by simp
next
case False
from Cons(2) have "tail_invar bq" by (rule tail_invar_cons_down)
with Cons(1)[of "t"] have si1: "tail_invar (remove1 t bq)" .
from False have "tail_invar (remove1 t (a # bq)) = tail_invar (a # (remove1 t bq))"
by simp
show ?thesis
proof (cases "remove1 t bq")
case Nil
with si1 Cons(2) False show ?thesis by (simp add: tail_invar_def)
next
case Cons': (Cons aa list)
from Cons(2) have "tree_invar a" by (simp add: tail_invar_def)
from Cons(2) first_less[of "a" "bq"]
have "\<forall>t \<in> set (remove1 t bq). rank a < rank t"
by (metis notin_set_remove1 tail_invar_def)
with Cons' have "rank a < rank aa" by simp
with si1 Cons(2) False Cons' tail_invar_cons_up[of "aa" "list" "a"] show ?thesis
by (simp add: tail_invar_def)
qed
qed
qed
lemma invar_cons_down: "invar (t # bq) \<Longrightarrow> invar bq"
by (metis rank_invar_rank_skew tail_invar_def
invar_def invar_tail_invar)
lemma remove1_invar: "invar bq \<Longrightarrow> invar (remove1 t bq)"
proof2 (induct bq arbitrary: t)
case Nil
then show ?case by simp
next
case (Cons a bq)
show ?case
proof (cases "t = a")
case True
from Cons(2) have "invar bq" by (rule invar_cons_down)
with True show ?thesis by simp
next
case False
from Cons(2) have "invar bq" by (rule invar_cons_down)
with Cons(1)[of "t"] have si1: "invar (remove1 t bq)" .
from False have "invar (remove1 t (a # bq)) = invar (a # (remove1 t bq))"
by simp
show ?thesis
proof (cases "remove1 t bq")
case Nil
with si1 Cons(2) False show ?thesis by (simp add: invar_def)
next
case Cons': (Cons aa list)
from Cons(2) have ti: "tree_invar a" by (simp add: invar_def)
from Cons(2) have sbq: "tail_invar bq" by (metis invar_tail_invar)
hence srm: "tail_invar (remove1 t bq)" by (metis remove1_tail_invar)
from Cons(2) first_less_eq[of "a" "bq"]
have "\<forall>t \<in> set (remove1 t bq). rank a \<le> rank t"
by (metis notin_set_remove1 invar_def)
with Cons' have "rank a \<le> rank aa" by simp
with si1 Cons(2) False Cons' ti srm tail_invar_cons_up_invar[of "aa" "list" "a"]
show ?thesis by simp
qed
qed
qed
lemma deleteMin_invar:
assumes "invar bq"
and "bq \<noteq> []"
shows "invar (deleteMin bq)"
proof -
have eq: "invar (deleteMin bq) =
invar (insertList
(filter (\<lambda> t. rank t = 0) (children (getMinTree bq)))
(meld (rev (filter (\<lambda> t. rank t > 0) (children (getMinTree bq))))
(remove1 (getMinTree bq) bq)))"
by (simp add: deleteMin_def Let_def)
from assms mintree_exists[of "bq"] have ti: "tree_invar (getMinTree bq)"
by (simp add: invar_def queue_invar_def del: queue_invar_simps)
with strong_rev_children[of "getMinTree bq"] have
m1: "invar (rev [t \<leftarrow> children (getMinTree bq). 0 < rank t])" .
from remove1_invar[of "bq" "getMinTree bq"] assms(1)
have m2: "invar (remove1 (getMinTree bq) bq)" .
from meld_invar[of "rev [t \<leftarrow> children (getMinTree bq). 0 < rank t]"
"remove1 (getMinTree bq) bq"] m1 m2
have "invar (meld (rev [t \<leftarrow> children (getMinTree bq). 0 < rank t])
(remove1 (getMinTree bq) bq))" .
with insertList_invar[of
"(meld (rev [t\<leftarrow>children (getMinTree bq) . 0 < rank t])
(remove1 (getMinTree bq) bq))"
"[t\<leftarrow>children (getMinTree bq) . rank t = 0]"]
have "invar
(insertList
[t\<leftarrow>children (getMinTree bq) . rank t = 0]
(meld (rev [t\<leftarrow>children (getMinTree bq) . 0 < rank t])
(remove1 (getMinTree bq) bq)))" .
with eq show ?thesis ..
qed
theorem deleteMin_correct:
assumes I: "invar q"
and NE: "q \<noteq> Nil"
shows "invar (deleteMin q)"
and "queue_to_multiset (deleteMin q) = queue_to_multiset q - {#findMin q#}"
apply (rule deleteMin_invar[OF I NE])
using deleteMin_mset[of q] I NE
unfolding invar_def
apply (auto simp add: empty_correct)
done
(*
fun foldt and foldq where
"foldt f z (Node e a _ q) = f (foldq f z q) e a" |
"foldq f z [] = z" |
"foldq f z (t#q) = foldq f (foldt f z t) q"
lemma fold_plus:
"foldt ((\<lambda>m e a. m+{#(e,a)#})) zz t + z = foldt ((\<lambda>m e a. m+{#(e,a)#})) (zz+z) t"
"foldq ((\<lambda>m e a. m+{#(e,a)#})) zz q + z = foldq ((\<lambda>m e a. m+{#(e,a)#})) (zz+z) q"
apply (induct t and q arbitrary: zz and zz
rule: tree_to_multiset_queue_to_multiset.induct)
apply (auto simp add: union_ac)
apply (subst union_ac, simp)
done
lemma to_mset_fold:
fixes t::"('e,'a::linorder) SkewBinomialTree" and
q::"('e,'a) SkewBinomialQueue"
shows
"tree_to_multiset t = foldt (\<lambda>m e a. m+{#(e,a)#}) {#} t"
"queue_to_multiset q = foldq (\<lambda>m e a. m+{#(e,a)#}) {#} q"
apply (induct t and q rule: tree_to_multiset_queue_to_multiset.induct)
apply (auto simp add: union_ac fold_plus)
done
*)
lemmas [simp del] = insert.simps
end
interpretation SkewBinomialHeapStruc: SkewBinomialHeapStruc_loc .
subsection "Bootstrapping"
text \<open>
  In this section, we implement data-structural bootstrapping to
  reduce the complexity of meld operations to $O(1)$.
  The bootstrapping also contains a {\em global root}, caching the
  minimal element of the queue, and thus also reducing the complexity of
  findMin operations to $O(1)$.
  Bootstrapping adds one more level of recursion:
  an {\em element} is an entry together with a priority queue of elements.
  In the original paper on skew binomial queues \cite{BrOk96}, higher-order
  functors and recursive structures are used to elegantly implement bootstrapped
  heaps on top of ordinary heaps. However, such concepts are supported neither in
  Isabelle/HOL nor in Standard ML. Hence we have to use the
  ``much less clean'' \cite{BrOk96} alternative:
  we manually specialize the heap data structure, and re-implement the functions
  on the specialized data structure.
  The correctness proofs are done by defining a mapping from the specialized to
  the original data structure, and reusing the correctness statements of the
  original data structure.
\<close>
subsubsection "Auxiliary"
text \<open>
We first have to state some auxiliary lemmas and functions, mainly
about multisets.
\<close>
(* TODO: Some of these should be moved into the multiset library; they are
  marked by *MOVE* *)
text \<open>Finding the preimage of an element\<close>
(*MOVE*)
lemma in_image_msetE:
assumes "x\<in>#image_mset f M"
obtains y where "y\<in>#M" "x=f y"
using assms
apply2 (induct M)
apply simp
apply (force split: if_split_asm)
done
text \<open>A very special lemma for image multisets of pairs, where the second
  component is a function of the first component.\<close>
lemma mset_image_fst_dep_pair_diff_split:
"(\<forall>e a. (e,a)\<in>#M \<longrightarrow> a=f e) \<Longrightarrow>
image_mset fst (M - {#(e, f e)#}) = image_mset fst M - {#e#}"
proof2 (induct M)
case empty thus ?case by auto
next
case (add x M)
then obtain e' where [simp]: "x=(e',f e')"
apply (cases x)
apply (force)
done
from add.prems have "\<forall>e a. (e, a) \<in># M \<longrightarrow> a = f e" by simp
with add.hyps have
IH: "image_mset fst (M - {#(e, f e)#}) = image_mset fst M - {#e#}"
by auto
show ?case proof (cases "e=e'")
case True
thus ?thesis by (simp)
next
case False
thus ?thesis
by (simp add: IH)
qed
qed
locale Bootstrapped
begin
subsubsection "Datatype"
text \<open>We manually specialize the binomial tree to contain elements that, in
turn, may contain trees.
Note that we specify nodes without explicit priority,
as the priority is contained in the elements stored in the nodes.
\<close>
datatype ('e, 'a) BsSkewBinomialTree =
BsNode (val: "('e, 'a::linorder) BsSkewElem")
(rank: nat) (children: "('e , 'a) BsSkewBinomialTree list")
and
('e,'a) BsSkewElem =
Element 'e (eprio: 'a) "('e,'a) BsSkewBinomialTree list"
type_synonym ('e,'a) BsSkewHeap = "unit + ('e,'a) BsSkewElem"
type_synonym ('e,'a) BsSkewBinomialQueue = "('e,'a) BsSkewBinomialTree list"
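(* A bootstrapped heap is either empty (Inl ()) or a single element (Inr x)
   that serves as the global root and caches the overall minimum. *)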
subsubsection "Specialization Boilerplate"
text \<open>
In this section, we re-define the functions
  on the specialized priority queues, and show their correctness.
  This is done by defining a mapping to the original priority queues,
and re-using the correctness lemmas proven there.
\<close>
text \<open>Mapping to original binomial trees and queues\<close>
fun bsmapt where
"bsmapt (BsNode e r q) = SkewBinomialHeapStruc.Node e (eprio e) r (map bsmapt q)"
abbreviation bsmap where
"bsmap q == map bsmapt q"
text \<open>Invariant and mapping to multiset are defined via the mapping\<close>
abbreviation "invar q == SkewBinomialHeapStruc.invar (bsmap q)"
abbreviation "queue_to_multiset q
== image_mset fst (SkewBinomialHeapStruc.queue_to_multiset (bsmap q))"
abbreviation "tree_to_multiset t
== image_mset fst (SkewBinomialHeapStruc.tree_to_multiset (bsmapt t))"
abbreviation "queue_to_multiset_aux q
== (SkewBinomialHeapStruc.queue_to_multiset (bsmap q))"
text \<open>Now the re-implementation of the functions begins.\<close>
primrec prio :: "('e, 'a::linorder) BsSkewBinomialTree \<Rightarrow> 'a" where
"prio (BsNode e r ts) = eprio e"
lemma proj_xlate:
"val t = SkewBinomialHeapStruc.val (bsmapt t)"
"prio t = SkewBinomialHeapStruc.prio (bsmapt t)"
"rank t = SkewBinomialHeapStruc.rank (bsmapt t)"
"bsmap (children t) = SkewBinomialHeapStruc.children (bsmapt t)"
"eprio (SkewBinomialHeapStruc.val (bsmapt t))
= SkewBinomialHeapStruc.prio (bsmapt t)"
apply (case_tac [!] t)
apply auto
done
fun link :: "('e, 'a::linorder) BsSkewBinomialTree
\<Rightarrow> ('e, 'a) BsSkewBinomialTree \<Rightarrow>
('e, 'a) BsSkewBinomialTree" where
"link (BsNode e1 r1 ts1) (BsNode e2 r2 ts2) =
(if eprio e1\<le>eprio e2
then (BsNode e1 (Suc r1) ((BsNode e2 r2 ts2)#ts1))
else (BsNode e2 (Suc r2) ((BsNode e1 r1 ts1)#ts2)))"
text \<open>Link two trees of rank $r$ and a new element to a new tree of
rank $r+1$\<close>
fun skewlink :: "('e,'a::linorder) BsSkewElem \<Rightarrow> ('e, 'a) BsSkewBinomialTree \<Rightarrow>
('e, 'a) BsSkewBinomialTree \<Rightarrow> ('e, 'a) BsSkewBinomialTree" where
"skewlink e t t' = (if eprio e \<le> (prio t) \<and> eprio e \<le> (prio t')
then (BsNode e (Suc (rank t)) [t,t'])
else (if (prio t) \<le> (prio t')
then
BsNode (val t) (Suc (rank t)) (BsNode e 0 [] # t' # children t)
else
BsNode (val t') (Suc (rank t')) (BsNode e 0 [] # t # children t')))"
lemma link_xlate:
"bsmapt (link t t') = SkewBinomialHeapStruc.link (bsmapt t) (bsmapt t')"
"bsmapt (skewlink e t t') =
SkewBinomialHeapStruc.skewlink e (eprio e) (bsmapt t) (bsmapt t')"
by (case_tac [!] t, case_tac [!] t') auto
fun ins :: "('e, 'a::linorder) BsSkewBinomialTree \<Rightarrow>
('e, 'a) BsSkewBinomialQueue \<Rightarrow>
('e, 'a) BsSkewBinomialQueue" where
"ins t [] = [t]" |
"ins t' (t # bq) =
(if (rank t') < (rank t)
then t' # t # bq
else (if (rank t) < (rank t')
then t # (ins t' bq)
else ins (link t' t) bq))"
lemma ins_xlate:
"bsmap (ins t q) = SkewBinomialHeapStruc.ins (bsmapt t) (bsmap q)"
apply2(induct q arbitrary: t) by(auto simp add: proj_xlate link_xlate)
text \<open>Insert an element with priority into a queue using skewlinks.\<close>
fun insert :: "('e,'a::linorder) BsSkewElem \<Rightarrow>
('e, 'a) BsSkewBinomialQueue \<Rightarrow>
('e, 'a) BsSkewBinomialQueue" where
"insert e [] = [BsNode e 0 []]" |
"insert e [t] = [BsNode e 0 [],t]" |
"insert e (t # t' # bq) =
(if rank t \<noteq> rank t'
then (BsNode e 0 []) # t # t' # bq
else (skewlink e t t') # bq)"
lemma insert_xlate:
"bsmap (insert e q) = SkewBinomialHeapStruc.insert e (eprio e) (bsmap q)"
apply (cases "(e,q)" rule: insert.cases)
apply (auto simp add: proj_xlate link_xlate SkewBinomialHeapStruc.insert.simps)
done
lemma insert_correct:
assumes I: "invar q"
shows
"invar (insert e q)"
"queue_to_multiset (insert e q) = queue_to_multiset q + {#(e)#}"
by (simp_all add: I SkewBinomialHeapStruc.insert_correct insert_xlate)
fun uniqify
:: "('e, 'a::linorder) BsSkewBinomialQueue \<Rightarrow> ('e, 'a) BsSkewBinomialQueue"
where
"uniqify [] = []" |
"uniqify (t#bq) = ins t bq"
fun meldUniq
:: "('e, 'a::linorder) BsSkewBinomialQueue \<Rightarrow> ('e,'a) BsSkewBinomialQueue \<Rightarrow>
('e, 'a) BsSkewBinomialQueue" where
"meldUniq [] bq = bq" |
"meldUniq bq [] = bq" |
"meldUniq (t1#bq1) (t2#bq2) = (if rank t1 < rank t2
then t1 # (meldUniq bq1 (t2#bq2))
else (if rank t2 < rank t1
then t2 # (meldUniq (t1#bq1) bq2)
else ins (link t1 t2) (meldUniq bq1 bq2)))"
definition meld
:: "('e, 'a::linorder) BsSkewBinomialQueue \<Rightarrow> ('e, 'a) BsSkewBinomialQueue \<Rightarrow>
('e, 'a) BsSkewBinomialQueue" where
"meld bq1 bq2 = meldUniq (uniqify bq1) (uniqify bq2)"
lemma uniqify_xlate:
"bsmap (uniqify q) = SkewBinomialHeapStruc.uniqify (bsmap q)"
by (cases q) (simp_all add: ins_xlate)
lemma meldUniq_xlate:
"bsmap (meldUniq q q') = SkewBinomialHeapStruc.meldUniq (bsmap q) (bsmap q')"
apply2 (induct q q' rule: meldUniq.induct)
apply (auto simp add: link_xlate proj_xlate uniqify_xlate ins_xlate)
done
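lemma meld_xlate:
  "bsmap (meld q q') = SkewBinomialHeapStruc.meld (bsmap q) (bsmap q')"
  by (simp add: meld_def SkewBinomialHeapStruc.meld_def uniqify_xlate meldUniq_xlate)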
lemma meld_correct:
assumes I: "invar q" "invar q'"
shows
"invar (meld q q')"
"queue_to_multiset (meld q q') = queue_to_multiset q + queue_to_multiset q'"
by (simp_all add: I SkewBinomialHeapStruc.meld_correct meld_xlate)
fun insertList ::
"('e, 'a::linorder) BsSkewBinomialQueue \<Rightarrow> ('e, 'a) BsSkewBinomialQueue \<Rightarrow>
('e, 'a) BsSkewBinomialQueue" where
"insertList [] tbq = tbq" |
"insertList (t#bq) tbq = insertList bq (insert (val t) tbq)"
fun remove1Prio :: "'a \<Rightarrow> ('e, 'a::linorder) BsSkewBinomialQueue \<Rightarrow>
('e, 'a) BsSkewBinomialQueue" where
"remove1Prio a [] = []" |
"remove1Prio a (t#bq) =
(if (prio t) = a then bq else t # (remove1Prio a bq))"
fun getMinTree :: "('e, 'a::linorder) BsSkewBinomialQueue \<Rightarrow>
('e, 'a) BsSkewBinomialTree" where
"getMinTree [t] = t" |
"getMinTree (t#bq) =
(if prio t \<le> prio (getMinTree bq)
then t
else (getMinTree bq))"
definition findMin
:: "('e, 'a::linorder) BsSkewBinomialQueue \<Rightarrow> ('e,'a) BsSkewElem" where
"findMin bq = val (getMinTree bq)"
definition deleteMin :: "('e, 'a::linorder) BsSkewBinomialQueue \<Rightarrow>
('e, 'a) BsSkewBinomialQueue" where
"deleteMin bq = (let min = getMinTree bq in insertList
(filter (\<lambda> t. rank t = 0) (children min))
(meld (rev (filter (\<lambda> t. rank t > 0) (children min)))
(remove1Prio (prio min) bq)))"
lemma insertList_xlate:
"bsmap (insertList q q')
= SkewBinomialHeapStruc.insertList (bsmap q) (bsmap q')"
apply2 (induct q arbitrary: q')
apply (auto simp add: insert_xlate proj_xlate)
done
lemma remove1Prio_xlate:
"bsmap (remove1Prio a q) = SkewBinomialHeapStruc.remove1Prio a (bsmap q)"
apply2(induct q) by(auto simp add: proj_xlate)
lemma getMinTree_xlate:
"q\<noteq>[] \<Longrightarrow> bsmapt (getMinTree q) = SkewBinomialHeapStruc.getMinTree (bsmap q)"
apply2 (induct q)
apply simp
apply (case_tac q)
apply (auto simp add: proj_xlate)
done
lemma findMin_xlate:
"q\<noteq>[] \<Longrightarrow> findMin q = fst (SkewBinomialHeapStruc.findMin (bsmap q))"
apply (unfold findMin_def SkewBinomialHeapStruc.findMin_def)
apply (simp add: proj_xlate Let_def getMinTree_xlate)
done
lemma findMin_xlate_aux:
"q\<noteq>[] \<Longrightarrow> (findMin q, eprio (findMin q)) =
(SkewBinomialHeapStruc.findMin (bsmap q))"
apply (unfold findMin_def SkewBinomialHeapStruc.findMin_def)
apply (simp add: proj_xlate Let_def getMinTree_xlate)
apply2 (induct q)
apply simp
apply (case_tac q)
apply (auto simp add: proj_xlate)
done
(* TODO: Also possible in generic formulation. Then a candidate for Misc.thy *)
lemma bsmap_filter_xlate:
"bsmap [ x\<leftarrow>l . P (bsmapt x) ] = [ x \<leftarrow> bsmap l. P x ]"
apply2(induct l) by auto
lemma bsmap_rev_xlate:
"bsmap (rev q) = rev (bsmap q)"
apply2(induct q) by auto
lemma deleteMin_xlate:
"q\<noteq>[] \<Longrightarrow> bsmap (deleteMin q) = SkewBinomialHeapStruc.deleteMin (bsmap q)"
apply (simp add:
deleteMin_def SkewBinomialHeapStruc.deleteMin_def
proj_xlate getMinTree_xlate insertList_xlate meld_xlate remove1Prio_xlate
Let_def bsmap_rev_xlate, (subst bsmap_filter_xlate)?)+
done
lemma deleteMin_correct_aux:
assumes I: "invar q"
assumes NE: "q\<noteq>[]"
shows
"invar (deleteMin q)"
"queue_to_multiset_aux (deleteMin q) = queue_to_multiset_aux q -
{# (findMin q, eprio (findMin q)) #}"
apply (simp_all add:
I NE deleteMin_xlate findMin_xlate_aux
SkewBinomialHeapStruc.deleteMin_correct)
done
lemma bsmap_fs_dep:
"(e,a)\<in>#SkewBinomialHeapStruc.tree_to_multiset (bsmapt t) \<Longrightarrow> a=eprio e"
"(e,a)\<in>#SkewBinomialHeapStruc.queue_to_multiset (bsmap q) \<Longrightarrow> a=eprio e"
thm SkewBinomialHeapStruc.tree_to_multiset_queue_to_multiset.induct
apply2 (induct "bsmapt t" and "bsmap q" arbitrary: t and q
rule: SkewBinomialHeapStruc.tree_to_multiset_queue_to_multiset.induct)
apply auto
apply (case_tac t)
apply (auto split: if_split_asm)
done
lemma bsmap_fs_depD:
"(e,a)\<in>#SkewBinomialHeapStruc.tree_to_multiset (bsmapt t)
\<Longrightarrow> e \<in># tree_to_multiset t \<and> a=eprio e"
"(e,a)\<in>#SkewBinomialHeapStruc.queue_to_multiset (bsmap q)
\<Longrightarrow> e \<in># queue_to_multiset q \<and> a=eprio e"
by (auto dest: bsmap_fs_dep intro!: image_eqI)
lemma findMin_correct_aux:
assumes I: "invar q"
assumes NE: "q\<noteq>[]"
shows "(findMin q, eprio (findMin q)) \<in># queue_to_multiset_aux q"
"\<forall>y\<in>set_mset (queue_to_multiset_aux q). snd (findMin q,eprio (findMin q)) \<le> snd y"
apply (simp_all add:
I NE findMin_xlate_aux
SkewBinomialHeapStruc.findMin_correct)
done
lemma findMin_correct:
assumes I: "invar q"
and NE: "q\<noteq>[]"
shows "findMin q \<in># queue_to_multiset q"
and "\<forall>y\<in>set_mset (queue_to_multiset q). eprio (findMin q) \<le> eprio y"
using findMin_correct_aux[OF I NE]
apply simp_all
apply (force dest: bsmap_fs_depD)
apply auto
proof goal_cases
case prems: (1 a b)
from prems(3) have "(a, eprio a) \<in># queue_to_multiset_aux q"
apply -
apply (frule bsmap_fs_dep)
apply simp
done
with prems(2)[rule_format, simplified]
show ?case by auto
qed
lemma deleteMin_correct:
assumes I: "invar q"
assumes NE: "q\<noteq>[]"
shows
"invar (deleteMin q)"
"queue_to_multiset (deleteMin q) = queue_to_multiset q -
{# findMin q #}"
using deleteMin_correct_aux[OF I NE]
apply simp_all
apply (rule mset_image_fst_dep_pair_diff_split)
apply (auto dest: bsmap_fs_dep)
done
declare insert.simps[simp del]
subsubsection "Bootstrapping: Phase 1"
text \<open>
In this section, we define the ticked versions
of the functions, as defined in \cite{BrOk96}.
These functions work on elements, i.e. only on
heaps that contain at least one entry.
Additionally, we define an invariant for elements, and
a mapping to multisets of entries, and prove correct
the ticked functions.
\<close>
primrec findMin' where "findMin' (Element e a q) = (e,a)"
fun meld':: "('e,'a::linorder) BsSkewElem \<Rightarrow>
('e,'a) BsSkewElem \<Rightarrow> ('e,'a) BsSkewElem"
where "meld' (Element e1 a1 q1) (Element e2 a2 q2) =
(if a1\<le>a2 then
Element e1 a1 (insert (Element e2 a2 q2) q1)
else
Element e2 a2 (insert (Element e1 a1 q1) q2)
)"
fun insert' where
"insert' e a q = meld' (Element e a []) q"
fun deleteMin' where
"deleteMin' (Element e a q) = (
case (findMin q) of
Element ey ay q1 \<Rightarrow>
Element ey ay (meld q1 (deleteMin q))
)"
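(* deleteMin' discards the global root, promotes the minimal element of the
   root's queue to the new global root, and melds that element's own queue
   with the remainder of the old queue. *)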
text \<open>
Size-function for termination proofs
\<close>
fun tree_level and queue_level where
"tree_level (BsNode (Element _ _ qd) _ q) =
max (Suc (queue_level qd)) (queue_level q)" |
"queue_level [] = (0::nat)" |
"queue_level (t#q) = max (tree_level t) (queue_level q)"
fun level where
"level (Element _ _ q) = Suc (queue_level q)"
lemma level_m:
"x\<in>#tree_to_multiset t \<Longrightarrow> level x < Suc (tree_level t)"
"x\<in>#queue_to_multiset q \<Longrightarrow> level x < Suc (queue_level q)"
apply2 (induct t and q rule: tree_level_queue_level.induct)
apply (case_tac [!] x)
apply (auto simp add: less_max_iff_disj)
done
lemma level_measure:
"x \<in> set_mset (queue_to_multiset q) \<Longrightarrow> (x,(Element e a q))\<in>measure level"
"x \<in># (queue_to_multiset q) \<Longrightarrow> (x,(Element e a q))\<in>measure level"
apply (case_tac [!] x)
apply (auto dest: level_m simp del: set_image_mset)
done
text \<open>
Invariant for elements
\<close>
function elem_invar where
"elem_invar (Element e a q) \<longleftrightarrow>
(\<forall>x. x\<in># (queue_to_multiset q) \<longrightarrow> a \<le> eprio x \<and> elem_invar x) \<and>
invar q"
by pat_completeness auto
termination
proof
show "wf (measure level)" by auto
qed (rule level_measure)
text \<open>
Abstraction to multisets
\<close>
function elem_to_mset where
"elem_to_mset (Element e a q) = {# (e,a) #}
+ Union_mset (image_mset elem_to_mset (queue_to_multiset q))"
by pat_completeness auto
termination
proof
show "wf (measure level)" by auto
qed (rule level_measure)
lemma insert_correct':
assumes I: "elem_invar x"
shows
"elem_invar (insert' e a x)"
"elem_to_mset (insert' e a x) = elem_to_mset x + {#(e,a)#}"
using I
apply (case_tac [!] x)
apply (auto simp add: insert_correct union_ac)
done
lemma meld_correct':
assumes I: "elem_invar x" "elem_invar x'"
shows
"elem_invar (meld' x x')"
"elem_to_mset (meld' x x') = elem_to_mset x + elem_to_mset x'"
using I
apply (case_tac [!] x)
apply (case_tac [!] x')
apply (auto simp add: insert_correct union_ac)
done
lemma findMin'_min:
"\<lbrakk>elem_invar x; y\<in>#elem_to_mset x\<rbrakk> \<Longrightarrow> snd (findMin' x) \<le> snd y"
proof2 (induct n\<equiv>"level x" arbitrary: x rule: full_nat_induct)
case 1
note IH="1.hyps"[rule_format, OF _ refl]
note PREMS="1.prems"
obtain e a q where [simp]: "x=Element e a q" by (cases x) auto
from PREMS(2) have "y=(e,a) \<or>
y\<in>#Union_mset (image_mset elem_to_mset (queue_to_multiset q))"
(is "?C1 \<or> ?C2")
by (auto split: if_split_asm)
moreover {
assume "y=(e,a)"
with PREMS have ?case by simp
} moreover {
assume ?C2
then obtain yx where
A: "yx \<in># queue_to_multiset q" and
B: "y \<in># elem_to_mset yx"
apply (auto elim!: in_image_msetE)
done
from A PREMS have IYX: "elem_invar yx" by auto
from PREMS(1) A have "a \<le> eprio yx" by auto
hence "snd (findMin' x) \<le> snd (findMin' yx)"
by (cases yx) auto
also
from IH[OF _ IYX B] level_m(2)[OF A]
have "snd (findMin' yx) \<le> snd y" by simp
finally have ?case .
} ultimately show ?case by blast
qed
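lemma findMin_correct':
  assumes I: "elem_invar x"
  shows "findMin' x \<in># elem_to_mset x"
    "\<forall>y\<in>set_mset (elem_to_mset x). snd (findMin' x) \<le> snd y"
  using I findMin'_min[OF I] by (cases x) auto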
lemma deleteMin_correct':
assumes I: "elem_invar (Element e a q)"
assumes NE[simp]: "q\<noteq>[]"
shows
"elem_invar (deleteMin' (Element e a q))"
"elem_to_mset (deleteMin' (Element e a q)) =
elem_to_mset (Element e a q) - {# findMin' (Element e a q) #}"
proof -
from I have IQ[simp]: "invar q" by simp
from findMin_correct[OF IQ NE] have
FMIQ: "findMin q \<in># queue_to_multiset q" and
FMIN: "!!y. y\<in>#(queue_to_multiset q) \<Longrightarrow> eprio (findMin q) \<le> eprio y"
by (auto simp del: set_image_mset)
from FMIQ I have FMEI: "elem_invar (findMin q)" by auto
from I have FEI: "!!y. y\<in>#(queue_to_multiset q) \<Longrightarrow> elem_invar y" by auto
obtain ey ay qy where [simp]: "findMin q = Element ey ay qy"
by (cases "findMin q") auto
from FMEI have
IQY[simp]: "invar qy" and
AYMIN: "!!x. x \<in># queue_to_multiset qy \<Longrightarrow> ay \<le> eprio x" and
QEI: "!!x. x \<in># queue_to_multiset qy \<Longrightarrow> elem_invar x"
by auto
show "elem_invar (deleteMin' (Element e a q))"
using AYMIN QEI FMIN FEI
by (auto simp add: deleteMin_correct meld_correct in_diff_count)
from FMIQ have
S: "(queue_to_multiset q - {#Element ey ay qy#}) + {#Element ey ay qy#}
= queue_to_multiset q" by simp
show "elem_to_mset (deleteMin' (Element e a q)) =
elem_to_mset (Element e a q) - {# findMin' (Element e a q) #}"
apply (simp add: deleteMin_correct meld_correct)
by (subst S[symmetric], simp add: union_ac)
qed
subsubsection "Bootstrapping: Phase 2"
text \<open>
In this phase, we extend the ticked versions to also work with
empty priority queues.
\<close>
definition bs_empty where "bs_empty \<equiv> Inl ()"
primrec bs_findMin where
"bs_findMin (Inr x) = findMin' x"
fun bs_meld
:: "('e,'a::linorder) BsSkewHeap \<Rightarrow> ('e,'a) BsSkewHeap \<Rightarrow> ('e,'a) BsSkewHeap"
where
"bs_meld (Inl _) x = x" |
"bs_meld x (Inl _) = x" |
"bs_meld (Inr x) (Inr x') = Inr (meld' x x')"
primrec bs_insert
:: "'e \<Rightarrow> ('a::linorder) \<Rightarrow> ('e,'a) BsSkewHeap \<Rightarrow> ('e,'a) BsSkewHeap"
where
"bs_insert e a (Inl _) = Inr (Element e a [])" |
"bs_insert e a (Inr x) = Inr (insert' e a x)"
fun bs_deleteMin
:: "('e,'a::linorder) BsSkewHeap \<Rightarrow> ('e,'a) BsSkewHeap"
where
"bs_deleteMin (Inr (Element e a [])) = Inl ()" |
"bs_deleteMin (Inr (Element e a q)) = Inr (deleteMin' (Element e a q))"
primrec bs_invar :: "('e,'a::linorder) BsSkewHeap \<Rightarrow> bool"
where
"bs_invar (Inl _) \<longleftrightarrow> True" |
"bs_invar (Inr x) \<longleftrightarrow> elem_invar x"
lemma [simp]: "bs_invar bs_empty" by (simp add: bs_empty_def)
primrec bs_to_mset :: "('e,'a::linorder) BsSkewHeap \<Rightarrow> ('e\<times>'a) multiset"
where
"bs_to_mset (Inl _) = {#}" |
"bs_to_mset (Inr x) = elem_to_mset x"
theorem bs_empty_correct: "h=bs_empty \<longleftrightarrow> bs_to_mset h = {#}"
apply (unfold bs_empty_def)
apply (cases h)
apply simp
apply (case_tac b)
apply simp
done
lemma bs_mset_of_empty[simp]:
"bs_to_mset bs_empty = {#}"
by (simp add: bs_empty_def)
theorem bs_findMin_correct:
assumes I: "bs_invar h"
assumes NE: "h\<noteq>bs_empty"
shows "bs_findMin h \<in># bs_to_mset h"
"\<forall>y\<in>set_mset (bs_to_mset h). snd (bs_findMin h) \<le> snd y"
using I NE
apply (case_tac [!] h)
apply (auto simp add: bs_empty_def findMin_correct')
done
theorem bs_insert_correct:
assumes I: "bs_invar h"
shows
"bs_invar (bs_insert e a h)"
"bs_to_mset (bs_insert e a h) = {#(e,a)#} + bs_to_mset h"
using I
apply (case_tac [!] h)
apply (simp_all)
apply (auto simp add: meld_correct')
done
theorem bs_meld_correct:
assumes I: "bs_invar h" "bs_invar h'"
shows
"bs_invar (bs_meld h h')"
"bs_to_mset (bs_meld h h') = bs_to_mset h + bs_to_mset h'"
using I
apply (case_tac [!] h, case_tac [!] h')
apply (auto simp add: meld_correct')
done
theorem bs_deleteMin_correct:
assumes I: "bs_invar h"
assumes NE: "h \<noteq> bs_empty"
shows
"bs_invar (bs_deleteMin h)"
"bs_to_mset (bs_deleteMin h) = bs_to_mset h - {#bs_findMin h#}"
using I NE
apply (case_tac [!] h)
apply (simp_all add: bs_empty_def)
apply (case_tac [!] b)
apply (rename_tac [!] list)
apply (case_tac [!] list)
apply (simp_all del: elem_invar.simps deleteMin'.simps add: deleteMin_correct')
done
end
interpretation BsSkewBinomialHeapStruc: Bootstrapped .
subsection "Hiding the Invariant"
subsubsection "Datatype"
typedef (overloaded) ('e, 'a) SkewBinomialHeap =
"{q :: ('e,'a::linorder) BsSkewBinomialHeapStruc.BsSkewHeap. BsSkewBinomialHeapStruc.bs_invar q }"
apply (rule_tac x="BsSkewBinomialHeapStruc.bs_empty" in exI)
apply (auto)
done
lemma Rep_SkewBinomialHeap_invar[simp]:
"BsSkewBinomialHeapStruc.bs_invar (Rep_SkewBinomialHeap x)"
using Rep_SkewBinomialHeap
by (auto)
lemma [simp]:
"BsSkewBinomialHeapStruc.bs_invar q
\<Longrightarrow> Rep_SkewBinomialHeap (Abs_SkewBinomialHeap q) = q"
using Abs_SkewBinomialHeap_inverse by auto
lemma [simp, code abstype]: "Abs_SkewBinomialHeap (Rep_SkewBinomialHeap q) = q"
by (rule Rep_SkewBinomialHeap_inverse)
locale SkewBinomialHeap_loc
begin
subsubsection "Operations"
definition [code]:
"to_mset t
== BsSkewBinomialHeapStruc.bs_to_mset (Rep_SkewBinomialHeap t)"
definition empty where
"empty == Abs_SkewBinomialHeap BsSkewBinomialHeapStruc.bs_empty"
lemma [code abstract, simp]:
"Rep_SkewBinomialHeap empty = BsSkewBinomialHeapStruc.bs_empty"
by (unfold empty_def) simp
definition [code]:
"isEmpty q == Rep_SkewBinomialHeap q = BsSkewBinomialHeapStruc.bs_empty"
lemma empty_rep:
"q=empty \<longleftrightarrow> Rep_SkewBinomialHeap q = BsSkewBinomialHeapStruc.bs_empty"
apply (auto simp add: empty_def)
apply (metis Rep_SkewBinomialHeap_inverse)
done
lemma isEmpty_correct: "isEmpty q \<longleftrightarrow> q=empty"
by (simp add: empty_rep isEmpty_def)
definition
insert
:: "'e \<Rightarrow> ('a::linorder) \<Rightarrow> ('e,'a) SkewBinomialHeap
\<Rightarrow> ('e,'a) SkewBinomialHeap"
where "insert e a q ==
Abs_SkewBinomialHeap (
BsSkewBinomialHeapStruc.bs_insert e a (Rep_SkewBinomialHeap q))"
lemma [code abstract]:
"Rep_SkewBinomialHeap (insert e a q)
= BsSkewBinomialHeapStruc.bs_insert e a (Rep_SkewBinomialHeap q)"
by (simp add: insert_def BsSkewBinomialHeapStruc.bs_insert_correct)
definition [code]: "findMin q
== BsSkewBinomialHeapStruc.bs_findMin (Rep_SkewBinomialHeap q)"
definition "deleteMin q ==
if q=empty then empty
else Abs_SkewBinomialHeap (
BsSkewBinomialHeapStruc.bs_deleteMin (Rep_SkewBinomialHeap q))"
text \<open>
  We don't use equality here, to prevent the code generator
  from introducing an equality-class parameter for type \<open>'a\<close>.
  Instead, we use a case distinction to check for emptiness.
\<close>
lemma [code abstract]: "Rep_SkewBinomialHeap (deleteMin q) =
(case (Rep_SkewBinomialHeap q) of Inl _ \<Rightarrow> BsSkewBinomialHeapStruc.bs_empty |
_ \<Rightarrow> BsSkewBinomialHeapStruc.bs_deleteMin (Rep_SkewBinomialHeap q))"
proof (cases "(Rep_SkewBinomialHeap q)")
case [simp]: (Inl a)
hence "(Rep_SkewBinomialHeap q) = BsSkewBinomialHeapStruc.bs_empty"
apply (cases q)
apply (auto simp add: BsSkewBinomialHeapStruc.bs_empty_def)
done
thus ?thesis
apply (auto simp add: deleteMin_def
BsSkewBinomialHeapStruc.bs_deleteMin_correct
BsSkewBinomialHeapStruc.bs_empty_correct empty_rep )
done
next
case (Inr x)
hence "(Rep_SkewBinomialHeap q) \<noteq> BsSkewBinomialHeapStruc.bs_empty"
apply (cases q)
apply (auto simp add: BsSkewBinomialHeapStruc.bs_empty_def)
done
thus ?thesis
apply (simp add: Inr)
apply (fold Inr)
apply (auto simp add: deleteMin_def
BsSkewBinomialHeapStruc.bs_deleteMin_correct
BsSkewBinomialHeapStruc.bs_empty_correct empty_rep )
done
qed
(*
lemma [code abstract]: "Rep_SkewBinomialHeap (deleteMin q) =
(if (Rep_SkewBinomialHeap q = BsSkewBinomialHeapStruc.bs_empty) then BsSkewBinomialHeapStruc.bs_empty
else BsSkewBinomialHeapStruc.bs_deleteMin (Rep_SkewBinomialHeap q))"
by (auto simp add: deleteMin_def BsSkewBinomialHeapStruc.bs_deleteMin_correct
BsSkewBinomialHeapStruc.bs_empty_correct empty_rep)
*)
definition "meld q1 q2 ==
Abs_SkewBinomialHeap (BsSkewBinomialHeapStruc.bs_meld
(Rep_SkewBinomialHeap q1) (Rep_SkewBinomialHeap q2))"
lemma [code abstract]:
"Rep_SkewBinomialHeap (meld q1 q2)
= BsSkewBinomialHeapStruc.bs_meld (Rep_SkewBinomialHeap q1)
(Rep_SkewBinomialHeap q2)"
by (simp add: meld_def BsSkewBinomialHeapStruc.bs_meld_correct)
subsubsection "Correctness"
lemma empty_correct: "to_mset q = {#} \<longleftrightarrow> q=empty"
by (simp add: to_mset_def BsSkewBinomialHeapStruc.bs_empty_correct empty_rep)
lemma to_mset_of_empty[simp]: "to_mset empty = {#}"
by (simp add: empty_correct)
lemma insert_correct: "to_mset (insert e a q) = to_mset q + {#(e,a)#}"
apply (unfold insert_def to_mset_def)
apply (simp add: BsSkewBinomialHeapStruc.bs_insert_correct union_ac)
done
lemma findMin_correct:
assumes "q\<noteq>empty"
shows
"findMin q \<in># to_mset q"
"\<forall>y\<in>set_mset (to_mset q). snd (findMin q) \<le> snd y"
using assms
apply (unfold findMin_def to_mset_def)
apply (simp_all add: empty_rep BsSkewBinomialHeapStruc.bs_findMin_correct)
done
lemma deleteMin_correct:
assumes "q\<noteq>empty"
shows "to_mset (deleteMin q) = to_mset q - {# findMin q #}"
using assms
apply (unfold findMin_def deleteMin_def to_mset_def)
apply (simp_all add: empty_rep BsSkewBinomialHeapStruc.bs_deleteMin_correct)
done
lemma meld_correct:
shows "to_mset (meld q q') = to_mset q + to_mset q'"
apply (unfold to_mset_def meld_def)
apply (simp_all add: BsSkewBinomialHeapStruc.bs_meld_correct)
done
text \<open>Correctness lemmas to be used with simplifier\<close>
lemmas correct = empty_correct deleteMin_correct meld_correct
end
interpretation SkewBinomialHeap: SkewBinomialHeap_loc .
subsection "Documentation"
(*#DOC
fun [no_spec] SkewBinomialHeap.to_mset
Abstraction to multiset.
fun SkewBinomialHeap.empty
The empty heap. ($O(1)$)
fun SkewBinomialHeap.isEmpty
  Checks whether the heap is empty. Mainly used to work around
  code-generation issues. ($O(1)$)
fun [long_type] SkewBinomialHeap.insert
Inserts element ($O(1)$)
fun SkewBinomialHeap.findMin
Returns a minimal element ($O(1)$)
fun [long_type] SkewBinomialHeap.deleteMin
Deletes {\em the} element that is returned by {\em find\_min}. $O(\log(n))$
fun [long_type] SkewBinomialHeap.meld
Melds two heaps ($O(1)$)
*)
text \<open>
\underline{@{term_type "SkewBinomialHeap.to_mset"}}\\
Abstraction to multiset.\\
\underline{@{term_type "SkewBinomialHeap.empty"}}\\
The empty heap. ($O(1)$)\\
{\bf Spec} \<open>SkewBinomialHeap.empty_correct\<close>:
@{thm [display] SkewBinomialHeap.empty_correct[no_vars]}
\underline{@{term_type "SkewBinomialHeap.isEmpty"}}\\
  Checks whether the heap is empty. Mainly used to work around
  code-generation issues. ($O(1)$)\\
{\bf Spec} \<open>SkewBinomialHeap.isEmpty_correct\<close>:
@{thm [display] SkewBinomialHeap.isEmpty_correct[no_vars]}
\underline{@{term "SkewBinomialHeap.insert"}}
@{term_type [display] "SkewBinomialHeap.insert"}
Inserts element ($O(1)$)\\
{\bf Spec} \<open>SkewBinomialHeap.insert_correct\<close>:
@{thm [display] SkewBinomialHeap.insert_correct[no_vars]}
\underline{@{term_type "SkewBinomialHeap.findMin"}}\\
Returns a minimal element ($O(1)$)\\
{\bf Spec} \<open>SkewBinomialHeap.findMin_correct\<close>:
@{thm [display] SkewBinomialHeap.findMin_correct[no_vars]}
\underline{@{term "SkewBinomialHeap.deleteMin"}}
@{term_type [display] "SkewBinomialHeap.deleteMin"}
Deletes {\em the} element that is returned by {\em find\_min}. $O(\log(n))$\\
{\bf Spec} \<open>SkewBinomialHeap.deleteMin_correct\<close>:
@{thm [display] SkewBinomialHeap.deleteMin_correct[no_vars]}
\underline{@{term "SkewBinomialHeap.meld"}}
@{term_type [display] "SkewBinomialHeap.meld"}
Melds two heaps ($O(1)$)\\
{\bf Spec} \<open>SkewBinomialHeap.meld_correct\<close>:
@{thm [display] SkewBinomialHeap.meld_correct[no_vars]}
\<close>
end
|
function OptimizationModel = buildMTAproblemFromModel(model,rxnFBS,Vref,varargin)
% Returns the COBRA Optimization model needed to perform the MTA
%
% USAGE:
%
% OptimizationModel = buildMTAproblemFromModel(model,rxnFBS,Vref,alpha,epsilon)
%
% INPUT:
% model: Metabolic model (COBRA format)
% rxnFBS: Forward, Backward and Unchanged (+1;0;-1) values
% corresponding to each reaction.
% Vref: Reference flux of the source state.
% alpha: parameter of the quadratic problem (default = 0.66)
%    epsilon:    minimum disturbance for each reaction (default = 0)
%
% OUTPUTS:
%    OptimizationModel:    COBRA model struct that includes the
%                          stoichiometric constraints, the thermodynamic
%                          constraints and the binary variables.
%
% .. Authors:
% - Luis V. Valcarcel, 03/06/2015, University of Navarra, CIMA & TECNUN School of Engineering.
% - Luis V. Valcarcel, 26/10/2018, University of Navarra, CIMA & TECNUN School of Engineering.
p = inputParser; % check the inputs
% check required arguments
addRequired(p, 'model');
addRequired(p, 'rxnFBS', @isnumeric);
addRequired(p, 'Vref', @isnumeric);
% Check optional arguments
addOptional(p, 'alpha', 0.66, @isnumeric);
addOptional(p, 'epsilon', zeros(size(model.rxns)), @isnumeric);
% extract variables from parser
parse(p, model, rxnFBS, Vref, varargin{:});
alpha = p.Results.alpha;
epsilon = p.Results.epsilon;
% sometimes epsilon can be given as a single value
if numel(epsilon)==1
epsilon = epsilon * ones(size(model.rxns));
end
%% --- set the COBRA model ---
% variables
v = 1:length(model.rxns);
y_plus_F = (1:sum(rxnFBS==+1)) + v(end); % 1 if change in rxnForward, 0 otherwise
y_minus_F = (1:sum(rxnFBS==+1)) + y_plus_F(end); % 1 if no change in rxnForward, 0 otherwise
y_plus_B = (1:sum(rxnFBS==-1)) + y_minus_F(end); % 1 if change in rxnBackward, 0 otherwise
y_minus_B = (1:sum(rxnFBS==-1)) + y_plus_B(end); % 1 if no change in rxnBackward, 0 otherwise
n_var = y_minus_B(end);
% limits of the variables
lb = zeros(n_var,1);
ub = ones (n_var,1);
lb(v) = model.lb;
ub(v) = model.ub;
% type of variables
vartype(1:n_var) = 'B';
vartype(v) = 'C';
% constraints
Eq1 = 1:length(model.mets); % Stoichiometric matrix
Eq2 = (1:length(y_plus_F)) + Eq1(end); % Changes in Forward
Eq3 = (1:length(y_plus_F)) + Eq2(end); % Change or not change in Forward
Eq4 = (1:length(y_plus_B)) + Eq3(end); % Changes in Backward
Eq5 = (1:length(y_plus_B)) + Eq4(end); % Change or not change in Backward
nCon = Eq5(end);
% generate constraint matrix
A = spalloc(nCon, n_var, nnz(model.S) + 5*length(Eq2) + 5*length(Eq4));
b = zeros(nCon,1);
csense = char(zeros(nCon,1));
posF = find(rxnFBS == +1);
posB = find(rxnFBS == -1);
posS = find(rxnFBS == 0);
% First constraint, stoichiometric
A(Eq1,v) = model.S;
b(Eq1) = 0;
csense(Eq1) = 'E';
% Second constraint, changes in Forward
A(Eq2,v(posF)) = eye(length(posF));
A(Eq2,y_plus_F) = - ( Vref(posF) + epsilon(posF) ) .* eye(length(posF));
A(Eq2,y_minus_F) = - model.lb(posF) .* eye(length(posF));
b(Eq2) = 0;
csense(Eq2) = 'G';
% Third constraint, change or not change in Forward
A(Eq3,y_plus_F) = eye(length(Eq3));
A(Eq3,y_minus_F) = eye(length(Eq3));
b(Eq3) = 1;
csense(Eq3) = 'E';
% Fourth constraint, changes in Backward
A(Eq4,posB) = eye(length(posB));
A(Eq4,y_plus_B) = - ( Vref(posB) - epsilon(posB) ) .* eye(length(posB));
A(Eq4,y_minus_B) = - model.ub(posB) .* eye(length(posB));
b(Eq4) = 0;
csense(Eq4) = 'L';
% Fifth constraint, change or not change in Backward
A(Eq5,y_plus_B) = eye(length(Eq5));
A(Eq5,y_minus_B) = eye(length(Eq5));
b(Eq5) = 1;
csense(Eq5) = 'E';
% Objective function
% linear part
c = zeros(n_var,1);
c(y_minus_F) = alpha/2;
c(y_minus_B) = alpha/2;
c(v(posS)) = -2 * Vref(posS) * (1-alpha);
% quadratic part
F = spalloc(n_var,n_var,length(posS));
F(v(posS),v(posS)) = 2 * (1-alpha) .* eye(length(posS));
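% Assuming the usual COBRA MIQP convention min c'*x + (1/2)*x'*F*x, the posS
% entries of c and F above implement (1-alpha)*||v(posS) - Vref(posS)||^2 up
% to an additive constant: a least-squares penalty keeping the reactions that
% should stay unchanged close to their reference flux.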
% save the resultant model
OptimizationModel = struct();
[OptimizationModel.A, OptimizationModel.lb, OptimizationModel.ub] = deal(A, lb, ub);
[OptimizationModel.b, OptimizationModel.csense] = deal(b, csense);
[OptimizationModel.c, OptimizationModel.F] = deal(c, F);
[OptimizationModel.osense, OptimizationModel.vartype] = deal(+1, vartype); % +1 for minimization
% save the indices of the variables
OptimizationModel.idx_variables.v = v;
OptimizationModel.idx_variables.y_plus_F = y_plus_F;
OptimizationModel.idx_variables.y_minus_F = y_minus_F;
OptimizationModel.idx_variables.y_plus_B = y_plus_B;
OptimizationModel.idx_variables.y_minus_B = y_minus_B;
end
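% Example usage (a sketch; assumes `model`, `rxnFBS` and `Vref` are already in
% the workspace and that a MIQP solver is configured for the COBRA toolbox):
%
%   OptModel = buildMTAproblemFromModel(model, rxnFBS, Vref, 0.66);
%   solution = solveCobraMIQP(OptModel);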
|
example (P : Prop) : P → P :=
begin
intro p,
exact p,
end
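-- The same proof as a term: `example (P : Prop) : P → P := λ p, p`, i.e. `id`.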
|
lemma big_small_trans': "f \<in> L F (g) \<Longrightarrow> g \<in> l F (h) \<Longrightarrow> f \<in> L F (h)"
|
open import MLib.Algebra.PropertyCode
open import MLib.Algebra.PropertyCode.Structures
module MLib.Matrix.Tensor {c ℓ} (struct : Struct bimonoidCode c ℓ) where
open import MLib.Prelude
open import MLib.Matrix.Core
open import MLib.Matrix.Equality struct
open import MLib.Matrix.Mul struct
open import MLib.Algebra.Operations struct
open Table using (head; tail; rearrange; fromList; toList; _≗_; replicate)
open Nat using () renaming (_+_ to _+ℕ_; _*_ to _*ℕ_)
open FunctionProperties
open import MLib.Fin.Parts.Simple
-- Tensor product
_⊠_ : ∀ {m n p q} → Matrix S m n → Matrix S p q → Matrix S (m *ℕ p) (n *ℕ q)
(A ⊠ B) i j =
let i₁ , i₂ = toParts i
j₁ , j₂ = toParts j
in A i₁ j₁ *′ B i₂ j₂
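-- This is the Kronecker product: the entry at packed indices (i , j) is the
-- product of the corresponding entries of A and B, with toParts splitting
-- each packed index into its two components.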
private
≡⇒≡×≡×≡ :
∀ {a b c} {A : Set a} {B : Set b} {C : Set c}
{i i′ : A} {j j′ : B} {k k′ : C} →
(i , j , k) ≡ (i′ , j′ , k′) →
i ≡ i′ × j ≡ j′ × k ≡ k′
≡⇒≡×≡×≡ = Σ.map id Σ.≡⇒≡×≡ ∘ Σ.≡⇒≡×≡
open _≃_
module _ ⦃ props : Has (associative on * ∷ []) ⦄ {m n p q r s} where
⊠-associative :
(A : Matrix S m n) (B : Matrix S p q) (C : Matrix S r s) →
(A ⊠ B) ⊠ C ≃ A ⊠ (B ⊠ C)
⊠-associative A B C .m≡p = Nat.*-assoc m p r
⊠-associative A B C .n≡q = Nat.*-assoc n q s
⊠-associative A B C .equal {i} {i′} {j} {j′} i≅i′ j≅j′ =
let i₁ , i₂ , i₃ = toParts³ m p r i
j₁ , j₂ , j₃ = toParts³ n q s j
i′₁ , i′₂ , i′₃ = toParts³′ m p r i′
j′₁ , j′₂ , j′₃ = toParts³′ n q s j′
i₁-eq , i₂-eq , i₃-eq = ≡⇒≡×≡×≡ (toParts-assoc m p r i≅i′)
j₁-eq , j₂-eq , j₃-eq = ≡⇒≡×≡×≡ (toParts-assoc n q s j≅j′)
open EqReasoning S.setoid
in begin
((A ⊠ B) ⊠ C) i j ≡⟨⟩
A i₁ j₁ *′ B i₂ j₂ *′ C i₃ j₃ ≈⟨ from props (associative on *) _ _ _ ⟩
A i₁ j₁ *′ (B i₂ j₂ *′ C i₃ j₃) ≡⟨ ≡.cong₂ _*′_ (≡.cong₂ A i₁-eq j₁-eq) (≡.cong₂ _*′_ (≡.cong₂ B i₂-eq j₂-eq) (≡.cong₂ C i₃-eq j₃-eq)) ⟩
A i′₁ j′₁ *′ (B i′₂ j′₂ *′ C i′₃ j′₃) ≡⟨⟩
(A ⊠ (B ⊠ C)) i′ j′ ∎
⊠-cong : ∀ {m n m′ n′} {p q p′ q′} {A : Matrix S m n} {A′ : Matrix S m′ n′} {B : Matrix S p q} {B′ : Matrix S p′ q′} → A ≃ A′ → B ≃ B′ → (A ⊠ B) ≃ (A′ ⊠ B′)
⊠-cong A≃A′ B≃B′ with A≃A′ .m≡p | B≃B′ .m≡p | A≃A′ .n≡q | B≃B′ .n≡q
⊠-cong {A = A} {A′} {B} {B′} A≃A′ B≃B′ | ≡.refl | ≡.refl | ≡.refl | ≡.refl = lem
where
lem : (A ⊠ B) ≃ (A′ ⊠ B′)
lem .m≡p = ≡.refl
lem .n≡q = ≡.refl
lem .equal ≅.refl ≅.refl = cong * (A≃A′ .equal ≅.refl ≅.refl) (B≃B′ .equal ≅.refl ≅.refl)
⊠-identityˡ :
⦃ props : Has (1# is leftIdentity for * ∷ []) ⦄ →
∀ {m n} (A : Matrix S m n) → 1● {1} ⊠ A ≃ A
⊠-identityˡ A .m≡p = Nat.*-identityˡ _
⊠-identityˡ A .n≡q = Nat.*-identityˡ _
⊠-identityˡ ⦃ props ⦄ A .equal {i} {i′} {j} {j′} i≅i′ j≅j′ =
let i₁ , i₂ = toParts {1} i
j₁ , j₂ = toParts {1} j
-- x : i₁ ≡ zero
-- x′ : i₂ ≡ i′
-- y : j₁ ≡ zero
-- y′ : j₂ ≡ j′
x , x′ = Σ.≡⇒≡×≡ (toParts-1ˡ i i′ i≅i′)
y , y′ = Σ.≡⇒≡×≡ (toParts-1ˡ j j′ j≅j′)
open EqReasoning S.setoid
in begin
(1● {1} ⊠ A) i j ≡⟨⟩
1● {1} i₁ j₁ *′ A i₂ j₂ ≡⟨ ≡.cong₂ _*′_ (≡.cong₂ (1● {1}) x y) (≡.cong₂ A x′ y′) ⟩
1● {1} zero zero *′ A i′ j′ ≡⟨⟩
1′ *′ A i′ j′ ≈⟨ from props (1# is leftIdentity for *) _ ⟩
A i′ j′ ∎
⊠-identityʳ :
⦃ props : Has (1# is rightIdentity for * ∷ []) ⦄ →
∀ {m n} (A : Matrix S m n) → A ⊠ 1● {1} ≃ A
⊠-identityʳ A .m≡p = Nat.*-identityʳ _
⊠-identityʳ A .n≡q = Nat.*-identityʳ _
⊠-identityʳ A .equal {i} {i′} {j} {j′} i≅i′ j≅j′ with Σ.≡⇒≡×≡ (toParts-1ʳ i i′ i≅i′) | Σ.≡⇒≡×≡ (toParts-1ʳ j j′ j≅j′)
⊠-identityʳ ⦃ props ⦄ A .equal {i} {i′} {j} {j′} i≅i′ j≅j′ | x , x′ | y , y′
rewrite x | x′ | y | y′ = from props (1# is rightIdentity for *) _
|
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.datasets import load_digits
from sklearn.decomposition import SparsePCA
# For reproducibility
np.random.seed(1000)
if __name__ == '__main__':
# Load the dataset
digits = load_digits()
X = digits['data'] / np.max(digits['data'])
# Perform a sparse PCA
spca = SparsePCA(n_components=30, alpha=2.0, normalize_components=True, random_state=1000)
spca.fit(X)
# Show the components
sns.set()
fig, ax = plt.subplots(3, 10, figsize=(22, 8))
for i in range(3):
for j in range(10):
ax[i, j].imshow(spca.components_[(3 * j) + i].reshape((8, 8)), cmap='gray')
ax[i, j].set_xticks([])
ax[i, j].set_yticks([])
plt.show()
# Transform X[0]
y = spca.transform(X[0].reshape(1, -1)).squeeze()
# Show the absolute magnitudes
fig, ax = plt.subplots(figsize=(22, 10))
ax.bar(np.arange(1, 31, 1), np.abs(y))
ax.set_xticks(np.arange(1, 31, 1))
ax.set_xlabel('Component', fontsize=16)
ax.set_ylabel('Coefficient (absolute values)', fontsize=16)
plt.show()
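# Optional sanity check (pure NumPy): sparse PCA should drive many component
# entries to exactly zero, unlike standard PCA. Note that newer scikit-learn
# releases dropped the normalize_components argument used above, so it may
# need to be removed there.
print('Fraction of zero entries: {:.2f}'.format(np.mean(spca.components_ == 0.0)))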
|
# Copyright (c) 2018-2021, Carnegie Mellon University
# See LICENSE for details
#F IsNonTerminal ( <obj> )
#F returns true, if <obj> is a non-terminal
#F
IsNonTerminal := N -> IsRec(N) and IsBound(N.isNonTerminal) and N.isNonTerminal = true;
Class(NonTerminalOps, SPLOps, rec(
));
Declare(NonTerminal);
# ==========================================================================
# NonTerminal
# ==========================================================================
Class(NonTerminal, BaseMat, rec(
#F create getter methods
#F ex: NonTerminal.WithParams("param1_getter_name", "param2_getter_name")
WithParams := arg >> CopyFields(arg[1],
RecList( ConcatList( [2..Length(arg)],
i -> [arg[i], DetachFunc(Subst(self >> self.params[$(i-1)]))] ))),
isNonTerminal := true, # for nonterm.g and IsNonTerminal() function
_short_print := false,
#--------- Transformation rules support --------------------------------
from_rChildren := (self, rch) >> let(
t := ApplyFunc(ObjId(self), DropLast(rch, 1)),
When(Last(rch), t.transpose(), t)
),
rChildren := self >>
Concatenation(self.params, [self.transposed]),
rSetChild := meth(self, n, newChild)
if n <= 0 or n > Length(self.params) + 1
then Error("<n> must be in [1..", Length(self.params)+1, "]"); fi;
if n <= Length(self.params) then
self.params[n] := newChild;
else
self.transposed := newChild;
fi;
# self.canonizeParams(); ??
self.dimensions := self.dims();
end,
#-----------------------------------------------------------------------
canonizeParams := meth(self)
local A, nump;
nump := Length(self.params);
if nump = 0 then return; fi;
for A in self.abbrevs do
if NumArgs(A) = -1 or NumArgs(A) = nump then
self.params := ApplyFunc(A, self.params);
return;
fi;
od;
Error("Nonterminal ", self.name, " can take ", List(self.abbrevs, NumArgs),
" arguments, but not ", nump);
end,
new := meth(arg)
local result, self, params;
self := arg[1];
params := arg{[2..Length(arg)]};
result := SPL(WithBases(self, rec(params := params, transposed := false )));
result.canonizeParams();
result.dimensions := result.dims();
return result;
end,
#-----------------------------------------------------------------------
__call__ := ~.new,
#-----------------------------------------------------------------------
isTerminal := False,
#-----------------------------------------------------------------------
isPermutation := False,
#-----------------------------------------------------------------------
transpose := self >>
Inherit(self, rec(transposed := not self.transposed,
dimensions := Reversed(self.dimensions))),
setTransposed := (self, v) >> When( self.transposed <> v,
self.transpose(), self),
#-----------------------------------------------------------------------
# .conjTranspose() is needed for RC(T).transpose() == RC(T.conjTranspose())
# The inert form will only work if the non-terminal is not used inside RC,
# or is used inside RC but not transposed
conjTranspose := self >> InertConjTranspose(self),
isInertConjTranspose := self >> self.conjTranspose = NonTerminal.conjTranspose,
#-----------------------------------------------------------------------
print := (self, i, is) >>
Cond(
not IsBound(self.params), Print(self.name),
Print(self._print(self.params, i, is),
When(self.transposed, ".transpose()"))),
#-----------------------------------------------------------------------
toAMat := self >> self.terminate().toAMat(),
#-----------------------------------------------------------------------
export := arg >> Error("Can't export a non-terminal"),
#-----------------------------------------------------------------------
arithmeticCost := arg >> Error("Can't compute arithmetic cost of a non-terminal"),
transposeSymmetric := True,
isSymmetric := False,
area := self >> Product(self.dims())
));
|
[STATEMENT]
lemma r_amalgamation: "eval r_amalgamation [i, j, x] = amalgamation i j x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. eval r_amalgamation [i, j, x] = amalgamation i j x
[PROOF STEP]
proof (cases "parallel i j x \<up>")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. parallel i j x \<up> \<Longrightarrow> eval r_amalgamation [i, j, x] = amalgamation i j x
2. parallel i j x \<down> \<Longrightarrow> eval r_amalgamation [i, j, x] = amalgamation i j x
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
parallel i j x \<up>
goal (2 subgoals):
1. parallel i j x \<up> \<Longrightarrow> eval r_amalgamation [i, j, x] = amalgamation i j x
2. parallel i j x \<down> \<Longrightarrow> eval r_amalgamation [i, j, x] = amalgamation i j x
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
parallel i j x \<up>
[PROOF STEP]
have "eval r_parallel [i, j, x] \<up>"
[PROOF STATE]
proof (prove)
using this:
parallel i j x \<up>
goal (1 subgoal):
1. eval r_parallel [i, j, x] \<up>
[PROOF STEP]
by (simp add: r_parallel')
[PROOF STATE]
proof (state)
this:
eval r_parallel [i, j, x] \<up>
goal (2 subgoals):
1. parallel i j x \<up> \<Longrightarrow> eval r_amalgamation [i, j, x] = amalgamation i j x
2. parallel i j x \<down> \<Longrightarrow> eval r_amalgamation [i, j, x] = amalgamation i j x
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
eval r_parallel [i, j, x] \<up>
[PROOF STEP]
have "eval r_amalgamation [i, j, x] \<up>"
[PROOF STATE]
proof (prove)
using this:
eval r_parallel [i, j, x] \<up>
goal (1 subgoal):
1. eval r_amalgamation [i, j, x] \<up>
[PROOF STEP]
unfolding r_amalgamation_def
[PROOF STATE]
proof (prove)
using this:
eval r_parallel [i, j, x] \<up>
goal (1 subgoal):
1. eval (Cn 3 r_pdec2 [r_parallel]) [i, j, x] \<up>
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
eval r_amalgamation [i, j, x] \<up>
goal (2 subgoals):
1. parallel i j x \<up> \<Longrightarrow> eval r_amalgamation [i, j, x] = amalgamation i j x
2. parallel i j x \<down> \<Longrightarrow> eval r_amalgamation [i, j, x] = amalgamation i j x
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
eval r_amalgamation [i, j, x] \<up>
goal (2 subgoals):
1. parallel i j x \<up> \<Longrightarrow> eval r_amalgamation [i, j, x] = amalgamation i j x
2. parallel i j x \<down> \<Longrightarrow> eval r_amalgamation [i, j, x] = amalgamation i j x
[PROOF STEP]
from True
[PROOF STATE]
proof (chain)
picking this:
parallel i j x \<up>
[PROOF STEP]
have "amalgamation i j x \<up>"
[PROOF STATE]
proof (prove)
using this:
parallel i j x \<up>
goal (1 subgoal):
1. amalgamation i j x \<up>
[PROOF STEP]
using amalgamation_def
[PROOF STATE]
proof (prove)
using this:
parallel i j x \<up>
amalgamation ?i ?j ?x \<equiv> if parallel ?i ?j ?x \<up> then None else Some (pdec2 (the (parallel ?i ?j ?x)))
goal (1 subgoal):
1. amalgamation i j x \<up>
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
amalgamation i j x \<up>
goal (2 subgoals):
1. parallel i j x \<up> \<Longrightarrow> eval r_amalgamation [i, j, x] = amalgamation i j x
2. parallel i j x \<down> \<Longrightarrow> eval r_amalgamation [i, j, x] = amalgamation i j x
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
eval r_amalgamation [i, j, x] \<up>
amalgamation i j x \<up>
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
eval r_amalgamation [i, j, x] \<up>
amalgamation i j x \<up>
goal (1 subgoal):
1. eval r_amalgamation [i, j, x] = amalgamation i j x
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
eval r_amalgamation [i, j, x] = amalgamation i j x
goal (1 subgoal):
1. parallel i j x \<down> \<Longrightarrow> eval r_amalgamation [i, j, x] = amalgamation i j x
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. parallel i j x \<down> \<Longrightarrow> eval r_amalgamation [i, j, x] = amalgamation i j x
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
parallel i j x \<down>
goal (1 subgoal):
1. parallel i j x \<down> \<Longrightarrow> eval r_amalgamation [i, j, x] = amalgamation i j x
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
parallel i j x \<down>
[PROOF STEP]
have "eval r_parallel [i, j, x] \<down>"
[PROOF STATE]
proof (prove)
using this:
parallel i j x \<down>
goal (1 subgoal):
1. eval r_parallel [i, j, x] \<down>
[PROOF STEP]
by (simp add: r_parallel')
[PROOF STATE]
proof (state)
this:
eval r_parallel [i, j, x] \<down>
goal (1 subgoal):
1. parallel i j x \<down> \<Longrightarrow> eval r_amalgamation [i, j, x] = amalgamation i j x
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
eval r_parallel [i, j, x] \<down>
[PROOF STEP]
have "eval r_amalgamation [i, j, x] = eval r_pdec2 [the (eval r_parallel [i, j, x])]"
[PROOF STATE]
proof (prove)
using this:
eval r_parallel [i, j, x] \<down>
goal (1 subgoal):
1. eval r_amalgamation [i, j, x] = eval r_pdec2 [the (eval r_parallel [i, j, x])]
[PROOF STEP]
unfolding r_amalgamation_def
[PROOF STATE]
proof (prove)
using this:
eval r_parallel [i, j, x] \<down>
goal (1 subgoal):
1. eval (Cn 3 r_pdec2 [r_parallel]) [i, j, x] = eval r_pdec2 [the (eval r_parallel [i, j, x])]
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
eval r_amalgamation [i, j, x] = eval r_pdec2 [the (eval r_parallel [i, j, x])]
goal (1 subgoal):
1. parallel i j x \<down> \<Longrightarrow> eval r_amalgamation [i, j, x] = amalgamation i j x
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
eval r_amalgamation [i, j, x] = eval r_pdec2 [the (eval r_parallel [i, j, x])]
goal (1 subgoal):
1. parallel i j x \<down> \<Longrightarrow> eval r_amalgamation [i, j, x] = amalgamation i j x
[PROOF STEP]
have "... \<down>= pdec2 (the (eval r_parallel [i, j, x]))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. eval r_pdec2 [the (eval r_parallel [i, j, x])] \<down>= pdec2 (the (eval r_parallel [i, j, x]))
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
eval r_pdec2 [the (eval r_parallel [i, j, x])] \<down>= pdec2 (the (eval r_parallel [i, j, x]))
goal (1 subgoal):
1. parallel i j x \<down> \<Longrightarrow> eval r_amalgamation [i, j, x] = amalgamation i j x
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
eval r_amalgamation [i, j, x] \<down>= pdec2 (the (eval r_parallel [i, j, x]))
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
eval r_amalgamation [i, j, x] \<down>= pdec2 (the (eval r_parallel [i, j, x]))
goal (1 subgoal):
1. eval r_amalgamation [i, j, x] = amalgamation i j x
[PROOF STEP]
by (simp add: False amalgamation_def r_parallel')
[PROOF STATE]
proof (state)
this:
eval r_amalgamation [i, j, x] = amalgamation i j x
goal:
No subgoals!
[PROOF STEP]
qed
|
[STATEMENT]
lemma "test (do {
tmp0 \<leftarrow> slots_document . getElementById(''test_basic'');
n \<leftarrow> createTestTree(tmp0);
tmp1 \<leftarrow> n . ''s1'';
tmp2 \<leftarrow> tmp1 . assignedElements();
tmp3 \<leftarrow> n . ''c1'';
assert_array_equals(tmp2, [tmp3])
}) slots_heap"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. test (Heap_Error_Monad.bind slots_document . getElementById(''test_basic'') (\<lambda>tmp0. Heap_Error_Monad.bind (createTestTree tmp0) (\<lambda>n. Heap_Error_Monad.bind (n . ''s1'') (\<lambda>tmp1. Heap_Error_Monad.bind tmp1 . assignedElements() (\<lambda>tmp2. Heap_Error_Monad.bind (n . ''c1'') (\<lambda>tmp3. assert_array_equals(tmp2, [tmp3]))))))) slots_heap
[PROOF STEP]
by eval
|
function [ o, x, w ] = en_r2_03_2 ( n )
%*****************************************************************************80
%
%% EN_R2_03_2 implements the Stroud rule 3.2 for region EN_R2.
%
% Discussion:
%
% The rule has order O = 2^N.
%
% The rule has precision P = 3.
%
% EN_R2 is the entire N-dimensional space with weight function
%
% w(x) = exp ( - x1^2 - x2^2 ... - xn^2 )
%
% Licensing:
%
% This code is distributed under the GNU LGPL license.
%
% Modified:
%
% 19 January 2010
%
% Author:
%
% John Burkardt
%
% Reference:
%
% Arthur Stroud,
% Approximate Calculation of Multiple Integrals,
% Prentice Hall, 1971,
% ISBN: 0130438936,
% LC: QA311.S85.
%
% Parameters:
%
% Input, integer N, the spatial dimension.
%
% Output, integer O, the order.
%
% Output, real X(N,O), the abscissas.
%
% Output, real W(O), the weights.
%
o = 2^n;
volume = sqrt ( pi^n );
a = volume / o;
r = sqrt ( 1 / 2 );
x = zeros ( n, o );
w = zeros ( o, 1 );
k = 0;
%
% 2^N points.
%
k = k + 1;
x(1:n,k) = - r;
w(k) = a;
more = 1;
while ( more )
more = 0;
for i = n : -1 : 1
if ( x(i,k) < 0.0 )
k = k + 1;
x(1:n,k) = x(1:n,k-1);
x(i,k) = + r;
x(i+1:n,k) = - r;
w(k) = a;
more = 1;
break;
end
end
end
return
end
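%
% Usage sketch: in dimension N = 2 the weights must sum to the Gaussian
% volume sqrt ( pi^N ) = pi, and every monomial of degree <= 3 is
% integrated exactly (odd monomials give 0 by symmetry). For example:
%
%   [ o, x, w ] = en_r2_03_2 ( 2 );
%   sum ( w ) - pi              % should be ~0
%   w' * ( x(1,:).^3 )'         % should be ~0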
|
Labels: machine warning labels with a pressure-sensitive, self-adhesive backing that withstands temperatures up to 176°F. Self-adhesive vinyl hazard warning labels are ideal for marking potentially life-threatening conditions on switches.
|
// Boost.Geometry Index
//
// R-tree nodes based on runtime-polymorphism, storing static-size containers
// test version throwing exceptions on creation
//
// Copyright (c) 2011-2013 Adam Wulkiewicz, Lodz, Poland.
//
// Use, modification and distribution is subject to the Boost Software License,
// Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_GEOMETRY_INDEX_TEST_RTREE_THROWING_NODE_HPP
#define BOOST_GEOMETRY_INDEX_TEST_RTREE_THROWING_NODE_HPP
#include <boost/geometry/index/detail/rtree/node/dynamic_visitor.hpp>
#include <rtree/exceptions/test_throwing.hpp>
struct throwing_nodes_stats
{
static void reset_counters() { get_internal_nodes_counter_ref() = 0; get_leafs_counter_ref() = 0; }
static size_t internal_nodes_count() { return get_internal_nodes_counter_ref(); }
static size_t leafs_count() { return get_leafs_counter_ref(); }
static size_t & get_internal_nodes_counter_ref() { static size_t cc = 0; return cc; }
static size_t & get_leafs_counter_ref() { static size_t cc = 0; return cc; }
};
namespace boost { namespace geometry { namespace index {
template <size_t MaxElements, size_t MinElements>
struct linear_throwing : public linear<MaxElements, MinElements> {};
template <size_t MaxElements, size_t MinElements>
struct quadratic_throwing : public quadratic<MaxElements, MinElements> {};
template <size_t MaxElements, size_t MinElements, size_t OverlapCostThreshold = 0, size_t ReinsertedElements = detail::default_rstar_reinserted_elements_s<MaxElements>::value>
struct rstar_throwing : public rstar<MaxElements, MinElements, OverlapCostThreshold, ReinsertedElements> {};
namespace detail { namespace rtree {
// options implementation (from options.hpp)
struct node_throwing_d_mem_static_tag {};
template <size_t MaxElements, size_t MinElements>
struct options_type< linear_throwing<MaxElements, MinElements> >
{
typedef options<
linear_throwing<MaxElements, MinElements>,
insert_default_tag, choose_by_content_diff_tag, split_default_tag, linear_tag,
node_throwing_d_mem_static_tag
> type;
};
template <size_t MaxElements, size_t MinElements>
struct options_type< quadratic_throwing<MaxElements, MinElements> >
{
typedef options<
quadratic_throwing<MaxElements, MinElements>,
insert_default_tag, choose_by_content_diff_tag, split_default_tag, quadratic_tag,
node_throwing_d_mem_static_tag
> type;
};
template <size_t MaxElements, size_t MinElements, size_t OverlapCostThreshold, size_t ReinsertedElements>
struct options_type< rstar_throwing<MaxElements, MinElements, OverlapCostThreshold, ReinsertedElements> >
{
typedef options<
rstar_throwing<MaxElements, MinElements, OverlapCostThreshold, ReinsertedElements>,
insert_reinsert_tag, choose_by_overlap_diff_tag, split_default_tag, rstar_tag,
node_throwing_d_mem_static_tag
> type;
};
}} // namespace detail::rtree
// node implementation
namespace detail { namespace rtree {
template <typename Value, typename Parameters, typename Box, typename Allocators>
struct dynamic_internal_node<Value, Parameters, Box, Allocators, node_throwing_d_mem_static_tag>
: public dynamic_node<Value, Parameters, Box, Allocators, node_throwing_d_mem_static_tag>
{
typedef throwing_varray<
rtree::ptr_pair<Box, typename Allocators::node_pointer>,
Parameters::max_elements + 1
> elements_type;
template <typename Dummy>
inline dynamic_internal_node(Dummy const&) { throwing_nodes_stats::get_internal_nodes_counter_ref()++; }
inline ~dynamic_internal_node() { throwing_nodes_stats::get_internal_nodes_counter_ref()--; }
void apply_visitor(dynamic_visitor<Value, Parameters, Box, Allocators, node_throwing_d_mem_static_tag, false> & v) { v(*this); }
void apply_visitor(dynamic_visitor<Value, Parameters, Box, Allocators, node_throwing_d_mem_static_tag, true> & v) const { v(*this); }
elements_type elements;
private:
dynamic_internal_node(dynamic_internal_node const&);
dynamic_internal_node & operator=(dynamic_internal_node const&);
};
template <typename Value, typename Parameters, typename Box, typename Allocators>
struct dynamic_leaf<Value, Parameters, Box, Allocators, node_throwing_d_mem_static_tag>
: public dynamic_node<Value, Parameters, Box, Allocators, node_throwing_d_mem_static_tag>
{
typedef throwing_varray<Value, Parameters::max_elements + 1> elements_type;
template <typename Dummy>
inline dynamic_leaf(Dummy const&) { throwing_nodes_stats::get_leafs_counter_ref()++; }
inline ~dynamic_leaf() { throwing_nodes_stats::get_leafs_counter_ref()--; }
void apply_visitor(dynamic_visitor<Value, Parameters, Box, Allocators, node_throwing_d_mem_static_tag, false> & v) { v(*this); }
void apply_visitor(dynamic_visitor<Value, Parameters, Box, Allocators, node_throwing_d_mem_static_tag, true> & v) const { v(*this); }
elements_type elements;
private:
dynamic_leaf(dynamic_leaf const&);
dynamic_leaf & operator=(dynamic_leaf const&);
};
// elements derived type
template <typename OldValue, size_t N, typename NewValue>
struct container_from_elements_type<throwing_varray<OldValue, N>, NewValue>
{
typedef throwing_varray<NewValue, N> type;
};
// nodes traits
template <typename Value, typename Parameters, typename Box, typename Allocators>
struct node<Value, Parameters, Box, Allocators, node_throwing_d_mem_static_tag>
{
typedef dynamic_node<Value, Parameters, Box, Allocators, node_throwing_d_mem_static_tag> type;
};
template <typename Value, typename Parameters, typename Box, typename Allocators>
struct internal_node<Value, Parameters, Box, Allocators, node_throwing_d_mem_static_tag>
{
typedef dynamic_internal_node<Value, Parameters, Box, Allocators, node_throwing_d_mem_static_tag> type;
};
template <typename Value, typename Parameters, typename Box, typename Allocators>
struct leaf<Value, Parameters, Box, Allocators, node_throwing_d_mem_static_tag>
{
typedef dynamic_leaf<Value, Parameters, Box, Allocators, node_throwing_d_mem_static_tag> type;
};
template <typename Value, typename Parameters, typename Box, typename Allocators, bool IsVisitableConst>
struct visitor<Value, Parameters, Box, Allocators, node_throwing_d_mem_static_tag, IsVisitableConst>
{
typedef dynamic_visitor<Value, Parameters, Box, Allocators, node_throwing_d_mem_static_tag, IsVisitableConst> type;
};
// allocators
template <typename Allocator, typename Value, typename Parameters, typename Box>
class allocators<Allocator, Value, Parameters, Box, node_throwing_d_mem_static_tag>
: public Allocator::template rebind<
typename internal_node<
Value, Parameters, Box,
allocators<Allocator, Value, Parameters, Box, node_throwing_d_mem_static_tag>,
node_throwing_d_mem_static_tag
>::type
>::other
, Allocator::template rebind<
typename leaf<
Value, Parameters, Box,
allocators<Allocator, Value, Parameters, Box, node_throwing_d_mem_static_tag>,
node_throwing_d_mem_static_tag
>::type
>::other
{
typedef typename Allocator::template rebind<
Value
>::other value_allocator_type;
public:
typedef Allocator allocator_type;
typedef Value value_type;
typedef value_type & reference;
typedef const value_type & const_reference;
typedef typename value_allocator_type::size_type size_type;
typedef typename value_allocator_type::difference_type difference_type;
typedef typename value_allocator_type::pointer pointer;
typedef typename value_allocator_type::const_pointer const_pointer;
typedef typename Allocator::template rebind<
typename node<Value, Parameters, Box, allocators, node_throwing_d_mem_static_tag>::type
>::other::pointer node_pointer;
typedef typename Allocator::template rebind<
typename internal_node<Value, Parameters, Box, allocators, node_throwing_d_mem_static_tag>::type
>::other::pointer internal_node_pointer;
typedef typename Allocator::template rebind<
typename internal_node<Value, Parameters, Box, allocators, node_throwing_d_mem_static_tag>::type
>::other internal_node_allocator_type;
typedef typename Allocator::template rebind<
typename leaf<Value, Parameters, Box, allocators, node_throwing_d_mem_static_tag>::type
>::other leaf_allocator_type;
inline allocators()
: internal_node_allocator_type()
, leaf_allocator_type()
{}
template <typename Alloc>
inline explicit allocators(Alloc const& alloc)
: internal_node_allocator_type(alloc)
, leaf_allocator_type(alloc)
{}
inline allocators(BOOST_FWD_REF(allocators) a)
: internal_node_allocator_type(boost::move(a.internal_node_allocator()))
, leaf_allocator_type(boost::move(a.leaf_allocator()))
{}
inline allocators & operator=(BOOST_FWD_REF(allocators) a)
{
internal_node_allocator() = ::boost::move(a.internal_node_allocator());
leaf_allocator() = ::boost::move(a.leaf_allocator());
return *this;
}
#ifndef BOOST_NO_CXX11_RVALUE_REFERENCES
inline allocators & operator=(allocators const& a)
{
internal_node_allocator() = a.internal_node_allocator();
leaf_allocator() = a.leaf_allocator();
return *this;
}
#endif
void swap(allocators & a)
{
boost::swap(internal_node_allocator(), a.internal_node_allocator());
boost::swap(leaf_allocator(), a.leaf_allocator());
}
bool operator==(allocators const& a) const { return leaf_allocator() == a.leaf_allocator(); }
template <typename Alloc>
bool operator==(Alloc const& a) const { return leaf_allocator() == leaf_allocator_type(a); }
Allocator allocator() const { return Allocator(leaf_allocator()); }
internal_node_allocator_type & internal_node_allocator() { return *this; }
internal_node_allocator_type const& internal_node_allocator() const { return *this; }
leaf_allocator_type & leaf_allocator() { return *this; }
leaf_allocator_type const& leaf_allocator() const { return *this; }
};
struct node_bad_alloc : public std::exception
{
const char * what() const throw() { return "internal node creation failed."; }
};
struct throwing_node_settings
{
static void throw_if_required()
{
// throw if counter meets max count
if ( get_max_calls_ref() <= get_calls_counter_ref() )
throw node_bad_alloc();
else
++get_calls_counter_ref();
}
static void reset_calls_counter() { get_calls_counter_ref() = 0; }
static void set_max_calls(size_t mc) { get_max_calls_ref() = mc; }
static size_t & get_calls_counter_ref() { static size_t cc = 0; return cc; }
static size_t & get_max_calls_ref() { static size_t mc = (std::numeric_limits<size_t>::max)(); return mc; }
};
// create_node
template <typename Allocators, typename Value, typename Parameters, typename Box>
struct create_node<
Allocators,
dynamic_internal_node<Value, Parameters, Box, Allocators, node_throwing_d_mem_static_tag>
>
{
static inline typename Allocators::node_pointer
apply(Allocators & allocators)
{
// throw if counter meets max count
throwing_node_settings::throw_if_required();
return create_dynamic_node<
typename Allocators::node_pointer,
dynamic_internal_node<Value, Parameters, Box, Allocators, node_throwing_d_mem_static_tag>
>::apply(allocators.internal_node_allocator());
}
};
template <typename Allocators, typename Value, typename Parameters, typename Box>
struct create_node<
Allocators,
dynamic_leaf<Value, Parameters, Box, Allocators, node_throwing_d_mem_static_tag>
>
{
static inline typename Allocators::node_pointer
apply(Allocators & allocators)
{
// throw if counter meets max count
throwing_node_settings::throw_if_required();
return create_dynamic_node<
typename Allocators::node_pointer,
dynamic_leaf<Value, Parameters, Box, Allocators, node_throwing_d_mem_static_tag>
>::apply(allocators.leaf_allocator());
}
};
}} // namespace detail::rtree
}}} // namespace boost::geometry::index
#endif // BOOST_GEOMETRY_INDEX_TEST_RTREE_THROWING_NODE_HPP
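// Usage sketch (test side): a typical exception-safety test bounds the number
// of successful node creations and checks that no nodes leak after the throw.
//
//   throwing_nodes_stats::reset_counters();
//   throwing_node_settings::reset_calls_counter();
//   throwing_node_settings::set_max_calls(5); // the 6th creation throws node_bad_alloc
//   // ... insert into an rtree parametrized with e.g. linear_throwing<4, 2>,
//   // catch node_bad_alloc, then verify:
//   assert(throwing_nodes_stats::internal_nodes_count() == 0);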
|
# n = 20;
# alpha = 0.01;
# x = rnorm(n, mean=10.5, sd=1.3);
#
# # 1.1
# hypothesisTest = t.test(x, mu=10, conf.level = 1-alpha);
# print(hypothesisTest);
#
# #1.1.a
# avg = mean(x);
# stDev = sd(x);
#
# critVal = qt(1-alpha/2, n-1, lower.tail = TRUE);
# lowBound = avg - (stDev * critVal)/sqrt(n);
# upBound = avg + (stDev * critVal)/sqrt(n);
# print((stDev * critVal)/sqrt(n))
#
# #1.1.b
# #TODO: add the if-check
# print(lowBound);
# print(upBound);
#
# #1.1.c TODO: add the if-check
# tVal = sqrt(n)*(avg-10)/stDev;
# critVal = qt(1-alpha/2, n-1, lower.tail = TRUE);
#
# lowBound = pt(tVal, n-1, lower.tail = FALSE);
# upBound = pt(tVal, n-1, lower.tail = TRUE);
# pVal = 2*min(lowBound, upBound);
# print(2*lowBound);
# print(2*upBound);
#
# #1.2
# # choose a one-sided t-test and repeat the whole procedure
# # Empirically we know that avg = 10.5 and mu = 10, so the 'greater' alternative is preferable: with the given confidence we can claim the value is greater than ten. Given how the data are generated, this rejects H0 (in favour of the alternative) more often.
#
# n = 20;
# alpha = 0.01
# x = rnorm(n, mean=10, sd=1)
# error = rnorm(n, mean=0.5, sd=0.8306624)
# y = x + error
#
# t.test(x, y=y, paired = TRUE, alternative = "less", conf.level = 1-alpha)
#
#2.1
# n = 20;
# alpha = 0.01
# x = rnorm(n, mean=10, sd=1)
# error = rnorm(n, mean=0.5, sd=0.8306624)
# y = x + error
#
# t.test(x, y=y, paired = TRUE, alternative = "less", conf.level = 1-alpha)
# n = 20;
# alpha = 0.01
# x = rnorm(n, mean=10, sd=1)
# error = rnorm(n, mean=0.5, sd=0.8306624)
# y = x + error
#
# res = t.test(x, y=y, paired = TRUE, alternative = "less", conf.level = 1-alpha)
# print(res)
# if(abs(res$statistic) > qt(1-alpha, n-1, lower.tail = TRUE)) {
# print('Reject null hypothesis!')
# }
# 2.1.b
# res = t.test(x-y, alternative = "less", conf.level = 1-alpha)
# print(res)
#2.2
# n1 = 20;
# n2 = 25;
# alpha = 0.01
# x=rnorm(n1, mean=10, sd=1.3)
# y=rnorm(n2, mean=11.25, sd=1.3)
# res = t.test(x, y=y, paired = FALSE, var.equal = TRUE, alternative = "less", conf.level = 1-alpha)
# print(res)
# if(abs(res$statistic) > qt(1-alpha, n1+n2-2, lower.tail = TRUE)) {
# print('Reject null hypothesis!')
# }
n1 = 20;
n2 = 25;
alpha = 0.01
x=rnorm(n1, mean=10, sd=1.3)
y=rnorm(n2, mean=11.28, sd=1.2)
res = t.test(x, y=y, paired = FALSE, var.equal = FALSE, alternative = "less", conf.level = 1-alpha)
print(res)
if(abs(res$statistic) > qt(1-alpha, n1+n2-2, lower.tail = TRUE)) {
print('Reject null hypothesis!')
}
#
# #2.2
# n1 = 20;
# n2 = 25;
# alpha = 0.01
# x=rnorm(n1, mean=10, sd=1.3)
# y=rnorm(n2, mean=11.25, sd=1.3)
#
# res = t.test(x, y=y, paired = FALSE, var.equal = TRUE, alternative = "less", conf.level = 1-alpha)
# print(res);
df = n1 + n2 - 2;
sdiff = sqrt(var(x)/n1+var(y)/n2);
tVal = (mean(x)-mean(y))/(sdiff);
print(tVal)
if(abs(tVal) > qt(1-alpha, df, lower.tail = TRUE)) {
print('Reject null hypothesis!')
}
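# The manual statistic above pairs the Welch standard error with the pooled
# df (n1+n2-2); for unequal variances the matching choice is the
# Welch-Satterthwaite df, which is what t.test uses with var.equal = FALSE:
df.welch = (var(x)/n1 + var(y)/n2)^2 /
  ((var(x)/n1)^2/(n1-1) + (var(y)/n2)^2/(n2-1));
if(abs(tVal) > qt(1-alpha, df.welch, lower.tail = TRUE)) {
  print('Reject null hypothesis (Welch df)!')
}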
# critVal = qt(1-alpha, df, lower.tail = TRUE);
#print(pt(tVal, df, lower.tail = TRUE))
#print(qt(res$p.value, df));
#2.3
# n1 = 20;
# n2 = 25;
# alpha = 0.01
# x=rnorm(n1, mean=10, sd=1.3)
# y=rnorm(n2, mean=11.28, sd=1.2)
# t.test(x, y=y, paired = FALSE, var.equal = FALSE, conf.level = 1-alpha)
#comment out all the steps
#the computation in 2.2 will have to be rewritten using the formula for unequal variances
#... if variance can leave one dis-persed, can one also be persed?
#3.1.a,b,c
# sequenceLength = 4000000;
# x = runif(sequenceLength, 0, 100)
# print(system.time(sort(x)))
#we use the hypothesis X1 < X2, i.e. is algorithm X1 faster on average?
#
|
/-
Copyright (c) 2022 Yaël Dillies. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yaël Dillies
! This file was ported from Lean 3 source module order.category.BoolAlg
! leanprover-community/mathlib commit e8ac6315bcfcbaf2d19a046719c3b553206dac75
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.Order.Category.HeytAlg
/-!
# The category of boolean algebras
This defines `BoolAlg`, the category of boolean algebras.
-/
open OrderDual Opposite Set
universe u
open CategoryTheory
/-- The category of boolean algebras. -/
def BoolAlg :=
Bundled BooleanAlgebra
#align BoolAlg BoolAlg
namespace BoolAlg
instance : CoeSort BoolAlg (Type _) :=
Bundled.hasCoeToSort
instance (X : BoolAlg) : BooleanAlgebra X :=
X.str
/-- Construct a bundled `BoolAlg` from a `boolean_algebra`. -/
def of (α : Type _) [BooleanAlgebra α] : BoolAlg :=
Bundled.of α
#align BoolAlg.of BoolAlg.of
@[simp]
theorem coe_of (α : Type _) [BooleanAlgebra α] : ↥(of α) = α :=
rfl
#align BoolAlg.coe_of BoolAlg.coe_of
instance : Inhabited BoolAlg :=
⟨of PUnit⟩
/-- Turn a `BoolAlg` into a `BddDistLat` by forgetting its complement operation. -/
def toBddDistLat (X : BoolAlg) : BddDistLat :=
BddDistLat.of X
#align BoolAlg.to_BddDistLat BoolAlg.toBddDistLat
@[simp]
theorem coe_toBddDistLat (X : BoolAlg) : ↥X.toBddDistLat = ↥X :=
rfl
#align BoolAlg.coe_to_BddDistLat BoolAlg.coe_toBddDistLat
instance : LargeCategory.{u} BoolAlg :=
InducedCategory.category toBddDistLat
instance : ConcreteCategory BoolAlg :=
InducedCategory.concreteCategory toBddDistLat
instance hasForgetToBddDistLat : HasForget₂ BoolAlg BddDistLat :=
InducedCategory.hasForget₂ toBddDistLat
#align BoolAlg.has_forget_to_BddDistLat BoolAlg.hasForgetToBddDistLat
section
attribute [local instance] BoundedLatticeHomClass.toBiheytingHomClass
@[simps]
instance hasForgetToHeytAlg : HasForget₂ BoolAlg HeytAlg
where forget₂ :=
{ obj := fun X => ⟨X⟩
map := fun X Y f => show BoundedLatticeHom X Y from f }
#align BoolAlg.has_forget_to_HeytAlg BoolAlg.hasForgetToHeytAlg
end
/-- Constructs an equivalence between Boolean algebras from an order isomorphism between them. -/
@[simps]
def Iso.mk {α β : BoolAlg.{u}} (e : α ≃o β) : α ≅ β
where
Hom := (e : BoundedLatticeHom α β)
inv := (e.symm : BoundedLatticeHom β α)
hom_inv_id' := by
ext
exact e.symm_apply_apply _
inv_hom_id' := by
ext
exact e.apply_symm_apply _
#align BoolAlg.iso.mk BoolAlg.Iso.mk
/-- `order_dual` as a functor. -/
@[simps]
def dual : BoolAlg ⥤ BoolAlg where
obj X := of Xᵒᵈ
map X Y := BoundedLatticeHom.dual
#align BoolAlg.dual BoolAlg.dual
/-- The equivalence between `BoolAlg` and itself induced by `order_dual` both ways. -/
@[simps Functor inverse]
def dualEquiv : BoolAlg ≌ BoolAlg :=
Equivalence.mk dual dual
(NatIso.ofComponents (fun X => Iso.mk <| OrderIso.dualDual X) fun X Y f => rfl)
(NatIso.ofComponents (fun X => Iso.mk <| OrderIso.dualDual X) fun X Y f => rfl)
#align BoolAlg.dual_equiv BoolAlg.dualEquiv
end BoolAlg
theorem boolAlg_dual_comp_forget_to_bddDistLat :
BoolAlg.dual ⋙ forget₂ BoolAlg BddDistLat = forget₂ BoolAlg BddDistLat ⋙ BddDistLat.dual :=
rfl
#align BoolAlg_dual_comp_forget_to_BddDistLat boolAlg_dual_comp_forget_to_bddDistLat
|
```python
import holoviews as hv
hv.extension('bokeh')
hv.opts.defaults(hv.opts.Curve(width=500),
hv.opts.Points(width=500),
hv.opts.Image(width=500, colorbar=True, cmap='Viridis'))
```
```python
import numpy as np
import scipy.linalg
```
# The optimal linear estimator
An **estimator** is a system designed to **extract information** from a **signal**
- The signal contains **information and noise**
- The signal is represented as a sequence of **data**
Types of estimator
- **Filter:** estimates the current value of the signal by emphasizing or removing one or more of its features
- **Predictor:** estimates a future value of the signal
In this lesson we study estimators that are both linear and optimal
- Linear: the estimated quantity is a linear function of the input
- Optimal: the estimator is the best possible solution according to some criterion
To understand the foundations of optimal estimators we first need the concept of a random process. We then study one of the most important optimal estimators: the Wiener filter
## Random process (stochastic process)
A stochastic process is a **collection of random variables** indexed so that they form a sequence. It is written as a set $\{U_k\}$, with $k=0, 1, 2, \ldots, N$. The index $k$ may represent time, space, or another independent variable.
The following figure shows three realizations (observations) of a stochastic process with four elements
Many phenomena are modeled with random processes, for example
- Stock market indices
- The behaviour of a gas inside a container
- The vibrations of an electric motor
- The area of a cell during organogenesis
Next we review some properties of random processes
**Moments of a stochastic process**
A random process $U_n = (u_n, u_{n-1}, u_{n-2}, \ldots, u_{n-L})$ is described through its statistical moments. For a second-order characterization we need to define
- Central moment or mean: describes the central value of the process
$$
\mu(n) = \mathbb{E}[U_n]
$$
- Second moment or correlation: describes the dispersion of the process
$$
r_{uu}(n, n-k) = \mathbb{E}[U_n U_{n-k}]
$$
- Centered second moment or covariance
$$
\begin{align}
c_{uu}(n, n-k) &= \mathbb{E}[(U_n-\mu_n) (U_{n-k}- \mu_{n-k})] \nonumber \\
&= r_{uu}(n,n-k) - \mu_n \mu_{n-k} \nonumber
\end{align}
$$
- Cross-correlation between two processes
$$
r_{ud}(n, n-k) = \mathbb{E}[U_n D_{n-k}]
$$
**Stationary and ergodic processes**
In this lesson we focus on the simplified case where the **process is stationary**; mathematically this property means that
$$
\mu(n) = \mu, \forall n
$$
and
$$
r_{uu}(n, n-k) = r_{uu}(k), \forall n
$$
i.e. the statistical moments remain constant over time (they do not depend on $n$).
Another simplification we will use is that the process is **ergodic**,
$$
\mathbb{E}[U_n] = \frac{1}{N} \sum_{n=1}^N u_n
$$
i.e. we may replace the expected value with the sample mean over time
**Power spectral density**
The power spectral density (PSD) measures how the power of the stochastic process is distributed over frequency. Its mathematical definition is
$$
\begin{align}
S_{uu}(f) &= \sum_{k=-\infty}^{\infty} r_{uu}(k) e^{-j 2\pi f k} \nonumber \\
&= \lim_{N\to\infty} \frac{1}{2N+1} \mathbb{E} \left [\left|\sum_{n=-N}^{N} u_n e^{-j 2\pi f n} \right|^2 \right]
\end{align}
$$
which corresponds to the Fourier transform of the correlation (stationary case)
The PSD and the correlation form a Fourier pair, i.e. each is the Fourier transform of the other.
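As a quick numerical illustration of this Fourier-pair relation (a minimal sketch; the white-noise realization is only an example), the periodogram $|U(f)|^2/N$ equals the Fourier transform of the biased autocorrelation estimate:
```python
np.random.seed(0)
u = np.random.randn(1024)                # white noise: r_uu(k) is approximately delta(k)
r = np.correlate(u, u, 'full') / len(u)  # biased autocorrelation estimate
S = np.abs(np.fft.fft(u))**2 / len(u)    # periodogram = Fourier transform of r (Wiener-Khinchin)
print(r[len(u) - 1], S.mean())           # both close to sigma^2 = 1
```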
## The Wiener filter
The Wiener filter was published by Norbert Wiener in 1949 and is perhaps the most famous example of an optimal linear estimator.
:::{important}
To design an optimal estimator we need a **criterion** and **conditions** (assumptions). The estimator is then **optimal according to that criterion and under the assumptions considered**. For example, we might assume a scenario where the noise is white or where the process is stationary.
:::
Below we describe this filter in detail and explain how it is optimized. Example applications follow.
### Notation and architecture of the Wiener filter
The Wiener filter is a discrete-time system with an FIR structure and $L+1$ coefficients. A schematic of the Wiener filter is shown below
From the schematic we can identify the filter's main elements
- The filter coefficients: $h_0, h_1, h_2, \ldots, h_{L}$
- The input signal: $u_0, u_1, u_2, \ldots$
- The output signal: $y_0, y_1, y_2, \ldots$
- The "desired" (target) response: $d_0, d_1, d_2, \ldots$
- The error signal: $e_0, e_1, e_2, \ldots$
Since it is an FIR filter, the output is defined as
$$
y_n = \sum_{k=0}^{L} h_k u_{n-k},
$$
i.e. the convolution between the input and the coefficients. The error signal is then
$$
e_n = d_n - y_n = d_n - \sum_{k=0}^{L} h_k u_{n-k}
$$
which is the difference between the target signal and the output signal.
Next we show how the filter coefficients are adjusted according to the optimality criterion.
### Fitting the Wiener filter
The most common criterion for learning (adapting) the Wiener filter is the **mean square error** (MSE) between the desired response and the filter output.
Assuming that $u$ and $d$ are real-valued sequences we can write the MSE as
$$
\begin{align}
\text{MSE} &= \mathbb{E}\left [e_n^2 \right] \nonumber \\
&= \mathbb{E}\left [(d_n - y_n)^2 \right] \nonumber \\
&= \mathbb{E}\left [d_n^2 \right] - 2\mathbb{E}\left [ d_n y_n \right] + \mathbb{E}\left [ y_n^2 \right] \nonumber
\end{align}
$$
where $\sigma_d^2 = \mathbb{E}\left [d_n^2 \right]$ is the variance of the desired signal and $\sigma_y^2 = \mathbb{E}\left [ y_n^2 \right]$ is the variance of our estimator
:::{note}
Minimizing the MSE pushes the filter output toward the desired response
:::
Setting the derivative of the MSE with respect to each coefficient to zero gives
$$
\begin{align}
\frac{d}{d h_j} \text{MSE} &= -2\mathbb{E}\left[ d_n \frac{d y_n}{d h_j} \right] + 2 \mathbb{E}\left[ y_n \frac{d y_n}{d h_j} \right] \nonumber \\
&= -2\mathbb{E}\left[ d_n u_{n-j} \right] + 2 \mathbb{E}\left[ y_n u_{n-j} \right] \nonumber \\
&= -2\mathbb{E}\left[ d_n u_{n-j} \right] + 2 \mathbb{E}\left[ \sum_{k=0}^{L} h_k u_{n-k} u_{n-j} \right] \nonumber \\
&= -2\mathbb{E}\left[ d_n u_{n-j} \right] + 2 \sum_{k=0}^{L} h_k \mathbb{E}\left[ u_{n-k} u_{n-j} \right] = 0 \nonumber \end{align}
$$
Solving and repeating for $j=0, \ldots, L$ yields the following system of equations
$$
\begin{align}
\begin{pmatrix}
r_{uu}(0) & r_{uu}(1) & r_{uu}(2) & \ldots & r_{uu}(L) \\
r_{uu}(1) & r_{uu}(0) & r_{uu}(1) & \ldots & r_{uu}(L-1) \\
r_{uu}(2) & r_{uu}(1) & r_{uu}(0) & \ldots & r_{uu}(L-2) \\
\vdots & \vdots & \vdots & \ddots &\vdots \\
r_{uu}(L) & r_{uu}(L-1) & r_{uu}(L-2) & \ldots & r_{uu}(0) \\
\end{pmatrix}
\begin{pmatrix}
h_0 \\
h_1 \\
h_2 \\
\vdots \\
h_L \\
\end{pmatrix} &=
\begin{pmatrix}
r_{ud}(0) \\
r_{ud}(1) \\
r_{ud}(2) \\
\vdots \\
r_{ud}(L) \\
\end{pmatrix} \nonumber \\
R_{uu} \textbf{h} &= R_{ud},
\end{align}
$$
which is known as the **Wiener-Hopf equations**. $R_{uu}$ is known as the auto-correlation matrix.
Assuming $R_{uu}$ is non-singular, i.e. that its inverse exists, the **optimal solution in the minimum-MSE sense** is
$$
\textbf{h}^{*} = R_{uu} ^{-1} R_{ud}
$$
Note that by construction the matrix $R_{uu}$ is symmetric and Toeplitz, so the system can be solved efficiently in $\mathcal{O}(L^2)$ operations with the [Levinson-Durbin recursion](https://en.wikipedia.org/wiki/Levinson_recursion), as sketched below
:::{warning}
To reach the solution we imposed two conditions on the desired output and the input: (1) they have zero mean and (2) they are wide-sense stationary (i.e. the correlation depends only on the lag $k$).
:::
- If the first condition does not hold, the mean can be subtracted before training the filter
- If the second condition does not hold, it is better to use other methods, such as those of the following lessons
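Since $R_{uu}$ is Toeplitz, the solution can be obtained without ever forming the matrix. A minimal sketch using `scipy.linalg.solve_toeplitz` (a Levinson-type solver); the toy signals `u` and `d` are assumptions for illustration only:
```python
np.random.seed(0)
u = np.random.randn(500)
d = np.convolve(u, [0.5, -0.3, 0.1])[:len(u)]                  # toy "desired" signal: a filtered input
L = 2
r_uu = np.correlate(u, u, 'full')[len(u)-1:len(u)+L] / len(u)  # r_uu(0..L): first column of R_uu
r_ud = np.correlate(d, u, 'full')[len(u)-1:len(u)+L] / len(u)  # r_ud(0..L)
h = scipy.linalg.solve_toeplitz(r_uu, r_ud)                    # O(L^2) Levinson-type solve
print(h)                                                       # close to [0.5, -0.3, 0.1]
```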
## Applications of the Wiener filter
### Regression (system identification)
In regression we seek the coefficients $h$ given tuples $(X, Y)$ such that
$$
Y = h^T X + \epsilon,
$$
where $X \in \mathbb{R}^{N\times D}$ holds the independent variables (input), $Y \in \mathbb{R}^N$ is the dependent variable (output), and $\epsilon$ is noise
To train the filter
1. Assume we have observed N samples of $X$ and $Y$
1. From $u=X$ build $R_{uu}$
1. From $d=Y$ build $R_{ud}$
1. Finally, recover $\textbf{h}$ via $R_{uu} ^{-1} R_{ud}$
1. With this we can interpolate $Y$
**Example** Consider a polynomial regression in which we want to find the $h_k$ such that
$$
\begin{align}
d_i &= f_i + \epsilon \nonumber \\
&= \sum_{k=1}^L h_k u_i^k + \epsilon \nonumber
\end{align}
$$
```python
np.random.seed(12345)
u = np.linspace(-2, 2, num=30)
f = 0.25*u**5 - 2*u**3 + 5*u # The true coefficients are [0, 5, 0, -2, 0, 1/4, 0, 0, 0, ...]
d = f + np.random.randn(len(u))
```
```python
hv.Points((u, d), kdims=['u', 'd'])
```
We implement the filter as a class with two public methods, `fit` and `predict`. The filter has one argument: the number of coefficients $L$
```python
class Wiener_polynomial_regression:
def __init__(self, L: int):
self.L = L
self.h = np.zeros(shape=(L,))  # one weight per basis column (powers 0..L-1)
def _polynomial_basis(self, u: np.ndarray) -> np.ndarray:
U = np.ones(shape=(len(u), self.L))
for i in range(1, self.L):
U[:, i] = u**i
return U
def fit(self, u: np.ndarray, d: np.ndarray):
U = self._polynomial_basis(u)
Ruu = np.dot(U.T, U)
Rud = np.dot(U.T, d[:, np.newaxis])
self.h = scipy.linalg.solve(Ruu, Rud, assume_a='pos')[:, 0]
def predict(self, u: np.ndarray):
U = self._polynomial_basis(u)
return np.dot(U, self.h)
```
:::{note}
The function `scipy.linalg.solve(A, B)` returns the solution of the linear system `Ax = B`. The argument `assume_a` can be used to state that `A` is symmetric, Hermitian, or positive definite.
:::
The solution of a system with `10` coefficients is:
```python
regressor = Wiener_polynomial_regression(10)
regressor.fit(u, d)
print(regressor.h)
```
How does the result change with L?
```python
uhat = np.linspace(np.amin(u), np.amax(u), num=100)
yhat = {}
for L in [2, 5, 15]:
regressor = Wiener_polynomial_regression(L)
regressor.fit(u, d)
yhat[L] = regressor.predict(uhat)
```
```python
p = [hv.Points((u, d), kdims=['u', 'd'], label='data').opts(size=4, color='k')]
for L, prediction in yhat.items():
p.append(hv.Curve((uhat, prediction), label=f'L={L}'))
hv.Overlay(p).opts(legend_position='top')
```
:::{note}
If $L$ is too small the filter is too simple. If $L$ is too large the filter can overfit the noise
:::
### Forecasting
Here we assume the desired signal is the future of the input
$$
d_n = \{u_{n+1}, u_{n+2}, \ldots, u_{n+m}\}
$$
where $m$ is the prediction horizon. The particular case $m=1$ is called *one-step-ahead prediction*.
The filter length $L$ sets how many past samples are used to predict. For example, a one-step-ahead predictor with $L+1 = 3$ coefficients:
$$
h_0 u_n + h_1 u_{n-1} + h_2 u_{n-2}= y_n = \hat u_{n+1} \approx u_{n+1}
$$
To train the filter
1. Assume the signal has been observed and that $N$ samples are available for training
1. Form a matrix whose rows are $[u_n, u_{n-1}, \ldots, u_{n-L}]$ for $n=L,L+1,\ldots, N-1$
1. Form a vector $[u_N, u_{N-1}, \ldots, u_{L+1}]^T$ (case $m=1$)
1. From these, form the correlation matrices and obtain $\textbf{h}$
1. Finally, use $\textbf{h}$ to predict the unobserved future of $u$
Again we implement the filter as a class. This time numpy's `as_strided` is used to form the vectors of "past samples"
```python
from numpy.lib.stride_tricks import as_strided
class Wiener_predictor:
def __init__(self, L: int):
self.L = L
self.h = np.zeros(shape=(L,))  # one weight per past sample
def fit(self, u: np.ndarray):
U = as_strided(u, [len(u)-self.L+1 , self.L+1],
strides=[u.strides[0], u.strides[0]])
Ruu = np.dot(U[:, :self.L].T, U[:, :self.L])
Rud = np.dot(U[:, :self.L].T, U[:, self.L][:, np.newaxis])
self.h = scipy.linalg.solve(Ruu, Rud, assume_a='pos')[:, 0]
def predict(self, u: np.ndarray, m: int=1):
u_pred = np.zeros(shape=(m+self.L, ))
u_pred[:self.L] = u
for k in range(self.L, m + self.L):
u_pred[k] = np.sum(self.h*u_pred[k-self.L:k])
return u_pred[self.L:]
```
For the following sinusoidal signal, how does $L$ affect the quality of the linear predictor?
We use the first 100 samples to fit the predictor and the next 100 to test it
```python
np.random.seed(12345)
t = np.linspace(0, 10, num=200)
u = np.sin(2.0*np.pi*0.5*t) + 0.25*np.random.randn(len(t))
N_fit = 100
yhat = {}
for L in [10, 20, 30]:
predictor = Wiener_predictor(L)
predictor.fit(u[:N_fit])
yhat[L] = predictor.predict(u[N_fit-L:N_fit], m=100)
```
```python
p = [hv.Points((t, u), ['sample', 'u'], label='Data').opts(color='k')]
for L, prediction in yhat.items():
p.append(hv.Curve((t[N_fit:], prediction), label=f'L={L}'))
hv.Overlay(p).opts(legend_position='top')
```
:::{note}
If $L$ is too small the filter is too simple. If $L$ is too large the filter can overfit the noise
:::
### Removing additive white noise
Here we assume the input signal is a desired signal (information) that has been contaminated with additive noise
$$
u_n = d_n + \nu_n,
$$
and we additionally assume that
- the noise is wide-sense stationary with zero mean, $\mathbb{E}[\nu_n] = 0$
- the noise is white, i.e. it is uncorrelated with itself (at nonzero lags) and with the desired signal
$$
r_{\nu d}(k) = 0, \forall k
$$
- the noise has a certain variance $\mathbb{E}[\nu_n^2] = \sigma_\nu^2, \forall n$
Note that in this case $R_{uu} = R_{dd} + R_{\nu\nu}$ and $R_{ud} = R_{dd}$; hence
the recovered signal is $\hat d_n = h^{*} u_n$ and the filter is
$$
\vec h^{*} = \frac{R_{dd}}{R_{dd} + R_{\nu\nu}}
$$
with frequency response
$$
H(f) = \frac{S_{dd}(f)}{S_{dd}(f) + S_{\nu\nu}(f)}
$$
which means that
- at frequencies where $S_{dd}(f) > S_{\nu\nu}(f)$, $H(f) \approx 1$
- at frequencies where $S_{dd}(f) < S_{\nu\nu}(f)$, $H(f) \approx 0$ (a sketch of this rule in action follows below)
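A minimal sketch of the rule applied in the frequency domain. It is idealized: the clean PSD $S_{dd}$ and the noise variance are assumed known here, whereas in practice both must be estimated:
```python
np.random.seed(0)
t = np.linspace(0, 10, num=1000)
d = np.sin(2.0*np.pi*0.5*t)                      # desired signal
u = d + 0.5*np.random.randn(len(t))              # noisy observation, sigma_nu = 0.5
S_dd = np.abs(np.fft.fft(d))**2 / len(t)         # PSD of d (assumed known here)
S_nn = 0.25*np.ones(len(t))                      # white noise: flat PSD equal to sigma_nu^2
H = S_dd / (S_dd + S_nn)                         # Wiener frequency response
d_hat = np.real(np.fft.ifft(H * np.fft.fft(u)))  # filter in frequency, back to the time domain
```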
```python
```
|
{-# OPTIONS --without-K --rewriting #-}
open import Basics
open import lib.Basics
open import Flat
module Axiom.C1 {@♭ i j : ULevel} (@♭ I : Type i) (@♭ R : I → Type j) where
open import Axiom.C0 I R
postulate C1 : (index : I) → R index
|
module JS.Attribute
import Control.Monad.Either
import Data.SOP
import JS.Callback
import JS.Marshall
import JS.Nullable
import JS.Undefined
import JS.Util
||| A read-write attribute of a JS object.
|||
||| @alwaysReturns : Bool index recording whether the attribute's getter can
||| always return a value that is neither
||| `null` nor `undefined`. This means, that the
||| attribute is non-nullable and either mandatory
||| or optional with a proper default value.
|||
||| @f : Context of values represented by the attribute.
||| This is `Maybe` if the attribute is nullable,
||| `Optional` if it is an optional attribute on
||| a dictionary type, or `I` if it is mandatory
||| and non-nullable.
|||
||| @a : Type of values stored in the attribute
public export
data Attribute : (alwaysReturns : Bool)
-> (f : Type -> Type)
-> (a : Type)
-> Type where
||| A non-optional attribute.
|||
||| This is for data types, which are guaranteed to always return
||| a value that is neither `null` nor `undefined`.
Attr : (get : JSIO a) -> (set : a -> JSIO ()) -> Attribute True I a
||| A nullable, non-optional attribute.
NullableAttr : (get : JSIO (Maybe a))
-> (set : Maybe a -> JSIO ())
-> Attribute False Maybe a
||| An optional attribute with a predefined default value.
OptionalAttr : (get : JSIO (Optional a))
-> (set : Optional a -> JSIO ())
-> (def : a)
-> Attribute True Optional a
||| An optional attribute without default value.
OptionalAttrNoDefault : (get : JSIO (Optional a))
-> (set : Optional a -> JSIO ())
-> Attribute False Optional a
||| Returns the value of an attribute in its proper context.
||| Typically used in infix notation.
|||
||| ```idris example
||| textField `get` value
||| ```
export
get : (o : obj) -> (attr : obj -> Attribute b f a) -> JSIO $ f a
get o g = case g o of
Attr gt _ => gt
NullableAttr gt _ => gt
OptionalAttr gt _ _ => gt
OptionalAttrNoDefault gt _ => gt
||| Maps a function over the value retrieved from an `Attribute`.
export
over : (a -> b) -> (attr : Attribute x f a) -> JSIO $ f b
over f (Attr gt _) = map f gt
over f (NullableAttr gt _) = map f <$> gt
over f (OptionalAttr gt _ _) = map f <$> gt
over f (OptionalAttrNoDefault gt _) = map f <$> gt
||| Flipped version of `get`. This is useful when
||| combined with the bind operator:
|||
||| ```idris example
||| body >>= to className
||| ```
export
to : (attr : obj -> Attribute b f a) -> (o : obj) -> JSIO $ f a
to = flip get
||| Sets the value of an `Attribute`.
export
set' : (attr : Attribute b f a) -> f a -> JSIO ()
set' (Attr _ s) = s
set' (NullableAttr _ s) = s
set' (OptionalAttr _ s _) = s
set' (OptionalAttrNoDefault _ s) = s
||| Gets the value of an `Attribute`. Since this operates
||| on an `Attribute True`, it is guaranteed to always yield
||| a non-nullable value.
export
getDef : (o : obj) -> (attr : obj -> Attribute True f a) -> JSIO a
getDef o g = case g o of
Attr gt _ => gt
OptionalAttr gt _ def => map (fromOptional def) gt
||| Maps a function over the value retrieved from an `Attribute`.
||| Since this operates
||| on an `Attribute True`, it is guaranteed to always yield
||| a non-nullable value.
export
overDef : (a -> b) -> (attr : Attribute True f a) -> JSIO b
overDef f a = f <$> getDef () (const a)
||| Flipped version of `getDef`.
export
toDef : (attr : obj -> Attribute True f a) -> (o : obj) -> JSIO a
toDef = flip getDef
||| Sets the value of an `Attribute`.
|||
||| ```idris example
||| disabled btn `set` True
||| ```
export
set : Attribute b f a -> a -> JSIO ()
set (Attr _ s) y = s y
set (NullableAttr _ s) y = s (Just y)
set (OptionalAttr _ s _) y = s (Def y)
set (OptionalAttrNoDefault _ s) y = s (Def y)
||| Modifies the stored value of an `Attribute`.
|||
||| Please note that this will NOT change the attribute's
||| value if the attribute is unset or `null`.
export
mod : Attribute b f a -> (a -> a) -> JSIO ()
mod (Attr g s) f = g >>= s . f
mod (NullableAttr g s) f = g >>= s . map f
mod (OptionalAttr g s _) f = g >>= s . map f
mod (OptionalAttrNoDefault g s) f = g >>= s . map f
||| Unsets the value of an optional or nullable attribute.
export
unset : Alternative f => (o : obj) -> (obj -> Attribute b f a) -> JSIO ()
unset o g = set' (g o) empty
infix 1 .=, =., %=, =%
||| Operator version of `set`.
|||
||| ```idris example
||| disabled btn .= True
||| ```
export
(.=) : Attribute b f a -> a -> JSIO ()
(.=) = set
||| Operator version of `mod`.
|||
||| As with `mod`, this will NOT change the attribute's
||| value if the attribute is unset or `null`.
|||
||| ```idris example
||| toggleCheckBox : HTMLInputElement -> JSIO ()
||| toggleCheckBox cbx = checked cbx %= not
||| ```
export
(%=) : Attribute b f a -> (a -> a) -> JSIO ()
(%=) = mod
||| Like `set`, but useful when the object, on which
||| an attribute should operate, is supposed to
||| be the last argument (for instance, when
||| iterating over a foldable):
|||
||| ```idris example
||| disableAll : List HTMLButtonElement -> JSIO ()
||| disableAll buttons = for_ buttons $ disabled =. True
||| ```
export
(=.) : (obj -> Attribute b f a) -> a -> obj -> JSIO ()
(=.) f v o = set (f o) v
||| Like `mod`, but useful when the object, on which
||| an attribute should operate, is supposed to
||| be the last argument (for instance, when
||| iterating over a foldable):
|||
||| ```idris example
||| toggleAll : List HTMLInputElement -> JSIO ()
||| toggleAll cbxs = for_ cbxs $ checked =% not
||| ```
export
(=%) : (obj -> Attribute b f a) -> (a -> a) -> obj -> JSIO ()
(=%) f g o = mod (f o) g
infixr 0 !>, ?>
||| Sets a callback function at an attribute.
|||
||| ```idris
||| onclick btn !> consoleLog . jsShow
||| ```
export
(!>) : Callback a fun => Attribute b f a -> fun -> JSIO ()
a !> cb = callback cb >>= set a
||| Sets a callback action at an attribute. This is like `(!>)`
||| but ignores its input.
|||
||| ```idris
||| onclick btn ?> consoleLog "Boom!"
||| ```
export
(?>) : Callback a (x -> y) => Attribute b f a -> y -> JSIO ()
a ?> v = a !> const v
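-- A small combined sketch (hypothetical: `HTMLButtonElement`, `disabled`,
-- `onclick`, and `consoleLog` are assumed to come from a DOM binding, as in
-- the doc strings above):
--
--   setup : HTMLButtonElement -> JSIO ()
--   setup btn = do disabled btn .= True
--                  onclick btn ?> consoleLog "disabled!"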
--------------------------------------------------------------------------------
-- Creating Attributes
--------------------------------------------------------------------------------
export
fromPrim : (ToFFI a b, FromFFI a b)
=> String
-> (obj -> PrimIO b)
-> (obj -> b -> PrimIO ())
-> obj
-> Attribute True I a
fromPrim msg g s o =
Attr (tryJS msg $ g o) (\a => primJS $ s o (toFFI a))
export
fromNullablePrim : (ToFFI a b, FromFFI a b)
=> String
-> (obj -> PrimIO $ Nullable b)
-> (obj -> Nullable b -> PrimIO ())
-> obj
-> Attribute False Maybe a
fromNullablePrim msg g s o =
NullableAttr (tryJS msg $ g o) (\a => primJS $ s o (toFFI a))
export
fromUndefOrPrim : (ToFFI a b, FromFFI a b)
=> String
-> (obj -> PrimIO $ UndefOr b)
-> (obj -> UndefOr b -> PrimIO ())
-> a
-> obj
-> Attribute True Optional a
fromUndefOrPrim msg g s def o =
OptionalAttr (tryJS msg $ g o) (\a => primJS $ s o (toFFI a)) def
export
fromUndefOrPrimNoDefault : (ToFFI a b, FromFFI a b)
=> String
-> (obj -> PrimIO $ UndefOr b)
-> (obj -> UndefOr b -> PrimIO ())
-> obj
-> Attribute False Optional a
fromUndefOrPrimNoDefault msg g s o =
OptionalAttrNoDefault (tryJS msg $ g o) (\a => primJS $ s o (toFFI a))
|
# Introduction to Markdown and LaTeX
Each example places the source syntax and the rendered output in separate cells; double-click a cell to view its source for further study and experimentation.
## 1. Core Markdown Syntax
### 1.1 Paragraphs and Line Breaks
_Example syntax:_
```
前軍至夏口,周瑜問:「荊州有人在前面接否?」人報:「劉皇叔使糜竺來見都督。」瑜喚至,問勞軍如何。糜竺曰:「主公皆準備安排下了。」瑜曰:「皇叔何在?」竺曰:「在荊州城門相等,與都督把盞。」瑜曰:「今為汝家之事,出兵遠征;勞軍之禮,休得輕易。」糜竺領了言語先回。<br />戰船密密排在江上,依次而進。
```
_Output (note the line break before 「戰船」):_
前軍至夏口,周瑜問:「荊州有人在前面接否?」人報:「劉皇叔使糜竺來見都督。」瑜喚至,問勞軍如何。糜竺曰:「主公皆準備安排下了。」瑜曰:「皇叔何在?」竺曰:「在荊州城門相等,與都督把盞。」瑜曰:「今為汝家之事,出兵遠征;勞軍之禮,休得輕易。」糜竺領了言語先回。<br />戰船密密排在江上,依次而進。
### 1.2 Headings
_Example syntax:_
```
# Top-level heading (equivalent to HTML `<h1></h1>`)
## Second level (equivalent to `<h2></h2>`)
### Third level (equivalent to `<h3></h3>`)
#### Fourth level (equivalent to `<h4></h4>`)
##### Fifth level (equivalent to `<h5></h5>`)
###### Sixth level (equivalent to `<h6></h6>`)
```
_Output:_
# Top-level heading (equivalent to HTML `<h1></h1>`)
## Second level (equivalent to `<h2></h2>`)
### Third level (equivalent to `<h3></h3>`)
#### Fourth level (equivalent to `<h4></h4>`)
##### Fifth level (equivalent to `<h5></h5>`)
###### Sixth level (equivalent to `<h6></h6>`)
### 1.3 Bullet Lists
_Example syntax:_
```
The six special municipalities:
- 台北市
- 大安區
- 大同區
- 新北市
* 桃園市
* 台中市
+ 台南市
+ 高雄市
```
_Output:_
The six special municipalities:
- 台北市
- 大安區
- 大同區
- 新北市
* 桃園市
* 台中市
+ 台南市
+ 高雄市
### 1.4 Numbered Lists
_Example syntax:_
```
1. First item
2. Second item
3. Third item
```
_Output:_
1. First item
2. Second item
3. Third item
### 1.5 Blockquotes
_Example syntax:_
```
> 卻說魯肅回見周瑜,說玄德,孔明歡喜不疑,準備出城勞軍。周瑜大笑曰:「原來今番也中了吾計!」便教魯肅稟報吳侯,並遣程普引兵接應。周瑜此時箭瘡已漸平愈,身軀無事,使甘寧為先鋒,自與徐盛,丁奉為第二;淩統,呂蒙為後隊。水陸大兵五百萬,望荊州而來。
周瑜在船中,時復歡笑,以為孔明中計。
> 前軍至夏口,周瑜問:「荊州有人在前面接否?」人報:「劉皇叔使糜竺來見都督。」瑜喚至,問勞軍如何。
```
_Output:_
> 卻說魯肅回見周瑜,說玄德,孔明歡喜不疑,準備出城勞軍。周瑜大笑曰:「原來今番也中了吾計!」便教魯肅稟報吳侯,並遣程普引兵接應。周瑜此時箭瘡已漸平愈,身軀無事,使甘寧為先鋒,自與徐盛,丁奉為第二;淩統,呂蒙為後隊。水陸大兵五百萬,望荊州而來。
周瑜在船中,時復歡笑,以為孔明中計。
> 前軍至夏口,周瑜問:「荊州有人在前面接否?」人報:「劉皇叔使糜竺來見都督。」瑜喚至,問勞軍如何。
### 1.6 Code Blocks
_Example syntax:_
\`\`\`julia
println("Hello Julia")
\`\`\`
_Output:_
```julia
println("Hello Julia")
```
### 1.7 Horizontal Rules
_Example syntax:_
```
卻說魯肅回見周瑜,說玄德,孔明歡喜不疑,準備出城勞軍。周瑜大笑曰:「原來今番也中了吾計!」便教魯肅稟報吳侯,並遣程普引兵接應。周瑜此時箭瘡已漸平愈,身軀無事,使甘寧為先鋒,自與徐盛,丁奉為第二;淩統,呂蒙為後隊。水陸大兵五百萬,望荊州而來。周瑜在船中,時復歡笑,以為孔明中計。
---
前軍至夏口,周瑜問:「荊州有人在前面接否?」人報:「劉皇叔使糜竺來見都督。」瑜喚至,問勞軍如何。糜竺曰:「主公皆準備安排下了。」瑜曰:「皇叔何在?」竺曰:「在荊州城門相等,與都督把盞。」瑜曰:「今為汝家之事,出兵遠征;勞軍之禮,休得輕易。」糜竺領了言語先回。<br />戰船密密排在江上,依次而進。
```
_Output:_
卻說魯肅回見周瑜,說玄德,孔明歡喜不疑,準備出城勞軍。周瑜大笑曰:「原來今番也中了吾計!」便教魯肅稟報吳侯,並遣程普引兵接應。周瑜此時箭瘡已漸平愈,身軀無事,使甘寧為先鋒,自與徐盛,丁奉為第二;淩統,呂蒙為後隊。水陸大兵五百萬,望荊州而來。周瑜在船中,時復歡笑,以為孔明中計。
---
前軍至夏口,周瑜問:「荊州有人在前面接否?」人報:「劉皇叔使糜竺來見都督。」瑜喚至,問勞軍如何。糜竺曰:「主公皆準備安排下了。」瑜曰:「皇叔何在?」竺曰:「在荊州城門相等,與都督把盞。」瑜曰:「今為汝家之事,出兵遠征;勞軍之禮,休得輕易。」糜竺領了言語先回。<br />戰船密密排在江上,依次而進。
### 1.8 Hyperlinks
_Example syntax:_
```
[Cupoy - Exploring the world's new knowledge for you](https://www.cupoy.com)
```
_Output:_
[Cupoy - Exploring the world's new knowledge for you](https://www.cupoy.com)
### 1.9 Embedding Images
_Example syntax:_
```
```
_Output:_
### 1.10 Tables
_Example syntax:_
```
|Name|Country|Address|Age|
|---|---|---|---|
|John Doe|Taiwan (R.O.C.)|No. 1, Dunhua St., Da'an Dist., Taipei City|25|
```
_Output:_
|Name|Country|Address|Age|
|---|---|---|---|
|John Doe|Taiwan (R.O.C.)|No. 1, Dunhua St., Da'an Dist., Taipei City|25|
## 2. Writing Math Formulas with LaTeX
### 2.1 Inline Mode
_Example syntax:_
```
$\LARGE f(\displaystyle\sum_i w_i x_i + b)$
```
_Output:_
$\LARGE f(\displaystyle\sum_i w_i x_i + b)$
### 2.2 Block Mode
_Example syntax:_
```
$$\LARGE f(\displaystyle\sum_i w_i x_i + b)$$
```
_Output:_
$$\LARGE f(\displaystyle\sum_i w_i x_i + b)$$
_Example syntax:_
```
\begin{equation}
\LARGE f(\displaystyle\sum_i w_i x_i + b)
\end{equation}
```
_Output:_
\begin{equation}
\LARGE f(\displaystyle\sum_i w_i x_i + b)
\end{equation}
_Example syntax:_
```
\begin{align}
\LARGE f(\displaystyle\sum_i w_i x_i + b)
\end{align}
```
_Output:_
\begin{align}
\LARGE f(\displaystyle\sum_i w_i x_i + b)
\end{align}
## 3. Combining Markdown with LaTeX Math
_Example syntax:_
```
The formula $\LARGE f(\displaystyle\sum_i w_i x_i + b)$ is a basic formula you will often encounter in Deep Learning courses.
By combining Markdown with LaTeX math, we can write beautiful notes and study more efficiently.
```
_Output:_
The formula $\LARGE f(\displaystyle\sum_i w_i x_i + b)$ is a basic formula you will often encounter in Deep Learning courses.
By combining Markdown with LaTeX math, we can write beautiful notes and study more efficiently.
|
[GOAL]
𝕜 : Type u_1
inst✝¹¹ : NontriviallyNormedField 𝕜
E : Type u_2
inst✝¹⁰ : NormedAddCommGroup E
inst✝⁹ : NormedSpace 𝕜 E
F : Type u_3
inst✝⁸ : NormedAddCommGroup F
inst✝⁷ : NormedSpace 𝕜 F
G : Type u_4
inst✝⁶ : NormedAddCommGroup G
inst✝⁵ : NormedSpace 𝕜 G
G' : Type u_5
inst✝⁴ : NormedAddCommGroup G'
inst✝³ : NormedSpace 𝕜 G'
f f₀ f₁ g : E → F
f' f₀' f₁' g' e : E →L[𝕜] F
x : E
s t : Set E
L L₁ L₂ : Filter E
ι : Type u_6
inst✝² : Fintype ι
F' : ι → Type u_7
inst✝¹ : (i : ι) → NormedAddCommGroup (F' i)
inst✝ : (i : ι) → NormedSpace 𝕜 (F' i)
φ : (i : ι) → E → F' i
φ' : (i : ι) → E →L[𝕜] F' i
Φ : E → (i : ι) → F' i
Φ' : E →L[𝕜] (i : ι) → F' i
⊢ HasStrictFDerivAt Φ Φ' x ↔ ∀ (i : ι), HasStrictFDerivAt (fun x => Φ x i) (comp (proj i) Φ') x
[PROOFSTEP]
simp only [HasStrictFDerivAt, ContinuousLinearMap.coe_pi]
[GOAL]
𝕜 : Type u_1
inst✝¹¹ : NontriviallyNormedField 𝕜
E : Type u_2
inst✝¹⁰ : NormedAddCommGroup E
inst✝⁹ : NormedSpace 𝕜 E
F : Type u_3
inst✝⁸ : NormedAddCommGroup F
inst✝⁷ : NormedSpace 𝕜 F
G : Type u_4
inst✝⁶ : NormedAddCommGroup G
inst✝⁵ : NormedSpace 𝕜 G
G' : Type u_5
inst✝⁴ : NormedAddCommGroup G'
inst✝³ : NormedSpace 𝕜 G'
f f₀ f₁ g : E → F
f' f₀' f₁' g' e : E →L[𝕜] F
x : E
s t : Set E
L L₁ L₂ : Filter E
ι : Type u_6
inst✝² : Fintype ι
F' : ι → Type u_7
inst✝¹ : (i : ι) → NormedAddCommGroup (F' i)
inst✝ : (i : ι) → NormedSpace 𝕜 (F' i)
φ : (i : ι) → E → F' i
φ' : (i : ι) → E →L[𝕜] F' i
Φ : E → (i : ι) → F' i
Φ' : E →L[𝕜] (i : ι) → F' i
⊢ ((fun p => Φ p.fst - Φ p.snd - ↑Φ' (p.fst - p.snd)) =o[𝓝 (x, x)] fun p => p.fst - p.snd) ↔
∀ (i : ι),
(fun p => Φ p.fst i - Φ p.snd i - ↑(comp (proj i) Φ') (p.fst - p.snd)) =o[𝓝 (x, x)] fun p => p.fst - p.snd
[PROOFSTEP]
exact isLittleO_pi
[GOAL]
𝕜 : Type u_1
inst✝¹¹ : NontriviallyNormedField 𝕜
E : Type u_2
inst✝¹⁰ : NormedAddCommGroup E
inst✝⁹ : NormedSpace 𝕜 E
F : Type u_3
inst✝⁸ : NormedAddCommGroup F
inst✝⁷ : NormedSpace 𝕜 F
G : Type u_4
inst✝⁶ : NormedAddCommGroup G
inst✝⁵ : NormedSpace 𝕜 G
G' : Type u_5
inst✝⁴ : NormedAddCommGroup G'
inst✝³ : NormedSpace 𝕜 G'
f f₀ f₁ g : E → F
f' f₀' f₁' g' e : E →L[𝕜] F
x : E
s t : Set E
L L₁ L₂ : Filter E
ι : Type u_6
inst✝² : Fintype ι
F' : ι → Type u_7
inst✝¹ : (i : ι) → NormedAddCommGroup (F' i)
inst✝ : (i : ι) → NormedSpace 𝕜 (F' i)
φ : (i : ι) → E → F' i
φ' : (i : ι) → E →L[𝕜] F' i
Φ : E → (i : ι) → F' i
Φ' : E →L[𝕜] (i : ι) → F' i
⊢ HasFDerivAtFilter Φ Φ' x L ↔ ∀ (i : ι), HasFDerivAtFilter (fun x => Φ x i) (comp (proj i) Φ') x L
[PROOFSTEP]
simp only [HasFDerivAtFilter, ContinuousLinearMap.coe_pi]
[GOAL]
𝕜 : Type u_1
inst✝¹¹ : NontriviallyNormedField 𝕜
E : Type u_2
inst✝¹⁰ : NormedAddCommGroup E
inst✝⁹ : NormedSpace 𝕜 E
F : Type u_3
inst✝⁸ : NormedAddCommGroup F
inst✝⁷ : NormedSpace 𝕜 F
G : Type u_4
inst✝⁶ : NormedAddCommGroup G
inst✝⁵ : NormedSpace 𝕜 G
G' : Type u_5
inst✝⁴ : NormedAddCommGroup G'
inst✝³ : NormedSpace 𝕜 G'
f f₀ f₁ g : E → F
f' f₀' f₁' g' e : E →L[𝕜] F
x : E
s t : Set E
L L₁ L₂ : Filter E
ι : Type u_6
inst✝² : Fintype ι
F' : ι → Type u_7
inst✝¹ : (i : ι) → NormedAddCommGroup (F' i)
inst✝ : (i : ι) → NormedSpace 𝕜 (F' i)
φ : (i : ι) → E → F' i
φ' : (i : ι) → E →L[𝕜] F' i
Φ : E → (i : ι) → F' i
Φ' : E →L[𝕜] (i : ι) → F' i
⊢ ((fun x' => Φ x' - Φ x - ↑Φ' (x' - x)) =o[L] fun x' => x' - x) ↔
∀ (i : ι), (fun x' => Φ x' i - Φ x i - ↑(comp (proj i) Φ') (x' - x)) =o[L] fun x' => x' - x
[PROOFSTEP]
exact isLittleO_pi
|
function transpose(x)
%TRANSPOSE is not defined on tensors.
%
% See also TENSOR, TENSOR/PERMUTE.
%
%MATLAB Tensor Toolbox.
%Copyright 2015, Sandia Corporation.
% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others.
% http://www.sandia.gov/~tgkolda/TensorToolbox.
% Copyright (2015) Sandia Corporation. Under the terms of Contract
% DE-AC04-94AL85000, there is a non-exclusive license for use of this
% work by or on behalf of the U.S. Government. Export of this data may
% require a license from the United States Government.
% The full license terms can be found in the file LICENSE.txt
error('Transpose on tensor is not defined');
|
module Control.Monad.Codensity
import Control.Monad.Free
%access public export
%default total
data Codensity : (m : Type -> Type) -> (a : Type) -> Type where
Codense : ({b : Type} -> (a -> m b) -> m b) -> Codensity m a
runCodensity : Codensity m a -> ({b : Type} -> (a -> m b) -> m b)
runCodensity (Codense c) = c
Functor (Codensity f) where
map f (Codense c) = Codense (\k => c (k . f))
Applicative (Codensity f) where
pure x = Codense (\k => k x)
(Codense f) <*> (Codense x) = Codense (\k => x (\x' => f (\f' => k (f' x'))))
Monad (Codensity f) where
(Codense x) >>= f = Codense (\k => x (\x' => runCodensity (f x') k))
liftCodensity : Monad m => m a -> Codensity m a
liftCodensity x = Codense (x >>=)
lowerCodensity : Monad m => Codensity m a -> m a
lowerCodensity (Codense c) = c pure
Functor f => MonadFree (Codensity (Free f)) f where
wrap x = Codense (\k => wrap (map (>>= k) (map lowerCodensity x)))
|
\section{Requirements}
\label{sec:requirements}
Next, we present the most critical requirements that motivated our architecture and design. We start with a set of general requirements.
\begin{description}
\item[Leveraging new Python features.] Python is a very popular choice with many data scientists. Our framework will leverage the newest Python 3 features, such as the {\em Typing Interface} \cite{www-python-typing}, to increase the robustness and future-proofing of our code base.
\item[Ability to be used within Jupyter Notebooks.] ~\\
The framework must integrate with Jupyter notebooks as they are very popular with today's data scientists. The functionality must be easily accessible not only from Python programs but also within Jupyter notebooks. This is of special importance for cloud services such as Google Colab \cite{google-colab}, which, for example, offers cloud-based notebooks.
\item[Ease of use] is a critical aspect of the framework that is to be addressed from the start by allowing for easy creation, easy deployment, and easy use of the generated services, accompanied by easy-to-use command-line tools.
\end{description}
Next, we list some more specific requirements that motivate our architectural design.
\begin{description}
\item[Multi-Cloud Service Integration.] The framework must allow us to integrate multiple cloud services, including IaaS, PaaS, and SaaS. This also includes the ability to access AI-based services offered by the various cloud providers.
\item[Hybrid-Cloud Service Integration.] The framework must allow integrating on-premise, private, and public clouds.
\item[Generalized Analytics Service Generator.] We need a generalized analytics service generator. The first step in generating an analytics service is to provide an OpenAPI service generator. Our generator will allow us to define essential analytics functions such as (a) uploading and downloading files to an analytics service; (b) specifying the functionality through typing-enhanced Python functions; and (c) generating the code for the service. A minimal sketch of such a typed function is given after this list.
\item[Generalized Analytics Service Deployment.] After the service is generated, it needs to be deployed. For this step
we will be reusing the Cloudmesh deployment mechanism to instantiate
services on-demand on specified cloud providers such as AWS, Azure,
and Google.
\item[Generalized Analytics Service Invocation.] The next step includes the invocation of the deployed services. While analyzing some use cases, we identified that users often need to invoke the same service many times to tune service parameters in a quasi-realtime fashion while using parameters that cannot be included in the URL. Hence, we will need to upload input parameters through files if the simple typing data types provided by our proposed framework are not sufficient.
\item[REST Services Architecture.] As REST has become the most prominent architectural design principle, our generalized service architecture needs to be able to produce REST services.
\item[Automated REST Service Generation for other Languages.] Our framework must include provisions that allow integration into other programming languages and, conversely, allow the integration of services and functions developed in other languages.
\item[Generalized Analytics Service Registry.] As users and communities may develop many different services, we must provide the ability to (a) find
specifications of generalized analytics services (b) find use-cases
of generalized analytics services (c) find infrastructure on which
such services can be deployed, and (d) find deployed analytics
services. For this, we need a registry that can be queried by the community.
\item[Generalized Composable Analytics Services.] Services must be allowed to reuse other services to allow for easy integration. Thus we need to make our services composable. This also includes the choreography of the execution of such composable services.
\end{description}
|
How prepared are you for what’s ahead and the technologies that will be the drivers of change in 2016?
Simply put, Session Initiation Protocol (SIP) and SIP Trunking can smartly transform your customers’ communications infrastructure for the better.
It can be done rapidly and cost-effectively while seamlessly providing access to highly scalable unified communications, so employees and customers can benefit from responsive anytime, anywhere, any-device availability. And by enabling ready access to feature-rich collaborative tools, your accounts could see an increase in workforce productivity and cooperation. Start telling your customers why SIP is a smart technology investment. Get started with a free e-book courtesy of NEC.
|
/-
Copyright (c) 2022 Yaël Dillies. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yaël Dillies
! This file was ported from Lean 3 source module topology.hom.open
! leanprover-community/mathlib commit 98e83c3d541c77cdb7da20d79611a780ff8e7d90
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathlib.Topology.ContinuousFunction.Basic
/-!
# Continuous open maps
This file defines bundled continuous open maps.
We use the `FunLike` design, so each type of morphisms has a companion typeclass which is meant to
be satisfied by itself and all stricter types.
## Types of morphisms
* `ContinuousOpenMap`: Continuous open maps.
## Typeclasses
* `ContinuousOpenMapClass`
-/
open Function
variable {F α β γ δ : Type _}
/-- The type of continuous open maps from `α` to `β`, aka Priestley homomorphisms. -/
structure ContinuousOpenMap (α β : Type _) [TopologicalSpace α] [TopologicalSpace β] extends
ContinuousMap α β where
map_open' : IsOpenMap toFun
#align continuous_open_map ContinuousOpenMap
infixr:25 " →CO " => ContinuousOpenMap
section
/-- `ContinuousOpenMapClass F α β` states that `F` is a type of continuous open maps.
You should extend this class when you extend `ContinuousOpenMap`. -/
class ContinuousOpenMapClass (F : Type _) (α β : outParam <| Type _) [TopologicalSpace α]
[TopologicalSpace β] extends ContinuousMapClass F α β where
map_open (f : F) : IsOpenMap f
#align continuous_open_map_class ContinuousOpenMapClass
end
export ContinuousOpenMapClass (map_open)
instance [TopologicalSpace α] [TopologicalSpace β] [ContinuousOpenMapClass F α β] :
CoeTC F (α →CO β) :=
⟨fun f => ⟨f, map_open f⟩⟩
/-! ### Continuous open maps -/
namespace ContinuousOpenMap
variable [TopologicalSpace α] [TopologicalSpace β] [TopologicalSpace γ] [TopologicalSpace δ]
instance : ContinuousOpenMapClass (α →CO β) α β where
coe f := f.toFun
coe_injective' f g h := by
obtain ⟨⟨_, _⟩, _⟩ := f
obtain ⟨⟨_, _⟩, _⟩ := g
congr
map_continuous f := f.continuous_toFun
map_open f := f.map_open'
theorem toFun_eq_coe {f : α →CO β} : f.toFun = (f : α → β) :=
rfl
#align continuous_open_map.to_fun_eq_coe ContinuousOpenMap.toFun_eq_coe
@[simp] -- porting note: new, simpNF of `toFun_eq_coe`
theorem coe_toContinuousMap (f : α →CO β) : ⇑f.toContinuousMap = f :=
  rfl
@[ext]
theorem ext {f g : α →CO β} (h : ∀ a, f a = g a) : f = g :=
FunLike.ext f g h
#align continuous_open_map.ext ContinuousOpenMap.ext
/-- Copy of a `ContinuousOpenMap` with a new `ContinuousMap` equal to the old one. Useful to fix
definitional equalities. -/
protected def copy (f : α →CO β) (f' : α → β) (h : f' = f) : α →CO β :=
⟨f.toContinuousMap.copy f' <| h, h.symm.subst f.map_open'⟩
#align continuous_open_map.copy ContinuousOpenMap.copy
@[simp]
theorem coe_copy (f : α →CO β) (f' : α → β) (h : f' = f) : ⇑(f.copy f' h) = f' :=
rfl
#align continuous_open_map.coe_copy ContinuousOpenMap.coe_copy
theorem copy_eq (f : α →CO β) (f' : α → β) (h : f' = f) : f.copy f' h = f :=
FunLike.ext' h
#align continuous_open_map.copy_eq ContinuousOpenMap.copy_eq
variable (α)
/-- `id` as a `ContinuousOpenMap`. -/
protected def id : α →CO α :=
⟨ContinuousMap.id _, IsOpenMap.id⟩
#align continuous_open_map.id ContinuousOpenMap.id
instance : Inhabited (α →CO α) :=
⟨ContinuousOpenMap.id _⟩
@[simp]
theorem coe_id : ⇑(ContinuousOpenMap.id α) = id :=
rfl
#align continuous_open_map.coe_id ContinuousOpenMap.coe_id
variable {α}
@[simp]
theorem id_apply (a : α) : ContinuousOpenMap.id α a = a :=
rfl
#align continuous_open_map.id_apply ContinuousOpenMap.id_apply
/-- Composition of `ContinuousOpenMap`s as a `ContinuousOpenMap`. -/
def comp (f : β →CO γ) (g : α →CO β) : ContinuousOpenMap α γ :=
⟨f.toContinuousMap.comp g.toContinuousMap, f.map_open'.comp g.map_open'⟩
#align continuous_open_map.comp ContinuousOpenMap.comp
@[simp]
theorem coe_comp (f : β →CO γ) (g : α →CO β) : (f.comp g : α → γ) = f ∘ g :=
rfl
#align continuous_open_map.coe_comp ContinuousOpenMap.coe_comp
@[simp]
theorem comp_apply (f : β →CO γ) (g : α →CO β) (a : α) : (f.comp g) a = f (g a) :=
rfl
#align continuous_open_map.comp_apply ContinuousOpenMap.comp_apply
@[simp]
theorem comp_assoc (f : γ →CO δ) (g : β →CO γ) (h : α →CO β) :
(f.comp g).comp h = f.comp (g.comp h) :=
rfl
#align continuous_open_map.comp_assoc ContinuousOpenMap.comp_assoc
@[simp]
theorem comp_id (f : α →CO β) : f.comp (ContinuousOpenMap.id α) = f :=
ext fun _ => rfl
#align continuous_open_map.comp_id ContinuousOpenMap.comp_id
@[simp]
theorem id_comp (f : α →CO β) : (ContinuousOpenMap.id β).comp f = f :=
ext fun _ => rfl
#align continuous_open_map.id_comp ContinuousOpenMap.id_comp
theorem cancel_right {g₁ g₂ : β →CO γ} {f : α →CO β} (hf : Surjective f) :
g₁.comp f = g₂.comp f ↔ g₁ = g₂ :=
⟨fun h => ext <| hf.forall.2 <| FunLike.ext_iff.1 h, fun h => congr_arg₂ _ h rfl⟩
#align continuous_open_map.cancel_right ContinuousOpenMap.cancel_right
theorem cancel_left {g : β →CO γ} {f₁ f₂ : α →CO β} (hg : Injective g) :
g.comp f₁ = g.comp f₂ ↔ f₁ = f₂ :=
⟨fun h => ext fun a => hg <| by rw [← comp_apply, h, comp_apply], congr_arg _⟩
#align continuous_open_map.cancel_left ContinuousOpenMap.cancel_left
end ContinuousOpenMap
|
(*************************************************************)
(* Copyright Dominique Larchey-Wendling [*] *)
(* *)
(* [*] Affiliation LORIA -- CNRS *)
(*************************************************************)
(* This file is distributed under the terms of the *)
(* CeCILL v2 FREE SOFTWARE LICENSE AGREEMENT *)
(*************************************************************)
Require Import List.
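(* lrev l: reverses the list constr l at the meta level. *)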
Ltac lrev l :=
let rec loop aa ll :=
match ll with
| ?x::?ll => loop (x::aa) ll
| nil => constr:(aa)
end
in match type of l with
| list ?t => loop (@nil t) l
end.
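(* lflat l: decomposes a list expression built with ++ and :: into the
   ordered list of its chunks; each cons'd element x becomes the singleton
   chunk (x::nil), while other list subterms stay whole. *)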
Ltac lflat l :=
let rec loop aa ll :=
match ll with
| ?ss++?rr => let bb := loop aa ss in
let cc := loop bb rr
in constr:(cc)
| ?x::?rr => let bb := loop ((x::nil)::aa) rr
in constr:(bb)
| nil => constr:(aa)
| ?ss => constr:(ss::aa)
end
in match type of l with
| list ?t => let r1 := loop (@nil (list t)) l
in lrev r1
end.
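(* llin: inverse of lflat; concatenates a list of chunks back into a single
   list expression, turning singleton chunks (x::nil) back into conses. *)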
Ltac llin l :=
let rec loop aa ll :=
match ll with
| (?x::nil)::?ll => let bb := loop (x::aa) ll
in constr:(bb)
| ?lx::?ll => let bb := loop (lx++aa) ll
in constr:(bb)
| nil => constr:(aa)
end
in match type of l with
| list (list ?t) =>
match lrev l with
| ?lx::?rr => let bb := loop lx rr
in constr:(bb)
| nil => constr:(@nil t)
end
end.
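(* lcut x l: splits the chunk list l at the first occurrence of the chunk x,
   returning it in the form prefix ++ (x :: rest). *)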
Ltac lcut x l :=
let rec loop aa x ll :=
match ll with
| x::_ => let bb := lrev aa
in constr:(bb++ll)
| ?lz::?rr => let bb := loop (lz::aa) x rr
in constr:(bb)
end
in match type of l with
| list (list ?t) => loop (@nil (list t)) x l
end.
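(* lmerge: given an append of two chunk lists, linearizes each side with llin. *)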
Ltac lmerge l :=
match l with
| ?aa++?ll => let bb := llin aa in
let cc := llin ll
in constr:(bb++cc)
end.
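(* focus_lst z r0: reassociates the list expression r0 into the form
   prefix ++ suffix with the chunk z exposed at the head of the suffix
   (see the commented tests below). *)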
Ltac focus_lst z r0 :=
let r1 := lflat r0 in
let r2 := lcut z r1 in
let r3 := lmerge r2
in constr:(r3).
Ltac focus_lst_2 z r0 :=
let r1 := focus_lst z r0 in
let r2 := match r1 with
| ?l++?x::?r =>
let r3 := focus_lst z r in
match r3 with
| ?m++?y::?n => constr:((l++x::m)++y::n)
end
end
in constr:(r2).
Ltac focus_lst_3 z r0 :=
let r1 := focus_lst_2 z r0 in
let r2 := match r1 with
| ?l++?x::?r =>
let r3 := focus_lst z r in
match r3 with
| ?m++?y::?n => constr:((l++x::m)++y::n)
end
end
in constr:(r2).
Ltac focus_elt z l := focus_lst (z::nil) l.
Section test.
Variable X : Type.
Variable x y z : list X.
Variable a b c : X.
(*
Goal True.
let rr := lrev (1::2::3::4::nil) in idtac rr.
let rr := lflat ((x++a::nil++nil)++b::z++y++a::nil) in idtac rr.
let rr := llin (x::y::(a::nil)::z::(b::nil)::nil) in idtac rr.
let rr := llin (@nil (list X)) in idtac rr.
let rr := llin (@nil (list X)) in idtac rr.
let rr := lcut z ( (x::y::(a::nil)::y::(b::nil)::z::nil) ) in idtac rr.
let rr := lmerge ( (x::y::(a::nil)::nil)++(y::(b::nil)::z::nil) ) in idtac rr.
let rr := focus_lst_2 (c::nil) ((x++c::nil++nil)++b::z++y++c::nil) in idtac rr.
let rr := focus_lst_2 (c::nil) ((x++c::nil++nil)++b::z++y++c::z++c::x++c::z) in idtac rr.
let rr := focus_lst z ((x++a::nil++nil)++b::z++y++c::nil) in idtac rr.
let rr := focus_elt a (c::(x++a::nil++nil)++b::z++y++c::nil) in idtac rr.
auto.
Qed.
*)
End test.
|
open import Agda.Builtin.Nat
open import Agda.Builtin.Equality
data Ix : Set where
ix : .(i : Nat) (n : Nat) → Ix
data D : Ix → Set where
mkD : ∀ n → D (ix n n)
data ΣD : Set where
_,_ : ∀ i → D i → ΣD
foo : ΣD → Nat
foo (i , mkD n) = n
d : ΣD
d = ix 0 6 , mkD 6
-- Check that we pick the right (the non-irrelevant) `n` when binding
-- the forced argument.
check : foo d ≡ 6
check = refl
|
```python
import numpy as np
import dapy.filters as filters
from dapy.models import NettoGimenoMendesModel
import matplotlib.pyplot as plt
%matplotlib inline
plt.style.use('seaborn-white')
plt.rcParams['figure.dpi'] = 100
```
## Model
One-dimensional stochastic dynamical system due to Netto et al. [1], with state dynamics defined by the discrete-time map
\begin{equation}
x_{t+1} = \alpha x_t + \beta \frac{x_t}{1 + x_t^2} + \gamma \cos(\delta t) + \sigma_x u_t
\end{equation}
with $u_t \sim \mathcal{N}(0, 1) ~\forall t$ and $x_0 \sim \mathcal{N}(m, s^2)$.
Observed process defined by
\begin{equation}
y_{t} = \epsilon x_t^2 + \sigma_y v_t
\end{equation}
with $v_t \sim \mathcal{N}(0, 1)$.
Standard parameter values assumed here are $\alpha = 0.5$, $\beta = 25$, $\gamma = 8$, $\delta = 1.2$, $\epsilon = 0.05$, $m=10$, $s=5$, $\sigma_x^2 = 1$, $\sigma_y^2 = 1$ and $T = 200$ simulated time steps.
### References
1. M. L. A. Netto, L. Gimeno, and M. J. Mendes. A new spline algorithm for non-linear filtering of discrete time systems. *Proceedings of the 7th Triennial World Congress*, 1979.
```python
model_params = {
'initial_state_mean': 10.,
'initial_state_std': 5.,
'state_noise_std': 1.,
'observation_noise_std': 1.,
'alpha': 0.5,
'beta': 25.,
'gamma': 8,
'delta': 1.2,
'epsilon': 0.05,
}
model = NettoGimenoMendesModel(**model_params)
```
## Generate data from model
```python
num_observation_time = 100
observation_time_indices = np.arange(num_observation_time)
seed = 20171027
rng = np.random.default_rng(seed)
state_sequence, observation_sequence = model.sample_state_and_observation_sequences(
rng, observation_time_indices)
```
Sampling: 100% 100/100 [00:00<00:00, 21597.18time-steps/s]
```python
fig, ax = plt.subplots(figsize=(12, 4))
ax.plot(observation_time_indices, state_sequence)
ax.plot(observation_time_indices, observation_sequence, '.')
ax.set_xlabel('Time index $t$')
ax.set_ylabel('State')
_ = ax.set_xlim(0, num_observation_time - 1)
ax.legend(['$x_t$', '$y_t$'], ncol=4)
fig.tight_layout()
```
## Infer state from observations
```python
def plot_results(results, observation_time_indices, state_sequence=None,
plot_particles=True, plot_region=False,
particle_skip=5, trace_alpha=0.25):
fig, ax = plt.subplots(sharex=True, figsize=(12, 4))
ax.plot(results['state_mean_sequence'][:, 0], 'g-', lw=1, label='Est. mean')
if plot_region:
ax.fill_between(
observation_time_indices,
results['state_mean_sequence'][:, 0] - 3 * results['state_std_sequence'][:, 0],
results['state_mean_sequence'][:, 0] + 3 * results['state_std_sequence'][:, 0],
alpha=0.25, color='g', label='Est. mean ± 3 standard deviation'
)
if plot_particles:
lines = ax.plot(
observation_time_indices, results['state_particles_sequence'][:, ::particle_skip, 0],
'r-', lw=0.25, alpha=trace_alpha)
lines[0].set_label('Particles')
if state_sequence is not None:
ax.plot(observation_time_indices, state_sequence[:, 0], 'k--', label='Truth')
ax.set_ylabel('$x_t$')
ax.legend(loc='upper center', ncol=4)
ax.set_xlabel('Time index $t$')
fig.tight_layout()
return fig, ax
```
### Ensemble Kalman filter (perturbed observations)
```python
enkf = filters.EnsembleKalmanFilter()
```
```python
results_enkf = enkf.filter(
model, observation_sequence, observation_time_indices,
num_particle=500, rng=rng, return_particles=True)
```
Filtering: 100% 100/100 [00:00<00:00, 427.09time-steps/s]
```python
fig, axes = plot_results(results_enkf, observation_time_indices, state_sequence)
```
### Ensemble Transform Kalman filter (deterministic square root)
```python
etkf = filters.EnsembleTransformKalmanFilter()
```
```python
results_etkf = etkf.filter(
model, observation_sequence, observation_time_indices,
num_particle=500, rng=rng, return_particles=True)
```
Filtering: 100% 100/100 [00:00<00:00, 143.24time-steps/s]
```python
fig, axes = plot_results(results_etkf, observation_time_indices, state_sequence)
```
### Bootstrap particle filter
```python
bspf = filters.BootstrapParticleFilter()
```
```python
results_bspf = bspf.filter(
model, observation_sequence, observation_time_indices,
num_particle=500, rng=rng, return_particles=True)
```
Filtering: 100% 100/100 [00:00<00:00, 3062.47time-steps/s]
```python
fig, axes = plot_results(results_bspf, observation_time_indices, state_sequence)
```
### Ensemble transform particle filter
```python
etpf = filters.EnsembleTransformParticleFilter()
```
```python
results_etpf = etpf.filter(
model, observation_sequence, observation_time_indices,
num_particle=500, rng=rng, return_particles=True)
```
Filtering: 100% 100/100 [00:02<00:00, 41.33time-steps/s]
```python
fig, axes = plot_results(results_etpf, observation_time_indices, state_sequence)
```
|
If the female is flagging, she is ready to breed. The drive to breed in a male is the strongest that he has, even stronger than food, so he should be all over her. Still, if he is young and inexperienced, you may have to guide him up to the female and use your hand to stimulate him a bit to build the drive. Once he gets going a little, push him away and let him come back for more. If the dog is a female, she will hold her tail to her side, exposing her genitals, and she will pump her back legs up and down. If the dog is male, he will get on the female's back.
Is it possible to mate my mixed-breed female dog, short of letting her run with any dog in the local park? Owners of pedigree studs will sometimes allow mating with a non-purebred dog. This is particularly useful if you want to concentrate on a particular characteristic or conformation.
How can I make my male dog breed with my female dog? Well, she's a smart dog for not wanting to breed with a mix and have mixed-breed puppies. But if she is biting at him, the real reason is probably that she is not ready yet. A female goes into her heat cycle and begins to show color (bleed). She should only bleed for about 9 to 10 days. The day the bleeding stops, she will be ready to breed. Dogs can still want to mate even if they are fixed: my sister's dog had mated with a dog and he has been fixed for over 2 years, but he could not get her pregnant. And now he wants to mate with my 6-month-old lab, which I will not allow, so a dog can still want to mate, and he is 6 now.
However, this is a long-term, 100% effective preventative measure to take if you don't want your dog to get pregnant after mating, or ever again. Injections: most dogs, given the opportunity, will end up mating when the female is in heat.
If your female dog approaches your male dog with her derriere prominently raised, she's communicating that she wants him to mate with her. She also might show other body clues that point to mating -- such as tightening up her back limbs and moving her tail away from the center of her body.
|
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE TypeOperators #-}
module DFT
( testSuite
) where
import Data.Complex
import Data.Mod.Word
import Data.Poly.Semiring (UPoly, unPoly, toPoly, dft, inverseDft, dftMult)
import qualified Data.Vector.Unboxed as U
import GHC.TypeNats (KnownNat, natVal, type (+), type (^))
import Test.Tasty
import Test.Tasty.QuickCheck hiding (scale, numTests)
import Dense ()
testSuite :: TestTree
testSuite = testGroup "DFT"
[ testGroup "dft matches reference"
[ dftMatchesRef (0 :: Mod (2 ^ 0 + 1))
, dftMatchesRef (2 :: Mod (2 ^ 1 + 1))
, dftMatchesRef (2 :: Mod (2 ^ 2 + 1))
, dftMatchesRef (3 :: Mod (2 ^ 4 + 1))
, dftMatchesRef (3 :: Mod (2 ^ 8 + 1))
]
, testGroup "dft is invertible"
[ dftIsInvertible (0 :: Mod (2 ^ 0 + 1))
, dftIsInvertible (2 :: Mod (2 ^ 1 + 1))
, dftIsInvertible (2 :: Mod (2 ^ 2 + 1))
, dftIsInvertible (3 :: Mod (2 ^ 4 + 1))
, dftIsInvertible (3 :: Mod (2 ^ 8 + 1))
]
, testProperty "dftMult matches reference" dftMultMatchesRef
]
dftMatchesRef :: KnownNat n1 => Mod n1 -> TestTree
dftMatchesRef primRoot = testProperty (show n) $ do
xs <- U.replicateM n arbitrary
pure $ dft primRoot xs === dftRef primRoot xs
where
n = fromIntegral (natVal primRoot - 1)
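-- Naive O(n^2) reference implementation of the DFT, used to check `dft`.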
dftRef :: (Num a, U.Unbox a) => a -> U.Vector a -> U.Vector a
dftRef primRoot xs = U.generate (U.length xs) $
\k -> sum (map (\j -> xs U.! j * primRoot ^ (j * k)) [0..n-1])
where
n = U.length xs
dftIsInvertible :: KnownNat n1 => Mod n1 -> TestTree
dftIsInvertible primRoot = testProperty (show n) $ do
xs <- U.replicateM n arbitrary
let ys = dft primRoot xs
zs = inverseDft primRoot ys
pure $ xs === zs
where
n = fromIntegral (natVal primRoot - 1)
dftMultMatchesRef :: UPoly Int -> UPoly Int -> Property
dftMultMatchesRef xs ys = zs === dftZs
where
xs', ys', dftZs' :: UPoly (Complex Double)
xs' = toPoly $ U.map fromIntegral $ unPoly xs
ys' = toPoly $ U.map fromIntegral $ unPoly ys
dftZs' = dftMult (\k -> cis (2 * pi / fromIntegral k)) xs' ys'
zs, dftZs :: UPoly (Complex Int)
zs = toPoly $ U.map (:+ 0) $ unPoly $ xs * ys
dftZs = toPoly $ U.map (\(x :+ y) -> round x :+ round y) $ unPoly dftZs'
|
State Before: α : Type u
β : Type v
f : ℕ → α → β → β
b : β
as : List α
start : ℕ
⊢ foldrIdx f b as start = foldrIdxSpec f b as start State After: case nil
α : Type u
β : Type v
f : ℕ → α → β → β
b : β
start : ℕ
⊢ foldrIdx f b [] start = foldrIdxSpec f b [] start
case cons
α : Type u
β : Type v
f : ℕ → α → β → β
b : β
head✝ : α
tail✝ : List α
tail_ih✝ : ∀ (start : ℕ), foldrIdx f b tail✝ start = foldrIdxSpec f b tail✝ start
start : ℕ
⊢ foldrIdx f b (head✝ :: tail✝) start = foldrIdxSpec f b (head✝ :: tail✝) start Tactic: induction as generalizing start State Before: case nil
α : Type u
β : Type v
f : ℕ → α → β → β
b : β
start : ℕ
⊢ foldrIdx f b [] start = foldrIdxSpec f b [] start State After: no goals Tactic: rfl State Before: case cons
α : Type u
β : Type v
f : ℕ → α → β → β
b : β
head✝ : α
tail✝ : List α
tail_ih✝ : ∀ (start : ℕ), foldrIdx f b tail✝ start = foldrIdxSpec f b tail✝ start
start : ℕ
⊢ foldrIdx f b (head✝ :: tail✝) start = foldrIdxSpec f b (head✝ :: tail✝) start State After: no goals Tactic: simp only [foldrIdx, foldrIdxSpec_cons, *]
|
module Quantities.Core
import public Quantities.FreeAbelianGroup
import public Quantities.Power
%default total
%access public export
||| Elementary quantities
record Dimension where
constructor MkDimension
name : String
implementation Eq Dimension where
(MkDimension a) == (MkDimension b) = a == b
implementation Ord Dimension where
compare (MkDimension a) (MkDimension b) = compare a b
-- Compound quantities
||| A quantity is a property that can be measured.
Quantity : Type
Quantity = FreeAbGrp Dimension
||| The trivial quantity
Scalar : Quantity
Scalar = unit
||| Create a new quantity.
mkQuantity : List (Dimension, Integer) -> Quantity
mkQuantity = mkFreeAbGrp
infixl 6 </>
-- Synonyms (quantites are multiplied, not added!)
||| Product quantity
(<*>) : Ord a => FreeAbGrp a -> FreeAbGrp a -> FreeAbGrp a
(<*>) = (<<+>>)
||| Quotient quantity
(</>) : Ord a => FreeAbGrp a -> FreeAbGrp a -> FreeAbGrp a
a </> b = a <*> freeAbGrpInverse b
||| Convert dimensions to quantities
implicit
dimensionToQuantity : Dimension -> Quantity
dimensionToQuantity = inject
||| Elementary Unit
record ElemUnit (q : Quantity) where
constructor MkElemUnit
name : String
conversionRate : Double
-- ElemUnit with its quantity hidden
data SomeElemUnit : Type where
MkSomeElemUnit : (q : Quantity) -> ElemUnit q -> SomeElemUnit
quantity : SomeElemUnit -> Quantity
quantity (MkSomeElemUnit q _) = q
elemUnit : (seu : SomeElemUnit) -> ElemUnit (quantity seu)
elemUnit (MkSomeElemUnit _ u) = u
name : SomeElemUnit -> String
name x = name (elemUnit x)
implementation Eq SomeElemUnit where
a == b = quantity a == quantity b && name a == name b
implementation Ord SomeElemUnit where
compare a b with (compare (quantity a) (quantity b))
| LT = LT
| GT = GT
| EQ = compare (name a) (name b)
implicit
elemUnitToSomeElemUnitFreeAbGrp : ElemUnit q -> FreeAbGrp SomeElemUnit
elemUnitToSomeElemUnitFreeAbGrp {q} u = inject (MkSomeElemUnit q u)
conversionRate : SomeElemUnit -> Double
conversionRate u = conversionRate (elemUnit u)
joinedQuantity : FreeAbGrp SomeElemUnit -> Quantity
joinedQuantity = lift quantity
data Unit : Quantity -> Type where
MkUnit : (exponent : Integer) -> (elemUnits : FreeAbGrp SomeElemUnit) ->
Unit (joinedQuantity elemUnits)
rewriteUnit : r = q -> Unit q -> Unit r
rewriteUnit eq unit = rewrite eq in unit
base10Exponent : Unit q -> Integer
base10Exponent (MkUnit e _) = e
someElemUnitFreeAbGrp : Unit q -> FreeAbGrp SomeElemUnit
someElemUnitFreeAbGrp (MkUnit _ us) = us
implementation Eq (Unit q) where
x == y = base10Exponent x == base10Exponent y &&
someElemUnitFreeAbGrp x == someElemUnitFreeAbGrp y
||| The trivial unit
One : Unit Scalar
One = MkUnit 0 neutral
||| Multiples of ten
Ten : Unit Scalar
Ten = MkUnit 1 neutral
||| One hundredth
Percent : Unit Scalar
Percent = MkUnit (-2) neutral
||| One thousandth
Promille : Unit Scalar
Promille = MkUnit (-3) neutral
||| The trivial unit (synonymous with `One`)
UnitLess : Unit Scalar
UnitLess = One
implicit
elemUnitToUnit : {q : Quantity} -> ElemUnit q -> Unit q
elemUnitToUnit {q} u = rewriteUnit eq (MkUnit 0 (inject (MkSomeElemUnit q u)))
where eq = sym (inject_lift_lem quantity (MkSomeElemUnit q u))
||| Compute conversion factor from the given unit to the base unit of the
||| corresponding quantity.
joinedConversionRate : Unit q -> Double
joinedConversionRate (MkUnit e (MkFreeAbGrp us)) = fromUnits * fromExponent
where fromUnits = product $ map (\(u, i) => ((^) @{floatmultpower}) (conversionRate u) i) us
fromExponent = ((^) @{floatmultpower}) 10 e
||| Constructs a new unit given a name and conversion factor from an existing unit.
defineAsMultipleOf : String -> Double -> Unit q -> ElemUnit q
defineAsMultipleOf name factor unit = MkElemUnit name (factor * joinedConversionRate unit)
-- Syntax sugar for defining new units
syntax "< one" [name] equals [factor] [unit] ">" = defineAsMultipleOf name factor unit
implementation Show (Unit q) where
show (MkUnit 0 (MkFreeAbGrp [])) = "UnitLess"
show (MkUnit e (MkFreeAbGrp [])) = "Ten ^^ " ++ show e
show (MkUnit e (MkFreeAbGrp (u :: us))) = if e == 0 then fromUnits
else "Ten ^^ " ++ show e ++ " <**> " ++ fromUnits
where monom : (SomeElemUnit, Integer) -> String
monom (unit, 1) = name unit
monom (unit, i) = name unit ++ " ^^ " ++ show i
fromUnits = monom u ++ concatMap ((" <**> " ++) . monom) us
||| Pretty-print a unit (using only ASCII characters)
showUnit : Unit q -> String
showUnit (MkUnit 0 (MkFreeAbGrp [])) = ""
showUnit (MkUnit e (MkFreeAbGrp [])) = "10^" ++ show e
showUnit (MkUnit e (MkFreeAbGrp (u :: us))) = if e == 0 then fromUnits
else "10^" ++ show e ++ " " ++ fromUnits
where monom : (SomeElemUnit, Integer) -> String
monom (unit, 1) = name unit
monom (unit, i) = name unit ++ "^" ++ show i
fromUnits = monom u ++ concatMap ((" " ++) . monom) us
toSuperScript : Char -> Char
toSuperScript '1' = '¹'
toSuperScript '2' = '²'
toSuperScript '3' = '³'
toSuperScript '4' = '⁴'
toSuperScript '5' = '⁵'
toSuperScript '6' = '⁶'
toSuperScript '7' = '⁷'
toSuperScript '8' = '⁸'
toSuperScript '9' = '⁹'
toSuperScript '0' = '⁰'
toSuperScript '-' = '⁻'
toSuperScript x = x
toSuper : String -> String
toSuper = pack . map toSuperScript . unpack
||| Pretty-print a unit
showUnitUnicode : Unit q -> String
showUnitUnicode (MkUnit 0 (MkFreeAbGrp [])) = ""
showUnitUnicode (MkUnit e (MkFreeAbGrp [])) = "10" ++ toSuper (show e)
showUnitUnicode (MkUnit e (MkFreeAbGrp (u :: us))) = if e == 0 then fromUnits
else "10" ++ toSuper (show e) ++ " " ++ fromUnits
where monom : (SomeElemUnit, Integer) -> String
monom (unit, 1) = name unit
monom (unit, i) = name unit ++ toSuper (show i)
fromUnits = monom u ++ concatMap ((" " ++) . monom) us
infixr 10 ^^
||| Power unit
(^^) : Unit q -> (i : Integer) -> Unit (q ^ i)
(^^) (MkUnit e us) i = rewriteUnit eq (MkUnit (i*e) (us ^ i))
where eq = sym (lift_power_lem quantity us i)
||| Inverse unit (e.g. the inverse of `second` is `One <//> second` a.k.a. `hertz`)
unitInverse : {q : Quantity} -> Unit q -> Unit (freeAbGrpInverse q)
unitInverse {q} u = rewrite (freeabgrppower_correct q (-1))
in u ^^ (-1)
infixl 6 <**>,<//>
||| Product unit
(<**>) : Unit r -> Unit s -> Unit (r <*> s)
(<**>) (MkUnit e rs) (MkUnit f ss) = rewriteUnit eq (MkUnit (e+f) (rs <*> ss))
where eq = sym (lift_mult_lem quantity rs ss)
||| Quotient unit
(<//>) : Unit r -> Unit s -> Unit (r </> s)
(<//>) a b = a <**> unitInverse b
infixl 5 =| -- sensible?
||| Numbers tagged with a unit
data Measurement : {q : Quantity} -> Unit q -> Type -> Type where
(=|) : a -> (u : Unit q) -> Measurement u a
||| Extract the number
getValue : {q : Quantity} -> {u : Unit q} ->
Measurement {q} u a -> a
getValue (x =| _) = x
implementation Functor (Measurement {q} u) where
map f (x =| _) = f x =| u
implementation Eq a => Eq (Measurement {q} u a) where
(x =| _) == (y =| _) = x == y
implementation Ord a => Ord (Measurement {q} u a) where
compare (x =| _) (y =| _) = compare x y
implementation Show a => Show (Measurement {q} u a) where
show (x =| _) = show x ++ " =| " ++ show u
implementation Num a => Semigroup (Measurement {q} u a) where
(x =| _) <+> (y =| _) = (x + y) =| u
implementation Num a => Monoid (Measurement {q} u a) where
neutral = fromInteger 0 =| u
implementation (Neg a, Num a) => Group (Measurement {q} u a) where
inverse (x =| _) = (-x) =| u
implementation (Neg a, Num a) => AbelianGroup (Measurement {q} u a) where
||| Pretty-print a measurement (using only ASCII characters)
showMeasurement : Show a => {q : Quantity} -> {u : Unit q} ->
(Measurement u a) -> String
showMeasurement (x =| u) =
show x ++ (if base10Exponent u == 0 then " " else "*") ++ showUnit u
||| Pretty-print a measurement (using only ASCII characters)
showMeasurementUnicode : Show a => {q : Quantity} -> {u : Unit q} ->
(Measurement u a) -> String
showMeasurementUnicode (x =| u) =
show x ++ (if base10Exponent u == 0 then " " else "·") ++ showUnit u
infixl 5 :|
||| Type synonym for `Measurement`
(:|) : Unit q -> Type -> Type
(:|) = Measurement
||| Flatten nested measurements
joinUnits : {q : Quantity} -> {r : Quantity} -> {u : Unit q} -> {v : Unit r} ->
(u :| (v :| a)) -> (u <**> v) :| a
joinUnits ((x =| v) =| u) = x =| (u <**> v)
||| Double with a unit
F : Unit q -> Type
F u = Measurement u Double
infixl 9 |*|,|/|
||| Product measurement
(|*|) : Num a => {q : Quantity} -> {r : Quantity} -> {u : Unit q} -> {v : Unit r} ->
u :| a -> v :| a -> (u <**> v) :| a
(|*|) (x =| u) (y =| v) = (x*y) =| (u <**> v)
||| Quotient measurement (the second measurement mustn't be zero!)
(|/|) : {q : Quantity} -> {r : Quantity} -> {u : Unit q} -> {v : Unit r} ->
F u -> F v -> F (u <//> v)
(|/|) (x =| u) (y =| v) = (x/y) =| (u <//> v)
infixl 10 |^|
||| Power measurement
(|^|) : {q : Quantity} -> {u : Unit q} -> F u -> (i : Integer) -> F (u ^^ i)
(|^|) (x =| u) i = (((^) @{floatmultpower}) x i) =| u ^^ i
||| Square root measurement
sqrt : {q : Quantity} -> {u : Unit q} -> F (u ^^ 2) -> F u
sqrt {q} {u} (x =| _) = (sqrt x) =| u
||| Round measurement to the next integer below
floor : {q : Quantity} -> {u : Unit q} -> F u -> F u
floor = map floor
||| Round measurement to the next integer above
ceiling : {q : Quantity} -> {u : Unit q} -> F u -> F u
ceiling = map ceiling
||| Convert measurements to a given unit
convertTo : {from : Unit q} -> (to : Unit q) -> F from -> F to
convertTo to (x =| from) = (x * (rateFrom / rateTo)) =| to
where rateFrom = joinedConversionRate from
rateTo = joinedConversionRate to
||| Flipped version of `convertTo`
as : {from : Unit q} -> F from -> (to : Unit q) -> F to
as x u = convertTo u x
||| Convert with implicit target unit
convert : {from : Unit q} -> {to : Unit q} -> F from -> F to
convert {to=to} x = convertTo to x
||| Promote values to measurements of the trivial quantity
implicit
toUnitLess : a -> Measurement UnitLess a
toUnitLess x = x =| UnitLess
implementation Num a => Num (Measurement UnitLess a) where
  x + y = (getValue x + getValue y) =| UnitLess
  x * y = (getValue x * getValue y) =| UnitLess
  fromInteger i = fromInteger i =| UnitLess
implementation Neg a => Neg (Measurement UnitLess a) where
  negate x = negate (getValue x) =| UnitLess
  x - y = (getValue x - getValue y) =| UnitLess
implementation Abs a => Abs (Measurement UnitLess a) where
  abs x = abs (getValue x) =| UnitLess
|
A while back I asked you to send in any questions that you are looking for an answer to, and I received well over 100 responses. So, I've hand-picked just four to answer today and I'll come back to the others in the future. On with the questions!
Mar 17 Time in the market is your money’s best friend.
I interviewed a bloke for an upcoming episode of my podcast and he had a lot to say about investing in individual shares and why he used to buy them but no longer does. He took the advice of a good friend who told him, back in the 1980s, to just buy into today's equivalent of an index fund and let time in the market be your money's best friend. Lo and behold, she was correct.
When I realised that in order to become an investor I didn’t have to learn how to pick stocks it was like the clouds had parted and the sun had finally come out. And I have American John C. Bogle to thank for that. Last week he passed away at the age of 89 but he leaves a huge legacy behind and I know that in years to come many will still be learning from him, just like I did.
The Barefoot Investor by Australian Scott Pape is an excellent book and it has been instrumental in changing the financial direction of not just Australians but also of Kiwis. Many people have asked me to work out what the Kiwi equivalents are of the providers he recommends. I’m not saying this is a conclusive list, but I’ve given it my best shot.
This question was sent in by Declan a while back, but it is one that crops up in my inbox relatively consistently as well, so I thought back to what I did and what I would do if faced with the situation he is proposing again.
Recently I had an excellent question from a fellow Happy Saver who had been comparing SmartShares, SuperLife and Sharesies, not so much to find the lowest fees, but to work out which has the smoothest and easiest system and which one allowed him to purchase shares in the fastest time.
The very first thing I would do is set up an automatic transfer to syphon off a set amount of money each week into a sub account with your bank and give it a flash name like “IKMSMOI” (investments keep my sticky mitts off it).
Two weeks ago I was giving away a book The Simple Path To Wealth by JL Collins and in order to win it I asked you to send me an email telling me the most simple strategy you have devised so far to get yourself ahead financially. I was flooded with suggestions and I thought that they were just too good not to share.
|
{-# OPTIONS --without-K --exact-split --safe #-}
module Fragment.Equational.Structures where
import Fragment.Equational.Theory.Laws as L
open import Fragment.Equational.Theory
open import Fragment.Equational.Theory.Bundles
open import Fragment.Equational.Model
open import Fragment.Algebra.Algebra
open import Level using (Level)
open import Data.Fin using (Fin; #_; suc; zero)
open import Data.Vec using ([]; _∷_)
open import Data.Vec.Relation.Binary.Pointwise.Inductive using ([]; _∷_)
open import Relation.Binary using (Setoid; Rel)
open import Algebra.Core
private
variable
a ℓ : Level
module _ {A : Set a} {_≈_ : Rel A ℓ} where
open import Algebra.Definitions _≈_
open import Algebra.Structures _≈_
module _ {_•_ : Op₂ A} where
module _ (isMagma : IsMagma _•_) where
open IsMagma isMagma
magma→setoid : Setoid a ℓ
magma→setoid = record { Carrier = A
; _≈_ = _≈_
; isEquivalence = isEquivalence
}
private
magma→⟦⟧ : Interpretation Σ-magma magma→setoid
magma→⟦⟧ MagmaOp.• (x ∷ y ∷ []) = _•_ x y
magma→⟦⟧-cong : Congruent₂ _•_ → Congruence Σ-magma magma→setoid magma→⟦⟧
magma→⟦⟧-cong c MagmaOp.• (x₁≈x₂ ∷ y₁≈y₂ ∷ []) = c x₁≈x₂ y₁≈y₂
magma→isAlgebra : IsAlgebra Σ-magma magma→setoid
magma→isAlgebra = record { ⟦_⟧ = magma→⟦⟧
; ⟦⟧-cong = magma→⟦⟧-cong ∙-cong
}
magma→algebra : Algebra Σ-magma
magma→algebra = record { ∥_∥/≈ = magma→setoid
; ∥_∥/≈-isAlgebra = magma→isAlgebra
}
private
magma→models : Models Θ-magma magma→algebra
magma→models ()
magma→isModel : IsModel Θ-magma magma→setoid
magma→isModel = record { isAlgebra = magma→isAlgebra
; models = magma→models
}
magma→model : Model Θ-magma
magma→model = record { ∥_∥/≈ = magma→setoid
; isModel = magma→isModel
}
module _ (isSemigroup : IsSemigroup _•_) where
private
open IsSemigroup isSemigroup renaming (assoc to •-assoc)
semigroup→models : Models Θ-semigroup (magma→algebra isMagma)
semigroup→models assoc θ = •-assoc (θ (# 0)) (θ (# 1)) (θ (# 2))
semigroup→isModel : IsModel Θ-semigroup (magma→setoid isMagma)
semigroup→isModel = record { isAlgebra = magma→isAlgebra isMagma
; models = semigroup→models
}
semigroup→model : Model Θ-semigroup
semigroup→model = record { ∥_∥/≈ = magma→setoid isMagma
; isModel = semigroup→isModel
}
module _ (isCSemigroup : IsCommutativeSemigroup _•_) where
private
open IsCommutativeSemigroup isCSemigroup
renaming (comm to •-comm; assoc to •-assoc)
csemigroup→models : Models Θ-csemigroup (magma→algebra isMagma)
csemigroup→models comm θ = •-comm (θ (# 0)) (θ (# 1))
csemigroup→models assoc θ = •-assoc (θ (# 0)) (θ (# 1)) (θ (# 2))
csemigroup→isModel : IsModel Θ-csemigroup (magma→setoid isMagma)
csemigroup→isModel = record { isAlgebra = magma→isAlgebra isMagma
; models = csemigroup→models
}
csemigroup→model : Model Θ-csemigroup
csemigroup→model = record { ∥_∥/≈ = magma→setoid isMagma
; isModel = csemigroup→isModel
}
|
If $S$ is locally path-connected, then the path-component and connected-component of a point $x$ in $S$ are the same.
|
State Before: F : Type ?u.130016
α : Type u_1
β : Type ?u.130022
γ : Type ?u.130025
inst✝ : Group α
s t : Set α
a b : α
⊢ (fun x x_1 => x * x_1) a ⁻¹' 1 = {a⁻¹} State After: no goals Tactic: rw [← image_mul_left', image_one, mul_one]
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/00a_core.causalinference.ipynb (unless otherwise specified).
__all__ = ['CausalInferenceModel', 'metalearner_cls_dict', 'metalearner_reg_dict']
# Cell
import pandas as pd
pd.set_option('display.max_columns', 500)
import time
from ..meta.tlearner import BaseTClassifier, BaseTRegressor
from ..meta.slearner import BaseSClassifier, BaseSRegressor, LRSRegressor
from ..meta.xlearner import BaseXClassifier, BaseXRegressor
from ..meta.rlearner import BaseRClassifier, BaseRRegressor
from ..meta.propensity import ElasticNetPropensityModel
from ..meta.utils import NearestNeighborMatch, create_table_one
from scipy import stats
from lightgbm import LGBMClassifier, LGBMRegressor
import numpy as np
import warnings
from copy import deepcopy
from matplotlib import pyplot as plt
from ..preprocessing import DataframePreprocessor
from sklearn.linear_model import LogisticRegression, LinearRegression
# from xgboost import XGBRegressor
# from causalml.inference.meta import XGBTRegressor, MLPTRegressor
metalearner_cls_dict = {'t-learner' : BaseTClassifier,
'x-learner' : BaseXClassifier,
'r-learner' : BaseRClassifier,
's-learner': BaseSClassifier}
metalearner_reg_dict = {'t-learner' : BaseTRegressor,
'x-learner' : BaseXRegressor,
'r-learner' : BaseRRegressor,
's-learner' : BaseSRegressor}
class CausalInferenceModel:
"""Infers causality from the data contained in `df` using a metalearner.
Usage:
```python
>>> cm = CausalInferenceModel(df,
treatment_col='Is_Male?',
outcome_col='Post_Shared?', text_col='Post_Text',
ignore_cols=['id', 'email'])
cm.fit()
```
**Parameters:**
* **df** : pandas.DataFrame containing dataset
* **method** : metalearner model to use. One of {'t-learner', 's-learner', 'x-learner', 'r-learner'} (Default: 't-learner')
* **metalearner_type** : Alias of `method` for backwards compatibility. Overrides `method` if not None.
* **treatment_col** : treatment variable; column should contain binary values: 1 for treated, 0 for untreated.
* **outcome_col** : outcome variable; column should contain the categorical or numeric outcome values
* **text_col** : (optional) text column containing the strings (e.g., articles, reviews, emails).
* **ignore_cols** : columns to ignore in the analysis
* **include_cols** : columns to include as covariates (e.g., possible confounders)
* **treatment_effect_col** : name of column to hold causal effect estimations. Does not need to exist. Created by CausalNLP.
* **learner** : an instance of a custom learner. If None, Log/Lin Regression is used for S-Learner
and a default LightGBM model will be used for all other metalearner types.
# Example
learner = LGBMClassifier(num_leaves=1000)
* **effect_learner**: used for x-learner/r-learner and must be regression model
* **min_df** : min_df parameter used for text processing using sklearn
* **max_df** : max_df parameter used for text processing using sklearn
* **ngram_range**: ngrams used for text vectorization. default: (1,1)
* **stop_words** : stop words used for text processing (from sklearn)
* **verbose** : If 1, print informational messages. If 0, suppress.
"""
def __init__(self,
df,
method='t-learner',
metalearner_type=None, # alias for method
treatment_col='treatment',
outcome_col='outcome',
text_col=None,
ignore_cols=[],
include_cols=[],
treatment_effect_col = 'treatment_effect',
learner = None,
effect_learner=None,
min_df=0.05,
max_df=0.5,
ngram_range=(1,1),
stop_words='english',
verbose=1):
"""
constructor
"""
# for backwards compatibility
if metalearner_type is not None:
if method != 't-learner':
warnings.warn(f'metalearner_type and method are mutually exclusive. '+\
f'Used {metalearner_type} as method.')
method = metalearner_type
metalearner_list = list(metalearner_cls_dict.keys())
if method not in metalearner_list:
raise ValueError('method is required and must be one of: %s' % (metalearner_list))
self.te = treatment_effect_col # created
self.method = method
self.v = verbose
self.df = df.copy()
self.ps = None # computed by _create_metalearner, if necessary
# these are auto-populated by preprocess method
self.x = None
self.y = None
self.treatment = None
# preprocess
self.pp = DataframePreprocessor(treatment_col = treatment_col,
outcome_col = outcome_col,
text_col=text_col,
include_cols=include_cols,
ignore_cols=ignore_cols,
verbose=self.v)
self.df, self.x, self.y, self.treatment = self.pp.preprocess(self.df,
training=True,
min_df=min_df,
max_df=max_df,
ngram_range=ngram_range,
stop_words=stop_words)
# setup model
self.model = self._create_metalearner(method=self.method,
supplied_learner=learner,
supplied_effect_learner=effect_learner)
def _create_metalearner(self, method='t-learner',
supplied_learner=None, supplied_effect_learner=None):
## use LRSRegressor for s-learner regression as default instead of tree-based model
#if method =='s-learner' and supplied_learner is None: return LRSRegressor()
# set learner
default_learner = None
if self.pp.is_classification:
default_learner = LogisticRegression(max_iter=10000) if method=='s-learner' else LGBMClassifier()
else:
default_learner = LinearRegression() if method=='s-learner' else LGBMRegressor()
default_effect_learner = LGBMRegressor()
learner = default_learner if supplied_learner is None else supplied_learner
effect_learner = default_effect_learner if supplied_effect_learner is None else\
supplied_effect_learner
# set metalearner
metalearner_class = metalearner_cls_dict[method] if self.pp.is_classification \
else metalearner_reg_dict[method]
if method in ['t-learner', 's-learner']:
model = metalearner_class(learner=learner,control_name=0)
elif method in ['x-learner']:
model = metalearner_class(
control_outcome_learner=deepcopy(learner),
treatment_outcome_learner=deepcopy(learner),
control_effect_learner=deepcopy(effect_learner),
treatment_effect_learner=deepcopy(effect_learner),
control_name=0)
else:
model = metalearner_class(outcome_learner=deepcopy(learner),
effect_learner=deepcopy(effect_learner),
control_name=0)
return model
def fit(self, p=None):
"""
Fits a causal inference model and estimates outcome
with and without treatment for each observation.
For X-Learner and R-Learner, propensity scores will be computed
using default propensity model unless `p` is not None.
Parameter `p` is not used for other methods.
"""
print("start fitting causal inference model")
start_time = time.time()
self.model.fit(self.x.values, self.treatment.values, self.y.values, p=p)
preds = self._predict(self.x)
self.df[self.te] = preds
print("time to fit causal inference model: ",-start_time + time.time()," sec")
return self
def predict(self, df, p=None):
"""
Estimates the treatment effect for each observation in `df`.
The DataFrame represented by `df` should be the same format
as the one supplied to `CausalInferenceModel.__init__`.
For X-Learner and R-Learner, propensity scores will be computed
using default propensity model unless `p` is not None.
Parameter `p` is not used for other methods.
"""
_, x, _, _ = self.pp.preprocess(df, training=False)
return self._predict(x, p=p)
def _predict(self, x, p=None):
"""
Estimates the treatment effect for each observation in `x`,
        where `x` is a **preprocessed** DataFrame or Numpy array.
"""
if isinstance(x, pd.DataFrame):
return self.model.predict(x.values, p=p)
else:
return self.model.predict(x, p=p)
def estimate_ate(self, bool_mask=None):
"""
Estimates the treatment effect for each observation in
`self.df`.
"""
df = self.df if bool_mask is None else self.df[bool_mask]
a = df[self.te].values
mean = np.mean(a)
return {'ate' : mean}
def interpret(self, plot=False, method='feature_importance'):
"""
Returns feature importances of treatment effect model.
The method parameter must be one of {'feature_importance', 'shap_values'}
"""
tau = self.df[self.te]
feature_names = self.x.columns.values
if plot:
if method=='feature_importance':
fn = self.model.plot_importance
elif method == 'shap_values':
fn = self.model.plot_shap_values
else:
raise ValueError('Unknown method: %s' % method)
else:
if method=='feature_importance':
fn = self.model.get_importance
elif method == 'shap_values':
fn = self.model.get_shap_values
else:
raise ValueError('Unknown method: %s' % method)
return fn(X=self.x, tau=tau, features = feature_names)
def compute_propensity_scores(self, x_pred=None):
"""
Computes and returns propensity scores for `CausalInferenceModel.treatment`
in addition to the Propensity model.
"""
from ..meta import propensity
return propensity.compute_propensity_score(self.x, self.treatment, X_pred=x_pred)
def _balance(self, caliper = None, n_fold=3, overwrite=False):
"""
Balances dataset to minimize bias. Currently uses propensity score matching.
Experimental and untested.
"""
if caliper is None:
warnings.warn('Since caliper is None, caliper is being set to 0.001.')
caliper = 0.001
print('-------Start balancing procedure----------')
start_time = time.time()
#Join x, y and treatment vectors
df_match = self.x.merge(self.treatment,left_index=True, right_index=True)
df_match = df_match.merge(self.y, left_index=True, right_index=True)
#ps - propensity score
        df_match['ps'], _ = self.compute_propensity_scores()  # returns (scores, model); n_fold is currently unused
#Matching model object
psm = NearestNeighborMatch(replace=False,
ratio=1,
random_state=423,
caliper=caliper)
ps_cols = list(self.pp.feature_names_one_hot)
ps_cols.append('ps')
#Apply matching model
#If error, then sample is unbiased and we don't do anything
self.flg_bias = True
self.df_matched = psm.match(data=df_match, treatment_col=self.pp.treatment_col,score_cols=['ps'])
self.x_matched = self.df_matched[self.x.columns]
self.y_matched = self.df_matched[self.pp.outcome_col]
self.treatment_matched = self.df_matched[self.pp.treatment_col]
print('-------------------MATCHING RESULTS----------------')
print('-----BEFORE MATCHING-------')
print(create_table_one(data=df_match,
treatment_col=self.pp.treatment_col,
features=list(self.pp.feature_names_one_hot)))
print('-----AFTER MATCHING-------')
print(create_table_one(data=self.df_matched,
treatment_col=self.pp.treatment_col,
features=list(self.pp.feature_names_one_hot)))
if overwrite:
self.x = self.x_matched
self.y = self.y_matched
self.treatment = self.treatment_matched
self.df = self.df_matched
print('\nBalancing prunes the dataset. ' +\
                  'To revert, re-invoke CausalInferenceModel ' +\
'with original dataset.')
else:
print('\nBalanced data is available as variables: x_matched, y_matched, treatment_matched, df_matched')
return
def _predict_shap(self, x):
return self._predict(x)
def explain(self, df, row_index=None, row_num=0, background_size=50, nsamples=500):
"""
Explain the treatment effect estimate of a single observation using SHAP.
**Parameters:**
        - **df** (pd.DataFrame): a pd.DataFrame of test data in the same format as the original training DataFrame
- **row_num** (int): raw row number in DataFrame to explain (default:0, the first row)
- **background_size** (int): size of background data (SHAP parameter)
- **nsamples** (int): number of samples (SHAP parameter)
"""
try:
import shap
except ImportError:
msg = 'The explain method requires shap library. Please install with: pip install shap. '+\
'Conda users should use this command instead: conda install -c conda-forge shap'
raise ImportError(msg)
f = self._predict_shap
# preprocess dataframe
_, df_display, _, _ = self.pp.preprocess(df.copy(), training=False)
# select row
df_display_row = df_display.iloc[[row_num]]
r_key = 'row_num'
r_val = row_num
# shap
explainer = shap.KernelExplainer(f, self.x.iloc[:background_size,:])
shap_values = explainer.shap_values(df_display_row, nsamples=nsamples, l1_reg='aic')
expected_value = explainer.expected_value
if not np.issubdtype(type(explainer.expected_value), np.floating):
expected_value = explainer.expected_value[0]
if type(shap_values) == list:
shap_values = shap_values[0]
plt.show(shap.force_plot(expected_value, shap_values, df_display_row, matplotlib=True))
def get_required_columns(self):
"""
Returns required columns that must exist in any DataFrame supplied to `CausalInferenceModel.predict`.
"""
treatment_col = self.pp.treatment_col
other_cols = self.pp.feature_names
result = [treatment_col] + other_cols
if self.pp.text_col: result.append(self.pp.text_col)
return result
def tune_and_use_default_learner(self, split_pct=0.2, random_state=314, scoring=None):
"""
Tunes the hyperparameters of a default LightGBM model, replaces `CausalInferenceModel.learner`,
and returns best parameters.
        Should be invoked **prior** to running `CausalInferenceModel.fit`.
        If `scoring` is None, then 'roc_auc' is used for classification and 'neg_mean_squared_error'
        is used for regression.
"""
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(self.x.values, self.y.values,
test_size=split_pct,
random_state=random_state)
fit_params={"early_stopping_rounds":30,
"eval_metric" : 'auc' if self.pp.is_classification else 'rmse',
"eval_set" : [(X_test,y_test)],
'eval_names': ['valid'],
'verbose': 100,
'categorical_feature': 'auto'}
from scipy.stats import randint as sp_randint
from scipy.stats import uniform as sp_uniform
param_test ={'num_leaves': sp_randint(6, 750),
'min_child_samples': sp_randint(20, 500),
'min_child_weight': [1e-5, 1e-3, 1e-2, 1e-1, 1, 1e1, 1e2, 1e3, 1e4],
'subsample': sp_uniform(loc=0.2, scale=0.8),
'colsample_bytree': sp_uniform(loc=0.4, scale=0.6),
'reg_alpha': [0, 1e-1, 1, 2, 5, 7, 10, 50, 100],
'reg_lambda': [0, 1e-1, 1, 5, 10, 20, 50, 100]}
n_HP_points_to_test = 100
if self.pp.is_classification:
learner_type = LGBMClassifier
scoring = 'roc_auc' if scoring is None else scoring
else:
learner_type = LGBMRegressor
scoring = 'neg_mean_squared_error' if scoring is None else scoring
clf = learner_type(max_depth=-1, random_state=random_state, silent=True,
metric='None', n_jobs=4, n_estimators=5000)
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
gs = RandomizedSearchCV(
estimator=clf, param_distributions=param_test,
n_iter=n_HP_points_to_test,
scoring=scoring,
cv=3,
refit=True,
random_state=random_state,
verbose=True)
gs.fit(X_train, y_train, **fit_params)
print('Best score reached: {} with params: {} '.format(gs.best_score_, gs.best_params_))
best_params = gs.best_params_
self.learner = learner_type(**best_params)
return best_params
def evaluate_robustness(self, sample_size=0.8):
"""
Evaluates robustness on four sensitivity measures (see CausalML package for details on these methods):
- **Placebo Treatment**: ATE should become zero.
- **Random Cause**: ATE should not change.
- **Random Replacement**: ATE should not change.
- **Subset Data**: ATE should not change.
"""
from ..meta.sensitivity import Sensitivity
data_df = self.x.copy()
t_col = 'CausalNLP_t'
y_col = 'CausalNLP_y'
data_df[t_col] = self.treatment
data_df[y_col] = self.y
sens_x = Sensitivity(df=data_df,
inference_features=self.x.columns.values,
p_col=None,
treatment_col=t_col, outcome_col=y_col,
learner=self.model)
df = sens_x.sensitivity_analysis(methods=['Placebo Treatment',
'Random Cause',
'Subset Data',
'Random Replace',
],sample_size=sample_size)
df['Distance from Desired (should be near 0)'] = np.where(df['Method']=='Placebo Treatment',
df['New ATE']-0.0,
df['New ATE']-df['ATE'])
#df['Method'] = np.where(df['Method']=='Random Cause', 'Random Add', df['Method'])
return df
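# --- Usage sketch (illustrative only; the DataFrame `df` is an assumed
# --- input with 'treatment' and 'outcome' columns, matching the defaults):
#
# cm = CausalInferenceModel(df, method='t-learner')
# cm.fit()
# print(cm.estimate_ate())      # {'ate': ...} averaged over the data
# effects = cm.predict(df)      # per-observation treatment-effect estimates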
|
module LispVal
import Prelude.Bool as B
import Prelude.List as L
import Effects
import Effect.Exception
import Effect.State
mutual
public export
data LispError = NumArgs Integer (L.List LispVal)
| TypeMismatch String LispVal
| BadSpecialForm String LispVal
| NotFunction String String
| UnboundVar String LispVal
| Default String
| LispErr String
public export
EnvCtx : Type
EnvCtx = L.List (String, LispVal)
public export
Eval : Type -> Type
Eval a = Eff a [STATE EnvCtx, EXCEPTION LispError]
public export
record IFunc where
constructor MkFun
fn : L.List LispVal -> Eval LispVal
public export
data LispVal = Atom String
| List (L.List LispVal)
| DottedList (L.List LispVal) LispVal
| Number Integer
| Str String
| Fun IFunc
| Lambda IFunc EnvCtx
| Nil
| Bool B.Bool
mutual
unwordsList : List LispVal -> String
unwordsList = unwords . map showVal
p : Maybe String -> String
p varargs = case varargs of
Nothing => ""
Just arg => " . " ++ arg
export
showVal : LispVal -> String
showVal (Atom x) = x
showVal (List xs) = "(" ++ unwordsList xs ++ ")"
showVal (DottedList xs x) = "(" ++ unwordsList xs ++ " . " ++ showVal x ++ ")"
showVal (Number x) = show x
showVal (Str x) = "\"" ++ x ++ "\""
showVal (Bool True) = "#t"
showVal (Bool False) = "#f"
showVal (Fun _) = "internal function"
showVal (Lambda _ _) = "lambda function"
export
Show LispVal where
show = showVal
export
Show LispError where
show (UnboundVar m v) = m ++ ": " ++ show v
show (BadSpecialForm s v) = s ++ " : " ++ show v
show (NotFunction s f) = s ++ " : " ++ show f
show (NumArgs e f) = "Expected " ++ show e ++ " args; found " ++ (show $ length f)
show (TypeMismatch e f) = "Invalid type: expected " ++ e ++ ", found " ++ show f
show (Default s) = s
show (LispErr s) = s
show _ = "Error!!!"
|
/*
 * OrientationSpeedBox.hpp
*
* Created on: 01.11.2018
* Author: tomlucas
*/
#ifndef ESTIMATORS_STATEBOXES_ORIENTATIONSPEEDBOX_HPP_
#define ESTIMATORS_STATEBOXES_ORIENTATIONSPEEDBOX_HPP_
#include "StateBox.hpp"
#include <eigen3/Eigen/Core>
#include <Eigen_Utils.hpp>
#include "OrientationBox.hpp"
namespace zavi::estimator::state_boxes {
/**
* Box to contain orientation and angular velocity
*/
class OrientationSpeedBox : public StateBox<12,6,3> {
template<typename T>
using ROT_MATRIX= Eigen::Matrix<T,3,3>;
public:
INNER_T<double> std;
OrientationSpeedBox(std::shared_ptr<plugin::SensorPlugin> sensor):StateBox<12,6,3>(sensor) {
}
virtual ~OrientationSpeedBox() {};
template<typename T>
inline static OUTER_T<T> boxPlus(const OUTER_T<T> &state,const INNER_T<T> &delta) {
assert_inputs(state,delta);
OUTER_T<T> result;
result <<OrientationBox::boxPlus<T>(state.template block<9,1>(0,0),delta.template block<3,1>(0,0)),(state.template block<3,1>(9,0)+delta.template block<3,1>(3,0));
return result;
}
template<typename T>
inline static INNER_T<T> boxPlusInnerSpace(const INNER_T<T> &delta1,const INNER_T<T> &delta2) {
assert_inputs(delta1,delta2);
INNER_T<T> result=INNER_T<T>::Zero();
result.template block<3,1>(0,0)=OrientationBox::boxPlusInnerSpace<T>(delta1.template block<3,1>(0,0),delta2.template block<3,1>(0,0));
result.template block<3,1>(3,0)=delta1.template block<3,1>(3,0)+delta2.template block<3,1>(3,0);
return result;
}
template<typename T>
inline static INNER_T<T> boxMinus(const OUTER_T<T> &a,const OUTER_T<T> &b) {
assert_inputs(a,b);
INNER_T<T>result;
result <<OrientationBox::boxMinus<T>(a.template block<9,1>(0,0),b.template block<9,1>(0,0)),b.template block<3,1>(9,0)-a.template block<3,1>(9,0);
return result;
}
template<typename T>
inline static OUTER_T<T> stateTransition(const OUTER_T<T> & state,double time_diff ) {
assert_inputs(state,time_diff);
OUTER_T<T> result;
result << OrientationBox::boxPlus<T>(state.template block<9,1>(0,0),state.template block<3,1>(9,0)*time_diff),state.template block<3,1>(9,0);
return result;
}
inline INNER_T<double> getSTD(double time_diff) {
return std*time_diff;
}
virtual OUTER_T<double> normalise(const OUTER_T<double> &state) const {
assert_inputs(state);
OUTER_T<double> result;
result <<OrientationBox::static_normalise(state.block<9,1>(0,0)),state.block<3,1>(9,0);
return result;
}
};
}
#endif /* ESTIMATORS_STATEBOXES_ORIENTATIONSPEEDBOX_HPP_ */
|
Formal statement is: lemma limitin_canonical_iff_gen [simp]: assumes "open S" shows "limitin (top_of_set S) f l F \<longleftrightarrow> (f \<longlongrightarrow> l) F \<and> l \<in> S" Informal statement is: If $S$ is an open set, then the limit of $f$ in $S$ is the same as the limit of $f$ in the topology of $S$ and $l \in S$.
|
In Desire for Three, Jessica Tyler is looking for a fresh start and agrees to visit her sister in Desire, Oklahoma, where Dom/sub and ménage relationships are the norm. Clay and Rio Erickson, gorgeous overprotective brothers, claim her as their own. When Jesse is threatened by her ex, she must stay strong, even as her two formidable lovers are intent on possessing her.
In Blade’s Desire, Kelly Jones arrives in Desire looking for peace after being abused. She soon falls under Blade Royal’s spell, and she makes a bargain that puts her trust and body in his hands for six weeks. She hides her love from him, but Blade knows her hidden passions are a perfect match for his own.
In Creation of Desire, Boone and Chase Jackson want Rachel Robinson, but she’s a forever kind of woman and they’re wary of commitment. When other men start to move on her, they give her a night she’ll never forget. Things change when Rachel discovers she’s pregnant. The men want to make her their own, but she believes they’re only doing it for the baby’s sake. Can she and her lovers still have a future?
When lives and love are on the line, will passion triumph in Desire?
|
\documentclass{article}
\usepackage[utf8]{inputenc} %File encoding
\usepackage{ifthen} %% needed for the redefinition of \@cite below.
\newcommand{\citename}[1]{#1\protect\nocite{#1}}
\renewenvironment{glossary}
{\begin{list}{}{\setlength\labelsep{\linewidth}%
\setlength\labelwidth{0pt}%
\setlength\itemindent{-\leftmargin}%
\let\makelabel\descriptionlabel}}
{\end{list}}
\makeatletter
\renewcommand\@biblabel[1]{#1}
\renewenvironment{thebibliography}[1]
{\section*{\refname}%
\list{}{\setlength\labelwidth{1.5cm}%
\leftmargin\labelwidth \advance\leftmargin\labelsep
\let\makelabel\descriptionlabel}}%
{\endlist}
\renewcommand{\@cite}[2]{{#1\ifthenelse{\boolean{@tempswa}}{,\nolinebreak[3] #2}{}}}
\makeatother
\begin{document}
Our network uses \citename{AD}. By using \citename{AD} with \citename{JS} bases clients that have been installed using a \citename{RF} from \citename{CD}, we can expect a high level of standardization.
If you calculate with \cite{sym:pi} you always get an irrational result, because \cite{sym:pi} itself is irrational. As a matter of fact, there are \cite{sym:phi} and \cite{sym:lambda}, too.
\bibliographystyle{test8}
\bibliography{test8}
\end{document}
|
import numpy as np
import json
import copy
import random
"""
This is Step 2 in the seq2seq pipeline.
Input: dtype.
Use each (context, scenario, utterance) triple to create a series of templates (x) paired with the utterance (y). For test and valid data, we will only create the template from the dgpt-m greedy approach.
"""
dtype = "train"
#template_types = ["copy", "greedy", "sample", "noisy"]
template_types = ["greedy", "sample", "noisy"]
in_f = "/project/glucas_540/kchawla/csci699/storage/data/seq2seq/casino/s2s_cxt_scen_utt.json"
in_f_greedy = "/project/glucas_540/kchawla/csci699/storage/data/seq2seq/casino/dgpt_greedy.json"
with open(in_f_greedy, "r") as f:
greedy_data = json.load(f)
print("loaded greedy data: ", in_f_greedy, len(greedy_data))
in_f_samples = ["/project/glucas_540/kchawla/csci699/storage/data/seq2seq/casino/dgpt_sample1.json", "/project/glucas_540/kchawla/csci699/storage/data/seq2seq/casino/dgpt_sample2.json", "/project/glucas_540/kchawla/csci699/storage/data/seq2seq/casino/dgpt_sample3.json"]
sample_datas = []
for in_f_sample in in_f_samples:
with open(in_f_sample, "r") as f:
sample_datas.append(json.load(f))
print("loaded sample data ", in_f_samples)
out_src = "/project/glucas_540/kchawla/csci699/storage/data/seq2seq/casino/" + dtype + "_" + "_".join(sorted(template_types)) + ".src"
out_tgt = "/project/glucas_540/kchawla/csci699/storage/data/seq2seq/casino/" + dtype + "_utterance_" + "_".join(sorted(template_types)) + ".tgt"
def get_input(msg):
msg = (" " + '<|endoftext|>' + " ").join(msg)
msg = msg + " " + '<|endoftext|>' + " "
return msg
def get_copy_templates(item):
"""
autoencoding setup.
"""
temps = []
temps.append(item["utterance"])
return temps
def get_greedy_templates(item):
templates = []
context = get_input(item['context'])
templates.append(greedy_data[context])
return templates
def get_sample_templates(item):
templates = []
context = get_input(item['context'])
for ix in range(len(sample_datas)):
templates.append(sample_datas[ix][context])
return templates
def get_noisy_variants(temp):
"""
    Switch item types and item counts to create noise. Only 1 noisy variant is generated per template, which seems okay for now: we can increase the noise if there is good evidence that it helps, and the data should be large enough to show whether it is indeed beneficial.
"""
itemtypes = ["food", "water", "firewood"]
counts = ["1", "2", "3"]
words_noisy = []
words = temp.split()
for word in words:
word_noisy = word
if("food" in word):
word_noisy = word.replace("food", random.choice(itemtypes))
elif("water" in word):
word_noisy = word.replace("water", random.choice(itemtypes))
elif("firewood" in word):
word_noisy = word.replace("firewood", random.choice(itemtypes))
if("1" in word):
word_noisy = word.replace("1", random.choice(counts))
elif("2" in word):
word_noisy = word.replace("2", random.choice(counts))
elif("3" in word):
word_noisy = word.replace("3", random.choice(counts))
words_noisy.append(word_noisy)
temp_noisy = " ".join(words_noisy)
return [temp_noisy]
def get_noisy_templates(templates):
all_temps = []
for temp in templates:
noisy_variants = get_noisy_variants(temp)
all_temps += noisy_variants
return all_temps
def get_scenario_string(scenario):
"""
rank corresponding to Food, Water, Firewood.
permutation of 1 2 3
"""
scenario_str = ""
scenario_str += "High " + scenario["value2issue"]["High"] + " " + scenario["value2reason"]["High"] + " <|endoftext|> "
scenario_str += "Medium " + scenario["value2issue"]["Medium"] + " " + scenario["value2reason"]["Medium"] + " <|endoftext|> "
scenario_str += "Low " + scenario["value2issue"]["Low"] + " " + scenario["value2reason"]["Low"] + " <|endoftext|>"
return scenario_str
def merge_scen_templates(scenario_str, templates):
scen_temps = []
for temp in templates:
merged = scenario_str + " " + temp
scen_temps.append(merged)
return scen_temps
def get_pairs(item):
"""
context, scenario, utterance
"""
templates = []
if("copy" in template_types):
templates += get_copy_templates(item)
if("greedy" in template_types):
templates += get_greedy_templates(item)
if("sample" in template_types):
templates += get_sample_templates(item)
if("noisy" in template_types):
templates += get_noisy_templates(templates)
templates = sorted(list(set(templates)))
scenario_str = get_scenario_string(item['scenario'])
utterance = item['utterance']
scen_temps = merge_scen_templates(scenario_str, templates)
pairs = []
for merged in scen_temps:
pair = [merged, utterance]
pairs.append(pair)
return pairs
with open(in_f) as f:
all_data = json.load(f)
all_data = all_data[dtype]
print("input: ", dtype, len(all_data))
#list of lists, each containing [src, tgt] pairs
out_data = []
for item in all_data:
pairs = get_pairs(item)
out_data += pairs
print("out_data: ", len(out_data))
print("sample: ", out_data[0])
with open(out_src, "w") as fsrc:
with open(out_tgt, "w") as ftgt:
for pair in out_data:
fsrc.write(pair[0] + "\n")
ftgt.write(pair[1] + "\n")
print("Output completed to: ", out_src, out_tgt)
|
@testsuite "constructors" (AT, eltypes)->begin
@testset "direct" begin
for T in eltypes
B = AT{T}(undef, 10)
@test B isa AT{T,1}
@test size(B) == (10,)
@test eltype(B) == T
B = AT{T}(undef, 10, 10)
@test B isa AT{T,2}
@test size(B) == (10, 10)
@test eltype(B) == T
B = AT{T}(undef, (10, 10))
@test B isa AT{T,2}
@test size(B) == (10, 10)
@test eltype(B) == T
end
# compare against Array
for typs in [(), (Int,), (Int,1), (Int,2), (Float32,), (Float32,1), (Float32,2)],
args in [(), (1,), (1,2), ((1,),), ((1,2),),
(undef,), (undef, 1,), (undef, 1,2), (undef, (1,),), (undef, (1,2),),
(Int,), (Int, 1,), (Int, 1,2), (Int, (1,),), (Int, (1,2),),
([1,2],), ([1 2],)]
cpu = try
Array{typs...}(args...)
catch ex
isa(ex, MethodError) || rethrow()
nothing
end
gpu = try
AT{typs...}(args...)
catch ex
isa(ex, MethodError) || rethrow()
cpu == nothing || rethrow()
nothing
end
if cpu == nothing
@test gpu == nothing
else
@test typeof(cpu) == typeof(convert(Array, gpu))
end
end
end
@testset "similar" begin
for T in eltypes
B = AT{T}(undef, 10)
B = similar(B, Int32, 11, 15)
@test B isa AT{Int32,2}
@test size(B) == (11, 15)
@test eltype(B) == Int32
B = similar(B, T)
@test B isa AT{T,2}
@test size(B) == (11, 15)
@test eltype(B) == T
B = similar(B, (5,))
@test B isa AT{T,1}
@test size(B) == (5,)
@test eltype(B) == T
B = similar(B, 7)
@test B isa AT{T,1}
@test size(B) == (7,)
@test eltype(B) == T
B = similar(AT{Int32}, (11, 15))
@test B isa AT{Int32,2}
@test size(B) == (11, 15)
@test eltype(B) == Int32
B = similar(AT{T}, (5,))
@test B isa AT{T,1}
@test size(B) == (5,)
@test eltype(B) == T
B = similar(AT{T}, 7)
@test B isa AT{T,1}
@test size(B) == (7,)
@test eltype(B) == T
B = similar(Broadcast.Broadcasted(*, (B, B)), T)
@test B isa AT{T,1}
@test size(B) == (7,)
@test eltype(B) == T
B = similar(Broadcast.Broadcasted(*, (B, B)), Int32, (11, 15))
@test B isa AT{Int32,2}
@test size(B) == (11, 15)
@test eltype(B) == Int32
end
end
@testset "convenience" begin
for T in eltypes
A = AT(rand(T, 3))
b = rand(T)
fill!(A, b)
@test A isa AT{T,1}
@test Array(A) == fill(b, 3)
A = zero(AT(rand(T, 2)))
@test A isa AT{T,1}
@test Array(A) == zero(rand(T, 2))
A = zero(AT(rand(T, 2, 2)))
@test A isa AT{T,2}
@test Array(A) == zero(rand(T, 2, 2))
A = zero(AT(rand(T, 2, 2, 2)))
@test A isa AT{T,3}
@test Array(A) == zero(rand(T, 2, 2, 2))
A = one(AT(rand(T, 2, 2)))
@test A isa AT{T,2}
@test Array(A) == one(rand(T, 2, 2))
A = oneunit(AT(rand(T, 2, 2)))
@test A isa AT{T,2}
@test Array(A) == oneunit(rand(T, 2, 2))
end
end
@testset "conversions" begin
for T in eltypes
Bc = round.(rand(10, 10) .* 10.0)
B = AT{T}(Bc)
@test size(B) == (10, 10)
@test eltype(B) == T
@test Array(B) ≈ Bc
Bc = rand(T, 10)
B = AT(Bc)
@test size(B) == (10,)
@test eltype(B) == T
@test Array(B) ≈ Bc
Bc = rand(T, 10, 10)
B = AT{T, 2}(Bc)
@test size(B) == (10, 10)
@test eltype(B) == T
@test Array(B) ≈ Bc
intervals = Dict(
Float16 => -2^11:2^11,
Float32 => -2^24:2^24,
Float64 => -2^53:2^53,
)
Bc = rand(Int8, 3, 3, 3)
B = convert(AT{T, 3}, Bc)
@test size(B) == (3, 3, 3)
@test eltype(B) == T
@test Array(B) ≈ Bc
end
end
@testset "uniformscaling" begin
for T in eltypes
x = Matrix{T}(I, 4, 2)
x1 = AT{T, 2}(I, 4, 2)
x2 = AT{T}(I, (4, 2))
x3 = AT{T, 2}(I, (4, 2))
@test Array(x1) ≈ x
@test Array(x2) ≈ x
@test Array(x3) ≈ x
x = Matrix(T(3) * I, 2, 4)
x1 = AT(T(3) * I, 2, 4)
@test eltype(x1) == T
@test Array(x1) ≈ x
end
end
end
|
import verification.semantics.stream_props
noncomputable theory
open_locale classical
-- skip s i : if current index < i, must advance; may advance to first ready index ≥ i.
-- succ s i : if current index ≤ i, must advance; may advance to first ready index > i.
/-
if current index < (i, b), must advance; may advance up to first ready index ≥ (i, b)
-/
/-- Returns the set of `q` that `s` could skip to if the current state is `x`
and it is supposed to skip past `(i, b)` -/
def skip_set {ι α : Type*} [linear_order ι] (s : Stream ι α) (x : s.σ) (i : ι) (b : bool) : set s.σ :=
{q | ∃ (n : ℕ), q = (s.next'^[n] x) ∧ (0 < n ↔ s.to_order x ≤ (i, b)) ∧
∀ m, 0 < m → m < n → s.valid (s.next'^[m] x) → s.ready (s.next'^[m] x) → s.index' (s.next'^[m] x) < i}
structure SkipStream (ι α : Type*) [linear_order ι] extends Stream ι α :=
(skip : Π x, valid x → ι → bool → σ)
(hskip : ∀ x hx i b, skip x hx i b ∈ skip_set _ x i b)
variables {ι : Type} {α : Type*} [linear_order ι]
@[simp] noncomputable def SkipStream.eval_skip [add_zero_class α] (s : SkipStream ι α) : ℕ → s.σ → (ι →₀ α)
| 0 q := 0
| (n + 1) q := if h₁ : s.valid q then (SkipStream.eval_skip n (s.skip q h₁ (s.index q h₁) (s.ready q))) + (s.eval₀ _ h₁) else 0
/-- The number of steps a stream would skip at index `(i, b)` -/
noncomputable def SkipStream.nskip (s : SkipStream ι α) (q : s.σ) (h : s.valid q) (i : ι) (b : bool) : ℕ :=
(s.hskip q h i b).some
lemma SkipStream.skip_eq (s : SkipStream ι α) (q : s.σ) (h : s.valid q) (i : ι) (b : bool) :
s.skip q h i b = (s.next'^[s.nskip q h i b] q) := (s.hskip q h i b).some_spec.1
lemma SkipStream.advance_iff (s : SkipStream ι α) (q : s.σ) (h : s.valid q) (i : ι) (b : bool) :
0 < s.nskip q h i b ↔ s.to_order q ≤ (i, b) := (s.hskip q h i b).some_spec.2.1
/-- All skipped values are less than `i` -/
lemma SkipStream.lt_of_skipped (s : SkipStream ι α) (q : s.σ) (h : s.valid q) (i : ι) (b : bool)
(m : ℕ) (h₁m : 0 < m) (h₂m : m < s.nskip q h i b) (h₃ : s.valid (s.next'^[m] q)) (h₄ : s.ready (s.next'^[m] q)) :
s.index' (s.next'^[m] q) < i := (s.hskip q h i b).some_spec.2.2 m h₁m h₂m h₃ h₄
lemma SkipStream.not_ready_of_skipped_of_monotonic (s : SkipStream ι α) (hs : s.monotonic) (q : s.σ) (h : s.valid q) (b : bool)
(m : ℕ) (h₁m : 0 < m) (h₂m : m < s.nskip q h (s.index q h) b) (h₃ : s.valid (s.next'^[m] q)) :
¬s.ready (s.next'^[m] q) := λ h₄,
begin
refine (s.lt_of_skipped q h _ b m h₁m h₂m h₃ h₄).not_le _,
simpa [Stream.index'_val h] using hs.le_index_iterate q m,
end
/-- If all skipped states are non-ready, then the eval's are equal -/
theorem Stream.eval₀_skip_eq [add_comm_monoid α] (s : Stream ι α) (q : s.σ)
(n : ℕ) (hn : ∀ m < n, s.valid (s.next'^[m] q) → ¬s.ready (s.next'^[m] q)) :
s.eval_steps n q = 0 :=
begin
induction n with n ih generalizing q, { simp, },
simp only [Stream.eval_steps, dite_eq_right_iff, forall_true_left],
intro h,
simp only [Stream.eval₀, (show ¬s.ready q, from hn 0 (nat.zero_lt_succ _) h), dif_neg, not_false_iff, add_zero],
apply ih,
intros m hm, specialize hn (m + 1) (nat.succ_lt_succ hm),
simpa [Stream.next'_val h] using hn,
end
theorem Stream.eval₀_skip_eq' [add_comm_monoid α] (s : Stream ι α) (q : s.σ) (h : s.valid q)
(n : ℕ) (hn : n ≠ 0) (hn : ∀ m, 0 < m → m < n → s.valid (s.next'^[m] q) → ¬s.ready (s.next'^[m] q)) :
s.eval_steps n q = s.eval₀ q h :=
begin
cases n, { contradiction, },
have := s.eval₀_skip_eq (s.next q h) n _,
{ simp [Stream.eval_steps, h, this], },
intros m hm,
simpa [Stream.next'_val h] using hn (m + 1) m.zero_lt_succ (nat.succ_lt_succ hm),
end
theorem SkipStream.eval_skip_eq [add_comm_monoid α] (s : SkipStream ι α) (hs : s.monotonic) (q : s.σ) (n : ℕ) :
∃ (m : ℕ), n ≤ m ∧ s.eval_skip n q = s.eval_steps m q :=
begin
induction n with n ih generalizing q, { refine ⟨0, rfl.le, _⟩, simp, },
by_cases h : s.valid q, swap,
{ refine ⟨n + 1, rfl.le, _⟩, simp [h], },
rcases ih (s.skip q h (s.index q h) (s.ready q)) with ⟨m, hm₁, hm₂⟩,
have : 0 < s.nskip q h (s.index q h) (s.ready q),
{ rw SkipStream.advance_iff, refine le_of_eq _, ext : 1; simp [Stream.index'_val h], },
refine ⟨s.nskip q h (s.index q h) (s.ready q) + m, _, _⟩,
{ rw nat.succ_le_iff, refine lt_of_le_of_lt hm₁ _, simpa, },
rw [s.eval_steps_add, s.eval₀_skip_eq' q h, ← SkipStream.skip_eq],
{ simp [h, add_comm, hm₂], }, { rwa ← zero_lt_iff, },
exact s.not_ready_of_skipped_of_monotonic hs q h _,
end
|
lemma homotopy_eqv_sing: fixes S :: "'a::real_normed_vector set" and a :: "'b::real_normed_vector" shows "S homotopy_eqv {a} \<longleftrightarrow> S \<noteq> {} \<and> contractible S"
|
\section{AVNA1 Vector Voltmeter (VVM)}
\label{sect:VVM}
The AVNA allows measurement of the gain of a device, using an internal signal generator. This is presented in terms of dB change in amplitude and shift in phase angle between the input and output. This is most useful for characterizing a device, such as a filter. To do this, we have indeed built a voltmeter to measure the device output. Sometimes we would just like to see the voltage displayed directly. This lets us probe circuits for troubleshooting and design work. That is what the VVM does for us.
Phase information is important for some network diagnosis. The VVM is intended to be used with the internal signal generator when phase is to be read.
\subsection{Description}
\label{subsect:VVMDescr}
The Vector Voltmeter measures the input rms voltage at any frequency up to about 40 kHz. The frequency of the VVM is determined by the frequency of Signal Generator 1 (SG\#1). Additionally, the phase difference between SG\#1 and the input signal is displayed. By using the SG\#1 output to drive the test circuit, this phase is constant and can be a useful diagnostic tool. If an external generator is used, the measured phase difference will change at a rate determined by the frequency difference between the SG\#1 set frequency and the external generator. This, too, can be a useful measurement.
The bandwidth of the measurement is only a few Hz making the measurement very sensitive, \textit{i.e.}, low noise. The VVM operates down into the microVolt region.
\subsection{Instructions}
\label{subsect:VVMInstr}
\textbf{Using the VVM - }Operation starts by setting the frequency of SG\#1 to that appropriate for the measurement. The generator does not need to be enabled ("On") if the signal source for the measurement is generated externally. If the internal SG\#1 is used as the signal source, it should be enabled and the amplitude set. Only the frequency setting will affect the VVM measurement. The steps for controlling the signal generator are listed in Section 6, below.
If it is possible to do so, the internal SG\#1 should be used as the signal source, and this is a requirement if phase needs to be constant and representative of the circuit.
Again starting from the main home screen, Figure \ref{AVNA_000-label}, we tap the "\textsf{Vector VMeter}" button on the bottom row. This brings up the single screen used by the VVM. It will be displaying amplitude in \textsf{Volts RMS} and phase in degrees, with both shown in bold numbers. With no input, these parameters should be low in value, with the exact voltage depending on frequency. The example of Figure \ref{AVNA_015-label} is running at 996 Hz, with the input shorted to ground, and shows 9 microVolts RMS.
%
\begin{figure}[H]
\begin{center}
\includegraphics[scale=0.75]{./images/AVNA_015.pdf}
\caption{Vector Voltmeter, ready to measure but with no input.}
\label{AVNA_015-label}
\end{center}
\end{figure}
%
On the second from top text row is a notation in red, "\textsf{SigGen \#1 996 Hz}" that shows us the SG\#1 frequency without going back to the Signal Generator screens. If SG\#1 is on (enabled), the frequency is shown in white; otherwise it is shown in pink/red.
We can now measure a voltage at the chosen frequency by connecting an input to the T input terminals. The voltage amplitude shown is valid regardless of whether the 50-Ohm switch is on or off. The load of the 50-Ohms may cut the amplitude, but the voltage across the resistor is accurate. Figure \ref{AVNA_016-label} shows the resulting screen.
%
\begin{figure}[H]
\begin{center}
\includegraphics[scale=0.75]{./images/AVNA_016.pdf}
\caption{Vector Voltmeter with Sig Gen \#1 on and connected across to the VVM input.}
\label{AVNA_016-label}
\end{center}
\end{figure}
%
We can see that the input voltage is 0.100 Volts RMS. From the second line, we see that the frequency of operation is 996 Hz, but it shows the level as 0.283 Vp-p. These, of course, represent the same level, as the peak voltage is $\sqrt{2} = 1.414$ times the RMS voltage and peak-to-peak is twice the peak value. This mix of units for the voltage is a complicated issue; it probably makes sense, but it unfortunately causes more mental exercise than one might want at times.
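As a quick check of the arithmetic, the two displayed levels are related by
\[
V_{pp} = 2\sqrt{2}\,V_{rms} \approx 2 \times 1.414 \times 0.100~\mathrm{V} \approx 0.283~\mathrm{V}.
\]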
Getting back to the VVM, you can see how close you are to overload by the little "\textsf{ADC \%p-p=xx.x}" on the screen. This is the per cent of full ADC range for the input. If the level gets to 100\%, the voltage display turns red. At that point the measurements are invalid.
Below the voltage and phase display is a "\textsf{Phase Offset}" input. This can be set to values from -180 to +180 degrees. This is a convenience that allows relative phase measurements to be made directly on the display. This does not restrict the range of phase measurements, and they will always display between -180 and +180 degrees.
\textbf{Input Impedance - }One more convenience is the two buttons that select "\textsf{Volts High-Z}" or "\textsf{dBm-50 Ohms}". For the dBm readout to be correct, it is necessary to 50-Ohm terminate the input; this is easily done with the "50-Ohm" slide switch. The resulting display in Figure \ref{AVNA_018-label} is shown below. Note that along with switching the amplitude units, a phase offset has been used to bring the phase display to 0.00 (almost).
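For example, the 0.100 Volts RMS level of Figure \ref{AVNA_016-label}, if terminated in 50 Ohms, corresponds to
\[
P = \frac{V_{rms}^2}{R} = \frac{(0.100~\mathrm{V})^2}{50~\Omega} = 0.2~\mathrm{mW},
\qquad 10\log_{10}\!\left(\frac{0.2~\mathrm{mW}}{1~\mathrm{mW}}\right) \approx -7.0~\mathrm{dBm},
\]
so the display would read about $-7$ dBm.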
When the 50-Ohm switch is not closed, the input impedance is one Megohm in parallel with about 25 pF. This is the input impedance of many oscilloscopes, meaning that various x10 and x100 probes can be used with the VVM (as well as with the Spectrum Analyzer) to increase measurement voltage range and to add isolation from a circuit being measured. \textbf{A Warning - }An oscilloscope comes with input protection circuits that we do not have in the AVNA1. That means it is easy to damage the AVNA1. Keep the input at the box to a Volt or so and everything will be OK. Big voltages can cause damage.
%
\begin{figure}[H]
\begin{center}
\includegraphics[scale=0.75]{./images/AVNA_018.pdf}
\caption{Vector Voltmeter with display set to power into 50-Ohms, expressed in dBm.}
\label{AVNA_018-label}
\end{center}
\end{figure}
%
One last note: if you are using an external signal source for the VVM and the measurement fails, showing only microvolts, it is most likely that SG \#1 is not within a few Hz of the frequency of the input signal. If the external source is not sufficiently stable, this may not be an appropriate method of measurement. A broadband RMS voltmeter could be implemented, but that is for the future.
\subsection{Discussion}
\label{subsect:VVMDiscus}
The operation of the VVM is worthy of a few words. As a test instrument, it is closely related to the transmission measurements part of the Vector Network Analyzer (AVNA). Much software is shared between the two instruments. The AVNA determines the relative gain or loss, amplitude and phase, through the transmission path. The VVM is calibrated so that the magnitude of the incoming signal is measured, along with the phase difference between Signal Generator \#1 and the incoming signal.
\subsection{DSP Circuit} Two mixer (multiplier) outputs are the in-phase and quadrature signal levels. The square root of the sum of the squares of these two provides the magnitude of the incoming signal. The Signal Generator \#1 (SG \#1) sets the frequency of the measurement. Low pass filtering after the mixers sets the requirement that the incoming signal must be within a few Hz of the SG \#1 frequency. In many cases, it is easiest to just use the Signal Generator output on the \q{Z} terminals of the AVNA1 which is both on frequency and without time-varying phase.
If the SG \#1 signal is used as the signal source, the phase will not be shifting with time. For this, the phase offset can be useful. This has up/down buttons that allow setting to 0.1 degree. The offset can be positive or negative. This is a convenience for zeroing the displayed phase and does not change the basic measurement.
\subsection{Input Range} The input signal needs to be within the range of the ADC. The maximum input is a little more than 0.2 Vrms or 0.6 V p-p. Higher voltages require an external voltage divider. The VVM can be used with a 50-Ohm input terminator; without the 50-Ohm terminator the input impedance is 1-megohm in parallel with around 25 pF. In all cases, the VVM shows the voltage at the \q{T} input terminals. The displayed voltage is the RMS value. This is the same as HP used on the (now old) 8405A VVM. If the waveform is not sinusoidal, and there are harmonics, the displayed value is for the fundamental at the frequency of SG \#1.
The very narrow bandwidth of the VVM allows low-level signals to be measured. With SG \#1 turned off, there is a residual noise of about 10 microVolts. To use an external signal generator, SG \#1 must be tuned to the same frequency. Otherwise, leakage through the CAL switch U4B in the AVNA1 causes a signal of about 55 uV with the 50-Ohm terminator or around 1.5 mV without the 50-Ohm terminator.
\textbf{Single Channel Limitations - }The HP 8405 VVM has two inputs and phase-locked tuning to the reference input. The AVNA1 only has one input channel, so the two channel feature cannot be supported. But wait, there actually are two inputs. If the AVNA was rewired to remove R46 to R49 and bring those leads to a switch and to a second input connector, a full 8405A style phase-locked loop could be implemented. But, not now!
|
# required python version: 3.6+
import os
import sys
import src.load_data as load_data
from src import layer
import src.rbm as rbm
import src.autoencoder as autoencoder
import matplotlib.pyplot as plt
import numpy
import os
import pickle
# Phase 0: Read File
full_path = os.path.realpath(__file__)
path, filename = os.path.split(full_path)
data_filepath = '../data'
save_filepath = '../output/n-gram'
dump_filepath = '../output/dump'
gram_name = '4-gram.png'
dictionary_name = 'dictionary.dump'
inv_dictionary_name = 'inv-dictionary.dump'
data_train_filename = 'train.txt'
data_valid_filename = 'val.txt'
data_train_dump_filename = 'train.dump'
data_valid_dump_filename = 'valid.dump'
data_train_filepath = os.path.join(path, data_filepath, data_train_filename)
data_valid_filepath = os.path.join(path, data_filepath, data_valid_filename)
gram_savepath = os.path.join(path, save_filepath, gram_name)
dictionary_dumppath = os.path.join(path, dump_filepath, dictionary_name)
inv_dictionary_dumppath = os.path.join(path, dump_filepath, inv_dictionary_name)
data_train_dump_filepath = os.path.join(path, dump_filepath, data_train_dump_filename)
data_valid_dump_filepath = os.path.join(path, dump_filepath, data_valid_dump_filename)
# load text data from path
def load_from_path(data_filepath):
# data_train: numpy.ndarray
data_input = numpy.loadtxt(data_filepath, dtype=str, delimiter='\n')
# x_train: numpy.ndarray
return data_input
data_train = load_from_path(data_train_filepath)
data_valid = load_from_path(data_valid_filepath)
# Phase 1: split
# Create a vocabulary dictionary: we are required to create an entry for every word in the training set
# also, make the data lower-cased
def split_lines(data_input):
    all_lines: list = []
    train_size = data_input.size
    # lower-case each line and wrap it in START/END markers
    for i in range(train_size):
        tmp_list = data_input[i].lower().split()
        tmp_list.insert(0, 'START')
        tmp_list.append('END')
        all_lines.insert(i, tmp_list)
    return all_lines
all_lines_train = split_lines(data_train)
all_lines_valid = split_lines(data_valid)
# build the dictionary
def build_vocabulary(all_lines: list):
vocabulary: dict = {}
train_size = len(all_lines)
for i in range(train_size):
for word in all_lines[i]:
try:
vocabulary[word] += 1
except KeyError:
vocabulary[word] = 1
return vocabulary
vocabulary = build_vocabulary(all_lines_train)
# truncate the dictionary
def truncate_dictionary(dictionary: dict, size: int):
    # keep the (size - 1) most frequent words
    sorted_list = sorted(dictionary.items(), key=lambda x: x[1], reverse=True)
    truncated_vocabulary: dict = {}
    for i in range(size - 1):
        word, freq = sorted_list[i]
        truncated_vocabulary[word] = freq
    # merge the frequencies of all remaining words into 'UNK'
    truncated_vocabulary['UNK'] = 0
    for i in range(size - 1, len(sorted_list)):
        _, freq = sorted_list[i]
        truncated_vocabulary['UNK'] += freq
    return truncated_vocabulary
truncated_vocabulary = truncate_dictionary(vocabulary, 8000)
# generate a dictionary mapping strings to IDs
def gen_word_to_id_dict(vocabulary):
    dictionary: dict = {}
    idn = 0
    for word in vocabulary:
        dictionary[word] = idn
        idn += 1
    return dictionary
dict_word_to_id = gen_word_to_id_dict(truncated_vocabulary)
# replace less frequent words in all_lines with 'UNK'
def replace_with_unk(all_lines, dict_word_to_id):
    tokenized_lines = []
    train_size = len(all_lines)
    for i in range(train_size):
        tokenized_lines.append([])
        for j in range(len(all_lines[i])):
            if all_lines[i][j] not in dict_word_to_id:
                tokenized_lines[i].append(dict_word_to_id['UNK'])
            else:
                tokenized_lines[i].append(dict_word_to_id[all_lines[i][j]])
    return tokenized_lines
tokenized_lines_train = replace_with_unk(all_lines_train, dict_word_to_id)
tokenized_lines_valid = replace_with_unk(all_lines_valid, dict_word_to_id)
# build a 4-gram
def build_four_gram(tokenized_lines):
four_gram: dict = {}
for i in range(len(tokenized_lines)):
cur_line = tokenized_lines[i]
cur_len = len(cur_line)
if (cur_len < 4):
continue
for j in range(cur_len-3):
cur_tuple = (cur_line[j], cur_line[j+1], cur_line[j+2], cur_line[j+3])
try:
four_gram[cur_tuple] += 1
except KeyError:
four_gram[cur_tuple] = 1
# sort the 4-gram
sorted_list = sorted(four_gram.items(), key=lambda x:x[1])
sorted_list.reverse()
return sorted_list
four_gram_train = build_four_gram(tokenized_lines_train)
four_gram_valid = build_four_gram(tokenized_lines_valid)
# plot the 4-gram
def plot_four_gram(four_gram: list):
x_axis = numpy.arange(four_gram.__len__())
y_axis = numpy.zeros(four_gram.__len__())
for i in range(four_gram.__len__()):
y_axis[i] = four_gram[i][1]
plt.figure(1)
line_1, = plt.plot(y_axis, label='4-gram')
plt.xlabel('the ids sorted by the frequency')
plt.ylabel('frequency')
plt.title('4-gram')
plt.savefig(gram_savepath)
plt.close(1)
return
flag_plot_four_gram = True
if flag_plot_four_gram:
plot_four_gram(four_gram_train)
# invert the key-value pair of dictionary
inv_dictionary = {v: k for k, v in dict_word_to_id.items()}
def print_top_four_gram(four_gram: list, top_num: int):
for i in range(top_num):
gram_tuple = four_gram[i][0]
print(inv_dictionary[gram_tuple[0]],
inv_dictionary[gram_tuple[1]],
inv_dictionary[gram_tuple[2]],
inv_dictionary[gram_tuple[3]],
sep=' ')
return
flag_print_most_frequent_grams: bool = True
if flag_print_most_frequent_grams:
print_top_four_gram(four_gram_train, 50)
# dump the dictionary for later use
with open(dictionary_dumppath, 'wb+') as f:
pickle.dump(dict_word_to_id, f)
with open(inv_dictionary_dumppath, 'wb+') as f:
pickle.dump(inv_dictionary, f)
# generate the word-id representation of the inputs
# (each 4-gram becomes one row of four word ids)
def process_four_gram(four_gram):
    # expand each 4-gram into `freq` identical rows of its four word ids
    rows = []
    for tup, freq in four_gram:
        rows.extend([list(tup)] * freq)
    X = numpy.array(rows).reshape(-1, 4)
    return X
X_train = process_four_gram(four_gram_train)
X_valid = process_four_gram(four_gram_valid)
# dump the word-id representation of the inputs
with open(data_train_dump_filepath, 'wb+') as f:
pickle.dump(X_train, f)
with open(data_valid_dump_filepath, 'wb+') as f:
pickle.dump(X_valid, f)
print(truncated_vocabulary['UNK'])
# Phase 3: Compute the number of trainable parameters in the network
flag_print_num_trainable: bool = True
if flag_print_num_trainable:
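    # breakdown (assuming a 3-word context window, 16-dimensional word
    # embeddings, and 128 hidden units): embedding table 8000*16,
    # hidden-layer weights 128*(16*3) plus 128 biases,
    # output layer 8000*128 plus 8000 biases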
print(8000 * 16 + 128 * 16 * 3 + 128 + 8000 * 128 + 8000)
|
------------------------------------------------------------------------
-- The Agda standard library
--
-- Primality
------------------------------------------------------------------------
{-# OPTIONS --without-K --safe #-}
module Data.Nat.Primality where
open import Data.Empty using (⊥)
open import Data.Fin using (Fin; toℕ)
open import Data.Fin.Properties using (all?)
open import Data.Nat using (ℕ; suc; _+_)
open import Data.Nat.Divisibility using (_∤_; _∣?_)
open import Relation.Nullary using (yes; no)
open import Relation.Nullary.Decidable using (from-yes)
open import Relation.Nullary.Negation using (¬?)
open import Relation.Unary using (Decidable)
-- Definition of primality.
Prime : ℕ → Set
Prime 0 = ⊥
Prime 1 = ⊥
Prime (suc (suc n)) = (i : Fin n) → 2 + toℕ i ∤ 2 + n
-- Decision procedure for primality.
prime? : Decidable Prime
prime? 0 = no λ()
prime? 1 = no λ()
prime? (suc (suc n)) = all? (λ _ → ¬? (_ ∣? _))
private
-- Example: 2 is prime.
2-is-prime : Prime 2
2-is-prime = from-yes (prime? 2)
|
= Forward Intelligence Team =
|
!=======================================================================
! Generated by : PSCAD v4.6.3.0
!
! Warning: The content of this file is automatically generated.
! Do not modify, as any changes made here will be lost!
!-----------------------------------------------------------------------
! Component : Main
! Description :
!-----------------------------------------------------------------------
!=======================================================================
SUBROUTINE MainDyn()
!---------------------------------------
! Standard includes
!---------------------------------------
INCLUDE 'nd.h'
INCLUDE 'emtconst.h'
INCLUDE 'emtstor.h'
INCLUDE 's0.h'
INCLUDE 's1.h'
INCLUDE 's2.h'
INCLUDE 's4.h'
INCLUDE 'branches.h'
INCLUDE 'pscadv3.h'
INCLUDE 'fnames.h'
INCLUDE 'radiolinks.h'
INCLUDE 'matlab.h'
INCLUDE 'rtconfig.h'
!---------------------------------------
! Function/Subroutine Declarations
!---------------------------------------
!---------------------------------------
! Variable Declarations
!---------------------------------------
! Subroutine Arguments
! Electrical Node Indices
INTEGER NT_1(3)
! Control Signals
REAL Ia(3), Ea(3)
! Internal Variables
REAL RVD1_1, RVD1_2, RVD1_3, RVD1_4
REAL RVD1_5, RVD1_6, RVD1_7
! Indexing variables
INTEGER ICALL_NO ! Module call num
INTEGER ISTOF, IT_0 ! Storage Indices
INTEGER SS, INODE, IBRCH ! SS/Node/Branch/Xfmr
!---------------------------------------
! Local Indices
!---------------------------------------
! Dsdyn <-> Dsout transfer index storage
NTXFR = NTXFR + 1
TXFR(NTXFR,1) = NSTOL
TXFR(NTXFR,2) = NSTOI
TXFR(NTXFR,3) = NSTOF
TXFR(NTXFR,4) = NSTOC
! Define electric network subsystem number
SS = NODE(NNODE+1)
! Increment and assign runtime configuration call indices
ICALL_NO = NCALL_NO
NCALL_NO = NCALL_NO + 1
! Increment global storage indices
ISTOF = NSTOF
NSTOF = NSTOF + 6
NPGB = NPGB + 9
INODE = NNODE + 2
NNODE = NNODE + 8
IBRCH = NBRCH(SS)
NBRCH(SS) = NBRCH(SS) + 9
NCSCS = NCSCS + 0
NCSCR = NCSCR + 0
!---------------------------------------
! Transfers from storage arrays
!---------------------------------------
! Array (1:3) quantities...
DO IT_0 = 1,3
Ia(IT_0) = STOF(ISTOF + 0 + IT_0)
Ea(IT_0) = STOF(ISTOF + 3 + IT_0)
END DO
!---------------------------------------
! Electrical Node Lookup
!---------------------------------------
! Array (1:3) quantities...
DO IT_0 = 1,3
NT_1(IT_0) = NODE(INODE + 0 + IT_0)
END DO
!---------------------------------------
! Configuration of Models
!---------------------------------------
IF ( TIMEZERO ) THEN
FILENAME = 'Main.dta'
CALL EMTDC_OPENFILE
SECTION = 'DATADSD:'
CALL EMTDC_GOTOSECTION
ENDIF
!---------------------------------------
! Generated code from module definition
!---------------------------------------
! 1:[source3] Three Phase Voltage Source Model 1 'Source1'
! 3-Phase source: Source1
RVD1_1 = RTCF(NRTCF+12)
RVD1_2 = RTCF(NRTCF+14)
RVD1_3 = RTCF(NRTCF+13)
CALL ESYS651_EXE(SS, (IBRCH+1), (IBRCH+2), (IBRCH+3),0,0,0, SS, NT&
&_1(1),NT_1(2),NT_1(3), 0, RVD1_2, RVD1_1, 0.05, 1.0, 1.0, 1.0,RVD1&
&_3, 1.0, 0.02, 0.05, 1.0, 0.02, 0.05, RVD1_4, RVD1_5, RVD1_6, RVD1&
&_7)
!---------------------------------------
! Feedbacks and transfers to storage
!---------------------------------------
! Array (1:3) quantities...
DO IT_0 = 1,3
STOF(ISTOF + 0 + IT_0) = Ia(IT_0)
STOF(ISTOF + 3 + IT_0) = Ea(IT_0)
END DO
!---------------------------------------
! Transfer to Exports
!---------------------------------------
!---------------------------------------
! Close Model Data read
!---------------------------------------
IF ( TIMEZERO ) CALL EMTDC_CLOSEFILE
RETURN
END
!=======================================================================
SUBROUTINE MainOut()
!---------------------------------------
! Standard includes
!---------------------------------------
INCLUDE 'nd.h'
INCLUDE 'emtconst.h'
INCLUDE 'emtstor.h'
INCLUDE 's0.h'
INCLUDE 's1.h'
INCLUDE 's2.h'
INCLUDE 's4.h'
INCLUDE 'branches.h'
INCLUDE 'pscadv3.h'
INCLUDE 'fnames.h'
INCLUDE 'radiolinks.h'
INCLUDE 'matlab.h'
INCLUDE 'rtconfig.h'
!---------------------------------------
! Function/Subroutine Declarations
!---------------------------------------
REAL EMTDC_VVDC !
!---------------------------------------
! Variable Declarations
!---------------------------------------
! Electrical Node Indices
INTEGER NT_2(3)
! Control Signals
REAL Ia(3), Ea(3)
! Internal Variables
INTEGER IVD1_1
! Indexing variables
INTEGER ICALL_NO ! Module call num
INTEGER ISTOL, ISTOI, ISTOF, ISTOC, IT_0 ! Storage Indices
INTEGER IPGB ! Control/Monitoring
INTEGER SS, INODE, IBRCH ! SS/Node/Branch/Xfmr
!---------------------------------------
! Local Indices
!---------------------------------------
! Dsdyn <-> Dsout transfer index storage
NTXFR = NTXFR + 1
ISTOL = TXFR(NTXFR,1)
ISTOI = TXFR(NTXFR,2)
ISTOF = TXFR(NTXFR,3)
ISTOC = TXFR(NTXFR,4)
! Define electric network subsystem number
SS = NODE(NNODE+1)
! Increment and assign runtime configuration call indices
ICALL_NO = NCALL_NO
NCALL_NO = NCALL_NO + 1
! Increment global storage indices
IPGB = NPGB
NPGB = NPGB + 9
INODE = NNODE + 2
NNODE = NNODE + 8
IBRCH = NBRCH(SS)
NBRCH(SS) = NBRCH(SS) + 9
NCSCS = NCSCS + 0
NCSCR = NCSCR + 0
!---------------------------------------
! Transfers from storage arrays
!---------------------------------------
! Array (1:3) quantities...
DO IT_0 = 1,3
Ia(IT_0) = STOF(ISTOF + 0 + IT_0)
Ea(IT_0) = STOF(ISTOF + 3 + IT_0)
END DO
!---------------------------------------
! Electrical Node Lookup
!---------------------------------------
! Array (1:3) quantities...
DO IT_0 = 1,3
NT_2(IT_0) = NODE(INODE + 3 + IT_0)
END DO
!---------------------------------------
! Configuration of Models
!---------------------------------------
IF ( TIMEZERO ) THEN
FILENAME = 'Main.dta'
CALL EMTDC_OPENFILE
SECTION = 'DATADSO:'
CALL EMTDC_GOTOSECTION
ENDIF
!---------------------------------------
! Generated code from module definition
!---------------------------------------
! 10:[ammeter] Current Meter 'Ia'
Ia(1) = ( CBR((IBRCH+4), SS))
Ia(2) = ( CBR((IBRCH+5), SS))
Ia(3) = ( CBR((IBRCH+6), SS))
! 20:[voltmetergnd] Voltmeter (Line - Ground) 'Ea'
Ea(1) = EMTDC_VVDC(SS, NT_2(1), 0)
Ea(2) = EMTDC_VVDC(SS, NT_2(2), 0)
Ea(3) = EMTDC_VVDC(SS, NT_2(3), 0)
! 30:[pgb] Output Channel 'Ia'
DO IVD1_1 = 1, 3
PGB(IPGB+1+IVD1_1-1) = Ia(IVD1_1)
ENDDO
! 40:[pgb] Output Channel 'Ea'
DO IVD1_1 = 1, 3
PGB(IPGB+4+IVD1_1-1) = Ea(IVD1_1)
ENDDO
! 50:[pgb] Output Channel 'Ea'
DO IVD1_1 = 1, 3
PGB(IPGB+7+IVD1_1-1) = Ea(IVD1_1)
ENDDO
!---------------------------------------
! Feedbacks and transfers to storage
!---------------------------------------
! Array (1:3) quantities...
DO IT_0 = 1,3
STOF(ISTOF + 0 + IT_0) = Ia(IT_0)
STOF(ISTOF + 3 + IT_0) = Ea(IT_0)
END DO
!---------------------------------------
! Close Model Data read
!---------------------------------------
IF ( TIMEZERO ) CALL EMTDC_CLOSEFILE
RETURN
END
!=======================================================================
SUBROUTINE MainDyn_Begin()
!---------------------------------------
! Standard includes
!---------------------------------------
INCLUDE 'nd.h'
INCLUDE 'emtconst.h'
INCLUDE 's0.h'
INCLUDE 's1.h'
INCLUDE 's4.h'
INCLUDE 'branches.h'
INCLUDE 'pscadv3.h'
INCLUDE 'radiolinks.h'
INCLUDE 'rtconfig.h'
!---------------------------------------
! Function/Subroutine Declarations
!---------------------------------------
!---------------------------------------
! Variable Declarations
!---------------------------------------
! Subroutine Arguments
! Electrical Node Indices
! Control Signals
! Internal Variables
REAL RVD1_1, RVD1_2
! Indexing variables
INTEGER ICALL_NO ! Module call num
INTEGER IT_0 ! Storage Indices
INTEGER SS, INODE, IBRCH ! SS/Node/Branch/Xfmr
!---------------------------------------
! Local Indices
!---------------------------------------
! Define electric network subsystem number
SS = NODE(NNODE+1)
! Increment and assign runtime configuration call indices
ICALL_NO = NCALL_NO
NCALL_NO = NCALL_NO + 1
! Increment global storage indices
INODE = NNODE + 2
NNODE = NNODE + 8
IBRCH = NBRCH(SS)
NBRCH(SS) = NBRCH(SS) + 9
NCSCS = NCSCS + 0
NCSCR = NCSCR + 0
!---------------------------------------
! Electrical Node Lookup
!---------------------------------------
!---------------------------------------
! Generated code from module definition
!---------------------------------------
! 1:[source3] Three Phase Voltage Source Model 1 'Source1'
CALL COMPONENT_ID(ICALL_NO,119983459)
RVD1_1 = 1.0
RVD1_2 = 0.1
CALL ESYS651_CFG(3,2,0,0,0,SS, (IBRCH+1), (IBRCH+2), (IBRCH+3),0,0&
&,0, 60.0,60.0,0.0,13.8,0.0,0.0,1.0,13.8,230.0, 1.0,80.0,2.0,1.0,1.&
&0,0.026, 1.0,80.0,RVD1_1,RVD1_2)
RETURN
END
!=======================================================================
SUBROUTINE MainOut_Begin()
!---------------------------------------
! Standard includes
!---------------------------------------
INCLUDE 'nd.h'
INCLUDE 'emtconst.h'
INCLUDE 's0.h'
INCLUDE 's1.h'
INCLUDE 's4.h'
INCLUDE 'branches.h'
INCLUDE 'pscadv3.h'
INCLUDE 'radiolinks.h'
INCLUDE 'rtconfig.h'
!---------------------------------------
! Function/Subroutine Declarations
!---------------------------------------
!---------------------------------------
! Variable Declarations
!---------------------------------------
! Subroutine Arguments
! Electrical Node Indices
INTEGER NT_2(3)
! Control Signals
! Internal Variables
! Indexing variables
INTEGER ICALL_NO ! Module call num
INTEGER IT_0 ! Storage Indices
INTEGER SS, INODE, IBRCH ! SS/Node/Branch/Xfmr
!---------------------------------------
! Local Indices
!---------------------------------------
! Define electric network subsystem number
SS = NODE(NNODE+1)
! Increment and assign runtime configuration call indices
ICALL_NO = NCALL_NO
NCALL_NO = NCALL_NO + 1
! Increment global storage indices
INODE = NNODE + 2
NNODE = NNODE + 8
IBRCH = NBRCH(SS)
NBRCH(SS) = NBRCH(SS) + 9
NCSCS = NCSCS + 0
NCSCR = NCSCR + 0
!---------------------------------------
! Electrical Node Lookup
!---------------------------------------
! Array (1:3) quantities...
DO IT_0 = 1,3
NT_2(IT_0) = NODE(INODE + 3 + IT_0)
END DO
!---------------------------------------
! Generated code from module definition
!---------------------------------------
! 30:[pgb] Output Channel 'Ia'
! 40:[pgb] Output Channel 'Ea'
! 50:[pgb] Output Channel 'Ea'
RETURN
END
|
section \<open>Pratt's Primality Certificates\<close>
text_raw \<open>\label{sec:pratt}\<close>
theory Pratt_Certificate
imports
Complex_Main
Lehmer.Lehmer
begin
text \<open>
This work formalizes Pratt's proof system as described in his article
``Every Prime has a Succinct Certificate''\cite{pratt1975certificate}.
The proof system makes use of two types of predicates:
\begin{itemize}
\item $\text{Prime}(p)$: $p$ is a prime number
\item $(p, a, x)$: \<open>\<forall>q \<in> prime_factors(x). [a^((p - 1) div q) \<noteq> 1] (mod p)\<close>
\end{itemize}
We represent these predicates with the following datatype:
\<close>
datatype pratt = Prime nat | Triple nat nat nat
text \<open>
Pratt describes an inference system consisting of the axiom $(p, a, 1)$
and the following inference rules:
\begin{itemize}
\item R1: If we know that $(p, a, x)$ and \<open>[a^((p - 1) div q) \<noteq> 1] (mod p)\<close> hold for some
prime number $q$ we can conclude $(p, a, qx)$ from that.
\item R2: If we know that $(p, a, p - 1)$ and \<open>[a^(p - 1) = 1] (mod p)\<close> hold, we can
infer $\text{Prime}(p)$.
\end{itemize}
Both rules follow from Lehmer's theorem as we will show later on.
A list of predicates (i.e., values of type @{type pratt}) is a \emph{certificate}, if it is
built according to the inference system described above. I.e., a list @{term "x # xs :: pratt list"}
is a certificate if @{term "xs :: pratt list"} is a certificate and @{term "x :: pratt"} is
either an axiom or all preconditions of @{term "x :: pratt"} occur in @{term "xs :: pratt list"}.
We call a certificate @{term "xs :: pratt list"} a \emph{certificate for @{term p}},
if @{term "Prime p"} occurs in @{term "xs :: pratt list"}.
The function \<open>valid_cert\<close> checks whether a list is a certificate.
\<close>
fun valid_cert :: "pratt list \<Rightarrow> bool" where
"valid_cert [] = True"
| R2: "valid_cert (Prime p#xs) \<longleftrightarrow> 1 < p \<and> valid_cert xs
\<and> (\<exists> a . [a^(p - 1) = 1] (mod p) \<and> Triple p a (p - 1) \<in> set xs)"
| R1: "valid_cert (Triple p a x # xs) \<longleftrightarrow> p > 1 \<and> 0 < x \<and> valid_cert xs \<and> (x=1 \<or>
(\<exists>q y. x = q * y \<and> Prime q \<in> set xs \<and> Triple p a y \<in> set xs
\<and> [a^((p - 1) div q) \<noteq> 1] (mod p)))"
text \<open>
We define a function @{term size_cert} to measure the size of a certificate, assuming
a binary encoding of numbers. We will use this to show that there is a certificate for a
prime number $p$ such that the size of the certificate is polynomially bounded in the size
of the binary representation of $p$.
\<close>
fun size_pratt :: "pratt \<Rightarrow> real" where
"size_pratt (Prime p) = log 2 p" |
"size_pratt (Triple p a x) = log 2 p + log 2 a + log 2 x"
fun size_cert :: "pratt list \<Rightarrow> real" where
"size_cert [] = 0" |
"size_cert (x # xs) = 1 + size_pratt x + size_cert xs"
subsection \<open>Soundness\<close>
text \<open>
In Section \ref{sec:pratt} we introduced the predicates $\text{Prime}(p)$ and $(p, a, x)$.
  In this section we show that every predicate occurring in a certificate holds.
  In particular, if $\text{Prime}(p)$ occurs in a certificate, then $p$ is prime.
\<close>
lemma prime_factors_one [simp]: shows "prime_factors (Suc 0) = {}"
using prime_factorization_1 [where ?'a = nat] by simp
lemma prime_factors_of_prime: fixes p :: nat assumes "prime p" shows "prime_factors p = {p}"
using assms by (fact prime_prime_factors)
definition pratt_triple :: "nat \<Rightarrow> nat \<Rightarrow> nat \<Rightarrow> bool" where
"pratt_triple p a x \<longleftrightarrow> x > 0 \<and> (\<forall>q\<in>prime_factors x. [a ^ ((p - 1) div q) \<noteq> 1] (mod p))"
lemma pratt_triple_1: "p > 1 \<Longrightarrow> x = 1 \<Longrightarrow> pratt_triple p a x"
by (auto simp: pratt_triple_def)
lemma pratt_triple_extend:
assumes "prime q" "pratt_triple p a y"
"p > 1" "x > 0" "x = q * y" "[a ^ ((p - 1) div q) \<noteq> 1] (mod p)"
shows "pratt_triple p a x"
proof -
have "prime_factors x = insert q (prime_factors y)"
using assms by (simp add: prime_factors_product prime_prime_factors)
also have "\<forall>r\<in>\<dots>. [a ^ ((p - 1) div r) \<noteq> 1] (mod p)"
using assms by (auto simp: pratt_triple_def)
finally show ?thesis using assms
unfolding pratt_triple_def by blast
qed
lemma pratt_triple_imp_prime:
assumes "pratt_triple p a x" "p > 1" "x = p - 1" "[a ^ (p - 1) = 1] (mod p)"
shows "prime p"
using lehmers_theorem[of p a] assms by (auto simp: pratt_triple_def)
theorem pratt_sound:
assumes 1: "valid_cert c"
assumes 2: "t \<in> set c"
shows "(t = Prime p \<longrightarrow> prime p) \<and>
(t = Triple p a x \<longrightarrow> ((\<forall>q \<in> prime_factors x . [a^((p - 1) div q) \<noteq> 1] (mod p)) \<and> 0<x))"
using assms
proof (induction c arbitrary: p a x t)
case Nil then show ?case by force
next
case (Cons y ys)
{ assume "y=Triple p a x" "x=1"
then have "(\<forall> q \<in> prime_factors x . [a^((p - 1) div q) \<noteq> 1] (mod p)) \<and> 0<x" by simp
}
moreover
{ assume x_y: "y=Triple p a x" "x~=1"
hence "x>0" using Cons.prems by auto
obtain q z where "x=q*z" "Prime q \<in> set ys \<and> Triple p a z \<in> set ys"
and cong:"[a^((p - 1) div q) \<noteq> 1] (mod p)" using Cons.prems x_y by auto
then have factors_IH:"(\<forall> r \<in> prime_factors z . [a^((p - 1) div r) \<noteq> 1] (mod p))" "prime q" "z>0"
using Cons.IH Cons.prems \<open>x>0\<close> \<open>y=Triple p a x\<close>
by force+
then have "prime_factors x = prime_factors z \<union> {q}" using \<open>x =q*z\<close> \<open>x>0\<close>
by (simp add: prime_factors_product prime_factors_of_prime)
then have "(\<forall> q \<in> prime_factors x . [a^((p - 1) div q) \<noteq> 1] (mod p)) \<and> 0 < x"
using factors_IH cong by (simp add: \<open>x>0\<close>)
}
ultimately have y_Triple:"y=Triple p a x \<Longrightarrow> (\<forall> q \<in> prime_factors x .
[a^((p - 1) div q) \<noteq> 1] (mod p)) \<and> 0<x" by linarith
{ assume y: "y=Prime p" "p>2" then
obtain a where a:"[a^(p - 1) = 1] (mod p)" "Triple p a (p - 1) \<in> set ys"
using Cons.prems by auto
then have Bier:"(\<forall>q\<in>prime_factors (p - 1). [a^((p - 1) div q) \<noteq> 1] (mod p))"
using Cons.IH Cons.prems(1) by (simp add:y(1))
then have "prime p" using lehmers_theorem[OF _ _a(1)] \<open>p>2\<close> by fastforce
}
moreover
{ assume "y=Prime p" "p=2" hence "prime p" by simp }
moreover
{ assume "y=Prime p" then have "p>1" using Cons.prems by simp }
ultimately have y_Prime:"y = Prime p \<Longrightarrow> prime p" by linarith
show ?case
proof (cases "t \<in> set ys")
case True
show ?thesis using Cons.IH[OF _ True] Cons.prems(1) by (cases y) auto
next
case False
thus ?thesis using Cons.prems(2) y_Prime y_Triple by force
qed
qed
corollary pratt_primeI:
assumes "valid_cert xs" "Prime p \<in> set xs"
shows "prime p"
using pratt_sound[OF assms] by simp
subsection \<open>Completeness\<close>
text \<open>
In this section we show completeness of Pratt's proof system, i.e., we show that for
every prime number $p$ there exists a certificate for $p$. We also give an upper
  bound for the size of a minimal certificate.
  The proof we give is constructive: we assume that we have certificates for all prime
  factors of $p - 1$ and use these to build a certificate for $p$. It is
important to note that certificates can be concatenated.
\<close>
lemma valid_cert_appendI:
assumes "valid_cert r"
assumes "valid_cert s"
shows "valid_cert (r @ s)"
using assms
proof (induction r)
case (Cons y ys) then show ?case by (cases y) auto
qed simp
lemma valid_cert_concatI: "(\<forall>x \<in> set xs . valid_cert x) \<Longrightarrow> valid_cert (concat xs)"
by (induction xs) (auto simp add: valid_cert_appendI)
lemma size_pratt_le:
fixes d::real
assumes "\<forall> x \<in> set c. size_pratt x \<le> d"
shows "size_cert c \<le> length c * (1 + d)" using assms
by (induction c) (simp_all add: algebra_simps)
fun build_fpc :: "nat \<Rightarrow> nat \<Rightarrow> nat \<Rightarrow> nat list \<Rightarrow> pratt list" where
"build_fpc p a r [] = [Triple p a r]" |
"build_fpc p a r (y # ys) = Triple p a r # build_fpc p a (r div y) ys"
text \<open>
The function @{term build_fpc} helps us to construct a certificate for $p$ from
the certificates for the prime factors of $p - 1$. Called as
@{term "build_fpc p a (p - 1) qs"} where $@{term "qs"} = q_1 \ldots q_n$
  is a prime decomposition of $p - 1$ such that $q_1 \cdot \dotsb \cdot q_n = @{term "p - 1 :: nat"}$,
it returns the following list of predicates:
\[
(p,a,p-1), (p,a,\frac{p - 1}{q_1}), (p,a,\frac{p - 1}{q_1 q_2}), \ldots, (p,a,\frac{p-1}{q_1 \ldots q_n}) = (p,a,1)
\]
  I.e., if there is an appropriate $a$ and a certificate @{term rs} for all
  prime factors of $p - 1$, then we can construct a certificate for $p$ as
@{term [display] "Prime p # build_fpc p a (p - 1) qs @ rs"}
\<close>
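text \<open>
  For example, \<open>build_fpc 7 3 6 [3, 2]\<close> yields the chain
  $(7,3,6),\ (7,3,2),\ (7,3,1)$, i.e. the list above for $p = 7$, $a = 3$ and the
  factorisation $6 = 3 \cdot 2$ of $p - 1$.
\<close>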
text \<open>
The following lemma shows that \<open>build_fpc\<close> extends a certificate that
satisfies the preconditions described before to a correct certificate.
\<close>
lemma correct_fpc:
assumes "valid_cert xs" "p > 1"
assumes "prod_list qs = r" "r \<noteq> 0"
assumes "\<forall> q \<in> set qs . Prime q \<in> set xs"
assumes "\<forall> q \<in> set qs . [a^((p - 1) div q) \<noteq> 1] (mod p)"
shows "valid_cert (build_fpc p a r qs @ xs)"
using assms
proof (induction qs arbitrary: r)
case Nil thus ?case by auto
next
case (Cons y ys)
have "prod_list ys = r div y" using Cons.prems by auto
then have T_in: "Triple p a (prod_list ys) \<in> set (build_fpc p a (r div y) ys @ xs)"
by (cases ys) auto
have "valid_cert (build_fpc p a (r div y) ys @ xs)"
using Cons.prems by (intro Cons.IH) auto
then have "valid_cert (Triple p a r # build_fpc p a (r div y) ys @ xs)"
using \<open>r \<noteq> 0\<close> T_in Cons.prems by auto
then show ?case by simp
qed
lemma length_fpc:
"length (build_fpc p a r qs) = length qs + 1" by (induction qs arbitrary: r) auto
lemma div_gt_0:
fixes m n :: nat assumes "m \<le> n" "0 < m" shows "0 < n div m"
proof -
have "0 < m div m" using \<open>0 < m\<close> div_self by auto
also have "m div m \<le> n div m" using \<open>m \<le> n\<close> by (rule div_le_mono)
finally show ?thesis .
qed
lemma size_pratt_fpc:
assumes "a \<le> p" "r \<le> p" "0 < a" "0 < r" "0 < p" "prod_list qs = r"
shows "\<forall>x \<in> set (build_fpc p a r qs) . size_pratt x \<le> 3 * log 2 p" using assms
proof (induction qs arbitrary: r)
case Nil
then have "log 2 a \<le> log 2 p" "log 2 r \<le> log 2 p" by auto
then show ?case by simp
next
case (Cons q qs)
then have "log 2 a \<le> log 2 p" "log 2 r \<le> log 2 p" by auto
then have "log 2 a + log 2 r \<le> 2 * log 2 p" by arith
moreover have "r div q > 0" using Cons.prems by (fastforce intro: div_gt_0)
moreover hence "prod_list qs = r div q" using Cons.prems(6) by auto
moreover have "r div q \<le> p" using \<open>r\<le>p\<close> div_le_dividend[of r q] by linarith
ultimately show ?case using Cons by simp
qed
lemma concat_set:
assumes "\<forall> q \<in> qs . \<exists> c \<in> set cs . Prime q \<in> set c"
shows "\<forall> q \<in> qs . Prime q \<in> set (concat cs)"
using assms by (induction cs) auto
lemma p_in_prime_factorsE:
fixes n :: nat
assumes "p \<in> prime_factors n" "0 < n"
obtains "2 \<le> p" "p \<le> n" "p dvd n" "prime p"
proof
from assms show "prime p" by auto
then show "2 \<le> p" by (auto dest: prime_gt_1_nat)
from assms show "p dvd n" by auto
then show "p \<le> n" using \<open>0 < n\<close> by (rule dvd_imp_le)
qed
lemma prime_factors_list_prime:
fixes n :: nat
assumes "prime n"
shows "\<exists> qs. prime_factors n = set qs \<and> prod_list qs = n \<and> length qs = 1"
using assms by (auto simp add: prime_factorization_prime intro: exI [of _ "[n]"])
lemma prime_factors_list:
fixes n :: nat assumes "3 < n" "\<not> prime n"
shows "\<exists> qs. prime_factors n = set qs \<and> prod_list qs = n \<and> length qs \<ge> 2"
using assms
proof (induction n rule: less_induct)
case (less n)
obtain p where "p \<in> prime_factors n" using \<open>n > 3\<close> prime_factors_elem by force
then have p':"2 \<le> p" "p \<le> n" "p dvd n" "prime p"
using \<open>3 < n\<close> by (auto elim: p_in_prime_factorsE)
{ assume "n div p > 3" "\<not> prime (n div p)"
then obtain qs
where "prime_factors (n div p) = set qs" "prod_list qs = (n div p)" "length qs \<ge> 2"
using p' by atomize_elim (auto intro: less simp: div_gt_0)
moreover
have "prime_factors (p * (n div p)) = insert p (prime_factors (n div p))"
using \<open>3 < n\<close> \<open>2 \<le> p\<close> \<open>p \<le> n\<close> \<open>prime p\<close>
by (auto simp: prime_factors_product div_gt_0 prime_factors_of_prime)
ultimately
have "prime_factors n = set (p # qs)" "prod_list (p # qs) = n" "length (p#qs) \<ge> 2"
using \<open>p dvd n\<close> by simp_all
hence ?case by blast
}
moreover
{ assume "prime (n div p)"
then obtain qs
where "prime_factors (n div p) = set qs" "prod_list qs = (n div p)" "length qs = 1"
using prime_factors_list_prime by blast
moreover
have "prime_factors (p * (n div p)) = insert p (prime_factors (n div p))"
using \<open>3 < n\<close> \<open>2 \<le> p\<close> \<open>p \<le> n\<close> \<open>prime p\<close>
by (auto simp: prime_factors_product div_gt_0 prime_factors_of_prime)
ultimately
have "prime_factors n = set (p # qs)" "prod_list (p # qs) = n" "length (p#qs) \<ge> 2"
using \<open>p dvd n\<close> by simp_all
hence ?case by blast
} note case_prime = this
moreover
{ assume "n div p = 1"
hence "n = p" using \<open>n>3\<close> using One_leq_div[OF \<open>p dvd n\<close>] p'(2) by force
hence ?case using \<open>prime p\<close> \<open>\<not> prime n\<close> by auto
}
moreover
{ assume "n div p = 2"
hence ?case using case_prime by force
}
moreover
{ assume "n div p = 3"
hence ?case using p' case_prime by force
}
ultimately show ?case using p' div_gt_0[of p n] case_prime by fastforce
qed
lemma prod_list_ge:
fixes xs::"nat list"
assumes "\<forall> x \<in> set xs . x \<ge> 1"
shows "prod_list xs \<ge> 1" using assms by (induction xs) auto
lemma sum_list_log:
fixes b::real
fixes xs::"nat list"
assumes b: "b > 0" "b \<noteq> 1"
assumes xs:"\<forall> x \<in> set xs . x \<ge> b"
shows "(\<Sum>x\<leftarrow>xs. log b x) = log b (prod_list xs)"
using assms
proof (induction xs)
case Nil
thus ?case by simp
next
case (Cons y ys)
have "real (prod_list ys) > 0" using prod_list_ge Cons.prems by fastforce
thus ?case using log_mult[OF Cons.prems(1-2)] Cons by force
qed
lemma concat_length_le:
fixes g :: "nat \<Rightarrow> real"
assumes "\<forall> x \<in> set xs . real (length (f x)) \<le> g x"
shows "length (concat (map f xs)) \<le> (\<Sum>x\<leftarrow>xs. g x)" using assms
by (induction xs) force+
lemma prime_gt_3_impl_p_minus_one_not_prime:
fixes p::nat
assumes "prime p" "p>3"
shows "\<not> prime (p - 1)"
proof
assume "prime (p - 1)"
have "\<not> even p" using assms by (simp add: prime_odd_nat)
hence "2 dvd (p - 1)" by presburger
then obtain q where "p - 1 = 2 * q" ..
then have "2 \<in> prime_factors (p - 1)" using \<open>p>3\<close>
by (auto simp: prime_factorization_times_prime)
thus False using prime_factors_of_prime \<open>p>3\<close> \<open>prime (p - 1)\<close> by auto
qed
text \<open>
We now prove that Pratt's proof system is complete and derive upper bounds for
the length and the size of the entries of a minimal certificate.
\<close>
theorem pratt_complete':
assumes "prime p"
shows "\<exists>c. Prime p \<in> set c \<and> valid_cert c \<and> length c \<le> 6*log 2 p - 4 \<and> (\<forall> x \<in> set c. size_pratt x \<le> 3 * log 2 p)" using assms
proof (induction p rule: less_induct)
case (less p)
from \<open>prime p\<close> have "p > 1" by (rule prime_gt_1_nat)
then consider "p = 2" | " p = 3" | "p > 3" by force
thus ?case
proof cases
assume [simp]: "p = 2"
have "Prime p \<in> set [Prime 2, Triple 2 1 1]" by simp
thus ?case by fastforce
next
assume [simp]: "p = 3"
let ?cert = "[Prime 3, Triple 3 2 2, Triple 3 2 1, Prime 2, Triple 2 1 1]"
have "length ?cert \<le> 6*log 2 p - 4 \<longleftrightarrow> 3 \<le> 2 * log 2 3" by simp
also have "2 * log 2 3 = log 2 (3 ^ 2 :: real)" by (subst log_nat_power) simp_all
also have "\<dots> = log 2 9" by simp
also have "3 \<le> log 2 9 \<longleftrightarrow> True" by (subst le_log_iff) simp_all
finally show ?case
by (intro exI[where x = "?cert"]) (simp add: cong_def)
next
assume "p > 3"
have qlp: "\<forall>q \<in> prime_factors (p - 1) . q < p" using \<open>prime p\<close>
by (metis One_nat_def Suc_pred le_imp_less_Suc lessI less_trans p_in_prime_factorsE prime_gt_1_nat zero_less_diff)
hence factor_certs:"\<forall>q \<in> prime_factors (p - 1) . (\<exists>c . ((Prime q \<in> set c) \<and> (valid_cert c)
\<and> length c \<le> 6*log 2 q - 4) \<and> (\<forall> x \<in> set c. size_pratt x \<le> 3 * log 2 q))"
by (auto intro: less.IH)
obtain a where a:"[a^(p - 1) = 1] (mod p) \<and> (\<forall> q. q \<in> prime_factors (p - 1)
\<longrightarrow> [a^((p - 1) div q) \<noteq> 1] (mod p))" and a_size: "a > 0" "a < p"
using converse_lehmer[OF \<open>prime p\<close>] by blast
have "\<not> prime (p - 1)" using \<open>p>3\<close> prime_gt_3_impl_p_minus_one_not_prime \<open>prime p\<close> by auto
have "p \<noteq> 4" using \<open>prime p\<close> by auto
hence "p - 1 > 3" using \<open>p > 3\<close> by auto
then obtain qs where prod_qs_eq:"prod_list qs = p - 1"
and qs_eq:"set qs = prime_factors (p - 1)" and qs_length_eq: "length qs \<ge> 2"
using prime_factors_list[OF _ \<open>\<not> prime (p - 1)\<close>] by auto
obtain f where f:"\<forall>q \<in> prime_factors (p - 1) . \<exists> c. f q = c
\<and> ((Prime q \<in> set c) \<and> (valid_cert c) \<and> length c \<le> 6*log 2 q - 4)
\<and> (\<forall> x \<in> set c. size_pratt x \<le> 3 * log 2 q)"
using factor_certs by metis
let ?cs = "map f qs"
have cs: "\<forall>q \<in> prime_factors (p - 1) . (\<exists>c \<in> set ?cs . (Prime q \<in> set c) \<and> (valid_cert c)
\<and> length c \<le> 6*log 2 q - 4
\<and> (\<forall> x \<in> set c. size_pratt x \<le> 3 * log 2 q))"
using f qs_eq by auto
have cs_cert_size: "\<forall>c \<in> set ?cs . \<forall> x \<in> set c. size_pratt x \<le> 3 * log 2 p"
proof
fix c assume "c \<in> set (map f qs)"
then obtain q where "c = f q" and "q \<in> set qs" by auto
hence *:"\<forall> x \<in> set c. size_pratt x \<le> 3 * log 2 q" using f qs_eq by blast
have "q < p" "q > 0" using qlp \<open>q \<in> set qs\<close> qs_eq prime_factors_gt_0_nat by auto
show "\<forall> x \<in> set c. size_pratt x \<le> 3 * log 2 p"
proof
fix x assume "x \<in> set c"
hence "size_pratt x \<le> 3 * log 2 q" using * by fastforce
also have "\<dots> \<le> 3 * log 2 p" using \<open>q < p\<close> \<open>q > 0\<close> \<open>p > 3\<close> by simp
finally show "size_pratt x \<le> 3 * log 2 p" .
qed
qed
have cs_valid_all: "\<forall>c \<in> set ?cs . valid_cert c"
using f qs_eq by fastforce
have "\<forall>x \<in> set (build_fpc p a (p - 1) qs). size_pratt x \<le> 3 * log 2 p"
using cs_cert_size a_size \<open>p > 3\<close> prod_qs_eq by (intro size_pratt_fpc) auto
hence "\<forall>x \<in> set (build_fpc p a (p - 1) qs @ concat ?cs) . size_pratt x \<le> 3 * log 2 p"
using cs_cert_size by auto
moreover
have "Triple p a (p - 1) \<in> set (build_fpc p a (p - 1) qs @ concat ?cs)" by (cases qs) auto
moreover
have "valid_cert ((build_fpc p a (p - 1) qs)@ concat ?cs)"
proof (rule correct_fpc)
show "valid_cert (concat ?cs)"
using cs_valid_all by (auto simp: valid_cert_concatI)
show "prod_list qs = p - 1" by (rule prod_qs_eq)
show "p - 1 \<noteq> 0" using prime_gt_1_nat[OF \<open>prime p\<close>] by arith
show "\<forall> q \<in> set qs . Prime q \<in> set (concat ?cs)"
using concat_set[of "prime_factors (p - 1)"] cs qs_eq by blast
show "\<forall> q \<in> set qs . [a^((p - 1) div q) \<noteq> 1] (mod p)" using qs_eq a by auto
qed (insert \<open>p > 3\<close>, simp_all)
moreover
{ let ?k = "length qs"
have qs_ge_2:"\<forall>q \<in> set qs . q \<ge> 2" using qs_eq
by (auto intro: prime_ge_2_nat)
have "\<forall>x\<in>set qs. real (length (f x)) \<le> 6 * log 2 (real x) - 4" using f qs_eq by blast
hence "length (concat ?cs) \<le> (\<Sum>q\<leftarrow>qs. 6*log 2 q - 4)" using concat_length_le
by fast
hence "length (Prime p # ((build_fpc p a (p - 1) qs)@ concat ?cs))
\<le> ((\<Sum>q\<leftarrow>(map real qs). 6*log 2 q - 4) + ?k + 2)"
by (simp add: o_def length_fpc)
also have "\<dots> = (6*(\<Sum>q\<leftarrow>(map real qs). log 2 q) + (-4 * real ?k) + ?k + 2)"
by (simp add: o_def sum_list_subtractf sum_list_triv sum_list_const_mult)
also have "\<dots> \<le> 6*log 2 (p - 1) - 4" using \<open>?k\<ge>2\<close> prod_qs_eq sum_list_log[of 2 qs] qs_ge_2
by force
also have "\<dots> \<le> 6*log 2 p - 4" using log_le_cancel_iff[of 2 "p - 1" p] \<open>p>3\<close> by force
ultimately have "length (Prime p # ((build_fpc p a (p - 1) qs)@ concat ?cs))
\<le> 6*log 2 p - 4" by linarith }
ultimately obtain c where c:"Triple p a (p - 1) \<in> set c" "valid_cert c"
"length (Prime p #c) \<le> 6*log 2 p - 4"
"(\<forall> x \<in> set c. size_pratt x \<le> 3 * log 2 p)" by blast
hence "Prime p \<in> set (Prime p # c)" "valid_cert (Prime p # c)"
"(\<forall> x \<in> set (Prime p # c). size_pratt x \<le> 3 * log 2 p)"
using a \<open>prime p\<close> by (auto simp: Primes.prime_gt_Suc_0_nat)
thus ?case using c by blast
qed
qed
text \<open>
We now recapitulate our results. A number $p$ is prime if and only if there
is a certificate for $p$. Moreover, for a prime $p$ there always is a certificate
whose size is polynomially bounded in the logarithm of $p$.
\<close>
corollary pratt:
"prime p \<longleftrightarrow> (\<exists>c. Prime p \<in> set c \<and> valid_cert c)"
using pratt_complete' pratt_sound(1) by blast
corollary pratt_size:
assumes "prime p"
shows "\<exists>c. Prime p \<in> set c \<and> valid_cert c \<and> size_cert c \<le> (6 * log 2 p - 4) * (1 + 3 * log 2 p)"
proof -
obtain c where c: "Prime p \<in> set c" "valid_cert c"
and len: "length c \<le> 6*log 2 p - 4" and "(\<forall> x \<in> set c. size_pratt x \<le> 3 * log 2 p)"
using pratt_complete' assms by blast
hence "size_cert c \<le> length c * (1 + 3 * log 2 p)" by (simp add: size_pratt_le)
also have "\<dots> \<le> (6*log 2 p - 4) * (1 + 3 * log 2 p)" using len by simp
finally show ?thesis using c by blast
qed
subsection \<open>Efficient modular exponentiation\<close>
locale efficient_power =
fixes f :: "'a \<Rightarrow> 'a \<Rightarrow> 'a"
assumes f_assoc: "\<And>x z. f x (f x z) = f (f x x) z"
begin
function efficient_power :: "'a \<Rightarrow> 'a \<Rightarrow> nat \<Rightarrow> 'a" where
"efficient_power y x 0 = y"
| "efficient_power y x (Suc 0) = f x y"
| "n \<noteq> 0 \<Longrightarrow> even n \<Longrightarrow> efficient_power y x n = efficient_power y (f x x) (n div 2)"
| "n \<noteq> 1 \<Longrightarrow> odd n \<Longrightarrow> efficient_power y x n = efficient_power (f x y) (f x x) (n div 2)"
by force+
termination by (relation "measure (snd \<circ> snd)") (auto elim: oddE)
lemma efficient_power_code:
"efficient_power y x n =
(if n = 0 then y
else if n = 1 then f x y
else if even n then efficient_power y (f x x) (n div 2)
else efficient_power (f x y) (f x x) (n div 2))"
by (induction y x n rule: efficient_power.induct) auto
lemma efficient_power_correct: "efficient_power y x n = (f x ^^ n) y"
proof -
have [simp]: "f ^^ 2 = (\<lambda>x. f (f x))" for f :: "'a \<Rightarrow> 'a"
by (simp add: eval_nat_numeral o_def)
show ?thesis
by (induction y x n rule: efficient_power.induct)
(auto elim!: evenE oddE simp: funpow_mult [symmetric] funpow_Suc_right f_assoc
simp del: funpow.simps(2))
qed
end
interpretation mod_exp_nat: efficient_power "\<lambda>x y :: nat. (x * y) mod m"
by standard (simp add: mod_mult_left_eq mod_mult_right_eq mult_ac)
definition mod_exp_nat_aux where "mod_exp_nat_aux = mod_exp_nat.efficient_power"
lemma mod_exp_nat_aux_code [code]:
"mod_exp_nat_aux m y x n =
(if n = 0 then y
else if n = 1 then (x * y) mod m
else if even n then mod_exp_nat_aux m y ((x * x) mod m) (n div 2)
else mod_exp_nat_aux m ((x * y) mod m) ((x * x) mod m) (n div 2))"
unfolding mod_exp_nat_aux_def by (rule mod_exp_nat.efficient_power_code)
lemma mod_exp_nat_aux_correct:
"mod_exp_nat_aux m y x n mod m = (x ^ n * y) mod m"
proof -
have "mod_exp_nat_aux m y x n = ((\<lambda>y. x * y mod m) ^^ n) y"
by (simp add: mod_exp_nat_aux_def mod_exp_nat.efficient_power_correct)
also have "((\<lambda>y. x * y mod m) ^^ n) y mod m = (x ^ n * y) mod m"
proof (induction n)
case (Suc n)
hence "x * ((\<lambda>y. x * y mod m) ^^ n) y mod m = x * x ^ n * y mod m"
by (metis mod_mult_right_eq mult.assoc)
thus ?case by auto
qed auto
finally show ?thesis .
qed
definition mod_exp_nat :: "nat \<Rightarrow> nat \<Rightarrow> nat \<Rightarrow> nat"
where [code_abbrev]: "mod_exp_nat b e m = (b ^ e) mod m"
lemma mod_exp_nat_code [code]: "mod_exp_nat b e m = mod_exp_nat_aux m 1 b e mod m"
by (simp add: mod_exp_nat_def mod_exp_nat_aux_correct)
lemmas [code_unfold] = cong_def
lemma eval_mod_exp_nat_aux [simp]:
"mod_exp_nat_aux m y x 0 = y"
"mod_exp_nat_aux m y x (Suc 0) = (x * y) mod m"
"mod_exp_nat_aux m y x (numeral (num.Bit0 n)) =
mod_exp_nat_aux m y (x\<^sup>2 mod m) (numeral n)"
"mod_exp_nat_aux m y x (numeral (num.Bit1 n)) =
mod_exp_nat_aux m ((x * y) mod m) (x\<^sup>2 mod m) (numeral n)"
proof -
define n' where "n' = (numeral n :: nat)"
have [simp]: "n' \<noteq> 0" by (auto simp: n'_def)
show "mod_exp_nat_aux m y x 0 = y" and "mod_exp_nat_aux m y x (Suc 0) = (x * y) mod m"
by (simp_all add: mod_exp_nat_aux_def)
have "numeral (num.Bit0 n) = (2 * n')"
by (subst numeral.numeral_Bit0) (simp del: arith_simps add: n'_def)
also have "mod_exp_nat_aux m y x \<dots> = mod_exp_nat_aux m y (x^2 mod m) n'"
by (subst mod_exp_nat_aux_code) (simp_all add: power2_eq_square)
finally show "mod_exp_nat_aux m y x (numeral (num.Bit0 n)) =
mod_exp_nat_aux m y (x\<^sup>2 mod m) (numeral n)"
by (simp add: n'_def)
have "numeral (num.Bit1 n) = Suc (2 * n')"
by (subst numeral.numeral_Bit1) (simp del: arith_simps add: n'_def)
also have "mod_exp_nat_aux m y x \<dots> = mod_exp_nat_aux m ((x * y) mod m) (x^2 mod m) n'"
by (subst mod_exp_nat_aux_code) (simp_all add: power2_eq_square)
finally show "mod_exp_nat_aux m y x (numeral (num.Bit1 n)) =
mod_exp_nat_aux m ((x * y) mod m) (x\<^sup>2 mod m) (numeral n)"
by (simp add: n'_def)
qed
lemma eval_mod_exp [simp]:
"mod_exp_nat b' 0 m' = 1 mod m'"
"mod_exp_nat b' 1 m' = b' mod m'"
"mod_exp_nat b' (Suc 0) m' = b' mod m'"
"mod_exp_nat b' e' 0 = b' ^ e'"
"mod_exp_nat b' e' 1 = 0"
"mod_exp_nat b' e' (Suc 0) = 0"
"mod_exp_nat 0 1 m' = 0"
"mod_exp_nat 0 (Suc 0) m' = 0"
"mod_exp_nat 0 (numeral e) m' = 0"
"mod_exp_nat 1 e' m' = 1 mod m'"
"mod_exp_nat (Suc 0) e' m' = 1 mod m'"
"mod_exp_nat (numeral b) (numeral e) (numeral m) =
mod_exp_nat_aux (numeral m) 1 (numeral b) (numeral e) mod numeral m"
by (simp_all add: mod_exp_nat_def mod_exp_nat_aux_correct)
subsection \<open>Executable certificate checker\<close>
lemmas [code] = valid_cert.simps(1)
context
begin
lemma valid_cert_Cons1 [code]:
"valid_cert (Prime p # xs) \<longleftrightarrow>
p > 1 \<and> (\<exists>t\<in>set xs. case t of Prime _ \<Rightarrow> False |
Triple p' a x \<Rightarrow> p' = p \<and> x = p - 1 \<and> mod_exp_nat a (p-1) p = 1 ) \<and> valid_cert xs"
(is "?lhs = ?rhs")
proof
assume ?lhs thus ?rhs by (auto simp: mod_exp_nat_def cong_def split: pratt.splits)
next
assume ?rhs
hence "p > 1" "valid_cert xs" by blast+
moreover from \<open>?rhs\<close> obtain t where "t \<in> set xs" "case t of Prime _ \<Rightarrow> False |
Triple p' a x \<Rightarrow> p' = p \<and> x = p - 1 \<and> [a^(p-1) = 1] (mod p)"
by (auto simp: cong_def mod_exp_nat_def cong: pratt.case_cong)
ultimately show ?lhs by (cases t) auto
qed
private lemma Suc_0_mod_eq_Suc_0_iff:
"Suc 0 mod n = Suc 0 \<longleftrightarrow> n \<noteq> Suc 0"
proof -
consider "n = 0" | "n = Suc 0" | "n > 1" by (cases n) auto
thus ?thesis by cases auto
qed
private lemma Suc_0_eq_Suc_0_mod_iff:
"Suc 0 = Suc 0 mod n \<longleftrightarrow> n \<noteq> Suc 0"
using Suc_0_mod_eq_Suc_0_iff by (simp add: eq_commute)
lemma valid_cert_Cons2 [code]:
"valid_cert (Triple p a x # xs) \<longleftrightarrow> x > 0 \<and> p > 1 \<and> (x = 1 \<or> (
(\<exists>t\<in>set xs. case t of Prime _ \<Rightarrow> False |
Triple p' a' y \<Rightarrow> p' = p \<and> a' = a \<and> y dvd x \<and>
(let q = x div y in Prime q \<in> set xs \<and> mod_exp_nat a ((p-1) div q) p \<noteq> 1)))) \<and> valid_cert xs"
(is "?lhs = ?rhs")
proof
assume ?lhs
from \<open>?lhs\<close> have pos: "x > 0" and gt_1: "p > 1" and valid: "valid_cert xs" by simp_all
show ?rhs
proof (cases "x = 1")
case True
with \<open>?lhs\<close> show ?thesis by auto
next
case False
with \<open>?lhs\<close> have "(\<exists>q y. x = q * y \<and> Prime q \<in> set xs \<and> Triple p a y \<in> set xs
\<and> [a^((p - 1) div q) \<noteq> 1] (mod p))" by auto
then guess q y by (elim exE conjE) note qy = this
hence "(\<exists>t\<in>set xs. case t of Prime _ \<Rightarrow> False |
Triple p' a' y \<Rightarrow> p' = p \<and> a' = a \<and> y dvd x \<and>
(let q = x div y in Prime q \<in> set xs \<and> mod_exp_nat a ((p-1) div q) p \<noteq> 1))"
using pos gt_1 by (intro bexI [of _ "Triple p a y"])
(auto simp: Suc_0_mod_eq_Suc_0_iff Suc_0_eq_Suc_0_mod_iff cong_def mod_exp_nat_def)
with pos gt_1 valid show ?thesis by blast
qed
next
assume ?rhs
hence pos: "x > 0" and gt_1: "p > 1" and valid: "valid_cert xs" by simp_all
show ?lhs
proof (cases "x = 1")
case True
with \<open>?rhs\<close> show ?thesis by auto
next
case False
with \<open>?rhs\<close> obtain t where t: "t \<in> set xs" "case t of Prime x \<Rightarrow> False
| Triple p' a' y \<Rightarrow> p' = p \<and> a' = a \<and> y dvd x \<and> (let q = x div y
in Prime q \<in> set xs \<and> mod_exp_nat a ((p - 1) div q) p \<noteq> 1)" by auto
then obtain y where y: "t = Triple p a y" "y dvd x" "let q = x div y in Prime q \<in> set xs \<and>
mod_exp_nat a ((p - 1) div q) p \<noteq> 1"
by (cases t rule: pratt.exhaust) auto
with gt_1 have y': "let q = x div y in Prime q \<in> set xs \<and> [a^((p - 1) div q) \<noteq> 1] (mod p)"
by (auto simp: cong_def Let_def mod_exp_nat_def Suc_0_mod_eq_Suc_0_iff Suc_0_eq_Suc_0_mod_iff)
define q where "q = x div y"
have "\<exists>q y. x = q * y \<and> Prime q \<in> set xs \<and> Triple p a y \<in> set xs
\<and> [a^((p - 1) div q) \<noteq> 1] (mod p)"
by (rule exI[of _ q], rule exI[of _ y]) (insert t y y', auto simp: Let_def q_def)
with pos gt_1 valid show ?thesis by simp
qed
qed
declare valid_cert.simps(2,3) [simp del]
lemmas eval_valid_cert = valid_cert.simps(1) valid_cert_Cons1 valid_cert_Cons2
end
text \<open>
The following alternative tree representation of certificates is better suited for
efficient checking.
\<close>
datatype pratt_tree = Pratt_Node "nat \<times> nat \<times> pratt_tree list"
fun pratt_tree_number where
"pratt_tree_number (Pratt_Node (n, _, _)) = n"
text \<open>
The following function checks that a given list contains all the prime factors of the given
number.
\<close>
fun check_prime_factors_subset :: "nat \<Rightarrow> nat list \<Rightarrow> bool" where
"check_prime_factors_subset n [] \<longleftrightarrow> n = 1"
| "check_prime_factors_subset n (p # ps) \<longleftrightarrow> (if n = 0 then False else
(if p > 1 \<and> p dvd n then check_prime_factors_subset (n div p) (p # ps)
else check_prime_factors_subset n ps))"
lemma check_prime_factors_subset_0 [simp]: "\<not>check_prime_factors_subset 0 ps"
by (induction ps) auto
lemmas [simp del] = check_prime_factors_subset.simps(2)
lemma check_prime_factors_subset_Cons [simp]:
"check_prime_factors_subset (Suc 0) (p # ps) \<longleftrightarrow> check_prime_factors_subset (Suc 0) ps"
"check_prime_factors_subset 1 (p # ps) \<longleftrightarrow> check_prime_factors_subset 1 ps"
"p > 1 \<Longrightarrow> p dvd numeral n \<Longrightarrow> check_prime_factors_subset (numeral n) (p # ps) \<longleftrightarrow>
check_prime_factors_subset (numeral n div p) (p # ps)"
"p \<le> 1 \<or> \<not>p dvd numeral n \<Longrightarrow> check_prime_factors_subset (numeral n) (p # ps) \<longleftrightarrow>
check_prime_factors_subset (numeral n) ps"
by (subst check_prime_factors_subset.simps; force)+
lemma check_prime_factors_subset_correct:
assumes "check_prime_factors_subset n ps" "list_all prime ps"
shows "prime_factors n \<subseteq> set ps"
using assms
proof (induction n ps rule: check_prime_factors_subset.induct)
case (2 n p ps)
note * = this
from "2.prems" have "prime p" and "p > 1"
by (auto simp: prime_gt_Suc_0_nat)
consider "n = 0" | "n > 0" "p dvd n" | "n > 0" "\<not>(p dvd n)"
by blast
thus ?case
proof cases
case 2
hence "n div p > 0" by auto
hence "prime_factors ((n div p) * p) = insert p (prime_factors (n div p))"
using \<open>p > 1\<close> \<open>prime p\<close> by (auto simp: prime_factors_product prime_prime_factors)
also have "(n div p) * p = n"
using 2 by auto
finally show ?thesis using 2 \<open>p > 1\<close> *
by (auto simp: check_prime_factors_subset.simps(2)[of n])
next
case 3
with * and \<open>p > 1\<close> show ?thesis
by (auto simp: check_prime_factors_subset.simps(2)[of n])
qed auto
qed auto
fun valid_pratt_tree where
"valid_pratt_tree (Pratt_Node (n, a, ts)) \<longleftrightarrow>
n \<ge> 2 \<and>
check_prime_factors_subset (n - 1) (map pratt_tree_number ts) \<and>
[a ^ (n - 1) = 1] (mod n) \<and>
(\<forall>t\<in>set ts. [a ^ ((n - 1) div pratt_tree_number t) \<noteq> 1] (mod n)) \<and>
(\<forall>t\<in>set ts. valid_pratt_tree t)"
lemma valid_pratt_tree_code [code]:
"valid_pratt_tree (Pratt_Node (n, a, ts)) \<longleftrightarrow>
n \<ge> 2 \<and>
check_prime_factors_subset (n - 1) (map pratt_tree_number ts) \<and>
mod_exp_nat a (n - 1) n = 1 \<and>
(\<forall>t\<in>set ts. mod_exp_nat a ((n - 1) div pratt_tree_number t) n \<noteq> 1) \<and>
(\<forall>t\<in>set ts. valid_pratt_tree t)"
by (simp add: mod_exp_nat_def cong_def)
lemma valid_pratt_tree_imp_prime:
assumes "valid_pratt_tree t"
shows "prime (pratt_tree_number t)"
using assms
proof (induction t rule: valid_pratt_tree.induct)
case (1 n a ts)
from 1 have "prime_factors (n - 1) \<subseteq> set (map pratt_tree_number ts)"
by (intro check_prime_factors_subset_correct) (auto simp: list.pred_set)
with 1 show ?case
by (intro lehmers_theorem[where a = a]) auto
qed
lemma valid_pratt_tree_imp_prime':
assumes "PROP (Trueprop (valid_pratt_tree (Pratt_Node (n, a, ts)))) \<equiv> PROP (Trueprop True)"
shows "prime n"
proof -
have "valid_pratt_tree (Pratt_Node (n, a, ts))"
by (subst assms) auto
from valid_pratt_tree_imp_prime[OF this] show ?thesis by simp
qed
subsection \<open>Proof method setup\<close>
theorem lehmers_theorem':
fixes p :: nat
assumes "list_all prime ps" "a \<equiv> a" "n \<equiv> n"
assumes "list_all (\<lambda>p. mod_exp_nat a ((n - 1) div p) n \<noteq> 1) ps" "mod_exp_nat a (n - 1) n = 1"
assumes "check_prime_factors_subset (n - 1) ps" "2 \<le> n"
shows "prime n"
using assms check_prime_factors_subset_correct[OF assms(6,1)]
by (intro lehmers_theorem[where a = a]) (auto simp: cong_def mod_exp_nat_def list.pred_set)
lemma list_all_ConsI: "P x \<Longrightarrow> list_all P xs \<Longrightarrow> list_all P (x # xs)"
by simp
ML_file \<open>pratt.ML\<close>
method_setup pratt = \<open>
Scan.lift (Pratt.tac_config_parser -- Scan.option Pratt.cert_cartouche) >>
(fn (config, cert) => fn ctxt => SIMPLE_METHOD (HEADGOAL (Pratt.tac config cert ctxt)))
\<close> "Prove primality of natural numbers using Pratt certificates."
text \<open>
The proof method replays a given Pratt certificate to prove the primality of a given number.
If no certificate is given, the method attempts to compute one. The computed certificate is then
also printed with a prompt to insert it into the proof document so that it does not have to
be recomputed the next time.
The format of the certificates is compatible with those generated by Mathematica. Therefore,
for larger numbers, certificates generated by Mathematica can be used with this method directly.
\<close>
lemma "prime (47 :: nat)"
by (pratt (silent))
lemma "prime (2503 :: nat)"
by pratt
lemma "prime (7919 :: nat)"
by pratt
lemma "prime (131059 :: nat)"
by (pratt \<open>{131059, 2, {2, {3, 2, {2}}, {809, 3, {2, {101, 2, {2, {5, 2, {2}}}}}}}}\<close>)
end
|
# Part D: Comparison of toroidal meniscus models with different profile shapes
## Introduction
So far all the capillary entry pressures for the percolation examples were calculated using the ``Standard`` physics model, which uses the ``Washburn`` model for straight-walled capillary tubes. This has been shown to be a poor model for fibrous media, where the walls of throats converge and diverge. In the study [Capillary Hysteresis in Neutrally Wettable Fibrous Media: A Pore Network Study of a Fuel Cell Electrode](http://link.springer.com/10.1007/s11242-017-0973-2), percolation in fibrous media was simulated using a meniscus model that assumes the constrictions between fibers are similar to a toroid:
This model was first proposed by Purcell and treats the inner solid profile as a circle. As the fluid invades through the center of the torus, the meniscus is pinned to the surface and the "effective" contact angle is influenced by the converging-diverging geometry as a function of the filling angle $\alpha$. The shape of the meniscus as the invading phase moves upwards through the torus, together with the key model parameters, is shown below.
Different intrinsic contact angles, measured through the invading phase, are shown above: (a) 60$^\circ$, (b) 90$^\circ$ and (c) 120$^\circ$. All scenarios clearly show an inflection of the meniscus curvature, signifying a switch in the sign of the capillary pressure from negative to positive. The model predicts this inflection for all contact angles as the filling angle varies. The capillary pressure is given by:
$P_C = \frac{-2\sigma \cos(\theta-\alpha)}{r+R(1-\cos(\alpha))}$
A consequence of the circular solid profile is that all fluid behaves as non-wetting fluid: $\alpha$ ranges from -90$^\circ$ to 90$^\circ$, so even if $\theta$ is 0$^\circ$ the meniscus is still pinned at zero capillary pressure at the very end of the throat, where $\alpha$ reaches 90$^\circ$.
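As a quick numerical illustration of this relation, the following sketch (not part of the original study; the values of $\sigma$, $\theta$, $r$ and $R$ are assumptions chosen to match the setup used later) sweeps the filling angle and takes the maximum pressure, which corresponds to the burst entry pressure:
```python
# Sketch: sweep the filling angle in the Purcell relation above and take the
# maximum capillary pressure. All parameter values here are assumptions.
import numpy as np

sigma = 0.072                                 # surface tension, N/m (assumed)
theta = np.deg2rad(60)                        # intrinsic contact angle
r, R = 1e-5, 5e-6                             # throat and fiber radii, m

alpha = np.linspace(-np.pi/2, np.pi/2, 1000)  # filling angle sweep
Pc = -2*sigma*np.cos(theta - alpha) / (r + R*(1 - np.cos(alpha)))
print('burst entry pressure ~ %.0f Pa' % Pc.max())
```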
This situation can be avoided by considering other shapes of solid profile. By reformulating the Purcell model in a more general way that allows for a flexible definition of the solid profile, it will be shown that the filling angle can be limited to values below 90$^\circ$, allowing for spontaneous imbibition (a percolation threshold below zero) of highly wetting fluids.
## Set up
We will set up a trivially small network with one throat to demonstrate the use of the meniscus model. Here we do the imports and define a few functions for plotting.
```python
#from sympy import init_session, init_printing
#init_session(quiet=True)
#init_printing()
import matplotlib
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import sympy as syp
from sympy import lambdify, symbols
from sympy import atan as sym_atan
from sympy import cos as sym_cos
from sympy import sin as sym_sin
from sympy import sqrt as sym_sqrt
from sympy import pi as sym_pi
from ipywidgets import interact, fixed
from IPython.display import display
matplotlib.rcParams['figure.figsize'] = (5, 5)
```
```python
theta = 60
fiberRad = 5e-6
throatRad = 1e-5
max_bulge = 1e-5
```
Now we define our two-pore network and add the meniscus model in several modes: 'max' returns the maximum pressure experienced by the meniscus as it transitions through the throat, i.e. the burst entry pressure; 'touch' is the pressure at which the meniscus has protruded past the throat center by a distance defined by the 'touch_length' dictionary key (in network simulations this could be set to the pore diameter); finally, the 'men' mode accepts a target_Pc parameter and returns all the meniscus information required for assessing cooperative filling or plotting. A compact preview of all three modes follows the setup cell below.
```python
import openpnm as op
import openpnm.models.physics as pm
net = op.network.Cubic(shape=[2, 1, 1], spacing=5e-5)
geo = op.geometry.StickAndBall(network=net,
pores=net.pores(),
throats=net.throats())
phase = op.phases.Water(network=net)
phase['pore.contact_angle'] = theta
phys = op.physics.Standard(network=net,
phase=phase,
geometry=geo)
geo['throat.diameter'] = throatRad*2
geo['throat.touch_length'] = max_bulge
```
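As a compact preview of the three modes described above (the same calls are examined one by one in the sections below; the target_Pc of 1000 Pa is an arbitrary example value):
```python
# Preview: the circular (Purcell) meniscus model in each of its three modes.
circular_model = pm.meniscus.purcell
phys.add_model(propname='throat.max', model=circular_model,
               mode='max', r_toroid=fiberRad)     # burst entry pressure
phys.add_model(propname='throat.touch', model=circular_model,
               mode='touch', r_toroid=fiberRad)   # uses 'throat.touch_length'
phys.add_model(propname='throat.meniscus', model=circular_model,
               mode='men', r_toroid=fiberRad,
               target_Pc=1000)                    # full meniscus data at 1000 Pa
```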
We define a plotting function that uses the meniscus data:
$\alpha$ is the filling angle as defined above,
$radius$ is the radius of curvature of the meniscus,
$center$ is the position of the center of curvature relative to the throat center along the axis of the throat,
$\gamma$ is the angle between the throat axis and the line joining the meniscus center and meniscus contact point.
```python
def plot_meniscus(target_Pc, meniscus_model=None, ax=None):
throatRad = geo['throat.diameter'][0]/2
theta = np.deg2rad(phys['pore.contact_angle'][0])
throat_a = phys['throat.scale_a']
throat_b = phys['throat.scale_b']
x_points = np.arange(-0.99, 0.99, 0.01)*throat_a
if ax is None:
fig, ax = plt.subplots()
if meniscus_model.__name__ == 'purcell':
# Parameters for plotting fibers
x, R, rt, s, t = syp.symbols('x, R, rt, s, t')
y = R*syp.sqrt(1- (x/R)**2)
r = rt + (R-y)
rx = syp.lambdify((x, R, rt), r, 'numpy')
ax.plot(x_points, rx(x_points, fiberRad, throatRad), 'k-')
ax.plot(x_points, -rx(x_points, fiberRad, throatRad), 'k-')
phys.add_model(propname='throat.meniscus',
model=meniscus_model,
mode='men',
r_toroid=fiberRad,
target_Pc=target_Pc)
elif meniscus_model.__name__ == 'sinusoidal':
x, a, b, rt, sigma, theta = syp.symbols('x, a, b, rt, sigma, theta')
y = (sym_cos(sym_pi*x/(2*a)))*b
r = rt + (b-y)
rx = lambdify((x, a, b, rt), r, 'numpy')
ax.plot(x_points, rx(x_points, throat_a, throat_b, throatRad), 'k-')
ax.plot(x_points, -rx(x_points, throat_a, throat_b, throatRad), 'k-')
phys.add_model(propname='throat.meniscus',
model=meniscus_model,
mode='men',
r_toroid=fiberRad,
target_Pc=target_Pc)
else:
# General Ellipse
x, a, b, rt, sigma, theta = syp.symbols('x, a, b, rt, sigma, theta')
profile_equation = phys.models['throat.entry_pressure']['profile_equation']
if profile_equation == 'elliptical':
y = sym_sqrt(1 - (x/a)**2)*b
elif profile_equation == 'sinusoidal':
y = (sym_cos(sym_pi*x/(2*a)))*b
r = rt + (b-y)
rx = lambdify((x, a, b, rt), r, 'numpy')
ax.plot(x_points, rx(x_points, throat_a, throat_b, throatRad), 'k-')
ax.plot(x_points, -rx(x_points, throat_a, throat_b, throatRad), 'k-')
phys.add_model(propname='throat.meniscus',
model=meniscus_model,
profile_equation=profile_equation,
mode='men',
target_Pc=target_Pc)
men_data = {}
men_data['alpha'] = phys['throat.meniscus.alpha']
men_data['gamma'] = phys['throat.meniscus.gamma']
men_data['radius'] = phys['throat.meniscus.radius']
men_data['center'] = phys['throat.meniscus.center']
arc_cen = men_data['center']
arc_rad = men_data['radius']
arc_angle = men_data['gamma']
angles = np.linspace(-arc_angle, arc_angle, 100)
arcx = arc_cen + arc_rad*np.cos(angles)
arcy = arc_rad*np.sin(angles)
ax.plot(arcx, arcy, 'b-')
ax.scatter(phys['throat.meniscus.pos'], phys['throat.meniscus.rx'])
ax.axis('equal')
ax.ticklabel_format(style='sci', axis='both', scilimits=(-6,-6))
return ax
```
# Circular (Purcell)
```python
circular_model = pm.meniscus.purcell
phys.add_model(propname='throat.max',
model=circular_model,
mode='max',
r_toroid=fiberRad)
phys.add_model(propname='throat.touch',
model=circular_model,
mode='touch',
r_toroid=fiberRad)
phys.add_model(propname='throat.meniscus',
model=circular_model,
mode='men',
r_toroid=fiberRad,
target_Pc=1000)
touch_Pc = phys['throat.touch'][0]
print('Pressure at maximum bulge', np.around(touch_Pc, 0))
max_Pc_circle = phys['throat.max'][0]
print('Circular profile critical entry pressure', np.around(max_Pc_circle, 0))
```
Pressure at maximum bulge 7213.0
Circular profile critical entry pressure 8165.0
We can see that the touch_Pc calculated earlier corresponds to the tip of the meniscus reaching the max_bulge position. Try changing this and re-running to see what happens.
```python
ax = plot_meniscus(target_Pc=touch_Pc, meniscus_model=circular_model)
ax.plot([max_bulge, max_bulge], [-throatRad, throatRad], 'r--')
```
```python
ax = plot_meniscus(target_Pc=max_Pc_circle, meniscus_model=circular_model)
```
We can interact with the meniscus model by changing the target_Pc parameter.
```python
interact(plot_meniscus, target_Pc=(-2000, max_Pc_circle, 1), meniscus_model=fixed(circular_model), ax=fixed(None))
```
interactive(children=(FloatSlider(value=3082.0, description='target_Pc', max=8165.324889242946, min=-2000.0, s…
<function __main__.plot_meniscus(target_Pc, meniscus_model=None, ax=None)>
Here we can see that the critical entry pressure for the circular profile is positive, even though the intrinsic contact angle (60$^\circ$) is wetting.
# Sinusoidal
Now we can start to compare the different meniscus models:
```python
sinusoidal_model = pm.meniscus.sinusoidal
```
```python
display(sinusoidal_model)
```
<function openpnm.models.physics.meniscus.sinusoidal(target, mode='max', target_Pc=None, num_points=1000.0, r_toroid=5e-06, throat_diameter='throat.diameter', pore_diameter='pore.diameter', touch_length='throat.touch_length', surface_tension='pore.surface_tension', contact_angle='pore.contact_angle')>
```python
phys.add_model(propname='throat.meniscus',
model=sinusoidal_model,
mode='men',
r_toroid=fiberRad,
target_Pc=1000)
```
The equation for the solid sinusoidal profile is:
```python
x, a, b, rt, sigma, theta = syp.symbols('x, a, b, rt, sigma, theta')
y = (sym_cos(sym_pi*x/(2*a)))*b
r = rt + b-y
r
```
$\displaystyle - b \cos{\left(\frac{\pi x}{2 a} \right)} + b + rt$
```python
# Derivative of profile
rprime = r.diff(x)
rprime
```
$\displaystyle \frac{\pi b \sin{\left(\frac{\pi x}{2 a} \right)}}{2 a}$
```python
# Filling angle
alpha = sym_atan(rprime)
alpha
```
$\displaystyle \operatorname{atan}{\left(\frac{\pi b \sin{\left(\frac{\pi x}{2 a} \right)}}{2 a} \right)}$
```python
# angle between y axis, meniscus center and meniscus contact point
eta = sym_pi - (theta + alpha)
eta
```
$\displaystyle - \theta - \operatorname{atan}{\left(\frac{\pi b \sin{\left(\frac{\pi x}{2 a} \right)}}{2 a} \right)} + \pi$
```python
# angle between x axis, meniscus center and meniscus contact point
gamma = sym_pi/2 - eta
gamma
```
$\displaystyle \theta + \operatorname{atan}{\left(\frac{\pi b \sin{\left(\frac{\pi x}{2 a} \right)}}{2 a} \right)} - \frac{\pi}{2}$
```python
# Radius of curvature of meniscus
rm = r/sym_cos(eta)
rm
```
$\displaystyle - \frac{- b \cos{\left(\frac{\pi x}{2 a} \right)} + b + rt}{\cos{\left(\theta + \operatorname{atan}{\left(\frac{\pi b \sin{\left(\frac{\pi x}{2 a} \right)}}{2 a} \right)} \right)}}$
```python
# distance along x-axis from center of curvature to meniscus contact point
d = rm*sym_sin(eta)
d
```
$\displaystyle - \frac{\left(- b \cos{\left(\frac{\pi x}{2 a} \right)} + b + rt\right) \sin{\left(\theta + \operatorname{atan}{\left(\frac{\pi b \sin{\left(\frac{\pi x}{2 a} \right)}}{2 a} \right)} \right)}}{\cos{\left(\theta + \operatorname{atan}{\left(\frac{\pi b \sin{\left(\frac{\pi x}{2 a} \right)}}{2 a} \right)} \right)}}$
```python
# Capillary Pressure
p = 2*sigma/rm
p
```
$\displaystyle - \frac{2 \sigma \cos{\left(\theta + \operatorname{atan}{\left(\frac{\pi b \sin{\left(\frac{\pi x}{2 a} \right)}}{2 a} \right)} \right)}}{- b \cos{\left(\frac{\pi x}{2 a} \right)} + b + rt}$
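To connect this symbolic expression with the numbers reported below, a small sketch (assuming $\sigma = 0.072\,$N/m and, for illustration, $a = b =$ fiberRad) evaluates $p$ along the throat axis and takes its maximum, which should land near the 'max' mode result computed next:
```python
# Sketch: numeric maximum of the symbolic capillary pressure derived above.
# sigma = 0.072 N/m and a = b = fiberRad are assumptions for illustration.
p_num = lambdify((x, a, b, rt, sigma, theta), p, 'numpy')
x_vals = np.linspace(-0.99, 0.99, 1001) * fiberRad
Pc_vals = p_num(x_vals, fiberRad, fiberRad, throatRad, 0.072, np.deg2rad(60))
print('max sinusoidal Pc ~ %.0f Pa' % Pc_vals.max())
```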
```python
phys.add_model(propname='throat.max',
model=sinusoidal_model,
mode='max',
r_toroid=fiberRad)
phys.add_model(propname='throat.touch',
model=sinusoidal_model,
mode='touch',
r_toroid=fiberRad)
max_Pc_sin = phys['throat.max'][0]
print(max_Pc_sin)
```
4729.770413396985
```python
plot_meniscus(target_Pc=max_Pc_sin, meniscus_model=sinusoidal_model)
```
```python
interact(plot_meniscus, target_Pc=(-2000, max_Pc_sin, 1), meniscus_model=fixed(sinusoidal_model), ax=fixed(None))
```
interactive(children=(FloatSlider(value=1364.0, description='target_Pc', max=4729.770413396985, min=-2000.0, s…
<function __main__.plot_meniscus(target_Pc, meniscus_model=None, ax=None)>
Now the critical entry pressure is considerably lower than for the circular profile, and for shallower wall angles (see the parameter sweep below) it becomes negative, signifying spontaneous imbibition.
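A side-by-side print of the two critical pressures computed so far makes the contrast explicit:
```python
# Compare the circular and sinusoidal critical entry pressures from above.
print('circular profile  :', np.around(max_Pc_circle, 0))
print('sinusoidal profile:', np.around(max_Pc_sin, 0))
```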
# General Elliptical
Similarly we can define an elliptical profile and use the same method to determine the capillary pressure:
```python
y = sym_sqrt(1 - (x/a)**2)*b
y
```
$\displaystyle b \sqrt{1 - \frac{x^{2}}{a^{2}}}$
In fact, this is the model that OpenPNM uses for Purcell as well, with $a = b =$ fiber radius.
```python
# Scale ellipse in x direction
phys['throat.scale_a'] = fiberRad
# Scale ellipse in y direction
phys['throat.scale_b'] = fiberRad
general_model = pm.meniscus.general_toroidal
phys.add_model(propname='throat.entry_pressure',
model=general_model,
profile_equation='elliptical',
mode='max')
max_Pc_ellipse = phys['throat.entry_pressure'][0]
print(max_Pc_ellipse)
```
8165.324889242946
```python
plot_meniscus(target_Pc=max_Pc_ellipse, meniscus_model=general_model)
```
```python
max_Pc_ellipse
```
8165.324889242946
```python
interact(plot_meniscus, target_Pc=(-2000, max_Pc_ellipse, 1), meniscus_model=fixed(general_model), ax=fixed(None))
```
interactive(children=(FloatSlider(value=3082.0, description='target_Pc', max=8165.324889242946, min=-2000.0, s…
<function __main__.plot_meniscus(target_Pc, meniscus_model=None, ax=None)>
The two scale factors can now be used to determine a wide range of capillary behaviours with one general model. Below we run the model for a range of scaling factors showing the effect on the sign and magnitude of the entry pressure.
```python
bs = np.linspace(0.2, 1.0, 4)*throatRad
phys['throat.scale_a'] = throatRad
elliptical_pressures = []
sinusoidal_pressures = []
fig, (ax1, ax2) = plt.subplots(2, len(bs), figsize=(10, 10))
for i in range(len(bs)):
phys['throat.scale_b'] = bs[i]
phys.add_model(propname='throat.entry_pressure',
model=general_model,
profile_equation='elliptical',
mode='max',
num_points=1000)
Pc = phys['throat.entry_pressure']
elliptical_pressures.append(Pc)
plot_meniscus(target_Pc=Pc, meniscus_model=general_model, ax=ax1[i])
for i in range(len(bs)):
phys['throat.scale_b'] = bs[i]
phys.add_model(propname='throat.entry_pressure',
model=general_model,
profile_equation='sinusoidal',
mode='max',
num_points=1000)
Pc = phys['throat.entry_pressure']
sinusoidal_pressures.append(Pc)
plot_meniscus(target_Pc=Pc, meniscus_model=general_model, ax=ax2[i])
```
```python
plt.figure()
plt.plot(bs/throatRad, elliptical_pressures, 'g-')
plt.plot(bs/throatRad, sinusoidal_pressures, 'r-')
```
Here we can see that the two differently shaped profiles lead to quite different capillary behaviour. The elliptical profile always results in positive pressure, and the meniscus is essentially pinned to the end of the throat, where the highest pressure occurs, since $\alpha$ always reaches 90$^\circ$. The sinusoidal model, in contrast, allows for spontaneous imbibition, where breakthrough may occur at negative capillary pressure for wetting fluids if the wall angle is shallow.
|
module Hello where
open import IO using (run; putStrLn)
import IO.Primitive as Prim using (IO)
open import Data.Nat using (ℕ)
import Data.Nat.Show as Nat using (show)
open import Data.Unit using (⊤) -- This is ⊤ (top), not an upper-case 'T'
open import Data.String using (_++_)
age : ℕ
age = 28
main : Prim.IO ⊤
main = run (putStrLn ("Hello World! I'm " ++ Nat.show age))
|
[STATEMENT]
lemma [simp]:
"pc \<notin> pcs xt\<^sub>0 \<Longrightarrow> match_ex_table P C pc (xt\<^sub>0 @ xt\<^sub>1) = match_ex_table P C pc xt\<^sub>1"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. pc \<notin> pcs xt\<^sub>0 \<Longrightarrow> match_ex_table P C pc (xt\<^sub>0 @ xt\<^sub>1) = match_ex_table P C pc xt\<^sub>1
[PROOF STEP]
by (induct xt\<^sub>0) (auto simp: matches_ex_entry_def)
|
[STATEMENT]
lemma append_cols_mult_right_id2:
assumes A: "(A::'a::semiring_1 mat) \<in> carrier_mat n a"
and B: "B \<in> carrier_mat n b"
and C: "C = four_block_mat D (0\<^sub>m a b) (0\<^sub>m b a) (1\<^sub>m b)"
and D: "D \<in> carrier_mat a a"
shows "(A @\<^sub>c B) * C = (A * D) @\<^sub>c B"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (A @\<^sub>c B) * C = A * D @\<^sub>c B
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. (A @\<^sub>c B) * C = A * D @\<^sub>c B
[PROOF STEP]
let ?C = "four_block_mat D (0\<^sub>m a b) (0\<^sub>m b a) (1\<^sub>m b)"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. (A @\<^sub>c B) * C = A * D @\<^sub>c B
[PROOF STEP]
have "(A @\<^sub>c B) * C = (A @\<^sub>c B) * ?C"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (A @\<^sub>c B) * C = (A @\<^sub>c B) * four_block_mat D (0\<^sub>m a b) (0\<^sub>m b a) (1\<^sub>m b)
[PROOF STEP]
unfolding C
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (A @\<^sub>c B) * four_block_mat D (0\<^sub>m a b) (0\<^sub>m b a) (1\<^sub>m b) = (A @\<^sub>c B) * four_block_mat D (0\<^sub>m a b) (0\<^sub>m b a) (1\<^sub>m b)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
(A @\<^sub>c B) * C = (A @\<^sub>c B) * four_block_mat D (0\<^sub>m a b) (0\<^sub>m b a) (1\<^sub>m b)
goal (1 subgoal):
1. (A @\<^sub>c B) * C = A * D @\<^sub>c B
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
(A @\<^sub>c B) * C = (A @\<^sub>c B) * four_block_mat D (0\<^sub>m a b) (0\<^sub>m b a) (1\<^sub>m b)
goal (1 subgoal):
1. (A @\<^sub>c B) * C = A * D @\<^sub>c B
[PROOF STEP]
have "... = four_block_mat A B (0\<^sub>m 0 a) (0\<^sub>m 0 b) * ?C"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (A @\<^sub>c B) * four_block_mat D (0\<^sub>m a b) (0\<^sub>m b a) (1\<^sub>m b) = four_block_mat A B (0\<^sub>m 0 a) (0\<^sub>m 0 b) * four_block_mat D (0\<^sub>m a b) (0\<^sub>m b a) (1\<^sub>m b)
[PROOF STEP]
unfolding append_cols_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. four_block_mat A B (0\<^sub>m 0 (dim_col A)) (0\<^sub>m 0 (dim_col B)) * four_block_mat D (0\<^sub>m a b) (0\<^sub>m b a) (1\<^sub>m b) = four_block_mat A B (0\<^sub>m 0 a) (0\<^sub>m 0 b) * four_block_mat D (0\<^sub>m a b) (0\<^sub>m b a) (1\<^sub>m b)
[PROOF STEP]
using A B
[PROOF STATE]
proof (prove)
using this:
A \<in> carrier_mat n a
B \<in> carrier_mat n b
goal (1 subgoal):
1. four_block_mat A B (0\<^sub>m 0 (dim_col A)) (0\<^sub>m 0 (dim_col B)) * four_block_mat D (0\<^sub>m a b) (0\<^sub>m b a) (1\<^sub>m b) = four_block_mat A B (0\<^sub>m 0 a) (0\<^sub>m 0 b) * four_block_mat D (0\<^sub>m a b) (0\<^sub>m b a) (1\<^sub>m b)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
(A @\<^sub>c B) * four_block_mat D (0\<^sub>m a b) (0\<^sub>m b a) (1\<^sub>m b) = four_block_mat A B (0\<^sub>m 0 a) (0\<^sub>m 0 b) * four_block_mat D (0\<^sub>m a b) (0\<^sub>m b a) (1\<^sub>m b)
goal (1 subgoal):
1. (A @\<^sub>c B) * C = A * D @\<^sub>c B
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
(A @\<^sub>c B) * four_block_mat D (0\<^sub>m a b) (0\<^sub>m b a) (1\<^sub>m b) = four_block_mat A B (0\<^sub>m 0 a) (0\<^sub>m 0 b) * four_block_mat D (0\<^sub>m a b) (0\<^sub>m b a) (1\<^sub>m b)
goal (1 subgoal):
1. (A @\<^sub>c B) * C = A * D @\<^sub>c B
[PROOF STEP]
have "... = four_block_mat (A * D + B * 0\<^sub>m b a) (A * 0\<^sub>m a b + B * 1\<^sub>m b)
(0\<^sub>m 0 a * D + 0\<^sub>m 0 b * 0\<^sub>m b a) (0\<^sub>m 0 a * 0\<^sub>m a b + 0\<^sub>m 0 b * 1\<^sub>m b)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. four_block_mat A B (0\<^sub>m 0 a) (0\<^sub>m 0 b) * four_block_mat D (0\<^sub>m a b) (0\<^sub>m b a) (1\<^sub>m b) = four_block_mat (A * D + B * 0\<^sub>m b a) (A * 0\<^sub>m a b + B * 1\<^sub>m b) (0\<^sub>m 0 a * D + 0\<^sub>m 0 b * 0\<^sub>m b a) (0\<^sub>m 0 a * 0\<^sub>m a b + 0\<^sub>m 0 b * 1\<^sub>m b)
[PROOF STEP]
by (rule mult_four_block_mat, insert A B C D, auto)
[PROOF STATE]
proof (state)
this:
four_block_mat A B (0\<^sub>m 0 a) (0\<^sub>m 0 b) * four_block_mat D (0\<^sub>m a b) (0\<^sub>m b a) (1\<^sub>m b) = four_block_mat (A * D + B * 0\<^sub>m b a) (A * 0\<^sub>m a b + B * 1\<^sub>m b) (0\<^sub>m 0 a * D + 0\<^sub>m 0 b * 0\<^sub>m b a) (0\<^sub>m 0 a * 0\<^sub>m a b + 0\<^sub>m 0 b * 1\<^sub>m b)
goal (1 subgoal):
1. (A @\<^sub>c B) * C = A * D @\<^sub>c B
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
four_block_mat A B (0\<^sub>m 0 a) (0\<^sub>m 0 b) * four_block_mat D (0\<^sub>m a b) (0\<^sub>m b a) (1\<^sub>m b) = four_block_mat (A * D + B * 0\<^sub>m b a) (A * 0\<^sub>m a b + B * 1\<^sub>m b) (0\<^sub>m 0 a * D + 0\<^sub>m 0 b * 0\<^sub>m b a) (0\<^sub>m 0 a * 0\<^sub>m a b + 0\<^sub>m 0 b * 1\<^sub>m b)
goal (1 subgoal):
1. (A @\<^sub>c B) * C = A * D @\<^sub>c B
[PROOF STEP]
have "... = four_block_mat (A * D) B (0\<^sub>m 0 (dim_col (A*D))) (0\<^sub>m 0 (dim_col B))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. four_block_mat (A * D + B * 0\<^sub>m b a) (A * 0\<^sub>m a b + B * 1\<^sub>m b) (0\<^sub>m 0 a * D + 0\<^sub>m 0 b * 0\<^sub>m b a) (0\<^sub>m 0 a * 0\<^sub>m a b + 0\<^sub>m 0 b * 1\<^sub>m b) = four_block_mat (A * D) B (0\<^sub>m 0 (dim_col (A * D))) (0\<^sub>m 0 (dim_col B))
[PROOF STEP]
by (rule cong_four_block_mat, insert assms, auto)
[PROOF STATE]
proof (state)
this:
four_block_mat (A * D + B * 0\<^sub>m b a) (A * 0\<^sub>m a b + B * 1\<^sub>m b) (0\<^sub>m 0 a * D + 0\<^sub>m 0 b * 0\<^sub>m b a) (0\<^sub>m 0 a * 0\<^sub>m a b + 0\<^sub>m 0 b * 1\<^sub>m b) = four_block_mat (A * D) B (0\<^sub>m 0 (dim_col (A * D))) (0\<^sub>m 0 (dim_col B))
goal (1 subgoal):
1. (A @\<^sub>c B) * C = A * D @\<^sub>c B
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
four_block_mat (A * D + B * 0\<^sub>m b a) (A * 0\<^sub>m a b + B * 1\<^sub>m b) (0\<^sub>m 0 a * D + 0\<^sub>m 0 b * 0\<^sub>m b a) (0\<^sub>m 0 a * 0\<^sub>m a b + 0\<^sub>m 0 b * 1\<^sub>m b) = four_block_mat (A * D) B (0\<^sub>m 0 (dim_col (A * D))) (0\<^sub>m 0 (dim_col B))
goal (1 subgoal):
1. (A @\<^sub>c B) * C = A * D @\<^sub>c B
[PROOF STEP]
have "... = (A * D) @\<^sub>c B"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. four_block_mat (A * D) B (0\<^sub>m 0 (dim_col (A * D))) (0\<^sub>m 0 (dim_col B)) = A * D @\<^sub>c B
[PROOF STEP]
unfolding append_cols_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. four_block_mat (A * D) B (0\<^sub>m 0 (dim_col (A * D))) (0\<^sub>m 0 (dim_col B)) = four_block_mat (A * D) B (0\<^sub>m 0 (dim_col (A * D))) (0\<^sub>m 0 (dim_col B))
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
four_block_mat (A * D) B (0\<^sub>m 0 (dim_col (A * D))) (0\<^sub>m 0 (dim_col B)) = A * D @\<^sub>c B
goal (1 subgoal):
1. (A @\<^sub>c B) * C = A * D @\<^sub>c B
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
(A @\<^sub>c B) * C = A * D @\<^sub>c B
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
(A @\<^sub>c B) * C = A * D @\<^sub>c B
goal (1 subgoal):
1. (A @\<^sub>c B) * C = A * D @\<^sub>c B
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
(A @\<^sub>c B) * C = A * D @\<^sub>c B
goal:
No subgoals!
[PROOF STEP]
qed
|
#pragma once
#include <glm/glm.hpp>
#include <gsl/span>
#include <array> // std::array, used by WavefrontObject::IndexedTriangle
#include <memory>
#include <string>
#include <string_view>
#include <vector>
namespace rev {
class IndexedModel;
class ObjFile {
public:
ObjFile(const std::string& path);
struct WavefrontObject {
std::string materialName;
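// Each triangle vertex is a glm::uvec3 of (position, texture-coordinate, normal) indices.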
using IndexedTriangle = std::array<glm::uvec3, 3>;
std::vector<IndexedTriangle> triangles;
};
gsl::span<const WavefrontObject> getWavefrontObjects() const;
const glm::vec3& positionAtIndex(size_t index) const;
const glm::vec2& textureCoordinateAtIndex(size_t index) const;
const glm::vec3& normalAtIndex(size_t index) const;
private:
void processLine(const std::string& line);
std::vector<WavefrontObject> _wfObjects;
std::vector<glm::vec3> _positions;
std::vector<glm::vec2> _textureCoordinates;
std::vector<glm::vec3> _normals;
};
} // namespace rev
|
[STATEMENT]
lemma sel_r_eq_ldeep_s_if_valid_fwd:
"\<lbrakk>r \<in> verts G; valid_tree t; directed_tree.forward (dir_tree_r r) (inorder t)\<rbrakk>
\<Longrightarrow> sel_r r = ldeep_s match_sel (revorder t)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>r \<in> verts G; valid_tree t; directed_tree.forward (dir_tree_r r) (inorder t)\<rbrakk> \<Longrightarrow> sel_r r = ldeep_s match_sel (revorder t)
[PROOF STEP]
unfolding valid_tree_def distinct_relations_def inorder_eq_set[symmetric] revorder_eq_rev_inorder
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>r \<in> verts G; set (inorder t) = verts G \<and> distinct (inorder t); directed_tree.forward (dir_tree_r r) (inorder t)\<rbrakk> \<Longrightarrow> sel_r r = ldeep_s match_sel (rev (inorder t))
[PROOF STEP]
using sel_r_eq_ldeep_s_if_dst_fwd_verts
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>?r \<in> verts G; distinct ?xs; directed_tree.forward (dir_tree_r ?r) ?xs; set ?xs = verts G\<rbrakk> \<Longrightarrow> sel_r ?r = ldeep_s match_sel (rev ?xs)
goal (1 subgoal):
1. \<lbrakk>r \<in> verts G; set (inorder t) = verts G \<and> distinct (inorder t); directed_tree.forward (dir_tree_r r) (inorder t)\<rbrakk> \<Longrightarrow> sel_r r = ldeep_s match_sel (rev (inorder t))
[PROOF STEP]
by blast
|
\documentclass[12pt,titlepage]{amsart}
\title{An $\alpha$-$\beta$ Monte-Carlo Engine for the Game of Hex}
\author{Eric Vaitl\\November 20, 2017\\CSCI 6550}
\usepackage[margin=1in]{geometry} % margins
\usepackage{placeins} % floatbarrier
\usepackage{setspace} % double spacing
\usepackage{listings} % code listings
\usepackage{hyperref} % \url
\usepackage{havannah} % hex boards
\doublespacing
\begin{document}
\begin{abstract}
This paper presents an $\alpha-\beta$ game engine with a simple
Monte-Carlo heuristic for the game of Hex. A sample implementation
was written and is attached in the appendix. The mathematical
background, other approaches, and heuristics are discussed and
compared.
\end{abstract}
\maketitle
\section{Introduction}
First, we'll introduce the rules of Hex, some mathematical background, and the
history of the game.
Next is a walk-through of the interesting portions of the code: the $\alpha-\beta$
routine, the child search, and the evaluation heuristic used.
Other heuristic options will be briefly looked at, followed by a description
of possible future work on this code base.
\section{The Game}
The game is played on a rhombus board made of hexagons:
\begin{HexBoard}[board size=5]
\end{HexBoard}
The standard size for game play is 11x11. This is a turn-based game. Each turn,
the player occupies one currently unoccupied location. The object for
each player is to connect the two sides of the player's color by occupying a
connected set of locations to form a chain \cite{Maarup05}.
Because the first player has a distinct advantage, some versions of
the game have a \emph{swap rule}. If the swap rule is in effect, the
second player can swap places with the first player right after the
first player makes his first move. This forces the first player to
make a less than optimal first move or play at a disadvantage.
In the task environment classification scheme used in \cite{RN:AIAMA:2003}, Hex
is a fully observable, multi-agent, deterministic, sequential, static, discrete
game.
\section{History}
Hex is a simple game and has been ``discovered'' at least twice. In 1942-1943
Piet Hein wrote a number of columns about Hex, which he called Polygon, in the
Danish newspaper \emph{Politiken}. Hein had a printer print up small books of
empty Hex sheets for sale. They were advertised for among other things, as a
diversion while spending a night in the public bomb shelters. The newspaper
columns petered out in mid 1943, either because of loss of interest or because
of more pressing issues (Hein's wife was Jewish).
In 1949 John Nash independently invented the game while he was a graduate
student at Princeton \cite{Nash:1952:Games}. Apparently the game became quite
popular among the other students after one of the graduate students created a
Hex board that was available in the Fine Hall common room.
At Princeton and later at Bell Labs, John Nash worked with Claude Shannon.
Shannon created an electrical analog machine that played Hex pretty well. The
board was set up as a resistance network with resistances changed based on the
moves made. The next move was made to the position with the highest voltage
drop.
Shannon's Hex machine work was well before the 1956 Dartmouth conference, which
is often regarded as the birth of AI. However, the cold war was in effect and
people working with the new electronic computers were overly optimistic about
what could be done with the simple schemes available. I thought the closing
paragraph of \cite{Nash:1952:Games} was quite enlightening:
\begin{quote}
If a military engagement be regarded as a game, such an approach might be useful
in suggesting an overall strategy. Of course, it is not probable that any analog
method quite as simple as those described above would be very useful for most
military applications.
\end{quote}
Before moving on, I'd like to point out how cool it is that the
pioneers of Game Theory (Nash) and Information Theory (Shannon)
collaborated to create a very passable AI program, several years
before AI existed as a separate discipline, using little more than
a plug-board, a volt meter, and a box of resistors. From \cite{Nash:1952:Games}:
\begin{quote}
Shannon reports that in their experience the machine has never been
beaten if given the first move. But its infallibility has not been
proved.
\end{quote}
In 1953 Parker Brothers released Hex as a board game. Unfortunately it
is currently out of print. Hugo Piet Hein, son of Piet Hein, has a
little company that sells Hex boards
today\footnote{\url{http://www.hexboard.com/}}.
Hex has appeared several times in the Computer Olympiads between 2000
and 2011. The last computer champion was MoHex in 2011
\cite{Henderson:2010}.
\section{Math}
Quite a bit of theory from different domains applies to Hex. Game theory,
algebraic topology, AI, and computational complexity theory are some of the
areas that have been used to analyze this simple game.
Draw a square and color two opposite sides white and the two remaining sides
black. Something that seems obvious is that if you connect two sides of the same
color with an interior line, it is not possible to also connect the remaining
two sides with an interior line unless the two lines cross. However, the
definitive proof requires the Jordan Curve Theorem from topology.
A less obvious result is that if you fill the central square with the two colors,
at least one pair of sides must be connected by a continuous line of the same
color. The easiest known proof of this seems to involve the Brouwer Fixed-Point
Theorem of algebraic topology \cite{Gale:1980} and didn't appear until 1979. An
earlier, longer proof using the Jordan Curve Theorem seems to have been
discovered in 1969.
So, topology gives us two results for a completed Hex game:
\begin{itemize}
\item In a completed Hex board one pair of sides must be connected.
\item In a completed Hex board, if one pair of sides are connected, the other
pair is not.
\end{itemize}
With both results, we can say that there are no draws. One side will win and the
other side will lose.
In 1949, John Nash took it as given, without a complete proof, that there were
no draws, and used a strategy-stealing argument from Game Theory (a field he
helped create) to prove that with perfect play the first player in a Hex game
would always win. His proof was by contradiction: an extra piece on the board
never makes your position worse. Assume that with perfect play the second
player could win. The first player could then place one arbitrary piece and
afterwards follow the second player's winning strategy; whenever that strategy
calls for a square he already occupies, he plays another arbitrary square.
Since the extra piece never hurts him, he wins, which contradicts the
assumption that the second player wins. So with perfect play the first player
must win.
In terms of time, the best known algorithms for solving Hex are
intractable (no polynomial time solution). However, in terms of space,
if we had an oracle to tell us if a position was winnable by white, we
could save the perfect play game tree in polynomial space. In
computational complexity theory, PSPACE is the set of decision
problems that can be solved in a polynomial amount of space. It turns
out that Hex is not only a PSPACE problem, but was shown in 1981 to be
PSPACE complete \cite{Reisch1981}. This means that any other PSPACE
problem can be reduced in polynomial time to an equivalent Hex game of
some size. As $\mathcal{NP} \subseteq \mathrm{PSPACE}$, a good Hex-playing
program at least theoretically has other significant uses.
\section{The Program}
\FloatBarrier
The code is written in C++11, but I'm compiling with C++14 with GNU extensions.
It has been tested with both g++ 7.2.0 and clang++ 5.0.0 on my Linux box
(manjaro).
On invocation, you can optionally specify the board size, search depth, and
branching factor.
\subsection{$\alpha-\beta$}
The $\alpha-\beta$ algorithm is implemented in listing \ref{lst:ab}. It is a bit
more complicated than the reference $\alpha-\beta$ algorithm in
\cite{RN:AIAMA:2003} because there are a few more details that have to be taken
care of in real code.
We don't just need the value of the search tree, but we need the actual move to
make. We could do that with a global that is set before a return and count on
the top level call returning last, but I am allergic to globals and decided to
return a C++ \texttt{pair$<$value,move$>$} instead. The pair is aliased with the
C++11 \texttt{using} directive to both \texttt{opens\_elem} and
\texttt{alpha\_beta\_value}. A good argument could be made that we should use
the same typename for both cases, but I wasn't sure of that when I was writing
the code.
It isn't normally possible to evaluate the full tree, so we pass a
\texttt{depth} parameter that is decremented on each recursive call. When the
depth reaches 0, we just return the \texttt{leaf\_eval} of the node and set the
move to an illegal value (-1).
The same function is used for both minimum and maximum nodes. The boolean
\texttt{maximize} parameter is used to differentiate between the two.
\texttt{maximize} is passed to \texttt{find\_children} to sort and select
children nodes and is used locally to determine if we are comparing with
$\alpha$ or $\beta$.
\singlespacing
\begin{lstlisting}[language=C++,float,label={lst:ab},basicstyle=\small,
caption=$\alpha-\beta$]
alpha_beta_value alpha_beta(int depth,
bool maximize,
double alpha,
double beta){
node_state next_turn = maximize ? node_state::HUMAN : node_state::COMPUTER;
board &b = *pb;
vector<opens_elem> opens = find_children(depth, maximize);
// If there are no children or we are at our search depth, just return
// current eval.
if (depth <= 0 || opens.size() == 0) {
return alpha_beta_value(leaf_eval(b, next_turn), -1);
}
// We now have up to branch-size children to check.
double v = maximize ? -numeric_limits<double>::max() :
numeric_limits<double>::max();
int move = 0;
for (auto m: opens) {
int idx = m.second;
b(idx) = maximize ? node_state::COMPUTER : node_state::HUMAN;
auto ab = alpha_beta(depth - 1, !maximize, alpha, beta);
if (maximize) {
if (ab.first > v) {
move = idx;
v = ab.first;
}
alpha = max(alpha, v);
}else{
if (ab.first < v) {
move = idx;
v = ab.first;
}
beta = min(v, beta);
}
b(idx) = node_state::OPEN;
if (beta <= alpha) {
break;
}
}
return alpha_beta_value(v, move);
}
\end{lstlisting}
\doublespacing
\subsection{find\_children}
The \texttt{find\_children} implementation is in listing \ref{lst:fc}.
Like the game Go, Hex can have a very large branching factor. On an n$\times$n board,
the branching factor starts at $n^2$ and decreases by 1 for each move.
The branching factor for searching is set by the global \texttt{branch\_factor},
which I justify because it is set just once at program startup.
\texttt{find\_children} returns the best (or worst) \texttt{branch\_factor}
candidates based on the boolean \texttt{maximize}.
I left open the possibility of two different evaluation functions depending on
whether we are evaluating a leaf (\texttt{leaf\_eval}) or choosing interior
children for branching (\texttt{branch\_eval}). The current program actually
uses the same evaluation function for both.
After evaluating all children of the current node, the top
\texttt{branch\_factor} children of an interior node are selected and returned.
If we are in a leaf node (because \texttt{depth}$\leq 1$), then just one node is
selected and returned.
Sorting is inlined using C++11 lambda functions, which are passed to the
standard template \texttt{sort} routine. The use of \texttt{vector} instead of
traditional C arrays makes memory management much easier. C++11 r-value
references make passing vectors much more efficient than it was in pre-C++11
days.
\singlespacing
\begin{lstlisting}[language=C++,float,label={lst:fc},
basicstyle=\small,caption=find\_children]
vector<opens_elem> find_children(int depth, bool maximize){
board &b = *pb;
int size = b.size();
vector<opens_elem> opens;
auto sortfn = maximize ?
[](opens_elem p1, opens_elem p2) {
return p1.first > p2.first;
} :
[](opens_elem p1, opens_elem p2) {
return p1.first < p2.first;
};
auto eval = depth <= 1 ? leaf_eval : branch_eval;
for (int idx = 0; idx < size * size; ++idx) {
if (b(idx) == node_state::OPEN) {
b(idx) = maximize ? node_state::COMPUTER : node_state::HUMAN;
double e = eval(b,
(maximize ? node_state::HUMAN :
node_state::COMPUTER));
b(idx) = node_state::OPEN;
opens.push_back(opens_elem(e, idx));
}
}
sort(opens.begin(), opens.end(), sortfn);
if (depth <= 1) {
opens.resize(min(1, int(opens.size())));
} else{
opens.resize(min(branch_factor, int(opens.size())));
}
return opens;
}
\end{lstlisting}
\doublespacing
\subsection{eval}
The evaluation function for this program is the function \texttt{rwins} in
Listing \ref{lst:eval}. It is almost stupidly simple. It fills out the board at
random, as if two idiots were playing, \texttt{MC\_TRIALS} times, and records
the fraction of trials the computer wins. The result is centred on zero: if the
computer wins every trial the result is $0.5$; if the human wins every trial it
is $-0.5$.
The \texttt{check\_winner} class is a little more complicated. It does a
depth-first search of the board from each location in the first column and sees
if any location in the last column is reachable. I had considered using a
union-find structure, but the depth-first search seems as fast in practice.
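For concreteness, here is a minimal sketch of that reachability test. It is
illustrative only: the \texttt{occ} grid marking the computer's stones is a
hypothetical stand-in for the program's actual \texttt{board} type, and the
neighbour offsets assume the rhombus layout shown earlier.
\singlespacing
\begin{lstlisting}[language=C++,basicstyle=\small,
    caption={Reachability sketch (illustrative only)}]
#include <vector>

// The six hex neighbours of (r, c) on a rhombus board.
static const int DR[6] = { -1, -1, 0, 0, 1, 1 };
static const int DC[6] = { 0, 1, -1, 1, -1, 0 };

static bool dfs(const std::vector<std::vector<bool>> &occ,
                std::vector<std::vector<bool>> &seen, int r, int c){
    const int n = static_cast<int>(occ.size());
    if (r < 0 || r >= n || c < 0 || c >= n || !occ[r][c] || seen[r][c]) {
        return false;
    }
    if (c == n - 1) { // reached the last column: the sides are connected
        return true;
    }
    seen[r][c] = true;
    for (int k = 0; k < 6; ++k) {
        if (dfs(occ, seen, r + DR[k], c + DC[k])) {
            return true;
        }
    }
    return false;
}

bool computer_wins(const std::vector<std::vector<bool>> &occ){
    const int n = static_cast<int>(occ.size());
    std::vector<std::vector<bool>> seen(n, std::vector<bool>(n, false));
    for (int r = 0; r < n; ++r) { // start from every first-column stone
        if (dfs(occ, seen, r, 0)) {
            return true;
        }
    }
    return false;
}
\end{lstlisting}
\doublespacing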
\singlespacing
\begin{lstlisting}[language=C++,float,
label={lst:eval},basicstyle=\small,
caption=Evaluation Function]
static double rwins(const board &b,
node_state next_turn = node_state::COMPUTER){
int wins{ 0 };
for (int i = 0; i < MC_TRIALS; ++i) {
board lb(b);
fill_board(lb, next_turn);
if (check_winner(lb).winner() == winner_state::COMPUTER) {
++wins;
}
}
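// Centre the win fraction on zero: the returned score ranges over [-0.5, 0.5].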
return (wins - MC_TRIALS / 2.0) / MC_TRIALS;
}
\end{lstlisting}
\doublespacing
\FloatBarrier
\section{Sample Play}
Here is a sample play with a board size of 4, search depth of 4, and branching
factor of 5.
For such a simple program, it is a surprisingly good player. A single mistake by
the human player is usually enough to cause a loss:
\singlespacing
\begin{verbatim}
$ ./hex -s 4 -d 4 -f 5
C
1 2 3 4
1 . - . - . - .
\ / \ / \ / \
2 . - . - . - .
\ / \ / \ / \
H 3 . - . - . - . H
\ / \ / \ / \
4 . - . - . - .
1 2 3 4
C
Input space separated row and column: 2 2
C
1 2 3 4
1 . - . - . - .
\ / \ / \ / \
2 . - H - C - .
\ / \ / \ / \
H 3 . - . - . - . H
\ / \ / \ / \
4 . - . - . - .
1 2 3 4
C
Input space separated row and column: 3 3
C
1 2 3 4
1 . - . - . - .
\ / \ / \ / \
2 . - H - C - .
\ / \ / \ / \
H 3 . - C - H - . H
\ / \ / \ / \
4 . - . - . - .
1 2 3 4
C
Input space separated row and column: 1 4
C
1 2 3 4
1 . - . - C - H
\ / \ / \ / \
2 . - H - C - .
\ / \ / \ / \
H 3 . - C - H - . H
\ / \ / \ / \
4 . - . - . - .
1 2 3 4
C
Input space separated row and column: 4 2
C
1 2 3 4
1 . - . - C - H
\ / \ / \ / \
2 . - H - C - .
\ / \ / \ / \
H 3 . - C - H - . H
\ / \ / \ / \
4 C - H - . - .
1 2 3 4
C
winner: computer
\end{verbatim}
\doublespacing
\section{Other Heuristics}
The traditional heuristic is the resistance network used by Shannon. It may
however be a bit harder to implement in software than in hardware. While
writing the code for this project, I looked at the source code for Wolve, which
was the 2008 Computer Olympiad winner for Hex. When more advanced searches and
pattern matches fail, Wolve falls back to a resistance measure. Wolve uses a
non-standard linear algebra library which makes a best guess when trying to
solve for an under-specified set of linear equations. Unfortunately, Wolve
doesn't seem to go through the work required to get the independent equations,
so the results it gets from the resistance heuristic are suspect.
The graphical equivalent of
\begin{HexBoard}[board size=5]
\end{HexBoard}
is
\begin{tikzpicture}
\draw[step=1cm] (-2,-2) grid (2,2);
\draw (-2,-2) -- (2,2);
\draw (-2, -1) -- (1, 2);
\draw (-2, 0) -- (0, 2);
\draw (-2, 1) -- (-1, 2);
\draw (-1, -2) -- (2, 1);
\draw (0, -2) -- (2, 0);
\draw (1, -2) -- (2, -1);
\end{tikzpicture}
We assume each edge has a resistance. In order to properly calculate
the resistances in a large mesh network like this, we need to use Kirchhoff's
voltage and current laws to generate enough linearly independent equations to
span the space. With $|E|$ edges in a connected graph, we need $|E|+1$
linearly independent equations \cite{Moody2013}. Kirchhoff's current law states
that the sum of the currents into a vertex is 0. With a graph $G=(V,E)$, that
gets us $|V|$ equations.
Kirchhoff's voltage law says the sum of the voltages around a loop is 0. To get
the remaining equations, we start by creating a minimum spanning tree. The edges
not in that tree will each be part of an independent cycle. For each edge not in
the spanning tree we walk up from its two endpoints through the tree until we
find a common ancestor; the vertices visited form a loop. Because in a
connected graph $G=(V,E)$ the spanning tree has $|V|-1$ edges, this procedure
gives us $|E|-(|V|-1)$ new equations. Added to the $|V|$ equations we generated
from the current law, we have the required $|E|+1$ linearly independent
equations to solve the resistance network.
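For intuition, here is a minimal sketch of the simpler \emph{nodal} formulation
(fix the source at one volt and the sink at zero, apply the current law at
every node, and solve for the node voltages). The 4-node bridge network and the
unit resistances are hypothetical; a real implementation would build the system
from the board graph instead.
\singlespacing
\begin{lstlisting}[language=C++,basicstyle=\small,
    caption={Nodal resistance sketch (illustrative only)}]
#include <array>
#include <cstdio>
#include <vector>

int main(){
    // Hypothetical 4-node bridge: node 0 = source, node 3 = sink,
    // unit resistances on edges 0-1, 0-2, 1-2, 1-3, 2-3.
    const int n = 4;
    std::vector<std::array<int, 2>> edges =
        { {0, 1}, {0, 2}, {1, 2}, {1, 3}, {2, 3} };
    // Graph Laplacian as an augmented system A * v = rhs.
    std::vector<std::vector<double>> A(n, std::vector<double>(n + 1, 0.0));
    for (auto e: edges) {
        A[e[0]][e[0]] += 1; A[e[1]][e[1]] += 1;
        A[e[0]][e[1]] -= 1; A[e[1]][e[0]] -= 1;
    }
    // Boundary conditions: v[0] = 1 (source), v[3] = 0 (sink).
    for (int fixed: {0, 3}) {
        for (int j = 0; j <= n; ++j) { A[fixed][j] = 0.0; }
        A[fixed][fixed] = 1.0;
    }
    A[0][n] = 1.0;
    // Gaussian elimination, then back-substitution.
    for (int i = 0; i < n; ++i) {
        for (int k = i + 1; k < n; ++k) {
            double f = A[k][i] / A[i][i];
            for (int j = i; j <= n; ++j) { A[k][j] -= f * A[i][j]; }
        }
    }
    std::vector<double> v(n);
    for (int i = n - 1; i >= 0; --i) {
        double s = A[i][n];
        for (int j = i + 1; j < n; ++j) { s -= A[i][j] * v[j]; }
        v[i] = s / A[i][i];
    }
    for (int i = 0; i < n; ++i) { std::printf("v[%d] = %g\n", i, v[i]); }
    return 0;
}
\end{lstlisting}
\doublespacing
The printed node voltages (by symmetry, $v_1 = v_2 = 0.5$ here) are exactly
what Shannon's machine read off with a volt meter.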
With an n$\times$n Hex board plus the required source and sink, there
are a total of $3(n-1)^2+2(n-1)+2n$ edges. For the standard
11x11 board that gives $|E|=342$, requiring a system of 343 linearly
independent equations. This is not a trivial amount of work for
evaluating a single position.
\cite{ANSHELEVICH2002101} describes a heuristic that finds cell-to-cell loose
linkages and works better than straightforward resistance heuristics. Here is
the simplest example:
\begin{HexBoard}[board size=5]
\HStoneGroup[color=black]{b2,c3}
\end{HexBoard}
The stones at b2 and c3 are linked in that if white tries to block the
connection by placing at either b3 or c2, black can play the other
location to complete the link. The pattern matching for these linkages
has become called H-search. H-search, with various amounts of pattern
matching, is the primary heuristic used by Six, Wolve, and MoHex,
which were the winners of the Computer Olympiad Hex competitions from
2006 through 2010.
Monte-Carlo has been used in conjunction with $\alpha-\beta$
\cite{arneson_hayward_henderson_2010}, but it is normally used for either
selection or for leaf evaluation, not for both. Most commonly, Monte-Carlo is
used for branch selection, where it is called Monte Carlo Tree Search (MCTS).
The Hex program presented here has hooks for multiple evaluation functions, but
uses the same Monte-Carlo for both leaf and branching evaluation. A more
expensive leaf evaluation, like H-search or resistance, should bring the current
program closer to the level of play of the previous Computer Olympiad players.
The big difference will be the opening book and dead region elimination done by
Wolve and in particular MoHex.
\section{Other Approaches}
The current gold standard for a Hex-type game is probably AlphaGo Zero
\cite{Silver2017}. Instead of a Monte-Carlo roll-out, AlphaGo Zero used a single
neural network for both position evaluation and branch selection. The neural
network is trained with an MCTS, but the MCTS branch probabilities are guided by
the position valuation of the initial neural network. After 40 days of
continuous self-play AlphaGo Zero was able to beat AlphaGo-Lee 100 games to 0.
AlphaGo-Lee is the program that defeated the human world champion in 2016. An
intermediate program called AlphaGo-Master beat a field of the strongest
professional Go players 60 games to 0. In a 100-game match between
AlphaGo-Master and AlphaGo-Zero, AlphaGo-Zero won 89 games to 11.
The Machine Learning class was full this semester, but if I can get into it next
year, I may try my hand at using a neural network for the heuristic function.
\section{Future Work}
From an engineering standpoint, the code wasn't written to
scale. Everything is in a single module and the classes are statically
defined. In order to scale and add improvements, the code will need
to be modularized. The main class interfaces need to be re-thought
out, and heuristics functions should be either virtual and
polymorphized, or passed as function pointers or as function object
references. The current code is appropriate for the single use case of
supporting this paper.
Obviously, the game needs a GUI. The code is C++ and I work on Linux, so I think
a Qt GUI \cite {Blanchette:2006} would work well. Qt is a C++ GUI framework that
is portable to Linux, Mac, Windows and Android.
It is a bit embarrassing today that the engine is single threaded. I think a
useful architecture for this project may be a single master thread that handles
user input and game play and a number of worker threads (based on the number of
cores available) that would do position evaluation. Other architectures, of
course, are also possible.
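A minimal sketch of the worker side of that idea, using only C++11
\texttt{$<$thread$>$} (the \texttt{eval\_fn} callback is a hypothetical
stand-in for the program's evaluation functions):
\singlespacing
\begin{lstlisting}[language=C++,basicstyle=\small,
    caption={Parallel evaluation sketch (illustrative only)}]
#include <algorithm>
#include <cstddef>
#include <functional>
#include <thread>
#include <vector>

// Evaluate a batch of candidate moves across the available cores.
std::vector<double> parallel_eval(const std::vector<int> &moves,
                                  std::function<double(int)> eval_fn){
    std::vector<double> scores(moves.size());
    unsigned n = std::max(1u, std::thread::hardware_concurrency());
    std::vector<std::thread> workers;
    for (unsigned w = 0; w < n; ++w) {
        workers.emplace_back([&, w] {
            // Strided split: worker w takes indices w, w+n, w+2n, ...
            for (std::size_t i = w; i < moves.size(); i += n) {
                scores[i] = eval_fn(moves[i]);
            }
        });
    }
    for (auto &t: workers) { t.join(); }
    return scores;
}
\end{lstlisting}
\doublespacing
Each worker writes to a disjoint set of indices, so the results vector needs
no locking; the board itself would still have to be copied per thread.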
While basic thread support has been added to C++11 with \texttt{$<$thread$>$},
the asio\footnote{\url{https://think-async.com/Asio}} library provides a higher
level framework and is the basis of the proposed C++ Networking TS. If
somebody wants to get serious about this, the asio framework would allow one to
distribute worker threads across multiple cores or multiple machines for
large-scale parallelism.
Iterative deepening with a timer would be a large improvement over statically
picking the search depth and branching factor.
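A sketch of such a driver, reusing the \texttt{alpha\_beta} signature from
Listing \ref{lst:ab} (the half-budget cutoff is a hypothetical heuristic; a
production engine would also abort a search in progress):
\singlespacing
\begin{lstlisting}[language=C++,basicstyle=\small,
    caption={Iterative deepening sketch (illustrative only)}]
#include <chrono>
#include <limits>
#include <utility>

using alpha_beta_value = std::pair<double, int>;
alpha_beta_value alpha_beta(int depth, bool maximize,
                            double alpha, double beta);

// Deepen one ply at a time; keep the move from the last finished depth.
alpha_beta_value timed_search(std::chrono::milliseconds budget){
    auto start = std::chrono::steady_clock::now();
    alpha_beta_value best(0.0, -1);
    for (int depth = 1; ; ++depth) {
        best = alpha_beta(depth, true,
                          -std::numeric_limits<double>::max(),
                          std::numeric_limits<double>::max());
        // Stop if the next, deeper pass would likely blow the budget.
        if (std::chrono::steady_clock::now() - start > budget / 2) {
            break;
        }
    }
    return best;
}
\end{lstlisting}
\doublespacing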
Other heuristic functions, such as network resistance, edge current, or neural
network evaluation should be tried.
To test one heuristic against another, a protocol should be added for
two programs to play each other. A socket interface would allow a
competition across machines.
With a few of the above code changes, the Hex code would be a good
base to add the modern neural network based heuristics used in the
championship Go programs.
\section{Conclusion}
A basic game engine for Hex can be written in just a couple of hundred
lines of code and can play a decent game. The structure and regularity
make Hex a good game for AI modeling.
A simple heuristic that plays Hex well can be coded up in a dozen
lines; however, a full deep-learning self-training neural network is
still a Ph.D. level project that could take months to explore.
\bibliographystyle{apalike}
\bibliography{ai}
\appendix
\section{Code Listing}
The source code is online at \url{https://github.com/evaitl/hex}. It is included
below for completeness.
\singlespacing
\lstinputlisting[language=C++,basicstyle=\small]{hex.cpp}
\end{document}
|
-- import the definition of the non-euclidean maze
import mazes.noneuclidean_maze.solutions.definition
open maze direction
/-
# Non-euclidean maze.
You are in a maze of twisty passages, all distinct.
You can go north, south, east or west.
If you hit the wall there's an error.
When you're at the exit (room `J`), type `out`.
Solver remark : there are 10 rooms.
-/
/- Lemma : no-side-bar
Can you escape from this non-Euclidean maze?
-/
lemma solve : can_escape A :=
begin
s,s,e,e,w,w,out,
end
|
\chapter{Introduction to Network security}
%\section{Securing network}
%\subsection{Terminologies}
%An attack vector is a path or other means by which an attacker can gain access to a server, host, or network. Attack vectors can originate from outside (external threat) or inside (internal threat) the corporate network. Internal threats have the potential to cause greater damage than external threats because employees have direct access to infrastructure devices as well as the knowledge of the corporate network.\\
%\textbf{Security Artichoke} is the analogy used to describe what a hacker must do to launch an attack in a Borderless network. They remove certain \emph{artichoke leafs}, and each \emph{leaf} of the network may reveal some sensitive data. And leaf after leaf, it all leads the hacker to more data.\\
%\textbf{Cryptography} is the study and practice of hiding information. It ensures three components of information security: Confidentiality, Integrity, and Availability.\\
%A \textbf{Security Policy} is a formal statement of the rules by which people that are given access to the technology and information assets of an organization, must abide.
%\subsection{Network topology}
%
%\textbf{White hat hackers} perform \emph{ethical} network penetration test to discover network vulnerabilities. \textbf{Grey hat hackers} do unethical things, but not for personal gain or to cause damage (e.g. disclose vulnerability publicly). \textbf{Black hat hackers} violate computer and network security for personal gain and malicious purposes.\\
%
%\paragraph{SOHO network:} Attackers may want to use someone's Internet connection for free or illegal activity, or view financial transactions. Home networks and SOHOs are typically protected using a consumer \emph{grade router}, such as a \emph{Linksys home wireless router}.
%
%\paragraph{WAN network:} Main site and Regional site are protected by an ASA (stateful firewall and VPN). Branch site is secured using hardened ISR and VPN connection to the main site. The SOHO and Mobile users connect to the main site using Cisco Anyconnect VPN client.
%
%\paragraph{Data center network:} Data center networks are interconnected to corporate sites using VPN and ASA devices along with \emph{integrated data center switches}, such as a high-speed Nexus switches. Data center physical security can be divided into two areas: Outside perimeter security and Inside perimeter security.
%
%\paragraph{Cloud and virtual network:} This kind of network uses virtual machines (VM) to provide services to their clients. VMs are also prone to specific targeted attacks as shown in the following list. The \textbf{Cisco Secure Data Center} is a solution to secure Cloud and virtual network. The core components of this solution provide: Secure Segmentation, Threat Defense, and Visibility.
%
%\begin{itemize}
%\item \textbf{Hyperjacking:} An attacker could hijack a VM hypervisor and use it as a starting point to attack other devices.
%\item \textbf{Instant on activation:} A VM that has not been used for a long period of time can introduce security vulnerabilities when activated.
%\item \textbf{Antivirus storm:} Multiple VMs attempt to download antivirus file at the same time
%\end{itemize}
%
%\paragraph{Borderless Network:} To accommodate the BYOD trend, Cisco developed the Borderless Network. To support this network, Cisco devices support Mobile Device Management (MDM) features. MDM features secure, monitor, and manage mobile devices, including corporate-owned devices and employee-owned devices.
\section{Network threats}
\subsection{Malware}
A \textbf{virus} is malicious code that is attached to executable files which are often legitimate programs. A virus is triggered by an event and cannot automatically propagate itself to other systems. Viruses are spread by USB memory drives, CDs, DVDs, network shares, and email.\\
A \textbf{Trojan horse} carries out malicious operations under the guise of a desired function. This malicious operation exploits the privileges of the user. Trojans are often found attached to online games. \\
\textbf{Worms} run by themselves, replicate, and then spread very quickly (self-propagation), slowing down networks. They do not require user participation. After a host is infected, the worm is able to spread over the network. Most worm attacks consist of three components:
\begin{itemize}
\item \textbf{Enabling vulnerability:} A worm installs itself using an exploit mechanism, such as an email attachment, an executable file, or a Trojan horse.
\item \textbf{Propagation mechanism:} After gaining access to a device, the worm replicates itself and locates new targets.
\item \textbf{Payload:} Any malicious code that results in some action is a payload. Most often this is used to create a backdoor to the infected host or create a DoS attack.
\end{itemize}
\note Worms never really stop on the Internet. After they are released, they continue to propagate until all possible sources of infection are properly patched.\\
%Some other examples of modern malware:
%
%\begin{itemize}
%\item Ransomware -- deny access to the infected computer system, then demand a paid ransom for the restriction to be removed.
%\item Spyware -- gather information about a user and send the information to another entity
%\item Adware -- display annoying pop-up advertising pertinent to websites visited
%\item Scareware -- include scam software which uses social engineering to shock or induce anxiety by creating the perception of a threat
%\item Phishing -- attempt to convince people to divulge sensitive information, e.g. receiving an email from their bank asking users to divulge their account and PIN numbers.
%\item Rootkits -- installed on a compromised system, then hide its intrusion and maintain privileged access to the hacker.
%\end{itemize}
\subsection{Network attacks}
%The method used in this course classifies attacks in three major categories: Reconnaissance, Access, and DoS Attacks.\\
\textbf{Reconnaissance attacks} gather information about a network and scan for access. Examples: information queries, ping sweeps\footnote{a network scanning technique that indicates the live hosts in a range of IP addresses}, port scans, vulnerability scanners, and exploitation tools.\\
\textbf{Access attacks} gain access to, or control of, sensitive information. Some examples of access attacks are listed as follows:
\begin{itemize}
\item Password attack -- a dictionary is used for repeated login attempts
\item Trust exploitation -- uses granted privileges to access unauthorized material
\item Port redirection -- uses a compromised internal host to pass traffic through a firewall
\item Man-in-the-middle -- an unauthorized device positioned between two legitimate devices in order to redirect or capture traffic
\item Buffer overflow -- too much data sent to a buffer memory
\end{itemize}
\textbf{Denial-of-Service (DoS) attacks} prevent users from accessing a system. They are popular and simple to conduct. There are two major sources of DoS attacks:
\begin{itemize}
\item \emph{Maliciously Formatted Packets} are forwarded to a host and the receiver is unable to handle an unexpected condition, which leads to a slow or crashed system.
\item \emph{Overwhelming Quantity of Traffic} causes the system to crash or become extremely slow.
\end{itemize}
\textbf{A Distributed DoS Attack (DDoS)} is similar in intent to a DoS attack, except that a DDoS attack increases in magnitude because it originates from multiple, coordinated sources. As an example, a DDoS attack could proceed as follows:
\begin{enumerate}
\item A hacker builds a network of infected machines. A network of infected hosts is called a \emph{botnet}. The compromised computers are called \emph{zombie computers}, and they are controlled by \emph{handler systems}.
\item The zombie computers continue to scan and infect more targets to create more zombies.
\item When ready, the hacker instructs the handler systems to make the botnet of zombies carry out the DDoS attack.
\end{enumerate}
\section{Mitigating Threats}
\subsection{Mitigating common network attacks}
\paragraph{Virus and Trojan horse:} The primary means of mitigating virus and Trojan horse attacks is antivirus software. However, antivirus software cannot prevent viruses from entering the network.
\paragraph{Worms:} The response to a worm attack can be broken down into four phases:
\begin{enumerate}
\item \textbf{Containment:} limit the spread of worm infection
\item \textbf{Inoculation:} run parallel to or subsequent to the Containment phase; all uninfected systems are patched with the appropriate vendor patch.
\item \textbf{Quarantine:} identify the infected machines
\item \textbf{Treatment:} disinfect the infected systems
\end{enumerate}
\paragraph{Reconnaissance:} \emph{Encryption} is an effective solution for \textit{sniffer attacks}. Using \emph{IPS} and \emph{firewall} can limit the impact of \emph{port scanning}. \emph{Ping sweeps} can be stopped if \emph{ICMP} echo and echo-reply are turned off on edge routers.
\paragraph{Access attacks:} strong password policy, principle of minimum trust\footnote{a system or device should not trust another unconditionally}, cryptography, OS and app patches.
\paragraph{DoS attacks:} network utilization monitoring software and anti-spoofing technologies (port security, DHCP snooping, IP source guard, ARP inspection, and ACLs).
%\subsection{Cisco SecureX architecture}
%
%The Cisco SecureX architecture is designed to provide effective security for any user, using any device, from any location, and at any time. This architecture includes the five major components: Scanning Engines, Delivery Mechanisms, Security Intelligence Operations (SIO), Policy Management Consoles, and Next-Generation Endpoints. The most important component of SecureX is SIO, which detects and blocks malicious traffic.\\
%
%The SecureX is a huge and complex computing model. Therefore, a \textbf{context-aware scanning element} is used to scale SecureX. It is a device that examines packets as well as external information to understand the full context of the situation. To be accurate, this context-aware device defines security policies based on five parameters: person ID, application, device type, location, and access time.\\
%
%\textbf{Security Intelligence Operations (SIO)} is a Cloud-based service that connects global threat information, reputation-based services, and sophisticated analysis, to SecureX network security devices.
\subsection{Cisco Network Foundation Protection Framework}
The Cisco Network Foundation Protection (NFP) framework provides comprehensive guidelines for protecting the network infrastructure. NFP logically divides routers and switches into three functional areas:
\begin{itemize}
\item\textbf{Control plane} is responsible for routing functions. We protect this plane using routing protocol authentication, CoPP\footnote{Control Plane Policing; prevents unnecessary traffic from overwhelming the route processor. See the rate-limiting sketch after this list.}, and AutoSecure.
\item\textbf{Management plane} is responsible for network security and management. Its security is implemented by password policy, RBAC\footnote{Role-based access control, restricts user access based on the role of the user.}, authorization, access reporting.
\item\textbf{Data plane} is responsible for forwarding data. Its security can be implemented using \emph{ACLs}, antispoofing mechanisms, and port security.
\end{itemize}
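CoPP is, at bottom, a traffic policer. The following token-bucket sketch (illustrative C++ only, not Cisco code) shows the underlying mechanism: packets are admitted only while tokens remain, and tokens refill at a fixed rate, so a flood aimed at the route processor is simply dropped.
\begin{verbatim}
#include <algorithm>
#include <chrono>

// Illustrative token-bucket policer: admit() returns false once the
// configured packet rate is exceeded.
class TokenBucket {
    double tokens_, capacity_, rate_; // rate_ in tokens per second
    std::chrono::steady_clock::time_point last_;
public:
    TokenBucket(double capacity, double rate)
        : tokens_(capacity), capacity_(capacity), rate_(rate),
          last_(std::chrono::steady_clock::now()) {}
    bool admit() { // one token per packet
        auto now = std::chrono::steady_clock::now();
        std::chrono::duration<double> dt = now - last_;
        last_ = now;
        tokens_ = std::min(capacity_, tokens_ + rate_ * dt.count());
        if (tokens_ < 1.0) return false; // police: drop the packet
        tokens_ -= 1.0;
        return true;
    }
};
\end{verbatim}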
|
!----------------------------------------------------------------------
!----------------------------------------------------------------------
! Example 1: The AP BP normal form
!----------------------------------------------------------------------
!----------------------------------------------------------------------
SUBROUTINE FUNC(NDIM,U,ICP,PAR,IJAC,F,DFDU,DFDP)
IMPLICIT NONE
INTEGER, INTENT(IN) :: NDIM, IJAC, ICP(*)
DOUBLE PRECISION, INTENT(IN) :: U(NDIM), PAR(*)
DOUBLE PRECISION, INTENT(OUT) :: F(NDIM), DFDU(NDIM,*), DFDP(NDIM,*)
F(1) = (U(1)-PAR(1))*(U(1)-PAR(2))+PAR(3)
IF(IJAC==0)RETURN
DFDU(1,1) = U(1)-PAR(1)+U(1)-PAR(2)
IF(IJAC==1)RETURN
DFDP(1,1) = -(U(1)-PAR(2))
DFDP(1,2) = -(U(1)-PAR(1))
DFDP(1,3) = 1.0d0
END SUBROUTINE FUNC
!-----------------------------------------------------------------------
!-----------------------------------------------------------------------
SUBROUTINE STPNT(NDIM,U,PAR)
!--------- -----
IMPLICIT NONE
INTEGER, INTENT(IN) :: NDIM
DOUBLE PRECISION, INTENT(OUT) :: U(NDIM), PAR(*)
PAR(1:3) = (/ 1.0d0, 2.0d0, 0.0d0 /)
U(1) = 1.0
END SUBROUTINE STPNT
!----------------------------------------------------------------------
!----------------------------------------------------------------------
SUBROUTINE BCND
END SUBROUTINE BCND
SUBROUTINE ICND
END SUBROUTINE ICND
SUBROUTINE FOPT
END SUBROUTINE FOPT
SUBROUTINE PVLS
END SUBROUTINE PVLS
!----------------------------------------------------------------------
!----------------------------------------------------------------------
|
[STATEMENT]
lemma nu_below_mu_nu_2_nu_below_mu_nu:
assumes "has_least_fixpoint f"
and "has_greatest_fixpoint f"
and "nu_below_mu_nu_2 f"
shows "nu_below_mu_nu f"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. nu_below_mu_nu f
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. nu_below_mu_nu f
[PROOF STEP]
have "d(L) * \<nu> f \<le> \<mu> f \<squnion> (\<nu> f \<sqinter> L) \<squnion> d((\<mu> f \<squnion> (\<nu> f \<sqinter> L)) * bot) * top"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. d L * \<nu> f \<le> \<mu> f \<squnion> \<nu> f \<sqinter> L \<squnion> d ((\<mu> f \<squnion> \<nu> f \<sqinter> L) * bot) * top
[PROOF STEP]
using assms(3) nu_below_mu_nu_2_def
[PROOF STATE]
proof (prove)
using this:
nu_below_mu_nu_2 f
nu_below_mu_nu_2 ?f \<equiv> d L * \<nu> ?f \<le> \<mu> ?f \<squnion> \<nu> ?f \<sqinter> L \<squnion> d ((\<mu> ?f \<squnion> \<nu> ?f \<sqinter> L) * bot) * top
goal (1 subgoal):
1. d L * \<nu> f \<le> \<mu> f \<squnion> \<nu> f \<sqinter> L \<squnion> d ((\<mu> f \<squnion> \<nu> f \<sqinter> L) * bot) * top
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
d L * \<nu> f \<le> \<mu> f \<squnion> \<nu> f \<sqinter> L \<squnion> d ((\<mu> f \<squnion> \<nu> f \<sqinter> L) * bot) * top
goal (1 subgoal):
1. nu_below_mu_nu f
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
d L * \<nu> f \<le> \<mu> f \<squnion> \<nu> f \<sqinter> L \<squnion> d ((\<mu> f \<squnion> \<nu> f \<sqinter> L) * bot) * top
goal (1 subgoal):
1. nu_below_mu_nu f
[PROOF STEP]
have "... \<le> \<mu> f \<squnion> (\<nu> f \<sqinter> L) \<squnion> d(\<nu> f * bot) * top"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<mu> f \<squnion> \<nu> f \<sqinter> L \<squnion> d ((\<mu> f \<squnion> \<nu> f \<sqinter> L) * bot) * top \<le> \<mu> f \<squnion> \<nu> f \<sqinter> L \<squnion> d (\<nu> f * bot) * top
[PROOF STEP]
by (metis assms(1,2) d_isotone inf.sup_monoid.add_commute inf.sup_right_divisibility le_supI le_supI2 mu_below_nu mult_left_isotone sup_left_divisibility)
[PROOF STATE]
proof (state)
this:
\<mu> f \<squnion> \<nu> f \<sqinter> L \<squnion> d ((\<mu> f \<squnion> \<nu> f \<sqinter> L) * bot) * top \<le> \<mu> f \<squnion> \<nu> f \<sqinter> L \<squnion> d (\<nu> f * bot) * top
goal (1 subgoal):
1. nu_below_mu_nu f
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
d L * \<nu> f \<le> \<mu> f \<squnion> \<nu> f \<sqinter> L \<squnion> d (\<nu> f * bot) * top
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
d L * \<nu> f \<le> \<mu> f \<squnion> \<nu> f \<sqinter> L \<squnion> d (\<nu> f * bot) * top
goal (1 subgoal):
1. nu_below_mu_nu f
[PROOF STEP]
by (simp add: nu_below_mu_nu_def)
[PROOF STATE]
proof (state)
this:
nu_below_mu_nu f
goal:
No subgoals!
[PROOF STEP]
qed
|
Formal statement is: lemma has_contour_integral_0: "((\<lambda>x. 0) has_contour_integral 0) g" Informal statement is: The contour integral of the zero function is zero.
|
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE TypeOperators #-}
{-# LANGUAGE UndecidableInstances #-}
{-# OPTIONS_GHC -Wno-orphans #-}
{- Module : Text.Loquate.Instances
Description : Default Loquate Instances
-}
module Text.Loquate.Instances
(
) where
--import qualified Data.ByteString as BS
--import qualified Data.ByteString.Lazy as LBS
import qualified Control.Exception.Base as E
import Data.Array.Unboxed (Array, IArray, Ix, UArray, elems)
import Data.Complex (Complex(..))
import Data.Fixed
import Data.Foldable
import Data.Int (Int16, Int32, Int64, Int8)
import Data.List.NonEmpty (NonEmpty)
import Data.Ratio (Ratio, denominator, numerator)
import qualified Data.Text as T
import qualified Data.Text.Lazy as LT
import Data.Text.Prettyprint.Doc (Pretty(..))
import Data.Typeable
import Data.Version (Version, showVersion)
import Data.Void (Void)
import Data.Word (Word16, Word32, Word64, Word8)
import GHC.Stack
import Numeric.Natural
import Text.Loquate.Class
import Text.Loquate.Doc
-- misc types
instance Lang l => Loquate l () where loq _ = pretty
instance Lang l => Loquate l Bool where loq _ = pretty
instance Lang l => Loquate l Version where loq _ = pretty . showVersion
instance Lang l => Loquate l Void where loq _ = pretty
-- textual types
instance Lang l => Loquate l Char where loq _ = pretty
instance {-# OVERLAPS #-} Lang l => Loquate l String where loq _ = pretty
-- instance Loquate l BS.ByteString where loq _ = pretty
-- instance Loquate l LBS.ByteString where loq _ = pretty
instance Lang l => Loquate l T.Text where loq _ = pretty
instance Lang l => Loquate l LT.Text where loq _ = pretty
-- numeric types
instance Lang l => Loquate l Double where loq _ = pretty
instance Lang l => Loquate l Float where loq _ = pretty
instance (Lang l, Typeable t, HasResolution t) => Loquate l (Fixed t) where loq _ = viaShow
-- | Integral types should loquate via Integer
instance Lang l => Loquate l Integer where loq _ = pretty
instance (Lang l, Loquate l Integer) => Loquate l Natural where loq l = loq l . toInteger
instance (Lang l, Loquate l Integer) => Loquate l Int where loq l = loq l . toInteger
instance (Lang l, Loquate l Integer) => Loquate l Int8 where loq l = loq l . toInteger
instance (Lang l, Loquate l Integer) => Loquate l Int16 where loq l = loq l . toInteger
instance (Lang l, Loquate l Integer) => Loquate l Int32 where loq l = loq l . toInteger
instance (Lang l, Loquate l Integer) => Loquate l Int64 where loq l = loq l . toInteger
instance (Lang l, Loquate l Integer) => Loquate l Word where loq l = loq l . toInteger
instance (Lang l, Loquate l Integer) => Loquate l Word8 where loq l = loq l . toInteger
instance (Lang l, Loquate l Integer) => Loquate l Word16 where loq l = loq l . toInteger
instance (Lang l, Loquate l Integer) => Loquate l Word32 where loq l = loq l . toInteger
instance (Lang l, Loquate l Integer) => Loquate l Word64 where loq l = loq l . toInteger
-- polymorphic numeric types
instance Loquate l t => Loquate l (Complex t) where
loq l (a :+ b) = loq l a <+> "+" <+> loq l b <> "i"
instance Loquate l t => Loquate l (Ratio t) where
loq l x = loq l (numerator x) <> "/" <> loq l (denominator x)
-- polymorphic containers in dependencies of loquacious
instance Loquate l t => Loquate l (Maybe t) where
loq _ Nothing = mempty
loq l (Just x) = loq l x
instance (Loquate l t, Loquate l s) => Loquate l (t, s) where
loq l (a, b) = align $ tupled [loq l a, loq l b]
instance (Loquate l t, Loquate l s, Loquate l r) => Loquate l (t, s, r) where
loq l (a, b, c) = align $ tupled [loq l a, loq l b, loq l c]
instance (Loquate l t) => Loquate l [t] where
loq l xs = align . list $ loq l <$> xs
instance (Loquate l t) => Loquate l (NonEmpty t) where
loq l xs = align . list $ loq l <$> toList xs
instance (Typeable i, Loquate l t) => Loquate l (Array i t) where
loq l xs = align . list $ loq l <$> toList xs
instance (Loquate l t, IArray UArray t, Ix i, Typeable i) => Loquate l (UArray i t) where
loq l xs = align . list $ loq l <$> elems xs
instance (Loquate l t, Loquate l s) => Loquate l (Either t s) where
loq l (Left x) = loq l x
loq l (Right x) = loq l x
-- showing types rather than values
instance Lang l => Loquate l TypeRep where loq _ = viaShow
instance (Lang l, Typeable a) => Loquate l (Proxy a) where
loq l _ = loq l $ typeRep (Proxy :: Proxy (Proxy a))
instance (Lang l, Typeable a, Typeable b) => Loquate l (a -> b) where
loq l f = loq l $ typeOf f
instance (Lang l, Typeable a, Typeable b) => Loquate l (a :~: b) where
loq l _ = loq l (typeRep (Proxy :: Proxy a)) <+> ":~:" <+> loq l (typeRep (Proxy :: Proxy b))
instance (Lang l, Typeable a, Typeable b) => Loquate l (a :~~: b) where
loq l _ = loq l (typeRep (Proxy :: Proxy a)) <+> ":~~:" <+> loq l (typeRep (Proxy :: Proxy b))
-- call stacks!
instance Lang l => Loquate l CallStack where
loq l = withFrozenCallStack ( align . vsep . fmap loqStack . getCallStack )
where loqStack (func, srcLoc) = fromString func <+> "←" <+> loq l srcLoc
instance Lang l => Loquate l SrcLoc where
loq l SrcLoc{..} = locFile <+> parens locPackage
where
locFile = fromString srcLocFile <> colon <> loq l srcLocStartLine <> colon <> loq l srcLocStartCol
locPackage = fromString srcLocPackage <> colon <> fromString srcLocModule
-- Exception types
instance Lang l => Loquate l E.AllocationLimitExceeded where loq _ e = viaShow e
instance Lang l => Loquate l E.ArithException where loq _ e = viaShow e
instance Lang l => Loquate l E.ArrayException where loq _ e = viaShow e
instance Lang l => Loquate l E.AssertionFailed where loq _ e = viaShow e
instance Lang l => Loquate l E.AsyncException where loq _ e = viaShow e
instance Lang l => Loquate l E.BlockedIndefinitelyOnMVar where loq _ e = viaShow e
instance Lang l => Loquate l E.BlockedIndefinitelyOnSTM where loq _ e = viaShow e
instance Lang l => Loquate l E.CompactionFailed where loq _ e = viaShow e
instance Lang l => Loquate l E.Deadlock where loq _ e = viaShow e
instance Lang l => Loquate l E.FixIOException where loq _ e = viaShow e
instance Lang l => Loquate l E.IOException where loq _ e = viaShow e
instance Lang l => Loquate l E.NestedAtomically where loq _ e = viaShow e
instance Lang l => Loquate l E.NoMethodError where loq _ e = viaShow e
instance Lang l => Loquate l E.NonTermination where loq _ e = viaShow e
instance Lang l => Loquate l E.PatternMatchFail where loq _ e = viaShow e
instance Lang l => Loquate l E.RecConError where loq _ e = viaShow e
instance Lang l => Loquate l E.RecSelError where loq _ e = viaShow e
instance Lang l => Loquate l E.RecUpdError where loq _ e = viaShow e
instance Lang l => Loquate l E.SomeAsyncException where loq _ e = viaShow e
instance Lang l => Loquate l E.SomeException where loq _ e = viaShow e
instance Lang l => Loquate l E.TypeError where loq _ e = viaShow e
|
[STATEMENT]
lemma confluent_quotient_ACIDZ:
"confluent_quotient (~) (~~) (~~) (~~) (~~) (~~)
(map_rexp fst) (map_rexp snd) (map_rexp fst) (map_rexp snd)
rel_rexp rel_rexp rel_rexp set_rexp set_rexp"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. confluent_quotient (~) (~~) (~~) (~~) (~~) (~~) (map_rexp fst) (map_rexp snd) (map_rexp fst) (map_rexp snd) rel_rexp rel_rexp rel_rexp set_rexp set_rexp
[PROOF STEP]
by unfold_locales
(auto 4 4 dest: ACIDZ_set_rexp' simp: rexp.in_rel rexp.rel_compp dest: map_rexp_ACIDZ_inv intro: rtranclp_into_equivclp
intro: equivpI reflpI sympI transpI ACIDZcl_map_respects
strong_confluentp_imp_confluentp[OF strong_confluentp_ACIDZ])
|
corollary\<^marker>\<open>tag unimportant\<close> maximum_real_frontier: assumes holf: "f holomorphic_on (interior S)" and contf: "continuous_on (closure S) f" and bos: "bounded S" and leB: "\<And>z. z \<in> frontier S \<Longrightarrow> Re(f z) \<le> B" and "\<xi> \<in> S" shows "Re(f \<xi>) \<le> B"
|
A Koi Pond provides a peaceful and quiet place to help you escape from the stress of everyday life. It is also the perfect way to add beauty to any outdoor space. But having a Koi pond does require maintenance to keep it healthy and attractive.
There are so many products available for water gardens that the choice can be quite overwhelming. Each pond has its own specific care needs. With proper care from the beginning, you can avoid costly complications later on. There are effective precautionary steps you can take to keep your Koi pond healthy year-round.
Springtime is an important time to get your pond going in the right direction. Anything you shut down for the winter can be undone in the spring: turn the pump back on, and so forth.
When the water temperature reaches a constant 50 degrees, start feeding the fish again. A cold-weather food is important until the temperature reaches 60 degrees; then move to a regular feeding schedule.
Remove any leaves or debris as well as vacuum the sludge from the bottom of the pond.
Pump the water from the pond into a large container and then place the fish into the container filled with the pond water. Once the pond is clean, pump the water back in and then put the fish into the pond.
Begin cleaning the filter as needed.
Remove any dead foliage. Cut plants that have gone brown with age. This reduces buildup and allows the plants more room to grow.
Feed the fish but don’t overfeed.
It’s important to keep the falling leaves from ending up in the pool causing decay. This decay will throw off the ecological balance of the water garden.
Installing leaf netting over the pond will make it easier to maintain.
Water temperature is dropping, so it is important to feed the fish less. Once the temperature drops below 50 degrees, stop feeding until spring.
If you have a warm winter day, and the pond begins to warm up, do not be tempted to feed the fish.
Prepare the pond. If you have a small or shallow pond, you may need a floating deicer to keep it above freezing.
If the temperature drops below 40 degrees, reduce the circulation of water by turning off the pump and draining it.
Caring for your Koi pond is both an art and a science. Maintenance for your Koi pond changes with the seasons. The health and survival of your plants and fish depend upon this proper maintenance and care. Art of the Yard can help you build and maintain a beautiful Koi pond, protecting your investment.
|
module Esterel.Variable.Signal where
open import Data.Nat
using (ℕ) renaming (_≟_ to _≟ℕ_)
open import Function
using (_∘_)
open import Relation.Nullary
using (Dec ; yes ; no ; ¬_)
open import Relation.Binary
using (Decidable)
open import Relation.Binary.PropositionalEquality
using (_≡_ ; refl ; cong ; trans ; sym)
data Signal : Set where
_ₛ : (S : ℕ) → Signal
unwrap : Signal → ℕ
unwrap (n ₛ) = n
unwrap-inverse : ∀ {s} → (unwrap s) ₛ ≡ s
unwrap-inverse {_ ₛ} = refl
unwrap-injective : ∀ {s t} → unwrap s ≡ unwrap t → s ≡ t
unwrap-injective s'≡t' = trans (sym unwrap-inverse) (trans (cong _ₛ s'≡t') unwrap-inverse)
wrap : ℕ → Signal
wrap = _ₛ
bijective : ∀{x} → unwrap (wrap x) ≡ x
bijective = refl
-- for backward compatibility
unwrap-neq : ∀{k1 : Signal} → ∀{k2 : Signal} → ¬ k1 ≡ k2 → ¬ (unwrap k1) ≡ (unwrap k2)
unwrap-neq = (_∘ unwrap-injective)
_≟_ : Decidable {A = Signal} _≡_
(s ₛ) ≟ (t ₛ) with s ≟ℕ t
... | yes p = yes (cong _ₛ p)
... | no ¬p = no (¬p ∘ cong unwrap)
data Status : Set where
present : Status
absent : Status
unknown : Status
_≟ₛₜ_ : Decidable {A = Status} _≡_
present ≟ₛₜ present = yes refl
present ≟ₛₜ absent = no λ()
present ≟ₛₜ unknown = no λ()
absent ≟ₛₜ present = no λ()
absent ≟ₛₜ absent = yes refl
absent ≟ₛₜ unknown = no λ()
unknown ≟ₛₜ present = no λ()
unknown ≟ₛₜ absent = no λ()
unknown ≟ₛₜ unknown = yes refl
|
import tactic
import .tokens .commun
namespace tactic
setup_tactic_parser
@[derive has_reflect]
meta inductive Par_args
-- Syntax: Par fait (appliqué à args) on obtient news (tel que news')
| obtenir (fait : pexpr) (args : list pexpr) (news : list maybe_typed_ident) : Par_args
-- Syntax: Par fait (appliqué à args) on choisit news (tel que news')
| choisir (fait : pexpr) (args : list pexpr) (news : list maybe_typed_ident) : Par_args
-- Syntax: Par fait (appliqué à args) il suffit de montrer que buts
| appliquer (fait : pexpr) (args : list pexpr) (buts : list pexpr) : Par_args
open Par_args
/-- Parse a possibly empty list of consequences. -/
meta def on_obtient_parser : lean.parser (list maybe_typed_ident) :=
do { news ← tk "obtient" *> maybe_typed_ident_parser*,
news' ← (tk "tel" *> tk "que" *> maybe_typed_ident_parser*) <|> pure [],
pure (news ++ news') }
/-- Parse a list of consequences for `choose`. -/
meta def on_choisit_parser : lean.parser (list maybe_typed_ident) :=
do { news ← tk "choisit" *> maybe_typed_ident_parser*,
news' ← (tk "tel" *> tk "que" *> maybe_typed_ident_parser*) <|> pure [],
pure (news ++ news') }
/-- Parse one or more new goals. -/
meta def buts_parser : lean.parser (list pexpr) :=
tk "il" *> tk "suffit" *> tk "de" *> tk "montrer" *> tk "que" *> pexpr_list_or_texpr
/-- Main parser for the Par tactic. -/
meta def Par_parser : lean.parser Par_args :=
with_desc "... (appliqué à ...) on obtient ... (tel que ...) / Par ... (appliqué à ...) il suffit de montrer que ..." $
do e ← texpr,
args ← applique_a_parser,
do { _ ← tk "on",
(Par_args.obtenir e args <$> on_obtient_parser) <|>
(Par_args.choisir e args <$> on_choisit_parser) } <|>
(Par_args.appliquer e args <$> buts_parser)
meta def verifie_type : maybe_typed_ident → tactic unit
| (n, some t) := do n_type ← get_local n >>= infer_type,
to_expr t >>= unify n_type
| (n, none) := skip
/-- Extract information from a hypothesis or lemma, or reduce the goal to one
or more new goals by applying a hypothesis or lemma. -/
@[interactive]
meta def Par : parse Par_parser → tactic unit
| (Par_args.obtenir fait args news) := focus1 (do
news.mmap' (λ p : maybe_typed_ident, verifie_nom p.1),
efait ← to_expr fait,
applied ← mk_mapp_pexpr efait args,
if news.length = 1 then do { -- case where there is nothing to destructure
nom ← match news with
| ((nom, some new) :: t) := do enew ← to_expr new,
infer_type applied >>= unify enew,
pure nom
| ((nom, none) :: t) := pure nom
| _ := fail "Il faut indiquer un nom pour l'information obtenue." -- should not happen
end,
hyp ← note nom none applied,
nettoyage,
news.mmap' verifie_type }
else do tactic.rcases none (to_pexpr $ applied)
$ rcases_patt.tuple $ news.map rcases_patt_of_maybe_typed_ident,
nettoyage )
| (Par_args.choisir fait args news) := focus1 (do
efait ← to_expr fait,
applied ← mk_mapp_pexpr efait args,
choose tt applied (news.map prod.fst),
nettoyage,
news.mmap' verifie_type)
| (Par_args.appliquer fait args buts) := focus1 (do
efait ← to_expr fait,
ebuts ← buts.mmap to_expr,
mk_mapp_pexpr efait args >>= apply,
vrai_buts ← get_goals,
let paires := list.zip vrai_buts buts,
focus' (paires.map (λ p : expr × pexpr, do
`(force_type %%p _) ← i_to_expr_no_subgoals ``(force_type %%p.2 %%p.1), skip))
<|> fail "Ce n'est pas ce qu'il faut démontrer")
end tactic
example (P Q : (ℕ → ℕ) → Prop) (h : true ∧ ∃ u : ℕ → ℕ, P u ∧ Q u) : true :=
begin
Par h on obtient (a : true) (u : ℕ → ℕ) (b : P u) (c : Q u),
trivial
end
example (n : ℕ) (h : ∃ k, n = 2*k) : ∃ l, n+1 = 2*l + 1 :=
begin
Par h on obtient k hk,
use k,
rw hk
end
example (n : ℕ) (h : ∃ k, n = 2*k) : ∃ l, n+1 = 2*l + 1 :=
begin
Par h on obtient k tel que hk : n = 2*k,
use k,
rw hk
end
example (n : ℕ) (h : ∃ k, n = 2*k) : ∃ l, n+1 = 2*l + 1 :=
begin
success_if_fail {
Par h on obtient k tel que (hk : 0 = 1),
},
Par h on obtient k tel que (hk : n = 2*k),
use k,
rw hk
end
example (f g : ℕ → ℕ) (hf : ∀ y, ∃ x, f x = y) (hg : ∀ y, ∃ x, g x = y) : ∀ y, ∃ x, (g ∘ f) x = y :=
begin
intro y,
success_if_fail { Par hg appliqué à y on obtient x tel que (hx : g x = x) },
Par hg appliqué à y on obtient x tel que (hx : g x = y),
Par hf appliqué à x on obtient z hz,
use z,
change g (f z) = y,
rw [hz, hx],
end
example (P Q : Prop) (h : P ∧ Q) : Q :=
begin
Par h on obtient (hP : P) (hQ : Q),
exact hQ,
end
noncomputable example (f : ℕ → ℕ) (h : ∀ y, ∃ x, f x = y) : ℕ → ℕ :=
begin
Par h on choisit g tel que (H : ∀ (y : ℕ), f (g y) = y),
exact g,
end
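-- A further usage sketch (hypothetical, but consistent with `Par_parser` above, which
-- reads the optional `appliqué à` arguments before the `on choisit` consequences):
noncomputable example (f : ℕ → ℕ → ℕ) (h : ∀ x y, ∃ z, f x z = y) : ℕ → ℕ :=
begin
  Par h appliqué à 0 on choisit g tel que (H : ∀ y : ℕ, f 0 (g y) = y),
  exact g,
end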
example (P Q : Prop) (h : P → Q) (h' : P) : Q :=
begin
Par h il suffit de montrer que P,
exact h',
end
example (P Q R : Prop) (h : P → R → Q) (hP : P) (hR : R) : Q :=
begin
Par h il suffit de montrer que [P, R],
exact hP,
exact hR
end
example (P Q : Prop) (h : ∀ n : ℕ, P → Q) (h' : P) : Q :=
begin
success_if_fail { Par h appliqué à [0, 1] il suffit de montrer que P },
Par h appliqué à 0 il suffit de montrer que P,
exact h',
end
example (Q : Prop) (h : ∀ n : ℤ, n > 0 → Q) : Q :=
begin
Par h il suffit de montrer que (1 > 0),
norm_num,
end
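-- One more sketch combining the two argument forms seen above: `appliqué à` can feed a
-- hypothesis to the lemma before reducing the goal with `il suffit de montrer que`.
example (P Q R : Prop) (h : P → R → Q) (hP : P) (hR : R) : Q :=
begin
  Par h appliqué à hP il suffit de montrer que R,
  exact hR,
end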
|
A real number is an integer if and only if its image under the canonical embedding of $\mathbb{R}$ into $\mathbb{C}$ is an integer.
|
#' Utah's beneficial use polygon shapes
#'
#' Polygons containing beneficial use designations and water body type information. Used to assign uses or standards to site locations.
#'
#' @format An sf type polygon shapefile
"bu_poly"
|
lemma convex_hull_eq: "convex hull s = s \<longleftrightarrow> convex s"
|
r=358.46
https://sandbox.dams.library.ucdavis.edu/fcrepo/rest/collection/sherry-lehmann/catalogs/d7tg6n/media/images/d7tg6n-001/svc:tesseract/full/full/358.46/default.jpg Accept:application/hocr+xml
|
lemma offset_poly_eq_0_iff: "offset_poly p h = 0 \<longleftrightarrow> p = 0"
|
[STATEMENT]
lemma Liminf_add_ereal_right:
"F \<noteq> bot \<Longrightarrow> abs c \<noteq> \<infinity> \<Longrightarrow> Liminf F (\<lambda>n. g n + (c :: ereal)) = Liminf F g + c"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>F \<noteq> bot; \<bar>c\<bar> \<noteq> \<infinity>\<rbrakk> \<Longrightarrow> Liminf F (\<lambda>n. g n + c) = Liminf F g + c
[PROOF STEP]
by (rule Liminf_compose_continuous_mono) (auto simp: mono_def add_mono continuous_on_def)
|
If $f$ is holomorphic on an open set $S$, then its derivative $f'$ is holomorphic on $S$.
|
In mid-2009, producers decided to take Nicole's storyline into a "u-turn", when she reverted to her "wild ways". At the time Nicole had endured repeated personal trauma, including failed relationships, Roman being sent to prison and her best friend Belle dying of cancer. James explained: "It's all too much for her and she can't handle it, so she reverts to her wild ways." Geoff noticed Nicole's erratic behaviour and attempted to help her. She tried to "lure him into bed" after he comforted her; however, he turned her down. James said she no longer had romantic feelings for Geoff, but was actually in a "vulnerable state". She then started relying on alcohol more, and partied with fellow "wild child" Indigo Walker (Samara Weaving) at a "rowdy" venue. James explained that Nicole saw alcohol as an answer to her problems. The fact that "she's trying to deal with too many things" saw Nicole transform into a messed-up and depressed person.
|
lemma (in t2_space) separation_t2: "x \<noteq> y \<longleftrightarrow> (\<exists>U V. open U \<and> open V \<and> x \<in> U \<and> y \<in> V \<and> U \<inter> V = {})"
|
------------------------------------------------------------------------
-- The Agda standard library
--
-- Natural numbers represented in binary natively in Agda.
------------------------------------------------------------------------
{-# OPTIONS --without-K --safe #-}
module Data.Nat.Binary where
open import Data.Nat.Binary.Base public
open import Data.Nat.Binary.Properties public using (_≟_)
|
library(caret) # for confusionMatrix()
dataset <- "00_measurement/signalp-alone_y_pred.tsv"
dt <- read.table(dataset, header = TRUE, sep = "\t") # load prediction table
# extract file name (without extension) for naming outputs and row names
f_name <- paste0(tools::file_path_sans_ext(dataset))
# define factor levels for the confusion matrix
lvs <- c("secreted", "other")
actual <- factor(dt$y_true, levels = lvs)
pred <- factor(dt$y_pred, levels = lvs)
# calculate confusion matrix
eval_indices <- confusionMatrix(pred, actual)
# convert to tables
conf_matrix <- as.data.frame(eval_indices$table)
eval_table <- t(as.data.frame(eval_indices$byClass, row.names = NULL))
rownames(eval_table) <- f_name
# ---------- calculate FDR -------------------
# FDR = 1 - precision; column 3 of eval_table is caret's "Pos Pred Value" (precision)
False.Discovery.Rate <- 1 - eval_table[3]
eval_table <- data.frame(eval_table, False.Discovery.Rate)
# --------- calculate MCC (Matthews correlation coefficient) ----------
library(mccr)
mcc_sp_alone <- mccr(dt$mcc_y_true, dt$mcc_y_pred)
mcc_to_table <- as.data.frame(mcc_sp_alone)
colnames(mcc_to_table) <- "MCC"
# merge eval table with MCC
eval_table <- data.frame(eval_table,mcc_to_table)
# export tables
write.table(eval_table, file = sprintf("%s_evaluation_table.tsv", f_name),
            sep = "\t", eol = "\n")
write.table(conf_matrix, file = sprintf("%s_confusion_matrix.tsv", f_name),
            sep = "\t", eol = "\n")
|
/-
Copyright (c) 2021 Yaël Dillies. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yaël Dillies
-/
import order.bounded_order
import order.complete_lattice
import order.cover
import order.iterate
import tactic.monotonicity
/-!
# Successor and predecessor
This file defines successor and predecessor orders. `succ a`, the successor of an element `a : α` is
the least element greater than `a`. `pred a` is the greatest element less than `a`. Typical examples
include `ℕ`, `ℤ`, `ℕ+`, `fin n`, but also `enat`, the lexicographic order of a successor/predecessor
order...
## Typeclasses
* `succ_order`: Order equipped with a sensible successor function.
* `pred_order`: Order equipped with a sensible predecessor function.
* `is_succ_archimedean`: `succ_order` where `succ` iterated to an element gives all the greater
ones.
* `is_pred_archimedean`: `pred_order` where `pred` iterated to an element gives all the smaller
ones.
## Implementation notes
Maximal elements don't have a sensible successor. Thus the naïve typeclass
```lean
class naive_succ_order (α : Type*) [preorder α] :=
(succ : α → α)
(succ_le_iff : ∀ {a b}, succ a ≤ b ↔ a < b)
(lt_succ_iff : ∀ {a b}, a < succ b ↔ a ≤ b)
```
can't apply to an `order_top` because plugging in `a = b = ⊤` into either of `succ_le_iff` and
`lt_succ_iff` yields `⊤ < ⊤` (or more generally `m < m` for a maximal element `m`).
The solution taken here is to remove the implications `≤ → <` and instead require that `a < succ a`
for all non maximal elements (enforced by the combination of `le_succ` and the contrapositive of
`maximal_of_succ_le`).
The stricter condition of every element having a sensible successor can be obtained through the
combination of `succ_order α` and `no_max_order α`.
## TODO
Is `galois_connection pred succ` always true? If not, we should introduce
```lean
class succ_pred_order (α : Type*) [preorder α] extends succ_order α, pred_order α :=
(pred_succ_gc : galois_connection (pred : α → α) succ)
```
`covers` should help here.
-/
open function
/-! ### Successor order -/
variables {α : Type*}
/-- Order equipped with a sensible successor function. -/
@[ext] class succ_order (α : Type*) [preorder α] :=
(succ : α → α)
(le_succ : ∀ a, a ≤ succ a)
(maximal_of_succ_le : ∀ ⦃a⦄, succ a ≤ a → ∀ ⦃b⦄, ¬a < b)
(succ_le_of_lt : ∀ {a b}, a < b → succ a ≤ b)
(le_of_lt_succ : ∀ {a b}, a < succ b → a ≤ b)
namespace succ_order
section preorder
variables [preorder α]
/-- A constructor for `succ_order α` usable when `α` has no maximal element. -/
def of_succ_le_iff_of_le_lt_succ (succ : α → α)
(hsucc_le_iff : ∀ {a b}, succ a ≤ b ↔ a < b)
(hle_of_lt_succ : ∀ {a b}, a < succ b → a ≤ b) :
succ_order α :=
{ succ := succ,
le_succ := λ a, (hsucc_le_iff.1 le_rfl).le,
maximal_of_succ_le := λ a ha, (lt_irrefl a (hsucc_le_iff.1 ha)).elim,
succ_le_of_lt := λ a b, hsucc_le_iff.2,
le_of_lt_succ := λ a b, hle_of_lt_succ }
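-- As a concrete sanity check (a sketch only, relying solely on core `nat` lemmas, and
-- stated as an `example` rather than an `instance` to avoid clashing with instances
-- declared elsewhere): the natural numbers satisfy these axioms with `succ := nat.succ`.
example : succ_order ℕ :=
{ succ := nat.succ,
  le_succ := nat.le_succ,
  maximal_of_succ_le := λ a ha, (nat.not_succ_le_self a ha).elim,
  succ_le_of_lt := λ a b, nat.succ_le_of_lt,
  le_of_lt_succ := λ a b, nat.le_of_lt_succ }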
variables [succ_order α]
@[simp, mono] lemma succ_le_succ {a b : α} (h : a ≤ b) : succ a ≤ succ b :=
begin
by_cases ha : ∀ ⦃c⦄, ¬a < c,
{ have hba : succ b ≤ a,
{ by_contra H,
exact ha ((h.trans (le_succ b)).lt_of_not_le H) },
by_contra H,
exact ha ((h.trans (le_succ b)).trans_lt ((hba.trans (le_succ a)).lt_of_not_le H)) },
{ push_neg at ha,
obtain ⟨c, hc⟩ := ha,
exact succ_le_of_lt ((h.trans (le_succ b)).lt_of_not_le $ λ hba,
maximal_of_succ_le (hba.trans h) (((le_succ b).trans hba).trans_lt hc)) }
end
lemma succ_mono : monotone (succ : α → α) := λ a b, succ_le_succ
lemma lt_succ_of_not_maximal {a b : α} (h : a < b) : a < succ a :=
(le_succ a).lt_of_not_le (λ ha, maximal_of_succ_le ha h)
alias lt_succ_of_not_maximal ← has_lt.lt.lt_succ
protected lemma _root_.has_lt.lt.covers_succ {a b : α} (h : a < b) : a ⋖ succ a :=
⟨h.lt_succ, λ c hc, (succ_le_of_lt hc).not_lt⟩
@[simp] lemma covers_succ_of_nonempty_Ioi {a : α} (h : (set.Ioi a).nonempty) : a ⋖ succ a :=
has_lt.lt.covers_succ h.some_mem
section no_max_order
variables [no_max_order α] {a b : α}
lemma lt_succ (a : α) : a < succ a :=
(le_succ a).lt_of_not_le $ λ h, not_exists.2 (maximal_of_succ_le h) (exists_gt a)
lemma lt_succ_iff : a < succ b ↔ a ≤ b :=
⟨le_of_lt_succ, λ h, h.trans_lt $ lt_succ b⟩
lemma succ_le_iff : succ a ≤ b ↔ a < b :=
⟨(lt_succ a).trans_le, succ_le_of_lt⟩
@[simp] lemma succ_le_succ_iff : succ a ≤ succ b ↔ a ≤ b :=
⟨λ h, le_of_lt_succ $ (lt_succ a).trans_le h, λ h, succ_le_of_lt $ h.trans_lt $ lt_succ b⟩
alias succ_le_succ_iff ↔ le_of_succ_le_succ _
lemma succ_lt_succ_iff : succ a < succ b ↔ a < b :=
by simp_rw [lt_iff_le_not_le, succ_le_succ_iff]
alias succ_lt_succ_iff ↔ lt_of_succ_lt_succ succ_lt_succ
lemma succ_strict_mono : strict_mono (succ : α → α) := λ a b, succ_lt_succ
lemma covers_succ (a : α) : a ⋖ succ a := ⟨lt_succ a, λ c hc, (succ_le_of_lt hc).not_lt⟩
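-- A small usage sketch: combining `le_succ` with `lt_succ_iff` steps past two successors.
example (h : a ≤ b) : a < succ (succ b) :=
lt_succ_iff.2 (h.trans (le_succ b))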
end no_max_order
end preorder
section partial_order
variables [partial_order α]
/-- There is at most one way to define the successors in a `partial_order`. -/
instance : subsingleton (succ_order α) :=
begin
refine subsingleton.intro (λ h₀ h₁, _),
ext a,
by_cases ha : @succ _ _ h₀ a ≤ a,
{ refine (ha.trans (@le_succ _ _ h₁ a)).antisymm _,
by_contra H,
exact @maximal_of_succ_le _ _ h₀ _ ha _
((@le_succ _ _ h₁ a).lt_of_not_le $ λ h, H $ h.trans $ @le_succ _ _ h₀ a) },
{ exact (@succ_le_of_lt _ _ h₀ _ _ $ (@le_succ _ _ h₁ a).lt_of_not_le $ λ h,
@maximal_of_succ_le _ _ h₁ _ h _ $ (@le_succ _ _ h₀ a).lt_of_not_le ha).antisymm
(@succ_le_of_lt _ _ h₁ _ _ $ (@le_succ _ _ h₀ a).lt_of_not_le ha) }
end
variables [succ_order α]
lemma le_le_succ_iff {a b : α} : a ≤ b ∧ b ≤ succ a ↔ b = a ∨ b = succ a :=
begin
split,
{ rintro h,
rw or_iff_not_imp_left,
exact λ hba : b ≠ a, h.2.antisymm (succ_le_of_lt $ h.1.lt_of_ne $ hba.symm) },
rintro (rfl | rfl),
{ exact ⟨le_rfl, le_succ b⟩ },
{ exact ⟨le_succ a, le_rfl⟩ }
end
lemma _root_.covers.succ_eq {a b : α} (h : a ⋖ b) : succ a = b :=
(succ_le_of_lt h.lt).eq_of_not_lt $ λ h', h.2 (lt_succ_of_not_maximal h.lt) h'
section no_max_order
variables [no_max_order α] {a b : α}
lemma succ_injective : injective (succ : α → α) :=
begin
rintro a b,
simp_rw [eq_iff_le_not_lt, succ_le_succ_iff, succ_lt_succ_iff],
exact id,
end
lemma succ_eq_succ_iff : succ a = succ b ↔ a = b :=
succ_injective.eq_iff
lemma succ_ne_succ_iff : succ a ≠ succ b ↔ a ≠ b :=
succ_injective.ne_iff
alias succ_ne_succ_iff ↔ ne_of_succ_ne_succ succ_ne_succ
lemma lt_succ_iff_lt_or_eq : a < succ b ↔ (a < b ∨ a = b) :=
lt_succ_iff.trans le_iff_lt_or_eq
lemma le_succ_iff_lt_or_eq : a ≤ succ b ↔ (a ≤ b ∨ a = succ b) :=
by rw [←lt_succ_iff, ←lt_succ_iff, lt_succ_iff_lt_or_eq]
lemma _root_.covers_iff_succ_eq : a ⋖ b ↔ succ a = b :=
⟨covers.succ_eq, by { rintro rfl, exact covers_succ _ }⟩
end no_max_order
end partial_order
section order_top
variables [partial_order α] [order_top α] [succ_order α]
@[simp] lemma succ_top : succ (⊤ : α) = ⊤ :=
le_top.antisymm (le_succ _)
@[simp] lemma succ_le_iff_eq_top {a : α} : succ a ≤ a ↔ a = ⊤ :=
⟨λ h, eq_top_of_maximal (maximal_of_succ_le h), λ h, by rw [h, succ_top]⟩
@[simp] lemma lt_succ_iff_ne_top {a : α} : a < succ a ↔ a ≠ ⊤ :=
begin
simp only [lt_iff_le_not_le, true_and, le_succ a],
exact not_iff_not.2 succ_le_iff_eq_top,
end
end order_top
section order_bot
variables [partial_order α] [order_bot α] [succ_order α] [nontrivial α]
lemma bot_lt_succ (a : α) : ⊥ < succ a :=
begin
obtain ⟨b, hb⟩ := exists_ne (⊥ : α),
refine bot_lt_iff_ne_bot.2 (λ h, _),
have := eq_bot_iff.2 ((le_succ a).trans h.le),
rw this at h,
exact maximal_of_succ_le h.le (bot_lt_iff_ne_bot.2 hb),
end
lemma succ_ne_bot (a : α) : succ a ≠ ⊥ :=
(bot_lt_succ a).ne'
end order_bot
section linear_order
variables [linear_order α]
/-- A constructor for `succ_order α` usable when `α` is a linear order with no maximal element. -/
def of_succ_le_iff (succ : α → α) (hsucc_le_iff : ∀ {a b}, succ a ≤ b ↔ a < b) :
succ_order α :=
{ succ := succ,
le_succ := λ a, (hsucc_le_iff.1 le_rfl).le,
maximal_of_succ_le := λ a ha, (lt_irrefl a (hsucc_le_iff.1 ha)).elim,
succ_le_of_lt := λ a b, hsucc_le_iff.2,
le_of_lt_succ := λ a b h, le_of_not_lt ((not_congr hsucc_le_iff).1 h.not_le) }
end linear_order
section complete_lattice
variables [complete_lattice α] [succ_order α]
lemma succ_eq_infi (a : α) : succ a = ⨅ (b : α) (h : a < b), b :=
begin
refine le_antisymm (le_infi (λ b, le_infi succ_le_of_lt)) _,
obtain rfl | ha := eq_or_ne a ⊤,
{ rw succ_top,
exact le_top },
exact binfi_le _ (lt_succ_iff_ne_top.2 ha),
end
end complete_lattice
end succ_order
/-! ### Predecessor order -/
/-- Order equipped with a sensible predecessor function. -/
@[ext] class pred_order (α : Type*) [preorder α] :=
(pred : α → α)
(pred_le : ∀ a, pred a ≤ a)
(minimal_of_le_pred : ∀ ⦃a⦄, a ≤ pred a → ∀ ⦃b⦄, ¬b < a)
(le_pred_of_lt : ∀ {a b}, a < b → a ≤ pred b)
(le_of_pred_lt : ∀ {a b}, pred a < b → a ≤ b)
namespace pred_order
section preorder
variables [preorder α]
/-- A constructor for `pred_order α` usable when `α` has no minimal element. -/
def of_le_pred_iff_of_pred_le_pred (pred : α → α)
(hle_pred_iff : ∀ {a b}, a ≤ pred b ↔ a < b)
(hle_of_pred_lt : ∀ {a b}, pred a < b → a ≤ b) :
pred_order α :=
{ pred := pred,
pred_le := λ a, (hle_pred_iff.1 le_rfl).le,
minimal_of_le_pred := λ a ha, (lt_irrefl a (hle_pred_iff.1 ha)).elim,
le_pred_of_lt := λ a b, hle_pred_iff.2,
le_of_pred_lt := λ a b, hle_of_pred_lt }
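-- Dually, a sketch for `ℕ` with `pred := nat.pred`, using only core lemmas and case
-- analysis on zero/successor (again an `example`, not an `instance`):
example : pred_order ℕ :=
{ pred := nat.pred,
  pred_le := nat.pred_le,
  minimal_of_le_pred := λ a ha b hb, begin
    cases a,
    { exact nat.not_lt_zero b hb },
    { exact nat.not_succ_le_self a ha }
  end,
  le_pred_of_lt := λ a b h, begin
    cases b,
    { exact absurd h (nat.not_lt_zero a) },
    { exact nat.le_of_lt_succ h }
  end,
  le_of_pred_lt := λ a b h, begin
    cases a,
    { exact nat.zero_le b },
    { exact nat.succ_le_of_lt h }
  end }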
variables [pred_order α]
@[simp, mono] lemma pred_le_pred {a b : α} (h : a ≤ b) : pred a ≤ pred b :=
begin
by_cases hb : ∀ ⦃c⦄, ¬c < b,
{ have hba : b ≤ pred a,
{ by_contra H,
exact hb (((pred_le a).trans h).lt_of_not_le H) },
by_contra H,
exact hb ((((pred_le b).trans hba).lt_of_not_le H).trans_le ((pred_le a).trans h)) },
{ push_neg at hb,
obtain ⟨c, hc⟩ := hb,
exact le_pred_of_lt (((pred_le a).trans h).lt_of_not_le $ λ hba,
minimal_of_le_pred (h.trans hba) $ hc.trans_le $ hba.trans $ pred_le a) }
end
lemma pred_mono : monotone (pred : α → α) := λ a b, pred_le_pred
lemma pred_lt_of_not_minimal {a b : α} (h : b < a) : pred a < a :=
(pred_le a).lt_of_not_le (λ ha, minimal_of_le_pred ha h)
alias pred_lt_of_not_minimal ← has_lt.lt.pred_lt
protected lemma _root_.has_lt.lt.pred_covers {a b : α} (h : b < a) : pred a ⋖ a :=
⟨h.pred_lt, λ c hc, (le_of_pred_lt hc).not_lt⟩
@[simp] lemma pred_covers_of_nonempty_Iio {a : α} (h : (set.Iio a).nonempty) : pred a ⋖ a :=
has_lt.lt.pred_covers h.some_mem
section no_min_order
variables [no_min_order α] {a b : α}
lemma pred_lt (a : α) : pred a < a :=
(pred_le a).lt_of_not_le $ λ h, not_exists.2 (minimal_of_le_pred h) (exists_lt a)
lemma pred_lt_iff : pred a < b ↔ a ≤ b :=
⟨le_of_pred_lt, (pred_lt a).trans_le⟩
lemma le_pred_iff : a ≤ pred b ↔ a < b :=
⟨λ h, h.trans_lt (pred_lt b), le_pred_of_lt⟩
@[simp] lemma pred_le_pred_iff : pred a ≤ pred b ↔ a ≤ b :=
⟨λ h, le_of_pred_lt $ h.trans_lt (pred_lt b), λ h, le_pred_of_lt $ (pred_lt a).trans_le h⟩
alias pred_le_pred_iff ↔ le_of_pred_le_pred _
@[simp] lemma pred_lt_pred_iff : pred a < pred b ↔ a < b :=
by simp_rw [lt_iff_le_not_le, pred_le_pred_iff]
alias pred_lt_pred_iff ↔ lt_of_pred_lt_pred pred_lt_pred
lemma pred_strict_mono : strict_mono (pred : α → α) := λ a b, pred_lt_pred
lemma pred_covers (a : α) : pred a ⋖ a := ⟨pred_lt a, λ c hc, (le_of_pred_lt hc).not_lt⟩
end no_min_order
end preorder
section partial_order
variables [partial_order α]
/-- There is at most one way to define the predecessors in a `partial_order`. -/
instance : subsingleton (pred_order α) :=
begin
refine subsingleton.intro (λ h₀ h₁, _),
ext a,
by_cases ha : a ≤ @pred _ _ h₀ a,
{ refine le_antisymm _ ((@pred_le _ _ h₁ a).trans ha),
by_contra H,
exact @minimal_of_le_pred _ _ h₀ _ ha _
((@pred_le _ _ h₁ a).lt_of_not_le $ λ h, H $ (@pred_le _ _ h₀ a).trans h) },
{ exact (@le_pred_of_lt _ _ h₁ _ _ $ (@pred_le _ _ h₀ a).lt_of_not_le ha).antisymm
(@le_pred_of_lt _ _ h₀ _ _ $ (@pred_le _ _ h₁ a).lt_of_not_le $ λ h,
@minimal_of_le_pred _ _ h₁ _ h _ $ (@pred_le _ _ h₀ a).lt_of_not_le ha) }
end
variables [pred_order α]
lemma pred_le_le_iff {a b : α} : pred a ≤ b ∧ b ≤ a ↔ b = a ∨ b = pred a :=
begin
split,
{ rintro h,
rw or_iff_not_imp_left,
exact λ hba, (le_pred_of_lt $ h.2.lt_of_ne hba).antisymm h.1 },
rintro (rfl | rfl),
{ exact ⟨pred_le b, le_rfl⟩ },
{ exact ⟨le_rfl, pred_le a⟩ }
end
lemma _root_.covers.pred_eq {a b : α} (h : a ⋖ b) : pred b = a :=
(le_pred_of_lt h.lt).eq_of_not_gt $ λ h', h.2 h' $ pred_lt_of_not_minimal h.lt
section no_min_order
variables [no_min_order α] {a b : α}
lemma pred_injective : injective (pred : α → α) :=
begin
rintro a b,
simp_rw [eq_iff_le_not_lt, pred_le_pred_iff, pred_lt_pred_iff],
exact id,
end
lemma pred_eq_pred_iff : pred a = pred b ↔ a = b :=
pred_injective.eq_iff
lemma pred_ne_pred_iff : pred a ≠ pred b ↔ a ≠ b :=
pred_injective.ne_iff
lemma pred_lt_iff_lt_or_eq : pred a < b ↔ (a < b ∨ a = b) :=
pred_lt_iff.trans le_iff_lt_or_eq
lemma le_pred_iff_lt_or_eq : pred a ≤ b ↔ (a ≤ b ∨ pred a = b) :=
by rw [←pred_lt_iff, ←pred_lt_iff, pred_lt_iff_lt_or_eq]
lemma _root_.covers_iff_pred_eq : a ⋖ b ↔ pred b = a :=
⟨covers.pred_eq, by { rintro rfl, exact pred_covers _ }⟩
end no_min_order
end partial_order
section order_bot
variables [partial_order α] [order_bot α] [pred_order α]
@[simp] lemma pred_bot : pred (⊥ : α) = ⊥ :=
(pred_le _).antisymm bot_le
@[simp] lemma le_pred_iff_eq_bot {a : α} : a ≤ pred a ↔ a = ⊥ :=
⟨λ h, eq_bot_of_minimal (minimal_of_le_pred h), λ h, by rw [h, pred_bot]⟩
@[simp] lemma pred_lt_iff_ne_bot {a : α} : pred a < a ↔ a ≠ ⊥ :=
begin
simp only [lt_iff_le_not_le, true_and, pred_le a],
exact not_iff_not.2 le_pred_iff_eq_bot,
end
end order_bot
section order_top
variables [partial_order α] [order_top α] [pred_order α]
lemma pred_lt_top [nontrivial α] (a : α) : pred a < ⊤ :=
begin
obtain ⟨b, hb⟩ := exists_ne (⊤ : α),
refine lt_top_iff_ne_top.2 (λ h, _),
have := eq_top_iff.2 (h.ge.trans (pred_le a)),
rw this at h,
exact minimal_of_le_pred h.ge (lt_top_iff_ne_top.2 hb),
end
lemma pred_ne_top [nontrivial α] (a : α) : pred a ≠ ⊤ :=
(pred_lt_top a).ne
end order_top
section linear_order
variables [linear_order α] {a b : α}
/-- A constructor for `pred_order α` usable when `α` is a linear order with no minimal element. -/
def of_le_pred_iff (pred : α → α) (hle_pred_iff : ∀ {a b}, a ≤ pred b ↔ a < b) :
pred_order α :=
{ pred := pred,
pred_le := λ a, (hle_pred_iff.1 le_rfl).le,
minimal_of_le_pred := λ a ha, (lt_irrefl a (hle_pred_iff.1 ha)).elim,
le_pred_of_lt := λ a b, hle_pred_iff.2,
le_of_pred_lt := λ a b h, le_of_not_lt ((not_congr hle_pred_iff).1 h.not_le) }
end linear_order
section complete_lattice
variables [complete_lattice α] [pred_order α]
lemma pred_eq_supr (a : α) : pred a = ⨆ (b : α) (h : b < a), b :=
begin
refine le_antisymm _ (supr_le (λ b, supr_le le_pred_of_lt)),
obtain rfl | ha := eq_or_ne a ⊥,
{ rw pred_bot,
exact bot_le },
exact @le_bsupr _ _ _ (λ b, b < a) (λ a _, a) (pred a) (pred_lt_iff_ne_bot.2 ha),
end
end complete_lattice
end pred_order
open succ_order pred_order
/-! ### Successor-predecessor orders -/
section succ_pred_order
variables [partial_order α] [succ_order α] [pred_order α] {a b : α}
protected lemma _root_.has_lt.lt.succ_pred (h : b < a) : succ (pred a) = a := h.pred_covers.succ_eq
protected lemma _root_.has_lt.lt.pred_succ (h : a < b) : pred (succ a) = a := h.covers_succ.pred_eq
@[simp] lemma succ_pred_of_nonempty_Iio {a : α} (h : (set.Iio a).nonempty) : succ (pred a) = a :=
has_lt.lt.succ_pred h.some_mem
@[simp] lemma pred_succ_of_nonempty_Ioi {a : α} (h : (set.Ioi a).nonempty) : pred (succ a) = a :=
has_lt.lt.pred_succ h.some_mem
@[simp] lemma succ_pred [no_min_order α] (a : α) : succ (pred a) = a := (pred_covers _).succ_eq
@[simp] lemma pred_succ [no_max_order α] (a : α) : pred (succ a) = a := (covers_succ _).pred_eq
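-- A consequence worth recording as a sketch: without a maximal element, `pred` is a
-- left inverse of `succ` (and dually, `succ` is a left inverse of `pred` without a
-- minimal element).
example [no_max_order α] : left_inverse (pred : α → α) succ := pred_succ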
end succ_pred_order
/-! ### Dual order -/
section order_dual
variables [preorder α]
instance [pred_order α] : succ_order (order_dual α) :=
{ succ := (pred : α → α),
le_succ := pred_le,
maximal_of_succ_le := minimal_of_le_pred,
succ_le_of_lt := λ a b h, le_pred_of_lt h,
le_of_lt_succ := λ a b, le_of_pred_lt }
instance [succ_order α] : pred_order (order_dual α) :=
{ pred := (succ : α → α),
pred_le := le_succ,
minimal_of_le_pred := maximal_of_succ_le,
le_pred_of_lt := λ a b h, succ_le_of_lt h,
le_of_pred_lt := λ a b, le_of_lt_succ }
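-- Sanity check (a sketch): on the dual order, `succ` is definitionally the original `pred`.
example [pred_order α] (a : α) : succ (a : order_dual α) = pred a := rfl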
end order_dual
/-! ### `with_bot`, `with_top`
Adding a greatest/least element to a `succ_order` or to a `pred_order`.
As far as successors and predecessors are concerned, there are four ways to add a bottom or top
element to an order:
* Adding a `⊤` to an `order_top`: Preserves `succ` and `pred`.
* Adding a `⊤` to a `no_max_order`: Preserves `succ`. Never preserves `pred`.
* Adding a `⊥` to an `order_bot`: Preserves `succ` and `pred`.
* Adding a `⊥` to a `no_min_order`: Preserves `pred`. Never preserves `succ`.
where "preserves `(succ/pred)`" means
`(succ/pred)_order α → (succ/pred)_order ((with_top/with_bot) α)`.
-/
section with_top
open with_top
/-! #### Adding a `⊤` to an `order_top` -/
instance [decidable_eq α] [partial_order α] [order_top α] [succ_order α] :
succ_order (with_top α) :=
{ succ := λ a, match a with
| ⊤ := ⊤
| (some a) := ite (a = ⊤) ⊤ (some (succ a))
end,
le_succ := λ a, begin
cases a,
{ exact le_top },
change ((≤) : with_top α → with_top α → Prop) _ (ite _ _ _),
split_ifs,
{ exact le_top },
{ exact some_le_some.2 (le_succ a) }
end,
maximal_of_succ_le := λ a ha b h, begin
cases a,
{ exact not_top_lt h },
change ((≤) : with_top α → with_top α → Prop) (ite _ _ _) _ at ha,
split_ifs at ha with ha',
{ exact not_top_lt (ha.trans_lt h) },
{ rw [some_le_some, succ_le_iff_eq_top] at ha,
exact ha' ha }
end,
succ_le_of_lt := λ a b h, begin
cases b,
{ exact le_top },
cases a,
{ exact (not_top_lt h).elim },
rw some_lt_some at h,
change ((≤) : with_top α → with_top α → Prop) (ite _ _ _) _,
split_ifs with ha,
{ rw ha at h,
exact (not_top_lt h).elim },
{ exact some_le_some.2 (succ_le_of_lt h) }
end,
le_of_lt_succ := λ a b h, begin
cases a,
{ exact (not_top_lt h).elim },
cases b,
{ exact le_top },
change ((<) : with_top α → with_top α → Prop) _ (ite _ _ _) at h,
rw some_le_some,
split_ifs at h with hb,
{ rw hb,
exact le_top },
{ exact le_of_lt_succ (some_lt_some.1 h) }
end }
instance [partial_order α] [order_top α] [pred_order α] : pred_order (with_top α) :=
{ pred := λ a, match a with
| ⊤ := some ⊤
| (some a) := some (pred a)
end,
pred_le := λ a, match a with
| ⊤ := le_top
| (some a) := some_le_some.2 (pred_le a)
end,
minimal_of_le_pred := λ a ha b h, begin
cases a,
{ exact (coe_lt_top (⊤ : α)).not_le ha },
cases b,
{ exact h.not_le le_top },
{ exact minimal_of_le_pred (some_le_some.1 ha) (some_lt_some.1 h) }
end,
le_pred_of_lt := λ a b h, begin
cases a,
{ exact ((le_top).not_lt h).elim },
cases b,
{ exact some_le_some.2 le_top },
exact some_le_some.2 (le_pred_of_lt $ some_lt_some.1 h),
end,
le_of_pred_lt := λ a b h, begin
cases b,
{ exact le_top },
cases a,
{ exact (not_top_lt $ some_lt_some.1 h).elim },
{ exact some_le_some.2 (le_of_pred_lt $ some_lt_some.1 h) }
end }
/-! #### Adding a `⊤` to a `no_max_order` -/
instance with_top.succ_order_of_no_max_order [partial_order α] [no_max_order α] [succ_order α] :
succ_order (with_top α) :=
{ succ := λ a, match a with
| ⊤ := ⊤
| (some a) := some (succ a)
end,
le_succ := λ a, begin
cases a,
{ exact le_top },
{ exact some_le_some.2 (le_succ a) }
end,
maximal_of_succ_le := λ a ha b h, begin
cases a,
{ exact not_top_lt h },
{ exact not_exists.2 (maximal_of_succ_le (some_le_some.1 ha)) (exists_gt a) }
end,
succ_le_of_lt := λ a b h, begin
cases a,
{ exact (not_top_lt h).elim },
cases b,
    { exact le_top },
{ exact some_le_some.2 (succ_le_of_lt $ some_lt_some.1 h) }
end,
le_of_lt_succ := λ a b h, begin
cases a,
{ exact (not_top_lt h).elim },
cases b,
{ exact le_top },
{ exact some_le_some.2 (le_of_lt_succ $ some_lt_some.1 h) }
end }
instance [partial_order α] [no_max_order α] [hα : nonempty α] :
is_empty (pred_order (with_top α)) :=
⟨begin
introI,
set b := pred (⊤ : with_top α) with h,
cases pred (⊤ : with_top α) with a ha; change b with pred ⊤ at h,
{ exact hα.elim (λ a, minimal_of_le_pred h.ge (coe_lt_top a)) },
{ obtain ⟨c, hc⟩ := exists_gt a,
rw [←some_lt_some, ←h] at hc,
exact (le_of_pred_lt hc).not_lt (some_lt_none _) }
end⟩
end with_top
section with_bot
open with_bot
/-! #### Adding a `⊥` to an `order_bot` -/
instance [preorder α] [order_bot α] [succ_order α] : succ_order (with_bot α) :=
{ succ := λ a, match a with
| ⊥ := some ⊥
| (some a) := some (succ a)
end,
le_succ := λ a, match a with
| ⊥ := bot_le
| (some a) := some_le_some.2 (le_succ a)
end,
maximal_of_succ_le := λ a ha b h, begin
cases a,
{ exact (none_lt_some (⊥ : α)).not_le ha },
cases b,
{ exact not_lt_bot h },
{ exact maximal_of_succ_le (some_le_some.1 ha) (some_lt_some.1 h) }
end,
succ_le_of_lt := λ a b h, begin
cases b,
{ exact (not_lt_bot h).elim },
cases a,
{ exact some_le_some.2 bot_le },
{ exact some_le_some.2 (succ_le_of_lt $ some_lt_some.1 h) }
end,
le_of_lt_succ := λ a b h, begin
cases a,
{ exact bot_le },
cases b,
{ exact (not_lt_bot $ some_lt_some.1 h).elim },
{ exact some_le_some.2 (le_of_lt_succ $ some_lt_some.1 h) }
end }
instance [decidable_eq α] [partial_order α] [order_bot α] [pred_order α] :
pred_order (with_bot α) :=
{ pred := λ a, match a with
| ⊥ := ⊥
| (some a) := ite (a = ⊥) ⊥ (some (pred a))
end,
pred_le := λ a, begin
cases a,
{ exact bot_le },
change (ite _ _ _ : with_bot α) ≤ some a,
split_ifs,
{ exact bot_le },
{ exact some_le_some.2 (pred_le a) }
end,
minimal_of_le_pred := λ a ha b h, begin
cases a,
{ exact not_lt_bot h },
change ((≤) : with_bot α → with_bot α → Prop) _ (ite _ _ _) at ha,
split_ifs at ha with ha',
{ exact not_lt_bot (h.trans_le ha) },
{ rw [some_le_some, le_pred_iff_eq_bot] at ha,
exact ha' ha }
end,
le_pred_of_lt := λ a b h, begin
cases a,
{ exact bot_le },
cases b,
{ exact (not_lt_bot h).elim },
rw some_lt_some at h,
change ((≤) : with_bot α → with_bot α → Prop) _ (ite _ _ _),
split_ifs with hb,
{ rw hb at h,
exact (not_lt_bot h).elim },
{ exact some_le_some.2 (le_pred_of_lt h) }
end,
le_of_pred_lt := λ a b h, begin
cases b,
{ exact (not_lt_bot h).elim },
cases a,
{ exact bot_le },
change ((<) : with_bot α → with_bot α → Prop) (ite _ _ _) _ at h,
rw some_le_some,
split_ifs at h with ha,
{ rw ha,
exact bot_le },
{ exact le_of_pred_lt (some_lt_some.1 h) }
end }
/-! #### Adding a `⊥` to a `no_min_order` -/
instance [partial_order α] [no_min_order α] [hα : nonempty α] :
is_empty (succ_order (with_bot α)) :=
⟨begin
introI,
set b : with_bot α := succ ⊥ with h,
cases succ (⊥ : with_bot α) with a ha; change b with succ ⊥ at h,
{ exact hα.elim (λ a, maximal_of_succ_le h.le (bot_lt_coe a)) },
{ obtain ⟨c, hc⟩ := exists_lt a,
rw [←some_lt_some, ←h] at hc,
exact (le_of_lt_succ hc).not_lt (none_lt_some _) }
end⟩
instance with_bot.pred_order_of_no_min_order [partial_order α] [no_min_order α] [pred_order α] :
pred_order (with_bot α) :=
{ pred := λ a, match a with
| ⊥ := ⊥
| (some a) := some (pred a)
end,
pred_le := λ a, begin
cases a,
{ exact bot_le },
{ exact some_le_some.2 (pred_le a) }
end,
minimal_of_le_pred := λ a ha b h, begin
cases a,
{ exact not_lt_bot h },
{ exact not_exists.2 (minimal_of_le_pred (some_le_some.1 ha)) (exists_lt a) }
end,
le_pred_of_lt := λ a b h, begin
cases b,
{ exact (not_lt_bot h).elim },
cases a,
{ exact bot_le },
{ exact some_le_some.2 (le_pred_of_lt $ some_lt_some.1 h) }
end,
le_of_pred_lt := λ a b h, begin
cases b,
{ exact (not_lt_bot h).elim },
cases a,
{ exact bot_le },
{ exact some_le_some.2 (le_of_pred_lt $ some_lt_some.1 h) }
end }
end with_bot
/-! ### Archimedeanness -/
/-- A `succ_order` is succ-archimedean if one can go from any element to any greater element by
iterating `succ` finitely many times. -/
class is_succ_archimedean (α : Type*) [preorder α] [succ_order α] : Prop :=
(exists_succ_iterate_of_le {a b : α} (h : a ≤ b) : ∃ n, succ^[n] a = b)
/-- A `pred_order` is pred-archimedean if one can go from any element to any smaller element by
iterating `pred` finitely many times. -/
class is_pred_archimedean (α : Type*) [preorder α] [pred_order α] : Prop :=
(exists_pred_iterate_of_le {a b : α} (h : a ≤ b) : ∃ n, pred^[n] b = a)
export is_succ_archimedean (exists_succ_iterate_of_le)
export is_pred_archimedean (exists_pred_iterate_of_le)
section preorder
variables [preorder α]
section succ_order
variables [succ_order α] [is_succ_archimedean α] {a b : α}
instance : is_pred_archimedean (order_dual α) :=
{ exists_pred_iterate_of_le := λ a b h, by convert @exists_succ_iterate_of_le α _ _ _ _ _ h }
lemma has_le.le.exists_succ_iterate (h : a ≤ b) : ∃ n, succ^[n] a = b :=
exists_succ_iterate_of_le h
lemma exists_succ_iterate_iff_le : (∃ n, succ^[n] a = b) ↔ a ≤ b :=
begin
refine ⟨_, exists_succ_iterate_of_le⟩,
rintro ⟨n, rfl⟩,
exact id_le_iterate_of_id_le le_succ n a,
end
/-- Induction principle on a type with a `succ_order` for all elements above a given element `m`. -/
@[elab_as_eliminator] lemma succ.rec {P : α → Prop} {m : α}
(h0 : P m) (h1 : ∀ n, m ≤ n → P n → P (succ n)) ⦃n : α⦄ (hmn : m ≤ n) : P n :=
begin
obtain ⟨n, rfl⟩ := hmn.exists_succ_iterate, clear hmn,
induction n with n ih,
{ exact h0 },
{ rw [function.iterate_succ_apply'], exact h1 _ (id_le_iterate_of_id_le le_succ n m) ih }
end
lemma succ.rec_iff {p : α → Prop} (hsucc : ∀ a, p a ↔ p (succ a)) {a b : α} (h : a ≤ b) :
p a ↔ p b :=
begin
obtain ⟨n, rfl⟩ := h.exists_succ_iterate,
exact iterate.rec (λ b, p a ↔ p b) (λ c hc, hc.trans (hsucc _)) iff.rfl n,
end
end succ_order
section pred_order
variables [pred_order α] [is_pred_archimedean α] {a b : α}
instance : is_succ_archimedean (order_dual α) :=
{ exists_succ_iterate_of_le := λ a b h, by convert @exists_pred_iterate_of_le α _ _ _ _ _ h }
lemma has_le.le.exists_pred_iterate (h : a ≤ b) : ∃ n, pred^[n] b = a :=
exists_pred_iterate_of_le h
lemma exists_pred_iterate_iff_le : (∃ n, pred^[n] b = a) ↔ a ≤ b :=
@exists_succ_iterate_iff_le (order_dual α) _ _ _ _ _
/-- Induction principle on a type with a `pred_order` for all elements below a given element `m`. -/
@[elab_as_eliminator] lemma pred.rec {P : α → Prop} {m : α}
(h0 : P m) (h1 : ∀ n, n ≤ m → P n → P (pred n)) ⦃n : α⦄ (hmn : n ≤ m) : P n :=
@succ.rec (order_dual α) _ _ _ _ _ h0 h1 _ hmn
lemma pred.rec_iff {p : α → Prop} (hsucc : ∀ a, p a ↔ p (pred a)) {a b : α} (h : a ≤ b) :
p a ↔ p b :=
(@succ.rec_iff (order_dual α) _ _ _ _ hsucc _ _ h).symm
end pred_order
end preorder
section linear_order
variables [linear_order α]
section succ_order
variables [succ_order α] [is_succ_archimedean α] {a b : α}
lemma exists_succ_iterate_or : (∃ n, succ^[n] a = b) ∨ ∃ n, succ^[n] b = a :=
(le_total a b).imp exists_succ_iterate_of_le exists_succ_iterate_of_le
lemma succ.rec_linear {p : α → Prop} (hsucc : ∀ a, p a ↔ p (succ a)) (a b : α) : p a ↔ p b :=
(le_total a b).elim (succ.rec_iff hsucc) (λ h, (succ.rec_iff hsucc h).symm)
end succ_order
section pred_order
variables [pred_order α] [is_pred_archimedean α] {a b : α}
lemma exists_pred_iterate_or : (∃ n, pred^[n] b = a) ∨ ∃ n, pred^[n] a = b :=
(le_total a b).imp exists_pred_iterate_of_le exists_pred_iterate_of_le
lemma pred.rec_linear {p : α → Prop} (hsucc : ∀ a, p a ↔ p (pred a)) (a b : α) : p a ↔ p b :=
(le_total a b).elim (pred.rec_iff hsucc) (λ h, (pred.rec_iff hsucc h).symm)
end pred_order
end linear_order
section order_bot
variables [preorder α] [order_bot α] [succ_order α] [is_succ_archimedean α]
lemma succ.rec_bot (p : α → Prop) (hbot : p ⊥) (hsucc : ∀ a, p a → p (succ a)) (a : α) : p a :=
succ.rec hbot (λ x _ h, hsucc x h) (bot_le : ⊥ ≤ a)
end order_bot
section order_top
variables [preorder α] [order_top α] [pred_order α] [is_pred_archimedean α]
lemma pred.rec_top (p : α → Prop) (htop : p ⊤) (hpred : ∀ a, p a → p (pred a)) (a : α) : p a :=
pred.rec htop (λ x _ h, hpred x h) (le_top : a ≤ ⊤)
end order_top
|