[GOAL]
α : Type u_1
p : Set α → Prop
s₀ : Set α
hp : p s₀
t : Set α
inst✝ : HasCountableSeparatingOn α p t
⊢ ∃ S, (∀ (n : ℕ), p (S n)) ∧ ∀ (x : α), x ∈ t → ∀ (y : α), y ∈ t → (∀ (n : ℕ), x ∈ S n ↔ y ∈ S n) → x = y
[PROOFSTEP]
rcases exists_nonempty_countable_separating α hp t with ⟨S, hSne, hSc, hS⟩
[GOAL]
case intro.intro.intro
α : Type u_1
p : Set α → Prop
s₀ : Set α
hp : p s₀
t : Set α
inst✝ : HasCountableSeparatingOn α p t
S : Set (Set α)
hSne : Set.Nonempty S
hSc : Set.Countable S
hS :
(∀ (s : Set α), s ∈ S → p s) ∧ ∀ (x : α), x ∈ t → ∀ (y : α), y ∈ t → (∀ (s : Set α), s ∈ S → (x ∈ s ↔ y ∈ s)) → x = y
⊢ ∃ S, (∀ (n : ℕ), p (S n)) ∧ ∀ (x : α), x ∈ t → ∀ (y : α), y ∈ t → (∀ (n : ℕ), x ∈ S n ↔ y ∈ S n) → x = y
[PROOFSTEP]
rcases hSc.exists_eq_range hSne with ⟨S, rfl⟩
[GOAL]
case intro.intro.intro.intro
α : Type u_1
p : Set α → Prop
s₀ : Set α
hp : p s₀
t : Set α
inst✝ : HasCountableSeparatingOn α p t
S : ℕ → Set α
hSne : Set.Nonempty (range S)
hSc : Set.Countable (range S)
hS :
(∀ (s : Set α), s ∈ range S → p s) ∧
∀ (x : α), x ∈ t → ∀ (y : α), y ∈ t → (∀ (s : Set α), s ∈ range S → (x ∈ s ↔ y ∈ s)) → x = y
⊢ ∃ S, (∀ (n : ℕ), p (S n)) ∧ ∀ (x : α), x ∈ t → ∀ (y : α), y ∈ t → (∀ (n : ℕ), x ∈ S n ↔ y ∈ S n) → x = y
[PROOFSTEP]
use S
[GOAL]
case h
α : Type u_1
p : Set α → Prop
s₀ : Set α
hp : p s₀
t : Set α
inst✝ : HasCountableSeparatingOn α p t
S : ℕ → Set α
hSne : Set.Nonempty (range S)
hSc : Set.Countable (range S)
hS :
(∀ (s : Set α), s ∈ range S → p s) ∧
∀ (x : α), x ∈ t → ∀ (y : α), y ∈ t → (∀ (s : Set α), s ∈ range S → (x ∈ s ↔ y ∈ s)) → x = y
⊢ (∀ (n : ℕ), p (S n)) ∧ ∀ (x : α), x ∈ t → ∀ (y : α), y ∈ t → (∀ (n : ℕ), x ∈ S n ↔ y ∈ S n) → x = y
[PROOFSTEP]
simpa only [forall_range_iff] using hS
[GOAL]
α : Type u_1
p : Set α → Prop
t : Set α
q : Set ↑t → Prop
h : HasCountableSeparatingOn (↑t) q univ
hpq : ∀ (U : Set ↑t), q U → ∃ V, p V ∧ Subtype.val ⁻¹' V = U
⊢ HasCountableSeparatingOn α p t
[PROOFSTEP]
rcases h.1 with ⟨S, hSc, hSq, hS⟩
[GOAL]
case intro.intro.intro
α : Type u_1
p : Set α → Prop
t : Set α
q : Set ↑t → Prop
h : HasCountableSeparatingOn (↑t) q univ
hpq : ∀ (U : Set ↑t), q U → ∃ V, p V ∧ Subtype.val ⁻¹' V = U
S : Set (Set ↑t)
hSc : Set.Countable S
hSq : ∀ (s : Set ↑t), s ∈ S → q s
hS : ∀ (x : ↑t), x ∈ univ → ∀ (y : ↑t), y ∈ univ → (∀ (s : Set ↑t), s ∈ S → (x ∈ s ↔ y ∈ s)) → x = y
⊢ HasCountableSeparatingOn α p t
[PROOFSTEP]
choose! V hpV hV using fun s hs ↦ hpq s (hSq s hs)
[GOAL]
case intro.intro.intro
α : Type u_1
p : Set α → Prop
t : Set α
q : Set ↑t → Prop
h : HasCountableSeparatingOn (↑t) q univ
hpq : ∀ (U : Set ↑t), q U → ∃ V, p V ∧ Subtype.val ⁻¹' V = U
S : Set (Set ↑t)
hSc : Set.Countable S
hSq : ∀ (s : Set ↑t), s ∈ S → q s
hS : ∀ (x : ↑t), x ∈ univ → ∀ (y : ↑t), y ∈ univ → (∀ (s : Set ↑t), s ∈ S → (x ∈ s ↔ y ∈ s)) → x = y
V : Set ↑t → Set α
hpV : ∀ (s : Set ↑t), s ∈ S → p (V s)
hV : ∀ (s : Set ↑t), s ∈ S → Subtype.val ⁻¹' V s = s
⊢ HasCountableSeparatingOn α p t
[PROOFSTEP]
refine ⟨⟨V '' S, hSc.image _, ball_image_iff.2 hpV, fun x hx y hy h ↦ ?_⟩⟩
[GOAL]
case intro.intro.intro
α : Type u_1
p : Set α → Prop
t : Set α
q : Set ↑t → Prop
h✝ : HasCountableSeparatingOn (↑t) q univ
hpq : ∀ (U : Set ↑t), q U → ∃ V, p V ∧ Subtype.val ⁻¹' V = U
S : Set (Set ↑t)
hSc : Set.Countable S
hSq : ∀ (s : Set ↑t), s ∈ S → q s
hS : ∀ (x : ↑t), x ∈ univ → ∀ (y : ↑t), y ∈ univ → (∀ (s : Set ↑t), s ∈ S → (x ∈ s ↔ y ∈ s)) → x = y
V : Set ↑t → Set α
hpV : ∀ (s : Set ↑t), s ∈ S → p (V s)
hV : ∀ (s : Set ↑t), s ∈ S → Subtype.val ⁻¹' V s = s
x : α
hx : x ∈ t
y : α
hy : y ∈ t
h : ∀ (s : Set α), s ∈ V '' S → (x ∈ s ↔ y ∈ s)
⊢ x = y
[PROOFSTEP]
refine congr_arg Subtype.val (hS ⟨x, hx⟩ trivial ⟨y, hy⟩ trivial fun U hU ↦ ?_)
[GOAL]
case intro.intro.intro
α : Type u_1
p : Set α → Prop
t : Set α
q : Set ↑t → Prop
h✝ : HasCountableSeparatingOn (↑t) q univ
hpq : ∀ (U : Set ↑t), q U → ∃ V, p V ∧ Subtype.val ⁻¹' V = U
S : Set (Set ↑t)
hSc : Set.Countable S
hSq : ∀ (s : Set ↑t), s ∈ S → q s
hS : ∀ (x : ↑t), x ∈ univ → ∀ (y : ↑t), y ∈ univ → (∀ (s : Set ↑t), s ∈ S → (x ∈ s ↔ y ∈ s)) → x = y
V : Set ↑t → Set α
hpV : ∀ (s : Set ↑t), s ∈ S → p (V s)
hV : ∀ (s : Set ↑t), s ∈ S → Subtype.val ⁻¹' V s = s
x : α
hx : x ∈ t
y : α
hy : y ∈ t
h : ∀ (s : Set α), s ∈ V '' S → (x ∈ s ↔ y ∈ s)
U : Set ↑t
hU : U ∈ S
⊢ { val := x, property := hx } ∈ U ↔ { val := y, property := hy } ∈ U
[PROOFSTEP]
rw [← hV U hU]
[GOAL]
case intro.intro.intro
α : Type u_1
p : Set α → Prop
t : Set α
q : Set ↑t → Prop
h✝ : HasCountableSeparatingOn (↑t) q univ
hpq : ∀ (U : Set ↑t), q U → ∃ V, p V ∧ Subtype.val ⁻¹' V = U
S : Set (Set ↑t)
hSc : Set.Countable S
hSq : ∀ (s : Set ↑t), s ∈ S → q s
hS : ∀ (x : ↑t), x ∈ univ → ∀ (y : ↑t), y ∈ univ → (∀ (s : Set ↑t), s ∈ S → (x ∈ s ↔ y ∈ s)) → x = y
V : Set ↑t → Set α
hpV : ∀ (s : Set ↑t), s ∈ S → p (V s)
hV : ∀ (s : Set ↑t), s ∈ S → Subtype.val ⁻¹' V s = s
x : α
hx : x ∈ t
y : α
hy : y ∈ t
h : ∀ (s : Set α), s ∈ V '' S → (x ∈ s ↔ y ∈ s)
U : Set ↑t
hU : U ∈ S
⊢ { val := x, property := hx } ∈ Subtype.val ⁻¹' V U ↔ { val := y, property := hy } ∈ Subtype.val ⁻¹' V U
[PROOFSTEP]
exact h _ (mem_image_of_mem _ hU)
[GOAL]
α : Type u_1
β : Sort ?u.5043
l : Filter α
inst✝ : CountableInterFilter l
f g : α → β
p : Set α → Prop
s : Set α
h : HasCountableSeparatingOn α p s
hs : s ∈ l
hl : ∀ (U : Set α), p U → U ∈ l ∨ Uᶜ ∈ l
⊢ ∃ t, t ⊆ s ∧ Set.Subsingleton t ∧ t ∈ l
[PROOFSTEP]
rcases h.1 with ⟨S, hSc, hSp, hS⟩
[GOAL]
case intro.intro.intro
α : Type u_1
β : Sort ?u.5043
l : Filter α
inst✝ : CountableInterFilter l
f g : α → β
p : Set α → Prop
s : Set α
h : HasCountableSeparatingOn α p s
hs : s ∈ l
hl : ∀ (U : Set α), p U → U ∈ l ∨ Uᶜ ∈ l
S : Set (Set α)
hSc : Set.Countable S
hSp : ∀ (s : Set α), s ∈ S → p s
hS : ∀ (x : α), x ∈ s → ∀ (y : α), y ∈ s → (∀ (s : Set α), s ∈ S → (x ∈ s ↔ y ∈ s)) → x = y
⊢ ∃ t, t ⊆ s ∧ Set.Subsingleton t ∧ t ∈ l
[PROOFSTEP]
refine ⟨s ∩ ⋂₀ (S ∩ l.sets) ∩ ⋂ (U ∈ S) (_ : Uᶜ ∈ l), Uᶜ, ?_, ?_, ?_⟩
[GOAL]
case intro.intro.intro.refine_1
α : Type u_1
β : Sort ?u.5043
l : Filter α
inst✝ : CountableInterFilter l
f g : α → β
p : Set α → Prop
s : Set α
h : HasCountableSeparatingOn α p s
hs : s ∈ l
hl : ∀ (U : Set α), p U → U ∈ l ∨ Uᶜ ∈ l
S : Set (Set α)
hSc : Set.Countable S
hSp : ∀ (s : Set α), s ∈ S → p s
hS : ∀ (x : α), x ∈ s → ∀ (y : α), y ∈ s → (∀ (s : Set α), s ∈ S → (x ∈ s ↔ y ∈ s)) → x = y
⊢ s ∩ ⋂₀ (S ∩ l.sets) ∩ ⋂ (U : Set α) (_ : U ∈ S) (_ : Uᶜ ∈ l), Uᶜ ⊆ s
[PROOFSTEP]
exact fun _ h ↦ h.1.1
[GOAL]
case intro.intro.intro.refine_2
α : Type u_1
β : Sort ?u.5043
l : Filter α
inst✝ : CountableInterFilter l
f g : α → β
p : Set α → Prop
s : Set α
h : HasCountableSeparatingOn α p s
hs : s ∈ l
hl : ∀ (U : Set α), p U → U ∈ l ∨ Uᶜ ∈ l
S : Set (Set α)
hSc : Set.Countable S
hSp : ∀ (s : Set α), s ∈ S → p s
hS : ∀ (x : α), x ∈ s → ∀ (y : α), y ∈ s → (∀ (s : Set α), s ∈ S → (x ∈ s ↔ y ∈ s)) → x = y
⊢ Set.Subsingleton (s ∩ ⋂₀ (S ∩ l.sets) ∩ ⋂ (U : Set α) (_ : U ∈ S) (_ : Uᶜ ∈ l), Uᶜ)
[PROOFSTEP]
intro x hx y hy
[GOAL]
case intro.intro.intro.refine_2
α : Type u_1
β : Sort ?u.5043
l : Filter α
inst✝ : CountableInterFilter l
f g : α → β
p : Set α → Prop
s : Set α
h : HasCountableSeparatingOn α p s
hs : s ∈ l
hl : ∀ (U : Set α), p U → U ∈ l ∨ Uᶜ ∈ l
S : Set (Set α)
hSc : Set.Countable S
hSp : ∀ (s : Set α), s ∈ S → p s
hS : ∀ (x : α), x ∈ s → ∀ (y : α), y ∈ s → (∀ (s : Set α), s ∈ S → (x ∈ s ↔ y ∈ s)) → x = y
x : α
hx : x ∈ s ∩ ⋂₀ (S ∩ l.sets) ∩ ⋂ (U : Set α) (_ : U ∈ S) (_ : Uᶜ ∈ l), Uᶜ
y : α
hy : y ∈ s ∩ ⋂₀ (S ∩ l.sets) ∩ ⋂ (U : Set α) (_ : U ∈ S) (_ : Uᶜ ∈ l), Uᶜ
⊢ x = y
[PROOFSTEP]
simp only [mem_sInter, mem_inter_iff, mem_iInter, mem_compl_iff] at hx hy
[GOAL]
case intro.intro.intro.refine_2
α : Type u_1
β : Sort ?u.5043
l : Filter α
inst✝ : CountableInterFilter l
f g : α → β
p : Set α → Prop
s : Set α
h : HasCountableSeparatingOn α p s
hs : s ∈ l
hl : ∀ (U : Set α), p U → U ∈ l ∨ Uᶜ ∈ l
S : Set (Set α)
hSc : Set.Countable S
hSp : ∀ (s : Set α), s ∈ S → p s
hS : ∀ (x : α), x ∈ s → ∀ (y : α), y ∈ s → (∀ (s : Set α), s ∈ S → (x ∈ s ↔ y ∈ s)) → x = y
x y : α
hx : (x ∈ s ∧ ∀ (t : Set α), t ∈ S ∧ t ∈ l.sets → x ∈ t) ∧ ∀ (i : Set α), i ∈ S → iᶜ ∈ l → ¬x ∈ i
hy : (y ∈ s ∧ ∀ (t : Set α), t ∈ S ∧ t ∈ l.sets → y ∈ t) ∧ ∀ (i : Set α), i ∈ S → iᶜ ∈ l → ¬y ∈ i
⊢ x = y
[PROOFSTEP]
refine hS x hx.1.1 y hy.1.1 (fun s hsS ↦ ?_)
[GOAL]
case intro.intro.intro.refine_2
α : Type u_1
β : Sort ?u.5043
l : Filter α
inst✝ : CountableInterFilter l
f g : α → β
p : Set α → Prop
s✝ : Set α
h : HasCountableSeparatingOn α p s✝
hs : s✝ ∈ l
hl : ∀ (U : Set α), p U → U ∈ l ∨ Uᶜ ∈ l
S : Set (Set α)
hSc : Set.Countable S
hSp : ∀ (s : Set α), s ∈ S → p s
hS : ∀ (x : α), x ∈ s✝ → ∀ (y : α), y ∈ s✝ → (∀ (s : Set α), s ∈ S → (x ∈ s ↔ y ∈ s)) → x = y
x y : α
hx : (x ∈ s✝ ∧ ∀ (t : Set α), t ∈ S ∧ t ∈ l.sets → x ∈ t) ∧ ∀ (i : Set α), i ∈ S → iᶜ ∈ l → ¬x ∈ i
hy : (y ∈ s✝ ∧ ∀ (t : Set α), t ∈ S ∧ t ∈ l.sets → y ∈ t) ∧ ∀ (i : Set α), i ∈ S → iᶜ ∈ l → ¬y ∈ i
s : Set α
hsS : s ∈ S
⊢ x ∈ s ↔ y ∈ s
[PROOFSTEP]
cases hl s (hSp s hsS) with
| inl hsl => simp only [hx.1.2 s ⟨hsS, hsl⟩, hy.1.2 s ⟨hsS, hsl⟩]
| inr hsl => simp only [hx.2 s hsS hsl, hy.2 s hsS hsl]
[GOAL]
case intro.intro.intro.refine_2
α : Type u_1
β : Sort ?u.5043
l : Filter α
inst✝ : CountableInterFilter l
f g : α → β
p : Set α → Prop
s✝ : Set α
h : HasCountableSeparatingOn α p s✝
hs : s✝ ∈ l
hl : ∀ (U : Set α), p U → U ∈ l ∨ Uᶜ ∈ l
S : Set (Set α)
hSc : Set.Countable S
hSp : ∀ (s : Set α), s ∈ S → p s
hS : ∀ (x : α), x ∈ s✝ → ∀ (y : α), y ∈ s✝ → (∀ (s : Set α), s ∈ S → (x ∈ s ↔ y ∈ s)) → x = y
x y : α
hx : (x ∈ s✝ ∧ ∀ (t : Set α), t ∈ S ∧ t ∈ l.sets → x ∈ t) ∧ ∀ (i : Set α), i ∈ S → iᶜ ∈ l → ¬x ∈ i
hy : (y ∈ s✝ ∧ ∀ (t : Set α), t ∈ S ∧ t ∈ l.sets → y ∈ t) ∧ ∀ (i : Set α), i ∈ S → iᶜ ∈ l → ¬y ∈ i
s : Set α
hsS : s ∈ S
x✝ : s ∈ l ∨ sᶜ ∈ l
⊢ x ∈ s ↔ y ∈ s
[PROOFSTEP]
cases hl s (hSp s hsS) with
| inl hsl => simp only [hx.1.2 s ⟨hsS, hsl⟩, hy.1.2 s ⟨hsS, hsl⟩]
| inr hsl => simp only [hx.2 s hsS hsl, hy.2 s hsS hsl]
[GOAL]
case intro.intro.intro.refine_2.inl
α : Type u_1
β : Sort ?u.5043
l : Filter α
inst✝ : CountableInterFilter l
f g : α → β
p : Set α → Prop
s✝ : Set α
h : HasCountableSeparatingOn α p s✝
hs : s✝ ∈ l
hl : ∀ (U : Set α), p U → U ∈ l ∨ Uᶜ ∈ l
S : Set (Set α)
hSc : Set.Countable S
hSp : ∀ (s : Set α), s ∈ S → p s
hS : ∀ (x : α), x ∈ s✝ → ∀ (y : α), y ∈ s✝ → (∀ (s : Set α), s ∈ S → (x ∈ s ↔ y ∈ s)) → x = y
x y : α
hx : (x ∈ s✝ ∧ ∀ (t : Set α), t ∈ S ∧ t ∈ l.sets → x ∈ t) ∧ ∀ (i : Set α), i ∈ S → iᶜ ∈ l → ¬x ∈ i
hy : (y ∈ s✝ ∧ ∀ (t : Set α), t ∈ S ∧ t ∈ l.sets → y ∈ t) ∧ ∀ (i : Set α), i ∈ S → iᶜ ∈ l → ¬y ∈ i
s : Set α
hsS : s ∈ S
hsl : s ∈ l
⊢ x ∈ s ↔ y ∈ s
[PROOFSTEP]
| inl hsl => simp only [hx.1.2 s ⟨hsS, hsl⟩, hy.1.2 s ⟨hsS, hsl⟩]
[GOAL]
case intro.intro.intro.refine_2.inl
α : Type u_1
β : Sort ?u.5043
l : Filter α
inst✝ : CountableInterFilter l
f g : α → β
p : Set α → Prop
s✝ : Set α
h : HasCountableSeparatingOn α p s✝
hs : s✝ ∈ l
hl : ∀ (U : Set α), p U → U ∈ l ∨ Uᶜ ∈ l
S : Set (Set α)
hSc : Set.Countable S
hSp : ∀ (s : Set α), s ∈ S → p s
hS : ∀ (x : α), x ∈ s✝ → ∀ (y : α), y ∈ s✝ → (∀ (s : Set α), s ∈ S → (x ∈ s ↔ y ∈ s)) → x = y
x y : α
hx : (x ∈ s✝ ∧ ∀ (t : Set α), t ∈ S ∧ t ∈ l.sets → x ∈ t) ∧ ∀ (i : Set α), i ∈ S → iᶜ ∈ l → ¬x ∈ i
hy : (y ∈ s✝ ∧ ∀ (t : Set α), t ∈ S ∧ t ∈ l.sets → y ∈ t) ∧ ∀ (i : Set α), i ∈ S → iᶜ ∈ l → ¬y ∈ i
s : Set α
hsS : s ∈ S
hsl : s ∈ l
⊢ x ∈ s ↔ y ∈ s
[PROOFSTEP]
simp only [hx.1.2 s ⟨hsS, hsl⟩, hy.1.2 s ⟨hsS, hsl⟩]
[GOAL]
case intro.intro.intro.refine_2.inr
α : Type u_1
β : Sort ?u.5043
l : Filter α
inst✝ : CountableInterFilter l
f g : α → β
p : Set α → Prop
s✝ : Set α
h : HasCountableSeparatingOn α p s✝
hs : s✝ ∈ l
hl : ∀ (U : Set α), p U → U ∈ l ∨ Uᶜ ∈ l
S : Set (Set α)
hSc : Set.Countable S
hSp : ∀ (s : Set α), s ∈ S → p s
hS : ∀ (x : α), x ∈ s✝ → ∀ (y : α), y ∈ s✝ → (∀ (s : Set α), s ∈ S → (x ∈ s ↔ y ∈ s)) → x = y
x y : α
hx : (x ∈ s✝ ∧ ∀ (t : Set α), t ∈ S ∧ t ∈ l.sets → x ∈ t) ∧ ∀ (i : Set α), i ∈ S → iᶜ ∈ l → ¬x ∈ i
hy : (y ∈ s✝ ∧ ∀ (t : Set α), t ∈ S ∧ t ∈ l.sets → y ∈ t) ∧ ∀ (i : Set α), i ∈ S → iᶜ ∈ l → ¬y ∈ i
s : Set α
hsS : s ∈ S
hsl : sᶜ ∈ l
⊢ x ∈ s ↔ y ∈ s
[PROOFSTEP]
| inr hsl => simp only [hx.2 s hsS hsl, hy.2 s hsS hsl]
[GOAL]
case intro.intro.intro.refine_2.inr
α : Type u_1
β : Sort ?u.5043
l : Filter α
inst✝ : CountableInterFilter l
f g : α → β
p : Set α → Prop
s✝ : Set α
h : HasCountableSeparatingOn α p s✝
hs : s✝ ∈ l
hl : ∀ (U : Set α), p U → U ∈ l ∨ Uᶜ ∈ l
S : Set (Set α)
hSc : Set.Countable S
hSp : ∀ (s : Set α), s ∈ S → p s
hS : ∀ (x : α), x ∈ s✝ → ∀ (y : α), y ∈ s✝ → (∀ (s : Set α), s ∈ S → (x ∈ s ↔ y ∈ s)) → x = y
x y : α
hx : (x ∈ s✝ ∧ ∀ (t : Set α), t ∈ S ∧ t ∈ l.sets → x ∈ t) ∧ ∀ (i : Set α), i ∈ S → iᶜ ∈ l → ¬x ∈ i
hy : (y ∈ s✝ ∧ ∀ (t : Set α), t ∈ S ∧ t ∈ l.sets → y ∈ t) ∧ ∀ (i : Set α), i ∈ S → iᶜ ∈ l → ¬y ∈ i
s : Set α
hsS : s ∈ S
hsl : sᶜ ∈ l
⊢ x ∈ s ↔ y ∈ s
[PROOFSTEP]
simp only [hx.2 s hsS hsl, hy.2 s hsS hsl]
[GOAL]
case intro.intro.intro.refine_3
α : Type u_1
β : Sort ?u.5043
l : Filter α
inst✝ : CountableInterFilter l
f g : α → β
p : Set α → Prop
s : Set α
h : HasCountableSeparatingOn α p s
hs : s ∈ l
hl : ∀ (U : Set α), p U → U ∈ l ∨ Uᶜ ∈ l
S : Set (Set α)
hSc : Set.Countable S
hSp : ∀ (s : Set α), s ∈ S → p s
hS : ∀ (x : α), x ∈ s → ∀ (y : α), y ∈ s → (∀ (s : Set α), s ∈ S → (x ∈ s ↔ y ∈ s)) → x = y
⊢ s ∩ ⋂₀ (S ∩ l.sets) ∩ ⋂ (U : Set α) (_ : U ∈ S) (_ : Uᶜ ∈ l), Uᶜ ∈ l
[PROOFSTEP]
exact
inter_mem (inter_mem hs ((countable_sInter_mem (hSc.mono (inter_subset_left _ _))).2 fun _ h ↦ h.2))
((countable_bInter_mem hSc).2 fun U hU ↦ iInter_mem.2 id)
[GOAL]
α : Type u_1
β : Sort ?u.6801
l : Filter α
inst✝¹ : CountableInterFilter l
f g : α → β
p : Set α → Prop
s : Set α
inst✝ : HasCountableSeparatingOn α p s
hs : s ∈ l
hne : Set.Nonempty s
hl : ∀ (U : Set α), p U → U ∈ l ∨ Uᶜ ∈ l
⊢ ∃ a, a ∈ s ∧ {a} ∈ l
[PROOFSTEP]
rcases exists_subset_subsingleton_mem_of_forall_separating p hs hl with ⟨t, hts, ht, htl⟩
[GOAL]
case intro.intro.intro
α : Type u_1
β : Sort ?u.6801
l : Filter α
inst✝¹ : CountableInterFilter l
f g : α → β
p : Set α → Prop
s : Set α
inst✝ : HasCountableSeparatingOn α p s
hs : s ∈ l
hne : Set.Nonempty s
hl : ∀ (U : Set α), p U → U ∈ l ∨ Uᶜ ∈ l
t : Set α
hts : t ⊆ s
ht : Set.Subsingleton t
htl : t ∈ l
⊢ ∃ a, a ∈ s ∧ {a} ∈ l
[PROOFSTEP]
rcases ht.eq_empty_or_singleton with rfl | ⟨x, rfl⟩
[GOAL]
case intro.intro.intro.inl
α : Type u_1
β : Sort ?u.6801
l : Filter α
inst✝¹ : CountableInterFilter l
f g : α → β
p : Set α → Prop
s : Set α
inst✝ : HasCountableSeparatingOn α p s
hs : s ∈ l
hne : Set.Nonempty s
hl : ∀ (U : Set α), p U → U ∈ l ∨ Uᶜ ∈ l
hts : ∅ ⊆ s
ht : Set.Subsingleton ∅
htl : ∅ ∈ l
⊢ ∃ a, a ∈ s ∧ {a} ∈ l
[PROOFSTEP]
exact hne.imp fun a ha ↦ ⟨ha, mem_of_superset htl (empty_subset _)⟩
[GOAL]
case intro.intro.intro.inr.intro
α : Type u_1
β : Sort ?u.6801
l : Filter α
inst✝¹ : CountableInterFilter l
f g : α → β
p : Set α → Prop
s : Set α
inst✝ : HasCountableSeparatingOn α p s
hs : s ∈ l
hne : Set.Nonempty s
hl : ∀ (U : Set α), p U → U ∈ l ∨ Uᶜ ∈ l
x : α
hts : {x} ⊆ s
ht : Set.Subsingleton {x}
htl : {x} ∈ l
⊢ ∃ a, a ∈ s ∧ {a} ∈ l
[PROOFSTEP]
exact ⟨x, hts rfl, htl⟩
[GOAL]
α : Type u_1
β : Sort ?u.7319
l : Filter α
inst✝² : CountableInterFilter l
f g : α → β
inst✝¹ : Nonempty α
p : Set α → Prop
s : Set α
inst✝ : HasCountableSeparatingOn α p s
hs : s ∈ l
hl : ∀ (U : Set α), p U → U ∈ l ∨ Uᶜ ∈ l
⊢ ∃ a, {a} ∈ l
[PROOFSTEP]
rcases s.eq_empty_or_nonempty with rfl | hne
[GOAL]
case inl
α : Type u_1
β : Sort ?u.7319
l : Filter α
inst✝² : CountableInterFilter l
f g : α → β
inst✝¹ : Nonempty α
p : Set α → Prop
hl : ∀ (U : Set α), p U → U ∈ l ∨ Uᶜ ∈ l
inst✝ : HasCountableSeparatingOn α p ∅
hs : ∅ ∈ l
⊢ ∃ a, {a} ∈ l
[PROOFSTEP]
exact ‹Nonempty α›.elim fun a ↦ ⟨a, mem_of_superset hs (empty_subset _)⟩
[GOAL]
case inr
α : Type u_1
β : Sort ?u.7319
l : Filter α
inst✝² : CountableInterFilter l
f g : α → β
inst✝¹ : Nonempty α
p : Set α → Prop
s : Set α
inst✝ : HasCountableSeparatingOn α p s
hs : s ∈ l
hl : ∀ (U : Set α), p U → U ∈ l ∨ Uᶜ ∈ l
hne : Set.Nonempty s
⊢ ∃ a, {a} ∈ l
[PROOFSTEP]
exact (exists_mem_singleton_mem_of_mem_of_nonempty_of_forall_separating p hs hne hl).imp fun _ ↦ And.right
[GOAL]
α : Type u_2
β : Type u_1
l : Filter α
inst✝ : CountableInterFilter l
f g : α → β
p : Set β → Prop
s : Set β
h' : HasCountableSeparatingOn β p s
hf : ∀ᶠ (x : α) in l, f x ∈ s
hg : ∀ᶠ (x : α) in l, g x ∈ s
h : ∀ (U : Set β), p U → ∀ᶠ (x : α) in l, f x ∈ U ↔ g x ∈ U
⊢ f =ᶠ[l] g
[PROOFSTEP]
rcases h'.1 with ⟨S, hSc, hSp, hS⟩
[GOAL]
case intro.intro.intro
α : Type u_2
β : Type u_1
l : Filter α
inst✝ : CountableInterFilter l
f g : α → β
p : Set β → Prop
s : Set β
h' : HasCountableSeparatingOn β p s
hf : ∀ᶠ (x : α) in l, f x ∈ s
hg : ∀ᶠ (x : α) in l, g x ∈ s
h : ∀ (U : Set β), p U → ∀ᶠ (x : α) in l, f x ∈ U ↔ g x ∈ U
S : Set (Set β)
hSc : Set.Countable S
hSp : ∀ (s : Set β), s ∈ S → p s
hS : ∀ (x : β), x ∈ s → ∀ (y : β), y ∈ s → (∀ (s : Set β), s ∈ S → (x ∈ s ↔ y ∈ s)) → x = y
⊢ f =ᶠ[l] g
[PROOFSTEP]
have H : ∀ᶠ x in l, ∀ s ∈ S, f x ∈ s ↔ g x ∈ s := (eventually_countable_ball hSc).2 fun s hs ↦ (h _ (hSp _ hs))
[GOAL]
case intro.intro.intro
α : Type u_2
β : Type u_1
l : Filter α
inst✝ : CountableInterFilter l
f g : α → β
p : Set β → Prop
s : Set β
h' : HasCountableSeparatingOn β p s
hf : ∀ᶠ (x : α) in l, f x ∈ s
hg : ∀ᶠ (x : α) in l, g x ∈ s
h : ∀ (U : Set β), p U → ∀ᶠ (x : α) in l, f x ∈ U ↔ g x ∈ U
S : Set (Set β)
hSc : Set.Countable S
hSp : ∀ (s : Set β), s ∈ S → p s
hS : ∀ (x : β), x ∈ s → ∀ (y : β), y ∈ s → (∀ (s : Set β), s ∈ S → (x ∈ s ↔ y ∈ s)) → x = y
H : ∀ᶠ (x : α) in l, ∀ (s : Set β), s ∈ S → (f x ∈ s ↔ g x ∈ s)
⊢ f =ᶠ[l] g
[PROOFSTEP]
filter_upwards [H, hf, hg] with x hx hxf hxg using hS _ hxf _ hxg hx
|
// Copyright David Abrahams 2002.
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <boost/python/numeric.hpp>
#include <boost/python/tuple.hpp>
#include <boost/python/module.hpp>
#include <boost/python/def.hpp>
using namespace boost::python;
// See if we can invoke array() from C++
object new_array()
{
return numeric::array(
make_tuple(
make_tuple(1,2,3)
, make_tuple(4,5,6)
, make_tuple(7,8,9)
)
);
}
// test argument conversion
void take_array(numeric::array x)
{
}
// A separate function to invoke the info() member. Must happen
// outside any doctests since this prints directly to stdout and the
// result text includes the address of the 'self' array.
void info(numeric::array const& z)
{
z.info();
}
// Tests which work on both Numeric and numarray array objects. Of
// course all of the operators "just work" since numeric::array
// inherits that behavior from object.
void exercise(numeric::array& y, object check)
{
y[make_tuple(2,1)] = 3;
check(y);
check(y.astype('D'));
check(y.copy());
check(y.typecode());
}
// numarray-specific tests. check is a callable object which we can
// use to record intermediate results, which are later compared with
// the results of corresponding python operations.
void exercise_numarray(numeric::array& y, object check)
{
check(y.astype());
check(y.argmax());
check(y.argmax(0));
check(y.argmin());
check(y.argmin(0));
check(y.argsort());
check(y.argsort(1));
y.byteswap();
check(y);
check(y.diagonal());
check(y.diagonal(1));
check(y.diagonal(0, 1));
check(y.diagonal(0, 1, 0));
check(y.is_c_array());
check(y.isbyteswapped());
check(y.trace());
check(y.trace(1));
check(y.trace(0, 1));
check(y.trace(0, 1, 0));
check(y.new_('D'));
y.sort();
check(y);
check(y.type());
check(y.factory(make_tuple(1.2, 3.4)));
check(y.factory(make_tuple(1.2, 3.4), "Double"));
check(y.factory(make_tuple(1.2, 3.4), "Double", make_tuple(1,2,1)));
check(y.factory(make_tuple(1.2, 3.4), "Double", make_tuple(2,1,1), false));
check(y.factory(make_tuple(1.2, 3.4), "Double", make_tuple(2), true, true));
}
BOOST_PYTHON_MODULE(numpy_ext)
{
def("new_array", new_array);
def("take_array", take_array);
def("exercise", exercise);
def("exercise_numarray", exercise_numarray);
def("set_module_and_type", &numeric::array::set_module_and_type);
def("info", info);
}
#include "module_tail.cpp"
|
**EPAM Training Center Test Automation Project**
-
This project includes:
1. Calculator app
2. Tests for it
|
------------------------------------------------------------------------------
-- Alter: An unguarded co-recursive function
------------------------------------------------------------------------------
{-# OPTIONS --exact-split #-}
{-# OPTIONS --no-sized-types #-}
{-# OPTIONS --no-universe-polymorphism #-}
{-# OPTIONS --without-K #-}
module FOT.FOTC.UnguardedCorecursion.Alter.AlterSL where
open import Codata.Musical.Notation
open import Codata.Musical.Stream
open import Data.Bool.Base
------------------------------------------------------------------------------
-- TODO (2019-01-04): Agda doesn't accept this definition, which was
-- accepted by a previous version.
{-# TERMINATING #-}
alter : Stream Bool
alter = true ∷ ♯ (false ∷ ♯ alter)
{-# TERMINATING #-}
alter' : Stream Bool
alter' = true ∷ ♯ (map not alter')
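-- Note: alter' is not syntactically guarded — the corecursive call occurs
-- under map rather than directly below ♯ — so productivity is not evident
-- to the checker and the TERMINATING pragma is needed.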
|
(*
Copyright 2016 Luxembourg University
Copyright 2017 Luxembourg University
Copyright 2018 Luxembourg University
This file is part of Velisarios.
Velisarios is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Velisarios is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Velisarios. If not, see <http://www.gnu.org/licenses/>.
Authors: Vincent Rahli
Ivana Vukotic
*)
Require Export PBFT_A_1_4.
Require Export PBFT_A_1_9.
Section PBFT_A_1_10.
Local Open Scope eo.
Local Open Scope proc.
Context { pbft_context : PBFTcontext }.
Context { pbft_auth : PBFTauth }.
Context { pbft_keys : PBFTinitial_keys }.
Context { pbft_hash : PBFThash }.
Context { pbft_hash_axioms : PBFThash_axioms }.
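(* A set R of more than F pairwise-distinct replicas has "prepared before e"
   the request data (v,n,d) if every replica in R reaches, at some event
   at-or-before e, a state in which (v,n,d) is prepared. *)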
Definition more_than_F_have_prepared_before
(eo : EventOrdering)
(e : Event)
(R : list Rep)
(v : View)
(n : SeqNum)
(d : PBFTdigest) :=
no_repeats R
/\ F < length R
/\
forall (k : Rep),
In k R
->
exists (e' : Event) (st' : PBFTstate),
e' ≼ e
/\ loc e' = PBFTreplica k
/\ state_sm_on_event (PBFTreplicaSM k) e' = Some st'
/\ prepared (request_data v n d) st' = true.
Lemma more_than_F_have_prepared_before_implies :
forall (eo : EventOrdering) (e : Event) R v n d,
more_than_F_have_prepared_before eo e R v n d
-> more_than_F_have_prepared eo R v n d.
Proof.
introv moreThanF.
unfold more_than_F_have_prepared, more_than_F_have_prepared_before in *.
repnd; dands; auto.
introv i.
applydup moreThanF in i; exrepnd.
eexists; eexists; dands; eauto.
Qed.
Hint Resolve more_than_F_have_prepared_before_implies : pbft.
Lemma A_1_10_lt :
forall (eo : EventOrdering)
(e1 e2 : Event)
(R1 R2 : list Rep)
(n : SeqNum)
(v1 v2 : View)
(d1 d2 : PBFTdigest),
authenticated_messages_were_sent_or_byz_usys eo PBFTsys
-> PBFTcorrect_keys eo
-> v1 < v2
-> exists_at_most_f_faulty [e2] F
-> nodes_have_correct_traces_before R1 [e2]
-> more_than_F_have_prepared_before eo e1 R1 v1 n d1
-> more_than_F_have_prepared_before eo e2 R2 v2 n d2
-> d1 = d2.
Proof.
introv sendbyz corkeys ltv atmost ctraces moreThanF1 moreThanF2.
unfold more_than_F_have_prepared in moreThanF2.
destruct moreThanF2 as [norep2 [len2 moreThanF2]].
destruct (PBFTdigestdeq d1 d2); auto.
assert False; tcsp;[].
pose proof (there_is_one_good_guy_before eo R2 [e2]) as h.
repeat (autodimp h hyp); try omega;[].
exrepnd.
pose proof (moreThanF2 good) as prep; autodimp prep hyp.
exrepnd.
pose proof (PBFT_A_1_9 eo) as q; repeat (autodimp q hyp).
pose proof (q R1 v1 n d1) as q; autodimp q hyp; eauto 3 with pbft;[].
pose proof (q e' good st') as q.
repeat (autodimp q hyp); eauto 3 with pbft eo;[].
unfold prepared in prep1.
eapply prepared_implies2 in prep1;[|eauto 3 with pbft].
exrepnd.
destruct pp, b; simpl in *; ginv; simpl in *.
unfold pre_prepare2digest in *; simpl in *.
fold (mk_pre_prepare v2 n d a) in *.
hide_hyp prep1.
pose proof (h0 e2) as h0; autodimp h0 hyp.
pose proof (q v2 d a (requests2digest d)) as q.
repeat (autodimp q hyp); eauto 3 with pbft eo.
Qed.
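(* A_1_10: if more than F replicas have prepared sequence number n with
   digest d1 (in view v1) and more than F have prepared n with digest d2
   (in view v2), then d1 = d2.  The two strict view orderings are discharged
   by A_1_10_lt above; the remaining case v1 = v2 reduces to A_1_4. *)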
Lemma A_1_10 :
forall (eo : EventOrdering)
(e1 e2 : Event)
(R1 R2 : list Rep)
(n : SeqNum)
(v1 v2 : View)
(d1 d2 : PBFTdigest),
authenticated_messages_were_sent_or_byz_usys eo PBFTsys
-> PBFTcorrect_keys eo
-> exists_at_most_f_faulty [e1,e2] F
-> nodes_have_correct_traces_before R1 [e2]
-> nodes_have_correct_traces_before R2 [e1]
-> more_than_F_have_prepared_before eo e1 R1 v1 n d1
-> more_than_F_have_prepared_before eo e2 R2 v2 n d2
-> d1 = d2.
Proof.
introv sendbyz corkeys atmost ctraces1 ctraces2 moreThanF1 moreThanF2.
destruct (lt_dec v1 v2) as [e|e].
{ eapply A_1_10_lt; try exact moreThanF1; try exact moreThanF2; eauto 3 with pbft eo. }
destruct (lt_dec v2 v1) as [f|f].
{ symmetry; eapply A_1_10_lt; eauto; eauto 3 with pbft eo. }
assert (v1 = v2) as xx by (apply equal_nats_implies_equal_views; omega).
subst.
clear e f.
destruct moreThanF1 as [norep1 [len1 moreThanF1]].
destruct moreThanF2 as [norep2 [len2 moreThanF2]].
pose proof (there_is_one_good_guy_before eo R1 [e1,e2]) as h.
pose proof (there_is_one_good_guy_before eo R2 [e1,e2]) as q.
repeat (autodimp h hyp); try omega;[].
repeat (autodimp q hyp); try omega;[].
exrepnd.
applydup moreThanF1 in h1; exrepnd.
applydup moreThanF2 in q1; exrepnd.
pose proof (h0 e1) as h0; simpl in h0; autodimp h0 hyp; auto.
pose proof (q0 e2) as q0; simpl in q0; autodimp q0 hyp; auto.
eapply A_1_4; try (exact h2); try (exact q2); try (exact h5); try (exact q5);
auto; allrw; eauto 3 with pbft eo.
Qed.
End PBFT_A_1_10.
Hint Resolve more_than_F_have_prepared_before_implies : pbft.
|
/-
Copyright (c) 2017 Johannes Hölzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johannes Hölzl
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.algebra.big_operators.basic
import Mathlib.data.finset.intervals
import Mathlib.PostPort
universes u_1 v
namespace Mathlib
/-!
# Results about big operators over intervals
We prove results about big operators over intervals (mostly the `ℕ`-valued `Ico m n`).
-/
namespace finset
theorem sum_Ico_add {δ : Type u_1} [add_comm_monoid δ] (f : ℕ → δ) (m : ℕ) (n : ℕ) (k : ℕ) :
(finset.sum (Ico m n) fun (l : ℕ) => f (k + l)) =
finset.sum (Ico (m + k) (n + k)) fun (l : ℕ) => f l :=
Eq.subst (Ico.image_add m n k) Eq.symm
(sum_image
fun (x : ℕ) (hx : x ∈ Ico m n) (y : ℕ) (hy : y ∈ Ico m n) (h : k + x = k + y) =>
nat.add_left_cancel h)
theorem prod_Ico_add {β : Type v} [comm_monoid β] (f : ℕ → β) (m : ℕ) (n : ℕ) (k : ℕ) :
(finset.prod (Ico m n) fun (l : ℕ) => f (k + l)) =
finset.prod (Ico (m + k) (n + k)) fun (l : ℕ) => f l :=
sum_Ico_add f m n k
theorem sum_Ico_succ_top {δ : Type u_1} [add_comm_monoid δ] {a : ℕ} {b : ℕ} (hab : a ≤ b)
(f : ℕ → δ) :
(finset.sum (Ico a (b + 1)) fun (k : ℕ) => f k) =
(finset.sum (Ico a b) fun (k : ℕ) => f k) + f b :=
sorry
theorem prod_Ico_succ_top {β : Type v} [comm_monoid β] {a : ℕ} {b : ℕ} (hab : a ≤ b) (f : ℕ → β) :
(finset.prod (Ico a (b + 1)) fun (k : ℕ) => f k) =
(finset.prod (Ico a b) fun (k : ℕ) => f k) * f b :=
sum_Ico_succ_top hab fun (k : ℕ) => f k
theorem sum_eq_sum_Ico_succ_bot {δ : Type u_1} [add_comm_monoid δ] {a : ℕ} {b : ℕ} (hab : a < b)
(f : ℕ → δ) :
(finset.sum (Ico a b) fun (k : ℕ) => f k) =
f a + finset.sum (Ico (a + 1) b) fun (k : ℕ) => f k :=
sorry
theorem prod_eq_prod_Ico_succ_bot {β : Type v} [comm_monoid β] {a : ℕ} {b : ℕ} (hab : a < b)
(f : ℕ → β) :
(finset.prod (Ico a b) fun (k : ℕ) => f k) =
f a * finset.prod (Ico (a + 1) b) fun (k : ℕ) => f k :=
sum_eq_sum_Ico_succ_bot hab fun (k : ℕ) => f k
theorem prod_Ico_consecutive {β : Type v} [comm_monoid β] (f : ℕ → β) {m : ℕ} {n : ℕ} {k : ℕ}
(hmn : m ≤ n) (hnk : n ≤ k) :
((finset.prod (Ico m n) fun (i : ℕ) => f i) * finset.prod (Ico n k) fun (i : ℕ) => f i) =
finset.prod (Ico m k) fun (i : ℕ) => f i :=
Eq.subst (Ico.union_consecutive hmn hnk) Eq.symm (prod_union (Ico.disjoint_consecutive m n k))
theorem sum_range_add_sum_Ico {β : Type v} [add_comm_monoid β] (f : ℕ → β) {m : ℕ} {n : ℕ}
(h : m ≤ n) :
((finset.sum (range m) fun (k : ℕ) => f k) + finset.sum (Ico m n) fun (k : ℕ) => f k) =
finset.sum (range n) fun (k : ℕ) => f k :=
Ico.zero_bot m ▸ Ico.zero_bot n ▸ sum_Ico_consecutive f (nat.zero_le m) h
theorem sum_Ico_eq_add_neg {δ : Type u_1} [add_comm_group δ] (f : ℕ → δ) {m : ℕ} {n : ℕ}
(h : m ≤ n) :
(finset.sum (Ico m n) fun (k : ℕ) => f k) =
(finset.sum (range n) fun (k : ℕ) => f k) + -finset.sum (range m) fun (k : ℕ) => f k :=
sorry
theorem sum_Ico_eq_sub {δ : Type u_1} [add_comm_group δ] (f : ℕ → δ) {m : ℕ} {n : ℕ} (h : m ≤ n) :
(finset.sum (Ico m n) fun (k : ℕ) => f k) =
(finset.sum (range n) fun (k : ℕ) => f k) - finset.sum (range m) fun (k : ℕ) => f k :=
sorry
theorem sum_Ico_eq_sum_range {β : Type v} [add_comm_monoid β] (f : ℕ → β) (m : ℕ) (n : ℕ) :
(finset.sum (Ico m n) fun (k : ℕ) => f k) =
finset.sum (range (n - m)) fun (k : ℕ) => f (m + k) :=
sorry
theorem prod_Ico_reflect {β : Type v} [comm_monoid β] (f : ℕ → β) (k : ℕ) {m : ℕ} {n : ℕ}
(h : m ≤ n + 1) :
(finset.prod (Ico k m) fun (j : ℕ) => f (n - j)) =
finset.prod (Ico (n + 1 - m) (n + 1 - k)) fun (j : ℕ) => f j :=
sorry
theorem sum_Ico_reflect {δ : Type u_1} [add_comm_monoid δ] (f : ℕ → δ) (k : ℕ) {m : ℕ} {n : ℕ}
(h : m ≤ n + 1) :
(finset.sum (Ico k m) fun (j : ℕ) => f (n - j)) =
finset.sum (Ico (n + 1 - m) (n + 1 - k)) fun (j : ℕ) => f j :=
prod_Ico_reflect f k h
theorem prod_range_reflect {β : Type v} [comm_monoid β] (f : ℕ → β) (n : ℕ) :
(finset.prod (range n) fun (j : ℕ) => f (n - 1 - j)) =
finset.prod (range n) fun (j : ℕ) => f j :=
sorry
theorem sum_range_reflect {δ : Type u_1} [add_comm_monoid δ] (f : ℕ → δ) (n : ℕ) :
(finset.sum (range n) fun (j : ℕ) => f (n - 1 - j)) = finset.sum (range n) fun (j : ℕ) => f j :=
prod_range_reflect f n
@[simp] theorem prod_Ico_id_eq_factorial (n : ℕ) :
(finset.prod (Ico 1 (n + 1)) fun (x : ℕ) => x) = nat.factorial n :=
sorry
@[simp] theorem prod_range_add_one_eq_factorial (n : ℕ) :
(finset.prod (range n) fun (x : ℕ) => x + 1) = nat.factorial n :=
sorry
/-- Gauss' summation formula -/
theorem sum_range_id_mul_two (n : ℕ) :
(finset.sum (range n) fun (i : ℕ) => i) * bit0 1 = n * (n - 1) :=
sorry
/-- Gauss' summation formula -/
theorem sum_range_id (n : ℕ) : (finset.sum (range n) fun (i : ℕ) => i) = n * (n - 1) / bit0 1 :=
sorry
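/- Informal sanity check: for `n = 5`, `0 + 1 + 2 + 3 + 4 = 10` and
   `5 * (5 - 1) / bit0 1 = 20 / 2 = 10`, agreeing with `sum_range_id`
   (`bit0 1` is the auto-ported spelling of `2`). -/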
end Mathlib
|
double precision function ampsq_3gam1g(j1,j2,j3,j4,j5,j6,za,zb)
c--- Matrix element squared for the process
c--- 0 --> qb(j5) + q(j6) + g(j1) + gam(j2) + gam(j3) + gam(j4)
c---
c--- Taken from "Multi-Photon Amplitudes for Next-to-Leading Order QCD"
c--- V. Del Duca, W. Kilgore and F. Maltoni, hep-ph/9910253
c---
implicit none
include 'constants.f'
include 'zprods_decl.f'
include 'ewcharge.f'
include 'ewcouple.f'
include 'qcdcouple.f'
integer j1,j2,j3,j4,j5,j6,h1,h2,h3,h4,h5
double complex amp(2,2,2,2,2),amp_3gam1g_mppppm,amp_3gam1g_pmpppm,
& amp_3gam1g_ppmppm,amp_3gam1g_pppmpm,amp_3gam1g_mmpppm
c--- ordering of labels in amp is as follows:
c--- (gluon j1, photon j2, photon j3, photon j4, antiquark j5)
c--- for consistency with function names (and quark j6=3-j5)
c--- trivial amplitude
amp(2,2,2,2,2)=czip
c--- basic MHV amplitudes
amp(1,2,2,2,2)=amp_3gam1g_mppppm(j1,j2,j3,j4,j5,j6,za,zb)
amp(2,1,2,2,2)=amp_3gam1g_pmpppm(j1,j2,j3,j4,j5,j6,za,zb)
amp(2,2,1,2,2)=amp_3gam1g_ppmppm(j1,j2,j3,j4,j5,j6,za,zb)
amp(2,2,2,1,2)=amp_3gam1g_pppmpm(j1,j2,j3,j4,j5,j6,za,zb)
c--- non-MHV amplitude and permutations
amp(1,1,2,2,2)=amp_3gam1g_mmpppm(j1,j2,j3,j4,j5,j6,za,zb)
amp(1,2,1,2,2)=amp_3gam1g_mmpppm(j1,j3,j2,j4,j5,j6,za,zb)
amp(1,2,2,1,2)=amp_3gam1g_mmpppm(j1,j4,j2,j3,j5,j6,za,zb)
c--- parity and charge conjugation
amp(1,1,1,1,2)=czip
amp(2,1,1,1,2)=-amp_3gam1g_pppmpm(j4,j3,j2,j1,j6,j5,za,zb)
amp(1,2,1,1,2)=-amp_3gam1g_ppmppm(j4,j3,j2,j1,j6,j5,za,zb)
amp(1,1,2,1,2)=-amp_3gam1g_pmpppm(j4,j3,j2,j1,j6,j5,za,zb)
amp(1,1,1,2,2)=-amp_3gam1g_mppppm(j4,j3,j2,j1,j6,j5,za,zb)
amp(2,2,1,1,2)=-amp_3gam1g_mmpppm(j4,j3,j2,j1,j5,j6,za,zb)
amp(2,1,2,1,2)=-amp_3gam1g_mmpppm(j2,j4,j1,j3,j5,j6,za,zb)
amp(2,1,1,2,2)=-amp_3gam1g_mmpppm(j2,j3,j1,j4,j5,j6,za,zb)
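c--- remaining quark helicity obtained by parity: flip all helicities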
do h1=1,2
do h2=1,2
do h3=1,2
do h4=1,2
amp(h1,h2,h3,h4,1)=amp(3-h1,3-h2,3-h3,3-h4,2)
enddo
enddo
enddo
enddo
c--- note: obvious redundancy in this routine, but might be
c--- worth checking relations for use in virtual
ampsq_3gam1g=0d0
do h1=1,2
do h2=1,2
do h3=1,2
do h4=1,2
do h5=1,2
c write(6,*) h1*10000+h2*1000+h3*100+h4*10+h5,
c & cdabs(amp(h1,h2,h3,h4,h5))**2
c & *8d0*esq**3*gsq*xn*Cf/6d0*aveqq*Q(2)**6*8d0
ampsq_3gam1g=ampsq_3gam1g+cdabs(amp(h1,h2,h3,h4,h5))**2
enddo
enddo
enddo
enddo
enddo
c pause
return
end
|
(* Title: HOL/Power.thy
Author: Lawrence C Paulson, Cambridge University Computer Laboratory
Copyright 1997 University of Cambridge
*)
section {* Exponentiation *}
theory Power
imports Num Equiv_Relations
begin
subsection {* Powers for Arbitrary Monoids *}
class power = one + times
begin
primrec power :: "'a \<Rightarrow> nat \<Rightarrow> 'a" (infixr "^" 80) where
power_0: "a ^ 0 = 1"
| power_Suc: "a ^ Suc n = a * a ^ n"
notation (latex output)
power ("(_\<^bsup>_\<^esup>)" [1000] 1000)
notation (HTML output)
power ("(_\<^bsup>_\<^esup>)" [1000] 1000)
text {* Special syntax for squares. *}
abbreviation (xsymbols)
power2 :: "'a \<Rightarrow> 'a" ("(_\<^sup>2)" [1000] 999) where
"x\<^sup>2 \<equiv> x ^ 2"
notation (latex output)
power2 ("(_\<^sup>2)" [1000] 999)
notation (HTML output)
power2 ("(_\<^sup>2)" [1000] 999)
end
context monoid_mult
begin
subclass power .
lemma power_one [simp]:
"1 ^ n = 1"
by (induct n) simp_all
lemma power_one_right [simp]:
"a ^ 1 = a"
by simp
lemma power_commutes:
"a ^ n * a = a * a ^ n"
by (induct n) (simp_all add: mult.assoc)
lemma power_Suc2:
"a ^ Suc n = a ^ n * a"
by (simp add: power_commutes)
lemma power_add:
"a ^ (m + n) = a ^ m * a ^ n"
by (induct m) (simp_all add: algebra_simps)
lemma power_mult:
"a ^ (m * n) = (a ^ m) ^ n"
by (induct n) (simp_all add: power_add)
lemma power2_eq_square: "a\<^sup>2 = a * a"
by (simp add: numeral_2_eq_2)
lemma power3_eq_cube: "a ^ 3 = a * a * a"
by (simp add: numeral_3_eq_3 mult.assoc)
lemma power_even_eq:
"a ^ (2 * n) = (a ^ n)\<^sup>2"
by (subst mult.commute) (simp add: power_mult)
lemma power_odd_eq:
"a ^ Suc (2*n) = a * (a ^ n)\<^sup>2"
by (simp add: power_even_eq)
lemma power_numeral_even:
"z ^ numeral (Num.Bit0 w) = (let w = z ^ (numeral w) in w * w)"
unfolding numeral_Bit0 power_add Let_def ..
lemma power_numeral_odd:
"z ^ numeral (Num.Bit1 w) = (let w = z ^ (numeral w) in z * w * w)"
unfolding numeral_Bit1 One_nat_def add_Suc_right add_0_right
unfolding power_Suc power_add Let_def mult.assoc ..
lemma funpow_times_power:
"(times x ^^ f x) = times (x ^ f x)"
proof (induct "f x" arbitrary: f)
case 0 then show ?case by (simp add: fun_eq_iff)
next
case (Suc n)
def g \<equiv> "\<lambda>x. f x - 1"
with Suc have "n = g x" by simp
with Suc have "times x ^^ g x = times (x ^ g x)" by simp
moreover from Suc g_def have "f x = g x + 1" by simp
ultimately show ?case by (simp add: power_add funpow_add fun_eq_iff mult.assoc)
qed
lemma power_commuting_commutes:
assumes "x * y = y * x"
shows "x ^ n * y = y * x ^n"
proof (induct n)
case (Suc n)
have "x ^ Suc n * y = x ^ n * y * x"
by (subst power_Suc2) (simp add: assms ac_simps)
also have "\<dots> = y * x ^ Suc n"
unfolding Suc power_Suc2
by (simp add: ac_simps)
finally show ?case .
qed simp
end
context comm_monoid_mult
begin
lemma power_mult_distrib [field_simps]:
"(a * b) ^ n = (a ^ n) * (b ^ n)"
by (induct n) (simp_all add: ac_simps)
end
context semiring_numeral
begin
lemma numeral_sqr: "numeral (Num.sqr k) = numeral k * numeral k"
by (simp only: sqr_conv_mult numeral_mult)
lemma numeral_pow: "numeral (Num.pow k l) = numeral k ^ numeral l"
by (induct l, simp_all only: numeral_class.numeral.simps pow.simps
numeral_sqr numeral_mult power_add power_one_right)
lemma power_numeral [simp]: "numeral k ^ numeral l = numeral (Num.pow k l)"
by (rule numeral_pow [symmetric])
end
context semiring_1
begin
lemma zero_power:
"0 < n \<Longrightarrow> 0 ^ n = 0"
by (cases n) simp_all
lemma power_zero_numeral [simp]:
"0 ^ numeral k = 0"
by (simp add: numeral_eq_Suc)
lemma zero_power2: "0\<^sup>2 = 0" (* delete? *)
by (rule power_zero_numeral)
lemma one_power2: "1\<^sup>2 = 1" (* delete? *)
by (rule power_one)
end
context comm_semiring_1
begin
text {* The divides relation *}
lemma le_imp_power_dvd:
assumes "m \<le> n" shows "a ^ m dvd a ^ n"
proof
have "a ^ n = a ^ (m + (n - m))"
using `m \<le> n` by simp
also have "\<dots> = a ^ m * a ^ (n - m)"
by (rule power_add)
finally show "a ^ n = a ^ m * a ^ (n - m)" .
qed
lemma power_le_dvd:
"a ^ n dvd b \<Longrightarrow> m \<le> n \<Longrightarrow> a ^ m dvd b"
by (rule dvd_trans [OF le_imp_power_dvd])
lemma dvd_power_same:
"x dvd y \<Longrightarrow> x ^ n dvd y ^ n"
by (induct n) (auto simp add: mult_dvd_mono)
lemma dvd_power_le:
"x dvd y \<Longrightarrow> m \<ge> n \<Longrightarrow> x ^ n dvd y ^ m"
by (rule power_le_dvd [OF dvd_power_same])
lemma dvd_power [simp]:
assumes "n > (0::nat) \<or> x = 1"
shows "x dvd (x ^ n)"
using assms proof
assume "0 < n"
then have "x ^ n = x ^ Suc (n - 1)" by simp
then show "x dvd (x ^ n)" by simp
next
assume "x = 1"
then show "x dvd (x ^ n)" by simp
qed
end
context ring_1
begin
lemma power_minus:
"(- a) ^ n = (- 1) ^ n * a ^ n"
proof (induct n)
case 0 show ?case by simp
next
case (Suc n) then show ?case
by (simp del: power_Suc add: power_Suc2 mult.assoc)
qed
lemma power_minus_Bit0:
"(- x) ^ numeral (Num.Bit0 k) = x ^ numeral (Num.Bit0 k)"
by (induct k, simp_all only: numeral_class.numeral.simps power_add
power_one_right mult_minus_left mult_minus_right minus_minus)
lemma power_minus_Bit1:
"(- x) ^ numeral (Num.Bit1 k) = - (x ^ numeral (Num.Bit1 k))"
by (simp only: eval_nat_numeral(3) power_Suc power_minus_Bit0 mult_minus_left)
lemma power2_minus [simp]:
"(- a)\<^sup>2 = a\<^sup>2"
by (rule power_minus_Bit0)
lemma power_minus1_even [simp]:
"(- 1) ^ (2*n) = 1"
proof (induct n)
case 0 show ?case by simp
next
case (Suc n) then show ?case by (simp add: power_add power2_eq_square)
qed
lemma power_minus1_odd:
"(- 1) ^ Suc (2*n) = -1"
by simp
lemma power_minus_even [simp]:
"(-a) ^ (2*n) = a ^ (2*n)"
by (simp add: power_minus [of a])
end
lemma power_eq_0_nat_iff [simp]:
fixes m n :: nat
shows "m ^ n = 0 \<longleftrightarrow> m = 0 \<and> n > 0"
by (induct n) auto
context ring_1_no_zero_divisors
begin
lemma power_eq_0_iff [simp]:
"a ^ n = 0 \<longleftrightarrow> a = 0 \<and> n > 0"
by (induct n) auto
lemma field_power_not_zero:
"a \<noteq> 0 \<Longrightarrow> a ^ n \<noteq> 0"
by (induct n) auto
lemma zero_eq_power2 [simp]:
"a\<^sup>2 = 0 \<longleftrightarrow> a = 0"
unfolding power2_eq_square by simp
lemma power2_eq_1_iff:
"a\<^sup>2 = 1 \<longleftrightarrow> a = 1 \<or> a = - 1"
unfolding power2_eq_square by (rule square_eq_1_iff)
end
context idom
begin
lemma power2_eq_iff: "x\<^sup>2 = y\<^sup>2 \<longleftrightarrow> x = y \<or> x = - y"
unfolding power2_eq_square by (rule square_eq_iff)
end
context division_ring
begin
text {* FIXME reorient or rename to @{text nonzero_inverse_power} *}
lemma nonzero_power_inverse:
"a \<noteq> 0 \<Longrightarrow> inverse (a ^ n) = (inverse a) ^ n"
by (induct n)
(simp_all add: nonzero_inverse_mult_distrib power_commutes field_power_not_zero)
end
context field
begin
lemma nonzero_power_divide:
"b \<noteq> 0 \<Longrightarrow> (a / b) ^ n = a ^ n / b ^ n"
by (simp add: divide_inverse power_mult_distrib nonzero_power_inverse)
end
subsection {* Exponentiation on ordered types *}
context linordered_ring (* TODO: move *)
begin
lemma sum_squares_ge_zero:
"0 \<le> x * x + y * y"
by (intro add_nonneg_nonneg zero_le_square)
lemma not_sum_squares_lt_zero:
"\<not> x * x + y * y < 0"
by (simp add: not_less sum_squares_ge_zero)
end
context linordered_semidom
begin
lemma zero_less_power [simp]:
"0 < a \<Longrightarrow> 0 < a ^ n"
by (induct n) simp_all
lemma zero_le_power [simp]:
"0 \<le> a \<Longrightarrow> 0 \<le> a ^ n"
by (induct n) simp_all
lemma power_mono:
"a \<le> b \<Longrightarrow> 0 \<le> a \<Longrightarrow> a ^ n \<le> b ^ n"
by (induct n) (auto intro: mult_mono order_trans [of 0 a b])
lemma one_le_power [simp]: "1 \<le> a \<Longrightarrow> 1 \<le> a ^ n"
using power_mono [of 1 a n] by simp
lemma power_le_one: "\<lbrakk>0 \<le> a; a \<le> 1\<rbrakk> \<Longrightarrow> a ^ n \<le> 1"
using power_mono [of a 1 n] by simp
lemma power_gt1_lemma:
assumes gt1: "1 < a"
shows "1 < a * a ^ n"
proof -
from gt1 have "0 \<le> a"
by (fact order_trans [OF zero_le_one less_imp_le])
have "1 * 1 < a * 1" using gt1 by simp
also have "\<dots> \<le> a * a ^ n" using gt1
by (simp only: mult_mono `0 \<le> a` one_le_power order_less_imp_le
zero_le_one order_refl)
finally show ?thesis by simp
qed
lemma power_gt1:
"1 < a \<Longrightarrow> 1 < a ^ Suc n"
by (simp add: power_gt1_lemma)
lemma one_less_power [simp]:
"1 < a \<Longrightarrow> 0 < n \<Longrightarrow> 1 < a ^ n"
by (cases n) (simp_all add: power_gt1_lemma)
lemma power_le_imp_le_exp:
assumes gt1: "1 < a"
shows "a ^ m \<le> a ^ n \<Longrightarrow> m \<le> n"
proof (induct m arbitrary: n)
case 0
show ?case by simp
next
case (Suc m)
show ?case
proof (cases n)
case 0
with Suc.prems Suc.hyps have "a * a ^ m \<le> 1" by simp
with gt1 show ?thesis
by (force simp only: power_gt1_lemma
not_less [symmetric])
next
case (Suc n)
with Suc.prems Suc.hyps show ?thesis
by (force dest: mult_left_le_imp_le
simp add: less_trans [OF zero_less_one gt1])
qed
qed
text{*Surely we can strengthen this? It holds for @{text "0<a<1"} too.*}
lemma power_inject_exp [simp]:
"1 < a \<Longrightarrow> a ^ m = a ^ n \<longleftrightarrow> m = n"
by (force simp add: order_antisym power_le_imp_le_exp)
text{*Can relax the first premise to @{term "0<a"} in the case of the
natural numbers.*}
lemma power_less_imp_less_exp:
"1 < a \<Longrightarrow> a ^ m < a ^ n \<Longrightarrow> m < n"
by (simp add: order_less_le [of m n] less_le [of "a^m" "a^n"]
power_le_imp_le_exp)
lemma power_strict_mono [rule_format]:
"a < b \<Longrightarrow> 0 \<le> a \<Longrightarrow> 0 < n \<longrightarrow> a ^ n < b ^ n"
by (induct n)
(auto simp add: mult_strict_mono le_less_trans [of 0 a b])
text{*Lemma for @{text power_strict_decreasing}*}
lemma power_Suc_less:
"0 < a \<Longrightarrow> a < 1 \<Longrightarrow> a * a ^ n < a ^ n"
by (induct n)
(auto simp add: mult_strict_left_mono)
lemma power_strict_decreasing [rule_format]:
"n < N \<Longrightarrow> 0 < a \<Longrightarrow> a < 1 \<longrightarrow> a ^ N < a ^ n"
proof (induct N)
case 0 then show ?case by simp
next
case (Suc N) then show ?case
apply (auto simp add: power_Suc_less less_Suc_eq)
apply (subgoal_tac "a * a^N < 1 * a^n")
apply simp
apply (rule mult_strict_mono) apply auto
done
qed
text{*Proof resembles that of @{text power_strict_decreasing}*}
lemma power_decreasing [rule_format]:
"n \<le> N \<Longrightarrow> 0 \<le> a \<Longrightarrow> a \<le> 1 \<longrightarrow> a ^ N \<le> a ^ n"
proof (induct N)
case 0 then show ?case by simp
next
case (Suc N) then show ?case
apply (auto simp add: le_Suc_eq)
apply (subgoal_tac "a * a^N \<le> 1 * a^n", simp)
apply (rule mult_mono) apply auto
done
qed
lemma power_Suc_less_one:
"0 < a \<Longrightarrow> a < 1 \<Longrightarrow> a ^ Suc n < 1"
using power_strict_decreasing [of 0 "Suc n" a] by simp
text{*Proof again resembles that of @{text power_strict_decreasing}*}
lemma power_increasing [rule_format]:
"n \<le> N \<Longrightarrow> 1 \<le> a \<Longrightarrow> a ^ n \<le> a ^ N"
proof (induct N)
case 0 then show ?case by simp
next
case (Suc N) then show ?case
apply (auto simp add: le_Suc_eq)
apply (subgoal_tac "1 * a^n \<le> a * a^N", simp)
apply (rule mult_mono) apply (auto simp add: order_trans [OF zero_le_one])
done
qed
text{*Lemma for @{text power_strict_increasing}*}
lemma power_less_power_Suc:
"1 < a \<Longrightarrow> a ^ n < a * a ^ n"
by (induct n) (auto simp add: mult_strict_left_mono less_trans [OF zero_less_one])
lemma power_strict_increasing [rule_format]:
"n < N \<Longrightarrow> 1 < a \<longrightarrow> a ^ n < a ^ N"
proof (induct N)
case 0 then show ?case by simp
next
case (Suc N) then show ?case
apply (auto simp add: power_less_power_Suc less_Suc_eq)
apply (subgoal_tac "1 * a^n < a * a^N", simp)
apply (rule mult_strict_mono) apply (auto simp add: less_trans [OF zero_less_one] less_imp_le)
done
qed
lemma power_increasing_iff [simp]:
"1 < b \<Longrightarrow> b ^ x \<le> b ^ y \<longleftrightarrow> x \<le> y"
by (blast intro: power_le_imp_le_exp power_increasing less_imp_le)
lemma power_strict_increasing_iff [simp]:
"1 < b \<Longrightarrow> b ^ x < b ^ y \<longleftrightarrow> x < y"
by (blast intro: power_less_imp_less_exp power_strict_increasing)
lemma power_le_imp_le_base:
assumes le: "a ^ Suc n \<le> b ^ Suc n"
and ynonneg: "0 \<le> b"
shows "a \<le> b"
proof (rule ccontr)
assume "~ a \<le> b"
then have "b < a" by (simp only: linorder_not_le)
then have "b ^ Suc n < a ^ Suc n"
by (simp only: assms power_strict_mono)
from le and this show False
by (simp add: linorder_not_less [symmetric])
qed
lemma power_less_imp_less_base:
assumes less: "a ^ n < b ^ n"
assumes nonneg: "0 \<le> b"
shows "a < b"
proof (rule contrapos_pp [OF less])
assume "~ a < b"
hence "b \<le> a" by (simp only: linorder_not_less)
hence "b ^ n \<le> a ^ n" using nonneg by (rule power_mono)
thus "\<not> a ^ n < b ^ n" by (simp only: linorder_not_less)
qed
lemma power_inject_base:
"a ^ Suc n = b ^ Suc n \<Longrightarrow> 0 \<le> a \<Longrightarrow> 0 \<le> b \<Longrightarrow> a = b"
by (blast intro: power_le_imp_le_base antisym eq_refl sym)
lemma power_eq_imp_eq_base:
"a ^ n = b ^ n \<Longrightarrow> 0 \<le> a \<Longrightarrow> 0 \<le> b \<Longrightarrow> 0 < n \<Longrightarrow> a = b"
by (cases n) (simp_all del: power_Suc, rule power_inject_base)
lemma power2_le_imp_le:
"x\<^sup>2 \<le> y\<^sup>2 \<Longrightarrow> 0 \<le> y \<Longrightarrow> x \<le> y"
unfolding numeral_2_eq_2 by (rule power_le_imp_le_base)
lemma power2_less_imp_less:
"x\<^sup>2 < y\<^sup>2 \<Longrightarrow> 0 \<le> y \<Longrightarrow> x < y"
by (rule power_less_imp_less_base)
lemma power2_eq_imp_eq:
"x\<^sup>2 = y\<^sup>2 \<Longrightarrow> 0 \<le> x \<Longrightarrow> 0 \<le> y \<Longrightarrow> x = y"
unfolding numeral_2_eq_2 by (erule (2) power_eq_imp_eq_base) simp
end
context linordered_ring_strict
begin
lemma sum_squares_eq_zero_iff:
"x * x + y * y = 0 \<longleftrightarrow> x = 0 \<and> y = 0"
by (simp add: add_nonneg_eq_0_iff)
lemma sum_squares_le_zero_iff:
"x * x + y * y \<le> 0 \<longleftrightarrow> x = 0 \<and> y = 0"
by (simp add: le_less not_sum_squares_lt_zero sum_squares_eq_zero_iff)
lemma sum_squares_gt_zero_iff:
"0 < x * x + y * y \<longleftrightarrow> x \<noteq> 0 \<or> y \<noteq> 0"
by (simp add: not_le [symmetric] sum_squares_le_zero_iff)
end
context linordered_idom
begin
lemma power_abs:
"abs (a ^ n) = abs a ^ n"
by (induct n) (auto simp add: abs_mult)
lemma abs_power_minus [simp]:
"abs ((-a) ^ n) = abs (a ^ n)"
by (simp add: power_abs)
lemma zero_less_power_abs_iff [simp]:
"0 < abs a ^ n \<longleftrightarrow> a \<noteq> 0 \<or> n = 0"
proof (induct n)
case 0 show ?case by simp
next
case (Suc n) show ?case by (auto simp add: Suc zero_less_mult_iff)
qed
lemma zero_le_power_abs [simp]:
"0 \<le> abs a ^ n"
by (rule zero_le_power [OF abs_ge_zero])
lemma zero_le_power2 [simp]:
"0 \<le> a\<^sup>2"
by (simp add: power2_eq_square)
lemma zero_less_power2 [simp]:
"0 < a\<^sup>2 \<longleftrightarrow> a \<noteq> 0"
by (force simp add: power2_eq_square zero_less_mult_iff linorder_neq_iff)
lemma power2_less_0 [simp]:
"\<not> a\<^sup>2 < 0"
by (force simp add: power2_eq_square mult_less_0_iff)
lemma power2_less_eq_zero_iff [simp]:
"a\<^sup>2 \<le> 0 \<longleftrightarrow> a = 0"
by (simp add: le_less)
lemma abs_power2 [simp]:
"abs (a\<^sup>2) = a\<^sup>2"
by (simp add: power2_eq_square abs_mult abs_mult_self)
lemma power2_abs [simp]:
"(abs a)\<^sup>2 = a\<^sup>2"
by (simp add: power2_eq_square abs_mult_self)
lemma odd_power_less_zero:
"a < 0 \<Longrightarrow> a ^ Suc (2*n) < 0"
proof (induct n)
case 0
then show ?case by simp
next
case (Suc n)
have "a ^ Suc (2 * Suc n) = (a*a) * a ^ Suc(2*n)"
by (simp add: ac_simps power_add power2_eq_square)
thus ?case
by (simp del: power_Suc add: Suc mult_less_0_iff mult_neg_neg)
qed
lemma odd_0_le_power_imp_0_le:
"0 \<le> a ^ Suc (2*n) \<Longrightarrow> 0 \<le> a"
using odd_power_less_zero [of a n]
by (force simp add: linorder_not_less [symmetric])
lemma zero_le_even_power'[simp]:
"0 \<le> a ^ (2*n)"
proof (induct n)
case 0
show ?case by simp
next
case (Suc n)
have "a ^ (2 * Suc n) = (a*a) * a ^ (2*n)"
by (simp add: ac_simps power_add power2_eq_square)
thus ?case
by (simp add: Suc zero_le_mult_iff)
qed
lemma sum_power2_ge_zero:
"0 \<le> x\<^sup>2 + y\<^sup>2"
by (intro add_nonneg_nonneg zero_le_power2)
lemma not_sum_power2_lt_zero:
"\<not> x\<^sup>2 + y\<^sup>2 < 0"
unfolding not_less by (rule sum_power2_ge_zero)
lemma sum_power2_eq_zero_iff:
"x\<^sup>2 + y\<^sup>2 = 0 \<longleftrightarrow> x = 0 \<and> y = 0"
unfolding power2_eq_square by (simp add: add_nonneg_eq_0_iff)
lemma sum_power2_le_zero_iff:
"x\<^sup>2 + y\<^sup>2 \<le> 0 \<longleftrightarrow> x = 0 \<and> y = 0"
by (simp add: le_less sum_power2_eq_zero_iff not_sum_power2_lt_zero)
lemma sum_power2_gt_zero_iff:
"0 < x\<^sup>2 + y\<^sup>2 \<longleftrightarrow> x \<noteq> 0 \<or> y \<noteq> 0"
unfolding not_le [symmetric] by (simp add: sum_power2_le_zero_iff)
end
subsection {* Miscellaneous rules *}
lemma self_le_power:
fixes x::"'a::linordered_semidom"
shows "1 \<le> x \<Longrightarrow> 0 < n \<Longrightarrow> x \<le> x ^ n"
using power_increasing[of 1 n x] power_one_right[of x] by auto
lemma power_eq_if: "p ^ m = (if m=0 then 1 else p * (p ^ (m - 1)))"
unfolding One_nat_def by (cases m) simp_all
lemma (in comm_semiring_1) power2_sum:
"(x + y)\<^sup>2 = x\<^sup>2 + y\<^sup>2 + 2 * x * y"
by (simp add: algebra_simps power2_eq_square mult_2_right)
lemma (in comm_ring_1) power2_diff:
"(x - y)\<^sup>2 = x\<^sup>2 + y\<^sup>2 - 2 * x * y"
by (simp add: algebra_simps power2_eq_square mult_2_right)
lemma power_0_Suc [simp]:
"(0::'a::{power, semiring_0}) ^ Suc n = 0"
by simp
text{*It looks plausible as a simprule, but its effect can be strange.*}
lemma power_0_left:
"0 ^ n = (if n = 0 then 1 else (0::'a::{power, semiring_0}))"
by (induct n) simp_all
lemma (in field) power_diff:
assumes nz: "a \<noteq> 0"
shows "n \<le> m \<Longrightarrow> a ^ (m - n) = a ^ m / a ^ n"
by (induct m n rule: diff_induct) (simp_all add: nz field_power_not_zero)
text{*Perhaps these should be simprules.*}
lemma power_inverse:
fixes a :: "'a::division_ring_inverse_zero"
shows "inverse (a ^ n) = inverse a ^ n"
apply (cases "a = 0")
apply (simp add: power_0_left)
apply (simp add: nonzero_power_inverse)
done (* TODO: reorient or rename to inverse_power *)
lemma power_one_over:
"1 / (a::'a::{field_inverse_zero, power}) ^ n = (1 / a) ^ n"
by (simp add: divide_inverse) (rule power_inverse)
lemma power_divide [field_simps, divide_simps]:
"(a / b) ^ n = (a::'a::field_inverse_zero) ^ n / b ^ n"
apply (cases "b = 0")
apply (simp add: power_0_left)
apply (rule nonzero_power_divide)
apply assumption
done
text {* Simprules for comparisons where common factors can be cancelled. *}
lemmas zero_compare_simps =
add_strict_increasing add_strict_increasing2 add_increasing
zero_le_mult_iff zero_le_divide_iff
zero_less_mult_iff zero_less_divide_iff
mult_le_0_iff divide_le_0_iff
mult_less_0_iff divide_less_0_iff
zero_le_power2 power2_less_0
subsection {* Exponentiation for the Natural Numbers *}
lemma nat_one_le_power [simp]:
"Suc 0 \<le> i \<Longrightarrow> Suc 0 \<le> i ^ n"
by (rule one_le_power [of i n, unfolded One_nat_def])
lemma nat_zero_less_power_iff [simp]:
"x ^ n > 0 \<longleftrightarrow> x > (0::nat) \<or> n = 0"
by (induct n) auto
lemma nat_power_eq_Suc_0_iff [simp]:
"x ^ m = Suc 0 \<longleftrightarrow> m = 0 \<or> x = Suc 0"
by (induct m) auto
lemma power_Suc_0 [simp]:
"Suc 0 ^ n = Suc 0"
by simp
text{*Valid for the naturals, but what if @{text"0<i<1"}?
Premises cannot be weakened: consider the case where @{term "i=0"},
@{term "m=1"} and @{term "n=0"}.*}
lemma nat_power_less_imp_less:
assumes nonneg: "0 < (i\<Colon>nat)"
assumes less: "i ^ m < i ^ n"
shows "m < n"
proof (cases "i = 1")
case True with less power_one [where 'a = nat] show ?thesis by simp
next
case False with nonneg have "1 < i" by auto
from power_strict_increasing_iff [OF this] less show ?thesis ..
qed
lemma power_dvd_imp_le:
"i ^ m dvd i ^ n \<Longrightarrow> (1::nat) < i \<Longrightarrow> m \<le> n"
apply (rule power_le_imp_le_exp, assumption)
apply (erule dvd_imp_le, simp)
done
lemma power2_nat_le_eq_le:
fixes m n :: nat
shows "m\<^sup>2 \<le> n\<^sup>2 \<longleftrightarrow> m \<le> n"
by (auto intro: power2_le_imp_le power_mono)
lemma power2_nat_le_imp_le:
fixes m n :: nat
assumes "m\<^sup>2 \<le> n"
shows "m \<le> n"
proof (cases m)
case 0 then show ?thesis by simp
next
case (Suc k)
show ?thesis
proof (rule ccontr)
assume "\<not> m \<le> n"
then have "n < m" by simp
with assms Suc show False
by (auto simp add: algebra_simps) (simp add: power2_eq_square)
qed
qed
subsubsection {* Cardinality of the Powerset *}
lemma card_UNIV_bool [simp]: "card (UNIV :: bool set) = 2"
unfolding UNIV_bool by simp
lemma card_Pow: "finite A \<Longrightarrow> card (Pow A) = 2 ^ card A"
proof (induct rule: finite_induct)
case empty
show ?case by auto
next
case (insert x A)
then have "inj_on (insert x) (Pow A)"
unfolding inj_on_def by (blast elim!: equalityE)
then have "card (Pow A) + card (insert x ` Pow A) = 2 * 2 ^ card A"
by (simp add: mult_2 card_image Pow_insert insert.hyps)
then show ?case using insert
apply (simp add: Pow_insert)
apply (subst card_Un_disjoint, auto)
done
qed
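text {* For example, @{text "{0, 1}"} has the four subsets
  @{text "{}, {0}, {1}, {0, 1}"}, matching @{text "2 ^ 2 = 4"}. *}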
subsubsection {* Generalized sum over a set *}
lemma setsum_zero_power [simp]:
fixes c :: "nat \<Rightarrow> 'a::division_ring"
shows "(\<Sum>i\<in>A. c i * 0^i) = (if finite A \<and> 0 \<in> A then c 0 else 0)"
apply (cases "finite A")
by (induction A rule: finite_induct) auto
lemma setsum_zero_power' [simp]:
fixes c :: "nat \<Rightarrow> 'a::field"
shows "(\<Sum>i\<in>A. c i * 0^i / d i) = (if finite A \<and> 0 \<in> A then c 0 / d 0 else 0)"
using setsum_zero_power [of "\<lambda>i. c i / d i" A]
by auto
subsubsection {* Generalized product over a set *}
lemma setprod_constant: "finite A ==> (\<Prod>x\<in> A. (y::'a::{comm_monoid_mult})) = y^(card A)"
apply (erule finite_induct)
apply auto
done
lemma setprod_power_distrib:
fixes f :: "'a \<Rightarrow> 'b::comm_semiring_1"
shows "setprod f A ^ n = setprod (\<lambda>x. (f x) ^ n) A"
proof (cases "finite A")
case True then show ?thesis
by (induct A rule: finite_induct) (auto simp add: power_mult_distrib)
next
case False then show ?thesis
by simp
qed
lemma power_setsum:
"c ^ (\<Sum>a\<in>A. f a) = (\<Prod>a\<in>A. c ^ f a)"
by (induct A rule: infinite_finite_induct) (simp_all add: power_add)
lemma setprod_gen_delta:
assumes fS: "finite S"
shows "setprod (\<lambda>k. if k=a then b k else c) S = (if a \<in> S then (b a ::'a::comm_monoid_mult) * c^ (card S - 1) else c^ card S)"
proof-
let ?f = "(\<lambda>k. if k=a then b k else c)"
{assume a: "a \<notin> S"
hence "\<forall> k\<in> S. ?f k = c" by simp
hence ?thesis using a setprod_constant[OF fS, of c] by simp }
moreover
{assume a: "a \<in> S"
let ?A = "S - {a}"
let ?B = "{a}"
have eq: "S = ?A \<union> ?B" using a by blast
have dj: "?A \<inter> ?B = {}" by simp
from fS have fAB: "finite ?A" "finite ?B" by auto
have fA0:"setprod ?f ?A = setprod (\<lambda>i. c) ?A"
apply (rule setprod.cong) by auto
have cA: "card ?A = card S - 1" using fS a by auto
have fA1: "setprod ?f ?A = c ^ card ?A" unfolding fA0 apply (rule setprod_constant) using fS by auto
have "setprod ?f ?A * setprod ?f ?B = setprod ?f S"
using setprod.union_disjoint[OF fAB dj, of ?f, unfolded eq[symmetric]]
by simp
then have ?thesis using a cA
by (simp add: fA1 field_simps cong add: setprod.cong cong del: if_weak_cong)}
ultimately show ?thesis by blast
qed
subsection {* Code generator tweak *}
lemma power_power_power [code]:
"power = power.power (1::'a::{power}) (op *)"
unfolding power_def power.power_def ..
declare power.power.simps [code]
code_identifier
code_module Power \<rightharpoonup> (SML) Arith and (OCaml) Arith and (Haskell) Arith
end
|
\subsection{Sequential games}
\subsubsection{Introduction}
A game can have multiple rounds. For example, one agent could offer a trade, and the other agent could choose to accept or reject it. Since later agents observe the earlier agents' choices, and earlier agents know their choices will be observed, the analysis of the game can change.
This doesn't change games with pure strategies, but it does affect those with mixed strategies. For example, even if the prisoners could see each other in the prisoner's dilemma, we would still get the same outcome: the last agent still prefers to “tell”, and earlier agents know this and have no reason not to also “tell”.
But consider the football/opera game. Here the first mover is better off, and there is a pure-strategy solution, while in rock paper scissors the first mover loses.
We can solve more complex games backwards. Since the actions in the last round of a game have no later rounds to influence, that round can be solved separately. Agents then know what the outcome will be if that point is reached, and can treat that “subgame” as a pay-off.
This method is known as backwards induction.
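As a minimal illustration (with made-up pay-offs, purely for exposition): suppose player 1 first chooses $L$ or $R$. After $L$, player 2 chooses between pay-offs $(2,1)$ and $(0,0)$; after $R$, between $(1,2)$ and $(3,0)$, where the first coordinate is player 1’s pay-off. Solving the last round first, player 2 picks $(2,1)$ after $L$ and $(1,2)$ after $R$. Treating these as the values of the two subgames, player 1 compares $2$ with $1$ and plays $L$, so backwards induction predicts the outcome $(2,1)$.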
\subsubsection{One-round sequential games}
\subsubsection{Backwards induction}
\subsubsection{Zero-sum games}
\subsubsection{Subgame perfect equilibrium}
\subsubsection{Nash equilibrium}
|
// SPDX-License-Identifier: MIT
// SPDX-FileCopyrightText: Copyright 2019-2021 Heal Research
#ifndef OPERON_DISTANCE_HPP
#define OPERON_DISTANCE_HPP
#include "core/types.hpp"
#include "vectorclass.h"
#include <immintrin.h>
#include <Eigen/Core>
namespace Operon {
namespace Distance {
namespace {
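// SIMD helpers: load one block of hashes from lhs and compare it against each of
// the corresponding rhs values; Intersect is true when any lane matches.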
template<typename T, std::enable_if_t<std::is_integral_v<T> && sizeof(T) == 8, bool> = true>
constexpr inline auto check(T const* lhs, T const* rhs) noexcept
{
auto a = Vec4uq().load(lhs);
return a == rhs[0] | a == rhs[1] | a == rhs[2] | a == rhs[3];
}
template<typename T, std::enable_if_t<std::is_integral_v<T> && sizeof(T) == 4, bool> = true>
constexpr inline auto check(T const* lhs, T const* rhs) noexcept
{
auto a = Vec8ui().load(lhs);
return a == rhs[0] | a == rhs[1] | a == rhs[2] | a == rhs[3] | a == rhs[4] | a == rhs[5] | a == rhs[6] | a == rhs[7];
}
template<typename T, std::enable_if_t<std::is_integral_v<T> && (sizeof(T) == 4 || sizeof(T) == 8), bool> = true>
constexpr inline bool Intersect(T const* lhs, T const* rhs) noexcept
{
return horizontal_add(check(lhs, rhs));
}
// Count the number of common values of two hash vectors. SIMD block comparisons
// first skip over disjoint regions; the scalar loop then resolves the remainder.
// This method only works when the hash vectors are sorted.
template<typename T, size_t S = size_t{32} / sizeof(T)>
inline size_t CountIntersect(Operon::Span<T> lhs, Operon::Span<T> rhs) noexcept
{
T const *p0 = lhs.data(), *pS = p0 + (lhs.size() & (-S)), *pN = p0 + lhs.size();
T const *q0 = rhs.data(), *qS = q0 + (rhs.size() & (-S)), *qN = q0 + rhs.size();
T const *p = p0, *q = q0;
while(p < pS && q < qS) {
if (Intersect(p, q)) {
break;
}
auto const a = *(p + S - 1);
auto const b = *(q + S - 1);
if (a < b) p += S;
if (a > b) q += S;
}
auto const aN = *(pN - 1);
auto const bN = *(qN - 1);
size_t count{0};
while(p < pN && q < qN) {
auto const a = *p;
auto const b = *q;
if (a > bN || b > aN) {
break;
}
count += a == b;
p += a <= b;
q += a >= b;
}
return count;
}
template<typename Container>
inline size_t CountIntersect(Container const& lhs, Container const& rhs) noexcept
{
using T = typename Container::value_type;
return CountIntersect(Operon::Span<T const>(lhs.data(), lhs.size()), Operon::Span<T const>(rhs.data(), rhs.size()));
}
} // namespace
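// Jaccard-style distance on sorted hash vectors: 0 when identical, 1 when disjoint.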
inline double Jaccard(Operon::Vector<Operon::Hash> const& lhs, Operon::Vector<Operon::Hash> const& rhs) noexcept
{
size_t c = CountIntersect(lhs, rhs);
size_t n = lhs.size() + rhs.size();
return static_cast<double>(n - 2 * c) / static_cast<double>(n);
}
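// Sorensen-Dice distance: 1 minus twice the intersection size over the total size.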
inline double SorensenDice(Operon::Vector<Operon::Hash> const& lhs, Operon::Vector<Operon::Hash> const& rhs) noexcept
{
size_t n = lhs.size() + rhs.size();
size_t c = CountIntersect(lhs, rhs);
return 1.0 - 2.0 * static_cast<double>(c) / static_cast<double>(n);
}
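// Example usage (illustrative only; assumes both vectors hold sorted hash values):
//   Operon::Vector<Operon::Hash> a { 1, 2, 3 }, b { 2, 3, 4 };
//   auto d = Operon::Distance::Jaccard(a, b); // 2 shared values out of 6 -> 1/3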
} // namespace Distance
} // namespace Operon
#endif
|
function [u, V, exitflag, output] = snd_solveOptimalControlProblem (snd, varargin)
%SND_SOLVEOPTIMALCONTROLPROBLEM Solve the optimal control problem over snd.horizon.
%   Aggregates the per-step linear constraints into block-diagonal form and
%   calls fmincon with the cost function and nonlinear constraints stored in snd.
% Set control and linear bounds
A = [];
b = [];
Aeq = [];
beq = [];
lb = [];
ub = [];
for k=1:snd.horizon %Aggregation
[Anew, bnew, Aeqnew, beqnew, lbnew, ubnew] = ...
snd.l_constraints( k, snd.net_load, snd.battery, snd.u0_ref);
A = blkdiag(A,Anew);
b = [b, bnew];
Aeq = blkdiag(Aeq,Aeqnew);
beq = [beq, beqnew];
lb = [lb, lbnew];
ub = [ub, ubnew];
end
% Solve optimization problem
[u, V, exitflag, output] = fmincon( @(u) snd.costfunction( snd, u ), ...
snd.u0, A, b, Aeq, beq, lb, ub, ...
@(u) snd.nonlinearconstraints(snd, u ), snd.option);
end
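% Example call (illustrative; assumes snd is a fully configured controller struct):
%   [u, V, exitflag, output] = snd_solveOptimalControlProblem(snd);
%   if exitflag <= 0, warning('fmincon did not converge'); end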
|
lemma support_on_if_subset: "support_on A (\<lambda>x. if P x then a else 0) \<subseteq> {x \<in> A. P x}" |
------------------------------------------------------------------------
-- The Agda standard library
--
-- Alternative definition of divisibility without using modulus.
------------------------------------------------------------------------
{-# OPTIONS --without-K --safe #-}
module Data.Integer.Divisibility.Signed where
open import Function
open import Data.Integer
open import Data.Integer.Properties
open import Data.Integer.Divisibility as Unsigned
using (divides)
renaming (_∣_ to _∣ᵤ_)
import Data.Nat as ℕ
import Data.Nat.Divisibility as ℕ
import Data.Nat.Coprimality as ℕ
import Data.Nat.Properties as ℕ
import Data.Sign as S
import Data.Sign.Properties as SProp
open import Level
open import Relation.Binary
open import Relation.Binary.PropositionalEquality
import Relation.Binary.Reasoning.Preorder as PreorderReasoning
open import Relation.Nullary using (yes; no)
import Relation.Nullary.Decidable as DEC
------------------------------------------------------------------------
-- Type
infix 4 _∣_
record _∣_ (k z : ℤ) : Set where
constructor divides
field quotient : ℤ
equality : z ≡ quotient * k
open _∣_ using (quotient) public
------------------------------------------------------------------------
-- Conversion between signed and unsigned divisibility
∣ᵤ⇒∣ : ∀ {k i} → k ∣ᵤ i → k ∣ i
∣ᵤ⇒∣ {k} {i} (divides 0 eq) = divides (+ 0) (∣n∣≡0⇒n≡0 eq)
∣ᵤ⇒∣ {k} {i} (divides q@(ℕ.suc q') eq) with k ≟ + 0
... | yes refl = divides (+ 0) (∣n∣≡0⇒n≡0 (trans eq (ℕ.*-zeroʳ q)))
... | no ¬k≠0 = divides ((S._*_ on sign) i k ◃ q) (◃-≡ sign-eq abs-eq) where
k' = ℕ.suc (ℕ.pred ∣ k ∣)
ikq' = sign i S.* sign k ◃ ℕ.suc q'
sign-eq : sign i ≡ sign (((S._*_ on sign) i k ◃ q) * k)
sign-eq = sym $ begin
sign (((S._*_ on sign) i k ◃ ℕ.suc q') * k)
≡⟨ cong (λ m → sign (sign ikq' S.* sign k ◃ ∣ ikq' ∣ ℕ.* m))
(sym (ℕ.m≢0⇒suc[pred[m]]≡m (¬k≠0 ∘ ∣n∣≡0⇒n≡0))) ⟩
sign (sign ikq' S.* sign k ◃ ∣ ikq' ∣ ℕ.* k')
≡⟨ cong (λ m → sign (sign ikq' S.* sign k ◃ m ℕ.* k'))
(abs-◃ (sign i S.* sign k) (ℕ.suc q')) ⟩
sign (sign ikq' S.* sign k ◃ _)
≡⟨ sign-◃ (sign ikq' S.* sign k) (ℕ.pred ∣ k ∣ ℕ.+ q' ℕ.* k') ⟩
sign ikq' S.* sign k
≡⟨ cong (S._* sign k) (sign-◃ (sign i S.* sign k) q') ⟩
sign i S.* sign k S.* sign k
≡⟨ SProp.*-assoc (sign i) (sign k) (sign k) ⟩
sign i S.* (sign k S.* sign k)
≡⟨ cong (sign i S.*_) (SProp.s*s≡+ (sign k)) ⟩
sign i S.* S.+
≡⟨ SProp.*-identityʳ (sign i) ⟩
sign i
∎ where open ≡-Reasoning
abs-eq : ∣ i ∣ ≡ ∣ ((S._*_ on sign) i k ◃ q) * k ∣
abs-eq = sym $ begin
∣ ((S._*_ on sign) i k ◃ ℕ.suc q') * k ∣
≡⟨ abs-◃ (sign ikq' S.* sign k) (∣ ikq' ∣ ℕ.* ∣ k ∣) ⟩
∣ ikq' ∣ ℕ.* ∣ k ∣
≡⟨ cong (ℕ._* ∣ k ∣) (abs-◃ (sign i S.* sign k) (ℕ.suc q')) ⟩
ℕ.suc q' ℕ.* ∣ k ∣
≡⟨ sym eq ⟩
∣ i ∣
∎ where open ≡-Reasoning
∣⇒∣ᵤ : ∀ {k i} → k ∣ i → k ∣ᵤ i
∣⇒∣ᵤ {k} {i} (divides q eq) = divides ∣ q ∣ $′ begin
∣ i ∣ ≡⟨ cong ∣_∣ eq ⟩
∣ q * k ∣ ≡⟨ abs-*-commute q k ⟩
∣ q ∣ ℕ.* ∣ k ∣ ∎ where open ≡-Reasoning
------------------------------------------------------------------------
-- _∣_ is a preorder
∣-refl : Reflexive _∣_
∣-refl = ∣ᵤ⇒∣ ℕ.∣-refl
∣-reflexive : _≡_ ⇒ _∣_
∣-reflexive refl = ∣-refl
∣-trans : Transitive _∣_
∣-trans i∣j j∣k = ∣ᵤ⇒∣ (ℕ.∣-trans (∣⇒∣ᵤ i∣j) (∣⇒∣ᵤ j∣k))
∣-isPreorder : IsPreorder _≡_ _∣_
∣-isPreorder = record
{ isEquivalence = isEquivalence
; reflexive = ∣-reflexive
; trans = ∣-trans
}
∣-preorder : Preorder _ _ _
∣-preorder = record { isPreorder = ∣-isPreorder }
module ∣-Reasoning = PreorderReasoning ∣-preorder
hiding (_≈⟨_⟩_)
renaming (_∼⟨_⟩_ to _∣⟨_⟩_)
------------------------------------------------------------------------
-- Other properties of _∣_
_∣?_ : Decidable _∣_
k ∣? m = DEC.map′ ∣ᵤ⇒∣ ∣⇒∣ᵤ (∣ k ∣ ℕ.∣? ∣ m ∣)
0∣⇒≡0 : ∀ {m} → + 0 ∣ m → m ≡ + 0
0∣⇒≡0 0|m = ∣n∣≡0⇒n≡0 (ℕ.0∣⇒≡0 (∣⇒∣ᵤ 0|m))
m∣∣m∣ : ∀ {m} → m ∣ (+ ∣ m ∣)
m∣∣m∣ = ∣ᵤ⇒∣ ℕ.∣-refl
∣m∣∣m : ∀ {m} → (+ ∣ m ∣) ∣ m
∣m∣∣m = ∣ᵤ⇒∣ ℕ.∣-refl
∣m∣n⇒∣m+n : ∀ {i m n} → i ∣ m → i ∣ n → i ∣ m + n
∣m∣n⇒∣m+n (divides q refl) (divides p refl) =
divides (q + p) (sym (*-distribʳ-+ _ q p))
∣m⇒∣-m : ∀ {i m} → i ∣ m → i ∣ - m
∣m⇒∣-m {i} {m} i∣m = ∣ᵤ⇒∣ $′ begin
∣ i ∣ ∣⟨ ∣⇒∣ᵤ i∣m ⟩
∣ m ∣ ≡⟨ sym (∣-n∣≡∣n∣ m) ⟩
∣ - m ∣ ∎
where open ℕ.∣-Reasoning
∣m∣n⇒∣m-n : ∀ {i m n} → i ∣ m → i ∣ n → i ∣ m - n
∣m∣n⇒∣m-n i∣m i∣n = ∣m∣n⇒∣m+n i∣m (∣m⇒∣-m i∣n)
∣m+n∣m⇒∣n : ∀ {i m n} → i ∣ m + n → i ∣ m → i ∣ n
∣m+n∣m⇒∣n {i} {m} {n} i∣m+n i∣m = begin
i ∣⟨ ∣m∣n⇒∣m-n i∣m+n i∣m ⟩
m + n - m ≡⟨ +-comm (m + n) (- m) ⟩
- m + (m + n) ≡⟨ sym (+-assoc (- m) m n) ⟩
- m + m + n ≡⟨ cong (_+ n) (+-inverseˡ m) ⟩
+ 0 + n ≡⟨ +-identityˡ n ⟩
n ∎
where open ∣-Reasoning
∣m+n∣n⇒∣m : ∀ {i m n} → i ∣ m + n → i ∣ n → i ∣ m
∣m+n∣n⇒∣m {i} {m} {n} i|m+n i|n
rewrite +-comm m n
= ∣m+n∣m⇒∣n i|m+n i|n
∣n⇒∣m*n : ∀ {i} m {n} → i ∣ n → i ∣ m * n
∣n⇒∣m*n {i} m {n} (divides q eq) = divides (m * q) $′ begin
m * n ≡⟨ cong (m *_) eq ⟩
m * (q * i) ≡⟨ sym (*-assoc m q i) ⟩
m * q * i ∎
where open ≡-Reasoning
∣m⇒∣m*n : ∀ {i m} n → i ∣ m → i ∣ m * n
∣m⇒∣m*n {i} {m} n i|m
rewrite *-comm m n
= ∣n⇒∣m*n {i} n {m} i|m
*-monoʳ-∣ : ∀ k → (k *_) Preserves _∣_ ⟶ _∣_
*-monoʳ-∣ k i∣j = ∣ᵤ⇒∣ (Unsigned.*-monoʳ-∣ k (∣⇒∣ᵤ i∣j))
*-monoˡ-∣ : ∀ k → (_* k) Preserves _∣_ ⟶ _∣_
*-monoˡ-∣ k {i} {j} i∣j = ∣ᵤ⇒∣ (Unsigned.*-monoˡ-∣ k {i} {j} (∣⇒∣ᵤ i∣j))
*-cancelˡ-∣ : ∀ k {i j} → k ≢ + 0 → k * i ∣ k * j → i ∣ j
*-cancelˡ-∣ k k≢0 = ∣ᵤ⇒∣ ∘ Unsigned.*-cancelˡ-∣ k k≢0 ∘ ∣⇒∣ᵤ
*-cancelʳ-∣ : ∀ k {i j} → k ≢ + 0 → i * k ∣ j * k → i ∣ j
*-cancelʳ-∣ k {i} {j} k≢0 = ∣ᵤ⇒∣ ∘′ Unsigned.*-cancelʳ-∣ k {i} {j} k≢0 ∘′ ∣⇒∣ᵤ
|
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
src✝¹ : NormedAddCommGroup PUnit := normedAddCommGroup
src✝ : CommRing PUnit := commRing
x✝¹ x✝ : PUnit
⊢ ‖x✝¹ * x✝‖ ≤ ‖x✝¹‖ * ‖x✝‖
[PROOFSTEP]
simp
[GOAL]
α✝ : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
α : Type u_5
inst✝² : SeminormedAddCommGroup α
inst✝¹ : One α
inst✝ : NormOneClass α
⊢ ‖0‖ ≠ ‖1‖
[PROOFSTEP]
simp
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝² : SeminormedAddCommGroup α
inst✝¹ : One α
inst✝ : NormOneClass α
⊢ ‖1‖ = 1
[PROOFSTEP]
simp [ULift.norm_def]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝⁵ : SeminormedAddCommGroup α
inst✝⁴ : One α
inst✝³ : NormOneClass α
inst✝² : SeminormedAddCommGroup β
inst✝¹ : One β
inst✝ : NormOneClass β
⊢ ‖1‖ = 1
[PROOFSTEP]
simp [Prod.norm_def]
[GOAL]
α✝ : Type u_1
β : Type u_2
γ : Type u_3
ι✝ : Type u_4
ι : Type u_5
α : ι → Type u_6
inst✝⁴ : Nonempty ι
inst✝³ : Fintype ι
inst✝² : (i : ι) → SeminormedAddCommGroup (α i)
inst✝¹ : (i : ι) → One (α i)
inst✝ : ∀ (i : ι), NormOneClass (α i)
⊢ ‖1‖ = 1
[PROOFSTEP]
simp [Pi.norm_def]
[GOAL]
α✝ : Type u_1
β : Type u_2
γ : Type u_3
ι✝ : Type u_4
ι : Type u_5
α : ι → Type u_6
inst✝⁴ : Nonempty ι
inst✝³ : Fintype ι
inst✝² : (i : ι) → SeminormedAddCommGroup (α i)
inst✝¹ : (i : ι) → One (α i)
inst✝ : ∀ (i : ι), NormOneClass (α i)
⊢ (Finset.sup Finset.univ fun b => 1) = 1
[PROOFSTEP]
exact Finset.sup_const Finset.univ_nonempty 1
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : NonUnitalSeminormedRing α
a b : α
⊢ ‖a * b‖₊ ≤ ‖a‖₊ * ‖b‖₊
[PROOFSTEP]
simpa only [← norm_toNNReal, ← Real.toNNReal_mul (norm_nonneg _)] using Real.toNNReal_mono (norm_mul_le _ _)
[GOAL]
α : Type u_1
β✝ : Type u_2
γ : Type u_3
ι : Type u_4
inst✝² : NonUnitalSeminormedRing α
β : Type u_5
inst✝¹ : NormedRing β
inst✝ : Nontrivial β
⊢ ‖1‖ ≤ ‖1‖ * ‖1‖
[PROOFSTEP]
simpa only [mul_one] using norm_mul_le (1 : β) 1
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : NonUnitalSeminormedRing α
x y : α
⊢ ‖↑(AddMonoidHom.mulRight x) y‖ ≤ ‖x‖ * ‖y‖
[PROOFSTEP]
rw [mul_comm]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : NonUnitalSeminormedRing α
x y : α
⊢ ‖↑(AddMonoidHom.mulRight x) y‖ ≤ ‖y‖ * ‖x‖
[PROOFSTEP]
exact norm_mul_le y x
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝¹ : NonUnitalSeminormedRing α
inst✝ : NonUnitalSeminormedRing β
src✝¹ : SeminormedAddCommGroup (α × β) := seminormedAddCommGroup
src✝ : NonUnitalRing (α × β) := instNonUnitalRing
x y : α × β
⊢ max (‖x.fst‖ * ‖y.fst‖) (‖x.snd‖ * ‖y.snd‖) = max (‖x.fst‖ * ‖y.fst‖) (‖y.snd‖ * ‖x.snd‖)
[PROOFSTEP]
simp [mul_comm]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝¹ : NonUnitalSeminormedRing α
inst✝ : NonUnitalSeminormedRing β
src✝¹ : SeminormedAddCommGroup (α × β) := seminormedAddCommGroup
src✝ : NonUnitalRing (α × β) := instNonUnitalRing
x y : α × β
⊢ max (‖x.fst‖ * ‖y.fst‖) (‖y.snd‖ * ‖x.snd‖) ≤ max ‖x.fst‖ ‖x.snd‖ * max ‖y.snd‖ ‖y.fst‖
[PROOFSTEP]
apply max_mul_mul_le_max_mul_max
[GOAL]
case ha
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝¹ : NonUnitalSeminormedRing α
inst✝ : NonUnitalSeminormedRing β
src✝¹ : SeminormedAddCommGroup (α × β) := seminormedAddCommGroup
src✝ : NonUnitalRing (α × β) := instNonUnitalRing
x y : α × β
⊢ 0 ≤ ‖x.fst‖
[PROOFSTEP]
simp [norm_nonneg]
[GOAL]
case hd
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝¹ : NonUnitalSeminormedRing α
inst✝ : NonUnitalSeminormedRing β
src✝¹ : SeminormedAddCommGroup (α × β) := seminormedAddCommGroup
src✝ : NonUnitalRing (α × β) := instNonUnitalRing
x y : α × β
⊢ 0 ≤ ‖y.snd‖
[PROOFSTEP]
simp [norm_nonneg]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝¹ : NonUnitalSeminormedRing α
inst✝ : NonUnitalSeminormedRing β
src✝¹ : SeminormedAddCommGroup (α × β) := seminormedAddCommGroup
src✝ : NonUnitalRing (α × β) := instNonUnitalRing
x y : α × β
⊢ max ‖x.fst‖ ‖x.snd‖ * max ‖y.snd‖ ‖y.fst‖ = max ‖x.fst‖ ‖x.snd‖ * max ‖y.fst‖ ‖y.snd‖
[PROOFSTEP]
simp [max_comm]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : SeminormedRing α
⊢ ‖↑0‖ ≤ ↑0 * ‖1‖
[PROOFSTEP]
simp
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : SeminormedRing α
n : ℕ
⊢ ‖↑(n + 1)‖ ≤ ↑(n + 1) * ‖1‖
[PROOFSTEP]
rw [n.cast_succ, n.cast_succ, add_mul, one_mul]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : SeminormedRing α
n : ℕ
⊢ ‖↑n + 1‖ ≤ ↑n * ‖1‖ + ‖1‖
[PROOFSTEP]
exact norm_add_le_of_le (Nat.norm_cast_le n) le_rfl
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : SeminormedRing α
a : α
x✝ : [a] ≠ []
⊢ ‖prod [a]‖ ≤ prod (map norm [a])
[PROOFSTEP]
simp
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : SeminormedRing α
a b : α
l : List α
x✝ : a :: b :: l ≠ []
⊢ ‖prod (a :: b :: l)‖ ≤ prod (map norm (a :: b :: l))
[PROOFSTEP]
rw [List.map_cons, List.prod_cons, @List.prod_cons _ _ _ ‖a‖]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : SeminormedRing α
a b : α
l : List α
x✝ : a :: b :: l ≠ []
⊢ ‖a * prod (b :: l)‖ ≤ ‖a‖ * prod (map norm (b :: l))
[PROOFSTEP]
refine' le_trans (norm_mul_le _ _) (mul_le_mul_of_nonneg_left _ (norm_nonneg _))
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : SeminormedRing α
a b : α
l : List α
x✝ : a :: b :: l ≠ []
⊢ ‖prod (b :: l)‖ ≤ prod (map norm (b :: l))
[PROOFSTEP]
exact List.norm_prod_le' (List.cons_ne_nil b l)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : SeminormedRing α
l : List α
hl : l ≠ []
⊢ prod (map norm l) = (fun a => ↑a) (prod (map nnnorm l))
[PROOFSTEP]
simp [NNReal.coe_list_prod, List.map_map]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝¹ : SeminormedRing α
inst✝ : NormOneClass α
⊢ ‖prod []‖ ≤ prod (map norm [])
[PROOFSTEP]
simp
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝¹ : SeminormedRing α
inst✝ : NormOneClass α
l : List α
⊢ prod (map norm l) = (fun a => ↑a) (prod (map nnnorm l))
[PROOFSTEP]
simp [NNReal.coe_list_prod, List.map_map]
[GOAL]
α✝ : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝¹ : SeminormedRing α✝
α : Type u_5
inst✝ : NormedCommRing α
s : Finset ι
hs : Finset.Nonempty s
f : ι → α
⊢ ‖∏ i in s, f i‖ ≤ ∏ i in s, ‖f i‖
[PROOFSTEP]
rcases s with ⟨⟨l⟩, hl⟩
[GOAL]
case mk.mk
α✝ : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝¹ : SeminormedRing α✝
α : Type u_5
inst✝ : NormedCommRing α
f : ι → α
val✝ : Multiset ι
l : List ι
hl : Multiset.Nodup (Quot.mk Setoid.r l)
hs : Finset.Nonempty { val := Quot.mk Setoid.r l, nodup := hl }
⊢ ‖∏ i in { val := Quot.mk Setoid.r l, nodup := hl }, f i‖ ≤ ∏ i in { val := Quot.mk Setoid.r l, nodup := hl }, ‖f i‖
[PROOFSTEP]
have : l.map f ≠ [] := by simpa using hs
[GOAL]
α✝ : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝¹ : SeminormedRing α✝
α : Type u_5
inst✝ : NormedCommRing α
f : ι → α
val✝ : Multiset ι
l : List ι
hl : Multiset.Nodup (Quot.mk Setoid.r l)
hs : Finset.Nonempty { val := Quot.mk Setoid.r l, nodup := hl }
⊢ List.map f l ≠ []
[PROOFSTEP]
simpa using hs
[GOAL]
case mk.mk
α✝ : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝¹ : SeminormedRing α✝
α : Type u_5
inst✝ : NormedCommRing α
f : ι → α
val✝ : Multiset ι
l : List ι
hl : Multiset.Nodup (Quot.mk Setoid.r l)
hs : Finset.Nonempty { val := Quot.mk Setoid.r l, nodup := hl }
this : List.map f l ≠ []
⊢ ‖∏ i in { val := Quot.mk Setoid.r l, nodup := hl }, f i‖ ≤ ∏ i in { val := Quot.mk Setoid.r l, nodup := hl }, ‖f i‖
[PROOFSTEP]
simpa using List.norm_prod_le' this
[GOAL]
α✝ : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝¹ : SeminormedRing α✝
α : Type u_5
inst✝ : NormedCommRing α
s : Finset ι
hs : Finset.Nonempty s
f : ι → α
⊢ ∏ i in s, ‖f i‖ = (fun a => ↑a) (∏ i in s, ‖f i‖₊)
[PROOFSTEP]
simp [NNReal.coe_prod]
[GOAL]
α✝ : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝² : SeminormedRing α✝
α : Type u_5
inst✝¹ : NormedCommRing α
inst✝ : NormOneClass α
s : Finset ι
f : ι → α
⊢ ‖∏ i in s, f i‖ ≤ ∏ i in s, ‖f i‖
[PROOFSTEP]
rcases s with ⟨⟨l⟩, hl⟩
[GOAL]
case mk.mk
α✝ : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝² : SeminormedRing α✝
α : Type u_5
inst✝¹ : NormedCommRing α
inst✝ : NormOneClass α
f : ι → α
val✝ : Multiset ι
l : List ι
hl : Multiset.Nodup (Quot.mk Setoid.r l)
⊢ ‖∏ i in { val := Quot.mk Setoid.r l, nodup := hl }, f i‖ ≤ ∏ i in { val := Quot.mk Setoid.r l, nodup := hl }, ‖f i‖
[PROOFSTEP]
simpa using (l.map f).norm_prod_le
[GOAL]
α✝ : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝² : SeminormedRing α✝
α : Type u_5
inst✝¹ : NormedCommRing α
inst✝ : NormOneClass α
s : Finset ι
f : ι → α
⊢ ∏ i in s, ‖f i‖ = (fun a => ↑a) (∏ i in s, ‖f i‖₊)
[PROOFSTEP]
simp [NNReal.coe_prod]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : SeminormedRing α
a : α
x✝ : 0 < 1
⊢ ‖a ^ 1‖₊ ≤ ‖a‖₊ ^ 1
[PROOFSTEP]
simp only [pow_one, le_rfl]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : SeminormedRing α
a : α
n : ℕ
x✝ : 0 < n + 2
⊢ ‖a ^ (n + 2)‖₊ ≤ ‖a‖₊ ^ (n + 2)
[PROOFSTEP]
simpa only [pow_succ _ (n + 1)] using le_trans (nnnorm_mul_le _ _) (mul_le_mul_left' (nnnorm_pow_le' a n.succ_pos) _)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝¹ : SeminormedRing α
inst✝ : NormOneClass α
a : α
n : ℕ
⊢ ‖a ^ Nat.zero‖₊ ≤ ‖a‖₊ ^ Nat.zero
[PROOFSTEP]
simp only [Nat.zero_eq, pow_zero, nnnorm_one, le_rfl]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : SeminormedRing α
a : α
n : ℕ
h : 0 < n
⊢ ‖a ^ n‖ ≤ ‖a‖ ^ n
[PROOFSTEP]
simpa only [NNReal.coe_pow, coe_nnnorm] using NNReal.coe_mono (nnnorm_pow_le' a h)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝¹ : SeminormedRing α
inst✝ : NormOneClass α
a : α
n : ℕ
⊢ ‖a ^ Nat.zero‖ ≤ ‖a‖ ^ Nat.zero
[PROOFSTEP]
simp only [Nat.zero_eq, pow_zero, norm_one, le_rfl]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : NonUnitalSeminormedRing α
x : α × α
⊢ Tendsto (fun e => ‖e.fst * e.snd - (fun p => p.fst * p.snd) x‖) (𝓝 x) (𝓝 0)
[PROOFSTEP]
have : ∀ e : α × α, ‖e.1 * e.2 - x.1 * x.2‖ ≤ ‖e.1‖ * ‖e.2 - x.2‖ + ‖e.1 - x.1‖ * ‖x.2‖ :=
by
intro e
calc
‖e.1 * e.2 - x.1 * x.2‖ ≤ ‖e.1 * (e.2 - x.2) + (e.1 - x.1) * x.2‖ := by
rw [_root_.mul_sub, _root_.sub_mul, sub_add_sub_cancel]
-- porting note: `ENNReal.{mul_sub, sub_mul}` should be protected
_ ≤ ‖e.1‖ * ‖e.2 - x.2‖ + ‖e.1 - x.1‖ * ‖x.2‖ := norm_add_le_of_le (norm_mul_le _ _) (norm_mul_le _ _)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : NonUnitalSeminormedRing α
x : α × α
⊢ ∀ (e : α × α), ‖e.fst * e.snd - x.fst * x.snd‖ ≤ ‖e.fst‖ * ‖e.snd - x.snd‖ + ‖e.fst - x.fst‖ * ‖x.snd‖
[PROOFSTEP]
intro e
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : NonUnitalSeminormedRing α
x e : α × α
⊢ ‖e.fst * e.snd - x.fst * x.snd‖ ≤ ‖e.fst‖ * ‖e.snd - x.snd‖ + ‖e.fst - x.fst‖ * ‖x.snd‖
[PROOFSTEP]
calc
‖e.1 * e.2 - x.1 * x.2‖ ≤ ‖e.1 * (e.2 - x.2) + (e.1 - x.1) * x.2‖ := by
rw [_root_.mul_sub, _root_.sub_mul, sub_add_sub_cancel]
-- porting note: `ENNReal.{mul_sub, sub_mul}` should be protected
_ ≤ ‖e.1‖ * ‖e.2 - x.2‖ + ‖e.1 - x.1‖ * ‖x.2‖ := norm_add_le_of_le (norm_mul_le _ _) (norm_mul_le _ _)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : NonUnitalSeminormedRing α
x e : α × α
⊢ ‖e.fst * e.snd - x.fst * x.snd‖ ≤ ‖e.fst * (e.snd - x.snd) + (e.fst - x.fst) * x.snd‖
[PROOFSTEP]
rw [_root_.mul_sub, _root_.sub_mul, sub_add_sub_cancel]
-- porting note: `ENNReal.{mul_sub, sub_mul}` should be protected
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : NonUnitalSeminormedRing α
x : α × α
this : ∀ (e : α × α), ‖e.fst * e.snd - x.fst * x.snd‖ ≤ ‖e.fst‖ * ‖e.snd - x.snd‖ + ‖e.fst - x.fst‖ * ‖x.snd‖
⊢ Tendsto (fun e => ‖e.fst * e.snd - (fun p => p.fst * p.snd) x‖) (𝓝 x) (𝓝 0)
[PROOFSTEP]
refine squeeze_zero (fun e => norm_nonneg _) this ?_
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : NonUnitalSeminormedRing α
x : α × α
this : ∀ (e : α × α), ‖e.fst * e.snd - x.fst * x.snd‖ ≤ ‖e.fst‖ * ‖e.snd - x.snd‖ + ‖e.fst - x.fst‖ * ‖x.snd‖
⊢ Tendsto (fun t => ‖t.fst‖ * ‖t.snd - x.snd‖ + ‖t.fst - x.fst‖ * ‖x.snd‖) (𝓝 x) (𝓝 0)
[PROOFSTEP]
convert
((continuous_fst.tendsto x).norm.mul ((continuous_snd.tendsto x).sub tendsto_const_nhds).norm).add
(((continuous_fst.tendsto x).sub tendsto_const_nhds).norm.mul _)
-- Porting note: `show` used to select a goal to work on
[GOAL]
case h.e'_5.h.e'_3
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : NonUnitalSeminormedRing α
x : α × α
this : ∀ (e : α × α), ‖e.fst * e.snd - x.fst * x.snd‖ ≤ ‖e.fst‖ * ‖e.snd - x.snd‖ + ‖e.fst - x.fst‖ * ‖x.snd‖
⊢ 0 = ‖x.fst‖ * ‖x.snd - x.snd‖ + ‖x.fst - x.fst‖ * ?convert_4
case convert_4
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : NonUnitalSeminormedRing α
x : α × α
this : ∀ (e : α × α), ‖e.fst * e.snd - x.fst * x.snd‖ ≤ ‖e.fst‖ * ‖e.snd - x.snd‖ + ‖e.fst - x.fst‖ * ‖x.snd‖
⊢ ℝ
case convert_5
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : NonUnitalSeminormedRing α
x : α × α
this : ∀ (e : α × α), ‖e.fst * e.snd - x.fst * x.snd‖ ≤ ‖e.fst‖ * ‖e.snd - x.snd‖ + ‖e.fst - x.fst‖ * ‖x.snd‖
⊢ Tendsto (fun t => ‖x.snd‖) (𝓝 x) (𝓝 ?convert_4)
[PROOFSTEP]
rotate_right
[GOAL]
case convert_5
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : NonUnitalSeminormedRing α
x : α × α
this : ∀ (e : α × α), ‖e.fst * e.snd - x.fst * x.snd‖ ≤ ‖e.fst‖ * ‖e.snd - x.snd‖ + ‖e.fst - x.fst‖ * ‖x.snd‖
⊢ Tendsto (fun t => ‖x.snd‖) (𝓝 x) (𝓝 ?convert_4)
case h.e'_5.h.e'_3
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : NonUnitalSeminormedRing α
x : α × α
this : ∀ (e : α × α), ‖e.fst * e.snd - x.fst * x.snd‖ ≤ ‖e.fst‖ * ‖e.snd - x.snd‖ + ‖e.fst - x.fst‖ * ‖x.snd‖
⊢ 0 = ‖x.fst‖ * ‖x.snd - x.snd‖ + ‖x.fst - x.fst‖ * ?convert_4
case convert_4
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : NonUnitalSeminormedRing α
x : α × α
this : ∀ (e : α × α), ‖e.fst * e.snd - x.fst * x.snd‖ ≤ ‖e.fst‖ * ‖e.snd - x.snd‖ + ‖e.fst - x.fst‖ * ‖x.snd‖
⊢ ℝ
[PROOFSTEP]
show Tendsto _ _ _
[GOAL]
case convert_5
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : NonUnitalSeminormedRing α
x : α × α
this : ∀ (e : α × α), ‖e.fst * e.snd - x.fst * x.snd‖ ≤ ‖e.fst‖ * ‖e.snd - x.snd‖ + ‖e.fst - x.fst‖ * ‖x.snd‖
⊢ Tendsto (fun t => ‖x.snd‖) (𝓝 x) (𝓝 ?convert_4)
case h.e'_5.h.e'_3
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : NonUnitalSeminormedRing α
x : α × α
this : ∀ (e : α × α), ‖e.fst * e.snd - x.fst * x.snd‖ ≤ ‖e.fst‖ * ‖e.snd - x.snd‖ + ‖e.fst - x.fst‖ * ‖x.snd‖
⊢ 0 = ‖x.fst‖ * ‖x.snd - x.snd‖ + ‖x.fst - x.fst‖ * ?convert_4
case convert_4
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : NonUnitalSeminormedRing α
x : α × α
this : ∀ (e : α × α), ‖e.fst * e.snd - x.fst * x.snd‖ ≤ ‖e.fst‖ * ‖e.snd - x.snd‖ + ‖e.fst - x.fst‖ * ‖x.snd‖
⊢ ℝ
[PROOFSTEP]
exact tendsto_const_nhds
[GOAL]
case h.e'_5.h.e'_3
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : NonUnitalSeminormedRing α
x : α × α
this : ∀ (e : α × α), ‖e.fst * e.snd - x.fst * x.snd‖ ≤ ‖e.fst‖ * ‖e.snd - x.snd‖ + ‖e.fst - x.fst‖ * ‖x.snd‖
⊢ 0 = ‖x.fst‖ * ‖x.snd - x.snd‖ + ‖x.fst - x.fst‖ * ‖x.snd‖
[PROOFSTEP]
simp
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : NormedDivisionRing α
⊢ ‖1‖ * ‖1‖ = ‖1‖ * 1
[PROOFSTEP]
rw [← norm_mul, mul_one, mul_one]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : NormedDivisionRing α
a : α
⊢ ↑‖a⁻¹‖₊ = ↑‖a‖₊⁻¹
[PROOFSTEP]
simp
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : NormedDivisionRing α
z w : α
hz : z ≠ 0
hw : w ≠ 0
⊢ dist z⁻¹ w⁻¹ = dist z w / (‖z‖ * ‖w‖)
[PROOFSTEP]
rw [dist_eq_norm, inv_sub_inv' hz hw, norm_mul, norm_mul, norm_inv, norm_inv, mul_comm ‖z‖⁻¹, mul_assoc, dist_eq_norm',
div_eq_mul_inv, mul_inv]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : NormedDivisionRing α
z w : α
hz : z ≠ 0
hw : w ≠ 0
⊢ nndist z⁻¹ w⁻¹ = nndist z w / (‖z‖₊ * ‖w‖₊)
[PROOFSTEP]
rw [← NNReal.coe_eq]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : NormedDivisionRing α
z w : α
hz : z ≠ 0
hw : w ≠ 0
⊢ ↑(nndist z⁻¹ w⁻¹) = ↑(nndist z w / (‖z‖₊ * ‖w‖₊))
[PROOFSTEP]
simp [-NNReal.coe_eq, dist_inv_inv₀ hz hw]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : NormedDivisionRing α
a : α
ha : a ≠ 0
⊢ Tendsto ((fun x x_1 => x * x_1) a) (comap norm atTop) (comap norm atTop)
[PROOFSTEP]
simpa only [tendsto_comap_iff, (· ∘ ·), norm_mul] using tendsto_const_nhds.mul_atTop (norm_pos_iff.2 ha) tendsto_comap
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : NormedDivisionRing α
a : α
ha : a ≠ 0
⊢ Tendsto (fun x => x * a) (comap norm atTop) (comap norm atTop)
[PROOFSTEP]
simpa only [tendsto_comap_iff, (· ∘ ·), norm_mul] using tendsto_comap.atTop_mul (norm_pos_iff.2 ha) tendsto_const_nhds
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : NormedDivisionRing α
⊢ HasContinuousInv₀ α
[PROOFSTEP]
refine' ⟨fun r r0 => tendsto_iff_norm_tendsto_zero.2 _⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : NormedDivisionRing α
r : α
r0 : r ≠ 0
⊢ Tendsto (fun e => ‖e⁻¹ - r⁻¹‖) (𝓝 r) (𝓝 0)
[PROOFSTEP]
have r0' : 0 < ‖r‖ := norm_pos_iff.2 r0
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : NormedDivisionRing α
r : α
r0 : r ≠ 0
r0' : 0 < ‖r‖
⊢ Tendsto (fun e => ‖e⁻¹ - r⁻¹‖) (𝓝 r) (𝓝 0)
[PROOFSTEP]
rcases exists_between r0' with ⟨ε, ε0, εr⟩
[GOAL]
case intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : NormedDivisionRing α
r : α
r0 : r ≠ 0
r0' : 0 < ‖r‖
ε : ℝ
ε0 : 0 < ε
εr : ε < ‖r‖
⊢ Tendsto (fun e => ‖e⁻¹ - r⁻¹‖) (𝓝 r) (𝓝 0)
[PROOFSTEP]
have : ∀ᶠ e in 𝓝 r, ‖e⁻¹ - r⁻¹‖ ≤ ‖r - e‖ / ‖r‖ / ε :=
by
filter_upwards [(isOpen_lt continuous_const continuous_norm).eventually_mem εr] with e he
have e0 : e ≠ 0 := norm_pos_iff.1 (ε0.trans he)
calc
‖e⁻¹ - r⁻¹‖ = ‖r‖⁻¹ * ‖r - e‖ * ‖e‖⁻¹ := by
rw [← norm_inv, ← norm_inv, ← norm_mul, ← norm_mul, _root_.mul_sub, _root_.sub_mul, mul_assoc _ e,
inv_mul_cancel r0, mul_inv_cancel e0, one_mul, mul_one]
-- porting note: `ENNReal.{mul_sub, sub_mul}` should be `protected`
_ = ‖r - e‖ / ‖r‖ / ‖e‖ := by field_simp [mul_comm]
_ ≤ ‖r - e‖ / ‖r‖ / ε := by gcongr
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : NormedDivisionRing α
r : α
r0 : r ≠ 0
r0' : 0 < ‖r‖
ε : ℝ
ε0 : 0 < ε
εr : ε < ‖r‖
⊢ ∀ᶠ (e : α) in 𝓝 r, ‖e⁻¹ - r⁻¹‖ ≤ ‖r - e‖ / ‖r‖ / ε
[PROOFSTEP]
filter_upwards [(isOpen_lt continuous_const continuous_norm).eventually_mem εr] with e he
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : NormedDivisionRing α
r : α
r0 : r ≠ 0
r0' : 0 < ‖r‖
ε : ℝ
ε0 : 0 < ε
εr : ε < ‖r‖
e : α
he : ε < ‖e‖
⊢ ‖e⁻¹ - r⁻¹‖ ≤ ‖r - e‖ / ‖r‖ / ε
[PROOFSTEP]
have e0 : e ≠ 0 := norm_pos_iff.1 (ε0.trans he)
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : NormedDivisionRing α
r : α
r0 : r ≠ 0
r0' : 0 < ‖r‖
ε : ℝ
ε0 : 0 < ε
εr : ε < ‖r‖
e : α
he : ε < ‖e‖
e0 : e ≠ 0
⊢ ‖e⁻¹ - r⁻¹‖ ≤ ‖r - e‖ / ‖r‖ / ε
[PROOFSTEP]
calc
‖e⁻¹ - r⁻¹‖ = ‖r‖⁻¹ * ‖r - e‖ * ‖e‖⁻¹ := by
rw [← norm_inv, ← norm_inv, ← norm_mul, ← norm_mul, _root_.mul_sub, _root_.sub_mul, mul_assoc _ e,
inv_mul_cancel r0, mul_inv_cancel e0, one_mul, mul_one]
-- porting note: `ENNReal.{mul_sub, sub_mul}` should be `protected`
_ = ‖r - e‖ / ‖r‖ / ‖e‖ := by field_simp [mul_comm]
_ ≤ ‖r - e‖ / ‖r‖ / ε := by gcongr
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : NormedDivisionRing α
r : α
r0 : r ≠ 0
r0' : 0 < ‖r‖
ε : ℝ
ε0 : 0 < ε
εr : ε < ‖r‖
e : α
he : ε < ‖e‖
e0 : e ≠ 0
⊢ ‖e⁻¹ - r⁻¹‖ = ‖r‖⁻¹ * ‖r - e‖ * ‖e‖⁻¹
[PROOFSTEP]
rw [← norm_inv, ← norm_inv, ← norm_mul, ← norm_mul, _root_.mul_sub, _root_.sub_mul, mul_assoc _ e, inv_mul_cancel r0,
mul_inv_cancel e0, one_mul, mul_one]
-- porting note: `ENNReal.{mul_sub, sub_mul}` should be `protected`
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : NormedDivisionRing α
r : α
r0 : r ≠ 0
r0' : 0 < ‖r‖
ε : ℝ
ε0 : 0 < ε
εr : ε < ‖r‖
e : α
he : ε < ‖e‖
e0 : e ≠ 0
⊢ ‖r‖⁻¹ * ‖r - e‖ * ‖e‖⁻¹ = ‖r - e‖ / ‖r‖ / ‖e‖
[PROOFSTEP]
field_simp [mul_comm]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : NormedDivisionRing α
r : α
r0 : r ≠ 0
r0' : 0 < ‖r‖
ε : ℝ
ε0 : 0 < ε
εr : ε < ‖r‖
e : α
he : ε < ‖e‖
e0 : e ≠ 0
⊢ ‖r - e‖ / ‖r‖ / ‖e‖ ≤ ‖r - e‖ / ‖r‖ / ε
[PROOFSTEP]
gcongr
[GOAL]
case intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : NormedDivisionRing α
r : α
r0 : r ≠ 0
r0' : 0 < ‖r‖
ε : ℝ
ε0 : 0 < ε
εr : ε < ‖r‖
this : ∀ᶠ (e : α) in 𝓝 r, ‖e⁻¹ - r⁻¹‖ ≤ ‖r - e‖ / ‖r‖ / ε
⊢ Tendsto (fun e => ‖e⁻¹ - r⁻¹‖) (𝓝 r) (𝓝 0)
[PROOFSTEP]
refine' squeeze_zero' (eventually_of_forall fun _ => norm_nonneg _) this _
[GOAL]
case intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : NormedDivisionRing α
r : α
r0 : r ≠ 0
r0' : 0 < ‖r‖
ε : ℝ
ε0 : 0 < ε
εr : ε < ‖r‖
this : ∀ᶠ (e : α) in 𝓝 r, ‖e⁻¹ - r⁻¹‖ ≤ ‖r - e‖ / ‖r‖ / ε
⊢ Tendsto (fun t => ‖r - t‖ / ‖r‖ / ε) (𝓝 r) (𝓝 0)
[PROOFSTEP]
refine' (((continuous_const.sub continuous_id).norm.div_const _).div_const _).tendsto' _ _ _
[GOAL]
case intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : NormedDivisionRing α
r : α
r0 : r ≠ 0
r0' : 0 < ‖r‖
ε : ℝ
ε0 : 0 < ε
εr : ε < ‖r‖
this : ∀ᶠ (e : α) in 𝓝 r, ‖e⁻¹ - r⁻¹‖ ≤ ‖r - e‖ / ‖r‖ / ε
⊢ ‖r - id r‖ / ‖r‖ / ε = 0
[PROOFSTEP]
simp
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝¹ : NormedDivisionRing α
inst✝ : Monoid β
φ : β →* α
x : β
k : ℕ+
h : x ^ ↑k = 1
⊢ ‖↑φ x‖ = 1
[PROOFSTEP]
rw [← pow_left_inj, ← norm_pow, ← map_pow, h, map_one, norm_one, one_pow]
[GOAL]
case Hxpos
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝¹ : NormedDivisionRing α
inst✝ : Monoid β
φ : β →* α
x : β
k : ℕ+
h : x ^ ↑k = 1
⊢ 0 ≤ ‖↑φ x‖
case Hypos
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝¹ : NormedDivisionRing α
inst✝ : Monoid β
φ : β →* α
x : β
k : ℕ+
h : x ^ ↑k = 1
⊢ 0 ≤ 1
case Hnpos
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝¹ : NormedDivisionRing α
inst✝ : Monoid β
φ : β →* α
x : β
k : ℕ+
h : x ^ ↑k = 1
⊢ 0 < ↑k
[PROOFSTEP]
exacts [norm_nonneg _, zero_le_one, k.pos]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : NontriviallyNormedField α
r : ℝ
w : α
hw : 1 < ‖w‖
n : ℕ
hn : r < ‖w‖ ^ n
⊢ r < ‖w ^ n‖
[PROOFSTEP]
rwa [norm_pow]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : NontriviallyNormedField α
r : ℝ
hr : 0 < r
w : α
hw : r⁻¹ < ‖w‖
⊢ 0 < ‖w⁻¹‖ ∧ ‖w⁻¹‖ < r
[PROOFSTEP]
rwa [← Set.mem_Ioo, norm_inv, ← Set.mem_inv, Set.inv_Ioo_0_left hr]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : NontriviallyNormedField α
x : α
⊢ NeBot (𝓝[{x}ᶜ] x)
[PROOFSTEP]
rw [← mem_closure_iff_nhdsWithin_neBot, Metric.mem_closure_iff]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : NontriviallyNormedField α
x : α
⊢ ∀ (ε : ℝ), ε > 0 → ∃ b, b ∈ {x}ᶜ ∧ dist x b < ε
[PROOFSTEP]
rintro ε ε0
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : NontriviallyNormedField α
x : α
ε : ℝ
ε0 : ε > 0
⊢ ∃ b, b ∈ {x}ᶜ ∧ dist x b < ε
[PROOFSTEP]
rcases exists_norm_lt α ε0 with ⟨b, hb0, hbε⟩
[GOAL]
case intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : NontriviallyNormedField α
x : α
ε : ℝ
ε0 : ε > 0
b : α
hb0 : 0 < ‖b‖
hbε : ‖b‖ < ε
⊢ ∃ b, b ∈ {x}ᶜ ∧ dist x b < ε
[PROOFSTEP]
refine' ⟨x + b, mt (Set.mem_singleton_iff.trans add_right_eq_self).1 <| norm_pos_iff.1 hb0, _⟩
[GOAL]
case intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : NontriviallyNormedField α
x : α
ε : ℝ
ε0 : ε > 0
b : α
hb0 : 0 < ‖b‖
hbε : ‖b‖ < ε
⊢ dist x (x + b) < ε
[PROOFSTEP]
rwa [dist_comm, dist_eq_norm, add_sub_cancel']
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : NontriviallyNormedField α
⊢ NeBot (𝓝[{x | IsUnit x}] 0)
[PROOFSTEP]
simpa only [isUnit_iff_ne_zero] using punctured_nhds_neBot (0 : α)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : DenselyNormedField α
r₁ r₂ : ℝ≥0
h : r₁ < r₂
⊢ ∃ x, r₁ < ‖x‖₊ ∧ ‖x‖₊ < r₂
[PROOFSTEP]
exact_mod_cast exists_lt_norm_lt α r₁.prop h
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : DenselyNormedField α
⊢ ∀ (a₁ a₂ : ↑(Set.range norm)), a₁ < a₂ → ∃ a, a₁ < a ∧ a < a₂
[PROOFSTEP]
rintro ⟨-, x, rfl⟩ ⟨-, y, rfl⟩ hxy
[GOAL]
case mk.intro.mk.intro
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : DenselyNormedField α
x y : α
hxy : { val := ‖x‖, property := (_ : ∃ y, ‖y‖ = ‖x‖) } < { val := ‖y‖, property := (_ : ∃ y_1, ‖y_1‖ = ‖y‖) }
⊢ ∃ a, { val := ‖x‖, property := (_ : ∃ y, ‖y‖ = ‖x‖) } < a ∧ a < { val := ‖y‖, property := (_ : ∃ y_1, ‖y_1‖ = ‖y‖) }
[PROOFSTEP]
let ⟨z, h⟩ := exists_lt_norm_lt α (norm_nonneg _) hxy
[GOAL]
case mk.intro.mk.intro
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : DenselyNormedField α
x y : α
hxy : { val := ‖x‖, property := (_ : ∃ y, ‖y‖ = ‖x‖) } < { val := ‖y‖, property := (_ : ∃ y_1, ‖y_1‖ = ‖y‖) }
z : α
h : ‖x‖ < ‖z‖ ∧ ‖z‖ < ↑{ val := ‖y‖, property := (_ : ∃ y_1, ‖y_1‖ = ‖y‖) }
⊢ ∃ a, { val := ‖x‖, property := (_ : ∃ y, ‖y‖ = ‖x‖) } < a ∧ a < { val := ‖y‖, property := (_ : ∃ y_1, ‖y_1‖ = ‖y‖) }
[PROOFSTEP]
exact ⟨⟨‖z‖, z, rfl⟩, h⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : DenselyNormedField α
⊢ ∀ (a₁ a₂ : ↑(Set.range nnnorm)), a₁ < a₂ → ∃ a, a₁ < a ∧ a < a₂
[PROOFSTEP]
rintro ⟨-, x, rfl⟩ ⟨-, y, rfl⟩ hxy
[GOAL]
case mk.intro.mk.intro
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : DenselyNormedField α
x y : α
hxy : { val := ‖x‖₊, property := (_ : ∃ y, ‖y‖₊ = ‖x‖₊) } < { val := ‖y‖₊, property := (_ : ∃ y_1, ‖y_1‖₊ = ‖y‖₊) }
⊢ ∃ a,
{ val := ‖x‖₊, property := (_ : ∃ y, ‖y‖₊ = ‖x‖₊) } < a ∧
a < { val := ‖y‖₊, property := (_ : ∃ y_1, ‖y_1‖₊ = ‖y‖₊) }
[PROOFSTEP]
let ⟨z, h⟩ := exists_lt_nnnorm_lt α hxy
[GOAL]
case mk.intro.mk.intro
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : DenselyNormedField α
x y : α
hxy : { val := ‖x‖₊, property := (_ : ∃ y, ‖y‖₊ = ‖x‖₊) } < { val := ‖y‖₊, property := (_ : ∃ y_1, ‖y_1‖₊ = ‖y‖₊) }
z : α
h :
↑{ val := ‖x‖₊, property := (_ : ∃ y, ‖y‖₊ = ‖x‖₊) } < ‖z‖₊ ∧
‖z‖₊ < ↑{ val := ‖y‖₊, property := (_ : ∃ y_1, ‖y_1‖₊ = ‖y‖₊) }
⊢ ∃ a,
{ val := ‖x‖₊, property := (_ : ∃ y, ‖y‖₊ = ‖x‖₊) } < a ∧
a < { val := ‖y‖₊, property := (_ : ∃ y_1, ‖y_1‖₊ = ‖y‖₊) }
[PROOFSTEP]
exact ⟨⟨‖z‖₊, z, rfl⟩, h⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
x✝¹ x✝ : ℝ
h₀ : 0 ≤ x✝¹
hr : x✝¹ < x✝
x : ℝ
h : x✝¹ < x ∧ x < x✝
⊢ x✝¹ < ‖x‖ ∧ ‖x‖ < x✝
[PROOFSTEP]
rwa [Real.norm_eq_abs, abs_of_nonneg (h₀.trans h.1.le)]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
x y : ℝ
hx : 0 ≤ x
⊢ toNNReal x * ‖y‖₊ = ‖x * y‖₊
[PROOFSTEP]
ext
[GOAL]
case a
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
x y : ℝ
hx : 0 ≤ x
⊢ ↑(toNNReal x * ‖y‖₊) = ↑‖x * y‖₊
[PROOFSTEP]
simp only [NNReal.coe_mul, nnnorm_mul, coe_nnnorm, Real.toNNReal_of_nonneg, norm_of_nonneg, hx, coe_mk]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
x y : ℝ
hy : 0 ≤ y
⊢ ‖x‖₊ * toNNReal y = ‖x * y‖₊
[PROOFSTEP]
rw [mul_comm, mul_comm x, toNNReal_mul_nnnorm x hy]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
x : ℝ≥0
⊢ ‖↑x‖ = ↑x
[PROOFSTEP]
rw [Real.norm_eq_abs, x.abs_eq]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : SeminormedAddCommGroup α
a : α
⊢ ‖‖a‖‖₊ = ‖a‖₊
[PROOFSTEP]
rw [Real.nnnorm_of_nonneg (norm_nonneg a)]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
inst✝ : SeminormedAddCommGroup α
a : α
⊢ { val := ‖a‖, property := (_ : 0 ≤ ‖a‖) } = ‖a‖₊
[PROOFSTEP]
rfl
[GOAL]
α : Type u_1
β✝ : Type u_2
γ : Type u_3
ι : Type u_4
inst✝² : Nonempty α
inst✝¹ : SemilatticeSup α
β : Type u_5
inst✝ : SeminormedAddCommGroup β
f : α → β
b : β
⊢ (∀ (ib : ℝ), 0 < ib → ∃ ia, True ∧ ∀ (x : α), x ∈ Set.Ici ia → f x ∈ ball b ib) ↔
∀ (ε : ℝ), 0 < ε → ∃ N, ∀ (n : α), N ≤ n → ‖f n - b‖ < ε
[PROOFSTEP]
simp [dist_eq_norm]
[GOAL]
α : Type u_1
β✝ : Type u_2
γ : Type u_3
ι : Type u_4
inst✝³ : Nonempty α
inst✝² : SemilatticeSup α
inst✝¹ : NoMaxOrder α
β : Type u_5
inst✝ : SeminormedAddCommGroup β
f : α → β
b : β
⊢ (∀ (ib : ℝ), 0 < ib → ∃ ia, True ∧ ∀ (x : α), x ∈ Set.Ioi ia → f x ∈ ball b ib) ↔
∀ (ε : ℝ), 0 < ε → ∃ N, ∀ (n : α), N < n → ‖f n - b‖ < ε
[PROOFSTEP]
simp [dist_eq_norm]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
src✝¹ : NormedAddCommGroup ℤ := normedAddCommGroup
src✝ : Ring ℤ := instRingInt
m n : ℤ
⊢ ‖m * n‖ = ‖m‖ * ‖n‖
[PROOFSTEP]
simp only [norm, Int.cast_mul, abs_mul]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
⊢ ‖1‖ = 1
[PROOFSTEP]
simp [← Int.norm_cast_real]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
src✝¹ : NormedAddCommGroup ℚ := normedAddCommGroup
src✝ : Field ℚ := field
r₁ r₂ : ℚ
⊢ ‖r₁ * r₂‖ = ‖r₁‖ * ‖r₂‖
[PROOFSTEP]
simp only [norm, Rat.cast_mul, abs_mul]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
r₁ r₂ : ℝ
h₀ : 0 ≤ r₁
hr : r₁ < r₂
q : ℚ
h : r₁ < ↑q ∧ ↑q < r₂
⊢ r₁ < ‖q‖ ∧ ‖q‖ < r₂
[PROOFSTEP]
rwa [← Rat.norm_cast_real, Real.norm_eq_abs, abs_of_pos (h₀.trans_lt h.1)]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
F : Type u_5
R : Type u_6
S : Type u_7
inst✝² : NonUnitalRing R
inst✝¹ : NonUnitalSeminormedRing S
inst✝ : NonUnitalRingHomClass F R S
f : F
src✝¹ : SeminormedAddCommGroup R := SeminormedAddCommGroup.induced R S f
src✝ : NonUnitalRing R := inst✝²
x y : R
⊢ ‖x * y‖ ≤ ‖x‖ * ‖y‖
[PROOFSTEP]
show ‖f (x * y)‖ ≤ ‖f x‖ * ‖f y‖
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
F : Type u_5
R : Type u_6
S : Type u_7
inst✝² : NonUnitalRing R
inst✝¹ : NonUnitalSeminormedRing S
inst✝ : NonUnitalRingHomClass F R S
f : F
src✝¹ : SeminormedAddCommGroup R := SeminormedAddCommGroup.induced R S f
src✝ : NonUnitalRing R := inst✝²
x y : R
⊢ ‖↑f (x * y)‖ ≤ ‖↑f x‖ * ‖↑f y‖
[PROOFSTEP]
exact (map_mul f x y).symm ▸ norm_mul_le (f x) (f y)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
F : Type u_5
R : Type u_6
S : Type u_7
inst✝² : DivisionRing R
inst✝¹ : NormedDivisionRing S
inst✝ : NonUnitalRingHomClass F R S
f : F
hf : Function.Injective ↑f
src✝¹ : NormedAddCommGroup R := NormedAddCommGroup.induced R S f hf
src✝ : DivisionRing R := inst✝²
x y : R
⊢ ‖x * y‖ = ‖x‖ * ‖y‖
[PROOFSTEP]
show ‖f (x * y)‖ = ‖f x‖ * ‖f y‖
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
F : Type u_5
R : Type u_6
S : Type u_7
inst✝² : DivisionRing R
inst✝¹ : NormedDivisionRing S
inst✝ : NonUnitalRingHomClass F R S
f : F
hf : Function.Injective ↑f
src✝¹ : NormedAddCommGroup R := NormedAddCommGroup.induced R S f hf
src✝ : DivisionRing R := inst✝²
x y : R
⊢ ‖↑f (x * y)‖ = ‖↑f x‖ * ‖↑f y‖
[PROOFSTEP]
exact (map_mul f x y).symm ▸ norm_mul (f x) (f y)
|
function [structOut, errors] = checkStructureDefaults(params, defaults)
% Check params input parameter values against defaults put in structOut
errors = cell(0);
fNames = fieldnames(defaults);
structOut = defaults;
for k = 1:length(fNames)
try
nextValue = getStructureParameters(params, fNames{k}, ...
defaults.(fNames{k}).value);
validateattributes(nextValue, defaults.(fNames{k}).classes, ...
defaults.(fNames{k}).attributes);
structOut.(fNames{k}).value = nextValue;
catch mex
errors{end + 1} = [fNames{k} ' invalid: ' mex.message]; %#ok<AGROW>
end
end |
=== Minor Leagues ===
|
# using Test, Distances, TransferEntropy
# d1 = rand(50)
# d2 = rand(50)
# r = rand(50)
# tol = 1e-8
# # Directly from time series
# @test standard_te(d1, r, estimator = :tetogrid) >= 0 - tol
# @test standard_te(d2, r, estimator = :tetogrid) >= 0 - tol
# @test standard_te(d1, r, estimator = :tefreq) >= 0 - tol
# @test standard_te(d2, r, estimator = :tefreq) >= 0 - tol
# @test standard_te(d1, r, estimator = :tekNN) >= 0 - tol
# @test standard_te(d2, r, estimator = :tekraskov) >= 0 - tol
# #########################
# # Surrogates
# #########################
# @test standard_te(d1, r, which_is_surr = :driver) >= 0 - tol
# @test standard_te(d1, r, which_is_surr = :response) >= 0 - tol
# @test standard_te(d1, r, which_is_surr = :none) >= 0 - tol
# @test standard_te(d1, r, which_is_surr = :both, surr_func = aaft) >= 0 - tol
# @test standard_te(d1, r, which_is_surr = :both, surr_func = iaaft) >= 0 - tol
# @test standard_te(d1, r, which_is_surr = :both, surr_func = randomphases) >= 0 - tol
# @test standard_te(d1, r, which_is_surr = :both, surr_func = randomamplitudes) >= 0 - tol
# @test standard_te(d1, r, which_is_surr = :both, surr_func = randomshuffle) >= 0 - tol
# #########################
# # Forward prediction lags
# #########################
# @test standard_te(d1, r, ν = 1) >= 0 - tol
# @test standard_te(d1, r, ν = 5) >= 0 - tol
# #########################
# # Embedding lags and dimensions
# #########################
# d1 = rand(100)
# d2 = rand(100)
# r = rand(100)
# @test standard_te(d1, r, ν = 1, τ = 2, dim = 4) >= 0 - tol
# @test standard_te(d1, r, ν = 3, τ = 3, dim = 5) >= 0 - tol
# #########################
# # Tuning the bins
# #########################
# @test standard_te(d1, r, n_ϵ = 10) >= 0 - tol
# @test standard_te(d1, r, n_ϵ = 10, max_numbins = 5, min_numbins = 2) >= 0 - tol
##########################
# Regular TE
##########################
using Test, TransferEntropy, CausalityToolsBase
k, l, m = 1, 1, 1
τ = 1
η = 1
n_subdivs = 3
te_res1 = te_reg(rand(1000), rand(1000), k, l, m, η = η, τ = τ, n_subdivs = n_subdivs)
te_res2 = te_reg(rand(1000), rand(1000), k + 1, l, m + 1, η = η, τ = τ + 1, n_subdivs = n_subdivs)
@test te_res1 isa Vector{<:Real}
@test te_res2 isa Vector{<:Real}
@test length(te_res1) == 4
@test te_reg(rand(100), rand(100), 1, 1, 1, RectangularBinning(4)) |> typeof <: Real
@test te_reg(rand(100), rand(100), 1, 1, 1, [RectangularBinning(4), RectangularBinning(0.2)]) isa Vector{<:Real}
##########################
# Conditional TE
##########################
# Default discretization schemes
using Test
k, l, m, n = 1, 1, 1, 1
τ = 1
η = 1
n_subdivs = 3
te_res1 = te_cond(rand(1000), rand(1000), rand(1000), k, l, m, n, η = η, τ = τ, n_subdivs = n_subdivs)
te_res2 = te_cond(rand(1000), rand(1000), rand(1000), k + 1, l, m + 1, n, η = η, τ = τ + 1, n_subdivs = n_subdivs)
@test te_res1 isa Vector{<:Real}
@test te_res2 isa Vector{<:Real}
@test length(te_res1) == 4
# User-provided binning schemes
using Test
x, y, z = rand(100), rand(100), rand(100)
@test te_cond(x, y, z, 1, 1, 1, 1, RectangularBinning(4)) |> typeof <: Real
@test te_cond(x, y, z, 1, 1, 1, 1, [RectangularBinning(4), RectangularBinning(0.2)]) isa Vector{<:Real}
@test te_cond(x, y, z, 1, 1, 1, 1, RectangularBinning(3), estimator = TransferOperatorGrid()) |> typeof <: Real |
Formal statement is: lemma exp_two_pi_i' [simp]: "exp (\<i> * (of_real pi * 2)) = 1" Informal statement is: $e^{2\pi i} = 1$. |
theory mapp
imports Function_Model_Base
begin
context Function_Model begin
subsection \<open>Application relation on model Functions\<close>
lemma mapp_iff :
assumes "f : mFunc"
shows "mapp f b c \<longleftrightarrow> app (snd f) b c"
unfolding mapp_def
using assms by auto
lemma mapp_iff_pair :
assumes "<func, f'> : mFunc"
shows "mapp <func, f'> b c \<longleftrightarrow> app f' b c"
unfolding mapp_iff[OF assms] mfunc_snd_eq[OF assms] ..
lemmas mappI = iffD2[OF mapp_iff]
and mappI_pair = iffD2[OF mapp_iff_pair]
lemmas mappD = iffD1[OF mapp_iff]
and mappD_pair = iffD1[OF mapp_iff_pair]
lemma mappE :
assumes f:"f : mFunc" and bc:"mapp f b c"
obtains f' where
"f' : Function" "f = <func, f'>" "app f' b c"
proof (rule mfuncE1[OF f])
fix f' assume f_eq : "f = <func, f'>"
moreover hence f':"f' : Function"
using mfunc_pair_snd_func f by auto
have app: "app f' b c"
using mappD_pair f_eq f bc by auto
from f' f_eq app show ?thesis ..
qed
lemma mapp_m :
assumes f:"f : mFunc" and bc:"mapp f b c"
shows "b : M \<and> c : M"
proof (rule mappE[OF f bc])
fix f' assume
f' : "f' : Function" and f_eq : "f = <func, f'>"
then obtain j where
j : "j : Ord" and "f' \<in> (Tier j \<ominus> func) \<midarrow>p\<rightarrow> (Tier j \<ominus> func)"
using mE_mfunc_pair mfunc_m[OF f] by metis
hence "dom f' \<subseteq> Tier j \<ominus> func" "ran f' \<subseteq> Tier j \<ominus> func"
using fspaceD[OF tier_ex_set tier_ex_set, OF j j] by auto
moreover have "app f' b c"
using mappD_pair f bc
unfolding f_eq by auto
ultimately have "b \<in> Tier j \<ominus> func" "c \<in> Tier j \<ominus> func"
using domI[OF f'] ranI[OF f'] by auto
thus "b : M \<and> c : M"
using mI[OF j exsetD1[OF tier_set[OF j]]]
by auto
qed
text \<open>Model application is functional:\<close>
lemma mapp_functional :
assumes f : "f : mFunc"
and bc : "mapp f b c"
and bd : "mapp f b d"
shows "c = d"
using bc bd app_eqI[OF mfunc_snd_func[OF f]]
unfolding mapp_iff[OF f] by auto
lemma mapp_functional_ax :
"m\<forall>f : mFunc. m\<forall>x y z. mapp f x y \<and> mapp f x z \<longrightarrow> y = z"
unfolding mtall_def mall_def tall_def
using mapp_functional by auto
text \<open>Model functions are extensional:\<close>
lemma mfunc_ext :
assumes f : "f : mFunc" and g : "g : mFunc"
and bc:"\<And>b c. mapp f b c \<longleftrightarrow> mapp g b c"
shows "f = g"
proof (rule mfuncE1[OF f], rule mfuncE1[OF g])
fix f' g' assume
f' : "f = <func, f'>" and g' : "g = <func, g'>"
have
"snd f = snd g"
using bc fun_eqI[OF mfunc_snd_func mfunc_snd_func, OF f g]
unfolding mapp_def by auto
thus "f = g"
using mfunc_snd_eq f g unfolding f' g'
by metis
qed
lemma mfunc_ext_ax :
"m\<forall>f : mFunc. m\<forall>g : mFunc.
(m\<forall>x y. mapp f x y = mapp g x y) \<longrightarrow> f = g"
unfolding mtall_def mall_def tall_def
using mfunc_ext mapp_m by meson
end
end |
module Leap
export
isLeap : Int -> Bool
-- Gregorian rule: every fourth year, except centuries not divisible by 400
isLeap year = (year `mod` 4 == 0) && ((year `mod` 100 /= 0) || (year `mod` 400 == 0))
export
version : String
version = "1.0.0"
|
The university reopened its Level I Trauma Center on August 1, 2009, after an eleven-month closure following the hurricane, and, as of September 2009, had reopened 370 hospital beds.
|
Formal statement is: lemma measurable_vimage_algebra2: assumes g: "g \<in> space N \<rightarrow> X" and f: "(\<lambda>x. f (g x)) \<in> measurable N M" shows "g \<in> measurable N (vimage_algebra X f M)" Informal statement is: If $g$ maps the space of $N$ into $X$ and $x \mapsto f(g(x))$ is measurable from $N$ to $M$, then $g$ is measurable from $N$ to the $\sigma$-algebra on $X$ pulled back from $M$ along $f$.
Classic is a term that gets thrown around too much sometimes. Whether it’s cars, movies, or songs, a certain number of oldies get labeled with that prestigious rank. But what makes something a classic? While many may debate this, what we can agree on is one main reason: a seemingly endless majesty that stands the test of time.
Today we’re talking about one special classic Arabic movie, Chitchat on the Nile (Tharthara Fawq Al-Nile). Starring Emad Hamdy, Mirvat Amin, Ahmed Ramzy, Adel Adham, Soheir Ramzy, and more immensely talented stars, it was first released on the 15th of November, 1971, but if you watch it today, 47 years later, it is still as relevant as it was years ago.
First released as a novel written by the genius author, Naguib Mahfouz, scripted into a movie by Mahmoud El-Laithy, and directed by Hussein Kamal, it tells the story of how frustrated, depressed, or utterly apathetic Egyptians were after the huge setback of the 1967 War. The story is full of imagery and symbolism, depicting history through the lives of seemingly irrelevant people in the Egyptian population.
ADEF DECA, a cultural hub that aims to create a space for freedom of expression through technology, and strives to build an intellectual environment of mutual respect, is hosting a special screening of the movie, Chitchat on the Nile, tonight. The movie starts at 7 pm, and will be followed by an open discussion where viewers can share their opinions and feelings of the film. It’s an excellent opportunity for fans of this movie as well as for people who’ve never seen it before.
You can find ADEF DECA on 143 Street 8, Mokattam. Check out their Facebook Page for more details, or call 01121147008 for more information. |
From mathcomp Require Import ssreflect ssrfun ssrbool eqtype ssrnat seq.
Set Implicit Arguments.
Unset Strict Implicit.
Unset Printing Implicit Defensive.
(** * Subtyping *)
(** A subtype extends another type by adding
a property. The new type has a richer theory.
The new type should inherit all the original
theory. *)
(* Let's define the type of homogeneous tuples *)
(** ** Standard [sig] type *)
Print sig.
(**
Inductive sig (A : Type) (P : A -> Prop) : Type :=
| exist : forall x:A, P x -> sig P.
Notation "{ x : A | P }" :=
(sig (A:=A) (fun x => P)) : type_scope.
*)
(** [sig] type is almost the same as the [ex] type encoding
existential quantifier *)
Print ex.
(**
Inductive ex (A : Type) (P : A -> Prop) : Prop :=
| ex_intro : forall x:A, P x -> ex P.
*)
(** simple definition relying on the existing [pred] function *)
Definition predecessor (x : {n | 0 < n}) : nat :=
let: (exist m prf_m_gt_0) := x in
m.-1.
(** The _convoy pattern_ : *)
Definition pred_dep (x : {n | 0 < n}) : nat :=
let: (exist n prf_n_gt_0) := x in
match n as n return (0 < n -> nat) with
| 0 =>
fun prf : 0 < 0 => False_rect _ (notF prf)
| n'.+1 =>
fun _ => n'
end prf_n_gt_0.
(** ** Some issues *)
(** Proof irrelevance *)
Lemma eq_sig T (P : T -> Prop) x (px : P x) (px' : P x) :
(exist P x px) = (exist P x px').
Proof.
Fail by done.
(** we are stuck: we need proof irrelevance here *)
Abort.
Axiom proof_irrelevance :
forall (P : Prop) (pf1 pf2 : P), pf1 = pf2.
Lemma eq_sig T (P : T -> Prop) x (px : P x) (px' : P x) :
(exist P x px) = (exist P x px').
Proof.
rewrite [px]proof_irrelevance.
done.
Qed.
(** Decidable propositions _are_ proof irrelevant *)
Check eq_irrelevance.
(**
eq_irrelevance
: forall (T : eqType) (x y : T)
(e1 e2 : x = y),
e1 = e2
This special case of proof irrelevance is called
_Unicity of Identity Proofs_ principle (UIP).
*)
(** [sval] projection is injective for decidable propositions : *)
Lemma eq_n_gt0 (m n : {n | 0 < n}) :
sval m = sval n ->
m = n.
Proof.
case: m=> [m pfm]; case: n=> [n pfn] /=.
Fail move=> ->. (* because of the dependencies *)
move=> eq_mn; move: eq_mn pfm pfn.
move=> -> *.
congr exist.
exact: eq_irrelevance.
Qed.
(** We cannot define [sval] (aka [proj1_sig]) as a coercion *)
(**
This is because we cannot specify the target
class of this tentative coercion:
Coercion proj1_sig : sig >-> ???.
*)
(** ** Mathcomp's [subType]
(defined in [eqtype] library) *)
Print subType.
(**
Structure subType (T : Type) (P : pred T) : Type :=
SubType {
(* new type *)
sub_sort : Type;
(* projection, like proj_sig1 *)
val : sub_sort -> T;
(* constructor *)
Sub : forall x : T, P x -> sub_sort;
(* elimination principle *)
_ : forall K : sub_sort -> Type,
(forall (x : T) (Px : P x), K (Sub x Px)) ->
forall u : sub_sort, K u;
(* projection is injective *)
_ : forall (x : T) (Px : P x),
val (Sub x Px) = x
}.
*)
(** An example of a [subType] *)
Section PosSubtype.
Inductive pos := Positive m of 0 < m.
Coercion nat_of_pos p :=
let: Positive n _ := p in n.
Variables p1 p2 : pos.
(** This is something not possible with
{n | 0 < n} type *)
Check p1.-1 + p2.
Set Printing Coercions.
Check p1.-1 + p2.
Unset Printing Coercions.
(** Register [pos] as a [subType] *)
Canonical pos_subType :=
[subType for nat_of_pos].
(** Given that propositions are expressed as
booleans, we can use the fact that proofs of
these properties are irrelevant. *)
(** Hence we can build subtypes and prove that
the projection to the supertype is injective,
which lets us inherit all of the theory of
the supertype. *)
(** E.g. we can make [pos] inherit [eqType]
from [nat] type with a bit of boilerplate *)
Definition pos_eqMixin :=
Eval hnf in [eqMixin of pos by <:].
Canonical pos_eqType :=
Eval hnf in EqType pos pos_eqMixin.
Check p1 == p2.
End PosSubtype.
(** * Some standard [subType]s *)
(** ** [ordinal]: type of finite ordinals *)
(**
[ordinal n] = {0, 1, ... , n-1}
Inductive ordinal : predArgType :=
| Ordinal m of m < n.
Notation "''I_' n" := (ordinal n)
*)
From mathcomp Require Import fintype.
Definition i1 : 'I_2 :=
Ordinal (erefl : 0 < 2).
Definition i2 : 'I_2 :=
Ordinal (erefl : 1 < 2).
Compute i1 == i2.
(** Note: [ordinal] is also a [finType] *)
(** ** [tuple] type *)
From mathcomp Require Import tuple.
Definition t : 3.-tuple nat :=
[tuple 5; 6; 7].
(** It's not possible to take an out-of-bounds
element of a tuple, so there is no need
of a default element as for [nth] on [seq]s *)
About tnth.
Compute [tnth t 0].
Compute [tnth t 1].
Compute [tnth t 2].
Fail Compute [tnth t 3].
About thead.
(**
thead :
forall (n : nat) (T : Type),
(n.+1).-tuple T -> T
*)
Compute thead t. (** = 5 *)
(** [thead] of empty tuple
does not even typecheck *)
Fail Check thead [tuple].
(**
Structure tuple_of (n : nat) (T : Type) : Type :=
Tuple {
(* [:>] means "is a coercion" *)
tval :> seq T;
_ : size tval == n;
}.
*)
Section TupleExample.
Variables (m n : nat) (T : Type).
Variable t1 : m.-tuple T.
Variable t2 : n.-tuple T.
Check [tuple of t1 ++ t2] : (m + n).-tuple T.
Fail Check [tuple of t1 ++ t2] : (n + m).-tuple T.
End TupleExample.
Example seq_on_tuple (n : nat) (t : n .-tuple nat) :
size (rev [seq 2 * x | x <- rev t]) = size t.
Proof.
Set Printing Coercions.
by rewrite map_rev revK size_map.
Unset Printing Coercions.
Restart.
rewrite size_tuple. (** this should work *)
Check size_tuple.
(**
size_tuple : forall (n : nat) (T : Type)
(t : n.-tuple T), size t = n
*)
(** Why does this not fail? *)
(** rev [seq 2 * x | x <- rev t] is a list,
not a tuple *)
rewrite size_tuple.
Abort.
Print Canonical Projections.
(**
...
map <- tval ( map_tuple )
...
rev <- tval ( rev_tuple )
...
*)
(**
This works because Coq is instrumented
to automatically promote sequences to tuples
using the mechanism of Canonical Structures.
Lemma rev_tupleP n A (t : n .-tuple A) :
size (rev t) == n.
Proof. by rewrite size_rev size_tuple. Qed.
Canonical rev_tuple n A (t : n .-tuple A) :=
Tuple (rev_tupleP t).
Lemma map_tupleP n A B (f:A -> B) (t: n.-tuple A) :
size (map f t) == n.
Proof. by rewrite size_map size_tuple. Qed.
Canonical
map_tuple n A B (f:A -> B) (t: n.-tuple A) :=
Tuple (map_tupleP f t).
*)
(** Exercise: show in detail how
[rewrite size_tuple] from above works *)
(**
Since tuples are a subtype of lists, we
can reuse the theory of lists over equality types.
*)
Example test_eqtype (x y : 3.-tuple nat) :
x == y -> True.
Proof.
move=> /eqP.
Abort.
(** * Finite types *)
From mathcomp Require Import choice fintype finfun.
(**
Fig. 3 of "Packaging Mathematical Structures"
by F. Garillot, G. Gonthier, A. Mahboubi,
L. Rideau(2009)
*)
(** A [finType] structure is composed of
a list of elements of an [eqType] structure,
each element of the type being uniquely
represented in the list:
(* simplified definition of [finType] *)
Structure finType : Type :=
FinType {
sort :> countType;
enum : seq sort;
enumP : forall x,
count (pred1 x) enum = 1;
}.
*)
(**
Finite sets are then sets taken in a [finType]
domain. In the library, the basic operations
are provided.
For example, given [A] a finite set,
[card A] (or #|A|) represents the cardinality
of A. All these operations come with their
basic properties. For example, we have:
Lemma cardUI : ∀ (d: finType) (A B: {pred T}),
#|A ∪ B| + #|A ∩ B| = #|A| + #|B|.
Lemma card_image :
∀ (T T': finType) (f : T -> T')
injective f -> forall A : {pred T},
#|image f A| = #|A|.
*)
(** ** How [finType] is actually organized *)
Print Finite.type.
(**
Structure type : Type :=
Pack {
sort : Type;
_ : Finite.class_of sort
}.
*)
(** [finType] extends [choiceType] with a mixin *)
Print Finite.class_of.
(**
Structure class_of (T : Type) : Type :=
Class {
base : Choice.class_of T;
mixin : Finite.mixin_of (EqType T base)
}
*)
Print Finite.mixin_of.
(** we mix in countable and two specific fields:
an enumeration and an axiom
Structure mixin_of (T : eqType) : Type :=
Mixin {
mixin_base : Countable.mixin_of T;
mixin_enum : seq T;
_ : Finite.axiom mixin_enum
}.
*)
Print Finite.axiom.
(**
Finite.axiom =
fun (T : eqType) (e : seq T) =>
forall x : T, count_mem x e = 1
where
Notation count_mem x := (count (pred1 x)).
*)
Eval cbv in count_mem 5 [:: 1; 5; 2; 5; 3; 5; 4].
Section FinTypeExample.
Variable T : finType.
(** Cardinality of a finite type *)
Check #| T |.
(** "bounded" quantification *)
Check [forall x : T, x == x] && false.
Fail Check (forall x : T, x == x) && false.
(** We recover classical reasoning for
the bounded quantifiers: *)
Check negb_forall:
forall (T : finType) (P : pred T),
~~ [forall x, P x] = [exists x, ~~ P x].
Check negb_exists:
forall (T : finType) (P : pred T),
~~ [exists x, P x] = [forall x, ~~ P x].
(** [negb_forall] does not hold
    in an intuitionistic setting *)
End FinTypeExample.
(** * Examples of interfaces *)
From mathcomp Require Import finset.
Section Interfaces.
Variable chT : choiceType.
Check (@sigW chT).
Check [eqType of chT].
Variable coT : countType.
Check [countType of nat].
Check [choiceType of coT].
Check [choiceType of nat * nat].
Check [choiceType of seq coT].
Variable fT : finType.
Check [finType of bool].
Check [finType of 'I_10].
Check [finType of {ffun 'I_10 -> fT}].
Check [finType of bool * bool].
Check [finType of 3.-tuple bool].
Fail Check [finType of 3.-tuple nat].
Check {set 'I_4} : Type.
Check forall a : {set 'I_4},
(a == set0) || (1 < #|a| < 4).
Print set_type.
Check {ffun 'I_4 -> bool} : Type.
Print finfun_eqType.
Check [eqType of #| 'I_4 |.-tuple bool].
Check [finType of #| 'I_4 |.-tuple bool].
Check {ffun 'I_4 * 'I_6 -> nat} : Type.
Check [eqType of {ffun 'I_4 * 'I_6 -> nat}] : Type.
End Interfaces.
(** * Bonus *)
(* The following requires
the coq-mathcomp-algebra package
from opam package manager *)
From mathcomp Require Import all_algebra.
Open Scope ring_scope.
Print matrix.
Section Rings.
Variable R : ringType.
Check forall x : R, x * 1 == x.
(** Matrices of size 4x4 over an arbitrary ring [R] *)
Check forall m : 'M[R]_(4,4), m == m * m.
End Rings.
|
! RUN: %f18 -funparse-with-symbols %s 2>&1 | FileCheck %s
! CHECK-NOT: exit from DO CONCURRENT construct
subroutine do_concurrent_test1(n)
implicit none
integer :: j,k,l,n
mytest: if (n>0) then
mydoc: do concurrent(j=1:n)
mydo: do k=1,n
if (k==5) exit
if (k==6) exit mydo
end do mydo
do concurrent(l=1:n)
if (l==5) print *, "test"
end do
end do mydoc
do k=1,n
if (k==5) exit mytest
end do
end if mytest
end subroutine do_concurrent_test1
subroutine do_concurrent_test2(n)
implicit none
integer :: i1,i2,i3,i4,i5,i6,n
mytest2: if (n>0) then
nc1: do concurrent(i1=1:n)
nc2: do i2=1,n
nc3: do concurrent(i3=1:n)
nc4: do i4=1,n
if (i3==4) exit nc4
nc5: do concurrent(i5=1:n)
nc6: do i6=1,n
if (i6==10) print *, "hello"
end do nc6
end do nc5
end do nc4
end do nc3
end do nc2
end do nc1
end if mytest2
end subroutine do_concurrent_test2
|
The Land Forces represent the most important component of the Romanian Armed Forces and they are <unk> for execution of various military actions, with terrestrial or <unk> character, in any zone or direction.
|
The Catechism classifies scandal under the fifth commandment and defines it as "an attitude or behavior which leads another to do evil". In the Gospel of Matthew, Jesus stated, "Whoever causes one of these little ones who believe in me to sin, it would be better for him to have a great millstone fastened round his neck and to be drowned in the depth of the sea." The Church considers it a serious crime to cause another's faith, hope and love to be weakened, especially if it is done to young people and the perpetrator is a person of authority such as a parent, teacher or priest.
|
# Description of the problem and solution
Task 1 was to predict a person's age from brain-image data: a standard regression problem. The original dataset included 832 features as well as many NaN values and a few outliers, so a careful preprocessing stage was necessary to obtain a well-defined dataset for our regression model.

The first step was imputation of the dataset: each NaN value was filled with the median of its feature column. The median is preferred over other statistics (e.g. the mean) because the dataset contains many outliers; for example, for the values 1, 2, _, 5, 20 the median is 3.5 while the mean is 7.

The next step was feature extraction. Using the "autofeat" library (paper: https://arxiv.org/pdf/1901.07329.pdf), we extracted the 21 most important features. The algorithm loops through the following stages: correlate the features with the target, select promising features, train a Lasso regression model on the promising features, and filter the good features by keeping the ones with non-zero regression weights. We updated the datasets to keep only these 21 features and used the updated datasets to train our final regression model.

Several outlier-detection techniques were tried, but we decided to keep the outliers and use a tree-based method for the final model: tree-based methods have proved robust to outliers, and this avoids the risk of excluding important features/points from the dataset. The "ExtraTreesRegressor" model from the "sklearn" package was used and fine-tuned based on its R2 score on our validation set. The final model scored above 0.6 on the validation sets under cross-validation and scored 0.6812 on the ETH submission leaderboard, while the hard baseline was set to 0.65 by the Advanced Machine Learning Task 1 team.
# Include all the necessary packages
```python
!pip install autofeat
from sklearn.metrics import r2_score
try:
%tensorflow_version 2.x
except Exception:
pass
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from autofeat import FeatureSelector
from sklearn.model_selection import train_test_split
from sklearn.ensemble import ExtraTreesRegressor
```
Requirement already satisfied: autofeat in /usr/local/lib/python3.6/dist-packages (0.2.5)
Requirement already satisfied: pint in /usr/local/lib/python3.6/dist-packages (from autofeat) (0.9)
Requirement already satisfied: pandas>=0.24.0 in /usr/local/lib/python3.6/dist-packages (from autofeat) (0.25.2)
Requirement already satisfied: scikit-learn in /usr/local/lib/python3.6/dist-packages (from autofeat) (0.21.3)
Requirement already satisfied: sympy in /usr/local/lib/python3.6/dist-packages (from autofeat) (1.1.1)
Requirement already satisfied: joblib in /usr/local/lib/python3.6/dist-packages (from autofeat) (0.14.0)
Requirement already satisfied: numpy in /tensorflow-2.0.0/python3.6 (from autofeat) (1.17.3)
Requirement already satisfied: future in /usr/local/lib/python3.6/dist-packages (from autofeat) (0.16.0)
Requirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.6/dist-packages (from pandas>=0.24.0->autofeat) (2018.9)
Requirement already satisfied: python-dateutil>=2.6.1 in /usr/local/lib/python3.6/dist-packages (from pandas>=0.24.0->autofeat) (2.6.1)
Requirement already satisfied: scipy>=0.17.0 in /usr/local/lib/python3.6/dist-packages (from scikit-learn->autofeat) (1.3.1)
Requirement already satisfied: mpmath>=0.19 in /usr/local/lib/python3.6/dist-packages (from sympy->autofeat) (1.1.0)
Requirement already satisfied: six>=1.5 in /tensorflow-2.0.0/python3.6 (from python-dateutil>=2.6.1->pandas>=0.24.0->autofeat) (1.12.0)
# Load the data from the CSV files
```python
column_names_x = ['id']
for i in range(832):
column_names_x.append('x'+str(i))
raw_dataset_x = pd.read_csv('/content/X_train.csv', names=column_names_x,
                            na_values="?", comment='\t', sep=",",
                            skipinitialspace=True, skiprows=1)  # skiprows=1 skips the header row
dataset_x = raw_dataset_x.copy()
dataset_x.tail()
```
*(Output: the last five rows, ids 1207–1211, of the 833-column training dataframe `dataset_x` (columns `id`, `x0` … `x831`). The raw cell values, which include scattered NaNs, are omitted here; the original rendering reported "5 rows × 833 columns".)*
```python
column_names_y = ['id','y']
raw_dataset_y = pd.read_csv('/content/y_train.csv', names=column_names_y,
                            na_values="?", comment='\t', sep=",",
                            skipinitialspace=True, skiprows=1)  # skiprows=1 skips the header row
dataset_y = raw_dataset_y.copy()
dataset_y.tail()
```
|      | id     | y    |
|------|--------|------|
| 1207 | 1207.0 | 66.0 |
| 1208 | 1208.0 | 73.0 |
| 1209 | 1209.0 | 74.0 |
| 1210 | 1210.0 | 78.0 |
| 1211 | 1211.0 | 64.0 |
# Print the missing values
```python
missing_values = dataset_x.isna().sum()
print (missing_values)
```
id 0
x0 81
x1 103
x2 92
x3 91
...
x827 83
x828 78
x829 98
x830 84
x831 92
Length: 833, dtype: int64
# Split the data into training and test data
```python
# Split using sklearn.model_selection
x_train, x_test, y_train, y_test = train_test_split(dataset_x, dataset_y, test_size=0.2, random_state = 100)
```
```python
train_stats = x_train.describe()
train_stats.pop("id")
train_stats = train_stats.transpose()
train_stats
```
|      | count | mean          | std         | min           | 25%           | 50%           | 75%           | max           |
|------|-------|---------------|-------------|---------------|---------------|---------------|---------------|---------------|
| x0   | 909.0 | 99849.359545  | 9534.020258 | 65533.368423  | 93818.485147  | 100183.062423 | 105994.290528 | 130226.576502 |
| x1   | 885.0 | 3698.730375   | 943.683864  | 180.312021    | 3076.550570   | 3651.110055   | 4303.892503   | 7265.213902   |
| x2   | 891.0 | 99975.389109  | 9540.065988 | 68544.573581  | 93937.346571  | 99386.035114  | 106102.200889 | 132221.045067 |
| x3   | 898.0 | 999.944996    | 100.903669  | 694.745271    | 935.303439    | 999.571797    | 1068.606823   | 1434.200505   |
| x4   | 890.0 | 10001.743350  | 1001.473353 | 6681.561828   | 9339.312428   | 10021.924636  | 10646.003276  | 13560.223285  |
| ...  | ...   | ...           | ...         | ...           | ...           | ...           | ...           | ...           |
| x827 | 906.0 | 104990.967566 | 2761.209013 | 100015.768596 | 102783.100024 | 104986.305216 | 107352.370223 | 109999.847537 |
| x828 | 907.0 | 6827.704539   | 1387.835714 | 1696.036569   | 6002.316474   | 6835.947954   | 7652.607118   | 11276.075121  |
| x829 | 884.0 | 10.021817     | 0.982265    | 6.899008      | 9.378562      | 9.977236      | 10.676450     | 13.188278     |
| x830 | 902.0 | 104960.353706 | 2845.423469 | 100003.049706 | 102653.373914 | 104838.184005 | 107428.898901 | 109993.046071 |
| x831 | 893.0 | 2.269127      | 0.169559    | 1.589261      | 2.173057      | 2.291077      | 2.374205      | 2.846222      |

*(832 rows × 8 columns)*
# Fill the NaN in the training data set with the median values of each column
```python
x_train = x_train.fillna(x_train.median())
x_test = x_test.fillna(x_test.median())
missing_values = x_train.isna().sum()
print (missing_values)
```
id 0
x0 0
x1 0
x2 0
x3 0
..
x827 0
x828 0
x829 0
x830 0
x831 0
Length: 833, dtype: int64
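Note that the cell above fills the test split with its own medians. A variant that reuses the training medians for both splits (often preferred, since it keeps test-split statistics out of the preprocessing) would be:
```python
# Hypothetical variant: impute both splits with the training medians only
train_medians = x_train.median()
x_train = x_train.fillna(train_medians)
x_test = x_test.fillna(train_medians)
```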
# Remove the unnecessary "id" label
```python
y_train.pop("id")
y_test.pop("id")
```
1143 1143.0
941 941.0
365 365.0
467 467.0
615 615.0
...
156 156.0
689 689.0
28 28.0
69 69.0
1198 1198.0
Name: id, Length: 243, dtype: float64
# Feature Extraction using autofeat
```python
fsel = FeatureSelector(featsel_runs=4,
max_it=150,
w_thr=1e-6,
keep=None,
n_jobs=1,
verbose=1)
new_X = fsel.fit_transform(pd.DataFrame(x_train, columns=column_names_x), y_train)
print(new_X.columns)
df_train = pd.DataFrame(x_train, columns=column_names_x)
df_test = pd.DataFrame(x_test, columns=column_names_x)
```
/usr/local/lib/python3.6/dist-packages/sklearn/utils/validation.py:724: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples, ), for example using ravel().
y = column_or_1d(y, warn=True)
[featsel] Scaling data...done.
[featsel] 220/833 features after univariate filtering
[featsel] Feature selection run 1/4
[featsel] Feature selection run 2/4
[featsel] Feature selection run 3/4
[featsel] Feature selection run 4/4
[featsel] 28 features after 4 feature selection runs
[featsel] 28 features after correlation filtering
[featsel] 21 features after noise filtering
[featsel] 21 final features selected (including 0 original keep features).
Index(['x400', 'x635', 'x757', 'x516', 'x809', 'x214', 'x556', 'x617', 'x93',
'x346', 'x596', 'x255', 'x309', 'x252', 'x292', 'x738', 'x537', 'x593',
'x474', 'x614', 'x502'],
dtype='object')
# Keep only the necessary features
```python
dataset_selected_x = x_train.copy()
x_test_selected= x_test.copy()
#accepted=[400, 757, 635, 516, 132, 15, 809, 116, 214,
#556, 617, 93, 346, 596, 309, 252, 292, 474,
#593, 614]
accepted=[400, 635, 757, 516, 809, 214, 556, 617, 93,
346, 596, 255, 309, 252, 292, 738, 537, 593,
474, 614, 502]
for j in range(832):
if (j in accepted):
print (j)
else:
del dataset_selected_x['x'+str(j)]
del x_test_selected['x'+str(j)]
train_stats = dataset_selected_x.describe()
train_stats.pop("id")
train_stats = train_stats.transpose()
train_stats
```
93
214
252
255
292
309
346
400
474
502
516
537
556
593
596
614
617
635
738
757
809
|      | count | mean         | std          | min           | 25%          | 50%          | 75%          | max          |
|------|-------|--------------|--------------|---------------|--------------|--------------|--------------|--------------|
| x93  | 969.0 | 3.519955e+03 | 6.123839e+02 | 1.789851e+03  | 3.122307e+03 | 3.492040e+03 | 3.882096e+03 | 5.902057e+03 |
| x214 | 969.0 | 1.686814e+03 | 4.246669e+02 | 3.857756e+02  | 1.438003e+03 | 1.657997e+03 | 1.929010e+03 | 3.719025e+03 |
| x252 | 969.0 | 5.153136e+03 | 6.443209e+03 | -1.066389e+04 | 2.036529e+03 | 3.051817e+03 | 5.396992e+03 | 5.218476e+04 |
| x255 | 969.0 | 1.063172e+04 | 1.847941e+03 | 3.258064e+03  | 9.580092e+03 | 1.053304e+04 | 1.170004e+04 | 1.742768e+04 |
| x292 | 969.0 | 1.289525e+05 | 1.561997e+04 | 7.115248e+04  | 1.201669e+05 | 1.282218e+05 | 1.377145e+05 | 1.982293e+05 |
| x309 | 969.0 | 1.360934e+04 | 2.104294e+03 | 1.787475e+03  | 1.241801e+04 | 1.355888e+04 | 1.480791e+04 | 2.142239e+04 |
| x346 | 969.0 | 7.264234e+03 | 1.238998e+03 | 2.195927e+03  | 6.577637e+03 | 7.355575e+03 | 8.085946e+03 | 1.121581e+04 |
| x400 | 969.0 | 2.419177e+00 | 1.566607e-01 | 1.441218e+00  | 2.325897e+00 | 2.416256e+00 | 2.507563e+00 | 3.029658e+00 |
| x474 | 969.0 | 2.138704e+05 | 3.363320e+04 | 6.580233e+04  | 1.951336e+05 | 2.113702e+05 | 2.311579e+05 | 4.824998e+05 |
| x502 | 969.0 | 7.341196e+13 | 5.051213e+13 | -8.384285e+13 | 4.129257e+13 | 6.245895e+13 | 9.198935e+13 | 3.816907e+14 |
| x516 | 969.0 | 2.633410e+00 | 2.851324e-01 | 1.586758e+00  | 2.461447e+00 | 2.632617e+00 | 2.810227e+00 | 3.709460e+00 |
| x537 | 969.0 | 2.131250e+05 | 3.401347e+04 | 6.499225e+04  | 1.936266e+05 | 2.105931e+05 | 2.305157e+05 | 4.810740e+05 |
| x556 | 969.0 | 6.058210e+03 | 7.982839e+02 | 3.040824e+03  | 5.588225e+03 | 5.998493e+03 | 6.472923e+03 | 9.103862e+03 |
| x593 | 969.0 | 6.306444e+04 | 3.517018e+04 | 2.381309e+04  | 4.825502e+04 | 5.195309e+04 | 5.625009e+04 | 2.232860e+05 |
| x596 | 969.0 | 1.994849e+04 | 2.631193e+03 | 9.693873e+03  | 1.845468e+04 | 1.980071e+04 | 2.136755e+04 | 3.037075e+04 |
| x614 | 969.0 | 1.219718e+06 | 1.863793e+05 | 4.090431e+05  | 1.112420e+06 | 1.220616e+06 | 1.319038e+06 | 1.976749e+06 |
| x617 | 969.0 | 1.528155e+03 | 6.897489e+02 | -6.478282e+02 | 1.039647e+03 | 1.425202e+03 | 1.899843e+03 | 5.028253e+03 |
| x635 | 969.0 | 2.552238e+00 | 2.211141e-01 | 1.536237e+00  | 2.441985e+00 | 2.571545e+00 | 2.685138e+00 | 3.319348e+00 |
| x738 | 969.0 | 4.070505e+05 | 5.419321e+04 | 1.500802e+05  | 3.777463e+05 | 4.051612e+05 | 4.389639e+05 | 6.508670e+05 |
| x757 | 969.0 | 2.556026e+00 | 2.138400e-01 | 1.513769e+00  | 2.448038e+00 | 2.577740e+00 | 2.684761e+00 | 3.292584e+00 |
| x809 | 969.0 | 8.301919e+01 | 1.026332e+02 | -1.034091e+02 | 3.267047e+01 | 5.403747e+01 | 9.901100e+01 | 1.245880e+03 |
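As an aside, the column-dropping loop above can be expressed more concisely with pandas column indexing (an equivalent sketch, assuming the same `accepted` list; note that the `id` column is kept, as in the loop):
```python
keep_cols = ['id'] + ['x' + str(j) for j in sorted(accepted)]
dataset_selected_x = x_train[keep_cols].copy()
x_test_selected = x_test[keep_cols].copy()
```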
# Regression model
```python
rfr = ExtraTreesRegressor(n_jobs=1, max_depth=None, n_estimators=180, random_state=0, min_samples_split=3, max_features=None)
rfr.fit(dataset_selected_x, np.ravel(y_train))
y_pred = rfr.predict(x_test_selected)
score = r2_score(y_test, y_pred)
print(score)
```
0.6037280104368132
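The description above reports cross-validated R2 scores greater than 0.6. A sketch of how such a score could be computed for this model (the exact fold count is not stated in the write-up, so 5 folds is an assumption):
```python
from sklearn.model_selection import cross_val_score

# cv=5 is an assumption; the write-up does not state the fold count used
cv_scores = cross_val_score(rfr, dataset_selected_x, np.ravel(y_train),
                            cv=5, scoring='r2')
print(cv_scores.mean(), cv_scores.std())
```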
# Export the file with our predictions for submission
```python
#accepted=[400, 757, 635, 516, 132, 15, 809, 116, 214,
#556, 617, 93, 346, 596, 309, 252, 292, 474,
#593, 614]
accepted=[400, 635, 757, 516, 809, 214, 556, 617, 93,
346, 596, 255, 309, 252, 292, 738, 537, 593,
474, 614, 502]
column_names_x = ['id']
for i in range(832):
column_names_x.append('x'+str(i))
raw_dataset_x_test = pd.read_csv('/content/X_test.csv', names=column_names_x,
                                 na_values="?", comment='\t', sep=",",
                                 skipinitialspace=True, skiprows=1)  # skiprows=1 skips the header row
dataset_x_test = raw_dataset_x_test.copy()
dataset_x_test.tail()
dataset_x_test = dataset_x_test.fillna(dataset_x_test.median())
dataset_selected_x_test = dataset_x_test.copy()
for j in range(832):
if (j in accepted):
print (j)
else:
del dataset_selected_x_test['x'+str(j)]
predictions = rfr.predict(dataset_selected_x_test)
index = 0.0
with open('predictions.txt', 'w') as f:
f.write("%s\n" % "id,y")
for predict in predictions:
writing_str = str(index)+','+str(predict.item(0))
f.write("%s\n" % writing_str)
index = index + 1
```
93
214
252
255
292
309
346
400
474
502
516
537
556
593
596
614
617
635
738
757
809
|
Inductive subseq : list nat -> list nat -> Prop :=
| first_case: forall (l2: list nat), subseq [] l2
| second_case: forall (l1 l2: list nat) (x: nat),
subseq l1 l2 ->
subseq (x :: l1) (x :: l2)
| third_case: forall (l1 l2: list nat) (x: nat),
subseq l1 l2 -> subseq l1 (x :: l2).
Theorem subseq_refl : forall (l: list nat),
subseq l l.
Proof.
intros.
induction l as [| h t IH].
- apply first_case.
- apply second_case. apply IH.
Qed.
Theorem subseq_app : forall (l1 l2 l3: list nat),
subseq l1 l2 -> subseq l1 (l2 ++ l3).
Proof.
intros.
induction H.
- apply first_case.
- simpl. apply second_case. apply IHsubseq.
- simpl. apply third_case. apply IHsubseq.
Qed. |
# install.packages("httr")
library("httr")
print("Calling R Functions ~~~")
r <- GET("http://localhost:8000/echo",
query = list(msg = "hello"))
print(content(r)$msg)
r <- POST("http://localhost:8000/sum",
body = list(a='2',b='3'), encode = "json")
print(content(r))
print("Calling Python Functions ~~~")
r <- GET("http://localhost:5000/echo",
query = list(msg = "hello"))
print(content(r)$msg)
r <- POST("http://localhost:5000/sum",
body = list(a='2',b='3'), encode = "json")
print(content(r))
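# For context, a minimal plumber API serving the two R endpoints above might
# look like the following (an assumption -- the actual server code is not part
# of this script). Save as plumber.R and run:
#   plumber::plumb("plumber.R")$run(port = 8000)
#
#   #* Echo back the input message
#   #* @param msg
#   #* @get /echo
#   function(msg = "") {
#     list(msg = paste0("The message is: '", msg, "'"))
#   }
#
#   #* Sum two numbers
#   #* @post /sum
#   function(a, b) {
#     as.numeric(a) + as.numeric(b)
#   }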
|
lemmas sums_Re = bounded_linear.sums [OF bounded_linear_Re] |
%!TEX root = main.tex
\appendix
\onecolumn
\section*{Overview}
\begin{table}[H]
\centering
\hspace*{-1cm}\begin{tabular}{lllll}
\toprule
Name & Function $\varphi(x)$ & Range of Values & $\varphi'(x)$ & Used by \\\midrule %
Sign function$^\dagger$ & $\begin{cases}+1 &\text{if } x \geq 0\\-1 &\text{if } x < 0\end{cases}$ & $\Set{-1,1}$ & $0$ & \cite{971754} \\
\parbox[t]{2.6cm}{Heaviside\\step function$^\dagger$} & $\begin{cases}+1 &\text{if } x > 0\\0 &\text{if } x < 0\end{cases}$ & $\Set{0, 1}$ & $0$ & \cite{mcculloch1943logical}\\
Logistic function & $\frac{1}{1+e^{-x}}$ & $(0, 1)$ & $\frac{e^x}{(e^x +1)^2}$ & \cite{duch1999survey} \\
Tanh & $\frac{e^x - e^{-x}}{e^x + e^{-x}} = \tanh(x)$ & $(-1, 1)$ & $\sech^2(x)$ & \cite{LeNet-5,Thoma:2014}\\
\gls{ReLU}$^\dagger$ & $\max(0, x)$ & $[0, +\infty)$ & $\begin{cases}1 &\text{if } x > 0\\0 &\text{if } x < 0\end{cases}$ & \cite{AlexNet-2012}\\
\parbox[t]{2.6cm}{\gls{LReLU}$^\dagger$\footnotemark\\(\gls{PReLU})} & $\varphi(x) = \max(\alpha x, x)$ & $(-\infty, +\infty)$ & $\begin{cases}1 &\text{if } x > 0\\\alpha &\text{if } x < 0\end{cases}$ & \cite{maas2013rectifier,he2015delving} \\
Softplus & $\log(e^x + 1)$ & $(0, +\infty)$ & $\frac{e^x}{e^x + 1}$ & \cite{dugas2001incorporating,glorot2011deep} \\
\gls{ELU} & $\begin{cases}x &\text{if } x > 0\\\alpha (e^x - 1) &\text{if } x \leq 0\end{cases}$ & $(-\infty, +\infty)$ & $\begin{cases}1 &\text{if } x > 0\\\alpha e^x &\text{otherwise}\end{cases}$ & \cite{clevert2015fast} \\
Softmax$^\ddagger$ & $o(\mathbf{x})_j = \frac{e^{x_j}}{\sum_{k=1}^K e^{x_k}}$ & $[0, 1]^K$ & $o(\mathbf{x})_j \cdot \frac{\sum_{k=1}^K e^{x_k} - e^{x_j}}{\sum_{k=1}^K e^{x_k}}$ & \cite{AlexNet-2012,Thoma:2014}\\
Maxout$^\ddagger$ & $o(\mathbf{x}) = \max_{x \in \mathbf{x}} x$ & $(-\infty, +\infty)$ & $\begin{cases}1 &\text{if } x_i = \max \mathbf{x}\\0 &\text{otherwise}\end{cases}$ & \cite{goodfellow2013maxout} \\
\bottomrule
\end{tabular}
\caption[Activation functions]{Overview of activation functions. Functions
marked with $\dagger$ are not differentiable at 0 and functions
marked with $\ddagger$ operate on all elements of a layer
simultaneously. The hyperparameters $\alpha \in (0, 1)$ of Leaky
ReLU and ELU are typically $\alpha = 0.01$. Other activation
function like randomized leaky ReLUs exist~\cite{xu2015empirical},
but are far less commonly used.\\
Some functions are smoothed versions of others, like the logistic
function for the Heaviside step function, tanh for the sign
function, softplus for ReLU.\\
Softmax is the standard activation function for the last layer of
a classification network as it produces a probability
distribution. See \Cref{fig:activation-functions-plot} for a plot
of some of them.}
\label{table:activation-functions-overview}
\end{table}
\footnotetext{$\alpha$ is a hyperparameter in leaky ReLU, but a learnable parameter in the parametric ReLU function.}
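For reference, the derivative of the logistic function listed in
\Cref{table:activation-functions-overview} follows from the chain rule and can
be rewritten in terms of the function itself:
\begin{equation*}
    \varphi'(x) = \frac{\mathrm{d}}{\mathrm{d}x} \left(1+e^{-x}\right)^{-1}
                = \frac{e^{-x}}{\left(1+e^{-x}\right)^2}
                = \frac{e^x}{\left(e^x+1\right)^2}
                = \varphi(x) \left(1 - \varphi(x)\right)
\end{equation*}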
\section*{Evaluation Results}
\glsunset{LReLU}
\begin{table}[H]
\centering
\begin{tabular}{@{\extracolsep{4pt}}lcccccc@{}}
\toprule
\multirow{2}{*}{Function} & \multicolumn{4}{c}{Single model} & \multicolumn{2}{c}{Ensemble of 10} \\\cline{2-3}\cline{4-5}\cline{6-7}
& \multicolumn{2}{c}{Training set} &\multicolumn{2}{c}{Test set} & Training set & Test set \\\midrule
Identity & \SI{66.25}{\percent} & $\boldsymbol{\sigma=0.77}$ &\SI{56.74}{\percent} & $\boldsymbol{\sigma=0.51}$ & \SI{68.77}{\percent} & \SI{58.78}{\percent}\\
Logistic & \SI{51.87}{\percent} & $\sigma=3.64$ &\SI{46.54}{\percent} & $\sigma=3.22$ & \SI{61.19}{\percent} & \SI{54.58}{\percent}\\
Logistic$^-$ & \SI{66.49}{\percent} & $\sigma=1.99$ &\SI{57.84}{\percent} & $\sigma=1.15$ & \SI{69.04}{\percent} & \SI{60.10}{\percent}\\
Softmax & \SI{75.22}{\percent} & $\sigma=2.41$ &\SI{59.49}{\percent} & $\sigma=1.25$ & \SI{78.87}{\percent} & \SI{63.06}{\percent}\\
Tanh & \SI{67.27}{\percent} & $\sigma=2.38$ &\SI{55.70}{\percent} & $\sigma=1.44$ & \SI{70.21}{\percent} & \SI{58.10}{\percent}\\
Softsign & \SI{66.43}{\percent} & $\sigma=1.74$ &\SI{55.75}{\percent} & $\sigma=0.93$ & \SI{69.78}{\percent} & \SI{58.40}{\percent}\\
\gls{ReLU} & \SI{78.62}{\percent} & $\sigma=2.15$ &\SI{62.18}{\percent} & $\sigma=0.99$ & \SI{81.81}{\percent} & \SI{64.57}{\percent}\\
\gls{ReLU}$^-$ & \SI{76.01}{\percent} & $\sigma=2.31$ &\SI{62.87}{\percent} & $\sigma=1.08$ & \SI{78.18}{\percent} & \SI{64.81}{\percent}\\
Softplus & \SI{66.75}{\percent} & $\sigma=2.45$ &\SI{56.68}{\percent} & $\sigma=1.32$ & \SI{71.27}{\percent} & \SI{60.26}{\percent}\\
S2ReLU & \SI{63.32}{\percent} & $\sigma=1.69$ &\SI{56.99}{\percent} & $\sigma=1.14$ & \SI{65.80}{\percent} & \SI{59.20}{\percent}\\
\gls{LReLU} & \SI{74.92}{\percent} & $\sigma=2.49$ &\SI{61.86}{\percent} & $\sigma=1.23$ & \SI{77.67}{\percent} & \SI{64.01}{\percent}\\
\gls{PReLU} & \textbf{\SI{80.01}{\percent}} & $\sigma=2.03$ &\SI{62.16}{\percent} & $\sigma=0.73$ & \textbf{\SI{83.50}{\percent}} & \textbf{\SI{64.79}{\percent}}\\
\gls{ELU} & \SI{76.64}{\percent} & $\sigma=1.48$ &\textbf{\SI{63.38}{\percent}} & $\sigma=0.55$ & \SI{78.30}{\percent} & \SI{64.70}{\percent}\\
\bottomrule
\end{tabular}
\caption[Activation function evaluation results on CIFAR-100]{Training and
test accuracy of adjusted baseline models trained with different
activation functions on CIFAR-100. For \gls{LReLU}, $\alpha = 0.3$ was
chosen.}
\label{table:CIFAR-100-accuracies-activation-functions}
\end{table}
\begin{table}[H]
\centering
\setlength\tabcolsep{1.5pt}
\begin{tabular}{@{\extracolsep{4pt}}lcccccccr@{}}
\toprule
\multirow{2}{*}{Function} & \multicolumn{4}{c}{Single model} & \multicolumn{2}{c}{Ensemble of 10} & \multicolumn{2}{c}{Epochs}\\\cline{2-5}\cline{6-7}\cline{8-9}
& \multicolumn{2}{c}{Training set} &\multicolumn{2}{c}{Test set} & Train & Test & Range & \multicolumn{1}{c}{Mean} \\\midrule
Identity & \SI{87.92}{\percent} & $\sigma=0.40$ & \SI{84.69}{\percent} & $\sigma=0.08$ & \SI{88.59}{\percent} & \SI{85.43}{\percent} & \hphantom{0}92 -- 140 & 114.5\\%TODO: Really?
Logistic & \SI{81.46}{\percent} & $\sigma=5.08$ & \SI{79.67}{\percent} & $\sigma=4.85$ & \SI{86.38}{\percent} & \SI{84.60}{\percent} & \hphantom{0}\textbf{58} -- \hphantom{0}\textbf{91} & \textbf{77.3}\\
Softmax & \SI{88.19}{\percent} & $\sigma=0.31$ & \SI{84.70}{\percent} & $\sigma=0.15$ & \SI{88.69}{\percent} & \SI{85.43}{\percent} & 124 -- 171& 145.8\\
Tanh & \SI{88.41}{\percent} & $\sigma=0.36$ & \SI{84.46}{\percent} & $\sigma=0.27$ & \SI{89.24}{\percent} & \SI{85.45}{\percent} & \hphantom{0}89 -- 123 & 108.7\\
Softsign & \SI{88.00}{\percent} & $\sigma=0.47$ & \SI{84.46}{\percent} & $\sigma=0.23$ & \SI{88.77}{\percent} & \SI{85.33}{\percent} & \hphantom{0}77 -- 119 & 104.1\\
\gls{ReLU} & \SI{88.93}{\percent} & $\sigma=0.46$ & \textbf{\SI{85.35}{\percent}} & $\sigma=0.21$ & \SI{89.35}{\percent} & \SI{85.95}{\percent} & \hphantom{0}96 -- 132 & 102.8\\
Softplus & \SI{88.42}{\percent} & $\boldsymbol{\sigma=0.29}$ & \SI{85.16}{\percent} & $\sigma=0.15$ & \SI{88.90}{\percent} & \SI{85.73}{\percent} & 108 -- 143 & 121.0\\
\gls{LReLU} & \SI{88.61}{\percent} & $\sigma=0.41$ & \SI{85.21}{\percent} & $\boldsymbol{\sigma=0.05}$ & \SI{89.07}{\percent} & \SI{85.83}{\percent} & \hphantom{0}87 -- 117 & 104.5\\
\gls{PReLU} & \textbf{\SI{89.62}{\percent}} & $\sigma=0.41$ & \textbf{\SI{85.35}{\percent}} & $\sigma=0.17$& \textbf{\SI{90.10}{\percent}} & \SI{86.01}{\percent} & \hphantom{0}85 -- 111 & 100.5\\
\gls{ELU} & \SI{89.49}{\percent} & $\sigma=0.42$ & \textbf{\SI{85.35}{\percent}} & $\sigma=0.10$ & \SI{89.94}{\percent} & \textbf{\SI{86.03}{\percent}} & \hphantom{0}73 -- 113 & 92.4\\
\bottomrule
\end{tabular}
\caption[Activation function evaluation results on HASYv2]{Test accuracy of
adjusted baseline models trained with different activation
functions on HASYv2. For \gls{LReLU}, $\alpha = 0.3$ was chosen.}
\label{table:HASYv2-accuracies-activation-functions}
\end{table}
\begin{table}[H]
\centering
\setlength\tabcolsep{1.5pt}
\begin{tabular}{@{\extracolsep{4pt}}lcccccccr@{}}
\toprule
\multirow{2}{*}{Function} & \multicolumn{4}{c}{Single model} & \multicolumn{2}{c}{Ensemble of 10} & \multicolumn{2}{c}{Epochs}\\\cline{2-5}\cline{6-7}\cline{8-9}
& \multicolumn{2}{c}{Training set} &\multicolumn{2}{c}{Test set} & Train & Test & Range & \multicolumn{1}{c}{Mean} \\\midrule
Identity & \SI{87.49}{\percent} & $\sigma=2.50$ & \SI{69.86}{\percent} & $\sigma=1.41$ & \SI{89.78}{\percent} & \SI{71.90}{\percent} & \hphantom{0}51 -- \hphantom{0}65 & 53.4\\
Logistic & \SI{45.32}{\percent} & $\sigma=14.88$& \SI{40.85}{\percent} & $\sigma=12.56$ & \SI{51.06}{\percent} & \SI{45.49}{\percent} & \hphantom{0}38 -- \hphantom{0}93 & 74.6\\
Softmax & \SI{87.90}{\percent} & $\sigma=3.58$ & \SI{67.91}{\percent} & $\sigma=2.32$ & \SI{91.51}{\percent} & \SI{70.96}{\percent} & 108 -- 150 & 127.5\\
Tanh & \SI{85.38}{\percent} & $\sigma=4.04$ & \SI{67.65}{\percent} & $\sigma=2.01$ & \SI{90.47}{\percent} & \SI{71.29}{\percent} & 48 -- \hphantom{0}92 & 65.2\\
Softsign & \SI{88.57}{\percent} & $\sigma=4.00$ & \SI{69.32}{\percent} & $\sigma=1.68$ & \SI{93.04}{\percent} & \SI{72.40}{\percent} & 55 -- 117 & 83.2\\
\gls{ReLU} & \SI{94.35}{\percent} & $\sigma=3.38$ & \SI{71.01}{\percent} & $\sigma=1.63$ & \SI{98.20}{\percent} & \SI{74.85}{\percent} & 52 -- \hphantom{0}98 & 75.5\\
Softplus & \SI{83.03}{\percent} & $\sigma=2.07$ & \SI{68.28}{\percent} & $\sigma=1.74$ & \SI{93.04}{\percent} & \SI{75.99}{\percent} & 56 -- \hphantom{0}89 & 68.9\\
\gls{LReLU} & \SI{93.83}{\percent} & $\sigma=3.89$ & \SI{74.66}{\percent} & $\sigma=2.11$ & \SI{97.56}{\percent} & \SI{78.08}{\percent} & 52 -- 120 & 80.1\\
\gls{PReLU} & \SI{95.53}{\percent} & $\sigma=1.92$ & \SI{71.69}{\percent} & $\sigma=1.37$ & \SI{98.17}{\percent} & \SI{74.69}{\percent} & 59 -- 101 & 78.8\\
\gls{ELU} & \SI{95.42}{\percent} & $\sigma=3.57$ & \SI{75.09}{\percent} & $\sigma=2.39$ & \SI{98.54}{\percent} & \SI{78.66}{\percent} & 66 -- \hphantom{0}72 & 67.2\\
\bottomrule
\end{tabular}
\caption[Activation function evaluation results on STL-10]{Test accuracy of
adjusted baseline models trained with different activation
functions on STL-10. For \gls{LReLU}, $\alpha = 0.3$ was chosen.}
\label{table:STL-10-accuracies-activation-functions}
\end{table}
\begin{figure}[ht]
\centering
\begin{tikzpicture}
\definecolor{color1}{HTML}{E66101}
\definecolor{color2}{HTML}{FDB863}
\definecolor{color3}{HTML}{B2ABD2}
\definecolor{color4}{HTML}{5E3C99}
\begin{axis}[
legend pos=north west,
legend cell align={left},
axis x line=middle,
axis y line=middle,
x tick label style={/pgf/number format/fixed,
/pgf/number format/fixed zerofill,
/pgf/number format/precision=1},
y tick label style={/pgf/number format/fixed,
/pgf/number format/fixed zerofill,
/pgf/number format/precision=1},
grid = major,
width=16cm,
height=8cm,
grid style={dashed, gray!30},
xmin=-2, % start the diagram at this x-coordinate
xmax= 2, % end the diagram at this x-coordinate
ymin=-1, % start the diagram at this y-coordinate
ymax= 2, % end the diagram at this y-coordinate
xlabel=x,
ylabel=y,
tick align=outside,
enlargelimits=false]
\addplot[domain=-2:2, color1, ultra thick,samples=500] {1/(1+exp(-x))};
\addplot[domain=-2:2, color2, ultra thick,samples=500] {tanh(x)};
\addplot[domain=-2:2, color4, ultra thick,samples=500] {max(0, x)};
\addplot[domain=-2:2, color4, ultra thick,samples=500, dashed] {ln(exp(x) + 1)};
\addplot[domain=-2:2, color3, ultra thick,samples=500, dotted] {max(x, exp(x) - 1)};
\addlegendentry{$\varphi_1(x)=\frac{1}{1+e^{-x}}$}
\addlegendentry{$\varphi_2(x)=\tanh(x)$}
\addlegendentry{$\varphi_3(x)=\max(0, x)$}
\addlegendentry{$\varphi_4(x)=\log(e^x + 1)$}
\addlegendentry{$\varphi_5(x)=\max(x, e^x - 1)$}
\end{axis}
\end{tikzpicture}
\caption[Activation functions]{Activation functions plotted in $[-2, +2]$.
$\tanh$ and ELU are able to produce negative numbers. The image of
ELU, ReLU and Softplus is not bound on the positive side, whereas
$\tanh$ and the logistic function are always below~1.}
\label{fig:activation-functions-plot}
\end{figure}
\glsreset{LReLU}
\twocolumn |
function [signal, state] = flt_dynamicloreta(varargin)
% Return the current source density for a given head model and data using
% the cortically-constrained LORETA (low resolution electrical
% tomographic analysis) with a Bayesian update scheme for hyperparameters.
% The reconstructed CSD time-series (or source potential maps) will be
% stored in signal.srcpot. This matrix has dimension [num_voxels x num_samples].
%
% Author: Tim Mullen, Jan 2013, SCCN/INC/UCSD
% Alejandro Ojeda, Jan 2013, SCCN/INC/UCSD
% Christian Kothe, Jan 2013, SCCN/INC/UCSD
if ~exp_beginfun('filter'), return; end
declare_properties('name','Dynamic LORETA', 'experimental',true, 'independent_channels',false, 'independent_trials',false);
arg_define(varargin, ...
arg_norep({'signal','Signal'}), ...
arg_nogui({'K','ForwardModel'},[],[],'Forward model (matrix)','shape','matrix'), ...
arg_nogui({'L','LaplacianOperator'},[],[],'Laplacian operator. Sparse matrix of N sources x N sources; this matrix is used as the square root of the precision matrix of the sources.'), ...
arg_sub({'options','LoretaOptions'},{},...
{ ...
arg({'maxTol','MaxTolerance'},1e-12,[0 Inf],'Tolerance for hyperparameter update loop','cat','Loreta Options'), ...
arg({'maxIter','MaxIterations'},100,[1 Inf],'Maximum iterations for hyperparameter update loop','cat','Loreta Options'), ...
arg({'gridSize','GridSize'},100,[1 Inf],'Lambda grid size.'), ...
arg({'history','TrackHistory'},false,[],'Track history for hyperparameters'), ...
arg({'verbose','VerboseOutput'},false,[],'Verbosity','cat','Loreta Options'), ...
arg({'initNoiseFactor','InitialNoiseFactor'},0.001,[0 Inf],'Fraction of noise level. Used for initializing alpha parameter','cat','Loreta Options') ...
arg({'block_size','BlockSize'},5, [], 'Block granularity for processing. The inverse operator will be updated using blocks of this many samples. This assumes that the inverse solution is spatially stationary over this many samples.'), ...
arg({'skipFactor','SkipFactor'},0,[0 Inf],'Number of blocks to skip'), ...
arg({'maxblocks','MaxBlocks'},Inf,[0 Inf],'Maximum number of blocks'), ...
arg({'standardize','Standardize'},'all',{'none','channels','all'},'Rescale data to unit variance. If ''channels'', standardization is carried out across channels for each time point. If ''all'' each data sample is normalized by the standard deviation taken over all data.'), ...
arg({'useGPU','UseGPU'},false,[],'Use GPU to accelerate computation.'), ...
},'Additional options for Loreta function'), ...
arg({'verb','Verbosity'},false,[],'Verbose output'), ...
arg_nogui({'state','State'},[],[],'State object. When provided, hyperparameters will be estimated adaptively from prior state'));
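% Example invocation (a sketch; signal, K and L are assumed to be supplied by
% the surrounding BCILAB pipeline, and option names follow the arg_define above):
%   [signal,state] = flt_dynamicloreta('signal',signal,'K',K,'L',L, ...
%       'LoretaOptions',{'BlockSize',8,'VerboseOutput',true});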
if verb
fprintf('Estimating current source density using cLORETA (%s)\n',mfilename);
end
[nchs, npnts, ntrs] = size(signal.data);
if isempty(options.block_size) || options.block_size > npnts
    options.block_size = npnts;
end
numsplits = floor(npnts/options.block_size);
% if necessary, cast to double-precision
if ~strcmpi(class(signal.data),'double')
signal.data = double(signal.data);
end
% standardize the data
if ~strcmpi(options.standardize,'none')
    switch options.standardize
        case 'channels'
            scale = std(signal.data,[],1);
        case 'time'
            scale = std(signal.data,[],2);
        case 'all'
            scale = std(signal.data(:));
    end
    signal.data = bsxfun(@rdivide,signal.data,scale);
end
if isempty(state) || ~isfield(state,'iLV') || isempty(state.iLV)
if verb
fprintf('...computing SVD of LFM.\n');
end
% mode is offline or we are initializing online filter
% perform one-time SVD for faster computation.
[U,S,V] = svd(K/L,'econ');
state.iLV = L\V;
state.s2 = diag(S).^2; %s^2
state.Ut = U';
state.sigma2 = repmat({options.sigma2},1,ntrs);
state.tau2 = repmat({options.tau2},1,ntrs);
end
if npnts == 0
% no data
signal.srcpot = [];
state.srcweights = [];
exp_endfun; return;
end
signal.srcpot = zeros([size(K,2), npnts, ntrs]);
state.srcweights = zeros(size(L,1),nchs);
sum_srcweights = zeros(size(L,1),nchs);
signal.loretaHistory = struct([]);
if verb
fprintf('...assuming %d stationary blocks of length %d\n',numsplits,options.block_size);
end
% loop over all trials
for tr=1:ntrs
if verb
fprintf('\nTrial (%d/%d).',tr,ntrs);
end
k = 0;
% loop over sub-blocks and estimate CSD for each block
for i=0:options.skipFactor+1:numsplits-1
if verb
if i+1 >= floor(numsplits*(k+1)/10)
k = k + 1;
fprintf('%0.3g%%...',round((i/numsplits)*100));
end
end
range = 1+floor(i*npnts/numsplits) : min(npnts,floor((i+1)*npnts/numsplits));
% call (dynamic bayesian) loreta estimator
[signal.srcpot(:,range,tr), state.sigma2{tr}, state.tau2{tr}, state.srcweights, tmpHist] ...
= dynamicLoreta( signal.data(:,range,tr), state.Ut, state.s2, state.iLV,...
state.sigma2{tr}, state.tau2{tr}, options);
if ~isempty(tmpHist)
signal.loretaHistory{tr} = [signal.loretaHistory{tr},tmpHist];
end
if options.skipFactor > 0
    % estimate CSD for samples between blocks using current inverse operator
    range = 1+floor((i+1)*npnts/numsplits) : min(npnts,floor((i+options.skipFactor+1)*npnts/numsplits));
signal.srcpot(:,range,tr) = state.srcweights*signal.data(:,range,tr);
end
% running sum
sum_srcweights = sum_srcweights + state.srcweights;
end
end
if numsplits > 1
% store the mean inverse operator over all splits
state.srcweights = sum_srcweights/(numsplits*ntrs);
end
if ~strcmpi(options.standardize,'none')
    % rescale data to original units
% signal.srcpot = signal.srcpot*scale;
% state.srcweights = state.srcweights/scale;
signal.srcpot = bsxfun(@times,signal.srcpot,scale);
% signal.srcpot = bsxfun(@rdivide,signal.srcpot,std(signal.srcpot,[],1));
% state.srcweights = bsxfun(@times,state.srcweights,scale'); %state.srcweights/mean(scale);
end
if verb
fprintf('done.\n');
end
exp_endfun;
|
"""Tunes `param` until `expr` evaluates to zero within δ.
`expr` must be monotonically increasing in terms of `param`."""
macro binary_opt(expr, param, min, max, δ)
    # note: `norm` below comes from the LinearAlgebra stdlib (`using LinearAlgebra`)
    :(m = $min;M = $max;z=zero($δ);
while(true)
$(esc(param)) = (m+M)/2
println("$((m+M)/2)")
e = $(esc(expr))
if norm(e) < $δ
break
elseif e < z
m = $(esc(param))
else
M = $(esc(param))
end
end)
end
"""Returns the location and values of local maxima in `sig`."""
function local_maxima(sig)
l = length(sig)
res = Tuple{Int, T}[]
if l < 3
return res
end
@inbounds for i = 2:l-1
if sig[i-1] < sig[i] && sig[i+1] < sig[i]
append!(res, (i, sig[i]))
end
end
res
end |
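# Usage sketch:
#   local_maxima([0, 2, 1, 3, 0])  # => [(2, 2), (4, 3)]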
(************************************************************************)
(* * The Coq Proof Assistant / The Coq Development Team *)
(* v * INRIA, CNRS and contributors - Copyright 1999-2018 *)
(* <O___,, * (see CREDITS file for the list of authors) *)
(* \VV/ **************************************************************)
(* // * This file is distributed under the terms of the *)
(* * GNU Lesser General Public License Version 2.1 *)
(* * (see LICENSE file for the text of the license) *)
(************************************************************************)
Require Import Morphisms BinInt ZDivEucl.
Local Open Scope Z_scope.
(** * Definitions of division for binary integers, Euclid convention. *)
(** In this convention, the remainder is always positive.
For other conventions, see [Z.div] and [Z.quot] in file [BinIntDef].
To avoid collision with the other divisions, we place this one
under a module.
*)
Module ZEuclid.
Definition modulo a b := Z.modulo a (Z.abs b).
Definition div a b := (Z.sgn b) * (Z.div a (Z.abs b)).
Instance mod_wd : Proper (eq==>eq==>eq) modulo.
Proof. congruence. Qed.
Instance div_wd : Proper (eq==>eq==>eq) div.
Proof. congruence. Qed.
Theorem div_mod a b : b<>0 -> a = b*(div a b) + modulo a b.
Proof.
intros Hb. unfold div, modulo.
rewrite Z.mul_assoc. rewrite Z.sgn_abs. apply Z.div_mod.
now destruct b.
Qed.
Lemma mod_always_pos a b : b<>0 -> 0 <= modulo a b < Z.abs b.
Proof.
intros Hb. unfold modulo.
apply Z.mod_pos_bound.
destruct b; compute; trivial. now destruct Hb.
Qed.
Lemma mod_bound_pos a b : 0<=a -> 0<b -> 0 <= modulo a b < b.
Proof.
intros _ Hb. rewrite <- (Z.abs_eq b) at 3 by Z.order.
apply mod_always_pos. Z.order.
Qed.
Include ZEuclidProp Z Z Z.
End ZEuclid.
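(** A quick illustration of the Euclid convention (the remainder is
    non-negative even for negative arguments); a small sanity check: *)
Example ZEuclid_example :
  ZEuclid.div (-1) (-2) = 1 /\ ZEuclid.modulo (-1) (-2) = 1.
Proof. split; reflexivity. Qed.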
|
# Common functions compatible for use with physical vectors
using LoopVectorization  # provides the @turbo macro used below
function dot_product(A::AbstractArray{T1,N},B::AbstractArray{T2,N}) where {T1<:Number,T2<:Number,N}
size(A) === size(B) || throw(ArgumentError("Vectors must have the same shape"))
u = zeros(promote_type(T1,T2),size(A)[1:end-1]...)
dot_product!(u,A,B)
return u
end
function dot_product!(u::AbstractArray{T1,N}, A::AbstractArray{T2,N2},B::AbstractArray{T3,N2}) where {T1<:Number,T2<:Number,T3<:Number,N,N2}
    (size(A) === size(B)) || throw(ArgumentError("Vectors must have the same shape"))
    (N === N2-1) || throw(ArgumentError("Output should be scalar matrix, one dimension less than input"))
    T = promote_type(T1,T2,T3)
    ncomp = size(A)[end]  # number of vector components, stored along the last axis
    @turbo for I in CartesianIndices(u)
        x_temp = zero(T)
        for i in 1:ncomp
            x_temp += A[I,i]*B[I,i]
        end
        u[I] = x_temp
    end
    return u
end
function cross_product(A::AbstractArray{T1,4},B::AbstractArray{T2,4}) where {T1<:Number,T2<:Number}
size(A) === size(B) || throw(ArgumentError("Vectors must have the same shape"))
s = size(A)
u = zeros(promote_type(T1,T2),s...)
cross_product!(u,A,B)
return u
end
function cross_product!(u::AbstractArray{T1,4},A::AbstractArray{T2,4},B::AbstractArray{T3,4}) where {T1<:Number,T2<:Number,T3<:Number}
(size(A) === size(B) && size(A) === size(u))|| throw(ArgumentError("Vectors must have the same shape"))
s = size(u)
@turbo for r in 1:s[3], q in 1:s[2], p in 1:s[1]
u[p,q,r,1] = A[p,q,r,2]*B[p,q,r,3] - A[p,q,r,3]*B[p,q,r,2]
u[p,q,r,2] = A[p,q,r,3]*B[p,q,r,1] - A[p,q,r,1]*B[p,q,r,3]
u[p,q,r,3] = A[p,q,r,1]*B[p,q,r,2] - A[p,q,r,2]*B[p,q,r,1]
end
end
function square_norm(A::AbstractArray{T,N}) where {T<:Number,N}
u = zeros(T,size(A)[1:end-1]...)
square_norm!(u,A)
return u
end
function square_norm!(u::AbstractArray{T1,N1},A::AbstractArray{T2,N2}) where {T1<:Number,T2<:Number,N1,N2}
    # dot_product! fills u with the squared norms; the square root below
    # turns them into Euclidean norms
    dot_product!(u,A,A)
    @. u = u^0.5
end
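# Usage sketch: the pointwise cross product is orthogonal to its inputs,
# so dotting the result with either input should give (numerically) zero.
#   A = rand(8, 8, 8, 3); B = rand(8, 8, 8, 3)
#   C = cross_product(A, B)
#   maximum(abs, dot_product(C, A))  # ≈ 0 up to rounding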
/*solver/fixed_point.c
* Solves a fixed point equation using
* sequence acceleration.
*
* Author: Benjamin Vatter j.
* email : [email protected]
* date: 15 August 2015
*/
#include <math.h>
#include <stdlib.h>
#include <gsl/gsl_math.h>
#include "fixed_point.h"
/*
* Finds the fixed point of a function f
* Translated from SciPy's fixed_point function
*/
void fixed_point(int n, ff_function *f, double *x0,
double xtol, int maxiter, double *out)
{
//out = malloc(sizeof(double)*n);
double *p = malloc(sizeof(double)*n);
double *p1, *p2;
double d;
int pass = -1; /* -1 signals no convergence (guards maxiter <= 0) */
int i, j;
for (i=0; i<n; i++)
out[i] = x0[i];
for (i=0; i<maxiter; i++)
{
p1 = malloc(sizeof(double)*n);
p2 = malloc(sizeof(double)*n);
f->function(out, f->params, p1);
f->function(p1, f->params, p2);
pass = 1;
for (j=0; j<n; j++)
{
d = p2[j] - 2.0*p1[j] + out[j];
if (d==0) {
p[j] = p2[j];
} else {
p[j] = out[j] - pow(p1[j] - out[j], 2.0) / d;
}
if (out[j] == 0 && fabs(p[j]) > xtol){
pass = 0;
}
if (out[j] != 0 && fabs((p[j] - out[j])/out[j]) > xtol){
pass = 0;
}
out[j] = p[j];
}
if (pass == 1) {
free(p1);
free(p2);
break;
}
if(i+1 >= maxiter){
free(p1);
free(p2);
pass = -1;
break;
}
free(p1);
free(p2);
}
free(p);
// Free the garbage result in case of no convergence
// (note: this assumes the caller heap-allocated `out`)
if (pass == -1) {
free(out);
}
}
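/*
 * A minimal usage sketch (hypothetical, for illustration only). It assumes
 * that ff_function in fixed_point.h is a struct with the two fields used
 * above, roughly:
 *
 *   typedef struct {
 *       void (*function)(double *x, void *params, double *out);
 *       void *params;
 *   } ff_function;
 *
 * Solving x = cos(x) in one dimension (fixed point near 0.739085):
 *
 *   void cos_map(double *x, void *params, double *out)
 *   {
 *       (void) params;
 *       out[0] = cos(x[0]);
 *   }
 *
 *   ff_function f = { cos_map, NULL };
 *   double x0 = 1.0, res;
 *   fixed_point(1, &f, &x0, 1e-10, 500, &res);
 */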
|
(** This file provides an efficient proof search strategy
for our derivation system defined in [Sat]. *)
Require Import Containers.Sets.
Require Import Env Sat.
Require Import SetoidList.
Require Import Arith.
Require Import FoldProps.
Fact belim : forall b, b = false -> b = true -> False.
Proof. congruence. Qed.
(** * The functor [SATCAML] *)
Module SATCAML
(Import CNF : Cnf.CNF)
(Import E : ENV_INTERFACE CNF).
(** We start by importing some definitions from the SAT functor. *)
Module Import S := SAT CNF E.
Module SemF := S.SemF.
Definition submodel_e G (M : Sem.model) := forall l, G |= l -> M l.
Definition compatible_e G (D : cset) :=
forall (M : Sem.model), submodel_e G M -> Sem.sat_goal M D.
Notation "G |- D" := (mk_sequent G D) (at level 80).
(** Relating lists of literals to clauses, and lists of lists of literals
to sets of clauses (the reverse of [elements]). *)
Fixpoint l2s (l : list L.t) : clause :=
match l with
| nil => {}
| a::q => {a; l2s q}
end.
Fixpoint ll2s (l : list (list L.t)) : cset :=
match l with
| nil => {}
| a::q => {l2s a; ll2s q}
end.
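(* For example, [l2s (a::b::nil)] is the two-element clause {a; {b; {}}},
   i.e. the clause containing [a] and [b]; [ll2s] lifts this construction
   to sets of clauses. *)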
Property l2s_iff : forall l C, l \In l2s C <-> InA _eq l C.
Proof.
intros; induction C; simpl.
intuition.
split; simpl; intro H.
rewrite add_iff in H; destruct H; intuition.
rewrite add_iff; inversion H; intuition.
Qed.
Property ll2s_app : forall l l', ll2s (app l l') [=] ll2s l ++ ll2s l'.
Proof.
induction l; intros l'; simpl.
intro k; set_iff; intuition.
rewrite IHl, Props.union_add; reflexivity.
Qed.
Property ll2s_cfl : forall ll, cfl ll [=] ll2s ll.
Proof.
induction ll; rewrite cfl_1; simpl.
reflexivity.
apply add_m; auto. rewrite cfl_1 in IHll; exact IHll.
Qed.
Property l2s_Subset :
forall C C', (forall l, InA _eq l C -> InA _eq l C') <-> l2s C [<=] l2s C'.
Proof.
intros C C'; split; intros H k.
rewrite !l2s_iff; auto.
rewrite <- !l2s_iff; auto.
Qed.
Lemma ll2s_expand : forall (M : Sem.model) l, M l ->
forall C, C \In ll2s (L.expand l) -> Sem.sat_clause M C.
Proof.
intros M l Hl C HC.
assert (HM := Sem.wf_expand M l Hl).
set (L := L.expand l) in *; clearbody L; clear l Hl.
revert L HC HM; induction L; intros; simpl in *.
contradiction (empty_1 HC).
rewrite add_iff in HC; destruct HC as [HC|HC].
destruct (HM a (or_introl _ (refl_equal _))) as [k [Hk1 Hk2]].
clear HM; exists k; split; auto.
assert (Hk := ListIn_In Hk1); rewrite <- l2s_iff, HC in Hk; auto.
apply (IHL HC); intuition.
Qed.
(** Facts about measures of lists of (lists of) literals *)
Property lsize_pos : forall l, l <> nil -> L.lsize l > 0.
Proof.
induction l; intros; simpl; auto.
congruence.
generalize (L.size_pos a); omega.
Qed.
Property llsize_app :
forall l l', L.llsize (app l l') = L.llsize l + L.llsize l'.
Proof.
induction l; simpl; intros; intuition.
generalize (IHl l'); omega.
Qed.
(** ** Functions computing the BCP
The following functions compute the proof search in a way that is similar
to the OCaml procedure in [JFLA08]. We perform all possible boolean
constraint propagation (BCP) before we start splitting on literals. *)
(** The first function reduces a clause with respect to a partial
assignment. It returns [redNone] if the clause contains a literal
that is true in the assignment, and the reduced clause in the
other case (with a flag telling if the function has changed anything). *)
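(* For instance, if [G] entails [L.mk_not l1] but neither [l1], [l2] nor
   [L.mk_not l2], then [reduce (l1::l2::nil) = redSome (l2::nil) true]:
   the literal [l1] is dropped and the flag records the change. If instead
   [G] entails [l2], the clause is already satisfied and [reduce] returns
   [redNone]. *)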
Section Reduce.
Variable G : E.t.
Variable D : cset.
Inductive redRes : Type :=
| redSome : list L.t -> bool -> redRes
| redNone : redRes.
Fixpoint reduce (C : list L.t) : redRes :=
match C with
| nil => redSome nil false
| l::C' =>
if query l G then redNone
else
match reduce C' with
| redNone => redNone
| redSome Cred b =>
if query (L.mk_not l) G then
redSome Cred true
else redSome (l::Cred) b
end
end.
Inductive reduce_spec_ (C : list L.t) : redRes -> Type :=
| reduce_redSome :
forall Cred (bred : bool)
(HCred : Cred = List.filter (fun l => negb (query (L.mk_not l) G)) C)
(Hsub : forall l, List.In l Cred -> query l G = false)
(Hbred : if bred then L.lsize Cred < L.lsize C else C = Cred),
reduce_spec_ C (redSome Cred bred)
| reduce_redNone :
forall l (Hl : query l G = true) (Hin : List.In l C),
reduce_spec_ C redNone.
Theorem reduce_spec : forall C, reduce_spec_ C (reduce C).
Proof.
induction C; simpl.
constructor; auto; intros; contradiction.
case_eq (query a G); intro Hq.
constructor 2 with a; intuition.
destruct IHC; simpl.
case_eq (query (L.mk_not a) G); intro Hnq.
constructor; auto; simpl.
rewrite Hnq; simpl; auto.
generalize (L.size_pos a); destruct bred; try rewrite Hbred; omega.
constructor.
simpl; rewrite Hnq; simpl; congruence.
intros l Hl; inversion Hl; subst; eauto.
destruct bred; try congruence; simpl; omega.
constructor 2 with l; intuition.
Qed.
Unset Regular Subst Tactic.
Corollary reduce_correct : forall C Cred bred,
reduce C = redSome Cred bred -> derivable (G |- {l2s Cred; D}) ->
derivable (G |- {l2s C; D}).
Proof.
intros C Cred bred Hred Hder;
destruct (reduce_spec C); inversion Hred; subst.
set (reds := filter (fun l : L.t => query (L.mk_not l) G) (l2s C)).
assert (M : Proper (_eq ==> @eq bool) (fun l => query (L.mk_not l) G))
by (eauto with typeclass_instances).
apply ARed with reds (l2s C).
unfold reds; intros k Hk; apply (filter_2 Hk).
unfold reds; intros k Hk; apply (filter_1 Hk).
apply add_1; auto.
assert (E : l2s Cred [=] l2s C \ reds).
rewrite <- H0; unfold reds; revert M; clear; intro M; induction C; simpl.
intuition.
case_eq (query (L.mk_not a) G); intro Ha; simpl.
rewrite IHC, EProps.filter_add_1; auto.
intro k; set_iff; intuition.
apply H1; apply filter_3; auto; rewrite <- H2; assumption.
rewrite IHC, EProps.filter_add_2; auto.
intro k; set_iff; intuition.
rewrite H0 in Ha; rewrite (filter_2 H) in Ha; discriminate.
rewrite <- E; refine (weakening _ Hder _ _); split; simpl; intuition.
Qed.
(* Corollary reduce_complete : forall C Cred bred M, *)
(* reduce C = redSome Cred bred -> *)
(* submodel_e G M -> Sem.sat_clause M (l2s C) -> Sem.sat_clause M (l2s Cred). *)
(* Proof. *)
(* intros C Cred bred M Hred Hsub; *)
(* destruct (reduce_spec C); inversion Hred; subst. *)
(* intros [l Hsatl]; exists l; intuition. *)
(* rewrite <- H0; revert H H1 Hsub; clear; induction C; simpl; auto; intros. *)
(* rewrite add_iff in H1; destruct H1. *)
(* case_eq (query (L.mk_not a) G); intro Hq; simpl. *)
(* contradiction (SemF.model_em M l H). *)
(* apply Hsub; rewrite <- H0; auto. *)
(* apply add_1; auto. *)
(* destruct (negb (query (L.mk_not a) G)); [apply add_2 |]; *)
(* exact (IHC H H0 Hsub). *)
(* Qed. *)
Corollary reduce_complete : forall C Cred bred M,
reduce C = redSome Cred bred ->
submodel_e G M -> Sem.sat_clause M (l2s Cred) ->
Sem.sat_clause M (l2s C).
Proof.
intros C Cred bred M Hred Hsub;
destruct (reduce_spec C); inversion Hred; subst.
intros [l Hsatl]; exists l; intuition.
rewrite <- H0 in H1; revert H H1 Hsub; clear;
induction C; simpl; auto; intros.
destruct (query (L.mk_not a) G); simpl in *.
apply add_2; apply IHC; auto.
rewrite add_iff in H1; destruct H1; [apply add_1 | apply add_2]; auto.
Qed.
End Reduce.
(** The second function simplifies a set of clauses with respect
to a partial assignment. It returns [bcpNone] if one of the clauses
reduced to the empty clause. Otherwise, it returns the set of
simplified clauses along with a new partial assignment. Indeed,
if simplification yields a unit clause, [AAssume] is
immediately applied and the literal is added to the partial
assignment for the rest of the simplification. Again, we return
a flag saying if the function has simplified anything or not.
*)
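(* For instance, starting from an environment [G] that entails nothing,
   with [D = (l::nil) :: (L.mk_not l :: k :: nil) :: nil]: the first
   clause is a unit clause, so [l] is assumed; the second clause then
   reduces to the unit clause [k::nil], so [k] is assumed in turn, and
   [bcp] returns the extended environment together with the expansions
   of [l] and [k] as the remaining clauses. *)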
Section BCP.
Inductive bcpRes : Type :=
| bcpSome : E.t -> list (list L.t) -> bool -> bcpRes
| bcpNone : bcpRes.
Definition extend l s := app (L.expand l) s.
Fixpoint bcp (G : E.t) (D : list (list L.t)) : bcpRes :=
match D with
| nil => bcpSome G nil false
| C::D' =>
match reduce G C with
| redNone =>
match bcp G D' with
| bcpNone => bcpNone
| bcpSome G' D' _ => bcpSome G' D' true
end
| redSome nil bred => bcpNone
| redSome (l::nil) _ =>
match assume l G with
| Normal newG =>
match bcp newG D' with
| bcpNone => bcpNone
| bcpSome G' D' _ => bcpSome G' (extend l D') true
end
| Inconsistent => bcpNone
end
| redSome Cred bred =>
match bcp G D' with
| bcpNone => bcpNone
| bcpSome G' D' b =>
bcpSome G' (Cred::D') (bred || b)
end
end
end.
Lemma weak_assume :
forall (G : t) (D : cset) (l : L.t),
singleton l \In D -> ~ G |= l ->
forall newG, assume l G = Normal newG ->
derivable (newG |- cfl (L.expand l) ++ D) ->
derivable (G |- D).
Proof.
intros G0 D0 l Hl HGl G1 Hass1 Hder.
case_eq (query (L.mk_not l) G0); intro Hquery.
(* - if [L.mk_not l] is entailed by [G0] then
we can reduce [{l}] and apply [AConflict]. *)
apply ARed with (reds:={l}) (C:={l}); intuition.
rewrite <- (singleton_1 H); assumption.
apply AConflict; apply add_1; intro k; set_iff; intuition.
(* - otherwise, we first [AUnsat] with [l], the left branch
is our hypothesis and we reduce and apply [AConflict] on
the right. *)
case_eq (assume (L.mk_not l) G0); [intros G2 Hass2 | intros Hass2].
apply (AUnsat G0 D0 l G1 G2 Hass1 Hass2 Hder).
apply ARed with (reds:={l}) (C:={l}).
intro k; set_iff; intro Hk; apply query_assumed;
rewrite (assumed_assume Hass2); apply add_1; rewrite Hk; auto.
reflexivity.
apply union_3; auto.
apply AConflict; apply add_1; intro k; set_iff; intuition.
contradiction HGl; rewrite <- (L.mk_not_invol l);
apply assumed_inconsistent; assumption.
Qed.
Theorem bcp_correct :
forall D Dbasis G Gext Dred b,
bcp G D = bcpSome Gext Dred b ->
derivable (Gext |- ll2s Dred ++ Dbasis) ->
derivable (G |- ll2s D ++ Dbasis).
Proof with (eauto with typeclass_instances).
intro D0; induction D0; intros Dbasis G0 Gext Dred b Hbcp Hder;
simpl in Hbcp.
inversion Hbcp; subst; simpl in Hder; inversion Hder; eauto with set.
assert (Hred := reduce_correct G0 (ll2s D0 ++ Dbasis) a).
assert (Hred' := reduce_spec G0 a).
destruct (reduce G0 a) as [ared bred|].
(* - if the reduction returned a clause (it can't be empty) *)
destruct ared as [|l ared]; try discriminate.
inversion Hred'; subst; destruct ared.
(* - if it is a singleton [{l}], [l] is consistent with [G0] and
we can apply [AAssume] *)
assert (Hl' : ~ (G0 |= l)) by
(intro abs; rewrite (Hsub l (or_introl _ (refl_equal _))) in abs;
discriminate).
case_eq (assume l G0); [intros newG Hass | intros Hass];
rewrite Hass in Hbcp.
simpl; rewrite Props.union_add.
apply (Hred (l::nil) bred (refl_equal _)); clear Hred.
assert (IH := IHD0 (cfl (L.expand l) ++ Dbasis) newG); clear IHD0.
destruct (bcp newG D0) as [Gext' Dred' b'|];
try discriminate; inversion Hbcp.
assert (IH' := IH Gext' Dred' b' (refl_equal _)); clear IH; subst.
assert (Hl : Equal (l2s (l::nil)) {l})
by (simpl; symmetry; apply Props.singleton_equal_add).
destruct (In_dec (ll2s D0 ++ Dbasis) {l}).
refine (weak_assume G0 _ l (add_1 _ _) Hl' _ Hass _)...
rewrite Props.add_equal.
2:(simpl; rewrite <- Props.singleton_equal_add; exact Htrue).
rewrite <- Props.union_assoc.
rewrite (Props.union_sym (cfl (L.expand l)) (ll2s D0)).
rewrite Props.union_assoc; apply IH'.
unfold extend in Hder; rewrite ll2s_app in Hder.
rewrite ll2s_cfl, <- Props.union_assoc,
(Props.union_sym (ll2s Dred')); exact Hder.
refine (AAssume G0 _ l (add_1 _ _) _ Hass _)...
rewrite Hl, Props.remove_add; auto.
rewrite <- Props.union_assoc.
rewrite (Props.union_sym (cfl (L.expand l)) (ll2s D0)).
rewrite Props.union_assoc; apply IH'.
unfold extend in Hder; rewrite ll2s_app in Hder.
rewrite ll2s_cfl, <- Props.union_assoc,
(Props.union_sym (ll2s Dred')); exact Hder.
assert (Z : List.In l (l::nil)) by (left; auto).
assert (Hnotl : ~ (G0 |= L.mk_not l)).
rewrite HCred, filter_In in Z; destruct (query (L.mk_not l) G0);
auto; destruct Z; discriminate.
contradiction (Hnotl (assumed_inconsistent Hass)).
(* - if the reduced clause is not a unit clause *)
simpl; rewrite Props.union_add.
apply (Hred _ _ (refl_equal _)).
set (Cred := l::t0::ared) in *; clearbody Cred.
assert (IH := IHD0 {l2s Cred; Dbasis} G0); clear IHD0.
destruct (bcp G0 D0) as [Gext' Dred' b'|];
try discriminate; inversion Hbcp.
assert (IH' := IH Gext' Dred' b' (refl_equal _)); clear IH; subst.
rewrite Props.union_sym, <- Props.union_add, Props.union_sym.
apply IH'.
rewrite Props.union_sym, Props.union_add,
Props.union_sym, <- Props.union_add; exact Hder.
(* - if the clause was eliminated *)
assert (IH := fun D => IHD0 D G0).
destruct (bcp G0 D0); inversion Hbcp; subst.
refine (weakening _ (IH _ _ _ _ (refl_equal _) Hder) _ _).
split; simpl; try rewrite Props.union_add; intuition.
Qed.
Theorem bcp_unsat :
forall D Dbasis G, bcp G D = bcpNone -> derivable (G |- ll2s D ++ Dbasis).
Proof with (eauto with typeclass_instances).
intro D0; induction D0; intros Dbasis G0 Hbcp; simpl in Hbcp.
discriminate.
assert (Hred := reduce_correct G0 (ll2s D0 ++ Dbasis) a).
destruct (reduce_spec G0 a).
(* - if the clause reduced to the empty clause, we apply [AConflict] *)
simpl; rewrite Props.union_add.
destruct Cred as [|l Cred].
apply (Hred nil bred (refl_equal _)).
apply AConflict; apply add_1; reflexivity.
destruct Cred.
(* - if the clause is a singleton [{l}], [G0] must be
consistent with [l] *)
assert (Z : List.In l (l::nil)) by (left; auto).
assert (Hnotl : ~ (G0 |= L.mk_not l)).
rewrite HCred, filter_In in Z; destruct (query (L.mk_not l) G0);
auto; destruct Z; discriminate.
assert (Hl' : ~ (G0 |= l)) by
(intro abs; rewrite (Hsub l (or_introl _ (refl_equal _))) in abs;
discriminate).
clear Z; apply (Hred (l::nil) bred (refl_equal _)).
case_eq (assume l G0); [intros newG Hass | intros Hass];
rewrite Hass in Hbcp.
2:(contradiction (Hnotl (assumed_inconsistent Hass))).
assert (IH := IHD0 (ll2s (L.expand l) ++ Dbasis) newG);
clear IHD0.
destruct (bcp newG D0); try discriminate.
simpl; rewrite <- Props.singleton_equal_add.
assert (Hl : Equal (l2s (l::nil)) {l})
by (simpl; symmetry; apply Props.singleton_equal_add).
destruct (In_dec (ll2s D0 ++ Dbasis) {l}).
refine (weak_assume G0 _ l (add_1 _ _) Hl' _ Hass _)...
rewrite Props.add_equal; auto.
rewrite <- Props.union_assoc.
rewrite (Props.union_sym (cfl (L.expand l)) (ll2s D0)).
rewrite Props.union_assoc, ll2s_cfl; exact (IH (refl_equal _)).
refine (AAssume G0 _ l (add_1 _ _) _ Hass _)...
rewrite Props.remove_add; auto.
rewrite <- Props.union_assoc.
rewrite (Props.union_sym (cfl (L.expand l)) (ll2s D0)).
rewrite Props.union_assoc, ll2s_cfl; exact (IH (refl_equal _)).
(* if the clause is not a unit clause after reduction *)
assert (IH := IHD0 Dbasis G0); clear IHD0.
destruct (bcp G0 D0); try discriminate.
apply (Hred _ _ (refl_equal _)).
refine (weakening _ (IH (refl_equal _)) _ _).
split; simpl; intuition.
(* if the clause was eliminated *)
assert (IH := IHD0 Dbasis G0); clear IHD0.
destruct (bcp G0 D0); try discriminate.
refine (weakening _ (IH (refl_equal _)) _ _).
split; simpl; intuition; rewrite Props.union_add; intuition.
Qed.
Theorem bcp_progress :
forall D G Gext Dred b,
bcp G D = bcpSome Gext Dred b ->
if b then L.llsize Dred < L.llsize D else Gext = G /\ Dred = D.
Proof.
intro D0; induction D0; intros G0 Gext Dred b Hbcp; simpl in Hbcp.
inversion Hbcp; subst; split; reflexivity.
destruct (reduce_spec G0 a).
destruct Cred; try discriminate.
destruct Cred.
case_eq (assume t0 G0); [intros newG Hass | intros Hass];
rewrite Hass in Hbcp; try discriminate.
assert (IH := IHD0 newG).
destruct (bcp newG D0); inversion Hbcp; subst.
assert (IH' := IH _ _ _ (refl_equal _)); destruct b0.
unfold extend; simpl; rewrite llsize_app.
destruct bred; subst; simpl in *; generalize (L.size_expand t0); omega.
destruct IH'; subst; unfold extend; simpl; rewrite llsize_app.
destruct bred; subst; simpl in *; generalize (L.size_expand t0); omega.
assert (IH := IHD0 G0).
destruct (bcp G0 D0); inversion Hbcp; subst.
destruct bred; simpl.
assert (IH' := IH _ _ _ (refl_equal _)); destruct b0; simpl in *.
omega. rewrite (proj2 IH'); omega.
assert (IH' := IH _ _ _ (refl_equal _)); destruct b0; simpl in *.
rewrite Hbred; simpl; omega.
destruct IH'; split; congruence.
assert (IH := IHD0 G0).
destruct (bcp G0 D0); inversion Hbcp; subst.
assert (IH' := IH _ _ _ (refl_equal _)); destruct b0; simpl.
omega.
rewrite (proj2 IH'); revert Hin; clear; induction a; simpl.
contradiction. generalize (L.size_pos a); intuition.
Qed.
Theorem bcp_consistent :
forall D G Gext Dred,
bcp G D = bcpSome Gext Dred false ->
forall l C, C \In ll2s Dred -> l \In C ->
~ Gext |= l /\ ~ Gext |= L.mk_not l.
Proof.
intros D0 G0 Gext Dred Hbcp l C HC Hl.
assert (Hprog := bcp_progress D0 G0 _ _ _ Hbcp).
destruct Hprog; subst.
revert G0 l C Hl HC Hbcp; induction D0; intros; simpl in Hbcp.
simpl in HC; contradiction (empty_1 HC).
destruct (reduce_spec G0 a).
destruct Cred; try discriminate.
destruct Cred.
case_eq (assume t0 G0); [intros newG Hass | intros Hass];
rewrite Hass in Hbcp; try discriminate.
destruct (bcp newG D0); discriminate.
assert (IH := IHD0 G0 l); clear IHD0.
destruct (bcp G0 D0); inversion Hbcp; subst.
destruct bred; simpl in H3; try discriminate.
set (Z := t0 :: t1 :: Cred) in *; clearbody Z.
simpl in HC; rewrite add_iff in HC; destruct HC.
rewrite <- H in Hl; rewrite l2s_iff in Hl.
rewrite InA_alt in Hl; destruct Hl as [k [Hk1 Hk2]].
split; intro abs.
assert (Hk := Hsub k Hk2); rewrite <- Hk1 in Hk; congruence.
rewrite HCred in Hk2; rewrite filter_In in Hk2.
rewrite Hk1 in abs; destruct (query (L.mk_not k) G0);
destruct Hk2; discriminate.
clear HCred; subst; eauto.
destruct (bcp G0 D0); inversion Hbcp; subst.
Qed.
Lemma bcp_monotonic_env :
forall D G Gext Dred b,
bcp G D = bcpSome Gext Dred b -> dom G [<=] dom Gext.
Proof.
intro D0; induction D0; intros G0 Gext Dred b Hbcp;
simpl in Hbcp.
inversion Hbcp; subst; simpl; reflexivity.
destruct (reduce G0 a) as [Cred bred|].
destruct Cred as [|l Cred]; try discriminate.
destruct Cred.
case_eq (assume l G0); [intros newG Hass | intros Hass];
rewrite Hass in Hbcp; try discriminate.
assert (IH := IHD0 newG).
destruct (bcp newG D0); inversion Hbcp; subst.
transitivity (dom newG).
rewrite (assumed_assume Hass); intuition.
exact (IH _ _ _ (refl_equal _)).
assert (IH := IHD0 G0).
destruct (bcp G0 D0); inversion Hbcp; subst.
exact (IH _ _ _ (refl_equal _)).
assert (IH := IHD0 G0).
destruct (bcp G0 D0); inversion Hbcp; subst.
exact (IH _ _ _ (refl_equal _)).
Qed.
(* Theorem bcp_complete : *)
(* forall D Dbasis G Gext Dred b, *)
(* bcp G D = bcpSome Gext Dred b -> *)
(* compatible_e G (ll2s D ++ Dbasis) -> *)
(* compatible_e Gext (ll2s Dred ++ Dbasis). *)
(* Proof. *)
(* intro D0; induction D0; intros Dbasis G0 Gext Dred b Hbcp Hsat; *)
(* simpl in Hbcp. *)
(* inversion Hbcp; subst; simpl; exact Hsat. *)
(* assert (Hred := reduce_complete G0 a). *)
(* destruct (reduce G0 a) as [Cred bred|]. *)
(* destruct Cred as [|l Cred]; try discriminate. *)
(* destruct Cred. *)
(* assert (Hmon := bcp_monotonic_env D0 (assume l G0)). *)
(* assert (IH := IHD0 (ll2s (L.expand l) ++ Dbasis) (assume l G0)); *)
(* clear IHD0; destruct (bcp (assume l G0) D0); inversion Hbcp; subst. *)
(* assert (IH' := IH _ _ _ (refl_equal _)); clear IH. *)
(* intros M HM C HC; unfold extend in HC; *)
(* simpl in HC; rewrite ll2s_app in HC. *)
(* rewrite (Props.union_sym _ (ll2s l0)), Props.union_assoc in HC. *)
(* revert M HM C HC; apply IH'. *)
(* intros M HM C HC; unfold extend in HC; simpl in HC; *)
(* rewrite (Props.union_sym (ll2s D0)), Props.union_assoc in HC. *)
(* rewrite union_iff in HC; destruct HC. *)
(* apply ll2s_expand with l; auto; apply HM; apply EnvF.query_assume; auto. *)
(* apply Hsat; auto. *)
(* intros k Hk; apply HM; apply query_monotonic with G0; auto. *)
(* rewrite assumed_assume; intuition. *)
(* simpl; rewrite Props.union_add, Props.union_sym; apply add_2; auto. *)
(* assert (Hmon := bcp_monotonic_env D0 G0). *)
(* assert (IH := IHD0 Dbasis G0); *)
(* clear IHD0; destruct (bcp G0 D0); inversion Hbcp; subst. *)
(* assert (IH' := IH _ _ _ (refl_equal _)); clear IH. *)
(* intros M HM C HC; simpl in HC; rewrite Props.union_add in HC. *)
(* rewrite add_iff in HC; destruct HC. *)
(* destruct (Hred _ _ M (refl_equal _)). *)
(* intros k Hk; apply HM; apply query_monotonic with G0; auto; *)
(* exact (Hmon _ _ _ (refl_equal _)). *)
(* apply Hsat; [|simpl; apply union_2; apply add_1; auto]. *)
(* intros k Hk; apply HM; apply query_monotonic with G0; auto; *)
(* exact (Hmon _ _ _ (refl_equal _)). *)
(* exists x; rewrite <- H; exact H0. *)
(* apply IH'; auto; intros M' HM' C' HC'; apply Hsat; auto; *)
(* simpl; rewrite Props.union_add; apply add_2; auto. *)
(* assert (IH := IHD0 Dbasis G0); clear IHD0; destruct (bcp G0 D0); *)
(* inversion Hbcp; subst. *)
(* apply (IH Gext Dred b0 (refl_equal _)). *)
(* intros M HM C HC; apply Hsat; auto. *)
(* simpl; rewrite Props.union_add; apply add_2; exact HC. *)
(* Qed. *)
Theorem bcp_complete :
forall D Dbasis G Gext Dred b M,
bcp G D = bcpSome Gext Dred b ->
submodel_e Gext M -> Sem.sat_goal M (ll2s Dred ++ Dbasis) ->
submodel_e G M /\ Sem.sat_goal M (ll2s D ++ Dbasis).
Proof.
intro D0; induction D0; intros Dbasis G0 Gext Dred b M Hbcp Hsub Hsat;
simpl in Hbcp.
inversion Hbcp; subst; simpl; tauto.
assert (Hred := reduce_spec G0 a).
assert (Hred' := reduce_complete G0 a).
destruct (reduce G0 a) as [Cred bred|].
destruct Cred as [|l Cred]; try discriminate.
destruct Cred.
case_eq (assume l G0); [intros newG Hass | intros Hass];
rewrite Hass in Hbcp; try discriminate.
assert (IH := IHD0 (ll2s (L.expand l) ++ Dbasis) newG); clear IHD0.
destruct (bcp newG D0) as [Gext' Dred' b'|];
inversion Hbcp; subst.
destruct (IH Gext Dred' b' M) as [IH1 IH2]; auto.
intros C HC; apply Hsat; simpl; unfold extend;
rewrite ll2s_app, (Props.union_sym _ (ll2s Dred')),
Props.union_assoc; exact HC.
split.
intros k Hk; apply IH1; apply query_monotonic with G0; auto.
rewrite (assumed_assume Hass); intuition.
intros C HC; simpl in HC; rewrite Props.union_add, add_iff in HC;
destruct HC as [HC|HC].
destruct (Hred' _ _ M (refl_equal _)) as [k [Hk1 Hk2]].
intros k Hk; apply IH1; apply query_monotonic with G0; auto.
rewrite (assumed_assume Hass); intuition.
simpl; exists l; simpl; split; intuition.
apply IH1; apply (EnvF.query_assume Hass); auto.
exists k; rewrite <- HC; tauto.
apply IH2; revert HC; set_iff; clear; tauto.
assert (IH := IHD0 Dbasis G0); clear IHD0.
destruct (bcp G0 D0) as [Gext' Dred' b'|]; inversion Hbcp; subst.
destruct (IH Gext Dred' b' M) as [IH1 IH2]; auto.
intros C HC; apply Hsat; simpl; rewrite Props.union_add;
apply add_2; auto.
split; auto; intros C HC; simpl in HC;
rewrite Props.union_add, add_iff in HC;
destruct HC as [HC|HC].
destruct (Hred' _ _ _ (refl_equal _) IH1) as [k [Hk1 Hk2]].
apply Hsat; simpl; apply union_2; apply add_1; auto.
exists k; rewrite <- HC; tauto.
auto.
assert (IH := IHD0 Dbasis G0); clear IHD0.
destruct (bcp G0 D0); inversion Hbcp; subst.
destruct (IH Gext Dred b0 M) as [IH1 IH2]; auto.
split; auto; intros C HC; simpl in HC;
rewrite Props.union_add, add_iff in HC; destruct HC as [HC|HC].
inversion Hred; exists l; split.
apply IH1; auto. rewrite <- HC, l2s_iff; exact (ListIn_In Hin).
apply IH2; auto.
Qed.
End BCP.
(** ** The main [proof_search] function *)
(** The [proof_search] function applies [bcp] repeatedly as long as
progress has been made, and otherwise just picks a literal to split on. *)
Inductive Res : Type :=
| Sat : E.t -> Res
| Unsat.
Fixpoint proof_search (G : E.t) (D : list (list L.t))
(n : nat) {struct n} : Res :=
match n with
| O => Sat empty (* assert false *)
| S n0 =>
match bcp G D with
| bcpNone => Unsat
| bcpSome newG newD b =>
match newD with
| nil => Sat newG
| cons nil newD' => Unsat (* assert false *)
| cons (cons l C) newD' =>
(* as long as bcp has made progress, we retry *)
(* (if bcp were recursive we would not need this, *)
(* but it makes no difference performance-wise here) *)
if b then proof_search newG newD n0
else (* from that point on, G = newG, D = newD *)
match assume l G with
| Normal G1 =>
match proof_search G1 (extend l newD') n0 with
| Sat M => Sat M
| Unsat =>
let lbar := L.mk_not l in
match assume lbar G with
| Normal G2 =>
proof_search G2 (extend lbar (cons C newD')) n0
| Inconsistent => Unsat
end
end
| Inconsistent => Unsat
end
end
end
end.
Lemma expand_nonrec :
forall l C, C \In (cfl (L.expand l)) -> l \In C -> False.
Proof.
intros l C; rewrite cfl_1.
assert (Hsize := L.size_expand l).
revert Hsize; generalize (L.expand l); intro L; induction L;
intros Hsize H Hl; simpl in *.
contradiction (empty_1 H).
rewrite add_iff in H; destruct H.
set (N := L.llsize L) in *; clearbody N; clear L IHL.
rewrite <- H in Hl; clear H C; induction a.
simpl in Hl; contradiction (empty_1 Hl).
simpl in Hl; rewrite add_iff in Hl; destruct Hl.
simpl in Hsize; rewrite H in Hsize; omega.
simpl in Hsize; apply IHa; auto; omega.
apply IHL; auto.
revert Hsize; clear; induction a; simpl; auto.
intro; omega.
Qed.
Lemma expand_nonrec_2 :
forall l C, C \In (cfl (L.expand l)) -> L.mk_not l \In C -> False.
Proof.
intros l C; rewrite cfl_1.
assert (Hsize := L.size_expand l).
revert Hsize; generalize (L.expand l); intro L; induction L;
intros Hsize H Hl; simpl in *.
contradiction (empty_1 H).
rewrite add_iff in H; destruct H.
set (N := L.llsize L) in *; clearbody N; clear L IHL.
rewrite <- H in Hl; clear H C; induction a.
simpl in Hl; contradiction (empty_1 Hl).
simpl in Hl; rewrite add_iff in Hl; destruct Hl.
simpl in Hsize; rewrite H in Hsize; assert (Z := L.size_mk_not l); omega.
simpl in Hsize; apply IHa; auto; omega.
apply IHL; auto.
revert Hsize; clear; induction a; simpl; auto.
intro; omega.
Qed.
Property remove_transpose : forall (D : cset) (C C' : clause),
{{D ~ C'} ~ C} [=] {{D ~ C} ~ C'}.
Proof.
intros; intro k; set_iff; intuition.
Qed.
Lemma remove_union : forall (D D' : cset) (C : clause),
~C \In D -> {(D ++ D') ~ C} [=] D ++ {D' ~ C}.
Proof.
intros; intro k; set_iff; intuition.
intro abs; rewrite abs in H; tauto.
Qed.
Lemma union_remove : forall (D D' : cset) (C : clause),
C \In D -> D ++ D' [=] D ++ {D' ~ C}.
Proof.
intros; intro k; set_iff; intuition.
destruct (eq_dec C k); auto.
rewrite H0 in H; left; auto.
Qed.
(* Lemma remove_singleton : forall l (A : clause), *)
(* singleton l \ A =/= singleton l -> singleton l \ A === {}. *)
(* Proof. *)
(* intros; intro k; split; set_iff; intuition. *)
(* apply H; intro z; set_iff; intuition. *)
(* rewrite <- H1 in H2; rewrite H0 in H2; tauto. *)
(* Qed. *)
(* Lemma diff_union : forall (A B C : clause), C \ (A ++ B) [=] C \ A \ B. *)
(* Proof. *)
(* intros; intro k; set_iff; intuition. *)
(* Qed. *)
Theorem proof_search_unsat :
forall n G D, proof_search G D n = Unsat -> derivable (G |- ll2s D).
Proof with (eauto with typeclass_instances).
induction n; intros G0 D0; unfold proof_search.
(* - if [D0] is empty, it is satisfiable *)
intro abs; discriminate abs.
(* - otherwise, we do a step of BCP *)
fold proof_search; intro Hunsat.
assert (Hbcp := bcp_correct D0 {} G0).
assert (Hbcp2 := bcp_unsat D0 {} G0).
assert (Hprogress := bcp_progress D0 G0).
assert (Hcons := bcp_consistent D0 G0).
destruct (bcp G0 D0) as [Gext Dred b|].
(* -- if BCP returned a sequent, we first specialize the hypotheses *)
assert (Hbcp' := Hbcp Gext _ _ (refl_equal _)); clear Hbcp.
rewrite !Props.empty_union_2 in Hbcp'; intuition.
rewrite !Props.empty_union_2 in Hbcp2; intuition.
(* -- if BCP returns a sequent, it can't be empty *)
destruct Dred as [|C Dred]; try discriminate.
destruct C as [|l C].
(* -- if BCP returned a sequent with the empty clause, [AConflict] *)
apply Hbcp'; simpl; apply AConflict; apply add_1; auto.
destruct b; auto.
(* -- if BCP didn't change anything... *)
assert (Hcons' := Hcons Gext _ (refl_equal _)); clear Hcons.
destruct (Hprogress _ _ _ (refl_equal _)); subst.
simpl in Hcons'; destruct (Hcons' l {l; l2s C}) as [Hl Hnotl];
try (simpl; apply add_1; reflexivity).
case_eq (assume l G0); [intros G1 Hass1 | intros Hass1];
rewrite Hass1 in Hunsat.
2:(contradiction (Hnotl (assumed_inconsistent Hass1))).
assert (IH1 := IHn G1 (extend l Dred)).
(* -- the first recursive call must have return Unsat *)
destruct (proof_search G1 (extend l Dred)); try discriminate.
case_eq (assume (L.mk_not l) G0); [intros G2 Hass2 | intros Hass2];
rewrite Hass2 in Hunsat.
2:(rewrite <- (L.mk_not_invol l) in Hl;
contradiction (Hl (assumed_inconsistent Hass2))).
destruct (In_dec (ll2s Dred) (l2s (l :: C))).
simpl; rewrite Props.add_equal; auto.
apply AUnsat with l G1 G2; auto.
unfold extend in IH1; rewrite ll2s_app in IH1.
rewrite ll2s_cfl; exact (IH1 (refl_equal _)).
destruct (In_dec (l2s C) l).
simpl in Htrue; rewrite (Props.add_equal Htrue0) in Htrue.
assert (IH2 := IHn _ _ Hunsat); unfold extend in IH2.
rewrite ll2s_app in IH2; rewrite ll2s_cfl.
simpl in IH2; rewrite (Props.add_equal Htrue) in IH2.
exact IH2.
apply ARed with {l} {l; l2s C}.
intro k; set_iff; intro Hk; apply (EnvF.query_assume Hass2);
rewrite Hk; auto.
intro k; set_iff; intuition.
apply union_3; auto.
assert (IH2 := IHn _ _ Hunsat); unfold extend in IH2.
rewrite ll2s_app in IH2; rewrite ll2s_cfl.
rewrite Props.union_sym, <- Props.union_add, Props.union_sym.
rewrite <- Props.remove_diff_singleton, (Props.remove_add Hfalse).
exact IH2.
apply AUnsat with l G1 G2; auto.
apply AElim with l (l2s (l::C)).
apply query_assumed; rewrite (assumed_assume Hass1); apply add_1; auto.
simpl; apply add_1; auto.
apply union_3; simpl; apply add_1; auto.
rewrite remove_union.
2:(intro abs; apply expand_nonrec with l {l; l2s C}; intuition).
simpl in Hfalse |- *; rewrite (Props.remove_add Hfalse).
rewrite ll2s_cfl; unfold extend in IH1.
rewrite ll2s_app in IH1; exact (IH1 (refl_equal _)).
assert (IH2 := IHn _ _ Hunsat).
unfold extend in IH2; rewrite ll2s_app, <- ll2s_cfl in IH2; simpl in *.
destruct (In_dec (l2s C) l).
simpl in *; rewrite (Props.add_equal Htrue).
exact IH2.
apply AStrongRed with (C:=l2s (l::C))(reds := {l}).
intro k; set_iff; intro Hk; apply (EnvF.query_assume Hass2);
rewrite Hk; auto.
intro k; simpl; set_iff; intuition.
apply union_3; simpl; apply add_1; auto.
rewrite remove_union.
2:(intro abs; apply expand_nonrec_2
with (L.mk_not l) {l; l2s C}; try rewrite L.mk_not_invol; intuition).
simpl in Hfalse; rewrite (Props.remove_add Hfalse).
rewrite Props.union_sym, <- Props.union_add, Props.union_sym.
rewrite <- Props.remove_diff_singleton.
rewrite (Props.remove_add Hfalse0).
exact IH2.
(* -- if BCP did not return a sequent, we apply the correctness of [bcp] *)
rewrite Props.empty_union_2 in Hbcp2.
exact (Hbcp2 (refl_equal _)).
intuition.
Qed.
Theorem proof_search_sat :
forall n G D M, L.llsize D < n ->
proof_search G D n = Sat M ->
dom G [<=] dom M /\ compatible_e M (ll2s D).
Proof.
induction n; intros G0 D0 M Hlt; unfold proof_search.
apply False_rec; omega.
fold proof_search; intros Hsat.
assert (Hbcp := bcp_complete D0 {} G0).
assert (Hmon := bcp_monotonic_env D0 G0).
assert (Hprogress := bcp_progress D0 G0).
destruct (bcp G0 D0) as [Gext Dred b|]; try discriminate.
destruct Dred as [|C Dred].
inversion Hsat; subst; split.
exact (Hmon _ _ _ (refl_equal _)).
intros Model Hsub; destruct (Hbcp _ _ _ _ (refl_equal _) Hsub).
intros k Hk; simpl in Hk.
rewrite !Props.empty_union_2 in Hk; try solve [intuition].
contradiction (empty_1 Hk).
intros C HC; apply H0; apply union_2; auto.
destruct C as [|l C]; try discriminate.
assert (Hbcp' := fun Model => Hbcp _ _ _ Model (refl_equal _)); clear Hbcp.
assert (Hprogress' := Hprogress _ _ _ (refl_equal _)); clear Hprogress.
assert (Hmon' := Hmon _ _ _ (refl_equal _)); clear Hmon.
destruct b; auto.
destruct (IHn Gext ((l::C)::Dred) M) as [IH1 IH2]; auto; try omega.
split. transitivity (dom (Gext)); auto.
intros Model Hsub; destruct (Hbcp' Model) as [Hbcp1 Hbcp2].
intros k Hk; apply Hsub; apply query_monotonic with Gext; auto.
intros B HB; rewrite !Props.empty_union_2 in HB; try solve [intuition].
apply IH2; auto.
intros B HB; apply Hbcp2; apply union_2; auto.
destruct Hprogress'; subst; clear Hmon' Hbcp'.
case_eq (assume l G0) ; [intros G1 Hass1 | intros Hass1];
rewrite Hass1 in Hsat; try discriminate.
case_eq (proof_search G1 (extend l Dred) n);
[intros M' Heq | intros Heq]; rewrite Heq in Hsat; simpl in Hsat.
inversion Hsat; subst.
destruct (IHn G1 (extend l Dred) M) as [IH1 IH2]; auto.
unfold extend; simpl in *; rewrite llsize_app.
generalize (L.size_expand l); omega.
split.
transitivity (dom G1); auto. rewrite (assumed_assume Hass1); intuition.
simpl; intros Model Hsub B HB; rewrite add_iff in HB; destruct HB.
exists l; split; [|rewrite <- H; apply add_1; auto].
apply Hsub; apply query_monotonic with G1; auto.
apply (EnvF.query_assume Hass1); auto.
apply IH2; auto; unfold extend; rewrite ll2s_app; apply union_3; auto.
case_eq (assume (L.mk_not l) G0); [intros G2 Hass2 | intros Hass2];
rewrite Hass2 in Hsat; try discriminate.
destruct (IHn G2 (extend (L.mk_not l) (C::Dred)) M) as [IH1 IH2]; auto.
unfold extend; simpl in *; rewrite llsize_app; simpl.
generalize (L.size_mk_not l) (L.size_expand (L.mk_not l)); omega.
split.
transitivity (dom G2); auto.
rewrite (assumed_assume Hass2); intuition.
simpl; intros Model Hsub B HB; rewrite add_iff in HB; destruct HB.
destruct (IH2 _ Hsub (l2s C)) as [k [Hk1 Hk2]].
unfold extend; rewrite ll2s_app; apply union_3; apply add_1; auto.
exists k; split; auto; rewrite <- H; apply add_2; auto.
apply IH2; auto; unfold extend; rewrite ll2s_app;
apply union_3; apply add_2; auto.
Qed.
(** ** The main entry point to the SAT-solver *)
Definition dpll (Pb : formula) :=
let D0 := make Pb in
let D0_as_list := List.map elements (elements D0) in
let mu := (Datatypes.S (L.llsize D0_as_list)) in
proof_search empty D0_as_list mu.
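(* The fuel [mu] is one more than the size of the initial problem, so the
   hypothesis [L.llsize D < n] of [proof_search_sat] holds on the initial
   call. *)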
Remark l2s_elements : forall C, l2s (elements C) [=] C.
Proof.
intros C k; rewrite (elements_iff C).
remember (elements C) as L; clear C HeqL; revert k; induction L.
simpl; split; intuition.
intros k; split; intuition.
simpl in H; rewrite add_iff in H; destruct H.
constructor 1; auto.
constructor 2; exact ((proj1 (IHL k)) H).
inversion H; subst.
apply add_1; auto.
apply add_2; exact ((proj2 (IHL k)) H1).
Qed.
Remark ll2s_map_elements :
forall D, ll2s (List.map elements (elements D)) [=] D.
Proof.
intros D0 k; rewrite (elements_iff D0).
remember (elements D0) as L; clear HeqL; revert D0 k; induction L.
simpl; split; intuition.
intros D0 k; split; intuition.
simpl in H; rewrite add_iff in H; destruct H.
constructor 1. rewrite l2s_elements in H; symmetry; auto.
constructor 2; exact ((proj1 (H0 k)) H).
simpl; inversion H; subst.
apply add_1; rewrite l2s_elements; symmetry; auto.
apply add_2; exact ((proj2 (H0 k)) H2).
Qed.
Theorem dpll_correct :
forall Pb, dpll Pb = Unsat -> Sem.incompatible {} (make Pb).
Proof.
intros Pb Hunsat; unfold dpll in Hunsat.
intros M HM; apply (soundness (empty |- make Pb)).
assert (H := proof_search_unsat _ _ _ Hunsat).
rewrite ll2s_map_elements in H; assumption.
simpl; intros l; rewrite assumed_empty; set_iff; intro Hl.
rewrite <- (Sem.morphism _ _ _ Hl); apply Sem.wf_true.
Qed.
End SATCAML.
|
(*
* Copyright 2014, General Dynamics C4 Systems
*
* This software may be distributed and modified according to the terms of
* the GNU General Public License version 2. Note that NO WARRANTY is provided.
* See "LICENSE_GPLv2.txt" for details.
*
* @TAG(GD_GPL)
*)
(*
Author: Gerwin Klein
Assumptions and lemmas on machine operations.
*)
theory Machine_C
imports "../../lib/clib/Ctac"
begin
locale kernel_m = kernel +
assumes resetTimer_ccorres:
"ccorres dc xfdc \<top> UNIV []
(doMachineOp resetTimer)
(Call resetTimer_'proc)"
assumes writeTTBR0_ccorres:
"ccorres dc xfdc \<top> (\<lbrace>\<acute>addr = pd\<rbrace>) []
(doMachineOp (writeTTBR0 pd))
(Call writeTTBR0_'proc)"
assumes setHardwareASID_ccorres:
"ccorres dc xfdc \<top> (\<lbrace>\<acute>hw_asid = hw_asid\<rbrace>) []
(doMachineOp (setHardwareASID hw_asid))
(Call setHardwareASID_'proc)"
assumes isb_ccorres:
"ccorres dc xfdc \<top> UNIV []
(doMachineOp isb)
(Call isb_'proc)"
assumes dsb_ccorres:
"ccorres dc xfdc \<top> UNIV []
(doMachineOp dsb)
(Call dsb_'proc)"
assumes dmb_ccorres:
"ccorres dc xfdc \<top> UNIV []
(doMachineOp dmb)
(Call dmb_'proc)"
assumes invalidateTLB_ccorres:
"ccorres dc xfdc \<top> UNIV []
(doMachineOp invalidateTLB)
(Call invalidateTLB_'proc)"
assumes invalidateTLB_ASID_ccorres:
"ccorres dc xfdc \<top> (\<lbrace>\<acute>hw_asid = hw_asid \<rbrace>) []
(doMachineOp (invalidateTLB_ASID hw_asid))
(Call invalidateTLB_ASID_'proc)"
assumes invalidateTLB_VAASID_ccorres:
"ccorres dc xfdc \<top> (\<lbrace>\<acute>mva_plus_asid = w\<rbrace>) []
(doMachineOp (invalidateTLB_VAASID w))
(Call invalidateTLB_VAASID_'proc)"
assumes cleanByVA_ccorres:
"ccorres dc xfdc \<top> (\<lbrace>\<acute>vaddr = w1\<rbrace> \<inter> \<lbrace>\<acute>paddr = w2\<rbrace>) []
(doMachineOp (cleanByVA w1 w2))
(Call cleanByVA_'proc)"
assumes cleanByVA_PoU_ccorres:
"ccorres dc xfdc \<top> (\<lbrace>\<acute>vaddr = w1\<rbrace> \<inter> \<lbrace>\<acute>paddr = w2\<rbrace>) []
(doMachineOp (cleanByVA_PoU w1 w2))
(Call cleanByVA_PoU_'proc)"
assumes cleanByVA_PoU_preserves_kernel_bytes:
"\<forall>s. \<Gamma>\<turnstile>\<^bsub>/UNIV\<^esub> {s} Call cleanByVA_PoU_'proc
{t. hrs_htd (t_hrs_' (globals t)) = hrs_htd (t_hrs_' (globals s))
\<and> (\<forall>x. snd (hrs_htd (t_hrs_' (globals s)) x) 0 \<noteq> None
\<longrightarrow> hrs_mem (t_hrs_' (globals t)) x = hrs_mem (t_hrs_' (globals s)) x)}"
assumes invalidateByVA_ccorres:
"ccorres dc xfdc \<top> (\<lbrace>\<acute>vaddr = w1\<rbrace> \<inter> \<lbrace>\<acute>paddr = w2\<rbrace>) []
(doMachineOp (invalidateByVA w1 w2))
(Call invalidateByVA_'proc)"
assumes invalidateByVA_I_ccorres:
"ccorres dc xfdc \<top> (\<lbrace>\<acute>vaddr = w1\<rbrace> \<inter> \<lbrace>\<acute>paddr = w2\<rbrace>) []
(doMachineOp (invalidateByVA_I w1 w2))
(Call invalidateByVA_I_'proc)"
assumes invalidate_I_PoU_ccorres:
"ccorres dc xfdc \<top> UNIV []
(doMachineOp invalidate_I_PoU)
(Call invalidate_I_PoU_'proc)"
assumes cleanInvalByVA_ccorres:
"ccorres dc xfdc \<top> (\<lbrace>\<acute>vaddr = w1\<rbrace> \<inter> \<lbrace>\<acute>paddr = w2\<rbrace>) []
(doMachineOp (cleanInvalByVA w1 w2))
(Call cleanInvalByVA_'proc)"
assumes branchFlush_ccorres:
"ccorres dc xfdc \<top> (\<lbrace>\<acute>vaddr = w1\<rbrace> \<inter> \<lbrace>\<acute>paddr = w2\<rbrace>) []
(doMachineOp (branchFlush w1 w2))
(Call branchFlush_'proc)"
assumes clean_D_PoU_ccorres:
"ccorres dc xfdc \<top> UNIV []
(doMachineOp clean_D_PoU)
(Call clean_D_PoU_'proc)"
assumes cleanInvalidate_D_PoC_ccorres:
"ccorres dc xfdc \<top> UNIV []
(doMachineOp cleanInvalidate_D_PoC)
(Call cleanInvalidate_D_PoC_'proc)"
assumes cleanInvalidateL2Range_ccorres:
"ccorres dc xfdc \<top> (\<lbrace>\<acute>start = w1\<rbrace> \<inter> \<lbrace>\<acute>end = w2\<rbrace>) []
(doMachineOp (cleanInvalidateL2Range w1 w2))
(Call cleanInvalidateL2Range_'proc)"
assumes invalidateL2Range_ccorres:
"ccorres dc xfdc \<top> (\<lbrace>\<acute>start = w1\<rbrace> \<inter> \<lbrace>\<acute>end = w2\<rbrace>) []
(doMachineOp (invalidateL2Range w1 w2))
(Call invalidateL2Range_'proc)"
assumes cleanL2Range_ccorres:
"ccorres dc xfdc \<top> (\<lbrace>\<acute>start = w1\<rbrace> \<inter> \<lbrace>\<acute>end = w2\<rbrace>) []
(doMachineOp (cleanL2Range w1 w2))
(Call plat_cleanL2Range_'proc)"
assumes clearExMonitor_ccorres:
"ccorres dc xfdc \<top> UNIV []
(doMachineOp ARM.clearExMonitor)
(Call clearExMonitor_'proc)"
assumes getIFSR_ccorres:
"ccorres (op =) ret__unsigned_long_' \<top> UNIV []
(doMachineOp getIFSR)
(Call getIFSR_'proc)"
assumes getDFSR_ccorres:
"ccorres (op =) ret__unsigned_long_' \<top> UNIV []
(doMachineOp getDFSR)
(Call getDFSR_'proc)"
assumes getFAR_ccorres:
"ccorres (op =) ret__unsigned_long_' \<top> UNIV []
(doMachineOp getFAR)
(Call getFAR_'proc)"
assumes getActiveIRQ_ccorres:
"ccorres (\<lambda>(a::10 word option) c::16 word.
case a of None \<Rightarrow> c = (0xFFFF::16 word)
| Some (x::10 word) \<Rightarrow> c = ucast x \<and> c \<noteq> (0xFFFF::16 word))
(\<lambda>t. irq_' (s\<lparr>globals := globals t, irq_' := ret__unsigned_short_' t\<rparr> ))
\<top> UNIV hs
(doMachineOp getActiveIRQ) (Call getActiveIRQ_'proc)"
(* This is not quite accurate; however, our current Haskell implementation of the hardware is stateless. *)
assumes isIRQPending_ccorres:
"ccorres (\<lambda>rv rv'. rv' = from_bool (rv \<noteq> None)) ret__unsigned_long_'
\<top> UNIV []
(doMachineOp getActiveIRQ) (Call isIRQPending_'proc)"
assumes armv_contextSwitch_HWASID_ccorres:
"ccorres dc xfdc \<top> (UNIV \<inter> {s. cap_pd_' s = pde_Ptr pd} \<inter> {s. hw_asid_' s = hwasid}) []
(doMachineOp (armv_contextSwitch_HWASID pd hwasid)) (Call armv_contextSwitch_HWASID_'proc)"
assumes getActiveIRQ_Normal:
"\<Gamma> \<turnstile> \<langle>Call getActiveIRQ_'proc, Normal s\<rangle> \<Rightarrow> s' \<Longrightarrow> isNormal s'"
assumes maskInterrupt_ccorres:
"ccorres dc xfdc \<top> (\<lbrace>\<acute>disable = from_bool m\<rbrace> \<inter> \<lbrace>\<acute>irq = ucast irq\<rbrace>) []
(doMachineOp (maskInterrupt m irq))
(Call maskInterrupt_'proc)"
assumes invalidateTLB_VAASID_spec:
"\<Gamma>\<turnstile>\<^bsub>/UNIV\<^esub> UNIV (Call invalidateMVA_'proc) UNIV"
assumes cleanCacheRange_PoU_spec:
"\<Gamma>\<turnstile>\<^bsub>/UNIV\<^esub> UNIV (Call cleanCacheRange_PoU_'proc) UNIV"
(* The following are fastpath-specific assumptions.
We might want to move them somewhere else. *)
(* clearExMonitor_fp is an inline-friendly version of clearExMonitor *)
assumes clearExMonitor_fp_ccorres:
"ccorres dc xfdc (\<lambda>_. True) UNIV [] (doMachineOp ARM.clearExMonitor)
(Call clearExMonitor_fp_'proc)"
(*
@{text slowpath} is an assembly stub that switches execution
from the fastpath to the slowpath. Its contract is equivalence
to the toplevel slowpath function @{term callKernel} for the
@{text SyscallEvent} case.
*)
assumes slowpath_ccorres:
"ccorres dc xfdc
(\<lambda>s. invs' s \<and> ct_in_state' (op = Running) s)
({s. syscall_' s = syscall_from_H ev})
[SKIP]
(callKernel (SyscallEvent ev)) (Call slowpath_'proc)"
(*
@{text slowpath} does not return, but uses the regular
slowpath kernel exit instead.
*)
assumes slowpath_noreturn_spec:
"\<Gamma> \<turnstile> UNIV Call slowpath_'proc {},UNIV"
(*
@{text fastpath_restore} updates badge and msgInfo registers
and returns to the user.
*)
assumes fastpath_restore_ccorres:
"ccorres dc xfdc
(\<lambda>s. t = ksCurThread s)
({s. badge_' s = bdg} \<inter> {s. msgInfo_' s = msginfo}
\<inter> {s. cur_thread_' s = tcb_ptr_to_ctcb_ptr t})
[SKIP]
(asUser t (zipWithM_x setRegister
[ARM_H.badgeRegister, ARM_H.msgInfoRegister]
[bdg, msginfo]))
(Call fastpath_restore_'proc)"
assumes ackInterrupt_ccorres:
"ccorres dc xfdc \<top> UNIV hs
(doMachineOp (ackInterrupt irq))
(Call ackInterrupt_'proc)"
context kernel_m begin
lemma index_xf_for_sequence:
"\<forall>s f. index_' (index_'_update f s) = f (index_' s)
\<and> globals (index_'_update f s) = globals s"
by simp
lemma upto_enum_word_nth:
"\<lbrakk>i \<le> j; k \<le> unat (j - i)\<rbrakk> \<Longrightarrow> [i .e. j] ! k = i + of_nat k"
apply (clarsimp simp: upto_enum_def nth_upt nth_append)
apply (clarsimp simp: toEnum_of_nat word_le_nat_alt[symmetric])
apply (rule conjI, clarsimp)
apply (subst toEnum_of_nat, unat_arith)
apply unat_arith
apply (clarsimp simp: not_less unat_sub[symmetric])
apply unat_arith
done
lemma upto_enum_step_nth:
"\<lbrakk>a \<le> c; n \<le> unat ((c - a) div (b - a))\<rbrakk> \<Longrightarrow> [a, b .e. c] ! n = a + of_nat n * (b - a)"
apply (clarsimp simp: upto_enum_step_def not_less[symmetric])
apply (subst upto_enum_word_nth)
apply (auto simp: not_less word_of_nat_le)
done
lemma lineStart_le_mono:
"x \<le> y \<Longrightarrow> lineStart x \<le> lineStart y"
by (clarsimp simp: lineStart_def cacheLineBits_def shiftr_shiftl1 neg_mask_mono_le)
lemma neg_mask_add:
"y && mask n = 0 \<Longrightarrow> x + y && ~~ mask n = (x && ~~ mask n) + y"
by (clarsimp simp: mask_out_sub_mask mask_eqs(7)[symmetric] mask_twice)
lemma lineStart_sub:
"\<lbrakk> x && mask 5 = y && mask 5\<rbrakk> \<Longrightarrow> lineStart (x - y) = lineStart x - lineStart y"
apply (clarsimp simp: lineStart_def cacheLineBits_def shiftr_shiftl1)
apply (clarsimp simp: mask_out_sub_mask)
apply (clarsimp simp: mask_eqs(8)[symmetric])
done
lemma minus_minus_swap:
"\<lbrakk> a \<le> c; b \<le> d; b \<le> a; d \<le> c\<rbrakk> \<Longrightarrow> (d :: nat) - b = c - a \<Longrightarrow> a - b = c - d"
by arith
lemma minus_minus_swap':
"\<lbrakk> c \<le> a; d \<le> b; b \<le> a; d \<le> c\<rbrakk> \<Longrightarrow> (b :: nat) - d = a - c \<Longrightarrow> a - b = c - d"
by arith
lemma lineStart_mask:
"lineStart x && mask 5 = 0"
by (clarsimp simp: lineStart_def cacheLineBits_def shiftr_shiftl1 mask_AND_NOT_mask)
lemma cachRangeOp_corres_helper:
"\<lbrakk>w1 \<le> w2; w3 \<le> w3 + (w2 - w1); w1 && mask 5 = w3 && mask 5\<rbrakk>
\<Longrightarrow> unat (lineStart w2 - lineStart w1) div 32 =
unat (lineStart (w3 + (w2 - w1)) - lineStart w3) div 32"
apply (subst dvd_div_div_eq_mult, simp)
apply (clarsimp simp: and_mask_dvd_nat[where n=5, simplified])
apply (clarsimp simp: lineStart_def cacheLineBits_def shiftr_shiftl1)
apply (subst mask_eqs(8)[symmetric])
apply (clarsimp simp: mask_AND_NOT_mask)
apply (clarsimp simp: and_mask_dvd_nat[where n=5, simplified])
apply (subst mask_eqs(8)[symmetric])
apply (clarsimp simp: lineStart_mask)
apply (subgoal_tac "w3 + (w2 - w1) && mask 5 = w2 && mask 5")
apply clarsimp
apply (rule_tac x=w1 and y=w3 in linorder_le_cases)
apply (subgoal_tac "lineStart (w3 + (w2 - w1)) - lineStart w2 = lineStart w3 - lineStart w1")
apply (rule word_unat.Rep_eqD)
apply (subst unat_sub, clarsimp simp: lineStart_le_mono)+
apply (rule minus_minus_swap)
apply (clarsimp simp: word_le_nat_alt[symmetric] intro!: lineStart_le_mono)
apply (clarsimp simp: word_le_nat_alt unat_plus_simple[THEN iffD1] unat_sub)
apply arith
apply (clarsimp simp: word_le_nat_alt[symmetric] lineStart_le_mono)
apply (clarsimp simp: word_le_nat_alt[symmetric] lineStart_le_mono)
apply (clarsimp simp: word_le_nat_alt[symmetric] lineStart_le_mono)
apply (subst unat_sub[symmetric], clarsimp simp: intro!: lineStart_le_mono)+
apply (clarsimp simp: word_le_nat_alt unat_plus_simple[THEN iffD1] unat_sub)
apply arith
apply clarsimp
apply (clarsimp simp: lineStart_sub[symmetric] field_simps)
apply (subgoal_tac "lineStart w2 - lineStart (w3 + (w2 - w1)) = lineStart w1 - lineStart w3")
apply (rule word_unat.Rep_eqD)
apply (subst unat_sub, clarsimp simp: lineStart_le_mono)+
apply (rule minus_minus_swap')
apply (clarsimp simp: word_le_nat_alt[symmetric] intro!: lineStart_le_mono)
apply (clarsimp simp: word_le_nat_alt unat_plus_simple[THEN iffD1] unat_sub)
apply arith
apply (clarsimp simp: word_le_nat_alt[symmetric] lineStart_le_mono)
apply (clarsimp simp: word_le_nat_alt[symmetric] lineStart_le_mono)
apply (clarsimp simp: word_le_nat_alt[symmetric] lineStart_le_mono)
apply (subst unat_sub[symmetric], clarsimp simp: intro!: lineStart_le_mono)+
apply (clarsimp simp: word_le_nat_alt unat_plus_simple[THEN iffD1] unat_sub)
apply arith
apply clarsimp
apply (clarsimp simp: lineStart_sub[symmetric] field_simps)
apply (subst mask_eqs(7)[symmetric])
apply (subst mask_eqs(8)[symmetric])
apply (clarsimp simp: mask_eqs)
done
definition "lineIndex x \<equiv> lineStart x >> cacheLineBits"
lemma shiftr_shiftl_shiftr[simp]:
"x >> a << a >> a = (x :: ('a :: len) word) >> a"
apply (rule word_eqI)
apply (simp add: word_size nth_shiftr nth_shiftl)
apply safe
apply (drule test_bit_size)
apply (simp add: word_size)
done
lemma lineIndex_def2:
"lineIndex x = x >> cacheLineBits"
by (simp add: lineIndex_def lineStart_def)
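(* With cacheLineBits = 5, i.e. 32-byte cache lines, we have for instance
   lineStart 0x1234 = 0x1220 and lineIndex 0x1234 = 0x91. *)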
lemma lineIndex_le_mono:
"x \<le> y \<Longrightarrow> lineIndex x \<le> lineIndex y"
by (clarsimp simp: lineIndex_def2 cacheLineBits_def le_shiftr)
lemma add_right_shift:
"\<lbrakk>x && mask n = 0; y && mask n = 0; x \<le> x + y \<rbrakk>
\<Longrightarrow> (x + y :: ('a :: len) word) >> n = (x >> n) + (y >> n)"
apply (simp add: no_olen_add_nat is_aligned_mask[symmetric])
apply (simp add: unat_arith_simps shiftr_div_2n' split del: if_split)
apply (subst if_P)
apply (erule order_le_less_trans[rotated])
apply (simp add: add_mono)
apply (simp add: shiftr_div_2n' is_aligned_def)
done
lemma sub_right_shift:
"\<lbrakk>x && mask n = 0; y && mask n = 0; y \<le> x \<rbrakk>
\<Longrightarrow> (x - y) >> n = (x >> n :: ('a :: len) word) - (y >> n)"
using add_right_shift[where x="x - y" and y=y and n=n]
by (simp add: aligned_sub_aligned is_aligned_mask[symmetric]
word_sub_le)
lemma lineIndex_lineStart_diff:
"w1 \<le> w2 \<Longrightarrow> (unat (lineStart w2 - lineStart w1) div 32) = unat (lineIndex w2 - lineIndex w1)"
apply (subst shiftr_div_2n'[symmetric, where n=5, simplified])
apply (drule lineStart_le_mono)
apply (drule sub_right_shift[OF lineStart_mask lineStart_mask])
apply (simp add: lineIndex_def cacheLineBits_def)
done
lemma cacheRangeOp_ccorres:
"\<lbrakk>\<And>x y. empty_fail (oper x y);
\<forall>n. ccorres dc xfdc \<top> (\<lbrace>\<acute>index = lineIndex w1 + of_nat n\<rbrace>) hs
(doMachineOp (oper (lineStart w1 + of_nat n * 0x20)
(lineStart w3 + of_nat n * 0x20)))
f;
\<forall>s. \<Gamma>\<turnstile>\<^bsub>/UNIV\<^esub> {s} f ({t. index_' t = index_' s}) \<rbrakk> \<Longrightarrow>
ccorres dc xfdc (\<lambda>_. w1 \<le> w2 \<and> w3 \<le> w3 + (w2 - w1)
\<and> w1 && mask 5 = w3 && mask 5)
(\<lbrace>\<acute>index = w1 >> 5\<rbrace>) hs
(doMachineOp (cacheRangeOp oper w1 w2 w3))
(While \<lbrace>\<acute>index < (w2 >> 5) + 1\<rbrace>
(f;; \<acute>index :== \<acute>index + 1))"
apply (clarsimp simp: cacheRangeOp_def doMachineOp_mapM_x split_def
cacheLine_def cacheLineBits_def)
apply (rule ccorres_gen_asm[where G=\<top>, simplified])
apply (rule ccorres_guard_imp)
apply (rule ccorres_rel_imp)
apply (rule_tac i="unat (lineIndex w1)" and F="\<lambda>_. \<top>"
in ccorres_mapM_x_while_gen'[OF _ _ _ _ _ index_xf_for_sequence,
where j=1, simplified], simp_all)
apply clarsimp
apply (clarsimp simp: length_upto_enum_step lineStart_le_mono)
apply (clarsimp simp: upto_enum_step_nth lineStart_le_mono)
apply (clarsimp simp: length_upto_enum_step lineStart_le_mono unat_div)
apply (subst min_absorb1[OF order_eq_refl])
apply (erule (2) cachRangeOp_corres_helper)
apply (simp add: lineIndex_lineStart_diff)
apply (simp add: lineIndex_def2 cacheLineBits_def)
apply unat_arith
apply wp
apply (clarsimp simp: length_upto_enum_step lineStart_le_mono unat_div)
apply (subst min_absorb1[OF order_eq_refl])
apply (erule (2) cachRangeOp_corres_helper)
apply (simp add: lineIndex_lineStart_diff unat_sub[OF lineIndex_le_mono])
apply (subst le_add_diff_inverse)
apply (simp add: lineIndex_le_mono word_le_nat_alt[symmetric])
apply (simp add: lineIndex_def2 cacheLineBits_def)
apply (rule unat_mono[where 'a=32 and b="0xFFFFFFFF", simplified])
apply word_bitwise
apply (simp add: lineIndex_def cacheLineBits_def lineStart_def)
done
lemma lineStart_eq_minus_mask:
"lineStart w1 = w1 - (w1 && mask 5)"
by (simp add: lineStart_def cacheLineBits_def mask_out_sub_mask[symmetric] and_not_mask)
lemma lineStart_idem[simp]:
"lineStart (lineStart x) = lineStart x"
by (simp add: lineStart_def cacheLineBits_def)
lemma cache_range_lineIndex_helper:
"lineIndex w1 + of_nat n << 5 = w1 - (w1 && mask 5) + of_nat n * 0x20"
apply (clarsimp simp: lineIndex_def cacheLineBits_def word_shiftl_add_distrib lineStart_def[symmetric, unfolded cacheLineBits_def] lineStart_eq_minus_mask[symmetric])
apply (simp add: shiftl_t2n)
done
lemma cleanCacheRange_PoC_ccorres:
"ccorres dc xfdc (\<lambda>_. w1 \<le> w2 \<and> w3 \<le> w3 + (w2 - w1)
\<and> w1 && mask 5 = w3 && mask 5)
(\<lbrace>\<acute>start = w1\<rbrace> \<inter> \<lbrace>\<acute>end = w2\<rbrace> \<inter> \<lbrace>\<acute>pstart = w3\<rbrace>) []
(doMachineOp (cleanCacheRange_PoC w1 w2 w3))
(Call cleanCacheRange_PoC_'proc)"
apply (rule ccorres_gen_asm[where G=\<top>, simplified])
apply (cinit' lift: start_' end_' pstart_')
apply (clarsimp simp: cleanCacheRange_PoC_def word_sle_def whileAnno_def)
apply (ccorres_remove_UNIV_guard)
apply csymbr
apply (rule cacheRangeOp_ccorres[simplified dc_def])
apply (rule empty_fail_cleanByVA)
apply clarsimp
apply (cinitlift index_')
apply (rule ccorres_guard_imp2)
apply csymbr
apply (ctac add: cleanByVA_ccorres[unfolded dc_def])
apply (clarsimp simp: lineStart_def cacheLineBits_def shiftr_shiftl1
mask_out_sub_mask)
apply (drule_tac s="w1 && mask 5" in sym, simp add: cache_range_lineIndex_helper)
apply (vcg exspec=cleanByVA_modifies)
apply clarsimp
done
lemma cleanInvalidateCacheRange_RAM_ccorres:
"ccorres dc xfdc ((\<lambda>s. unat (w2 - w1) \<le> gsMaxObjectSize s)
and (\<lambda>_. w1 \<le> w2 \<and> w3 \<le> w3 + (w2 - w1)
\<and> w1 && mask 5 = w3 && mask 5 \<and> unat (w2 - w2) \<le> gsMaxObjectSize s))
(\<lbrace>\<acute>start = w1\<rbrace> \<inter> \<lbrace>\<acute>end = w2\<rbrace> \<inter> \<lbrace>\<acute>pstart = w3\<rbrace>) []
(doMachineOp (cleanInvalidateCacheRange_RAM w1 w2 w3))
(Call cleanInvalidateCacheRange_RAM_'proc)"
apply (rule ccorres_gen_asm)
apply (cinit' lift: start_' end_' pstart_')
apply (clarsimp simp: word_sle_def whileAnno_def)
apply (ccorres_remove_UNIV_guard)
apply (rule ccorres_Guard_Seq)
apply (rule ccorres_basic_srnoop)
apply (simp add: cleanInvalidateCacheRange_RAM_def doMachineOp_bind
empty_fail_dsb empty_fail_cleanCacheRange_PoC empty_fail_cleanInvalidateL2Range
empty_fail_cacheRangeOp empty_fail_cleanInvalByVA)
apply (ctac (no_vcg) add: cleanCacheRange_PoC_ccorres)
apply (ctac (no_vcg) add: dsb_ccorres)
apply (ctac (no_vcg) add: cleanInvalidateL2Range_ccorres)
apply csymbr
apply (rule ccorres_split_nothrow_novcg)
apply (rule cacheRangeOp_ccorres)
apply (rule empty_fail_cleanInvalByVA)
apply clarsimp
apply (cinitlift index_')
apply (rule ccorres_guard_imp2)
apply csymbr
apply (ctac add: cleanInvalByVA_ccorres)
apply (clarsimp simp: lineStart_def cacheLineBits_def shiftr_shiftl1
mask_out_sub_mask)
apply (drule_tac s="w1 && mask 5" in sym, simp add: cache_range_lineIndex_helper)
apply (vcg exspec=cleanInvalByVA_modifies)
apply (rule ceqv_refl)
apply (ctac (no_vcg) add: dsb_ccorres[simplified dc_def])
apply (wp | clarsimp simp: guard_is_UNIVI o_def)+
apply (frule(1) ghost_assertion_size_logic)
apply (clarsimp simp: o_def)
done
lemma cleanCacheRange_RAM_ccorres:
"ccorres dc xfdc (\<lambda>s. w1 \<le> w2 \<and> w3 \<le> w3 + (w2 - w1)
\<and> w1 && mask 5 = w3 && mask 5
\<and> unat (w2 - w1) \<le> gsMaxObjectSize s)
(\<lbrace>\<acute>start = w1\<rbrace> \<inter> \<lbrace>\<acute>end = w2\<rbrace> \<inter> \<lbrace>\<acute>pstart = w3\<rbrace>) []
(doMachineOp (cleanCacheRange_RAM w1 w2 w3))
(Call cleanCacheRange_RAM_'proc)"
apply (cinit' lift: start_' end_' pstart_')
apply (simp add: cleanCacheRange_RAM_def doMachineOp_bind
empty_fail_dsb empty_fail_cleanCacheRange_PoC empty_fail_cleanL2Range)
apply (rule ccorres_Guard_Seq)
apply (rule ccorres_basic_srnoop2, simp)
apply (ctac (no_vcg) add: cleanCacheRange_PoC_ccorres)
apply (ctac (no_vcg) add: dsb_ccorres)
apply (rule_tac P="\<lambda>s. unat (w2 - w1) \<le> gsMaxObjectSize s"
in ccorres_cross_over_guard)
apply (rule ccorres_Guard_Seq)
apply (rule ccorres_basic_srnoop2, simp)
apply (ctac (no_vcg) add: cleanL2Range_ccorres[unfolded dc_def])
apply wp+
apply clarsimp
apply (auto dest: ghost_assertion_size_logic simp: o_def)
done
lemma cleanCacheRange_PoU_ccorres:
"ccorres dc xfdc ((\<lambda>s. unat (w2 - w1) \<le> gsMaxObjectSize s)
and (\<lambda>_. w1 \<le> w2 \<and> w3 \<le> w3 + (w2 - w1)
\<and> w1 && mask 5 = w3 && mask 5))
(\<lbrace>\<acute>start = w1\<rbrace> \<inter> \<lbrace>\<acute>end = w2\<rbrace> \<inter> \<lbrace>\<acute>pstart = w3\<rbrace>) []
(doMachineOp (cleanCacheRange_PoU w1 w2 w3))
(Call cleanCacheRange_PoU_'proc)"
apply (rule ccorres_gen_asm)
apply (cinit' lift: start_' end_' pstart_')
apply (clarsimp simp: word_sle_def whileAnno_def)
apply (ccorres_remove_UNIV_guard)
apply (rule ccorres_Guard_Seq)
apply (rule ccorres_basic_srnoop2, simp)
apply (simp add: cleanCacheRange_PoU_def)
apply csymbr
apply (rule cacheRangeOp_ccorres[simplified dc_def])
apply (rule empty_fail_cleanByVA_PoU)
apply clarsimp
apply (cinitlift index_')
apply (rule ccorres_guard_imp2)
apply csymbr
apply (ctac add: cleanByVA_PoU_ccorres[unfolded dc_def])
apply (clarsimp simp: lineStart_def cacheLineBits_def shiftr_shiftl1
mask_out_sub_mask)
apply (drule_tac s="w1 && mask 5" in sym, simp add: cache_range_lineIndex_helper)
apply (vcg exspec=cleanByVA_PoU_modifies)
apply clarsimp
apply (frule(1) ghost_assertion_size_logic)
apply (clarsimp simp: o_def)
done
lemma dmo_if:
"(doMachineOp (if a then b else c)) = (if a then (doMachineOp b) else (doMachineOp c))"
by (simp split: if_split)
lemma invalidateCacheRange_RAM_ccorres:
"ccorres dc xfdc ((\<lambda>s. unat (w2 - w1) \<le> gsMaxObjectSize s)
and (\<lambda>_. w1 \<le> w2 \<and> w3 \<le> w3 + (w2 - w1)
\<and> w1 && mask 5 = w3 && mask 5))
(\<lbrace>\<acute>start = w1\<rbrace> \<inter> \<lbrace>\<acute>end = w2\<rbrace> \<inter> \<lbrace>\<acute>pstart = w3\<rbrace>) []
(doMachineOp (invalidateCacheRange_RAM w1 w2 w3))
(Call invalidateCacheRange_RAM_'proc)"
apply (rule ccorres_gen_asm)
apply (cinit' lift: start_' end_' pstart_')
apply (clarsimp simp: word_sle_def whileAnno_def split del: if_split)
apply (ccorres_remove_UNIV_guard)
apply (simp add: invalidateCacheRange_RAM_def doMachineOp_bind when_def
split_if_empty_fail empty_fail_cleanCacheRange_RAM
empty_fail_invalidateL2Range empty_fail_cacheRangeOp empty_fail_invalidateByVA
empty_fail_dsb dmo_if
split del: if_split)
apply (rule ccorres_split_nothrow_novcg)
apply (rule ccorres_cond[where R=\<top>])
apply (clarsimp simp: lineStart_def cacheLineBits_def)
apply (rule ccorres_call[OF cleanCacheRange_RAM_ccorres, where xf'=xfdc], (clarsimp)+)
apply (rule ccorres_return_Skip[unfolded dc_def])
apply ceqv
apply (rule ccorres_split_nothrow_novcg)
apply (rule ccorres_cond[where R=\<top>])
apply (clarsimp simp: lineStart_def cacheLineBits_def)
apply csymbr
apply (rule ccorres_call[OF cleanCacheRange_RAM_ccorres, where xf'=xfdc], (clarsimp)+)
apply (rule ccorres_return_Skip[unfolded dc_def])
apply ceqv
apply (rule_tac P="\<lambda>s. unat (w2 - w1) \<le> gsMaxObjectSize s"
in ccorres_cross_over_guard)
apply (rule ccorres_Guard_Seq)
apply (rule ccorres_basic_srnoop2, simp)
apply (ctac add: invalidateL2Range_ccorres)
apply (rule ccorres_Guard_Seq)
apply (rule ccorres_basic_srnoop2, simp)
apply (csymbr)
apply (rule ccorres_split_nothrow_novcg)
apply (rule cacheRangeOp_ccorres)
apply (simp add: empty_fail_invalidateByVA)
apply clarsimp
apply (cinitlift index_')
apply (rule ccorres_guard_imp2)
apply csymbr
apply (ctac add: invalidateByVA_ccorres)
apply (clarsimp simp: lineStart_def cacheLineBits_def shiftr_shiftl1
mask_out_sub_mask)
apply (drule_tac s="w1 && mask 5" in sym, simp add: cache_range_lineIndex_helper)
apply (vcg exspec=invalidateByVA_modifies)
apply ceqv
apply (ctac add: dsb_ccorres[unfolded dc_def])
apply wp
apply (simp add: guard_is_UNIV_def)
apply wp
apply (vcg exspec=plat_invalidateL2Range_modifies)
apply wp
apply (simp add: guard_is_UNIV_def)
apply (auto dest: ghost_assertion_size_logic simp: o_def)[1]
apply (wp | clarsimp split: if_split)+
apply (clarsimp simp: lineStart_def cacheLineBits_def guard_is_UNIV_def)
apply (clarsimp simp: lineStart_mask)
apply (subst mask_eqs(7)[symmetric])
apply (subst mask_eqs(8)[symmetric])
apply (simp add: lineStart_mask mask_eqs)
done
lemma invalidateCacheRange_I_ccorres:
"ccorres dc xfdc (\<lambda>_. w1 \<le> w2 \<and> w3 \<le> w3 + (w2 - w1)
\<and> w1 && mask 5 = w3 && mask 5)
(\<lbrace>\<acute>start = w1\<rbrace> \<inter> \<lbrace>\<acute>end = w2\<rbrace> \<inter> \<lbrace>\<acute>pstart = w3\<rbrace>) []
(doMachineOp (invalidateCacheRange_I w1 w2 w3))
(Call invalidateCacheRange_I_'proc)"
apply (rule ccorres_gen_asm[where G=\<top>, simplified])
apply (cinit' lift: start_' end_' pstart_')
apply (clarsimp simp: word_sle_def whileAnno_def)
apply (ccorres_remove_UNIV_guard)
apply (simp add: invalidateCacheRange_I_def)
apply csymbr
apply (rule cacheRangeOp_ccorres[simplified dc_def])
apply (rule empty_fail_invalidateByVA_I)
apply clarsimp
apply (cinitlift index_')
apply (rule ccorres_guard_imp2)
apply csymbr
apply (ctac add: invalidateByVA_I_ccorres[unfolded dc_def])
apply (clarsimp simp: lineStart_def cacheLineBits_def shiftr_shiftl1
mask_out_sub_mask)
apply (drule_tac s="w1 && mask 5" in sym, simp add: cache_range_lineIndex_helper)
apply (vcg exspec=invalidateByVA_I_modifies)
apply clarsimp
done
lemma branchFlushRange_ccorres:
"ccorres dc xfdc (\<lambda>_. w1 \<le> w2 \<and> w3 \<le> w3 + (w2 - w1)
\<and> w1 && mask 5 = w3 && mask 5)
(\<lbrace>\<acute>start = w1\<rbrace> \<inter> \<lbrace>\<acute>end = w2\<rbrace> \<inter> \<lbrace>\<acute>pstart = w3\<rbrace>) []
(doMachineOp (branchFlushRange w1 w2 w3))
(Call branchFlushRange_'proc)"
apply (rule ccorres_gen_asm[where G=\<top>, simplified])
apply (cinit' lift: start_' end_' pstart_')
apply (clarsimp simp: word_sle_def whileAnno_def)
apply (ccorres_remove_UNIV_guard)
apply (simp add: branchFlushRange_def)
apply csymbr
apply (rule cacheRangeOp_ccorres[simplified dc_def])
apply (rule empty_fail_branchFlush)
apply clarsimp
apply (cinitlift index_')
apply (rule ccorres_guard_imp2)
apply csymbr
apply (ctac add: branchFlush_ccorres[unfolded dc_def])
apply (clarsimp simp: lineStart_def cacheLineBits_def shiftr_shiftl1
mask_out_sub_mask)
apply (drule_tac s="w1 && mask 5" in sym, simp add: cache_range_lineIndex_helper)
apply (vcg exspec=branchFlush_modifies)
apply clarsimp
done
lemma cleanCaches_PoU_ccorres:
"ccorres dc xfdc \<top> UNIV []
(doMachineOp cleanCaches_PoU)
(Call cleanCaches_PoU_'proc)"
apply cinit'
apply (simp add: cleanCaches_PoU_def doMachineOp_bind
empty_fail_dsb empty_fail_clean_D_PoU empty_fail_invalidate_I_PoU)
apply (ctac (no_vcg) add: dsb_ccorres)
apply (ctac (no_vcg) add: clean_D_PoU_ccorres)
apply (ctac (no_vcg) add: dsb_ccorres)
apply (ctac (no_vcg) add: invalidate_I_PoU_ccorres)
apply (ctac (no_vcg) add: dsb_ccorres)
apply wp+
apply clarsimp
done
lemma setCurrentPD_ccorres:
"ccorres dc xfdc \<top> (\<lbrace>\<acute>addr = pd\<rbrace>) []
(doMachineOp (setCurrentPD pd))
(Call setCurrentPD_'proc)"
apply cinit'
apply (clarsimp simp: setCurrentPD_def doMachineOp_bind empty_fail_dsb empty_fail_isb
writeTTBR0_empty_fail
intro!: ccorres_cond_empty)
apply (rule ccorres_rhs_assoc)
apply (ctac (no_vcg) add: dsb_ccorres)
apply (ctac (no_vcg) add: writeTTBR0_ccorres)
apply (ctac (no_vcg) add: isb_ccorres)
apply wp+
apply clarsimp
done
end
end
|
\chapter{Introduction}
The {\libraryname} ({\libraryshort}) is a general-purpose framework for implementing {\em functional encryption} schemes. Functional encryption is a generalization of public-key encryption that encompasses a number of novel encryption technologies, including Attribute-Based Encryption (ABE), Identity-Based Encryption (IBE), and several other new primitives.
%\medskip \noindent
%{\bf Functional Encryption.}
\section{Background: Functional Encryption}
%One of the foremost uses of cryptography is to control access to data. Encryption is an excellent tool for this purpose. In a public key encryption scheme, users encrypt data for a specific user under her {\em public key}. The user then decrypts this information using her corresponding secret key. This approach is limited in that the encryptor must $(1)$ know the precise identity of the user he is encrypting for, and $(2)$ must first obtain the user's public key.
%One approach to simplifying this problem is to use Identity-Based Encryption (IBE). In an IBE scheme the encryptor need not obtain the user's public key prior to encrypting: instead, he encrypts under the user's identity (name, email address, etc.) along with some set of master public parameters that are shared by all users. The user obtains a corresponding decryption key from a trusted authority known as the Private Key Generator.
%While IBE simplifies problem $(2)$ above, it does not answer the first problem: what if the encryptor doesn't know the precise identity of the user to whom he is encrypting? What if, instead, he simply wishes to encrypt to some specific set of users?
In a functional encryption scheme, encryptors associate a ciphertext with some value $X$. Decryptors may request a decryption key associated with a value $Y$; such keys are produced by a trusted authority known as the Private Key Generator (PKG). For some function $F: \mathcal{X} \times \mathcal{Y} \rightarrow \{0,1\}$ associated with the encryption scheme (where $X \in \mathcal{X}$ and $Y \in \mathcal{Y}$), decryption is permitted if and only if the following relationship is satisfied:
$$F(X, Y) = 1$$
The choice of encryption scheme typically defines the function $F$ as well as the form of the inputs $X, Y$. This formulation encompasses the following encryption types:
\begin{enumerate}
\item {\em Identity Based Encryption.} In an IBE scheme \cite{shamir84,bf01}, both $X$ and $Y$ are identities (arbitrary bitstrings $\{0,1\}^*$) and $F$ outputs $1$ iff $X = Y$.
\item {\em Key-Policy Attribute Based Encryption.} In a KP-ABE \cite{sw05} scheme, the value $X$ is a list of attributes associated with the ciphertext, while the key-associated value $Y$ contains a complex ``policy'', which is typically represented by an access tree. The function $F$ outputs 1 iff the ciphertext's attributes satisfy the policy.
\item {\em Ciphertext-Policy Attribute Based Encryption.} In a CP-ABE \cite{} scheme, the value $X$ is an attribute policy that will be associated with the ciphertext, while $Y$ is an attribute list associated with the key.
\end{enumerate}
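For concreteness, taking the IBE case above as a worked instance: the encryptor supplies an identity string $X$, the PKG issues a key for an identity $Y$, and decryption succeeds exactly when
$$F_{\mathrm{IBE}}(X, Y) = 1 \iff X = Y.$$
Thus a ciphertext encrypted to the (illustrative) identity ``alice@example.com'' decrypts only under the key extracted for that same string.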
\section{Overview of the {\libraryname}}
\subsection{Supported encryption schemes}
\label{sec:supportedschemes}
The {\libraryname} currently implements the following encryption schemes. This list is expected to grow in later releases.
\begin{enumerate}
\item {\bf Lewko-Sahai-Waters KP-ABE \cite{lsw09}.} An efficient ABE scheme supporting non-monotonic access structures. Secure under the $q$-MEBDH assumption in prime-order bilinear groups.
\end{enumerate}
\subsection{What {\libraryshort} doesn't do}
\subsection{Dependencies}
\section{Outline of this Manual}
The remainder of this manual is broken into several chapters. Chapter~\ref{chap:usage} gives a basic overview of the library's usage, from the point of view of an application writer. Chapter~\ref{chap:api} provides a canonical description of the library API. Finally, Chapter~\ref{chap:schemes} provides a detailed description of the schemes that are currently supported by {\libraryshort}, with some details on their internal workings.
|
section \<open>Verification Condition Testing\<close>
theory control_flow_partial_examples
imports "../../hoare/HoareLogic/PartialCorrectness/utp_hoare"
begin
section "Examples"
text{*In this section we provide a set of examples on the verification
of programs that use control-flow statements,
with Hoare logic for partial correctness.
The combination of
relational algebra, i.e. UTP, and lens algebra allows for a semantics-based
framework for the specification of programming languages and their features. It also
provides powerful proof tactics for the framework, such as @{method rel_auto},
@{method pred_auto}, etc.*}
text{*
In the following examples:
\begin{itemize}
\<^item> The formal notation @{term "\<lbrace>Pre\<rbrace>prog\<lbrace>Post\<rbrace>\<^sub>u"} represents a Hoare triple for partial
correctness.
\<^item> All variables are represented by lenses and have the type @{typ "'v \<Longrightarrow> 's"}:
where @{typ "'v"} is the view type of the lens and @{typ "'s"} is the type of the state.
\<^item> Lens properties such as @{term "weak_lens"}, @{term "mwb_lens"}, @{term "wb_lens"},
@{term "vwb_lens"}, @{term "ief_lens"}, @{term "bij_lens"}
are used to semantically express what does not change in the state space. For example,
applying the property @{term "bij_lens"} to the variable @{term "x"} gives the term
@{term "bij_lens x"}. Informally this means that any change on x will appear on all
other variables in the state space. The property @{term "ief_lens"} is just the opposite
of @{term "bij_lens"}.
\<^item> The formal notation @{term "x \<sharp>\<sharp> P"} is syntactic sugar for
@{term "unrest_relation x P"}:
informally it is used to semantically express that the variable x does not occur
in the program P.
\<^item> The formal notation @{term "x :== v"} is syntactic sugar for @{term "assigns_r [x \<mapsto>\<^sub>s v]"}:
informally it represents an assignment of a value v to a variable x.
\<^item> The formal notation @{term "&x"} is syntactic sugar for @{term "\<langle>id\<rangle>\<^sub>s x"}:
informally it represents the content of a variable x.
\<^item> The formal notation @{term "\<guillemotleft>l\<guillemotright>"} is syntactic sugar for @{term "lit l"}:
informally it represents the lifting of an HOL literal l to a UTP expression.
\<^item> The formal notation @{term "x \<bowtie> y"} is syntactic sugar for @{term "lens_indep x y"}:
informally it is a semantic representation that uses two variables
to characterise independence between two state-space regions.
\<^item> The tactics @{method rel_auto}, @{method pred_auto}, @{method rel_simp},
@{method pred_simp}, @{method rel_blast}, @{method pred_blast} are used
to discharge proofs related to UTP-relations and UTP-predicates.
\end{itemize}
*}
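text{*As a first, minimal illustration of the notation above, assuming only
that the variable i is a @{term "weak_lens"}, the following Hoare triple
states that after assigning the literal 2 the content of i equals 2.*}
lemma assignment_test:
assumes "weak_lens i"
shows "\<lbrace>true\<rbrace>
i :== \<guillemotleft>2::int\<guillemotright>
\<lbrace>&i =\<^sub>u \<guillemotleft>2::int\<guillemotright>\<rbrace>\<^sub>u"
using assms by rel_auto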
subsection {*block feature*}
text {*block_test1 is a scenario. The scenario represents a program where i is the name of a variable
in the scope of the initial state s. In the scenario, using the command block,
we create a new variable with the same name inside the block, i.e., inside the new scope.
Now i is a local variable for the scope t.
In that case we can use a restore function on the state s to reset the variable to its
previous value, i.e., its value in the scope s, before we exit the block.*}
lemma blocks_test1:
assumes "weak_lens i"
shows "\<lbrace>true\<rbrace>
i :== \<guillemotleft>2::int\<guillemotright>;;
block (i :== \<guillemotleft>5\<guillemotright>) (II) (\<lambda> (s, s') (t, t'). i:== \<guillemotleft>\<lbrakk>\<langle>id\<rangle>\<^sub>s i\<rbrakk>\<^sub>e s\<guillemotright>) (\<lambda> (s, s') (t, t'). II)
\<lbrace>&i =\<^sub>u \<guillemotleft>2::int\<guillemotright>\<rbrace>\<^sub>u"
using assms by rel_auto
text {*block_test2 is similar to block_test1, but the variable i is a global variable.
In that case we can use the restore function and the state t to set the variable to its
latest value, i.e., its value in the scope t, possibly modified inside the scope of the block.*}
lemma blocks_test2:
assumes "weak_lens i"
shows "\<lbrace>true\<rbrace>
i :== \<guillemotleft>2::int\<guillemotright>;;
block (i :== \<guillemotleft>5\<guillemotright>) (II) (\<lambda> (s, s') (t, t'). i:== \<guillemotleft>\<lbrakk>\<langle>id\<rangle>\<^sub>s i\<rbrakk>\<^sub>e t\<guillemotright>) (\<lambda> (s, s') (t, t'). II)
\<lbrace>&i =\<^sub>u \<guillemotleft>5::int\<guillemotright>\<rbrace>\<^sub>u"
using assms by rel_auto
subsection {*Infinite loops*}
text{*The next two lemmas are the witnesses needed to justify the theory of designs.*}
lemma 1:"while\<^sub>\<bottom> true do II od = true"
unfolding while_bot_def
apply rel_simp unfolding gfp_def apply transfer apply auto done
lemma "in\<alpha> \<sharp> ( x :== \<guillemotleft>c\<guillemotright>) \<Longrightarrow> while\<^sub>\<bottom> true do II od;; x :== \<guillemotleft>c\<guillemotright> = x :== \<guillemotleft>c\<guillemotright>"
apply (subst 1) apply (simp only: assigns_r.abs_eq )
apply (simp only: seqr_def) apply simp
apply rel_simp apply transfer apply auto done
end
|
lemma space_lebesgue_on [simp]: "space (lebesgue_on S) = S" |
lemmas prime_dvd_mult_eq_int = prime_dvd_mult_iff[where ?'a = int] |
[STATEMENT]
lemma execn_call:
"\<lbrakk>\<Gamma> p=Some bdy;\<Gamma>\<turnstile>\<langle>bdy,Normal (init s)\<rangle> =n\<Rightarrow> Normal t;
\<Gamma>\<turnstile>\<langle>c s t,Normal (return s t)\<rangle> =Suc n\<Rightarrow> u\<rbrakk>
\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>call init p return c,Normal s\<rangle> =Suc n\<Rightarrow> u"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<Gamma> p = Some bdy; \<Gamma>\<turnstile> \<langle>bdy,Normal (init s)\<rangle> =n\<Rightarrow> Normal t; \<Gamma>\<turnstile> \<langle>c s t,Normal (return s t)\<rangle> =Suc n\<Rightarrow> u\<rbrakk> \<Longrightarrow> \<Gamma>\<turnstile> \<langle>call init p return c,Normal s\<rangle> =Suc n\<Rightarrow> u
[PROOF STEP]
apply (simp add: call_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<Gamma> p = Some bdy; \<Gamma>\<turnstile> \<langle>bdy,Normal (init s)\<rangle> =n\<Rightarrow> Normal t; \<Gamma>\<turnstile> \<langle>c s t,Normal (return s t)\<rangle> =Suc n\<Rightarrow> u\<rbrakk> \<Longrightarrow> \<Gamma>\<turnstile> \<langle>block init (Call p) return c,Normal s\<rangle> =Suc n\<Rightarrow> u
[PROOF STEP]
apply (rule execn_block)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>\<Gamma> p = Some bdy; \<Gamma>\<turnstile> \<langle>bdy,Normal (init s)\<rangle> =n\<Rightarrow> Normal t; \<Gamma>\<turnstile> \<langle>c s t,Normal (return s t)\<rangle> =Suc n\<Rightarrow> u\<rbrakk> \<Longrightarrow> \<Gamma>\<turnstile> \<langle>Call p,Normal (init s)\<rangle> =Suc n\<Rightarrow> Normal ?t
2. \<lbrakk>\<Gamma> p = Some bdy; \<Gamma>\<turnstile> \<langle>bdy,Normal (init s)\<rangle> =n\<Rightarrow> Normal t; \<Gamma>\<turnstile> \<langle>c s t,Normal (return s t)\<rangle> =Suc n\<Rightarrow> u\<rbrakk> \<Longrightarrow> \<Gamma>\<turnstile> \<langle>c s ?t,Normal (return s ?t)\<rangle> =Suc n\<Rightarrow> u
[PROOF STEP]
apply (erule (1) Call)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<Gamma> p = Some bdy; \<Gamma>\<turnstile> \<langle>bdy,Normal (init s)\<rangle> =n\<Rightarrow> Normal t; \<Gamma>\<turnstile> \<langle>c s t,Normal (return s t)\<rangle> =Suc n\<Rightarrow> u\<rbrakk> \<Longrightarrow> \<Gamma>\<turnstile> \<langle>c s t,Normal (return s t)\<rangle> =Suc n\<Rightarrow> u
[PROOF STEP]
apply assumption
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done |
{-# LANGUAGE BangPatterns #-}
{-# LANGUAGE NoMonomorphismRestriction #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE ConstraintKinds #-}
{-# LANGUAGE FlexibleContexts #-}
{-# OPTIONS -Wall #-}
module Statistics.Iteratee.Sample (
minMaxNBy
, range
, mean
, harmonicMean
, variance
, stdDev
) where
import Statistics.Iteratee.Compat
import Control.Arrow
import Control.Monad
import Data.Iteratee as I
import Data.Heap as Heap
-- | /O(n)/ minMaxNBy. Calculate the 'n' highest and lowest elements of the
-- stream, according to the given priority function.
-- Returns /([minimum],[maximum])/ with the /(minimum,maximum)/
-- elements listed first.
minMaxNBy
:: forall m prio s el. (Monad m, Ord prio, ListLikey s el)
=> Int
-> (el -> prio)
-> Iteratee s m ([(prio,el)],[(prio,el)])
minMaxNBy ns prio = finalize `liftM` I.foldl' step (Heap.empty,Heap.empty)
where
finalize :: (MaxPrioHeap prio el, MinPrioHeap prio el)
-> ([(prio,el)],[(prio,el)])
finalize = Heap.toDescList *** Heap.toDescList
addHeap val = Heap.insert (prio val, val)
step :: (MaxPrioHeap prio el, MinPrioHeap prio el) -> el
-> (MaxPrioHeap prio el, MinPrioHeap prio el)
step (!mins,!maxes) val = let sz = Heap.size mins
adj hp = if sz >= ns
then Heap.drop 1 hp
else hp
in (adj $ addHeap val mins
,adj $ addHeap val maxes)
{-# INLINE minMaxNBy #-}
-- | /O(n)/ Range. The difference between the largest and smallest elements of
-- a stream.
range :: (Monad m, ListLikey s el, Num el, Ord el)
=> Iteratee s m el
range = finalize `liftM` minMaxNBy 1 id
where
finalize ([mins],[maxes]) = snd maxes - snd mins
finalize _ = 0
{-# INLINE range #-}
-- | /O(n)/ Arithmetic mean. Uses Welford's algorithm.
mean :: forall s m el. (Fractional el, Monad m, ListLikey s el)
=> Iteratee s m el
mean = fst `liftM` I.foldl' step (0,0)
where
step :: (el,Integer) -> el -> (el,Integer)
step (!m,!n) x = let m' = m + (x-m) / fromIntegral n'
n' = n + 1
in (m',n')
{-# INLINE mean #-}
-- | /O(n)/ Harmonic mean.
harmonicMean :: (Fractional el, Monad m, ListLikey s el) => Iteratee s m el
harmonicMean = finalize `liftM` I.foldl' step (0,0 :: Integer)
where
finalize (m,n) = fromIntegral n / m
step (!m,!n) val = (m+(1/val),n+1)
{-# INLINE harmonicMean #-}
-- | /O(n)/ variance, using Knuth's algorithm.
var :: (Fractional el, Integral t, Monad m, ListLikey s el)
=> Iteratee s m (t, el, el)
var = I.foldl' step (0,0,0)
where
step (!n,!m,!s) x = let n' = n+1
m' = m+d/fromIntegral n'
s' = s+d* (x-m')
d = x-m
in (n',m',s')
{-# INLINE var #-}
-- | /O(n)/ Maximum likelihood estimate of a sample's variance, using Knuth's
-- algorithm.
variance :: (Fractional b, Monad m, ListLikey s b) => Iteratee s m b
variance = finalize `liftM` var
where
finalize (n,_,s)
| n > 1 = s / fromInteger n
| otherwise = 0
{-# INLINE variance #-}
-- | /O(n)/ Standard deviation, using Knuth's algorithm.
stdDev :: (Floating b, Monad m, Functor m, ListLikey s b) => Iteratee s m b
stdDev = sqrt `liftM` variance
{-# INLINE stdDev #-}
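-- A minimal usage sketch (illustrative only; it assumes a plain list stream,
-- which has the required ListLike instance, fed via 'enumPure1Chunk' and
-- 'run' from Data.Iteratee):
--
-- > run =<< enumPure1Chunk [1,2,3,4 :: Double] mean
-- 2.5
--
-- The same pattern drives any of the other iteratees above, e.g. 'variance'
-- or 'stdDev'.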
|
[GOAL]
a b : ℤ
⊢ natAbs a = natAbs b ↔ Associated a b
[PROOFSTEP]
refine' Int.natAbs_eq_natAbs_iff.trans _
[GOAL]
a b : ℤ
⊢ a = b ∨ a = -b ↔ Associated a b
[PROOFSTEP]
constructor
[GOAL]
case mp
a b : ℤ
⊢ a = b ∨ a = -b → Associated a b
[PROOFSTEP]
rintro (rfl | rfl)
[GOAL]
case mp.inl
a : ℤ
⊢ Associated a a
[PROOFSTEP]
rfl
[GOAL]
case mp.inr
b : ℤ
⊢ Associated (-b) b
[PROOFSTEP]
exact ⟨-1, by simp⟩
[GOAL]
b : ℤ
⊢ -b * ↑(-1) = b
[PROOFSTEP]
simp
[GOAL]
case mpr
a b : ℤ
⊢ Associated a b → a = b ∨ a = -b
[PROOFSTEP]
rintro ⟨u, rfl⟩
[GOAL]
case mpr.intro
a : ℤ
u : ℤˣ
⊢ a = a * ↑u ∨ a = -(a * ↑u)
[PROOFSTEP]
obtain rfl | rfl := Int.units_eq_one_or u
[GOAL]
case mpr.intro.inl
a : ℤ
⊢ a = a * ↑1 ∨ a = -(a * ↑1)
[PROOFSTEP]
exact Or.inl (by simp)
[GOAL]
a : ℤ
⊢ a = a * ↑1
[PROOFSTEP]
simp
[GOAL]
case mpr.intro.inr
a : ℤ
⊢ a = a * ↑(-1) ∨ a = -(a * ↑(-1))
[PROOFSTEP]
exact Or.inr (by simp)
[GOAL]
a : ℤ
⊢ a = -(a * ↑(-1))
[PROOFSTEP]
simp
|
[STATEMENT]
lemma app'Invoke[simp]:
"app' (Invoke C mn fpTs, G, pc, maxs, rT, ST, LT) =
(\<exists>apTs X ST' mD' rT' b'.
ST = (rev apTs) @ X # ST' \<and>
length apTs = length fpTs \<and> is_class G C \<and>
(\<forall>(aT,fT)\<in>set(zip apTs fpTs). G \<turnstile> aT \<preceq> fT) \<and>
method (G,C) (mn,fpTs) = Some (mD', rT', b') \<and> G \<turnstile> X \<preceq> Class C)"
(is "?app ST LT = ?P ST LT")
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. app' (Invoke C mn fpTs, G, pc, maxs, rT, ST, LT) = (\<exists>apTs X ST' mD' rT' b'. ST = rev apTs @ X # ST' \<and> length apTs = length fpTs \<and> is_class G C \<and> (\<forall>(aT, fT)\<in>set (zip apTs fpTs). G \<turnstile> aT \<preceq> fT) \<and> method (G, C) (mn, fpTs) = Some (mD', rT', b') \<and> G \<turnstile> X \<preceq> Class C)
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. app' (Invoke C mn fpTs, G, pc, maxs, rT, ST, LT) \<Longrightarrow> \<exists>apTs X ST' mD' rT' b'. ST = rev apTs @ X # ST' \<and> length apTs = length fpTs \<and> is_class G C \<and> (\<forall>(aT, fT)\<in>set (zip apTs fpTs). G \<turnstile> aT \<preceq> fT) \<and> method (G, C) (mn, fpTs) = Some (mD', rT', b') \<and> G \<turnstile> X \<preceq> Class C
2. \<exists>apTs X ST' mD' rT' b'. ST = rev apTs @ X # ST' \<and> length apTs = length fpTs \<and> is_class G C \<and> (\<forall>(aT, fT)\<in>set (zip apTs fpTs). G \<turnstile> aT \<preceq> fT) \<and> method (G, C) (mn, fpTs) = Some (mD', rT', b') \<and> G \<turnstile> X \<preceq> Class C \<Longrightarrow> app' (Invoke C mn fpTs, G, pc, maxs, rT, ST, LT)
[PROOF STEP]
assume "?P ST LT"
[PROOF STATE]
proof (state)
this:
\<exists>apTs X ST' mD' rT' b'. ST = rev apTs @ X # ST' \<and> length apTs = length fpTs \<and> is_class G C \<and> (\<forall>a\<in>set (zip apTs fpTs). case a of (aT, fT) \<Rightarrow> G \<turnstile> aT \<preceq> fT) \<and> method (G, C) (mn, fpTs) = Some (mD', rT', b') \<and> G \<turnstile> X \<preceq> Class C
goal (2 subgoals):
1. app' (Invoke C mn fpTs, G, pc, maxs, rT, ST, LT) \<Longrightarrow> \<exists>apTs X ST' mD' rT' b'. ST = rev apTs @ X # ST' \<and> length apTs = length fpTs \<and> is_class G C \<and> (\<forall>(aT, fT)\<in>set (zip apTs fpTs). G \<turnstile> aT \<preceq> fT) \<and> method (G, C) (mn, fpTs) = Some (mD', rT', b') \<and> G \<turnstile> X \<preceq> Class C
2. \<exists>apTs X ST' mD' rT' b'. ST = rev apTs @ X # ST' \<and> length apTs = length fpTs \<and> is_class G C \<and> (\<forall>(aT, fT)\<in>set (zip apTs fpTs). G \<turnstile> aT \<preceq> fT) \<and> method (G, C) (mn, fpTs) = Some (mD', rT', b') \<and> G \<turnstile> X \<preceq> Class C \<Longrightarrow> app' (Invoke C mn fpTs, G, pc, maxs, rT, ST, LT)
[PROOF STEP]
thus "?app ST LT"
[PROOF STATE]
proof (prove)
using this:
\<exists>apTs X ST' mD' rT' b'. ST = rev apTs @ X # ST' \<and> length apTs = length fpTs \<and> is_class G C \<and> (\<forall>a\<in>set (zip apTs fpTs). case a of (aT, fT) \<Rightarrow> G \<turnstile> aT \<preceq> fT) \<and> method (G, C) (mn, fpTs) = Some (mD', rT', b') \<and> G \<turnstile> X \<preceq> Class C
goal (1 subgoal):
1. app' (Invoke C mn fpTs, G, pc, maxs, rT, ST, LT)
[PROOF STEP]
by (auto simp add: list_all2_iff)
[PROOF STATE]
proof (state)
this:
app' (Invoke C mn fpTs, G, pc, maxs, rT, ST, LT)
goal (1 subgoal):
1. app' (Invoke C mn fpTs, G, pc, maxs, rT, ST, LT) \<Longrightarrow> \<exists>apTs X ST' mD' rT' b'. ST = rev apTs @ X # ST' \<and> length apTs = length fpTs \<and> is_class G C \<and> (\<forall>(aT, fT)\<in>set (zip apTs fpTs). G \<turnstile> aT \<preceq> fT) \<and> method (G, C) (mn, fpTs) = Some (mD', rT', b') \<and> G \<turnstile> X \<preceq> Class C
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. app' (Invoke C mn fpTs, G, pc, maxs, rT, ST, LT) \<Longrightarrow> \<exists>apTs X ST' mD' rT' b'. ST = rev apTs @ X # ST' \<and> length apTs = length fpTs \<and> is_class G C \<and> (\<forall>(aT, fT)\<in>set (zip apTs fpTs). G \<turnstile> aT \<preceq> fT) \<and> method (G, C) (mn, fpTs) = Some (mD', rT', b') \<and> G \<turnstile> X \<preceq> Class C
[PROOF STEP]
assume app: "?app ST LT"
[PROOF STATE]
proof (state)
this:
app' (Invoke C mn fpTs, G, pc, maxs, rT, ST, LT)
goal (1 subgoal):
1. app' (Invoke C mn fpTs, G, pc, maxs, rT, ST, LT) \<Longrightarrow> \<exists>apTs X ST' mD' rT' b'. ST = rev apTs @ X # ST' \<and> length apTs = length fpTs \<and> is_class G C \<and> (\<forall>(aT, fT)\<in>set (zip apTs fpTs). G \<turnstile> aT \<preceq> fT) \<and> method (G, C) (mn, fpTs) = Some (mD', rT', b') \<and> G \<turnstile> X \<preceq> Class C
[PROOF STEP]
hence l: "length fpTs < length ST"
[PROOF STATE]
proof (prove)
using this:
app' (Invoke C mn fpTs, G, pc, maxs, rT, ST, LT)
goal (1 subgoal):
1. length fpTs < length ST
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
length fpTs < length ST
goal (1 subgoal):
1. app' (Invoke C mn fpTs, G, pc, maxs, rT, ST, LT) \<Longrightarrow> \<exists>apTs X ST' mD' rT' b'. ST = rev apTs @ X # ST' \<and> length apTs = length fpTs \<and> is_class G C \<and> (\<forall>(aT, fT)\<in>set (zip apTs fpTs). G \<turnstile> aT \<preceq> fT) \<and> method (G, C) (mn, fpTs) = Some (mD', rT', b') \<and> G \<turnstile> X \<preceq> Class C
[PROOF STEP]
obtain xs ys where xs: "ST = xs @ ys" "length xs = length fpTs"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>xs ys. \<lbrakk>ST = xs @ ys; length xs = length fpTs\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. (\<And>xs ys. \<lbrakk>ST = xs @ ys; length xs = length fpTs\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
have "ST = take (length fpTs) ST @ drop (length fpTs) ST"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ST = take (length fpTs) ST @ drop (length fpTs) ST
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
ST = take (length fpTs) ST @ drop (length fpTs) ST
goal (1 subgoal):
1. (\<And>xs ys. \<lbrakk>ST = xs @ ys; length xs = length fpTs\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
ST = take (length fpTs) ST @ drop (length fpTs) ST
goal (1 subgoal):
1. (\<And>xs ys. \<lbrakk>ST = xs @ ys; length xs = length fpTs\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
from l
[PROOF STATE]
proof (chain)
picking this:
length fpTs < length ST
[PROOF STEP]
have "length (take (length fpTs) ST) = length fpTs"
[PROOF STATE]
proof (prove)
using this:
length fpTs < length ST
goal (1 subgoal):
1. length (take (length fpTs) ST) = length fpTs
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
length (take (length fpTs) ST) = length fpTs
goal (1 subgoal):
1. (\<And>xs ys. \<lbrakk>ST = xs @ ys; length xs = length fpTs\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
ST = take (length fpTs) ST @ drop (length fpTs) ST
length (take (length fpTs) ST) = length fpTs
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
ST = take (length fpTs) ST @ drop (length fpTs) ST
length (take (length fpTs) ST) = length fpTs
goal (1 subgoal):
1. thesis
[PROOF STEP]
..
[PROOF STATE]
proof (state)
this:
thesis
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
ST = xs @ ys
length xs = length fpTs
goal (1 subgoal):
1. app' (Invoke C mn fpTs, G, pc, maxs, rT, ST, LT) \<Longrightarrow> \<exists>apTs X ST' mD' rT' b'. ST = rev apTs @ X # ST' \<and> length apTs = length fpTs \<and> is_class G C \<and> (\<forall>(aT, fT)\<in>set (zip apTs fpTs). G \<turnstile> aT \<preceq> fT) \<and> method (G, C) (mn, fpTs) = Some (mD', rT', b') \<and> G \<turnstile> X \<preceq> Class C
[PROOF STEP]
obtain apTs where
"ST = (rev apTs) @ ys" and "length apTs = length fpTs"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>apTs. \<lbrakk>ST = rev apTs @ ys; length apTs = length fpTs\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. (\<And>apTs. \<lbrakk>ST = rev apTs @ ys; length apTs = length fpTs\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
from xs(1)
[PROOF STATE]
proof (chain)
picking this:
ST = xs @ ys
[PROOF STEP]
have "ST = rev (rev xs) @ ys"
[PROOF STATE]
proof (prove)
using this:
ST = xs @ ys
goal (1 subgoal):
1. ST = rev (rev xs) @ ys
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
ST = rev (rev xs) @ ys
goal (1 subgoal):
1. (\<And>apTs. \<lbrakk>ST = rev apTs @ ys; length apTs = length fpTs\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
ST = rev (rev xs) @ ys
[PROOF STEP]
show thesis
[PROOF STATE]
proof (prove)
using this:
ST = rev (rev xs) @ ys
goal (1 subgoal):
1. thesis
[PROOF STEP]
by (rule that) (simp add: xs(2))
[PROOF STATE]
proof (state)
this:
thesis
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
ST = rev apTs @ ys
length apTs = length fpTs
goal (1 subgoal):
1. app' (Invoke C mn fpTs, G, pc, maxs, rT, ST, LT) \<Longrightarrow> \<exists>apTs X ST' mD' rT' b'. ST = rev apTs @ X # ST' \<and> length apTs = length fpTs \<and> is_class G C \<and> (\<forall>(aT, fT)\<in>set (zip apTs fpTs). G \<turnstile> aT \<preceq> fT) \<and> method (G, C) (mn, fpTs) = Some (mD', rT', b') \<and> G \<turnstile> X \<preceq> Class C
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
ST = rev apTs @ ys
length apTs = length fpTs
goal (1 subgoal):
1. app' (Invoke C mn fpTs, G, pc, maxs, rT, ST, LT) \<Longrightarrow> \<exists>apTs X ST' mD' rT' b'. ST = rev apTs @ X # ST' \<and> length apTs = length fpTs \<and> is_class G C \<and> (\<forall>(aT, fT)\<in>set (zip apTs fpTs). G \<turnstile> aT \<preceq> fT) \<and> method (G, C) (mn, fpTs) = Some (mD', rT', b') \<and> G \<turnstile> X \<preceq> Class C
[PROOF STEP]
from l xs
[PROOF STATE]
proof (chain)
picking this:
length fpTs < length ST
ST = xs @ ys
length xs = length fpTs
[PROOF STEP]
obtain X ST' where "ys = X#ST'"
[PROOF STATE]
proof (prove)
using this:
length fpTs < length ST
ST = xs @ ys
length xs = length fpTs
goal (1 subgoal):
1. (\<And>X ST'. ys = X # ST' \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (auto simp add: neq_Nil_conv)
[PROOF STATE]
proof (state)
this:
ys = X # ST'
goal (1 subgoal):
1. app' (Invoke C mn fpTs, G, pc, maxs, rT, ST, LT) \<Longrightarrow> \<exists>apTs X ST' mD' rT' b'. ST = rev apTs @ X # ST' \<and> length apTs = length fpTs \<and> is_class G C \<and> (\<forall>(aT, fT)\<in>set (zip apTs fpTs). G \<turnstile> aT \<preceq> fT) \<and> method (G, C) (mn, fpTs) = Some (mD', rT', b') \<and> G \<turnstile> X \<preceq> Class C
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
ST = rev apTs @ ys
length apTs = length fpTs
ys = X # ST'
[PROOF STEP]
have "ST = (rev apTs) @ X # ST'" "length apTs = length fpTs"
[PROOF STATE]
proof (prove)
using this:
ST = rev apTs @ ys
length apTs = length fpTs
ys = X # ST'
goal (1 subgoal):
1. ST = rev apTs @ X # ST' &&& length apTs = length fpTs
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
ST = rev apTs @ X # ST'
length apTs = length fpTs
goal (1 subgoal):
1. app' (Invoke C mn fpTs, G, pc, maxs, rT, ST, LT) \<Longrightarrow> \<exists>apTs X ST' mD' rT' b'. ST = rev apTs @ X # ST' \<and> length apTs = length fpTs \<and> is_class G C \<and> (\<forall>(aT, fT)\<in>set (zip apTs fpTs). G \<turnstile> aT \<preceq> fT) \<and> method (G, C) (mn, fpTs) = Some (mD', rT', b') \<and> G \<turnstile> X \<preceq> Class C
[PROOF STEP]
with app
[PROOF STATE]
proof (chain)
picking this:
app' (Invoke C mn fpTs, G, pc, maxs, rT, ST, LT)
ST = rev apTs @ X # ST'
length apTs = length fpTs
[PROOF STEP]
show "?P ST LT"
[PROOF STATE]
proof (prove)
using this:
app' (Invoke C mn fpTs, G, pc, maxs, rT, ST, LT)
ST = rev apTs @ X # ST'
length apTs = length fpTs
goal (1 subgoal):
1. \<exists>apTs X ST' mD' rT' b'. ST = rev apTs @ X # ST' \<and> length apTs = length fpTs \<and> is_class G C \<and> (\<forall>a\<in>set (zip apTs fpTs). case a of (aT, fT) \<Rightarrow> G \<turnstile> aT \<preceq> fT) \<and> method (G, C) (mn, fpTs) = Some (mD', rT', b') \<and> G \<turnstile> X \<preceq> Class C
[PROOF STEP]
apply (clarsimp simp add: list_all2_iff)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>a aa ab ac ad b. \<lbrakk>ST = rev apTs @ X # ST'; length apTs = length fpTs; G \<turnstile> X \<preceq> Class C; is_class G C; \<forall>x\<in>set (zip apTs fpTs). case x of (x, xa) \<Rightarrow> G \<turnstile> x \<preceq> xa; method (G, C) (mn, fpTs) = Some (a, aa, ab, ac, ad, b)\<rbrakk> \<Longrightarrow> \<exists>apTsa Xa. (\<exists>ST'a. rev apTs @ X # ST' = rev apTsa @ Xa # ST'a) \<and> length apTsa = length fpTs \<and> (\<forall>x\<in>set (zip apTsa fpTs). case x of (x, xa) \<Rightarrow> G \<turnstile> x \<preceq> xa) \<and> G \<turnstile> Xa \<preceq> Class C
[PROOF STEP]
apply (intro exI conjI)
[PROOF STATE]
proof (prove)
goal (4 subgoals):
1. \<And>a aa ab ac ad b. \<lbrakk>ST = rev apTs @ X # ST'; length apTs = length fpTs; G \<turnstile> X \<preceq> Class C; is_class G C; \<forall>x\<in>set (zip apTs fpTs). case x of (x, xa) \<Rightarrow> G \<turnstile> x \<preceq> xa; method (G, C) (mn, fpTs) = Some (a, aa, ab, ac, ad, b)\<rbrakk> \<Longrightarrow> rev apTs @ X # ST' = rev (?apTs25 a aa ab ac ad b) @ ?X26 a aa ab ac ad b # ?ST'30 a aa ab ac ad b
2. \<And>a aa ab ac ad b. \<lbrakk>ST = rev apTs @ X # ST'; length apTs = length fpTs; G \<turnstile> X \<preceq> Class C; is_class G C; \<forall>x\<in>set (zip apTs fpTs). case x of (x, xa) \<Rightarrow> G \<turnstile> x \<preceq> xa; method (G, C) (mn, fpTs) = Some (a, aa, ab, ac, ad, b)\<rbrakk> \<Longrightarrow> length (?apTs25 a aa ab ac ad b) = length fpTs
3. \<And>a aa ab ac ad b. \<lbrakk>ST = rev apTs @ X # ST'; length apTs = length fpTs; G \<turnstile> X \<preceq> Class C; is_class G C; \<forall>x\<in>set (zip apTs fpTs). case x of (x, xa) \<Rightarrow> G \<turnstile> x \<preceq> xa; method (G, C) (mn, fpTs) = Some (a, aa, ab, ac, ad, b)\<rbrakk> \<Longrightarrow> \<forall>x\<in>set (zip (?apTs25 a aa ab ac ad b) fpTs). case x of (x, xa) \<Rightarrow> G \<turnstile> x \<preceq> xa
4. \<And>a aa ab ac ad b. \<lbrakk>ST = rev apTs @ X # ST'; length apTs = length fpTs; G \<turnstile> X \<preceq> Class C; is_class G C; \<forall>x\<in>set (zip apTs fpTs). case x of (x, xa) \<Rightarrow> G \<turnstile> x \<preceq> xa; method (G, C) (mn, fpTs) = Some (a, aa, ab, ac, ad, b)\<rbrakk> \<Longrightarrow> G \<turnstile> ?X26 a aa ab ac ad b \<preceq> Class C
[PROOF STEP]
apply auto
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
[PROOF STATE]
proof (state)
this:
\<exists>apTs X ST' mD' rT' b'. ST = rev apTs @ X # ST' \<and> length apTs = length fpTs \<and> is_class G C \<and> (\<forall>a\<in>set (zip apTs fpTs). case a of (aT, fT) \<Rightarrow> G \<turnstile> aT \<preceq> fT) \<and> method (G, C) (mn, fpTs) = Some (mD', rT', b') \<and> G \<turnstile> X \<preceq> Class C
goal:
No subgoals!
[PROOF STEP]
qed |
/-
Copyright (c) 2017 Johannes Hölzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johannes Hölzl
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.algebra.big_operators.basic
import Mathlib.PostPort
universes u v w
namespace Mathlib
/-!
# Results about big operators with values in an ordered algebraic structure.
Mostly monotonicity results for the `∑` operation.
-/
namespace finset
theorem le_sum_of_subadditive {α : Type u} {β : Type v} {γ : Type w} [add_comm_monoid α] [ordered_add_comm_monoid β] (f : α → β) (h_zero : f 0 = 0) (h_add : ∀ (x y : α), f (x + y) ≤ f x + f y) (s : finset γ) (g : γ → α) : f (finset.sum s fun (x : γ) => g x) ≤ finset.sum s fun (x : γ) => f (g x) := sorry
theorem abs_sum_le_sum_abs {α : Type u} {β : Type v} [linear_ordered_field α] {f : β → α} {s : finset β} : abs (finset.sum s fun (x : β) => f x) ≤ finset.sum s fun (x : β) => abs (f x) :=
le_sum_of_subadditive abs abs_zero abs_add s f
theorem abs_prod {α : Type u} {β : Type v} [linear_ordered_comm_ring α] {f : β → α} {s : finset β} : abs (finset.prod s fun (x : β) => f x) = finset.prod s fun (x : β) => abs (f x) :=
monoid_hom.map_prod (monoid_with_zero_hom.to_monoid_hom abs_hom) (fun (x : β) => f x) s
theorem sum_le_sum {α : Type u} {β : Type v} {s : finset α} {f : α → β} {g : α → β} [ordered_add_comm_monoid β] : (∀ (x : α), x ∈ s → f x ≤ g x) → (finset.sum s fun (x : α) => f x) ≤ finset.sum s fun (x : α) => g x := sorry
theorem card_le_mul_card_image_of_maps_to {α : Type u} {γ : Type w} [DecidableEq γ] {f : α → γ} {s : finset α} {t : finset γ} (Hf : ∀ (a : α), a ∈ s → f a ∈ t) (n : ℕ) (hn : ∀ (a : γ), a ∈ t → card (filter (fun (x : α) => f x = a) s) ≤ n) : card s ≤ n * card t := sorry
theorem card_le_mul_card_image {α : Type u} {γ : Type w} [DecidableEq γ] {f : α → γ} (s : finset α) (n : ℕ) (hn : ∀ (a : γ), a ∈ image f s → card (filter (fun (x : α) => f x = a) s) ≤ n) : card s ≤ n * card (image f s) :=
card_le_mul_card_image_of_maps_to (fun (x : α) => mem_image_of_mem f) n hn
theorem mul_card_image_le_card_of_maps_to {α : Type u} {γ : Type w} [DecidableEq γ] {f : α → γ} {s : finset α} {t : finset γ} (Hf : ∀ (a : α), a ∈ s → f a ∈ t) (n : ℕ) (hn : ∀ (a : γ), a ∈ t → n ≤ card (filter (fun (x : α) => f x = a) s)) : n * card t ≤ card s := sorry
theorem mul_card_image_le_card {α : Type u} {γ : Type w} [DecidableEq γ] {f : α → γ} (s : finset α) (n : ℕ) (hn : ∀ (a : γ), a ∈ image f s → n ≤ card (filter (fun (x : α) => f x = a) s)) : n * card (image f s) ≤ card s :=
mul_card_image_le_card_of_maps_to (fun (x : α) => mem_image_of_mem f) n hn
theorem sum_nonneg {α : Type u} {β : Type v} {s : finset α} {f : α → β} [ordered_add_comm_monoid β] (h : ∀ (x : α), x ∈ s → 0 ≤ f x) : 0 ≤ finset.sum s fun (x : α) => f x :=
le_trans (eq.mpr (id (Eq._oldrec (Eq.refl (0 ≤ finset.sum s fun (x : α) => 0)) sum_const_zero)) (le_refl 0))
(sum_le_sum h)
theorem sum_nonpos {α : Type u} {β : Type v} {s : finset α} {f : α → β} [ordered_add_comm_monoid β] (h : ∀ (x : α), x ∈ s → f x ≤ 0) : (finset.sum s fun (x : α) => f x) ≤ 0 :=
le_trans (sum_le_sum h)
(eq.mpr (id (Eq._oldrec (Eq.refl ((finset.sum s fun (x : α) => 0) ≤ 0)) sum_const_zero)) (le_refl 0))
theorem sum_le_sum_of_subset_of_nonneg {α : Type u} {β : Type v} {s₁ : finset α} {s₂ : finset α} {f : α → β} [ordered_add_comm_monoid β] (h : s₁ ⊆ s₂) (hf : ∀ (x : α), x ∈ s₂ → ¬x ∈ s₁ → 0 ≤ f x) : (finset.sum s₁ fun (x : α) => f x) ≤ finset.sum s₂ fun (x : α) => f x := sorry
theorem sum_mono_set_of_nonneg {α : Type u} {β : Type v} {f : α → β} [ordered_add_comm_monoid β] (hf : ∀ (x : α), 0 ≤ f x) : monotone fun (s : finset α) => finset.sum s fun (x : α) => f x :=
fun (s₁ s₂ : finset α) (hs : s₁ ≤ s₂) =>
sum_le_sum_of_subset_of_nonneg hs fun (x : α) (_x : x ∈ s₂) (_x : ¬x ∈ s₁) => hf x
theorem sum_fiberwise_le_sum_of_sum_fiber_nonneg {α : Type u} {β : Type v} {γ : Type w} [ordered_add_comm_monoid β] [DecidableEq γ] {s : finset α} {t : finset γ} {g : α → γ} {f : α → β} (h : ∀ (y : γ), ¬y ∈ t → 0 ≤ finset.sum (filter (fun (x : α) => g x = y) s) fun (x : α) => f x) : (finset.sum t fun (y : γ) => finset.sum (filter (fun (x : α) => g x = y) s) fun (x : α) => f x) ≤
finset.sum s fun (x : α) => f x := sorry
theorem sum_le_sum_fiberwise_of_sum_fiber_nonpos {α : Type u} {β : Type v} {γ : Type w} [ordered_add_comm_monoid β] [DecidableEq γ] {s : finset α} {t : finset γ} {g : α → γ} {f : α → β} (h : ∀ (y : γ), ¬y ∈ t → (finset.sum (filter (fun (x : α) => g x = y) s) fun (x : α) => f x) ≤ 0) : (finset.sum s fun (x : α) => f x) ≤
finset.sum t fun (y : γ) => finset.sum (filter (fun (x : α) => g x = y) s) fun (x : α) => f x :=
sum_fiberwise_le_sum_of_sum_fiber_nonneg h
theorem sum_eq_zero_iff_of_nonneg {α : Type u} {β : Type v} {s : finset α} {f : α → β} [ordered_add_comm_monoid β] : (∀ (x : α), x ∈ s → 0 ≤ f x) → ((finset.sum s fun (x : α) => f x) = 0 ↔ ∀ (x : α), x ∈ s → f x = 0) := sorry
theorem sum_eq_zero_iff_of_nonpos {α : Type u} {β : Type v} {s : finset α} {f : α → β} [ordered_add_comm_monoid β] : (∀ (x : α), x ∈ s → f x ≤ 0) → ((finset.sum s fun (x : α) => f x) = 0 ↔ ∀ (x : α), x ∈ s → f x = 0) :=
sum_eq_zero_iff_of_nonneg
theorem single_le_sum {α : Type u} {β : Type v} {s : finset α} {f : α → β} [ordered_add_comm_monoid β] (hf : ∀ (x : α), x ∈ s → 0 ≤ f x) {a : α} (h : a ∈ s) : f a ≤ finset.sum s fun (x : α) => f x := sorry
@[simp] theorem sum_eq_zero_iff {α : Type u} {β : Type v} {s : finset α} {f : α → β} [canonically_ordered_add_monoid β] : (finset.sum s fun (x : α) => f x) = 0 ↔ ∀ (x : α), x ∈ s → f x = 0 :=
sum_eq_zero_iff_of_nonneg fun (x : α) (hx : x ∈ s) => zero_le (f x)
theorem sum_le_sum_of_subset {α : Type u} {β : Type v} {s₁ : finset α} {s₂ : finset α} {f : α → β} [canonically_ordered_add_monoid β] (h : s₁ ⊆ s₂) : (finset.sum s₁ fun (x : α) => f x) ≤ finset.sum s₂ fun (x : α) => f x :=
sum_le_sum_of_subset_of_nonneg h fun (x : α) (h₁ : x ∈ s₂) (h₂ : ¬x ∈ s₁) => zero_le (f x)
theorem sum_mono_set {α : Type u} {β : Type v} [canonically_ordered_add_monoid β] (f : α → β) : monotone fun (s : finset α) => finset.sum s fun (x : α) => f x :=
fun (s₁ s₂ : finset α) (hs : s₁ ≤ s₂) => sum_le_sum_of_subset hs
theorem sum_le_sum_of_ne_zero {α : Type u} {β : Type v} {s₁ : finset α} {s₂ : finset α} {f : α → β} [canonically_ordered_add_monoid β] (h : ∀ (x : α), x ∈ s₁ → f x ≠ 0 → x ∈ s₂) : (finset.sum s₁ fun (x : α) => f x) ≤ finset.sum s₂ fun (x : α) => f x := sorry
theorem sum_lt_sum {α : Type u} {β : Type v} {s : finset α} {f : α → β} {g : α → β} [ordered_cancel_add_comm_monoid β] (Hle : ∀ (i : α), i ∈ s → f i ≤ g i) (Hlt : ∃ (i : α), ∃ (H : i ∈ s), f i < g i) : (finset.sum s fun (x : α) => f x) < finset.sum s fun (x : α) => g x := sorry
theorem sum_lt_sum_of_nonempty {α : Type u} {β : Type v} {s : finset α} {f : α → β} {g : α → β} [ordered_cancel_add_comm_monoid β] (hs : finset.nonempty s) (Hlt : ∀ (x : α), x ∈ s → f x < g x) : (finset.sum s fun (x : α) => f x) < finset.sum s fun (x : α) => g x :=
sum_lt_sum (fun (i : α) (hi : i ∈ s) => le_of_lt (Hlt i hi))
(Exists.dcases_on hs fun (i : α) (hi : i ∈ s) => Exists.intro i (Exists.intro hi (Hlt i hi)))
theorem sum_lt_sum_of_subset {α : Type u} {β : Type v} {s₁ : finset α} {s₂ : finset α} {f : α → β} [ordered_cancel_add_comm_monoid β] [DecidableEq α] (h : s₁ ⊆ s₂) {i : α} (hi : i ∈ s₂ \ s₁) (hpos : 0 < f i) (hnonneg : ∀ (j : α), j ∈ s₂ \ s₁ → 0 ≤ f j) : (finset.sum s₁ fun (x : α) => f x) < finset.sum s₂ fun (x : α) => f x := sorry
theorem exists_lt_of_sum_lt {α : Type u} {β : Type v} {s : finset α} {f : α → β} {g : α → β} [linear_ordered_cancel_add_comm_monoid β] (Hlt : (finset.sum s fun (x : α) => f x) < finset.sum s fun (x : α) => g x) : ∃ (i : α), ∃ (H : i ∈ s), f i < g i := sorry
theorem exists_le_of_sum_le {α : Type u} {β : Type v} {s : finset α} {f : α → β} {g : α → β} [linear_ordered_cancel_add_comm_monoid β] (hs : finset.nonempty s) (Hle : (finset.sum s fun (x : α) => f x) ≤ finset.sum s fun (x : α) => g x) : ∃ (i : α), ∃ (H : i ∈ s), f i ≤ g i := sorry
theorem exists_pos_of_sum_zero_of_exists_nonzero {α : Type u} {β : Type v} {s : finset α} [linear_ordered_cancel_add_comm_monoid β] (f : α → β) (h₁ : (finset.sum s fun (e : α) => f e) = 0) (h₂ : ∃ (x : α), ∃ (H : x ∈ s), f x ≠ 0) : ∃ (x : α), ∃ (H : x ∈ s), 0 < f x := sorry
/- this is also true for an ordered commutative multiplicative monoid -/
theorem prod_nonneg {α : Type u} {β : Type v} [linear_ordered_comm_ring β] {s : finset α} {f : α → β} (h0 : ∀ (x : α), x ∈ s → 0 ≤ f x) : 0 ≤ finset.prod s fun (x : α) => f x :=
prod_induction f (fun (x : β) => 0 ≤ x) (fun (_x _x_1 : β) (ha : 0 ≤ _x) (hb : 0 ≤ _x_1) => mul_nonneg ha hb)
zero_le_one h0
/- this is also true for an ordered commutative multiplicative monoid -/
theorem prod_pos {α : Type u} {β : Type v} [linear_ordered_comm_ring β] {s : finset α} {f : α → β} (h0 : ∀ (x : α), x ∈ s → 0 < f x) : 0 < finset.prod s fun (x : α) => f x :=
prod_induction f (fun (x : β) => 0 < x) (fun (_x _x_1 : β) (ha : 0 < _x) (hb : 0 < _x_1) => mul_pos ha hb) zero_lt_one
h0
/- this is also true for an ordered commutative multiplicative monoid -/
theorem prod_le_prod {α : Type u} {β : Type v} [linear_ordered_comm_ring β] {s : finset α} {f : α → β} {g : α → β} (h0 : ∀ (x : α), x ∈ s → 0 ≤ f x) (h1 : ∀ (x : α), x ∈ s → f x ≤ g x) : (finset.prod s fun (x : α) => f x) ≤ finset.prod s fun (x : α) => g x := sorry
theorem prod_le_one {α : Type u} {β : Type v} [linear_ordered_comm_ring β] {s : finset α} {f : α → β} (h0 : ∀ (x : α), x ∈ s → 0 ≤ f x) (h1 : ∀ (x : α), x ∈ s → f x ≤ 1) : (finset.prod s fun (x : α) => f x) ≤ 1 := sorry
/-- If `g, h ≤ f` and `g i + h i ≤ f i`, then the product of `f` over `s` is at least the
sum of the products of `g` and `h`. This is the version for `linear_ordered_comm_ring`. -/
theorem prod_add_prod_le {α : Type u} {β : Type v} [linear_ordered_comm_ring β] {s : finset α} {i : α} {f : α → β} {g : α → β} {h : α → β} (hi : i ∈ s) (h2i : g i + h i ≤ f i) (hgf : ∀ (j : α), j ∈ s → j ≠ i → g j ≤ f j) (hhf : ∀ (j : α), j ∈ s → j ≠ i → h j ≤ f j) (hg : ∀ (i : α), i ∈ s → 0 ≤ g i) (hh : ∀ (i : α), i ∈ s → 0 ≤ h i) : ((finset.prod s fun (i : α) => g i) + finset.prod s fun (i : α) => h i) ≤ finset.prod s fun (i : α) => f i := sorry
theorem prod_le_prod' {α : Type u} {β : Type v} [canonically_ordered_comm_semiring β] {s : finset α} {f : α → β} {g : α → β} (h : ∀ (i : α), i ∈ s → f i ≤ g i) : (finset.prod s fun (x : α) => f x) ≤ finset.prod s fun (x : α) => g x := sorry
/-- If `g, h ≤ f` and `g i + h i ≤ f i`, then the product of `f` over `s` is at least the
sum of the products of `g` and `h`. This is the version for `canonically_ordered_comm_semiring`.
-/
theorem prod_add_prod_le' {α : Type u} {β : Type v} [canonically_ordered_comm_semiring β] {s : finset α} {i : α} {f : α → β} {g : α → β} {h : α → β} (hi : i ∈ s) (h2i : g i + h i ≤ f i) (hgf : ∀ (j : α), j ∈ s → j ≠ i → g j ≤ f j) (hhf : ∀ (j : α), j ∈ s → j ≠ i → h j ≤ f j) : ((finset.prod s fun (i : α) => g i) + finset.prod s fun (i : α) => h i) ≤ finset.prod s fun (i : α) => f i := sorry
end finset
namespace with_top
/-- A product of finite numbers is still finite -/
theorem prod_lt_top {α : Type u} {β : Type v} [canonically_ordered_comm_semiring β] [nontrivial β] [DecidableEq β] {s : finset α} {f : α → with_top β} (h : ∀ (a : α), a ∈ s → f a < ⊤) : (finset.prod s fun (x : α) => f x) < ⊤ :=
finset.prod_induction f (fun (a : with_top β) => a < ⊤) (fun (a b : with_top β) => mul_lt_top) (coe_lt_top 1) h
/-- A sum of finite numbers is still finite -/
theorem sum_lt_top {α : Type u} {β : Type v} [ordered_add_comm_monoid β] {s : finset α} {f : α → with_top β} : (∀ (a : α), a ∈ s → f a < ⊤) → (finset.sum s fun (x : α) => f x) < ⊤ := sorry
/-- A sum of finite numbers is still finite -/
theorem sum_lt_top_iff {α : Type u} {β : Type v} [canonically_ordered_add_monoid β] {s : finset α} {f : α → with_top β} : (finset.sum s fun (x : α) => f x) < ⊤ ↔ ∀ (a : α), a ∈ s → f a < ⊤ := sorry
/-- A sum of numbers is infinite iff one of them is infinite -/
theorem sum_eq_top_iff {α : Type u} {β : Type v} [canonically_ordered_add_monoid β] {s : finset α} {f : α → with_top β} : (finset.sum s fun (x : α) => f x) = ⊤ ↔ ∃ (a : α), ∃ (H : a ∈ s), f a = ⊤ := sorry
|
REBOL [
System: "Ren-C Core Extraction of the Rebol System"
Title: "Common Routines for Tools"
Rights: {
Copyright 2012-2017 Rebol Open Source Contributors
REBOL is a trademark of REBOL Technologies
}
License: {
Licensed under the Apache License, Version 2.0
See: http://www.apache.org/licenses/LICENSE-2.0
}
Version: 2.100.0
Needs: 2.100.100
Purpose: {
These are some common routines used by the utilities
that build the system, which are found in %src/tools/
}
]
; !!! This file does not include the backwards compatibility %r2r3-future.r.
; The reason is that some code assumes it is running Ren-C, and that file
; disables features which are not backward compatible, which shouldn't be
; disabled if you *are* running Ren-C (e.g. the tests)
; Simple "divider-style" thing for remarks. At a certain verbosity level,
; it could dump those remarks out...perhaps based on how many == there are.
; (This is a good reason for retaking ==, as that looks like a divider.)
;
===: func [:remarks [any-value! <...>]] [
until [
equal? '=== take remarks
]
]
;; Repository meta data.
;; - Good for keeping fixed paths out of scripts.
;;
repo: context [
root: clean-path %../../
source-root: root
tools: what-dir
]
spaced-tab: unspaced [space space space space]
to-c-name: function [
{Take a Rebol value and transliterate it as a (likely) valid C identifier}
return: [text!]
value "Will be converted to text (via UNSPACED if BLOCK!)"
[text! block! word!]
/scope "See C's rules: http://stackoverflow.com/questions/228783/"
where "Either #global or #local (defaults global)"
[issue!]
][
all [
text? value
empty? value
] then [
fail/where ["TO-C-NAME received empty input"] 'value
]
c-chars: charset [
#"a" - #"z"
#"A" - #"Z"
#"0" - #"9"
#"_"
]
string: either block? :value [unspaced value] [form value]
string: switch string [
; Used specifically by t-routine.c to make SYM_ELLIPSIS
;
"..." [copy "ellipsis"]
; Used to make SYM_HYPHEN which is needed by `charset [#"A" - #"Z"]`
;
"-" [copy "hyphen"]
; Used to deal with the /? refinements (which may not last)
;
"?" [copy "q"]
; None of these are used at present, but included just in case
;
"*" [copy "asterisk"]
"." [copy "period"]
"!" [copy "exclamation"]
"+" [copy "plus"]
"~" [copy "tilde"]
"|" [copy "bar"]
default [
;
; If these symbols occur composite in a longer word, they use a
; shorthand; e.g. `foo?` => `foo_q`
for-each [reb c] [
#"'" "" ; isn't => isnt, don't => dont
- "_" ; foo-bar => foo_bar
* "_p" ; !!! because it symbolizes a (p)ointer in C??
. "_" ; !!! same as hyphen?
? "_q" ; (q)uestion
! "_x" ; e(x)clamation
+ "_a" ; (a)ddition
~ "_t" ; (t)ilde
| "_b" ; (b)ar
][
replace/all string (form reb) c
]
string
]
]
if empty? string [
fail [
"empty identifier produced by to-c-name for"
(mold value) "of type" (mold type of value)
]
]
repeat s string [
(head? s) and [find charset [#"0" - #"9"] s/1] and [
fail ["identifier" string "starts with digit in to-c-name"]
]
find c-chars s/1 or [
fail ["Non-alphanumeric or hyphen in" string "in to-c-name"]
]
]
where: default [#global]
case [
string/1 != "_" [<ok>]
where = 'global [
fail "global C ids starting with _ are reserved"
]
where = 'local [
find charset [#"A" - #"Z"] string/2 then [
fail "local C ids starting with _ and uppercase are reserved"
]
]
fail "/SCOPE must be #global or #local"
]
return string
]
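; Usage sketch (illustrative inputs, applying the rules above):
;
;     to-c-name 'foo-bar?  ;-- "foo_bar_q"  (hyphen => _, ? => _q)
;     to-c-name "..."      ;-- "ellipsis"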
; http://stackoverflow.com/questions/11488616/
binary-to-c: function [
{Converts a binary to a string of C source that represents an initializer
for a character array. To be "strict" C standard compatible, we do not
use a string literal due to length limits (509 characters in C89, and
4095 characters in C99). Instead we produce an array formatted as
'{0xYY, ...}' with 8 bytes per line}
return: [text!]
data [binary!]
][
out: make text! 6 * (length of data)
while [not tail? data] [
;-- grab hexes in groups of 8 bytes
hexed: enbase/base (copy/part data 8) 16
data: skip data 8
for-each [digit1 digit2] hexed [
append out unspaced [{0x} digit1 digit2 {,} space]
]
take/last out ;-- drop the last space
if tail? data [
take/last out ;-- lose that last comma
]
append out newline ;-- newline after each group, and at end
]
;-- Sanity check (should be one more byte in source than commas out)
parse out [
(comma-count: 0)
some [thru "," (comma-count: comma-count + 1)]
to end
]
assert [(comma-count + 1) = (length of head of data)]
out
]
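; Usage sketch (illustrative bytes): BINARY-TO-C #{DECAFBAD} produces the
; text "0xDE, 0xCA, 0xFB, 0xAD" (grouped 8 bytes per line for longer input),
; ready to be spliced into a generated `char data[] = {...};` initializer.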
for-each-record: function [
{Iterate a table with a header by creating an object for each row}
return: [<opt> any-value!]
'var "Word to set each time to the row made into an object record"
[word!]
table "Table of values with header block as first element"
[block!]
body "Block to evaluate each time"
[block!]
][
if not block? first table [
fail {Table of records does not start with a header block}
]
headings: map-each word first table [
if not word? word [
fail [{Heading} word {is not a word}]
]
as set-word! word
]
table: next table
while-not [tail? table] [
if (length of headings) > (length of table) [
fail {Element count isn't even multiple of header count}
]
spec: collect [
for-each column-name headings [
keep column-name
keep compose/only [lit (table/1)]
table: next table
]
]
set var has spec
do body
]
]
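; Usage sketch (illustrative table data):
;
;     for-each-record rec [
;         [name code]
;         "alpha" 1
;         "beta" 2
;     ][
;         print [rec/name rec/code]
;     ]
;
; prints "alpha 1" then "beta 2".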
find-record-unique: function [
{Get a record in a table as an object, error if duplicate, blank if absent}
return: [<opt> object!]
table [block!]
{Table of values with header block as first element}
key [word!]
{Object key to search for a match on}
value
{Value that the looked up key must be uniquely equal to}
][
if not find first table key [
fail [key {not found in table headers:} (first table)]
]
result: _
for-each-record rec table [
if value <> select rec key [continue]
if result [
fail [{More than one table record matches} key {=} value]
]
; Could break, but walk whole table to verify that it is well-formed.
]
return opt result
]
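; Usage sketch (illustrative, reusing the table shape above):
;
;     find-record-unique [[name code] "alpha" 1 "beta" 2] 'name "beta"
;
; yields an object whose CODE field is 2; it fails if two rows share the
; looked-up NAME.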
parse-args: function [
args
][
ret: make block! 4
standalone: make block! 4
args: any [args copy []]
if not block? args [args: split args [some " "]]
iterate args [
name: _
value: args/1
case [
idx: find value #"=" [; name=value
name: to word! copy/part value (index of idx) - 1
value: copy next idx
]
#":" = last value [; name=value
name: to word! copy/part value (length of value) - 1
args: next args
if empty? args [
fail ["Missing value after" value]
]
value: args/1
]
]
if all [; value1,value2,...,valueN
not find value "["
find value ","
][value: mold split value ","]
either name [
append ret reduce [name value]
][; standalone-arg
append standalone value
]
]
if empty? standalone [return ret]
append ret '|
append ret standalone
]
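; Usage sketch (illustrative arguments): both NAME=VALUE and NAME: VALUE
; forms are accepted, and bare tokens are collected after a trailing `|`:
;
;     parse-args "OS_ID=0.4.40 OUT: prep.r extra"
;     ;-- [OS_ID "0.4.40" OUT "prep.r" | "extra"]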
fix-win32-path: func [
path [file!]
<local> drive colon
][
if 3 != fourth system/version [return path] ;non-windows system
drive: first path
colon: second path
all [
any [
(#"A" <= drive) and [#"Z" >= drive]
(#"a" <= drive) and [#"z" >= drive]
]
#":" = colon
] then [
insert path #"/"
remove skip path 2 ;remove ":"
]
path
]
uppercase-of: func [
{Copying variant of UPPERCASE, also FORMs words}
string [text! word!]
][
uppercase form string
]
lowercase-of: func [
{Copying variant of LOWERCASE, also FORMs words}
string [text! word!]
][
lowercase form string
]
propercase: func [value] [uppercase/part (copy value) 1]
propercase-of: func [
{Make a copy of a string with just the first character uppercase}
string [text! word!]
][
propercase form string
]
write-if-changed: function [
return: <void>
dest [file!]
content [text! block!]
][
if block? content [content: spaced content]
content: to binary! content
any [
not exists? dest
content != read dest
] then [
write dest content
]
]
relative-to-path: func [
target [file!]
base [file!]
][
target: split clean-path target "/"
base: split clean-path base "/"
if "" = last base [take/last base]
while [all [
not tail? target
not tail? base
base/1 = target/1
]] [
base: next base
target: next target
]
iterate base [base/1: %..]
append base target
to-file delimit "/" base
]
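; Usage sketch (illustrative paths): RELATIVE-TO-PATH %/a/b/c/ %/a/d/
; should yield %../b/c/ : the shared prefix %/a/ is dropped and each
; remaining component of BASE becomes %..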
|
Formal statement is: lemma zero_less_norm_iff [simp]: "norm x > 0 \<longleftrightarrow> x \<noteq> 0" Informal statement is: The norm of a vector is positive if and only if the vector is nonzero. |
import data.set data.stream.basic
open stream
structure LTS:= (S : Type) (Act : Type)(TR : set (S × Act × S))
--structure LTS:= (S : Type) (A : Type) (Δ : set (S × Act × S))
structure path (M : LTS) :=
(init : M.S)
(s : stream (M.Act × M.S))
(sound : ((init, (s 0).1, (s 0).2) ∈ M.TR) ∧
∀ i : ℕ, ((s i).2, (s (i+1)).1, (s (i+1)).2) ∈ M.TR)
variable {M : LTS}
namespace path
def index (π : path M) : ℕ+ → (M.Act × M.S)
| (n) := (π.s (n-1))
lemma drop_aux (π : path M) (n : ℕ) :
((π.s n).snd, (drop (n+1) π.s 0).fst, (drop (n+1) π.s 0).snd) ∈ M.TR ∧ ∀ (i : ℕ), ((drop (n+1) π.s i).snd, (stream.drop (n+1) π.s (i + 1)).fst, (drop (n+1) π.s (i + 1)).snd) ∈ M.TR :=
begin
have := π.sound, cases this with L R,
rw stream.drop,
dsimp at *, simp at *,
split,
apply R n,
intro i,
replace R := R (i+n.succ),
rw add_assoc at R, rw add_comm n.succ 1 at R, rw ← add_assoc at R,
apply R,
end
def drop (π : path M) : ℕ → path M
| 0 := π
| (nat.succ n) :=
path.mk (π.s n).2 (π.s.drop n.succ) (drop_aux π n)
lemma drop_drop (π : path M) {i j : ℕ} :
(π.drop i).drop j = π.drop (i+j) :=
begin
cases i, {rw drop, simp},
cases j, rw [drop,drop],
rw drop, rw drop, rw drop, simp,
rw drop_drop,
split,
rw stream.drop,
simp, rw add_comm,
rw nat.add_succ,
rw add_comm, rw nat.add_succ,
rw add_comm i.succ,
rw nat.add_succ, rw add_comm,
end
end path
inductive formula (M : LTS)
| T : formula
| state_predicate (p : M.S → Prop) : formula
| act_predicate (p : M.Act → Prop) : formula
| state (s : M.S) : formula
| act (a : M.Act) : formula
| neg (x : formula) : formula
| conj (φ₁ φ₂ : formula) : formula
| disj (φ₁ φ₂ : formula) : formula
| impl (φ₁ φ₂ : formula) : formula
| next (φ : formula) : formula
| always (φ : formula) : formula
| eventually (φ : formula) : formula
| until (φ₁ φ₂ : formula) : formula
def sat {M : LTS} : formula M → path M → Prop
| formula.T := λ _, true
| (formula.state s) := λ π, π.init = s
| (formula.act a) := λ π, (path.index π 1).1 = a
| (formula.neg φ) := λ π, ¬ (sat φ) π
| (formula.conj φ₁ φ₂) := λ π, sat φ₁ π ∧ sat φ₂ π
| (formula.disj φ₁ φ₂) := λ π, sat φ₁ π ∨ sat φ₂ π
| (formula.impl φ₁ φ₂) := λ π, sat φ₁ π → sat φ₂ π
| (formula.next φ) := λ π, sat φ (π.drop 1)
| (formula.until φ₁ φ₂) := λ π,
∃ j, sat φ₂ (π.drop j) ∧ (∀ i < j, sat φ₁ (π.drop i))
| (formula.always φ) := λ π, ∀ i, sat φ (π.drop i)
| (formula.eventually φ) := λ π, ∃ i, sat φ (π.drop i)
| (formula.state_predicate p) := λ π, p π.init
| (formula.act_predicate p) := λ π, p (path.index π 1).1
notation φ ` & ` ψ := formula.conj φ ψ
notation φ ` ⅋ ` ψ := formula.disj φ ψ
notation ` !` φ := formula.neg φ
notation φ ` U ` ψ := formula.until φ ψ
notation ` ◆` φ := formula.eventually φ
notation ` ◾` φ := formula.always φ
notation φ ` ⇒ ` ψ := formula.impl φ ψ
notation π `⊨` P := sat P π
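-- Hypothetical sanity check (not in the original file): satisfaction of an
-- atomic state formula unfolds definitionally to a claim about the path's
-- initial state, in the same way that push_neg_tok below is proved by refl.
example (s : M.S) (π : path M) : (π ⊨ formula.state s) ↔ π.init = s := by refl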
namespace formula
def weak_until (φ ψ : formula M) : formula M :=
(◾φ) ⅋ (φ U ψ)
end formula
notation φ ` W ` ψ := formula.weak_until φ ψ
namespace sat
lemma weak_until (φ ψ : formula M) (π : path M) :
sat (formula.weak_until φ ψ) π ↔ sat (◾ φ) π ∨ sat (φ U ψ) π :=
by {rw formula.weak_until, repeat {rw sat}}
end sat
lemma push_neg_tok {M : LTS} {π : path M} : ∀ P : formula M,
¬ (sat P π) ↔ sat (!P) π :=
by {intro P, refl,}
lemma always_eventually_dual (P : formula M) (π : path M) :
sat (◾!P) π ↔ (¬ sat (◆P) π) :=
by {repeat {rw sat}, tidy}
@[simp] lemma succ_add_1 {π : path M} {i : ℕ} :
(π.drop i).drop 1 = π.drop (i.succ) :=
by {rw path.drop_drop}
|
------------------------------------------------------------------------
-- The Agda standard library
--
-- Basic definitions for Characters
------------------------------------------------------------------------
{-# OPTIONS --without-K --safe #-}
module Data.Char.Base where
open import Level using (zero)
import Data.Nat.Base as ℕ
open import Function
open import Relation.Binary using (Rel)
open import Relation.Binary.PropositionalEquality
------------------------------------------------------------------------
-- Re-export the type, and renamed primitives
open import Agda.Builtin.Char public using ( Char )
renaming
-- testing
( primIsLower to isLower
; primIsDigit to isDigit
; primIsAlpha to isAlpha
; primIsSpace to isSpace
; primIsAscii to isAscii
; primIsLatin1 to isLatin1
; primIsPrint to isPrint
; primIsHexDigit to isHexDigit
-- transforming
; primToUpper to toUpper
; primToLower to toLower
-- converting
; primCharToNat to toℕ
; primNatToChar to fromℕ
)
open import Agda.Builtin.String public using ()
renaming ( primShowChar to show )
infix 4 _≈_
_≈_ : Rel Char zero
_≈_ = _≡_ on toℕ
infix 4 _<_
_<_ : Rel Char zero
_<_ = ℕ._<_ on toℕ
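-- Hypothetical sanity check (not part of the original module): _≈_ compares
-- characters via their code points, so reflexive instances hold by refl.
_ : 'a' ≈ 'a'
_ = refl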
------------------------------------------------------------------------
-- DEPRECATED NAMES
------------------------------------------------------------------------
-- Please use the new names as continuing support for the old names is
-- not guaranteed.
-- Version 1.1
toNat = toℕ
{-# WARNING_ON_USAGE toNat
"Warning: toNat was deprecated in v1.1.
Please use toℕ instead."
#-}
fromNat = fromℕ
{-# WARNING_ON_USAGE fromNat
"Warning: fromNat was deprecated in v1.1.
Please use fromℕ instead."
#-}
|
#include <stdio.h>
#include <math.h>
#include <gsl/gsl_integration.h>
void integral_recur ( int nmin, int nmax, double vals[]);
void integral_recur (int nmin, int nmax, double vals[])
{
double IN = 0;
int i = nmax - 1;
vals [nmax] = IN;
while ( i >= nmin)
{
vals[i] = (IN + exp (-1.))/((double) i);
IN = vals[i]; /* carry the result downward so the recursion actually recurs */
i = i - 1;
}
}
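/* Hypothetical driver (not part of the original file): fills vals[] by the
downward recursion above and prints the tabulated values. Guarded so it
does not clash with another main() in the build. */
#ifdef INTEGRAL_RECUR_DEMO
int main (void)
{
double vals[21];
int i;
integral_recur (1, 20, vals);
for (i = 1; i <= 20; i++)
printf ("vals[%d] = %.12f\n", i, vals[i]);
return 0;
}
#endif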
|
Marketing is the process by which companies create customer interest in goods or services. It generates the strategy that underlies sales techniques, business communication, and business development. It is an integrated process through which companies build strong customer relationships and create value for their customers and for themselves.
Although the definition makes marketing sound simple, the subject should not be taken lightly. Marketing involves many concepts that need to be understood thoroughly, and a business cannot run smoothly without appropriate marketing, so it is important to master its techniques. Digital marketing is becoming more and more popular, and the marketing network is growing many times over thanks to new techniques such as website marketing and online marketing. Product marketing also requires real expertise, so a good guide is needed to understand it all; the homework helpers we provide here are well-trained professionals in the field of marketing who can teach you the basics and how they work.
We do much more than fill you with theory: we also teach you how to apply it. We have put in the effort to provide the best homework and assignment help in marketing, so that you get a good hold of the subject and add value wherever you work. Our trained professionals work 24/7 with the aim of making you a better professional and helping you bring out the best in yourself. We provide newly updated material, papers, and case studies that explain the concepts faster and better.
So you're at the right place if you need marketing homework help or marketing assignment help; this is where you will find the answers to all your queries related to marketing. Log on to urgenthomework.com whenever your mind needs to be directed toward this subject in a better way.
State Before: C : Type u₁
inst✝¹ : SmallCategory C
ℰ : Type u₂
inst✝ : Category ℰ
A : C ⥤ ℰ
P : Cᵒᵖ ⥤ Type u₁
E₁ E₂ : ℰ
g : E₁ ⟶ E₂
c : Cocone ((CategoryOfElements.π P).leftOp ⋙ A)
t : IsColimit c
k : c.pt ⟶ E₁
⊢ ↑(restrictYonedaHomEquiv A P E₂ t) (k ≫ g) = ↑(restrictYonedaHomEquiv A P E₁ t) k ≫ (restrictedYoneda A).map g
State After: case w.h.h
C : Type u₁
inst✝¹ : SmallCategory C
ℰ : Type u₂
inst✝ : Category ℰ
A : C ⥤ ℰ
P : Cᵒᵖ ⥤ Type u₁
E₁ E₂ : ℰ
g : E₁ ⟶ E₂
c : Cocone ((CategoryOfElements.π P).leftOp ⋙ A)
t : IsColimit c
k : c.pt ⟶ E₁
x : Cᵒᵖ
X : P.obj x
⊢ (↑(restrictYonedaHomEquiv A P E₂ t) (k ≫ g)).app x X =
(↑(restrictYonedaHomEquiv A P E₁ t) k ≫ (restrictedYoneda A).map g).app x X
Tactic: ext x X
State Before: case w.h.h
C : Type u₁
inst✝¹ : SmallCategory C
ℰ : Type u₂
inst✝ : Category ℰ
A : C ⥤ ℰ
P : Cᵒᵖ ⥤ Type u₁
E₁ E₂ : ℰ
g : E₁ ⟶ E₂
c : Cocone ((CategoryOfElements.π P).leftOp ⋙ A)
t : IsColimit c
k : c.pt ⟶ E₁
x : Cᵒᵖ
X : P.obj x
⊢ (↑(restrictYonedaHomEquiv A P E₂ t) (k ≫ g)).app x X =
(↑(restrictYonedaHomEquiv A P E₁ t) k ≫ (restrictedYoneda A).map g).app x X
State After: no goals
Tactic: apply (assoc _ _ _).symm
Bedroom Unique Car Beds Kid Decor Ideas For Boy Clipgoo Beautiful Ideas: download this wallpaper for free in high resolution. Bedroom Unique Car Beds Kid Decor Ideas For Boy Clipgoo Beautiful Ideas was posted on July 9, 2018 at 7:43 am, and this wallpaper has been viewed by 348 users. If you want it as yours, click full size to go to the download page, choose the size you want above the wallpaper under "Download", and click it to download the Bedroom Unique Car Beds Kid Decor Ideas For Boy Clipgoo Beautiful Ideas wallpaper.
(* This file is generated by Why3's Coq driver *)
(* Beware! Only edit allowed sections below *)
Require Import BuiltIn.
Require BuiltIn.
Require list.List.
Require list.Length.
Require int.Int.
Require list.Mem.
Require list.Append.
(* Why3 assumption *)
Definition unit := unit.
Axiom qtmark : Type.
Parameter qtmark_WhyType : WhyType qtmark.
Existing Instance qtmark_WhyType.
Axiom char : Type.
Parameter char_WhyType : WhyType char.
Existing Instance char_WhyType.
(* Why3 assumption *)
Inductive regexp :=
| Empty : regexp
| Epsilon : regexp
| Char : char -> regexp
| Alt : regexp -> regexp -> regexp
| Concat : regexp -> regexp -> regexp
| Star : regexp -> regexp.
Axiom regexp_WhyType : WhyType regexp.
Existing Instance regexp_WhyType.
(* Why3 assumption *)
Definition word := (list char).
(* Why3 assumption *)
Inductive mem: (list char) -> regexp -> Prop :=
| mem_eps : (mem Init.Datatypes.nil Epsilon)
| mem_char : forall (c:char), (mem
(Init.Datatypes.cons c Init.Datatypes.nil) (Char c))
| mem_altl : forall (w:(list char)) (r1:regexp) (r2:regexp), (mem w r1) ->
(mem w (Alt r1 r2))
| mem_altr : forall (w:(list char)) (r1:regexp) (r2:regexp), (mem w r2) ->
(mem w (Alt r1 r2))
| mem_concat : forall (w1:(list char)) (w2:(list char)) (r1:regexp)
(r2:regexp), (mem w1 r1) -> ((mem w2 r2) -> (mem
(Init.Datatypes.app w1 w2) (Concat r1 r2)))
| mems1 : forall (r:regexp), (mem Init.Datatypes.nil (Star r))
| mems2 : forall (w1:(list char)) (w2:(list char)) (r:regexp), (mem w1
r) -> ((mem w2 (Star r)) -> (mem (Init.Datatypes.app w1 w2) (Star r))).
Axiom inversion_mem_star_gen : forall (c:char) (w:(list char)) (r:regexp)
(w':(list char)) (r':regexp), ((w' = (Init.Datatypes.cons c w)) /\
(r' = (Star r))) -> ((mem w' r') -> exists w1:(list char),
exists w2:(list char), (w = (Init.Datatypes.app w1 w2)) /\ ((mem
(Init.Datatypes.cons c w1) r) /\ (mem w2 r'))).
Axiom inversion_mem_star : forall (c:char) (w:(list char)) (r:regexp), (mem
(Init.Datatypes.cons c w) (Star r)) -> exists w1:(list char),
exists w2:(list char), (w = (Init.Datatypes.app w1 w2)) /\ ((mem
(Init.Datatypes.cons c w1) r) /\ (mem w2 (Star r))).
(* Why3 goal *)
Theorem WP_parameter_residual : forall (r:regexp) (c:char),
forall (x:regexp), (r = (Star x)) -> forall (o:regexp),
(forall (w:(list char)), (mem w o) <-> (mem (Init.Datatypes.cons c w)
x)) -> forall (w:(list char)), (mem w (Concat o r)) <-> (mem
(Init.Datatypes.cons c w) r).
intros r c x h1 o h2 w.
subst.
intuition.
inversion H; subst; clear H.
rewrite List.app_comm_cons.
constructor; auto.
rewrite <- h2; auto.
destruct (inversion_mem_star _ _ _ H) as (w1 & w2 & hh1 & hh2 & hh3).
subst w.
constructor; auto.
now rewrite h2.
Qed.
|
[STATEMENT]
lemma establish_invarI_CB [case_names prereq init new_root finish cross_back_edge discover]:
\<comment> \<open>Establish a DFS invariant (cross and back edge cases are combined).\<close>
assumes prereq: "\<And>u v s. on_back_edge param u v s = on_cross_edge param u v s"
assumes init: "on_init param \<le>\<^sub>n SPEC (\<lambda>x. I (empty_state x))"
assumes new_root: "\<And>s s' v0.
\<lbrakk>DFS_invar G param s; I s; cond s; \<not> is_break param s;
stack s = []; v0 \<in> V0; v0 \<notin> dom (discovered s);
s' = new_root v0 s\<rbrakk>
\<Longrightarrow> on_new_root param v0 s' \<le>\<^sub>n
SPEC (\<lambda>x. DFS_invar G param (s'\<lparr>state.more := x\<rparr>)
\<longrightarrow> I (s'\<lparr>state.more := x\<rparr>))"
assumes finish: "\<And>s s' u.
\<lbrakk>DFS_invar G param s; I s; cond s; \<not> is_break param s;
stack s \<noteq> []; u = hd (stack s);
pending s `` {u} = {};
s' = finish u s\<rbrakk>
\<Longrightarrow> on_finish param u s' \<le>\<^sub>n
SPEC (\<lambda>x. DFS_invar G param (s'\<lparr>state.more := x\<rparr>)
\<longrightarrow> I (s'\<lparr>state.more := x\<rparr>))"
assumes cross_back_edge: "\<And>s s' u v.
\<lbrakk>DFS_invar G param s; I s; cond s; \<not> is_break param s;
stack s \<noteq> []; (u, v) \<in> pending s; u = hd (stack s);
v \<in> dom (discovered s);
discovered s' = discovered s; finished s' = finished s;
stack s' = stack s; tree_edges s' = tree_edges s; counter s' = counter s;
pending s' = pending s - {(u,v)};
cross_edges s' \<union> back_edges s' = cross_edges s \<union> back_edges s \<union> {(u,v)};
state.more s' = state.more s \<rbrakk>
\<Longrightarrow> on_cross_edge param u v s' \<le>\<^sub>n
SPEC (\<lambda>x. DFS_invar G param (s'\<lparr>state.more := x\<rparr>)
\<longrightarrow> I (s'\<lparr>state.more := x\<rparr>))"
assumes discover: "\<And>s s' u v.
\<lbrakk>DFS_invar G param s; I s; cond s; \<not> is_break param s;
stack s \<noteq> []; (u, v) \<in> pending s; u = hd (stack s);
v \<notin> dom (discovered s);
s' = discover u v (s\<lparr>pending := pending s - {(u,v)}\<rparr>)\<rbrakk>
\<Longrightarrow> on_discover param u v s' \<le>\<^sub>n
SPEC (\<lambda>x. DFS_invar G param (s'\<lparr>state.more := x\<rparr>)
\<longrightarrow> I (s'\<lparr>state.more := x\<rparr>))"
shows "is_invar I"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. is_invar I
[PROOF STEP]
proof (induct rule: establish_invarI)
[PROOF STATE]
proof (state)
goal (6 subgoals):
1. on_init param \<le>\<^sub>n SPEC (\<lambda>x. I (empty_state x))
2. \<And>s s' v0. \<lbrakk>DFS_invar G param s; I s; cond s; \<not> is_break param s; stack s = []; v0 \<in> V0; v0 \<notin> dom (discovered s); s' = new_root v0 s\<rbrakk> \<Longrightarrow> on_new_root param v0 s' \<le>\<^sub>n SPEC (\<lambda>x. DFS_invar G param (s'\<lparr>state.more := x\<rparr>) \<longrightarrow> I (s'\<lparr>state.more := x\<rparr>))
3. \<And>s s' u. \<lbrakk>DFS_invar G param s; I s; cond s; \<not> is_break param s; stack s \<noteq> []; u = hd (stack s); pending s `` {u} = {}; s' = finish u s\<rbrakk> \<Longrightarrow> on_finish param u s' \<le>\<^sub>n SPEC (\<lambda>x. DFS_invar G param (s'\<lparr>state.more := x\<rparr>) \<longrightarrow> I (s'\<lparr>state.more := x\<rparr>))
4. \<And>s s' u v. \<lbrakk>DFS_invar G param s; I s; cond s; \<not> is_break param s; stack s \<noteq> []; (u, v) \<in> pending s; u = hd (stack s); v \<in> dom (discovered s); v \<in> dom (finished s); s' = cross_edge u v (s\<lparr>pending := pending s - {(u, v)}\<rparr>)\<rbrakk> \<Longrightarrow> on_cross_edge param u v s' \<le>\<^sub>n SPEC (\<lambda>x. DFS_invar G param (s'\<lparr>state.more := x\<rparr>) \<longrightarrow> I (s'\<lparr>state.more := x\<rparr>))
5. \<And>s s' u v. \<lbrakk>DFS_invar G param s; I s; cond s; \<not> is_break param s; stack s \<noteq> []; (u, v) \<in> pending s; u = hd (stack s); v \<in> dom (discovered s); v \<notin> dom (finished s); s' = back_edge u v (s\<lparr>pending := pending s - {(u, v)}\<rparr>)\<rbrakk> \<Longrightarrow> on_back_edge param u v s' \<le>\<^sub>n SPEC (\<lambda>x. DFS_invar G param (s'\<lparr>state.more := x\<rparr>) \<longrightarrow> I (s'\<lparr>state.more := x\<rparr>))
6. \<And>s s' u v. \<lbrakk>DFS_invar G param s; I s; cond s; \<not> is_break param s; stack s \<noteq> []; (u, v) \<in> pending s; u = hd (stack s); v \<notin> dom (discovered s); s' = discover u v (s\<lparr>pending := pending s - {(u, v)}\<rparr>)\<rbrakk> \<Longrightarrow> on_discover param u v s' \<le>\<^sub>n SPEC (\<lambda>x. DFS_invar G param (s'\<lparr>state.more := x\<rparr>) \<longrightarrow> I (s'\<lparr>state.more := x\<rparr>))
[PROOF STEP]
case cross_edge
[PROOF STATE]
proof (state)
this:
DFS_invar G param s_
I s_
cond s_
\<not> is_break param s_
stack s_ \<noteq> []
(u_, v_) \<in> pending s_
u_ = hd (stack s_)
v_ \<in> dom (discovered s_)
v_ \<in> dom (finished s_)
s'_ = cross_edge u_ v_ (s_\<lparr>pending := pending s_ - {(u_, v_)}\<rparr>)
goal (6 subgoals):
1. on_init param \<le>\<^sub>n SPEC (\<lambda>x. I (empty_state x))
2. \<And>s s' v0. \<lbrakk>DFS_invar G param s; I s; cond s; \<not> is_break param s; stack s = []; v0 \<in> V0; v0 \<notin> dom (discovered s); s' = new_root v0 s\<rbrakk> \<Longrightarrow> on_new_root param v0 s' \<le>\<^sub>n SPEC (\<lambda>x. DFS_invar G param (s'\<lparr>state.more := x\<rparr>) \<longrightarrow> I (s'\<lparr>state.more := x\<rparr>))
3. \<And>s s' u. \<lbrakk>DFS_invar G param s; I s; cond s; \<not> is_break param s; stack s \<noteq> []; u = hd (stack s); pending s `` {u} = {}; s' = finish u s\<rbrakk> \<Longrightarrow> on_finish param u s' \<le>\<^sub>n SPEC (\<lambda>x. DFS_invar G param (s'\<lparr>state.more := x\<rparr>) \<longrightarrow> I (s'\<lparr>state.more := x\<rparr>))
4. \<And>s s' u v. \<lbrakk>DFS_invar G param s; I s; cond s; \<not> is_break param s; stack s \<noteq> []; (u, v) \<in> pending s; u = hd (stack s); v \<in> dom (discovered s); v \<in> dom (finished s); s' = cross_edge u v (s\<lparr>pending := pending s - {(u, v)}\<rparr>)\<rbrakk> \<Longrightarrow> on_cross_edge param u v s' \<le>\<^sub>n SPEC (\<lambda>x. DFS_invar G param (s'\<lparr>state.more := x\<rparr>) \<longrightarrow> I (s'\<lparr>state.more := x\<rparr>))
5. \<And>s s' u v. \<lbrakk>DFS_invar G param s; I s; cond s; \<not> is_break param s; stack s \<noteq> []; (u, v) \<in> pending s; u = hd (stack s); v \<in> dom (discovered s); v \<notin> dom (finished s); s' = back_edge u v (s\<lparr>pending := pending s - {(u, v)}\<rparr>)\<rbrakk> \<Longrightarrow> on_back_edge param u v s' \<le>\<^sub>n SPEC (\<lambda>x. DFS_invar G param (s'\<lparr>state.more := x\<rparr>) \<longrightarrow> I (s'\<lparr>state.more := x\<rparr>))
6. \<And>s s' u v. \<lbrakk>DFS_invar G param s; I s; cond s; \<not> is_break param s; stack s \<noteq> []; (u, v) \<in> pending s; u = hd (stack s); v \<notin> dom (discovered s); s' = discover u v (s\<lparr>pending := pending s - {(u, v)}\<rparr>)\<rbrakk> \<Longrightarrow> on_discover param u v s' \<le>\<^sub>n SPEC (\<lambda>x. DFS_invar G param (s'\<lparr>state.more := x\<rparr>) \<longrightarrow> I (s'\<lparr>state.more := x\<rparr>))
[PROOF STEP]
thus ?case
[PROOF STATE]
proof (prove)
using this:
DFS_invar G param s_
I s_
cond s_
\<not> is_break param s_
stack s_ \<noteq> []
(u_, v_) \<in> pending s_
u_ = hd (stack s_)
v_ \<in> dom (discovered s_)
v_ \<in> dom (finished s_)
s'_ = cross_edge u_ v_ (s_\<lparr>pending := pending s_ - {(u_, v_)}\<rparr>)
goal (1 subgoal):
1. on_cross_edge param u_ v_ s'_ \<le>\<^sub>n SPEC (\<lambda>x. DFS_invar G param (s'_\<lparr>state.more := x\<rparr>) \<longrightarrow> I (s'_\<lparr>state.more := x\<rparr>))
[PROOF STEP]
by (auto intro!: cross_back_edge)
[PROOF STATE]
proof (state)
this:
on_cross_edge param u_ v_ s'_ \<le>\<^sub>n SPEC (\<lambda>x. DFS_invar G param (s'_\<lparr>state.more := x\<rparr>) \<longrightarrow> I (s'_\<lparr>state.more := x\<rparr>))
goal (5 subgoals):
1. on_init param \<le>\<^sub>n SPEC (\<lambda>x. I (empty_state x))
2. \<And>s s' v0. \<lbrakk>DFS_invar G param s; I s; cond s; \<not> is_break param s; stack s = []; v0 \<in> V0; v0 \<notin> dom (discovered s); s' = new_root v0 s\<rbrakk> \<Longrightarrow> on_new_root param v0 s' \<le>\<^sub>n SPEC (\<lambda>x. DFS_invar G param (s'\<lparr>state.more := x\<rparr>) \<longrightarrow> I (s'\<lparr>state.more := x\<rparr>))
3. \<And>s s' u. \<lbrakk>DFS_invar G param s; I s; cond s; \<not> is_break param s; stack s \<noteq> []; u = hd (stack s); pending s `` {u} = {}; s' = finish u s\<rbrakk> \<Longrightarrow> on_finish param u s' \<le>\<^sub>n SPEC (\<lambda>x. DFS_invar G param (s'\<lparr>state.more := x\<rparr>) \<longrightarrow> I (s'\<lparr>state.more := x\<rparr>))
4. \<And>s s' u v. \<lbrakk>DFS_invar G param s; I s; cond s; \<not> is_break param s; stack s \<noteq> []; (u, v) \<in> pending s; u = hd (stack s); v \<in> dom (discovered s); v \<notin> dom (finished s); s' = back_edge u v (s\<lparr>pending := pending s - {(u, v)}\<rparr>)\<rbrakk> \<Longrightarrow> on_back_edge param u v s' \<le>\<^sub>n SPEC (\<lambda>x. DFS_invar G param (s'\<lparr>state.more := x\<rparr>) \<longrightarrow> I (s'\<lparr>state.more := x\<rparr>))
5. \<And>s s' u v. \<lbrakk>DFS_invar G param s; I s; cond s; \<not> is_break param s; stack s \<noteq> []; (u, v) \<in> pending s; u = hd (stack s); v \<notin> dom (discovered s); s' = discover u v (s\<lparr>pending := pending s - {(u, v)}\<rparr>)\<rbrakk> \<Longrightarrow> on_discover param u v s' \<le>\<^sub>n SPEC (\<lambda>x. DFS_invar G param (s'\<lparr>state.more := x\<rparr>) \<longrightarrow> I (s'\<lparr>state.more := x\<rparr>))
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (5 subgoals):
1. on_init param \<le>\<^sub>n SPEC (\<lambda>x. I (empty_state x))
2. \<And>s s' v0. \<lbrakk>DFS_invar G param s; I s; cond s; \<not> is_break param s; stack s = []; v0 \<in> V0; v0 \<notin> dom (discovered s); s' = new_root v0 s\<rbrakk> \<Longrightarrow> on_new_root param v0 s' \<le>\<^sub>n SPEC (\<lambda>x. DFS_invar G param (s'\<lparr>state.more := x\<rparr>) \<longrightarrow> I (s'\<lparr>state.more := x\<rparr>))
3. \<And>s s' u. \<lbrakk>DFS_invar G param s; I s; cond s; \<not> is_break param s; stack s \<noteq> []; u = hd (stack s); pending s `` {u} = {}; s' = finish u s\<rbrakk> \<Longrightarrow> on_finish param u s' \<le>\<^sub>n SPEC (\<lambda>x. DFS_invar G param (s'\<lparr>state.more := x\<rparr>) \<longrightarrow> I (s'\<lparr>state.more := x\<rparr>))
4. \<And>s s' u v. \<lbrakk>DFS_invar G param s; I s; cond s; \<not> is_break param s; stack s \<noteq> []; (u, v) \<in> pending s; u = hd (stack s); v \<in> dom (discovered s); v \<notin> dom (finished s); s' = back_edge u v (s\<lparr>pending := pending s - {(u, v)}\<rparr>)\<rbrakk> \<Longrightarrow> on_back_edge param u v s' \<le>\<^sub>n SPEC (\<lambda>x. DFS_invar G param (s'\<lparr>state.more := x\<rparr>) \<longrightarrow> I (s'\<lparr>state.more := x\<rparr>))
5. \<And>s s' u v. \<lbrakk>DFS_invar G param s; I s; cond s; \<not> is_break param s; stack s \<noteq> []; (u, v) \<in> pending s; u = hd (stack s); v \<notin> dom (discovered s); s' = discover u v (s\<lparr>pending := pending s - {(u, v)}\<rparr>)\<rbrakk> \<Longrightarrow> on_discover param u v s' \<le>\<^sub>n SPEC (\<lambda>x. DFS_invar G param (s'\<lparr>state.more := x\<rparr>) \<longrightarrow> I (s'\<lparr>state.more := x\<rparr>))
[PROOF STEP]
case (back_edge s s' u v)
[PROOF STATE]
proof (state)
this:
DFS_invar G param s
I s
cond s
\<not> is_break param s
stack s \<noteq> []
(u, v) \<in> pending s
u = hd (stack s)
v \<in> dom (discovered s)
v \<notin> dom (finished s)
s' = back_edge u v (s\<lparr>pending := pending s - {(u, v)}\<rparr>)
goal (5 subgoals):
1. on_init param \<le>\<^sub>n SPEC (\<lambda>x. I (empty_state x))
2. \<And>s s' v0. \<lbrakk>DFS_invar G param s; I s; cond s; \<not> is_break param s; stack s = []; v0 \<in> V0; v0 \<notin> dom (discovered s); s' = new_root v0 s\<rbrakk> \<Longrightarrow> on_new_root param v0 s' \<le>\<^sub>n SPEC (\<lambda>x. DFS_invar G param (s'\<lparr>state.more := x\<rparr>) \<longrightarrow> I (s'\<lparr>state.more := x\<rparr>))
3. \<And>s s' u. \<lbrakk>DFS_invar G param s; I s; cond s; \<not> is_break param s; stack s \<noteq> []; u = hd (stack s); pending s `` {u} = {}; s' = finish u s\<rbrakk> \<Longrightarrow> on_finish param u s' \<le>\<^sub>n SPEC (\<lambda>x. DFS_invar G param (s'\<lparr>state.more := x\<rparr>) \<longrightarrow> I (s'\<lparr>state.more := x\<rparr>))
4. \<And>s s' u v. \<lbrakk>DFS_invar G param s; I s; cond s; \<not> is_break param s; stack s \<noteq> []; (u, v) \<in> pending s; u = hd (stack s); v \<in> dom (discovered s); v \<notin> dom (finished s); s' = back_edge u v (s\<lparr>pending := pending s - {(u, v)}\<rparr>)\<rbrakk> \<Longrightarrow> on_back_edge param u v s' \<le>\<^sub>n SPEC (\<lambda>x. DFS_invar G param (s'\<lparr>state.more := x\<rparr>) \<longrightarrow> I (s'\<lparr>state.more := x\<rparr>))
5. \<And>s s' u v. \<lbrakk>DFS_invar G param s; I s; cond s; \<not> is_break param s; stack s \<noteq> []; (u, v) \<in> pending s; u = hd (stack s); v \<notin> dom (discovered s); s' = discover u v (s\<lparr>pending := pending s - {(u, v)}\<rparr>)\<rbrakk> \<Longrightarrow> on_discover param u v s' \<le>\<^sub>n SPEC (\<lambda>x. DFS_invar G param (s'\<lparr>state.more := x\<rparr>) \<longrightarrow> I (s'\<lparr>state.more := x\<rparr>))
[PROOF STEP]
hence
"on_cross_edge param u v s' \<le>\<^sub>n
SPEC (\<lambda>x. DFS_invar G param (s'\<lparr>state.more := x\<rparr>)
\<longrightarrow> I (s'\<lparr>state.more := x\<rparr>))"
[PROOF STATE]
proof (prove)
using this:
DFS_invar G param s
I s
cond s
\<not> is_break param s
stack s \<noteq> []
(u, v) \<in> pending s
u = hd (stack s)
v \<in> dom (discovered s)
v \<notin> dom (finished s)
s' = back_edge u v (s\<lparr>pending := pending s - {(u, v)}\<rparr>)
goal (1 subgoal):
1. on_cross_edge param u v s' \<le>\<^sub>n SPEC (\<lambda>x. DFS_invar G param (s'\<lparr>state.more := x\<rparr>) \<longrightarrow> I (s'\<lparr>state.more := x\<rparr>))
[PROOF STEP]
by (auto intro!: cross_back_edge)
[PROOF STATE]
proof (state)
this:
on_cross_edge param u v s' \<le>\<^sub>n SPEC (\<lambda>x. DFS_invar G param (s'\<lparr>state.more := x\<rparr>) \<longrightarrow> I (s'\<lparr>state.more := x\<rparr>))
goal (5 subgoals):
1. on_init param \<le>\<^sub>n SPEC (\<lambda>x. I (empty_state x))
2. \<And>s s' v0. \<lbrakk>DFS_invar G param s; I s; cond s; \<not> is_break param s; stack s = []; v0 \<in> V0; v0 \<notin> dom (discovered s); s' = new_root v0 s\<rbrakk> \<Longrightarrow> on_new_root param v0 s' \<le>\<^sub>n SPEC (\<lambda>x. DFS_invar G param (s'\<lparr>state.more := x\<rparr>) \<longrightarrow> I (s'\<lparr>state.more := x\<rparr>))
3. \<And>s s' u. \<lbrakk>DFS_invar G param s; I s; cond s; \<not> is_break param s; stack s \<noteq> []; u = hd (stack s); pending s `` {u} = {}; s' = finish u s\<rbrakk> \<Longrightarrow> on_finish param u s' \<le>\<^sub>n SPEC (\<lambda>x. DFS_invar G param (s'\<lparr>state.more := x\<rparr>) \<longrightarrow> I (s'\<lparr>state.more := x\<rparr>))
4. \<And>s s' u v. \<lbrakk>DFS_invar G param s; I s; cond s; \<not> is_break param s; stack s \<noteq> []; (u, v) \<in> pending s; u = hd (stack s); v \<in> dom (discovered s); v \<notin> dom (finished s); s' = back_edge u v (s\<lparr>pending := pending s - {(u, v)}\<rparr>)\<rbrakk> \<Longrightarrow> on_back_edge param u v s' \<le>\<^sub>n SPEC (\<lambda>x. DFS_invar G param (s'\<lparr>state.more := x\<rparr>) \<longrightarrow> I (s'\<lparr>state.more := x\<rparr>))
5. \<And>s s' u v. \<lbrakk>DFS_invar G param s; I s; cond s; \<not> is_break param s; stack s \<noteq> []; (u, v) \<in> pending s; u = hd (stack s); v \<notin> dom (discovered s); s' = discover u v (s\<lparr>pending := pending s - {(u, v)}\<rparr>)\<rbrakk> \<Longrightarrow> on_discover param u v s' \<le>\<^sub>n SPEC (\<lambda>x. DFS_invar G param (s'\<lparr>state.more := x\<rparr>) \<longrightarrow> I (s'\<lparr>state.more := x\<rparr>))
[PROOF STEP]
with prereq
[PROOF STATE]
proof (chain)
picking this:
on_back_edge param ?u1 ?v1 ?s1 = on_cross_edge param ?u1 ?v1 ?s1
on_cross_edge param u v s' \<le>\<^sub>n SPEC (\<lambda>x. DFS_invar G param (s'\<lparr>state.more := x\<rparr>) \<longrightarrow> I (s'\<lparr>state.more := x\<rparr>))
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
on_back_edge param ?u1 ?v1 ?s1 = on_cross_edge param ?u1 ?v1 ?s1
on_cross_edge param u v s' \<le>\<^sub>n SPEC (\<lambda>x. DFS_invar G param (s'\<lparr>state.more := x\<rparr>) \<longrightarrow> I (s'\<lparr>state.more := x\<rparr>))
goal (1 subgoal):
1. on_back_edge param u v s' \<le>\<^sub>n SPEC (\<lambda>x. DFS_invar G param (s'\<lparr>state.more := x\<rparr>) \<longrightarrow> I (s'\<lparr>state.more := x\<rparr>))
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
on_back_edge param u v s' \<le>\<^sub>n SPEC (\<lambda>x. DFS_invar G param (s'\<lparr>state.more := x\<rparr>) \<longrightarrow> I (s'\<lparr>state.more := x\<rparr>))
goal (4 subgoals):
1. on_init param \<le>\<^sub>n SPEC (\<lambda>x. I (empty_state x))
2. \<And>s s' v0. \<lbrakk>DFS_invar G param s; I s; cond s; \<not> is_break param s; stack s = []; v0 \<in> V0; v0 \<notin> dom (discovered s); s' = new_root v0 s\<rbrakk> \<Longrightarrow> on_new_root param v0 s' \<le>\<^sub>n SPEC (\<lambda>x. DFS_invar G param (s'\<lparr>state.more := x\<rparr>) \<longrightarrow> I (s'\<lparr>state.more := x\<rparr>))
3. \<And>s s' u. \<lbrakk>DFS_invar G param s; I s; cond s; \<not> is_break param s; stack s \<noteq> []; u = hd (stack s); pending s `` {u} = {}; s' = finish u s\<rbrakk> \<Longrightarrow> on_finish param u s' \<le>\<^sub>n SPEC (\<lambda>x. DFS_invar G param (s'\<lparr>state.more := x\<rparr>) \<longrightarrow> I (s'\<lparr>state.more := x\<rparr>))
4. \<And>s s' u v. \<lbrakk>DFS_invar G param s; I s; cond s; \<not> is_break param s; stack s \<noteq> []; (u, v) \<in> pending s; u = hd (stack s); v \<notin> dom (discovered s); s' = discover u v (s\<lparr>pending := pending s - {(u, v)}\<rparr>)\<rbrakk> \<Longrightarrow> on_discover param u v s' \<le>\<^sub>n SPEC (\<lambda>x. DFS_invar G param (s'\<lparr>state.more := x\<rparr>) \<longrightarrow> I (s'\<lparr>state.more := x\<rparr>))
[PROOF STEP]
qed fact+ |
function out=subsref(x,varargin)
out=builtin('subsref',x,varargin{:});
|
Require Import ZArith.
Require Import ASN1FP.Types.BitContainer ASN1FP.Types.Bitstring.
(** * container tuples *)
(* nbs - normal bitstring (i.e. not a special value) *)
Inductive BER_nbs :=
| short_nbs
(id co : cont8)
(t s : cont1) (bb ff ee : cont2)
(e : container (8 * (c2n ee + 1))) (m : container (8*((c2n co) - (c2n ee) - 2)))
(VS1 : c2z id = real_id_b) (VS2 : c2z t = 1) (VS3 : c2z ee <= 2) (VS4 : 1 <= c2z m)
(VS5 : c2z co <= 127)
| long_nbs
(id co : cont8)
(t s : cont1) (bb ff ee : cont2)
(eo : cont8)
(e : container (8*(c2n eo))) (m : container (8 * ((c2n co) - (c2n eo) - 2)))
(VL1 : c2z id = real_id_b) (VL2 : c2z t = 1) (VL3 : c2z ee = 3) (VL4 : 1 <= c2z m)
(VL5 : c2z co <= 127).
Inductive BER_bs_aux :=
| special_aux (val : BER_special) : BER_bs_aux
| normal_aux (b : BER_nbs) : BER_bs_aux.
|
#pragma once
#include <boost/asio.hpp>
namespace boost {
class thread;
}
class Asio {
public:
static void startup();
static boost::asio::io_service &getIoService();
static void shutdown();
private:
static boost::asio::io_service *ioService;
static boost::asio::io_service::work *work;
static boost::thread *butler;
};
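// Hypothetical usage sketch (not part of the original header); assumes the
// matching Asio.cpp starts a butler thread that runs the io_service:
//
//   Asio::startup();                          // create io_service, work guard, thread
//   Asio::getIoService().post([] { /* queued work */ });
//   Asio::shutdown();                         // release work, join butler, clean up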
|
/-
Copyright (c) 2022 Tomaz Gomes. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Tomaz Gomes.
-/
import data.list.sort tactic
import data.nat.log
import init.data.nat
import MergeSort.LogLemmas
/-
# Timed Split
This file defines a new version of Split that, besides splitting the input list into two halves,
counts the number of operations performed during the execution of the algorithm. It also presents
proofs of its time complexity, of its equivalence to the one defined in data/list/sort.lean, and of
some lemmas used in MergeSort.lean that relate to the Split function.
## Main Definition
- Timed.split : list α → (list α × list α × ℕ)
## Main Results
- Timed.split_complexity :
∀ l : list α, (Timed.split l).snd.snd = l.length
- Timed.split_equivalence :
∀ l : list α, (Timed.split l).fst = list.split l
-/
variables {α : Type} (r : α → α → Prop) [decidable_rel r]
local infix ` ≼ ` : 50 := r
namespace Timed
@[simp] def split : list α → (list α × list α × ℕ)
| [] := ([], [], 0)
| (h :: t) := let (l₁, l₂, n) := split t in (h :: l₂, l₁, n + 1)
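-- Hypothetical sanity check (not part of the original file): `split`
-- alternates the input between the two output lists and counts one step
-- per element.
#eval split ([1, 2, 3, 4] : list ℕ)  -- ([1, 3], [2, 4], 4)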
theorem split_equivalence : ∀ (l : list α) ,
(split l).fst = (list.split l).fst ∧ (split l).snd.fst = (list.split l).snd
| [] := by simp only [list.split, split, and_self]
| (h :: t) :=
begin
simp only [list.split, split],
have ih := split_equivalence t,
cases ih with ih_fst ih_snd,
cases (split t) with t_left₁ t_right₁,
cases t_right₁ with t_right₁ _,
unfold split,
cases t.split with t_left₂ t_right₂,
unfold list.split,
simp only [true_and, eq_self_iff_true],
exact ⟨ ih_snd, ih_fst ⟩,
end
theorem split_complexity : ∀ (l : list α) , (split l).snd.snd = l.length
| [] := by simp only [list.length, split]
| (h :: t) :=
begin
simp only [list.length, split],
have IH := split_complexity t,
cases split t with l₁ l₂n,
cases l₂n with l₂ n,
unfold split,
rw add_left_inj,
exact IH,
end
lemma length_split_lt {a b: α} {l l₁ l₂ : list α} {n : ℕ}
(h : split (a::b::l) = (l₁, l₂, n)):
list.length l₁ < list.length (a::b::l) ∧
list.length l₂ < list.length (a::b::l) :=
begin
have split_eq_full : l₁ = (a::b::l).split.fst ∧ l₂ = (a::b::l).split.snd :=
begin
have l₂_n_id : (l₂, n) = (split (a :: b :: l)).snd :=
(congr_arg prod.snd h).congr_right.mpr rfl,
have l₂_id : l₂ = (split (a :: b :: l)).snd.fst :=
(congr_arg prod.fst l₂_n_id).congr_right.mp rfl,
have l₁_id : l₁ = (split (a :: b :: l)).fst :=
(congr_arg prod.fst h).congr_right.mpr rfl,
have split_eq := split_equivalence (a :: b :: l),
cases split_eq,
rw split_eq_left at l₁_id,
rw split_eq_right at l₂_id,
exact ⟨ l₁_id , l₂_id ⟩,
end,
cases split_eq_full,
have reconstruct : (a::b::l).split = (l₁, l₂) :=
begin
rw split_eq_full_left,
rw split_eq_full_right,
exact prod.ext rfl rfl,
end ,
exact list.length_split_lt reconstruct,
end
lemma split_halves_length : ∀ {l l₁ l₂ : list α} {n : ℕ},
split l = (l₁, l₂, n) →
2 * list.length l₁ ≤ list.length l + 1 ∧ 2 * list.length l₂ ≤ list.length l
| [] :=
begin
intros l₁ l₂ n h,
unfold split at h,
simp only [prod.mk.inj_iff] at h,
cases h with h₁ h₂,
cases h₂ with h₂ _,
rw [← h₁, ← h₂],
simp only [ zero_le_one
, list.length
, eq_self_iff_true
, and_self
, nonpos_iff_eq_zero
, mul_zero
],
end
| (a :: t) :=
begin
intros l₁ l₂ n h',
cases e : split t with t₁ t₂m,
cases t₂m with t₂ m,
have split_id : split (a :: t) = (a :: t₂, t₁, m + 1) :=
begin
rw split,
cases split t with t₁' t₂',
cases t₂' with t₂' m₂,
unfold split,
injection e,
injection h_2,
refine congr (congr_arg prod.mk (congr_arg (list.cons a) h_3)) _,
rw h_1,
rw h_4,
end,
rw split_id at h',
injection h',
injection h_2,
have IH := split_halves_length e,
refine and.intro _ _,
{ rw ← h_1, simp only [list.length], linarith, },
{ rw ← h_3, simp only [list.length], linarith, },
end
include r
lemma split_lengths : ∀ (l l₁ l₂ : list α) {n : ℕ},
split l = (l₁, l₂, n) → l₁.length + l₂.length = l.length
| [] := by { intros l₁ l₂ n,
simp only [ and_imp
, prod.mk.inj_iff
, list.length
, add_eq_zero_iff
, split
],
intros h₁ h₂ _,
rw [← h₁, ← h₂],
simp,
}
| [a] := by { intros l₁ l₂ n,
simp only [ and_imp
, prod.mk.inj_iff
, list.length_singleton
, zero_add
, split
],
intros h₁ h₂ _,
rw [← h₁, ← h₂],
simp only [list.length],
}
| (a :: b :: t) :=
begin
intros l₁ l₂ n h,
cases e : split t with l₁' l₂'m,
cases l₂'m with l₂' m,
simp only [split] at h,
rw e at h,
unfold split at h,
have ih := split_lengths t l₁' l₂' e,
injection h,
injection h_2,
rw [← h_1, ← h_3],
simp only [list.length], linarith,
end
end Timed
|
[STATEMENT]
lemma PO_refines_implies_R_image_invariant:
assumes "PO_refines R Ta Tc"
shows "reach Tc \<subseteq> R `` reach Ta"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. reach Tc \<subseteq> R `` reach Ta
[PROOF STEP]
proof(rule INV_rule)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. init Tc \<subseteq> R `` reach Ta
2. {R `` reach Ta \<inter> reach Tc} TS.trans Tc {> R `` reach Ta}
[PROOF STEP]
show "init Tc \<subseteq> R `` reach Ta"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. init Tc \<subseteq> R `` reach Ta
[PROOF STEP]
by (rule subset_trans[OF PO_refines_implies_R_image_init, OF assms]) (auto)
[PROOF STATE]
proof (state)
this:
init Tc \<subseteq> R `` reach Ta
goal (1 subgoal):
1. {R `` reach Ta \<inter> reach Tc} TS.trans Tc {> R `` reach Ta}
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. {R `` reach Ta \<inter> reach Tc} TS.trans Tc {> R `` reach Ta}
[PROOF STEP]
show "{R `` reach Ta \<inter> reach Tc} TS.trans Tc {> R `` reach Ta}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. {R `` reach Ta \<inter> reach Tc} TS.trans Tc {> R `` reach Ta}
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
PO_refines R Ta Tc
goal (1 subgoal):
1. {R `` reach Ta \<inter> reach Tc} TS.trans Tc {> R `` reach Ta}
[PROOF STEP]
by (auto intro!: PO_refines_implies_R_image_trans)
[PROOF STATE]
proof (state)
this:
{R `` reach Ta \<inter> reach Tc} TS.trans Tc {> R `` reach Ta}
goal:
No subgoals!
[PROOF STEP]
qed |
{-# LANGUAGE TupleSections #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE BangPatterns #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE RankNTypes #-}
{-# LANGUAGE CPP #-}
{-# LANGUAGE RecordWildCards #-}
-- |
-- Module : Pact.Repl.Lib
-- Copyright : (C) 2016 Stuart Popejoy
-- License : BSD-style (see the file LICENSE)
-- Maintainer : Stuart Popejoy <[email protected]>
--
-- Built-ins for repl functionality. Not part of standard library
-- for blockchain execution, but instead for testing and dev.
--
module Pact.Repl.Lib where
import Control.Arrow ((&&&))
import Data.Default
import Data.Semigroup
import qualified Data.HashMap.Strict as HM
import qualified Data.Map as M
import Control.Monad.Reader
import Control.Monad.Catch
import Control.Monad.State.Strict (get)
import Control.Lens
import qualified Data.Set as S
import qualified Data.ByteString.Lazy as BSL
import Control.Concurrent.MVar
import Data.Aeson (eitherDecode,toJSON)
import qualified Data.Text as Text
import Data.Text.Encoding
import Data.Maybe
#if !defined(ghcjs_HOST_OS)
import Criterion
import Criterion.Types
import Pact.Analyze.Check
#if MIN_VERSION_statistics(0,14,0)
import Statistics.Types (Estimate(..))
#else
import Statistics.Resampling.Bootstrap
#endif
#endif
import Pact.Typechecker
import Pact.Types.Typecheck
-- intentionally hidden unused functions to prevent lib functions from consuming gas
import Pact.Native.Internal hiding (defRNative,defGasRNative,defNative)
import qualified Pact.Native.Internal as Native
import Pact.Types.Runtime
import Pact.Eval
import Pact.Persist.Pure
import Pact.PersistPactDb
import Pact.Types.Logger
import Pact.Repl.Types
initLibState :: Loggers -> IO LibState
initLibState loggers = do
m <- newMVar (DbEnv def persister
(newLogger loggers "Repl")
def def)
createSchema m
return (LibState m Noop def def)
-- | Native function with no gas consumption.
type ZNativeFun e = FunApp -> [Term Ref] -> Eval e (Term Name)
zeroGas :: Eval e a -> Eval e (Gas,a)
zeroGas = fmap (0,)
defZNative :: NativeDefName -> ZNativeFun e -> FunTypes (Term Name) -> Text -> NativeDef
defZNative name fun = Native.defNative name $ \fi as -> zeroGas $ fun fi as
defZRNative :: NativeDefName -> RNativeFun e -> FunTypes (Term Name) -> Text -> NativeDef
defZRNative name fun = Native.defNative name (reduced fun)
where reduced f fi as = mapM reduce as >>= zeroGas . f fi
replDefs :: NativeModule
replDefs = ("Repl",
[
defZRNative "load" load (funType tTyString [("file",tTyString)] <>
funType tTyString [("file",tTyString),("reset",tTyBool)]) $
"Load and evaluate FILE, resetting repl state beforehand if optional NO-RESET is true. " <>
"`$(load \"accounts.repl\")`"
,defZRNative "env-keys" setsigs (funType tTyString [("keys",TyList tTyString)])
"Set transaction signature KEYS. `(env-keys [\"my-key\" \"admin-key\"])`"
,defZRNative "env-data" setmsg (funType tTyString [("json",json)]) $
"Set transaction JSON data, either as encoded string, or as pact types coerced to JSON. " <>
"`(env-data { \"keyset\": { \"keys\": [\"my-key\" \"admin-key\"], \"pred\": \"keys-any\" } })`"
,defZRNative "env-step"
setstep (funType tTyString [] <>
funType tTyString [("step-idx",tTyInteger)] <>
funType tTyString [("step-idx",tTyInteger),("rollback",tTyBool)] <>
funType tTyString [("step-idx",tTyInteger),("rollback",tTyBool),("resume",TySchema TyObject (mkSchemaVar "y"))])
("Set pact step state. With no arguments, unset step. With STEP-IDX, set step index to execute. " <>
"ROLLBACK instructs to execute rollback expression, if any. RESUME sets a value to be read via 'resume'." <>
"Clears any previous pact execution state. `$(env-step 1)` `$(env-step 0 true)`")
,defZRNative "pact-state" pactState (funType (tTyObject TyAny) [])
("Inspect state from previous pact execution. Returns object with fields " <>
"'yield': yield result or 'false' if none; 'step': executed step; " <>
"'executed': indicates if step was skipped because entity did not match. `$(pact-state)`")
,defZRNative "env-entity" setentity
(funType tTyString [] <> funType tTyString [("entity",tTyString)])
("Set environment confidential ENTITY id, or unset with no argument. " <>
"Clears any previous pact execution state. `$(env-entity \"my-org\")` `$(env-entity)`")
,defZRNative "begin-tx" (tx Begin) (funType tTyString [] <>
funType tTyString [("name",tTyString)])
"Begin transaction with optional NAME. `$(begin-tx \"load module\")`"
,defZRNative "commit-tx" (tx Commit) (funType tTyString []) "Commit transaction. `$(commit-tx)`"
,defZRNative "rollback-tx" (tx Rollback) (funType tTyString []) "Rollback transaction. `$(rollback-tx)`"
,defZRNative "expect" expect (funType tTyString [("doc",tTyString),("expected",a),("actual",a)])
"Evaluate ACTUAL and verify that it equals EXPECTED. `(expect \"Sanity prevails.\" 4 (+ 2 2))`"
,defZNative "expect-failure" expectFail (funType tTyString [("doc",tTyString),("exp",a)]) $
"Evaluate EXP and succeed only if it throws an error. " <>
"`(expect-failure \"Enforce fails on false\" (enforce false \"Expected error\"))`"
,defZNative "bench" bench' (funType tTyString [("exprs",TyAny)])
"Benchmark execution of EXPRS. `$(bench (+ 1 2))`"
,defZRNative "typecheck" tc (funType tTyString [("module",tTyString)] <>
funType tTyString [("module",tTyString),("debug",tTyBool)])
"Typecheck MODULE, optionally enabling DEBUG output."
,defZRNative "env-gaslimit" setGasLimit (funType tTyString [("limit",tTyInteger)])
"Set environment gas limit to LIMIT"
,defZRNative "env-gas" envGas (funType tTyInteger [] <> funType tTyString [("gas",tTyInteger)])
"Query gas state, or set it to GAS"
,defZRNative "env-gasprice" setGasPrice (funType tTyString [("price",tTyDecimal)])
"Set environment gas price to PRICE"
,defZRNative "env-gasrate" setGasRate (funType tTyString [("rate",tTyInteger)])
"Update gas model to charge constant RATE"
#if !defined(ghcjs_HOST_OS)
,defZRNative "verify" verify (funType tTyString [("module",tTyString)]) "Verify MODULE, checking that all properties hold."
#endif
,defZRNative "json" json' (funType tTyValue [("exp",a)]) $
"Encode pact expression EXP as a JSON value. " <>
"This is only needed for tests, as Pact values are automatically represented as JSON in API output. " <>
"`(json [{ \"name\": \"joe\", \"age\": 10 } {\"name\": \"mary\", \"age\": 25 }])`"
,defZRNative "sig-keyset" sigKeyset (funType tTyKeySet [])
"Convenience to build a keyset from keys present in message signatures, using 'keys-all' as the predicate."
,defZRNative "print" print' (funType tTyString [("value",a)])
"Print a string, mainly to format newlines correctly"
,defZRNative "env-hash" envHash (funType tTyString [("hash",tTyString)])
"Set current transaction hash. HASH must be a valid BLAKE2b 512-bit hash. `(env-hash (hash \"hello\"))`"
])
where
json = mkTyVar "a" [tTyInteger,tTyString,tTyTime,tTyDecimal,tTyBool,
TyList (mkTyVar "l" []),TySchema TyObject (mkSchemaVar "o"),tTyKeySet,tTyValue]
a = mkTyVar "a" []
invokeEnv :: (MVar (DbEnv PureDb) -> IO b) -> MVar LibState -> IO b
invokeEnv f e = withMVar e $ \ls -> f $! _rlsPure ls
{-# INLINE invokeEnv #-}
repldb :: PactDb LibState
repldb = PactDb {
_readRow = \d k -> invokeEnv $ _readRow pactdb d k
, _writeRow = \wt d k v -> invokeEnv $ _writeRow pactdb wt d k v
, _keys = \t -> invokeEnv $ _keys pactdb t
, _txids = \t tid -> invokeEnv $ _txids pactdb t tid
, _createUserTable = \t m k -> invokeEnv $ _createUserTable pactdb t m k
, _getUserTableInfo = \t -> invokeEnv $ _getUserTableInfo pactdb t
, _beginTx = \tid -> invokeEnv $ _beginTx pactdb tid
, _commitTx = invokeEnv $ _commitTx pactdb
, _rollbackTx = invokeEnv $ _rollbackTx pactdb
, _getTxLog = \d t -> invokeEnv $ _getTxLog pactdb d t
}
load :: RNativeFun LibState
load _ [TLitString fn] = setop (Load (unpack fn) False) >> return (tStr $ "Loading " <> fn <> "...")
load _ [TLitString fn, TLiteral (LBool r) _] = setop (Load (unpack fn) r) >> return (tStr $ "Loading " <> fn <> "...")
load i as = argsError i as
modifyLibState :: (LibState -> (LibState,a)) -> Eval LibState a
modifyLibState f = view eePactDbVar >>= \m -> liftIO $ modifyMVar m (return . f)
setLibState :: (LibState -> LibState) -> Eval LibState ()
setLibState f = modifyLibState (f &&& const ())
viewLibState :: (LibState -> a) -> Eval LibState a
viewLibState f = modifyLibState (id &&& f)
setop :: LibOp -> Eval LibState ()
setop v = setLibState $ set rlsOp v
setenv :: Show a => Setter' (EvalEnv LibState) a -> a -> Eval LibState ()
setenv l v = setop $ UpdateEnv $ Endo (set l v)
setsigs :: RNativeFun LibState
setsigs i [TList ts _ _] = do
ks <- forM ts $ \t -> case t of
(TLitString s) -> return s
_ -> argsError i ts
setenv eeMsgSigs (S.fromList (map (PublicKey . encodeUtf8) ks))
return $ tStr "Setting transaction keys"
setsigs i as = argsError i as
setmsg :: RNativeFun LibState
setmsg i [TLitString j] =
case eitherDecode (BSL.fromStrict $ encodeUtf8 j) of
Left f -> evalError' i ("Invalid JSON: " ++ show f)
Right v -> setenv eeMsgBody v >> return (tStr "Setting transaction data")
setmsg _ [a] = setenv eeMsgBody (toJSON a) >> return (tStr "Setting transaction data")
setmsg i as = argsError i as
setstep :: RNativeFun LibState
setstep i as = case as of
[] -> setstep' Nothing >> return (tStr "Un-setting step")
[TLitInteger j] -> do
setstep' (Just $ PactStep (fromIntegral j) False def def)
return $ tStr "Setting step"
[TLitInteger j,TLiteral (LBool b) _] -> do
setstep' (Just $ PactStep (fromIntegral j) b def def)
return $ tStr "Setting step and rollback"
[TLitInteger j,TLiteral (LBool b) _,o@TObject{}] -> do
setstep' (Just $ PactStep (fromIntegral j) b def (Just o))
return $ tStr "Setting step, rollback, and resume value"
_ -> argsError i as
where
setstep' s = do
setenv eePactStep s
evalPactExec .= Nothing
setentity :: RNativeFun LibState
setentity i as = case as of
[TLitString s] -> do
setenv eeEntity $ Just (EntityName s)
evalPactExec .= Nothing
return (tStr $ "Set entity to " <> s)
[] -> do
setenv eeEntity Nothing
evalPactExec .= Nothing
return (tStr "Unset entity")
_ -> argsError i as
pactState :: RNativeFun LibState
pactState i [] = do
e <- use evalPactExec
case e of
Nothing -> evalError' i "pact-state: no pact exec in context"
Just PactExec{..} -> return $ (\o -> TObject o TyAny def)
[(tStr "yield",fromMaybe (toTerm False) _peYield)
,(tStr "executed",toTerm _peExecuted)
,(tStr "step",toTerm _peStep)]
pactState i as = argsError i as
txmsg :: Maybe Text -> Maybe TxId -> Text -> Term Name
txmsg n tid s = tStr $ s <> " Tx " <> pack (show tid) <> maybe "" (": " <>) n
tx :: Tx -> RNativeFun LibState
tx Begin i as = do
tname <- case as of
[TLitString n] -> return $ Just n
[] -> return Nothing
_ -> argsError i as
setop $ Tx (_faInfo i) Begin tname
setLibState $ set rlsTxName tname
return (tStr "")
tx t i [] = do
tname <- modifyLibState (set rlsTxName Nothing &&& view rlsTxName)
setop (Tx (_faInfo i) t tname)
return (tStr "")
tx _ i as = argsError i as
recordTest :: Text -> Maybe (FunApp,Text) -> Eval LibState ()
recordTest name failure = setLibState $ over rlsTests (++ [TestResult name failure])
testSuccess :: Text -> Text -> Eval LibState (Term Name)
testSuccess name msg = recordTest name Nothing >> return (tStr msg)
testFailure :: FunApp -> Text -> Text -> Eval LibState (Term Name)
testFailure i name msg = recordTest name (Just (i,msg)) >> return (tStr msg)
expect :: RNativeFun LibState
expect i [TLitString a,b,c] =
if b `termEq` c
then testSuccess a $ "Expect: success: " <> a
else testFailure i a $ "FAILURE: " <> a <> ": expected " <> pack (show b) <> ", received " <> pack (show c)
expect i as = argsError i as
expectFail :: ZNativeFun LibState
expectFail i as@[a,b] = do
a' <- reduce a
case a' of
TLitString msg -> do
r <- catch (Right <$> reduce b) (\(_ :: SomeException) -> return $ Left ())
case r of
Right v -> testFailure i msg $ "FAILURE: " <> msg <> ": expected failure, got result = " <> pack (show v)
Left _ -> testSuccess msg $ "Expect failure: success: " <> msg
_ -> argsError' i as
expectFail i as = argsError' i as
bench' :: ZNativeFun LibState
#if !defined(ghcjs_HOST_OS)
bench' i as = do
e <- ask
s <- get
(r :: Either SomeException Report) <-
try $ liftIO $ benchmark' $ whnfIO $ runEval s e $ do
!ts <- mapM reduce as
return $! toTerm (length ts)
case r of
Left ex -> evalError' i (show ex)
Right rpt -> do
let mean = estPoint (anMean (reportAnalysis rpt))
sd = estPoint (anStdDev (reportAnalysis rpt))
(reg,_) = splitAt 1 $ anRegress (reportAnalysis rpt)
val = case reg of
[] -> mean
(r':_) -> case M.lookup "iters" (regCoeffs r') of
Nothing -> mean
Just t -> estPoint t
tps = 1/val
tperr = (1/(val - (sd/2))) - (1/(val + (sd/2)))
liftIO $ putStrLn $ show (round tps :: Integer) ++ "/s, +-" ++ show (round tperr :: Integer) ++ "/s"
return (tStr "Done")
#else
bench' i _ = evalError' i "Benchmarking not supported in GHCJS"
#endif
tc :: RNativeFun LibState
tc i as = case as of
[TLitString s] -> go s False
[TLitString s,TLiteral (LBool d) _] -> go s d
_ -> argsError i as
where
go modname dbg = do
mdm <- HM.lookup (ModuleName modname) <$> view (eeRefStore . rsModules)
case mdm of
Nothing -> evalError' i $ "No such module: " ++ show modname
Just md -> do
r :: Either CheckerException ([TopLevel Node],[Failure]) <-
try $ liftIO $ typecheckModule dbg md
case r of
Left (CheckerException ei e) -> evalError ei ("Typechecker Internal Error: " ++ e)
Right (_,fails) -> case fails of
[] -> return $ tStr $ "Typecheck " <> modname <> ": success"
_ -> do
setop $ TcErrors $ map (\(Failure ti s) -> renderInfo (_tiInfo ti) ++ ":Warning: " ++ s) fails
return $ tStr $ "Typecheck " <> modname <> ": Unable to resolve all types"
#if !defined(ghcjs_HOST_OS)
verify :: RNativeFun LibState
verify i as = case as of
[TLitString modName] -> do
modules <- view (eeRefStore . rsModules)
let mdm = HM.lookup (ModuleName modName) modules
case mdm of
Nothing -> evalError' i $ "No such module: " ++ show modName
Just md -> do
results <- liftIO $ verifyModule modules md
setop $ TcErrors $ fmap (Text.unpack . describeCheckResult) $
toListOf (traverse . each) results
return (tStr "")
_ -> argsError i as
#endif
json' :: RNativeFun LibState
json' _ [a] = return $ TValue (toJSON a) def
json' i as = argsError i as
sigKeyset :: RNativeFun LibState
sigKeyset _ _ = view eeMsgSigs >>= \ss -> return $ toTerm $ KeySet (S.toList ss) (Name (asString KeysAll) def)
print' :: RNativeFun LibState
print' _ [v] = setop (Print v) >> return (tStr "")
print' i as = argsError i as
envHash :: RNativeFun LibState
envHash i [TLitString s] = case fromText' s of
Left err -> evalError' i $ "Bad hash value: " ++ show s ++ ": " ++ err
Right h -> do
setenv eeHash h
return $ tStr $ "Set tx hash to " <> s
envHash i as = argsError i as
setGasLimit :: RNativeFun LibState
setGasLimit _ [TLitInteger l] = do
setenv (eeGasEnv . geGasLimit) (fromIntegral l)
return $ tStr $ "Set gas limit to " <> tShow l
setGasLimit i as = argsError i as
envGas :: RNativeFun LibState
envGas _ [] = use evalGas >>= \g -> return (tLit $ LInteger $ fromIntegral g)
envGas _ [TLitInteger g] = do
evalGas .= fromIntegral g
return $ tStr $ "Set gas to " <> tShow g
envGas i as = argsError i as
setGasPrice :: RNativeFun LibState
setGasPrice _ [TLiteral (LDecimal d) _] = do
setenv (eeGasEnv . geGasPrice) (GasPrice d)
return $ tStr $ "Set gas price to " <> tShow d
setGasPrice i as = argsError i as
setGasRate :: RNativeFun LibState
setGasRate _ [TLitInteger r] = do
setenv (eeGasEnv . geGasModel) (constGasModel $ fromIntegral r)
return $ tStr $ "Set gas rate to " <> tShow r
setGasRate i as = argsError i as
|
(* 1st-order unification did not work when in competition with pattern unif. *)
Set Implicit Arguments.
Lemma test : forall
(A : Type)
(B : Type)
(f : A -> B)
(S : B -> Prop)
(EV : forall y (f':A->B), (forall x', S (f' x')) -> S (f y))
(HS : forall x', S (f x'))
(x : A),
S (f x).
Proof.
intros. eapply EV. intros.
(* worked in v8.2 but not in v8.3beta, fixed in r12898 *)
apply HS.
(* still not compatible with 8.2 because an evar can be solved in
two different ways and is left open *)
|
program fixture
use, intrinsic :: iso_fortran_env, only: wp => real64
use json_module
type(json_core) :: json
type(json_value), pointer :: root
double precision x(6), y(6), yc(6), a(6, 6)
data((a(i, j), i=1, 6), j=1, 6)/1.053750863028617, 0, 0, 0, 0, 0,&
&0.197684262345795, -1.068692711254786, 0, 0, 0, 0,&
&0.2626454586627623, -1.2329011995712644, -0.00372353379218051,&
&0, 0, 0,&
&-0.9740025611125269,&
&0.6893726977654734,&
&-0.955839103276798,&
&-1.2317070584140966,&
&0,&
&0,&
&-0.9106806824932887,&
&0.7412763052602079,&
&0.06851153327714439,&
&-0.3237507545879617,&
&-1.0865030469936974,&
&0,&
&-0.767790184730859,&
&-1.1197200611269833,&
&-0.4481742366033955,&
&0.47173637445323024,&
&-1.180490682884277,&
&1.4702569970829857/
data(yc(i), i=1, 6)/&
&0.7021167106675735,&
&2.5071111484833684,&
&-1.890027143624024,&
&-0.5898127901911715,&
&-1.7145022968458246,&
&-0.4209978978166964/
data(x(i), i=1, 6)/&
&-0.08252376201716412,&
&0.6060734308621007,&
&-0.8874201453170976,&
&0.10542139019376515,&
&0.3528744733184766,&
&0.5503933584550523/
call json%initialize()
call json%create_array(root, '')
y = yc
call addcase(root, '0', 'u', 6, 1._8, a, 6, x, 1, 1._8, y, 1)
y = yc
call addcase(root, '1', 'u', 6, 0._8, a, 6, x, 1, 0.35_8, y, 1)
y = yc
call addcase(root, '2', 'u', 6, 0.5_8, a, 6, x, -1, 0._8, y, -1)
y = yc
call addcase(root, '3', 'l', 6, 0.5_8, a, 6, x, -1, 0._8, y, -1)
y = yc
call addcase(root, '4', 'l', 6, 0._8, a, 6, x, -1, 1._8, y, -1)
call print(root)
contains
subroutine addcase(root, ncase, uplo, n, alpha, a, lda, x, incx, beta, y, incy)
type(json_core) :: json
type(json_value), pointer :: root, case, array
external dsymv
character(len=*) ncase, uplo
integer n, lda, incx, incy
double precision alpha, beta, y(6), x(6), a(6, 6)
call json%create_object(case, '')
call json%add(root, case)
call json%add(case, 'uplo', uplo)
call json%add(case, 'n', n)
call json%add(case, 'alpha', alpha)
call json%create_array(array, 'a')
do j = 1, 6
do i = 1, 6
call json%add(array, '', a(i, j))
enddo
enddo
call json%add(case, array)
nullify (array)
call json%add(case, 'lda', lda)
call json%add(case, 'x', x)
call json%add(case, 'incx', incx)
call json%add(case, 'beta', beta)
call json%add(case, 'y', y)
call json%add(case, 'incy', incy)
call dsymv(uplo, n, alpha, a, lda, x, incx, beta, y, incy)
call json%add(case, 'expect', y)
nullify (case)
end subroutine addcase
subroutine print(root)
use, intrinsic :: iso_fortran_env, only: real64
use json_module, CK => json_CK
implicit none
type(json_core) :: json
type(json_value), pointer :: root
logical :: status_ok
character(kind=CK, len=:), allocatable :: error_msg
call json%print(root, './tests/fixtures/level2/symv.json')
call json%destroy(root)
if (json%failed()) then
call json%check_for_errors(status_ok, error_msg)
write (*, *) 'Error: '//error_msg
call json%clear_exceptions()
call json%destroy(root)
end if
end subroutine print
end
|
!==========================================================================
elemental function gsw_alpha_on_beta (sa, ct, p)
!==========================================================================
!
! Calculates alpha divided by beta, where alpha is the thermal expansion
! coefficient and beta is the saline contraction coefficient of seawater
! from Absolute Salinity and Conservative Temperature. This function uses
! the computationally-efficient expression for specific volume in terms of
! SA, CT and p (Roquet et al., 2014).
!
! SA = Absolute Salinity [ g/kg ]
! CT = Conservative Temperature (ITS-90) [ deg C ]
! p = sea pressure [ dbar ]
! ( i.e. absolute pressure - 10.1325 dbar )
!
! alpha_on_beta = thermal expansion coefficient with respect to
! Conservative Temperature divided by the saline
! contraction coefficient at constant Conservative
! Temperature [ kg g^-1 K^-1 ]
!--------------------------------------------------------------------------
use gsw_mod_teos10_constants, only : gsw_sfac, offset
use gsw_mod_specvol_coefficients
use gsw_mod_kinds
implicit none
real (r8), intent(in) :: sa, ct, p
real (r8) :: gsw_alpha_on_beta
real (r8) :: xs, ys, z, v_ct_part, v_sa_part
xs = sqrt(gsw_sfac*sa + offset)
ys = ct*0.025_r8
z = p*1e-4_r8
v_ct_part = a000 + xs*(a100 + xs*(a200 + xs*(a300 + xs*(a400 + a500*xs)))) &
+ ys*(a010 + xs*(a110 + xs*(a210 + xs*(a310 + a410*xs))) &
+ ys*(a020 + xs*(a120 + xs*(a220 + a320*xs)) + ys*(a030 &
+ xs*(a130 + a230*xs) + ys*(a040 + a140*xs + a050*ys )))) &
+ z*(a001 + xs*(a101 + xs*(a201 + xs*(a301 + a401*xs))) &
+ ys*(a011 + xs*(a111 + xs*(a211 + a311*xs)) + ys*(a021 &
+ xs*(a121 + a221*xs) + ys*(a031 + a131*xs + a041*ys))) &
+ z*(a002 + xs*(a102 + xs*(a202 + a302*xs)) + ys*(a012 &
+ xs*(a112 + a212*xs) + ys*(a022 + a122*xs + a032*ys)) &
+ z*(a003 + a103*xs + a013*ys + a004*z)))
v_sa_part = b000 + xs*(b100 + xs*(b200 + xs*(b300 + xs*(b400 + b500*xs)))) &
+ ys*(b010 + xs*(b110 + xs*(b210 + xs*(b310 + b410*xs))) &
+ ys*(b020 + xs*(b120 + xs*(b220 + b320*xs)) + ys*(b030 &
+ xs*(b130 + b230*xs) + ys*(b040 + b140*xs + b050*ys)))) &
+ z*(b001 + xs*(b101 + xs*(b201 + xs*(b301 + b401*xs))) &
+ ys*(b011 + xs*(b111 + xs*(b211 + b311*xs)) + ys*(b021 &
+ xs*(b121 + b221*xs) + ys*(b031 + b131*xs + b041*ys))) &
+ z*(b002 + xs*(b102 + xs*(b202 + b302*xs))+ ys*(b012 &
+ xs*(b112 + b212*xs) + ys*(b022 + b122*xs + b032*ys)) &
+ z*(b003 + b103*xs + b013*ys + b004*z)))
gsw_alpha_on_beta = -(v_ct_part*xs)/(20.0_r8*gsw_sfac*v_sa_part)
return
end function
!--------------------------------------------------------------------------
|
Following the Sarajevo concert, The Edge's solo performance of "Sunday Bloody Sunday" featured at the majority of shows for the remainder of the tour, and a recording of the song from the Sarajevo concert was released on the CD single for "If God Will Send His Angels" on 8 December 1997; The Edge later stated the band had "rediscovered" the song in Sarajevo after his solo performance. A short documentary about the concert, Missing Sarajevo, was included on the DVD release of U2's 2002 video compilation, The Best of 1990–2000.
|
lemma poly_div_minus_right [simp]: "x div (- y) = - (x div y)" for x y :: "'a::field poly" |
lemma countable_dense_exists: "\<exists>D::'a set. countable D \<and> (\<forall>X. p X \<longrightarrow> X \<noteq> {} \<longrightarrow> (\<exists>d \<in> D. d \<in> X))" |
```
from __future__ import division
import sympy
from sympy import *
from sympy import init_printing
init_printing(use_latex='mathjax')
from sympy.utilities.autowrap import ufuncify, autowrap
from copy import deepcopy
import numpy as np
from IPython.display import display
```
# Dielectric constant functions
Let's generate some values for $\theta_\text{I}$ and $\theta_\text{II}$ that we can check by hand.
```
from sympy.abc import D, k, kappa, omega, lamda, eta, alpha, theta
E_s1 = symbols('E_s1')
E_s, E_d, E_eff, h_s, h_d= symbols('E_s E_d E_eff h_s h_d')
k, kappa, omega, D, h = symbols('k kappa omega D h')
```
Intermediates used to calculate $\theta_\text{I}, \theta_\text{II}$:
```
intermediates = {alpha : E_eff / E_d,
eta: sqrt(k**2 + kappa**2 / E_s + omega*1j / D),
lamda: (1 - E_eff / E_s)*(k / eta)}
for key, val in intermediates.items():
display(Eq(key, val))
```
$$\lambda = \frac{k}{\eta} \left(- \frac{E_{eff}}{E_{s}} + 1\right)$$
$$\alpha = \frac{E_{eff}}{E_{d}}$$
$$\eta = \sqrt{k^{2} + \frac{\kappa^{2}}{E_{s}} + \frac{1.0 i}{D} \omega}$$
```
theta_I_bracket_term = (-lamda / tanh(eta * h_s) +
(sinh(k * h_s) * sinh(eta * h_s) + alpha * cosh(k*h_s) * sinh(eta * h_s) -
lamda * (cosh(k*h_s) * cosh(eta*h_s) - 2 + lamda * sinh(k*h_s)/sinh(eta *h_s)))/
(cosh(k*h_s)*sinh(eta*h_s) + alpha * sinh(k*h_s) * sinh(eta*h_s) - lamda * sinh(k * h_s) * cosh(eta * h_s))
)
```
```
theta_I = E_s / E_eff * theta_I_bracket_term
```
```
theta_I
```
$$\frac{E_{s}}{E_{eff}} \left(- \frac{\lambda}{\tanh{\left (\eta h_{s} \right )}} + \frac{\alpha \sinh{\left (\eta h_{s} \right )} \cosh{\left (h_{s} k \right )} - \lambda \left(\frac{\lambda \sinh{\left (h_{s} k \right )}}{\sinh{\left (\eta h_{s} \right )}} + \cosh{\left (\eta h_{s} \right )} \cosh{\left (h_{s} k \right )} - 2\right) + \sinh{\left (\eta h_{s} \right )} \sinh{\left (h_{s} k \right )}}{\alpha \sinh{\left (\eta h_{s} \right )} \sinh{\left (h_{s} k \right )} - \lambda \sinh{\left (h_{s} k \right )} \cosh{\left (\eta h_{s} \right )} + \sinh{\left (\eta h_{s} \right )} \cosh{\left (h_{s} k \right )}}\right)$$
```
theta_I_python = lambdify((k, E_s, E_eff, eta, h_s, alpha, lamda), theta_I)
theta_I_c = autowrap(theta_I, language='C', backend='Cython', args=(k, E_s, E_eff, eta, h_s, alpha, lamda))
theta_I_f = autowrap(theta_I, language='F95', backend='f2py', args=(k, E_s, E_eff, eta, h_s, alpha, lamda))
```
```
theta_II = E_s / E_d * (E_eff + (1-lamda)*E_d/tanh(k*h_d))/(E_eff / tanh(k*h_d) + (1 - lamda) * E_d)
theta_II
```
$$\frac{E_{s} \left(\frac{E_{d} \left(- \lambda + 1\right)}{\tanh{\left (h_{d} k \right )}} + E_{eff}\right)}{E_{d} \left(E_{d} \left(- \lambda + 1\right) + \frac{E_{eff}}{\tanh{\left (h_{d} k \right )}}\right)}$$
```
{k:0.01, E_s:3.5-0.05j, E_d: 3.5-0.05j, E_eff: 3.5 - 0.5j, }
```
{k: 0.01, E_d: (3.5-0.05j), E_eff: (3.5-0.5j), E_s: (3.5-0.05j)}
```
%timeit theta_I_python(1, 3, 2, 0.2, 4, 1.02, 1.8)
```
100000 loops, best of 3: 3.06 µs per loop
```
%timeit theta_I_c(1, 3, 2, 0.2, 4, 1.02, 1.8)
```
1000000 loops, best of 3: 285 ns per loop
```
%timeit theta_I_f(1, 3, 2, 0.2, 4, 1.02, 1.8)
```
1000000 loops, best of 3: 456 ns per loop
```
((E_s - theta) / (E_s + theta))
```
$$\frac{E_{s} - \theta}{E_{s} + \theta}$$
```
from sympy.functions.elementary.complexes import im
```
```
complex_result = (E_s - theta) / (E_s + theta)
```
```
dielectric_I = complex_result.subs({theta:theta_I}).subs(intermediates).subs(intermediates)
```
```
dielectric_args = (k, omega, kappa, E_s, E_d, E_eff, h_s, D)
dielectric_I_python = lambdify(dielectric_args, dielectric_I, dummify=False, modules="numpy")
```
```
%timeit dielectric_I_python(k=1.1, omega=432, kappa=0.3, E_s=3-0.1j, E_d=4-1j, E_eff=4-5j, h_s=0.07, D=0.0077)
```
10000 loops, best of 3: 121 µs per loop
```
dielectric_I_python(k=1.1, omega=432, kappa=0.3, E_s=3-0.1j, E_d=4-1j, E_eff=4-5j, h_s=0.07, D=0.0077)
```
(0.6365044412573243-0.10023319309176819j)
```
lambdify(expr=tanh(h_s * intermediates[eta]), args=[k, kappa, omega, D, h_s, E_s], modules='numpy')(0.01, 0.3, 432, 0.0077, 0.07, 3-1j)
```
(1.0000000000148801-1.302525298862032e-10j)
```
lambdify(expr=(h_s * intermediates[eta]), args=[k, kappa, omega, D, h_s, E_s], modules='numpy')(0.01, 0.3, 432, 1e-6, 0.07, 3-1j)
```
(1028.78569201192+1028.7856919473827j)
```
np.tanh(100000)
```
$$1.0$$
```
sympy.printing.lambdarepr.lambdarepr(dielectric_I)
```
'(E_s - E_s*(-k*(-E_eff/E_s + 1)/(sqrt(k**2 + kappa**2/E_s + 1.0*I*omega/D)*tanh(h_s*sqrt(k**2 + kappa**2/E_s + 1.0*I*omega/D))) + (-k*(-E_eff/E_s + 1)*(k*(-E_eff/E_s + 1)*sinh(h_s*k)/(sqrt(k**2 + kappa**2/E_s + 1.0*I*omega/D)*sinh(h_s*sqrt(k**2 + kappa**2/E_s + 1.0*I*omega/D))) + cosh(h_s*k)*cosh(h_s*sqrt(k**2 + kappa**2/E_s + 1.0*I*omega/D)) - 2)/sqrt(k**2 + kappa**2/E_s + 1.0*I*omega/D) + sinh(h_s*k)*sinh(h_s*sqrt(k**2 + kappa**2/E_s + 1.0*I*omega/D)) + E_eff*sinh(h_s*sqrt(k**2 + kappa**2/E_s + 1.0*I*omega/D))*cosh(h_s*k)/E_d)/(-k*(-E_eff/E_s + 1)*sinh(h_s*k)*cosh(h_s*sqrt(k**2 + kappa**2/E_s + 1.0*I*omega/D))/sqrt(k**2 + kappa**2/E_s + 1.0*I*omega/D) + sinh(h_s*sqrt(k**2 + kappa**2/E_s + 1.0*I*omega/D))*cosh(h_s*k) + E_eff*sinh(h_s*k)*sinh(h_s*sqrt(k**2 + kappa**2/E_s + 1.0*I*omega/D))/E_d))/E_eff)/(E_s + E_s*(-k*(-E_eff/E_s + 1)/(sqrt(k**2 + kappa**2/E_s + 1.0*I*omega/D)*tanh(h_s*sqrt(k**2 + kappa**2/E_s + 1.0*I*omega/D))) + (-k*(-E_eff/E_s + 1)*(k*(-E_eff/E_s + 1)*sinh(h_s*k)/(sqrt(k**2 + kappa**2/E_s + 1.0*I*omega/D)*sinh(h_s*sqrt(k**2 + kappa**2/E_s + 1.0*I*omega/D))) + cosh(h_s*k)*cosh(h_s*sqrt(k**2 + kappa**2/E_s + 1.0*I*omega/D)) - 2)/sqrt(k**2 + kappa**2/E_s + 1.0*I*omega/D) + sinh(h_s*k)*sinh(h_s*sqrt(k**2 + kappa**2/E_s + 1.0*I*omega/D)) + E_eff*sinh(h_s*sqrt(k**2 + kappa**2/E_s + 1.0*I*omega/D))*cosh(h_s*k)/E_d)/(-k*(-E_eff/E_s + 1)*sinh(h_s*k)*cosh(h_s*sqrt(k**2 + kappa**2/E_s + 1.0*I*omega/D))/sqrt(k**2 + kappa**2/E_s + 1.0*I*omega/D) + sinh(h_s*sqrt(k**2 + kappa**2/E_s + 1.0*I*omega/D))*cosh(h_s*k) + E_eff*sinh(h_s*k)*sinh(h_s*sqrt(k**2 + kappa**2/E_s + 1.0*I*omega/D))/E_d))/E_eff)'
## Small functions
```
_eta = intermediates[eta]
```
```
all_k = [1, 10, 100, 1000, 10000, 100000]
all_eta = [_eta.evalf(subs={k:_k, kappa:3500, E_s:3 - 0.001j, D: 0.005, omega:300})
for _k in all_k]
```
```
print(str(all_eta).replace('*I', 'j').replace(",", ",\n"))
```
[2020.78311260126 + 15.182507854811j,
2020.80760652432 + 15.182323829782j,
2023.25550170076 + 15.163955048756j,
2254.66583909462 + 13.607584302718j,
10202.1243828582 + 3.007271263178j,
100020.414581093 + 0.30674293451j]
```
_lamda = intermediates[lamda]
all_lamda = [_lamda.evalf(subs=
{k:_k, E_s:3-0.001j, D:0.005,
E_eff:3 - 100j, eta: _eta_}) for _k, _eta_ in zip(all_k, all_eta)]
```
```
print(str(all_lamda).replace('*I', 'j').replace(",", ",\n"))
```
[0.0001184255261724 + 0.0164941987549306j,
0.00118421087011718 + 0.164939988752172j,
0.0117978533636026 + 1.64740475175451j,
0.0842948437214929 + 14.7834985873234j,
-0.00125999301746353 + 32.672603689536j,
-0.0110065260871034 + 33.3261929274151j]
```
all_theta_I = [theta_I.evalf(subs={k:_k, E_s:3-0.001j, D:0.005,
E_eff:3 - 100j, eta: _eta_,
lamda:_lamda_, h_s:0.1, alpha: 0.65-0.0002j}) for _k, _eta_, _lamda_ in zip(all_k, all_eta, all_lamda)]
```
```
print(str(all_theta_I).replace('*I', 'j').replace(",", ",\n"))
```
[0.00157126996626562 + 0.0210682675809495j,
0.00672782406000677 + 0.0281575198774334j,
0.050275664263775 + 0.0281213204722464j,
0.443934273416263 + 0.0140052914999941j,
0.980197277948465 + 0.000305155415174606j,
0.999795989512753 + 3.05416795636227e-6j]
```
sympy.printing.lambdarepr.lambdarepr(theta_I)
```
'E_s*(-lamda/tanh(eta*h_s) + (alpha*sinh(eta*h_s)*cosh(h_s*k) - lamda*(lamda*sinh(h_s*k)/sinh(eta*h_s) + cosh(eta*h_s)*cosh(h_s*k) - 2) + sinh(eta*h_s)*sinh(h_s*k))/(alpha*sinh(eta*h_s)*sinh(h_s*k) - lamda*sinh(h_s*k)*cosh(eta*h_s) + sinh(eta*h_s)*cosh(h_s*k)))/E_eff'
```
theta_I_python = lambdify((k, E_s, D, E_eff, eta, lamda, h_s, alpha), theta_I, dummify=False, modules='numpy')
```
```
def copy_update_dict(dictionary, **kwargs):
copied_dictionary = deepcopy(dictionary)
copied_dictionary.update(kwargs)
return copied_dictionary
```
```
theta_subs = {k: 1, E_s:3-0.001j, D:0.005, E_eff:3 - 100j, eta: 2020.78311260126 + 15.182507854811j, lamda:0.0001184255261724 + 0.0164941987549306j, h_s:0.1, alpha: 0.65-0.0002j}
theta_I_kwargs = {str(key): np.complex128(val) for key, val in theta_subs.items()}
theta_I_all_kwargs = [copy_update_dict(theta_I_kwargs, k=np.float64(_k), eta=np.complex128(_eta_), lamda=np.complex128(_lamda_))
for _k, _eta_, _lamda_ in zip(all_k, all_eta, all_lamda)]
all_theta_I_lambda = [theta_I_python(**kwargs) for kwargs in theta_I_all_kwargs]
```
/Users/ryandwyer/anaconda/envs/jittermodel/lib/python2.7/site-packages/numpy/__init__.py:1: RuntimeWarning: overflow encountered in tanh
"""
/Users/ryandwyer/anaconda/envs/jittermodel/lib/python2.7/site-packages/numpy/__init__.py:1: RuntimeWarning: invalid value encountered in tanh
"""
/Users/ryandwyer/anaconda/envs/jittermodel/lib/python2.7/site-packages/numpy/__init__.py:1: RuntimeWarning: overflow encountered in sinh
"""
/Users/ryandwyer/anaconda/envs/jittermodel/lib/python2.7/site-packages/numpy/__init__.py:1: RuntimeWarning: overflow encountered in cosh
"""
/Users/ryandwyer/anaconda/envs/jittermodel/lib/python2.7/site-packages/numpy/__init__.py:1: RuntimeWarning: invalid value encountered in cosh
"""
/Users/ryandwyer/anaconda/envs/jittermodel/lib/python2.7/site-packages/numpy/__init__.py:1: RuntimeWarning: invalid value encountered in sinh
"""
```
all_theta_I_lambda
```
[(0.0015712699662656167+0.021068267580949523j),
(0.0067278240600067672+0.028157519877433347j),
(0.050275664263774951+0.028121320472246414j),
(0.44393427341626346+0.01400529149999408j),
(nan+nan*j),
(nan+nan*j)]
So sympy's arbitrary-precision evaluation succeeds where the lambdified numpy version overflows to `nan`.
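The failure mode is easy to pin down: float64 overflows once the argument of `sinh`/`cosh` exceeds roughly 710, and for the two largest wavevectors here `eta * h_s` is about 1020 and 10002:
```
np.sinh(710.0)  # ~1.1e308, still representable in float64
np.sinh(711.0)  # inf, which then propagates to the nans above
```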
```
theta_subs = {k: 1, E_s:3-0.001j, D:0.005, E_eff:3 - 100j, E_d: 3 - 0.001j, eta: 2020.78311260126 + 15.182507854811j, lamda:0.0001184255261724 + 0.0164941987549306j, h_s:0.1, alpha: 0.65-0.0002j}
all_theta_II = [theta_II.evalf(subs={k: _k, E_s:3-0.001j, E_eff:3 - 100j,
E_d: 3 - 0.001j, lamda:_lamda_, h_d:0.1})
for _k, _lamda_ in zip(all_k, all_lamda)]
```
```
print(str(all_theta_II).replace('*I', 'j').replace(",", ",\n"))
```
[0.101145810077246 + 0.0296480635666554j,
0.764320753451023 + 0.0123928030520502j,
0.999999996277978 + 2.1003332939236e-10j,
1.0 + 8.470329472543e-22j,
1.00000000000000,
1.00000000000000]
## Avoiding numerical errors in calculating $\theta_I$
To avoid the numerical errors in calculating $\theta_I$ (associated with numpy's problems evaluating hyperbolic trigonometric functions at large arguments; see Eric Moore's github issue), we want an equivalent expression without huge intermediate values. We have $\theta_I$ given by
$$\frac{\epsilon_{\text{s}}}{\epsilon_{\text{eff}}} \left(- \frac{\lambda}{\tanh{\left (\eta h_{\text{s}} \right )}} + \frac{\alpha \sinh{\left (\eta h_{\text{s}} \right )} \cosh{\left (h_{\text{s}} k \right )} - \lambda \left(\frac{\lambda \sinh{\left (h_{\text{s}} k \right )}}{\sinh{\left (\eta h_{\text{s}} \right )}} + \cosh{\left (\eta h_{\text{s}} \right )} \cosh{\left (h_{\text{s}} k \right )} - 2\right) + \sinh{\left (\eta h_{\text{s}} \right )} \sinh{\left (h_{\text{s}} k \right )}}{\alpha \sinh{\left (\eta h_{\text{s}} \right )} \sinh{\left (h_{\text{s}} k \right )} - \lambda \sinh{\left (h_{\text{s}} k \right )} \cosh{\left (\eta h_{\text{s}} \right )} + \sinh{\left (\eta h_{\text{s}} \right )} \cosh{\left (h_{\text{s}} k \right )}}\right)$$
```
theta_I_numexpr = lambdify((k, E_s, D, E_eff, eta, lamda, h_s, alpha), theta_I, dummify=False, modules='numexpr')
theta_subs = {k: 1, E_s:3-0.001j, D:0.005, E_eff:3 - 100j, eta: 2020.78311260126 + 15.182507854811j, lamda:0.0001184255261724 + 0.0164941987549306j, h_s:0.1, alpha: 0.65-0.0002j}
theta_I_kwargs = {str(key): np.complex128(val) for key, val in theta_subs.items()}
theta_I_all_kwargs = [copy_update_dict(theta_I_kwargs, k=np.float64(_k), eta=np.complex128(_eta_), lamda=np.complex128(_lamda_))
for _k, _eta_, _lamda_ in zip(all_k, all_eta, all_lamda)]
all_theta_I_numexpr = [theta_I_numexpr(**kwargs) for kwargs in theta_I_all_kwargs]
```
In the limit,
$$e^{k h_{\text{s}}} \gg 1$$
we can approximate
$$\coth \eta h_\text{s} = \tanh \eta h_\text{s} = 1,$$
$$\sinh k h_\text{s} = \cosh k h_\text{s} = \exp(k h_\text{s})/2,$$
$$\sinh \eta h_\text{s} = \cosh \eta h_\text{s} = \exp(\eta h_\text{s})/2 .$$
```
large_k = {sinh(k*h_s): exp(k*h_s)/2, cosh(k*h_s): exp(k*h_s)/2,
sinh(eta*h_s): exp(eta*h_s)/2, cosh(eta*h_s): exp(eta*h_s)/2,
coth(eta*h_s): 1, tanh(eta*h_s): 1}
theta_I.subs(large_k)
```
$$\frac{E_{s}}{E_{eff}} \left(- \lambda + \frac{\frac{\alpha}{4} e^{\eta h_{s}} e^{h_{s} k} - \lambda \left(\lambda e^{- \eta h_{s}} e^{h_{s} k} + \frac{e^{\eta h_{s}}}{4} e^{h_{s} k} - 2\right) + \frac{e^{\eta h_{s}}}{4} e^{h_{s} k}}{\frac{\alpha}{4} e^{\eta h_{s}} e^{h_{s} k} - \frac{\lambda}{4} e^{\eta h_{s}} e^{h_{s} k} + \frac{e^{\eta h_{s}}}{4} e^{h_{s} k}}\right)$$
```
theta_full = theta_I.subs(intermediates).subs(intermediates)
def theta_k(_k):
return {k: _k, E_s: 3 - 0.001j, D: 0.005, E_d: 4.65, E_eff: 3 - 100j, h_s: 0.1, kappa: 3500, omega: 300}
simplified_pre_sub = (E_s/E_eff*(-lamda + (1+alpha-lamda*(1 - 8*exp(-h_s*(k+eta)) + 4*lamda*exp(-2*eta*h_s)))/(1 + alpha - lamda)))
simplified = simplified_pre_sub.subs(intermediates).subs(intermediates)
```
```
print(theta_full.subs(theta_k(60)).evalf())
print(simplified.subs(theta_k(60)).evalf())
print(theta_I.subs(large_k).subs(intermediates).subs(intermediates).subs(theta_k(60)).evalf())
```
0.0305522794210152 + 0.0288610299476901*I
0.0305522357456196 + 0.0288606649881057*I
0.0305522357456196 + 0.0288606649881057*I
```
eta.subs(intermediates).subs(intermediates).subs(theta_k(80)).evalf()
```
$$2022.36570074502 + 15.170626889408 i$$
```
def theta_k2(_k, _D, _kappa):
    # NB: the original dict listed the D key twice; the later D: 0.005 entry wins,
    # so the _D argument never takes effect. Behavior preserved here.
    return {k: _k, E_s: 3 - 0.001j, E_d: 4.65, E_eff: 3 - 100j, h_s: 0.1, kappa: _kappa, D: 0.005, omega: 300}
```
```
print(theta_full.subs(theta_k2(60, 0.005/1e6, 3500*1e6)).evalf())
print(simplified.subs(theta_k2(60, 0.005/1e6, 3500*1e6)).evalf())
```
0.000909256549701857 + 0.0299730883346336*I
0.000909211401593154 + 0.0299727236530035*I
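For a numpy implementation that stays finite at large k, the simplified expression can be coded directly: after the substitution every exponential has a decaying (negative real part) argument, so float64 never overflows. A sketch (the function name is ours; the formula is `simplified_pre_sub` from above):
```
def theta_I_stable(k, eta, lamda, alpha, E_s, E_eff, h_s):
    # Large-argument form: sinh/cosh -> exp(.)/2 and tanh -> 1,
    # valid when exp(k*h_s) >> 1. The remaining exponentials have
    # negative real-part arguments, so float64 never overflows.
    bracket = (1 + alpha
               - lamda * (1 - 8 * np.exp(-h_s * (k + eta))
                          + 4 * lamda * np.exp(-2 * eta * h_s)))
    return E_s / E_eff * (-lamda + bracket / (1 + alpha - lamda))
```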
|
module Core.SchemeEval.Compile
{- TODO:
- Make a decent set of test cases
- Option to keep vs discard FCs for faster quoting where we don't need FC
Advanced TODO (possibly not worth it...):
- Write a conversion check
- Extend unification to use SObj; include SObj in Glued
-}
import Core.Case.CaseTree
import Core.Context
import Core.Core
import Core.Directory
import Core.SchemeEval.Builtins
import Core.SchemeEval.ToScheme
import Core.TT
import Data.List
import Libraries.Utils.Scheme
import System.Info
schString : String -> String
schString s = concatMap okchar (unpack s)
where
okchar : Char -> String
okchar c = if isAlphaNum c || c == '_'
then cast c
else "C-" ++ show (cast {to=Int} c)
schVarUN : UserName -> String
schVarUN (Basic n) = schString n
schVarUN (Field n) = "rf--" ++ schString n
schVarUN Underscore = "_US_"
schVarName : Name -> String
schVarName (NS ns (UN n))
= schString (showNSWithSep "-" ns) ++ "-" ++ schVarUN n
schVarName (NS ns n) = schString (showNSWithSep "-" ns) ++ "-" ++ schVarName n
schVarName (UN n) = "u--" ++ schVarUN n
schVarName (MN n i) = schString n ++ "-" ++ show i
schVarName (PV n d) = "pat--" ++ schVarName n
schVarName (DN _ n) = schVarName n
schVarName (Nested (i, x) n) = "n--" ++ show i ++ "-" ++ show x ++ "-" ++ schVarName n
schVarName (CaseBlock x y) = "case--" ++ schString x ++ "-" ++ show y
schVarName (WithBlock x y) = "with--" ++ schString x ++ "-" ++ show y
schVarName (Resolved i) = "fn--" ++ show i
schName : Name -> String
schName n = "ct-" ++ schVarName n
export
data Sym : Type where
export
nextName : Ref Sym Integer => Core Integer
nextName
= do n <- get Sym
put Sym (n + 1)
pure n
public export
data SVar = Bound String | Free String
Show SVar where
show (Bound x) = x
show (Free x) = "'" ++ x
export
getName : SVar -> String
getName (Bound x) = x
getName (Free x) = x
public export
data SchVars : List Name -> Type where
Nil : SchVars []
(::) : SVar -> SchVars ns -> SchVars (n :: ns)
Show (SchVars ns) where
show xs = show (toList xs)
where
toList : forall ns . SchVars ns -> List String
toList [] = []
toList (Bound x :: xs) = x :: toList xs
toList (Free x :: xs) = "'x" :: toList xs
getSchVar : {idx : _} -> (0 _ : IsVar n idx vars) -> SchVars vars -> String
getSchVar First (Bound x :: xs) = x
getSchVar First (Free x :: xs) = "'" ++ x
getSchVar (Later p) (x :: xs) = getSchVar p xs
{-
Encoding of NF -> Scheme
Maybe consider putting this back into a logical order, rather than the order
I implemented them in...
vector (tag>=0) name args == data constructor
vector (-10) (name, arity) (args as list) == blocked meta application
(needs to be same arity as block app, for ct-addArg)
vector (-11) symbol (args as list) == blocked local application
vector (-1) ... == type constructor
vector (-2) name (args as list) == blocked application
vector (-3) ... == Pi binder
vector (-4) ... == delay arg
vector (-5) ... == force arg
vector (-6) = Erased
vector (-7) = Type
vector (-8) ... = Lambda
vector (-9) blockedapp proc = Top level lambda (from a PMDef, so not expanded)
vector (-12) ... = PVar binding
vector (-13) ... = PVTy binding
vector (-14) ... = PLet binding
vector (-15) ... = Delayed
vector (-100 onwards) ... = constants
-}
blockedAppWith : Name -> List (SchemeObj Write) -> SchemeObj Write
blockedAppWith n args = Vector (-2) [toScheme n, vars args]
where
vars : List (SchemeObj Write) -> SchemeObj Write
vars [] = Null
vars (x :: xs) = Cons x (vars xs)
blockedMetaApp : Name -> SchemeObj Write
blockedMetaApp n
= Lambda ["arity-0"] (Vector (-10) [Cons (toScheme n) (Var "arity-0"),
Null])
unload : SchemeObj Write -> List (SchemeObj Write) -> SchemeObj Write
unload f [] = f
unload f (a :: as) = unload (Apply (Var "ct-app") [f, a]) as
compileConstant : FC -> Constant -> SchemeObj Write
compileConstant fc (I x) = Vector (-100) [IntegerVal (cast x)]
compileConstant fc (I8 x) = Vector (-101) [IntegerVal (cast x)]
compileConstant fc (I16 x) = Vector (-102) [IntegerVal (cast x)]
compileConstant fc (I32 x) = Vector (-103) [IntegerVal (cast x)]
compileConstant fc (I64 x) = Vector (-104) [IntegerVal (cast x)]
compileConstant fc (BI x) = Vector (-105) [IntegerVal x]
compileConstant fc (B8 x) = Vector (-106) [IntegerVal (cast x)]
compileConstant fc (B16 x) = Vector (-107) [IntegerVal (cast x)]
compileConstant fc (B32 x) = Vector (-108) [IntegerVal (cast x)]
compileConstant fc (B64 x) = Vector (-109) [IntegerVal (cast x)]
compileConstant fc (Str x) = StringVal x
compileConstant fc (Ch x) = CharVal x
compileConstant fc (Db x) = FloatVal x
-- Constant types get compiled as TyCon names, for matching purposes
compileConstant fc t
= Vector (-1) [IntegerVal (cast (constTag t)),
StringVal (show t),
toScheme (UN (Basic (show t))),
toScheme fc]
compileStk : Ref Sym Integer =>
{auto c : Ref Ctxt Defs} ->
SchVars vars -> List (SchemeObj Write) -> Term vars ->
Core (SchemeObj Write)
compilePiInfo : Ref Sym Integer =>
{auto c : Ref Ctxt Defs} ->
SchVars vars -> PiInfo (Term vars) ->
Core (PiInfo (SchemeObj Write))
compilePiInfo svs Implicit = pure Implicit
compilePiInfo svs Explicit = pure Explicit
compilePiInfo svs AutoImplicit = pure AutoImplicit
compilePiInfo svs (DefImplicit t)
= do t' <- compileStk svs [] t
pure (DefImplicit t')
compileStk svs stk (Local fc isLet idx p)
= pure $ unload (Var (getSchVar p svs)) stk
-- We are assuming that the bound name is a valid scheme symbol. We should
-- only see this when inventing temporary names during quoting
compileStk svs stk (Ref fc Bound name)
= pure $ unload (Symbol (show name)) stk
compileStk svs stk (Ref fc (DataCon t a) name)
= if length stk == a -- inline it if it's fully applied
then pure $ Vector (cast t)
(toScheme !(toResolvedNames name) ::
toScheme fc :: stk)
else pure $ unload (Apply (Var (schName name)) []) stk
compileStk svs stk (Ref fc (TyCon t a) name)
= if length stk == a -- inline it if it's fully applied
then pure $ Vector (-1)
(IntegerVal (cast t) ::
StringVal (show name) ::
toScheme !(toResolvedNames name) ::
toScheme fc :: stk)
else pure $ unload (Apply (Var (schName name)) []) stk
compileStk svs stk (Ref fc x name)
= pure $ unload (Apply (Var (schName name)) []) stk
compileStk svs stk (Meta fc name i xs)
= do xs' <- traverse (compileStk svs stk) xs
-- we encode the arity as first argument to the hole definition, which
-- helps in readback, so we have to apply the hole function to the
-- length of xs to be able to restore the Meta properly
pure $ unload (Apply (Var (schName name)) [])
(IntegerVal (cast (length xs)) :: stk ++ xs')
compileStk svs stk (Bind fc x (Let _ _ val _) scope)
= do i <- nextName
let x' = schVarName x ++ "-" ++ show i
val' <- compileStk svs [] val
sc' <- compileStk (Bound x' :: svs) [] scope
pure $ unload (Let x' val' sc') stk
compileStk svs stk (Bind fc x (Pi _ rig p ty) scope)
= do i <- nextName
let x' = schVarName x ++ "-" ++ show i
ty' <- compileStk svs [] ty
sc' <- compileStk (Bound x' :: svs) [] scope
p' <- compilePiInfo svs p
pure $ Vector (-3) [Lambda [x'] sc', toScheme rig, toSchemePi p',
ty', toScheme x]
compileStk svs stk (Bind fc x (PVar _ rig p ty) scope)
= do i <- nextName
let x' = schVarName x ++ "-" ++ show i
ty' <- compileStk svs [] ty
sc' <- compileStk (Bound x' :: svs) [] scope
p' <- compilePiInfo svs p
pure $ Vector (-12) [Lambda [x'] sc', toScheme rig, toSchemePi p',
ty', toScheme x]
compileStk svs stk (Bind fc x (PVTy _ rig ty) scope)
= do i <- nextName
let x' = schVarName x ++ "-" ++ show i
ty' <- compileStk svs [] ty
sc' <- compileStk (Bound x' :: svs) [] scope
pure $ Vector (-13) [Lambda [x'] sc', toScheme rig, ty', toScheme x]
compileStk svs stk (Bind fc x (PLet _ rig val ty) scope) -- we only see this on LHS
= do i <- nextName
let x' = schVarName x ++ "-" ++ show i
val' <- compileStk svs [] val
ty' <- compileStk svs [] ty
sc' <- compileStk (Bound x' :: svs) [] scope
pure $ Vector (-14) [Lambda [x'] sc', toScheme rig, val', ty', toScheme x]
compileStk svs [] (Bind fc x (Lam _ rig p ty) scope)
= do i <- nextName
let x' = schVarName x ++ "-" ++ show i
ty' <- compileStk svs [] ty
sc' <- compileStk (Bound x' :: svs) [] scope
p' <- compilePiInfo svs p
pure $ Vector (-8) [Lambda [x'] sc', toScheme rig, toSchemePi p',
ty', toScheme x]
compileStk svs (s :: stk) (Bind fc x (Lam _ _ _ _) scope)
= do i <- nextName
let x' = schVarName x ++ "-" ++ show i
sc' <- compileStk (Bound x' :: svs) stk scope
pure $ Apply (Lambda [x'] sc') [s]
compileStk svs stk (App fc fn arg)
= compileStk svs (!(compileStk svs [] arg) :: stk) fn
-- We're only using this evaluator for REPL and typechecking, not for
-- tidying up definitions or LHSs, so we'll always remove As patterns
compileStk svs stk (As fc x as pat) = compileStk svs stk pat
compileStk svs stk (TDelayed fc r ty)
= do ty' <- compileStk svs stk ty
pure $ Vector (-15) [toScheme r, ty']
compileStk svs stk (TDelay fc r ty arg)
= do ty' <- compileStk svs [] ty
arg' <- compileStk svs [] arg
pure $ Vector (-4) [toScheme r, toScheme fc,
Lambda [] ty', Lambda [] arg']
compileStk svs stk (TForce fc x tm)
= do tm' <- compileStk svs [] tm
pure $ Apply (Var "ct-doForce")
[tm',
Vector (-5) [toScheme x, toScheme fc, Lambda [] tm']]
compileStk svs stk (PrimVal fc c) = pure $ compileConstant fc c
compileStk svs stk (Erased fc imp) = pure $ Vector (-6) [toScheme fc, toScheme imp]
compileStk svs stk (TType fc u) = pure $ Vector (-7) [toScheme fc, toScheme u]
export
compile : Ref Sym Integer =>
{auto c : Ref Ctxt Defs} ->
SchVars vars -> Term vars -> Core (SchemeObj Write)
compile vars tm = compileStk vars [] tm
getArgName : Ref Sym Integer =>
Core Name
getArgName
= do i <- nextName
pure (MN "carg" (cast i))
extend : Ref Sym Integer =>
(args : List Name) -> SchVars vars ->
Core (List Name, SchVars (args ++ vars))
extend [] svs = pure ([], svs)
extend (arg :: args) svs
= do n <- getArgName
(args', svs') <- extend args svs
pure (n :: args', Bound (schVarName n) :: svs')
compileCase : Ref Sym Integer =>
{auto c : Ref Ctxt Defs} ->
(blocked : SchemeObj Write) ->
SchVars vars -> CaseTree vars -> Core (SchemeObj Write)
compileCase blk svs (Case idx p scTy xs)
= case !(caseType xs) of
CON => toSchemeConCases idx p xs
TYCON => toSchemeTyConCases idx p xs
DELAY => toSchemeDelayCases idx p xs
CONST => toSchemeConstCases idx p xs
where
data CaseType = CON | TYCON | DELAY | CONST
caseType : List (CaseAlt vs) -> Core CaseType
caseType [] = pure CON
caseType (ConCase x tag args y :: xs)
= do defs <- get Ctxt
Just gdef <- lookupCtxtExact x (gamma defs)
| Nothing => pure TYCON -- primitive type match
case definition gdef of
DCon{} => pure CON
TCon{} => pure TYCON
_ => pure CON -- or maybe throw?
caseType (DelayCase ty arg x :: xs) = pure DELAY
caseType (ConstCase x y :: xs) = pure CONST
caseType (DefaultCase x :: xs) = caseType xs
makeDefault : List (CaseAlt vars) -> Core (SchemeObj Write)
makeDefault [] = pure blk
makeDefault (DefaultCase sc :: xs) = compileCase blk svs sc
makeDefault (_ :: xs) = makeDefault xs
toSchemeConCases : (idx : Nat) -> (0 p : IsVar n idx vars) ->
List (CaseAlt vars) -> Core (SchemeObj Write)
toSchemeConCases idx p alts
= do let var = getSchVar p svs
alts' <- traverse (makeAlt var) alts
let caseblock
= Case (Apply (Var "vector-ref") [Var var, IntegerVal 0])
(mapMaybe id alts')
(Just !(makeDefault alts))
pure $ If (Apply (Var "ct-isDataCon") [Var var])
caseblock
blk
where
project : Int -> String -> List Name ->
SchemeObj Write -> SchemeObj Write
project i var [] body = body
project i var (n :: ns) body
= Let (schVarName n)
(Apply (Var "vector-ref") [Var var, IntegerVal (cast i)])
(project (i + 1) var ns body)
bindArgs : String -> (args : List Name) -> CaseTree (args ++ vars) ->
Core (SchemeObj Write)
bindArgs var args sc
= do (bind, svs') <- extend args svs
project 3 var bind <$> compileCase blk svs' sc
makeAlt : String -> CaseAlt vars ->
Core (Maybe (SchemeObj Write, SchemeObj Write))
makeAlt var (ConCase n t args sc)
= pure $ Just (IntegerVal (cast t), !(bindArgs var args sc))
-- TODO: Matching on types, including ->
makeAlt var _ = pure Nothing
toSchemeTyConCases : (idx : Nat) -> (0 p : IsVar n idx vars) ->
List (CaseAlt vars) -> Core (SchemeObj Write)
toSchemeTyConCases idx p alts
= do let var = getSchVar p svs
alts' <- traverse (makeAlt var) alts
caseblock <- addPiMatch var alts
-- work on the name, so the 2nd arg
(Case (Apply (Var "vector-ref") [Var var, IntegerVal 2])
(mapMaybe id alts')
(Just !(makeDefault alts)))
pure $ If (Apply (Var "ct-isTypeMatchable") [Var var])
caseblock
blk
where
project : Int -> String -> List Name ->
SchemeObj Write -> SchemeObj Write
project i var [] body = body
project i var (n :: ns) body
= Let (schVarName n)
(Apply (Var "vector-ref") [Var var, IntegerVal (cast i)])
(project (i + 1) var ns body)
bindArgs : String -> (args : List Name) -> CaseTree (args ++ vars) ->
Core (SchemeObj Write)
bindArgs var args sc
= do (bind, svs') <- extend args svs
project 5 var bind <$> compileCase blk svs' sc
makeAlt : String -> CaseAlt vars ->
Core (Maybe (SchemeObj Write, SchemeObj Write))
makeAlt var (ConCase (UN (Basic "->")) t [_, _] sc)
= pure Nothing -- do this in 'addPiMatch' below, since the
-- representation is different
makeAlt var (ConCase n t args sc)
= pure $ Just (StringVal (show n), !(bindArgs var args sc))
makeAlt var _ = pure Nothing
addPiMatch : String -> List (CaseAlt vars) -> SchemeObj Write ->
Core (SchemeObj Write)
addPiMatch var [] def = pure def
-- t is a function type, and conveniently the scope of a pi
-- binding is represented as a function. Lucky us! So we just need
-- to extract it then evaluate the scope
addPiMatch var (ConCase (UN (Basic "->")) _ [s, t] sc :: _) def
= do sn <- getArgName
tn <- getArgName
let svs' = Bound (schVarName sn) ::
Bound (schVarName tn) :: svs
sc' <- compileCase blk svs' sc
pure $ If (Apply (Var "ct-isPi") [Var var])
(Let (schVarName sn) (Apply (Var "vector-ref") [Var var, IntegerVal 4]) $
Let (schVarName tn) (Apply (Var "vector-ref") [Var var, IntegerVal 1]) $
sc')
def
addPiMatch var (x :: xs) def = addPiMatch var xs def
toSchemeConstCases : (idx : Nat) -> (0 p : IsVar n idx vars) ->
List (CaseAlt vars) -> Core (SchemeObj Write)
toSchemeConstCases x p alts
= do let var = getSchVar p svs
alts' <- traverse (makeAlt var) alts
let caseblock
= Cond (mapMaybe id alts')
(Just !(makeDefault alts))
pure $ If (Apply (Var "ct-isConstant") [Var var])
caseblock
blk
where
makeAlt : String -> CaseAlt vars ->
Core (Maybe (SchemeObj Write, SchemeObj Write))
makeAlt var (ConstCase c sc)
= do sc' <- compileCase blk svs sc
pure (Just (Apply (Var "equal?")
[Var var, compileConstant emptyFC c], sc'))
makeAlt var _ = pure Nothing
toSchemeDelayCases : (idx : Nat) -> (0 p : IsVar n idx vars) ->
List (CaseAlt vars) -> Core (SchemeObj Write)
-- there will only ever be one, or a default case
toSchemeDelayCases idx p (DelayCase ty arg sc :: rest)
= do let var = getSchVar p svs
tyn <- getArgName
argn <- getArgName
let svs' = Bound (schVarName tyn) ::
Bound (schVarName argn) :: svs
sc' <- compileCase blk svs' sc
pure $ If (Apply (Var "ct-isDelay") [Var var])
(Let (schVarName tyn)
(Apply (Apply (Var "vector-ref") [Var var, IntegerVal 3]) []) $
Let (schVarName argn)
(Apply (Apply (Var "vector-ref") [Var var, IntegerVal 4]) []) $
sc')
blk
toSchemeDelayCases idx p (_ :: rest) = toSchemeDelayCases idx p rest
toSchemeDelayCases idx p _ = pure Null
compileCase blk vars (STerm _ tm) = compile vars tm
compileCase blk vars _ = pure blk
varObjs : SchVars ns -> List (SchemeObj Write)
varObjs [] = []
varObjs (x :: xs) = Var (show x) :: varObjs xs
mkArgs : (ns : List Name) -> Core (SchVars ns)
mkArgs [] = pure []
mkArgs (x :: xs)
= pure $ Bound (schVarName x) :: !(mkArgs xs)
bindArgs : Name ->
(todo : SchVars ns) ->
(done : List (SchemeObj Write)) ->
SchemeObj Write -> SchemeObj Write
bindArgs n [] done body = body
bindArgs n (x :: xs) done body
= Vector (-9) [blockedAppWith n (reverse done),
Lambda [show x]
(bindArgs n xs (Var (show x) :: done) body)]
compileBody : {auto c : Ref Ctxt Defs} ->
Bool -> -- okay to reduce (if False, block)
Name -> Def -> Core (SchemeObj Write)
compileBody _ n None = pure $ blockedAppWith n []
compileBody redok n (PMDef pminfo args treeCT treeRT pats)
= do i <- newRef Sym 0
argvs <- mkArgs args
let blk = blockedAppWith n (varObjs argvs)
body <- compileCase blk argvs treeCT
let body' = if redok
then If (Apply (Var "ct-isBlockAll") []) blk body
else blk
-- If it arose from a hole, we need to take an extra argument for
-- the arity since that's what Meta gets applied to
case holeInfo pminfo of
NotHole => pure (bindArgs n argvs [] body')
SolvedHole _ => pure (Lambda ["h-0"] (bindArgs n argvs [] body'))
compileBody _ n (ExternDef arity) = pure $ blockedAppWith n []
compileBody _ n (ForeignDef arity xs) = pure $ blockedAppWith n []
compileBody _ n (Builtin x) = pure $ compileBuiltin n x
compileBody _ n (DCon tag Z newtypeArg)
= pure $ Vector (cast tag) [toScheme !(toResolvedNames n), toScheme emptyFC]
compileBody _ n (DCon tag arity newtypeArg)
= do let args = mkArgNs 0 arity
argvs <- mkArgs args
let body
= Vector (cast tag)
(toScheme n :: toScheme emptyFC ::
map (Var . schVarName) args)
pure (bindArgs n argvs [] body)
where
mkArgNs : Int -> Nat -> List Name
mkArgNs i Z = []
mkArgNs i (S k) = MN "arg" i :: mkArgNs (i+1) k
compileBody _ n (TCon tag Z parampos detpos flags mutwith datacons detagabbleBy)
= pure $ Vector (-1) [IntegerVal (cast tag), StringVal (show n),
toScheme n, toScheme emptyFC]
compileBody _ n (TCon tag arity parampos detpos flags mutwith datacons detagabbleBy)
= do let args = mkArgNs 0 arity
argvs <- mkArgs args
let body
= Vector (-1)
(IntegerVal (cast tag) ::
StringVal (show n) ::
toScheme n :: toScheme emptyFC ::
map (Var . schVarName) args)
pure (bindArgs n argvs [] body)
where
mkArgNs : Int -> Nat -> List Name
mkArgNs i Z = []
mkArgNs i (S k) = MN "arg" i :: mkArgNs (i+1) k
compileBody _ n (Hole numlocs x) = pure $ blockedMetaApp n
compileBody _ n (BySearch x maxdepth defining) = pure $ blockedMetaApp n
compileBody _ n (Guess guess envbind constraints) = pure $ blockedMetaApp n
compileBody _ n ImpBind = pure $ blockedMetaApp n
compileBody _ n (UniverseLevel _) = pure $ blockedMetaApp n
compileBody _ n Delayed = pure $ blockedMetaApp n
export
compileDef : {auto c : Ref Ctxt Defs} -> SchemeMode -> Name -> Core ()
compileDef mode n_in
= do n <- toFullNames n_in -- this is handy for readability of generated names
-- we used resolved names for blocked names, though, as
-- that's a bit better for performance
defs <- get Ctxt
Just def <- lookupCtxtExact n (gamma defs)
| Nothing => throw (UndefinedName emptyFC n)
let True = case schemeExpr def of
Nothing => True
Just (cmode, def) => cmode /= mode
| _ => pure () -- already done
-- If we're in BlockExport mode, check whether the name is
-- available for reduction.
let redok = mode == EvalAll ||
reducibleInAny (currentNS defs :: nestedNS defs)
(fullname def)
(visibility def)
-- 'n' is used in compileBody for generating names for readback,
-- and reading back resolved names is quicker because it's just
-- an integer
b <- compileBody redok !(toResolvedNames n) !(toFullNames (definition def))
let schdef = Define (schName n) b
-- Add the new definition to the current scheme runtime
Just obj <- coreLift $ evalSchemeObj schdef
| Nothing => throw (InternalError ("Compiling " ++ show n ++ " failed"))
-- Record that this one is done
ignore $ addDef n ({ schemeExpr := Just (mode, schdef) } def)
initEvalWith : {auto c : Ref Ctxt Defs} ->
String -> Core Bool
initEvalWith "chez"
= do defs <- get Ctxt
if defs.schemeEvalLoaded
then pure True
else
catch (do f <- readDataFile "chez/ct-support.ss"
Just _ <- coreLift $ evalSchemeStr $ "(begin " ++ f ++ ")"
| Nothing => pure False
put Ctxt ({ schemeEvalLoaded := True } defs)
pure True)
(\err => pure False)
initEvalWith "racket"
= do defs <- get Ctxt
if defs.schemeEvalLoaded
then pure True
else
catch (do f <- readDataFile "racket/ct-support.rkt"
Just _ <- coreLift $ evalSchemeStr $ "(begin " ++ f ++ ")"
| Nothing => pure False
put Ctxt ({ schemeEvalLoaded := True } defs)
pure True)
(\err => do coreLift $ printLn err
pure False)
initEvalWith _ = pure False -- only works on Chez for now
-- Initialise the internal functions we need to build/extend blocked
-- applications
-- These are in a support file, chez/ct-support.ss. Returns True if loading
-- and processing succeeds. If it fails, which it probably will during a
-- bootstrap build at least, we can fall back to the default evaluator.
export
initialiseSchemeEval : {auto c : Ref Ctxt Defs} ->
Core Bool
initialiseSchemeEval = initEvalWith codegen
|
(* Title: HOL/HOLCF/IMP/HoareEx.thy
Author: Tobias Nipkow, TUM
Copyright 1997 TUM
*)
section "Correctness of Hoare by Fixpoint Reasoning"
theory HoareEx imports Denotational begin
text \<open>
An example from the HOLCF paper by Müller, Nipkow, Oheimb, Slotosch
\<^cite>\<open>MuellerNvOS99\<close>. It demonstrates fixpoint reasoning by showing
the correctness of the Hoare rule for while-loops.
\<close>
type_synonym assn = "state \<Rightarrow> bool"
definition
hoare_valid :: "[assn, com, assn] \<Rightarrow> bool" ("|= {(1_)}/ (_)/ {(1_)}" 50) where
"|= {P} c {Q} = (\<forall>s t. P s \<and> D c\<cdot>(Discr s) = Def t \<longrightarrow> Q t)"
lemma WHILE_rule_sound:
"|= {A} c {A} \<Longrightarrow> |= {A} WHILE b DO c {\<lambda>s. A s \<and> \<not> bval b s}"
apply (unfold hoare_valid_def)
apply (simp (no_asm))
apply (rule fix_ind)
apply (simp (no_asm)) \<comment> \<open>simplifier with enhanced \<open>adm\<close>-tactic\<close>
apply (simp (no_asm))
apply (simp (no_asm))
apply blast
done
end
|
module Xor where
import Nnaskell
import Numeric.LinearAlgebra.Data
xorNN = do nn <- randomNN [2, 2, 1]
return $ head $ drop 5000 $ optimizeCost (cost xorTD) nn
xorTD :: [(Vector R, Vector R)]
xorTD = [ (vector [0.0, 0.0], vector [0.0])
, (vector [1.0, 0.0], vector [1.0])
, (vector [0.0, 1.0], vector [1.0])
, (vector [1.0, 1.0], vector [0.0])
]
|
import data.int.gcd
/-1. Let a and b be coprime positive integers (recall that coprime here means gcd(a, b) = 1).
I open a fast food restaurant which sells chicken nuggets in two sizes –
you can either buy a box with a nuggets in, or a box with b nuggets in.
Prove that there is some integer N with the property that for all integers m ≥ N,
it is possible to buy exactly m nuggets. -/
variables {N m c d : ℕ}
variables {a b : ℤ }
theorem chicken_mcnugget (hp : int.gcd a b = 1) (h1 : a > 0) (h2 : b > 0) : ∃ N, ∀ m ≥ N, m = a*c + b*d := begin
--{existsi [gcd_a a b, gcd_b a b]},
have h3: ∃ (X Y :ℤ ), X * a + Y * b=1,
let m := gcd_a a b, gcd_b a b,
apply gcd_eq_gcd_ab,
--{ existsi [gcd_a a b , gcd_b a b],
--},
let X' := X + q*b,
let Y' := Y - q*a,
have h4: X' * a + Y' * b = 1,
let Z' := -Y',
-- have h5: (Z' X': ℕ ), X' * a= 1 + Z' * b, by norm_num,
assume N = a * Z' * b,
-- apply the lemma
end |
# Copyright (C) 2016 Electronic Arts Inc. All rights reserved.
str(mtcars)
print("Sleeping for 15 seconds")
Sys.sleep(15)
print("Saving RData file")
dir.create("/var/www/html/RServer/reports/mtcars")
save(mtcars, file = "/var/www/html/RServer/reports/mtcars/mtcars.RData")
fit <- lm(mpg ~ am + wt + hp, data = mtcars)
summary(fit)
print("Saving Model")
Sys.sleep(10)
save(fit, file = "/var/www/html/RServer/reports/mtcars/Model.RData")
|
State Before: G : Type u_1
G' : Type ?u.56694
F : Type ?u.56697
inst✝² : Group G
inst✝¹ : Group G'
inst✝ : MonoidHomClass F G G'
f : F
g₁ g₂ g₃ g : G
H₁ H₂ H₃ K₁ K₂ : Subgroup G
h1 : ⁅⁅H₂, H₃⁆, H₁⁆ = ⊥
h2 : ⁅⁅H₃, H₁⁆, H₂⁆ = ⊥
⊢ ⁅⁅H₁, H₂⁆, H₃⁆ = ⊥ State After: G : Type u_1
G' : Type ?u.56694
F : Type ?u.56697
inst✝² : Group G
inst✝¹ : Group G'
inst✝ : MonoidHomClass F G G'
f : F
g₁ g₂ g₃ g : G
H₁ H₂ H₃ K₁ K₂ : Subgroup G
h1 : ∀ (g₁ : G), g₁ ∈ H₂ → ∀ (g₂ : G), g₂ ∈ H₃ → ∀ (h : G), h ∈ H₁ → ⁅h, ⁅g₁, g₂⁆⁆ = 1
h2 : ∀ (g₁ : G), g₁ ∈ H₃ → ∀ (g₂ : G), g₂ ∈ H₁ → ∀ (h : G), h ∈ H₂ → ⁅h, ⁅g₁, g₂⁆⁆ = 1
⊢ ∀ (g₁ : G), g₁ ∈ H₁ → ∀ (g₂ : G), g₂ ∈ H₂ → ∀ (h : G), h ∈ H₃ → ⁅h, ⁅g₁, g₂⁆⁆ = 1 Tactic: simp_rw [commutator_eq_bot_iff_le_centralizer, commutator_le,
mem_centralizer_iff_commutator_eq_one, ← commutatorElement_def] at h1 h2⊢ State Before: G : Type u_1
G' : Type ?u.56694
F : Type ?u.56697
inst✝² : Group G
inst✝¹ : Group G'
inst✝ : MonoidHomClass F G G'
f : F
g₁ g₂ g₃ g : G
H₁ H₂ H₃ K₁ K₂ : Subgroup G
h1 : ∀ (g₁ : G), g₁ ∈ H₂ → ∀ (g₂ : G), g₂ ∈ H₃ → ∀ (h : G), h ∈ H₁ → ⁅h, ⁅g₁, g₂⁆⁆ = 1
h2 : ∀ (g₁ : G), g₁ ∈ H₃ → ∀ (g₂ : G), g₂ ∈ H₁ → ∀ (h : G), h ∈ H₂ → ⁅h, ⁅g₁, g₂⁆⁆ = 1
⊢ ∀ (g₁ : G), g₁ ∈ H₁ → ∀ (g₂ : G), g₂ ∈ H₂ → ∀ (h : G), h ∈ H₃ → ⁅h, ⁅g₁, g₂⁆⁆ = 1 State After: G : Type u_1
G' : Type ?u.56694
F : Type ?u.56697
inst✝² : Group G
inst✝¹ : Group G'
inst✝ : MonoidHomClass F G G'
f : F
g₁ g₂ g₃ g : G
H₁ H₂ H₃ K₁ K₂ : Subgroup G
h1 : ∀ (g₁ : G), g₁ ∈ H₂ → ∀ (g₂ : G), g₂ ∈ H₃ → ∀ (h : G), h ∈ H₁ → ⁅h, ⁅g₁, g₂⁆⁆ = 1
h2 : ∀ (g₁ : G), g₁ ∈ H₃ → ∀ (g₂ : G), g₂ ∈ H₁ → ∀ (h : G), h ∈ H₂ → ⁅h, ⁅g₁, g₂⁆⁆ = 1
x : G
hx : x ∈ H₁
y : G
hy : y ∈ H₂
z : G
hz : z ∈ H₃
⊢ ⁅z, ⁅x, y⁆⁆ = 1 Tactic: intro x hx y hy z hz State Before: G : Type u_1
G' : Type ?u.56694
F : Type ?u.56697
inst✝² : Group G
inst✝¹ : Group G'
inst✝ : MonoidHomClass F G G'
f : F
g₁ g₂ g₃ g : G
H₁ H₂ H₃ K₁ K₂ : Subgroup G
h1 : ∀ (g₁ : G), g₁ ∈ H₂ → ∀ (g₂ : G), g₂ ∈ H₃ → ∀ (h : G), h ∈ H₁ → ⁅h, ⁅g₁, g₂⁆⁆ = 1
h2 : ∀ (g₁ : G), g₁ ∈ H₃ → ∀ (g₂ : G), g₂ ∈ H₁ → ∀ (h : G), h ∈ H₂ → ⁅h, ⁅g₁, g₂⁆⁆ = 1
x : G
hx : x ∈ H₁
y : G
hy : y ∈ H₂
z : G
hz : z ∈ H₃
⊢ ⁅z, ⁅x, y⁆⁆ = 1 State After: G : Type u_1
G' : Type ?u.56694
F : Type ?u.56697
inst✝² : Group G
inst✝¹ : Group G'
inst✝ : MonoidHomClass F G G'
f : F
g₁ g₂ g₃ g : G
H₁ H₂ H₃ K₁ K₂ : Subgroup G
h1 : ∀ (g₁ : G), g₁ ∈ H₂ → ∀ (g₂ : G), g₂ ∈ H₃ → ∀ (h : G), h ∈ H₁ → ⁅h, ⁅g₁, g₂⁆⁆ = 1
h2 : ∀ (g₁ : G), g₁ ∈ H₃ → ∀ (g₂ : G), g₂ ∈ H₁ → ∀ (h : G), h ∈ H₂ → ⁅h, ⁅g₁, g₂⁆⁆ = 1
x : G
hx : x ∈ H₁
y : G
hy : y ∈ H₂
z : G
hz : z ∈ H₃
⊢ ⁅z, ⁅x, y⁆⁆ = x * z * ⁅y, ⁅z⁻¹, x⁻¹⁆⁆⁻¹ * z⁻¹ * y * ⁅x⁻¹, ⁅y⁻¹, z⁆⁆⁻¹ * y⁻¹ * x⁻¹
G : Type u_1
G' : Type ?u.56694
F : Type ?u.56697
inst✝² : Group G
inst✝¹ : Group G'
inst✝ : MonoidHomClass F G G'
f : F
g₁ g₂ g₃ g : G
H₁ H₂ H₃ K₁ K₂ : Subgroup G
h1 : ∀ (g₁ : G), g₁ ∈ H₂ → ∀ (g₂ : G), g₂ ∈ H₃ → ∀ (h : G), h ∈ H₁ → ⁅h, ⁅g₁, g₂⁆⁆ = 1
h2 : ∀ (g₁ : G), g₁ ∈ H₃ → ∀ (g₂ : G), g₂ ∈ H₁ → ∀ (h : G), h ∈ H₂ → ⁅h, ⁅g₁, g₂⁆⁆ = 1
x : G
hx : x ∈ H₁
y : G
hy : y ∈ H₂
z : G
hz : z ∈ H₃
⊢ x * z * ⁅y, ⁅z⁻¹, x⁻¹⁆⁆⁻¹ * z⁻¹ * y * ⁅x⁻¹, ⁅y⁻¹, z⁆⁆⁻¹ * y⁻¹ * x⁻¹ = 1 Tactic: trans x * z * ⁅y, ⁅z⁻¹, x⁻¹⁆⁆⁻¹ * z⁻¹ * y * ⁅x⁻¹, ⁅y⁻¹, z⁆⁆⁻¹ * y⁻¹ * x⁻¹ State Before: G : Type u_1
G' : Type ?u.56694
F : Type ?u.56697
inst✝² : Group G
inst✝¹ : Group G'
inst✝ : MonoidHomClass F G G'
f : F
g₁ g₂ g₃ g : G
H₁ H₂ H₃ K₁ K₂ : Subgroup G
h1 : ∀ (g₁ : G), g₁ ∈ H₂ → ∀ (g₂ : G), g₂ ∈ H₃ → ∀ (h : G), h ∈ H₁ → ⁅h, ⁅g₁, g₂⁆⁆ = 1
h2 : ∀ (g₁ : G), g₁ ∈ H₃ → ∀ (g₂ : G), g₂ ∈ H₁ → ∀ (h : G), h ∈ H₂ → ⁅h, ⁅g₁, g₂⁆⁆ = 1
x : G
hx : x ∈ H₁
y : G
hy : y ∈ H₂
z : G
hz : z ∈ H₃
⊢ ⁅z, ⁅x, y⁆⁆ = x * z * ⁅y, ⁅z⁻¹, x⁻¹⁆⁆⁻¹ * z⁻¹ * y * ⁅x⁻¹, ⁅y⁻¹, z⁆⁆⁻¹ * y⁻¹ * x⁻¹ State After: no goals Tactic: group State Before: G : Type u_1
G' : Type ?u.56694
F : Type ?u.56697
inst✝² : Group G
inst✝¹ : Group G'
inst✝ : MonoidHomClass F G G'
f : F
g₁ g₂ g₃ g : G
H₁ H₂ H₃ K₁ K₂ : Subgroup G
h1 : ∀ (g₁ : G), g₁ ∈ H₂ → ∀ (g₂ : G), g₂ ∈ H₃ → ∀ (h : G), h ∈ H₁ → ⁅h, ⁅g₁, g₂⁆⁆ = 1
h2 : ∀ (g₁ : G), g₁ ∈ H₃ → ∀ (g₂ : G), g₂ ∈ H₁ → ∀ (h : G), h ∈ H₂ → ⁅h, ⁅g₁, g₂⁆⁆ = 1
x : G
hx : x ∈ H₁
y : G
hy : y ∈ H₂
z : G
hz : z ∈ H₃
⊢ x * z * ⁅y, ⁅z⁻¹, x⁻¹⁆⁆⁻¹ * z⁻¹ * y * ⁅x⁻¹, ⁅y⁻¹, z⁆⁆⁻¹ * y⁻¹ * x⁻¹ = 1 State After: G : Type u_1
G' : Type ?u.56694
F : Type ?u.56697
inst✝² : Group G
inst✝¹ : Group G'
inst✝ : MonoidHomClass F G G'
f : F
g₁ g₂ g₃ g : G
H₁ H₂ H₃ K₁ K₂ : Subgroup G
h1 : ∀ (g₁ : G), g₁ ∈ H₂ → ∀ (g₂ : G), g₂ ∈ H₃ → ∀ (h : G), h ∈ H₁ → ⁅h, ⁅g₁, g₂⁆⁆ = 1
h2 : ∀ (g₁ : G), g₁ ∈ H₃ → ∀ (g₂ : G), g₂ ∈ H₁ → ∀ (h : G), h ∈ H₂ → ⁅h, ⁅g₁, g₂⁆⁆ = 1
x : G
hx : x ∈ H₁
y : G
hy : y ∈ H₂
z : G
hz : z ∈ H₃
⊢ x * z * 1⁻¹ * z⁻¹ * y * 1⁻¹ * y⁻¹ * x⁻¹ = 1 Tactic: rw [h1 _ (H₂.inv_mem hy) _ hz _ (H₁.inv_mem hx), h2 _ (H₃.inv_mem hz) _ (H₁.inv_mem hx) _ hy] State Before: G : Type u_1
G' : Type ?u.56694
F : Type ?u.56697
inst✝² : Group G
inst✝¹ : Group G'
inst✝ : MonoidHomClass F G G'
f : F
g₁ g₂ g₃ g : G
H₁ H₂ H₃ K₁ K₂ : Subgroup G
h1 : ∀ (g₁ : G), g₁ ∈ H₂ → ∀ (g₂ : G), g₂ ∈ H₃ → ∀ (h : G), h ∈ H₁ → ⁅h, ⁅g₁, g₂⁆⁆ = 1
h2 : ∀ (g₁ : G), g₁ ∈ H₃ → ∀ (g₂ : G), g₂ ∈ H₁ → ∀ (h : G), h ∈ H₂ → ⁅h, ⁅g₁, g₂⁆⁆ = 1
x : G
hx : x ∈ H₁
y : G
hy : y ∈ H₂
z : G
hz : z ∈ H₃
⊢ x * z * 1⁻¹ * z⁻¹ * y * 1⁻¹ * y⁻¹ * x⁻¹ = 1 State After: no goals Tactic: group |
# Tutorial on Hyperparameter Tuning in Neural Networks
**Author:** Matthew Stewart
<br>
This notebook follows the same procedure as the Medium article **"Simple Guide to Hyperparameter Tuning in Neural Networks"**.
[https://medium.com/@matthew_stewart/simple-guide-to-hyperparameter-tuning-in-neural-networks-3fe03dad8594].
In this notebook we will optimize and fine-tune a neural network to find the global minimum of a particularly troublesome function known as the Beale function. This is one of many test functions for optimization that are commonly used in academia (see https://en.wikipedia.org/wiki/Test_functions_for_optimization for more information).
```python
## Formatting
import requests
from IPython.core.display import HTML
styles = requests.get("https://raw.githubusercontent.com/Harvard-IACS/2019-CS109B/master/content/styles/cs109.css").text
HTML(styles)
```
<style>
blockquote { background: #AEDE94; }
h1 {
padding-top: 25px;
padding-bottom: 25px;
text-align: left;
padding-left: 10px;
background-color: #DDDDDD;
color: black;
}
h2 {
padding-top: 10px;
padding-bottom: 10px;
text-align: left;
padding-left: 5px;
background-color: #EEEEEE;
color: black;
}
div.exercise {
background-color: #ffcccc;
border-color: #E9967A;
border-left: 5px solid #800080;
padding: 0.5em;
}
div.discussion {
background-color: #ccffcc;
border-color: #88E97A;
border-left: 5px solid #0A8000;
padding: 0.5em;
}
div.theme {
background-color: #DDDDDD;
border-color: #E9967A;
border-left: 5px solid #800080;
padding: 0.5em;
font-size: 18pt;
}
div.gc {
background-color: #AEDE94;
border-color: #E9967A;
border-left: 5px solid #800080;
padding: 0.5em;
font-size: 12pt;
}
p.q1 {
padding-top: 5px;
padding-bottom: 5px;
text-align: left;
padding-left: 5px;
background-color: #EEEEEE;
color: black;
}
header {
padding-top: 35px;
padding-bottom: 35px;
text-align: left;
padding-left: 10px;
background-color: #DDDDDD;
color: black;
}
</style>
## Learning Goals
In this notebook, we will explore ways to optimize the loss function of a multilayer perceptron (MLP) by tuning the model hyperparameters. We will also explore the use of cross-validation as a technique for checking potential values for these hyperparameters.
By the end of this notebook, you should:
- Be familiar with the use of `scipy`'s `optimize` module.
- Be able to identify the hyperparameters that go into the training of an MLP.
- Be familiar with the implementation in `keras` of various optimization techniques.
- Know how to use callbacks.
- Apply cross-validation to check for multiple values of hyperparameters.
```python
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import minimize
%matplotlib inline
```
## Part 1: Beale's function
### First let's look at function optimization in `scipy.optimize`, using Beale's function as an example
Optimizing a function $f: A\rightarrow R$, from some set A to the real numbers, is finding an element $x_0 \in A$ such that $f(x_0)\leq f(x)$ for all $x \in A$ (finding the minimum) or such that $f(x_0)\geq f(x)$ for all $x \in A$ (finding the maximum).
To illustrate our point we will use a function of two parameters. Our goal is to optimize over these 2 parameters. We can extend to higher dimensions by plotting pairs of parameters against each other.
The Wikipedia article on Test functions for optimization has a few functions that are useful for evaluating optimization algorithms. Here is Beale's function:
$f(x,y) = (1.5 - x + xy)^2 + (2.25 - x + xy^2)^2 + (2.625 - x + xy^3)^2$
We already know that this function has a minimum at [3.0, 0.5]. Let's see if `scipy` will find it.
<pre>source: https://en.wikipedia.org/wiki/Test_functions_for_optimization</pre>
```python
# define Beale's function which we want to minimize
def objective(X):
x = X[0]; y = X[1]
return (1.5 - x + x*y)**2 + (2.25 - x + x*y**2)**2 + (2.625 - x + x*y**3)**2
```
```python
# function boundaries
xmin, xmax, xstep = -4.5, 4.5, .9
ymin, ymax, ystep = -4.5, 4.5, .9
```
```python
# Let's create some points
x1, y1 = np.meshgrid(np.arange(xmin, xmax + xstep, xstep), np.arange(ymin, ymax + ystep, ystep))
```
Let's make an initial guess
```python
# initial guess
x0 = [4., 4.]
f0 = objective(x0)
print (f0)
```
68891.203125
```python
bnds = ((xmin, xmax), (ymin, ymax))
minimum = minimize(objective, x0, bounds=bnds)
```
```python
print(minimum)
```
fun: 2.068025638865627e-12
hess_inv: <2x2 LbfgsInvHessProduct with dtype=float64>
jac: array([-1.55969780e-06, 9.89837957e-06])
message: b'CONVERGENCE: NORM_OF_PROJECTED_GRADIENT_<=_PGTOL'
nfev: 60
nit: 14
status: 0
success: True
x: array([3.00000257, 0.50000085])
```python
real_min = [3.0, 0.5]
print (f'The answer, {minimum.x}, is very close to the optimum as we know it, which is {real_min}')
print (f'The value of the objective for {real_min} is {objective(real_min)}')
```
The answer, [3.00000257 0.50000085], is very close to the optimum as we know it, which is [3.0, 0.5]
The value of the objective for [3.0, 0.5] is 0.0
## Part 2: Optimization in neural networks
In general: **Learning Representation --> Objective function --> Optimization algorithm**
A neural network can be defined as a framework that combines inputs and tries to guess the output. If we are lucky enough to have some results, called "the ground truth", to compare against the outputs produced by the network, we can calculate the **error**. So the network guesses, calculates some error function, guesses again, trying to minimize this error, and guesses again, until the error does not go down any more. This is optimization.
In neural networks the most commonly used optimization algorithms are flavors of **GD (gradient descent)**. The *objective function* used in gradient descent is the *loss function*, which we want to minimize.
### A `keras` Refresher
`Keras` is a Python library for deep learning that can run on top of either Theano or TensorFlow, two powerful Python libraries for fast numerical computing developed by the MILA lab at the Université de Montréal and by Google, respectively.
Keras was developed to make developing deep learning models as fast and easy as
possible for research and practical applications. It runs on Python 2.7 or 3.5 and can seamlessly execute on GPUs and CPUs.
Keras is built on the idea of a model. At its core we have a sequence of layers called
the `Sequential` model which is a linear stack of layers. Keras also provides the `functional API`, a way to define complex models, such as multi-output models, directed acyclic graphs, or models with shared layers.
We can summarize the construction of deep learning models in Keras using the Sequential model as follows (a minimal end-to-end sketch is given after the list):
1. **Define your model**: create a `Sequential` model and add layers.
2. **Compile your model**: specify loss function and optimizers and call the `.compile()` function.
3. **Fit your model**: train the model on data by calling the `.fit()` function.
4. **Make predictions**: use the model to generate predictions on new data by calling functions such as `.evaluate()` or `.predict()`.
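A minimal end-to-end sketch of these four steps (the layer sizes, optimizer choice, and random data here are illustrative assumptions, separate from the MNIST example later in this notebook):
```python
import numpy as np
from keras.models import Sequential
from keras.layers import Dense

# 1. Define: a Sequential model as a linear stack of layers
model = Sequential()
model.add(Dense(32, activation='relu', input_dim=10))
model.add(Dense(1, activation='sigmoid'))

# 2. Compile: specify a loss function and an optimizer
model.compile(loss='binary_crossentropy', optimizer='sgd', metrics=['acc'])

# 3. Fit: train on (random) data, just to exercise the API
X = np.random.rand(100, 10)
y = np.random.randint(2, size=(100, 1))
model.fit(X, y, epochs=2, batch_size=16, verbose=0)

# 4. Make predictions / evaluate on new data
print(model.evaluate(X, y, verbose=0))
print(model.predict(X[:3]))
```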
### Callbacks: taking a peek into our model while it's training
You can look at what is happening in various stages of your model by using `callbacks`. A callback is a set of functions to be applied at given stages of the training procedure. You can use callbacks to get a view on internal states and statistics of the model during training. You can pass a list of callbacks (as the keyword argument callbacks) to the `.fit()` method of the Sequential or Model classes. The relevant methods of the callbacks will then be called at each stage of the training.
- A callback function you are already familiar with is `keras.callbacks.History()`. This is automatically included in `.fit()`.
- Another very useful one is `keras.callbacks.ModelCheckpoint` which saves the model with its weights at a certain point in the training. This can prove useful if your model is running for a long time and a system failure happens. Not all is lost then. It's a good practice to save the model weights only when an improvement is observed as measured by the `acc`, for example.
- `keras.callbacks.EarlyStopping` stops the training when a monitored quantity has stopped improving.
- `keras.callbacks.LearningRateScheduler` will change the learning rate during training.
We will apply some callbacks later.
For full documentation on `callbacks` see https://keras.io/callbacks/
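As a small illustrative sketch (the checkpoint filename, monitored quantities, patience, and schedule are assumptions, not settings used later in this notebook), the callbacks described above could be created and passed to `.fit()` like this:
```python
from keras.callbacks import ModelCheckpoint, EarlyStopping, LearningRateScheduler

# save weights only when validation accuracy improves
checkpoint = ModelCheckpoint('weights.best.hdf5', monitor='val_acc',
                             save_best_only=True, verbose=1)

# stop training if validation loss has not improved for 5 epochs
early_stop = EarlyStopping(monitor='val_loss', patience=5)

# halve the learning rate every 10 epochs (illustrative schedule)
def schedule(epoch):
    return 0.1 * (0.5 ** (epoch // 10))

lr_schedule = LearningRateScheduler(schedule)

# model.fit(x_train, y_train, validation_data=(x_test, y_test),
#           epochs=60, callbacks=[checkpoint, early_stop, lr_schedule])
```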
### What are the steps to optimizing our network?
```python
import tensorflow as tf
import keras
from keras import layers
from keras import models
from keras import utils
from keras.layers import Dense
from keras.models import Sequential
from keras.layers import Flatten
from keras.layers import Dropout
from keras.layers import Activation
from keras.regularizers import l2
from keras.optimizers import SGD
from keras.optimizers import RMSprop
from keras import datasets
from keras.callbacks import LearningRateScheduler
from keras.callbacks import History
from keras import losses
from sklearn.utils import shuffle
print(tf.VERSION)
print(tf.keras.__version__)
```
1.12.0
2.1.6-tf
Using TensorFlow backend.
```python
# fix random seed for reproducibility
np.random.seed(5)
```
### Step 1 - Deciding on the network topology (not really considered optimization but is obviously very important)
We will use the MNIST dataset which consists of grayscale images of handwritten digits (0-9) whose dimension is 28x28 pixels. Each pixel is 8 bits so its value ranges from 0 to 255.
```python
#mnist = tf.keras.datasets.mnist
mnist = keras.datasets.mnist
(x_train, y_train),(x_test, y_test) = mnist.load_data()
x_train.shape, y_train.shape
```
((60000, 28, 28), (60000,))
Each label is a number between 0 and 9
```python
print(y_train)
```
[5 0 4 ... 5 6 8]
Let's look at some 10 of the images
```python
plt.figure(figsize=(10,10))
for i in range(10):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(x_train[i], cmap=plt.cm.binary)
plt.xlabel(y_train[i])
```
```python
x_train[45].shape
x_train[45, 15:20, 15:20]
```
array([[ 11, 198, 231, 41, 0],
[ 82, 252, 204, 0, 0],
[253, 253, 141, 0, 0],
[252, 220, 36, 0, 0],
[252, 96, 0, 0, 0]], dtype=uint8)
```python
print(f'We have {x_train.shape[0]} train samples')
print(f'We have {x_test.shape[0]} test samples')
```
We have 60000 train samples
We have 10000 test samples
#### Preprocessing the data
To run our NN we need to pre-process the data
* First we need to make the 2D image arrays into 1D (flatten them). We can either perform this by using array reshaping with `numpy.reshape()` or the `keras`' method for this: a layer called `tf.keras.layers.Flatten` which transforms the format of the images from a 2d-array (of 28 by 28 pixels), to a 1D-array of 28 * 28 = 784 pixels.
* Then we need to normalize the pixel values (give them values between 0 and 1) using the following transformation:
\begin{align}
x := \dfrac{x - x_{min}}{x_{max} - x_{min}}
\end{align}
In our case $x_{min} = 0$ and $x_{max} = 255$ so the formula becomes simply $x := {x}/255$
```python
# normalize the data
x_train, x_test = x_train / 255.0, x_test / 255.0
```
```python
# reshape the data into 1D vectors
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
num_classes = 10
```
```python
x_train.shape[1]
```
784
Now let's convert our class vector (y) to a binary class matrix, e.g. for use with categorical_crossentropy.
```python
# Convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
```
```python
y_train[0]
```
array([0., 0., 0., 0., 0., 1., 0., 0., 0., 0.], dtype=float32)
Now we are ready to build the model!
### Step 2 - Adjusting the `learning rate`
One of the most common optimization algorithms is Stochastic Gradient Descent (SGD). The hyperparameters that can be optimized in SGD are `learning rate`, `momentum`, `decay` and `nesterov`.
`Learning rate` controls how much the weights are updated at the end of each batch, and `momentum` controls how much the previous update influences the current weight update. `Decay` indicates the learning rate decay after each update, and `nesterov` takes the value True or False depending on whether we want to apply Nesterov momentum. Typical values for those hyperparameters are lr=0.01, decay=1e-6, momentum=0.9, and nesterov=True.
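As a concrete illustration, those typical values translate directly into a Keras `SGD` optimizer (this particular instance is just an example, not the one we train with below):
```python
sgd_typical = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
```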
The learning rate hyperparameter goes into the `optimizer` function which we will see below. Keras has a default learning rate scheduler in the `SGD` optimizer that decreases the learning rate during the stochastic gradient descent optimization. The learning rate is decreased after each update according to this formula:
\begin{align}
lr = lr_0 \cdot \frac{1}{1 + decay \cdot t}
\end{align}
where $lr_0$ is the initial learning rate and $t$ is the update (iteration) count.
Source: http://cs231n.github.io/neural-networks-3
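To get a feel for how fast this schedule shrinks the learning rate, we can evaluate the formula by hand for a few update counts (a purely illustrative check, using the lr and decay values we pick below):
```python
lr0, decay = 0.1, 0.1 / 60
for t in [0, 100, 1000, 10000]:
    print(t, lr0 * 1.0 / (1.0 + decay * t))
```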
Let's implement a learning rate adaptation schedule in `Keras`. We'll start with SGD and a learning rate value of 0.1. We will then train the model for 60 epochs and set the decay argument to 0.00167 (0.1/60). We also include a momentum value of 0.8 since that seems to work well when using an adaptive learning rate.
```python
epochs=60
learning_rate = 0.1
decay_rate = learning_rate / epochs
momentum = 0.8
sgd = SGD(lr=learning_rate, momentum=momentum, decay=decay_rate, nesterov=False)
```
```python
# build the model
input_dim = x_train.shape[1]
lr_model = Sequential()
lr_model.add(Dense(64, activation=tf.nn.relu, kernel_initializer='uniform',
                   input_dim=input_dim))
lr_model.add(Dropout(0.1))
lr_model.add(Dense(64, kernel_initializer='uniform', activation=tf.nn.relu))
lr_model.add(Dense(num_classes, kernel_initializer='uniform', activation=tf.nn.softmax))
# compile the model
lr_model.compile(loss='categorical_crossentropy',
                 optimizer=sgd,
                 metrics=['acc'])
```
```python
%%time
# Fit the model
batch_size = int(input_dim/100)
lr_model_history = lr_model.fit(x_train, y_train,
                                batch_size=batch_size,
                                epochs=epochs,
                                verbose=1,
                                validation_data=(x_test, y_test))
```
Train on 60000 samples, validate on 10000 samples
Epoch 1/60
60000/60000 [==============================] - 9s 145us/step - loss: 0.3158 - acc: 0.9043 - val_loss: 0.1467 - val_acc: 0.9550
Epoch 2/60
60000/60000 [==============================] - 8s 136us/step - loss: 0.1478 - acc: 0.9555 - val_loss: 0.1194 - val_acc: 0.9617
Epoch 3/60
60000/60000 [==============================] - 8s 137us/step - loss: 0.1248 - acc: 0.9620 - val_loss: 0.1122 - val_acc: 0.9646
Epoch 4/60
60000/60000 [==============================] - 8s 136us/step - loss: 0.1167 - acc: 0.9638 - val_loss: 0.1075 - val_acc: 0.9681
Epoch 5/60
60000/60000 [==============================] - 8s 137us/step - loss: 0.1103 - acc: 0.9666 - val_loss: 0.1039 - val_acc: 0.9691
Epoch 6/60
60000/60000 [==============================] - 8s 137us/step - loss: 0.1051 - acc: 0.9677 - val_loss: 0.1015 - val_acc: 0.9694
Epoch 7/60
60000/60000 [==============================] - 8s 136us/step - loss: 0.1003 - acc: 0.9691 - val_loss: 0.1002 - val_acc: 0.9694
Epoch 8/60
60000/60000 [==============================] - 9s 144us/step - loss: 0.0961 - acc: 0.9707 - val_loss: 0.0998 - val_acc: 0.9694
Epoch 9/60
60000/60000 [==============================] - 9s 154us/step - loss: 0.0951 - acc: 0.9707 - val_loss: 0.0989 - val_acc: 0.9699
Epoch 10/60
60000/60000 [==============================] - 9s 150us/step - loss: 0.0919 - acc: 0.9721 - val_loss: 0.0978 - val_acc: 0.9696
Epoch 11/60
60000/60000 [==============================] - 8s 141us/step - loss: 0.0930 - acc: 0.9720 - val_loss: 0.0964 - val_acc: 0.9702
Epoch 12/60
60000/60000 [==============================] - 8s 141us/step - loss: 0.0899 - acc: 0.9728 - val_loss: 0.0965 - val_acc: 0.9703
Epoch 13/60
60000/60000 [==============================] - 8s 141us/step - loss: 0.0883 - acc: 0.9732 - val_loss: 0.0951 - val_acc: 0.9713
Epoch 14/60
60000/60000 [==============================] - 8s 141us/step - loss: 0.0871 - acc: 0.9733 - val_loss: 0.0958 - val_acc: 0.9705
Epoch 15/60
60000/60000 [==============================] - 8s 141us/step - loss: 0.0888 - acc: 0.9731 - val_loss: 0.0952 - val_acc: 0.9709
Epoch 16/60
60000/60000 [==============================] - 9s 145us/step - loss: 0.0857 - acc: 0.9743 - val_loss: 0.0950 - val_acc: 0.9713
Epoch 17/60
60000/60000 [==============================] - 9s 157us/step - loss: 0.0843 - acc: 0.9742 - val_loss: 0.0957 - val_acc: 0.9709
Epoch 18/60
60000/60000 [==============================] - 8s 142us/step - loss: 0.0842 - acc: 0.9749 - val_loss: 0.0942 - val_acc: 0.9719
Epoch 19/60
60000/60000 [==============================] - 9s 142us/step - loss: 0.0839 - acc: 0.9750 - val_loss: 0.0936 - val_acc: 0.9723
Epoch 20/60
60000/60000 [==============================] - 9s 142us/step - loss: 0.0824 - acc: 0.9748 - val_loss: 0.0942 - val_acc: 0.9723
Epoch 21/60
60000/60000 [==============================] - 9s 143us/step - loss: 0.0824 - acc: 0.9749 - val_loss: 0.0940 - val_acc: 0.9725
Epoch 22/60
60000/60000 [==============================] - 9s 142us/step - loss: 0.0829 - acc: 0.9752 - val_loss: 0.0938 - val_acc: 0.9718
Epoch 23/60
60000/60000 [==============================] - 9s 143us/step - loss: 0.0795 - acc: 0.9763 - val_loss: 0.0939 - val_acc: 0.9718
Epoch 24/60
60000/60000 [==============================] - 9s 149us/step - loss: 0.0796 - acc: 0.9763 - val_loss: 0.0936 - val_acc: 0.9722
Epoch 25/60
60000/60000 [==============================] - 8s 140us/step - loss: 0.0783 - acc: 0.9759 - val_loss: 0.0935 - val_acc: 0.9724
Epoch 26/60
60000/60000 [==============================] - 8s 140us/step - loss: 0.0805 - acc: 0.9755 - val_loss: 0.0937 - val_acc: 0.9721
Epoch 27/60
60000/60000 [==============================] - 8s 140us/step - loss: 0.0795 - acc: 0.9759 - val_loss: 0.0930 - val_acc: 0.9721
Epoch 28/60
60000/60000 [==============================] - 9s 148us/step - loss: 0.0786 - acc: 0.9765 - val_loss: 0.0931 - val_acc: 0.9721
Epoch 29/60
60000/60000 [==============================] - 9s 148us/step - loss: 0.0780 - acc: 0.9764 - val_loss: 0.0926 - val_acc: 0.9726
Epoch 30/60
60000/60000 [==============================] - 9s 144us/step - loss: 0.0759 - acc: 0.9768 - val_loss: 0.0925 - val_acc: 0.9726
Epoch 31/60
60000/60000 [==============================] - 9s 147us/step - loss: 0.0780 - acc: 0.9768 - val_loss: 0.0931 - val_acc: 0.9727
Epoch 32/60
60000/60000 [==============================] - 9s 155us/step - loss: 0.0768 - acc: 0.9765 - val_loss: 0.0925 - val_acc: 0.9726
Epoch 33/60
60000/60000 [==============================] - 9s 144us/step - loss: 0.0762 - acc: 0.9770 - val_loss: 0.0927 - val_acc: 0.9723
Epoch 34/60
60000/60000 [==============================] - 9s 149us/step - loss: 0.0765 - acc: 0.9770 - val_loss: 0.0928 - val_acc: 0.9723
Epoch 35/60
60000/60000 [==============================] - 8s 140us/step - loss: 0.0751 - acc: 0.9778 - val_loss: 0.0928 - val_acc: 0.9721
Epoch 36/60
60000/60000 [==============================] - 8s 138us/step - loss: 0.0756 - acc: 0.9772 - val_loss: 0.0919 - val_acc: 0.9721
Epoch 37/60
60000/60000 [==============================] - 8s 140us/step - loss: 0.0760 - acc: 0.9769 - val_loss: 0.0923 - val_acc: 0.9723
Epoch 38/60
60000/60000 [==============================] - 8s 138us/step - loss: 0.0751 - acc: 0.9772 - val_loss: 0.0921 - val_acc: 0.9726
Epoch 39/60
60000/60000 [==============================] - 9s 148us/step - loss: 0.0756 - acc: 0.9774 - val_loss: 0.0924 - val_acc: 0.9728
Epoch 40/60
60000/60000 [==============================] - 8s 141us/step - loss: 0.0750 - acc: 0.9774 - val_loss: 0.0924 - val_acc: 0.9728
Epoch 41/60
60000/60000 [==============================] - 9s 142us/step - loss: 0.0760 - acc: 0.9774 - val_loss: 0.0926 - val_acc: 0.9724
Epoch 42/60
60000/60000 [==============================] - 8s 142us/step - loss: 0.0719 - acc: 0.9783 - val_loss: 0.0920 - val_acc: 0.9730
Epoch 43/60
60000/60000 [==============================] - 9s 143us/step - loss: 0.0730 - acc: 0.9779 - val_loss: 0.0919 - val_acc: 0.9726
Epoch 44/60
60000/60000 [==============================] - 8s 140us/step - loss: 0.0722 - acc: 0.9785 - val_loss: 0.0920 - val_acc: 0.9728
Epoch 45/60
60000/60000 [==============================] - 9s 142us/step - loss: 0.0746 - acc: 0.9774 - val_loss: 0.0923 - val_acc: 0.9730
Epoch 46/60
60000/60000 [==============================] - 9s 148us/step - loss: 0.0736 - acc: 0.9778 - val_loss: 0.0920 - val_acc: 0.9729
Epoch 47/60
60000/60000 [==============================] - 9s 156us/step - loss: 0.0739 - acc: 0.9777 - val_loss: 0.0920 - val_acc: 0.9725
Epoch 48/60
60000/60000 [==============================] - 9s 151us/step - loss: 0.0720 - acc: 0.9783 - val_loss: 0.0917 - val_acc: 0.9731
Epoch 49/60
60000/60000 [==============================] - 9s 146us/step - loss: 0.0735 - acc: 0.9780 - val_loss: 0.0917 - val_acc: 0.9729
Epoch 50/60
60000/60000 [==============================] - 9s 152us/step - loss: 0.0729 - acc: 0.9780 - val_loss: 0.0923 - val_acc: 0.9723
Epoch 51/60
60000/60000 [==============================] - 9s 151us/step - loss: 0.0716 - acc: 0.9777 - val_loss: 0.0919 - val_acc: 0.9727
Epoch 52/60
60000/60000 [==============================] - 9s 145us/step - loss: 0.0716 - acc: 0.9784 - val_loss: 0.0915 - val_acc: 0.9726
Epoch 53/60
60000/60000 [==============================] - 9s 149us/step - loss: 0.0715 - acc: 0.9782 - val_loss: 0.0912 - val_acc: 0.9722
Epoch 54/60
60000/60000 [==============================] - 9s 143us/step - loss: 0.0704 - acc: 0.9786 - val_loss: 0.0911 - val_acc: 0.9720
Epoch 55/60
60000/60000 [==============================] - 9s 142us/step - loss: 0.0721 - acc: 0.9782 - val_loss: 0.0917 - val_acc: 0.9727
Epoch 56/60
60000/60000 [==============================] - 9s 143us/step - loss: 0.0717 - acc: 0.9784 - val_loss: 0.0918 - val_acc: 0.9725
Epoch 57/60
60000/60000 [==============================] - 9s 151us/step - loss: 0.0717 - acc: 0.9783 - val_loss: 0.0918 - val_acc: 0.9726
Epoch 58/60
60000/60000 [==============================] - 9s 144us/step - loss: 0.0708 - acc: 0.9783 - val_loss: 0.0916 - val_acc: 0.9725
Epoch 59/60
60000/60000 [==============================] - 8s 137us/step - loss: 0.0703 - acc: 0.9782 - val_loss: 0.0916 - val_acc: 0.9731
Epoch 60/60
60000/60000 [==============================] - 8s 137us/step - loss: 0.0703 - acc: 0.9785 - val_loss: 0.0918 - val_acc: 0.9727
CPU times: user 15min 16s, sys: 3min 41s, total: 18min 57s
Wall time: 8min 37s
```python
fig, ax = plt.subplots(1, 1, figsize=(10,6))
ax.plot(lr_model_history.history['loss'], 'r', label='train')
ax.plot(lr_model_history.history['val_loss'], 'b', label='val')
ax.set_xlabel(r'Epoch', fontsize=20)
ax.set_ylabel(r'Loss', fontsize=20)
ax.legend()
ax.tick_params(labelsize=20)
```
```python
fig, ax = plt.subplots(1, 1, figsize=(10,6))
ax.plot(lr_model_history.history['acc'], 'r', label='train')
ax.plot(lr_model_history.history['val_acc'], 'b', label='val')
ax.set_xlabel(r'Epoch', fontsize=20)
ax.set_ylabel(r'Accuracy', fontsize=20)
ax.legend()
ax.tick_params(labelsize=20)
```
### Apply a custom learning rate change using `LearningRateScheduler`
Write a function that performs exponential learning rate decay as indicated by the following formula:
\begin{align}
lr = lr_0 \cdot e^{-kt}
\end{align}
where $lr_0$ is the initial learning rate, $k$ is the decay rate and $t$ is the epoch number.
```python
# solution
epochs = 60
learning_rate = 0.1 # initial learning rate
decay_rate = 0.1
momentum = 0.8
# define the optimizer function
# (note: SGD's built-in time-based decay still applies per update,
#  on top of the LearningRateScheduler callback used below)
sgd = SGD(lr=learning_rate, momentum=momentum, decay=decay_rate, nesterov=False)
```
```python
input_dim = x_train.shape[1]
num_classes = 10
batch_size = 196
# build the model
exponential_decay_model = Sequential()
exponential_decay_model.add(Dense(64, activation=tf.nn.relu, kernel_initializer='uniform', input_dim = input_dim))
exponential_decay_model.add(Dropout(0.1))
exponential_decay_model.add(Dense(64, kernel_initializer='uniform', activation=tf.nn.relu))
exponential_decay_model.add(Dense(num_classes, kernel_initializer='uniform', activation=tf.nn.softmax))
# compile the model
exponential_decay_model.compile(loss='categorical_crossentropy',
optimizer=sgd,
metrics=['acc'])
```
```python
# define the learning rate change
def exp_decay(epoch):
lrate = learning_rate * np.exp(-decay_rate*epoch)
return lrate
```
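Before wiring it into a callback, it is worth sanity-checking the schedule for the first few epochs (purely illustrative):
```python
for epoch in range(5):
    print(epoch, exp_decay(epoch))
# expect roughly 0.1, 0.0905, 0.0819, 0.0741, 0.0670
```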
```python
# learning schedule callback
loss_history = History()
lr_rate = LearningRateScheduler(exp_decay)
callbacks_list = [loss_history, lr_rate]
# you invoke the LearningRateScheduler during the .fit() phase
exponential_decay_model_history = exponential_decay_model.fit(x_train, y_train,
                                                              batch_size=batch_size,
                                                              epochs=epochs,
                                                              callbacks=callbacks_list,
                                                              verbose=1,
                                                              validation_data=(x_test, y_test))
```
Train on 60000 samples, validate on 10000 samples
Epoch 1/60
60000/60000 [==============================] - 1s 16us/step - loss: 1.9924 - acc: 0.3865 - val_loss: 1.4953 - val_acc: 0.5841
Epoch 2/60
60000/60000 [==============================] - 1s 11us/step - loss: 1.2430 - acc: 0.6362 - val_loss: 1.0153 - val_acc: 0.7164
Epoch 3/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.9789 - acc: 0.7141 - val_loss: 0.8601 - val_acc: 0.7617
Epoch 4/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.8710 - acc: 0.7452 - val_loss: 0.7811 - val_acc: 0.7865
Epoch 5/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.8115 - acc: 0.7609 - val_loss: 0.7336 - val_acc: 0.7968
Epoch 6/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.7749 - acc: 0.7678 - val_loss: 0.7030 - val_acc: 0.8035
Epoch 7/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.7524 - acc: 0.7742 - val_loss: 0.6822 - val_acc: 0.8095
Epoch 8/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.7342 - acc: 0.7788 - val_loss: 0.6673 - val_acc: 0.8122
Epoch 9/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.7218 - acc: 0.7840 - val_loss: 0.6562 - val_acc: 0.8148
Epoch 10/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.7144 - acc: 0.7836 - val_loss: 0.6475 - val_acc: 0.8168
Epoch 11/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.7054 - acc: 0.7857 - val_loss: 0.6408 - val_acc: 0.8175
Epoch 12/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.7008 - acc: 0.7896 - val_loss: 0.6354 - val_acc: 0.8185
Epoch 13/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.6950 - acc: 0.7885 - val_loss: 0.6311 - val_acc: 0.8197
Epoch 14/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.6921 - acc: 0.7895 - val_loss: 0.6274 - val_acc: 0.8199
Epoch 15/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.6888 - acc: 0.7913 - val_loss: 0.6244 - val_acc: 0.8204
Epoch 16/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.6833 - acc: 0.7932 - val_loss: 0.6219 - val_acc: 0.8206
Epoch 17/60
60000/60000 [==============================] - 1s 12us/step - loss: 0.6831 - acc: 0.7942 - val_loss: 0.6199 - val_acc: 0.8208
Epoch 18/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.6822 - acc: 0.7937 - val_loss: 0.6182 - val_acc: 0.8212
Epoch 19/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.6790 - acc: 0.7955 - val_loss: 0.6167 - val_acc: 0.8215
Epoch 20/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.6797 - acc: 0.7935 - val_loss: 0.6155 - val_acc: 0.8218
Epoch 21/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.6773 - acc: 0.7953 - val_loss: 0.6144 - val_acc: 0.8222
Epoch 22/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.6742 - acc: 0.7960 - val_loss: 0.6135 - val_acc: 0.8227
Epoch 23/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.6746 - acc: 0.7958 - val_loss: 0.6127 - val_acc: 0.8236
Epoch 24/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.6729 - acc: 0.7973 - val_loss: 0.6120 - val_acc: 0.8237
Epoch 25/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.6749 - acc: 0.7963 - val_loss: 0.6114 - val_acc: 0.8238
Epoch 26/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.6715 - acc: 0.7967 - val_loss: 0.6109 - val_acc: 0.8241
Epoch 27/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.6728 - acc: 0.7975 - val_loss: 0.6105 - val_acc: 0.8241
Epoch 28/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.6721 - acc: 0.7964 - val_loss: 0.6101 - val_acc: 0.8246
Epoch 29/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.6707 - acc: 0.7972 - val_loss: 0.6098 - val_acc: 0.8247
Epoch 30/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.6711 - acc: 0.7980 - val_loss: 0.6095 - val_acc: 0.8247
Epoch 31/60
60000/60000 [==============================] - 1s 12us/step - loss: 0.6721 - acc: 0.7960 - val_loss: 0.6092 - val_acc: 0.8248
Epoch 32/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.6708 - acc: 0.7981 - val_loss: 0.6090 - val_acc: 0.8249
Epoch 33/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.6720 - acc: 0.7968 - val_loss: 0.6088 - val_acc: 0.8249
Epoch 34/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.6700 - acc: 0.7973 - val_loss: 0.6086 - val_acc: 0.8250
Epoch 35/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.6714 - acc: 0.7979 - val_loss: 0.6084 - val_acc: 0.8250
Epoch 36/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.6687 - acc: 0.7980 - val_loss: 0.6083 - val_acc: 0.8250
Epoch 37/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.6700 - acc: 0.7963 - val_loss: 0.6082 - val_acc: 0.8250
Epoch 38/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.6702 - acc: 0.7964 - val_loss: 0.6081 - val_acc: 0.8252
Epoch 39/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.6711 - acc: 0.7963 - val_loss: 0.6080 - val_acc: 0.8250
Epoch 40/60
60000/60000 [==============================] - 1s 12us/step - loss: 0.6698 - acc: 0.7971 - val_loss: 0.6079 - val_acc: 0.8251
Epoch 41/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.6696 - acc: 0.7967 - val_loss: 0.6079 - val_acc: 0.8252
Epoch 42/60
60000/60000 [==============================] - 1s 12us/step - loss: 0.6699 - acc: 0.7989 - val_loss: 0.6078 - val_acc: 0.8252
Epoch 43/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.6701 - acc: 0.7952 - val_loss: 0.6077 - val_acc: 0.8252
Epoch 44/60
60000/60000 [==============================] - 1s 12us/step - loss: 0.6704 - acc: 0.7973 - val_loss: 0.6077 - val_acc: 0.8252
Epoch 45/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.6697 - acc: 0.7971 - val_loss: 0.6076 - val_acc: 0.8252
Epoch 46/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.6718 - acc: 0.7969 - val_loss: 0.6076 - val_acc: 0.8252
Epoch 47/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.6708 - acc: 0.7965 - val_loss: 0.6076 - val_acc: 0.8252
Epoch 48/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.6690 - acc: 0.7968 - val_loss: 0.6075 - val_acc: 0.8252
Epoch 49/60
60000/60000 [==============================] - 1s 12us/step - loss: 0.6700 - acc: 0.7959 - val_loss: 0.6075 - val_acc: 0.8252
Epoch 50/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.6722 - acc: 0.7963 - val_loss: 0.6075 - val_acc: 0.8252
Epoch 51/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.6698 - acc: 0.7950 - val_loss: 0.6075 - val_acc: 0.8252
Epoch 52/60
60000/60000 [==============================] - 1s 12us/step - loss: 0.6689 - acc: 0.7978 - val_loss: 0.6075 - val_acc: 0.8252
Epoch 53/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.6696 - acc: 0.7973 - val_loss: 0.6074 - val_acc: 0.8252
Epoch 54/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.6678 - acc: 0.7975 - val_loss: 0.6074 - val_acc: 0.8252
Epoch 55/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.6694 - acc: 0.7969 - val_loss: 0.6074 - val_acc: 0.8252
Epoch 56/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.6688 - acc: 0.7981 - val_loss: 0.6074 - val_acc: 0.8252
Epoch 57/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.6674 - acc: 0.7988 - val_loss: 0.6074 - val_acc: 0.8252
Epoch 58/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.6685 - acc: 0.7970 - val_loss: 0.6074 - val_acc: 0.8252
Epoch 59/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.6702 - acc: 0.7953 - val_loss: 0.6074 - val_acc: 0.8252
Epoch 60/60
60000/60000 [==============================] - 1s 11us/step - loss: 0.6701 - acc: 0.7965 - val_loss: 0.6074 - val_acc: 0.8252
```python
# check the history keys: the per-epoch learning rate is stored under 'lr'
exponential_decay_model_history.history.keys()
```
dict_keys(['val_loss', 'val_acc', 'loss', 'acc', 'lr'])
```python
fig, ax = plt.subplots(1, 1, figsize=(10,6))
ax.plot(exponential_decay_model_history.history['lr'], 'r')
ax.set_xlabel(r'Epoch', fontsize=20)
ax.set_ylabel(r'Learning Rate', fontsize=20)
ax.tick_params(labelsize=20)
```
```python
fig, ax = plt.subplots(1, 1, figsize=(10,6))
ax.plot(exponential_decay_model_history.history['loss'], 'r', label='train')
ax.plot(exponential_decay_model_history.history['val_loss'], 'b', label='val')
ax.set_xlabel(r'Epoch', fontsize=20)
ax.set_ylabel(r'Loss', fontsize=20)
ax.legend()
ax.tick_params(labelsize=20)
```
### Step 3 - Choosing an `optimizer` and a `loss function`
When constructing a model and using it to make our predictions, for example to assign label scores to images ("cat", "plane", etc), we want to measure our success or failure by defining a "loss" function (or objective function). The goal of optimization is to efficiently calculate the parameters/weights that minimize this loss function. `keras` provides various types of [loss functions](https://github.com/keras-team/keras/blob/master/keras/losses.py).
Sometimes the "loss" function measures the "distance". We can define this "distance" between two data points in various ways suitable to the problem or dataset.
Distances:
- Euclidean
- Manhattan
- others, such as the Hamming distance, which measures distances between strings; for example, the Hamming distance of "carolin" and "cathrin" is 3 (see the sketch after this list).

Loss functions:
- MSE (for regression)
- categorical cross-entropy (for classification)
- binary cross-entropy (for classification)
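As a quick check of that Hamming example, a minimal sketch:
```python
def hamming(a, b):
    """Number of positions at which two equal-length strings differ."""
    assert len(a) == len(b)
    return sum(c1 != c2 for c1, c2 in zip(a, b))

print(hamming('carolin', 'cathrin'))  # 3
```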
```python
# build the model
input_dim = x_train.shape[1]
model = Sequential()
model.add(Dense(64, activation=tf.nn.relu, kernel_initializer='uniform',
                input_dim=input_dim))  # fully-connected layer with 64 hidden units
model.add(Dropout(0.1))
model.add(Dense(64, kernel_initializer='uniform', activation=tf.nn.relu))
model.add(Dense(num_classes, kernel_initializer='uniform', activation=tf.nn.softmax))
```
```python
# defining the parameters for RMSprop (I used the keras defaults here)
rms = RMSprop(lr=0.001, rho=0.9, epsilon=None, decay=0.0)
model.compile(loss='categorical_crossentropy',
              optimizer=rms,
              metrics=['acc'])
```
### Step 4 - Deciding on the `batch size` and `number of epochs`
```python
%%time
batch_size = input_dim
epochs = 60
model_history = model.fit(x_train, y_train,
                          batch_size=batch_size,
                          epochs=epochs,
                          verbose=1,
                          validation_data=(x_test, y_test))
```
Train on 60000 samples, validate on 10000 samples
Epoch 1/60
60000/60000 [==============================] - 1s 14us/step - loss: 1.1320 - acc: 0.7067 - val_loss: 0.5628 - val_acc: 0.8237
Epoch 2/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.4831 - acc: 0.8570 - val_loss: 0.3674 - val_acc: 0.8934
Epoch 3/60
60000/60000 [==============================] - 1s 9us/step - loss: 0.3665 - acc: 0.8931 - val_loss: 0.3199 - val_acc: 0.9061
Epoch 4/60
60000/60000 [==============================] - 1s 9us/step - loss: 0.3100 - acc: 0.9092 - val_loss: 0.2664 - val_acc: 0.9233
Epoch 5/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.2699 - acc: 0.9206 - val_loss: 0.2295 - val_acc: 0.9326
Epoch 6/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.2391 - acc: 0.9305 - val_loss: 0.2104 - val_acc: 0.9362
Epoch 7/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.2115 - acc: 0.9383 - val_loss: 0.1864 - val_acc: 0.9459
Epoch 8/60
60000/60000 [==============================] - 1s 9us/step - loss: 0.1900 - acc: 0.9451 - val_loss: 0.1658 - val_acc: 0.9493
Epoch 9/60
60000/60000 [==============================] - 1s 9us/step - loss: 0.1714 - acc: 0.9492 - val_loss: 0.1497 - val_acc: 0.9538
Epoch 10/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.1565 - acc: 0.9539 - val_loss: 0.1404 - val_acc: 0.9591
Epoch 11/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.1443 - acc: 0.9569 - val_loss: 0.1305 - val_acc: 0.9616
Epoch 12/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.1334 - acc: 0.9596 - val_loss: 0.1224 - val_acc: 0.9628
Epoch 13/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.1257 - acc: 0.9627 - val_loss: 0.1133 - val_acc: 0.9660
Epoch 14/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.1169 - acc: 0.9652 - val_loss: 0.1116 - val_acc: 0.9674
Epoch 15/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.1091 - acc: 0.9675 - val_loss: 0.1104 - val_acc: 0.9670
Epoch 16/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.1051 - acc: 0.9689 - val_loss: 0.1030 - val_acc: 0.9692
Epoch 17/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.0978 - acc: 0.9697 - val_loss: 0.1044 - val_acc: 0.9686
Epoch 18/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.0929 - acc: 0.9718 - val_loss: 0.0996 - val_acc: 0.9689
Epoch 19/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.0882 - acc: 0.9738 - val_loss: 0.1035 - val_acc: 0.9695
Epoch 20/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.0850 - acc: 0.9737 - val_loss: 0.0941 - val_acc: 0.9717
Epoch 21/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.0803 - acc: 0.9751 - val_loss: 0.0953 - val_acc: 0.9715
Epoch 22/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.0793 - acc: 0.9762 - val_loss: 0.0898 - val_acc: 0.9729
Epoch 23/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.0747 - acc: 0.9775 - val_loss: 0.0901 - val_acc: 0.9732
Epoch 24/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.0718 - acc: 0.9778 - val_loss: 0.0948 - val_acc: 0.9720
Epoch 25/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.0697 - acc: 0.9781 - val_loss: 0.0908 - val_acc: 0.9727
Epoch 26/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.0668 - acc: 0.9794 - val_loss: 0.0917 - val_acc: 0.9726
Epoch 27/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.0648 - acc: 0.9800 - val_loss: 0.0895 - val_acc: 0.9737
Epoch 28/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.0637 - acc: 0.9798 - val_loss: 0.0868 - val_acc: 0.9728
Epoch 29/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.0598 - acc: 0.9813 - val_loss: 0.0883 - val_acc: 0.9736
Epoch 30/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.0570 - acc: 0.9820 - val_loss: 0.0869 - val_acc: 0.9741
Epoch 31/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.0555 - acc: 0.9825 - val_loss: 0.0896 - val_acc: 0.9732
Epoch 32/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.0554 - acc: 0.9827 - val_loss: 0.0843 - val_acc: 0.9743
Epoch 33/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.0512 - acc: 0.9836 - val_loss: 0.0843 - val_acc: 0.9746
Epoch 34/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.0509 - acc: 0.9835 - val_loss: 0.0868 - val_acc: 0.9753
Epoch 35/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.0491 - acc: 0.9842 - val_loss: 0.0841 - val_acc: 0.9755
Epoch 36/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.0471 - acc: 0.9848 - val_loss: 0.0887 - val_acc: 0.9728
Epoch 37/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.0466 - acc: 0.9850 - val_loss: 0.0876 - val_acc: 0.9756
Epoch 38/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.0456 - acc: 0.9856 - val_loss: 0.0833 - val_acc: 0.9769
Epoch 39/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.0431 - acc: 0.9866 - val_loss: 0.0869 - val_acc: 0.9759
Epoch 40/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.0413 - acc: 0.9869 - val_loss: 0.0926 - val_acc: 0.9743
Epoch 41/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.0401 - acc: 0.9872 - val_loss: 0.0851 - val_acc: 0.9756
Epoch 42/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.0401 - acc: 0.9876 - val_loss: 0.0856 - val_acc: 0.9764
Epoch 43/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.0392 - acc: 0.9870 - val_loss: 0.0861 - val_acc: 0.9771
Epoch 44/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.0396 - acc: 0.9870 - val_loss: 0.0918 - val_acc: 0.9756
Epoch 45/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.0371 - acc: 0.9883 - val_loss: 0.0866 - val_acc: 0.9766
Epoch 46/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.0374 - acc: 0.9883 - val_loss: 0.0888 - val_acc: 0.9748
Epoch 47/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.0376 - acc: 0.9878 - val_loss: 0.0850 - val_acc: 0.9761
Epoch 48/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.0351 - acc: 0.9890 - val_loss: 0.0848 - val_acc: 0.9777
Epoch 49/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.0361 - acc: 0.9884 - val_loss: 0.0850 - val_acc: 0.9771
Epoch 50/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.0341 - acc: 0.9887 - val_loss: 0.0889 - val_acc: 0.9769
Epoch 51/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.0322 - acc: 0.9897 - val_loss: 0.0882 - val_acc: 0.9771
Epoch 52/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.0322 - acc: 0.9895 - val_loss: 0.0892 - val_acc: 0.9762
Epoch 53/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.0313 - acc: 0.9892 - val_loss: 0.0916 - val_acc: 0.9771
Epoch 54/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.0300 - acc: 0.9897 - val_loss: 0.0913 - val_acc: 0.9772
Epoch 55/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.0306 - acc: 0.9902 - val_loss: 0.0904 - val_acc: 0.9763
Epoch 56/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.0307 - acc: 0.9900 - val_loss: 0.0910 - val_acc: 0.9777
Epoch 57/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.0299 - acc: 0.9901 - val_loss: 0.0918 - val_acc: 0.9763
Epoch 58/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.0299 - acc: 0.9906 - val_loss: 0.0914 - val_acc: 0.9778
Epoch 59/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.0284 - acc: 0.9909 - val_loss: 0.0907 - val_acc: 0.9769
Epoch 60/60
60000/60000 [==============================] - 0s 8us/step - loss: 0.0283 - acc: 0.9908 - val_loss: 0.0962 - val_acc: 0.9761
CPU times: user 1min 17s, sys: 7.4 s, total: 1min 24s
Wall time: 29.3 s
```python
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
```
Test loss: 0.09620895183624088
Test accuracy: 0.9761
```python
fig, ax = plt.subplots(1, 1, figsize=(10,6))
ax.plot(model_history.history['acc'], 'r', label='train_acc')
ax.plot(model_history.history['val_acc'], 'b', label='val_acc')
ax.set_xlabel(r'Epoch', fontsize=20)
ax.set_ylabel(r'Accuracy', fontsize=20)
ax.legend()
ax.tick_params(labelsize=20)
```
```python
fig, ax = plt.subplots(1, 1, figsize=(10,6))
ax.plot(model_history.history['loss'], 'r', label='train')
ax.plot(model_history.history['val_loss'], 'b', label='val')
ax.set_xlabel(r'Epoch', fontsize=20)
ax.set_ylabel(r'Loss', fontsize=20)
ax.legend()
ax.tick_params(labelsize=20)
```
### Step 5 - Random restarts
This method does not seem to have an implementation in `keras`. Develop your own function for this using `keras.callbacks.LearningRateScheduler`. You can refer back to how we used it to set a custom learning rate.
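One possible sketch: reuse the exponential schedule from above and simply reset it every few epochs. The restart period of 10 is an arbitrary illustrative choice, and the sketch assumes `learning_rate`, `decay_rate` and `np` are still in scope from the previous section:
```python
def warm_restart(epoch):
    restart_period = 10         # assumed restart period (illustrative)
    t = epoch % restart_period  # epochs elapsed since the last restart
    return learning_rate * np.exp(-decay_rate * t)

# then: model.fit(..., callbacks=[LearningRateScheduler(warm_restart)])
```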
### Tuning the Hyperparameters using Cross Validation
Now instead of trying different values by hand, we will use GridSearchCV from Scikit-Learn to try out several values for our hyperparameters and compare the results.
To do cross-validation with `keras` we will use the wrappers for the Scikit-Learn API. They provide a way to use Sequential Keras models (single-input only) as part of your Scikit-Learn workflow.
There are two wrappers available:
- `keras.wrappers.scikit_learn.KerasClassifier(build_fn=None, **sk_params)`, which implements the Scikit-Learn classifier interface
- `keras.wrappers.scikit_learn.KerasRegressor(build_fn=None, **sk_params)`, which implements the Scikit-Learn regressor interface
```python
import numpy
from sklearn.model_selection import GridSearchCV
from keras.wrappers.scikit_learn import KerasClassifier
```
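The general pattern with either wrapper: write a function that builds and compiles a model, hand it to the wrapper, then use the resulting estimator like any other Scikit-Learn classifier. A minimal sketch (commented out because `create_model` is only defined in the next cell):
```python
# clf = KerasClassifier(build_fn=create_model, epochs=10, batch_size=128, verbose=0)
# clf.fit(x_train, y_train)
# print(clf.score(x_test, y_test))  # sklearn-style accuracy
```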
#### Trying different weight initializations
```python
# let's create a function that creates the model (required for KerasClassifier)
# while accepting the hyperparameters we want to tune
# (here the optimizer is fixed to RMSprop; we only tune the initializer)
def create_model(init_mode='uniform'):
    # define model
    model = Sequential()
    model.add(Dense(64, kernel_initializer=init_mode, activation=tf.nn.relu, input_dim=784))
    model.add(Dropout(0.1))
    model.add(Dense(64, kernel_initializer=init_mode, activation=tf.nn.relu))
    model.add(Dense(10, kernel_initializer=init_mode, activation=tf.nn.softmax))
    # compile model
    model.compile(loss='categorical_crossentropy',
                  optimizer=RMSprop(),
                  metrics=['accuracy'])
    return model
```
```python
%%time
seed = 7
numpy.random.seed(seed)
batch_size = 128
epochs = 10
model_CV = KerasClassifier(build_fn=create_model, epochs=epochs,
                           batch_size=batch_size, verbose=1)
# define the grid search parameters
init_mode = ['uniform', 'lecun_uniform', 'normal', 'zero',
             'glorot_normal', 'glorot_uniform', 'he_normal', 'he_uniform']
param_grid = dict(init_mode=init_mode)
grid = GridSearchCV(estimator=model_CV, param_grid=param_grid, n_jobs=-1, cv=3)
grid_result = grid.fit(x_train, y_train)
```
Epoch 1/10
60000/60000 [==============================] - 1s 21us/step - loss: 0.4118 - acc: 0.8824
Epoch 2/10
60000/60000 [==============================] - 1s 15us/step - loss: 0.1936 - acc: 0.9437
Epoch 3/10
60000/60000 [==============================] - 1s 14us/step - loss: 0.1482 - acc: 0.9553
Epoch 4/10
60000/60000 [==============================] - 1s 14us/step - loss: 0.1225 - acc: 0.9631
Epoch 5/10
60000/60000 [==============================] - 1s 14us/step - loss: 0.1064 - acc: 0.9676
Epoch 6/10
60000/60000 [==============================] - 1s 14us/step - loss: 0.0944 - acc: 0.9710
Epoch 7/10
60000/60000 [==============================] - 1s 14us/step - loss: 0.0876 - acc: 0.9732
Epoch 8/10
60000/60000 [==============================] - 1s 15us/step - loss: 0.0809 - acc: 0.9745
Epoch 9/10
60000/60000 [==============================] - 1s 14us/step - loss: 0.0741 - acc: 0.9775
Epoch 10/10
60000/60000 [==============================] - 1s 15us/step - loss: 0.0709 - acc: 0.9783
CPU times: user 21 s, sys: 3.56 s, total: 24.5 s
Wall time: 1min 20s
```python
# print results
print(f'Best Accuracy for {grid_result.best_score_} using {grid_result.best_params_}')
means = grid_result.cv_results_['mean_test_score']
stds = grid_result.cv_results_['std_test_score']
params = grid_result.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
    print(f' mean={mean:.4}, std={stdev:.4} using {param}')
```
Best Accuracy for 0.9689333333333333 using {'init_mode': 'lecun_uniform'}
mean=0.9647, std=0.001438 using {'init_mode': 'uniform'}
mean=0.9689, std=0.001044 using {'init_mode': 'lecun_uniform'}
mean=0.9651, std=0.001515 using {'init_mode': 'normal'}
mean=0.1124, std=0.002416 using {'init_mode': 'zero'}
mean=0.9657, std=0.0005104 using {'init_mode': 'glorot_normal'}
mean=0.9687, std=0.0008436 using {'init_mode': 'glorot_uniform'}
mean=0.9681, std=0.002145 using {'init_mode': 'he_normal'}
mean=0.9685, std=0.001952 using {'init_mode': 'he_uniform'}
### Save Your Neural Network Model to JSON
The Hierarchical Data Format (HDF5) is a storage format for large arrays of data, including the weight values of a neural network.
You can install the HDF5 Python module with `pip install h5py`.
Keras gives you the ability to describe and save any model using the JSON format.
```python
from keras.models import model_from_json
# serialize model to JSON
model_json = model.to_json()
with open("model.json", "w") as json_file:
json_file.write(model_json)
# save weights to HDF5
model.save_weights("model.h5")
print("Model saved")
# when you want to retrieve the model: load json and create model
json_file = open('model.json', 'r')
saved_model = json_file.read()
# close the file as good practice
json_file.close()
model_from_json = model_from_json(saved_model)
# load weights into new model
model_from_json.load_weights("model.h5")
print("Model loaded")
```
Model saved
Model loaded
### Cross-validation with more than one hyperparameters
We can do cross-validation with more than one parameter simultaneously, effectively trying out combinations of them.
**Note: Cross-validation in neural networks is computationally expensive.** Think before you experiment! Multiply the numbers of values you are testing for each hyperparameter to see how many combinations there are; each combination is then evaluated with cv-fold cross-validation (cv is a parameter we choose).
For example, we can choose to search for different values of:
- batch size,
- number of epochs and
- initialization mode.
The choices are specified in a dictionary and passed to `GridSearchCV`.
We will perform a GridSearch for `batch size`, `number of epochs` and `initializer` combined.
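Before launching it, a quick cost estimate for the grid defined below (2 batch sizes × 2 epoch counts × 2 initializers, with 3-fold cross-validation):
```python
n_combinations = 2 * 2 * 2     # batch sizes × epoch counts × initializers
n_fits = n_combinations * 3    # each combination is trained once per cv fold
print(n_combinations, n_fits)  # 8 24 (plus a final refit of the best setting)
```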
```python
# repeat some of the initial values here so we make sure they were not changed
input_dim = x_train.shape[1]
num_classes = 10
# let's create a function that creates the model (required for KerasClassifier)
# while accepting the hyperparameters we want to tune
# we also pass some default values such as optimizer='rmsprop'
def create_model_2(optimizer='rmsprop', init='glorot_uniform'):
    model = Sequential()
    model.add(Dense(64, input_dim=input_dim, kernel_initializer=init, activation=tf.nn.relu))
    model.add(Dropout(0.1))
    model.add(Dense(64, kernel_initializer=init, activation=tf.nn.relu))
    model.add(Dense(num_classes, kernel_initializer=init, activation=tf.nn.softmax))
    # compile model
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    return model
```
```python
%%time
# fix random seed for reproducibility (this might or might not work
# depending on each library's implementation)
seed = 7
numpy.random.seed(seed)
# create the sklearn model for the network
model_init_batch_epoch_CV = KerasClassifier(build_fn=create_model_2, verbose=1)
# we choose the initializers that came out on top in our previous cross-validation
init_mode = ['glorot_uniform', 'uniform']
batches = [128, 512]
epochs = [10, 20]
# grid search for initializer, batch size and number of epochs
param_grid = dict(epochs=epochs, batch_size=batches, init=init_mode)
grid = GridSearchCV(estimator=model_init_batch_epoch_CV,
                    param_grid=param_grid,
                    cv=3)
grid_result = grid.fit(x_train, y_train)
```
Epoch 1/10
40000/40000 [==============================] - 1s 21us/step - loss: 0.4801 - acc: 0.8601
Epoch 2/10
40000/40000 [==============================] - 1s 15us/step - loss: 0.2309 - acc: 0.9310
Epoch 3/10
40000/40000 [==============================] - 1s 14us/step - loss: 0.1744 - acc: 0.9479
Epoch 4/10
40000/40000 [==============================] - 1s 15us/step - loss: 0.1422 - acc: 0.9575
Epoch 5/10
40000/40000 [==============================] - 1s 14us/step - loss: 0.1214 - acc: 0.9625
Epoch 6/10
40000/40000 [==============================] - 1s 15us/step - loss: 0.1081 - acc: 0.9675
Epoch 7/10
40000/40000 [==============================] - 1s 15us/step - loss: 0.0974 - acc: 0.9693
Epoch 8/10
40000/40000 [==============================] - 1s 15us/step - loss: 0.0874 - acc: 0.9730
Epoch 9/10
40000/40000 [==============================] - 1s 14us/step - loss: 0.0800 - acc: 0.9750
Epoch 10/10
40000/40000 [==============================] - 1s 14us/step - loss: 0.0750 - acc: 0.9765
20000/20000 [==============================] - 0s 10us/step
40000/40000 [==============================] - 0s 6us/step
Epoch 1/10
40000/40000 [==============================] - 1s 22us/step - loss: 0.4746 - acc: 0.8656
Epoch 2/10
40000/40000 [==============================] - 1s 14us/step - loss: 0.2264 - acc: 0.9336
Epoch 3/10
40000/40000 [==============================] - 1s 15us/step - loss: 0.1734 - acc: 0.9487
Epoch 4/10
40000/40000 [==============================] - 1s 14us/step - loss: 0.1436 - acc: 0.9568
Epoch 5/10
40000/40000 [==============================] - 1s 15us/step - loss: 0.1256 - acc: 0.9614
Epoch 6/10
40000/40000 [==============================] - 1s 15us/step - loss: 0.1104 - acc: 0.9660
Epoch 7/10
40000/40000 [==============================] - 1s 15us/step - loss: 0.0973 - acc: 0.9707
Epoch 8/10
40000/40000 [==============================] - 1s 15us/step - loss: 0.0870 - acc: 0.9733
Epoch 9/10
40000/40000 [==============================] - 1s 15us/step - loss: 0.0818 - acc: 0.9748
Epoch 10/10
40000/40000 [==============================] - 1s 14us/step - loss: 0.0730 - acc: 0.9770
20000/20000 [==============================] - 0s 10us/step
40000/40000 [==============================] - 0s 6us/step
Epoch 1/10
40000/40000 [==============================] - 1s 23us/step - loss: 0.4639 - acc: 0.8671
Epoch 2/10
40000/40000 [==============================] - 1s 15us/step - loss: 0.2208 - acc: 0.9344
Epoch 3/10
40000/40000 [==============================] - 1s 15us/step - loss: 0.1693 - acc: 0.9491
Epoch 4/10
40000/40000 [==============================] - 1s 15us/step - loss: 0.1388 - acc: 0.9580
Epoch 5/10
40000/40000 [==============================] - 1s 15us/step - loss: 0.1206 - acc: 0.9634
Epoch 6/10
40000/40000 [==============================] - 1s 15us/step - loss: 0.1062 - acc: 0.9678
Epoch 7/10
40000/40000 [==============================] - 1s 15us/step - loss: 0.0956 - acc: 0.9711
Epoch 8/10
40000/40000 [==============================] - 1s 15us/step - loss: 0.0870 - acc: 0.9728
Epoch 9/10
40000/40000 [==============================] - 1s 15us/step - loss: 0.0794 - acc: 0.9750
Epoch 10/10
40000/40000 [==============================] - 1s 14us/step - loss: 0.0748 - acc: 0.9774
20000/20000 [==============================] - 0s 11us/step
40000/40000 [==============================] - 0s 7us/step
Epoch 1/10
40000/40000 [==============================] - 1s 23us/step - loss: 0.7144 - acc: 0.7894
Epoch 2/10
40000/40000 [==============================] - 1s 15us/step - loss: 0.3246 - acc: 0.9045
Epoch 3/10
40000/40000 [==============================] - 1s 15us/step - loss: 0.2482 - acc: 0.9268
Epoch 4/10
40000/40000 [==============================] - 1s 15us/step - loss: 0.2005 - acc: 0.9407
Epoch 5/10
40000/40000 [==============================] - 1s 15us/step - loss: 0.1673 - acc: 0.9485
Epoch 6/10
40000/40000 [==============================] - 1s 15us/step - loss: 0.1462 - acc: 0.9559
Epoch 7/10
40000/40000 [==============================] - 1s 15us/step - loss: 0.1305 - acc: 0.9604
Epoch 8/10
40000/40000 [==============================] - 1s 15us/step - loss: 0.1166 - acc: 0.9643
Epoch 9/10
40000/40000 [==============================] - 1s 15us/step - loss: 0.1079 - acc: 0.9675
Epoch 10/10
40000/40000 [==============================] - 1s 15us/step - loss: 0.0983 - acc: 0.9695
20000/20000 [==============================] - 0s 12us/step
40000/40000 [==============================] - 0s 6us/step
Epoch 1/10
40000/40000 [==============================] - 1s 24us/step - loss: 0.6894 - acc: 0.7944
Epoch 2/10
40000/40000 [==============================] - 1s 15us/step - loss: 0.3171 - acc: 0.9061
Epoch 3/10
40000/40000 [==============================] - 1s 15us/step - loss: 0.2358 - acc: 0.9312
Epoch 4/10
40000/40000 [==============================] - 1s 15us/step - loss: 0.1911 - acc: 0.9422
Epoch 5/10
40000/40000 [==============================] - 1s 15us/step - loss: 0.1595 - acc: 0.9526
Epoch 6/10
40000/40000 [==============================] - 1s 15us/step - loss: 0.1401 - acc: 0.9579
Epoch 7/10
40000/40000 [==============================] - 1s 15us/step - loss: 0.1230 - acc: 0.9636
Epoch 8/10
40000/40000 [==============================] - 1s 15us/step - loss: 0.1096 - acc: 0.9672
Epoch 9/10
40000/40000 [==============================] - 1s 15us/step - loss: 0.1027 - acc: 0.9692
Epoch 10/10
40000/40000 [==============================] - 1s 15us/step - loss: 0.0936 - acc: 0.9718
20000/20000 [==============================] - 0s 12us/step
40000/40000 [==============================] - 0s 7us/step
Epoch 1/10
40000/40000 [==============================] - 1s 24us/step - loss: 0.7028 - acc: 0.7976
Epoch 2/10
40000/40000 [==============================] - 1s 15us/step - loss: 0.3189 - acc: 0.9055
Epoch 3/10
40000/40000 [==============================] - 1s 15us/step - loss: 0.2390 - acc: 0.9307
Epoch 4/10
40000/40000 [==============================] - 1s 15us/step - loss: 0.1910 - acc: 0.9435
Epoch 5/10
40000/40000 [==============================] - 1s 15us/step - loss: 0.1595 - acc: 0.9528
Epoch 6/10
40000/40000 [==============================] - 1s 15us/step - loss: 0.1376 - acc: 0.9583
Epoch 7/10
40000/40000 [==============================] - 1s 15us/step - loss: 0.1213 - acc: 0.9629
Epoch 8/10
40000/40000 [==============================] - 1s 15us/step - loss: 0.1100 - acc: 0.9664
Epoch 9/10
40000/40000 [==============================] - 1s 15us/step - loss: 0.0987 - acc: 0.9697
Epoch 10/10
40000/40000 [==============================] - 1s 15us/step - loss: 0.0932 - acc: 0.9714
20000/20000 [==============================] - 0s 13us/step
40000/40000 [==============================] - 0s 7us/step
Epoch 1/20
40000/40000 [==============================] - 1s 25us/step - loss: 0.4938 - acc: 0.8589
Epoch 2/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.2286 - acc: 0.9324
Epoch 3/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.1751 - acc: 0.9470
Epoch 4/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.1458 - acc: 0.9553
Epoch 5/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.1237 - acc: 0.9620
Epoch 6/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.1106 - acc: 0.9669
Epoch 7/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.0998 - acc: 0.9696
Epoch 8/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.0898 - acc: 0.9729
Epoch 9/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.0799 - acc: 0.9749
Epoch 10/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.0746 - acc: 0.9769
Epoch 11/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.0688 - acc: 0.9783
Epoch 12/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.0655 - acc: 0.9792
Epoch 13/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.0619 - acc: 0.9801
Epoch 14/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.0574 - acc: 0.9814
Epoch 15/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.0549 - acc: 0.9836
Epoch 16/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.0512 - acc: 0.9837
Epoch 17/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.0464 - acc: 0.9855
Epoch 18/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.0466 - acc: 0.9850
Epoch 19/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.0431 - acc: 0.9863
Epoch 20/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.0430 - acc: 0.9861
20000/20000 [==============================] - 0s 13us/step
40000/40000 [==============================] - 0s 7us/step
Epoch 1/20
40000/40000 [==============================] - 1s 25us/step - loss: 0.4956 - acc: 0.8584
Epoch 2/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.2286 - acc: 0.9321
Epoch 3/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.1746 - acc: 0.9474
Epoch 4/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.1438 - acc: 0.9574
Epoch 5/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.1232 - acc: 0.9634
Epoch 6/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.1104 - acc: 0.9665
Epoch 7/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.0986 - acc: 0.9696
Epoch 8/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.0886 - acc: 0.9723
Epoch 9/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.0828 - acc: 0.9741
Epoch 10/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.0763 - acc: 0.9768
Epoch 11/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.0679 - acc: 0.9781
Epoch 12/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.0656 - acc: 0.9788
Epoch 13/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.0601 - acc: 0.9810
Epoch 14/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.0571 - acc: 0.9817
Epoch 15/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.0517 - acc: 0.9832
Epoch 16/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.0509 - acc: 0.9834
Epoch 17/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.0478 - acc: 0.9850
Epoch 18/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.0461 - acc: 0.9852
Epoch 19/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.0442 - acc: 0.9854
Epoch 20/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.0427 - acc: 0.9866
20000/20000 [==============================] - 0s 14us/step
40000/40000 [==============================] - 0s 7us/step
Epoch 1/20
40000/40000 [==============================] - 1s 27us/step - loss: 0.4694 - acc: 0.8670
Epoch 2/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.2196 - acc: 0.9336
Epoch 3/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.1681 - acc: 0.9495
Epoch 4/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.1391 - acc: 0.9589
Epoch 5/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.1200 - acc: 0.9630
Epoch 6/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.1067 - acc: 0.9671
Epoch 7/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.0968 - acc: 0.9713
Epoch 8/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.0880 - acc: 0.9730
Epoch 9/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.0815 - acc: 0.9754
Epoch 10/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.0742 - acc: 0.9771
Epoch 11/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.0711 - acc: 0.9778
Epoch 12/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.0643 - acc: 0.9806
Epoch 13/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.0613 - acc: 0.9815
Epoch 14/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.0561 - acc: 0.9818
Epoch 15/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.0534 - acc: 0.9832
Epoch 16/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.0528 - acc: 0.9832
Epoch 17/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.0479 - acc: 0.9848
Epoch 18/20
40000/40000 [==============================] - 1s 15us/step - loss: 0.0486 - acc: 0.9844
[Condensed Keras training log: the remaining grid-search fits each train on 40000 samples and evaluate on 20000 (3-fold cross-validation over the 60000 samples), running 10 or 20 epochs at roughly 8-29 us/step; final-epoch training accuracy ranges from about 0.952 to 0.986 across the parameter combinations. A final fit on all 60000 samples for 20 epochs ends at loss 0.0439, acc 0.9861.]
CPU times: user 7min 56s, sys: 1min 6s, total: 9min 3s
Wall time: 3min 43s
```python
# print results
print(f'Best Accuracy for {grid_result.best_score_:.4} using {grid_result.best_params_}')
means = grid_result.cv_results_['mean_test_score']
stds = grid_result.cv_results_['std_test_score']
params = grid_result.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
    print(f'mean={mean:.4}, std={stdev:.4} using {param}')
```
Best Accuracy for 0.9712 using {'batch_size': 128, 'epochs': 20, 'init': 'glorot_uniform'}
mean=0.9687, std=0.002174 using {'batch_size': 128, 'epochs': 10, 'init': 'glorot_uniform'}
mean=0.966, std=0.000827 using {'batch_size': 128, 'epochs': 10, 'init': 'uniform'}
mean=0.9712, std=0.0006276 using {'batch_size': 128, 'epochs': 20, 'init': 'glorot_uniform'}
mean=0.97, std=0.001214 using {'batch_size': 128, 'epochs': 20, 'init': 'uniform'}
mean=0.9594, std=0.001476 using {'batch_size': 512, 'epochs': 10, 'init': 'glorot_uniform'}
mean=0.9516, std=0.003239 using {'batch_size': 512, 'epochs': 10, 'init': 'uniform'}
mean=0.9684, std=0.003607 using {'batch_size': 512, 'epochs': 20, 'init': 'glorot_uniform'}
mean=0.9633, std=0.0007962 using {'batch_size': 512, 'epochs': 20, 'init': 'uniform'}
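For reference, here is a minimal sketch of the kind of setup that could produce the output above. It assumes a scikit-learn `GridSearchCV` over a `KerasClassifier`, a hypothetical `build_model` function, and 3-fold cross-validation (inferred from the 40000/20000 splits); it is not the author's exact code.
```python
# Hypothetical reconstruction; build_model, X_train, y_train are assumptions.
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import GridSearchCV

def build_model(init='glorot_uniform'):
    # A small dense classifier; the actual architecture is not shown in the log.
    model = Sequential()
    model.add(Dense(64, activation='relu', kernel_initializer=init, input_shape=(784,)))
    model.add(Dense(10, activation='softmax', kernel_initializer=init))
    model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
    return model

param_grid = {
    'batch_size': [128, 512],
    'epochs': [10, 20],
    'init': ['glorot_uniform', 'uniform'],
}
grid = GridSearchCV(KerasClassifier(build_fn=build_model), param_grid, cv=3)
# grid_result = grid.fit(X_train, y_train)  # X_train, y_train assumed defined earlier
```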
|
[STATEMENT]
lemma second_summand_not_universe: "x \<oplus> y \<noteq> u \<Longrightarrow> y \<noteq> u"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x \<oplus> y \<noteq> u \<Longrightarrow> y \<noteq> u
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. x \<oplus> y \<noteq> u \<Longrightarrow> y \<noteq> u
[PROOF STEP]
assume antecedent: "x \<oplus> y \<noteq> u"
[PROOF STATE]
proof (state)
this:
x \<oplus> y \<noteq> u
goal (1 subgoal):
1. x \<oplus> y \<noteq> u \<Longrightarrow> y \<noteq> u
[PROOF STEP]
show "y \<noteq> u"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. y \<noteq> u
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. y = u \<Longrightarrow> False
[PROOF STEP]
assume "y = u"
[PROOF STATE]
proof (state)
this:
y = u
goal (1 subgoal):
1. y = u \<Longrightarrow> False
[PROOF STEP]
hence "x \<oplus> u \<noteq> u"
[PROOF STATE]
proof (prove)
using this:
y = u
goal (1 subgoal):
1. x \<oplus> u \<noteq> u
[PROOF STEP]
using antecedent
[PROOF STATE]
proof (prove)
using this:
y = u
x \<oplus> y \<noteq> u
goal (1 subgoal):
1. x \<oplus> u \<noteq> u
[PROOF STEP]
by (rule subst)
[PROOF STATE]
proof (state)
this:
x \<oplus> u \<noteq> u
goal (1 subgoal):
1. y = u \<Longrightarrow> False
[PROOF STEP]
thus "False"
[PROOF STATE]
proof (prove)
using this:
x \<oplus> u \<noteq> u
goal (1 subgoal):
1. False
[PROOF STEP]
using universe_absorbing
[PROOF STATE]
proof (prove)
using this:
x \<oplus> u \<noteq> u
?x \<oplus> u = u
goal (1 subgoal):
1. False
[PROOF STEP]
..
[PROOF STATE]
proof (state)
this:
False
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
y \<noteq> u
goal:
No subgoals!
[PROOF STEP]
qed |
# Introduction
These interactive notebooks are meant to be executed in order.
Press `Shift+Enter` to run a code cell.
```python
print("Hello","World")
```
For symbolic computation we will use the `sympy` package:
```python
from sympy import *
```
Symbols for computation are created with `symbols`:
```python
x, y, t = symbols("x, y, t")
alpha, beta, gamma = symbols("alpha, beta, gamma", cls=Function)
alpha(t) + x
```
Matrices are created from a list of lists:
```python
m = Matrix([
    [cos(alpha(t)), -sin(alpha(t)), 0],
    [sin(alpha(t)), cos(alpha(t)), 0],
    [0, 0, 1]
])
v = Matrix([
    [x],
    [y],
    [1]
])
```
```python
rotated = m * v
rotated
```
`sympy` makes it easy to differentiate expressions:
```python
velocity = diff(rotated, t)
velocity[0]
```
```python
velocity[1]
```
Besides that, parts of expressions can be substituted.
For example, if
$$\alpha(t) = t$$
we obtain the following velocities:
```python
velocity[0].replace(alpha(t), t).simplify()
```
```python
velocity[1].replace(alpha(t), t).simplify()
```
And if the rotation has constant angular acceleration,
$$ \alpha(t) = t^2 $$
we obtain the following linear velocities:
```python
velocity[0].replace(alpha(t), t**2).simplify()
```
```python
velocity[1].replace(alpha(t), t**2).simplify()
```
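As a quick sanity check (a sketch using the `velocity`, `alpha`, `x`, `y` defined above; this cell is not part of the original notebook): for $\alpha(t) = t$ the squared speed should simplify to $x^2 + y^2$, i.e. it depends only on the distance from the rotation axis.
```python
# Squared speed of the rotated point; for alpha(t) = t this should
# simplify to x**2 + y**2.
speed_squared = (velocity.T * velocity)[0]
simplify(speed_squared.replace(alpha(t), t))
```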
|
import LMT
variable {I} [Nonempty I] {E} [Nonempty E] [Nonempty (A I E)]
example {a1 a2 a3 : A I E} :
((((a2).write i2 (v1)).write i1 (v1)).read i2) ≠ (v1) → False := by
arr
|
// Distributed under the MIT License.
// See LICENSE.txt for details.
#pragma once
#include <boost/functional/hash.hpp>
#include <cstddef>
#include <utility>
#include "DataStructures/DataBox/Tag.hpp"
#include "DataStructures/FixedHashMap.hpp"
#include "Domain/Structure/Direction.hpp"
#include "Domain/Structure/ElementId.hpp"
#include "Domain/Structure/MaxNumberOfNeighbors.hpp"
#include "Evolution/DgSubcell/NeighborData.hpp"
namespace evolution::dg::subcell::Tags {
/// The neighbor data for reconstruction and the RDMP troubled-cell indicator.
///
/// This also holds the self-information for the RDMP at the time level `t^n`
/// (the candidate is at `t^{n+1}`), with id `ElementId::external_boundary_id`
/// and `Direction::lower_xi()` as the (arbitrary and meaningless)
/// direction.
template <size_t Dim>
struct NeighborDataForReconstructionAndRdmpTci : db::SimpleTag {
using type =
FixedHashMap<maximum_number_of_neighbors(Dim) + 1,
std::pair<Direction<Dim>, ElementId<Dim>>, NeighborData,
boost::hash<std::pair<Direction<Dim>, ElementId<Dim>>>>;
};
} // namespace evolution::dg::subcell::Tags
|
lemma has_vector_derivative_circlepath [derivative_intros]: "((circlepath z r) has_vector_derivative (2 * pi * \<i> * r * exp (2 * of_real pi * \<i> * x))) (at x within X)" |
(** * Testcases for [we_conclude_that.v]
Authors:
- Lulof Pirée (1363638)
Creation date: 3 June 2021
--------------------------------------------------------------------------------
This file is part of Waterproof-lib.
Waterproof-lib is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Waterproof-lib is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Waterproof-lib. If not, see <https://www.gnu.org/licenses/>.
*)
From Ltac2 Require Import Ltac2.
From Ltac2 Require Option.
From Ltac2 Require Import Int.
Require Import Waterproof.message.
Require Import Reals.
Require Import micromega.Lra.
Require Import Waterproof.test_auxiliary.
Require Import Waterproof.selected_databases.
Require Import Waterproof.set_intuition.Disabled.
Require Import Waterproof.set_search_depth.To_5.
Require Import Waterproof.populate_database.waterproof_core.
Require Import Waterproof.populate_database.waterproof_integers.
Require Import Waterproof.populate_database.waterproof_reals.
Require Import Waterproof.populate_database.all_databases.
Require Import Waterproof.load_database.RealsAndIntegers.
Load we_conclude_that.
Require Import Waterproof.load_database.DisableWildcard.
Ltac2 store_verbosity := verbosity.
Ltac2 Set verbosity := fun () => if (le (test_verbosity ()) 0)
then -1 else (store_verbosity ()).
(* lra only works in the [R_scope] *)
Open Scope R_scope.
Lemma zero_lt_one: 0 < 1.
Proof.
ltac1:(lra).
Qed.
(* -------------------------------------------------------------------------- *)
(** * Testcases for [We conclude that ... ] *)
(** * Test 1
Base case: should easily be possible to finish the goal.
*)
Lemma test_we_conclude_1: True.
Proof.
We conclude that True.
Qed.
(** * Test 2
Error case: wrong goal provided.
*)
Lemma test_we_conclude_2: True.
Proof.
let result () := We conclude that False in
assert_raises_error result.
Abort.
(** * Test 3
Warning case: provided goal is equivalent,
but uses an alternative notation.
*)
Lemma test_we_conclude_3: 2 = 2.
Proof.
print (of_string "Should raise warning:").
We conclude that (1+1 = 2).
Qed.
(** * Test 4
Base case, like test 1 but more complex goal.
*)
Lemma test_we_conclude_4: forall A: Prop, (A \/ ~A -> True).
Proof.
intros A h.
We conclude that (True).
Qed.
(** * Test 5
Removed test case because it takes too long.
Error case: Waterprove cannot find proof
(because the statement is false!).
Lemma test_we_conclude_5: 0 = 1.
Proof.
let result () := We conclude that (0 = 1) in
assert_raises_error result.
Abort. *)
(** * Test 6
Alternative [It follows that ...] notation.
*)
Lemma test_we_conclude_6: True.
Proof.
It follows that True.
Qed.
(* -------------------------------------------------------------------------- *)
(** * Testcases for [By ... we conclude that ... ] *)
(** * Test 1
Base case: should easily be possible to finish the goal,
even with the provided lemma.
NOTE: this test would also pass if we just
write
[We conclude that (2 = 1).]
Apparently, waterprove is powerful enough to apply symmetry to hypotheses.
*)
Lemma test_by_we_conclude_1: (1 = 2) -> (2 = 1).
Proof.
intros h.
apply eq_sym in h. (* Rewrite h as (2 = 1) using symmetry of "="*)
By h we conclude that (2 = 1).
Qed.
(** * Test 2
Removed test case as it required an axiom.
Base case: should easily be possible to finish the goal,
but only with the given lemma.
Lemma test_by_we_conclude_2: 0 + 0 = 20.
Proof.
assert (zero_plus_zero_is_twenty: 0+0 = 20).
rewrite zero_is_ten. (* Get goal: [10 + 10 = 20]*)
ltac1:(lra).
(* This is just to test if the extra lemma is really needed: *)
let failure () := We conclude that (0 + 0 = 10) in
assert_raises_error failure.
By zero_plus_zero_is_twenty we conclude that (0 + 0 = 20).
Qed.*)
(** * Test 3
Warning case: provided goal is equivalent,
but uses an alternative notation.
*)
Lemma test_by_we_conclude_3: 2 = 1 + 1.
Proof.
print (of_string "Should raise warning:").
By zero_lt_one we conclude that (2 = 2).
Qed.
(** * Test 4
Removed test case as it takes too long.
Error case: Waterprove cannot find proof
(because the statement is false,
even with the given lemma).
Lemma test_by_we_conclude_4: 0 = 1.
Proof.
let result () := By zero_is_ten we conclude that (0 = 1) in
assert_raises_error result.
Abort.*)
(** * Test 5
Shows that [waterprove]
can solve [(1 < 2)]
without explicitly
the lemma that states [0 < 1].
*)
Lemma test_by_we_conclude_5: 1 < 2.
Proof.
assert (useless: 1 = 1).
reflexivity.
By useless we conclude that (1 < 2).
Qed.
(** * Example for the SUM.
Somewhat more realistic context.
*)
Open Scope nat_scope.
Inductive even : nat -> Prop :=
even0 : even 0
| evenS : forall x:nat, even x -> even (S (S x)).
Lemma sum_example_by_we_conclude: forall x:nat, x = 2 -> even x.
Proof.
intros x h.
rewrite h. (* Change the goal to "even 2"*)
apply evenS. (* Change the goal to "even 0"*)
By even0 we conclude that (even 0).
Qed.
Require Import Waterproof.definitions.inequality_chains.
Open Scope R_scope.
(** * Test 4
We make an exception on the goal check when the argument is a chain of inequalities
*)
Goal (3 < 5).
We conclude that (& 3 < 4 < 5).
Qed.
Goal (3 = 3).
We conclude that (& 3 = 3 = 3).
Qed.
Goal forall eps : R, eps > 0 -> (Rmin (eps / 2) 1 <= eps).
intro eps.
intro eps_gt_0.
assert (& Rmin (eps/2) 1 <= eps/2 <= eps).
auto with waterproof_core reals.
auto with reals.
Qed.
Close Scope R_scope.
(** 'We conclude that' should accept (in nat_scope) (& 3 &<4 &< 5) for (3<5).*)
Goal (3 < 5).
We conclude that (& 3 < 4 < 5).
Qed.
(** 'We conclude that' should accept (in nat_scope) (& 3 &<4 &<= 5) for (3<5).*)
Goal (3 < 5).
We conclude that (& 3 < 4 <= 5).
Qed.
(** 'We conclude that' should accept (in nat_scope) (& 3 &<4 &< 5) for (3<=5).*)
Goal (3 <= 5).
We conclude that (& 3 < 4 < 5).
Qed.
(** * Test 6
Test whether wrapped goals requiring users to write out what they need to show
can be solved immediately. It is irritating to have to write something like:
'We need to show that (a < b).'
'We conclude that (a < b).'
*)
Goal (StateGoal.Wrapper (0 = 0)).
Proof.
We conclude that (0 = 0).
Qed.
(** * Test 7
Test whether the tactic throws an error for other wrappers.
*)
Goal (Case.Wrapper (0 = 1) (0 = 0)).
Proof.
Fail We conclude that (0 = 0).
Abort.
(** * Test 8 *)
(** Actually tests the [waterprove] automation subroutine, but this seemed like a
convenient place to test. *)
(** Tests whether the error points out which specific (in)equality in the chain does not hold. *)
Local Parameter A : Type.
Local Parameter x y z : A.
Goal (& x = y = z).
Proof.
Fail We conclude that (& x = y = z). (* Expected: unable to find proof (x = y) *)
Abort.
Goal (x = y) -> (& x = y = z).
Proof.
intro p.
Fail We conclude that (& x = y = z). (* Expected: unable to find proof (y = z) *)
Abort.
Ltac2 Set verbosity := store_verbosity.
|
Section Feelings.
(** You have feelings about stuff. Everyone has feelings about stuff.
There are furthermore exactly two kinds of feelings: hate and love.
This is naturally expressed as a boolean. **)
Variable feelings : forall A : Type, A -> bool.
(** If your feelings say that it is true, it is probably true. **)
Axiom gut_feeling : forall P : Prop, feelings _ P = true -> P.
(** Alternatively, if you hate something, it is likely false. **)
Axiom hate : forall P : Prop, feelings _ P = false -> ~ P.
End Feelings.
(** Let the hate speak now. **)
Theorem nihilism : forall P, ~ P.
Proof.
intro. apply hate with (feelings := fun _ _ => false). reflexivity.
Qed.
Corollary nothing_matters : False.
Proof.
apply gut_feeling with (feelings := fun _ _ => true). reflexivity.
Qed.
|
Theorem ex50: forall a b c : Prop,
(a -> b) -> ((b -> c) -> (a -> c)).
Proof.
intros. apply (H0 (H H1)).
Qed.
Theorem ex50_1: forall a b c : Prop,
(a <-> b) -> ((b <-> c) -> (a <-> c)).
Proof.
Require Import Coq.Program.Basics.
intros. elim H. elim H0. intros. split.
apply (compose H1 H3). apply (compose H4 H2).
Qed.
Theorem ex50_2: forall a b c : Prop,
(a -> b) -> ((c -> a) -> (c -> b)).
Proof.
intros. apply (H (H0 H1)).
Qed.
Theorem ex51: forall a b c : Prop,
(a -> (b -> c)) <-> (b -> (a -> c)).
Proof.
split. intros. apply (H H1 H0). intros.
apply (H H1 H0).
Qed.
Theorem ex52: forall a b c : Prop,
(a -> (b -> c)) <-> (a /\ b -> c).
Proof.
split. intros. elim H0. assumption.
intros. apply H. split. assumption. assumption.
Qed.
Theorem ex53: forall a b : Prop,
~a -> (a -> b).
Proof.
intros. contradiction.
Qed.
Theorem ex53_1: forall a b : Prop,
((a -> b) -> a) -> a.
Proof.
Require Import Classical.
intros. apply NNPP. intro. cut a. intro.
contradiction. apply H. intro. contradiction.
Qed.
Theorem ex53_2: forall a b c : Prop,
(((a -> b) -> c) -> (a -> b)) -> (a -> b).
Proof.
Require Import Classical.
intros. apply NNPP. intro. apply H1.
apply H. intro. elim H1. cut b. intro.
contradiction. apply (H2 H0). assumption.
Qed.
Theorem ex53_3: forall a b : Prop,
((((a -> b) -> a) -> a) -> a) -> a.
Proof.
Require Import Classical.
intros. apply NNPP. intro.
apply H0. apply H. intro.
apply H1. intro. apply NNPP. intro.
apply H0. assumption.
Qed.
Theorem ex53_4: forall a b : Prop,
((((a -> b) -> a) -> a) -> b) -> b.
Proof.
Require Import Coq.Program.Basics.
intros. apply H. intros. apply H0. intro.
refine (apply H (fun H0 => H1)).
Qed.
Theorem ex53_5: forall a b c : Prop,
((((((a -> b) -> c) -> c)
-> a) -> a) -> b) -> b.
Proof.
intros. apply H. intros. apply H0. intros.
apply H1. intros. apply H.
intros. assumption.
Qed.
Theorem ex53_6: forall a b c d : Prop,
((((((((a -> b) -> c) -> c) -> d)
-> d) -> a) -> a) -> b) -> b.
Proof.
intros. apply H. intros. apply H0. intros.
apply H1. intros. apply H2. intros.
apply H. intros. assumption.
Qed.
Theorem ex53_7: forall a b : Prop,
((a -> b) -> b) -> ((b -> a) -> a).
Proof.
intros. apply NNPP. intro.
apply H1. apply H0. apply H. intro. contradiction.
Qed. |
import data.set.basic
import topology.basic
import algebra.module.basic
import algebra.module.submodule
import order.complete_lattice
import analysis.normed_space.basic
-- Soient F une partie fermée non vide d'un espace normé E et x ∈ E . Montrer
-- d(x, F ) = 0 ⇐⇒ x ∈ F .
theorem exo {R E: Type*} [normed_field R] [normed_group E] [normed_space R E] :
forall (F: set E) (x: E), (is_closed F) -> (set.nonempty F) ->
set.mem x F <-> infi (dist x) = 0
:= sorry
|
# Part 1: Introduction to Python
## 1.1 [About Python](https://lectures.quantecon.org/py/about_py.html#id3)
### 1.1.1 Overview
### 1.1.2 What's Python?
### 1.1.3 Scientific Programming
- [Machine learning and data science](http://scikit-learn.org/stable/)
- [Astronomy](http://www.astropy.org/)
- [Artificial intelligence](https://wiki.python.org/moin/PythonForArtificialIntelligence)
- [Chemistry](http://chemlab.github.io/chemlab/)
- [Computational biology](http://biopython.org/)
- [Meteorology](https://pypi.python.org/pypi/metrology)
### 1.1.4 Learn More
#### Numerical Programming
```python
import numpy as np # Load the library
a = np.linspace(-np.pi,np.pi,100) # Create even grid from -pi to pi
b = np.cos(a) # Apply cosine to each element of a
c = np.sin(a) # Apply sin to each element of a
```
```python
np.dot(b,c) # matrix multiplication
```
-1.8041124150158794e-16
```python
b @ c # matrix multiplication
# element-wise product: np.multiply(), 或 *
```
-1.8041124150158794e-16
#### SciPy
The SciPy library is built on top of NumPy and provides additional functionality. For example, let's calculate $\int^2_{-2}{\phi(z)dz}$ where $\phi$ is the standard normal density.
```python
from scipy.stats import norm # Load norm (normal distribution)
from scipy.integrate import quad # Load quad (caculating intergration)
theta = norm() # Theta follows normal distribution
value, error = quad(theta.pdf, -2, 2) # Integrate using Gaussian quadrature; .pdf is the density (.cdf is the CDF); -2, 2 are the integration limits
value # display the result
```
0.9544997361036417
```python
# compute the same value manually via the cdf
import scipy.stats # Load library
value=1-2*scipy.stats.norm.cdf(-2,loc=0,scale=1) #http://blog.csdn.net/claroja/article/details/72830515
value
```
0.95449973610364158
SciPy includes many of the standard routines used in
- [linear algebra](https://docs.scipy.org/doc/scipy/reference/linalg.html)
- [integration](https://docs.scipy.org/doc/scipy/reference/integrate.html)
- [interpolation](https://docs.scipy.org/doc/scipy/reference/interpolate.html)
- [optimization](https://docs.scipy.org/doc/scipy/reference/optimize.html)
- [distributions and random number generation](https://docs.scipy.org/doc/scipy/reference/stats.html)
- [signal processing](https://docs.scipy.org/doc/scipy/reference/signal.html)
- [etc.](https://docs.scipy.org/doc/scipy/reference/index.html)
#### Graphics
[Library Matplotlib](https://matplotlib.org/gallery.html)
Other graphics libraries include
- [Plotly](https://plot.ly/python/)
- [Bokeh](http://bokeh.pydata.org/en/latest/)
- [VPython](http://www.vpython.org/)
#### Symbolic Algebra
```python
from sympy import Symbol
```
Symbolic Computing
```python
x, y = Symbol('x'), Symbol('y') # Treat 'x' and 'y' as algebraic symbols
x + x + x + y
```
3*x + y
Expanding Expressions
```python
expression = (x + y)**2 # ** is exponentiation
expression.expand() # .expand() multiplies the expression out
```
x**2 + 2*x*y + y**2
Solving Polynomials
```python
from sympy import solve
solve(x**2 + x + 2) # solve the equation x**2 + x + 2 = 0
```
[-1/2 - sqrt(7)*I/2, -1/2 + sqrt(7)*I/2]
```python
solve(x**2 + 2*x + 1)
```
[-1]
Calculating Limits, Derivatives and Integrals
```python
from sympy import limit, sin, diff
limit(1 / x, x, 0) # limit of 1/x as x tends to 0
```
oo
```python
limit(sin(x) / x, x, 0)
```
1
```python
diff(sin(x), x) # derivative of sin(x) with respect to x
```
cos(x)
#### Statistics
Python’s data manipulation and statistics libraries have improved rapidly over the last few years
#### Pandas
How do we generate random numbers?
- numpy.random.uniform(low=0.0, high=1.0, size=None): uniform distribution on [low, high), default [0, 1)
- numpy.random.random(size=None)
- numpy.random.random((2, 3)): the shape is given as a tuple
- numpy.random.normal(loc, scale, size): Gaussian draws with mean loc and standard deviation scale
- numpy.random.rand(d0, d1, ..., dn): uniform draws on [0, 1)
numpy.random.randn(d0, d1, ..., dn): unbounded, standard normal draws
 - with no arguments, the call returns a single float
 - with one argument, it returns a rank-1 array, which cannot represent a row or column vector (see the sketch after this list)
 - with two or more arguments, it returns an array of the corresponding shape, which can represent a vector or matrix
- numpy.random.standard_normal() behaves like np.random.randn(), except that it takes its shape as a tuple
- numpy.random.randint(low, high=None, size=None, dtype='l'): integer draws
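One point from the list above is worth checking concretely: a one-argument call returns a rank-1 array, which is neither a row nor a column vector. A short sketch:
```python
import numpy as np

a = np.random.rand(3)     # rank-1 array, shape (3,)
b = np.random.rand(3, 1)  # 2-D column vector, shape (3, 1)
print(a.shape, b.shape)   # (3,) (3, 1)
print(a.T.shape)          # (3,): transposing a rank-1 array changes nothing
print(b.T.shape)          # (1, 3): a proper row vector
```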
```python
np.random.uniform()
```
0.8474738087470629
```python
np.random.uniform(2,3,6) # 6 draws from the uniform distribution on [2, 3)
```
array([ 2.9860889 , 2.19519615, 2.00777363, 2.23653008, 2.05505467,
2.89117726])
```python
np.random.uniform(2,3,(2,2))
```
array([[ 2.53819935, 2.85976506],
[ 2.91869204, 2.64991819]])
```python
np.random.random()
```
0.7304829610087114
```python
np.random.random(10)
```
array([ 0.86273288, 0.5279635 , 0.33135421, 0.72708269, 0.86928968,
0.47071769, 0.05077265, 0.69756921, 0.16713672, 0.54353137])
```python
np.random.random((3,4))
```
array([[ 0.96642415, 0.37624851, 0.32901655, 0.58515682],
[ 0.62271202, 0.56993156, 0.47964188, 0.42931829],
[ 0.60408005, 0.77272266, 0.00162158, 0.52869527]])
```python
np.random.normal(1,0.5,10)
```
array([ 1.20349355, 1.17745893, 0.98227701, 1.58086968, 1.54173165,
1.01738413, 0.91850826, 1.16709306, 1.03949584, 1.00972302])
```python
np.mean(np.random.normal(1,0.5,1000))
```
0.97477448694503321
```python
np.random.normal(5,0.8,(2,4))
```
array([[ 5.37148902, 5.52165235, 3.10762657, 3.21051536],
[ 6.40331996, 5.35974761, 5.53317151, 4.81759955]])
```python
np.random.rand()
```
0.6991420637529272
```python
np.random.rand(3)
```
array([ 0.59372934, 0.2606707 , 0.85677761])
```python
np.random.rand(2,2)
```
array([[ 0.12969677, 0.26830264],
[ 0.26575202, 0.15031921]])
```python
np.random.rand(3,2,2) # 3 blocks of shape 2x2
```
array([[[ 0.04667944, 0.56022617],
[ 0.99076839, 0.2932098 ]],
[[ 0.70883542, 0.72276065],
[ 0.0241512 , 0.66399433]],
[[ 0.56175472, 0.93324534],
[ 0.47822311, 0.29943678]]])
```python
np.random.rand(2,4,3,3) # 2 groups, each of 4 blocks of shape 3x3
```
array([[[[ 0.91655043, 0.12037943, 0.85462637],
[ 0.31528861, 0.59644266, 0.24527812],
[ 0.41429085, 0.80717022, 0.21825278]],
[[ 0.89791812, 0.19810456, 0.75128835],
[ 0.34516507, 0.8161068 , 0.6074346 ],
[ 0.19217673, 0.46841586, 0.22780444]],
[[ 0.0995358 , 0.38507414, 0.13525457],
[ 0.71547676, 0.58165184, 0.54236188],
[ 0.94241399, 0.42352624, 0.52700985]],
[[ 0.55401586, 0.97291657, 0.26568722],
[ 0.94813474, 0.69113245, 0.87098301],
[ 0.58442572, 0.24803309, 0.18808223]]],
[[[ 0.82405861, 0.06174618, 0.44058703],
[ 0.04691451, 0.01917401, 0.97859124],
[ 0.95750291, 0.38926466, 0.18569959]],
[[ 0.19203849, 0.44958773, 0.61960534],
[ 0.43102461, 0.79000366, 0.43515503],
[ 0.16416595, 0.6032064 , 0.1556198 ]],
[[ 0.62336311, 0.26819523, 0.01729941],
[ 0.01069535, 0.69215579, 0.03171449],
[ 0.6912115 , 0.17182265, 0.24050492]],
[[ 0.59704035, 0.48744065, 0.74663878],
[ 0.85071639, 0.89530915, 0.23641053],
[ 0.3558962 , 0.63559743, 0.79752732]]]])
```python
np.random.randn(2,2,3)
```
array([[[ 0.41860832, 0.20000057, 1.61377788],
[ 0.34181873, -1.41733621, 0.31367332]],
[[-2.18577938, 0.47208238, 1.08411372],
[ 0.56983538, -1.25560168, -1.44829982]]])
```python
np.random.standard_normal((2,2,3))
```
array([[[ 0.33496069, 0.15313107, 0.68528172],
[ 0.20353637, -0.06418014, -0.2242228 ]],
[[ 0.01099221, 0.10426834, -0.51487265],
[ 1.17121491, -0.55703709, -0.22933557]]])
```python
np.random.randint(5,7,(2,2,3)) # integers in [5, 7), shape (2, 2, 3)
```
array([[[6, 6, 6],
[5, 5, 5]],
[[5, 6, 6],
[6, 5, 6]]])
```python
import pandas as pd
np.random.seed(1234) # seed the generator so the draws are reproducible
data = np.random.randn(5,2) # 5x2 matrix of N(0, 1) random draws
dates = pd.date_range('01/03/2018', periods = 5) # 5 consecutive dates starting 2018-01-03
df = pd.DataFrame(data, columns = ('price', 'weight'), index = dates) # DataFrame: (data, column names, indexed by dates)
print(df)
```
price weight
2018-01-03 0.471435 -1.190976
2018-01-04 1.432707 -0.312652
2018-01-05 -0.720589 0.887163
2018-01-06 0.859588 -0.636524
2018-01-07 0.015696 -2.242685
```python
df.mean() # column means
```
price 0.411768
weight -0.699135
dtype: float64
#### Other Useful Statistics Libraries
- [statsmodels](http://www.statsmodels.org/stable/index.html)— various statistical routines
- [scikit-learn](http://scikit-learn.org/stable/) — machine learning in Python (sponsored by Google, among others)
- [pyMC](http://pymc-devs.github.io/pymc/)—for Bayesian data analysis
- [pystan](https://pystan.readthedocs.io/en/latest/) Bayesian analysis based on [stan](http://mc-stan.org/)
#### Networks and Graphs
Library [NetworkX](http://networkx.github.io/)
A simple network example
```python
import networkx as nx # network library
import matplotlib.pyplot as plt # plot library
import numpy as np
np.random.seed(1234) # seed the random number generator
# Generate random graph
# function: numpy.random.uniform(low,high,size)
p = dict((i,(np.random.uniform(0,1),np.random.uniform(0,1))) for i in range(200))
# dict: dictionary
# generates {0:(0.5,0.8), 1:(0.6,0.7), 2:(0.2,0.9),.....,199:(0.3,0.8)}
G = nx.random_geometric_graph(200, 0.12, pos = p)
# position is stored as node attribute data for random_geometric_graph
# <networkx.classes.graph.Graph at 0xc03c588>
pos = nx.get_node_attributes(G, 'pos')
# Get node attributes from graph
```
`nx.random_geometric_graph(n, radius, dim=2, pos=None)`
- `n` : int; Number of nodes
- `radius`: float ; Distance threshold value
- `dim` : int, optional; Dimension of graph
- `pos` : dict, optional ; A dictionary keyed by node with node positions as values.
` nx.get_node_attributes(G, name)`
- `G` : NetworkX Graph
- `name` : string ; Attribute name
```python
# find node nearest the center point (0.5, 0.5)
# (node colors will vary with distance from the center)
dists = [(x - 0.5)**2 + (y - 0.5)**2 for x,y in list(pos.values())]
# dists = [(x - 0.2)**2 + (y - 0.8)**2 for x,y in list(pos.values())]
ncenter = np.argmin(dists) # argmin() finds the node closest to the center; call it C
```
```python
# Plot graph, coloring by path length from central node
p = nx.single_source_shortest_path_length(G, ncenter) # shortest path length from C to every other node
plt.figure() # create an empty figure
nx.draw_networkx_edges(G, pos, alpha=0.5) # draw the edges; alpha is edge transparency
nx.draw_networkx_nodes(G,  # draw the nodes
                       pos,  # node positions
                       nodelist=list(p.keys()),  # Draw only specified nodes (default G.nodes())
                       node_size=120, alpha=0.5,
                       node_color=list(p.values()),
                       cmap=plt.cm.jet_r)  # Colormap for mapping intensities of nodes (default=None)
plt.show()
```
#### Cloud Computing
Library: [Wakari](https://www.wakari.io/)
See also
- [Amazon Elastic Compute Cloud](http://aws.amazon.com/ec2/)
- [The Google App Engine (Python, Java, PHP or Go)](https://cloud.google.com/appengine/)
- [Pythonanywhere](https://www.pythonanywhere.com/)
- [Sagemath Cloud](https://cloud.sagemath.com/)
#### Parallel Processing
- [Parallel computing through IPython clusters](http://ipython.org/ipython-doc/stable/parallel/parallel_demos.html)
- [The Starcluster interface to Amazon’s EC2](http://star.mit.edu/cluster/)
- GPU programming through [PyCuda](https://wiki.tiker.net/PyCuda), [PyOpenCL](https://mathema.tician.de/software/pyopencl/), [Theano](http://deeplearning.net/software/theano/) or similar
#### Other Developments
- [Jupyter](http://jupyter.org/)—Python in your browser with code cells, embedded images, etc.
- [Numba](http://numba.pydata.org/) —Make Python run at the same speed as native machine code!
- [Blaze](http://blaze.pydata.org/) —a generalization of NumPy
- [PyTables](http://www.pytables.org/)— manage large data sets
- [CVXPY](https://github.com/cvxgrp/cvxpy) — convex optimization in Python
## 1.2 [Setting up Your Python Environment](https://lectures.quantecon.org/py/getting_started.html)
### 1.2.1 Overview
### 1.2.2 First Steps Anaconda
### 1.2.3 Jupyter
#### A Test Program
Don’t worry about the details for now — let’s just run it and see what happens
```python
import numpy as np
import matplotlib.pyplot as plt
N = 20
theta = np.linspace(0.0, 2 * np.pi, N, endpoint = False) # 20 angles evenly spaced over [0, 2*pi)
radii = 10 * np.random.rand(N) # random radii in [0, 10)
width = np.pi / 4 * np.random.rand(N) # random bar widths
ax = plt.subplot(111, polar=True)
bars = ax.bar(theta, radii, width=width, bottom=0.0)
# Use custom colors and opacity
for r, bar in zip(radii, bars):
    bar.set_facecolor(plt.cm.jet(r / 10.))
    bar.set_alpha(0.5)
plt.show()
```
```python
np.random?
```
Type `np.` and press Tab to browse the available attributes.
### 1.2.4 Additional Software
#### Installing QuantEcon.py
Open a command prompt (on Windows: Win+R, then type `cmd`) and run
`
pip install quantecon
`
#### Method 1: Copy and Paste
#### Method 2: Run
- pwd asks Jupyter to show the PWD (or %pwd — see the comment about automagic above)
– This is where Jupyter is going to look for files to run
– Your output will look a bit different depending on your OS
- ls asks Jupyter to list files in the PWD (or %ls)
– Note that test.py is there (on our computer, because we saved it there earlier)
- cat test.py asks Jupyter to print the contents of test.py (or !type test.py on Windows)
- run test.py runs the file and prints any output
### 1.2.5 Alternatives
ipython blah blah
### 1.2.6 Exercises
## [1.3 An Introductory Example](https://lectures.quantecon.org/py/python_by_example.html)
### 1.3.1 Overview
### 1.3.2 The Task: Plotting a White Noise Process
### 1.3.3 Version 1
```python
import numpy as np
import matplotlib.pyplot as plt
x = np.random.randn(100) # 100 standard normal draws
plt.plot(x) # plot x
plt.show() # display the figure
```
```python
import numpy as np
np.sqrt(4) # square root
```
2.0
```python
import numpy
numpy.sqrt(4)
```
2.0
```python
from numpy import sqrt
sqrt(4)
```
2.0
### 1.3.4 Alternative Versions
#### A Version with a For Loop
```python
import numpy as np
import matplotlib.pyplot as plt
ts_length = 100
_values = [] # start with an empty list so we can append to it
for i in range(ts_length): # loop ts_length = 100 times
    e = np.random.randn() # draw one random number per iteration
    _values.append(e) # append it to the list
plt.plot(_values, 'b-') # 'b-': line style, blue solid line
#plt.plot(_values, 'r-') # try red instead
plt.show()
```
#### Lists
- Lists are enclosed in []
- Tuples are enclosed in ()
```python
x = [10, 'foo', False] # a list can hold heterogeneous data: an integer, a string, and a boolean
type(x) # what type is x?
```
list
```python
x # show the contents of x
```
[10, 'foo', False]
```python
x.append(2.5) # append another element
x # show x again
```
[10, 'foo', False, 2.5]
```python
x.pop() # remove and return the last element
```
2.5
```python
x # again
```
[10, 'foo', False]
```python
x[0] # first element; indexing starts at 0
```
10
```python
x[1] # second element
```
'foo'
#### The For Loop 循环
```python
for i in range(ts_length): # the loop we saw above
    e = np.random.randn()
    _values.append(e)
```
```python
animals = ['dog', 'cat', 'bird']
for animal in animals: # the loop variable could be named i, a, or anything else
    print("The plural of " + animal + " is " + animal + "s")
    # goal: print "The plural of <animal> is <animal>s"
    # strings are quoted, spaces included, and joined with +
    # three elements, so the loop runs three times
```
The plural of dog is dogs
The plural of cat is cats
The plural of bird is birds
`
for variable_name in sequence:
<code block>
`
#### Code Blocks and Indentation
Three forms of code blocks:
`
for i in range(10):
if x > y:
while x < 100:
etc., etc.
`
#### While Loops
```python
import numpy as np
import matplotlib.pyplot as plt
ts_length = 100
_values = []
i = 0
while i < ts_length: # loop while i is less than 100
    e = np.random.randn()
    _values.append(e)
    i = i + 1 # i rises by 1 each pass; the loop exits once i reaches 100, so i runs over 0..99, i.e. 100 iterations
plt.plot(_values, 'b-')
plt.show()
```
#### User-Defined Functions
The same loop, packaged as a user-defined function
```python
import numpy as np
import matplotlib.pyplot as plt
def generation_data(n): # define a function: generation_data(n) returns n random draws
    _values = [] # empty list to collect the draws
    for i in range(n): # loop n times
        e = np.random.randn()
        _values.append(e)
    return _values # the complete list of n draws
data = generation_data(100) # generate 100 draws
plt.plot(data,'b-') # plot them
plt.show()
```
#### Conditions
Let's add an option to the function we defined
```python
import numpy as np
import matplotlib.pyplot as plt
def generate_data(n, generator_type): # n draws, with a choice between two generators
    _values = []
    for i in range(n):
        if generator_type == 'U': # call one of the options 'U'
            e = np.random.uniform(0,1) # 'U' yields uniform draws on [0, 1)
        else:
            e = np.random.randn() # any other input falls back to standard normal draws
        _values.append(e) # either way, append the draw; aligned with `if`, so it sits outside the if/else
    return _values # aligned with `for`: runs after the loop completes
data = generate_data(100, 'U') # 100 uniform draws
plt.plot(data, 'b-')
plt.show()
```
```python
import numpy as np
import matplotlib.pyplot as plt
def generate_data(n, generator_type):
    _values = []
    for i in range(n):
        e = generator_type()
        # now any generator can be passed in, as a function
        # passing np.random.uniform turns this line into e = np.random.uniform(), exactly as above
        # passing np.random.rand turns it into e = np.random.rand()
        # it is plain substitution
        _values.append(e)
    return _values
data = generate_data(100, np.random.uniform)
plt.plot(data, 'b-')
plt.show()
```
```python
max(7, 2, 4)  # maximum
```
7
```python
m = max
m(7, 2, 4)  # again just simple substitution
```
7
#### List Comprehensions
```python
animals = ['dog', 'cat', 'bird']
plurals = [animal + 's' for animal in animals]  # a simple for-in loop in comprehension form
plurals
```
['dogs', 'cats', 'birds']
```python
range(8)  # range(8) is [0, 1, 2, 3, 4, 5, 6, 7]; range(0, 8) covers [0, 8)
```
range(0, 8)
```python
[x for x in range(8)]
```
[0, 1, 2, 3, 4, 5, 6, 7]
```python
doubles = [2 * x for x in range(8)]
doubles
```
[0, 2, 4, 6, 8, 10, 12, 14]
This lets us condense
```
_values = []
for i in range(n):
    e = generator_type()
    _values.append(e)
```
into the one-liner
```
_values = [generator_type() for i in range(n)]
```
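As a quick check that the loop and the comprehension agree, here is a small sketch with a deterministic stand-in for `generator_type` (the counter below is invented purely for this comparison):
```python
def make_counter():  # hypothetical deterministic "generator"
    state = {'n': 0}
    def gen():
        state['n'] += 1
        return state['n']
    return gen

gen = make_counter()
loop_values = []
for i in range(3):
    loop_values.append(gen())

gen = make_counter()  # fresh counter so both versions see the same sequence
comp_values = [gen() for i in range(3)]

print(loop_values, comp_values)  # [1, 2, 3] [1, 2, 3]
```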
### 1.3.5 Exercises
#### Exercise 1
Compute $n!$
```python
import numpy as np
import matplotlib.pyplot as plt
def factorial(n):  # define the function factorial(n)
    e = 2
    _values = 1
    while e < n + 1:           # n itself must be multiplied in, so loop while e < n + 1; e = n still enters
        _values = _values * e  # first pass: 1*2; next pass: previous result * 3; ...; finally (n-1)! * n = n!
        e = e + 1              # e becomes 3, then 4, ...; once e = n + 1 the loop exits
    return _values
factorial(10)
```
3628800
```python
def factorial(n):  # the textbook version is much simpler
k = 1
for i in range(n):
k = k * (i + 1)
return k
factorial(10)
```
3628800
#### Exercise 2
Binomial random variable $Y \sim Bin(n, p)$
With uniform draws: count how many of $n$ draws fall below $p$
```python
from numpy.random import uniform
def binomial_rv(n, p):
count = 0
for i in range(n):
U = uniform(0,1)
if U < p:
count = count + 1
return count
binomial_rv(10,0.5)
```
7
#### Exercise 3
Compute an approximation to $\pi$ using Monte Carlo.
Estimate the value of $\pi$.
Imagine scattering seeds uniformly at random over a 1×1 square. A seed lands inside the inscribed circle of radius 0.5 with probability $\pi \cdot 0.5^2$, so we estimate that probability from the sample and multiply by $1/0.5^2 = 4$ to obtain an estimate of $\pi$.
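In symbols, the estimator implemented below is

$$\pi \approx 4 \cdot \frac{\#\{i : r_i < 0.5\}}{n}$$

where $r_i$ is the distance of the $i$-th point from the center of the square.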
```python
import numpy as np
n = 100000
count = 0
for i in range(n):
x = np.random.uniform()
y = np.random.uniform()
r = np.sqrt((x - 0.5)**2 + (y - 0.5)**2)
if r < 0.5:
count += 1
count*4/n
```
3.13956
#### Exercise 4
With uniform draws: pay one dollar if three consecutive draws exceed 0.5
```python
from numpy.random import uniform
_values = []
for i in range(10):
a = uniform()
if a >= 0.5:
b = 1
else:
b = 0
_values.append(b)
_values
```
[1, 1, 1, 1, 1, 1, 0, 0, 0, 0]
```python
count = 0
for i in range(0,9):
if _values[i:i+3] == [1,1,1]:
count = count + 1
if count>=1:
print('pay one dollar')
```
pay one dollar
```python
from numpy.random import uniform
payoff = 0
count = 0
for i in range(10):
U = uniform()
    count = count + 1 if U < 0.5 else 0  # extend the streak while U < 0.5; otherwise reset count to 0
if count == 3:
payoff = 1
print(payoff)
```
1
#### Exercise 5
Time series
Plot 200 periods of the time series $x_t = 0.9\,x_{t-1} + \epsilon_t$
```python
import numpy as np
import matplotlib.pyplot as plt
alpha = 0.9
ts_length = 200
current_x = 0
x_values = []
for i in range(ts_length + 1):  # [0, 200]: 201 values
    x_values.append(current_x)
    current_x = alpha * current_x + np.random.randn()  # current_x feeds back in each period
plt.plot(x_values, 'b-')
plt.show()
```
#### Exercise 6
```python
import numpy as np
import matplotlib.pyplot as plt
x = [np.random.randn() for i in range(100)]
plt.plot(x, 'b-', label="white noise")  # attach a label
plt.legend()                            # legend() must be called for the label to show
plt.show()
```
```python
import numpy as np
import matplotlib.pyplot as plt
for alpha in [0, 0.8, 0.98]:  # loop over three different coefficients
    ts_length = 200
    current_x = 0
    x_values = []
    for i in range(ts_length + 1):  # [0, 200]: 201 values; a loop nested inside the outer loop
        x_values.append(current_x)
        current_x = alpha * current_x + np.random.randn()
    plt.plot(x_values, label=r'$\alpha =$ ' + str(alpha))  # use an r-string, with the math wrapped in $...$
plt.legend()
plt.show()
```
```python
alphas = [0.0, 0.8, 0.98]
ts_length = 200
for alpha in alphas:  # build the list of coefficients first, then loop over it
x_values = []
current_x = 0
for i in range(ts_length):
x_values.append(current_x)
current_x = alpha * current_x + np.random.randn()
plt.plot(x_values, label=r'$\alpha$ = ' + str(alpha))
plt.legend()
plt.show()
```
## 1.4 [Python Essentials](https://lectures.quantecon.org/py/python_essentials.html)
### 1.4.1 Overview
### 1.4.2 Data Types
#### Primitive Data Types
Booleans
```python
x = True
y = 100 < 10 # Python evaluates expression on right and assigns it to y
y
```
False
```python
type(y)
```
bool
```python
x + y  # True counts as 1, False as 0
```
1
```python
x * y
```
0
```python
True + True
```
2
```python
bools = [True, True, False, True] # List of Boolean values
sum(bools)
```
3
```python
a, b = 1, 2       # integers
c, d = 2.5, 10.0  # floats
type(a)
```
int
```python
type(c)
```
float
```python
1 / 2
```
0.5
```python
1 // 2  # integer (floor) division
```
0
```python
x = complex(1, 2)  # complex arithmetic
y = complex(2, 1)
x * y
```
5j
#### Containers
```python
x = ('a', 'b') # Round brackets instead of the square brackets
x = 'a', 'b' # Or no brackets at all---the meaning is identical
x
```
('a', 'b')
```python
type(x)  # a tuple
```
tuple
```python
x = [1, 2] # List
x[0] = 10  # Now x = [10, 2]: the first element has been replaced
```
```python
x = (1, 2)  # tuples are immutable and can't be modified in place
x[0] = 10   # so this assignment raises a TypeError
```
```python
integers = (10, 20, 30)
x, y, z = integers
x
```
10
```python
y
```
20
#### Slice Notation
```python
a = [2, 4, 6, 8]
a[1:]  # from the second element onward
```
[4, 6, 8]
```python
a[1:3]  # the second through third elements, i.e. indices in [1, 3)
```
[4, 6]
```python
a[-2:]  # from the second-to-last element to the end
```
[6, 8]
```python
s = 'foobar'
s[-3:] # Select the last three elements
```
'bar'
#### Sets and Dictionaries
```python
d = {'name': 'Frodo', 'age': 33}  # keys map one-to-one to values
type(d)
```
dict
```python
d['age']
```
33
```python
s1 = {'a', 'b'}
type(s1)
```
set
```python
s2 = {'b', 'c'}
s1.issubset(s2)  # is s1 a subset of s2? No
```
False
```python
s1.intersection(s2)  # intersection
```
{'b'}
```python
s3 = set(('foo', 'bar', 'foo')) # Unique elements only
s3
```
{'bar', 'foo'}
### 1.4.3 Imports
```python
import math
math.sqrt(4)
```
2.0
```python
from math import *
sqrt(4)
```
2.0
### 1.4.4 Input and Output
```python
f = open('newfile.txt', 'w')  # Open 'newfile.txt' for writing
f.write('Testing\n')          # write a line; '\n' means newline
f.write('Testing again')      # write more text
f.close()                     # close the file
```
```python
%pwd  # show the current working directory
```
'C:\\Users\\Administrator\\Desktop\\QuantEcon'
```python
f = open('newfile.txt', 'r')  # open the file for reading
out = f.read()                # read its contents
out
```
'Testing\nTesting again'
```python
print(out)
```
Testing
Testing again
```python
f = open('C:\\Users\\Administrator\\Desktop\\QuantEcon\\newfile.txt', 'r')
```
### 1.4.5 Iterating
#### Looping over Different Objects
Let's write a us_cities.txt file that lists US cities and their populations
```python
%%file us_cities.txt
new york: 8244910
los angeles: 3819702
chicago: 2707120
houston: 2145146
philadelphia: 1536471
phoenix: 1469471
san antonio: 1359758
san diego: 1326179
dallas: 1223229
```
Overwriting us_cities.txt
```python
data_file = open('us_cities.txt', 'r')
for line in data_file:
    city, population = line.split(':')            # tuple unpacking: split each line at ':'
    city = city.title()                           # capitalize city names
    population = '{0:,}'.format(int(population))  # add thousands separators to the numbers
    print(city.ljust(15) + population)            # ljust pads the string out to a fixed width
data_file.close()
```
New York 8,244,910
Los Angeles 3,819,702
Chicago 2,707,120
Houston 2,145,146
Philadelphia 1,536,471
Phoenix 1,469,471
San Antonio 1,359,758
San Diego 1,326,179
Dallas 1,223,229
#### Looping without Indices
```python
x_values = [1,2,3] #Some iterable x
for x in x_values:
print(x * x)
```
1
4
9
```python
for i in range(len(x_values)):
print(x_values[i] * x_values[i])
```
1
4
9
```python
# Python provides tools that simplify looping without indices
# One of them is zip(), which pairs up the elements of two sequences
countries = ('Japan', 'Korea', 'China')
cities = ('Tokyo', 'Seoul', 'Beijing')
for country, city in zip(countries, cities):
    print('The capital of {0} is {1}'.format(country, city))  # {0} and {1} are filled by (country, city) in order
```
The capital of Japan is Tokyo
The capital of Korea is Seoul
The capital of China is Beijing
```python
names = ['Tom', 'John']
marks = ['E', 'F']
dict(zip(names, marks))
```
{'John': 'F', 'Tom': 'E'}
If we actually need the index from a list, one option is to use enumerate()
```python
letter_list = ['a', 'b', 'c']
for index, letter in enumerate(letter_list):
    print("letter_list[{0}] = '{1}'".format(index, letter))  # {0} and {1} are filled by (index, letter) in order
```
letter_list[0] = 'a'
letter_list[1] = 'b'
letter_list[2] = 'c'
### 1.4.6 Comparisons and Logical Operators
#### Comparisons
```python
x, y = 1, 2
x < y
```
True
```python
x > y
```
False
```python
1 < 2 < 3
```
True
```python
1 <= 2 <= 3
```
True
```python
x = 1 # Assignment
x == 2 # Comparison
```
False
```python
1 != 2
```
True
```python
x = 'yes' if 42 else 'no'  # any other value counts as True
x
```
'yes'
```python
x = 'yes' if [] else 'no'  # [] and () count as False
x
```
'no'
The rule is:
- Expressions that evaluate to zero, empty sequences or containers (strings, lists, etc.) and None are all equivalent to False
– for example, [] and () are equivalent to False in an if clause
- All other values are equivalent to True
– for example, 42 is equivalent to True in an if clause
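A quick check of the rule (a minimal sketch, not from the lecture):
```python
print(bool(0), bool(''), bool([]), bool(None))  # False False False False
print(bool(42), bool('spam'), bool([1, 2]))     # True True True
```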
#### Combining Expressions
```python
1 < 2 and 'f' in 'foo'  # both operands are true
```
True
```python
1 < 2 and 'g' in 'foo'  # one operand is false
```
False
```python
1 < 2 or 'g' in 'foo'  # x or y
```
True
```python
not True
```
False
```python
not not True
```
True
Remember
- P and Q is True if both are True, else False
- P or Q is False if both are False, else True
### 1.4.7 More Functions
```python
max(19, 20)
```
20
```python
range(4) # in python3 this returns a range iterator object
```
range(0, 4)
```python
list(range(4)) # will evaluate the range iterator and create a list
```
[0, 1, 2, 3]
```python
str(22)  # convert a number to a string
```
'22'
```python
type(22)  # 22 is an integer
```
int
```python
bools = False, True, True
all(bools) # True if all are True and False otherwise
```
False
```python
any(bools) # False if all are False and True otherwise
```
True
#### The Flexibility of Python Functions
```python
def f(x):
    if x < 0:  # define a function: return 'negative' when x < 0, and 'nonnegative' otherwise
        return 'negative'
    return 'nonnegative'
```
```python
# Filename: temp.py
def f(x):
"""
This function squares its argument
"""
return x**2 # f(x) = x**2
```
```python
f(50)
```
2500
#### One-Line Functions: lambda
```python
def f(x):
return x**3
```
```python
f = lambda x: x**3  # a shorter way to define the same function
```
Calculating $\int_0^2 x^3 \, dx$.
The syntax of the quad function is `quad(f, a, b)`, where `f` is a function and `a` and `b` are numbers.
```python
from scipy.integrate import quad  # numerical integration
quad(lambda x:x**3, 0, 2)
```
(4.0, 4.440892098500626e-14)
#### Keyword Arguments
```python
def f(x, coefficients=(1, 1)):
a, b = coefficients
return a + b * x
```
```python
f(2, coefficients=(0, 0)) # 0+0*2
```
0
```python
f(2) # Use default values (1, 1)
```
3
### 1.4.8 Coding Style and PEP8
### 1.4.9 Exercises
#### Exercise 1
Sum of the element-wise products
```python
x_vals = [1, 2, 3]
y_vals = [1, 1, 1]
sum([x * y for x, y in zip(x_vals, y_vals)])
```
6
Count the even numbers among 0-99
```python
3 % 2 == 0
```
False
```python
sum([x % 2 == 0 for x in range(100)])  # x % 2 == 0 means x is even, returning True, which counts as 1
```
50
Count the pairs (x, y) in which both entries are even
```python
pairs = ((2, 5), (4, 2), (9, 8), (12, 10))
sum([x % 2 == 0 and y % 2 == 0 for x, y in pairs])
```
2
#### Exercise 2
Evaluate $p(x) = a_0 + a_1 x + a_2 x^2 + a_3 x^3 + a_4 x^4 + \dots + a_n x^n$
```python
def p(x, coeff):
    return sum(a * x**i for i, a in enumerate(coeff))  # i is the position within coeff, a is the coefficient at that position
```
```python
p(1,[1,2])
```
3
#### Exercise 3
Count the capital letters in a string
```python
def f(string):
    count = 0
    for letter in string:
        if letter == letter.upper() and letter.isalpha():  # isalpha() checks for a letter; the branch runs when it is uppercase
            count += 1
    return count
f('L`Arc En Ciel')
```
4
#### Exercise 4
Check whether every element of sequence A also appears in sequence B
```python
def f(seq_a, seq_b):
    is_subset = True           # start by assuming seq_a is a subset of seq_b
    for a in seq_a:
        if a not in seq_b:     # found an element that is not in seq_b
            is_subset = False  # revise the assumption
    return is_subset           # report the result
# == test == #
print(f([1, 2], [1, 2, 3]))
print(f([1, 2, 3], [1, 2]))
print(f("L`Arc En Ciel","L`Arc En Ciel X"))
```
True
False
True
```python
def f(seq_a, seq_b):
    return set(seq_a).issubset(set(seq_b))  # a much shorter version using sets
```
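Running the earlier tests against the set-based version should give the same answers:
```python
print(f([1, 2], [1, 2, 3]))  # True
print(f([1, 2, 3], [1, 2]))  # False
```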
#### Exercise 5
Approximate the value of f at a point x by piecewise linear interpolation
Divide the interval [a, b] using n evenly spaced grid points
```python
def linapprox(f, a, b, n, x):
    """
    Evaluate the piecewise linear interpolant of f at x on the
    interval [a, b], with n evenly spaced grid points.

    Parameters
    ==========
    f : function
        The function to approximate
    x, a, b : scalars (floats or integers)
        Evaluation point and endpoints, with a <= x <= b
    n : integer
        Number of grid points

    Returns
    =======
    A float. The interpolant evaluated at x
    """
    length_of_interval = b - a  # length of the interval
    num_subintervals = n - 1    # n points define n - 1 subintervals
    step = length_of_interval / num_subintervals  # grid spacing
    # === find first grid point larger than x === #
    point = a
    while point <= x:  # advance to the nearest grid point above x
        point += step  # point = point + step
    # === x must lie between the grid points (point - step) and point === #
    u, v = point - step, point
    return f(u) + (x - u) * (f(v) - f(u)) / (v - u)
```
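A quick sanity check, assuming the target function is $f(x) = x^2$ on $[0, 2]$ with 5 grid points (step 0.5): at $x = 1.3$ the interpolant uses the grid points 1.0 and 1.5, giving $1 + 0.3 \cdot (2.25 - 1)/0.5 = 1.75$, versus the exact value $1.69$.
```python
print(linapprox(lambda x: x**2, 0, 2, 5, 1.3))  # 1.75, vs the exact 1.3**2 = 1.69
```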
|
program flush1
! Tests for syntax (AST only):
flush (10, IOSTAT = n)
flush (20, IOMSG = n)
flush (ERR = label)
flush (30, UNIT = 40)
FLUSH 50
end program
|
[STATEMENT]
lemma Resid_along_normal_preserves_Cong\<^sub>0:
assumes "t \<approx>\<^sub>0 t'" and "u \<in> \<NN>" and "R.sources t = R.sources u"
shows "t \\ u \<approx>\<^sub>0 t' \\ u"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. t \ u \<approx>\<^sub>0 t' \ u
[PROOF STEP]
by (metis Cong\<^sub>0_imp_coinitial R.arr_resid_iff_con R.coinitialI R.coinitial_def
R.cube R.sources_resid assms elements_are_arr forward_stable) |
# Tedium Free MLE
- toc: true
- branch: master
- badges: true
- comments: true
## Introduction
Maximum likelihood estimation has the dubious honor of being difficult for humans and machines alike (difficult for machines at least in the naïve formulation that doesn't use log-likelihood).
MLE is challenging for humans because it requires the multiplication of $n$ likelihood expressions, which is time consuming and error prone - this is the *tedium* part we're trying to avoid. Fortunately, computers are very good at repeated multiplication, even repeated *symbolic* multiplication.
## Problem Formulation and Example
MLE estimates parameters of an assumed probability distribution, given data $x_i$ observed independently from the same distribution. If that distribution has probability function $f(\cdot)$, then the likelihood of $x_i$ is $f(x_i)$.
As the $x_i$s are independent, the likelihood of all $x_i$s will be the product of their individual likelihoods. In mathematical notation, the product will be:
$$\prod_{i=1}^{n} f(x_i)$$
Probability functions (mass functions or density functions) like our $f(\cdot)$ typically have **parameters**. For instance, the Gaussian distribution has parameters $\mu$ and $\sigma^2$, and the Poisson distribution has rate parameter $\lambda$. We use MLE to estimate these parameters, so they are the unknowns in the expression and they will appear in each $f(x_i)$ term. We can restate the problem as an equality with the generic parameter $\theta$:
$$L(\theta) = \prod_{i=1}^{n} f(x_i)$$
The expression $L(\theta)$ is the likelihood. In order to find the MLE it is necessary to *maximize* this function, or find the value of $\theta$ for which $L(\theta)$ is as large as possible. This process is probably easier to show than to describe. In particular, we'll be demonstrating the usefulness of the `sympy` module in making these symbolic calculations.
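For the Poisson distribution used in the example below, this product takes the concrete form

$$L(\lambda) = \prod_{i=1}^{n} \frac{\lambda^{x_i} e^{-\lambda}}{x_i!}$$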
### Example
Say we observed the values $[3,1,2]$, generated independently from a Poisson distribution. What is the likelihood function of $\lambda$?
Importing the necessities and setting up some symbols and expressions:
```python
from sympy.stats import Poisson, density, E, variance
from sympy import Symbol, simplify
from sympy.abc import x
lambda_ = Symbol("lambda", positive=True)
f = Poisson("f", lambda_)
density(f)(x)
```
$\displaystyle \frac{\lambda^{x} e^{- \lambda}}{x!}$
`sympy` gives us a representation of the Poisson density to work with in the [`Poisson()` object](https://docs.sympy.org/latest/modules/stats.html#sympy.stats.Poisson), keeping track of all of the terms internally.
The likelihood expression is the product of the probability function evaluated at these three points:
```python
L_ = density(f)(3) * density(f)(1) * density(f)(2)
L_
```
$\displaystyle \frac{\lambda^{6} e^{- 3 \lambda}}{12}$
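Checking by hand against the density above:

$$f(3)\,f(1)\,f(2) = \frac{\lambda^{3} e^{-\lambda}}{3!} \cdot \frac{\lambda^{1} e^{-\lambda}}{1!} \cdot \frac{\lambda^{2} e^{-\lambda}}{2!} = \frac{\lambda^{6} e^{-3\lambda}}{12}$$

which matches `sympy`'s simplification.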
That's our expression for the likelihood $L(\theta)$ 🙂 In order to maximize the expression, we'll take its derivative and then solve for the value of the parameter $\lambda$ at which the derivative equals 0. [This value of $\lambda$ will maximize the likelihood.](https://tutorial.math.lamar.edu/classes/calci/DerivativeAppsProofs.aspx)
Finding the derivative using `sympy`:
```python
from sympy import diff
dL_ = diff(L_, lambda_)
dL_
```
$\displaystyle - \frac{\lambda^{6} e^{- 3 \lambda}}{4} + \frac{\lambda^{5} e^{- 3 \lambda}}{2}$
Setting the derivative $\frac{dL}{d\theta}$ equal to zero:
```python
from sympy import Eq
dLeqz = Eq(dL_, 0)
dLeqz
```
$\displaystyle - \frac{\lambda^{6} e^{- 3 \lambda}}{4} + \frac{\lambda^{5} e^{- 3 \lambda}}{2} = 0$
And finally, solving the equation for $\lambda$:
```python
from sympy import solve
solve(dLeqz, lambda_)
```
[2]
And that's our answer!
## Complications
There is a slight wrinkle with this approach. It is susceptible to numerical instability, which (luckily) did not affect us in this example. This is how MLE can become difficult for computers too.
Likelihoods are usually very small numbers, and computers simply can't represent numbers that are too small or too large. Multiplying very small numbers together repeatedly makes very VERY small numbers that can sometimes disappear completely. Without getting too distracted by the minutiae of numerical stability or underflow, we can still appreciate some bizarre behavior that results when floats are misused:
```python
6.89 + .1
```
6.989999999999999
```python
(0.1)**512
```
0.0
In the second scenario, we can imagine having 512 data points and finding that the likelihood evaluates to 0.1 (times our parameter) for every single one. Then our product would look like $g(\theta) \cdot (0.1)^{512}$. The computer just told us that one of those terms is *zero*, and we're left unable to find the parameters for our MLE.
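As a preview of the fix, the same quantity is perfectly representable once we move to log space (a minimal sketch using only the standard library):
```python
import math

# (0.1)**512 underflows to 0.0, but its logarithm is an ordinary float
log_likelihood = 512 * math.log(0.1)
print(log_likelihood)            # about -1178.9
print(math.exp(log_likelihood))  # 0.0 -- converting back underflows again
```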
## Solution
What do we do instead? Is there any way to make these numbers bigger, without changing the problem or solution? Is there an equivalent problem with bigger numbers?
Adding a number and multiplying by a number don't fix the problem - they just add terms to the expression, which ends up zero anyhow. However these functions do have one property that we will need to be sure we are solving an equivalent problem: *they preserve the order of the input in the output.* We call these functions **monotonic**.
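A one-line numerical illustration that log preserves order, so the maximizer is unchanged:
```python
import math
print(2 < 3, math.log(2) < math.log(3))  # True True
```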
The monotonic functions also include the *log* function. The log function has some very nice properties, not least of which that it makes our calculations immune to the problems we saw above. Calculating the log likelihood:
```python
from sympy import log
_ = simplify(log(L_))
_
```
$\displaystyle - 3 \lambda + 6 \log{\left(\lambda \right)} - \log{\left(12 \right)}$
And then taking the derivative as before:
```python
d_ = diff(_, lambda_)
d_
```
$\displaystyle - 3 + \frac{6}{\lambda}$
Setting equal to zero:
```python
_ = Eq(d_, 0)  # set the derivative d_ (not the log-likelihood itself) equal to zero
_
```
$\displaystyle -3 + \frac{6}{\lambda} = 0$
And solving:
```python
from sympy import solve
solve(_, lambda_)
```
[2]
The two solutions agree! That agreement is necessary, but not sufficient, to show the two methods are equivalent in general.
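As a final cross-check, a standard closed-form result (not derived here) says the Poisson MLE is the sample mean; for the data $[3, 1, 2]$ that is $(3 + 1 + 2)/3 = 2$, matching both computations above.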
|
[STATEMENT]
lemma Enc_keys_clean_AencSet_Un:
"Enc_keys_clean (G \<union> H) \<Longrightarrow> Enc_keys_clean (AencSet G K \<union> H)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Enc_keys_clean (G \<union> H) \<Longrightarrow> Enc_keys_clean (AencSet G K \<union> H)
[PROOF STEP]
by (auto simp add: Enc_keys_clean_def dest!: parts_msgSetD) |
import numpy as np
import pandas as pd
import string
from nltk import sent_tokenize, word_tokenize
from nltk.corpus import stopwords
import time
start = time.time()
#change the file location below to match your own path
file_location = "/Users/marctheshark/Documents/Github/NLP/Tokenization/data.txt"
#read the data file into a DataFrame
def getfile(address):
file = pd.read_csv(address)
return file
#Looping through the data to pull each sentence and sentiment label
def preprocessing(your_data):
data = getfile(your_data)
sentiment = []
corpus = []
    #looping over the length of the data (starting from row 1)
    for i in range(1, data.shape[0]):
        tweet = data.iloc[i][0]
        label = data.iloc[i][1]
        #Build sentences from the data, removing punctuation and lowering capitalization.
words = word_tokenize(tweet)
words = [word.lower() for word in words]
# removing any punctuation
matrix = str.maketrans('', '', string.punctuation)
removed = [word.translate(matrix) for word in words]
filter_words = [word for word in removed if word.isalpha()]
sw = set(stopwords.words('english'))
filter_words = [w for w in filter_words if not w in sw]
corpus.append(filter_words)
sentiment.append(label)
    #note: because the loop starts at row 1, the labels and corpus skip the first row; easy to adjust
return sentiment , corpus
x,y = (preprocessing(file_location))
print(len(x))
print(len(y))
def n_gram_GetTraining(data, n):
nothing = preprocessing(data)
labels, corpus = nothing
corpus_ngram =[]
#looking through each sentence in the corpus
for i in range(len(corpus)):
sentences = corpus[i]
sentence_ngram =[]
#print(sentences)
#looking at each word of the sentence
for word in range(len(sentences)):
#storing the ngram
sentence_ngram.append(sentences[word:word+n])
#storing all ngrams in their respective sentences
corpus_ngram.append(sentence_ngram)
#splitting into unique ngram tokens
tokens = []
for j in range(len(corpus_ngram)):
index = corpus_ngram[j]
for w in range(len(index)):
next_index = index[w]
            #if the n-grams have unequal length, don't use them
try:
if len(next_index) < n:
#print('breaking')
break
except:
w = 0
if next_index not in tokens:
tokens.append(next_index)
tokens.sort()
encoded_data = np.zeros([len(corpus_ngram), len(tokens)])
count = 0
for z in range(len(tokens)):
unique_token = tokens[z]
for e in range(len(corpus_ngram)):
if unique_token in corpus_ngram[e]:
count += 1
encoded_data[e, z] = count
return encoded_data , labels
#data is the location of your file in txt format
#wasn't able to link it from the GitHub repo, not sure how to do that -Marc
tri_gram_x, tri_gram_y = n_gram_GetTraining(file_location, 3)
quad_gram_x, quad_gram_y = n_gram_GetTraining(file_location, 4)
combination_gram = np.hstack((tri_gram_x, quad_gram_x))  # stack the two feature matrices side by side
x, y = quad_gram_x, quad_gram_y
print(x)
print(len(x))
print(len(y))
qg = pd.DataFrame(quad_gram_x)
print(qg)
qg.to_csv(path_or_buf= "/Users/marctheshark/Documents/NLP/Final Project/output.csv" , index=False)
#export_csv = quad_gram.to_csv (r"/Users/marctheshark/Documents/NLP/Final Project/outputdata.csv", header=True) #Don't forget to add '.csv' at the end of the path
#might be a good idea to output all three n-grams so we don't have to reprocess them every time
end = time.time()
print(end - start)
'''
from sklearn.model_selection import train_test_split, GridSearchCV, KFold
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.svm import SVC
print(len(x))
print(len(y))
X_train, X_test, y_train, y_test = train_test_split(x, y, random_state=333)
print(len(X_train))
print(len(y_train))
kf = KFold(n_splits=10)
hyperparameters = [{'kernel': ['rbf'], 'gamma' : [1, .1, .01, 0.001], 'C': [1, 10, 100]},
{'kernel': ['linear'], 'gamma' : [1, .1, .01, 0.001] , 'C': [1, 10, 100]}]
classifier = GridSearchCV(estimator=SVC(random_state=333), param_grid=hyperparameters, cv=kf)
classifier.fit(X_train, y_train)
means = classifier.cv_results_['mean_test_score']
stds = classifier.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, classifier.cv_results_['params']):
print("%0.4f (+/-%0.03f) for %r" % (mean, std * 2, params))
print ('')
print ("The Best Training Score was:" , classifier.best_score_)
print('')
print ("The Best Parameters were: " , classifier.best_params_)
print (' ')
''' |
section \<open>Identity Through Identity Anchors\<close>
text \<^marker>\<open>tag bodyonly\<close> \<open>
In this section, we describe another approach for characterizing
the identity of a particular in a particular structure. Though we
prove that this approach is logically equivalent to the characterizations
based on identifiability, non-permutability or isomorphical uniqueness,
it nevertheless is able to better highlight the context that characterizes
the identity of a particular.
\<close>
text_raw\<open>\par\<close>
text_raw \<open>\subsection[Anchoring]{Anchoring\isalabel{subsec:anchoring}}\<close>
text \<^marker>\<open>tag bodyonly\<close> \<open>
The value of an ontology, or conceptual model, lies in the information it
carries about the concepts and assumptions that characterize a domain.
In the context of Information Systems development, one of the most important
users of a conceptual model is the database designer, and some
of the elements the DB designer expects to find in the conceptual model
are the identity conditions of the elements in the domain.
The definitions provided so far for the identity of particulars were
(1) by identifiability, (2) by isomorphical uniqueness, and (3) by
non-permutability. The first one has the disadvantage of requiring
the existence of a predicate (and of a formal language) at a
fundamental level in the foundational ontology, even though such a
predicate would be useful for the purposes of the DB designer. The
other two, though not requiring the existence of elements that are
not in the particular structure, simply provide a yes-or-no
answer to whether a particular has identity.
Here we introduce the notion of an \emph{identity anchor} as a
structure that represents the \emph{identity neighborhood} of a
particular in a particular structure, i.e., the elements of the
structure that play a role in the identification of the particular.
Note that we are not referring to the identity condition itself,
which would be a predicate, but to the elements of the structure
that would participate in some identity predicate.
\<close>
text_raw\<open>\par\<close>
theory Anchoring
imports "../ParticularStructures/SubStructures"
begin
context ufo_particular_theory_sig
begin
text \<^marker>\<open>tag bodyonly\<close> \<open>
Given a particular structure \<open>\<Gamma>\<close> and a particular \<open>x\<close> of \<open>\<Gamma>\<close>, and
given another particular structure \<open>\<Gamma>\<^sub>x\<close>, a particular \<open>y\<close> of \<open>\<Gamma>\<^sub>x\<close>,
and a morphism \<open>\<phi>\<close> from \<open>\<Gamma>\<^sub>x\<close> to \<open>\<Gamma>\<close>, we say that \<open>(\<Gamma>\<^sub>x,y,\<phi>)\<close> anchors
\<open>x\<close> in \<open>\<Gamma>\<close>, written as \<open>y \<midarrow>\<Gamma>\<^sub>x,\<phi>\<rightarrow>\<^sub>1 x\<close>, or that \<open>(\<Gamma>\<^sub>x,y,\<phi>)\<close> is an
anchor for \<open>x\<close> (in \<open>\<Gamma>\<close>), if and only if, for every
morphism \<open>\<sigma>\<close> from \<open>\<Gamma>\<^sub>x\<close> to \<open>\<Gamma>\<close>, \<open>\<sigma> y\<close> is always \<open>x\<close>.
In other words, there are sufficient elements in \<open>\<Gamma>\<^sub>x\<close> to make it
so that \<open>y\<close> (in \<open>\<Gamma>\<^sub>x\<close>) cannot be seen as anything but \<open>x\<close> in \<open>\<Gamma>\<close>.
Formally, we have:
\<close>
text_raw\<open>\par\<close>
definition anchors ::
\<open> 'p\<^sub>2
\<Rightarrow> ('p\<^sub>2,'q) particular_struct
\<Rightarrow> ('p\<^sub>2 \<Rightarrow> 'p)
\<Rightarrow> 'p
\<Rightarrow> bool\<close> (\<open>_ \<midarrow>_,_\<rightarrow>\<^sub>1 _\<close> [74,1,1,74] 75) where
\<open>y \<midarrow>\<Gamma>\<^sub>x,\<phi>\<rightarrow>\<^sub>1 x \<equiv>
x \<in> \<P> \<and> \<Gamma>\<^sub>x \<lless>\<^bsub>\<phi>\<^esub> \<Gamma> \<and> y \<in> particulars \<Gamma>\<^sub>x
\<and> (\<forall>\<sigma> \<in> Morphs\<^bsub>\<Gamma>\<^sub>x,\<Gamma>\<^esub>. \<forall>z \<in> particulars \<Gamma>\<^sub>x. \<sigma> z = x \<longleftrightarrow> z = y)\<close>
text \<^marker>\<open>tag bodyonly\<close> \<open>
Note that, since \<open>x\<close> is invariant with respect to the morphisms from
\<open>\<Gamma>\<^sub>x\<close> to \<open>\<Gamma>\<close>, the choice of the morphism \<open>\<phi>\<close> doesn't matter. Thus,
we can just say that \<open>(\<Gamma>\<^sub>x,y)\<close> anchors \<open>x\<close>, or simply that \<open>\<Gamma>\<^sub>x\<close> is
an anchor for \<open>x\<close>.
Note that from a particular structure with a single particular, \<open>y\<close>, we
can always have a morphism to \<open>\<Gamma>\<close> that maps \<open>y\<close> to \<open>x\<close>. However, this
configuration would only work as anchor for \<open>x\<close> if \<open>x\<close> is the only
substantial in \<open>\<Gamma>\<close>. Otherwise, there would be morphisms from the
single-particular structure to any substantial in \<open>\<Gamma>\<close>. Thus, it is always
possible to remove enough elements from an anchor in such a way that it
stops being an anchor. Conversely, if \<open>\<Gamma>'\<close> is an anchor for \<open>x\<close>, then
the addition of new elements to \<open>\<Gamma>'\<close>, while maintaining the existence of
at least one morphism to \<open>\<Gamma>\<close>, will not remove its status as an anchor
for \<open>x\<close>.
\<close>
text_raw\<open>\par\<close>
lemma \<^marker>\<open>tag (proof) aponly\<close> anchorsI[intro!]:
assumes
\<open>x \<in> \<P>\<close> \<open>\<Gamma>\<^sub>x \<lless>\<^bsub>\<phi>\<^esub> \<Gamma>\<close> \<open>y \<in> particulars \<Gamma>\<^sub>x\<close>
\<open>\<And>\<phi> z. \<lbrakk> z \<in> particulars \<Gamma>\<^sub>x ; \<phi> \<in> Morphs\<^bsub>\<Gamma>\<^sub>x,\<Gamma>\<^esub> \<rbrakk>
\<Longrightarrow> \<phi> z = x \<longleftrightarrow> z = y\<close>
shows \<open>y \<midarrow>\<Gamma>\<^sub>x,\<phi>\<rightarrow>\<^sub>1 x\<close>
apply (simp add: anchors_def assms(1,2,3)
del: morphs_iff injectives_iff)
apply (intro ballI)
using assms by metis
lemma \<^marker>\<open>tag (proof) aponly\<close> anchorsE[elim!]:
assumes \<open>y \<midarrow>\<Gamma>\<^sub>x,\<phi>\<rightarrow>\<^sub>1 x\<close>
obtains
\<open>x \<in> \<P>\<close> \<open>\<Gamma>\<^sub>x \<lless>\<^bsub>\<phi>\<^esub> \<Gamma>\<close> \<open>y \<in> particulars \<Gamma>\<^sub>x\<close>
\<open>\<And>\<phi> z. \<lbrakk> z \<in> particulars \<Gamma>\<^sub>x ; \<phi> \<in> Morphs\<^bsub>\<Gamma>\<^sub>x,\<Gamma>\<^esub> \<rbrakk>
\<Longrightarrow> \<phi> z = x \<longleftrightarrow> z = y\<close>
using assms by (simp add: anchors_def)
definition anchored_particulars :: \<open>'p set\<close> (\<open>\<P>\<^sub>\<down>\<close>) where
\<open>\<P>\<^sub>\<down> \<equiv> { x | x (y :: ZF) \<Gamma>\<^sub>x \<phi> . y \<midarrow>\<Gamma>\<^sub>x,\<phi>\<rightarrow>\<^sub>1 x }\<close>
lemma \<^marker>\<open>tag (proof) aponly\<close> anchored_particulars_I[intro]:
fixes y :: ZF and \<Gamma>\<^sub>x and x
assumes \<open>y \<midarrow>\<Gamma>\<^sub>x,\<phi>\<rightarrow>\<^sub>1 x\<close>
shows \<open>x \<in> \<P>\<^sub>\<down>\<close>
using assms
by (simp add: anchored_particulars_def ; metis)
lemma \<^marker>\<open>tag (proof) aponly\<close> anchored_particulars_E[elim!]:
assumes \<open>x \<in> \<P>\<^sub>\<down>\<close>
obtains y :: ZF and \<Gamma>\<^sub>x \<phi> where \<open>y \<midarrow>\<Gamma>\<^sub>x,\<phi>\<rightarrow>\<^sub>1 x\<close>
using assms
by (simp add: anchored_particulars_def ; metis)
lemma \<^marker>\<open>tag (proof) aponly\<close> anchored_particulars_I1[intro!]:
fixes y :: \<open>'p\<^sub>1\<close>
assumes \<open>y \<midarrow>\<Gamma>\<^sub>x,\<phi>\<^sub>x\<rightarrow>\<^sub>1 x\<close>
shows \<open>x \<in> \<P>\<^sub>\<down>\<close>
proof -
obtain A: \<open>x \<in> \<P>\<close> \<open>\<Gamma>\<^sub>x \<lless>\<^bsub>\<phi>\<^sub>x\<^esub> \<Gamma>\<close> \<open>y \<in> particulars \<Gamma>\<^sub>x\<close>
and B: \<open>\<And>\<phi> z. \<lbrakk> z \<in> particulars \<Gamma>\<^sub>x
; \<phi> \<in> Morphs\<^bsub>\<Gamma>\<^sub>x,\<Gamma>\<^esub> \<rbrakk> \<Longrightarrow> \<phi> z = x \<longleftrightarrow> z = y\<close>
using assms by blast
interpret I: particular_struct_injection \<open>\<Gamma>\<^sub>x\<close> \<open>\<Gamma>\<close> \<open>\<phi>\<^sub>x\<close>
using A(2) by simp
obtain \<sigma> :: \<open>'p\<^sub>1 \<Rightarrow> ZF\<close> where \<open>inj \<sigma>\<close>
using I.src.injection_to_ZF_exist by blast
interpret I2: particular_struct_bijection_1 \<open>\<Gamma>\<^sub>x\<close> \<sigma>
using I.src.inj_morph_img_isomorphism[of \<sigma>]
by (metis I.src.\<Gamma>_simps UNIV_I \<open>inj \<sigma>\<close> inj_on_id inj_on_subset
particular_struct_eqI subsetI)
have C: \<open>I2.tgt.\<Gamma> = MorphImg \<sigma> \<Gamma>\<^sub>x\<close>
using I2.tgt.\<Gamma>_simps by blast
interpret I3: particular_struct_bijection_1 \<open>MorphImg \<sigma> \<Gamma>\<^sub>x\<close> \<open>inv \<sigma>\<close>
apply (intro I2.tgt.inj_morph_img_isomorphism[simplified C])
subgoal
by (metis I2.inv_morph_morph UNIV_I image_eqI
inj_on_inv_into subsetI)
using I.src.injection_to_ZF_exist by blast
have D[simp]: \<open>inv \<sigma> ` \<sigma> ` X = X\<close> for X
using \<open>inj \<sigma>\<close> by (auto simp: image_def)
have E[simp]: \<open>inv \<sigma> (\<sigma> x) = x\<close> for x
using \<open>inj \<sigma>\<close> by (auto simp: image_def)
have F[simp]: \<open>MorphImg (inv \<sigma>) (MorphImg \<sigma> \<Gamma>\<^sub>x) = \<Gamma>\<^sub>x\<close>
apply (intro particular_struct_eqI ext
; auto simp add: particular_struct_morphism_image_simps)
subgoal using D by blast
subgoal by force
by (metis UNIV_I \<open>inj \<sigma>\<close> inv_into_f_f)
interpret I4: particular_struct_injection \<open>MorphImg \<sigma> \<Gamma>\<^sub>x\<close> \<Gamma> \<open>\<phi>\<^sub>x \<circ> inv \<sigma>\<close>
apply (intro particular_struct_injection_comp[of _ \<open>\<Gamma>\<^sub>x\<close>])
using I3.particular_struct_injection_axioms[simplified]
I.particular_struct_injection_axioms
by simp+
have G: \<open>\<phi>\<^sub>x \<circ> inv \<sigma> \<in> InjMorphs\<^bsub>MorphImg \<sigma> \<Gamma>\<^sub>x,\<Gamma>\<^esub>\<close>
using I4.particular_struct_injection_axioms by blast
then have H: \<open>MorphImg \<sigma> \<Gamma>\<^sub>x \<lless>\<^bsub>\<phi>\<^sub>x \<circ> inv \<sigma>\<^esub> \<Gamma>\<close> by blast
have J[simp]: \<open>(\<phi> z = x) = (z = \<sigma> y)\<close>
if as: \<open>z \<in> I3.src.endurants\<close>
\<open>particular_struct_morphism (MorphImg \<sigma> \<Gamma>\<^sub>x) \<Gamma> \<phi>\<close> for z \<phi>
proof -
interpret I5: particular_struct_morphism \<open>MorphImg \<sigma> \<Gamma>\<^sub>x\<close> \<Gamma> \<phi>
using as by simp
have AA: \<open>\<phi> \<circ> \<sigma> \<in> Morphs\<^bsub>\<Gamma>\<^sub>x,\<Gamma>\<^esub>\<close>
apply (intro morphs_I
particular_struct_morphism_comp[of _ \<open>MorphImg \<sigma> \<Gamma>\<^sub>x\<close>] as)
by (simp add: I2.particular_struct_morphism_axioms)
have BB: \<open>inv \<sigma> z \<in> I.src.endurants\<close>
by (metis F I3.I_img_eq_tgt_I I3.morph_image_def image_eqI as(1))
have CC:\<open>\<sigma> (inv \<sigma> z) = z\<close> using as(1)
by (meson BB E I2.morph_preserves_particulars
I3.morph_is_injective inj_onD)
have DD: \<open>(\<phi> z = x) = (inv \<sigma> z = y)\<close>
using B[OF BB AA] CC
by (simp ; metis)
show ?thesis
apply (simp add: DD)
using CC by auto
qed
have K: \<open>\<sigma> y \<midarrow>MorphImg \<sigma> \<Gamma>\<^sub>x,\<phi>\<^sub>x \<circ> inv \<sigma>\<rightarrow>\<^sub>1 x\<close>
apply (intro anchorsI I4.particular_struct_injection_axioms H A)
using A(3) by auto
show ?thesis
by (intro anchored_particulars_I[OF K])
qed
lemma \<^marker>\<open>tag (proof) aponly\<close> anchor_to_zf_I:
fixes y :: 'a
assumes \<open>y \<midarrow>\<Gamma>\<^sub>x,\<phi>\<rightarrow>\<^sub>1 x\<close>
shows \<open>\<exists>(y\<^sub>1 :: ZF) \<Gamma>\<^sub>1 \<sigma>. y\<^sub>1 \<midarrow>\<Gamma>\<^sub>1,\<sigma>\<rightarrow>\<^sub>1 x \<and> \<Gamma>\<^sub>1 \<in> IsoModels\<^bsub>\<Gamma>\<^sub>x,TYPE(ZF)\<^esub>\<close>
proof -
obtain A: \<open>x \<in> \<P>\<close> \<open>y \<in> particulars \<Gamma>\<^sub>x\<close> \<open>\<Gamma>\<^sub>x \<lless>\<^bsub>\<phi>\<^esub> \<Gamma>\<close>
\<open>\<And>\<sigma> z. \<lbrakk> z \<in> particulars \<Gamma>\<^sub>x ; \<sigma> \<in> Morphs\<^bsub>\<Gamma>\<^sub>x,\<Gamma>\<^esub> \<rbrakk>
\<Longrightarrow> \<sigma> z = x \<longleftrightarrow> z = y\<close>
using anchorsE[OF assms] by metis
interpret phi: particular_struct_injection \<Gamma>\<^sub>x \<Gamma> \<phi>
using A(3) .
obtain f :: \<open>'a \<Rightarrow> ZF\<close> where f: \<open>inj f\<close>
using phi.src.injection_to_ZF_exist by blast
have \<open>phi.src.\<Gamma> = \<Gamma>\<^sub>x\<close> by auto
have \<open>particular_struct_bijection_1 \<Gamma>\<^sub>x f\<close> using f
apply (subst \<open>phi.src.\<Gamma> = \<Gamma>\<^sub>x\<close>[symmetric])
apply (intro phi.src.inj_morph_img_isomorphism)
subgoal using inj_on_subset by blast
using inj_on_id by blast
then interpret gamma_x:
particular_struct_bijection_1 \<Gamma>\<^sub>x f
by blast
have \<open>particular_struct_injection (MorphImg f \<Gamma>\<^sub>x) \<Gamma>\<^sub>x gamma_x.inv_morph\<close>
using particular_struct_bijection_def by blast
then interpret gamma_x_inv:
particular_struct_injection \<open>MorphImg f \<Gamma>\<^sub>x\<close> \<Gamma>\<^sub>x gamma_x.inv_morph .
have \<open>particular_struct_injection (MorphImg f \<Gamma>\<^sub>x) \<Gamma> (\<phi> \<circ> gamma_x.inv_morph)\<close>
apply (intro particular_struct_injection_comp[of _ \<Gamma>\<^sub>x])
by (intro_locales)
then interpret phi_gamma_x_inv:
particular_struct_injection
\<open>MorphImg f \<Gamma>\<^sub>x\<close> \<Gamma> \<open>\<phi> \<circ> gamma_x.inv_morph\<close>
\<open>TYPE(ZF)\<close> \<open>TYPE('p)\<close> .
have R1: \<open>MorphImg f \<Gamma>\<^sub>x \<lless>\<^bsub>\<phi> \<circ> gamma_x.inv_morph\<^esub> \<Gamma>\<close>
using injectives_I[
OF phi_gamma_x_inv.particular_struct_injection_axioms]
by blast
have R2: \<open>\<phi> \<in> Morphs\<^bsub>\<Gamma>\<^sub>x,\<Gamma>\<^esub>\<close>
using phi.particular_struct_morphism_axioms by blast
have R3[simp]: \<open>\<phi> y = x\<close>
using A(4)[OF _ R2,simplified,of y,simplified] A(2) by metis
have R4: \<open>f y \<in> gamma_x_inv.src.\<P>\<close> using A(2) by blast
have R5: \<open>f y \<midarrow>MorphImg f \<Gamma>\<^sub>x,\<phi> \<circ> gamma_x.inv_morph\<rightarrow>\<^sub>1 x\<close>
proof (intro anchorsI A(1) R1 R4)
fix \<sigma> z
assume as: \<open>z \<in> gamma_x_inv.src.\<P>\<close> \<open>\<sigma> \<in> Morphs\<^bsub>MorphImg f \<Gamma>\<^sub>x,\<Gamma>\<^esub>\<close>
interpret sigma:
particular_struct_morphism \<open>MorphImg f \<Gamma>\<^sub>x\<close> \<Gamma> \<sigma>
using as(2) by blast
interpret
particular_struct_morphism \<Gamma>\<^sub>x \<Gamma> \<open>\<phi> \<circ> gamma_x.inv_morph \<circ> f\<close>
apply (intro particular_struct_morphism_comp[
of _ \<open>MorphImg f \<Gamma>\<^sub>x\<close>])
by intro_locales
interpret sigma_f:
particular_struct_morphism \<Gamma>\<^sub>x \<Gamma> \<open>\<sigma> \<circ> f\<close>
apply (intro particular_struct_morphism_comp[of _ \<open>MorphImg f \<Gamma>\<^sub>x\<close>])
by intro_locales
have RR1: \<open>\<sigma> \<circ> f \<in> Morphs\<^bsub>\<Gamma>\<^sub>x,\<Gamma>\<^esub>\<close>
using sigma_f.particular_struct_morphism_axioms by blast
have I1: \<open>gamma_x.inv_morph (f x) = x\<close> if \<open>x \<in> phi.src.\<P>\<close> for x
using that by simp
have I2: \<open>f (gamma_x.inv_morph x) = x\<close> if \<open>x \<in> gamma_x.tgt.\<P>\<close> for x
using that by simp
show \<open>\<sigma> z = x \<longleftrightarrow> z = f y\<close>
supply R = I1 I2 A(4)[OF _ RR1,simplified] R3 as(1) A(2)
apply (intro iffI)
subgoal using R
by (metis gamma_x_inv.morph_preserves_particulars)
using R by blast
qed
have R6: \<open>MorphImg f \<Gamma>\<^sub>x \<in> IsoModels\<^bsub>\<Gamma>\<^sub>x,TYPE(ZF)\<^esub>\<close>
using gamma_x.particular_struct_bijection_1_axioms by blast
then show ?thesis using R5 by blast
qed
lemma \<^marker>\<open>tag (proof) aponly\<close> anchored_particulars_are_particulars: \<open>\<P>\<^sub>\<down> \<subseteq> \<P>\<close>
by blast
end
end |
[GOAL]
⊢ LawfulFunctor Multiset
[PROOFSTEP]
refine' { .. }
[GOAL]
case refine'_1
⊢ ∀ {α β : Type ?u.250}, Functor.mapConst = Functor.map ∘ Function.const β
[PROOFSTEP]
intros
[GOAL]
case refine'_2
⊢ ∀ {α : Type ?u.250} (x : Multiset α), id <$> x = x
[PROOFSTEP]
intros
[GOAL]
case refine'_3
⊢ ∀ {α β γ : Type ?u.250} (g : α → β) (h : β → γ) (x : Multiset α), (h ∘ g) <$> x = h <$> g <$> x
[PROOFSTEP]
intros
[GOAL]
case refine'_1
α✝ β✝ : Type ?u.250
⊢ Functor.mapConst = Functor.map ∘ Function.const β✝
[PROOFSTEP]
try simp
[GOAL]
case refine'_1
α✝ β✝ : Type ?u.250
⊢ Functor.mapConst = Functor.map ∘ Function.const β✝
[PROOFSTEP]
simp
[GOAL]
case refine'_2
α✝ : Type ?u.250
x✝ : Multiset α✝
⊢ id <$> x✝ = x✝
[PROOFSTEP]
try simp
[GOAL]
case refine'_2
α✝ : Type ?u.250
x✝ : Multiset α✝
⊢ id <$> x✝ = x✝
[PROOFSTEP]
simp
[GOAL]
case refine'_3
α✝ β✝ γ✝ : Type ?u.250
g✝ : α✝ → β✝
h✝ : β✝ → γ✝
x✝ : Multiset α✝
⊢ (h✝ ∘ g✝) <$> x✝ = h✝ <$> g✝ <$> x✝
[PROOFSTEP]
try simp
[GOAL]
case refine'_3
α✝ β✝ γ✝ : Type ?u.250
g✝ : α✝ → β✝
h✝ : β✝ → γ✝
x✝ : Multiset α✝
⊢ (h✝ ∘ g✝) <$> x✝ = h✝ <$> g✝ <$> x✝
[PROOFSTEP]
simp
[GOAL]
case refine'_1
α✝ β✝ : Type ?u.250
⊢ Functor.mapConst = Functor.map ∘ Function.const β✝
[PROOFSTEP]
rfl
[GOAL]
F : Type u → Type u
inst✝¹ : Applicative F
inst✝ : CommApplicative F
α' β' : Type u
f : α' → F β'
⊢ Multiset α' → F (Multiset β')
[PROOFSTEP]
refine' Quotient.lift (Functor.map Coe.coe ∘ Traversable.traverse f) _
[GOAL]
F : Type u → Type u
inst✝¹ : Applicative F
inst✝ : CommApplicative F
α' β' : Type u
f : α' → F β'
⊢ ∀ (a b : List α'),
a ≈ b → (Functor.map Coe.coe ∘ Traversable.traverse f) a = (Functor.map Coe.coe ∘ Traversable.traverse f) b
[PROOFSTEP]
introv p
[GOAL]
F : Type u → Type u
inst✝¹ : Applicative F
inst✝ : CommApplicative F
α' β' : Type u
f : α' → F β'
a b : List α'
p : a ≈ b
⊢ (Functor.map Coe.coe ∘ Traversable.traverse f) a = (Functor.map Coe.coe ∘ Traversable.traverse f) b
[PROOFSTEP]
unfold Function.comp
[GOAL]
F : Type u → Type u
inst✝¹ : Applicative F
inst✝ : CommApplicative F
α' β' : Type u
f : α' → F β'
a b : List α'
p : a ≈ b
⊢ Coe.coe <$> Traversable.traverse f a = Coe.coe <$> Traversable.traverse f b
[PROOFSTEP]
induction p
[GOAL]
case nil
F : Type u → Type u
inst✝¹ : Applicative F
inst✝ : CommApplicative F
α' β' : Type u
f : α' → F β'
a b : List α'
⊢ Coe.coe <$> Traversable.traverse f [] = Coe.coe <$> Traversable.traverse f []
case cons
F : Type u → Type u
inst✝¹ : Applicative F
inst✝ : CommApplicative F
α' β' : Type u
f : α' → F β'
a b : List α'
x✝ : α'
l₁✝ l₂✝ : List α'
a✝ : l₁✝ ~ l₂✝
a_ih✝ : Coe.coe <$> Traversable.traverse f l₁✝ = Coe.coe <$> Traversable.traverse f l₂✝
⊢ Coe.coe <$> Traversable.traverse f (x✝ :: l₁✝) = Coe.coe <$> Traversable.traverse f (x✝ :: l₂✝)
case swap
F : Type u → Type u
inst✝¹ : Applicative F
inst✝ : CommApplicative F
α' β' : Type u
f : α' → F β'
a b : List α'
x✝ y✝ : α'
l✝ : List α'
⊢ Coe.coe <$> Traversable.traverse f (y✝ :: x✝ :: l✝) = Coe.coe <$> Traversable.traverse f (x✝ :: y✝ :: l✝)
case trans
F : Type u → Type u
inst✝¹ : Applicative F
inst✝ : CommApplicative F
α' β' : Type u
f : α' → F β'
a b l₁✝ l₂✝ l₃✝ : List α'
a✝¹ : l₁✝ ~ l₂✝
a✝ : l₂✝ ~ l₃✝
a_ih✝¹ : Coe.coe <$> Traversable.traverse f l₁✝ = Coe.coe <$> Traversable.traverse f l₂✝
a_ih✝ : Coe.coe <$> Traversable.traverse f l₂✝ = Coe.coe <$> Traversable.traverse f l₃✝
⊢ Coe.coe <$> Traversable.traverse f l₁✝ = Coe.coe <$> Traversable.traverse f l₃✝
[PROOFSTEP]
case nil => rfl
[GOAL]
F : Type u → Type u
inst✝¹ : Applicative F
inst✝ : CommApplicative F
α' β' : Type u
f : α' → F β'
a b : List α'
⊢ Coe.coe <$> Traversable.traverse f [] = Coe.coe <$> Traversable.traverse f []
[PROOFSTEP]
case nil => rfl
[GOAL]
F : Type u → Type u
inst✝¹ : Applicative F
inst✝ : CommApplicative F
α' β' : Type u
f : α' → F β'
a b : List α'
⊢ Coe.coe <$> Traversable.traverse f [] = Coe.coe <$> Traversable.traverse f []
[PROOFSTEP]
rfl
[GOAL]
case cons
F : Type u → Type u
inst✝¹ : Applicative F
inst✝ : CommApplicative F
α' β' : Type u
f : α' → F β'
a b : List α'
x✝ : α'
l₁✝ l₂✝ : List α'
a✝ : l₁✝ ~ l₂✝
a_ih✝ : Coe.coe <$> Traversable.traverse f l₁✝ = Coe.coe <$> Traversable.traverse f l₂✝
⊢ Coe.coe <$> Traversable.traverse f (x✝ :: l₁✝) = Coe.coe <$> Traversable.traverse f (x✝ :: l₂✝)
case swap
F : Type u → Type u
inst✝¹ : Applicative F
inst✝ : CommApplicative F
α' β' : Type u
f : α' → F β'
a b : List α'
x✝ y✝ : α'
l✝ : List α'
⊢ Coe.coe <$> Traversable.traverse f (y✝ :: x✝ :: l✝) = Coe.coe <$> Traversable.traverse f (x✝ :: y✝ :: l✝)
case trans
F : Type u → Type u
inst✝¹ : Applicative F
inst✝ : CommApplicative F
α' β' : Type u
f : α' → F β'
a b l₁✝ l₂✝ l₃✝ : List α'
a✝¹ : l₁✝ ~ l₂✝
a✝ : l₂✝ ~ l₃✝
a_ih✝¹ : Coe.coe <$> Traversable.traverse f l₁✝ = Coe.coe <$> Traversable.traverse f l₂✝
a_ih✝ : Coe.coe <$> Traversable.traverse f l₂✝ = Coe.coe <$> Traversable.traverse f l₃✝
⊢ Coe.coe <$> Traversable.traverse f l₁✝ = Coe.coe <$> Traversable.traverse f l₃✝
[PROOFSTEP]
case cons x l₁ l₂ _
h =>
have :
Multiset.cons <$> f x <*> Coe.coe <$> Traversable.traverse f l₁ =
Multiset.cons <$> f x <*> Coe.coe <$> Traversable.traverse f l₂ :=
by rw [h]
simpa [functor_norm] using this
[GOAL]
F : Type u → Type u
inst✝¹ : Applicative F
inst✝ : CommApplicative F
α' β' : Type u
f : α' → F β'
a b : List α'
x : α'
l₁ l₂ : List α'
a✝ : l₁ ~ l₂
h : Coe.coe <$> Traversable.traverse f l₁ = Coe.coe <$> Traversable.traverse f l₂
⊢ Coe.coe <$> Traversable.traverse f (x :: l₁) = Coe.coe <$> Traversable.traverse f (x :: l₂)
[PROOFSTEP]
case cons x l₁ l₂ _
h =>
have :
Multiset.cons <$> f x <*> Coe.coe <$> Traversable.traverse f l₁ =
Multiset.cons <$> f x <*> Coe.coe <$> Traversable.traverse f l₂ :=
by rw [h]
simpa [functor_norm] using this
[GOAL]
F : Type u → Type u
inst✝¹ : Applicative F
inst✝ : CommApplicative F
α' β' : Type u
f : α' → F β'
a b : List α'
x : α'
l₁ l₂ : List α'
a✝ : l₁ ~ l₂
h : Coe.coe <$> Traversable.traverse f l₁ = Coe.coe <$> Traversable.traverse f l₂
⊢ Coe.coe <$> Traversable.traverse f (x :: l₁) = Coe.coe <$> Traversable.traverse f (x :: l₂)
[PROOFSTEP]
have :
Multiset.cons <$> f x <*> Coe.coe <$> Traversable.traverse f l₁ =
Multiset.cons <$> f x <*> Coe.coe <$> Traversable.traverse f l₂ :=
by rw [h]
[GOAL]
F : Type u → Type u
inst✝¹ : Applicative F
inst✝ : CommApplicative F
α' β' : Type u
f : α' → F β'
a b : List α'
x : α'
l₁ l₂ : List α'
a✝ : l₁ ~ l₂
h : Coe.coe <$> Traversable.traverse f l₁ = Coe.coe <$> Traversable.traverse f l₂
⊢ (Seq.seq (cons <$> f x) fun x => Coe.coe <$> Traversable.traverse f l₁) =
Seq.seq (cons <$> f x) fun x => Coe.coe <$> Traversable.traverse f l₂
[PROOFSTEP]
rw [h]
[GOAL]
F : Type u → Type u
inst✝¹ : Applicative F
inst✝ : CommApplicative F
α' β' : Type u
f : α' → F β'
a b : List α'
x : α'
l₁ l₂ : List α'
a✝ : l₁ ~ l₂
h : Coe.coe <$> Traversable.traverse f l₁ = Coe.coe <$> Traversable.traverse f l₂
this :
(Seq.seq (cons <$> f x) fun x => Coe.coe <$> Traversable.traverse f l₁) =
Seq.seq (cons <$> f x) fun x => Coe.coe <$> Traversable.traverse f l₂
⊢ Coe.coe <$> Traversable.traverse f (x :: l₁) = Coe.coe <$> Traversable.traverse f (x :: l₂)
[PROOFSTEP]
simpa [functor_norm] using this
[GOAL]
case swap
F : Type u → Type u
inst✝¹ : Applicative F
inst✝ : CommApplicative F
α' β' : Type u
f : α' → F β'
a b : List α'
x✝ y✝ : α'
l✝ : List α'
⊢ Coe.coe <$> Traversable.traverse f (y✝ :: x✝ :: l✝) = Coe.coe <$> Traversable.traverse f (x✝ :: y✝ :: l✝)
case trans
F : Type u → Type u
inst✝¹ : Applicative F
inst✝ : CommApplicative F
α' β' : Type u
f : α' → F β'
a b l₁✝ l₂✝ l₃✝ : List α'
a✝¹ : l₁✝ ~ l₂✝
a✝ : l₂✝ ~ l₃✝
a_ih✝¹ : Coe.coe <$> Traversable.traverse f l₁✝ = Coe.coe <$> Traversable.traverse f l₂✝
a_ih✝ : Coe.coe <$> Traversable.traverse f l₂✝ = Coe.coe <$> Traversable.traverse f l₃✝
⊢ Coe.coe <$> Traversable.traverse f l₁✝ = Coe.coe <$> Traversable.traverse f l₃✝
[PROOFSTEP]
case swap x y
l =>
have :
(fun a b (l : List β') ↦ (↑(a :: b :: l) : Multiset β')) <$> f y <*> f x =
(fun a b l ↦ ↑(a :: b :: l)) <$> f x <*> f y :=
by
rw [CommApplicative.commutative_map]
congr
funext a b l
simpa [flip] using Perm.swap a b l
simp [(· ∘ ·), this, functor_norm, Coe.coe]
[GOAL]
F : Type u → Type u
inst✝¹ : Applicative F
inst✝ : CommApplicative F
α' β' : Type u
f : α' → F β'
a b : List α'
x y : α'
l : List α'
⊢ Coe.coe <$> Traversable.traverse f (y :: x :: l) = Coe.coe <$> Traversable.traverse f (x :: y :: l)
[PROOFSTEP]
case swap x y
l =>
have :
(fun a b (l : List β') ↦ (↑(a :: b :: l) : Multiset β')) <$> f y <*> f x =
(fun a b l ↦ ↑(a :: b :: l)) <$> f x <*> f y :=
by
rw [CommApplicative.commutative_map]
congr
funext a b l
simpa [flip] using Perm.swap a b l
simp [(· ∘ ·), this, functor_norm, Coe.coe]
[GOAL]
F : Type u → Type u
inst✝¹ : Applicative F
inst✝ : CommApplicative F
α' β' : Type u
f : α' → F β'
a b : List α'
x y : α'
l : List α'
⊢ Coe.coe <$> Traversable.traverse f (y :: x :: l) = Coe.coe <$> Traversable.traverse f (x :: y :: l)
[PROOFSTEP]
have :
(fun a b (l : List β') ↦ (↑(a :: b :: l) : Multiset β')) <$> f y <*> f x =
(fun a b l ↦ ↑(a :: b :: l)) <$> f x <*> f y :=
by
rw [CommApplicative.commutative_map]
congr
funext a b l
simpa [flip] using Perm.swap a b l
[GOAL]
F : Type u → Type u
inst✝¹ : Applicative F
inst✝ : CommApplicative F
α' β' : Type u
f : α' → F β'
a b : List α'
x y : α'
l : List α'
⊢ (Seq.seq ((fun a b l => ↑(a :: b :: l)) <$> f y) fun x_1 => f x) =
Seq.seq ((fun a b l => ↑(a :: b :: l)) <$> f x) fun x => f y
[PROOFSTEP]
rw [CommApplicative.commutative_map]
[GOAL]
F : Type u → Type u
inst✝¹ : Applicative F
inst✝ : CommApplicative F
α' β' : Type u
f : α' → F β'
a b : List α'
x y : α'
l : List α'
⊢ (Seq.seq ((flip fun a b l => ↑(a :: b :: l)) <$> f x) fun x => f y) =
Seq.seq ((fun a b l => ↑(a :: b :: l)) <$> f x) fun x => f y
[PROOFSTEP]
congr
[GOAL]
case e_a.e_a
F : Type u → Type u
inst✝¹ : Applicative F
inst✝ : CommApplicative F
α' β' : Type u
f : α' → F β'
a b : List α'
x y : α'
l : List α'
⊢ (flip fun a b l => ↑(a :: b :: l)) = fun a b l => ↑(a :: b :: l)
[PROOFSTEP]
funext a b l
[GOAL]
case e_a.e_a.h.h.h
F : Type u → Type u
inst✝¹ : Applicative F
inst✝ : CommApplicative F
α' β' : Type u
f : α' → F β'
a✝ b✝ : List α'
x y : α'
l✝ : List α'
a b : β'
l : List β'
⊢ flip (fun a b l => ↑(a :: b :: l)) a b l = ↑(a :: b :: l)
[PROOFSTEP]
simpa [flip] using Perm.swap a b l
[GOAL]
F : Type u → Type u
inst✝¹ : Applicative F
inst✝ : CommApplicative F
α' β' : Type u
f : α' → F β'
a b : List α'
x y : α'
l : List α'
this :
(Seq.seq ((fun a b l => ↑(a :: b :: l)) <$> f y) fun x_1 => f x) =
Seq.seq ((fun a b l => ↑(a :: b :: l)) <$> f x) fun x => f y
⊢ Coe.coe <$> Traversable.traverse f (y :: x :: l) = Coe.coe <$> Traversable.traverse f (x :: y :: l)
[PROOFSTEP]
simp [(· ∘ ·), this, functor_norm, Coe.coe]
[GOAL]
case trans
F : Type u → Type u
inst✝¹ : Applicative F
inst✝ : CommApplicative F
α' β' : Type u
f : α' → F β'
a b l₁✝ l₂✝ l₃✝ : List α'
a✝¹ : l₁✝ ~ l₂✝
a✝ : l₂✝ ~ l₃✝
a_ih✝¹ : Coe.coe <$> Traversable.traverse f l₁✝ = Coe.coe <$> Traversable.traverse f l₂✝
a_ih✝ : Coe.coe <$> Traversable.traverse f l₂✝ = Coe.coe <$> Traversable.traverse f l₃✝
⊢ Coe.coe <$> Traversable.traverse f l₁✝ = Coe.coe <$> Traversable.traverse f l₃✝
[PROOFSTEP]
case trans => simp [*]
[GOAL]
F : Type u → Type u
inst✝¹ : Applicative F
inst✝ : CommApplicative F
α' β' : Type u
f : α' → F β'
a b l₁✝ l₂✝ l₃✝ : List α'
a✝¹ : l₁✝ ~ l₂✝
a✝ : l₂✝ ~ l₃✝
a_ih✝¹ : Coe.coe <$> Traversable.traverse f l₁✝ = Coe.coe <$> Traversable.traverse f l₂✝
a_ih✝ : Coe.coe <$> Traversable.traverse f l₂✝ = Coe.coe <$> Traversable.traverse f l₃✝
⊢ Coe.coe <$> Traversable.traverse f l₁✝ = Coe.coe <$> Traversable.traverse f l₃✝
[PROOFSTEP]
case trans => simp [*]
[GOAL]
F : Type u → Type u
inst✝¹ : Applicative F
inst✝ : CommApplicative F
α' β' : Type u
f : α' → F β'
a b l₁✝ l₂✝ l₃✝ : List α'
a✝¹ : l₁✝ ~ l₂✝
a✝ : l₂✝ ~ l₃✝
a_ih✝¹ : Coe.coe <$> Traversable.traverse f l₁✝ = Coe.coe <$> Traversable.traverse f l₂✝
a_ih✝ : Coe.coe <$> Traversable.traverse f l₂✝ = Coe.coe <$> Traversable.traverse f l₃✝
⊢ Coe.coe <$> Traversable.traverse f l₁✝ = Coe.coe <$> Traversable.traverse f l₃✝
[PROOFSTEP]
simp [*]
[GOAL]
F : Type u → Type u
inst✝¹ : Applicative F
inst✝ : CommApplicative F
α' β' : Type u
f : α' → F β'
α✝ : Type ?u.36389
x✝ : Multiset α✝
⊢ id <$> x✝ = x✝
[PROOFSTEP]
simp only [fmap_def, id_eq, map_id']
[GOAL]
F : Type u → Type u
inst✝¹ : Applicative F
inst✝ : CommApplicative F
α' β' : Type u
f : α' → F β'
α✝ β✝ : Type ?u.36389
x✝¹ : α✝
x✝ : α✝ → Multiset β✝
⊢ pure x✝¹ >>= x✝ = x✝ x✝¹
[PROOFSTEP]
simp only [pure_def, bind_def, singleton_bind]
[GOAL]
F : Type u → Type u
inst✝¹ : Applicative F
inst✝ : CommApplicative F
α' β' : Type u
f : α' → F β'
α✝ β✝ : Type ?u.36389
x✝¹ : α✝ → β✝
x✝ : Multiset α✝
⊢ (do
let y ← x✝
pure (x✝¹ y)) =
x✝¹ <$> x✝
[PROOFSTEP]
simp only [pure_def, bind_def, bind_singleton, fmap_def]
[GOAL]
F : Type u → Type u
inst✝¹ : Applicative F
inst✝ : CommApplicative F
α' β' : Type u
f : α' → F β'
α β : Type u_1
h : α → β
⊢ Functor.map h ∘ Coe.coe = Coe.coe ∘ Functor.map h
[PROOFSTEP]
funext
[GOAL]
case h
F : Type u → Type u
inst✝¹ : Applicative F
inst✝ : CommApplicative F
α' β' : Type u
f : α' → F β'
α β : Type u_1
h : α → β
x✝ : List α
⊢ (Functor.map h ∘ Coe.coe) x✝ = (Coe.coe ∘ Functor.map h) x✝
[PROOFSTEP]
simp only [Function.comp_apply, Coe.coe, fmap_def, coe_map, List.map_eq_map]
[GOAL]
F : Type u → Type u
inst✝¹ : Applicative F
inst✝ : CommApplicative F
α' β' : Type u
f : α' → F β'
α : Type u_1
x : Multiset α
⊢ traverse pure x = x
[PROOFSTEP]
refine' Quotient.inductionOn x _
[GOAL]
F : Type u → Type u
inst✝¹ : Applicative F
inst✝ : CommApplicative F
α' β' : Type u
f : α' → F β'
α : Type u_1
x : Multiset α
⊢ ∀ (a : List α), traverse pure (Quotient.mk (isSetoid α) a) = Quotient.mk (isSetoid α) a
[PROOFSTEP]
intro
[GOAL]
F : Type u → Type u
inst✝¹ : Applicative F
inst✝ : CommApplicative F
α' β' : Type u
f : α' → F β'
α : Type u_1
x : Multiset α
a✝ : List α
⊢ traverse pure (Quotient.mk (isSetoid α) a✝) = Quotient.mk (isSetoid α) a✝
[PROOFSTEP]
simp [traverse, Coe.coe]
[GOAL]
F : Type u → Type u
inst✝⁵ : Applicative F
inst✝⁴ : CommApplicative F
α' β' : Type u
f : α' → F β'
G H : Type u_1 → Type u_1
inst✝³ : Applicative G
inst✝² : Applicative H
inst✝¹ : CommApplicative G
inst✝ : CommApplicative H
α β γ : Type u_1
g : α → G β
h : β → H γ
x : Multiset α
⊢ traverse (Comp.mk ∘ Functor.map h ∘ g) x = Comp.mk (traverse h <$> traverse g x)
[PROOFSTEP]
refine' Quotient.inductionOn x _
[GOAL]
F : Type u → Type u
inst✝⁵ : Applicative F
inst✝⁴ : CommApplicative F
α' β' : Type u
f : α' → F β'
G H : Type u_1 → Type u_1
inst✝³ : Applicative G
inst✝² : Applicative H
inst✝¹ : CommApplicative G
inst✝ : CommApplicative H
α β γ : Type u_1
g : α → G β
h : β → H γ
x : Multiset α
⊢ ∀ (a : List α),
traverse (Comp.mk ∘ Functor.map h ∘ g) (Quotient.mk (isSetoid α) a) =
Comp.mk (traverse h <$> traverse g (Quotient.mk (isSetoid α) a))
[PROOFSTEP]
intro
[GOAL]
F : Type u → Type u
inst✝⁵ : Applicative F
inst✝⁴ : CommApplicative F
α' β' : Type u
f : α' → F β'
G H : Type u_1 → Type u_1
inst✝³ : Applicative G
inst✝² : Applicative H
inst✝¹ : CommApplicative G
inst✝ : CommApplicative H
α β γ : Type u_1
g : α → G β
h : β → H γ
x : Multiset α
a✝ : List α
⊢ traverse (Comp.mk ∘ Functor.map h ∘ g) (Quotient.mk (isSetoid α) a✝) =
Comp.mk (traverse h <$> traverse g (Quotient.mk (isSetoid α) a✝))
[PROOFSTEP]
simp only [traverse, quot_mk_to_coe, lift_coe, Coe.coe, Function.comp_apply, Functor.map_map, functor_norm]
[GOAL]
F : Type u → Type u
inst✝⁵ : Applicative F
inst✝⁴ : CommApplicative F
α' β' : Type u
f : α' → F β'
G H : Type u_1 → Type u_1
inst✝³ : Applicative G
inst✝² : Applicative H
inst✝¹ : CommApplicative G
inst✝ : CommApplicative H
α β γ : Type u_1
g : α → G β
h : β → H γ
x : Multiset α
a✝ : List α
⊢ Comp.mk (((fun x => ofList <$> x) ∘ Traversable.traverse h) <$> Traversable.traverse g a✝) =
Comp.mk
((Quotient.lift (Functor.map ofList ∘ Traversable.traverse h)
(_ :
∀ (a b : List β),
a ≈ b →
(Functor.map Coe.coe ∘ Traversable.traverse h) a = (Functor.map Coe.coe ∘ Traversable.traverse h) b) ∘
ofList) <$>
Traversable.traverse g a✝)
[PROOFSTEP]
simp only [Function.comp, lift_coe]
[GOAL]
F : Type u → Type u
inst✝³ : Applicative F
inst✝² : CommApplicative F
α' β' : Type u
f : α' → F β'
G : Type u_1 → Type u_1
inst✝¹ : Applicative G
inst✝ : CommApplicative G
α β γ : Type u_1
g : α → G β
h : β → γ
x : Multiset α
⊢ Functor.map h <$> traverse g x = traverse (Functor.map h ∘ g) x
[PROOFSTEP]
refine' Quotient.inductionOn x _
[GOAL]
F : Type u → Type u
inst✝³ : Applicative F
inst✝² : CommApplicative F
α' β' : Type u
f : α' → F β'
G : Type u_1 → Type u_1
inst✝¹ : Applicative G
inst✝ : CommApplicative G
α β γ : Type u_1
g : α → G β
h : β → γ
x : Multiset α
⊢ ∀ (a : List α),
Functor.map h <$> traverse g (Quotient.mk (isSetoid α) a) =
traverse (Functor.map h ∘ g) (Quotient.mk (isSetoid α) a)
[PROOFSTEP]
intro
[GOAL]
F : Type u → Type u
inst✝³ : Applicative F
inst✝² : CommApplicative F
α' β' : Type u
f : α' → F β'
G : Type u_1 → Type u_1
inst✝¹ : Applicative G
inst✝ : CommApplicative G
α β γ : Type u_1
g : α → G β
h : β → γ
x : Multiset α
a✝ : List α
⊢ Functor.map h <$> traverse g (Quotient.mk (isSetoid α) a✝) =
traverse (Functor.map h ∘ g) (Quotient.mk (isSetoid α) a✝)
[PROOFSTEP]
simp only [traverse, quot_mk_to_coe, lift_coe, Function.comp_apply, Functor.map_map, map_comp_coe]
[GOAL]
F : Type u → Type u
inst✝³ : Applicative F
inst✝² : CommApplicative F
α' β' : Type u
f : α' → F β'
G : Type u_1 → Type u_1
inst✝¹ : Applicative G
inst✝ : CommApplicative G
α β γ : Type u_1
g : α → G β
h : β → γ
x : Multiset α
a✝ : List α
⊢ (Coe.coe ∘ Functor.map h) <$> Traversable.traverse g a✝ = Coe.coe <$> Traversable.traverse (Functor.map h ∘ g) a✝
[PROOFSTEP]
rw [LawfulFunctor.comp_map, Traversable.map_traverse']
[GOAL]
F : Type u → Type u
inst✝³ : Applicative F
inst✝² : CommApplicative F
α' β' : Type u
f : α' → F β'
G : Type u_1 → Type u_1
inst✝¹ : Applicative G
inst✝ : CommApplicative G
α β γ : Type u_1
g : α → G β
h : β → γ
x : Multiset α
a✝ : List α
⊢ Coe.coe <$> Functor.map h <$> Traversable.traverse g a✝ =
Coe.coe <$> (Functor.map (Functor.map h) ∘ Traversable.traverse g) a✝
[PROOFSTEP]
rfl
[GOAL]
F : Type u → Type u
inst✝³ : Applicative F
inst✝² : CommApplicative F
α' β' : Type u
f : α' → F β'
G : Type u_1 → Type u_1
inst✝¹ : Applicative G
inst✝ : CommApplicative G
α β γ : Type u_1
g : α → β
h : β → G γ
x : Multiset α
⊢ traverse h (map g x) = traverse (h ∘ g) x
[PROOFSTEP]
refine' Quotient.inductionOn x _
[GOAL]
F : Type u → Type u
inst✝³ : Applicative F
inst✝² : CommApplicative F
α' β' : Type u
f : α' → F β'
G : Type u_1 → Type u_1
inst✝¹ : Applicative G
inst✝ : CommApplicative G
α β γ : Type u_1
g : α → β
h : β → G γ
x : Multiset α
⊢ ∀ (a : List α), traverse h (map g (Quotient.mk (isSetoid α) a)) = traverse (h ∘ g) (Quotient.mk (isSetoid α) a)
[PROOFSTEP]
intro
[GOAL]
F : Type u → Type u
inst✝³ : Applicative F
inst✝² : CommApplicative F
α' β' : Type u
f : α' → F β'
G : Type u_1 → Type u_1
inst✝¹ : Applicative G
inst✝ : CommApplicative G
α β γ : Type u_1
g : α → β
h : β → G γ
x : Multiset α
a✝ : List α
⊢ traverse h (map g (Quotient.mk (isSetoid α) a✝)) = traverse (h ∘ g) (Quotient.mk (isSetoid α) a✝)
[PROOFSTEP]
simp only [traverse, quot_mk_to_coe, coe_map, lift_coe, Function.comp_apply]
[GOAL]
F : Type u → Type u
inst✝³ : Applicative F
inst✝² : CommApplicative F
α' β' : Type u
f : α' → F β'
G : Type u_1 → Type u_1
inst✝¹ : Applicative G
inst✝ : CommApplicative G
α β γ : Type u_1
g : α → β
h : β → G γ
x : Multiset α
a✝ : List α
⊢ Coe.coe <$> Traversable.traverse h (List.map g a✝) = Coe.coe <$> Traversable.traverse (h ∘ g) a✝
[PROOFSTEP]
rw [← Traversable.traverse_map h g, List.map_eq_map]
[GOAL]
F : Type u → Type u
inst✝⁵ : Applicative F
inst✝⁴ : CommApplicative F
α' β' : Type u
f✝ : α' → F β'
G H : Type u_1 → Type u_1
inst✝³ : Applicative G
inst✝² : Applicative H
inst✝¹ : CommApplicative G
inst✝ : CommApplicative H
eta : ApplicativeTransformation G H
α β : Type u_1
f : α → G β
x : Multiset α
⊢ (fun {α} => ApplicativeTransformation.app eta α) (traverse f x) =
traverse ((fun {α} => ApplicativeTransformation.app eta α) ∘ f) x
[PROOFSTEP]
refine' Quotient.inductionOn x _
[GOAL]
F : Type u → Type u
inst✝⁵ : Applicative F
inst✝⁴ : CommApplicative F
α' β' : Type u
f✝ : α' → F β'
G H : Type u_1 → Type u_1
inst✝³ : Applicative G
inst✝² : Applicative H
inst✝¹ : CommApplicative G
inst✝ : CommApplicative H
eta : ApplicativeTransformation G H
α β : Type u_1
f : α → G β
x : Multiset α
⊢ ∀ (a : List α),
(fun {α} => ApplicativeTransformation.app eta α) (traverse f (Quotient.mk (isSetoid α) a)) =
traverse ((fun {α} => ApplicativeTransformation.app eta α) ∘ f) (Quotient.mk (isSetoid α) a)
[PROOFSTEP]
intro
[GOAL]
F : Type u → Type u
inst✝⁵ : Applicative F
inst✝⁴ : CommApplicative F
α' β' : Type u
f✝ : α' → F β'
G H : Type u_1 → Type u_1
inst✝³ : Applicative G
inst✝² : Applicative H
inst✝¹ : CommApplicative G
inst✝ : CommApplicative H
eta : ApplicativeTransformation G H
α β : Type u_1
f : α → G β
x : Multiset α
a✝ : List α
⊢ (fun {α} => ApplicativeTransformation.app eta α) (traverse f (Quotient.mk (isSetoid α) a✝)) =
traverse ((fun {α} => ApplicativeTransformation.app eta α) ∘ f) (Quotient.mk (isSetoid α) a✝)
[PROOFSTEP]
simp only [quot_mk_to_coe, traverse, lift_coe, Function.comp_apply, ApplicativeTransformation.preserves_map,
LawfulTraversable.naturality]
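/- A hedged summary of what the proof states above establish: the
   `Traversable`-style laws for `Multiset.traverse`. The statements are
   transcribed from the goals in the log; the import path and the use of
   `example`/`sorry` in place of the named Mathlib lemmas (`comp_traverse`,
   `map_traverse`, `traverse_map`, `naturality`) are assumptions made for
   illustration. -/
import Mathlib.Data.Multiset.Functor

open Functor

universe u

variable {G H : Type u → Type u} [Applicative G] [Applicative H]
  [CommApplicative G] [CommApplicative H] {α β γ : Type u}

-- Composition: traversing with a composed effect equals composing traversals.
example (g : α → G β) (h : β → H γ) (x : Multiset α) :
    Multiset.traverse (Comp.mk ∘ Functor.map h ∘ g) x
      = Comp.mk (Multiset.traverse h <$> Multiset.traverse g x) := sorry

-- Mapping after traversing equals traversing with a mapped function.
example (g : α → G β) (h : β → γ) (x : Multiset α) :
    Functor.map h <$> Multiset.traverse g x
      = Multiset.traverse (Functor.map h ∘ g) x := sorry

-- Traversing a mapped multiset equals traversing the precomposed function.
example (g : α → β) (h : β → G γ) (x : Multiset α) :
    Multiset.traverse h (Multiset.map g x) = Multiset.traverse (h ∘ g) x := sorry

-- Naturality: applicative transformations commute with `traverse`.
example (eta : ApplicativeTransformation G H) (f : α → G β) (x : Multiset α) :
    eta (Multiset.traverse f x) = Multiset.traverse (fun a => eta (f a)) x := sorry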
|
With Celebrate Express, you don't have to be a party animal to know how to get great party supplies and costumes at fantastic prices. Use our promotional keycode and you will have access to products from three party brands: Birthday Express (which offers over 150 different themed children's party supplies), 1st Wishes (which features over 30 different themes for 1st birthday parties), and Costume Express (which provides a wide array of costumes for Halloween and costume parties). Celebrate Express is also able to design and manufacture its own party products so that party planners and partygoers get a personalized, unique experience. Go on, use our promotional code today at Celebrate Express and channel your inner party planner.
Click here to visit the Celebrate Express website.
HSN.com brings fashion, beauty, home goods, kitchen, and more to the shopper in search of both quality and value.
Target.com is the online store for Target, one of the largest and most successful discount retail stores. |
Formal statement is: lemma measure_null_measure[simp]: "measure (null_measure M) X = 0" Informal statement is: The measure of any set with respect to the null measure is zero. |
###VISUALISE MODEL OUTPUTS###
library(tidyverse)
library(ggpubr)
###############################################################################
folder <- dirname(rstudioapi::getSourceEditorContext()$path)
filename = 'regional_market_results_technology_options.csv'
data <- read.csv(file.path(folder, '..', 'results', 'model_results', filename))
names(data)[names(data) == 'GID_0'] <- 'country'
data$scenario_adopt = ''
data$scenario_adopt[grep("low", data$scenario)] = 'Low (2% Adoption Growth)'
data$scenario_adopt[grep("baseline", data$scenario)] = 'Baseline (3% Adoption Growth)'
data$scenario_adopt[grep("high", data$scenario)] = 'High (4% Adoption Growth)'
data$scenario_capacity = ''
data$scenario_capacity[grep("5_5_5", data$scenario)] = '5 Mbps Per User'
data$scenario_capacity[grep("10_10_10", data$scenario)] = '10 Mbps Per User'
data$scenario_capacity[grep("20_20_20", data$scenario)] = '20 Mbps Per User'
data$strategy_short = ''
data$strategy_short[grep("3G_umts_fiber", data$strategy)] = '3G (F)'
data$strategy_short[grep("3G_umts_wireless", data$strategy)] = '3G (W)'
data$strategy_short[grep("4G_epc_fiber", data$strategy)] = '4G (F)'
data$strategy_short[grep("4G_epc_wireless", data$strategy)] = '4G (W)'
data$strategy_short[grep("5G_nsa_fiber", data$strategy)] = '5G (F)'
data$strategy_short[grep("5G_nsa_wireless", data$strategy)] = '5G (W)'
data$strategy_short = factor(data$strategy_short, levels=c(
"3G (F)",
"4G (F)",
'5G (F)',
"3G (W)",
"4G (W)",
'5G (W)'
))
data = data %>% filter(data$strategy_short == '4G (W)')
data$scenario_capacity = factor(data$scenario_capacity,
levels=c("20 Mbps Per User",
"10 Mbps Per User",
"5 Mbps Per User"))
data$scenario_adopt = factor(data$scenario_adopt,
levels=c("Low (2% Adoption Growth)",
"Baseline (3% Adoption Growth)",
"High (4% Adoption Growth)"))
data <- data[(data$confidence == 50),]
data <- select(data, decile, scenario_adopt, scenario_capacity, strategy_short,
private_cost_per_network_user, government_cost_per_network_user)
data <- gather(data, metric, value, private_cost_per_network_user:
government_cost_per_network_user)
data$metric = factor(data$metric,
levels=c("private_cost_per_network_user",
"government_cost_per_network_user"),
labels=c("Private Cost",
"Government Cost"))
min_value = min(round(data$value))
max_value = max(round(data$value))
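# Helper: collapse the data to group-wise means of `varname`, keeping the
# standard deviation for error bars (uses plyr::ddply and plyr::rename;
# note that attaching plyr here masks some dplyr verbs).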
data_summary <- function(data, varname, groupnames){
require(plyr)
summary_func <- function(x, col){
c(mean = mean(x[[col]], na.rm=TRUE),
sd = sd(x[[col]], na.rm=TRUE))
}
data_sum<-ddply(data, groupnames, .fun=summary_func,
varname)
data_sum <- rename(data_sum, c("mean" = varname))
return(data_sum)
}
df2 <- data_summary(data, varname="value",
groupnames=c('decile', 'metric', 'scenario_adopt',
'scenario_capacity', 'strategy_short'))
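# Grouped bar chart: mean cost per user by population decile, split into
# private vs government cost, with +1 SD error bars; the dodge width of 9
# matches the default bar width for deciles spaced 10 units apart.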
ggplot(df2, aes(x=decile, y=value, fill=metric)) +
geom_bar(stat="identity", color="black", position=position_dodge()) +
geom_errorbar(aes(ymin=value, ymax=value+sd), width=.2,
position=position_dodge(9)) +
scale_fill_manual(values=c("#E1BE6A", "#40B0A6"), name=NULL) +
theme(legend.position = "bottom",
axis.text.x = element_text(angle = 45, hjust=1)
) +
labs(title = "Per User Social Cost of Universal Broadband by Technology for The Gambia",
colour=NULL,
subtitle = "Reported for 4G (W) using error bars representing one standard deviation",
x = NULL, y = "Cost Per User ($USD)") +
scale_x_continuous(expand = c(0, 0), breaks = seq(0,100, 10)) +
scale_y_continuous(expand = c(0, 0), limits=c(min_value+100, max_value-100)) +
theme(panel.spacing = unit(0.6, "lines")) +
guides(fill=guide_legend(ncol=3, reverse = TRUE)) +
facet_grid(scenario_capacity~scenario_adopt)
path = file.path(folder, 'figures', 'social_cost_per_user_by_strategy.png')
ggsave(path, units="in", width=8, height=8, dpi=300)
dev.off()
###############################################################################
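# Second figure: identical pipeline to the block above, but costs are
# reported per smartphone user rather than per network user.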
folder <- dirname(rstudioapi::getSourceEditorContext()$path)
filename = 'regional_market_results_technology_options.csv'
data <- read.csv(file.path(folder, '..', 'results', 'model_results', filename))
names(data)[names(data) == 'GID_0'] <- 'country'
data$scenario_adopt = ''
data$scenario_adopt[grep("low", data$scenario)] = 'Low (2% Adoption Growth)'
data$scenario_adopt[grep("baseline", data$scenario)] = 'Baseline (3% Adoption Growth)'
data$scenario_adopt[grep("high", data$scenario)] = 'High (4% Adoption Growth)'
data$scenario_capacity = ''
data$scenario_capacity[grep("5_5_5", data$scenario)] = '5 Mbps Per User'
data$scenario_capacity[grep("10_10_10", data$scenario)] = '10 Mbps Per User'
data$scenario_capacity[grep("20_20_20", data$scenario)] = '20 Mbps Per User'
data$strategy_short = ''
data$strategy_short[grep("3G_umts_fiber", data$strategy)] = '3G (F)'
data$strategy_short[grep("3G_umts_wireless", data$strategy)] = '3G (W)'
data$strategy_short[grep("4G_epc_fiber", data$strategy)] = '4G (F)'
data$strategy_short[grep("4G_epc_wireless", data$strategy)] = '4G (W)'
data$strategy_short[grep("5G_nsa_fiber", data$strategy)] = '5G (F)'
data$strategy_short[grep("5G_nsa_wireless", data$strategy)] = '5G (W)'
data$strategy_short = factor(data$strategy_short, levels=c(
"3G (F)",
"4G (F)",
'5G (F)',
"3G (W)",
"4G (W)",
'5G (W)'
))
data = data %>% filter(data$strategy_short == '4G (W)')
data$scenario_capacity = factor(data$scenario_capacity,
levels=c("20 Mbps Per User",
"10 Mbps Per User",
"5 Mbps Per User"))
data$scenario_adopt = factor(data$scenario_adopt,
levels=c("Low (2% Adoption Growth)",
"Baseline (3% Adoption Growth)",
"High (4% Adoption Growth)"))
data <- data[(data$confidence == 50),]
data <- select(data, decile, scenario_adopt, scenario_capacity, strategy_short,
private_cost_per_smartphone_user,
government_cost_per_smartphone_user)
data <- gather(data, metric, value, private_cost_per_smartphone_user:
government_cost_per_smartphone_user)
data$metric = factor(data$metric,
levels=c("private_cost_per_smartphone_user",
"government_cost_per_smartphone_user"),
labels=c("Private Cost",
"Government Cost"))
min_value = min(round(data$value))
max_value = max(round(data$value))
data_summary <- function(data, varname, groupnames){
require(plyr)
summary_func <- function(x, col){
c(mean = mean(x[[col]], na.rm=TRUE),
sd = sd(x[[col]], na.rm=TRUE))
}
data_sum<-ddply(data, groupnames, .fun=summary_func,
varname)
data_sum <- rename(data_sum, c("mean" = varname))
return(data_sum)
}
df2 <- data_summary(data, varname="value",
groupnames=c('decile', 'metric', 'scenario_adopt',
'scenario_capacity', 'strategy_short'))
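# Same grouped bar chart as above, now on per-smartphone-user costs.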
ggplot(df2, aes(x=decile, y=value, fill=metric)) +
geom_bar(stat="identity", color="black", position=position_dodge()) +
geom_errorbar(aes(ymin=value, ymax=value+sd), width=.2,
position=position_dodge(9)) +
scale_fill_manual(values=c("#E1BE6A", "#40B0A6"), name=NULL) +
theme(legend.position = "bottom",
axis.text.x = element_text(angle = 45, hjust=1)
) +
labs(title = "Smartphone User Social Cost of Universal Broadband by Technology for The Gambia",
colour=NULL,
subtitle = "Reported for 4G (W) using error bars representing one standard deviation",
x = NULL, y = "Cost Per Smartphone ($USD)") +
scale_x_continuous(expand = c(0, 0), breaks = seq(0,100, 10)) +
scale_y_continuous(expand = c(0, 0), limits=c(min_value+100, max_value)) +
theme(panel.spacing = unit(0.6, "lines")) +
guides(fill=guide_legend(ncol=3, reverse = TRUE)) +
facet_grid(scenario_capacity~scenario_adopt)
path = file.path(folder, 'figures', 'social_cost_per_smartphone_user_by_strategy.png')
ggsave(path, units="in", width=8, height=8, dpi=300)
dev.off()
|
-- Christian Sattler, 2013-12-31
-- Testing eta-expansion of bound record metavars, as implemented by Andreas.
module Issue376-2 where
{- A simple example. -}
module example-0 {A B : Set} where
record Prod : Set where
constructor _,_
field
fst : A
snd : B
module _ (F : (Prod → Set) → Set) where
q : (∀ f → F f) → (∀ f → F f)
q h _ = h (λ {(a , b) → _})
{- A more complex, real-life-based example: the dependent
generalization of (A × B) × (C × D) ≃ (A × C) × (B × D). -}
module example-1 where
record Σ (A : Set) (B : A → Set) : Set where
constructor _,_
field
fst : A
snd : B fst
postulate
_≃_ : (A B : Set) → Set
Σ-interchange-Type =
(A : Set) (B C : A → Set) (D : (a : A) → B a → C a → Set)
→ Σ (Σ A B) (λ {(a , b) → Σ (C a) (λ c → D a b c)})
≃ Σ (Σ A C) (λ {(a , c) → Σ (B a) (λ b → D a b c)})
postulate
Σ-interchange : Σ-interchange-Type
{- Can the implicit arguments to Σ-interchange
be inferred from the global type? -}
Σ-interchange' : Σ-interchange-Type
Σ-interchange' A B C D = Σ-interchange _ _ _ _
|