function [MHz] = GHz2MHz(GHz)
% Convert frequency from gigahertz to megahertz.
% Chad A. Greene 2012
MHz = GHz*1e+3;
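% Example: convert a 2.4 GHz carrier frequency to megahertz.
%   f_MHz = GHz2MHz(2.4)   % returns 2400, since 2.4 GHz = 2400 MHz
|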
[STATEMENT]
lemma llrg_linear_sys:
"llrg \<R> \<Longrightarrow> linear_sys \<R>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. llrg \<R> \<Longrightarrow> linear_sys \<R>
[PROOF STEP]
by (auto simp: llrg_def)
|
Oceano Way is a residential street in the Lake Alhambra neighborhood of East Davis.
Intersecting Streets
Alhambra Drive and across the intersection 5th Street
Gaviota Place
Los Cerros Place
Del Valle Place
Ascada Place
Concha Place
Halcon Place
Arena Drive
|
import algebra.homology.homology
import category_theory.abelian.homology
import for_mathlib.commsq
import for_mathlib.exact_lift_desc
/-!
# `has_homology f g H`
If `A B C H` are objects of an abelian category, if `f : A ⟶ B` and if `g : B ⟶ C`, then
a term of type `has_homology f g H` can be thought of as the claim that `H` "is" the
homology of the complex `A ⟶ B ⟶ C`, or, more precisely, as an isomorphism between `H`
and the homology of this complex.
-/
noncomputable theory
universes v u
open category_theory category_theory.limits
variables {𝓐 : Type u} [category.{v} 𝓐] [abelian 𝓐]
variables {A B C : 𝓐} {f : A ⟶ B} {g : B ⟶ C} {H : 𝓐}
/-- If `f : A ⟶ B` and `g : B ⟶ C` are morphisms in an abelian category, then `has_homology f g H`
is the claim that `f ≫ g = 0` and furthermore an identification of `H` with the middle homology of
the corresponding three term exact sequence formed by `f` and `g`. -/
structure has_homology (f : A ⟶ B) (g : B ⟶ C) (H : 𝓐) :=
(w : f ≫ g = 0)
(π : kernel g ⟶ H)
(ι : H ⟶ cokernel f)
(π_ι : π ≫ ι = kernel.ι _ ≫ cokernel.π _)
(ex_π : exact (kernel.lift g f w) π)
(ι_ex : exact ι (cokernel.desc f g w))
[epi_π : epi π]
[mono_ι : mono ι]
-- move me
instance (f : A ⟶ B) (g : B ⟶ C) (w : f ≫ g = 0) : epi (homology.π' f g w) := epi_comp _ _
-- move me
instance (f : A ⟶ B) (g : B ⟶ C) (w : f ≫ g = 0) : mono (homology.ι f g w) := mono_comp _ _
/-- If `f ≫ g = 0` then `homology f g w` can be identified with the homology of the three
term exact sequence coming from `f` and `g`. -/
def homology.has (f : A ⟶ B) (g : B ⟶ C) (w : f ≫ g = 0) :
has_homology f g (homology f g w) :=
{ w := w,
π := homology.π' f g w,
ι := homology.ι f g w,
π_ι := homology.π'_ι _ _ _,
ex_π := begin
delta homology.π',
rw exact_comp_iso,
exact abelian.exact_cokernel _
end,
ι_ex := begin
delta homology.ι,
rw exact_iso_comp,
exact exact_kernel_ι
end,
epi_π := by apply_instance,
mono_ι := by apply_instance }
lemma homology.has_π {f : A ⟶ B} {g : B ⟶ C} (w : f ≫ g = 0) :
(homology.has f g w).π = homology.π' f g w := rfl
lemma homology.has_ι {f : A ⟶ B} {g : B ⟶ C} (w : f ≫ g = 0) :
(homology.has f g w).ι = homology.ι f g w := rfl
namespace has_homology
attribute [instance] epi_π mono_ι
attribute [reassoc] π_ι
section misc
@[simp, reassoc] lemma ι_desc (hH : has_homology f g H) : hH.ι ≫ cokernel.desc f g hH.w = 0 :=
hH.ι_ex.w
@[simp, reassoc] lemma lift_π (hH : has_homology f g H) : kernel.lift g f hH.w ≫ hH.π = 0 :=
hH.ex_π.w
def of_iso {H₁ H₂ : 𝓐} (hH : has_homology f g H₁) (i : H₁ ≅ H₂) : has_homology f g H₂ :=
{ w := hH.w,
π := hH.π ≫ i.hom,
ι := i.inv ≫ hH.ι,
π_ι := by simp [hH.π_ι],
ex_π := exact_comp_iso.2 hH.ex_π,
ι_ex := exact_iso_comp.2 hH.ι_ex,
epi_π := epi_comp _ _,
mono_ι := mono_comp _ _ }
end misc
section degenerate
-- move this; I couldn't find it
lemma exact_iso_comp_snd_iff_exact_comp_iso_fst_iff {D : 𝓐} (f : A ⟶ B) {e : B ⟶ C} (g : C ⟶ D)
[is_iso e] : exact f (e ≫ g) ↔ exact (f ≫ e) g :=
⟨preadditive.exact_of_iso_of_exact' f (e ≫ g) (f ≫ e) g (iso.refl A) (as_iso e) (iso.refl D)
(by simp) (by simp), preadditive.exact_of_iso_of_exact' (f ≫ e) g f (e ≫ g) (iso.refl A)
(as_iso e).symm (iso.refl D) (by simp) (by simp)⟩
-- move this; I couldn't find it
lemma exact_zero_right_of_epi [epi f] : exact f (0 : B ⟶ C) :=
⟨comp_zero, image_to_kernel_epi_of_epi_of_zero f⟩
local attribute [instance] epi_comp --`mono_comp` is a global instance!
def fst_eq_zero : has_homology (0 : A ⟶ B) g (kernel g) :=
{ w := zero_comp,
π := 𝟙 _,
ι := kernel.ι g ≫ cokernel.π 0,
π_ι := by simp,
ex_π := begin
rw kernel.lift_zero,
exact exact_zero_left_of_mono A,
end,
ι_ex := begin
rw [← exact_iso_comp_snd_iff_exact_comp_iso_fst_iff, cokernel.π_desc],
exact exact_kernel_ι,
end,
epi_π := infer_instance,
mono_ι := infer_instance }
def snd_eq_zero : has_homology f (0 : B ⟶ C) (cokernel f) :=
{ w := comp_zero,
π := kernel.ι 0 ≫ cokernel.π f,
ι := 𝟙 _,
π_ι := by simp,
ex_π := begin
rw [exact_iso_comp_snd_iff_exact_comp_iso_fst_iff, kernel.lift_ι],
exact abelian.exact_cokernel f,
end,
ι_ex := begin
rw [cokernel.desc_zero],
exact exact_zero_right_of_epi,
end,
epi_π := infer_instance,
mono_ι := infer_instance }
def snd_eq_zero' (hg : g = 0) : has_homology f g (cokernel f) :=
{ w := hg.symm ▸ comp_zero,
π := kernel.ι g ≫ cokernel.π f,
ι := 𝟙 _,
π_ι := by simp,
ex_π := begin
subst hg,
simp [exact_iso_comp_snd_iff_exact_comp_iso_fst_iff, kernel.lift_ι],
exact abelian.exact_cokernel f,
end,
ι_ex := begin
subst hg,
rw [cokernel.desc_zero],
exact exact_zero_right_of_epi,
end,
epi_π := by subst hg; apply_instance,
mono_ι := infer_instance }
def fst_snd_eq_zero : has_homology (0 : A ⟶ B) (0 : B ⟶ C) B :=
{ w := comp_zero,
π := kernel.ι 0,
ι := cokernel.π 0,
π_ι := rfl,
ex_π := begin
rw kernel.lift_zero,
exact exact_zero_left_of_mono A,
end,
ι_ex := begin
rw cokernel.desc_zero,
exact exact_zero_right_of_epi,
end,
epi_π := infer_instance,
mono_ι := infer_instance }
def fst_snd_eq_zero' (hf : f = 0) (hg : g = 0) : has_homology f g B :=
{ w := hf.symm ▸ zero_comp,
π := kernel.ι g,
ι := cokernel.π f,
π_ι := rfl,
ex_π := begin
subst hf,
rw kernel.lift_zero,
exact exact_zero_left_of_mono A,
end,
ι_ex := begin
subst hg,
rw cokernel.desc_zero,
exact exact_zero_right_of_epi,
end,
epi_π := by subst hg; apply_instance,
mono_ι := by subst hf; apply_instance }
end degenerate
section ext
lemma ext_π (hH : has_homology f g H) {X : 𝓐} (φ ψ : H ⟶ X) (h : hH.π ≫ φ = hH.π ≫ ψ) : φ = ψ :=
by rwa cancel_epi at h
lemma ext_ι (hH : has_homology f g H) {X : 𝓐} (φ ψ : X ⟶ H) (h : φ ≫ hH.ι = ψ ≫ hH.ι) : φ = ψ :=
by rwa cancel_mono at h
end ext
section lift
variables (hH : has_homology f g H)
variables {X : 𝓐} (φ : X ⟶ cokernel f) (hφ : φ ≫ cokernel.desc f g hH.w = 0)
/-- If `has_homology f g H` and `φ : X ⟶ cokernel f` composes to zero with the canonical
map `cokernel f ⟶ C` then `has_homology.lift φ` is the morphism `X ⟶ H` which recovers `φ` after
composing with the canonical map `H ⟶ cokernel f` (the statement that the triangle commutes
is `lift_comp_ι`). -/
def lift : X ⟶ H := hH.ι_ex.mono_lift φ hφ
@[simp, reassoc] lemma lift_comp_ι : hH.lift φ hφ ≫ hH.ι = φ := hH.ι_ex.mono_lift_comp φ hφ
lemma lift_unique (e : X ⟶ H) (he : e ≫ hH.ι = φ) : e = hH.lift φ hφ :=
hH.ι_ex.mono_lift_unique _ _ e he
@[simp] lemma lift_ι : hH.lift hH.ι hH.ι_desc = 𝟙 H :=
(hH.lift_unique _ _ _ $ category.id_comp _).symm
lemma π_eq_lift : hH.π = hH.lift (kernel.ι _ ≫ cokernel.π _)
(by simp only [category.assoc, cokernel.π_desc, kernel.condition]) :=
lift_unique _ _ _ _ hH.π_ι
@[reassoc] lemma comp_lift {X Y : 𝓐} (φ : X ⟶ Y) (ψ : Y ⟶ cokernel f)
(hψ : ψ ≫ cokernel.desc f g hH.w = 0) : φ ≫ hH.lift ψ hψ = hH.lift (φ ≫ ψ)
(by rw [category.assoc, hψ, comp_zero]) :=
by { apply lift_unique, rw [category.assoc, lift_comp_ι] }
lemma homology_lift_eq {X Y Z W : 𝓐} (f : X ⟶ Y) (g : Y ⟶ Z) (w : f ≫ g = 0)
(φ : W ⟶ cokernel f) (hφ) :
homology.lift f g w φ hφ = (homology.has f g w).lift φ hφ :=
begin
ext,
simp only [homology.lift_ι],
dsimp [has_homology.lift],
erw [exact.mono_lift_comp],
end
end lift
section desc
variables (hH : has_homology f g H)
variables {X : 𝓐} (φ : kernel g ⟶ X) (hφ : kernel.lift g f hH.w ≫ φ = 0)
/-- If `has_homology f g H` and `φ : kernel g ⟶ X` becomes zero when precomposed with
the canonical map from `A` to `kernel g`, then `has_homology.desc φ` is the morphism `H ⟶ X` which
recovers `φ` after composing with the canonical map `kernel g ⟶ H`. The proof that this
triangle commutes is `π_comp_desc`. -/
def desc : H ⟶ X := hH.ex_π.epi_desc φ hφ
@[simp, reassoc] lemma π_comp_desc : hH.π ≫ hH.desc φ hφ = φ := hH.ex_π.comp_epi_desc φ hφ
lemma desc_unique (e : H ⟶ X) (he : hH.π ≫ e = φ) : e = hH.desc φ hφ :=
hH.ex_π.epi_desc_unique _ _ e he
@[simp] lemma desc_π : hH.desc hH.π hH.lift_π = 𝟙 H :=
(hH.desc_unique _ _ _ $ category.comp_id _).symm
lemma ι_eq_desc : hH.ι =
hH.desc (kernel.ι _ ≫ cokernel.π _) (by simp only [kernel.lift_ι_assoc, cokernel.condition]) :=
desc_unique _ _ _ _ hH.π_ι
@[reassoc] lemma desc_comp {X Y : 𝓐} (φ : kernel g ⟶ X) (ψ : X ⟶ Y) (hφ : kernel.lift g f hH.w ≫ φ = 0) :
hH.desc φ hφ ≫ ψ = hH.desc (φ ≫ ψ) (by rw [reassoc_of hφ, zero_comp]) :=
by { apply desc_unique, rw [π_comp_desc_assoc] }
lemma homology_desc_eq {X Y Z W : 𝓐} (f : X ⟶ Y) (g : Y ⟶ Z) (w)
(φ : kernel g ⟶ W) (hφ) :
homology.desc' f g w φ hφ = (homology.has f g w).desc φ hφ :=
begin
ext,
simp only [homology.π'_desc'],
dsimp [has_homology.desc],
simp only [exact.comp_epi_desc],
end
end desc
section map
variables {A₁ B₁ C₁ H₁ A₂ B₂ C₂ H₂ A₃ B₃ C₃ H₃ : 𝓐}
variables {f₁ : A₁ ⟶ B₁} {g₁ : B₁ ⟶ C₁} (h₁ : has_homology f₁ g₁ H₁)
variables {f₂ : A₂ ⟶ B₂} {g₂ : B₂ ⟶ C₂} (h₂ : has_homology f₂ g₂ H₂)
variables {f₃ : A₃ ⟶ B₃} {g₃ : B₃ ⟶ C₃} (h₃ : has_homology f₃ g₃ H₃)
variables {α : A₁ ⟶ A₂} {β : B₁ ⟶ B₂} {γ : C₁ ⟶ C₂}
variables {α' : A₂ ⟶ A₃} {β' : B₂ ⟶ B₃} {γ' : C₂ ⟶ C₃}
variables (sq1 : commsq f₁ α β f₂) (sq2 : commsq g₁ β γ g₂)
variables (sq1' : commsq f₂ α' β' f₃) (sq2' : commsq g₂ β' γ' g₃)
include h₁ h₂ sq1 sq2
/-- If `h₁ : has_homology f₁ g₁ H₁` and `h₂ : has_homology f₂ g₂ H₂` then given compatible morphisms
`f₁ ⟶ g₁` and `f₂ ⟶ g₂`, `has_homology.map h₁ h₂` is the induced morphism `H₁ ⟶ H₂`. -/
def map : H₁ ⟶ H₂ :=
h₁.desc (h₂.lift (kernel.ι _ ≫ β ≫ cokernel.π _) $
by simp only [category.assoc, cokernel.π_desc, ← sq2.w, kernel.condition_assoc, zero_comp]) $
begin
apply h₂.ext_ι,
simp only [category.assoc, zero_comp, h₂.lift_comp_ι, kernel.lift_ι_assoc, sq1.w_assoc,
cokernel.condition, comp_zero],
end
omit h₁ h₂ sq1 sq2
@[simp, reassoc] lemma π_map :
h₁.π ≫ h₁.map h₂ sq1 sq2 = (h₂.lift (kernel.ι _ ≫ β ≫ cokernel.π _) $
by simp only [category.assoc, cokernel.π_desc, ← sq2.w, kernel.condition_assoc, zero_comp]) :=
h₁.π_comp_desc _ _
@[simp, reassoc] lemma map_ι :
h₁.map h₂ sq1 sq2 ≫ h₂.ι = (h₁.desc (kernel.ι _ ≫ β ≫ cokernel.π _) $
by simp only [kernel.lift_ι_assoc, sq1.w_assoc, cokernel.condition, comp_zero]) :=
by { apply h₁.desc_unique, rw [h₁.π_map_assoc, h₂.lift_comp_ι] }
lemma π_map_ι : h₁.π ≫ h₁.map h₂ sq1 sq2 ≫ h₂.ι = kernel.ι _ ≫ β ≫ cokernel.π _ := by simp
lemma homology_map_eq (w₁ : f₁ ≫ g₁ = 0) (w₂ : f₂ ≫ g₂ = 0)
(e₁ : α ≫ (arrow.mk f₂).hom = (arrow.mk f₁).hom ≫ β)
(e₂ : β ≫ (arrow.mk g₂).hom = (arrow.mk g₁).hom ≫ γ) :
homology.map w₁ w₂ (arrow.hom_mk e₁) (arrow.hom_mk e₂) rfl =
(homology.has f₁ g₁ w₁).map (homology.has f₂ g₂ w₂)
(commsq.of_eq e₁.symm) (commsq.of_eq e₂.symm) :=
begin
--- I don't think using `exact.epi_desc` and `exact.mono_desc` is a good choice...
rw homology.map_eq_desc'_lift_left,
apply (homology.has _ _ w₁).ext_π,
apply (homology.has _ _ w₂).ext_ι,
simp [homology_lift_eq, homology_desc_eq],
end
lemma homology_map_eq' (w₁ : f₁ ≫ g₁ = 0) (w₂ : f₂ ≫ g₂ = 0) :
homology.map w₁ w₂ ⟨α, β, sq1.w.symm⟩ ⟨β, γ, sq2.w.symm⟩ rfl =
(homology.has f₁ g₁ w₁).map (homology.has f₂ g₂ w₂) sq1 sq2 :=
homology_map_eq _ _ _ _
lemma eq_map_of_π_map_ι (φ : H₁ ⟶ H₂) (hφ : h₁.π ≫ φ ≫ h₂.ι = kernel.ι g₁ ≫ β ≫ cokernel.π f₂) :
φ = h₁.map h₂ sq1 sq2 :=
by rwa [← π_map_ι h₁ h₂ sq1 sq2, cancel_epi, cancel_mono] at hφ
@[simp, reassoc] lemma lift_map
{X : 𝓐} (φ : X ⟶ cokernel f₁) (hφ : φ ≫ cokernel.desc f₁ g₁ h₁.w = 0) :
h₁.lift φ hφ ≫ h₁.map h₂ sq1 sq2 = h₂.lift (φ ≫ cokernel.map f₁ f₂ α β sq1.w)
(by { rw [category.assoc, cokernel.map_desc, reassoc_of hφ, zero_comp], exact sq2.w }) :=
begin
apply lift_unique, rw [category.assoc, map_ι],
conv_rhs { rw [← lift_comp_ι h₁ φ hφ, category.assoc] },
congr' 1,
apply h₁.ext_π,
rw [π_comp_desc, π_ι_assoc, cokernel.π_desc],
end
-- move this
attribute [reassoc] limits.kernel.lift_map
@[simp, reassoc] lemma map_desc
{X : 𝓐} (φ : kernel g₂ ⟶ X) (hφ : kernel.lift g₂ f₂ h₂.w ≫ φ = 0) :
h₁.map h₂ sq1 sq2 ≫ h₂.desc φ hφ = h₁.desc (kernel.map g₁ g₂ β γ sq2.w ≫ φ)
(by { rw [category_theory.limits.kernel.lift_map_assoc, hφ, comp_zero], exact sq1.w }) :=
begin
apply desc_unique, rw [π_map_assoc],
conv_rhs { rw [← π_comp_desc h₂ φ hφ, ← category.assoc] },
congr' 1,
apply h₂.ext_ι,
rw [lift_comp_ι, category.assoc, π_ι, kernel.lift_ι_assoc, category.assoc],
end
/-- Gluing two commutative squares "vertically" (the convention is that `f`s and `g`s are
horizontal morphisms, and `α`s and `β`s are vertical morphisms). -/
def _root_.commsq.vcomp : commsq f₁ (α ≫ α') (β ≫ β') f₃ :=
commsq.of_eq $
calc f₁ ≫ β ≫ β' = α ≫ f₂ ≫ β' : sq1.w_assoc β'
... = α ≫ α' ≫ f₃ : congr_arg _ $ sq1'.w
... = (α ≫ α') ≫ f₃ : (category.assoc _ _ _).symm
/-- A commutative square with identity isomorphisms for the two vertical maps. -/
def _root_.commsq.vrefl (f : A ⟶ B) : commsq f (iso.refl _).hom (iso.refl _).hom f :=
commsq.of_eq $ by rw [iso.refl_hom, iso.refl_hom, category.id_comp, category.comp_id]
/-- The inverse of a commutative square whose vertical maps are isomorphisms. -/
def _root_.commsq.vinv {α : A₁ ≅ A₂} {β : B₁ ≅ B₂} (sq1 : commsq f₁ α.hom β.hom f₂) :
commsq f₂ α.inv β.inv f₁ :=
commsq.of_eq $ by rw [iso.comp_inv_eq, category.assoc, iso.eq_inv_comp, sq1.w]
lemma map_comp_map :
h₁.map h₂ sq1 sq2 ≫ h₂.map h₃ sq1' sq2' = h₁.map h₃ (sq1.vcomp sq1') (sq2.vcomp sq2') :=
begin
apply h₁.ext_π, apply h₃.ext_ι,
simp only [category.assoc, map_ι, map_desc, π_comp_desc, kernel.lift_ι_assoc],
end
lemma map_id (h : has_homology f g H) {α : A ⟶ A} {β : B ⟶ B} {γ : C ⟶ C}
(sq1 : commsq f α β f) (sq2 : commsq g β γ g) (hβ : β = 𝟙 _) :
h.map h sq1 sq2 = 𝟙 H :=
begin
apply h.ext_π, apply h.ext_ι,
rw [π_map, lift_comp_ι, category.comp_id, π_ι, hβ, category.id_comp],
end
/- The isomorphism on `has_homology` induced by isomorphisms `f₁ ≅ f₂` and `g₁ ≅ g₂`. -/
@[simps] def map_iso {α : A₁ ≅ A₂} {β : B₁ ≅ B₂} {γ : C₁ ≅ C₂}
(sq1 : commsq f₁ α.hom β.hom f₂) (sq2 : commsq g₁ β.hom γ.hom g₂) :
H₁ ≅ H₂ :=
{ hom := h₁.map h₂ sq1 sq2,
inv := h₂.map h₁ sq1.vinv sq2.vinv,
hom_inv_id' := by { rw [map_comp_map, map_id], exact β.hom_inv_id },
inv_hom_id' := by { rw [map_comp_map, map_id], exact β.inv_hom_id } }
/- The canonical isomorphism between H₁ and H₂ if both satisfy `has_homology f g Hᵢ`. -/
abbreviation iso (h₁ : has_homology f g H₁) (h₂ : has_homology f g H₂) :
H₁ ≅ H₂ :=
map_iso h₁ h₂ (_root_.commsq.vrefl f) (_root_.commsq.vrefl g)
lemma iso_inv (h₁ : has_homology f g H₁) (h₂ : has_homology f g H₂) :
(iso h₁ h₂).inv = (iso h₂ h₁).hom := rfl
lemma π_iso (h₁ : has_homology f g H₁) (h₂ : has_homology f g H₂) :
h₁.π ≫ (h₁.iso h₂).hom = h₂.π :=
begin
simp only [iso.refl_hom, category.id_comp, map_iso_hom, π_map],
exact (π_eq_lift h₂).symm,
end
lemma iso_ι (h₁ : has_homology f g H₁) (h₂ : has_homology f g H₂) :
(h₁.iso h₂).hom ≫ h₂.ι = h₁.ι :=
begin
simp only [iso.refl_hom, category.id_comp, map_iso_hom, map_ι],
exact (ι_eq_desc h₁).symm,
end
lemma map_iso_homology_map :
has_homology.map h₁ h₂ sq1 sq2 = (has_homology.iso h₁ (homology.has f₁ g₁ h₁.w)).hom ≫
(homology.map h₁.w h₂.w ⟨α, β, sq1.w.symm⟩ ⟨β, γ, sq2.w.symm⟩ rfl) ≫
(has_homology.iso h₂ (homology.has f₂ g₂ h₂.w)).inv:=
begin
apply h₁.ext_π,
apply h₂.ext_ι,
simp [homology_map_eq'],
end
end map
section op
open opposite
def op (h : has_homology f g H) : has_homology g.op f.op (op H) :=
{ w := by rw [← op_comp, h.w, op_zero],
π := (kernel_op_op f).hom ≫ h.ι.op,
ι := h.π.op ≫ (cokernel_op_op g).inv,
π_ι := by {
simp only [kernel_op_op_hom, cokernel_op_op_inv, ← op_comp, category.assoc, h.π_ι_assoc,
kernel.lift_ι_assoc, cokernel.π_desc], refl, },
ex_π := begin
rw [← exact_comp_hom_inv_comp_iff (kernel_op_op f), iso.inv_hom_id_assoc, kernel_op_op_hom],
convert h.ι_ex.op using 1,
apply quiver.hom.unop_inj,
apply category_theory.limits.coequalizer.hom_ext,
erw [unop_comp, coequalizer.π_desc_assoc, coequalizer.π_desc],
rw [← unop_comp, kernel.lift_ι, g.unop_op],
end,
ι_ex := begin
rw [← exact_comp_hom_inv_comp_iff (cokernel_op_op g), category.assoc, iso.inv_hom_id,
category.comp_id, cokernel_op_op_inv],
convert h.ex_π.op using 1,
apply quiver.hom.unop_inj,
apply category_theory.limits.equalizer.hom_ext,
erw [unop_comp, equalizer.lift_ι, category.assoc, equalizer.lift_ι],
rw [← unop_comp, cokernel.π_desc, f.unop_op],
end,
epi_π := epi_comp _ _,
mono_ι := mono_comp _ _ }
-- @[simps]
def homology_unop_iso {A B C : 𝓐ᵒᵖ} (f : A ⟶ B) (g : B ⟶ C) (w : f ≫ g = 0) :
homology f g w ≅ opposite.op (homology g.unop f.unop (by { rw [← unop_comp, w, unop_zero] })) :=
(homology.has f g w).iso (homology.has g.unop f.unop _).op
def homology_op_iso {A B C : 𝓐} (f : A ⟶ B) (g : B ⟶ C) (w : f ≫ g = 0) :
homology g.op f.op (by rw [← op_comp, w, op_zero]) ≅ opposite.op (homology f g w) :=
homology_unop_iso _ _ _
end op
end has_homology
|
lemma reduced_labelling_nonzero: "j < n \<Longrightarrow> x j \<noteq> 0 \<Longrightarrow> reduced n x \<le> j"
|
[STATEMENT]
lemma type_wf_put_I:
assumes "type_wf h"
assumes "DocumentClass.type_wf (put\<^sub>O\<^sub>b\<^sub>j\<^sub>e\<^sub>c\<^sub>t ptr obj h)"
assumes "is_shadow_root_ptr_kind ptr \<Longrightarrow> is_shadow_root_kind obj"
shows "type_wf (put\<^sub>O\<^sub>b\<^sub>j\<^sub>e\<^sub>c\<^sub>t ptr obj h)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ShadowRootClass.type_wf (put ptr obj h)
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
ShadowRootClass.type_wf h
DocumentClass.type_wf (put ptr obj h)
is_shadow_root_ptr_kind ptr \<Longrightarrow> is_shadow_root_kind obj
goal (1 subgoal):
1. ShadowRootClass.type_wf (put ptr obj h)
[PROOF STEP]
by(auto simp add: type_wf_defs is_shadow_root_kind_def split: option.splits)
|
function c = prod(a,dim)
%PROD Implements prod(a,dim) for gradients
%
% c = prod(a,dim)
%
% same functionality as the Matlab function prod for matrices; the parameter dim is optional
%
% written 10/16/98 S.M. Rump
% modified 04/04/04 S.M. Rump set round to nearest for safety
% modified 04/06/05 S.M. Rump rounding unchanged
% modified 09/28/08 S.M. Rump check for rounding to nearest improved
%
e = 1e-30;
if 1+e==1-e % fast check for rounding to nearest
rndold = 0;
else
rndold = getround;
setround(0)
end
[m n] = size(a);
if nargin==1,
if m==1
dim=2;
else
dim=1;
end
end
if dim==1
c = ones(1,n);
for i=1:m
%VVVV c = c .* a(i,:);
s.type = '()'; s.subs = {i,':'}; c = c .* subsref(a,s);
%AAAA Matlab V5.2 bug fix
end
else
c = ones(m,1);
for i=1:n
%VVVV c = c .* a(:,i);
s.type = '()'; s.subs = {':',i}; c = c .* subsref(a,s);
%AAAA Matlab V5.2 bug fix
end
end
if rndold
setround(rndold)
end
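% Illustrative usage (a sketch; assumes INTLAB's gradient initializer
% gradientinit, as shipped with the toolbox this file belongs to):
%   x = gradientinit([1 2 3]);  % gradient vector with x.dx = eye(3)
%   p = prod(x)                 % p.x = 6, p.dx = [6 3 2]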
|
[STATEMENT]
lemma wt_FVar:
"\<lbrakk>E,dt\<Turnstile>e\<Colon>-Class C; accfield (prg E) (cls E) C fn = Some (statDeclC,f);
sf=is_static f; fT=(type f); accC=cls E\<rbrakk>
\<Longrightarrow> E,dt\<Turnstile>{accC,statDeclC,sf}e..fn\<Colon>=fT"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>E,dt\<Turnstile>e\<Colon>-Class C; accfield (prg E) (cls E) C fn = Some (statDeclC, f); sf = is_static f; fT = type f; accC = cls E\<rbrakk> \<Longrightarrow> E,dt\<Turnstile>{accC,statDeclC,sf}e..fn\<Colon>=fT
[PROOF STEP]
by (auto dest: wt.FVar)
|
Under CAA and EASA rules, all aircraft are required to meet certain standards of airworthiness to fly safely and legally. Aircraft that meet these standards are issued with a Certificate of Airworthiness. However, British-registered aircraft which are excluded from the scope of EASA, and which cannot satisfy the requirements for the issue of a Certificate of Airworthiness, may be issued with a Permit to Fly. This allows them to fly in UK airspace subject to certain limitations, for example being restricted to day-time flights under visual flight rules only. A number of organisations (e.g. the British Microlight Aircraft Association and the Light Aircraft Association) have obtained a standing over-flight permission for Permit to Fly aircraft within their area of interest with some European countries, notably France. Permits are typically issued to vintage and historic aircraft, amateur-built aircraft, and microlights.
|
#' test data set
#'
#' This is a test data set.
#'
#' @format A data.frame with 400 rows and 42 variables.
#' \describe{
#' \item{usubjid}{subject identifier}
#' }
"test"
#' test Data Dictionary
#'
#' This is the documentation for the test data set.
#'
#' @format A data.frame with 39 rows and 2 variables.
#' \describe{
#' \item{usubjid}{subject identifier}
#' }
"testdd"
|
lemma C1_differentiable_polynomial_function: fixes p :: "real \<Rightarrow> 'a::euclidean_space" shows "polynomial_function p \<Longrightarrow> p C1_differentiable_on S" |
/-
Copyright (c) 2018 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mario Carneiro
! This file was ported from Lean 3 source module logic.equiv.list
! leanprover-community/mathlib commit d11893b411025250c8e61ff2f12ccbd7ee35ab15
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathlib.Data.Finset.Sort
import Mathlib.Data.Vector.Basic
import Mathlib.Logic.Denumerable
/-!
# Equivalences involving `List`-like types
This file defines some additional constructive equivalences using `Encodable` and the pairing
function on `ℕ`.
-/
open Nat List
namespace Encodable
variable {α : Type _}
section List
variable [Encodable α]
/-- Explicit encoding function for `List α` -/
def encodeList : List α → ℕ
| [] => 0
| a :: l => succ (pair (encode a) (encodeList l))
#align encodable.encode_list Encodable.encodeList
/-- Explicit decoding function for `List α` -/
def decodeList : ℕ → Option (List α)
| 0 => some []
| succ v =>
match unpair v, unpair_right_le v with
| (v₁, v₂), h =>
have : v₂ < succ v := lt_succ_of_le h
(· :: ·) <$> decode (α := α) v₁ <*> decodeList v₂
#align encodable.decode_list Encodable.decodeList
/-- If `α` is encodable, then so is `List α`. This uses the `pair` and `unpair` functions from
`Data.Nat.Pairing`. -/
instance _root_.List.encodable : Encodable (List α) :=
⟨encodeList, decodeList, fun l => by
induction' l with a l IH <;> simp [encodeList, decodeList, unpair_pair, encodek, *]⟩
#align list.encodable List.encodable
instance _root_.List.countable {α : Type _} [Countable α] : Countable (List α) := by
haveI := Encodable.ofCountable α
infer_instance
#align list.countable List.countable
@[simp]
theorem encode_list_nil : encode (@nil α) = 0 :=
rfl
#align encodable.encode_list_nil Encodable.encode_list_nil
@[simp]
theorem encode_list_cons (a : α) (l : List α) :
encode (a :: l) = succ (pair (encode a) (encode l)) :=
rfl
#align encodable.encode_list_cons Encodable.encode_list_cons
@[simp]
theorem decode_list_zero : decode (α := List α) 0 = some [] :=
show decodeList 0 = some [] by rw [decodeList]
#align encodable.decode_list_zero Encodable.decode_list_zero
@[simp, nolint unusedHavesSuffices] -- Porting note: false positive
theorem decode_list_succ (v : ℕ) :
decode (α := List α) (succ v) =
(· :: ·) <$> decode (α := α) v.unpair.1 <*> decode (α := List α) v.unpair.2 :=
show decodeList (succ v) = _ by
cases' e : unpair v with v₁ v₂
simp [decodeList, e]; rfl
#align encodable.decode_list_succ Encodable.decode_list_succ
theorem length_le_encode : ∀ l : List α, length l ≤ encode l
| [] => Nat.zero_le _
| _ :: l => succ_le_succ <| (length_le_encode l).trans (right_le_pair _ _)
#align encodable.length_le_encode Encodable.length_le_encode
end List
section Finset
variable [Encodable α]
private def enle : α → α → Prop :=
encode ⁻¹'o (· ≤ ·)
private theorem enle.isLinearOrder : IsLinearOrder α enle :=
(RelEmbedding.preimage ⟨encode, encode_injective⟩ (· ≤ ·)).isLinearOrder
private def decidable_enle (a b : α) : Decidable (enle a b) := by
unfold enle Order.Preimage
infer_instance
attribute [local instance] enle.isLinearOrder decidable_enle
/-- Explicit encoding function for `Multiset α` -/
def encodeMultiset (s : Multiset α) : ℕ :=
encode (s.sort enle)
#align encodable.encode_multiset Encodable.encodeMultiset
/-- Explicit decoding function for `Multiset α` -/
def decodeMultiset (n : ℕ) : Option (Multiset α) :=
((↑) : List α → Multiset α) <$> decode (α := List α) n
#align encodable.decode_multiset Encodable.decodeMultiset
/-- If `α` is encodable, then so is `Multiset α`. -/
instance _root_.Multiset.encodable : Encodable (Multiset α) :=
⟨encodeMultiset, decodeMultiset, fun s => by simp [encodeMultiset, decodeMultiset, encodek]⟩
#align multiset.encodable Multiset.encodable
/-- If `α` is countable, then so is `Multiset α`. -/
instance _root_.Multiset.countable {α : Type _} [Countable α] : Countable (Multiset α) :=
Quotient.countable
#align multiset.countable Multiset.countable
end Finset
/-- A listable type with decidable equality is encodable. -/
def encodableOfList [DecidableEq α] (l : List α) (H : ∀ x, x ∈ l) : Encodable α :=
⟨fun a => indexOf a l, l.get?, fun _ => indexOf_get? (H _)⟩
#align encodable.encodable_of_list Encodable.encodableOfList
/-- A finite type is encodable. Because the encoding is not unique, we wrap it in `Trunc` to
preserve computability. -/
def _root_.Fintype.truncEncodable (α : Type _) [DecidableEq α] [Fintype α] : Trunc (Encodable α) :=
@Quot.recOnSubsingleton' _ _ (fun s : Multiset α => (∀ x : α, x ∈ s) → Trunc (Encodable α)) _
Finset.univ.1 (fun l H => Trunc.mk <| encodableOfList l H) Finset.mem_univ
#align fintype.trunc_encodable Fintype.truncEncodable
/-- A noncomputable way to arbitrarily choose an ordering on a finite type.
It is not made into a global instance, since it involves an arbitrary choice.
This can be locally made into an instance with `local attribute [instance] Fintype.toEncodable`. -/
noncomputable def _root_.Fintype.toEncodable (α : Type _) [Fintype α] : Encodable α := by
classical exact (Fintype.truncEncodable α).out
#align fintype.to_encodable Fintype.toEncodable
/-- If `α` is encodable, then so is `Vector α n`. -/
instance _root_.Vector.encodable [Encodable α] {n} : Encodable (Vector α n) :=
Encodable.Subtype.encodable
#align vector.encodable Vector.encodable
/-- If `α` is countable, then so is `Vector α n`. -/
instance _root_.Vector.countable [Countable α] {n} : Countable (Vector α n) :=
Subtype.countable
#align vector.countable Vector.countable
/-- If `α` is encodable, then so is `Fin n → α`. -/
instance finArrow [Encodable α] {n} : Encodable (Fin n → α) :=
ofEquiv _ (Equiv.vectorEquivFin _ _).symm
#align encodable.fin_arrow Encodable.finArrow
instance finPi (n) (π : Fin n → Type _) [∀ i, Encodable (π i)] : Encodable (∀ i, π i) :=
ofEquiv _ (Equiv.piEquivSubtypeSigma (Fin n) π)
#align encodable.fin_pi Encodable.finPi
/-- If `α` is encodable, then so is `Finset α`. -/
instance _root_.Finset.encodable [Encodable α] : Encodable (Finset α) :=
haveI := decidableEqOfEncodable α
ofEquiv { s : Multiset α // s.Nodup }
⟨fun ⟨a, b⟩ => ⟨a, b⟩, fun ⟨a, b⟩ => ⟨a, b⟩, fun ⟨_, _⟩ => rfl, fun ⟨_, _⟩ => rfl⟩
#align finset.encodable Finset.encodable
/-- If `α` is countable, then so is `Finset α`. -/
instance _root_.Finset.countable [Countable α] : Countable (Finset α) :=
Finset.val_injective.countable
#align finset.countable Finset.countable
-- TODO: Unify with `fintypePi` and find a better name
/-- When `α` is finite and `β` is encodable, `α → β` is encodable too. Because the encoding is not
unique, we wrap it in `Trunc` to preserve computability. -/
def fintypeArrow (α : Type _) (β : Type _) [DecidableEq α] [Fintype α] [Encodable β] :
Trunc (Encodable (α → β)) :=
(Fintype.truncEquivFin α).map fun f =>
Encodable.ofEquiv (Fin (Fintype.card α) → β) <| Equiv.arrowCongr f (Equiv.refl _)
#align encodable.fintype_arrow Encodable.fintypeArrow
/-- When `α` is finite and all `π a` are encodable, `Π a, π a` is encodable too. Because the
encoding is not unique, we wrap it in `Trunc` to preserve computability. -/
def fintypePi (α : Type _) (π : α → Type _) [DecidableEq α] [Fintype α] [∀ a, Encodable (π a)] :
Trunc (Encodable (∀ a, π a)) :=
(Fintype.truncEncodable α).bind fun a =>
(@fintypeArrow α (Σa, π a) _ _ (@Encodable.Sigma.encodable _ _ a _)).bind fun f =>
Trunc.mk <|
@Encodable.ofEquiv _ _ (@Encodable.Subtype.encodable _ _ f _)
(Equiv.piEquivSubtypeSigma α π)
#align encodable.fintype_pi Encodable.fintypePi
/-- The elements of a `Fintype` as a sorted list. -/
def sortedUniv (α) [Fintype α] [Encodable α] : List α :=
Finset.univ.sort (Encodable.encode' α ⁻¹'o (· ≤ ·))
#align encodable.sorted_univ Encodable.sortedUniv
@[simp]
theorem mem_sortedUniv {α} [Fintype α] [Encodable α] (x : α) : x ∈ sortedUniv α :=
(Finset.mem_sort _).2 (Finset.mem_univ _)
#align encodable.mem_sorted_univ Encodable.mem_sortedUniv
@[simp]
theorem length_sortedUniv (α) [Fintype α] [Encodable α] : (sortedUniv α).length = Fintype.card α :=
Finset.length_sort _
#align encodable.length_sorted_univ Encodable.length_sortedUniv
@[simp]
theorem sortedUniv_nodup (α) [Fintype α] [Encodable α] : (sortedUniv α).Nodup :=
Finset.sort_nodup _ _
#align encodable.sorted_univ_nodup Encodable.sortedUniv_nodup
@[simp]
theorem sortedUniv_toFinset (α) [Fintype α] [Encodable α] [DecidableEq α] :
(sortedUniv α).toFinset = Finset.univ :=
Finset.sort_toFinset _ _
#align encodable.sorted_univ_to_finset Encodable.sortedUniv_toFinset
/-- An encodable `Fintype` is equivalent to the same size `fin`. -/
def fintypeEquivFin {α} [Fintype α] [Encodable α] : α ≃ Fin (Fintype.card α) :=
haveI : DecidableEq α := Encodable.decidableEqOfEncodable _
-- Porting note: used the `trans` tactic
((sortedUniv_nodup α).getEquivOfForallMemList _ mem_sortedUniv).symm.trans <|
Equiv.cast (congr_arg _ (length_sortedUniv α))
#align encodable.fintype_equiv_fin Encodable.fintypeEquivFin
/-- If `α` and `β` are encodable and `α` is a fintype, then `α → β` is encodable as well. -/
instance fintypeArrowOfEncodable {α β : Type _} [Encodable α] [Fintype α] [Encodable β] :
Encodable (α → β) :=
ofEquiv (Fin (Fintype.card α) → β) <| Equiv.arrowCongr fintypeEquivFin (Equiv.refl _)
#align encodable.fintype_arrow_of_encodable Encodable.fintypeArrowOfEncodable
end Encodable
namespace Denumerable
variable {α : Type _} {β : Type _} [Denumerable α] [Denumerable β]
open Encodable
section List
@[nolint unusedHavesSuffices] -- Porting note: false positive
theorem denumerable_list_aux : ∀ n : ℕ, ∃ a ∈ @decodeList α _ n, encodeList a = n
| 0 => by rw [decodeList]; exact ⟨_, rfl, rfl⟩
| succ v => by
cases' e : unpair v with v₁ v₂
have h := unpair_right_le v
rw [e] at h
rcases have : v₂ < succ v := lt_succ_of_le h
denumerable_list_aux v₂ with
⟨a, h₁, h₂⟩
rw [Option.mem_def] at h₁
use ofNat α v₁ :: a
simp [decodeList, e, h₂, h₁, encodeList, pair_unpair' e]
#align denumerable.denumerable_list_aux Denumerable.denumerable_list_aux
/-- If `α` is denumerable, then so is `List α`. -/
instance denumerableList : Denumerable (List α) :=
⟨denumerable_list_aux⟩
#align denumerable.denumerable_list Denumerable.denumerableList
@[simp]
theorem list_ofNat_zero : ofNat (List α) 0 = [] := by rw [← @encode_list_nil α, ofNat_encode]
#align denumerable.list_of_nat_zero Denumerable.list_ofNat_zero
@[simp, nolint unusedHavesSuffices] -- Porting note: false positive
theorem list_ofNat_succ (v : ℕ) :
ofNat (List α) (succ v) = ofNat α v.unpair.1 :: ofNat (List α) v.unpair.2 :=
ofNat_of_decode <|
show decodeList (succ v) = _ by
cases' e : unpair v with v₁ v₂
simp [decodeList, e]
rw [show decodeList v₂ = decode (α := List α) v₂ from rfl, decode_eq_ofNat, Option.seq_some,
Option.some.injEq]
#align denumerable.list_of_nat_succ Denumerable.list_ofNat_succ
end List
section Multiset
/-- Outputs the list of differences of the input list, that is
`lower [a₁, a₂, ...] n = [a₁ - n, a₂ - a₁, ...]` -/
def lower : List ℕ → ℕ → List ℕ
| [], _ => []
| m :: l, n => (m - n) :: lower l m
#align denumerable.lower Denumerable.lower
/-- Outputs the list of partial sums of the input list, that is
`raise [a₁, a₂, ...] n = [n + a₁, n + a₁ + a₂, ...]` -/
def raise : List ℕ → ℕ → List ℕ
| [], _ => []
| m :: l, n => (m + n) :: raise l (m + n)
#align denumerable.raise Denumerable.raise
theorem lower_raise : ∀ l n, lower (raise l n) n = l
| [], n => rfl
| m :: l, n => by rw [raise, lower, add_tsub_cancel_right, lower_raise l]
#align denumerable.lower_raise Denumerable.lower_raise
theorem raise_lower : ∀ {l n}, List.Sorted (· ≤ ·) (n :: l) → raise (lower l n) n = l
| [], n, _ => rfl
| m :: l, n, h =>
by
have : n ≤ m := List.rel_of_sorted_cons h _ (l.mem_cons_self _)
simp [raise, lower, tsub_add_cancel_of_le this, raise_lower h.of_cons]
#align denumerable.raise_lower Denumerable.raise_lower
theorem raise_chain : ∀ l n, List.Chain (· ≤ ·) n (raise l n)
| [], _ => List.Chain.nil
| _ :: _, _ => List.Chain.cons (Nat.le_add_left _ _) (raise_chain _ _)
#align denumerable.raise_chain Denumerable.raise_chain
/-- `raise l n` is a non-decreasing sequence. -/
theorem raise_sorted : ∀ l n, List.Sorted (· ≤ ·) (raise l n)
| [], _ => List.sorted_nil
| _ :: _, _ => List.chain_iff_pairwise.1 (raise_chain _ _)
#align denumerable.raise_sorted Denumerable.raise_sorted
/-- If `α` is denumerable, then so is `Multiset α`. Warning: this is *not* the same encoding as used
in `Multiset.encodable`. -/
instance multiset : Denumerable (Multiset α) :=
mk'
⟨fun s : Multiset α => encode <| lower ((s.map encode).sort (· ≤ ·)) 0,
fun n =>
Multiset.map (ofNat α) (raise (ofNat (List ℕ) n) 0),
fun s => by
have :=
raise_lower (List.sorted_cons.2 ⟨fun n _ => Nat.zero_le n, (s.map encode).sort_sorted _⟩)
simp [-Multiset.coe_map, this],
fun n => by
simp [-Multiset.coe_map, List.mergeSort_eq_self _ (raise_sorted _ _), lower_raise]⟩
#align denumerable.multiset Denumerable.multiset
end Multiset
section Finset
/-- Outputs the list of differences minus one of the input list, that is
`lower' [a₁, a₂, a₃, ...] n = [a₁ - n, a₂ - a₁ - 1, a₃ - a₂ - 1, ...]`. -/
def lower' : List ℕ → ℕ → List ℕ
| [], _ => []
| m :: l, n => (m - n) :: lower' l (m + 1)
#align denumerable.lower' Denumerable.lower'
/-- Outputs the list of partial sums plus one of the input list, that is
`raise' [a₁, a₂, a₃, ...] n = [n + a₁, n + a₁ + a₂ + 1, n + a₁ + a₂ + a₃ + 2, ...]`. Adding one each
time ensures the elements are distinct. -/
def raise' : List ℕ → ℕ → List ℕ
| [], _ => []
| m :: l, n => (m + n) :: raise' l (m + n + 1)
#align denumerable.raise' Denumerable.raise'
theorem lower_raise' : ∀ l n, lower' (raise' l n) n = l
| [], n => rfl
| m :: l, n => by simp [raise', lower', add_tsub_cancel_right, lower_raise']
#align denumerable.lower_raise' Denumerable.lower_raise'
theorem raise_lower' : ∀ {l n}, (∀ m ∈ l, n ≤ m) → List.Sorted (· < ·) l → raise' (lower' l n) n = l
| [], n, _, _ => rfl
| m :: l, n, h₁, h₂ => by
have : n ≤ m := h₁ _ (l.mem_cons_self _)
simp [raise', lower', tsub_add_cancel_of_le this,
raise_lower' (List.rel_of_sorted_cons h₂ : ∀ a ∈ l, m < a) h₂.of_cons]
#align denumerable.raise_lower' Denumerable.raise_lower'
/-- `raise' l n` is a strictly increasing sequence. -/
theorem raise'_sorted : ∀ l n, List.Sorted (· < ·) (raise' l n)
| [], _ => List.sorted_nil
| _ :: _, _ => List.chain_iff_pairwise.1 (raise'_chain _ (lt_succ_self _))
#align denumerable.raise'_sorted Denumerable.raise'_sorted
/-- Makes `raise' l n` into a finset. Elements are distinct thanks to `raise'_sorted`. -/
def raise'Finset (l : List ℕ) (n : ℕ) : Finset ℕ :=
⟨raise' l n, (raise'_sorted _ _).imp (@ne_of_lt _ _)⟩
#align denumerable.raise'_finset Denumerable.raise'Finset
/-- If `α` is denumerable, then so is `Finset α`. Warning: this is *not* the same encoding as used
in `Finset.encodable`. -/
instance finset : Denumerable (Finset α) :=
mk'
⟨fun s : Finset α => encode <| lower' ((s.map (eqv α).toEmbedding).sort (· ≤ ·)) 0, fun n =>
Finset.map (eqv α).symm.toEmbedding (raise'Finset (ofNat (List ℕ) n) 0), fun s =>
Finset.eq_of_veq <| by
simp [-Multiset.coe_map, raise'Finset,
raise_lower' (fun n _ => Nat.zero_le n) (Finset.sort_sorted_lt _)],
fun n => by
simp [-Multiset.coe_map, Finset.map, raise'Finset, Finset.sort,
List.mergeSort_eq_self (· ≤ ·) ((raise'_sorted _ _).imp (@le_of_lt _ _)), lower_raise']⟩
#align denumerable.finset Denumerable.finset
end Finset
end Denumerable
namespace Equiv
/-- The type of lists of `Unit` is canonically equivalent to the natural numbers. -/
def listUnitEquiv : List Unit ≃ ℕ where
toFun := List.length
invFun n := List.replicate n ()
left_inv u := List.length_injective (by simp)
right_inv n := List.length_replicate n ()
#align equiv.list_unit_equiv Equiv.listUnitEquiv
/-- `List ℕ` is equivalent to `ℕ`. -/
def listNatEquivNat : List ℕ ≃ ℕ :=
Denumerable.eqv _
#align equiv.list_nat_equiv_nat Equiv.listNatEquivNat
/-- If `α` is equivalent to `ℕ`, then `List α` is equivalent to `α`. -/
def listEquivSelfOfEquivNat {α : Type _} (e : α ≃ ℕ) : List α ≃ α :=
calc
List α ≃ List ℕ := listEquivOfEquiv e
_ ≃ ℕ := listNatEquivNat
_ ≃ α := e.symm
#align equiv.list_equiv_self_of_equiv_nat Equiv.listEquivSelfOfEquivNat
end Equiv
|
------------------------------------------------------------------------------
-- Conversion rules for the division
------------------------------------------------------------------------------
{-# OPTIONS --exact-split #-}
{-# OPTIONS --no-sized-types #-}
{-# OPTIONS --no-universe-polymorphism #-}
{-# OPTIONS --without-K #-}
module LTC-PCF.Program.Division.ConversionRules where
open import Common.FOL.Relation.Binary.EqReasoning
open import LTC-PCF.Base
open import LTC-PCF.Data.Nat
open import LTC-PCF.Data.Nat.Inequalities
open import LTC-PCF.Program.Division.Division
------------------------------------------------------------------------------
-- Division properties
private
  -- Before proving some properties of div, it is convenient
  -- to have a proof for each possible execution step.
-- Initially, we define the possible states (div-s₁, div-s₂,
-- ...) and after that, we write down the proof for the execution
-- step from the state p to the state q, e.g.
--
-- proof₂₋₃ : ∀ i j → div-s₂ i j ≡ div-s₃ i j.
-- Initially, the conversion rule fix-eq is applied.
div-s₁ : D → D → D
div-s₁ i j = divh (fix divh) · i · j
-- First argument application.
div-s₂ : D → D → D
div-s₂ i j = fun · j
where
fun : D
fun = lam (λ j → if (lt i j)
then zero
else succ₁ (fix divh · (i ∸ j) · j))
-- Second argument application.
div-s₃ : D → D → D
div-s₃ i j = if (lt i j) then zero else succ₁ (fix divh · (i ∸ j) · j)
-- lt i j ≡ true.
div-s₄ : D → D → D
div-s₄ i j = if true then zero else succ₁ (fix divh · (i ∸ j) · j)
-- lt i j ≡ false.
div-s₅ : D → D → D
div-s₅ i j = if false then zero else succ₁ (fix divh · (i ∸ j) · j)
-- The conditional is true.
div-s₆ : D
div-s₆ = zero
-- The conditional is false.
div-s₇ : D → D → D
div-s₇ i j = succ₁ (fix divh · (i ∸ j) · j)
{-
To prove the execution steps, e.g.
proof₃₋₄ : ∀ i j → div-s₃ i j ≡ div-s₄ i j,
we usually need to prove that
... m ... ≡ ... n ... (1)
given that
m ≡ n, (2)
where (2) is a conversion rule usually.
We prove (1) using
subst : ∀ {x y} (A : D → Set) → x ≡ y → A x → A y
where
• A is given by λ m → ... m ... ≡ ... n ...,
• x ≡ y is given by n ≡ m (actually, we use sym (m ≡ n)) and
• A x is given by ... n ... ≡ ... n ... (i.e. refl)
-}
-- From div · i · j to div-s₁ using the conversion rule fix-eq.
proof₀₋₁ : ∀ i j → fix divh · i · j ≡ div-s₁ i j
proof₀₋₁ i j = subst (λ t → t · i · j ≡ divh (fix divh) · i · j)
(sym (fix-eq divh))
refl
-- From div-s₁ to div-s₂ using the conversion rule beta.
proof₁₋₂ : ∀ i j → div-s₁ i j ≡ div-s₂ i j
proof₁₋₂ i j =
subst (λ t → t · j ≡ fun i · j)
(sym (beta fun i))
refl
where
      -- The function fun is the same as the fun part of div-s₂,
      -- except that we need a fresh variable y to avoid a clash
      -- with the variable i in the application of the beta
      -- rule.
fun : D → D
fun y = lam (λ j → if (lt y j)
then zero
else succ₁ (fix divh · (y ∸ j) · j))
-- From div-s₂ to div-s₃ using the conversion rule beta.
proof₂₋₃ : ∀ i j → div-s₂ i j ≡ div-s₃ i j
proof₂₋₃ i j = beta fun j
where
      -- The function fun is the same as div-s₃, except that we
      -- need a fresh variable y to avoid a clash with the
      -- variable j in the application of the beta rule.
fun : D → D
fun y = if (lt i y) then zero else succ₁ ((fix divh) · (i ∸ y) · y)
-- From div-s₃ to div-s₄ using the proof i<j.
proof₃_₄ : ∀ i j → i < j → div-s₃ i j ≡ div-s₄ i j
proof₃_₄ i j i<j =
subst (λ t → (if t then zero else succ₁ ((fix divh) · (i ∸ j) · j)) ≡
(if true then zero else succ₁ ((fix divh) · (i ∸ j) · j)))
(sym i<j)
refl
-- From div-s₃ to div-s₅ using the proof i≮j.
proof₃₋₅ : ∀ i j → i ≮ j → div-s₃ i j ≡ div-s₅ i j
proof₃₋₅ i j i≮j =
subst (λ t → (if t then zero else succ₁ ((fix divh) · (i ∸ j) · j)) ≡
(if false then zero else succ₁ ((fix divh) · (i ∸ j) · j)))
(sym i≮j)
refl
-- From div-s₄ to div-s₆ using the conversion rule if-true.
proof₄₋₆ : ∀ i j → div-s₄ i j ≡ div-s₆
proof₄₋₆ i j = if-true zero
-- From div-s₅ to div-s₇ using the conversion rule if-false.
proof₅₋₇ : ∀ i j → div-s₅ i j ≡ div-s₇ i j
proof₅₋₇ i j = if-false (succ₁ (fix divh · (i ∸ j) · j))
----------------------------------------------------------------------
  -- The division result when the dividend is less than
  -- the divisor.
div-x<y : ∀ {i j} → i < j → div i j ≡ zero
div-x<y {i} {j} i<j =
div i j ≡⟨ proof₀₋₁ i j ⟩
div-s₁ i j ≡⟨ proof₁₋₂ i j ⟩
div-s₂ i j ≡⟨ proof₂₋₃ i j ⟩
div-s₃ i j ≡⟨ proof₃_₄ i j i<j ⟩
div-s₄ i j ≡⟨ proof₄₋₆ i j ⟩
div-s₆ ∎
----------------------------------------------------------------------
  -- The division result when the dividend is greater than or equal to
  -- the divisor.
div-x≮y : ∀ {i j} → i ≮ j → div i j ≡ succ₁ (div (i ∸ j) j)
div-x≮y {i} {j} i≮j =
div i j ≡⟨ proof₀₋₁ i j ⟩
div-s₁ i j ≡⟨ proof₁₋₂ i j ⟩
div-s₂ i j ≡⟨ proof₂₋₃ i j ⟩
div-s₃ i j ≡⟨ proof₃₋₅ i j i≮j ⟩
div-s₅ i j ≡⟨ proof₅₋₇ i j ⟩
div-s₇ i j ∎
|
\label{ch:five}
The question of the existence of an analog to bound entanglement was first posed by Gisin and Wolf in \cite{GisWolf00}, where they analyzed comparisons and correspondences between quantum and classical protocols for key agreement.
The question about bound information was a consequence of these correspondences.
Since then, the topic has been picked up by the quantum cryptography community.
A probability distribution exhibiting bound information has not yet been found.
% Nevertheless a case for asymptotic bound information was proposed again by Wolf together with Renner \cite{RW03}.
\section{Tripartite bound information}
A later work by Ac\'in et al. proposed the existence of bound information in a tripartite case \cite{ACM04}.
They analyzed the probability distribution resulting from measurement of a known bound entangled state.
Furthermore, they also show that this distribution can be \textit{activated}\footnotemark{} in the same way as in quantum entanglement.
This result is different from what we want to achieve because the probability distribution is divided among parties Alice, Bob and Claire, with Eve being a fourth party in the distribution.
In fact, their result of bound information is valid only when considering \emph{pairs} of honest parties from the original distribution.
\footnotetext{The activation of entanglement can be roughly described as the process through which entanglement can become a useful resource for nonclassical tasks. Horodecki \textit{et al.} demonstrated the activation of bound entanglement in \cite{3H99}.}
\section{The gap between the bounds can be arbitrarily large}
To distinguish and analyze the case of bound information, some information-theoretic measures are needed.
We saw the secret-key rate (section \ref{seckeyrate}) and the intrinsic information (section \ref{intrininfo}), and we already presented the question of bound information in terms of such measures.
In \cite{RW03} a new measure, the \emph{reduced intrinsic information} $\redintrinfo{X}{Y}{Z}$, is introduced as an upper bound on the secret-key rate that is lower than the intrinsic information.
\begin{definition}\cite{RW03}
Let $P_{XYZ}$ be a discrete probability distribution. The reduced intrinsic information of $X$ and $Y$ given $Z$ is defined as
\begin{equation} \label{eq:reducedintrinfo}
\redintrinfo{X}{Y}{Z} := \inf_{P_{U|XYZ}} (\intrinfo{X}{Y}{ZU} + \Ent (U))
\end{equation}
and for every $P_{XYZ}$ it holds
\begin{equation} \label{eq:bounds}
\keyrate{X}{Y}{Z} \leq \redintrinfo{X}{Y}{Z} \leq \intrinfo{X}{Y}{Z}
\end{equation}
\end{definition}
Reduced intrinsic information is a stronger upper bound on the secret-key rate than intrinsic information.
More importantly, Renner and Wolf proved that the gap between the reduced and the normal intrinsic information (hence also between the secret-key rate and the intrinsic information) can be arbitrarily large for distributions where the ranges of $X$, $Y$ and $Z$ are arbitrarily large \cite{RW03}.
Since the former is an upper bound on the secret-key rate, and the latter is a lower bound on the information of formation, this implies the existence of asymptotic bound information.
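As a sketch of this reasoning (writing $I_{\mathrm{form}}$ for the information of formation, a notation used here only for illustration), the bounds chain together as
\begin{equation*}
\keyrate{X}{Y}{Z} \;\leq\; \redintrinfo{X}{Y}{Z} \;\ll\; \intrinfo{X}{Y}{Z} \;\leq\; I_{\mathrm{form}}(X;Y|Z),
\end{equation*}
so along a suitable sequence of distributions the left-hand side can vanish while the right-hand side stays bounded away from zero.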
\begin{definition}\cite{RW03}
Let $P_{X_{(n)}Y_{(n)}Z_{(n)}}$ be an arbitrary discrete $n$-ary probability distribution. Then the distribution is said to have \emph{asymptotic bound information} when
\begin{equation}
\intrinfo{X_{(n)}}{Y_{(n)}}{Z_{(n)}} \rightarrow c > 0
\end{equation}
and
\begin{equation}
\keyrate{X_{(n)}}{Y_{(n)}}{Z_{(n)}} \rightarrow 0
\end{equation}
for $n\rightarrow \infty$.
\end{definition}
\begin{figure}
\input{images/bounds}
\caption{The different measures for $P_{XYZ}$ and how they bound each other.}
\end{figure}
\section{A candidate probability distribution}\label{daproblem}
Wolf and Renner proposed in \cite{RW03} for the first time a probability distribution (Fig. \ref{Tab:candidate}) which is a valid candidate for the classical analogue of \emph{bound entanglement}.
In fact, they offer a probability distribution that asymptotically has bound information.
This example did not come directly from a translation of bound entangled states.
Moreover, they also show, for such a distribution, that
\begin{equation}
\keyrate{X}{Y}{Z} \neq \intrinfo{X}{Y}{Z}
\end{equation}
and they emphasize that this is the first time that equality does not hold. This fact disproved the conjecture posed in \cite{MW99} that the two measures were actually the same.
For this probability distribution we have
$$ \intrinfo{X}{Y}{Z} = 3/2 ,\; \keyrate{X}{Y}{Z} = 1 , \; \redintrinfo{X}{Y}{Z} = 1 $$
Given that $\redintrinfo{X}{Y}{Z} = \keyrate{X}{Y}{Z}$ does hold here, and given the statements above, we can think of a model with which to search for bound information in chapter \ref{ch:six}.
If the reduced intrinsic information is a useful measure, we can minimize it to find probability distributions from which no key can be extracted.
A condition for it to be a useful measure is that it must have the same lower bound as the secret-key rate.
As we will see in section \ref{problem}, however, there are some conditions on the reduced intrinsic information that do not allow it to be a good measure.
\begin{figure}
\input{images/tabCandidate1}
\caption{Probability distribution proposed by Renner, Wolf and Skripsky in \cite{RW03} for which it holds that $\keyrate{X}{Y}{Z} \neq \intrinfo{X}{Y}{Z}$}
\label{Tab:candidate}
\end{figure}
More promising is a family of distributions, depending on a parameter $a>0$, which they mention at the end (Fig. \ref{Tab:candidate2}); it is a slight modification of the first.
Here Renner and Wolf conjecture that it might be possible to achieve bound information for different values of $a$.
They also noted, however, that if $a$ is too large the correlation between Alice and Bob is lost, and with it the key cost (or information of formation).
\begin{figure}
\input{images/tabCandidate2}
\caption{A candidate probability distribution for bound information, for $a\geq 0$ (and renormalized).}
\label{Tab:candidate2}
\end{figure}
|
If $f$ converges to $l$ and $g$ is closer to $m$ than $f$ is to $l$, then $g$ converges to $m$.
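A sketch of the argument, reading "closer" as $\lvert g(x) - m\rvert \leq \lvert f(x) - l\rvert$ for all $x$: given $\varepsilon > 0$, eventually $\lvert f(x) - l\rvert < \varepsilon$ because $f$ converges to $l$, hence $\lvert g(x) - m\rvert \leq \lvert f(x) - l\rvert < \varepsilon$, so $g$ converges to $m$.
|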
(*
* Conversion of DataProp terms to negation normal form (NNF).
*)
Require Import Classical.
Require Import SNF.Inhabited.
Require Import SNF.Context.
Require Import SNF.DataProp.
Inductive NNFProp : TypeContext -> Type :=
| NNFOpaque : forall {env}, (Valuation env -> Prop) -> NNFProp env
| NNFAnd : forall {env}, NNFProp env -> NNFProp env -> NNFProp env
| NNFOr : forall {env}, NNFProp env -> NNFProp env -> NNFProp env
| NNFExists : forall {env} (T : Type) `{IsInhabited T}, NNFProp (TypeCons T env) -> NNFProp env
| NNFForall : forall {env} (T : Type) `{IsInhabited T}, NNFProp (TypeCons T env) -> NNFProp env
.
Fixpoint denote_nnf {env : TypeContext} (P : NNFProp env) : Valuation env -> Prop :=
match P with
| NNFOpaque A => A
| NNFAnd A B => fun v => denote_nnf A v /\ denote_nnf B v
| NNFOr A B => fun v => denote_nnf A v \/ denote_nnf B v
| NNFExists T body => fun v => exists (x:T), denote_nnf body (ValuationCons x v)
| NNFForall T body => fun v => forall (x:T), denote_nnf body (ValuationCons x v)
end.
(* so that our tactics can avoid expanding `negb` everywhere *)
Definition nnf_negb (b : bool) :=
match b with
| true => false
| false => true
end.
Fixpoint nnf' {env} (P : DataProp env) (negate : bool) : NNFProp env :=
match P with
| Opaque A => NNFOpaque (if negate then (fun v => ~(A v)) else A)
| Literal b => if negate then NNFOpaque (fun _ => if b then False else True) else NNFOpaque (fun _ => if b then True else False)
| And A B => if negate then NNFOr (nnf' A true) (nnf' B true) else NNFAnd (nnf' A false) (nnf' B false)
| Or A B => if negate then NNFAnd (nnf' A true) (nnf' B true) else NNFOr (nnf' A false) (nnf' B false)
| Implies A B => if negate then NNFAnd (nnf' A false) (nnf' B true) else NNFOr (nnf' A true) (nnf' B false)
| Not A => nnf' A (nnf_negb negate)
| Exists T body => if negate then NNFForall T (nnf' body true) else NNFExists T (nnf' body false)
| Forall T body => if negate then NNFExists T (nnf' body true) else NNFForall T (nnf' body false)
end.
Lemma nnf'_correct:
forall env (P : DataProp env) negate v,
denote_nnf (nnf' P negate) v <->
if negate
then denote (Not P) v
else denote P v.
Proof.
induction P; destruct negate; cbn in *; intros;
repeat setoid_rewrite IHP;
repeat setoid_rewrite IHP1;
repeat setoid_rewrite IHP2;
try easy.
- destruct b; easy.
- intuition; apply not_and_or; intuition.
- split.
+ apply and_not_or.
+ apply not_or_and.
- split.
+ intuition.
+ apply imply_to_and.
- split.
+ intuition.
+ apply imply_to_or.
- split.
+ intuition.
+ apply NNPP.
- split.
+ apply all_not_not_ex.
+ apply not_ex_all_not.
- split.
+ apply ex_not_not_all.
+ apply not_all_ex_not.
Qed.
Definition nnf {env} (P : DataProp env) : NNFProp env :=
nnf' P false.
Lemma nnf_correct:
forall env (P : DataProp env) v,
denote_nnf (nnf P) v <-> denote P v.
Proof.
intros.
apply nnf'_correct.
Qed.
|
Require Import Bool Arith List Omega.
Require Import Recdef Morphisms.
Require Import Program.Tactics.
Require Import Relation_Operators.
Require FMapList.
Require FMapFacts.
Require Import Classical.
Require Import Coq.Classes.RelationClasses.
Require Import OrderedType OrderedTypeEx DecidableType.
Require Import Sorting.Permutation.
Import ListNotations.
Module NatMap := FMapList.Make Nat_as_OT.
Definition address := nat.
Definition version := nat.
Definition value := nat.
Definition lock := bool.
Definition variable := nat.
Definition store := NatMap.t value.
Definition heap := address -> option (value * lock * version).
Definition tid := nat.
Ltac myauto :=
repeat match goal with
| |- context[_] =>
auto 100; intuition; cbn in *; simpl in *; auto 100
| |- context[_] =>
try contradiction; try discriminate
end.
Inductive action:=
|dummy: action
|start_txn: action
|read_item: version -> action
|write_item: value -> action
|try_commit_txn: action
|lock_write_item: action
|seq_point: action
|validate_read_item: version -> action
|abort_txn: action
|unlock_write_item: action
(*|restart_txn: action*)
|commit_txn: action
|complete_write_item: (*value -> action*)version -> action
(*|unlock_write_item: version -> action*)
(*|invalid_write_item: value -> action*)
|commit_done_txn: action.
(*|obtain_global_tid: action.*)
(* seq_point comes later than the last lock, but must come before the first commit *)
Definition trace := list (tid * action).
(* Return the “phase” of an action. *)
Definition action_phase (a:action) :=
match a with
| dummy => 0
| start_txn => 1
| read_item _ => 1
| write_item _ => 1
| try_commit_txn => 2
| lock_write_item => 2
| seq_point => 3
| validate_read_item _ => 3
| commit_txn => 4
| complete_write_item _ => 4
| commit_done_txn => 4
| abort_txn => 6
| unlock_write_item => 6
end.
Fixpoint trace_tid_phase tid t: nat :=
match t with
| (tid', a) :: t' =>
if Nat.eq_dec tid tid'
then action_phase a
else trace_tid_phase tid t'
| [] => 0
end.
(* Return the version number of the last committed write *)
Fixpoint trace_write_version (t:trace): version :=
match t with
| (_, complete_write_item v) :: _ => v
| _ :: t' => trace_write_version t'
| [] => 0
end.
Fixpoint trace_tid_last_write tid t: option value :=
match t with
| (tid', a) :: t' =>
if Nat.eq_dec tid tid'
then match a with
| write_item v => Some v
| complete_write_item _ => None
| _ => trace_tid_last_write tid t'
end
else trace_tid_last_write tid t'
| [] => None
end.
Fixpoint trace_validate_read tid vers aborted (t:trace) :=
match t with
| (tid', lock_write_item) :: t' =>
(tid = tid' \/ aborted) /\ trace_validate_read tid vers False t'
| (_, unlock_write_item) :: t' =>
trace_validate_read tid vers True t'
| (_, complete_write_item vers') :: _ => vers = vers'
| _ :: t' => trace_validate_read tid vers aborted t'
| [] => vers = 0
end.
Fixpoint locked_by (t:trace) default : tid :=
match t with
| (tid, lock_write_item) :: _ => tid
| (_, unlock_write_item) :: _ => default
| (_, complete_write_item _) :: _ => default
| _ :: t' => locked_by t' default
| [] => default
end.
Inductive sto_trace : trace -> Prop :=
| empty_step : sto_trace []
| start_txn_step: forall t tid,
tid > 0
-> trace_tid_phase tid t = 0
-> sto_trace t
-> sto_trace ((tid, start_txn)::t)
| read_item_step: forall t tid,
trace_tid_phase tid t = 1
-> locked_by t 0 = 0
-> sto_trace t
-> sto_trace ((tid, read_item (trace_write_version t)) :: t)
| write_item_step: forall t tid val,
trace_tid_phase tid t = 1
-> sto_trace t
-> sto_trace ((tid, write_item val) :: t)
| try_commit_txn_step: forall t tid,
trace_tid_phase tid t = 1
-> sto_trace t
-> sto_trace ((tid, try_commit_txn)::t)
| lock_write_item_step: forall t tid v,
trace_tid_phase tid t = 2
-> In (tid, write_item v) t
-> locked_by t 0 = 0
-> sto_trace t
-> sto_trace ((tid, lock_write_item) :: t)
(*sequential point*)
| seq_point_step: forall t tid,
trace_tid_phase tid t = 2
-> (forall v, In (tid, write_item v) t
-> In (tid, lock_write_item) t)
-> sto_trace t
-> sto_trace ((tid, seq_point) :: t)
| validate_read_item_step: forall t tid vers,
trace_tid_phase tid t = 3
-> locked_by t tid = tid (* unlocked or locked by me *)
-> trace_write_version t = vers
-> sto_trace t
-> sto_trace ((tid, validate_read_item vers) :: t)
| abort_txn_step: forall t tid,
trace_tid_phase tid t > 0
-> trace_tid_phase tid t < 4
-> sto_trace t
-> sto_trace ((tid, abort_txn) :: t)
| unlock_item_step: forall t tid,
trace_tid_phase tid t = 6
-> locked_by t 0 = tid
-> sto_trace t
-> sto_trace ((tid, unlock_write_item) :: t)
| commit_txn_step: forall t tid,
trace_tid_phase tid t = 3
-> (forall vers, In (tid, read_item vers) t
-> In (tid, validate_read_item vers) t)
-> sto_trace t
-> sto_trace ((tid, commit_txn) :: t)
| complete_write_item_step: forall t tid val,
trace_tid_phase tid t = 4
-> locked_by t 0 = tid
-> trace_tid_last_write tid t = Some val
-> sto_trace t
-> sto_trace ((tid, complete_write_item (S (trace_write_version t))) :: t)
| commit_done_step: forall t tid,
trace_tid_phase tid t = 4
-> locked_by t 0 <> tid
-> sto_trace t
-> sto_trace ((tid, commit_done_txn) :: t).
Hint Constructors sto_trace.
Definition example_txn:=
[(2, commit_done_txn); (2, complete_write_item 1); (2, commit_txn); (2, validate_read_item 0); (2, seq_point); (2, lock_write_item); (2, try_commit_txn); (2, write_item 4); (2, read_item 0); (2, start_txn); (1, commit_done_txn); (1, commit_txn); (1, validate_read_item 0); (1, seq_point); (1, try_commit_txn); (1, read_item 0); (1, start_txn)].
Definition example_txn2:=
[(3, commit_done_txn); (3, commit_txn); (3, validate_read_item 1); (3, seq_point); (3, try_commit_txn); (3, read_item 1); (3, start_txn); (1, abort_txn); (1, validate_read_item 1); (1, try_commit_txn); (2, commit_txn); (2, complete_write_item 1); (2, commit_txn); (2, validate_read_item 0); (2, seq_point); (2, lock_write_item); (2, try_commit_txn); (2, write_item 4); (1, read_item 0); (2, read_item 0); (2, start_txn); (1, start_txn)].
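(* A few sanity checks for the helper functions above, evaluated on
   example_txn; the expected values can be confirmed by unfolding the
   definitions by hand. *)
Eval compute in trace_tid_phase 1 example_txn. (* = 4: tid 1 reached the commit phase *)
Eval compute in trace_write_version example_txn. (* = 1: version of the last committed write *)
Eval compute in locked_by example_txn 0. (* = 0: the write lock is currently free *)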
Lemma sto_trace_cons ta t:
sto_trace (ta :: t) -> sto_trace t.
Proof.
intros.
inversion H; subst; auto.
Qed.
Lemma sto_trace_app t1 t2:
sto_trace (t1 ++ t2) -> sto_trace t2.
Proof.
intros.
induction t1. rewrite app_nil_l in H. auto.
apply IHt1.
now apply sto_trace_cons with (ta:=a).
Qed.
(*
  Returns the serialized sequence of transaction ids in an STO trace, ordered
  by each transaction's seq_point: the first tid in the result is the first
  transaction to complete in the equivalent serial trace.
  Note that an STO trace is built in reverse order: the first (tid * action)
  pair in the list is the last operation of the trace.
*)
Function seq_list (sto_trace: trace): list nat:=
match sto_trace with
| [] => []
| (tid, seq_point) :: tail => seq_list tail ++ [tid]
| _ :: tail => seq_list tail
end.
Eval compute in seq_list example_txn.
Eval compute in seq_list example_txn2.
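(* The computed serialization orders, hand-checked against the definition:
   in example_txn transaction 1 serializes before transaction 2; in
   example_txn2 the aborted transaction 1 never reaches a seq_point, so only
   transactions 2 and 3 appear. *)
Example seq_list_example : seq_list example_txn = [1; 2].
Proof. reflexivity. Qed.
Example seq_list_example2 : seq_list example_txn2 = [2; 3].
Proof. reflexivity. Qed.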
Lemma phase_increase_head tid a t:
sto_trace ((tid, a) :: t) ->
action_phase a >= trace_tid_phase tid t.
Proof.
intros; inversion H; cbn; omega.
Qed.
Lemma phase_increase_app tid t1 t2:
sto_trace (t1 ++ t2) ->
trace_tid_phase tid (t1 ++ t2) >= trace_tid_phase tid t2.
Proof.
induction t1; intros.
- simpl; omega.
- rewrite <- app_comm_cons in H; destruct a.
assert (sto_trace (t1 ++ t2)) by (now apply sto_trace_cons in H).
apply IHt1 in H0.
simpl; destruct (Nat.eq_dec tid t).
+ subst; apply phase_increase_head in H; omega.
+ auto.
Qed.
Lemma phase_increase_in tid a t:
sto_trace t ->
In (tid, a) t ->
trace_tid_phase tid t >= action_phase a.
Proof.
intros H I; apply in_split in I.
destruct I as [l1 [l2 L]].
assert (trace_tid_phase tid ((tid, a) :: l2) = action_phase a). {
cbn; destruct (Nat.eq_dec tid tid); omega.
}
rewrite L in *; rewrite <- H0; now apply phase_increase_app.
Qed.
Lemma phase_increase_in_app tid a (t1 t2:trace):
sto_trace (t1 ++ t2) ->
In (tid, a) (t1 ++ t2) ->
action_phase a > trace_tid_phase tid t2 ->
In (tid, a) t1.
Proof.
intros T I A.
apply in_app_or in I; destruct I as [I | I]; auto.
apply sto_trace_app in T.
apply (phase_increase_in _ _ _ T) in I.
omega.
Qed.
Lemma at_most_one_seq_point tid t:
sto_trace ((tid, seq_point) :: t) ->
~ In (tid, seq_point) t.
Proof.
intros H F.
apply (phase_increase_in _ _ _ (sto_trace_cons _ _ H)) in F.
inversion H; subst; cbn in *.
omega.
Qed.
Lemma trace_phase_in tid t:
trace_tid_phase tid t > 0 ->
exists a, In (tid, a) t.
Proof.
induction t; intros; cbn in *.
omega.
destruct a.
destruct (Nat.eq_dec tid n).
exists a; subst; now left.
apply IHt in H; destruct H as [a' H]; exists a'; now right.
Qed.
Lemma tid_nonzero tid a t:
sto_trace t ->
In (tid, a) t ->
tid > 0.
Proof.
revert tid a; induction t.
- intros tid a H I; destruct I.
- intros tid' a' H I.
destruct I; [ | now apply (IHt _ _ (sto_trace_cons _ _ H)) in H0 ].
subst; inversion H; auto.
all: assert (trace_tid_phase tid' t > 0) as GZ by omega.
all: apply trace_phase_in in GZ.
all: destruct GZ as [a GZ].
all: now apply (IHt tid' a).
Qed.
Lemma trace_phase_nonzero tid t:
sto_trace t ->
trace_tid_phase tid t > 0 ->
tid > 0.
Proof.
intros T; induction T; intros P; simpl in P.
omega.
all: destruct (Nat.eq_dec tid tid0); [subst; auto | now apply IHT].
all: try solve [apply IHT; omega].
Qed.
Lemma track_lock_cons tid tid' t a:
sto_trace ((tid', a) :: t) ->
locked_by t 0 = tid ->
locked_by ((tid', a) :: t) 0 = tid
\/ tid = 0 /\ a = lock_write_item
\/ tid = tid' /\ a = unlock_write_item
\/ tid = tid' /\ exists val, a = complete_write_item val.
Proof.
intros T L.
assert (tid' > 0) as TG. {
apply tid_nonzero with (a:=a) (t:=(tid', a)::t); cbn; auto.
}
inversion T; subst; cbn; auto.
right; right; right; eauto.
Qed.
Lemma locked_phase tid t:
sto_trace t ->
locked_by t 0 = tid ->
tid > 0 ->
trace_tid_phase tid t >= 2.
Proof.
intros T; revert tid; induction T; intros tid L G.
1: cbn in L; omega.
all: cbn.
all: destruct (Nat.eq_dec tid tid0); try omega.
all: try (now apply IHT).
1-3: subst; apply IHT in e; omega.
1-3: simpl in L; omega.
Qed.
Lemma commit_phase_cons tid p t:
sto_trace (p :: t) ->
trace_tid_phase tid t = 4 ->
trace_tid_phase tid (p :: t) = 4.
Proof.
destruct p as [tid' a]; intros T Fo; inversion T; cbn in *.
all: destruct (Nat.eq_dec tid tid'); auto.
all: subst; omega.
Qed.
Lemma commit_phase_app tid t1 t2:
sto_trace (t1 ++ t2) ->
trace_tid_phase tid t2 = 4 ->
trace_tid_phase tid (t1 ++ t2) = 4.
Proof.
induction t1; intros.
now simpl.
rewrite <- app_comm_cons in *.
apply commit_phase_cons; auto.
apply IHt1; auto.
now apply sto_trace_cons in H.
Qed.
Lemma phase_2_preserves_lock tid t1 t2:
sto_trace (t1 ++ t2) ->
trace_tid_phase tid (t1 ++ t2) = 2 ->
locked_by t2 0 = tid ->
locked_by (t1 ++ t2) 0 = tid.
Proof.
revert t2.
induction t1; intros t2 T P L.
now cbn.
destruct a as [tid' a].
cbn in P; destruct (Nat.eq_dec tid tid').
- destruct a; simpl in *; try omega.
+ rewrite e in *; clear e tid.
assert (trace_tid_phase tid' t2 >= 2). {
apply locked_phase.
now apply sto_trace_app with (t1:=(tid',try_commit_txn)::t1).
auto.
apply tid_nonzero with (a:=try_commit_txn) (t:=(tid', try_commit_txn)::t1 ++ t2).
auto.
simpl; now left.
}
inversion T; subst.
apply (phase_increase_app (locked_by t2 0)) in H3; omega.
+ auto.
- assert (locked_by (t1 ++ t2) 0 = tid) as LA. {
apply IHt1; auto.
now apply (sto_trace_cons _ _ T).
}
clear IHt1 L.
assert (tid > 0). {
apply trace_phase_nonzero with (t:=t1++t2).
now apply sto_trace_cons with (ta:=(tid',a)).
omega.
}
inversion T; cbn in *; auto; omega.
Qed.
Lemma locked_at_commit tid t1 t2 v:
sto_trace ((tid, seq_point) :: t1 ++ (tid, write_item v) :: t2) ->
locked_by (t1 ++ (tid, write_item v) :: t2) 0 = tid.
Proof.
intros T.
inversion T.
assert (tid > 0) as TG. {
apply tid_nonzero with (a:=write_item v) (t:=t); rewrite H0; auto.
apply in_or_app; right; now left.
}
assert (In (tid, lock_write_item) t1). {
assert (In (tid, lock_write_item) t). {
rewrite H0.
apply H2 with (v0:=v).
apply in_or_app; cbn; intuition.
}
apply phase_increase_in_app with (t2:=(tid, write_item v) :: t2); auto.
now rewrite <- H0.
simpl; destruct (Nat.eq_dec tid tid); intuition.
}
apply in_split in H4.
destruct H4 as [t1a [t1b T1]].
subst.
repeat rewrite <- app_assoc in *.
repeat rewrite <- app_comm_cons in *.
remember ((tid, lock_write_item) :: t1b ++ (tid, write_item v) :: t2) as tx.
assert (locked_by tx 0 = tid). {
rewrite Heqtx; now cbn.
}
assert (trace_tid_phase tid tx = 2). {
assert (trace_tid_phase tid tx >= 2). {
apply locked_phase; auto.
now apply sto_trace_app with (t1:=t1a).
}
assert (2 >= trace_tid_phase tid tx). {
rewrite <- H1.
now apply phase_increase_app.
}
omega.
}
apply phase_2_preserves_lock; auto.
Qed.
Lemma seq_point_after t1 t2 tid action:
sto_trace ((tid, action) :: t1 ++ (tid, commit_txn) :: t2)
-> action = complete_write_item (S (trace_write_version t2))
\/ action = commit_done_txn.
Proof.
intros T.
assert (trace_tid_phase tid (t1 ++ (tid, commit_txn) :: t2) = 4) as TG4. {
apply sto_trace_cons in T.
apply commit_phase_app; auto.
simpl; destruct (Nat.eq_dec tid tid); congruence.
}
inversion T; try omega.
2: now right.
left.
assert (trace_write_version (t1 ++ (tid, commit_txn) :: t2) =
trace_write_version t2). {
subst.
clear TG4 val H2 H3 H4 H5.
inversion T; subst; clear T H4 val.
remember (t1 ++ (tid, commit_txn) :: t2) as t.
clear H0.
revert t1 t2 tid Heqt H2 H3.
induction H5; intros t1 t2 tid T P L.
all: destruct t1; simpl in T.
1,2: congruence.
all: inversion T.
all: cbn in *.
all: destruct (Nat.eq_dec tid tid0); try congruence.
1-2,5,7,10,13: rewrite <- H3; apply (IHsto_trace _ _ _ H3); auto.
1-2: rewrite <- H2; apply (IHsto_trace _ _ _ H2); auto.
1: rewrite <- H4; apply (IHsto_trace _ _ _ H4); auto.
assert (tid > 0). {
apply (trace_phase_nonzero _ _ H5); omega.
}
omega.
rewrite <- H3; apply (IHsto_trace _ _ _ H3); auto.
rewrite H3; apply commit_phase_app.
rewrite <- H3; auto.
cbn; destruct (Nat.eq_dec tid tid); congruence.
assert (tid > 0). {
apply (tid_nonzero _ commit_txn _ H5).
rewrite H4; apply in_or_app; right; now left.
}
omega.
assert (tid > 0). {
apply (tid_nonzero _ commit_txn _ H5).
rewrite H4; apply in_or_app; right; now left.
}
omega.
}
now rewrite H6.
Qed.
|
State Before: F : Type ?u.80415
α : Type u_1
β : Type u_2
γ : Type ?u.80424
δ : Type ?u.80427
G : Type ?u.80430
inst✝⁵ : FunLike F α fun x => β
inst✝⁴ : CommMonoid α
inst✝³ : CommMonoid β
inst✝² : CommMonoid γ
inst✝¹ : CommMonoid δ
inst✝ : CommGroup G
A : Set α
B : Set β
C : Set γ
n : ℕ
a b c d : α
f g : α →* β
h : toFreimanHom A n f = toFreimanHom A n g
⊢ f = g
State After: no goals
Tactic: rwa [toFreimanHom, toFreimanHom, FreimanHom.mk.injEq, FunLike.coe_fn_eq] at h
|
Formal statement is:
lemma ball_divide_subset: "d \<ge> 1 \<Longrightarrow> ball x (e/d) \<subseteq> ball x e"
Informal statement is:
If $d \geq 1$, then the ball of radius $e/d$ centered at $x$ is contained in the ball of radius $e$ centered at $x$.
|
from dataclasses import dataclass, field
from typing import List
import numpy as np
import glob
import os
import pandas as pd
from scipy.interpolate import interp1d
from glass_transmission import get_transmittance
def transmission_at_surface(material, wavs):
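    """Fractional transmission of one AR-coated air/glass surface at `wavs`.

    Picks the first coating spreadsheet whose filename contains `material`.
    `wavs` is in Angstroms; the spreadsheets appear to tabulate nm, hence the
    factor of 10 below. Outside the tabulated range the coating is treated as
    opaque (fill_value=0).
    """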
sheet_path = '/Users/sl/code/python/github-packages/ucam_thruput/data/ucam_ar_coatings'
sheets = glob.glob(os.path.join(sheet_path, '*.xls'))
    for sheet in sheets:
        if material in sheet:
            break
    else:
        raise ValueError(f"no AR coating data found for material {material!r}")
df = pd.read_excel(sheet, skiprows=1, names=['Wav', 'R', 'T'])
x = df['Wav'] * 10
y = df['T']
f = interp1d(x, y, kind='cubic',
bounds_error=False, fill_value=0.0)
return f(wavs)
@dataclass
class Element:
material: str = 'Air'
thickness: float = 0.0
@dataclass
class Camera:
elements: List[Element]
    # np.ndarray is unhashable, so dataclasses rejects it as a bare default
    # (and a shared default array would be a trap anyway): use a factory.
    wavs: np.ndarray = field(default_factory=lambda: np.linspace(2000, 11000, 300))
@property
def n_elements(self):
return len(self.elements)
@property
def throughput(self):
result = np.ones_like(self.wavs)
for i in range(self.n_elements - 1):
this_material = self.elements[i].material
next_material = self.elements[i+1].material
# reflections at boundaries
if this_material != 'Air' and next_material != 'Air':
# glass-to-glass, no reflection (could be better - need glue specs)
result *= 1
else:
# glass-to-air, or air-to-glass, use AR coating data
material = this_material if next_material == 'Air' else next_material
result *= transmission_at_surface(material, self.wavs)
# transmission within element
if this_material != 'Air':
transmittance = get_transmittance(this_material, self.elements[i].thickness)
result *= transmittance(self.wavs)
return result
@property
def bulk_transmission(self):
result = np.ones_like(self.wavs)
for i in range(self.n_elements - 1):
this_material = self.elements[i].material
# transmission within element
if this_material != 'Air':
transmittance = get_transmittance(this_material, self.elements[i].thickness)
result *= transmittance(self.wavs)
return result
@property
def reflections(self):
result = np.ones_like(self.wavs)
for i in range(self.n_elements - 1):
this_material = self.elements[i].material
next_material = self.elements[i+1].material
# reflections at boundaries
if this_material != 'Air' and next_material != 'Air':
# glass-to-glass, no reflection (could be better - need glue specs)
result *= 1
else:
# glass-to-air, or air-to-glass, use AR coating data
material = this_material if next_material == 'Air' else next_material
result *= transmission_at_surface(material, self.wavs)
return result
collimator = Camera(
[
Element(),
Element('N-PSK3', 12.043),
Element(),
Element('CaF2', 16.95),
Element(),
Element('LLF1', 7),
Element(),
Element('CaF2', 17.7),
Element()
]
)
uband = Camera(
[
Element(),
Element('N-SK16', 6.054),
Element(),
Element('N-SK16', 7.937),
Element('LLF1', 2.425),
Element(),
Element('LLF1', 2.513),
Element('N-SK16', 7.881),
Element(),
Element('N-SK16', 6.033),
Element()
]
)
gband = Camera(
[
Element(),
Element('N-LAK10', 6.357),
Element(),
Element('N-LAK10', 7.934),
Element('SF2', 2.445),
Element(),
Element('SF2', 2.585),
Element('N-LAK10', 7.51),
Element(),
Element('N-LAK10', 6.075),
Element()
]
)
rband = Camera(
[
Element(),
Element('N-LAK22', 6.031),
Element(),
Element('N-LAK22', 8.003),
Element('N-SF1', 2.396),
Element(),
Element('N-SF1', 2.5),
Element('N-LAK22', 6.805),
Element(),
Element('N-LAK22', 5.408),
Element()
]
)
if __name__ == "__main__":
np.savetxt('../ucam_thruput/data/ucam_cam_bl.txt', np.column_stack((uband.wavs, uband.throughput)))
np.savetxt('../ucam_thruput/data/ucam_cam_grn.txt', np.column_stack((gband.wavs, gband.throughput)))
np.savetxt('../ucam_thruput/data/ucam_cam_red.txt', np.column_stack((rband.wavs, rband.throughput)))
np.savetxt('../ucam_thruput/data/ucam_coll_wht.txt',
np.column_stack((collimator.wavs, collimator.throughput)))
np.savetxt('../ucam_thruput/data/ucam_coll_ntt.txt',
np.column_stack((collimator.wavs, collimator.throughput)))
np.savetxt('../ucam_thruput/data/ucam_coll_ntt_old.txt',
np.column_stack((collimator.wavs, collimator.throughput)))
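    # Optional sanity check (a sketch, assuming the coating spreadsheets are
    # readable at the hard-coded path above): total throughput factors exactly
    # into bulk transmission times surface reflections.
    assert np.allclose(collimator.throughput,
                       collimator.bulk_transmission * collimator.reflections)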
|
(*
* Copyright 2014, General Dynamics C4 Systems
*
* SPDX-License-Identifier: GPL-2.0-only
*)
theory Detype_AI
imports ArchRetype_AI
begin
context begin interpretation Arch .
requalify_facts
valid_arch_mdb_detype
clearMemory_invs
invs_irq_state_independent
init_arch_objects_invs_from_restricted
caps_region_kernel_window_imp
init_arch_objects_wps
end
declare clearMemory_invs[wp]
declare invs_irq_state_independent[intro!, simp]
locale Detype_AI =
fixes state_ext_type :: "'a :: state_ext itself"
assumes valid_globals_irq_node:
"\<And>s cap ptr irq. \<lbrakk> valid_global_refs (s :: 'a state); cte_wp_at ((=) cap) ptr s \<rbrakk>
\<Longrightarrow> interrupt_irq_node s irq \<notin> cap_range cap"
assumes caps_of_state_ko:
"\<And>cap s. valid_cap cap (s :: 'a state)
\<Longrightarrow> is_untyped_cap cap \<or>
cap_range cap = {} \<or>
(\<forall>ptr \<in> cap_range cap. \<exists>ko. kheap s ptr = Some ko)"
assumes mapM_x_storeWord:
"\<And>ptr. is_aligned ptr word_size_bits
\<Longrightarrow> mapM_x (\<lambda>x. storeWord (ptr + of_nat x * word_size) 0) [0..<n]
= modify (underlying_memory_update
(\<lambda>m x. if \<exists>k. x = ptr + of_nat k \<and> k < n * word_size then 0 else m x))"
assumes empty_fail_freeMemory:
"empty_fail (freeMemory ptr bits)"
assumes valid_ioports_detype:
"valid_ioports (s::'a state) \<Longrightarrow> valid_ioports (detype (untyped_range cap) s)"
lemma obj_at_detype[simp]:
"obj_at P p (detype S s) = (p \<notin> S \<and> obj_at P p s)"
by (clarsimp simp: obj_at_def detype_def)
lemma pspace_detype[simp]:
"(kheap (detype S s) ptr = Some x)
= (kheap s ptr = Some x \<and> ptr \<notin> S)"
by (simp add: detype_def)
lemma cte_wp_at_detype[simp]:
"(cte_wp_at P p (detype S s))
= (cte_wp_at P p s \<and> fst p \<notin> S)"
apply (case_tac "fst p \<in> S")
apply (simp add: cte_wp_at_cases)+
done
lemma pred_tcb_at_detype[simp]:
"(pred_tcb_at proj P t (detype S s))
= (pred_tcb_at proj P t s \<and> t \<notin> S)"
by (fastforce simp add: pred_tcb_at_def)
lemma cdt_detype[simp]:
"cdt (detype S s) = cdt s"
by (simp add: detype_def)
lemma caps_of_state_detype[simp]:
"caps_of_state (detype S s) =
(\<lambda>p. if fst p \<in> S then None else caps_of_state s p)"
by (fastforce simp add: caps_of_state_cte_wp_at)
lemma state_refs_of_detype:
"state_refs_of (detype S s) = (\<lambda>x. if x \<in> S then {} else state_refs_of s x)"
by (rule ext, simp add: state_refs_of_def detype_def)
definition
obj_reply_refs :: "cap \<Rightarrow> machine_word set"
where
"obj_reply_refs cap \<equiv> obj_refs cap \<union>
(case cap of cap.ReplyCap t m R \<Rightarrow> {t} | _ \<Rightarrow> {})"
lemma ex_cte_cap_to_obj_ref_disj:
"ex_cte_cap_wp_to P ptr s
\<Longrightarrow> ((\<exists>ptr'. cte_wp_at (\<lambda>cap. fst ptr \<in> obj_refs cap) ptr' s)
\<or> (\<exists>ptr' irq. cte_wp_at ((=) (cap.IRQHandlerCap irq)) ptr' s
\<and> ptr = (interrupt_irq_node s irq, [])))"
apply (clarsimp simp: ex_cte_cap_wp_to_def cte_wp_at_caps_of_state)
apply (frule cte_refs_obj_refs_elem, erule disjE)
apply fastforce
apply clarsimp
done
definition
"descendants_range_in S p \<equiv>
\<lambda>s. \<forall>p' \<in> descendants_of p (cdt s). cte_wp_at (\<lambda>c. cap_range c \<inter> S = {}) p' s"
lemma descendants_range_in_lift:
assumes st: "\<And>P. \<lbrace>\<lambda>s. P (cdt s)\<rbrace> f \<lbrace>\<lambda>r s. P (cdt s)\<rbrace>"
assumes untyped_range: "\<And>P p. \<lbrace>\<lambda>s. Q s \<and> cte_wp_at (\<lambda>c. P (cap_range c)) p s\<rbrace> f \<lbrace>\<lambda>r s. cte_wp_at (\<lambda>c. P (cap_range c)) p s\<rbrace>"
shows "\<lbrace>Q and descendants_range_in S slot\<rbrace> f \<lbrace>\<lambda>r. descendants_range_in S slot\<rbrace>"
apply (clarsimp simp:descendants_range_in_def)
apply (rule hoare_pre)
apply (wps st)
apply (rule hoare_vcg_ball_lift)
apply (wp untyped_range)
apply clarsimp
done
lemma set_cap_descendants_range_in:
shows "\<lbrace>cte_wp_at (\<lambda>c. cap_range c = cap_range cap) slot and descendants_range_in S slot\<rbrace>
set_cap cap slot \<lbrace>\<lambda>r. descendants_range_in S slot\<rbrace>"
apply (rule hoare_name_pre_state)
apply (clarsimp simp:cte_wp_at_caps_of_state)
apply (rule hoare_pre)
apply (wp descendants_range_in_lift
[where Q = "cte_wp_at (\<lambda>c. cap_range c = cap_range cap) slot"] )
apply (wp set_cap_cte_wp_at)
apply (clarsimp simp:cte_wp_at_caps_of_state)+
done
lemma empty_descendants_range_in:
"descendants_of p (cdt s) = {} \<Longrightarrow> descendants_range_in S p s"
by (clarsimp simp:descendants_range_in_def)
lemma valid_mdb_descendants_range_in:
"valid_mdb s \<Longrightarrow> descendants_range_in S p s = (\<forall>p'\<in>descendants_of p (cdt s).
\<exists>c. (null_filter (caps_of_state s)) p' = Some c \<and> cap_range c \<inter> S = {})"
apply (clarsimp simp:descendants_range_in_def
split:if_splits)
apply (intro ext iffI ballI impI)
apply (frule(1) bspec)
apply (frule(1) descendants_of_cte_at)
apply (clarsimp simp:cte_wp_at_caps_of_state null_filter_def descendants_of_def)
apply (clarsimp simp:valid_mdb_no_null)
apply (drule(1) bspec)
apply (clarsimp simp:cte_wp_at_caps_of_state null_filter_def cap_range_def split:if_split_asm)
done
definition
"descendants_range cap p \<equiv>
\<lambda>s. \<forall>p' \<in> descendants_of p (cdt s). cte_wp_at (\<lambda>c. cap_range c \<inter> cap_range cap = {}) p' s"
lemma descendants_rangeD:
"\<lbrakk> descendants_range cap p s; cdt s \<Turnstile> p \<rightarrow> p' \<rbrakk> \<Longrightarrow>
\<exists>c. caps_of_state s p' = Some c \<and> cap_range c \<inter> cap_range cap = {}"
by (simp add: descendants_range_def descendants_of_def cte_wp_at_caps_of_state
del: split_paired_All)
lemma subset_splitE:
"\<lbrakk>A \<subseteq> B \<or> B \<subseteq> A \<or> A \<inter> B = {} ; A \<subset> B \<Longrightarrow>P;B \<subset> A \<Longrightarrow>P ;A = B \<Longrightarrow> P; A \<inter> B = {} \<Longrightarrow> P\<rbrakk> \<Longrightarrow>P"
apply (simp add:subset_iff_psubset_eq)
apply (elim disjE)
apply auto
done
lemma cap_range_untyped_range_eq[simp]:
"is_untyped_cap a \<Longrightarrow> cap_range a = untyped_range a"
by (clarsimp simp:is_cap_simps cap_range_def)
lemma (in Detype_AI) untyped_cap_descendants_range:
"\<lbrakk>valid_pspace (s :: 'a state); caps_of_state s p = Some cap; is_untyped_cap cap;valid_mdb s;
q\<in> descendants_of p (cdt s) \<rbrakk>
\<Longrightarrow> cte_wp_at (\<lambda>c. (cap_range c \<inter> usable_untyped_range cap = {})) q s"
apply (clarsimp simp: valid_pspace_def)
apply (frule(1) descendants_of_cte_at)
apply (clarsimp simp:cte_wp_at_caps_of_state)
apply (case_tac "is_untyped_cap capa")
apply (frule(1) valid_cap_aligned[OF caps_of_state_valid])
apply (frule_tac cap = capa in valid_cap_aligned[OF caps_of_state_valid])
apply simp
apply (frule_tac c = capa in untyped_range_non_empty)
apply simp
apply (frule_tac c = cap in untyped_range_non_empty)
apply simp
apply (clarsimp simp:valid_mdb_def)
apply (drule untyped_incD)
apply simp+
apply clarify
apply (erule subset_splitE)
apply simp
apply (thin_tac "P\<longrightarrow>Q" for P Q)+
apply (clarsimp simp:descendants_of_def)
apply (drule(1) trancl_trans)
apply (simp add:vmdb_abs_def valid_mdb_def vmdb_abs.no_loops)
apply simp
apply simp
apply (clarsimp simp:descendants_of_def | erule disjE)+
apply (drule(1) trancl_trans)
apply (simp add:vmdb_abs_def valid_mdb_def vmdb_abs.no_loops)+
apply (thin_tac "P\<longrightarrow>Q" for P Q)+
apply (erule(1) disjoint_subset2[OF usable_range_subseteq])
apply (simp add:Int_ac)
apply (drule(1) caps_of_state_valid)+
apply (frule_tac cap = capa in caps_of_state_ko)
apply (elim disjE)
apply clarsimp+
apply (clarsimp simp:valid_cap_def is_cap_simps valid_untyped_def
simp del:usable_untyped_range.simps untyped_range.simps)
apply (rule ccontr)
apply (clarsimp dest!: int_not_emptyD simp del:usable_untyped_range.simps untyped_range.simps)
apply (thin_tac "\<forall>x y z. P x y z" for P)
apply (drule(1) bspec)
apply (clarsimp dest!: int_not_emptyD simp del:usable_untyped_range.simps untyped_range.simps)
apply (drule_tac x = x in spec)
apply (clarsimp simp del:usable_untyped_range.simps untyped_range.simps)
apply (drule(2) p_in_obj_range )
apply (erule impE)
apply (erule(1) notemptyI[OF IntI[OF _ subsetD[OF usable_range_subseteq]]])
apply (simp add:is_cap_simps)
apply assumption
apply blast
done
lemma untyped_children_in_mdbEE:
assumes ass: "untyped_children_in_mdb s" "cte_wp_at ((=) cap) ptr s" "is_untyped_cap cap" "cte_wp_at P ptr' s"
and step1: "\<And>cap'. \<lbrakk>cte_wp_at ((=) cap') ptr' s; P cap'\<rbrakk> \<Longrightarrow> obj_refs cap' \<inter> untyped_range cap \<noteq> {}"
and step2: "\<And>cap'. \<lbrakk>cte_wp_at ((=) cap') ptr' s; cap_range cap' \<inter> untyped_range cap \<noteq> {};ptr' \<in> descendants_of ptr (cdt s) \<rbrakk> \<Longrightarrow> Q"
shows "Q"
using ass
apply (clarsimp simp:cte_wp_at_caps_of_state)
apply (rule step2)
apply (simp add:cte_wp_at_caps_of_state)
apply (drule step1[rotated])
apply (simp add:cte_wp_at_caps_of_state)
apply (simp add:cap_range_def)
apply blast
apply (simp add:untyped_children_in_mdb_def del:split_paired_All)
apply (drule_tac x = ptr in spec)
apply (drule_tac x = ptr' in spec)
apply (erule impE)
apply (clarsimp simp:cte_wp_at_caps_of_state)
apply (drule step1[rotated])
apply (clarsimp simp:cte_wp_at_caps_of_state)+
done
definition
"clear_um S \<equiv> (machine_state_update \<circ> underlying_memory_update)
(\<lambda>m p. if p\<in>S then 0 else m p)"
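(* clear_um S zeroes the underlying machine memory on S and leaves the abstract
   heap alone; detype_clear_um_independent below shows it commutes with detype. *)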
interpretation clear_um:
p_arch_idle_update_int_eq "clear_um S"
by unfold_locales (simp_all add: clear_um_def)
lemma descendants_range_inD:
"\<lbrakk>descendants_range_in S p s;p'\<in>descendants_of p (cdt s);caps_of_state s p' = Some cap\<rbrakk>
\<Longrightarrow> cap_range cap \<inter> S = {}"
by (auto simp:descendants_range_in_def cte_wp_at_caps_of_state dest!:bspec)
lemma descendants_range_def2:
"descendants_range cap p = descendants_range_in (cap_range cap) p"
by (simp add:descendants_range_in_def descendants_range_def)
lemma detype_clear_um_independent:
"detype S (clear_um T s) = clear_um T (detype S s)"
by (auto simp add: detype_def clear_um_def ext)
(* FIXME: move *)
lemma (in pspace_update_eq) zombies_final_eq[iff]:
"zombies_final (f s) = zombies_final s"
by (simp add: zombies_final_def is_final_cap'_def)
lemma valid_mdb_clear_um [iff]:
"valid_mdb (clear_um S s) = valid_mdb s"
by (simp add: clear_um_def)
lemma valid_ioc_clear_um[iff]:
"valid_ioc (clear_um S s) = valid_ioc s"
by (simp add: clear_um_def)
lemma cur_tcb_clear_um[iff]: "cur_tcb (clear_um S s) = cur_tcb s"
by (simp add: clear_um_def cur_tcb_def)
lemma untyped_children_in_mdb_clear_um[iff]:
"untyped_children_in_mdb (clear_um S s) = untyped_children_in_mdb s"
by (simp add: untyped_children_in_mdb_def clear_um_def)
lemma descendants_inc_empty_slot:
assumes desc_inc :"descendants_inc m cs'"
assumes mdb:"mdb_cte_at (\<lambda>p. \<exists>c. cs p = Some c \<and> cap.NullCap \<noteq> c) m"
assumes dom:"\<forall>x\<in> dom cs. (cs' x = cs x)"
shows "descendants_inc m cs"
using desc_inc
apply (simp add:descendants_inc_def del:split_paired_All)
apply (intro allI impI)
apply (drule spec)+
apply (erule(1) impE)
apply (simp add:descendants_of_def)
apply (frule tranclD)
apply (drule tranclD2)
apply (simp add:cdt_parent_rel_def is_cdt_parent_def)
apply (elim exE conjE)
apply (drule mdb_cte_atD[OF _ mdb])+
apply (elim exE conjE)
apply (drule bspec[OF dom,OF domI])+
apply simp
done
lemma descendants_range_imply_no_descendants:
"\<lbrakk>descendants_range cap p s;descendants_inc (cdt s) (caps_of_state s);
is_untyped_cap cap; caps_of_state s p = Some cap;valid_objs s;valid_mdb s\<rbrakk>
\<Longrightarrow> descendants_of p (cdt s)= {}"
apply (simp add:descendants_range_def is_cap_simps descendants_inc_def del:split_paired_All)
apply (elim exE)
apply (rule equals0I)
apply (drule(1) bspec)
apply (drule spec)+
apply (erule(1) impE)
apply (drule(1) descendants_of_cte_at)
apply (clarsimp simp:cte_wp_at_caps_of_state simp del:split_paired_All)
apply (drule(1) physical_valid_cap_not_empty_range[OF caps_of_state_valid_cap,rotated])
apply simp
apply auto
done
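(* detype_locale packages the preconditions for deleting the objects covered by
   an untyped cap: the cap sits at ptr, every descendant of ptr has a cap range
   disjoint from it, and the state satisfies the invariants. *)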
locale detype_locale =
fixes cap and ptr and s
assumes cap: "cte_wp_at ((=) cap) ptr s"
and untyped: "is_untyped_cap cap"
and nodesc: "descendants_range cap ptr s"
and invs: "invs s"
and child: "untyped_children_in_mdb s"
context detype_locale begin
lemma drange:"descendants_range_in (cap_range cap) ptr (s :: 'a state)"
using nodesc
by (simp add:descendants_range_def2)
lemma iflive: "if_live_then_nonz_cap s"
using invs by (simp add: invs_def valid_state_def valid_pspace_def)
lemma live_okE:
"\<And>P p. \<lbrakk> obj_at P p s; \<And>obj. P obj \<Longrightarrow> live obj \<rbrakk>
\<Longrightarrow> p \<notin> untyped_range cap"
apply (drule if_live_then_nonz_capD [OF iflive])
apply simp
apply (rule notI)
apply (erule ex_nonz_cap_toE)
apply (erule untyped_children_in_mdbEE [OF child cap untyped])
apply (clarsimp simp: zobj_refs_to_obj_refs)
apply blast
apply (drule descendants_range_inD[OF drange])
apply (simp add:cte_wp_at_caps_of_state)
apply (simp add:untyped)
done
lemma ifunsafe: "if_unsafe_then_cap s"
using invs by (simp add: invs_def valid_state_def valid_pspace_def)
lemma globals: "valid_global_refs s"
using invs by (simp add: invs_def valid_state_def)
(* this should be true *)
lemma state_refs: "state_refs_of (detype (untyped_range cap) s) = state_refs_of s"
apply (rule ext, clarsimp simp add: state_refs_of_detype)
apply (rule sym, rule equals0I, drule state_refs_of_elemD)
apply (drule live_okE, rule refs_of_live, clarsimp)
apply simp
done
lemma idle: "idle_thread (detype (untyped_range cap) s) = idle_thread s"
by (simp add: detype_def)
lemma valid_arch_caps: "valid_arch_caps s"
using invs by (simp add: invs_def valid_state_def)
(* moreover *)
lemma valid_arch_state: "valid_arch_state s" using invs
by clarsimp
(* moreover *)
lemma ut_mdb: "untyped_mdb (cdt s) (caps_of_state s)"
using invs
by (clarsimp dest!: invs_mdb simp add: valid_mdb_def)
lemma arch_state_det: "\<And>r. arch_state (detype r s) = arch_state s" (* SIMP DUP *)
by (simp add: detype_def)
lemma no_obj_refs:
"\<And>slot cap' x. \<lbrakk> caps_of_state s slot = Some cap';
x \<in> obj_refs cap'; x \<in> untyped_range cap \<rbrakk> \<Longrightarrow> False"
using cap untyped
apply (clarsimp simp: cte_wp_at_caps_of_state)
apply (drule (2) untyped_mdbD)
apply blast
apply (rule ut_mdb)
apply (drule descendants_range_inD[OF drange])
apply (simp add:cte_wp_at_caps_of_state)
apply (simp add:cap_range_def)
apply blast
done
lemma valid_pspace: "valid_pspace s" using invs
by (simp add: invs_def valid_state_def)
lemma valid_global_objs: "valid_global_objs s"
using invs by (clarsimp simp: invs_def valid_state_def)
lemma cap_is_valid: "valid_cap cap s"
by (rule cte_wp_valid_cap[OF cap invs_valid_objs[OF invs]])
end
locale Detype_AI_2 =
fixes cap ptr s
assumes detype_invariants:
"\<lbrakk> cte_wp_at ((=) cap) ptr s
; is_untyped_cap cap
; descendants_range cap ptr s
; invs s
; untyped_children_in_mdb s
; ct_active s
\<rbrakk>
\<Longrightarrow> (invs and untyped_children_in_mdb)
(detype (untyped_range cap) (clear_um (untyped_range cap) s))"
locale detype_locale_gen_1 = Detype_AI "TYPE('a)" + detype_locale cap ptr s
for cap ptr
and s :: "('a :: state_ext) state" +
assumes valid_cap:
"\<And>cap'. \<lbrakk> s \<turnstile> cap'; obj_reply_refs cap' \<subseteq> (UNIV - untyped_range cap) \<rbrakk>
\<Longrightarrow> detype (untyped_range cap) s \<turnstile> cap'"
assumes glob_det: "\<And>r. global_refs (detype r s) = global_refs s"
assumes arch_valid_obj:
"\<And>p ao. \<lbrakk>ko_at (ArchObj ao) p s; arch_valid_obj ao s\<rbrakk>
\<Longrightarrow> arch_valid_obj ao (detype (untyped_range cap) s)"
assumes sym_hyp_refs_detype:
"sym_refs (state_hyp_refs_of (detype (untyped_range cap) s))"
assumes tcb_arch_detype:
"\<And>p t. \<lbrakk>ko_at (TCB t) p s; valid_arch_tcb (tcb_arch t) s\<rbrakk>
\<Longrightarrow> valid_arch_tcb (tcb_arch t) (detype (untyped_range cap) s)"
locale detype_locale_gen_2 = detype_locale_gen_1 cap ptr s
for cap ptr
and s :: "('a :: state_ext) state" +
assumes detype_invs_assms:
"valid_idle (detype (untyped_range cap) s)"
"valid_arch_state (detype (untyped_range cap) s)"
"valid_vspace_objs (detype (untyped_range cap) s)"
"valid_arch_caps (detype (untyped_range cap) s)"
"valid_kernel_mappings (detype (untyped_range cap) s)"
"valid_global_objs (detype (untyped_range cap) s)"
"valid_asid_map (detype (untyped_range cap) s)"
"valid_global_vspace_mappings (detype (untyped_range cap) s)"
"equal_kernel_mappings (detype (untyped_range cap) s)"
"pspace_in_kernel_window (detype (untyped_range cap) s)"
"valid_machine_state (clear_um (untyped_range cap) (detype (untyped_range cap) s))"
"pspace_respects_device_region (clear_um (untyped_range cap) (detype (untyped_range cap) s))"
"cap_refs_respects_device_region (clear_um (untyped_range cap) (detype (untyped_range cap) s))"
locale detype_locale_arch = detype_locale + Arch
context detype_locale_gen_1
begin
lemma irq_node:
"interrupt_irq_node (s :: 'a state) irq \<notin> untyped_range cap"
using valid_globals_irq_node [OF globals cap]
by (simp add: cap_range_def)
lemma non_null_present:
"\<And>p. cte_wp_at ((\<noteq>) cap.NullCap) p s \<Longrightarrow> fst p \<notin> untyped_range cap"
apply (drule if_unsafe_then_capD[OF _ ifunsafe], simp)
apply (drule ex_cte_cap_to_obj_ref_disj, erule disjE)
apply clarsimp
apply (erule untyped_children_in_mdbEE[OF child cap untyped])
apply blast
apply (drule descendants_range_inD[OF drange])
apply (simp add:cte_wp_at_caps_of_state)
apply (simp add:untyped)
apply (clarsimp simp: irq_node)
done
lemma non_filter_detype:
"null_filter (caps_of_state s) = null_filter (caps_of_state (detype (untyped_range cap) s))"
apply (intro iffI ext)
apply (clarsimp simp: null_filter_def split:if_splits)+
apply (rule ccontr)
apply (clarsimp dest!:caps_of_state_cteD)
apply (frule non_null_present[OF cte_wp_at_weakenE])
apply (clarsimp simp:cte_wp_at_caps_of_state)
apply simp
done
lemma non_null_caps:
"\<And>p c. \<lbrakk> caps_of_state s p = Some c; c \<noteq> cap.NullCap \<rbrakk>
\<Longrightarrow> fst p \<notin> untyped_range cap"
by (clarsimp simp: cte_wp_at_caps_of_state non_null_present)
lemma vreply: "valid_reply_caps s"
using invs by (simp add: invs_def valid_state_def)
lemma vmaster: "valid_reply_masters s"
using invs by (simp add: invs_def valid_state_def)
lemma valid_cap2:
"\<And>cap'. \<lbrakk> \<exists>p. cte_wp_at ((=) cap') p s \<rbrakk>
\<Longrightarrow> obj_reply_refs cap' \<subseteq> (UNIV - untyped_range cap)"
apply clarsimp
apply (simp add: obj_reply_refs_def, erule disjE)
apply (erule untyped_children_in_mdbEE [OF child cap untyped])
apply blast
apply (drule descendants_range_inD[OF drange])
apply (simp add:cte_wp_at_caps_of_state)
apply (simp add:untyped)
apply (clarsimp split: cap.split_asm bool.split_asm)
apply (rename_tac t master rights)
apply (case_tac master, simp_all)
apply (frule valid_reply_mastersD' [OF _ vmaster])
apply (fastforce simp: cte_wp_at_caps_of_state dest: non_null_caps)
apply (subgoal_tac "has_reply_cap t s")
apply (drule valid_reply_capsD [OF _ vreply])
apply (simp add: pred_tcb_at_def)
apply (fastforce simp: live_def dest: live_okE)
apply (fastforce simp: has_reply_cap_def is_reply_cap_to_def elim:cte_wp_at_lift)
done
(* invariants BEGIN *)
named_theorems detype_invs_lemmas
lemma refsym : "sym_refs (state_refs_of s)"
using invs by (simp add: invs_def valid_state_def valid_pspace_def)
lemma hyprefsym : "sym_refs (state_hyp_refs_of s)"
using invs by (simp add: invs_def valid_state_def valid_pspace_def)
lemma refs_of: "\<And>obj p. \<lbrakk> ko_at obj p s \<rbrakk> \<Longrightarrow> refs_of obj \<subseteq> (UNIV - untyped_range cap \<times> UNIV)"
by (fastforce intro: refs_of_live dest!: sym_refs_ko_atD[OF _ refsym] live_okE)
lemma refs_of2: "\<And>obj p. kheap s p = Some obj
\<Longrightarrow> refs_of obj \<subseteq> (UNIV - untyped_range cap \<times> UNIV)"
by (simp add: refs_of obj_at_def)
lemma valid_obj: "\<And>p obj. \<lbrakk> valid_obj p obj s; ko_at obj p s \<rbrakk>
\<Longrightarrow> valid_obj p obj (detype (untyped_range cap) s)"
apply (clarsimp simp: valid_obj_def
split: Structures_A.kernel_object.split_asm)
apply (clarsimp simp: valid_cs_def)
apply (drule well_formed_cnode_valid_cs_size)
apply (rule valid_cap)
apply fastforce
apply (rule valid_cap2)
apply (erule ranE)
apply (fastforce simp: obj_at_def intro!: cte_wp_at_cteI)
apply (frule refs_of)
apply (clarsimp simp: valid_tcb_def obj_at_def tcb_arch_detype)
apply (rule conjI)
apply (erule ballEI)
apply (clarsimp elim!: ranE)
apply (erule valid_cap [OF _ valid_cap2])
apply (fastforce intro!: cte_wp_at_tcbI)
apply (clarsimp simp: valid_tcb_state_def valid_bound_ntfn_def
split: Structures_A.thread_state.split_asm option.splits)
apply (frule refs_of)
apply (rename_tac endpoint)
apply (case_tac endpoint, (fastforce simp: valid_ep_def)+)
apply (frule refs_of)
apply (rename_tac notification ntfn_ext)
apply (case_tac "ntfn_obj ntfn_ext")
apply (auto simp: valid_ntfn_def ntfn_bound_refs_def split: option.splits)
apply (auto intro: arch_valid_obj)
done
lemma valid_objs_detype[detype_invs_lemmas] : "valid_objs (detype (untyped_range cap) s)"
using invs_valid_objs[OF invs]
apply (clarsimp simp add: valid_objs_def dom_def)
apply (erule allE, erule impE, erule exI)
apply (clarsimp elim!: valid_obj)
apply (simp add: obj_at_def)
done
lemma pspace_aligned_detype[detype_invs_lemmas] : "pspace_aligned (detype (untyped_range cap) s)"
using invs_psp_aligned[OF invs]
apply (clarsimp simp: pspace_aligned_def)
apply (drule bspec, erule domI)
apply (clarsimp simp: detype_def)
done
lemma sym_refs_detype[detype_invs_lemmas] :
"sym_refs (state_refs_of (detype (untyped_range cap) s))"
using refsym by (simp add: state_refs)
lemmas [detype_invs_lemmas] = sym_hyp_refs_detype
lemma pspace_distinct_detype[detype_invs_lemmas]: "pspace_distinct (detype (untyped_range cap) s)"
apply (insert invs, drule invs_distinct)
apply (auto simp: pspace_distinct_def)
done
lemma cur_tcb_detype[detype_invs_lemmas]:
assumes ct_act: "ct_active s"
shows "cur_tcb (detype (untyped_range cap) s)" (* CT_ACT *)
apply (insert ct_act invs)
apply (drule tcb_at_invs)
apply (simp add: cur_tcb_def ct_in_state_def)
apply (clarsimp simp: detype_def pred_tcb_at_def)
apply (fastforce simp: live_def dest: live_okE)
done
lemma live_okE2: "\<And>obj p. \<lbrakk> kheap s p = Some obj; live obj \<rbrakk>
\<Longrightarrow> p \<notin> untyped_range cap"
by (simp add: live_okE[where P=live] obj_at_def)
lemma untyped_mdb : "\<And>m. untyped_mdb m (caps_of_state s)
\<Longrightarrow> untyped_mdb m (\<lambda>p. if fst p \<in> untyped_range cap then None else caps_of_state s p)"
apply (simp only: untyped_mdb_def)
apply (elim allEI)
apply clarsimp
done
lemma untyped_inc : "\<And>m. untyped_inc m (caps_of_state s)
\<Longrightarrow> untyped_inc m (\<lambda>p. if fst p \<in> untyped_range cap then None else caps_of_state s p)"
apply (simp only: untyped_inc_def)
apply (elim allEI)
apply clarsimp
done
lemma reply_caps_mdb : "\<And>m. reply_caps_mdb m (caps_of_state s)
\<Longrightarrow> reply_caps_mdb m (\<lambda>p. if fst p \<in> untyped_range cap then None else caps_of_state s p)"
apply (simp only: reply_caps_mdb_def)
apply (elim allEI)
apply (clarsimp elim!: exEI)
apply (fastforce dest: non_null_caps)
done
lemma reply_masters_mdb : "\<And>m. reply_masters_mdb m (caps_of_state s)
\<Longrightarrow> reply_masters_mdb m (\<lambda>p. if fst p \<in> untyped_range cap then None else caps_of_state s p)"
apply (simp only: reply_masters_mdb_def)
apply (elim allEI)
apply clarsimp
apply (drule(1) bspec)
apply (fastforce dest: non_null_caps)
done
lemma reply_mdb : "\<And>m. reply_mdb m (caps_of_state s)
\<Longrightarrow> reply_mdb m (\<lambda>p. if fst p \<in> untyped_range cap then None else caps_of_state s p)"
by (simp add: reply_mdb_def reply_caps_mdb reply_masters_mdb)
end
context detype_locale_gen_1 begin
lemma valid_mdb_detype[detype_invs_lemmas]: "valid_mdb (detype (untyped_range cap) s)"
apply (insert invs, drule invs_mdb)
apply (simp add: valid_mdb_def)
apply (rule context_conjI)
apply (safe intro!: mdb_cte_atI elim!: untyped_mdb untyped_inc reply_mdb)
apply (drule(1) mdb_cte_atD)
apply (clarsimp dest!: non_null_present)
apply (drule(1) mdb_cte_atD)
apply (clarsimp dest!: non_null_present)
apply (erule descendants_inc_empty_slot)
apply (clarsimp simp:cte_wp_at_caps_of_state swp_def)
apply clarsimp
apply (simp add: ut_revocable_def detype_def del: split_paired_All)
apply (simp add: irq_revocable_def detype_def del: split_paired_All)
apply (simp add: reply_master_revocable_def detype_def del: split_paired_All)
apply (simp add: valid_arch_mdb_detype)
done
lemma valid_ioports_detype[detype_invs_lemmas]:
"valid_ioports (detype (untyped_range cap) s)"
apply (insert invs, drule invs_valid_ioports)
by (clarsimp simp: valid_ioports_detype)
lemma untype_children_detype[detype_invs_lemmas]: "untyped_children_in_mdb (detype (untyped_range cap) s)"
apply (insert child)
apply (simp add: untyped_children_in_mdb_def)
apply (erule allEI)+
apply (clarsimp simp: detype_def)
done
lemma live_nonz_detype[detype_invs_lemmas]: "if_live_then_nonz_cap (detype (untyped_range cap) s)"
apply (insert iflive)
apply (simp add: if_live_then_nonz_cap_def ex_nonz_cap_to_def)
apply (erule allEI)
apply (rule impI, erule conjE, drule(1) mp)
apply (erule exEI)
apply clarsimp
apply (frule non_null_present [OF cte_wp_at_weakenE])
apply clarsimp+
done
lemma irq_node_detype[simp]: (* duplicated lemma *)
"\<And>r. interrupt_irq_node (detype r s) = interrupt_irq_node s"
by (simp add: detype_def)
lemma INV_9[detype_invs_lemmas]: "if_unsafe_then_cap (detype (untyped_range cap) s)"
apply (insert ifunsafe)
apply (simp add: if_unsafe_then_cap_def ex_cte_cap_wp_to_def)
apply (erule allEI, rule impI)
apply (erule allEI)
apply (clarsimp del: exE)
apply (erule exEI)
apply clarsimp
apply (frule(1) non_null_caps)
apply (frule non_null_present [OF cte_wp_at_weakenE])
apply clarsimp+
done
lemma zombies_final: "zombies_final s"
using invs by (simp add: invs_def valid_state_def valid_pspace_def)
lemma zombies_final_detype[detype_invs_lemmas]: "zombies_final (detype (untyped_range cap) s)"
apply (insert zombies_final)
apply (simp add: zombies_final_def final_cap_at_eq)
apply (elim allEI)
apply (rule impI, erule conjE, drule(1) mp)
apply (elim exEI conjE conjI allEI)
apply (rule impI, elim conjE)
apply simp
done
lemma valid_refs_detype[detype_invs_lemmas]: "valid_global_refs (detype (untyped_range cap) s)"
using globals
by (simp add: valid_global_refs_def valid_refs_def glob_det)
lemma valid_reply_caps_detype[detype_invs_lemmas]:
"valid_reply_caps (detype (untyped_range cap) s)"
using vreply
apply (clarsimp simp: valid_reply_caps_def has_reply_cap_def)
apply (rule conjI)
apply (erule allEI)
apply (rule impI)
apply (elim impE exE conjE, intro exI, assumption)
apply (simp add: pred_tcb_at_def)
apply (fastforce simp: live_def dest: live_okE)
apply (clarsimp simp: unique_reply_caps_def)
done
lemma valid_irq_detype[detype_invs_lemmas]: "valid_irq_node (detype (untyped_range cap) s)"
using invs valid_globals_irq_node [OF globals cap]
by (simp add: valid_irq_node_def invs_def valid_state_def cap_range_def)
lemma valid_reply_masters_detype[detype_invs_lemmas]:
"valid_reply_masters (detype (untyped_range cap) s)"
using vmaster by (clarsimp simp: valid_reply_masters_def)
lemma valid_irq_handlers_detype[detype_invs_lemmas]:
"valid_irq_handlers (detype (untyped_range cap) s)"
using invs
apply (simp add: valid_irq_handlers_def ran_def irq_issued_def
invs_def valid_state_def)
apply (force simp: detype_def)
done
lemma only_idle_detype[detype_invs_lemmas]: "only_idle (detype (untyped_range cap) s)"
proof -
have "only_idle s"
using invs by (simp add: invs_def valid_state_def)
thus ?thesis
apply (clarsimp simp: only_idle_def)
apply (simp add: detype_def)
done
qed
lemma cap_refs_in_kernel_detype[detype_invs_lemmas]:
"cap_refs_in_kernel_window (detype (untyped_range cap) s)"
proof -
have "cap_refs_in_kernel_window s"
using invs by (simp add: invs_def valid_state_def)
thus ?thesis
apply (simp add: cap_refs_in_kernel_window_def
valid_refs_def arch_state_det)
done
qed
lemma valid_ioc_detype[detype_invs_lemmas]: "valid_ioc (detype (untyped_range cap) s)"
proof -
have "valid_ioc s" using invs by (simp add: invs_def valid_state_def)
thus ?thesis
apply (simp add: valid_ioc_def)
apply (clarsimp simp: detype_def neq_commute)
apply (drule spec, drule spec, erule impE, assumption)
apply (frule_tac p="(a,b)" in non_null_present[simplified neq_commute])
apply simp
done
qed
lemmas p2pm1_to_mask = add_mask_fold
lemma valid_irq_states_detype[detype_invs_lemmas]: "valid_irq_states
(clear_um (untyped_range cap) (detype (untyped_range cap) s))"
proof -
have "valid_irq_states s" using invs by (simp add: invs_def valid_state_def)
thus ?thesis
apply(clarsimp simp: clear_um_def detype_def valid_irq_states_def)
done
qed
end
context detype_locale_gen_2 begin
lemma invariants:
assumes ct_act: "ct_active s"
shows "(invs and untyped_children_in_mdb)
(detype (untyped_range cap) (clear_um (untyped_range cap) s))"
using detype_invs_lemmas detype_invs_assms ct_act
by (simp add: invs_def valid_state_def valid_pspace_def
detype_clear_um_independent clear_um.state_refs_update)
end
(* detype_locale_gen_2 cap ptr s *)
(* FIXME: move *)
lemma gets_modify_comm2:
"\<forall>s. g (f s) = g s \<Longrightarrow>
(do x \<leftarrow> modify f; y \<leftarrow> gets g; m x y od) =
(do y \<leftarrow> gets g; x \<leftarrow> modify f; m x y od)"
apply (rule ext)
apply (drule spec)
by (rule gets_modify_comm)
lemma dmo_detype_comm:
assumes "empty_fail f"
shows "do_machine_op f >>= (\<lambda>s. modify (detype S)) =
modify (detype S) >>= (\<lambda>s. do_machine_op f)"
proof -
have machine_state_detype: "\<forall>s. machine_state (detype S s) = machine_state s"
by (simp add: detype_def)
have detype_msu_independent:
"\<And>f. detype S \<circ> machine_state_update f = machine_state_update f \<circ> detype S"
by (simp add: detype_def ext)
from assms
show ?thesis
apply (simp add: do_machine_op_def split_def bind_assoc)
apply (simp add: gets_modify_comm2[OF machine_state_detype])
apply (rule arg_cong_bind1)
apply (simp add: empty_fail_def select_f_walk[OF empty_fail_modify]
modify_modify detype_msu_independent)
done
qed
lemma (in Detype_AI) delete_objects_def2:
"delete_objects ptr bits \<equiv>
do modify (detype {ptr..ptr + 2 ^ bits - 1});
do_machine_op (freeMemory ptr bits)
od"
by (rule eq_reflection)
(simp add: delete_objects_def dmo_detype_comm[OF empty_fail_freeMemory])
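(* Reading delete_objects_def2: the abstract heap is detyped first and the
   freed memory zeroed afterwards; dmo_detype_comm above is exactly the
   commutation fact that justifies swapping the two phases of delete_objects. *)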
(* FIXME: move *)
lemma modify_modify_bind:
"(modify f >>= (\<lambda>_. (modify g >>= h))) =
(modify (g \<circ> f) >>= h)"
by (simp add: modify_modify bind_assoc[symmetric])
lemma dmo_untyped_children_in_mdb[wp]:
"\<lbrace>\<lambda>s. untyped_children_in_mdb s\<rbrace>
do_machine_op f
\<lbrace>\<lambda>rv s. untyped_children_in_mdb s\<rbrace>"
by (wp | simp add: untyped_mdb_alt[symmetric] do_machine_op_def split_def)+
lemma detype_machine_state_update_comm:
"detype S (machine_state_update f s) =
machine_state_update f (detype S s)"
by (case_tac s, simp add: detype_def ext)
lemma interrupt_irq_node_detype[simp]:
"interrupt_irq_node (detype S s) = interrupt_irq_node s"
by (simp add: detype_def)
lemma cte_wp_at_delete_objects[wp]:
"\<lbrace>\<lambda>s. Q (cte_wp_at (P (interrupt_irq_node s)) p s) \<and>
fst p \<notin> {ptr..ptr + 2 ^ bits - 1}\<rbrace>
delete_objects ptr bits
\<lbrace>\<lambda>_ s. Q (cte_wp_at (P (interrupt_irq_node s)) p s)\<rbrace>"
apply (simp add: delete_objects_def do_machine_op_def split_def)
apply wp
apply (simp add: detype_machine_state_update_comm)
done
lemma cdt_delete_objects[wp]:
"\<lbrace>\<lambda>s. P (cdt s)\<rbrace> delete_objects ptr bits \<lbrace>\<lambda>_ s. P (cdt s)\<rbrace>"
by (wp | simp add: delete_objects_def do_machine_op_def split_def)+
lemma of_nat_le_pow:
"\<lbrakk>x < 2 ^ n; n \<le> len_of TYPE('a)\<rbrakk> \<Longrightarrow> of_nat x \<le> (mask n :: 'a :: len word)"
apply (drule_tac a="2::nat" in power_increasing, simp)
apply (frule less_le_trans, assumption)
apply (frule of_nat_mono_maybe_le[OF unat_lt2p[of "mask n:: 'a :: len word"],
folded word_bits_def])
apply simp
apply (simp add: unat_mask min_def)
apply (erule iffD1)
apply simp
done
(* FIXME: copied from Retype_C and slightly adapted. *)
lemma (in Detype_AI) mapM_x_storeWord_step:
assumes al: "is_aligned ptr sz"
and sz2: "word_size_bits \<le> sz"
and sz: "sz <= word_bits"
shows "mapM_x (\<lambda>p. storeWord p 0) [ptr , ptr + word_size .e. ptr + 2 ^ sz - 1] =
modify (underlying_memory_update
(\<lambda>m x. if x \<in> {x. \<exists>k. x = ptr + of_nat k \<and> k < 2 ^ sz} then 0 else m x))"
using al sz
apply (simp only: upto_enum_step_def field_simps cong: if_cong)
apply (subst if_not_P)
apply (subst not_less)
apply (erule is_aligned_no_overflow)
apply (simp add: mapM_x_map comp_def upto_enum_word del: upt.simps)
apply (simp add: Suc_unat_mask_div_obfuscated[simplified mask_2pm1] min_def)
apply (subst mapM_x_storeWord)
apply (erule is_aligned_weaken [OF _ sz2])
apply (rule arg_cong)
apply (subgoal_tac "2^word_size_bits = (word_size :: nat)")
apply (cut_tac power_add[symmetric,of "2::nat" "sz - word_size_bits" word_size_bits])
apply (simp only: le_add_diff_inverse2[OF sz2])
apply (simp add: word_size_size_bits_nat)
done
lemma (in Detype_AI) mapM_storeWord_clear_um:
"is_aligned p n \<Longrightarrow> word_size_bits\<le>n \<Longrightarrow> n<=word_bits \<Longrightarrow>
do_machine_op (mapM_x (\<lambda>p. storeWord p 0) [p, p + word_size .e. p + 2 ^ n - 1]) =
modify (clear_um {x. \<exists>k. x = p + of_nat k \<and> k < 2 ^ n})"
apply (simp add: mapM_x_storeWord_step)
apply (rule ext)
apply (simp add: do_machine_op_def select_f_def split_def simpler_modify_def
simpler_gets_def bind_def return_def clear_um_def)
done
lemma intvl_range_conv':
"\<lbrakk>is_aligned (ptr::'a :: len word) bits; bits \<le> len_of TYPE('a)\<rbrakk> \<Longrightarrow>
(\<exists>k. x = ptr + of_nat k \<and> k < 2 ^ bits) \<longleftrightarrow> (ptr \<le> x \<and> x \<le> ptr + 2 ^ bits - 1)"
apply (rule iffI)
apply (clarsimp simp: x_power_minus_1 mask_2pm1[symmetric])
apply (frule is_aligned_no_overflow'[simplified mask_2pm1[symmetric]])
apply (rule conjI)
apply (rule word_plus_mono_right2, assumption)
apply (frule (2) of_nat_le_pow)
apply (rule word_plus_mono_right)
apply (rule word_of_nat_le)
apply (simp add: unat_mask)
apply simp
apply (subgoal_tac "\<exists>x'. x = ptr + of_nat x' \<and> x' < 2 ^ len_of TYPE('a)")
apply clarsimp
apply (drule(1) word_le_minus_mono_left [where x=ptr])
apply (simp only: p_assoc_help add_diff_cancel2)
apply (rule_tac x="x'" in exI)
apply (clarsimp simp: word_le_nat_alt unat_of_nat mask_2pm1[symmetric])
apply (auto simp: unat_mask min_def le_less)[1]
apply (rule_tac x="unat (x - ptr)" in exI)
apply simp
done
(* FIXME: The following lemma is similar to StoreWord_C.intvl_range_conv *)
(* FIXME: move *)
lemma intvl_range_conv:
"\<lbrakk>is_aligned (ptr :: 'a :: len word) bits; bits \<le> len_of TYPE('a)\<rbrakk> \<Longrightarrow>
{x. \<exists>k. x = ptr + of_nat k \<and> k < 2 ^ bits} = {ptr .. ptr + 2 ^ bits - 1}"
by (rule set_eqI) (simp add: intvl_range_conv')
(* FIXME: move *)
lemma gets_modify_def:
"gets f >>= (\<lambda>x. modify (g x)) = modify (\<lambda>s. g (f s) s)"
by (simp add: simpler_gets_def simpler_modify_def bind_def)
lemma valid_pspace_well_formed_cnode[intro?]:
"\<lbrakk>valid_pspace s; kheap s x = Some (CNode sz ct)\<rbrakk> \<Longrightarrow> well_formed_cnode_n sz ct"
by (erule (1) well_formed_cnode_valid_cs_size [OF valid_cs_sizeI])
lemmas cte_wp_at_cte_at = cte_wp_at_weakenE [OF _ TrueI]
lemma cte_wp_at_domI:
"cte_wp_at P c s \<Longrightarrow> fst c \<in> dom (kheap s)"
by (auto elim: cte_wp_atE)
lemmas cte_wp_at_casesE [consumes 1, case_names CapTable TCB] = cte_wp_atE
lemma dom_known_length:
"\<lbrakk> dom f = {x. length x = n}; f xs = Some cap \<rbrakk> \<Longrightarrow> n = length xs"
by (drule domI[where m=f], simp)
lemma (in Detype_AI) cte_map_not_null_outside: (*FIXME: arch_split*)
"\<lbrakk> cte_wp_at ((\<noteq>) cap.NullCap) p (s :: 'a state);
cte_wp_at ((=) cap) p' s;is_untyped_cap cap;
descendants_range cap p' s; untyped_children_in_mdb s;
if_unsafe_then_cap s; valid_global_refs s \<rbrakk>
\<Longrightarrow> fst p \<notin> untyped_range cap"
apply (simp add:descendants_range_def2)
apply (case_tac "cte_wp_at (\<lambda>c. is_zombie c \<and> obj_ref_of c = fst p) p s")
apply (rule ccontr)
apply (erule(2) untyped_children_in_mdbEE[where ptr'=p])
apply (simp add:cte_wp_at_caps_of_state is_cap_simps)
apply (clarsimp simp:cte_wp_at_caps_of_state is_cap_simps)
apply (drule descendants_range_inD)
apply (simp add:cte_wp_at_caps_of_state)
apply (simp add:cte_wp_at_caps_of_state)
apply simp
apply (drule(1) if_unsafe_then_capD, simp)
apply (drule ex_cte_cap_to_obj_ref_disj, erule disjE)
apply (clarsimp simp del:untyped_range.simps)+
apply (erule(1) untyped_children_in_mdbEE [where P="\<lambda>c. fst p \<in> f c" for f])
apply simp+
apply fastforce
apply (drule(1) descendants_range_inD)
apply (simp add:cte_wp_at_caps_of_state)
apply simp
apply clarsimp
apply (drule(1) valid_globals_irq_node, fastforce simp: cap_range_def)
done
lemma corres_submonad2:
"\<lbrakk> submonad f r g fn; submonad f' r' g' fn';
\<forall>s s'. (s, s') \<in> sr \<and> g s \<and> g' s' \<longrightarrow> (f s, f' s') \<in> ssr;
\<forall>s s' ss ss'. ((s, s') \<in> sr \<and> (ss, ss') \<in> ssr) \<longrightarrow> (r ss s, r' ss' s') \<in> sr;
corres_underlying ssr nf nf' rvr P P' x x'\<rbrakk>
\<Longrightarrow> corres_underlying sr nf nf' rvr (g and P o f) (g' and P' o f') (fn x) (fn' x')"
apply (subst submonad.fn_is_sm, assumption)+
apply (clarsimp simp: submonad_fn_def)
apply (rule corres_underlying_split [OF _ _ stateAssert_sp stateAssert_sp])
apply (fastforce simp: corres_underlying_def stateAssert_def get_def
assert_def return_def bind_def)
apply (rule corres_underlying_split [where r'="\<lambda>x y. (x, y) \<in> ssr",
OF _ _ gets_sp gets_sp])
apply clarsimp
apply (rule corres_underlying_split [where r'="\<lambda>(x, x') (y, y'). rvr x y \<and> (x', y') \<in> ssr",
OF _ _ hoare_post_taut hoare_post_taut])
defer
apply clarsimp
apply (rule corres_underlying_split [where r'=dc, OF _ _ hoare_post_taut hoare_post_taut])
apply (simp add: corres_modify')
apply clarsimp
apply (simp add: corres_underlying_def select_f_def)
apply fastforce
done
lemma corres_submonad3:
"\<lbrakk>submonad f r g fn; submonad f' r' g' fn';
\<forall>s s'. (s, s') \<in> sr \<and> g s \<and> g' s' \<longrightarrow> (f s, f' s') \<in> ssr;
\<forall>s s' ss ss'. ((s, s') \<in> sr \<and> (ss, ss') \<in> ssr) \<longrightarrow>
(r ss s, r' ss' s') \<in> sr;
\<forall>s. G s \<longrightarrow> g s \<and> P (f s); \<forall>s'. G' s' \<longrightarrow> g' s' \<and> P' (f' s');
corres_underlying ssr nf nf' rvr P P' x x'\<rbrakk>
\<Longrightarrow> corres_underlying sr nf nf' rvr G G' (fn x) (fn' x')"
apply (subst submonad.fn_is_sm, assumption)+
apply (clarsimp simp: submonad_fn_def)
apply (rule corres_underlying_split [OF _ _ stateAssert_sp stateAssert_sp])
apply (fastforce simp: corres_underlying_def stateAssert_def get_def
assert_def return_def bind_def)
apply (rule corres_underlying_split [where r'="\<lambda>x y. (x, y) \<in> ssr",
OF _ _ gets_sp gets_sp])
apply clarsimp
apply (rule corres_underlying_split [where r'="\<lambda>(x, x') (y, y'). rvr x y \<and> (x', y') \<in> ssr",
OF _ _ hoare_post_taut hoare_post_taut])
defer
apply clarsimp
apply (rule corres_underlying_split [where r'=dc, OF _ _ hoare_post_taut hoare_post_taut])
apply (simp add: corres_modify')
apply clarsimp
apply (simp add: corres_underlying_def select_f_def)
apply fastforce
done
lemma invs_untyped_children[elim!]:
"invs s \<Longrightarrow> untyped_children_in_mdb s"
by (clarsimp simp: invs_def valid_state_def valid_mdb_def
untyped_mdb_alt)
lemma dmo_valid_cap[wp]:
"\<lbrace>\<lambda>s. s \<turnstile> cap.UntypedCap dev base magnitude idx\<rbrace>
do_machine_op f
\<lbrace>\<lambda>rv s. s \<turnstile> cap.UntypedCap dev base magnitude idx\<rbrace>"
by (simp add: do_machine_op_def split_def | wp)+
lemma (in Detype_AI)cte_map_not_null_outside':
"\<lbrakk>cte_wp_at ((=) (cap.UntypedCap dev q n m)) p' (s :: 'a state);
descendants_range (cap.UntypedCap dev q n m) p' s; untyped_children_in_mdb s;
if_unsafe_then_cap s; valid_global_refs s;
cte_wp_at ((\<noteq>) cap.NullCap) p s\<rbrakk>
\<Longrightarrow> fst p \<notin> untyped_range (cap.UntypedCap dev q n m)"
by (erule (1) cte_map_not_null_outside, simp_all)
lemma refl_spec[simp]:
"\<not> (\<forall>x. x \<noteq> y)"
by clarsimp
lemma pre_helper:
"\<And>base x n. \<lbrakk> is_aligned (base :: machine_word) (n + (a::nat)); n + a < word_bits \<rbrakk>
\<Longrightarrow> base + (x && mask n) * 2^a \<in> {base .. base + 2 ^ (n + a) - 1}"
apply (subgoal_tac "(x && mask n) * bit(a) < 2 ^ (n + a)")
apply simp
apply (rule context_conjI)
apply (erule(1) is_aligned_no_wrap')
apply (subst add_diff_eq[symmetric])
apply (rule word_plus_mono_right)
apply simp
apply (erule is_aligned_no_wrap')
apply simp
apply (simp add: power_add)
apply (rule word_mult_less_mono1)
apply (rule and_mask_less_size, simp add: word_size word_bits_def)
apply (simp add: p2_gt_0 word_bits_def)
apply (simp add: word_bits_def)
apply (drule power_strict_increasing[where a="2 :: nat"], simp_all)
apply (simp add: power_add[where a="2::nat"])
done
lemmas ucast_ucast_mask_8 = ucast_ucast_mask[where 'a=8, simplified, symmetric]
lemma pspace_no_overlap_obj_range:
"\<lbrakk> pspace_no_overlap S s; kheap s p = Some obj \<rbrakk>
\<Longrightarrow> obj_range p obj \<inter> S = {}"
by (auto simp add: pspace_no_overlap_def obj_range_def field_simps)
(* FIXME: generalised version of Arch_AI.range_cover_full *)
lemma range_cover_full:
"\<lbrakk>is_aligned (ptr :: 'a :: len word) sz;sz < len_of TYPE('a)\<rbrakk> \<Longrightarrow> range_cover ptr sz sz (Suc 0)"
by (clarsimp simp:range_cover_def
unat_eq_0 le_mask_iff[symmetric] word_and_le1)
lemma range_cover_plus_us:
"range_cover ptr sz (m + us) (Suc 0) \<Longrightarrow> range_cover ptr sz m (2^us)"
apply (erule range_cover_rel)
apply simp+
done
lemma caps_overlap_reserved_subseteq:
"\<lbrakk>caps_overlap_reserved B s; A\<subseteq> B\<rbrakk> \<Longrightarrow> caps_overlap_reserved A s"
apply (clarsimp simp:caps_overlap_reserved_def)
apply (drule(1) bspec)
apply (erule disjoint_subset2)
apply simp
done
lemma range_cover_le:
"\<lbrakk>range_cover ptr sz us m; n\<le>m\<rbrakk> \<Longrightarrow> range_cover ptr sz us n"
by (clarsimp simp:range_cover_def)
lemma range_cover_ptr_le:
"\<lbrakk>range_cover ptr sz us (Suc (Suc n));ptr\<noteq> 0\<rbrakk>
\<Longrightarrow> ptr \<le> ptr + (1 + of_nat n << us)"
apply (frule range_cover_subset[where p = 0
,OF range_cover_le[where n = "Suc n"]])
apply simp+
apply (frule is_aligned_no_overflow[OF range_cover.aligned])
apply (simp add:shiftl_t2n field_simps)
apply (erule order_trans)+
apply (rule word_sub_1_le)
apply (drule(1) range_cover_no_0[where p = "Suc n"])
apply simp
apply (simp add:word_arith_nat_Suc power_add[symmetric] field_simps)
done
lemma range_cover_tail_mask:
"\<lbrakk>range_cover ptr sz us (Suc (Suc n));ptr \<noteq> 0\<rbrakk>
\<Longrightarrow> ptr + ((1::machine_word) + of_nat n << us) && ~~ mask sz = ptr && ~~ mask sz"
apply (frule(1) range_cover_ptr_le)
apply (subst word_plus_and_or_coroll2[symmetric,where w = "mask sz" and t = ptr])
apply (subst add.commute)
apply (subst add.assoc)
apply (subst is_aligned_add_helper[THEN conjunct2,OF is_aligned_neg_mask])
apply (simp add:range_cover_def)
apply (simp add:word_less_nat_alt)
apply (rule le_less_trans[OF unat_plus_gt])
apply (frule range_cover.range_cover_compare[where p = "Suc n"])
apply simp
apply (drule range_cover.sz)
apply (simp add:word_arith_nat_Suc shiftl_t2n power_add[symmetric] field_simps)
apply simp
done
lemma range_cover_unat:
"range_cover (ptr :: 'a :: len word) sz sb n
\<Longrightarrow> unat ((ptr && mask sz) + (of_nat n * 2^ sb)) =
unat (ptr && mask sz) + unat ( (of_nat n) * (2::'a word) ^ sb)"
apply (rule unat_add_lem[THEN iffD1])
apply (rule le_less_trans)
apply (frule range_cover.unat_of_nat_shift[OF _ le_refl le_refl])
apply (simp add:field_simps)
apply (subst add.commute)
apply (erule range_cover.range_cover_compare_bound)
apply (rule power_strict_increasing)
apply (clarsimp simp:range_cover_def)+
done
lemma range_cover_offset:
assumes offset: "p < n"
and cover : "range_cover ptr sz us n"
shows "range_cover (ptr + (of_nat p) * 2 ^ us) sz us (n - p)"
using assms range_cover.range_cover_compare_bound[OF cover]
apply (clarsimp simp:range_cover_def)
apply (intro conjI)
apply (erule aligned_add_aligned)
apply (subst mult.commute)
apply (simp add:is_aligned_shiftl_self[unfolded shiftl_t2n])
apply simp
apply (rule nat_mult_le_cancel1[where k = "2^ us",THEN iffD1])
apply simp
apply (subst diff_mult_distrib2)
apply (simp add: add_mult_distrib2)
apply (simp add:shiftr_div_2n' field_simps minus_mod_eq_mult_div[symmetric])
apply (rule le_trans[where j = "(n-p) * 2 ^ us + unat (ptr + of_nat p * 2 ^ us && mask sz)"])
apply (clarsimp simp:field_simps diff_mult_distrib diff_le_mono2)
apply (subst mask_eqs[symmetric])
apply (subst less_mask_eq[where x = "(ptr && mask sz) + of_nat p * 2 ^ us"])
apply (simp add:word_less_nat_alt)
apply (rule le_less_trans[OF unat_plus_gt])
apply (erule range_cover.range_cover_compare[OF cover])
apply (simp add:range_cover_unat[OF range_cover_le[OF cover]] field_simps)
apply (simp add:range_cover.unat_of_nat_shift[OF cover] diff_mult_distrib)
apply (simp add:field_simps power_add[symmetric]
range_cover.range_cover_compare_bound[OF cover])
done
lemma range_cover_bound:
assumes cover:"range_cover ptr sz us n"
shows "0<n \<Longrightarrow> ptr \<le> ptr + of_nat n * 2^ us - 1"
apply (cut_tac range_cover_subset[OF cover,where p = 0])
apply (cut_tac Retype_AI.range_cover_subset_not_empty[OF _ cover , where x = 0])
apply (clarsimp simp del: atLeastatMost_subset_iff)
apply (drule_tac c=ptr in subsetD)
apply simp
apply simp
apply (cut_tac range_cover_not_zero[OF _ cover])
apply (simp add:word_gt_0)+
done
lemma range_cover_compare_offset:
"\<lbrakk>range_cover ptr sz us t; n + 1 < t;ptr \<noteq> 0\<rbrakk>
\<Longrightarrow> ptr + (of_nat n << us) \<le> ptr + (1 + of_nat n << us)"
apply (simp add:shiftl_t2n field_simps)
apply (rule order_trans[OF range_cover_bound])
apply (rule range_cover_offset[rotated])
apply (erule_tac n = "n+1" in range_cover_le)
apply simp+
apply (simp add:field_simps)
apply (rule word_sub_1_le)
apply (drule_tac n = "n + 2" and p = "n + 1" in range_cover_no_0)
apply (erule range_cover_le)
apply simp
apply simp
apply (simp add:field_simps)
done
lemma range_cover_sz':
"range_cover (a :: 'a :: len word) b bits d \<Longrightarrow> bits < len_of TYPE('a)"
by (clarsimp simp:range_cover_def)
end
|
{-# OPTIONS --safe --warning=error --without-K #-}
open import LogicalFormulae
open import Groups.Homomorphisms.Definition
open import Groups.Definition
open import Rings.Definition
open import Rings.Homomorphisms.Definition
open import Rings.IntegralDomains.Definition
open import Setoids.Setoids
open import Sets.EquivalenceRelations
module Fields.FieldOfFractions.Lemmas {a b : _} {A : Set a} {S : Setoid {a} {b} A} {_+_ : A → A → A} {_*_ : A → A → A} {R : Ring S _+_ _*_} (I : IntegralDomain R) where
open import Fields.FieldOfFractions.Setoid I
open import Fields.FieldOfFractions.Ring I
embedIntoFieldOfFractions : A → fieldOfFractionsSet
embedIntoFieldOfFractions a = record { num = a ; denom = Ring.1R R ; denomNonzero = IntegralDomain.nontrivial I }
homIntoFieldOfFractions : RingHom R fieldOfFractionsRing embedIntoFieldOfFractions
RingHom.preserves1 homIntoFieldOfFractions = Equivalence.reflexive (Setoid.eq S)
RingHom.ringHom homIntoFieldOfFractions {a} {b} = Equivalence.transitive (Setoid.eq S) (Ring.*WellDefined R (Equivalence.reflexive (Setoid.eq S)) (Ring.identIsIdent R)) (Ring.*Commutative R)
GroupHom.groupHom (RingHom.groupHom homIntoFieldOfFractions) {x} {y} = need
where
open Setoid S
open Equivalence eq
need : ((x + y) * (Ring.1R R * Ring.1R R)) ∼ (Ring.1R R * ((x * Ring.1R R) + (Ring.1R R * y)))
need = transitive (transitive (Ring.*WellDefined R reflexive (Ring.identIsIdent R)) (transitive (Ring.*Commutative R) (transitive (Ring.identIsIdent R) (Group.+WellDefined (Ring.additiveGroup R) (symmetric (transitive (Ring.*Commutative R) (Ring.identIsIdent R))) (symmetric (Ring.identIsIdent R)))))) (symmetric (Ring.identIsIdent R))
GroupHom.wellDefined (RingHom.groupHom homIntoFieldOfFractions) x=y = transitive (Ring.*Commutative R) (Ring.*WellDefined R reflexive x=y)
where
open Equivalence (Setoid.eq S)
homIntoFieldOfFractionsIsInj : SetoidInjection S fieldOfFractionsSetoid embedIntoFieldOfFractions
SetoidInjection.wellDefined homIntoFieldOfFractionsIsInj x=y = transitive (Ring.*Commutative R) (Ring.*WellDefined R reflexive x=y)
where
open Equivalence (Setoid.eq S)
SetoidInjection.injective homIntoFieldOfFractionsIsInj x~y = transitive (symmetric identIsIdent) (transitive *Commutative (transitive x~y identIsIdent))
where
open Ring R
open Setoid S
open Equivalence eq
|
\documentclass{article}
\usepackage[utf8]{inputenc}
\usepackage{setspace}
\doublespacing
\title{Predicting March Madness Outcomes}
\author{Lane Cazier}
\date{May 2021}
\begin{document}
\maketitle
\section{Introduction}
The NCAA Men's Basketball tournament, or March Madness, is one of the most popular and most watched sporting events in the U.S. There is a great deal of betting and speculation around it, and one of the biggest activities centered on March Madness is making brackets that attempt to predict the outcome of every game. This paper discusses machine learning models that can find patterns in the data in order to make better predictions about a team's performance in the tournament.
There are approximately $9.2 \times 10^{18}$ different possible tournament outcomes for March Madness,\[2^{32} \cdot 2^{16} \cdot 2^{8} \cdot 2^{4} \cdot 2^{2} \cdot 2 = 9.2234 \times 10^{18},\] and so obviously trying to reach perfection is nigh-impossible, but by carefully analyzing data about the participating teams, it is possible to increase the accuracy of predictions. Because March Madness is such a large sporting event gathering so much interest, many models have already been made, giving a large pool of past experience to draw from. To avoid duplicating strategies already tried, a different method will be employed here in the hope of finding new information in the available data; this method is discussed later in this paper.
\section{Literature Review}
As mentioned in the introduction, many machine learning methods and regression models have previously been constructed to analyze the March Madness tournament. These past models can help inform new ones, and so the relevant models and past research are explored here.
To predict the outcome of individual games, past game statistics are often used, but past research has found that it is sometimes difficult to know which statistics are most causally related to winning (Gumm et al). Other models have used a myriad of factors, such as average field goals made per game, average three point shots made per game, average free throw attempts, average rebounds, average steals, and average blocks, and since sports statistics are collected and kept consistently, there is an incredible number of possible variables that can be used in a model (Shen et al). However, some of the most effective models have actually been fairly simple and straightforward, such as the model developed by math professors Gregory Matthews and Michael Lopez (Ellenberg). Using logit regression, they utilized only two variables in their model: the Las Vegas point spreads, and the team efficiency ratings developed by Ken Pomeroy (Ellenberg). Their model won a previous competition hosted by Kaggle, which had many people develop models to predict win probabilities of March Madness games. Models here will use some of the same data and attempt to keep this simple approach, without overcomplicating the model. The efficiency ratings developed by Ken Pomeroy and used in the previously mentioned model are also very useful and are intended to be predictive on their own (kenpom.com).
Another attempt at analyzing March Madness proved particularly useful and enlightening. As has been observed, "there is significant evidence that tournament models don't reflect regular seasons models" (Gumm et al). One previous model attempted to analyze some of the factors that differentiate March Madness games from regular season games, so as to determine whether some teams gain an advantage from variables often excluded from models of the tournament. John Ezekowitz of Harvard Sports Analysis developed what he called a "Survival Analysis Model", which looks at how several factors, including strength of schedule, previous March Madness experience, and consistency, affect a team's staying power in the tournament (Ezekowitz). These variables and several others were regressed in a Cox Proportional Hazards model, with the number of wins a team was able to get in the tournament as the Y variable. This model proved to be highly successful, and some variables from it will also be included in the models developed here.
\section{Data}
Data for this project was gathered from two sources, Kaggle and kenpom.com. Data was gathered about a number of variables for every season played by an NCAA division team, for a total of 5,790 observations, but this data is whittled down quite a bit. Only data regarding teams which made the March Madness tournament are relevant for this project, so despite the dataset containing observations for all teams, observations of teams that did not make the tournament in a season have been dropped. The data is for the years 2005-2019. The final dataset has 987 observations, as observations for some teams in the timeframe were dropped due to difficulties joining the Kaggle and Kenpom data entries for them. The teams dropped are Texas State and Missouri State; these entries are MAR (missing at random), as they are missing for reasons uncorrelated to the data itself. There are seven variables used for the model. The dependent variable is the number of March Madness games won by the team in the given season, starting with the round of 32. The other variables are the team's regular season win percentage, the team's offensive efficiency, the team's defensive efficiency, the number of tournament-participating teams beaten in the regular season, the team's strength of schedule, and the number of tournament games the team won in the last two years.
The regular season win percentage should be a useful indicator of how well a team generally plays. The offensive and defensive efficiency of a team are ratings based on how many points the team scores and allows per 100 possessions, and serve the same purpose as the regular season win percentage; they measure how well the team generally plays. The number of other tournament-qualifying teams beaten serves as an indicator of a team's success in higher pressure, tougher games, which should transfer well to March Madness. The team's strength of schedule and March Madness wins over the previous two seasons serve the same purpose.
All the data was normalized using the min-max method to enable it to be used for a K-Nearest Neighbors model.
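Concretely, min-max normalization rescales each variable $x$ to the unit interval via
\[x' = \frac{x - \min(x)}{\max(x) - \min(x)},\]
which keeps any one feature from dominating the distance computations used by K-Nearest Neighbors.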
\section{Methods}
Instead of the normal predictive method of generating win probabilities for individual tournament games, the following models turn March Madness into a classification problem, and use supervised machine learning to solve this problem. Any given team is capable of winning anywhere from zero to six games in the tournament, and the models here seek to classify each team according to how many games they are most likely to win.
Out of the data gathered, the sample was split into seventy-five percent training data and twenty-five percent testing data. Two models were then generated using this data: a K-Nearest Neighbors model and a Naive Bayes model. The value of K in the K-Nearest Neighbors model was three; larger values of K proved ineffective due to the small size of the dataset. Given data for any team, each model classifies the team according to the number of games it is most likely to win.
The models were made in Python, using the scikit-learn package. In order to import the data into Python, the xlrd package was used to read data stored in an Excel file. The splitting of the data was achieved without any package at all: the dataset was given a randomized order in Excel, using the =RANDBETWEEN function and sorting the data by this number. Once this was done, one in every four observations was moved into a different 2-D array in the Python script itself, splitting the data randomly into training and testing sets.
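To make the modeling step concrete, the following is a minimal sketch of how the two models can be fit with scikit-learn; the array names, and the use of the Gaussian Naive Bayes variant in particular, are illustrative assumptions rather than the original script.
\begin{verbatim}
# Minimal sketch (illustrative; array names and the Gaussian Naive
# Bayes variant are assumptions, not the original script).
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score

# X_train, X_test: min-max normalized feature arrays.
# y_train, y_test: tournament wins (0-6) per team-season.
knn = KNeighborsClassifier(n_neighbors=3).fit(X_train, y_train)
print(accuracy_score(y_test, knn.predict(X_test)))

nb = GaussianNB().fit(X_train, y_train)
print(accuracy_score(y_test, nb.predict(X_test)))
\end{verbatim}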
\section{Findings}
Here are some metrics evaluating the accuracy for the K-Nearest Neighbors model:
\begin{center}
\begin{tabular}{ c c c c c c c }
For Classification & 0 & 1 & 2 & 3 & 4 & 5 \\
Recall & .765 & .302 & .158 & .063 & 0 & 0 \\
Precision & .579 & .284 & .375 & .1 & 0 & 0 \\
F-Score & .66 & .292 & .222 & .077 & 0 & 0 \\
\end{tabular}

Accuracy: .465
\end{center}
Note: No teams that won six games were randomly selected into the testing data. Given these teams represent 1/64 of the population, this is not shocking.
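For reference, the reported metrics are the standard ones: for a given classification, with $TP$, $FP$, and $FN$ the counts of true positives, false positives, and false negatives,
\[\mathrm{Recall} = \frac{TP}{TP + FN}, \qquad \mathrm{Precision} = \frac{TP}{TP + FP}, \qquad \mathrm{F} = 2 \cdot \frac{\mathrm{Precision} \cdot \mathrm{Recall}}{\mathrm{Precision} + \mathrm{Recall}}.\]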
Overall, this classification predictor has some issues. It tends to predict lower numbers of wins for teams. Because one half of the teams get eliminated every round, most teams end up with a lower number of wins. It makes sense, then, that a K-Nearest Neighbors method with a smaller sample size would more frequently predict lower numbers of wins, as most of the population is comprised of low-scoring teams that may end up as the closest neighbors of new data points. A larger dataset could help alleviate this issue. Considering that there are only fourteen six-win teams in the data, it is unsurprising that, using nearest neighbors, these teams tend to be outweighed and outnumbered. If there were more of these teams, it would help the model group similar teams in the data and predict similar outcomes. The largest number of wins the model predicted for any team was three total wins, which would bring a team into the round of eight. Considering that only four out of sixty-four teams do better than this, this predictive tendency towards low numbers of wins also makes sense, as that is the most likely outcome for most teams. The model also did better with recall than precision due to its tendency to assign teams a low number of wins: it frequently classified a team as a zero-win team, which was accurate when the team did get zero wins, but it also did so when a team did better.
From one perspective, an accuracy of .465 represents an encouraging number. A program that randomly assigned a classification to each team would be right only 1/7 of the time. On the other hand, a program that classified each team as a zero would be right about fifty percent of the time, since roughly half of all tournament teams win zero games. Clearly this model has some room for improvement.
Here are some metrics to help evaluate the Naive Bayes model:
\begin{center}
\begin{tabular}{ c c c c c c c }
For Classification & 0 & 1 & 2 & 3 & 4 & 5 \\
Recall & 1 & .016 & 0 & 0 & 0 & 0 \\
Precision & .471 & 1 & 0 & 0 & 0 & 0 \\
F-Score & .64 & .031 & 0 & 0 & 0 & 0 \\
\end{tabular}

Accuracy: .465
\end{center}
Again, note that there are no six-win teams in the testing data.
At first glance, the Naive Bayes model matches the accuracy score of the K-Nearest Neighbors model, but it is generally a worse model. It obtains its accuracy by being far more likely to class a team as a zero-win team, and it does not predict any team to win more than one game in the tournament. It does boast a pristine recall of one for zero-win teams, but this is again due to the fact that it rarely classifies a team as anything else. This model was very disappointing, and not much can be salvaged here.
\section{Conclusion}
Generally, the models were somewhat disappointing. However, it is important to remember that attempting to predict March Madness results is very complicated, and accuracy is difficult. Improvements to the models are certainly possible. Some future work could include expanding the amount of data and observations gathered; incorporating a new variable, such as the seed of a team, would probably help the models tremendously. Another thing I intend to try in the future is making several different models using the available data, such as a random forest model. While imperfect, I do think that the K-Nearest Neighbors model showed potential, and could be useful in its current form to show where a team with given data generally falls in an average year. I think it can be used to show whether a team is more or less likely than its seeding suggests to lose early or to go further than expected. The models can be useful, and hopefully will be better with future work.
\pagebreak
\section{References}
Ellenberg, Jordan. “The Math of March Madness.” The New York Times, The New York Times, 20 Mar. 2015, www.nytimes.com/2015/03/22/opinion/sunday/making-march-madness-easy.html.
J. Gumm, A. Barrett and G. Hu, "A machine learning strategy for predicting march madness winners," 2015 IEEE/ACIS 16th International Conference on Software Engineering, Artificial Intelligence, Networking and Parallel/Distributed Computing (SNPD), 2015, pp. 1-6, doi: 10.1109/SNPD.2015.7176206.
Pomeroy, Ken. “Ratings Explanation: The Kenpom.com Blog.” The Kenpomcom Blog, Kenpom.com, 29 Nov. 2016, kenpom.com/blog/ratings-explanation/.
Shen, Gang, et al. "Predicting Results of March Madness Using Three Different Methods." Journal of Sports Research 3.1 (2016): 10-17.
Ezekowitz, John. “Survival of the Fittest: A New Model for NCAA Tournament Prediction.” The Harvard Sports Analysis Collective, Harvard Sports Analytics, 12 Jan. 2013, harvardsportsanalysis.wordpress.com/2012/03/14/survival-of-the-fittest-a-new-model-for-ncaa-tournament-prediction/.
\end{document} |
% --- [ Pointer analysis ] -----------------------------------------------------
\subsection{Pointer analysis}
% TODO: Update scope or remove section. Scope says pointer analysis is excluded from scope.
\todo{TODO: give a brief introduction to pointer analysis, points-to sets, and pointer aliasing, and to the technique of merging call sites, whereby memory allocated at different allocation call sites (e.g. calls to \texttt{malloc}) may be given the same type if type propagation proves that the returned pointers are used in other parts of the program for the same variables.}
|
[STATEMENT]
lemma map_id: "map id xs = xs"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. map id xs = xs
[PROOF STEP]
by (simp add: id_def) |
#' thrust_changes
#'
#' See the list of file/folder name changes in the internal Thrust C++ library.
#'
#' @examples
#' thrust::thrust_changes()
#'
#' @export
thrust_changes = function()
{
cat(paste(readLines(system.file("Thrust_CHANGES", package="thrust")), collapse="\n"), "\n")
}
|
[STATEMENT]
lemma hypext_sin_extract:
"(*h* sin) (\<beta> x) = sin x *\<^sub>H ba + cos x *\<^sub>H e1 + cos x *\<^sub>H e2 - sin x *\<^sub>H e12"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (*h* sin) (\<beta> x) = sin x *\<^sub>H ba + cos x *\<^sub>H e1 + cos x *\<^sub>H e2 - sin x *\<^sub>H e12
[PROOF STEP]
by (simp add: hypext_sin_Hyperdual of_comp_minus scaleH_times hyperdualx_def) |
proposition isometries_subspaces: fixes S :: "'a::euclidean_space set" and T :: "'b::euclidean_space set" assumes S: "subspace S" and T: "subspace T" and d: "dim S = dim T" obtains f g where "linear f" "linear g" "f ` S = T" "g ` T = S" "\<And>x. x \<in> S \<Longrightarrow> norm(f x) = norm x" "\<And>x. x \<in> T \<Longrightarrow> norm(g x) = norm x" "\<And>x. x \<in> S \<Longrightarrow> g(f x) = x" "\<And>x. x \<in> T \<Longrightarrow> f(g x) = x" |
module Web.Semantic.DL.Signature where
infixr 4 _,_
-- a Signature is constructed from Concept Names and Role/Relation Names
data Signature : Set₁ where
_,_ : (CN RN : Set) → Signature
-- concept name (maps to Sets)
CN : Signature → Set
CN (CN , RN) = CN
-- Role Names (or relation names)
RN : Signature → Set
RN (CN , RN) = RN
|
# James Rekow
identifyNumSubgroupsTest = function(MVec = NULL, NVec = NULL, lambdaVec = NULL, numSubgroupsVec = NULL,
iStrength = 1, univVec = NULL, sigmaMax = 0.1,
thresholdMult = 10 ^ (-1), maxSteps = 10 ^ 4, tStep = 10 ^ (-2),
intTime = 1, interSmplMult = 0.01, conGraph = NULL, rollRange = 0.025,
numReplicates = 10){
  # ARGS: MVec, NVec, numSubgroupsVec, lambdaVec, univVec - vectors of parameter
  #       values; one treatment is run for each combination of their elements.
  #       numReplicates - number of replicates per treatment. The remaining
  #       arguments are passed through to subgroupsAbdListCreator and
  #       computeInfoWeightMat.
# RETURNS: percentErrorDF - data frame containing columns for all the input data and the percent
# of replicates for which identifyNumSubgroups returned the wrong number
# of subgroups
source("subgroupsAbdListCreator.r")
source("computeInfoWeightMat.r")
source("identifyNumSubgroups.r")
# set default value of MVec
if(is.null(MVec)){
MVec = seq(40, 200, 80)
} # end if
# set default value of NVec
if(is.null(NVec)){
NVec = seq(20, 100, 40)
} # end if
# set default value of numSubgroupsVec
if(is.null(numSubgroupsVec)){
numSubgroupsVec = seq(1, 4, 1)
} # end if
# set default value of lambdaVec
if(is.null(lambdaVec)){
lambdaVec = seq(0, 200, 50)
} # end if
# set default value of univVec
if(is.null(univVec)){
univVec = seq(0, 1, 0.5)
} # end if
correctNumberIdentified = function(inputVec){
    # ARGS: inputVec - a vector of the form c(M, N, numSubgroups, lambda, univ)
#
# RETURNS: correct - boolean corresponding to whether or not identifyNumSubgroups correctly
# identified the number of subgroups in an abundance list generated using the
# input treatment
# extract parameter values from inputVec
M = inputVec[1]
N = inputVec[2]
numSubgroups = inputVec[3]
lambda = inputVec[4]
univ = inputVec[5]
# create abundance list using the input parameters for the given treatment
abdList = subgroupsAbdListCreator(M = M, numSubgroups = numSubgroups, N = N, iStrength = iStrength,
univ = univ, sigmaMax = sigmaMax, thresholdMult = thresholdMult,
maxSteps = maxSteps, tStep = tStep, intTime = intTime,
interSmplMult = interSmplMult, lambda = lambda, returnParams = FALSE,
conGraph = conGraph)
# compute the info weight matrix for the given abundance list
infoWeightMat = computeInfoWeightMat(abdList = abdList, rollRange = rollRange)
# compute percent error of algorithm's subgroup classification
numSubgroupsGuess = identifyNumSubgroups(infoMat = infoWeightMat)
# check if the algorithm identified the correct number of subgroups
correct = numSubgroupsGuess == numSubgroups
return(correct)
} # end correctNumberIdentified function
percentErrorProducer = function(inputVec){
# ARGS: inputVec - vector of the form c(M, N, lambda, univ)
#
# RETURNS: percentError - the percent of replicates at each treatment for which identifyNumSubgroups
# returned the incorrect number of subgroups
# compute whether or not the algorithm identified the correct number of subgroups for the given input
# and repeat for numReplicates replicates
correctGuessVec = replicate(numReplicates, correctNumberIdentified(inputVec = inputVec))
# compute the percent of replicates for which the algorithm failed
percentError = 100 * sum(!correctGuessVec) / numReplicates
return(percentError)
} # end percentErrorProducer function
# create a matrix of all unique combinations of elements from Mvec, NVec, numSubgroupsVec, lambdaVec,
# and univVec
inputMat = expand.grid(MVec, NVec, numSubgroupsVec, lambdaVec, univVec)
# for each combination of inputs, compute the percent error of the algorithm using those inputs
percentError = apply(inputMat, 1, percentErrorProducer)
# store data in a data frame
percentErrorDF = data.frame(M = inputMat[ , 1], N = inputMat[ , 2], numSubgroups = inputMat[ , 3],
lambda = inputMat[ , 4], univ = inputMat[ , 5],
percentError = percentError)
return(percentErrorDF)
} # end identifyNumSubgroupsTest function
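# Hypothetical usage sketch (illustrative only; parameter values are assumptions):
# run a small parameter sweep and list the treatments with the highest error.
# percentErrorDF = identifyNumSubgroupsTest(MVec = c(40), NVec = c(20),
#                                           numSubgroupsVec = c(1, 2),
#                                           lambdaVec = c(0), univVec = c(0),
#                                           numReplicates = 2)
# head(percentErrorDF[order(-percentErrorDF$percentError), ])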
|
State Before: σ₁ : Type u_1
σ₂ : Type u_2
f₁ : σ₁ → Option σ₁
f₂ : σ₂ → Option σ₂
tr : σ₁ → σ₂ → Prop
H : Respects f₁ f₂ tr
a₁ : σ₁
a₂ : σ₂
aa : tr a₁ a₂
b₂ : σ₂
ab : Reaches f₂ a₂ b₂
⊢ ∃ c₁ c₂, Reaches f₂ b₂ c₂ ∧ tr c₁ c₂ ∧ Reaches f₁ a₁ c₁ State After: case refl
σ₁ : Type u_1
σ₂ : Type u_2
f₁ : σ₁ → Option σ₁
f₂ : σ₂ → Option σ₂
tr : σ₁ → σ₂ → Prop
H : Respects f₁ f₂ tr
a₁ : σ₁
a₂ : σ₂
aa : tr a₁ a₂
b₂ : σ₂
⊢ ∃ c₁ c₂, Reaches f₂ a₂ c₂ ∧ tr c₁ c₂ ∧ Reaches f₁ a₁ c₁
case tail
σ₁ : Type u_1
σ₂ : Type u_2
f₁ : σ₁ → Option σ₁
f₂ : σ₂ → Option σ₂
tr : σ₁ → σ₂ → Prop
H : Respects f₁ f₂ tr
a₁ : σ₁
a₂ : σ₂
aa : tr a₁ a₂
b₂ c₂ d₂ : σ₂
a✝ : ReflTransGen (fun a b => b ∈ f₂ a) a₂ c₂
cd : d₂ ∈ f₂ c₂
IH : ∃ c₁ c₂_1, Reaches f₂ c₂ c₂_1 ∧ tr c₁ c₂_1 ∧ Reaches f₁ a₁ c₁
⊢ ∃ c₁ c₂, Reaches f₂ d₂ c₂ ∧ tr c₁ c₂ ∧ Reaches f₁ a₁ c₁ Tactic: induction' ab with c₂ d₂ _ cd IH State Before: case refl
σ₁ : Type u_1
σ₂ : Type u_2
f₁ : σ₁ → Option σ₁
f₂ : σ₂ → Option σ₂
tr : σ₁ → σ₂ → Prop
H : Respects f₁ f₂ tr
a₁ : σ₁
a₂ : σ₂
aa : tr a₁ a₂
b₂ : σ₂
⊢ ∃ c₁ c₂, Reaches f₂ a₂ c₂ ∧ tr c₁ c₂ ∧ Reaches f₁ a₁ c₁ State After: no goals Tactic: exact ⟨_, _, ReflTransGen.refl, aa, ReflTransGen.refl⟩ State Before: case tail
σ₁ : Type u_1
σ₂ : Type u_2
f₁ : σ₁ → Option σ₁
f₂ : σ₂ → Option σ₂
tr : σ₁ → σ₂ → Prop
H : Respects f₁ f₂ tr
a₁ : σ₁
a₂ : σ₂
aa : tr a₁ a₂
b₂ c₂ d₂ : σ₂
a✝ : ReflTransGen (fun a b => b ∈ f₂ a) a₂ c₂
cd : d₂ ∈ f₂ c₂
IH : ∃ c₁ c₂_1, Reaches f₂ c₂ c₂_1 ∧ tr c₁ c₂_1 ∧ Reaches f₁ a₁ c₁
⊢ ∃ c₁ c₂, Reaches f₂ d₂ c₂ ∧ tr c₁ c₂ ∧ Reaches f₁ a₁ c₁ State After: case tail.intro.intro.intro.intro
σ₁ : Type u_1
σ₂ : Type u_2
f₁ : σ₁ → Option σ₁
f₂ : σ₂ → Option σ₂
tr : σ₁ → σ₂ → Prop
H : Respects f₁ f₂ tr
a₁ : σ₁
a₂ : σ₂
aa : tr a₁ a₂
b₂ c₂ d₂ : σ₂
a✝ : ReflTransGen (fun a b => b ∈ f₂ a) a₂ c₂
cd : d₂ ∈ f₂ c₂
e₁ : σ₁
e₂ : σ₂
ce : Reaches f₂ c₂ e₂
ee : tr e₁ e₂
ae : Reaches f₁ a₁ e₁
⊢ ∃ c₁ c₂, Reaches f₂ d₂ c₂ ∧ tr c₁ c₂ ∧ Reaches f₁ a₁ c₁ Tactic: rcases IH with ⟨e₁, e₂, ce, ee, ae⟩ State Before: case tail.intro.intro.intro.intro
σ₁ : Type u_1
σ₂ : Type u_2
f₁ : σ₁ → Option σ₁
f₂ : σ₂ → Option σ₂
tr : σ₁ → σ₂ → Prop
H : Respects f₁ f₂ tr
a₁ : σ₁
a₂ : σ₂
aa : tr a₁ a₂
b₂ c₂ d₂ : σ₂
a✝ : ReflTransGen (fun a b => b ∈ f₂ a) a₂ c₂
cd : d₂ ∈ f₂ c₂
e₁ : σ₁
e₂ : σ₂
ce : Reaches f₂ c₂ e₂
ee : tr e₁ e₂
ae : Reaches f₁ a₁ e₁
⊢ ∃ c₁ c₂, Reaches f₂ d₂ c₂ ∧ tr c₁ c₂ ∧ Reaches f₁ a₁ c₁ State After: case tail.intro.intro.intro.intro.inl
σ₁ : Type u_1
σ₂ : Type u_2
f₁ : σ₁ → Option σ₁
f₂ : σ₂ → Option σ₂
tr : σ₁ → σ₂ → Prop
H : Respects f₁ f₂ tr
a₁ : σ₁
a₂ : σ₂
aa : tr a₁ a₂
b₂ c₂ d₂ : σ₂
a✝ : ReflTransGen (fun a b => b ∈ f₂ a) a₂ c₂
cd : d₂ ∈ f₂ c₂
e₁ : σ₁
ae : Reaches f₁ a₁ e₁
ce : Reaches f₂ c₂ c₂
ee : tr e₁ c₂
⊢ ∃ c₁ c₂, Reaches f₂ d₂ c₂ ∧ tr c₁ c₂ ∧ Reaches f₁ a₁ c₁
case tail.intro.intro.intro.intro.inr.intro.intro
σ₁ : Type u_1
σ₂ : Type u_2
f₁ : σ₁ → Option σ₁
f₂ : σ₂ → Option σ₂
tr : σ₁ → σ₂ → Prop
H : Respects f₁ f₂ tr
a₁ : σ₁
a₂ : σ₂
aa : tr a₁ a₂
b₂ c₂ d₂ : σ₂
a✝ : ReflTransGen (fun a b => b ∈ f₂ a) a₂ c₂
cd : d₂ ∈ f₂ c₂
e₁ : σ₁
e₂ : σ₂
ce : Reaches f₂ c₂ e₂
ee : tr e₁ e₂
ae : Reaches f₁ a₁ e₁
d' : σ₂
cd' : d' ∈ f₂ c₂
de : ReflTransGen (fun a b => b ∈ f₂ a) d' e₂
⊢ ∃ c₁ c₂, Reaches f₂ d₂ c₂ ∧ tr c₁ c₂ ∧ Reaches f₁ a₁ c₁ Tactic: rcases ReflTransGen.cases_head ce with (rfl | ⟨d', cd', de⟩) State Before: case tail.intro.intro.intro.intro.inl
σ₁ : Type u_1
σ₂ : Type u_2
f₁ : σ₁ → Option σ₁
f₂ : σ₂ → Option σ₂
tr : σ₁ → σ₂ → Prop
H : Respects f₁ f₂ tr
a₁ : σ₁
a₂ : σ₂
aa : tr a₁ a₂
b₂ c₂ d₂ : σ₂
a✝ : ReflTransGen (fun a b => b ∈ f₂ a) a₂ c₂
cd : d₂ ∈ f₂ c₂
e₁ : σ₁
ae : Reaches f₁ a₁ e₁
ce : Reaches f₂ c₂ c₂
ee : tr e₁ c₂
⊢ ∃ c₁ c₂, Reaches f₂ d₂ c₂ ∧ tr c₁ c₂ ∧ Reaches f₁ a₁ c₁ State After: case tail.intro.intro.intro.intro.inl
σ₁ : Type u_1
σ₂ : Type u_2
f₁ : σ₁ → Option σ₁
f₂ : σ₂ → Option σ₂
tr : σ₁ → σ₂ → Prop
H : Respects f₁ f₂ tr
a₁ : σ₁
a₂ : σ₂
aa : tr a₁ a₂
b₂ c₂ d₂ : σ₂
a✝ : ReflTransGen (fun a b => b ∈ f₂ a) a₂ c₂
cd : d₂ ∈ f₂ c₂
e₁ : σ₁
ae : Reaches f₁ a₁ e₁
ce : Reaches f₂ c₂ c₂
ee : tr e₁ c₂
this :
match f₁ e₁ with
| some b₁ => ∃ b₂, tr b₁ b₂ ∧ Reaches₁ f₂ c₂ b₂
| none => f₂ c₂ = none
⊢ ∃ c₁ c₂, Reaches f₂ d₂ c₂ ∧ tr c₁ c₂ ∧ Reaches f₁ a₁ c₁ Tactic: have := H ee State Before: case tail.intro.intro.intro.intro.inl
σ₁ : Type u_1
σ₂ : Type u_2
f₁ : σ₁ → Option σ₁
f₂ : σ₂ → Option σ₂
tr : σ₁ → σ₂ → Prop
H : Respects f₁ f₂ tr
a₁ : σ₁
a₂ : σ₂
aa : tr a₁ a₂
b₂ c₂ d₂ : σ₂
a✝ : ReflTransGen (fun a b => b ∈ f₂ a) a₂ c₂
cd : d₂ ∈ f₂ c₂
e₁ : σ₁
ae : Reaches f₁ a₁ e₁
ce : Reaches f₂ c₂ c₂
ee : tr e₁ c₂
this :
match f₁ e₁ with
| some b₁ => ∃ b₂, tr b₁ b₂ ∧ Reaches₁ f₂ c₂ b₂
| none => f₂ c₂ = none
⊢ ∃ c₁ c₂, Reaches f₂ d₂ c₂ ∧ tr c₁ c₂ ∧ Reaches f₁ a₁ c₁ State After: case tail.intro.intro.intro.intro.inl
σ₁ : Type u_1
σ₂ : Type u_2
f₁ : σ₁ → Option σ₁
f₂ : σ₂ → Option σ₂
tr : σ₁ → σ₂ → Prop
H : Respects f₁ f₂ tr
a₁ : σ₁
a₂ : σ₂
aa : tr a₁ a₂
b₂ c₂ d₂ : σ₂
a✝ : ReflTransGen (fun a b => b ∈ f₂ a) a₂ c₂
cd : d₂ ∈ f₂ c₂
e₁ : σ₁
ae : Reaches f₁ a₁ e₁
ce : Reaches f₂ c₂ c₂
ee : tr e₁ c₂
⊢ (match f₁ e₁ with
| some b₁ => ∃ b₂, tr b₁ b₂ ∧ Reaches₁ f₂ c₂ b₂
| none => f₂ c₂ = none) →
∃ c₁ c₂, Reaches f₂ d₂ c₂ ∧ tr c₁ c₂ ∧ Reaches f₁ a₁ c₁ Tactic: revert this State Before: case tail.intro.intro.intro.intro.inl
σ₁ : Type u_1
σ₂ : Type u_2
f₁ : σ₁ → Option σ₁
f₂ : σ₂ → Option σ₂
tr : σ₁ → σ₂ → Prop
H : Respects f₁ f₂ tr
a₁ : σ₁
a₂ : σ₂
aa : tr a₁ a₂
b₂ c₂ d₂ : σ₂
a✝ : ReflTransGen (fun a b => b ∈ f₂ a) a₂ c₂
cd : d₂ ∈ f₂ c₂
e₁ : σ₁
ae : Reaches f₁ a₁ e₁
ce : Reaches f₂ c₂ c₂
ee : tr e₁ c₂
⊢ (match f₁ e₁ with
| some b₁ => ∃ b₂, tr b₁ b₂ ∧ Reaches₁ f₂ c₂ b₂
| none => f₂ c₂ = none) →
∃ c₁ c₂, Reaches f₂ d₂ c₂ ∧ tr c₁ c₂ ∧ Reaches f₁ a₁ c₁ State After: case tail.intro.intro.intro.intro.inl.none
σ₁ : Type u_1
σ₂ : Type u_2
f₁ : σ₁ → Option σ₁
f₂ : σ₂ → Option σ₂
tr : σ₁ → σ₂ → Prop
H : Respects f₁ f₂ tr
a₁ : σ₁
a₂ : σ₂
aa : tr a₁ a₂
b₂ c₂ d₂ : σ₂
a✝ : ReflTransGen (fun a b => b ∈ f₂ a) a₂ c₂
cd : d₂ ∈ f₂ c₂
e₁ : σ₁
ae : Reaches f₁ a₁ e₁
ce : Reaches f₂ c₂ c₂
ee : tr e₁ c₂
eg : f₁ e₁ = none
⊢ f₂ c₂ = none → ∃ c₁ c₂, Reaches f₂ d₂ c₂ ∧ tr c₁ c₂ ∧ Reaches f₁ a₁ c₁
case tail.intro.intro.intro.intro.inl.some
σ₁ : Type u_1
σ₂ : Type u_2
f₁ : σ₁ → Option σ₁
f₂ : σ₂ → Option σ₂
tr : σ₁ → σ₂ → Prop
H : Respects f₁ f₂ tr
a₁ : σ₁
a₂ : σ₂
aa : tr a₁ a₂
b₂ c₂ d₂ : σ₂
a✝ : ReflTransGen (fun a b => b ∈ f₂ a) a₂ c₂
cd : d₂ ∈ f₂ c₂
e₁ : σ₁
ae : Reaches f₁ a₁ e₁
ce : Reaches f₂ c₂ c₂
ee : tr e₁ c₂
g₁ : σ₁
eg : f₁ e₁ = some g₁
⊢ ∀ (x : σ₂), tr g₁ x → Reaches₁ f₂ c₂ x → ∃ c₁ c₂, Reaches f₂ d₂ c₂ ∧ tr c₁ c₂ ∧ Reaches f₁ a₁ c₁ Tactic: cases' eg : f₁ e₁ with g₁ <;> simp only [Respects, and_imp, exists_imp] State Before: case tail.intro.intro.intro.intro.inl.none
σ₁ : Type u_1
σ₂ : Type u_2
f₁ : σ₁ → Option σ₁
f₂ : σ₂ → Option σ₂
tr : σ₁ → σ₂ → Prop
H : Respects f₁ f₂ tr
a₁ : σ₁
a₂ : σ₂
aa : tr a₁ a₂
b₂ c₂ d₂ : σ₂
a✝ : ReflTransGen (fun a b => b ∈ f₂ a) a₂ c₂
cd : d₂ ∈ f₂ c₂
e₁ : σ₁
ae : Reaches f₁ a₁ e₁
ce : Reaches f₂ c₂ c₂
ee : tr e₁ c₂
eg : f₁ e₁ = none
⊢ f₂ c₂ = none → ∃ c₁ c₂, Reaches f₂ d₂ c₂ ∧ tr c₁ c₂ ∧ Reaches f₁ a₁ c₁ State After: case tail.intro.intro.intro.intro.inl.none
σ₁ : Type u_1
σ₂ : Type u_2
f₁ : σ₁ → Option σ₁
f₂ : σ₂ → Option σ₂
tr : σ₁ → σ₂ → Prop
H : Respects f₁ f₂ tr
a₁ : σ₁
a₂ : σ₂
aa : tr a₁ a₂
b₂ c₂ d₂ : σ₂
a✝ : ReflTransGen (fun a b => b ∈ f₂ a) a₂ c₂
cd : d₂ ∈ f₂ c₂
e₁ : σ₁
ae : Reaches f₁ a₁ e₁
ce : Reaches f₂ c₂ c₂
ee : tr e₁ c₂
eg : f₁ e₁ = none
c0 : f₂ c₂ = none
⊢ ∃ c₁ c₂, Reaches f₂ d₂ c₂ ∧ tr c₁ c₂ ∧ Reaches f₁ a₁ c₁ Tactic: intro c0 State Before: case tail.intro.intro.intro.intro.inl.none
σ₁ : Type u_1
σ₂ : Type u_2
f₁ : σ₁ → Option σ₁
f₂ : σ₂ → Option σ₂
tr : σ₁ → σ₂ → Prop
H : Respects f₁ f₂ tr
a₁ : σ₁
a₂ : σ₂
aa : tr a₁ a₂
b₂ c₂ d₂ : σ₂
a✝ : ReflTransGen (fun a b => b ∈ f₂ a) a₂ c₂
cd : d₂ ∈ f₂ c₂
e₁ : σ₁
ae : Reaches f₁ a₁ e₁
ce : Reaches f₂ c₂ c₂
ee : tr e₁ c₂
eg : f₁ e₁ = none
c0 : f₂ c₂ = none
⊢ ∃ c₁ c₂, Reaches f₂ d₂ c₂ ∧ tr c₁ c₂ ∧ Reaches f₁ a₁ c₁ State After: no goals Tactic: cases cd.symm.trans c0 State Before: case tail.intro.intro.intro.intro.inl.some
σ₁ : Type u_1
σ₂ : Type u_2
f₁ : σ₁ → Option σ₁
f₂ : σ₂ → Option σ₂
tr : σ₁ → σ₂ → Prop
H : Respects f₁ f₂ tr
a₁ : σ₁
a₂ : σ₂
aa : tr a₁ a₂
b₂ c₂ d₂ : σ₂
a✝ : ReflTransGen (fun a b => b ∈ f₂ a) a₂ c₂
cd : d₂ ∈ f₂ c₂
e₁ : σ₁
ae : Reaches f₁ a₁ e₁
ce : Reaches f₂ c₂ c₂
ee : tr e₁ c₂
g₁ : σ₁
eg : f₁ e₁ = some g₁
⊢ ∀ (x : σ₂), tr g₁ x → Reaches₁ f₂ c₂ x → ∃ c₁ c₂, Reaches f₂ d₂ c₂ ∧ tr c₁ c₂ ∧ Reaches f₁ a₁ c₁ State After: case tail.intro.intro.intro.intro.inl.some
σ₁ : Type u_1
σ₂ : Type u_2
f₁ : σ₁ → Option σ₁
f₂ : σ₂ → Option σ₂
tr : σ₁ → σ₂ → Prop
H : Respects f₁ f₂ tr
a₁ : σ₁
a₂ : σ₂
aa : tr a₁ a₂
b₂ c₂ d₂ : σ₂
a✝ : ReflTransGen (fun a b => b ∈ f₂ a) a₂ c₂
cd : d₂ ∈ f₂ c₂
e₁ : σ₁
ae : Reaches f₁ a₁ e₁
ce : Reaches f₂ c₂ c₂
ee : tr e₁ c₂
g₁ : σ₁
eg : f₁ e₁ = some g₁
g₂ : σ₂
gg : tr g₁ g₂
cg : Reaches₁ f₂ c₂ g₂
⊢ ∃ c₁ c₂, Reaches f₂ d₂ c₂ ∧ tr c₁ c₂ ∧ Reaches f₁ a₁ c₁ Tactic: intro g₂ gg cg State Before: case tail.intro.intro.intro.intro.inl.some
σ₁ : Type u_1
σ₂ : Type u_2
f₁ : σ₁ → Option σ₁
f₂ : σ₂ → Option σ₂
tr : σ₁ → σ₂ → Prop
H : Respects f₁ f₂ tr
a₁ : σ₁
a₂ : σ₂
aa : tr a₁ a₂
b₂ c₂ d₂ : σ₂
a✝ : ReflTransGen (fun a b => b ∈ f₂ a) a₂ c₂
cd : d₂ ∈ f₂ c₂
e₁ : σ₁
ae : Reaches f₁ a₁ e₁
ce : Reaches f₂ c₂ c₂
ee : tr e₁ c₂
g₁ : σ₁
eg : f₁ e₁ = some g₁
g₂ : σ₂
gg : tr g₁ g₂
cg : Reaches₁ f₂ c₂ g₂
⊢ ∃ c₁ c₂, Reaches f₂ d₂ c₂ ∧ tr c₁ c₂ ∧ Reaches f₁ a₁ c₁ State After: case tail.intro.intro.intro.intro.inl.some.intro.intro
σ₁ : Type u_1
σ₂ : Type u_2
f₁ : σ₁ → Option σ₁
f₂ : σ₂ → Option σ₂
tr : σ₁ → σ₂ → Prop
H : Respects f₁ f₂ tr
a₁ : σ₁
a₂ : σ₂
aa : tr a₁ a₂
b₂ c₂ d₂ : σ₂
a✝ : ReflTransGen (fun a b => b ∈ f₂ a) a₂ c₂
cd : d₂ ∈ f₂ c₂
e₁ : σ₁
ae : Reaches f₁ a₁ e₁
ce : Reaches f₂ c₂ c₂
ee : tr e₁ c₂
g₁ : σ₁
eg : f₁ e₁ = some g₁
g₂ : σ₂
gg : tr g₁ g₂
cg : Reaches₁ f₂ c₂ g₂
d' : σ₂
cd' : d' ∈ f₂ c₂
dg : ReflTransGen (fun a b => b ∈ f₂ a) d' g₂
⊢ ∃ c₁ c₂, Reaches f₂ d₂ c₂ ∧ tr c₁ c₂ ∧ Reaches f₁ a₁ c₁ Tactic: rcases TransGen.head'_iff.1 cg with ⟨d', cd', dg⟩ State Before: case tail.intro.intro.intro.intro.inl.some.intro.intro
σ₁ : Type u_1
σ₂ : Type u_2
f₁ : σ₁ → Option σ₁
f₂ : σ₂ → Option σ₂
tr : σ₁ → σ₂ → Prop
H : Respects f₁ f₂ tr
a₁ : σ₁
a₂ : σ₂
aa : tr a₁ a₂
b₂ c₂ d₂ : σ₂
a✝ : ReflTransGen (fun a b => b ∈ f₂ a) a₂ c₂
cd : d₂ ∈ f₂ c₂
e₁ : σ₁
ae : Reaches f₁ a₁ e₁
ce : Reaches f₂ c₂ c₂
ee : tr e₁ c₂
g₁ : σ₁
eg : f₁ e₁ = some g₁
g₂ : σ₂
gg : tr g₁ g₂
cg : Reaches₁ f₂ c₂ g₂
d' : σ₂
cd' : d' ∈ f₂ c₂
dg : ReflTransGen (fun a b => b ∈ f₂ a) d' g₂
⊢ ∃ c₁ c₂, Reaches f₂ d₂ c₂ ∧ tr c₁ c₂ ∧ Reaches f₁ a₁ c₁ State After: case tail.intro.intro.intro.intro.inl.some.intro.intro.refl
σ₁ : Type u_1
σ₂ : Type u_2
f₁ : σ₁ → Option σ₁
f₂ : σ₂ → Option σ₂
tr : σ₁ → σ₂ → Prop
H : Respects f₁ f₂ tr
a₁ : σ₁
a₂ : σ₂
aa : tr a₁ a₂
b₂ c₂ d₂ : σ₂
a✝ : ReflTransGen (fun a b => b ∈ f₂ a) a₂ c₂
cd : d₂ ∈ f₂ c₂
e₁ : σ₁
ae : Reaches f₁ a₁ e₁
ce : Reaches f₂ c₂ c₂
ee : tr e₁ c₂
g₁ : σ₁
eg : f₁ e₁ = some g₁
g₂ : σ₂
gg : tr g₁ g₂
cg : Reaches₁ f₂ c₂ g₂
cd' : d₂ ∈ f₂ c₂
dg : ReflTransGen (fun a b => b ∈ f₂ a) d₂ g₂
⊢ ∃ c₁ c₂, Reaches f₂ d₂ c₂ ∧ tr c₁ c₂ ∧ Reaches f₁ a₁ c₁ Tactic: cases Option.mem_unique cd cd' State Before: case tail.intro.intro.intro.intro.inl.some.intro.intro.refl
σ₁ : Type u_1
σ₂ : Type u_2
f₁ : σ₁ → Option σ₁
f₂ : σ₂ → Option σ₂
tr : σ₁ → σ₂ → Prop
H : Respects f₁ f₂ tr
a₁ : σ₁
a₂ : σ₂
aa : tr a₁ a₂
b₂ c₂ d₂ : σ₂
a✝ : ReflTransGen (fun a b => b ∈ f₂ a) a₂ c₂
cd : d₂ ∈ f₂ c₂
e₁ : σ₁
ae : Reaches f₁ a₁ e₁
ce : Reaches f₂ c₂ c₂
ee : tr e₁ c₂
g₁ : σ₁
eg : f₁ e₁ = some g₁
g₂ : σ₂
gg : tr g₁ g₂
cg : Reaches₁ f₂ c₂ g₂
cd' : d₂ ∈ f₂ c₂
dg : ReflTransGen (fun a b => b ∈ f₂ a) d₂ g₂
⊢ ∃ c₁ c₂, Reaches f₂ d₂ c₂ ∧ tr c₁ c₂ ∧ Reaches f₁ a₁ c₁ State After: no goals Tactic: exact ⟨_, _, dg, gg, ae.tail eg⟩ State Before: case tail.intro.intro.intro.intro.inr.intro.intro
σ₁ : Type u_1
σ₂ : Type u_2
f₁ : σ₁ → Option σ₁
f₂ : σ₂ → Option σ₂
tr : σ₁ → σ₂ → Prop
H : Respects f₁ f₂ tr
a₁ : σ₁
a₂ : σ₂
aa : tr a₁ a₂
b₂ c₂ d₂ : σ₂
a✝ : ReflTransGen (fun a b => b ∈ f₂ a) a₂ c₂
cd : d₂ ∈ f₂ c₂
e₁ : σ₁
e₂ : σ₂
ce : Reaches f₂ c₂ e₂
ee : tr e₁ e₂
ae : Reaches f₁ a₁ e₁
d' : σ₂
cd' : d' ∈ f₂ c₂
de : ReflTransGen (fun a b => b ∈ f₂ a) d' e₂
⊢ ∃ c₁ c₂, Reaches f₂ d₂ c₂ ∧ tr c₁ c₂ ∧ Reaches f₁ a₁ c₁ State After: case tail.intro.intro.intro.intro.inr.intro.intro.refl
σ₁ : Type u_1
σ₂ : Type u_2
f₁ : σ₁ → Option σ₁
f₂ : σ₂ → Option σ₂
tr : σ₁ → σ₂ → Prop
H : Respects f₁ f₂ tr
a₁ : σ₁
a₂ : σ₂
aa : tr a₁ a₂
b₂ c₂ d₂ : σ₂
a✝ : ReflTransGen (fun a b => b ∈ f₂ a) a₂ c₂
cd : d₂ ∈ f₂ c₂
e₁ : σ₁
e₂ : σ₂
ce : Reaches f₂ c₂ e₂
ee : tr e₁ e₂
ae : Reaches f₁ a₁ e₁
cd' : d₂ ∈ f₂ c₂
de : ReflTransGen (fun a b => b ∈ f₂ a) d₂ e₂
⊢ ∃ c₁ c₂, Reaches f₂ d₂ c₂ ∧ tr c₁ c₂ ∧ Reaches f₁ a₁ c₁ Tactic: cases Option.mem_unique cd cd' State Before: case tail.intro.intro.intro.intro.inr.intro.intro.refl
σ₁ : Type u_1
σ₂ : Type u_2
f₁ : σ₁ → Option σ₁
f₂ : σ₂ → Option σ₂
tr : σ₁ → σ₂ → Prop
H : Respects f₁ f₂ tr
a₁ : σ₁
a₂ : σ₂
aa : tr a₁ a₂
b₂ c₂ d₂ : σ₂
a✝ : ReflTransGen (fun a b => b ∈ f₂ a) a₂ c₂
cd : d₂ ∈ f₂ c₂
e₁ : σ₁
e₂ : σ₂
ce : Reaches f₂ c₂ e₂
ee : tr e₁ e₂
ae : Reaches f₁ a₁ e₁
cd' : d₂ ∈ f₂ c₂
de : ReflTransGen (fun a b => b ∈ f₂ a) d₂ e₂
⊢ ∃ c₁ c₂, Reaches f₂ d₂ c₂ ∧ tr c₁ c₂ ∧ Reaches f₁ a₁ c₁ State After: no goals Tactic: exact ⟨_, _, de, ee, ae⟩ |
function tDel = getTDel(planC)
% getTDel returns the trigger time (tDel) of the first scan in planC whose
% DICOM TriggerTime is nonzero.
indexS = planC{end};
% Collect the TriggerTime from the first scanInfo entry of every scan
tDelC = arrayfun(@(x) x.scanInfo(1).DICOMHeaders.TriggerTime,planC{indexS.scan},'un',0);
% Find the first scan with a nonzero trigger time and return its value
nonZeroTDelV = [tDelC{:}]>0;
n = find(nonZeroTDelV,1,'first');
tDel = tDelC{n};
end
from typing import Tuple
from numpy import ndarray, pi, sin, sqrt
from numpy.random import rand, randn
from functions.function_base import FunctionBase
from ml.stochastic_process_samplers.stochastic_process_sampler_base import StochasticProcessSamplerBase
from ml.predictors.simple_sinusoidal_optimal_predictor import SimpleSinusoidalOptimalPredictor
class SimpleSinusoidalSampler(StochasticProcessSamplerBase):
"""
Simple sinusoidal sampler sampling the stochastic process:
Y = sin(2*pi*X) + V
where X is uniformly distributed in [0, 1] and V is Gaussian with zero mean.
"""
def __init__(self, noise_variance: float) -> None:
assert noise_variance >= 0.0, noise_variance
self.noise_variance: float = noise_variance
def get_number_inputs(self) -> int:
return 1
def get_number_outputs(self) -> int:
return 1
def random_sample(self, number_samples: int) -> Tuple[ndarray, ndarray]:
x_array_2d = rand(number_samples, 1)
y_array_2d = sin((2.0 * pi) * x_array_2d) + sqrt(self.noise_variance) * randn(*x_array_2d.shape)
return x_array_2d, y_array_2d
def get_optimal_predictor(self) -> FunctionBase:
return SimpleSinusoidalOptimalPredictor()
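
# Minimal usage sketch (illustrative; not part of the original module):
# the mean squared residual against the noiseless regression function
# sin(2*pi*x) should be close to the configured noise variance.
if __name__ == "__main__":
    sampler = SimpleSinusoidalSampler(noise_variance=0.01)
    x_array_2d, y_array_2d = sampler.random_sample(number_samples=1000)
    residuals = y_array_2d - sin((2.0 * pi) * x_array_2d)
    print(float((residuals ** 2).mean()))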
|
-- @@stderr --
dtrace: failed to compile script test/unittest/trace/err.D_PROTO_LEN.bad.d: [D_PROTO_LEN] line 18: trace( ) prototype mismatch: 0 args passed, 1 expected
|
From Test Require Import tactic.
Section FOFProblem.
Variable Universe : Set.
Variable UniverseElement : Universe.
Variable triangle_ : Universe -> Universe -> Universe -> Prop.
Variable ltA_ : Universe -> Universe -> Universe -> Universe -> Universe -> Universe -> Prop.
Variable lt_ : Universe -> Universe -> Universe -> Universe -> Prop.
Variable isosceles_ : Universe -> Universe -> Universe -> Prop.
Variable congA_ : Universe -> Universe -> Universe -> Universe -> Universe -> Universe -> Prop.
Variable cong_ : Universe -> Universe -> Universe -> Universe -> Prop.
Variable col_ : Universe -> Universe -> Universe -> Prop.
Variable deftriangle_1 : (forall A B C : Universe, (triangle_ A B C -> ~(col_ A B C))).
Variable deftriangle2_2 : (forall A B C : Universe, (~(col_ A B C) -> triangle_ A B C)).
Variable lemma_NCorder_3 : (forall A B C : Universe, (~(col_ A B C) -> (~(col_ B A C) /\ (~(col_ B C A) /\ (~(col_ C A B) /\ (~(col_ A C B) /\ ~(col_ C B A))))))).
Variable lemma_congruencesymmetric_4 : (forall A B C D : Universe, (cong_ B C A D -> cong_ A D B C)).
Variable defisosceles_5 : (forall A B C : Universe, (isosceles_ A B C -> (triangle_ A B C /\ cong_ A B A C))).
Variable defisosceles2_6 : (forall A B C : Universe, ((triangle_ A B C /\ cong_ A B A C) -> isosceles_ A B C)).
Variable proposition_05_7 : (forall A B C : Universe, (isosceles_ A B C -> congA_ A B C A C B)).
Variable lemma_equalanglessymmetric_8 : (forall A B C Xa Xb Xc : Universe, (congA_ A B C Xa Xb Xc -> congA_ Xa Xb Xc A B C)).
Variable lemma_ABCequalsCBA_9 : (forall A B C : Universe, (~(col_ A B C) -> congA_ A B C C B A)).
Variable lemma_equalanglestransitive_10 : (forall A B C D E F P Q R : Universe, ((congA_ A B C D E F /\ congA_ D E F P Q R) -> congA_ A B C P Q R)).
Variable lemma_angleorderrespectscongruence_11 : (forall A B C D E F P Q R : Universe, ((ltA_ A B C D E F /\ congA_ P Q R D E F) -> ltA_ A B C P Q R)).
Variable lemma_angletrichotomy_12 : (forall A B C D E F : Universe, (ltA_ A B C D E F -> ~(ltA_ D E F A B C))).
Variable proposition_18_13 : (forall A B C : Universe, ((triangle_ A B C /\ lt_ A B A C) -> ltA_ B C A A B C)).
Variable lemma_angleorderrespectscongruence2_14 : (forall A B C D E F Xa Xb Xc : Universe, ((ltA_ A B C D E F /\ congA_ Xa Xb Xc A B C) -> ltA_ Xa Xb Xc D E F)).
Variable lemma_equalanglesreflexive_15 : (forall A B C : Universe, (~(col_ A B C) -> congA_ A B C A B C)).
Variable lemma_angledistinct_16 : (forall A B C Xa Xb Xc : Universe, (congA_ A B C Xa Xb Xc -> (A <> B /\ (B <> C /\ (A <> C /\ (Xa <> Xb /\ (Xb <> Xc /\ Xa <> Xc))))))).
Variable lemma_trichotomy1_17 : (forall A B C D : Universe, ((~(lt_ A B C D) /\ (~(lt_ C D A B) /\ (A <> B /\ C <> D))) -> cong_ A B C D)).
Theorem proposition_19_18 : (forall A B C : Universe, ((triangle_ A B C /\ ltA_ B C A A B C) -> lt_ A B A C)).
Proof.
time tac.
Qed.
End FOFProblem.
|
[STATEMENT]
lemma normalize_quot_id: "x \<in> normalized_fracts \<Longrightarrow> normalize_quot x = x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x \<in> normalized_fracts \<Longrightarrow> normalize_quot x = x
[PROOF STEP]
by (auto simp: normalized_fracts_def normalize_quot_def case_prod_unfold) |
/-
Copyright (c) 2020 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov
-/
import algebra.char_p.invertible
import linear_algebra.affine_space.affine_equiv
/-!
# Midpoint of a segment
## Main definitions
* `midpoint R x y`: midpoint of the segment `[x, y]`. We define it for `x` and `y`
in a module over a ring `R` with invertible `2`.
* `add_monoid_hom.of_map_midpoint`: construct an `add_monoid_hom` given a map `f` such that
`f` sends zero to zero and midpoints to midpoints.
## Main theorems
* `midpoint_eq_iff`: `z` is the midpoint of `[x, y]` if and only if `x + y = z + z`,
* `midpoint_unique`: `midpoint R x y` does not depend on `R`;
* `midpoint x y` is linear both in `x` and `y`;
* `point_reflection_midpoint_left`, `point_reflection_midpoint_right`:
`equiv.point_reflection (midpoint R x y)` swaps `x` and `y`.
We do not mark most lemmas as `@[simp]` because it is hard to tell which side is simpler.
## Tags
midpoint, add_monoid_hom
-/
open affine_map affine_equiv
section
variables (R : Type*) {V V' P P' : Type*} [ring R] [invertible (2:R)]
[add_comm_group V] [module R V] [add_torsor V P]
[add_comm_group V'] [module R V'] [add_torsor V' P']
include V
/-- `midpoint x y` is the midpoint of the segment `[x, y]`. -/
def midpoint (x y : P) : P := line_map x y (⅟2:R)
variables {R} {x y z : P}
include V'
@[simp] lemma affine_map.map_midpoint (f : P →ᵃ[R] P') (a b : P) :
f (midpoint R a b) = midpoint R (f a) (f b) :=
f.apply_line_map a b _
@[simp] lemma affine_equiv.map_midpoint (f : P ≃ᵃ[R] P') (a b : P) :
f (midpoint R a b) = midpoint R (f a) (f b) :=
f.apply_line_map a b _
omit V'
@[simp] lemma affine_equiv.point_reflection_midpoint_left (x y : P) :
point_reflection R (midpoint R x y) x = y :=
by rw [midpoint, point_reflection_apply, line_map_apply, vadd_vsub,
vadd_vadd, ← add_smul, ← two_mul, mul_inv_of_self, one_smul, vsub_vadd]
lemma midpoint_comm (x y : P) : midpoint R x y = midpoint R y x :=
by rw [midpoint, ← line_map_apply_one_sub, one_sub_inv_of_two, midpoint]
@[simp] lemma affine_equiv.point_reflection_midpoint_right (x y : P) :
point_reflection R (midpoint R x y) y = x :=
by rw [midpoint_comm, affine_equiv.point_reflection_midpoint_left]
lemma midpoint_vsub_midpoint (p₁ p₂ p₃ p₄ : P) :
midpoint R p₁ p₂ -ᵥ midpoint R p₃ p₄ = midpoint R (p₁ -ᵥ p₃) (p₂ -ᵥ p₄) :=
line_map_vsub_line_map _ _ _ _ _
lemma midpoint_vadd_midpoint (v v' : V) (p p' : P) :
midpoint R v v' +ᵥ midpoint R p p' = midpoint R (v +ᵥ p) (v' +ᵥ p') :=
line_map_vadd_line_map _ _ _ _ _
lemma midpoint_eq_iff {x y z : P} : midpoint R x y = z ↔ point_reflection R z x = y :=
eq_comm.trans ((injective_point_reflection_left_of_module R x).eq_iff'
(affine_equiv.point_reflection_midpoint_left x y)).symm
@[simp] lemma midpoint_vsub_left (p₁ p₂ : P) : midpoint R p₁ p₂ -ᵥ p₁ = (⅟2:R) • (p₂ -ᵥ p₁) :=
line_map_vsub_left _ _ _
@[simp] lemma midpoint_vsub_right (p₁ p₂ : P) : midpoint R p₁ p₂ -ᵥ p₂ = (⅟2:R) • (p₁ -ᵥ p₂) :=
by rw [midpoint_comm, midpoint_vsub_left]
@[simp] lemma left_vsub_midpoint (p₁ p₂ : P) : p₁ -ᵥ midpoint R p₁ p₂ = (⅟2:R) • (p₁ -ᵥ p₂) :=
left_vsub_line_map _ _ _
@[simp] lemma right_vsub_midpoint (p₁ p₂ : P) : p₂ -ᵥ midpoint R p₁ p₂ = (⅟2:R) • (p₂ -ᵥ p₁) :=
by rw [midpoint_comm, left_vsub_midpoint]
@[simp] lemma midpoint_sub_left (v₁ v₂ : V) : midpoint R v₁ v₂ - v₁ = (⅟2:R) • (v₂ - v₁) :=
midpoint_vsub_left v₁ v₂
@[simp] lemma midpoint_sub_right (v₁ v₂ : V) : midpoint R v₁ v₂ - v₂ = (⅟2:R) • (v₁ - v₂) :=
midpoint_vsub_right v₁ v₂
@[simp] lemma left_sub_midpoint (v₁ v₂ : V) : v₁ - midpoint R v₁ v₂ = (⅟2:R) • (v₁ - v₂) :=
left_vsub_midpoint v₁ v₂
@[simp] lemma right_sub_midpoint (v₁ v₂ : V) : v₂ - midpoint R v₁ v₂ = (⅟2:R) • (v₂ - v₁) :=
right_vsub_midpoint v₁ v₂
variable (R)
lemma midpoint_eq_midpoint_iff_vsub_eq_vsub {x x' y y' : P} :
midpoint R x y = midpoint R x' y' ↔ x -ᵥ x' = y' -ᵥ y :=
by rw [← @vsub_eq_zero_iff_eq V, midpoint_vsub_midpoint, midpoint_eq_iff, point_reflection_apply,
vsub_eq_sub, zero_sub, vadd_eq_add, add_zero, neg_eq_iff_neg_eq, neg_vsub_eq_vsub_rev, eq_comm]
lemma midpoint_eq_iff' {x y z : P} : midpoint R x y = z ↔ equiv.point_reflection z x = y :=
midpoint_eq_iff
/-- `midpoint` does not depend on the ring `R`. -/
lemma midpoint_unique (R' : Type*) [ring R'] [invertible (2:R')] [module R' V] (x y : P) :
midpoint R x y = midpoint R' x y :=
(midpoint_eq_iff' R).2 $ (midpoint_eq_iff' R').1 rfl
@[simp] lemma midpoint_self (x : P) : midpoint R x x = x :=
line_map_same_apply _ _
@[simp] lemma midpoint_add_self (x y : V) : midpoint R x y + midpoint R x y = x + y :=
calc midpoint R x y +ᵥ midpoint R x y = midpoint R x y +ᵥ midpoint R y x : by rw midpoint_comm
... = x + y : by rw [midpoint_vadd_midpoint, vadd_eq_add, vadd_eq_add, add_comm, midpoint_self]
lemma midpoint_zero_add (x y : V) : midpoint R 0 (x + y) = midpoint R x y :=
(midpoint_eq_midpoint_iff_vsub_eq_vsub R).2 $ by simp [sub_add_eq_sub_sub_swap]
lemma midpoint_eq_smul_add (x y : V) : midpoint R x y = (⅟2 : R) • (x + y) :=
by rw [midpoint_eq_iff, point_reflection_apply, vsub_eq_sub, vadd_eq_add, sub_add_eq_add_sub,
← two_smul R, smul_smul, mul_inv_of_self, one_smul, add_sub_cancel']
end
lemma line_map_inv_two {R : Type*} {V P : Type*} [division_ring R] [char_zero R]
[add_comm_group V] [module R V] [add_torsor V P] (a b : P) :
line_map a b (2⁻¹:R) = midpoint R a b :=
rfl
lemma line_map_one_half {R : Type*} {V P : Type*} [division_ring R] [char_zero R]
[add_comm_group V] [module R V] [add_torsor V P] (a b : P) :
line_map a b (1/2:R) = midpoint R a b :=
by rw [one_div, line_map_inv_two]
lemma homothety_inv_of_two {R : Type*} {V P : Type*} [comm_ring R] [invertible (2:R)]
[add_comm_group V] [module R V] [add_torsor V P] (a b : P) :
homothety a (⅟2:R) b = midpoint R a b :=
rfl
lemma homothety_inv_two {k : Type*} {V P : Type*} [field k] [char_zero k]
[add_comm_group V] [module k V] [add_torsor V P] (a b : P) :
homothety a (2⁻¹:k) b = midpoint k a b :=
rfl
lemma homothety_one_half {k : Type*} {V P : Type*} [field k] [char_zero k]
[add_comm_group V] [module k V] [add_torsor V P] (a b : P) :
homothety a (1/2:k) b = midpoint k a b :=
by rw [one_div, homothety_inv_two]
@[simp] lemma pi_midpoint_apply {k ι : Type*} {V : Π i : ι, Type*} {P : Π i : ι, Type*} [field k]
[invertible (2:k)] [Π i, add_comm_group (V i)] [Π i, module k (V i)]
[Π i, add_torsor (V i) (P i)] (f g : Π i, P i) (i : ι) :
midpoint k f g i = midpoint k (f i) (g i) := rfl
namespace add_monoid_hom
variables (R R' : Type*) {E F : Type*}
[ring R] [invertible (2:R)] [add_comm_group E] [module R E]
[ring R'] [invertible (2:R')] [add_comm_group F] [module R' F]
/-- A map `f : E → F` sending zero to zero and midpoints to midpoints is an `add_monoid_hom`. -/
def of_map_midpoint (f : E → F) (h0 : f 0 = 0)
(hm : ∀ x y, f (midpoint R x y) = midpoint R' (f x) (f y)) :
E →+ F :=
{ to_fun := f,
map_zero' := h0,
map_add' := λ x y,
calc f (x + y) = f 0 + f (x + y) : by rw [h0, zero_add]
... = midpoint R' (f 0) (f (x + y)) + midpoint R' (f 0) (f (x + y)) :
(midpoint_add_self _ _ _).symm
... = f (midpoint R x y) + f (midpoint R x y) : by rw [← hm, midpoint_zero_add]
... = f x + f y : by rw [hm, midpoint_add_self] }
@[simp] lemma coe_of_map_midpoint (f : E → F) (h0 : f 0 = 0)
(hm : ∀ x y, f (midpoint R x y) = midpoint R' (f x) (f y)) :
⇑(of_map_midpoint R R' f h0 hm) = f := rfl
end add_monoid_hom
|
Name: Helen Thomson
Phone Number: Contact her, or her Deputy, at (530) 757-5556 or (530) 757-5557.
Office: Yolo County Board of Supervisors
Web Site: http://www.yolocounty.org/org/bos/thomson.htm
Helen currently serves as Chair of the Yolo County Board of Supervisors, after serving two terms in the California State Assembly. She began her political career as president of Davis Parent Nursery School and went on to serve as Trustee of the Davis JUSD School Board from 1975 to 1987 and then as a county supervisor.
Helen lives on College Park with her husband, psychiatrist Captane P. Thomson, MD, who was the first head of Yolo County Mental Health.
|
(* Title: HOL/Library/List_Lenlexorder.thy
*)
section \<open>Lexicographic order on lists\<close>
theory List_Lenlexorder
imports MainRLT
begin
instantiation list :: (ord) ord
begin
definition
list_less_def: "xs < ys \<longleftrightarrow> (xs, ys) \<in> lenlex {(u, v). u < v}"
definition
list_le_def: "(xs :: _ list) \<le> ys \<longleftrightarrow> xs < ys \<or> xs = ys"
instance ..
end
instance list :: (order) order
proof
have tr: "trans {(u, v::'a). u < v}"
using trans_def by fastforce
have \<section>: False
if "(xs,ys) \<in> lenlex {(u, v). u < v}" "(ys,xs) \<in> lenlex {(u, v). u < v}" for xs ys :: "'a list"
proof -
have "(xs,xs) \<in> lenlex {(u, v). u < v}"
using that transD [OF lenlex_transI [OF tr]] by blast
then show False
by (meson case_prodD lenlex_irreflexive less_irrefl mem_Collect_eq)
qed
show "xs \<le> xs" for xs :: "'a list" by (simp add: list_le_def)
show "xs \<le> zs" if "xs \<le> ys" and "ys \<le> zs" for xs ys zs :: "'a list"
using that transD [OF lenlex_transI [OF tr]] by (auto simp add: list_le_def list_less_def)
show "xs = ys" if "xs \<le> ys" "ys \<le> xs" for xs ys :: "'a list"
using \<section> that list_le_def list_less_def by blast
show "xs < ys \<longleftrightarrow> xs \<le> ys \<and> \<not> ys \<le> xs" for xs ys :: "'a list"
by (auto simp add: list_less_def list_le_def dest: \<section>)
qed
instance list :: (linorder) linorder
proof
fix xs ys :: "'a list"
have "total (lenlex {(u, v::'a). u < v})"
by (rule total_lenlex) (auto simp: total_on_def)
then show "xs \<le> ys \<or> ys \<le> xs"
by (auto simp add: total_on_def list_le_def list_less_def)
qed
instantiation list :: (linorder) distrib_lattice
begin
definition "(inf :: 'a list \<Rightarrow> _) = min"
definition "(sup :: 'a list \<Rightarrow> _) = max"
instance
by standard (auto simp add: inf_list_def sup_list_def max_min_distrib2)
end
lemma not_less_Nil [simp]: "\<not> x < []"
by (simp add: list_less_def)
lemma Nil_less_Cons [simp]: "[] < a # x"
by (simp add: list_less_def)
lemma Cons_less_Cons: "a # x < b # y \<longleftrightarrow> length x < length y \<or> length x = length y \<and> (a < b \<or> a = b \<and> x < y)"
using lenlex_length
by (fastforce simp: list_less_def Cons_lenlex_iff)
lemma le_Nil [simp]: "x \<le> [] \<longleftrightarrow> x = []"
unfolding list_le_def by (cases x) auto
lemma Nil_le_Cons [simp]: "[] \<le> x"
unfolding list_le_def by (cases x) auto
lemma Cons_le_Cons: "a # x \<le> b # y \<longleftrightarrow> length x < length y \<or> length x = length y \<and> (a < b \<or> a = b \<and> x \<le> y)"
by (auto simp: list_le_def Cons_less_Cons)
instantiation list :: (order) order_bot
begin
definition "bot = []"
instance
by standard (simp add: bot_list_def)
end
end
|
/-
Copyright (c) 2020 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov
! This file was ported from Lean 3 source module logic.function.conjugate
! leanprover-community/mathlib commit 448144f7ae193a8990cb7473c9e9a01990f64ac7
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.Logic.Function.Basic
/-!
# Semiconjugate and commuting maps
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
We define the following predicates:
* `function.semiconj`: `f : α → β` semiconjugates `ga : α → α` to `gb : β → β` if `f ∘ ga = gb ∘ f`;
* `function.semiconj₂: `f : α → β` semiconjugates a binary operation `ga : α → α → α`
to `gb : β → β → β` if `f (ga x y) = gb (f x) (f y)`;
* `f : α → α` commutes with `g : α → α` if `f ∘ g = g ∘ f`, or equivalently `semiconj f g g`.
-/
namespace Function
variable {α : Type _} {β : Type _} {γ : Type _}
#print Function.Semiconj /-
/-- We say that `f : α → β` semiconjugates `ga : α → α` to `gb : β → β` if `f ∘ ga = gb ∘ f`.
We use `∀ x, f (ga x) = gb (f x)` as the definition, so given `h : function.semiconj f ga gb` and
`a : α`, we have `h a : f (ga a) = gb (f a)` and `h.comp_eq : f ∘ ga = gb ∘ f`. -/
def Semiconj (f : α → β) (ga : α → α) (gb : β → β) : Prop :=
∀ x, f (ga x) = gb (f x)
#align function.semiconj Function.Semiconj
-/
namespace Semiconj
variable {f fab : α → β} {fbc : β → γ} {ga ga' : α → α} {gb gb' : β → β} {gc gc' : γ → γ}
/- warning: function.semiconj.comp_eq -> Function.Semiconj.comp_eq is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {f : α -> β} {ga : α -> α} {gb : β -> β}, (Function.Semiconj.{u1, u2} α β f ga gb) -> (Eq.{max (succ u1) (succ u2)} (α -> β) (Function.comp.{succ u1, succ u1, succ u2} α α β f ga) (Function.comp.{succ u1, succ u2, succ u2} α β β gb f))
but is expected to have type
forall {α : Type.{u2}} {β : Type.{u1}} {f : α -> β} {ga : α -> α} {gb : β -> β}, (Function.Semiconj.{u2, u1} α β f ga gb) -> (Eq.{max (succ u2) (succ u1)} (α -> β) (Function.comp.{succ u2, succ u2, succ u1} α α β f ga) (Function.comp.{succ u2, succ u1, succ u1} α β β gb f))
Case conversion may be inaccurate. Consider using '#align function.semiconj.comp_eq Function.Semiconj.comp_eqₓ'. -/
protected theorem comp_eq (h : Semiconj f ga gb) : f ∘ ga = gb ∘ f :=
funext h
#align function.semiconj.comp_eq Function.Semiconj.comp_eq
/- warning: function.semiconj.eq -> Function.Semiconj.eq is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {f : α -> β} {ga : α -> α} {gb : β -> β}, (Function.Semiconj.{u1, u2} α β f ga gb) -> (forall (x : α), Eq.{succ u2} β (f (ga x)) (gb (f x)))
but is expected to have type
forall {α : Type.{u2}} {β : Type.{u1}} {f : α -> β} {ga : α -> α} {gb : β -> β}, (Function.Semiconj.{u2, u1} α β f ga gb) -> (forall (x : α), Eq.{succ u1} β (f (ga x)) (gb (f x)))
Case conversion may be inaccurate. Consider using '#align function.semiconj.eq Function.Semiconj.eqₓ'. -/
protected theorem eq (h : Semiconj f ga gb) (x : α) : f (ga x) = gb (f x) :=
h x
#align function.semiconj.eq Function.Semiconj.eq
/- warning: function.semiconj.comp_right -> Function.Semiconj.comp_right is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {f : α -> β} {ga : α -> α} {ga' : α -> α} {gb : β -> β} {gb' : β -> β}, (Function.Semiconj.{u1, u2} α β f ga gb) -> (Function.Semiconj.{u1, u2} α β f ga' gb') -> (Function.Semiconj.{u1, u2} α β f (Function.comp.{succ u1, succ u1, succ u1} α α α ga ga') (Function.comp.{succ u2, succ u2, succ u2} β β β gb gb'))
but is expected to have type
forall {α : Type.{u2}} {β : Type.{u1}} {f : α -> β} {ga : α -> α} {ga' : α -> α} {gb : β -> β} {gb' : β -> β}, (Function.Semiconj.{u2, u1} α β f ga gb) -> (Function.Semiconj.{u2, u1} α β f ga' gb') -> (Function.Semiconj.{u2, u1} α β f (Function.comp.{succ u2, succ u2, succ u2} α α α ga ga') (Function.comp.{succ u1, succ u1, succ u1} β β β gb gb'))
Case conversion may be inaccurate. Consider using '#align function.semiconj.comp_right Function.Semiconj.comp_rightₓ'. -/
theorem comp_right (h : Semiconj f ga gb) (h' : Semiconj f ga' gb') :
Semiconj f (ga ∘ ga') (gb ∘ gb') := fun x => by rw [comp_app, h.eq, h'.eq]
#align function.semiconj.comp_right Function.Semiconj.comp_right
/- warning: function.semiconj.comp_left -> Function.Semiconj.comp_left is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} {fab : α -> β} {fbc : β -> γ} {ga : α -> α} {gb : β -> β} {gc : γ -> γ}, (Function.Semiconj.{u1, u2} α β fab ga gb) -> (Function.Semiconj.{u2, u3} β γ fbc gb gc) -> (Function.Semiconj.{u1, u3} α γ (Function.comp.{succ u1, succ u2, succ u3} α β γ fbc fab) ga gc)
but is expected to have type
forall {α : Type.{u3}} {β : Type.{u2}} {γ : Type.{u1}} {fab : α -> β} {fbc : β -> γ} {ga : α -> α} {gb : β -> β} {gc : γ -> γ}, (Function.Semiconj.{u3, u2} α β fab ga gb) -> (Function.Semiconj.{u2, u1} β γ fbc gb gc) -> (Function.Semiconj.{u3, u1} α γ (Function.comp.{succ u3, succ u2, succ u1} α β γ fbc fab) ga gc)
Case conversion may be inaccurate. Consider using '#align function.semiconj.comp_left Function.Semiconj.comp_leftₓ'. -/
theorem comp_left (hab : Semiconj fab ga gb) (hbc : Semiconj fbc gb gc) :
Semiconj (fbc ∘ fab) ga gc := fun x => by simp only [comp_app, hab.eq, hbc.eq]
#align function.semiconj.comp_left Function.Semiconj.comp_left
/- warning: function.semiconj.id_right -> Function.Semiconj.id_right is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {f : α -> β}, Function.Semiconj.{u1, u2} α β f (id.{succ u1} α) (id.{succ u2} β)
but is expected to have type
forall {α : Type.{u2}} {β : Type.{u1}} {f : α -> β}, Function.Semiconj.{u2, u1} α β f (id.{succ u2} α) (id.{succ u1} β)
Case conversion may be inaccurate. Consider using '#align function.semiconj.id_right Function.Semiconj.id_rightₓ'. -/
theorem id_right : Semiconj f id id := fun _ => rfl
#align function.semiconj.id_right Function.Semiconj.id_right
#print Function.Semiconj.id_left /-
theorem id_left : Semiconj id ga ga := fun _ => rfl
#align function.semiconj.id_left Function.Semiconj.id_left
-/
/- warning: function.semiconj.inverses_right -> Function.Semiconj.inverses_right is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {f : α -> β} {ga : α -> α} {ga' : α -> α} {gb : β -> β} {gb' : β -> β}, (Function.Semiconj.{u1, u2} α β f ga gb) -> (Function.RightInverse.{succ u1, succ u1} α α ga' ga) -> (Function.LeftInverse.{succ u2, succ u2} β β gb' gb) -> (Function.Semiconj.{u1, u2} α β f ga' gb')
but is expected to have type
forall {α : Type.{u2}} {β : Type.{u1}} {f : α -> β} {ga : α -> α} {ga' : α -> α} {gb : β -> β} {gb' : β -> β}, (Function.Semiconj.{u2, u1} α β f ga gb) -> (Function.RightInverse.{succ u2, succ u2} α α ga' ga) -> (Function.LeftInverse.{succ u1, succ u1} β β gb' gb) -> (Function.Semiconj.{u2, u1} α β f ga' gb')
Case conversion may be inaccurate. Consider using '#align function.semiconj.inverses_right Function.Semiconj.inverses_rightₓ'. -/
theorem inverses_right (h : Semiconj f ga gb) (ha : RightInverse ga' ga) (hb : LeftInverse gb' gb) :
Semiconj f ga' gb' := fun x => by rw [← hb (f (ga' x)), ← h.eq, ha x]
#align function.semiconj.inverses_right Function.Semiconj.inverses_right
/- warning: function.semiconj.option_map -> Function.Semiconj.option_map is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {f : α -> β} {ga : α -> α} {gb : β -> β}, (Function.Semiconj.{u1, u2} α β f ga gb) -> (Function.Semiconj.{u1, u2} (Option.{u1} α) (Option.{u2} β) (Option.map.{u1, u2} α β f) (Option.map.{u1, u1} α α ga) (Option.map.{u2, u2} β β gb))
but is expected to have type
forall {α : Type.{u2}} {β : Type.{u1}} {f : α -> β} {ga : α -> α} {gb : β -> β}, (Function.Semiconj.{u2, u1} α β f ga gb) -> (Function.Semiconj.{u2, u1} (Option.{u2} α) (Option.{u1} β) (Option.map.{u2, u1} α β f) (Option.map.{u2, u2} α α ga) (Option.map.{u1, u1} β β gb))
Case conversion may be inaccurate. Consider using '#align function.semiconj.option_map Function.Semiconj.option_mapₓ'. -/
theorem option_map {f : α → β} {ga : α → α} {gb : β → β} (h : Semiconj f ga gb) :
Semiconj (Option.map f) (Option.map ga) (Option.map gb)
| none => rfl
| some a => congr_arg some <| h _
#align function.semiconj.option_map Function.Semiconj.option_map
end Semiconj
#print Function.Commute /-
/-- Two maps `f g : α → α` commute if `f (g x) = g (f x)` for all `x : α`.
Given `h : function.commute f g` and `a : α`, we have `h a : f (g a) = g (f a)` and
`h.comp_eq : f ∘ g = g ∘ f`. -/
def Commute (f g : α → α) : Prop :=
Semiconj f g g
#align function.commute Function.Commute
-/
#print Function.Semiconj.commute /-
theorem Semiconj.commute {f g : α → α} (h : Semiconj f g g) : Commute f g :=
h
#align function.semiconj.commute Function.Semiconj.commute
-/
namespace Commute
variable {f f' g g' : α → α}
#print Function.Commute.refl /-
@[refl]
theorem refl (f : α → α) : Commute f f := fun _ => Eq.refl _
#align function.commute.refl Function.Commute.refl
-/
#print Function.Commute.symm /-
@[symm]
theorem symm (h : Commute f g) : Commute g f := fun x => (h x).symm
#align function.commute.symm Function.Commute.symm
-/
#print Function.Commute.comp_right /-
theorem comp_right (h : Commute f g) (h' : Commute f g') : Commute f (g ∘ g') :=
h.compRight h'
#align function.commute.comp_right Function.Commute.comp_right
-/
#print Function.Commute.comp_left /-
theorem comp_left (h : Commute f g) (h' : Commute f' g) : Commute (f ∘ f') g :=
(h.symm.compRight h'.symm).symm
#align function.commute.comp_left Function.Commute.comp_left
-/
#print Function.Commute.id_right /-
theorem id_right : Commute f id :=
Semiconj.id_right
#align function.commute.id_right Function.Commute.id_right
-/
#print Function.Commute.id_left /-
theorem id_left : Commute id f :=
Semiconj.id_left
#align function.commute.id_left Function.Commute.id_left
-/
#print Function.Commute.option_map /-
theorem option_map {f g : α → α} : Commute f g → Commute (Option.map f) (Option.map g) :=
Semiconj.option_map
#align function.commute.option_map Function.Commute.option_map
-/
end Commute
#print Function.Semiconj₂ /-
/-- A map `f` semiconjugates a binary operation `ga` to a binary operation `gb` if
for all `x`, `y` we have `f (ga x y) = gb (f x) (f y)`. E.g., a `monoid_hom`
semiconjugates `(*)` to `(*)`. -/
def Semiconj₂ (f : α → β) (ga : α → α → α) (gb : β → β → β) : Prop :=
∀ x y, f (ga x y) = gb (f x) (f y)
#align function.semiconj₂ Function.Semiconj₂
-/
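/- A quick illustration (added for exposition; not part of the ported source):
`fun n => 2 ^ n` on `Nat` semiconjugates addition to multiplication, since
`2 ^ (x + y) = 2 ^ x * 2 ^ y`. -/
example : Semiconj₂ (fun n : Nat => 2 ^ n) (· + ·) (· * ·) := fun x y =>
  Nat.pow_add 2 x y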
namespace Semiconj₂
variable {f : α → β} {ga : α → α → α} {gb : β → β → β}
/- warning: function.semiconj₂.eq -> Function.Semiconj₂.eq is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {f : α -> β} {ga : α -> α -> α} {gb : β -> β -> β}, (Function.Semiconj₂.{u1, u2} α β f ga gb) -> (forall (x : α) (y : α), Eq.{succ u2} β (f (ga x y)) (gb (f x) (f y)))
but is expected to have type
forall {α : Type.{u2}} {β : Type.{u1}} {f : α -> β} {ga : α -> α -> α} {gb : β -> β -> β}, (Function.Semiconj₂.{u2, u1} α β f ga gb) -> (forall (x : α) (y : α), Eq.{succ u1} β (f (ga x y)) (gb (f x) (f y)))
Case conversion may be inaccurate. Consider using '#align function.semiconj₂.eq Function.Semiconj₂.eqₓ'. -/
protected theorem eq (h : Semiconj₂ f ga gb) (x y : α) : f (ga x y) = gb (f x) (f y) :=
h x y
#align function.semiconj₂.eq Function.Semiconj₂.eq
/- warning: function.semiconj₂.comp_eq -> Function.Semiconj₂.comp_eq is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {f : α -> β} {ga : α -> α -> α} {gb : β -> β -> β}, (Function.Semiconj₂.{u1, u2} α β f ga gb) -> (Eq.{max (succ u1) (succ u2)} (α -> α -> β) (Function.bicompr.{u1, u1, u1, u2} α α α β f ga) (Function.bicompl.{u1, u1, u2, u2, u2} α α β β β gb f f))
but is expected to have type
forall {α : Type.{u2}} {β : Type.{u1}} {f : α -> β} {ga : α -> α -> α} {gb : β -> β -> β}, (Function.Semiconj₂.{u2, u1} α β f ga gb) -> (Eq.{max (succ u2) (succ u1)} (α -> α -> β) (Function.bicompr.{u2, u2, u2, u1} α α α β f ga) (Function.bicompl.{u2, u2, u1, u1, u1} α α β β β gb f f))
Case conversion may be inaccurate. Consider using '#align function.semiconj₂.comp_eq Function.Semiconj₂.comp_eqₓ'. -/
protected theorem comp_eq (h : Semiconj₂ f ga gb) : bicompr f ga = bicompl gb f f :=
funext fun x => funext <| h x
#align function.semiconj₂.comp_eq Function.Semiconj₂.comp_eq
#print Function.Semiconj₂.id_left /-
theorem id_left (op : α → α → α) : Semiconj₂ id op op := fun _ _ => rfl
#align function.semiconj₂.id_left Function.Semiconj₂.id_left
-/
/- warning: function.semiconj₂.comp -> Function.Semiconj₂.comp is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} {f : α -> β} {ga : α -> α -> α} {gb : β -> β -> β} {f' : β -> γ} {gc : γ -> γ -> γ}, (Function.Semiconj₂.{u2, u3} β γ f' gb gc) -> (Function.Semiconj₂.{u1, u2} α β f ga gb) -> (Function.Semiconj₂.{u1, u3} α γ (Function.comp.{succ u1, succ u2, succ u3} α β γ f' f) ga gc)
but is expected to have type
forall {α : Type.{u1}} {β : Type.{u3}} {γ : Type.{u2}} {f : α -> β} {ga : α -> α -> α} {gb : β -> β -> β} {f' : β -> γ} {gc : γ -> γ -> γ}, (Function.Semiconj₂.{u3, u2} β γ f' gb gc) -> (Function.Semiconj₂.{u1, u3} α β f ga gb) -> (Function.Semiconj₂.{u1, u2} α γ (Function.comp.{succ u1, succ u3, succ u2} α β γ f' f) ga gc)
Case conversion may be inaccurate. Consider using '#align function.semiconj₂.comp Function.Semiconj₂.compₓ'. -/
theorem comp {f' : β → γ} {gc : γ → γ → γ} (hf' : Semiconj₂ f' gb gc) (hf : Semiconj₂ f ga gb) :
Semiconj₂ (f' ∘ f) ga gc := fun x y => by simp only [hf'.eq, hf.eq, comp_app]
#align function.semiconj₂.comp Function.Semiconj₂.comp
/- warning: function.semiconj₂.is_associative_right -> Function.Semiconj₂.isAssociative_right is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {f : α -> β} {ga : α -> α -> α} {gb : β -> β -> β} [_inst_1 : IsAssociative.{u1} α ga], (Function.Semiconj₂.{u1, u2} α β f ga gb) -> (Function.Surjective.{succ u1, succ u2} α β f) -> (IsAssociative.{u2} β gb)
but is expected to have type
forall {α : Type.{u2}} {β : Type.{u1}} {f : α -> β} {ga : α -> α -> α} {gb : β -> β -> β} [_inst_1 : IsAssociative.{u2} α ga], (Function.Semiconj₂.{u2, u1} α β f ga gb) -> (Function.Surjective.{succ u2, succ u1} α β f) -> (IsAssociative.{u1} β gb)
Case conversion may be inaccurate. Consider using '#align function.semiconj₂.is_associative_right Function.Semiconj₂.isAssociative_rightₓ'. -/
theorem isAssociative_right [IsAssociative α ga] (h : Semiconj₂ f ga gb) (h_surj : Surjective f) :
IsAssociative β gb :=
⟨h_surj.forall₃.2 fun x₁ x₂ x₃ => by simp only [← h.eq, @IsAssociative.assoc _ ga]⟩
#align function.semiconj₂.is_associative_right Function.Semiconj₂.isAssociative_right
#print Function.Semiconj₂.isAssociative_left /-
theorem isAssociative_left [IsAssociative β gb] (h : Semiconj₂ f ga gb) (h_inj : Injective f) :
IsAssociative α ga :=
⟨fun x₁ x₂ x₃ => h_inj <| by simp only [h.eq, @IsAssociative.assoc _ gb]⟩
#align function.semiconj₂.is_associative_left Function.Semiconj₂.isAssociative_left
-/
/- warning: function.semiconj₂.is_idempotent_right -> Function.Semiconj₂.isIdempotent_right is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {f : α -> β} {ga : α -> α -> α} {gb : β -> β -> β} [_inst_1 : IsIdempotent.{u1} α ga], (Function.Semiconj₂.{u1, u2} α β f ga gb) -> (Function.Surjective.{succ u1, succ u2} α β f) -> (IsIdempotent.{u2} β gb)
but is expected to have type
forall {α : Type.{u2}} {β : Type.{u1}} {f : α -> β} {ga : α -> α -> α} {gb : β -> β -> β} [_inst_1 : IsIdempotent.{u2} α ga], (Function.Semiconj₂.{u2, u1} α β f ga gb) -> (Function.Surjective.{succ u2, succ u1} α β f) -> (IsIdempotent.{u1} β gb)
Case conversion may be inaccurate. Consider using '#align function.semiconj₂.is_idempotent_right Function.Semiconj₂.isIdempotent_rightₓ'. -/
theorem isIdempotent_right [IsIdempotent α ga] (h : Semiconj₂ f ga gb) (h_surj : Surjective f) :
IsIdempotent β gb :=
⟨h_surj.forall.2 fun x => by simp only [← h.eq, @IsIdempotent.idempotent _ ga]⟩
#align function.semiconj₂.is_idempotent_right Function.Semiconj₂.isIdempotent_right
#print Function.Semiconj₂.isIdempotent_left /-
theorem isIdempotent_left [IsIdempotent β gb] (h : Semiconj₂ f ga gb) (h_inj : Injective f) :
IsIdempotent α ga :=
⟨fun x => h_inj <| by rw [h.eq, @IsIdempotent.idempotent _ gb]⟩
#align function.semiconj₂.is_idempotent_left Function.Semiconj₂.isIdempotent_left
-/
end Semiconj₂
end Function
|
Formal statement is: lemma closed_scaling: fixes S :: "'a::real_normed_vector set" assumes "closed S" shows "closed ((\<lambda>x. c *\<^sub>R x) ` S)" Informal statement is: If $S$ is a closed set, then the image of $S$ under scaling by a fixed scalar $c$ is also closed.
State Before: R : Type u
S₁ : Type v
S₂ : Type w
S₃ : Type x
σ : Type u_1
a a' a₁ a₂ : R
e : ℕ
n m : σ
s : σ →₀ ℕ
inst✝¹ : CommSemiring R
inst✝ : CommSemiring S₁
p✝ q p : MvPolynomial σ R
⊢ p ≠ 0 ↔ ∃ d, coeff d p ≠ 0 State After: R : Type u
S₁ : Type v
S₂ : Type w
S₃ : Type x
σ : Type u_1
a a' a₁ a₂ : R
e : ℕ
n m : σ
s : σ →₀ ℕ
inst✝¹ : CommSemiring R
inst✝ : CommSemiring S₁
p✝ q p : MvPolynomial σ R
⊢ (¬∀ (d : σ →₀ ℕ), coeff d p = 0) ↔ ∃ d, coeff d p ≠ 0 Tactic: rw [Ne.def, eq_zero_iff] State Before: R : Type u
S₁ : Type v
S₂ : Type w
S₃ : Type x
σ : Type u_1
a a' a₁ a₂ : R
e : ℕ
n m : σ
s : σ →₀ ℕ
inst✝¹ : CommSemiring R
inst✝ : CommSemiring S₁
p✝ q p : MvPolynomial σ R
⊢ (¬∀ (d : σ →₀ ℕ), coeff d p = 0) ↔ ∃ d, coeff d p ≠ 0 State After: R : Type u
S₁ : Type v
S₂ : Type w
S₃ : Type x
σ : Type u_1
a a' a₁ a₂ : R
e : ℕ
n m : σ
s : σ →₀ ℕ
inst✝¹ : CommSemiring R
inst✝ : CommSemiring S₁
p✝ q p : MvPolynomial σ R
⊢ (∃ d, coeff d p ≠ 0) ↔ ∃ d, coeff d p ≠ 0 Tactic: push_neg State Before: R : Type u
S₁ : Type v
S₂ : Type w
S₃ : Type x
σ : Type u_1
a a' a₁ a₂ : R
e : ℕ
n m : σ
s : σ →₀ ℕ
inst✝¹ : CommSemiring R
inst✝ : CommSemiring S₁
p✝ q p : MvPolynomial σ R
⊢ (∃ d, coeff d p ≠ 0) ↔ ∃ d, coeff d p ≠ 0 State After: no goals Tactic: rfl |
State Before: R : Type u
inst✝ : CommRing R
x y : R
h : IsCoprime x y
z : R
⊢ IsCoprime (z * y + x) y State After: R : Type u
inst✝ : CommRing R
x y : R
h : IsCoprime x y
z : R
⊢ IsCoprime (x + z * y) y Tactic: rw [add_comm] State Before: R : Type u
inst✝ : CommRing R
x y : R
h : IsCoprime x y
z : R
⊢ IsCoprime (x + z * y) y State After: no goals Tactic: exact h.add_mul_right_left z |
[[[[[
FUNDAMENTAL DECOMPOSITION
We prove here that
H(y|x) <= H_C(x,y) - H(x) + c
]]]]]
define (all-together x*)
let c debug 100 [constant to satisfy Kraft (see lemma)]
let x debug run-utm-on debug x*
let H-of-x debug length x*
[programs we've discovered that calculate pairs
starting with x]
let programs nil
let (stage n)
[generate requirements for all new programs we've
discovered that produce (x y) pairs]
let programs
(add-to-set debug (halts? nil debug n) programs)
(stage + n 1)
[at stage n = 0, 1, 2, 3, ...]
[look at all programs with <=n bits that halt within time n]
[returns list of all of them that produce pairs (x y)]
let (halts? p bits-left)
let v try n C p [C is eval read-exp if C = U]
if = success car v (look-at cadr v)
if = 0 bits-left nil
append (halts? append p cons 0 nil - bits-left 1)
(halts? append p cons 1 nil - bits-left 1)
[returns (p) if C(p) = (x y), otherwise ()]
let (look-at v)
if (and (is-pair v)
= x car v ) cons p nil
nil
[logical "and"]
let (and p q)
if p q false
[is x a pair?]
let (is-pair? x)
if atom x false
if atom cdr x false
if atom cdr cdr x true
false
[is an element in a set?]
let (is-in-set? element set)
if atom set false
if = element car set true
(is-in-set? element cdr set)
[forms set union avoiding duplicates,
and makes requirement for each new find]
let (add-to-set new old)
if atom new old
let first-new car new
let rest-new cdr new
if (is-in-set? first-new old) (add-to-set rest-new old)
(do (make-requirement first-new)
cons first-new (add-to-set rest-new old)
)
[first argument discarded, done for side-effect only!]
let (do x y) y
[given new p such that C(p) = (x y),
we produce the requirement for C_x
that there be a program for y that is |p|-H(x)+c bits long]
let (make-requirement p)
display cons cadr cadr try no-time-limit C p
cons - + c length p H-of-x
nil
let C ' [here eval read-exp gives U]
[test case special-purpose computer C here in place of U:]
[C(00100001) with x-1 and y-1 0's gives pair (x xy)]
[loop function gives number of bits up to next 1 bit]
let (loop n)
if = 1 read-bit n
(loop + n 1)
let x (loop 1)
let y (loop 1)
cons x cons * x y nil
[HERE GOES!]
(stage 0)
define all-together
value (lambda (x*) ((' (lambda (c) ((' (lambda (x) ((' (
lambda (H-of-x) ((' (lambda (programs) ((' (lambda
(stage) ((' (lambda (halts?) ((' (lambda (look-at
) ((' (lambda (and) ((' (lambda (is-pair?) ((' (la
mbda (is-in-set?) ((' (lambda (add-to-set) ((' (la
mbda (do) ((' (lambda (make-requirement) ((' (lamb
da (C) (stage 0))) (' ((' (lambda (loop) ((' (lamb
da (x) ((' (lambda (y) (cons x (cons (* x y) nil))
)) (loop 1)))) (loop 1)))) (' (lambda (n) (if (= 1
(read-bit)) n (loop (+ n 1)))))))))) (' (lambda (
p) (display (cons (car (cdr (car (cdr (try no-time
-limit C p))))) (cons (- (+ c (length p)) H-of-x)
nil)))))))) (' (lambda (x y) y))))) (' (lambda (ne
w old) (if (atom new) old ((' (lambda (first-new)
((' (lambda (rest-new) (if (is-in-set? first-new o
ld) (add-to-set rest-new old) (do (make-requiremen
t first-new) (cons first-new (add-to-set rest-new
old)))))) (cdr new)))) (car new)))))))) (' (lambda
(element set) (if (atom set) false (if (= element
(car set)) true (is-in-set? element (cdr set)))))
)))) (' (lambda (x) (if (atom x) false (if (atom (
cdr x)) false (if (atom (cdr (cdr x))) true false)
))))))) (' (lambda (p q) (if p q false)))))) (' (l
ambda (v) (if (and (is-pair v) (= x (car v))) (con
s p nil) nil)))))) (' (lambda (p bits-left) ((' (l
ambda (v) (if (= success (car v)) (look-at (car (c
dr v))) (if (= 0 bits-left) nil (append (halts? (a
ppend p (cons 0 nil)) (- bits-left 1)) (halts? (ap
pend p (cons 1 nil)) (- bits-left 1))))))) (try n
C p))))))) (' (lambda (n) ((' (lambda (programs) (
stage (+ n 1)))) (add-to-set (debug (halts? nil (d
ebug n))) programs))))))) nil))) (debug (length x*
))))) (debug (car (cdr (try no-time-limit (' (eval
(read-exp))) (debug x*)))))))) (debug 100)))
define x* 3
define x*
value 3
length bits x*
expression (length (bits x*))
value 16
[give all-together x*]
try 60 cons cons "'
cons all-together
nil
cons cons "'
cons bits x*
nil
nil
nil
expression (try 60 (cons (cons ' (cons all-together nil)) (co
ns (cons ' (cons (bits x*) nil)) nil)) nil)
debug 100
debug (0 0 1 1 0 0 1 1 0 0 0 0 1 0 1 0)
debug 3
debug 16
debug 0
debug ()
debug 1
debug ()
debug 2
debug ()
debug 3
debug ()
debug 4
debug ((0 0 1 1))
debug 5
debug ((0 0 1 0 1) (0 0 1 1))
debug 6
debug ((0 0 1 0 0 1) (0 0 1 0 1) (0 0 1 1))
debug 7
debug ((0 0 1 0 0 0 1) (0 0 1 0 0 1) (0 0 1 0 1) (0 0 1
1))
debug 8
debug ((0 0 1 0 0 0 0 1) (0 0 1 0 0 0 1) (0 0 1 0 0 1) (
0 0 1 0 1) (0 0 1 1))
debug 9
value (failure out-of-time ((3 88) (6 89) (9 90) (12 91)
(15 92)))
define x* 4
define x*
value 4
length bits x*
expression (length (bits x*))
value 16
[give all-together x*]
try 60 cons cons "'
cons all-together
nil
cons cons "'
cons bits x*
nil
nil
nil
expression (try 60 (cons (cons ' (cons all-together nil)) (co
ns (cons ' (cons (bits x*) nil)) nil)) nil)
debug 100
debug (0 0 1 1 0 1 0 0 0 0 0 0 1 0 1 0)
debug 4
debug 16
debug 0
debug ()
debug 1
debug ()
debug 2
debug ()
debug 3
debug ()
debug 4
debug ()
debug 5
debug ((0 0 0 1 1))
debug 6
debug ((0 0 0 1 0 1) (0 0 0 1 1))
debug 7
debug ((0 0 0 1 0 0 1) (0 0 0 1 0 1) (0 0 0 1 1))
debug 8
debug ((0 0 0 1 0 0 0 1) (0 0 0 1 0 0 1) (0 0 0 1 0 1) (
0 0 0 1 1))
debug 9
value (failure out-of-time ((4 89) (8 90) (12 91) (16 92
)))
|
# Introduction
The following is a brief introduction to Python and the IPython Notebook. There is much more to learn than what is covered here. This is just enough to get you started with the tutorial.
# The IPython Notebook
IPython consists of two processes: a kernel and a frontend. The kernel executes code while the frontend provides an interface for the user to enter their code. The IPython notebook is a frontend for Python which provides an interactive web environment for executing code and displaying rich media.
### Using the Notebook
To start an IPython notebook session open a command prompt, navigate to a desired working directory, then issue the following command:
`ipython notebook`
A new window will open in your web browser where you can open an existing notebook or start a new one.
Notebooks are organized with cells. You may have a code cell for inputting commands followed by its result cell, which contains the output of the code. You may also have a text cell, such as this one you're reading right now. Cell type can be changed in the dropdown menu above.
There is the menubar above for navigating a notebook but you will find the following shortcuts helpful:
- `Enter` : Create a new line with in cell
- `Shift + Enter` : Execute cell and advance to next cell
- `Ctrl + Enter` : Execute cell in place (do not advance to the next cell)
- Press `esc` (command mode) then `h` to display keyboard shortcuts
At times you might run code that gets stuck in an infinite loop, or you may simply want to clear all your workspace variables and start over. To solve each of these problems you can click on the menu:
`Kernel -> Interrupt`
`Kernel -> Restart`
```python
from __future__ import print_function
```
### Magic Commands
These are commands to control IPython itself.
```python
# list available magic commands
%lsmagic
```
```python
%load utils.py
```
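One magic worth knowing early is `%timeit`, which times a statement by running it repeatedly (shown here as illustrative usage):
```python
# time a short statement; IPython runs it many times and reports the timing
%timeit sum(range(1000))
```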
### Need Help?
In case you're lost, help isn't far. The following commands should provide assistance.
```python
# Displays an overview of IPython's features
?
```
```python
# A Quick Reference for IPython
%quickref
```
```python
# For details about an object
# object_name?
round??
```
```python
# list functions available in your workspace
dir()
```
```python
# list variables available in your workspace
%whos
```
# Python
### Basic Data Types
```python
a = 5
b = 5.0
c = float(5)
d = 'dee'
e = 'e'
type(a), type(b), type(c), type(d), type(e)
```
### Data Structures
Python offers several built-in data structures for arranging data in memory. We will be making use of lists, dictionaries, and tuples during this tutorial.
#### Lists
A list is a versatile container that holds objects in the order given. Lists are typically used to group similar items but may contain heterogeneous data types.
```python
empty_list = []
string_list = ['lions', 'tigers', 'bears', 'sharks', 'hamsters']
int_list = [0, 1, 2, 3, 4]
int_list2 = range(5,10)
list_from_variables = [a,b,c,d,e]
list_of_lists = [empty_list,
                 string_list,
                 list_from_variables,
                 int_list,
                 int_list2]
print(list_of_lists)
```
Elements of a list are accessible by their index.
```python
print(string_list[0])
print(string_list[1:4])
print(int_list[::2]) # get every 2nd element
print(list_of_lists[1][4]) # get a nested item
```
Lists are mutable, meaning that after a list is created we can change, add, or remove elements.
```python
int_list[2] = 222
int_list.append(5)
string_list.remove('lions')
list_from_variables.extend(int_list)
print(int_list)
print(string_list)
print(list_from_variables)
```
#### Tuples
Tuples share similarities with lists. A tuple is good for organizing related data that may be of different types. Notice they are defined with parentheses, `()`, rather than brackets.
```python
joe_blow = (32, 'tall', 'likes hats')
print(joe_blow[1])
```
Unlike lists, tuples are immutable. They cannot be changed once defined.
```python
# this won't work
joe_blow.append('married')
# neither will this
joe_blow[2] = 'not really a fan of hats'
```
In python a function can return multiple values. These outputs are packed into a tuple. Tuple unpacking assigns individual elements of a tuple to separate variables.
```python
pets = ('elephant', 'cow', 'rock')
pet1, pet2, pet3 = pets
```
A peculiar thing about tuples in python is defining a single-element tuple. Note the trailing comma. This is necessary for python to know you want a one-element tuple.
```python
(pet1,)
```
#### Dictionaries
A dictionary is an unordered set of *key:value* pairs. Much like a language dictionary where you look up a *word* and get its *definition* in a python dictionary you look up a *key* and get its *value*.
```python
# numbers or strings may be used as keys
dictionary0 = {'key1':'value1', 'key2':'value2', 'key3':'value3'}
dictionary1 = {1:'value1', 2:'value2', 3:'value3'}
cylinder = {'mass':50, 'base':10, 'height':100}
print(dictionary0)
print(dictionary1.keys())
print(cylinder['mass'])
```
The zip function is a convenient way to help generate a dictionary. It takes sequence objects and combines them into a list of tuples. We can subsequently use the list of two-element tuples to create a dictionary.
```python
keys = ['mass01', 'inertia01', 'mass02', 'inertia02']
values = [10, 1, 50, 5]
dict(zip(keys, values))
```
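Dictionaries are easy to loop over as well. A small sketch, reusing the `cylinder` dictionary from above:
```python
# iterate over key:value pairs
for key, value in cylinder.items():
    print(key, '=', value)
```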
### Functions
Python does not use braces, `{}`, or `end` statements to separate blocks of code. Rather, code blocks are introduced with a colon, `:`, and defined by their indentation. It is convention to use four spaces for each level of indentation.
```python
def abs_value(A):
    if A < 0:
        A = -A
    return A
abs_value(-100)
```
```python
def long_div(dividend, divisor):
    quotient = dividend // divisor  # // : floor division
    remainder = dividend % divisor  # % : modulo
    return quotient, remainder
a = 430
b = 25
# an example of tuple unpacking
quo, rem = long_div(a, b)
print('%d divided by %d is %d remainder %d' % (a, b, quo, rem))
```
### Modules
Modules add additional functionality not present in the default installation of python. Throughout this tutorial we will either import an entire module or import specific functions from a module.
```python
# import an object from numpy into the current namespace
from numpy import array
# import multiple objects from sympy
from sympy.physics.mechanics import dynamicsymbols, ReferenceFrame, Point
```
Objects from these modules are now available in your namespace.
```python
# from numpy
arr = array([1,2,3,4,5])
# from sympy.physics.mechanics
inertial_frame = ReferenceFrame('I')
```
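You can also import an entire module under an alias, a common convention that keeps the namespace organized:
```python
# import the whole numpy module under the conventional alias np
import numpy as np
np.array([1, 2, 3])
```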
# The SciPy Stack
SciPy is a collection of open source software that brings scientific computing to Python.
- IPython : Enhanced Interactive Console
- Numpy : N-dimensional array package
- Matplotlib : a fully featured 2D plotting package
- Scipy : a collection of toolboxes for signal processing, optimization, statistics, etc.
- Sympy : symbolic mathematics and computer algebra
- and much more...
## Numpy
### Setup
```python
from numpy import random, linspace, zeros, arange
```
### Creating Arrays
```python
# An array from a list
print(array([5, 12, -2, 9.3, 7]))
# random values
print(random.random((5)))
# linearly spaced values
# 5 values between 0 and 10 inclusive
print(linspace(0,10,5))
# range of values with a defined stepsize
# start at 0 and increase by 3
print(arange(0,14,3))
```
### Accessing Array Elements
```python
P = random.random((3,5))
# individual element
print(P[0,3])
# entire row
print(P[2])
# entire column
print(P[:,4])
# every third row (P is 2-D, so this slices along rows)
print(P[::3])
```
### Operations on Arrays
```python
# mathematical operations are always elementwise
x = arange(5)
print(x)
# the double asterisk represents exponentiation
print(x + x**2)
```
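Operations between two arrays of the same shape are elementwise as well; a short sketch:
```python
y = arange(5)
print(x * y)   # elementwise product, not a dot product
print(x > 2)   # comparisons are elementwise too, yielding a boolean array
```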
## Matplotlib
Matplotlib provides an API similar to MATLAB's
Use the magic command `%matplotlib inline` to work with matplotlib interactively. The `inline` argument allows plots to be embedded within the IPython notebook.
```python
%matplotlib inline
```
```python
from matplotlib.pyplot import plot, subplot, xlabel, ylabel, legend, tight_layout
from numpy import sin, cos, pi
```
### Examples
```python
x = arange(-pi,pi,0.1)
y1 = 2*sin(x)
y2 = x + cos(4*x)
plot(x, y1, 'r', x, y2, '--b')
plot(x[::5], y1[::5], 'og') # plot every 5th point
xlabel('x axis')
ylabel('y axis')
legend(['red', 'blue', 'green'], loc='upper left')
```
```python
x = linspace(-100,100)
for i in range(1,5):
    subplot(2,2,i)
    plot(x, x**i)
tight_layout() # this prevents the axis labels from overlapping
```
### Exercise
Use the provided function to create 3 different sine waves at various frequencies. Plot the 3 functions with labeled axis.
```python
def three_sine_waves(t, A, B, C):
    """
    t : (type: array) a monotonically increasing array of time values
    A,B,C : (type: float) frequency of sine waves
    """
    y1 = sin(A*t)
    y2 = sin(B*t)
    y3 = sin(C*t)
    return y1, y2, y3
```
```python
t = linspace(0, 4*pi, 500)                       # for example; any time array will do
y1, y2, y3 = three_sine_waves(t, 1.0, 2.0, 3.0)  # example frequencies
```
```python
%load exercise_solutions/n00_python_intro_data-plotting.py
```
## Scipy odeint
Scipy provides a routine for integrating first order ordinary differential equations.
### Setup
```python
from scipy.integrate import odeint
```
### Examples
```python
def dy(y,x):
    return x
y0 = 0.0
x = linspace(-5.0, 5.0, 1000)
y = odeint(dy,y0,x)
plot(x,y)
```
```python
def dy(y,t,coeff):
    A = coeff['A']
    B = coeff['B']
    C = coeff['C']
    D = coeff['D']
    return A*t**3 + B*t**2 + C*t + D
y0 = 2.0
t = linspace(-5.0, 3.0, 1000)
sys = {'A' : 0.25,
       'B' : 0.75,
       'C' : -1.5,
       'D' : -2.0}
y = odeint(dy, y0, t, args=(sys,))
plot(t,y)
```
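`odeint` integrates first order systems, so a higher-order ODE must first be rewritten as a system of first order equations. A sketch for a damped oscillator (the damping and stiffness values are made up for illustration):
```python
# x'' + c*x' + k*x = 0, rewritten as the first order system [x, v]' = [v, -c*v - k*x]
def dstate(state, t, c, k):
    x, v = state
    return [v, -c*v - k*x]
state0 = [1.0, 0.0]            # initial position and velocity
t = linspace(0.0, 10.0, 1000)
sol = odeint(dstate, state0, t, args=(0.5, 4.0))
plot(t, sol[:, 0])
```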
## SymPy
### Setup
```python
from sympy import *
# This is for prettier printing of equations
interactive.printing.init_printing()
```
### Creating Symbolic Variables
```python
a = symbols('a')
b = symbols('b')
gravity, mass, spring_const, time = symbols('g, m, k, t')
```
### Expressions Using Symbolic Variables
```python
a**2 + b/pi
```
```python
simplify(4*(a*a*a)*(b+b+b))
```
```python
diff(-gravity*time**2/2, time)
```
```python
# indefinite integral
integrate(-gravity,time)
```
```python
# definite integral
v0 = 5
t1 = 0
t2 = .35
position = integrate(-gravity*time + v0,(time,t1,t2))
position.subs(gravity, 9.81)
```
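SymPy can also solve equations and convert symbolic expressions into numerical functions. A brief sketch (`solve` and `lambdify` come with the `from sympy import *` above; `a` is the symbol defined earlier):
```python
# solve k*a - m*g = 0 for the symbol a
print(solve(spring_const*a - mass*gravity, a))
# turn a symbolic expression into a plain numerical function of time
height = lambdify(time, (-gravity*time**2/2 + 5*time).subs(gravity, 9.81))
height(0.35)
```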
# Additional Resources
[1] http://docs.python.org/2/tutorial
[2] http://nbviewer.ipython.org/github/ipython/ipython/blob/master/examples/notebooks/Cell%20Magics.ipynb
[3] http://www.scipy.org/index.html
|
proposition\<^marker>\<open>tag important\<close> starlike_negligible_bounded_gmeasurable: fixes S :: "'a :: euclidean_space set" assumes S: "S \<in> sets lebesgue" and "bounded S" and eq1: "\<And>c x. \<lbrakk>(c *\<^sub>R x) \<in> S; 0 \<le> c; x \<in> S\<rbrakk> \<Longrightarrow> c = 1" shows "S \<in> null_sets lebesgue" |
%% B-A Scale-Free Network Generation and Visualization
% *By Mathew Neil George*
%% Description and Cautions
% The *SFNG* m-file is used to simulate the B-A algorithm and returns scale-free
% networks of given node sizes. Understanding the B-A algorithm is key
% to using this code to its fullest. Due to Matlab resource limitations, it may not be
% possible to generate networks much larger than 15000 nodes, and increasing the
% *mlinks* variable increases processing time severely. This code was
% developed so that one could generate a network of small size, and then
% use that network as a seed to build a greater sized network, continuing
% this process until the actual desired network size is reached. This is for
% processor and time management purposes. However, realize that the initial
% seed does not have to have scale-free properties, while the later seeds
% may happen to have these properties. Therefore, it is prudent not to make the
% initial seed size much larger than a few nodes (most commonly 5
% interconnected nodes). In addition, the *mlinks* should be kept constant
% throughout the creation of the scale-free network.
%
% The *PLplot* m-file takes a scale-free network in adjacency matrix format
% and draws a best fit line to the frequency of degrees distribution of the
% nodes. Degree is the number of links that connect to and from a single node
% For scale-free networks, the frequency of degrees distribution forms a
% power-law curve, with an exponent usually between -2 and -3. This code is
% designed to allow only non-zero frequencies to be graphed in log-log format.
% The function returns the equation of the power-law fit in a cfit variable.
%
% The *CNet* m-file function creates a network graph using the *gplot*
% function with circular coordinates. It allows for a simple, yet
% intuitive, visualization of a given network.
%% Parameters
% *SFNG*
%
% * *Nodes* is the desired network size, including the seed network size
% (i.e. Nodes minus seed network size equals the number of nodes to be
% added).
%
% * *mlinks* controls the number of links a new node can make to the existing
% network nodes.
%
% * *seed* is the original network to which the B-A algorithm links
% additional nodes with a specific preferential attachment procedure.
% This undirected adjacency matrix can be created manually, or one could
% use the *Adjacency Matrix GUI*. Each node must have at least one link.
% The *seed* variable can be replaced with a developed scale-free network
% to generate a larger one. Make sure the new *Nodes* variable is greater
% than the size of the *seed* network.
%
% *PLplot*
%
% * *Net* is the input network which is to be graphed.
%
% *CNet*
%
% * *Net* is the input network which is to be graphed.
%
% Note that variables *Nodes*, *mlinks*, and *size* must be whole numbers and
% variables *seed* and *Net* must be undirected adjacency matrices. The
% diagonal elements of any adjacency matrix used with these functions must
% all be zero.
%% Sample Output
% Here is a small example to demonstrate how to use the code. This code
% creates a seed network of 5 nodes, generates a scale-free network of 300 nodes from
% the seed network, and then performs the two graphing procedures.
seed =[0 1 0 0 1;1 0 0 1 0;0 0 0 1 0;0 1 1 0 0;1 0 0 0 0]
Net = SFNG(300, 1, seed);
PL_Equation = PLplot(Net)
CNet(Net)
%% References
% One explanation of the B-A Algorithm can be found on this PDF website
% http://arxiv.org/PS_cache/cond-mat/pdf/0107/0107420.pdf
%
% Undirected Adjecency Matrices are defined on Wikipedia.org
% http://en.wikipedia.org/wiki/Adjacency_matrix
%
% The *Adjacency Matrix GUI* file by Steve Chuang can be found on the Matlab File Exchange
% http://www.mathworks.com/matlabcentral/fileexchange/loadFile.do?objectId=6937&objectType=file
%% Acknowledgements
% Special thanks to Mark Ballerini with the Massapequa High School Science
% Research Program and Zoltan Dezso at the University of Notre Dame for
% their invaluable help in researching network theory as well as to my
% family for providing motivation and encouragement in pursuing science. |
module Scenes.Main
import API.Web.Console
import API.Web.DOM.Document
import API.Web.DOM.DocumentType
import API.Web.DOM.Element
import API.Web.DOM.NonElementParentNode
import API.Web.HTML.HTMLElement
import API.Web.HTML.HTMLCanvasElement
import API.Web.HTML.Document
import API.Web.HTML.Window
import API.WebGL.Context
import IdrisScript
import Interfaces
%access public export
record Scene where
constructor Init
window : Window
context : RenderingContext
FrameRequest Scene where
requestFrame scene callback =
requestAnimationFrame (window scene) callback >>= discardInt where
discardInt : Maybe Int -> JS_IO ()
discardInt n = pure ()
Updatable Scene where
update scene = paint scene >>= \x => log "test" >>= \x => pure scene where
getContext : Scene -> WebGLRenderingContext
getContext scene = case (context scene) of
(FromWebGLRenderingContext ctx) => ctx
paint : Scene -> JS_IO ()
paint scene = clearColor (getContext scene) 1 0 0 1 >>= \x =>
clear (getContext scene) COLOR_BUFFER_BIT
export
initSceneWith : Window -> JS_IO $ Maybe Scene
initSceneWith window = case !context of
Nothing => pure Nothing
(Just renderingContext) => pure $ Just $ Init window renderingContext
where
elm : NonElementParentNode
elm = FromDocument $ FromHTMLDocument (New "html" "0" "0") (document window)
canvas : JS_IO $ Maybe HTMLCanvasElement
canvas = case !(getElementById elm "canvas") of
Nothing => log "Could not get canvas" >>= \x => pure Nothing
(Just (FromHTMLElement (FromHTMLCanvasElement cElm))) => pure $ Just cElm
(Just (FromHTMLElement (New _ self))) => htmlCanvasElementFromPointer self
(Just (New _ self)) => htmlCanvasElementFromPointer self
context : JS_IO $ Maybe RenderingContext
context = case !canvas of
Nothing => pure Nothing
(Just canvasElem) => getContext canvasElem "webgl"
|
Harbor Commissioners today appointed William Holsinger to complete Sally Campbell’s term on the Harbor Commission which runs through the end of this year. Holsinger is an attorney and lives in San Mateo. He ran unsuccessfully for Harbor Commission in 2004. The Commissioners interviewed a field of 8 applicants including two from the Midcoast. |
import category_theory.limits.shapes.pullbacks
namespace category_theory
open limits
noncomputable theory
variables {C : Type*} [category C] {X Y Z : C} (f : X ⟶ Y) (g : Y ⟶ Z) [has_pullback (f ≫ g) g]
def pullback.lift_comp :=
pullback.lift (𝟙 _) f (category.id_comp $ f ≫ g)
@[simp, reassoc]
lemma pullback.lift_comp_fst : pullback.lift_comp f g ≫ pullback.fst = 𝟙 _ :=
pullback.lift_fst _ _ _
lemma pullback.lift_comp_snd : pullback.lift_comp f g ≫ pullback.snd = f :=
pullback.lift_snd _ _ _
instance : is_split_mono (pullback.lift_comp f g) :=
⟨⟨⟨_, pullback.lift_comp_fst f g⟩⟩⟩
local attribute [instance] has_pullback_of_right_iso
lemma pullback.fst_lift_comp_has_pullback_aux :
has_pullback (f ≫ g) (𝟙 Y ≫ g) :=
by rwa category.id_comp
local attribute [instance] pullback.fst_lift_comp_has_pullback_aux
lemma pullback.fst_lift_comp :
pullback.fst ≫ pullback.lift_comp f g =
pullback.map_desc f (𝟙 _) g ≫ (pullback.congr_hom rfl (category.id_comp _)).hom :=
begin
apply pullback.hom_ext; simp only [pullback.congr_hom_hom, pullback.lift_fst,
pullback.lift_snd, category.assoc, category.comp_id, pullback.condition, pullback.lift_comp],
end
def pullback.lift_comp_iso_map_desc :
arrow.mk (pullback.lift_comp f g) ≅ arrow.mk (pullback.map_desc f (𝟙 _) g) :=
(arrow.iso_mk' _ _ (as_iso pullback.fst : _) (pullback.congr_hom rfl (category.id_comp _))
(pullback.fst_lift_comp f g)).symm
end category_theory |
\chapter{User-level Description}
\label{chap:userLevelDescription}
\section{Starting the Dash Line Plot Graphing Utility}
The application is distributed
\begin{itemize}
\item either as a Python script, i.e. \texttt{dash-lineplot.py},
\item or as an executable file, i.e. \texttt{dash-lineplot.exe}.
\end{itemize}
Double-clicking on any one of these files will start the application.
If the computer is not set up to associate \texttt{.py} files with the Python interpreter, open a Windows command prompt and type the following:
\footnotesize
\begin{lstlisting}
python dash-lineplot.py
\end{lstlisting}
\normalsize
Working in a command prompt window is recommended, as warning and error messages are published to the console; this can aid in tracing unexpected errors.
In an environment where the script was packaged as an executable, start the application from a console by typing
\footnotesize
\begin{lstlisting}
dash-lineplot.exe
\end{lstlisting}
\normalsize
An easy, and recommended, way of opening a command prompt at the correct folder location is to follow these steps:
\begin{itemize}
\item Open the File Explorer.
\item Navigate to the folder where the application Python script or executable is housed.
\item Click in the File Explorer address bar.
\item Type \texttt{cmd} into the address bar.
\item Hit enter.
\end{itemize}
Hint: See
\footnotesize
\url{https://www.howtogeek.com/235101/10-ways-to-open-the-command-prompt-in-windows-10/} \normalsize
for other ways to open a command prompt on Windows 10.
For ease of use a Windows batch script, \texttt{startPlotTool.bat}, is available in both environments. This script simply encapsulates the above commands:
\footnotesize
\begin{lstlisting}
@echo off
echo Starting the Dash Line Plot Graphing Tool ...
echo.
echo.
if [%1]==[] python dash-lineplot.py
if not [%1]==[] python dash-lineplot.py --configfile=%1
set /p DUMMY=done, hit ENTER to exit
\end{lstlisting}
\normalsize
or
\footnotesize
\begin{lstlisting}
@echo off
echo Starting the Dash Line Plot Graphing Tool ....
echo.
echo.
cd dash-lineplot
if [%1]==[] dash-lineplot.exe
if not [%1]==[] dash-lineplot.exe --configfile=%1
set /p DUMMY=done, hit ENTER to exit
\end{lstlisting}
\normalsize
Starting the script with no commandline arguments results in loading the default \texttt{dash-config.xlsx} configuration file. A configuration file name can be specified on the commandline, using any of the following options:
\footnotesize
\begin{lstlisting}
python dash-lineplot.py --configfile=anotherFileName.xlsx
dash-lineplot.exe --configfile=anotherFileName.xlsx
startPlotTool.bat anotherFileName.xlsx
\end{lstlisting}
\normalsize
Note that in the packaged version, the batch file that starts the application is distributed one folder level up from the actual distribution. This is done to give easy access to the script. The configuration file must be specified relative to the actual distribution folder. Figure~\ref{fig:folderdistEx} shows the position of the startup script relative to the distribution folder. In the script the directory is changed to the \texttt{dash-lineplot} folder and only then is the executable started. The configuration file is therefore always found on the path relative to the folder \texttt{dash-lineplot}.
\begin{figure}[h]
\centering
\includegraphics[width=0.5\textwidth]{pic/folderDistEx}
\caption{Distribution folder structure showing the location of the startup batch script.
\label{fig:folderdistEx}}
\end{figure}
\section{Setup and Usage}
\subsection{Configuration File and Page Layout}
Figures~\ref{fig:dashviewConfig1} and \ref{fig:dashviewConfig2} show the documentation sheet of the Excel plotting configuration file. A short description of variables are provided on this sheet. Refer to the information presented in these figures, no further details are provided in this document.
\begin{figure}[h]
\centering
\includegraphics[width=0.90\textwidth]{pic/dashview-config1}
\caption{Documentation sheet of the Excel plotting configuration file.
\label{fig:dashviewConfig1}}
\end{figure}
\begin{figure}[h]
\centering
\includegraphics[width=0.90\textwidth]{pic/dashview-config2}
\caption{Documentation sheet of the Excel plotting configuration file (continued).
\label{fig:dashviewConfig2}}
\end{figure}
The header sheet in the configuration file, see annotated Figure~\ref{fig:dashview-config-header}, provides general information published on each page of the display. The user can provide a data file name on this sheet for general use. Sheets can refer to this file with the keyword \texttt{master} in the \texttt{Datafile} \texttt{Value} entry.
\begin{figure}[h]
\centering
\includegraphics[width=0.90\textwidth]{pic/dashview-config-header}
\caption{Header sheet in the configuration file.
\label{fig:dashview-config-header}}
\end{figure}
Figures~\ref{fig:dashview-config-oscPL45} and \ref{fig:dashview-config-imuRates} are two example setup sheets specifying data configuration for plotting. The setup shown in Figure~\ref{fig:dashview-config-oscPL45} results in each set of traces to be plotted on a separate Plotly figure. The setup in Figure~\ref{fig:dashview-config-imuRates} makes use of Plotly subplots. Using subplots enables hover data on traces to be displayed simultaneously for all traces defined on the sheet, see example in Figure~\ref{fig:dashview-imuRates}. Exactly the same graph set plotted without the use of subplot is shown in Figure~\ref{fig:dashview-config-imuRates-noSubplots}.
\begin{figure}[h]
\centering
\includegraphics[width=\textwidth]{pic/dashview-config-oscPL45}
\caption{Example graph setup where subplots are not used, transparent markers are used and hover data format is specified.
\label{fig:dashview-config-oscPL45}}
\end{figure}
\begin{figure}[h]
\centering
\includegraphics[width=\textwidth]{pic/dashview-config-imuRates}
\caption{Example setup where subplots are used, the IMU rates are scaled, no markers are used and hover data format is specified.
\label{fig:dashview-config-imuRates}}
\end{figure}
Figure~\ref{fig:dashview-imuRates} links the entries in the configuration file (refer to Figure~\ref{fig:dashview-config-imuRates}) to the format of the graph page. Note the following:
\begin{itemize}
\item The tab name corresponds to the sheet name in the configuration file, omitting the leading \texttt{graph-} phrase.
\item The header and footer at the top and bottom of the page are from the \texttt{header} sheet.
\item The text entries just below and above these are the \texttt{GraphTop} and \texttt{GraphBottom} \texttt{Value} entries on the sheet.
\item Two sets of traces are defined on the sheet, using subplots. Usage of the subplot option enables hover data to be displayed on all traces simultaneously.
\item The hover data format is specified for the x-axis and two y-axis separately.
\item The subplot titles, y-axis and x-axis labels are as provided in the \texttt{graph-IMUrates} sheet of the configuration file.
\end{itemize}
The same data is displayed in Figure~\ref{fig:dashview-config-imuRates-noSubplots}, not using the subplot functionality.
\begin{figure}[h]
\centering
\includegraphics[width=\textwidth]{pic/dashview-imuRates}
\caption{Plot tab the IMU rates graph setup example, using subplots.
\label{fig:dashview-imuRates}}
\end{figure}
\begin{figure}[h]
\centering
\includegraphics[height=0.95\textheight]{pic/dashview-config-imuRates-noSubplots}
\caption{Plot tab the IMU rates graph setup example, not using subplots.
\label{fig:dashview-config-imuRates-noSubplots}}
\end{figure}
\clearpage
\subsection{Slider Usage}
An x-axis slider is available at the top of each page, covering the data set x-axis data range. Refer to Figure~\ref{fig:dashview-subPlot2Slider1}. The user sets the minimum and maximum value of the x-axis with the slider handles. The text box immediately displays the selected range. The increments at which the slider values change are controlled by the specification from the configuration file. The user however has to click on the current page tab before the graphs are redrawn to display only data in the selected x-axis range, see Figure~\ref{fig:dashview-subPlot2Slider2}. The reset button will reset the x-axis limits to that of the data set.
\begin{figure}[h]
\centering
\includegraphics[width=\textwidth]{pic/dashview-subPlot2Slider1}
\caption{The x-axis slider at the top of the page provides the capability to zoom the complete data set to a user selected x-range.
\label{fig:dashview-subPlot2Slider1}}
\end{figure}
\begin{figure}[h]
\centering
\includegraphics[width=\textwidth]{pic/dashview-subPlot2Slider2}
\caption{A click on the current page tab triggers the redraw of the page, using only data in the x-axis range selected by the user.
\label{fig:dashview-subPlot2Slider2}}
\end{figure}
\clearpage
The x-axis range can also be set by typing values in the text boxes just below the slider. Refer to Figure~\ref{fig:dashview-subPlot2Slider3}. The submit button records these selected values and activates the display of the selected range in the dedicated text box. A click on the tab will trigger an update to the graph data displayed.
\begin{figure}[h]
\centering
\includegraphics[width=\textwidth]{pic/dashview-subPlot2Slider3}
\caption{The user can specify the start and end values of the x-axis using the text boxes provided. The submit button will register the values for use when redrawing the graphs.
\label{fig:dashview-subPlot2Slider3}}
\end{figure}
\clearpage
\subsection{Range Measurements on Graphs}
Two methods are available to do a measurement on the graphs (see Figure~\ref{fig:dashview-rectangleDataSelect}):
\begin{description}
\item [Click Data:] The user can click on any trace to record the clicked point in the \texttt{Click Data} box below the relevant graph. When a second data point is clicked, the range in x and y are reflected in the display text box. This functionality is available for individual Plotly figures, as well as subplot figures. The click functionality can be used in conjunction with the standard Plotly zoom functionality.
\item [Rectangle Tool Selection Data:] The Plotly rectangle tool will only appear in the toolbar of the figure if markers are used for one or more traces in the figure. For large data sets, usage of this tool is not recommended since it slows down the drawing process. If the user prefers to use this tool, the opacity of the markers can be set to 0 in order to not clutter the graph. To measure using the rectangle tool, click on the tool in the toolbar, then draw the rectangle on the graph using the mouse. The top-left, bottom-right, and range in x and y are displayed in the \texttt{Rectangle Tool Selection Data} text box below the relevant graph. This functionality can also be used in conjunction with the Plotly zoom functionality.
\end{description}
\begin{figure}[h]
\centering
\includegraphics[width=\textwidth]{pic/dashview-rectangleDataSelect}
\caption{Using the Click and Rectangle tools to do measurements on the data set.
\label{fig:dashview-rectangleDataSelect}}
\end{figure}
|
State Before: α : Type ?u.837272
β : Type ?u.837275
a✝ b✝ c d : ℝ≥0∞
r p q : ℝ≥0
a b : ℝ≥0∞
⊢ ENNReal.toReal (a / b) = ENNReal.toReal a / ENNReal.toReal b State After: no goals Tactic: rw [div_eq_mul_inv, toReal_mul, toReal_inv, div_eq_mul_inv] |
A. Cramton's "False Illusions" is the first tale in the False series. Madison St. Cruix has it all, a nice condo, money to spend as she pleases, and the Heavyweight Boxing Champion of the World as her fiance. But things aren't always as they seem. She's very lonely in her condo in Vegas. Her fiance is gone a lot, and by a lot I mean 99% of the time. He says he's always training, but is he really? He can't really train 24/7, can he? He doesn't come home most nights so what is Madison supposed to think he's doing? When Madison goes out with her best friend and her boyfriend a man comes and sits with them, knowing the boyfriend of her best friend. Yoel is in Vegas for work. What kind of work is hidden when he meets Madison. He can't tell her he knows who she is and why he's in Vegas. Why is Yoel in Vegas? What does he really do for work? What happens to his plan when he gets to know Madison? Find out this and more in this awesome read!!! |
[STATEMENT]
lemma sets_sup: "sets A = sets M \<Longrightarrow> sets B = sets M \<Longrightarrow> sets (sup A B) = sets M"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>sets A = sets M; sets B = sets M\<rbrakk> \<Longrightarrow> sets (sup A B) = sets M
[PROOF STEP]
by (auto simp add: sup_measure_def sup_lexord_def dest: sets_eq_imp_space_eq) |
[STATEMENT]
lemma spec_imp_inv1: "spec s \<Longrightarrow> alw (holds InvGlobVacantUptoImpliesNrec) s"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. spec s \<Longrightarrow> alw (holds InvGlobVacantUptoImpliesNrec) s
[PROOF STEP]
by (metis (mono_tags, lifting) alw_iff_sdrop invs_imp_InvGlobVacantUptoImpliesNrec alw_InvGlobalIncomingInfoUpright alw_InvGlobalRecordCount alw_nrec_pos) |
Require Export Utf8_core.
Require Import HoTT TruncType.
Require Import hit.Connectedness hit.Truncations.
Set Universe Polymorphism.
Local Open Scope path_scope.
(* Local Open Scope equiv_scope. *)
Section Images.
(* Definition 3 *)
Definition Im {A B} (f : A -> B) := {b : B & Trunc (-1) (hfiber f b)}.
Definition toIm {A B} (f : A -> B) : A -> Im f := fun a => (f a; tr (a;idpath)).
Definition fromIm {A B} (f : A -> B) : Im f -> B := fun im => pr1 im.
End Images.
Section Embeddings.
(* In this section, we define IsEmbedding two other way, then prove that they are all three equivalent *)
Context `{fs: Funext}.
Definition IsMono {A B : Type} (f : A -> B)
:= forall x y, IsEquiv (ap f (x:=x) (y:=y)).
Definition IsMonof {A B : Type} (f : A -> B)
:= forall (X:Type) (x y : X -> A), IsEquiv (ap (fun u => f o u) (x:=x) (y:=y)).
Lemma IsEmbedding_IsMono {A B : Type} (f : A -> B)
: IsEmbedding f <-> IsMono f.
split.
- intros H x y.
apply isequiv_fcontr. intro q.
pose (Y := equiv_path_hfiber (x;1) (y;q^)); cbn in Y.
simple refine (contr_equiv' (∃ q0 : x = y, 1 = ap f q0 @ q^) _).
simple refine (equiv_functor_sigma_id _); intro a.
pose (ff:= (equiv_inverse (BuildEquiv _ _ _ (isequiv_moveL_pV q 1 (ap f a))))).
rewrite concat_1p in ff.
exact (equiv_compose' (equiv_path_inverse q (ap f a)) ff).
simple refine (@contr_equiv' ((x; 1) = (y; q^)) _ (equiv_inverse Y) _).
- intros H b x y; simpl. pose (Y:=equiv_path_hfiber x y).
simple refine (@contr_equiv' _ _ Y _).
pose (fcontr_isequiv (ap f) (H x.1 y.1) (x.2 @ y.2^)).
match goal with
|[ i : Contr ?AA |- Contr ?BB ] => assert (X: AA <~> BB)
end.
simple refine (equiv_functor_sigma_id _); intro a.
pose (ff:= (equiv_inverse (BuildEquiv _ _ _ (isequiv_moveL_pV y.2 (ap f a) x.2)))).
exact (equiv_compose' (equiv_path_inverse (ap f a @ y.2) x.2) ff).
simple refine (@contr_equiv' _ _ X _).
Qed.
Definition IsMonof_to_isMono {A B : Type} (f : A -> B) : IsMonof f -> IsMono f.
intro H. intros x y.
unfold IsMonof in H.
specialize (H A). specialize (H (fun _ => x) (fun _ => y)).
destruct H as [inv retr sect _].
simple refine (isequiv_adjointify _ _ _ _).
- intro H.
simple refine (apD10 (f := λ _, x) (g := λ _, y) (inv _) _).
apply path_forall; intro u; exact H. exact x.
- intro u.
etransitivity; try exact (ap10_ap_postcompose f (g:=(λ _ : A, x)) (g' := (λ _ : A, y)) (inv (path_forall (λ _ : A, f x) (λ _ : A, f y) (λ _ : A, u))) x)^.
rewrite retr.
unfold ap10. unfold path_forall.
rewrite eisretr.
reflexivity.
- intro u. destruct u; simpl in *.
rewrite path_forall_1.
apply (transport (fun u => ap10 u x = 1) (sect 1)^).
reflexivity.
Defined.
Definition IsMono_to_IsMonof {A B : Type} (f : A -> B) : IsMono f -> IsMonof f.
intro H.
intros X a b.
pose (φ := fun p => path_forall a b (fun x => equiv_inv (IsEquiv := H (a x) (b x)) (ap10 p x))).
apply isequiv_adjointify with (g:= φ).
- intro p.
unfold φ.
apply (@equiv_inj _ _ _ (isequiv_apD10 _ _ _ _)).
apply path_forall; intro u.
apply (transport (λ U, U = ap10 p u) (ap10_ap_postcompose f _ u)^).
unfold ap10 at 1, path_forall. rewrite eisretr. rewrite eisretr. reflexivity.
- intro p; unfold φ; destruct p. simpl.
pose (foo := path_forall _ _ (fun y => (@eissect _ _ _ (H (a y) (a y)) idpath))).
simpl in foo. rewrite foo.
apply path_forall_1.
Qed.
Definition apf_Mono {T U : Type} (f: T -> U) (fMono : IsMonof f) X (x y : X -> T) (e e' : x = y) :
ap (fun u => f o u) e = ap (fun u => f o u) e' -> e = e'.
intro.
rewrite <- (@eissect _ _ _ (fMono _ _ _) e).
rewrite <- (@eissect _ _ _ (fMono _ _ _) e'). exact (ap _ X0).
Defined.
Lemma compose_equiv {A B C D:Type} (φ : A -> B) (u v : B -> C) (f : C -> D)
(equiv_compose_φ : IsEquiv (ap (λ x, x o φ) (x:= f o u) (y := f o v)))
(Mono_f : IsMono f)
: IsEquiv (ap (λ x, x o φ) (x:=u) (y:=v)).
Proof.
pose (Monof_f := IsMono_to_IsMonof f Mono_f).
unfold IsMonof in *; simpl in *.
pose (e1 := (Monof_f B u v)).
pose (e2 := (equiv_compose_φ)).
pose (e3 := @isequiv_inverse _ _ _ (Monof_f A (u o φ) (v o φ))).
assert (X: ((ap (λ u0 : A → C, f o u0))^-1 o (ap (λ x : B → D, x o φ) o (ap (λ u0 : B → C, f o u0) (x:=u) (y:=v)))) = (ap (λ x : B → C, x o φ))).
apply path_forall; intro p.
apply (@equiv_inj _ _ _ (Monof_f A (u o φ) (v o φ))). rewrite eisretr.
destruct p; reflexivity.
destruct X. exact (@isequiv_compose _ _ _ (@isequiv_compose _ _ _ e1 _ _ e2) _ _ e3).
Qed.
End Embeddings.
Section Surjections.
(* Some lemmas about surjections *)
Context `{ua: Univalence}.
Context `{fs: Funext}.
Lemma IsSurjection_toIm (X Y:Type) (f:X -> Y)
: IsSurjection (toIm f).
apply BuildIsSurjection.
intros [b p]; generalize dependent p.
apply Trunc_ind.
intro a; apply istrunc_truncation.
intros [a p].
apply tr.
exists a. apply path_sigma' with p.
apply path_ishprop.
Defined.
Lemma epi_prod (W X Y Z:Type) (f:X -> Y) (g:W -> Z) (epif : IsSurjection f) (epig : IsSurjection g)
: IsSurjection (λ x, (f (fst x), g (snd x))).
Proof.
apply BuildIsSurjection.
intros [y z].
specialize (epif y); specialize (epig z).
generalize dependent (center _ (Contr_internal := epif)); apply Trunc_ind; intro x; try apply istrunc_truncation.
generalize dependent (center _ (Contr_internal := epig)); apply Trunc_ind; intro w; try apply istrunc_truncation.
apply tr.
exists (x.1,w.1). simpl.
apply path_prod; [exact x.2 | exact w.2].
Qed.
Lemma epi_two_out_of_three_1 (A B C:Type) (f:A -> B) (g:B -> C) (h : A -> C) (π : forall a, g (f a) = h a)
: IsSurjection f -> IsSurjection g -> IsSurjection h.
intros Ef Eg.
apply BuildIsSurjection.
intros c.
generalize dependent (@center _ (Eg c)).
apply Trunc_rec. intros [b p].
generalize dependent (@center _ (Ef b)).
apply Trunc_rec. intros [a q].
apply tr.
exists a.
rewrite <- (π a).
rewrite q.
exact p.
Qed.
Lemma epi_two_out_of_three_2 (A B C:Type) (f:A -> B) (g:B -> C) (h : A -> C) (π : forall a, g (f a) = h a)
: IsSurjection f -> IsSurjection h -> IsSurjection g.
intros Ef Eh.
apply BuildIsSurjection.
intros c.
generalize dependent (@center _ (Eh c)).
apply Trunc_rec. intros [a p].
apply tr.
exists (f a).
exact ((π a) @ p).
Qed.
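(* Dual to IsMonof: a map is an epimorphism when precomposition with it induces an equivalence on paths between maps out of its codomain. *)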
Definition IsEpi A B (f:A -> B)
:= forall C:Type, forall (x y : B -> C) , IsEquiv (ap (fun u => u o f) (x:=x) (y:=y)).
End Surjections.
|
module Flexidisc.OrdList.Fresh
import Flexidisc.Dec.IsYes
import Flexidisc.OrdList.Type
import Flexidisc.OrdList.Label
%default total
%access public export
||| Proof that a key is not in an `OrdList`
data Fresh : (l : key) -> (xs : OrdList key o value) -> Type where
||| It's always fresh when the `OrdList` is empty
Nil : Fresh l []
||| If it's not the head nor in the tail, then it's `Fresh`
(::) : Not (l = l') -> Fresh l xs -> Fresh l ((l',ty') :: xs)
%name Fresh fresh, prf, new
||| Decide whether a label is fresh or not
decFresh : DecEq key => (l : key) -> (xs : OrdList key o value) -> Dec (Fresh l xs)
decFresh l [] = Yes []
decFresh l ((l', ty) :: xs) with (decEq l l')
| (Yes prf) = No (\ (freshHead :: _) => freshHead prf)
| (No freshHere) with (decFresh l xs)
| (Yes freshThere) = Yes (freshHere :: freshThere)
| (No f) = No (\ (_ :: freshTail) => f freshTail)
||| Ensure that the result of `decFresh` is a `Yes` for a given label and a
||| given `OrdList`
IsFresh : DecEq key => (l : key) -> (xs : OrdList key o value) -> Type
IsFresh l xs = IsYes (decFresh l xs)
||| Changing a value doesn't impact the freshness of a label
freshOnValueChange : Fresh l xs -> Fresh l (changeValue xs loc new)
freshOnValueChange (f :: fresh) {loc = Here} = f :: fresh
freshOnValueChange (f :: fresh) {loc = (There later)} = f :: freshOnValueChange fresh
||| Changing values doesn't impact the freshness of a label
freshOnMapValues : (p : Fresh l xs) -> Fresh l (f <$> xs)
freshOnMapValues [] = []
freshOnMapValues (prf :: fresh) = prf :: freshOnMapValues fresh
||| We can't find a label that is `Fresh` in an `OrdList`
freshCantBeLabel : Fresh l xs -> Not (OrdLabel l xs)
freshCantBeLabel (f :: _ ) Here = f Refl
freshCantBeLabel (_ :: fresh) (There later) = freshCantBeLabel fresh later
||| If a label is not in a list, it is not in this list minus one element.
export
dropPreservesFresh : Fresh l xs -> Fresh l (dropLabel xs e)
dropPreservesFresh (f :: fresh) {e = Here} = fresh
dropPreservesFresh (f :: fresh) {e = (There e)} = f :: dropPreservesFresh fresh
||| If we can exhibit a `Fresh` value, we can build an `IsFresh` proof
isFreshFromEvidence : DecEq key => {l : key} -> (prf : Fresh l xs) -> IsFresh l xs
isFreshFromEvidence prf {l} {xs} with (decFresh l xs)
| (Yes _) = SoTrue
| (No contra) = absurd (contra prf)
||| If a label is not in an `OrdList`, it's not in the tail either
tailIsFresh : DecEq key => {l : key} -> IsFresh l (x :: xs) -> IsFresh l xs
tailIsFresh x = case getProof x of (f :: fresh) => isFreshFromEvidence fresh
|
[STATEMENT]
lemma assert_fun_dual: "((assert_fun p) o \<top>) \<sqinter> (dual_fun (assert_fun p)) = assert_fun p"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (assert_fun p \<circ> \<top>) \<sqinter> dual_fun (assert_fun p) = assert_fun p
[PROOF STEP]
by (simp add: fun_eq_iff inf_fun_def dual_fun_def o_def assert_fun_def top_fun_def inf_sup_distrib) |
[STATEMENT]
lemma fun_relD2:
assumes "(f,f')\<in>Ra\<rightarrow>Rr"
assumes "f' x' = r'"
shows "\<forall>x. (x,x')\<in>Ra \<longrightarrow> (f x,r')\<in>Rr"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>x. (x, x') \<in> Ra \<longrightarrow> (f x, r') \<in> Rr
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
(f, f') \<in> Ra \<rightarrow> Rr
f' x' = r'
goal (1 subgoal):
1. \<forall>x. (x, x') \<in> Ra \<longrightarrow> (f x, r') \<in> Rr
[PROOF STEP]
by (auto simp: fun_rel_def) |
import Control.Monad.Identity
import Control.Monad.Writer
diga : a -> Writer (List a) ()
diga a = tell [a]
f : String -> Writer (List String) Int
f s = do
diga s
pure 1
-- the second item of the writer is the log
-- snd $ runIdentity $ runWriterT (f "hello")
-- the first item of the writer is the return value
-- fst $ runIdentity $ runWriterT (f "hello")
--transicoes : Pi -> Writer (List String) Pi
--transicoes p = do
--  diga (show p)
--  if (controlStackIsEmpty p) then
--    pure p
--  else
--    transicoes (transicao p)
|
||| A Idris port of the prettyprinter library [1] and
||| the ANSI terminal backend [2].
|||
||| [1] https://hackage.haskell.org/package/prettyprinter
||| [2] https://hackage.haskell.org/package/prettyprinter-ansi-terminal
module Text.PrettyPrint.Prettyprinter
import public Text.PrettyPrint.Prettyprinter.Doc
import public Text.PrettyPrint.Prettyprinter.Symbols
%default total
|
{-# OPTIONS --cubical --no-import-sorts --safe #-}
open import Cubical.Core.Everything
open import Cubical.Relation.Binary.Raw
module Cubical.Relation.Binary.Reasoning.Base.Partial
{a ℓ} {A : Type a} (_∼_ : RawRel A ℓ) (transitive : Transitive _∼_)
where
open import Cubical.Foundations.Prelude
infix 4 _IsRelatedTo_
infix 2 _∎⟨_⟩
infixr 1 _∼⟨_⟩_ _≡⟨_⟩_ _≡˘⟨_⟩_
infixr 1 _≡⟨⟩_
infix 0 begin_
------------------------------------------------------------------------
-- Definition of "related to"
-- This seemingly unnecessary type is used to make it possible to
-- infer arguments even if the underlying equality evaluates.
data _IsRelatedTo_ (x y : A) : Type ℓ where
relTo : (x∼y : x ∼ y) → x IsRelatedTo y
------------------------------------------------------------------------
-- Reasoning combinators
-- Beginning of a proof
begin_ : ∀ {x y} → x IsRelatedTo y → x ∼ y
begin relTo x∼y = x∼y
-- Standard step with the relation
_∼⟨_⟩_ : ∀ x {y z} → x ∼ y → y IsRelatedTo z → x IsRelatedTo z
_ ∼⟨ x∼y ⟩ relTo y∼z = relTo (transitive x∼y y∼z)
-- Step with a non-trivial propositional equality
_≡⟨_⟩_ : ∀ x {y z} → x ≡ y → y IsRelatedTo z → x IsRelatedTo z
_≡⟨_⟩_ x {_} {z} x≡y y∼z = J (λ w _ → w IsRelatedTo z) y∼z (sym x≡y)
-- Step with a flipped non-trivial propositional equality
_≡˘⟨_⟩_ : ∀ x {y z} → y ≡ x → y IsRelatedTo z → x IsRelatedTo z
x ≡˘⟨ y≡x ⟩ y∼z = x ≡⟨ sym y≡x ⟩ y∼z
-- Step with a trivial propositional equality
_≡⟨⟩_ : ∀ x {y} → x IsRelatedTo y → x IsRelatedTo y
_ ≡⟨⟩ x∼y = x∼y
-- Syntax for path definition
≡⟨⟩-syntax : ∀ x {y z : A} → x ≡ y → y IsRelatedTo z → x IsRelatedTo z
≡⟨⟩-syntax = _≡⟨_⟩_
infixr 1 ≡⟨⟩-syntax
syntax ≡⟨⟩-syntax x (λ i → B) y = x ≡[ i ]⟨ B ⟩ y
≡˘⟨⟩-syntax : ∀ x {y z : A} → y ≡ x → y IsRelatedTo z → x IsRelatedTo z
≡˘⟨⟩-syntax = _≡˘⟨_⟩_
infixr 1 ≡˘⟨⟩-syntax
syntax ≡˘⟨⟩-syntax x (λ i → B) y = x ≡˘[ i ]⟨ B ⟩ y
-- Termination step
_∎⟨_⟩ : ∀ x → x ∼ x → x IsRelatedTo x
_ ∎⟨ x∼x ⟩ = relTo x∼x
|
(* Title: Program Correctness Component Based on Kleene Algebra with Tests
Author: Victor Gomes, Georg Struth
Maintainer: Victor Gomes <[email protected]>
Georg Struth <[email protected]>
*)
section \<open>Two Standalone Components\<close>
theory VC_KAT_scratch
imports Main
begin
subsection \<open>Component Based on Kleene Algebra with Tests\<close>
text \<open>This component supports the verification and step-wise refinement of simple while programs
in a partial correctness setting.\<close>
subsubsection \<open>KAT: Definition and Basic Properties\<close>
notation times (infixl "\<cdot>" 70)
class plus_ord = plus + ord +
assumes less_eq_def: "x \<le> y \<longleftrightarrow> x + y = y"
and less_def: "x < y \<longleftrightarrow> x \<le> y \<and> x \<noteq> y"
class dioid = semiring + one + zero + plus_ord +
assumes add_idem [simp]: "x + x = x"
and mult_onel [simp]: "1 \<cdot> x = x"
and mult_oner [simp]: "x \<cdot> 1 = x"
and add_zerol [simp]: "0 + x = x"
and annil [simp]: "0 \<cdot> x = 0"
and annir [simp]: "x \<cdot> 0 = 0"
begin
subclass monoid_mult
by (standard, simp_all)
subclass order
apply (standard, simp_all add: less_def less_eq_def add_commute)
apply force
by (metis add_assoc)
lemma mult_isol: "x \<le> y \<Longrightarrow> z \<cdot> x \<le> z \<cdot> y"
by (metis distrib_left less_eq_def)
lemma mult_isor: "x \<le> y \<Longrightarrow> x \<cdot> z \<le> y \<cdot> z"
by (metis distrib_right less_eq_def)
lemma add_iso: "x \<le> y \<Longrightarrow> x + z \<le> y + z"
by (metis (no_types, lifting) abel_semigroup.commute add.abel_semigroup_axioms add.semigroup_axioms add_idem less_eq_def semigroup.assoc)
lemma add_lub: "x + y \<le> z \<longleftrightarrow> x \<le> z \<and> y \<le> z"
by (metis add_assoc add.left_commute add_idem less_eq_def)
end
class kleene_algebra = dioid +
fixes star :: "'a \<Rightarrow> 'a" ("_\<^sup>\<star>" [101] 100)
assumes star_unfoldl: "1 + x \<cdot> x\<^sup>\<star> \<le> x\<^sup>\<star>"
and star_unfoldr: "1 + x\<^sup>\<star> \<cdot> x \<le> x\<^sup>\<star>"
and star_inductl: "z + x \<cdot> y \<le> y \<Longrightarrow> x\<^sup>\<star> \<cdot> z \<le> y"
and star_inductr: "z + y \<cdot> x \<le> y \<Longrightarrow> z \<cdot> x\<^sup>\<star> \<le> y"
begin
lemma star_sim: "x \<cdot> y \<le> z \<cdot> x \<Longrightarrow> x \<cdot> y\<^sup>\<star> \<le> z\<^sup>\<star> \<cdot> x"
proof -
assume "x \<cdot> y \<le> z \<cdot> x"
hence "x + z\<^sup>\<star> \<cdot> x \<cdot> y \<le> x + z\<^sup>\<star> \<cdot> z \<cdot> x"
by (metis add_lub distrib_left eq_refl less_eq_def mult_assoc)
also have "... \<le> z\<^sup>\<star> \<cdot> x"
using add_lub mult_isor star_unfoldr by fastforce
finally show ?thesis
by (simp add: star_inductr)
qed
end
class kat = kleene_algebra +
fixes at :: "'a \<Rightarrow> 'a"
assumes test_one [simp]: "at (at 1) = 1"
and test_mult [simp]: "at (at (at (at x) \<cdot> at (at y))) = at (at y) \<cdot> at (at x)"
and test_mult_comp [simp]: "at x \<cdot> at (at x) = 0"
and test_de_morgan: "at x + at y = at (at (at x) \<cdot> at (at y))"
begin
definition t_op :: "'a \<Rightarrow> 'a" ("t_" [100] 101) where
"t x = at (at x)"
lemma t_n [simp]: "t (at x) = at x"
by (metis add_idem test_de_morgan test_mult t_op_def)
lemma t_comm: "t x \<cdot> t y = t y \<cdot> t x"
by (metis add_commute test_de_morgan test_mult t_op_def)
lemma t_idem [simp]: "t x \<cdot> t x = t x"
by (metis add_idem test_de_morgan test_mult t_op_def)
lemma t_mult_closed [simp]: "t (t x \<cdot> t y) = t x \<cdot> t y"
using t_comm t_op_def by auto
subsubsection\<open>Propositional Hoare Logic\<close>
definition H :: "'a \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> bool" where
"H p x q \<longleftrightarrow> t p \<cdot> x \<le> x \<cdot> t q"
definition if_then_else :: "'a \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> 'a" ("if _ then _ else _ fi" [64,64,64] 63) where
"if p then x else y fi = t p \<cdot> x + at p \<cdot> y"
definition while :: "'a \<Rightarrow> 'a \<Rightarrow> 'a" ("while _ do _ od" [64,64] 63) where
"while p do x od = (t p \<cdot> x)\<^sup>\<star> \<cdot> at p"
definition while_inv :: "'a \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> 'a" ("while _ inv _ do _ od" [64,64,64] 63) where
"while p inv i do x od = while p do x od"
lemma H_skip: "H p 1 p"
by (simp add: H_def)
lemma H_cons: "t p \<le> t p' \<Longrightarrow> t q' \<le> t q \<Longrightarrow> H p' x q' \<Longrightarrow> H p x q"
by (meson H_def mult_isol mult_isor order.trans)
lemma H_seq: "H r y q \<Longrightarrow> H p x r \<Longrightarrow> H p (x \<cdot> y) q"
proof -
assume h1: "H p x r" and h2: "H r y q"
hence h3: "t p \<cdot> x \<le> x \<cdot> t r" and h4: "t r \<cdot> y \<le> y \<cdot> t q"
using H_def apply blast using H_def h2 by blast
hence "t p \<cdot> x \<cdot> y \<le> x \<cdot> t r \<cdot> y"
using mult_isor by blast
also have "... \<le> x \<cdot> y \<cdot> t q"
by (simp add: h4 mult_isol mult_assoc)
finally show ?thesis
by (simp add: H_def mult_assoc)
qed
lemma H_cond: "H (t p \<cdot> t r) x q \<Longrightarrow> H (t p \<cdot> at r) y q \<Longrightarrow> H p (if r then x else y fi) q"
proof -
assume h1: "H (t p \<cdot> t r) x q" and h2: "H (t p \<cdot> at r) y q"
hence h3: "t r \<cdot> t p \<cdot> t r \<cdot> x \<le> t r \<cdot> x \<cdot> t q" and h4: "at r \<cdot> t p \<cdot> at r \<cdot> y \<le> at r \<cdot> y \<cdot> t q"
by (simp add: H_def mult_isol mult_assoc, metis H_def h2 mult_isol mult_assoc t_mult_closed t_n)
hence h5: "t p \<cdot> t r \<cdot> x \<le> t r \<cdot> x \<cdot> t q" and h6: "t p \<cdot> at r \<cdot> y \<le> at r \<cdot> y \<cdot> t q"
by (simp add: mult_assoc t_comm, metis h4 mult_assoc t_comm t_idem t_n)
have "t p \<cdot> (t r \<cdot> x + at r \<cdot> y) = t p \<cdot> t r \<cdot> x + t p \<cdot> at r \<cdot> y"
by (simp add: distrib_left mult_assoc)
also have "... \<le> t r \<cdot> x \<cdot> t q + t p \<cdot> at r \<cdot> y"
using h5 add_iso by blast
also have "... \<le> t r \<cdot> x \<cdot> t q + at r \<cdot> y \<cdot> t q"
by (simp add: add_commute h6 add_iso)
finally show ?thesis
by (simp add: H_def if_then_else_def distrib_right)
qed
lemma H_loop: "H (t p \<cdot> t r) x p \<Longrightarrow> H p (while r do x od) (t p \<cdot> at r)"
proof -
assume "H (t p \<cdot> t r) x p"
hence "t r \<cdot> t p \<cdot> t r \<cdot> x \<le> t r \<cdot> x \<cdot> t p"
by (metis H_def distrib_left less_eq_def mult_assoc t_mult_closed)
hence "t p \<cdot> t r \<cdot> x \<le> t r \<cdot> x \<cdot> t p"
by (simp add: mult_assoc t_comm)
hence "t p \<cdot> (t r \<cdot> x)\<^sup>\<star> \<cdot> at r \<le> (t r \<cdot> x)\<^sup>\<star> \<cdot> t p \<cdot> at r"
by (metis mult_isor star_sim mult_assoc)
hence "t p \<cdot> (t r \<cdot> x)\<^sup>\<star> \<cdot> at r \<le> (t r \<cdot> x)\<^sup>\<star> \<cdot> at r \<cdot> t p \<cdot> at r"
by (metis mult_assoc t_comm t_idem t_n)
thus ?thesis
by (metis H_def mult_assoc t_mult_closed t_n while_def)
qed
lemma H_while_inv: "t p \<le> t i \<Longrightarrow> t i \<cdot> at r \<le> t q \<Longrightarrow> H (t i \<cdot> t r) x i \<Longrightarrow> H p (while r inv i do x od) q"
by (metis H_cons H_loop t_mult_closed t_n while_inv_def)
end
subsubsection\<open>Soundness and Relation KAT\<close>
notation relcomp (infixl ";" 70)
interpretation rel_d: dioid Id "{}" "(\<union>)" "(;)" "(\<subseteq>)" "(\<subset>)"
by (standard, auto)
lemma (in dioid) power_inductl: "z + x \<cdot> y \<le> y \<Longrightarrow> x ^ i \<cdot> z \<le> y"
apply (induct i; clarsimp simp add: add_lub)
by (metis local.dual_order.trans local.mult_isol mult_assoc)
lemma (in dioid) power_inductr: "z + y \<cdot> x \<le> y \<Longrightarrow> z \<cdot> x ^ i \<le> y"
apply (induct i; clarsimp simp add: add_lub)
proof -
fix i
assume "z \<cdot> x ^ i \<le> y" "z \<le> y" "y \<cdot> x \<le> y"
hence "(z \<cdot> x ^ i) \<cdot> x \<le> y"
using local.dual_order.trans local.mult_isor by blast
thus "z \<cdot> (x \<cdot> x ^ i) \<le> y"
by (simp add: mult_assoc local.power_commutes)
qed
lemma power_is_relpow: "rel_d.power X i = X ^^ i"
by (induct i, simp_all add: relpow_commute)
lemma rel_star_def: "X^* = (\<Union>i. rel_d.power X i)"
by (simp add: power_is_relpow rtrancl_is_UN_relpow)
lemma rel_star_contl: "X ; Y^* = (\<Union>i. X ; rel_d.power Y i)"
by (simp add: rel_star_def relcomp_UNION_distrib)
lemma rel_star_contr: "X^* ; Y = (\<Union>i. (rel_d.power X i) ; Y)"
by (simp add: rel_star_def relcomp_UNION_distrib2)
definition rel_at :: "'a rel \<Rightarrow> 'a rel" where
"rel_at X = Id \<inter> - X"
interpretation rel_kat: kat Id "{}" "(\<union>)" "(;)" "(\<subseteq>)" "(\<subset>)" rtrancl rel_at
apply standard
apply auto[2]
by (auto simp: rel_star_contr rel_d.power_inductl rel_star_contl SUP_least rel_d.power_inductr rel_at_def)
subsubsection\<open>Embedding Predicates in Relations\<close>
type_synonym 'a pred = "'a \<Rightarrow> bool"
abbreviation p2r :: "'a pred \<Rightarrow> 'a rel" ("\<lceil>_\<rceil>") where
"\<lceil>P\<rceil> \<equiv> {(s,s) |s. P s}"
lemma t_p2r [simp]: "rel_kat.t_op \<lceil>P\<rceil> = \<lceil>P\<rceil>"
by (auto simp add: rel_kat.t_op_def rel_at_def)
lemma p2r_neg_hom [simp]: "rel_at \<lceil>P\<rceil> = \<lceil>\<lambda>s. \<not> P s\<rceil>"
by (auto simp: rel_at_def)
lemma p2r_conj_hom [simp]: "\<lceil>P\<rceil> \<inter> \<lceil>Q\<rceil> = \<lceil>\<lambda>s. P s \<and> Q s\<rceil>"
by auto
lemma p2r_conj_hom_var [simp]: "\<lceil>P\<rceil> ; \<lceil>Q\<rceil> = \<lceil>\<lambda>s. P s \<and> Q s\<rceil>"
by auto
lemma p2r_disj_hom [simp]: "\<lceil>P\<rceil> \<union> \<lceil>Q\<rceil> = \<lceil>\<lambda>s. P s \<or> Q s\<rceil>"
by auto
lemma impl_prop [simp]: "\<lceil>P\<rceil> \<subseteq> \<lceil>Q\<rceil> \<longleftrightarrow> (\<forall>s. P s \<longrightarrow> Q s)"
by auto
subsubsection \<open>Store and Assignment\<close>
type_synonym 'a store = "string \<Rightarrow> 'a"
definition gets :: "string \<Rightarrow> ('a store \<Rightarrow> 'a) \<Rightarrow> 'a store rel" ("_ ::= _" [70, 65] 61) where
"v ::= e = {(s, s(v := e s)) |s. True}"
lemma H_assign: "rel_kat.H \<lceil>\<lambda>s. P (s (v := e s))\<rceil> (v ::= e) \<lceil>P\<rceil>"
by (auto simp: gets_def rel_kat.H_def rel_kat.t_op_def rel_at_def)
lemma H_assign_var: "(\<forall>s. P s \<longrightarrow> Q (s (v := e s))) \<Longrightarrow> rel_kat.H \<lceil>P\<rceil> (v ::= e) \<lceil>Q\<rceil>"
by (auto simp: gets_def rel_kat.H_def rel_kat.t_op_def rel_at_def)
abbreviation H_sugar :: "'a pred \<Rightarrow> 'a rel \<Rightarrow> 'a pred \<Rightarrow> bool" ("PRE _ _ POST _" [64,64,64] 63) where
"PRE P X POST Q \<equiv> rel_kat.H \<lceil>P\<rceil> X \<lceil>Q\<rceil>"
abbreviation if_then_else_sugar :: "'a pred \<Rightarrow> 'a rel \<Rightarrow> 'a rel \<Rightarrow> 'a rel" ("IF _ THEN _ ELSE _ FI" [64,64,64] 63) where
"IF P THEN X ELSE Y FI \<equiv> rel_kat.if_then_else \<lceil>P\<rceil> X Y"
abbreviation while_inv_sugar :: "'a pred \<Rightarrow> 'a pred \<Rightarrow> 'a rel \<Rightarrow> 'a rel" ("WHILE _ INV _ DO _ OD" [64,64,64] 63) where
"WHILE P INV I DO X OD \<equiv> rel_kat.while_inv \<lceil>P\<rceil> \<lceil>I\<rceil> X"
subsubsection \<open>Verification Example\<close>
lemma euclid:
"PRE (\<lambda>s::nat store. s ''x'' = x \<and> s ''y'' = y)
(WHILE (\<lambda>s. s ''y'' \<noteq> 0) INV (\<lambda>s. gcd (s ''x'') (s ''y'') = gcd x y)
DO
(''z'' ::= (\<lambda>s. s ''y''));
(''y'' ::= (\<lambda>s. s ''x'' mod s ''y''));
(''x'' ::= (\<lambda>s. s ''z''))
OD)
POST (\<lambda>s. s ''x'' = gcd x y)"
apply (rule rel_kat.H_while_inv, simp_all, clarsimp)
apply (intro rel_kat.H_seq)
apply (subst H_assign, simp)+
apply (rule H_assign_var)
using gcd_red_nat by auto
subsubsection \<open>Definition of Refinement KAT\<close>
class rkat = kat +
fixes R :: "'a \<Rightarrow> 'a \<Rightarrow> 'a"
assumes R1: "H p (R p q) q"
and R2: "H p x q \<Longrightarrow> x \<le> R p q"
begin
subsubsection \<open>Propositional Refinement Calculus\<close>
lemma R_skip: "1 \<le> R p p"
by (simp add: H_skip R2)
lemma R_cons: "t p \<le> t p' \<Longrightarrow> t q' \<le> t q \<Longrightarrow> R p' q' \<le> R p q"
by (simp add: H_cons R2 R1)
lemma R_seq: "(R p r) \<cdot> (R r q) \<le> R p q"
using H_seq R2 R1 by blast
lemma R_cond: "if v then (R (t v \<cdot> t p) q) else (R (at v \<cdot> t p) q) fi \<le> R p q"
by (metis H_cond R1 R2 t_comm t_n)
lemma R_loop: "while q do (R (t p \<cdot> t q) p) od \<le> R p (t p \<cdot> at q)"
by (simp add: H_loop R2 R1)
end
subsubsection \<open>Soundness and Relation RKAT\<close>
definition rel_R :: "'a rel \<Rightarrow> 'a rel \<Rightarrow> 'a rel" where
"rel_R P Q = \<Union>{X. rel_kat.H P X Q}"
interpretation rel_rkat: rkat Id "{}" "(\<union>)" "(;)" "(\<subseteq>)" "(\<subset>)" rtrancl rel_at rel_R
by (standard, auto simp: rel_R_def rel_kat.H_def rel_kat.t_op_def rel_at_def)
subsubsection \<open>Assignment Laws\<close>
lemma R_assign: "(\<forall>s. P s \<longrightarrow> Q (s (v := e s))) \<Longrightarrow> (v ::= e) \<subseteq> rel_R \<lceil>P\<rceil> \<lceil>Q\<rceil>"
by (simp add: H_assign_var rel_rkat.R2)
lemma R_assignr: "(\<forall>s. Q' s \<longrightarrow> Q (s (v := e s))) \<Longrightarrow> (rel_R \<lceil>P\<rceil> \<lceil>Q'\<rceil>) ; (v ::= e) \<subseteq> rel_R \<lceil>P\<rceil> \<lceil>Q\<rceil>"
proof -
assume a1: "\<forall>s. Q' s \<longrightarrow> Q (s(v := e s))"
have "\<forall>p pa cs f. \<exists>fa. (p fa \<or> cs ::= f \<subseteq> rel_R \<lceil>p\<rceil> \<lceil>pa\<rceil>) \<and> (\<not> pa (fa(cs := f fa::'a)) \<or> cs ::= f \<subseteq> rel_R \<lceil>p\<rceil> \<lceil>pa\<rceil>)"
using R_assign by blast
hence "v ::= e \<subseteq> rel_R \<lceil>Q'\<rceil> \<lceil>Q\<rceil>"
using a1 by blast
thus ?thesis
by (meson dual_order.trans rel_d.mult_isol rel_rkat.R_seq)
qed
lemma R_assignl: "(\<forall>s. P s \<longrightarrow> P' (s (v := e s))) \<Longrightarrow> (v ::= e) ; (rel_R \<lceil>P'\<rceil> \<lceil>Q\<rceil>) \<subseteq> rel_R \<lceil>P\<rceil> \<lceil>Q\<rceil>"
proof -
assume a1: "\<forall>s. P s \<longrightarrow> P' (s(v := e s))"
have "\<forall>p pa cs f. \<exists>fa. (p fa \<or> cs ::= f \<subseteq> rel_R \<lceil>p\<rceil> \<lceil>pa\<rceil>) \<and> (\<not> pa (fa(cs := f fa::'a)) \<or> cs ::= f \<subseteq> rel_R \<lceil>p\<rceil> \<lceil>pa\<rceil>)"
using R_assign by blast
then have "v ::= e \<subseteq> rel_R \<lceil>P\<rceil> \<lceil>P'\<rceil>"
using a1 by blast
then show ?thesis
by (meson dual_order.trans rel_d.mult_isor rel_rkat.R_seq)
qed
subsubsection \<open>Refinement Example\<close>
lemma var_swap_ref1:
"rel_R \<lceil>\<lambda>s. s ''x'' = a \<and> s ''y'' = b\<rceil> \<lceil>\<lambda>s. s ''x'' = b \<and> s ''y'' = a\<rceil>
\<supseteq> (''z'' ::= (\<lambda>s. s ''x'')); rel_R \<lceil>\<lambda>s. s ''z'' = a \<and> s ''y'' = b\<rceil> \<lceil>\<lambda>s. s ''x'' = b \<and> s ''y'' = a\<rceil>"
by (rule R_assignl, auto)
lemma var_swap_ref2:
"rel_R \<lceil>\<lambda>s. s ''z'' = a \<and> s ''y'' = b\<rceil> \<lceil>\<lambda>s. s ''x'' = b \<and> s ''y'' = a\<rceil>
\<supseteq> (''x'' ::= (\<lambda>s. s ''y'')); rel_R \<lceil>\<lambda>s. s ''z'' = a \<and> s ''x'' = b\<rceil> \<lceil>\<lambda>s. s ''x'' = b \<and> s ''y'' = a\<rceil>"
by (rule R_assignl, auto)
lemma var_swap_ref3:
"rel_R \<lceil>\<lambda>s. s ''z'' = a \<and> s ''x'' = b\<rceil> \<lceil>\<lambda>s. s ''x'' = b \<and> s ''y'' = a\<rceil>
\<supseteq> (''y'' ::= (\<lambda>s. s ''z'')); rel_R \<lceil>\<lambda>s. s ''x'' = b \<and> s ''y'' = a\<rceil> \<lceil>\<lambda>s. s ''x'' = b \<and> s ''y'' = a\<rceil>"
by (rule R_assignl, auto)
lemma var_swap_ref_var:
"rel_R \<lceil>\<lambda>s. s ''x'' = a \<and> s ''y'' = b\<rceil> \<lceil>\<lambda>s. s ''x'' = b \<and> s ''y'' = a\<rceil>
\<supseteq> (''z'' ::= (\<lambda>s. s ''x'')); (''x'' ::= (\<lambda>s. s ''y'')); (''y'' ::= (\<lambda>s. s ''z''))"
using var_swap_ref1 var_swap_ref2 var_swap_ref3 rel_rkat.R_skip by fastforce
end
|
(* Title: HOL/Auth/n_german_lemma_on_inv__8.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The n_german Protocol Case Study*}
theory n_german_lemma_on_inv__8 imports n_german_base
begin
section{*All lemmas on causal relation between inv__8 and some rule r*}
lemma n_SendInvEVsinv__8:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendInvE i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__8 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendInvE i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__8 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendInvSVsinv__8:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendInvS i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__8 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendInvS i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__8 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendInvAckVsinv__8:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendInvAck i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__8 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendInvAck i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__8 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendGntSVsinv__8:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendGntS i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__8 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendGntS i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__8 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendGntEVsinv__8:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendGntE N i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__8 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendGntE N i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__8 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Ident ''ExGntd'')) (Const false)) (neg (eqn (IVar (Ident ''MemData'')) (IVar (Ident ''AuxData''))))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_RecvGntSVsinv__8:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntS i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__8 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvGntS i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__8 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_RecvGntEVsinv__8:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntE i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__8 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvGntE i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__8 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_StoreVsinv__8:
assumes a1: "(\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__8 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i d where a1:"i\<le>N\<and>d\<le>N\<and>r=n_Store i d" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__8 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Para (Ident ''Chan2'') p__Inv4) ''Cmd'')) (Const GntE)) (eqn (IVar (Field (Para (Ident ''Cache'') p__Inv4) ''State'')) (Const E))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Para (Ident ''Cache'') i) ''State'')) (Const E)) (eqn (IVar (Field (Para (Ident ''Chan2'') p__Inv4) ''Cmd'')) (Const GntE))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendReqESVsinv__8:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendReqES i" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__8 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_RecvInvAckVsinv__8:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_RecvInvAck i" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__8 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_RecvReqVsinv__8:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_RecvReq N i" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__8 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendReqSVsinv__8:
assumes a1: "\<exists> j. j\<le>N\<and>r=n_SendReqS j" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__8 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendReqEIVsinv__8:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendReqEI i" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__8 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
end
|
#include <gbpLib.h>
#include <gbpMisc.h>
#include <gsl/gsl_math.h>
#include <gsl/gsl_errno.h>
#include <gsl/gsl_eigen.h>
#include <gsl/gsl_spline.h>
void compute_triaxiality(double *x_in,
double *y_in,
double *z_in,
double x_cen,
double y_cen,
double z_cen,
double box_size,
int n_particles,
size_t *sort_index,
double return_values[3],
double return_vectors[3][3]) {
double s, s_new;
double q, q_new;
double a_new;
double b_new;
double c_new;
int i, j;
double M_tmp;
double x_tmp;
double y_tmp;
double z_tmp;
double * m_p;
double * x;
double * y;
double * z;
double inv_q2;
double inv_s2;
double inv_r2, r2;
double M[9];
int n_iterations;
int continue_flag;
double convergence = 0.0001;
int n_iterations_max = 200;
gsl_vector * eigen_vector;
gsl_matrix * eigen_matrix;
gsl_eigen_symmv_workspace *w;
gsl_matrix_view m;
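    // Strategy: iteratively compute the reduced (1/r^2-weighted) inertia
    // tensor inside an ellipsoidal window with the current axis ratios
    // q = b/a and s = c/a (where r^2 = x^2 + y^2/q^2 + z^2/s^2),
    // diagonalize it, update q and s from the eigenvalues, and repeat until
    // both ratios change by less than 'convergence' (or 'n_iterations_max'
    // is reached).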
// Initialize a bunch of stuff
a_new = 1.;
b_new = 1.;
c_new = 1.;
q_new = 1.;
s_new = 1.;
return_vectors[0][0] = 1.;
return_vectors[1][0] = 0.;
return_vectors[2][0] = 0.;
return_vectors[0][1] = 0.;
return_vectors[1][1] = 1.;
return_vectors[2][1] = 0.;
return_vectors[0][2] = 0.;
return_vectors[1][2] = 0.;
return_vectors[2][2] = 1.;
if(n_particles > 0)
continue_flag = GBP_TRUE;
else
continue_flag = GBP_FALSE;
eigen_vector = gsl_vector_alloc(3);
eigen_matrix = gsl_matrix_alloc(3, 3);
w = gsl_eigen_symmv_alloc(3);
m_p = (double *)SID_malloc(sizeof(double) * n_particles);
x = (double *)SID_malloc(sizeof(double) * n_particles);
y = (double *)SID_malloc(sizeof(double) * n_particles);
z = (double *)SID_malloc(sizeof(double) * n_particles);
if(sort_index != NULL) {
for(i = 0; i < n_particles; i++) {
m_p[i] = 1.;
x[i] = d_periodic((double)x_in[sort_index[i]] - x_cen, box_size);
y[i] = d_periodic((double)y_in[sort_index[i]] - y_cen, box_size);
z[i] = d_periodic((double)z_in[sort_index[i]] - z_cen, box_size);
}
} else {
for(i = 0; i < n_particles; i++) {
m_p[i] = 1.;
x[i] = d_periodic((double)x_in[i] - x_cen, box_size);
y[i] = d_periodic((double)y_in[i] - y_cen, box_size);
z[i] = d_periodic((double)z_in[i] - z_cen, box_size);
}
}
// Iterate until convergence
n_iterations = 0;
while(continue_flag) {
q = q_new;
s = s_new;
inv_q2 = 1. / (q * q);
inv_s2 = 1. / (s * s);
// Construct the moment of inertia tensor
for(i = 0; i < 9; i++)
M[i] = 0.;
for(i = 0; i < n_particles; i++) {
x_tmp = x[i];
y_tmp = y[i];
z_tmp = z[i];
M_tmp = m_p[i];
r2 = x_tmp * x_tmp + y_tmp * y_tmp * inv_q2 + z_tmp * z_tmp * inv_s2;
if(r2 > 0.) {
inv_r2 = 1. / r2;
M[0] += x_tmp * x_tmp * M_tmp * inv_r2;
M[1] += y_tmp * x_tmp * M_tmp * inv_r2;
M[2] += z_tmp * x_tmp * M_tmp * inv_r2;
M[3] += x_tmp * y_tmp * M_tmp * inv_r2;
M[4] += y_tmp * y_tmp * M_tmp * inv_r2;
M[5] += z_tmp * y_tmp * M_tmp * inv_r2;
M[6] += x_tmp * z_tmp * M_tmp * inv_r2;
M[7] += y_tmp * z_tmp * M_tmp * inv_r2;
M[8] += z_tmp * z_tmp * M_tmp * inv_r2;
}
}
// Solve for (and sort) the eigen values and eigen vectors
m = gsl_matrix_view_array(M, 3, 3);
gsl_eigen_symmv(&m.matrix, eigen_vector, eigen_matrix, w);
gsl_eigen_symmv_sort(eigen_vector, eigen_matrix, GSL_EIGEN_SORT_ABS_DESC);
// Convert gsl vectors and such into something simpler to use
for(i = 0; i < 3; i++) {
return_values[i] = sqrt(gsl_vector_get(eigen_vector, i));
return_vectors[i][0] = gsl_matrix_get(eigen_matrix, 0, i);
return_vectors[i][1] = gsl_matrix_get(eigen_matrix, 1, i);
return_vectors[i][2] = gsl_matrix_get(eigen_matrix, 2, i);
}
q_new = return_values[1] / return_values[0];
s_new = return_values[2] / return_values[0];
// Check for convergence
n_iterations++;
if(n_iterations >= n_iterations_max)
continue_flag = GBP_FALSE;
if((double)fabs((float)((q_new - q) / q)) < convergence && (double)fabs((float)((s_new - s) / s)) < convergence)
continue_flag = GBP_FALSE;
}
// Clean-up
gsl_eigen_symmv_free(w);
gsl_vector_free(eigen_vector);
gsl_matrix_free(eigen_matrix);
SID_free((void **)(&x));
SID_free((void **)(&y));
SID_free((void **)(&z));
SID_free((void **)(&m_p));
}
|
(* Title: HOL/Auth/n_germanSymIndex_lemma_inv__47_on_rules.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The n_germanSymIndex Protocol Case Study*}
theory n_germanSymIndex_lemma_inv__47_on_rules imports n_germanSymIndex_lemma_on_inv__47
begin
section{*All lemmas on causal relation between inv__47*}
lemma lemma_inv__47_on_rules:
assumes b1: "r \<in> rules N" and b2: "(\<exists> p__Inv0 p__Inv2. p__Inv0\<le>N\<and>p__Inv2\<le>N\<and>p__Inv0~=p__Inv2\<and>f=inv__47 p__Inv0 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
proof -
have c1: "(\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendReqS i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendReqE__part__0 i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendReqE__part__1 i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvReqS N i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvReqE N i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendInv__part__0 i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendInv__part__1 i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendInvAck i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvInvAck i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendGntS i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendGntE N i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvGntS i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvGntE i)"
apply (cut_tac b1, auto) done
moreover {
assume d1: "(\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_StoreVsinv__47) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendReqS i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendReqSVsinv__47) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendReqE__part__0 i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendReqE__part__0Vsinv__47) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendReqE__part__1 i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendReqE__part__1Vsinv__47) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvReqS N i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvReqSVsinv__47) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvReqE N i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvReqEVsinv__47) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInv__part__0 i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendInv__part__0Vsinv__47) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInv__part__1 i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendInv__part__1Vsinv__47) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInvAck i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendInvAckVsinv__47) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvInvAck i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvInvAckVsinv__47) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendGntS i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendGntSVsinv__47) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendGntE N i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendGntEVsinv__47) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntS i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvGntSVsinv__47) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntE i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvGntEVsinv__47) done
}
ultimately show "invHoldForRule s f r (invariants N)"
by satx
qed
end
|
lemma sigma_finite_measure_restrict_space: assumes "sigma_finite_measure M" and A: "A \<in> sets M" shows "sigma_finite_measure (restrict_space M A)" |
# Domain types which are registered/unregistered with TypeTools.
# Note that the types have to be quoted (in addition to the quotes one would
# normally place on certain types) to work properly.
local DomBoundVar_type := proc(nm)
local ixs := [indices(Domain:-ExtBound, nolist)], vs;
ormap(i->type(nm,Domain:-ExtBound[i]:-VarType), ixs);
end proc;
local DomBoundRange_type := proc(nm)
local ixs := [indices(Domain:-ExtBound, nolist)], vs;
ormap(i->type(nm,Domain:-ExtBound[i]:-RangeType), ixs);
end proc;
local DomainTypes := table(
# Domain bounds
[(DomBoundVar = 'thismodule:-DomBoundVar_type')
,(DomBoundRange = 'thismodule:-DomBoundRange_type')
,(DomBoundBinder = ''DInto(DomBoundVar, DomBoundRange, DomBoundKind)'' )
,(DomBoundKind = 'And(name, satisfies(nm->assigned(Domain:-ExtBound[nm])))' )
,(DomBound = ''And(specfunc(DBound)
,{anyfunc(list(DomBoundBinder))
,anyfunc(list(DomBoundBinder),DomCtx)
,anyfunc(list(DomBoundBinder),DomCtx,table) })'' )
# Domain shape
,(DomConstrain = 'specfunc(relation, `DConstrain`)' )
,(DomSum = 'specfunc(DomShape, `DSum`)' )
,(DomSplit = ''DSplit(Partition(DomShape))'' )
,(DomInto = ''Or(DInto(DomBoundVar, DomBoundRange, DomShape)
,DInto(DomBoundVar, DomShape) )'' )
,(DomShape = 'Or( DomConstrain, DomSum, DomSplit, DomInto )' )
# Domain
,(DomCtx = ''t_kb'')
,(HDomain = ''DOMAIN(DomBound, DomShape)'' )
# Maybe domain
,(DomNoSol = ''Not(freeof(`DNoSol`))'' )
,(HDomain_mb = ''Or(HDomain, DOMAIN(DomBound, DomNoSol))'' )
] );
|
[STATEMENT]
lemma card_carrier_vec: "card (carrier_vec n:: 'b::finite vec set) = CARD('b) ^ n"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. card (carrier_vec n) = CARD('b) ^ n
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. card (carrier_vec n) = CARD('b) ^ n
[PROOF STEP]
let ?A = "UNIV::'b set"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. card (carrier_vec n) = CARD('b) ^ n
[PROOF STEP]
let ?B = "{xs. set xs \<subseteq> ?A \<and> length xs = n}"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. card (carrier_vec n) = CARD('b) ^ n
[PROOF STEP]
let ?C = "(carrier_vec n:: 'b::finite vec set)"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. card (carrier_vec n) = CARD('b) ^ n
[PROOF STEP]
have "card ?C = card ?B"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. card (carrier_vec n) = card {xs. set xs \<subseteq> UNIV \<and> length xs = n}
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. card (carrier_vec n) = card {xs. set xs \<subseteq> UNIV \<and> length xs = n}
[PROOF STEP]
have "bij_betw (list_of_vec) ?C ?B"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. bij_betw list_of_vec (carrier_vec n) {xs. set xs \<subseteq> UNIV \<and> length xs = n}
[PROOF STEP]
proof (unfold bij_betw_def, auto)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. inj_on list_of_vec (carrier_vec n)
2. \<And>x. n = length x \<Longrightarrow> x \<in> list_of_vec ` carrier_vec (length x)
[PROOF STEP]
show "inj_on list_of_vec (carrier_vec n)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. inj_on list_of_vec (carrier_vec n)
[PROOF STEP]
by (rule inj_on_list_of_vec)
[PROOF STATE]
proof (state)
this:
inj_on list_of_vec (carrier_vec n)
goal (1 subgoal):
1. \<And>x. n = length x \<Longrightarrow> x \<in> list_of_vec ` carrier_vec (length x)
[PROOF STEP]
fix x::"'b list"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. n = length x \<Longrightarrow> x \<in> list_of_vec ` carrier_vec (length x)
[PROOF STEP]
assume n: "n = length x"
[PROOF STATE]
proof (state)
this:
n = length x
goal (1 subgoal):
1. \<And>x. n = length x \<Longrightarrow> x \<in> list_of_vec ` carrier_vec (length x)
[PROOF STEP]
thus "x \<in> list_of_vec ` carrier_vec (length x)"
[PROOF STATE]
proof (prove)
using this:
n = length x
goal (1 subgoal):
1. x \<in> list_of_vec ` carrier_vec (length x)
[PROOF STEP]
unfolding image_def
[PROOF STATE]
proof (prove)
using this:
n = length x
goal (1 subgoal):
1. x \<in> {y. \<exists>x\<in>carrier_vec (length x). y = list_of_vec x}
[PROOF STEP]
by auto (rule bexI[of _ "vec_of_list x"], auto)
[PROOF STATE]
proof (state)
this:
x \<in> list_of_vec ` carrier_vec (length x)
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
bij_betw list_of_vec (carrier_vec n) {xs. set xs \<subseteq> UNIV \<and> length xs = n}
goal (1 subgoal):
1. card (carrier_vec n) = card {xs. set xs \<subseteq> UNIV \<and> length xs = n}
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
bij_betw list_of_vec (carrier_vec n) {xs. set xs \<subseteq> UNIV \<and> length xs = n}
goal (1 subgoal):
1. card (carrier_vec n) = card {xs. set xs \<subseteq> UNIV \<and> length xs = n}
[PROOF STEP]
using bij_betw_same_card
[PROOF STATE]
proof (prove)
using this:
bij_betw list_of_vec (carrier_vec n) {xs. set xs \<subseteq> UNIV \<and> length xs = n}
bij_betw ?f ?A ?B \<Longrightarrow> card ?A = card ?B
goal (1 subgoal):
1. card (carrier_vec n) = card {xs. set xs \<subseteq> UNIV \<and> length xs = n}
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
card (carrier_vec n) = card {xs. set xs \<subseteq> UNIV \<and> length xs = n}
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
card (carrier_vec n) = card {xs. set xs \<subseteq> UNIV \<and> length xs = n}
goal (1 subgoal):
1. card (carrier_vec n) = CARD('b) ^ n
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
card (carrier_vec n) = card {xs. set xs \<subseteq> UNIV \<and> length xs = n}
goal (1 subgoal):
1. card (carrier_vec n) = CARD('b) ^ n
[PROOF STEP]
have "... = card ?A ^ n"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. card {xs. set xs \<subseteq> UNIV \<and> length xs = n} = CARD('b) ^ n
[PROOF STEP]
by (rule card_lists_length_eq, simp)
[PROOF STATE]
proof (state)
this:
card {xs. set xs \<subseteq> UNIV \<and> length xs = n} = CARD('b) ^ n
goal (1 subgoal):
1. card (carrier_vec n) = CARD('b) ^ n
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
card (carrier_vec n) = CARD('b) ^ n
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
card (carrier_vec n) = CARD('b) ^ n
goal (1 subgoal):
1. card (carrier_vec n) = CARD('b) ^ n
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
card (carrier_vec n) = CARD('b) ^ n
goal:
No subgoals!
[PROOF STEP]
qed |
If $0 \leq x \leq 1$, then the sequence $x^n$ converges. |
-- @@stderr --
dtrace: failed to compile script test/unittest/arithmetic/err.D_DIV_ZERO.divby0.d: [D_DIV_ZERO] line 19: expression contains division by zero
|
{-# OPTIONS --cubical --no-import-sorts --safe #-}
module Cubical.Structures.Subtype where
open import Cubical.Foundations.Prelude
open import Cubical.Foundations.HLevels
open import Cubical.Data.NatPlusOne
open import Cubical.Data.Nat
open import Cubical.Data.Sigma
private
variable
ℓ ℓ' : Level
-- The type of Subtypes of a type
Subtype : {ℓ : Level} → (ℓ' : Level) → Type ℓ → Type (ℓ-max ℓ (ℓ-suc ℓ'))
Subtype ℓ' A = A → (hProp ℓ')
-- Coercion from Subtype to Type
Subtype→Type : {ℓ ℓ' : Level} {A : Type ℓ} (B : Subtype ℓ' A) → Type (ℓ-max ℓ ℓ')
Subtype→Type {A = A} B = Σ[ a ∈ A ] (fst (B a))
-- if A has hLevel n > 0 then so do all of its subtypes
subtypePreservesHLevel : {ℓ ℓ' : Level} {A : Type ℓ} {n : ℕ₊₁} (p : isOfHLevel (ℕ₊₁→ℕ n) A) (B : Subtype ℓ' A) → isOfHLevel (ℕ₊₁→ℕ n) (Subtype→Type B)
subtypePreservesHLevel {n = one} p B = isPropΣ p λ a → snd (B a)
subtypePreservesHLevel {n = 1+ (suc n)} p B = isOfHLevelΣ (suc (suc n)) p λ a → isProp→isOfHLevelSuc (suc n) (snd (B a))
-- if two terms x and y of the original type A are equal by q, and p and p' witness that x , y are in
-- the subtype then p ≡ p' over q
subtypeWitnessIrrelevance : {A : Type ℓ} (B : Subtype ℓ' A) {xp yp : Subtype→Type B} (q : fst xp ≡ fst yp) → xp ≡ yp
subtypeWitnessIrrelevance B q = Σ≡Prop (λ a → B a .snd) q
|
(*
Copyright (C) 2017 M.A.L. Marques
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
*)
(* type: gga_exc *)
$define xc_dimensions_2d
_2d_b88_beta := 0.007:
_2d_b88_csi := 8: (* for harmonic potentials *)
_2d_b88_f := x -> 1 + _2d_b88_beta/X_FACTOR_2D_C*x^2/(1 + _2d_b88_csi*_2d_b88_beta*x*arcsinh(x)):
f := (rs, zeta, xt, xs0, xs1) -> gga_exchange(_2d_b88_f, rs, zeta, xs0, xs1):
|
# Clustering of univariate data via Dirichlet Process Mixture
This is a continuation of 'estimate_univ_density'. Make sure to check it before going through this tutorial!
```python
import numpy as np
import matplotlib.pyplot as plt
from pybmix.core.mixing import DirichletProcessMixing, StickBreakMixing
from pybmix.core.hierarchy import UnivariateNormal
from pybmix.core.mixture_model import MixtureModel
np.random.seed(2021)
```
## DP and clustering
Recall that $\tilde p \sim DP(\alpha, G_0)$ means that $\tilde p = \sum_{h=1}^\infty w_h \delta_{\tau_h}$ with $\{w_h\}_h \sim GEM(\alpha)$ and $\{\tau_h\}_h \sim G_0$. Hence, realizations from a DP are almost surely discrete probability measures.
Hence, sampling
\begin{equation}
\begin{aligned}
\theta_1, \ldots, \theta_n | \tilde{p} & \sim \tilde{p} \\
\tilde{p} &\sim DP(\alpha, G_0)
\end{aligned}
\end{equation}
entails that with positive probability $\theta_i = \theta_j$ (with $i \neq j$). In a sample of size $n$ there will be $k \leq n$ unique values $\theta^*_1, \ldots, \theta^*_k$ among the $\theta_i$'s, and clusters are defined as $C_j = \{i : \theta_i = \theta^*_j \}$.
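For intuition, here is a minimal sketch (plain numpy, independent of pybmix; `sample_crp` is just an illustrative helper) that samples $\theta_1, \ldots, \theta_n$ marginally from a DP via the Chinese restaurant process. The number of unique values it produces is typically much smaller than $n$:
```python
def sample_crp(alpha, n_data, seed=0):
    rng = np.random.default_rng(seed)
    thetas = []
    for i in range(n_data):
        # a new atom from the base measure G0 = N(0, 1) w.p. alpha / (alpha + i)
        if rng.uniform() < alpha / (alpha + i):
            thetas.append(rng.normal())
        # otherwise a tie with a previous value, chosen proportionally to cluster sizes
        else:
            thetas.append(thetas[rng.integers(i)])
    return np.array(thetas)

thetas = sample_crp(alpha=5, n_data=200)
print("unique values among 200 draws:", len(np.unique(thetas)))
```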
When considering a mixture model, the $\theta_i$'s are not observations but latent variables. In the case of a univariate normal mixture model, $\theta_i = (\mu_i, \sigma^2_i)$ and the model can be written as
\begin{equation}
\begin{aligned}
y_i | \theta_i = (\mu_i, \sigma^2_i) &\sim \mathcal N(\mu_i, \sigma^2_i) \\
\theta_1, \ldots, \theta_n | \tilde{p} &\sim \tilde{p} \\
\tilde{p} &\sim DP(\alpha, G_0)
\end{aligned}
\end{equation}
and the clustering among the observations $y_i$'s is inherited from the clustering among the $\theta_i$'s.
Let's go back to the previous example
```python
def sample_from_mixture(weights, means, sds, n_data):
    n_comp = len(weights)
    clus_alloc = np.random.choice(np.arange(n_comp), p=weights, size=n_data)
return np.random.normal(loc=means[clus_alloc], scale=sds[clus_alloc])
y = sample_from_mixture(
np.array([0.5, 0.5]), np.array([-3, 3]), np.array([1, 1]), 200)
mixing = DirichletProcessMixing(total_mass=5)
hierarchy = UnivariateNormal()
hierarchy.make_default_fixed_params(y, 2)
mixture = MixtureModel(mixing, hierarchy)
mixture.run_mcmc(y, algorithm="Neal2", niter=2000, nburn=1000)
```
We can extract the cluster allocation MCMC chain very easily
```python
mcmc_chain = mixture.get_chain()
cluster_alloc_chain = mcmc_chain.extract("cluster_allocs")
print(cluster_alloc_chain.shape)
```
cluster_alloc_chain is a matrix of shape [niter - nburn, ndata].
To get the posterior distribution of the number of clusters, we count in each row the number of unique values
```python
n_clust_chain = np.apply_along_axis(lambda x: len(np.unique(x)), 1,
cluster_alloc_chain)
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 4))
axes[0].vlines(np.arange(len(n_clust_chain)), n_clust_chain - 0.3, n_clust_chain + 0.3)
axes[0].set_title("Traceplot")
clusgrid = np.arange(1, 10)
probas = np.zeros_like(clusgrid)
for i, c in enumerate(clusgrid):
probas[i] = np.sum(n_clust_chain == c)
probas = probas / np.sum(probas)
axes[1].bar(clusgrid, probas)
axes[1].set_xticks(clusgrid)
axes[1].set_title("Posterior number of clusters")
plt.show()
```
Let's inspect two iterations: the first one and the last one, and look at the cluster allocations of the first 5 observations
```python
print("First iteration: ", cluster_alloc_chain[0][:5])
print("Last iteration: ", cluster_alloc_chain[-1][:5])
```
Observe that the clusterings are identical: one cluster is made of observations $\{1, 2, 5\}$ and the other of observations $\{3, 4\}$. However, the labels associated to each cluster differ across iterations: in the first iteration, $\{1, 2, 5\}$ is the first cluster (the 0th cluster) and $\{3, 4\}$ is the second one, while in the last iteration the opposite happens.
This is due to the so-called "label switching" problem. Usually, to interpret the clustering result, a suitable point estimate is chosen by minimizing a loss function over the space of clusterings.
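One relabeling-invariant summary that such point estimates are typically built from is the posterior similarity matrix, whose $(i, j)$ entry estimates the posterior probability that observations $i$ and $j$ belong to the same cluster. A minimal sketch, assuming `cluster_alloc_chain` is the numpy array extracted above (it materializes a `[niter - nburn, ndata, ndata]` boolean array, so the chain must be small enough to fit in memory):
```python
# [T, n, 1] == [T, 1, n] broadcasts to a [T, n, n] boolean array
same_cluster = cluster_alloc_chain[:, :, None] == cluster_alloc_chain[:, None, :]
psm = same_cluster.mean(axis=0)
order = np.argsort(y)  # sort by data value to reveal the block structure
plt.imshow(psm[order][:, order], vmin=0, vmax=1)
plt.colorbar()
plt.show()
```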
```python
from pybmix.estimators.cluster_estimator import ClusterEstimator
clus_est = ClusterEstimator(mixture)
best_clust = clus_est.get_point_estimate()
plt.hist(y, density=True, alpha=0.3)
for cluster_idx in clus_est.group_by_cluster(best_clust):
data = y[cluster_idx]
plt.scatter(data, np.zeros_like(data) + 5e-3)
plt.show()
```
Note how the posterior mode of the number of clusters is 3, while the point estimate for the best clustering consists of 2 clusters.
|
Formal statement is: lemma arg_bounded: "- pi < arg z \<and> arg z \<le> pi" Informal statement is: The argument of a complex number lies in the interval $(-\pi, \pi]$. |
[STATEMENT]
lemma pmf_K_st: "pmf (K_st d s) t = \<integral>a. pmf (K(s, a)) t \<partial>d s"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. pmf (K_st d s) t = measure_pmf.expectation (d s) (\<lambda>a. pmf (K (s, a)) t)
[PROOF STEP]
unfolding K_st_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. pmf (d s \<bind> (\<lambda>a. K (s, a))) t = measure_pmf.expectation (d s) (\<lambda>a. pmf (K (s, a)) t)
[PROOF STEP]
by (subst pmf_bind) auto |
module Control.Monad.Coop
import public System.Time
import Data.List
import Data.Queue1
import Data.SortedMap
import Data.SortedSet
import public Data.Zippable
import public Control.Applicative.Concurrent
import Control.Monad.Coop.Sync
import public Control.Monad.Spawn
import Control.Monad.State
import Control.Monad.State.Tuple
import public Control.Monad.Trans
%default total
------------
--- Data ---
------------
data SyncKind = Join | Race
export
data Coop : (m : Type -> Type) -> (a : Type) -> Type where
Point : m a -> Coop m a
Sequential : Coop m a -> (a -> Coop m b) -> Coop m b
Interleaved : Coop m a -> Coop m b -> Coop m (a, b)
Racing : Coop m a -> Coop m a -> Coop m a
RaceFence : (prevRaceSync : Maybe $ Sync Race) -> Coop m Unit
DelayedTill : Time -> Coop m Unit
Spawn : Coop m Unit -> Coop m Unit
Empty : Coop m a
-----------------------
--- Implementations ---
-----------------------
export
Timed m => Timed (Coop m) where
currentTime = Point currentTime
export
Applicative m => Functor (Coop m) where
map f (Point a) = Point (map f a)
map f (Sequential a b) = Sequential a $ \ar => map f $ b ar
map f x@(Interleaved _ _) = Sequential x $ Point . pure . f
map f x@(Racing _ _) = Sequential x $ Point . pure . f
map f x@(RaceFence _) = Sequential x $ Point . pure . f
map f x@(DelayedTill _) = Sequential x $ Point . pure . f
map f x@(Spawn _) = Sequential x $ Point . pure . f
map _ Empty = Empty
export
Applicative m => Applicative (Coop m) where
pure = Point . pure
l <*> r = Sequential l (<$> r)
-- This could be `(<*>) = Interleaved <&> uncurry apply`, but it must be consistent with the `(>>=)` definition.
-- Consider the code `doSmth *> sleepFor 100 *> doMore` compared to `(doSmth `zip` sleepFor 100) *> doMore`.
-- With parallel semantics for the `Applicative`'s `<*>`, the two examples above would mean the same thing, which seems unexpected.
-- We have a specially named instance `Conc` (exposed as `Concurrent`) below for that case.
export
race : Applicative m => Coop m a -> Coop m b -> Coop m $ Either a b
race l r = Racing (l <&> Left) (r <&> Right)
export
Applicative m => Alternative (Coop m) where
-- `empty` computation is like an infinite computation (i.e. no computation goes *after* it and its result cannot be analysed),
-- but in contrast, if it is the only what is left during the execution, computation simply finishes.
empty = Empty
l <|> r = l `Racing` r
export
Applicative m => Monad (Coop m) where
(>>=) = Sequential
export
Applicative m => Zippable (Coop m) where
zip = Interleaved
zipWith f = map (uncurry f) .: Interleaved
zip3 a b c = a `Interleaved` (b `Interleaved` c)
zipWith3 f a b c = zip3 a b c <&> \(x, y, z) => f x y z
unzipWith f ab = (fst . f <$> ab, snd . f <$> ab)
unzipWith3 f abc = (fst . f <$> abc, fst . snd . f <$> abc, snd . snd . f <$> abc)
[Conc] Applicative m => Applicative (Coop m) where
pure = Point . pure
(<*>) = zipWith apply
export
Applicative m => ConcurrentApplicative (Coop m) where
Concurrent = Conc
export
Timed m => Applicative m => CanSleep (Coop m) where
sleepTill = DelayedTill
export
Applicative m => CanSpawn (Coop m) where
-- Runs the given computation in parallel with the monadic continuation.
-- In contrast with `zip`, the continuation executes immediately, without waiting for the end of the spawned computation.
-- The spawned computation will continue to run (if it needs to) even if the continuation has ended.
-- For example, running the following code
--
-- ```idris
-- x : HasIO m => Coop m Nat
-- x = do
-- spawn $ do
-- sleepFor 4.seconds
-- putStrLn "spawned"
-- putStrLn "main"
-- pure 1
-- ```
--
-- will result in returning `1` as the computation result **and** printing "spawned" four seconds after running the whole computation `x`.
spawn = Spawn
export
HasIO (Coop IO) where
liftIO = Point
export
MonadTrans Coop where
lift = Point
-------------------
--- Interpreter ---
-------------------
--- Data types describing discrete events ---
data LeftOrRight = Left | Right
record Event (m : Type -> Type) where
constructor Ev
time : Time
coop : Coop m actionRetTy
-- Two postponed events present with the same sync are meant to block each other.
-- A postponed event needs to be scheduled only when all events with its sync are over.
-- The `Sync` type is a comparable type and is a workaround for the uncomparability of `Coop`.
joinSync : Maybe (Sync Join, LeftOrRight)
raceSync : Maybe $ Sync Race
--- List of events ---
0 Events : (Type -> Type) -> Type
Events = SortedMap Time . Queue1 . Event
insertTimed : Event m -> Events m -> Events m
insertTimed ev evs = insert ev.time (maybe (singleton ev) (add ev) (lookup ev.time evs)) evs
-- Must be equivalent to `insertTimed ev empty`
singleEvent : Event m -> Events m
singleEvent ev = singleton ev.time $ singleton ev
addEvents : MonadState (Events m) n => Event m -> List (Event m -> Event m) -> n Unit
addEvents ev = modify . foldl (\acc, modF => acc . insertTimed (modF ev)) id
-- Particular case for `addEvents ev [modF]`
addEvent : MonadState (Events m) n => Event m -> (Event m -> Event m) -> n Unit
addEvent ev modF = modify $ insertTimed $ modF ev
-- Particular case for `addEvents ev [modF1, modF2]`
addEvent2 : MonadState (Events m) n => Event m -> (Event m -> Event m) -> (Event m -> Event m) -> n Unit
addEvent2 ev modF1 modF2 = modify $ insertTimed (modF1 ev) . insertTimed (modF2 ev)
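-- Picks the earliest event (by time) from the map, paired with a lazily computed map of the remaining events.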
earliestEvent : Events m -> Maybe (Event m, Lazy (Events m))
earliestEvent evs = leftMost evs <&> \(t, tEvs) =>
let (currEv, restTEvs) = remove tEvs in
(currEv,) $ maybe (delete t evs) (\r => insert t r evs) restTEvs
filterEvents : (Event m -> Bool) -> Events m -> Events m
filterEvents f = fromList . mapMaybe (\(t, evs) => (t,) <$> filter f evs) . SortedMap.toList
--- Join synchronisation stuff ---
record Postponed (m : Type -> Type) where
constructor Postpone
postCoop : (contLTy, contRTy) -> Coop m contRetTy
postJoinSync : Maybe (Sync Join, LeftOrRight)
-- This postponed continuation is waiting for two executions.
-- When one of them is completed, the result should be present in this field.
completedHalf : Maybe completedHalfTy
0 JoinSyncs : (Type -> Type) -> Type
JoinSyncs = SortedMap (Sync Join) . Postponed
--- Race synchronisation stuff ---
-- Map from one race sync to all child syncs (i.e. those which are cancelled when a series with the parent sync finished)
0 RaceSyncs : Type
RaceSyncs = SortedMap (Sync Race) $ List $ Sync Race
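-- Transitive closure of the lookup relation starting from `x` (inclusive).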
transitiveLookup : Foldable f => Ord a => SortedMap a (f a) -> a -> SortedSet a
transitiveLookup mp x = let x1 = singleton x in go x1 x1 where
go : (curr : SortedSet a) -> (new : SortedSet a) -> SortedSet a
go curr new = if null new then curr else do
let allNexts = fromList $ SortedSet.toList new >>= maybe [] toList . flip SortedMap.lookup mp
let nextNew = allNexts `difference` curr
assert_total $ go (curr `union` nextNew) nextNew -- first argument is growing and has maximum bound (all `a` in the `mp`)
--- The run loop ---
%inline
runEvent : Monad m => MonadTrans t => Monad (t m) =>
MonadState (Events m) (t m) =>
MonadState (JoinSyncs m) (t m) =>
MonadState RaceSyncs (t m) =>
Event m -> t m Unit
runEvent ev = case ev.coop of
Point x => lift x >>= awakePostponed
Sequential lhs f => case lhs of
Point x => lift x >>= \r => addEvent ev {coop := f r}
Sequential x g => addEvent ev {coop := Sequential x $ g >=> f}
DelayedTill d => addEvent ev {time := d, coop := f ()}
Spawn s => addEvent2 ev {coop := s, joinSync := Nothing} {coop := f ()}
Interleaved l r => do uniqueSync <- newUniqueSync <$> get
modify $ insert uniqueSync $ Postpone f ev.joinSync $ Nothing {ty=Unit}
addEvent2 ev
{coop := l, joinSync := Just (uniqueSync, Left )}
{coop := r, joinSync := Just (uniqueSync, Right)}
RaceFence prevS => finishRaces *> addEvent ev {coop := f (), raceSync := prevS}
Racing Empty r => addEvent ev {coop := r >>= f}
Racing l Empty => addEvent ev {coop := l >>= f}
Racing l r => do uniqueSync <- newUniqueSync <$> get
modify $ insert uniqueSync [] -- to prevent generation of the same sync
whenJust ev.raceSync $ \parent => modify $ merge $ singleton parent [uniqueSync]
addEvent2 ev
{coop := l >>= (RaceFence ev.raceSync *>) . f, raceSync := Just uniqueSync}
{coop := r >>= (RaceFence ev.raceSync *>) . f, raceSync := Just uniqueSync}
Empty => pure ()
nonSeqNonPoint => addEvent ev {coop := nonSeqNonPoint >>= pure} -- manage as `Sequential _ Point`
where
awakePostponed : forall a. a -> t m Unit
awakePostponed myHalf =
whenJust ev.joinSync $ \(sy, iAmLOrR) => do
syncs <- get
whenJust (SortedMap.lookup sy syncs) $ \pp =>
case pp.completedHalf of
Just theirHalf => do
let awakenCoop = pp.postCoop $ case iAmLOrR of
Left => believe_me (myHalf, theirHalf)
Right => believe_me (theirHalf, myHalf)
addEvent ev {coop := awakenCoop, joinSync := pp.postJoinSync}
put $ delete sy syncs
Nothing =>
put $ insert sy ({completedHalf := Just myHalf} pp) syncs
finishRaces : t m Unit
finishRaces = whenJust ev.raceSync $ \currRaceSync => do
raceSyncs <- get
let syncsToRemove = transitiveLookup raceSyncs currRaceSync
modify $ filterEvents $ maybe True (not . flip contains syncsToRemove) . raceSync
put $ foldl (flip SortedMap.delete) raceSyncs syncsToRemove
export covering
runCoop : CanSleep m => Monad m => Coop m Unit -> m Unit
runCoop co = do
let initEvents = singleEvent $ Ev !currentTime co Nothing Nothing
initJoinSyncs : JoinSyncs m = empty
initRaceSyncs : RaceSyncs = empty
evalStateT (initEvents, initJoinSyncs, initRaceSyncs) runLeftEvents where
runLeftEvents : MonadTrans t => Monad (t m) =>
MonadState (Events m) (t m) =>
MonadState (JoinSyncs m) (t m) =>
MonadState RaceSyncs (t m) =>
t m Unit
runLeftEvents =
whenJust (earliestEvent !get) $ \(currEv, restEvs) => do
currTime <- lift currentTime
if currTime >= currEv.time
then put restEvs *> runEvent ({time := currTime} currEv)
else lift $ sleepTill currEv.time -- TODO to support and perform permanent tasks
runLeftEvents
|
{-# OPTIONS --safe --experimental-lossy-unification #-}
module Cubical.Algebra.Group.EilenbergMacLane.CupProduct where
open import Cubical.Algebra.Group.EilenbergMacLane.Base renaming (elim to EM-elim)
open import Cubical.Algebra.Group.EilenbergMacLane.WedgeConnectivity
open import Cubical.Algebra.Group.EilenbergMacLane.GroupStructure
open import Cubical.Algebra.Group.EilenbergMacLane.Properties
open import Cubical.Algebra.Group.Base
open import Cubical.Algebra.Group.Properties
open import Cubical.Foundations.Prelude
open import Cubical.Foundations.Isomorphism
open import Cubical.Foundations.Equiv
open import Cubical.Foundations.GroupoidLaws renaming (assoc to ∙assoc)
open import Cubical.Foundations.Path
open import Cubical.Foundations.HLevels
open import Cubical.Foundations.Pointed
open import Cubical.Foundations.Transport
open import Cubical.Foundations.Pointed.Homogeneous
open import Cubical.Functions.Morphism
open import Cubical.Homotopy.Loopspace
open import Cubical.HITs.Truncation as Trunc renaming (rec to trRec; elim to trElim)
open import Cubical.HITs.EilenbergMacLane1 renaming (rec to EMrec)
open import Cubical.Algebra.AbGroup.Base
open import Cubical.Data.Empty
renaming (rec to ⊥-rec)
open import Cubical.HITs.Truncation
renaming (elim to trElim ; rec to trRec ; rec2 to trRec2)
open import Cubical.Data.Nat hiding (_·_) renaming (elim to ℕelim)
open import Cubical.HITs.Susp
open import Cubical.Algebra.AbGroup.TensorProduct
open import Cubical.Algebra.Group
open AbGroupStr renaming (_+_ to _+Gr_ ; -_ to -Gr_)
open PlusBis
private
variable
ℓ ℓ' ℓ'' : Level
-- Lemma for distributivity of cup product (used later)
pathType : ∀ {ℓ} {G : AbGroup ℓ} (n : ℕ) (x : EM G (2 + n)) (p : 0ₖ (2 + n) ≡ x) → Type ℓ
pathType n x p = sym (rUnitₖ (2 + n) x) ∙ (λ i → x +ₖ p i)
≡ sym (lUnitₖ (2 + n) x) ∙ λ i → p i +ₖ x
pathTypeMake : ∀ {ℓ} {G : AbGroup ℓ} (n : ℕ) (x : EM G (2 + n)) (p : 0ₖ (2 + n) ≡ x)
→ pathType n x p
pathTypeMake n x = J (λ x p → pathType n x p) refl
-- Definition of cup product (⌣ₖ, given by ·₀ when first argument is in K(G,0))
module _ {G' : AbGroup ℓ} {H' : AbGroup ℓ'} where
private
G = fst G'
H = fst H'
strG = snd G'
strH = snd H'
0G = 0g strG
0H = 0g strH
_+G_ = _+Gr_ strG
_+H_ = _+Gr_ strH
-H_ = -Gr_ strH
-G_ = -Gr_ strG
·₀' : H → (m : ℕ) → EM G' m → EM (G' ⨂ H') m
·₀' h =
elim+2
(_⊗ h)
(elimGroupoid _ (λ _ → emsquash)
embase
(λ g → emloop (g ⊗ h))
λ g l →
compPathR→PathP
(sym (∙assoc _ _ _
∙∙ cong₂ _∙_ (sym (emloop-comp _ _ _)
∙ cong emloop (sym (⊗DistL+⊗ g l h))) refl
∙∙ rCancel _)))
λ n f → trRec (isOfHLevelTrunc (4 + n))
λ { north → 0ₖ (suc (suc n))
; south → 0ₖ (suc (suc n))
; (merid a i) → EM→ΩEM+1 (suc n) (f (EM-raw→EM _ _ a)) i}
·₀ : G → (m : ℕ) → EM H' m → EM (G' ⨂ H') m
·₀ g =
elim+2 (λ h → g ⊗ h)
(elimGroupoid _ (λ _ → emsquash)
embase
(λ h → emloop (g ⊗ h))
λ h l → compPathR→PathP
(sym (∙assoc _ _ _
∙∙ cong₂ _∙_ (sym (emloop-comp _ _ _) ∙ cong emloop (sym (⊗DistR+⊗ g h l))) refl
∙∙ rCancel _)))
λ n f
→ trRec (isOfHLevelTrunc (4 + n))
λ { north → 0ₖ (suc (suc n))
; south → 0ₖ (suc (suc n))
; (merid a i) → EM→ΩEM+1 (suc n) (f (EM-raw→EM _ _ a)) i}
·₀-distr : (g h : G) → (m : ℕ) (x : EM H' m) → ·₀ (g +G h) m x ≡ ·₀ g m x +ₖ ·₀ h m x
·₀-distr g h =
elim+2
(⊗DistL+⊗ g h)
(elimSet _ (λ _ → emsquash _ _)
refl
(λ w → compPathR→PathP (sym ((λ i → emloop (⊗DistL+⊗ g h w i)
∙ (lUnit (sym (cong₂+₁ (emloop (g ⊗ w)) (emloop (h ⊗ w)) i)) (~ i)))
∙∙ cong₂ _∙_ (emloop-comp _ (g ⊗ w) (h ⊗ w)) refl
∙∙ rCancel _))))
λ m ind →
trElim (λ _ → isOfHLevelTruncPath)
λ { north → refl
; south → refl
; (merid a i) k → z m ind a k i}
where
z : (m : ℕ) → ((x : EM H' (suc m))
→ ·₀ (g +G h) (suc m) x
≡ ·₀ g (suc m) x +ₖ ·₀ h (suc m) x) → (a : EM-raw H' (suc m))
→ cong (·₀ (g +G h) (suc (suc m))) (cong ∣_∣ₕ (merid a)) ≡
cong₂ _+ₖ_
(cong (·₀ g (suc (suc m))) (cong ∣_∣ₕ (merid a)))
(cong (·₀ h (suc (suc m))) (cong ∣_∣ₕ (merid a)))
z m ind a = (λ i → EM→ΩEM+1 _ (ind (EM-raw→EM _ _ a) i))
∙∙ EM→ΩEM+1-hom _ (·₀ g (suc m) (EM-raw→EM H' (suc m) a))
(·₀ h (suc m) (EM-raw→EM H' (suc m) a))
∙∙ sym (cong₂+₂ m (cong (·₀ g (suc (suc m))) (cong ∣_∣ₕ (merid a)))
(cong (·₀ h (suc (suc m))) (cong ∣_∣ₕ (merid a))))
·₀0 : (m : ℕ) → (g : G) → ·₀ g m (0ₖ m) ≡ 0ₖ m
·₀0 zero = ⊗AnnihilR
·₀0 (suc zero) g = refl
·₀0 (suc (suc m)) g = refl
·₀'0 : (m : ℕ) (h : H) → ·₀' h m (0ₖ m) ≡ 0ₖ m
·₀'0 zero = ⊗AnnihilL
·₀'0 (suc zero) g = refl
·₀'0 (suc (suc m)) g = refl
0·₀ : (m : ℕ) → (x : _) → ·₀ 0G m x ≡ 0ₖ m
0·₀ =
elim+2 ⊗AnnihilL
(elimSet _ (λ _ → emsquash _ _)
refl
λ g → compPathR→PathP ((sym (emloop-1g _)
∙ cong emloop (sym (⊗AnnihilL g)))
∙∙ (λ i → rUnit (rUnit (cong (·₀ 0G 1) (emloop g)) i) i)
∙∙ sym (∙assoc _ _ _)))
λ n f → trElim (λ _ → isOfHLevelTruncPath)
λ { north → refl
; south → refl
; (merid a i) j → (cong (EM→ΩEM+1 (suc n)) (f (EM-raw→EM _ _ a))
∙ EM→ΩEM+1-0ₖ _) j i}
0·₀' : (m : ℕ) (g : _) → ·₀' 0H m g ≡ 0ₖ m
0·₀' =
elim+2
⊗AnnihilR
(elimSet _ (λ _ → emsquash _ _)
refl
λ g → compPathR→PathP (sym (∙assoc _ _ _
∙∙ sym (rUnit _) ∙ sym (rUnit _)
∙∙ (cong emloop (⊗AnnihilR g)
∙ emloop-1g _))))
λ n f → trElim (λ _ → isOfHLevelTruncPath)
λ { north → refl
; south → refl
; (merid a i) j → (cong (EM→ΩEM+1 (suc n)) (f (EM-raw→EM _ _ a))
∙ EM→ΩEM+1-0ₖ _) j i}
-- Definition of the cup product
cup∙ : ∀ n m → EM G' n → EM∙ H' m →∙ EM∙ (G' ⨂ H') (n +' m)
cup∙ =
ℕelim
(λ m g → (·₀ g m) , ·₀0 m g)
λ n f →
ℕelim
(λ g → (λ h → ·₀' h (suc n) g) , 0·₀' (suc n) g)
λ m _ → main n m f
where
main : (n m : ℕ) (ind : ((m : ℕ) → EM G' n → EM∙ H' m →∙ EM∙ (G' ⨂ H') (n +' m)))
→ EM G' (suc n) → EM∙ H' (suc m) →∙ EM∙ (G' ⨂ H') (suc (suc (n + m)))
main zero m ind =
elimGroupoid _ (λ _ → isOfHLevel↑∙ _ _)
((λ _ → 0ₖ (2 + m)) , refl)
(f m)
λ n h → finalpp m n h
where
f : (m : ℕ) → G → typ (Ω (EM∙ H' (suc m) →∙ EM∙ (G' ⨂ H') (suc (suc m)) ∙))
fst (f m g i) x = EM→ΩEM+1 _ (·₀ g _ x) i
snd (f zero g i) j = EM→ΩEM+1-0ₖ (suc zero) j i
snd (f (suc m) g i) j = EM→ΩEM+1-0ₖ (suc (suc m)) j i
f-hom-fst : (m : ℕ) (g h : G) → cong fst (f m (g +G h)) ≡ cong fst (f m g ∙ f m h)
f-hom-fst m g h =
(λ i j x → EM→ΩEM+1 _ (·₀-distr g h (suc m) x i) j)
∙∙ (λ i j x → EM→ΩEM+1-hom _ (·₀ g (suc m) x) (·₀ h (suc m) x) i j)
∙∙ sym (cong-∙ fst (f m g) (f m h))
f-hom : (m : ℕ) (g h : G) → f m (g +G h) ≡ f m g ∙ f m h
f-hom m g h = →∙Homogeneous≡Path (isHomogeneousEM _) _ _ (f-hom-fst m g h)
finalpp : (m : ℕ) (g h : G) → PathP (λ i → f m g i ≡ f m (g +G h) i) refl (f m h)
finalpp m g h =
compPathR→PathP (sym (rCancel _)
∙∙ cong (_∙ sym (f m (g +G h))) (f-hom m g h)
∙∙ sym (∙assoc _ _ _))
main (suc n) m ind =
trElim (λ _ → isOfHLevel↑∙ (2 + n) m)
λ { north → (λ _ → 0ₖ (3 + (n + m))) , refl
; south → (λ _ → 0ₖ (3 + (n + m))) , refl
; (merid a i) → Iso.inv (ΩfunExtIso _ _)
(EM→ΩEM+1∙ _ ∘∙ ind (suc m) (EM-raw→EM _ _ a)) i}
_⌣ₖ_ : {n m : ℕ} (x : EM G' n) (y : EM H' m) → EM (G' ⨂ H') (n +' m)
_⌣ₖ_ x y = cup∙ _ _ x .fst y
⌣ₖ-0ₖ : (n m : ℕ) (x : EM G' n) → (x ⌣ₖ 0ₖ m) ≡ 0ₖ (n +' m)
⌣ₖ-0ₖ n m x = cup∙ n m x .snd
0ₖ-⌣ₖ : (n m : ℕ) (x : EM H' m) → ((0ₖ n) ⌣ₖ x) ≡ 0ₖ (n +' m)
0ₖ-⌣ₖ zero m = 0·₀ _
0ₖ-⌣ₖ (suc zero) zero x = refl
0ₖ-⌣ₖ (suc (suc n)) zero x = refl
0ₖ-⌣ₖ (suc zero) (suc m) x = refl
0ₖ-⌣ₖ (suc (suc n)) (suc m) x = refl
module LeftDistributivity {G' : AbGroup ℓ} {H' : AbGroup ℓ'} where
private
distrl1 : (n m : ℕ) → EM H' m → EM H' m
→ EM∙ G' n →∙ EM∙ (G' ⨂ H') (n +' m)
fst (distrl1 n m x y) z = z ⌣ₖ (x +ₖ y)
snd (distrl1 n m x y) = 0ₖ-⌣ₖ n m _
distrl2 : (n m : ℕ) → EM H' m → EM H' m
→ EM∙ G' n →∙ EM∙ (G' ⨂ H') (n +' m)
fst (distrl2 n m x y) z = (z ⌣ₖ x) +ₖ (z ⌣ₖ y)
snd (distrl2 n m x y) =
cong₂ _+ₖ_ (0ₖ-⌣ₖ n m x) (0ₖ-⌣ₖ n m y) ∙ rUnitₖ _ (0ₖ (n +' m))
hLevLem : (n m : ℕ) → isOfHLevel (suc (suc m)) (EM∙ G' (suc n) →∙ EM∙ (G' ⨂ H') ((suc n) +' m))
hLevLem n m =
subst (isOfHLevel (suc (suc m)))
(λ i → EM∙ G' (suc n) →∙ EM∙ (G' ⨂ H')
((cong suc (+-comm m n) ∙ sym (+'≡+ (suc n) m)) i)) (isOfHLevel↑∙ m n)
mainDistrL : (n m : ℕ) (x y : EM H' (suc m))
→ distrl1 (suc n) (suc m) x y ≡ distrl2 (suc n) (suc m) x y
mainDistrL n zero =
wedgeConEM.fun H' H' 0 0
(λ _ _ → hLevLem _ _ _ _)
(λ x → →∙Homogeneous≡ (isHomogeneousEM _)
(funExt λ z → l x z))
(λ y → →∙Homogeneous≡ (isHomogeneousEM _)
(funExt λ z → r y z ))
λ i → →∙Homogeneous≡ (isHomogeneousEM (suc (suc (n + 0))))
(funExt (λ z → l≡r z i))
where
l : (x : EM H' 1) (z : _)
→ (distrl1 (suc n) 1 embase x .fst z) ≡ (distrl2 (suc n) 1 embase x .fst z)
l x z = cong (z ⌣ₖ_) (lUnitₖ _ x)
∙∙ sym (lUnitₖ _ (z ⌣ₖ x))
∙∙ λ i → (⌣ₖ-0ₖ (suc n) (suc zero) z (~ i)) +ₖ (z ⌣ₖ x)
r : (z : EM H' 1) (x : EM G' (suc n))
→ (distrl1 (suc n) 1 z embase .fst x) ≡ (distrl2 (suc n) 1 z embase .fst x)
r y z = cong (z ⌣ₖ_) (rUnitₖ _ y)
∙∙ sym (rUnitₖ _ (z ⌣ₖ y))
∙∙ λ i → (z ⌣ₖ y) +ₖ (⌣ₖ-0ₖ (suc n) (suc zero) z (~ i))
l≡r : (z : EM G' (suc n)) → l embase z ≡ r embase z
l≡r z = sym (pathTypeMake _ _ (sym (⌣ₖ-0ₖ (suc n) (suc zero) z)))
mainDistrL n (suc m) =
elim2 (λ _ _ → isOfHLevelPath (4 + m) (hLevLem _ _) _ _)
(wedgeConEM.fun H' H' (suc m) (suc m)
(λ x y p q → isOfHLevelPlus {n = suc (suc m)} (suc m)
(hLevLem n (suc (suc m))
(distrl1 (suc n) (suc (suc m)) ∣ x ∣ ∣ y ∣)
(distrl2 (suc n) (suc (suc m)) ∣ x ∣ ∣ y ∣) p q))
(λ x → →∙Homogeneous≡ (isHomogeneousEM _)
(funExt (l x)))
(λ x → →∙Homogeneous≡ (isHomogeneousEM _)
(funExt (r x)))
λ i → →∙Homogeneous≡ (isHomogeneousEM _)
(funExt (λ z → l≡r z i)))
where
l : (x : EM-raw H' (suc (suc m))) (z : EM G' (suc n))
→ (distrl1 (suc n) (suc (suc m)) (0ₖ _) ∣ x ∣ₕ .fst z)
≡ (distrl2 (suc n) (suc (suc m)) (0ₖ _) ∣ x ∣ₕ .fst z)
l x z = cong (z ⌣ₖ_) (lUnitₖ (suc (suc m)) ∣ x ∣)
∙∙ sym (lUnitₖ _ (z ⌣ₖ ∣ x ∣))
∙∙ λ i → (⌣ₖ-0ₖ (suc n) (suc (suc m)) z (~ i)) +ₖ (z ⌣ₖ ∣ x ∣)
r : (x : EM-raw H' (suc (suc m))) (z : EM G' (suc n))
→ (distrl1 (suc n) (suc (suc m)) ∣ x ∣ₕ (0ₖ _) .fst z)
≡ (distrl2 (suc n) (suc (suc m)) ∣ x ∣ₕ (0ₖ _) .fst z)
r x z = cong (z ⌣ₖ_) (rUnitₖ (suc (suc m)) ∣ x ∣)
∙∙ sym (rUnitₖ _ (z ⌣ₖ ∣ x ∣))
∙∙ λ i → (z ⌣ₖ ∣ x ∣) +ₖ (⌣ₖ-0ₖ (suc n) (suc (suc m)) z (~ i))
l≡r : (z : EM G' (suc n)) → l north z ≡ r north z
l≡r z = sym (pathTypeMake _ _ (sym (⌣ₖ-0ₖ (suc n) (suc (suc m)) z)))
module RightDistributivity {G' : AbGroup ℓ} {H' : AbGroup ℓ'} where
private
G = fst G'
H = fst H'
strG = snd G'
strH = snd H'
0G = 0g strG
0H = 0g strH
_+G_ = _+Gr_ strG
_+H_ = _+Gr_ strH
-H_ = -Gr_ strH
-G_ = -Gr_ strG
distrr1 : (n m : ℕ) → EM G' n → EM G' n → EM∙ H' m →∙ EM∙ (G' ⨂ H') (n +' m)
fst (distrr1 n m x y) z = (x +ₖ y) ⌣ₖ z
snd (distrr1 n m x y) = ⌣ₖ-0ₖ n m _
distrr2 : (n m : ℕ) → EM G' n → EM G' n → EM∙ H' m →∙ EM∙ (G' ⨂ H') (n +' m)
fst (distrr2 n m x y) z = (x ⌣ₖ z) +ₖ (y ⌣ₖ z)
snd (distrr2 n m x y) = cong₂ _+ₖ_ (⌣ₖ-0ₖ n m x) (⌣ₖ-0ₖ n m y) ∙ rUnitₖ _ (0ₖ (n +' m))
mainDistrR : (n m : ℕ) (x y : EM G' (suc n))
→ distrr1 (suc n) (suc m) x y ≡ distrr2 (suc n) (suc m) x y
mainDistrR zero m =
wedgeConEM.fun G' G' 0 0
(λ _ _ → isOfHLevel↑∙ 1 m _ _)
(λ x → →∙Homogeneous≡ (isHomogeneousEM _)
(funExt (l x)))
(λ x → →∙Homogeneous≡ (isHomogeneousEM _)
(funExt (r x)))
λ i → →∙Homogeneous≡ (isHomogeneousEM _)
(funExt λ z → l≡r z i)
where
l : (x : _) (z : _) → _ ≡ _
l x z =
(λ i → (lUnitₖ 1 x i) ⌣ₖ z)
∙∙ sym (lUnitₖ _ (x ⌣ₖ z))
∙∙ λ i → 0ₖ-⌣ₖ _ _ z (~ i) +ₖ (x ⌣ₖ z)
r : (x : _) (z : _) → _ ≡ _
r x z =
((λ i → (rUnitₖ 1 x i) ⌣ₖ z))
∙∙ sym (rUnitₖ _ _)
∙∙ λ i → (_⌣ₖ_ {n = 1} {m = suc m} x z) +ₖ 0ₖ-⌣ₖ (suc zero) (suc m) z (~ i)
l≡r : (z : _) → l embase z ≡ r embase z
l≡r z = pathTypeMake _ _ _
mainDistrR (suc n) m =
elim2 (λ _ _ → isOfHLevelPath (4 + n)
(isOfHLevel↑∙ (2 + n) m) _ _)
(wedgeConEM.fun _ _ _ _
(λ x y → isOfHLevelPath ((2 + n) + (2 + n))
(transport (λ i → isOfHLevel (((λ i → (+-comm n 2 (~ i) + (2 + n)))
∙ sym (+-assoc n 2 (2 + n))) (~ i))
(EM∙ H' (suc m) →∙ EM∙ ((fst (AbGroupPath (G' ⨂ H') (H' ⨂ G'))) ⨂-comm (~ i))
((+'-comm (suc m) (suc (suc n))) i)))
(isOfHLevelPlus n
(LeftDistributivity.hLevLem m (suc (suc n))))) _ _)
(λ x → →∙Homogeneous≡ (isHomogeneousEM _)
(funExt (l x)))
(λ x → →∙Homogeneous≡ (isHomogeneousEM _)
(funExt (r x)))
λ i → →∙Homogeneous≡ (isHomogeneousEM _)
(funExt λ z → r≡l z i))
where
l : (x : _) (z : _) → _ ≡ _
l x z = (λ i → (lUnitₖ _ ∣ x ∣ i) ⌣ₖ z)
∙∙ sym (lUnitₖ _ (∣ x ∣ ⌣ₖ z))
∙∙ λ i → 0ₖ-⌣ₖ _ _ z (~ i) +ₖ (∣ x ∣ ⌣ₖ z)
r : (x : _) (z : _) → _ ≡ _
r x z = (λ i → (rUnitₖ _ ∣ x ∣ i) ⌣ₖ z)
∙∙ sym (rUnitₖ _ (∣ x ∣ ⌣ₖ z))
∙∙ λ i → (∣ x ∣ ⌣ₖ z) +ₖ 0ₖ-⌣ₖ _ _ z (~ i)
r≡l : (z : _) → l north z ≡ r north z
r≡l z = pathTypeMake _ _ _
-- TODO: Summarise distributivity proofs
-- TODO: Associativity and graded commutativity, following Cubical.ZCohomology.RingStructure
-- The following lemmas will be needed to make the types match up.
|
Formal statement is: lemma contour_integrable_rmul: "f contour_integrable_on g \<Longrightarrow> (\<lambda>x. f x * c) contour_integrable_on g" Informal statement is: If $f$ is contour-integrable on $g$, then $f \cdot c$ is contour-integrable on $g$. |
{-# OPTIONS --safe --experimental-lossy-unification #-}
module Cubical.Algebra.CommRing.Instances.Polynomials.UnivariatePolyFun where
open import Cubical.Foundations.Prelude
open import Cubical.Data.Empty as ⊥
open import Cubical.Data.Nat hiding (_·_) renaming (_+_ to _+n_)
open import Cubical.Data.Nat.Order
open import Cubical.Data.Sigma
open import Cubical.Algebra.Monoid
open import Cubical.Algebra.Ring
open import Cubical.Algebra.CommRing
open import Cubical.Algebra.GradedRing.DirectSumFun
private variable
ℓ : Level
module _
(ACommRing@(A , Astr) : CommRing ℓ)
where
open CommRingStr Astr
open RingTheory (CommRing→Ring ACommRing)
UnivariatePolyFun-CommRing : CommRing ℓ
UnivariatePolyFun-CommRing = ⊕FunGradedRing-CommRing
_+n_ (makeIsMonoid isSetℕ +-assoc +-zero λ _ → refl) (λ _ _ → refl)
(λ _ → A)
(λ _ → snd (Ring→AbGroup (CommRing→Ring ACommRing)))
1r _·_ 0LeftAnnihilates 0RightAnnihilates
(λ a b c → ΣPathP ((+-assoc _ _ _) , (·Assoc _ _ _)))
(λ a → ΣPathP ((+-zero _) , (·IdR _)))
(λ a → ΣPathP (refl , (·IdL _)))
·DistR+
·DistL+
λ x y → ΣPathP ((+-comm _ _) , (·Comm _ _))
nUnivariatePolyFun : (A' : CommRing ℓ) → (n : ℕ) → CommRing ℓ
nUnivariatePolyFun A' zero = A'
nUnivariatePolyFun A' (suc n) = UnivariatePolyFun-CommRing (nUnivariatePolyFun A' n)
|
include("../src/Dataset.jl")
import .Dataset:
get_MINST,
TrainTest,
split_X_by_batches
include("../src/Model.jl")
import .Model:
split_encoder_result,
create_vae,
model_sample,
conv_MINST_model
include("../src/Utils.jl")
import .Utils:
gen_images,
save_model,
load_model
using Test
using Flux
using Flux.Tracker: TrackedReal
using Printf
using Distributions: Uniform
temp_dir = tempdir()
@testset "Model save/load" begin
n_sample = Int(floor(rand(Uniform(10,100))))
n_latent = Int(floor(rand(Uniform(1,50)))) * 2
ps, loss_fn, f, g = create_vae(n_sample, n_latent)
file = tempname()
save_model(f, g, file)
fp, gp = load_model(file)
@test true
end
@testset "Image Utilities" begin
n_sample = Int(floor(rand(Uniform(10,100))))
n_latent = Int(floor(rand(Uniform(1,50)))) * 2
dataset = get_MINST(n_sample)
ps, loss_fn, f, g = create_vae(n_latent, n_sample)
# zero initialized μ̂, logσ̂
outfile_zero_latent = joinpath(temp_dir, "zero_latent.png")
@test typeof(model_sample(f)) == BitArray{4}
gen_images(outfile_zero_latent, f)
@test isfile(outfile_zero_latent)
@sprintf("saved file to %s", outfile_zero_latent)
# f(g(x)) test
outfile_X_latent = joinpath(temp_dir, "X_latent.png")
gen_images(outfile_X_latent, g, f, dataset.test_x)
@test isfile(outfile_X_latent)
@sprintf("saved file to %s", outfile_X_latent)
end
@testset "ADAM optimization can run" begin
n_sample = Int(floor(rand(Uniform(10,100))))
n_latent = Int(floor(rand(Uniform(1,50)))) * 2
dataset = get_MINST(n_sample)
X = dataset.train_x
ps, loss_fn, f, g = create_vae(n_latent, n_sample)
opt = ADAM()
@test typeof(loss_fn(X)) == TrackedReal{Float32}
X = float.(X .> 0.5)
Flux.train!(loss_fn, ps, zip([X]), opt)
@test true == true
end
@testset "Convolution and transpose is isomorphic" begin
dataset = get_MINST()
n_sample = 100
n_latent = 10
X = dataset.train_x[:,:,:,1:n_sample]
f, g = conv_MINST_model(n_latent)
X_transformed = g(reshape(X, 28, 28, 1, n_sample))
x_mean, x_std = split_encoder_result(X_transformed, n_latent)
Xp = f(x_mean)
@test size(Xp) == size(X)
end
@testset "MINST dataset: size/shape okay" begin
ds = get_MINST()
img_shape = (28, 28, 1)
n_train = 60000
n_test = 10000
@test size(ds.train_x) == (img_shape..., n_train)
@test size(ds.train_y) == (n_train, )
@test size(ds.test_x) == (img_shape..., n_test)
@test size(ds.test_y) == (n_test, )
end
|
State Before: α : Type u_1
inst✝¹ : DistribLattice α
inst✝ : OrderTop α
a b c : α
⊢ Codisjoint a (b ⊓ c) ↔ Codisjoint a b ∧ Codisjoint a c State After: no goals Tactic: simp only [codisjoint_iff, sup_inf_left, inf_eq_top_iff] |
theory SRUseEnv
imports ReduceWTS DerefVars
begin
(*
fun safe_act where
"safe_act s NoAct = True"
| "safe_act s (MakeAct x) = (s x = None)"
| "safe_act s (UseAct x) = True"
lemma safe_act_well_typed_app: "\<lbrakk> well_typed env r_s1 e1 tau r_s2 rx; app_red_exp (s1, e1) ax (s2, e2) \<rbrakk> \<Longrightarrow> safe_act s1 ax"
apply (case_tac e1)
apply (auto)
apply (case_tac ax)
apply (auto)
apply (case_tac ax)
apply (auto)
(* if case *)
apply (case_tac x41)
apply (auto)
apply (case_tac x1)
apply (auto)
apply (case_tac x3)
apply (auto)
(* case analysis on constants for make action *)
apply (case_tac x61)
apply (auto)
apply (case_tac x1)
apply (auto)
apply (case_tac x62)
apply (auto)
apply (simp add: fresh_var_def)
(* case analysis for op *)
apply (case_tac x62)
apply (auto)
(* case analysis for pair creation *)
apply (case_tac x61a)
apply (auto)
apply (case_tac x1)
apply (auto)
apply (simp add: fresh_var_def)
done
*)
(* ###### constructive permission definitions.
the idea here is that I want to constructively state which permissions are being consumed.
the difficult part is that the "permissions consumed" involves
*)
(*
definition intro_use_env where
"intro_use_env r_s trs = (\<lambda> x. if x \<in> trs then UsePerm else r_s x)"
definition elim_use_env where
"elim_use_env r_s cs = (\<lambda> x. if x \<in> cs then NoPerm else r_s x)"*)
(*
fun red_env where
"red_env env e tau NoAct = env"
| "red_env env e tau (MakeAct x) = add_env env x tau"
| "red_env env e tau (UseAct x) = (if x \<notin> free_vars e then rem_env env x else env)"
*)
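(* A generalised action recording how a reduction step affects the type environment and
   the resource/permission maps: no resource effect, allocating one resource (or a pair
   of channel ends), reading a resource, or writing to (and extending) a resource. *)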
datatype gen_act =
NoResAct
| AddResAct string p_type perm_use_env
| Add2ResAct string string p_type
| ReadResAct
| WriteResAct string perm_use_env
fun red_env where
"red_env env NoResAct = env"
| "red_env env (AddResAct x tau r_s) = add_env env x tau"
| "red_env env (Add2ResAct x1 x2 tau) = add_env (add_env env x1 (ChanTy tau SEnd)) x2 (ChanTy tau REnd)"
| "red_env env ReadResAct = env"
| "red_env env (WriteResAct x r_s) = env"
(*
fun full_red_use_env where
"full_red_use_env r_s NoResAct = r_s"
(* remove resources used to create the value, add the new perm *)
| "full_red_use_env r_s (AddResAct x tau r_s') = add_use_env r_s x OwnPerm"
| "full_red_use_env r_s ReadResAct = r_s"*)
fun exp_red_use_env where
"exp_red_use_env r_s NoResAct = r_s"
(* remove resources used to create the value, add the new perm *)
| "exp_red_use_env r_s (AddResAct x tau r_s') = add_use_env r_s x OwnPerm"
| "exp_red_use_env r_s (Add2ResAct x1 x2 tau) = add_use_env (add_use_env r_s x1 OwnPerm) x2 OwnPerm"
| "exp_red_use_env r_s ReadResAct = r_s"
| "exp_red_use_env r_s (WriteResAct x r_s') = (diff_use_env r_s r_s')"
fun end_red_use_env where
"end_red_use_env r_s (WriteResAct x r_s') = (diff_use_env r_s r_s')"
| "end_red_use_env r_s r_ax = r_s"
(* if this is the resource's last use, remove it. otherwise add the resources given *)
(*| "red_use_env r_s (UseAct x) = r_s"*) (*
(*let r_s' = elim_use_env r_s trs in*)
(if x \<notin> free_vars e then rem_use_env r_s x else r_s))"*)
(*
fun red_res_map where
"red_res_map rs_map NoResAct = rs_map"
| "red_res_map rs_map (AddResAct x tau r_s) = add_mem rs_map x r_s"
| "red_res_map rs_map ReadResAct = rs_map"
*)
fun red_nres_map where
"red_nres_map rs_map NoResAct = rs_map"
| "red_nres_map rs_map (AddResAct x tau r_s) = add_env rs_map x r_s"
| "red_nres_map rs_map (Add2ResAct x1 x2 tau) = add_env (add_env rs_map x1 empty_use_env) x2 empty_use_env"
| "red_nres_map rs_map ReadResAct = rs_map"
| "red_nres_map rs_map (WriteResAct x r_s) = add_env rs_map x (comp_use_env (nres_lookup rs_map x) r_s)"
fun safe_act where
"safe_act s r_s NoResAct = True"
| "safe_act s r_s (AddResAct x tau r_x) = (s x = None \<and> leq_use_env r_x r_s)"
| "safe_act s r_s (Add2ResAct x1 x2 tau) = (s x1 = None \<and> s x2 = None \<and> x1 \<noteq> x2)"
| "safe_act s r_s ReadResAct = True"
| "safe_act s r_s (WriteResAct x r_x) = (s x \<noteq> None \<and> leq_use_env r_x r_s)"
fun corr_act where
"corr_act ax NoResAct = (ax = NoAct)"
| "corr_act ax (AddResAct x tau r_s) = (ax = MakeAct x)"
| "corr_act ax (Add2ResAct x1 x2 tau) = (ax = Mk2Act x1 x2)"
| "corr_act ax ReadResAct = (\<exists> x. ax = UseAct x)"
| "corr_act ax (WriteResAct x r_s) = (\<exists> x. ax = UseAct x)"
lemma leq_safe_act: "\<lbrakk> safe_act s r_x g_ax; leq_use_env r_x r_s \<rbrakk> \<Longrightarrow> safe_act s r_s g_ax"
apply (case_tac g_ax)
apply (auto)
apply (rule_tac r_sb="r_x" in trans_leq_use_env)
apply (auto)
apply (rule_tac r_sb="r_x" in trans_leq_use_env)
apply (auto)
done
(*
definition valid_reduct where
"valid_reduct r_exp = (\<forall> are s1 rs_map env r_c r_s1 e1 tau r_s2 rx ax s2 e2. (
r_exp are (s1, e1) ax (s2, e2) \<and> well_typed env r_s1 e1 tau r_s2 rx \<and>
well_typed_state s1 env rs_map \<and> valid_use_env s1 rs_map r_c r_s1) \<longrightarrow>
(\<exists> g_ax. well_typed (red_env env g_ax) (exp_red_use_env r_s1 g_ax) e2 tau r_s2 rx \<and>
well_typed_state s2 (red_env env g_ax) (red_res_map rs_map g_ax) \<and>
valid_use_env s2 (red_res_map rs_map g_ax) (full_red_use_env r_c g_ax) (exp_red_use_env r_s1 g_ax) \<and> safe_act s1 g_ax \<and> corr_act ax g_ax)
)" *)
definition valid_reduct where
"valid_reduct r_exp = (\<forall> are s1 rs_map env r_f r_s1 e1 tau r_s2 rx ax s2 e2. (
r_exp are (s1, e1) ax (s2, e2) \<and> well_typed env r_s1 e1 tau r_s2 rx \<and> proper_exp rs_map e1 \<and>
well_typed_state s1 env rs_map \<and> valid_exp_use_env s1 rs_map r_f \<and> leq_use_env r_s1 r_f) \<longrightarrow>
(\<exists> g_ax. well_typed (red_env env g_ax) (exp_red_use_env r_s1 g_ax) e2 tau (end_red_use_env r_s2 g_ax) (end_red_use_env rx g_ax) \<and>
proper_exp (red_nres_map rs_map g_ax) e2 \<and> well_typed_state s2 (red_env env g_ax) (red_nres_map rs_map g_ax) \<and>
valid_exp_use_env s2 (red_nres_map rs_map g_ax) (exp_red_use_env r_f g_ax) \<and> safe_act s1 (infl_use_env r_f r_s2) g_ax \<and> corr_act ax g_ax)
)"
(*
lemma wtddp2_np_var: "\<lbrakk> well_typed env r_s1 e tau r_s2 rx; x \<in> non_prim_vars env e; rx x = NoPerm \<rbrakk> \<Longrightarrow> r_s2 x \<noteq> OwnPerm"
apply (induct e arbitrary: env r_s1 tau r_s2 rx)
apply (auto)
(* const + op cases *)
apply (simp add: non_prim_vars_def)
apply (simp add: non_prim_vars_def)
(* var case *)
apply (cut_tac r_s="diff_use_env r_s1 (comp_use_env (ereq_use_env x1a tau) r_ex)" and r_x="r_s2" and x="x" in leq_use_no_own)
apply (cut_tac r_s="r_s1" and r_x="ereq_use_env x1a tau" and r_ex="comp_use_env (ereq_use_env x1a tau) r_ex" and x="x" in diff_use_none)
apply (simp add: ereq_use_env_def)
apply (simp add: one_use_env_def)
apply (simp add: non_prim_vars_def)
apply (simp add: non_prim_entry_def)
apply (simp add: end_req_perm_def)
apply (case_tac "req_type tau")
apply (auto)
apply (rule_tac r_s="rx" in leq_use_none)
apply (simp_all)
(* if case *)
apply (case_tac "x \<notin> free_vars (IfExp e1 e2 e3)")
apply (simp add: non_prim_vars_def)
apply (auto)
apply (case_tac "x \<notin> non_prim_vars env e1")
apply (simp add: non_prim_vars_def)
apply (rule_tac r_s="r_s2a" in leq_use_none)
apply (rule_tac well_typed_perm_leq)
apply (auto)
*)
(*
**** right now the issue is that vars from rx1 might still appear within e, even if they're owned - for instance, if a
var is used in a location that doesn't contribute to the reqs of the final result ()
(* - the idea is that if rx1 has an ownership, it means rx2 is None. if rx2 is None, but x is a non-prim var, it must have been
subtracted out, meaning it cannot be in r_s2, which is a contradiction. *)
lemma well_typed_disj_diff_perms2: "\<lbrakk> well_typed env r_s1 e tau r_s2 rx2; disj_use_env rx1 rx2; leq_use_env rx1 r_s2 \<rbrakk> \<Longrightarrow>
well_typed env (diff_use_env r_s1 rx1) e tau (diff_use_env r_s2 rx1) (diff_use_env rx2 rx1)"
apply (rule_tac well_typed_diff_perms)
apply (auto)
*)
lemma red_contain_env: "\<lbrakk> safe_act s r_s g_ax; sub_env s env \<rbrakk> \<Longrightarrow> contain_env (red_env env g_ax) env"
apply (case_tac g_ax)
apply (auto)
apply (rule_tac id_contain_env)
apply (rule_tac add_contain_env)
apply (simp add: sub_env_def)
apply (rule_tac env_b="add_env env x31 (ChanTy x33 SEnd)" in trans_contain_env)
apply (rule_tac add_contain_env)
apply (simp add: add_env_def)
apply (simp add: sub_env_def)
apply (rule_tac add_contain_env)
apply (simp add: sub_env_def)
apply (rule_tac id_contain_env)
apply (rule_tac id_contain_env)
done
(*
lemma exp_red_leq_use_env: "\<lbrakk> leq_use_env r_x r_s \<rbrakk> \<Longrightarrow> leq_use_env r_x (exp_red_use_env r_s g_ax)"
apply (case_tac g_ax)
apply (auto)
apply (rule_tac rhs_add_leq_use_env)
apply (simp)
apply (case_tac "r_x x21")
apply (auto)
done *)
(*
fun end_red_use_env where
"end_red_use_env r_s e (UseAct x) = (if x \<notin> free_vars e then rem_use_env r_s x else r_s)"
| "end_red_use_env r_s e ax = r_s" *)
(*
fun red_res_map where
"red_res_map rs_map e NoAct = rs_map"
| "red_res_map rs_map e (MakeAct x cs) = (add_mem rs_map x (intro_use_env empty_use_env cs))"
| "red_res_map rs_map e (UseAct x trs) = (add_mem rs_map x (intro_use_env (lookup_res rs_map x) trs))"
*)
(*
lemma ignore_elim_use_env: "elim_use_env r_s {} = r_s"
apply (simp add: elim_use_env_def)
done
lemma elim_sub_use_env: "\<lbrakk> sub_use_env s r_s \<rbrakk> \<Longrightarrow> sub_use_env s (elim_use_env r_s cs)"
apply (simp add: sub_use_env_def)
apply (simp add: elim_use_env_def)
done
lemma add_elim_leq_use_env: "leq_use_env (elim_use_env (add_use_env r_s x r) cs) (add_use_env (elim_use_env r_s cs) x r)"
apply (simp add: leq_use_env_def)
apply (simp add: elim_use_env_def)
apply (simp add: add_use_env_def)
apply (auto)
apply (case_tac r)
apply (auto)
apply (case_tac "r_s xa")
apply (auto)
done
lemma diff_elim_leq_use_env: "leq_use_env (diff_use_env (elim_use_env r_s cs) r_x) (elim_use_env (diff_use_env r_s r_x) cs)"
apply (simp add: leq_use_env_def)
apply (simp add: elim_use_env_def)
apply (simp add: diff_use_env_def)
apply (simp add: minus_use_env_def)
apply (simp add: neg_use_env_def)
apply (auto)
apply (case_tac "r_s x")
apply (auto)
apply (case_tac "r_x x")
apply (auto)
apply (case_tac "r_x x")
apply (auto)
done
lemma self_elim_leq_use_env: "leq_use_env (elim_use_env r_s cs) r_s"
apply (simp add: leq_use_env_def)
apply (simp add: elim_use_env_def)
apply (auto)
apply (case_tac "r_s x")
apply (auto)
done
lemma elim_leq_use_env: "\<lbrakk> leq_use_env r_x r_s \<rbrakk> \<Longrightarrow> leq_use_env (elim_use_env r_x cs) r_s"
apply (rule_tac r_sb="r_x" in trans_leq_use_env)
apply (simp)
apply (rule_tac self_elim_leq_use_env)
done
lemma dist_elim_leq_use_env: "\<lbrakk> leq_use_env r_x r_s \<rbrakk> \<Longrightarrow> leq_use_env (elim_use_env r_x cs) (elim_use_env r_s cs)"
apply (simp add: leq_use_env_def)
apply (simp add: elim_use_env_def)
done
(*
lemma diff_elim_use_env: "elim_use_env r_s cs = diff_use_env r_s (intro_use_env empty_use_env cs)"
apply (case_tac "\<forall> x. elim_use_env r_s cs x = diff_use_env r_s (intro_use_env empty_use_env cs) x")
apply (auto)
apply (simp add: elim_use_env_def)
apply (simp add: intro_use_env_def)
apply (simp add: diff_use_env_def)
apply (simp add: minus_use_env_def)
apply (simp add: neg_use_env_def)
apply (case_tac "x \<in> cs")
apply (auto)
apply (case_tac "r_s x")
apply (auto)
apply (simp add: empty_use_env_def)
apply (case_tac "r_s x")
apply (auto)
done
lemma rhs_unroll_elim_use_env: "\<lbrakk> leq_use_env r_x (elim_use_env r_s cs) \<rbrakk> \<Longrightarrow> leq_use_env r_x (diff_use_env r_s (intro_use_env empty_use_env cs))"
apply (cut_tac r_s="r_s" and cs="cs" in diff_elim_use_env)
apply (auto)
done
lemma lhs_unroll_elim_use_env: "\<lbrakk> leq_use_env (elim_use_env r_x cs) r_s \<rbrakk> \<Longrightarrow> leq_use_env (diff_use_env r_x (intro_use_env empty_use_env cs)) r_s"
apply (cut_tac r_s="r_x" and cs="cs" in diff_elim_use_env)
apply (auto)
done
lemma rhs_fold_elim_use_env: "\<lbrakk> leq_use_env r_x (diff_use_env (diff_use_env r_s (intro_use_env empty_use_env cs)) r_ex) \<rbrakk> \<Longrightarrow>
leq_use_env r_x (diff_use_env (elim_use_env r_s cs) r_ex)"
apply (cut_tac r_s="r_s" and cs="cs" in diff_elim_use_env)
apply (auto)
done
lemma lhs_fold_elim_use_env: "\<lbrakk> leq_use_env (diff_use_env (diff_use_env r_x (intro_use_env empty_use_env cs)) r_ex) r_s \<rbrakk> \<Longrightarrow>
leq_use_env (diff_use_env (elim_use_env r_x cs) r_ex) r_s"
apply (cut_tac r_s="r_x" and cs="cs" in diff_elim_use_env)
apply (auto)
done *)
lemma mini_disj_intro_union_env: "\<lbrakk> mini_disj_use_env (intro_use_env r_x s1) r_s; mini_disj_use_env (intro_use_env empty_use_env s2) r_s \<rbrakk> \<Longrightarrow>
mini_disj_use_env (intro_use_env r_x (s1 \<union> s2)) r_s"
apply (simp add: mini_disj_use_env_def)
apply (simp add: intro_use_env_def)
done
*)
(* ##### safe-reduction specific validity lemmas ##### *)
lemma red_sep_nres_map: "\<lbrakk> p_map u = Some r_s; disj_nres_map p_map; sub_nres_map s1 p_map;
safe_act s1 r_s g_ax; sub_use_env s1 r_s \<rbrakk> \<Longrightarrow> sep_nres_map (exp_red_use_env r_s g_ax) (rem_env p_map u)"
apply (simp add: sep_nres_map_def)
apply (auto)
(* we don't have to check x = u, since u has been removed from the map *)
apply (case_tac "u = x")
apply (auto)
apply (simp add: nres_lookup_def)
apply (simp add: rem_env_def)
apply (rule_tac empty_strong_disj_use_env2)
(* otherwise, the lookup is the same as it was in p_map *)
apply (cut_tac rs_map="p_map" and x="u" and y="x" in nres_rem_diff)
apply (auto)
(* from here we do case analysis on the possible ways that r_s has been modified *)
(* if it has not been modified the case is simple *)
apply (case_tac "exp_red_use_env r_s g_ax = r_s")
apply (simp add: disj_nres_map_def)
apply (erule_tac x="u" in allE)
apply (erule_tac x="x" in allE)
apply (auto)
apply (simp add: nres_lookup_def)
(* make case: if x21 has been added, the rest of r_s is disjoint from p_map x *)
apply (case_tac g_ax)
apply (auto)
apply (rule_tac add_strong_disj_use_env)
apply (simp add: disj_nres_map_def)
apply (erule_tac x="u" in allE)
apply (erule_tac x="x" in allE)
apply (auto)
apply (simp add: nres_lookup_def)
(* now we have to prove that x21 was not in p_map, which should be true since p_map is subordinate to s *)
apply (case_tac "p_map x")
apply (simp add: nres_lookup_def)
apply (simp add: empty_use_env_def)
apply (simp add: sub_nres_map_def)
apply (erule_tac x="x" in allE)
apply (simp add: sub_use_env_def)
(* make 2 case: start by assuming we have p_map x *)
apply (case_tac "p_map x")
apply (simp add: nres_lookup_def)
apply (rule_tac empty_strong_disj_use_env2)
(* otherwise, prove r_s disjoint from p_map x *)
apply (rule_tac add_strong_disj_use_env)
apply (rule_tac add_strong_disj_use_env)
apply (simp add: disj_nres_map_def)
apply (erule_tac x="u" in allE)
apply (erule_tac x="x" in allE)
apply (auto)
apply (simp add: nres_lookup_def)
(* after this, prove x31 / x32 do not appear in p_map x *)
apply (simp add: sub_nres_map_def)
apply (erule_tac x="x" in allE)
apply (simp add: sub_use_env_def)
apply (simp add: sub_nres_map_def)
apply (erule_tac x="x" in allE)
apply (simp add: sub_use_env_def)
(* write case: otherwise, x42 was removed from r_s, so disjointness should be simple *)
apply (rule_tac r_s="r_s" in strong_disj_leq_use_env1)
apply (simp add: disj_nres_map_def)
apply (erule_tac x="u" in allE)
apply (erule_tac x="x" in allE)
apply (auto)
apply (simp add: nres_lookup_def)
apply (rule_tac self_diff_leq_use_env)
done
lemma red_sep_nres_map2: "\<lbrakk> p_map v = Some r_p; p_map u = Some r_s; u \<noteq> v; disj_nres_map p_map;
safe_act s1 r_s g_ax; sep_nres_map r_p rs_map \<rbrakk> \<Longrightarrow> sep_nres_map r_p (red_nres_map rs_map g_ax)"
apply (case_tac g_ax)
apply (auto)
(* make case *)
apply (rule_tac add_sep_nres_map)
apply (simp)
apply (rule_tac r_s="r_s" in strong_disj_leq_use_env2)
apply (simp add: disj_nres_map_def)
apply (auto)
apply (erule_tac x="v" in allE)
apply (erule_tac x="u" in allE)
apply (simp add: nres_lookup_def)
(* make 2 case *)
apply (rule_tac add_sep_nres_map)
apply (rule_tac add_sep_nres_map)
apply (simp)
apply (rule_tac empty_strong_disj_use_env2)
apply (rule_tac empty_strong_disj_use_env2)
(* write case *)
apply (rule_tac add_sep_nres_map)
apply (simp)
apply (rule_tac strong_disj_comp_use_env1)
apply (simp add: sep_nres_map_def)
apply (rule_tac r_s="r_s" in strong_disj_leq_use_env2)
apply (simp add: disj_nres_map_def)
apply (erule_tac x="v" in allE)
apply (erule_tac x="u" in allE)
apply (simp add: nres_lookup_def)
apply (simp)
done
end |
# Author: markusj1201
import pandas as pd
import numpy as np
import datetime as dt
import math
import warnings
warnings.filterwarnings("ignore")
prices = pd.read_csv("adjclose.csv", index_col="Date", parse_dates=True)
volumechanges = pd.read_csv("volume.csv", index_col="Date", parse_dates=True).pct_change()*100
today = dt.date(2000, 1, 15)
simend = dt.date(2019, 12, 31)
tickers = []
transactionid = 0
money = 1000000
portfolio = {}
activelog = []
transactionlog = []
def getprice(date, ticker):
global prices
return prices.loc[date][ticker]
def transaction(id, ticker, amount, price, type, info):
global transactionid
if type == "buy":
exp_date = today + dt.timedelta(days=14)
transactionid += 1
else:
exp_date = today
if type == "sell":
data = {"id": id, "ticker": ticker, "amount": amount, "price": price, "date": today, "type": type,
"exp_date": exp_date, "info": info}
elif type == "buy":
data = {"id": transactionid, "ticker": ticker, "amount": amount, "price": price, "date": today, "type": type,
"exp_date": exp_date, "info": info}
activelog.append(data)
transactionlog.append(data)
def buy(interestlst, allocated_money):
global money, portfolio
for item in interestlst:
price = getprice(today, item)
if not np.isnan(price):
quantity = math.floor(allocated_money/price)
money -= quantity*price
portfolio[item] += quantity
transaction(0, item, quantity, price, "buy", "")
def sell():
global money, portfolio, prices, today
itemstoremove = []
for i in range(len(activelog)):
log = activelog[i]
if log["exp_date"] <= today and log["type"] == "buy":
tickprice = getprice(today, log["ticker"])
if not np.isnan(tickprice):
money += log["amount"]*tickprice
portfolio[log["ticker"]] -= log["amount"]
transaction(log["id"], log["ticker"], log["amount"], tickprice, "sell", log["info"])
itemstoremove.append(i)
else:
log["exp_date"] += dt.timedelta(days=1)
itemstoremove.reverse()
for elem in itemstoremove:
activelog.remove(activelog[elem])
def simulation():
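# Volume-momentum strategy: each simulated week, close any buy positions past
# their expiry date, then buy every ticker whose average daily volume change
# over the past 14 days exceeds 100%.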
global today, volumechanges, money
start_date = today - dt.timedelta(days=14)
series = volumechanges.loc[start_date:today].mean()
interestlst = series[series > 100].index.tolist()
sell()
if len(interestlst) > 0:
#moneyToAllocate = 500000/len(interestlst)
moneyToAllocate = currentvalue()/(2*len(interestlst))
buy(interestlst, moneyToAllocate)
def getindices():
global tickers
f = open("symbols.txt", "r")
for line in f:
tickers.append(line.strip())
f.close()
def tradingday():
global prices, today
return np.datetime64(today) in list(prices.index.values)
def currentvalue():
global money, portfolio, today, prices
value = money
for ticker in tickers:
tickprice = getprice(today, ticker)
if not np.isnan(tickprice):
value += portfolio[ticker]*tickprice
return int(value*100)/100
def main():
global today
getindices()
for ticker in tickers:
portfolio[ticker] = 0
while today < simend:
while not tradingday():
today += dt.timedelta(days=1)
simulation()
currentpvalue = currentvalue()
print(currentpvalue, today)
today += dt.timedelta(days=7)
main() |
%
% CMPT 213: Object Oriented Design in Java - A Course Overview
% Section: Defensive Programming
%
% Author: Jeffrey Leung
%
\section{Defensive Programming}
\label{sec:defensive-programming}
\begin{easylist}
& \textbf{Precondition:} Guarantee before a method is called
&& Enforced by client, or else the class may have unintended behaviour
& \textbf{Postcondition:} Guarantee after a method is called
&& Enforced by caller
& Implementation has a contract for the client to fulfill
&& \textbf{Design by Contract:} Client is expected to enforce contract
&&& Less error checking
&& \textbf{Defensive Programming:} Paradigm where implementation checks for contractual violations and is responsible for maintaining correct state
&&& Quicker error catching
& Use assert to maintain consistent internal state (see the Java sketch below)
\end{easylist}
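As an illustrative sketch (the class and names here are hypothetical, not taken from the course materials), the following Java fragment contrasts a defensively programmed public method, which validates its precondition and fails fast, with an \texttt{assert} guarding an internal invariant:
\begin{verbatim}
public class BankAccount {
    private int balanceInCents;

    // Defensive programming: the implementation checks the
    // precondition (amount > 0) instead of trusting the client.
    public void deposit(int amountInCents) {
        if (amountInCents <= 0) {
            throw new IllegalArgumentException(
                "amount must be positive: " + amountInCents);
        }
        balanceInCents += amountInCents;
        // Internal consistency check; enabled with `java -ea`.
        assert balanceInCents >= 0 : "balance corrupted";
    }
}
\end{verbatim}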
\clearpage
|
[STATEMENT]
lemma SETcod: "isZFfun (ZFfun A B f) \<Longrightarrow> cod\<^bsub>SET\<^esub> ZFfun A B f = B"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. isZFfun (ZFfun A B f) \<Longrightarrow> cod\<^bsub>SET\<^esub> ZFfun A B f = B
[PROOF STEP]
by(simp add: SET_def MakeCat_def SET'_def ZFfunCod) |
{-# OPTIONS --cubical --safe --no-import-sorts #-}
module Cubical.Algebra.CommAlgebra.Base where
open import Cubical.Foundations.Prelude
open import Cubical.Foundations.Equiv
open import Cubical.Foundations.HLevels
open import Cubical.Foundations.Isomorphism
open import Cubical.Foundations.Equiv.HalfAdjoint
open import Cubical.Foundations.SIP
open import Cubical.Data.Sigma
open import Cubical.Reflection.StrictEquiv
open import Cubical.Structures.Axioms
open import Cubical.Algebra.Semigroup
open import Cubical.Algebra.Monoid
open import Cubical.Algebra.CommRing
open import Cubical.Algebra.Ring
open import Cubical.Algebra.Algebra hiding (⟨_⟩a)
private
variable
ℓ ℓ′ : Level
record IsCommAlgebra (R : CommRing {ℓ}) {A : Type ℓ}
(0a : A) (1a : A)
(_+_ : A → A → A) (_·_ : A → A → A) (-_ : A → A)
(_⋆_ : ⟨ R ⟩ → A → A) : Type ℓ where
constructor iscommalgebra
field
isAlgebra : IsAlgebra (CommRing→Ring R) 0a 1a _+_ _·_ -_ _⋆_
·-comm : (x y : A) → x · y ≡ y · x
open IsAlgebra isAlgebra public
record CommAlgebra (R : CommRing {ℓ}) : Type (ℓ-suc ℓ) where
constructor commalgebra
field
Carrier : Type ℓ
0a : Carrier
1a : Carrier
_+_ : Carrier → Carrier → Carrier
_·_ : Carrier → Carrier → Carrier
-_ : Carrier → Carrier
_⋆_ : ⟨ R ⟩ → Carrier → Carrier
isCommAlgebra : IsCommAlgebra R 0a 1a _+_ _·_ -_ _⋆_
open IsCommAlgebra isCommAlgebra public
module _ {R : CommRing {ℓ}} where
open CommRingStr (snd R) using (1r) renaming (_+_ to _+r_; _·_ to _·s_)
⟨_⟩a : CommAlgebra R → Type ℓ
⟨_⟩a = CommAlgebra.Carrier
CommAlgebra→Algebra : (A : CommAlgebra R) → Algebra (CommRing→Ring R)
CommAlgebra→Algebra (commalgebra Carrier _ _ _ _ _ _ (iscommalgebra isAlgebra ·-comm)) =
algebra Carrier _ _ _ _ _ _ isAlgebra
CommAlgebra→CommRing : (A : CommAlgebra R) → CommRing {ℓ}
CommAlgebra→CommRing (commalgebra Carrier _ _ _ _ _ _
(iscommalgebra isAlgebra ·-comm)) =
_ , commringstr _ _ _ _ _ (iscommring (IsAlgebra.isRing isAlgebra) ·-comm)
CommAlgebraEquiv : (R S : CommAlgebra R) → Type ℓ
CommAlgebraEquiv R S = AlgebraEquiv (CommAlgebra→Algebra R) (CommAlgebra→Algebra S)
makeIsCommAlgebra : {A : Type ℓ} {0a 1a : A}
{_+_ _·_ : A → A → A} { -_ : A → A} {_⋆_ : ⟨ R ⟩ → A → A}
(isSet-A : isSet A)
(+-assoc : (x y z : A) → x + (y + z) ≡ (x + y) + z)
(+-rid : (x : A) → x + 0a ≡ x)
(+-rinv : (x : A) → x + (- x) ≡ 0a)
(+-comm : (x y : A) → x + y ≡ y + x)
(·-assoc : (x y z : A) → x · (y · z) ≡ (x · y) · z)
(·-lid : (x : A) → 1a · x ≡ x)
(·-ldist-+ : (x y z : A) → (x + y) · z ≡ (x · z) + (y · z))
(·-comm : (x y : A) → x · y ≡ y · x)
(⋆-assoc : (r s : ⟨ R ⟩) (x : A) → (r ·s s) ⋆ x ≡ r ⋆ (s ⋆ x))
(⋆-ldist : (r s : ⟨ R ⟩) (x : A) → (r +r s) ⋆ x ≡ (r ⋆ x) + (s ⋆ x))
(⋆-rdist : (r : ⟨ R ⟩) (x y : A) → r ⋆ (x + y) ≡ (r ⋆ x) + (r ⋆ y))
(⋆-lid : (x : A) → 1r ⋆ x ≡ x)
(⋆-lassoc : (r : ⟨ R ⟩) (x y : A) → (r ⋆ x) · y ≡ r ⋆ (x · y))
→ IsCommAlgebra R 0a 1a _+_ _·_ -_ _⋆_
makeIsCommAlgebra {A} {0a} {1a} {_+_} {_·_} { -_} {_⋆_} isSet-A
+-assoc +-rid +-rinv +-comm
·-assoc ·-lid ·-ldist-+ ·-comm
⋆-assoc ⋆-ldist ⋆-rdist ⋆-lid ⋆-lassoc
= iscommalgebra
(makeIsAlgebra
isSet-A
+-assoc +-rid +-rinv +-comm
·-assoc
(λ x → x · 1a ≡⟨ ·-comm _ _ ⟩ 1a · x ≡⟨ ·-lid _ ⟩ x ∎)
·-lid
(λ x y z → x · (y + z) ≡⟨ ·-comm _ _ ⟩
(y + z) · x ≡⟨ ·-ldist-+ _ _ _ ⟩
(y · x) + (z · x) ≡⟨ cong (λ u → (y · x) + u) (·-comm _ _) ⟩
(y · x) + (x · z) ≡⟨ cong (λ u → u + (x · z)) (·-comm _ _) ⟩
(x · y) + (x · z) ∎)
·-ldist-+
⋆-assoc
⋆-ldist
⋆-rdist
⋆-lid
⋆-lassoc
λ r x y → r ⋆ (x · y) ≡⟨ cong (λ u → r ⋆ u) (·-comm _ _) ⟩
r ⋆ (y · x) ≡⟨ sym (⋆-lassoc _ _ _) ⟩
(r ⋆ y) · x ≡⟨ ·-comm _ _ ⟩
x · (r ⋆ y) ∎)
·-comm
module CommAlgebraΣTheory (R : CommRing {ℓ}) where
open AlgebraΣTheory (CommRing→Ring R)
CommAlgebraAxioms : (A : Type ℓ) (s : RawAlgebraStructure A) → Type ℓ
CommAlgebraAxioms A (_+_ , _·_ , 1a , _⋆_) = AlgebraAxioms A (_+_ , _·_ , 1a , _⋆_)
× ((x y : A) → x · y ≡ y · x)
CommAlgebraStructure : Type ℓ → Type ℓ
CommAlgebraStructure = AxiomsStructure RawAlgebraStructure CommAlgebraAxioms
CommAlgebraΣ : Type (ℓ-suc ℓ)
CommAlgebraΣ = TypeWithStr ℓ CommAlgebraStructure
CommAlgebraEquivStr : StrEquiv CommAlgebraStructure ℓ
CommAlgebraEquivStr = AxiomsEquivStr RawAlgebraEquivStr CommAlgebraAxioms
isPropCommAlgebraAxioms : (A : Type ℓ) (s : RawAlgebraStructure A)
→ isProp (CommAlgebraAxioms A s)
isPropCommAlgebraAxioms A (_+_ , _·_ , 1a , _⋆_) =
isPropΣ (isPropAlgebraAxioms A (_+_ , _·_ , 1a , _⋆_))
λ isAlgebra → isPropΠ2 λ _ _ → (isSetAlgebraΣ (A , _ , isAlgebra)) _ _
CommAlgebra→CommAlgebraΣ : CommAlgebra R → CommAlgebraΣ
CommAlgebra→CommAlgebraΣ (commalgebra _ _ _ _ _ _ _ (iscommalgebra G C)) =
_ , _ , Algebra→AlgebraΣ (algebra _ _ _ _ _ _ _ G) .snd .snd , C
CommAlgebraΣ→CommAlgebra : CommAlgebraΣ → CommAlgebra R
CommAlgebraΣ→CommAlgebra (_ , _ , G , C) =
commalgebra _ _ _ _ _ _ _ (iscommalgebra (AlgebraΣ→Algebra (_ , _ , G) .Algebra.isAlgebra) C)
CommAlgebraIsoCommAlgebraΣ : Iso (CommAlgebra R) CommAlgebraΣ
CommAlgebraIsoCommAlgebraΣ =
iso CommAlgebra→CommAlgebraΣ CommAlgebraΣ→CommAlgebra (λ _ → refl) (λ _ → refl)
commAlgebraUnivalentStr : UnivalentStr CommAlgebraStructure CommAlgebraEquivStr
commAlgebraUnivalentStr = axiomsUnivalentStr _ isPropCommAlgebraAxioms rawAlgebraUnivalentStr
CommAlgebraΣPath : (A B : CommAlgebraΣ) → (A ≃[ CommAlgebraEquivStr ] B) ≃ (A ≡ B)
CommAlgebraΣPath = SIP commAlgebraUnivalentStr
CommAlgebraEquivΣ : (A B : CommAlgebra R) → Type ℓ
CommAlgebraEquivΣ A B = CommAlgebra→CommAlgebraΣ A ≃[ CommAlgebraEquivStr ] CommAlgebra→CommAlgebraΣ B
CommAlgebraPath : (A B : CommAlgebra R) → (CommAlgebraEquiv A B) ≃ (A ≡ B)
CommAlgebraPath A B =
CommAlgebraEquiv A B ≃⟨ strictIsoToEquiv AlgebraEquivΣPath ⟩
CommAlgebraEquivΣ A B ≃⟨ CommAlgebraΣPath _ _ ⟩
CommAlgebra→CommAlgebraΣ A ≡ CommAlgebra→CommAlgebraΣ B
≃⟨ isoToEquiv (invIso (congIso CommAlgebraIsoCommAlgebraΣ)) ⟩
A ≡ B ■
CommAlgebraPath : (R : CommRing {ℓ}) → (A B : CommAlgebra R) → (CommAlgebraEquiv A B) ≃ (A ≡ B)
CommAlgebraPath = CommAlgebraΣTheory.CommAlgebraPath
|
function [J, grad] = costFunction(theta, X, y)
%COSTFUNCTION Compute cost and gradient for logistic regression
% J = COSTFUNCTION(theta, X, y) computes the cost of using theta as the
% parameter for logistic regression and the gradient of the cost
% w.r.t. to the parameters.
% Initialize some useful values
m = length(y); % number of training examples
% You need to return the following variables correctly
J = 0;
grad = zeros(size(theta));
% ====================== YOUR CODE HERE ======================
% Instructions: Compute the cost of a particular choice of theta.
% You should set J to the cost.
% Compute the partial derivatives and set grad to the partial
% derivatives of the cost w.r.t. each parameter in theta
%
% Note: grad should have the same dimensions as theta
%
J = (1 / m) * sum( -y'*log(sigmoid(X*theta)) - (1-y)'*log( 1 - sigmoid(X*theta)) );
% Vectorized gradient: X' * residual has the same dimensions as theta.
grad = (1 / m) * (X' * (sigmoid(X*theta) - y));
% =============================================================
end
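% Example usage (hypothetical data; assumes sigmoid.m is on the path):
%   X = [ones(3,1), (1:3)']; y = [0; 0; 1]; theta = zeros(2,1);
%   [J, grad] = costFunction(theta, X, y);   % J = log(2) ~ 0.6931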
|
include defs
# entfkw - place Fortran keywords in symbol table.
# Place in the following table any long (> 6 characters)
# keyword that is used by your Fortran compiler:
subroutine entfkw
include COMMON_BLOCKS
string sequiv "equivalence"
call enter (sequiv, 0, fkwtbl)
end
|
\documentclass[a4paper,12pt]{article}
%% Language and font encodings
\usepackage[english]{babel}
\usepackage[utf8x]{inputenc}
%% Sets page size and margins
%\usepackage[a4paper,top=1cm,bottom=4mm,left=3cm,right=0mm,marginparwidth=1.75cm]{geometry}
%\usepackage[a4paper,top=0cm,bottom=3mm,left=3mm,right=0mm,marginparwidth=0cm]{geometry}
\usepackage{geometry} % for \newgeometry{} and \restoregeometry
%% Useful packages
\usepackage{amsmath}
\usepackage{graphicx}
\usepackage[colorinlistoftodos]{todonotes}
\usepackage[colorlinks=true, allcolors=blue]{hyperref}
\usepackage{url}%for \path{}
\usepackage{lscape}
\pagenumbering{gobble}%no numbers in the webpage
%\usepackage[counterclockwise]{rotating} %sidewaysfigure
\graphicspath{{/home/map479/mxochicale/github/DataSets/emmov/plots_timeseries/of-postprocessing/}}
\title{Postprocessing data for OpenFace}
\author{Miguel P Xochicale \\
School of Engineering\\
University of Birmingham, UK}
\begin{document}
\newgeometry{top=20mm,bottom=20mm,left=20mm,right=20mm}
\maketitle
%\begin{abstract}
%
%\end{abstract}
\section{Description}
Postprocessing techniques (e.g. Savitzky-Golay Filter, Zero Mean Unit Variance, Principal Component Analysis)
for OpenFace raw data \cite{baltrusaitis2016}.
\section{r-scripts and data paths}
\path{~/github/emmov-pilotstudy/code/r-scripts/postprocessing} \\
\path{postprocessing-openface.R}
is used to generate the plots which are saved at: \\
\path{~/github/DataSets/emmov/plots_timeseries/of-postprocessing/*}
The data is available in \cite{mxochicale2018}.
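For reference, here is a minimal R sketch of such a pipeline (assuming the \texttt{signal} package for the Savitzky-Golay filter; the column names are illustrative and need not match those used in \path{postprocessing-openface.R}):
\begin{verbatim}
library(signal)                       # provides sgolayfilt()
raw <- read.csv("openface_output.csv")
# Savitzky-Golay smoothing: 3rd-order polynomial, 11-frame window
sg <- apply(raw[, c("gaze_0_x", "gaze_0_y", "gaze_0_z")], 2,
            function(col) sgolayfilt(col, p = 3, n = 11))
zmuv <- scale(sg)      # Zero Mean Unit Variance
pc <- prcomp(zmuv)$x   # Principal Component scores
\end{verbatim}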
\bibliographystyle{apalike}
\bibliography{references}
%\newpage
%\newgeometry{top=0cm,bottom=3mm,left=3mm,right=0mm,marginparwidth=0cm}
%\restoregeometry
\begin{figure}
\centering
\includegraphics[width=\textwidth]{pre-of-timeseries_base}
\caption{S-G confidence and success.}
\end{figure}
\begin{figure}
\centering
\includegraphics[width=\textwidth]{pre-of-timeseries_sgzmuvbase}
\caption{S-G,ZMUV confidence and success.}
\end{figure}
\begin{figure}
\centering
\includegraphics[width=\textwidth]{pre-of-timeseries_sggaze_xy}
\caption{S-G gaze xy.}
\end{figure}
\begin{figure}
\centering
\includegraphics[width=\textwidth]{pre-of-timeseries_sggaze_z}
\caption{S-G gaze z.}
\end{figure}
\begin{figure}
\centering
\includegraphics[width=\textwidth]{pre-of-timeseries_sgzmuvgaze}
\caption{S-G,ZMUV gaze.}
\end{figure}
\begin{figure}
\centering
\includegraphics[width=\textwidth]{pre-of-timeseries_PCgaze}
\caption{Principal Components for gaze.}
\end{figure}
\begin{figure}
\centering
\includegraphics[width=\textwidth]{pre-of-timeseries_sgzmuvPCgaze}
\caption{S-G,ZMUV PC gaze.}
\end{figure}
\begin{figure}
\centering
\includegraphics[width=\textwidth]{pre-of-timeseries_sgposeRxyz}
\caption{S-G pose Rotation xyz.}
\end{figure}
\begin{figure}
\centering
\includegraphics[width=\textwidth]{pre-of-timeseries_sgzmuvposeR}
\caption{S-G,ZMUV pose R.}
\end{figure}
\begin{figure}
\centering
\includegraphics[width=\textwidth]{pre-of-timeseries_sgposeTxy}
\caption{S-G pose Translation xyz.}
\end{figure}
\begin{figure}
\centering
\includegraphics[width=\textwidth]{pre-of-timeseries_sgposeTz}
\caption{S-G pose Translation z.}
\end{figure}
\begin{figure}
\centering
\includegraphics[width=\textwidth]{pre-of-timeseries_sgzmuvposeT}
\caption{S-G,ZMUV pose T.}
\end{figure}
\begin{figure}
\centering
\includegraphics[width=\textwidth]{pre-of-timeseries_PCpose}
\caption{Principal Components for pose.}
\end{figure}
\begin{figure}
\centering
\includegraphics[width=\textwidth]{pre-of-timeseries_sgzmuvPCpose}
\caption{S-G,ZMUV PC pose.}
\end{figure}
\begin{figure}
\centering
\includegraphics[width=\textwidth]{pre-of-timeseries_sgzmuvPCxlm}
\caption{S-G,ZMUV PC x landmarks.}
\end{figure}
\begin{figure}
\centering
\includegraphics[width=\textwidth]{pre-of-timeseries_sgzmuvPCylm}
\caption{S-G,ZMUV PC y landmarks.}
\end{figure}
\begin{figure}
\centering
\includegraphics[width=\textwidth]{pre-of-timeseries_sgzmuvshape}
\caption{S-G,ZMUV shape.}
\end{figure}
\begin{figure}
\centering
\includegraphics[width=\textwidth]{pre-of-timeseries_sgzmuvx012}
\caption{S-G,ZMUV x\_0, x\_1, x\_2.}
\end{figure}
\begin{figure}
\centering
\includegraphics[width=\textwidth]{pre-of-timeseries_sgzmuvy012}
\caption{S-G,ZMUV y\_0, y\_1, y\_2.}
\end{figure}
\end{document}
|
module FaceRecognition
using Images
using FileIO
using StatsBase
using MultivariateStats
include("load_data.jl")
include("eigenfaces.jl")
export load_images,
Model,
train_model,
image_to_eigenfaces,
eigenfaces_to_image,
reconstruct_image,
reconstruct_images,
get_difference,
get_eigenfaces,
save,
load
end
|
In 1879, a group of Catholic residents of the Madison area met to plan the building of a church. At the meeting, a total of $426.75 was subscribed; additional contributions of $322.86 were obtained from citizens of Madison. In January 1880, the church's trustees spent $100 for five acres (2.0 ha) on a hill at the southeastern edge of town. In the spring, a party of parishioners drove their ox teams to Wisner, about 30 miles (50 km) northeast of Madison, for the first load of lumber for the new church. The 30-by-40-foot (9 m × 12 m) frame structure, with a capacity of 100, was completed in November 1881; the total cost was $957.61, leaving $208.00 owed to the carpenter. The new church was dedicated to St. Leonard of Port Maurice, an 18th-century Franciscan priest, preacher, ascetic, and writer venerated as the patron saint of parish missions.
|
function example
% Two-state model of gene expression
%
% Reaction network:
% 0 -> mRNA
% mRNA -> mRNA + protein
% mRNA -> 0
% protein -> 0
tspan = [0, 10000]; %seconds
x0 = [0, 0]; %mRNA, protein
stoich_matrix = [ 1 0 ; %transcription
0 1 ; %translation
-1 0 ; %mRNA degradation
0 -1 ]; %protein degradation
% Rate constants
p.kR = 0.1;%0.01;
p.kP = 0.1;%1;
p.gR = 0.1;
p.gP = 0.002;
% Run simulation
%[t,x] = directMethod(stoich_matrix, @propensities_2state, tspan, x0, p);
[t,x] = firstReactionMethod(stoich_matrix, @propensities_2state, tspan, x0, p);
% Plot time course
figure(gcf);
stairs(t,x);
set(gca,'XLim',tspan);
xlabel('time (s)');
ylabel('molecules');
legend({'mRNA','protein'});
end
function a = propensities_2state(x, p)
mRNA = x(1);
protein = x(2);
a = [p.kR;
p.kP*mRNA;
p.gR*mRNA;
p.gP*protein];
end
|
module test_demo
use demo, only : substitute, getline
use testdrive, only : error_type, unittest_type, new_unittest, check
implicit none
private
public :: collect_demo
contains
!> Collect all exported unit tests
subroutine collect_demo(testsuite)
!> Collection of tests
type(unittest_type), allocatable, intent(out) :: testsuite(:)
testsuite = [new_unittest("substitute", test_substitute)]
end subroutine collect_demo
!> Check substitution of a single line
subroutine test_substitute(error)
!> Error handling
type(error_type), allocatable, intent(out) :: error
integer :: input, output, stat
character(len=:), allocatable :: line
open(newunit=input, status="scratch")
write(input, '(a)') "This is a valid test"
rewind(input)
open(newunit=output, status="scratch")
call substitute(input, output, "test", "example")
close(input)
rewind(output)
call getline(output, line, stat)
close(output)
call check(error, line, "This is a valid example")
end subroutine test_substitute
end module test_demo
program tester
use, intrinsic :: iso_fortran_env, only : error_unit
use testdrive, only : run_testsuite
use test_demo, only : collect_demo
implicit none
integer :: stat
stat = 0
call run_testsuite(collect_demo, error_unit, stat)
if (stat > 0) then
write(error_unit, '(i0, 1x, a)') stat, "test(s) failed!"
error stop
end if
end program tester
|
addStandardLegend <- function(x, limits, cols, units = '%',
plot_loc = c(0.32, 0.67, 0.7, 0.9),
ylabposScling=1, ...) {
add_raster_legend2(cols, limits, dat = x, srt = 0,
transpose = FALSE, plot_loc = plot_loc,
ylabposScling=ylabposScling, oneSideLabels = TRUE,
xpd = NA, adj = 1.0, units = units, ...)
}
|
WHAT: This 6-day, 5-night retreat includes an exciting three-day challenge-by-choice ropes course, camp fires, and creative expression, all while making friends with Uniteens from throughout the Northwestern Region.
WHO: All Uniteens must be at least 11 years old, have finished 6th grade, and be no more than an 8th-grade graduate. Uniteens must have attended at least 4 Uniteen meetings or be a new member of the church. All Uniteens and counselors must sign the Heart Agreements, agreeing to follow these Group Agreements.
Adults wanting to attend Unitreat, please contact Chris Castaldi, Unitreat Site Director, phone 503.913.0898, or [email protected]. A volunteer & information packet will be sent to you.
All adult staff participate in the Saturday training & team building by either arriving Friday night June 16 or by 9 AM Saturday June 17. Staff members need to arrange for assistance from other church groups or individuals in transporting their Uniteen group members who are attending, so all youth arrive on Sunday. Can you volunteer for a part of the week? We would love that too! Contact Chris for the volunteer packet & further info.
Click here to view the registration application. |
# In-place core of `esf_sum`: fills S (length n+1) with e_0, ..., e_n of x.
function esf_sum!(S::AbstractArray{T,1}, x::AbstractArray{T,1}) where T <: Real
n = length(x)
fill!(S,zero(T))
S[1] = one(T)
@inbounds for col in 1:n
for r in 1:col
row = col - r + 1
S[row+1] = x[col] * S[row] + S[row+1]
end
end
end
"""
esf_sum(x)
Compute the elementary symmetric functions of order k = 1, ..., n
where n = length(x)
# Examples
```julia-repl
julia> esf_sum([3.5118, .6219, .2905, .8450, 1.8648])
6-element Array{Float64,1}:
1.0
7.134
16.9493
16.7781
7.05289
0.999736
```
"""
function esf_sum(x::AbstractArray{T,1}) where T <: Real
n = length(x)
S = Vector{T}(undef,n+1)
esf_sum!(S,x)
return S
end
function esf_sum_reg!(S::AbstractArray{T,1}, x::AbstractArray{T,1}) where T <: Real
n = length(x)
fill!(S,zero(T))
S[1] = one(T)
@inbounds for col in 1:n
for r in 1:col
row = col - r + 1
S[row+1] = ((col-row)/col) * S[row+1] + (row/col) * x[col] * S[row]
end
end
end
"""
esf_sum_reg(x)
Compute the elementary symmetric functions of order k = 1, ..., n
where n = length(x). Values are computed regularized by the binomial
coefficient binomial(n, k) to prevent over/under-flow.
# Examples
```julia-repl
julia> esf_sum_reg([3.5118, .6219, .2905, .8450, 1.8648])
6-element Array{Float64,1}:
1.0
1.4268
1.69493
1.67781
1.41058
0.999736
```
"""
function esf_sum_reg(x::AbstractArray{T,1}) where T <: Real
n = length(x)
S = Vector{T}(undef,n+1)
esf_sum_reg!(S,x)
return S
end
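# Consistency check (REPL): rescaling by the binomial coefficient recovers
# the unregularized values, S[k+1] == binomial(n, k) * Sreg[k+1]:
#
# julia> x = [3.5118, .6219, .2905, .8450, 1.8648]; n = length(x);
#
# julia> all(esf_sum(x)[k+1] ≈ binomial(n, k) * esf_sum_reg(x)[k+1] for k in 0:n)
# true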
# In-place core of `esf_sum_log`: accumulates the ESFs in the log domain
# using log-sum-exp updates to avoid overflow.
function esf_sum_log!(S::AbstractArray{T,1}, x::AbstractArray{T,1}) where T <: Real
n = length(x)
fill!(S,-Inf)
S[1] = zero(T)
@inbounds for col in 1:n
for r in 1:col
row = col - r + 1
Sr = S[row] + log(x[col])
Sr1 = S[row+1]
if (Sr1 > Sr) && (Sr1 > zero(T))
S[row+1] = Sr1 + log1p(exp(Sr - Sr1))
elseif (Sr >= Sr1) && (Sr > zero(T))
S[row+1] = Sr + log1p(exp(Sr1-Sr))
else
S[row+1] = log(exp(Sr1) + exp(Sr))
end
end
end
end
"""
    esf_sum_log(x)

Compute the elementary symmetric functions of order k = 1, ..., n in the
log domain, returning `log.(esf_sum(x))`. Assumes all entries of `x` are
strictly positive.
"""
function esf_sum_log(x::AbstractArray{T,1}) where T <: Real
    n = length(x)
    S = Vector{T}(undef,n+1)
    esf_sum_log!(S,x)
    return S
end
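# Sanity check (REPL): exponentiating recovers `esf_sum`, given x .> 0:
#
# julia> x = [3.5118, .6219, .2905, .8450, 1.8648];
#
# julia> exp.(esf_sum_log(x)) ≈ esf_sum(x)
# true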
#Regularized summation algorithm where one input is zeroed out (for computing derivatives)
function esf_sum_reg2!(S::AbstractArray{T,1}, x::AbstractArray{T,1}) where T <: Real
n = length(x)
fill!(S,zero(T))
S[1] = one(T)
adj = 0
@inbounds for col in 1:n
if x[col] == 0.0
adj += 1
continue
end
for r in 1:(col-adj)
row = (col-adj) - r + 1
S[row+1] = (col-adj-row)/(col-adj+1) * S[row+1] + (row+1)/(col-adj+1) * x[col] * S[row]
end
S[1] *= (col-adj)/(col-adj+1)
end
end
#Regularized summation algorithm where two inputs are zeroed out (for computing derivatives)
function esf_sum_reg3!(S::AbstractArray{T,1}, x::AbstractArray{T,1}) where T <: Real
n = length(x)
fill!(S,zero(T))
S[1] = one(T)
adj = 0
@inbounds for col in 1:n
if x[col] == 0.0
adj += 1
continue
end
for r in 1:(col-adj)
row = (col-adj) - r + 1
S[row+1] = (col-adj-row)/(col-adj+2) * S[row+1] + (row+2)/(col-adj+2) * x[col] * S[row]
end
S[1] *= (col-adj) / (col-adj+2)
end
end
function esf_sum_dervs_1(x::AbstractVector{T}) where T <: Real
n = length(x)
S = Vector{T}(undef,n+1)
P = Array{T,2}(undef,n,n+1)
esf_sum!(S, x)
esf_sum_dervs_1!(P, x)
return S, P
end
function esf_sum_dervs_1!(P::AbstractArray{T,2}, x::AbstractVector{T}) where T <: Real
n = length(x)
xj=zero(T)
@inbounds for j in 1:n
xj = x[j]
x[j] = zero(T)
@views esf_sum!(P[j,:], x)
x[j] = xj
end
end
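# Sanity check (REPL): row j of P holds the ESFs of x with x[j] zeroed, so
# P[j, k] == ∂e_k/∂x_j; compare against a forward finite difference:
#
# julia> x = [3.5118, .6219, .2905, .8450, 1.8648]; j = 2; h = 1e-6;
#
# julia> S, P = esf_sum_dervs_1(x);
#
# julia> xph = copy(x); xph[j] += h;
#
# julia> fd = (esf_sum(xph) .- S) ./ h;
#
# julia> all(isapprox(fd[k+1], P[j, k]; rtol=1e-3) for k in 1:length(x))
# true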
function esf_sum_dervs_1_reg(x::AbstractVector{T}) where T <: Real
n = length(x)
S = Vector{T}(undef,n+1)
P = Array{T,2}(undef,n,n+1)
esf_sum_reg!(S, x)
esf_sum_dervs_1_reg!(P, x)
return S, P
end
function esf_sum_dervs_1_reg!(P::AbstractArray{T,2}, x::AbstractVector{T}) where T <: Real
n = length(x)
xj=zero(T)
@inbounds for j in 1:n
xj = x[j]
x[j] = zero(T)
@views esf_sum_reg2!(P[j,:], x)
x[j] = xj
end
end
function esf_sum_dervs_2(x::AbstractVector{T}) where T <: Real
n = length(x)
S = Vector{T}(undef,n+1)
H = Array{T,3}(undef,n,n,n+1)
esf_sum!(S, x)
esf_sum_dervs_2!(H, x)
return S, H
end
function esf_sum_dervs_2!(H::AbstractArray{T,3}, x::AbstractVector{T}) where T <: Real
n = length(x)
xj=zero(T)
@inbounds for j in 1:n
xj = x[j]
x[j] = zero(T)
for k in j:n
xk = x[k]
x[k] = zero(T)
@views esf_sum!(H[j,k,:], x)
H[k,j,:] .= H[j,k,:]
x[k] = xk
end
x[j] = xj
end
end
function esf_sum_dervs_2_reg(x::AbstractVector{T}) where T <: Real
n = length(x)
S = Vector{T}(undef,n+1)
H = Array{T,3}(undef,n,n,n+1)
esf_sum_reg!(S, x)
esf_sum_dervs_2_reg!(H, x)
return S, H
end
function esf_sum_dervs_2_reg!(H::AbstractArray{T,3}, x::AbstractVector{T}) where T <: Real
n = length(x)
xj=zero(T)
@inbounds for j in 1:n
xj = x[j]
x[j] = zero(T)
@views esf_sum_reg2!(H[j,j,:], x)
for k in j+1:n
xk = x[k]
x[k] = zero(T)
@views esf_sum_reg3!(H[j,k,:], x)
H[k,j,:] .= H[j,k,:]
x[k] = xk
end
x[j] = xj
end
end
function esf_dc_fft!(S::AbstractArray{T,1}, tempS::AbstractArray{T,2},
x::AbstractArray{T,1}, si::AbstractArray{T,1},
group_sizes::AbstractArray{D,1},
group_start_idx::AbstractArray{D,1}) where {T <: Real, D <: Integer}
n = length(x)
M = size(tempS)[2]
#convolve initial subsets
@inbounds for g in 1:M
@views esf_sum!(tempS[1:(group_sizes[g]+1),g],
x[group_start_idx[g]:(group_start_idx[g]+group_sizes[g]-1)])
group_sizes[g] += 1
end
while M > 1
next_avail_col = 1
@inbounds for g in 1:2:M
m = group_sizes[g] + group_sizes[g+1] - 1
si .= zero(T)
@views _filt!(S[1:m], tempS[1:group_sizes[g],g], tempS[1:m,g+1], si[1:(group_sizes[g]-1)])
@views copyto!(tempS[1:m,next_avail_col], S[1:m])
group_sizes[next_avail_col] = m
next_avail_col += 1
end
M = div(M,2)
end
end
function esf_dc_fft(x::AbstractArray{T,1}, k::D=2) where {T <: Real, D <: Integer}
n = length(x)
k = min(floor(D, log2(n)), k)
M = 2^k
L = n/M
r = rem(n,M) / M
group_sizes = [fill(floor(D, L), D(M*(1-r))); fill(ceil(D, L), D(M*r))]
group_start_idx = cumsum(group_sizes) .- (group_sizes .- 1)
S = Vector{T}(undef,n+1)
tempS = zeros(T, n+1,M)
si = zeros(T, n)
esf_dc_fft!(S, tempS, x, si, group_sizes, group_start_idx)
return S
end
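# `_filt!` is defined elsewhere in this package; it is assumed to act as a
# direct-form FIR filter (like DSP.jl's filt!), so each merge step is a
# linear convolution of two ESF coefficient vectors. Given that helper:
#
# julia> x = rand(16);
#
# julia> esf_dc_fft(x, 2) ≈ esf_sum(x)
# true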
function esf_dc_fft_reg!(S::AbstractArray{T,1}, tempS::AbstractArray{T,2},
x::AbstractArray{T,1}, si::AbstractArray{T,1},
group_sizes::AbstractArray{D,1},
group_start_idx::AbstractArray{D,1}) where {T <: Real, D <: Integer}
n = length(x)
M = size(tempS)[2]
tempS .= zero(T)
#convolve initial subsets
@inbounds for g in 1:M
@views esf_sum_reg!(tempS[1:(group_sizes[g]+1),g],
x[group_start_idx[g]:(group_start_idx[g]+group_sizes[g]-1)])
group_sizes[g] += 1
end
while M > 1
next_avail_col = 1
@inbounds for g in 1:2:M
m = group_sizes[g] + group_sizes[g+1] - 1
si .= zero(T)
@views _filt_reg!(S[1:m], tempS[1:group_sizes[g],g], tempS[1:m,g+1], si[1:(group_sizes[g]-1)], group_sizes[g]-1,group_sizes[g+1]-1)
@views copyto!(tempS[1:m,next_avail_col], S[1:m])
group_sizes[next_avail_col] = m
next_avail_col += 1
end
M = div(M,2)
end
end
function esf_dc_fft_reg(x::AbstractArray{T,1}, k::D=2) where {T <: Real, D <: Integer}
n = length(x)
k = min(floor(D, log2(n)), k)
M = 2^k
L = n/M
r = rem(n,M) / M
group_sizes = [fill(floor(D, L), D(M*(1-r))); fill(ceil(D, L), D(M*r))]
group_start_idx = cumsum(group_sizes) .- (group_sizes .- 1)
S = Vector{T}(undef,n+1)
tempS = zeros(T, n+1,M)
si = zeros(T, n)
esf_dc_fft_reg!(S, tempS, x, si, group_sizes, group_start_idx)
return S
end
function esf_diff!(S::AbstractArray{T,1}, P::AbstractArray{T,2}, x::AbstractArray{T,1}) where T <: Real
n = length(x)
fill!(S, zero(T))
S[1] = one(T)
S[2] = sum(x)
P[:,1] .= one(T)
@inbounds for i in 2:n
for j in 1:n
P[j,i] = S[i] - x[j] * P[j,i-1]
S[i+1] = S[i+1] + x[j] * P[j,i]
end
S[i+1] /= i
end
end
"""
esf_diff(x)
Compute the elementary symmetric functions of order k = 1,...,n
where n = length(x) using the difference algorithm. Also computes
the matrix of first derivatives.
# Examples
```julia-repl
julia> esf_diff([3.5118, .6219, .2905, .8450, 1.8648])[1]
6-element Array{Float64,1}:
1.0
7.134
16.9493
16.7781
7.05289
0.999736
julia> esf_diff([3.5118, .6219, .2905, .8450, 1.8648])[2]
5×5 Array{Float64,2}:
1.0 3.6222 4.22884 1.92728 0.284679
1.0 6.5121 12.8994 8.75598 1.60755
1.0 6.8435 14.9612 12.4319 3.44143
1.0 6.289 11.6351 6.94648 1.18312
1.0 5.2692 7.12328 3.49463 0.536109
```
"""
function esf_diff(x::AbstractArray{T,1}) where T <: Real
n = length(x)
S = Vector{T}(undef,n+1)
P = Matrix{T}(undef,n,n)
esf_diff!(S,P,x)
return S, P
end
# Recompute only the derivative table P from an already-computed S
# (difference algorithm); S itself is left untouched.
function esf_diff_onlyP!(S::AbstractArray{T,1}, P::AbstractArray{T,2}, x::AbstractArray{T,1}) where T <: Real
    n = length(x)
    P[:,1] .= one(T)
    @inbounds for i in 2:n
        for j in 1:n
            P[j,i] = S[i] - x[j] * P[j,i-1]
        end
    end
end
# Regularized variant of `esf_diff!`: entries are scaled by binomial
# coefficients to prevent over/under-flow (cf. `esf_sum_reg`).
function esf_diff_reg!(S::AbstractArray{T,1}, P::AbstractArray{T,2}, x::AbstractArray{T,1}) where T <: Real
n = length(x)
fill!(S, zero(T))
S[1] = one(T)
S[2] = sum(x)/n
P[:,1] .= one(T)/n
@inbounds for i in 2:n
for j in 1:n
P[j,i] = (S[i] - x[j] * P[j,i-1]) * (i / (n+1-i))
S[i+1] = S[i+1] + x[j] * P[j,i]
end
S[i+1] /= i
end
end
function esf_diff_reg(x::AbstractArray{T,1}) where T <: Real
n = length(x)
S = Vector{T}(undef,n+1)
P = Matrix{T}(undef,n,n)
esf_diff_reg!(S,P,x)
return S, P
end
function esf_diff_updown(x::AbstractArray{T,1}) where T<:Real
    n = length(x)
    S = Vector{T}(undef,n+1)
    P = Matrix{T}(undef,n,n+1)
    err = esf_diff_updown!(S,P,x)
    return S, P[:,1:end-1], err
end
function esf_diff_updown!(S::AbstractArray{T,1}, P::AbstractArray{T,2},
                          x::AbstractArray{T,1}) where T <: Real
    n = length(x)
    fill!(S, zero(T))
    S[1] = one(T)
    S[2] = sum(x)
    S[n+1] = prod(x)
    P[:,1] .= one(T)
    P[:,end] .= zero(T)
    mid = div(n, 2) + 1
    # Upward (summation) recurrence from the bottom up to the midpoint
    @inbounds for i in 2:(mid-1)
        for j in 1:n
            P[j,i] = S[i] - x[j] * P[j,i-1]
            S[i+1] = S[i+1] + x[j] * P[j,i]
        end
        S[i+1] /= i
    end
    S_mid_est = S[mid]
    S[mid] = zero(T)
    # Downward (division) recurrence from the top back to the midpoint
    @inbounds for ii in mid:n
        i = n - ii + mid
        for j in 1:n
            P[j,i] = (S[i+1] - P[j,i+1]) / x[j]
            S[i] += P[j,i]
        end
        S[i] /= (n - (i-1))
    end
    # Relative disagreement between the two recurrences' estimates of S[mid],
    # returned as a conditioning diagnostic.
    return abs(one(T) - S_mid_est / S[mid])
end |
From Test Require Import tactic.
Section FOFProblem.
Variable Universe : Set.
Variable UniverseElement : Universe.
Variable wd_ : Universe -> Universe -> Prop.
Variable col_ : Universe -> Universe -> Universe -> Prop.
Variable col_swap1_1 : (forall A B C : Universe, (col_ A B C -> col_ B A C)).
Variable col_swap2_2 : (forall A B C : Universe, (col_ A B C -> col_ B C A)).
Variable col_triv_3 : (forall A B : Universe, col_ A B B).
Variable wd_swap_4 : (forall A B : Universe, (wd_ A B -> wd_ B A)).
Variable col_trans_5 : (forall P Q A B C : Universe, ((wd_ P Q /\ (col_ P Q A /\ (col_ P Q B /\ col_ P Q C))) -> col_ A B C)).
Theorem pipo_6 : (forall O A0 A1 B P Q R : Universe, ((wd_ B A0 /\ (wd_ A0 A1 /\ (wd_ A0 O /\ (wd_ A1 O /\ (wd_ P Q /\ (wd_ R Q /\ (wd_ O B /\ (col_ P Q R /\ (col_ A0 A1 B /\ col_ O A0 A1))))))))) -> col_ O A0 B)).
Proof.
time tac.
Qed.
End FOFProblem.
|