/- Copyright (c) 2015 Microsoft Corporation. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Leonardo de Moura, Mario Carneiro ! This file was ported from Lean 3 source module logic.equiv.defs ! leanprover-community/mathlib commit c4658a649d216f57e99621708b09dcb3dcccbd23 ! Please do not edit these lines, except to modify the commit id ! if you have ported upstream changes. -/ import Mathlib.Data.FunLike.Equiv import Mathlib.Data.Quot import Mathlib.Init.Data.Bool.Lemmas import Mathlib.Logic.Unique import Mathlib.Tactic.Conv import Mathlib.Tactic.Relation.Rfl import Mathlib.Tactic.Relation.Symm import Mathlib.Tactic.Relation.Trans import Mathlib.Tactic.Simps.Basic import Mathlib.Tactic.Substs /-! # Equivalence between types In this file we define two types: * `Equiv α β` a.k.a. `α ≃ β`: a bijective map `α → β` bundled with its inverse map; we use this (and not equality!) to express that various `Type`s or `Sort`s are equivalent. * `Equiv.Perm α`: the group of permutations `α ≃ α`. More lemmas about `Equiv.Perm` can be found in `GroupTheory.Perm`. Then we define * canonical isomorphisms between various types: e.g., - `Equiv.refl α` is the identity map interpreted as `α ≃ α`; * operations on equivalences: e.g., - `Equiv.symm e : β ≃ α` is the inverse of `e : α ≃ β`; - `Equiv.trans e₁ e₂ : α ≃ γ` is the composition of `e₁ : α ≃ β` and `e₂ : β ≃ γ` (note the order of the arguments!); * definitions that transfer some instances along an equivalence. By convention, we transfer instances from right to left. - `Equiv.inhabited` takes `e : α ≃ β` and `[Inhabited β]` and returns `Inhabited α`; - `Equiv.unique` takes `e : α ≃ β` and `[Unique β]` and returns `Unique α`; - `Equiv.decidableEq` takes `e : α ≃ β` and `[DecidableEq β]` and returns `DecidableEq α`. More definitions of this kind can be found in other files. E.g., `Data.Equiv.TransferInstance` does it for many algebraic type classes like `Group`, `Module`, etc. Many more such isomorphisms and operations are defined in `Logic.Equiv.Basic`. ## Tags equivalence, congruence, bijective map -/ open Function universe u v w z variable {α : Sort u} {β : Sort v} {γ : Sort w} /-- `α ≃ β` is the type of functions `α → β` with a two-sided inverse. -/ structure Equiv (α : Sort _) (β : Sort _) where toFun : α → β invFun : β → α left_inv : LeftInverse invFun toFun right_inv : RightInverse invFun toFun #align equiv Equiv infixl:25 " ≃ " => Equiv /-- Turn an element of a type `F` satisfying `EquivLike F α β` into an actual `Equiv`. This is declared as the default coercion from `F` to `α ≃ β`. -/ @[coe] def EquivLike.toEquiv {F} [EquivLike F α β] (f : F) : α ≃ β where toFun := f invFun := EquivLike.inv f left_inv := EquivLike.left_inv f right_inv := EquivLike.right_inv f /-- Any type satisfying `EquivLike` can be cast into `Equiv` via `EquivLike.toEquiv`. -/ instance {F} [EquivLike F α β] : CoeTC F (α ≃ β) := ⟨EquivLike.toEquiv⟩ /-- `Perm α` is the type of bijections from `α` to itself. -/ @[reducible] def Equiv.Perm (α : Sort _) := Equiv α α #align equiv.perm Equiv.Perm namespace Equiv instance : EquivLike (α ≃ β) α β where coe := toFun inv := invFun left_inv := left_inv right_inv := right_inv coe_injective' e₁ e₂ h₁ h₂ := by cases e₁; cases e₂; congr /-- Helper instance when inference gets stuck on following the normal chain `EquivLike → EmbeddingLike → FunLike → CoeFun`.
-/ instance : FunLike (α ≃ β) α (fun _ => β) := EmbeddingLike.toFunLike @[simp] theorem coe_fn_mk (f : α → β) (g l r) : (Equiv.mk f g l r : α → β) = f := rfl #align equiv.coe_fn_mk Equiv.coe_fn_mk /-- The map `(r ≃ s) → (r → s)` is injective. -/ theorem coe_fn_injective : @Function.Injective (α ≃ β) (α → β) (fun e => e) := FunLike.coe_injective' #align equiv.coe_fn_injective Equiv.coe_fn_injective protected theorem coe_inj {e₁ e₂ : α ≃ β} : (e₁ : α → β) = e₂ ↔ e₁ = e₂ := @FunLike.coe_fn_eq _ _ _ _ e₁ e₂ #align equiv.coe_inj Equiv.coe_inj @[ext] theorem ext {f g : Equiv α β} (H : ∀ x, f x = g x) : f = g := FunLike.ext f g H #align equiv.ext Equiv.ext protected theorem congr_arg {f : Equiv α β} {x x' : α} : x = x' → f x = f x' := FunLike.congr_arg f #align equiv.congr_arg Equiv.congr_arg protected theorem congr_fun {f g : Equiv α β} (h : f = g) (x : α) : f x = g x := FunLike.congr_fun h x #align equiv.congr_fun Equiv.congr_fun theorem ext_iff {f g : Equiv α β} : f = g ↔ ∀ x, f x = g x := FunLike.ext_iff #align equiv.ext_iff Equiv.ext_iff @[ext] theorem Perm.ext {σ τ : Equiv.Perm α} (H : ∀ x, σ x = τ x) : σ = τ := Equiv.ext H #align equiv.perm.ext Equiv.Perm.ext protected theorem Perm.congr_arg {f : Equiv.Perm α} {x x' : α} : x = x' → f x = f x' := Equiv.congr_arg #align equiv.perm.congr_arg Equiv.Perm.congr_arg protected theorem Perm.congr_fun {f g : Equiv.Perm α} (h : f = g) (x : α) : f x = g x := Equiv.congr_fun h x #align equiv.perm.congr_fun Equiv.Perm.congr_fun theorem Perm.ext_iff {σ τ : Equiv.Perm α} : σ = τ ↔ ∀ x, σ x = τ x := Equiv.ext_iff #align equiv.perm.ext_iff Equiv.Perm.ext_iff /-- Any type is equivalent to itself. -/ @[refl] protected def refl (α : Sort _) : α ≃ α := ⟨id, id, fun _ => rfl, fun _ => rfl⟩ #align equiv.refl Equiv.refl instance inhabited' : Inhabited (α ≃ α) := ⟨Equiv.refl α⟩ /-- Inverse of an equivalence `e : α ≃ β`. -/ @[symm] protected def symm (e : α ≃ β) : β ≃ α := ⟨e.invFun, e.toFun, e.right_inv, e.left_inv⟩ #align equiv.symm Equiv.symm /-- See Note [custom simps projection] -/ def Simps.symm_apply (e : α ≃ β) : β → α := e.symm #align equiv.simps.symm_apply Equiv.Simps.symm_apply initialize_simps_projections Equiv (toFun → apply, invFun → symm_apply) -- Porting note: -- Added these lemmas as restatements of `left_inv` and `right_inv`, -- which use the coercions. -- We might even consider switching the names, and having these as a public API. theorem left_inv' (e : α ≃ β) : Function.LeftInverse e.symm e := e.left_inv theorem right_inv' (e : α ≃ β) : Function.RightInverse e.symm e := e.right_inv /-- Composition of equivalences `e₁ : α ≃ β` and `e₂ : β ≃ γ`. -/ @[trans] protected def trans (e₁ : α ≃ β) (e₂ : β ≃ γ) : α ≃ γ := ⟨e₂ ∘ e₁, e₁.symm ∘ e₂.symm, e₂.left_inv.comp e₁.left_inv, e₂.right_inv.comp e₁.right_inv⟩ #align equiv.trans Equiv.trans @[simps] instance : Trans Equiv Equiv Equiv where trans := Equiv.trans -- porting note: this is not a syntactic tautology any more because -- the coercion from `e` to a function is now `FunLike.coe` not `e.toFun` @[simp] theorem toFun_as_coe (e : α ≃ β) : e.toFun = e := rfl #align equiv.to_fun_as_coe Equiv.toFun_as_coe -- porting note: `simp` should prove this using `toFun_as_coe`, but it doesn't. -- This might be a bug in `simp` -- see https://github.com/leanprover/lean4/issues/1937 -- If this issue is fixed then the simp linter probably will start complaining, and -- this theorem can be deleted hopefully without breaking any `simp` proofs. 
@[simp] theorem toFun_as_coe_apply (e : α ≃ β) (x : α) : e.toFun x = e x := rfl @[simp] theorem invFun_as_coe (e : α ≃ β) : e.invFun = e.symm := rfl #align equiv.inv_fun_as_coe Equiv.invFun_as_coe protected theorem injective (e : α ≃ β) : Injective e := EquivLike.injective e #align equiv.injective Equiv.injective protected theorem surjective (e : α ≃ β) : Surjective e := EquivLike.surjective e #align equiv.surjective Equiv.surjective protected theorem bijective (e : α ≃ β) : Bijective e := EquivLike.bijective e #align equiv.bijective Equiv.bijective protected theorem subsingleton (e : α ≃ β) [Subsingleton β] : Subsingleton α := e.injective.subsingleton #align equiv.subsingleton Equiv.subsingleton protected theorem subsingleton.symm (e : α ≃ β) [Subsingleton α] : Subsingleton β := e.symm.injective.subsingleton #align equiv.subsingleton.symm Equiv.subsingleton.symm theorem subsingleton_congr (e : α ≃ β) : Subsingleton α ↔ Subsingleton β := ⟨fun _ => e.symm.subsingleton, fun _ => e.subsingleton⟩ #align equiv.subsingleton_congr Equiv.subsingleton_congr instance equiv_subsingleton_cod [Subsingleton β] : Subsingleton (α ≃ β) := ⟨fun _ _ => Equiv.ext fun _ => Subsingleton.elim _ _⟩ instance equiv_subsingleton_dom [Subsingleton α] : Subsingleton (α ≃ β) := ⟨fun f _ => Equiv.ext fun _ => @Subsingleton.elim _ (Equiv.subsingleton.symm f) _ _⟩ instance permUnique [Subsingleton α] : Unique (Perm α) := uniqueOfSubsingleton (Equiv.refl α) theorem Perm.subsingleton_eq_refl [Subsingleton α] (e : Perm α) : e = Equiv.refl α := Subsingleton.elim _ _ #align equiv.perm.subsingleton_eq_refl Equiv.Perm.subsingleton_eq_refl /-- Transfer `DecidableEq` across an equivalence. -/ protected def decidableEq (e : α ≃ β) [DecidableEq β] : DecidableEq α := e.injective.decidableEq #align equiv.decidable_eq Equiv.decidableEq theorem nonempty_congr (e : α ≃ β) : Nonempty α ↔ Nonempty β := Nonempty.congr e e.symm #align equiv.nonempty_congr Equiv.nonempty_congr protected theorem nonempty (e : α ≃ β) [Nonempty β] : Nonempty α := e.nonempty_congr.mpr ‹_› #align equiv.nonempty Equiv.nonempty /-- If `α ≃ β` and `β` is inhabited, then so is `α`. -/ protected def inhabited [Inhabited β] (e : α ≃ β) : Inhabited α := ⟨e.symm default⟩ #align equiv.inhabited Equiv.inhabited /-- If `α ≃ β` and `β` is a singleton type, then so is `α`. -/ protected def unique [Unique β] (e : α ≃ β) : Unique α := e.symm.surjective.unique #align equiv.unique Equiv.unique /-- Equivalence between equal types. -/ protected def cast {α β : Sort _} (h : α = β) : α ≃ β := ⟨cast h, cast h.symm, fun _ => by cases h; rfl, fun _ => by cases h; rfl⟩ #align equiv.cast Equiv.cast @[simp] theorem coe_fn_symm_mk (f : α → β) (g l r) : ((Equiv.mk f g l r).symm : β → α) = g := rfl #align equiv.coe_fn_symm_mk Equiv.coe_fn_symm_mk @[simp] theorem coe_refl : (Equiv.refl α : α → α) = id := rfl #align equiv.coe_refl Equiv.coe_refl /-- This cannot be a `simp` lemma as it incorrectly matches against `e : α ≃ synonym α`, when `synonym α` is semireducible. This makes a mess of `multiplicative.of_add` etc.
-/ theorem Perm.coe_subsingleton {α : Type _} [Subsingleton α] (e : Perm α) : (e : α → α) = id := by rw [Perm.subsingleton_eq_refl e, coe_refl] #align equiv.perm.coe_subsingleton Equiv.Perm.coe_subsingleton -- porting note: marking this as `@[simp]` because `simp` doesn't fire on `coe_refl` -- in an expression such as `Equiv.refl a x` @[simp] theorem refl_apply (x : α) : Equiv.refl α x = x := rfl #align equiv.refl_apply Equiv.refl_apply @[simp] theorem coe_trans (f : α ≃ β) (g : β ≃ γ) : (f.trans g : α → γ) = g ∘ f := rfl #align equiv.coe_trans Equiv.coe_trans -- porting note: marking this as `@[simp]` because `simp` doesn't fire on `coe_trans` -- in an expression such as `Equiv.trans f g x` @[simp] theorem trans_apply (f : α ≃ β) (g : β ≃ γ) (a : α) : (f.trans g) a = g (f a) := rfl #align equiv.trans_apply Equiv.trans_apply @[simp] theorem apply_symm_apply (e : α ≃ β) (x : β) : e (e.symm x) = x := e.right_inv x #align equiv.apply_symm_apply Equiv.apply_symm_apply @[simp] theorem symm_apply_apply (e : α ≃ β) (x : α) : e.symm (e x) = x := e.left_inv x #align equiv.symm_apply_apply Equiv.symm_apply_apply @[simp] theorem symm_comp_self (e : α ≃ β) : e.symm ∘ e = id := funext e.symm_apply_apply #align equiv.symm_comp_self Equiv.symm_comp_self @[simp] theorem self_comp_symm (e : α ≃ β) : e ∘ e.symm = id := funext e.apply_symm_apply #align equiv.self_comp_symm Equiv.self_comp_symm @[simp] theorem symm_trans_apply (f : α ≃ β) (g : β ≃ γ) (a : γ) : (f.trans g).symm a = f.symm (g.symm a) := rfl #align equiv.symm_trans_apply Equiv.symm_trans_apply -- The `simp` attribute is needed to make this a `dsimp` lemma. -- `simp` will always rewrite with `equiv.symm_symm` before this has a chance to fire. @[simp, nolint simpNF] theorem symm_symm_apply (f : α ≃ β) (b : α) : f.symm.symm b = f b := rfl #align equiv.symm_symm_apply Equiv.symm_symm_apply theorem apply_eq_iff_eq (f : α ≃ β) {x y : α} : f x = f y ↔ x = y := EquivLike.apply_eq_iff_eq f #align equiv.apply_eq_iff_eq Equiv.apply_eq_iff_eq theorem apply_eq_iff_eq_symm_apply (f : α ≃ β) : f x = y ↔ x = f.symm y := by conv_lhs => rw [← apply_symm_apply f y] rw [apply_eq_iff_eq] #align equiv.apply_eq_iff_eq_symm_apply Equiv.apply_eq_iff_eq_symm_apply @[simp] theorem cast_apply {α β} (h : α = β) (x : α) : Equiv.cast h x = cast h x := rfl #align equiv.cast_apply Equiv.cast_apply @[simp] theorem cast_symm {α β} (h : α = β) : (Equiv.cast h).symm = Equiv.cast h.symm := rfl #align equiv.cast_symm Equiv.cast_symm @[simp] theorem cast_refl {α} (h : α = α := rfl) : Equiv.cast h = Equiv.refl α := rfl #align equiv.cast_refl Equiv.cast_refl @[simp] theorem cast_trans {α β γ} (h : α = β) (h2 : β = γ) : (Equiv.cast h).trans (Equiv.cast h2) = Equiv.cast (h.trans h2) := ext fun x => by substs h h2; rfl #align equiv.cast_trans Equiv.cast_trans theorem cast_eq_iff_heq {α β} (h : α = β) {a : α} {b : β} : Equiv.cast h a = b ↔ HEq a b := by subst h; simp [coe_refl] #align equiv.cast_eq_iff_heq Equiv.cast_eq_iff_heq theorem symm_apply_eq {α β} (e : α ≃ β) {x y} : e.symm x = y ↔ x = e y := ⟨fun H => by simp [H.symm], fun H => by simp [H]⟩ #align equiv.symm_apply_eq Equiv.symm_apply_eq theorem eq_symm_apply {α β} (e : α ≃ β) {x y} : y = e.symm x ↔ e y = x := (eq_comm.trans e.symm_apply_eq).trans eq_comm #align equiv.eq_symm_apply Equiv.eq_symm_apply @[simp] theorem symm_symm (e : α ≃ β) : e.symm.symm = e := by cases e; rfl #align equiv.symm_symm Equiv.symm_symm @[simp] theorem trans_refl (e : α ≃ β) : e.trans (Equiv.refl β) = e := by cases e; rfl #align equiv.trans_refl 
Equiv.trans_refl @[simp] theorem refl_symm : (Equiv.refl α).symm = Equiv.refl α := rfl #align equiv.refl_symm Equiv.refl_symm @[simp] theorem refl_trans (e : α ≃ β) : (Equiv.refl α).trans e = e := by cases e; rfl #align equiv.refl_trans Equiv.refl_trans @[simp] theorem symm_trans_self (e : α ≃ β) : e.symm.trans e = Equiv.refl β := ext <| by simp #align equiv.symm_trans_self Equiv.symm_trans_self @[simp] theorem self_trans_symm (e : α ≃ β) : e.trans e.symm = Equiv.refl α := ext <| by simp #align equiv.self_trans_symm Equiv.self_trans_symm theorem trans_assoc {δ} (ab : α ≃ β) (bc : β ≃ γ) (cd : γ ≃ δ) : (ab.trans bc).trans cd = ab.trans (bc.trans cd) := Equiv.ext fun _ => rfl #align equiv.trans_assoc Equiv.trans_assoc theorem leftInverse_symm (f : Equiv α β) : LeftInverse f.symm f := f.left_inv #align equiv.left_inverse_symm Equiv.leftInverse_symm theorem rightInverse_symm (f : Equiv α β) : Function.RightInverse f.symm f := f.right_inv #align equiv.right_inverse_symm Equiv.rightInverse_symm theorem injective_comp (e : α ≃ β) (f : β → γ) : Injective (f ∘ e) ↔ Injective f := EquivLike.injective_comp e f #align equiv.injective_comp Equiv.injective_comp theorem comp_injective (f : α → β) (e : β ≃ γ) : Injective (e ∘ f) ↔ Injective f := EquivLike.comp_injective f e #align equiv.comp_injective Equiv.comp_injective theorem surjective_comp (e : α ≃ β) (f : β → γ) : Surjective (f ∘ e) ↔ Surjective f := EquivLike.surjective_comp e f #align equiv.surjective_comp Equiv.surjective_comp theorem comp_surjective (f : α → β) (e : β ≃ γ) : Surjective (e ∘ f) ↔ Surjective f := EquivLike.comp_surjective f e #align equiv.comp_surjective Equiv.comp_surjective theorem bijective_comp (e : α ≃ β) (f : β → γ) : Bijective (f ∘ e) ↔ Bijective f := EquivLike.bijective_comp e f #align equiv.bijective_comp Equiv.bijective_comp theorem comp_bijective (f : α → β) (e : β ≃ γ) : Bijective (e ∘ f) ↔ Bijective f := EquivLike.comp_bijective f e #align equiv.comp_bijective Equiv.comp_bijective /-- If `α` is equivalent to `β` and `γ` is equivalent to `δ`, then the type of equivalences `α ≃ γ` is equivalent to the type of equivalences `β ≃ δ`. 
-/ def equivCongr (ab : α ≃ β) (cd : γ ≃ δ) : (α ≃ γ) ≃ (β ≃ δ) where toFun ac := (ab.symm.trans ac).trans cd invFun bd := ab.trans <| bd.trans <| cd.symm left_inv ac := by ext x; simp only [trans_apply, comp_apply, symm_apply_apply] right_inv ac := by ext x; simp only [trans_apply, comp_apply, apply_symm_apply] #align equiv.equiv_congr Equiv.equivCongr @[simp] theorem equivCongr_refl {α β} : (Equiv.refl α).equivCongr (Equiv.refl β) = Equiv.refl (α ≃ β) := by ext; rfl #align equiv.equiv_congr_refl Equiv.equivCongr_refl @[simp] theorem equivCongr_symm {δ} (ab : α ≃ β) (cd : γ ≃ δ) : (ab.equivCongr cd).symm = ab.symm.equivCongr cd.symm := by ext; rfl #align equiv.equiv_congr_symm Equiv.equivCongr_symm @[simp] theorem equivCongr_trans {δ ε ζ} (ab : α ≃ β) (de : δ ≃ ε) (bc : β ≃ γ) (ef : ε ≃ ζ) : (ab.equivCongr de).trans (bc.equivCongr ef) = (ab.trans bc).equivCongr (de.trans ef) := by ext; rfl #align equiv.equiv_congr_trans Equiv.equivCongr_trans @[simp] theorem equivCongr_refl_left {α β γ} (bg : β ≃ γ) (e : α ≃ β) : (Equiv.refl α).equivCongr bg e = e.trans bg := rfl #align equiv.equiv_congr_refl_left Equiv.equivCongr_refl_left @[simp] theorem equivCongr_refl_right {α β} (ab e : α ≃ β) : ab.equivCongr (Equiv.refl β) e = ab.symm.trans e := rfl #align equiv.equiv_congr_refl_right Equiv.equivCongr_refl_right @[simp] theorem equivCongr_apply_apply {δ} (ab : α ≃ β) (cd : γ ≃ δ) (e : α ≃ γ) (x) : ab.equivCongr cd e x = cd (e (ab.symm x)) := rfl #align equiv.equiv_congr_apply_apply Equiv.equivCongr_apply_apply section permCongr variable {α' β' : Type _} (e : α' ≃ β') /-- If `α` is equivalent to `β`, then `Perm α` is equivalent to `Perm β`. -/ def permCongr : Perm α' ≃ Perm β' := equivCongr e e #align equiv.perm_congr Equiv.permCongr theorem permCongr_def (p : Equiv.Perm α') : e.permCongr p = (e.symm.trans p).trans e := rfl #align equiv.perm_congr_def Equiv.permCongr_def @[simp] theorem permCongr_refl : e.permCongr (Equiv.refl _) = Equiv.refl _ := by simp [permCongr_def] #align equiv.perm_congr_refl Equiv.permCongr_refl @[simp] theorem permCongr_symm : e.permCongr.symm = e.symm.permCongr := rfl #align equiv.perm_congr_symm Equiv.permCongr_symm @[simp] theorem permCongr_apply (p : Equiv.Perm α') (x) : e.permCongr p x = e (p (e.symm x)) := rfl #align equiv.perm_congr_apply Equiv.permCongr_apply theorem permCongr_symm_apply (p : Equiv.Perm β') (x) : e.permCongr.symm p x = e.symm (p (e x)) := rfl #align equiv.perm_congr_symm_apply Equiv.permCongr_symm_apply theorem permCongr_trans (p p' : Equiv.Perm α') : (e.permCongr p).trans (e.permCongr p') = e.permCongr (p.trans p') := by ext; simp only [trans_apply, comp_apply, permCongr_apply, symm_apply_apply] #align equiv.perm_congr_trans Equiv.permCongr_trans end permCongr /-- Two empty types are equivalent. -/ def equivOfIsEmpty (α β : Sort _) [IsEmpty α] [IsEmpty β] : α ≃ β := ⟨isEmptyElim, isEmptyElim, isEmptyElim, isEmptyElim⟩ #align equiv.equiv_of_is_empty Equiv.equivOfIsEmpty /-- If `α` is an empty type, then it is equivalent to the `Empty` type. -/ def equivEmpty (α : Sort u) [IsEmpty α] : α ≃ Empty := equivOfIsEmpty α _ #align equiv.equiv_empty Equiv.equivEmpty /-- If `α` is an empty type, then it is equivalent to the `PEmpty` type in any universe. -/ def equivPEmpty (α : Sort v) [IsEmpty α] : α ≃ PEmpty.{u} := equivOfIsEmpty α _ #align equiv.equiv_pempty Equiv.equivPEmpty /-- `α` is equivalent to an empty type iff `α` is empty. 
-/ def equivEmptyEquiv (α : Sort u) : α ≃ Empty ≃ IsEmpty α := ⟨fun e => Function.isEmpty e, @equivEmpty α, fun e => ext fun x => (e x).elim, fun _ => rfl⟩ #align equiv.equiv_empty_equiv Equiv.equivEmptyEquiv /-- The `Sort` of proofs of a false proposition is equivalent to `PEmpty`. -/ def propEquivPEmpty {p : Prop} (h : ¬p) : p ≃ PEmpty := @equivPEmpty p <| IsEmpty.prop_iff.2 h #align equiv.prop_equiv_pempty Equiv.propEquivPEmpty /-- If both `α` and `β` have a unique element, then `α ≃ β`. -/ def equivOfUnique (α β : Sort _) [Unique.{u} α] [Unique.{v} β] : α ≃ β where toFun := default invFun := default left_inv _ := Subsingleton.elim _ _ right_inv _ := Subsingleton.elim _ _ #align equiv.equiv_of_unique Equiv.equivOfUnique /-- If `α` has a unique element, then it is equivalent to any `PUnit`. -/ def equivPUnit (α : Sort u) [Unique α] : α ≃ PUnit.{v} := equivOfUnique α _ #align equiv.equiv_punit Equiv.equivPUnit /-- The `Sort` of proofs of a true proposition is equivalent to `PUnit`. -/ def propEquivPUnit {p : Prop} (h : p) : p ≃ PUnit.{0} := @equivPUnit p <| uniqueProp h #align equiv.prop_equiv_punit Equiv.propEquivPUnit /-- `ULift α` is equivalent to `α`. -/ @[simps (config := { fullyApplied := false }) apply] protected def ulift {α : Type v} : ULift.{u} α ≃ α := ⟨ULift.down, ULift.up, ULift.up_down, fun _ => rfl⟩ #align equiv.ulift Equiv.ulift #align equiv.ulift_apply Equiv.ulift_apply /-- `PLift α` is equivalent to `α`. -/ @[simps (config := { fullyApplied := false }) apply] protected def plift : PLift α ≃ α := ⟨PLift.down, PLift.up, PLift.up_down, PLift.down_up⟩ #align equiv.plift Equiv.plift #align equiv.plift_apply Equiv.plift_apply /-- equivalence of propositions is the same as iff -/ def ofIff {P Q : Prop} (h : P ↔ Q) : P ≃ Q := ⟨h.mp, h.mpr, fun _ => rfl, fun _ => rfl⟩ #align equiv.of_iff Equiv.ofIff /-- If `α₁` is equivalent to `α₂` and `β₁` is equivalent to `β₂`, then the type of maps `α₁ → β₁` is equivalent to the type of maps `α₂ → β₂`. -/ -- porting note: removing `congr` attribute @[simps apply] def arrowCongr {α₁ β₁ α₂ β₂ : Sort _} (e₁ : α₁ ≃ α₂) (e₂ : β₁ ≃ β₂) : (α₁ → β₁) ≃ (α₂ → β₂) where toFun f := e₂ ∘ f ∘ e₁.symm invFun f := e₂.symm ∘ f ∘ e₁ left_inv f := funext fun x => by simp only [comp_apply, symm_apply_apply] right_inv f := funext fun x => by simp only [comp_apply, apply_symm_apply] #align equiv.arrow_congr_apply Equiv.arrowCongr_apply #align equiv.arrow_congr Equiv.arrowCongr theorem arrowCongr_comp {α₁ β₁ γ₁ α₂ β₂ γ₂ : Sort _} (ea : α₁ ≃ α₂) (eb : β₁ ≃ β₂) (ec : γ₁ ≃ γ₂) (f : α₁ → β₁) (g : β₁ → γ₁) : arrowCongr ea ec (g ∘ f) = arrowCongr eb ec g ∘ arrowCongr ea eb f := by ext; simp only [comp, arrowCongr_apply, eb.symm_apply_apply] #align equiv.arrow_congr_comp Equiv.arrowCongr_comp @[simp] theorem arrowCongr_refl {α β : Sort _} : arrowCongr (Equiv.refl α) (Equiv.refl β) = Equiv.refl (α → β) := rfl #align equiv.arrow_congr_refl Equiv.arrowCongr_refl @[simp] theorem arrowCongr_trans (e₁ : α₁ ≃ α₂) (e₁' : β₁ ≃ β₂) (e₂ : α₂ ≃ α₃) (e₂' : β₂ ≃ β₃) : arrowCongr (e₁.trans e₂) (e₁'.trans e₂') = (arrowCongr e₁ e₁').trans (arrowCongr e₂ e₂') := rfl #align equiv.arrow_congr_trans Equiv.arrowCongr_trans @[simp] theorem arrowCongr_symm (e₁ : α₁ ≃ α₂) (e₂ : β₁ ≃ β₂) : (arrowCongr e₁ e₂).symm = arrowCongr e₁.symm e₂.symm := rfl #align equiv.arrow_congr_symm Equiv.arrowCongr_symm /-- A version of `Equiv.arrowCongr` in `Type`, rather than `Sort`. 
The `equiv_rw` tactic is not able to use the default `Sort` level `Equiv.arrowCongr`, because Lean's universe rules will not unify `?l_1` with `imax (1 ?m_1)`. -/ -- porting note: removing `congr` attribute @[simps! apply] def arrowCongr' {α₁ β₁ α₂ β₂ : Type _} (hα : α₁ ≃ α₂) (hβ : β₁ ≃ β₂) : (α₁ → β₁) ≃ (α₂ → β₂) := Equiv.arrowCongr hα hβ #align equiv.arrow_congr' Equiv.arrowCongr' #align equiv.arrow_congr'_apply Equiv.arrowCongr'_apply @[simp] theorem arrowCongr'_refl {α β : Type _} : arrowCongr' (Equiv.refl α) (Equiv.refl β) = Equiv.refl (α → β) := rfl #align equiv.arrow_congr'_refl Equiv.arrowCongr'_refl @[simp] theorem arrowCongr'_trans (e₁ : α₁ ≃ α₂) (e₁' : β₁ ≃ β₂) (e₂ : α₂ ≃ α₃) (e₂' : β₂ ≃ β₃) : arrowCongr' (e₁.trans e₂) (e₁'.trans e₂') = (arrowCongr' e₁ e₁').trans (arrowCongr' e₂ e₂') := rfl #align equiv.arrow_congr'_trans Equiv.arrowCongr'_trans @[simp] theorem arrowCongr'_symm (e₁ : α₁ ≃ α₂) (e₂ : β₁ ≃ β₂) : (arrowCongr' e₁ e₂).symm = arrowCongr' e₁.symm e₂.symm := rfl #align equiv.arrow_congr'_symm Equiv.arrowCongr'_symm /-- Conjugate a map `f : α → α` by an equivalence `α ≃ β`. -/ @[simps! apply] def conj (e : α ≃ β) : (α → α) ≃ (β → β) := arrowCongr e e #align equiv.conj Equiv.conj #align equiv.conj_apply Equiv.conj_apply @[simp] theorem conj_refl : conj (Equiv.refl α) = Equiv.refl (α → α) := rfl #align equiv.conj_refl Equiv.conj_refl @[simp] theorem conj_symm (e : α ≃ β) : e.conj.symm = e.symm.conj := rfl #align equiv.conj_symm Equiv.conj_symm @[simp] theorem conj_trans (e₁ : α ≃ β) (e₂ : β ≃ γ) : (e₁.trans e₂).conj = e₁.conj.trans e₂.conj := rfl #align equiv.conj_trans Equiv.conj_trans -- This should not be a simp lemma as long as `(∘)` is reducible: -- when `(∘)` is reducible, Lean can unify `f₁ ∘ f₂` with any `g` using -- `f₁ := g` and `f₂ := λ x, x`. This causes nontermination. theorem conj_comp (e : α ≃ β) (f₁ f₂ : α → α) : e.conj (f₁ ∘ f₂) = e.conj f₁ ∘ e.conj f₂ := by apply arrowCongr_comp #align equiv.conj_comp Equiv.conj_comp theorem eq_comp_symm {α β γ} (e : α ≃ β) (f : β → γ) (g : α → γ) : f = g ∘ e.symm ↔ f ∘ e = g := (e.arrowCongr (Equiv.refl γ)).symm_apply_eq.symm #align equiv.eq_comp_symm Equiv.eq_comp_symm theorem comp_symm_eq {α β γ} (e : α ≃ β) (f : β → γ) (g : α → γ) : g ∘ e.symm = f ↔ g = f ∘ e := (e.arrowCongr (Equiv.refl γ)).eq_symm_apply.symm #align equiv.comp_symm_eq Equiv.comp_symm_eq theorem eq_symm_comp {α β γ} (e : α ≃ β) (f : γ → α) (g : γ → β) : f = e.symm ∘ g ↔ e ∘ f = g := ((Equiv.refl γ).arrowCongr e).eq_symm_apply #align equiv.eq_symm_comp Equiv.eq_symm_comp theorem symm_comp_eq {α β γ} (e : α ≃ β) (f : γ → α) (g : γ → β) : e.symm ∘ g = f ↔ g = e ∘ f := ((Equiv.refl γ).arrowCongr e).symm_apply_eq #align equiv.symm_comp_eq Equiv.symm_comp_eq /-- `PUnit` sorts in any two universes are equivalent. -/ def punitEquivPUnit : PUnit.{v} ≃ PUnit.{w} := ⟨fun _ => .unit, fun _ => .unit, fun ⟨⟩ => rfl, fun ⟨⟩ => rfl⟩ #align equiv.punit_equiv_punit Equiv.punitEquivPUnit /-- `Prop` is noncomputably equivalent to `Bool`. -/ noncomputable def propEquivBool : Prop ≃ Bool where toFun p := @decide p (Classical.propDecidable _) invFun b := b left_inv p := by simp [@Bool.decide_iff p (Classical.propDecidable _)] right_inv b := by cases b <;> simp #align equiv.Prop_equiv_bool Equiv.propEquivBool section /-- The sort of maps to `PUnit.{v}` is equivalent to `PUnit.{w}`. 
-/ def arrowPUnitEquivPUnit (α : Sort _) : (α → PUnit.{v}) ≃ PUnit.{w} := ⟨fun _ => .unit, fun _ _ => .unit, fun _ => rfl, fun _ => rfl⟩ #align equiv.arrow_punit_equiv_punit Equiv.arrowPUnitEquivPUnit /-- If `α` is `Subsingleton` and `a : α`, then the type of dependent functions `Π (i : α), β i` is equivalent to `β a`. -/ @[simps] def piSubsingleton (β : α → Sort _) [Subsingleton α] (a : α) : (∀ a', β a') ≃ β a where toFun := eval a invFun x b := cast (congr_arg β <| Subsingleton.elim a b) x left_inv _ := funext fun b => by rw [Subsingleton.elim b a]; rfl right_inv _ := rfl #align equiv.Pi_subsingleton_apply Equiv.piSubsingleton_apply #align equiv.Pi_subsingleton_symm_apply Equiv.piSubsingleton_symm_apply #align equiv.Pi_subsingleton Equiv.piSubsingleton /-- If `α` has a unique term, then the type of functions `α → β` is equivalent to `β`. -/ @[simps! (config := { fullyApplied := false }) apply] def funUnique (α β) [Unique.{u} α] : (α → β) ≃ β := piSubsingleton _ default #align equiv.fun_unique Equiv.funUnique #align equiv.fun_unique_apply Equiv.funUnique_apply /-- The sort of maps from `PUnit` is equivalent to the codomain. -/ def punitArrowEquiv (α : Sort _) : (PUnit.{u} → α) ≃ α := funUnique PUnit.{u} α #align equiv.punit_arrow_equiv Equiv.punitArrowEquiv /-- The sort of maps from `True` is equivalent to the codomain. -/ def trueArrowEquiv (α : Sort _) : (True → α) ≃ α := funUnique _ _ #align equiv.true_arrow_equiv Equiv.trueArrowEquiv /-- The sort of maps from a type that is `IsEmpty` is equivalent to `PUnit`. -/ def arrowPUnitOfIsEmpty (α β : Sort _) [IsEmpty α] : (α → β) ≃ PUnit.{u} where toFun _ := PUnit.unit invFun _ := isEmptyElim left_inv _ := funext isEmptyElim right_inv _ := rfl #align equiv.arrow_punit_of_is_empty Equiv.arrowPUnitOfIsEmpty /-- The sort of maps from `Empty` is equivalent to `PUnit`. -/ def emptyArrowEquivPUnit (α : Sort _) : (Empty → α) ≃ PUnit.{u} := arrowPUnitOfIsEmpty _ _ #align equiv.empty_arrow_equiv_punit Equiv.emptyArrowEquivPUnit /-- The sort of maps from `PEmpty` is equivalent to `PUnit`. -/ def pemptyArrowEquivPUnit (α : Sort _) : (PEmpty → α) ≃ PUnit.{u} := arrowPUnitOfIsEmpty _ _ #align equiv.pempty_arrow_equiv_punit Equiv.pemptyArrowEquivPUnit /-- The sort of maps from `False` is equivalent to `PUnit`. -/ def falseArrowEquivPUnit (α : Sort _) : (False → α) ≃ PUnit.{u} := arrowPUnitOfIsEmpty _ _ #align equiv.false_arrow_equiv_punit Equiv.falseArrowEquivPUnit end section /-- A `PSigma`-type is equivalent to the corresponding `Sigma`-type. -/ @[simps apply symm_apply] def psigmaEquivSigma {α} (β : α → Type _) : (Σ' i, β i) ≃ Σ i, β i where toFun a := ⟨a.1, a.2⟩ invFun a := ⟨a.1, a.2⟩ left_inv _ := rfl right_inv _ := rfl #align equiv.psigma_equiv_sigma Equiv.psigmaEquivSigma #align equiv.psigma_equiv_sigma_symm_apply Equiv.psigmaEquivSigma_symm_apply #align equiv.psigma_equiv_sigma_apply Equiv.psigmaEquivSigma_apply /-- A `PSigma`-type is equivalent to the corresponding `Sigma`-type. -/ @[simps apply symm_apply] def psigmaEquivSigmaPLift {α} (β : α → Sort _) : (Σ' i, β i) ≃ Σ i : PLift α, PLift (β i.down) where toFun a := ⟨PLift.up a.1, PLift.up a.2⟩ invFun a := ⟨a.1.down, a.2.down⟩ left_inv _ := rfl right_inv _ := rfl #align equiv.psigma_equiv_sigma_plift Equiv.psigmaEquivSigmaPLift #align equiv.psigma_equiv_sigma_plift_symm_apply Equiv.psigmaEquivSigmaPLift_symm_apply #align equiv.psigma_equiv_sigma_plift_apply Equiv.psigmaEquivSigmaPLift_apply /-- A family of equivalences `Π a, β₁ a ≃ β₂ a` generates an equivalence between `Σ' a, β₁ a` and `Σ' a, β₂ a`.
-/ @[simps apply] def psigmaCongrRight {β₁ β₂ : α → Sort _} (F : ∀ a, β₁ a ≃ β₂ a) : (Σ' a, β₁ a) ≃ Σ' a, β₂ a where toFun a := ⟨a.1, F a.1 a.2⟩ invFun a := ⟨a.1, (F a.1).symm a.2⟩ left_inv | ⟨a, b⟩ => congr_arg (PSigma.mk a) <| symm_apply_apply (F a) b right_inv | ⟨a, b⟩ => congr_arg (PSigma.mk a) <| apply_symm_apply (F a) b #align equiv.psigma_congr_right Equiv.psigmaCongrRight #align equiv.psigma_congr_right_apply Equiv.psigmaCongrRight_apply -- Porting note: simp can now simplify the LHS, so I have removed `@[simp]` theorem psigmaCongrRight_trans {α} {β₁ β₂ β₃ : α → Sort _} (F : ∀ a, β₁ a ≃ β₂ a) (G : ∀ a, β₂ a ≃ β₃ a) : (psigmaCongrRight F).trans (psigmaCongrRight G) = psigmaCongrRight fun a => (F a).trans (G a) := rfl #align equiv.psigma_congr_right_trans Equiv.psigmaCongrRight_trans -- Porting note: simp can now simplify the LHS, so I have removed `@[simp]` -- Porting note: simp can now prove this, so I have removed `@[simp]` theorem psigmaCongrRight_refl {α} {β : α → Sort _} : (psigmaCongrRight fun a => Equiv.refl (β a)) = Equiv.refl (Σ' a, β a) := rfl #align equiv.psigma_congr_right_refl Equiv.psigmaCongrRight_refl /-- A family of equivalences `Π a, β₁ a ≃ β₂ a` generates an equivalence between `Σ a, β₁ a` and `Σ a, β₂ a`. -/ @[simps apply] def sigmaCongrRight {α} {β₁ β₂ : α → Type _} (F : ∀ a, β₁ a ≃ β₂ a) : (Σ a, β₁ a) ≃ Σ a, β₂ a where toFun a := ⟨a.1, F a.1 a.2⟩ invFun a := ⟨a.1, (F a.1).symm a.2⟩ left_inv | ⟨a, b⟩ => congr_arg (Sigma.mk a) <| symm_apply_apply (F a) b right_inv | ⟨a, b⟩ => congr_arg (Sigma.mk a) <| apply_symm_apply (F a) b #align equiv.sigma_congr_right Equiv.sigmaCongrRight #align equiv.sigma_congr_right_apply Equiv.sigmaCongrRight_apply -- Porting note: simp can now simplify the LHS, so I have removed `@[simp]` theorem sigmaCongrRight_trans {α} {β₁ β₂ β₃ : α → Type _} (F : ∀ a, β₁ a ≃ β₂ a) (G : ∀ a, β₂ a ≃ β₃ a) : (sigmaCongrRight F).trans (sigmaCongrRight G) = sigmaCongrRight fun a => (F a).trans (G a) := rfl #align equiv.sigma_congr_right_trans Equiv.sigmaCongrRight_trans -- Porting note: simp can now simplify the LHS, so I have removed `@[simp]` theorem sigmaCongrRight_symm {α} {β₁ β₂ : α → Type _} (F : ∀ a, β₁ a ≃ β₂ a) : (sigmaCongrRight F).symm = sigmaCongrRight fun a => (F a).symm := rfl #align equiv.sigma_congr_right_symm Equiv.sigmaCongrRight_symm -- Porting note: simp can now prove this, so I have removed `@[simp]` theorem sigmaCongrRight_refl {α} {β : α → Type _} : (sigmaCongrRight fun a => Equiv.refl (β a)) = Equiv.refl (Σ a, β a) := rfl #align equiv.sigma_congr_right_refl Equiv.sigmaCongrRight_refl /-- A `PSigma` with `Prop` fibers is equivalent to the subtype. -/ def psigmaEquivSubtype {α : Type v} (P : α → Prop) : (Σ' i, P i) ≃ Subtype P where toFun x := ⟨x.1, x.2⟩ invFun x := ⟨x.1, x.2⟩ left_inv _ := rfl right_inv _ := rfl #align equiv.psigma_equiv_subtype Equiv.psigmaEquivSubtype /-- A `Sigma` with `PLift` fibers is equivalent to the subtype. -/ def sigmaPLiftEquivSubtype {α : Type v} (P : α → Prop) : (Σ i, PLift (P i)) ≃ Subtype P := ((psigmaEquivSigma _).symm.trans (psigmaCongrRight fun _ => Equiv.plift)).trans (psigmaEquivSubtype P) #align equiv.sigma_plift_equiv_subtype Equiv.sigmaPLiftEquivSubtype /-- A `Sigma` with `λ i, ULift (PLift (P i))` fibers is equivalent to `{ x // P x }`. Variant of `sigmaPLiftEquivSubtype`. 
-/ def sigmaULiftPLiftEquivSubtype {α : Type v} (P : α → Prop) : (Σ i, ULift (PLift (P i))) ≃ Subtype P := (sigmaCongrRight fun _ => Equiv.ulift).trans (sigmaPLiftEquivSubtype P) #align equiv.sigma_ulift_plift_equiv_subtype Equiv.sigmaULiftPLiftEquivSubtype namespace Perm /-- A family of permutations `Π a, Perm (β a)` generates a permutation `Perm (Σ a, β a)`. -/ @[reducible] def sigmaCongrRight {α} {β : α → Sort _} (F : ∀ a, Perm (β a)) : Perm (Σ a, β a) := Equiv.sigmaCongrRight F #align equiv.perm.sigma_congr_right Equiv.Perm.sigmaCongrRight @[simp] theorem sigmaCongrRight_trans {α} {β : α → Sort _} (F : ∀ a, Perm (β a)) (G : ∀ a, Perm (β a)) : (sigmaCongrRight F).trans (sigmaCongrRight G) = sigmaCongrRight fun a => (F a).trans (G a) := Equiv.sigmaCongrRight_trans F G #align equiv.perm.sigma_congr_right_trans Equiv.Perm.sigmaCongrRight_trans @[simp] theorem sigmaCongrRight_symm {α} {β : α → Sort _} (F : ∀ a, Perm (β a)) : (sigmaCongrRight F).symm = sigmaCongrRight fun a => (F a).symm := Equiv.sigmaCongrRight_symm F #align equiv.perm.sigma_congr_right_symm Equiv.Perm.sigmaCongrRight_symm @[simp] theorem sigmaCongrRight_refl {α} {β : α → Sort _} : (sigmaCongrRight fun a => Equiv.refl (β a)) = Equiv.refl (Σ a, β a) := Equiv.sigmaCongrRight_refl #align equiv.perm.sigma_congr_right_refl Equiv.Perm.sigmaCongrRight_refl end Perm /-- An equivalence `f : α₁ ≃ α₂` generates an equivalence between `Σ a, β (f a)` and `Σ a, β a`. -/ @[simps apply] def sigmaCongrLeft {β : α₂ → Sort _} (e : α₁ ≃ α₂) : (Σ a : α₁, β (e a)) ≃ Σ a : α₂, β a where toFun a := ⟨e a.1, a.2⟩ invFun a := ⟨e.symm a.1, (e.right_inv' a.1).symm ▸ a.2⟩ -- porting note: this was a pretty gnarly match already, and it got worse after porting left_inv := fun ⟨a, b⟩ => match (motive := ∀ a' (h : a' = a), Sigma.mk _ (congr_arg e h.symm ▸ b) = ⟨a, b⟩) e.symm (e a), e.left_inv a with | _, rfl => rfl right_inv := fun ⟨a, b⟩ => match (motive := ∀ a' (h : a' = a), Sigma.mk a' (h.symm ▸ b) = ⟨a, b⟩) e (e.symm a), e.apply_symm_apply _ with | _, rfl => rfl #align equiv.sigma_congr_left_apply Equiv.sigmaCongrLeft_apply #align equiv.sigma_congr_left Equiv.sigmaCongrLeft /-- Transporting a sigma type through an equivalence of the base -/ def sigmaCongrLeft' {α₁ α₂} {β : α₁ → Sort _} (f : α₁ ≃ α₂) : (Σ a : α₁, β a) ≃ Σ a : α₂, β (f.symm a) := (sigmaCongrLeft f.symm).symm #align equiv.sigma_congr_left' Equiv.sigmaCongrLeft' /-- Transporting a sigma type through an equivalence of the base and a family of equivalences of matching fibers -/ def sigmaCongr {α₁ α₂} {β₁ : α₁ → Sort _} {β₂ : α₂ → Sort _} (f : α₁ ≃ α₂) (F : ∀ a, β₁ a ≃ β₂ (f a)) : Sigma β₁ ≃ Sigma β₂ := (sigmaCongrRight F).trans (sigmaCongrLeft f) #align equiv.sigma_congr Equiv.sigmaCongr /-- `Sigma` type with a constant fiber is equivalent to the product. -/ @[simps apply symm_apply] def sigmaEquivProd (α β : Type _) : (Σ _ : α, β) ≃ α × β := ⟨fun a => ⟨a.1, a.2⟩, fun a => ⟨a.1, a.2⟩, fun ⟨_, _⟩ => rfl, fun ⟨_, _⟩ => rfl⟩ #align equiv.sigma_equiv_prod_apply Equiv.sigmaEquivProd_apply #align equiv.sigma_equiv_prod_symm_apply Equiv.sigmaEquivProd_symm_apply #align equiv.sigma_equiv_prod Equiv.sigmaEquivProd /-- If each fiber of a `Sigma` type is equivalent to a fixed type, then the sigma type is equivalent to the product. -/ def sigmaEquivProdOfEquiv {α β} {β₁ : α → Sort _} (F : ∀ a, β₁ a ≃ β) : Sigma β₁ ≃ α × β := (sigmaCongrRight F).trans (sigmaEquivProd α β) #align equiv.sigma_equiv_prod_of_equiv Equiv.sigmaEquivProdOfEquiv
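/- Illustrative sketch, not part of the original file: on elements,
`sigmaEquivProd` and its inverse merely repackage the two components,
so both directions compute by `rfl`. -/
example (p : Σ _ : Nat, Bool) : sigmaEquivProd Nat Bool p = (p.1, p.2) := rfl

example (q : Nat × Bool) : (sigmaEquivProd Nat Bool).symm q = ⟨q.1, q.2⟩ := rfl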
/-- Dependent product of types is associative up to an equivalence. -/ def sigmaAssoc {α : Type _} {β : α → Type _} (γ : ∀ a : α, β a → Type _) : (Σ ab : Σ a : α, β a, γ ab.1 ab.2) ≃ Σ a : α, Σ b : β a, γ a b where toFun x := ⟨x.1.1, ⟨x.1.2, x.2⟩⟩ invFun x := ⟨⟨x.1, x.2.1⟩, x.2.2⟩ left_inv _ := rfl right_inv _ := rfl #align equiv.sigma_assoc Equiv.sigmaAssoc end protected theorem exists_unique_congr {p : α → Prop} {q : β → Prop} (f : α ≃ β) (h : ∀ {x}, p x ↔ q (f x)) : (∃! x, p x) ↔ ∃! y, q y := by constructor · rintro ⟨a, ha₁, ha₂⟩ exact ⟨f a, h.1 ha₁, fun b hb => f.symm_apply_eq.1 (ha₂ (f.symm b) (h.2 (by simpa using hb)))⟩ · rintro ⟨b, hb₁, hb₂⟩ exact ⟨f.symm b, h.2 (by simpa using hb₁), fun y hy => (eq_symm_apply f).2 (hb₂ _ (h.1 hy))⟩ #align equiv.exists_unique_congr Equiv.exists_unique_congr protected theorem exists_unique_congr_left' {p : α → Prop} (f : α ≃ β) : (∃! x, p x) ↔ ∃! y, p (f.symm y) := Equiv.exists_unique_congr f fun {_} => by simp #align equiv.exists_unique_congr_left' Equiv.exists_unique_congr_left' protected theorem exists_unique_congr_left {p : β → Prop} (f : α ≃ β) : (∃! x, p (f x)) ↔ ∃! y, p y := (Equiv.exists_unique_congr_left' f.symm).symm #align equiv.exists_unique_congr_left Equiv.exists_unique_congr_left protected theorem forall_congr {p : α → Prop} {q : β → Prop} (f : α ≃ β) (h : ∀ {x}, p x ↔ q (f x)) : (∀ x, p x) ↔ (∀ y, q y) := by constructor <;> intro h₂ x · rw [← f.right_inv x]; apply h.mp; apply h₂ · apply h.mpr; apply h₂ #align equiv.forall_congr Equiv.forall_congr protected theorem forall_congr' {p : α → Prop} {q : β → Prop} (f : α ≃ β) (h : ∀ {x}, p (f.symm x) ↔ q x) : (∀ x, p x) ↔ ∀ y, q y := (Equiv.forall_congr f.symm h.symm).symm #align equiv.forall_congr' Equiv.forall_congr' -- We next build some higher arity versions of `Equiv.forall_congr`. -- Although they appear to just be repeated applications of `Equiv.forall_congr`, -- unification of metavariables works better with these versions. -- In particular, they are necessary in `equiv_rw`. -- (Stopping at ternary functions seems reasonable: at least in 1-categorical mathematics, -- it's rare to have axioms involving more than 3 elements at once.)
protected theorem forall₂_congr {p : α₁ → β₁ → Prop} {q : α₂ → β₂ → Prop} (eα : α₁ ≃ α₂) (eβ : β₁ ≃ β₂) (h : ∀ {x y}, p x y ↔ q (eα x) (eβ y)) : (∀ x y, p x y) ↔ ∀ x y, q x y := Equiv.forall_congr _ <| Equiv.forall_congr _ h #align equiv.forall₂_congr Equiv.forall₂_congr protected theorem forall₂_congr' {p : α₁ → β₁ → Prop} {q : α₂ → β₂ → Prop} (eα : α₁ ≃ α₂) (eβ : β₁ ≃ β₂) (h : ∀ {x y}, p (eα.symm x) (eβ.symm y) ↔ q x y) : (∀ x y, p x y) ↔ ∀ x y, q x y := (Equiv.forall₂_congr eα.symm eβ.symm h.symm).symm #align equiv.forall₂_congr' Equiv.forall₂_congr' protected theorem forall₃_congr {p : α₁ → β₁ → γ₁ → Prop} {q : α₂ → β₂ → γ₂ → Prop} (eα : α₁ ≃ α₂) (eβ : β₁ ≃ β₂) (eγ : γ₁ ≃ γ₂) (h : ∀ {x y z}, p x y z ↔ q (eα x) (eβ y) (eγ z)) : (∀ x y z, p x y z) ↔ ∀ x y z, q x y z := Equiv.forall₂_congr _ _ <| Equiv.forall_congr _ h #align equiv.forall₃_congr Equiv.forall₃_congr protected theorem forall₃_congr' {p : α₁ → β₁ → γ₁ → Prop} {q : α₂ → β₂ → γ₂ → Prop} (eα : α₁ ≃ α₂) (eβ : β₁ ≃ β₂) (eγ : γ₁ ≃ γ₂) (h : ∀ {x y z}, p (eα.symm x) (eβ.symm y) (eγ.symm z) ↔ q x y z) : (∀ x y z, p x y z) ↔ ∀ x y z, q x y z := (Equiv.forall₃_congr eα.symm eβ.symm eγ.symm h.symm).symm #align equiv.forall₃_congr' Equiv.forall₃_congr' protected theorem forall_congr_left' {p : α → Prop} (f : α ≃ β) : (∀ x, p x) ↔ ∀ y, p (f.symm y) := Equiv.forall_congr f <| by simp #align equiv.forall_congr_left' Equiv.forall_congr_left' protected theorem forall_congr_left {p : β → Prop} (f : α ≃ β) : (∀ x, p (f x)) ↔ ∀ y, p y := (Equiv.forall_congr_left' f.symm).symm #align equiv.forall_congr_left Equiv.forall_congr_left protected theorem exists_congr_left {α β} (f : α ≃ β) {p : α → Prop} : (∃ a, p a) ↔ ∃ b, p (f.symm b) := ⟨fun ⟨a, h⟩ => ⟨f a, by simpa using h⟩, fun ⟨b, h⟩ => ⟨_, h⟩⟩ #align equiv.exists_congr_left Equiv.exists_congr_left end Equiv namespace Quot /-- An equivalence `e : α ≃ β` generates an equivalence between quotient spaces, if `ra a₁ a₂ ↔ rb (e a₁) (e a₂)`. -/ protected def congr {ra : α → α → Prop} {rb : β → β → Prop} (e : α ≃ β) (eq : ∀ a₁ a₂, ra a₁ a₂ ↔ rb (e a₁) (e a₂)) : Quot ra ≃ Quot rb where toFun := Quot.map e fun a₁ a₂ => (eq a₁ a₂).1 invFun := Quot.map e.symm fun b₁ b₂ h => (eq (e.symm b₁) (e.symm b₂)).2 ((e.apply_symm_apply b₁).symm ▸ (e.apply_symm_apply b₂).symm ▸ h) left_inv := by rintro ⟨a⟩; simp only [Quot.map, Equiv.symm_apply_apply] right_inv := by rintro ⟨a⟩; simp only [Quot.map, Equiv.apply_symm_apply] #align quot.congr Quot.congr @[simp] theorem congr_mk {ra : α → α → Prop} {rb : β → β → Prop} (e : α ≃ β) (eq : ∀ a₁ a₂ : α, ra a₁ a₂ ↔ rb (e a₁) (e a₂)) (a : α) : Quot.congr e eq (Quot.mk ra a) = Quot.mk rb (e a) := rfl #align quot.congr_mk Quot.congr_mk /-- Quotients are congruent on equivalences under equality of their relation. An alternative is just to use rewriting with `eq`, but then computational proofs get stuck. -/ protected def congrRight {r r' : α → α → Prop} (eq : ∀ a₁ a₂, r a₁ a₂ ↔ r' a₁ a₂) : Quot r ≃ Quot r' := Quot.congr (Equiv.refl α) eq #align quot.congr_right Quot.congrRight
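/- Illustrative sketch, not part of the original file: `Quot.congrRight` acts as
the identity on representatives, so its computation rule holds by `rfl`
(compare `congr_mk` above). -/
example {r r' : α → α → Prop} (h : ∀ a₁ a₂, r a₁ a₂ ↔ r' a₁ a₂) (a : α) :
    Quot.congrRight h (Quot.mk r a) = Quot.mk r' a := rfl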
/-- An equivalence `e : α ≃ β` generates an equivalence between the quotient space of `α` by a relation `r` and the quotient space of `β` by the image of this relation under `e`. -/ protected def congrLeft {r : α → α → Prop} (e : α ≃ β) : Quot r ≃ Quot fun b b' => r (e.symm b) (e.symm b') := Quot.congr e fun _ _ => by simp only [e.symm_apply_apply] #align quot.congr_left Quot.congrLeft end Quot namespace Quotient /-- An equivalence `e : α ≃ β` generates an equivalence between quotient spaces, if `ra a₁ a₂ ↔ rb (e a₁) (e a₂)`. -/ protected def congr {ra : Setoid α} {rb : Setoid β} (e : α ≃ β) (eq : ∀ a₁ a₂, @Setoid.r α ra a₁ a₂ ↔ @Setoid.r β rb (e a₁) (e a₂)) : Quotient ra ≃ Quotient rb := Quot.congr e eq #align quotient.congr Quotient.congr @[simp] theorem congr_mk {ra : Setoid α} {rb : Setoid β} (e : α ≃ β) (eq : ∀ a₁ a₂ : α, Setoid.r a₁ a₂ ↔ Setoid.r (e a₁) (e a₂)) (a : α) : Quotient.congr e eq (Quotient.mk ra a) = Quotient.mk rb (e a) := rfl #align quotient.congr_mk Quotient.congr_mk /-- Quotients are congruent on equivalences under equality of their relation. An alternative is just to use rewriting with `eq`, but then computational proofs get stuck. -/ protected def congrRight {r r' : Setoid α} (eq : ∀ a₁ a₂, @Setoid.r α r a₁ a₂ ↔ @Setoid.r α r' a₁ a₂) : Quotient r ≃ Quotient r' := Quot.congrRight eq #align quotient.congr_right Quotient.congrRight end Quotient
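/- Illustrative sketch, not part of the original file: packaging Boolean
negation as an `Equiv` (the name `boolNotEquiv` is ours). `Bool.not` is its
own inverse, and `symm` and `trans` then compute as expected. -/
def boolNotEquiv : Bool ≃ Bool where
  toFun := Bool.not
  invFun := Bool.not
  left_inv b := by cases b <;> rfl
  right_inv b := by cases b <;> rfl

example (b : Bool) : boolNotEquiv (boolNotEquiv b) = b := by cases b <;> rfl

example : boolNotEquiv.trans boolNotEquiv = Equiv.refl Bool :=
  Equiv.ext fun b => by cases b <;> rfl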
(* Author: Tobias Nipkow *) section \<open>Braun Trees\<close> theory Braun_Tree imports "HOL-Library.Tree_Real" begin text \<open>Braun Trees were studied by Braun and Rem~\cite{BraunRem} and later Hoogerwoord~\cite{Hoogerwoord}.\<close> fun braun :: "'a tree \<Rightarrow> bool" where "braun Leaf = True" | "braun (Node l x r) = ((size l = size r \<or> size l = size r + 1) \<and> braun l \<and> braun r)" lemma braun_Node': "braun (Node l x r) = (size r \<le> size l \<and> size l \<le> size r + 1 \<and> braun l \<and> braun r)" by auto text \<open>The shape of a Braun-tree is uniquely determined by its size:\<close> lemma braun_unique: "\<lbrakk> braun (t1::unit tree); braun t2; size t1 = size t2 \<rbrakk> \<Longrightarrow> t1 = t2" proof (induction t1 arbitrary: t2) case Leaf thus ?case by simp next case (Node l1 _ r1) from Node.prems(3) have "t2 \<noteq> Leaf" by auto then obtain l2 x2 r2 where [simp]: "t2 = Node l2 x2 r2" by (meson neq_Leaf_iff) with Node.prems have "size l1 = size l2 \<and> size r1 = size r2" by auto thus ?case using Node.prems(1,2) Node.IH by auto qed text \<open>Braun trees are balanced:\<close> lemma balanced_if_braun: "braun t \<Longrightarrow> balanced t" proof(induction t) case Leaf show ?case by (simp add: balanced_def) next case (Node l x r) thus ?case using balanced_Node_if_wbal2 by force qed subsection \<open>Numbering Nodes\<close> text \<open>We show that a tree is a Braun tree iff a parity-based numbering (\<open>braun_indices\<close>) of nodes yields an interval of numbers.\<close> fun braun_indices :: "'a tree \<Rightarrow> nat set" where "braun_indices Leaf = {}" | "braun_indices (Node l _ r) = {1} \<union> (*) 2 ` braun_indices l \<union> Suc ` (*) 2 ` braun_indices r" lemma braun_indices1: "0 \<notin> braun_indices t" by (induction t) auto lemma finite_braun_indices: "finite(braun_indices t)" by (induction t) auto text "One direction:" lemma braun_indices_if_braun: "braun t \<Longrightarrow> braun_indices t = {1..size t}" proof(induction t) case Leaf thus ?case by simp next have *: "(*) 2 ` {a..b} \<union> Suc ` (*) 2 ` {a..b} = {2*a..2*b+1}" (is "?l = ?r") for a b proof show "?l \<subseteq> ?r" by auto next have "\<exists>x2\<in>{a..b}. x \<in> {Suc (2*x2), 2*x2}" if *: "x \<in> {2*a .. 2*b+1}" for x proof - have "x div 2 \<in> {a..b}" using * by auto moreover have "x \<in> {2 * (x div 2), Suc(2 * (x div 2))}" by auto ultimately show ?thesis by blast qed thus "?r \<subseteq> ?l" by fastforce qed case (Node l x r) hence "size l = size r \<or> size l = size r + 1" (is "?A \<or> ?B") by auto thus ?case proof assume ?A with Node show ?thesis by (auto simp: *) next assume ?B with Node show ?thesis by (auto simp: * atLeastAtMostSuc_conv) qed qed text "The other direction is more complicated. The following proof is due to Thomas Sewell." 
lemma disj_evens_odds: "(*) 2 ` A \<inter> Suc ` (*) 2 ` B = {}" using double_not_eq_Suc_double by auto lemma card_braun_indices: "card (braun_indices t) = size t" proof (induction t) case Leaf thus ?case by simp next case Node thus ?case by(auto simp: UNION_singleton_eq_range finite_braun_indices card_Un_disjoint card_insert_if disj_evens_odds card_image inj_on_def braun_indices1) qed lemma braun_indices_intvl_base_1: assumes bi: "braun_indices t = {m..n}" shows "{m..n} = {1..size t}" proof (cases "t = Leaf") case True then show ?thesis using bi by simp next case False note eqs = eqset_imp_iff[OF bi] from eqs[of 0] have 0: "0 < m" by (simp add: braun_indices1) from eqs[of 1] have 1: "m \<le> 1" by (cases t; simp add: False) from 0 1 have eq1: "m = 1" by simp from card_braun_indices[of t] show ?thesis by (simp add: bi eq1) qed lemma even_of_intvl_intvl: fixes S :: "nat set" assumes "S = {m..n} \<inter> {i. even i}" shows "\<exists>m' n'. S = (\<lambda>i. i * 2) ` {m'..n'}" apply (rule exI[where x="Suc m div 2"], rule exI[where x="n div 2"]) apply (fastforce simp add: assms mult.commute) done lemma odd_of_intvl_intvl: fixes S :: "nat set" assumes "S = {m..n} \<inter> {i. odd i}" shows "\<exists>m' n'. S = Suc ` (\<lambda>i. i * 2) ` {m'..n'}" proof - have step1: "\<exists>m'. S = Suc ` ({m'..n - 1} \<inter> {i. even i})" apply (rule_tac x="if n = 0 then 1 else m - 1" in exI) apply (auto simp: assms image_def elim!: oddE) done thus ?thesis by (metis even_of_intvl_intvl) qed lemma image_int_eq_image: "(\<forall>i \<in> S. f i \<in> T) \<Longrightarrow> (f ` S) \<inter> T = f ` S" "(\<forall>i \<in> S. f i \<notin> T) \<Longrightarrow> (f ` S) \<inter> T = {}" by auto lemma braun_indices1_le: "i \<in> braun_indices t \<Longrightarrow> Suc 0 \<le> i" using braun_indices1 not_less_eq_eq by blast lemma braun_if_braun_indices: "braun_indices t = {1..size t} \<Longrightarrow> braun t" proof(induction t) case Leaf then show ?case by simp next case (Node l x r) obtain t where t: "t = Node l x r" by simp from Node.prems have eq: "{2 .. size t} = (\<lambda>i. i * 2) ` braun_indices l \<union> Suc ` (\<lambda>i. i * 2) ` braun_indices r" (is "?R = ?S \<union> ?T") apply clarsimp apply (drule_tac f="\<lambda>S. S \<inter> {2..}" in arg_cong) apply (simp add: t mult.commute Int_Un_distrib2 image_int_eq_image braun_indices1_le) done then have ST: "?S = ?R \<inter> {i. even i}" "?T = ?R \<inter> {i. odd i}" by (simp_all add: Int_Un_distrib2 image_int_eq_image) from ST have l: "braun_indices l = {1 .. size l}" by (fastforce dest: braun_indices_intvl_base_1 dest!: even_of_intvl_intvl simp: mult.commute inj_image_eq_iff[OF inj_onI]) from ST have r: "braun_indices r = {1 .. 
size r}" by (fastforce dest: braun_indices_intvl_base_1 dest!: odd_of_intvl_intvl simp: mult.commute inj_image_eq_iff[OF inj_onI]) note STa = ST[THEN eqset_imp_iff, THEN iffD2] note STb = STa[of "size t"] STa[of "size t - 1"] then have sizes: "size l = size r \<or> size l = size r + 1" apply (clarsimp simp: t l r inj_image_mem_iff[OF inj_onI]) apply (cases "even (size l)"; cases "even (size r)"; clarsimp elim!: oddE; fastforce) done from l r sizes show ?case by (clarsimp simp: Node.IH) qed lemma braun_iff_braun_indices: "braun t \<longleftrightarrow> braun_indices t = {1..size t}" using braun_if_braun_indices braun_indices_if_braun by blast (* An older less appealing proof: lemma Suc0_notin_double: "Suc 0 \<notin> ( * ) 2 ` A" by(auto) lemma zero_in_double_iff: "(0::nat) \<in> ( * ) 2 ` A \<longleftrightarrow> 0 \<in> A" by(auto) lemma Suc_in_Suc_image_iff: "Suc n \<in> Suc ` A \<longleftrightarrow> n \<in> A" by(auto) lemmas nat_in_image = Suc0_notin_double zero_in_double_iff Suc_in_Suc_image_iff lemma disj_union_eq_iff: "\<lbrakk> L1 \<inter> R2 = {}; L2 \<inter> R1 = {} \<rbrakk> \<Longrightarrow> L1 \<union> R1 = L2 \<union> R2 \<longleftrightarrow> L1 = L2 \<and> R1 = R2" by blast lemma inj_braun_indices: "braun_indices t1 = braun_indices t2 \<Longrightarrow> t1 = (t2::unit tree)" proof(induction t1 arbitrary: t2) case Leaf thus ?case using braun_indices.elims by blast next case (Node l1 x1 r1) have "t2 \<noteq> Leaf" proof assume "t2 = Leaf" with Node.prems show False by simp qed thus ?case using Node by (auto simp: neq_Leaf_iff insert_ident nat_in_image braun_indices1 disj_union_eq_iff disj_evens_odds inj_image_eq_iff inj_def) qed text \<open>How many even/odd natural numbers are there between m and n?\<close> lemma card_Icc_even_nat: "card {i \<in> {m..n::nat}. even i} = (n+1-m + (m+1) mod 2) div 2" (is "?l m n = ?r m n") proof(induction "n+1 - m" arbitrary: n m) case 0 thus ?case by simp next case Suc have "m \<le> n" using Suc(2) by arith hence "{m..n} = insert m {m+1..n}" by auto hence "?l m n = card {i \<in> insert m {m+1..n}. even i}" by simp also have "\<dots> = ?r m n" (is "?l = ?r") proof (cases) assume "even m" hence "{i \<in> insert m {m+1..n}. even i} = insert m {i \<in> {m+1..n}. even i}" by auto hence "?l = card {i \<in> {m+1..n}. even i} + 1" by simp also have "\<dots> = (n-m + (m+2) mod 2) div 2 + 1" using Suc(1)[of n "m+1"] Suc(2) by simp also have "\<dots> = ?r" using \<open>even m\<close> \<open>m \<le> n\<close> by auto finally show ?thesis . next assume "odd m" hence "{i \<in> insert m {m+1..n}. even i} = {i \<in> {m+1..n}. even i}" by auto hence "?l = card ..." by simp also have "\<dots> = (n-m + (m+2) mod 2) div 2" using Suc(1)[of n "m+1"] Suc(2) by simp also have "\<dots> = ?r" using \<open>odd m\<close> \<open>m \<le> n\<close> even_iff_mod_2_eq_zero[of m] by simp finally show ?thesis . qed finally show ?case . qed lemma card_Icc_odd_nat: "card {i \<in> {m..n::nat}. odd i} = (n+1-m + m mod 2) div 2" proof - let ?A = "{i \<in> {m..n}. odd i}" let ?B = "{i \<in> {m+1..n+1}. even i}" have "card ?A = card (Suc ` ?A)" by (simp add: card_image) also have "Suc ` ?A = ?B" using Suc_le_D by(force simp: image_iff) also have "card ?B = (n+1-m + (m) mod 2) div 2" using card_Icc_even_nat[of "m+1" "n+1"] by simp finally show ?thesis . qed lemma compact_Icc_even: assumes "A = {i \<in> {m..n}. even i}" shows "A = (\<lambda>j. 2*(j-1) + m + m mod 2) ` {1..card A}" (is "_ = ?A") proof let ?a = "(n+1-m + (m+1) mod 2) div 2" have "\<exists>j \<in> {1..?a}. 
i = 2*(j-1) + m + m mod 2" if *: "i \<in> {m..n}" "even i" for i proof - let ?j = "(i - (m + m mod 2)) div 2 + 1" have "?j \<in> {1..?a} \<and> i = 2*(?j-1) + m + m mod 2" using * by(auto simp: mod2_eq_if) presburger+ thus ?thesis by blast qed thus "A \<subseteq> ?A" using assms by(auto simp: image_iff card_Icc_even_nat simp del: atLeastAtMost_iff) next let ?a = "(n+1-m + (m+1) mod 2) div 2" have 1: "2 * (j - 1) + m + m mod 2 \<in> {m..n}" if *: "j \<in> {1..?a}" for j using * by(auto simp: mod2_eq_if) have 2: "even (2 * (j - 1) + m + m mod 2)" for j by presburger show "?A \<subseteq> A" apply(simp add: assms card_Icc_even_nat del: atLeastAtMost_iff One_nat_def) using 1 2 by blast qed lemma compact_Icc_odd: assumes "B = {i \<in> {m..n}. odd i}" shows "B = (\<lambda>i. 2*(i-1) + m + (m+1) mod 2) ` {1..card B}" proof - define A :: " nat set" where "A = Suc ` B" have "A = {i \<in> {m+1..n+1}. even i}" using Suc_le_D by(force simp add: A_def assms image_iff) from compact_Icc_even[OF this] have "A = Suc ` (\<lambda>i. 2 * (i - 1) + m + (m + 1) mod 2) ` {1..card A}" by (simp add: image_comp o_def) hence B: "B = (\<lambda>i. 2 * (i - 1) + m + (m + 1) mod 2) ` {1..card A}" using A_def by (simp add: inj_image_eq_iff) have "card A = card B" by (metis A_def bij_betw_Suc bij_betw_same_card) with B show ?thesis by simp qed lemma even_odd_decomp: assumes "\<forall>x \<in> A. even x" "\<forall>x \<in> B. odd x" "A \<union> B = {m..n}" shows "(let a = card A; b = card B in a + b = n+1-m \<and> A = (\<lambda>i. 2*(i-1) + m + m mod 2) ` {1..a} \<and> B = (\<lambda>i. 2*(i-1) + m + (m+1) mod 2) ` {1..b} \<and> (a = b \<or> a = b+1 \<and> even m \<or> a+1 = b \<and> odd m))" proof - let ?a = "card A" let ?b = "card B" have "finite A \<and> finite B" by (metis \<open>A \<union> B = {m..n}\<close> finite_Un finite_atLeastAtMost) hence ab: "?a + ?b = Suc n - m" by (metis Int_emptyI assms card_Un_disjoint card_atLeastAtMost) have A: "A = {i \<in> {m..n}. even i}" using assms by auto hence A': "A = (\<lambda>i. 2*(i-1) + m + m mod 2) ` {1..?a}" by(rule compact_Icc_even) have B: "B = {i \<in> {m..n}. odd i}" using assms by auto hence B': "B = (\<lambda>i. 2*(i-1) + m + (m+1) mod 2) ` {1..?b}" by(rule compact_Icc_odd) have "?a = ?b \<or> ?a = ?b+1 \<and> even m \<or> ?a+1 = ?b \<and> odd m" apply(simp add: Let_def mod2_eq_if card_Icc_even_nat[of m n, simplified A[symmetric]] card_Icc_odd_nat[of m n, simplified B[symmetric]] split!: if_splits) by linarith with ab A' B' show ?thesis by simp qed lemma braun_if_braun_indices: "braun_indices t = {1..size t} \<Longrightarrow> braun t" proof(induction t) case Leaf then show ?case by simp next case (Node t1 x2 t2) have 1: "i > 0 \<Longrightarrow> Suc(Suc(2 * (i - Suc 0))) = 2*i" for i::nat by(simp add: algebra_simps) have 2: "i > 0 \<Longrightarrow> 2 * (i - Suc 0) + 3 = 2*i + 1" for i::nat by(simp add: algebra_simps) have 3: "( * ) 2 ` braun_indices t1 \<union> Suc ` ( * ) 2 ` braun_indices t2 = {2..size t1 + size t2 + 1}" using Node.prems by (simp add: insert_ident Icc_eq_insert_lb_nat nat_in_image braun_indices1) thus ?case using Node.IH even_odd_decomp[OF _ _ 3] by(simp add: card_image inj_on_def card_braun_indices Let_def 1 2 inj_image_eq_iff image_comp cong: image_cong_simp) qed *) end
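(* Illustrative sketch, not part of the original theory (it would have to sit
   before the final "end"): sanity checks of the two definitions on the
   balanced three-node tree. By braun_iff_braun_indices, the second equation
   is the expected interval {1..3}. The proof scripts are a best guess. *)
lemma "braun (Node (Node Leaf (1::nat) Leaf) 2 (Node Leaf 3 Leaf))"
  by simp

lemma "braun_indices (Node (Node Leaf (1::nat) Leaf) 2 (Node Leaf 3 Leaf)) = {1, 2, 3}"
  by simp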
/- Copyright (c) 2017 Kenny Lau. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Kenny Lau, Mario Carneiro, Johannes Hölzl, Chris Hughes, Jens Wagemaker -/ import algebra.group.basic import logic.nontrivial /-! # Units (i.e., invertible elements) of a multiplicative monoid -/ universe u variable {α : Type u} /-- Units of a monoid, bundled version. An element of a `monoid` is a unit if it has a two-sided inverse. This version bundles the inverse element so that it can be computed. For a predicate see `is_unit`. -/ structure units (α : Type u) [monoid α] := (val : α) (inv : α) (val_inv : val * inv = 1) (inv_val : inv * val = 1) /-- Units of an add_monoid, bundled version. An element of an add_monoid is a unit if it has a two-sided additive inverse. This version bundles the inverse element so that it can be computed. For a predicate see `is_add_unit`. -/ structure add_units (α : Type u) [add_monoid α] := (val : α) (neg : α) (val_neg : val + neg = 0) (neg_val : neg + val = 0) attribute [to_additive add_units] units section has_elem @[to_additive] lemma unique_has_one {α : Type*} [unique α] [has_one α] : default α = 1 := unique.default_eq 1 end has_elem namespace units variables [monoid α] @[to_additive] instance : has_coe (units α) α := ⟨val⟩ @[to_additive] instance : has_inv (units α) := ⟨λ u, ⟨u.2, u.1, u.4, u.3⟩⟩ /-- See Note [custom simps projection] -/ @[to_additive /-" See Note [custom simps projection] "-/] def simps.coe (u : units α) : α := u /-- See Note [custom simps projection] -/ @[to_additive /-" See Note [custom simps projection] "-/] def simps.coe_inv (u : units α) : α := ↑(u⁻¹) initialize_simps_projections units (val → coe as_prefix, inv → coe_inv as_prefix) initialize_simps_projections add_units (val → coe as_prefix, neg → coe_neg as_prefix) @[simp, to_additive] lemma coe_mk (a : α) (b h₁ h₂) : ↑(units.mk a b h₁ h₂) = a := rfl @[ext, to_additive] theorem ext : function.injective (coe : units α → α) | ⟨v, i₁, vi₁, iv₁⟩ ⟨v', i₂, vi₂, iv₂⟩ e := by change v = v' at e; subst v'; congr; simpa only [iv₂, vi₁, one_mul, mul_one] using mul_assoc i₂ v i₁ @[norm_cast, to_additive] theorem eq_iff {a b : units α} : (a : α) = b ↔ a = b := ext.eq_iff @[to_additive] theorem ext_iff {a b : units α} : a = b ↔ (a : α) = b := eq_iff.symm @[to_additive] instance [decidable_eq α] : decidable_eq (units α) := λ a b, decidable_of_iff' _ ext_iff @[simp, to_additive] theorem mk_coe (u : units α) (y h₁ h₂) : mk (u : α) y h₁ h₂ = u := ext rfl /-- Copy a unit, adjusting definition equalities. -/ @[to_additive /-"Copy an `add_unit`, adjusting definitional equalities."-/, simps] def copy (u : units α) (val : α) (hv : val = u) (inv : α) (hi : inv = ↑(u⁻¹)) : units α := { val := val, inv := inv, inv_val := hv.symm ▸ hi.symm ▸ u.inv_val, val_inv := hv.symm ▸ hi.symm ▸ u.val_inv } @[to_additive] lemma copy_eq (u : units α) (val hv inv hi) : u.copy val hv inv hi = u := ext hv /-- Units of a monoid form a group. 
-/ @[to_additive] instance : group (units α) := { mul := λ u₁ u₂, ⟨u₁.val * u₂.val, u₂.inv * u₁.inv, by rw [mul_assoc, ← mul_assoc u₂.val, val_inv, one_mul, val_inv], by rw [mul_assoc, ← mul_assoc u₁.inv, inv_val, one_mul, inv_val]⟩, one := ⟨1, 1, one_mul 1, one_mul 1⟩, mul_one := λ u, ext $ mul_one u, one_mul := λ u, ext $ one_mul u, mul_assoc := λ u₁ u₂ u₃, ext $ mul_assoc u₁ u₂ u₃, inv := has_inv.inv, mul_left_inv := λ u, ext u.inv_val } variables (a b : units α) {c : units α} @[simp, norm_cast, to_additive] lemma coe_mul : (↑(a * b) : α) = a * b := rfl @[simp, norm_cast, to_additive] lemma coe_one : ((1 : units α) : α) = 1 := rfl @[simp, norm_cast, to_additive] lemma coe_eq_one {a : units α} : (a : α) = 1 ↔ a = 1 := by rw [←units.coe_one, eq_iff] @[simp, to_additive] lemma inv_mk (x y : α) (h₁ h₂) : (mk x y h₁ h₂)⁻¹ = mk y x h₂ h₁ := rfl @[simp, to_additive] lemma val_eq_coe : a.val = (↑a : α) := rfl @[simp, to_additive] lemma inv_eq_coe_inv : a.inv = ((a⁻¹ : units α) : α) := rfl @[simp, to_additive] lemma inv_mul : (↑a⁻¹ * a : α) = 1 := inv_val _ @[simp, to_additive] lemma mul_inv : (a * ↑a⁻¹ : α) = 1 := val_inv _ @[to_additive] lemma inv_mul_of_eq {u : units α} {a : α} (h : ↑u = a) : ↑u⁻¹ * a = 1 := by { rw [←h, u.inv_mul], } @[to_additive] lemma mul_inv_of_eq {u : units α} {a : α} (h : ↑u = a) : a * ↑u⁻¹ = 1 := by { rw [←h, u.mul_inv], } @[simp, to_additive] lemma mul_inv_cancel_left (a : units α) (b : α) : (a:α) * (↑a⁻¹ * b) = b := by rw [← mul_assoc, mul_inv, one_mul] @[simp, to_additive] lemma inv_mul_cancel_left (a : units α) (b : α) : (↑a⁻¹:α) * (a * b) = b := by rw [← mul_assoc, inv_mul, one_mul] @[simp, to_additive] lemma mul_inv_cancel_right (a : α) (b : units α) : a * b * ↑b⁻¹ = a := by rw [mul_assoc, mul_inv, mul_one] @[simp, to_additive] lemma inv_mul_cancel_right (a : α) (b : units α) : a * ↑b⁻¹ * b = a := by rw [mul_assoc, inv_mul, mul_one] @[to_additive] instance : inhabited (units α) := ⟨1⟩ @[to_additive] instance {α} [comm_monoid α] : comm_group (units α) := { mul_comm := λ u₁ u₂, ext $ mul_comm _ _, ..units.group } @[to_additive] instance [has_repr α] : has_repr (units α) := ⟨repr ∘ val⟩ @[simp, to_additive] theorem mul_right_inj (a : units α) {b c : α} : (a:α) * b = a * c ↔ b = c := ⟨λ h, by simpa only [inv_mul_cancel_left] using congr_arg ((*) ↑(a⁻¹ : units α)) h, congr_arg _⟩ @[simp, to_additive] theorem mul_left_inj (a : units α) {b c : α} : b * a = c * a ↔ b = c := ⟨λ h, by simpa only [mul_inv_cancel_right] using congr_arg (* ↑(a⁻¹ : units α)) h, congr_arg _⟩ @[to_additive] theorem eq_mul_inv_iff_mul_eq {a b : α} : a = b * ↑c⁻¹ ↔ a * c = b := ⟨λ h, by rw [h, inv_mul_cancel_right], λ h, by rw [← h, mul_inv_cancel_right]⟩ @[to_additive] theorem eq_inv_mul_iff_mul_eq {a c : α} : a = ↑b⁻¹ * c ↔ ↑b * a = c := ⟨λ h, by rw [h, mul_inv_cancel_left], λ h, by rw [← h, inv_mul_cancel_left]⟩ @[to_additive] theorem inv_mul_eq_iff_eq_mul {b c : α} : ↑a⁻¹ * b = c ↔ b = a * c := ⟨λ h, by rw [← h, mul_inv_cancel_left], λ h, by rw [h, inv_mul_cancel_left]⟩ @[to_additive] lemma inv_eq_of_mul_eq_one {u : units α} {a : α} (h : ↑u * a = 1) : ↑u⁻¹ = a := calc ↑u⁻¹ = ↑u⁻¹ * 1 : by rw mul_one ... = ↑u⁻¹ * ↑u * a : by rw [←h, ←mul_assoc] ... = a : by rw [u.inv_mul, one_mul] lemma inv_unique {u₁ u₂ : units α} (h : (↑u₁ : α) = ↑u₂) : (↑u₁⁻¹ : α) = ↑u₂⁻¹ := inv_eq_of_mul_eq_one $ by rw [h, u₂.mul_inv] end units /-- For `a, b` in a `comm_monoid` such that `a * b = 1`, makes a unit out of `a`. 
-/ @[to_additive "For `a, b` in an `add_comm_monoid` such that `a + b = 0`, makes an add_unit out of `a`."] def units.mk_of_mul_eq_one [comm_monoid α] (a b : α) (hab : a * b = 1) : units α := ⟨a, b, hab, (mul_comm b a).trans hab⟩ @[simp, to_additive] lemma units.coe_mk_of_mul_eq_one [comm_monoid α] {a b : α} (h : a * b = 1) : (units.mk_of_mul_eq_one a b h : α) = a := rfl section monoid variables [monoid α] {a b c : α} /-- Partial division. It is defined when the second argument is invertible, and unlike the division operator in `division_ring` it is not totalized at zero. -/ def divp (a : α) (u) : α := a * (u⁻¹ : units α) infix ` /ₚ `:70 := divp @[simp] theorem divp_self (u : units α) : (u : α) /ₚ u = 1 := units.mul_inv _ @[simp] theorem divp_one (a : α) : a /ₚ 1 = a := mul_one _ theorem divp_assoc (a b : α) (u : units α) : a * b /ₚ u = a * (b /ₚ u) := mul_assoc _ _ _ @[simp] theorem divp_inv (u : units α) : a /ₚ u⁻¹ = a * u := rfl @[simp] theorem divp_mul_cancel (a : α) (u : units α) : a /ₚ u * u = a := (mul_assoc _ _ _).trans $ by rw [units.inv_mul, mul_one] @[simp] theorem mul_divp_cancel (a : α) (u : units α) : (a * u) /ₚ u = a := (mul_assoc _ _ _).trans $ by rw [units.mul_inv, mul_one] @[simp] theorem divp_left_inj (u : units α) {a b : α} : a /ₚ u = b /ₚ u ↔ a = b := units.mul_left_inj _ theorem divp_divp_eq_divp_mul (x : α) (u₁ u₂ : units α) : (x /ₚ u₁) /ₚ u₂ = x /ₚ (u₂ * u₁) := by simp only [divp, mul_inv_rev, units.coe_mul, mul_assoc] theorem divp_eq_iff_mul_eq {x : α} {u : units α} {y : α} : x /ₚ u = y ↔ y * u = x := u.mul_left_inj.symm.trans $ by rw [divp_mul_cancel]; exact ⟨eq.symm, eq.symm⟩ theorem divp_eq_one_iff_eq {a : α} {u : units α} : a /ₚ u = 1 ↔ a = u := (units.mul_left_inj u).symm.trans $ by rw [divp_mul_cancel, one_mul] @[simp] theorem one_divp (u : units α) : 1 /ₚ u = ↑u⁻¹ := one_mul _ end monoid section comm_monoid variables [comm_monoid α] theorem divp_eq_divp_iff {x y : α} {ux uy : units α} : x /ₚ ux = y /ₚ uy ↔ x * uy = y * ux := by rw [divp_eq_iff_mul_eq, mul_comm, ← divp_assoc, divp_eq_iff_mul_eq, mul_comm y ux] theorem divp_mul_divp (x y : α) (ux uy : units α) : (x /ₚ ux) * (y /ₚ uy) = (x * y) /ₚ (ux * uy) := by rw [← divp_divp_eq_divp_mul, divp_assoc, mul_comm x, divp_assoc, mul_comm] end comm_monoid /-! # `is_unit` predicate In this file we define the `is_unit` predicate on a `monoid`, and prove a few basic properties. For the bundled version see `units`. See also `prime`, `associated`, and `irreducible` in `algebra/associated`. -/ section is_unit variables {M : Type*} {N : Type*} /-- An element `a : M` of a monoid is a unit if it has a two-sided inverse. The actual definition says that `a` is equal to some `u : units M`, where `units M` is a bundled version of `is_unit`. -/ @[to_additive is_add_unit "An element `a : M` of an add_monoid is an `add_unit` if it has a two-sided additive inverse. 
The actual definition says that `a` is equal to some `u : add_units M`, where `add_units M` is a bundled version of `is_add_unit`."] def is_unit [monoid M] (a : M) : Prop := ∃ u : units M, (u : M) = a @[nontriviality] lemma is_unit_of_subsingleton [monoid M] [subsingleton M] (a : M) : is_unit a := ⟨⟨a, a, subsingleton.elim _ _, subsingleton.elim _ _⟩, rfl⟩ instance [monoid M] [subsingleton M] : unique (units M) := { default := 1, uniq := λ a, units.coe_eq_one.mp $ subsingleton.elim (a : M) 1 } @[simp, to_additive is_add_unit_add_unit] protected lemma units.is_unit [monoid M] (u : units M) : is_unit (u : M) := ⟨u, rfl⟩ @[simp, to_additive is_add_unit_zero] theorem is_unit_one [monoid M] : is_unit (1:M) := ⟨1, rfl⟩ @[to_additive is_add_unit_of_add_eq_zero] theorem is_unit_of_mul_eq_one [comm_monoid M] (a b : M) (h : a * b = 1) : is_unit a := ⟨units.mk_of_mul_eq_one a b h, rfl⟩ @[to_additive is_add_unit.exists_neg] theorem is_unit.exists_right_inv [monoid M] {a : M} (h : is_unit a) : ∃ b, a * b = 1 := by { rcases h with ⟨⟨a, b, hab, _⟩, rfl⟩, exact ⟨b, hab⟩ } @[to_additive is_add_unit.exists_neg'] theorem is_unit.exists_left_inv [monoid M] {a : M} (h : is_unit a) : ∃ b, b * a = 1 := by { rcases h with ⟨⟨a, b, _, hba⟩, rfl⟩, exact ⟨b, hba⟩ } @[to_additive is_add_unit_iff_exists_neg] theorem is_unit_iff_exists_inv [comm_monoid M] {a : M} : is_unit a ↔ ∃ b, a * b = 1 := ⟨λ h, h.exists_right_inv, λ ⟨b, hab⟩, is_unit_of_mul_eq_one _ b hab⟩ @[to_additive is_add_unit_iff_exists_neg'] theorem is_unit_iff_exists_inv' [comm_monoid M] {a : M} : is_unit a ↔ ∃ b, b * a = 1 := by simp [is_unit_iff_exists_inv, mul_comm] @[to_additive] lemma is_unit.mul [monoid M] {x y : M} : is_unit x → is_unit y → is_unit (x * y) := by { rintros ⟨x, rfl⟩ ⟨y, rfl⟩, exact ⟨x * y, units.coe_mul _ _⟩ } /-- Multiplication by a `u : units M` on the right doesn't affect `is_unit`. -/ @[simp, to_additive is_add_unit_add_add_units "Addition of a `u : add_units M` on the right doesn't affect `is_add_unit`."] theorem units.is_unit_mul_units [monoid M] (a : M) (u : units M) : is_unit (a * u) ↔ is_unit a := iff.intro (assume ⟨v, hv⟩, have is_unit (a * ↑u * ↑u⁻¹), by existsi v * u⁻¹; rw [←hv, units.coe_mul], by rwa [mul_assoc, units.mul_inv, mul_one] at this) (λ v, v.mul u.is_unit) /-- Multiplication by a `u : units M` on the left doesn't affect `is_unit`. 
-/ @[simp, to_additive is_add_unit_add_units_add "Addition of a `u : add_units M` on the left doesn't affect `is_add_unit`."] theorem units.is_unit_units_mul {M : Type*} [monoid M] (u : units M) (a : M) : is_unit (↑u * a) ↔ is_unit a := iff.intro (assume ⟨v, hv⟩, have is_unit (↑u⁻¹ * (↑u * a)), by existsi u⁻¹ * v; rw [←hv, units.coe_mul], by rwa [←mul_assoc, units.inv_mul, one_mul] at this) u.is_unit.mul @[to_additive is_add_unit_of_add_is_add_unit_left] theorem is_unit_of_mul_is_unit_left [comm_monoid M] {x y : M} (hu : is_unit (x * y)) : is_unit x := let ⟨z, hz⟩ := is_unit_iff_exists_inv.1 hu in is_unit_iff_exists_inv.2 ⟨y * z, by rwa ← mul_assoc⟩ @[to_additive] theorem is_unit_of_mul_is_unit_right [comm_monoid M] {x y : M} (hu : is_unit (x * y)) : is_unit y := @is_unit_of_mul_is_unit_left _ _ y x $ by rwa mul_comm @[simp] lemma is_unit.mul_iff [comm_monoid M] {x y : M} : is_unit (x * y) ↔ is_unit x ∧ is_unit y := ⟨λ h, ⟨is_unit_of_mul_is_unit_left h, is_unit_of_mul_is_unit_right h⟩, λ h, is_unit.mul h.1 h.2⟩ @[to_additive] theorem is_unit.mul_right_inj [monoid M] {a b c : M} (ha : is_unit a) : a * b = a * c ↔ b = c := by cases ha with a ha; rw [←ha, units.mul_right_inj] @[to_additive] theorem is_unit.mul_left_inj [monoid M] {a b c : M} (ha : is_unit a) : b * a = c * a ↔ b = c := by cases ha with a ha; rw [←ha, units.mul_left_inj] /-- The element of the group of units, corresponding to an element of a monoid which is a unit. -/ noncomputable def is_unit.unit [monoid M] {a : M} (h : is_unit a) : units M := (classical.some h).copy a (classical.some_spec h).symm _ rfl lemma is_unit.unit_spec [monoid M] {a : M} (h : is_unit a) : ↑h.unit = a := rfl lemma is_unit.coe_inv_mul [monoid M] {a : M} (h : is_unit a) : ↑(h.unit)⁻¹ * a = 1 := units.mul_inv _ lemma is_unit.mul_coe_inv [monoid M] {a : M} (h : is_unit a) : a * ↑(h.unit)⁻¹ = 1 := begin convert units.mul_inv _, simp [h.unit_spec] end end is_unit section noncomputable_defs variables {M : Type*} /-- Constructs a `group` structure on a `monoid` consisting only of units. -/ noncomputable def group_of_is_unit [hM : monoid M] (h : ∀ (a : M), is_unit a) : group M := { inv := λ a, ↑((h a).unit)⁻¹, mul_left_inv := λ a, by { change ↑((h a).unit)⁻¹ * a = 1, rw [units.inv_mul_eq_iff_eq_mul, (h a).unit_spec, mul_one] }, .. hM } /-- Constructs a `comm_group` structure on a `comm_monoid` consisting only of units. -/ noncomputable def comm_group_of_is_unit [hM : comm_monoid M] (h : ∀ (a : M), is_unit a) : comm_group M := { inv := λ a, ↑((h a).unit)⁻¹, mul_left_inv := λ a, by { change ↑((h a).unit)⁻¹ * a = 1, rw [units.inv_mul_eq_iff_eq_mul, (h a).unit_spec, mul_one] }, .. hM } end noncomputable_defs
lemma contour_integrable_holomorphic_simple: assumes fh: "f holomorphic_on S" and os: "open S" and g: "valid_path g" "path_image g \<subseteq> S" shows "f contour_integrable_on g"
module Edsl import Data.Vect import Data.Fin data Ty = TyInt | TyBool | TyFun Ty Ty interpTy : Ty -> Type interpTy TyInt = Int interpTy TyBool = Bool interpTy (TyFun a t) = interpTy a -> interpTy t using (G:Vect n Ty) data HasType : Fin n -> Vect n Ty -> Ty -> Type where Stop : HasType FZ (t :: G) t Pop : HasType k G t -> HasType (FS k) (u :: G) t data Expr : Vect n Ty -> Ty -> Type where Var : HasType i G t -> Expr G t Val : (x : Int) -> Expr G TyInt Lam : Expr (a :: G) t -> Expr G (TyFun a t) App : Expr G (TyFun a t) -> Expr G a -> Expr G t Op : (interpTy a -> interpTy b -> interpTy c) -> Expr G a -> Expr G b -> Expr G c If : Expr G TyBool -> Lazy (Expr G a) -> Lazy (Expr G a) -> Expr G a data Env : Vect n Ty -> Type where Nil : Env Nil (::) : interpTy a -> Env G -> Env (a :: G) lookup : HasType i G t -> Env G -> interpTy t lookup Stop (x :: xs) = x lookup (Pop k) (x :: xs) = lookup k xs interp : Env G -> Expr G t -> interpTy t interp env (Var i) = lookup i env interp env (Val x) = x interp env (Lam body) = \x => interp (x :: env) body interp env (App f s) = (interp env f) (interp env s) interp env (Op op x y) = op (interp env x) (interp env y) interp env (If x t e) = if interp env x then interp env t else interp env e
{-# OPTIONS --cubical --postfix-projections --safe #-} open import Relation.Binary open import Prelude hiding (tt) module Data.List.Sort.InsertionSort {e} {E : Type e} {r₁ r₂} (totalOrder : TotalOrder E r₁ r₂) where open import Relation.Binary.Construct.LowerBound totalOrder open import Data.List.Sort.Sorted totalOrder open TotalOrder totalOrder renaming (refl to refl-≤) open TotalOrder lb-ord renaming (≤-trans to ⌊trans⌋) using () open import Data.List open import Data.Unit.UniversePolymorphic as Poly using (tt) open import Data.List.Relation.Binary.Permutation open import Function.Isomorphism open import Data.Fin open import Data.List.Membership private variable lb : ⌊∙⌋ insert : E → List E → List E insert x [] = x ∷ [] insert x (y ∷ xs) with x ≤ᵇ y ... | true = x ∷ y ∷ xs ... | false = y ∷ insert x xs insert-sort : List E → List E insert-sort = foldr insert [] insert-sorts : ∀ x xs → lb ⌊≤⌋ ⌊ x ⌋ → SortedFrom lb xs → SortedFrom lb (insert x xs) insert-sorts x [] lb≤x Pxs = lb≤x , tt insert-sorts x (y ∷ xs) lb≤x (lb≤y , Sxs) with x ≤? y ... | yes x≤y = lb≤x , x≤y , Sxs ... | no x≰y = lb≤y , insert-sorts x xs (<⇒≤ (≰⇒> x≰y)) Sxs sort-sorts : ∀ xs → Sorted (insert-sort xs) sort-sorts [] = tt sort-sorts (x ∷ xs) = insert-sorts x (insert-sort xs) tt (sort-sorts xs) insert-perm : ∀ x xs → insert x xs ↭ x ∷ xs insert-perm x [] = reflₚ insert-perm x (y ∷ xs) with x ≤ᵇ y ... | true = consₚ x reflₚ ... | false = consₚ y (insert-perm x xs) ⟨ transₚ ⟩ swapₚ y x xs sort-perm : ∀ xs → insert-sort xs ↭ xs sort-perm [] = reflₚ {xs = []} sort-perm (x ∷ xs) = insert-perm x (insert-sort xs) ⟨ transₚ {xs = insert x (insert-sort xs)} ⟩ consₚ x (sort-perm xs) perm-invar : ∀ xs ys → xs ↭ ys → insert-sort xs ≡ insert-sort ys perm-invar xs ys xs⇔ys = perm-same (insert-sort xs) (insert-sort ys) (sort-sorts xs) (sort-sorts ys) (λ k → sort-perm xs k ⟨ trans-⇔ ⟩ xs⇔ys k ⟨ trans-⇔ ⟩ sym-⇔ (sort-perm ys k))
Formal statement is: lemma bounded_iff: "bounded S \<longleftrightarrow> (\<exists>a. \<forall>x\<in>S. norm x \<le> a)" Informal statement is: A set $S$ is bounded if and only if there exists a real number $a$ such that for all $x \in S$, we have $|x| \leq a$.
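One way to read the right-hand side: it says precisely that $S$ is contained in the closed ball of radius $a$ centred at the origin, $$S \subseteq \{x \mid \lVert x \rVert \le a\},$$ so in a normed space boundedness can always be checked against the origin alone.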
[STATEMENT] lemma sort_append_swap: "sort (xs @ ys) = sort (ys @ xs)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. sort (xs @ ys) = sort (ys @ xs) [PROOF STEP] by(induct xs arbitrary: ys rule: rev_induct)(simp_all add: sort_snoc[symmetric])
open import Function using ( _∘_ ) open import Data.Product using ( ∃ ; _×_ ; _,_ ) open import Data.Sum using ( _⊎_ ; inj₁ ; inj₂ ) open import Data.Empty using ( ⊥ ; ⊥-elim ) open import Data.Nat using ( ℕ ; zero ; suc ) renaming ( _+_ to _+ℕ_ ; _≤_ to _≤ℕ_ ) open import Relation.Binary.PropositionalEquality using ( _≡_ ; _≢_ ; refl ; sym ; cong ; cong₂ ; subst₂ ; inspect ; [_] ) open import Relation.Nullary using ( ¬_ ; Dec ; yes ; no ) open import FRP.LTL.Util using ( _trans_ ; _∋_ ; m+n≡0-impl-m≡0 ; ≤0-impl-≡0 ; 1+n≰n ) renaming ( +-comm to +ℕ-comm ; +-assoc to +ℕ-assoc ) open Relation.Binary.PropositionalEquality.≡-Reasoning using ( begin_ ; _≡⟨_⟩_ ; _∎ ) module FRP.LTL.Time where infix 2 _≤_ _≥_ _≰_ _≱_ _<_ _>_ infixr 4 _,_ infixr 5 _≤-trans_ _<-transˡ_ _<-transʳ_ _≤-asym_ _≤-total_ infixl 6 _+_ _∸_ -- Time has a cancellative action _+_ which respects the monoid structure of ℕ postulate Time : Set _+_ : Time → ℕ → Time +-unit : ∀ t → (t + 0 ≡ t) +-assoc : ∀ t m n → ((t + m) + n ≡ t + (m +ℕ n)) +-cancelˡ : ∀ t {m n} → (t + m ≡ t + n) → (m ≡ n) +-cancelʳ : ∀ {s t} n → (s + n ≡ t + n) → (s ≡ t) -- The order on time is derived from + data _≤_ (t u : Time) : Set where _,_ : ∀ n → (t + n ≡ u) → (t ≤ u) -- Floored subtraction t ∸ u is the smallest n such that t ≤ u + n postulate _∸_ : Time → Time → ℕ t≤u+t∸u : ∀ {t u} → (t ≤ u + (t ∸ u)) ∸-min : ∀ {t u n} → (t ≤ u + n) → (t ∸ u ≤ℕ n) -- End of postulates. suc-cancelʳ : ∀ {t u m n} → (t + suc m ≡ u + suc n) → (t + m ≡ u + n) suc-cancelʳ {t} {u} {m} {n} t+1+m≡u+1+n = +-cancelʳ 1 (+-assoc t m 1 trans cong₂ _+_ refl (+ℕ-comm m 1) trans t+1+m≡u+1+n trans cong₂ _+_ refl (+ℕ-comm 1 n) trans sym (+-assoc u n 1)) -- Syntax sugar for ≤ _≥_ : Time → Time → Set t ≥ u = u ≤ t _≰_ : Time → Time → Set t ≰ u = ¬(t ≤ u) _≱_ : Time → Time → Set t ≱ u = u ≰ t _<_ : Time → Time → Set t < u = (t ≤ u) × (u ≰ t) _>_ : Time → Time → Set t > u = u < t -- ≤ is a decidable total order ≤-refl : ∀ {t} → (t ≤ t) ≤-refl {t} = (0 , +-unit t) _≤-trans_ : ∀ {t u v} → (t ≤ u) → (u ≤ v) → (t ≤ v) _≤-trans_ {t} {u} {v} (m , t+m≡u) (n , u+n≡v) = (m +ℕ n , (sym (+-assoc t m n)) trans (cong₂ _+_ t+m≡u refl) trans u+n≡v) ≡-impl-≤ : ∀ {t u} → (t ≡ u) → (t ≤ u) ≡-impl-≤ refl = ≤-refl ≡-impl-≥ : ∀ {t u} → (t ≡ u) → (t ≥ u) ≡-impl-≥ refl = ≤-refl _≤-asym_ : ∀ {t u} → (t ≤ u) → (u ≤ t) → (t ≡ u) (m , t+m≡u) ≤-asym (n , u+n≡t) = sym (+-unit _) trans cong₂ _+_ refl (sym m≡0) trans t+m≡u where m≡0 : m ≡ 0 m≡0 = m+n≡0-impl-m≡0 m n (+-cancelˡ _ (sym (+-assoc _ m n) trans cong₂ _+_ t+m≡u refl trans u+n≡t trans sym (+-unit _))) ≤-impl-∸≡0 : ∀ {t u} → (t ≤ u) → (t ∸ u ≡ 0) ≤-impl-∸≡0 t≤u with (∸-min (t≤u ≤-trans ≡-impl-≤ (sym (+-unit _)))) ≤-impl-∸≡0 t≤u | t∸u≤0 = ≤0-impl-≡0 t∸u≤0 ∸≡0-impl-≤ : ∀ {t u} → (t ∸ u ≡ 0) → (t ≤ u) ∸≡0-impl-≤ t∸u≡0 = t≤u+t∸u ≤-trans ≡-impl-≤ (cong₂ _+_ refl t∸u≡0 trans +-unit _) ∸≢0-impl-≰ : ∀ {t u n} → (t ∸ u ≡ suc n) → (t ≰ u) ∸≢0-impl-≰ t∸u≡1+n t≤u with sym t∸u≡1+n trans ≤0-impl-≡0 (∸-min (t≤u ≤-trans ≡-impl-≤ (sym (+-unit _)))) ∸≢0-impl-≰ t∸u≡1+n t≤u | () t∸u≢0-impl-u∸t≡0 : ∀ t u {n} → (t ∸ u ≡ suc n) → (u ∸ t ≡ 0) t∸u≢0-impl-u∸t≡0 t u {n} t∸u≡1+n with t≤u+t∸u {t} {u} t∸u≢0-impl-u∸t≡0 t u {n} t∸u≡1+n | (zero , t+0≡u+t∸u) = ≤-impl-∸≡0 (t ∸ u , sym t+0≡u+t∸u trans +-unit t) t∸u≢0-impl-u∸t≡0 t u {n} t∸u≡1+n | (suc m , t+1+m≡u+t∸u) = ⊥-elim (1+n≰n n (subst₂ _≤ℕ_ t∸u≡1+n refl (∸-min (m , suc-cancelʳ (t+1+m≡u+t∸u trans cong₂ _+_ refl t∸u≡1+n))))) _≤-total_ : ∀ t u → (t ≤ u) ⊎ (u < t) t ≤-total u with t ∸ u | inspect (_∸_ t) u t ≤-total u | zero | [ t∸u≡0 ] = inj₁ 
(∸≡0-impl-≤ t∸u≡0) t ≤-total u | suc n | [ t∸u≡1+n ] with t∸u≢0-impl-u∸t≡0 t u t∸u≡1+n t ≤-total u | suc n | [ t∸u≡1+n ] | u∸t≡0 = inj₂ (∸≡0-impl-≤ u∸t≡0 , ∸≢0-impl-≰ t∸u≡1+n) -- Case analysis on ≤ data _≤-Case_ (t u : Time) : Set where lt : .(t < u) → (t ≤-Case u) eq : .(t ≡ u) → (t ≤-Case u) gt : .(u < t) → (t ≤-Case u) _≤-case_ : ∀ t u → (t ≤-Case u) t ≤-case u with (t ∸ u) | inspect (_∸_ t) u | u ∸ t | inspect (_∸_ u) t t ≤-case u | zero | [ t∸u≡0 ] | zero | [ u∸t≡0 ] = eq (∸≡0-impl-≤ t∸u≡0 ≤-asym ∸≡0-impl-≤ u∸t≡0) t ≤-case u | suc n | [ t∸u≡1+n ] | zero | [ u∸t≡0 ] = gt (∸≡0-impl-≤ u∸t≡0 , ∸≢0-impl-≰ t∸u≡1+n) t ≤-case u | zero | [ t∸u≡0 ] | suc w₁ | [ u∸t≡1+n ] = lt (∸≡0-impl-≤ t∸u≡0 , ∸≢0-impl-≰ u∸t≡1+n) t ≤-case u | suc m | [ t∸u≡1+m ] | suc n | [ u∸t≡1+n ] with sym u∸t≡1+n trans t∸u≢0-impl-u∸t≡0 t u t∸u≡1+m t ≤-case u | suc m | [ t∸u≡1+m ] | suc n | [ u∸t≡1+n ] | () -- + is monotone +-resp-≤ : ∀ {t u} → (t ≤ u) → ∀ n → (t + n ≤ u + n) +-resp-≤ (m , t+m≡u) n = ( m , +-assoc _ n m trans cong₂ _+_ refl (+ℕ-comm n m) trans sym (+-assoc _ m n) trans cong₂ _+_ t+m≡u refl ) +-refl-≤ : ∀ {t u} n → (t + n ≤ u + n) → (t ≤ u) +-refl-≤ n (m , t+n+m≡u+n) = ( m , +-cancelʳ n (+-assoc _ m n trans cong₂ _+_ refl (+ℕ-comm m n) trans sym (+-assoc _ n m) trans t+n+m≡u+n) ) -- Lemmas about < <-impl-≤ : ∀ {t u} → (t < u) → (t ≤ u) <-impl-≤ (t≤u , u≰t) = t≤u <-impl-≱ : ∀ {t u} → (t < u) → (u ≰ t) <-impl-≱ (t≤u , u≰t) = u≰t _<-transˡ_ : ∀ {t u v} → (t < u) → (u ≤ v) → (t < v) _<-transˡ_ (t≤u , u≰t) u≤v = (t≤u ≤-trans u≤v , λ v≤t → u≰t (u≤v ≤-trans v≤t)) _<-transʳ_ : ∀ {t u v} → (t ≤ u) → (u < v) → (t < v) _<-transʳ_ t≤u (u≤v , v≰u) = (t≤u ≤-trans u≤v , λ v≤t → v≰u (v≤t ≤-trans t≤u)) ≤-proof-irrel′ : ∀ {t u m n} → (m ≡ n) → (t+m≡u : t + m ≡ u) → (t+n≡u : t + n ≡ u) → (t ≤ u) ∋ (m , t+m≡u) ≡ (n , t+n≡u) ≤-proof-irrel′ refl refl refl = refl t≤t+1 : ∀ {t} → (t ≤ t + 1) t≤t+1 = (1 , refl) t≱t+1 : ∀ {t} → (t ≱ t + 1) t≱t+1 {t} (m , t+1+m≡t) with +-cancelˡ t (sym (+-assoc t 1 m) trans t+1+m≡t trans sym (+-unit t)) t≱t+1 (m , t+1+m≡t) | () t<t+1 : ∀ {t} → (t < t + 1) t<t+1 = (t≤t+1 , t≱t+1) <-impl-+1≤ : ∀ {t u} → (t < u) → (t + 1 ≤ u) <-impl-+1≤ {t} ((zero , t+0≡u) , u≰t) = ⊥-elim (u≰t (≡-impl-≥ (sym (+-unit t) trans t+0≡u))) <-impl-+1≤ {t} ((suc n , t+1+n≡u) , u≰t) = (n , +-assoc t 1 n trans t+1+n≡u) +-resp-< : ∀ {t u} → (t < u) → ∀ n → (t + n < u + n) +-resp-< (t≤u , t≱u) n = (+-resp-≤ t≤u n , λ u+n≤t+n → t≱u (+-refl-≤ n u+n≤t+n)) -- Proof irrelevance for ≤ ≤-proof-irrel : ∀ {t u} → (p q : t ≤ u) → (p ≡ q) ≤-proof-irrel {t} (m , t+m≡u) (n , t+n≡u) = ≤-proof-irrel′ (+-cancelˡ t (t+m≡u trans (sym t+n≡u))) t+m≡u t+n≡u -- Well ordering of < on an interval _≮[_]_ : Time → ℕ → Time → Set s ≮[ zero ] u = ⊥ s ≮[ suc n ] u = ∀ {t} → (s ≤ t) → (t < u) → (s ≮[ n ] t) <-wo′ : ∀ n {s u} → (s ≤ u) → (u ≤ s + n) → (s ≮[ suc n ] u) <-wo′ zero {s} s≤u u≤s+0 s≤t t<u = <-impl-≱ t<u (u≤s+0 ≤-trans ≡-impl-≤ (+-unit s) ≤-trans s≤t) <-wo′ (suc n) s≤u u≤s+1+n {t} s≤t ((zero , t+0≡u) , t≱u) = ⊥-elim (t≱u (≡-impl-≤ ((sym t+0≡u) trans (+-unit t)))) <-wo′ (suc n) {s} {u} s≤u (l , u+l≡s+1+n) {t} s≤t ((suc m , t+1+m≡u) , t≱u) = <-wo′ n s≤t (l +ℕ m , suc-cancelʳ t+1+l+m≡s+1+n) where t+1+l+m≡s+1+n : t + suc (l +ℕ m) ≡ s + suc n t+1+l+m≡s+1+n = cong₂ _+_ refl (cong suc (+ℕ-comm l m)) trans sym (+-assoc t (1 +ℕ m) l) trans cong₂ _+_ t+1+m≡u refl trans u+l≡s+1+n <-wo : ∀ {s u} → (s ≤ u) → ∃ λ n → (s ≮[ n ] u) <-wo (n , s+n≡u) = (suc n , λ {t} → <-wo′ n (n , s+n≡u) (≡-impl-≤ (sym s+n≡u)) {t})
[STATEMENT] lemma analz_increasing: "H \<subseteq> analz(H)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. H \<subseteq> analz H [PROOF STEP] by blast
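Why a single `blast` call suffices (a sketch, assuming the standard inductive definition of `analz` from Isabelle's `Message` theory): `analz` comes with an injection rule of the form $$X \in H \Longrightarrow X \in \mathrm{analz}\, H,$$ so membership of each element of `H` in `analz H` follows by one rule application, which the classical reasoner finds immediately.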
Formal statement is: lemma lipschitz_extend_closure: fixes f::"('a::metric_space) \<Rightarrow> ('b::complete_space)" assumes "C-lipschitz_on U f" shows "\<exists>g. C-lipschitz_on (closure U) g \<and> (\<forall>x\<in>U. g x = f x)" Informal statement is: If $f$ is a $C$-Lipschitz function on $U$, then there exists a $C$-Lipschitz function $g$ on the closure of $U$ such that $g$ and $f$ agree on $U$.
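A sketch of the construction usually behind this statement (stated under the same hypotheses; the formal proof may differ in detail): for $x \in \overline{U}$ choose $x_n \in U$ with $x_n \to x$. The Lipschitz bound gives $$d(f(x_n), f(x_m)) \le C\, d(x_n, x_m) \longrightarrow 0,$$ so $(f(x_n))$ is Cauchy and converges in the complete codomain; let $g(x)$ be the limit. The value does not depend on the approximating sequence (interleave two of them), constant sequences show $g = f$ on $U$, and passing to the limit in $d(f(x_n), f(y_n)) \le C\, d(x_n, y_n)$ shows that $g$ is again $C$-Lipschitz on $\overline{U}$.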
/- Copyright (c) 2020 Johan Commelin, Damiano Testa. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Johan Commelin, Damiano Testa -/ import order.basic import data.equiv.basic /-! # Initial lemmas to work with the `order_dual` ## Definitions `to_dual` and `of_dual` the order reversing identity maps, bundled as equivalences. ## Basic Lemmas to convert between an order and its dual This file is similar to algebra/group/type_tags.lean -/ open function universes u v w variables {α : Type u} {β : Type v} {γ : Type w} {r : α → α → Prop} namespace order_dual instance [nontrivial α] : nontrivial (order_dual α) := by delta order_dual; assumption /-- `to_dual` is the identity function to the `order_dual` of a linear order. -/ def to_dual : α ≃ order_dual α := ⟨id, id, λ h, rfl, λ h, rfl⟩ /-- `of_dual` is the identity function from the `order_dual` of a linear order. -/ def of_dual : order_dual α ≃ α := to_dual.symm @[simp] lemma to_dual_symm_eq : (@to_dual α).symm = of_dual := rfl @[simp] lemma of_dual_symm_eq : (@of_dual α).symm = to_dual := rfl @[simp] lemma to_dual_of_dual (a : order_dual α) : to_dual (of_dual a) = a := rfl @[simp] lemma of_dual_to_dual (a : α) : of_dual (to_dual a) = a := rfl @[simp] lemma to_dual_inj {a b : α} : to_dual a = to_dual b ↔ a = b := iff.rfl @[simp] lemma to_dual_le_to_dual [has_le α] {a b : α} : to_dual a ≤ to_dual b ↔ b ≤ a := iff.rfl @[simp] lemma to_dual_lt_to_dual [has_lt α] {a b : α} : to_dual a < to_dual b ↔ b < a := iff.rfl @[simp] lemma of_dual_inj {a b : order_dual α} : of_dual a = of_dual b ↔ a = b := iff.rfl @[simp] lemma of_dual_le_of_dual [has_le α] {a b : order_dual α} : of_dual a ≤ of_dual b ↔ b ≤ a := iff.rfl @[simp] lemma of_dual_lt_of_dual [has_lt α] {a b : order_dual α} : of_dual a < of_dual b ↔ b < a := iff.rfl lemma le_to_dual [has_le α] {a : order_dual α} {b : α} : a ≤ to_dual b ↔ b ≤ of_dual a := iff.rfl lemma lt_to_dual [has_lt α] {a : order_dual α} {b : α} : a < to_dual b ↔ b < of_dual a := iff.rfl lemma to_dual_le [has_le α] {a : α} {b : order_dual α} : to_dual a ≤ b ↔ of_dual b ≤ a := iff.rfl lemma to_dual_lt [has_lt α] {a : α} {b : order_dual α} : to_dual a < b ↔ of_dual b < a := iff.rfl end order_dual
\hypertarget{classglite_1_1wms_1_1jdl_1_1AdAttributeException}{ \section{glite::wms::jdl::Ad\-Attribute\-Exception Class Reference} \label{classglite_1_1wms_1_1jdl_1_1AdAttributeException}\index{glite::wms::jdl::AdAttributeException@{glite::wms::jdl::AdAttributeException}} } {\tt \#include $<$Request\-Ad\-Exceptions.h$>$} Inheritance diagram for glite::wms::jdl::Ad\-Attribute\-Exception::\begin{figure}[H] \begin{center} \leavevmode \includegraphics[height=1.89189cm]{classglite_1_1wms_1_1jdl_1_1AdAttributeException} \end{center} \end{figure} \subsection*{Public Member Functions} \begin{CompactItemize} \item \hyperlink{classglite_1_1wms_1_1jdl_1_1AdAttributeException_a0}{Ad\-Attribute\-Exception::Ad\-Attribute\-Exception} (std::string file, int line, std::string method, int code, std::string exception\_\-name) \end{CompactItemize} \subsection{Detailed Description} \hyperlink{classglite_1_1wms_1_1jdl_1_1AdAttributeException}{Ad\-Attribute\-Exception} - raised when a not admitted value is added/set to the attribute \subsection{Member Function Documentation} \hypertarget{classglite_1_1wms_1_1jdl_1_1AdAttributeException_a0}{ \index{glite::wms::jdl::AdAttributeException@{glite::wms::jdl::Ad\-Attribute\-Exception}!AdAttributeException::AdAttributeException@{AdAttributeException::AdAttributeException}} \index{AdAttributeException::AdAttributeException@{AdAttributeException::AdAttributeException}!glite::wms::jdl::AdAttributeException@{glite::wms::jdl::Ad\-Attribute\-Exception}} \subsubsection[AdAttributeException::AdAttributeException]{\setlength{\rightskip}{0pt plus 5cm}glite::wms::jdl::Ad\-Attribute\-Exception::Ad\-Attribute\-Exception::Ad\-Attribute\-Exception (std::string {\em file}, int {\em line}, std::string {\em method}, int {\em code}, std::string {\em exception\_\-name})}} \label{classglite_1_1wms_1_1jdl_1_1AdAttributeException_a0} The documentation for this class was generated from the following file:\begin{CompactItemize} \item \hyperlink{RequestAdExceptions_8h}{Request\-Ad\-Exceptions.h}\end{CompactItemize}
\documentclass[11pt, oneside]{article} % use "amsart" instead of "article" for AMSLaTeX format \usepackage{geometry} % See geometry.pdf to learn the layout options. There are lots. \geometry{letterpaper} % ... or a4paper or a5paper or ... \usepackage[parfill]{parskip} % Activate to begin paragraphs with an empty line rather than an indent \usepackage{graphicx} % Use pdf, png, jpg, or eps with pdflatex; use eps in DVI mode % TeX will automatically convert eps --> pdf in pdflatex \newcommand{\field}[1]{\textsf{\textbf{#1}}} \newcommand{\code}[1]{\texttt{#1}} \begin{document} %\section{} %\subsection{} \begin{center} \Large{Documentation for the PLOVER gold standard file \texttt{PLOVER\_GSR\_CAMEO\_readme.pdf}} \end{center} This collection of records is the first attempt to develop gold-standard records for the PLOVER event ontology. It is based on examples extracted from the CAMEO manual\footnote{\texttt{http://eventdata.parusanalytics.com/data.dir/cameo.html}} with considerable editing, since CAMEO coded only actors and events. The records are coded with the version of the PLOVER specification that was current as of the \field{dateCoded} field: PLOVER is still under development and some of these may change in the future. The file conforms to JSON standards and should be readable with any JSON utility. It is important to note that the examples in the manual were originally human-coded, and from the standpoint of automated coding, they are ideal codings and no known system would be likely to get all of them. There are \field{comment}s on some of the more problematic cases. The sentences used in the CAMEO manual were based on actual news stories around the turn of the century, but many have been edited, sometimes multiple times, and do not necessarily refer to actual historical events. The original CAMEO examples have been further edited so that all of the nation-states correspond to entities with names and codes in the current ISO-3166-alpha-3 entry in Wikipedia.\footnote{\texttt{https://en.wikipedia.org/wiki/ISO\_3166-1\_alpha-3}} Ethnicity codes correspond to ISO-639-2. Funding for PLOVER has been provided in part by the U. S. National Science Foundation award SBE-1539302, ``RIDIR: Modernizing Political Event Data for Big Data Social Science Research.'' \subsection*{Additional comments on the coding} \begin{enumerate} \item Only non-null JSON fields are included. All \field{date} fields are set to `2000-01-01.' The current version does not include any \field{mode} or \field{context} fields but these are retained as placeholders. \item City names are resolved as follows: \begin{itemize} \item Cities in general go to \field{sector} \code{CVL} \item Capital cities go to \field{sector} \code{GOV} if they are the subject or direct object, otherwise \field{sector} \code{CVL} \end{itemize} \item Some \field{eventLoc} fields have been filled in, usually on the basis of [human-identified] prepositional phrases. In some cases, the actor primary codes were inferred from these locations. \end{enumerate} \subsection*{Text Markup} The \field{textInfo/markup} field shows where the various text fields occur in the sentence.
The following markers are used: \begin{table}[htdp] \begin{center} \begin{tabular}{ll} SRC & source actor and agent \\ TAR & target actor and agent\\ SRC/TAR & reciprocal source/target in the CAMEO system\\ EVT & event\\ LOC & location\\ \end{tabular} \end{center} \end{table}% This markup has been done through a combination of automatic translation from the original \LaTeX ~highlighting in the CAMEO manual and additional manual editing. It sometimes gets a bit dodgy when there are multiple words in the phrases, but it is probably close enough to be useful. \subsection*{Provenance} Location for this file and the data: \texttt{https://github.com/openeventdata/PLOVER} The GSR file (and documentation) is licensed under a Creative Commons Attribution-ShareAlike 4.0 International License. For further information contact: \texttt{[email protected]} Last update: \today Copyright \copyright 2016 by the Open Event Data Alliance \end{document}
-- /- -- Copyright (c) 2020 Wojciech Nawrocki. All rights reserved. -- Released under Apache 2.0 license as described in the file LICENSE. -- Authors: Wojciech Nawrocki -- -/ -- import category_theory.limits.shapes.finite_products -- import category_theory.limits.shapes.binary_products -- import category_theory.limits.shapes.terminal -- import tactic.rcases -- import pfin -- /-! # Stuff that should be in the catthy library. -/ -- namespace category_theory -- universes w w₁ -- -- def discrete.equiv_of_iso {J : Type w} {K : Type w₁} (h : J ≃ K) : (discrete J ≌ discrete K) := -- -- equivalence.mk -- -- (functor.of_function h.to_fun) -- C ⥤ D -- -- (functor.of_function h.inv_fun) -- D ⥤ C -- -- { hom := { -- -- app := λ X, begin -- -- apply eq_to_hom, -- -- simp only [functor.id_obj, functor.of_function_obj, functor.comp_obj], -- -- exact (h.left_inv X).symm, -- -- end, -- -- naturality' := λ X Y f, dec_trivial }, -- -- inv := { -- -- app := λ X, begin -- -- apply eq_to_hom, -- -- simp only [functor.id_obj, functor.of_function_obj, functor.comp_obj], -- -- exact h.left_inv X, -- -- end, -- -- naturality' := λ X Y f, dec_trivial }, -- -- hom_inv_id' := by ext1; exact dec_trivial, -- -- inv_hom_id' := by ext1; exact dec_trivial } -- -- { hom := { -- -- app := λ X, begin -- -- apply eq_to_hom, -- -- simp only [functor.id_obj, functor.of_function_obj, functor.comp_obj], -- -- exact h.right_inv X -- -- end, -- -- naturality' := λ X Y f, dec_trivial }, -- -- inv := { -- -- app := λ X, begin -- -- apply eq_to_hom, -- -- simp only [functor.id_obj, functor.of_function_obj, functor.comp_obj], -- -- exact (h.right_inv X).symm, -- -- end, -- -- naturality' := λ X Y f, dec_trivial }, -- -- hom_inv_id' := by ext1; exact dec_trivial, -- -- inv_hom_id' := by ext1; exact dec_trivial } -- namespace limits -- universes v u -- variables {C : Type u} [𝒞 : category.{v} C] -- include 𝒞 -- lemma prod.lift_uniq {X Y Z : C} [has_limit (pair X Y)] (f : Z ⟶ X) (g : Z ⟶ Y) (m : Z ⟶ X ⨯ Y) -- (hLeft : m ≫ prod.fst = f) (hRight : m ≫ prod.snd = g) -- : m = prod.lift f g := -- begin -- apply limit.hom_ext, -- intro j, -- cases hLeft, cases hRight, cases j, -- simp only [limit.lift_π, binary_fan.mk_π_app_left], -- simp only [limit.lift_π, binary_fan.mk_π_app_right], -- end -- end limits -- end category_theory -- /-! -- # Constructing finite products from binary products and a terminal object -- If a category has all binary products, and a terminal object, then it has all finite products. -- -/ -- namespace category_theory.limits -- open category_theory -- universes v u -- variables {C : Type u} [𝒞 : category.{v} C] -- include 𝒞 -- -- We hide the "implementation details" inside a namespace -- namespace has_finite_products_of_binary_products_and_terminal_object -- @[reducible] -- def match_statement_lol [has_binary_products.{v} C] -- {n : ℕ} (F: discrete (pfin (nat.succ n)) ⥤ C) (limF' : has_limit (discrete.lift pfin.succ ⋙ F)) -- : Π (j: pfin (nat.succ n)), F.obj ⟨0, nat.succ_pos n⟩ ⨯ limF'.cone.X ⟶ F.obj j -- | ⟨0, _⟩ := prod.fst -- | w@⟨nat.succ j, _⟩ := prod.snd ≫ -- limF'.cone.π.app (w.pred (λ h, nat.succ_ne_zero j (pfin.veq_of_eq h))) -- set_option eqn_compiler.zeta true -- def has_limit_for_pfin_diagram [has_binary_products.{v} C] [has_terminal.{v} C] -- : Π {n: ℕ} (F: (discrete (pfin n)) ⥤ C) -- , has_limit F -- | 0 F := -- -- In the base case, the category of cones over a diagram of shape ∅ is simply 𝒞, so -- -- the limit cone is 𝒞's terminal object. 
-- let absurdJ (x : pfin 0) : false := x.elim0 in -- let myCone : cone F := -- { X := terminal C, -- π := nat_trans.of_homs (λ j, (absurdJ j).elim) } in -- { cone := myCone, -- is_limit := -- { lift := λ s, terminal.from s.X -- , fac' := λ s j, (absurdJ j).elim -- , uniq' := λ s m h, dec_trivial } } -- | (nat.succ n) F := -- -- In the inductive case, we construct a limit cone with apex (F 0) ⨯ (apex of smaller limit cone) -- -- where the smaller cone is obtained from the below functor. -- let F' : discrete (pfin n) ⥤ C := discrete.lift pfin.succ ⋙ F in -- let limF' : has_limit F' := has_limit_for_pfin_diagram F' in -- let myCone : cone F := -- { X := (F.obj ⟨0, nat.succ_pos n⟩) ⨯ limF'.cone.X -- , π := nat_trans.of_homs (match_statement_lol F limF') } in -- TODO(WN): using an actual match statement here -- -- is hard to unfold later, but would obv be nicer. -- { cone := myCone, -- is_limit := -- { lift := λ s, -- -- Show that s.X is also the apex of a cone over F' .. -- let s' : cone F' := -- { X := s.X -- , π := nat_trans.of_homs (λ j, s.π.app j.succ) } in -- -- .. in order to get from s.X to limF'.cone.X in the right morphism -- -- using the fact that limF' is a limit cone over F'. -- prod.lift -- (s.π.app $ ⟨0, nat.succ_pos n⟩) -- (eq_to_hom rfl ≫ limF'.is_limit.lift s') -- -- Show that lift is in fact a morphism of cones from s into myCone. -- , fac' := λ s j, begin -- rcases j with ⟨j, hj⟩, cases j; -- simp only [category.id_comp, nat_trans.of_homs_app, eq_to_hom_refl, match_statement_lol, -- prod.lift_fst, limit.lift_π_assoc, is_limit.fac, nat_trans.of_homs_app, -- binary_fan.mk_π_app_right], congr -- end -- -- Show that lift is the unique morphism into myCone. -- , uniq' := λ s m h, begin -- have h0 := h ⟨0, nat.succ_pos n⟩, -- simp [match_statement_lol] at h0, -- let s' : cone F' := -- { X := s.X -- , π := nat_trans.of_homs (λ j, s.π.app j.succ) }, -- have hS : m ≫ prod.snd = eq_to_hom rfl ≫ limF'.is_limit.lift s', -- { -- m ≫ prod.snd is a morphism of cones over F' into limF'.X .. -- have hN : ∀ (j: discrete (pfin n)), (m ≫ prod.snd) ≫ limF'.cone.π.app j = s'.π.app j, -- { intro j, -- unfold_projs, simp [(h j.succ).symm], -- rcases j with ⟨j, hj⟩, refl }, -- -- .. and therefore unique. -- have hUniq' : m ≫ prod.snd = limF'.is_limit.lift s', -- from limF'.is_limit.uniq' s' (m ≫ prod.snd) hN, -- simp only [hUniq', category.id_comp, eq_to_hom_refl] }, -- exact prod.lift_uniq _ _ _ h0 hS -- end } } -- set_option eqn_compiler.zeta false -- end has_finite_products_of_binary_products_and_terminal_object -- open has_finite_products_of_binary_products_and_terminal_object -- -- TODO(WN): instance or def? Is there another way one might want to construct limits of shape pfin? -- instance has_limits_of_shape_pfin [has_binary_products.{v} C] [has_terminal.{v} C] (n : ℕ) -- : @has_limits_of_shape (discrete $ pfin n) _ C 𝒞 := -- ⟨λ F, has_limit_for_pfin_diagram F⟩ -- -- TODO(WN): trunc? #22 -- def has_trunc_finite_products [has_binary_products.{v} C] [has_terminal.{v} C] -- {J : Type v} [fintype J] [decidable_eq J] -- : trunc (has_limits_of_shape (discrete J) C) := -- trunc.lift_on (fintype.equiv_pfin J) -- (λ h, -- let hIso : discrete (pfin $ fintype.card J) ≌ discrete J := -- discrete.equiv_of_iso h.symm in -- let limsPfin : @has_limits_of_shape (discrete (pfin $ fintype.card J)) _ C 𝒞 := -- by apply_instance in -- trunc.mk $ has_limits_of_shape_of_equivalence hIso) -- (λ a b, trunc.eq _ _) -- end category_theory.limits
The road, also known as Old Post Road, was incorporated in 1813 as the Elk and Christiana Turnpike in order to get more money for repairs. The turnpike was completed in April 1817. As a turnpike, tolls were collected to pay for the maintenance of the road. The construction of the New Castle and Frenchtown Railroad lowered the revenues of the turnpike and it became a public road again in 1838. The road historically went through agricultural areas; however, the surroundings have become more developed over the years. Much of the Old Baltimore Pike remains two lanes.
{-# OPTIONS --without-K #-} open import M-types.Base.Core module M-types.Base.Prod where ∏ : (X : Ty ℓ₀) → (Y : X → Ty ℓ₁) → Ty (ℓ-max ℓ₀ ℓ₁) ∏ X Y = (x : X) → Y x ∏-syntax : (X : Ty ℓ₀) → (Y : X → Ty ℓ₁) → Ty (ℓ-max ℓ₀ ℓ₁) ∏-syntax = ∏ infix 2 ∏-syntax syntax ∏-syntax X (λ x → Y) = ∏[ x ∈ X ] Y id : {X : Ty ℓ} → (X → X) id = λ x → x infixr 9 _∘_ _∘_ : {X : Ty ℓ₀} {Y : X → Ty ℓ₁} {Z : {x : X} → Y x → Ty ℓ₂} → ∏[ g ∈ ({x : X} → ∏[ y ∈ Y x ] Z y) ] ∏[ f ∈ (∏[ x ∈ X ] Y x) ] ∏[ x ∈ X ] Z (f x) f ∘ g = λ x → f (g x)
lemma contour_integrable_inversediff: assumes g: "valid_path g" and notin: "z \<notin> path_image g" shows "(\<lambda>w. 1 / (w-z)) contour_integrable_on g"
Formal statement is: lemma (in order_topology) order_tendstoD: assumes "(f \<longlongrightarrow> y) F" shows "a < y \<Longrightarrow> eventually (\<lambda>x. a < f x) F" and "y < a \<Longrightarrow> eventually (\<lambda>x. f x < a) F" Informal statement is: If $f$ converges to $y$ in the order topology, then $f$ is eventually greater than $a$ if $y$ is greater than $a$, and $f$ is eventually less than $a$ if $y$ is less than $a$.
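A sketch of why both directions hold (assuming the order topology is generated by the open rays): the sets $\{x \mid a < x\}$ and $\{x \mid x < a\}$ are open. If $a < y$, the first ray is an open neighbourhood of $y$, so convergence of $f$ to $y$ along $F$ makes $a < f\,x$ hold eventually in $F$; the case $y < a$ is symmetric, using the second ray.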
/- Copyright (c) 2020 Aaron Anderson. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Aaron Anderson -/ import algebra.big_operators.order import data.nat.interval import data.nat.factors /-! # Divisor finsets > THIS FILE IS SYNCHRONIZED WITH MATHLIB4. > Any changes to this file require a corresponding PR to mathlib4. This file defines sets of divisors of a natural number. This is particularly useful as background for defining Dirichlet convolution. ## Main Definitions Let `n : ℕ`. All of the following definitions are in the `nat` namespace: * `divisors n` is the `finset` of natural numbers that divide `n`. * `proper_divisors n` is the `finset` of natural numbers that divide `n`, other than `n`. * `divisors_antidiagonal n` is the `finset` of pairs `(x,y)` such that `x * y = n`. * `perfect n` is true when `n` is positive and the sum of `proper_divisors n` is `n`. ## Implementation details * `divisors 0`, `proper_divisors 0`, and `divisors_antidiagonal 0` are defined to be `∅`. ## Tags divisors, perfect numbers -/ open_locale classical open_locale big_operators open finset namespace nat variable (n : ℕ) /-- `divisors n` is the `finset` of divisors of `n`. As a special case, `divisors 0 = ∅`. -/ def divisors : finset ℕ := finset.filter (λ x : ℕ, x ∣ n) (finset.Ico 1 (n + 1)) /-- `proper_divisors n` is the `finset` of divisors of `n`, other than `n`. As a special case, `proper_divisors 0 = ∅`. -/ def proper_divisors : finset ℕ := finset.filter (λ x : ℕ, x ∣ n) (finset.Ico 1 n) /-- `divisors_antidiagonal n` is the `finset` of pairs `(x,y)` such that `x * y = n`. As a special case, `divisors_antidiagonal 0 = ∅`. -/ def divisors_antidiagonal : finset (ℕ × ℕ) := (Ico 1 (n + 1) ×ˢ Ico 1 (n + 1)).filter (λ x, x.fst * x.snd = n) variable {n} @[simp] lemma filter_dvd_eq_divisors (h : n ≠ 0) : (finset.range n.succ).filter (∣ n) = n.divisors := begin ext, simp only [divisors, mem_filter, mem_range, mem_Ico, and.congr_left_iff, iff_and_self], exact λ ha _, succ_le_iff.mpr (pos_of_dvd_of_pos ha h.bot_lt), end @[simp] lemma filter_dvd_eq_proper_divisors (h : n ≠ 0) : (finset.range n).filter (∣ n) = n.proper_divisors := begin ext, simp only [proper_divisors, mem_filter, mem_range, mem_Ico, and.congr_left_iff, iff_and_self], exact λ ha _, succ_le_iff.mpr (pos_of_dvd_of_pos ha h.bot_lt), end lemma proper_divisors.not_self_mem : ¬ n ∈ proper_divisors n := by simp [proper_divisors] @[simp] lemma mem_proper_divisors {m : ℕ} : n ∈ proper_divisors m ↔ n ∣ m ∧ n < m := begin rcases eq_or_ne m 0 with rfl | hm, { simp [proper_divisors] }, simp only [and_comm, ←filter_dvd_eq_proper_divisors hm, mem_filter, mem_range], end lemma insert_self_proper_divisors (h : n ≠ 0): insert n (proper_divisors n) = divisors n := by rw [divisors, proper_divisors, Ico_succ_right_eq_insert_Ico (one_le_iff_ne_zero.2 h), finset.filter_insert, if_pos (dvd_refl n)] lemma cons_self_proper_divisors (h : n ≠ 0) : cons n (proper_divisors n) proper_divisors.not_self_mem = divisors n := by rw [cons_eq_insert, insert_self_proper_divisors h] @[simp] lemma mem_divisors {m : ℕ} : n ∈ divisors m ↔ (n ∣ m ∧ m ≠ 0) := begin rcases eq_or_ne m 0 with rfl | hm, { simp [divisors] }, simp only [hm, ne.def, not_false_iff, and_true, ←filter_dvd_eq_divisors hm, mem_filter, mem_range, and_iff_right_iff_imp, lt_succ_iff], exact le_of_dvd hm.bot_lt, end lemma one_mem_divisors : 1 ∈ divisors n ↔ n ≠ 0 := by simp lemma mem_divisors_self (n : ℕ) (h : n ≠ 0) : n ∈ n.divisors := mem_divisors.2 ⟨dvd_rfl, h⟩ lemma 
dvd_of_mem_divisors {m : ℕ} (h : n ∈ divisors m) : n ∣ m := begin cases m, { apply dvd_zero }, { simp [mem_divisors.1 h], } end @[simp] lemma mem_divisors_antidiagonal {x : ℕ × ℕ} : x ∈ divisors_antidiagonal n ↔ x.fst * x.snd = n ∧ n ≠ 0 := begin simp only [divisors_antidiagonal, finset.mem_Ico, ne.def, finset.mem_filter, finset.mem_product], rw and_comm, apply and_congr_right, rintro rfl, split; intro h, { contrapose! h, simp [h], }, { rw [nat.lt_add_one_iff, nat.lt_add_one_iff], rw [mul_eq_zero, decidable.not_or_iff_and_not] at h, simp only [succ_le_of_lt (nat.pos_of_ne_zero h.1), succ_le_of_lt (nat.pos_of_ne_zero h.2), true_and], exact ⟨le_mul_of_pos_right (nat.pos_of_ne_zero h.2), le_mul_of_pos_left (nat.pos_of_ne_zero h.1)⟩ } end variable {n} lemma divisor_le {m : ℕ}: n ∈ divisors m → n ≤ m := begin cases m, { simp }, simp only [mem_divisors, m.succ_ne_zero, and_true, ne.def, not_false_iff], exact nat.le_of_dvd (nat.succ_pos m), end lemma divisors_subset_of_dvd {m : ℕ} (hzero : n ≠ 0) (h : m ∣ n) : divisors m ⊆ divisors n := finset.subset_iff.2 $ λ x hx, nat.mem_divisors.mpr (⟨(nat.mem_divisors.mp hx).1.trans h, hzero⟩) lemma divisors_subset_proper_divisors {m : ℕ} (hzero : n ≠ 0) (h : m ∣ n) (hdiff : m ≠ n) : divisors m ⊆ proper_divisors n := begin apply finset.subset_iff.2, intros x hx, exact nat.mem_proper_divisors.2 (⟨(nat.mem_divisors.1 hx).1.trans h, lt_of_le_of_lt (divisor_le hx) (lt_of_le_of_ne (divisor_le (nat.mem_divisors.2 ⟨h, hzero⟩)) hdiff)⟩) end @[simp] lemma divisors_zero : divisors 0 = ∅ := by { ext, simp } @[simp] lemma proper_divisors_zero : proper_divisors 0 = ∅ := by { ext, simp } lemma proper_divisors_subset_divisors : proper_divisors n ⊆ divisors n := filter_subset_filter _ $ Ico_subset_Ico_right n.le_succ @[simp] lemma divisors_one : divisors 1 = {1} := by { ext, simp } @[simp] lemma proper_divisors_one : proper_divisors 1 = ∅ := by rw [proper_divisors, Ico_self, filter_empty] lemma pos_of_mem_divisors {m : ℕ} (h : m ∈ n.divisors) : 0 < m := begin cases m, { rw [mem_divisors, zero_dvd_iff] at h, cases h.2 h.1 }, apply nat.succ_pos, end lemma pos_of_mem_proper_divisors {m : ℕ} (h : m ∈ n.proper_divisors) : 0 < m := pos_of_mem_divisors (proper_divisors_subset_divisors h) lemma one_mem_proper_divisors_iff_one_lt : 1 ∈ n.proper_divisors ↔ 1 < n := by rw [mem_proper_divisors, and_iff_right (one_dvd _)] @[simp] lemma divisors_antidiagonal_zero : divisors_antidiagonal 0 = ∅ := by { ext, simp } @[simp] lemma divisors_antidiagonal_one : divisors_antidiagonal 1 = {(1,1)} := by { ext, simp [nat.mul_eq_one_iff, prod.ext_iff], } @[simp] lemma swap_mem_divisors_antidiagonal {x : ℕ × ℕ} : x.swap ∈ divisors_antidiagonal n ↔ x ∈ divisors_antidiagonal n := by rw [mem_divisors_antidiagonal, mem_divisors_antidiagonal, mul_comm, prod.swap] lemma snd_mem_divisors_of_mem_antidiagonal {x : ℕ × ℕ} (h : x ∈ divisors_antidiagonal n) : x.snd ∈ divisors n := begin rw mem_divisors_antidiagonal at h, simp [dvd.intro_left _ h.1, h.2], end @[simp] lemma map_swap_divisors_antidiagonal : (divisors_antidiagonal n).map (equiv.prod_comm _ _).to_embedding = divisors_antidiagonal n := begin rw [← coe_inj, coe_map, equiv.coe_to_embedding, equiv.coe_prod_comm, set.image_swap_eq_preimage_swap], ext, exact swap_mem_divisors_antidiagonal, end @[simp] lemma image_fst_divisors_antidiagonal : (divisors_antidiagonal n).image prod.fst = divisors n := by { ext, simp [has_dvd.dvd, @eq_comm _ n (_ * _)] } @[simp] lemma image_snd_divisors_antidiagonal : (divisors_antidiagonal n).image prod.snd = divisors n := begin 
rw [←map_swap_divisors_antidiagonal, map_eq_image, image_image], exact image_fst_divisors_antidiagonal end lemma map_div_right_divisors : n.divisors.map ⟨λ d, (d, n/d), λ p₁ p₂, congr_arg prod.fst⟩ = n.divisors_antidiagonal := begin ext ⟨d, nd⟩, simp only [mem_map, mem_divisors_antidiagonal, function.embedding.coe_fn_mk, mem_divisors, prod.ext_iff, exists_prop, and.left_comm, exists_eq_left], split, { rintro ⟨⟨⟨k, rfl⟩, hn⟩, rfl⟩, rw [nat.mul_div_cancel_left _ (left_ne_zero_of_mul hn).bot_lt], exact ⟨rfl, hn⟩ }, { rintro ⟨rfl, hn⟩, exact ⟨⟨dvd_mul_right _ _, hn⟩, nat.mul_div_cancel_left _ (left_ne_zero_of_mul hn).bot_lt⟩ } end lemma map_div_left_divisors : n.divisors.map ⟨λ d, (n/d, d), λ p₁ p₂, congr_arg prod.snd⟩ = n.divisors_antidiagonal := begin apply finset.map_injective (equiv.prod_comm _ _).to_embedding, rw [map_swap_divisors_antidiagonal, ←map_div_right_divisors, finset.map_map], refl, end lemma sum_divisors_eq_sum_proper_divisors_add_self : ∑ i in divisors n, i = ∑ i in proper_divisors n, i + n := begin rcases decidable.eq_or_ne n 0 with rfl|hn, { simp }, { rw [← cons_self_proper_divisors hn, finset.sum_cons, add_comm] } end /-- `n : ℕ` is perfect if and only the sum of the proper divisors of `n` is `n` and `n` is positive. -/ def perfect (n : ℕ) : Prop := (∑ i in proper_divisors n, i = n) ∧ 0 < n theorem perfect_iff_sum_proper_divisors (h : 0 < n) : perfect n ↔ ∑ i in proper_divisors n, i = n := and_iff_left h theorem perfect_iff_sum_divisors_eq_two_mul (h : 0 < n) : perfect n ↔ ∑ i in divisors n, i = 2 * n := begin rw [perfect_iff_sum_proper_divisors h, sum_divisors_eq_sum_proper_divisors_add_self, two_mul], split; intro h, { rw h }, { apply add_right_cancel h } end lemma mem_divisors_prime_pow {p : ℕ} (pp : p.prime) (k : ℕ) {x : ℕ} : x ∈ divisors (p ^ k) ↔ ∃ (j : ℕ) (H : j ≤ k), x = p ^ j := by rw [mem_divisors, nat.dvd_prime_pow pp, and_iff_left (ne_of_gt (pow_pos pp.pos k))] lemma prime.divisors {p : ℕ} (pp : p.prime) : divisors p = {1, p} := begin ext, rw [mem_divisors, dvd_prime pp, and_iff_left pp.ne_zero, finset.mem_insert, finset.mem_singleton] end lemma prime.proper_divisors {p : ℕ} (pp : p.prime) : proper_divisors p = {1} := by rw [← erase_insert proper_divisors.not_self_mem, insert_self_proper_divisors pp.ne_zero, pp.divisors, pair_comm, erase_insert (λ con, pp.ne_one (mem_singleton.1 con))] lemma divisors_prime_pow {p : ℕ} (pp : p.prime) (k : ℕ) : divisors (p ^ k) = (finset.range (k + 1)).map ⟨pow p, pow_right_injective pp.two_le⟩ := by { ext, simp [mem_divisors_prime_pow, pp, nat.lt_succ_iff, @eq_comm _ a] } lemma eq_proper_divisors_of_subset_of_sum_eq_sum {s : finset ℕ} (hsub : s ⊆ n.proper_divisors) : ∑ x in s, x = ∑ x in n.proper_divisors, x → s = n.proper_divisors := begin cases n, { rw [proper_divisors_zero, subset_empty] at hsub, simp [hsub] }, classical, rw [← sum_sdiff hsub], intros h, apply subset.antisymm hsub, rw [← sdiff_eq_empty_iff_subset], contrapose h, rw [← ne.def, ← nonempty_iff_ne_empty] at h, apply ne_of_lt, rw [← zero_add (∑ x in s, x), ← add_assoc, add_zero], apply add_lt_add_right, have hlt := sum_lt_sum_of_nonempty h (λ x hx, pos_of_mem_proper_divisors (sdiff_subset _ _ hx)), simp only [sum_const_zero] at hlt, apply hlt end lemma sum_proper_divisors_dvd (h : ∑ x in n.proper_divisors, x ∣ n) : (∑ x in n.proper_divisors, x = 1) ∨ (∑ x in n.proper_divisors, x = n) := begin cases n, { simp }, cases n, { contrapose! 
h, simp, }, rw or_iff_not_imp_right, intro ne_n, have hlt : ∑ x in n.succ.succ.proper_divisors, x < n.succ.succ := lt_of_le_of_ne (nat.le_of_dvd (nat.succ_pos _) h) ne_n, symmetry, rw [← mem_singleton, eq_proper_divisors_of_subset_of_sum_eq_sum (singleton_subset_iff.2 (mem_proper_divisors.2 ⟨h, hlt⟩)) sum_singleton, mem_proper_divisors], refine ⟨one_dvd _, nat.succ_lt_succ (nat.succ_pos _)⟩, end @[simp, to_additive] lemma prime.prod_proper_divisors {α : Type*} [comm_monoid α] {p : ℕ} {f : ℕ → α} (h : p.prime) : ∏ x in p.proper_divisors, f x = f 1 := by simp [h.proper_divisors] @[simp, to_additive] lemma prime.prod_divisors {α : Type*} [comm_monoid α] {p : ℕ} {f : ℕ → α} (h : p.prime) : ∏ x in p.divisors, f x = f p * f 1 := by rw [← cons_self_proper_divisors h.ne_zero, prod_cons, h.prod_proper_divisors] lemma proper_divisors_eq_singleton_one_iff_prime : n.proper_divisors = {1} ↔ n.prime := ⟨λ h, begin have h1 := mem_singleton.2 rfl, rw [← h, mem_proper_divisors] at h1, refine nat.prime_def_lt''.mpr ⟨h1.2, λ m hdvd, _⟩, rw [← mem_singleton, ← h, mem_proper_divisors], have hle := nat.le_of_dvd (lt_trans (nat.succ_pos _) h1.2) hdvd, exact or.imp_left (λ hlt, ⟨hdvd, hlt⟩) hle.lt_or_eq end, prime.proper_divisors⟩ lemma sum_proper_divisors_eq_one_iff_prime : ∑ x in n.proper_divisors, x = 1 ↔ n.prime := begin cases n, { simp [nat.not_prime_zero] }, cases n, { simp [nat.not_prime_one] }, rw [← proper_divisors_eq_singleton_one_iff_prime], refine ⟨λ h, _, λ h, h.symm ▸ sum_singleton⟩, rw [@eq_comm (finset ℕ) _ _], apply eq_proper_divisors_of_subset_of_sum_eq_sum (singleton_subset_iff.2 (one_mem_proper_divisors_iff_one_lt.2 (succ_lt_succ (nat.succ_pos _)))) (eq.trans sum_singleton h.symm) end lemma mem_proper_divisors_prime_pow {p : ℕ} (pp : p.prime) (k : ℕ) {x : ℕ} : x ∈ proper_divisors (p ^ k) ↔ ∃ (j : ℕ) (H : j < k), x = p ^ j := begin rw [mem_proper_divisors, nat.dvd_prime_pow pp, ← exists_and_distrib_right], simp only [exists_prop, and_assoc], apply exists_congr, intro a, split; intro h, { rcases h with ⟨h_left, rfl, h_right⟩, rwa pow_lt_pow_iff pp.one_lt at h_right, simpa, }, { rcases h with ⟨h_left, rfl⟩, rwa pow_lt_pow_iff pp.one_lt, simp [h_left, le_of_lt], }, end lemma proper_divisors_prime_pow {p : ℕ} (pp : p.prime) (k : ℕ) : proper_divisors (p ^ k) = (finset.range k).map ⟨pow p, pow_right_injective pp.two_le⟩ := by { ext, simp [mem_proper_divisors_prime_pow, pp, nat.lt_succ_iff, @eq_comm _ a], } @[simp, to_additive] lemma prod_proper_divisors_prime_pow {α : Type*} [comm_monoid α] {k p : ℕ} {f : ℕ → α} (h : p.prime) : ∏ x in (p ^ k).proper_divisors, f x = ∏ x in range k, f (p ^ x) := by simp [h, proper_divisors_prime_pow] @[simp, to_additive sum_divisors_prime_pow] lemma prod_divisors_prime_pow {α : Type*} [comm_monoid α] {k p : ℕ} {f : ℕ → α} (h : p.prime) : ∏ x in (p ^ k).divisors, f x = ∏ x in range (k + 1), f (p ^ x) := by simp [h, divisors_prime_pow] @[to_additive] lemma prod_divisors_antidiagonal {M : Type*} [comm_monoid M] (f : ℕ → ℕ → M) {n : ℕ} : ∏ i in n.divisors_antidiagonal, f i.1 i.2 = ∏ i in n.divisors, f i (n / i) := begin rw [←map_div_right_divisors, finset.prod_map], refl, end @[to_additive] lemma prod_divisors_antidiagonal' {M : Type*} [comm_monoid M] (f : ℕ → ℕ → M) {n : ℕ} : ∏ i in n.divisors_antidiagonal, f i.1 i.2 = ∏ i in n.divisors, f (n / i) i := begin rw [←map_swap_divisors_antidiagonal, finset.prod_map], exact prod_divisors_antidiagonal (λ i j, f j i), end /-- The factors of `n` are the prime divisors -/ lemma prime_divisors_eq_to_filter_divisors_prime (n : ℕ) : 
n.factors.to_finset = (divisors n).filter prime := begin rcases n.eq_zero_or_pos with rfl | hn, { simp }, { ext q, simpa [hn, hn.ne', mem_factors] using and_comm (prime q) (q ∣ n) } end @[simp] lemma image_div_divisors_eq_divisors (n : ℕ) : image (λ (x : ℕ), n / x) n.divisors = n.divisors := begin by_cases hn : n = 0, { simp [hn] }, ext, split, { rw mem_image, rintros ⟨x, hx1, hx2⟩, rw mem_divisors at *, refine ⟨_,hn⟩, rw ←hx2, exact div_dvd_of_dvd hx1.1 }, { rw [mem_divisors, mem_image], rintros ⟨h1, -⟩, exact ⟨n/a, mem_divisors.mpr ⟨div_dvd_of_dvd h1, hn⟩, nat.div_div_self h1 hn⟩ }, end @[simp, to_additive sum_div_divisors] lemma prod_div_divisors {α : Type*} [comm_monoid α] (n : ℕ) (f : ℕ → α) : ∏ d in n.divisors, f (n/d) = n.divisors.prod f := begin by_cases hn : n = 0, { simp [hn] }, rw ←prod_image, { exact prod_congr (image_div_divisors_eq_divisors n) (by simp) }, { intros x hx y hy h, rw mem_divisors at hx hy, exact (div_eq_iff_eq_of_dvd_dvd hn hx.1 hy.1).mp h } end end nat
\section{Confusion matrices} \label{sec:confusion_matrices} We present some confusion matrices that help analyze model performance on test data. \begin{figure}[H] \centering \includegraphics[width=0.8\linewidth]{lr.png} \caption{Logistic regression test confusion matrix.} \label{fig:lr} \end{figure} \begin{figure}[H] \centering \includegraphics[width=0.8\linewidth]{lda.png} \caption{Linear discriminant analysis test confusion matrix.} \label{fig:lda} \end{figure} \begin{figure}[H] \centering \includegraphics[width=0.8\linewidth]{k_nn.png} \caption{K-nearest neighbors test confusion matrix.} \label{fig:k_nn} \end{figure} \begin{figure}[H] \centering \includegraphics[width=0.8\linewidth]{rf.png} \caption{Random forest test confusion matrix.} \label{fig:rf} \end{figure} \begin{figure}[H] \centering \includegraphics[width=0.8\linewidth]{svm.png} \caption{Support vector machine test confusion matrix.} \label{fig:svm} \end{figure}
lemma at_right_to_0: "at_right a = filtermap (\<lambda>x. x + a) (at_right 0)" for a :: real
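A sketch of the idea: translation by $a$ is a homeomorphism of $\mathbb{R}$ sending $0$ to $a$, and it carries the basic right-neighbourhoods of $0$ onto those of $a$, $$x \mapsto x + a : (0, \varepsilon) \longrightarrow (a, a + \varepsilon),$$ so the right filter at $a$ is exactly the pushforward (`filtermap`) of the right filter at $0$ along this map.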
Formal statement is: lemma bounded_scaling: fixes S :: "'a::real_normed_vector set" shows "bounded S \<Longrightarrow> bounded ((\<lambda>x. c *\<^sub>R x) ` S)" Informal statement is: If $S$ is a bounded set, then its image under scaling by a fixed scalar $c$, the set $\{c \cdot x \mid x \in S\}$, is also bounded.
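The estimate behind the statement, in one line: if $\lVert x \rVert \le a$ for every $x \in S$, then each element of the image satisfies $$\lVert c \cdot x \rVert = \lvert c \rvert\, \lVert x \rVert \le \lvert c \rvert\, a,$$ so $\lvert c \rvert\, a$ bounds the image set.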
/- Copyright (c) 2022 Jireh Loreaux. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Jireh Loreaux -/ import algebra.star.basic import data.set.pointwise /-! # Pointwise star operation on sets This file defines the star operation pointwise on sets and provides the basic API. Besides basic facts about how the star operation acts on sets (e.g., `(s ∩ t)⋆ = s⋆ ∩ t⋆`), if `s t : set α`, then under suitable assumptions on `α`, it is shown * `(s + t)⋆ = s⋆ + t⋆` * `(s * t)⋆ = t⋆ * s⋆` * `(s⁻¹)⋆ = (s⋆)⁻¹` -/ namespace set open_locale pointwise local postfix `⋆`:std.prec.max_plus := star variables {α : Type*} {s t : set α} {a : α} /-- The set `(star s : set α)` is defined as `{x | star x ∈ s}` in locale `pointwise`. In the usual case where `star` is involutive, it is equal to `{star x | x ∈ s}`, see `set.image_star`. -/ protected def has_star [has_star α] : has_star (set α) := ⟨preimage has_star.star⟩ localized "attribute [instance] set.has_star" in pointwise @[simp] lemma star_empty [has_star α] : (∅ : set α)⋆ = ∅ := rfl @[simp] lemma star_univ [has_star α] : (univ : set α)⋆ = univ := rfl @[simp] lemma nonempty_star [has_involutive_star α] {s : set α} : (s⋆).nonempty ↔ s.nonempty := star_involutive.surjective.nonempty_preimage lemma nonempty.star [has_involutive_star α] {s : set α} (h : s.nonempty) : (s⋆).nonempty := nonempty_star.2 h @[simp] lemma mem_star [has_star α] : a ∈ s⋆ ↔ a⋆ ∈ s := iff.rfl lemma star_mem_star [has_involutive_star α] : a⋆ ∈ s⋆ ↔ a ∈ s := by simp only [mem_star, star_star] @[simp] lemma star_preimage [has_star α] : has_star.star ⁻¹' s = s⋆ := rfl @[simp] lemma image_star [has_involutive_star α] : has_star.star '' s = s⋆ := by { simp only [← star_preimage], rw [image_eq_preimage_of_inverse]; intro; simp only [star_star] } @[simp] lemma inter_star [has_star α] : (s ∩ t)⋆ = s⋆ ∩ t⋆ := preimage_inter @[simp] lemma union_star [has_star α] : (s ∪ t)⋆ = s⋆ ∪ t⋆ := preimage_union @[simp] lemma Inter_star {ι : Sort*} [has_star α] (s : ι → set α) : (⋂ i, s i)⋆ = ⋂ i, (s i)⋆ := preimage_Inter @[simp] lemma Union_star {ι : Sort*} [has_star α] (s : ι → set α) : (⋃ i, s i)⋆ = ⋃ i, (s i)⋆ := preimage_Union @[simp] lemma compl_star [has_star α] : (sᶜ)⋆ = (s⋆)ᶜ := preimage_compl @[simp] instance [has_involutive_star α] : has_involutive_star (set α) := { star := has_star.star, star_involutive := λ s, by { simp only [← star_preimage, preimage_preimage, star_star, preimage_id'] } } @[simp] lemma star_subset_star [has_involutive_star α] {s t : set α} : s⋆ ⊆ t⋆ ↔ s ⊆ t := equiv.star.surjective.preimage_subset_preimage_iff lemma star_subset [has_involutive_star α] {s t : set α} : s⋆ ⊆ t ↔ s ⊆ t⋆ := by { rw [← star_subset_star, star_star] } lemma finite.star [has_involutive_star α] {s : set α} (hs : finite s) : finite s⋆ := hs.preimage $ star_injective.inj_on _ lemma star_singleton {β : Type*} [has_involutive_star β] (x : β) : ({x} : set β)⋆ = {x⋆} := by { ext1 y, rw [mem_star, mem_singleton_iff, mem_singleton_iff, star_eq_iff_star_eq, eq_comm], } protected lemma star_mul [monoid α] [star_semigroup α] (s t : set α) : (s * t)⋆ = t⋆ * s⋆ := by simp_rw [←image_star, ←image2_mul, image_image2, image2_image_left, image2_image_right, star_mul, image2_swap _ s t] protected lemma star_add [add_monoid α] [star_add_monoid α] (s t : set α) : (s + t)⋆ = s⋆ + t⋆ := by simp_rw [←image_star, ←image2_add, image_image2, image2_image_left, image2_image_right, star_add] @[simp] instance [has_star α] [has_trivial_star α] : has_trivial_star (set α) := { star_trivial := λ s, by { rw [←star_preimage], ext1, simp [star_trivial] } } protected lemma star_inv [group α] 
[star_semigroup α] (s : set α) : (s⁻¹)⋆ = (s⋆)⁻¹ := by { ext, simp only [mem_star, mem_inv, star_inv] } protected lemma star_inv' [division_ring α] [star_ring α] (s : set α) : (s⁻¹)⋆ = (s⋆)⁻¹ := by { ext, simp only [mem_star, mem_inv, star_inv'] } end set
[STATEMENT] lemma Wbis_raw_coind: assumes "sym theta" and "theta \<subseteq> Wretr theta" shows "theta \<subseteq> Wbis" [PROOF STATE] proof (prove) goal (1 subgoal): 1. theta \<subseteq> Wbis [PROOF STEP] using assms mono_Retr bis_raw_coind [PROOF STATE] proof (prove) using this: sym theta theta \<subseteq> Wretr theta Bisim.mono Sretr Bisim.mono ZOretr Bisim.mono ZOretrT Bisim.mono Wretr Bisim.mono WretrT Bisim.mono RetrT \<lbrakk>Bisim.mono ?Retr; sym ?theta; ?theta \<subseteq> ?Retr ?theta\<rbrakk> \<Longrightarrow> ?theta \<subseteq> bis ?Retr goal (1 subgoal): 1. theta \<subseteq> Wbis [PROOF STEP] unfolding Wbis_def [PROOF STATE] proof (prove) using this: sym theta theta \<subseteq> Wretr theta Bisim.mono Sretr Bisim.mono ZOretr Bisim.mono ZOretrT Bisim.mono Wretr Bisim.mono WretrT Bisim.mono RetrT \<lbrakk>Bisim.mono ?Retr; sym ?theta; ?theta \<subseteq> ?Retr ?theta\<rbrakk> \<Longrightarrow> ?theta \<subseteq> bis ?Retr goal (1 subgoal): 1. theta \<subseteq> bis Wretr [PROOF STEP] by blast
Require Import Coq.Strings.String. From mathcomp Require Import ssreflect ssrfun ssrbool ssrnat eqtype choice seq ssrnum ssrint ssralg bigop. From deriving Require Import deriving. From extructures Require Import ord fset fmap fperm. From CoqUtils Require Import nominal. Set Implicit Arguments. Unset Strict Implicit. Unset Printing Implicit Defensive. Section Basic. Local Open Scope fset_scope. Definition int_ordMixin := @PcanOrdMixin int _ _ _ pickleK. Canonical int_ordType := Eval hnf in OrdType int int_ordMixin. Canonical int_nominalType := Eval hnf in [nominalType for int by //]. Canonical int_trivialNominalType := Eval hnf in [trivialNominalType for int]. Inductive binop : Type := | Add | Mul | Sub | Eq | Leq | And | Or. Definition nat_of_binop b := match b with | Add => 0 | Mul => 1 | Sub => 2 | Eq => 3 | Leq => 4 | And => 5 | Or => 6 end. Definition binop_of_nat n := match n with | 0 => Add | 1 => Mul | 2 => Sub | 3 => Eq | 4 => Leq | 5 => And | _ => Or end. Lemma nat_of_binopK : cancel nat_of_binop binop_of_nat. Proof. by case. Qed. Definition binop_eqMixin := CanEqMixin nat_of_binopK. Canonical binop_eqType := Eval hnf in EqType binop binop_eqMixin. Definition binop_choiceMixin := CanChoiceMixin nat_of_binopK. Canonical binop_choiceType := Eval hnf in ChoiceType binop binop_choiceMixin. Definition binop_countMixin := CanCountMixin nat_of_binopK. Canonical binop_countType := Eval hnf in CountType binop binop_countMixin. Definition binop_ordMixin := CanOrdMixin nat_of_binopK. Canonical binop_ordType := Eval hnf in OrdType binop binop_ordMixin. Inductive expr := | Var of string | Bool of bool | Num of int | Binop of binop & expr & expr | Neg of expr | ENil | Offset of expr | Size of expr | Cast of expr. Fixpoint tree_of_expr e := match e with | Var x => GenTree.Node 0 [:: GenTree.Leaf (pickle x)] | Bool b => GenTree.Node 1 [:: GenTree.Leaf (pickle b)] | Num n => GenTree.Node 2 [:: GenTree.Leaf (pickle n)] | Binop b e1 e2 => GenTree.Node 3 [:: GenTree.Leaf (pickle b); tree_of_expr e1; tree_of_expr e2] | Neg e => GenTree.Node 4 [:: tree_of_expr e] | ENil => GenTree.Node 5 [::] | Offset e => GenTree.Node 6 [:: tree_of_expr e] | Size e => GenTree.Node 7 [:: tree_of_expr e] | Cast e => GenTree.Node 8 [:: tree_of_expr e] end. Fixpoint expr_of_tree t := match t with | GenTree.Node 0 [:: GenTree.Leaf x & _] => Var (odflt String.EmptyString (unpickle x)) | GenTree.Node 1 [:: GenTree.Leaf b & _] => Bool (odflt true (unpickle b)) | GenTree.Node 2 [:: GenTree.Leaf n & _ ] => Num (odflt (0 : int) (unpickle n)) | GenTree.Node 3 [:: GenTree.Leaf b, e1, e2 & _ ] => Binop (odflt Add (unpickle b)) (expr_of_tree e1) (expr_of_tree e2) | GenTree.Node 4 [:: e & _] => Neg (expr_of_tree e) | GenTree.Node 5 _ => ENil | GenTree.Node 6 [:: e & _] => Offset (expr_of_tree e) | GenTree.Node 7 [:: e & _] => Size (expr_of_tree e) | GenTree.Node 8 [:: e & _] => Cast (expr_of_tree e) | _ => Var String.EmptyString end. Lemma tree_of_exprK : cancel tree_of_expr expr_of_tree. Proof. rewrite /expr_of_tree [@unpickle]lock. by elim=> /= [x|b|n|b e1 -> e2 ->|e -> | |e ->|e ->|e ->] //; rewrite -lock pickleK. Qed. Definition expr_eqMixin := CanEqMixin tree_of_exprK. Canonical expr_eqType := Eval hnf in EqType expr expr_eqMixin. Definition expr_choiceMixin := CanChoiceMixin tree_of_exprK. Canonical expr_choiceType := Eval hnf in ChoiceType expr expr_choiceMixin. Definition expr_countMixin := CanCountMixin tree_of_exprK. Canonical expr_countType := Eval hnf in CountType expr expr_countMixin. 
Definition expr_ordMixin := PcanOrdMixin (@pickleK expr_countType). Canonical expr_ordType := Eval hnf in OrdType expr expr_ordMixin. Canonical expr_nominalType := Eval hnf in [nominalType for expr by //]. Canonical expr_trivialNominalType := Eval hnf in [trivialNominalType for expr]. Inductive com : Type := | Assn of string & expr | Load of string & expr | Store of expr & expr | Alloc of string & expr | Free of expr | Skip | Seq of com & com | If of expr & com & com | While of expr & com. Fixpoint tree_of_com c := match c with | Assn x e => GenTree.Node 0 [:: GenTree.Leaf (pickle x); GenTree.Leaf (pickle e)] | Load x e => GenTree.Node 1 [:: GenTree.Leaf (pickle x); GenTree.Leaf (pickle e)] | Store e1 e2 => GenTree.Node 2 [:: GenTree.Leaf (pickle e1); GenTree.Leaf (pickle e2)] | Alloc x e => GenTree.Node 3 [:: GenTree.Leaf (pickle x); GenTree.Leaf (pickle e)] | Free e => GenTree.Node 4 [:: GenTree.Leaf (pickle e)] | Skip => GenTree.Node 5 [::] | Seq c1 c2 => GenTree.Node 6 [:: tree_of_com c1; tree_of_com c2] | If e c1 c2 => GenTree.Node 7 [:: GenTree.Leaf (pickle e); tree_of_com c1; tree_of_com c2] | While e c => GenTree.Node 8 [:: GenTree.Leaf (pickle e); tree_of_com c] end. Fixpoint com_of_tree t := let var x := odflt String.EmptyString (unpickle x) in let exp e := odflt (Num 0) (unpickle e) in match t with | GenTree.Node 0 [:: GenTree.Leaf x; GenTree.Leaf e] => Assn (var x) (exp e) | GenTree.Node 1 [:: GenTree.Leaf x; GenTree.Leaf e] => Load (var x) (exp e) | GenTree.Node 2 [:: GenTree.Leaf e1; GenTree.Leaf e2] => Store (exp e1) (exp e2) | GenTree.Node 3 [:: GenTree.Leaf x; GenTree.Leaf e] => Alloc (var x) (exp e) | GenTree.Node 4 [:: GenTree.Leaf e] => Free (exp e) | GenTree.Node 5 [::] => Skip | GenTree.Node 6 [:: c1; c2] => Seq (com_of_tree c1) (com_of_tree c2) | GenTree.Node 7 [:: GenTree.Leaf e; c1; c2] => If (exp e) (com_of_tree c1) (com_of_tree c2) | GenTree.Node 8 [:: GenTree.Leaf e; c] => While (exp e) (com_of_tree c) | _ => Skip end. Lemma tree_of_comK : cancel tree_of_com com_of_tree. Proof. elim=> [x e|x e|e1 e2|x e|e| |c1 IH1 c2 IH2|e c1 IH1 c2 IH2|e c IH] //=; rewrite ?pickleK ?IH ?IH1 ?IH2 //; f_equal; by apply: Some_inj; rewrite -[RHS]pickleK. Qed. Definition com_eqMixin := CanEqMixin tree_of_comK. Canonical com_eqType := Eval hnf in EqType com com_eqMixin. Definition com_choiceMixin := CanChoiceMixin tree_of_comK. Canonical com_choiceType := Eval hnf in ChoiceType com com_choiceMixin. Definition com_countMixin := CanCountMixin tree_of_comK. Canonical com_countType := Eval hnf in CountType com com_countMixin. Definition com_ordMixin := PcanOrdMixin (@pickleK com_countType). Canonical com_ordType := Eval hnf in OrdType com com_ordMixin. Canonical com_nominalType := Eval hnf in [nominalType for com by //]. Canonical com_trivialNominalType := Eval hnf in [trivialNominalType for com]. (** Type of pointers. [name] corresponds to an atom, in the nominal set sense. *) Definition ptr := (name * int)%type. (* FIXME: If we don't declare these, then many lemmas on partial maps do not work when applied to heaps. The file structured.v contains some examples. *) Canonical ptr_eqType := Eval hnf in [eqType of ptr]. Canonical ptr_choiceType := Eval hnf in [choiceType of ptr]. Canonical ptr_ordType := Eval hnf in [ordType of ptr]. Canonical ptr_nominalType := Eval hnf in [nominalType of ptr]. (** Pointers values contain an additional immutable size field, initialized at allocation time. *) Inductive value := | VBool of bool | VNum of int | VPtr of ptr & int | VNil. 
Definition sum_of_value v := match v with | VBool b => inl b | VNum n => inr (inl n) | VPtr p sz => inr (inr (inl (p, sz))) | VNil => inr (inr (inr tt)) end. Definition value_of_sum v := match v with | inl b => VBool b | inr (inl n) => VNum n | inr (inr (inl (p, sz))) => VPtr p sz | inr (inr (inr tt)) => VNil end. Lemma sum_of_valueK : cancel sum_of_value value_of_sum. Proof. by case. Qed. Lemma value_of_sumK : cancel value_of_sum sum_of_value. Proof. by do ![case=>//]. Qed. Definition value_eqMixin := CanEqMixin sum_of_valueK. Canonical value_eqType := Eval hnf in EqType value value_eqMixin. Definition value_choiceMixin := CanChoiceMixin sum_of_valueK. Canonical value_choiceType := Eval hnf in ChoiceType value value_choiceMixin. Definition value_ordMixin := CanOrdMixin sum_of_valueK. Canonical value_ordType := Eval hnf in OrdType value value_ordMixin. Definition value_nominalMixin := BijNominalMixin sum_of_valueK value_of_sumK. Canonical value_nominalType := Eval hnf in NominalType value value_nominalMixin. Lemma renamevE pm v : rename pm v = match v with | VBool b => VBool b | VNum n => VNum n | VPtr p sz => VPtr (rename pm p) sz | VNil => VNil end. Proof. by case: v. Qed. Lemma namesvE v : names v = match v with | VPtr p _ => fset1 p.1 | _ => fset0 end. Proof. by case: v=> [b|n|[i n] ?|] //=; rewrite -2![RHS]fsetU0. Qed. Global Instance VBool_eqvar : {eqvar VBool}. Proof. by move=> s b _ <-. Qed. Global Instance VNum_eqvar : {eqvar VNum}. Proof. by move=> s n _ <-. Qed. Global Instance VPtr_eqvar : {eqvar VPtr}. Proof. by move=> s p _ <-; finsupp. Qed. Global Instance VNil_eqvar : {eqvar VNil}. Proof. by []. Qed. Notation locals := {fmap string -> value}. Notation heap := {fmap ptr -> value}. Implicit Types (ls : locals) (h : heap) (s : locals * heap). Definition mkblock (b : name) vs : heap := uncurrym (setm emptym b (mkfmapfp (fun i => if i is Posz n then Some (nth VNil vs n) else None) [seq Posz n | n <- iota 0 (size vs)])). Lemma mkblockE p b vs : mkblock b vs p = if p.1 == b then if p.2 is Posz n then if n < size vs then Some (nth VNil vs n) else None else None else None. Proof. rewrite /mkblock uncurrymE setmE. case: ifP=> //= _; rewrite mkfmapfpE. case: p.2=> [n|n] /=. rewrite mem_map; last by move=> ?? [->]. by rewrite mem_iota /= add0n. by case: ifP. Qed. Global Instance mkblock_eqvar : {eqvar mkblock}. Proof. move=> s i _ <- vs _ <-. eapply getm_nomR => - [i' [n|n]] _ <-; rewrite !mkblockE /=; last by finsupp. (* FIXME: All this rewriting should not be needed *) rewrite -[size (rename s vs)]size_eqvar renameT; finsupp. Qed. Definition eval_binop b v1 v2 := match b, v1, v2 with | Add, VNum n1, VNum n2 => VNum (n1 + n2) | Add, VPtr p sz, VNum n | Add, VNum n, VPtr p sz => VPtr (p.1, p.2 + n) sz | Add, _, _ => VNil | Sub, VNum n1, VNum n2 => VNum (n1 - n2) | Sub, VPtr p sz, VNum n => VPtr (p.1, p.2 - n) sz | Sub, _, _ => VNil | Mul, VNum n1, VNum n2 => VNum (n1 * n2) | Mul, _, _ => VNil | Eq, _, _ => VBool (v1 == v2) | Leq, VNum n1, VNum n2 => VBool (n1 <= n2) | Leq, _, _ => VNil | And, VBool b1, VBool b2 => VBool (b1 && b2) | And, _, _ => VNil | Or, VBool b1, VBool b2 => VBool (b1 || b2) | Or, _, _ => VNil end%R. (** Function [eval_expr] computes the value of an expression [e] given a local store [ls]. It takes an additional argument [cast] which determines how the cast operator is interpreted: when [cast = true], cast is just the identity; when [cast = false], cast converts the block identifier to an integer. 
*) Fixpoint eval_expr cast e ls := match e with | Var x => odflt VNil (ls x) | Bool b => VBool b | Num n => VNum n | Binop b e1 e2 => eval_binop b (eval_expr cast e1 ls) (eval_expr cast e2 ls) | ENil => VNil | Neg e => if eval_expr cast e ls is VBool b then VBool (~~ b) else VNil | Offset e => if eval_expr cast e ls is VPtr p _ then VNum p.2 else VNil | Size e => if eval_expr cast e ls is VPtr _ sz then VNum sz else VNil | Cast e => let v := eval_expr cast e ls in if cast then v else if eval_expr cast e ls is VPtr p _ then VNum (val p.1) else VNil end. Section Result. Variable T : Type. (** Type of results of computations. [Done x] indicates that a computation successfully terminated, returning [x] as a result. [Error] indicates that an error occurred. [NotYet] indicates that the computation ran for too many steps and couldn't complete. *) Inductive result := | Done of T | Error | NotYet. Definition sum_of_result r := match r with | Done x => inl x | Error => inr true | NotYet => inr false end. Definition result_of_sum r := match r with | inl x => Done x | inr true => Error | inr false => NotYet end. Lemma sum_of_resultK : cancel sum_of_result result_of_sum. Proof. by case. Qed. Lemma result_of_sumK : cancel result_of_sum sum_of_result. Proof. by do ![case=>//]. Qed. Definition result_of_option r := if r is Some x then Done x else Error. End Result. Arguments Error {_}. Arguments NotYet {_}. Definition result_eqMixin (T : eqType) := CanEqMixin (@sum_of_resultK T). Canonical result_eqType (T : eqType) := Eval hnf in EqType (result T) (result_eqMixin T). Definition result_choiceMixin (T : choiceType) := CanChoiceMixin (@sum_of_resultK T). Canonical result_choiceType (T : choiceType) := Eval hnf in ChoiceType (result T) (result_choiceMixin T). Definition result_ordMixin (T : ordType) := CanOrdMixin (@sum_of_resultK T). Canonical result_ordType (T : ordType) := Eval hnf in OrdType (result T) (result_ordMixin T). Definition result_nominalMixin (T : nominalType) := BijNominalMixin (@sum_of_resultK T) (@result_of_sumK T). Canonical result_nominalType (T : nominalType) := Eval hnf in NominalType (result T) (result_nominalMixin T). Section Nominal. Variable T : nominalType. Global Instance Done_eqvar : {eqvar @Done T}. Proof. by move=> s x _ <-. Qed. Lemma renameresE pm (r : result T) : rename pm r = match r with | Done x => Done (rename pm x) | Error => Error | NotYet => NotYet end. Proof. by case: r. Qed. Lemma namesresE (r : result T) : names r = if r is Done x then names x else fset0. Proof. by case: r. Qed. End Nominal. Section Restriction. Variable T : restrType. Definition hide_result A (r : result T) := match r with | Done x => Done (hide A x) | Error => Error | NotYet => NotYet end. Lemma hide_result_law : Restriction.law hide_result. Proof. rewrite /hide_result; constructor. - move=> s A _ <- [x| |] _ <- //=; finsupp. - by move=> A [x| |] //; rewrite hideI. - by move=> ?? [x| |] //; rewrite hideU. - by move=> [x| |] //; rewrite hide0. by move=> ? [x| |] /=; rewrite ?hideP ?fdisjoints0. Qed. Definition result_restrMixin := RestrMixin hide_result_law. Canonical result_restrType := RestrType (result T) result_restrMixin. End Restriction. 
Fixpoint eval_com cast c s k : result (locals * heap) := if k is S k' then match c with | Assn x e => Done (setm s.1 x (eval_expr cast e s.1), s.2) | Load x e => if eval_expr cast e s.1 is VPtr p _ then if s.2 p is Some v then Done (setm s.1 x v, s.2) else Error else Error | Store e e' => if eval_expr cast e s.1 is VPtr p _ then if updm s.2 p (eval_expr cast e' s.1) is Some h' then Done (s.1, h') else Error else Error | Alloc x e => if eval_expr cast e s.1 is VNum (Posz n) then Done (let i := fresh (names s) in (setm s.1 x (VPtr (i, 0 : int) n), unionm (mkblock i (nseq n (VNum 0))) s.2)) else Error | Free e => if eval_expr cast e s.1 is VPtr p _ then if p.2 == 0 then if p.1 \in domm ((@currym _ _ _ : heap -> _) s.2) then Done (s.1, filterm (fun (p' : ptr) _ => p'.1 != p.1) s.2) else Error else Error else Error | Skip => Done s | Seq c1 c2 => match eval_com cast c1 s k' with | Done s' => eval_com cast c2 s' k' | Error => Error | NotYet => NotYet end | If e ct ce => if eval_expr cast e s.1 is VBool b then eval_com cast (if b then ct else ce) s k' else Error | While e c => if eval_expr cast e s.1 is VBool b then eval_com cast (if b then Seq c (While e c) else Skip) s k' else Error end else NotYet. Section Monotonicity. (** The semantics defined as a function is consistent, in the sense that increasing the maximum number of steps it can run for can only cause it to produce a better result. *) Definition refine_result (T : eqType) (r1 r2 : result T) := (r1 == NotYet) || (r1 == r2). Lemma eval_com_leq cast s c k k' : k <= k' -> refine_result (eval_com cast c s k) (eval_com cast c s k'). Proof. move=> Pk; elim: k' k Pk s c => [|k' IH] [|k] // /IH {IH} IH s. rewrite /refine_result. case=> [x e|x e|e e'|x e|e| |c1 c2|e ct ce|e c] /=; try by rewrite eqxx ?orbT. - case/orP: (IH s c1) => [/eqP -> //|/eqP ->]. case: (eval_com _ c1 _ _) => [s'| |] //=. by eauto. - by case: eval_expr=> //= b; eauto. by case: eval_expr=> //= b; eauto. Qed. End Monotonicity. (** Free variables that occur in a command or expression. *) Fixpoint vars_e e := match e with | Var x => fset1 x | Bool _ => fset0 | Num _ => fset0 | Binop _ e1 e2 => vars_e e1 :|: vars_e e2 | ENil => fset0 | Neg e => vars_e e | Offset e => vars_e e | Size e => vars_e e | Cast e => vars_e e end. Fixpoint vars_c c := match c with | Assn x e => x |: vars_e e | Load x e => x |: vars_e e | Store e e' => vars_e e :|: vars_e e' | Alloc x e => x |: vars_e e | Free e => vars_e e | Skip => fset0 | Seq c1 c2 => vars_c c1 :|: vars_c c2 | If e ct ce => vars_e e :|: vars_c ct :|: vars_c ce | While e c => vars_e e :|: vars_c c end. (** Potentially modified variables in a command. *) Fixpoint mod_vars_c c := match c with | Assn x _ => fset1 x | Load x _ => fset1 x | Store _ _ => fset0 | Alloc x _ => fset1 x | Free _ => fset0 | Skip => fset0 | Seq c1 c2 => mod_vars_c c1 :|: mod_vars_c c2 | If _ c1 c2 => mod_vars_c c1 :|: mod_vars_c c2 | While _ c => mod_vars_c c end. Lemma mod_vars_cP cast s s' c x k : eval_com cast c s k = Done s' -> x \notin mod_vars_c c -> s'.1 x = s.1 x. Proof. elim: k s s' c=> [|k IH] //= [ls h] s'. case=> [x' e|x' e|e e'|x' e|e| |c1 c2|e c1 c2|e c] /=; rewrite 1?in_fset1. - by move=> [<-] {s'}; rewrite setmE => /negbTE ->. - case: eval_expr=> // p sz. case: getm=> //= v [<-] {s'}. by rewrite setmE => /negbTE ->. - case: eval_expr=> // p sz. by case: updm=> //= h' [<-] {s'} _. - case: eval_expr=> [| [n|] | |] //= [<-] {s'}. by rewrite setmE => /negbTE ->. - case: eval_expr=> // p sz. by case: ifP=> // Hp; case: ifP=> //= Hp' [<-]. 
- by move=> [<-]. - case e1: eval_com => [s''| |] //= e2. rewrite in_fsetU=> /norP [nc1 nc2]. by rewrite (IH _ _ _ e2) // (IH _ _ _ e1). - rewrite in_fsetU. by case: eval_expr => [[] | | | ] //= he /norP [nc1 nc2]; rewrite (IH _ _ _ he). - by case: eval_expr=> [[] | | | ] //= he hx; rewrite (IH _ _ _ he) //= fsetUid. Qed. Lemma mod_vars_c_subset c : fsubset (mod_vars_c c) (vars_c c). Proof. elim: c=> [x e|x e|e e'|x e|e| |c1 IH1 c2 IH2|e c1 IH1 c2 IH2|e c IH] /=; rewrite ?fsub0set ?fsub1set ?in_fsetU1 ?eqxx //. - by rewrite fsetUSS. - by rewrite -fsetUA; apply: fsubsetU; rewrite fsetUSS // orbT. by rewrite fsubsetU // IH orbT. Qed. (** Basic lemmas about the semantics *) Lemma eval_expr_unionm cast ls1 ls2 e : fsubset (vars_e e) (domm ls1) -> eval_expr cast e (unionm ls1 ls2) = eval_expr cast e ls1. Proof. elim: e => [x|b|n|b e1 IH1 e2 IH2|e IH| |e IH|e IH|e IH] //=. - by rewrite fsub1set unionmE => /dommP [v ->]. - by rewrite fsubUset=> /andP [/IH1 {IH1} -> /IH2 {IH2} ->]. - by case: cast IH=> // IH sub; rewrite IH. - by case: cast IH=> // IH sub; rewrite IH. - by case: cast IH=> // IH sub; rewrite IH. by case: cast IH=> // IH sub; rewrite IH. Qed. Lemma eval_binop_names b v1 v2 : fsubset (names (eval_binop b v1 v2)) (names (v1, v2)). Proof. case: b v1 v2=> [] [b1|n1|p1 sz1|] [b2|n2|p2 sz2|] //=; try by rewrite fsub0set. - by rewrite fsubsetU //= !namesvE fsubsetxx orbT. - by rewrite fsubsetU //= !namesvE fsubsetxx. by rewrite fsubsetU //= !namesvE fsubsetxx. Qed. Lemma eval_expr_names cast ls e : fsubset (names (eval_expr cast e ls)) (names ls). Proof. elim: e=> [x|b|n|b e1 IH1 e2 IH2|e IH| |e IH|e IH|e IH] //=; try by rewrite fsub0set. - case get_x: (ls x) => [[b|n|p|]|] //=; try by rewrite fsub0set. apply/fsubsetP=> i; rewrite namesvE => /fset1P -> {i}. apply/namesmP; eapply PMFreeNamesVal; eauto. by rewrite namesvE; apply/namesnP. - by rewrite (fsubset_trans (eval_binop_names b _ _)) // fsubUset IH1 IH2. - by case: eval_expr => // *; rewrite fsub0set. - by case: eval_expr => // *; rewrite fsub0set. - by case: eval_expr => // *; rewrite fsub0set. case: cast IH=> //. by case: (eval_expr _ _ _)=> [b|n|p|]; rewrite fsub0set. Qed. Lemma domm_mkblock i vs : domm (mkblock i vs) = fset [seq (i, Posz n) | n <- iota 0 (size vs)]. Proof. apply/eqP; rewrite eqEfsubset; apply/andP; split; apply/fsubsetP => /= - [i' n]. move=> /dommP [v]. rewrite mkblockE /= in_fset. have [-> {i'}|] //= := altP eqP. case: n=> [n|] //=. case: ifP=> [n_vs|] //= [e]. apply/mapP; exists n=> //. by rewrite mem_iota. rewrite in_fset=> /mapP /= [n']. rewrite mem_iota /= add0n => n_vs [-> ->]. apply/dommP; exists (nth VNil vs n'). by rewrite mkblockE /= eqxx n_vs. Qed. Lemma names_domm_mkblock i vs : names (domm (mkblock i vs)) = if nilp vs then fset0 else fset1 i. Proof. case: ifP=> [/nilP ->|nnil_vs]. rewrite (_ : mkblock i [::] = emptym) ?domm0 ?namesfs0 //. apply/eq_fmap=> p; rewrite mkblockE /= emptymE. by case: ifP=> //; case: p.2. rewrite domm_mkblock names_fset. apply/eqP; rewrite eqEfsubset; apply/andP; split; apply/fsubsetP=> i'. case/namessP=> /= [[i'' n] /mapP [n' ?] [-> ?]]. by rewrite in_fsetU /= namesT in_fset0 orbF namesnE. move=> /fset1P ->. apply/namessP; exists (i, Posz 0). apply/mapP; exists 0=> //. rewrite mem_iota /= add0n. by case: vs nnil_vs. by rewrite in_fsetU /= namesT in_fset0 orbF namesnE in_fset1 eqxx. Qed. Lemma codomm_mkblock i vs : codomm (mkblock i vs) = fset vs. Proof. apply/eqP; rewrite eqEfsubset; apply/andP; split; apply/fsubsetP=> v. case/codommP=> /= - [i' n]. rewrite mkblockE /=. 
have [_ {i'}|] //= := altP eqP. case: n=> [n|] //=. case: ifP=> [n_vs|] //= [<-]. rewrite in_fset. by apply/mem_nth. rewrite in_fset => v_vs. apply/codommP. exists (i, Posz (index v vs)). by rewrite mkblockE /= eqxx index_mem v_vs nth_index. Qed. Lemma names_codomm_mkblock i vs : names (codomm (mkblock i vs)) = names vs. Proof. by rewrite codomm_mkblock names_fset. Qed. Lemma names_mkblock i vs : names (mkblock i vs) = if nilp vs then fset0 else i |: names vs. Proof. rewrite namesmE names_domm_mkblock names_codomm_mkblock. case: vs=> //=. by rewrite fset0U namessE. Qed. Lemma names_mkblock_fsubset i vs : fsubset (names (mkblock i vs)) (i |: names vs). Proof. by rewrite names_mkblock fun_if if_arg fsub0set fsubsetxx if_same. Qed. Lemma fdisjoint_names_domm h1 h2 : fdisjoint (names (domm h1)) (names (domm h2)) -> fdisjoint (domm h1) (domm h2). Proof. move=> /fdisjointP dis; apply/fdisjointP=> p Pp. have /dis Pi: p.1 \in names (domm h1). by apply/namesfsP; exists p=> //=; apply/fsetUP; left; apply/namesnP. apply: contra Pi=> Pi; apply/namesfsP; exists p=> //. by apply/fsetUP; left; apply/namesnP. Qed. Definition stateu s1 s2 : locals * heap := (unionm s1.1 s2.1, unionm s1.2 s2.2). Lemma eval_com_vars safe s s' c k : fsubset (vars_c c) (domm s.1) -> eval_com safe c s k = Done s' -> domm s'.1 = domm s.1. Proof. elim: k s s' c => [|k IH] /= s s'; first by []. case=> [x e|x e|e e'|x e|e| |c1 c2|e c1 c2|e c] /=; rewrite ?fsubU1set ?fsubUset. - case/andP=> [Px Pe] [<-]; rewrite domm_set /=. apply/eqP; rewrite eqEfsubset; apply/andP; split. by rewrite fsubU1set Px fsubsetxx. by rewrite fsubsetUr. - case/andP=> [Px Pe]. case: eval_expr => // p sz; case: getm=> [v|] //= [<-] {s'}. rewrite domm_set. apply/eqP; rewrite eqEfsubset; apply/andP; split. by rewrite fsubU1set Px fsubsetxx. by rewrite fsubsetUr. - case/andP=> Pe Pe'. by case: eval_expr => // p sz; rewrite /updm; case: getm=> [v|] //= [<-] {s'}. - case: eval_expr => [|[n|]| |] // /andP [Px Pe] [<-] /=. rewrite domm_set. apply/eqP; rewrite eqEfsubset; apply/andP; split; last exact: fsubsetUr. by rewrite fsubUset fsubsetxx andbT fsub1set. - case: eval_expr => // p sz. by have [|]:= altP eqP=> // _; case: ifP=> //= in_h1 sub [<-] {s'}. - by move=> _ [<-] {s'}. - case/andP=> vars_c1 vars_c2. case ev_c1: (eval_com _ _ _) => [s''| |] //= ev_c2. move: vars_c2; rewrite -(IH _ _ _ vars_c1 ev_c1) => vars_c2. by rewrite -(IH _ _ _ vars_c2 ev_c2). - case: eval_expr=> // - b. by rewrite -andbA => /and3P [_ vars_c1 vars_c2]; case: b; eapply IH; eauto. case: eval_expr=> // - [] P; apply: IH; try by rewrite fsub0set. by rewrite /= fsetUC -fsetUA fsetUid fsubUset. Qed. End Basic. Arguments Error {_}. Arguments NotYet {_}. Infix "∪" := stateu (at level 40, left associativity) : state_scope. Instance stateu_eqvar : {eqvar stateu}. Proof. by rewrite /stateu; finsupp. Qed.
-- Andreas, 2022-03-11, issue #5823 -- Make sure we do not weaken the singleton detection too much by checking for loops. open import Agda.Builtin.Nat record ⊤ : Set where record Wrap (A : Set) : Set where field unwrap : A record _×_ (A B : Set) : Set where field fst : A snd : B -- A singleton record type with nestings of Wrap. Singleton = (Wrap ⊤ × Wrap (Wrap ⊤)) × Wrap (Wrap (Wrap ⊤ × ⊤) × Wrap ⊤) -- Agda should solve this meta: unique : Singleton unique = _ -- This is fine, even though we pass through 'Wrap' several times. -- Passing already visited non-recursive records is fine! mutual record S (n : Nat) : Set where inductive; eta-equality field inn : T n T : Nat → Set T zero = ⊤ T (suc n) = T n × S n -- S n is an eta singleton type for each n because it is terminating. inh5 : S 5 inh5 = _
#' Create a gene name xref for heuristic map of mars/10x #' #' #' mcell_add_gene_names_xref_mars10x = function(mars_mat_id, tenx_mat_id, xref_id="DB") { mat1 = scdb_mat(mars_mat_id) if(is.null(mat1)) { stop("no mars mat ", mars_mat_id) } mat2 = scdb_mat(tenx_mat_id) if(is.null(mat2)) { stop("no tenx mat ", tenx_mat_id) } mars_m_nms = rownames(mat1@mat) mars_nms = strsplit(mars_m_nms, ";") mars_n = unlist(lapply(mars_nms, length)) mars_v = rep(mars_m_nms,times=mars_n) names(mars_v) = unlist(mars_nms) tenx_m_nms = rownames(mat2@mat) df1 = data.frame(key = names(mars_v), mars = mars_v) df2 = data.frame(key = tenx_m_nms, tenx = tenx_m_nms) xref_df = df1 %>% full_join(df2) scdb_add_gene_names_xref(xref_id, xref_df) } #' Generate mapping of 10x to mars names #' #' This does no more than find which concatenated names (";" delimited) correspond to 10x gene names; it should be replaced by something more systematic during import #' #' @param mars_mc_id metacell id of a mars-seq dataset #' @param tenx_mc_id metacell id of a 10x dataset #' #' @export gen_10x_mars_gene_match = function(mars_mc_id, tenx_mc_id) { mc1 = scdb_mc(mars_mc_id) if(is.null(mc1)) { stop("no mars mc ", mars_mc_id) } mc2 = scdb_mc(tenx_mc_id) if(is.null(mc2)) { stop("no tenx mc ", tenx_mc_id) } mars_m_nms = rownames(mc1@e_gc) mars_nms = strsplit(mars_m_nms, ";") mars_n = unlist(lapply(mars_nms, length)) mars_v = rep(mars_m_nms,times=mars_n) names(mars_v) = unlist(mars_nms) nm10x = rownames(mc2@e_gc) ten2mars = mars_v[nm10x] f = !duplicated(ten2mars) mars2ten = nm10x[f] names(mars2ten) = ten2mars[f] return(ten2mars) }
Formal statement is: lemma degree_add_less: "degree p < n \<Longrightarrow> degree q < n \<Longrightarrow> degree (p + q) < n" Informal statement is: If the degree of a polynomial $p$ is less than $n$, and the degree of a polynomial $q$ is less than $n$, then the degree of $p + q$ is less than $n$.
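A one-line justification of the informal statement, using the standard bound $\deg(p + q) \le \max(\deg p, \deg q)$: the hypotheses $\deg p < n$ and $\deg q < n$ give $\deg(p + q) \le \max(\deg p, \deg q) < n$.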
(* Title: HOL/Auth/n_germanSimp_lemma_inv__51_on_rules.thy Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences *) header{*The n_germanSimp Protocol Case Study*} theory n_germanSimp_lemma_inv__51_on_rules imports n_germanSimp_lemma_on_inv__51 begin section{*All lemmas on causal relation between inv__51*} lemma lemma_inv__51_on_rules: assumes b1: "r \<in> rules N" and b2: "(\<exists> p__Inv3 p__Inv4. p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__51 p__Inv3 p__Inv4)" shows "invHoldForRule s f r (invariants N)" proof - have c1: "(\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d)\<or> (\<exists> i. i\<le>N\<and>r=n_RecvReqS N i)\<or> (\<exists> i. i\<le>N\<and>r=n_RecvReqE__part__0 N i)\<or> (\<exists> i. i\<le>N\<and>r=n_RecvReqE__part__1 N i)\<or> (\<exists> i. i\<le>N\<and>r=n_SendInv__part__0 i)\<or> (\<exists> i. i\<le>N\<and>r=n_SendInv__part__1 i)\<or> (\<exists> i. i\<le>N\<and>r=n_SendInvAck i)\<or> (\<exists> i. i\<le>N\<and>r=n_RecvInvAck i)\<or> (\<exists> i. i\<le>N\<and>r=n_SendGntS i)\<or> (\<exists> i. i\<le>N\<and>r=n_SendGntE N i)\<or> (\<exists> i. i\<le>N\<and>r=n_RecvGntS i)\<or> (\<exists> i. i\<le>N\<and>r=n_RecvGntE i)" apply (cut_tac b1, auto) done moreover { assume d1: "(\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_StoreVsinv__51) done } moreover { assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvReqS N i)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_RecvReqSVsinv__51) done } moreover { assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvReqE__part__0 N i)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_RecvReqE__part__0Vsinv__51) done } moreover { assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvReqE__part__1 N i)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_RecvReqE__part__1Vsinv__51) done } moreover { assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInv__part__0 i)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_SendInv__part__0Vsinv__51) done } moreover { assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInv__part__1 i)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_SendInv__part__1Vsinv__51) done } moreover { assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInvAck i)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_SendInvAckVsinv__51) done } moreover { assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvInvAck i)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_RecvInvAckVsinv__51) done } moreover { assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendGntS i)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_SendGntSVsinv__51) done } moreover { assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendGntE N i)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_SendGntEVsinv__51) done } moreover { assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntS i)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_RecvGntSVsinv__51) done } moreover { assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntE i)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_RecvGntEVsinv__51) done } ultimately show "invHoldForRule s f r (invariants N)" by satx qed end
double : Num x => x -> x double x = x + x twice : (a -> a) -> a -> a twice f x = f (f x) Shape : Type rotate : Shape -> Shape quadruple : Num a => a -> a quadruple = twice double turn_around : Shape -> Shape turn_around = twice rotate
import for_mathlib.endomorphisms.basic import for_mathlib.exact_functor universe v namespace category_theory namespace endomorphisms open homological_complex category_theory category_theory.limits category variables (𝓐 : Type*) [category.{v} 𝓐] @[simps] def tautological_nat_trans : (endomorphisms.forget 𝓐) ⟶ (endomorphisms.forget 𝓐) := { app := λ X, X.e, } variable {𝓐} variables [abelian 𝓐] [has_coproducts_of_shape (ulift.{v} ℕ) 𝓐] [has_products_of_shape (ulift.{v} ℕ) 𝓐] variables {M : Type*} {c : complex_shape M} (F : endomorphisms 𝓐 ⥤ homological_complex 𝓐 c) variables (Y : homological_complex (endomorphisms 𝓐) c) @[simps] def _root_.homological_complex.tautological_endomorphism : Y ⟶ Y := { f := λ i, ⟨(Y.X i).e, rfl⟩, } lemma homology_functor_obj_e (i : M) : ((homology_functor (endomorphisms 𝓐) c i).obj Y).e = ((homology_functor (endomorphisms 𝓐) c i).map Y.tautological_endomorphism).f := begin have h₁ := ((endomorphisms.forget 𝓐).homology_functor_iso c i).hom.naturality Y.tautological_endomorphism, rw [← cancel_mono (((endomorphisms.forget 𝓐).homology_functor_iso c i).inv.app Y), assoc] at h₁, conv_lhs at h₁ { congr, skip, rw [← nat_trans.comp_app, iso.hom_inv_id, nat_trans.id_app], }, rw comp_id at h₁, conv_lhs at h₁ { dsimp only [functor.comp, endomorphisms.forget], }, rw h₁, clear h₁, have h₂ := nat_trans.congr_app (functor.naturality_homology_functor_iso (tautological_nat_trans 𝓐) c i) Y, dsimp [nat_trans.hcomp] at h₂, rw [comp_id, id_comp, ← cancel_mono (((endomorphisms.forget 𝓐).homology_functor_iso c i).inv.app Y), assoc] at h₂, conv_lhs at h₂ { congr, skip, rw [← nat_trans.comp_app, iso.hom_inv_id, nat_trans.id_app], }, erw comp_id at h₂, exact h₂, end end endomorphisms end category_theory
Devin Garret Townsend (born May 5, 1972) is a Canadian musician, songwriter and record producer. He was the founder, songwriter, vocalist, and guitarist in extreme metal band Strapping Young Lad from 1994 to 2007 and has an extensive career as a solo artist.
"""SQLAlchemy sample data for unit tests. Notes ----- We use the psycopg2 ``copy_from`` rather than SQLAlchemy for fast insertion. """ import io from astropy.coordinates import SkyCoord, uniform_spherical_random_surface from astropy import units as u from mocpy import MOC import numpy as np import pytest from ...constants import HPX, LEVEL, PIXEL_AREA from ...types import Tile from .models import Galaxy, Field, FieldTile, Skymap, SkymapTile ( RANDOM_GALAXIES_SEED, RANDOM_FIELDS_SEED, RANDOM_SKY_MAP_SEED ) = np.random.SeedSequence(12345).spawn(3) def get_ztf_footprint_corners(): """Return the corner offsets of the ZTF footprint.""" x = 6.86 / 2 return [-x, +x, +x, -x] * u.deg, [-x, -x, +x, +x] * u.deg def get_footprints_grid(lon, lat, offsets): """Get a grid of footprints for an equatorial-mount telescope. Parameters ---------- lon : astropy.units.Quantity Longitudes of footprint vertices at the standard pointing. Should be an array of length N. lat : astropy.units.Quantity Latitudes of footprint vertices at the standard pointing. Should be an array of length N. offsets : astropy.coordinates.SkyCoord Pointings for the field grid. Should have length M. Returns ------- astropy.coordinates.SkyCoord Footprints with dimensions (M, N). """ lon = np.repeat(lon[np.newaxis, :], len(offsets), axis=0) lat = np.repeat(lat[np.newaxis, :], len(offsets), axis=0) result = SkyCoord(lon, lat, frame=offsets[:, np.newaxis].skyoffset_frame()) return result.icrs def get_random_points(n, seed): with pytest.MonkeyPatch.context() as monkeypatch: monkeypatch.setattr(np, 'random', np.random.default_rng(seed)) return uniform_spherical_random_surface(n) def get_random_galaxies(n, cursor): points = SkyCoord(get_random_points(n, RANDOM_GALAXIES_SEED)) hpx = HPX.skycoord_to_healpix(points) f = io.StringIO('\n'.join(f'{i}' for i in hpx)) cursor.copy_from(f, Galaxy.__tablename__, columns=('hpx',)) return points def get_random_fields(n, cursor): centers = SkyCoord(get_random_points(n, RANDOM_FIELDS_SEED)) footprints = get_footprints_grid(*get_ztf_footprint_corners(), centers) mocs = [MOC.from_polygon_skycoord(footprint) for footprint in footprints] f = io.StringIO('\n'.join(f'{i}' for i in range(len(mocs)))) cursor.copy_from(f, Field.__tablename__) f = io.StringIO( '\n'.join( f'{i}\t{hpx}' for i, moc in enumerate(mocs) for hpx in Tile.tiles_from(moc) ) ) cursor.copy_from(f, FieldTile.__tablename__) return mocs def get_random_sky_map(n, cursor): rng = np.random.default_rng(RANDOM_SKY_MAP_SEED) # Make a randomly subdivided sky map npix = HPX.npix tiles = np.arange(0, npix + 1, 4 ** LEVEL).tolist() while len(tiles) < n: i = rng.integers(len(tiles)) lo = 0 if i == 0 else tiles[i - 1] hi = tiles[i] diff = (hi - lo) // 4 if diff == 0: continue tiles.insert(i, hi - diff) tiles.insert(i, hi - 2 * diff) tiles.insert(i, hi - 3 * diff) probdensity = rng.uniform(0, 1, size=len(tiles) - 1) probdensity /= np.sum(np.diff(tiles) * probdensity) * PIXEL_AREA f = io.StringIO('1') cursor.copy_from(f, Skymap.__tablename__) f = io.StringIO( '\n'.join( f'1\t[{lo},{hi})\t{p}' for lo, hi, p in zip(tiles[:-1], tiles[1:], probdensity) ) ) cursor.copy_from(f, SkymapTile.__tablename__) return tiles, probdensity
module Idrlisp.Pattern import Idrlisp.Sexp %default total public export record Pattern where constructor MkPattern fixed : List String rest : Maybe String export Eq Pattern where (==) x y = fixed x == fixed y && rest x == rest y export Show Pattern where show x = let fixed = map Sym (fixed x) in let rest = map Sym (rest x) in assert_total $ show $ the (Sexp ()) $ foldr (::) (fromMaybe Nil rest) fixed export build : Sexp a -> Either (Sexp a) Pattern build (Sym x) = Right $ MkPattern [] (Just x) build Nil = Right $ MkPattern [] Nothing build (Sym x :: s) = record { fixed $= (x::) } <$> build s build (x :: s) = Left x build s = Left s export bind : Pattern -> List (Sexp a) -> Either String (List (String, Sexp a)) bind pat xs = go (fixed pat) (rest pat) xs where go : List String -> Maybe String -> List (Sexp a) -> Either String (List (String, Sexp a)) go (p :: ps) rest [] = Left "not enough arguments" go (p :: ps) rest (a :: args) = ((p, a) ::) <$> go ps rest args go [] Nothing [] = Right [] go [] Nothing (a :: args) = Left "too many arguments" go [] (Just rest) args = Right [(rest, foldr (::) Nil args)]
State Before: R : Type u_1 inst✝² : NonAssocRing R p : ℕ inst✝¹ : CharP R p inst✝ : Finite R ⊢ p ≠ 0 State After: R : Type u_1 inst✝² : NonAssocRing R inst✝¹ : Finite R inst✝ : CharP R 0 ⊢ False Tactic: rintro rfl State Before: R : Type u_1 inst✝² : NonAssocRing R inst✝¹ : Finite R inst✝ : CharP R 0 ⊢ False State After: R : Type u_1 inst✝² : NonAssocRing R inst✝¹ : Finite R inst✝ : CharP R 0 this : CharZero R ⊢ False Tactic: haveI : CharZero R := charP_to_charZero R State Before: R : Type u_1 inst✝² : NonAssocRing R inst✝¹ : Finite R inst✝ : CharP R 0 this : CharZero R ⊢ False State After: case intro R : Type u_1 inst✝² : NonAssocRing R inst✝¹ : Finite R inst✝ : CharP R 0 this : CharZero R val✝ : Fintype R ⊢ False Tactic: cases nonempty_fintype R State Before: case intro R : Type u_1 inst✝² : NonAssocRing R inst✝¹ : Finite R inst✝ : CharP R 0 this : CharZero R val✝ : Fintype R ⊢ False State After: no goals Tactic: exact absurd Nat.cast_injective (not_injective_infinite_finite ((↑) : ℕ → R))
Karl Kispert, principal of cyber and information security, has more than 28 years of experience in selling, managing and delivering information risk management, internal audit, regulatory and compliance programs, and information security and technology risk management. A former chief information security officer, Kispert has helped design and implement cybersecurity programs for many firms, according to the firm. “By adding this new service line, and bringing someone with Karl’s expertise to the firm, we can service yet another important aspect of our clients’ and prospects’ businesses, ensuring their continued success,” CEO Louis Grassi said in a written statement. Services will include full security programs, compliance, third party vendor risk assessment, threat management, and managed security services.
/- The formalism of forcing, following Justin Moore's notes -/ import order.bounded_lattice tactic order.complete_boolean_algebra set_theory.zfc .to_mathlib open lattice universe u @[class]def forcing_notion (α : Type u) : Type u := order_top α -- @[instance]def has_top_forcing_notion (α : Type u) [H : forcing_notion α] : has_top α := sorry instance partial_order_of_forcing_notion (α : Type u) [H : forcing_notion α] : partial_order α := { le := H.le, lt := H.lt, le_refl := H.le_refl, le_trans := H.le_trans, lt_iff_le_not_le := H.lt_iff_le_not_le, le_antisymm := H.le_antisymm } def order_top.mk {α : Type u} [H₁ : partial_order α] [H₂ : has_top α] (H : ∀ a : α, a ≤ ⊤) : order_top α := { top := ⊤, le := (≤), lt := (<), le_refl := H₁.le_refl, le_trans := H₁.le_trans, lt_iff_le_not_le := H₁.lt_iff_le_not_le, le_antisymm := H₁.le_antisymm, le_top := H } @[instance]example {α : Type u} : forcing_notion (set α) := order_top.mk (λ _, le_top) /- A pfilter is an order-theoretic filter on the partial order α -/ structure pfilter (α : Type u) [partial_order α] : Type u := (X : set α) (nonempty : X ≠ ∅) (upward_closed : ∀ (p q : α) (H_le : p ≤ q) (H_mem : p ∈ X), q ∈ X) (downward_directed : ∀ (p q ∈ X), ∃ r ∈ X, r ≤ p ∧ r ≤ q) inductive Name (P : Type u) [forcing_notion P] : Type (u+1) | mk (α : Type u) (A : α → Name) (B : α → P) : Name postfix `-name`:100 := Name instance : partial_order punit := { le := λ _ _, true, lt := λ _ _, false, le_refl := by simp, le_trans := by simp, lt_iff_le_not_le := by simp, le_antisymm := by finish } instance : has_top punit := ⟨punit.star⟩ instance : forcing_notion punit := order_top.mk (by finish) instance forcing_notion_complete_boolean_algebra {α : Type u} [complete_boolean_algebra α] : forcing_notion α := order_top.mk (by finish) --TODO(jesse) rewrite in terms of pSet.rec and Name.rec def pSet_equiv_trivial_name : pSet.{u} ≃ (punit-name : Type (u+1)) := { to_fun := λ u, begin induction u with α A ih, from ⟨α, ih, λ _, punit.star⟩ end, inv_fun := λ v, begin induction v with α A B ih, from ⟨α, ih⟩ end, left_inv := λ x, by induction x; finish, right_inv := λ y, by induction y; finish } -- def Pcheck {P} [forcing_notion P] : pSet.{u} → (P-name : Type (u+1)) -- | ⟨α, A⟩ := ⟨α, λ a, Pcheck (A a), λ _, ⊤⟩ namespace pfilter -- note: this will require a smallness argument, since we're going to be reconstructing a type in the ground model /- from Moore's "The method of forcing": If G is any filter and ẋ is any Q-name, define ẋ(G) recursively by ẋ(G) := { ẏ(G) : ∃p ∈ G (( ẏ, p) ∈ ẋ)} x ↦ ẋ is a map (Name α).{u} → Type u, parametrized by a pfilter (G : pfilter α) However, what does it mean for a filter in this case to be generic? -/ def eval {P : Type u} [forcing_notion P] (𝒢 : pfilter P) : P-name → Type u | ⟨α, A, B⟩ := Σ p : {a : α // B a ∈ 𝒢.X}, eval (A p.1) def eval_image {P : Type u} [forcing_notion P] (𝒢 : pfilter P): Type (u + 1) := {α // ∃ x, α = eval 𝒢 x} -- this should be our new model of set theory --TODO 6.8. 6.9, and 6.10 from Moore's notes -- def foo {P : Type u} [forcing_notion P] (𝒢 : pfilter P) : pSet.{u} → (eval_image.{u} 𝒢) := λ x, ⟨eval 𝒢 (Pcheck x), ⟨_, rfl⟩⟩ -- now foo is the canonical map from pSet to eval_image -- need to check that (foo x) is "equivalent" to x again in some way end pfilter
[STATEMENT] lemma chamber_morphism: "ChamberComplexMorphism X Y f" [PROOF STATE] proof (prove) goal (1 subgoal): 1. ChamberComplexMorphism X Y f [PROOF STEP] ..
%% Grain Boundary Tutorial % A quick guide to grain boundary analysis % %% Grain boundary generation % % To work with grain boundaries we need some ebsd data and have to detect % grains within the data set. % load some example data mtexdata twins % detect grains [grains,ebsd.grainId,ebsd.mis2mean] = calcGrains(ebsd('indexed')) % smooth them grains = grains.smooth % visualize the grains plot(grains,grains.meanOrientation) %% % Now we can extract the boundary from the grains and save it to a separate % variable gB = grains.boundary %% % The output tells us that we have 3219 Magnesium to Magnesium boundary % segments and 606 boundary segments where the grains are cut by the % scanning boundary. To restrict the grain boundaries to a specific phase % transition you can do gB_MgMg = gB('Magnesium','Magnesium') %% Properties of grain boundaries % % A variable of type grain boundary contains the following properties % % * misorientation % * direction % * segLength % % These can be used to colorize the grain boundaries. By the following % command we plot the grain boundaries colorized by the misorientation % angle plot(gB_MgMg,gB_MgMg.misorientation.angle./degree,'linewidth',2) mtexColorbar %% hold on plot(gB('notIndexed'),'lineColor','blue','linewidth',5) hold off %% grains.innerBoundary
module ASN1 where open import ASN1.Untyped public open import ASN1.BER public open import ASN1.X509 public
/* multiset/gsl_multiset.h * based on combination/gsl_combination.h by Szymon Jaroszewicz * based on permutation/gsl_permutation.h by Brian Gough * * Copyright (C) 2009 Rhys Ulerich * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 3 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #ifndef __GSL_MULTISET_H__ #define __GSL_MULTISET_H__ #include <stdlib.h> #include <gsl/gsl_errno.h> #include <gsl/gsl_types.h> #include <gsl/gsl_inline.h> #include <gsl/gsl_check_range.h> #undef __BEGIN_DECLS #undef __END_DECLS #ifdef __cplusplus # define __BEGIN_DECLS extern "C" { # define __END_DECLS } #else # define __BEGIN_DECLS /* empty */ # define __END_DECLS /* empty */ #endif __BEGIN_DECLS struct gsl_multiset_struct { size_t n; size_t k; size_t *data; }; typedef struct gsl_multiset_struct gsl_multiset; gsl_multiset *gsl_multiset_alloc (const size_t n, const size_t k); gsl_multiset *gsl_multiset_calloc (const size_t n, const size_t k); void gsl_multiset_init_first (gsl_multiset * c); void gsl_multiset_init_last (gsl_multiset * c); void gsl_multiset_free (gsl_multiset * c); int gsl_multiset_memcpy (gsl_multiset * dest, const gsl_multiset * src); int gsl_multiset_fread (FILE * stream, gsl_multiset * c); int gsl_multiset_fwrite (FILE * stream, const gsl_multiset * c); int gsl_multiset_fscanf (FILE * stream, gsl_multiset * c); int gsl_multiset_fprintf (FILE * stream, const gsl_multiset * c, const char *format); size_t gsl_multiset_n (const gsl_multiset * c); size_t gsl_multiset_k (const gsl_multiset * c); size_t * gsl_multiset_data (const gsl_multiset * c); int gsl_multiset_valid (gsl_multiset * c); int gsl_multiset_next (gsl_multiset * c); int gsl_multiset_prev (gsl_multiset * c); INLINE_DECL size_t gsl_multiset_get (const gsl_multiset * c, const size_t i); #ifdef HAVE_INLINE INLINE_FUN size_t gsl_multiset_get (const gsl_multiset * c, const size_t i) { #if GSL_RANGE_CHECK if (GSL_RANGE_COND(i >= c->k)) /* size_t is unsigned, can't be negative */ { GSL_ERROR_VAL ("index out of range", GSL_EINVAL, 0); } #endif return c->data[i]; } #endif /* HAVE_INLINE */ __END_DECLS #endif /* __GSL_MULTISET_H__ */
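A minimal usage sketch for the multiset interface above, enumerating all multisets of size 2 drawn from {0, 1, 2}. Every call is taken from the declarations in this header; the iteration behaviour (that gsl_multiset_calloc starts at the lexicographically first multiset and gsl_multiset_next returns GSL_SUCCESS until the last one is reached) and the " %u" format string are assumptions carried over from GSL's analogous gsl_combination API.

#include <stdio.h>
#include <gsl/gsl_errno.h>
#include <gsl/gsl_multiset.h>

int main (void)
{
  /* Assumed: calloc initializes to the lexicographically first multiset
     of size k = 2 with elements from {0, ..., n-1} = {0, 1, 2}. */
  gsl_multiset *c = gsl_multiset_calloc (3, 2);
  do
    {
      gsl_multiset_fprintf (stdout, c, " %u"); /* print the current multiset */
      printf ("\n");
    }
  while (gsl_multiset_next (c) == GSL_SUCCESS); /* assumed: GSL_SUCCESS while more remain */
  gsl_multiset_free (c);
  return 0;
}

Under these assumptions the program should print the six size-2 multisets: 0 0, 0 1, 0 2, 1 1, 1 2, 2 2.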
State Before: α : Type u_1 β : Type ?u.476873 γ : Type ?u.476876 δ : Type ?u.476879 r✝ : α → β → Prop p : γ → δ → Prop r : α → α → Prop inst✝² : IsTrans α r inst✝¹ : IsSymm α r s t : Multiset α x : α inst✝ : DecidablePred (r x) h : Rel r s t ⊢ countp (r x) s = countp (r x) t State After: case empty α : Type u_1 β : Type ?u.476873 γ : Type ?u.476876 δ : Type ?u.476879 r✝ : α → β → Prop p : γ → δ → Prop r : α → α → Prop inst✝² : IsTrans α r inst✝¹ : IsSymm α r s t✝ : Multiset α x : α inst✝ : DecidablePred (r x) h✝ : Rel r s t✝ t : Multiset α h : Rel r 0 t ⊢ countp (r x) 0 = countp (r x) t case cons α : Type u_1 β : Type ?u.476873 γ : Type ?u.476876 δ : Type ?u.476879 r✝ : α → β → Prop p : γ → δ → Prop r : α → α → Prop inst✝² : IsTrans α r inst✝¹ : IsSymm α r s✝ t✝ : Multiset α x : α inst✝ : DecidablePred (r x) h✝ : Rel r s✝ t✝ y : α s : Multiset α ih : ∀ {t : Multiset α}, Rel r s t → countp (r x) s = countp (r x) t t : Multiset α h : Rel r (y ::ₘ s) t ⊢ countp (r x) (y ::ₘ s) = countp (r x) t Tactic: induction' s using Multiset.induction_on with y s ih generalizing t State Before: case empty α : Type u_1 β : Type ?u.476873 γ : Type ?u.476876 δ : Type ?u.476879 r✝ : α → β → Prop p : γ → δ → Prop r : α → α → Prop inst✝² : IsTrans α r inst✝¹ : IsSymm α r s t✝ : Multiset α x : α inst✝ : DecidablePred (r x) h✝ : Rel r s t✝ t : Multiset α h : Rel r 0 t ⊢ countp (r x) 0 = countp (r x) t State After: no goals Tactic: rw [rel_zero_left.mp h] State Before: case cons α : Type u_1 β : Type ?u.476873 γ : Type ?u.476876 δ : Type ?u.476879 r✝ : α → β → Prop p : γ → δ → Prop r : α → α → Prop inst✝² : IsTrans α r inst✝¹ : IsSymm α r s✝ t✝ : Multiset α x : α inst✝ : DecidablePred (r x) h✝ : Rel r s✝ t✝ y : α s : Multiset α ih : ∀ {t : Multiset α}, Rel r s t → countp (r x) s = countp (r x) t t : Multiset α h : Rel r (y ::ₘ s) t ⊢ countp (r x) (y ::ₘ s) = countp (r x) t State After: case cons.intro.intro.intro.intro α : Type u_1 β : Type ?u.476873 γ : Type ?u.476876 δ : Type ?u.476879 r✝ : α → β → Prop p : γ → δ → Prop r : α → α → Prop inst✝² : IsTrans α r inst✝¹ : IsSymm α r s✝ t : Multiset α x : α inst✝ : DecidablePred (r x) h✝ : Rel r s✝ t y : α s : Multiset α ih : ∀ {t : Multiset α}, Rel r s t → countp (r x) s = countp (r x) t b : α bs : Multiset α hb1 : r y b hb2 : Rel r s bs h : Rel r (y ::ₘ s) (b ::ₘ bs) ⊢ countp (r x) (y ::ₘ s) = countp (r x) (b ::ₘ bs) Tactic: obtain ⟨b, bs, hb1, hb2, rfl⟩ := rel_cons_left.mp h State Before: case cons.intro.intro.intro.intro α : Type u_1 β : Type ?u.476873 γ : Type ?u.476876 δ : Type ?u.476879 r✝ : α → β → Prop p : γ → δ → Prop r : α → α → Prop inst✝² : IsTrans α r inst✝¹ : IsSymm α r s✝ t : Multiset α x : α inst✝ : DecidablePred (r x) h✝ : Rel r s✝ t y : α s : Multiset α ih : ∀ {t : Multiset α}, Rel r s t → countp (r x) s = countp (r x) t b : α bs : Multiset α hb1 : r y b hb2 : Rel r s bs h : Rel r (y ::ₘ s) (b ::ₘ bs) ⊢ countp (r x) (y ::ₘ s) = countp (r x) (b ::ₘ bs) State After: case cons.intro.intro.intro.intro α : Type u_1 β : Type ?u.476873 γ : Type ?u.476876 δ : Type ?u.476879 r✝ : α → β → Prop p : γ → δ → Prop r : α → α → Prop inst✝² : IsTrans α r inst✝¹ : IsSymm α r s✝ t : Multiset α x : α inst✝ : DecidablePred (r x) h✝ : Rel r s✝ t y : α s : Multiset α ih : ∀ {t : Multiset α}, Rel r s t → countp (r x) s = countp (r x) t b : α bs : Multiset α hb1 : r y b hb2 : Rel r s bs h : Rel r (y ::ₘ s) (b ::ₘ bs) ⊢ (countp (r x) bs + if r x y then 1 else 0) = countp (r x) bs + if r x b then 1 else 0 Tactic: rw [countp_cons, countp_cons, ih hb2] State Before: case 
cons.intro.intro.intro.intro α : Type u_1 β : Type ?u.476873 γ : Type ?u.476876 δ : Type ?u.476879 r✝ : α → β → Prop p : γ → δ → Prop r : α → α → Prop inst✝² : IsTrans α r inst✝¹ : IsSymm α r s✝ t : Multiset α x : α inst✝ : DecidablePred (r x) h✝ : Rel r s✝ t y : α s : Multiset α ih : ∀ {t : Multiset α}, Rel r s t → countp (r x) s = countp (r x) t b : α bs : Multiset α hb1 : r y b hb2 : Rel r s bs h : Rel r (y ::ₘ s) (b ::ₘ bs) ⊢ (countp (r x) bs + if r x y then 1 else 0) = countp (r x) bs + if r x b then 1 else 0 State After: case cons.intro.intro.intro.intro α : Type u_1 β : Type ?u.476873 γ : Type ?u.476876 δ : Type ?u.476879 r✝ : α → β → Prop p : γ → δ → Prop r : α → α → Prop inst✝² : IsTrans α r inst✝¹ : IsSymm α r s✝ t : Multiset α x : α inst✝ : DecidablePred (r x) h✝ : Rel r s✝ t y : α s : Multiset α ih : ∀ {t : Multiset α}, Rel r s t → countp (r x) s = countp (r x) t b : α bs : Multiset α hb1 : r y b hb2 : Rel r s bs h : Rel r (y ::ₘ s) (b ::ₘ bs) ⊢ (if r x y then 1 else 0) = if r x b then 1 else 0 Tactic: simp only [decide_eq_true_eq, add_right_inj] State Before: case cons.intro.intro.intro.intro α : Type u_1 β : Type ?u.476873 γ : Type ?u.476876 δ : Type ?u.476879 r✝ : α → β → Prop p : γ → δ → Prop r : α → α → Prop inst✝² : IsTrans α r inst✝¹ : IsSymm α r s✝ t : Multiset α x : α inst✝ : DecidablePred (r x) h✝ : Rel r s✝ t y : α s : Multiset α ih : ∀ {t : Multiset α}, Rel r s t → countp (r x) s = countp (r x) t b : α bs : Multiset α hb1 : r y b hb2 : Rel r s bs h : Rel r (y ::ₘ s) (b ::ₘ bs) ⊢ (if r x y then 1 else 0) = if r x b then 1 else 0 State After: no goals Tactic: refine' (if_congr ⟨fun h => _root_.trans h hb1, fun h => _root_.trans h (symm hb1)⟩ rfl rfl)
State Before: α : Type u_1 β : Type ?u.342023 ι : Type ?u.342026 G : Type ?u.342029 M : Type u_2 N : Type ?u.342035 inst✝¹ : CommMonoid M inst✝ : CommMonoid N f✝ g : α → M a b : α s t : Set α f : α → M h : ¬a ∈ s hs : Set.Finite (s ∩ mulSupport f) ⊢ (∏ᶠ (i : α) (_ : i ∈ insert a s), f i) = f a * ∏ᶠ (i : α) (_ : i ∈ s), f i State After: α : Type u_1 β : Type ?u.342023 ι : Type ?u.342026 G : Type ?u.342029 M : Type u_2 N : Type ?u.342035 inst✝¹ : CommMonoid M inst✝ : CommMonoid N f✝ g : α → M a b : α s t : Set α f : α → M h : ¬a ∈ s hs : Set.Finite (s ∩ mulSupport f) ⊢ Disjoint {a} s α : Type u_1 β : Type ?u.342023 ι : Type ?u.342026 G : Type ?u.342029 M : Type u_2 N : Type ?u.342035 inst✝¹ : CommMonoid M inst✝ : CommMonoid N f✝ g : α → M a b : α s t : Set α f : α → M h : ¬a ∈ s hs : Set.Finite (s ∩ mulSupport f) ⊢ Set.Finite ({a} ∩ mulSupport f) Tactic: rw [insert_eq, finprod_mem_union' _ _ hs, finprod_mem_singleton] State Before: α : Type u_1 β : Type ?u.342023 ι : Type ?u.342026 G : Type ?u.342029 M : Type u_2 N : Type ?u.342035 inst✝¹ : CommMonoid M inst✝ : CommMonoid N f✝ g : α → M a b : α s t : Set α f : α → M h : ¬a ∈ s hs : Set.Finite (s ∩ mulSupport f) ⊢ Disjoint {a} s State After: no goals Tactic: rwa [disjoint_singleton_left] State Before: α : Type u_1 β : Type ?u.342023 ι : Type ?u.342026 G : Type ?u.342029 M : Type u_2 N : Type ?u.342035 inst✝¹ : CommMonoid M inst✝ : CommMonoid N f✝ g : α → M a b : α s t : Set α f : α → M h : ¬a ∈ s hs : Set.Finite (s ∩ mulSupport f) ⊢ Set.Finite ({a} ∩ mulSupport f) State After: no goals Tactic: exact (finite_singleton a).inter_of_left _
In August 1877, Mathews was seconded from the Navy to Sultan Barghash of Zanzibar to form a European-style army which could be used to enforce Zanzibar's control over its mainland possessions. The army had traditionally been composed entirely of Arabs and Persians but Mathews opened up recruitment to the African majority on the island and had 300 recruits in training by the end of the year. In addition, Mathews employed some unorthodox recruitment methods such as purchasing slaves from their masters, using inmates from the prison and recruiting from Africans rescued from the slavers. In June 1877, at the instigation of John Kirk, the explorer and friend of the Sultan, the British government sent a shipment of 500 modern rifles and ammunition as a gift with which to arm the troops. Mathews introduced a new uniform for the troops consisting of a red cap, short black jackets and white trousers for the enlisted ranks and dark blue frock coats and trousers with gold and silver lace for the Arab officers. The latter was possibly modelled on the Royal Navy officers' uniform with which he was familiar. The army grew quickly; by the 1880s Mathews would command 1,300 men, his forces eventually numbering 1,000 regulars and 5,000 irregulars.
If $f$ is integrable on two paths $g_1$ and $g_2$, and $g_1$ and $g_2$ are valid paths, then the integral of $f$ on the path $g_1 + g_2$ is equal to the sum of the integrals of $f$ on $g_1$ and $g_2$.
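Written out, the informal statement is the additivity of the contour integral over a joined path: $\int_{g_1 + g_2} f(z)\,dz = \int_{g_1} f(z)\,dz + \int_{g_2} f(z)\,dz$, where $g_1 + g_2$ denotes the path that traverses $g_1$ and then $g_2$.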
library(reshape)
library(wordcloud)
library(RColorBrewer)

amcat.lda.addMeta <- function(m, meta){
  if('meta' %in% names(m)) {
    m$meta = meta[match(rownames(m$dtm), meta$id),]
  } else m = c(m, list(meta=meta[match(rownames(m$dtm), meta$id),]))
  m
}

## PLOTTING

amcat.plot.lda.alltopics <- function(m, time_var=m$meta$date, category_var=m$meta$medium, date_interval='day', path='/tmp/clouds/'){
  for(topic_nr in 1:nrow(m$document_sums)){
    print(paste('Plotting:', topic_nr))
    fn = paste(path, topic_nr, ".png", sep="")
    if (!is.null(fn)) png(fn, width=1280, height=800)
    amcat.plot.lda.topic(m, topic_nr, time_var, category_var, date_interval)
    if (!is.null(fn)) dev.off()
  }
  par(mfrow=c(1,1))
}

amcat.plot.lda.topic <- function(m, topic_nr, time_var=m$meta$date, category_var=m$meta$medium, date_interval='day', pct=F, value='total'){
  par(mar=c(4.5,3,2,1), cex.axis=1.7)
  layout(matrix(c(1,1,2,3), 2, 2, byrow = TRUE), widths=c(2.5,1.5), heights=c(1,2))
  amcat.plot.lda.time(m, topic_nr, time_var, date_interval, pct=pct, value=value)
  amcat.plot.lda.wordcloud(m, topic_nr)
  amcat.plot.lda.category(m, topic_nr, category_var, pct=pct, value=value)
  par(mfrow=c(1,1))
}

amcat.prepare.time.var <- function(time_var, date_interval){
  if(class(time_var) == 'Date'){
    if(date_interval == 'day') time_var = as.Date(format(time_var, '%Y-%m-%d'))
    if(date_interval == 'month') time_var = as.Date(paste(format(time_var, '%Y-%m'),'-01',sep=''))
    if(date_interval == 'week') time_var = as.Date(paste(format(time_var, '%Y-%W'),1), '%Y-%W %u')
    if(date_interval == 'year') time_var = as.Date(paste(format(time_var, '%Y'),'-01-01',sep=''))
  }
  time_var
}

amcat.fill.time.gaps <- function(d, date_interval){
  if(class(d$time) == 'numeric'){
    for(t in min(d$time):max(d$time))
      if(!t %in% d$time) d = rbind(d, data.frame(time=t, value=0))
  }
  if(class(d$time) == 'Date'){
    date_sequence = seq.Date(from=min(d$time), to=max(d$time), by=date_interval)
    for(i in 1:length(date_sequence)){
      t = date_sequence[i]
      if(!t %in% d$time) d = rbind(d, data.frame(time=t, value=0))
    }
  }
  d[order(d$time),]
}

amcat.prepare.plot.values <- function(m, break_var, topic_nr, pct=F, value='total'){
  d = data.frame(value=m$document_sums[topic_nr,], break_var=break_var)
  if(value == 'relative') d$value = d$value / colSums(m$document_sums)
  if(pct == T) d$value = d$value / sum(d$value)
  d = aggregate(d[,c('value')], by=list(break_var=d$break_var), FUN='sum')
  d
}

amcat.plot.lda.time <- function(m, topic_nr, time_var=m$meta$date, date_interval='day', pct=F, value='total'){
  par(mar=c(3,3,3,1))
  time_var = amcat.prepare.time.var(time_var, date_interval)
  d = amcat.prepare.plot.values(m, break_var=time_var, topic_nr=topic_nr, pct=pct, value=value)
  colnames(d) = c('time','value')
  d = amcat.fill.time.gaps(d, date_interval)
  plot(d$time, d$value, type='l', xlab='', main='', ylab='',
       xlim=c(min(d$time), max(d$time)), ylim=c(0, max(d$value)),
       bty='L', lwd=5, col='darkgrey')
  d
}

amcat.plot.lda.category <- function(m, topic_nr, category_var=m$meta$medium, pct=F, value='total'){
  par(mar=c(10,0,1,2))
  d = amcat.prepare.plot.values(m, break_var=category_var, topic_nr=topic_nr, pct=pct, value=value)
  colnames(d) = c('category','value')
  barplot(as.matrix(t(d[,c('value')])), main='', beside=TRUE, horiz=FALSE,
          density=NA, col='darkgrey', xlab='', ylab="", axes=T,
          names.arg=d$category, cex.names=1.5, cex.axis=1.5, adj=1, las=2)
  d
}

amcat.plot.lda.wordcloud <- function(m, topic_nr){
  x = m$topics[topic_nr,]
  x = sort(x[x>5], decreasing=T)[1:100]
  x = x[!is.na(x)]
  names = sub("/.*", "", names(x))
  freqs = x**.5
  pal <- brewer.pal(6,"YlGnBu")
  wordcloud(names, freqs, scale=c(6,.5), min.freq=1, max.words=Inf,
            random.order=FALSE, rot.per=.15, colors=pal)
}

## GRAPHS

amcat.ucmatrix.to.simmatrix <- function(ucmatrix, similarity.measure){
  if(similarity.measure=='pearson_cor') sim.matrix = cor(t(ucmatrix))
  if(similarity.measure=='spearman_cor') sim.matrix = cor(t(ucmatrix), method='spearman')
  if(similarity.measure=='hellinger_dist') {
    library(topicmodels)
    sim.matrix = distHellinger(as.matrix(ucmatrix))
  }
  if(similarity.measure=='cosine') {
    library(lsa)
    sim.matrix = cosine(t(as.matrix(ucmatrix)))
  }
  sim.matrix
}

amcat.simmatrix.to.simgraph <- function(m){
  m[lower.tri(m, diag=T)] = NA
  m.indices = which(!is.na(m), arr.ind=T)
  m.values = na.omit(as.vector(m))
  data.frame(node.X=m.indices[,1], node.Y=m.indices[,2], similarity=m.values)
}

amcat.ucmatrix.to.simgraph <- function(ucmatrix, similarity.measure='pearson_cor'){
  sim = amcat.ucmatrix.to.simmatrix(ucmatrix, similarity.measure)
  amcat.simmatrix.to.simgraph(sim)
}

amcat.unit.similarity.graph <- function(ucmatrix, unit.id.list, similarity.measure='pearson_cor'){
  d = aggregate(ucmatrix, by=unit.id.list, FUN='sum')
  ucmatrix = d[,(length(unit.id.list)+1):ncol(d)]
  simgraph = amcat.ucmatrix.to.simgraph(ucmatrix, similarity.measure)
  meta = as.data.frame(d[,names(unit.id.list)], stringsAsFactors=F)
  colnames(meta) = names(unit.id.list)
  meta = cbind(id=1:nrow(meta), meta)
  meta$topic_totals = rowSums(ucmatrix)
  list(graph_df=simgraph, meta=meta)
}

amcat.lda.similarity.graph <- function(m, unit.id.list, vertex_label=names(unit.id.list)[1], similarity.measure='pearson_cor'){
  library(igraph)
  doc_topic_matrix = t(m$document_sums)
  unit.similarity = amcat.unit.similarity.graph(doc_topic_matrix, unit.id.list, similarity.measure)
  g = graph.data.frame(unit.similarity$graph_df, directed=F, vertices=unit.similarity$meta)
  E(g)$weight = E(g)$similarity
  g
}
(* Copyright (C) 2017 M.A.L. Marques

 This Source Code Form is subject to the terms of the Mozilla Public
 License, v. 2.0. If a copy of the MPL was not distributed with this
 file, You can obtain one at http://mozilla.org/MPL/2.0/.
*)

(* type: lda_exc *)

$include "vwn.mpl"

f_vwn := (rs, z) ->
  + f_aux(A_rpa[1], b_rpa[1], c_rpa[1], x0_rpa[1], rs)*(1 - f_zeta(z))
  + f_aux(A_rpa[2], b_rpa[2], c_rpa[2], x0_rpa[2], rs)*f_zeta(z):

f := (rs, z) -> f_vwn(rs, z):
Fixpoint eqb (n m : nat) : bool :=
  match n with
  | O => match m with
         | O => true
         | S m' => false
         end
  | S n' => match m with
            | O => false
            | S m' => eqb n' m'
            end
  end.

Notation "x =? y" := (eqb x y) (at level 70) : nat_scope.

Theorem zero_nbeq_plus_1 : forall n : nat,
  0 =? (n + 1) = false.
Proof.
  intros [| n'].
  - reflexivity.
  - reflexivity.
Qed.
[STATEMENT]
lemma Abs_rename_res':
  fixes x::"'a::fs"
  assumes a: "(p \<bullet> bs) \<sharp>* x" (*and b: "finite bs"*)
  shows "\<exists>q. [bs]res. x = [q \<bullet> bs]res. (q \<bullet> x) \<and> q \<bullet> bs = p \<bullet> bs"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
 1. \<exists>q. [bs]res. x = [q \<bullet> bs]res. q \<bullet> x \<and> q \<bullet> bs = p \<bullet> bs
[PROOF STEP]
using Abs_rename_res[OF a]
[PROOF STATE]
proof (prove)
using this:
  \<exists>q. [bs]res. x = [p \<bullet> bs]res. q \<bullet> x \<and> q \<bullet> bs = p \<bullet> bs
goal (1 subgoal):
 1. \<exists>q. [bs]res. x = [q \<bullet> bs]res. q \<bullet> x \<and> q \<bullet> bs = p \<bullet> bs
[PROOF STEP]
by metis
#ifndef MAPCSS_STYLEEVALUATOR_HPP_INCLUDED
#define MAPCSS_STYLEEVALUATOR_HPP_INCLUDED

#include "entities/Element.hpp"
#include "index/StringTable.hpp"
#include "utils/CoreUtils.hpp"
#include "utils/ElementUtils.hpp"

#include <boost/variant/recursive_variant.hpp>
#include <boost/variant/apply_visitor.hpp>

#include <cstdint>
#include <string>
#include <list>
#include <memory>
#include <vector>

namespace utymap { namespace mapcss {

/// Represents style declaration which support evaluation.
struct StyleEvaluator final {
  /// NOTE has to put these declarations here due to evaluate function implementation
  struct Nil {};
  struct Signed;
  struct Tree;
  typedef boost::variant<Nil, double, std::string,
                         boost::recursive_wrapper<Signed>,
                         boost::recursive_wrapper<Tree>>
      Operand;

  struct Signed {
    char sign;
    Operand operand;
  };

  struct Operation {
    char operator_;
    Operand operand;
  };

  struct Tree {
    Operand first;
    std::list<Operation> rest;
  };

  StyleEvaluator() = delete;

  /// Parses expression into AST.
  static std::unique_ptr<Tree> parse(const std::string& expression);

  /// Evaluates expression using tags.
  template <typename T>
  static T evaluate(const Tree& tree,
                    const std::vector<utymap::entities::Tag>& tags,
                    utymap::index::StringTable& stringTable) {
    typedef typename std::conditional<std::is_same<T, std::string>::value,
                                      StringEvaluator, DoubleEvaluator>::type EvaluatorType;
    return EvaluatorType(tags, stringTable)(tree);
  }

private:
  /// Specifies default AST evaluator behaviour.
  template <typename T>
  struct Evaluator {
    typedef T result_type;

    Evaluator(const std::vector<utymap::entities::Tag>& tags,
              utymap::index::StringTable& stringTable)
        : tags_(tags), stringTable_(stringTable) {}

  protected:
    static std::string throwException() {
      throw std::domain_error("Evaluator: unsupported operation.");
    }

    const std::vector<utymap::entities::Tag>& tags_;
    utymap::index::StringTable& stringTable_;
  };

  /// Evaluates double from AST.
  struct DoubleEvaluator : public Evaluator<double> {
    DoubleEvaluator(const std::vector<utymap::entities::Tag>& tags,
                    utymap::index::StringTable& stringTable)
        : Evaluator(tags, stringTable) {}

    double operator()(Nil) const { return 0; }

    double operator()(double n) const { return n; }

    double operator()(const Operation& o, double lhs) const {
      double rhs = boost::apply_visitor(*this, o.operand);
      switch (o.operator_) {
        case '+': return lhs + rhs;
        case '-': return lhs - rhs;
        case '*': return lhs * rhs;
        case '/': return lhs / rhs;
        default: return 0;
      }
    }

    double operator()(const std::string& tagKey) const {
      auto keyId = stringTable_.getId(tagKey);
      return utymap::utils::parseDouble(
          utymap::utils::getTagValue(keyId, tags_, stringTable_));
    }

    double operator()(const Signed& s) const {
      double rhs = boost::apply_visitor(*this, s.operand);
      switch (s.sign) {
        case '-': return -rhs;
        case '+': return +rhs;
        default: return 0;
      }
    }

    double operator()(const Tree& tree) const {
      double state = boost::apply_visitor(*this, tree.first);
      for (const Operation& oper : tree.rest)
        state = (*this)(oper, state);
      return state;
    }
  };

  /// Evaluates string from AST.
  struct StringEvaluator : public Evaluator<std::string> {
    StringEvaluator(const std::vector<utymap::entities::Tag>& tags,
                    utymap::index::StringTable& stringTable)
        : Evaluator(tags, stringTable) {}

    std::string operator()(Nil) const { return throwException(); }
    std::string operator()(double n) const { return throwException(); }
    std::string operator()(const Operation& o, double lhs) const { return throwException(); }

    std::string operator()(const std::string& tagKey) const {
      return utymap::utils::getTagValue(stringTable_.getId(tagKey), tags_, stringTable_);
    }

    std::string operator()(const Signed& s) const { return throwException(); }

    std::string operator()(const Tree& tree) const {
      return boost::apply_visitor(*this, tree.first);
    }
  };

  std::string value_;
  std::unique_ptr<Tree> tree_;
};

}}

#endif // MAPCSS_STYLEEVALUATOR_HPP_INCLUDED
/* multiroots/fdjac.c
 *
 * Copyright (C) 1996, 1997, 1998, 1999, 2000 Brian Gough
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <config.h>
#include <gsl/gsl_multiroots.h>

int
gsl_multiroot_fdjacobian (gsl_multiroot_function * F,
                          const gsl_vector * x, const gsl_vector * f,
                          double epsrel, gsl_matrix * jacobian)
{
  const size_t n = x->size;
  const size_t m = f->size;
  const size_t n1 = jacobian->size1;
  const size_t n2 = jacobian->size2;

  if (m != n1 || n != n2)
    {
      GSL_ERROR ("function and jacobian are not conformant", GSL_EBADLEN);
    }

  {
    size_t i, j;
    gsl_vector *x1, *f1;

    x1 = gsl_vector_alloc (n);

    if (x1 == 0)
      {
        GSL_ERROR ("failed to allocate space for x1 workspace", GSL_ENOMEM);
      }

    f1 = gsl_vector_alloc (m);

    if (f1 == 0)
      {
        gsl_vector_free (x1);
        GSL_ERROR ("failed to allocate space for f1 workspace", GSL_ENOMEM);
      }

    gsl_vector_memcpy (x1, x);  /* copy x into x1 */

    for (j = 0; j < n; j++)
      {
        double xj = gsl_vector_get (x, j);

        /* use a relative step; fall back to epsrel itself when x_j == 0 */
        double dx = epsrel * fabs (xj);

        if (dx == 0)
          {
            dx = epsrel;
          }

        gsl_vector_set (x1, j, xj + dx);

        {
          int status = GSL_MULTIROOT_FN_EVAL (F, x1, f1);

          if (status != GSL_SUCCESS)
            {
              /* free the workspaces before the early return so they
                 are not leaked */
              gsl_vector_free (x1);
              gsl_vector_free (f1);
              return GSL_EBADFUNC;
            }
        }

        gsl_vector_set (x1, j, xj);

        for (i = 0; i < m; i++)
          {
            double g1 = gsl_vector_get (f1, i);
            double g0 = gsl_vector_get (f, i);
            gsl_matrix_set (jacobian, i, j, (g1 - g0) / dx);
          }
      }

    gsl_vector_free (x1);
    gsl_vector_free (f1);
  }

  return GSL_SUCCESS;
}
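A minimal calling sketch for the function above may help. The prototype and the GSL types come from the file itself; the toy system `my_f`, its dimensions, and the step size are illustrative assumptions, not part of the source:

```cpp
#include <gsl/gsl_multiroots.h>
#include <stdio.h>

/* Hypothetical toy system f(x) = (x0^2 - 2, x0*x1 - 1). */
static int my_f(const gsl_vector *x, void *params, gsl_vector *f) {
  (void)params;
  double x0 = gsl_vector_get(x, 0), x1 = gsl_vector_get(x, 1);
  gsl_vector_set(f, 0, x0 * x0 - 2.0);
  gsl_vector_set(f, 1, x0 * x1 - 1.0);
  return GSL_SUCCESS;
}

int main(void) {
  gsl_multiroot_function F = {&my_f, 2, NULL};
  gsl_vector *x = gsl_vector_alloc(2);
  gsl_vector *f = gsl_vector_alloc(2);
  gsl_matrix *J = gsl_matrix_alloc(2, 2);

  gsl_vector_set(x, 0, 1.0);
  gsl_vector_set(x, 1, 1.0);
  my_f(x, NULL, f);  /* f must already hold F(x) before differencing */

  /* forward-difference Jacobian with relative step 1e-8 */
  int status = gsl_multiroot_fdjacobian(&F, x, f, 1e-8, J);
  if (status == GSL_SUCCESS)
    printf("J[0][0] = %g\n", gsl_matrix_get(J, 0, 0));

  gsl_matrix_free(J);
  gsl_vector_free(f);
  gsl_vector_free(x);
  return status;
}
```

Note that the routine expects `f` to contain the function value at `x`, since the forward difference `(f(x + dx e_j) - f(x)) / dx` reuses it for every column.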
! { dg-do compile }
! { dg-options "-Wunused" }
!
! PR fortran/31461
!
! Contributed by Vivek Rao.
!
module util_mod
  integer :: i,j
end module util_mod

program main
  use util_mod, only: i,j ! { dg-warning "Unused module variable .i. which has been explicitly imported" }
  j = 1
  print*,"j=",j
end program main
||| A Javascript `Object` is primarily a mapping from string
||| to values, and we can use them exactly as such. However,
||| objects can also be used as an intermediary representation
||| for encoding data from and to JSON.
|||
||| This module therefore provides additional functionality:
|||
||| A linear `LinObject` data type for encoding nested datastructure
||| into an intermediary object representation, and an immutable
||| `IObject` data type, mainly for decoding data, but also as an efficient
||| means for having an immutable string to data mapping.
module JS.Object

import JS.Any
import JS.Array
import JS.Boolean
import JS.Inheritance
import JS.Marshall
import JS.Nullable
import JS.Util

export
data Object : Type where [external]

export
ToFFI Object Object where toFFI = id

export
FromFFI Object Object where fromFFI = Just

public export
JSType Object where
  parents = []
  mixins = []

--------------------------------------------------------------------------------
--          Linear Objects
--------------------------------------------------------------------------------

%foreign "javascript:lambda:() => {return {}}"
prim__new : () -> Object

%foreign "javascript:lambda:(o,str) => o[str]"
prim__get : Object -> String -> AnyPtr

%foreign "javascript:lambda:(u,o,str,v) => { o[str] = v; return o }"
prim__set : forall a . Object -> String -> a -> Object

%foreign "javascript:lambda:(u,o) => JSON.stringify(o)"
prim__stringify : forall a . a -> String

%foreign "javascript:lambda:s => JSON.parse(s)"
prim__parse : String -> AnyPtr

||| Objects, mutable in a linear context. Useful for
||| efficient, non-monadic encoding of Idris2 values,
||| for instance to be used in an FFI call to an external
||| function, or when encoding Idris2 values to JSON through
||| the Javascript `JSON.stringify` function.
export
record LinObject where
  constructor MkLinObject
  obj : Object

export
newObj : (1 f : (1 _ : LinObject) -> a) -> a
newObj f = f (MkLinObject (prim__new ()))

export
thaw : (1 _ : LinObject) -> IO Object
thaw (MkLinObject obj) = pure obj

export
lset : (1 _ : LinObject) -> (fld : String) -> a -> LinObject
lset (MkLinObject o) f a = MkLinObject $ prim__set o f a

export
lget : (1 _ : LinObject) -> (fld : String) -> Res AnyPtr (const $ LinObject)
lget (MkLinObject obj) fld = prim__get obj fld # MkLinObject obj

--------------------------------------------------------------------------------
--          Immutable Objects
--------------------------------------------------------------------------------

export
record IObject where
  constructor MkIObject
  obj : Object

export
freeze : (1 _ : LinObject) -> IObject
freeze (MkLinObject obj) = MkIObject obj

export
get : SafeCast a => IObject -> String -> Maybe a
get (MkIObject obj) str = safeCast $ prim__get obj str

--------------------------------------------------------------------------------
--          JSON Values
--------------------------------------------------------------------------------

public export
data Value : Type where
  Arr     : IArray Value -> Value
  Boolean : Bool -> Value
  Null    : Value
  Num     : Double -> Value
  Obj     : IObject -> Value
  Str     : String -> Value

toAny : Value -> Any
toAny (Obj x)     = MkAny x
toAny (Boolean x) = MkAny $ toFFI x
toAny (Arr x)     = MkAny $ map toAny x
toAny (Str x)     = MkAny x
toAny (Num x)     = MkAny x
toAny Null        = MkAny (null {a = ()})

--------------------------------------------------------------------------------
--          JSON Encoding
--------------------------------------------------------------------------------

export
stringify : Value -> String
stringify = prim__stringify . toFFI . toAny

export
obj : (1 _ : LinObject) -> Value
obj (MkLinObject o) = Obj $ MkIObject o

export
lsetVal : (1 _ : LinObject) -> (fld : String) -> Value -> LinObject
lsetVal o f v = lset o f (toFFI $ toAny v)

export
withPairs : List (String,Value) -> ((1 _ : LinObject) -> a) -> a
withPairs ps f = newObj (run ps)
  where run : List (String,Value) -> (1 _ : LinObject) -> a
        run []            o = f o
        run ((s,v) :: ps) o = run ps (lsetVal o s v)

export
pairs : List (String,Value) -> Value
pairs ps = withPairs ps obj

export
vals : List Value -> Value
vals = Arr . fromList

--------------------------------------------------------------------------------
--          JSON decoding
--------------------------------------------------------------------------------

toVal : Any -> Maybe Value
toVal (MkAny ptr) =
      (Str <$> safeCast ptr)
  <|> (Boolean <$> (safeCast ptr >>= fromFFI))
  <|> (if isArray ptr then array ptr else Nothing)
  <|> (if isNull ptr then Just Null else Nothing)
  <|> (Num <$> safeCast ptr)
  <|> (Obj . MkIObject <$> unsafeCastOnTypeof "object" ptr)
  where array : a -> Maybe Value
        array a = let arr = the (IArray Any) (believe_me a)
                   in Arr <$> traverse toVal arr

export
parse : String -> Either JSErr Value
parse s = do
  ptr <- try prim__parse s
  maybe (Left $ Caught #"Unable to decode JSON: \#{s}"#)
        Right
        (toVal (MkAny ptr))

export
parseMaybe : String -> Maybe Value
parseMaybe = either (const Nothing) Just . parse

export
getObject : Value -> Maybe IObject
getObject (Obj x) = Just x
getObject _       = Nothing

export
getBool : Value -> Maybe Bool
getBool (Boolean x) = Just x
getBool _           = Nothing

export
getStr : Value -> Maybe String
getStr (Str x) = Just x
getStr _       = Nothing

export
getNum : Value -> Maybe Double
getNum (Num x) = Just x
getNum _       = Nothing

export
getArray : Value -> Maybe (IArray Value)
getArray (Arr x) = Just x
getArray _       = Nothing

export
valueAt : IObject -> String -> Maybe Value
valueAt (MkIObject obj) = toVal . MkAny . prim__get obj
[STATEMENT]
lemma degree_less_sum' :
  "MPoly_Type.degree (p::real mpoly) var = n \<Longrightarrow>
   MPoly_Type.degree (q::real mpoly) var = m \<Longrightarrow>
   n < m \<Longrightarrow> MPoly_Type.degree (p + q) var = m"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
 1. \<lbrakk>MPoly_Type.degree p var = n; MPoly_Type.degree q var = m; n < m\<rbrakk> \<Longrightarrow> MPoly_Type.degree (p + q) var = m
[PROOF STEP]
using degree_less_sum[of q var m p n]
[PROOF STATE]
proof (prove)
using this:
  \<lbrakk>MPoly_Type.degree q var = m; MPoly_Type.degree p var = n; n < m\<rbrakk> \<Longrightarrow> MPoly_Type.degree (q + p) var = m
goal (1 subgoal):
 1. \<lbrakk>MPoly_Type.degree p var = n; MPoly_Type.degree q var = m; n < m\<rbrakk> \<Longrightarrow> MPoly_Type.degree (p + q) var = m
[PROOF STEP]
by (simp add: add.commute)
module Intro

%access public export

sm : List Nat -> Nat
sm [] = 0
sm (x :: xs) = x + (sm xs)

fct : Nat -> Nat
fct Z = 1
fct (S k) = (S k) * (fct k)

fbp : Nat -> (Nat, Nat)
fbp Z = (1, 1)
fbp (S k) = (snd (fbp k), fst (fbp k) + snd (fbp k))

fib : Nat -> Nat
fib n = fst (fbp n)

public export
add : Nat -> Nat -> Nat
add Z j = j
add (S k) j = S (add k j)

mul : Nat -> Nat -> Nat
mul Z j = Z
mul (S k) j = add j (mul k j)

sub : (n: Nat) -> (m : Nat) -> (LTE m n) -> Nat
sub n Z LTEZero = n
sub (S right) (S left) (LTESucc x) = sub right left x

oneLTEFour : LTE 1 4
oneLTEFour = LTESucc LTEZero

fourMinusOne : Nat
fourMinusOne = sub 4 1 oneLTEFour

reflLTE : (n: Nat) -> LTE n n
reflLTE Z = LTEZero
reflLTE (S k) = LTESucc (reflLTE k)

sillyZero: Nat -> Nat
sillyZero n = sub n n (reflLTE n)

idNat : Nat -> Nat
idNat = \x => x

loop: Nat -> Nat
loop k = loop (S k)
[STATEMENT]
lemma complex_vector_eq_affinity:
  "m \<noteq> 0 \<Longrightarrow> y = m *\<^sub>C x + c \<longleftrightarrow> inverse m *\<^sub>C y - (inverse m *\<^sub>C c) = x"
  for x :: "'a::complex_vector"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
 1. m \<noteq> 0 \<Longrightarrow> (y = m *\<^sub>C x + c) = (y /\<^sub>C m - c /\<^sub>C m = x)
[PROOF STEP]
using complex_vector_affinity_eq[where m=m and x=x and y=y and c=c]
[PROOF STATE]
proof (prove)
using this:
  m \<noteq> 0 \<Longrightarrow> (m *\<^sub>C x + c = y) = (x = y /\<^sub>C m - c /\<^sub>C m)
goal (1 subgoal):
 1. m \<noteq> 0 \<Longrightarrow> (y = m *\<^sub>C x + c) = (y /\<^sub>C m - c /\<^sub>C m = x)
[PROOF STEP]
by metis
Since 1988, chess theorists have challenged previously well-established views about White's advantage. Grandmaster (GM) András Adorján wrote a series of books on the theme that "Black is OK!", arguing that the general perception that White has an advantage is founded more in psychology than reality. GM Mihai Suba and others contend that sometimes White's initiative disappears for no apparent reason as a game progresses. The prevalent style of play for Black today is to seek dynamic, unbalanced positions with active counterplay, rather than merely trying to equalize.
using Base64

const PROGRESS_ID = "weave_progress"

function run_doc(
    doc::WeaveDoc;
    doctype::Union{Nothing,AbstractString} = nothing,
    out_path::Union{Symbol,AbstractString} = :doc,
    args::Any = Dict(),
    mod::Union{Module,Nothing} = nothing,
    fig_path::Union{Nothing,AbstractString} = nothing,
    fig_ext::Union{Nothing,AbstractString} = nothing,
    cache_path::AbstractString = "cache",
    cache::Symbol = :off,
)
    # cache :all, :user, :off, :refresh

    doc.doctype = isnothing(doctype) ? (doctype = detect_doctype(doc.source)) : doctype
    doc.format = deepcopy(get_format(doctype))

    cwd = doc.cwd = get_cwd(doc, out_path)
    mkpath(cwd)

    # TODO: provide a way not to create `fig_path` ?
    if isnothing(fig_path)
        fig_path = if (endswith(doctype, "2pdf") && cache === :off) || endswith(doctype, "2html")
            basename(mktempdir(abspath(cwd)))
        else
            DEFAULT_FIG_PATH
        end
    end
    mkpath(normpath(cwd, fig_path))
    # This is needed for latex and should work on all output formats
    @static Sys.iswindows() && (fig_path = replace(fig_path, "\\" => "/"))
    set_rc_params(doc, fig_path, fig_ext)

    cache === :off || @eval import Serialization

    # XXX: evaluate in a more sensible module
    # New sandbox for each document with args exposed
    isnothing(mod) && (mod = sandbox = Core.eval(Main, :(module $(gensym(:WeaveSandBox)) end))::Module)
    Core.eval(mod, :(WEAVE_ARGS = $(args)))

    mimetypes = doc.format.mimetypes

    report = Report(cwd, doc.basename, doc.format, mimetypes)
    cd_back = let d = pwd(); () -> cd(d); end
    cd(cwd)
    pushdisplay(report)
    try
        if cache !== :off && cache !== :refresh
            cached = read_cache(doc, cache_path)
            isnothing(cached) && @info "No cached results found, running code"
        else
            cached = nothing
        end

        executed = []
        n = length(filter(chunk->isa(chunk,CodeChunk), doc.chunks))
        i = 0
        for chunk in doc.chunks
            if chunk isa CodeChunk
                options = merge(doc.chunk_defaults, chunk.options)
                merge!(chunk.options, options)

                @info "Weaving chunk $(chunk.number) from line $(chunk.start_line)" progress=(i)/n _id=PROGRESS_ID
                i+=1
            end

            restore = (cache === :user && chunk isa CodeChunk && chunk.options[:cache])
            result_chunks = if cached ≠ nothing && (cache === :all || restore)
                restore_chunk(chunk, cached)
            else
                run_chunk(chunk, doc, report, mod)
            end
            executed = [executed; result_chunks]
        end

        replace_header_inline!(doc, report, mod) # evaluate and replace inline code in header

        doc.header_script = report.header_script
        doc.chunks = executed

        cache !== :off && write_cache(doc, cache_path)

        @isdefined(sandbox) && clear_module!(sandbox)
    catch err
        rethrow(err)
    finally
        @info "Weaved all chunks" progress=1 _id=PROGRESS_ID
        cd_back()
        popdisplay(report) # ensure display pops out even if internal error occurs
    end

    return doc
end

run_doc(doc::WeaveDoc, doctype::Union{Nothing,AbstractString}; kwargs...) =
    run_doc(doc; doctype = doctype, kwargs...)

"""
    detect_doctype(path)

Detect the output format based on file extension.
"""
function detect_doctype(path)
    _, ext = lowercase.(splitext(path))

    match(r"^\.(jl|.?md|ipynb)", ext) !== nothing && return "md2html"
    ext == ".rst" && return "rst"
    ext == ".tex" && return "texminted"
    ext == ".txt" && return "asciidoc"

    return "pandoc"
end

function get_cwd(doc, out_path)
    return if out_path === :doc
        dirname(doc.path)
    elseif out_path === :pwd
        pwd()
    else
        path, ext = splitext(out_path)
        if isempty(ext) # directory given
            path
        else # file given
            dirname(path)
        end
    end |> abspath
end

function run_chunk(chunk::CodeChunk, doc, report, mod)
    result = eval_chunk(doc, chunk, report, mod)
    occursin("2html", doc.doctype) && (embed_figures!(result, report.cwd))
    return result
end

function embed_figures!(chunk::CodeChunk, cwd)
    for (i, fig) in enumerate(chunk.figures)
        chunk.figures[i] = img2base64(fig, cwd)
    end
end

embed_figures!(chunks, cwd) = embed_figures!.(chunks, Ref(cwd))

function img2base64(fig, cwd)
    ext = splitext(fig)[2]
    f = open(joinpath(cwd, fig), "r")
    raw = read(f)
    close(f)
    return ext == ".png" ? "data:image/png;base64," * stringmime(MIME("image/png"), raw) :
           ext == ".svg" ? "data:image/svg+xml;base64," * stringmime(MIME("image/svg"), raw) :
           ext == ".gif" ? "data:image/gif;base64," * stringmime(MIME("image/gif"), raw) :
           fig
end

function run_chunk(chunk::DocChunk, doc, report, mod)
    chunk.content = [run_inline(c, doc, report, mod) for c in chunk.content]
    return chunk
end

run_inline(inline::InlineText, ::WeaveDoc, ::Report, ::Module) = inline

const INLINE_OPTIONS = Dict(
    :term => false,
    :hold => true,
    :wrap => false
)

function run_inline(inline::InlineCode, doc::WeaveDoc, report::Report, mod::Module)
    # Make a temporary CodeChunk for running code. Collect results and don't wrap
    chunk = CodeChunk(inline.content, 0, 0, "", INLINE_OPTIONS)
    options = merge(doc.chunk_defaults, chunk.options)
    merge!(chunk.options, options)

    chunks = eval_chunk(doc, chunk, report, mod)
    occursin("2html", doc.doctype) && (embed_figures!(chunks, report.cwd))

    output = chunks[1].output
    endswith(output, "\n") && (output = output[1:end-1])
    inline.output = output
    inline.rich_output = chunks[1].rich_output
    inline.figures = chunks[1].figures
    return inline
end

function run_code(doc::WeaveDoc, chunk::CodeChunk, report::Report, mod::Module)
    code = chunk.content
    path = doc.path
    error = chunk.options[:error]
    codes = chunk.options[:term] ? split_code(code) : [code]
    capture = code -> capture_output(code, mod, path, error, report)
    return capture.(codes)
end

function split_code(code)
    res = String[]
    e = 1
    ex = :init
    while true
        s = e
        ex, e = Meta.parse(code, s)
        isnothing(ex) && break
        push!(res, strip(code[s:e-1]))
    end
    return res
end

function capture_output(code, mod, path, error, report)
    reset_report!(report)

    old = stdout
    rw, wr = redirect_stdout()
    reader = @async read(rw, String)

    local out = nothing
    task_local_storage(:SOURCE_PATH, path) do
        try
            obj = include_string(mod, code, path) # TODO: fix line number
            !isnothing(obj) && !REPL.ends_with_semicolon(code) && display(obj)
        catch _err
            err = unwrap_load_err(_err)
            error || throw(err)
            display(err)
            @warn "ERROR: $(typeof(err)) occurred, including output in Weaved document"
        finally
            redirect_stdout(old)
            close(wr)
            out = fetch(reader)
            close(rw)
        end
    end

    return ChunkOutput(code, remove_ansi_control_chars(out), report.rich_output, report.figures)
end

function reset_report!(report)
    report.rich_output = ""
    report.figures = String[]
end

unwrap_load_err(err) = return err
unwrap_load_err(err::LoadError) = return err.error

# https://stackoverflow.com/a/33925425/12113178
remove_ansi_control_chars(s) = replace(s, r"(\x9B|\x1B\[)[0-?]*[ -\/]*[@-~]" => "")

function eval_chunk(doc::WeaveDoc, chunk::CodeChunk, report::Report, mod::Module)
    if !chunk.options[:eval]
        chunk.output = ""
        chunk.options[:fig] = false
        return chunk
    end

    execute_prehooks!(chunk)

    report.fignum = 1
    report.cur_chunk = chunk

    if hasproperty(report.format, :out_width) && isnothing(chunk.options[:out_width])
        chunk.options[:out_width] = report.format.out_width
    end

    chunk.result = run_code(doc, chunk, report, mod)

    execute_posthooks!(chunk)

    return chunk.options[:term] ? collect_term_results(chunk) :
           chunk.options[:hold] ? collect_hold_results(chunk) :
           collect_results(chunk)
end

# Hooks to run before and after chunks, this is from IJulia,
const preexecution_hooks = Function[]
push_preexecution_hook!(f::Function) = push!(preexecution_hooks, f)
function pop_preexecution_hook!(f::Function)
    i = findfirst(x -> x == f, preexecution_hooks)
    isnothing(i) && error("this function has not been registered in the pre-execution hook yet")
    return splice!(preexecution_hooks, i)
end
function execute_prehooks!(chunk::CodeChunk)
    for prehook in preexecution_hooks
        Base.invokelatest(prehook, chunk)
    end
end

const postexecution_hooks = Function[]
push_postexecution_hook!(f::Function) = push!(postexecution_hooks, f)
function pop_postexecution_hook!(f::Function)
    i = findfirst(x -> x == f, postexecution_hooks)
    isnothing(i) && error("this function has not been registered in the post-execution hook yet")
    return splice!(postexecution_hooks, i)
end
function execute_posthooks!(chunk::CodeChunk)
    for posthook in postexecution_hooks
        Base.invokelatest(posthook, chunk)
    end
end

"""
    clear_module!(mod::Module)

Recursively sets variables in `mod` to `nothing` so that they're GCed.

!!! warning
    `const` variables can't be reassigned, as such they can't be cleared.
"""
function clear_module!(mod::Module)
    for name in names(mod; all = true)
        name === :eval && continue
        try
            v = getfield(mod, name)
            if v isa Module && v != mod
                clear_module!(v)
                continue
            end
            isconst(mod, name) && continue # can't clear constant
            Core.eval(mod, :($name = nothing))
        catch err
            @debug err
        end
    end
end

function get_figname(report::Report, chunk; fignum = nothing, ext = nothing)
    isnothing(ext) && (ext = chunk.options[:fig_ext])
    isnothing(fignum) && (fignum = report.fignum)

    chunkid = isnothing(chunk.options[:label]) ? chunk.number : chunk.options[:label]
    basename = string(report.basename, '_', chunkid, '_', fignum, ext)
    full_name = normpath(report.cwd, chunk.options[:fig_path], basename)
    rel_name = string(chunk.options[:fig_path], '/', basename) # Relative path is used in output
    return full_name, rel_name
end

function set_rc_params(doc::WeaveDoc, fig_path, fig_ext)
    doc.chunk_defaults[:fig_ext] = isnothing(fig_ext) ? doc.format.fig_ext : fig_ext
    doc.chunk_defaults[:fig_path] = fig_path
end

function collect_results(chunk::CodeChunk)
    content = ""
    result_chunks = CodeChunk[]
    for r in chunk.result
        content *= r.code
        # Check if there is any output from chunk
        if any(!isempty ∘ strip, (r.stdout, r.rich_output)) || !isempty(r.figures)
            rchunk = CodeChunk(
                content,
                chunk.number,
                chunk.start_line,
                chunk.optionstring,
                copy(chunk.options),
            )
            rchunk.output = r.stdout
            rchunk.rich_output = r.rich_output
            rchunk.figures = r.figures
            push!(result_chunks, rchunk)
            content = ""
        end
    end
    if !isempty(content)
        rchunk = CodeChunk(
            content,
            chunk.number,
            chunk.start_line,
            chunk.optionstring,
            copy(chunk.options),
        )
        push!(result_chunks, rchunk)
    end
    return result_chunks
end

function collect_term_results(chunk::CodeChunk)
    output = ""
    prompt = chunk.options[:prompt]
    result_chunks = CodeChunk[]
    for r in chunk.result
        output *= string('\n', indent_term_code(prompt, r.code), '\n', r.stdout)
        if !isempty(r.figures)
            rchunk = CodeChunk(
                "",
                chunk.number,
                chunk.start_line,
                chunk.optionstring,
                copy(chunk.options),
            )
            rchunk.output = output
            output = ""
            rchunk.figures = r.figures
            push!(result_chunks, rchunk)
        end
    end
    if !isempty(output)
        rchunk = CodeChunk(
            "",
            chunk.number,
            chunk.start_line,
            chunk.optionstring,
            copy(chunk.options),
        )
        rchunk.output = output
        push!(result_chunks, rchunk)
    end
    return result_chunks
end

function indent_term_code(prompt, code)
    prompt_with_space = string(prompt, ' ')
    n = length(prompt_with_space)
    pads = ' ' ^ n
    return map(enumerate(split(code, '\n'))) do (i,line)
        isone(i) ? string(prompt_with_space, line) : string(pads, line)
    end |> joinlines
end

function collect_hold_results(chunk::CodeChunk)
    for r in chunk.result
        chunk.output *= r.stdout
        chunk.rich_output *= r.rich_output
        chunk.figures = [chunk.figures; r.figures]
    end
    return [chunk]
end

const HEADER_INLINE = Regex("$(HEADER_INLINE_START)(?<code>.+)$(HEADER_INLINE_END)")

replace_header_inline!(doc, report, mod) = _replace_header_inline!(doc, doc.header, report, mod)

function _replace_header_inline!(doc, header, report, mod)
    replace!(header) do (k,v)
        return k =>
            v isa Dict ? _replace_header_inline!(doc, v, report, mod) :
            !isa(v, AbstractString) ? v :
            replace(v, HEADER_INLINE => s -> begin
                code = replace(s, HEADER_INLINE => s"\g<code>")
                run_inline_code(code, doc, report, mod)
            end)
    end
    return header
end

function run_inline_code(code, doc, report, mod)
    inline = InlineCode(code, 1, :inline)
    inline = run_inline(inline, doc, report, mod)
    return strip(inline.output, '"')
end
theory Lexord_List
imports Main
begin

typedef 'a lexlist = "{xs::'a list. True}"
  morphisms unlex Lex
  by auto

definition "lexlist \<equiv> Lex"

lemma lexlist_ext: "Lex xs = Lex ys \<Longrightarrow> xs = ys"
  by (auto simp: Lex_inject)

lemma Lex_unlex [simp, code abstype]:
  "Lex (unlex lxs) = lxs"
  by (metis unlex_inverse)

lemma unlex_lexlist [simp, code abstract]:
  "unlex (lexlist xs) = xs"
  by (metis lexlist_ext unlex_inverse lexlist_def)

definition list_less :: "'a :: ord list \<Rightarrow> 'a list \<Rightarrow> bool" where
  "list_less xs ys \<longleftrightarrow> (xs, ys) \<in> lexord {(u, v). u < v}"

definition list_le where
  "list_le xs ys \<longleftrightarrow> list_less xs ys \<or> xs = ys"

lemma not_less_Nil [simp]: "\<not> list_less x []"
  by (simp add: list_less_def)

lemma Nil_less_Cons [simp]: "list_less [] (a # x)"
  by (simp add: list_less_def)

lemma Cons_less_Cons [simp]: "list_less (a # x) (b # y) \<longleftrightarrow> a < b \<or> a = b \<and> list_less x y"
  by (simp add: list_less_def)

lemma le_Nil [simp]: "list_le x [] \<longleftrightarrow> x = []"
  unfolding list_le_def by (cases x) auto

lemma Nil_le_Cons [simp]: "list_le [] x"
  unfolding list_le_def by (cases x) auto

lemma Cons_le_Cons [simp]: "list_le (a # x) (b # y) \<longleftrightarrow> a < b \<or> a = b \<and> list_le x y"
  unfolding list_le_def by auto

lemma less_list_code [code]:
  "list_less xs [] \<longleftrightarrow> False"
  "list_less [] (x # xs) \<longleftrightarrow> True"
  "list_less (x # xs) (y # ys) \<longleftrightarrow> x < y \<or> x = y \<and> list_less xs ys"
  by simp_all

instantiation lexlist :: (ord) ord
begin

definition lexlist_less_def: "xs < ys \<longleftrightarrow> list_less (unlex xs) (unlex ys)"
definition lexlist_le_def: "(xs :: _ lexlist) \<le> ys \<longleftrightarrow> list_le (unlex xs) (unlex ys)"

instance ..

lemmas lexlist_ord_defs = lexlist_le_def lexlist_less_def list_le_def list_less_def

end

instance lexlist :: (order) order
proof
  fix xs :: "'a lexlist"
  show "xs \<le> xs" by (simp add: lexlist_le_def list_le_def)
next
  fix xs ys zs :: "'a lexlist"
  assume "xs \<le> ys" and "ys \<le> zs"
  then show "xs \<le> zs"
    apply (auto simp add: lexlist_ord_defs)
    apply (rule lexord_trans)
    apply (auto intro: transI)
    done
next
  fix xs ys :: "'a lexlist"
  assume "xs \<le> ys" and "ys \<le> xs"
  then show "xs = ys"
    apply (auto simp add: lexlist_ord_defs)
    apply (rule lexord_irreflexive [THEN notE])
    defer
    apply (rule lexord_trans)
    apply (auto intro: transI simp: unlex_inject)
    done
next
  fix xs ys :: "'a lexlist"
  show "xs < ys \<longleftrightarrow> xs \<le> ys \<and> \<not> ys \<le> xs"
    apply (auto simp add: lexlist_ord_defs)
    defer
    apply (rule lexord_irreflexive [THEN notE])
    apply auto
    apply (rule lexord_irreflexive [THEN notE])
    defer
    apply (rule lexord_trans)
    apply (auto intro: transI)
    done
qed

instance lexlist :: (linorder) linorder
proof
  fix xs ys :: "'a lexlist"
  have "(unlex xs, unlex ys) \<in> lexord {(u, v). u < v} \<or> unlex xs = unlex ys \<or>
        (unlex ys, unlex xs) \<in> lexord {(u, v). u < v}"
    by (rule lexord_linear) auto
  then show "xs \<le> ys \<or> ys \<le> xs"
    by (auto simp add: lexlist_ord_defs unlex_inject)
qed

end
Formal statement is:
proposition homotopic_paths_reparametrize:
  assumes "path p"
    and pips: "path_image p \<subseteq> s"
    and contf: "continuous_on {0..1} f"
    and f01: "f ` {0..1} \<subseteq> {0..1}"
    and [simp]: "f(0) = 0" "f(1) = 1"
    and q: "\<And>t. t \<in> {0..1} \<Longrightarrow> q(t) = p(f t)"
  shows "homotopic_paths s p q"
Informal statement is: If $p$ is a path whose image lies in $s$, and $f$ is a continuous function from $[0,1]$ to $[0,1]$ with $f(0) = 0$ and $f(1) = 1$, then the path $q$ defined by $q(t) = p(f(t))$ is homotopic to $p$ within $s$.
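The idea behind this proposition (a sketch of the standard argument; the source states only the result) is the straight-line homotopy between the reparametrization $f$ and the identity, pushed through $p$:

\[
  H(u, t) \;=\; p\bigl((1-u)\,f(t) + u\,t\bigr), \qquad (u, t) \in [0,1] \times [0,1].
\]

Here $H(0,\cdot) = q$ and $H(1,\cdot) = p$; every intermediate curve starts at $p(0)$ and ends at $p(1)$ because $f(0) = 0$ and $f(1) = 1$, and its image stays inside $s$ because $(1-u)f(t) + u\,t \in [0,1]$ by convexity, so $H(u,t) \in p([0,1]) \subseteq s$.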
////////////////////////////////////////////////////////////////////////////////
/// Copyright of this program is the property of AMADEUS, without
/// whose written permission reproduction in whole or in part is prohibited.
////////////////////////////////////////////////////////////////////////////////

#pragma once

#include <boost/lexical_cast.hpp>
#include <string>
#include <mdw/UnknownException.hpp>

namespace mdw {

  template<typename Target, typename Source>
  Target lexical_cast(const Source& iArg)
  {
    try{
      Target aTarget = boost::lexical_cast<Target>(iArg);
      return aTarget;
    } catch (boost::bad_lexical_cast& aException){
      mdw::UnknownException aEx(boost::lexical_cast<std::string>(iArg)
                                + " cannot be cast (" + aException.what() + ")");
      throw aEx;
    }
  }

  /*
  template<typename Source>
  std::string lexical_cast(const Source& iArg)
  {
    return boost::lexical_cast<std::string>(iArg);
  }*/
}
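A minimal usage sketch of the wrapper above. The exception type comes from the header itself; the include path `<mdw/lexical_cast.hpp>` is a hypothetical name for the file shown, and the surrounding `main` is illustrative:

```cpp
#include <iostream>
#include <mdw/lexical_cast.hpp>  // hypothetical header name for the file above

int main() {
  // Successful conversion behaves exactly like boost::lexical_cast.
  int n = mdw::lexical_cast<int>("42");

  try {
    // A failed conversion is rethrown as mdw::UnknownException,
    // with the offending value embedded in the message.
    mdw::lexical_cast<int>("not a number");
  } catch (const mdw::UnknownException&) {
    std::cerr << "cast failed" << std::endl;
  }

  std::cout << n << std::endl;
  return 0;
}
```

The point of the wrapper is to funnel `boost::bad_lexical_cast` into the project's own exception hierarchy while preserving the original value and reason in the message.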
(*  Title:      HOL/Auth/n_mutualExSimp_lemma_inv__5_on_rules.thy
    Author:     Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
    Copyright   2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)

header{*The n_mutualExSimp Protocol Case Study*}

theory n_mutualExSimp_lemma_inv__5_on_rules imports n_mutualExSimp_lemma_on_inv__5
begin

section{*All lemmas on causal relation between inv__5*}

lemma lemma_inv__5_on_rules:
  assumes b1: "r \<in> rules N" and b2: "(\<exists> p__Inv3 p__Inv4. p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__5 p__Inv3 p__Inv4)"
  shows "invHoldForRule s f r (invariants N)"
proof -
  have c1: "(\<exists> i. i\<le>N\<and>r=n_Crit i)\<or>
    (\<exists> i. i\<le>N\<and>r=n_Exit i)\<or>
    (\<exists> i. i\<le>N\<and>r=n_Idle i)"
    apply (cut_tac b1, auto) done
  moreover {
    assume d1: "(\<exists> i. i\<le>N\<and>r=n_Crit i)"
    have "invHoldForRule s f r (invariants N)"
      apply (cut_tac b2 d1, metis n_CritVsinv__5) done
  }
  moreover {
    assume d1: "(\<exists> i. i\<le>N\<and>r=n_Exit i)"
    have "invHoldForRule s f r (invariants N)"
      apply (cut_tac b2 d1, metis n_ExitVsinv__5) done
  }
  moreover {
    assume d1: "(\<exists> i. i\<le>N\<and>r=n_Idle i)"
    have "invHoldForRule s f r (invariants N)"
      apply (cut_tac b2 d1, metis n_IdleVsinv__5) done
  }
  ultimately show "invHoldForRule s f r (invariants N)"
    by satx
qed

end
{-
This second-order signature was created from the following second-order syntax description:

syntax CommRing | CR

type
  * : 0-ary

term
  zero : *            | 𝟘
  add  : *  *  ->  *  | _⊕_ l20
  one  : *            | 𝟙
  mult : *  *  ->  *  | _⊗_ l30
  neg  : *  ->  *     | ⊖_ r50

theory
  (𝟘U⊕ᴸ) a |> add (zero, a) = a
  (𝟘U⊕ᴿ) a |> add (a, zero) = a
  (⊕A) a b c |> add (add(a, b), c) = add (a, add(b, c))
  (⊕C) a b |> add(a, b) = add(b, a)
  (𝟙U⊗ᴸ) a |> mult (one, a) = a
  (𝟙U⊗ᴿ) a |> mult (a, one) = a
  (⊗A) a b c |> mult (mult(a, b), c) = mult (a, mult(b, c))
  (⊗D⊕ᴸ) a b c |> mult (a, add (b, c)) = add (mult(a, b), mult(a, c))
  (⊗D⊕ᴿ) a b c |> mult (add (a, b), c) = add (mult(a, c), mult(b, c))
  (𝟘X⊗ᴸ) a |> mult (zero, a) = zero
  (𝟘X⊗ᴿ) a |> mult (a, zero) = zero
  (⊖N⊕ᴸ) a |> add (neg (a), a) = zero
  (⊖N⊕ᴿ) a |> add (a, neg (a)) = zero
  (⊗C) a b |> mult(a, b) = mult(b, a)
-}

module CommRing.Signature where

open import SOAS.Context
open import SOAS.Common

open import SOAS.Syntax.Signature *T public
open import SOAS.Syntax.Build *T public

-- Operator symbols
data CRₒ : Set where
  zeroₒ addₒ oneₒ multₒ negₒ : CRₒ

-- Term signature
CR:Sig : Signature CRₒ
CR:Sig = sig λ
  { zeroₒ → ⟼₀ *
  ; addₒ  → (⊢₀ *) , (⊢₀ *) ⟼₂ *
  ; oneₒ  → ⟼₀ *
  ; multₒ → (⊢₀ *) , (⊢₀ *) ⟼₂ *
  ; negₒ  → (⊢₀ *) ⟼₁ *
  }

open Signature CR:Sig public
using Test
using FluidTensors
using LinearAlgebra

@testset "Vector Operations" begin
    a = Vec(1.,2.,3.)
    b = Vec(4.,5.,6.)
    @test a == [1.,2.,3.]
    @test a+b === Vec(5.,7.,9.)
    @test b-a === Vec(3.,3.,3.)
    @test a⋅b ≈ 32.
    @test norm(a) ≈ 3.7416573867739413
    @test norm(im*a) ≈ 3.7416573867739413
    @test 2*a === Vec(2.,4.,6.)
    @test a/2 === Vec(0.5,1.,1.5)
    @test cross(a,b) === Vec(-3.,6.,-3.)
    @test cross(b,a) === Vec(3.,-6.,3.)
end

@testset "Tensor Operations" begin
    A = SymTen(1.,2.,3.,4.,5.,6.)
    @test A == [1. 2. 3.; 2. 4. 5.; 3. 5. 6.]
    B = SymTen(2.,3.,4.,5.,6.,7.)
    @test A+B === SymTen(3.,5.,7.,9.,11.,13.)
    @test B-A === SymTen(1.,1.,1.,1.,1.,1.)
    @test 2*A === SymTen(2.,4.,6.,8.,10.,12.)
    @test A/2 === SymTen(0.5,1.,1.5,2.,2.5,3.)
    @test tr(A) == 11.
    @test A:B == tr(A*B)
    @test norm(A) == sqrt(2*tr(A*A))
    @test traceless(SymTen(1.,2.,3.,1.,5.,1.)) == [0. 2. 3.; 2. 0. 5.; 3. 5. 0.]
end

@testset "Tensor x Vector operation" begin
    a = Vec(1.,2.,3.)
    b = Vec(4.,5.,6.)
    A = SymTen(1.,2.,3.,4.,5.,6.)
    @test A⋅a === Vec(14.,25.,31.)
    @test symouter(a,b) === SymTen(4.,6.5,9.,10.,13.5,18.)
    @test antisymouter(a,b) === AntiSymTen(-1.5,-3.0,-1.5)
    @test outer(a,b) === Ten(4.,8.,12.,5.,10.,15.,6.,12.,18.)
end

@testset "UniformScaling" begin
    A = SymTen(1,1,1,1,1,1)
    @test A - I === SymTen(0,1,1,0,1,0)
    @test A + I === SymTen(2,1,1,2,1,2)
    @test I - A === SymTen(0,-1,-1,0,-1,0)
end

@testset "VecArrays" begin
    v = VecArray([1. 2.;3. 4.], [5. 6.; 7. 8.], [9. 10.;-1. -2.])
    @test eltype(v) === Vec{Float64}
    @test size(v) == (2,2)
    @test v[2] === Vec{Float64}(3.,7.,-1.)
    @test v[1,2] === Vec{Float64}(2., 6., 10.)
    @test v[2,2] === Vec{Float64}(4., 8., -2.)
    v[1] = Vec(-1.,-2.,-3.)
    @test v.x[1] == -1 && v.y[1] == -2 && v.z[1] == -3.
end

@testset "Ten Mixed type operations" begin
    v = Vec(rand(3)...)
    for a in (SymTen(rand(6)...), AntiSymTen(rand(3)...), Ten(rand(9)...))
        for b in (SymTen(rand(6)...), AntiSymTen(rand(3)...), Ten(rand(9)...))
            for op in (+,-)
                @test op(a,b) ≈ op(Matrix(a),Matrix(b))
            end
            @test a⋅b ≈ a*b
            @test a:b ≈ tr(Matrix(a)'*Matrix(b)) atol=2e-15
            @test Lie(a,b) ≈ (Matrix(a)*Matrix(b) - Matrix(b)*Matrix(a)) atol=2e-15
        end
        @test a⋅v ≈ Matrix(a)*Vector(v)
        @test v⋅a ≈ Matrix(a)'*Vector(v)
        @test -a ≈ -Matrix(a)
        @test a + I ≈ Matrix(a) + I
        @test a - I ≈ Matrix(a) - I
        @test I + a ≈ I + Matrix(a)
        @test I - a ≈ I - Matrix(a)
        for op in (+,-,tr,det)
            @test op(a) ≈ op(Matrix(a)) atol=2e-15
        end
        @test square(a) ≈ Matrix(a)^2
        @test symmetric(a) ≈ (Matrix(a) + Matrix(a)')/2 atol=2e-15
        @test antisymmetric(a) ≈ (Matrix(a) - Matrix(a)')/2 atol=2e-15
    end
end

@testset "Eigenvalues and vectors of SymTen" begin
    for a in (SymTen(rand(6)...), SymTen(rand(6)...), SymTen(rand(6)...))
        l,e = eigvec(a)
        @test all(isapprox.(l, eig(a))) || all(isapprox.((l[3],l[2],l[1]), eig(a)))
        b = eigen(Matrix(a))
        lv = b.values
        ev = b.vectors
        @test all(isapprox.(l, lv))
        @test (e[1] ≈ ev[:,1] || -e[1] ≈ ev[:,1])
        @test (e[2] ≈ ev[:,2] || -e[2] ≈ ev[:,2])
        @test (e[3] ≈ ev[:,3] || -e[3] ≈ ev[:,3])
        @test cross(e[1],e[2]) ≈ e[3]
    end
end
/-
Copyright (c) 2022 Eric Wieser. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Eric Wieser
-/
import algebra.star.basic
import algebra.ring.prod
import algebra.module.prod

/-!
# `star` on product types

> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.

We put a `has_star` structure on product types that operates elementwise.
-/

universes u v w

variables {R : Type u} {S : Type v}

namespace prod

instance [has_star R] [has_star S] : has_star (R × S) :=
{ star := λ x, (star x.1, star x.2) }

@[simp]
lemma fst_star [has_star R] [has_star S] (x : R × S) : (star x).1 = star x.1 := rfl

@[simp]
lemma star_def [has_star R] [has_star S] (x : R × S) : star x = (star x.1, star x.2) := rfl

instance [has_star R] [has_star S] [has_trivial_star R] [has_trivial_star S] :
  has_trivial_star (R × S) :=
{ star_trivial := λ _, prod.ext (star_trivial _) (star_trivial _) }

instance [has_involutive_star R] [has_involutive_star S] : has_involutive_star (R × S) :=
{ star_involutive := λ _, prod.ext (star_star _) (star_star _) }

instance [semigroup R] [semigroup S] [star_semigroup R] [star_semigroup S] :
  star_semigroup (R × S) :=
{ star_mul := λ _ _, prod.ext (star_mul _ _) (star_mul _ _) }

instance [add_monoid R] [add_monoid S] [star_add_monoid R] [star_add_monoid S] :
  star_add_monoid (R × S) :=
{ star_add := λ _ _, prod.ext (star_add _ _) (star_add _ _) }

instance [non_unital_semiring R] [non_unital_semiring S] [star_ring R] [star_ring S] :
  star_ring (R × S) :=
{ ..prod.star_add_monoid, ..(prod.star_semigroup : star_semigroup (R × S)) }

instance {α : Type w} [has_smul α R] [has_smul α S] [has_star α] [has_star R] [has_star S]
  [star_module α R] [star_module α S] :
  star_module α (R × S) :=
{ star_smul := λ r x, prod.ext (star_smul _ _) (star_smul _ _) }

end prod

@[simp]
lemma units.embed_product_star [monoid R] [star_semigroup R] (u : Rˣ) :
  units.embed_product R (star u) = star (units.embed_product R u) := rfl
(*  Title:      HOL/Auth/n_germanSymIndex_lemma_inv__34_on_rules.thy
    Author:     Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
    Copyright   2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)

header{*The n_germanSymIndex Protocol Case Study*}

theory n_germanSymIndex_lemma_inv__34_on_rules imports n_germanSymIndex_lemma_on_inv__34
begin

section{*All lemmas on causal relation between inv__34*}

lemma lemma_inv__34_on_rules:
  assumes b1: "r \<in> rules N" and b2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__34 p__Inv2)"
  shows "invHoldForRule s f r (invariants N)"
proof -
  have c1: "(\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d)\<or>
    (\<exists> i. i\<le>N\<and>r=n_SendReqS i)\<or>
    (\<exists> i. i\<le>N\<and>r=n_SendReqE__part__0 i)\<or>
    (\<exists> i. i\<le>N\<and>r=n_SendReqE__part__1 i)\<or>
    (\<exists> i. i\<le>N\<and>r=n_RecvReqS N i)\<or>
    (\<exists> i. i\<le>N\<and>r=n_RecvReqE N i)\<or>
    (\<exists> i. i\<le>N\<and>r=n_SendInv__part__0 i)\<or>
    (\<exists> i. i\<le>N\<and>r=n_SendInv__part__1 i)\<or>
    (\<exists> i. i\<le>N\<and>r=n_SendInvAck i)\<or>
    (\<exists> i. i\<le>N\<and>r=n_RecvInvAck i)\<or>
    (\<exists> i. i\<le>N\<and>r=n_SendGntS i)\<or>
    (\<exists> i. i\<le>N\<and>r=n_SendGntE N i)\<or>
    (\<exists> i. i\<le>N\<and>r=n_RecvGntS i)\<or>
    (\<exists> i. i\<le>N\<and>r=n_RecvGntE i)"
    apply (cut_tac b1, auto) done
  moreover {
    assume d1: "(\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d)"
    have "invHoldForRule s f r (invariants N)"
      apply (cut_tac b2 d1, metis n_StoreVsinv__34) done
  }
  moreover {
    assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendReqS i)"
    have "invHoldForRule s f r (invariants N)"
      apply (cut_tac b2 d1, metis n_SendReqSVsinv__34) done
  }
  moreover {
    assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendReqE__part__0 i)"
    have "invHoldForRule s f r (invariants N)"
      apply (cut_tac b2 d1, metis n_SendReqE__part__0Vsinv__34) done
  }
  moreover {
    assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendReqE__part__1 i)"
    have "invHoldForRule s f r (invariants N)"
      apply (cut_tac b2 d1, metis n_SendReqE__part__1Vsinv__34) done
  }
  moreover {
    assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvReqS N i)"
    have "invHoldForRule s f r (invariants N)"
      apply (cut_tac b2 d1, metis n_RecvReqSVsinv__34) done
  }
  moreover {
    assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvReqE N i)"
    have "invHoldForRule s f r (invariants N)"
      apply (cut_tac b2 d1, metis n_RecvReqEVsinv__34) done
  }
  moreover {
    assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInv__part__0 i)"
    have "invHoldForRule s f r (invariants N)"
      apply (cut_tac b2 d1, metis n_SendInv__part__0Vsinv__34) done
  }
  moreover {
    assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInv__part__1 i)"
    have "invHoldForRule s f r (invariants N)"
      apply (cut_tac b2 d1, metis n_SendInv__part__1Vsinv__34) done
  }
  moreover {
    assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInvAck i)"
    have "invHoldForRule s f r (invariants N)"
      apply (cut_tac b2 d1, metis n_SendInvAckVsinv__34) done
  }
  moreover {
    assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvInvAck i)"
    have "invHoldForRule s f r (invariants N)"
      apply (cut_tac b2 d1, metis n_RecvInvAckVsinv__34) done
  }
  moreover {
    assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendGntS i)"
    have "invHoldForRule s f r (invariants N)"
      apply (cut_tac b2 d1, metis n_SendGntSVsinv__34) done
  }
  moreover {
    assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendGntE N i)"
    have "invHoldForRule s f r (invariants N)"
      apply (cut_tac b2 d1, metis n_SendGntEVsinv__34) done
  }
  moreover {
    assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntS i)"
    have "invHoldForRule s f r (invariants N)"
      apply (cut_tac b2 d1, metis n_RecvGntSVsinv__34) done
  }
  moreover {
    assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntE i)"
    have "invHoldForRule s f r (invariants N)"
      apply (cut_tac b2 d1, metis n_RecvGntEVsinv__34) done
  }
  ultimately show "invHoldForRule s f r (invariants N)"
    by satx
qed

end
Load LFindLoad.
From lfind Require Import LFind.
From QuickChick Require Import QuickChick.
From adtind Require Import goal4.

Derive Show for natural.
Derive Arbitrary for natural.
Instance Dec_Eq_natural : Dec_Eq natural.
Proof. dec_eq. Qed.

Derive Show for lst.
Derive Arbitrary for lst.
Instance Dec_Eq_lst : Dec_Eq lst.
Proof. dec_eq. Qed.

Lemma conj5synthconj4 : forall (lv0 : lst),
  (@eq natural (len lv0) (len (append Nil lv0))).
Admitted.

QuickChick conj5synthconj4.
subsection \<open>Timing\<close>

theory Primitives_IMP_Minus_Time
  imports
    Primitives_IMP_Minus
    "Poly_Reductions_Lib.Landau_Auxiliaries"
    "Poly_Reductions_Lib.Polynomial_Growth_Functions"
    "HOL-Library.Rewrite"
begin

text \<open>In this file we show that the runtime of the programs in Primitives_IMP_Minus
  is polynomial in the input size\<close>

subsubsection \<open>Multiplication\<close>

(* Accumulator in one step, not sure if necessary *)
lemma mul_imp_time_acc': "mul_imp_time t s = mul_imp_time 0 s + t"
  by (induction t) (use mul_imp_time_acc in auto)

(* Experiment, this should stop endless unfolding problems *)
corollary mul_imp_time_acc'': "NO_MATCH 0 t \<Longrightarrow> mul_imp_time t s = mul_imp_time 0 s + t"
  using mul_imp_time_acc'.

(* Experiments with simp rules, delete: *)
lemma no_match_simp: "NO_MATCH p v \<Longrightarrow> P \<Longrightarrow> P"
  by simp
lemmas mul_imp_time_simp = no_match_simp[OF _ mul_imp_time.simps,
    where p="mul_imp_time t (mul_state_upd s)" for s t]

(* Proving was getting ugly, so drop the accumulator *)
fun mul_imp_time' :: "nat \<Rightarrow> nat" where
  "mul_imp_time' 0 = 2" |
  "mul_imp_time' n = mul_imp_time' (n div 2) + 10"

(* Equivalence of simpler version *)
lemma mul_imp_time_mul_imp_time': "mul_imp_time t s = mul_imp_time' (mul_b s) + t"
proof (induction "mul_b s" arbitrary: s t rule: mul_imp_time'.induct)
  case 1
  then show ?case
    by (subst mul_imp_time.simps) auto
next
  case 2
  show ?case
    by (subst mul_imp_time.simps)
      (auto simp add: 2(1) 2(2)[symmetric] mul_state_upd_def mul_imp_time_acc)
qed

(* Extract non recursive version *)
lemma mul_imp_time'_non_rec:
  "mul_imp_time' b = (if b = 0 then 0 else 10 * (1 + Discrete.log b)) + 2"
proof (induction b rule: mul_imp_time'.induct)
  case 1
  then show ?case
    by simp
next
  case (2 b)
  then show ?case
  proof(cases b)
    case 0
    then show ?thesis
      using 2 by auto
  next
    case (Suc n)
    then show ?thesis
      using 2 by (auto simp add: log_rec)
  qed
qed

(* Move back to mul_imp_time *)
lemma mul_imp_time_non_rec:
  "mul_imp_time t s = (if mul_b s = 0 then 0 else 10 * (1 + Discrete.log (mul_b s))) + 2 + t"
proof-
  have "mul_imp_time t s = mul_imp_time' (mul_b s) + t"
    by (simp add: mul_imp_time_mul_imp_time')
  also have "\<dots> = (if (mul_b s) = 0 then 0 else 10 * (1 + Discrete.log (mul_b s))) + 2 + t"
    by (simp add: mul_imp_time'_non_rec)
  finally show ?thesis
    by simp
qed

(* Hide details maybe?*)
lemma mul_imp_time_non_rec_bound: "mul_imp_time t s \<le> 10 * Discrete.log (mul_b s) + 12 + t"
  using mul_imp_time_non_rec by auto

subsubsection \<open>Squaring\<close>

definition square_imp_time' :: "nat \<Rightarrow> nat" where
  "square_imp_time' a = 8 + mul_imp_time' a"

lemma square_imp_time_square_imp_time': "square_imp_time t s = square_imp_time' (square_x s) + t"
  by (subst square_imp_time.simps) (simp add: square_imp_time'_def mul_imp_time_mul_imp_time')

lemma square_imp_time'_non_rec:
  "square_imp_time' a = (if a = 0 then 0 else 10 * (1 + Discrete.log a)) + 10"
  by (simp del: mul_imp_time'.simps add: mul_imp_time'_non_rec square_imp_time'_def)

lemma square_imp_time'_non_rec_bound: "square_imp_time' a \<le> 20 + 10 * (Discrete.log a)"
proof-
  have "22 + 10 * Discrete.log (Suc a) \<le> 22 + 10 * Suc (Discrete.log a)"
    using dlog_Suc_bound by (meson add_left_mono mult_le_mono2)
  thus ?thesis
    by (subst square_imp_time'_non_rec) simp
qed

(* This allows bounding *)
lemma mono_mul_imp_time'_pre: "m \<le> n \<Longrightarrow> mul_imp_time' m \<le> mul_imp_time' n"
  by (auto simp add: Discrete.log_le_iff mul_imp_time'_non_rec)
corollary mono_mul_imp_time': "mono mul_imp_time'"
  using mono_mul_imp_time'_pre ..

lemma mono_square_imp_time'_pre: "m \<le> n \<Longrightarrow> square_imp_time' m \<le> square_imp_time' n"
  by (auto simp add: mono_mul_imp_time'_pre square_imp_time'_def)
corollary mono_square_imp_time': "mono square_imp_time'"
  using mono_square_imp_time'_pre ..

subsubsection \<open>Triangle\<close>

(* Probably useless accumulator laws *)
lemma triangle_imp_time_acc': "triangle_imp_time t s = triangle_imp_time 0 s + t"
  by (induction t) (use triangle_imp_time_acc in auto)

(* This should prevent endless unfolding *)
lemma triangle_imp_time_acc'': "NO_MATCH 0 t \<Longrightarrow> triangle_imp_time t s = triangle_imp_time 0 s + t"
  using triangle_imp_time_acc' .

definition triangle_imp_time' :: "nat \<Rightarrow> nat" where
  "triangle_imp_time' a = 10 + mul_imp_time' (a+1)"

lemma triangle_imp_time_triangle_imp_time':
  "triangle_imp_time t s = triangle_imp_time' (triangle_a s) + t"
  by (subst triangle_imp_time.simps)
    (simp add: triangle_imp_time'_def mul_imp_time_mul_imp_time')

(* Problem: Suc a in argument *)
lemma triangle_imp_time'_non_rec: "triangle_imp_time' a = 22 + 10 * Discrete.log (Suc a)"
  by (simp del: mul_imp_time'.simps add: mul_imp_time'_non_rec triangle_imp_time'_def)

lemma triangle_imp_time'_non_rec_bound: "triangle_imp_time' a \<le> 32 + 10 * (Discrete.log a)"
proof-
  have "22 + 10 * Discrete.log (Suc a) \<le> 22 + 10 * Suc (Discrete.log a)"
    using dlog_Suc_bound by (meson add_left_mono mult_le_mono2)
  thus ?thesis
    by (subst triangle_imp_time'_non_rec) simp
qed

subsubsection \<open>Encoding pairs\<close>

definition prod_encode_imp_time' :: "nat \<Rightarrow> nat \<Rightarrow> nat" where
  "prod_encode_imp_time' a b = 8 + triangle_imp_time' (a+b)"

lemma prod_encode_imp_time_prod_encode_imp_time':
  "prod_encode_imp_time t s = prod_encode_imp_time' (prod_encode_a s) (prod_encode_b s) + t"
  by (subst prod_encode_imp_time.simps)
    (simp add: prod_encode_imp_time'_def triangle_imp_time_triangle_imp_time')

lemma dlog_add_bound': "a+b > 0 \<Longrightarrow> Discrete.log (a+b) \<le> Discrete.log a + Discrete.log b + 1"
  apply (induction "a+b" arbitrary: a b rule: log_induct)
   apply auto
  using log_Suc_zero apply auto[1]
  by (metis Discrete.log_le_iff add_Suc_right add_Suc_shift add_cancel_right_left add_le_mono1
      log_twice mult_2 nat_add_left_cancel_le nat_le_linear trans_le_add1 trans_le_add2)

lemma dlog_add_bound: "Discrete.log (a+b) \<le> Suc (Discrete.log a + Discrete.log b)"
  by (metis Suc_eq_plus1 dlog_add_bound' le_SucI le_add2 not_gr0 trans_less_add2)

(* The question is whether I bound this in each step or at the end *)
lemma prod_encode_imp_time'_non_rec:
  "prod_encode_imp_time' a b = 30 + 10 * Discrete.log (Suc (a + b))"
  by (auto simp add: prod_encode_imp_time'_def triangle_imp_time'_non_rec)

(* Bound every step, should scale better in the long run *)
lemma prod_encode_imp_time'_non_rec_bound:
  "prod_encode_imp_time' a b \<le> 50 + 10 * Discrete.log a + 10 * Discrete.log b"
proof-
  have "prod_encode_imp_time' a b = 8 + triangle_imp_time' (a+b)"
    unfolding prod_encode_imp_time'_def ..
  also have "\<dots> \<le> 40 + 10 * (Discrete.log (a+b))"
    using triangle_imp_time'_non_rec_bound by simp
  also have "\<dots> \<le> 40 + 10 * Suc (Discrete.log a + Discrete.log b)"
    using dlog_add_bound by (meson add_left_mono mult_le_mono2)
  also have "\<dots> = 50 + 10 * Discrete.log a + 10 * Discrete.log b"
    by simp
  finally show ?thesis .
qed

subsubsection \<open>Square root\<close>

function dsqrt'_imp_time' :: "nat \<Rightarrow> nat \<Rightarrow> nat \<Rightarrow> nat" where
  "dsqrt'_imp_time' y L R = (if Suc L < R
    then let M = (L+R) div 2
      in 23 + square_imp_time' M
         + (if M*M \<le> y then dsqrt'_imp_time' y M R else dsqrt'_imp_time' y L M)
    else 7)"
  by auto
termination
  by (relation "Wellfounded.measure (\<lambda>(y,L,R). R - L)") auto

declare dsqrt'_imp_time'.simps[simp del]

lemma dsqrt'_imp_time_dsqrt'_imp_time':
  "dsqrt'_imp_time t s = dsqrt'_imp_time' (dsqrt'_state_y s) (dsqrt'_state_L s) (dsqrt'_state_R s) + t"
proof (induction "dsqrt'_state_y s" "dsqrt'_state_L s" "dsqrt'_state_R s" arbitrary: s t
    rule: dsqrt'_imp_time'.induct)
  case 1
  then show ?case
    apply (subst dsqrt'_imp_time.simps)
    apply (subst dsqrt'_imp_time'.simps)
    by (auto simp add: "1" dsqrt'_imp_state_upd_def dsqrt'_imp_time_acc
        square_imp_time_square_imp_time' power2_eq_square square_imp_correct Let_def)
qed

lemma middle_alternative: "Suc L < R \<Longrightarrow> (L + R) div 2 = (R - L) div 2 + L"
  by (induction L arbitrary: R) auto

lemma "1 + (L::real) < R \<Longrightarrow> R - ((R - L) div 2 + L) = (R - L) div 2"
  by (smt (z3) field_sum_of_halves)

(* Basically: Add a ceiling function. *)
lemma distance:
  "Suc L < R \<Longrightarrow> R - ((R - L) div 2 + L) = (R - L) div 2 + (if even (L+R) then 0 else 1)"
  apply (induction L arbitrary: R)
   apply (auto simp add: algebra_simps)[]
  using left_add_twice apply presburger
  apply auto
     apply (smt (verit, best) Suc_diff_1 Suc_less_eq add.right_neutral diff_diff_left even_Suc
      odd_pos plus_1_eq_Suc)
    apply (smt (verit, ccfv_SIG) Nat.add_0_right Suc_lessE diff_Suc_Suc even_Suc)
   apply (smt (verit, del_insts) One_nat_def Suc_diff_Suc Suc_lessD add.commute add_Suc_right
      even_diff_nat odd_Suc_div_two odd_add odd_one plus_1_eq_Suc)
  by (smt (verit, best) One_nat_def Suc_eq_plus1 Suc_less_eq diff_diff_left even_Suc
      odd_Suc_minus_one plus_1_eq_Suc)

(* Quick'n'dirty version of the textbook proof, first for powers of two then general *)
lemma dsqrt'_imp_time'_log_bound_2pow:
  "L < R \<Longrightarrow> \<exists>k. R-L = 2^k \<Longrightarrow>
    dsqrt'_imp_time' y L R \<le> (23 + square_imp_time' R) * (Discrete.log (R - L)) + 7"
proof(induction y L R rule: dsqrt'_imp_time'.induct)
  case (1 y L R)
  then show ?case
  proof(cases "Suc L < R")
    case rec: True
    from rec have "(R - L) > 1"
      by simp
    hence size: "(R - L) div 2 > 0"
      by simp

    from "1.prems"(2) \<open>1 < R - L\<close> have "even (L+R)"
      by (metis One_nat_def add.commute even_diff_nat even_numeral even_power less_nat_zero_code
          nat_diff_split nat_neq_iff nat_power_eq_Suc_0_iff)
    hence "even (R - L)"
      by simp
    with \<open>even (L + R)\<close> "1.prems"(2) have pow: "\<exists>k . R - (L + R) div 2 = 2^k"
      by (smt (verit, del_insts) Suc_leI cancel_comm_monoid_add_class.diff_cancel distance
          even_power le_add_diff_inverse local.rec log_exp log_exp2_le middle_alternative
          power_Suc0_right power_diff_power_eq size zero_neq_numeral)

    from \<open>even (L + R)\<close> distance rec have d: "R - (L + R) div 2 = (R - L) div 2"
      using middle_alternative by simp
    from \<open>even (L + R)\<close> distance rec have d': "(L + R) div 2 - L = (R - L) div 2"
      using middle_alternative by simp

    have mid_bound: "(L + R) div 2 < R"
      by (metis d size zero_less_diff)

    show ?thesis
    proof(cases "((L + R) div 2)^2 \<le> y")
      case True
      hence s: "dsqrt'_imp_time' y L R
          \<le> 23 + square_imp_time' R + (dsqrt'_imp_time' y ((L + R) div 2) R)"
        using mid_bound mono_square_imp_time'_pre
        by (subst dsqrt'_imp_time'.simps) (simp add: rec power2_eq_square Let_def)
      have I: "(dsqrt'_imp_time' y ((L + R) div 2) R)
          \<le> (23 + square_imp_time' R) * Discrete.log (R - ((L + R) div 2)) + 7"
        apply (subst "1.IH"(1))
        using "1.prems" rec True pow by (auto simp add: power2_eq_square)

      have "dsqrt'_imp_time' y L R
          \<le> 23 + square_imp_time' R + (dsqrt'_imp_time' y ((L + R) div 2) R)"
        using s .
      also have "\<dots> \<le> (23 + square_imp_time' R) * Suc (Discrete.log (R - ((L + R) div 2))) + 7"
        using I by auto
      finally show ?thesis
        apply (subst log_rec)
        using div_greater_zero_iff size apply blast
        by (simp add: d)
    next
      case False
      hence s: "dsqrt'_imp_time' y L R
          \<le> 23 + square_imp_time' R + (dsqrt'_imp_time' y L ((L + R) div 2))"
        using mid_bound mono_square_imp_time'_pre
        by (subst dsqrt'_imp_time'.simps) (simp add: rec power2_eq_square Let_def)
      have I: "(dsqrt'_imp_time' y L ((L + R) div 2))
          \<le> (23 + square_imp_time' ((L + R) div 2)) * Discrete.log (((L + R) div 2) - L) + 7"
        apply (subst "1.IH"(2))
        using "1.prems" rec False pow by (auto simp add: power2_eq_square d d')
      hence I': "(dsqrt'_imp_time' y L ((L + R) div 2))
          \<le> (23 + square_imp_time' R) * Discrete.log (((L + R) div 2) - L) + 7"
        using mid_bound mono_square_imp_time'_pre
        by (smt (verit, best) add.left_commute add_le_mono less_or_eq_imp_le mult_le_mono1
            nat_add_left_cancel_le)

      have "dsqrt'_imp_time' y L R
          \<le> 23 + square_imp_time' R + (dsqrt'_imp_time' y L ((L + R) div 2))"
        using s .
      also have "\<dots> \<le> (23 + square_imp_time' R) * Suc (Discrete.log (((L + R) div 2)- L)) + 7"
        using I' by auto
      finally show ?thesis
        apply (subst log_rec)
        using div_greater_zero_iff size apply blast
        by (simp add: d d')
    qed
  next
    case False
    then show ?thesis
      using "1.prems" False by (metis add_leD2 dsqrt'_imp_time'.simps le_refl)
  qed
qed

(* The extra size from uneven splits is only dangerous near powers of two*)
lemma log_changes_2pow: "(\<And>k .Suc n \<noteq> 2^k) \<Longrightarrow> Discrete.log (Suc n) = Discrete.log n"
  by (metis Suc_lessD bot_nat_0.not_eq_extremum le_SucE log_Suc_zero log_eqI log_exp2_gt
      log_exp2_le log_zero zero_less_Suc)

(* The bigger half is a power of two, so problem should not cascade *)
lemma "k>1 \<Longrightarrow> Suc n = 2^k \<Longrightarrow> n = 2^(k-1) + 2^(k-1) - 1"
  apply (induction n)
   apply auto
  using log_Suc_zero apply auto[1]
  by (metis One_nat_def diff_add_inverse le_add_diff_inverse mult_2 nat_less_le plus_1_eq_Suc
      power_Suc)

lemma log_split_near_2pow:
  "Suc n = 2^k \<Longrightarrow> Discrete.log (Suc (2 * n)) = (Discrete.log (Suc n))"
  by (simp add: log_eqI)

lemma dsqrt'_imp_time'_log:
  "L < R \<Longrightarrow> dsqrt'_imp_time' y L R
    \<le> (23 + square_imp_time' R) * (Suc (Discrete.log (R - L))) + 7"
proof(induction "R-L" arbitrary: y L R rule: less_induct)
  case less
  then show ?case
  proof(cases "Suc L < R")
    case rec: True
    have mid_bound: "(L + R) div 2 < R"
      using local.rec by linarith
    then show ?thesis
    proof(cases "((L + R) div 2)^2 \<le> y")
      case True
      hence s: "dsqrt'_imp_time' y L R
          \<le> 23 + square_imp_time' R + (dsqrt'_imp_time' y ((L + R) div 2) R)"
        using mid_bound mono_square_imp_time'_pre
        by (subst dsqrt'_imp_time'.simps) (simp add: rec power2_eq_square Let_def)
      have I: "dsqrt'_imp_time' y ((L + R) div 2) R
          \<le> (23 + square_imp_time' R) * Suc (Discrete.log (R - ((L + R) div 2))) + 7"
        apply (rule less)
        using local.rec by linarith+

      consider (even_split) "(R - ((L + R) div 2)) = (R - L) div 2"
        | (odd_split) "(R - ((L + R) div 2)) = Suc ((R - L) div 2)"
        by linarith
      then show ?thesis
      proof cases
        (* No problem here *)
        case even_split
        from rec have "Discrete.log (R - L) > 0"
          by (subst log_rec) simp_all
        with even_split show ?thesis
          using I s by auto
      next
        case odd_split
        (* For an odd split we need an interval of length \<ge>3 *)
        with rec have odd: "odd (R - L)"
          using distance middle_alternative by fastforce
        with rec have size: "R - L > 2"
          by (auto simp add: less_diff_conv intro!: Suc_lessI)
        have recomb: "R - L = Suc (2 * ((R - L) div 2))"
          by (metis Suc_eq_plus1 odd odd_two_times_div_two_succ)

        consider (power) k where "Suc ((R - L) div 2) = 2^k"
          | (unproblematic) "\<And>k. Suc ((R - L) div 2) \<noteq> 2^k"
          by blast
        then show ?thesis
        proof cases
          case power
          have "Discrete.log (Suc ((R - L) div 2)) = k"
            by (simp add: power)
          hence question: "Discrete.log (R - L) = k"
            apply (subst recomb)
            apply (subst log_split_near_2pow)
            using power apply assumption
            by simp

          have "dsqrt'_imp_time' y L R
              \<le> 23 + square_imp_time' R + (dsqrt'_imp_time' y ((L + R) div 2) R)"
            using s .
          also have "\<dots> \<le> 23 + square_imp_time' R
              + ((23 + square_imp_time' R) * Suc (Discrete.log (R - ((L + R) div 2))) + 7)"
            using I by auto
          also have "\<dots> \<le> (23 + square_imp_time' R)
              * Suc (Suc (Discrete.log (R - ((L + R) div 2)))) + 7"
            by simp
          have "23 + square_imp_time' R + (dsqrt'_imp_time' y ((L + R) div 2) R)
              \<le> (23 + square_imp_time' R) * Suc (Discrete.log (R - L)) + 7"
            by (simp add: question)
              (metis \<open>Discrete.log (Suc ((R - L) div 2)) = k\<close> add.commute
                dsqrt'_imp_time'_log_bound_2pow mid_bound odd_split power)
          (* Basically works because we have an exact version for the 2^k case *)
          show ?thesis
            using \<open>23 + square_imp_time' R + dsqrt'_imp_time' y ((L + R) div 2) R
                \<le> (23 + square_imp_time' R) * Suc (Discrete.log (R - L)) + 7\<close>
              order.trans s by blast
        next
          case unproblematic
          hence "Suc ((R - L) div 2) \<noteq> 2^k" for k
            by (simp add: odd_split)
          hence irrel: "Discrete.log (Suc ((R - L) div 2)) = Discrete.log ((R - L) div 2)"
            by (rule log_changes_2pow)
          show ?thesis
            using I odd_split size s irrel by (subst log_rec) auto
        qed
      qed
    next
      case False
      hence s: "dsqrt'_imp_time' y L R
          \<le> 23 + square_imp_time' R + (dsqrt'_imp_time' y L ((L + R) div 2))"
        using mid_bound mono_square_imp_time'_pre
        by (subst dsqrt'_imp_time'.simps) (simp add: rec power2_eq_square Let_def)
      have I: "(dsqrt'_imp_time' y L ((L + R) div 2))
          \<le> (23 + square_imp_time' R) * Suc (Discrete.log (((L + R) div 2) - L)) + 7"
        by (smt (verit, best) False One_nat_def diff_add_inverse2 diff_is_0_eq div_2_gt_zero
            div_greater_zero_iff div_less dsqrt'_imp_time'.elims le_diff_conv le_trans less.hyps
            linorder_not_le local.rec mid_bound middle_alternative mult.commute mult_le_mono2
            plus_1_eq_Suc power2_eq_square s)
      have split: "(((L + R) div 2) - L) = (R - L) div 2"
        by (simp add: local.rec middle_alternative)
      from rec have "Discrete.log (R - L) > 0"
        by (subst log_rec) simp_all
      with split show ?thesis
        using I s by auto
    qed
  next
    case False
    with less.prems have "Suc L = R"
      by simp
    then show ?thesis
      apply (subst dsqrt'_imp_time'.simps)
      using False by auto
  qed
qed

lemma dsqrt'_imp_time'_non_rec_bound:
  assumes "L < R"
  shows "dsqrt'_imp_time' y L R \<le> 60 + (63 * Discrete.log R + 10 * (Discrete.log R)^2)"
proof-
  have "dsqrt'_imp_time' y L R
      \<le> (23 + 20 + 10 * Suc (Discrete.log R)) * Suc (Discrete.log (R - L)) + 7"
    apply (rule le_trans[OF dsqrt'_imp_time'_log[OF assms, of y]])
    apply (rule add_le_mono1)
    apply (rule mult_le_mono1)
    using square_imp_time'_non_rec_bound[of R] by simp
  also have "\<dots> = (43 + 10 * Suc (Discrete.log R)) * Suc (Discrete.log (R - L)) + 7"
    by simp
  also have "\<dots> \<le> (43 + 10 * Suc (Discrete.log R)) * Suc (Discrete.log R) + 7"
    by (simp add: Discrete.log_le_iff)
  finally show ?thesis
    by (auto simp add: power2_eq_square algebra_simps)
qed

lemma "dsqrt'_state_L s < dsqrt'_state_R s \<Longrightarrow> dsqrt'_imp_time t s
    \<le> t + 60 + (63 * Discrete.log (dsqrt'_state_R s) + 10 * (Discrete.log (dsqrt'_state_R s))^2)"
  by (simp add: dsqrt'_imp_time'_non_rec_bound dsqrt'_imp_time_dsqrt'_imp_time')

fun dsqrt_imp_time' :: "nat \<Rightarrow> nat" where
  "dsqrt_imp_time' y = 8 + dsqrt'_imp_time' y 0 (Suc y)"

lemma dsqrt_imp_time_dsqrt_imp_time': "dsqrt_imp_time t s = t + dsqrt_imp_time' (dsqrt_state_y s)"
  by (subst dsqrt_imp_time.simps)
    (auto simp add: dsqrt_imp_time_acc_2 dsqrt'_imp_time_dsqrt'_imp_time')

lemma dsqrt_imp_time'_non_rec_bound:
  "dsqrt_imp_time' y \<le> 141 + (83 * Discrete.log y + 10 * (Discrete.log y)\<^sup>2)"
proof-
  have "dsqrt_imp_time' y \<le> 68 + (63 *
Discrete.log (Suc y) + 10 * (Discrete.log (Suc y))^2)" by (auto simp add: dsqrt'_imp_time'_non_rec_bound) also have "\<dots> \<le> 131 + (63 * Discrete.log y + 10 * (Discrete.log (Suc y))^2)" by simp (metis dlog_Suc_bound le_refl mult_Suc_right mult_le_mono) also have "\<dots> \<le> 131 + (63 * Discrete.log y + 10 * (Suc (Discrete.log y))^2)" by (simp add: dlog_Suc_bound) also have "\<dots> = 131 + (63 * Discrete.log y + 10 * (1 + 2 * Discrete.log y + (Discrete.log y)^2))" by (smt (verit, del_insts) Suc_eq_plus1_left add.assoc add.commute mult.assoc mult_1 power2_sum power_one) also have "\<dots> \<le> 141 + (83 * Discrete.log y + 10 * (Discrete.log y)^2)" by simp finally show ?thesis . qed text \<open>Triangular root\<close> fun tsqrt_imp_time' :: "nat \<Rightarrow> nat" where "tsqrt_imp_time' y = 16 + mul_imp_time' 8 + dsqrt_imp_time' (8*y+1)" lemma tsqrt_imp_time_tsqrt_imp_time': "tsqrt_imp_time t s = t + tsqrt_imp_time' (tsqrt_state_y s)" by (subst tsqrt_imp_time.simps) (auto simp add: tsqrt_imp_time_acc_2 dsqrt_imp_time_dsqrt_imp_time' mul_imp_time_mul_imp_time' mul_imp_correct Let_def mult.commute) lemma tsqrt_imp_time'_non_rec_bound: "tsqrt_imp_time' y \<le> 691 + (163 * Discrete.log y + 10 * (Discrete.log y)\<^sup>2)" proof- have "tsqrt_imp_time' y = 58 + dsqrt_imp_time' (8*y+1)" by code_simp also have "\<dots> \<le> 58 + (141 + (83 * Discrete.log (8*y+1) + 10 * (Discrete.log (8*y+1))\<^sup>2))" using dsqrt_imp_time'_non_rec_bound by auto also have "\<dots> \<le> 282 + (83 * Discrete.log (8*y) + 10 * (Discrete.log (8*y+1))\<^sup>2)" by simp (metis dlog_Suc_bound le_refl mult_Suc_right mult_le_mono) also have "\<dots> \<le> 282 + (83 * (3 + Discrete.log y) + 10 * (Discrete.log (8*y+1))\<^sup>2)" proof- (* \<le> because of y=0 case*) have s: "8*y = (2*(2*(2*y)))" by simp have "Discrete.log (8*y) = 3 + Discrete.log y" if "y>0" apply (subst s) using that apply (subst log_twice, simp)+ by presburger thus ?thesis by force qed also have "\<dots> = 531 + (83 * Discrete.log y + 10 * (Discrete.log (8*y+1))\<^sup>2)" by simp also have "\<dots> \<le> 531 + (83 * Discrete.log y + 10 * (1 + Discrete.log (8*y))\<^sup>2)" by (simp add: dlog_Suc_bound) also have "\<dots> \<le> 531 + (83 * Discrete.log y + 10 * (4 + Discrete.log y)\<^sup>2)" proof- have s: "8*y = (2*(2*(2*y)))" by simp have "Discrete.log (8*y) = 3 + Discrete.log y" if "y>0" apply (subst s) using that apply (subst log_twice, simp)+ by presburger thus ?thesis by force qed also have "\<dots> = 531 + (83 * Discrete.log y + 10 * (Discrete.log y)^2 + 80 * Discrete.log y + 160)" by (auto simp add: power2_eq_square algebra_simps) also have "\<dots> = 691 + (163 * Discrete.log y + 10 * (Discrete.log y)^2)" by simp finally show ?thesis . 
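(* Note on the constant 8 in dsqrt_imp_time' (8*y+1) above: by tsqrt_def (cf. lemma b further
   below), tsqrt y = (Discrete.sqrt (8 * y + 1) - 1) div 2, the inverse of
   triangle n = n * Suc n div 2, so one discrete sqrt on 8*y+1 plus one constant-size
   multiplication suffice. *)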
qed fun fst'_imp_time' :: "nat \<Rightarrow> nat" where "fst'_imp_time' p = 10 + tsqrt_imp_time' p + triangle_imp_time' (tsqrt p)" lemma fst'_imp_time_fst'_imp_time': "fst'_imp_time t s = t + fst'_imp_time' (fst'_state_p s)" by (subst fst'_imp_time.simps) (auto simp add: fst'_imp_time_acc_2 tsqrt_imp_time_tsqrt_imp_time' triangle_imp_time_triangle_imp_time' tsqrt_imp_correct Let_def mult.commute) (* MOVE *) lemma tsqrt_le: "tsqrt p \<le> p" using triangle_nat_le_imp_le triangle_tsqrt_le by blast lemma fst'_imp_time'_non_rec_bound: "fst'_imp_time' p \<le> 733 + 173 * Discrete.log p + 10 * (Discrete.log p)\<^sup>2" proof- have "fst'_imp_time' p = 10 + tsqrt_imp_time' p + triangle_imp_time' (tsqrt p)" by simp also have "\<dots> \<le> 10 + 691 + (163 * Discrete.log p + 10 * (Discrete.log p)\<^sup>2) + triangle_imp_time' (tsqrt p)" using tsqrt_imp_time'_non_rec_bound by simp also have "\<dots> \<le> 10 + 691 + (163 * Discrete.log p + 10 * (Discrete.log p)\<^sup>2) + 32 + 10 * (Discrete.log (tsqrt p))" using triangle_imp_time'_non_rec_bound by simp also have "\<dots> \<le> 10 + 691 + (163 * Discrete.log p + 10 * (Discrete.log p)\<^sup>2) + 32 + 10 * (Discrete.log p)" using tsqrt_le by (simp add: Discrete.log_le_iff) also have "\<dots> \<le> 733 + 173 * Discrete.log p + 10 * (Discrete.log p)\<^sup>2" by simp finally show ?thesis . qed fun snd'_imp_time' :: "nat \<Rightarrow> nat" where "snd'_imp_time' p = 8 + tsqrt_imp_time' p + fst'_imp_time' p" lemma snd'_imp_time_snd'_imp_time': "snd'_imp_time t s = t + snd'_imp_time' (snd'_state_p s)" by (subst snd'_imp_time.simps) (auto simp add: snd'_imp_time_acc_2 tsqrt_imp_time_tsqrt_imp_time' fst'_imp_time_fst'_imp_time' Let_def) lemma snd'_imp_time'_non_rec_bound: "snd'_imp_time' p \<le> 1432 + 336 * Discrete.log p + 20 * (Discrete.log p)\<^sup>2" proof- have "snd'_imp_time' p = 8 + tsqrt_imp_time' p + fst'_imp_time' p" by simp also have "\<dots> \<le> 8 + 691 + (163 * Discrete.log p + 10 * (Discrete.log p)\<^sup>2) + fst'_imp_time' p" using tsqrt_imp_time'_non_rec_bound by (metis (no_types, lifting) add_mono eq_imp_le group_cancel.add1) also have "\<dots> \<le> 8 + 691 + (163 * Discrete.log p + 10 * (Discrete.log p)\<^sup>2) + 733 + 173 * Discrete.log p + 10 * (Discrete.log p)\<^sup>2" using fst'_imp_time'_non_rec_bound by (metis (mono_tags, lifting) add.assoc nat_add_left_cancel_le) also have "\<dots> \<le> 1432 + 336 * Discrete.log p + 20 * (Discrete.log p)\<^sup>2" by auto finally show ?thesis . 
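(* The cost structure of fst' and snd' mirrors the (presumed) Cantor unpairing
   fst' p = p - triangle (tsqrt p) and snd' p = tsqrt p - fst' p: fst' pays for one tsqrt and
   one triangle, while snd' pays for one tsqrt plus a full fst'. *)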
qed fun prod_decode_imp_time' :: "nat \<Rightarrow> nat" where "prod_decode_imp_time' p = 8 + fst'_imp_time' p + snd'_imp_time' p" lemma prod_decode_imp_time_prod_decode_imp_time': "prod_decode_imp_time t s = t + prod_decode_imp_time' (prod_decode_state_p s)" by (subst prod_decode_imp_time.simps) (auto simp add: prod_decode_imp_time_acc_2 tsqrt_imp_time_tsqrt_imp_time' fst'_imp_time_fst'_imp_time' snd'_imp_time_snd'_imp_time' Let_def) lemma prod_decode'_imp_time'_non_rec_bound: "prod_decode_imp_time' p \<le> 2173 + 509 * Discrete.log p + 30 * (Discrete.log p)\<^sup>2" proof- have "prod_decode_imp_time' p = 8 + fst'_imp_time' p + snd'_imp_time' p" by simp also have "\<dots> \<le> 8 + 733 + 173 * Discrete.log p + 10 * (Discrete.log p)\<^sup>2 + snd'_imp_time' p" using fst'_imp_time'_non_rec_bound by (metis (no_types, lifting) add_mono eq_imp_le group_cancel.add1) also have "\<dots> \<le> 8 + 733 + 173 * Discrete.log p + 10 * (Discrete.log p)\<^sup>2 + 1432 + 336 * Discrete.log p + 20 * (Discrete.log p)\<^sup>2" using snd'_imp_time'_non_rec_bound by (metis (no_types, lifting) add_mono eq_imp_le group_cancel.add1) also have "\<dots> \<le> 2173 + 509 * Discrete.log p + 30 * (Discrete.log p)\<^sup>2" by auto finally show ?thesis . qed fun hd_imp_time' :: "nat \<Rightarrow> nat" where "hd_imp_time' l = 8 + prod_decode_imp_time' (l-1)" lemma hd_imp_time_hd_imp_time': "hd_imp_time t s = t + hd_imp_time' (hd_xs s)" by (subst hd_imp_time.simps) (auto simp add: hd_imp_time_acc prod_decode_imp_time_prod_decode_imp_time' Let_def) lemma hd_imp_time'_non_rec_bound: "hd_imp_time' l \<le> 2181 + 509 * Discrete.log l + 30 * (Discrete.log l)\<^sup>2" proof- have "hd_imp_time' l \<le> 8 + 2173 + 509 * Discrete.log (l-1) + 30 * (Discrete.log (l-1))\<^sup>2" using prod_decode'_imp_time'_non_rec_bound by simp also have "\<dots> \<le> 2181 + 509 * Discrete.log (l-1) + 30 * (Discrete.log (l-1))\<^sup>2" by simp also have "\<dots> \<le> 2181 + 509 * Discrete.log l + 30 * (Discrete.log (l-1))\<^sup>2" by (simp add: Discrete.log_le_iff) also have "\<dots> \<le> 2181 + 509 * Discrete.log l + 30 * (Discrete.log l)\<^sup>2" by (simp add: Discrete.log_le_iff) finally show ?thesis . qed fun tl_imp_time' :: "nat \<Rightarrow> nat" where "tl_imp_time' l = 8 + prod_decode_imp_time' (l-1)" lemma tl_imp_time_tl_imp_time': "tl_imp_time t s = t + tl_imp_time' (tl_xs s)" by (subst tl_imp_time.simps) (auto simp add: tl_imp_time_acc prod_decode_imp_time_prod_decode_imp_time' Let_def) lemma tl_imp_time'_non_rec_bound: "tl_imp_time' l \<le> 2181 + 509 * Discrete.log l + 30 * (Discrete.log l)\<^sup>2" proof- have "tl_imp_time' l \<le> 8 + 2173 + 509 * Discrete.log (l-1) + 30 * (Discrete.log (l-1))\<^sup>2" using prod_decode'_imp_time'_non_rec_bound by simp also have "\<dots> \<le> 2181 + 509 * Discrete.log (l-1) + 30 * (Discrete.log (l-1))\<^sup>2" by simp also have "\<dots> \<le> 2181 + 509 * Discrete.log l + 30 * (Discrete.log (l-1))\<^sup>2" by (simp add: Discrete.log_le_iff) also have "\<dots> \<le> 2181 + 509 * Discrete.log l + 30 * (Discrete.log l)\<^sup>2" by (simp add: Discrete.log_le_iff) finally show ?thesis . 
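(* The l - 1 in hd_imp_time' and tl_imp_time' reflects the list encoding (cf. cons_bound below):
   cons h t = Suc (prod_encode (h, t)) with 0 encoding nil, so hd and tl prod_decode the
   predecessor. *)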
qed

(* function length_imp_time:: "nat \<Rightarrow> length_state\<Rightarrow> nat" where
  "length_imp_time t s = (if length_xs s \<noteq> 0 then \<comment> \<open>While xs \<noteq> 0\<close>
    ( let t = t + 1; tl_xs' = (length_xs s); t = t+2; tl_ret' = 0; t = t+2;
          tl_state = \<lparr>tl_xs = tl_xs', tl_ret = tl_ret'\<rparr>;
          tl_state_ret = tl_imp tl_state; t = t + tl_imp_time 0 tl_state;
          length_xs' = tl_ret tl_state_ret; t = t + 2;
          length_ret' = length_ret s + 1; t = t + 2;
          next_iteration = length_imp_time t (length_state_upd s)
      in next_iteration )
  else ( let t = t + 2; ret = t in ret ) )*)

fun length_imp_time' :: "nat \<Rightarrow> nat" where
  "length_imp_time' 0 = 2"
| "length_imp_time' xs = 9 + tl_imp_time' xs + length_imp_time' (tl_nat xs)"

lemma length_imp_time_acc_3: "NO_MATCH 0 x \<Longrightarrow> (length_imp_time x s) = x + (length_imp_time 0 s)"
  using length_imp_time_acc_2 .

lemma length_imp_time_length_imp_time': "length_imp_time t s = t + length_imp_time' (length_xs s)"
proof (induction "length_xs s" arbitrary: s t rule: length_imp_time'.induct)
  case 1
  then show ?case by (subst length_imp_time.simps) auto
next
  case 2
  hence s: "length_imp_time' (length_xs s)
    = 9 + tl_imp_time' (length_xs s) + length_imp_time' (tl_nat (length_xs s))"
    by (metis length_imp_time'.simps(2))
  show ?case
    apply (subst length_imp_time.simps)
    using 2(2) apply (simp add: s 2(1) Let_def length_imp_time_acc_3 tl_imp_time_tl_imp_time'
        length_state_upd_def tl_imp_correct)
    done
qed

lemma a: "6 < (n::nat) \<Longrightarrow> 1 + n * 8 \<le> (1 + n)\<^sup>2"
  apply (induction n)
   apply (auto simp add: field_simps power2_eq_square)
  by (metis add_less_mono1 eval_nat_numeral(3) mult_Suc_right nat_le_linear
      nat_mult_less_cancel_disj not_less_iff_gr_or_eq order_le_less_trans)

lemma a': "6 < (n::real) \<Longrightarrow> 1 + n * 8 \<le> (1 + n)\<^sup>2"
  by (auto simp add: field_simps power2_eq_square)

lemma b':
  assumes "6 < n"
  shows "(sqrt (8 * n + 1) - 1) / 2 \<le> n / 2"
proof-
  have "sqrt (8 * n + 1) \<le> 1 + n"
    apply (rule real_le_lsqrt)
    using assms by (auto simp add: a' mult.commute add.commute)
  hence "(sqrt (8 * n + 1) - 1) \<le> n"
    by simp
  thus ?thesis
    by simp
qed

lemma b:
  assumes "6 < n"
  shows "(Discrete.sqrt (8 * n + 1) - 1) div 2 \<le> n div 2"
proof-
  have 1: "(Discrete.sqrt (8 * n + 1) - 1) div 2 \<le> nat (floor ((sqrt (8 * n + 1) - 1) / 2))"
    using tsqrt_def tsqrt_real by presburger
  have assms': "6 < real n"
    using assms by simp
  show ?thesis
    apply (rule le_trans)
    using 1 apply simp
    using b'[OF assms'] by (metis add.commute divide2_div2 floor_mono nat_mono)
qed

(* This should mean it shrinks fast enough. The condition is annoying, as it might result in me
   having to do the first few cases explicitly. There is a reason why Landau symbols were
   invented... *)
lemma tsqrt_div2_bound: "n > 6 \<Longrightarrow> tsqrt n \<le> n div 2"
  unfolding tsqrt_def using b .

lemma snd'_nat_div2_bound: "n > 6 \<Longrightarrow> snd'_nat n \<le> n div 2"
  unfolding snd'_nat_def using tsqrt_div2_bound by force

lemma snd_nat_div2_bound: "n > 6 \<Longrightarrow> snd_nat n \<le> n div 2"
  using snd'_nat_div2_bound snd_nat_snd'_nat by presburger

(* This proof shows that I might need monotonicity lemmas for my operations..
*) lemma tl_nat_div2_bound: "n > 6 \<Longrightarrow> tl_nat n \<le> n div 2" unfolding tl_nat_def using snd_nat_div2_bound by (metis diff_le_self le_trans mono_tsqrt' snd'_nat_def snd_nat_snd'_nat tsqrt_div2_bound) lemma triangle_imp_time'_mono_pre: "m \<le> n \<Longrightarrow> triangle_imp_time' m \<le> triangle_imp_time' n" using mono_mul_imp_time'_pre by (simp add: triangle_imp_time'_def) (* It is easier now to bound before solving the recursion, problem is that I would need to prove something like monotonicity for tl_imp_time' which requires a bunch of monotonicity *) fun length_imp_time'' :: "nat \<Rightarrow> nat" where "length_imp_time'' 0 = 2" | "length_imp_time'' xs = 9 + (2181 + 509 * Discrete.log xs + 30 * (Discrete.log xs)\<^sup>2) + length_imp_time'' (tl_nat xs)" lemma length_imp_time'_length_imp_time'': "length_imp_time' xs \<le> length_imp_time'' xs" apply (induction xs rule: length_imp_time''.induct) apply simp using tl_imp_time'_non_rec_bound by (metis add.commute add_le_mono add_le_mono1 length_imp_time''.simps(2) length_imp_time'.simps(2)) lemma length_imp_time''_non_rec_bound': "length_imp_time'' xs \<le> 2 + Suc (Discrete.log xs) * (9 + (2181 + 509 * Discrete.log xs + 30 * (Discrete.log xs)\<^sup>2))" proof (induction xs rule: length_imp_time''.induct) case 1 then show ?case by simp next case (2 v) show ?case proof(cases "Suc v>6") case True hence "tl_nat (Suc v) \<le> Suc v div 2" using tl_nat_div2_bound by blast hence "Discrete.log (tl_nat (Suc v)) \<le> Discrete.log (Suc v div 2)" by (rule log_le_iff) with "2.IH" have "length_imp_time'' (tl_nat (Suc v)) \<le> 2 + Suc (Discrete.log (Suc v div 2)) * (9 + (2181 + 509 * Discrete.log (tl_nat (Suc v)) + 30 * (Discrete.log (tl_nat (Suc v)))\<^sup>2))" by (smt (verit, best) Suc_le_mono Suc_mult_le_cancel1 add_le_cancel_left add_le_mono1 le_trans less_Suc_eq_le less_numeral_extra(3) mult_le_mono not_less_less_Suc_eq numeral_2_eq_2) also have "\<dots> \<le> 2 + Suc (Discrete.log (Suc v div 2)) * (9 + (2181 + 509 * Discrete.log (Suc v div 2) + 30 * (Discrete.log (Suc v div 2))\<^sup>2))" by (metis \<open>Discrete.log (tl_nat (Suc v)) \<le> Discrete.log (Suc v div 2)\<close> add_left_mono add_mono_thms_linordered_semiring(1) nat_mult_le_cancel_disj power2_nat_le_eq_le) also have "\<dots> \<le> 2 + (Discrete.log (Suc v)) * (9 + (2181 + 509 * Discrete.log (Suc v div 2) + 30 * (Discrete.log (Suc v div 2))\<^sup>2))" by (metis Discrete.log.simps True gr_implies_not0 le_eq_less_or_eq less_Suc_eq numeral_2_eq_2 zero_neq_numeral) finally show ?thesis apply (subst length_imp_time''.simps) apply (auto simp add: algebra_simps simp del: tl_imp_time'.simps) by (smt (verit, ccfv_SIG) Suc_le_mono diff_le_self le_trans mult_le_mono2 nat_add_left_cancel_le power2_nat_le_eq_le) next case False from this consider "v = 0" | "v = 1" | "v = 2" | "v = 3" | "v = 4" | "v = 5" by fastforce then show ?thesis apply (cases) apply (auto simp add: tl_nat_def snd_nat_snd'_nat snd'_nat_def tsqrt_def fst'_nat_def) apply (all code_simp) done qed qed corollary length_imp_time'_non_rec_bound': "length_imp_time' xs \<le> 2192 + 2699 * Discrete.log xs + 539 * (Discrete.log xs)\<^sup>2 + 30 * (Discrete.log xs)^3" apply (rule le_trans[OF length_imp_time'_length_imp_time'' ]) using length_imp_time''_non_rec_bound' by (auto simp add: field_simps power2_eq_square power3_eq_cube) fun cons_imp_time' :: "nat \<Rightarrow> nat \<Rightarrow> nat" where "cons_imp_time' h t = 8 + prod_encode_imp_time' h t" lemma cons_imp_time_cons_imp_time': "cons_imp_time t s = t + 
cons_imp_time' (cons_h s) (cons_t s)"
  by (auto simp add: cons_imp_time.simps prod_encode_imp_time_prod_encode_imp_time')

lemma cons_imp_time'_non_rec_bound: "cons_imp_time' h t \<le> 58 + 10 * Discrete.log h + 10 * Discrete.log t"
  using prod_encode_imp_time'_non_rec_bound by (simp add: add.assoc)

fun append_imp_time' :: "nat \<Rightarrow> nat \<Rightarrow> nat" where
  "append_imp_time' _ 0 = 2"
| "append_imp_time' acc xs = 19 + hd_imp_time' xs + cons_imp_time' (hd_nat xs) acc + tl_imp_time' xs
    + append_imp_time' (cons (hd_nat xs) acc) (tl_nat xs)"

lemma O_square_subset_nat: "O((f::nat \<Rightarrow> nat)) \<subseteq> O(\<lambda>n . (f n)^2)"
  apply (rule landau_o.big_subsetI)
  apply (rule landau_o.big_mono)
  apply (rule eventuallyI)
  apply (auto simp add: power2_nat_le_imp_le)
  done

corollary "(\<exists>k . (\<lambda>n. real (f n)) \<in> O(\<lambda>n. real n ^ k)) \<longleftrightarrow> (\<exists>k . (\<lambda>n. real (f n)) \<in> o(\<lambda>n. real n ^ k))"
  using poly_def poly_iff_ex_smallo by presburger

(* The definition separates the polynomial bit from the bit_length part, I did it combined so far. *)
lemma natfun_bigo_poly_log_iff:
  fixes f :: "nat \<Rightarrow> real"
  shows "f \<in> O(\<lambda>n. Discrete.log n ^ k) \<longleftrightarrow> (\<exists>c. \<forall>n>1. \<bar>f n\<bar> \<le> c * real (Discrete.log n ^ k))"
proof
  assume "\<exists>c. \<forall>n>1. \<bar>f n\<bar> \<le> c * real (Discrete.log n ^ k)"
  then obtain c where c: "\<forall>n>1. \<bar>f n\<bar> \<le> c * real (Discrete.log n ^ k)"
    by auto
  have "eventually (\<lambda>n. \<bar>f n\<bar> \<le> c * real (Discrete.log n ^ k)) at_top"
    using eventually_gt_at_top[of 1] by eventually_elim (use c in auto)
  thus "f \<in> O(\<lambda>n. Discrete.log n ^ k)"
    by (intro bigoI[of _ c]) (auto intro!: always_eventually)
next
  assume 1: "f \<in> O(\<lambda>n. Discrete.log n ^ k)"
  have 2: "real (Discrete.log n ^ k) \<noteq> 0" if "n \<ge> 2" for n :: nat
    using that
    by (metis bot_nat_0.not_eq_extremum log_rec of_nat_0_eq_iff power_not_zero zero_less_Suc)
  from natfun_bigoE[OF 1 2, of 2] obtain c where "\<forall>n\<ge>2. \<bar>f n\<bar> \<le> c * real (Discrete.log n ^ k)"
    by simp metis?
  thus "\<exists>c. \<forall>n>1. \<bar>f n\<bar> \<le> c * real (Discrete.log n ^ k)"
    apply (auto simp: Suc_le_eq)
    by (metis Suc_leI numeral_2_eq_2)
qed

term poly

definition poly_log :: "(nat \<Rightarrow> nat) \<Rightarrow> bool" where
  "poly_log f \<longleftrightarrow> (\<exists>k. (\<lambda>n. real (f n)) \<in> O(\<lambda>n. real (Discrete.log n ^ k)))"

lemma poly_log_iff_ex_smallo: "poly_log f \<longleftrightarrow> (\<exists>k. (\<lambda>n. real (f n)) \<in> o(\<lambda>n. real (Discrete.log n ^ k)))"
  unfolding poly_log_def
proof safe
  fix k assume "f \<in> O(\<lambda>n. real (Discrete.log n ^ k))"
  also have "(\<lambda>n. real (Discrete.log n ^ k)) \<in> o(\<lambda>n. real (Discrete.log n ^ (k + 1)))"
    by real_asymp
  finally have "f \<in> o(\<lambda>n. (Discrete.log n ^ (k + 1)))" .
  thus "\<exists>k. f \<in> o(\<lambda>n. Discrete.log n ^ k)" ..
qed (auto intro: landau_o.small_imp_big)

lemma poly_log_const [simp, intro]: "poly_log (\<lambda>_. c)"
  by (auto simp: poly_log_def intro!: exI[of _ 0])

lemma poly_log_cmult [intro]: "poly_log f \<Longrightarrow> poly_log (\<lambda>x. c * f x)"
  by (auto simp: poly_log_def)

thm real_asymp_nat_reify
thm real_asymp_reify_simps

lemma test: "(\<lambda>x. real (Discrete.log x) ^ k) \<in> O(\<lambda>x.
real (Discrete.log x) ^ max k l)" apply (rule landau_o.big_mono[of]) apply (rule eventually_at_top_linorderI[of 2]) apply simp_all apply (rule power_increasing) apply simp_all by (metis One_nat_def Suc_leI log_rec real_of_nat_ge_one_iff zero_less_Suc) lemma "norm (real c) = real c" by simp lemma power_weaken_heavy: "a \<le> b \<Longrightarrow> n > 0 \<Longrightarrow> a \<le> (b::nat)^(n::nat)" by (metis One_nat_def Suc_leI bot_nat_0.extremum_uniqueI not_less_eq_eq order.trans power_increasing power_one_right zero_le_power) lemma const_in_poly_log_internal': "(\<lambda>_. real c) \<in> O(\<lambda>n . real (Discrete.log n ^ (Suc k)))" apply (rule landau_o.big_mono[]) apply (rule eventually_at_top_linorderI[of "2^c"]) by (metis (mono_tags, opaque_lifting) Discrete.log_le_iff One_nat_def Suc_leI bot_nat_0.not_eq_extremum le_trans log_exp nat.simps(3) nle_le norm_of_nat of_nat_le_iff power_increasing power_one_right) corollary const_in_poly_log_internal: "(\<lambda>_. real c) \<in> O(\<lambda>n . real (Discrete.log n ^ k))" apply (cases k) apply simp using const_in_poly_log_internal' by simp lemma poly_log_add [intro]: assumes "poly_log f" "poly_log g" shows "poly_log (\<lambda>x. f x + g x)" proof - from assms obtain k l where kl: "f \<in> O(\<lambda>n. Discrete.log n ^ k)" "g \<in> O(\<lambda>n. Discrete.log n ^ l)" by (auto simp: poly_log_def) have "f \<in> O(\<lambda>n. Discrete.log n ^ max k l)" "g \<in> O(\<lambda>n. Discrete.log n ^ max k l)" apply (rule kl[THEN landau_o.big.trans], simp add: test)+ by (metis max.commute test) from sum_in_bigo(1)[OF this] show ?thesis by (auto simp: poly_log_def) qed lemma poly_log_diff [intro]: assumes "poly_log f" "poly_log g" shows "poly_log (\<lambda>x. f x - g x)" proof - from assms obtain k l where kl: "f \<in> O(\<lambda>n. Discrete.log n ^ k)" "g \<in> O(\<lambda>n. Discrete.log n ^ l)" by (auto simp: poly_log_def) have "(\<lambda>x. real (f x - g x)) \<in> O(\<lambda>x. real (f x) - real (g x))" by (intro landau_o.big_mono) (auto intro!: always_eventually) also have "f \<in> O(\<lambda>n. Discrete.log n ^ max k l)" "g \<in> O(\<lambda>n. Discrete.log n ^ max k l)" apply (rule kl[THEN landau_o.big.trans], simp add: test)+ by (metis max.commute test) from sum_in_bigo(2)[OF this] have "(\<lambda>x. real (f x) - real (g x)) \<in> O(\<lambda>x. real (Discrete.log x ^ max k l))" . finally show ?thesis by (auto simp: poly_log_def) qed lemma poly_log_mult [intro]: assumes "poly_log f" "poly_log g" shows "poly_log (\<lambda>x. f x * g x)" proof - from assms obtain k l where kl: "f \<in> O(\<lambda>n. Discrete.log n ^ k)" "g \<in> O(\<lambda>n. Discrete.log n ^ l)" by (auto simp: poly_log_def) from landau_o.big.mult[OF this] have "(\<lambda>n. f n * g n) \<in> O(\<lambda>n. Discrete.log n ^ (k + l))" by (simp add: power_add) thus ?thesis by (auto simp: poly_log_def) qed value "{..(n::nat)}" lemma poly_log_make_mono_iff: "poly_log (make_mono f) \<longleftrightarrow> poly_log f" proof safe fix f assume *: "poly_log (make_mono f)" have "f \<in> O(make_mono f)" by (rule landau_o.big_mono) (auto intro!: always_eventually) also from * obtain k where "make_mono f \<in> O(\<lambda>n. Discrete.log n ^ k)" by (auto simp: poly_log_def) finally show "poly_log f" by (auto simp: poly_log_def) next assume "poly_log f" then obtain k where "f \<in> O(\<lambda>n. Discrete.log n ^ k)" by (auto simp: poly_log_def) then obtain c' :: real where c': "\<And>n. 
n > 1 \<Longrightarrow> f n \<le> c' * Discrete.log n ^ k" by (subst (asm) natfun_bigo_poly_log_iff) auto define c where "c = max c' 1" have "c > 0" by (simp add: c_def) have c: "f n \<le> c * Discrete.log n ^ k" if "n > 1" for n proof - have "f n \<le> c' * Discrete.log n ^ k" using c'[of n] that by blast also have "\<dots> \<le> c * Discrete.log n ^ k" by (intro mult_right_mono) (auto simp: c_def) finally show ?thesis by simp qed have "eventually (\<lambda>n. real (make_mono f n) \<le> real (f 0) + real (f 1) + c * real (Discrete.log n ^ k)) at_top" using eventually_gt_at_top[of 1] proof eventually_elim case (elim n) have "real (make_mono f n) = real (Max (f ` {..n}))" by (auto simp: make_mono_def) also have "{..n} = insert 0 {0<..n}" using elim by auto also have "\<dots> = insert 0 (insert 1 {1<..n})" using elim by auto also have "Max (f ` \<dots>) = max (f 0) (max (f 1) (Max (f ` {1<..n})))" using elim by (simp add: Max_insert) also have "real \<dots> = max (real (f 0)) (max (real (f 1)) (real (Max (f ` {1<..n}))))" by simp also have "real (Max (f ` {1<..n})) = Max ((real \<circ> f) ` {1<..n})" using elim by (subst mono_Max_commute) (auto simp: image_image incseq_def) also have "\<dots> \<le> c * real (Discrete.log n ^ k)" unfolding o_def proof (intro Max.boundedI; safe?) fix i assume i: "i \<in> {1<..n}" from i have "real (f i) \<le> c * real (Discrete.log i ^ k)" by (intro c) auto also have "\<dots> \<le> c * real (Discrete.log n ^ k)" using i \<open>c > 0\<close> by (auto intro!: mult_left_mono power_mono simp add: Discrete.log_le_iff) finally show "real (f i) \<le> c * real (Discrete.log n ^ k)" . qed (use elim in auto) hence "max (real (f 0)) (max (real (f 1)) (Max ((real \<circ> f) ` {1<..n}))) \<le> max (real (f 0)) (max (real (f 1)) (c * real (Discrete.log n ^ k)))" by (intro max.mono) auto also have "\<dots> \<le> real (f 0) + real (f 1) + c * real (Discrete.log n ^ k)" using \<open>c > 0\<close> by simp finally show ?case . qed hence "make_mono f \<in> O(\<lambda>n. real (f 0) + real (f 1) + c * real (Discrete.log n ^ k))" using \<open>c > 0\<close> by (intro bigoI[of _ 1]) auto also have "(\<lambda>n. real (f 0) + real (f 1) + c * real (Discrete.log n ^ k)) \<in> O(\<lambda>n. real (Discrete.log n ^ k))" using \<open>c > 0\<close> apply (intro sum_in_bigo) apply (intro const_in_poly_log_internal)+ apply auto done finally show "poly_log (make_mono f)" by (auto simp: poly_log_def) qed lemma "n>0 \<Longrightarrow> (n::nat)^k \<le> n^(Suc k)" by simp lemma "(\<lambda>_. 1) \<in> O(Discrete.log)" by real_asymp find_theorems name: Landau name: trans lemma step: "(\<lambda>_. 1) \<in> O(g) \<Longrightarrow> f \<in> O(\<lambda>n . g n ^ k) \<Longrightarrow> f \<in> O(\<lambda>n . g n ^ Suc k)" apply simp apply (rule Landau_Symbols.landau_o.big_mult_1') by simp_all lemma step': "(\<lambda>_. 1) \<in> O(g) \<Longrightarrow> f \<in> o(\<lambda>n . g n ^ k) \<Longrightarrow> f \<in> o(\<lambda>n . g n ^ Suc k)" apply simp apply (rule Landau_Symbols.landau_o.small_mult_1') by simp_all lemma step'_nat: "(\<lambda>_. 1) \<in> O(g) \<Longrightarrow> (f::nat \<Rightarrow> nat) \<in> o(\<lambda>n . (g::nat \<Rightarrow> nat) n ^ k) \<Longrightarrow> f \<in> o(\<lambda>n . g n ^ Suc k)" using step' by auto lemma poly_log_compose [intro]: assumes "poly_log f" "poly_log g" shows "poly_log (f \<circ> g)" proof - from assms have "poly_log (make_mono f)" by (simp add: poly_log_make_mono_iff) then obtain k c where k: "\<And>n. 
n > 1 \<Longrightarrow> make_mono f n \<le> c * real (Discrete.log n ^ k)" apply (auto simp: poly_log_def natfun_bigo_poly_log_iff) (* ? *) by (smt (verit, del_insts) One_nat_def \<open>poly_log (make_mono f)\<close> natfun_bigo_poly_log_iff norm_of_nat of_nat_power poly_log_def) have "c \<ge> 0" proof- have "Discrete.log n ^ k \<ge> 0" for n by simp hence 1: "real (Discrete.log n ^ k) \<ge> 0" for n by simp have 2: "make_mono f n \<ge> 0" for n by simp show ?thesis using k[of 2] 1[of 2] 2[of 2] apply (auto simp add: field_simps) by (metis (mono_tags, opaque_lifting) Multiseries_Expansion.intyness_1 landau_o.R_trans log_exp mult.right_neutral nle_le of_nat_0_le_iff one_le_power power_le_one_iff power_one_right) qed from assms obtain l where l: "g \<in> o(\<lambda>n. Discrete.log n ^ l)" by (auto simp: poly_log_iff_ex_smallo) hence "g \<in> o(\<lambda>n. Discrete.log n ^ Suc l)" apply (intro step'_nat) apply real_asymp using l by simp from this obtain l where l: "g \<in> o(\<lambda>n. Discrete.log n ^ l)" "l > 0" by blast have "eventually (\<lambda>n. g n \<le> Discrete.log n ^ l) at_top" using landau_o.smallD[OF l(1), of 1] by auto hence "eventually (\<lambda>n. real (f (g n)) \<le> c * real (Discrete.log n ^ (k * l))) at_top" using eventually_gt_at_top[of 4] proof eventually_elim case (elim n) have "real (f (g n)) \<le> real (make_mono f (g n))" by auto also from elim(1) have "make_mono f (g n) \<le> make_mono f (Discrete.log n ^ l)" by (rule monoD[OF mono_make_mono]) also have "\<dots> \<le> c * (Discrete.log (Discrete.log n ^ l)) ^ k" proof- have "Discrete.log n \<ge> Discrete.log 4" apply (rule log_le_iff) using \<open>n>4\<close> by simp hence "Discrete.log n > 1" by code_simp auto hence "1 < Discrete.log n ^ l" using \<open>n>4\<close> l(2) apply code_simp apply auto using less_trans_Suc power_gt_expt by presburger hence "real (make_mono f (Discrete.log n ^ l)) \<le> c * real (Discrete.log (Discrete.log n ^ l) ^ k)" using k[of "Discrete.log n ^ l"] by simp thus ?thesis by simp qed also have "\<dots> \<le> c * (Discrete.log n ^ l) ^ k" proof- have "(Discrete.log (Discrete.log n ^ l) ^ k) \<le> ((Discrete.log n ^ l) ^ k)" by (metis Discrete.log_le_iff le_neq_implies_less lessI less_or_eq_imp_le log_exp nat_power_eq_Suc_0_iff numeral_2_eq_2 power_gt_expt power_mono_iff zero_order(1)) thus ?thesis by (meson \<open>0 \<le> c\<close> mult_left_mono of_nat_le_iff) qed also have "\<dots> = c * real (Discrete.log n ^ (k * l))" by (subst mult.commute) (simp add: power_mult) finally show ?case by simp qed hence "f \<circ> g \<in> O(\<lambda>n. Discrete.log n ^ (k * l))" by (intro bigoI[of _ c]) auto thus ?thesis by (auto simp: poly_log_def) qed lemma poly_log_dlog: "poly_log Discrete.log" unfolding poly_log_def apply (rule exI[of _ 1]) by simp lemma "(\<lambda>n . (Discrete.log::nat \<Rightarrow> nat) n + (Discrete.log n)^2) \<in> O(\<lambda>n . 
(Discrete.log n)^2)"
  by real_asymp

lemma append_imp_time_append_imp_time':
  "append_imp_time t s = t + append_imp_time' (append_state.append_acc s) (append_xs s)"
proof(induction "append_state.append_acc s" "append_xs s" arbitrary: t s rule: append_imp_time'.induct)
  case 1
  then show ?case by (simp add: append_imp_time.simps)
next
  case (2 v)
  hence s: "append_imp_time' (append_state.append_acc s) (append_xs s)
    = 19 + hd_imp_time' (append_xs s)
      + cons_imp_time' (hd_nat (append_xs s)) (append_state.append_acc s)
      + tl_imp_time' (append_xs s)
      + append_imp_time' (cons (hd_nat (append_xs s)) (append_state.append_acc s)) (tl_nat (append_xs s))"
    by (metis append_imp_time'.simps(2))
  show ?case
    apply (subst append_imp_time.simps)
    apply (subst append_imp_time_acc_2)
    using 2(2) by (simp add: s 2(1) Let_def append_imp_time_acc hd_imp_time_hd_imp_time'
        tl_imp_time_tl_imp_time' cons_imp_time_cons_imp_time' hd_imp_correct tl_imp_correct
        cons_imp_correct append_state_upd_def
        del: hd_imp_time'.simps tl_imp_time'.simps cons_imp_time'.simps)
qed

lemma hd_nat_le: "hd_nat n \<le> n"
  unfolding hd_nat_def fst_nat_fst'_nat fst'_nat_def by auto

definition "append_imp_time'_iter_bound acc xs
  \<equiv> 60 * (Discrete.log xs)\<^sup>2 + 1028 * Discrete.log xs + 10 * Discrete.log acc + 4439"

fun append_imp_time'' :: "nat \<Rightarrow> nat \<Rightarrow> nat" where
  "append_imp_time'' _ 0 = 2"
| "append_imp_time'' acc xs = append_imp_time'_iter_bound acc xs
    + append_imp_time'' (cons (hd_nat xs) acc) (tl_nat xs)"

(* Extra Suc to account for rounding down :( *)
lemma log_mult: "Discrete.log (n * m) \<le> Suc (Discrete.log n + Discrete.log m)"
  by (auto simp add: log_altdef algebra_simps log_mult floor_add)

thm log_twice

lemma cons_bound: "Discrete.log (cons h t) \<le> Suc (Suc (Suc (Suc (Suc (Suc (3 * Discrete.log h + 2 * Discrete.log t))))))"
proof-
  have "Discrete.log (Suc ((h + t) * Suc (h + t) div 2 + h))
    \<le> Suc (Suc (Discrete.log ((h + t) * Suc (h + t) div 2) + Discrete.log h))"
    using dlog_add_bound dlog_Suc_bound by (meson Suc_le_mono le_trans)
  also have "\<dots> \<le> Suc (Suc (Discrete.log ((h + t) * Suc (h + t)) + Discrete.log h))"
    (* Bounds are getting more and more loose lol *)
    by (subst log_half) auto
  also have "\<dots> \<le> Suc (Suc (Suc (Discrete.log (h + t) + Discrete.log (Suc (h + t)) + Discrete.log h)))"
    using log_mult by (metis (no_types, opaque_lifting) Suc_plus add.commute nat_add_left_cancel_le)
  also have "\<dots> \<le> Suc (Suc (Suc (Suc (2 * Discrete.log (h + t) + Discrete.log h))))"
    using dlog_Suc_bound by force
  also have "\<dots> \<le> Suc (Suc (Suc (Suc (2 * Suc (Discrete.log h + Discrete.log t) + Discrete.log h))))"
    using dlog_add_bound by (meson Suc_le_mono add_mono_thms_linordered_semiring(3) mult_le_mono2)
  also have "\<dots> \<le> Suc (Suc (Suc (Suc (Suc (Suc (3 * Discrete.log h + 2 * Discrete.log t))))))"
    by simp
  finally show ?thesis
    by (auto simp add: cons_def prod_encode_def triangle_def)
qed

(* Malformed scratch statement, kept for reference; it does not parse as a proposition:
   lemma "cons prod_encode xs (cons (y#ys))" *)

lemma "Discrete.log (Primitives.append_acc acc xs) \<le> 3 * Suc (Discrete.log acc + Discrete.log xs)"
proof(induction acc xs rule: Primitives.append_acc.induct)
  case (1 acc)
  then show ?case by simp
next
  case (2 acc xs)
  then show ?case sorry
qed

lemma add_0_eq_nat: "x = 0+(x::nat)"
  by simp

(* Unfinished proof attempt, abandoned with oops; the original source left a dangling
   "proof- qed" skeleton in front of this apply script. *)
lemma "append_imp_time'_iter_bound (cons x acc) xs \<le> 10 + (append_imp_time'_iter_bound acc (cons x xs))"
  unfolding append_imp_time'_iter_bound_def
  apply (subst (3) add.commute)
  apply (rule add_mono)
   apply simp
  apply (rule add_mono)
   apply simp
  apply (auto simp add: cons_def prod_encode_def triangle_def field_simps power2_eq_square)
  oops

lemma append_imp_time'_append_imp_time'': "append_imp_time' acc xs \<le> append_imp_time'' acc xs"
proof (induction acc xs rule: append_imp_time''.induct)
  case (1 uu)
  then show ?case by simp
next
  (* Why do I just now remember my rewrite? Try it here *)
  case (2 acc v)
  have s: "append_imp_time'_iter_bound acc xs
    = 19 + (2181 + 509 * Discrete.log xs + 30 * (Discrete.log xs)\<^sup>2)
      + (58 + 10 * Discrete.log xs + 10 * Discrete.log acc)
      + (2181 + 509 * Discrete.log xs + 30 * (Discrete.log xs)\<^sup>2)" for acc xs
    by (auto simp add: append_imp_time'_iter_bound_def)
  show ?case
    apply (simp only: s append_imp_time'.simps append_imp_time''.simps)
    apply (rule add_le_mono)
     apply (rule add_le_mono)
      apply (rule add_le_mono)
       apply (rule add_le_mono)
        apply simp
    using hd_imp_time'_non_rec_bound apply blast
    using cons_imp_time'_non_rec_bound hd_nat_le apply auto[]
     apply (meson Discrete.log_le_iff add_le_mono1 add_left_mono le_refl le_trans mult_le_mono)
    using tl_imp_time'_non_rec_bound apply blast
    using "2.IH" apply auto
    done
qed

(* A second copy of the lemma above was left here with the intended bound blank
   ("append_imp_time'' acc xs \<le> ") and a verbatim copy of the same proof script; it is
   omitted here, with the missing bound deliberately left unfilled. *)

(* Solving recursion will be a bit ugly, as acc grows... *)
(* List stuff skipped as prod_encode_aux missing :( *)

subsubsection \<open>Logical and\<close>

fun AND_neq_zero_imp_time' :: "nat \<Rightarrow> nat \<Rightarrow> nat" where
  "AND_neq_zero_imp_time' a b = 1 + (if a \<noteq> 0 then 3 else 2)"

lemma AND_neq_zero_imp_time_AND_neq_zero_imp_time':
  "AND_neq_zero_imp_time t s = AND_neq_zero_imp_time' (AND_neq_zero_a s) (AND_neq_zero_b s) + t"
  by (auto simp add: AND_neq_zero_imp_time.simps)

subsubsection \<open>Logical or\<close>

fun OR_neq_zero_imp_time' :: "nat \<Rightarrow> nat \<Rightarrow> nat" where
  "OR_neq_zero_imp_time' a b = 1 + (if a \<noteq> 0 then 2 else 3)"

lemma OR_neq_zero_imp_time_OR_neq_zero_imp_time':
  "OR_neq_zero_imp_time t s = OR_neq_zero_imp_time' (OR_neq_zero_a s) (OR_neq_zero_b s) + t"
  by (auto simp add: OR_neq_zero_imp_time.simps)

end
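The bounds above concern concrete algorithms. The following Python sketch (an illustration added
here, not part of the formal development; all names are ad hoc) shows the binary-search square
root and the triangular-root unpairing that the lemmas analyse, with a step counter matching the
logarithmic recursion depth bounded by dsqrt'_imp_time'_log.

from math import isqrt

def dsqrt_steps(y):
    # Binary search on (L, R], mirroring dsqrt': the loop runs about
    # log2(R - L) times, which is what dsqrt'_imp_time'_log bounds.
    L, R, steps = 0, y + 1, 0          # invariant: L*L <= y < R*R
    while L + 1 < R:
        M = (L + R) // 2
        steps += 1
        if M * M <= y:
            L = M
        else:
            R = M
    return L, steps                    # L = floor(sqrt(y))

def tsqrt(y):
    # Triangular root, the largest n with n*(n+1)//2 <= y; cf. tsqrt_def:
    # tsqrt y = (dsqrt (8*y + 1) - 1) div 2.
    return (dsqrt_steps(8 * y + 1)[0] - 1) // 2

def prod_decode(p):
    # Cantor unpairing via the triangular root (assumed decomposition,
    # matching the cost structure of fst'/snd' above).
    k = tsqrt(p)
    fst = p - k * (k + 1) // 2         # fst' p = p - triangle (tsqrt p)
    return fst, k - fst                # snd' p = tsqrt p - fst' p

# Sanity checks.
assert all(dsqrt_steps(y)[0] == isqrt(y) for y in range(10000))
assert tsqrt(9) == 3 and tsqrt(10) == 4    # triangle 3 = 6, triangle 4 = 10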
module Logic.Classical.DoubleNegated where open import Data.Tuple as Tuple using (_⨯_ ; _,_) open import Functional open import Logic.Names open import Logic.Propositional as Constructive using (¬¬_) import Logic.Predicate as Constructive open import Logic import Lvl open import Syntax.Type open import Type private variable ℓ ℓ₁ ℓ₂ : Lvl.Level private variable X Y Z W : Stmt{ℓ} private variable P : X → Stmt{ℓ} -- Classical propositions are expressed as propositions wrapped in double negation. -- TODO: I am not sure, but I think this works? My reasoning is the following: -- • [¬¬]-elim ↔ excluded-middle -- Double negation elimination is equivalent to excluded middle in constructive logic. -- • Theory(ClassicalLogic) = Theory(ConstructiveLogic ∪ {excludedMiddle}) -- Constructive logic is classical logic without EM. -- EM is the difference between classical and constructive. -- • Theory(ClassicalLogic) ⊈ Theory(ConstructiveLogic) -- • Theory(ClassicalLogic) ⊇ Theory(ConstructiveLogic) -- This seems to be a common description of constructive logic. -- • [¬¬]-intro ∈ Theory(ConstructiveLogic) -- Double negation introduction exists in constructive logic. -- • (∀φ. ¬¬¬¬φ → ¬¬φ) ∈ Theory(ConstructiveLogic) -- Double negation elimination exists inside a double negation in constructive logic. -- • Every natural deduction introduction/elimination rule in constructive logic can be expressed inside a double negation. -- Therefore: -- • Theory(ClassicalLogic) = Theory(ConstructiveLogic ∪ {[¬¬]-elim}). -- • The theory inside double negation in constructive logic is (at least) a classical logic. -- Because every intro/elim rule exists in there, -- the propositions inside a double negation are at least a constructive logic. -- But [¬¬]-elim also exists in there. -- Therefore it is at least a classical logic. -- This cannot be done with predicate logic because the translation for [∀] does not hold in both directions. module _ where infixl 1011 •_ infixl 1010 ¬_ infixl 1005 _∧_ infixl 1004 _∨_ infixl 1000 _⟵_ _⟷_ _⟶_ •_ : Stmt{ℓ} → Stmt •_ = ¬¬_ _∧_ : Stmt{ℓ₁} → Stmt{ℓ₂} → Stmt _∧_ = (¬¬_) ∘₂ (Constructive._∧_) _⟶_ : Stmt{ℓ₁} → Stmt{ℓ₂} → Stmt _⟶_ = (¬¬_) ∘₂ (_→ᶠ_) _⟵_ : Stmt{ℓ₁} → Stmt{ℓ₂} → Stmt _⟵_ = (¬¬_) ∘₂ (_←_) _⟷_ : Stmt{ℓ₁} → Stmt{ℓ₂} → Stmt _⟷_ = (¬¬_) ∘₂ (Constructive._↔_) _∨_ : Stmt{ℓ₁} → Stmt{ℓ₂} → Stmt _∨_ = (¬¬_) ∘₂ (Constructive._∨_) ⊥ : Stmt ⊥ = ¬¬ Constructive.⊥ ⊤ : Stmt ⊤ = ¬¬ Constructive.⊤ ¬_ : Stmt{ℓ} → Stmt ¬_ = (¬¬_) ∘ Constructive.¬_ ∀ₗ : (X → Stmt{ℓ}) → Stmt ∀ₗ = (¬¬_) ∘ Constructive.∀ₗ ∘ ((¬¬_) ∘_) ∃ : (X → Stmt{ℓ}) → Stmt ∃ = (¬¬_) ∘ Constructive.∃ import Logic.Propositional.Theorems as Constructive import Logic.Predicate.Theorems as Constructive [→]ₗ-[¬¬]-elim : ((¬¬ X) → Y) → (X → Y) [→]ₗ-[¬¬]-elim = liftᵣ(Constructive.[¬¬]-intro) [→]ᵣ-[¬¬]-move-out : (X → (¬¬ Y)) → ¬¬(X → Y) [→]ᵣ-[¬¬]-move-out xnny nxy = nxy (x ↦ Constructive.[⊥]-elim(xnny x (y ↦ nxy (const y)))) double-contrapositiveᵣ : (X → Y) → ((¬¬ X) → (¬¬ Y)) -- DoubleNegated(X → Y) → DoubleNegated(¬¬ X → ¬¬ Y) double-contrapositiveᵣ = Constructive.contrapositiveᵣ ∘ Constructive.contrapositiveᵣ [¬¬]-double-contrapositiveₗ : ¬¬(X → Y) ← ((¬¬ X) → (¬¬ Y)) [¬¬]-double-contrapositiveₗ p = [→]ᵣ-[¬¬]-move-out ([→]ₗ-[¬¬]-elim p) -- Also called: Double-negation shift. 
[¬¬][→]-preserving : ¬¬(X → Y) Constructive.↔ ((¬¬ X) → (¬¬ Y)) [¬¬][→]-preserving{X = X}{Y = Y} = Constructive.[↔]-intro l r where l : ¬¬(X → Y) ← ((¬¬ X) → (¬¬ Y)) l = [→]ᵣ-[¬¬]-move-out ∘ [→]ₗ-[¬¬]-elim r : ¬¬(X → Y) → ((¬¬ X) → (¬¬ Y)) r(nnxy)(nnx)(ny) = ((Constructive.[→]-elim ((xy ↦ ((Constructive.[→]-elim ((Constructive.[→]-elim ((Constructive.[⊥]-elim ((Constructive.[→]-elim ((x ↦ ((Constructive.[→]-elim ((Constructive.[→]-elim x xy) :of: Y) (ny :of: (Y → Constructive.⊥)) ) :of: Constructive.⊥) ) :of: (X → Constructive.⊥)) (nnx :of: ((X → Constructive.⊥) → Constructive.⊥)) ) :of: Constructive.⊥) ) :of: X) (xy :of: (X → Y)) ) :of: Y) (ny :of: (Y → Constructive.⊥)) ) :of: Constructive.⊥) ) :of: ((X → Y) → Constructive.⊥)) (nnxy :of: ¬¬(X → Y)) ) :of: Constructive.⊥) ------------------------------------------ -- Converting theorems with implication in constructive logic to classical logic prop-intro : X → (¬¬ X) prop-intro = Constructive.[¬¬]-intro [→]₁-intro : (X → Y) → ((¬¬ X) → (¬¬ Y)) [→]₁-intro = double-contrapositiveᵣ [→]₂-intro : (X → Y → Z) → ((¬¬ X) → (¬¬ Y) → (¬¬ Z)) [→]₂-intro(xyz) = (Constructive.[↔]-to-[→] [¬¬][→]-preserving) ∘ ([→]₁-intro(xyz)) [→]₃-intro : (X → Y → Z → W) → ((¬¬ X) → (¬¬ Y) → (¬¬ Z) → (¬¬ W)) [→]₃-intro(xyzw) = (Constructive.[↔]-to-[→] [¬¬][→]-preserving) ∘₂ ([→]₂-intro(xyzw)) ------------------------------------------ -- Theorems [→][∧]-assumptionₗ : ¬¬((X Constructive.∧ Y) → Z) ← ¬¬(X → Y → Z) [→][∧]-assumptionₗ = [→]₁-intro(Tuple.uncurry) [→][∧]-assumptionᵣ : ¬¬((X Constructive.∧ Y) → Z) → ¬¬(X → Y → Z) [→][∧]-assumptionᵣ = [→]₁-intro(Tuple.curry) [¬¬]-intro : (¬¬ X) → ¬¬(¬¬ X) [¬¬]-intro = Constructive.[¬¬]-intro ------------------------------------------ -- Conjunction (AND) [∧]-intro : (• X) → (• Y) → (X ∧ Y) [∧]-intro = [→]₂-intro Constructive.[∧]-intro [∧]-elimₗ : (X ∧ Y) → (• X) [∧]-elimₗ = [→]₁-intro Constructive.[∧]-elimₗ [∧]-elimᵣ : (X ∧ Y) → (• Y) [∧]-elimᵣ = [→]₁-intro Constructive.[∧]-elimᵣ ------------------------------------------ -- Implication [→]-elim : (X ⟶ Y) → (• X) → (• Y) [→]-elim = [→]₂-intro(swap Constructive.[→]-elim) [→]-intro : ((• X) → (• Y)) → (X ⟶ Y) [→]-intro = [¬¬]-double-contrapositiveₗ ------------------------------------------ -- Reverse implication [←]-intro : ((• Y) ← (• X)) → (Y ⟵ X) [←]-intro = [¬¬]-double-contrapositiveₗ [←]-elim : (• X) → (Y ⟵ X) → (• Y) [←]-elim = [→]₂-intro(Constructive.[←]-elim) ------------------------------------------ -- Equivalence [↔]-intro : ((• X) ← (• Y)) → ((• X) → (• Y)) → (X ⟷ Y) [↔]-intro yx xy = ([→]₂-intro(Constructive.[↔]-intro)) ([→]-intro yx) ([→]-intro xy) [↔]-elimₗ : (X ⟷ Y) → (• X) ← (• Y) [↔]-elimₗ = [→]-elim ∘ ([→]₁-intro(Constructive.[↔]-to-[←])) [↔]-elimᵣ : (X ⟷ Y) → (• X) → (• Y) [↔]-elimᵣ = [→]-elim ∘ ([→]₁-intro(Constructive.[↔]-to-[→])) ------------------------------------------ -- Disjunction (OR) [∨]-introₗ : (• X) → (X ∨ Y) [∨]-introₗ = [→]₁-intro(Constructive.[∨]-introₗ) [∨]-introᵣ : (• Y) → (X ∨ Y) [∨]-introᵣ = [→]₁-intro(Constructive.[∨]-introᵣ) [∨]-elim : ((• X) → (• Z)) → ((• Y) → (• Z)) → (X ∨ Y) → (¬¬ Z) [∨]-elim xz yz = ([→]₃-intro(Constructive.[∨]-elim)) ([→]-intro xz) ([→]-intro yz) ------------------------------------------ -- Bottom (false, absurdity, empty, contradiction) [⊥]-intro : (• X) → (¬ X) → ⊥ [⊥]-intro = [→]₂-intro(Constructive.[⊥]-intro) [⊥]-elim : ⊥ → (• X) [⊥]-elim = [→]₁-intro(Constructive.[⊥]-elim) ------------------------------------------ -- Top (true, truth, unit, validity) [⊤]-intro : ⊤ [⊤]-intro = 
prop-intro(Constructive.[⊤]-intro) ------------------------------------------ -- Negation [¬]-intro : ((• X) → ⊥) → (¬ X) [¬]-intro = ([→]₁-intro(Constructive.[¬]-intro)) ∘ [→]-intro [¬]-elim : ((¬ X) → ⊥) → (• X) [¬]-elim nnx nx = nnx (apply nx) id ------------------------------------------ -- For-all quantification [∀]-intro : (∀{x} → • P(x)) → (∀ₗ P) [∀]-intro = apply [∀]-elim : (∀ₗ P) → ∀{x} → • P(x) [∀]-elim apx npx = apx(\px → apply npx px) ------------------------------------------ -- Existential quantification [∃]-intro : ∀{x} → • P(x) → (∃ P) [∃]-intro {x = x} = [→]₁-intro(proof ↦ Constructive.[∃]-intro (x) ⦃ proof ⦄) [∃]-elim : (∀{x} → • P(x) → • X) → (∃ P) → • X [∃]-elim apx nnep nx = nnep (Constructive.[∃]-elim (p ↦ apx (apply p) nx)) ------------------------------------------ -- Theorems exclusive to classic logic (compared to constructive logic) [¬¬]-elim : ¬¬(¬¬ X) → (• X) [¬¬]-elim = Constructive.[¬¬¬]-elim excluded-middle : (X ∨ (Constructive.¬ X)) excluded-middle{X = X} = Constructive.[¬¬]-excluded-middle [→]-disjunctive-formᵣ : (X ⟶ Y) → (Constructive.¬ X ∨ Y) [→]-disjunctive-formᵣ n-n-x→y n-nx∨y = (n-nx∨y ∘ Constructive.[∨]-introₗ) (n-n-x→y ∘ (n-nx∨y ∘ Constructive.[∨]-introᵣ ∘₂ apply)) -- contrapositiveₗ : (X ⟶ Y) ← ((Constructive.¬ X) ⟵ (Constructive.¬ Y)) -- contrapositiveₗ n-n-ny→nx = {!!} -- Constructive.[→]-intro ([¬¬]-elim ∘ Constructive.contrapositiveᵣ(Constructive.[→]-elim n-n-ny→nx) ∘ Constructive.[¬¬]-intro) module _ where _∨ʷᵉᵃᵏ_ : Stmt{ℓ₁} → Stmt{ℓ₂} → Stmt X ∨ʷᵉᵃᵏ Y = Constructive.¬((Constructive.¬ X) Constructive.∧ (Constructive.¬ Y)) [∨ʷᵉᵃᵏ]-introₗ : X → (X ∨ʷᵉᵃᵏ Y) [∨ʷᵉᵃᵏ]-introₗ = swap Constructive.[∧]-elimₗ [∨ʷᵉᵃᵏ]-introᵣ : Y → (X ∨ʷᵉᵃᵏ Y) [∨ʷᵉᵃᵏ]-introᵣ = swap Constructive.[∧]-elimᵣ [∨ʷᵉᵃᵏ]-elim : (X → Z) → (Y → Z) → DoubleNegationOn(Z) → (X ∨ʷᵉᵃᵏ Y) → Z [∨ʷᵉᵃᵏ]-elim xz yz nnzz xy = nnzz(nz ↦ xy(Constructive.[∧]-intro (nz ∘ xz) (nz ∘ yz))) ∃ʷᵉᵃᵏ : (X → Stmt{ℓ}) → Stmt ∃ʷᵉᵃᵏ P = Constructive.¬(∀{x} → (Constructive.¬ P(x))) [∃ʷᵉᵃᵏ]-intro : ∀(x) → ⦃ proof : P(x) ⦄ → (∃ʷᵉᵃᵏ P) [∃ʷᵉᵃᵏ]-intro _ ⦃ px ⦄ axnpx = axnpx px [∃ʷᵉᵃᵏ]-elim : (∀{x} → P(x) → X) → DoubleNegationOn(X) → (∃ʷᵉᵃᵏ P) → X [∃ʷᵉᵃᵏ]-elim axpxx nnxx ep = nnxx(ep ∘ (_∘ axpxx))
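The reasoning sketched in the module header is exactly Glivenko's theorem, which holds for
propositional logic but, as the comments above note, not for predicate logic:

\[
  \vdash_{\text{classical}} \varphi
  \quad\Longleftrightarrow\quad
  \vdash_{\text{intuitionistic}} \neg\neg\varphi .
\]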
function [node,elem]=readsmf(fname)
%
% [node,elem]=readsmf(fname)
%
% read simple model format (SMF)
%
% author: Qianqian Fang, <q.fang at neu.edu>
% date: 2007/11/21
%
% input:
%    fname: name of the SMF data file
%
% output:
%    node: node coordinates of the mesh
%    elem: list of elements of the mesh
%
% -- this function is part of iso2mesh toolbox (http://iso2mesh.sf.net)
%

node=[];
elem=[];
fid=fopen(fname,'rt');
while(~feof(fid))
    line=fgetl(fid);
    if(~ischar(line) || isempty(line))
        continue;  % skip blank lines and the EOF marker; line(1) would error otherwise
    end
    if(line(1)=='v')
        dd=sscanf(line,'v %f %f %f');
        if(length(dd)==3)
            node=[node;dd];
        end
    elseif(line(1)=='f')
        dd=sscanf(line,'f %d %d %d');
        if(length(dd)==3)
            elem=[elem;dd];
        end
    end
end
fclose(fid);
node=reshape(node,3,length(node)/3)';
elem=reshape(elem,3,length(elem)/3)';
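For readers without MATLAB, a rough Python equivalent of the parser above (a hypothetical helper,
not part of iso2mesh): SMF stores one record per line, "v x y z" for vertices and "f i j k" for
triangular faces with 1-based vertex indices, which is all that readsmf consumes.

def read_smf(fname):
    # Minimal SMF reader: collect vertex lines ("v x y z") and face lines
    # ("f i j k", 1-based indices), ignoring everything else.
    node, elem = [], []
    with open(fname) as f:
        for raw in f:
            parts = raw.split()
            if len(parts) == 4 and parts[0] == 'v':
                node.append([float(c) for c in parts[1:]])
            elif len(parts) == 4 and parts[0] == 'f':
                elem.append([int(c) for c in parts[1:]])
    return node, elem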
/-
Copyright (c) 2018 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison, Jannis Limperg
-/

/-!
# Monadic instances for `ulift` and `plift`

In this file we define `monad` and `is_lawful_monad` instances on `plift` and `ulift`. -/

universes u v

namespace plift

variables {α : Sort u} {β : Sort v}

/-- Functorial action. -/
protected def map (f : α → β) (a : plift α) : plift β := plift.up (f a.down)

@[simp] lemma map_up (f : α → β) (a : α) : (plift.up a).map f = plift.up (f a) := rfl

/-- Embedding of pure values. -/
@[simp] protected def pure : α → plift α := up

/-- Applicative sequencing. -/
protected def seq (f : plift (α → β)) (x : plift α) : plift β := plift.up (f.down x.down)

@[simp] lemma seq_up (f : α → β) (x : α) : (plift.up f).seq (plift.up x) = plift.up (f x) := rfl

/-- Monadic bind. -/
protected def bind (a : plift α) (f : α → plift β) : plift β := f a.down

@[simp] lemma bind_up (a : α) (f : α → plift β) : (plift.up a).bind f = f a := rfl

instance : monad plift :=
{ map := @plift.map,
  pure := @plift.pure,
  seq := @plift.seq,
  bind := @plift.bind }

instance : is_lawful_functor plift :=
{ id_map := λ α ⟨x⟩, rfl,
  comp_map := λ α β γ g h ⟨x⟩, rfl }

instance : is_lawful_applicative plift :=
{ pure_seq_eq_map := λ α β g ⟨x⟩, rfl,
  map_pure := λ α β g x, rfl,
  seq_pure := λ α β ⟨g⟩ x, rfl,
  seq_assoc := λ α β γ ⟨x⟩ ⟨g⟩ ⟨h⟩, rfl }

instance : is_lawful_monad plift :=
{ bind_pure_comp_eq_map := λ α β f ⟨x⟩, rfl,
  bind_map_eq_seq := λ α β ⟨a⟩ ⟨b⟩, rfl,
  pure_bind := λ α β x f, rfl,
  bind_assoc := λ α β γ ⟨x⟩ f g, rfl }

@[simp] lemma rec.constant {α : Sort u} {β : Type v} (b : β) :
  @plift.rec α (λ _, β) (λ _, b) = λ _, b :=
funext (λ x, plift.cases_on x (λ a, eq.refl (plift.rec (λ a', b) {down := a})))

end plift

namespace ulift

variables {α : Type u} {β : Type v}

/-- Functorial action. -/
protected def map (f : α → β) (a : ulift α) : ulift β := ulift.up (f a.down)

@[simp] lemma map_up (f : α → β) (a : α) : (ulift.up a).map f = ulift.up (f a) := rfl

/-- Embedding of pure values. -/
@[simp] protected def pure : α → ulift α := up

/-- Applicative sequencing. -/
protected def seq (f : ulift (α → β)) (x : ulift α) : ulift β := ulift.up (f.down x.down)

@[simp] lemma seq_up (f : α → β) (x : α) : (ulift.up f).seq (ulift.up x) = ulift.up (f x) := rfl

/-- Monadic bind. -/
protected def bind (a : ulift α) (f : α → ulift β) : ulift β := f a.down

@[simp] lemma bind_up (a : α) (f : α → ulift β) : (ulift.up a).bind f = f a := rfl

instance : monad ulift :=
{ map := @ulift.map,
  pure := @ulift.pure,
  seq := @ulift.seq,
  bind := @ulift.bind }

instance : is_lawful_functor ulift :=
{ id_map := λ α ⟨x⟩, rfl,
  comp_map := λ α β γ g h ⟨x⟩, rfl }

instance : is_lawful_applicative ulift :=
{ to_is_lawful_functor := ulift.is_lawful_functor,
  pure_seq_eq_map := λ α β g ⟨x⟩, rfl,
  map_pure := λ α β g x, rfl,
  seq_pure := λ α β ⟨g⟩ x, rfl,
  seq_assoc := λ α β γ ⟨x⟩ ⟨g⟩ ⟨h⟩, rfl }

instance : is_lawful_monad ulift :=
{ bind_pure_comp_eq_map := λ α β f ⟨x⟩, rfl,
  bind_map_eq_seq := λ α β ⟨a⟩ ⟨b⟩, rfl,
  pure_bind := λ α β x f,
    by { dsimp only [bind, pure, ulift.pure, ulift.bind], cases (f x), refl },
  bind_assoc := λ α β γ ⟨x⟩ f g,
    by { dsimp only [bind, pure, ulift.pure, ulift.bind], cases (f x), refl } }

@[simp] lemma rec.constant {α : Type u} {β : Sort v} (b : β) :
  @ulift.rec α (λ _, β) (λ _, b) = λ _, b :=
funext (λ x, ulift.cases_on x (λ a, eq.refl (ulift.rec (λ a', b) {down := a})))

end ulift
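As a quick sanity check of the `map_up` and `bind_up` simp lemmas (an untested sketch in the
same Lean 3 dialect), both reductions should hold definitionally:

example : (plift.up 2).map nat.succ = plift.up 3 := rfl
example : (plift.up 2).bind (λ n, plift.up (n + 1)) = plift.up 3 := rfl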
lemma order_0I: "poly p a \<noteq> 0 \<Longrightarrow> order a p = 0"
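For context (a standard fact about root multiplicity, stated informally): for \(p \neq 0\),
`order a p` is the multiplicity of `a` as a root of `p`, so the lemma says a non-root has
multiplicity zero:

\[
  \operatorname{order}_a(p) \;=\; \max\{\, n \mid (X - a)^n \text{ divides } p \,\},
  \qquad
  p(a) \neq 0 \;\Longrightarrow\; \operatorname{order}_a(p) = 0 .
\]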
(* Title: HOL/Auth/n_germanSymIndex_lemma_on_inv__9.thy Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences *) header{*The n_germanSymIndex Protocol Case Study*} theory n_germanSymIndex_lemma_on_inv__9 imports n_germanSymIndex_base begin section{*All lemmas on causal relation between inv__9 and some rule r*} lemma n_StoreVsinv__9: assumes a1: "(\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d)" and a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__9 p__Inv2)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain i d where a1:"i\<le>N\<and>d\<le>N\<and>r=n_Store i d" apply fastforce done from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__9 p__Inv2" apply fastforce done have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(i=p__Inv2)" have "?P3 s" apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Para (Ident ''Cache'') p__Inv2) ''State'')) (Const E)) (eqn (IVar (Field (Para (Ident ''Chan3'') p__Inv2) ''Cmd'')) (Const InvAck))))" in exI, auto) done then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(i~=p__Inv2)" have "?P3 s" apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Para (Ident ''Cache'') i) ''State'')) (Const E)) (eqn (IVar (Field (Para (Ident ''Chan3'') p__Inv2) ''Cmd'')) (Const InvAck))))" in exI, auto) done then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_SendInvAckVsinv__9: assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendInvAck i)" and a2: "(\<exists> p__Inv2. 
p__Inv2\<le>N\<and>f=inv__9 p__Inv2)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain i where a1:"i\<le>N\<and>r=n_SendInvAck i" apply fastforce done from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__9 p__Inv2" apply fastforce done have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(i=p__Inv2)" have "((formEval (eqn (IVar (Field (Para (Ident ''Cache'') p__Inv2) ''State'')) (Const E)) s))\<or>((formEval (neg (eqn (IVar (Field (Para (Ident ''Cache'') p__Inv2) ''State'')) (Const E))) s))" by auto moreover { assume c1: "((formEval (eqn (IVar (Field (Para (Ident ''Cache'') p__Inv2) ''State'')) (Const E)) s))" have "?P3 s" apply (cut_tac a1 a2 b1 c1, simp, rule_tac x="(neg (andForm (neg (eqn (IVar (Field (Para (Ident ''Cache'') p__Inv2) ''State'')) (Const I))) (neg (eqn (IVar (Field (Para (Ident ''Cache'') p__Inv2) ''Data'')) (IVar (Ident ''AuxData''))))))" in exI, auto) done then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume c1: "((formEval (neg (eqn (IVar (Field (Para (Ident ''Cache'') p__Inv2) ''State'')) (Const E))) s))" have "?P3 s" apply (cut_tac a1 a2 b1 c1, simp, rule_tac x="(neg (andForm (andForm (eqn (IVar (Ident ''ExGntd'')) (Const true)) (neg (eqn (IVar (Field (Para (Ident ''Cache'') p__Inv2) ''State'')) (Const E)))) (eqn (IVar (Field (Para (Ident ''Chan2'') p__Inv2) ''Cmd'')) (Const Inv))))" in exI, auto) done then have "invHoldForRule s f r (invariants N)" by auto } ultimately have "invHoldForRule s f r (invariants N)" by satx } moreover { assume b1: "(i~=p__Inv2)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_RecvInvAckVsinv__9: assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvInvAck i)" and a2: "(\<exists> p__Inv2. 
p__Inv2\<le>N\<and>f=inv__9 p__Inv2)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvInvAck i" apply fastforce done from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__9 p__Inv2" apply fastforce done have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(i=p__Inv2)" have "((formEval (eqn (IVar (Ident ''ExGntd'')) (Const true)) s))\<or>((formEval (neg (eqn (IVar (Ident ''ExGntd'')) (Const true))) s))" by auto moreover { assume c1: "((formEval (eqn (IVar (Ident ''ExGntd'')) (Const true)) s))" have "?P1 s" proof(cut_tac a1 a2 b1 c1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume c1: "((formEval (neg (eqn (IVar (Ident ''ExGntd'')) (Const true))) s))" have "?P1 s" proof(cut_tac a1 a2 b1 c1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately have "invHoldForRule s f r (invariants N)" by satx } moreover { assume b1: "(i~=p__Inv2)" have "((formEval (eqn (IVar (Ident ''ExGntd'')) (Const true)) s))\<or>((formEval (neg (eqn (IVar (Ident ''ExGntd'')) (Const true))) s))" by auto moreover { assume c1: "((formEval (eqn (IVar (Ident ''ExGntd'')) (Const true)) s))" have "?P1 s" proof(cut_tac a1 a2 b1 c1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume c1: "((formEval (neg (eqn (IVar (Ident ''ExGntd'')) (Const true))) s))" have "?P2 s" proof(cut_tac a1 a2 b1 c1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately have "invHoldForRule s f r (invariants N)" by satx } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_SendGntEVsinv__9: assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendGntE N i)" and a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__9 p__Inv2)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain i where a1:"i\<le>N\<and>r=n_SendGntE N i" apply fastforce done from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__9 p__Inv2" apply fastforce done have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(i=p__Inv2)" have "?P3 s" apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Para (Ident ''Chan3'') p__Inv2) ''Cmd'')) (Const InvAck)) (eqn (IVar (Para (Ident ''ShrSet'') p__Inv2)) (Const false))))" in exI, auto) done then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(i~=p__Inv2)" have "?P3 s" apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Para (Ident ''Chan3'') p__Inv2) ''Cmd'')) (Const InvAck)) (eqn (IVar (Para (Ident ''ShrSet'') p__Inv2)) (Const false))))" in exI, auto) done then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_SendReqE__part__1Vsinv__9: assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendReqE__part__1 i" and a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__9 p__Inv2)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_RecvGntSVsinv__9: assumes a1: "\<exists> i. i\<le>N\<and>r=n_RecvGntS i" and a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__9 p__Inv2)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_SendGntSVsinv__9: assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendGntS i" and a2: "(\<exists> p__Inv2. 
p__Inv2\<le>N\<and>f=inv__9 p__Inv2)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_RecvReqEVsinv__9: assumes a1: "\<exists> i. i\<le>N\<and>r=n_RecvReqE N i" and a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__9 p__Inv2)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_RecvGntEVsinv__9: assumes a1: "\<exists> i. i\<le>N\<and>r=n_RecvGntE i" and a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__9 p__Inv2)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_SendInv__part__0Vsinv__9: assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendInv__part__0 i" and a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__9 p__Inv2)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_SendReqE__part__0Vsinv__9: assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendReqE__part__0 i" and a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__9 p__Inv2)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_SendInv__part__1Vsinv__9: assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendInv__part__1 i" and a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__9 p__Inv2)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_SendReqSVsinv__9: assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendReqS i" and a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__9 p__Inv2)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_RecvReqSVsinv__9: assumes a1: "\<exists> i. i\<le>N\<and>r=n_RecvReqS N i" and a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__9 p__Inv2)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done end
Mathews was rewarded for his service in Zanzibar by the British government, which appointed him a Companion of the Order of St Michael and St George in 1880 and a Companion of the Order of the Bath on 24 May 1889. Despite becoming renowned in East Africa as a man who ran a fair administration and was strict with criminals, unhappiness with effective British rule and his halting of the slave trade led some Arabs to petition the Sultan for his removal in 1892. In 1893 Mathews purchased the island of Changuu for the government. He intended it to be used as a prison, but it never housed prisoners and was instead used to quarantine yellow fever cases before its present use as a conservation area for giant tortoises. Mathews was appointed a Knight Commander of the Order of St Michael and St George in 1894. He was also awarded membership of the Order of the Crown by the German government.
theory Strong_Convexity imports Main "HOL-Analysis.Analysis" "HOL-Analysis.Convex" begin definition strong_convex_on :: "'a::euclidean_space set\<Rightarrow> ('a \<Rightarrow> real) \<Rightarrow> real \<Rightarrow> bool" where "strong_convex_on s f k \<longleftrightarrow> (\<forall>x\<in>s. \<forall>y\<in>s. \<forall>u\<ge>0. \<forall>v\<ge>0. u + v = 1 \<longrightarrow> f (u *\<^sub>R x + v *\<^sub>R y) \<le> u * f x + v * f y - (k/2) * u * v * norm(x-y) * norm(x-y) )" lemma help2_3 : "norm (x+y)^2 = norm x ^ 2 + 2 *\<^sub>R (inner x y) + norm y ^ 2" for x y :: "'a::euclidean_space" by (smt inner_commute inner_left_distrib power2_norm_eq_inner scaleR_2) lemma help2_31 : "norm (x - y)^2 = norm x ^ 2 - 2 *\<^sub>R (inner x y) + norm y ^ 2" for x y :: "'a::euclidean_space" using help2_3 by (simp add: inner_commute inner_diff_right power2_norm_eq_inner) lemma help2_2 : "(norm (u *\<^sub>R x + v *\<^sub>R y))^2 = norm (u *\<^sub>R x)^2 + (2 * u * v) *\<^sub>R (inner x y) + norm (v *\<^sub>R y)^2 " for x y :: "'a::euclidean_space" by (simp add: help2_3) lemma help2_4: "norm (u *\<^sub>R x)^2 = u^2 * norm(x)^2" proof - have "abs(u)^2 = u^2" by simp then show "norm (u *\<^sub>R x)^2 = u^2 * norm(x)^2" using norm_scaleR power2_eq_square by (simp add: power_mult_distrib) qed lemma sq_norm_strong_convex: "strong_convex_on s (\<lambda> w. k * norm(w) * norm(w)) (2*k)" for s :: "'a::euclidean_space set" proof - let ?f = "(\<lambda> w. k * norm(w) * norm(w))" have "\<forall> x\<in>s. \<forall>y\<in>s. \<forall>u\<ge>0. \<forall>v\<ge>0.( u + v = 1 \<longrightarrow> ?f (u *\<^sub>R x + v *\<^sub>R y) \<le> u * ?f x + v * ?f y - (2*k/2) * u * v * norm(x-y) * norm(x-y) )" proof (rule)+ fix x assume"x\<in>s" fix y assume"y\<in>s" fix u assume"(u::real) \<ge> 0" fix v assume"(v::real) \<ge> 0" assume "u+v = 1" then show " k *norm (u *\<^sub>R x + v *\<^sub>R y) * norm (u *\<^sub>R x + v *\<^sub>R y) \<le> u * (k * norm x * norm x) + v *(k * norm y * norm y) - 2 * k / 2 * u * v *norm (x - y) * norm (x - y)" proof - have "?f (u *\<^sub>R x + v *\<^sub>R y) = k*(norm (u *\<^sub>R x + v *\<^sub>R y))^2" by (simp add: power2_eq_square) also have "k*(norm (u *\<^sub>R x + v *\<^sub>R y))^2 = k*(norm (u *\<^sub>R x)^2 + (2 * u * v) * (inner x y) + norm (v *\<^sub>R y)^2)" by (simp add: help2_2) also have " k*(norm (u *\<^sub>R x)^2 + (2 * u * v) * (inner x y) + norm (v *\<^sub>R y)^2) = k*(u^2 * norm (x)^2 + (2 * u * v) * (inner x y) + v^2 * norm (y)^2)" using help2_4 by metis also have "k*(u^2 * norm (x)^2 + (2 * u * v) * (inner x y) + v^2 * norm (y)^2) = k*u*norm(x)^2 + (2 * k * u * v) * (inner x y) + k* v * norm (y)^2 - k * u * v * norm(x)^2 - k * u * v *norm(y)^2" using `u+v = 1` by algebra also have "k*u*norm(x)^2 + (2 * k * u * v) * (inner x y) + k* v * norm (y)^2 - k * u * v * norm(x)^2 - k * u * v *norm(y)^2 = k*u*norm(x)^2 + k* v * norm (y)^2 - (k * u * v) * ( norm(x)^2 - 2 * (inner x y) + norm(y)^2)" using distrib_left help2_31 by argo also have " k*u*norm(x)^2 + k* v * norm (y)^2 - (k * u * v) * ( norm(x)^2 - 2 * (inner x y) + norm(y)^2) = k*u*norm(x)^2 + k* v * norm (y)^2 - (k * u * v) * norm(x - y)^2" by (simp add: help2_31) finally have "?f (u *\<^sub>R x + v *\<^sub>R y) = u * ?f x + v * ?f y - (2*k/2) * u * v * norm(x-y) * norm(x-y)" by (simp add: power2_eq_square help2_31) then show ?thesis by linarith qed qed then show ?thesis unfolding strong_convex_on_def by blast qed instantiation "fun" :: (type, plus) plus begin definition fun_plus_def: "A + B = (\<lambda>x. 
A x + B x)" lemma minus_apply [simp, code]: "(A + B) x = A x + B x" by (simp add: fun_plus_def) instance .. end instantiation "fun" :: (ab_semigroup_add, ab_semigroup_add) ab_semigroup_add begin instance proof fix x y z :: "'a => 'b" show "x + y + z = x + (y + z)" unfolding fun_plus_def by (simp add: linordered_field_class.sign_simps(1)) next fix x y :: "'a => 'b" show "x + y = y + x" unfolding fun_plus_def by (simp add: linordered_field_class.sign_simps(2)) qed end instantiation "fun" :: (comm_monoid_add, comm_monoid_add) comm_monoid_add begin definition zero_fun_def: "0 == (\<lambda>x. 0)" instance proof fix a :: "'a => 'b" show "0 + a = a" unfolding zero_fun_def fun_plus_def by simp qed end lemma convex_fun_add: assumes "convex_on s f" "convex_on s g" shows "convex_on s (f + g)" proof - have "(f + g) = (\<lambda> x. f x + g x)" using fun_plus_def by auto moreover have "convex_on s (\<lambda>x. f x + g x)" using assms convex_on_add by auto ultimately show "convex_on s (f + g)" by auto qed lemma strong_convex_sum: "strong_convex_on s f k \<and> convex_on s g \<longrightarrow> strong_convex_on s ( f + g) k" proof assume "strong_convex_on s f k \<and> convex_on s g" then show "strong_convex_on s (f + g) k" proof have "strong_convex_on s f k" using `strong_convex_on s f k \<and> convex_on s g` by simp have "convex_on s g" using `strong_convex_on s f k \<and> convex_on s g` by simp have "(\<forall>x\<in>s. \<forall>y\<in>s. \<forall>u\<ge>0. \<forall>v\<ge>0. u + v = 1 \<longrightarrow> (f+g) (u *\<^sub>R x + v *\<^sub>R y) \<le> u * (f+g) x + v * (f+g) y - (k/2) * u * v * norm(x-y) * norm(x-y) )" proof (rule)+ fix x assume"x\<in>s" fix y assume"y\<in>s" fix u assume"(u::real) \<ge> 0" fix v assume"(v::real) \<ge> 0" assume "u+v = 1" then show "(f+g) (u *\<^sub>R x + v *\<^sub>R y) \<le> u * (f+g) x + v * (f+g) y - (k/2) * u * v * norm(x-y) * norm(x-y)" proof - have 1: "f (u *\<^sub>R x + v *\<^sub>R y) \<le> u * f x + v * f y - (k/2) * u * v * norm(x-y) * norm(x-y)" using \<open>0 \<le> u\<close> \<open>0 \<le> v\<close> \<open>u + v = 1\<close> \<open>x \<in> s\<close> \<open>y \<in> s\<close> `strong_convex_on s f k` unfolding strong_convex_on_def by blast have 2: " g (u *\<^sub>R x + v *\<^sub>R y) \<le> u * g x + v * g y" using \<open>0 \<le> u\<close> \<open>0 \<le> v\<close> \<open>u + v = 1\<close> \<open>x \<in> s\<close> \<open>y \<in> s\<close> `convex_on s g ` unfolding convex_on_def by blast have 3:"f (u *\<^sub>R x + v *\<^sub>R y) + g (u *\<^sub>R x + v *\<^sub>R y) \<le> u * f x + v * f y - (k/2) * u * v * norm(x-y) * norm(x-y) + u * g x + v * g y " using 1 2 by linarith then show ?thesis by (simp add: distrib_left) qed qed then show ?thesis unfolding strong_convex_on_def by auto qed qed lemma help7: assumes "(l::real)<0" assumes "\<forall>x. norm (f x - l)< -l" shows "\<forall>x. f x < 0" proof (rule ccontr) assume "\<not> (\<forall>x. f x < 0)" then show False using assms(2) real_norm_def by smt qed lemma LIM_fun_less_zero1: "f \<midarrow>a\<rightarrow> l \<Longrightarrow> l < 0 \<Longrightarrow> \<exists>r>0. \<forall>x. x \<noteq> a \<and> norm(a - x) < r \<longrightarrow> f x < 0" for a :: "'b::euclidean_space" and l :: "real" proof - assume "f \<midarrow>a\<rightarrow> l" "l < 0" then have "\<exists>r. 0 < r \<and> (\<forall>x. x \<noteq> a \<and> norm(a - x) < r \<longrightarrow> norm (f x - l)< -l)" using LIM_D[of f l a "-l"] by (simp add: norm_minus_commute) then obtain r where "0 < r" "(\<forall>x. 
x \<noteq> a \<and> norm(a - x) < r \<longrightarrow> norm (f x - l)< -l)" by auto then have "(\<forall>x. x \<noteq> a \<and> norm(a - x) < r \<longrightarrow> f x < 0)" using `l<0` help7 by auto then show ?thesis using \<open>0 < r\<close> by blast qed lemma metric_LIM_le2: fixes a :: "real" assumes "f \<midarrow>a\<rightarrow> (l::real)" assumes "a\<ge>0" and "\<forall>x>a. f x \<ge> 0" shows " l \<ge> 0" proof (rule ccontr) assume "\<not> (l \<ge> 0)" then have " l < 0" by simp then have " \<exists>r>0. \<forall>x. x \<noteq> a \<and> norm(a - x) < r \<longrightarrow> f x < 0" using assms(1) LIM_fun_less_zero1 by blast then have "\<exists>r>0. \<forall>x>a. x \<noteq> a \<and> norm(a - x) < r \<longrightarrow> f x < 0 \<and> f x \<ge> 0" using assms(3) by blast then have "\<exists>r>0. \<forall>x>a. norm(a - x) \<ge> r" by force then obtain r where "r>0" and " \<forall>x>a. norm(a - x) \<ge> r" by auto then have 1: "\<forall>x>a. norm(a - x) \<ge> r" by auto have "\<exists>k. k>0 \<and> k <r " using `r>0` by (simp add: dense) then obtain k where "k>0" and "k < r" by auto then have "\<exists> x. x>a \<and> x-a = k" by smt then have "\<exists> x>a. norm(a-x) < r \<and> norm(a - x) \<ge> r" using `k<r`1 by auto then show False by linarith qed lemma metric_LIM_le_zero: fixes a :: "real" assumes "f \<midarrow>a\<rightarrow> (l::real)" assumes "a\<ge>0" and "\<exists>r>0. \<forall>x>a. norm(a-x) < r \<longrightarrow> f x \<ge> 0" shows " l \<ge> 0" proof (rule ccontr) assume "\<not> (l \<ge> 0)" then have " l < 0" by simp then have " \<exists>r>0. \<forall>x. x \<noteq> a \<and> norm(a - x) < r \<longrightarrow> f x < 0" using assms(1) LIM_fun_less_zero1 by blast then obtain r where "r>0" and 1: "\<forall>x>a. norm(a - x) < r \<longrightarrow> f x < 0" by auto obtain r1 where "r1>0" and 2: "\<forall>x>a. norm(a-x) < r1 \<longrightarrow> f x \<ge> 0" using assms(3) by auto let ?min_r = "min r1 r" have 3: " r \<ge> ?min_r " by auto have 4: " r1 \<ge> ?min_r " by auto have "?min_r>0" using `r>0` `r1>0` by auto then have 5: "\<forall>x>a. norm(a - x) < ?min_r \<longrightarrow> f x < 0" using 1 3 by auto then have "\<forall>x>a. norm(a - x) < ?min_r \<longrightarrow> f x \<ge> 0" using 2 4 by auto then have "\<forall>x>a. norm(a - x) < ?min_r \<longrightarrow> f x < 0 \<and> f x \<ge> 0" using 5 by blast then have 6: "\<forall>x>a. norm(a - x) \<ge> ?min_r" by force then have "\<exists>k. k>0 \<and> k <?min_r " using `?min_r>0` dense by blast then obtain k where "k>0" and "k < ?min_r" by auto then have "\<exists> x. x>a \<and> x-a = k" by smt then have "\<exists> x>a. norm(a-x) < ?min_r \<and> norm(a - x) \<ge> ?min_r" using `k<?min_r` 6 by auto then show False using LIM_fun_less_zero1 by linarith qed lemma help_8: "x > 0 \<Longrightarrow> dist t 0 < r/x \<longrightarrow> norm(t) * x < r" proof assume " x>0" assume "dist t 0 < r/x" then have " norm(t) * x < (r/x) * x" using `dist t 0 < r/x` `x>0` mult_less_le_imp_less[of "norm t" "r/x" "x" "x"] by auto then show "norm(t) * x < r" using `x>0` nonzero_mult_div_cancel_right by auto qed lemma real_collapse [simp]: "(1 - u) * a * b + (-1) * a * b = - u * a * b" for a :: "real" by (simp add: algebra_simps) lemma real_left_commute: "a * b * x = b * a * x" for a :: real by (simp add: mult.commute) lemma strongly_convex_min: assumes "strong_convex_on s f k" assumes "x \<in> s" assumes "\<forall>y\<in>s. 
(f x \<le> f y)" assumes "w \<in> s" assumes "convex s" shows "f w - f x \<ge> (k/2)*norm(w - x)^2" proof (cases "w = x") case True then show ?thesis by auto next case False then show ?thesis proof(cases "k = 0") case True then show ?thesis using assms(3) assms(4) by auto next case False then show ?thesis proof - have "(\<forall>x\<in>s. \<forall>y\<in>s. \<forall>u\<ge>0. \<forall>v\<ge>0. u + v = 1 \<longrightarrow> f (u *\<^sub>R x + v *\<^sub>R y) \<le> u * f x + v * f y - (k/2) * u * v * norm(x-y)^2)" using assms(1) unfolding strong_convex_on_def by (simp add: power2_eq_square mult.commute mult.left_commute) then have 0:" \<forall>u\<ge>0. \<forall>v\<ge>0. u + v = 1 \<longrightarrow> f (u *\<^sub>R w + v *\<^sub>R x) \<le> u * f w + v * f x - (k/2) * u * v * norm(w-x)^2" using assms(2) assms(4) by blast have "\<forall>u>0. \<forall>v\<ge>0. u + v = 1 \<longrightarrow> (f (u *\<^sub>R w + (1-u) *\<^sub>R x) - f x )/u \<le> f w - f x - (k/2) * (1-u) * norm(w-x)^2" proof(rule)+ fix u assume "(u::real)>0" fix v assume "(v::real) \<ge> 0" assume "u + v = 1" then show "(f (u *\<^sub>R w + (1-u) *\<^sub>R x) - f x )/u \<le> f w - f x - (k/2) * (1-u) * norm(w-x)^2" proof - have "f (u *\<^sub>R w + (1-u) *\<^sub>R x) \<le> u * f w + (1-u) * f x - (k/2) * u * (1-u) * norm(w-x)^2" using `u + v = 1` 0 \<open>0 < u\<close> \<open>0 \<le> v\<close> by auto then have " f (u *\<^sub>R w + (1-u) *\<^sub>R x)/u \<le> (u * f w + (1-u) * f x - (k/2) * u * (1-u) * norm(w-x)^2)/u" using `u>0` by (meson divide_right_mono less_eq_real_def) then have " f (u *\<^sub>R w + (1-u) *\<^sub>R x)/u \<le> (u * f w)/u + ((1-u) * f x)/u - ((k/2) * u * (1-u) * norm(w-x)^2)/u" by (simp add: add_divide_distrib diff_divide_distrib) then have " f (u *\<^sub>R w + (1-u) *\<^sub>R x)/u \<le> f w + ((1-u) / u)* f x - (k/2) * (1-u) * norm(w-x)^2" using \<open>0 < u\<close> add_divide_distrib diff_divide_distrib by auto then have " f (u *\<^sub>R w + (1-u) *\<^sub>R x)/u \<le> f w + (1/u)* f x + (-u/u)*f x - (k/2) * (1-u) * norm(w-x)^2" by (simp add: diff_divide_distrib Groups.mult_ac(2) add_diff_eq right_diff_distrib') then have " f (u *\<^sub>R w + (1-u) *\<^sub>R x)/u - (1/u)* f x \<le> f w - f x - (k/2) * (1-u) * norm(w-x)^2" using diff_divide_distrib Groups.mult_ac(2) add_diff_eq right_diff_distrib' `u>0` by force then show ?thesis by (simp add: diff_divide_distrib) qed qed then have 1:"\<forall>u>0. u <= 1 \<longrightarrow> (\<lambda> t. (f (t *\<^sub>R w + (1-t) *\<^sub>R x) - f x )/t) u \<le> (\<lambda> t. f w - f x - (k/2) * (1-t) * norm(w-x)^2) u" by smt have "\<forall>u>0. u <= 1 \<longrightarrow> u *\<^sub>R w + (1 - u) *\<^sub>R x \<in> s " using assms(2) assms(4) assms(5) by (simp add: convex_def) then have "\<forall>u>0. u <= 1 \<longrightarrow> (\<lambda> t. (f (t *\<^sub>R w + (1-t) *\<^sub>R x) - f x )/t) u \<ge> 0" using assms(3) assms(2) assms(4) by auto then have 11 : "\<forall>u>0. u <= 1 \<longrightarrow> 0 \<le> (\<lambda> t. f w - f x - (k/2) * (1-t) * norm(w-x)^2) u" using 1 by fastforce let ?f = "(\<lambda> t. f w - f x - (k/2) * (1-t) * norm(w-x)^2)" let ?L = "(f w - f x - (k/2) * norm(w-x)^2)" have "\<forall>t. dist (?f t) ?L = norm(?f t - ?L)" using dist_norm by blast then have 2: "\<forall>t. norm(?f t - ?L) = norm( (k/2) * (1-t) * norm(w-x)^2 + (-1)* (k/2) * norm(w-x)^2)" by auto then have 3: "\<forall>t. 
norm(?f t - ?L) = norm(t*(k/2) * norm(w-x)^2)" using "2" real_left_commute real_collapse real_left_commute by (metis (no_types, hide_lams) mult_minus_left norm_minus_cancel) then have "\<forall>t. norm(t*(k/2) * norm(w-x)^2) = norm(t) * norm(k/2) * norm(w-x)^2" using norm_ge_zero norm_mult power2_eq_square real_norm_def by smt then have 5:"\<forall>t. norm(?f t - ?L) = norm(t) * norm(w-x)^2 * norm((k/2))" using 3 by simp have 55: "norm(w-x)^2 * norm((k/2)) > 0" using `w \<noteq> x` `k \<noteq> 0` by auto then have "\<forall>r. \<forall>t. t \<noteq> 0 \<and> dist t 0 < ( r/(norm(w-x)^2 * norm(k/2))) \<longrightarrow> norm(t) * norm(w-x)^2 * norm((k/2)) < r" by (simp add: help_8 mult.assoc) then have 6: "\<forall>r. \<forall>t. t \<noteq> 0 \<and> dist t 0 < ( r/(norm(w-x)^2 * norm(k/2))) \<longrightarrow> dist (?f t) ?L < r" using 5 dist_norm by metis then have "\<forall>r>0. (r/(norm(w-x)^2 * norm(k/2))) > 0" using divide_pos_pos 55 by blast then have "\<forall>r > 0. \<exists>s > 0. \<forall>t. t \<noteq> 0 \<and> dist t 0 < s \<longrightarrow> dist (?f t) ?L < r" using 6 by auto then have 7:" ?f \<midarrow>0\<rightarrow> ?L" unfolding LIM_def by auto then have "\<forall>u>0. u <= 1 \<longrightarrow> 0 \<le> ?f u" using 11 by simp then have "\<exists>r>0. \<forall>u>0. u \<le> r \<longrightarrow> 0 \<le> ?f u" using zero_less_one by blast then have "\<exists>r>0.\<forall>u>0. norm (0 -u) < r \<longrightarrow> 0 \<le> ?f u" by auto then have "?L \<ge> 0" using metric_LIM_le_zero using 7 by blast then show ?thesis by auto qed qed qed lemma strong_conv_if_eq: " f = g \<Longrightarrow> strong_convex_on s f k \<Longrightarrow> strong_convex_on s g k" using HOL.subst by auto lemma strong_conv_then_conv: assumes k_pos: "k \<ge> 0" shows "strong_convex_on s f k \<Longrightarrow> convex_on s f" proof - assume "strong_convex_on s f k" then have 1:" (\<forall>x\<in>s. \<forall>y\<in>s. \<forall>u\<ge>0. \<forall>v\<ge>0. u + v = 1 \<longrightarrow> f (u *\<^sub>R x + v *\<^sub>R y) \<le> u * f x + v * f y - (k/2) * u * v * norm(x-y) * norm(x-y) )" unfolding strong_convex_on_def by auto have "\<forall>x\<in>s. \<forall>y\<in>s. \<forall>u\<ge>0. \<forall>v\<ge>0. u + v = 1 \<longrightarrow> (k/2) * u * v * norm(x-y) * norm(x-y) \<ge> 0" using k_pos by simp then have "\<forall>x\<in>s. \<forall>y\<in>s. \<forall>u\<ge>0. \<forall>v\<ge>0. u + v = 1 \<longrightarrow> u * f x + v * f y - (k/2) * u * v * norm(x-y) * norm(x-y) \<le> u * f x + v * f y " by auto then have "(\<forall>x\<in>s. \<forall>y\<in>s. \<forall>u\<ge>0. \<forall>v\<ge>0. u + v = 1 \<longrightarrow> f (u *\<^sub>R x + v *\<^sub>R y) \<le> u * f x + v * f y)" using 1 by smt then show "convex_on s f" by (simp add: convex_on_def) qed end
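(* A possible usage sketch (untested, illustrative only): strong_convex_sum together
   with sq_norm_strong_convex should give strong convexity of a ridge-regularized
   objective, along the lines of

     lemma
       fixes s :: "'a::euclidean_space set"
       assumes "convex_on s g"
       shows "strong_convex_on s ((\<lambda> w. k * norm(w) * norm(w)) + g) (2*k)"
       using sq_norm_strong_convex strong_convex_sum assms by blast
*)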
/* * * Copyright 2015 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include <iostream> #include <fstream> #include <memory> #include <string> #include <sys/types.h> #include <dirent.h> #include <sys/time.h> #include <sstream> #include <boost/format.hpp> #if !_MSC_VER && !__clang__ && (__GNUC__ < 4 || (__GNUC__ == 4 && (__GNUC_MINOR__ <= 8))) #include <boost/regex.hpp> using boost::regex; using boost::regex_match; using boost::match_results; using boost::smatch; #else #include <regex> using std::regex; using std::regex_match; using std::match_results; using std::smatch; #endif #include <chrono> #include <cstdio> #include <thread> #include "json.hpp" #include <signal.h> #include <grpcpp/grpcpp.h> #include <google/protobuf/util/json_util.h> #include <grpc/language-agent-v2/trace.grpc.pb.h> #include <grpc/language-agent-v2/trace.pb.h> #include <grpc/common/trace-common.pb.h> #include <grpc/register/InstancePing.grpc.pb.h> #include <grpc/register/InstancePing.pb.h> using grpc::Channel; using grpc::ClientContext; using grpc::Status; using grpc::ClientWriter; using json = nlohmann::json; class GreeterClient { public: GreeterClient(std::shared_ptr<Channel> channel) : stub_(TraceSegmentReportService::NewStub(channel)), pingStub_(ServiceInstancePing::NewStub(channel)) {} int collect(UpstreamSegment request) { Commands reply; ClientContext context; std::unique_ptr<ClientWriter<UpstreamSegment>> writer(stub_->collect(&context, &reply)); if (!writer->Write(request)) { } writer->WritesDone(); Status status = writer->Finish(); if (status.ok()) { std::cout << "send ok!" << std::endl; } else { std::cout << "send error!" << status.error_message() << std::endl; } return 1; } int heartbeat(ServiceInstancePingPkg request) { Commands reply; ClientContext context; Status status = pingStub_->doPing(&context, request, &reply); if (status.ok()) { std::cout << "send heartbeat ok!" << std::endl; } else { std::cout << "send heartbeat error!" << status.error_message() << std::endl; } return 1; } private: std::unique_ptr<TraceSegmentReportService::Stub> stub_; std::unique_ptr<ServiceInstancePing::Stub> pingStub_; }; int main(int argc, char **argv) { for (int i = 0; i < argc; ++i) { if (std::strncmp("-h", argv[i], sizeof(argv[i]) - 1) == 0 || std::strncmp("--help", argv[i], sizeof(argv[i]) - 1) == 0) { std::cout << "report_client grpc log_path" << std::endl; std::cout << "e.g. report_client 120.0.0.1:11800 /tmp" << std::endl; return 0; } } if (argc == 1) { std::cout << "report_client grpc log_path" << std::endl; std::cout << "e.g. 
report_client 120.0.0.1:11800 /tmp" << std::endl; return 0; } GreeterClient greeter(grpc::CreateChannel(argv[1], grpc::InsecureChannelCredentials())); std::map<int, int> instancePid; std::map<int, std::string> instanceUUID; std::map<int, long> sendTime; while (1) { struct dirent *dir; DIR *dp; if ((dp = opendir(argv[2])) == NULL) { std::cerr << "open directory error"; return 0; } // heartbeat for (auto &i: instancePid) { struct timeval tv; gettimeofday(&tv, NULL); if(tv.tv_sec - sendTime[i.first] > 40) { kill(instancePid[i.first], 0); } sendTime[i.first] = tv.tv_sec; std::cout << "send heartbeat ..." << std::endl; ServiceInstancePingPkg request; request.set_serviceinstanceid(i.first); request.set_time(tv.tv_sec*1000 + tv.tv_usec/1000); request.set_serviceinstanceuuid(instanceUUID[i.first]); greeter.heartbeat(request); } while ((dir = readdir(dp)) != NULL) { if (strcmp(".", dir->d_name) == 0 || strcmp("..", dir->d_name) == 0) { continue; } std::string fileName = std::string(argv[2]) + "/" + dir->d_name; const regex pattern(std::string(argv[2]) + "/skywalking\\.(\\d+)-\\d+\\.log"); if (regex_match(fileName, pattern)) { match_results<std::string::const_iterator> result; bool valid = regex_match(fileName, result, pattern); if (valid) { struct timeval tv; gettimeofday(&tv,NULL); long fileTime = std::stol(result[1]); long localTime = tv.tv_sec - 3; if (fileTime < localTime) { std::ifstream file; file.open(fileName, std::ios::in); if (file.is_open()) { std::cout << "send `" << fileName << "` to skywalking service" << std::endl; std::string strLine; while (std::getline(file, strLine)) { if (strLine.empty()) { continue; } json j; try { j = json::parse(strLine); }catch (...) { remove(fileName.c_str()); continue; } UpstreamSegment request; smatch traceResult; std::string tmp(j["segment"]["traceSegmentId"].get<std::string>()); bool valid = regex_match(tmp, traceResult, regex("([\\-0-9]+)\\.(\\d+)\\.(\\d+)")); if (valid) { // add to map if(!instancePid[j["application_instance"]]) { instancePid[j["application_instance"]] = j["pid"]; instanceUUID[j["application_instance"]] = j["uuid"]; sendTime[j["application_instance"]] = 0; } for (int i = 0; i < j["globalTraceIds"].size(); i++) { std::cout << "send " << j["globalTraceIds"][i].get<std::string>() << " to skywalking service" << std::endl; smatch globalTraceResult; std::string tmp(j["globalTraceIds"][i].get<std::string>()); bool valid = regex_match(tmp, globalTraceResult, regex("(\\-?\\d+)\\.(\\d+)\\.(\\d+)")); UniqueId *globalTrace = request.add_globaltraceids(); long long idp1 = std::stoll(globalTraceResult[1]); long long idp2 = std::stoll(globalTraceResult[2]); long long idp3 = std::stoll(globalTraceResult[3]); globalTrace->add_idparts(idp1); globalTrace->add_idparts(idp2); globalTrace->add_idparts(idp3); } UniqueId *uniqueId = new UniqueId; long long idp1 = std::stoll(traceResult[1]); long long idp2 = std::stoll(traceResult[2]); long long idp3 = std::stoll(traceResult[3]); uniqueId->add_idparts(idp1); uniqueId->add_idparts(idp2); uniqueId->add_idparts(idp3); SegmentObject traceSegmentObject; traceSegmentObject.set_allocated_tracesegmentid(uniqueId); traceSegmentObject.set_serviceid(j["application_id"].get<int>()); traceSegmentObject.set_serviceinstanceid(j["application_instance"].get<int>()); traceSegmentObject.set_issizelimited(j["segment"]["isSizeLimited"].get<int>()); auto spans = j["segment"]["spans"]; for (int i = 0; i < spans.size(); i++) { SpanObjectV2 *spanObject = traceSegmentObject.add_spans(); 
spanObject->set_spanid(spans[i]["spanId"].get<int>()); spanObject->set_parentspanid(spans[i]["parentSpanId"].get<int>()); spanObject->set_starttime(spans[i]["startTime"]); spanObject->set_endtime(spans[i]["endTime"]); spanObject->set_operationname(spans[i]["operationName"]); std::string peer(spans[i]["peer"].get<std::string>()); int spanType = spans[i]["spanType"].get<int>(); if (spanType == 0) { spanObject->set_spantype(SpanType::Entry); } else if (spanType == 2) { spanObject->set_spantype(SpanType::Local); } else if (spanType == 1) { spanObject->set_spantype(SpanType::Exit); } if(spanType == 1 && !peer.empty()) { spanObject->set_peer(peer); } int spanLayer = spans[i]["spanLayer"].get<int>(); if (spanLayer == 3) { spanObject->set_spanlayer(SpanLayer::Http); } spanObject->set_componentid(spans[i]["componentId"].get<int>()); spanObject->set_iserror(spans[i]["isError"].get<int>()); // refs auto refs = spans[i]["refs"]; for (int k = 0; k < refs.size(); k++) { smatch traceResult; std::string tmp(refs[k]["parentTraceSegmentId"].get<std::string>()); bool valid = regex_match(tmp, traceResult, regex("([\\-0-9]+)\\.(\\d+)\\.(\\d+)")); UniqueId *uniqueIdTmp = new UniqueId; long long idp1 = std::stoll(traceResult[1]); long long idp2 = std::stoll(traceResult[2]); long long idp3 = std::stoll(traceResult[3]); uniqueIdTmp->add_idparts(idp1); uniqueIdTmp->add_idparts(idp2); uniqueIdTmp->add_idparts(idp3); SegmentReference *r = spanObject->add_refs(); r->set_allocated_parenttracesegmentid(uniqueIdTmp); r->set_parentspanid(refs[k]["parentSpanId"].get<int>()); r->set_parentserviceinstanceid(refs[k]["parentApplicationInstanceId"].get<int>()); r->set_networkaddress(refs[k]["networkAddress"].get<std::string>()); r->set_entryserviceinstanceid(refs[k]["entryApplicationInstanceId"].get<int>()); r->set_entryendpoint(refs[k]["entryServiceName"].get<std::string>()); r->set_parentendpoint(refs[k]["parentServiceName"].get<std::string>()); } if(!peer.empty()) { KeyStringValuePair *url = spanObject->add_tags(); url->set_key("url"); url->set_value(boost::str(boost::format("http://%s%s") % peer % spans[i]["operationName"].get<std::string>())); } } std::string test; traceSegmentObject.SerializeToString(&test); request.set_segment(test); greeter.collect(request); } } remove(fileName.c_str()); } } } } } closedir(dp); std::this_thread::sleep_for(std::chrono::milliseconds(500)); } // std::string user("world"); // std::string reply = greeter.SayHello(user); // std::cout << "Greeter received: " << reply << std::endl; return 0; }
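// Note on the flag parsing in main() above: std::strncmp("-h", argv[i], sizeof(argv[i]) - 1)
// bounds the comparison by sizeof(char*) (typically 8 bytes) rather than by the argument's
// length, and only matches correctly because strncmp stops at the terminating NUL.
// A sketch of a more direct comparison (illustrative; not part of the original program):
#include <cstring>

static bool is_flag(const char *arg, const char *flag) {
    // Exact string equality avoids any dependence on the comparison bound.
    return std::strcmp(arg, flag) == 0;
}
// Usage: if (is_flag(argv[i], "-h") || is_flag(argv[i], "--help")) { ... }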
Mechanical type systems introduced near the end of the 19th century, such as the Linotype and Monotype machines, allowed for some variable sentence spacing similar to hand composition. Just as these machines revolutionized the mass production of text, the advent of the typewriter around the same time revolutionized the creation of personal and business documents. But the typewriters' mechanical limitations did not allow variable spacing: typists could only choose the number of times they pressed the space bar. Typists in some English-speaking countries initially learned to insert three spaces between sentences to approximate the wider sentence spacing used in traditional printing, but later settled on two spaces, a practice that continued throughout the 20th century. This became known as English spacing, and marked a divergence from French typists, who continued to use French spacing.
The imaginary part of a complex number $z$ is zero if and only if the product of the imaginary part of $z$ and the complex conjugate of $z$ is zero.
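A brief justification, reading the product as $\operatorname{Im}(z) \cdot \bar{z}$ (this parse is an assumption, since the original sentence is ambiguous): if $\operatorname{Im}(z) = 0$ then the product is clearly zero; conversely, $\operatorname{Im}(z) \cdot \bar{z} = 0$ forces $\operatorname{Im}(z) = 0$ or $\bar{z} = 0$, and $\bar{z} = 0$ implies $z = 0$, hence $\operatorname{Im}(z) = 0$ in either case.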
-- Adding a non-negative number is expansive
-- =========================================

-- Prove that if a and b are real numbers and a is
-- non-negative, then b ≤ a + b

import data.real.basic

variables {a b : ℝ}

-- 1st proof
example (ha : 0 ≤ a) : b ≤ a + b :=
begin
  calc b = 0 + b : by rw zero_add
     ... ≤ a + b : by exact add_le_add_right ha b,
end

-- 2nd proof
example (ha : 0 ≤ a) : b ≤ a + b :=
begin
  calc b = 0 + b : (zero_add b).symm
     ... ≤ a + b : add_le_add_right ha b,
end

-- 3rd proof
example (ha : 0 ≤ a) : b ≤ a + b :=
begin
  calc b = 0 + b : by ring
     ... ≤ a + b : add_le_add_right ha b,
end

-- 4th proof
example (ha : 0 ≤ a) : b ≤ a + b :=
by simp [ha]

-- 5th proof
example (ha : 0 ≤ a) : b ≤ a + b :=
le_add_of_nonneg_left ha

-- 6th proof
example (ha : 0 ≤ a) : b ≤ a + b :=
by linarith

-- 7th proof
example (ha : 0 ≤ a) : b ≤ a + b :=
by finish

-- Prove that if a and b are real numbers and b is
-- non-negative, then a ≤ a + b

-- 1st proof
example (hb : 0 ≤ b) : a ≤ a + b :=
begin
  calc a = a + 0 : by rw add_zero
     ... ≤ a + b : by exact add_le_add_left hb a,
end

-- 2nd proof
example (hb : 0 ≤ b) : a ≤ a + b :=
begin
  calc a = a + 0 : (add_zero a).symm
     ... ≤ a + b : add_le_add_left hb a,
end

-- 3rd proof
example (hb : 0 ≤ b) : a ≤ a + b :=
begin
  calc a = a + 0 : by ring
     ... ≤ a + b : add_le_add_left hb a,
end

-- 4th proof
example (hb : 0 ≤ b) : a ≤ a + b :=
by simp [hb]

-- 5th proof
example (hb : 0 ≤ b) : a ≤ a + b :=
le_add_of_nonneg_right hb

-- 6th proof
example (hb : 0 ≤ b) : a ≤ a + b :=
by linarith

-- 7th proof
example (hb : 0 ≤ b) : a ≤ a + b :=
by finish

-- 8th proof
example (hb : 0 ≤ b) : a ≤ a + b :=
begin
  rw add_comm,
  apply le_add_of_nonneg_left,
  exact hb,
end

-- 9th proof
example (hb : 0 ≤ b) : a ≤ a + b :=
begin
  rw add_comm,
  exact le_add_of_nonneg_left hb,
end
(* Title: HOL/Auth/n_germanSymIndex_lemma_inv__33_on_rules.thy Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences *) header{*The n_germanSymIndex Protocol Case Study*} theory n_germanSymIndex_lemma_inv__33_on_rules imports n_germanSymIndex_lemma_on_inv__33 begin section{*All lemmas on causal relation between inv__33*} lemma lemma_inv__33_on_rules: assumes b1: "r \<in> rules N" and b2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__33 p__Inv2)" shows "invHoldForRule s f r (invariants N)" proof - have c1: "(\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d)\<or> (\<exists> i. i\<le>N\<and>r=n_SendReqS i)\<or> (\<exists> i. i\<le>N\<and>r=n_SendReqE__part__0 i)\<or> (\<exists> i. i\<le>N\<and>r=n_SendReqE__part__1 i)\<or> (\<exists> i. i\<le>N\<and>r=n_RecvReqS N i)\<or> (\<exists> i. i\<le>N\<and>r=n_RecvReqE N i)\<or> (\<exists> i. i\<le>N\<and>r=n_SendInv__part__0 i)\<or> (\<exists> i. i\<le>N\<and>r=n_SendInv__part__1 i)\<or> (\<exists> i. i\<le>N\<and>r=n_SendInvAck i)\<or> (\<exists> i. i\<le>N\<and>r=n_RecvInvAck i)\<or> (\<exists> i. i\<le>N\<and>r=n_SendGntS i)\<or> (\<exists> i. i\<le>N\<and>r=n_SendGntE N i)\<or> (\<exists> i. i\<le>N\<and>r=n_RecvGntS i)\<or> (\<exists> i. i\<le>N\<and>r=n_RecvGntE i)" apply (cut_tac b1, auto) done moreover { assume d1: "(\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_StoreVsinv__33) done } moreover { assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendReqS i)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_SendReqSVsinv__33) done } moreover { assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendReqE__part__0 i)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_SendReqE__part__0Vsinv__33) done } moreover { assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendReqE__part__1 i)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_SendReqE__part__1Vsinv__33) done } moreover { assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvReqS N i)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_RecvReqSVsinv__33) done } moreover { assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvReqE N i)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_RecvReqEVsinv__33) done } moreover { assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInv__part__0 i)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_SendInv__part__0Vsinv__33) done } moreover { assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInv__part__1 i)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_SendInv__part__1Vsinv__33) done } moreover { assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInvAck i)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_SendInvAckVsinv__33) done } moreover { assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvInvAck i)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_RecvInvAckVsinv__33) done } moreover { assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendGntS i)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_SendGntSVsinv__33) done } moreover { assume d1: "(\<exists> i. 
i\<le>N\<and>r=n_SendGntE N i)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_SendGntEVsinv__33) done } moreover { assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntS i)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_RecvGntSVsinv__33) done } moreover { assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntE i)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_RecvGntEVsinv__33) done } ultimately show "invHoldForRule s f r (invariants N)" by satx qed end
import data.nat.basic
import tactic

namespace tree

-- Let us define a binary tree in which every internal node has exactly two children.
-- Such a tree either consists of a single node, a leaf, or consists of a root that has a left and a right subtree.
inductive Tree
| Leaf : Tree
| Branch (left : Tree) (right : Tree) : Tree

-- It can be defined in the same way without naming the arguments.
inductive Tree2
| Leaf : Tree2
| Branch : Tree2 → Tree2 → Tree2

-- Open the namespace Tree so that we can write Leaf and Branch instead of Tree.Leaf and Tree.Branch.
open Tree

-- For algebraic data types, Lean automatically generates many functions and lemmas (see below).
#print prefix tree.Tree

/- Among them:

Injectivity of the constructors:
`Tree.Branch.inj : ∀ {l1 r1 l2 r2 : Tree}, Branch l1 r1 = Branch l2 r2 → l1 = l2 ∧ r1 = r2`

Pattern matching:
Tree.cases_on : Π {C : Tree → Sort l} (n : Tree), C Leaf → (Π (left right : Tree), C (left.Branch right)) → C n
If we can prove some property `C` for `Leaf`, and also for `Branch left right` for arbitrary `left` and `right`, then C holds for every tree. The same applies if we want to define a function on trees: to define a function on trees, it suffices to define it on each of the constructors.

Recursor:
Tree.rec_on : Π {C : Tree → Sort l} (n : Tree), C Leaf → (Π (left right : Tree), C left → C right → C (left.Branch right)) → C n
Induction over trees: if the property `C` holds for Leaf, and C (Branch left right) holds whenever C left and C right hold, then C holds for every tree. A function on trees can be defined analogously; for `cases_on` and `rec_on` there is no difference between statements about trees and functions on trees (as we know, statements <-> types, and proofs <-> elements of that type).
-/

variables {T L R : Tree}

-- Let us define the "number of leaves" function for a tree.
def leaves : Tree → ℕ
| Leaf := 1
| (Branch left right) := leaves left + leaves right

-- The parentheses in the example above matter: we have to show explicitly that this is a single argument of type `Tree`.

-- Define the "number of internal nodes" function for a tree yourself.
def internal : Tree → ℕ := sorry

-- Statements about trees can be proved by induction!
-- When the goal looks like an equality of natural numbers, use the `ring` or `linarith` tactic to close the goal.
-- Alternatively, you can practice applying `add_assoc` and `add_comm`.
lemma leaves_eq_internal_add_one : leaves T = 1 + internal T :=
begin
  induction T with L R hL hR,
  { sorry, },
  { sorry, },
end

-- Let us also add lemmas for `simp` so that `leaves Leaf` and `leaves (Branch l r)` reduce automatically.
-- They hold by definition, so their proof is simply `rfl` (a term, not a tactic).
@[simp] lemma leaves_Leaf : leaves Leaf = 1 := rfl
@[simp] lemma leaves_Branch : leaves (Branch L R) = leaves L + leaves R := rfl

-- Write the simp lemmas for `internal` yourself.

-- Now try to use simp as much as possible in the proofs.
-- After enabling the option on the line below, simp will print the rewrites that took place.
set_option trace.simplify.rewrite true

-- example : leaves T = internal T + 1 :=
-- begin
--   induction T with L R hL hR,
--   { simp, },
--   { simp [hL, hR], ring },
-- end

-- Turn the option off so that it does not get in the way below.
set_option trace.simplify.rewrite false

-- Practice for your Google interview and write a function that mirrors a binary tree.
def flip : Tree → Tree := sorry

-- And prove that it is an involution.
lemma flip_flip : flip (flip T) = T :=
begin
  sorry,
end

-- The number of nodes in a tree is at most 2^depth - 1.
-- If you have not played max_minigame, look for useful lemmas about `max a b` in `data.nat.basic`.
-- Hints:
-- Subtraction on ℕ is unpleasant to work with, so first move to a goal without subtraction: find a suitable function in the library for this.
-- Try to reduce the goal to a linear inequality, then call `linarith`.
-- The `norm_num` tactic helps prove statements about concrete numbers, for example `1 ≤ 2`.
def depth : Tree → ℕ := sorry
def size : Tree → ℕ := sorry

lemma size_le_pow2_depth_minus_one : size T ≤ 2 ^ depth T - 1 :=
begin
  sorry,
end

-- If you can think of anything else interesting to prove about trees, write it!

end tree
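-- A possible solution sketch for the `flip` exercise above, kept commented out so it
-- does not clash with the `sorry` placeholders (one candidate answer, not the only one):
--
-- def flip : Tree → Tree
-- | Leaf := Leaf
-- | (Branch l r) := Branch (flip r) (flip l)
--
-- @[simp] lemma flip_Leaf : flip Leaf = Leaf := rfl
-- @[simp] lemma flip_Branch : flip (Branch L R) = Branch (flip R) (flip L) := rfl
--
-- lemma flip_flip : ∀ T : Tree, flip (flip T) = T
-- | Leaf := rfl
-- | (Branch l r) := by simp [flip_flip l, flip_flip r]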
With five regular-season MVPs (tied for second place with Bill Russell; only Kareem Abdul-Jabbar has won more, six), six Finals MVPs (NBA record), and three All-Star MVPs, Jordan is the most decorated player ever to play in the NBA. Jordan finished among the top three in regular-season MVP voting a record 10 times, and was named one of the 50 Greatest Players in NBA History in 1996. He is one of only seven players in history to win an NCAA championship, an NBA championship, and an Olympic gold medal (doing so twice with the 1984 and 1992 U.S. men's basketball teams).
State Before: F : Type ?u.7176 α : Type u_1 β : Type ?u.7182 γ : Type ?u.7185 inst✝ : InvolutiveInv α s✝ t : Set α a✝ a : α s : Set α ⊢ (insert a s)⁻¹ = insert a⁻¹ s⁻¹ State After: no goals Tactic: rw [insert_eq, union_inv, inv_singleton, insert_eq]
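The rewrite chain mirrors the set-level computation $(\{a\} \cup s)^{-1} = \{a\}^{-1} \cup s^{-1} = \{a^{-1}\} \cup s^{-1}$ (a sketch of the reasoning, using the lemma names from the tactic): `insert_eq` unfolds `insert a s` to `{a} ∪ s`, `union_inv` distributes the inverse over the union, `inv_singleton` rewrites `{a}⁻¹` to `{a⁻¹}`, and the final `insert_eq` unfolds the right-hand side `insert a⁻¹ s⁻¹` to the same union, closing the goal.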
%% Submissions for peer-review must enable line-numbering %% using the lineno option in the \documentclass command. %% %% Camera-ready submissions do not need line numbers, and %% should have this option removed. %% %% Please note that the line numbering option requires %% version 1.1 or newer of the wlpeerj.cls file, and %% the corresponding author info requires v1.2 \documentclass[fleqn,10pt,lineno]{wlpeerj} %%disable for submission %\linespread{2} \title{A distribution-based effect size is more reproducible than hypothesis testing when analyzing high throughput sequencing datasets} \author[1,2]{Andrew D. Fernandes} \author[2]{Michael Vu} \author[2]{Lisa-Monique Edward} \author[2]{Jean M. Macklaim} \author[2]{Gregory B. Gloor} \affil[1]{Shield AI Inc., San Diego CA, 92130, United States of America} \affil[2]{Department of Biochemistry, University of Western Ontario, London, N6A 5C1, Canada} \corrauthor[5]{G. Gloor}{[email protected]} % \keywords{Keyword1, Keyword2, Keyword3} \begin{abstract} High throughput sequencing is analyzed using a combination of null hypothesis significance testing and ad-hoc cutoffs. This framework is strongly affected by sample size and is known to be irreproducible in underpowered studies, yet no suitable non-parametric alternative has been proposed. Here we present a novel non-parametric standardized effect size estimate, $\mathbb{E}$, for high-throughput sequencing datasets. Case studies are shown for modelled data, transcriptome and amplicon-sequencing datasets. The $\mathbb{E}$ statistic is shown to be more reproducible and robust than p-values and requires sample sizes as small as 5 to identify differentially abundant features. Source code and binaries are freely available at: https://bioconductor.org/packages/ALDEx2.html, omicplotR, and https://github.com/ggloor/distEffect. Datasets can be found at doi://10.6084/m9.figshare.8132216. \end{abstract} \begin{document} \flushbottom \maketitle \thispagestyle{empty} \section*{Introduction} High throughput sequencing (HTS) datasets for transcriptomics, metagenomics and 16S rRNA gene sequencing are high dimensional and generally conducted at pilot-scale sample sizes. Much effort has been spent identifying the best approaches and tools to determine what is `significantly different' between groups \citep{Soneson:2013,Schurch:2016aa}, but the answer seems to depend on the specific dataset and associated model parameters \citep{Thorsen:2016aa,hawinkel2017,Weiss:2017aa}. As commonly conducted, the investigator determines what is `significantly different' using a null hypothesis significance approach and then decides what level of difference is `biologically meaningful' among the significantly different features. Graphically, this approach is represented by the Volcano plot \citep{Cui:2003aa} where the magnitude of change (difference) is plotted vs the p-value. One under-appreciated consequence of pilot-scale research is that false positive features will often have very low apparent p-values \citep{Halsey:2015aa}. This explains in part why so many observations fail to replicate in larger datasets \citep{Ioannidis:2005aa}. In fact, both p-values and absolute difference are poor predictors of replication likelihood if the experiment were conducted again \citep{Cumming:2008aa,Halsey:2015aa}. Null-hypothesis significance-based testing methods also have the property that the number of significant features identified is affected by the number of samples being compared. 
This leads to the practice of prioritizing `statistically significant' observations over biologically significant differences. On the other hand, a standardized effect size addresses the issues of interest to the biologist: ``what is reproducibly different?'' or ``would I identify the same true positive features as different if the experiment were repeated?'' \citep{coe2002s,shinichi:2004,Colquhoun:2014aa,gloor:effect}. Standardized effect size statistics start from the assumption that there is a difference, but that the difference can be arbitrarily close to zero. Unfortunately, standardized effect size metrics are not routinely used when analyzing HTS datasets. One potential barrier is that parametric effect size statistics may not be suitable for HTS datasets because the data may not fit a Gaussian distribution.

The most widely used standardized effect size is Cohen's d, which is a parametric standardized effect size for the difference between the means of two groups. The general formulation is given in Equation~\ref{eq:cohen},
\begin{equation}
\mathrm{Cohen's~d} = \frac{\mathrm{mean}(a) - \mathrm{mean}(b)}{\sigma_{a,b}}
\label{eq:cohen}
\end{equation}
and is a Z score when measured in a Normal distribution. Cohen's d measures the difference between the means of the two distributions divided by the pooled standard deviation, denoted as \(\sigma_{a,b}\). However, this metric depends upon the data being relatively Normal, which cannot be guaranteed for HTS data, as seen in Figure \ref{fig:dist}.

The purpose of this report is to show that we can characterize the difference between distributions in a non-parametric manner without either summarizing the data prematurely or resorting to a rank-based approach, both of which discard much information. We introduce $\mathbb{E}$, a simple-to-calculate non-parametric standardized effect size statistic calculated on distributions directly. This measure is implemented in the ALDEx2 and CoDaSeq R packages. The $\mathbb{E}$ statistic has been used in both meta-transcriptome and microbiome studies, see for example \citep{macklaim:2013, bian:2017}, and has been shown to give remarkably reproducible results even with extremely small sample sizes \citep{nelson:2015vaginal}. The $\mathbb{E}$ metric has a near-monotonic relationship with p-values, but has the advantage of being relatively stable between sample sizes. However, it is unknown how $\mathbb{E}$ compares with parametric effect size estimates, how many samples are required, and what its sensitivity and specificity characteristics are.

\section*{Methods}

\subsection*{Calculating $\mathbb{E}$}

High throughput sequencing (HTS) platforms such as Illumina output thousands to billions of `reads', short nucleotide sequences that are derived from a DNA or RNA molecule in the sequencing `library'. The library is a subset of the nucleic acid molecules that have been collected from an environment and made compatible with a particular HTS platform. The HTS instruments deliver these reads as integer `counts' per genomic feature---gene, location, etc.\ \citep{lovell2020counts}. However, the counts are actually a single proxy for the probability of observing the particular read in a sample under a repeated sampling model. This is clear since technical replicates of the same library return different counts \citep{Marioni:2008}, and the difference between technical replicates is consistent with multivariate Poisson sampling \citep{fernandes:2013, gloorAJS:2016}.
The probability estimate is delivered by the instrument as an integer representation of the probability multiplied by the number of reads \citep{fernandes:2013, gloorAJS:2016}. Thus, the data returned by HTS are a type of count compositional data, where only the relationships between the features have meaning \citep{aitchison:1986, Lovell:2015, fernandes:2014, gloorFrontiers:2017, Kaul:2017aa, Quinn:2019aa}.

The ALDEx2 tool uses a combination of probabilistic modelling and compositional data analysis to determine the features that are different between groups where that difference is insensitive to random sampling. Technical replicate variance estimation and conversion of the count data to probabilities are accomplished by Monte-Carlo sampling from the Dirichlet distribution \citep{fernandes:2013, gloorAJS:2016}, which is conveniently also the conjugate prior for the multivariate Poisson process. The differences between features are linearized by applying a log-ratio transformation to the Dirichlet Monte-Carlo realizations and analyzed according to the rules of compositional data analysis \citep{aitchison:1986,fernandes:2013,Tsilimigras:2016aa,gloorFrontiers:2017}. In effect, ALDEx2 linearizes the differences between the features in proportional data, allowing various standard statistical tests to be validly performed.

The `Group Distribution' panel in Figure 1 shows the distribution for a gene in a highly replicated and curated RNA-seq experiment \citep{Schurch:2016aa}, with the expression of the gene in the WT and knockout conditions shown by the two density distributions. An Anderson-Darling test indicates that a Normal distribution is a poor fit for both distributions ($p < 1e-4$). Consequently, standard effect size measures that depend on summary statistics that assume a Normal distribution are expected to perform poorly, and the non-parametric method described here is to be preferred.

\begin{figure}[t!]
\centerline{\includegraphics[scale=0.50]{YDR171W_dist.pdf}}
\caption{The density of read counts may not follow a Normal distribution. The `Group Distribution' panel in the top left shows the density of the clr-transformed read counts in the two groups of a highly replicated RNA-seq experiment conducted in \emph{S. cerevisiae} \citep{Schurch:2016aa} for the gene YDR171W. We can see that the distributions are partially overlapping and are strongly multimodal. The `Win Grp Dispersion' panel shows the density of the within-group dispersion of the two groups calculated as outlined in equation \ref{eq:disp}. The `Btw Grp Difference' panel shows the density of the between-group difference calculated as outlined in equation \ref{eq:diff}. The `Effect size' panel shows the density of the effect size calculated as in equation \ref{eq:ff}. The dashed vertical line in this final panel shows an effect size of 0, and approximately 10\% of the effect size distribution crosses this threshold for this gene. The proportion of the effect size distribution that crosses an effect of 0 is known as the `overlap' measure. For each distribution the median and interquartile range are shown as the thick vertical line and the enclosing box. This plot was generated from the `aldex.plotFeature()' command.}
\label{fig:dist}
\end{figure}

We will use the distributions for the gene YDR171W in Figure \ref{fig:dist} as an example.
Starting with two vectors $\vec{a}$ and $\vec{b}$ that correspond to the log-ratio transformed Dirichlet Monte-Carlo realizations of a feature in two groups, we need a method to determine the standardized effect size; that is, the difference between groups relative to an estimate of within-group dispersion. Since these posterior distributions can have heavy tails, may be multimodal, and may be skewed, any useful statistic should be insensitive to even extreme non-Normality, and provide sensible answers even if the posterior distributions are Cauchy in one or both groups \citep{fernandes:2013}. Below and in the Supplement we define the properties of the approach used.

We can define a non-parametric \emph{difference} vector in Equation~(\ref{eq:diff}) as the signed difference between the two groups
\begin{equation}
\vec{\mathit{diff}} = \vec{a} - \vec{b},
\label{eq:diff}\vspace*{-10pt}
\end{equation}
with the distribution of the vector shown in Figure 1 `Btw Group Difference'. We can further define a non-parametric \emph{dispersion} vector as in Equation~(\ref{eq:disp}), where the notation $\boldsymbol{\rho}\vec{a}$ indicates one or more random permutations of the vector
\begin{equation}
\vec{\mathit{disp}} = \max \{ \lvert \vec{a} - \boldsymbol{\rho} \vec{a} \rvert, \lvert \vec{b} - \boldsymbol{\rho} \vec{b} \rvert \},
\label{eq:disp}\vspace*{-10pt}
\end{equation}
with the distribution shown in Figure 1 `Win Group Dispersion'. Finally, we can define an \emph{effect} vector as in Equation~(\ref{eq:ff}) that is the element-wise ratio of these two vectors
\begin{equation}
\vec{\mathit{eff}} = \frac{\vec{\mathit{diff}}}{\vec{\mathit{disp}}},
\label{eq:ff}\vspace*{0pt}
\end{equation}
with the distribution of the effect vector shown in Figure 1 `Effect size'. Taking the medians of $\vec{\mathit{diff}}$, $\vec{\mathit{disp}}$ and $\vec{\mathit{eff}}$ returns robust estimates of the central tendency of these statistics (MSD, the median signed difference; MMAD, the median of the maximum absolute deviation; and $\mathbb{E}$), and these are the `diff.btw', `diff.win' and `effect' statistics reported by ALDEx2. The median and interquartile range of these summary statistics for each distribution are shown in Figure \ref{fig:dist}. The MSD is very similar to the difference between the means or the difference between medians in a Normal distribution, as shown in Supplementary Figure 1. The MMAD metric is novel and the Supplement shows it has a Gaussian efficiency of 52\%, a breakdown point of 20\% (Supplementary Figure 2), and is 1.42 times the size of the standard deviation on a Normal distribution. The $\mathbb{E}$ statistic is a standardized effect size and is approximately 0.7 of Cohen's d when comparing the difference between two Normal distributions. Below and in Supplementary Figure 3 we show that this metric returns sensible values even with non-Normal distributions.

We used simple simulated datasets to determine baseline characteristics in a number of different distributions. Then we used the data from a highly replicated RNA-seq experiment \citep{Schurch:2016aa} or from a large 16S rRNA gene sequencing study \citep{bian:2017} and examined 100 random subsets of the data with between 2 and 20 samples in each group.
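As a concrete illustration of Equations~(\ref{eq:diff})--(\ref{eq:ff}), the sketch below computes the three summary statistics for two equal-length vectors of realizations in R. This is illustrative only: it uses a single random permutation per group and is not the ALDEx2 implementation itself.
\begin{verbatim}
# a, b: log-ratio transformed realizations of one feature in each
# of the two groups (assumed here to have equal length)
diff <- a - b                                         # difference vector
disp <- pmax(abs(a - sample(a)), abs(b - sample(b)))  # dispersion vector
eff  <- diff / disp                                   # effect vector
c(MSD = median(diff), MMAD = median(disp), E = median(eff))
\end{verbatim}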
We used simple simulated datasets to determine baseline characteristics in a number of different distributions. Then we used the data from a highly replicated RNA-seq experiment \citep{Schurch:2016aa} or from a large 16S rRNA gene sequencing study \citep{bian:2017} and examined 100 random subsets of the data with between 2 and 20 samples in each group. For each random subset we collected the set of features that were called as differentially abundant at thresholds of $\mathbb{E} \ge 1$, or with an expected Benjamini-Hochberg adjusted p-value of $\le 0.1$ calculated using either the parametric Welch's t-test, or the non-parametric Wilcoxon test in the ALDEx2 R package. These are output as `we.eBH' and `wi.eBH' by the ALDEx2 tool. These were compared to a `truth' set determined by identifying those features that were identified in all of 100 independent tests of the full dataset, with outliers removed, using the same tests and cutoffs. Note that this is simply a measure of consistency and is congruent with the approach taken in \citep{Schurch:2016aa}. We also examined subsets of these datasets where the subsets were taken from the same group. This allowed us to characterize the properties of $\mathbb{E}$ when no difference between groups was expected.

\enlargethispage{6pt}

\section*{Results and Discussion}

The motivation for this work is to identify what features are reliably different even with the small sample sizes prevalent in high throughput sequencing experiments. Measuring differential abundance in high throughput sequencing datasets is difficult for a variety of reasons. First, almost all experiments are underpowered. Second, the true distribution of the data and the ground truth of the data are both unknown. Third, when sample sizes are large, almost all features are identified as `significantly different' by null hypothesis significance testing frameworks. This latter reason is why ensuring that the feature is both below a p-value threshold (or below a q, or false discovery rate, threshold) and above a minimum difference threshold is common guidance \citep{Schurch:2016aa}. Graphically, these cutoffs are represented by the volcano plot \citep{Cui:2003aa}.

We began by examining the behaviour of the $\mathbb{E}$ metric and its constituent statistics. Supplementary Figure 1 shows that the difference between distributions measure is essentially as efficient and stable a measure of location as is the difference between means. When comparing measures of scale, Supplementary Figure 2 shows that the breakdown point for the MMAD is 20\% and the efficiency is approximately 52\% of the standard deviation in a Normal distribution. Thus, the MMAD is reasonably efficient, and much less prone to contamination than is the standard deviation. Simulation shows that the MMAD is approximately 1.418 times the size of the standard deviation for a Normal distribution. Taken together, $\mathbb{E}$ is approximately 0.705 the size of Cohen's d in a Normal distribution, but $\mathbb{E}$ returns sensible estimates even for non-Normal distributions such as $\beta$ and Cauchy distributions.

The remaining data and figures were generated from two real datasets. The `yeast' dataset is derived from a highly replicated RNA-seq dataset generated by \citep{Schurch:2016aa}, and the `16S' dataset is derived from a large scale cross-sectional survey of the microbiome of healthy Chinese volunteers \citep{bian:2017}. Generically, the genes or operational taxonomic units that compose the sequence bins will be referred to as `features'. These two datasets are polar opposites in many ways, with the yeast dataset having very few 0-count features and low variance within groups, and the 16S dataset being very sparse and having high variance within groups.
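Schematically, the sub-sampling consistency protocol described above can be sketched as follows (a minimal Python sketch; the \texttt{call\_features} argument is a hypothetical stand-in for running ALDEx2 with a chosen cutoff on one subset and returning a set of feature identifiers):

\begin{verbatim}
import numpy as np

rng = np.random.default_rng(2)

def subsample_consistency(group_a, group_b, truth, call_features,
                          n_per_group=5, n_trials=100):
    """Median proportion of the `truth' feature set recovered from
    random per-group subsets of the sample labels."""
    recovered = []
    for _ in range(n_trials):
        sub_a = rng.choice(group_a, n_per_group, replace=False)
        sub_b = rng.choice(group_b, n_per_group, replace=False)
        called = call_features(sub_a, sub_b)  # returns a set of features
        recovered.append(len(called & truth) / max(len(truth), 1))
    return float(np.median(recovered))
\end{verbatim}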
These two datasets are used to investigate the relationships between three measurable summary statistics of the distributions: the difference between groups, the effect size, and the overlap, and how these values can be used to identify reproducibly different features in high throughput sequencing datasets.

Figure \ref{fig:null}:A shows the relationship between $\mathbb{E}$ and p-values for the 6349 genes in the yeast dataset. We can see that there is very good correspondence, such that features with very high effect sizes have very low adjusted p-values. This is in line with the expected relationship between effect sizes and p-values, and provides additional confidence that $\mathbb{E}$ is an appropriate metric for effect size. However, note that the non-parametric Wilcoxon test adjusted p-values (in blue) have far fewer outliers on this plot than do the parametric Welch's t-test adjusted p-values (in red). We conclude that the majority of features likely have distributions that do not deviate too much from the Normality assumption of the parametric test, but that a significant minority of features have enough deviation to affect the calculated p-value. These outliers could contribute to both false positive and false negative identifications when using a parametric null hypothesis testing approach.

\begin{figure}[tpb]
\centerline{\includegraphics[scale=0.5]{null-effect.pdf}}
\caption{Distributional properties of $\mathbb{E}$. Panel A shows the relationship between $\mathbb{E}$ and Benjamini-Hochberg adjusted p-values calculated by either a non-parametric Wilcoxon test (blue points), or a parametric Welch's t-test (red points), where each point represents one of the genes in the yeast transcriptome dataset. The y axis has been truncated to highlight the p-values greater than $10^{-10}$, and the dashed grey line shows the location of a false positive threshold of 0.1. Panel B shows the relationship between $\mathbb{E}$ and the overlap measure in the yeast dataset (blue points) and the 16S dataset (orange points). Overlaid in red is the expected relationship for a Z-score in a Normal distribution.}
\label{fig:null}
\end{figure}

We next focused on the characteristics of false positive features in the two datasets and examined the relationship between the $\mathbb{E}$ and the overlap metrics in both the yeast and 16S datasets. Recall from Figure \ref{fig:dist} that all values are calculated from the distributions and not inferred from summary statistics. The overlap is the proportion of the effect size ($\vec{\mathit{eff}}$) distribution where the tail overlaps 0. In a Normal distribution, Cohen's d is exactly equivalent to a Z score, and we can determine the proportion of the tail distribution that corresponds to any Z score (effect size). This relationship is plotted in Figure \ref{fig:null}:B, and we can see that the non-parametric $\mathbb{E}$ and overlap metrics correspond very well to the expected relationship for a Z score and tail area in a Normal distribution. On this graph, an overlap of 0.1 corresponds to $\mathbb{E} \sim 1.2$.

Next, we examined the proportion of features that were identified as being false positives as a function of per-group sample size when there was no difference between groups. For this, we generated 100 random instances from both datasets with the samples drawn from only one group and the per-group sample size varying from 2 to 20.
We calculated the proportion of all features in the dataset that had a greater than 2-fold difference; or an overlap less than 0.1; or that had $\mathbb{E}$ values greater than 0.5, 1, or 2; or that had the intersect of the difference $>1$, overlap $<0.1$ and $\mathbb{E}$ greater than 1. This was plotted relative to the per-group sample size in Fig \ref{fig:fp}:A. A number of observations can be made. First, it is apparent that there is a strong linear relationship between the proportion of features that are identified as false positive and the sample size for all the metrics. Second, a smaller number of samples was needed to ensure no false positive features were identified as the effect size increased; at the extreme, an effect size of 2 would be sufficient to exclude FP features with as few as 5 samples per group. Third, difference was a poor measure by which to exclude FP features, being worse as a measure in the highly variable 16S dataset. Fourth, the effect size was highly reproducible as a measure to exclude FP features, having almost the same characteristics in both datasets. The overlap measure was also highly reproducible, but this is a trivial finding since Figure \ref{fig:null} shows that effect size and overlap are highly correlated. Fifth, combining the three measures was able to exclude FP features better than was any single metric. However, the triple measure of effect, overlap and difference did not give the same result in the two datasets. In the low variance yeast dataset the triple measure was, if anything, even more specific than doubling the effect size. In contrast, the triple measure was only slightly more specific in the high variance dataset than were the single measures of $\mathbb{E}$ or overlap.

\begin{figure}[tpb]
\centerline{\includegraphics[scale=0.5]{FP-plots.pdf}}
\caption{Characteristics of false positive features using $\mathbb{E}$. Panel A plots, as a function of per-group sample size, the median FP rate of the difference, overlap and $\mathbb{E}$ cutoffs when applied to samples drawn at random from the same group. Also shown is the combination of these three metrics into one metric that is the overlap between the three (triple). Panel B shows an effect plot \citep{gloor:effect} of the whole yeast transcriptome dataset in grey points, with the false positive features identified by either difference between groups (blue) or $\mathbb{E}$ (orange) identified from a random subset of 5 samples from each group. Panel C shows the same analysis on the 16S rRNA gene sequencing dataset. The false positive features identified by each approach are restricted to features with quite separate characteristics of difference and dispersion.}
\label{fig:fp}
\end{figure}
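The decision rules compared in Panel A can be written down compactly. The sketch below (Python; the column names follow the ALDEx2 outputs named earlier and should be treated as illustrative) combines the difference, effect and overlap cutoffs into the `triple' rule, and computes the Normal-theory overlap expected for a given effect size:

\begin{verbatim}
import pandas as pd
from scipy.stats import norm

def expected_overlap(effect):
    """Tail area of a standard Normal beyond |effect|; the red
    reference curve in the overlap-vs-effect panel."""
    return norm.sf(abs(effect))

def triple_filter(res: pd.DataFrame) -> pd.Series:
    """Features passing difference, effect and overlap cutoffs jointly."""
    return ((res["diff.btw"].abs() > 1)   # > 2-fold on the log2 scale
            & (res["effect"].abs() > 1)   # standardized effect size
            & (res["overlap"] < 0.1))     # tail barely crosses zero

print(round(expected_overlap(1.2), 3))    # ~0.115, cf. the observed 0.1
\end{verbatim}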
We used effect plots \citep{gloor:effect} to identify why the triple measure was more specific in the yeast dataset than in the 16S dataset, and the results are shown in Figure \ref{fig:fp} panels B and C. Here we overplotted the FP features found when an example dataset of 5 samples in each group was compared to the TP features found when the full dataset was examined, applying the dual cutoffs of both difference and $\mathbb{E} > 1$. In the yeast dataset, the FP $\mathbb{E}$ features have very low dispersion (variance) and very low difference, while the features identified as FP when only difference was used tended to have either very high or very low dispersion. In the 16S dataset, essentially all the features have very high dispersion. Here we found that the FP features identified by $\mathbb{E}$ have a large between group difference, and the FP features identified only by difference tend to have low difference between groups. This mirrors the observation seen in the yeast dataset. These observations explain why using both $\mathbb{E}$ and difference in combination is more discriminatory than either alone, as they tend to identify different sets of FP features.

With this information, we can determine the sensitivity and specificity of identifying TP and FP features in the example datasets when comparing two groups. In the yeast dataset, the edgeR tool identified over 4600 out of 6349 genes as `significant' (Benjamini-Hochberg adjusted p-value $< 0.05$) when all samples were included using either the glm or exact test modes (Supplementary Table 1). Other widely used tools gave similar results \citep{Schurch:2016aa}. The null hypothesis testing framework in ALDEx2 also returned at least 4300 genes in the same dataset. Thus, identifying such a large proportion of genes as differentially abundant indicates that statistical significance is not informative for this type of experiment. Schurch et al. (and others) recommend adding a secondary threshold such as a fold-change cutoff to identify genes of interest for follow-up analyses \citep{Cui:2003aa,Schurch:2016aa}. When sample sizes are sufficiently large, we would expect that the fold-change cutoff itself would be the primary determinant of difference; however, this approach would not include either the biological variance or the uncertainty of measurement in the analysis. Furthermore, the difference metric is not sufficient to exclude FP features, and this is especially relevant for features with large dispersion.

\begin{figure}[tpb]
\centerline{\includegraphics[scale=0.4]{TPvsFP.pdf}}
\caption{True Positive (TP) and False Positive (FP) identifications as a function of per group sample size in the two datasets. The two panels show the results of randomly sub-sampling the yeast transcriptome and the 16S rRNA gene datasets 100 times with various numbers of samples in each group. Features that were identified in the subsample and in the full dataset were counted as true positives (TP), and features that were identified in the subsets that were not identified as different in the full dataset were counted as false positives (FP). The panels show the median proportion of TP found by each approach, and the median proportion of all positives that were FP. Cutoffs used were absolute effect $> 1$, absolute difference $> 1$, overlap of $< 0.1$, or q score of $< 0.1$ (Benjamini-Hochberg adjusted p-value). The triple approach used the intersect of the effect, difference and overlap approaches.}
\label{fig:02}
\end{figure}

We examined the relationship between sample size and the number of features identified as significantly different using a null hypothesis testing framework in the yeast dataset. Figure \ref{fig:02}:A shows the median True and False Positive rates. In this analysis we are testing for the ability to detect features that would have been observed as differentially abundant in the full dataset when we use a random subset of the data, and the plots show the median value of 100 trials at each sample size. The `q+diff' example in the Proportion panel shows the rate observed for the Benjamini-Hochberg corrected p-values (q-values) and a 2-fold difference observed when using the edgeR tool (REF), as advised in recent best practices \citep{Schurch:2016aa}.
As expected when using p-values alone, we observe that the power of the null hypothesis significance test is strongly affected by sample size, and only reaches 90\% power to detect when the per-group sample size is greater than 20. However, the FP rate is effectively 0. Interestingly, applying both the significance test and the fold-change cutoff caused both the TP and FP rates to increase substantially. At small sample sizes the TP rate is greater than 80\% and the FP rate ranges downward from $\sim 30$\%. Inspection of the results indicated that this was because, in this dataset, the significance test was all but irrelevant since all features with at least a 2-fold change had a q-score below the threshold of 0.1. Note that many tools have difficulty estimating the actual FDR in real datasets \citep{Thorsen:2016aa,hawinkel2017}.

In contrast to q-scores alone, the TPR of the $\mathbb{E}$ statistic in the same random datasets is essentially independent of the number of samples for all methods and combinations. However, the FPR is strongly affected by sample size, as was observed for q-scores and difference. Note that even when only two samples are used, the $\mathbb{E}$ statistic identifies over 80\% of the features that are identified as different by the same statistic in the full dataset. Thus, the simple metric outlined here can correctly identify the `true positive' set even when the number of samples is very small. The tradeoff when using this statistic is that at very low sample sizes the False Discovery Rate (FDR) is extreme; in this dataset and with a cutoff of $\mathbb{E} > 1$, the FDR is 40\% with two samples, but falls to less than 10\% only when there are 15 or more samples. Interestingly, applying both a fold-change cutoff and an overlap cutoff to the $\mathbb{E}$ metric (denoted as triple in the figure) reduces the false discovery rate dramatically, and it is now indistinguishable from the FP rate observed with the q-score and difference combination. A similar trend is observed with the 16S dataset, where the metrics have extremely good power to detect even at low sample sizes, but have a high false positive rate at low sample sizes. We conclude that the $\mathbb{E}$ measure, alone or in combination with the difference and overlap metrics, constitutes a useful and reproducible way to identify differentially abundant features in disparate datasets.

\section*{Conclusion}

By default, we want to know both `what is significant' and `what is different' \citep{Cui:2003aa}. Both of these questions can be addressed with a standardized effect size statistic that scales the difference between features by their dispersion. We have found plots of difference and dispersion to be an exceedingly useful tool when examining HTS datasets \citep{gloor:effect}. Furthermore, datasets analyzed by this approach have proven to be remarkably reproducible, as shown by independent lab validation \citep{macklaim:2013, nelson:2015vaginal}. The $\mathbb{E}$ statistic outlined here is a relatively robust statistic with the attractive property that it consistently identifies almost all of the true features regardless of the underlying distribution and the number of samples, as shown in Figure \ref{fig:02}. In marked contrast, even the best p-value based approaches can identify only a small proportion of the features at small sample sizes that would have been found in the full dataset \citep{Schurch:2016aa}.
Thus, the simple metric outlined here can correctly identify the `true positive' set even when the number of samples is very small. Note that a fold-change threshold, as is commonly used, is not the same as a standardized effect statistic, and applying the threshold values of \citep{Schurch:2016aa}, while reducing the number of features that are found, does not necessarily enhance reproducibility. In fact, this investigation highlights the danger in relying on fold-change to identify differentially abundant features. We can see that the 16S rRNA gene sequencing datasets have substantially greater numbers of fold-change FP features than does the yeast transcriptome dataset when no difference is expected. This is because of the substantially greater dispersion observed for the features in the former dataset than in the latter.

The tradeoff when using the $\mathbb{E}$ statistic is that at very low sample sizes the False Discovery Rate can be extreme; in this dataset and with a cutoff of $\mathbb{E} > 1$, the FDR is 40\% with two samples, but falls to less than 10\% only when there are 15 or more samples, regardless of dataset. Note that the FDR as measured is assessing congruence with the result in the whole dataset, since the actual ground truth is not known. Given that FP features are not identified if there is no difference between groups when the sample size is greater than 10 (Figure \ref{fig:fp}:A), it is likely that the FP features identified when the groups are different are simply features with lower effect sizes. Supplementary Figure 5 shows that this is in fact the case.

Moreover, using the combination of effect, difference and overlap enhances specificity regardless of dataset. This is because these measures are filtering out different sets of FP features, but identify substantially the same set of TP features: the $\mathbb{E}$ metric is the midpoint of the effect size distribution and identifies those features with large standardized change between groups; the overlap metric corresponds to the tail of the $\vec{\mathit{eff}}$ distribution and identifies those features with narrow distributions; the between-group difference metric identifies those features with large absolute fold change. Further tempering this is the observation that no false positives are identified in two different datasets when no difference is expected and there are 10 or more samples per group. Taken together, we suggest that a fold change of at least two, and both $\mathbb{E} > 1$ and overlap $< 0.1$, are robust and reproducible measures that provide an acceptable mix of power and specificity when the sample size is greater than 10 per group in diverse datasets.

This work describes the $\mathbb{E}$ statistic that measures a standardized effect size directly from distributions and not from summary statistics. We show that it is useful when examining high throughput sequencing datasets. The statistic is relatively robust and efficient, and answers the question most desired by the biologist, namely `what is reproducibly different'. The $\mathbb{E}$ metric is computed in the ALDEx2 R package as the `effect' output, where it is the median of the inferred technical and biological data, and in the CodaSeq R package, where it acts only on the point estimates of the data. Interactive exploration of effect sizes can be done in the omicplotR Bioconductor package \citep{omicplot}.

\section*{Acknowledgements}

We thank past and present members of the lab for helpful comments and insights.
In particular, Dan Giguere suggested the title, and Brandon Lieng developed the code in ALDEx2 that provided Figure 1.
\vspace{-12pt}

\section*{Funding}

This work was funded by NSERC (RGPIN-03878-2015) awarded to G.B.G.\vspace*{-12pt}

\begin{thebibliography}{}

\bibitem[Aitchison, 1986]{aitchison:1986}
Aitchison, J. (1986).
\newblock {\em The Statistical Analysis of Compositional Data}.
\newblock Chapman \& Hall.

\bibitem[Bian et~al., 2017]{bian:2017}
Bian, G., Gloor, G.~B., Gong, A., Jia, C., Zhang, W., Hu, J., Zhang, H., Zhang, Y., Zhou, Z., Zhang, J., Burton, J.~P., Reid, G., Xiao, Y., Zeng, Q., Yang, K., and Li, J. (2017).
\newblock The gut microbiota of healthy aged Chinese is similar to that of the healthy young.
\newblock {\em mSphere}, 2(5):e00327--17.

\bibitem[Coe, 2002]{coe2002s}
Coe, R. (2002).
\newblock It's the effect size, stupid: What effect size is and why it is important.

\bibitem[Colquhoun, 2014]{Colquhoun:2014aa}
Colquhoun, D. (2014).
\newblock An investigation of the false discovery rate and the misinterpretation of p-values.
\newblock {\em R Soc Open Sci}, 1(3):140216.

\bibitem[Cui and Churchill, 2003]{Cui:2003aa}
Cui, X. and Churchill, G.~A. (2003).
\newblock Statistical tests for differential expression in cDNA microarray experiments.
\newblock {\em Genome Biol}, 4(4):210.1--210.10.

\bibitem[Cumming, 2008]{Cumming:2008aa}
Cumming, G. (2008).
\newblock Replication and p intervals: p values predict the future only vaguely, but confidence intervals do much better.
\newblock {\em Perspect Psychol Sci}, 3(4):286--300.

\bibitem[Fernandes et~al., 2013]{fernandes:2013}
Fernandes, A.~D., Macklaim, J.~M., Linn, T.~G., Reid, G., and Gloor, G.~B. (2013).
\newblock ANOVA-like differential expression (ALDEx) analysis for mixed population RNA-seq.
\newblock {\em PLoS One}, 8(7):e67019.

\bibitem[Fernandes et~al., 2014]{fernandes:2014}
Fernandes, A.~D., Reid, J.~N., Macklaim, J.~M., McMurrough, T.~A., Edgell, D.~R., and Gloor, G.~B. (2014).
\newblock Unifying the analysis of high-throughput sequencing datasets: characterizing {RNA}-seq, 16{S} r{RNA} gene sequencing and selective growth experiments by compositional data analysis.
\newblock {\em Microbiome}, 2:15.1--15.13.

\bibitem[Giguere et~al., 2019]{omicplot}
Giguere, D., Macklaim, J., and Gloor, G. (2019).
\newblock omicplotR: Visual exploration of omic datasets using a shiny app.
\newblock Bioconductor v1.4.0.

\bibitem[Gloor et~al., 2016a]{gloor:effect}
Gloor, G.~B., Macklaim, J.~M., and Fernandes, A.~D. (2016a).
\newblock Displaying variation in large datasets: Plotting a visual summary of effect sizes.
\newblock {\em Journal of Computational and Graphical Statistics}, 25(3):971--979.

\bibitem[Gloor et~al., 2017]{gloorFrontiers:2017}
Gloor, G.~B., Macklaim, J.~M., Pawlowsky-Glahn, V., and Egozcue, J.~J. (2017).
\newblock Microbiome datasets are compositional: And this is not optional.
\newblock {\em Front Microbiol}, 8:2224.

\bibitem[Gloor et~al., 2016b]{gloorAJS:2016}
Gloor, G.~B., Macklaim, J.~M., Vu, M., and Fernandes, A.~D. (2016b).
\newblock Compositional uncertainty should not be ignored in high-throughput sequencing data analysis.
\newblock {\em Austrian Journal of Statistics}, 45:73--87.

\bibitem[Halsey et~al., 2015]{Halsey:2015aa}
Halsey, L.~G., Curran-Everett, D., Vowler, S.~L., and Drummond, G.~B. (2015).
\newblock The fickle p value generates irreproducible results.
\newblock {\em Nat Methods}, 12(3):179--85.

\bibitem[Hawinkel et~al., 2018]{hawinkel2017}
Hawinkel, S., Mattiello, F., Bijnens, L., and Thas, O. (2018).
\newblock A broken promise: microbiome differential abundance methods do not control the false discovery rate.
\newblock {\em Briefings in Bioinformatics}.

\bibitem[Ioannidis, 2005]{Ioannidis:2005aa}
Ioannidis, J. P.~A. (2005).
\newblock Why most published research findings are false.
\newblock {\em PLoS Med}, 2(8):e124.

\bibitem[Kaul et~al., 2017]{Kaul:2017aa}
Kaul, A., Mandal, S., Davidov, O., and Peddada, S.~D. (2017).
\newblock Analysis of microbiome data in the presence of excess zeros.
\newblock {\em Front Microbiol}, 8:2114.

\bibitem[Lovell et~al., 2015]{Lovell:2015}
Lovell, D., Pawlowsky-Glahn, V., Egozcue, J.~J., Marguerat, S., and B{\"a}hler, J. (2015).
\newblock Proportionality: a valid alternative to correlation for relative data.
\newblock {\em PLoS Comput Biol}, 11(3):e1004075.

\bibitem[Lovell et~al., 2020]{lovell2020counts}
Lovell, D.~R., Chua, X.-Y., and McGrath, A. (2020).
\newblock Counts: an outstanding challenge for log-ratio analysis of compositional data in the molecular biosciences.
\newblock {\em NAR Genomics and Bioinformatics}, 2(2):lqaa040.

\bibitem[Macklaim et~al., 2013]{macklaim:2013}
Macklaim, J.~M., Fernandes, A.~D., Di~Bella, J.~M., Hammond, J.-A., Reid, G., and Gloor, G.~B. (2013).
\newblock Comparative meta-{RNA}-seq of the vaginal microbiota and differential expression by {\em Lactobacillus iners} in health and dysbiosis.
\newblock {\em Microbiome}, 1(1):12.

\bibitem[Marioni et~al., 2008]{Marioni:2008}
Marioni, J.~C., Mason, C.~E., Mane, S.~M., Stephens, M., and Gilad, Y. (2008).
\newblock {RNA-seq}: an assessment of technical reproducibility and comparison with gene expression arrays.
\newblock {\em Genome Res}, 18(9):1509--17.

\bibitem[Nakagawa, 2004]{shinichi:2004}
Nakagawa, S. (2004).
\newblock A farewell to Bonferroni: the problems of low statistical power and publication bias.
\newblock {\em Behavioral Ecology}, 15(6):1044--1045.

\bibitem[Nelson et~al., 2015]{nelson:2015vaginal}
Nelson, T.~M., Borgogna, J.-L.~C., Brotman, R.~M., Ravel, J., Walk, S.~T., and Yeoman, C.~J. (2015).
\newblock Vaginal biogenic amines: biomarkers of bacterial vaginosis or precursors to vaginal dysbiosis?
\newblock {\em Frontiers in Physiology}, 6.

\bibitem[Quinn et~al., 2019]{Quinn:2019aa}
Quinn, T.~P., Erb, I., Gloor, G., Notredame, C., Richardson, M.~F., and Crowley, T.~M. (2019).
\newblock A field guide for the compositional analysis of any-omics data.
\newblock {\em Gigascience}, 8(9).

\bibitem[Schurch et~al., 2016]{Schurch:2016aa}
Schurch, N.~J., Schofield, P., Gierli{\'n}ski, M., Cole, C., Sherstnev, A., Singh, V., Wrobel, N., Gharbi, K., Simpson, G.~G., Owen-Hughes, T., Blaxter, M., and Barton, G.~J. (2016).
\newblock How many biological replicates are needed in an RNA-seq experiment and which differential expression tool should you use?
\newblock {\em RNA}, 22(6):839--51.

\bibitem[Soneson and Delorenzi, 2013]{Soneson:2013}
Soneson, C. and Delorenzi, M. (2013).
\newblock A comparison of methods for differential expression analysis of {RNA-seq} data.
\newblock {\em BMC Bioinformatics}, 14:91.

\bibitem[Thorsen et~al., 2016]{Thorsen:2016aa}
Thorsen, J., Brejnrod, A., Mortensen, M., Rasmussen, M.~A., Stokholm, J., Al-Soud, W.~A., S{\o}rensen, S., Bisgaard, H., and Waage, J. (2016).
\newblock Large-scale benchmarking reveals false discoveries and count transformation sensitivity in 16{S} r{RNA} gene amplicon data analysis methods used in microbiome studies.
\newblock {\em Microbiome}, 4(1):62.

\bibitem[Tsilimigras and Fodor, 2016]{Tsilimigras:2016aa}
Tsilimigras, M.
C.~B. and Fodor, A.~A. (2016).
\newblock Compositional data analysis of the microbiome: fundamentals, tools, and challenges.
\newblock {\em Ann Epidemiol}, 26(5):330--5.

\bibitem[Weiss et~al., 2017]{Weiss:2017aa}
Weiss, S., Xu, Z.~Z., Peddada, S., Amir, A., Bittinger, K., Gonzalez, A., Lozupone, C., Zaneveld, J.~R., V{\'a}zquez-Baeza, Y., Birmingham, A., Hyde, E.~R., and Knight, R. (2017).
\newblock Normalization and microbial differential abundance strategies depend upon data characteristics.
\newblock {\em Microbiome}, 5(1):27.

\end{thebibliography}
\end{document}
module Common.Level where

open import Agda.Primitive public
\subsection*{Zoom Policies}

\begin{itemize}
\item Use your real name as the display name. This is important for my own auditing of attendance.
\item Your video must always be on, and your sound should always be on (except in cases of background noise).
\item You can use an appropriate background, but it must be a static image.
\item No Snap Camera or similar nonsense. No chatting on Zoom.
\item No texting, slacking, or verbal chatting with other students (no talking in class!).
\end{itemize}
from linear_reg import lin_mod
import pandas as pd
import statsmodels.api as sm
import streamlit as st


class logit(lin_mod):
    # logistic regression using statsmodels, displayed with Streamlit
    def logit_imp(self):
        st.info("conversion to dummies")
        st.write(pd.get_dummies(self.y).head())
        st.write("selecting first column ---->")
        # Fit on the first dummy column as the binary response
        mod = sm.Logit(pd.get_dummies(self.y).iloc[:, 0], self.x).fit()
        st.write(mod.summary())
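# Hypothetical usage sketch (assumption: the lin_mod base class, not shown
# here, stores the design matrix in self.x and the target labels in self.y):
#
#   model = logit(...)   # constructed however lin_mod prescribes
#   model.logit_imp()    # fits sm.Logit and renders the summary in Streamlit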
\filetitle{prior}{Add new prior to system priors object}{systempriors/prior}

\paragraph{Syntax}\label{syntax}

\begin{verbatim}
S = prior(S,Expr,PriorFunc,...)
S = prior(S,Expr,[],...)
\end{verbatim}

\paragraph{Input arguments}\label{input-arguments}

\begin{itemize}
\item
  \texttt{S} {[} systempriors {]} - System priors object.
\item
  \texttt{Expr} {[} char {]} - Expression that defines a value for which a prior density will be defined; see Description for system properties that can be referred to in the expression.
\item
  \texttt{PriorFunc} {[} function\_handle \textbar{} empty {]} - Function handle returning the log of prior density; empty prior function, \texttt{{[}{]}}, means a uniform prior.
\end{itemize}

\paragraph{Output arguments}\label{output-arguments}

\begin{itemize}
\itemsep1pt\parskip0pt\parsep0pt
\item
  \texttt{S} {[} systempriors {]} - The system priors object with the new prior added.
\end{itemize}

\paragraph{Options}\label{options}

\begin{itemize}
\item
  \texttt{'lowerBound='} {[} numeric \textbar{} \emph{\texttt{-Inf}} {]} - Lower bound for the prior.
\item
  \texttt{'upperBound='} {[} numeric \textbar{} \emph{\texttt{Inf}} {]} - Upper bound for the prior.
\end{itemize}

\paragraph{Description}\label{description}

\subparagraph{System properties that can be used in \texttt{Expr}}\label{system-properties-that-can-be-used-in-expr}

\begin{itemize}
\item
  \texttt{srf{[}VarName,ShockName,T{]}} - Plain shock response function of variable \texttt{VarName} to shock \texttt{ShockName} in period \texttt{T}. Mind the square brackets.
\item
  \texttt{ffrf{[}TVarName,MVarName,Freq{]}} - Filter frequency response function of transition variable \texttt{TVarName} to measurement variable \texttt{MVarName} at frequency \texttt{Freq}. Mind the square brackets.
\item
  \texttt{corr{[}VarName1,VarName2,Lag{]}} - Correlation between variable \texttt{VarName1} and variable \texttt{VarName2} lagged by \texttt{Lag} periods.
\item
  \texttt{spd{[}VarName1,VarName2,Freq{]}} - Spectral density between variables \texttt{VarName1} and \texttt{VarName2} at frequency \texttt{Freq}.
\end{itemize}

If a variable is declared as a \href{modellang/logvariables}{\texttt{log-variable}}, it must be referred to as \texttt{log(VarName)} in the above expressions, and the log of that variable is returned, e.g. \texttt{srf{[}log(VarName),ShockName,T{]}} or \texttt{ffrf{[}log(TVarName),MVarName,T{]}}.

\subparagraph{Expressions involving combinations or functions of parameters}\label{expressions-involving-combinations-or-functions-of-parameters}

Model parameter names can be referred to in \texttt{Expr} preceded by a dot (period), e.g. \texttt{.alpha\^{}2 + .beta\^{}2} defines a prior on the sum of squares of the two parameters (\texttt{alpha} and \texttt{beta}).

\paragraph{Example}\label{example}

Create a new empty systempriors object based on an existing model.

\begin{verbatim}
s = systempriors(m);
\end{verbatim}

Add a prior on minus the shock response function of variable \texttt{ygap} to shock \texttt{eps\_pie} in period 4. The prior density is lognormal with mean 0.3 and std deviation 0.05.

\begin{verbatim}
s = prior(s,'-srf[ygap,eps_pie,4]',logdist.lognormal(0.3,0.05));
\end{verbatim}

Add a prior on the gain of the frequency response function of transition variable \texttt{ygap} to measurement variable `y' at frequency \texttt{2*pi/40}. The prior density is normal with mean 0.5 and std deviation 0.01. This prior says that we wish to keep the cut-off periodicity for trend-cycle decomposition close to 40 periods.
\begin{verbatim}
s = prior(s,'abs(ffrf[ygap,y,2*pi/40])',logdist.normal(0.5,0.01));
\end{verbatim}

Add a prior on the sum of parameters \texttt{alpha1} and \texttt{alpha2}. The prior is normal with mean 0.9 and std deviation 0.1, but the sum is forced to be between 0 and 1 by imposing lower and upper bounds.

\begin{verbatim}
s = prior(s,'alpha1+alpha2',logdist.normal(0.9,0.1), ...
    'lowerBound=',0,'upperBound=',1);
\end{verbatim}
{-# OPTIONS --without-K #-}

module higher.circle where

open import higher.circle.core public
open import higher.circle.properties public
module Language.Reflection

import public Language.Reflection.TT
import public Language.Reflection.TTImp

import Control.Monad.Trans

%default total

----------------------------------
--- Elaboration data structure ---
----------------------------------

||| Elaboration scripts
||| Where types/terms are returned, binders will have unique, if not
||| necessarily human readable, names
export
data Elab : Type -> Type where
  Pure : a -> Elab a
  Bind : Elab a -> (a -> Elab b) -> Elab b
  Fail : FC -> String -> Elab a

  Try : Elab a -> Elab a -> Elab a

  ||| Log a message. Takes a
  ||| * topic
  ||| * level
  ||| * message
  LogMsg : String -> Nat -> String -> Elab ()

  ||| Print and log a term. Takes a
  ||| * topic
  ||| * level
  ||| * message
  ||| * term
  LogTerm : String -> Nat -> String -> TTImp -> Elab ()

  ||| Resugar, print and log a term. Takes a
  ||| * topic
  ||| * level
  ||| * message
  ||| * term
  LogSugaredTerm : String -> Nat -> String -> TTImp -> Elab ()

  -- Elaborate a TTImp term to a concrete value
  Check : TTImp -> Elab expected
  -- Quote a concrete expression back to a TTImp
  Quote : (0 _ : val) -> Elab TTImp

  -- Elaborate under a lambda
  Lambda : (0 x : Type) ->
           {0 ty : x -> Type} ->
           ((val : x) -> Elab (ty val)) -> Elab ((val : x) -> (ty val))

  -- Get the current goal type, if known
  -- (it might need to be inferred from the solution)
  Goal : Elab (Maybe TTImp)
  -- Get the names of local variables in scope
  LocalVars : Elab (List Name)
  -- Generate a new unique name, based on the given string
  GenSym : String -> Elab Name
  -- Put a name in the current namespace
  InCurrentNS : Name -> Elab Name
  -- Get the types of every name which matches.
  -- There may be ambiguities - returns a list of fully explicit names
  -- and their types. If there are no results, the name is undefined.
  GetType : Name -> Elab (List (Name, TTImp))
  -- Get the metadata associated with a name
  GetInfo : Name -> Elab (List (Name, NameInfo))
  -- Get the type of a local variable
  GetLocalType : Name -> Elab TTImp
  -- Get the constructors of a data type. The name must be fully resolved.
  GetCons : Name -> Elab (List Name)
  -- Check a group of top level declarations
  Declare : List Decl -> Elab ()

export
Functor Elab where
  map f e = Bind e $ Pure . f

export
Applicative Elab where
  pure = Pure
  f <*> a = Bind f (<$> a)

export
Alternative Elab where
  empty = Fail EmptyFC ""
  l <|> r = Try l r

export
Monad Elab where
  (>>=) = Bind

-----------------------------
--- Elaboration interface ---
-----------------------------

public export
interface Monad m => Elaboration m where

  ||| Report an error in elaboration at some location
  failAt : FC -> String -> m a

  ||| Try the first elaborator. If it fails, reset the elaborator state and
  ||| run the second
  try : Elab a -> Elab a -> m a

  ||| Write a log message, if the log level is >= the given level
  logMsg : String -> Nat -> String -> m ()

  ||| Write a log message and a rendered term, if the log level is >= the given level
  logTerm : String -> Nat -> String -> TTImp -> m ()

  ||| Write a log message and a resugared & rendered term, if the log level is >= the given level
  logSugaredTerm : String -> Nat -> String -> TTImp -> m ()

  ||| Check that some TTImp syntax has the expected type
  ||| Returns the type checked value
  check : TTImp -> m expected

  ||| Return TTImp syntax of a given value
  quote : (0 _ : val) -> m TTImp

  ||| Build a lambda expression
  lambda : (0 x : Type) ->
           {0 ty : x -> Type} ->
           ((val : x) -> Elab (ty val)) -> m ((val : x) -> (ty val))

  ||| Get the goal type of the current elaboration
  goal : m (Maybe TTImp)

  ||| Get the names of the local variables in scope
  localVars : m (List Name)

  ||| Generate a new unique name
  genSym : String -> m Name

  ||| Given a name, return the name decorated with the current namespace
  inCurrentNS : Name -> m Name

  ||| Given a possibly ambiguous name, get all the matching names and their types
  getType : Name -> m (List (Name, TTImp))

  ||| Get the metadata associated with a name. Returns all matching names and their types
  getInfo : Name -> m (List (Name, NameInfo))

  ||| Get the type of a local variable
  getLocalType : Name -> m TTImp

  ||| Get the constructors of a fully qualified data type name
  getCons : Name -> m (List Name)

  ||| Make some top level declarations
  declare : List Decl -> m ()

||| Report an error in elaboration
export %inline
fail : Elaboration m => String -> m a
fail = failAt EmptyFC

||| Log the current goal type, if the log level is >= the given level
export %inline
logGoal : Elaboration m => String -> Nat -> String -> m ()
logGoal str n msg = whenJust !goal $ logTerm str n msg

export
Elaboration Elab where
  failAt         = Fail
  try            = Try
  logMsg         = LogMsg
  logTerm        = LogTerm
  logSugaredTerm = LogSugaredTerm
  check          = Check
  quote          = Quote
  lambda         = Lambda
  goal           = Goal
  localVars      = LocalVars
  genSym         = GenSym
  inCurrentNS    = InCurrentNS
  getType        = GetType
  getInfo        = GetInfo
  getLocalType   = GetLocalType
  getCons        = GetCons
  declare        = Declare

public export
Elaboration m => MonadTrans t => Monad (t m) => Elaboration (t m) where
  failAt             = lift .: failAt
  try                = lift .: try
  logMsg s           = lift .: logMsg s
  logTerm s n        = lift .: logTerm s n
  logSugaredTerm s n = lift .: logSugaredTerm s n
  check              = lift . check
  quote v            = lift $ quote v
  lambda x           = lift . lambda x
  goal               = lift goal
  localVars          = lift localVars
  genSym             = lift . genSym
  inCurrentNS        = lift . inCurrentNS
  getType            = lift . getType
  getInfo            = lift . getInfo
  getLocalType       = lift . getLocalType
  getCons            = lift . getCons
  declare            = lift . declare
If $s$ and $t$ are sequentially compact, then $s \times t$ is sequentially compact.
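A standard proof sketch (added for context, using the usual iterated-subsequence argument): given a sequence $((x_n, y_n))_{n \in \mathbb{N}}$ in $s \times t$, sequential compactness of $s$ yields a subsequence $(x_{n_k})$ converging to some $x \in s$; applying sequential compactness of $t$ to $(y_{n_k})$ yields a further subsequence $(y_{n_{k_j}})$ converging to some $y \in t$. The pairs $(x_{n_{k_j}}, y_{n_{k_j}})$ then converge to $(x, y) \in s \times t$.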
module OfficialCrashCourse.DepTypes

import Data.Vect

-- isSingle : Type
isSingleton : Bool -> Type
isSingleton True = Nat
isSingleton False = List Nat

mkSingle : (x : Bool) -> isSingleton x
mkSingle True = 0
mkSingle False = []

-- Vector is like a List having length Nat elements
data Vector : Nat -> Type -> Type where
  NilVec : Vector Z a
  (::) : a -> Vector k a -> Vector (S k) a

-- Defining a set of Nat many elems like above
data FinSet : Nat -> Type where
  FZ : FinSet (S k)              -- zeroth element
  FS : FinSet k -> FinSet (S k)  -- k + 1 th element from k th element

-- using implicit arguments and proofs
appVec : Ord a => Vector n a -> Vector m a -> Vector (n + m) a
appVec NilVec vecm = vecm  -- 0 + left = left
appVec {n} vecn NilVec = rewrite plusZeroRightNeutral n in vecn  -- left + 0 = left
appVec {n=(S n)} {m=(S m)} (x :: xs) (y :: ys) =
  -- using sym to flip args, S (left + right) = left + S right
  rewrite sym $ plusSuccRightSucc n m in
    x :: y :: (appVec xs ys)

-- what is the i th element in a n sized Vector with i <= n?
indexer : Fin n -> Vect n a -> a
indexer FZ (x :: _) = x
indexer (FS i) (_ :: xs) = indexer i xs

isEmpty : {n : Nat} -> Vect n a -> Bool
isEmpty {n = Z} _ = True
isEmpty _ = False
%% spatial
src = kthtips_src;
options.J = 4;
options.L = 6;
options.parallel = 0;
w = wavelet_factory_2d_spatial(options, options);
features{1} = @(x)(mean(mean(format_scat(scat(x,w)),2),3));
db = prepare_database(src, features, options);

%% spatial with renorm
src = kthtips_src;
options.J = 4;
options.L = 6;
options.parallel = 0;
w = wavelet_factory_2d_spatial(options, options);
features{1} = @(x)(mean(mean(format_scat(renorm_scat_spatial(scat(x,w))),2),3));
db = prepare_database(src, features, options);

%%
db2 = db_pca(db, 100);

%% without order 0
db2 = db;
db2.features = db2.features(2:end, :);

%% this takes about an hour on 2.4 GHz core i7
options.parallel = 0;
db = prepare_database(src, features, options);

options.J = 5;
options.antialiasing = 0;
w = wavelet_factory_3d([480, 640], options);
features{1} = @(x)(sum(sum(format_scat(scat(x,w)),2),3));

%% this takes about two hours on 2.4 GHz core i7
options.parallel = 0;
db2 = prepare_database(src, features, options);

%% not averaged
src = uiuc_src;
options.parallel = 0;
options.J = 5;
options.L = 6;
w = wavelet_factory_2d_spatial(options, options);
features{1} = @(x)(scat(x,w));
db = prepare_database(src, features, options);

%% joint with slog
db3 = db2;
db3.features = log(db2.features);

%% classif with 200 random partitions and size 5 10 20
grid_train = [5,20,40];
n_fold = 10;
clear error_2d;
for i_fold = 1:n_fold
    for i_grid = 1:numel(grid_train)
        n_train = grid_train(i_grid);
        prop = n_train/81;
        [train_set, test_set] = create_partition(src, prop);
        train_opt.dim = n_train;
        model = affine_train(db, train_set, train_opt);
        labels = affine_test(db, model, test_set);
        error_2d(i_fold, i_grid) = classif_err(labels, test_set, src);
        fprintf('fold %d n_train %g acc %g \n', i_fold, n_train, 1-error_2d(i_fold, i_grid));
    end
end
% 0.5278 0.6775 0.8379

%%
grid_train = [5,20,40];
n_fold = 10;
clear error_2d;
for i_fold = 1:n_fold
    for i_grid = 1:numel(grid_train)
        n_train = grid_train(i_grid);
        prop = n_train/81;
        [train_set, test_set] = create_partition(src, prop);
        train_opt.dim = n_train;
        model = affine_train(db2, train_set, train_opt);
        labels = affine_test(db2, model, test_set);
        error_2d(i_fold, i_grid) = classif_err(labels, test_set, src);
        fprintf('fold %d n_train %g acc %g \n', i_fold, n_train, 1-error_2d(i_fold, i_grid));
    end
end
% 0.5278 0.6775 0.8379

%%
grid_train = [5,10,20];
n_fold = 1;
clear error_2d;
for i_fold = 1:n_fold
    for i_grid = 1:numel(grid_train)
        n_train = grid_train(i_grid);
        prop = n_train/81;
        [train_set, test_set] = create_partition(src, prop);
        train_opt.dim = n_train;
        model = affine_train(db2, train_set, train_opt);
        labels = affine_test(db2, model, test_set);
        error_2d(i_fold, i_grid) = classif_err(labels, test_set, src);
        fprintf('fold %d n_train %g acc %g \n', i_fold, n_train, 1-error_2d(i_fold, i_grid));
    end
end

%% joint scatt
for i = 1:100
    [train_set, test_set] = create_partition(src, 1/2);
    train_opt.dim = 20;
    model = affine_train(db2, train_set, train_opt);
    labels = affine_test(db2, model, test_set);
    err_3d(i) = classif_err(labels, test_set, src);
    fprintf('fold %d error %d \n', i, err_3d(i));
end

for i = 1:100
    [train_set, test_set] = create_partition(src, 1/8);
    train_opt.dim = 5;
    model = affine_train(db3, train_set, train_opt);
    labels = affine_test(db3, model, test_set);
    err_3d_log(i) = classif_err(labels, test_set, src);
    fprintf('fold %d correct %d \n', i, 100*(1-err_3d_log(i)));
end
[STATEMENT]
lemma edge_of_imp_subset: "S edge_of T \<Longrightarrow> S \<subseteq> T"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
 1. S edge_of T \<Longrightarrow> S \<subseteq> T
[PROOF STEP]
by (simp add: edge_of_def face_of_imp_subset)
data("iris") library(sqldf) colnames(iris)<-gsub("\\.","_",colnames(iris)) subiris<-sqldf("SELECT * FROM iris WHERE Sepal_Length>2.5") View(subiris) class(subiris)
import Js.Node.Module
import Js.Electron.App
import Js.Electron.Window

%lib Node "electron"

%default total

win : String -> Options
win dir = record { title  = Just "Hello Electron"
                 , url    = Just ("file://" ++ dir ++ "/view.html")
                 , width  = Just 800
                 , height = Just 600 } defaults

main : JS_IO ()
main = onReady $ do
  centre !(create $ win !dir)
/-
Copyright (c) 2021 Floris van Doorn. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Floris van Doorn
-/
import order.conditionally_complete_lattice
import data.int.least_greatest

/-!
## `ℤ` forms a conditionally complete linear order

The integers form a conditionally complete linear order.
-/

open int
open_locale classical
noncomputable theory

instance : conditionally_complete_linear_order ℤ :=
{ Sup := λ s, if h : s.nonempty ∧ bdd_above s then
    greatest_of_bdd (classical.some h.2) (classical.some_spec h.2) h.1 else 0,
  Inf := λ s, if h : s.nonempty ∧ bdd_below s then
    least_of_bdd (classical.some h.2) (classical.some_spec h.2) h.1 else 0,
  le_cSup := begin
    intros s n hs hns,
    have : s.nonempty ∧ bdd_above s := ⟨⟨n, hns⟩, hs⟩,
    rw [dif_pos this],
    exact (greatest_of_bdd _ _ _).2.2 n hns
  end,
  cSup_le := begin
    intros s n hs hns,
    have : s.nonempty ∧ bdd_above s := ⟨hs, ⟨n, hns⟩⟩,
    rw [dif_pos this],
    exact hns (greatest_of_bdd _ (classical.some_spec this.2) _).2.1
  end,
  cInf_le := begin
    intros s n hs hns,
    have : s.nonempty ∧ bdd_below s := ⟨⟨n, hns⟩, hs⟩,
    rw [dif_pos this],
    exact (least_of_bdd _ _ _).2.2 n hns
  end,
  le_cInf := begin
    intros s n hs hns,
    have : s.nonempty ∧ bdd_below s := ⟨hs, ⟨n, hns⟩⟩,
    rw [dif_pos this],
    exact hns (least_of_bdd _ (classical.some_spec this.2) _).2.1
  end,
  .. int.linear_order, ..linear_order.to_lattice }

namespace int

lemma cSup_eq_greatest_of_bdd {s : set ℤ} [decidable_pred (∈ s)] (b : ℤ)
  (Hb : ∀ z ∈ s, z ≤ b) (Hinh : ∃ z : ℤ, z ∈ s) :
  Sup s = greatest_of_bdd b Hb Hinh :=
begin
  convert dif_pos _ using 1,
  { convert coe_greatest_of_bdd_eq _ (classical.some_spec (⟨b, Hb⟩ : bdd_above s)) _ },
  { exact ⟨Hinh, b, Hb⟩, }
end

@[simp]
lemma cSup_empty : Sup (∅ : set ℤ) = 0 := dif_neg (by simp)

lemma cSup_of_not_bdd_above {s : set ℤ} (h : ¬ bdd_above s) : Sup s = 0 :=
dif_neg (by simp [h])

lemma cInf_eq_least_of_bdd {s : set ℤ} [decidable_pred (∈ s)] (b : ℤ)
  (Hb : ∀ z ∈ s, b ≤ z) (Hinh : ∃ z : ℤ, z ∈ s) :
  Inf s = least_of_bdd b Hb Hinh :=
begin
  convert dif_pos _ using 1,
  { convert coe_least_of_bdd_eq _ (classical.some_spec (⟨b, Hb⟩ : bdd_below s)) _ },
  { exact ⟨Hinh, b, Hb⟩, }
end

@[simp]
lemma cInf_empty : Inf (∅ : set ℤ) = 0 := dif_neg (by simp)

lemma cInf_of_not_bdd_below {s : set ℤ} (h : ¬ bdd_below s) : Inf s = 0 :=
dif_neg (by simp [h])

lemma cSup_mem {s : set ℤ} (h1 : s.nonempty) (h2 : bdd_above s) : Sup s ∈ s :=
begin
  convert (greatest_of_bdd _ (classical.some_spec h2) h1).2.1,
  exact dif_pos ⟨h1, h2⟩,
end

lemma cInf_mem {s : set ℤ} (h1 : s.nonempty) (h2 : bdd_below s) : Inf s ∈ s :=
begin
  convert (least_of_bdd _ (classical.some_spec h2) h1).2.1,
  exact dif_pos ⟨h1, h2⟩,
end

end int
(*
    Author:      David Sanan
    Maintainer:  David Sanan, sanan at ntu edu sg
    License:     LGPL
*)

(*  Title:      XVCGCon.thy
    Author:     David Sanan, NTU
    Copyright (C) 2015-2016 David Sanan
    Some rights reserved, NTU

This library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation; either version 2.1 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*)

theory XVcgCon
imports VcgSeq
begin

text {*
We introduce a syntactic variant of the let-expression so that we can
safely unfold it during verification condition generation. With the new
theorem attribute @{text "vcg_simp"} we can declare equalities to be used
by the verification condition generator, while simplifying assertions.
*}

syntax
  "_Let'" :: "[letbinds, basicblock] => basicblock" ("(LET (_)/ IN (_))" 23)

translations
  "_Let' (_binds b bs) e" == "_Let' b (_Let' bs e)"
  "_Let' (_bind x a) e" == "CONST Let' a (%x. e)"

lemma Let'_unfold: "Let' x f = f x"
  by (simp add: Let'_def Let_def)

lemma Let'_split_conv:
  "(Let' x (\<lambda>p. (case_prod (f p) (g p)))) =
   (Let' x (\<lambda>p. (f p) (fst (g p)) (snd (g p))))"
  by (simp add: split_def)

end