Datasets: AI4M
Column: text (string lengths 0 to 3.34M)
export QuantumStatesData, SqueezedStatesData, SqueezedThermalStatesData, gen_table_schema

abstract type QuantumStatesData end
struct SqueezedStatesData <: QuantumStatesData end
struct SqueezedThermalStatesData <: QuantumStatesData end

Base.string(::Type{QuantumStatesData}) = "quantum_states"
Base.string(::Type{SqueezedStatesData}) = "squeezed_states"
Base.string(::Type{SqueezedThermalStatesData}) = "squeezed_thermal_states"

function gen_table_schema(table::Type{SqueezedStatesData})
    return """
    CREATE TABLE $(string(table)) (
        id UUID DEFAULT uuid_generate_v4(),
        r FLOAT8 NOT NULL,
        theta FLOAT8 NOT NULL,
        dim INT8 NOT NULL,
        rho BYTEA COMPRESSION lz4 NOT NULL,
        n_points INT8 NOT NULL,
        bhd BYTEA COMPRESSION lz4 NOT NULL,
        w_range INT8 NOT NULL,
        w BYTEA COMPRESSION lz4 NOT NULL,
        PRIMARY KEY (ID)
    );
    """
end

function gen_table_schema(table::Type{SqueezedThermalStatesData})
    return """
    CREATE TABLE $(string(table)) (
        id UUID DEFAULT uuid_generate_v4(),
        r FLOAT8 NOT NULL,
        theta FLOAT8 NOT NULL,
        nbar FLOAT8 NOT NULL,
        dim INT8 NOT NULL,
        rho BYTEA COMPRESSION lz4 NOT NULL,
        n_points INT8 NOT NULL,
        bhd BYTEA COMPRESSION lz4 NOT NULL,
        w_range INT8 NOT NULL,
        w BYTEA COMPRESSION lz4 NOT NULL,
        PRIMARY KEY (ID)
    );
    """
end
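The two gen_table_schema methods above only build PostgreSQL DDL strings (COMPRESSION lz4 needs PostgreSQL 14 or newer, and the id default needs the uuid-ossp extension). As a minimal sketch of how such a schema might be applied, assuming a locally reachable database and the psycopg2 driver; the connection parameters are hypothetical, and the LZ4 column compression is dropped so the statement runs on stock builds:

import psycopg2

# DDL equivalent to gen_table_schema(SqueezedStatesData), minus COMPRESSION lz4.
SQUEEZED_STATES_DDL = """
CREATE TABLE squeezed_states (
    id UUID DEFAULT uuid_generate_v4(),
    r FLOAT8 NOT NULL,
    theta FLOAT8 NOT NULL,
    dim INT8 NOT NULL,
    rho BYTEA NOT NULL,
    n_points INT8 NOT NULL,
    bhd BYTEA NOT NULL,
    w_range INT8 NOT NULL,
    w BYTEA NOT NULL,
    PRIMARY KEY (id)
);
"""

conn = psycopg2.connect(dbname="quantum_states", user="postgres")  # assumed connection settings
with conn.cursor() as cur:
    cur.execute('CREATE EXTENSION IF NOT EXISTS "uuid-ossp";')  # provides uuid_generate_v4()
    cur.execute(SQUEEZED_STATES_DDL)
conn.commit()
conn.close()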
-- @@stderr -- dtrace: failed to compile script test/unittest/actions/printf/err.D_PRINTF_ARG_EXTRA.addr_width.d: [D_PRINTF_ARG_EXTRA] line 20: printf( ) prototype mismatch: only 1 arguments required by this format string
module Injection import Data.Vect import Data.Nat %default total |||Is a Nat different from all the Nats in a vector? public export isDifferent : Nat -> Vect n Nat -> Bool isDifferent n [] = True isDifferent n (x :: xs) = (n /= x) && isDifferent n xs |||Are all the elements of a Nat vector different? public export allDifferent : Vect n Nat -> Bool allDifferent [] = True allDifferent (x :: xs) = isDifferent x xs && allDifferent xs |||Are all the elements in a vector smaller than a given Nat? public export allSmaller : Vect n Nat -> Nat -> Bool allSmaller [] m = True allSmaller (x :: xs) m = (x < m) && allSmaller xs m public export isInjective : Nat -> Vect n Nat -> Bool isInjective m v = allSmaller v m && allDifferent v |||Returns the element at index k in a vector public export index : (k : Nat) -> Vect n Nat -> {auto prf : (k < n) = True} -> Nat index Z (x::_) = x index (S k) (_::xs) = index k xs |||Returns the vector [1,2,...,n] public export rangeVect : (startIndex : Nat) -> (length : Nat) -> Vect length Nat rangeVect k Z = [] rangeVect k (S i) = k :: rangeVect (S k) i
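A reading note, not part of the original module: a length-n vector v whose entries are all below m (allSmaller) and pairwise distinct (allDifferent) is exactly the graph of an injective function, which is what isInjective m v decides:

\[ \mathrm{isInjective}\;m\;v = \mathrm{True} \iff \bigl(\forall i.\; v_i < m\bigr) \wedge \bigl(\forall i \neq j.\; v_i \neq v_j\bigr), \]

i.e. \(i \mapsto v_i\) is an injection from \(\{0,\dots,n-1\}\) into \(\{0,\dots,m-1\}\). For example, [2,0,3] with m = 4 passes, while [2,0,2] fails allDifferent.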
/- Copyright (c) 2021 Shing Tak Lam. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Shing Tak Lam -/ import topology.homotopy.basic /-! # Homotopy equivalences between topological spaces In this file, we define homotopy equivalences between topological spaces `X` and `Y` as a pair of functions `f : C(X, Y)` and `g : C(Y, X)` such that `f.comp g` and `g.comp f` are both homotopic to `id`. ## Main definitions - `continuous_map.homotopy_equiv` is the type of homotopy equivalences between topological spaces. ## Notation We introduce the notation `X ≃ₕ Y` for `continuous_map.homotopy_equiv X Y` in the `continuous_map` locale. -/ universes u v w variables {X : Type u} {Y : Type v} {Z : Type w} variables [topological_space X] [topological_space Y] [topological_space Z] namespace continuous_map /-- A homotopy equivalence between topological spaces `X` and `Y` are a pair of functions `to_fun : C(X, Y)` and `inv_fun : C(Y, X)` such that `to_fun.comp inv_fun` and `inv_fun.comp to_fun` are both homotopic to `id`. -/ @[ext] structure homotopy_equiv (X : Type u) (Y : Type v) [topological_space X] [topological_space Y] := (to_fun : C(X, Y)) (inv_fun : C(Y, X)) (left_inv : (inv_fun.comp to_fun).homotopic (continuous_map.id X)) (right_inv : (to_fun.comp inv_fun).homotopic (continuous_map.id Y)) localized "infix ` ≃ₕ `:25 := continuous_map.homotopy_equiv" in continuous_map namespace homotopy_equiv instance : has_coe_to_fun (homotopy_equiv X Y) (λ _, X → Y) := ⟨λ h, h.to_fun⟩ @[simp] lemma to_fun_eq_coe (h : homotopy_equiv X Y) : (h.to_fun : X → Y) = h := rfl @[continuity] lemma continuous (h : homotopy_equiv X Y) : continuous h := h.to_fun.continuous end homotopy_equiv end continuous_map open_locale continuous_map namespace homeomorph /-- Any homeomorphism is a homotopy equivalence. -/ def to_homotopy_equiv (h : X ≃ₜ Y) : X ≃ₕ Y := { to_fun := ⟨h⟩, inv_fun := ⟨h.symm⟩, left_inv := by { convert continuous_map.homotopic.refl _, ext, simp }, right_inv := by { convert continuous_map.homotopic.refl _, ext, simp } } @[simp] lemma coe_to_homotopy_equiv (h : X ≃ₜ Y) : ⇑(h.to_homotopy_equiv) = h := rfl end homeomorph namespace continuous_map namespace homotopy_equiv /-- If `X` is homotopy equivalent to `Y`, then `Y` is homotopy equivalent to `X`. -/ def symm (h : X ≃ₕ Y) : Y ≃ₕ X := { to_fun := h.inv_fun, inv_fun := h.to_fun, left_inv := h.right_inv, right_inv := h.left_inv } @[simp] lemma coe_inv_fun (h : homotopy_equiv X Y) : (⇑h.inv_fun : Y → X) = ⇑h.symm := rfl /-- See Note [custom simps projection]. We need to specify this projection explicitly in this case, because it is a composition of multiple projections. -/ def simps.apply (h : X ≃ₕ Y) : X → Y := h /-- See Note [custom simps projection]. We need to specify this projection explicitly in this case, because it is a composition of multiple projections. -/ def simps.symm_apply (h : X ≃ₕ Y) : Y → X := h.symm initialize_simps_projections homotopy_equiv (to_fun_to_fun -> apply, inv_fun_to_fun -> symm_apply, -to_fun, -inv_fun) /-- Any topological space is homotopy equivalent to itself. -/ @[simps] def refl (X : Type u) [topological_space X] : X ≃ₕ X := (homeomorph.refl X).to_homotopy_equiv instance : inhabited (homotopy_equiv unit unit) := ⟨refl unit⟩ /-- If `X` is homotopy equivalent to `Y`, and `Y` is homotopy equivalent to `Z`, then `X` is homotopy equivalent to `Z`. 
-/ @[simps] def trans (h₁ : X ≃ₕ Y) (h₂ : Y ≃ₕ Z) : X ≃ₕ Z := { to_fun := h₂.to_fun.comp h₁.to_fun, inv_fun := h₁.inv_fun.comp h₂.inv_fun, left_inv := begin refine homotopic.trans _ h₁.left_inv, change ((h₁.inv_fun.comp h₂.inv_fun).comp (h₂.to_fun.comp h₁.to_fun)) with h₁.inv_fun.comp ((h₂.inv_fun.comp h₂.to_fun).comp h₁.to_fun), refine homotopic.hcomp _ (homotopic.refl _), refine homotopic.trans ((homotopic.refl _).hcomp h₂.left_inv) _, -- simp, rw continuous_map.id_comp, end, right_inv := begin refine homotopic.trans _ h₂.right_inv, change ((h₂.to_fun.comp h₁.to_fun).comp (h₁.inv_fun.comp h₂.inv_fun)) with h₂.to_fun.comp ((h₁.to_fun.comp h₁.inv_fun).comp h₂.inv_fun), refine homotopic.hcomp _ (homotopic.refl _), refine homotopic.trans ((homotopic.refl _).hcomp h₁.right_inv) _, rw id_comp, end } lemma symm_trans (h₁ : X ≃ₕ Y) (h₂ : Y ≃ₕ Z) : (h₁.trans h₂).symm = h₂.symm.trans h₁.symm := by ext; refl end homotopy_equiv end continuous_map open continuous_map namespace homeomorph @[simp] lemma refl_to_homotopy_equiv (X : Type u) [topological_space X] : (homeomorph.refl X).to_homotopy_equiv = homotopy_equiv.refl X := rfl @[simp] lemma symm_to_homotopy_equiv (h : X ≃ₜ Y) : h.symm.to_homotopy_equiv = h.to_homotopy_equiv.symm := rfl @[simp] lemma trans_to_homotopy_equiv (h₀ : X ≃ₜ Y) (h₁ : Y ≃ₜ Z) : (h₀.trans h₁).to_homotopy_equiv = h₀.to_homotopy_equiv.trans h₁.to_homotopy_equiv := rfl end homeomorph
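In conventional notation (a summary of the definitions above, not extra mathlib code), a homotopy equivalence X ≃ₕ Y is a pair of continuous maps

\[ f \in C(X,Y), \quad g \in C(Y,X), \qquad g \circ f \simeq \mathrm{id}_X, \quad f \circ g \simeq \mathrm{id}_Y, \]

where ≃ denotes homotopy of maps. symm swaps the roles of f and g, and trans composes two such pairs: for h₁ : X ≃ₕ Y and h₂ : Y ≃ₕ Z the forward map is h₂.to_fun.comp h₁.to_fun, the inverse is h₁.inv_fun.comp h₂.inv_fun, and the left_inv/right_inv proofs cancel the inner pair up to homotopy.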
{-# OPTIONS --cubical --no-import-sorts --no-exact-split --safe #-} module Cubical.Data.Nat.Order where open import Cubical.Foundations.Prelude open import Cubical.Foundations.Function open import Cubical.Foundations.HLevels open import Cubical.Data.Empty as ⊥ open import Cubical.Data.Sigma open import Cubical.Data.Sum as ⊎ open import Cubical.Data.Nat.Base open import Cubical.Data.Nat.Properties open import Cubical.Induction.WellFounded open import Cubical.Relation.Nullary infix 4 _≤_ _<_ _≤_ : ℕ → ℕ → Type₀ m ≤ n = Σ[ k ∈ ℕ ] k + m ≡ n _<_ : ℕ → ℕ → Type₀ m < n = suc m ≤ n data Trichotomy (m n : ℕ) : Type₀ where lt : m < n → Trichotomy m n eq : m ≡ n → Trichotomy m n gt : n < m → Trichotomy m n private variable k l m n : ℕ private witness-prop : ∀ j → isProp (j + m ≡ n) witness-prop {m} {n} j = isSetℕ (j + m) n m≤n-isProp : isProp (m ≤ n) m≤n-isProp {m} {n} (k , p) (l , q) = Σ≡Prop witness-prop lemma where lemma : k ≡ l lemma = inj-+m (p ∙ (sym q)) zero-≤ : 0 ≤ n zero-≤ {n} = n , +-zero n suc-≤-suc : m ≤ n → suc m ≤ suc n suc-≤-suc (k , p) = k , (+-suc k _) ∙ (cong suc p) ≤-+k : m ≤ n → m + k ≤ n + k ≤-+k {m} {k = k} (i , p) = i , +-assoc i m k ∙ cong (_+ k) p ≤-k+ : m ≤ n → k + m ≤ k + n ≤-k+ {m} {n} {k} = subst (_≤ k + n) (+-comm m k) ∘ subst (m + k ≤_) (+-comm n k) ∘ ≤-+k pred-≤-pred : suc m ≤ suc n → m ≤ n pred-≤-pred (k , p) = k , injSuc ((sym (+-suc k _)) ∙ p) ≤-refl : m ≤ m ≤-refl = 0 , refl ≤-suc : m ≤ n → m ≤ suc n ≤-suc (k , p) = suc k , cong suc p ≤-predℕ : predℕ n ≤ n ≤-predℕ {zero} = ≤-refl ≤-predℕ {suc n} = ≤-suc ≤-refl ≤-trans : k ≤ m → m ≤ n → k ≤ n ≤-trans {k} {m} {n} (i , p) (j , q) = i + j , l2 ∙ (l1 ∙ q) where l1 : j + i + k ≡ j + m l1 = (sym (+-assoc j i k)) ∙ (cong (j +_) p) l2 : i + j + k ≡ j + i + k l2 = cong (_+ k) (+-comm i j) ≤-antisym : m ≤ n → n ≤ m → m ≡ n ≤-antisym {m} (i , p) (j , q) = (cong (_+ m) l3) ∙ p where l1 : j + i + m ≡ m l1 = (sym (+-assoc j i m)) ∙ ((cong (j +_) p) ∙ q) l2 : j + i ≡ 0 l2 = m+n≡n→m≡0 l1 l3 : 0 ≡ i l3 = sym (snd (m+n≡0→m≡0×n≡0 l2)) ≤-k+-cancel : k + m ≤ k + n → m ≤ n ≤-k+-cancel {k} {m} (l , p) = l , inj-m+ (sub k m ∙ p) where sub : ∀ k m → k + (l + m) ≡ l + (k + m) sub k m = +-assoc k l m ∙ cong (_+ m) (+-comm k l) ∙ sym (+-assoc l k m) ≤-+k-cancel : m + k ≤ n + k → m ≤ n ≤-+k-cancel {m} {k} {n} (l , p) = l , cancelled where cancelled : l + m ≡ n cancelled = inj-+m (sym (+-assoc l m k) ∙ p) ≤-·k : m ≤ n → m · k ≤ n · k ≤-·k {m} {n} {k} (d , r) = d · k , reason where reason : d · k + m · k ≡ n · k reason = d · k + m · k ≡⟨ ·-distribʳ d m k ⟩ (d + m) · k ≡⟨ cong (_· k) r ⟩ n · k ∎ <-k+-cancel : k + m < k + n → m < n <-k+-cancel {k} {m} {n} = ≤-k+-cancel ∘ subst (_≤ k + n) (sym (+-suc k m)) ¬-<-zero : ¬ m < 0 ¬-<-zero (k , p) = snotz ((sym (+-suc k _)) ∙ p) ¬m<m : ¬ m < m ¬m<m {m} = ¬-<-zero ∘ ≤-+k-cancel {k = m} ≤0→≡0 : n ≤ 0 → n ≡ 0 ≤0→≡0 {zero} ineq = refl ≤0→≡0 {suc n} ineq = ⊥.rec (¬-<-zero ineq) predℕ-≤-predℕ : m ≤ n → (predℕ m) ≤ (predℕ n) predℕ-≤-predℕ {zero} {zero} ineq = ≤-refl predℕ-≤-predℕ {zero} {suc n} ineq = zero-≤ predℕ-≤-predℕ {suc m} {zero} ineq = ⊥.rec (¬-<-zero ineq) predℕ-≤-predℕ {suc m} {suc n} ineq = pred-≤-pred ineq ¬m+n<m : ¬ m + n < m ¬m+n<m {m} {n} = ¬-<-zero ∘ <-k+-cancel ∘ subst (m + n <_) (sym (+-zero m)) <-weaken : m < n → m ≤ n <-weaken (k , p) = suc k , sym (+-suc k _) ∙ p ≤<-trans : l ≤ m → m < n → l < n ≤<-trans p = ≤-trans (suc-≤-suc p) <≤-trans : l < m → m ≤ n → l < n <≤-trans = ≤-trans <-trans : l < m → m < n → l < n <-trans p = ≤<-trans (<-weaken p) <-asym : m < n → ¬ n ≤ m <-asym m<n = ¬m<m ∘ 
<≤-trans m<n <-+k : m < n → m + k < n + k <-+k p = ≤-+k p <-k+ : m < n → k + m < k + n <-k+ {m} {n} {k} p = subst (λ km → km ≤ k + n) (+-suc k m) (≤-k+ p) <-·sk : m < n → m · suc k < n · suc k <-·sk {m} {n} {k} (d , r) = (d · suc k + k) , reason where reason : (d · suc k + k) + suc (m · suc k) ≡ n · suc k reason = (d · suc k + k) + suc (m · suc k) ≡⟨ sym (+-assoc (d · suc k) k _) ⟩ d · suc k + (k + suc (m · suc k)) ≡[ i ]⟨ d · suc k + +-suc k (m · suc k) i ⟩ d · suc k + suc m · suc k ≡⟨ ·-distribʳ d (suc m) (suc k) ⟩ (d + suc m) · suc k ≡⟨ cong (_· suc k) r ⟩ n · suc k ∎ ≤-∸-+-cancel : m ≤ n → (n ∸ m) + m ≡ n ≤-∸-+-cancel {zero} {n} _ = +-zero _ ≤-∸-+-cancel {suc m} {zero} m≤n = ⊥.rec (¬-<-zero m≤n) ≤-∸-+-cancel {suc m} {suc n} m+1≤n+1 = +-suc _ _ ∙ cong suc (≤-∸-+-cancel (pred-≤-pred m+1≤n+1)) left-≤-max : m ≤ max m n left-≤-max {zero} {n} = zero-≤ left-≤-max {suc m} {zero} = ≤-refl left-≤-max {suc m} {suc n} = suc-≤-suc left-≤-max right-≤-max : n ≤ max m n right-≤-max {zero} {m} = zero-≤ right-≤-max {suc n} {zero} = ≤-refl right-≤-max {suc n} {suc m} = suc-≤-suc right-≤-max min-≤-left : min m n ≤ m min-≤-left {zero} {n} = ≤-refl min-≤-left {suc m} {zero} = zero-≤ min-≤-left {suc m} {suc n} = suc-≤-suc min-≤-left min-≤-right : min m n ≤ n min-≤-right {zero} {n} = zero-≤ min-≤-right {suc m} {zero} = ≤-refl min-≤-right {suc m} {suc n} = suc-≤-suc min-≤-right Trichotomy-suc : Trichotomy m n → Trichotomy (suc m) (suc n) Trichotomy-suc (lt m<n) = lt (suc-≤-suc m<n) Trichotomy-suc (eq m=n) = eq (cong suc m=n) Trichotomy-suc (gt n<m) = gt (suc-≤-suc n<m) _≟_ : ∀ m n → Trichotomy m n zero ≟ zero = eq refl zero ≟ suc n = lt (n , +-comm n 1) suc m ≟ zero = gt (m , +-comm m 1) suc m ≟ suc n = Trichotomy-suc (m ≟ n) <-split : m < suc n → (m < n) ⊎ (m ≡ n) <-split {n = zero} = inr ∘ snd ∘ m+n≡0→m≡0×n≡0 ∘ snd ∘ pred-≤-pred <-split {zero} {suc n} = λ _ → inl (suc-≤-suc zero-≤) <-split {suc m} {suc n} = ⊎.map suc-≤-suc (cong suc) ∘ <-split ∘ pred-≤-pred private acc-suc : Acc _<_ n → Acc _<_ (suc n) acc-suc a = acc λ y y<sn → case <-split y<sn of λ { (inl y<n) → access a y y<n ; (inr y≡n) → subst _ (sym y≡n) a } <-wellfounded : WellFounded _<_ <-wellfounded zero = acc λ _ → ⊥.rec ∘ ¬-<-zero <-wellfounded (suc n) = acc-suc (<-wellfounded n) <→≢ : n < m → ¬ n ≡ m <→≢ {n} {m} p q = ¬m<m (subst (_< m) q p) module _ (b₀ : ℕ) (P : ℕ → Type₀) (base : ∀ n → n < suc b₀ → P n) (step : ∀ n → P n → P (suc b₀ + n)) where open WFI (<-wellfounded) private dichotomy : ∀ b n → (n < b) ⊎ (Σ[ m ∈ ℕ ] n ≡ b + m) dichotomy b n = case n ≟ b return (λ _ → (n < b) ⊎ (Σ[ m ∈ ℕ ] n ≡ b + m)) of λ { (lt o) → inl o ; (eq p) → inr (0 , p ∙ sym (+-zero b)) ; (gt (m , p)) → inr (suc m , sym p ∙ +-suc m b ∙ +-comm (suc m) b) } dichotomy<≡ : ∀ b n → (n<b : n < b) → dichotomy b n ≡ inl n<b dichotomy<≡ b n n<b = case dichotomy b n return (λ d → d ≡ inl n<b) of λ { (inl x) → cong inl (m≤n-isProp x n<b) ; (inr (m , p)) → ⊥.rec (<-asym n<b (m , sym (p ∙ +-comm b m))) } dichotomy+≡ : ∀ b m n → (p : n ≡ b + m) → dichotomy b n ≡ inr (m , p) dichotomy+≡ b m n p = case dichotomy b n return (λ d → d ≡ inr (m , p)) of λ { (inl n<b) → ⊥.rec (<-asym n<b (m , +-comm m b ∙ sym p)) ; (inr (m' , q)) → cong inr (Σ≡Prop (λ x → isSetℕ n (b + x)) (inj-m+ {m = b} (sym q ∙ p))) } b = suc b₀ lemma₁ : ∀{x y z} → x ≡ suc z + y → y < x lemma₁ {y = y} {z} p = z , +-suc z y ∙ sym p subStep : (n : ℕ) → (∀ m → m < n → P m) → (n < b) ⊎ (Σ[ m ∈ ℕ ] n ≡ b + m) → P n subStep n _ (inl l) = base n l subStep n rec (inr (m , p)) = transport (cong P (sym p)) (step m (rec m 
(lemma₁ p))) wfStep : (n : ℕ) → (∀ m → m < n → P m) → P n wfStep n rec = subStep n rec (dichotomy b n) wfStepLemma₀ : ∀ n (n<b : n < b) rec → wfStep n rec ≡ base n n<b wfStepLemma₀ n n<b rec = cong (subStep n rec) (dichotomy<≡ b n n<b) wfStepLemma₁ : ∀ n rec → wfStep (b + n) rec ≡ step n (rec n (lemma₁ refl)) wfStepLemma₁ n rec = cong (subStep (b + n) rec) (dichotomy+≡ b n (b + n) refl) ∙ transportRefl _ +induction : ∀ n → P n +induction = induction wfStep +inductionBase : ∀ n → (l : n < b) → +induction n ≡ base n l +inductionBase n l = induction-compute wfStep n ∙ wfStepLemma₀ n l _ +inductionStep : ∀ n → +induction (b + n) ≡ step n (+induction n) +inductionStep n = induction-compute wfStep (b + n) ∙ wfStepLemma₁ n _ module <-Reasoning where -- TODO: would it be better to mirror the way it is done in the agda-stdlib? infixr 2 _<⟨_⟩_ _≤<⟨_⟩_ _≤⟨_⟩_ _<≤⟨_⟩_ _≡<⟨_⟩_ _≡≤⟨_⟩_ _<≡⟨_⟩_ _≤≡⟨_⟩_ _<⟨_⟩_ : ∀ k → k < n → n < m → k < m _ <⟨ p ⟩ q = <-trans p q _≤<⟨_⟩_ : ∀ k → k ≤ n → n < m → k < m _ ≤<⟨ p ⟩ q = ≤<-trans p q _≤⟨_⟩_ : ∀ k → k ≤ n → n ≤ m → k ≤ m _ ≤⟨ p ⟩ q = ≤-trans p q _<≤⟨_⟩_ : ∀ k → k < n → n ≤ m → k < m _ <≤⟨ p ⟩ q = <≤-trans p q _≡≤⟨_⟩_ : ∀ k → k ≡ l → l ≤ m → k ≤ m _ ≡≤⟨ p ⟩ q = subst (λ k → k ≤ _) (sym p) q _≡<⟨_⟩_ : ∀ k → k ≡ l → l < m → k < m _ ≡<⟨ p ⟩ q = _ ≡≤⟨ cong suc p ⟩ q _≤≡⟨_⟩_ : ∀ k → k ≤ l → l ≡ m → k ≤ m _ ≤≡⟨ p ⟩ q = subst (λ l → _ ≤ l) q p _<≡⟨_⟩_ : ∀ k → k < l → l ≡ m → k < m _ <≡⟨ p ⟩ q = _ ≤≡⟨ p ⟩ q
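As a quick illustration of the order used throughout this module (an editorial note, not part of the file): m ≤ n is a Σ-type carrying an explicit difference k, and m < n abbreviates suc m ≤ n:

\[ m \le n \;:=\; \Sigma\,k{:}\,\mathbb{N}.\; k + m \equiv n, \qquad m < n \;:=\; \mathrm{suc}\,m \le n. \]

For instance, 2 ≤ 5 is witnessed by (3 , refl) since 3 + 2 computes to 5, and 2 < 5 by (2 , refl) since 2 + suc 2 computes to 5; witness-prop and m≤n-isProp show the witness is unique, so _≤_ is proposition-valued.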
# Adult Census Income: binary classification with a decision tree (rpart).
input = read.csv("D:/vaibhav/trend nxt/topgear/R Community/adult_census_income/Adult Census Income Binary Classification dataset.csv",
                 header = TRUE)
head(input)
summary(input)
str(input)
input$income2 = ifelse(input$income == " <=50K", 0, 1)
head(input)

# Re-read, treating " ?" and " " as missing values, and drop incomplete rows.
input1 = read.csv("D:/vaibhav/trend nxt/topgear/R Community/adult_census_income/Adult Census Income Binary Classification dataset.csv",
                  header = TRUE, na.strings = c(" ?", " "))
input1 = na.omit(input1)   # assign the result; a bare na.omit(input1) does not actually remove the NA rows
head(input1)
summary(input1)
str(input1)
input1$income2 = ifelse(input1$income == " <=50K", 0, 1)
head(input1)

# Shuffle, then use the first 80% of rows for training and the rest for testing.
shuffle_index = sample(1:nrow(input1))
head(shuffle_index)
input1 = input1[shuffle_index, ]
head(input1)
train_fraction = 0.8
data_size = nrow(input1)
train_sample = 1:(train_fraction * data_size)
train_data = input1[train_sample, ]
test_data = input1[-train_sample, ]

## install if package is not installed
## install.packages("rpart.plot")
library(rpart)
library(rpart.plot)

fit <- rpart(income2 ~ age + workclass + fnlwgt + education + marital.status + occupation +
               relationship + race + sex + capital.gain + capital.loss + hours.per.week + native.country,
             data = train_data, method = 'class')
rpart.plot(fit, extra = 106)

predict_unseen <- predict(fit, test_data, type = 'class')
table_mat <- table(test_data$income2, predict_unseen)
table_mat
accuracy_Test <- sum(diag(table_mat)) / sum(table_mat)
accuracy_Test
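For comparison only (not part of the original script): roughly the same pipeline, treating " ?" and " " as missing, dropping those rows, splitting 80/20, fitting a classification tree, and reporting a confusion matrix and accuracy, sketched in Python assuming pandas and scikit-learn; the shortened file path and the use of one-hot encoding are assumptions.

import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import confusion_matrix, accuracy_score

# Same missing-value handling as the R script, then drop incomplete rows.
df = pd.read_csv("Adult Census Income Binary Classification dataset.csv",
                 na_values=[" ?", " "]).dropna()
df["income2"] = (df["income"].str.strip() != "<=50K").astype(int)

# scikit-learn trees need numeric inputs, so one-hot encode the categorical predictors.
X = pd.get_dummies(df.drop(columns=["income", "income2"]))
y = df["income2"]

X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8, shuffle=True, random_state=0)

fit = DecisionTreeClassifier().fit(X_train, y_train)
pred = fit.predict(X_test)
print(confusion_matrix(y_test, pred))   # analogue of table(test_data$income2, predict_unseen)
print(accuracy_score(y_test, pred))     # analogue of accuracy_Test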
!--------------------------------------------------------------------- !--------------------------------------------------------------------- subroutine setup_mpi !--------------------------------------------------------------------- !--------------------------------------------------------------------- !--------------------------------------------------------------------- ! set up MPI stuff !--------------------------------------------------------------------- use bt_data use mpinpb implicit none integer error, nc call mpi_init(error) if (.not. convertdouble) then dp_type = MPI_DOUBLE_PRECISION else dp_type = MPI_REAL endif !--------------------------------------------------------------------- ! get a process grid that requires a square number of procs. ! excess ranks are marked as inactive. !--------------------------------------------------------------------- call get_active_nprocs(1, nc, maxcells, no_nodes, & & total_nodes, node, comm_setup, active) if (.not. active) return call mpi_comm_dup(comm_setup, comm_solve, error) call mpi_comm_dup(comm_setup, comm_rhs, error) !--------------------------------------------------------------------- ! let node 0 be the root for the group (there is only one) !--------------------------------------------------------------------- root = 0 return end
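A small illustrative counterpart (not part of the NPB source): the same pattern of duplicating a setup communicator so that the solve and RHS phases each get their own communicator, sketched with mpi4py; the variable names simply echo the Fortran ones.

# Hypothetical mpi4py sketch of the communicator setup done in setup_mpi.
# mpi4py calls MPI_Init automatically when the module is imported.
from mpi4py import MPI

comm_setup = MPI.COMM_WORLD
node = comm_setup.Get_rank()
total_nodes = comm_setup.Get_size()

# Analogue of the two mpi_comm_dup calls.
comm_solve = comm_setup.Dup()
comm_rhs = comm_setup.Dup()

root = 0  # rank 0 is the root of the (single) group
if node == root:
    print(f"running on {total_nodes} ranks")

Run, for example, with mpiexec -n 4 python setup_mpi_sketch.py; the square-process-grid selection done by get_active_nprocs is not reproduced here.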
module x02-842Induction where -- Library import Relation.Binary.PropositionalEquality as Eq open Eq using (_≡_; refl; cong; sym) open Eq.≡-Reasoning using (begin_; _≡⟨⟩_; _≡⟨_⟩_; _∎) open import Data.Nat using (ℕ; zero; suc; _+_; _*_; _∸_) -- PLFA coverage of identity, associativity, commutativity, distributivity. -- An example of the associative law for addition. _ : (3 + 4) + 5 ≡ 3 + (4 + 5) _ = begin (3 + 4) + 5 ≡⟨⟩ 7 + 5 ≡⟨⟩ 12 ≡⟨⟩ 3 + 9 ≡⟨⟩ 3 + (4 + 5) ∎ -- A theorem easy to prove. +-identityᴸ : ∀ (m : ℕ) → zero + m ≡ m +-identityᴸ m = {!!} -- A first nontrivial theorem. -- An equational proof is shown in PLFA. -- It uses helpers cong and sym imported from the standard library, -- and a form of equational reasoning that allows more elaborate justification. -- Instead we will use 'rewrite'. +-identityʳ : ∀ (m : ℕ) → m + zero ≡ m +-identityʳ m = {!!} -- Associativity of addition. -- (Done first in PLFA.) +-assoc : ∀ (m n p : ℕ) → (m + n) + p ≡ m + (n + p) +-assoc m n p = {!!} -- A useful lemma about addition. -- Equational proof shown in PLFA. +-suc : ∀ (m n : ℕ) → m + suc n ≡ suc (m + n) +-suc m n = {!!} -- Commutativity of addition. -- Equational proof shown in PLFA. +-comm : ∀ (m n : ℕ) → m + n ≡ n + m +-comm m n = {!!} -- 842 exercise: AddSwap (1 point) -- Please do this without using induction/recursion. +-swap : ∀ (m n p : ℕ) → (m + n) + p ≡ n + (m + p) +-swap m n p = {!!} -- 842 exercise: AddDistMult (2 points) -- Show that addition distributes over multiplication. *-+-rdistrib : ∀ (m n p : ℕ) → (m + n) * p ≡ m * p + n * p *-+-rdistrib m n p = {!!} -- 842 exercise: MultAssoc (2 points) -- Show that multiplication is associative. *-assoc : ∀ (m n p : ℕ) → (m * n) * p ≡ m * (n * p) *-assoc m n p = {!!} -- 842 exercise: MultComm (3 points) -- Show that multiplication is commutative. -- As with the addition proof above, helper lemmas will be needed. *-comm : ∀ (m n : ℕ) → m * n ≡ n * m *-comm m n = {!!} -- 842 exercise: LeftMonusZero (1 point) -- PLFA asks "Did your proof require induction?" -- (which should give you an indication of the expected answer). 0∸n≡0 : ∀ (m : ℕ) → zero ∸ m ≡ zero 0∸n≡0 m = {!!} -- 842 exercise: MonusAssocish (2 points) -- Show a form of associativity for monus. ∸-+-assoc : ∀ (m n p : ℕ) → m ∸ n ∸ p ≡ m ∸ (n + p) ∸-+-assoc m n p = {!!} -- 842 extended exercise: properties of binary representation. -- This is based on the PLFA Bin-laws exercise. -- Copied from 842Naturals. data Bin-ℕ : Set where bits : Bin-ℕ _x0 : Bin-ℕ → Bin-ℕ _x1 : Bin-ℕ → Bin-ℕ dbl : ℕ → ℕ dbl zero = zero dbl (suc n) = suc (suc (dbl n)) -- Copy your versions of 'inc', 'to', 'from', 'bin-+' over from 842Naturals. -- You may choose to change them here to make proofs easier. -- But make sure to test them if you do! inc : Bin-ℕ → Bin-ℕ inc m = {!!} tob : ℕ → Bin-ℕ tob m = {!!} fromb : Bin-ℕ → ℕ fromb m = {!!} _bin-+_ : Bin-ℕ → Bin-ℕ → Bin-ℕ m bin-+ n = {!!} -- 842 exercise: DoubleB (1 point) -- Write the Bin-ℕ version of dbl, here called dblb. -- As with the other Bin-ℕ operations, don't use tob/fromb. dblb : Bin-ℕ → Bin-ℕ dblb m = {!!} -- Here are some properties of tob/fromb/inc suggested by PLFA Induction. -- Please complete the proofs. -- 842 exercise: FromInc (1 point) from∘inc : ∀ (m : Bin-ℕ) → fromb (inc m) ≡ suc (fromb m) from∘inc m = {!!} -- 842 exercise: FromToB (1 point) from∘tob : ∀ (m : ℕ) → fromb (tob m) ≡ m from∘tob m = {!!} -- 842 exercise: ToFromB (2 points) -- The property ∀ (m : Bin-ℕ) → tob (fromb m) ≡ m cannot be proved. -- Can you see why? 
-- However, this restriction of it can be proved. to/from-corr : ∀ (m : Bin-ℕ) (n : ℕ) → m ≡ tob n → fromb m ≡ n to/from-corr m n m≡tn = {!!} -- Here are a few more properties for you to prove. -- 842 exercise: DblBInc (1 point) dblb∘inc : ∀ (m : Bin-ℕ) → dblb (inc m) ≡ inc (inc (dblb m)) dblb∘inc m = {!!} -- 842 exercise: ToDbl (1 point) to∘dbl : ∀ (m : ℕ) → tob (dbl m) ≡ dblb (tob m) to∘dbl m = {!!} -- 842 exercise: FromDblB (1 point) from∘dblb : ∀ (m : Bin-ℕ) → fromb (dblb m) ≡ dbl (fromb m) from∘dblb m = {!!} -- 842 exercise: BinPlusLInc (2 points) -- This helper function translates the second case for unary addition -- suc m + n = suc (m + n) -- into the binary setting. It's useful in the next proof. -- Hint: induction on both m and n is needed. bin-+-linc : ∀ (m n : Bin-ℕ) → (inc m) bin-+ n ≡ inc (m bin-+ n) bin-+-linc m n = {!!} -- 842 exercise: PlusUnaryBinary (2 points) -- This theorem relates unary and binary addition. to∘+ : ∀ (m n : ℕ) → tob (m + n) ≡ tob m bin-+ tob n to∘+ m n = {!!} -- This ends the extended exercise. -- The following theorems proved in PLFA can be found in the standard library. -- import Data.Nat.Properties using (+-assoc; +-identityʳ; +-suc; +-comm) -- Unicode used in this chapter: {- ∀ U+2200 FOR ALL (\forall, \all) ʳ U+02B3 MODIFIER LETTER SMALL R (\^r) ′ U+2032 PRIME (\') ″ U+2033 DOUBLE PRIME (\') ‴ U+2034 TRIPLE PRIME (\') ⁗ U+2057 QUADRUPLE PRIME (\') -}
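A reading note, assuming the usual convention from the earlier Naturals exercise in which _x0 and _x1 append a least-significant bit: the properties above say that inc, tob and fromb respect the numeric meaning of a bitstring. For instance, bits x1 x1 x0 parses as ((bits x1) x1) x0, i.e. binary 110, so the intended behaviour gives

\[ \mathrm{fromb}\,(\mathtt{bits\;x1\;x1\;x0}) = 6, \qquad \mathrm{inc}\,(\mathtt{bits\;x1\;x1\;x0}) = \mathtt{bits\;x1\;x1\;x1}, \qquad \mathrm{fromb}\,(\mathtt{bits\;x1\;x1\;x1}) = 7, \]

which is one instance of from∘inc.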
theory Nominal imports "~~/src/HOL/Library/Infinite_Set" "~~/src/HOL/Library/Old_Datatype" keywords "atom_decl" "nominal_datatype" "equivariance" :: thy_decl and "nominal_primrec" "nominal_inductive" "nominal_inductive2" :: thy_goal and "avoids" begin section {* Permutations *} (*======================*) type_synonym 'x prm = "('x \<times> 'x) list" (* polymorphic constants for permutation and swapping *) consts perm :: "'x prm \<Rightarrow> 'a \<Rightarrow> 'a" (infixr "\<bullet>" 80) swap :: "('x \<times> 'x) \<Rightarrow> 'x \<Rightarrow> 'x" (* a "private" copy of the option type used in the abstraction function *) datatype 'a noption = nSome 'a | nNone datatype_compat noption (* a "private" copy of the product type used in the nominal induct method *) datatype ('a, 'b) nprod = nPair 'a 'b datatype_compat nprod (* an auxiliary constant for the decision procedure involving *) (* permutations (to avoid loops when using perm-compositions) *) definition "perm_aux pi x = pi\<bullet>x" (* overloaded permutation operations *) overloading perm_fun \<equiv> "perm :: 'x prm \<Rightarrow> ('a\<Rightarrow>'b) \<Rightarrow> ('a\<Rightarrow>'b)" (unchecked) perm_bool \<equiv> "perm :: 'x prm \<Rightarrow> bool \<Rightarrow> bool" (unchecked) perm_set \<equiv> "perm :: 'x prm \<Rightarrow> 'a set \<Rightarrow> 'a set" (unchecked) perm_unit \<equiv> "perm :: 'x prm \<Rightarrow> unit \<Rightarrow> unit" (unchecked) perm_prod \<equiv> "perm :: 'x prm \<Rightarrow> ('a\<times>'b) \<Rightarrow> ('a\<times>'b)" (unchecked) perm_list \<equiv> "perm :: 'x prm \<Rightarrow> 'a list \<Rightarrow> 'a list" (unchecked) perm_option \<equiv> "perm :: 'x prm \<Rightarrow> 'a option \<Rightarrow> 'a option" (unchecked) perm_char \<equiv> "perm :: 'x prm \<Rightarrow> char \<Rightarrow> char" (unchecked) perm_nat \<equiv> "perm :: 'x prm \<Rightarrow> nat \<Rightarrow> nat" (unchecked) perm_int \<equiv> "perm :: 'x prm \<Rightarrow> int \<Rightarrow> int" (unchecked) perm_noption \<equiv> "perm :: 'x prm \<Rightarrow> 'a noption \<Rightarrow> 'a noption" (unchecked) perm_nprod \<equiv> "perm :: 'x prm \<Rightarrow> ('a, 'b) nprod \<Rightarrow> ('a, 'b) nprod" (unchecked) begin definition perm_fun :: "'x prm \<Rightarrow> ('a \<Rightarrow> 'b) \<Rightarrow> 'a \<Rightarrow> 'b" where "perm_fun pi f = (\<lambda>x. pi \<bullet> f (rev pi \<bullet> x))" definition perm_bool :: "'x prm \<Rightarrow> bool \<Rightarrow> bool" where "perm_bool pi b = b" definition perm_set :: "'x prm \<Rightarrow> 'a set \<Rightarrow> 'a set" where "perm_set pi X = {pi \<bullet> x | x. 
x \<in> X}" primrec perm_unit :: "'x prm \<Rightarrow> unit \<Rightarrow> unit" where "perm_unit pi () = ()" primrec perm_prod :: "'x prm \<Rightarrow> ('a\<times>'b) \<Rightarrow> ('a\<times>'b)" where "perm_prod pi (x, y) = (pi\<bullet>x, pi\<bullet>y)" primrec perm_list :: "'x prm \<Rightarrow> 'a list \<Rightarrow> 'a list" where nil_eqvt: "perm_list pi [] = []" | cons_eqvt: "perm_list pi (x#xs) = (pi\<bullet>x)#(pi\<bullet>xs)" primrec perm_option :: "'x prm \<Rightarrow> 'a option \<Rightarrow> 'a option" where some_eqvt: "perm_option pi (Some x) = Some (pi\<bullet>x)" | none_eqvt: "perm_option pi None = None" definition perm_char :: "'x prm \<Rightarrow> char \<Rightarrow> char" where "perm_char pi c = c" definition perm_nat :: "'x prm \<Rightarrow> nat \<Rightarrow> nat" where "perm_nat pi i = i" definition perm_int :: "'x prm \<Rightarrow> int \<Rightarrow> int" where "perm_int pi i = i" primrec perm_noption :: "'x prm \<Rightarrow> 'a noption \<Rightarrow> 'a noption" where nsome_eqvt: "perm_noption pi (nSome x) = nSome (pi\<bullet>x)" | nnone_eqvt: "perm_noption pi nNone = nNone" primrec perm_nprod :: "'x prm \<Rightarrow> ('a, 'b) nprod \<Rightarrow> ('a, 'b) nprod" where "perm_nprod pi (nPair x y) = nPair (pi\<bullet>x) (pi\<bullet>y)" end (* permutations on booleans *) lemmas perm_bool = perm_bool_def lemma true_eqvt [simp]: "pi \<bullet> True \<longleftrightarrow> True" by (simp add: perm_bool_def) lemma false_eqvt [simp]: "pi \<bullet> False \<longleftrightarrow> False" by (simp add: perm_bool_def) lemma perm_boolI: assumes a: "P" shows "pi\<bullet>P" using a by (simp add: perm_bool) lemma perm_boolE: assumes a: "pi\<bullet>P" shows "P" using a by (simp add: perm_bool) lemma if_eqvt: fixes pi::"'a prm" shows "pi\<bullet>(if b then c1 else c2) = (if (pi\<bullet>b) then (pi\<bullet>c1) else (pi\<bullet>c2))" by (simp add: perm_fun_def) lemma imp_eqvt: shows "pi\<bullet>(A\<longrightarrow>B) = ((pi\<bullet>A)\<longrightarrow>(pi\<bullet>B))" by (simp add: perm_bool) lemma conj_eqvt: shows "pi\<bullet>(A\<and>B) = ((pi\<bullet>A)\<and>(pi\<bullet>B))" by (simp add: perm_bool) lemma disj_eqvt: shows "pi\<bullet>(A\<or>B) = ((pi\<bullet>A)\<or>(pi\<bullet>B))" by (simp add: perm_bool) lemma neg_eqvt: shows "pi\<bullet>(\<not> A) = (\<not> (pi\<bullet>A))" by (simp add: perm_bool) (* permutation on sets *) lemma empty_eqvt: shows "pi\<bullet>{} = {}" by (simp add: perm_set_def) lemma union_eqvt: shows "(pi\<bullet>(X\<union>Y)) = (pi\<bullet>X) \<union> (pi\<bullet>Y)" by (auto simp add: perm_set_def) lemma insert_eqvt: shows "pi\<bullet>(insert x X) = insert (pi\<bullet>x) (pi\<bullet>X)" by (auto simp add: perm_set_def) (* permutations on products *) lemma fst_eqvt: "pi\<bullet>(fst x) = fst (pi\<bullet>x)" by (cases x) simp lemma snd_eqvt: "pi\<bullet>(snd x) = snd (pi\<bullet>x)" by (cases x) simp (* permutation on lists *) lemma append_eqvt: fixes pi :: "'x prm" and l1 :: "'a list" and l2 :: "'a list" shows "pi\<bullet>(l1@l2) = (pi\<bullet>l1)@(pi\<bullet>l2)" by (induct l1) auto lemma rev_eqvt: fixes pi :: "'x prm" and l :: "'a list" shows "pi\<bullet>(rev l) = rev (pi\<bullet>l)" by (induct l) (simp_all add: append_eqvt) lemma set_eqvt: fixes pi :: "'x prm" and xs :: "'a list" shows "pi\<bullet>(set xs) = set (pi\<bullet>xs)" by (induct xs) (auto simp add: empty_eqvt insert_eqvt) (* permutation on characters and strings *) lemma perm_string: fixes s::"string" shows "pi\<bullet>s = s" by (induct s)(auto simp add: perm_char_def) section {* permutation equality *} 
(*==============================*) definition prm_eq :: "'x prm \<Rightarrow> 'x prm \<Rightarrow> bool" (" _ \<triangleq> _ " [80,80] 80) where "pi1 \<triangleq> pi2 \<longleftrightarrow> (\<forall>a::'x. pi1\<bullet>a = pi2\<bullet>a)" section {* Support, Freshness and Supports*} (*========================================*) definition supp :: "'a \<Rightarrow> ('x set)" where "supp x = {a . (infinite {b . [(a,b)]\<bullet>x \<noteq> x})}" definition fresh :: "'x \<Rightarrow> 'a \<Rightarrow> bool" ("_ \<sharp> _" [80,80] 80) where "a \<sharp> x \<longleftrightarrow> a \<notin> supp x" definition supports :: "'x set \<Rightarrow> 'a \<Rightarrow> bool" (infixl "supports" 80) where "S supports x \<longleftrightarrow> (\<forall>a b. (a\<notin>S \<and> b\<notin>S \<longrightarrow> [(a,b)]\<bullet>x=x))" (* lemmas about supp *) lemma supp_fresh_iff: fixes x :: "'a" shows "(supp x) = {a::'x. \<not>a\<sharp>x}" by (simp add: fresh_def) lemma supp_unit: shows "supp () = {}" by (simp add: supp_def) lemma supp_set_empty: shows "supp {} = {}" by (force simp add: supp_def empty_eqvt) lemma supp_prod: fixes x :: "'a" and y :: "'b" shows "(supp (x,y)) = (supp x)\<union>(supp y)" by (force simp add: supp_def Collect_imp_eq Collect_neg_eq) lemma supp_nprod: fixes x :: "'a" and y :: "'b" shows "(supp (nPair x y)) = (supp x)\<union>(supp y)" by (force simp add: supp_def Collect_imp_eq Collect_neg_eq) lemma supp_list_nil: shows "supp [] = {}" by (simp add: supp_def) lemma supp_list_cons: fixes x :: "'a" and xs :: "'a list" shows "supp (x#xs) = (supp x)\<union>(supp xs)" by (auto simp add: supp_def Collect_imp_eq Collect_neg_eq) lemma supp_list_append: fixes xs :: "'a list" and ys :: "'a list" shows "supp (xs@ys) = (supp xs)\<union>(supp ys)" by (induct xs) (auto simp add: supp_list_nil supp_list_cons) lemma supp_list_rev: fixes xs :: "'a list" shows "supp (rev xs) = (supp xs)" by (induct xs, auto simp add: supp_list_append supp_list_cons supp_list_nil) lemma supp_bool: fixes x :: "bool" shows "supp x = {}" by (cases "x") (simp_all add: supp_def) lemma supp_some: fixes x :: "'a" shows "supp (Some x) = (supp x)" by (simp add: supp_def) lemma supp_none: fixes x :: "'a" shows "supp (None) = {}" by (simp add: supp_def) lemma supp_int: fixes i::"int" shows "supp (i) = {}" by (simp add: supp_def perm_int_def) lemma supp_nat: fixes n::"nat" shows "(supp n) = {}" by (simp add: supp_def perm_nat_def) lemma supp_char: fixes c::"char" shows "(supp c) = {}" by (simp add: supp_def perm_char_def) lemma supp_string: fixes s::"string" shows "(supp s) = {}" by (simp add: supp_def perm_string) (* lemmas about freshness *) lemma fresh_set_empty: shows "a\<sharp>{}" by (simp add: fresh_def supp_set_empty) lemma fresh_unit: shows "a\<sharp>()" by (simp add: fresh_def supp_unit) lemma fresh_prod: fixes a :: "'x" and x :: "'a" and y :: "'b" shows "a\<sharp>(x,y) = (a\<sharp>x \<and> a\<sharp>y)" by (simp add: fresh_def supp_prod) lemma fresh_list_nil: fixes a :: "'x" shows "a\<sharp>[]" by (simp add: fresh_def supp_list_nil) lemma fresh_list_cons: fixes a :: "'x" and x :: "'a" and xs :: "'a list" shows "a\<sharp>(x#xs) = (a\<sharp>x \<and> a\<sharp>xs)" by (simp add: fresh_def supp_list_cons) lemma fresh_list_append: fixes a :: "'x" and xs :: "'a list" and ys :: "'a list" shows "a\<sharp>(xs@ys) = (a\<sharp>xs \<and> a\<sharp>ys)" by (simp add: fresh_def supp_list_append) lemma fresh_list_rev: fixes a :: "'x" and xs :: "'a list" shows "a\<sharp>(rev xs) = a\<sharp>xs" by (simp add: fresh_def supp_list_rev) lemma fresh_none: fixes 
a :: "'x" shows "a\<sharp>None" by (simp add: fresh_def supp_none) lemma fresh_some: fixes a :: "'x" and x :: "'a" shows "a\<sharp>(Some x) = a\<sharp>x" by (simp add: fresh_def supp_some) lemma fresh_int: fixes a :: "'x" and i :: "int" shows "a\<sharp>i" by (simp add: fresh_def supp_int) lemma fresh_nat: fixes a :: "'x" and n :: "nat" shows "a\<sharp>n" by (simp add: fresh_def supp_nat) lemma fresh_char: fixes a :: "'x" and c :: "char" shows "a\<sharp>c" by (simp add: fresh_def supp_char) lemma fresh_string: fixes a :: "'x" and s :: "string" shows "a\<sharp>s" by (simp add: fresh_def supp_string) lemma fresh_bool: fixes a :: "'x" and b :: "bool" shows "a\<sharp>b" by (simp add: fresh_def supp_bool) text {* Normalization of freshness results; cf.\ @{text nominal_induct} *} lemma fresh_unit_elim: shows "(a\<sharp>() \<Longrightarrow> PROP C) \<equiv> PROP C" by (simp add: fresh_def supp_unit) lemma fresh_prod_elim: shows "(a\<sharp>(x,y) \<Longrightarrow> PROP C) \<equiv> (a\<sharp>x \<Longrightarrow> a\<sharp>y \<Longrightarrow> PROP C)" by rule (simp_all add: fresh_prod) (* this rule needs to be added before the fresh_prodD is *) (* added to the simplifier with mksimps *) lemma fresh_prodD: shows "a\<sharp>(x,y) \<Longrightarrow> a\<sharp>x" and "a\<sharp>(x,y) \<Longrightarrow> a\<sharp>y" by (simp_all add: fresh_prod) ML {* val mksimps_pairs = (@{const_name Nominal.fresh}, @{thms fresh_prodD}) :: mksimps_pairs; *} declaration {* fn _ => Simplifier.map_ss (Simplifier.set_mksimps (mksimps mksimps_pairs)) *} section {* Abstract Properties for Permutations and Atoms *} (*=========================================================*) (* properties for being a permutation type *) definition "pt TYPE('a) TYPE('x) \<equiv> (\<forall>(x::'a). ([]::'x prm)\<bullet>x = x) \<and> (\<forall>(pi1::'x prm) (pi2::'x prm) (x::'a). (pi1@pi2)\<bullet>x = pi1\<bullet>(pi2\<bullet>x)) \<and> (\<forall>(pi1::'x prm) (pi2::'x prm) (x::'a). pi1 \<triangleq> pi2 \<longrightarrow> pi1\<bullet>x = pi2\<bullet>x)" (* properties for being an atom type *) definition "at TYPE('x) \<equiv> (\<forall>(x::'x). ([]::'x prm)\<bullet>x = x) \<and> (\<forall>(a::'x) (b::'x) (pi::'x prm) (x::'x). ((a,b)#(pi::'x prm))\<bullet>x = swap (a,b) (pi\<bullet>x)) \<and> (\<forall>(a::'x) (b::'x) (c::'x). swap (a,b) c = (if a=c then b else (if b=c then a else c))) \<and> (infinite (UNIV::'x set))" (* property of two atom-types being disjoint *) definition "disjoint TYPE('x) TYPE('y) \<equiv> (\<forall>(pi::'x prm)(x::'y). pi\<bullet>x = x) \<and> (\<forall>(pi::'y prm)(x::'x). pi\<bullet>x = x)" (* composition property of two permutation on a type 'a *) definition "cp TYPE ('a) TYPE('x) TYPE('y) \<equiv> (\<forall>(pi2::'y prm) (pi1::'x prm) (x::'a) . pi1\<bullet>(pi2\<bullet>x) = (pi1\<bullet>pi2)\<bullet>(pi1\<bullet>x))" (* property of having finite support *) definition "fs TYPE('a) TYPE('x) \<equiv> \<forall>(x::'a). 
finite ((supp x)::'x set)" section {* Lemmas about the atom-type properties*} (*==============================================*) lemma at1: fixes x::"'x" assumes a: "at TYPE('x)" shows "([]::'x prm)\<bullet>x = x" using a by (simp add: at_def) lemma at2: fixes a ::"'x" and b ::"'x" and x ::"'x" and pi::"'x prm" assumes a: "at TYPE('x)" shows "((a,b)#pi)\<bullet>x = swap (a,b) (pi\<bullet>x)" using a by (simp only: at_def) lemma at3: fixes a ::"'x" and b ::"'x" and c ::"'x" assumes a: "at TYPE('x)" shows "swap (a,b) c = (if a=c then b else (if b=c then a else c))" using a by (simp only: at_def) (* rules to calculate simple permutations *) lemmas at_calc = at2 at1 at3 lemma at_swap_simps: fixes a ::"'x" and b ::"'x" assumes a: "at TYPE('x)" shows "[(a,b)]\<bullet>a = b" and "[(a,b)]\<bullet>b = a" and "\<lbrakk>a\<noteq>c; b\<noteq>c\<rbrakk> \<Longrightarrow> [(a,b)]\<bullet>c = c" using a by (simp_all add: at_calc) lemma at4: assumes a: "at TYPE('x)" shows "infinite (UNIV::'x set)" using a by (simp add: at_def) lemma at_append: fixes pi1 :: "'x prm" and pi2 :: "'x prm" and c :: "'x" assumes at: "at TYPE('x)" shows "(pi1@pi2)\<bullet>c = pi1\<bullet>(pi2\<bullet>c)" proof (induct pi1) case Nil show ?case by (simp add: at1[OF at]) next case (Cons x xs) have "(xs@pi2)\<bullet>c = xs\<bullet>(pi2\<bullet>c)" by fact also have "(x#xs)@pi2 = x#(xs@pi2)" by simp ultimately show ?case by (cases "x", simp add: at2[OF at]) qed lemma at_swap: fixes a :: "'x" and b :: "'x" and c :: "'x" assumes at: "at TYPE('x)" shows "swap (a,b) (swap (a,b) c) = c" by (auto simp add: at3[OF at]) lemma at_rev_pi: fixes pi :: "'x prm" and c :: "'x" assumes at: "at TYPE('x)" shows "(rev pi)\<bullet>(pi\<bullet>c) = c" proof(induct pi) case Nil show ?case by (simp add: at1[OF at]) next case (Cons x xs) thus ?case by (cases "x", simp add: at2[OF at] at_append[OF at] at1[OF at] at_swap[OF at]) qed lemma at_pi_rev: fixes pi :: "'x prm" and x :: "'x" assumes at: "at TYPE('x)" shows "pi\<bullet>((rev pi)\<bullet>x) = x" by (rule at_rev_pi[OF at, of "rev pi" _,simplified]) lemma at_bij1: fixes pi :: "'x prm" and x :: "'x" and y :: "'x" assumes at: "at TYPE('x)" and a: "(pi\<bullet>x) = y" shows "x=(rev pi)\<bullet>y" proof - from a have "y=(pi\<bullet>x)" by (rule sym) thus ?thesis by (simp only: at_rev_pi[OF at]) qed lemma at_bij2: fixes pi :: "'x prm" and x :: "'x" and y :: "'x" assumes at: "at TYPE('x)" and a: "((rev pi)\<bullet>x) = y" shows "x=pi\<bullet>y" proof - from a have "y=((rev pi)\<bullet>x)" by (rule sym) thus ?thesis by (simp only: at_pi_rev[OF at]) qed lemma at_bij: fixes pi :: "'x prm" and x :: "'x" and y :: "'x" assumes at: "at TYPE('x)" shows "(pi\<bullet>x = pi\<bullet>y) = (x=y)" proof assume "pi\<bullet>x = pi\<bullet>y" hence "x=(rev pi)\<bullet>(pi\<bullet>y)" by (rule at_bij1[OF at]) thus "x=y" by (simp only: at_rev_pi[OF at]) next assume "x=y" thus "pi\<bullet>x = pi\<bullet>y" by simp qed lemma at_supp: fixes x :: "'x" assumes at: "at TYPE('x)" shows "supp x = {x}" by(auto simp: supp_def Collect_conj_eq Collect_imp_eq at_calc[OF at] at4[OF at]) lemma at_fresh: fixes a :: "'x" and b :: "'x" assumes at: "at TYPE('x)" shows "(a\<sharp>b) = (a\<noteq>b)" by (simp add: at_supp[OF at] fresh_def) lemma at_prm_fresh1: fixes c :: "'x" and pi:: "'x prm" assumes at: "at TYPE('x)" and a: "c\<sharp>pi" shows "\<forall>(a,b)\<in>set pi. 
c\<noteq>a \<and> c\<noteq>b" using a by (induct pi) (auto simp add: fresh_list_cons fresh_prod at_fresh[OF at]) lemma at_prm_fresh2: fixes c :: "'x" and pi:: "'x prm" assumes at: "at TYPE('x)" and a: "\<forall>(a,b)\<in>set pi. c\<noteq>a \<and> c\<noteq>b" shows "pi\<bullet>c = c" using a by(induct pi) (auto simp add: at1[OF at] at2[OF at] at3[OF at]) lemma at_prm_fresh: fixes c :: "'x" and pi:: "'x prm" assumes at: "at TYPE('x)" and a: "c\<sharp>pi" shows "pi\<bullet>c = c" by (rule at_prm_fresh2[OF at], rule at_prm_fresh1[OF at, OF a]) lemma at_prm_rev_eq: fixes pi1 :: "'x prm" and pi2 :: "'x prm" assumes at: "at TYPE('x)" shows "((rev pi1) \<triangleq> (rev pi2)) = (pi1 \<triangleq> pi2)" proof (simp add: prm_eq_def, auto) fix x assume "\<forall>x::'x. (rev pi1)\<bullet>x = (rev pi2)\<bullet>x" hence "(rev (pi1::'x prm))\<bullet>(pi2\<bullet>(x::'x)) = (rev (pi2::'x prm))\<bullet>(pi2\<bullet>x)" by simp hence "(rev (pi1::'x prm))\<bullet>((pi2::'x prm)\<bullet>x) = (x::'x)" by (simp add: at_rev_pi[OF at]) hence "(pi2::'x prm)\<bullet>x = (pi1::'x prm)\<bullet>x" by (simp add: at_bij2[OF at]) thus "pi1\<bullet>x = pi2\<bullet>x" by simp next fix x assume "\<forall>x::'x. pi1\<bullet>x = pi2\<bullet>x" hence "(pi1::'x prm)\<bullet>((rev pi2)\<bullet>x) = (pi2::'x prm)\<bullet>((rev pi2)\<bullet>(x::'x))" by simp hence "(pi1::'x prm)\<bullet>((rev pi2)\<bullet>(x::'x)) = x" by (simp add: at_pi_rev[OF at]) hence "(rev pi2)\<bullet>x = (rev pi1)\<bullet>(x::'x)" by (simp add: at_bij1[OF at]) thus "(rev pi1)\<bullet>x = (rev pi2)\<bullet>(x::'x)" by simp qed lemma at_prm_eq_append: fixes pi1 :: "'x prm" and pi2 :: "'x prm" and pi3 :: "'x prm" assumes at: "at TYPE('x)" and a: "pi1 \<triangleq> pi2" shows "(pi3@pi1) \<triangleq> (pi3@pi2)" using a by (simp add: prm_eq_def at_append[OF at] at_bij[OF at]) lemma at_prm_eq_append': fixes pi1 :: "'x prm" and pi2 :: "'x prm" and pi3 :: "'x prm" assumes at: "at TYPE('x)" and a: "pi1 \<triangleq> pi2" shows "(pi1@pi3) \<triangleq> (pi2@pi3)" using a by (simp add: prm_eq_def at_append[OF at]) lemma at_prm_eq_trans: fixes pi1 :: "'x prm" and pi2 :: "'x prm" and pi3 :: "'x prm" assumes a1: "pi1 \<triangleq> pi2" and a2: "pi2 \<triangleq> pi3" shows "pi1 \<triangleq> pi3" using a1 a2 by (auto simp add: prm_eq_def) lemma at_prm_eq_refl: fixes pi :: "'x prm" shows "pi \<triangleq> pi" by (simp add: prm_eq_def) lemma at_prm_rev_eq1: fixes pi1 :: "'x prm" and pi2 :: "'x prm" assumes at: "at TYPE('x)" shows "pi1 \<triangleq> pi2 \<Longrightarrow> (rev pi1) \<triangleq> (rev pi2)" by (simp add: at_prm_rev_eq[OF at]) lemma at_ds1: fixes a :: "'x" assumes at: "at TYPE('x)" shows "[(a,a)] \<triangleq> []" by (force simp add: prm_eq_def at_calc[OF at]) lemma at_ds2: fixes pi :: "'x prm" and a :: "'x" and b :: "'x" assumes at: "at TYPE('x)" shows "([(a,b)]@pi) \<triangleq> (pi@[((rev pi)\<bullet>a,(rev pi)\<bullet>b)])" by (force simp add: prm_eq_def at_append[OF at] at_bij[OF at] at_pi_rev[OF at] at_rev_pi[OF at] at_calc[OF at]) lemma at_ds3: fixes a :: "'x" and b :: "'x" and c :: "'x" assumes at: "at TYPE('x)" and a: "distinct [a,b,c]" shows "[(a,c),(b,c),(a,c)] \<triangleq> [(a,b)]" using a by (force simp add: prm_eq_def at_calc[OF at]) lemma at_ds4: fixes a :: "'x" and b :: "'x" and pi :: "'x prm" assumes at: "at TYPE('x)" shows "(pi@[(a,(rev pi)\<bullet>b)]) \<triangleq> ([(pi\<bullet>a,b)]@pi)" by (force simp add: prm_eq_def at_append[OF at] at_calc[OF at] at_bij[OF at] at_pi_rev[OF at] at_rev_pi[OF at]) lemma at_ds5: fixes a :: "'x" and b :: "'x" assumes 
at: "at TYPE('x)" shows "[(a,b)] \<triangleq> [(b,a)]" by (force simp add: prm_eq_def at_calc[OF at]) lemma at_ds5': fixes a :: "'x" and b :: "'x" assumes at: "at TYPE('x)" shows "[(a,b),(b,a)] \<triangleq> []" by (force simp add: prm_eq_def at_calc[OF at]) lemma at_ds6: fixes a :: "'x" and b :: "'x" and c :: "'x" assumes at: "at TYPE('x)" and a: "distinct [a,b,c]" shows "[(a,c),(a,b)] \<triangleq> [(b,c),(a,c)]" using a by (force simp add: prm_eq_def at_calc[OF at]) lemma at_ds7: fixes pi :: "'x prm" assumes at: "at TYPE('x)" shows "((rev pi)@pi) \<triangleq> []" by (simp add: prm_eq_def at1[OF at] at_append[OF at] at_rev_pi[OF at]) lemma at_ds8_aux: fixes pi :: "'x prm" and a :: "'x" and b :: "'x" and c :: "'x" assumes at: "at TYPE('x)" shows "pi\<bullet>(swap (a,b) c) = swap (pi\<bullet>a,pi\<bullet>b) (pi\<bullet>c)" by (force simp add: at_calc[OF at] at_bij[OF at]) lemma at_ds8: fixes pi1 :: "'x prm" and pi2 :: "'x prm" and a :: "'x" and b :: "'x" assumes at: "at TYPE('x)" shows "(pi1@pi2) \<triangleq> ((pi1\<bullet>pi2)@pi1)" apply(induct_tac pi2) apply(simp add: prm_eq_def) apply(auto simp add: prm_eq_def) apply(simp add: at2[OF at]) apply(drule_tac x="aa" in spec) apply(drule sym) apply(simp) apply(simp add: at_append[OF at]) apply(simp add: at2[OF at]) apply(simp add: at_ds8_aux[OF at]) done lemma at_ds9: fixes pi1 :: "'x prm" and pi2 :: "'x prm" and a :: "'x" and b :: "'x" assumes at: "at TYPE('x)" shows " ((rev pi2)@(rev pi1)) \<triangleq> ((rev pi1)@(rev (pi1\<bullet>pi2)))" apply(induct_tac pi2) apply(simp add: prm_eq_def) apply(auto simp add: prm_eq_def) apply(simp add: at_append[OF at]) apply(simp add: at2[OF at] at1[OF at]) apply(drule_tac x="swap(pi1\<bullet>a,pi1\<bullet>b) aa" in spec) apply(drule sym) apply(simp) apply(simp add: at_ds8_aux[OF at]) apply(simp add: at_rev_pi[OF at]) done lemma at_ds10: fixes pi :: "'x prm" and a :: "'x" and b :: "'x" assumes at: "at TYPE('x)" and a: "b\<sharp>(rev pi)" shows "([(pi\<bullet>a,b)]@pi) \<triangleq> (pi@[(a,b)])" using a apply - apply(rule at_prm_eq_trans) apply(rule at_ds2[OF at]) apply(simp add: at_prm_fresh[OF at] at_rev_pi[OF at]) apply(rule at_prm_eq_refl) done --"there always exists an atom that is not being in a finite set" lemma ex_in_inf: fixes A::"'x set" assumes at: "at TYPE('x)" and fs: "finite A" obtains c::"'x" where "c\<notin>A" proof - from fs at4[OF at] have "infinite ((UNIV::'x set) - A)" by (simp add: Diff_infinite_finite) hence "((UNIV::'x set) - A) \<noteq> ({}::'x set)" by (force simp only:) then obtain c::"'x" where "c\<in>((UNIV::'x set) - A)" by force then have "c\<notin>A" by simp then show ?thesis .. qed text {* there always exists a fresh name for an object with finite support *} lemma at_exists_fresh': fixes x :: "'a" assumes at: "at TYPE('x)" and fs: "finite ((supp x)::'x set)" shows "\<exists>c::'x. c\<sharp>x" by (auto simp add: fresh_def intro: ex_in_inf[OF at, OF fs]) lemma at_exists_fresh: fixes x :: "'a" assumes at: "at TYPE('x)" and fs: "finite ((supp x)::'x set)" obtains c::"'x" where "c\<sharp>x" by (auto intro: ex_in_inf[OF at, OF fs] simp add: fresh_def) lemma at_finite_select: fixes S::"'a set" assumes a: "at TYPE('a)" and b: "finite S" shows "\<exists>x. x \<notin> S" using a b apply(drule_tac S="UNIV::'a set" in Diff_infinite_finite) apply(simp add: at_def) apply(subgoal_tac "UNIV - S \<noteq> {}") apply(simp only: ex_in_conv [symmetric]) apply(blast) apply(rule notI) apply(simp) done lemma at_different: assumes at: "at TYPE('x)" shows "\<exists>(b::'x). 
a\<noteq>b" proof - have "infinite (UNIV::'x set)" by (rule at4[OF at]) hence inf2: "infinite (UNIV-{a})" by (rule infinite_remove) have "(UNIV-{a}) \<noteq> ({}::'x set)" proof (rule_tac ccontr, drule_tac notnotD) assume "UNIV-{a} = ({}::'x set)" with inf2 have "infinite ({}::'x set)" by simp then show "False" by auto qed hence "\<exists>(b::'x). b\<in>(UNIV-{a})" by blast then obtain b::"'x" where mem2: "b\<in>(UNIV-{a})" by blast from mem2 have "a\<noteq>b" by blast then show "\<exists>(b::'x). a\<noteq>b" by blast qed --"the at-props imply the pt-props" lemma at_pt_inst: assumes at: "at TYPE('x)" shows "pt TYPE('x) TYPE('x)" apply(auto simp only: pt_def) apply(simp only: at1[OF at]) apply(simp only: at_append[OF at]) apply(simp only: prm_eq_def) done section {* finite support properties *} (*===================================*) lemma fs1: fixes x :: "'a" assumes a: "fs TYPE('a) TYPE('x)" shows "finite ((supp x)::'x set)" using a by (simp add: fs_def) lemma fs_at_inst: fixes a :: "'x" assumes at: "at TYPE('x)" shows "fs TYPE('x) TYPE('x)" apply(simp add: fs_def) apply(simp add: at_supp[OF at]) done lemma fs_unit_inst: shows "fs TYPE(unit) TYPE('x)" apply(simp add: fs_def) apply(simp add: supp_unit) done lemma fs_prod_inst: assumes fsa: "fs TYPE('a) TYPE('x)" and fsb: "fs TYPE('b) TYPE('x)" shows "fs TYPE('a\<times>'b) TYPE('x)" apply(unfold fs_def) apply(auto simp add: supp_prod) apply(rule fs1[OF fsa]) apply(rule fs1[OF fsb]) done lemma fs_nprod_inst: assumes fsa: "fs TYPE('a) TYPE('x)" and fsb: "fs TYPE('b) TYPE('x)" shows "fs TYPE(('a,'b) nprod) TYPE('x)" apply(unfold fs_def, rule allI) apply(case_tac x) apply(auto simp add: supp_nprod) apply(rule fs1[OF fsa]) apply(rule fs1[OF fsb]) done lemma fs_list_inst: assumes fs: "fs TYPE('a) TYPE('x)" shows "fs TYPE('a list) TYPE('x)" apply(simp add: fs_def, rule allI) apply(induct_tac x) apply(simp add: supp_list_nil) apply(simp add: supp_list_cons) apply(rule fs1[OF fs]) done lemma fs_option_inst: assumes fs: "fs TYPE('a) TYPE('x)" shows "fs TYPE('a option) TYPE('x)" apply(simp add: fs_def, rule allI) apply(case_tac x) apply(simp add: supp_none) apply(simp add: supp_some) apply(rule fs1[OF fs]) done section {* Lemmas about the permutation properties *} (*=================================================*) lemma pt1: fixes x::"'a" assumes a: "pt TYPE('a) TYPE('x)" shows "([]::'x prm)\<bullet>x = x" using a by (simp add: pt_def) lemma pt2: fixes pi1::"'x prm" and pi2::"'x prm" and x ::"'a" assumes a: "pt TYPE('a) TYPE('x)" shows "(pi1@pi2)\<bullet>x = pi1\<bullet>(pi2\<bullet>x)" using a by (simp add: pt_def) lemma pt3: fixes pi1::"'x prm" and pi2::"'x prm" and x ::"'a" assumes a: "pt TYPE('a) TYPE('x)" shows "pi1 \<triangleq> pi2 \<Longrightarrow> pi1\<bullet>x = pi2\<bullet>x" using a by (simp add: pt_def) lemma pt3_rev: fixes pi1::"'x prm" and pi2::"'x prm" and x ::"'a" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" shows "pi1 \<triangleq> pi2 \<Longrightarrow> (rev pi1)\<bullet>x = (rev pi2)\<bullet>x" by (rule pt3[OF pt], simp add: at_prm_rev_eq[OF at]) section {* composition properties *} (* ============================== *) lemma cp1: fixes pi1::"'x prm" and pi2::"'y prm" and x ::"'a" assumes cp: "cp TYPE ('a) TYPE('x) TYPE('y)" shows "pi1\<bullet>(pi2\<bullet>x) = (pi1\<bullet>pi2)\<bullet>(pi1\<bullet>x)" using cp by (simp add: cp_def) lemma cp_pt_inst: assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" shows "cp TYPE('a) TYPE('x) TYPE('x)" apply(auto simp add: cp_def pt2[OF pt,symmetric]) apply(rule pt3[OF pt]) 
apply(rule at_ds8[OF at]) done section {* disjointness properties *} (*=================================*) lemma dj_perm_forget: fixes pi::"'y prm" and x ::"'x" assumes dj: "disjoint TYPE('x) TYPE('y)" shows "pi\<bullet>x=x" using dj by (simp_all add: disjoint_def) lemma dj_perm_set_forget: fixes pi::"'y prm" and x ::"'x set" assumes dj: "disjoint TYPE('x) TYPE('y)" shows "pi\<bullet>x=x" using dj by (simp_all add: perm_set_def disjoint_def) lemma dj_perm_perm_forget: fixes pi1::"'x prm" and pi2::"'y prm" assumes dj: "disjoint TYPE('x) TYPE('y)" shows "pi2\<bullet>pi1=pi1" using dj by (induct pi1, auto simp add: disjoint_def) lemma dj_cp: fixes pi1::"'x prm" and pi2::"'y prm" and x ::"'a" assumes cp: "cp TYPE ('a) TYPE('x) TYPE('y)" and dj: "disjoint TYPE('y) TYPE('x)" shows "pi1\<bullet>(pi2\<bullet>x) = (pi2)\<bullet>(pi1\<bullet>x)" by (simp add: cp1[OF cp] dj_perm_perm_forget[OF dj]) lemma dj_supp: fixes a::"'x" assumes dj: "disjoint TYPE('x) TYPE('y)" shows "(supp a) = ({}::'y set)" apply(simp add: supp_def dj_perm_forget[OF dj]) done lemma at_fresh_ineq: fixes a :: "'x" and b :: "'y" assumes dj: "disjoint TYPE('y) TYPE('x)" shows "a\<sharp>b" by (simp add: fresh_def dj_supp[OF dj]) section {* permutation type instances *} (* ===================================*) lemma pt_fun_inst: assumes pta: "pt TYPE('a) TYPE('x)" and ptb: "pt TYPE('b) TYPE('x)" and at: "at TYPE('x)" shows "pt TYPE('a\<Rightarrow>'b) TYPE('x)" apply(auto simp only: pt_def) apply(simp_all add: perm_fun_def) apply(simp add: pt1[OF pta] pt1[OF ptb]) apply(simp add: pt2[OF pta] pt2[OF ptb]) apply(subgoal_tac "(rev pi1) \<triangleq> (rev pi2)")(*A*) apply(simp add: pt3[OF pta] pt3[OF ptb]) (*A*) apply(simp add: at_prm_rev_eq[OF at]) done lemma pt_bool_inst: shows "pt TYPE(bool) TYPE('x)" by (simp add: pt_def perm_bool_def) lemma pt_set_inst: assumes pt: "pt TYPE('a) TYPE('x)" shows "pt TYPE('a set) TYPE('x)" apply(simp add: pt_def) apply(simp_all add: perm_set_def) apply(simp add: pt1[OF pt]) apply(force simp add: pt2[OF pt] pt3[OF pt]) done lemma pt_unit_inst: shows "pt TYPE(unit) TYPE('x)" by (simp add: pt_def) lemma pt_prod_inst: assumes pta: "pt TYPE('a) TYPE('x)" and ptb: "pt TYPE('b) TYPE('x)" shows "pt TYPE('a \<times> 'b) TYPE('x)" apply(auto simp add: pt_def) apply(rule pt1[OF pta]) apply(rule pt1[OF ptb]) apply(rule pt2[OF pta]) apply(rule pt2[OF ptb]) apply(rule pt3[OF pta],assumption) apply(rule pt3[OF ptb],assumption) done lemma pt_list_nil: fixes xs :: "'a list" assumes pt: "pt TYPE('a) TYPE ('x)" shows "([]::'x prm)\<bullet>xs = xs" apply(induct_tac xs) apply(simp_all add: pt1[OF pt]) done lemma pt_list_append: fixes pi1 :: "'x prm" and pi2 :: "'x prm" and xs :: "'a list" assumes pt: "pt TYPE('a) TYPE ('x)" shows "(pi1@pi2)\<bullet>xs = pi1\<bullet>(pi2\<bullet>xs)" apply(induct_tac xs) apply(simp_all add: pt2[OF pt]) done lemma pt_list_prm_eq: fixes pi1 :: "'x prm" and pi2 :: "'x prm" and xs :: "'a list" assumes pt: "pt TYPE('a) TYPE ('x)" shows "pi1 \<triangleq> pi2 \<Longrightarrow> pi1\<bullet>xs = pi2\<bullet>xs" apply(induct_tac xs) apply(simp_all add: prm_eq_def pt3[OF pt]) done lemma pt_list_inst: assumes pt: "pt TYPE('a) TYPE('x)" shows "pt TYPE('a list) TYPE('x)" apply(auto simp only: pt_def) apply(rule pt_list_nil[OF pt]) apply(rule pt_list_append[OF pt]) apply(rule pt_list_prm_eq[OF pt],assumption) done lemma pt_option_inst: assumes pta: "pt TYPE('a) TYPE('x)" shows "pt TYPE('a option) TYPE('x)" apply(auto simp only: pt_def) apply(case_tac "x") apply(simp_all add: pt1[OF pta]) apply(case_tac 
"x") apply(simp_all add: pt2[OF pta]) apply(case_tac "x") apply(simp_all add: pt3[OF pta]) done lemma pt_noption_inst: assumes pta: "pt TYPE('a) TYPE('x)" shows "pt TYPE('a noption) TYPE('x)" apply(auto simp only: pt_def) apply(case_tac "x") apply(simp_all add: pt1[OF pta]) apply(case_tac "x") apply(simp_all add: pt2[OF pta]) apply(case_tac "x") apply(simp_all add: pt3[OF pta]) done lemma pt_nprod_inst: assumes pta: "pt TYPE('a) TYPE('x)" and ptb: "pt TYPE('b) TYPE('x)" shows "pt TYPE(('a,'b) nprod) TYPE('x)" apply(auto simp add: pt_def) apply(case_tac x) apply(simp add: pt1[OF pta] pt1[OF ptb]) apply(case_tac x) apply(simp add: pt2[OF pta] pt2[OF ptb]) apply(case_tac x) apply(simp add: pt3[OF pta] pt3[OF ptb]) done section {* further lemmas for permutation types *} (*==============================================*) lemma pt_rev_pi: fixes pi :: "'x prm" and x :: "'a" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" shows "(rev pi)\<bullet>(pi\<bullet>x) = x" proof - have "((rev pi)@pi) \<triangleq> ([]::'x prm)" by (simp add: at_ds7[OF at]) hence "((rev pi)@pi)\<bullet>(x::'a) = ([]::'x prm)\<bullet>x" by (simp add: pt3[OF pt]) thus ?thesis by (simp add: pt1[OF pt] pt2[OF pt]) qed lemma pt_pi_rev: fixes pi :: "'x prm" and x :: "'a" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" shows "pi\<bullet>((rev pi)\<bullet>x) = x" by (simp add: pt_rev_pi[OF pt, OF at,of "rev pi" "x",simplified]) lemma pt_bij1: fixes pi :: "'x prm" and x :: "'a" and y :: "'a" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" and a: "(pi\<bullet>x) = y" shows "x=(rev pi)\<bullet>y" proof - from a have "y=(pi\<bullet>x)" by (rule sym) thus ?thesis by (simp only: pt_rev_pi[OF pt, OF at]) qed lemma pt_bij2: fixes pi :: "'x prm" and x :: "'a" and y :: "'a" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" and a: "x = (rev pi)\<bullet>y" shows "(pi\<bullet>x)=y" using a by (simp add: pt_pi_rev[OF pt, OF at]) lemma pt_bij: fixes pi :: "'x prm" and x :: "'a" and y :: "'a" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" shows "(pi\<bullet>x = pi\<bullet>y) = (x=y)" proof assume "pi\<bullet>x = pi\<bullet>y" hence "x=(rev pi)\<bullet>(pi\<bullet>y)" by (rule pt_bij1[OF pt, OF at]) thus "x=y" by (simp only: pt_rev_pi[OF pt, OF at]) next assume "x=y" thus "pi\<bullet>x = pi\<bullet>y" by simp qed lemma pt_eq_eqvt: fixes pi :: "'x prm" and x :: "'a" and y :: "'a" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" shows "pi\<bullet>(x=y) = (pi\<bullet>x = pi\<bullet>y)" using pt at by (auto simp add: pt_bij perm_bool) lemma pt_bij3: fixes pi :: "'x prm" and x :: "'a" and y :: "'a" assumes a: "x=y" shows "(pi\<bullet>x = pi\<bullet>y)" using a by simp lemma pt_bij4: fixes pi :: "'x prm" and x :: "'a" and y :: "'a" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" and a: "pi\<bullet>x = pi\<bullet>y" shows "x = y" using a by (simp add: pt_bij[OF pt, OF at]) lemma pt_swap_bij: fixes a :: "'x" and b :: "'x" and x :: "'a" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" shows "[(a,b)]\<bullet>([(a,b)]\<bullet>x) = x" by (rule pt_bij2[OF pt, OF at], simp) lemma pt_swap_bij': fixes a :: "'x" and b :: "'x" and x :: "'a" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" shows "[(a,b)]\<bullet>([(b,a)]\<bullet>x) = x" apply(simp add: pt2[OF pt,symmetric]) apply(rule trans) apply(rule pt3[OF pt]) apply(rule at_ds5'[OF at]) apply(rule pt1[OF pt]) done lemma pt_swap_bij'': fixes a :: "'x" and x :: "'a" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" shows 
"[(a,a)]\<bullet>x = x" apply(rule trans) apply(rule pt3[OF pt]) apply(rule at_ds1[OF at]) apply(rule pt1[OF pt]) done lemma supp_singleton: shows "supp {x} = supp x" by (force simp add: supp_def perm_set_def) lemma fresh_singleton: shows "a\<sharp>{x} = a\<sharp>x" by (simp add: fresh_def supp_singleton) lemma pt_set_bij1: fixes pi :: "'x prm" and x :: "'a" and X :: "'a set" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" shows "((pi\<bullet>x)\<in>X) = (x\<in>((rev pi)\<bullet>X))" by (force simp add: perm_set_def pt_rev_pi[OF pt, OF at] pt_pi_rev[OF pt, OF at]) lemma pt_set_bij1a: fixes pi :: "'x prm" and x :: "'a" and X :: "'a set" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" shows "(x\<in>(pi\<bullet>X)) = (((rev pi)\<bullet>x)\<in>X)" by (force simp add: perm_set_def pt_rev_pi[OF pt, OF at] pt_pi_rev[OF pt, OF at]) lemma pt_set_bij: fixes pi :: "'x prm" and x :: "'a" and X :: "'a set" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" shows "((pi\<bullet>x)\<in>(pi\<bullet>X)) = (x\<in>X)" by (simp add: perm_set_def pt_bij[OF pt, OF at]) lemma pt_in_eqvt: fixes pi :: "'x prm" and x :: "'a" and X :: "'a set" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" shows "pi\<bullet>(x\<in>X)=((pi\<bullet>x)\<in>(pi\<bullet>X))" using assms by (auto simp add: pt_set_bij perm_bool) lemma pt_set_bij2: fixes pi :: "'x prm" and x :: "'a" and X :: "'a set" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" and a: "x\<in>X" shows "(pi\<bullet>x)\<in>(pi\<bullet>X)" using a by (simp add: pt_set_bij[OF pt, OF at]) lemma pt_set_bij2a: fixes pi :: "'x prm" and x :: "'a" and X :: "'a set" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" and a: "x\<in>((rev pi)\<bullet>X)" shows "(pi\<bullet>x)\<in>X" using a by (simp add: pt_set_bij1[OF pt, OF at]) (* FIXME: is this lemma needed anywhere? *) lemma pt_set_bij3: fixes pi :: "'x prm" and x :: "'a" and X :: "'a set" shows "pi\<bullet>(x\<in>X) = (x\<in>X)" by (simp add: perm_bool) lemma pt_subseteq_eqvt: fixes pi :: "'x prm" and Y :: "'a set" and X :: "'a set" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" shows "(pi\<bullet>(X\<subseteq>Y)) = ((pi\<bullet>X)\<subseteq>(pi\<bullet>Y))" by (auto simp add: perm_set_def perm_bool pt_bij[OF pt, OF at]) lemma pt_set_diff_eqvt: fixes X::"'a set" and Y::"'a set" and pi::"'x prm" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" shows "pi\<bullet>(X - Y) = (pi\<bullet>X) - (pi\<bullet>Y)" by (auto simp add: perm_set_def pt_bij[OF pt, OF at]) lemma pt_Collect_eqvt: fixes pi::"'x prm" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" shows "pi\<bullet>{x::'a. P x} = {x. P ((rev pi)\<bullet>x)}" apply(auto simp add: perm_set_def pt_rev_pi[OF pt, OF at]) apply(rule_tac x="(rev pi)\<bullet>x" in exI) apply(simp add: pt_pi_rev[OF pt, OF at]) done -- "some helper lemmas for the pt_perm_supp_ineq lemma" lemma Collect_permI: fixes pi :: "'x prm" and x :: "'a" assumes a: "\<forall>x. (P1 x = P2 x)" shows "{pi\<bullet>x| x. P1 x} = {pi\<bullet>x| x. P2 x}" using a by force lemma Infinite_cong: assumes a: "X = Y" shows "infinite X = infinite Y" using a by (simp) lemma pt_set_eq_ineq: fixes pi :: "'y prm" assumes pt: "pt TYPE('x) TYPE('y)" and at: "at TYPE('y)" shows "{pi\<bullet>x| x::'x. P x} = {x::'x. 
P ((rev pi)\<bullet>x)}" by (force simp only: pt_rev_pi[OF pt, OF at] pt_pi_rev[OF pt, OF at]) lemma pt_inject_on_ineq: fixes X :: "'y set" and pi :: "'x prm" assumes pt: "pt TYPE('y) TYPE('x)" and at: "at TYPE('x)" shows "inj_on (perm pi) X" proof (unfold inj_on_def, intro strip) fix x::"'y" and y::"'y" assume "pi\<bullet>x = pi\<bullet>y" thus "x=y" by (simp add: pt_bij[OF pt, OF at]) qed lemma pt_set_finite_ineq: fixes X :: "'x set" and pi :: "'y prm" assumes pt: "pt TYPE('x) TYPE('y)" and at: "at TYPE('y)" shows "finite (pi\<bullet>X) = finite X" proof - have image: "(pi\<bullet>X) = (perm pi ` X)" by (force simp only: perm_set_def) show ?thesis proof (rule iffI) assume "finite (pi\<bullet>X)" hence "finite (perm pi ` X)" using image by (simp) thus "finite X" using pt_inject_on_ineq[OF pt, OF at] by (rule finite_imageD) next assume "finite X" hence "finite (perm pi ` X)" by (rule finite_imageI) thus "finite (pi\<bullet>X)" using image by (simp) qed qed lemma pt_set_infinite_ineq: fixes X :: "'x set" and pi :: "'y prm" assumes pt: "pt TYPE('x) TYPE('y)" and at: "at TYPE('y)" shows "infinite (pi\<bullet>X) = infinite X" using pt at by (simp add: pt_set_finite_ineq) lemma pt_perm_supp_ineq: fixes pi :: "'x prm" and x :: "'a" assumes pta: "pt TYPE('a) TYPE('x)" and ptb: "pt TYPE('y) TYPE('x)" and at: "at TYPE('x)" and cp: "cp TYPE('a) TYPE('x) TYPE('y)" shows "(pi\<bullet>((supp x)::'y set)) = supp (pi\<bullet>x)" (is "?LHS = ?RHS") proof - have "?LHS = {pi\<bullet>a | a. infinite {b. [(a,b)]\<bullet>x \<noteq> x}}" by (simp add: supp_def perm_set_def) also have "\<dots> = {pi\<bullet>a | a. infinite {pi\<bullet>b | b. [(a,b)]\<bullet>x \<noteq> x}}" proof (rule Collect_permI, rule allI, rule iffI) fix a assume "infinite {b::'y. [(a,b)]\<bullet>x \<noteq> x}" hence "infinite (pi\<bullet>{b::'y. [(a,b)]\<bullet>x \<noteq> x})" by (simp add: pt_set_infinite_ineq[OF ptb, OF at]) thus "infinite {pi\<bullet>b |b::'y. [(a,b)]\<bullet>x \<noteq> x}" by (simp add: perm_set_def) next fix a assume "infinite {pi\<bullet>b |b::'y. [(a,b)]\<bullet>x \<noteq> x}" hence "infinite (pi\<bullet>{b::'y. [(a,b)]\<bullet>x \<noteq> x})" by (simp add: perm_set_def) thus "infinite {b::'y. [(a,b)]\<bullet>x \<noteq> x}" by (simp add: pt_set_infinite_ineq[OF ptb, OF at]) qed also have "\<dots> = {a. infinite {b::'y. [((rev pi)\<bullet>a,(rev pi)\<bullet>b)]\<bullet>x \<noteq> x}}" by (simp add: pt_set_eq_ineq[OF ptb, OF at]) also have "\<dots> = {a. infinite {b. pi\<bullet>([((rev pi)\<bullet>a,(rev pi)\<bullet>b)]\<bullet>x) \<noteq> (pi\<bullet>x)}}" by (simp add: pt_bij[OF pta, OF at]) also have "\<dots> = {a. infinite {b. 
[(a,b)]\<bullet>(pi\<bullet>x) \<noteq> (pi\<bullet>x)}}" proof (rule Collect_cong, rule Infinite_cong, rule Collect_cong) fix a::"'y" and b::"'y" have "pi\<bullet>(([((rev pi)\<bullet>a,(rev pi)\<bullet>b)])\<bullet>x) = [(a,b)]\<bullet>(pi\<bullet>x)" by (simp add: cp1[OF cp] pt_pi_rev[OF ptb, OF at]) thus "(pi\<bullet>([((rev pi)\<bullet>a,(rev pi)\<bullet>b)]\<bullet>x) \<noteq> pi\<bullet>x) = ([(a,b)]\<bullet>(pi\<bullet>x) \<noteq> pi\<bullet>x)" by simp qed finally show "?LHS = ?RHS" by (simp add: supp_def) qed lemma pt_perm_supp: fixes pi :: "'x prm" and x :: "'a" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" shows "(pi\<bullet>((supp x)::'x set)) = supp (pi\<bullet>x)" apply(rule pt_perm_supp_ineq) apply(rule pt) apply(rule at_pt_inst) apply(rule at)+ apply(rule cp_pt_inst) apply(rule pt) apply(rule at) done lemma pt_supp_finite_pi: fixes pi :: "'x prm" and x :: "'a" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" and f: "finite ((supp x)::'x set)" shows "finite ((supp (pi\<bullet>x))::'x set)" apply(simp add: pt_perm_supp[OF pt, OF at, symmetric]) apply(simp add: pt_set_finite_ineq[OF at_pt_inst[OF at], OF at]) apply(rule f) done lemma pt_fresh_left_ineq: fixes pi :: "'x prm" and x :: "'a" and a :: "'y" assumes pta: "pt TYPE('a) TYPE('x)" and ptb: "pt TYPE('y) TYPE('x)" and at: "at TYPE('x)" and cp: "cp TYPE('a) TYPE('x) TYPE('y)" shows "a\<sharp>(pi\<bullet>x) = ((rev pi)\<bullet>a)\<sharp>x" apply(simp add: fresh_def) apply(simp add: pt_set_bij1[OF ptb, OF at]) apply(simp add: pt_perm_supp_ineq[OF pta, OF ptb, OF at, OF cp]) done lemma pt_fresh_right_ineq: fixes pi :: "'x prm" and x :: "'a" and a :: "'y" assumes pta: "pt TYPE('a) TYPE('x)" and ptb: "pt TYPE('y) TYPE('x)" and at: "at TYPE('x)" and cp: "cp TYPE('a) TYPE('x) TYPE('y)" shows "(pi\<bullet>a)\<sharp>x = a\<sharp>((rev pi)\<bullet>x)" apply(simp add: fresh_def) apply(simp add: pt_set_bij1[OF ptb, OF at]) apply(simp add: pt_perm_supp_ineq[OF pta, OF ptb, OF at, OF cp]) done lemma pt_fresh_bij_ineq: fixes pi :: "'x prm" and x :: "'a" and a :: "'y" assumes pta: "pt TYPE('a) TYPE('x)" and ptb: "pt TYPE('y) TYPE('x)" and at: "at TYPE('x)" and cp: "cp TYPE('a) TYPE('x) TYPE('y)" shows "(pi\<bullet>a)\<sharp>(pi\<bullet>x) = a\<sharp>x" apply(simp add: pt_fresh_left_ineq[OF pta, OF ptb, OF at, OF cp]) apply(simp add: pt_rev_pi[OF ptb, OF at]) done lemma pt_fresh_left: fixes pi :: "'x prm" and x :: "'a" and a :: "'x" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" shows "a\<sharp>(pi\<bullet>x) = ((rev pi)\<bullet>a)\<sharp>x" apply(rule pt_fresh_left_ineq) apply(rule pt) apply(rule at_pt_inst) apply(rule at)+ apply(rule cp_pt_inst) apply(rule pt) apply(rule at) done lemma pt_fresh_right: fixes pi :: "'x prm" and x :: "'a" and a :: "'x" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" shows "(pi\<bullet>a)\<sharp>x = a\<sharp>((rev pi)\<bullet>x)" apply(rule pt_fresh_right_ineq) apply(rule pt) apply(rule at_pt_inst) apply(rule at)+ apply(rule cp_pt_inst) apply(rule pt) apply(rule at) done lemma pt_fresh_bij: fixes pi :: "'x prm" and x :: "'a" and a :: "'x" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" shows "(pi\<bullet>a)\<sharp>(pi\<bullet>x) = a\<sharp>x" apply(rule pt_fresh_bij_ineq) apply(rule pt) apply(rule at_pt_inst) apply(rule at)+ apply(rule cp_pt_inst) apply(rule pt) apply(rule at) done lemma pt_fresh_bij1: fixes pi :: "'x prm" and x :: "'a" and a :: "'x" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" and a: "a\<sharp>x" shows 
"(pi\<bullet>a)\<sharp>(pi\<bullet>x)" using a by (simp add: pt_fresh_bij[OF pt, OF at]) lemma pt_fresh_bij2: fixes pi :: "'x prm" and x :: "'a" and a :: "'x" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" and a: "(pi\<bullet>a)\<sharp>(pi\<bullet>x)" shows "a\<sharp>x" using a by (simp add: pt_fresh_bij[OF pt, OF at]) lemma pt_fresh_eqvt: fixes pi :: "'x prm" and x :: "'a" and a :: "'x" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" shows "pi\<bullet>(a\<sharp>x) = (pi\<bullet>a)\<sharp>(pi\<bullet>x)" by (simp add: perm_bool pt_fresh_bij[OF pt, OF at]) lemma pt_perm_fresh1: fixes a :: "'x" and b :: "'x" and x :: "'a" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE ('x)" and a1: "\<not>(a\<sharp>x)" and a2: "b\<sharp>x" shows "[(a,b)]\<bullet>x \<noteq> x" proof assume neg: "[(a,b)]\<bullet>x = x" from a1 have a1':"a\<in>(supp x)" by (simp add: fresh_def) from a2 have a2':"b\<notin>(supp x)" by (simp add: fresh_def) from a1' a2' have a3: "a\<noteq>b" by force from a1' have "([(a,b)]\<bullet>a)\<in>([(a,b)]\<bullet>(supp x))" by (simp only: pt_set_bij[OF at_pt_inst[OF at], OF at]) hence "b\<in>([(a,b)]\<bullet>(supp x))" by (simp add: at_calc[OF at]) hence "b\<in>(supp ([(a,b)]\<bullet>x))" by (simp add: pt_perm_supp[OF pt,OF at]) with a2' neg show False by simp qed (* the next two lemmas are needed in the proof *) (* of the structural induction principle *) lemma pt_fresh_aux: fixes a::"'x" and b::"'x" and c::"'x" and x::"'a" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE ('x)" assumes a1: "c\<noteq>a" and a2: "a\<sharp>x" and a3: "c\<sharp>x" shows "c\<sharp>([(a,b)]\<bullet>x)" using a1 a2 a3 by (simp_all add: pt_fresh_left[OF pt, OF at] at_calc[OF at]) lemma pt_fresh_perm_app: fixes pi :: "'x prm" and a :: "'x" and x :: "'y" assumes pt: "pt TYPE('y) TYPE('x)" and at: "at TYPE('x)" and h1: "a\<sharp>pi" and h2: "a\<sharp>x" shows "a\<sharp>(pi\<bullet>x)" using assms proof - have "a\<sharp>(rev pi)"using h1 by (simp add: fresh_list_rev) then have "(rev pi)\<bullet>a = a" by (simp add: at_prm_fresh[OF at]) then have "((rev pi)\<bullet>a)\<sharp>x" using h2 by simp thus "a\<sharp>(pi\<bullet>x)" by (simp add: pt_fresh_right[OF pt, OF at]) qed lemma pt_fresh_perm_app_ineq: fixes pi::"'x prm" and c::"'y" and x::"'a" assumes pta: "pt TYPE('a) TYPE('x)" and ptb: "pt TYPE('y) TYPE('x)" and at: "at TYPE('x)" and cp: "cp TYPE('a) TYPE('x) TYPE('y)" and dj: "disjoint TYPE('y) TYPE('x)" assumes a: "c\<sharp>x" shows "c\<sharp>(pi\<bullet>x)" using a by (simp add: pt_fresh_left_ineq[OF pta, OF ptb, OF at, OF cp] dj_perm_forget[OF dj]) lemma pt_fresh_eqvt_ineq: fixes pi::"'x prm" and c::"'y" and x::"'a" assumes pta: "pt TYPE('a) TYPE('x)" and ptb: "pt TYPE('y) TYPE('x)" and at: "at TYPE('x)" and cp: "cp TYPE('a) TYPE('x) TYPE('y)" and dj: "disjoint TYPE('y) TYPE('x)" shows "pi\<bullet>(c\<sharp>x) = (pi\<bullet>c)\<sharp>(pi\<bullet>x)" by (simp add: pt_fresh_left_ineq[OF pta, OF ptb, OF at, OF cp] dj_perm_forget[OF dj] perm_bool) --"the co-set of a finite set is infinte" lemma finite_infinite: assumes a: "finite {b::'x. P b}" and b: "infinite (UNIV::'x set)" shows "infinite {b. \<not>P b}" proof - from a b have "infinite (UNIV - {b::'x. P b})" by (simp add: Diff_infinite_finite) moreover have "{b::'x. \<not>P b} = UNIV - {b::'x. P b}" by auto ultimately show "infinite {b::'x. 
\<not>P b}" by simp qed lemma pt_fresh_fresh: fixes x :: "'a" and a :: "'x" and b :: "'x" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE ('x)" and a1: "a\<sharp>x" and a2: "b\<sharp>x" shows "[(a,b)]\<bullet>x=x" proof (cases "a=b") assume "a=b" hence "[(a,b)] \<triangleq> []" by (simp add: at_ds1[OF at]) hence "[(a,b)]\<bullet>x=([]::'x prm)\<bullet>x" by (rule pt3[OF pt]) thus ?thesis by (simp only: pt1[OF pt]) next assume c2: "a\<noteq>b" from a1 have f1: "finite {c. [(a,c)]\<bullet>x \<noteq> x}" by (simp add: fresh_def supp_def) from a2 have f2: "finite {c. [(b,c)]\<bullet>x \<noteq> x}" by (simp add: fresh_def supp_def) from f1 and f2 have f3: "finite {c. perm [(a,c)] x \<noteq> x \<or> perm [(b,c)] x \<noteq> x}" by (force simp only: Collect_disj_eq) have "infinite {c. [(a,c)]\<bullet>x = x \<and> [(b,c)]\<bullet>x = x}" by (simp add: finite_infinite[OF f3,OF at4[OF at], simplified]) hence "infinite ({c. [(a,c)]\<bullet>x = x \<and> [(b,c)]\<bullet>x = x}-{a,b})" by (force dest: Diff_infinite_finite) hence "({c. [(a,c)]\<bullet>x = x \<and> [(b,c)]\<bullet>x = x}-{a,b}) \<noteq> {}" by (metis finite_set set_empty2) hence "\<exists>c. c\<in>({c. [(a,c)]\<bullet>x = x \<and> [(b,c)]\<bullet>x = x}-{a,b})" by (force) then obtain c where eq1: "[(a,c)]\<bullet>x = x" and eq2: "[(b,c)]\<bullet>x = x" and ineq: "a\<noteq>c \<and> b\<noteq>c" by (force) hence "[(a,c)]\<bullet>([(b,c)]\<bullet>([(a,c)]\<bullet>x)) = x" by simp hence eq3: "[(a,c),(b,c),(a,c)]\<bullet>x = x" by (simp add: pt2[OF pt,symmetric]) from c2 ineq have "[(a,c),(b,c),(a,c)] \<triangleq> [(a,b)]" by (simp add: at_ds3[OF at]) hence "[(a,c),(b,c),(a,c)]\<bullet>x = [(a,b)]\<bullet>x" by (rule pt3[OF pt]) thus ?thesis using eq3 by simp qed lemma pt_pi_fresh_fresh: fixes x :: "'a" and pi :: "'x prm" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE ('x)" and a: "\<forall>(a,b)\<in>set pi. a\<sharp>x \<and> b\<sharp>x" shows "pi\<bullet>x=x" using a proof (induct pi) case Nil show "([]::'x prm)\<bullet>x = x" by (rule pt1[OF pt]) next case (Cons ab pi) have a: "\<forall>(a,b)\<in>set (ab#pi). a\<sharp>x \<and> b\<sharp>x" by fact have ih: "(\<forall>(a,b)\<in>set pi. 
a\<sharp>x \<and> b\<sharp>x) \<Longrightarrow> pi\<bullet>x=x" by fact obtain a b where e: "ab=(a,b)" by (cases ab) (auto) from a have a': "a\<sharp>x" "b\<sharp>x" using e by auto have "(ab#pi)\<bullet>x = ([(a,b)]@pi)\<bullet>x" using e by simp also have "\<dots> = [(a,b)]\<bullet>(pi\<bullet>x)" by (simp only: pt2[OF pt]) also have "\<dots> = [(a,b)]\<bullet>x" using ih a by simp also have "\<dots> = x" using a' by (simp add: pt_fresh_fresh[OF pt, OF at]) finally show "(ab#pi)\<bullet>x = x" by simp qed lemma pt_perm_compose: fixes pi1 :: "'x prm" and pi2 :: "'x prm" and x :: "'a" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" shows "pi2\<bullet>(pi1\<bullet>x) = (pi2\<bullet>pi1)\<bullet>(pi2\<bullet>x)" proof - have "(pi2@pi1) \<triangleq> ((pi2\<bullet>pi1)@pi2)" by (rule at_ds8 [OF at]) hence "(pi2@pi1)\<bullet>x = ((pi2\<bullet>pi1)@pi2)\<bullet>x" by (rule pt3[OF pt]) thus ?thesis by (simp add: pt2[OF pt]) qed lemma pt_perm_compose': fixes pi1 :: "'x prm" and pi2 :: "'x prm" and x :: "'a" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" shows "(pi2\<bullet>pi1)\<bullet>x = pi2\<bullet>(pi1\<bullet>((rev pi2)\<bullet>x))" proof - have "pi2\<bullet>(pi1\<bullet>((rev pi2)\<bullet>x)) = (pi2\<bullet>pi1)\<bullet>(pi2\<bullet>((rev pi2)\<bullet>x))" by (rule pt_perm_compose[OF pt, OF at]) also have "\<dots> = (pi2\<bullet>pi1)\<bullet>x" by (simp add: pt_pi_rev[OF pt, OF at]) finally have "pi2\<bullet>(pi1\<bullet>((rev pi2)\<bullet>x)) = (pi2\<bullet>pi1)\<bullet>x" by simp thus ?thesis by simp qed lemma pt_perm_compose_rev: fixes pi1 :: "'x prm" and pi2 :: "'x prm" and x :: "'a" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" shows "(rev pi2)\<bullet>((rev pi1)\<bullet>x) = (rev pi1)\<bullet>(rev (pi1\<bullet>pi2)\<bullet>x)" proof - have "((rev pi2)@(rev pi1)) \<triangleq> ((rev pi1)@(rev (pi1\<bullet>pi2)))" by (rule at_ds9[OF at]) hence "((rev pi2)@(rev pi1))\<bullet>x = ((rev pi1)@(rev (pi1\<bullet>pi2)))\<bullet>x" by (rule pt3[OF pt]) thus ?thesis by (simp add: pt2[OF pt]) qed section {* equivariance for some connectives *} lemma pt_all_eqvt: fixes pi :: "'x prm" and x :: "'a" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" shows "pi\<bullet>(\<forall>(x::'a). P x) = (\<forall>(x::'a). pi\<bullet>(P ((rev pi)\<bullet>x)))" apply(auto simp add: perm_bool perm_fun_def) apply(drule_tac x="pi\<bullet>x" in spec) apply(simp add: pt_rev_pi[OF pt, OF at]) done lemma pt_ex_eqvt: fixes pi :: "'x prm" and x :: "'a" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" shows "pi\<bullet>(\<exists>(x::'a). P x) = (\<exists>(x::'a). pi\<bullet>(P ((rev pi)\<bullet>x)))" apply(auto simp add: perm_bool perm_fun_def) apply(rule_tac x="pi\<bullet>x" in exI) apply(simp add: pt_rev_pi[OF pt, OF at]) done lemma pt_ex1_eqvt: fixes pi :: "'x prm" and x :: "'a" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" shows "(pi\<bullet>(\<exists>!x. P (x::'a))) = (\<exists>!x. pi\<bullet>(P (rev pi\<bullet>x)))" unfolding Ex1_def by (simp add: pt_ex_eqvt[OF pt at] conj_eqvt pt_all_eqvt[OF pt at] imp_eqvt pt_eq_eqvt[OF pt at] pt_pi_rev[OF pt at]) lemma pt_the_eqvt: fixes pi :: "'x prm" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" and unique: "\<exists>!x. P x" shows "pi\<bullet>(THE(x::'a). P x) = (THE(x::'a). 
pi\<bullet>(P ((rev pi)\<bullet>x)))" apply(rule the1_equality [symmetric]) apply(simp add: pt_ex1_eqvt[OF pt at,symmetric]) apply(simp add: perm_bool unique) apply(simp add: perm_bool pt_rev_pi [OF pt at]) apply(rule theI'[OF unique]) done section {* facts about supports *} (*==============================*) lemma supports_subset: fixes x :: "'a" and S1 :: "'x set" and S2 :: "'x set" assumes a: "S1 supports x" and b: "S1 \<subseteq> S2" shows "S2 supports x" using a b by (force simp add: supports_def) lemma supp_is_subset: fixes S :: "'x set" and x :: "'a" assumes a1: "S supports x" and a2: "finite S" shows "(supp x)\<subseteq>S" proof (rule ccontr) assume "\<not>(supp x \<subseteq> S)" hence "\<exists>a. a\<in>(supp x) \<and> a\<notin>S" by force then obtain a where b1: "a\<in>supp x" and b2: "a\<notin>S" by force from a1 b2 have "\<forall>b. (b\<notin>S \<longrightarrow> ([(a,b)]\<bullet>x = x))" by (unfold supports_def, force) hence "{b. [(a,b)]\<bullet>x \<noteq> x}\<subseteq>S" by force with a2 have "finite {b. [(a,b)]\<bullet>x \<noteq> x}" by (simp add: finite_subset) hence "a\<notin>(supp x)" by (unfold supp_def, auto) with b1 show False by simp qed lemma supp_supports: fixes x :: "'a" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE ('x)" shows "((supp x)::'x set) supports x" proof (unfold supports_def, intro strip) fix a b assume "(a::'x)\<notin>(supp x) \<and> (b::'x)\<notin>(supp x)" hence "a\<sharp>x" and "b\<sharp>x" by (auto simp add: fresh_def) thus "[(a,b)]\<bullet>x = x" by (rule pt_fresh_fresh[OF pt, OF at]) qed lemma supports_finite: fixes S :: "'x set" and x :: "'a" assumes a1: "S supports x" and a2: "finite S" shows "finite ((supp x)::'x set)" proof - have "(supp x)\<subseteq>S" using a1 a2 by (rule supp_is_subset) thus ?thesis using a2 by (simp add: finite_subset) qed lemma supp_is_inter: fixes x :: "'a" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE ('x)" and fs: "fs TYPE('a) TYPE('x)" shows "((supp x)::'x set) = (\<Inter> {S. finite S \<and> S supports x})" proof (rule equalityI) show "((supp x)::'x set) \<subseteq> (\<Inter> {S. finite S \<and> S supports x})" proof (clarify) fix S c assume b: "c\<in>((supp x)::'x set)" and "finite (S::'x set)" and "S supports x" hence "((supp x)::'x set)\<subseteq>S" by (simp add: supp_is_subset) with b show "c\<in>S" by force qed next show "(\<Inter> {S. finite S \<and> S supports x}) \<subseteq> ((supp x)::'x set)" proof (clarify, simp) fix c assume d: "\<forall>(S::'x set). finite S \<and> S supports x \<longrightarrow> c\<in>S" have "((supp x)::'x set) supports x" by (rule supp_supports[OF pt, OF at]) with d fs1[OF fs] show "c\<in>supp x" by force qed qed lemma supp_is_least_supports: fixes S :: "'x set" and x :: "'a" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE ('x)" and a1: "S supports x" and a2: "finite S" and a3: "\<forall>S'. (S' supports x) \<longrightarrow> S\<subseteq>S'" shows "S = (supp x)" proof (rule equalityI) show "((supp x)::'x set)\<subseteq>S" using a1 a2 by (rule supp_is_subset) next have "((supp x)::'x set) supports x" by (rule supp_supports[OF pt, OF at]) with a3 show "S\<subseteq>supp x" by force qed lemma supports_set: fixes S :: "'x set" and X :: "'a set" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE ('x)" and a: "\<forall>x\<in>X. (\<forall>(a::'x) (b::'x). 
a\<notin>S\<and>b\<notin>S \<longrightarrow> ([(a,b)]\<bullet>x)\<in>X)" shows "S supports X" using a apply(auto simp add: supports_def) apply(simp add: pt_set_bij1a[OF pt, OF at]) apply(force simp add: pt_swap_bij[OF pt, OF at]) apply(simp add: pt_set_bij1a[OF pt, OF at]) done lemma supports_fresh: fixes S :: "'x set" and a :: "'x" and x :: "'a" assumes a1: "S supports x" and a2: "finite S" and a3: "a\<notin>S" shows "a\<sharp>x" proof (simp add: fresh_def) have "(supp x)\<subseteq>S" using a1 a2 by (rule supp_is_subset) thus "a\<notin>(supp x)" using a3 by force qed lemma at_fin_set_supports: fixes X::"'x set" assumes at: "at TYPE('x)" shows "X supports X" proof - have "\<forall>a b. a\<notin>X \<and> b\<notin>X \<longrightarrow> [(a,b)]\<bullet>X = X" by (auto simp add: perm_set_def at_calc[OF at]) then show ?thesis by (simp add: supports_def) qed lemma infinite_Collection: assumes a1:"infinite X" and a2:"\<forall>b\<in>X. P(b)" shows "infinite {b\<in>X. P(b)}" using a1 a2 apply auto apply (subgoal_tac "infinite (X - {b\<in>X. P b})") apply (simp add: set_diff_eq) apply (simp add: Diff_infinite_finite) done lemma at_fin_set_supp: fixes X::"'x set" assumes at: "at TYPE('x)" and fs: "finite X" shows "(supp X) = X" proof (rule subset_antisym) show "(supp X) \<subseteq> X" using at_fin_set_supports[OF at] using fs by (simp add: supp_is_subset) next have inf: "infinite (UNIV-X)" using at4[OF at] fs by (auto simp add: Diff_infinite_finite) { fix a::"'x" assume asm: "a\<in>X" hence "\<forall>b\<in>(UNIV-X). [(a,b)]\<bullet>X\<noteq>X" by (auto simp add: perm_set_def at_calc[OF at]) with inf have "infinite {b\<in>(UNIV-X). [(a,b)]\<bullet>X\<noteq>X}" by (rule infinite_Collection) hence "infinite {b. [(a,b)]\<bullet>X\<noteq>X}" by (rule_tac infinite_super, auto) hence "a\<in>(supp X)" by (simp add: supp_def) } then show "X\<subseteq>(supp X)" by blast qed lemma at_fin_set_fresh: fixes X::"'x set" assumes at: "at TYPE('x)" and fs: "finite X" shows "(x \<sharp> X) = (x \<notin> X)" by (simp add: at_fin_set_supp fresh_def at fs) section {* Permutations acting on Functions *} (*==========================================*) lemma pt_fun_app_eq: fixes f :: "'a\<Rightarrow>'b" and x :: "'a" and pi :: "'x prm" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" shows "pi\<bullet>(f x) = (pi\<bullet>f)(pi\<bullet>x)" by (simp add: perm_fun_def pt_rev_pi[OF pt, OF at]) --"sometimes pt_fun_app_eq does too much; this lemma 'corrects it'" lemma pt_perm: fixes x :: "'a" and pi1 :: "'x prm" and pi2 :: "'x prm" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE ('x)" shows "(pi1\<bullet>perm pi2)(pi1\<bullet>x) = pi1\<bullet>(pi2\<bullet>x)" by (simp add: pt_fun_app_eq[OF pt, OF at]) lemma pt_fun_eq: fixes f :: "'a\<Rightarrow>'b" and pi :: "'x prm" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" shows "(pi\<bullet>f = f) = (\<forall> x. pi\<bullet>(f x) = f (pi\<bullet>x))" (is "?LHS = ?RHS") proof assume a: "?LHS" show "?RHS" proof fix x have "pi\<bullet>(f x) = (pi\<bullet>f)(pi\<bullet>x)" by (simp add: pt_fun_app_eq[OF pt, OF at]) also have "\<dots> = f (pi\<bullet>x)" using a by simp finally show "pi\<bullet>(f x) = f (pi\<bullet>x)" by simp qed next assume b: "?RHS" show "?LHS" proof (rule ccontr) assume "(pi\<bullet>f) \<noteq> f" hence "\<exists>x. 
(pi\<bullet>f) x \<noteq> f x" by (simp add: fun_eq_iff) then obtain x where b1: "(pi\<bullet>f) x \<noteq> f x" by force from b have "pi\<bullet>(f ((rev pi)\<bullet>x)) = f (pi\<bullet>((rev pi)\<bullet>x))" by force hence "(pi\<bullet>f)(pi\<bullet>((rev pi)\<bullet>x)) = f (pi\<bullet>((rev pi)\<bullet>x))" by (simp add: pt_fun_app_eq[OF pt, OF at]) hence "(pi\<bullet>f) x = f x" by (simp add: pt_pi_rev[OF pt, OF at]) with b1 show "False" by simp qed qed -- "two helper lemmas for the equivariance of functions" lemma pt_swap_eq_aux: fixes y :: "'a" and pi :: "'x prm" assumes pt: "pt TYPE('a) TYPE('x)" and a: "\<forall>(a::'x) (b::'x). [(a,b)]\<bullet>y = y" shows "pi\<bullet>y = y" proof(induct pi) case Nil show ?case by (simp add: pt1[OF pt]) next case (Cons x xs) have ih: "xs\<bullet>y = y" by fact obtain a b where p: "x=(a,b)" by force have "((a,b)#xs)\<bullet>y = ([(a,b)]@xs)\<bullet>y" by simp also have "\<dots> = [(a,b)]\<bullet>(xs\<bullet>y)" by (simp only: pt2[OF pt]) finally show ?case using a ih p by simp qed lemma pt_swap_eq: fixes y :: "'a" assumes pt: "pt TYPE('a) TYPE('x)" shows "(\<forall>(a::'x) (b::'x). [(a,b)]\<bullet>y = y) = (\<forall>pi::'x prm. pi\<bullet>y = y)" by (force intro: pt_swap_eq_aux[OF pt]) lemma pt_eqvt_fun1a: fixes f :: "'a\<Rightarrow>'b" assumes pta: "pt TYPE('a) TYPE('x)" and ptb: "pt TYPE('b) TYPE('x)" and at: "at TYPE('x)" and a: "((supp f)::'x set)={}" shows "\<forall>(pi::'x prm). pi\<bullet>f = f" proof (intro strip) fix pi have "\<forall>a b. a\<notin>((supp f)::'x set) \<and> b\<notin>((supp f)::'x set) \<longrightarrow> (([(a,b)]\<bullet>f) = f)" by (intro strip, fold fresh_def, simp add: pt_fresh_fresh[OF pt_fun_inst[OF pta, OF ptb, OF at],OF at]) with a have "\<forall>(a::'x) (b::'x). ([(a,b)]\<bullet>f) = f" by force hence "\<forall>(pi::'x prm). pi\<bullet>f = f" by (simp add: pt_swap_eq[OF pt_fun_inst[OF pta, OF ptb, OF at]]) thus "(pi::'x prm)\<bullet>f = f" by simp qed lemma pt_eqvt_fun1b: fixes f :: "'a\<Rightarrow>'b" assumes a: "\<forall>(pi::'x prm). pi\<bullet>f = f" shows "((supp f)::'x set)={}" using a by (simp add: supp_def) lemma pt_eqvt_fun1: fixes f :: "'a\<Rightarrow>'b" assumes pta: "pt TYPE('a) TYPE('x)" and ptb: "pt TYPE('b) TYPE('x)" and at: "at TYPE('x)" shows "(((supp f)::'x set)={}) = (\<forall>(pi::'x prm). pi\<bullet>f = f)" (is "?LHS = ?RHS") by (rule iffI, simp add: pt_eqvt_fun1a[OF pta, OF ptb, OF at], simp add: pt_eqvt_fun1b) lemma pt_eqvt_fun2a: fixes f :: "'a\<Rightarrow>'b" assumes pta: "pt TYPE('a) TYPE('x)" and ptb: "pt TYPE('b) TYPE('x)" and at: "at TYPE('x)" assumes a: "((supp f)::'x set)={}" shows "\<forall>(pi::'x prm) (x::'a). pi\<bullet>(f x) = f(pi\<bullet>x)" proof (intro strip) fix pi x from a have b: "\<forall>(pi::'x prm). pi\<bullet>f = f" by (simp add: pt_eqvt_fun1[OF pta, OF ptb, OF at]) have "(pi::'x prm)\<bullet>(f x) = (pi\<bullet>f)(pi\<bullet>x)" by (simp add: pt_fun_app_eq[OF pta, OF at]) with b show "(pi::'x prm)\<bullet>(f x) = f (pi\<bullet>x)" by force qed lemma pt_eqvt_fun2b: fixes f :: "'a\<Rightarrow>'b" assumes pt1: "pt TYPE('a) TYPE('x)" and pt2: "pt TYPE('b) TYPE('x)" and at: "at TYPE('x)" assumes a: "\<forall>(pi::'x prm) (x::'a). pi\<bullet>(f x) = f(pi\<bullet>x)" shows "((supp f)::'x set)={}" proof - from a have "\<forall>(pi::'x prm). 
pi\<bullet>f = f" by (simp add: pt_fun_eq[OF pt1, OF at, symmetric]) thus ?thesis by (simp add: supp_def) qed lemma pt_eqvt_fun2: fixes f :: "'a\<Rightarrow>'b" assumes pta: "pt TYPE('a) TYPE('x)" and ptb: "pt TYPE('b) TYPE('x)" and at: "at TYPE('x)" shows "(((supp f)::'x set)={}) = (\<forall>(pi::'x prm) (x::'a). pi\<bullet>(f x) = f(pi\<bullet>x))" by (rule iffI, simp add: pt_eqvt_fun2a[OF pta, OF ptb, OF at], simp add: pt_eqvt_fun2b[OF pta, OF ptb, OF at]) lemma pt_supp_fun_subset: fixes f :: "'a\<Rightarrow>'b" assumes pta: "pt TYPE('a) TYPE('x)" and ptb: "pt TYPE('b) TYPE('x)" and at: "at TYPE('x)" and f1: "finite ((supp f)::'x set)" and f2: "finite ((supp x)::'x set)" shows "supp (f x) \<subseteq> (((supp f)\<union>(supp x))::'x set)" proof - have s1: "((supp f)\<union>((supp x)::'x set)) supports (f x)" proof (simp add: supports_def, fold fresh_def, auto) fix a::"'x" and b::"'x" assume "a\<sharp>f" and "b\<sharp>f" hence a1: "[(a,b)]\<bullet>f = f" by (rule pt_fresh_fresh[OF pt_fun_inst[OF pta, OF ptb, OF at], OF at]) assume "a\<sharp>x" and "b\<sharp>x" hence a2: "[(a,b)]\<bullet>x = x" by (rule pt_fresh_fresh[OF pta, OF at]) from a1 a2 show "[(a,b)]\<bullet>(f x) = (f x)" by (simp add: pt_fun_app_eq[OF pta, OF at]) qed from f1 f2 have "finite ((supp f)\<union>((supp x)::'x set))" by force with s1 show ?thesis by (rule supp_is_subset) qed lemma pt_empty_supp_fun_subset: fixes f :: "'a\<Rightarrow>'b" assumes pta: "pt TYPE('a) TYPE('x)" and ptb: "pt TYPE('b) TYPE('x)" and at: "at TYPE('x)" and e: "(supp f)=({}::'x set)" shows "supp (f x) \<subseteq> ((supp x)::'x set)" proof (unfold supp_def, auto) fix a::"'x" assume a1: "finite {b. [(a, b)]\<bullet>x \<noteq> x}" assume "infinite {b. [(a, b)]\<bullet>(f x) \<noteq> f x}" hence a2: "infinite {b. f ([(a, b)]\<bullet>x) \<noteq> f x}" using e by (simp add: pt_eqvt_fun2[OF pta, OF ptb, OF at]) have a3: "{b. f ([(a,b)]\<bullet>x) \<noteq> f x}\<subseteq>{b. [(a,b)]\<bullet>x \<noteq> x}" by force from a1 a2 a3 show False by (force dest: finite_subset) qed section {* Facts about the support of finite sets of finitely supported things *} (*=============================================================================*) definition X_to_Un_supp :: "('a set) \<Rightarrow> 'x set" where "X_to_Un_supp X \<equiv> \<Union>x\<in>X. ((supp x)::'x set)" lemma UNION_f_eqvt: fixes X::"('a set)" and f::"'a \<Rightarrow> 'x set" and pi::"'x prm" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" shows "pi\<bullet>(\<Union>x\<in>X. f x) = (\<Union>x\<in>(pi\<bullet>X). (pi\<bullet>f) x)" proof - have pt_x: "pt TYPE('x) TYPE('x)" by (force intro: at_pt_inst at) show ?thesis proof (rule equalityI) case goal1 show "pi\<bullet>(\<Union>x\<in>X. f x) \<subseteq> (\<Union>x\<in>(pi\<bullet>X). (pi\<bullet>f) x)" apply(auto simp add: perm_set_def) apply(rule_tac x="pi\<bullet>xb" in exI) apply(rule conjI) apply(rule_tac x="xb" in exI) apply(simp) apply(subgoal_tac "(pi\<bullet>f) (pi\<bullet>xb) = pi\<bullet>(f xb)")(*A*) apply(simp) apply(rule pt_set_bij2[OF pt_x, OF at]) apply(assumption) (*A*) apply(rule sym) apply(rule pt_fun_app_eq[OF pt, OF at]) done next case goal2 show "(\<Union>x\<in>(pi\<bullet>X). (pi\<bullet>f) x) \<subseteq> pi\<bullet>(\<Union>x\<in>X. 
f x)" apply(auto simp add: perm_set_def) apply(rule_tac x="(rev pi)\<bullet>x" in exI) apply(rule conjI) apply(simp add: pt_pi_rev[OF pt_x, OF at]) apply(rule_tac x="xb" in bexI) apply(simp add: pt_set_bij1[OF pt_x, OF at]) apply(simp add: pt_fun_app_eq[OF pt, OF at]) apply(assumption) done qed qed lemma X_to_Un_supp_eqvt: fixes X::"('a set)" and pi::"'x prm" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" shows "pi\<bullet>(X_to_Un_supp X) = ((X_to_Un_supp (pi\<bullet>X))::'x set)" apply(simp add: X_to_Un_supp_def) apply(simp add: UNION_f_eqvt[OF pt, OF at] perm_fun_def) apply(simp add: pt_perm_supp[OF pt, OF at]) apply(simp add: pt_pi_rev[OF pt, OF at]) done lemma Union_supports_set: fixes X::"('a set)" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" shows "(\<Union>x\<in>X. ((supp x)::'x set)) supports X" apply(simp add: supports_def fresh_def[symmetric]) apply(rule allI)+ apply(rule impI) apply(erule conjE) apply(simp add: perm_set_def) apply(auto) apply(subgoal_tac "[(a,b)]\<bullet>xa = xa")(*A*) apply(simp) apply(rule pt_fresh_fresh[OF pt, OF at]) apply(force) apply(force) apply(rule_tac x="x" in exI) apply(simp) apply(rule sym) apply(rule pt_fresh_fresh[OF pt, OF at]) apply(force)+ done lemma Union_of_fin_supp_sets: fixes X::"('a set)" assumes fs: "fs TYPE('a) TYPE('x)" and fi: "finite X" shows "finite (\<Union>x\<in>X. ((supp x)::'x set))" using fi by (induct, auto simp add: fs1[OF fs]) lemma Union_included_in_supp: fixes X::"('a set)" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" and fs: "fs TYPE('a) TYPE('x)" and fi: "finite X" shows "(\<Union>x\<in>X. ((supp x)::'x set)) \<subseteq> supp X" proof - have "supp ((X_to_Un_supp X)::'x set) \<subseteq> ((supp X)::'x set)" apply(rule pt_empty_supp_fun_subset) apply(force intro: pt_set_inst at_pt_inst pt at)+ apply(rule pt_eqvt_fun2b) apply(force intro: pt_set_inst at_pt_inst pt at)+ apply(rule allI)+ apply(rule X_to_Un_supp_eqvt[OF pt, OF at]) done hence "supp (\<Union>x\<in>X. ((supp x)::'x set)) \<subseteq> ((supp X)::'x set)" by (simp add: X_to_Un_supp_def) moreover have "supp (\<Union>x\<in>X. ((supp x)::'x set)) = (\<Union>x\<in>X. ((supp x)::'x set))" apply(rule at_fin_set_supp[OF at]) apply(rule Union_of_fin_supp_sets[OF fs, OF fi]) done ultimately show ?thesis by force qed lemma supp_of_fin_sets: fixes X::"('a set)" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" and fs: "fs TYPE('a) TYPE('x)" and fi: "finite X" shows "(supp X) = (\<Union>x\<in>X. 
((supp x)::'x set))" apply(rule equalityI) apply(rule supp_is_subset) apply(rule Union_supports_set[OF pt, OF at]) apply(rule Union_of_fin_supp_sets[OF fs, OF fi]) apply(rule Union_included_in_supp[OF pt, OF at, OF fs, OF fi]) done lemma supp_fin_union: fixes X::"('a set)" and Y::"('a set)" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" and fs: "fs TYPE('a) TYPE('x)" and f1: "finite X" and f2: "finite Y" shows "(supp (X\<union>Y)) = (supp X)\<union>((supp Y)::'x set)" using f1 f2 by (force simp add: supp_of_fin_sets[OF pt, OF at, OF fs]) lemma supp_fin_insert: fixes X::"('a set)" and x::"'a" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" and fs: "fs TYPE('a) TYPE('x)" and f: "finite X" shows "(supp (insert x X)) = (supp x)\<union>((supp X)::'x set)" proof - have "(supp (insert x X)) = ((supp ({x}\<union>(X::'a set)))::'x set)" by simp also have "\<dots> = (supp {x})\<union>(supp X)" by (rule supp_fin_union[OF pt, OF at, OF fs], simp_all add: f) finally show "(supp (insert x X)) = (supp x)\<union>((supp X)::'x set)" by (simp add: supp_singleton) qed lemma fresh_fin_union: fixes X::"('a set)" and Y::"('a set)" and a::"'x" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" and fs: "fs TYPE('a) TYPE('x)" and f1: "finite X" and f2: "finite Y" shows "a\<sharp>(X\<union>Y) = (a\<sharp>X \<and> a\<sharp>Y)" apply(simp add: fresh_def) apply(simp add: supp_fin_union[OF pt, OF at, OF fs, OF f1, OF f2]) done lemma fresh_fin_insert: fixes X::"('a set)" and x::"'a" and a::"'x" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" and fs: "fs TYPE('a) TYPE('x)" and f: "finite X" shows "a\<sharp>(insert x X) = (a\<sharp>x \<and> a\<sharp>X)" apply(simp add: fresh_def) apply(simp add: supp_fin_insert[OF pt, OF at, OF fs, OF f]) done lemma fresh_fin_insert1: fixes X::"('a set)" and x::"'a" and a::"'x" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" and fs: "fs TYPE('a) TYPE('x)" and f: "finite X" and a1: "a\<sharp>x" and a2: "a\<sharp>X" shows "a\<sharp>(insert x X)" using a1 a2 by (simp add: fresh_fin_insert[OF pt, OF at, OF fs, OF f]) lemma pt_list_set_supp: fixes xs :: "'a list" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" and fs: "fs TYPE('a) TYPE('x)" shows "supp (set xs) = ((supp xs)::'x set)" proof - have "supp (set xs) = (\<Union>x\<in>(set xs). ((supp x)::'x set))" by (rule supp_of_fin_sets[OF pt, OF at, OF fs], rule finite_set) also have "(\<Union>x\<in>(set xs). ((supp x)::'x set)) = (supp xs)" proof(induct xs) case Nil show ?case by (simp add: supp_list_nil) next case (Cons h t) thus ?case by (simp add: supp_list_cons) qed finally show ?thesis by simp qed lemma pt_list_set_fresh: fixes a :: "'x" and xs :: "'a list" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" and fs: "fs TYPE('a) TYPE('x)" shows "a\<sharp>(set xs) = a\<sharp>xs" by (simp add: fresh_def pt_list_set_supp[OF pt, OF at, OF fs]) section {* generalisation of freshness to lists and sets of atoms *} (*================================================================*) consts fresh_star :: "'b \<Rightarrow> 'a \<Rightarrow> bool" ("_ \<sharp>* _" [100,100] 100) defs (overloaded) fresh_star_set: "xs\<sharp>*c \<equiv> \<forall>x\<in>xs. x\<sharp>c" defs (overloaded) fresh_star_list: "xs\<sharp>*c \<equiv> \<forall>x\<in>set xs. 
x\<sharp>c" lemmas fresh_star_def = fresh_star_list fresh_star_set lemma fresh_star_prod_set: fixes xs::"'a set" shows "xs\<sharp>*(a,b) = (xs\<sharp>*a \<and> xs\<sharp>*b)" by (auto simp add: fresh_star_def fresh_prod) lemma fresh_star_prod_list: fixes xs::"'a list" shows "xs\<sharp>*(a,b) = (xs\<sharp>*a \<and> xs\<sharp>*b)" by (auto simp add: fresh_star_def fresh_prod) lemmas fresh_star_prod = fresh_star_prod_list fresh_star_prod_set lemma fresh_star_set_eq: "set xs \<sharp>* c = xs \<sharp>* c" by (simp add: fresh_star_def) lemma fresh_star_Un_elim: "((S \<union> T) \<sharp>* c \<Longrightarrow> PROP C) \<equiv> (S \<sharp>* c \<Longrightarrow> T \<sharp>* c \<Longrightarrow> PROP C)" apply rule apply (simp_all add: fresh_star_def) apply (erule meta_mp) apply blast done lemma fresh_star_insert_elim: "(insert x S \<sharp>* c \<Longrightarrow> PROP C) \<equiv> (x \<sharp> c \<Longrightarrow> S \<sharp>* c \<Longrightarrow> PROP C)" by rule (simp_all add: fresh_star_def) lemma fresh_star_empty_elim: "({} \<sharp>* c \<Longrightarrow> PROP C) \<equiv> PROP C" by (simp add: fresh_star_def) text {* Normalization of freshness results; see \ @{text nominal_induct} *} lemma fresh_star_unit_elim: shows "((a::'a set)\<sharp>*() \<Longrightarrow> PROP C) \<equiv> PROP C" and "((b::'a list)\<sharp>*() \<Longrightarrow> PROP C) \<equiv> PROP C" by (simp_all add: fresh_star_def fresh_def supp_unit) lemma fresh_star_prod_elim: shows "((a::'a set)\<sharp>*(x,y) \<Longrightarrow> PROP C) \<equiv> (a\<sharp>*x \<Longrightarrow> a\<sharp>*y \<Longrightarrow> PROP C)" and "((b::'a list)\<sharp>*(x,y) \<Longrightarrow> PROP C) \<equiv> (b\<sharp>*x \<Longrightarrow> b\<sharp>*y \<Longrightarrow> PROP C)" by (rule, simp_all add: fresh_star_prod)+ lemma pt_fresh_star_bij_ineq: fixes pi :: "'x prm" and x :: "'a" and a :: "'y set" and b :: "'y list" assumes pta: "pt TYPE('a) TYPE('x)" and ptb: "pt TYPE('y) TYPE('x)" and at: "at TYPE('x)" and cp: "cp TYPE('a) TYPE('x) TYPE('y)" shows "(pi\<bullet>a)\<sharp>*(pi\<bullet>x) = a\<sharp>*x" and "(pi\<bullet>b)\<sharp>*(pi\<bullet>x) = b\<sharp>*x" apply(unfold fresh_star_def) apply(auto) apply(drule_tac x="pi\<bullet>xa" in bspec) apply(erule pt_set_bij2[OF ptb, OF at]) apply(simp add: fresh_star_def pt_fresh_bij_ineq[OF pta, OF ptb, OF at, OF cp]) apply(drule_tac x="(rev pi)\<bullet>xa" in bspec) apply(simp add: pt_set_bij1[OF ptb, OF at]) apply(simp add: pt_fresh_left_ineq[OF pta, OF ptb, OF at, OF cp]) apply(drule_tac x="pi\<bullet>xa" in bspec) apply(simp add: pt_set_bij1[OF ptb, OF at]) apply(simp add: set_eqvt pt_rev_pi[OF pt_list_inst[OF ptb], OF at]) apply(simp add: pt_fresh_bij_ineq[OF pta, OF ptb, OF at, OF cp]) apply(drule_tac x="(rev pi)\<bullet>xa" in bspec) apply(simp add: pt_set_bij1[OF ptb, OF at] set_eqvt) apply(simp add: pt_fresh_left_ineq[OF pta, OF ptb, OF at, OF cp]) done lemma pt_fresh_star_bij: fixes pi :: "'x prm" and x :: "'a" and a :: "'x set" and b :: "'x list" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" shows "(pi\<bullet>a)\<sharp>*(pi\<bullet>x) = a\<sharp>*x" and "(pi\<bullet>b)\<sharp>*(pi\<bullet>x) = b\<sharp>*x" apply(rule pt_fresh_star_bij_ineq(1)) apply(rule pt) apply(rule at_pt_inst) apply(rule at)+ apply(rule cp_pt_inst) apply(rule pt) apply(rule at) apply(rule pt_fresh_star_bij_ineq(2)) apply(rule pt) apply(rule at_pt_inst) apply(rule at)+ apply(rule cp_pt_inst) apply(rule pt) apply(rule at) done lemma pt_fresh_star_eqvt: fixes pi :: "'x prm" and x :: "'a" and a :: "'x set" and b :: "'x list" assumes pt: "pt 
TYPE('a) TYPE('x)" and at: "at TYPE('x)" shows "pi\<bullet>(a\<sharp>*x) = (pi\<bullet>a)\<sharp>*(pi\<bullet>x)" and "pi\<bullet>(b\<sharp>*x) = (pi\<bullet>b)\<sharp>*(pi\<bullet>x)" by (simp_all add: perm_bool pt_fresh_star_bij[OF pt, OF at]) lemma pt_fresh_star_eqvt_ineq: fixes pi::"'x prm" and a::"'y set" and b::"'y list" and x::"'a" assumes pta: "pt TYPE('a) TYPE('x)" and ptb: "pt TYPE('y) TYPE('x)" and at: "at TYPE('x)" and cp: "cp TYPE('a) TYPE('x) TYPE('y)" and dj: "disjoint TYPE('y) TYPE('x)" shows "pi\<bullet>(a\<sharp>*x) = (pi\<bullet>a)\<sharp>*(pi\<bullet>x)" and "pi\<bullet>(b\<sharp>*x) = (pi\<bullet>b)\<sharp>*(pi\<bullet>x)" by (simp_all add: pt_fresh_star_bij_ineq[OF pta, OF ptb, OF at, OF cp] dj_perm_forget[OF dj] perm_bool) lemma pt_freshs_freshs: assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE ('x)" and pi: "set (pi::'x prm) \<subseteq> Xs \<times> Ys" and Xs: "Xs \<sharp>* (x::'a)" and Ys: "Ys \<sharp>* x" shows "pi\<bullet>x = x" using pi proof (induct pi) case Nil show ?case by (simp add: pt1 [OF pt]) next case (Cons p pi) obtain a b where p: "p = (a, b)" by (cases p) with Cons Xs Ys have "a \<sharp> x" "b \<sharp> x" by (simp_all add: fresh_star_def) with Cons p show ?case by (simp add: pt_fresh_fresh [OF pt at] pt2 [OF pt, of "[(a, b)]" pi, simplified]) qed lemma pt_fresh_star_pi: fixes x::"'a" and pi::"'x prm" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" and a: "((supp x)::'x set)\<sharp>* pi" shows "pi\<bullet>x = x" using a apply(induct pi) apply(auto simp add: fresh_star_def fresh_list_cons fresh_prod pt1[OF pt]) apply(subgoal_tac "((a,b)#pi)\<bullet>x = ([(a,b)]@pi)\<bullet>x") apply(simp only: pt2[OF pt]) apply(rule pt_fresh_fresh[OF pt at]) apply(simp add: fresh_def at_supp[OF at]) apply(blast) apply(simp add: fresh_def at_supp[OF at]) apply(blast) apply(simp add: pt2[OF pt]) done section {* Infrastructure lemmas for strong rule inductions *} (*==========================================================*) text {* For every set of atoms, there is another set of atoms avoiding a finitely supported c and there is a permutation which 'translates' between both sets. *} lemma at_set_avoiding_aux: fixes Xs::"'a set" and As::"'a set" assumes at: "at TYPE('a)" and b: "Xs \<subseteq> As" and c: "finite As" and d: "finite ((supp c)::'a set)" shows "\<exists>(pi::'a prm). (pi\<bullet>Xs)\<sharp>*c \<and> (pi\<bullet>Xs) \<inter> As = {} \<and> set pi \<subseteq> Xs \<times> (pi\<bullet>Xs)" proof - from b c have "finite Xs" by (simp add: finite_subset) then show ?thesis using b proof (induct) case empty have "({}::'a set)\<sharp>*c" by (simp add: fresh_star_def) moreover have "({}::'a set) \<inter> As = {}" by simp moreover have "set ([]::'a prm) \<subseteq> {} \<times> {}" by simp ultimately show ?case by (simp add: empty_eqvt) next case (insert x Xs) then have ih: "\<exists>pi. 
(pi\<bullet>Xs)\<sharp>*c \<and> (pi\<bullet>Xs) \<inter> As = {} \<and> set pi \<subseteq> Xs \<times> (pi\<bullet>Xs)" by simp then obtain pi where a1: "(pi\<bullet>Xs)\<sharp>*c" and a2: "(pi\<bullet>Xs) \<inter> As = {}" and a4: "set pi \<subseteq> Xs \<times> (pi\<bullet>Xs)" by blast have b: "x\<notin>Xs" by fact have d1: "finite As" by fact have d2: "finite Xs" by fact have d3: "({x} \<union> Xs) \<subseteq> As" using insert(4) by simp from d d1 d2 obtain y::"'a" where fr: "y\<sharp>(c,pi\<bullet>Xs,As)" apply(rule_tac at_exists_fresh[OF at, where x="(c,pi\<bullet>Xs,As)"]) apply(auto simp add: supp_prod at_supp[OF at] at_fin_set_supp[OF at] pt_supp_finite_pi[OF pt_set_inst[OF at_pt_inst[OF at]] at]) done have "({y}\<union>(pi\<bullet>Xs))\<sharp>*c" using a1 fr by (simp add: fresh_star_def) moreover have "({y}\<union>(pi\<bullet>Xs))\<inter>As = {}" using a2 d1 fr by (simp add: fresh_prod at_fin_set_fresh[OF at]) moreover have "pi\<bullet>x=x" using a4 b a2 d3 by (rule_tac at_prm_fresh2[OF at]) (auto) then have "set ((pi\<bullet>x,y)#pi) \<subseteq> ({x} \<union> Xs) \<times> ({y}\<union>(pi\<bullet>Xs))" using a4 by auto moreover have "(((pi\<bullet>x,y)#pi)\<bullet>({x} \<union> Xs)) = {y}\<union>(pi\<bullet>Xs)" proof - have eq: "[(pi\<bullet>x,y)]\<bullet>(pi\<bullet>Xs) = (pi\<bullet>Xs)" proof - have "(pi\<bullet>x)\<sharp>(pi\<bullet>Xs)" using b d2 by (simp add: pt_fresh_bij [OF pt_set_inst [OF at_pt_inst [OF at]], OF at] at_fin_set_fresh [OF at]) moreover have "y\<sharp>(pi\<bullet>Xs)" using fr by simp ultimately show "[(pi\<bullet>x,y)]\<bullet>(pi\<bullet>Xs) = (pi\<bullet>Xs)" by (simp add: pt_fresh_fresh[OF pt_set_inst [OF at_pt_inst[OF at]], OF at]) qed have "(((pi\<bullet>x,y)#pi)\<bullet>({x}\<union>Xs)) = ([(pi\<bullet>x,y)]\<bullet>(pi\<bullet>({x}\<union>Xs)))" by (simp add: pt2[symmetric, OF pt_set_inst [OF at_pt_inst[OF at]]]) also have "\<dots> = {y}\<union>([(pi\<bullet>x,y)]\<bullet>(pi\<bullet>Xs))" by (simp only: union_eqvt perm_set_def at_calc[OF at])(auto) finally show "(((pi\<bullet>x,y)#pi)\<bullet>({x} \<union> Xs)) = {y}\<union>(pi\<bullet>Xs)" using eq by simp qed ultimately show ?case by (rule_tac x="(pi\<bullet>x,y)#pi" in exI) (auto) qed qed lemma at_set_avoiding: fixes Xs::"'a set" assumes at: "at TYPE('a)" and a: "finite Xs" and b: "finite ((supp c)::'a set)" obtains pi::"'a prm" where "(pi\<bullet>Xs)\<sharp>*c" and "set pi \<subseteq> Xs \<times> (pi\<bullet>Xs)" using a b at_set_avoiding_aux[OF at, where Xs="Xs" and As="Xs" and c="c"] by (blast) section {* composition instances *} (* ============================= *) lemma cp_list_inst: assumes c1: "cp TYPE ('a) TYPE('x) TYPE('y)" shows "cp TYPE ('a list) TYPE('x) TYPE('y)" using c1 apply(simp add: cp_def) apply(auto) apply(induct_tac x) apply(auto) done lemma cp_set_inst: assumes c1: "cp TYPE ('a) TYPE('x) TYPE('y)" shows "cp TYPE ('a set) TYPE('x) TYPE('y)" using c1 apply(simp add: cp_def) apply(auto) apply(auto simp add: perm_set_def) apply(rule_tac x="pi2\<bullet>xc" in exI) apply(auto) done lemma cp_option_inst: assumes c1: "cp TYPE ('a) TYPE('x) TYPE('y)" shows "cp TYPE ('a option) TYPE('x) TYPE('y)" using c1 apply(simp add: cp_def) apply(auto) apply(case_tac x) apply(auto) done lemma cp_noption_inst: assumes c1: "cp TYPE ('a) TYPE('x) TYPE('y)" shows "cp TYPE ('a noption) TYPE('x) TYPE('y)" using c1 apply(simp add: cp_def) apply(auto) apply(case_tac x) apply(auto) done lemma cp_unit_inst: shows "cp TYPE (unit) TYPE('x) TYPE('y)" apply(simp add: cp_def) done lemma cp_bool_inst: shows 
"cp TYPE (bool) TYPE('x) TYPE('y)" apply(simp add: cp_def) apply(rule allI)+ apply(induct_tac x) apply(simp_all) done lemma cp_prod_inst: assumes c1: "cp TYPE ('a) TYPE('x) TYPE('y)" and c2: "cp TYPE ('b) TYPE('x) TYPE('y)" shows "cp TYPE ('a\<times>'b) TYPE('x) TYPE('y)" using c1 c2 apply(simp add: cp_def) done lemma cp_fun_inst: assumes c1: "cp TYPE ('a) TYPE('x) TYPE('y)" and c2: "cp TYPE ('b) TYPE('x) TYPE('y)" and pt: "pt TYPE ('y) TYPE('x)" and at: "at TYPE ('x)" shows "cp TYPE ('a\<Rightarrow>'b) TYPE('x) TYPE('y)" using c1 c2 apply(auto simp add: cp_def perm_fun_def fun_eq_iff) apply(simp add: rev_eqvt[symmetric]) apply(simp add: pt_rev_pi[OF pt_list_inst[OF pt_prod_inst[OF pt, OF pt]], OF at]) done section {* Andy's freshness lemma *} (*================================*) lemma freshness_lemma: fixes h :: "'x\<Rightarrow>'a" assumes pta: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" and f1: "finite ((supp h)::'x set)" and a: "\<exists>a::'x. a\<sharp>(h,h a)" shows "\<exists>fr::'a. \<forall>a::'x. a\<sharp>h \<longrightarrow> (h a) = fr" proof - have ptb: "pt TYPE('x) TYPE('x)" by (simp add: at_pt_inst[OF at]) have ptc: "pt TYPE('x\<Rightarrow>'a) TYPE('x)" by (simp add: pt_fun_inst[OF ptb, OF pta, OF at]) from a obtain a0 where a1: "a0\<sharp>h" and a2: "a0\<sharp>(h a0)" by (force simp add: fresh_prod) show ?thesis proof let ?fr = "h (a0::'x)" show "\<forall>(a::'x). (a\<sharp>h \<longrightarrow> ((h a) = ?fr))" proof (intro strip) fix a assume a3: "(a::'x)\<sharp>h" show "h (a::'x) = h a0" proof (cases "a=a0") case True thus "h (a::'x) = h a0" by simp next case False assume "a\<noteq>a0" hence c1: "a\<notin>((supp a0)::'x set)" by (simp add: fresh_def[symmetric] at_fresh[OF at]) have c2: "a\<notin>((supp h)::'x set)" using a3 by (simp add: fresh_def) from c1 c2 have c3: "a\<notin>((supp h)\<union>((supp a0)::'x set))" by force have f2: "finite ((supp a0)::'x set)" by (simp add: at_supp[OF at]) from f1 f2 have "((supp (h a0))::'x set)\<subseteq>((supp h)\<union>(supp a0))" by (simp add: pt_supp_fun_subset[OF ptb, OF pta, OF at]) hence "a\<notin>((supp (h a0))::'x set)" using c3 by force hence "a\<sharp>(h a0)" by (simp add: fresh_def) with a2 have d1: "[(a0,a)]\<bullet>(h a0) = (h a0)" by (rule pt_fresh_fresh[OF pta, OF at]) from a1 a3 have d2: "[(a0,a)]\<bullet>h = h" by (rule pt_fresh_fresh[OF ptc, OF at]) from d1 have "h a0 = [(a0,a)]\<bullet>(h a0)" by simp also have "\<dots>= ([(a0,a)]\<bullet>h)([(a0,a)]\<bullet>a0)" by (simp add: pt_fun_app_eq[OF ptb, OF at]) also have "\<dots> = h ([(a0,a)]\<bullet>a0)" using d2 by simp also have "\<dots> = h a" by (simp add: at_calc[OF at]) finally show "h a = h a0" by simp qed qed qed qed lemma freshness_lemma_unique: fixes h :: "'x\<Rightarrow>'a" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" and f1: "finite ((supp h)::'x set)" and a: "\<exists>(a::'x). a\<sharp>(h,h a)" shows "\<exists>!(fr::'a). \<forall>(a::'x). a\<sharp>h \<longrightarrow> (h a) = fr" proof (rule ex_ex1I) from pt at f1 a show "\<exists>fr::'a. \<forall>a::'x. a\<sharp>h \<longrightarrow> h a = fr" by (simp add: freshness_lemma) next fix fr1 fr2 assume b1: "\<forall>a::'x. a\<sharp>h \<longrightarrow> h a = fr1" assume b2: "\<forall>a::'x. 
a\<sharp>h \<longrightarrow> h a = fr2" from a obtain a where "(a::'x)\<sharp>h" by (force simp add: fresh_prod) with b1 b2 have "h a = fr1 \<and> h a = fr2" by force thus "fr1 = fr2" by force qed -- "packaging the freshness lemma into a function" definition fresh_fun :: "('x\<Rightarrow>'a)\<Rightarrow>'a" where "fresh_fun (h) \<equiv> THE fr. (\<forall>(a::'x). a\<sharp>h \<longrightarrow> (h a) = fr)" lemma fresh_fun_app: fixes h :: "'x\<Rightarrow>'a" and a :: "'x" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" and f1: "finite ((supp h)::'x set)" and a: "\<exists>(a::'x). a\<sharp>(h,h a)" and b: "a\<sharp>h" shows "(fresh_fun h) = (h a)" proof (unfold fresh_fun_def, rule the_equality) show "\<forall>(a'::'x). a'\<sharp>h \<longrightarrow> h a' = h a" proof (intro strip) fix a'::"'x" assume c: "a'\<sharp>h" from pt at f1 a have "\<exists>(fr::'a). \<forall>(a::'x). a\<sharp>h \<longrightarrow> (h a) = fr" by (rule freshness_lemma) with b c show "h a' = h a" by force qed next fix fr::"'a" assume "\<forall>a. a\<sharp>h \<longrightarrow> h a = fr" with b show "fr = h a" by force qed lemma fresh_fun_app': fixes h :: "'x\<Rightarrow>'a" and a :: "'x" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" and f1: "finite ((supp h)::'x set)" and a: "a\<sharp>h" "a\<sharp>h a" shows "(fresh_fun h) = (h a)" apply(rule fresh_fun_app[OF pt, OF at, OF f1]) apply(auto simp add: fresh_prod intro: a) done lemma fresh_fun_equiv_ineq: fixes h :: "'y\<Rightarrow>'a" and pi:: "'x prm" assumes pta: "pt TYPE('a) TYPE('x)" and ptb: "pt TYPE('y) TYPE('x)" and ptb':"pt TYPE('a) TYPE('y)" and at: "at TYPE('x)" and at': "at TYPE('y)" and cpa: "cp TYPE('a) TYPE('x) TYPE('y)" and cpb: "cp TYPE('y) TYPE('x) TYPE('y)" and f1: "finite ((supp h)::'y set)" and a1: "\<exists>(a::'y). a\<sharp>(h,h a)" shows "pi\<bullet>(fresh_fun h) = fresh_fun(pi\<bullet>h)" (is "?LHS = ?RHS") proof - have ptd: "pt TYPE('y) TYPE('y)" by (simp add: at_pt_inst[OF at']) have ptc: "pt TYPE('y\<Rightarrow>'a) TYPE('x)" by (simp add: pt_fun_inst[OF ptb, OF pta, OF at]) have cpc: "cp TYPE('y\<Rightarrow>'a) TYPE ('x) TYPE ('y)" by (rule cp_fun_inst[OF cpb cpa ptb at]) have f2: "finite ((supp (pi\<bullet>h))::'y set)" proof - from f1 have "finite (pi\<bullet>((supp h)::'y set))" by (simp add: pt_set_finite_ineq[OF ptb, OF at]) thus ?thesis by (simp add: pt_perm_supp_ineq[OF ptc, OF ptb, OF at, OF cpc]) qed from a1 obtain a' where c0: "a'\<sharp>(h,h a')" by force hence c1: "a'\<sharp>h" and c2: "a'\<sharp>(h a')" by (simp_all add: fresh_prod) have c3: "(pi\<bullet>a')\<sharp>(pi\<bullet>h)" using c1 by (simp add: pt_fresh_bij_ineq[OF ptc, OF ptb, OF at, OF cpc]) have c4: "(pi\<bullet>a')\<sharp>(pi\<bullet>h) (pi\<bullet>a')" proof - from c2 have "(pi\<bullet>a')\<sharp>(pi\<bullet>(h a'))" by (simp add: pt_fresh_bij_ineq[OF pta, OF ptb, OF at,OF cpa]) thus ?thesis by (simp add: pt_fun_app_eq[OF ptb, OF at]) qed have a2: "\<exists>(a::'y). a\<sharp>(pi\<bullet>h,(pi\<bullet>h) a)" using c3 c4 by (force simp add: fresh_prod) have d1: "?LHS = pi\<bullet>(h a')" using c1 a1 by (simp add: fresh_fun_app[OF ptb', OF at', OF f1]) have d2: "?RHS = (pi\<bullet>h) (pi\<bullet>a')" using c3 a2 by (simp add: fresh_fun_app[OF ptb', OF at', OF f2]) show ?thesis using d1 d2 by (simp add: pt_fun_app_eq[OF ptb, OF at]) qed lemma fresh_fun_equiv: fixes h :: "'x\<Rightarrow>'a" and pi:: "'x prm" assumes pta: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" and f1: "finite ((supp h)::'x set)" and a1: "\<exists>(a::'x). 
a\<sharp>(h,h a)" shows "pi\<bullet>(fresh_fun h) = fresh_fun(pi\<bullet>h)" (is "?LHS = ?RHS") proof - have ptb: "pt TYPE('x) TYPE('x)" by (simp add: at_pt_inst[OF at]) have ptc: "pt TYPE('x\<Rightarrow>'a) TYPE('x)" by (simp add: pt_fun_inst[OF ptb, OF pta, OF at]) have f2: "finite ((supp (pi\<bullet>h))::'x set)" proof - from f1 have "finite (pi\<bullet>((supp h)::'x set))" by (simp add: pt_set_finite_ineq[OF ptb, OF at]) thus ?thesis by (simp add: pt_perm_supp[OF ptc, OF at]) qed from a1 obtain a' where c0: "a'\<sharp>(h,h a')" by force hence c1: "a'\<sharp>h" and c2: "a'\<sharp>(h a')" by (simp_all add: fresh_prod) have c3: "(pi\<bullet>a')\<sharp>(pi\<bullet>h)" using c1 by (simp add: pt_fresh_bij[OF ptc, OF at]) have c4: "(pi\<bullet>a')\<sharp>(pi\<bullet>h) (pi\<bullet>a')" proof - from c2 have "(pi\<bullet>a')\<sharp>(pi\<bullet>(h a'))" by (simp add: pt_fresh_bij[OF pta, OF at]) thus ?thesis by (simp add: pt_fun_app_eq[OF ptb, OF at]) qed have a2: "\<exists>(a::'x). a\<sharp>(pi\<bullet>h,(pi\<bullet>h) a)" using c3 c4 by (force simp add: fresh_prod) have d1: "?LHS = pi\<bullet>(h a')" using c1 a1 by (simp add: fresh_fun_app[OF pta, OF at, OF f1]) have d2: "?RHS = (pi\<bullet>h) (pi\<bullet>a')" using c3 a2 by (simp add: fresh_fun_app[OF pta, OF at, OF f2]) show ?thesis using d1 d2 by (simp add: pt_fun_app_eq[OF ptb, OF at]) qed lemma fresh_fun_supports: fixes h :: "'x\<Rightarrow>'a" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" and f1: "finite ((supp h)::'x set)" and a: "\<exists>(a::'x). a\<sharp>(h,h a)" shows "((supp h)::'x set) supports (fresh_fun h)" apply(simp add: supports_def fresh_def[symmetric]) apply(auto) apply(simp add: fresh_fun_equiv[OF pt, OF at, OF f1, OF a]) apply(simp add: pt_fresh_fresh[OF pt_fun_inst[OF at_pt_inst[OF at], OF pt], OF at, OF at]) done section {* Abstraction function *} (*==============================*) lemma pt_abs_fun_inst: assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" shows "pt TYPE('x\<Rightarrow>('a noption)) TYPE('x)" by (rule pt_fun_inst[OF at_pt_inst[OF at],OF pt_noption_inst[OF pt],OF at]) definition abs_fun :: "'x\<Rightarrow>'a\<Rightarrow>('x\<Rightarrow>('a noption))" ("[_]._" [100,100] 100) where "[a].x \<equiv> (\<lambda>b. 
(if b=a then nSome(x) else (if b\<sharp>x then nSome([(a,b)]\<bullet>x) else nNone)))" (* FIXME: should be called perm_if and placed close to the definition of permutations on bools *) lemma abs_fun_if: fixes pi :: "'x prm" and x :: "'a" and y :: "'a" and c :: "bool" shows "pi\<bullet>(if c then x else y) = (if c then (pi\<bullet>x) else (pi\<bullet>y))" by force lemma abs_fun_pi_ineq: fixes a :: "'y" and x :: "'a" and pi :: "'x prm" assumes pta: "pt TYPE('a) TYPE('x)" and ptb: "pt TYPE('y) TYPE('x)" and at: "at TYPE('x)" and cp: "cp TYPE('a) TYPE('x) TYPE('y)" shows "pi\<bullet>([a].x) = [(pi\<bullet>a)].(pi\<bullet>x)" apply(simp add: abs_fun_def perm_fun_def abs_fun_if) apply(simp only: fun_eq_iff) apply(rule allI) apply(subgoal_tac "(((rev pi)\<bullet>(xa::'y)) = (a::'y)) = (xa = pi\<bullet>a)")(*A*) apply(subgoal_tac "(((rev pi)\<bullet>xa)\<sharp>x) = (xa\<sharp>(pi\<bullet>x))")(*B*) apply(subgoal_tac "pi\<bullet>([(a,(rev pi)\<bullet>xa)]\<bullet>x) = [(pi\<bullet>a,xa)]\<bullet>(pi\<bullet>x)")(*C*) apply(simp) (*C*) apply(simp add: cp1[OF cp]) apply(simp add: pt_pi_rev[OF ptb, OF at]) (*B*) apply(simp add: pt_fresh_left_ineq[OF pta, OF ptb, OF at, OF cp]) (*A*) apply(rule iffI) apply(rule pt_bij2[OF ptb, OF at, THEN sym]) apply(simp) apply(rule pt_bij2[OF ptb, OF at]) apply(simp) done lemma abs_fun_pi: fixes a :: "'x" and x :: "'a" and pi :: "'x prm" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" shows "pi\<bullet>([a].x) = [(pi\<bullet>a)].(pi\<bullet>x)" apply(rule abs_fun_pi_ineq) apply(rule pt) apply(rule at_pt_inst) apply(rule at)+ apply(rule cp_pt_inst) apply(rule pt) apply(rule at) done lemma abs_fun_eq1: fixes x :: "'a" and y :: "'a" and a :: "'x" shows "([a].x = [a].y) = (x = y)" apply(auto simp add: abs_fun_def) apply(auto simp add: fun_eq_iff) apply(drule_tac x="a" in spec) apply(simp) done lemma abs_fun_eq2: fixes x :: "'a" and y :: "'a" and a :: "'x" and b :: "'x" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" and a1: "a\<noteq>b" and a2: "[a].x = [b].y" shows "x=[(a,b)]\<bullet>y \<and> a\<sharp>y" proof - from a2 have "\<forall>c::'x. 
([a].x) c = ([b].y) c" by (force simp add: fun_eq_iff) hence "([a].x) a = ([b].y) a" by simp hence a3: "nSome(x) = ([b].y) a" by (simp add: abs_fun_def) show "x=[(a,b)]\<bullet>y \<and> a\<sharp>y" proof (cases "a\<sharp>y") assume a4: "a\<sharp>y" hence "x=[(b,a)]\<bullet>y" using a3 a1 by (simp add: abs_fun_def) moreover have "[(a,b)]\<bullet>y = [(b,a)]\<bullet>y" by (rule pt3[OF pt], rule at_ds5[OF at]) ultimately show ?thesis using a4 by simp next assume "\<not>a\<sharp>y" hence "nSome(x) = nNone" using a1 a3 by (simp add: abs_fun_def) hence False by simp thus ?thesis by simp qed qed lemma abs_fun_eq3: fixes x :: "'a" and y :: "'a" and a :: "'x" and b :: "'x" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" and a1: "a\<noteq>b" and a2: "x=[(a,b)]\<bullet>y" and a3: "a\<sharp>y" shows "[a].x =[b].y" proof - show ?thesis proof (simp only: abs_fun_def fun_eq_iff, intro strip) fix c::"'x" let ?LHS = "if c=a then nSome(x) else if c\<sharp>x then nSome([(a,c)]\<bullet>x) else nNone" and ?RHS = "if c=b then nSome(y) else if c\<sharp>y then nSome([(b,c)]\<bullet>y) else nNone" show "?LHS=?RHS" proof - have "(c=a) \<or> (c=b) \<or> (c\<noteq>a \<and> c\<noteq>b)" by blast moreover --"case c=a" { have "nSome(x) = nSome([(a,b)]\<bullet>y)" using a2 by simp also have "\<dots> = nSome([(b,a)]\<bullet>y)" by (simp, rule pt3[OF pt], rule at_ds5[OF at]) finally have "nSome(x) = nSome([(b,a)]\<bullet>y)" by simp moreover assume "c=a" ultimately have "?LHS=?RHS" using a1 a3 by simp } moreover -- "case c=b" { have a4: "y=[(a,b)]\<bullet>x" using a2 by (simp only: pt_swap_bij[OF pt, OF at]) hence "a\<sharp>([(a,b)]\<bullet>x)" using a3 by simp hence "b\<sharp>x" by (simp add: at_calc[OF at] pt_fresh_left[OF pt, OF at]) moreover assume "c=b" ultimately have "?LHS=?RHS" using a1 a4 by simp } moreover -- "case c\<noteq>a \<and> c\<noteq>b" { assume a5: "c\<noteq>a \<and> c\<noteq>b" moreover have "c\<sharp>x = c\<sharp>y" using a2 a5 by (force simp add: at_calc[OF at] pt_fresh_left[OF pt, OF at]) moreover have "c\<sharp>y \<longrightarrow> [(a,c)]\<bullet>x = [(b,c)]\<bullet>y" proof (intro strip) assume a6: "c\<sharp>y" have "[(a,c),(b,c),(a,c)] \<triangleq> [(a,b)]" using a1 a5 by (force intro: at_ds3[OF at]) hence "[(a,c)]\<bullet>([(b,c)]\<bullet>([(a,c)]\<bullet>y)) = [(a,b)]\<bullet>y" by (simp add: pt2[OF pt, symmetric] pt3[OF pt]) hence "[(a,c)]\<bullet>([(b,c)]\<bullet>y) = [(a,b)]\<bullet>y" using a3 a6 by (simp add: pt_fresh_fresh[OF pt, OF at]) hence "[(a,c)]\<bullet>([(b,c)]\<bullet>y) = x" using a2 by simp hence "[(b,c)]\<bullet>y = [(a,c)]\<bullet>x" by (drule_tac pt_bij1[OF pt, OF at], simp) thus "[(a,c)]\<bullet>x = [(b,c)]\<bullet>y" by simp qed ultimately have "?LHS=?RHS" by simp } ultimately show "?LHS = ?RHS" by blast qed qed qed (* alpha equivalence *) lemma abs_fun_eq: fixes x :: "'a" and y :: "'a" and a :: "'x" and b :: "'x" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" shows "([a].x = [b].y) = ((a=b \<and> x=y)\<or>(a\<noteq>b \<and> x=[(a,b)]\<bullet>y \<and> a\<sharp>y))" proof (rule iffI) assume b: "[a].x = [b].y" show "(a=b \<and> x=y)\<or>(a\<noteq>b \<and> x=[(a,b)]\<bullet>y \<and> a\<sharp>y)" proof (cases "a=b") case True with b show ?thesis by (simp add: abs_fun_eq1) next case False with b show ?thesis by (simp add: abs_fun_eq2[OF pt, OF at]) qed next assume "(a=b \<and> x=y)\<or>(a\<noteq>b \<and> x=[(a,b)]\<bullet>y \<and> a\<sharp>y)" thus "[a].x = [b].y" proof assume "a=b \<and> x=y" thus ?thesis by simp next assume "a\<noteq>b \<and> 
x=[(a,b)]\<bullet>y \<and> a\<sharp>y" thus ?thesis by (simp add: abs_fun_eq3[OF pt, OF at]) qed qed (* symmetric version of alpha-equivalence *) lemma abs_fun_eq': fixes x :: "'a" and y :: "'a" and a :: "'x" and b :: "'x" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" shows "([a].x = [b].y) = ((a=b \<and> x=y)\<or>(a\<noteq>b \<and> [(b,a)]\<bullet>x=y \<and> b\<sharp>x))" by (auto simp add: abs_fun_eq[OF pt, OF at] pt_swap_bij'[OF pt, OF at] pt_fresh_left[OF pt, OF at] at_calc[OF at]) (* alpha_equivalence with a fresh name *) lemma abs_fun_fresh: fixes x :: "'a" and y :: "'a" and c :: "'x" and a :: "'x" and b :: "'x" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" and fr: "c\<noteq>a" "c\<noteq>b" "c\<sharp>x" "c\<sharp>y" shows "([a].x = [b].y) = ([(a,c)]\<bullet>x = [(b,c)]\<bullet>y)" proof (rule iffI) assume eq0: "[a].x = [b].y" show "[(a,c)]\<bullet>x = [(b,c)]\<bullet>y" proof (cases "a=b") case True then show ?thesis using eq0 by (simp add: pt_bij[OF pt, OF at] abs_fun_eq[OF pt, OF at]) next case False have ineq: "a\<noteq>b" by fact with eq0 have eq: "x=[(a,b)]\<bullet>y" and fr': "a\<sharp>y" by (simp_all add: abs_fun_eq[OF pt, OF at]) from eq have "[(a,c)]\<bullet>x = [(a,c)]\<bullet>[(a,b)]\<bullet>y" by (simp add: pt_bij[OF pt, OF at]) also have "\<dots> = ([(a,c)]\<bullet>[(a,b)])\<bullet>([(a,c)]\<bullet>y)" by (rule pt_perm_compose[OF pt, OF at]) also have "\<dots> = [(c,b)]\<bullet>y" using ineq fr fr' by (simp add: pt_fresh_fresh[OF pt, OF at] at_calc[OF at]) also have "\<dots> = [(b,c)]\<bullet>y" by (rule pt3[OF pt], rule at_ds5[OF at]) finally show ?thesis by simp qed next assume eq: "[(a,c)]\<bullet>x = [(b,c)]\<bullet>y" thus "[a].x = [b].y" proof (cases "a=b") case True then show ?thesis using eq by (simp add: pt_bij[OF pt, OF at] abs_fun_eq[OF pt, OF at]) next case False have ineq: "a\<noteq>b" by fact from fr have "([(a,c)]\<bullet>c)\<sharp>([(a,c)]\<bullet>x)" by (simp add: pt_fresh_bij[OF pt, OF at]) hence "a\<sharp>([(b,c)]\<bullet>y)" using eq fr by (simp add: at_calc[OF at]) hence fr0: "a\<sharp>y" using ineq fr by (simp add: pt_fresh_left[OF pt, OF at] at_calc[OF at]) from eq have "x = (rev [(a,c)])\<bullet>([(b,c)]\<bullet>y)" by (rule pt_bij1[OF pt, OF at]) also have "\<dots> = [(a,c)]\<bullet>([(b,c)]\<bullet>y)" by simp also have "\<dots> = ([(a,c)]\<bullet>[(b,c)])\<bullet>([(a,c)]\<bullet>y)" by (rule pt_perm_compose[OF pt, OF at]) also have "\<dots> = [(b,a)]\<bullet>y" using ineq fr fr0 by (simp add: pt_fresh_fresh[OF pt, OF at] at_calc[OF at]) also have "\<dots> = [(a,b)]\<bullet>y" by (rule pt3[OF pt], rule at_ds5[OF at]) finally show ?thesis using ineq fr0 by (simp add: abs_fun_eq[OF pt, OF at]) qed qed lemma abs_fun_fresh': fixes x :: "'a" and y :: "'a" and c :: "'x" and a :: "'x" and b :: "'x" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" and as: "[a].x = [b].y" and fr: "c\<noteq>a" "c\<noteq>b" "c\<sharp>x" "c\<sharp>y" shows "x = [(a,c)]\<bullet>[(b,c)]\<bullet>y" using as fr apply(drule_tac sym) apply(simp add: abs_fun_fresh[OF pt, OF at] pt_swap_bij[OF pt, OF at]) done lemma abs_fun_supp_approx: fixes x :: "'a" and a :: "'x" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" shows "((supp ([a].x))::'x set) \<subseteq> (supp (x,a))" proof fix c assume "c\<in>((supp ([a].x))::'x set)" hence "infinite {b. [(c,b)]\<bullet>([a].x) \<noteq> [a].x}" by (simp add: supp_def) hence "infinite {b. [([(c,b)]\<bullet>a)].([(c,b)]\<bullet>x) \<noteq> [a].x}" by (simp add: abs_fun_pi[OF pt, OF at]) moreover have "{b. 
[([(c,b)]\<bullet>a)].([(c,b)]\<bullet>x) \<noteq> [a].x} \<subseteq> {b. ([(c,b)]\<bullet>x,[(c,b)]\<bullet>a) \<noteq> (x, a)}" by force ultimately have "infinite {b. ([(c,b)]\<bullet>x,[(c,b)]\<bullet>a) \<noteq> (x, a)}" by (simp add: infinite_super) thus "c\<in>(supp (x,a))" by (simp add: supp_def) qed lemma abs_fun_finite_supp: fixes x :: "'a" and a :: "'x" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" and f: "finite ((supp x)::'x set)" shows "finite ((supp ([a].x))::'x set)" proof - from f have "finite ((supp (x,a))::'x set)" by (simp add: supp_prod at_supp[OF at]) moreover have "((supp ([a].x))::'x set) \<subseteq> (supp (x,a))" by (rule abs_fun_supp_approx[OF pt, OF at]) ultimately show ?thesis by (simp add: finite_subset) qed lemma fresh_abs_funI1: fixes x :: "'a" and a :: "'x" and b :: "'x" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" and f: "finite ((supp x)::'x set)" and a1: "b\<sharp>x" and a2: "a\<noteq>b" shows "b\<sharp>([a].x)" proof - have "\<exists>c::'x. c\<sharp>(b,a,x,[a].x)" proof (rule at_exists_fresh'[OF at], auto simp add: supp_prod at_supp[OF at] f) show "finite ((supp ([a].x))::'x set)" using f by (simp add: abs_fun_finite_supp[OF pt, OF at]) qed then obtain c where fr1: "c\<noteq>b" and fr2: "c\<noteq>a" and fr3: "c\<sharp>x" and fr4: "c\<sharp>([a].x)" by (force simp add: fresh_prod at_fresh[OF at]) have e: "[(c,b)]\<bullet>([a].x) = [a].([(c,b)]\<bullet>x)" using a2 fr1 fr2 by (force simp add: abs_fun_pi[OF pt, OF at] at_calc[OF at]) from fr4 have "([(c,b)]\<bullet>c)\<sharp> ([(c,b)]\<bullet>([a].x))" by (simp add: pt_fresh_bij[OF pt_abs_fun_inst[OF pt, OF at], OF at]) hence "b\<sharp>([a].([(c,b)]\<bullet>x))" using fr1 fr2 e by (simp add: at_calc[OF at]) thus ?thesis using a1 fr3 by (simp add: pt_fresh_fresh[OF pt, OF at]) qed lemma fresh_abs_funE: fixes a :: "'x" and b :: "'x" and x :: "'a" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" and f: "finite ((supp x)::'x set)" and a1: "b\<sharp>([a].x)" and a2: "b\<noteq>a" shows "b\<sharp>x" proof - have "\<exists>c::'x. c\<sharp>(b,a,x,[a].x)" proof (rule at_exists_fresh'[OF at], auto simp add: supp_prod at_supp[OF at] f) show "finite ((supp ([a].x))::'x set)" using f by (simp add: abs_fun_finite_supp[OF pt, OF at]) qed then obtain c where fr1: "b\<noteq>c" and fr2: "c\<noteq>a" and fr3: "c\<sharp>x" and fr4: "c\<sharp>([a].x)" by (force simp add: fresh_prod at_fresh[OF at]) have "[a].x = [(b,c)]\<bullet>([a].x)" using a1 fr4 by (simp add: pt_fresh_fresh[OF pt_abs_fun_inst[OF pt, OF at], OF at]) hence "[a].x = [a].([(b,c)]\<bullet>x)" using fr2 a2 by (force simp add: abs_fun_pi[OF pt, OF at] at_calc[OF at]) hence b: "([(b,c)]\<bullet>x) = x" by (simp add: abs_fun_eq1) from fr3 have "([(b,c)]\<bullet>c)\<sharp>([(b,c)]\<bullet>x)" by (simp add: pt_fresh_bij[OF pt, OF at]) thus ?thesis using b fr1 by (simp add: at_calc[OF at]) qed lemma fresh_abs_funI2: fixes a :: "'x" and x :: "'a" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" and f: "finite ((supp x)::'x set)" shows "a\<sharp>([a].x)" proof - have "\<exists>c::'x. 
c\<sharp>(a,x)" by (rule at_exists_fresh'[OF at], auto simp add: supp_prod at_supp[OF at] f) then obtain c where fr1: "a\<noteq>c" and fr1_sym: "c\<noteq>a" and fr2: "c\<sharp>x" by (force simp add: fresh_prod at_fresh[OF at]) have "c\<sharp>([a].x)" using f fr1 fr2 by (simp add: fresh_abs_funI1[OF pt, OF at]) hence "([(c,a)]\<bullet>c)\<sharp>([(c,a)]\<bullet>([a].x))" using fr1 by (simp only: pt_fresh_bij[OF pt_abs_fun_inst[OF pt, OF at], OF at]) hence a: "a\<sharp>([c].([(c,a)]\<bullet>x))" using fr1_sym by (simp add: abs_fun_pi[OF pt, OF at] at_calc[OF at]) have "[c].([(c,a)]\<bullet>x) = ([a].x)" using fr1_sym fr2 by (simp add: abs_fun_eq[OF pt, OF at]) thus ?thesis using a by simp qed lemma fresh_abs_fun_iff: fixes a :: "'x" and b :: "'x" and x :: "'a" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" and f: "finite ((supp x)::'x set)" shows "(b\<sharp>([a].x)) = (b=a \<or> b\<sharp>x)" by (auto dest: fresh_abs_funE[OF pt, OF at,OF f] intro: fresh_abs_funI1[OF pt, OF at,OF f] fresh_abs_funI2[OF pt, OF at,OF f]) lemma abs_fun_supp: fixes a :: "'x" and x :: "'a" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" and f: "finite ((supp x)::'x set)" shows "supp ([a].x) = (supp x)-{a}" by (force simp add: supp_fresh_iff fresh_abs_fun_iff[OF pt, OF at, OF f]) (* maybe needs to be better stated as supp intersection supp *) lemma abs_fun_supp_ineq: fixes a :: "'y" and x :: "'a" assumes pta: "pt TYPE('a) TYPE('x)" and ptb: "pt TYPE('y) TYPE('x)" and at: "at TYPE('x)" and cp: "cp TYPE('a) TYPE('x) TYPE('y)" and dj: "disjoint TYPE('y) TYPE('x)" shows "((supp ([a].x))::'x set) = (supp x)" apply(auto simp add: supp_def) apply(auto simp add: abs_fun_pi_ineq[OF pta, OF ptb, OF at, OF cp]) apply(auto simp add: dj_perm_forget[OF dj]) apply(auto simp add: abs_fun_eq1) done lemma fresh_abs_fun_iff_ineq: fixes a :: "'y" and b :: "'x" and x :: "'a" assumes pta: "pt TYPE('a) TYPE('x)" and ptb: "pt TYPE('y) TYPE('x)" and at: "at TYPE('x)" and cp: "cp TYPE('a) TYPE('x) TYPE('y)" and dj: "disjoint TYPE('y) TYPE('x)" shows "b\<sharp>([a].x) = b\<sharp>x" by (simp add: fresh_def abs_fun_supp_ineq[OF pta, OF ptb, OF at, OF cp, OF dj]) section {* abstraction type for the parsing in nominal datatype *} (*==============================================================*) inductive_set ABS_set :: "('x\<Rightarrow>('a noption)) set" where ABS_in: "(abs_fun a x)\<in>ABS_set" definition "ABS = ABS_set" typedef ('x,'a) ABS ("\<guillemotleft>_\<guillemotright>_" [1000,1000] 1000) = "ABS::('x\<Rightarrow>('a noption)) set" morphisms Rep_ABS Abs_ABS unfolding ABS_def proof fix x::"'a" and a::"'x" show "(abs_fun a x)\<in> ABS_set" by (rule ABS_in) qed section {* lemmas for deciding permutation equations *} (*===================================================*) lemma perm_aux_fold: shows "perm_aux pi x = pi\<bullet>x" by (simp only: perm_aux_def) lemma pt_perm_compose_aux: fixes pi1 :: "'x prm" and pi2 :: "'x prm" and x :: "'a" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" shows "pi2\<bullet>(pi1\<bullet>x) = perm_aux (pi2\<bullet>pi1) (pi2\<bullet>x)" proof - have "(pi2@pi1) \<triangleq> ((pi2\<bullet>pi1)@pi2)" by (rule at_ds8[OF at]) hence "(pi2@pi1)\<bullet>x = ((pi2\<bullet>pi1)@pi2)\<bullet>x" by (rule pt3[OF pt]) thus ?thesis by (simp add: pt2[OF pt] perm_aux_def) qed lemma cp1_aux: fixes pi1::"'x prm" and pi2::"'y prm" and x ::"'a" assumes cp: "cp TYPE ('a) TYPE('x) TYPE('y)" shows "pi1\<bullet>(pi2\<bullet>x) = perm_aux (pi1\<bullet>pi2) (pi1\<bullet>x)" using cp by (simp add: cp_def 
perm_aux_def) lemma perm_eq_app: fixes f :: "'a\<Rightarrow>'b" and x :: "'a" and pi :: "'x prm" assumes pt: "pt TYPE('a) TYPE('x)" and at: "at TYPE('x)" shows "(pi\<bullet>(f x)=y) = ((pi\<bullet>f)(pi\<bullet>x)=y)" by (simp add: pt_fun_app_eq[OF pt, OF at]) lemma perm_eq_lam: fixes f :: "'a\<Rightarrow>'b" and x :: "'a" and pi :: "'x prm" shows "((pi\<bullet>(\<lambda>x. f x))=y) = ((\<lambda>x. (pi\<bullet>(f ((rev pi)\<bullet>x))))=y)" by (simp add: perm_fun_def) section {* test *} lemma at_prm_eq_compose: fixes pi1 :: "'x prm" and pi2 :: "'x prm" and pi3 :: "'x prm" assumes at: "at TYPE('x)" and a: "pi1 \<triangleq> pi2" shows "(pi3\<bullet>pi1) \<triangleq> (pi3\<bullet>pi2)" proof - have pt: "pt TYPE('x) TYPE('x)" by (rule at_pt_inst[OF at]) have pt_prm: "pt TYPE('x prm) TYPE('x)" by (rule pt_list_inst[OF pt_prod_inst[OF pt, OF pt]]) from a show ?thesis apply - apply(auto simp add: prm_eq_def) apply(rule_tac pi="rev pi3" in pt_bij4[OF pt, OF at]) apply(rule trans) apply(rule pt_perm_compose[OF pt, OF at]) apply(simp add: pt_rev_pi[OF pt_prm, OF at]) apply(rule sym) apply(rule trans) apply(rule pt_perm_compose[OF pt, OF at]) apply(simp add: pt_rev_pi[OF pt_prm, OF at]) done qed (************************) (* Various eqvt-lemmas *) lemma Zero_nat_eqvt: shows "pi\<bullet>(0::nat) = 0" by (auto simp add: perm_nat_def) lemma One_nat_eqvt: shows "pi\<bullet>(1::nat) = 1" by (simp add: perm_nat_def) lemma Suc_eqvt: shows "pi\<bullet>(Suc x) = Suc (pi\<bullet>x)" by (auto simp add: perm_nat_def) lemma numeral_nat_eqvt: shows "pi\<bullet>((numeral n)::nat) = numeral n" by (simp add: perm_nat_def perm_int_def) lemma max_nat_eqvt: fixes x::"nat" shows "pi\<bullet>(max x y) = max (pi\<bullet>x) (pi\<bullet>y)" by (simp add:perm_nat_def) lemma min_nat_eqvt: fixes x::"nat" shows "pi\<bullet>(min x y) = min (pi\<bullet>x) (pi\<bullet>y)" by (simp add:perm_nat_def) lemma plus_nat_eqvt: fixes x::"nat" shows "pi\<bullet>(x + y) = (pi\<bullet>x) + (pi\<bullet>y)" by (simp add:perm_nat_def) lemma minus_nat_eqvt: fixes x::"nat" shows "pi\<bullet>(x - y) = (pi\<bullet>x) - (pi\<bullet>y)" by (simp add:perm_nat_def) lemma mult_nat_eqvt: fixes x::"nat" shows "pi\<bullet>(x * y) = (pi\<bullet>x) * (pi\<bullet>y)" by (simp add:perm_nat_def) lemma div_nat_eqvt: fixes x::"nat" shows "pi\<bullet>(x div y) = (pi\<bullet>x) div (pi\<bullet>y)" by (simp add:perm_nat_def) lemma Zero_int_eqvt: shows "pi\<bullet>(0::int) = 0" by (auto simp add: perm_int_def) lemma One_int_eqvt: shows "pi\<bullet>(1::int) = 1" by (simp add: perm_int_def) lemma numeral_int_eqvt: shows "pi\<bullet>((numeral n)::int) = numeral n" by (simp add: perm_int_def perm_int_def) lemma neg_numeral_int_eqvt: shows "pi\<bullet>((- numeral n)::int) = - numeral n" by (simp add: perm_int_def perm_int_def) lemma max_int_eqvt: fixes x::"int" shows "pi\<bullet>(max (x::int) y) = max (pi\<bullet>x) (pi\<bullet>y)" by (simp add:perm_int_def) lemma min_int_eqvt: fixes x::"int" shows "pi\<bullet>(min x y) = min (pi\<bullet>x) (pi\<bullet>y)" by (simp add:perm_int_def) lemma plus_int_eqvt: fixes x::"int" shows "pi\<bullet>(x + y) = (pi\<bullet>x) + (pi\<bullet>y)" by (simp add:perm_int_def) lemma minus_int_eqvt: fixes x::"int" shows "pi\<bullet>(x - y) = (pi\<bullet>x) - (pi\<bullet>y)" by (simp add:perm_int_def) lemma mult_int_eqvt: fixes x::"int" shows "pi\<bullet>(x * y) = (pi\<bullet>x) * (pi\<bullet>y)" by (simp add:perm_int_def) lemma div_int_eqvt: fixes x::"int" shows "pi\<bullet>(x div y) = (pi\<bullet>x) div (pi\<bullet>y)" by (simp add:perm_int_def) 
(*******************************************************) (* Setup of the theorem attributes eqvt and eqvt_force *) ML_file "nominal_thmdecls.ML" setup "NominalThmDecls.setup" lemmas [eqvt] = (* connectives *) if_eqvt imp_eqvt disj_eqvt conj_eqvt neg_eqvt true_eqvt false_eqvt imp_eqvt [folded induct_implies_def] (* datatypes *) perm_unit.simps perm_list.simps append_eqvt perm_prod.simps fst_eqvt snd_eqvt perm_option.simps (* nats *) Suc_eqvt Zero_nat_eqvt One_nat_eqvt min_nat_eqvt max_nat_eqvt plus_nat_eqvt minus_nat_eqvt mult_nat_eqvt div_nat_eqvt (* ints *) Zero_int_eqvt One_int_eqvt min_int_eqvt max_int_eqvt plus_int_eqvt minus_int_eqvt mult_int_eqvt div_int_eqvt (* sets *) union_eqvt empty_eqvt insert_eqvt set_eqvt (* the lemmas numeral_nat_eqvt numeral_int_eqvt do not conform with the *) (* usual form of an eqvt-lemma, but they are needed for analysing *) (* permutations on nats and ints *) lemmas [eqvt_force] = numeral_nat_eqvt numeral_int_eqvt neg_numeral_int_eqvt (***************************************) (* setup for the individial atom-kinds *) (* and nominal datatypes *) ML_file "nominal_atoms.ML" (************************************************************) (* various tactics for analysing permutations, supports etc *) ML_file "nominal_permeq.ML" method_setup perm_simp = {* NominalPermeq.perm_simp_meth *} {* simp rules and simprocs for analysing permutations *} method_setup perm_simp_debug = {* NominalPermeq.perm_simp_meth_debug *} {* simp rules and simprocs for analysing permutations including debugging facilities *} method_setup perm_extend_simp = {* NominalPermeq.perm_extend_simp_meth *} {* tactic for deciding equalities involving permutations *} method_setup perm_extend_simp_debug = {* NominalPermeq.perm_extend_simp_meth_debug *} {* tactic for deciding equalities involving permutations including debugging facilities *} method_setup supports_simp = {* NominalPermeq.supports_meth *} {* tactic for deciding whether something supports something else *} method_setup supports_simp_debug = {* NominalPermeq.supports_meth_debug *} {* tactic for deciding whether something supports something else including debugging facilities *} method_setup finite_guess = {* NominalPermeq.finite_guess_meth *} {* tactic for deciding whether something has finite support *} method_setup finite_guess_debug = {* NominalPermeq.finite_guess_meth_debug *} {* tactic for deciding whether something has finite support including debugging facilities *} method_setup fresh_guess = {* NominalPermeq.fresh_guess_meth *} {* tactic for deciding whether an atom is fresh for something*} method_setup fresh_guess_debug = {* NominalPermeq.fresh_guess_meth_debug *} {* tactic for deciding whether an atom is fresh for something including debugging facilities *} (*****************************************************************) (* tactics for generating fresh names and simplifying fresh_funs *) ML_file "nominal_fresh_fun.ML" method_setup generate_fresh = {* Args.type_name {proper = true, strict = true} >> (fn s => fn ctxt => SIMPLE_METHOD (generate_fresh_tac ctxt s)) *} "generate a name fresh for all the variables in the goal" method_setup fresh_fun_simp = {* Scan.lift (Args.parens (Args.$$$ "no_asm") >> K true || Scan.succeed false) >> (fn b => fn ctxt => SIMPLE_METHOD' (fresh_fun_tac ctxt b)) *} "delete one inner occurrence of fresh_fun" (************************************************) (* main file for constructing nominal datatypes *) lemma allE_Nil: assumes "\<forall>x. P x" obtains "P []" using assms .. 
ML_file "nominal_datatype.ML" (******************************************************) (* primitive recursive functions on nominal datatypes *) ML_file "nominal_primrec.ML" (****************************************************) (* inductive definition involving nominal datatypes *) ML_file "nominal_inductive.ML" ML_file "nominal_inductive2.ML" (*****************************************) (* setup for induction principles method *) ML_file "nominal_induct.ML" method_setup nominal_induct = {* NominalInduct.nominal_induct_method *} {* nominal induction *} end
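(Editorial note, not part of the theory source above: the key characterizations proved there, restated in conventional nominal-logic notation, where $(a\,b)\cdot y$ is the swapping of $a$ and $b$ applied to $y$ and $a \mathrel{\#} y$ reads "$a$ is fresh for $y$".)
$[a].x = [b].y \iff (a = b \land x = y) \lor (a \neq b \land x = (a\,b)\cdot y \land a \mathrel{\#} y)$ (lemma abs_fun_eq);
$b \mathrel{\#} [a].x \iff b = a \lor b \mathrel{\#} x$ and $\operatorname{supp}([a].x) = \operatorname{supp}(x) \setminus \{a\}$, both for finitely supported $x$ (lemmas fresh_abs_fun_iff and abs_fun_supp).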
lemma homeomorphic_spheres: fixes a b ::"'a::real_normed_vector" assumes "0 < d" "0 < e" shows "(sphere a d) homeomorphic (sphere b e)"
[GOAL] C : Type u inst✝¹⁰ : Category.{v, u} C J : GrothendieckTopology C D : Type w₁ inst✝⁹ : Category.{max v u, w₁} D E : Type w₂ inst✝⁸ : Category.{max v u, w₂} E F : D ⥤ E inst✝⁷ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) D inst✝⁶ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) E inst✝⁵ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ D inst✝⁴ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ E inst✝³ : (X : C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ F inst✝² : (X : C) → (W : Cover J X) → (P : Cᵒᵖ ⥤ D) → PreservesLimit (MulticospanIndex.multicospan (Cover.index W P)) F P✝ P : Cᵒᵖ ⥤ D inst✝¹ : (F : D ⥤ E) → (X : C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ F inst✝ : (F : D ⥤ E) → (X : C) → (W : Cover J X) → (P : Cᵒᵖ ⥤ D) → PreservesLimit (MulticospanIndex.multicospan (Cover.index W P)) F ⊢ (whiskeringLeft Cᵒᵖ D E).obj (sheafify J P) ≅ (whiskeringLeft Cᵒᵖ D E).obj P ⋙ sheafification J E [PROOFSTEP] refine' J.plusFunctorWhiskerLeftIso _ ≪≫ _ ≪≫ Functor.associator _ _ _ [GOAL] C : Type u inst✝¹⁰ : Category.{v, u} C J : GrothendieckTopology C D : Type w₁ inst✝⁹ : Category.{max v u, w₁} D E : Type w₂ inst✝⁸ : Category.{max v u, w₂} E F : D ⥤ E inst✝⁷ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) D inst✝⁶ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) E inst✝⁵ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ D inst✝⁴ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ E inst✝³ : (X : C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ F inst✝² : (X : C) → (W : Cover J X) → (P : Cᵒᵖ ⥤ D) → PreservesLimit (MulticospanIndex.multicospan (Cover.index W P)) F P✝ P : Cᵒᵖ ⥤ D inst✝¹ : (F : D ⥤ E) → (X : C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ F inst✝ : (F : D ⥤ E) → (X : C) → (W : Cover J X) → (P : Cᵒᵖ ⥤ D) → PreservesLimit (MulticospanIndex.multicospan (Cover.index W P)) F ⊢ (whiskeringLeft Cᵒᵖ D E).obj (plusObj J P) ⋙ plusFunctor J E ≅ ((whiskeringLeft Cᵒᵖ D E).obj P ⋙ plusFunctor J E) ⋙ plusFunctor J E [PROOFSTEP] refine' isoWhiskerRight _ _ [GOAL] C : Type u inst✝¹⁰ : Category.{v, u} C J : GrothendieckTopology C D : Type w₁ inst✝⁹ : Category.{max v u, w₁} D E : Type w₂ inst✝⁸ : Category.{max v u, w₂} E F : D ⥤ E inst✝⁷ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) D inst✝⁶ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) E inst✝⁵ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ D inst✝⁴ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ E inst✝³ : (X : C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ F inst✝² : (X : C) → (W : Cover J X) → (P : Cᵒᵖ ⥤ D) → PreservesLimit (MulticospanIndex.multicospan (Cover.index W P)) F P✝ P : Cᵒᵖ ⥤ D inst✝¹ : (F : D ⥤ E) → (X : C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ F inst✝ : (F : D ⥤ E) → (X : C) → (W : Cover J X) → (P : Cᵒᵖ ⥤ D) → PreservesLimit (MulticospanIndex.multicospan (Cover.index W P)) F ⊢ (whiskeringLeft Cᵒᵖ D E).obj (plusObj J P) ≅ (whiskeringLeft Cᵒᵖ D E).obj P ⋙ plusFunctor J E [PROOFSTEP] refine' J.plusFunctorWhiskerLeftIso _ [GOAL] C : Type u inst✝¹⁰ : Category.{v, u} C J : GrothendieckTopology C D : Type w₁ inst✝⁹ : Category.{max v u, w₁} D E : Type w₂ inst✝⁸ : Category.{max v u, w₂} E F✝ : D ⥤ E inst✝⁷ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) D inst✝⁶ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) E inst✝⁵ : ∀ 
(X : C), HasColimitsOfShape (Cover J X)ᵒᵖ D inst✝⁴ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ E inst✝³ : (X : C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ F✝ inst✝² : (X : C) → (W : Cover J X) → (P : Cᵒᵖ ⥤ D) → PreservesLimit (MulticospanIndex.multicospan (Cover.index W P)) F✝ P✝ P : Cᵒᵖ ⥤ D F : D ⥤ E inst✝¹ : (F : D ⥤ E) → (X : C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ F inst✝ : (F : D ⥤ E) → (X : C) → (W : Cover J X) → (P : Cᵒᵖ ⥤ D) → PreservesLimit (MulticospanIndex.multicospan (Cover.index W P)) F ⊢ NatTrans.app (sheafificationWhiskerLeftIso J P).hom F = (sheafifyCompIso J F P).hom [PROOFSTEP] dsimp [sheafificationWhiskerLeftIso, sheafifyCompIso] [GOAL] C : Type u inst✝¹⁰ : Category.{v, u} C J : GrothendieckTopology C D : Type w₁ inst✝⁹ : Category.{max v u, w₁} D E : Type w₂ inst✝⁸ : Category.{max v u, w₂} E F✝ : D ⥤ E inst✝⁷ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) D inst✝⁶ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) E inst✝⁵ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ D inst✝⁴ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ E inst✝³ : (X : C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ F✝ inst✝² : (X : C) → (W : Cover J X) → (P : Cᵒᵖ ⥤ D) → PreservesLimit (MulticospanIndex.multicospan (Cover.index W P)) F✝ P✝ P : Cᵒᵖ ⥤ D F : D ⥤ E inst✝¹ : (F : D ⥤ E) → (X : C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ F inst✝ : (F : D ⥤ E) → (X : C) → (W : Cover J X) → (P : Cᵒᵖ ⥤ D) → PreservesLimit (MulticospanIndex.multicospan (Cover.index W P)) F ⊢ (plusCompIso J F (plusObj J P)).hom ≫ plusMap J (plusCompIso J F P).hom ≫ 𝟙 (plusObj J (plusObj J (P ⋙ F))) = (plusCompIso J F (plusObj J P)).hom ≫ plusMap J (plusCompIso J F P).hom [PROOFSTEP] rw [Category.comp_id] [GOAL] C : Type u inst✝¹⁰ : Category.{v, u} C J : GrothendieckTopology C D : Type w₁ inst✝⁹ : Category.{max v u, w₁} D E : Type w₂ inst✝⁸ : Category.{max v u, w₂} E F✝ : D ⥤ E inst✝⁷ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) D inst✝⁶ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) E inst✝⁵ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ D inst✝⁴ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ E inst✝³ : (X : C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ F✝ inst✝² : (X : C) → (W : Cover J X) → (P : Cᵒᵖ ⥤ D) → PreservesLimit (MulticospanIndex.multicospan (Cover.index W P)) F✝ P✝ P : Cᵒᵖ ⥤ D F : D ⥤ E inst✝¹ : (F : D ⥤ E) → (X : C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ F inst✝ : (F : D ⥤ E) → (X : C) → (W : Cover J X) → (P : Cᵒᵖ ⥤ D) → PreservesLimit (MulticospanIndex.multicospan (Cover.index W P)) F ⊢ NatTrans.app (sheafificationWhiskerLeftIso J P).inv F = (sheafifyCompIso J F P).inv [PROOFSTEP] dsimp [sheafificationWhiskerLeftIso, sheafifyCompIso] [GOAL] C : Type u inst✝¹⁰ : Category.{v, u} C J : GrothendieckTopology C D : Type w₁ inst✝⁹ : Category.{max v u, w₁} D E : Type w₂ inst✝⁸ : Category.{max v u, w₂} E F✝ : D ⥤ E inst✝⁷ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) D inst✝⁶ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) E inst✝⁵ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ D inst✝⁴ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ E inst✝³ : (X : C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ F✝ inst✝² : (X : C) → (W : Cover J X) → (P : Cᵒᵖ ⥤ D) → PreservesLimit (MulticospanIndex.multicospan (Cover.index W P)) F✝ P✝ P : Cᵒᵖ ⥤ D F : D ⥤ E inst✝¹ : (F : D ⥤ E) → (X : 
C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ F inst✝ : (F : D ⥤ E) → (X : C) → (W : Cover J X) → (P : Cᵒᵖ ⥤ D) → PreservesLimit (MulticospanIndex.multicospan (Cover.index W P)) F ⊢ (𝟙 (plusObj J (plusObj J (P ⋙ F))) ≫ plusMap J (plusCompIso J F P).inv) ≫ (plusCompIso J F (plusObj J P)).inv = plusMap J (plusCompIso J F P).inv ≫ (plusCompIso J F (plusObj J P)).inv [PROOFSTEP] erw [Category.id_comp] [GOAL] C : Type u inst✝⁸ : Category.{v, u} C J : GrothendieckTopology C D : Type w₁ inst✝⁷ : Category.{max v u, w₁} D E : Type w₂ inst✝⁶ : Category.{max v u, w₂} E F : D ⥤ E inst✝⁵ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) D inst✝⁴ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) E inst✝³ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ D inst✝² : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ E inst✝¹ : (X : C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ F inst✝ : (X : C) → (W : Cover J X) → (P : Cᵒᵖ ⥤ D) → PreservesLimit (MulticospanIndex.multicospan (Cover.index W P)) F P : Cᵒᵖ ⥤ D ⊢ sheafification J D ⋙ (whiskeringRight Cᵒᵖ D E).obj F ≅ (whiskeringRight Cᵒᵖ D E).obj F ⋙ sheafification J E [PROOFSTEP] refine' Functor.associator _ _ _ ≪≫ _ [GOAL] C : Type u inst✝⁸ : Category.{v, u} C J : GrothendieckTopology C D : Type w₁ inst✝⁷ : Category.{max v u, w₁} D E : Type w₂ inst✝⁶ : Category.{max v u, w₂} E F : D ⥤ E inst✝⁵ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) D inst✝⁴ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) E inst✝³ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ D inst✝² : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ E inst✝¹ : (X : C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ F inst✝ : (X : C) → (W : Cover J X) → (P : Cᵒᵖ ⥤ D) → PreservesLimit (MulticospanIndex.multicospan (Cover.index W P)) F P : Cᵒᵖ ⥤ D ⊢ plusFunctor J D ⋙ plusFunctor J D ⋙ (whiskeringRight Cᵒᵖ D E).obj F ≅ (whiskeringRight Cᵒᵖ D E).obj F ⋙ sheafification J E [PROOFSTEP] refine' isoWhiskerLeft (J.plusFunctor D) (J.plusFunctorWhiskerRightIso _) ≪≫ _ [GOAL] C : Type u inst✝⁸ : Category.{v, u} C J : GrothendieckTopology C D : Type w₁ inst✝⁷ : Category.{max v u, w₁} D E : Type w₂ inst✝⁶ : Category.{max v u, w₂} E F : D ⥤ E inst✝⁵ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) D inst✝⁴ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) E inst✝³ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ D inst✝² : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ E inst✝¹ : (X : C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ F inst✝ : (X : C) → (W : Cover J X) → (P : Cᵒᵖ ⥤ D) → PreservesLimit (MulticospanIndex.multicospan (Cover.index W P)) F P : Cᵒᵖ ⥤ D ⊢ plusFunctor J D ⋙ (whiskeringRight Cᵒᵖ D E).obj F ⋙ plusFunctor J E ≅ (whiskeringRight Cᵒᵖ D E).obj F ⋙ sheafification J E [PROOFSTEP] refine' _ ≪≫ Functor.associator _ _ _ [GOAL] C : Type u inst✝⁸ : Category.{v, u} C J : GrothendieckTopology C D : Type w₁ inst✝⁷ : Category.{max v u, w₁} D E : Type w₂ inst✝⁶ : Category.{max v u, w₂} E F : D ⥤ E inst✝⁵ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) D inst✝⁴ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) E inst✝³ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ D inst✝² : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ E inst✝¹ : (X : C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ F inst✝ : (X : C) → (W : Cover J 
X) → (P : Cᵒᵖ ⥤ D) → PreservesLimit (MulticospanIndex.multicospan (Cover.index W P)) F P : Cᵒᵖ ⥤ D ⊢ plusFunctor J D ⋙ (whiskeringRight Cᵒᵖ D E).obj F ⋙ plusFunctor J E ≅ ((whiskeringRight Cᵒᵖ D E).obj F ⋙ plusFunctor J E) ⋙ plusFunctor J E [PROOFSTEP] refine' (Functor.associator _ _ _).symm ≪≫ _ [GOAL] C : Type u inst✝⁸ : Category.{v, u} C J : GrothendieckTopology C D : Type w₁ inst✝⁷ : Category.{max v u, w₁} D E : Type w₂ inst✝⁶ : Category.{max v u, w₂} E F : D ⥤ E inst✝⁵ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) D inst✝⁴ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) E inst✝³ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ D inst✝² : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ E inst✝¹ : (X : C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ F inst✝ : (X : C) → (W : Cover J X) → (P : Cᵒᵖ ⥤ D) → PreservesLimit (MulticospanIndex.multicospan (Cover.index W P)) F P : Cᵒᵖ ⥤ D ⊢ (plusFunctor J D ⋙ (whiskeringRight Cᵒᵖ D E).obj F) ⋙ plusFunctor J E ≅ ((whiskeringRight Cᵒᵖ D E).obj F ⋙ plusFunctor J E) ⋙ plusFunctor J E [PROOFSTEP] exact isoWhiskerRight (J.plusFunctorWhiskerRightIso _) (J.plusFunctor E) [GOAL] C : Type u inst✝⁸ : Category.{v, u} C J : GrothendieckTopology C D : Type w₁ inst✝⁷ : Category.{max v u, w₁} D E : Type w₂ inst✝⁶ : Category.{max v u, w₂} E F : D ⥤ E inst✝⁵ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) D inst✝⁴ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) E inst✝³ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ D inst✝² : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ E inst✝¹ : (X : C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ F inst✝ : (X : C) → (W : Cover J X) → (P : Cᵒᵖ ⥤ D) → PreservesLimit (MulticospanIndex.multicospan (Cover.index W P)) F P : Cᵒᵖ ⥤ D ⊢ NatTrans.app (sheafificationWhiskerRightIso J F).hom P = (sheafifyCompIso J F P).hom [PROOFSTEP] dsimp [sheafificationWhiskerRightIso, sheafifyCompIso] [GOAL] C : Type u inst✝⁸ : Category.{v, u} C J : GrothendieckTopology C D : Type w₁ inst✝⁷ : Category.{max v u, w₁} D E : Type w₂ inst✝⁶ : Category.{max v u, w₂} E F : D ⥤ E inst✝⁵ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) D inst✝⁴ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) E inst✝³ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ D inst✝² : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ E inst✝¹ : (X : C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ F inst✝ : (X : C) → (W : Cover J X) → (P : Cᵒᵖ ⥤ D) → PreservesLimit (MulticospanIndex.multicospan (Cover.index W P)) F P : Cᵒᵖ ⥤ D ⊢ 𝟙 (plusObj J (plusObj J P) ⋙ F) ≫ (plusCompIso J F (plusObj J P)).hom ≫ (𝟙 (plusObj J (plusObj J P ⋙ F)) ≫ plusMap J (plusCompIso J F P).hom) ≫ 𝟙 (plusObj J (plusObj J (P ⋙ F))) = (plusCompIso J F (plusObj J P)).hom ≫ plusMap J (plusCompIso J F P).hom [PROOFSTEP] simp only [Category.id_comp, Category.comp_id] [GOAL] C : Type u inst✝⁸ : Category.{v, u} C J : GrothendieckTopology C D : Type w₁ inst✝⁷ : Category.{max v u, w₁} D E : Type w₂ inst✝⁶ : Category.{max v u, w₂} E F : D ⥤ E inst✝⁵ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) D inst✝⁴ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) E inst✝³ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ D inst✝² : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ E inst✝¹ : (X : C) → PreservesColimitsOfShape 
(Cover J X)ᵒᵖ F inst✝ : (X : C) → (W : Cover J X) → (P : Cᵒᵖ ⥤ D) → PreservesLimit (MulticospanIndex.multicospan (Cover.index W P)) F P : Cᵒᵖ ⥤ D ⊢ 𝟙 (plusObj J (plusObj J P) ⋙ F) ≫ (plusCompIso J F (plusObj J P)).hom ≫ plusMap J (plusCompIso J F P).hom = (plusCompIso J F (plusObj J P)).hom ≫ plusMap J (plusCompIso J F P).hom [PROOFSTEP] erw [Category.id_comp] [GOAL] C : Type u inst✝⁸ : Category.{v, u} C J : GrothendieckTopology C D : Type w₁ inst✝⁷ : Category.{max v u, w₁} D E : Type w₂ inst✝⁶ : Category.{max v u, w₂} E F : D ⥤ E inst✝⁵ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) D inst✝⁴ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) E inst✝³ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ D inst✝² : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ E inst✝¹ : (X : C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ F inst✝ : (X : C) → (W : Cover J X) → (P : Cᵒᵖ ⥤ D) → PreservesLimit (MulticospanIndex.multicospan (Cover.index W P)) F P : Cᵒᵖ ⥤ D ⊢ NatTrans.app (sheafificationWhiskerRightIso J F).inv P = (sheafifyCompIso J F P).inv [PROOFSTEP] dsimp [sheafificationWhiskerRightIso, sheafifyCompIso] [GOAL] C : Type u inst✝⁸ : Category.{v, u} C J : GrothendieckTopology C D : Type w₁ inst✝⁷ : Category.{max v u, w₁} D E : Type w₂ inst✝⁶ : Category.{max v u, w₂} E F : D ⥤ E inst✝⁵ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) D inst✝⁴ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) E inst✝³ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ D inst✝² : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ E inst✝¹ : (X : C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ F inst✝ : (X : C) → (W : Cover J X) → (P : Cᵒᵖ ⥤ D) → PreservesLimit (MulticospanIndex.multicospan (Cover.index W P)) F P : Cᵒᵖ ⥤ D ⊢ ((𝟙 (plusObj J (plusObj J (P ⋙ F))) ≫ plusMap J (plusCompIso J F P).inv ≫ 𝟙 (plusObj J (plusObj J P ⋙ F))) ≫ (plusCompIso J F (plusObj J P)).inv) ≫ 𝟙 (plusObj J (plusObj J P) ⋙ F) = plusMap J (plusCompIso J F P).inv ≫ (plusCompIso J F (plusObj J P)).inv [PROOFSTEP] simp only [Category.id_comp, Category.comp_id] [GOAL] C : Type u inst✝⁸ : Category.{v, u} C J : GrothendieckTopology C D : Type w₁ inst✝⁷ : Category.{max v u, w₁} D E : Type w₂ inst✝⁶ : Category.{max v u, w₂} E F : D ⥤ E inst✝⁵ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) D inst✝⁴ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) E inst✝³ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ D inst✝² : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ E inst✝¹ : (X : C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ F inst✝ : (X : C) → (W : Cover J X) → (P : Cᵒᵖ ⥤ D) → PreservesLimit (MulticospanIndex.multicospan (Cover.index W P)) F P : Cᵒᵖ ⥤ D ⊢ (𝟙 (plusObj J (plusObj J (P ⋙ F))) ≫ plusMap J (plusCompIso J F P).inv) ≫ (plusCompIso J F (plusObj J P)).inv = plusMap J (plusCompIso J F P).inv ≫ (plusCompIso J F (plusObj J P)).inv [PROOFSTEP] erw [Category.id_comp] [GOAL] C : Type u inst✝⁸ : Category.{v, u} C J : GrothendieckTopology C D : Type w₁ inst✝⁷ : Category.{max v u, w₁} D E : Type w₂ inst✝⁶ : Category.{max v u, w₂} E F : D ⥤ E inst✝⁵ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) D inst✝⁴ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) E inst✝³ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ D inst✝² : ∀ (X : C), HasColimitsOfShape 
(Cover J X)ᵒᵖ E inst✝¹ : (X : C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ F inst✝ : (X : C) → (W : Cover J X) → (P : Cᵒᵖ ⥤ D) → PreservesLimit (MulticospanIndex.multicospan (Cover.index W P)) F P : Cᵒᵖ ⥤ D ⊢ whiskerRight (toSheafify J P) F ≫ (sheafifyCompIso J F P).hom = toSheafify J (P ⋙ F) [PROOFSTEP] dsimp [sheafifyCompIso] [GOAL] C : Type u inst✝⁸ : Category.{v, u} C J : GrothendieckTopology C D : Type w₁ inst✝⁷ : Category.{max v u, w₁} D E : Type w₂ inst✝⁶ : Category.{max v u, w₂} E F : D ⥤ E inst✝⁵ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) D inst✝⁴ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) E inst✝³ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ D inst✝² : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ E inst✝¹ : (X : C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ F inst✝ : (X : C) → (W : Cover J X) → (P : Cᵒᵖ ⥤ D) → PreservesLimit (MulticospanIndex.multicospan (Cover.index W P)) F P : Cᵒᵖ ⥤ D ⊢ whiskerRight (toSheafify J P) F ≫ (plusCompIso J F (plusObj J P)).hom ≫ plusMap J (plusCompIso J F P).hom = toSheafify J (P ⋙ F) [PROOFSTEP] erw [whiskerRight_comp, Category.assoc] [GOAL] C : Type u inst✝⁸ : Category.{v, u} C J : GrothendieckTopology C D : Type w₁ inst✝⁷ : Category.{max v u, w₁} D E : Type w₂ inst✝⁶ : Category.{max v u, w₂} E F : D ⥤ E inst✝⁵ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) D inst✝⁴ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) E inst✝³ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ D inst✝² : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ E inst✝¹ : (X : C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ F inst✝ : (X : C) → (W : Cover J X) → (P : Cᵒᵖ ⥤ D) → PreservesLimit (MulticospanIndex.multicospan (Cover.index W P)) F P : Cᵒᵖ ⥤ D ⊢ whiskerRight (toPlus J P) F ≫ whiskerRight (plusMap J (toPlus J P)) F ≫ (plusCompIso J F (plusObj J P)).hom ≫ plusMap J (plusCompIso J F P).hom = toSheafify J (P ⋙ F) [PROOFSTEP] slice_lhs 2 3 => rw [plusCompIso_whiskerRight] [GOAL] case a.a C : Type u inst✝⁸ : Category.{v, u} C J : GrothendieckTopology C D : Type w₁ inst✝⁷ : Category.{max v u, w₁} D E : Type w₂ inst✝⁶ : Category.{max v u, w₂} E F : D ⥤ E inst✝⁵ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) D inst✝⁴ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) E inst✝³ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ D inst✝² : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ E inst✝¹ : (X : C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ F inst✝ : (X : C) → (W : Cover J X) → (P : Cᵒᵖ ⥤ D) → PreservesLimit (MulticospanIndex.multicospan (Cover.index W P)) F P : Cᵒᵖ ⥤ D | whiskerRight (plusMap J (toPlus J P)) F ≫ (plusCompIso J F (plusObj J P)).hom case a.a C : Type u inst✝⁸ : Category.{v, u} C J : GrothendieckTopology C D : Type w₁ inst✝⁷ : Category.{max v u, w₁} D E : Type w₂ inst✝⁶ : Category.{max v u, w₂} E F : D ⥤ E inst✝⁵ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) D inst✝⁴ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) E inst✝³ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ D inst✝² : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ E inst✝¹ : (X : C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ F inst✝ : (X : C) → (W : Cover J X) → (P : Cᵒᵖ ⥤ D) → PreservesLimit (MulticospanIndex.multicospan (Cover.index W P)) F P : Cᵒᵖ ⥤ D | plusMap J 
(plusCompIso J F P).hom case a C : Type u inst✝⁸ : Category.{v, u} C J : GrothendieckTopology C D : Type w₁ inst✝⁷ : Category.{max v u, w₁} D E : Type w₂ inst✝⁶ : Category.{max v u, w₂} E F : D ⥤ E inst✝⁵ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) D inst✝⁴ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) E inst✝³ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ D inst✝² : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ E inst✝¹ : (X : C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ F inst✝ : (X : C) → (W : Cover J X) → (P : Cᵒᵖ ⥤ D) → PreservesLimit (MulticospanIndex.multicospan (Cover.index W P)) F P : Cᵒᵖ ⥤ D | whiskerRight (toPlus J P) F [PROOFSTEP] rw [plusCompIso_whiskerRight] [GOAL] case a.a C : Type u inst✝⁸ : Category.{v, u} C J : GrothendieckTopology C D : Type w₁ inst✝⁷ : Category.{max v u, w₁} D E : Type w₂ inst✝⁶ : Category.{max v u, w₂} E F : D ⥤ E inst✝⁵ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) D inst✝⁴ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) E inst✝³ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ D inst✝² : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ E inst✝¹ : (X : C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ F inst✝ : (X : C) → (W : Cover J X) → (P : Cᵒᵖ ⥤ D) → PreservesLimit (MulticospanIndex.multicospan (Cover.index W P)) F P : Cᵒᵖ ⥤ D | whiskerRight (plusMap J (toPlus J P)) F ≫ (plusCompIso J F (plusObj J P)).hom case a.a C : Type u inst✝⁸ : Category.{v, u} C J : GrothendieckTopology C D : Type w₁ inst✝⁷ : Category.{max v u, w₁} D E : Type w₂ inst✝⁶ : Category.{max v u, w₂} E F : D ⥤ E inst✝⁵ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) D inst✝⁴ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) E inst✝³ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ D inst✝² : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ E inst✝¹ : (X : C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ F inst✝ : (X : C) → (W : Cover J X) → (P : Cᵒᵖ ⥤ D) → PreservesLimit (MulticospanIndex.multicospan (Cover.index W P)) F P : Cᵒᵖ ⥤ D | plusMap J (plusCompIso J F P).hom case a C : Type u inst✝⁸ : Category.{v, u} C J : GrothendieckTopology C D : Type w₁ inst✝⁷ : Category.{max v u, w₁} D E : Type w₂ inst✝⁶ : Category.{max v u, w₂} E F : D ⥤ E inst✝⁵ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) D inst✝⁴ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) E inst✝³ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ D inst✝² : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ E inst✝¹ : (X : C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ F inst✝ : (X : C) → (W : Cover J X) → (P : Cᵒᵖ ⥤ D) → PreservesLimit (MulticospanIndex.multicospan (Cover.index W P)) F P : Cᵒᵖ ⥤ D | whiskerRight (toPlus J P) F [PROOFSTEP] rw [plusCompIso_whiskerRight] [GOAL] case a.a C : Type u inst✝⁸ : Category.{v, u} C J : GrothendieckTopology C D : Type w₁ inst✝⁷ : Category.{max v u, w₁} D E : Type w₂ inst✝⁶ : Category.{max v u, w₂} E F : D ⥤ E inst✝⁵ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) D inst✝⁴ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) E inst✝³ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ D inst✝² : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ E inst✝¹ : (X : C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ F 
inst✝ : (X : C) → (W : Cover J X) → (P : Cᵒᵖ ⥤ D) → PreservesLimit (MulticospanIndex.multicospan (Cover.index W P)) F P : Cᵒᵖ ⥤ D | whiskerRight (plusMap J (toPlus J P)) F ≫ (plusCompIso J F (plusObj J P)).hom case a.a C : Type u inst✝⁸ : Category.{v, u} C J : GrothendieckTopology C D : Type w₁ inst✝⁷ : Category.{max v u, w₁} D E : Type w₂ inst✝⁶ : Category.{max v u, w₂} E F : D ⥤ E inst✝⁵ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) D inst✝⁴ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) E inst✝³ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ D inst✝² : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ E inst✝¹ : (X : C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ F inst✝ : (X : C) → (W : Cover J X) → (P : Cᵒᵖ ⥤ D) → PreservesLimit (MulticospanIndex.multicospan (Cover.index W P)) F P : Cᵒᵖ ⥤ D | plusMap J (plusCompIso J F P).hom case a C : Type u inst✝⁸ : Category.{v, u} C J : GrothendieckTopology C D : Type w₁ inst✝⁷ : Category.{max v u, w₁} D E : Type w₂ inst✝⁶ : Category.{max v u, w₂} E F : D ⥤ E inst✝⁵ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) D inst✝⁴ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) E inst✝³ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ D inst✝² : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ E inst✝¹ : (X : C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ F inst✝ : (X : C) → (W : Cover J X) → (P : Cᵒᵖ ⥤ D) → PreservesLimit (MulticospanIndex.multicospan (Cover.index W P)) F P : Cᵒᵖ ⥤ D | whiskerRight (toPlus J P) F [PROOFSTEP] rw [plusCompIso_whiskerRight] [GOAL] C : Type u inst✝⁸ : Category.{v, u} C J : GrothendieckTopology C D : Type w₁ inst✝⁷ : Category.{max v u, w₁} D E : Type w₂ inst✝⁶ : Category.{max v u, w₂} E F : D ⥤ E inst✝⁵ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) D inst✝⁴ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) E inst✝³ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ D inst✝² : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ E inst✝¹ : (X : C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ F inst✝ : (X : C) → (W : Cover J X) → (P : Cᵒᵖ ⥤ D) → PreservesLimit (MulticospanIndex.multicospan (Cover.index W P)) F P : Cᵒᵖ ⥤ D ⊢ whiskerRight (toPlus J P) F ≫ ((plusCompIso J F P).hom ≫ plusMap J (whiskerRight (toPlus J P) F)) ≫ plusMap J (plusCompIso J F P).hom = toSheafify J (P ⋙ F) [PROOFSTEP] rw [Category.assoc, ← J.plusMap_comp, whiskerRight_toPlus_comp_plusCompIso_hom, ← Category.assoc, whiskerRight_toPlus_comp_plusCompIso_hom] [GOAL] C : Type u inst✝⁸ : Category.{v, u} C J : GrothendieckTopology C D : Type w₁ inst✝⁷ : Category.{max v u, w₁} D E : Type w₂ inst✝⁶ : Category.{max v u, w₂} E F : D ⥤ E inst✝⁵ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) D inst✝⁴ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) E inst✝³ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ D inst✝² : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ E inst✝¹ : (X : C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ F inst✝ : (X : C) → (W : Cover J X) → (P : Cᵒᵖ ⥤ D) → PreservesLimit (MulticospanIndex.multicospan (Cover.index W P)) F P : Cᵒᵖ ⥤ D ⊢ toPlus J (P ⋙ F) ≫ plusMap J (toPlus J (P ⋙ F)) = toSheafify J (P ⋙ F) [PROOFSTEP] rfl [GOAL] C : Type u inst✝⁸ : Category.{v, u} C J : GrothendieckTopology C D : Type w₁ inst✝⁷ : Category.{max v u, w₁} D E : Type w₂ 
inst✝⁶ : Category.{max v u, w₂} E F : D ⥤ E inst✝⁵ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) D inst✝⁴ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) E inst✝³ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ D inst✝² : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ E inst✝¹ : (X : C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ F inst✝ : (X : C) → (W : Cover J X) → (P : Cᵒᵖ ⥤ D) → PreservesLimit (MulticospanIndex.multicospan (Cover.index W P)) F P : Cᵒᵖ ⥤ D ⊢ toSheafify J (P ⋙ F) ≫ (sheafifyCompIso J F P).inv = whiskerRight (toSheafify J P) F [PROOFSTEP] rw [Iso.comp_inv_eq] [GOAL] C : Type u inst✝⁸ : Category.{v, u} C J : GrothendieckTopology C D : Type w₁ inst✝⁷ : Category.{max v u, w₁} D E : Type w₂ inst✝⁶ : Category.{max v u, w₂} E F : D ⥤ E inst✝⁵ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) D inst✝⁴ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) E inst✝³ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ D inst✝² : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ E inst✝¹ : (X : C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ F inst✝ : (X : C) → (W : Cover J X) → (P : Cᵒᵖ ⥤ D) → PreservesLimit (MulticospanIndex.multicospan (Cover.index W P)) F P : Cᵒᵖ ⥤ D ⊢ toSheafify J (P ⋙ F) = whiskerRight (toSheafify J P) F ≫ (sheafifyCompIso J F P).hom [PROOFSTEP] simp [GOAL] C : Type u inst✝¹² : Category.{v, u} C J : GrothendieckTopology C D : Type w₁ inst✝¹¹ : Category.{max v u, w₁} D E : Type w₂ inst✝¹⁰ : Category.{max v u, w₂} E F : D ⥤ E inst✝⁹ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) D inst✝⁸ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) E inst✝⁷ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ D inst✝⁶ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ E inst✝⁵ : (X : C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ F inst✝⁴ : (X : C) → (W : Cover J X) → (P : Cᵒᵖ ⥤ D) → PreservesLimit (MulticospanIndex.multicospan (Cover.index W P)) F P : Cᵒᵖ ⥤ D inst✝³ : ConcreteCategory D inst✝² : PreservesLimits (forget D) inst✝¹ : (X : C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ (forget D) inst✝ : ReflectsIsomorphisms (forget D) ⊢ (sheafifyCompIso J F P).inv = sheafifyLift J (whiskerRight (toSheafify J P) F) (_ : Presheaf.IsSheaf J (sheafify J P ⋙ F)) [PROOFSTEP] apply J.sheafifyLift_unique [GOAL] case a C : Type u inst✝¹² : Category.{v, u} C J : GrothendieckTopology C D : Type w₁ inst✝¹¹ : Category.{max v u, w₁} D E : Type w₂ inst✝¹⁰ : Category.{max v u, w₂} E F : D ⥤ E inst✝⁹ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) D inst✝⁸ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) E inst✝⁷ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ D inst✝⁶ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ E inst✝⁵ : (X : C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ F inst✝⁴ : (X : C) → (W : Cover J X) → (P : Cᵒᵖ ⥤ D) → PreservesLimit (MulticospanIndex.multicospan (Cover.index W P)) F P : Cᵒᵖ ⥤ D inst✝³ : ConcreteCategory D inst✝² : PreservesLimits (forget D) inst✝¹ : (X : C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ (forget D) inst✝ : ReflectsIsomorphisms (forget D) ⊢ toSheafify J (P ⋙ F) ≫ (sheafifyCompIso J F P).inv = whiskerRight (toSheafify J P) F [PROOFSTEP] rw [Iso.comp_inv_eq] [GOAL] case a C : Type u inst✝¹² : Category.{v, u} C J : GrothendieckTopology C D : Type w₁ inst✝¹¹ : 
Category.{max v u, w₁} D E : Type w₂ inst✝¹⁰ : Category.{max v u, w₂} E F : D ⥤ E inst✝⁹ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) D inst✝⁸ : ∀ (α β : Type (max v u)) (fst snd : β → α), HasLimitsOfShape (WalkingMulticospan fst snd) E inst✝⁷ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ D inst✝⁶ : ∀ (X : C), HasColimitsOfShape (Cover J X)ᵒᵖ E inst✝⁵ : (X : C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ F inst✝⁴ : (X : C) → (W : Cover J X) → (P : Cᵒᵖ ⥤ D) → PreservesLimit (MulticospanIndex.multicospan (Cover.index W P)) F P : Cᵒᵖ ⥤ D inst✝³ : ConcreteCategory D inst✝² : PreservesLimits (forget D) inst✝¹ : (X : C) → PreservesColimitsOfShape (Cover J X)ᵒᵖ (forget D) inst✝ : ReflectsIsomorphisms (forget D) ⊢ toSheafify J (P ⋙ F) = whiskerRight (toSheafify J P) F ≫ (sheafifyCompIso J F P).hom [PROOFSTEP] simp
proposition injective_imp_isometric: fixes f :: "'a::euclidean_space \<Rightarrow> 'b::euclidean_space" assumes s: "closed s" "subspace s" and f: "bounded_linear f" "\<forall>x\<in>s. f x = 0 \<longrightarrow> x = 0" shows "\<exists>e>0. \<forall>x\<in>s. norm (f x) \<ge> e * norm x"
Formal statement is: lemma compact_imp_complete: fixes s :: "'a::metric_space set" assumes "compact s" shows "complete s" Informal statement is: Any compact metric space is complete.
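(Editorial addition: the standard argument behind the informal statement above, in ordinary metric-space notation.) A compact metric space is sequentially compact, so a Cauchy sequence $(x_n)$ has a subsequence $(x_{n_k})$ converging to some $x$. Given $\varepsilon > 0$, choose $N$ with $d(x_n, x_m) < \varepsilon/2$ for all $n, m \ge N$, and choose $k$ with $n_k \ge N$ and $d(x_{n_k}, x) < \varepsilon/2$; then for every $n \ge N$,
$d(x_n, x) \le d(x_n, x_{n_k}) + d(x_{n_k}, x) < \varepsilon,$
so the whole sequence converges to $x$ and the space is complete.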
/- Copyright (c) 2022 Yakov Pechersky. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Yakov Pechersky -/ import data.polynomial.taylor import field_theory.ratfunc import ring_theory.laurent_series /-! # Laurent expansions of rational functions ## Main declarations * `ratfunc.laurent`: the Laurent expansion of the rational function `f` at `r`, as an `alg_hom`. * `ratfunc.laurent_injective`: the Laurent expansion at `r` is unique ## Implementation details Implemented as the quotient of two Taylor expansions, over domains. An auxiliary definition is provided first to make the construction of the `alg_hom` easier, which works on `comm_ring` which are not necessarily domains. -/ universe u namespace ratfunc noncomputable theory open polynomial open_locale classical non_zero_divisors polynomial variables {R : Type u} [comm_ring R] [hdomain : is_domain R] (r s : R) (p q : R[X]) (f : ratfunc R) lemma taylor_mem_non_zero_divisors (hp : p ∈ R[X]⁰) : taylor r p ∈ R[X]⁰ := begin rw mem_non_zero_divisors_iff, intros x hx, have : x = taylor (r - r) x, { simp }, rwa [this, sub_eq_add_neg, ←taylor_taylor, ←taylor_mul, linear_map.map_eq_zero_iff _ (taylor_injective _), mul_right_mem_non_zero_divisors_eq_zero_iff hp, linear_map.map_eq_zero_iff _ (taylor_injective _)] at hx, end /-- The Laurent expansion of rational functions about a value. Auxiliary definition, usage when over integral domains should prefer `ratfunc.laurent`. -/ def laurent_aux : ratfunc R →+* ratfunc R := ratfunc.map_ring_hom (ring_hom.mk (taylor r) (taylor_one _) (taylor_mul _) (linear_map.map_zero _) (linear_map.map_add _)) (taylor_mem_non_zero_divisors _) lemma laurent_aux_of_fraction_ring_mk (q : R[X]⁰) : laurent_aux r (of_fraction_ring (localization.mk p q)) = of_fraction_ring (localization.mk (taylor r p) ⟨taylor r q, taylor_mem_non_zero_divisors r q q.prop⟩) := map_apply_of_fraction_ring_mk _ _ _ _ include hdomain lemma laurent_aux_div : laurent_aux r (algebra_map _ _ p / (algebra_map _ _ q)) = algebra_map _ _ (taylor r p) / (algebra_map _ _ (taylor r q)) := map_apply_div _ _ _ _ @[simp] lemma laurent_aux_algebra_map : laurent_aux r (algebra_map _ _ p) = algebra_map _ _ (taylor r p) := by rw [←mk_one, ←mk_one, mk_eq_div, laurent_aux_div, mk_eq_div, taylor_one, _root_.map_one] /-- The Laurent expansion of rational functions about a value. 
-/ def laurent : ratfunc R →ₐ[R] ratfunc R := ratfunc.map_alg_hom (alg_hom.mk (taylor r) (taylor_one _) (taylor_mul _) (linear_map.map_zero _) (linear_map.map_add _) (by simp [polynomial.algebra_map_apply])) (taylor_mem_non_zero_divisors _) lemma laurent_div : laurent r (algebra_map _ _ p / (algebra_map _ _ q)) = algebra_map _ _ (taylor r p) / (algebra_map _ _ (taylor r q)) := laurent_aux_div r p q @[simp] lemma laurent_algebra_map : laurent r (algebra_map _ _ p) = algebra_map _ _ (taylor r p) := laurent_aux_algebra_map _ _ @[simp] lemma laurent_X : laurent r X = X + C r := by rw [←algebra_map_X, laurent_algebra_map, taylor_X, _root_.map_add, algebra_map_C] @[simp] lemma laurent_C (x : R) : laurent r (C x) = C x := by rw [←algebra_map_C, laurent_algebra_map, taylor_C] @[simp] lemma laurent_at_zero : laurent 0 f = f := by { induction f using ratfunc.induction_on, simp } lemma laurent_laurent : laurent r (laurent s f) = laurent (r + s) f := begin induction f using ratfunc.induction_on, simp_rw [laurent_div, taylor_taylor] end lemma laurent_injective : function.injective (laurent r) := λ _ _ h, by simpa [laurent_laurent] using congr_arg (laurent (-r)) h end ratfunc
#' store #' @param x object to store #' @export store <- function(x = NULL){ path = paste0(system.file(package = "stop"), "/store.rds") if(is.null(x)) return(readRDS(path)) saveRDS(x,path) }
[STATEMENT] lemma tag_machin: "a * arctan b = MACHIN_TAG a (arctan b)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. a * arctan b = MACHIN_TAG a (arctan b) [PROOF STEP] by (simp add: MACHIN_TAG_def)
lemma disjoint_ballI: "dist x y \<ge> r+s \<Longrightarrow> ball x r \<inter> ball y s = {}"
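A one-line informal justification via the triangle inequality (a sketch of the standard argument):
If some $z$ lay in $\mathrm{ball}\,x\,r \cap \mathrm{ball}\,y\,s$, then
\[
  \mathrm{dist}\,x\,y \;\le\; \mathrm{dist}\,x\,z + \mathrm{dist}\,z\,y \;<\; r + s ,
\]
contradicting $\mathrm{dist}\,x\,y \ge r + s$; hence the two balls are disjoint.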
(* Property from Productive Use of Failure in Inductive Proof, Andrew Ireland and Alan Bundy, JAR 1996. This Isabelle theory is produced using the TIP tool offered at the following website: https://github.com/tip-org/tools This file was originally provided as part of TIP benchmark at the following website: https://github.com/tip-org/benchmarks Yutaka Nagashima at CIIRC, CTU changed the TIP output theory file slightly to make it compatible with Isabelle2017.*) theory TIP_prop_44 imports "../../Test_Base" begin datatype 'a list = nil2 | cons2 "'a" "'a list" datatype Nat = Z | S "Nat" fun y :: "Nat => Nat => bool" where "y (Z) (Z) = True" | "y (Z) (S z2) = False" | "y (S x2) (Z) = False" | "y (S x2) (S y22) = y x2 y22" fun x :: "bool => bool => bool" where "x True y2 = True" | "x False y2 = y2" fun elem :: "Nat => Nat list => bool" where "elem z (nil2) = False" | "elem z (cons2 z2 xs) = x (y z z2) (elem z xs)" fun intersect :: "Nat list => Nat list => Nat list" where "intersect (nil2) y2 = nil2" | "intersect (cons2 z2 xs) y2 = (if elem z2 y2 then cons2 z2 (intersect xs y2) else intersect xs y2)" theorem property0 : "((elem z y2) ==> ((elem z z2) ==> (elem z (intersect y2 z2))))" oops end
theory T101 imports Main begin lemma "( (\<forall> x::nat. \<forall> y::nat. meet(x, y) = meet(y, x)) & (\<forall> x::nat. \<forall> y::nat. join(x, y) = join(y, x)) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(x, meet(y, z)) = meet(meet(x, y), z)) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(x, join(y, z)) = join(join(x, y), z)) & (\<forall> x::nat. \<forall> y::nat. meet(x, join(x, y)) = x) & (\<forall> x::nat. \<forall> y::nat. join(x, meet(x, y)) = x) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(x, join(y, z)) = join(mult(x, y), mult(x, z))) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(join(x, y), z) = join(mult(x, z), mult(y, z))) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(x, over(join(mult(x, y), z), y)) = x) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(y, undr(x, join(mult(x, y), z))) = y) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(mult(over(x, y), y), x) = x) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(mult(y, undr(y, x)), x) = x) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(x, meet(y, z)) = meet(mult(x, y), mult(x, z))) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. undr(meet(x, y), z) = join(undr(x, z), undr(y, z))) & (\<forall> x::nat. \<forall> y::nat. invo(join(x, y)) = meet(invo(x), invo(y))) & (\<forall> x::nat. \<forall> y::nat. invo(meet(x, y)) = join(invo(x), invo(y))) & (\<forall> x::nat. invo(invo(x)) = x) ) \<longrightarrow> (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. over(join(x, y), z) = join(over(x, z), over(y, z))) " nitpick[card nat=4,timeout=86400] oops end
module AuxDefs where data ℕ : Set where zero : ℕ suc : ℕ → ℕ _+_ : ℕ → ℕ → ℕ zero + n = n (suc m) + n = suc (m + n) data Bool : Set where true : Bool false : Bool data Comparison : Set where less : Comparison equal : Comparison greater : Comparison _<_ : ℕ → ℕ → Bool zero < y = true suc x < zero = false suc x < suc y = x < y
Formal statement is: lemma valid_path_polynomial_function: fixes p :: "real \<Rightarrow> 'a::euclidean_space" shows "polynomial_function p \<Longrightarrow> valid_path p" Informal statement is: If $p$ is a polynomial function, then $p$ is a valid path.
The goal of the induction phase is to reach a complete remission. Complete remission does not mean the disease has been cured; rather, it signifies no disease can be detected with available diagnostic methods. Complete remission is obtained in about 50%–75% of newly diagnosed adults, although this may vary based on the prognostic factors described above. The length of remission depends on the prognostic features of the original leukemia. In general, all remissions will fail without additional consolidation therapy.
Require Import HoareDef OpenDef Open STB. Require Import Add0 Repeat0 Add1 Repeat1 Add01proof Repeat01proof. Require Import Coqlib. Require Import ImpPrelude. Require Import Skeleton. Require Import PCM. Require Import ModSem Behavior. Require Import Relation_Definitions. (*** TODO: export these in Coqlib or Universe ***) Require Import Relation_Operators. Require Import RelationPairs. From ITree Require Import Events.MapDefault. From ExtLib Require Import Core.RelDec Structures.Maps Data.Map.FMapAList. Require Import ProofMode Invariant. Require Import Imp. Require Import ImpNotations. Require Import ImpProofs. Set Implicit Arguments. Local Open Scope nat_scope. Section PROOF. Let Σ: GRA.t := GRA.of_list []. Local Existing Instance Σ. Let FunStb: Sk.t -> gname -> option fspec := fun sk => to_stb [("succ", succ_spec)]. Let GlobalStb: Sk.t -> gname -> option fspec := fun sk => to_closed_stb (KMod.get_stb [Add1.KAdd; Repeat1.KRepeat FunStb] sk). Let FunStb_incl: forall sk, stb_incl (FunStb sk) (GlobalStb sk). Proof. i. etrans; [|eapply to_closed_stb_weaker]. stb_incl_tac. Qed. Let GlobalStb_repeat: forall sk, fn_has_spec (GlobalStb sk) "repeat" (Repeat1.repeat_spec FunStb sk). Proof. ii. econs; ss. refl. Qed. Let FunStb_succ: forall sk, fn_has_spec (FunStb sk) "succ" (Add1.succ_spec). Proof. ii. econs; ss. refl. Qed. Let prog_tgt := [Add0.Add; Repeat0.Repeat]. Let prog_src := KMod.transl_src_list [Add1.KAdd; Repeat1.KRepeat FunStb]. Theorem correct: refines2 prog_tgt prog_src. Proof. etrans; cycle 1. { eapply adequacy_open. i. exists ε. splits; ss. g_wf_tac. } eapply refines2_cons. { eapply Add01proof.correct; et. } { eapply Repeat01proof.correct; et. unfold to_closed_stb. ii. des_ifs. } Qed. End PROOF.
(* Adapted from Tobias Nipkow in order to accommodate bounded regular expressions *) section "Extended Regular Expressions 3" theory Regular_Exps3 imports "Regular-Sets.Regular_Set" begin datatype (atoms: 'a) rexp = is_Zero: Zero | is_One: One | Atom 'a | Plus "('a rexp)" "('a rexp)" | Times "('a rexp)" "('a rexp)" | Star "('a rexp)" | NTimes "('a rexp)" "nat" | (* r{n} - exactly n-times *) Upto "('a rexp)" "nat" | (* r{..n} - up to n-times *) From "('a rexp)" "nat" | (* r{n..} - from n-times *) Rec string "('a rexp)" | (* record regular expression *) Charset "('a set)" fun lang :: "'a rexp => 'a lang" where "lang Zero = {}" | "lang One = {[]}" | "lang (Atom a) = {[a]}" | "lang (Plus r s) = (lang r) Un (lang s)" | "lang (Times r s) = conc (lang r) (lang s)" | "lang (Star r) = star(lang r)" | "lang (NTimes r n) = ((lang r) ^^ n)" | "lang (Upto r n) = (\<Union>i \<in> {..n}. (lang r) ^^ i)" | "lang (From r n) = (\<Union>i \<in> {n..}. (lang r) ^^ i)" | "lang (Rec l r) = lang r" | "lang (Charset cs) = {[c] | c . c \<in> cs}" primrec nullable :: "'a rexp \<Rightarrow> bool" where "nullable Zero = False" | "nullable One = True" | "nullable (Atom c) = False" | "nullable (Plus r1 r2) = (nullable r1 \<or> nullable r2)" | "nullable (Times r1 r2) = (nullable r1 \<and> nullable r2)" | "nullable (Star r) = True" | "nullable (NTimes r n) = (if n = 0 then True else nullable r)" | "nullable (Upto r n) = True" | "nullable (From r n) = (if n = 0 then True else nullable r)" | "nullable (Rec l r) = nullable r" | "nullable (Charset cs) = False" lemma pow_empty_iff: shows "[] \<in> (lang r) ^^ n \<longleftrightarrow> (if n = 0 then True else [] \<in> (lang r))" by (induct n)(auto) lemma nullable_iff: shows "nullable r \<longleftrightarrow> [] \<in> lang r" by (induct r) (auto simp add: conc_def pow_empty_iff split: if_splits) end
%!TEX root = ../main.tex \section{LOREM IPSUM} \lipsum[1-8]
theory Asm imports Main Exp begin section \<open>Stack Machine and Compilation\<close> datatype instr = LOADI val | LOAD vname | ADD type_synonym stack = "val list" fun exec1 :: "instr \<Rightarrow> state \<Rightarrow> stack \<Rightarrow> stack" where "exec1 (LOADI n) _ stk = n # stk" | "exec1 (LOAD x) s stk = s(x) # stk" | "exec1 ADD _ (j # i # stk) = (i + j) # stk" fun exec :: "instr list \<Rightarrow> state \<Rightarrow> stack \<Rightarrow> stack" where "exec [] _ stk = stk" | "exec (i # is) s stk = exec is s (exec1 i s stk)" fun comp :: "aexp \<Rightarrow> instr list" where "comp (N n) = [LOADI n]" | "comp (V x) = [LOAD x]" | "comp (Plus e1 e2) = comp e1 @ comp e2 @ [ADD]" lemma exec_append[simp]: "exec (is1 @ is2) s stk = exec is2 s (exec is1 s stk)" apply(induction is1 arbitrary: stk) by auto lemma correctness_asm[simp]: "exec (comp a) s stk = (aval a s) # stk" apply(induction a arbitrary: stk) by simp_all end
using GeometricIntegrators.Equations using GeometricIntegrators.Equations: function_v_dummy, get_λ₀ using Test t₀ = 0. q₀ = [1.] p₀ = [1.] x₀ = [1., 1.] λ₀ = [0.] @testset "$(rpad("General equation functionality",80))" begin @test function_v_dummy(t₀, q₀, p₀, λ₀) == nothing @test get_λ₀(zeros(3), zeros(3)) == zeros(3) @test get_λ₀(zeros(3), zeros(3,3)) == zeros(3) @test get_λ₀(zeros(3,3), zeros(3)) == zeros(3,3) struct TestEquation{DT,TT} <: Equation{DT,TT} end @test_throws ErrorException ndims(TestEquation{Float64,Float64}()) end include("deterministic_equations_tests.jl") include("stochastic_equations_tests.jl")
(* Title: HOL/Auth/n_german_lemma_inv__48_on_rules.thy Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences *) header{*The n_german Protocol Case Study*} theory n_german_lemma_inv__48_on_rules imports n_german_lemma_on_inv__48 begin section{*All lemmas on causal relation between inv__48*} lemma lemma_inv__48_on_rules: assumes b1: "r \<in> rules N" and b2: "(\<exists> p__Inv3 p__Inv4. p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__48 p__Inv3 p__Inv4)" shows "invHoldForRule s f r (invariants N)" proof - have c1: "(\<exists> j. j\<le>N\<and>r=n_SendReqS j)\<or> (\<exists> i. i\<le>N\<and>r=n_SendReqEI i)\<or> (\<exists> i. i\<le>N\<and>r=n_SendReqES i)\<or> (\<exists> i. i\<le>N\<and>r=n_RecvReq N i)\<or> (\<exists> i. i\<le>N\<and>r=n_SendInvE i)\<or> (\<exists> i. i\<le>N\<and>r=n_SendInvS i)\<or> (\<exists> i. i\<le>N\<and>r=n_SendInvAck i)\<or> (\<exists> i. i\<le>N\<and>r=n_RecvInvAck i)\<or> (\<exists> i. i\<le>N\<and>r=n_SendGntS i)\<or> (\<exists> i. i\<le>N\<and>r=n_SendGntE N i)\<or> (\<exists> i. i\<le>N\<and>r=n_RecvGntS i)\<or> (\<exists> i. i\<le>N\<and>r=n_RecvGntE i)\<or> (\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d)" apply (cut_tac b1, auto) done moreover { assume d1: "(\<exists> j. j\<le>N\<and>r=n_SendReqS j)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_SendReqSVsinv__48) done } moreover { assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendReqEI i)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_SendReqEIVsinv__48) done } moreover { assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendReqES i)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_SendReqESVsinv__48) done } moreover { assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvReq N i)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_RecvReqVsinv__48) done } moreover { assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInvE i)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_SendInvEVsinv__48) done } moreover { assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInvS i)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_SendInvSVsinv__48) done } moreover { assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInvAck i)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_SendInvAckVsinv__48) done } moreover { assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvInvAck i)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_RecvInvAckVsinv__48) done } moreover { assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendGntS i)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_SendGntSVsinv__48) done } moreover { assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendGntE N i)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_SendGntEVsinv__48) done } moreover { assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntS i)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_RecvGntSVsinv__48) done } moreover { assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntE i)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_RecvGntEVsinv__48) done } moreover { assume d1: "(\<exists> i d. 
i\<le>N\<and>d\<le>N\<and>r=n_Store i d)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_StoreVsinv__48) done } ultimately show "invHoldForRule s f r (invariants N)" by satx qed end
\chapter{Prototyping the Semantics of a Domain-Specific Modeling Language} \label{chap:prototype-semantics} \input{prototype-semantics/abstract} \input{prototype-semantics/introduction} %\input{prototype-semantics/dsl} \input{prototype-semantics/prototyping-semantics} \input{prototype-semantics/visualization} \input{prototype-semantics/verification} \input{prototype-semantics/related-work} \input{prototype-semantics/conclusions-and-future-work}
{- null is Nothing is not Just not null is not Nothing is Just == /= == Just 42 Nothing -} import CIL.FFI import CIL.System.Reflection putMaybeString : Maybe String -> CIL_IO () putMaybeString s = putStrLn (maybe "null" id s) putIsNothing : Maybe String -> CIL_IO () putIsNothing Nothing = putStrLn "is Nothing" putIsNothing _ = putStrLn "is not Nothing" putIsJust : Maybe String -> CIL_IO () putIsJust (Just _) = putStrLn "is Just" putIsJust _ = putStrLn "is not Just" TheExportsTy : CILTy TheExportsTy = CILTyRef "" "TheExports" %inline invokeMaybeString : String -> Maybe String -> CIL_IO () invokeMaybeString fn s = invokeStatic TheExportsTy fn (Maybe String -> CIL_IO ()) s testFFI : Maybe String -> CIL_IO () testFFI s = do invokeMaybeString "putMaybeString" s invokeMaybeString "putIsNothing" s invokeMaybeString "putIsJust" s valueOf : Bool -> Maybe String valueOf False = Nothing valueOf True = Just "True" testMaybeEq : Bool -> Maybe String -> CIL_IO () testMaybeEq b m = putStrLn (if valueOf b == m then "==" else "/=") maybeInt : Bool -> Maybe Int maybeInt True = Just 42 maybeInt False = Nothing testMaybeInt : Bool -> CIL_IO () testMaybeInt b = printLn (maybeInt b) %inline invokeTestMaybeInt : Bool -> CIL_IO () invokeTestMaybeInt = invokeStatic TheExportsTy "testMaybeInt" (Bool -> CIL_IO ()) main : CIL_IO () main = do testFFI Nothing testFFI (Just "not null") testMaybeEq False Nothing testMaybeEq False (Just "True") testMaybeEq True (Just "True") invokeTestMaybeInt True invokeTestMaybeInt False exports : FFI_Export FFI_CIL "TheExports" [] exports = Fun putMaybeString CILDefault $ Fun putIsNothing CILDefault $ Fun putIsJust CILDefault $ Fun testMaybeInt CILDefault $ End -- Local Variables: -- idris-load-packages: ("cil") -- End:
(* Title: HOL/Auth/n_german_lemma_inv__44_on_rules.thy Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences *) header{*The n_german Protocol Case Study*} theory n_german_lemma_inv__44_on_rules imports n_german_lemma_on_inv__44 begin section{*All lemmas on causal relation between inv__44*} lemma lemma_inv__44_on_rules: assumes b1: "r \<in> rules N" and b2: "(\<exists> p__Inv3 p__Inv4. p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__44 p__Inv3 p__Inv4)" shows "invHoldForRule s f r (invariants N)" proof - have c1: "(\<exists> j. j\<le>N\<and>r=n_SendReqS j)\<or> (\<exists> i. i\<le>N\<and>r=n_SendReqEI i)\<or> (\<exists> i. i\<le>N\<and>r=n_SendReqES i)\<or> (\<exists> i. i\<le>N\<and>r=n_RecvReq N i)\<or> (\<exists> i. i\<le>N\<and>r=n_SendInvE i)\<or> (\<exists> i. i\<le>N\<and>r=n_SendInvS i)\<or> (\<exists> i. i\<le>N\<and>r=n_SendInvAck i)\<or> (\<exists> i. i\<le>N\<and>r=n_RecvInvAck i)\<or> (\<exists> i. i\<le>N\<and>r=n_SendGntS i)\<or> (\<exists> i. i\<le>N\<and>r=n_SendGntE N i)\<or> (\<exists> i. i\<le>N\<and>r=n_RecvGntS i)\<or> (\<exists> i. i\<le>N\<and>r=n_RecvGntE i)\<or> (\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d)" apply (cut_tac b1, auto) done moreover { assume d1: "(\<exists> j. j\<le>N\<and>r=n_SendReqS j)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_SendReqSVsinv__44) done } moreover { assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendReqEI i)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_SendReqEIVsinv__44) done } moreover { assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendReqES i)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_SendReqESVsinv__44) done } moreover { assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvReq N i)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_RecvReqVsinv__44) done } moreover { assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInvE i)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_SendInvEVsinv__44) done } moreover { assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInvS i)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_SendInvSVsinv__44) done } moreover { assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInvAck i)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_SendInvAckVsinv__44) done } moreover { assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvInvAck i)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_RecvInvAckVsinv__44) done } moreover { assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendGntS i)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_SendGntSVsinv__44) done } moreover { assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendGntE N i)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_SendGntEVsinv__44) done } moreover { assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntS i)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_RecvGntSVsinv__44) done } moreover { assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntE i)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_RecvGntEVsinv__44) done } moreover { assume d1: "(\<exists> i d. 
i\<le>N\<and>d\<le>N\<and>r=n_Store i d)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_StoreVsinv__44) done } ultimately show "invHoldForRule s f r (invariants N)" by satx qed end
6) is formed when antimony is burnt in air. In the gas phase, this compound exists as Sb
lemma primitive_part_prim: "content p = 1 \<Longrightarrow> primitive_part p = p"
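An informal sketch of why this holds (assuming the usual decomposition of a polynomial into its content and primitive part):
\[
  p \;=\; \mathrm{smult}\,(\mathrm{content}\,p)\,(\mathrm{primitive\_part}\,p)
    \;=\; \mathrm{smult}\,1\,(\mathrm{primitive\_part}\,p)
    \;=\; \mathrm{primitive\_part}\,p .
\]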
`is_element/shuffles` := (n::nonnegint,m::nonnegint) -> proc(s) local i; if not(`is_element/permutations`(n+m)(s)) then return false; fi; for i from 1 to n+m-1 do if i <> n and s[i+1] < s[i] then return false; fi; od: return true; end: `is_equal/shuffles` := (n,m) -> (s,t) -> evalb(s = t); `is_leq/shuffles` := NULL; `from_subset/shuffles` := (n::nonnegint,m::nonnegint) -> proc(A) option remember; local X,B,i; X := {seq(i,i=1..n+m)}; if A minus X <> {} then return FAIL; fi; if nops(A) <> n then return FAIL; fi; B := X minus A; return [op(sort(A)),op(sort(B))]; end: `to_subset/shuffles` := (n::nonnegint,m::nonnegint) -> proc(s) local i; {seq(s[i],i=1..n)}; end: `to_grid_path/shuffles` := (n::nonnegint,m::nonnegint) -> proc(s) local t,si,i; t := table(): t[0] := [0,0]; si := `inv/permutations`(n+m)(s); for i from 1 to n+m do t[i] := `if`(si[i] <= n,[1,0],[0,1]) +~ t[i-1]; od: return eval(t); end: `from_grid_path/shuffles` := (n::nonnegint,m::nonnegint) -> proc(t) local A,i; A := select(i -> t[i][1] > t[i-1][1],{seq(i,i=1..n+m)}); return `from_subset/shuffles`(n,m)(A); end: `random_element/shuffles` := (n::nonnegint,m::nonnegint) -> proc() local u,A,i; u := combinat[randperm](n+m); A := {seq(u[i],i=1..n)}; return `from_subset/shuffles`(n,m)(A); end: `list_elements/shuffles` := proc(n::nonnegint,m::nonnegint) local AA; AA := map(A -> {op(A)},combinat[choose](n+m,n)); return map(`from_subset/shuffles`(n,m),AA); end: `list_elements/inverse_shuffles` := proc(n::nonnegint,m::nonnegint) option remember; map(`inv/permutations`(n+m),`list_elements/shuffles`(n,m)); end: `count_elements/shuffles` := (n::nonnegint,m::nonnegint) -> binomial(n+m,n); `sgn/shuffles` := (n::nonnegint,m::nonnegint) -> proc(s) local i,j; return signum(mul(mul(s[j]-s[i],j=i+1..n+m),i=1..n+m-1)); end:
{-# OPTIONS --cubical --safe #-} module Data.Integer where open import Level open import Data.Nat using (ℕ; suc; zero) import Data.Nat as ℕ import Data.Nat.Properties as ℕ open import Data.Bool data ℤ : Type where ⁺ : ℕ → ℤ ⁻suc : ℕ → ℤ ⁻ : ℕ → ℤ ⁻ zero = ⁺ zero ⁻ (suc n) = ⁻suc n {-# DISPLAY ⁻suc n = ⁻ suc n #-} negate : ℤ → ℤ negate (⁺ x) = ⁻ x negate (⁻suc x) = ⁺ (suc x) {-# DISPLAY negate x = ⁻ x #-} infixl 6 _+_ _-suc_ : ℕ → ℕ → ℤ x -suc y = if y ℕ.<ᴮ x then ⁺ (x ℕ.∸ (suc y)) else ⁻suc (y ℕ.∸ x) _+_ : ℤ → ℤ → ℤ ⁺ x + ⁺ y = ⁺ (x ℕ.+ y) ⁺ x + ⁻suc y = x -suc y ⁻suc x + ⁺ y = y -suc x ⁻suc x + ⁻suc y = ⁻suc (suc x ℕ.+ y) _*-suc_ : ℕ → ℕ → ℤ zero *-suc m = ⁺ zero suc n *-suc m = ⁻suc (n ℕ.+ m ℕ.+ n ℕ.* m) infixl 7 _*_ _*_ : ℤ → ℤ → ℤ ⁺ x * ⁺ y = ⁺ (x ℕ.* y) ⁺ x * ⁻suc y = x *-suc y ⁻suc x * ⁺ y = y *-suc x ⁻suc x * ⁻suc y = ⁺ (suc x ℕ.* suc y)
{-# OPTIONS --without-K --safe #-} module Cham.Inference where
Formal statement is: lemma uniformly_continuous_on_extension_on_closure: fixes f::"'a::metric_space \<Rightarrow> 'b::complete_space" assumes uc: "uniformly_continuous_on X f" obtains g where "uniformly_continuous_on (closure X) g" "\<And>x. x \<in> X \<Longrightarrow> f x = g x" "\<And>Y h x. X \<subseteq> Y \<Longrightarrow> Y \<subseteq> closure X \<Longrightarrow> continuous_on Y h \<Longrightarrow> (\<And>x. x \<in> X \<Longrightarrow> f x = h x) \<Longrightarrow> x \<in> Y \<Longrightarrow> h x = g x" Informal statement is: If $f$ is uniformly continuous on a set $X$, then there exists a uniformly continuous extension of $f$ to the closure of $X$.
[STATEMENT] lemma concat_in_star: "set ws \<subseteq> A \<Longrightarrow> concat ws : star A" [PROOF STATE] proof (prove) goal (1 subgoal): 1. set ws \<subseteq> A \<Longrightarrow> concat ws \<in> star A [PROOF STEP] by (induct ws) simp_all
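The induction behind this one-line proof, spelled out informally:
For $ws = []$ we have $\mathrm{concat}\,[] = [] \in A^{*}$. For $ws = w \,\#\, ws'$ with $\mathrm{set}\,ws \subseteq A$, we get $w \in A$ and, by the induction hypothesis, $\mathrm{concat}\,ws' \in A^{*}$, hence
\[
  \mathrm{concat}\,(w \,\#\, ws') \;=\; w \mathbin{@} \mathrm{concat}\,ws' \;\in\; A \cdot A^{*} \;\subseteq\; A^{*} .
\]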
#ifndef __DETGRAPHUTILS_HPP__ #define __DETGRAPHUTILS_HPP__ #include <string> #include <vector> #include <boost/graph/graphviz.hpp> #include <boost/graph/adjacency_list.hpp> #include <boost/range/iterator_range.hpp> #include "HelperMaps.hpp" #include "GraphUtils.hpp" struct dgVerticeProps { std::string name; std::string role; std::vector<Graph::vDesc> vs; }; struct dgEdgeProps { std::string label; std::string lowlink; }; struct dgGraphProps { std::string name; }; using DG_t = boost::adjacency_list<boost::vecS, boost::vecS, boost::directedS, dgVerticeProps, dgEdgeProps, dgGraphProps>; namespace DG { struct Vertex { std::string name; std::string role; std::vector<Graph::vDesc> vs; }; using vDesc = DG_t::vertex_descriptor; using eDesc = DG_t::edge_descriptor; using vIter = DG_t::vertex_iterator; using vIterPair = std::pair<vIter, vIter>; using eIter = DG_t::edge_iterator; using eIterPair = std::pair<eIter, eIter>; using oeIter = DG_t::out_edge_iterator; using oeIterPair = std::pair<oeIter, oeIter>; DG_t determinize(const Graph_t &g, const edgeLabelSet &els); DG::Vertex createVertex(); DG::Vertex createVertex(const std::vector<Graph::vDesc> &vertices, const Graph_t &g); DG::Vertex createVertex(const std::set<Graph::vDesc> &vertices, const Graph_t &g); DG::Vertex createStart(); DG::vDesc getStart(const DG_t &dg); bool hasVertex(const DG_t &dg, const DG::Vertex &v); DG::vDesc getVertexByName(const DG_t &dg, const std::string &name); void addVertexToSet(const Graph::vDesc &gv, DG::Vertex &dg); void updateVertexName(DG::Vertex &dgv, const Graph_t &g); void updateVertexName(DG_t &dg, const DG::vDesc &v, const Graph_t &g); DG::vDesc addEmptyVertex(DG_t &dg, const edgeLabelSet &els); void addSelfEdges(DG_t &dg, const DG::vDesc &v, const edgeLabelSet &els); DG::eDesc addEdge(DG_t &dg, const DG::vDesc &src, const label &l, const DG::vDesc &dst); DG::vDesc addVertex(DG_t &dg, const DG::Vertex &v); std::vector<DG::eDesc> getOutEdges(const DG_t &dg, const DG::vDesc &v, const Graph_t &g); Range<DG::oeIter> getOutEdges(const DG_t &g, const DG::vDesc &v); bool hasEdgeForGrouping(const Graph_t &g, const std::vector<Graph::eDesc> &edges, const alignmentGrouping &gp, label& l); std::vector<Graph::eDesc> getEdgesForGrouping(const Graph_t &g, const std::vector<Graph::eDesc> &edges, const alignmentGrouping &gp); DG::vDesc getDst(const DG::vDesc &v, const std::string &l, const DG_t &g); void print(const DG_t &g); void print(const DG_t &g, std::ostream& target); void printVertices(const DG_t &g); void printEdges(const DG_t &g); void printOutEdge(const DG_t &g, const Graph::eDesc &e); void printOutEdge(const DG_t &g, const Graph::eDesc &e, std::ostream& target); void printOutEdges(const DG_t &g, const Graph::vDesc &vd); void printOutEdges(const DG_t &g, const Graph::vDesc &vd, std::ostream& target); } #endif // __DETGRAPHUTILS_HPP__
The CPS men served without wages and minimal support from the federal government. The cost of maintaining the CPS camps and providing for the needs of the men was the responsibility of their congregations and families. CPS men served longer than regular draftees and were not released until well after the end of the war. Initially skeptical of the program, government agencies learned to appreciate the men's service and requested more workers from the program. CPS made significant contributions to forest fire prevention, erosion and flood control, medical science and reform of the mental health system.
State Before: α : Type u_1 inst✝ : CircularOrder α a b : α ⊢ cIoo a bᶜ = cIcc b a State After: case h α : Type u_1 inst✝ : CircularOrder α a b x✝ : α ⊢ x✝ ∈ cIoo a bᶜ ↔ x✝ ∈ cIcc b a Tactic: ext State Before: case h α : Type u_1 inst✝ : CircularOrder α a b x✝ : α ⊢ x✝ ∈ cIoo a bᶜ ↔ x✝ ∈ cIcc b a State After: case h α : Type u_1 inst✝ : CircularOrder α a b x✝ : α ⊢ x✝ ∈ cIoo a bᶜ ↔ ¬sbtw a x✝ b Tactic: rw [Set.mem_cIcc, btw_iff_not_sbtw] State Before: case h α : Type u_1 inst✝ : CircularOrder α a b x✝ : α ⊢ x✝ ∈ cIoo a bᶜ ↔ ¬sbtw a x✝ b State After: no goals Tactic: rfl
Formal statement is: lemma closed_approachable: fixes S :: "'a::metric_space set" shows "closed S \<Longrightarrow> (\<forall>e>0. \<exists>y\<in>S. dist y x < e) \<longleftrightarrow> x \<in> S" Informal statement is: If $S$ is a closed set, then $x \in S$ if and only if for every $\epsilon > 0$, there exists $y \in S$ such that $|x - y| < \epsilon$.
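An informal argument (a sketch using the standard closure characterisation in metric spaces):
In a metric space, $x \in \overline{S}$ iff every open ball around $x$ meets $S$, i.e. iff for every $\varepsilon > 0$ there is $y \in S$ with $\mathrm{dist}\,y\,x < \varepsilon$. If $S$ is closed then $\overline{S} = S$, so this approachability condition is equivalent to $x \in S$.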
library(tau) library(ngram) x <- ngram::rcorpus(50000) tautime <- system.time(pt1 <- textcnt(x, n=3, split=" ", method="string"))[3] ngtime <- system.time(pt2 <- get.phrasetable(ngram(x, n=3)))[3] cat("tau: ", tautime, "\n") cat("ngram: ", ngtime, "\n") cat("tau/ngram: ", tautime/ngtime, "\n")
lemma continuous_on_finite: fixes S :: "'a::t1_space set" shows "finite S \<Longrightarrow> continuous_on S f"
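Why this holds, informally (a sketch relying on the $T_1$ assumption):
In a $T_1$ space every singleton is closed, so every subset of the finite set $S$ is closed in $S$ (a finite union of singletons) and hence its complement in $S$ is open as well. The subspace topology on $S$ is therefore discrete, and every map out of a discrete space is continuous.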
theory T133 imports Main begin lemma "( (\<forall> x::nat. \<forall> y::nat. meet(x, y) = meet(y, x)) & (\<forall> x::nat. \<forall> y::nat. join(x, y) = join(y, x)) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(x, meet(y, z)) = meet(meet(x, y), z)) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(x, join(y, z)) = join(join(x, y), z)) & (\<forall> x::nat. \<forall> y::nat. meet(x, join(x, y)) = x) & (\<forall> x::nat. \<forall> y::nat. join(x, meet(x, y)) = x) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(x, join(y, z)) = join(mult(x, y), mult(x, z))) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(join(x, y), z) = join(mult(x, z), mult(y, z))) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(x, over(join(mult(x, y), z), y)) = x) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(y, undr(x, join(mult(x, y), z))) = y) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(mult(over(x, y), y), x) = x) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(mult(y, undr(y, x)), x) = x) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(meet(x, y), z) = meet(mult(x, z), mult(y, z))) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. undr(x, join(y, z)) = join(undr(x, y), undr(x, z))) & (\<forall> x::nat. \<forall> y::nat. invo(join(x, y)) = meet(invo(x), invo(y))) & (\<forall> x::nat. \<forall> y::nat. invo(meet(x, y)) = join(invo(x), invo(y))) & (\<forall> x::nat. invo(invo(x)) = x) ) \<longrightarrow> (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. over(x, meet(y, z)) = join(over(x, y), over(x, z))) " nitpick[card nat=4,timeout=86400] oops end
plot_fpmpi <- function(x, which=1L:4L, show.title=TRUE, label) { df <- as.data.frame(x@parsed) df$Routine <- factor(df$Routine) df$Calls <- factor(df$Calls) if (missing(label)) label <- "FPMPI Profiler Output" # Set up plots g1 <- ggplot(df, aes_string(x = 'Routine', y = 'Calls')) + geom_bar(stat = "identity", aes_string(fill = 'Routine')) + xlab("Routine") + ylab("Number of Function Calls") + theme(axis.text.x = element_text(angle = 20, hjust = 1)) + theme(legend.position = "none") g2 <- ggplot(df, aes_string(x = 'Routine', y = 'Time')) + geom_bar(stat = "identity", aes_string(fill = 'Routine')) + xlab("Routine") + ylab("Time (seconds)") + theme(axis.text.x = element_text(angle = 20, hjust = 1)) + theme(legend.position = "none") g3 <- ggplot(df, aes_string(x = 'Routine', y = 'Data.Sent')) + geom_bar(stat = "identity", aes_string(fill = 'Routine')) + xlab("Routine") + ylab("Data Sent (bytes)") + theme(axis.text.x = element_text(angle = 20, hjust = 1)) + theme(legend.position = "none") g4 <- ggplot(df, aes_string(x = 'Routine', y = 'SyncTime')) + geom_bar(stat = "identity", aes_string(fill = 'Routine')) + xlab("Routine") + ylab("Sync Time (seconds)") + theme(axis.text.x = element_text(angle = 20, hjust = 1)) + theme(legend.position = "none") # Plot them plots <- list(g1, g2, g3, g4) g <- grid_plotter(plots=plots, which=which, label=label, show.title=show.title) return( g ) }
library(ggplot2) source("date.r") # Inflation ------------------------------------------------------ cpi <- read.csv("finances-cpi-west.csv") cpi <- rbind(cpi, data.frame(year = 2008, month = 11, cpi = cpi$cpi[nrow(cpi)])) cpi <- subset(cpi, (year == 2003 & month >= 4) | year > 2003) cpi$ratio <- cpi$cpi / cpi$cpi[1] qplot(year + month / 12, ratio, data = cpi, geom = "line", ylab = "Inflation") + xlab(NULL) ggsave(file = "beautiful-data/graphics/daily-cpi.pdf", width = 8, height = 4) # Interest rates ------------------------------------------------------------- # downloaded from http://research.stlouisfed.org/fred2/ mprime <- read.csv("finances-mprime.csv", stringsAsFactors = FALSE) names(mprime) <- c("date", "mprime") mprime$date <- as.Date(mprime$date) fedfunds <- read.csv("finances-fedfunds.csv", stringsAsFactors = FALSE) names(fedfunds) <- c("date", "fedfunds") fedfunds$date <- as.Date(fedfunds$date) irates <- subset(merge(mprime, fedfunds, by = "date"), date > as.Date("2003-03-01")) qplot(mprime, fedfunds, data = irates) with(irates, cor(mprime, fedfunds)) # 0.998 correlation irates$month <- month(irates$date) irates$year <- year(irates$date)
-- @@stderr -- dtrace: failed to compile script test/unittest/union/err.D_ADDROF_VAR.UnionPointer.d: [D_ADDROF_VAR] line 33: cannot take address of dynamic variable
Formal statement is: proposition Lim: "(f \<longlongrightarrow> l) net \<longleftrightarrow> trivial_limit net \<or> (\<forall>e>0. eventually (\<lambda>x. dist (f x) l < e) net)" Informal statement is: A function $f$ tends to $l$ along a net if and only if either the net is trivial, or for every $\epsilon > 0$, $\mathrm{dist}(f(x), l) < \epsilon$ holds eventually along the net.
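A sketch of the equivalence (assuming the metric characterisation of neighbourhood filters):
If the net is trivial, every ``eventually'' statement holds, so both sides are true. Otherwise, $(f \longrightarrow l)$ means that for every neighbourhood $U$ of $l$ the predicate $f(x) \in U$ holds eventually along the net; since the balls $\{y \mid \mathrm{dist}\,y\,l < \varepsilon\}$ form a neighbourhood basis of $l$, this is exactly the condition that for every $\varepsilon > 0$, $\mathrm{dist}(f(x), l) < \varepsilon$ holds eventually.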
-- {-# OPTIONS -v tc.with:40 #-} module RewriteAndWhere where open import Common.Equality data ℕ : Set where zero : ℕ -- good : (a b : ℕ) → a ≡ b → b ≡ a -- good a b eq with a | eq -- ... | .b | refl = foo -- where -- foo : b ≡ b -- foo = refl mutual aux : (a b : ℕ)(w : ℕ) → w ≡ b → b ≡ w aux a b .b refl = foo where foo : b ≡ b foo = refl good₂ : (a b : ℕ) → a ≡ b → b ≡ a good₂ a b eq = aux a b a eq bad : (a b : ℕ) → a ≡ b → b ≡ a bad a b eq rewrite eq = foo where foo : b ≡ b foo = refl -- Andreas, 2014-11-06: this rewrite is trying to touch -- variable b bound in pattern of parent function, which is -- illegal. -- -- foo rewrite sym eq = bar -- where -- bar : a ≡ a -- bar = refl -- Andreas, 2015-11-18 added test during exploration of issue 1692. -- Ulf, 2016-02-25 after fix to #745 this no longer works. -- test : (a b : ℕ) → a ≡ b → b ≡ a -- test a b eq with a | eq -- test a b eq | .b | refl = eq
module Text.Html import public Data.SOP import public Text.Html.Attribute as Html import public Text.Html.Event as Html import public Text.Html.Node as Html
#' get signature loadings #' @param widedat Output from get_nmf_input #' @param nmfdat Output from nmf #' @importFrom dplyr mutate #' @importFrom tidyr gather #' @importFrom stats coef #' @import magrittr #' @return Signature loadings #' @export get_loads <- function(widedat, nmfdat){ sigloads <- data.frame(subtype=names(widedat), sig1=coef(nmfdat)[1,], sig2=coef(nmfdat)[2,], sig3=coef(nmfdat)[3,]) %>% mutate(sig1=sig1/sum(sig1), sig2=sig2/sum(sig2), sig3=sig3/sum(sig3)) %>% gather(sig, value, sig1:sig3) names(sigloads) <- c("subtype", "sig", "value") sigloads <- sigloads %>% mutate(Category=substr(subtype, 1, 5), Sequence=substr(subtype, 7, 14)) return(sigloads) } #' plot signature loadings #' @importFrom ggplot2 ggplot geom_bar facet_grid theme_bw theme #' @param sigloads Signature loadings from get_loads #' @return ggplot2 object with signature loads plot #' @export plot_loads <- function(sigloads){ p <- ggplot(sigloads, aes(x=Sequence, y=value, fill=sig))+ geom_bar(stat="identity")+ facet_grid(sig~Category, scales="free_x")+ theme_bw()+ theme(axis.text.x=element_text(angle=90, hjust=1), strip.text=element_text(size=16), legend.position="none") return(p) } #' plot signature contribution across individuals #' @importFrom ggplot2 ggplot geom_line aes scale_x_discrete scale_y_continuous facet_wrap ylab theme_bw theme #' @param sigdat Signature loadings from get_loads #' @return ggplot2 object with signature by individual loads plot #' @export plot_ind_sigs <- function(sigdat){ p <- ggplot(sigdat, aes(x=ID, y=prob, group=cluster, colour=cluster)) + geom_line()+ scale_x_discrete(expand=c(0,0))+ scale_y_continuous(expand=c(0,0), limits=c(0,1))+ facet_wrap(~Study, scales="free_x", nrow=1)+ ylab("signature contribution")+ theme_bw()+ theme(axis.text.x=element_blank(), legend.position="bottom") return(p) }
.fp \np tr .lg 0
% --- [ Equations used in lab ] ------------------------------------------------ \subsection{Equations used in lab} % present tense \todo{foo}
/- Copyright (c) 2022 Scott Morrison. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Scott Morrison ! This file was ported from Lean 3 source module category_theory.limits.constructions.zero_objects ! leanprover-community/mathlib commit 10bf4f825ad729c5653adc039dafa3622e7f93c9 ! Please do not edit these lines, except to modify the commit id ! if you have ported upstream changes. -/ import Mathbin.CategoryTheory.Limits.Shapes.Pullbacks import Mathbin.CategoryTheory.Limits.Shapes.ZeroMorphisms import Mathbin.CategoryTheory.Limits.Constructions.BinaryProducts /-! # Limits involving zero objects > THIS FILE IS SYNCHRONIZED WITH MATHLIB4. > Any changes to this file require a corresponding PR to mathlib4. Binary products and coproducts with a zero object always exist, and pullbacks/pushouts over a zero object are products/coproducts. -/ noncomputable section open CategoryTheory variable {C : Type _} [Category C] namespace CategoryTheory.Limits variable [HasZeroObject C] [HasZeroMorphisms C] open ZeroObject #print CategoryTheory.Limits.binaryFanZeroLeft /- /-- The limit cone for the product with a zero object. -/ def binaryFanZeroLeft (X : C) : BinaryFan (0 : C) X := BinaryFan.mk 0 (𝟙 X) #align category_theory.limits.binary_fan_zero_left CategoryTheory.Limits.binaryFanZeroLeft -/ #print CategoryTheory.Limits.binaryFanZeroLeftIsLimit /- /-- The limit cone for the product with a zero object is limiting. -/ def binaryFanZeroLeftIsLimit (X : C) : IsLimit (binaryFanZeroLeft X) := BinaryFan.isLimitMk (fun s => BinaryFan.snd s) (by tidy) (by tidy) (by tidy) #align category_theory.limits.binary_fan_zero_left_is_limit CategoryTheory.Limits.binaryFanZeroLeftIsLimit -/ #print CategoryTheory.Limits.hasBinaryProduct_zero_left /- instance hasBinaryProduct_zero_left (X : C) : HasBinaryProduct (0 : C) X := HasLimit.mk ⟨_, binaryFanZeroLeftIsLimit X⟩ #align category_theory.limits.has_binary_product_zero_left CategoryTheory.Limits.hasBinaryProduct_zero_left -/ #print CategoryTheory.Limits.zeroProdIso /- /-- A zero object is a left unit for categorical product. -/ def zeroProdIso (X : C) : (0 : C) ⨯ X ≅ X := limit.isoLimitCone ⟨_, binaryFanZeroLeftIsLimit X⟩ #align category_theory.limits.zero_prod_iso CategoryTheory.Limits.zeroProdIso -/ #print CategoryTheory.Limits.zeroProdIso_hom /- @[simp] theorem zeroProdIso_hom (X : C) : (zeroProdIso X).Hom = prod.snd := rfl #align category_theory.limits.zero_prod_iso_hom CategoryTheory.Limits.zeroProdIso_hom -/ #print CategoryTheory.Limits.zeroProdIso_inv_snd /- @[simp] theorem zeroProdIso_inv_snd (X : C) : (zeroProdIso X).inv ≫ prod.snd = 𝟙 X := by dsimp [zero_prod_iso, binary_fan_zero_left] simp #align category_theory.limits.zero_prod_iso_inv_snd CategoryTheory.Limits.zeroProdIso_inv_snd -/ #print CategoryTheory.Limits.binaryFanZeroRight /- /-- The limit cone for the product with a zero object. -/ def binaryFanZeroRight (X : C) : BinaryFan X (0 : C) := BinaryFan.mk (𝟙 X) 0 #align category_theory.limits.binary_fan_zero_right CategoryTheory.Limits.binaryFanZeroRight -/ #print CategoryTheory.Limits.binaryFanZeroRightIsLimit /- /-- The limit cone for the product with a zero object is limiting. 
-/ def binaryFanZeroRightIsLimit (X : C) : IsLimit (binaryFanZeroRight X) := BinaryFan.isLimitMk (fun s => BinaryFan.fst s) (by tidy) (by tidy) (by tidy) #align category_theory.limits.binary_fan_zero_right_is_limit CategoryTheory.Limits.binaryFanZeroRightIsLimit -/ #print CategoryTheory.Limits.hasBinaryProduct_zero_right /- instance hasBinaryProduct_zero_right (X : C) : HasBinaryProduct X (0 : C) := HasLimit.mk ⟨_, binaryFanZeroRightIsLimit X⟩ #align category_theory.limits.has_binary_product_zero_right CategoryTheory.Limits.hasBinaryProduct_zero_right -/ #print CategoryTheory.Limits.prodZeroIso /- /-- A zero object is a right unit for categorical product. -/ def prodZeroIso (X : C) : X ⨯ (0 : C) ≅ X := limit.isoLimitCone ⟨_, binaryFanZeroRightIsLimit X⟩ #align category_theory.limits.prod_zero_iso CategoryTheory.Limits.prodZeroIso -/ #print CategoryTheory.Limits.prodZeroIso_hom /- @[simp] theorem prodZeroIso_hom (X : C) : (prodZeroIso X).Hom = prod.fst := rfl #align category_theory.limits.prod_zero_iso_hom CategoryTheory.Limits.prodZeroIso_hom -/ #print CategoryTheory.Limits.prodZeroIso_iso_inv_snd /- @[simp] theorem prodZeroIso_iso_inv_snd (X : C) : (prodZeroIso X).inv ≫ prod.fst = 𝟙 X := by dsimp [prod_zero_iso, binary_fan_zero_right] simp #align category_theory.limits.prod_zero_iso_iso_inv_snd CategoryTheory.Limits.prodZeroIso_iso_inv_snd -/ #print CategoryTheory.Limits.binaryCofanZeroLeft /- /-- The colimit cocone for the coproduct with a zero object. -/ def binaryCofanZeroLeft (X : C) : BinaryCofan (0 : C) X := BinaryCofan.mk 0 (𝟙 X) #align category_theory.limits.binary_cofan_zero_left CategoryTheory.Limits.binaryCofanZeroLeft -/ #print CategoryTheory.Limits.binaryCofanZeroLeftIsColimit /- /-- The colimit cocone for the coproduct with a zero object is colimiting. -/ def binaryCofanZeroLeftIsColimit (X : C) : IsColimit (binaryCofanZeroLeft X) := BinaryCofan.isColimitMk (fun s => BinaryCofan.inr s) (by tidy) (by tidy) (by tidy) #align category_theory.limits.binary_cofan_zero_left_is_colimit CategoryTheory.Limits.binaryCofanZeroLeftIsColimit -/ #print CategoryTheory.Limits.hasBinaryCoproduct_zero_left /- instance hasBinaryCoproduct_zero_left (X : C) : HasBinaryCoproduct (0 : C) X := HasColimit.mk ⟨_, binaryCofanZeroLeftIsColimit X⟩ #align category_theory.limits.has_binary_coproduct_zero_left CategoryTheory.Limits.hasBinaryCoproduct_zero_left -/ #print CategoryTheory.Limits.zeroCoprodIso /- /-- A zero object is a left unit for categorical coproduct. -/ def zeroCoprodIso (X : C) : (0 : C) ⨿ X ≅ X := colimit.isoColimitCocone ⟨_, binaryCofanZeroLeftIsColimit X⟩ #align category_theory.limits.zero_coprod_iso CategoryTheory.Limits.zeroCoprodIso -/ #print CategoryTheory.Limits.inr_zeroCoprodIso_hom /- @[simp] theorem inr_zeroCoprodIso_hom (X : C) : coprod.inr ≫ (zeroCoprodIso X).Hom = 𝟙 X := by dsimp [zero_coprod_iso, binary_cofan_zero_left] simp #align category_theory.limits.inr_zero_coprod_iso_hom CategoryTheory.Limits.inr_zeroCoprodIso_hom -/ #print CategoryTheory.Limits.zeroCoprodIso_inv /- @[simp] theorem zeroCoprodIso_inv (X : C) : (zeroCoprodIso X).inv = coprod.inr := rfl #align category_theory.limits.zero_coprod_iso_inv CategoryTheory.Limits.zeroCoprodIso_inv -/ #print CategoryTheory.Limits.binaryCofanZeroRight /- /-- The colimit cocone for the coproduct with a zero object. 
-/ def binaryCofanZeroRight (X : C) : BinaryCofan X (0 : C) := BinaryCofan.mk (𝟙 X) 0 #align category_theory.limits.binary_cofan_zero_right CategoryTheory.Limits.binaryCofanZeroRight -/ #print CategoryTheory.Limits.binaryCofanZeroRightIsColimit /- /-- The colimit cocone for the coproduct with a zero object is colimiting. -/ def binaryCofanZeroRightIsColimit (X : C) : IsColimit (binaryCofanZeroRight X) := BinaryCofan.isColimitMk (fun s => BinaryCofan.inl s) (by tidy) (by tidy) (by tidy) #align category_theory.limits.binary_cofan_zero_right_is_colimit CategoryTheory.Limits.binaryCofanZeroRightIsColimit -/ #print CategoryTheory.Limits.hasBinaryCoproduct_zero_right /- instance hasBinaryCoproduct_zero_right (X : C) : HasBinaryCoproduct X (0 : C) := HasColimit.mk ⟨_, binaryCofanZeroRightIsColimit X⟩ #align category_theory.limits.has_binary_coproduct_zero_right CategoryTheory.Limits.hasBinaryCoproduct_zero_right -/ #print CategoryTheory.Limits.coprodZeroIso /- /-- A zero object is a right unit for categorical coproduct. -/ def coprodZeroIso (X : C) : X ⨿ (0 : C) ≅ X := colimit.isoColimitCocone ⟨_, binaryCofanZeroRightIsColimit X⟩ #align category_theory.limits.coprod_zero_iso CategoryTheory.Limits.coprodZeroIso -/ #print CategoryTheory.Limits.inr_coprodZeroIso_hom /- @[simp] theorem inr_coprodZeroIso_hom (X : C) : coprod.inl ≫ (coprodZeroIso X).Hom = 𝟙 X := by dsimp [coprod_zero_iso, binary_cofan_zero_right] simp #align category_theory.limits.inr_coprod_zeroiso_hom CategoryTheory.Limits.inr_coprodZeroIso_hom -/ #print CategoryTheory.Limits.coprodZeroIso_inv /- @[simp] theorem coprodZeroIso_inv (X : C) : (coprodZeroIso X).inv = coprod.inl := rfl #align category_theory.limits.coprod_zero_iso_inv CategoryTheory.Limits.coprodZeroIso_inv -/ #print CategoryTheory.Limits.hasPullback_over_zero /- instance hasPullback_over_zero (X Y : C) [HasBinaryProduct X Y] : HasPullback (0 : X ⟶ 0) (0 : Y ⟶ 0) := HasLimit.mk ⟨_, isPullbackOfIsTerminalIsProduct _ _ _ _ HasZeroObject.zeroIsTerminal (prodIsProd X Y)⟩ #align category_theory.limits.has_pullback_over_zero CategoryTheory.Limits.hasPullback_over_zero -/ #print CategoryTheory.Limits.pullbackZeroZeroIso /- /-- The pullback over the zeron object is the product. 
-/ def pullbackZeroZeroIso (X Y : C) [HasBinaryProduct X Y] : pullback (0 : X ⟶ 0) (0 : Y ⟶ 0) ≅ X ⨯ Y := limit.isoLimitCone ⟨_, isPullbackOfIsTerminalIsProduct _ _ _ _ HasZeroObject.zeroIsTerminal (prodIsProd X Y)⟩ #align category_theory.limits.pullback_zero_zero_iso CategoryTheory.Limits.pullbackZeroZeroIso -/ #print CategoryTheory.Limits.pullbackZeroZeroIso_inv_fst /- @[simp] theorem pullbackZeroZeroIso_inv_fst (X Y : C) [HasBinaryProduct X Y] : (pullbackZeroZeroIso X Y).inv ≫ pullback.fst = prod.fst := by dsimp [pullback_zero_zero_iso] simp #align category_theory.limits.pullback_zero_zero_iso_inv_fst CategoryTheory.Limits.pullbackZeroZeroIso_inv_fst -/ #print CategoryTheory.Limits.pullbackZeroZeroIso_inv_snd /- @[simp] theorem pullbackZeroZeroIso_inv_snd (X Y : C) [HasBinaryProduct X Y] : (pullbackZeroZeroIso X Y).inv ≫ pullback.snd = prod.snd := by dsimp [pullback_zero_zero_iso] simp #align category_theory.limits.pullback_zero_zero_iso_inv_snd CategoryTheory.Limits.pullbackZeroZeroIso_inv_snd -/ #print CategoryTheory.Limits.pullbackZeroZeroIso_hom_fst /- @[simp] theorem pullbackZeroZeroIso_hom_fst (X Y : C) [HasBinaryProduct X Y] : (pullbackZeroZeroIso X Y).Hom ≫ prod.fst = pullback.fst := by simp [← iso.eq_inv_comp] #align category_theory.limits.pullback_zero_zero_iso_hom_fst CategoryTheory.Limits.pullbackZeroZeroIso_hom_fst -/ #print CategoryTheory.Limits.pullbackZeroZeroIso_hom_snd /- @[simp] theorem pullbackZeroZeroIso_hom_snd (X Y : C) [HasBinaryProduct X Y] : (pullbackZeroZeroIso X Y).Hom ≫ prod.snd = pullback.snd := by simp [← iso.eq_inv_comp] #align category_theory.limits.pullback_zero_zero_iso_hom_snd CategoryTheory.Limits.pullbackZeroZeroIso_hom_snd -/ #print CategoryTheory.Limits.hasPushout_over_zero /- instance hasPushout_over_zero (X Y : C) [HasBinaryCoproduct X Y] : HasPushout (0 : 0 ⟶ X) (0 : 0 ⟶ Y) := HasColimit.mk ⟨_, isPushoutOfIsInitialIsCoproduct _ _ _ _ HasZeroObject.zeroIsInitial (coprodIsCoprod X Y)⟩ #align category_theory.limits.has_pushout_over_zero CategoryTheory.Limits.hasPushout_over_zero -/ #print CategoryTheory.Limits.pushoutZeroZeroIso /- /-- The pushout over the zero object is the coproduct. 
-/ def pushoutZeroZeroIso (X Y : C) [HasBinaryCoproduct X Y] : pushout (0 : 0 ⟶ X) (0 : 0 ⟶ Y) ≅ X ⨿ Y := colimit.isoColimitCocone ⟨_, isPushoutOfIsInitialIsCoproduct _ _ _ _ HasZeroObject.zeroIsInitial (coprodIsCoprod X Y)⟩ #align category_theory.limits.pushout_zero_zero_iso CategoryTheory.Limits.pushoutZeroZeroIso -/ #print CategoryTheory.Limits.inl_pushoutZeroZeroIso_hom /- @[simp] theorem inl_pushoutZeroZeroIso_hom (X Y : C) [HasBinaryCoproduct X Y] : pushout.inl ≫ (pushoutZeroZeroIso X Y).Hom = coprod.inl := by dsimp [pushout_zero_zero_iso] simp #align category_theory.limits.inl_pushout_zero_zero_iso_hom CategoryTheory.Limits.inl_pushoutZeroZeroIso_hom -/ #print CategoryTheory.Limits.inr_pushoutZeroZeroIso_hom /- @[simp] theorem inr_pushoutZeroZeroIso_hom (X Y : C) [HasBinaryCoproduct X Y] : pushout.inr ≫ (pushoutZeroZeroIso X Y).Hom = coprod.inr := by dsimp [pushout_zero_zero_iso] simp #align category_theory.limits.inr_pushout_zero_zero_iso_hom CategoryTheory.Limits.inr_pushoutZeroZeroIso_hom -/ #print CategoryTheory.Limits.inl_pushoutZeroZeroIso_inv /- @[simp] theorem inl_pushoutZeroZeroIso_inv (X Y : C) [HasBinaryCoproduct X Y] : coprod.inl ≫ (pushoutZeroZeroIso X Y).inv = pushout.inl := by simp [iso.comp_inv_eq] #align category_theory.limits.inl_pushout_zero_zero_iso_inv CategoryTheory.Limits.inl_pushoutZeroZeroIso_inv -/ #print CategoryTheory.Limits.inr_pushoutZeroZeroIso_inv /- @[simp] theorem inr_pushoutZeroZeroIso_inv (X Y : C) [HasBinaryCoproduct X Y] : coprod.inr ≫ (pushoutZeroZeroIso X Y).inv = pushout.inr := by simp [iso.comp_inv_eq] #align category_theory.limits.inr_pushout_zero_zero_iso_inv CategoryTheory.Limits.inr_pushoutZeroZeroIso_inv -/ end CategoryTheory.Limits
[STATEMENT] lemma snocs_is_append: "snocs xs ys = xs @ ys" [PROOF STATE] proof (prove) goal (1 subgoal): 1. snocs xs ys = xs @ ys [PROOF STEP] unfolding snocs_def [PROOF STATE] proof (prove) goal (1 subgoal): 1. xs @ ys = xs @ ys [PROOF STEP] ..
State Before: l : Type u_1 m : Type u_2 n : Type u_3 o : Type u_4 m' : o → Type ?u.375873 n' : o → Type ?u.375878 R : Type ?u.375881 S : Type ?u.375884 α : Type v β : Type w γ : Type ?u.375891 inst✝² : NonUnitalSemiring α inst✝¹ : Fintype m inst✝ : Fintype n L : Matrix l m α M : Matrix m n α N : Matrix n o α ⊢ L ⬝ M ⬝ N = L ⬝ (M ⬝ N) State After: case a.h l : Type u_1 m : Type u_2 n : Type u_3 o : Type u_4 m' : o → Type ?u.375873 n' : o → Type ?u.375878 R : Type ?u.375881 S : Type ?u.375884 α : Type v β : Type w γ : Type ?u.375891 inst✝² : NonUnitalSemiring α inst✝¹ : Fintype m inst✝ : Fintype n L : Matrix l m α M : Matrix m n α N : Matrix n o α i✝ : l x✝ : o ⊢ (L ⬝ M ⬝ N) i✝ x✝ = (L ⬝ (M ⬝ N)) i✝ x✝ Tactic: ext State Before: case a.h l : Type u_1 m : Type u_2 n : Type u_3 o : Type u_4 m' : o → Type ?u.375873 n' : o → Type ?u.375878 R : Type ?u.375881 S : Type ?u.375884 α : Type v β : Type w γ : Type ?u.375891 inst✝² : NonUnitalSemiring α inst✝¹ : Fintype m inst✝ : Fintype n L : Matrix l m α M : Matrix m n α N : Matrix n o α i✝ : l x✝ : o ⊢ (L ⬝ M ⬝ N) i✝ x✝ = (L ⬝ (M ⬝ N)) i✝ x✝ State After: no goals Tactic: apply dotProduct_assoc
section \<open>\isaheader{Set Interface}\<close> theory Intf_Set imports "../../../Refine_Monadic/Refine_Monadic" begin consts i_set :: "interface \<Rightarrow> interface" lemmas [autoref_rel_intf] = REL_INTFI[of set_rel i_set] definition [simp]: "op_set_delete x s \<equiv> s - {x}" definition [simp]: "op_set_isEmpty s \<equiv> s = {}" definition [simp]: "op_set_isSng s \<equiv> card s = 1" definition [simp]: "op_set_size_abort m s \<equiv> min m (card s)" definition [simp]: "op_set_disjoint a b \<equiv> a\<inter>b={}" definition [simp]: "op_set_filter P s \<equiv> {x\<in>s. P x}" definition [simp]: "op_set_sel P s \<equiv> SPEC (\<lambda>x. x\<in>s \<and> P x)" definition [simp]: "op_set_pick s \<equiv> SPEC (\<lambda>x. x\<in>s)" definition [simp]: "op_set_to_sorted_list ordR s \<equiv> SPEC (\<lambda>l. set l = s \<and> distinct l \<and> sorted_wrt ordR l)" definition [simp]: "op_set_to_list s \<equiv> SPEC (\<lambda>l. set l = s \<and> distinct l)" definition [simp]: "op_set_cart x y \<equiv> x \<times> y" (* TODO: Do op_set_pick_remove (like op_map_pick_remove) *) context begin interpretation autoref_syn . "s = {} \<equiv> op_set_isEmpty$s" "{}=s \<equiv> op_set_isEmpty$s" "card s = 1 \<equiv> op_set_isSng$s" "\<exists>x. s={x} \<equiv> op_set_isSng$s" "\<exists>x. {x}=s \<equiv> op_set_isSng$s" "min m (card s) \<equiv> op_set_size_abort$m$s" "min (card s) m \<equiv> op_set_size_abort$m$s" "a\<inter>b={} \<equiv> op_set_disjoint$a$b" "{x\<in>s. P x} \<equiv> op_set_filter$P$s" "SPEC (\<lambda>x. x\<in>s \<and> P x) \<equiv> op_set_sel$P$s" "SPEC (\<lambda>x. P x \<and> x\<in>s) \<equiv> op_set_sel$P$s" "SPEC (\<lambda>x. x\<in>s) \<equiv> op_set_pick$s" by (auto intro!: eq_reflection simp: card_Suc_eq) lemma [autoref_op_pat]: "a \<times> b \<equiv> op_set_cart a b" by (auto intro!: eq_reflection simp: card_Suc_eq) lemma [autoref_op_pat]: "SPEC (\<lambda>(u,v). (u,v)\<in>s) \<equiv> op_set_pick$s" "SPEC (\<lambda>(u,v). P u v \<and> (u,v)\<in>s) \<equiv> op_set_sel$(case_prod P)$s" "SPEC (\<lambda>(u,v). (u,v)\<in>s \<and> P u v) \<equiv> op_set_sel$(case_prod P)$s" by (auto intro!: eq_reflection) lemma [autoref_op_pat]: "SPEC (\<lambda>l. set l = s \<and> distinct l \<and> sorted_wrt ordR l) \<equiv> OP (op_set_to_sorted_list ordR)$s" "SPEC (\<lambda>l. set l = s \<and> sorted_wrt ordR l \<and> distinct l) \<equiv> OP (op_set_to_sorted_list ordR)$s" "SPEC (\<lambda>l. distinct l \<and> set l = s \<and> sorted_wrt ordR l) \<equiv> OP (op_set_to_sorted_list ordR)$s" "SPEC (\<lambda>l. distinct l \<and> sorted_wrt ordR l \<and> set l = s) \<equiv> OP (op_set_to_sorted_list ordR)$s" "SPEC (\<lambda>l. sorted_wrt ordR l \<and> distinct l \<and> set l = s) \<equiv> OP (op_set_to_sorted_list ordR)$s" "SPEC (\<lambda>l. sorted_wrt ordR l \<and> set l = s \<and> distinct l) \<equiv> OP (op_set_to_sorted_list ordR)$s" "SPEC (\<lambda>l. s = set l \<and> distinct l \<and> sorted_wrt ordR l) \<equiv> OP (op_set_to_sorted_list ordR)$s" "SPEC (\<lambda>l. s = set l \<and> sorted_wrt ordR l \<and> distinct l) \<equiv> OP (op_set_to_sorted_list ordR)$s" "SPEC (\<lambda>l. distinct l \<and> s = set l \<and> sorted_wrt ordR l) \<equiv> OP (op_set_to_sorted_list ordR)$s" "SPEC (\<lambda>l. distinct l \<and> sorted_wrt ordR l \<and> s = set l) \<equiv> OP (op_set_to_sorted_list ordR)$s" "SPEC (\<lambda>l. sorted_wrt ordR l \<and> distinct l \<and> s = set l) \<equiv> OP (op_set_to_sorted_list ordR)$s" "SPEC (\<lambda>l. 
sorted_wrt ordR l \<and> s = set l \<and> distinct l) \<equiv> OP (op_set_to_sorted_list ordR)$s" "SPEC (\<lambda>l. set l = s \<and> distinct l) \<equiv> op_set_to_list$s" "SPEC (\<lambda>l. distinct l \<and> set l = s) \<equiv> op_set_to_list$s" "SPEC (\<lambda>l. s = set l \<and> distinct l) \<equiv> op_set_to_list$s" "SPEC (\<lambda>l. distinct l \<and> s = set l) \<equiv> op_set_to_list$s" by (auto intro!: eq_reflection) end lemma [autoref_itype]: "{} ::\<^sub>i \<langle>I\<rangle>\<^sub>ii_set" "insert ::\<^sub>i I \<rightarrow>\<^sub>i \<langle>I\<rangle>\<^sub>ii_set \<rightarrow>\<^sub>i \<langle>I\<rangle>\<^sub>ii_set" "op_set_delete ::\<^sub>i I \<rightarrow>\<^sub>i \<langle>I\<rangle>\<^sub>ii_set \<rightarrow>\<^sub>i \<langle>I\<rangle>\<^sub>ii_set" "(\<in>) ::\<^sub>i I \<rightarrow>\<^sub>i \<langle>I\<rangle>\<^sub>ii_set \<rightarrow>\<^sub>i i_bool" "op_set_isEmpty ::\<^sub>i \<langle>I\<rangle>\<^sub>ii_set \<rightarrow>\<^sub>i i_bool" "op_set_isSng ::\<^sub>i \<langle>I\<rangle>\<^sub>ii_set \<rightarrow>\<^sub>i i_bool" "(\<union>) ::\<^sub>i \<langle>I\<rangle>\<^sub>ii_set \<rightarrow>\<^sub>i \<langle>I\<rangle>\<^sub>ii_set \<rightarrow>\<^sub>i \<langle>I\<rangle>\<^sub>ii_set" "(\<inter>) ::\<^sub>i \<langle>I\<rangle>\<^sub>ii_set \<rightarrow>\<^sub>i \<langle>I\<rangle>\<^sub>ii_set \<rightarrow>\<^sub>i \<langle>I\<rangle>\<^sub>ii_set" "((-) :: 'a set \<Rightarrow> 'a set \<Rightarrow> 'a set) ::\<^sub>i \<langle>I\<rangle>\<^sub>ii_set \<rightarrow>\<^sub>i \<langle>I\<rangle>\<^sub>ii_set \<rightarrow>\<^sub>i \<langle>I\<rangle>\<^sub>ii_set" "((=) :: 'a set \<Rightarrow> 'a set \<Rightarrow> bool) ::\<^sub>i \<langle>I\<rangle>\<^sub>ii_set \<rightarrow>\<^sub>i \<langle>I\<rangle>\<^sub>ii_set \<rightarrow>\<^sub>i i_bool" "(\<subseteq>) ::\<^sub>i \<langle>I\<rangle>\<^sub>ii_set \<rightarrow>\<^sub>i \<langle>I\<rangle>\<^sub>ii_set \<rightarrow>\<^sub>i i_bool" "op_set_disjoint ::\<^sub>i \<langle>I\<rangle>\<^sub>ii_set \<rightarrow>\<^sub>i \<langle>I\<rangle>\<^sub>ii_set \<rightarrow>\<^sub>i i_bool" "Ball ::\<^sub>i \<langle>I\<rangle>\<^sub>ii_set \<rightarrow>\<^sub>i (I \<rightarrow>\<^sub>i i_bool) \<rightarrow>\<^sub>i i_bool" "Bex ::\<^sub>i \<langle>I\<rangle>\<^sub>ii_set \<rightarrow>\<^sub>i (I \<rightarrow>\<^sub>i i_bool) \<rightarrow>\<^sub>i i_bool" "op_set_filter ::\<^sub>i (I \<rightarrow>\<^sub>i i_bool) \<rightarrow>\<^sub>i \<langle>I\<rangle>\<^sub>ii_set \<rightarrow>\<^sub>i \<langle>I\<rangle>\<^sub>ii_set" "card ::\<^sub>i \<langle>I\<rangle>\<^sub>ii_set \<rightarrow>\<^sub>i i_nat" "op_set_size_abort ::\<^sub>i i_nat \<rightarrow>\<^sub>i \<langle>I\<rangle>\<^sub>ii_set \<rightarrow>\<^sub>i i_nat" "set ::\<^sub>i \<langle>I\<rangle>\<^sub>ii_list \<rightarrow>\<^sub>i \<langle>I\<rangle>\<^sub>ii_set" "op_set_sel ::\<^sub>i (I \<rightarrow>\<^sub>i i_bool) \<rightarrow>\<^sub>i \<langle>I\<rangle>\<^sub>ii_set \<rightarrow>\<^sub>i \<langle>I\<rangle>\<^sub>ii_nres" "op_set_pick ::\<^sub>i \<langle>I\<rangle>\<^sub>ii_set \<rightarrow>\<^sub>i \<langle>I\<rangle>\<^sub>ii_nres" "Sigma ::\<^sub>i \<langle>Ia\<rangle>\<^sub>ii_set \<rightarrow>\<^sub>i (Ia \<rightarrow>\<^sub>i \<langle>Ib\<rangle>\<^sub>ii_set) \<rightarrow>\<^sub>i \<langle>\<langle>Ia,Ib\<rangle>\<^sub>ii_prod\<rangle>\<^sub>ii_set" "(`) ::\<^sub>i (Ia\<rightarrow>\<^sub>iIb) \<rightarrow>\<^sub>i \<langle>Ia\<rangle>\<^sub>ii_set \<rightarrow>\<^sub>i \<langle>Ib\<rangle>\<^sub>ii_set" "op_set_cart ::\<^sub>i \<langle>Ix\<rangle>\<^sub>iIsx 
\<rightarrow>\<^sub>i \<langle>Iy\<rangle>\<^sub>iIsy \<rightarrow>\<^sub>i \<langle>\<langle>Ix, Iy\<rangle>\<^sub>ii_prod\<rangle>\<^sub>iIsp" "Union ::\<^sub>i \<langle>\<langle>I\<rangle>\<^sub>ii_set\<rangle>\<^sub>ii_set \<rightarrow>\<^sub>i \<langle>I\<rangle>\<^sub>ii_set" "atLeastLessThan ::\<^sub>i i_nat \<rightarrow>\<^sub>i i_nat \<rightarrow>\<^sub>i \<langle>i_nat\<rangle>\<^sub>ii_set" by simp_all lemma hom_set1[autoref_hom]: "CONSTRAINT {} (\<langle>R\<rangle>Rs)" "CONSTRAINT insert (R\<rightarrow>\<langle>R\<rangle>Rs\<rightarrow>\<langle>R\<rangle>Rs)" "CONSTRAINT (\<in>) (R\<rightarrow>\<langle>R\<rangle>Rs\<rightarrow>Id)" "CONSTRAINT (\<union>) (\<langle>R\<rangle>Rs\<rightarrow>\<langle>R\<rangle>Rs\<rightarrow>\<langle>R\<rangle>Rs)" "CONSTRAINT (\<inter>) (\<langle>R\<rangle>Rs\<rightarrow>\<langle>R\<rangle>Rs\<rightarrow>\<langle>R\<rangle>Rs)" "CONSTRAINT (-) (\<langle>R\<rangle>Rs\<rightarrow>\<langle>R\<rangle>Rs\<rightarrow>\<langle>R\<rangle>Rs)" "CONSTRAINT (=) (\<langle>R\<rangle>Rs\<rightarrow>\<langle>R\<rangle>Rs\<rightarrow>Id)" "CONSTRAINT (\<subseteq>) (\<langle>R\<rangle>Rs\<rightarrow>\<langle>R\<rangle>Rs\<rightarrow>Id)" "CONSTRAINT Ball (\<langle>R\<rangle>Rs\<rightarrow>(R\<rightarrow>Id)\<rightarrow>Id)" "CONSTRAINT Bex (\<langle>R\<rangle>Rs\<rightarrow>(R\<rightarrow>Id)\<rightarrow>Id)" "CONSTRAINT card (\<langle>R\<rangle>Rs\<rightarrow>Id)" "CONSTRAINT set (\<langle>R\<rangle>Rl\<rightarrow>\<langle>R\<rangle>Rs)" "CONSTRAINT (`) ((Ra\<rightarrow>Rb) \<rightarrow> \<langle>Ra\<rangle>Rs\<rightarrow>\<langle>Rb\<rangle>Rs)" "CONSTRAINT Union (\<langle>\<langle>R\<rangle>Ri\<rangle>Ro \<rightarrow> \<langle>R\<rangle>Ri)" by simp_all lemma hom_set2[autoref_hom]: "CONSTRAINT op_set_delete (R\<rightarrow>\<langle>R\<rangle>Rs\<rightarrow>\<langle>R\<rangle>Rs)" "CONSTRAINT op_set_isEmpty (\<langle>R\<rangle>Rs\<rightarrow>Id)" "CONSTRAINT op_set_isSng (\<langle>R\<rangle>Rs\<rightarrow>Id)" "CONSTRAINT op_set_size_abort (Id\<rightarrow>\<langle>R\<rangle>Rs\<rightarrow>Id)" "CONSTRAINT op_set_disjoint (\<langle>R\<rangle>Rs\<rightarrow>\<langle>R\<rangle>Rs\<rightarrow>Id)" "CONSTRAINT op_set_filter ((R\<rightarrow>Id)\<rightarrow>\<langle>R\<rangle>Rs\<rightarrow>\<langle>R\<rangle>Rs)" "CONSTRAINT op_set_sel ((R \<rightarrow> Id)\<rightarrow>\<langle>R\<rangle>Rs\<rightarrow>\<langle>R\<rangle>Rn)" "CONSTRAINT op_set_pick (\<langle>R\<rangle>Rs\<rightarrow>\<langle>R\<rangle>Rn)" by simp_all lemma hom_set_Sigma[autoref_hom]: "CONSTRAINT Sigma (\<langle>Ra\<rangle>Rs \<rightarrow> (Ra \<rightarrow> \<langle>Rb\<rangle>Rs) \<rightarrow> \<langle>\<langle>Ra,Rb\<rangle>prod_rel\<rangle>Rs2)" by simp_all definition "finite_set_rel R \<equiv> Range R \<subseteq> Collect (finite)" lemma finite_set_rel_trigger: "finite_set_rel R \<Longrightarrow> finite_set_rel R" . declaration \<open>Tagged_Solver.add_triggers "Relators.relator_props_solver" @{thms finite_set_rel_trigger}\<close> end
/- Copyright (c) 2017 Microsoft Corporation. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Leonardo de Moura -/ import Mathlib.PrePort import Mathlib.Lean3Lib.init.logic import Mathlib.Lean3Lib.init.data.ordering.basic universes u v l namespace Mathlib class is_symm_op (α : Type u) (β : outParam (Type v)) (op : α → α → β) where symm_op : ∀ (a b : α), op a b = op b a class is_commutative (α : Type u) (op : α → α → α) where comm : ∀ (a b : α), op a b = op b a protected instance is_symm_op_of_is_commutative (α : Type u) (op : α → α → α) [is_commutative α op] : is_symm_op α α op := is_symm_op.mk is_commutative.comm class is_associative (α : Type u) (op : α → α → α) where assoc : ∀ (a b c : α), op (op a b) c = op a (op b c) class is_left_id (α : Type u) (op : α → α → α) (o : outParam α) where left_id : ∀ (a : α), op o a = a class is_right_id (α : Type u) (op : α → α → α) (o : outParam α) where right_id : ∀ (a : α), op a o = a class is_left_null (α : Type u) (op : α → α → α) (o : outParam α) where left_null : ∀ (a : α), op o a = o class is_right_null (α : Type u) (op : α → α → α) (o : outParam α) where right_null : ∀ (a : α), op a o = o class is_left_cancel (α : Type u) (op : α → α → α) where left_cancel : ∀ (a b c : α), op a b = op a c → b = c class is_right_cancel (α : Type u) (op : α → α → α) where right_cancel : ∀ (a b c : α), op a b = op c b → a = c class is_idempotent (α : Type u) (op : α → α → α) where idempotent : ∀ (a : α), op a a = a class is_left_distrib (α : Type u) (op₁ : α → α → α) (op₂ : outParam (α → α → α)) where left_distrib : ∀ (a b c : α), op₁ a (op₂ b c) = op₂ (op₁ a b) (op₁ a c) class is_right_distrib (α : Type u) (op₁ : α → α → α) (op₂ : outParam (α → α → α)) where right_distrib : ∀ (a b c : α), op₁ (op₂ a b) c = op₂ (op₁ a c) (op₁ b c) class is_left_inv (α : Type u) (op : α → α → α) (inv : outParam (α → α)) (o : outParam α) where left_inv : ∀ (a : α), op (inv a) a = o class is_right_inv (α : Type u) (op : α → α → α) (inv : outParam (α → α)) (o : outParam α) where right_inv : ∀ (a : α), op a (inv a) = o class is_cond_left_inv (α : Type u) (op : α → α → α) (inv : outParam (α → α)) (o : outParam α) (p : outParam (α → Prop)) where left_inv : ∀ (a : α), p a → op (inv a) a = o class is_cond_right_inv (α : Type u) (op : α → α → α) (inv : outParam (α → α)) (o : outParam α) (p : outParam (α → Prop)) where right_inv : ∀ (a : α), p a → op a (inv a) = o class is_distinct (α : Type u) (a : α) (b : α) where distinct : a ≠ b /- -- The following type class doesn't seem very useful, a regular simp lemma should work for this. -- The following type class doesn't seem very useful, a regular simp lemma should work for this. class is_inv (α : Type u) (β : Type v) (f : α → β) (g : out β → α) : Prop := (inv : ∀ a, g (f a) = a) -- The following one can also be handled using a regular simp lemma -- The following one can also be handled using a regular simp lemma class is_idempotent (α : Type u) (f : α → α) : Prop := (idempotent : ∀ a, f (f a) = f a) -/ /-- `is_irrefl X r` means the binary relation `r` on `X` is irreflexive (that is, `r x x` never holds). -/ class is_irrefl (α : Type u) (r : α → α → Prop) where irrefl : ∀ (a : α), ¬r a a /-- `is_refl X r` means the binary relation `r` on `X` is reflexive. -/ class is_refl (α : Type u) (r : α → α → Prop) where refl : ∀ (a : α), r a a /-- `is_symm X r` means the binary relation `r` on `X` is symmetric. 
-/ class is_symm (α : Type u) (r : α → α → Prop) where symm : ∀ (a b : α), r a b → r b a /-- The opposite of a symmetric relation is symmetric. -/ protected instance is_symm_op_of_is_symm (α : Type u) (r : α → α → Prop) [is_symm α r] : is_symm_op α Prop r := is_symm_op.mk fun (a b : α) => propext { mp := is_symm.symm a b, mpr := is_symm.symm b a } /-- `is_asymm X r` means that the binary relation `r` on `X` is asymmetric, that is, `r a b → ¬ r b a`. -/ class is_asymm (α : Type u) (r : α → α → Prop) where asymm : ∀ (a b : α), r a b → ¬r b a /-- `is_antisymm X r` means the binary relation `r` on `X` is antisymmetric. -/ class is_antisymm (α : Type u) (r : α → α → Prop) where antisymm : ∀ (a b : α), r a b → r b a → a = b /-- `is_trans X r` means the binary relation `r` on `X` is transitive. -/ class is_trans (α : Type u) (r : α → α → Prop) where trans : ∀ (a b c : α), r a b → r b c → r a c /-- `is_total X r` means that the binary relation `r` on `X` is total, that is, that for any `x y : X` we have `r x y` or `r y x`.-/ class is_total (α : Type u) (r : α → α → Prop) where total : ∀ (a b : α), r a b ∨ r b a /-- `is_preorder X r` means that the binary relation `r` on `X` is a pre-order, that is, reflexive and transitive. -/ class is_preorder (α : Type u) (r : α → α → Prop) extends is_refl α r, is_trans α r where /-- `is_total_preorder X r` means that the binary relation `r` on `X` is total and a preorder. -/ class is_total_preorder (α : Type u) (r : α → α → Prop) extends is_trans α r, is_total α r where /-- Every total pre-order is a pre-order. -/ protected instance is_total_preorder_is_preorder (α : Type u) (r : α → α → Prop) [s : is_total_preorder α r] : is_preorder α r := is_preorder.mk class is_partial_order (α : Type u) (r : α → α → Prop) extends is_antisymm α r, is_preorder α r where class is_linear_order (α : Type u) (r : α → α → Prop) extends is_total α r, is_partial_order α r where class is_equiv (α : Type u) (r : α → α → Prop) extends is_symm α r, is_preorder α r where class is_per (α : Type u) (r : α → α → Prop) extends is_symm α r, is_trans α r where class is_strict_order (α : Type u) (r : α → α → Prop) extends is_trans α r, is_irrefl α r where class is_incomp_trans (α : Type u) (lt : α → α → Prop) where incomp_trans : ∀ (a b c : α), ¬lt a b ∧ ¬lt b a → ¬lt b c ∧ ¬lt c b → ¬lt a c ∧ ¬lt c a class is_strict_weak_order (α : Type u) (lt : α → α → Prop) extends is_incomp_trans α lt, is_strict_order α lt where class is_trichotomous (α : Type u) (lt : α → α → Prop) where trichotomous : ∀ (a b : α), lt a b ∨ a = b ∨ lt b a class is_strict_total_order (α : Type u) (lt : α → α → Prop) extends is_strict_weak_order α lt, is_trichotomous α lt where protected instance eq_is_equiv (α : Type u) : is_equiv α Eq := is_equiv.mk theorem irrefl {α : Type u} {r : α → α → Prop} [is_irrefl α r] (a : α) : ¬r a a := is_irrefl.irrefl a theorem refl {α : Type u} {r : α → α → Prop} [is_refl α r] (a : α) : r a a := is_refl.refl a theorem trans {α : Type u} {r : α → α → Prop} [is_trans α r] {a : α} {b : α} {c : α} : r a b → r b c → r a c := is_trans.trans a b c theorem symm {α : Type u} {r : α → α → Prop} [is_symm α r] {a : α} {b : α} : r a b → r b a := is_symm.symm a b theorem antisymm {α : Type u} {r : α → α → Prop} [is_antisymm α r] {a : α} {b : α} : r a b → r b a → a = b := is_antisymm.antisymm a b theorem asymm {α : Type u} {r : α → α → Prop} [is_asymm α r] {a : α} {b : α} : r a b → ¬r b a := is_asymm.asymm a b theorem trichotomous {α : Type u} {r : α → α → Prop} [is_trichotomous α r] (a : α) (b : α) : r a 
b ∨ a = b ∨ r b a := is_trichotomous.trichotomous theorem incomp_trans {α : Type u} {r : α → α → Prop} [is_incomp_trans α r] {a : α} {b : α} {c : α} : ¬r a b ∧ ¬r b a → ¬r b c ∧ ¬r c b → ¬r a c ∧ ¬r c a := is_incomp_trans.incomp_trans a b c protected instance is_asymm_of_is_trans_of_is_irrefl {α : Type u} {r : α → α → Prop} [is_trans α r] [is_irrefl α r] : is_asymm α r := is_asymm.mk fun (a b : α) (h₁ : r a b) (h₂ : r b a) => absurd (trans h₁ h₂) (irrefl a) theorem irrefl_of {α : Type u} (r : α → α → Prop) [is_irrefl α r] (a : α) : ¬r a a := irrefl a theorem refl_of {α : Type u} (r : α → α → Prop) [is_refl α r] (a : α) : r a a := refl a theorem trans_of {α : Type u} (r : α → α → Prop) [is_trans α r] {a : α} {b : α} {c : α} : r a b → r b c → r a c := trans theorem symm_of {α : Type u} (r : α → α → Prop) [is_symm α r] {a : α} {b : α} : r a b → r b a := symm theorem asymm_of {α : Type u} (r : α → α → Prop) [is_asymm α r] {a : α} {b : α} : r a b → ¬r b a := asymm theorem total_of {α : Type u} (r : α → α → Prop) [is_total α r] (a : α) (b : α) : r a b ∨ r b a := is_total.total a b theorem trichotomous_of {α : Type u} (r : α → α → Prop) [is_trichotomous α r] (a : α) (b : α) : r a b ∨ a = b ∨ r b a := trichotomous theorem incomp_trans_of {α : Type u} (r : α → α → Prop) [is_incomp_trans α r] {a : α} {b : α} {c : α} : ¬r a b ∧ ¬r b a → ¬r b c ∧ ¬r c b → ¬r a c ∧ ¬r c a := incomp_trans namespace strict_weak_order def equiv {α : Type u} {r : α → α → Prop} (a : α) (b : α) := ¬r a b ∧ ¬r b a theorem erefl {α : Type u} {r : α → α → Prop} [is_strict_weak_order α r] (a : α) : equiv a a := { left := irrefl a, right := irrefl a } theorem esymm {α : Type u} {r : α → α → Prop} [is_strict_weak_order α r] {a : α} {b : α} : equiv a b → equiv b a := sorry theorem etrans {α : Type u} {r : α → α → Prop} [is_strict_weak_order α r] {a : α} {b : α} {c : α} : equiv a b → equiv b c → equiv a c := incomp_trans theorem not_lt_of_equiv {α : Type u} {r : α → α → Prop} [is_strict_weak_order α r] {a : α} {b : α} : equiv a b → ¬r a b := fun (h : equiv a b) => and.left h theorem not_lt_of_equiv' {α : Type u} {r : α → α → Prop} [is_strict_weak_order α r] {a : α} {b : α} : equiv a b → ¬r b a := fun (h : equiv a b) => and.right h protected instance is_equiv {α : Type u} {r : α → α → Prop} [is_strict_weak_order α r] : is_equiv α equiv := is_equiv.mk /- Notation for the equivalence relation induced by lt -/ end strict_weak_order theorem is_strict_weak_order_of_is_total_preorder {α : Type u} {le : α → α → Prop} {lt : α → α → Prop} [DecidableRel le] [s : is_total_preorder α le] (h : ∀ (a b : α), lt a b ↔ ¬le b a) : is_strict_weak_order α lt := is_strict_weak_order.mk theorem lt_of_lt_of_incomp {α : Type u} {lt : α → α → Prop} [is_strict_weak_order α lt] [DecidableRel lt] {a : α} {b : α} {c : α} : lt a b → ¬lt b c ∧ ¬lt c b → lt a c := sorry theorem lt_of_incomp_of_lt {α : Type u} {lt : α → α → Prop} [is_strict_weak_order α lt] [DecidableRel lt] {a : α} {b : α} {c : α} : ¬lt a b ∧ ¬lt b a → lt b c → lt a c := sorry theorem eq_of_incomp {α : Type u} {lt : α → α → Prop} [is_trichotomous α lt] {a : α} {b : α} : ¬lt a b ∧ ¬lt b a → a = b := sorry theorem eq_of_eqv_lt {α : Type u} {lt : α → α → Prop} [is_trichotomous α lt] {a : α} {b : α} : strict_weak_order.equiv a b → a = b := eq_of_incomp theorem incomp_iff_eq {α : Type u} {lt : α → α → Prop} [is_trichotomous α lt] [is_irrefl α lt] (a : α) (b : α) : ¬lt a b ∧ ¬lt b a ↔ a = b := { mp := eq_of_incomp, mpr := fun (hab : a = b) => hab ▸ { left := irrefl_of lt a, right := irrefl_of lt a } } 
theorem eqv_lt_iff_eq {α : Type u} {lt : α → α → Prop} [is_trichotomous α lt] [is_irrefl α lt] (a : α) (b : α) : strict_weak_order.equiv a b ↔ a = b := incomp_iff_eq a b theorem not_lt_of_lt {α : Type u} {lt : α → α → Prop} [is_strict_order α lt] {a : α} {b : α} : lt a b → ¬lt b a := fun (h₁ : lt a b) (h₂ : lt b a) => absurd (trans_of lt h₁ h₂) (irrefl_of lt a) end Mathlib
If $a$ is not in the topological space $X$, then the set of neighborhoods of $a$ in $X$ is empty.
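The row above carries only the informal half of a formal/informal statement pair; its formal counterpart is not included here and is left as-is. As a clarifying sketch only (not the source lemma; the set-builder notation below is introduced purely for illustration), the claim can be written symbolically:
% Hedged restatement of the informal claim above; the notation is illustrative only.
a \notin X \;\Longrightarrow\; \{\, N \mid N \text{ is a neighborhood of } a \text{ in } X \,\} = \varnothing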
Formal statement is: lemma collinear_subset: "\<lbrakk>collinear T; S \<subseteq> T\<rbrakk> \<Longrightarrow> collinear S" Informal statement is: If $T$ is a set of collinear points and $S$ is a subset of $T$, then $S$ is a set of collinear points.
-- @@stderr -- dtrace: failed to compile script test/unittest/inline/err.D_IDENT_UNDEF.recur.d: [D_IDENT_UNDEF] line 20: failed to resolve foo: Unknown variable name
[STATEMENT] lemma not_malformed: "x \<in> (env_dom e) \<Longrightarrow> \<exists>fun. e = Env fun" [PROOF STATE] proof (prove) goal (1 subgoal): 1. x \<in> env_dom e \<Longrightarrow> \<exists>fun. e = Env fun [PROOF STEP] by (cases e, simp_all)
(* Copyright (C) 2017 M.A.L. Marques This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. *) (* type: gga_exc *) (* prefix: gga_x_hjs_params *params; assert(p->params != NULL); params = (gga_x_hjs_params * )(p->params); *) $include "gga_x_hjs.mpl" hjs2_xi := 1/(exp(20) - 1): hjs2_fs := s -> -log((exp(-s) + hjs2_xi)/(1 + hjs2_xi)): hjs_fx := (rs, z, x) -> hjs_f1(rs, z, hjs2_fs(X2S*x)):
[STATEMENT] lemma (in category) cat_Yoneda_arrow_is_ntcf: assumes "\<KK> : \<CC> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> cat_Set \<alpha>" and "r \<in>\<^sub>\<circ> \<CC>\<lparr>Obj\<rparr>" and "u \<in>\<^sub>\<circ> \<KK>\<lparr>ObjMap\<rparr>\<lparr>r\<rparr>" shows "Yoneda_arrow \<alpha> \<KK> r u : Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(r,-) \<mapsto>\<^sub>C\<^sub>F \<KK> : \<CC> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> cat_Set \<alpha>" [PROOF STATE] proof (prove) goal (1 subgoal): 1. Yoneda_arrow \<alpha> \<KK> r u : Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(r,-) \<mapsto>\<^sub>C\<^sub>F \<KK> : \<CC> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> cat_Set \<alpha> [PROOF STEP] proof- [PROOF STATE] proof (state) goal (1 subgoal): 1. Yoneda_arrow \<alpha> \<KK> r u : Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(r,-) \<mapsto>\<^sub>C\<^sub>F \<KK> : \<CC> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> cat_Set \<alpha> [PROOF STEP] interpret \<KK>: is_functor \<alpha> \<CC> \<open>cat_Set \<alpha>\<close> \<KK> [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<KK> : \<CC> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> cat_Set \<alpha> [PROOF STEP] by (rule assms(1)) [PROOF STATE] proof (state) goal (1 subgoal): 1. Yoneda_arrow \<alpha> \<KK> r u : Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(r,-) \<mapsto>\<^sub>C\<^sub>F \<KK> : \<CC> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> cat_Set \<alpha> [PROOF STEP] note \<KK>ru = cat_Yoneda_component_is_arr[OF assms] [PROOF STATE] proof (state) this: ?d \<in>\<^sub>\<circ> \<CC>\<lparr>Obj\<rparr> \<Longrightarrow> Yoneda_component \<KK> r u ?d : Hom \<CC> r ?d \<mapsto>\<^bsub>cat_Set \<alpha>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>?d\<rparr> goal (1 subgoal): 1. Yoneda_arrow \<alpha> \<KK> r u : Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(r,-) \<mapsto>\<^sub>C\<^sub>F \<KK> : \<CC> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> cat_Set \<alpha> [PROOF STEP] let ?\<KK>ru = \<open>Yoneda_component \<KK> r u\<close> [PROOF STATE] proof (state) goal (1 subgoal): 1. Yoneda_arrow \<alpha> \<KK> r u : Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(r,-) \<mapsto>\<^sub>C\<^sub>F \<KK> : \<CC> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> cat_Set \<alpha> [PROOF STEP] show ?thesis [PROOF STATE] proof (prove) goal (1 subgoal): 1. Yoneda_arrow \<alpha> \<KK> r u : Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(r,-) \<mapsto>\<^sub>C\<^sub>F \<KK> : \<CC> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> cat_Set \<alpha> [PROOF STEP] proof(intro is_ntcfI', unfold \<KK>.Yoneda_arrow_components) [PROOF STATE] proof (state) goal (13 subgoals): 1. \<Z> \<alpha> 2. vfsequence (Yoneda_arrow \<alpha> \<KK> r u) 3. vcard (Yoneda_arrow \<alpha> \<KK> r u) = 5\<^sub>\<nat> 4. Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(r,-) : \<CC> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> cat_Set \<alpha> 5. \<KK> : \<CC> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> cat_Set \<alpha> 6. Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(r,-) = Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(r,-) 7. \<KK> = \<KK> 8. \<CC> = \<CC> 9. cat_Set \<alpha> = cat_Set \<alpha> 10. vsv (VLambda (\<CC>\<lparr>Obj\<rparr>) (Yoneda_component \<KK> r u)) A total of 13 subgoals... [PROOF STEP] show "vfsequence (Yoneda_arrow \<alpha> \<KK> r u)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. 
vfsequence (Yoneda_arrow \<alpha> \<KK> r u) [PROOF STEP] unfolding Yoneda_arrow_def [PROOF STATE] proof (prove) goal (1 subgoal): 1. vfsequence [VLambda (\<KK>\<lparr>HomDom\<rparr>\<lparr>Obj\<rparr>) (Yoneda_component \<KK> r u), Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<KK>\<lparr>HomDom\<rparr>(r,-), \<KK>, \<KK>\<lparr>HomDom\<rparr>, cat_Set \<alpha>]\<^sub>\<circ> [PROOF STEP] by simp [PROOF STATE] proof (state) this: vfsequence (Yoneda_arrow \<alpha> \<KK> r u) goal (12 subgoals): 1. \<Z> \<alpha> 2. vcard (Yoneda_arrow \<alpha> \<KK> r u) = 5\<^sub>\<nat> 3. Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(r,-) : \<CC> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> cat_Set \<alpha> 4. \<KK> : \<CC> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> cat_Set \<alpha> 5. Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(r,-) = Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(r,-) 6. \<KK> = \<KK> 7. \<CC> = \<CC> 8. cat_Set \<alpha> = cat_Set \<alpha> 9. vsv (VLambda (\<CC>\<lparr>Obj\<rparr>) (Yoneda_component \<KK> r u)) 10. \<D>\<^sub>\<circ> (VLambda (\<CC>\<lparr>Obj\<rparr>) (Yoneda_component \<KK> r u)) = \<CC>\<lparr>Obj\<rparr> A total of 12 subgoals... [PROOF STEP] show "vcard (Yoneda_arrow \<alpha> \<KK> r u) = 5\<^sub>\<nat>" [PROOF STATE] proof (prove) goal (1 subgoal): 1. vcard (Yoneda_arrow \<alpha> \<KK> r u) = 5\<^sub>\<nat> [PROOF STEP] unfolding Yoneda_arrow_def [PROOF STATE] proof (prove) goal (1 subgoal): 1. vcard [VLambda (\<KK>\<lparr>HomDom\<rparr>\<lparr>Obj\<rparr>) (Yoneda_component \<KK> r u), Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<KK>\<lparr>HomDom\<rparr>(r,-), \<KK>, \<KK>\<lparr>HomDom\<rparr>, cat_Set \<alpha>]\<^sub>\<circ> = 5\<^sub>\<nat> [PROOF STEP] by (simp add: nat_omega_simps) [PROOF STATE] proof (state) this: vcard (Yoneda_arrow \<alpha> \<KK> r u) = 5\<^sub>\<nat> goal (11 subgoals): 1. \<Z> \<alpha> 2. Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(r,-) : \<CC> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> cat_Set \<alpha> 3. \<KK> : \<CC> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> cat_Set \<alpha> 4. Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(r,-) = Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(r,-) 5. \<KK> = \<KK> 6. \<CC> = \<CC> 7. cat_Set \<alpha> = cat_Set \<alpha> 8. vsv (VLambda (\<CC>\<lparr>Obj\<rparr>) (Yoneda_component \<KK> r u)) 9. \<D>\<^sub>\<circ> (VLambda (\<CC>\<lparr>Obj\<rparr>) (Yoneda_component \<KK> r u)) = \<CC>\<lparr>Obj\<rparr> 10. \<And>a. a \<in>\<^sub>\<circ> \<CC>\<lparr>Obj\<rparr> \<Longrightarrow> VLambda (\<CC>\<lparr>Obj\<rparr>) (Yoneda_component \<KK> r u)\<lparr>a\<rparr> : Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(r,-)\<lparr>ObjMap\<rparr>\<lparr>a\<rparr> \<mapsto>\<^bsub>cat_Set \<alpha>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>a\<rparr> A total of 11 subgoals... [PROOF STEP] show "(\<lambda>d\<in>\<^sub>\<circ>\<CC>\<lparr>Obj\<rparr>. ?\<KK>ru d)\<lparr>a\<rparr> : Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(r,-)\<lparr>ObjMap\<rparr>\<lparr>a\<rparr> \<mapsto>\<^bsub>cat_Set \<alpha>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>a\<rparr>" if "a \<in>\<^sub>\<circ> \<CC>\<lparr>Obj\<rparr>" for a [PROOF STATE] proof (prove) goal (1 subgoal): 1. 
VLambda (\<CC>\<lparr>Obj\<rparr>) (Yoneda_component \<KK> r u)\<lparr>a\<rparr> : Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(r,-)\<lparr>ObjMap\<rparr>\<lparr>a\<rparr> \<mapsto>\<^bsub>cat_Set \<alpha>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>a\<rparr> [PROOF STEP] using that assms category_axioms [PROOF STATE] proof (prove) using this: a \<in>\<^sub>\<circ> \<CC>\<lparr>Obj\<rparr> \<KK> : \<CC> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> cat_Set \<alpha> r \<in>\<^sub>\<circ> \<CC>\<lparr>Obj\<rparr> u \<in>\<^sub>\<circ> \<KK>\<lparr>ObjMap\<rparr>\<lparr>r\<rparr> category \<alpha> \<CC> goal (1 subgoal): 1. VLambda (\<CC>\<lparr>Obj\<rparr>) (Yoneda_component \<KK> r u)\<lparr>a\<rparr> : Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(r,-)\<lparr>ObjMap\<rparr>\<lparr>a\<rparr> \<mapsto>\<^bsub>cat_Set \<alpha>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>a\<rparr> [PROOF STEP] by ( cs_concl cs_shallow cs_simp: cat_cs_simps cat_op_simps V_cs_simps cs_intro: cat_cs_intros ) [PROOF STATE] proof (state) this: ?a \<in>\<^sub>\<circ> \<CC>\<lparr>Obj\<rparr> \<Longrightarrow> VLambda (\<CC>\<lparr>Obj\<rparr>) (Yoneda_component \<KK> r u)\<lparr>?a\<rparr> : Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(r,-)\<lparr>ObjMap\<rparr>\<lparr>?a\<rparr> \<mapsto>\<^bsub>cat_Set \<alpha>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>?a\<rparr> goal (10 subgoals): 1. \<Z> \<alpha> 2. Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(r,-) : \<CC> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> cat_Set \<alpha> 3. \<KK> : \<CC> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> cat_Set \<alpha> 4. Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(r,-) = Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(r,-) 5. \<KK> = \<KK> 6. \<CC> = \<CC> 7. cat_Set \<alpha> = cat_Set \<alpha> 8. vsv (VLambda (\<CC>\<lparr>Obj\<rparr>) (Yoneda_component \<KK> r u)) 9. \<D>\<^sub>\<circ> (VLambda (\<CC>\<lparr>Obj\<rparr>) (Yoneda_component \<KK> r u)) = \<CC>\<lparr>Obj\<rparr> 10. \<And>a b f. f : a \<mapsto>\<^bsub>\<CC>\<^esub> b \<Longrightarrow> VLambda (\<CC>\<lparr>Obj\<rparr>) (Yoneda_component \<KK> r u)\<lparr>b\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(r,-)\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> = \<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> VLambda (\<CC>\<lparr>Obj\<rparr>) (Yoneda_component \<KK> r u)\<lparr>a\<rparr> [PROOF STEP] show "(\<lambda>d\<in>\<^sub>\<circ>\<CC>\<lparr>Obj\<rparr>. ?\<KK>ru d)\<lparr>b\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(r,-)\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> = \<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> (\<lambda>d\<in>\<^sub>\<circ>\<CC>\<lparr>Obj\<rparr>. ?\<KK>ru d)\<lparr>a\<rparr>" if "f : a \<mapsto>\<^bsub>\<CC>\<^esub> b" for a b f [PROOF STATE] proof (prove) goal (1 subgoal): 1. VLambda (\<CC>\<lparr>Obj\<rparr>) (Yoneda_component \<KK> r u)\<lparr>b\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(r,-)\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> = \<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> VLambda (\<CC>\<lparr>Obj\<rparr>) (Yoneda_component \<KK> r u)\<lparr>a\<rparr> [PROOF STEP] proof- [PROOF STATE] proof (state) goal (1 subgoal): 1. 
VLambda (\<CC>\<lparr>Obj\<rparr>) (Yoneda_component \<KK> r u)\<lparr>b\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(r,-)\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> = \<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> VLambda (\<CC>\<lparr>Obj\<rparr>) (Yoneda_component \<KK> r u)\<lparr>a\<rparr> [PROOF STEP] note \<MM>a = \<KK>ru[OF cat_is_arrD(2)[OF that]] [PROOF STATE] proof (state) this: Yoneda_component \<KK> r u a : Hom \<CC> r a \<mapsto>\<^bsub>cat_Set \<alpha>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>a\<rparr> goal (1 subgoal): 1. VLambda (\<CC>\<lparr>Obj\<rparr>) (Yoneda_component \<KK> r u)\<lparr>b\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(r,-)\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> = \<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> VLambda (\<CC>\<lparr>Obj\<rparr>) (Yoneda_component \<KK> r u)\<lparr>a\<rparr> [PROOF STEP] note \<MM>b = \<KK>ru[OF cat_is_arrD(3)[OF that]] [PROOF STATE] proof (state) this: Yoneda_component \<KK> r u b : Hom \<CC> r b \<mapsto>\<^bsub>cat_Set \<alpha>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>b\<rparr> goal (1 subgoal): 1. VLambda (\<CC>\<lparr>Obj\<rparr>) (Yoneda_component \<KK> r u)\<lparr>b\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(r,-)\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> = \<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> VLambda (\<CC>\<lparr>Obj\<rparr>) (Yoneda_component \<KK> r u)\<lparr>a\<rparr> [PROOF STEP] from category_axioms assms that \<MM>b [PROOF STATE] proof (chain) picking this: category \<alpha> \<CC> \<KK> : \<CC> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> cat_Set \<alpha> r \<in>\<^sub>\<circ> \<CC>\<lparr>Obj\<rparr> u \<in>\<^sub>\<circ> \<KK>\<lparr>ObjMap\<rparr>\<lparr>r\<rparr> f : a \<mapsto>\<^bsub>\<CC>\<^esub> b Yoneda_component \<KK> r u b : Hom \<CC> r b \<mapsto>\<^bsub>cat_Set \<alpha>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>b\<rparr> [PROOF STEP] have b_f: "?\<KK>ru b \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> cf_hom \<CC> [\<CC>\<lparr>CId\<rparr>\<lparr>r\<rparr>, f]\<^sub>\<circ> : Hom \<CC> r a \<mapsto>\<^bsub>cat_Set \<alpha>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>b\<rparr>" [PROOF STATE] proof (prove) using this: category \<alpha> \<CC> \<KK> : \<CC> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> cat_Set \<alpha> r \<in>\<^sub>\<circ> \<CC>\<lparr>Obj\<rparr> u \<in>\<^sub>\<circ> \<KK>\<lparr>ObjMap\<rparr>\<lparr>r\<rparr> f : a \<mapsto>\<^bsub>\<CC>\<^esub> b Yoneda_component \<KK> r u b : Hom \<CC> r b \<mapsto>\<^bsub>cat_Set \<alpha>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>b\<rparr> goal (1 subgoal): 1. 
Yoneda_component \<KK> r u b \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> cf_hom \<CC> [\<CC>\<lparr>CId\<rparr>\<lparr>r\<rparr>, f]\<^sub>\<circ> : Hom \<CC> r a \<mapsto>\<^bsub>cat_Set \<alpha>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>b\<rparr> [PROOF STEP] by ( cs_concl cs_shallow cs_intro: cat_cs_intros cat_op_intros cat_prod_cs_intros ) [PROOF STATE] proof (state) this: Yoneda_component \<KK> r u b \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> cf_hom \<CC> [\<CC>\<lparr>CId\<rparr>\<lparr>r\<rparr>, f]\<^sub>\<circ> : Hom \<CC> r a \<mapsto>\<^bsub>cat_Set \<alpha>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>b\<rparr> goal (1 subgoal): 1. VLambda (\<CC>\<lparr>Obj\<rparr>) (Yoneda_component \<KK> r u)\<lparr>b\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(r,-)\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> = \<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> VLambda (\<CC>\<lparr>Obj\<rparr>) (Yoneda_component \<KK> r u)\<lparr>a\<rparr> [PROOF STEP] then [PROOF STATE] proof (chain) picking this: Yoneda_component \<KK> r u b \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> cf_hom \<CC> [\<CC>\<lparr>CId\<rparr>\<lparr>r\<rparr>, f]\<^sub>\<circ> : Hom \<CC> r a \<mapsto>\<^bsub>cat_Set \<alpha>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>b\<rparr> [PROOF STEP] have dom_lhs: "\<D>\<^sub>\<circ> ((?\<KK>ru b \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> cf_hom \<CC> [\<CC>\<lparr>CId\<rparr>\<lparr>r\<rparr>, f]\<^sub>\<circ>)\<lparr>ArrVal\<rparr>) = Hom \<CC> r a" [PROOF STATE] proof (prove) using this: Yoneda_component \<KK> r u b \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> cf_hom \<CC> [\<CC>\<lparr>CId\<rparr>\<lparr>r\<rparr>, f]\<^sub>\<circ> : Hom \<CC> r a \<mapsto>\<^bsub>cat_Set \<alpha>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>b\<rparr> goal (1 subgoal): 1. \<D>\<^sub>\<circ> ((Yoneda_component \<KK> r u b \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> cf_hom \<CC> [\<CC>\<lparr>CId\<rparr>\<lparr>r\<rparr>, f]\<^sub>\<circ>)\<lparr>ArrVal\<rparr>) = Hom \<CC> r a [PROOF STEP] by (cs_concl cs_shallow cs_simp: cat_cs_simps) [PROOF STATE] proof (state) this: \<D>\<^sub>\<circ> ((Yoneda_component \<KK> r u b \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> cf_hom \<CC> [\<CC>\<lparr>CId\<rparr>\<lparr>r\<rparr>, f]\<^sub>\<circ>)\<lparr>ArrVal\<rparr>) = Hom \<CC> r a goal (1 subgoal): 1. 
VLambda (\<CC>\<lparr>Obj\<rparr>) (Yoneda_component \<KK> r u)\<lparr>b\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(r,-)\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> = \<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> VLambda (\<CC>\<lparr>Obj\<rparr>) (Yoneda_component \<KK> r u)\<lparr>a\<rparr> [PROOF STEP] from assms that \<MM>a [PROOF STATE] proof (chain) picking this: \<KK> : \<CC> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> cat_Set \<alpha> r \<in>\<^sub>\<circ> \<CC>\<lparr>Obj\<rparr> u \<in>\<^sub>\<circ> \<KK>\<lparr>ObjMap\<rparr>\<lparr>r\<rparr> f : a \<mapsto>\<^bsub>\<CC>\<^esub> b Yoneda_component \<KK> r u a : Hom \<CC> r a \<mapsto>\<^bsub>cat_Set \<alpha>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>a\<rparr> [PROOF STEP] have f_a: "\<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> ?\<KK>ru a : Hom \<CC> r a \<mapsto>\<^bsub>cat_Set \<alpha>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>b\<rparr>" [PROOF STATE] proof (prove) using this: \<KK> : \<CC> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> cat_Set \<alpha> r \<in>\<^sub>\<circ> \<CC>\<lparr>Obj\<rparr> u \<in>\<^sub>\<circ> \<KK>\<lparr>ObjMap\<rparr>\<lparr>r\<rparr> f : a \<mapsto>\<^bsub>\<CC>\<^esub> b Yoneda_component \<KK> r u a : Hom \<CC> r a \<mapsto>\<^bsub>cat_Set \<alpha>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>a\<rparr> goal (1 subgoal): 1. \<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Yoneda_component \<KK> r u a : Hom \<CC> r a \<mapsto>\<^bsub>cat_Set \<alpha>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>b\<rparr> [PROOF STEP] by (cs_concl cs_shallow cs_intro: cat_cs_intros) [PROOF STATE] proof (state) this: \<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Yoneda_component \<KK> r u a : Hom \<CC> r a \<mapsto>\<^bsub>cat_Set \<alpha>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>b\<rparr> goal (1 subgoal): 1. VLambda (\<CC>\<lparr>Obj\<rparr>) (Yoneda_component \<KK> r u)\<lparr>b\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(r,-)\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> = \<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> VLambda (\<CC>\<lparr>Obj\<rparr>) (Yoneda_component \<KK> r u)\<lparr>a\<rparr> [PROOF STEP] then [PROOF STATE] proof (chain) picking this: \<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Yoneda_component \<KK> r u a : Hom \<CC> r a \<mapsto>\<^bsub>cat_Set \<alpha>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>b\<rparr> [PROOF STEP] have dom_rhs: "\<D>\<^sub>\<circ> ((\<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> ?\<KK>ru a)\<lparr>ArrVal\<rparr>) = Hom \<CC> r a" [PROOF STATE] proof (prove) using this: \<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Yoneda_component \<KK> r u a : Hom \<CC> r a \<mapsto>\<^bsub>cat_Set \<alpha>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>b\<rparr> goal (1 subgoal): 1. 
\<D>\<^sub>\<circ> ((\<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Yoneda_component \<KK> r u a)\<lparr>ArrVal\<rparr>) = Hom \<CC> r a [PROOF STEP] by (cs_concl cs_shallow cs_simp: cat_cs_simps) [PROOF STATE] proof (state) this: \<D>\<^sub>\<circ> ((\<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Yoneda_component \<KK> r u a)\<lparr>ArrVal\<rparr>) = Hom \<CC> r a goal (1 subgoal): 1. VLambda (\<CC>\<lparr>Obj\<rparr>) (Yoneda_component \<KK> r u)\<lparr>b\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(r,-)\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> = \<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> VLambda (\<CC>\<lparr>Obj\<rparr>) (Yoneda_component \<KK> r u)\<lparr>a\<rparr> [PROOF STEP] have [cat_cs_simps]: "?\<KK>ru b \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> cf_hom \<CC> [\<CC>\<lparr>CId\<rparr>\<lparr>r\<rparr>, f]\<^sub>\<circ> = \<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> ?\<KK>ru a" [PROOF STATE] proof (prove) goal (1 subgoal): 1. Yoneda_component \<KK> r u b \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> cf_hom \<CC> [\<CC>\<lparr>CId\<rparr>\<lparr>r\<rparr>, f]\<^sub>\<circ> = \<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Yoneda_component \<KK> r u a [PROOF STEP] proof(rule arr_Set_eqI[of \<alpha>]) [PROOF STATE] proof (state) goal (5 subgoals): 1. arr_Set \<alpha> (Yoneda_component \<KK> r u b \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> cf_hom \<CC> [\<CC>\<lparr>CId\<rparr>\<lparr>r\<rparr>, f]\<^sub>\<circ>) 2. arr_Set \<alpha> (\<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Yoneda_component \<KK> r u a) 3. (Yoneda_component \<KK> r u b \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> cf_hom \<CC> [\<CC>\<lparr>CId\<rparr>\<lparr>r\<rparr>, f]\<^sub>\<circ>)\<lparr>ArrVal\<rparr> = (\<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Yoneda_component \<KK> r u a)\<lparr>ArrVal\<rparr> 4. (Yoneda_component \<KK> r u b \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> cf_hom \<CC> [\<CC>\<lparr>CId\<rparr>\<lparr>r\<rparr>, f]\<^sub>\<circ>)\<lparr>ArrDom\<rparr> = (\<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Yoneda_component \<KK> r u a)\<lparr>ArrDom\<rparr> 5. 
(Yoneda_component \<KK> r u b \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> cf_hom \<CC> [\<CC>\<lparr>CId\<rparr>\<lparr>r\<rparr>, f]\<^sub>\<circ>)\<lparr>ArrCod\<rparr> = (\<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Yoneda_component \<KK> r u a)\<lparr>ArrCod\<rparr> [PROOF STEP] from b_f [PROOF STATE] proof (chain) picking this: Yoneda_component \<KK> r u b \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> cf_hom \<CC> [\<CC>\<lparr>CId\<rparr>\<lparr>r\<rparr>, f]\<^sub>\<circ> : Hom \<CC> r a \<mapsto>\<^bsub>cat_Set \<alpha>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>b\<rparr> [PROOF STEP] show arr_Set_b_f: "arr_Set \<alpha> (?\<KK>ru b \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> cf_hom \<CC> [\<CC>\<lparr>CId\<rparr>\<lparr>r\<rparr>, f]\<^sub>\<circ>)" [PROOF STATE] proof (prove) using this: Yoneda_component \<KK> r u b \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> cf_hom \<CC> [\<CC>\<lparr>CId\<rparr>\<lparr>r\<rparr>, f]\<^sub>\<circ> : Hom \<CC> r a \<mapsto>\<^bsub>cat_Set \<alpha>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>b\<rparr> goal (1 subgoal): 1. arr_Set \<alpha> (Yoneda_component \<KK> r u b \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> cf_hom \<CC> [\<CC>\<lparr>CId\<rparr>\<lparr>r\<rparr>, f]\<^sub>\<circ>) [PROOF STEP] by (auto simp: cat_Set_is_arrD(1)) [PROOF STATE] proof (state) this: arr_Set \<alpha> (Yoneda_component \<KK> r u b \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> cf_hom \<CC> [\<CC>\<lparr>CId\<rparr>\<lparr>r\<rparr>, f]\<^sub>\<circ>) goal (4 subgoals): 1. arr_Set \<alpha> (\<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Yoneda_component \<KK> r u a) 2. (Yoneda_component \<KK> r u b \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> cf_hom \<CC> [\<CC>\<lparr>CId\<rparr>\<lparr>r\<rparr>, f]\<^sub>\<circ>)\<lparr>ArrVal\<rparr> = (\<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Yoneda_component \<KK> r u a)\<lparr>ArrVal\<rparr> 3. (Yoneda_component \<KK> r u b \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> cf_hom \<CC> [\<CC>\<lparr>CId\<rparr>\<lparr>r\<rparr>, f]\<^sub>\<circ>)\<lparr>ArrDom\<rparr> = (\<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Yoneda_component \<KK> r u a)\<lparr>ArrDom\<rparr> 4. (Yoneda_component \<KK> r u b \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> cf_hom \<CC> [\<CC>\<lparr>CId\<rparr>\<lparr>r\<rparr>, f]\<^sub>\<circ>)\<lparr>ArrCod\<rparr> = (\<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Yoneda_component \<KK> r u a)\<lparr>ArrCod\<rparr> [PROOF STEP] interpret b_f: arr_Set \<alpha> \<open>?\<KK>ru b \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> cf_hom \<CC> [\<CC>\<lparr>CId\<rparr>\<lparr>r\<rparr>, f]\<^sub>\<circ>\<close> [PROOF STATE] proof (prove) goal (1 subgoal): 1. arr_Set \<alpha> (Yoneda_component \<KK> r u b \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> cf_hom \<CC> [\<CC>\<lparr>CId\<rparr>\<lparr>r\<rparr>, f]\<^sub>\<circ>) [PROOF STEP] by (rule arr_Set_b_f) [PROOF STATE] proof (state) goal (4 subgoals): 1. arr_Set \<alpha> (\<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Yoneda_component \<KK> r u a) 2. 
(Yoneda_component \<KK> r u b \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> cf_hom \<CC> [\<CC>\<lparr>CId\<rparr>\<lparr>r\<rparr>, f]\<^sub>\<circ>)\<lparr>ArrVal\<rparr> = (\<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Yoneda_component \<KK> r u a)\<lparr>ArrVal\<rparr> 3. (Yoneda_component \<KK> r u b \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> cf_hom \<CC> [\<CC>\<lparr>CId\<rparr>\<lparr>r\<rparr>, f]\<^sub>\<circ>)\<lparr>ArrDom\<rparr> = (\<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Yoneda_component \<KK> r u a)\<lparr>ArrDom\<rparr> 4. (Yoneda_component \<KK> r u b \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> cf_hom \<CC> [\<CC>\<lparr>CId\<rparr>\<lparr>r\<rparr>, f]\<^sub>\<circ>)\<lparr>ArrCod\<rparr> = (\<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Yoneda_component \<KK> r u a)\<lparr>ArrCod\<rparr> [PROOF STEP] from f_a [PROOF STATE] proof (chain) picking this: \<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Yoneda_component \<KK> r u a : Hom \<CC> r a \<mapsto>\<^bsub>cat_Set \<alpha>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>b\<rparr> [PROOF STEP] show arr_Set_f_a: "arr_Set \<alpha> (\<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> ?\<KK>ru a)" [PROOF STATE] proof (prove) using this: \<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Yoneda_component \<KK> r u a : Hom \<CC> r a \<mapsto>\<^bsub>cat_Set \<alpha>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>b\<rparr> goal (1 subgoal): 1. arr_Set \<alpha> (\<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Yoneda_component \<KK> r u a) [PROOF STEP] by (auto simp: cat_Set_is_arrD(1)) [PROOF STATE] proof (state) this: arr_Set \<alpha> (\<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Yoneda_component \<KK> r u a) goal (3 subgoals): 1. (Yoneda_component \<KK> r u b \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> cf_hom \<CC> [\<CC>\<lparr>CId\<rparr>\<lparr>r\<rparr>, f]\<^sub>\<circ>)\<lparr>ArrVal\<rparr> = (\<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Yoneda_component \<KK> r u a)\<lparr>ArrVal\<rparr> 2. (Yoneda_component \<KK> r u b \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> cf_hom \<CC> [\<CC>\<lparr>CId\<rparr>\<lparr>r\<rparr>, f]\<^sub>\<circ>)\<lparr>ArrDom\<rparr> = (\<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Yoneda_component \<KK> r u a)\<lparr>ArrDom\<rparr> 3. (Yoneda_component \<KK> r u b \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> cf_hom \<CC> [\<CC>\<lparr>CId\<rparr>\<lparr>r\<rparr>, f]\<^sub>\<circ>)\<lparr>ArrCod\<rparr> = (\<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Yoneda_component \<KK> r u a)\<lparr>ArrCod\<rparr> [PROOF STEP] interpret f_a: arr_Set \<alpha> \<open>\<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> ?\<KK>ru a\<close> [PROOF STATE] proof (prove) goal (1 subgoal): 1. arr_Set \<alpha> (\<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Yoneda_component \<KK> r u a) [PROOF STEP] by (rule arr_Set_f_a) [PROOF STATE] proof (state) goal (3 subgoals): 1. 
(Yoneda_component \<KK> r u b \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> cf_hom \<CC> [\<CC>\<lparr>CId\<rparr>\<lparr>r\<rparr>, f]\<^sub>\<circ>)\<lparr>ArrVal\<rparr> = (\<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Yoneda_component \<KK> r u a)\<lparr>ArrVal\<rparr> 2. (Yoneda_component \<KK> r u b \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> cf_hom \<CC> [\<CC>\<lparr>CId\<rparr>\<lparr>r\<rparr>, f]\<^sub>\<circ>)\<lparr>ArrDom\<rparr> = (\<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Yoneda_component \<KK> r u a)\<lparr>ArrDom\<rparr> 3. (Yoneda_component \<KK> r u b \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> cf_hom \<CC> [\<CC>\<lparr>CId\<rparr>\<lparr>r\<rparr>, f]\<^sub>\<circ>)\<lparr>ArrCod\<rparr> = (\<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Yoneda_component \<KK> r u a)\<lparr>ArrCod\<rparr> [PROOF STEP] show "(?\<KK>ru b \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> cf_hom \<CC> [\<CC>\<lparr>CId\<rparr>\<lparr>r\<rparr>, f]\<^sub>\<circ>)\<lparr>ArrVal\<rparr> = (\<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> ?\<KK>ru a)\<lparr>ArrVal\<rparr>" [PROOF STATE] proof (prove) goal (1 subgoal): 1. (Yoneda_component \<KK> r u b \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> cf_hom \<CC> [\<CC>\<lparr>CId\<rparr>\<lparr>r\<rparr>, f]\<^sub>\<circ>)\<lparr>ArrVal\<rparr> = (\<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Yoneda_component \<KK> r u a)\<lparr>ArrVal\<rparr> [PROOF STEP] proof(rule vsv_eqI, unfold dom_lhs dom_rhs in_Hom_iff) [PROOF STATE] proof (state) goal (4 subgoals): 1. vsv ((Yoneda_component \<KK> r u b \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> cf_hom \<CC> [\<CC>\<lparr>CId\<rparr>\<lparr>r\<rparr>, f]\<^sub>\<circ>)\<lparr>ArrVal\<rparr>) 2. vsv ((\<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Yoneda_component \<KK> r u a)\<lparr>ArrVal\<rparr>) 3. Hom \<CC> r a = Hom \<CC> r a 4. \<And>aa. aa : r \<mapsto>\<^bsub>\<CC>\<^esub> a \<Longrightarrow> (Yoneda_component \<KK> r u b \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> cf_hom \<CC> [\<CC>\<lparr>CId\<rparr>\<lparr>r\<rparr>, f]\<^sub>\<circ>)\<lparr>ArrVal\<rparr>\<lparr>aa\<rparr> = (\<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Yoneda_component \<KK> r u a)\<lparr>ArrVal\<rparr>\<lparr>aa\<rparr> [PROOF STEP] fix q [PROOF STATE] proof (state) goal (4 subgoals): 1. vsv ((Yoneda_component \<KK> r u b \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> cf_hom \<CC> [\<CC>\<lparr>CId\<rparr>\<lparr>r\<rparr>, f]\<^sub>\<circ>)\<lparr>ArrVal\<rparr>) 2. vsv ((\<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Yoneda_component \<KK> r u a)\<lparr>ArrVal\<rparr>) 3. Hom \<CC> r a = Hom \<CC> r a 4. \<And>aa. 
aa : r \<mapsto>\<^bsub>\<CC>\<^esub> a \<Longrightarrow> (Yoneda_component \<KK> r u b \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> cf_hom \<CC> [\<CC>\<lparr>CId\<rparr>\<lparr>r\<rparr>, f]\<^sub>\<circ>)\<lparr>ArrVal\<rparr>\<lparr>aa\<rparr> = (\<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Yoneda_component \<KK> r u a)\<lparr>ArrVal\<rparr>\<lparr>aa\<rparr> [PROOF STEP] assume "q : r \<mapsto>\<^bsub>\<CC>\<^esub> a" [PROOF STATE] proof (state) this: q : r \<mapsto>\<^bsub>\<CC>\<^esub> a goal (4 subgoals): 1. vsv ((Yoneda_component \<KK> r u b \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> cf_hom \<CC> [\<CC>\<lparr>CId\<rparr>\<lparr>r\<rparr>, f]\<^sub>\<circ>)\<lparr>ArrVal\<rparr>) 2. vsv ((\<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Yoneda_component \<KK> r u a)\<lparr>ArrVal\<rparr>) 3. Hom \<CC> r a = Hom \<CC> r a 4. \<And>aa. aa : r \<mapsto>\<^bsub>\<CC>\<^esub> a \<Longrightarrow> (Yoneda_component \<KK> r u b \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> cf_hom \<CC> [\<CC>\<lparr>CId\<rparr>\<lparr>r\<rparr>, f]\<^sub>\<circ>)\<lparr>ArrVal\<rparr>\<lparr>aa\<rparr> = (\<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Yoneda_component \<KK> r u a)\<lparr>ArrVal\<rparr>\<lparr>aa\<rparr> [PROOF STEP] from category_axioms assms that this \<MM>a \<MM>b [PROOF STATE] proof (chain) picking this: category \<alpha> \<CC> \<KK> : \<CC> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> cat_Set \<alpha> r \<in>\<^sub>\<circ> \<CC>\<lparr>Obj\<rparr> u \<in>\<^sub>\<circ> \<KK>\<lparr>ObjMap\<rparr>\<lparr>r\<rparr> f : a \<mapsto>\<^bsub>\<CC>\<^esub> b q : r \<mapsto>\<^bsub>\<CC>\<^esub> a Yoneda_component \<KK> r u a : Hom \<CC> r a \<mapsto>\<^bsub>cat_Set \<alpha>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>a\<rparr> Yoneda_component \<KK> r u b : Hom \<CC> r b \<mapsto>\<^bsub>cat_Set \<alpha>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>b\<rparr> [PROOF STEP] show "(?\<KK>ru b \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> cf_hom \<CC> [\<CC>\<lparr>CId\<rparr>\<lparr>r\<rparr>, f]\<^sub>\<circ>)\<lparr>ArrVal\<rparr>\<lparr>q\<rparr> = (\<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> ?\<KK>ru a)\<lparr>ArrVal\<rparr>\<lparr>q\<rparr>" [PROOF STATE] proof (prove) using this: category \<alpha> \<CC> \<KK> : \<CC> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> cat_Set \<alpha> r \<in>\<^sub>\<circ> \<CC>\<lparr>Obj\<rparr> u \<in>\<^sub>\<circ> \<KK>\<lparr>ObjMap\<rparr>\<lparr>r\<rparr> f : a \<mapsto>\<^bsub>\<CC>\<^esub> b q : r \<mapsto>\<^bsub>\<CC>\<^esub> a Yoneda_component \<KK> r u a : Hom \<CC> r a \<mapsto>\<^bsub>cat_Set \<alpha>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>a\<rparr> Yoneda_component \<KK> r u b : Hom \<CC> r b \<mapsto>\<^bsub>cat_Set \<alpha>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>b\<rparr> goal (1 subgoal): 1. 
(Yoneda_component \<KK> r u b \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> cf_hom \<CC> [\<CC>\<lparr>CId\<rparr>\<lparr>r\<rparr>, f]\<^sub>\<circ>)\<lparr>ArrVal\<rparr>\<lparr>q\<rparr> = (\<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Yoneda_component \<KK> r u a)\<lparr>ArrVal\<rparr>\<lparr>q\<rparr> [PROOF STEP] by ( cs_concl cs_shallow cs_simp: cat_cs_simps cat_op_simps cs_intro: cat_cs_intros cat_op_intros cat_prod_cs_intros ) [PROOF STATE] proof (state) this: (Yoneda_component \<KK> r u b \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> cf_hom \<CC> [\<CC>\<lparr>CId\<rparr>\<lparr>r\<rparr>, f]\<^sub>\<circ>)\<lparr>ArrVal\<rparr>\<lparr>q\<rparr> = (\<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Yoneda_component \<KK> r u a)\<lparr>ArrVal\<rparr>\<lparr>q\<rparr> goal (3 subgoals): 1. vsv ((Yoneda_component \<KK> r u b \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> cf_hom \<CC> [\<CC>\<lparr>CId\<rparr>\<lparr>r\<rparr>, f]\<^sub>\<circ>)\<lparr>ArrVal\<rparr>) 2. vsv ((\<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Yoneda_component \<KK> r u a)\<lparr>ArrVal\<rparr>) 3. Hom \<CC> r a = Hom \<CC> r a [PROOF STEP] qed (use arr_Set_b_f arr_Set_f_a in auto) [PROOF STATE] proof (state) this: (Yoneda_component \<KK> r u b \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> cf_hom \<CC> [\<CC>\<lparr>CId\<rparr>\<lparr>r\<rparr>, f]\<^sub>\<circ>)\<lparr>ArrVal\<rparr> = (\<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Yoneda_component \<KK> r u a)\<lparr>ArrVal\<rparr> goal (2 subgoals): 1. (Yoneda_component \<KK> r u b \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> cf_hom \<CC> [\<CC>\<lparr>CId\<rparr>\<lparr>r\<rparr>, f]\<^sub>\<circ>)\<lparr>ArrDom\<rparr> = (\<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Yoneda_component \<KK> r u a)\<lparr>ArrDom\<rparr> 2. (Yoneda_component \<KK> r u b \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> cf_hom \<CC> [\<CC>\<lparr>CId\<rparr>\<lparr>r\<rparr>, f]\<^sub>\<circ>)\<lparr>ArrCod\<rparr> = (\<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Yoneda_component \<KK> r u a)\<lparr>ArrCod\<rparr> [PROOF STEP] qed (use b_f f_a in \<open>cs_concl cs_shallow cs_simp: cat_cs_simps\<close>)+ [PROOF STATE] proof (state) this: Yoneda_component \<KK> r u b \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> cf_hom \<CC> [\<CC>\<lparr>CId\<rparr>\<lparr>r\<rparr>, f]\<^sub>\<circ> = \<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Yoneda_component \<KK> r u a goal (1 subgoal): 1. 
VLambda (\<CC>\<lparr>Obj\<rparr>) (Yoneda_component \<KK> r u)\<lparr>b\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(r,-)\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> = \<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> VLambda (\<CC>\<lparr>Obj\<rparr>) (Yoneda_component \<KK> r u)\<lparr>a\<rparr> [PROOF STEP] from that category_axioms assms \<MM>a \<MM>b [PROOF STATE] proof (chain) picking this: f : a \<mapsto>\<^bsub>\<CC>\<^esub> b category \<alpha> \<CC> \<KK> : \<CC> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> cat_Set \<alpha> r \<in>\<^sub>\<circ> \<CC>\<lparr>Obj\<rparr> u \<in>\<^sub>\<circ> \<KK>\<lparr>ObjMap\<rparr>\<lparr>r\<rparr> Yoneda_component \<KK> r u a : Hom \<CC> r a \<mapsto>\<^bsub>cat_Set \<alpha>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>a\<rparr> Yoneda_component \<KK> r u b : Hom \<CC> r b \<mapsto>\<^bsub>cat_Set \<alpha>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>b\<rparr> [PROOF STEP] show ?thesis [PROOF STATE] proof (prove) using this: f : a \<mapsto>\<^bsub>\<CC>\<^esub> b category \<alpha> \<CC> \<KK> : \<CC> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> cat_Set \<alpha> r \<in>\<^sub>\<circ> \<CC>\<lparr>Obj\<rparr> u \<in>\<^sub>\<circ> \<KK>\<lparr>ObjMap\<rparr>\<lparr>r\<rparr> Yoneda_component \<KK> r u a : Hom \<CC> r a \<mapsto>\<^bsub>cat_Set \<alpha>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>a\<rparr> Yoneda_component \<KK> r u b : Hom \<CC> r b \<mapsto>\<^bsub>cat_Set \<alpha>\<^esub> \<KK>\<lparr>ObjMap\<rparr>\<lparr>b\<rparr> goal (1 subgoal): 1. VLambda (\<CC>\<lparr>Obj\<rparr>) (Yoneda_component \<KK> r u)\<lparr>b\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(r,-)\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> = \<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> VLambda (\<CC>\<lparr>Obj\<rparr>) (Yoneda_component \<KK> r u)\<lparr>a\<rparr> [PROOF STEP] by ( cs_concl cs_simp: V_cs_simps cat_cs_simps cat_op_simps cs_intro: cat_cs_intros ) [PROOF STATE] proof (state) this: VLambda (\<CC>\<lparr>Obj\<rparr>) (Yoneda_component \<KK> r u)\<lparr>b\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(r,-)\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> = \<KK>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> VLambda (\<CC>\<lparr>Obj\<rparr>) (Yoneda_component \<KK> r u)\<lparr>a\<rparr> goal: No subgoals! [PROOF STEP] qed [PROOF STATE] proof (state) this: ?f : ?a \<mapsto>\<^bsub>\<CC>\<^esub> ?b \<Longrightarrow> VLambda (\<CC>\<lparr>Obj\<rparr>) (Yoneda_component \<KK> r u)\<lparr>?b\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(r,-)\<lparr>ArrMap\<rparr>\<lparr>?f\<rparr> = \<KK>\<lparr>ArrMap\<rparr>\<lparr>?f\<rparr> \<circ>\<^sub>A\<^bsub>cat_Set \<alpha>\<^esub> VLambda (\<CC>\<lparr>Obj\<rparr>) (Yoneda_component \<KK> r u)\<lparr>?a\<rparr> goal (9 subgoals): 1. \<Z> \<alpha> 2. Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(r,-) : \<CC> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> cat_Set \<alpha> 3. \<KK> : \<CC> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> cat_Set \<alpha> 4. Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(r,-) = Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(r,-) 5. \<KK> = \<KK> 6. \<CC> = \<CC> 7. 
cat_Set \<alpha> = cat_Set \<alpha> 8. vsv (VLambda (\<CC>\<lparr>Obj\<rparr>) (Yoneda_component \<KK> r u)) 9. \<D>\<^sub>\<circ> (VLambda (\<CC>\<lparr>Obj\<rparr>) (Yoneda_component \<KK> r u)) = \<CC>\<lparr>Obj\<rparr> [PROOF STEP] qed (auto simp: assms(2) cat_cs_intros) [PROOF STATE] proof (state) this: Yoneda_arrow \<alpha> \<KK> r u : Hom\<^sub>O\<^sub>.\<^sub>C\<^bsub>\<alpha>\<^esub>\<CC>(r,-) \<mapsto>\<^sub>C\<^sub>F \<KK> : \<CC> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> cat_Set \<alpha> goal: No subgoals! [PROOF STEP] qed
Require Import Ascii String. Require Import Coq.Structures.OrderedTypeEx. Require Import Coq.Structures.OrderedType. Require Import Coq.Structures.DecidableTypeEx. Require Import Coq.ZArith.Znat. Require Import Coq.Arith.Peano_dec. Module Type IndexedType. Variable t: Type. Variable index: t -> nat. Hypothesis index_inj: forall (x y: t), index x = index y <-> x = y. Variable eq: forall (x y: t), {x = y} + {x <> y}. End IndexedType. Module IndexedAscii <: IndexedType. Definition t := ascii. Definition index := nat_of_ascii. Definition eq := ascii_dec. Hypothesis index_inj: forall (x y: t), index x = index y <-> x = y. End IndexedAscii. Module OrderedAscii(A: IndexedType) <: OrderedType. Definition t := A.t. Definition eq (x y: t) := x = y. Definition lt (x y: t) := lt (A.index x) (A.index y). Lemma eq_refl : forall x : t, eq x x. Proof (@refl_equal t). Lemma eq_sym : forall x y : t, eq x y -> eq y x. Proof (@sym_equal t). Lemma eq_trans : forall x y z : t, eq x y -> eq y z -> eq x z. Proof (@trans_equal t). Lemma lt_trans : forall x y z : t, lt x y -> lt y z -> lt x z. Proof. unfold lt; intros. rewrite <- H in H0. exact H0. Qed. Lemma lt_not_eq : forall x y : t, lt x y -> ~ eq x y. Proof. unfold lt; unfold eq; intros. red. intro. subst y. generalize dependent H. apply lt_irrefl. Qed. Lemma compare : forall x y : t, Compare lt eq x y. Proof. intros. case_eq ( nat_compare (A.index x) (A.index y)); intro. - apply EQ. apply nat_compare_eq in H. apply A.index_inj. assumption. - apply LT. apply nat_compare_lt in H. unfold lt. assumption. - apply GT. apply nat_compare_gt in H. unfold lt. intuition. Qed. Lemma eq_dec : forall x y, { eq x y } + { ~ eq x y }. Proof. intros. case_eq (nat_compare (A.index x) (A.index y)); intros. - left. apply nat_compare_eq in H. apply A.index_inj. assumption. - right. red. intro. inversion H0. apply nat_compare_lt in H. apply A.index_inj in H1. intuition. - right. red. intro. inversion H0. apply nat_compare_gt in H. apply A.index_inj in H1. intuition. Defined. End OrderedAscii. Module AsciiVars := OrderedAscii (IndexedAscii). Module RegionVars := PairOrderedType Nat_as_OT Nat_as_OT.
/- Copyright (c) 2019 Scott Morrison. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Scott Morrison, Bhavik Mehta -/ import category_theory.monad.adjunction import category_theory.adjunction.limits import category_theory.limits.shapes.terminal /-! # Limits and colimits in the category of algebras This file shows that the forgetful functor `forget T : algebra T ⥤ C` for a monad `T : C ⥤ C` creates limits and creates any colimits which `T` preserves. This is used to show that `algebra T` has any limits which `C` has, and any colimits which `C` has and `T` preserves. This is generalised to the case of a monadic functor `D ⥤ C`. ## TODO Dualise for the category of coalgebras and comonadic left adjoints. -/ namespace category_theory open category open category_theory.limits universes v u v₁ v₂ u₁ u₂ -- morphism levels before object levels. See note [category_theory universes]. namespace monad variables {C : Type u₁} [category.{v₁} C] variables {T : monad C} variables {J : Type u} [category.{v} J] namespace forget_creates_limits variables (D : J ⥤ algebra T) (c : cone (D ⋙ T.forget)) (t : is_limit c) /-- (Impl) The natural transformation used to define the new cone -/ @[simps] def γ : (D ⋙ T.forget ⋙ ↑T) ⟶ D ⋙ T.forget := { app := λ j, (D.obj j).a } /-- (Impl) This new cone is used to construct the algebra structure -/ @[simps π_app] def new_cone : cone (D ⋙ forget T) := { X := T.obj c.X, π := (functor.const_comp _ _ ↑T).inv ≫ whisker_right c.π T ≫ γ D } /-- The algebra structure which will be the apex of the new limit cone for `D`. -/ @[simps] def cone_point : algebra T := { A := c.X, a := t.lift (new_cone D c), unit' := t.hom_ext $ λ j, begin rw [category.assoc, t.fac, new_cone_π_app, ←T.η.naturality_assoc, functor.id_map, (D.obj j).unit], dsimp, simp -- See library note [dsimp, simp] end, assoc' := t.hom_ext $ λ j, begin rw [category.assoc, category.assoc, t.fac (new_cone D c), new_cone_π_app, ←functor.map_comp_assoc, t.fac (new_cone D c), new_cone_π_app, ←T.μ.naturality_assoc, (D.obj j).assoc, functor.map_comp, category.assoc], refl, end } /-- (Impl) Construct the lifted cone in `algebra T` which will be limiting. -/ @[simps] def lifted_cone : cone D := { X := cone_point D c t, π := { app := λ j, { f := c.π.app j }, naturality' := λ X Y f, by { ext1, dsimp, erw c.w f, simp } } } /-- (Impl) Prove that the lifted cone is limiting. -/ @[simps] def lifted_cone_is_limit : is_limit (lifted_cone D c t) := { lift := λ s, { f := t.lift ((forget T).map_cone s), h' := t.hom_ext $ λ j, begin dsimp, rw [category.assoc, category.assoc, t.fac, new_cone_π_app, ←functor.map_comp_assoc, t.fac, functor.map_cone_π_app], apply (s.π.app j).h, end }, uniq' := λ s m J, begin ext1, apply t.hom_ext, intro j, simpa [t.fac ((forget T).map_cone s) j] using congr_arg algebra.hom.f (J j), end } end forget_creates_limits -- Theorem 5.6.5 from [Riehl][riehl2017] /-- The forgetful functor from the Eilenberg-Moore category creates limits. -/ noncomputable instance forget_creates_limits : creates_limits_of_size (forget T) := { creates_limits_of_shape := λ J 𝒥, by exactI { creates_limit := λ D, creates_limit_of_reflects_iso (λ c t, { lifted_cone := forget_creates_limits.lifted_cone D c t, valid_lift := cones.ext (iso.refl _) (λ j, (id_comp _).symm), makes_limit := forget_creates_limits.lifted_cone_is_limit _ _ _ } ) } } /-- `D ⋙ forget T` has a limit, then `D` has a limit. 
-/ lemma has_limit_of_comp_forget_has_limit (D : J ⥤ algebra T) [has_limit (D ⋙ forget T)] : has_limit D := has_limit_of_created D (forget T) namespace forget_creates_colimits -- Let's hide the implementation details in a namespace variables {D : J ⥤ algebra T} (c : cocone (D ⋙ forget T)) (t : is_colimit c) -- We have a diagram D of shape J in the category of algebras, and we assume that we are given a -- colimit for its image D ⋙ forget T under the forgetful functor, say its apex is L. -- We'll construct a colimiting coalgebra for D, whose carrier will also be L. -- To do this, we must find a map TL ⟶ L. Since T preserves colimits, TL is also a colimit. -- In particular, it is a colimit for the diagram `(D ⋙ forget T) ⋙ T` -- so to construct a map TL ⟶ L it suffices to show that L is the apex of a cocone for this diagram. -- In other words, we need a natural transformation from const L to `(D ⋙ forget T) ⋙ T`. -- But we already know that L is the apex of a cocone for the diagram `D ⋙ forget T`, so it -- suffices to give a natural transformation `((D ⋙ forget T) ⋙ T) ⟶ (D ⋙ forget T)`: /-- (Impl) The natural transformation given by the algebra structure maps, used to construct a cocone `c` with apex `colimit (D ⋙ forget T)`. -/ @[simps] def γ : ((D ⋙ forget T) ⋙ ↑T) ⟶ (D ⋙ forget T) := { app := λ j, (D.obj j).a } /-- (Impl) A cocone for the diagram `(D ⋙ forget T) ⋙ T` found by composing the natural transformation `γ` with the colimiting cocone for `D ⋙ forget T`. -/ @[simps] def new_cocone : cocone ((D ⋙ forget T) ⋙ ↑T) := { X := c.X, ι := γ ≫ c.ι } variables [preserves_colimit (D ⋙ forget T) (T : C ⥤ C)] /-- (Impl) Define the map `λ : TL ⟶ L`, which will serve as the structure of the coalgebra on `L`, and we will show is the colimiting object. We use the cocone constructed by `c` and the fact that `T` preserves colimits to produce this morphism. -/ @[reducible] def lambda : ((T : C ⥤ C).map_cocone c).X ⟶ c.X := (is_colimit_of_preserves _ t).desc (new_cocone c) /-- (Impl) The key property defining the map `λ : TL ⟶ L`. -/ lemma commuting (j : J) : (T : C ⥤ C).map (c.ι.app j) ≫ lambda c t = (D.obj j).a ≫ c.ι.app j := (is_colimit_of_preserves _ t).fac (new_cocone c) j variables [preserves_colimit ((D ⋙ forget T) ⋙ ↑T) (T : C ⥤ C)] /-- (Impl) Construct the colimiting algebra from the map `λ : TL ⟶ L` given by `lambda`. We are required to show it satisfies the two algebra laws, which follow from the algebra laws for the image of `D` and our `commuting` lemma. -/ @[simps] def cocone_point : algebra T := { A := c.X, a := lambda c t, unit' := begin apply t.hom_ext, intro j, rw [(show c.ι.app j ≫ T.η.app c.X ≫ _ = T.η.app (D.obj j).A ≫ _ ≫ _, from T.η.naturality_assoc _ _), commuting, algebra.unit_assoc (D.obj j)], dsimp, simp -- See library note [dsimp, simp] end, assoc' := begin refine (is_colimit_of_preserves _ (is_colimit_of_preserves _ t)).hom_ext (λ j, _), rw [functor.map_cocone_ι_app, functor.map_cocone_ι_app, (show (T : C ⥤ C).map ((T : C ⥤ C).map _) ≫ _ ≫ _ = _, from T.μ.naturality_assoc _ _), ←functor.map_comp_assoc, commuting, functor.map_comp, category.assoc, commuting], apply (D.obj j).assoc_assoc _, end } /-- (Impl) Construct the lifted cocone in `algebra T` which will be colimiting. -/ @[simps] def lifted_cocone : cocone D := { X := cocone_point c t, ι := { app := λ j, { f := c.ι.app j, h' := commuting _ _ _ }, naturality' := λ A B f, by { ext1, dsimp, rw [comp_id], apply c.w } } } /-- (Impl) Prove that the lifted cocone is colimiting. 
-/ @[simps] def lifted_cocone_is_colimit : is_colimit (lifted_cocone c t) := { desc := λ s, { f := t.desc ((forget T).map_cocone s), h' := (is_colimit_of_preserves (T : C ⥤ C) t).hom_ext $ λ j, begin dsimp, rw [←functor.map_comp_assoc, ←category.assoc, t.fac, commuting, category.assoc, t.fac], apply algebra.hom.h, end }, uniq' := λ s m J, by { ext1, apply t.hom_ext, intro j, simpa using congr_arg algebra.hom.f (J j) } } end forget_creates_colimits open forget_creates_colimits -- TODO: the converse of this is true as well /-- The forgetful functor from the Eilenberg-Moore category for a monad creates any colimit which the monad itself preserves. -/ noncomputable instance forget_creates_colimit (D : J ⥤ algebra T) [preserves_colimit (D ⋙ forget T) (T : C ⥤ C)] [preserves_colimit ((D ⋙ forget T) ⋙ ↑T) (T : C ⥤ C)] : creates_colimit D (forget T) := creates_colimit_of_reflects_iso $ λ c t, { lifted_cocone := { X := cocone_point c t, ι := { app := λ j, { f := c.ι.app j, h' := commuting _ _ _ }, naturality' := λ A B f, by { ext1, dsimp, erw [comp_id, c.w] } } }, valid_lift := cocones.ext (iso.refl _) (by tidy), makes_colimit := lifted_cocone_is_colimit _ _ } noncomputable instance forget_creates_colimits_of_shape [preserves_colimits_of_shape J (T : C ⥤ C)] : creates_colimits_of_shape J (forget T) := { creates_colimit := λ K, by apply_instance } noncomputable instance forget_creates_colimits [preserves_colimits_of_size.{v u} (T : C ⥤ C)] : creates_colimits_of_size.{v u} (forget T) := { creates_colimits_of_shape := λ J 𝒥₁, by apply_instance } /-- For `D : J ⥤ algebra T`, `D ⋙ forget T` has a colimit, then `D` has a colimit provided colimits of shape `J` are preserved by `T`. -/ lemma forget_creates_colimits_of_monad_preserves [preserves_colimits_of_shape J (T : C ⥤ C)] (D : J ⥤ algebra T) [has_colimit (D ⋙ forget T)] : has_colimit D := has_colimit_of_created D (forget T) end monad variables {C : Type u₁} [category.{v₁} C] {D : Type u₂} [category.{v₂} D] variables {J : Type u} [category.{v} J] instance comp_comparison_forget_has_limit (F : J ⥤ D) (R : D ⥤ C) [monadic_right_adjoint R] [has_limit (F ⋙ R)] : has_limit ((F ⋙ monad.comparison (adjunction.of_right_adjoint R)) ⋙ monad.forget _) := @has_limit_of_iso _ _ _ _ (F ⋙ R) _ _ (iso_whisker_left F (monad.comparison_forget (adjunction.of_right_adjoint R)).symm) instance comp_comparison_has_limit (F : J ⥤ D) (R : D ⥤ C) [monadic_right_adjoint R] [has_limit (F ⋙ R)] : has_limit (F ⋙ monad.comparison (adjunction.of_right_adjoint R)) := monad.has_limit_of_comp_forget_has_limit (F ⋙ monad.comparison (adjunction.of_right_adjoint R)) /-- Any monadic functor creates limits. -/ noncomputable def monadic_creates_limits (R : D ⥤ C) [monadic_right_adjoint R] : creates_limits_of_size.{v u} R := creates_limits_of_nat_iso (monad.comparison_forget (adjunction.of_right_adjoint R)) /-- The forgetful functor from the Eilenberg-Moore category for a monad creates any colimit which the monad itself preserves. 
-/ noncomputable def monadic_creates_colimit_of_preserves_colimit (R : D ⥤ C) (K : J ⥤ D) [monadic_right_adjoint R] [preserves_colimit (K ⋙ R) (left_adjoint R ⋙ R)] [preserves_colimit ((K ⋙ R) ⋙ left_adjoint R ⋙ R) (left_adjoint R ⋙ R)] : creates_colimit K R := begin apply creates_colimit_of_nat_iso (monad.comparison_forget (adjunction.of_right_adjoint R)), apply category_theory.comp_creates_colimit _ _, apply_instance, let i : ((K ⋙ monad.comparison (adjunction.of_right_adjoint R)) ⋙ monad.forget _) ≅ K ⋙ R := functor.associator _ _ _ ≪≫ iso_whisker_left K (monad.comparison_forget (adjunction.of_right_adjoint R)), apply category_theory.monad.forget_creates_colimit _, { dsimp, refine preserves_colimit_of_iso_diagram _ i.symm }, { dsimp, refine preserves_colimit_of_iso_diagram _ (iso_whisker_right i (left_adjoint R ⋙ R)).symm }, end /-- A monadic functor creates any colimits of shapes it preserves. -/ noncomputable def monadic_creates_colimits_of_shape_of_preserves_colimits_of_shape (R : D ⥤ C) [monadic_right_adjoint R] [preserves_colimits_of_shape J R] : creates_colimits_of_shape J R := begin have : preserves_colimits_of_shape J (left_adjoint R ⋙ R), { apply category_theory.limits.comp_preserves_colimits_of_shape _ _, apply (adjunction.left_adjoint_preserves_colimits (adjunction.of_right_adjoint R)).1, apply_instance }, exactI ⟨λ K, monadic_creates_colimit_of_preserves_colimit _ _⟩, end /-- A monadic functor creates colimits if it preserves colimits. -/ noncomputable def monadic_creates_colimits_of_preserves_colimits (R : D ⥤ C) [monadic_right_adjoint R] [preserves_colimits_of_size.{v u} R] : creates_colimits_of_size.{v u} R := { creates_colimits_of_shape := λ J 𝒥₁, by exactI monadic_creates_colimits_of_shape_of_preserves_colimits_of_shape _ } section lemma has_limit_of_reflective (F : J ⥤ D) (R : D ⥤ C) [has_limit (F ⋙ R)] [reflective R] : has_limit F := by { haveI := monadic_creates_limits.{v u} R, exact has_limit_of_created F R } /-- If `C` has limits of shape `J` then any reflective subcategory has limits of shape `J`. -/ lemma has_limits_of_shape_of_reflective [has_limits_of_shape J C] (R : D ⥤ C) [reflective R] : has_limits_of_shape J D := { has_limit := λ F, has_limit_of_reflective F R } /-- If `C` has limits then any reflective subcategory has limits. -/ lemma has_limits_of_reflective (R : D ⥤ C) [has_limits_of_size.{v u} C] [reflective R] : has_limits_of_size.{v u} D := { has_limits_of_shape := λ J 𝒥₁, by exactI has_limits_of_shape_of_reflective R } /-- If `C` has colimits of shape `J` then any reflective subcategory has colimits of shape `J`. -/ lemma has_colimits_of_shape_of_reflective (R : D ⥤ C) [reflective R] [has_colimits_of_shape J C] : has_colimits_of_shape J D := { has_colimit := λ F, begin let c := (left_adjoint R).map_cocone (colimit.cocone (F ⋙ R)), letI : preserves_colimits_of_shape J _ := (adjunction.of_right_adjoint R).left_adjoint_preserves_colimits.1, let t : is_colimit c := is_colimit_of_preserves (left_adjoint R) (colimit.is_colimit _), apply has_colimit.mk ⟨_, (is_colimit.precompose_inv_equiv _ _).symm t⟩, apply (iso_whisker_left F (as_iso (adjunction.of_right_adjoint R).counit) : _) ≪≫ F.right_unitor, end } /-- If `C` has colimits then any reflective subcategory has colimits. -/ lemma has_colimits_of_reflective (R : D ⥤ C) [reflective R] [has_colimits_of_size.{v u} C] : has_colimits_of_size.{v u} D := { has_colimits_of_shape := λ J 𝒥, by exactI has_colimits_of_shape_of_reflective R } /-- The reflector always preserves terminal objects. 
Note this in general doesn't apply to any other limit. -/ noncomputable def left_adjoint_preserves_terminal_of_reflective (R : D ⥤ C) [reflective R] : preserves_limits_of_shape (discrete.{v} pempty) (left_adjoint R) := { preserves_limit := λ K, let F := functor.empty.{v} D in begin apply preserves_limit_of_iso_diagram _ (functor.empty_ext (F ⋙ R) _), fsplit, intros c h, haveI : has_limit (F ⋙ R) := ⟨⟨⟨c,h⟩⟩⟩, haveI : has_limit F := has_limit_of_reflective F R, apply is_limit_change_empty_cone D (limit.is_limit F), apply (as_iso ((adjunction.of_right_adjoint R).counit.app _)).symm.trans, { apply (left_adjoint R).map_iso, letI := monadic_creates_limits.{v v} R, let := (category_theory.preserves_limit_of_creates_limit_and_has_limit F R).preserves, apply (this (limit.is_limit F)).cone_point_unique_up_to_iso h }, apply_instance, end } end end category_theory
import Data.Vect -- Exercise 1 myPlusCommutes : (n : Nat) -> (m : Nat) -> n + m = m + n myPlusCommutes Z m = rewrite plusZeroRightNeutral m in Refl myPlusCommutes (S k) m = rewrite myPlusCommutes k m in rewrite plusSuccRightSucc m k in Refl -- Exercise 2 reverseProof_nil : (acc : Vect n1 a) -> Vect (plus n1 0) a reverseProof_nil {n1} acc = rewrite plusZeroRightNeutral n1 in acc reverseProof_xs : Vect (S n + k) a -> Vect (plus n (S k)) a reverseProof_xs {n} {k} xs = rewrite sym (plusSuccRightSucc n k) in xs myReverse : Vect n a -> Vect n a myReverse xs = reverse' [] xs where reverse' : Vect n a -> Vect m a -> Vect (n + m) a reverse' acc [] = reverseProof_nil acc reverse' acc (x :: xs) = reverseProof_xs (reverse' (x::acc) xs)
(* Copyright (C) 2017 M.A.L. Marques This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. *) (* type: gga_exc *) exp4_a1 := 199.81: exp4_a2 := 4.3476: exp4_c1 := 0.8524: exp4_c2 := 1.2264: # This is Eq. (40) of the paper. exp4_f0 := s -> exp4_c1*(1 - exp(-exp4_a1*s^2)) + exp4_c2*(1 - exp(-exp4_a2*s^4)): exp4_f := x -> exp4_f0(X2S*x): f := (rs, zeta, xt, xs0, xs1) -> gga_kinetic(exp4_f, rs, zeta, xs0, xs1):
-- Generalized variables in datatype (and record) parameters module _ where open import Agda.Primitive open import Agda.Builtin.Nat module NotParameterised where variable ℓ : Level A : Set ℓ x y : A m n : Nat data Vec (A : Set ℓ) : Nat → Set ℓ where [] : Vec A zero _∷_ : A → Vec A n → Vec A (suc n) variable xs : Vec A n -- n should be generalized as an index here data All (P : A → Set ℓ) : Vec A n → Set ℓ where [] : All P [] _∷_ : P x → All P xs → All P (x ∷ xs) infix 2 _∈_ -- need an occurrence of ℓ in the params to not generalize it as an index, -- so we bind A explicitly data _∈_ {A : Set ℓ} (x : A) : Vec A n → Set ℓ where zero : x ∈ x ∷ xs suc : x ∈ xs → x ∈ y ∷ xs lookup : {P : A → Set ℓ} → All P xs → x ∈ xs → P x lookup (x ∷ _) zero = x lookup (_ ∷ xs) (suc i) = lookup xs i -- Check that we can do the same in a parameterised module module Parameterised (Dummy : Set) where variable ℓ : Level A : Set ℓ x y : A m n : Nat data Vec (A : Set ℓ) : Nat → Set ℓ where [] : Vec A zero _∷_ : A → Vec A n → Vec A (suc n) variable xs : Vec A n -- n should be generalized as an index here data All (P : A → Set ℓ) : Vec A n → Set ℓ where [] : All P [] _∷_ : P x → All P xs → All P (x ∷ xs) infix 2 _∈_ -- need an occurrence of ℓ in the params to not generalize it as an index, -- so we bind A explicitly data _∈_ {A : Set ℓ} (x : A) : Vec A n → Set ℓ where zero : x ∈ x ∷ xs suc : x ∈ xs → x ∈ y ∷ xs lookup : {P : A → Set ℓ} → All P xs → x ∈ xs → P x lookup (x ∷ _) zero = x lookup (_ ∷ xs) (suc i) = lookup xs i
module SkepticLang.JSONUtils import Language.JSON %access public export ||| Gets the value of an attribute of a JSON object. ||| ||| @json the JSON object ||| @key the name of the attribute getObjAttr : (json : JSON) -> (key : String) -> Maybe JSON getObjAttr (JObject xs) key = getObjAttr' xs key where getObjAttr' : (json : List (String, JSON)) -> (key : String) -> Maybe JSON getObjAttr' ((a, b) :: xs) key = if a == key then Just b else getObjAttr' xs key getObjAttr' [] _ = Nothing getObjAttr _ _ = Nothing
function ball_on_support(obj::SupportBallGenerator, arg0::List) return jcall(obj, "ballOnSupport", EnclosingBall, (List,), arg0) end
lemma filterlim_times_pos: "LIM x F1. c * f x :> at_right l" if "filterlim f (at_right p) F1" "0 < c" "l = c * p" for c::"'a::{linordered_field, linorder_topology}"
State Before: α : Type u_1 E : Type u_2 F : Type ?u.195874 m0 : MeasurableSpace α inst✝⁵ : NormedAddCommGroup E inst✝⁴ : NormedSpace ℝ E inst✝³ : CompleteSpace E inst✝² : NormedAddCommGroup F inst✝¹ : NormedSpace ℝ F inst✝ : CompleteSpace F μ : Measure α s✝ : Set E f : α → E s : Set α ⊢ (⨍ (x : α) in s, f x ∂μ) = (ENNReal.toReal (↑↑μ s))⁻¹ • ∫ (x : α) in s, f x ∂μ State After: no goals Tactic: rw [average_eq, restrict_apply_univ]
/- Copyright (c) 2022 Rémi Bottinelli. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Rémi Bottinelli -/ import category_theory.groupoid import combinatorics.quiver.basic /-! > THIS FILE IS SYNCHRONIZED WITH MATHLIB4. > Any changes to this file require a corresponding PR to mathlib4. This file defines a few basic properties of groupoids. -/ namespace category_theory namespace groupoid variables (C : Type*) [groupoid C] section thin lemma is_thin_iff : quiver.is_thin C ↔ ∀ c : C, subsingleton (c ⟶ c) := begin refine ⟨λ h c, h c c, λ h c d, subsingleton.intro $ λ f g, _⟩, haveI := h d, calc f = f ≫ (inv g ≫ g) : by simp only [inv_eq_inv, is_iso.inv_hom_id, category.comp_id] ... = f ≫ (inv f ≫ g) : by congr ... = g : by simp only [inv_eq_inv, is_iso.hom_inv_id_assoc], end end thin section disconnected /-- A subgroupoid is totally disconnected if it only has loops. -/ def is_totally_disconnected := ∀ (c d : C), (c ⟶ d) → c = d end disconnected end groupoid end category_theory
/- Copyright (c) 2021 Noam Atar. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Noam Atar -/ import order.basic import order.ideal import order.pfilter /-! # Prime ideals ## Main definitions Throughout this file, `P` is at least a preorder, but some sections require more structure, such as a bottom element, a top element, or a join-semilattice structure. - `order.ideal.prime_pair`: A pair of an `ideal` and a `pfilter` which form a partition of `P`. This is useful as giving the data of a prime ideal is the same as giving the data of a prime filter. - `order.ideal.is_prime`: a predicate for prime ideals. Dual to the notion of a prime filter. - `order.pfilter.is_prime`: a predicate for prime filters. Dual to the notion of a prime ideal. ## References - <https://en.wikipedia.org/wiki/Ideal_(order_theory)> ## Tags ideal, prime -/ open order.pfilter namespace order variables {P : Type*} namespace ideal /-- A pair of an `ideal` and a `pfilter` which form a partition of `P`. -/ @[nolint has_inhabited_instance] structure prime_pair (P : Type*) [preorder P] := (I : ideal P) (F : pfilter P) (is_compl_I_F : is_compl (I : set P) F) namespace prime_pair variables [preorder P] (IF : prime_pair P) lemma compl_I_eq_F : (IF.I : set P)ᶜ = IF.F := IF.is_compl_I_F.compl_eq lemma compl_F_eq_I : (IF.F : set P)ᶜ = IF.I := IF.is_compl_I_F.eq_compl.symm lemma I_is_proper : is_proper IF.I := begin cases IF.F.nonempty, apply is_proper_of_not_mem (_ : w ∉ IF.I), rwa ← IF.compl_I_eq_F at h, end lemma disjoint : disjoint (IF.I : set P) IF.F := IF.is_compl_I_F.disjoint lemma I_union_F : (IF.I : set P) ∪ IF.F = set.univ := IF.is_compl_I_F.sup_eq_top lemma F_union_I : (IF.F : set P) ∪ IF.I = set.univ := IF.is_compl_I_F.symm.sup_eq_top end prime_pair /-- An ideal `I` is prime if its complement is a filter. -/ @[mk_iff] class is_prime [preorder P] (I : ideal P) extends is_proper I : Prop := (compl_filter : is_pfilter (I : set P)ᶜ) section preorder variable [preorder P] /-- Create an element of type `order.ideal.prime_pair` from an ideal satisfying the predicate `order.ideal.is_prime`. -/ def is_prime.to_prime_pair {I : ideal P} (h : is_prime I) : prime_pair P := { I := I, F := h.compl_filter.to_pfilter, is_compl_I_F := is_compl_compl } lemma prime_pair.I_is_prime (IF : prime_pair P) : is_prime IF.I := { compl_filter := by { rw IF.compl_I_eq_F, exact IF.F.is_pfilter }, ..IF.I_is_proper } end preorder section semilattice_inf variables [semilattice_inf P] {x y : P} {I : ideal P} lemma is_prime.mem_or_mem (hI : is_prime I) {x y : P} : x ⊓ y ∈ I → x ∈ I ∨ y ∈ I := begin contrapose!, let F := hI.compl_filter.to_pfilter, show x ∈ F ∧ y ∈ F → x ⊓ y ∈ F, exact λ h, inf_mem _ _ h.1 h.2, end lemma is_prime.of_mem_or_mem [is_proper I] (hI : ∀ {x y : P}, x ⊓ y ∈ I → x ∈ I ∨ y ∈ I) : is_prime I := begin rw is_prime_iff, use ‹_›, apply is_pfilter.of_def, { exact set.nonempty_compl.2 (I.is_proper_iff.1 ‹_›) }, { intros x _ y _, refine ⟨x ⊓ y, _, inf_le_left, inf_le_right⟩, have := mt hI, tauto! 
}, { exact @mem_compl_of_ge _ _ _ } end lemma is_prime_iff_mem_or_mem [is_proper I] : is_prime I ↔ ∀ {x y : P}, x ⊓ y ∈ I → x ∈ I ∨ y ∈ I := ⟨is_prime.mem_or_mem, is_prime.of_mem_or_mem⟩ end semilattice_inf section distrib_lattice variables [distrib_lattice P] {I : ideal P} @[priority 100] instance is_maximal.is_prime [is_maximal I] : is_prime I := begin rw is_prime_iff_mem_or_mem, intros x y, contrapose!, rintro ⟨hx, hynI⟩ hxy, apply hynI, let J := I ⊔ principal x, have hJuniv : (J : set P) = set.univ := is_maximal.maximal_proper (lt_sup_principal_of_not_mem ‹_›), have hyJ : y ∈ ↑J := set.eq_univ_iff_forall.mp hJuniv y, rw coe_sup_eq at hyJ, rcases hyJ with ⟨a, ha, b, hb, hy⟩, rw hy, apply sup_mem _ _ ha, refine I.mem_of_le (le_inf hb _) hxy, rw hy, exact le_sup_right end end distrib_lattice end ideal namespace pfilter variable [preorder P] /-- A filter `F` is prime if its complement is an ideal. -/ @[mk_iff] class is_prime (F : pfilter P) : Prop := (compl_ideal : is_ideal (F : set P)ᶜ) /-- Create an element of type `order.ideal.prime_pair` from a filter satisfying the predicate `order.pfilter.is_prime`. -/ def is_prime.to_prime_pair {F : pfilter P} (h : is_prime F) : ideal.prime_pair P := { I := h.compl_ideal.to_ideal, F := F, is_compl_I_F := is_compl_compl.symm } lemma _root_.order.ideal.prime_pair.F_is_prime (IF : ideal.prime_pair P) : is_prime IF.F := { compl_ideal := by { rw IF.compl_F_eq_I, exact IF.I.is_ideal } } end pfilter end order
State Before: C : Type u inst✝¹ : Category C X✝ Y✝ Z✝ : C f✝ g✝ : X✝ ⟶ Y✝ h✝ : Y✝ ⟶ Z✝ X Y Z : C f : X ⟶ Y g : Y ⟶ Z h : X ⟶ Z inst✝ : IsIso f hh : IsIso h w : f ≫ g = h ⊢ IsIso g State After: C : Type u inst✝¹ : Category C X✝ Y✝ Z✝ : C f✝ g✝ : X✝ ⟶ Y✝ h✝ : Y✝ ⟶ Z✝ X Y Z : C f : X ⟶ Y g : Y ⟶ Z h : X ⟶ Z inst✝ : IsIso f hh : IsIso (f ≫ g) w : f ≫ g = h ⊢ IsIso g Tactic: rw [← w] at hh State Before: C : Type u inst✝¹ : Category C X✝ Y✝ Z✝ : C f✝ g✝ : X✝ ⟶ Y✝ h✝ : Y✝ ⟶ Z✝ X Y Z : C f : X ⟶ Y g : Y ⟶ Z h : X ⟶ Z inst✝ : IsIso f hh : IsIso (f ≫ g) w : f ≫ g = h ⊢ IsIso g State After: C : Type u inst✝¹ : Category C X✝ Y✝ Z✝ : C f✝ g✝ : X✝ ⟶ Y✝ h✝ : Y✝ ⟶ Z✝ X Y Z : C f : X ⟶ Y g : Y ⟶ Z h : X ⟶ Z inst✝ : IsIso f hh : IsIso (f ≫ g) w : f ≫ g = h this : IsIso (f ≫ g) ⊢ IsIso g Tactic: haveI := hh State Before: C : Type u inst✝¹ : Category C X✝ Y✝ Z✝ : C f✝ g✝ : X✝ ⟶ Y✝ h✝ : Y✝ ⟶ Z✝ X Y Z : C f : X ⟶ Y g : Y ⟶ Z h : X ⟶ Z inst✝ : IsIso f hh : IsIso (f ≫ g) w : f ≫ g = h this : IsIso (f ≫ g) ⊢ IsIso g State After: no goals Tactic: exact of_isIso_comp_left f g
theory SN imports Lam_Funs begin text \<open>Strong Normalisation proof from the Proofs and Types book\<close> section \<open>Beta Reduction\<close> lemma subst_rename: assumes a: "c\<sharp>t1" shows "t1[a::=t2] = ([(c,a)]\<bullet>t1)[c::=t2]" using a by (nominal_induct t1 avoiding: a c t2 rule: lam.strong_induct) (auto simp add: calc_atm fresh_atm abs_fresh) lemma forget: assumes a: "a\<sharp>t1" shows "t1[a::=t2] = t1" using a by (nominal_induct t1 avoiding: a t2 rule: lam.strong_induct) (auto simp add: abs_fresh fresh_atm) lemma fresh_fact: fixes a::"name" assumes a: "a\<sharp>t1" "a\<sharp>t2" shows "a\<sharp>t1[b::=t2]" using a by (nominal_induct t1 avoiding: a b t2 rule: lam.strong_induct) (auto simp add: abs_fresh fresh_atm) lemma fresh_fact': fixes a::"name" assumes a: "a\<sharp>t2" shows "a\<sharp>t1[a::=t2]" using a by (nominal_induct t1 avoiding: a t2 rule: lam.strong_induct) (auto simp add: abs_fresh fresh_atm) lemma subst_lemma: assumes a: "x\<noteq>y" and b: "x\<sharp>L" shows "M[x::=N][y::=L] = M[y::=L][x::=N[y::=L]]" using a b by (nominal_induct M avoiding: x y N L rule: lam.strong_induct) (auto simp add: fresh_fact forget) lemma id_subs: shows "t[x::=Var x] = t" by (nominal_induct t avoiding: x rule: lam.strong_induct) (simp_all add: fresh_atm) lemma lookup_fresh: fixes z::"name" assumes "z\<sharp>\<theta>" "z\<sharp>x" shows "z\<sharp> lookup \<theta> x" using assms by (induct rule: lookup.induct) (auto simp add: fresh_list_cons) lemma lookup_fresh': assumes "z\<sharp>\<theta>" shows "lookup \<theta> z = Var z" using assms by (induct rule: lookup.induct) (auto simp add: fresh_list_cons fresh_prod fresh_atm) lemma psubst_subst: assumes h:"c\<sharp>\<theta>" shows "(\<theta><t>)[c::=s] = ((c,s)#\<theta>)<t>" using h by (nominal_induct t avoiding: \<theta> c s rule: lam.strong_induct) (auto simp add: fresh_list_cons fresh_atm forget lookup_fresh lookup_fresh') inductive Beta :: "lam\<Rightarrow>lam\<Rightarrow>bool" (" _ \<longrightarrow>\<^sub>\<beta> _" [80,80] 80) where b1[intro!]: "s1 \<longrightarrow>\<^sub>\<beta> s2 \<Longrightarrow> App s1 t \<longrightarrow>\<^sub>\<beta> App s2 t" | b2[intro!]: "s1\<longrightarrow>\<^sub>\<beta>s2 \<Longrightarrow> App t s1 \<longrightarrow>\<^sub>\<beta> App t s2" | b3[intro!]: "s1\<longrightarrow>\<^sub>\<beta>s2 \<Longrightarrow> Lam [a].s1 \<longrightarrow>\<^sub>\<beta> Lam [a].s2" | b4[intro!]: "a\<sharp>s2 \<Longrightarrow> App (Lam [a].s1) s2\<longrightarrow>\<^sub>\<beta> (s1[a::=s2])" equivariance Beta nominal_inductive Beta by (simp_all add: abs_fresh fresh_fact') lemma beta_preserves_fresh: fixes a::"name" assumes a: "t\<longrightarrow>\<^sub>\<beta> s" shows "a\<sharp>t \<Longrightarrow> a\<sharp>s" using a apply(nominal_induct t s avoiding: a rule: Beta.strong_induct) apply(auto simp add: abs_fresh fresh_fact fresh_atm) done lemma beta_abs: assumes a: "Lam [a].t\<longrightarrow>\<^sub>\<beta> t'" shows "\<exists>t''. 
t'=Lam [a].t'' \<and> t\<longrightarrow>\<^sub>\<beta> t''" proof - have "a\<sharp>Lam [a].t" by (simp add: abs_fresh) with a have "a\<sharp>t'" by (simp add: beta_preserves_fresh) with a show ?thesis by (cases rule: Beta.strong_cases[where a="a" and aa="a"]) (auto simp add: lam.inject abs_fresh alpha) qed lemma beta_subst: assumes a: "M \<longrightarrow>\<^sub>\<beta> M'" shows "M[x::=N]\<longrightarrow>\<^sub>\<beta> M'[x::=N]" using a by (nominal_induct M M' avoiding: x N rule: Beta.strong_induct) (auto simp add: fresh_atm subst_lemma fresh_fact) section \<open>types\<close> nominal_datatype ty = TVar "nat" | TArr "ty" "ty" (infix "\<rightarrow>" 200) lemma fresh_ty: fixes a ::"name" and \<tau> ::"ty" shows "a\<sharp>\<tau>" by (nominal_induct \<tau> rule: ty.strong_induct) (auto simp add: fresh_nat) (* valid contexts *) inductive valid :: "(name\<times>ty) list \<Rightarrow> bool" where v1[intro]: "valid []" | v2[intro]: "\<lbrakk>valid \<Gamma>;a\<sharp>\<Gamma>\<rbrakk>\<Longrightarrow> valid ((a,\<sigma>)#\<Gamma>)" equivariance valid (* typing judgements *) lemma fresh_context: fixes \<Gamma> :: "(name\<times>ty)list" and a :: "name" assumes a: "a\<sharp>\<Gamma>" shows "\<not>(\<exists>\<tau>::ty. (a,\<tau>)\<in>set \<Gamma>)" using a by (induct \<Gamma>) (auto simp add: fresh_prod fresh_list_cons fresh_atm) inductive typing :: "(name\<times>ty) list\<Rightarrow>lam\<Rightarrow>ty\<Rightarrow>bool" ("_ \<turnstile> _ : _" [60,60,60] 60) where t1[intro]: "\<lbrakk>valid \<Gamma>; (a,\<tau>)\<in>set \<Gamma>\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> Var a : \<tau>" | t2[intro]: "\<lbrakk>\<Gamma> \<turnstile> t1 : \<tau>\<rightarrow>\<sigma>; \<Gamma> \<turnstile> t2 : \<tau>\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> App t1 t2 : \<sigma>" | t3[intro]: "\<lbrakk>a\<sharp>\<Gamma>;((a,\<tau>)#\<Gamma>) \<turnstile> t : \<sigma>\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> Lam [a].t : \<tau>\<rightarrow>\<sigma>" equivariance typing nominal_inductive typing by (simp_all add: abs_fresh fresh_ty) subsection \<open>a fact about beta\<close> definition "NORMAL" :: "lam \<Rightarrow> bool" where "NORMAL t \<equiv> \<not>(\<exists>t'. t\<longrightarrow>\<^sub>\<beta> t')" lemma NORMAL_Var: shows "NORMAL (Var a)" proof - { assume "\<exists>t'. (Var a) \<longrightarrow>\<^sub>\<beta> t'" then obtain t' where "(Var a) \<longrightarrow>\<^sub>\<beta> t'" by blast hence False by (cases) (auto) } thus "NORMAL (Var a)" by (auto simp add: NORMAL_def) qed text \<open>Inductive version of Strong Normalisation\<close> inductive SN :: "lam \<Rightarrow> bool" where SN_intro: "(\<And>t'. t \<longrightarrow>\<^sub>\<beta> t' \<Longrightarrow> SN t') \<Longrightarrow> SN t" lemma SN_preserved: assumes a: "SN t1" "t1\<longrightarrow>\<^sub>\<beta> t2" shows "SN t2" using a by (cases) (auto) lemma double_SN_aux: assumes a: "SN a" and b: "SN b" and hyp: "\<And>x z. \<lbrakk>\<And>y. x \<longrightarrow>\<^sub>\<beta> y \<Longrightarrow> SN y; \<And>y. x \<longrightarrow>\<^sub>\<beta> y \<Longrightarrow> P y z; \<And>u. z \<longrightarrow>\<^sub>\<beta> u \<Longrightarrow> SN u; \<And>u. z \<longrightarrow>\<^sub>\<beta> u \<Longrightarrow> P x u\<rbrakk> \<Longrightarrow> P x z" shows "P a b" proof - from a have r: "\<And>b. 
SN b \<Longrightarrow> P a b" proof (induct a rule: SN.SN.induct) case (SN_intro x) note SNI' = SN_intro have "SN b" by fact thus ?case proof (induct b rule: SN.SN.induct) case (SN_intro y) show ?case apply (rule hyp) apply (erule SNI') apply (erule SNI') apply (rule SN.SN_intro) apply (erule SN_intro)+ done qed qed from b show ?thesis by (rule r) qed lemma double_SN[consumes 2]: assumes a: "SN a" and b: "SN b" and c: "\<And>x z. \<lbrakk>\<And>y. x \<longrightarrow>\<^sub>\<beta> y \<Longrightarrow> P y z; \<And>u. z \<longrightarrow>\<^sub>\<beta> u \<Longrightarrow> P x u\<rbrakk> \<Longrightarrow> P x z" shows "P a b" using a b c apply(rule_tac double_SN_aux) apply(assumption)+ apply(blast) done section \<open>Candidates\<close> nominal_primrec RED :: "ty \<Rightarrow> lam set" where "RED (TVar X) = {t. SN(t)}" | "RED (\<tau>\<rightarrow>\<sigma>) = {t. \<forall>u. (u\<in>RED \<tau> \<longrightarrow> (App t u)\<in>RED \<sigma>)}" by (rule TrueI)+ text \<open>neutral terms\<close> definition NEUT :: "lam \<Rightarrow> bool" where "NEUT t \<equiv> (\<exists>a. t = Var a) \<or> (\<exists>t1 t2. t = App t1 t2)" (* a slight hack to get the first element of applications *) (* this is needed to get (SN t) from SN (App t s) *) inductive FST :: "lam\<Rightarrow>lam\<Rightarrow>bool" (" _ \<guillemotright> _" [80,80] 80) where fst[intro!]: "(App t s) \<guillemotright> t" nominal_primrec fst_app_aux::"lam\<Rightarrow>lam option" where "fst_app_aux (Var a) = None" | "fst_app_aux (App t1 t2) = Some t1" | "fst_app_aux (Lam [x].t) = None" apply(finite_guess)+ apply(rule TrueI)+ apply(simp add: fresh_none) apply(fresh_guess)+ done definition fst_app_def[simp]: "fst_app t = the (fst_app_aux t)" lemma SN_of_FST_of_App: assumes a: "SN (App t s)" shows "SN (fst_app (App t s))" using a proof - from a have "\<forall>z. (App t s \<guillemotright> z) \<longrightarrow> SN z" by (induct rule: SN.SN.induct) (blast elim: FST.cases intro: SN_intro) then have "SN t" by blast then show "SN (fst_app (App t s))" by simp qed section \<open>Candidates\<close> definition "CR1" :: "ty \<Rightarrow> bool" where "CR1 \<tau> \<equiv> \<forall>t. (t\<in>RED \<tau> \<longrightarrow> SN t)" definition "CR2" :: "ty \<Rightarrow> bool" where "CR2 \<tau> \<equiv> \<forall>t t'. (t\<in>RED \<tau> \<and> t \<longrightarrow>\<^sub>\<beta> t') \<longrightarrow> t'\<in>RED \<tau>" definition "CR3_RED" :: "lam \<Rightarrow> ty \<Rightarrow> bool" where "CR3_RED t \<tau> \<equiv> \<forall>t'. t\<longrightarrow>\<^sub>\<beta> t' \<longrightarrow> t'\<in>RED \<tau>" definition "CR3" :: "ty \<Rightarrow> bool" where "CR3 \<tau> \<equiv> \<forall>t. (NEUT t \<and> CR3_RED t \<tau>) \<longrightarrow> t\<in>RED \<tau>" definition "CR4" :: "ty \<Rightarrow> bool" where "CR4 \<tau> \<equiv> \<forall>t. (NEUT t \<and> NORMAL t) \<longrightarrow>t\<in>RED \<tau>" lemma CR3_implies_CR4: assumes a: "CR3 \<tau>" shows "CR4 \<tau>" using a by (auto simp add: CR3_def CR3_RED_def CR4_def NORMAL_def) (* sub_induction in the arrow-type case for the next proof *) lemma sub_induction: assumes a: "SN(u)" and b: "u\<in>RED \<tau>" and c1: "NEUT t" and c2: "CR2 \<tau>" and c3: "CR3 \<sigma>" and c4: "CR3_RED t (\<tau>\<rightarrow>\<sigma>)" shows "(App t u)\<in>RED \<sigma>" using a b proof (induct) fix u assume as: "u\<in>RED \<tau>" assume ih: " \<And>u'. 
\<lbrakk>u \<longrightarrow>\<^sub>\<beta> u'; u' \<in> RED \<tau>\<rbrakk> \<Longrightarrow> App t u' \<in> RED \<sigma>" have "NEUT (App t u)" using c1 by (auto simp add: NEUT_def) moreover have "CR3_RED (App t u) \<sigma>" unfolding CR3_RED_def proof (intro strip) fix r assume red: "App t u \<longrightarrow>\<^sub>\<beta> r" moreover { assume "\<exists>t'. t \<longrightarrow>\<^sub>\<beta> t' \<and> r = App t' u" then obtain t' where a1: "t \<longrightarrow>\<^sub>\<beta> t'" and a2: "r = App t' u" by blast have "t'\<in>RED (\<tau>\<rightarrow>\<sigma>)" using c4 a1 by (simp add: CR3_RED_def) then have "App t' u\<in>RED \<sigma>" using as by simp then have "r\<in>RED \<sigma>" using a2 by simp } moreover { assume "\<exists>u'. u \<longrightarrow>\<^sub>\<beta> u' \<and> r = App t u'" then obtain u' where b1: "u \<longrightarrow>\<^sub>\<beta> u'" and b2: "r = App t u'" by blast have "u'\<in>RED \<tau>" using as b1 c2 by (auto simp add: CR2_def) with ih have "App t u' \<in> RED \<sigma>" using b1 by simp then have "r\<in>RED \<sigma>" using b2 by simp } moreover { assume "\<exists>x t'. t = Lam [x].t'" then obtain x t' where "t = Lam [x].t'" by blast then have "NEUT (Lam [x].t')" using c1 by simp then have "False" by (simp add: NEUT_def) then have "r\<in>RED \<sigma>" by simp } ultimately show "r \<in> RED \<sigma>" by (cases) (auto simp add: lam.inject) qed ultimately show "App t u \<in> RED \<sigma>" using c3 by (simp add: CR3_def) qed text \<open>properties of the candiadates\<close> lemma RED_props: shows "CR1 \<tau>" and "CR2 \<tau>" and "CR3 \<tau>" proof (nominal_induct \<tau> rule: ty.strong_induct) case (TVar a) { case 1 show "CR1 (TVar a)" by (simp add: CR1_def) next case 2 show "CR2 (TVar a)" by (auto intro: SN_preserved simp add: CR2_def) next case 3 show "CR3 (TVar a)" by (auto intro: SN_intro simp add: CR3_def CR3_RED_def) } next case (TArr \<tau>1 \<tau>2) { case 1 have ih_CR3_\<tau>1: "CR3 \<tau>1" by fact have ih_CR1_\<tau>2: "CR1 \<tau>2" by fact have "\<And>t. t \<in> RED (\<tau>1 \<rightarrow> \<tau>2) \<Longrightarrow> SN t" proof - fix t assume "t \<in> RED (\<tau>1 \<rightarrow> \<tau>2)" then have a: "\<forall>u. 
u \<in> RED \<tau>1 \<longrightarrow> App t u \<in> RED \<tau>2" by simp from ih_CR3_\<tau>1 have "CR4 \<tau>1" by (simp add: CR3_implies_CR4) moreover fix a have "NEUT (Var a)" by (force simp add: NEUT_def) moreover have "NORMAL (Var a)" by (rule NORMAL_Var) ultimately have "(Var a)\<in> RED \<tau>1" by (simp add: CR4_def) with a have "App t (Var a) \<in> RED \<tau>2" by simp hence "SN (App t (Var a))" using ih_CR1_\<tau>2 by (simp add: CR1_def) thus "SN t" by (auto dest: SN_of_FST_of_App) qed then show "CR1 (\<tau>1 \<rightarrow> \<tau>2)" unfolding CR1_def by simp next case 2 have ih_CR2_\<tau>2: "CR2 \<tau>2" by fact then show "CR2 (\<tau>1 \<rightarrow> \<tau>2)" unfolding CR2_def by auto next case 3 have ih_CR1_\<tau>1: "CR1 \<tau>1" by fact have ih_CR2_\<tau>1: "CR2 \<tau>1" by fact have ih_CR3_\<tau>2: "CR3 \<tau>2" by fact show "CR3 (\<tau>1 \<rightarrow> \<tau>2)" unfolding CR3_def proof (simp, intro strip) fix t u assume a1: "u \<in> RED \<tau>1" assume a2: "NEUT t \<and> CR3_RED t (\<tau>1 \<rightarrow> \<tau>2)" have "SN(u)" using a1 ih_CR1_\<tau>1 by (simp add: CR1_def) then show "(App t u)\<in>RED \<tau>2" using ih_CR2_\<tau>1 ih_CR3_\<tau>2 a1 a2 by (blast intro: sub_induction) qed } qed text \<open> the next lemma not as simple as on paper, probably because of the stronger double_SN induction \<close> lemma abs_RED: assumes asm: "\<forall>s\<in>RED \<tau>. t[x::=s]\<in>RED \<sigma>" shows "Lam [x].t\<in>RED (\<tau>\<rightarrow>\<sigma>)" proof - have b1: "SN t" proof - have "Var x\<in>RED \<tau>" proof - have "CR4 \<tau>" by (simp add: RED_props CR3_implies_CR4) moreover have "NEUT (Var x)" by (auto simp add: NEUT_def) moreover have "NORMAL (Var x)" by (auto elim: Beta.cases simp add: NORMAL_def) ultimately show "Var x\<in>RED \<tau>" by (simp add: CR4_def) qed then have "t[x::=Var x]\<in>RED \<sigma>" using asm by simp then have "t\<in>RED \<sigma>" by (simp add: id_subs) moreover have "CR1 \<sigma>" by (simp add: RED_props) ultimately show "SN t" by (simp add: CR1_def) qed show "Lam [x].t\<in>RED (\<tau>\<rightarrow>\<sigma>)" proof (simp, intro strip) fix u assume b2: "u\<in>RED \<tau>" then have b3: "SN u" using RED_props by (auto simp add: CR1_def) show "App (Lam [x].t) u \<in> RED \<sigma>" using b1 b3 b2 asm proof(induct t u rule: double_SN) fix t u assume ih1: "\<And>t'. \<lbrakk>t \<longrightarrow>\<^sub>\<beta> t'; u\<in>RED \<tau>; \<forall>s\<in>RED \<tau>. t'[x::=s]\<in>RED \<sigma>\<rbrakk> \<Longrightarrow> App (Lam [x].t') u \<in> RED \<sigma>" assume ih2: "\<And>u'. \<lbrakk>u \<longrightarrow>\<^sub>\<beta> u'; u'\<in>RED \<tau>; \<forall>s\<in>RED \<tau>. t[x::=s]\<in>RED \<sigma>\<rbrakk> \<Longrightarrow> App (Lam [x].t) u' \<in> RED \<sigma>" assume as1: "u \<in> RED \<tau>" assume as2: "\<forall>s\<in>RED \<tau>. t[x::=s]\<in>RED \<sigma>" have "CR3_RED (App (Lam [x].t) u) \<sigma>" unfolding CR3_RED_def proof(intro strip) fix r assume red: "App (Lam [x].t) u \<longrightarrow>\<^sub>\<beta> r" moreover { assume "\<exists>t'. 
t \<longrightarrow>\<^sub>\<beta> t' \<and> r = App (Lam [x].t') u" then obtain t' where a1: "t \<longrightarrow>\<^sub>\<beta> t'" and a2: "r = App (Lam [x].t') u" by blast have "App (Lam [x].t') u\<in>RED \<sigma>" using ih1 a1 as1 as2 apply(auto) apply(drule_tac x="t'" in meta_spec) apply(simp) apply(drule meta_mp) prefer 2 apply(auto)[1] apply(rule ballI) apply(drule_tac x="s" in bspec) apply(simp) apply(subgoal_tac "CR2 \<sigma>")(*A*) apply(unfold CR2_def)[1] apply(drule_tac x="t[x::=s]" in spec) apply(drule_tac x="t'[x::=s]" in spec) apply(simp add: beta_subst) (*A*) apply(simp add: RED_props) done then have "r\<in>RED \<sigma>" using a2 by simp } moreover { assume "\<exists>u'. u \<longrightarrow>\<^sub>\<beta> u' \<and> r = App (Lam [x].t) u'" then obtain u' where b1: "u \<longrightarrow>\<^sub>\<beta> u'" and b2: "r = App (Lam [x].t) u'" by blast have "App (Lam [x].t) u'\<in>RED \<sigma>" using ih2 b1 as1 as2 apply(auto) apply(drule_tac x="u'" in meta_spec) apply(simp) apply(drule meta_mp) apply(subgoal_tac "CR2 \<tau>") apply(unfold CR2_def)[1] apply(drule_tac x="u" in spec) apply(drule_tac x="u'" in spec) apply(simp) apply(simp add: RED_props) apply(simp) done then have "r\<in>RED \<sigma>" using b2 by simp } moreover { assume "r = t[x::=u]" then have "r\<in>RED \<sigma>" using as1 as2 by auto } ultimately show "r \<in> RED \<sigma>" (* one wants to use the strong elimination principle; for this one has to know that x\<sharp>u *) apply(cases) apply(auto simp add: lam.inject) apply(drule beta_abs) apply(auto)[1] apply(auto simp add: alpha subst_rename) done qed moreover have "NEUT (App (Lam [x].t) u)" unfolding NEUT_def by (auto) ultimately show "App (Lam [x].t) u \<in> RED \<sigma>" using RED_props by (simp add: CR3_def) qed qed qed abbreviation mapsto :: "(name\<times>lam) list \<Rightarrow> name \<Rightarrow> lam \<Rightarrow> bool" ("_ maps _ to _" [55,55,55] 55) where "\<theta> maps x to e \<equiv> (lookup \<theta> x) = e" abbreviation closes :: "(name\<times>lam) list \<Rightarrow> (name\<times>ty) list \<Rightarrow> bool" ("_ closes _" [55,55] 55) where "\<theta> closes \<Gamma> \<equiv> \<forall>x T. ((x,T) \<in> set \<Gamma> \<longrightarrow> (\<exists>t. \<theta> maps x to t \<and> t \<in> RED T))" lemma all_RED: assumes a: "\<Gamma> \<turnstile> t : \<tau>" and b: "\<theta> closes \<Gamma>" shows "\<theta><t> \<in> RED \<tau>" using a b proof(nominal_induct avoiding: \<theta> rule: typing.strong_induct) case (t3 a \<Gamma> \<sigma> t \<tau> \<theta>) \<comment> \<open>lambda case\<close> have ih: "\<And>\<theta>. \<theta> closes ((a,\<sigma>)#\<Gamma>) \<Longrightarrow> \<theta><t> \<in> RED \<tau>" by fact have \<theta>_cond: "\<theta> closes \<Gamma>" by fact have fresh: "a\<sharp>\<Gamma>" "a\<sharp>\<theta>" by fact+ from ih have "\<forall>s\<in>RED \<sigma>. ((a,s)#\<theta>)<t> \<in> RED \<tau>" using fresh \<theta>_cond fresh_context by simp then have "\<forall>s\<in>RED \<sigma>. 
\<theta><t>[a::=s] \<in> RED \<tau>" using fresh by (simp add: psubst_subst) then have "Lam [a].(\<theta><t>) \<in> RED (\<sigma> \<rightarrow> \<tau>)" by (simp only: abs_RED) then show "\<theta><(Lam [a].t)> \<in> RED (\<sigma> \<rightarrow> \<tau>)" using fresh by simp qed auto section \<open>identity substitution generated from a context \<Gamma>\<close> fun "id" :: "(name\<times>ty) list \<Rightarrow> (name\<times>lam) list" where "id [] = []" | "id ((x,\<tau>)#\<Gamma>) = (x,Var x)#(id \<Gamma>)" lemma id_maps: shows "(id \<Gamma>) maps a to (Var a)" by (induct \<Gamma>) (auto) lemma id_fresh: fixes a::"name" assumes a: "a\<sharp>\<Gamma>" shows "a\<sharp>(id \<Gamma>)" using a by (induct \<Gamma>) (auto simp add: fresh_list_nil fresh_list_cons) lemma id_apply: shows "(id \<Gamma>)<t> = t" by (nominal_induct t avoiding: \<Gamma> rule: lam.strong_induct) (auto simp add: id_maps id_fresh) lemma id_closes: shows "(id \<Gamma>) closes \<Gamma>" apply(auto) apply(simp add: id_maps) apply(subgoal_tac "CR3 T") \<comment> \<open>A\<close> apply(drule CR3_implies_CR4) apply(simp add: CR4_def) apply(drule_tac x="Var x" in spec) apply(force simp add: NEUT_def NORMAL_Var) \<comment> \<open>A\<close> apply(rule RED_props) done lemma typing_implies_RED: assumes a: "\<Gamma> \<turnstile> t : \<tau>" shows "t \<in> RED \<tau>" proof - have "(id \<Gamma>)<t>\<in>RED \<tau>" proof - have "(id \<Gamma>) closes \<Gamma>" by (rule id_closes) with a show ?thesis by (rule all_RED) qed thus"t \<in> RED \<tau>" by (simp add: id_apply) qed lemma typing_implies_SN: assumes a: "\<Gamma> \<turnstile> t : \<tau>" shows "SN(t)" proof - from a have "t \<in> RED \<tau>" by (rule typing_implies_RED) moreover have "CR1 \<tau>" by (rule RED_props) ultimately show "SN(t)" by (simp add: CR1_def) qed end
Formal statement is: lemma lim_within_interior: "x \<in> interior S \<Longrightarrow> (f \<longlongrightarrow> l) (at x within S) \<longleftrightarrow> (f \<longlongrightarrow> l) (at x)" Informal statement is: If $x$ is an interior point of $S$, then the limit of $f$ at $x$ within $S$ is the same as the limit of $f$ at $x$.
module Implicit

import Data.Vect

-- n, m, elem are unbound implicits.
-- The signatures below are converted internally by Idris into the
-- bound-implicit forms that follow.
-- reverse : List elem -> List elem
-- append : Vect n elem -> Vect m elem -> Vect (n + m) elem

-- n, m, elem are bound implicits
reverse : {elem : Type} -> List elem -> List elem
append : {elem : Type} -> {n : Nat} -> {m : Nat} -> Vect n elem -> Vect m elem -> Vect (n + m) elem

my_length : Vect n elem -> Nat
my_length [] = Z
my_length (x :: xs) = 1 + length xs

-- We can refer to n directly using implicit syntax
my_implicit_length : Vect n elem -> Nat
my_implicit_length {n} xs = n
Formal statement is: lemma cball_eq_sing: fixes x :: "'a::{metric_space,perfect_space}" shows "cball x e = {x} \<longleftrightarrow> e = 0" Informal statement is: In a perfect metric space (a metric space with no isolated points), for any point $x$, the closed ball of radius $e$ centered at $x$ is equal to $\{x\}$ if and only if $e = 0$.
function x = conj_gradient_method(A, b)
% Conjugate gradient method for A*x = b, with A symmetric positive definite.

%Parameters
n = 20;                 % maximum number of iterations

%Initialization
clc
format short e
tol = 1e-2;
x = zeros(size(b));     % starting guess
r = A*x-b;              % residual
p = -r;                 % initial search direction
k = 0;
abs_r = sqrt(r'*r);

while abs_r > tol && k < n
    alpha = (r'*r)/(p'*A*p);        % exact line-search step length
    x = x+alpha*p;
    r_old = r;
    r = r + alpha*A*p;              % updated residual
    beta = (r'*r)/(r_old'*r_old);   % coefficient for the new conjugate direction
    p = -r+beta*p;
    k = k+1;
    abs_r = sqrt(r'*r);
    % disp([k abs_r alpha]);
end
(* Property from Case-Analysis for Rippling and Inductive Proof, Moa Johansson, Lucas Dixon and Alan Bundy, ITP 2010. This Isabelle theory is produced using the TIP tool offered at the following website: https://github.com/tip-org/tools This file was originally provided as part of TIP benchmark at the following website: https://github.com/tip-org/benchmarks Yutaka Nagashima at CIIRC, CTU changed the TIP output theory file slightly to make it compatible with Isabelle2017.*) theory TIP_prop_71 imports "../../Test_Base" begin datatype 'a list = nil2 | cons2 "'a" "'a list" datatype Nat = Z | S "Nat" fun x :: "Nat => Nat => bool" where "x (Z) (Z) = True" | "x (Z) (S z2) = False" | "x (S x2) (Z) = False" | "x (S x2) (S y2) = x x2 y2" fun elem :: "Nat => Nat list => bool" where "elem y (nil2) = False" | "elem y (cons2 z2 xs) = (if x y z2 then True else elem y xs)" fun t2 :: "Nat => Nat => bool" where "t2 y (Z) = False" | "t2 (Z) (S z2) = True" | "t2 (S x2) (S z2) = t2 x2 z2" fun ins :: "Nat => Nat list => Nat list" where "ins y (nil2) = cons2 y (nil2)" | "ins y (cons2 z2 xs) = (if t2 y z2 then cons2 y (cons2 z2 xs) else cons2 z2 (ins y xs))" theorem property0 : "((~ (x y z)) ==> ((elem y (ins z xs)) = (elem y xs)))" oops end
# Upper envelope

This notebook shows how to use the **upperenvelope** module from the **consav** package.

# Model

Consider a **standard consumption-saving** model

\begin{align}
v_{t}(m_{t})&=\max_{c_{t}}\frac{c_{t}^{1-\rho}}{1-\rho}+\beta v_{t+1}(m_{t+1})
\end{align}

where

\begin{align}
a_{t} &=m_{t}-c_{t} \\
m_{t+1} &=Ra_{t}+y
\end{align}

The **Euler equation** is

\begin{align}
c_{t}^{-\rho} &=\beta Rc_{t+1}^{-\rho}
\end{align}

Assume that the **t+1 consumption and value functions** are given by

\begin{align}
c_{t+1}(m_{t}) &= \sqrt{m_{t}}-\eta_{c} \cdot 1\{m_{t}\geq\underline{m}\} \\
v_{t+1}(m_{t}) &= \sqrt{m_{t}}+\eta_{v}\sqrt{m_{t}-\underline{m}} \cdot 1\{m_{t}\geq\underline{m}\}
\end{align}

This **notebook** shows how to find the **t consumption and value function** using an **upper envelope** algorithm despite the **kink** in the next-period value function.

# Algorithm

1. Specify an increasing grid of $m_t$ indexed by $j$, such as $\{m_1,m_2,\dots,m_{\#_m}\}$ <br>
2. Specify an increasing grid of $a_t$ indexed by $i$, such as $\{a^1,a^2,\dots,a^{\#_a}\}$ <br>
3. For each $i$ compute (using linear interpolation): <br>
 a. Post-decision value function: $w^i = \beta \breve{v}_{t+1}(Ra^i+y)$ <br>
 b. Post-decision marginal value of cash: $q^i = \beta R\breve{c}_{t+1}(Ra^i+y)^{-\rho}$ <br>
 c. Consumption: $c^i = (q^i)^{-1/\rho}$ <br>
 d. Cash-on-hand: $m^i = a^i + c^i$ <br>
4. For each $j$: <br>
 a. Constraint: If $m_j < m^1$ then set $c_j = m_j$ <br>
 b. Find best segment: If $m_j \geq m^1$ then set $c_j = c_j^{i^{\star}(j)}$ where

$$
\begin{align}
c_j^i=c^i+\frac{c^{i+1}-c^i}{m^{i+1}-m^i}(m_j-m^i)
\end{align}
$$

and

$$
\begin{align}
i^{\star}(j)=\arg\max_{i\in\{1,\dots,\#_a-1\}}\frac{(c_{j}^{i})^{1-\rho}}{1-\rho}+\beta w_{j}^{i}
\end{align}
$$

subject to

$$
\begin{align}
m_{j} &\in [m^{i},m^{i+1}] \\
a_{j}^{i} &= m_{j}-c_{j}^{i} \\
w_{j}^{i} &= w^{i}+\frac{w^{i+1}-w^{i}}{a^{i+1}-a^{i}}(a_{j}^{i}-a^{i})
\end{align}
$$

# Setup

```python
import numpy as np
from numba import njit

import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
prop_cycle = plt.rcParams["axes.prop_cycle"]
colors = prop_cycle.by_key()["color"]
import ipywidgets as widgets
```

Choose parameters and create grids:

```python
def setup():

    par = dict()

    # a. model parameters
    par['beta'] = 0.96
    par['rho'] = 2
    par['R'] = 1.02
    par['y'] = 1

    # b. cash-on-hand (exogenous grid)
    par['Nm'] = 10000
    par['m_max'] = 10

    # c. end-of-period assets (exogenous grid)
    par['Na'] = 1000
    par['a_max'] = 10

    # d. next-period consumption and value function
    par['eta_v'] = 0.5
    par['eta_c'] = 0.5
    par['x_ubar'] = 5

    return par

def create_grids(par):

    par['grid_a'] = np.linspace(0,par['a_max'],par['Na'])
    par['grid_m'] = np.linspace(1e-8,par['m_max'],par['Nm'])

    return par

par = setup()
par = create_grids(par)
```

# Next-period functions

Calculate the next-period consumption and value functions:

```python
sol = dict()

# a. consumption function
sol['c_next'] = np.sqrt(par['grid_m']) - par['eta_c']*(par['grid_m'] >= par['x_ubar'])

# b. value function
sol['v_next'] = np.sqrt(par['grid_m']) + par['eta_v']*np.sqrt(np.fmax(par['grid_m']-par['x_ubar'],0))*(par['grid_m'] >= par['x_ubar'])
```

## Figures

Plot them to see the jump in consumption and the kink in the value function.

```python
# a. next-period consumption function
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(par['grid_m'],sol['c_next'],'o',markersize=0.5)
ax.set_title('next-period consumption function')
ax.set_xlabel('$m_t$')
ax.set_ylabel('$c_t$')

# b. next-period value function
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(par['grid_m'],sol['v_next'],'o',markersize=0.5)
ax.set_title('next-period value function')
ax.set_xlabel('$m_t$')
ax.set_ylabel('$v_t$');
```

# EGM

```python
from consav import linear_interp # linear interpolation
```

Apply the EGM algorithm.

```python
@njit
def u(c,rho):
    return c**(1-rho)/(1-rho)

def marg_u(c,par):
    return c**(-par['rho'])

def inv_marg_u(u,par):
    return u**(-1.0/par['rho'])

def EGM(par,sol):

    # a. next-period cash-on-hand
    m_plus = par['R']*par['grid_a'] + par['y']

    # b. post-decision value function
    sol['w_vec'] = np.empty(m_plus.size)
    linear_interp.interp_1d_vec(par['grid_m'],sol['v_next'],m_plus,sol['w_vec'])

    # c. post-decision marginal value of cash
    c_next_interp = np.empty(m_plus.size)
    linear_interp.interp_1d_vec(par['grid_m'],sol['c_next'],m_plus,c_next_interp)
    q = par['beta']*par['R']*marg_u(c_next_interp,par)

    # d. EGM
    sol['c_vec'] = inv_marg_u(q,par)
    sol['m_vec'] = par['grid_a'] + sol['c_vec']

    return sol

sol = EGM(par,sol)
```

## Figures

Plot the result of the EGM algorithm to see that it does not define a single-valued consumption function.

```python
# a. raw consumption function
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(sol['m_vec'],sol['c_vec'],'o',markersize=0.5)
ax.set_title('raw consumption points')
ax.set_xlabel('$m_t$')
ax.set_ylabel('$c_t$')

# b. raw value function
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(sol['m_vec'],sol['w_vec'],'o',markersize=0.5)
ax.set_title('raw value function points')
ax.set_xlabel('$m_t$')
ax.set_ylabel('$w_t$');
```

# Upper envelope

```python
from consav import upperenvelope

# a. create
myupperenvelope = upperenvelope.create(u) # where u is the utility function

# b. apply
c_ast_vec = np.empty(par['grid_m'].size) # output
v_ast_vec = np.empty(par['grid_m'].size) # output

myupperenvelope(par['grid_a'],sol['m_vec'],sol['c_vec'],sol['w_vec'],par['grid_m'],c_ast_vec,v_ast_vec,par['rho'])
```

## Figures

```python
# a. consumption function
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(par['grid_m'],c_ast_vec,'o',markersize=0.5)
ax.set_title('consumption function')
ax.set_xlabel('$m_t$')
ax.set_ylabel('$c_t$')

# b. value function
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(par['grid_m'],v_ast_vec,'o',markersize=0.5)
ax.set_title('value function')
ax.set_xlabel('$m_t$')
ax.set_ylabel('$v_t$')
ax.set_ylim((-5,5));
```
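The `upperenvelope.create` call above hides the search described in step 4 of the algorithm section. For reference, here is a minimal pure-Python sketch of that step. It is not the consav implementation (which is numba-compiled and handles more cases); the function name `upper_envelope_sketch` and its explicit `beta` argument are assumptions for this illustration, and, unlike the algorithm text where $w^i$ already contains $\beta$, it expects undiscounted continuation values in `w_vec` (as computed in `EGM` above) and applies the discount factor when forming the value.

```python
import numpy as np

def upper_envelope_sketch(grid_a,m_vec,c_vec,w_vec,grid_m,rho,beta):
    """Step 4 of the algorithm, written for readability rather than speed (hypothetical helper)."""

    def u(c): # CRRA utility
        return c**(1-rho)/(1-rho)

    c_ast = np.zeros(grid_m.size)
    v_ast = -np.inf*np.ones(grid_m.size)

    for j,m in enumerate(grid_m):

        # 4a. constrained: consume everything (assumes grid_a[0] == 0, as in the setup above)
        if m < m_vec[0]:
            c_ast[j] = m
            v_ast[j] = u(m) + beta*w_vec[0]
            continue

        # 4b. search all EGM segments and keep the best one containing m
        for i in range(m_vec.size-1):

            m_lo,m_hi = m_vec[i],m_vec[i+1]
            if m_hi == m_lo or not (min(m_lo,m_hi) <= m <= max(m_lo,m_hi)):
                continue # m_j must lie in [m^i,m^{i+1}] (segments can be non-monotone)

            # interpolate consumption along the segment, then the continuation value over a
            c = c_vec[i] + (c_vec[i+1]-c_vec[i])*(m-m_lo)/(m_hi-m_lo)
            a = m - c
            w = w_vec[i] + (w_vec[i+1]-w_vec[i])*(a-grid_a[i])/(grid_a[i+1]-grid_a[i])

            v = u(c) + beta*w
            if v > v_ast[j]:
                v_ast[j] = v
                c_ast[j] = c

    return c_ast,v_ast
```

Calling `upper_envelope_sketch(par['grid_a'],sol['m_vec'],sol['c_vec'],sol['w_vec'],par['grid_m'],par['rho'],par['beta'])` should trace out essentially the same consumption function as the consav call above, just much more slowly.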
(* Author: Tobias Nipkow, 2007 *) section\<open>Quantifier elimination\<close> theory QE imports Logic begin text\<open>\noindent The generic, i.e.\ theory-independent part of quantifier elimination. Both DNF and an NNF-based procedures are defined and proved correct.\<close> notation (input) Collect ("|_|") subsection\<open>No Equality\<close> context ATOM begin subsubsection\<open>DNF-based\<close> text\<open>\noindent Taking care of atoms independent of variable 0:\<close> definition "qelim qe as = (let qf = qe [a\<leftarrow>as. depends\<^sub>0 a]; indep = [Atom(decr a). a\<leftarrow>as, \<not> depends\<^sub>0 a] in and qf (list_conj indep))" abbreviation is_dnf_qe :: "('a list \<Rightarrow> 'a fm) \<Rightarrow> 'a list \<Rightarrow> bool" where "is_dnf_qe qe as \<equiv> \<forall>xs. I(qe as) xs = (\<exists>x.\<forall>a\<in>set as. I\<^sub>a a (x#xs))" text\<open>\noindent Note that the exported abbreviation will have as a first parameter the type @{typ"'b"} of values \<open>xs\<close> ranges over.\<close> lemma I_qelim: assumes qe: "\<And>as. (\<forall>a \<in> set as. depends\<^sub>0 a) \<Longrightarrow> is_dnf_qe qe as" shows "is_dnf_qe (qelim qe) as" (is "\<forall>xs. ?P xs") proof fix xs let ?as0 = "filter depends\<^sub>0 as" let ?as1 = "filter (Not \<circ> depends\<^sub>0) as" have "I (qelim qe as) xs = (I (qe ?as0) xs \<and> (\<forall>a\<in>set(map decr ?as1). I\<^sub>a a xs))" (is "_ = (_ \<and> ?B)") by(force simp add:qelim_def) also have "\<dots> = ((\<exists>x. \<forall>a \<in> set ?as0. I\<^sub>a a (x#xs)) \<and> ?B)" by(simp add:qe not_dep_decr) also have "\<dots> = (\<exists>x. (\<forall>a \<in> set ?as0. I\<^sub>a a (x#xs)) \<and> ?B)" by blast also have "?B = (\<forall>a \<in> set ?as1. I\<^sub>a (decr a) xs)" by simp also have "(\<exists>x. (\<forall>a \<in> set ?as0. I\<^sub>a a (x#xs)) \<and> \<dots>) = (\<exists>x. (\<forall>a \<in> set ?as0. I\<^sub>a a (x#xs)) \<and> (\<forall>a \<in> set ?as1. I\<^sub>a a (x#xs)))" by(simp add: not_dep_decr) also have "\<dots> = (\<exists>x. \<forall>a \<in> set(?as0 @ ?as1). I\<^sub>a a (x#xs))" by (simp add:ball_Un) also have "\<dots> = (\<exists>x. \<forall>a \<in> set(as). I\<^sub>a a (x#xs))" by simp blast finally show "?P xs" . qed text\<open>\noindent The generic DNF-based quantifier elimination procedure:\<close> fun lift_dnf_qe :: "('a list \<Rightarrow> 'a fm) \<Rightarrow> 'a fm \<Rightarrow> 'a fm" where "lift_dnf_qe qe (And \<phi>\<^sub>1 \<phi>\<^sub>2) = and (lift_dnf_qe qe \<phi>\<^sub>1) (lift_dnf_qe qe \<phi>\<^sub>2)" | "lift_dnf_qe qe (Or \<phi>\<^sub>1 \<phi>\<^sub>2) = or (lift_dnf_qe qe \<phi>\<^sub>1) (lift_dnf_qe qe \<phi>\<^sub>2)" | "lift_dnf_qe qe (Neg \<phi>) = neg(lift_dnf_qe qe \<phi>)" | "lift_dnf_qe qe (ExQ \<phi>) = Disj (dnf(nnf(lift_dnf_qe qe \<phi>))) (qelim qe)" | "lift_dnf_qe qe \<phi> = \<phi>" lemma qfree_lift_dnf_qe: "(\<And>as. (\<forall>a\<in>set as. depends\<^sub>0 a) \<Longrightarrow> qfree(qe as)) \<Longrightarrow> qfree(lift_dnf_qe qe \<phi>)" by (induct \<phi>) (simp_all add:qelim_def) lemma qfree_lift_dnf_qe2: "qe \<in> lists |depends\<^sub>0| \<rightarrow> |qfree| \<Longrightarrow> qfree(lift_dnf_qe qe \<phi>)" using in_lists_conv_set[where ?'a = 'a] by (simp add:Pi_def qfree_lift_dnf_qe) lemma lem: "\<forall>P A. (\<exists>x\<in>A. \<exists>y. P x y) = (\<exists>y. \<exists>x\<in>A. P x y)" by blast lemma I_lift_dnf_qe: assumes "\<And>as. (\<forall>a \<in> set as. depends\<^sub>0 a) \<Longrightarrow> qfree(qe as)" and "\<And>as. (\<forall>a \<in> set as. 
depends\<^sub>0 a) \<Longrightarrow> is_dnf_qe qe as" shows "I (lift_dnf_qe qe \<phi>) xs = I \<phi> xs" proof(induct \<phi> arbitrary:xs) case ExQ thus ?case by (simp add: assms I_qelim lem I_dnf nqfree_nnf qfree_lift_dnf_qe I_nnf) qed simp_all lemma I_lift_dnf_qe2: assumes "qe \<in> lists |depends\<^sub>0| \<rightarrow> |qfree|" and "\<forall>as \<in> lists |depends\<^sub>0|. is_dnf_qe qe as" shows "I (lift_dnf_qe qe \<phi>) xs = I \<phi> xs" using assms in_lists_conv_set[where ?'a = 'a] by(simp add:Pi_def I_lift_dnf_qe) text\<open>\noindent Quantifier elimination with invariant (needed for Presburger):\<close> lemma I_qelim_anormal: assumes qe: "\<And>xs as. \<forall>a \<in> set as. depends\<^sub>0 a \<and> anormal a \<Longrightarrow> is_dnf_qe qe as" and nm: "\<forall>a \<in> set as. anormal a" shows "I (qelim qe as) xs = (\<exists>x. \<forall>a\<in>set as. I\<^sub>a a (x#xs))" proof - let ?as0 = "filter depends\<^sub>0 as" let ?as1 = "filter (Not \<circ> depends\<^sub>0) as" have "I (qelim qe as) xs = (I (qe ?as0) xs \<and> (\<forall>a\<in>set(map decr ?as1). I\<^sub>a a xs))" (is "_ = (_ \<and> ?B)") by(force simp add:qelim_def) also have "\<dots> = ((\<exists>x. \<forall>a \<in> set ?as0. I\<^sub>a a (x#xs)) \<and> ?B)" by(simp add:qe nm not_dep_decr) also have "\<dots> = (\<exists>x. (\<forall>a \<in> set ?as0. I\<^sub>a a (x#xs)) \<and> ?B)" by blast also have "?B = (\<forall>a \<in> set ?as1. I\<^sub>a (decr a) xs)" by simp also have "(\<exists>x. (\<forall>a \<in> set ?as0. I\<^sub>a a (x#xs)) \<and> \<dots>) = (\<exists>x. (\<forall>a \<in> set ?as0. I\<^sub>a a (x#xs)) \<and> (\<forall>a \<in> set ?as1. I\<^sub>a a (x#xs)))" by(simp add: not_dep_decr) also have "\<dots> = (\<exists>x. \<forall>a \<in> set(?as0 @ ?as1). I\<^sub>a a (x#xs))" by (simp add:ball_Un) also have "\<dots> = (\<exists>x. \<forall>a \<in> set(as). I\<^sub>a a (x#xs))" by simp blast finally show ?thesis . qed context notes [[simp_depth_limit = 5]] begin lemma anormal_atoms_qelim: "(\<And>as. \<forall>a \<in> set as. depends\<^sub>0 a \<and> anormal a \<Longrightarrow> normal(qe as)) \<Longrightarrow> \<forall>a \<in> set as. anormal a \<Longrightarrow> a \<in> atoms(qelim qe as) \<Longrightarrow> anormal a" apply(auto simp add:qelim_def and_def normal_def split:if_split_asm) apply(auto simp add:anormal_decr dest!: atoms_list_conjE) apply(erule_tac x = "filter depends\<^sub>0 as" in meta_allE) apply(simp) apply(erule_tac x = "filter depends\<^sub>0 as" in meta_allE) apply(simp) done lemma normal_lift_dnf_qe: assumes "\<And>as. \<forall>a \<in> set as. depends\<^sub>0 a \<Longrightarrow> qfree(qe as)" and "\<And>as. \<forall>a \<in> set as. depends\<^sub>0 a \<and> anormal a \<Longrightarrow> normal(qe as)" shows "normal \<phi> \<Longrightarrow> normal(lift_dnf_qe qe \<phi>)" proof(simp add:normal_def, induct \<phi>) case ExQ thus ?case apply (auto dest!: atoms_list_disjE) apply(rule anormal_atoms_qelim) prefer 3 apply assumption apply(simp add:assms) apply (simp add:normal_def qfree_lift_dnf_qe anormal_dnf_nnf assms) done qed (simp_all add:and_def or_def neg_def Ball_def) end context notes [[simp_depth_limit = 9]] begin lemma I_lift_dnf_qe_anormal: assumes "\<And>as. \<forall>a \<in> set as. depends\<^sub>0 a \<Longrightarrow> qfree(qe as)" and "\<And>as. \<forall>a \<in> set as. depends\<^sub>0 a \<and> anormal a \<Longrightarrow> normal(qe as)" and "\<And>xs as. \<forall>a \<in> set as. 
depends\<^sub>0 a \<and> anormal a \<Longrightarrow> is_dnf_qe qe as" shows "normal f \<Longrightarrow> I (lift_dnf_qe qe f) xs = I f xs" proof(induct f arbitrary:xs) case ExQ thus ?case using normal_lift_dnf_qe[of qe] by (simp add: assms[simplified normal_def] anormal_dnf_nnf I_qelim_anormal lem I_dnf nqfree_nnf qfree_lift_dnf_qe I_nnf normal_def) qed (simp_all add:normal_def) end lemma I_lift_dnf_qe_anormal2: assumes "qe \<in> lists |depends\<^sub>0| \<rightarrow> |qfree|" and "qe \<in> lists ( |depends\<^sub>0| \<inter> |anormal| ) \<rightarrow> |normal|" and "\<forall>as \<in> lists( |depends\<^sub>0| \<inter> |anormal| ). is_dnf_qe qe as" shows "normal f \<Longrightarrow> I (lift_dnf_qe qe f) xs = I f xs" using assms in_lists_conv_set[where ?'a = 'a] by(simp add:Pi_def I_lift_dnf_qe_anormal Int_def) subsubsection\<open>NNF-based\<close> fun lift_nnf_qe :: "('a fm \<Rightarrow> 'a fm) \<Rightarrow> 'a fm \<Rightarrow> 'a fm" where "lift_nnf_qe qe (And \<phi>\<^sub>1 \<phi>\<^sub>2) = and (lift_nnf_qe qe \<phi>\<^sub>1) (lift_nnf_qe qe \<phi>\<^sub>2)" | "lift_nnf_qe qe (Or \<phi>\<^sub>1 \<phi>\<^sub>2) = or (lift_nnf_qe qe \<phi>\<^sub>1) (lift_nnf_qe qe \<phi>\<^sub>2)" | "lift_nnf_qe qe (Neg \<phi>) = neg(lift_nnf_qe qe \<phi>)" | "lift_nnf_qe qe (ExQ \<phi>) = qe(nnf(lift_nnf_qe qe \<phi>))" | "lift_nnf_qe qe \<phi> = \<phi>" lemma qfree_lift_nnf_qe: "(\<And>\<phi>. nqfree \<phi> \<Longrightarrow> qfree(qe \<phi>)) \<Longrightarrow> qfree(lift_nnf_qe qe \<phi>)" by (induct \<phi>) (simp_all add:nqfree_nnf) lemma qfree_lift_nnf_qe2: "qe \<in> |nqfree| \<rightarrow> |qfree| \<Longrightarrow> qfree(lift_nnf_qe qe \<phi>)" by(simp add:Pi_def qfree_lift_nnf_qe) lemma I_lift_nnf_qe: assumes "\<And>\<phi>. nqfree \<phi> \<Longrightarrow> qfree(qe \<phi>)" and "\<And>xs \<phi>. nqfree \<phi> \<Longrightarrow> I (qe \<phi>) xs = (\<exists>x. I \<phi> (x#xs))" shows "I (lift_nnf_qe qe \<phi>) xs = I \<phi> xs" proof(induct "\<phi>" arbitrary:xs) case ExQ thus ?case by (simp add: assms nqfree_nnf qfree_lift_nnf_qe I_nnf) qed simp_all lemma I_lift_nnf_qe2: assumes "qe \<in> |nqfree| \<rightarrow> |qfree|" and "\<forall>\<phi> \<in> |nqfree|. \<forall>xs. I (qe \<phi>) xs = (\<exists>x. I \<phi> (x#xs))" shows "I (lift_nnf_qe qe \<phi>) xs = I \<phi> xs" using assms by(simp add:Pi_def I_lift_nnf_qe) lemma normal_lift_nnf_qe: assumes "\<And>\<phi>. nqfree \<phi> \<Longrightarrow> qfree(qe \<phi>)" and "\<And>\<phi>. nqfree \<phi> \<Longrightarrow> normal \<phi> \<Longrightarrow> normal(qe \<phi>)" shows "normal \<phi> \<Longrightarrow> normal(lift_nnf_qe qe \<phi>)" by (induct \<phi>) (simp_all add: assms Logic.neg_def normal_nnf nqfree_nnf qfree_lift_nnf_qe) lemma I_lift_nnf_qe_normal: assumes "\<And>\<phi>. nqfree \<phi> \<Longrightarrow> qfree(qe \<phi>)" and "\<And>\<phi>. nqfree \<phi> \<Longrightarrow> normal \<phi> \<Longrightarrow> normal(qe \<phi>)" and "\<And>xs \<phi>. normal \<phi> \<Longrightarrow> nqfree \<phi> \<Longrightarrow> I (qe \<phi>) xs = (\<exists>x. I \<phi> (x#xs))" shows "normal \<phi> \<Longrightarrow> I (lift_nnf_qe qe \<phi>) xs = I \<phi> xs" proof(induct "\<phi>" arbitrary:xs) case ExQ thus ?case by (simp add: assms nqfree_nnf qfree_lift_nnf_qe I_nnf normal_lift_nnf_qe normal_nnf) qed auto lemma I_lift_nnf_qe_normal2: assumes "qe \<in> |nqfree| \<rightarrow> |qfree|" and "qe \<in> |nqfree| \<inter> |normal| \<rightarrow> |normal|" and "\<forall>\<phi> \<in> |normal| \<inter> |nqfree|. \<forall>xs. I (qe \<phi>) xs = (\<exists>x. 
I \<phi> (x#xs))" shows "normal \<phi> \<Longrightarrow> I (lift_nnf_qe qe \<phi>) xs = I \<phi> xs" using assms by(simp add:Pi_def I_lift_nnf_qe_normal Int_def) end subsection\<open>With equality\<close> text\<open>DNF-based quantifier elimination can accommodate equality atoms in a generic fashion.\<close> locale ATOM_EQ = ATOM + fixes solvable\<^sub>0 :: "'a \<Rightarrow> bool" and trivial :: "'a \<Rightarrow> bool" and subst\<^sub>0 :: "'a \<Rightarrow> 'a \<Rightarrow> 'a" assumes subst\<^sub>0: "\<lbrakk> solvable\<^sub>0 eq; \<not>trivial eq; I\<^sub>a eq (x#xs); depends\<^sub>0 a \<rbrakk> \<Longrightarrow> I\<^sub>a (subst\<^sub>0 eq a) xs = I\<^sub>a a (x#xs)" and trivial: "trivial eq \<Longrightarrow> I\<^sub>a eq xs" and solvable: "solvable\<^sub>0 eq \<Longrightarrow> \<exists>x. I\<^sub>a eq (x#xs)" and is_triv_self_subst: "solvable\<^sub>0 eq \<Longrightarrow> trivial (subst\<^sub>0 eq eq)" begin definition lift_eq_qe :: "('a list \<Rightarrow> 'a fm) \<Rightarrow> 'a list \<Rightarrow> 'a fm" where "lift_eq_qe qe as = (let as = [a\<leftarrow>as. \<not> trivial a] in case [a\<leftarrow>as. solvable\<^sub>0 a] of [] \<Rightarrow> qe as | eq # eqs \<Rightarrow> (let ineqs = [a\<leftarrow>as. \<not> solvable\<^sub>0 a] in list_conj (map (Atom \<circ> (subst\<^sub>0 eq)) (eqs @ ineqs))))" theorem I_lift_eq_qe: assumes dep: "\<forall>a\<in>set as. depends\<^sub>0 a" assumes qe: "\<And>as. (\<forall>a \<in> set as. depends\<^sub>0 a \<and> \<not> solvable\<^sub>0 a) \<Longrightarrow> I (qe as) xs = (\<exists>x. \<forall>a \<in> set as. I\<^sub>a a (x#xs))" shows "I (lift_eq_qe qe as) xs = (\<exists>x. \<forall>a \<in> set as. I\<^sub>a a (x#xs))" (is "?L = ?R") proof - let ?as = "[a\<leftarrow>as. \<not> trivial a]" show ?thesis proof (cases "[a\<leftarrow>?as. solvable\<^sub>0 a]") case Nil hence "\<forall>a\<in>set as. \<not> trivial a \<longrightarrow> \<not> solvable\<^sub>0 a" by(auto simp: filter_empty_conv) thus "?L = ?R" by(simp add:lift_eq_qe_def dep qe cong:conj_cong) (metis trivial) next case (Cons eq _) then have "eq \<in> set as" "solvable\<^sub>0 eq" "\<not> trivial eq" by (auto simp: filter_eq_Cons_iff) then obtain e where "I\<^sub>a eq (e#xs)" by(metis solvable) have "\<forall>a \<in> set as. I\<^sub>a a (e # xs) = I\<^sub>a (subst\<^sub>0 eq a) xs" by(simp add: subst\<^sub>0[OF \<open>solvable\<^sub>0 eq\<close> \<open>\<not> trivial eq\<close> \<open>I\<^sub>a eq (e#xs)\<close>] dep) thus ?thesis using Cons dep apply(simp add: lift_eq_qe_def, clarsimp simp: filter_eq_Cons_iff ball_Un) apply(rule iffI) apply(fastforce intro!:exI[of _ e] simp: trivial is_triv_self_subst) apply (metis subst\<^sub>0) done qed qed definition "lift_dnfeq_qe = lift_dnf_qe \<circ> lift_eq_qe" lemma qfree_lift_eq_qe: "(\<And>as. \<forall>a\<in>set as. depends\<^sub>0 a \<Longrightarrow> qfree (qe as)) \<Longrightarrow> \<forall>a\<in>set as. depends\<^sub>0 a \<Longrightarrow> qfree(lift_eq_qe qe as)" by(simp add:lift_eq_qe_def ball_Un split:list.split) lemma qfree_lift_dnfeq_qe: "(\<And>as. (\<forall>a\<in>set as. depends\<^sub>0 a) \<Longrightarrow> qfree(qe as)) \<Longrightarrow> qfree(lift_dnfeq_qe qe \<phi>)" by(simp add: lift_dnfeq_qe_def qfree_lift_dnf_qe qfree_lift_eq_qe) lemma I_lift_dnfeq_qe: "(\<And>as. (\<forall>a \<in> set as. depends\<^sub>0 a) \<Longrightarrow> qfree(qe as)) \<Longrightarrow> (\<And>as. (\<forall>a \<in> set as. 
depends\<^sub>0 a \<and> \<not> solvable\<^sub>0 a) \<Longrightarrow> is_dnf_qe qe as) \<Longrightarrow> I (lift_dnfeq_qe qe \<phi>) xs = I \<phi> xs" by(simp add:lift_dnfeq_qe_def I_lift_dnf_qe qfree_lift_eq_qe I_lift_eq_qe) lemma I_lift_dnfeq_qe2: "qe \<in> lists |depends\<^sub>0| \<rightarrow> |qfree| \<Longrightarrow> (\<forall>as \<in> lists( |depends\<^sub>0| \<inter> - |solvable\<^sub>0| ). is_dnf_qe qe as) \<Longrightarrow> I (lift_dnfeq_qe qe \<phi>) xs = I \<phi> xs" using in_lists_conv_set[where ?'a = 'a] by(simp add:Pi_def I_lift_dnfeq_qe Int_def Compl_eq) end end
[STATEMENT] lemma sum_vec_list: "sum_list (list_of_vec v) = sum_vec v" [PROOF STATE] proof (prove) goal (1 subgoal): 1. sum_list (list_of_vec v) = sum_vec v [PROOF STEP] by (induct v)(simp_all add: sum_vec_vCons)
(* Author: Dmitriy Traytel *) section \<open>Normalization of WS1S Formulas\<close> (*<*) theory WS1S_Normalization imports WS1S begin (*>*) fun nNot where "nNot (FNot \<phi>) = \<phi>" | "nNot (FAnd \<phi>1 \<phi>2) = FOr (nNot \<phi>1) (nNot \<phi>2)" | "nNot (FOr \<phi>1 \<phi>2) = FAnd (nNot \<phi>1) (nNot \<phi>2)" | "nNot \<phi> = FNot \<phi>" primrec norm where "norm (FQ a m) = FQ a m" | "norm (FLess m n) = FLess m n" | "norm (FIn m M) = FIn m M" | "norm (FOr \<phi> \<psi>) = FOr (norm \<phi>) (norm \<psi>)" | "norm (FAnd \<phi> \<psi>) = FAnd (norm \<phi>) (norm \<psi>)" | "norm (FNot \<phi>) = nNot (norm \<phi>)" | "norm (FExists \<phi>) = FExists (norm \<phi>)" | "norm (FEXISTS \<phi>) = FEXISTS (norm \<phi>)" context formula begin lemma satisfies_nNot[simp]: "(w, I) \<Turnstile> nNot \<phi> \<longleftrightarrow> (w, I) \<Turnstile> FNot \<phi>" by (induct \<phi> rule: nNot.induct) auto lemma FOV_nNot[simp]: "FOV (nNot \<phi>) = FOV (FNot \<phi>)" by (induct \<phi> rule: nNot.induct) auto lemma SOV_nNot[simp]: "SOV (nNot \<phi>) = SOV (FNot \<phi>)" by (induct \<phi> rule: nNot.induct) auto lemma pre_wf_formula_nNot[simp]: "pre_wf_formula n (nNot \<phi>) = pre_wf_formula n (FNot \<phi>)" by (induct \<phi> rule: nNot.induct) auto lemma FOV_norm[simp]: "FOV (norm \<phi>) = FOV \<phi>" by (induct \<phi>) auto lemma SOV_norm[simp]: "SOV (norm \<phi>) = SOV \<phi>" by (induct \<phi>) auto lemma pre_wf_formula_norm[simp]: "pre_wf_formula n (norm \<phi>) = pre_wf_formula n \<phi>" by (induct \<phi> arbitrary: n) auto lemma satisfies_norm[simp]: "wI \<Turnstile> norm \<phi> \<longleftrightarrow> wI \<Turnstile> \<phi>" by (induct \<phi> arbitrary: wI) auto lemma lang\<^sub>W\<^sub>S\<^sub>1\<^sub>S_norm[simp]: "lang\<^sub>W\<^sub>S\<^sub>1\<^sub>S n (norm \<phi>) = lang\<^sub>W\<^sub>S\<^sub>1\<^sub>S n \<phi>" unfolding lang\<^sub>W\<^sub>S\<^sub>1\<^sub>S_def by auto end (*<*) end (*>*)
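The nNot function in the theory above pushes negation inward by De Morgan duality and cancels double negations; as a worked instance (an illustration added here, not part of the original theory):

\[
\mathit{nNot}\,(\mathit{FAnd}\ \varphi_1\ \varphi_2) \;=\; \mathit{FOr}\ (\mathit{nNot}\ \varphi_1)\ (\mathit{nNot}\ \varphi_2)
\qquad\text{mirrors}\qquad
\neg(\varphi_1 \wedge \varphi_2) \;\equiv\; \neg\varphi_1 \vee \neg\varphi_2 ,
\]

and the lemma satisfies_nNot records that the transformation preserves satisfaction: (w, I) ⊨ nNot φ iff (w, I) ⊨ FNot φ.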
lines.linewidth: 1.0 # line width in points text.color: 232F3E axes.facecolor: FAFAFA # axes background color axes.edgecolor: CACFD0 # axes edge color axes.linewidth: 1.5 # edge line width axes.grid: True # display grid or not axes.titlesize: 14 # font size of the axes title axes.labelsize: 10 # font size of the x and y labels axes.labelcolor: FF9900 # color of the x and y labels axes.titleweight: bold # font weight of title axes.titlecolor: FF9900 # color of the axes title #axes.labelcolor: 0073BB axes.prop_cycle: cycler('color', ['232F3E', '527FFF', '0073BB', '4d27aa', 'b0084d', 'bf0816', 'd45b07', 'FF9900', 'e38b00', '067f68', '16bf9f']) xtick.color: 0073BB # color of the ticks xtick.labelsize: 9 # font size of the tick labels ytick.color: 0073BB # color of the ticks ytick.labelsize: 9 # font size of the tick labels grid.color: 232F3E # grid color grid.alpha: 0.1 # transparency, between 0.0 and 1.0 legend.framealpha: 0.5 # legend patch transparency legend.facecolor: fee385 # inherit from axes.facecolor; or color spec legend.edgecolor: e38b00 # background patch boundary color legend.fancybox: True # if True, use a rounded box for the figure.titlesize: large # size of the figure title (``Figure.suptitle()``) figure.titleweight: bold # weight of the figure title figure.facecolor: F2F3F3 # figure face color
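The row above is a Matplotlib rc/style sheet. A minimal usage sketch follows, assuming the sheet has been saved under the hypothetical filename aws_style.mplstyle (the row itself does not name a file):

# Minimal sketch (illustration only): loading the style sheet above.
# "aws_style.mplstyle" is an assumed filename, not given in the dataset row.
import matplotlib.pyplot as plt

plt.style.use("aws_style.mplstyle")   # merge the sheet's settings into rcParams

fig, ax = plt.subplots()
ax.plot([0, 1, 2, 3], [0, 1, 4, 9], label="y = x^2")
ax.set_title("Styled example")        # uses axes.titlecolor / axes.titleweight
ax.legend()                           # uses legend.facecolor / legend.edgecolor
fig.savefig("styled_example.png")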
import .fv namespace tts ------------------------------------------------------------------ namespace exp ------------------------------------------------------------------ variables {V : Type} -- Type of variable names variables {x y : tagged V} -- Variable names variables {e ex : exp V} -- Expressions variables [decidable_eq V] open occurs /-- Substitute a free variable for an expression in an expression -/ def subst (x : tagged V) (ex : exp V) : exp V → exp V | (var bound y) := var bound y | (var free y) := if x = y then ex else var free y | (app ef ea) := app (subst ef) (subst ea) | (lam v eb) := lam v (subst eb) | (let_ v ed eb) := let_ v (subst ed) (subst eb) @[simp] theorem subst_var_bound : subst x ex (var bound y) = var bound y := rfl @[simp] theorem subst_var_free_eq (h : x = y) : subst x ex (var free y) = ex := by simp [subst, h] @[simp] theorem subst_var_free_ne (h : x ≠ y) : subst x ex (var free y) = var free y := by simp [subst, h] @[simp] theorem subst_app {ef ea : exp V} : subst x ex (app ef ea) = app (subst x ex ef) (subst x ex ea) := rfl @[simp] theorem subst_lam {v} {eb : exp V} : subst x ex (lam v eb) = lam v (subst x ex eb) := rfl @[simp] theorem subst_let_ {v} {ed eb : exp V} : subst x ex (let_ v ed eb) = let_ v (subst x ex ed) (subst x ex eb) := rfl @[simp] theorem subst_fresh (h : x ∉ fv e) : subst x ex e = e := by induction e with o; try {cases o}; try {simp at h}; try {cases h}; simp * end /- namespace -/ exp -------------------------------------------------------- end /- namespace -/ tts --------------------------------------------------------
[STATEMENT] lemma bal_imp_full: "bal t \<Longrightarrow> full (height t) t" [PROOF STATE] proof (prove) goal (1 subgoal): 1. bal t \<Longrightarrow> full (height t) t [PROOF STEP] by (induct t, simp_all)
[STATEMENT] lemma fst_in_set_lemma: "(x, y) \<in> set l \<Longrightarrow> x \<in> fst ` set l" [PROOF STATE] proof (prove) goal (1 subgoal): 1. (x, y) \<in> set l \<Longrightarrow> x \<in> fst ` set l [PROOF STEP] by (induct l) auto
/- Copyright (c) 2018 Kenny Lau. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Kenny Lau, Yury Kudryashov -/ import algebra.algebra.basic import algebra.hom.iterate import algebra.hom.non_unital_alg import linear_algebra.tensor_product /-! # Facts about algebras involving bilinear maps and tensor products > THIS FILE IS SYNCHRONIZED WITH MATHLIB4. > Any changes to this file require a corresponding PR to mathlib4. We move a few basic statements about algebras out of `algebra.algebra.basic`, in order to avoid importing `linear_algebra.bilinear_map` and `linear_algebra.tensor_product` unnecessarily. -/ open_locale tensor_product open module namespace linear_map section non_unital_non_assoc variables (R A : Type*) [comm_semiring R] [non_unital_non_assoc_semiring A] [module R A] [smul_comm_class R A A] [is_scalar_tower R A A] /-- The multiplication in a non-unital non-associative algebra is a bilinear map. A weaker version of this for semirings exists as `add_monoid_hom.mul`. -/ def mul : A →ₗ[R] A →ₗ[R] A := linear_map.mk₂ R (*) add_mul smul_mul_assoc mul_add mul_smul_comm /-- The multiplication map on a non-unital algebra, as an `R`-linear map from `A ⊗[R] A` to `A`. -/ def mul' : A ⊗[R] A →ₗ[R] A := tensor_product.lift (mul R A) variables {A} /-- The multiplication on the left in a non-unital algebra is a linear map. -/ def mul_left (a : A) : A →ₗ[R] A := mul R A a /-- The multiplication on the right in an algebra is a linear map. -/ def mul_right (a : A) : A →ₗ[R] A := (mul R A).flip a /-- Simultaneous multiplication on the left and right is a linear map. -/ def mul_left_right (ab : A × A) : A →ₗ[R] A := (mul_right R ab.snd).comp (mul_left R ab.fst) @[simp] lemma mul_left_to_add_monoid_hom (a : A) : (mul_left R a : A →+ A) = add_monoid_hom.mul_left a := rfl @[simp] lemma mul_right_to_add_monoid_hom (a : A) : (mul_right R a : A →+ A) = add_monoid_hom.mul_right a := rfl variables {R} @[simp] lemma mul_apply' (a b : A) : mul R A a b = a * b := rfl @[simp] lemma mul_left_apply (a b : A) : mul_left R a b = a * b := rfl @[simp] lemma mul_right_apply (a b : A) : mul_right R a b = b * a := rfl @[simp] lemma mul_left_right_apply (a b x : A) : mul_left_right R (a, b) x = a * x * b := rfl @[simp] lemma mul'_apply {a b : A} : mul' R A (a ⊗ₜ b) = a * b := rfl @[simp] lemma mul_left_zero_eq_zero : mul_left R (0 : A) = 0 := (mul R A).map_zero @[simp] lemma mul_right_zero_eq_zero : mul_right R (0 : A) = 0 := (mul R A).flip.map_zero end non_unital_non_assoc section non_unital variables (R A : Type*) [comm_semiring R] [non_unital_semiring A] [module R A] [smul_comm_class R A A] [is_scalar_tower R A A] /-- The multiplication in a non-unital algebra is a bilinear map. A weaker version of this for non-unital non-associative algebras exists as `linear_map.mul`. -/ def _root_.non_unital_alg_hom.lmul : A →ₙₐ[R] (End R A) := { map_mul' := by { intros a b, ext c, exact mul_assoc a b c }, map_zero' := by { ext a, exact zero_mul a }, .. 
(mul R A) } variables {R A} @[simp] lemma _root_.non_unital_alg_hom.coe_lmul_eq_mul : ⇑(non_unital_alg_hom.lmul R A) = mul R A := rfl lemma commute_mul_left_right (a b : A) : commute (mul_left R a) (mul_right R b) := by { ext c, exact (mul_assoc a c b).symm, } @[simp] lemma mul_left_mul (a b : A) : mul_left R (a * b) = (mul_left R a).comp (mul_left R b) := by { ext, simp only [mul_left_apply, comp_apply, mul_assoc] } @[simp] lemma mul_right_mul (a b : A) : mul_right R (a * b) = (mul_right R b).comp (mul_right R a) := by { ext, simp only [mul_right_apply, comp_apply, mul_assoc] } end non_unital section semiring variables (R A : Type*) [comm_semiring R] [semiring A] [algebra R A] /-- The multiplication in an algebra is an algebra homomorphism into the endomorphisms on the algebra. A weaker version of this for non-unital algebras exists as `non_unital_alg_hom.mul`. -/ def _root_.algebra.lmul : A →ₐ[R] (End R A) := { map_one' := by { ext a, exact one_mul a }, map_mul' := by { intros a b, ext c, exact mul_assoc a b c }, map_zero' := by { ext a, exact zero_mul a }, commutes' := by { intro r, ext a, exact (algebra.smul_def r a).symm, }, .. (linear_map.mul R A) } variables {R A} @[simp] lemma _root_.algebra.coe_lmul_eq_mul : ⇑(algebra.lmul R A) = mul R A := rfl @[simp] lemma mul_left_eq_zero_iff (a : A) : mul_left R a = 0 ↔ a = 0 := begin split; intros h, { rw [← mul_one a, ← mul_left_apply a 1, h, linear_map.zero_apply], }, { rw h, exact mul_left_zero_eq_zero, }, end @[simp] lemma mul_right_eq_zero_iff (a : A) : mul_right R a = 0 ↔ a = 0 := begin split; intros h, { rw [← one_mul a, ← mul_right_apply a 1, h, linear_map.zero_apply], }, { rw h, exact mul_right_zero_eq_zero, }, end @[simp] lemma mul_left_one : mul_left R (1:A) = linear_map.id := by { ext, simp only [linear_map.id_coe, one_mul, id.def, mul_left_apply] } @[simp] lemma mul_right_one : mul_right R (1:A) = linear_map.id := by { ext, simp only [linear_map.id_coe, mul_one, id.def, mul_right_apply] } @[simp] lemma pow_mul_left (a : A) (n : ℕ) : (mul_left R a) ^ n = mul_left R (a ^ n) := by simpa only [mul_left, ←algebra.coe_lmul_eq_mul] using ((algebra.lmul R A).map_pow a n).symm @[simp] lemma pow_mul_right (a : A) (n : ℕ) : (mul_right R a) ^ n = mul_right R (a ^ n) := begin simp only [mul_right, ←algebra.coe_lmul_eq_mul], exact linear_map.coe_injective (((mul_right R a).coe_pow n).symm ▸ (mul_right_iterate a n)), end end semiring section ring variables {R A : Type*} [comm_semiring R] [ring A] [algebra R A] lemma mul_left_injective [no_zero_divisors A] {x : A} (hx : x ≠ 0) : function.injective (mul_left R x) := begin letI : nontrivial A := ⟨⟨x, 0, hx⟩⟩, letI := no_zero_divisors.to_is_domain A, exact mul_right_injective₀ hx, end lemma mul_right_injective [no_zero_divisors A] {x : A} (hx : x ≠ 0) : function.injective (mul_right R x) := begin letI : nontrivial A := ⟨⟨x, 0, hx⟩⟩, letI := no_zero_divisors.to_is_domain A, exact mul_left_injective₀ hx, end lemma mul_injective [no_zero_divisors A] {x : A} (hx : x ≠ 0) : function.injective (mul R A x) := begin letI : nontrivial A := ⟨⟨x, 0, hx⟩⟩, letI := no_zero_divisors.to_is_domain A, exact mul_right_injective₀ hx, end end ring end linear_map
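The lemma pow_mul_left in the file above says that iterating left multiplication agrees with multiplying by a power. A worked pointwise reading (added here as an illustration, not taken from the file): for any b,

\[
(\mathrm{mul\_left}_R\, a)^{\,n}\,(b) \;=\; \underbrace{a \cdot (a \cdots (a}_{n\text{ times}} \cdot b)\cdots) \;=\; a^{n} \cdot b \;=\; \mathrm{mul\_left}_R\,(a^{n})\,(b),
\]

using associativity of multiplication in the semiring A; pow_mul_right is the symmetric statement for right multiplication.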
State Before: α : Type u β : Type v γ : Type ?u.39611 δ : Type ?u.39614 ε : Type ?u.39617 ζ : Type ?u.39620 inst✝⁵ : TopologicalSpace α inst✝⁴ : TopologicalSpace β inst✝³ : TopologicalSpace γ inst✝² : TopologicalSpace δ inst✝¹ : TopologicalSpace ε inst✝ : TopologicalSpace ζ a : α b : β ⊢ 𝓝 (a, b) = map Prod.swap (𝓝 (b, a)) State After: α : Type u β : Type v γ : Type ?u.39611 δ : Type ?u.39614 ε : Type ?u.39617 ζ : Type ?u.39620 inst✝⁵ : TopologicalSpace α inst✝⁴ : TopologicalSpace β inst✝³ : TopologicalSpace γ inst✝² : TopologicalSpace δ inst✝¹ : TopologicalSpace ε inst✝ : TopologicalSpace ζ a : α b : β ⊢ map (fun p => (p.snd, p.fst)) (𝓝 b ×ˢ 𝓝 a) = map Prod.swap (𝓝 b ×ˢ 𝓝 a) Tactic: rw [nhds_prod_eq, Filter.prod_comm, nhds_prod_eq] State Before: α : Type u β : Type v γ : Type ?u.39611 δ : Type ?u.39614 ε : Type ?u.39617 ζ : Type ?u.39620 inst✝⁵ : TopologicalSpace α inst✝⁴ : TopologicalSpace β inst✝³ : TopologicalSpace γ inst✝² : TopologicalSpace δ inst✝¹ : TopologicalSpace ε inst✝ : TopologicalSpace ζ a : α b : β ⊢ map (fun p => (p.snd, p.fst)) (𝓝 b ×ˢ 𝓝 a) = map Prod.swap (𝓝 b ×ˢ 𝓝 a) State After: no goals Tactic: rfl
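The tactic trace above proves 𝓝 (a, b) = map Prod.swap (𝓝 (b, a)). Informally (a reading added here, not part of the trace), the three rewrites unfold both sides through product filters:

\[
\mathcal{N}(a,b) \;=\; \mathcal{N}(a) \times \mathcal{N}(b) \;=\; \mathrm{map}\,\mathrm{swap}\,\bigl(\mathcal{N}(b) \times \mathcal{N}(a)\bigr) \;=\; \mathrm{map}\,\mathrm{swap}\,\bigl(\mathcal{N}(b,a)\bigr),
\]

where the outer steps are nhds_prod_eq and the middle step is Filter.prod_comm; the remaining goal is closed by rfl because Prod.swap p is definitionally (p.snd, p.fst).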
Formal statement is: lemma trivial_limit_within: "trivial_limit (at a within S) \<longleftrightarrow> \<not> a islimpt S" Informal statement is: The filter at $a$ within $S$ is trivial if and only if $a$ is not a limit point of $S$.
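A worked unfolding under the usual definitions (added for illustration, not taken from the source): the filter at a within S is trivial exactly when it is the bottom filter, i.e. when some neighbourhood of a meets S at most at a itself:

\[
\mathrm{at}\ a\ \mathrm{within}\ S \;=\; \bot
\;\iff\;
\exists\, U \text{ open},\; a \in U,\; U \cap (S \setminus \{a\}) = \emptyset
\;\iff\;
\neg\,(a \text{ islimpt } S).
\]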
{-# OPTIONS --cubical --no-import-sorts --safe #-} module Cubical.Relation.Nullary.HLevels where open import Cubical.Foundations.Prelude open import Cubical.Foundations.HLevels open import Cubical.Foundations.Function open import Cubical.Functions.Fixpoint open import Cubical.Relation.Nullary private variable ℓ : Level A : Type ℓ isPropPopulated : isProp (Populated A) isPropPopulated = isPropΠ λ x → 2-Constant→isPropFixpoint (x .fst) (x .snd) isPropHSeparated : isProp (HSeparated A) isPropHSeparated f g i x y a = HSeparated→isSet f x y (f x y a) (g x y a) i isPropCollapsible≡ : isProp (Collapsible≡ A) isPropCollapsible≡ {A = A} f = (isPropΠ2 λ x y → isPropCollapsiblePointwise) f where sA : isSet A sA = Collapsible≡→isSet f gA : isGroupoid A gA = isSet→isGroupoid sA isPropCollapsiblePointwise : ∀ {x y} → isProp (Collapsible (x ≡ y)) isPropCollapsiblePointwise {x} {y} (a , ca) (b , cb) = λ i → endoFunction i , endoFunctionIsConstant i where endoFunction : a ≡ b endoFunction = funExt λ p → sA _ _ (a p) (b p) isProp2-Constant : (k : I) → isProp (2-Constant (endoFunction k)) isProp2-Constant k = isPropΠ2 λ r s → gA x y (endoFunction k r) (endoFunction k s) endoFunctionIsConstant : PathP (λ i → 2-Constant (endoFunction i)) ca cb endoFunctionIsConstant = isProp→PathP isProp2-Constant ca cb
It can be great belonging to a community organization - getting to know people with similar interests, doing things that make a difference, and celebrating your accomplishments. But sometimes it's hard to make those things happen; people can get stuck doing things the way they have always been done and become so focused on the "now" that there is no chance to get a vision for the future. That is where strategic planning comes in. Strategic planning is a means of envisioning your organization's future and determining how to get there. It can stimulate ingenuity and new approaches, respond to opportunities and obstacles, and provide a framework for day-to-day decisions. If this sounds like something that could benefit you or a community organization you are active in, register now.
module PolicyOptimization using ..AST using Flux using Flux.Tracker: grad, update! using POMDPs using Random using Parameters using RLInterface using Distributed using Distributions using LinearAlgebra using Base.Iterators import JLD2: @save, @load import ProgressMeter: Progress, next! export search!, TRPOSolver, TRPOPlanner, PPOSolver, PPOPlanner # Modified from Shreyas Kowshik's implementation. include("policies.jl") include("trpo.jl") include("ppo.jl") include(joinpath("utils", "utils.jl")) include(joinpath("utils", "buffer.jl")) include(joinpath("utils", "policy_saving.jl")) include("rollout.jl") include("train.jl") function POMDPs.action(planner::Union{TRPOPlanner, PPOPlanner}, s) if actiontype(planner.mdp) == ASTSeedAction @warn "DRL solvers (TRPO/PPO) are not as effective with ASTSeedAction. Use ASTMDP{ASTSampleAction}() instead." end train!(planner) # train neural networks # Pass back action trace if recording is on (i.e. top_k) if planner.mdp.params.top_k > 0 return get_top_path(planner.mdp) else statevec = convert_s(Vector{Float32}, s, planner.mdp) nn_action = get_action(planner.policy, statevec) ast_action = translate_ast_action(planner.mdp.sim, nn_action, actiontype(planner.mdp)) return ast_action end end function AST.search!(planner::Union{TRPOPlanner, PPOPlanner}) mdp::ASTMDP = planner.mdp Random.seed!(mdp.params.seed) # Determinism s = AST.initialstate(mdp) return action(planner, s) end end # module
Formal statement is: lemma AE_impI: "(P \<Longrightarrow> AE x in M. Q x) \<Longrightarrow> AE x in M. P \<longrightarrow> Q x" Informal statement is: If $P$ implies that $Q$ holds almost everywhere, then the implication $P \longrightarrow Q$ holds almost everywhere.
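A one-line justification of the informal statement (added here for illustration): split on whether $P$ holds.

\[
\bigl(P \Rightarrow (\text{AE } x \text{ in } M.\ Q\,x)\bigr) \;\Longrightarrow\; \text{AE } x \text{ in } M.\ (P \longrightarrow Q\,x),
\]

since if $P$ holds, the almost-everywhere set for $Q$ also witnesses $P \longrightarrow Q$, and if $P$ fails, then $P \longrightarrow Q\,x$ holds vacuously for every $x$.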
import cedille-options open import general-util module classify (options : cedille-options.options) {mF : Set → Set} {{_ : monad mF}} where open import lib open import cedille-types open import constants open import conversion open import ctxt open import is-free open import lift open import rename open import rewriting open import meta-vars options {mF} open import spans options {mF} open import subst open import syntax-util open import to-string options open import untyped-spans options {mF} check-ret : ∀{A : Set} → maybe A → Set check-ret{A} nothing = maybe A check-ret (just _) = ⊤ infixl 2 _≫=spanr_ _≫=spanr_ : ∀{A : Set}{m : maybe A} → spanM (maybe A) → (A → spanM (check-ret m)) → spanM (check-ret m) _≫=spanr_{m = nothing} = _≫=spanm_ _≫=spanr_{m = just _} = _≫=spanj_ -- return the appropriate value meaning that typing failed (in either checking or synthesizing mode) check-fail : ∀{A : Set} → (m : maybe A) → spanM (check-ret m) check-fail nothing = spanMr nothing check-fail (just _) = spanMok unimplemented-check : spanM ⊤ unimplemented-check = spanMok unimplemented-synth : ∀{A : Set} → spanM (maybe A) unimplemented-synth = spanMr nothing unimplemented-if : ∀{A : Set} → (m : maybe A) → spanM (check-ret m) unimplemented-if nothing = unimplemented-synth unimplemented-if (just _) = unimplemented-check -- return the second maybe value, if we are in synthesizing mode return-when : ∀{A : Set} → (m : maybe A) → maybe A → spanM (check-ret m) return-when nothing u = spanMr u return-when (just _) u = spanMr triv -- if m is not "nothing", return "just star" return-star-when : (m : maybe kind) → spanM (check-ret m) return-star-when m = return-when m (just star) if-check-against-star-data : ctxt → string → maybe kind → 𝕃 tagged-val × err-m if-check-against-star-data Γ desc nothing = [ kind-data Γ star ] , nothing if-check-against-star-data Γ desc (just (Star _)) = [ kind-data Γ star ] , nothing if-check-against-star-data Γ desc (just k) = [ expected-kind Γ k ] , just (desc ^ " is being checked against a kind other than ★") hnf-from : ctxt → (e : 𝔹) → maybeMinus → term → term hnf-from Γ e EpsHnf t = hnf Γ (unfolding-set-erased unfold-head e) t tt hnf-from Γ e EpsHanf t = hanf Γ e t maybe-hnf : {ed : exprd} → ctxt → maybe ⟦ ed ⟧ → maybe ⟦ ed ⟧ maybe-hnf Γ = maybe-map λ t → hnf Γ (unfolding-elab unfold-head) t tt check-term-update-eq : ctxt → leftRight → maybeMinus → posinfo → term → term → posinfo → type check-term-update-eq Γ Left m pi t1 t2 pi' = TpEq pi (hnf-from Γ tt m t1) t2 pi' check-term-update-eq Γ Right m pi t1 t2 pi' = TpEq pi t1 (hnf-from Γ tt m t2) pi' check-term-update-eq Γ Both m pi t1 t2 pi' = TpEq pi (hnf-from Γ tt m t1) (hnf-from Γ tt m t2) pi' add-tk' : erased? 
→ posinfo → var → tk → spanM restore-def add-tk' e pi x atk = helper atk ≫=span λ mi → (if ~ (x =string ignored-var) then (get-ctxt λ Γ → spanM-add (var-span e Γ pi x checking atk nothing)) else spanMok) ≫span spanMr mi where helper : tk → spanM restore-def helper (Tkk k) = spanM-push-type-decl pi x k helper (Tkt t) = spanM-push-term-decl pi x t add-tk : posinfo → var → tk → spanM restore-def add-tk = add-tk' ff check-type-return : ctxt → kind → spanM (maybe kind) check-type-return Γ k = spanMr (just (hnf Γ unfold-head k tt)) check-termi-return-hnf : ctxt → (subject : term) → type → spanM (maybe type) check-termi-return-hnf Γ subject tp = spanMr (just (hnf Γ (unfolding-elab unfold-head) tp tt)) lambda-bound-var-conv-error : ctxt → var → tk → tk → 𝕃 tagged-val → 𝕃 tagged-val × string lambda-bound-var-conv-error Γ x atk atk' tvs = (("the variable" , [[ x ]] , []) :: (to-string-tag-tk "its declared classifier" Γ atk') :: [ to-string-tag-tk "the expected classifier" Γ atk ]) ++ tvs , "The classifier given for a λ-bound variable is not the one we expected" lambda-bound-class-if : optClass → tk → tk lambda-bound-class-if NoClass atk = atk lambda-bound-class-if (SomeClass atk') atk = atk' {- for check-term and check-type, if the optional classifier is given, we will check against it. Otherwise, we will try to synthesize a type. check-type should return kinds in hnf using check-type-return. Use add-tk above to add declarations to the ctxt, since these should be normalized and with self-types instantiated. The term/type/kind being checked is never qualified, but the type/kind it is being checked against should always be qualified. So if a term/type is ever being checked against something that was in a term/type the user wrote (phi, for example, needs to check its first term against an equation between its second and third terms), the type/ kind being checked against should be qualified first. Additionally, when returning a synthesized type, lambdas should substitute the position-qualified variable for the original variable in the returned type, so that if the bound variable ever gets substituted by some other code it will work correctly. 
-} record spine-data : Set where constructor mk-spine-data field spine-mvars : meta-vars spine-type : decortype spine-locale : ℕ {-# TERMINATING #-} check-term : term → (m : maybe type) → spanM (check-ret m) check-termi : term → (m : maybe type) → spanM (check-ret m) check-term-spine : term → (m : prototype) → 𝔹 → spanM (maybe spine-data) check-type : type → (m : maybe kind) → spanM (check-ret m) check-typei : type → (m : maybe kind) → spanM (check-ret m) check-kind : kind → spanM ⊤ check-args-against-params : (kind-or-import : maybe tagged-val {- location -}) → (posinfo × var) → params → args → spanM ⊤ check-erased-margs : term → maybe type → spanM ⊤ check-tk : tk → spanM ⊤ check-def : defTermOrType → spanM (var × restore-def) {- Cedilleum specification, section 4.3 -} is-arrow-type : type → kind → posinfo → posinfo → spanM ⊤ is-arrow-type t (KndTpArrow t' (Star _)) pi pi' = get-ctxt (λ Γ → if (conv-type Γ t t') then spanMok else (spanM-add (mk-span "Wrong motive" pi pi' (expected-type Γ t :: [ type-argument Γ t' ]) (just "Type missmatch")))) is-arrow-type t (KndPi _ _ _ (Tkt t') (Star _)) pi pi' = get-ctxt (λ Γ → if (conv-type Γ t t') then spanMok else (spanM-add (mk-span "Wrong motive" pi pi' (expected-type Γ t :: [ type-argument Γ t' ]) (just "Type missmatch")))) is-arrow-type t _ pi pi' = spanM-add (mk-span "Wrong motive" pi pi' [] (just "Not a valid motive type 3")) -- example of renaming: [[%CE%93%E2%86%92%CE%93' : ctxt %E2%86%92 ctxt][here]] -- [[check-term-spine t'@(App t%E2%82%81 e? t%E2%82%82) mtp max =]] valid-elim-kind : type → kind → kind → posinfo → posinfo → spanM ⊤ valid-elim-kind t (Star _) k pi pi' = is-arrow-type t k pi pi' valid-elim-kind t (KndPi _ pix x (Tkt t1) k1) (KndPi _ _ y (Tkt t2) k2) pi pi' = get-ctxt (λ Γ → if (conv-type Γ t1 t2) then set-ctxt (ctxt-term-decl pix x t1 Γ) ≫span valid-elim-kind (TpAppt t (Var pix x)) k1 k2 pi pi' else spanM-add (mk-span "Motive error" pi pi' [] (just "Not a valid motive 4"))) valid-elim-kind t (KndPi _ pix x (Tkk k1') k1) (KndPi _ _ y (Tkk k2') k2) pi pi' = get-ctxt (λ Γ → if (conv-kind Γ k1' k2') then set-ctxt (ctxt-type-decl pix x k1' Γ) ≫span valid-elim-kind (TpApp t (TpVar pix x)) k1 k2 pi pi' else spanM-add (mk-span "Motive error" pi pi' [] (just "Not a valid motive 5"))) valid-elim-kind t (KndTpArrow t1 k1) (KndTpArrow t2 k2) pi pi' = get-ctxt (λ Γ → if (conv-type Γ t1 t2) then valid-elim-kind (TpApp t t1) k1 k2 pi pi' else spanM-add (mk-span "Motive error" pi pi' [] (just "Not a valid motive 6"))) valid-elim-kind _ _ _ pi pi' = spanM-add (mk-span "Motive error" pi pi' [] (just "Not a valid motive 7")) {- Cedilleum specification, section 4.4 -} branch-type : ctxt → term → type → type → type -- Π x : Tk, ∀ x : T, ∀ x : k cases branch-type Γ t (Abs pi e pi' x tk ty) m = Abs pi e pi' x tk (branch-type Γ t ty m) branch-type Γ t _ m = TpAppt m t -- TODO: missing indices s ! is ctxt needed ? -- converts mu cases (Example: from "vcons -n -m x xs -eq → ff" to "Λ n. Λ m. λ x. λ xs. Λ eq. 
ff") abstract-varargs : varargs → term → spanM (maybe term) abstract-varargs NoVarargs t = spanMr (just t) abstract-varargs (NormalVararg x vs) t = (abstract-varargs vs t) on-fail (spanMr nothing) ≫=spanm' (λ a → spanMr (just (Lam posinfo-gen NotErased posinfo-gen x NoClass a))) abstract-varargs (ErasedVararg x vs) t = (abstract-varargs vs t) on-fail (spanMr nothing) ≫=spanm' (λ a → spanMr (just (Lam posinfo-gen Erased posinfo-gen x NoClass a))) abstract-varargs (TypeVararg x vs) t = (abstract-varargs vs t) on-fail (spanMr nothing) ≫=spanm' (λ a → get-ctxt (λ Γ → helper (ctxt-lookup-type-var Γ x) a)) where helper : maybe kind → term → spanM (maybe term) helper nothing t = spanMr nothing helper (just k) t = spanMr (just (Lam posinfo-gen Erased posinfo-gen x (SomeClass (Tkk k)) t)) check-cases : dataConsts → cases → params → type → posinfo → posinfo → spanM ⊤ check-cases DataNull NoCase _ _ _ _ = spanMok check-cases (DataCons (DataConst _ c t) cts) (SomeCase pi c' varsargs t' cs) ps ty pic pic' = spanM-add (mk-span "Mu case" pi (term-end-pos t') [] nothing) ≫span check-cases cts cs ps ty pic pic' check-cases _ _ _ _ pic pic' = spanM-add (mk-span "Mu Cases error" pic pic' [] (just "Number of cases and constructors do not match")) {- Cedilleum specification, section 4.5 -} well-formed-patterns : defDatatype → term → type → cases → posinfo → posinfo → spanM ⊤ well-formed-patterns dd@(Datatype pi pix x ps k cons pf) t P cases pic pic' = (check-type P nothing) on-fail (spanM-add (mk-span "Wrong motive" (type-start-pos P) (type-end-pos P) [] (just "Motive does not typecheck"))) ≫=spanm' (λ kmtv → get-ctxt (λ Γ → valid-elim-kind (lam-expand-type ps (qualif-type Γ (TpVar pix x))) k kmtv (type-start-pos P) (type-end-pos P) ≫span check-cases cons cases ps P pic pic')) -- check-term -- ================================================== module check-term-errors {A : Set} where inapplicable-tp : (t : term) (tp : type) (htp : type) (mtp : maybe type) → spanM $ check-ret mtp inapplicable-tp t tp htp m = get-ctxt λ Γ → spanM-add (AppTp-span t tp (maybe-to-checking m) ([ head-type Γ htp ]) (just "The type of the head does not allow it to be applied to a type argument")) ≫span (spanMr $ ret m) where ret : (m : maybe type) → check-ret m ret (just x₁) = triv ret nothing = nothing check-term = check-termi -- Used to call hnf on expected/synthesized type check-type subject nothing = check-typei subject nothing check-type subject (just k) = get-ctxt (λ Γ → check-typei subject (just (hnf Γ (unfolding-elab unfold-head) k tt))) check-termi (Parens pi t pi') tp = spanM-add (punctuation-span "Parens" pi pi') ≫span check-termi t tp check-termi (Var pi x) mtp = get-ctxt (cont mtp) where cont : (mtp : maybe type) → ctxt → spanM (check-ret mtp) cont mtp Γ with ctxt-lookup-term-var Γ x cont mtp Γ | nothing = spanM-add (Var-span Γ pi x (maybe-to-checking mtp) (expected-type-if Γ mtp ++ [ missing-type ]) (just "Missing a type for a term variable.")) ≫span return-when mtp mtp cont nothing Γ | just tp = spanM-add (Var-span Γ pi x synthesizing [ type-data Γ tp ] nothing) ≫span spanMr (just tp) cont (just tp) Γ | just tp' = spanM-add (uncurry (Var-span Γ pi x checking) (check-for-type-mismatch Γ "synthesized" tp tp')) check-termi t'@(AppTp t tp') mtp = get-ctxt λ Γ → check-termi t nothing on-fail spanM-add (AppTp-span t tp' (maybe-to-checking mtp) (expected-type-if Γ mtp) nothing) ≫span return-when mtp mtp ≫=spanm' λ tp → spanMr (either-else' (to-is-tpabs tp) (λ _ → to-is-tpabs (hnf Γ (unfolding-elab unfold-head) tp tt)) inj₂) 
on-fail (λ _ → check-term-errors.inapplicable-tp {A = check-ret mtp} t tp' tp mtp) ≫=spans' λ ret → let mk-tpabs e? x k body = ret in check-type tp' (just k) ≫span let rtp = subst Γ (qualif-type Γ tp') x body in spanM-add (uncurry (λ tvs → AppTp-span t tp' (maybe-to-checking mtp) (type-data Γ rtp :: tvs)) (check-for-type-mismatch-if Γ "synthesizing" mtp rtp)) ≫span return-when mtp (just rtp) check-termi t''@(App t m t') tp = get-ctxt λ Γ → check-term-spine t'' (proto-maybe tp) tt on-fail check-fail tp ≫=spanm' λ where (mk-spine-data Xs tp' _) → return-when tp (just (meta-vars-subst-type' ff Γ Xs (decortype-to-type tp'))) check-termi (Let pi d t) mtp = -- spanM-add (punctuation-span "Let" pi (posinfo-plus pi 3)) ≫span check-def d ≫=span finish where maybe-subst : defTermOrType → (mtp : maybe type) → check-ret mtp → spanM (check-ret mtp) maybe-subst _ (just T) triv = spanMok maybe-subst _ nothing nothing = spanMr nothing maybe-subst (DefTerm pi x NoType t) nothing (just T) = get-ctxt λ Γ → spanMr (just (subst Γ (qualif-term Γ (Chi posinfo-gen NoType t)) (pi % x) T)) maybe-subst (DefTerm pi x (SomeType T') t) nothing (just T) = get-ctxt λ Γ → spanMr (just (subst Γ (qualif-term Γ (Chi posinfo-gen (SomeType T') t)) (pi % x) T)) maybe-subst (DefType pi x k T') nothing (just T) = get-ctxt λ Γ → spanMr (just (subst Γ (qualif-type Γ T') (pi % x) T)) -- maybe-subst covers the case where the synthesized type of t has the let-bound -- variable in it by substituting the let definition for the let-bound variable -- in the synthesized type. We also need to use Chi to maintain the checking mode -- of the term so that the type still kind-checks, as a synthesizing term let could -- be substituted into a checking position, or vice-versa with a checking term let. finish : (var × restore-def) → spanM (check-ret mtp) finish (x , m) = get-ctxt λ Γ → spanM-add (Let-span Γ (maybe-to-checking mtp) pi d t [] nothing) ≫span check-term t mtp ≫=span λ r → spanM-restore-info x m ≫span maybe-subst d mtp r check-termi (Open pi x t) mtp = get-ctxt (λ Γ → spanMr (ctxt-get-qi Γ x) ≫=span λ where (just (x' , _)) → cont x' mtp nothing → spanM-add (Var-span Γ (posinfo-plus pi 5) x (maybe-to-checking mtp) [] (just (nodef-err x))) ≫span -- (open-span (just (nodef-err x))) ≫span (check-fail mtp)) where span-name = "Open an opaque definition in a sub-term" nodef-err : string → string nodef-err s = "the definition '" ^ s ^ "' is not in scope" category-err : string → string category-err s = "the definition '" ^ s ^ "' is not a type/term definition" open-span : err-m → span open-span err = mk-span span-name pi (term-end-pos t) [] err cont : var → (m : maybe type) → spanM (check-ret m) cont v mtp = spanM-clarify-def v ≫=span λ where (just si) → spanM-add (open-span nothing) ≫span get-ctxt (λ Γ' → spanM-add (Var-span Γ' (posinfo-plus pi 5) x (maybe-to-checking mtp) [] nothing) ≫span check-term t mtp ≫=span λ r → spanM-restore-clarified-def v si ≫span spanMr r) nothing → spanM-add (open-span (just (category-err v))) ≫span (check-fail mtp) check-termi (Lam pi l pi' x (SomeClass atk) t) nothing = spanM-add (punctuation-span "Lambda" pi (posinfo-plus pi 1)) ≫span check-tk atk ≫span add-tk pi' x atk ≫=span λ mi → check-term t nothing ≫=span λ mtp → spanM-restore-info x mi ≫span -- now restore the context cont mtp where cont : maybe type → spanM (maybe type) cont nothing = get-ctxt λ Γ → spanM-add (Lam-span Γ synthesizing pi l x (SomeClass atk) t [] nothing) ≫span spanMr nothing cont (just tp) = get-ctxt λ Γ → let atk' = qualif-tk Γ atk in -- 
This should indeed "unqualify" occurrences of x in tp for rettp let rettp = abs-tk l x atk' (rename-var Γ (pi' % x) x tp) in let tvs = [ type-data Γ rettp ] in let p = if me-erased l && is-free-in skip-erased x t then just "The bound variable occurs free in the erasure of the body (not allowed)." , [ erasure Γ t ] else nothing , [] in spanM-add (Lam-span Γ synthesizing pi l x (SomeClass atk') t (snd p ++ tvs) (fst p)) ≫span check-termi-return-hnf Γ (Lam pi l pi' x (SomeClass atk) t) rettp check-termi (Lam pi l _ x NoClass t) nothing = get-ctxt λ Γ → spanM-add (punctuation-span "Lambda" pi (posinfo-plus pi 1)) ≫span spanM-add (Lam-span Γ synthesizing pi l x NoClass t [] (just ("We are not checking this abstraction against a type, so a classifier must be" ^ " given for the bound variable " ^ x))) ≫span spanMr nothing check-termi (Lam pi l pi' x oc t) (just tp) = get-ctxt λ Γ → cont (to-abs tp maybe-or to-abs (hnf Γ unfold-head tp tt)) where cont : maybe abs → spanM ⊤ cont (just (mk-abs b x' atk _ tp')) = check-oc oc ≫span spanM-add (punctuation-span "Lambda" pi (posinfo-plus pi 1)) ≫span get-ctxt λ Γ → spanM-add (uncurry (this-span Γ atk oc) (check-erasures Γ l b)) ≫span add-tk' (me-erased l) pi' x (lambda-bound-class-if oc atk) ≫=span λ mi → get-ctxt λ Γ' → check-term t (just (rename-var Γ x' (qualif-var Γ' x) tp')) ≫span spanM-restore-info x mi where this-span : ctxt → tk → optClass → 𝕃 tagged-val → err-m → span this-span Γ _ NoClass tvs = Lam-span Γ checking pi l x oc t tvs this-span Γ atk (SomeClass atk') tvs err = if conv-tk Γ (qualif-tk Γ atk') atk then Lam-span Γ checking pi l x oc t tvs err else let p = lambda-bound-var-conv-error Γ x atk atk' tvs in Lam-span Γ checking pi l x oc t (fst p) (just (snd p)) check-oc : optClass → spanM ⊤ check-oc NoClass = spanMok check-oc (SomeClass atk) = check-tk atk check-erasures : ctxt → maybeErased → maybeErased → 𝕃 tagged-val × err-m check-erasures Γ Erased All = if is-free-in skip-erased x t then type-data Γ tp :: [ erasure Γ t ] , just "The Λ-bound variable occurs free in the erasure of the body." 
else [ type-data Γ tp ] , nothing check-erasures Γ NotErased Pi = [ type-data Γ tp ] , nothing check-erasures Γ Erased Pi = [ expected-type Γ tp ] , just ("The expected type is a Π-abstraction (indicating explicit input), but" ^ " the term is a Λ-abstraction (implicit input).") check-erasures Γ NotErased All = [ expected-type Γ tp ] , just ("The expected type is a ∀-abstraction (indicating implicit input), but" ^ " the term is a λ-abstraction (explicit input).") cont nothing = get-ctxt λ Γ → spanM-add (punctuation-span "Lambda" pi (posinfo-plus pi 1)) ≫span spanM-add (Lam-span Γ checking pi l x oc t [ expected-type Γ tp ] (just "The expected type is not of the form that can classify a λ-abstraction")) check-termi (Beta pi ot ot') (just tp) = untyped-optTerm-spans ot ≫span untyped-optTerm-spans ot' ≫span get-ctxt λ Γ → spanM-add (uncurry (Beta-span pi (optTerm-end-pos-beta pi ot ot') checking) (case hnf Γ unfold-head tp tt of λ where (TpEq pi' t1 t2 pi'') → if conv-term Γ t1 t2 then [ type-data Γ (TpEq pi' t1 t2 pi'') ] , (optTerm-conv Γ t1 ot) else [ expected-type Γ (TpEq pi' t1 t2 pi'') ] , (just "The two terms in the equation are not β-equal") tp → [ expected-type Γ tp ] , just "The expected type is not an equation.")) where optTerm-conv : ctxt → term → optTerm → err-m optTerm-conv Γ t1 NoTerm = nothing optTerm-conv Γ t1 (SomeTerm t _) = if conv-term Γ (qualif-term Γ t) t1 then nothing else just "The expected type does not match the synthesized type" check-termi (Beta pi (SomeTerm t pi') ot) nothing = get-ctxt λ Γ → untyped-term-spans t ≫span untyped-optTerm-spans ot ≫span let tp = qualif-type Γ (TpEq posinfo-gen t t posinfo-gen) in spanM-add (Beta-span pi (optTerm-end-pos-beta pi (SomeTerm t pi') ot) synthesizing [ type-data Γ tp ] nothing) ≫span spanMr (just tp) check-termi (Beta pi NoTerm ot') nothing = untyped-optTerm-spans ot' ≫span spanM-add (Beta-span pi (optTerm-end-pos-beta pi NoTerm ot') synthesizing [] (just "An expected type is required in order to type a use of plain β.")) ≫span spanMr nothing check-termi (Epsilon pi lr m t) (just tp) = -- (TpEq pi' t1 t2 pi'')) = get-ctxt λ Γ → case hnf Γ unfold-head tp tt of λ where (TpEq pi' t1 t2 pi'') → spanM-add (Epsilon-span pi lr m t checking [ type-data Γ (TpEq pi' t1 t2 pi'') ] nothing) ≫span check-term t (just (check-term-update-eq Γ lr m pi' t1 t2 pi'')) tp → spanM-add (Epsilon-span pi lr m t checking [ expected-type Γ tp ] (just "The expected type is not an equation, when checking an ε-term.")) check-termi (Epsilon pi lr m t) nothing = check-term t nothing ≫=span λ mtp → get-ctxt λ Γ → cont (maybe-hnf Γ mtp) where cont : maybe type → spanM (maybe type) cont nothing = spanM-add (Epsilon-span pi lr m t synthesizing [] (just "There is no expected type, and we could not synthesize a type from the body of the ε-term.")) ≫span spanMr nothing cont (just (TpEq pi' t1 t2 pi'')) = get-ctxt λ Γ → let r = check-term-update-eq Γ lr m pi' t1 t2 pi'' in spanM-add (Epsilon-span pi lr m t synthesizing [ type-data Γ r ] nothing) ≫span spanMr (just r) cont (just tp) = get-ctxt λ Γ → spanM-add (Epsilon-span pi lr m t synthesizing [ to-string-tag "the synthesized type" Γ tp ] (just "There is no expected type, and the type we synthesized for the body of the ε-term is not an equation.")) ≫span spanMr nothing check-termi (Sigma pi t) mt = check-term t nothing ≫=span λ mt' → get-ctxt λ Γ → cont Γ mt (maybe-hnf Γ mt') where cont : ctxt → (outer : maybe type) → maybe type → spanM (check-ret outer) cont Γ mt nothing = spanM-add (Sigma-span pi t 
(maybe-to-checking mt) [] (just "We could not synthesize a type from the body of the ς-term")) ≫span check-fail mt cont Γ mt (just tp) with mt | hnf Γ unfold-head tp tt ...| nothing | TpEq pi' t1 t2 pi'' = spanM-add (Sigma-span pi t synthesizing [ type-data Γ (TpEq pi' t2 t1 pi'') ] nothing) ≫span spanMr (just (TpEq pi' t2 t1 pi'')) ...| just tp' | TpEq pi' t1 t2 pi'' = spanM-add ∘ (flip uncurry) (check-for-type-mismatch Γ "synthesized" tp' (TpEq pi' t2 t1 pi'')) $ λ tvs err → Sigma-span pi t checking tvs err ...| mt' | tp' = spanM-add (Sigma-span pi t (maybe-to-checking mt') (to-string-tag "the synthesized type" Γ tp' :: expected-type-if Γ mt') (just ("The type we synthesized for the body of the ς-term is not an equation"))) ≫span check-fail mt' check-termi (Phi pi t₁≃t₂ t₁ t₂ pi') (just tp) = get-ctxt λ Γ → check-term t₁≃t₂ (just (qualif-type Γ (TpEq posinfo-gen t₁ t₂ posinfo-gen))) ≫span check-term t₁ (just tp) ≫span untyped-term-spans t₂ ≫span spanM-add (Phi-span pi pi' checking [ type-data Γ tp ] nothing) check-termi (Phi pi t₁≃t₂ t₁ t₂ pi') nothing = get-ctxt λ Γ → check-term t₁≃t₂ (just (qualif-type Γ (TpEq posinfo-gen t₁ t₂ posinfo-gen))) ≫span check-term t₁ nothing ≫=span λ mtp → untyped-term-spans t₂ ≫span spanM-add (Phi-span pi pi' synthesizing (type-data-tvs Γ mtp) nothing) ≫span spanMr mtp where type-data-tvs : ctxt → maybe type → 𝕃 tagged-val type-data-tvs Γ (just tp) = [ type-data Γ tp ] type-data-tvs Γ nothing = [] check-termi (Rho pi op on t (Guide pi' x tp) t') nothing = get-ctxt λ Γ → spanM-add (Var-span (ctxt-var-decl-loc pi' x Γ) pi' x synthesizing [] nothing) ≫span check-term t' nothing ≫=span λ mtp → untyped-optGuide-spans (Guide pi' x tp) ≫span check-term t nothing ≫=span λ mtp' → case maybe-hnf Γ mtp' of λ where (just (TpEq _ t1 t2 _)) → maybe-else (spanM-add (Rho-span pi t t' synthesizing op (inj₂ x) [] nothing) ≫span spanMr nothing) (λ tp' → let Γ' = ctxt-var-decl-loc pi' x Γ tp = qualif-type Γ' tp tp'' = subst Γ t1 x tp qt = qualif-term Γ t -- tp''' = qualif-type Γ (subst-type Γ t2 x tp) tp''' = post-rewrite Γ' x qt t2 (rewrite-at Γ' x qt tt tp' tp) in if conv-type Γ tp'' tp' then (spanM-add (Rho-span pi t t' synthesizing op (inj₂ x) [ type-data Γ tp''' ] nothing) ≫span spanMr (just tp''')) else (spanM-add (Rho-span pi t t' synthesizing op (inj₂ x) (type-data Γ tp' :: [ expected-type-subterm Γ tp'' ]) (just "The expected type of the subterm does not match the synthesized type")) ≫span spanMr nothing)) mtp (just _) → spanM-add (Rho-span pi t t' synthesizing op (inj₂ x) [] (just "We could not synthesize an equation from the first subterm in a ρ-term.")) ≫span spanMr nothing nothing → spanM-add (Rho-span pi t t' synthesizing op (inj₂ x) [] nothing) ≫span check-term t' nothing check-termi (Rho pi op on t (Guide pi' x tp) t') (just tp') = get-ctxt λ Γ → untyped-optGuide-spans (Guide pi' x tp) ≫span check-term t nothing ≫=span λ mtp → case maybe-hnf Γ mtp of λ where (just (TpEq _ t1 t2 _)) → let Γ' = ctxt-var-decl-loc pi' x Γ qt = qualif-term Γ t tp = qualif-type Γ' tp tp'' = subst Γ' t2 x tp -- This is t2 (and t1 below) so that Cedille Core files are correctly checked by regular Cedille -- tp''' = subst-type Γ t1 x (qualif-type Γ tp) tp''' = post-rewrite Γ' x qt t1 (rewrite-at Γ' x qt tt tp' tp) err = if conv-type Γ tp'' tp' then nothing else just "The expected type does not match the specified type" in spanM-add (Rho-span pi t t' checking op (inj₂ x) (type-data Γ tp'' :: [ expected-type Γ tp' ]) err) ≫span spanM-add (Var-span (ctxt-var-decl-loc pi' x Γ) pi' x 
checking [] nothing) ≫span check-term t' (just tp''') (just _) → spanM-add (Rho-span pi t t' checking op (inj₂ x) [] (just "We could not synthesize an equation from the first subterm in a ρ-term.")) nothing → spanM-add (Rho-span pi t t' checking op (inj₂ x) [] nothing) ≫span check-term t' (just tp) check-termi (Rho pi op on t NoGuide t') (just tp) = get-ctxt λ Γ → check-term t nothing ≫=span λ mtp → cont (maybe-hnf Γ mtp) (hnf Γ unfold-head-no-lift tp tt) where cont : maybe type → type → spanM ⊤ cont nothing tp = get-ctxt λ Γ → spanM-add (Rho-span pi t t' checking op (inj₁ 0) [ expected-type Γ tp ] nothing) ≫span check-term t' (just tp) cont (just (TpEq pi' t1 t2 pi'')) tp = get-ctxt λ Γ → let ns-err = optNums-to-stringset on x = fresh-var "x" (ctxt-binds-var Γ) empty-renamectxt Γ' = ctxt-var-decl x Γ qt = qualif-term Γ t s = rewrite-type tp Γ' (is-rho-plus op) (fst ns-err) qt t1 x 0 T = post-rewrite Γ' x qt t2 (fst s) in -- subst-type Γ' t2 x (fst s) in check-term t' (just T) ≫span spanM-add (Rho-span pi t t' checking op (inj₁ (fst (snd s))) ((to-string-tag "the equation" Γ (TpEq pi' t1 t2 pi'')) :: [ type-data Γ tp ]) (snd ns-err (snd (snd s)))) cont (just tp') tp = get-ctxt λ Γ → spanM-add (Rho-span pi t t' checking op (inj₁ 0) ((to-string-tag "the synthesized type for the first subterm" Γ tp') :: [ expected-type Γ tp ]) (just "We could not synthesize an equation from the first subterm in a ρ-term.")) check-termi (Rho pi op on t NoGuide t') nothing = check-term t nothing ≫=span λ mtp → check-term t' nothing ≫=span λ mtp' → get-ctxt λ Γ → cont (maybe-hnf Γ mtp) (maybe-map (λ mtp' → hnf Γ unfold-head-no-lift mtp' tt) mtp') where cont : maybe type → maybe type → spanM (maybe type) cont (just (TpEq pi' t1 t2 pi'')) (just tp) = get-ctxt λ Γ → let ns-err = optNums-to-stringset on x = fresh-var "x" (ctxt-binds-var Γ) empty-renamectxt qt = qualif-term Γ t Γ' = ctxt-var-decl x Γ s = rewrite-type tp Γ' (is-rho-plus op) (fst ns-err) qt t1 x 0 tp' = post-rewrite Γ' x qt t2 (fst s) in -- subst-type Γ t2 x (fst s) in spanM-add (Rho-span pi t t' synthesizing op (inj₁ (fst (snd s))) [ type-data Γ tp' ] (snd ns-err (snd (snd s)))) ≫span check-termi-return-hnf Γ (Rho pi op on t NoGuide t') tp' cont (just tp') (just _) = get-ctxt λ Γ → spanM-add (Rho-span pi t t' synthesizing op (inj₁ 0) [ to-string-tag "the synthesized type for the first subterm" Γ tp' ] (just "We could not synthesize an equation from the first subterm in a ρ-term.")) ≫span spanMr nothing cont _ _ = spanM-add (Rho-span pi t t' synthesizing op (inj₁ 0) [] nothing) ≫span spanMr nothing check-termi (Chi pi (SomeType tp) t) mtp = check-type tp (just star) ≫span get-ctxt λ Γ → let tp' = qualif-type Γ tp in check-termi t (just tp') ≫span cont tp' mtp where cont : type → (m : maybe type) → spanM (check-ret m) cont tp' nothing = get-ctxt λ Γ → spanM-add (Chi-span Γ pi (SomeType tp) t synthesizing [] nothing) ≫span spanMr (just tp') cont tp' (just tp'') = get-ctxt λ Γ → spanM-add (uncurry (Chi-span Γ pi (SomeType tp') t checking) (check-for-type-mismatch Γ "asserted" tp'' tp')) check-termi (Chi pi NoType t) (just tp) = check-term t nothing ≫=span cont where cont : (m : maybe type) → spanM ⊤ cont nothing = get-ctxt (λ Γ → spanM-add (Chi-span Γ pi NoType t checking [] nothing) ≫span spanMok) cont (just tp') = get-ctxt λ Γ → spanM-add (uncurry (Chi-span Γ pi NoType t checking) (check-for-type-mismatch Γ "synthesized" tp tp')) check-termi (Chi pi NoType t) nothing = get-ctxt λ Γ → spanM-add (Chi-span Γ pi NoType t synthesizing [] nothing) ≫span 
check-term t nothing check-termi (Delta pi mT t) mtp = check-term t nothing ≫=span λ T → get-ctxt λ Γ → spanM-add (Delta-span Γ pi mT t (maybe-to-checking mtp) [] (maybe-hnf Γ T ≫=maybe check-contra Γ)) ≫span (case mT of λ where NoType → spanMr compileFailType (SomeType T) → check-type T (just (Star posinfo-gen)) ≫span spanMr T) ≫=span λ T → return-when mtp (just (qualif-type Γ T)) where check-contra : ctxt → type → err-m check-contra Γ (TpEq _ t1 t2 _) = if check-beta-inequiv (hnf Γ unfold-head t1 tt) (hnf Γ unfold-head t2 tt) then nothing else just "We could not find a contradiction in the synthesized type of the subterm." check-contra _ _ = just "We could not synthesize an equation from the subterm." check-termi (Theta pi u t ls) nothing = get-ctxt λ Γ → spanM-add (Theta-span Γ pi u t ls synthesizing [] (just "Theta-terms can only be used in checking positions (and this is a synthesizing one).")) ≫span spanMr nothing check-termi (Theta pi AbstractEq t ls) (just tp) = -- discard spans from checking t, because we will check it again below check-term t nothing ≫=spand λ mtp → get-ctxt λ Γ → cont (maybe-hnf Γ mtp) (hnf Γ unfold-head tp tt) where cont : maybe type → type → spanM ⊤ cont nothing tp = check-term t nothing ≫=span λ m → get-ctxt λ Γ → spanM-add (Theta-span Γ pi AbstractEq t ls checking [ expected-type Γ tp ] (just "We could not compute a motive from the given term")) -- (expected-type Γ tp :: [ motive-label , [[ "We could not compute a motive from the given term" ]] , [] ])))) cont (just htp) tp = get-ctxt λ Γ → let x = (fresh-var "x" (ctxt-binds-var Γ) empty-renamectxt) in let motive = mtplam x (Tkt htp) (TpArrow (TpEq posinfo-gen t (mvar x) posinfo-gen) Erased tp) in spanM-add (Theta-span Γ pi AbstractEq t ls checking (expected-type Γ tp :: [ the-motive Γ motive ]) nothing) ≫span check-term (lterms-to-term AbstractEq (AppTp t (NoSpans motive (posinfo-plus (term-end-pos t) 1))) ls) (just tp) check-termi (Theta pi Abstract t ls) (just tp) = -- discard spans from checking the head, because we will check it again below check-term t nothing ≫=spand λ mtp → get-ctxt λ Γ → cont t (maybe-hnf Γ mtp) (hnf Γ unfold-head tp tt) where cont : term → maybe type → type → spanM ⊤ cont _ nothing tp = check-term t nothing ≫=span λ m → get-ctxt λ Γ → spanM-add (Theta-span Γ pi Abstract t ls checking [ expected-type Γ tp ] (just "We could not compute a motive from the given term")) -- (expected-type Γ tp :: [ motive-label , [[ "We could not compute a motive from the given term" ]] , [] ])))) cont t (just htp) tp = get-ctxt λ Γ → let x = compute-var (hnf Γ unfold-head (qualif-term Γ t) tt) x' = maybe-else (unqual-local x) id (var-suffix x) in let motive = mtplam x' (Tkt htp) (rename-var Γ x x' tp) in spanM-add (Theta-span Γ pi Abstract t ls checking (expected-type Γ tp :: [ the-motive Γ motive ]) nothing) ≫span check-term (lterms-to-term Abstract (AppTp t (NoSpans motive (term-end-pos t))) ls) (just tp) where compute-var : term → var compute-var (Var pi' x) = x compute-var t = ignored-var check-termi (Theta pi (AbstractVars vs) t ls) (just tp) = get-ctxt λ Γ → let tp = hnf Γ unfold-head tp tt in cont (wrap-vars Γ vs tp {-(substs-type empty-ctxt (rep-vars Γ vs empty-trie) tp)-}) tp where wrap-var : ctxt → var → type → maybe type wrap-var Γ v tp = ctxt-lookup-tk-var Γ v ≫=maybe λ atk → just (mtplam v atk (rename-var Γ (qualif-var Γ v) v tp)) wrap-vars : ctxt → vars → type → maybe type wrap-vars Γ (VarsStart v) tp = wrap-var Γ v tp wrap-vars Γ (VarsNext v vs) tp = wrap-vars Γ vs tp ≫=maybe wrap-var Γ v 
cont : maybe type → type → spanM ⊤ cont nothing tp = check-term t nothing ≫=span λ m → get-ctxt λ Γ → spanM-add (Theta-span Γ pi (AbstractVars vs) t ls checking [ expected-type Γ tp ] (just ("We could not compute a motive from the given term" ^ " because one of the abstracted vars is not in scope."))) cont (just motive) tp = get-ctxt λ Γ → spanM-add (Theta-span Γ pi (AbstractVars vs) t ls checking (expected-type Γ tp :: [ the-motive Γ motive ]) nothing) ≫span check-term (lterms-to-term Abstract (AppTp t (NoSpans motive (posinfo-plus (term-end-pos t) 1))) ls) (just tp) {-rep-var : ctxt → var → trie term → trie term rep-var Γ v ρ with trie-lookup (ctxt-get-qualif Γ) v ...| nothing = ρ ...| just (v' , _) = trie-insert ρ v' (Var posinfo-gen v) rep-vars : ctxt → vars → trie term → trie term rep-vars Γ (VarsStart v) = rep-var Γ v rep-vars Γ (VarsNext v vs) ρ = rep-vars Γ vs (rep-var Γ v ρ)-} check-termi (Hole pi) tp = get-ctxt λ Γ → spanM-add (hole-span Γ pi tp []) ≫span return-when tp tp check-termi (IotaPair pi t1 t2 og pi') (just tp) = -- (Iota pi1 pi2 x tp1 tp2)) = get-ctxt λ Γ → case hnf Γ unfold-head tp tt of λ where (Iota pi1 pi2 x tp1 tp2) → check-term t1 (just tp1) ≫span let t1' = qualif-term Γ t1 t2' = qualif-term Γ t2 in check-term t2 (just (subst Γ t1' x tp2)) ≫span optGuide-spans og checking ≫span check-optGuide og tp1 tp2 pi2 x ≫=span λ e → let cc = check-conv Γ t1' t2' e in spanM-add (IotaPair-span pi pi' checking (expected-type Γ (Iota pi1 pi2 x tp1 tp2) :: snd cc) (fst cc)) tp → spanM-add (IotaPair-span pi pi' checking [ expected-type Γ tp ] (just "The type we are checking against is not a iota-type")) where ntag : ctxt → string → string → term → unfolding → tagged-val ntag Γ nkind which t u = to-string-tag (nkind ^ " of the " ^ which ^ " component: ") Γ (hnf Γ u t tt) err : ctxt → string → term → tagged-val err Γ which t = ntag Γ "Hnf" which t unfold-head check-conv : ctxt → term → term → err-m → err-m × 𝕃 tagged-val check-conv Γ t1 t2 e = if conv-term Γ t1 t2 then e , [] else just "The two components of the iota-pair are not convertible (as required)." , err Γ "first" t1 :: [ err Γ "second" t2 ] check-optGuide : optGuide → type → type → posinfo → var → spanM err-m check-optGuide NoGuide tp1 tp2 pi2 x = spanMr nothing check-optGuide (Guide pi x' tp) tp1 tp2 pi2 x = get-ctxt λ Γ → with-ctxt (ctxt-term-decl pi x' tp1 Γ) (check-type tp (just (Star posinfo-gen))) ≫span spanMr (if conv-type Γ tp2 (qualif-type (ctxt-var-decl x Γ) (subst Γ (Var pi2 x) x' tp)) then nothing else just "The expected type does not match the guided type") check-termi (IotaPair pi t1 t2 (Guide pi' x T2) pi'') nothing = get-ctxt λ Γ → check-term t1 nothing ≫=span λ T1 → check-term t2 (just (qualif-type Γ (subst Γ (qualif-term Γ t1) x T2))) ≫span maybe-else spanMok (λ T1 → with-ctxt (ctxt-term-decl pi' x T1 Γ) (check-type T2 (just (Star posinfo-gen)))) T1 ≫span let T2' = qualif-type (ctxt-var-decl x Γ) T2 in spanM-add (IotaPair-span pi pi'' synthesizing (maybe-else [] (λ T1 → [ type-data Γ (Iota posinfo-gen posinfo-gen x T1 T2') ]) T1) nothing) ≫span spanM-add (Var-span (ctxt-var-decl-loc pi' x Γ) pi' x synthesizing [] nothing) ≫span spanMr (T1 ≫=maybe λ T1 → just (Iota posinfo-gen posinfo-gen x T1 T2')) where err : ctxt → err-m × 𝕃 tagged-val err Γ = if conv-term Γ t1 t2 then nothing , [] else just "The two components of the iota-pair are not convertible (as required)." 
, to-string-tag "Hnf of the first component" Γ (hnf Γ unfold-head t1 tt) :: [ to-string-tag "Hnf of the second component" Γ (hnf Γ unfold-head t2 tt) ] check-termi (IotaPair pi t1 t2 NoGuide pi') nothing = spanM-add (IotaPair-span pi pi' synthesizing [] (just "Iota pairs require a specified type when in a synthesizing position")) ≫span spanMr nothing check-termi (IotaProj t n pi) mtp = check-term t nothing ≫=span λ mtp' → get-ctxt λ Γ → cont' mtp (posinfo-to-ℕ n) (maybe-hnf Γ mtp') where cont : (outer : maybe type) → ℕ → (computed : type) → spanM (check-ret outer) cont mtp n computed with computed cont mtp 1 computed | Iota pi' pi'' x t1 t2 = get-ctxt λ Γ → spanM-add (uncurry (λ tvs → IotaProj-span t pi (maybe-to-checking mtp) (head-type Γ computed :: tvs)) (check-for-type-mismatch-if Γ "synthesized" mtp t1)) ≫span return-when mtp (just t1) cont mtp 2 computed | Iota pi' pi'' x a t2 = get-ctxt λ Γ → let t2' = subst Γ (qualif-term Γ t) x t2 in spanM-add (uncurry (λ tvs → IotaProj-span t pi (maybe-to-checking mtp) (head-type Γ computed :: tvs)) (check-for-type-mismatch-if Γ "synthesized" mtp t2')) ≫span return-when mtp (just t2') cont mtp n computed | Iota pi' pi'' x t1 t2 = get-ctxt λ Γ → spanM-add (IotaProj-span t pi (maybe-to-checking mtp) [ head-type Γ computed ] (just "Iota-projections must use .1 or .2 only.")) ≫span return-when mtp mtp cont mtp n computed | _ = get-ctxt λ Γ → spanM-add (IotaProj-span t pi (maybe-to-checking mtp) [ head-type Γ computed ] (just "The head type is not a iota-abstraction.")) ≫span return-when mtp mtp cont' : (outer : maybe type) → ℕ → (computed : maybe type) → spanM (check-ret outer) cont' mtp _ nothing = spanM-add (IotaProj-span t pi (maybe-to-checking mtp) [] nothing) ≫span return-when mtp mtp cont' mtp n (just tp) = get-ctxt λ Γ → cont mtp n (hnf Γ unfold-head tp tt) -- we are looking for iotas in the bodies of rec defs check-termi mu@(Mu' pi t (SomeType P) pi' cs pi'') nothing = check-term t nothing on-fail (spanMr nothing) ≫=spanm' (λ I → spanM-add (Mu'-span mu [] nothing) ≫span spanMr (just I) ) check-termi mu@(Mu' pi t (SomeType P) pi' cs pi'') (just tp) = check-term t nothing on-fail spanMok ≫=spanm' (λ I → -- TODO: remove T parameters and s indices from expression I T s get-ctxt (helper I)) where helper : type → ctxt → spanM ⊤ helper (TpVar _ x) (mk-ctxt _ _ _ _ d) with trie-lookup d x ... | just dt = well-formed-patterns dt t P cs pi' pi'' ... | nothing = spanMok helper _ Γ = spanMok check-termi (Mu' pi t NoType _ cs pi') (just tp) = spanMok check-termi (Mu' pi t NoType _ cs pi') nothing = spanMr nothing check-termi (Mu pi x t (SomeType m) _ cs pi') (just tp) = spanMok check-termi (Mu pi x t (SomeType m) _ cs pi') nothing = spanMr nothing check-termi (Mu pi x t NoType _ cs pi') nothing = spanMr nothing check-termi (Mu pi x t NoType _ cs pi') (just tp) = spanMok {-check-termi t tp = get-ctxt (λ Γ → spanM-add (unimplemented-term-span Γ (term-start-pos t) (term-end-pos t) tp) ≫span unimplemented-if tp)-} -- END check-term -- ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -- check-term-spine -- ================================================== -- check-term-spine is where metavariables are generated and solved, so it -- requires its set of helpers check-term-spine-return : ctxt → meta-vars → decortype → ℕ → spanM (maybe spine-data) check-term-spine-return Γ Xs dt locl = spanMr (just (mk-spine-data Xs dt locl)) -- a flag indicating how aggresively we should be unfolding during matching. -- "both" is the backtracking flag. 
We will attempt "both" matches, which means -- first matching without unfolding, then if that fails unfolding the type once -- and continue matching the subexpresions with "both" data match-unfolding-state : Set where match-unfolding-both match-unfolding-approx match-unfolding-hnf : match-unfolding-state -- main matching definitions -- -------------------------------------------------- -- NOTE: these functions don't actually ever emit spans match-types : meta-vars → local-vars → match-unfolding-state → (tpₓ tp : type) → spanM $ match-error-t meta-vars match-kinds : meta-vars → local-vars → match-unfolding-state → (kₓ k : kind) → spanM $ match-error-t meta-vars match-tks : meta-vars → local-vars → match-unfolding-state → (tkₓ tk : tk) → spanM $ match-error-t meta-vars record match-prototype-data : Set where constructor mk-match-prototype-data field match-proto-mvars : meta-vars match-proto-dectp : decortype match-proto-error : 𝔹 open match-prototype-data match-prototype : (Xs : meta-vars) (is-hnf : 𝔹) (tp : type) (pt : prototype) → spanM $ match-prototype-data -- substitutions used during matching -- -------------------------------------------------- -- These have to be in the spanM monad because substitution can unlock a `stuck` -- decoration, causing another round of prototype matching (which invokes type matching) substh-decortype : {ed : exprd} → ctxt → renamectxt → trie ⟦ ed ⟧ → decortype → spanM $ decortype substh-decortype Γ ρ σ (decor-type tp) = spanMr $ decor-type (substh-type Γ ρ σ tp) substh-decortype Γ ρ σ (decor-arrow e? dom cod) = substh-decortype Γ ρ σ cod ≫=span λ cod → spanMr $ decor-arrow e? (substh-type Γ ρ σ dom) cod -- spanMr $ decor-arrow e? (substh-type Γ ρ σ dom) (substh-decortype Γ ρ σ cod) substh-decortype Γ ρ σ (decor-decor e? pi x sol dt) = let x' = subst-rename-var-if Γ ρ x σ Γ' = ctxt-var-decl-loc pi x' Γ ρ' = renamectxt-insert ρ x x' in substh-decortype Γ' ρ' σ dt ≫=span λ dt' → spanMr $ decor-decor e? pi x' (substh-meta-var-sort Γ ρ σ sol) dt' -- decor-decor e? x' (substh-meta-var-sol Γ' ρ' σ sol) (substh-decortype Γ' ρ' σ dt) substh-decortype Γ ρ σ (decor-stuck tp pt) = match-prototype meta-vars-empty ff (substh-type Γ ρ σ tp) pt -- NOTE: its an invariant that if you start with no meta-variables, prototype matching -- produces no meta-variables as output ≫=span λ ret → spanMr (match-proto-dectp ret) substh-decortype Γ ρ σ (decor-error tp pt) = spanMr $ decor-error (substh-type Γ ρ σ tp) pt subst-decortype : {ed : exprd} → ctxt → ⟦ ed ⟧ → var → decortype → spanM $ decortype subst-decortype Γ s x dt = substh-decortype Γ empty-renamectxt (trie-single x s) dt meta-vars-subst-decortype' : (unfold : 𝔹) → ctxt → meta-vars → decortype → spanM decortype meta-vars-subst-decortype' uf Γ Xs dt = substh-decortype Γ empty-renamectxt (meta-vars-get-sub Xs) dt ≫=span λ dt' → spanMr $ if uf then hnf-decortype Γ (unfolding-elab unfold-head) dt' tt else dt' meta-vars-subst-decortype : ctxt → meta-vars → decortype → spanM decortype meta-vars-subst-decortype = meta-vars-subst-decortype' tt -- unfolding a decorated type to reveal a term / type abstraction -- -------------------------------------------------- {-# TERMINATING #-} meta-vars-peel' : ctxt → span-location → meta-vars → decortype → spanM $ (𝕃 meta-var) × decortype meta-vars-peel' Γ sl Xs (decor-decor e? 
pi x (meta-var-tp k mtp) dt) = let Y = meta-var-fresh-tp Xs x sl (k , mtp) Xs' = meta-vars-add Xs Y in subst-decortype Γ (meta-var-to-type-unsafe Y) x dt ≫=span λ dt' → meta-vars-peel' Γ sl Xs' dt' ≫=span λ ret → let Ys = fst ret ; rdt = snd ret in spanMr $ Y :: Ys , rdt meta-vars-peel' Γ sl Xs dt@(decor-decor e? pi x (meta-var-tm _ _) _) = spanMr $ [] , dt meta-vars-peel' Γ sl Xs dt@(decor-arrow _ _ _) = spanMr $ [] , dt -- NOTE: vv The clause below will later generate a type error vv meta-vars-peel' Γ sl Xs dt@(decor-stuck _ _) = spanMr $ [] , dt -- NOTE: vv The clause below is an internal error, if reached vv meta-vars-peel' Γ sl Xs dt@(decor-type _) = spanMr $ [] , dt meta-vars-peel' Γ sl Xs dt@(decor-error _ _) = spanMr $ [] , dt meta-vars-unfold-tmapp' : ctxt → span-location → meta-vars → decortype → spanM $ (𝕃 meta-var × is-tmabsd?) meta-vars-unfold-tmapp' Γ sl Xs dt = meta-vars-subst-decortype Γ Xs dt ≫=span λ dt' → meta-vars-peel' Γ sl Xs dt' ≫=span λ where (Ys , dt'@(decor-arrow e? dom cod)) → spanMr $ Ys , yes-tmabsd dt' e? "_" dom ff cod (Ys , dt'@(decor-decor e? pi x (meta-var-tm dom _) cod)) → spanMr $ Ys , yes-tmabsd dt' e? x dom (is-free-in check-erased x (decortype-to-type cod)) cod (Ys , dt@(decor-decor _ _ _ (meta-var-tp _ _) _)) → spanMr $ Ys , not-tmabsd dt -- NOTE: vv this is a type error vv (Ys , dt@(decor-stuck _ _)) → spanMr $ Ys , not-tmabsd dt -- NOTE: vv this is an internal error, if reached vv (Ys , dt@(decor-type _)) → spanMr $ Ys , not-tmabsd dt (Ys , dt@(decor-error _ _)) → spanMr $ Ys , not-tmabsd dt meta-vars-unfold-tpapp' : ctxt → meta-vars → decortype → spanM is-tpabsd? meta-vars-unfold-tpapp' Γ Xs dt = meta-vars-subst-decortype Γ Xs dt ≫=span λ where (dt″@(decor-decor e? pi x (meta-var-tp k mtp) dt')) → spanMr $ yes-tpabsd dt″ e? x k (flip maybe-map mtp meta-var-sol.sol) dt' (dt″@(decor-decor _ _ _ (meta-var-tm _ _) _)) → spanMr $ not-tpabsd dt″ (dt″@(decor-arrow _ _ _)) → spanMr $ not-tpabsd dt″ (dt″@(decor-stuck _ _)) → spanMr $ not-tpabsd dt″ (dt″@(decor-type _)) → spanMr $ not-tpabsd dt″ (dt″@(decor-error _ _)) → spanMr $ not-tpabsd dt″ -- errors -- -------------------------------------------------- -- general type errors for applications module check-term-app-tm-errors {A : Set} (t₁ t₂ : term) (htp : type) (Xs : meta-vars) (is-locale : 𝔹) (m : checking-mode) where inapplicable : maybeErased → decortype → prototype → spanM (maybe A) inapplicable e? dt pt = get-ctxt λ Γ → spanM-add (App-span is-locale t₁ t₂ m (head-type Γ (meta-vars-subst-type Γ Xs htp) -- :: decortype-data Γ dt -- :: prototype-data Γ pt :: meta-vars-data-all Γ Xs) (just $ "The type of the head does not allow the head to be applied to " ^ h e? ^ " argument")) ≫span spanMr nothing where h : maybeErased → string h Erased = "an erased term" h NotErased = "a term" bad-erasure : maybeErased → spanM (maybe A) bad-erasure e? 
= get-ctxt λ Γ → spanM-add (App-span is-locale t₁ t₂ m (head-type Γ (meta-vars-subst-type Γ Xs htp) :: meta-vars-data-all Γ Xs) (just (msg e?))) ≫span spanMr nothing where msg : maybeErased → string msg Erased = "The type computed for the head requires an explicit (non-erased) argument," ^ " but the application is marked as erased" msg NotErased = "The type computed for the head requires an implicit (erased) argument," ^ " but the application is marked as not erased" unmatchable : (tpₓ tp : type) (msg : string) → 𝕃 tagged-val → spanM (maybe A) unmatchable tpₓ tp msg tvs = get-ctxt λ Γ → spanM-add (App-span is-locale t₁ t₂ m (arg-exp-type Γ tpₓ :: arg-type Γ tp :: tvs ++ meta-vars-data-all Γ Xs) (just msg)) ≫span spanMr nothing unsolved-meta-vars : type → 𝕃 tagged-val → spanM (maybe A) unsolved-meta-vars tp tvs = get-ctxt λ Γ → spanM-add (App-span tt t₁ t₂ m (type-data Γ tp :: meta-vars-data-all Γ Xs ++ tvs) (just "There are unsolved meta-variables in this maximal application")) ≫span spanMr nothing module check-term-app-tp-errors {A : Set} (t : term) (tp htp : type) (Xs : meta-vars) (m : checking-mode) where inapplicable : decortype → spanM (maybe A) inapplicable dt = get-ctxt λ Γ → spanM-add (AppTp-span t tp synthesizing (head-type Γ (meta-vars-subst-type Γ Xs htp) -- :: decortype-data Γ dt :: meta-vars-data-all Γ Xs) (just "The type of the head does not allow the head to be applied to a type argument")) ≫span spanMr nothing ctai-disagree : (ctai-sol : type) → spanM $ maybe A ctai-disagree ctai-sol = get-ctxt λ Γ → spanM-add (AppTp-span t tp m (head-type Γ (meta-vars-subst-type Γ Xs htp) :: contextual-type-argument Γ ctai-sol :: meta-vars-data-all Γ Xs) (just "The given and contextually inferred type argument differ")) ≫span spanMr nothing -- meta-variable locality -- -------------------------------------------------- -- for debugging -- prepend to the tvs returned by check-spine-locality if you're having trouble private locale-tag : ℕ → tagged-val locale-tag n = "locale n" , [[ ℕ-to-string n ]] , [] private is-locale : (max : 𝔹) → (locl : maybe ℕ) → 𝔹 is-locale max locl = max || maybe-else' locl ff iszero check-spine-locality : ctxt → meta-vars → type → (max : 𝔹) → (locl : ℕ) → spanM (maybe (meta-vars × ℕ × 𝔹)) check-spine-locality Γ Xs tp max locl = let new-locl = if iszero locl then num-arrows-in-type Γ tp else locl new-Xs = if iszero locl then meta-vars-empty else Xs left-locl = is-locale max (just locl) in if left-locl && (~ meta-vars-solved? Xs) then spanMr nothing else spanMr (just (new-Xs , new-locl , left-locl)) -- main definition -------------------------------------------------- data check-term-app-ret : Set where check-term-app-return : (Xs : meta-vars) (cod : decortype) (arg-mode : checking-mode) → (tvs : 𝕃 tagged-val) → check-term-app-ret check-term-app : (Xs : meta-vars) (Ys : 𝕃 meta-var) → (t₁ t₂ : term) → is-tmabsd → 𝔹 → spanM (maybe check-term-app-ret) check-term-spine t'@(App t₁ e? t₂) pt max = -- 1) type the applicand, extending the prototype let pt' = proto-arrow e? pt in check-term-spine t₁ pt' ff on-fail handleApplicandTypeError -- 2) make sure the applicand type reveals an arrow (term abstraction) ≫=spanm' λ ret → let (mk-spine-data Xs dt locl) = ret in -- the meta-vars need to know the span they were introduced in get-ctxt λ Γ → let sloc = span-loc $ ctxt-get-current-filename Γ in -- see if the decorated type of the head `dt` reveals an arrow meta-vars-unfold-tmapp' Γ sloc Xs dt ≫=span λ ret → let Ys = fst ret ; tm-arrow? = snd ret in spanMr tm-arrow? 
on-fail (λ _ → genInapplicableError Xs dt pt' locl) -- if so, get the (plain, undecorated) type of the head `htp` ≫=spans' λ arr → let htp = decortype-to-type ∘ is-tmabsd-dt $ arr in -- 3) make sure erasures of the applicand type + syntax of application match checkErasuresMatch e? (is-tmabsd-e? arr) htp Xs locl -- 4) type the application, filling in missing type arguments with meta-variables ≫=spanm' λ _ → check-term-app Xs Ys t₁ t₂ arr (islocl locl) -- 5) check no unsolved mvars, if the application is maximal (or a locality) ≫=spanm' λ {(check-term-app-return Xs' rtp' arg-mode tvs) → let rtp = decortype-to-type rtp' in checkLocality Γ Xs' htp rtp max (pred locl) tvs ≫=spanm' uncurry₂ λ Xs'' locl' is-loc → -- 6) generate span and finish genAppSpan Γ Xs Xs' Ys pt rtp is-loc tvs ≫span check-term-spine-return Γ Xs'' rtp' locl' } where mode = prototype-to-checking pt span-loc : (fn : string) → span-location span-loc fn = fn , term-start-pos t₁ , term-end-pos t₂ islocl : ℕ → 𝔹 islocl locl = is-locale max (just $ pred locl) handleApplicandTypeError : spanM ∘ maybe $ _ handleApplicandTypeError = spanM-add (App-span max t₁ t₂ mode [] nothing) ≫span check-term t₂ nothing ≫=span (const $ spanMr nothing) genInapplicableError : meta-vars → decortype → prototype → (locl : ℕ) → spanM (maybe _) genInapplicableError Xs dt pt locl = check-term-app-tm-errors.inapplicable t₁ t₂ (decortype-to-type dt) Xs (islocl locl) mode e? dt (proto-arrow e? pt) checkErasuresMatch : (e?₁ e?₂ : maybeErased) → type → meta-vars → (locl : ℕ) → spanM ∘ maybe $ ⊤ checkErasuresMatch e?₁ e?₂ htp Xs locl = if ~ eq-maybeErased e?₁ e?₂ then check-term-app-tm-errors.bad-erasure t₁ t₂ htp Xs (islocl locl) mode e?₁ else (spanMr ∘ just $ triv) checkLocality : ctxt → meta-vars → (htp rtp : type) → (max : 𝔹) (locl : ℕ) → 𝕃 tagged-val → spanM ∘ maybe $ _ checkLocality Γ Xs htp rtp max locl tvs = check-spine-locality Γ Xs rtp max locl on-fail check-term-app-tm-errors.unsolved-meta-vars t₁ t₂ htp Xs (islocl locl) mode rtp tvs ≫=spanm' (spanMr ∘ just) genAppSpan : ctxt → (Xs Xs' : meta-vars) → (Ys : 𝕃 meta-var) → prototype → type → (is-locl : 𝔹) → 𝕃 tagged-val → spanM ⊤ genAppSpan Γ Xs Xs' Ys pt rtp is-loc tvs = spanM-add $ (flip uncurry) (meta-vars-check-type-mismatch-if (prototype-to-maybe pt) Γ "synthesized" meta-vars-empty rtp) λ tvs' → App-span is-loc t₁ t₂ mode (tvs' ++ meta-vars-intro-data Γ (meta-vars-from-list Ys) ++ meta-vars-sol-data Γ Xs Xs' ++ tvs) check-term-spine t'@(AppTp t tp) pt max = get-ctxt λ Γ → -- 1) type the applicand check-term-spine t pt max on-fail handleApplicandTypeError ≫=spanm' λ ret → let (mk-spine-data Xs dt locl) = ret ; htp = decortype-to-type dt in -- 2) make sure it reveals a type abstraction meta-vars-unfold-tpapp' Γ Xs dt on-fail (λ _ → genInapplicableError Xs htp dt) -- 3) ensure the type argument has the expected kind, -- but don't compare with the contextually infered type argument (for now) ≫=spans' λ ret → let mk-tpabsd dt e? 
x k sol rdt = ret in check-type tp (just (meta-vars-subst-kind Γ Xs k)) -- 4) produce the result type of the application ≫span subst-decortype-if Γ Xs x k sol rdt ≫=span λ ret → let Xs = fst ret ; rdt = snd ret ; rtp = decortype-to-type rdt in -- 5) generate span data and finish genAppTpSpan Γ Xs pt rtp ≫span check-term-spine-return Γ Xs rdt locl where mode = prototype-to-checking pt span-loc : ctxt → span-location span-loc Γ = (ctxt-get-current-filename Γ) , term-start-pos t , type-end-pos tp handleApplicandTypeError : spanM ∘ maybe $ spine-data handleApplicandTypeError = spanM-add (AppTp-span t tp synthesizing [] nothing) ≫span check-type tp nothing ≫=span (const $ spanMr nothing) genInapplicableError : meta-vars → type → decortype → spanM ∘ maybe $ spine-data genInapplicableError Xs htp dt = check-term-app-tp-errors.inapplicable t tp htp Xs mode dt subst-decortype-if : ctxt → meta-vars → var → kind → maybe type → decortype → spanM (meta-vars × decortype) subst-decortype-if Γ Xs x k sol rdt = if ~ is-hole tp then subst-decortype Γ (qualif-type Γ tp) x rdt ≫=span (λ res → spanMr (Xs , res)) else let sol = maybe-map (λ t → mk-meta-var-sol t checking) sol Y = meta-var-fresh-tp Xs x (span-loc Γ) (k , sol) Xs' = meta-vars-add Xs Y in subst-decortype Γ (meta-var-to-type-unsafe Y) x rdt ≫=span λ rdt' → spanMr (Xs' , rdt') genAppTpSpan : ctxt → meta-vars → prototype → (ret-tp : type) → spanM ⊤ genAppTpSpan Γ Xs pt ret-tp = spanM-add ∘ (flip uncurry) -- check for a type mismatch, if there even is an expected type (meta-vars-check-type-mismatch-if (prototype-to-maybe pt) Γ "synthesizing" Xs ret-tp) $ -- then take the generated 𝕃 tagged-val and add to the span λ tvs → AppTp-span t tp mode $ tvs ++ meta-vars-data-all Γ Xs -- ++ (prototype-data Γ tp :: [ decortype-data Γ dt ]) check-term-spine (Parens _ t _) pt max = check-term-spine t pt max check-term-spine t pt max = check-term t nothing ≫=spanm' λ htp → get-ctxt λ Γ → let locl = num-arrows-in-type Γ htp in match-prototype meta-vars-empty ff htp pt -- NOTE: it is an invariant that the variables solved in the -- solution set of the fst of this are a subset of the variables given -- to match-* -- that is, for (σ , W) = match-prototype ... -- we have dom(σ) = ∅ ≫=span λ ret → let dt = match-proto-dectp ret in check-term-spine-return Γ meta-vars-empty dt locl -- check-term-app -- -------------------------------------------------- -- -- If `dom` has unsolved meta-vars in it, synthesize argument t₂ and try to solve for them. -- Otherwise, check t₂ against a fully known expected type check-term-app Xs Zs t₁ t₂ (mk-tmabsd dt e? 
x dom occurs cod) is-locl = get-ctxt λ Γ → let Xs' = meta-vars-add* Xs Zs ; tp = decortype-to-type dt in -- 1) calculate return type of function (possible subst) genAppRetType Γ -- 2) either synth or check arg type, depending on available info -- checking "exits early", as well as failure ≫=span λ rdt → checkArgWithMetas Xs' tp rdt exit-early spanMr -- 3) match *synthesized* type with expected (partial) type ≫=spans' λ atp → match-types Xs' empty-trie match-unfolding-both dom atp ≫=span (handleMatchResult Xs' atp tp rdt) where mode = synthesizing genAppRetType : ctxt → spanM decortype genAppRetType Γ = if occurs then subst-decortype Γ (qualif-term Γ t₂) x cod else spanMr cod genAppRetTypeHole : ctxt → spanM decortype genAppRetTypeHole Γ = if occurs then subst-decortype Γ (Hole posinfo-gen) x cod else spanMr cod checkArgWithMetas : meta-vars → type → decortype → spanM $ (maybe check-term-app-ret ∨ type) checkArgWithMetas Xs' tp rdt = get-ctxt λ Γ → -- check arg against fully known type if ~ meta-vars-are-free-in-type Xs' dom then check-term t₂ (just dom) ≫span (spanMr ∘' inj₁ ∘' just $ check-term-app-return Xs' rdt mode []) -- synthesize type for the argument else check-term t₂ nothing -- if that doesn't work, press on -- feeding a hole for the dependency, if needed on-fail (genAppRetTypeHole Γ ≫=span λ rdt-hole → spanMr ∘' inj₁ ∘' just $ check-term-app-return Xs' rdt-hole mode [ arg-exp-type Γ dom ]) ≫=spanm' λ tp → spanMr ∘' inj₂ $ tp handleMatchResult : meta-vars → (atp tp : type) → decortype → match-error-t meta-vars → spanM ∘ maybe $ check-term-app-ret handleMatchResult Xs' atp tp rdt (match-error (msg , tvs)) = check-term-app-tm-errors.unmatchable t₁ t₂ tp Xs' is-locl mode dom atp msg tvs handleMatchResult Xs' atp tp rdt (match-ok Xs) = get-ctxt λ Γ → meta-vars-subst-decortype Γ Xs rdt ≫=span λ rdt → spanMr ∘ just $ check-term-app-return Xs rdt mode [] match-unfolding-next : match-unfolding-state → match-unfolding-state match-unfolding-next match-unfolding-both = match-unfolding-both match-unfolding-next match-unfolding-approx = match-unfolding-approx match-unfolding-next match-unfolding-hnf = match-unfolding-both module m-err = meta-vars-match-errors check-type-for-match : type → spanM $ match-error-t kind check-type-for-match tp = (with-qualified-qualif $ with-clear-error $ get-ctxt λ Γ → check-type tp nothing on-fail spanMr ∘ match-error $ "Could not kind computed arg type" , [] ≫=spanm' λ k → spanMr ∘ match-ok $ k) ≫=spand spanMr where qualified-qualif : ctxt → qualif qualified-qualif (mk-ctxt mod ss is os _) = for trie-strings is accum empty-trie use λ x q → trie-insert q x (x , ArgsNil) -- helper to restore qualif state with-qualified-qualif : ∀ {A} → spanM A → spanM A with-qualified-qualif sm = get-ctxt λ Γ → with-ctxt (ctxt-set-qualif Γ (qualified-qualif Γ)) sm -- helper to restore error state with-clear-error : ∀ {A} → spanM A → spanM A with-clear-error m = get-error λ es → set-error nothing ≫span m ≫=span λ a → set-error es ≫span spanMr a -- match-types -- -------------------------------------------------- match-types-ok : meta-vars → spanM $ match-error-t meta-vars match-types-ok = spanMr ∘ match-ok match-types-error : match-error-data → spanM $ match-error-t meta-vars match-types-error = spanMr ∘ match-error match-types Xs Ls match-unfolding-both tpₓ tp = get-ctxt λ Γ → match-types Xs Ls match-unfolding-approx tpₓ tp ≫=span λ where (match-ok Xs) → match-types-ok Xs (match-error msg) → match-types Xs Ls match-unfolding-hnf (hnf Γ (unfolding-elab unfold-head) tpₓ tt) (hnf Γ 
(unfolding-elab unfold-head) tp tt) match-types Xs Ls unf tpₓ@(TpVar pi x) tp = -- check that x is a meta-var get-ctxt λ Γ → maybe-else' (meta-vars-lookup-with-kind Xs x) -- if not, make sure the two variables are the same -- TODO: above assumes no term meta-variables (spanMr (err⊎-guard (~ conv-type Γ tpₓ tp) m-err.e-match-failure ≫⊎ match-ok Xs)) -- scope check the solution λ ret → let X = fst ret ; kₓ = snd ret in if are-free-in-type check-erased Ls tp then match-types-error $ m-err.e-meta-scope Γ tpₓ tp else (check-type-for-match tp ≫=spans' λ k → match-kinds Xs empty-trie match-unfolding-both kₓ k on-fail (λ _ → spanMr ∘ match-error $ m-err.e-bad-sol-kind Γ x tp) ≫=spans' λ Xs → spanMr (meta-vars-solve-tp Γ Xs x tp synthesizing) ≫=spans' λ Xs → match-types-ok $ meta-vars-update-kinds Γ Xs Xs) match-types Xs Ls unf (TpApp tpₓ₁ tpₓ₂) (TpApp tp₁ tp₂) = match-types Xs Ls unf tpₓ₁ tp₁ ≫=spans' λ Xs' → match-types Xs' Ls (match-unfolding-next unf) tpₓ₂ tp₂ match-types Xs Ls unf (TpAppt tpₓ tmₓ) (TpAppt tp tm) = match-types Xs Ls unf tpₓ tp ≫=spans' λ Xs' → get-ctxt λ Γ → spanMr $ if ~ conv-term Γ tmₓ tm then (match-error m-err.e-match-failure) else match-ok Xs' match-types Xs Ls unf tpₓ'@(Abs piₓ bₓ piₓ' xₓ tkₓ tpₓ) tp'@(Abs pi b pi' x tk tp) = get-ctxt λ Γ → if ~ eq-maybeErased bₓ b then (match-types-error m-err.e-match-failure) else ( match-tks Xs Ls (match-unfolding-next unf) tkₓ tk ≫=spans' λ Xs' → with-ctxt (Γ→Γ' Γ) (match-types Xs' Ls' (match-unfolding-next unf) tpₓ tp)) where Γ→Γ' : ctxt → ctxt Γ→Γ' Γ = ctxt-rename xₓ x (ctxt-var-decl-if x Γ) Ls' = stringset-insert Ls x match-types Xs Ls unf tpₓ@(TpArrow tp₁ₓ atₓ tp₂ₓ) tp@(TpArrow tp₁ at tp₂) = get-ctxt λ Γ → if ~ eq-maybeErased atₓ at then match-types-error m-err.e-match-failure else ( match-types Xs Ls (match-unfolding-next unf) tp₁ₓ tp₁ ≫=spans' λ Xs → match-types Xs Ls (match-unfolding-next unf) tp₂ₓ tp₂) match-types Xs Ls unf tpₓ@(TpArrow tp₁ₓ atₓ tp₂ₓ) tp@(Abs pi b pi' x (Tkt tp₁) tp₂) = get-ctxt λ Γ → if ~ eq-maybeErased atₓ b then match-types-error m-err.e-match-failure else ( match-types Xs Ls (match-unfolding-next unf) tp₁ₓ tp₁ ≫=spans' λ Xs → match-types Xs (stringset-insert Ls x) (match-unfolding-next unf) tp₂ₓ tp₂) match-types Xs Ls unf tpₓ@(Abs piₓ bₓ piₓ' xₓ (Tkt tp₁ₓ) tp₂ₓ) tp@(TpArrow tp₁ at tp₂) = get-ctxt λ Γ → if ~ eq-maybeErased bₓ at then match-types-error m-err.e-match-failure else ( match-types Xs Ls (match-unfolding-next unf) tp₁ₓ tp₁ ≫=spans' λ Xs → match-types Xs (stringset-insert Ls xₓ) (match-unfolding-next unf) tp₂ₓ tp₂) match-types Xs Ls unf (Iota _ piₓ xₓ mₓ tpₓ) (Iota _ pi x m tp) = get-ctxt λ Γ → match-types Xs Ls (match-unfolding-next unf) mₓ m ≫=spans' λ Xs → with-ctxt (Γ→Γ' Γ) (match-types Xs Ls' (match-unfolding-next unf) tpₓ tp) where Γ→Γ' : ctxt → ctxt Γ→Γ' Γ = ctxt-rename xₓ x (ctxt-var-decl-if x Γ) Ls' = stringset-insert Ls x match-types Xs Ls unf (TpEq _ t₁ₓ t₂ₓ _) (TpEq _ t₁ t₂ _) = get-ctxt λ Γ → if ~ conv-term Γ t₁ₓ t₁ then match-types-error $ m-err.e-match-failure else if ~ conv-term Γ t₂ₓ t₂ then match-types-error $ m-err.e-match-failure else match-types-ok Xs match-types Xs Ls unf (Lft _ piₓ xₓ tₓ lₓ) (Lft _ pi x t l) = get-ctxt λ Γ → if ~ conv-liftingType Γ lₓ l then match-types-error $ m-err.e-match-failure else if ~ conv-term (Γ→Γ' Γ) tₓ t then match-types-error $ m-err.e-match-failure else match-types-ok Xs where Γ→Γ' : ctxt → ctxt Γ→Γ' Γ = ctxt-rename xₓ x (ctxt-var-decl-if x Γ) match-types Xs Ls unf (TpLambda _ piₓ xₓ atkₓ tpₓ) (TpLambda _ pi x atk tp) = get-ctxt λ Γ → match-tks 
Xs Ls (match-unfolding-next unf) atkₓ atk ≫=spans' λ Xs → with-ctxt (Γ→Γ' Γ) (match-types Xs Ls' (match-unfolding-next unf) tpₓ tp) where Γ→Γ' : ctxt → ctxt Γ→Γ' Γ = ctxt-rename xₓ x (ctxt-var-decl-if x Γ) Ls' = stringset-insert Ls x match-types Xs Ls unf (NoSpans tpₓ _) (NoSpans tp _) = match-types Xs Ls unf tpₓ tp -- TODO for now, don't approximate lets match-types Xs Ls unf (TpLet piₓ (DefTerm pi x ot t) tpₓ) tp = get-ctxt λ Γ → match-types Xs Ls unf (subst Γ (Chi posinfo-gen ot t) x tpₓ) tp match-types Xs Ls unf (TpLet piₓ (DefType pi x k tpₓ-let) tpₓ) tp = get-ctxt λ Γ → match-types Xs Ls unf (subst Γ tpₓ-let x tpₓ) tp match-types Xs Ls unf tpₓ (TpLet _ (DefTerm _ x ot t) tp) = get-ctxt λ Γ → match-types Xs Ls unf tpₓ (subst Γ (Chi posinfo-gen ot t) x tp) match-types Xs Ls unf tpₓ (TpLet _ (DefType _ x k tp-let) tp) = get-ctxt λ Γ → match-types Xs Ls unf tpₓ (subst Γ tp-let x tp) -- match-types Xs Ls unf (TpHole x₁) tp = {!!} match-types Xs Ls unf (TpParens _ tpₓ _) tp = match-types Xs Ls unf tpₓ tp match-types Xs Ls unf tpₓ (TpParens _ tp _) = match-types Xs Ls unf tpₓ tp match-types Xs Ls unf tpₓ tp = get-ctxt λ Γ → match-types-error m-err.e-match-failure -- match-kinds -- -------------------------------------------------- -- match-kinds-norm: match already normalized kinds match-kinds-norm : meta-vars → local-vars → match-unfolding-state → (kₓ k : kind) → spanM $ match-error-t meta-vars match-kinds-norm Xs Ls uf (KndParens _ kₓ _) (KndParens _ k _) = match-kinds Xs Ls uf kₓ k -- kind pi match-kinds-norm Xs Ls uf (KndPi _ piₓ xₓ tkₓ kₓ) (KndPi _ pi x tk k) = get-ctxt λ Γ → match-tks Xs Ls uf tkₓ tk ≫=spans' λ Xs → with-ctxt (Γ→Γ' Γ) (match-kinds Xs Ls uf kₓ k) where Γ→Γ' : ctxt → ctxt Γ→Γ' Γ = ctxt-rename xₓ x (ctxt-var-decl-if x Γ) Ls' = stringset-insert Ls x -- kind arrow match-kinds-norm Xs Ls uf (KndArrow kₓ₁ kₓ₂) (KndArrow k₁ k₂) = match-kinds Xs Ls uf kₓ₁ k₁ ≫=spans' λ Xs → match-kinds Xs Ls uf kₓ₂ k₂ match-kinds-norm Xs Ls uf (KndArrow kₓ₁ kₓ₂) (KndPi _ pi x (Tkk k₁) k₂) = match-kinds Xs Ls uf kₓ₁ k₁ ≫=spans' λ Xs → match-kinds Xs Ls uf kₓ₂ k₂ match-kinds-norm Xs Ls uf (KndPi _ _ x (Tkk kₓ₁) kₓ₂) (KndArrow k₁ k₂) = match-kinds Xs Ls uf kₓ₁ k₁ ≫=spans' λ Xs → match-kinds Xs Ls uf kₓ₂ k₂ -- kind tp arrow match-kinds-norm Xs Ls uf (KndTpArrow tpₓ kₓ) (KndTpArrow tp k) = match-types Xs Ls uf tpₓ tp ≫=spans' λ Xs → match-kinds Xs Ls uf kₓ k match-kinds-norm Xs Ls uf (KndPi _ _ x (Tkt tpₓ) kₓ) (KndTpArrow tp k) = match-types Xs Ls uf tpₓ tp ≫=spans' λ Xs → match-kinds Xs Ls uf kₓ k match-kinds-norm Xs Ls uf (KndTpArrow tpₓ kₓ) (KndPi _ _ x (Tkt tp) k) = match-types Xs Ls uf tpₓ tp ≫=spans' λ Xs → match-kinds Xs Ls uf kₓ k match-kinds-norm Xs Ls uf (Star _) (Star _) = match-types-ok $ Xs match-kinds-norm Xs Ls uf kₓ k = get-ctxt λ Γ → match-types-error $ m-err.e-matchk-failure -- m-err.e-kind-ineq Γ kₓ k match-kinds Xs Ls uf kₓ k = get-ctxt λ Γ → match-kinds-norm Xs Ls uf (hnf Γ (unfolding-elab unfold-head) kₓ tt) (hnf Γ (unfolding-elab unfold-head) k tt) -- match-tk -- -------------------------------------------------- match-tks Xs Ls uf (Tkk kₓ) (Tkk k) = match-kinds Xs Ls uf kₓ k match-tks Xs Ls uf (Tkt tpₓ) (Tkt tp) = match-types Xs Ls uf tpₓ tp match-tks Xs Ls uf tkₓ tk = get-ctxt λ Γ → match-types-error m-err.e-matchk-failure -- m-err.e-tk-ineq Γ tkₓ tk -- match-prototype -- -------------------------------------------------- match-prototype-err : type → prototype → spanM match-prototype-data match-prototype-err tp pt = spanMr $ mk-match-prototype-data meta-vars-empty 
(decor-error tp pt) tt {- -------------------- Xs ⊢? T ≔ ⁇ ⇒ (∅ , T) -} match-prototype Xs uf tp (proto-maybe nothing) = spanMr $ mk-match-prototype-data Xs (decor-type tp) ff {- Xs ⊢= T ≔ S ⇒ σ -------------------- Xs ⊢? T ≔ S ⇒ (σ , T) -} match-prototype Xs uf tp (proto-maybe (just tp')) = match-types Xs empty-trie match-unfolding-both tp tp' on-fail (λ _ → spanMr $ mk-match-prototype-data Xs (decor-type tp) tt) ≫=spans' λ Xs' → spanMr $ mk-match-prototype-data Xs' (decor-type tp) ff {- Xs,X ⊢? T ≔ ⁇ → P ⇒ (σ , W) ----------------------------------------------- Xs ⊢? ∀ X . T ≔ ⁇ → P ⇒ (σ - X , ∀ X = σ(X) . W) -} match-prototype Xs uf (Abs pi bₓ pi' x (Tkk k) tp) pt'@(proto-arrow e? pt) = get-ctxt λ Γ → -- 1) generate a fresh meta-var Y, add it to the meta-vars, and rename -- occurences of x in tp to Y let ret = meta-vars-add-from-tpabs Γ missing-span-location Xs (mk-tpabs Erased x k tp) Y = fst ret ; Xs' = snd ret ; tp' = subst Γ (meta-var-to-type-unsafe Y) x tp -- 2) match the body against the original prototype to generate a decorated type -- and find some solutions in match-prototype Xs' ff tp' pt' ≫=span λ ret → let mk-match-prototype-data Xs' dt err = ret Y' = maybe-else' (meta-vars-lookup Xs' (meta-var-name Y)) Y λ Y → Y -- 3) replace the meta-vars with the bound type variable in subst-decortype Γ (TpVar pi x) (meta-var-name Y) dt -- 4) leave behind the solution for Y as a decoration and drop Y from Xs ≫=span λ dt' → let sort' = meta-var.sort (meta-var-set-src Y' checking) dt″ = decor-decor Erased pi x sort' dt' in spanMr $ mk-match-prototype-data (meta-vars-remove Xs' Y) dt″ err {- Xs ⊢? T ≔ P ⇒ (σ , P) ----------------------------- Xs ⊢? S → T ≔ ⁇ → P ⇒ (σ , P) -} match-prototype Xs uf (Abs pi b pi' x (Tkt dom) cod) (proto-arrow e? pt) = match-prototype Xs ff cod pt ≫=span λ ret → let mk-match-prototype-data Xs dt err = ret dt' = decor-decor b pi x (meta-var-tm dom nothing) dt in spanMr $ if ~ eq-maybeErased b e? then mk-match-prototype-data meta-vars-empty dt' tt else mk-match-prototype-data Xs dt' err match-prototype Xs uf (TpArrow dom at cod) (proto-arrow e? pt) = match-prototype Xs ff cod pt ≫=span λ ret → let mk-match-prototype-data Xs' dt err = ret dt' = decor-arrow at dom dt in spanMr $ if ~ eq-maybeErased at e? then mk-match-prototype-data meta-vars-empty dt' tt else mk-match-prototype-data Xs' dt' err {- X ∈ Xs ----------------------------------- Xs ⊢? X ≔ ⁇ → P ⇒ (σ , (X , ⁇ → P)) -} match-prototype Xs tt tp@(TpVar pi x) pt@(proto-arrow _ _) = spanMr $ mk-match-prototype-data Xs (decor-stuck tp pt) ff -- everything else... 
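-- (the remaining clauses fall into three groups: types we unfold or unwrap and retry,
--  types we suspend as "stuck" decorations, and types that can never match an arrow prototype)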
-- Types for which we should keep digging match-prototype Xs ff tp@(TpVar pi x) pt@(proto-arrow _ _) = get-ctxt λ Γ → match-prototype Xs tt (hnf Γ (unfolding-elab unfold-head) tp tt) pt match-prototype Xs uf (NoSpans tp _) pt@(proto-arrow _ _) = match-prototype Xs uf tp pt match-prototype Xs uf (TpParens _ tp _) pt@(proto-arrow _ _) = match-prototype Xs uf tp pt match-prototype Xs uf (TpLet pi (DefTerm piₗ x opt t) tp) pt@(proto-arrow _ _) = get-ctxt λ Γ → let tp' = subst Γ (Chi posinfo-gen opt t) x tp in match-prototype Xs uf tp' pt match-prototype Xs uf (TpLet pi (DefType piₗ x k tp') tp) pt@(proto-arrow _ _) = get-ctxt λ Γ → let tp″ = subst Γ tp' x tp in match-prototype Xs uf tp″ pt match-prototype Xs ff tp@(TpApp _ _) pt@(proto-arrow _ _) = get-ctxt λ Γ → match-prototype Xs tt (hnf Γ (unfolding-elab unfold-head) tp tt) pt match-prototype Xs ff tp@(TpAppt _ _) pt@(proto-arrow _ _) = get-ctxt λ Γ → match-prototype Xs tt (hnf Γ (unfolding-elab unfold-head) tp tt) pt -- types for which we should suspend disbelief match-prototype Xs tt tp@(TpApp _ _) pt@(proto-arrow _ _) = spanMr $ mk-match-prototype-data Xs (decor-stuck tp pt) ff match-prototype Xs tt tp@(TpAppt _ _) pt@(proto-arrow _ _) = spanMr $ mk-match-prototype-data Xs (decor-stuck tp pt) ff -- types which clearly do not match the prototype match-prototype Xs uf tp@(TpEq _ _ _ _) pt@(proto-arrow _ _) = match-prototype-err tp pt match-prototype Xs uf tp@(TpHole _) pt@(proto-arrow _ _) = match-prototype-err tp pt match-prototype Xs uf tp@(TpLambda _ _ _ _ _) pt@(proto-arrow _ _) = match-prototype-err tp pt match-prototype Xs uf tp@(Iota _ _ _ _ _) pt@(proto-arrow _ _) = match-prototype-err tp pt match-prototype Xs uf tp@(Lft _ _ _ _ _) pt@(proto-arrow _ _) = match-prototype-err tp pt -- check-typei: check a type against (maybe) a kind -- ================================================== --ACG WIP --check-typei (TpHole pi) k = spanM-add check-typei (TpHole pi) k = get-ctxt (λ Γ → spanM-add (tp-hole-span Γ pi k []) ≫span return-when k k) check-typei (TpParens pi t pi') k = spanM-add (punctuation-span "Parens (type)" pi pi') ≫span check-type t k check-typei (NoSpans t _) k = check-type t k ≫=spand spanMr check-typei (TpVar pi x) mk = get-ctxt (cont mk) where cont : (mk : maybe kind) → ctxt → spanM (check-ret mk) cont mk Γ with ctxt-lookup-type-var Γ x cont mk Γ | nothing = spanM-add (TpVar-span Γ pi x (maybe-to-checking mk) (expected-kind-if Γ mk ++ [ missing-kind ]) (just "Missing a kind for a type variable.")) ≫span return-when mk mk cont nothing Γ | (just k) = spanM-add (TpVar-span Γ pi x synthesizing [ kind-data Γ k ] nothing) ≫span check-type-return Γ k cont (just k) Γ | just k' = spanM-add (TpVar-span Γ pi x checking (expected-kind Γ k :: [ kind-data Γ k' ]) (if conv-kind Γ k k' then nothing else just "The computed kind does not match the expected kind.")) check-typei (TpLambda pi pi' x atk body) (just k) with to-absk k check-typei (TpLambda pi pi' x atk body) (just k) | just (mk-absk x' atk' _ k') = check-tk atk ≫span spanM-add (punctuation-span "Lambda (type)" pi (posinfo-plus pi 1)) ≫span get-ctxt λ Γ → spanM-add (if conv-tk Γ (qualif-tk Γ atk) atk' then TpLambda-span pi x atk body checking [ kind-data Γ k ] nothing else uncurry (λ tvs err → TpLambda-span pi x atk body checking tvs (just err)) (lambda-bound-var-conv-error Γ x atk' atk [ kind-data Γ k ])) ≫span add-tk pi' x atk ≫=span λ mi → get-ctxt λ Γ' → check-type body (just (rename-var Γ x' (qualif-var Γ' x) k')) ≫span spanM-restore-info x mi check-typei (TpLambda pi pi' x 
atk body) (just k) | nothing = check-tk atk ≫span spanM-add (punctuation-span "Lambda (type)" pi (posinfo-plus pi 1)) ≫span get-ctxt λ Γ → spanM-add (TpLambda-span pi x atk body checking [ expected-kind Γ k ] (just "The type is being checked against a kind which is not an arrow- or Pi-kind.")) check-typei (TpLambda pi pi' x atk body) nothing = spanM-add (punctuation-span "Lambda (type)" pi (posinfo-plus pi 1)) ≫span check-tk atk ≫span add-tk pi' x atk ≫=span λ mi → check-type body nothing ≫=span cont ≫=span λ mk → spanM-restore-info x mi ≫span spanMr mk where cont : maybe kind → spanM (maybe kind) cont nothing = spanM-add (TpLambda-span pi x atk body synthesizing [] nothing) ≫span spanMr nothing cont (just k) = get-ctxt λ Γ → let atk' = qualif-tk Γ atk in -- This should indeed "unqualify" occurrences of x in k for r let r = absk-tk x atk' (rename-var Γ (pi' % x) x k) in spanM-add (TpLambda-span pi x atk' body synthesizing [ kind-data Γ r ] nothing) ≫span spanMr (just r) check-typei (Abs pi b {- All or Pi -} pi' x atk body) k = get-ctxt λ Γ → spanM-add (uncurry (TpQuant-span (me-unerased b) pi x atk body (maybe-to-checking k)) (if-check-against-star-data Γ "A type-level quantification" k)) ≫span spanM-add (punctuation-span "Forall" pi (posinfo-plus pi 1)) ≫span check-tk atk ≫span add-tk pi' x atk ≫=span λ mi → check-type body (just star) ≫span spanM-restore-info x mi ≫span return-star-when k check-typei (TpArrow t1 _ t2) k = get-ctxt λ Γ → spanM-add (uncurry (TpArrow-span t1 t2 (maybe-to-checking k)) (if-check-against-star-data Γ "An arrow type" k)) ≫span check-type t1 (just star) ≫span check-type t2 (just star) ≫span return-star-when k check-typei (TpAppt tp t) k = check-type tp nothing ≫=span cont'' ≫=spanr cont' k where cont : kind → spanM (maybe kind) cont (KndTpArrow tp' k') = check-term t (just tp') ≫span spanMr (just k') cont (KndPi _ _ x (Tkt tp') k') = check-term t (just tp') ≫span get-ctxt λ Γ → spanMr (just (subst Γ (qualif-term Γ t) x k')) cont k' = get-ctxt λ Γ → spanM-add (TpAppt-span tp t (maybe-to-checking k) (type-app-head Γ tp :: head-kind Γ k' :: [ term-argument Γ t ]) (just ("The kind computed for the head of the type application does" ^ " not allow the head to be applied to an argument which is a term"))) ≫span spanMr nothing cont' : (outer : maybe kind) → kind → spanM (check-ret outer) cont' nothing k = get-ctxt λ Γ → spanM-add (TpAppt-span tp t synthesizing [ kind-data Γ k ] nothing) ≫span check-type-return Γ k cont' (just k') k = get-ctxt λ Γ → if conv-kind Γ k k' then spanM-add (TpAppt-span tp t checking (expected-kind Γ k' :: [ kind-data Γ k ]) nothing) else spanM-add (TpAppt-span tp t checking (expected-kind Γ k' :: [ kind-data Γ k ]) (just "The kind computed for a type application does not match the expected kind.")) cont'' : maybe kind → spanM (maybe kind) cont'' nothing = spanM-add (TpAppt-span tp t (maybe-to-checking k) [] nothing) ≫span spanMr nothing cont'' (just k) = cont k check-typei (TpApp tp tp') k = check-type tp nothing ≫=span cont'' ≫=spanr cont' k where cont : kind → spanM (maybe kind) cont (KndArrow k'' k') = check-type tp' (just k'') ≫span spanMr (just k') cont (KndPi _ _ x (Tkk k'') k') = check-type tp' (just k'') ≫span get-ctxt λ Γ → spanMr (just (subst Γ (qualif-type Γ tp') x k')) cont k' = get-ctxt λ Γ → spanM-add (TpApp-span tp tp' (maybe-to-checking k) (type-app-head Γ tp :: head-kind Γ k' :: [ type-argument Γ tp' ]) (just ("The kind computed for the head of the type application does" ^ " not allow the head to be applied to an argument which 
is a type"))) ≫span spanMr nothing cont' : (outer : maybe kind) → kind → spanM (check-ret outer) cont' nothing k = get-ctxt λ Γ → spanM-add (TpApp-span tp tp' synthesizing [ kind-data Γ k ] nothing) ≫span check-type-return Γ k cont' (just k') k = get-ctxt λ Γ → if conv-kind Γ k k' then spanM-add (TpApp-span tp tp' checking (expected-kind Γ k' :: [ kind-data Γ k' ]) nothing) else spanM-add (TpApp-span tp tp' checking (expected-kind Γ k' :: [ kind-data Γ k ]) (just "The kind computed for a type application does not match the expected kind.")) cont'' : maybe kind → spanM (maybe kind) cont'' nothing = spanM-add (TpApp-span tp tp' (maybe-to-checking k) [] nothing) ≫span spanMr nothing cont'' (just k) = cont k check-typei (TpEq pi t1 t2 pi') k = get-ctxt (λ Γ → untyped-term-spans t1 ≫span set-ctxt Γ ≫span untyped-term-spans t2 ≫span set-ctxt Γ) ≫span get-ctxt λ Γ → spanM-add (uncurry (TpEq-span pi t1 t2 pi' (maybe-to-checking k)) (if-check-against-star-data Γ "An equation" k)) ≫span -- spanM-add (unchecked-term-span t1) ≫span -- spanM-add (unchecked-term-span t2) ≫span return-star-when k check-typei (Lft pi pi' X t l) k = add-tk pi' X (Tkk star) ≫=span λ mi → get-ctxt λ Γ → check-term t (just (qualif-type Γ (liftingType-to-type X l))) ≫span spanM-add (punctuation-span "Lift" pi (posinfo-plus pi 1)) ≫span spanM-restore-info X mi ≫span cont k (qualif-kind Γ (liftingType-to-kind l)) where cont : (outer : maybe kind) → kind → spanM (check-ret outer) cont nothing k = get-ctxt λ Γ → spanM-add (Lft-span pi X t synthesizing [ kind-data Γ k ] nothing) ≫span spanMr (just k) cont (just k') k = get-ctxt λ Γ → if conv-kind Γ k k' then spanM-add (Lft-span pi X t checking ( expected-kind Γ k' :: [ kind-data Γ k ]) nothing) else spanM-add (Lft-span pi X t checking ( expected-kind Γ k' :: [ kind-data Γ k ]) (just "The expected kind does not match the computed kind.")) check-typei (Iota pi pi' x t1 t2) mk = get-ctxt λ Γ → spanM-add (uncurry (Iota-span pi t2 (maybe-to-checking mk)) (if-check-against-star-data Γ "A iota-type" mk)) ≫span check-typei t1 (just star) ≫span add-tk pi' x (Tkt t1) ≫=span λ mi → check-typei t2 (just star) ≫span spanM-restore-info x mi ≫span return-star-when mk check-typei (TpLet pi d T) mk = check-def d ≫=span finish where maybe-subst : defTermOrType → (mk : maybe kind) → check-ret mk → spanM (check-ret mk) maybe-subst _ (just k) triv = spanMok maybe-subst _ nothing nothing = spanMr nothing maybe-subst (DefTerm pi x NoType t) nothing (just k) = get-ctxt λ Γ → spanMr (just (subst Γ (qualif-term Γ (Chi posinfo-gen NoType t)) (pi % x) k)) maybe-subst (DefTerm pi x (SomeType T) t) nothing (just k) = get-ctxt λ Γ → spanMr (just (subst Γ (qualif-term Γ (Chi posinfo-gen (SomeType T) t)) (pi % x) k)) maybe-subst (DefType pi x k' T') nothing (just k) = get-ctxt λ Γ → spanMr (just (subst Γ (qualif-type Γ T') (pi % x) k)) finish : var × restore-def → spanM (check-ret mk) finish (x , m) = get-ctxt λ Γ → spanM-add (TpLet-span Γ (maybe-to-checking mk) pi d T [] nothing) ≫span check-type T mk ≫=span λ r → spanM-restore-info x m ≫span maybe-subst d mk r check-kind (KndParens pi k pi') = spanM-add (punctuation-span "Parens (kind)" pi pi') ≫span check-kind k check-kind (Star pi) = spanM-add (Star-span pi checking nothing) check-kind (KndVar pi x ys) = get-ctxt λ Γ → maybe-else' (ctxt-lookup-kind-var-def-args Γ x) (spanM-add (KndVar-span Γ (pi , x) (kvar-end-pos pi x ys) ParamsNil checking [] (just "Undefined kind variable"))) λ ps-as → check-args-against-params nothing (pi , x) -- Isn't used vvvv (fst $ snd 
$ elim-pair ps-as λ ps as → subst-params-args Γ ps as star) ys {-helper (ctxt-lookup-kind-var-def-args Γ x) where helper : maybe (params × args) → spanM ⊤ helper (just (ps , as)) = check-args-against-params nothing (pi , x) ps (append-args as ys) helper nothing = get-ctxt λ Γ → spanM-add (KndVar-span Γ (pi , x) (kvar-end-pos pi x ys) ParamsNil checking [] (just "Undefined kind variable"))-} check-kind (KndArrow k k') = spanM-add (KndArrow-span k k' checking nothing) ≫span check-kind k ≫span check-kind k' check-kind (KndTpArrow t k) = spanM-add (KndTpArrow-span t k checking nothing) ≫span check-type t (just star) ≫span check-kind k check-kind (KndPi pi pi' x atk k) = spanM-add (punctuation-span "Pi (kind)" pi (posinfo-plus pi 1)) ≫span spanM-add (KndPi-span pi x atk k checking nothing) ≫span check-tk atk ≫span add-tk pi' x atk ≫=span λ mi → check-kind k ≫span spanM-restore-info x mi check-args-against-params kind-or-import orig ps ys = caap (~ isJust kind-or-import) ps ys empty-trie where make-span : ctxt → 𝕃 tagged-val → err-m → span make-span Γ ts err = maybe-else (KndVar-span Γ orig (kvar-end-pos (fst orig) (snd orig) ys) ps checking ts err) (λ loc → Import-module-span Γ orig ps (loc :: ts) err) kind-or-import caap : 𝔹 → params → args → trie arg → spanM ⊤ caap koi (ParamsCons (Decl _ pi _ x (Tkk k) _) ps) (ArgsCons (TypeArg T) ys) σ = get-ctxt λ Γ → check-type T (just (substs Γ σ k)) ≫span caap koi ps ys (trie-insert σ x $ TypeArg (qualif-type Γ T)) caap ff (ParamsCons (Decl _ pi NotErased x (Tkt T) _) ps) (ArgsCons (TermArg NotErased t) ys) σ = get-ctxt λ Γ → let T' = substs Γ σ T in check-term t (just T') ≫span check-erased-margs t (just T') ≫span caap ff ps ys (trie-insert σ x $ TermArg NotErased (qualif-term Γ t)) caap ff (ParamsCons (Decl _ pi Erased x (Tkt T) _) ps) (ArgsCons (TermArg NotErased t) ys) σ = get-ctxt λ Γ → spanM-add (make-span Γ [ term-argument Γ t ] (just ("A term argument was supplied for erased term parameter " ^ x))) caap ff (ParamsCons (Decl _ pi NotErased x (Tkt T) _) ps) (ArgsCons (TermArg Erased t) ys) σ = get-ctxt λ Γ → spanM-add (make-span Γ [ term-argument Γ t ] (just ("An erased term argument was supplied for term parameter " ^ x))) -- Either a kind argument or a correctly erased module argument caap koi (ParamsCons (Decl _ pi me x (Tkt T) _) ps) (ArgsCons (TermArg me' t) ys) σ = get-ctxt λ Γ → check-term t (just (substs Γ σ T)) ≫span caap koi ps ys (trie-insert σ x $ TermArg me (qualif-term Γ t)) caap koi (ParamsCons (Decl _ x₁ _ x (Tkk x₃) x₄) ps₁) (ArgsCons (TermArg _ x₅) ys₂) σ = get-ctxt λ Γ → spanM-add (make-span Γ [ term-argument Γ x₅ ] (just ("A term argument was supplied for type parameter " ^ x))) caap koi (ParamsCons (Decl _ x₁ _ x (Tkt x₃) x₄) ps₁) (ArgsCons (TypeArg x₅) ys₂) σ = get-ctxt λ Γ → spanM-add (make-span Γ [ type-argument Γ x₅ ] (just ("A type argument was supplied for term parameter " ^ x))) caap tt (ParamsCons (Decl _ _ _ x _ _) ps₁) ArgsNil σ = get-ctxt λ Γ → spanM-add (make-span Γ [] (just ("Missing an argument for parameter " ^ x))) caap ff (ParamsCons (Decl _ _ _ x _ _) ps₁) ArgsNil σ = get-ctxt λ Γ → spanM-add (make-span Γ [] nothing) caap koi ParamsNil (ArgsCons x₁ ys₂) σ = get-ctxt λ Γ → spanM-add (make-span Γ [ arg-argument Γ x₁ ] (just "An extra argument was given")) caap koi ParamsNil ArgsNil σ = get-ctxt λ Γ → spanM-add (make-span Γ [] nothing) check-erased-margs t mtp = get-ctxt λ Γ → let x = erased-margs Γ in if are-free-in skip-erased x t then spanM-add (erased-marg-span Γ t mtp) else spanMok check-tk (Tkk k) = 
check-kind k check-tk (Tkt t) = check-type t (just star) check-def (DefTerm pi₁ x NoType t') = get-ctxt λ Γ → check-term t' nothing ≫=span cont (compileFail-in Γ t') t' where cont : 𝕃 tagged-val × err-m → term → maybe type → spanM (var × restore-def) cont (tvs , err) t' (just T) = spanM-push-term-def pi₁ x t' T ≫=span λ m → get-ctxt λ Γ → spanM-add (Var-span Γ pi₁ x synthesizing (type-data Γ T :: noterased :: tvs) err) ≫span spanMr (x , m) cont (tvs , err) t' nothing = spanM-push-term-udef pi₁ x t' ≫=span λ m → get-ctxt λ Γ → spanM-add (Var-span Γ pi₁ x synthesizing (noterased :: tvs) err) ≫span spanMr (x , m) check-def (DefTerm pi₁ x (SomeType T) t') = check-type T (just star) ≫span get-ctxt λ Γ → let T' = qualif-type Γ T in check-term t' (just T') ≫span spanM-push-term-def pi₁ x t' T' ≫=span λ m → get-ctxt λ Γ → let p = compileFail-in Γ t' in spanM-add (Var-span Γ pi₁ x checking (type-data Γ T' :: noterased :: fst p) (snd p)) ≫span spanMr (x , m) check-def (DefType pi x k T) = check-kind k ≫span get-ctxt λ Γ → let k' = qualif-kind Γ k in check-type T (just k') ≫span spanM-push-type-def pi x T k' ≫=span λ m → get-ctxt λ Γ → spanM-add (Var-span Γ pi x checking (noterased :: [ kind-data Γ k' ]) nothing) ≫span spanMr (x , m)
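The spine-checking code above is dense, so here is a deliberately small sketch, written in Haskell rather than Agda, of the mechanism its comments describe: peel implicit quantifiers off the head's type by introducing fresh meta-variables (as meta-vars-peel' does), solve those meta-variables by first-order matching of expected against synthesized argument types (as match-types does), and substitute the solutions into the codomain (as check-term-app does). Everything below (Ty, peel, matchTy, checkApp, and so on) is a hypothetical, simplified stand-in, not the Cedille implementation; it omits unfolding and backtracking (match-unfolding-both), kind checking of solutions (match-kinds, check-type-for-match), spans, erasure flags, and locality tracking.

-- Toy illustration only: a first-order matcher over a simplified type language.
-- Nothing below is Cedille code; all names are hypothetical.

import qualified Data.Map as M

type MetaVar = Int
type Subst   = M.Map MetaVar Ty

data Ty
  = TBase String        -- base type constant
  | TVar String         -- bound type variable
  | TMeta MetaVar       -- unsolved meta-variable
  | TArrow Ty Ty        -- term-level arrow
  | TAll String Ty      -- implicit quantifier, as in `forall X . T`
  deriving (Eq, Show)

-- analogue of meta-vars-peel': strip leading quantifiers, replacing each bound
-- variable with a fresh meta-variable (no occurs or kind checking in this sketch)
peel :: Int -> Ty -> ([MetaVar], Ty)
peel fresh (TAll x body) =
  let (ms, body') = peel (fresh + 1) (substVar x (TMeta fresh) body)
  in  (fresh : ms, body')
peel _ ty = ([], ty)

-- naive substitution; same-named binders shadow, variable capture is ignored here
substVar :: String -> Ty -> Ty -> Ty
substVar x s (TVar y)    | x == y = s
substVar x s (TArrow a b)         = TArrow (substVar x s a) (substVar x s b)
substVar x s (TAll y b)  | x /= y = TAll y (substVar x s b)
substVar _ _ ty                   = ty

-- analogue of match-types: make the expected type (left, may contain metas) equal
-- to the synthesized type (right) by solving metas; purely syntactic, no unfolding
matchTy :: Subst -> Ty -> Ty -> Maybe Subst
matchTy sub (TMeta m) ty = case M.lookup m sub of
  Nothing  -> Just (M.insert m ty sub)                 -- first solution wins
  Just ty' -> if applySub sub ty' == applySub sub ty   -- later uses must agree
                then Just sub else Nothing
matchTy sub (TArrow a b) (TArrow a' b') = matchTy sub a a' >>= \s -> matchTy s b b'
matchTy sub (TBase x) (TBase y) | x == y = Just sub
matchTy sub (TVar x)  (TVar y)  | x == y = Just sub
matchTy _ _ _ = Nothing                                -- match failure

applySub :: Subst -> Ty -> Ty
applySub sub (TMeta m)    = maybe (TMeta m) (applySub sub) (M.lookup m sub)
applySub sub (TArrow a b) = TArrow (applySub sub a) (applySub sub b)
applySub sub (TAll x b)   = TAll x (applySub sub b)
applySub _   ty           = ty

-- analogue of one step of check-term-app: the head type must reveal an arrow;
-- match its domain against the argument's synthesized type and return the
-- codomain under the extended solution set
checkApp :: Subst -> Ty -> Ty -> Maybe (Subst, Ty)
checkApp sub headTy argTy = case applySub sub headTy of
  TArrow dom cod -> do sub' <- matchTy sub dom argTy
                       pure (sub', applySub sub' cod)
  _              -> Nothing    -- head type does not allow application

main :: IO ()
main = do
  -- head has type forall X . X -> b ; the argument synthesizes type a
  let (ms, hd) = peel 0 (TAll "X" (TArrow (TVar "X") (TBase "b")))
  print ms                                -- [0]
  print (checkApp M.empty hd (TBase "a"))
  -- Just (fromList [(0,TBase "a")], TBase "b")

In the real checker, a failed syntactic match is retried after head-normalizing both types, which is what the match-unfolding-both state implements, and every candidate solution is additionally checked to have the kind expected for its meta-variable.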