Dataset: AI4M
Sample rows from the `text` column (strings, length 0 to 3.34M).
{-# OPTIONS --cubical --no-import-sorts --safe #-}

open import Cubical.Categories.Category
open import Cubical.Categories.Morphism renaming (isIso to isIsoC)
open import Cubical.Foundations.Prelude
open import Cubical.Foundations.Isomorphism
open import Cubical.Foundations.Equiv
open Iso
open import Cubical.Foundations.HLevels
open Precategory
open import Cubical.Core.Glue
open import Cubical.Foundations.Univalence
open import Cubical.Foundations.Transport using (transpFill)

module Cubical.Categories.Constructions.Slice
  {ℓ ℓ' : Level} (C : Precategory ℓ ℓ') (c : C .ob) {{isC : isCategory C}} where

open import Cubical.Data.Sigma

-- just a helper to prevent redundancy
TypeC : Type (ℓ-suc (ℓ-max ℓ ℓ'))
TypeC = Type (ℓ-max ℓ ℓ')

-- Components of a slice category

record SliceOb : TypeC where
  constructor sliceob
  field
    {S-ob} : C .ob
    S-arr : C [ S-ob , c ]

open SliceOb public

record SliceHom (a b : SliceOb) : Type ℓ' where
  constructor slicehom
  field
    S-hom : C [ S-ob a , S-ob b ]
    -- commutative diagram
    S-comm : S-hom ⋆⟨ C ⟩ (S-arr b) ≡ S-arr a

open SliceHom public

-- Helpers for working with equality
-- can probably replace these by showing that SliceOb is isomorphic to Sigma and
-- that paths are isomorphic to Sigma? But sounds like that would need a lot of transp

SliceOb-≡-intro : ∀ {a b} {f g}
                → (p : a ≡ b)
                → PathP (λ i → C [ p i , c ]) f g
                → sliceob {a} f ≡ sliceob {b} g
SliceOb-≡-intro p q = λ i → sliceob {p i} (q i)

module _ {xf yg : SliceOb} where
  private
    x = xf .S-ob
    f = xf .S-arr
    y = yg .S-ob
    g = yg .S-arr

  -- a path between slice objects is the "same" as a pair of paths between C obs and C arrows
  SOPathIsoPathΣ : Iso (xf ≡ yg) (Σ[ p ∈ x ≡ y ] PathP (λ i → C [ p i , c ]) f g)
  SOPathIsoPathΣ .fun p = (λ i → (p i) .S-ob) , (λ i → (p i) .S-arr)
  SOPathIsoPathΣ .inv (p , q) i = sliceob {p i} (q i)
  SOPathIsoPathΣ .rightInv _ = refl
  SOPathIsoPathΣ .leftInv _ = refl

  SOPath≃PathΣ = isoToEquiv SOPathIsoPathΣ

  SOPath≡PathΣ = ua (isoToEquiv SOPathIsoPathΣ)

-- intro and elim for working with SliceHom equalities (is there a better way to do this?)
SliceHom-≡-intro : ∀ {a b} {f g} {c₁} {c₂}
                 → (p : f ≡ g)
                 → PathP (λ i → (p i) ⋆⟨ C ⟩ (S-arr b) ≡ S-arr a) c₁ c₂
                 → slicehom f c₁ ≡ slicehom g c₂
SliceHom-≡-intro p q = λ i → slicehom (p i) (q i)

SliceHom-≡-elim : ∀ {a b} {f g} {c₁} {c₂}
                → slicehom f c₁ ≡ slicehom g c₂
                → Σ[ p ∈ f ≡ g ] PathP (λ i → (p i) ⋆⟨ C ⟩ (S-arr b) ≡ S-arr a) c₁ c₂
SliceHom-≡-elim r = (λ i → S-hom (r i)) , λ i → S-comm (r i)

SliceHom-≡-intro' : ∀ {a b} {f g : C [ a .S-ob , b .S-ob ]} {c₁} {c₂}
                  → (p : f ≡ g)
                  → slicehom f c₁ ≡ slicehom g c₂
SliceHom-≡-intro' {a} {b} {f} {g} {c₁} {c₂} p i = slicehom (p i) (c₁≡c₂ i)
  where
    c₁≡c₂ : PathP (λ i → (p i) ⋆⟨ C ⟩ (b .S-arr) ≡ a .S-arr) c₁ c₂
    c₁≡c₂ = isOfHLevel→isOfHLevelDep 1 (λ _ → isC .isSetHom _ _) c₁ c₂ p

-- SliceHom is isomorphic to the Sigma type with the same components
SliceHom-Σ-Iso : ∀ {a b}
               → Iso (SliceHom a b) (Σ[ h ∈ C [ S-ob a , S-ob b ] ] h ⋆⟨ C ⟩ (S-arr b) ≡ S-arr a)
SliceHom-Σ-Iso .fun (slicehom h c) = h , c
SliceHom-Σ-Iso .inv (h , c) = slicehom h c
SliceHom-Σ-Iso .rightInv = λ x → refl
SliceHom-Σ-Iso .leftInv = λ x → refl

-- Precategory definition

SliceCat : Precategory _ _
SliceCat .ob = SliceOb
SliceCat .Hom[_,_] = SliceHom
SliceCat .id (sliceob {x} f) = slicehom (C .id x) (C .⋆IdL _)
SliceCat ._⋆_ {sliceob j} {sliceob k} {sliceob l} (slicehom f p) (slicehom g p') =
  slicehom
    (f ⋆⟨ C ⟩ g)
    ( f ⋆⟨ C ⟩ g ⋆⟨ C ⟩ l
        ≡⟨ C .⋆Assoc _ _ _ ⟩
      f ⋆⟨ C ⟩ (g ⋆⟨ C ⟩ l)
        ≡⟨ cong (λ v → f ⋆⟨ C ⟩ v) p' ⟩
      f ⋆⟨ C ⟩ k
        ≡⟨ p ⟩
      j
        ∎)
SliceCat .⋆IdL (slicehom S-hom S-comm) =
  SliceHom-≡-intro (⋆IdL C _) (toPathP (isC .isSetHom _ _ _ _))
SliceCat .⋆IdR (slicehom S-hom S-comm) =
  SliceHom-≡-intro (⋆IdR C _) (toPathP (isC .isSetHom _ _ _ _))
SliceCat .⋆Assoc f g h =
  SliceHom-≡-intro (⋆Assoc C _ _ _) (toPathP (isC .isSetHom _ _ _ _))

-- SliceCat is a Category

instance
  isCatSlice : isCategory SliceCat
  isCatSlice .isSetHom {a} {b} (slicehom f c₁) (slicehom g c₂) p q = cong isoP p'≡q'
    where
      -- paths between SliceHoms are equivalent to the projection paths
      p' : Σ[ p ∈ f ≡ g ] PathP (λ i → (p i) ⋆⟨ C ⟩ (S-arr b) ≡ S-arr a) c₁ c₂
      p' = SliceHom-≡-elim p
      q' : Σ[ p ∈ f ≡ g ] PathP (λ i → (p i) ⋆⟨ C ⟩ (S-arr b) ≡ S-arr a) c₁ c₂
      q' = SliceHom-≡-elim q

      -- we want all paths between (dependent) paths of this type to be equal
      B = λ v → v ⋆⟨ C ⟩ (S-arr b) ≡ S-arr a

      -- need the groupoidness for dependent paths
      homIsGroupoidDep : isOfHLevelDep 2 B
      homIsGroupoidDep = isOfHLevel→isOfHLevelDep 2 (λ v x y → isSet→isGroupoid (isC .isSetHom) _ _ x y)

      -- we first prove that the projected paths are equal
      p'≡q' : p' ≡ q'
      p'≡q' = ΣPathP ((isC .isSetHom _ _ _ _) , toPathP (homIsGroupoidDep _ _ _ _ _))

      -- and then we can use equivalence to lift these paths up
      -- to actual SliceHom paths
      isoP = λ g → cong (inv SliceHom-Σ-Iso) (fun (ΣPathIsoPathΣ) g)

-- SliceCat is univalent if C is univalent

module _ ⦃ isU : isUnivalent C ⦄ where
  open CatIso
  open Iso

  module _ { xf yg : SliceOb } where
    private
      x = xf .S-ob
      y = yg .S-ob

    -- names for the equivalences/isos
    pathIsoEquiv : (x ≡ y) ≃ (CatIso x y)
    pathIsoEquiv = univEquiv isU x y

    isoPathEquiv : (CatIso x y) ≃ (x ≡ y)
    isoPathEquiv = invEquiv pathIsoEquiv

    pToIIso' : Iso (x ≡ y) (CatIso x y)
    pToIIso' = equivToIso pathIsoEquiv

    -- the iso in SliceCat we're given induces an iso in C between x and y
    module _ ( cIso@(catiso kc lc s r) : CatIso {C = SliceCat} xf yg ) where
      extractIso' : CatIso {C = C} x y
      extractIso' .mor = kc .S-hom
      extractIso' .inv = lc .S-hom
      extractIso' .sec i = (s i) .S-hom
      extractIso' .ret i = (r i) .S-hom

  instance
    preservesUnivalenceSlice : isUnivalent SliceCat
    -- we prove the equivalence by going through Iso
    preservesUnivalenceSlice .univ xf@(sliceob {x} f) yg@(sliceob {y} g) = isoToIsEquiv sIso
      where
        -- this is just here because the type checker can't seem to infer xf and yg
        pToIIso : Iso (x ≡ y) (CatIso x y)
        pToIIso = pToIIso' {xf = xf} {yg}

        -- the meat of the proof
        sIso : Iso (xf ≡ yg) (CatIso xf yg)
        sIso .fun p = pathToIso xf yg p -- we use the normal pathToIso via path induction to get an isomorphism
        sIso .inv is@(catiso kc lc s r) = SliceOb-≡-intro x≡y (symP (sym (lc .S-comm) ◁ lf≡f))
          where
            -- we get a path between xf and yg by combining paths between
            -- x and y, and f and g
            -- 1. x≡y follows from univalence of C
            -- 2. f≡g is more tricky; by commutativity, we know that g ≡ l ⋆ f
            -- so we want l to be id; we get this by showing: id ≡ pathToIso x y x≡y ≡ l
            -- where the first step follows from path induction, and the second from univalence of C

            -- morphisms in C from kc and lc
            k = kc .S-hom
            l = lc .S-hom

            -- extract out the iso between x and y
            extractIso : CatIso {C = C} x y
            extractIso = extractIso' is

            -- and we can use univalence of C to get x ≡ y
            x≡y : x ≡ y
            x≡y = pToIIso .inv extractIso

            -- to show that f ≡ g, we show that l ≡ id
            -- by using C's isomorphism
            pToI≡id : PathP (λ i → C [ x≡y (~ i) , x ]) (pathToIso {C = C} x y x≡y .inv) (C .id x)
            pToI≡id = J (λ y p → PathP (λ i → C [ p (~ i) , x ]) (pathToIso {C = C} x y p .inv) (C .id x))
                        (λ j → JRefl pToIFam pToIBase j .inv)
                        x≡y
              where
                idx = C .id x
                pToIFam = (λ z _ → CatIso {C = C} x z)
                pToIBase = catiso (C .id x) idx (C .⋆IdL idx) (C .⋆IdL idx)

            l≡pToI : l ≡ pathToIso {C = C} x y x≡y .inv
            l≡pToI i = pToIIso .rightInv extractIso (~ i) .inv

            l≡id : PathP (λ i → C [ x≡y (~ i) , x ]) l (C .id x)
            l≡id = l≡pToI ◁ pToI≡id

            lf≡f : PathP (λ i → C [ x≡y (~ i) , c ]) (l ⋆⟨ C ⟩ f) f
            lf≡f = (λ i → (l≡id i) ⋆⟨ C ⟩ f) ▷ C .⋆IdL _
        sIso .rightInv is@(catiso kc lc s r) i = catiso (kc'≡kc i) (lc'≡lc i) (s'≡s i) (r'≡r i)
          -- we prove rightInv using a combination of univalence and the fact that homs are an h-set
          where
            kc' = (sIso .fun) (sIso .inv is) .mor
            lc' = (sIso .fun) (sIso .inv is) .inv
            k' = kc' .S-hom
            l' = lc' .S-hom
            k = kc .S-hom
            l = lc .S-hom

            extractIso : CatIso {C = C} x y
            extractIso = extractIso' is

            -- we do the equality component wise

            -- mor
            k'≡k : k' ≡ k
            k'≡k i = (pToIIso .rightInv extractIso) i .mor

            kcom'≡kcom : PathP (λ j → (k'≡k j) ⋆⟨ C ⟩ g ≡ f) (kc' .S-comm) (kc .S-comm)
            kcom'≡kcom = isSetHomP1 _ _ λ i → (k'≡k i) ⋆⟨ C ⟩ g

            kc'≡kc : kc' ≡ kc
            kc'≡kc i = slicehom (k'≡k i) (kcom'≡kcom i)

            -- inv
            l'≡l : l' ≡ l
            l'≡l i = (pToIIso .rightInv extractIso) i .inv

            lcom'≡lcom : PathP (λ j → (l'≡l j) ⋆⟨ C ⟩ f ≡ g) (lc' .S-comm) (lc .S-comm)
            lcom'≡lcom = isSetHomP1 _ _ λ i → (l'≡l i) ⋆⟨ C ⟩ f

            lc'≡lc : lc' ≡ lc
            lc'≡lc i = slicehom (l'≡l i) (lcom'≡lcom i)

            -- sec
            s' = (sIso .fun) (sIso .inv is) .sec
            s'≡s : PathP (λ i → lc'≡lc i ⋆⟨ SliceCat ⟩ kc'≡kc i ≡ SliceCat .id _) s' s
            s'≡s = isSetHomP1 _ _ λ i → lc'≡lc i ⋆⟨ SliceCat ⟩ kc'≡kc i

            -- ret
            r' = (sIso .fun) (sIso .inv is) .ret
            r'≡r : PathP (λ i → kc'≡kc i ⋆⟨ SliceCat ⟩ lc'≡lc i ≡ SliceCat .id _) r' r
            r'≡r = isSetHomP1 _ _ λ i → kc'≡kc i ⋆⟨ SliceCat ⟩ lc'≡lc i

        sIso .leftInv p = p'≡p
          -- to show that the round trip is equivalent to the identity
          -- we show that this is true for each component (S-ob, S-arr)
          -- and then combine
          -- specifically, we show that p'Ob≡pOb and p'Mor≡pMor
          -- and it follows that p'≡p
          where
            p' = (sIso .inv) (sIso .fun p)

            pOb : x ≡ y
            pOb i = (p i) .S-ob

            p'Ob : x ≡ y
            p'Ob i = (p' i) .S-ob

            pMor : PathP (λ i → C [ pOb i , c ]) f g
            pMor i = (p i) .S-arr

            p'Mor : PathP (λ i → C [ p'Ob i , c ]) f g
            p'Mor i = (p' i) .S-arr

            -- we first show that it's equivalent to use sIso first then extract, or to extract first then use pToIIso
            extractCom : extractIso' (sIso .fun p) ≡ pToIIso .fun pOb
            extractCom = J (λ yg' p̃ → extractIso' (pathToIso xf yg' p̃) ≡ pToIIso' {xf = xf} {yg'} .fun (λ i → (p̃ i) .S-ob))
                           (cong extractIso' (JRefl pToIFam' pToIBase') ∙ sym (JRefl pToIFam pToIBase))
                           p
              where
                idx = C .id x
                pToIFam = (λ z _ → CatIso {C = C} x z)
                pToIBase = catiso (C .id x) idx (C .⋆IdL idx) (C .⋆IdL idx)

                idxf = SliceCat .id xf
                pToIFam' = (λ z _ → CatIso {C = SliceCat} xf z)
                pToIBase' = catiso (SliceCat .id xf) idxf (SliceCat .⋆IdL idxf) (SliceCat .⋆IdL idxf)
                -- why does this not follow definitionally?

            -- from extractCom, we get that performing the roundtrip on pOb gives us back p'Ob
            ppp : p'Ob ≡ (pToIIso .inv) (pToIIso .fun pOb)
            ppp = cong (pToIIso .inv) extractCom

            -- apply univalence of C
            -- this gives us the first component that we want
            p'Ob≡pOb : p'Ob ≡ pOb
            p'Ob≡pOb = ppp ∙ pToIIso .leftInv pOb

            -- isSetHom gives us the second component, path between morphisms
            p'Mor≡pMor : PathP (λ j → PathP (λ i → C [ (p'Ob≡pOb j) i , c ]) f g) p'Mor pMor
            p'Mor≡pMor = isSetHomP2l _ _ p'Mor pMor p'Ob≡pOb

            -- we can use the above paths to show that p' ≡ p
            p'≡p : p' ≡ p
            p'≡p i = comp (λ i' → SOPath≡PathΣ {xf = xf} {yg} (~ i'))
                          (λ j → λ { (i = i0) → left (~ j)
                                   ; (i = i1) → right (~ j) })
                          (p'Σ≡pΣ i)
              where
                -- we break up p' and p into their constituent paths
                -- first via transport and then via our component definitions from before
                -- we show that p'ΣT ≡ p'Σ (and same for p) via univalence
                -- and p'Σ≡pΣ follows from our work from above
                p'ΣT : Σ[ p ∈ x ≡ y ] PathP (λ i → C [ p i , c ]) f g
                p'ΣT = transport SOPath≡PathΣ p'

                p'Σ : Σ[ p ∈ x ≡ y ] PathP (λ i → C [ p i , c ]) f g
                p'Σ = (p'Ob , p'Mor)

                pΣT : Σ[ p ∈ x ≡ y ] PathP (λ i → C [ p i , c ]) f g
                pΣT = transport SOPath≡PathΣ p

                pΣ : Σ[ p ∈ x ≡ y ] PathP (λ i → C [ p i , c ]) f g
                pΣ = (pOb , pMor) -- transport SOPathP≡PathPSO p

                -- using the computation rule to ua
                p'ΣT≡p'Σ : p'ΣT ≡ p'Σ
                p'ΣT≡p'Σ = uaβ SOPath≃PathΣ p'

                pΣT≡pΣ : pΣT ≡ pΣ
                pΣT≡pΣ = uaβ SOPath≃PathΣ p

                p'Σ≡pΣ : p'Σ ≡ pΣ
                p'Σ≡pΣ = ΣPathP (p'Ob≡pOb , p'Mor≡pMor)

                -- two sides of the square we're connecting
                left : PathP (λ i → SOPath≡PathΣ {xf = xf} {yg} i) p' p'Σ
                left = transport-filler SOPath≡PathΣ p' ▷ p'ΣT≡p'Σ

                right : PathP (λ i → SOPath≡PathΣ {xf = xf} {yg} i) p pΣ
                right = transport-filler SOPath≡PathΣ p ▷ pΣT≡pΣ

-- properties
-- TODO: move to own file

open isIsoC renaming (inv to invC)

-- make a slice isomorphism from just the hom
sliceIso : ∀ {a b} (f : C [ a .S-ob , b .S-ob ]) (c : (f ⋆⟨ C ⟩ b .S-arr) ≡ a .S-arr)
         → isIsoC {C = C} f
         → isIsoC {C = SliceCat} (slicehom f c)
sliceIso f c isof .invC = slicehom (isof .invC) (sym (invMoveL (isIso→areInv isof) c))
sliceIso f c isof .sec = SliceHom-≡-intro' (isof .sec)
sliceIso f c isof .ret = SliceHom-≡-intro' (isof .ret)
module Lambda where

open import Prelude
open import Star
open import Examples
open import Modal

-- Environments

record TyAlg (ty : Set) : Set where
  field
    nat : ty
    _⟶_ : ty -> ty -> ty

data Ty : Set where
  <nat>  : Ty
  _<⟶>_ : Ty -> Ty -> Ty

freeTyAlg : TyAlg Ty
freeTyAlg = record { nat = <nat>; _⟶_ = _<⟶>_ }

termTyAlg : TyAlg True
termTyAlg = record { nat = _; _⟶_ = \_ _ -> _ }

record TyArrow {ty₁ ty₂ : Set}(T₁ : TyAlg ty₁)(T₂ : TyAlg ty₂) : Set where
  field
    apply   : ty₁ -> ty₂
    respNat : apply (TyAlg.nat T₁) == TyAlg.nat T₂
    resp⟶  : forall {τ₁ τ₂} ->
              apply (TyAlg._⟶_ T₁ τ₁ τ₂) == TyAlg._⟶_ T₂ (apply τ₁) (apply τ₂)

_=Ty=>_ : {ty₁ ty₂ : Set}(T₁ : TyAlg ty₁)(T₂ : TyAlg ty₂) -> Set
_=Ty=>_ = TyArrow

!Ty : {ty : Set}{T : TyAlg ty} -> T =Ty=> termTyAlg
!Ty = record { apply = ! ; respNat = refl ; resp⟶ = refl }

Ctx : Set
Ctx = List Ty

Var : {ty : Set} -> List ty -> ty -> Set
Var Γ τ = Any (_==_ τ) Γ

vzero : {τ : Ty} {Γ : Ctx} -> Var (τ • Γ) τ
vzero = done refl • ε

vsuc : {σ τ : Ty} {Γ : Ctx} -> Var Γ τ -> Var (σ • Γ) τ
vsuc v = step • v

module Term {ty : Set}(T : TyAlg ty) where

  private open module TT = TyAlg T

  data Tm : List ty -> ty -> Set where
    var : forall {Γ τ} -> Var Γ τ -> Tm Γ τ
    zz  : forall {Γ} -> Tm Γ nat
    ss  : forall {Γ} -> Tm Γ (nat ⟶ nat)
    ƛ   : forall {Γ σ τ} -> Tm (σ • Γ) τ -> Tm Γ (σ ⟶ τ)
    _$_ : forall {Γ σ τ} -> Tm Γ (σ ⟶ τ) -> Tm Γ σ -> Tm Γ τ

module Eval where

  private open module TT = Term freeTyAlg

  ty⟦_⟧ : Ty -> Set
  ty⟦ <nat> ⟧    = Nat
  ty⟦ σ <⟶> τ ⟧ = ty⟦ σ ⟧ -> ty⟦ τ ⟧

  Env : Ctx -> Set
  Env = All ty⟦_⟧

  _[_] : forall {Γ τ} -> Env Γ -> Var Γ τ -> ty⟦ τ ⟧
  ρ [ x ] with lookup x ρ
  ... | result _ refl v = v

  ⟦_⟧_ : forall {Γ τ} -> Tm Γ τ -> Env Γ -> ty⟦ τ ⟧
  ⟦ var x ⟧ ρ = ρ [ x ]
  ⟦ zz ⟧    ρ = zero
  ⟦ ss ⟧    ρ = suc
  ⟦ ƛ t ⟧   ρ = \x -> ⟦ t ⟧ (check x • ρ)
  ⟦ s $ t ⟧ ρ = (⟦ s ⟧ ρ) (⟦ t ⟧ ρ)

module MoreExamples where

  private open module TT = TyAlg freeTyAlg
  private open module Tm = Term freeTyAlg
  open Eval

  tm-one : Tm ε nat
  tm-one = ss $ zz

  tm-id : Tm ε (nat ⟶ nat)
  tm-id = ƛ (var (done refl • ε))

  tm : Tm ε nat
  tm = tm-id $ tm-one

  tm-twice : Tm ε ((nat ⟶ nat) ⟶ (nat ⟶ nat))
  tm-twice = ƛ (ƛ (f $ (f $ x)))
    where
      Γ : Ctx
      Γ = nat • (nat ⟶ nat) • ε
      f : Tm Γ (nat ⟶ nat)
      f = var (vsuc vzero)
      x : Tm Γ nat
      x = var vzero

  sem : {τ : Ty} -> Tm ε τ -> ty⟦ τ ⟧
  sem e = ⟦ e ⟧ ε

  one : Nat
  one = sem tm

  twice : (Nat -> Nat) -> (Nat -> Nat)
  twice = sem tm-twice
import category_theory.full_subcategory
import category_theory.limits.creates
import category_theory.reflects_isomorphisms
import category_theory.limits.preserves.shapes.binary_products
import category_theory.adjunction.fully_faithful
import category_theory.adjunction.limits
import category_theory.closed.cartesian
import cartesian_closed
import power

namespace category_theory

open category_theory category_theory.category category_theory.limits
open classifier

universes v u u₂

noncomputable theory

variables (C : Type u) [category.{v} C]

local attribute [instance] has_finite_products_of_has_finite_limits

class topos :=
[lim : has_finite_limits.{v} C]
[sub : has_subobject_classifier.{v} C]
[cc : cartesian_closed.{v} C]

attribute [instance] topos.lim topos.sub topos.cc

variables [topos.{v} C]

variable {C}

lemma prod_iso_pb {B : C} (f : over B) : prod.functor.obj f = star f ⋙ over.forget _ := rfl

def prod_iso_pb' {B : C} (f : over B) :
  prod.functor.obj f ≅ real_pullback f.hom ⋙ dependent_sum f.hom :=
calc star f ⋙ over.forget _
      ≅ star f ⋙ (over.iterated_slice_equiv _).functor ⋙
          (over.iterated_slice_equiv f).inverse ⋙ over.forget _ :
        iso_whisker_left (star f)
          (iso_whisker_right f.iterated_slice_equiv.unit_iso (over.forget _))
  ... ≅ (star f ⋙ (over.iterated_slice_equiv _).functor) ⋙
          ((over.iterated_slice_equiv f).inverse ⋙ over.forget _) : iso.refl _
  ... ≅ (star f ⋙ (over.iterated_slice_equiv _).functor) ⋙ dependent_sum f.hom : iso.refl _
  ... ≅ real_pullback f.hom ⋙ dependent_sum f.hom :
  begin
    refine iso_whisker_right _ (dependent_sum f.hom),
    have : f = over.mk f.hom,
    cases f, congr, apply subsingleton.elim,
    convert iso_pb f.hom,
  end

def prod_iso_pb'' {B : C} (f : over B) :
  prod.functor.obj f ≅ real_pullback f.hom ⋙ over.map f.hom :=
calc star f ⋙ over.forget _
      ≅ star f ⋙ (over.iterated_slice_equiv _).functor ⋙
          (over.iterated_slice_equiv f).inverse ⋙ over.forget _ :
        iso_whisker_left (star f)
          (iso_whisker_right f.iterated_slice_equiv.unit_iso (over.forget _))
  ... ≅ (star f ⋙ (over.iterated_slice_equiv _).functor) ⋙
          ((over.iterated_slice_equiv f).inverse ⋙ over.forget _) : iso.refl _
  ... ≅ (star f ⋙ (over.iterated_slice_equiv _).functor) ⋙ dependent_sum f.hom : iso.refl _
  ... ≅ real_pullback f.hom ⋙ dependent_sum f.hom :
  begin
    refine iso_whisker_right _ (dependent_sum f.hom),
    have : f = over.mk f.hom,
    cases f, congr, apply subsingleton.elim,
    convert iso_pb f.hom,
  end

def pullback_sum_iso {X Y Z W : C} {f : X ⟶ Y} {g : X ⟶ Z} {h : Y ⟶ W} {k : Z ⟶ W}
  {comm : f ≫ h = g ≫ k} (t : is_limit (pullback_cone.mk f g comm)) :
  real_pullback g ⋙ over.map f ≅ over.map k ⋙ real_pullback h :=
begin
  apply nat_iso.of_components _ _,
  { intro m,
    apply over_iso _ _,
    { refine ⟨_, _, _, _⟩,
      { apply pullback.lift pullback.fst (pullback.snd ≫ f) _,
        change pullback.fst ≫ _ ≫ k = _,
        simp only [pullback.condition_assoc, assoc, comm] },
      { apply pullback.lift pullback.fst _ _,
        refine (pullback_cone.is_limit.lift' t pullback.snd (pullback.fst ≫ m.hom) _).1,
        rw [← pullback.condition, assoc], refl,
        erw (pullback_cone.is_limit.lift' t pullback.snd (pullback.fst ≫ m.hom) _).2.2 },
      { apply pullback.hom_ext,
        { simp },
        { rw [assoc, id_comp, pullback.lift_snd],
          apply pullback_cone.is_limit.hom_ext t,
          { rw [assoc, (pullback_cone.is_limit.lift' t _ _ _).2.1, pullback.lift_snd], refl },
          { rw [assoc, (pullback_cone.is_limit.lift' t _ _ _).2.2,
              pullback.lift_fst_assoc, pullback.condition], refl } } },
      { apply pullback.hom_ext,
        { simp },
        { rw [id_comp, assoc, pullback.lift_snd, pullback.lift_snd_assoc],
          apply (pullback_cone.is_limit.lift' t _ _ _).2.1 } } },
    { apply pullback.lift_snd } },
  { intros,
    ext1,
    change pullback.lift _ _ _ ≫ pullback.lift _ _ _ =
      pullback.lift _ _ _ ≫ pullback.lift (pullback.fst ≫ f_1.left) _ _,
    ext1; simp }
end

def test' {A B : C} (f : over A) (k : B ⟶ A) :
  over.map k ⋙ prod.functor.obj f ≅ prod.functor.obj ((real_pullback k).obj f) ⋙ over.map k :=
calc over.map k ⋙ prod.functor.obj f
      ≅ over.map k ⋙ real_pullback f.hom ⋙ over.map f.hom :
        iso_whisker_left (over.map k) (prod_iso_pb'' _)
  ... ≅ real_pullback pullback.snd ⋙ over.map pullback.fst ⋙ over.map f.hom :
        iso_whisker_right (pullback_sum_iso (cone_is_pullback _ _)).symm (dependent_sum f.hom)
  ... ≅ real_pullback pullback.snd ⋙ over.map (_ ≫ f.hom) :
        iso_whisker_left (real_pullback _) (over_map_comp _ _).symm
  ... ≅ real_pullback pullback.snd ⋙ over.map (pullback.snd ≫ k) :
        iso_whisker_left (real_pullback _) (by rw pullback.condition)
  ... ≅ real_pullback ((real_pullback k).obj f).hom ⋙ over.map pullback.snd ⋙ over.map k :
        iso_whisker_left (real_pullback _) (over_map_comp _ _)
  ... ≅ prod.functor.obj ((real_pullback k).obj f) ⋙ over.map k :
        iso_whisker_right (prod_iso_pb' _).symm (over.map k)

def test {A B : C} (f : over A) (k : B ⟶ A) :
  exp f ⋙ real_pullback k ≅ real_pullback k ⋙ exp ((real_pullback k).obj f) :=
begin
  apply adjunction.right_adjoint_uniq,
  apply adjunction.comp _ _ (radj k) (exp.adjunction _),
  apply adjunction.of_nat_iso_left _ (test' f k).symm,
  apply adjunction.comp _ _ (exp.adjunction _) (radj k),
end

/-- Pullback respects exponentials! (Natural in `g`) -/
def pullback_exp {X Y A B : C} (f g : over A) (k : B ⟶ A) :
  (real_pullback k).obj (f ⟹ g) ≅ (real_pullback k).obj f ⟹ (real_pullback k).obj g :=
(test f k).app g

instance subq_cc (A : C) : cartesian_closed (subq A) :=
@cartesian_closed_of_equiv _ _ (id _) _ _ _ (sub_one_over A).symm (top_cc _)

/-- The bottom of the subobject category. -/
def sub_bot (B : C) : sub B := sub.mk' (initial.to B)

@[simp] lemma sub_bot_left {B : C} : (↑(sub_bot B) : over B).left = ⊥_ C := rfl
@[simp] lemma sub_bot_arrow {B : C} : (sub_bot B).arrow = initial.to B := rfl

def subq_bot (B : C) : subq B := ⟦sub_bot B⟧

instance {B : C} : order_bot (subq B) :=
{ bot := subq_bot B,
  bot_le := quotient.ind
  begin
    intro a,
    refine ⟨sub.hom_mk (initial.to _) _⟩,
    dsimp,
    apply subsingleton.elim
  end,
  ..category_theory.subq.partial_order B }

lemma pullback_bot {A B : C} (f : A ⟶ B) : (subq.pullback f).obj ⊥ = ⊥ :=
begin
  apply quotient.sound,
  symmetry,
  refine ⟨sub.iso_mk _ _⟩,
  refine (as_iso pullback.fst).symm,
  dsimp,
  apply subsingleton.elim,
end

-- local attribute [instance] limits.has_coequalizers_of_has_finite_colimits
local attribute [instance] has_finite_coproducts_of_has_finite_colimits

example (A B : C) (f : A ⟶ B) : regular_epi (factor_thru_image f) := by apply_instance

variables {A B : C}

-- def union' : sub' A → sub' A → sub' A := λ f g,
-- sub'.mk' (image.ι (coprod.desc f.arrow.hom g.arrow.hom))

-- lemma left_le_union' (f g : sub' A) : f ≤ union' f g :=
-- begin
--   refine ⟨_, _⟩,
--   apply coprod.inl ≫ factor_thru_image _,
--   dsimp [union'],
--   rw [assoc, image.fac, coprod.inl_desc],
-- end

-- lemma right_le_union' (f g : sub' A) : g ≤ union' f g :=
-- begin
--   refine ⟨_, _⟩,
--   apply coprod.inr ≫ factor_thru_image _,
--   dsimp [union'],
--   rw [assoc, image.fac, coprod.inr_desc],
-- end

-- lemma union'_le (f g h : sub' A) : f ≤ h → g ≤ h → union' f g ≤ h :=
-- begin
--   rintros ⟨hf, hf₁⟩ ⟨hg, hg₁⟩,
--   refine ⟨_, _⟩,
--   refine image.lift ⟨_, h.arrow.hom, coprod.desc hf hg⟩,
--   apply image.lift_fac,
-- end

-- lemma union'_mono {f₁ f₂ g₁ g₂ : sub' A} : f₁ ≤ f₂ → g₁ ≤ g₂ → union' f₁ g₁ ≤ union' f₂ g₂ :=
-- begin
--   intros hf hg,
--   apply union'_le,
--   apply le_trans hf (left_le_union' _ _),
--   apply le_trans hg (right_le_union' _ _),
-- end

-- def union : sub A → sub A → sub A := quotient.map₂ union'
-- begin
--   rintro f₁ f₂ ⟨hf₁, hf₂⟩ g₁ g₂ ⟨hg₁, hg₂⟩,
--   exact ⟨union'_mono hf₁ hg₁, union'_mono hf₂ hg₂⟩,
-- end

def equiv_to_iff {P Q : Prop} (h : P ≃ Q) : P ↔ Q := ⟨h.to_fun, h.inv_fun⟩

lemma exp_transpose (a b c : subq A) : a ⊓ b ≤ c ↔ b ≤ (a ⟹ c) :=
begin
  rw ← prod_eq_inter,
  apply equiv_to_iff,
  apply equiv.plift.symm.trans (equiv.ulift.symm.trans
    (((exp.adjunction a).hom_equiv b c).trans (equiv.ulift.trans equiv.plift))),
end

-- def exist' (f : B ⟶ A) (a : sub' B) : sub' A :=
-- sub'.mk' (image.ι (a.arrow.hom ≫ f))

-- def exist'' (f : B ⟶ A) : sub' B ⥤ sub' A :=
-- preorder_functor (exist' f)
-- begin
--   rintros a₁ a₂ ⟨k, hk⟩,
--   refine ⟨_, _⟩,
--   refine image.lift {I := _, m := image.ι _, e := k ≫ factor_thru_image _, fac' := _},
--   rw [assoc, image.fac, reassoc_of hk],
--   apply image.lift_fac,
-- end

-- def exist (f : B ⟶ A) : sub B ⥤ sub A := lower_functor (exist'' f)

-- def pb_adj (f : B ⟶ A) : exist'' f ⊣ pullback_sub' f
-- equiv.trans equiv.plift.symm $ equiv.trans equiv.ulift.symm $ equiv.trans ((exp.adjunction a).hom_equiv b c) _
-- begin
--   have : ulift (plift _) ≃ ulift (plift _) := (exp.adjunction a).hom_equiv b c,
-- end

instance : bounded_lattice (subq A) :=
{ ..category_theory.subq.semilattice_inf_top,
  ..category_theory.subq.semilattice_sup,
  ..category_theory.subq.order_bot }

lemma coprod_eq_union {A : C} {f₁ f₂ : subq A} : (f₁ ⨿ f₂) = f₁ ⊔ f₂ :=
begin
  apply le_antisymm,
  apply le_of_hom,
  apply coprod.desc,
  apply hom_of_le,
  apply le_sup_left,
  apply hom_of_le,
  apply le_sup_right,
  apply sup_le,
  apply le_of_hom,
  apply coprod.inl,
  apply le_of_hom,
  apply coprod.inr
end

-- (x ⊔ y) ⊓ (x ⊔ z) ≤ x ⊔ y ⊓ z
lemma subq.distrib (x y z : subq A) : x ⊓ (y ⊔ z) ≤ (x ⊓ y) ⊔ (x ⊓ z) :=
begin
  rw [exp_transpose],
  apply sup_le,
  rw [← exp_transpose],
  exact le_sup_left,
  rw [← exp_transpose],
  exact le_sup_right,
end

lemma le_sup_inf_of_inf_sup_le {α : Type*} [lattice α]
  (inf_sup_le : ∀ x y z : α, x ⊓ (y ⊔ z) ≤ (x ⊓ y) ⊔ (x ⊓ z)) :
  ∀ x y z : α, (x ⊔ y) ⊓ (x ⊔ z) ≤ x ⊔ (y ⊓ z) :=
begin
  have : ∀ (x y z : α), x ⊓ (y ⊔ z) = (x ⊓ y) ⊔ (x ⊓ z),
  intros x y z,
  apply le_antisymm (inf_sup_le x y z)
    (sup_le (inf_le_inf_left x le_sup_left) (inf_le_inf_left x le_sup_right)),
  intros,
  rw this,
  change ((x ⊔ y) ⊓ x) ⊔ ((x ⊔ y) ⊓ z) ≤ x ⊔ (y ⊓ z),
  apply sup_le,
  transitivity x,
  simp,
  simp,
  rw inf_comm,
  rw this,
  apply sup_le_sup,
  apply inf_le_right,
  rw inf_comm,
end

def sub.pullback_image_aux {A' : C} (f : A ⟶ B) (g : A' ⟶ B) :
  (sub.pullback f).obj (sub.image.obj (over.mk g)) ≅ sub.image.obj ((real_pullback f).obj (over.mk g)) :=
{ hom := sub.hom_mk (pullback_image _ _).hom (pullback_image_fac _ _),
  inv := sub.hom_mk (pullback_image _ _).inv (pullback_image_inv_fac _ _) }

/-- Image commutes with pullback. -/
def sub.pullback_image (f : A ⟶ B) : sub.image ⋙ sub.pullback f ≅ real_pullback f ⋙ sub.image :=
nat_iso.of_components (λ g, sub.pullback_image_aux f _) (by tidy)

/-- Lemma A1.3.3 of the Elephant. -/
def frobenius {A B : C} (f : A ⟶ B) (A' : sub A) (B' : sub B) :
  (sub.intersection.obj ((sub.exists f).obj A')).obj B' ≅
    (sub.exists f).obj ((sub.intersection.obj A').obj ((sub.pullback f).obj B')) :=
begin
  refine sub.iso_mk _ _,
  apply unique_factorise ((pullback.snd ≫ A'.arrow) ≫ f)
    (pullback B'.arrow (image.ι (A'.arrow ≫ f))) _ (pullback.fst ≫ B'.arrow) _,
  { apply pullback.lift (pullback.fst ≫ pullback.fst) (pullback.snd ≫ factor_thru_image _) _,
    rw [assoc, pullback.condition, assoc, image.fac],
    apply pullback.condition_assoc },
  { rw pullback.lift_fst_assoc,
    change (pullback.fst ≫ pullback.fst) ≫ B'.arrow = (pullback.snd ≫ A'.arrow) ≫ f,
    erw [assoc, pullback.condition, pullback.condition_assoc, assoc] },
  { apply category_theory.strong_epi_of_regular_epi _,
    apply regular_epi_of_is_pullback_alt _ pullback.snd pullback.snd
      (factor_thru_image (A'.arrow ≫ f)) _ _,
    apply pullback.lift_snd,
    refine both_pb_to_left_pb _ _ _ _ _ _ _ _ _ (cone_is_pullback _ _) _,
    simp only [pullback.lift_fst],
    have : factor_thru_image (A'.arrow ≫ f) ≫ image.ι (A'.arrow ≫ f) = A'.arrow ≫ f := image.fac _,
    convert left_pb_to_both_pb _ _ _ _ _ _ _ _ _
      (cone_is_pullback pullback.snd A'.arrow) (cone_is_pullback B'.arrow f) },
  { erw unique_factorise_hom_comp_image,
    apply pullback.condition },
end

lemma subq.frobenius {A B : C} (f : A ⟶ B) (A' : subq A) (B' : subq B) :
  (subq.exists f).obj A' ⊓ B' = (subq.exists f).obj (A' ⊓ (subq.pullback f).obj B') :=
quotient.induction_on₂ A' B' $ λ a' b', quotient.sound ⟨frobenius f _ _⟩

instance pb_frob {A B : C} (f : A ⟶ B) (x : subq A) (y : subq B) :
  is_iso (frobenius_map y x (subq.exists_pull_adj f)) :=
{ inv :=
  begin
    refine ⟨⟨_⟩⟩,
    rw [prod_eq_inter, prod_eq_inter, inf_comm, subq.frobenius, inf_comm],
  end }

instance pb_preserves_lim (f : A ⟶ B) : preserves_limits (subq.pullback f) :=
adjunction.right_adjoint_preserves_limits (subq.exists_pull_adj f)

instance pullback_cc (f : A ⟶ B) : cartesian_closed_functor (subq.pullback f) :=
cartesian_closed_of_frobenius_iso (subq.exists_pull_adj f)

lemma subq.pullback_exp (f : A ⟶ B) (x y : subq B) :
  (subq.pullback f).obj (x ⟹ y) = ((subq.pullback f).obj x ⟹ (subq.pullback f).obj y) :=
begin
  apply skel_is_skel,
  have := (category_theory.pullback_cc f).comparison_iso,
  refine @as_iso _ _ _ _ _ (this x y),
end

instance : bounded_distrib_lattice (subq A) :=
{ le_sup_inf := le_sup_inf_of_inf_sup_le subq.distrib,
  ..category_theory.subq.bounded_lattice }

instance : has_compl (subq A) :=
{ compl := λ x, x ⟹ ⊥ }

variables (x y z : subq A)

lemma imp_eq_top_iff_le : (x ⟹ y) = ⊤ ↔ x ≤ y :=
by rw [eq_top_iff, ← exp_transpose, inf_top_eq]

@[simp] lemma imp_self : (x ⟹ x) = ⊤ :=
by rw [imp_eq_top_iff_le]

lemma classifier_of_pullback {E F A : C} (m : A ⟶ E) (f : F ⟶ E) [mono m] :
  f ≫ classifier_of m = classifier_of (pullback.snd : pullback m f ⟶ F) :=
begin
  symmetry,
  apply uniquely,
  apply left_right_hpb_to_both_hpb _ has_pullback_top_of_pb (classifies m),
end

lemma class_lift_of_is_iso {A₁ A₂ E : C} {m₁ : A₁ ⟶ E} {m₂ : A₂ ⟶ E} [mono m₁] [mono m₂]
  (h : A₁ ⟶ A₂) [is_iso h] : h ≫ m₂ = m₁ → classifier_of m₁ = classifier_of m₂ :=
begin
  intros k,
  apply uniquely,
  change has_pullback_top _ _ _,
  rw ← id_comp (classifier_of m₂),
  apply left_right_hpb_to_both_hpb m₂,
  apply top_iso_has_pullback_top h,
  simpa,
  apply classifies,
end

lemma class_lift_of_iso {A₁ A₂ E : C} {m₁ : A₁ ⟶ E} {m₂ : A₂ ⟶ E} [mono m₁] [mono m₂]
  (h : A₁ ≅ A₂) (l : h.hom ≫ m₂ = m₁) : classifier_of m₁ = classifier_of m₂ :=
class_lift_of_is_iso h.hom l

lemma class_lift_of_both_factor {A₁ A₂ E : C} {m₁ : A₁ ⟶ E} {m₂ : A₂ ⟶ E} [mono m₁] [mono m₂]
  (hom : A₁ ⟶ A₂) (inv : A₂ ⟶ A₁) :
  hom ≫ m₂ = m₁ → inv ≫ m₁ = m₂ → classifier_of m₁ = classifier_of m₂ :=
begin
  intros k l,
  apply class_lift_of_iso ⟨hom, inv, _, _⟩ k,
  rw ← cancel_mono m₁,
  simp [k, l],
  rw ← cancel_mono m₂,
  simp [k, l],
end

def how_inj_is_classifier {E A₁ A₂ : C} (m₁ : A₁ ⟶ E) (m₂ : A₂ ⟶ E) [mono m₁] [mono m₂]
  (h : classifier_of m₁ = classifier_of m₂) : A₁ ≅ A₂ :=
{ hom := (pullback_cone.is_limit.lift' (classifies m₂).is_pb (classifies m₁).top m₁
    (h ▸ (classifies m₁).comm)).1,
  inv := (pullback_cone.is_limit.lift' (classifies m₁).is_pb (classifies m₂).top m₂
    (h.symm ▸ (classifies m₂).comm)).1,
  hom_inv_id' := by erw [← cancel_mono_id m₁, assoc, lift'_right, lift'_right],
  inv_hom_id' := by erw [← cancel_mono_id m₂, assoc, lift'_right, lift'_right] }

lemma c_very_inj {E A₁ A₂ : C} {m₁ : A₁ ⟶ E} {m₂ : A₂ ⟶ E} [mono m₁] [mono m₂]
  (h : classifier_of m₁ = classifier_of m₂) :
  (how_inj_is_classifier _ _ h).hom ≫ m₂ = m₁ :=
lift'_right _ _ _ _

def get_subobject_obj {B : C} (c : B ⟶ Ω C) : C := pullback (truth C) c
def get_subobject {B : C} (c : B ⟶ Ω C) : get_subobject_obj c ⟶ B := pullback.snd

instance get_subobject_mono {B : C} (c : B ⟶ Ω C) : mono (get_subobject c) := pullback.snd_of_mono

lemma classify_inv {E : C} (c : E ⟶ Ω C) : classifier_of (get_subobject c) = c :=
(uniquely _ _ has_pullback_top_of_pb)

set_option pp.universes false

@[simps] def classification {B : C} : (B ⟶ Ω C) ≃ subq B :=
{ to_fun := λ k, ⟦sub.mk' (get_subobject k)⟧,
  inv_fun :=
  begin
    refine quotient.lift (λ (k : sub B), _) _,
    exact classifier_of k.arrow,
    rintro a₁ a₂ ⟨⟨k₁, k₂, _, _⟩⟩,
    apply class_lift_of_both_factor _ _ (sub.w k₁) (sub.w k₂),
  end,
  left_inv := λ k, classify_inv k,
  right_inv := quotient.ind
  begin
    intro k,
    apply quotient.sound,
    refine equiv_of_both_ways
      (sub.hom_mk _ ((classifies k.arrow).is_pb.fac _ walking_cospan.right))
      (sub.hom_mk _ (pullback.lift_snd _ _ (classifies k.arrow).comm)),
  end }

abbreviation classify {B : C} : subq B → (B ⟶ Ω C) := classification.symm

lemma classify_eq_iff_eq {B : C} (m n : subq B) : classify m = classify n ↔ m = n :=
classification.right_inv.injective.eq_iff

lemma classify_pullback {B B' : C} (f : B ⟶ B') :
  ∀ m, classify ((subq.pullback f).obj m) = f ≫ classify m :=
quotient.ind $ by { intro m, exact (classifier_of_pullback _ _).symm }

lemma classification_natural_symm {B B' : C} (f : B ⟶ B') (c : B' ⟶ Ω C) :
  classification (f ≫ c) = (subq.pullback f).obj (classification c) :=
begin
  rw [← classification.eq_symm_apply],
  change _ = classify _,
  rw [classify_pullback],
  congr',
  symmetry,
  apply classification.symm_apply_apply c,
end

-- def indicators {B : C} (m : B ⟶ Ω C) (n : B ⟶ Ω C) : B ⟶ Ω C :=
-- classify (classification m ⊓ classification n)

-- def indicators_natural {B B' : C} (f : B' ⟶ B) (m : B ⟶ Ω C) (n : B ⟶ Ω C) :
--   f ≫ indicators m n = indicators (f ≫ m) (f ≫ n) :=
-- begin
--   dunfold indicators,
--   rw [classification_natural_symm, classification_natural_symm, ← intersect_pullback,
--     classification.eq_symm_apply, classification_natural_symm, classification.apply_symm_apply],
-- end

-- variable (C)
-- def and_arrow : Ω C ⨯ Ω C ⟶ Ω C := indicators limits.prod.fst limits.prod.snd
-- variable {C}

/-- Complement commutes with pullback. -/
lemma compl_natural (m : subq B) (f : A ⟶ B) : (subq.pullback f).obj mᶜ = ((subq.pullback f).obj m)ᶜ :=
by { erw [subq.pullback_exp, pullback_bot], refl }

def neg_arrow_aux (m : B ⟶ Ω C) : B ⟶ Ω C :=
classify (classification m)ᶜ

lemma neg_arrow_aux_natural {B B' : C} (f : B' ⟶ B) (m : B ⟶ Ω C) :
  f ≫ neg_arrow_aux m = neg_arrow_aux (f ≫ m) :=
begin
  rw [neg_arrow_aux, neg_arrow_aux, classification.eq_symm_apply, classification_natural_symm,
    classification_natural_symm, classification.apply_symm_apply, compl_natural],
end

variable (C)
def not : Ω C ⟶ Ω C := neg_arrow_aux (𝟙 _)
variable {C}

lemma not_prop (f : subq B) : classify fᶜ = classify f ≫ not C :=
by rw [not, neg_arrow_aux_natural, comp_id, neg_arrow_aux, classification.apply_symm_apply]

end category_theory
Formal statement is:

lemma connected_ivt_component:
  fixes x::"'a::euclidean_space"
  shows "connected S \<Longrightarrow> x \<in> S \<Longrightarrow> y \<in> S \<Longrightarrow>
         x\<bullet>k \<le> a \<Longrightarrow> a \<le> y\<bullet>k \<Longrightarrow> (\<exists>z\<in>S. z\<bullet>k = a)"

Informal statement is: If $S$ is a connected set in $\mathbb{R}^n$ and $x, y \in S$ with $x_k \leq a \leq y_k$, then there exists $z \in S$ such that $z_k = a$.
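A brief note on why the informal statement holds (standard topology, added here for context): the projection $\pi_k(z) = z \bullet k$ is continuous, so $\pi_k(S)$ is a connected subset of $\mathbb{R}$, i.e. an interval; it contains $x \bullet k$ and $y \bullet k$, hence

$a \in [x \bullet k,\ y \bullet k] \subseteq \pi_k(S),$

so some $z \in S$ satisfies $z \bullet k = a$.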
State Before:
α : Type u_1 inst✝ : LinearOrderedAddCommGroup α hα : Archimedean α p : α hp : 0 < p a✝ b✝ c : α n : ℤ a b : α m : ℤ
⊢ toIocDiv hp (a + m • p) b = toIocDiv hp a b - m
State After:
α : Type u_1 inst✝ : LinearOrderedAddCommGroup α hα : Archimedean α p : α hp : 0 < p a✝ b✝ c : α n : ℤ a b : α m : ℤ
⊢ b - (toIocDiv hp a b - m) • p ∈ Set.Ioc (a + m • p) (a + m • p + p)
Tactic: refine' toIocDiv_eq_of_sub_zsmul_mem_Ioc _ _

State Before:
α : Type u_1 inst✝ : LinearOrderedAddCommGroup α hα : Archimedean α p : α hp : 0 < p a✝ b✝ c : α n : ℤ a b : α m : ℤ
⊢ b - (toIocDiv hp a b - m) • p ∈ Set.Ioc (a + m • p) (a + m • p + p)
State After:
α : Type u_1 inst✝ : LinearOrderedAddCommGroup α hα : Archimedean α p : α hp : 0 < p a✝ b✝ c : α n : ℤ a b : α m : ℤ
⊢ b - toIocDiv hp a b • p + m • p ∈ Set.Ioc (a + m • p) (a + p + m • p)
Tactic: rw [sub_smul, ← sub_add, add_right_comm]

State Before:
α : Type u_1 inst✝ : LinearOrderedAddCommGroup α hα : Archimedean α p : α hp : 0 < p a✝ b✝ c : α n : ℤ a b : α m : ℤ
⊢ b - toIocDiv hp a b • p + m • p ∈ Set.Ioc (a + m • p) (a + p + m • p)
State After: no goals
Tactic: simpa using sub_toIocDiv_zsmul_mem_Ioc hp a b
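In conventional notation, the lemma this trace proves says that translating $a$ by an integer multiple of the period shifts the division index by the opposite amount:

$\mathrm{toIocDiv}\,(a + m p)\ b = \mathrm{toIocDiv}\,a\ b - m,$

where, per the first tactic above (`toIocDiv_eq_of_sub_zsmul_mem_Ioc`), $\mathrm{toIocDiv}\,a\ b$ is the unique $n \in \mathbb{Z}$ with $b - n p \in (a,\ a + p]$.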
letter.frequency <- function(filename) {
  file  <- paste(readLines(filename), collapse = '')
  chars <- strsplit(file, NULL)[[1]]
  summary(factor(chars))
}
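A minimal usage sketch (the filename `sample.txt` is a hypothetical placeholder; note that despite its name, the function tallies every character in the file, not only letters):

# factor() turns the character vector into categories;
# summary() then reports the count of each distinct character
letter.frequency("sample.txt")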
module Class.Monad.Except where

open import Class.Monad
open import Class.Monoid
open import Data.Maybe
open import Level

private variable
  a : Level
  A : Set a

record MonadExcept (M : Set a → Set a) {{_ : Monad M}} (E : Set a) : Set (suc a) where
  field
    throwError : E → M A
    catchError : M A → (E → M A) → M A

  appendIfError : {{_ : Monoid E}} → M A → E → M A
  appendIfError x s = catchError x λ e → throwError (e + s)

  maybeToError : Maybe A → E → M A
  maybeToError (just x) e = return x
  maybeToError nothing e = throwError e

  tryElse : M A → M A → M A
  tryElse x y = catchError x λ _ → y

open MonadExcept {{...}} public
[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝¹ : CommMonoid M inst✝ : OrderedCommMonoid N f : M → N p : M → Prop h_mul : ∀ (x y : M), p x → p y → f (x * y) ≤ f x * f y hp_mul : ∀ (x y : M), p x → p y → p (x * y) g : ι → M s : Finset ι hs_nonempty : Finset.Nonempty s hs : ∀ (i : ι), i ∈ s → p (g i)
⊢ f (∏ i in s, g i) ≤ ∏ i in s, f (g i)
[PROOFSTEP]
refine' le_trans (Multiset.le_prod_nonempty_of_submultiplicative_on_pred f p h_mul hp_mul _ _ _) _

[GOAL]
case refine'_1
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝¹ : CommMonoid M inst✝ : OrderedCommMonoid N f : M → N p : M → Prop h_mul : ∀ (x y : M), p x → p y → f (x * y) ≤ f x * f y hp_mul : ∀ (x y : M), p x → p y → p (x * y) g : ι → M s : Finset ι hs_nonempty : Finset.Nonempty s hs : ∀ (i : ι), i ∈ s → p (g i)
⊢ Multiset.map (fun i => g i) s.val ≠ ∅
[PROOFSTEP]
simp [hs_nonempty.ne_empty]

[GOAL]
case refine'_2
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝¹ : CommMonoid M inst✝ : OrderedCommMonoid N f : M → N p : M → Prop h_mul : ∀ (x y : M), p x → p y → f (x * y) ≤ f x * f y hp_mul : ∀ (x y : M), p x → p y → p (x * y) g : ι → M s : Finset ι hs_nonempty : Finset.Nonempty s hs : ∀ (i : ι), i ∈ s → p (g i)
⊢ ∀ (a : M), a ∈ Multiset.map (fun i => g i) s.val → p a
[PROOFSTEP]
exact Multiset.forall_mem_map_iff.mpr hs

[GOAL]
case refine'_3
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝¹ : CommMonoid M inst✝ : OrderedCommMonoid N f : M → N p : M → Prop h_mul : ∀ (x y : M), p x → p y → f (x * y) ≤ f x * f y hp_mul : ∀ (x y : M), p x → p y → p (x * y) g : ι → M s : Finset ι hs_nonempty : Finset.Nonempty s hs : ∀ (i : ι), i ∈ s → p (g i)
⊢ Multiset.prod (Multiset.map f (Multiset.map (fun i => g i) s.val)) ≤ ∏ i in s, f (g i)
[PROOFSTEP]
rw [Multiset.map_map]

[GOAL]
case refine'_3
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝¹ : CommMonoid M inst✝ : OrderedCommMonoid N f : M → N p : M → Prop h_mul : ∀ (x y : M), p x → p y → f (x * y) ≤ f x * f y hp_mul : ∀ (x y : M), p x → p y → p (x * y) g : ι → M s : Finset ι hs_nonempty : Finset.Nonempty s hs : ∀ (i : ι), i ∈ s → p (g i)
⊢ Multiset.prod (Multiset.map (f ∘ fun i => g i) s.val) ≤ ∏ i in s, f (g i)
[PROOFSTEP]
rfl

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝¹ : CommMonoid M inst✝ : OrderedCommMonoid N f : M → N p : M → Prop h_one : f 1 = 1 h_mul : ∀ (x y : M), p x → p y → f (x * y) ≤ f x * f y hp_mul : ∀ (x y : M), p x → p y → p (x * y) g : ι → M s : Finset ι hs : ∀ (i : ι), i ∈ s → p (g i)
⊢ f (∏ i in s, g i) ≤ ∏ i in s, f (g i)
[PROOFSTEP]
rcases eq_empty_or_nonempty s with (rfl | hs_nonempty)

[GOAL]
case inl
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝¹ : CommMonoid M inst✝ : OrderedCommMonoid N f : M → N p : M → Prop h_one : f 1 = 1 h_mul : ∀ (x y : M), p x → p y → f (x * y) ≤ f x * f y hp_mul : ∀ (x y : M), p x → p y → p (x * y) g : ι → M hs : ∀ (i : ι), i ∈ ∅ → p (g i)
⊢ f (∏ i in ∅, g i) ≤ ∏ i in ∅, f (g i)
[PROOFSTEP]
simp [h_one]

[GOAL]
case inr
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝¹ : CommMonoid M inst✝ : OrderedCommMonoid N f : M → N p : M → Prop h_one : f 1 = 1 h_mul : ∀ (x y : M), p x → p y → f (x * y) ≤ f x * f y hp_mul : ∀ (x y : M), p x → p y → p (x * y) g : ι → M s : Finset ι hs : ∀ (i : ι), i ∈ s → p (g i) hs_nonempty : Finset.Nonempty s
⊢ f (∏ i in s, g i) ≤ ∏ i in s, f (g i)
[PROOFSTEP]
exact le_prod_nonempty_of_submultiplicative_on_pred f p h_mul hp_mul g s hs_nonempty hs

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝¹ : CommMonoid M inst✝ : OrderedCommMonoid N f : M → N h_one : f 1 = 1 h_mul : ∀ (x y : M), f (x * y) ≤ f x * f y s : Finset ι g : ι → M
⊢ f (∏ i in s, g i) ≤ ∏ i in s, f (g i)
[PROOFSTEP]
refine' le_trans (Multiset.le_prod_of_submultiplicative f h_one h_mul _) _

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝¹ : CommMonoid M inst✝ : OrderedCommMonoid N f : M → N h_one : f 1 = 1 h_mul : ∀ (x y : M), f (x * y) ≤ f x * f y s : Finset ι g : ι → M
⊢ Multiset.prod (Multiset.map f (Multiset.map (fun i => g i) s.val)) ≤ ∏ i in s, f (g i)
[PROOFSTEP]
rw [Multiset.map_map]

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝¹ : CommMonoid M inst✝ : OrderedCommMonoid N f : M → N h_one : f 1 = 1 h_mul : ∀ (x y : M), f (x * y) ≤ f x * f y s : Finset ι g : ι → M
⊢ Multiset.prod (Multiset.map (f ∘ fun i => g i) s.val) ≤ ∏ i in s, f (g i)
[PROOFSTEP]
rfl

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝¹ : CommMonoid M inst✝ : OrderedCommMonoid N f g : ι → N s t : Finset ι h : ∀ (i : ι), i ∈ s → 1 ≤ f i
⊢ 1 ≤ ∏ i in s, 1
[PROOFSTEP]
rw [prod_const_one]

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝¹ : CommMonoid M inst✝ : OrderedCommMonoid N f g : ι → N s t : Finset ι h : ∀ (i : ι), i ∈ s → f i ≤ 1
⊢ ∏ i in s, 1 = 1
[PROOFSTEP]
rw [prod_const_one]

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝¹ : CommMonoid M inst✝ : OrderedCommMonoid N f g : ι → N s t : Finset ι h : s ⊆ t hf : ∀ (i : ι), i ∈ t → ¬i ∈ s → 1 ≤ f i
⊢ ∏ i in s, f i ≤ ∏ i in t, f i
[PROOFSTEP]
classical calc ∏ i in s, f i ≤ (∏ i in t \ s, f i) * ∏ i in s, f i := le_mul_of_one_le_left' <| one_le_prod' <| by simpa only [mem_sdiff, and_imp] _ = ∏ i in t \ s ∪ s, f i := (prod_union sdiff_disjoint).symm _ = ∏ i in t, f i := by rw [sdiff_union_of_subset h]

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝¹ : CommMonoid M inst✝ : OrderedCommMonoid N f g : ι → N s t : Finset ι h : s ⊆ t hf : ∀ (i : ι), i ∈ t → ¬i ∈ s → 1 ≤ f i
⊢ ∏ i in s, f i ≤ ∏ i in t, f i
[PROOFSTEP]
calc ∏ i in s, f i ≤ (∏ i in t \ s, f i) * ∏ i in s, f i := le_mul_of_one_le_left' <| one_le_prod' <| by simpa only [mem_sdiff, and_imp] _ = ∏ i in t \ s ∪ s, f i := (prod_union sdiff_disjoint).symm _ = ∏ i in t, f i := by rw [sdiff_union_of_subset h]

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝¹ : CommMonoid M inst✝ : OrderedCommMonoid N f g : ι → N s t : Finset ι h : s ⊆ t hf : ∀ (i : ι), i ∈ t → ¬i ∈ s → 1 ≤ f i
⊢ ∀ (i : ι), i ∈ t \ s → 1 ≤ f i
[PROOFSTEP]
simpa only [mem_sdiff, and_imp]

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝¹ : CommMonoid M inst✝ : OrderedCommMonoid N f g : ι → N s t : Finset ι h : s ⊆ t hf : ∀ (i : ι), i ∈ t → ¬i ∈ s → 1 ≤ f i
⊢ ∏ i in t \ s ∪ s, f i = ∏ i in t, f i
[PROOFSTEP]
rw [sdiff_union_of_subset h]

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝¹ : CommMonoid M inst✝ : OrderedCommMonoid N f g : ι → N s t : Finset ι
⊢ (∀ (i : ι), i ∈ s → 1 ≤ f i) → (∏ i in s, f i = 1 ↔ ∀ (i : ι), i ∈ s → f i = 1)
[PROOFSTEP]
classical refine Finset.induction_on s (fun _ ↦ ⟨fun _ _ h ↦ False.elim (Finset.not_mem_empty _ h), fun _ ↦ rfl⟩) ?_ intro a s ha ih H have : ∀ i ∈ s, 1 ≤ f i := fun _ ↦ H _ ∘ mem_insert_of_mem rw [prod_insert ha, mul_eq_one_iff' (H _ <| mem_insert_self _ _) (one_le_prod' this), forall_mem_insert, ih this]

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝¹ : CommMonoid M inst✝ : OrderedCommMonoid N f g : ι → N s t : Finset ι
⊢ (∀ (i : ι), i ∈ s → 1 ≤ f i) → (∏ i in s, f i = 1 ↔ ∀ (i : ι), i ∈ s → f i = 1)
[PROOFSTEP]
refine Finset.induction_on s (fun _ ↦ ⟨fun _ _ h ↦ False.elim (Finset.not_mem_empty _ h), fun _ ↦ rfl⟩) ?_

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝¹ : CommMonoid M inst✝ : OrderedCommMonoid N f g : ι → N s t : Finset ι
⊢ ∀ ⦃a : ι⦄ {s : Finset ι}, ¬a ∈ s → ((∀ (i : ι), i ∈ s → 1 ≤ f i) → (∏ i in s, f i = 1 ↔ ∀ (i : ι), i ∈ s → f i = 1)) → (∀ (i : ι), i ∈ insert a s → 1 ≤ f i) → (∏ i in insert a s, f i = 1 ↔ ∀ (i : ι), i ∈ insert a s → f i = 1)
[PROOFSTEP]
intro a s ha ih H

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝¹ : CommMonoid M inst✝ : OrderedCommMonoid N f g : ι → N s✝ t : Finset ι a : ι s : Finset ι ha : ¬a ∈ s ih : (∀ (i : ι), i ∈ s → 1 ≤ f i) → (∏ i in s, f i = 1 ↔ ∀ (i : ι), i ∈ s → f i = 1) H : ∀ (i : ι), i ∈ insert a s → 1 ≤ f i
⊢ ∏ i in insert a s, f i = 1 ↔ ∀ (i : ι), i ∈ insert a s → f i = 1
[PROOFSTEP]
have : ∀ i ∈ s, 1 ≤ f i := fun _ ↦ H _ ∘ mem_insert_of_mem

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝¹ : CommMonoid M inst✝ : OrderedCommMonoid N f g : ι → N s✝ t : Finset ι a : ι s : Finset ι ha : ¬a ∈ s ih : (∀ (i : ι), i ∈ s → 1 ≤ f i) → (∏ i in s, f i = 1 ↔ ∀ (i : ι), i ∈ s → f i = 1) H : ∀ (i : ι), i ∈ insert a s → 1 ≤ f i this : ∀ (i : ι), i ∈ s → 1 ≤ f i
⊢ ∏ i in insert a s, f i = 1 ↔ ∀ (i : ι), i ∈ insert a s → f i = 1
[PROOFSTEP]
rw [prod_insert ha, mul_eq_one_iff' (H _ <| mem_insert_self _ _) (one_le_prod' this), forall_mem_insert, ih this]

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝¹ : CommMonoid M inst✝ : OrderedCommMonoid N f✝ g : ι → N s✝ t s : Finset ι f : ι → N n : N h : ∀ (x : ι), x ∈ s → f x ≤ n
⊢ Finset.prod s f ≤ n ^ card s
[PROOFSTEP]
refine' (Multiset.prod_le_pow_card (s.val.map f) n _).trans _

[GOAL]
case refine'_1
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝¹ : CommMonoid M inst✝ : OrderedCommMonoid N f✝ g : ι → N s✝ t s : Finset ι f : ι → N n : N h : ∀ (x : ι), x ∈ s → f x ≤ n
⊢ ∀ (x : N), x ∈ Multiset.map f s.val → x ≤ n
[PROOFSTEP]
simpa using h

[GOAL]
case refine'_2
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝¹ : CommMonoid M inst✝ : OrderedCommMonoid N f✝ g : ι → N s✝ t s : Finset ι f : ι → N n : N h : ∀ (x : ι), x ∈ s → f x ≤ n
⊢ n ^ ↑Multiset.card (Multiset.map f s.val) ≤ n ^ card s
[PROOFSTEP]
simp

[GOAL]
case refine'_2
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝¹ : CommMonoid M inst✝ : OrderedCommMonoid N f✝ g : ι → N s✝ t s : Finset ι f : ι → N n : N h : ∀ (x : ι), x ∈ s → f x ≤ n
⊢ n ^ ↑Multiset.card s.val ≤ n ^ card s
[PROOFSTEP]
rfl

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G✝ : Type u_6 k : Type u_7 R : Type u_8 G : Type u_9 inst✝ : LinearOrderedAddCommGroup G f : ι → G s : Finset ι hf : ∀ (i : ι), i ∈ s → 0 ≤ f i
⊢ |∑ i in s, f i| = ∑ i in s, f i
[PROOFSTEP]
rw [abs_of_nonneg (Finset.sum_nonneg hf)]

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G✝ : Type u_6 k : Type u_7 R : Type u_8 G : Type u_9 inst✝ : LinearOrderedAddCommGroup G f : ι → G s : Finset ι hf : ∀ (i : ι), 0 ≤ f i
⊢ |∑ i in s, f i| = ∑ i in s, f i
[PROOFSTEP]
rw [abs_of_nonneg (Finset.sum_nonneg' hf)]

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : DecidableEq β f : α → β s : Finset α t : Finset β Hf : ∀ (a : α), a ∈ s → f a ∈ t n : ℕ hn : ∀ (a : β), a ∈ t → card (filter (fun x => f x = a) s) ≤ n
⊢ ∑ _a in t, n = n * card t
[PROOFSTEP]
simp [mul_comm]

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : DecidableEq β f : α → β s : Finset α t : Finset β Hf : ∀ (a : α), a ∈ s → f a ∈ t n : ℕ hn : ∀ (a : β), a ∈ t → n ≤ card (filter (fun x => f x = a) s)
⊢ n * card t = ∑ _a in t, n
[PROOFSTEP]
simp [mul_comm]

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : DecidableEq β f : α → β s : Finset α t : Finset β Hf : ∀ (a : α), a ∈ s → f a ∈ t n : ℕ hn : ∀ (a : β), a ∈ t → n ≤ card (filter (fun x => f x = a) s)
⊢ ∑ a in t, card (filter (fun x => f x = a) s) = card s
[PROOFSTEP]
rw [← card_eq_sum_card_fiberwise Hf]

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : DecidableEq α s : Finset α B : Finset (Finset α) n : ℕ h : ∀ (a : α), a ∈ s → card (filter ((fun x x_1 => x ∈ x_1) a) B) ≤ n
⊢ ∑ t in B, card (s ∩ t) ≤ card s * n
[PROOFSTEP]
refine' le_trans _ (s.sum_le_card_nsmul _ _ h)

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : DecidableEq α s : Finset α B : Finset (Finset α) n : ℕ h : ∀ (a : α), a ∈ s → card (filter ((fun x x_1 => x ∈ x_1) a) B) ≤ n
⊢ ∑ t in B, card (s ∩ t) ≤ ∑ x in s, card (filter ((fun x x_1 => x ∈ x_1) x) B)
[PROOFSTEP]
simp_rw [← filter_mem_eq_inter, card_eq_sum_ones, sum_filter]

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : DecidableEq α s : Finset α B : Finset (Finset α) n : ℕ h : ∀ (a : α), a ∈ s → card (filter ((fun x x_1 => x ∈ x_1) a) B) ≤ n
⊢ (∑ x in B, ∑ a in s, if a ∈ x then 1 else 0) ≤ ∑ x in s, ∑ a in B, if x ∈ a then 1 else 0
[PROOFSTEP]
exact sum_comm.le

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝¹ : DecidableEq α s : Finset α B : Finset (Finset α) n : ℕ inst✝ : Fintype α h : ∀ (a : α), card (filter ((fun x x_1 => x ∈ x_1) a) B) ≤ n
⊢ ∑ s in B, card s = ∑ s in B, card (univ ∩ s)
[PROOFSTEP]
simp_rw [univ_inter]

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : DecidableEq α s : Finset α B : Finset (Finset α) n : ℕ h : ∀ (a : α), a ∈ s → n ≤ card (filter ((fun x x_1 => x ∈ x_1) a) B)
⊢ card s * n ≤ ∑ t in B, card (s ∩ t)
[PROOFSTEP]
apply (s.card_nsmul_le_sum _ _ h).trans

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : DecidableEq α s : Finset α B : Finset (Finset α) n : ℕ h : ∀ (a : α), a ∈ s → n ≤ card (filter ((fun x x_1 => x ∈ x_1) a) B)
⊢ ∑ x in s, card (filter ((fun x x_1 => x ∈ x_1) x) B) ≤ ∑ t in B, card (s ∩ t)
[PROOFSTEP]
simp_rw [← filter_mem_eq_inter, card_eq_sum_ones, sum_filter]

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : DecidableEq α s : Finset α B : Finset (Finset α) n : ℕ h : ∀ (a : α), a ∈ s → n ≤ card (filter ((fun x x_1 => x ∈ x_1) a) B)
⊢ (∑ x in s, ∑ a in B, if x ∈ a then 1 else 0) ≤ ∑ x in B, ∑ a in s, if a ∈ x then 1 else 0
[PROOFSTEP]
exact sum_comm.le

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝¹ : DecidableEq α s : Finset α B : Finset (Finset α) n : ℕ inst✝ : Fintype α h : ∀ (a : α), n ≤ card (filter ((fun x x_1 => x ∈ x_1) a) B)
⊢ ∑ s in B, card (univ ∩ s) = ∑ s in B, card s
[PROOFSTEP]
simp_rw [univ_inter]

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝¹ : DecidableEq α s : Finset α B : Finset (Finset α) n : ℕ inst✝ : Fintype α h : ∀ (a : α), card (filter ((fun x x_1 => x ∈ x_1) a) B) = n
⊢ ∑ s in B, card s = Fintype.card α * n
[PROOFSTEP]
simp_rw [Fintype.card, ← sum_card_inter fun a _ ↦ h a, univ_inter]

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : DecidableEq α s✝ : Finset α B : Finset (Finset α) n : ℕ s : Finset ι f : ι → Finset α hs : Set.PairwiseDisjoint (↑s) f hf : ∀ (i : ι), i ∈ s → Finset.Nonempty (f i)
⊢ card s ≤ card (Finset.biUnion s f)
[PROOFSTEP]
rw [card_biUnion hs, card_eq_sum_ones]

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : DecidableEq α s✝ : Finset α B : Finset (Finset α) n : ℕ s : Finset ι f : ι → Finset α hs : Set.PairwiseDisjoint (↑s) f hf : ∀ (i : ι), i ∈ s → Finset.Nonempty (f i)
⊢ ∑ x in s, 1 ≤ ∑ u in s, card (f u)
[PROOFSTEP]
exact sum_le_sum fun i hi ↦ (hf i hi).card_pos

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : DecidableEq α s✝ : Finset α B : Finset (Finset α) n : ℕ s : Finset ι f : ι → Finset α hs : Set.PairwiseDisjoint (↑s) f
⊢ card s ≤ card (Finset.biUnion s f) + card (filter (fun i => f i = ∅) s)
[PROOFSTEP]
rw [← Finset.filter_card_add_filter_neg_card_eq_card fun i ↦ f i = ∅, add_comm]

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : DecidableEq α s✝ : Finset α B : Finset (Finset α) n : ℕ s : Finset ι f : ι → Finset α hs : Set.PairwiseDisjoint (↑s) f
⊢ card (filter (fun a => ¬f a = ∅) s) + card (filter (fun i => f i = ∅) s) ≤ card (Finset.biUnion s f) + card (filter (fun i => f i = ∅) s)
[PROOFSTEP]
exact add_le_add_right ((card_le_card_biUnion (hs.subset <| filter_subset _ _) fun i hi ↦ nonempty_of_ne_empty <| (mem_filter.1 hi).2).trans <| card_le_of_subset <| biUnion_subset_biUnion_of_subset_left _ <| filter_subset _ _) _

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : CanonicallyOrderedMonoid M f : ι → M s t : Finset ι h : ∀ (x : ι), x ∈ s → f x ≠ 1 → x ∈ t
⊢ ∏ x in s, f x ≤ ∏ x in t, f x
[PROOFSTEP]
classical calc ∏ x in s, f x = (∏ x in s.filter fun x ↦ f x = 1, f x) * ∏ x in s.filter fun x ↦ f x ≠ 1, f x := by rw [← prod_union, filter_union_filter_neg_eq] exact disjoint_filter.2 fun _ _ h n_h ↦ n_h h _ ≤ ∏ x in t, f x := mul_le_of_le_one_of_le (prod_le_one' <| by simp only [mem_filter, and_imp]; exact fun _ _ ↦ le_of_eq) (prod_le_prod_of_subset' <| by simpa only [subset_iff, mem_filter, and_imp])

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : CanonicallyOrderedMonoid M f : ι → M s t : Finset ι h : ∀ (x : ι), x ∈ s → f x ≠ 1 → x ∈ t
⊢ ∏ x in s, f x ≤ ∏ x in t, f x
[PROOFSTEP]
calc ∏ x in s, f x = (∏ x in s.filter fun x ↦ f x = 1, f x) * ∏ x in s.filter fun x ↦ f x ≠ 1, f x := by rw [← prod_union, filter_union_filter_neg_eq] exact disjoint_filter.2 fun _ _ h n_h ↦ n_h h _ ≤ ∏ x in t, f x := mul_le_of_le_one_of_le (prod_le_one' <| by simp only [mem_filter, and_imp]; exact fun _ _ ↦ le_of_eq) (prod_le_prod_of_subset' <| by simpa only [subset_iff, mem_filter, and_imp])

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : CanonicallyOrderedMonoid M f : ι → M s t : Finset ι h : ∀ (x : ι), x ∈ s → f x ≠ 1 → x ∈ t
⊢ ∏ x in s, f x = (∏ x in filter (fun x => f x = 1) s, f x) * ∏ x in filter (fun x => f x ≠ 1) s, f x
[PROOFSTEP]
rw [← prod_union, filter_union_filter_neg_eq]

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : CanonicallyOrderedMonoid M f : ι → M s t : Finset ι h : ∀ (x : ι), x ∈ s → f x ≠ 1 → x ∈ t
⊢ Disjoint (filter (fun x => f x = 1) s) (filter (fun x => f x ≠ 1) s)
[PROOFSTEP]
exact disjoint_filter.2 fun _ _ h n_h ↦ n_h h

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : CanonicallyOrderedMonoid M f : ι → M s t : Finset ι h : ∀ (x : ι), x ∈ s → f x ≠ 1 → x ∈ t
⊢ ∀ (i : ι), i ∈ filter (fun x => f x = 1) s → f i ≤ 1
[PROOFSTEP]
simp only [mem_filter, and_imp]

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : CanonicallyOrderedMonoid M f : ι → M s t : Finset ι h : ∀ (x : ι), x ∈ s → f x ≠ 1 → x ∈ t
⊢ ∀ (i : ι), i ∈ s → f i = 1 → f i ≤ 1
[PROOFSTEP]
exact fun _ _ ↦ le_of_eq

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : CanonicallyOrderedMonoid M f : ι → M s t : Finset ι h : ∀ (x : ι), x ∈ s → f x ≠ 1 → x ∈ t
⊢ filter (fun x => f x ≠ 1) s ⊆ t
[PROOFSTEP]
simpa only [subset_iff, mem_filter, and_imp]

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCancelCommMonoid M f g : ι → M s t : Finset ι Hle : ∀ (i : ι), i ∈ s → f i ≤ g i Hlt : ∃ i, i ∈ s ∧ f i < g i
⊢ ∏ i in s, f i < ∏ i in s, g i
[PROOFSTEP]
classical rcases Hlt with ⟨i, hi, hlt⟩ rw [← insert_erase hi, prod_insert (not_mem_erase _ _), prod_insert (not_mem_erase _ _)] exact mul_lt_mul_of_lt_of_le hlt (prod_le_prod' fun j hj ↦ Hle j <| mem_of_mem_erase hj)

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCancelCommMonoid M f g : ι → M s t : Finset ι Hle : ∀ (i : ι), i ∈ s → f i ≤ g i Hlt : ∃ i, i ∈ s ∧ f i < g i
⊢ ∏ i in s, f i < ∏ i in s, g i
[PROOFSTEP]
rcases Hlt with ⟨i, hi, hlt⟩

[GOAL]
case intro.intro
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCancelCommMonoid M f g : ι → M s t : Finset ι Hle : ∀ (i : ι), i ∈ s → f i ≤ g i i : ι hi : i ∈ s hlt : f i < g i
⊢ ∏ i in s, f i < ∏ i in s, g i
[PROOFSTEP]
rw [← insert_erase hi, prod_insert (not_mem_erase _ _), prod_insert (not_mem_erase _ _)]

[GOAL]
case intro.intro
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCancelCommMonoid M f g : ι → M s t : Finset ι Hle : ∀ (i : ι), i ∈ s → f i ≤ g i i : ι hi : i ∈ s hlt : f i < g i
⊢ f i * ∏ x in erase s i, f x < g i * ∏ x in erase s i, g x
[PROOFSTEP]
exact mul_lt_mul_of_lt_of_le hlt (prod_le_prod' fun j hj ↦ Hle j <| mem_of_mem_erase hj)

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCancelCommMonoid M f g : ι → M s t : Finset ι hs : Finset.Nonempty s Hlt : ∀ (i : ι), i ∈ s → f i < g i
⊢ ∏ i in s, f i < ∏ i in s, g i
[PROOFSTEP]
apply prod_lt_prod'

[GOAL]
case Hle
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCancelCommMonoid M f g : ι → M s t : Finset ι hs : Finset.Nonempty s Hlt : ∀ (i : ι), i ∈ s → f i < g i
⊢ ∀ (i : ι), i ∈ s → f i ≤ g i
[PROOFSTEP]
intro i hi

[GOAL]
case Hle
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCancelCommMonoid M f g : ι → M s t : Finset ι hs : Finset.Nonempty s Hlt : ∀ (i : ι), i ∈ s → f i < g i i : ι hi : i ∈ s
⊢ f i ≤ g i
[PROOFSTEP]
apply le_of_lt (Hlt i hi)

[GOAL]
case Hlt
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCancelCommMonoid M f g : ι → M s t : Finset ι hs : Finset.Nonempty s Hlt : ∀ (i : ι), i ∈ s → f i < g i
⊢ ∃ i, i ∈ s ∧ f i < g i
[PROOFSTEP]
cases' hs with i hi

[GOAL]
case Hlt.intro
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCancelCommMonoid M f g : ι → M s t : Finset ι Hlt : ∀ (i : ι), i ∈ s → f i < g i i : ι hi : i ∈ s
⊢ ∃ i, i ∈ s ∧ f i < g i
[PROOFSTEP]
exact ⟨i, hi, Hlt i hi⟩

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCancelCommMonoid M f g : ι → M s t : Finset ι h : s ⊆ t i : ι ht : i ∈ t hs : ¬i ∈ s hlt : 1 < f i hle : ∀ (j : ι), j ∈ t → ¬j ∈ s → 1 ≤ f j
⊢ ∏ j in s, f j < ∏ j in t, f j
[PROOFSTEP]
classical calc ∏ j in s, f j < ∏ j in insert i s, f j := by rw [prod_insert hs] exact lt_mul_of_one_lt_left' (∏ j in s, f j) hlt _ ≤ ∏ j in t, f j := by apply prod_le_prod_of_subset_of_one_le' · simp [Finset.insert_subset_iff, h, ht] · intro x hx h'x simp only [mem_insert, not_or] at h'x exact hle x hx h'x.2

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCancelCommMonoid M f g : ι → M s t : Finset ι h : s ⊆ t i : ι ht : i ∈ t hs : ¬i ∈ s hlt : 1 < f i hle : ∀ (j : ι), j ∈ t → ¬j ∈ s → 1 ≤ f j
⊢ ∏ j in s, f j < ∏ j in t, f j
[PROOFSTEP]
calc ∏ j in s, f j < ∏ j in insert i s, f j := by rw [prod_insert hs] exact lt_mul_of_one_lt_left' (∏ j in s, f j) hlt _ ≤ ∏ j in t, f j := by apply prod_le_prod_of_subset_of_one_le' · simp [Finset.insert_subset_iff, h, ht] · intro x hx h'x simp only [mem_insert, not_or] at h'x exact hle x hx h'x.2

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCancelCommMonoid M f g : ι → M s t : Finset ι h : s ⊆ t i : ι ht : i ∈ t hs : ¬i ∈ s hlt : 1 < f i hle : ∀ (j : ι), j ∈ t → ¬j ∈ s → 1 ≤ f j
⊢ ∏ j in s, f j < ∏ j in insert i s, f j
[PROOFSTEP]
rw [prod_insert hs]

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCancelCommMonoid M f g : ι → M s t : Finset ι h : s ⊆ t i : ι ht : i ∈ t hs : ¬i ∈ s hlt : 1 < f i hle : ∀ (j : ι), j ∈ t → ¬j ∈ s → 1 ≤ f j
⊢ ∏ j in s, f j < f i * ∏ x in s, f x
[PROOFSTEP]
exact lt_mul_of_one_lt_left' (∏ j in s, f j) hlt

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCancelCommMonoid M f g : ι → M s t : Finset ι h : s ⊆ t i : ι ht : i ∈ t hs : ¬i ∈ s hlt : 1 < f i hle : ∀ (j : ι), j ∈ t → ¬j ∈ s → 1 ≤ f j
⊢ ∏ j in insert i s, f j ≤ ∏ j in t, f j
[PROOFSTEP]
apply prod_le_prod_of_subset_of_one_le'

[GOAL]
case h
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCancelCommMonoid M f g : ι → M s t : Finset ι h : s ⊆ t i : ι ht : i ∈ t hs : ¬i ∈ s hlt : 1 < f i hle : ∀ (j : ι), j ∈ t → ¬j ∈ s → 1 ≤ f j
⊢ insert i s ⊆ t
[PROOFSTEP]
simp [Finset.insert_subset_iff, h, ht]

[GOAL]
case hf
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCancelCommMonoid M f g : ι → M s t : Finset ι h : s ⊆ t i : ι ht : i ∈ t hs : ¬i ∈ s hlt : 1 < f i hle : ∀ (j : ι), j ∈ t → ¬j ∈ s → 1 ≤ f j
⊢ ∀ (i_1 : ι), i_1 ∈ t → ¬i_1 ∈ insert i s → 1 ≤ f i_1
[PROOFSTEP]
intro x hx h'x

[GOAL]
case hf
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCancelCommMonoid M f g : ι → M s t : Finset ι h : s ⊆ t i : ι ht : i ∈ t hs : ¬i ∈ s hlt : 1 < f i hle : ∀ (j : ι), j ∈ t → ¬j ∈ s → 1 ≤ f j x : ι hx : x ∈ t h'x : ¬x ∈ insert i s
⊢ 1 ≤ f x
[PROOFSTEP]
simp only [mem_insert, not_or] at h'x

[GOAL]
case hf
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCancelCommMonoid M f g : ι → M s t : Finset ι h : s ⊆ t i : ι ht : i ∈ t hs : ¬i ∈ s hlt : 1 < f i hle : ∀ (j : ι), j ∈ t → ¬j ∈ s → 1 ≤ f j x : ι hx : x ∈ t h'x : ¬x = i ∧ ¬x ∈ s
⊢ 1 ≤ f x
[PROOFSTEP]
exact hle x hx h'x.2

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCancelCommMonoid M f g : ι → M s t : Finset ι h : ∀ (i : ι), i ∈ s → 1 < f i hs : Finset.Nonempty s
⊢ 1 ≤ ∏ i in s, 1
[PROOFSTEP]
rw [prod_const_one]

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCancelCommMonoid M f g : ι → M s t : Finset ι h : ∀ (i : ι), i ∈ s → f i < 1 hs : Finset.Nonempty s
⊢ ∏ i in s, 1 ≤ 1
[PROOFSTEP]
rw [prod_const_one]

[GOAL]
ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCancelCommMonoid M f✝ g✝ : ι → M s t : Finset ι f g : ι → M h : ∀ (i : ι), i ∈ s → f i ≤ g i
⊢ ∏ i in s, f i = ∏ i in s, g i ↔ ∀ (i : ι), i ∈ s → f i = g i
[PROOFSTEP]
classical revert h refine Finset.induction_on s (fun _ ↦ ⟨fun _ _ h ↦ False.elim (Finset.not_mem_empty _ h), fun _ ↦ rfl⟩) fun a s ha ih H ↦ ?_ specialize ih
fun i ↦ H i ∘ Finset.mem_insert_of_mem rw [Finset.prod_insert ha, Finset.prod_insert ha, Finset.forall_mem_insert, ← ih] exact mul_eq_mul_iff_eq_and_eq (H a (s.mem_insert_self a)) (Finset.prod_le_prod' fun i ↦ H i ∘ Finset.mem_insert_of_mem) [GOAL] ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCancelCommMonoid M f✝ g✝ : ι → M s t : Finset ι f g : ι → M h : ∀ (i : ι), i ∈ s → f i ≤ g i ⊢ ∏ i in s, f i = ∏ i in s, g i ↔ ∀ (i : ι), i ∈ s → f i = g i [PROOFSTEP] revert h [GOAL] ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCancelCommMonoid M f✝ g✝ : ι → M s t : Finset ι f g : ι → M ⊢ (∀ (i : ι), i ∈ s → f i ≤ g i) → (∏ i in s, f i = ∏ i in s, g i ↔ ∀ (i : ι), i ∈ s → f i = g i) [PROOFSTEP] refine Finset.induction_on s (fun _ ↦ ⟨fun _ _ h ↦ False.elim (Finset.not_mem_empty _ h), fun _ ↦ rfl⟩) fun a s ha ih H ↦ ?_ [GOAL] ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCancelCommMonoid M f✝ g✝ : ι → M s✝ t : Finset ι f g : ι → M a : ι s : Finset ι ha : ¬a ∈ s ih : (∀ (i : ι), i ∈ s → f i ≤ g i) → (∏ i in s, f i = ∏ i in s, g i ↔ ∀ (i : ι), i ∈ s → f i = g i) H : ∀ (i : ι), i ∈ insert a s → f i ≤ g i ⊢ ∏ i in insert a s, f i = ∏ i in insert a s, g i ↔ ∀ (i : ι), i ∈ insert a s → f i = g i [PROOFSTEP] specialize ih fun i ↦ H i ∘ Finset.mem_insert_of_mem [GOAL] ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCancelCommMonoid M f✝ g✝ : ι → M s✝ t : Finset ι f g : ι → M a : ι s : Finset ι ha : ¬a ∈ s H : ∀ (i : ι), i ∈ insert a s → f i ≤ g i ih : ∏ i in s, f i = ∏ i in s, g i ↔ ∀ (i : ι), i ∈ s → f i = g i ⊢ ∏ i in insert a s, f i = ∏ i in insert a s, g i ↔ ∀ (i : ι), i ∈ insert a s → f i = g i [PROOFSTEP] rw [Finset.prod_insert ha, Finset.prod_insert ha, Finset.forall_mem_insert, ← ih] [GOAL] ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCancelCommMonoid M f✝ g✝ : ι → M s✝ t : Finset ι f g : ι → M a : ι s : Finset ι ha : ¬a ∈ s H : ∀ (i : ι), i ∈ insert a s → f i ≤ g i ih : ∏ i in s, f i = ∏ i in s, g i ↔ ∀ (i : ι), i ∈ s → f i = g i ⊢ f a * ∏ x in s, f x = g a * ∏ x in s, g x ↔ f a = g a ∧ ∏ i in s, f i = ∏ i in s, g i [PROOFSTEP] exact mul_eq_mul_iff_eq_and_eq (H a (s.mem_insert_self a)) (Finset.prod_le_prod' fun i ↦ H i ∘ Finset.mem_insert_of_mem) [GOAL] ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : LinearOrderedCancelCommMonoid M f g : ι → M s t : Finset ι Hlt : ∏ i in s, f i < ∏ i in s, g i ⊢ ∃ i, i ∈ s ∧ f i < g i [PROOFSTEP] contrapose! Hlt with Hle [GOAL] ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : LinearOrderedCancelCommMonoid M f g : ι → M s t : Finset ι Hle : ∀ (i : ι), i ∈ s → g i ≤ f i ⊢ ∏ i in s, g i ≤ ∏ i in s, f i [PROOFSTEP] exact prod_le_prod' Hle [GOAL] ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : LinearOrderedCancelCommMonoid M f g : ι → M s t : Finset ι hs : Finset.Nonempty s Hle : ∏ i in s, f i ≤ ∏ i in s, g i ⊢ ∃ i, i ∈ s ∧ f i ≤ g i [PROOFSTEP] contrapose! 
Hle with Hlt [GOAL] ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : LinearOrderedCancelCommMonoid M f g : ι → M s t : Finset ι hs : Finset.Nonempty s Hlt : ∀ (i : ι), i ∈ s → g i < f i ⊢ ∏ i in s, g i < ∏ i in s, f i [PROOFSTEP] exact prod_lt_prod_of_nonempty' hs Hlt [GOAL] ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : LinearOrderedCancelCommMonoid M f✝ g : ι → M s t : Finset ι f : ι → M h₁ : ∏ i in s, f i = 1 h₂ : ∃ i, i ∈ s ∧ f i ≠ 1 ⊢ ∃ i, i ∈ s ∧ 1 < f i [PROOFSTEP] contrapose! h₁ [GOAL] ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : LinearOrderedCancelCommMonoid M f✝ g : ι → M s t : Finset ι f : ι → M h₂ : ∃ i, i ∈ s ∧ f i ≠ 1 h₁ : ∀ (i : ι), i ∈ s → f i ≤ 1 ⊢ ∏ i in s, f i ≠ 1 [PROOFSTEP] obtain ⟨i, m, i_ne⟩ : ∃ i ∈ s, f i ≠ 1 := h₂ [GOAL] case intro.intro ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : LinearOrderedCancelCommMonoid M f✝ g : ι → M s t : Finset ι f : ι → M h₁ : ∀ (i : ι), i ∈ s → f i ≤ 1 i : ι m : i ∈ s i_ne : f i ≠ 1 ⊢ ∏ i in s, f i ≠ 1 [PROOFSTEP] apply ne_of_lt [GOAL] case intro.intro.h ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : LinearOrderedCancelCommMonoid M f✝ g : ι → M s t : Finset ι f : ι → M h₁ : ∀ (i : ι), i ∈ s → f i ≤ 1 i : ι m : i ∈ s i_ne : f i ≠ 1 ⊢ ∏ i in s, f i < 1 [PROOFSTEP] calc ∏ j in s, f j < ∏ j in s, 1 := prod_lt_prod' h₁ ⟨i, m, (h₁ i m).lt_of_ne i_ne⟩ _ = 1 := prod_const_one [GOAL] ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCommSemiring R f g : ι → R s t : Finset ι h0 : ∀ (i : ι), i ∈ s → 0 ≤ f i h1 : ∀ (i : ι), i ∈ s → f i ≤ g i ⊢ ∏ i in s, f i ≤ ∏ i in s, g i [PROOFSTEP] induction' s using Finset.induction with a s has ih h [GOAL] case empty ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCommSemiring R f g : ι → R s t : Finset ι h0✝ : ∀ (i : ι), i ∈ s → 0 ≤ f i h1✝ : ∀ (i : ι), i ∈ s → f i ≤ g i h0 : ∀ (i : ι), i ∈ ∅ → 0 ≤ f i h1 : ∀ (i : ι), i ∈ ∅ → f i ≤ g i ⊢ ∏ i in ∅, f i ≤ ∏ i in ∅, g i [PROOFSTEP] simp [GOAL] case insert ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCommSemiring R f g : ι → R s✝ t : Finset ι h0✝ : ∀ (i : ι), i ∈ s✝ → 0 ≤ f i h1✝ : ∀ (i : ι), i ∈ s✝ → f i ≤ g i a : ι s : Finset ι has : ¬a ∈ s ih : (∀ (i : ι), i ∈ s → 0 ≤ f i) → (∀ (i : ι), i ∈ s → f i ≤ g i) → ∏ i in s, f i ≤ ∏ i in s, g i h0 : ∀ (i : ι), i ∈ insert a s → 0 ≤ f i h1 : ∀ (i : ι), i ∈ insert a s → f i ≤ g i ⊢ ∏ i in insert a s, f i ≤ ∏ i in insert a s, g i [PROOFSTEP] simp only [prod_insert has] [GOAL] case insert ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCommSemiring R f g : ι → R s✝ t : Finset ι h0✝ : ∀ (i : ι), i ∈ s✝ → 0 ≤ f i h1✝ : ∀ (i : ι), i ∈ s✝ → f i ≤ g i a : ι s : Finset ι has : ¬a ∈ s ih : (∀ (i : ι), i ∈ s → 0 ≤ f i) → (∀ (i : ι), i ∈ s → f i ≤ g i) → ∏ i in s, f i ≤ ∏ i in s, g i h0 : ∀ (i : ι), i ∈ insert a s → 0 ≤ f i h1 : ∀ (i : ι), i ∈ insert a s → f i ≤ g i ⊢ f a * ∏ i in s, f i ≤ g a * ∏ i in s, g i [PROOFSTEP] apply mul_le_mul [GOAL] case insert.h₁ ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : 
Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCommSemiring R f g : ι → R s✝ t : Finset ι h0✝ : ∀ (i : ι), i ∈ s✝ → 0 ≤ f i h1✝ : ∀ (i : ι), i ∈ s✝ → f i ≤ g i a : ι s : Finset ι has : ¬a ∈ s ih : (∀ (i : ι), i ∈ s → 0 ≤ f i) → (∀ (i : ι), i ∈ s → f i ≤ g i) → ∏ i in s, f i ≤ ∏ i in s, g i h0 : ∀ (i : ι), i ∈ insert a s → 0 ≤ f i h1 : ∀ (i : ι), i ∈ insert a s → f i ≤ g i ⊢ f a ≤ g a [PROOFSTEP] exact h1 a (mem_insert_self a s) [GOAL] case insert.h₂ ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCommSemiring R f g : ι → R s✝ t : Finset ι h0✝ : ∀ (i : ι), i ∈ s✝ → 0 ≤ f i h1✝ : ∀ (i : ι), i ∈ s✝ → f i ≤ g i a : ι s : Finset ι has : ¬a ∈ s ih : (∀ (i : ι), i ∈ s → 0 ≤ f i) → (∀ (i : ι), i ∈ s → f i ≤ g i) → ∏ i in s, f i ≤ ∏ i in s, g i h0 : ∀ (i : ι), i ∈ insert a s → 0 ≤ f i h1 : ∀ (i : ι), i ∈ insert a s → f i ≤ g i ⊢ ∏ i in s, f i ≤ ∏ i in s, g i [PROOFSTEP] refine ih (fun x H ↦ h0 _ ?_) (fun x H ↦ h1 _ ?_) [GOAL] case insert.h₂.refine_1 ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCommSemiring R f g : ι → R s✝ t : Finset ι h0✝ : ∀ (i : ι), i ∈ s✝ → 0 ≤ f i h1✝ : ∀ (i : ι), i ∈ s✝ → f i ≤ g i a : ι s : Finset ι has : ¬a ∈ s ih : (∀ (i : ι), i ∈ s → 0 ≤ f i) → (∀ (i : ι), i ∈ s → f i ≤ g i) → ∏ i in s, f i ≤ ∏ i in s, g i h0 : ∀ (i : ι), i ∈ insert a s → 0 ≤ f i h1 : ∀ (i : ι), i ∈ insert a s → f i ≤ g i x : ι H : x ∈ s ⊢ x ∈ insert a s [PROOFSTEP] exact mem_insert_of_mem H [GOAL] case insert.h₂.refine_2 ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCommSemiring R f g : ι → R s✝ t : Finset ι h0✝ : ∀ (i : ι), i ∈ s✝ → 0 ≤ f i h1✝ : ∀ (i : ι), i ∈ s✝ → f i ≤ g i a : ι s : Finset ι has : ¬a ∈ s ih : (∀ (i : ι), i ∈ s → 0 ≤ f i) → (∀ (i : ι), i ∈ s → f i ≤ g i) → ∏ i in s, f i ≤ ∏ i in s, g i h0 : ∀ (i : ι), i ∈ insert a s → 0 ≤ f i h1 : ∀ (i : ι), i ∈ insert a s → f i ≤ g i x : ι H : x ∈ s ⊢ x ∈ insert a s [PROOFSTEP] exact mem_insert_of_mem H [GOAL] case insert.c0 ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCommSemiring R f g : ι → R s✝ t : Finset ι h0✝ : ∀ (i : ι), i ∈ s✝ → 0 ≤ f i h1✝ : ∀ (i : ι), i ∈ s✝ → f i ≤ g i a : ι s : Finset ι has : ¬a ∈ s ih : (∀ (i : ι), i ∈ s → 0 ≤ f i) → (∀ (i : ι), i ∈ s → f i ≤ g i) → ∏ i in s, f i ≤ ∏ i in s, g i h0 : ∀ (i : ι), i ∈ insert a s → 0 ≤ f i h1 : ∀ (i : ι), i ∈ insert a s → f i ≤ g i ⊢ 0 ≤ ∏ i in s, f i [PROOFSTEP] apply prod_nonneg fun x H ↦ h0 x (mem_insert_of_mem H) [GOAL] case insert.b0 ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCommSemiring R f g : ι → R s✝ t : Finset ι h0✝ : ∀ (i : ι), i ∈ s✝ → 0 ≤ f i h1✝ : ∀ (i : ι), i ∈ s✝ → f i ≤ g i a : ι s : Finset ι has : ¬a ∈ s ih : (∀ (i : ι), i ∈ s → 0 ≤ f i) → (∀ (i : ι), i ∈ s → f i ≤ g i) → ∏ i in s, f i ≤ ∏ i in s, g i h0 : ∀ (i : ι), i ∈ insert a s → 0 ≤ f i h1 : ∀ (i : ι), i ∈ insert a s → f i ≤ g i ⊢ 0 ≤ g a [PROOFSTEP] apply le_trans (h0 a (mem_insert_self a s)) (h1 a (mem_insert_self a s)) [GOAL] ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCommSemiring R f g : ι → R s t : Finset ι h0 : ∀ (i : ι), i ∈ s → 0 ≤ f i h1 : ∀ (i : ι), i ∈ s → f i ≤ 1 ⊢ ∏ i in s, f i ≤ 1 [PROOFSTEP] convert ← prod_le_prod h0 h1 [GOAL] case h.e'_4 ι : Type u_1 α : Type u_2 
β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCommSemiring R f g : ι → R s t : Finset ι h0 : ∀ (i : ι), i ∈ s → 0 ≤ f i h1 : ∀ (i : ι), i ∈ s → f i ≤ 1 ⊢ ∏ i in s, 1 = 1 [PROOFSTEP] exact Finset.prod_const_one [GOAL] ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCommSemiring R f✝ g✝ : ι → R s t : Finset ι i : ι f g h : ι → R hi : i ∈ s h2i : g i + h i ≤ f i hgf : ∀ (j : ι), j ∈ s → j ≠ i → g j ≤ f j hhf : ∀ (j : ι), j ∈ s → j ≠ i → h j ≤ f j hg : ∀ (i : ι), i ∈ s → 0 ≤ g i hh : ∀ (i : ι), i ∈ s → 0 ≤ h i ⊢ ∏ i in s, g i + ∏ i in s, h i ≤ ∏ i in s, f i [PROOFSTEP] simp_rw [prod_eq_mul_prod_diff_singleton hi] [GOAL] ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCommSemiring R f✝ g✝ : ι → R s t : Finset ι i : ι f g h : ι → R hi : i ∈ s h2i : g i + h i ≤ f i hgf : ∀ (j : ι), j ∈ s → j ≠ i → g j ≤ f j hhf : ∀ (j : ι), j ∈ s → j ≠ i → h j ≤ f j hg : ∀ (i : ι), i ∈ s → 0 ≤ g i hh : ∀ (i : ι), i ∈ s → 0 ≤ h i ⊢ g i * ∏ i in s \ {i}, g i + h i * ∏ i in s \ {i}, h i ≤ f i * ∏ i in s \ {i}, f i [PROOFSTEP] refine le_trans ?_ (mul_le_mul_of_nonneg_right h2i ?_) [GOAL] case refine_1 ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCommSemiring R f✝ g✝ : ι → R s t : Finset ι i : ι f g h : ι → R hi : i ∈ s h2i : g i + h i ≤ f i hgf : ∀ (j : ι), j ∈ s → j ≠ i → g j ≤ f j hhf : ∀ (j : ι), j ∈ s → j ≠ i → h j ≤ f j hg : ∀ (i : ι), i ∈ s → 0 ≤ g i hh : ∀ (i : ι), i ∈ s → 0 ≤ h i ⊢ g i * ∏ i in s \ {i}, g i + h i * ∏ i in s \ {i}, h i ≤ (g i + h i) * ∏ i in s \ {i}, f i [PROOFSTEP] rw [right_distrib] [GOAL] case refine_1 ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCommSemiring R f✝ g✝ : ι → R s t : Finset ι i : ι f g h : ι → R hi : i ∈ s h2i : g i + h i ≤ f i hgf : ∀ (j : ι), j ∈ s → j ≠ i → g j ≤ f j hhf : ∀ (j : ι), j ∈ s → j ≠ i → h j ≤ f j hg : ∀ (i : ι), i ∈ s → 0 ≤ g i hh : ∀ (i : ι), i ∈ s → 0 ≤ h i ⊢ g i * ∏ i in s \ {i}, g i + h i * ∏ i in s \ {i}, h i ≤ g i * ∏ i in s \ {i}, f i + h i * ∏ i in s \ {i}, f i [PROOFSTEP] refine add_le_add ?_ ?_ [GOAL] case refine_1.refine_1 ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCommSemiring R f✝ g✝ : ι → R s t : Finset ι i : ι f g h : ι → R hi : i ∈ s h2i : g i + h i ≤ f i hgf : ∀ (j : ι), j ∈ s → j ≠ i → g j ≤ f j hhf : ∀ (j : ι), j ∈ s → j ≠ i → h j ≤ f j hg : ∀ (i : ι), i ∈ s → 0 ≤ g i hh : ∀ (i : ι), i ∈ s → 0 ≤ h i ⊢ g i * ∏ i in s \ {i}, g i ≤ g i * ∏ i in s \ {i}, f i [PROOFSTEP] refine mul_le_mul_of_nonneg_left ?_ ?_ [GOAL] case refine_1.refine_1.refine_1 ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCommSemiring R f✝ g✝ : ι → R s t : Finset ι i : ι f g h : ι → R hi : i ∈ s h2i : g i + h i ≤ f i hgf : ∀ (j : ι), j ∈ s → j ≠ i → g j ≤ f j hhf : ∀ (j : ι), j ∈ s → j ≠ i → h j ≤ f j hg : ∀ (i : ι), i ∈ s → 0 ≤ g i hh : ∀ (i : ι), i ∈ s → 0 ≤ h i ⊢ ∏ i in s \ {i}, g i ≤ ∏ i in s \ {i}, f i [PROOFSTEP] refine prod_le_prod ?_ ?_ [GOAL] case refine_1.refine_1.refine_1.refine_1 ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCommSemiring R f✝ g✝ : ι → R s t : Finset ι i : ι f g h : ι → R hi : i ∈ s h2i : g i + h i ≤ 
f i hgf : ∀ (j : ι), j ∈ s → j ≠ i → g j ≤ f j hhf : ∀ (j : ι), j ∈ s → j ≠ i → h j ≤ f j hg : ∀ (i : ι), i ∈ s → 0 ≤ g i hh : ∀ (i : ι), i ∈ s → 0 ≤ h i ⊢ ∀ (i_1 : ι), i_1 ∈ s \ {i} → 0 ≤ g i_1 [PROOFSTEP] simp (config := { contextual := true }) [*] [GOAL] case refine_1.refine_1.refine_1.refine_2 ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCommSemiring R f✝ g✝ : ι → R s t : Finset ι i : ι f g h : ι → R hi : i ∈ s h2i : g i + h i ≤ f i hgf : ∀ (j : ι), j ∈ s → j ≠ i → g j ≤ f j hhf : ∀ (j : ι), j ∈ s → j ≠ i → h j ≤ f j hg : ∀ (i : ι), i ∈ s → 0 ≤ g i hh : ∀ (i : ι), i ∈ s → 0 ≤ h i ⊢ ∀ (i_1 : ι), i_1 ∈ s \ {i} → g i_1 ≤ f i_1 [PROOFSTEP] simp (config := { contextual := true }) [*] [GOAL] case refine_1.refine_1.refine_2 ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCommSemiring R f✝ g✝ : ι → R s t : Finset ι i : ι f g h : ι → R hi : i ∈ s h2i : g i + h i ≤ f i hgf : ∀ (j : ι), j ∈ s → j ≠ i → g j ≤ f j hhf : ∀ (j : ι), j ∈ s → j ≠ i → h j ≤ f j hg : ∀ (i : ι), i ∈ s → 0 ≤ g i hh : ∀ (i : ι), i ∈ s → 0 ≤ h i ⊢ 0 ≤ g i [PROOFSTEP] try apply_assumption [GOAL] case refine_1.refine_1.refine_2 ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCommSemiring R f✝ g✝ : ι → R s t : Finset ι i : ι f g h : ι → R hi : i ∈ s h2i : g i + h i ≤ f i hgf : ∀ (j : ι), j ∈ s → j ≠ i → g j ≤ f j hhf : ∀ (j : ι), j ∈ s → j ≠ i → h j ≤ f j hg : ∀ (i : ι), i ∈ s → 0 ≤ g i hh : ∀ (i : ι), i ∈ s → 0 ≤ h i ⊢ 0 ≤ g i [PROOFSTEP] apply_assumption [GOAL] case refine_1.refine_1.refine_2.a ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCommSemiring R f✝ g✝ : ι → R s t : Finset ι i : ι f g h : ι → R hi : i ∈ s h2i : g i + h i ≤ f i hgf : ∀ (j : ι), j ∈ s → j ≠ i → g j ≤ f j hhf : ∀ (j : ι), j ∈ s → j ≠ i → h j ≤ f j hg : ∀ (i : ι), i ∈ s → 0 ≤ g i hh : ∀ (i : ι), i ∈ s → 0 ≤ h i ⊢ i ∈ s [PROOFSTEP] try assumption [GOAL] case refine_1.refine_1.refine_2.a ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCommSemiring R f✝ g✝ : ι → R s t : Finset ι i : ι f g h : ι → R hi : i ∈ s h2i : g i + h i ≤ f i hgf : ∀ (j : ι), j ∈ s → j ≠ i → g j ≤ f j hhf : ∀ (j : ι), j ∈ s → j ≠ i → h j ≤ f j hg : ∀ (i : ι), i ∈ s → 0 ≤ g i hh : ∀ (i : ι), i ∈ s → 0 ≤ h i ⊢ i ∈ s [PROOFSTEP] assumption [GOAL] case refine_1.refine_2 ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCommSemiring R f✝ g✝ : ι → R s t : Finset ι i : ι f g h : ι → R hi : i ∈ s h2i : g i + h i ≤ f i hgf : ∀ (j : ι), j ∈ s → j ≠ i → g j ≤ f j hhf : ∀ (j : ι), j ∈ s → j ≠ i → h j ≤ f j hg : ∀ (i : ι), i ∈ s → 0 ≤ g i hh : ∀ (i : ι), i ∈ s → 0 ≤ h i ⊢ h i * ∏ i in s \ {i}, h i ≤ h i * ∏ i in s \ {i}, f i [PROOFSTEP] refine mul_le_mul_of_nonneg_left ?_ ?_ [GOAL] case refine_1.refine_2.refine_1 ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCommSemiring R f✝ g✝ : ι → R s t : Finset ι i : ι f g h : ι → R hi : i ∈ s h2i : g i + h i ≤ f i hgf : ∀ (j : ι), j ∈ s → j ≠ i → g j ≤ f j hhf : ∀ (j : ι), j ∈ s → j ≠ i → h j ≤ f j hg : ∀ (i : ι), i ∈ s → 0 ≤ g i hh : ∀ (i : ι), i ∈ s → 0 ≤ h i ⊢ ∏ i in s \ {i}, h i ≤ ∏ i in s \ {i}, f i [PROOFSTEP] refine prod_le_prod ?_ ?_ [GOAL] case 
refine_1.refine_2.refine_1.refine_1 ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCommSemiring R f✝ g✝ : ι → R s t : Finset ι i : ι f g h : ι → R hi : i ∈ s h2i : g i + h i ≤ f i hgf : ∀ (j : ι), j ∈ s → j ≠ i → g j ≤ f j hhf : ∀ (j : ι), j ∈ s → j ≠ i → h j ≤ f j hg : ∀ (i : ι), i ∈ s → 0 ≤ g i hh : ∀ (i : ι), i ∈ s → 0 ≤ h i ⊢ ∀ (i_1 : ι), i_1 ∈ s \ {i} → 0 ≤ h i_1 [PROOFSTEP] simp (config := { contextual := true }) [*] [GOAL] case refine_1.refine_2.refine_1.refine_2 ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCommSemiring R f✝ g✝ : ι → R s t : Finset ι i : ι f g h : ι → R hi : i ∈ s h2i : g i + h i ≤ f i hgf : ∀ (j : ι), j ∈ s → j ≠ i → g j ≤ f j hhf : ∀ (j : ι), j ∈ s → j ≠ i → h j ≤ f j hg : ∀ (i : ι), i ∈ s → 0 ≤ g i hh : ∀ (i : ι), i ∈ s → 0 ≤ h i ⊢ ∀ (i_1 : ι), i_1 ∈ s \ {i} → h i_1 ≤ f i_1 [PROOFSTEP] simp (config := { contextual := true }) [*] [GOAL] case refine_1.refine_2.refine_2 ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCommSemiring R f✝ g✝ : ι → R s t : Finset ι i : ι f g h : ι → R hi : i ∈ s h2i : g i + h i ≤ f i hgf : ∀ (j : ι), j ∈ s → j ≠ i → g j ≤ f j hhf : ∀ (j : ι), j ∈ s → j ≠ i → h j ≤ f j hg : ∀ (i : ι), i ∈ s → 0 ≤ g i hh : ∀ (i : ι), i ∈ s → 0 ≤ h i ⊢ 0 ≤ h i [PROOFSTEP] try apply_assumption [GOAL] case refine_1.refine_2.refine_2 ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCommSemiring R f✝ g✝ : ι → R s t : Finset ι i : ι f g h : ι → R hi : i ∈ s h2i : g i + h i ≤ f i hgf : ∀ (j : ι), j ∈ s → j ≠ i → g j ≤ f j hhf : ∀ (j : ι), j ∈ s → j ≠ i → h j ≤ f j hg : ∀ (i : ι), i ∈ s → 0 ≤ g i hh : ∀ (i : ι), i ∈ s → 0 ≤ h i ⊢ 0 ≤ h i [PROOFSTEP] apply_assumption [GOAL] case refine_1.refine_2.refine_2.a ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCommSemiring R f✝ g✝ : ι → R s t : Finset ι i : ι f g h : ι → R hi : i ∈ s h2i : g i + h i ≤ f i hgf : ∀ (j : ι), j ∈ s → j ≠ i → g j ≤ f j hhf : ∀ (j : ι), j ∈ s → j ≠ i → h j ≤ f j hg : ∀ (i : ι), i ∈ s → 0 ≤ g i hh : ∀ (i : ι), i ∈ s → 0 ≤ h i ⊢ i ∈ s [PROOFSTEP] try assumption [GOAL] case refine_1.refine_2.refine_2.a ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCommSemiring R f✝ g✝ : ι → R s t : Finset ι i : ι f g h : ι → R hi : i ∈ s h2i : g i + h i ≤ f i hgf : ∀ (j : ι), j ∈ s → j ≠ i → g j ≤ f j hhf : ∀ (j : ι), j ∈ s → j ≠ i → h j ≤ f j hg : ∀ (i : ι), i ∈ s → 0 ≤ g i hh : ∀ (i : ι), i ∈ s → 0 ≤ h i ⊢ i ∈ s [PROOFSTEP] assumption [GOAL] case refine_2 ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCommSemiring R f✝ g✝ : ι → R s t : Finset ι i : ι f g h : ι → R hi : i ∈ s h2i : g i + h i ≤ f i hgf : ∀ (j : ι), j ∈ s → j ≠ i → g j ≤ f j hhf : ∀ (j : ι), j ∈ s → j ≠ i → h j ≤ f j hg : ∀ (i : ι), i ∈ s → 0 ≤ g i hh : ∀ (i : ι), i ∈ s → 0 ≤ h i ⊢ 0 ≤ ∏ i in s \ {i}, f i [PROOFSTEP] apply prod_nonneg [GOAL] case refine_2.h0 ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCommSemiring R f✝ g✝ : ι → R s t : Finset ι i : ι f g h : ι → R hi : i ∈ s h2i : g i + h i ≤ f i hgf : ∀ (j : ι), j ∈ s → j ≠ i → g j ≤ f j hhf : ∀ (j : ι), j ∈ s → j ≠ i → h j ≤ f j 
hg : ∀ (i : ι), i ∈ s → 0 ≤ g i hh : ∀ (i : ι), i ∈ s → 0 ≤ h i ⊢ ∀ (i_1 : ι), i_1 ∈ s \ {i} → 0 ≤ f i_1 [PROOFSTEP] simp only [and_imp, mem_sdiff, mem_singleton] [GOAL] case refine_2.h0 ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCommSemiring R f✝ g✝ : ι → R s t : Finset ι i : ι f g h : ι → R hi : i ∈ s h2i : g i + h i ≤ f i hgf : ∀ (j : ι), j ∈ s → j ≠ i → g j ≤ f j hhf : ∀ (j : ι), j ∈ s → j ≠ i → h j ≤ f j hg : ∀ (i : ι), i ∈ s → 0 ≤ g i hh : ∀ (i : ι), i ∈ s → 0 ≤ h i ⊢ ∀ (i_1 : ι), i_1 ∈ s → ¬i_1 = i → 0 ≤ f i_1 [PROOFSTEP] intro j h1j h2j [GOAL] case refine_2.h0 ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : OrderedCommSemiring R f✝ g✝ : ι → R s t : Finset ι i : ι f g h : ι → R hi : i ∈ s h2i : g i + h i ≤ f i hgf : ∀ (j : ι), j ∈ s → j ≠ i → g j ≤ f j hhf : ∀ (j : ι), j ∈ s → j ≠ i → h j ≤ f j hg : ∀ (i : ι), i ∈ s → 0 ≤ g i hh : ∀ (i : ι), i ∈ s → 0 ≤ h i j : ι h1j : j ∈ s h2j : ¬j = i ⊢ 0 ≤ f j [PROOFSTEP] exact le_trans (hg j h1j) (hgf j h1j h2j) [GOAL] ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : CanonicallyOrderedCommSemiring R f g h : ι → R s : Finset ι i : ι hi : i ∈ s h2i : g i + h i ≤ f i hgf : ∀ (j : ι), j ∈ s → j ≠ i → g j ≤ f j hhf : ∀ (j : ι), j ∈ s → j ≠ i → h j ≤ f j ⊢ ∏ i in s, g i + ∏ i in s, h i ≤ ∏ i in s, f i [PROOFSTEP] classical simp_rw [prod_eq_mul_prod_diff_singleton hi] refine' le_trans _ (mul_le_mul_right' h2i _) rw [right_distrib] apply add_le_add <;> apply mul_le_mul_left' <;> apply prod_le_prod' <;> simp only [and_imp, mem_sdiff, mem_singleton] <;> intros <;> apply_assumption <;> assumption [GOAL] ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : CanonicallyOrderedCommSemiring R f g h : ι → R s : Finset ι i : ι hi : i ∈ s h2i : g i + h i ≤ f i hgf : ∀ (j : ι), j ∈ s → j ≠ i → g j ≤ f j hhf : ∀ (j : ι), j ∈ s → j ≠ i → h j ≤ f j ⊢ ∏ i in s, g i + ∏ i in s, h i ≤ ∏ i in s, f i [PROOFSTEP] simp_rw [prod_eq_mul_prod_diff_singleton hi] [GOAL] ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : CanonicallyOrderedCommSemiring R f g h : ι → R s : Finset ι i : ι hi : i ∈ s h2i : g i + h i ≤ f i hgf : ∀ (j : ι), j ∈ s → j ≠ i → g j ≤ f j hhf : ∀ (j : ι), j ∈ s → j ≠ i → h j ≤ f j ⊢ g i * ∏ i in s \ {i}, g i + h i * ∏ i in s \ {i}, h i ≤ f i * ∏ i in s \ {i}, f i [PROOFSTEP] refine' le_trans _ (mul_le_mul_right' h2i _) [GOAL] ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : CanonicallyOrderedCommSemiring R f g h : ι → R s : Finset ι i : ι hi : i ∈ s h2i : g i + h i ≤ f i hgf : ∀ (j : ι), j ∈ s → j ≠ i → g j ≤ f j hhf : ∀ (j : ι), j ∈ s → j ≠ i → h j ≤ f j ⊢ g i * ∏ i in s \ {i}, g i + h i * ∏ i in s \ {i}, h i ≤ (g i + h i) * ∏ i in s \ {i}, f i [PROOFSTEP] rw [right_distrib] [GOAL] ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : CanonicallyOrderedCommSemiring R f g h : ι → R s : Finset ι i : ι hi : i ∈ s h2i : g i + h i ≤ f i hgf : ∀ (j : ι), j ∈ s → j ≠ i → g j ≤ f j hhf : ∀ (j : ι), j ∈ s → j ≠ i → h j ≤ f j ⊢ g i * ∏ i in s \ {i}, g i + h i * ∏ i in s \ {i}, h i ≤ g i * ∏ i in s \ {i}, f i + h i * ∏ i in s \ {i}, f i [PROOFSTEP] apply add_le_add [GOAL] case h₁ ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N 
: Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : CanonicallyOrderedCommSemiring R f g h : ι → R s : Finset ι i : ι hi : i ∈ s h2i : g i + h i ≤ f i hgf : ∀ (j : ι), j ∈ s → j ≠ i → g j ≤ f j hhf : ∀ (j : ι), j ∈ s → j ≠ i → h j ≤ f j ⊢ g i * ∏ i in s \ {i}, g i ≤ g i * ∏ i in s \ {i}, f i [PROOFSTEP] apply mul_le_mul_left' [GOAL] case h₂ ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : CanonicallyOrderedCommSemiring R f g h : ι → R s : Finset ι i : ι hi : i ∈ s h2i : g i + h i ≤ f i hgf : ∀ (j : ι), j ∈ s → j ≠ i → g j ≤ f j hhf : ∀ (j : ι), j ∈ s → j ≠ i → h j ≤ f j ⊢ h i * ∏ i in s \ {i}, h i ≤ h i * ∏ i in s \ {i}, f i [PROOFSTEP] apply mul_le_mul_left' [GOAL] case h₁.bc ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : CanonicallyOrderedCommSemiring R f g h : ι → R s : Finset ι i : ι hi : i ∈ s h2i : g i + h i ≤ f i hgf : ∀ (j : ι), j ∈ s → j ≠ i → g j ≤ f j hhf : ∀ (j : ι), j ∈ s → j ≠ i → h j ≤ f j ⊢ ∏ i in s \ {i}, g i ≤ ∏ i in s \ {i}, f i [PROOFSTEP] apply prod_le_prod' [GOAL] case h₂.bc ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : CanonicallyOrderedCommSemiring R f g h : ι → R s : Finset ι i : ι hi : i ∈ s h2i : g i + h i ≤ f i hgf : ∀ (j : ι), j ∈ s → j ≠ i → g j ≤ f j hhf : ∀ (j : ι), j ∈ s → j ≠ i → h j ≤ f j ⊢ ∏ i in s \ {i}, h i ≤ ∏ i in s \ {i}, f i [PROOFSTEP] apply prod_le_prod' [GOAL] case h₁.bc.h ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : CanonicallyOrderedCommSemiring R f g h : ι → R s : Finset ι i : ι hi : i ∈ s h2i : g i + h i ≤ f i hgf : ∀ (j : ι), j ∈ s → j ≠ i → g j ≤ f j hhf : ∀ (j : ι), j ∈ s → j ≠ i → h j ≤ f j ⊢ ∀ (i_1 : ι), i_1 ∈ s \ {i} → g i_1 ≤ f i_1 [PROOFSTEP] simp only [and_imp, mem_sdiff, mem_singleton] [GOAL] case h₂.bc.h ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : CanonicallyOrderedCommSemiring R f g h : ι → R s : Finset ι i : ι hi : i ∈ s h2i : g i + h i ≤ f i hgf : ∀ (j : ι), j ∈ s → j ≠ i → g j ≤ f j hhf : ∀ (j : ι), j ∈ s → j ≠ i → h j ≤ f j ⊢ ∀ (i_1 : ι), i_1 ∈ s \ {i} → h i_1 ≤ f i_1 [PROOFSTEP] simp only [and_imp, mem_sdiff, mem_singleton] [GOAL] case h₁.bc.h ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : CanonicallyOrderedCommSemiring R f g h : ι → R s : Finset ι i : ι hi : i ∈ s h2i : g i + h i ≤ f i hgf : ∀ (j : ι), j ∈ s → j ≠ i → g j ≤ f j hhf : ∀ (j : ι), j ∈ s → j ≠ i → h j ≤ f j ⊢ ∀ (i_1 : ι), i_1 ∈ s → ¬i_1 = i → g i_1 ≤ f i_1 [PROOFSTEP] intros [GOAL] case h₂.bc.h ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : CanonicallyOrderedCommSemiring R f g h : ι → R s : Finset ι i : ι hi : i ∈ s h2i : g i + h i ≤ f i hgf : ∀ (j : ι), j ∈ s → j ≠ i → g j ≤ f j hhf : ∀ (j : ι), j ∈ s → j ≠ i → h j ≤ f j ⊢ ∀ (i_1 : ι), i_1 ∈ s → ¬i_1 = i → h i_1 ≤ f i_1 [PROOFSTEP] intros [GOAL] case h₁.bc.h ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : CanonicallyOrderedCommSemiring R f g h : ι → R s : Finset ι i : ι hi : i ∈ s h2i : g i + h i ≤ f i hgf : ∀ (j : ι), j ∈ s → j ≠ i → g j ≤ f j hhf : ∀ (j : ι), j ∈ s → j ≠ i → h j ≤ f j i✝ : ι a✝¹ : i✝ ∈ s a✝ : ¬i✝ = i ⊢ g i✝ ≤ f i✝ [PROOFSTEP] apply_assumption [GOAL] case h₂.bc.h ι : Type 
u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : CanonicallyOrderedCommSemiring R f g h : ι → R s : Finset ι i : ι hi : i ∈ s h2i : g i + h i ≤ f i hgf : ∀ (j : ι), j ∈ s → j ≠ i → g j ≤ f j hhf : ∀ (j : ι), j ∈ s → j ≠ i → h j ≤ f j i✝ : ι a✝¹ : i✝ ∈ s a✝ : ¬i✝ = i ⊢ h i✝ ≤ f i✝ [PROOFSTEP] apply_assumption [GOAL] case h₁.bc.h.a ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : CanonicallyOrderedCommSemiring R f g h : ι → R s : Finset ι i : ι hi : i ∈ s h2i : g i + h i ≤ f i hgf : ∀ (j : ι), j ∈ s → j ≠ i → g j ≤ f j hhf : ∀ (j : ι), j ∈ s → j ≠ i → h j ≤ f j i✝ : ι a✝¹ : i✝ ∈ s a✝ : ¬i✝ = i ⊢ i✝ ∈ s [PROOFSTEP] assumption [GOAL] case h₁.bc.h.a ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : CanonicallyOrderedCommSemiring R f g h : ι → R s : Finset ι i : ι hi : i ∈ s h2i : g i + h i ≤ f i hgf : ∀ (j : ι), j ∈ s → j ≠ i → g j ≤ f j hhf : ∀ (j : ι), j ∈ s → j ≠ i → h j ≤ f j i✝ : ι a✝¹ : i✝ ∈ s a✝ : ¬i✝ = i ⊢ i✝ ≠ i [PROOFSTEP] assumption [GOAL] case h₂.bc.h.a ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : CanonicallyOrderedCommSemiring R f g h : ι → R s : Finset ι i : ι hi : i ∈ s h2i : g i + h i ≤ f i hgf : ∀ (j : ι), j ∈ s → j ≠ i → g j ≤ f j hhf : ∀ (j : ι), j ∈ s → j ≠ i → h j ≤ f j i✝ : ι a✝¹ : i✝ ∈ s a✝ : ¬i✝ = i ⊢ i✝ ∈ s [PROOFSTEP] assumption [GOAL] case h₂.bc.h.a ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : CanonicallyOrderedCommSemiring R f g h : ι → R s : Finset ι i : ι hi : i ∈ s h2i : g i + h i ≤ f i hgf : ∀ (j : ι), j ∈ s → j ≠ i → g j ≤ f j hhf : ∀ (j : ι), j ∈ s → j ≠ i → h j ≤ f j i✝ : ι a✝¹ : i✝ ∈ s a✝ : ¬i✝ = i ⊢ i✝ ≠ i [PROOFSTEP] assumption [GOAL] ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : AddCommMonoid M s : Finset ι f : ι → WithTop M ⊢ ∑ i in s, f i = ⊤ ↔ ∃ i, i ∈ s ∧ f i = ⊤ [PROOFSTEP] induction s using Finset.cons_induction [GOAL] case empty ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : AddCommMonoid M f : ι → WithTop M ⊢ ∑ i in ∅, f i = ⊤ ↔ ∃ i, i ∈ ∅ ∧ f i = ⊤ [PROOFSTEP] simp [*] [GOAL] case cons ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝ : AddCommMonoid M f : ι → WithTop M a✝¹ : ι s✝ : Finset ι h✝ : ¬a✝¹ ∈ s✝ a✝ : ∑ i in s✝, f i = ⊤ ↔ ∃ i, i ∈ s✝ ∧ f i = ⊤ ⊢ ∑ i in cons a✝¹ s✝ h✝, f i = ⊤ ↔ ∃ i, i ∈ cons a✝¹ s✝ h✝ ∧ f i = ⊤ [PROOFSTEP] simp [*] [GOAL] ι : Type u_1 α : Type u_2 β : Type u_3 M : Type u_4 N : Type u_5 G : Type u_6 k : Type u_7 R : Type u_8 inst✝¹ : AddCommMonoid M inst✝ : LT M s : Finset ι f : ι → WithTop M ⊢ ∑ i in s, f i < ⊤ ↔ ∀ (i : ι), i ∈ s → f i < ⊤ [PROOFSTEP] simp only [WithTop.lt_top_iff_ne_top, ne_eq, sum_eq_top_iff, not_exists, not_and]
module JudiLing using DataFrames using Random, Distributions using SparseArrays, LinearAlgebra, Statistics, SuiteSparse using BenchmarkTools using DataStructures using ProgressMeter using CSV using GZip using PyCall include("utils.jl") include("pyndl.jl") include("wh.jl") include("make_cue_matrix.jl") include("make_semantic_matrix.jl") include("cholesky.jl") include("make_adjacency_matrix.jl") include("make_yt_matrix.jl") include("find_path.jl") include("eval.jl") include("output.jl") include("preprocess.jl") include("pickle.jl") include("test_combo.jl") include("display.jl") end
/- Copyright (c) 2020 Pim Spelier, Daan van Gent. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Pim Spelier, Daan van Gent. -/ import Mathlib.PrePort import Mathlib.Lean3Lib.init.default import Mathlib.data.fintype.basic import Mathlib.data.num.lemmas import Mathlib.tactic.derive_fintype import Mathlib.PostPort universes l namespace Mathlib /-! # Encodings This file contains the definition of a (finite) encoding, a map from a type to strings in an alphabet, used in defining computability by Turing machines. It also contains several examples: ## Examples - `fin_encoding_nat_bool` : a binary encoding of ℕ in a simple alphabet. - `fin_encoding_nat_Γ'` : a binary encoding of ℕ in the alphabet used for TM's. - `unary_fin_encoding_nat` : a unary encoding of ℕ - `fin_encoding_bool_bool` : an encoding of bool. -/ namespace computability /-- An encoding of a type in a certain alphabet, together with a decoding. -/ structure encoding (α : Type) where Γ : Type encode : α → List Γ decode : List Γ → Option α decode_encode : ∀ (x : α), decode (encode x) = some x /-- An encoding plus a guarantee of finiteness of the alphabet. -/ structure fin_encoding (α : Type) extends encoding α where Γ_fin : fintype (encoding.Γ _to_encoding) /-- A standard Turing machine alphabet, consisting of blank,bit0,bit1,bra,ket,comma. -/ inductive Γ' where | blank : Γ' | bit : Bool → Γ' | bra : Γ' | ket : Γ' | comma : Γ' protected instance inhabited_Γ' : Inhabited Γ' := { default := Γ'.blank } /-- The natural inclusion of bool in Γ'. -/ def inclusion_bool_Γ' : Bool → Γ' := Γ'.bit /-- An arbitrary section of the natural inclusion of bool in Γ'. -/ def section_Γ'_bool : Γ' → Bool := sorry theorem left_inverse_section_inclusion : function.left_inverse section_Γ'_bool inclusion_bool_Γ' := fun (x : Bool) => bool.cases_on x rfl rfl theorem inclusion_bool_Γ'_injective : function.injective inclusion_bool_Γ' := function.has_left_inverse.injective (Exists.intro section_Γ'_bool left_inverse_section_inclusion) /-- An encoding function of the positive binary numbers in bool. -/ def encode_pos_num : pos_num → List Bool := sorry /-- An encoding function of the binary numbers in bool. -/ def encode_num : num → List Bool := sorry /-- An encoding function of ℕ in bool. -/ def encode_nat (n : ℕ) : List Bool := encode_num ↑n /-- A decoding function from `list bool` to the positive binary numbers. -/ def decode_pos_num : List Bool → pos_num := sorry /-- A decoding function from `list bool` to the binary numbers. -/ def decode_num : List Bool → num := fun (l : List Bool) => ite (l = []) num.zero ↑(decode_pos_num l) /-- A decoding function from `list bool` to ℕ. -/ def decode_nat : List Bool → ℕ := fun (l : List Bool) => ↑(decode_num l) theorem encode_pos_num_nonempty (n : pos_num) : encode_pos_num n ≠ [] := pos_num.cases_on n (list.cons_ne_nil tt []) (fun (m : pos_num) => list.cons_ne_nil tt (encode_pos_num m)) fun (m : pos_num) => list.cons_ne_nil false (encode_pos_num m) theorem decode_encode_pos_num (n : pos_num) : decode_pos_num (encode_pos_num n) = n := sorry theorem decode_encode_num (n : num) : decode_num (encode_num n) = n := sorry theorem decode_encode_nat (n : ℕ) : decode_nat (encode_nat n) = n := sorry /-- A binary encoding of ℕ in bool. -/ def encoding_nat_bool : encoding ℕ := encoding.mk Bool encode_nat (fun (n : List Bool) => some (decode_nat n)) sorry /-- A binary fin_encoding of ℕ in bool. 
-/ def fin_encoding_nat_bool : fin_encoding ℕ := fin_encoding.mk encoding_nat_bool bool.fintype /-- A binary encoding of ℕ in Γ'. -/ def encoding_nat_Γ' : encoding ℕ := encoding.mk Γ' (fun (x : ℕ) => list.map inclusion_bool_Γ' (encode_nat x)) (fun (x : List Γ') => some (decode_nat (list.map section_Γ'_bool x))) sorry /-- A binary fin_encoding of ℕ in Γ'. -/ def fin_encoding_nat_Γ' : fin_encoding ℕ := fin_encoding.mk encoding_nat_Γ' Γ'.fintype /-- A unary encoding function of ℕ in bool. -/ def unary_encode_nat : ℕ → List Bool := sorry /-- A unary decoding function from `list bool` to ℕ. -/ def unary_decode_nat : List Bool → ℕ := list.length theorem unary_decode_encode_nat (n : ℕ) : unary_decode_nat (unary_encode_nat n) = n := Nat.rec rfl (fun (m : ℕ) (hm : unary_decode_nat (unary_encode_nat m) = m) => Eq.symm (congr_arg Nat.succ (Eq.symm hm))) n /-- A unary fin_encoding of ℕ. -/ def unary_fin_encoding_nat : fin_encoding ℕ := fin_encoding.mk (encoding.mk Bool unary_encode_nat (fun (n : List Bool) => some (unary_decode_nat n)) sorry) bool.fintype /-- An encoding function of bool in bool. -/ def encode_bool : Bool → List Bool := list.ret /-- A decoding function from `list bool` to bool. -/ def decode_bool : List Bool → Bool := sorry theorem decode_encode_bool (b : Bool) : decode_bool (encode_bool b) = b := bool.cases_on b rfl rfl /-- A fin_encoding of bool in bool. -/ def fin_encoding_bool_bool : fin_encoding Bool := fin_encoding.mk (encoding.mk Bool encode_bool (fun (x : List Bool) => some (decode_bool x)) sorry) bool.fintype protected instance inhabited_fin_encoding : Inhabited (fin_encoding Bool) := { default := fin_encoding_bool_bool } protected instance inhabited_encoding : Inhabited (encoding Bool) := { default := fin_encoding.to_encoding fin_encoding_bool_bool }
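The ported definitions above are left as `sorry` stubs by mathport. As a rough cross-language illustration (an assumption-laden sketch in Python, not the mathlib source), the little-endian binary encoding of ℕ as lists of booleans that this file describes, together with its round-trip property `decode (encode n) = n`, looks like this:

```python
# Minimal sketch of the binary encoding described above.
# Assumption: head of the list is the least significant bit, and 0 encodes to [].

def encode_nat(n: int) -> list[bool]:
    """Encode a natural number as its little-endian list of bits; 0 maps to []."""
    bits = []
    while n > 0:
        bits.append(n % 2 == 1)
        n //= 2
    return bits

def decode_nat(bits: list[bool]) -> int:
    """Decode a little-endian list of bits back to a natural number."""
    n = 0
    for b in reversed(bits):
        n = 2 * n + (1 if b else 0)
    return n

# Round-trip property, the analogue of decode_encode_nat, checked on a few values:
assert all(decode_nat(encode_nat(n)) == n for n in range(1000))
```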
[STATEMENT] lemma power_strict_mono: shows "a < b \<Longrightarrow> 0 \<le> a \<Longrightarrow> 0 < n \<Longrightarrow> a ^ n < b ^ n" [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>a < b; (0::'a) \<le> a; 0 < n\<rbrakk> \<Longrightarrow> a ^ n < b ^ n [PROOF STEP] by (subst nonneg_power_less, auto)
Public vs. Private Cloud Server: Which Is Best for You? These days, backing up your information on a cloud server is a safe bet. That goes double if you're running a business and need storage. But should you choose a public cloud server or a private cloud server? The kind of server you should get depends on your needs, and both have benefits worth weighing before you decide. In this article, we will discuss private and public cloud servers and why you may choose one over the other.

Looking to secure data on the cloud? One of the primary advantages of cloud hosting is security. Cloud applications are designed so that vulnerabilities can be patched immediately rather than having to wait for users to install updates. But with recent data breaches at large cloud providers, how safe is the cloud? When you choose a cloud provider, your number one question should be: how will they keep my information safe? While cloud providers can assure you that security is their top concern, you can also take steps of your own to ensure your information doesn't fall victim to security breaches and hackers.

Looking for a place to host your business applications and data? Outgrown your current hosting plan? It might be time to make the switch to cloud servers. Cloud dedicated servers are the future of business hosting. From reduced cost to increased storage space, they outperform their traditional hosting counterparts in almost every way. Let's take a look at why your business needs a cloud dedicated server.

Cloud hosting service providers span a vast range of services, capabilities, and expertise. The best cloud hosting providers offer a secure and reliable infrastructure platform capable of supporting and scaling business-critical operations easily and consistently. But it can be difficult for prospective cloud clients to tell the difference between a great cloud hosting service provider and one that can't offer the best service.

Cloud computing is the use of internet-connected servers to host software and virtual infrastructure that are accessed and controlled over the web or an API. The phrase "cloud computing" has only been used for a decade or so, but the history of cloud computing stretches back much further.

We're rapidly approaching the tenth anniversary of the cloud. In the last decade, the cloud has fundamentally reshaped the infrastructure hosting and enterprise IT space, and in the process has matured enormously. The cloud has evolved from a rudimentary alternative to on-prem data centers into the highly capable and flexible platform we take for granted today. It will continue to evolve in the future, in ways that are both predictable and beyond our ability to forecast. The future of the cloud will be managed and hybrid, will make heavy use of containers, and will be distributed beyond the traditional data center.
# Regression Using Bayesian Theory

The linear regression we have seen so far uses a deterministic model to analyse and predict data, but it is also possible to perform linear regression with a probabilistic model. Let us look at a few such models; for more information visit this __[link](https://scikit-learn.org/stable/modules/linear_model.html#bayesian-regression)__.

These models rely on Bayes' theorem, which states:

\begin{equation}
\Large P(H_{0}|E) = \frac{P(E|H_{0})P(H_{0})}{P(E)}
\end{equation}

where $P(H_{0}|E)$ is the posterior probability of $H_{0}$ given the evidence $E$, $P(H_{0})$ is the prior probability of $H_{0}$, $P(E)$ is the probability of observing $E$, called the ***marginal probability*** (the evidence), and $P(E|H_{0})$ is the **likelihood function**, i.e. a conditional probability in which **the first argument ($E$) is fixed while the second ($H_{0}$) varies**.

In these models one introduces an uninformative prior distribution over the hyperparameters, which must then be estimated from the data through the theorem above. For example, suppose we have data that we believe are normally distributed; the model will then have to determine:

\begin{equation}
\Large p(y | X,w,\alpha) = \mathcal{N}(Xw, \alpha)
\end{equation}

that is, a Gaussian distribution centred at $Xw$ with a noise parameter $\alpha$ (in scikit-learn, the fitted `alpha_` is the estimated noise precision) that must be determined from the data.

**Among the advantages:**
- these models adapt to the data by construction
- we can introduce regularisation terms to make them more robust

**Among the disadvantages:**
- statistical inference goes through Bayes' theorem, which is computationally very expensive in terms of resources.

## Bayesian Ridge Regression

This Bayesian regression model uses an $l_2$ regularisation term, as in Ridge Regression; from the probabilistic point of view it places a Gaussian prior on the weights:

\begin{equation}
\Large p(w|\lambda) = \mathcal{N}(w|0, \lambda^{-1}I_p)
\end{equation}

To be precise, the weights are determined by **maximum a posteriori (MAP) estimation using the log marginal likelihood**, i.e.:

\begin{equation}
\Large W = \max_{w} \quad \exp\left[-\frac{(y-X\beta)^{T}(y-X\beta)}{2\sigma^{2}} -\frac{\parallel \beta \parallel^{2}_{2}}{2\tau^2}\right]\\
\Large W = \min_{w} (y-X\beta)^{T}(y-X\beta) + \lambda \parallel \beta \parallel^{2}_{2}
\end{equation}

(taking the negative logarithm turns the maximisation of the Gaussian posterior into the familiar ridge minimisation problem). Other parameters appear in the implementation of the algorithm; what we need to know is that the $\alpha$ and $\beta$ values are parameters of a __[gamma distribution](https://en.wikipedia.org/wiki/Gamma_distribution)__, a continuous distribution, and that they are estimated from the data starting from initial values. Uninformative values, i.e. ones that carry only vague information about the variable, are usually set small so as not to influence the estimated distribution; for more details consult __[scikit](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.BayesianRidge.html#sklearn.linear_model.BayesianRidge)__.
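Before using scikit-learn, here is a minimal sketch of the posterior over the weights in closed form, assuming the noise precision $\alpha$ and the prior precision $\lambda$ are fixed and known; `BayesianRidge`, by contrast, also estimates these two precisions from the data. All numbers below are synthetic and purely illustrative.

```python
# Minimal sketch: closed-form Gaussian posterior over the weight of a
# 1-feature linear model, with *fixed* precisions alpha (noise) and lam (prior),
# matching the Gaussian model written above. Illustration only.
import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(50, 1))
true_w = 2.0
y = true_w * X[:, 0] + rng.normal(scale=0.5, size=50)

alpha = 1 / 0.5**2   # noise precision (assumed known here)
lam = 1.0            # prior precision: p(w) = N(0, lam^{-1} I)

# Posterior p(w | X, y) = N(mean, cov), the standard conjugate update
cov = np.linalg.inv(alpha * X.T @ X + lam * np.eye(1))
mean = alpha * cov @ X.T @ y

print(f"posterior mean of w: {mean[0]:.3f}, posterior std: {np.sqrt(cov[0, 0]):.3f}")
```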
```python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import BayesianRidge
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split

# load the dataset (load_boston is only available in older scikit-learn versions)
data = load_boston()
df = pd.DataFrame(data.data, columns = data.feature_names)
df['MEDV'] = data.target

# prepare the data
X_train, X_test, y_train, y_test = train_test_split(df.LSTAT.values, df.MEDV.values, random_state=0, test_size = 0.2)

bayesian_ridge = BayesianRidge(compute_score=True)
bayesian_ridge.fit(X_train.reshape(-1, 1), y_train)

# set up the plot
fig, ax = plt.subplots(figsize=(8, 6))
ax.scatter(X_train, y_train, label = "train", color = "blue")
ax.scatter(X_test, y_test, label = "test", color = "green")
line, = ax.plot(X_train, bayesian_ridge.predict(X_train.reshape(-1, 1)), label = "fit", color = "red")
plt.legend()
plt.show()

print('Bayesian Regression parameters estimated:')
print(f'Estimated precision of noise: {bayesian_ridge.alpha_}')
print(f'Estimated precision of weights: {bayesian_ridge.lambda_}')
print(f'Estimated variance-covariance matrix of the weights: {bayesian_ridge.sigma_}')
# n_iter is the iteration cap passed to the constructor; the fitted attribute n_iter_ holds the actual count
print(f'Number of iterations applied to obtain estimated parameters: {bayesian_ridge.n_iter}')
print(f'Value of the log marginal likelihood (to be maximized) at each iteration: {bayesian_ridge.scores_}')
print(f'Coefficients obtained: {bayesian_ridge.coef_}')
print(f'R^2 score on training set: {bayesian_ridge.score(X_train.reshape(-1, 1), y_train)}, '
      f'test set: {bayesian_ridge.score(X_test.reshape(-1, 1), y_test)}')
```

```python
# use Bayesian regression with multiple features
X, Y = df.drop('MEDV', axis = 1).values, df.MEDV.values
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, random_state=0, test_size = 0.2)

bayesian_ridge = BayesianRidge(compute_score=True)
# fit on this split's targets (the original cell reused y_train from the previous cell,
# which only worked because both splits share the same random_state)
bayesian_ridge.fit(X_train, Y_train)

print('Bayesian Regression parameters estimated:')
print(f'Estimated precision of noise: {bayesian_ridge.alpha_}')
print(f'Estimated precision of weights: {bayesian_ridge.lambda_}')
print(f'Estimated variance-covariance matrix of the weights:\n {bayesian_ridge.sigma_}')
print(f'Number of iterations applied to obtain estimated parameters: {bayesian_ridge.n_iter}')
print(f'Value of the log marginal likelihood (to be maximized) at each iteration: {bayesian_ridge.scores_}')
print(f'Coefficients obtained: {bayesian_ridge.coef_}')
print(f'R^2 score on training set: {bayesian_ridge.score(X_train, Y_train)}, '
      f'test set: {bayesian_ridge.score(X_test, Y_test)}')
```

    Bayesian Regression parameters estimated:
    Estimated precision of noise: 0.04852260984263468
    Estimated precision of weights: 0.6102776668911499
    Estimated variance-covariance matrix of the weights:
     [[ 1.37807667e-03 -4.21686237e-05 1.14783119e-04 1.33038925e-03 5.23898493e-04 -5.03102014e-04 2.54259490e-05 8.10729775e-04 -7.08264350e-04 1.73865830e-06 4.19197193e-06 9.65422950e-07 -4.28286306e-04]
     [-4.21686237e-05 2.11934550e-04 9.46457792e-05 -3.36739425e-05 2.26167868e-04 -1.19769254e-03 3.40602275e-05 -1.23376941e-03 1.15702942e-04 -1.22646117e-05 6.28935842e-04 -9.28306016e-07 -5.52116221e-05]
     [ 1.14783119e-04 9.46457792e-05 3.84978972e-03 -3.29431563e-03 -5.88290917e-03 2.96787024e-03 -4.79759035e-05 3.86353396e-03 1.23200302e-03 -1.19937394e-04 -6.99517992e-04 8.49353980e-06 -3.29102032e-04]
     [ 1.33038925e-03 -3.36739425e-05 -3.29431563e-03 5.54104395e-01 3.22722392e-03 -1.40269180e-02 -2.10841800e-04 4.20558065e-03 -5.26855992e-03 2.82056262e-04 6.89665918e-03 -1.46206232e-04 5.23290591e-04]
     [ 5.23898493e-04 2.26167868e-04 -5.88290917e-03 3.22722392e-03 1.50306746e+00 1.40723837e-02 -1.51436902e-03 2.06812830e-02 -4.63635763e-03 -9.52089831e-05 1.54729761e-02 8.47550865e-05 -8.11699603e-04]
     [-5.03102014e-04 -1.19769254e-03 2.96787024e-03 -1.40269180e-02 1.40723837e-02 1.88532573e-01 -1.00276279e-03 1.23364142e-02 -3.96165695e-03 1.36181246e-04 7.05598456e-03 1.40262994e-04 1.23707593e-02]
     [ 2.54259490e-05 3.40602275e-05 -4.79759035e-05 -2.10841800e-04 -1.51436902e-03 -1.00276279e-03 1.92886918e-04 1.11553215e-03 1.41681895e-05 -2.21449764e-06 6.89317812e-05 -3.43306916e-06 -2.82745377e-04]
     [ 8.10729775e-04 -1.23376941e-03 3.86353396e-03 4.20558065e-03 2.06812830e-02 1.23364142e-02 1.11553215e-03 4.24687506e-02 5.06790571e-04 1.39860154e-05 -6.06660615e-03 8.08883058e-06 -1.11213750e-04]
     [-7.08264350e-04 1.15702942e-04 1.23200302e-03 -5.26855992e-03 -4.63635763e-03 -3.96165695e-03 1.41681895e-05 5.06790571e-04 4.84860106e-03 -2.19995676e-04 -1.29297181e-03 2.71447407e-05 -3.82643525e-05]
     [ 1.73865830e-06 -1.22646117e-05 -1.19937394e-04 2.82056262e-04 -9.52089831e-05 1.36181246e-04 -2.21449764e-06 1.39860154e-05 -2.19995676e-04 1.54552120e-05 -3.29135378e-05 2.00490745e-07 2.10006857e-06]
     [ 4.19197193e-06 6.28935842e-04 -6.99517992e-04 6.89665918e-03 1.54729761e-02 7.05598456e-03 6.89317812e-05 -6.06660615e-03 -1.29297181e-03 -3.29135378e-05 1.72110542e-02 -3.21893047e-05 -4.28030248e-04]
     [ 9.65422950e-07 -9.28306016e-07 8.49353980e-06 -1.46206232e-04 8.47550865e-05 1.40262994e-04 -3.43306916e-06 8.08883058e-06 2.71447407e-05 2.00490745e-07 -3.21893047e-05 8.87238476e-06 3.82467045e-05]
     [-4.28286306e-04 -5.52116221e-05 -3.29102032e-04 5.23290591e-04 -8.11699603e-04 1.23707593e-02 -2.82745377e-04 -1.11213750e-04 -3.82643525e-05 2.10006857e-06 -4.28030248e-04 3.82467045e-05 2.91326441e-03]]
    Number of iterations applied to obtain estimated parameters: 300
    Value of the log marginal likelihood (to be maximized) at each iteration: [-1353.62507777 -1227.46832923 -1226.12557601 -1225.95851367 -1225.93824383 -1225.93569 -1225.9353617 -1225.93531916 -1225.93531363 -1225.93531291 -1225.93496052]
    Coefficients obtained: [-0.11428353 0.04907884 -0.0518813 1.62176048 -1.34219915 3.4597405 -0.01454935 -1.18479245 0.21385904 -0.01261243 -0.91358884 0.00882926 -0.52904135]
    R^2 score on training set: 0.7648922821629467, test set: 0.5623483354170234

## Logistic Regression

**Despite its name, logistic regression is generally used not for regression but for classification.** It is built on the logistic function, which has the following formula:

\begin{equation}
\Large f(x) = C \frac{1}{1 + e^{-k(x-x_0)}}
\end{equation}

where $C$ is the maximum value of the function, $k$ controls how steeply the curve rises, and $x_0$ is the point at which the function takes the value $f(x_0) = \frac{C}{2}$ (equal to $\frac{1}{2}$ in the default case $C = 1$).
```python
import numpy as np

def logistica(X, C = 1, k = 1, x0 = 0):
    return C /(1 + np.exp(-k *(X-x0)))

X = np.linspace(-10,10,100)
fig, axs = plt.subplots(1, 4, figsize = (18,6))
fig.suptitle("Logistic function for different parameters")
axs[0].plot(X, logistica(X))
axs[0].set_title("C = 1, k = 1, x0 = 0")
axs[0].grid(True)
axs[1].plot(X, logistica(X, C = 2))
axs[1].set_title("C = 2, k = 1, x0 = 0")
axs[1].grid(True)
axs[2].plot(X, logistica(X, k = 10))
axs[2].set_title("C = 1, k = 10, x0 = 0")
axs[2].grid(True)
axs[3].plot(X, logistica(X, x0 = 5))
axs[3].set_title("C = 1, k = 1, x0 = 5")
axs[3].grid(True)
plt.show()
```

Now that we understand what the logistic function looks like, let us see what logistic regression does by looking at the formula scikit uses for the optimisation:

\begin{equation}
\Large \min_{w,c} \frac{1-\rho}{2} w^{T}w + \rho \parallel w \parallel_{1} + C \sum_{i=1}^{n} \log(\exp(-y_i (X_{i}^{T} w + c)) +1)
\end{equation}

In this equation we can see both the $l2$ and the $l1$ regularisation terms, and the mixing weight $\rho$ tells us in what proportion the two regularisations are used. We then have a value $C$, a hyperparameter of the equation that we must tune to decide how strongly the regularisation should count. Note well that **we take the log of the logistic terms, because this reduces the chance of producing huge numbers and is computationally faster**.<br>
To assess the quality of the model as a classifier several metrics can be used; to understand how they work one generally has to know what is meant by True Positive (TP), False Positive (FP), True Negative (TN) and False Negative (FN). To see how this works, let us represent them with a __[confusion matrix](https://scikit-learn.org/stable/modules/model_evaluation.html#confusion-matrix)__, shown here in generic form (rows are the actual classes, columns the predicted ones):

| Actual \ Predicted | True | False |
|:------------------:|:--------------:|:--------------:|
| True | True Positive | False Negative |
| False | False Positive | True Negative |

In this matrix we can see that the ideal case is when all the counts lie on the diagonal, since the model would then classify the dataset with perfect accuracy. In practice, to have a single metric that quantitatively summarises the model's classification ability, the most commonly used are:

- precision : $ \Large \frac{tp}{tp+fp} $
- accuracy : $ \Large \frac{\sum_{i = 0}^{N-1} I(y_{pred} = y_{true})}{N} $ with $I$ the indicator function, which yields 1 only when the condition is satisfied, and $N$ the number of samples
- recall : $ \Large \frac{tp}{tp + fn} $
- f1 score: $ \Large 2\frac{precision \cdot recall}{precision + recall} $

Many other classification metrics exist; for more information consult __[scikit](https://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics)__.<br>
Logistic regression can be used both for binary classification and for multi-class classification; let us look at some examples (a quick toy illustration of the metrics follows below, before the full example).
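As a sanity check on the definitions above, the same metrics can be computed by hand from the confusion matrix and compared with `sklearn.metrics`; the toy labels below are made up purely for illustration:

```python
import numpy as np
from sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score, accuracy_score

y_true = np.array([1, 1, 1, 0, 0, 0, 1, 0])   # toy ground truth
y_pred = np.array([1, 0, 1, 0, 0, 1, 1, 0])   # toy predictions

# for binary labels {0, 1}, ravel() returns the counts in the order tn, fp, fn, tp
tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
print(f"tp={tp}, fp={fp}, fn={fn}, tn={tn}")
print(f"precision = {tp / (tp + fp):.3f}  (sklearn: {precision_score(y_true, y_pred):.3f})")
print(f"recall    = {tp / (tp + fn):.3f}  (sklearn: {recall_score(y_true, y_pred):.3f})")
print(f"accuracy  = {(tp + tn) / len(y_true):.3f}  (sklearn: {accuracy_score(y_true, y_pred):.3f})")
print(f"f1        = {f1_score(y_true, y_pred):.3f}")
```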
```python from sklearn.linear_model import LogisticRegressionCV from sklearn.metrics import classification_report from mlxtend.plotting import plot_decision_regions #load data df = pd.read_csv('../data/diabetes2.csv') print('Dataset of diabetes used:') display(df) #prepare data X_train, X_test, y_train, y_test = train_test_split(df.drop('Outcome', axis = 1).values, df.Outcome.values, random_state=0, test_size = 0.2) target_names = ['class 0', 'class 1'] #get the best classifier given training data logistic = LogisticRegressionCV(class_weight= 'balanced', cv = 10, max_iter = 1e4).fit(X_train, y_train) print(f'Logisitc regression score using 10 folds:\n {logistic.scores_}') print('-'*80) print(f'Training accuracy score: {logistic.score(X_train, y_train)}, Test accuracy score:{logistic.score(X_test, y_test)}') print('-'*80) print(classification_report(y_test, logistic.predict(X_test), target_names=target_names)) #create output dataframe output = {'Probability class 0': logistic.predict_proba(X_test)[:,0], 'Probability class 1': logistic.predict_proba(X_test)[:,1], 'Predicted': logistic.predict(X_test), 'True': y_test} test_results = pd.DataFrame(data = output, index = np.arange(0, y_test.shape[0])) print('Dataframe of the results with the classes predicted, true and probability associated') display(test_results) ``` Dataset of diabetes used: <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>Pregnancies</th> <th>Glucose</th> <th>BloodPressure</th> <th>SkinThickness</th> <th>Insulin</th> <th>BMI</th> <th>DiabetesPedigreeFunction</th> <th>Age</th> <th>Outcome</th> </tr> </thead> <tbody> <tr> <th>0</th> <td>6</td> <td>148</td> <td>72</td> <td>35</td> <td>0</td> <td>33.6</td> <td>0.627</td> <td>50</td> <td>1</td> </tr> <tr> <th>1</th> <td>1</td> <td>85</td> <td>66</td> <td>29</td> <td>0</td> <td>26.6</td> <td>0.351</td> <td>31</td> <td>0</td> </tr> <tr> <th>2</th> <td>8</td> <td>183</td> <td>64</td> <td>0</td> <td>0</td> <td>23.3</td> <td>0.672</td> <td>32</td> <td>1</td> </tr> <tr> <th>3</th> <td>1</td> <td>89</td> <td>66</td> <td>23</td> <td>94</td> <td>28.1</td> <td>0.167</td> <td>21</td> <td>0</td> </tr> <tr> <th>4</th> <td>0</td> <td>137</td> <td>40</td> <td>35</td> <td>168</td> <td>43.1</td> <td>2.288</td> <td>33</td> <td>1</td> </tr> <tr> <th>...</th> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> </tr> <tr> <th>763</th> <td>10</td> <td>101</td> <td>76</td> <td>48</td> <td>180</td> <td>32.9</td> <td>0.171</td> <td>63</td> <td>0</td> </tr> <tr> <th>764</th> <td>2</td> <td>122</td> <td>70</td> <td>27</td> <td>0</td> <td>36.8</td> <td>0.340</td> <td>27</td> <td>0</td> </tr> <tr> <th>765</th> <td>5</td> <td>121</td> <td>72</td> <td>23</td> <td>112</td> <td>26.2</td> <td>0.245</td> <td>30</td> <td>0</td> </tr> <tr> <th>766</th> <td>1</td> <td>126</td> <td>60</td> <td>0</td> <td>0</td> <td>30.1</td> <td>0.349</td> <td>47</td> <td>1</td> </tr> <tr> <th>767</th> <td>1</td> <td>93</td> <td>70</td> <td>31</td> <td>0</td> <td>30.4</td> <td>0.315</td> <td>23</td> <td>0</td> </tr> </tbody> </table> <p>768 rows × 9 columns</p> </div> Logisitc regression score using 10 folds: {1: array([[0.74193548, 0.72580645, 0.70967742, 0.70967742, 0.74193548, 0.72580645, 0.72580645, 0.72580645, 0.72580645, 0.72580645], 
           [0.72580645, 0.67741935, 0.69354839, 0.72580645, 0.74193548,
            0.75806452, 0.75806452, 0.75806452, 0.75806452, 0.75806452],
           [0.75806452, 0.74193548, 0.77419355, 0.79032258, 0.80645161,
            0.80645161, 0.80645161, 0.80645161, 0.80645161, 0.80645161],
           [0.75806452, 0.74193548, 0.74193548, 0.74193548, 0.72580645,
            0.74193548, 0.74193548, 0.74193548, 0.74193548, 0.74193548],
           [0.67213115, 0.68852459, 0.6557377 , 0.68852459, 0.68852459,
            0.68852459, 0.68852459, 0.68852459, 0.68852459, 0.68852459],
           [0.67213115, 0.73770492, 0.75409836, 0.78688525, 0.78688525,
            0.78688525, 0.78688525, 0.78688525, 0.78688525, 0.78688525],
           [0.81967213, 0.80327869, 0.78688525, 0.7704918 , 0.75409836,
            0.75409836, 0.75409836, 0.75409836, 0.75409836, 0.75409836],
           [0.70491803, 0.70491803, 0.68852459, 0.68852459, 0.68852459,
            0.67213115, 0.67213115, 0.67213115, 0.67213115, 0.67213115],
           [0.75409836, 0.78688525, 0.78688525, 0.78688525, 0.80327869,
            0.80327869, 0.80327869, 0.80327869, 0.80327869, 0.80327869],
           [0.72131148, 0.73770492, 0.73770492, 0.73770492, 0.73770492,
            0.73770492, 0.73770492, 0.73770492, 0.73770492, 0.73770492]])}
    --------------------------------------------------------------------------------
    Training accuracy score: 0.754071661237785, Test accuracy score: 0.7662337662337663
    --------------------------------------------------------------------------------
                  precision    recall  f1-score   support

         class 0       0.87      0.78      0.82       107
         class 1       0.59      0.74      0.66        47

        accuracy                           0.77       154
       macro avg       0.73      0.76      0.74       154
    weighted avg       0.79      0.77      0.77       154

Dataframe of the results with the predicted classes, the true classes and the associated probabilities

| | Probability class 0 | Probability class 1 | Predicted | True |
|---|---|---|---|---|
| 0 | 0.028167 | 0.971833 | 1 | 1 |
| 1 | 0.745021 | 0.254979 | 0 | 0 |
| 2 | 0.842474 | 0.157526 | 0 | 0 |
| 3 | 0.243233 | 0.756767 | 1 | 1 |
| 4 | 0.806568 | 0.193432 | 0 | 0 |
| ... | ... | ... | ... | ... |
| 149 | 0.708021 | 0.291979 | 0 | 1 |
| 150 | 0.864217 | 0.135783 | 0 | 0 |
| 151 | 0.233446 | 0.766554 | 1 | 1 |
| 152 | 0.833437 | 0.166563 | 0 | 0 |
| 153 | 0.738377 | 0.261623 | 0 | 0 |

154 rows × 4 columns

To see how this works with more than two classes, have a look at this __[scikit example](https://scikit-learn.org/stable/auto_examples/linear_model/plot_logistic_multinomial.html#sphx-glr-auto-examples-linear-model-plot-logistic-multinomial-py)__, where you can also see the difference between the two types of multi-class classification strategies, OneVsRest and Multinomial; for more info go __[here](https://scikit-learn.org/stable/modules/multiclass.html?highlight=onevsrest)__.
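As a minimal sketch of that distinction (our own toy example on the iris dataset, assuming a scikit-learn version where `LogisticRegression` still accepts the `multi_class` parameter), the two strategies can be compared directly:

```python
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression

X, y = load_iris(return_X_y=True)

# OneVsRest: one binary classifier per class
ovr = LogisticRegression(multi_class='ovr', max_iter=1000).fit(X, y)
# Multinomial: a single model with a softmax over all classes
multinomial = LogisticRegression(multi_class='multinomial', max_iter=1000).fit(X, y)

print('OvR training accuracy:        ', ovr.score(X, y))
print('Multinomial training accuracy:', multinomial.score(X, y))
```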
## Polynomial Regression

So far we have seen linear methods that managed to classify well, but in many cases the dataset may exhibit **non-linearities; in that case we have to map the dataset into a new space where we can use linear methods**. For this, scikit offers as a solution the transformation of the data through a polynomial function, or alternatively the application of a __[kernel ridge regression](https://scikit-learn.org/stable/modules/kernel_ridge.html)__, similar to the Support Vector Machine we will meet later, but with small differences. Let's create a problem for which such a transformation is necessary and see how to apply these tools, also making use of the __[scikit pipeline](https://scikit-learn.org/stable/modules/compose.html#combining-estimators)__.

```python
from sklearn.preprocessing import PolynomialFeatures

x = np.arange(20).reshape(10,2)
print(f'Original data:\n{x}')
poly = PolynomialFeatures(degree=2)
print(f'Data transformed using a polynomial of degree 2:\n {poly.fit_transform(x)}')
poly = PolynomialFeatures(degree=2, interaction_only= True)
print(f'Data transformed using a polynomial of degree 2, keeping only the mixed '
      f'terms, the identities and the zero-degree term:\n{poly.fit_transform(x)}')
```

    Original data:
    [[ 0  1]
     [ 2  3]
     [ 4  5]
     [ 6  7]
     [ 8  9]
     [10 11]
     [12 13]
     [14 15]
     [16 17]
     [18 19]]
    Data transformed using a polynomial of degree 2:
     [[  1.   0.   1.   0.   0.   1.]
     [  1.   2.   3.   4.   6.   9.]
     [  1.   4.   5.  16.  20.  25.]
     [  1.   6.   7.  36.  42.  49.]
     [  1.   8.   9.  64.  72.  81.]
     [  1.  10.  11. 100. 110. 121.]
     [  1.  12.  13. 144. 156. 169.]
     [  1.  14.  15. 196. 210. 225.]
     [  1.  16.  17. 256. 272. 289.]
     [  1.  18.  19. 324. 342. 361.]]
    Data transformed using a polynomial of degree 2, keeping only the mixed terms, the identities and the zero-degree term:
    [[  1.   0.   1.   0.]
     [  1.   2.   3.   6.]
     [  1.   4.   5.  20.]
     [  1.   6.   7.  42.]
     [  1.   8.   9.  72.]
     [  1.  10.  11. 110.]
     [  1.  12.  13. 156.]
     [  1.  14.  15. 210.]
     [  1.  16.  17. 272.]
     [  1.  18.  19. 342.]]

We can therefore see how the original data in the form $[x_0, x_1]$ have been transformed into the form $[1, x_0, x_1, x_{0}^2, x_{0}x_{1}, x_{1}^2]$, while the `interaction_only = True` case returns $[1, x_0, x_1, x_{0}x_{1}]$. Let's now see how to use this on a non-linear dataset.

```python
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline

x = np.arange(100)
y = 2 * x**3 + 3 * x**2 + 5 * x + 4

# generate points used to plot
x_plot = np.linspace(0, 100, 100)

# create matrix versions of these arrays
X = x[:, np.newaxis]
X_plot = x_plot[:, np.newaxis]

colors = ['blue', 'green', 'red']
fig, axs = plt.subplots(figsize = (12,8))
plt.scatter(x, y, color='navy', s=30, marker='o', label="training points")

#polynomial features
for count, degree in enumerate([1, 2, 3]):
    model = make_pipeline(PolynomialFeatures(degree), LinearRegression())
    model.fit(X, y)
    y_plot = model.predict(X_plot)
    plt.plot(x_plot, y_plot, color=colors[count], label="degree %d" % degree)

plt.legend()
plt.show()
```

We have thus understood how to apply polynomial transformations. Later we will discover that this approach is computationally very inconvenient because of the ever larger dimensionality it has to keep track of; this is why we will later look at __[SVMs](https://scikit-learn.org/stable/modules/svm.html#svm)__ to get around these problems.

---

CONGRATULATIONS, YOU HAVE FINISHED MACHINE LEARNING NOTEBOOK 4!
\section{Introduction}
The self-organizing map (SOM) \citep{Kohonen:1982} is a vector quantization method that maps data onto a grid, usually two-dimensional and regular. After learning has converged, the codebook is self-organized such that the prototypes associated with two nearby nodes are similar. This is a direct consequence of the underlying topology of the map as well as of the learning algorithm that, when presented with a new sample, modifies the code word of the best matching unit (BMU, the unit whose code word is closest to the input) as well as the code words of units in its vicinity (neighborhood). SOMs have been used in a vast number of applications \citep{Kaski:1998,Oja:2003,Polla:2009} and today there exist several variants of the original algorithm \citep{Kohonen:2001}. However, according to the survey of \citep{Astudillo:2014}, only a few of these variants consider an alternative topology for the map, the regular Cartesian and the hexagonal grid being by far the most commonly used ones. Among the alternatives, the growing neural gas \citep{Fritzke:1994} is worth mentioning since it relies on a dynamic set of units and builds the topology {\em a posteriori}, as is also the case for the incremental grid growing neural network \citep{Blackmore:1995} and the controlled growth self-organizing map \citep{Alahakoon:2000}. However, this {\em a posteriori} topology is built in the data space as opposed to the neural space. This means that the neighborhood property is lost and two neurons that are close to each other on the map may end up with totally different prototypes in the data space.

The impact of the network topology on self-organization has also been studied by \citep{Jiang:2009} using the MNIST database. In the direct problem (evaluating the influence of topology on performance), these authors consider SOMs whose neighborhood is defined by a regular, small-world or random network and show a weak influence of the topology on the performance of the underlying model. In the inverse problem (searching for the best topology), the authors try to optimize the topology of the network using evolutionary algorithms \citep{Eiben:2003} in order to minimize the classification error. Their results indicate a weak correlation between the topology and the performance in this specific case. However, \citep{Burguillo:2013} reported results contradicting \citep{Eiben:2003} when they studied the use of self-organizing maps for time series prediction and considered different topologies (spatial, small-world, random and scale-free). They concluded that the classical spatial topology remains the best, while the scale-free topology seems inadequate for the time series prediction task. For the two others (random and small-world), however, the difference was not so large and the topology does not seem to dramatically impact performance.

In this work, we are interested in exploring an alternative topology in order to specifically handle cases where the intrinsic dimension of the data is higher than the dimension of the map. Most of the time, the topology of the SOM is one-dimensional (linear network) or two-dimensional (regular or hexagonal grid) and this may not correspond to the intrinsic dimension of the data, especially in the high-dimensional case. This may result in the non-preservation of the topology \citep{Villmann:1999}, with potentially multiple foldings of the map. The problem is even harder considering that the data are unknown at the time of construction of the network.
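For reference, the learning rule sketched above can be written explicitly. In the notation commonly used for SOMs (our notation, added here for clarity, not necessarily that of the original papers), on presentation of a sample $x$ the code word $w_j$ of unit $j$ is updated as
\begin{equation}
  w_j \leftarrow w_j + \varepsilon(t)\, h_{\sigma(t)}(j, j^{\ast})\, (x - w_j),
\end{equation}
where $j^{\ast}$ denotes the best matching unit, $\varepsilon(t)$ is a decreasing learning rate and $h_{\sigma(t)}$ is a neighborhood kernel (typically a Gaussian of the distance between units $j$ and $j^{\ast}$ on the map) whose width $\sigma(t)$ also decreases over time.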
To overcome this topological constraint, we propose a variation of the self-organizing map algorithm that considers the random placement of neurons on a two-dimensional manifold, following a blue noise distribution from which various topologies can be derived. These topologies possess random discontinuities that allow for a more flexible self-organization, especially with high-dimensional data. After introducing the methods, the model will be illustrated and analyzed using several classical examples, and its properties will be introduced in more detail. Finally, we will explain how this model can be made resilient to neural gain or loss by reorganizing the neural sheet using the centroidal Voronoi tessellation.

A recurring issue with self-organizing maps is how to measure the quality of a map. In the SOM literature, there is neither one measure to rule them all nor a single general recipe on how to measure the quality of a map. Some of the usual measures are the distortion \citep{rynkiewicz:2008}, the $\delta x - \delta y$ representation \citep{Demartines:1992}, and many other specialized measures for rectangular grids or specific types of SOMs \citep{Polani2002}. However, most of those measures cannot be used in this work since we do not use a standard grid laid over the neural space; instead we use a randomly distributed graph (see supplementary material for standard measures). This, and the fact that the neural space is discrete, introduces a significant challenge in deciding what would be a good measure for our comparisons \citep{Polani2002} (i.e., to compare the neural spaces of RSOM and regular SOM with the input space). According to \citep{Polani2002}, the quality of the map's organization can be considered equivalent to topology preservation. Therefore, a topological tool such as persistent homology can help in comparing the input space with the neural one. Topological Data Analysis (TDA) is a relatively new field of applied mathematics and offers a great deal of topological and geometrical tools to analyze point cloud data \citep{Carlsson:2009,HerculanoHouzel:2013}. TDA-based methods of this kind were already proposed in \citep{Polani2002}; however, TDA was far less developed and popular at the time. Therefore, in this work we use persistent homology and barcodes to analyze our results and compare the neural spaces generated by the SOM algorithms with the input spaces. We provide more details about TDA and persistent homology in the corresponding section. To avoid confusion between the original SOM proposed by Teuvo Kohonen and the newly randomized SOM, we will refer to the original as \textbf{SOM} and the newly randomized one as \textbf{RSOM}.
% \gid{[``In the present review, we wish to point out that the structure introduced in that papers on the discrete space can be interpreted as a complex, a structure known from algebraic topology (Henle 1979). A complex can be seen as a generalization of the notion of a graph. However, no invocation of metric structures is required for its definition, thus the method and the measures derived from it can be regarded as a truly pure topological notions.'']}
%
The self-organizing map (SOM) \citep{Kohonen:1982} is a vector quantization method that maps high-dimensional data onto a low-dimensional grid (usually two-dimensional) through an unsupervised learning process. The low-dimensional discrete map, usually called the codebook, consists of code words (vectors) that each represent a part of the input space.
Two neighboring code words represent similar input samples (prototypes). This is a direct consequence of the underlying topology of the map as well as of the learning algorithm: when a new sample is given, the learning algorithm modifies the prototype of the best matching unit (BMU) as well as those of the units in its vicinity (neighborhood). SOMs have been used in a variety of applications \citep{Kaski:1998,Oja:2003,Polla:2009} and several variations of the original algorithm have been proposed over time \citep{Polla:2009}.
%
However, most SOM algorithms assume a fixed topology of the neural space (\emph{i.e.}, the space defined by the nodes of the SOM network -- code words), which usually is either a rectangular or a hexagonal Cartesian grid \citep{Astudillo:2014}. This sort of predefined topology enforces a rigidity on the neural map that can lead to a \emph{dimension mismatch} between the input and the neural space. This often results in neural representations that are ill-formed and do not properly cover the entire data space. For instance, if the topology of the SOM is one-dimensional or two-dimensional (regular or hexagonal grid) and the intrinsic dimension of the data is higher, then the topology may not be preserved \citep{Villmann:1999}, sometimes leading to multiple foldings of the map. One of the roots of this problem is the lack of knowledge of the underlying topology of the data space.
%
One way to overcome this limitation is to introduce a dynamic set of units (neurons) that learn the topology \emph{a posteriori}. Such algorithms are (i) the growing neural gas \citep{Fritzke:1994}, (ii) the incremental grid growing neural network \citep{Blackmore:1995}, and (iii) the controlled growth map \citep{Alahakoon:2000}. Nonetheless, the topology in these cases is built in the data space as opposed to the neural space. This means that the neighborhood property is lost and two nearby neurons on the map may end up with totally different prototypes in the data space. Consequently, these dynamic units do not really solve the problem of preserving the topology and the topological relations between neurons. One solution is to use an alternative topology that allows for more flexibility in the neural space without losing performance. We therefore propose in this work a variation of the SOM algorithm that considers the random placement of neurons on a two-dimensional manifold, following a blue noise distribution from which one can derive various topologies. These topologies possess random but controllable discontinuities that allow for a more flexible self-organization, especially with high-dimensional data.
%
We are not the first to explore alternative topologies for training a SOM, and the impact of the network topology on self-organization has been studied before. For instance, \citep{Jiang:2009} consider SOMs whose neighborhood is defined by a regular, small-world or random network trained on the MNIST data set, showing a weak influence of the topology on the performance of the SOM learning algorithm. Furthermore, they optimized the topology of the network using evolutionary algorithms~\citep{Eiben:2003}, minimizing the classification error. In this case, their results again indicate a weak correlation between the topology and the performance of the SOM. Another study, conducted by \citep{Burguillo:2013}, found that the standard Cartesian grid topology remained the best among non-conventional topologies (small-world, random, and scale-free) for SOMs solving time series prediction problems.
%
This paper is organized as follows: first, we introduce the necessary terminology and notation. Then we present the model and the learning algorithm, as well as the tools to assess the performance of the proposed algorithm. After introducing the model, we conduct several experiments to test the performance of the algorithm and examine the final topology of the neural space. Finally, we test the ability of the learning algorithm to cope with situations where a reorganization of the neural space is necessary. More precisely, (i) we perform an ablation study by removing units from the neural space, and (ii) we add extra neurons to the map, increasing the capacity of the neural space. In both cases, we show that the topology of the neural space can be preserved.
lemma degree_diff_less: "degree p < n \<Longrightarrow> degree q < n \<Longrightarrow> degree (p - q) < n" for p q :: "'a::ab_group_add poly"
import util.logic import util.category import util.meta.tactic.basic import util.meta.tactic.monotonicity run_cmd do mk_simp_attr `predicate, mk_simp_attr `lifted_fn namespace predicate universe variables u u' u₀ u₁ u₂ variables {α : Sort u₀} variables {β : Sort u₁} variables {γ : Sort u₂} variables {σ : Sort u'} structure var (α : Sort u₀) (β : Sort u₁) : Sort (max u₀ u₁+1) := (apply : α → β) attribute [pp_using_anonymous_constructor] var @[simp, predicate] def fun_app_to_var (f : α → β) : var σ α → var σ β | ⟨ g ⟩ := ⟨ f ∘ g ⟩ @[simp, predicate] def combine_var : var σ (α → β) → var σ α → var σ β | ⟨ f ⟩ ⟨ x ⟩ := ⟨ λ s, f s (x s) ⟩ @[reducible] def pred' (α : Sort u) : Type (max u 1) := var.{u 1} α Prop def pred'.mk := @var.mk notation x ` ⊨ `:53 y:52 := (var.apply y x) structure judgement (h y : pred' α) : Prop := (apply : ∀ σ, σ ⊨ h → σ ⊨ y) infix ` ⊢ `:53 := judgement def lifted₀ (p : β) : var α β := ⟨ λ _, p ⟩ def lifted₁ (op : β → γ) (p : var α β) : var α γ := ⟨ λ i, op (i ⊨ p) ⟩ def lifted₂ (op : α → β → γ) (p : var σ α) (q : var σ β) : var σ γ := ⟨ λ i, op (i ⊨ p) (i ⊨ q) ⟩ attribute [simp, predicate] lifted₀ lifted₁ lifted₂ attribute [predicate] var.apply var.mk pred'.mk -- def ew (p : pred' α) : Prop := -- ∀ i, i ⊨ p @[predicate] def False {α} : pred' α := lifted₀ false @[predicate] def True {α} : pred' α := lifted₀ true @[reducible] def holds (x : pred' α) := ∀ Γ, judgement Γ x prefix `⊩ `:53 := holds def p_or (p₀ p₁ : pred' α) : pred' α := lifted₂ or p₀ p₁ @[simp, predicate] lemma p_or_to_fun (p₀ p₁ : pred' α) (x : α) : x ⊨ p_or p₀ p₁ ↔ x ⊨ p₀ ∨ x ⊨ p₁ := by refl def p_and (p₀ p₁ : pred' α) : pred' α := lifted₂ and p₀ p₁ def p_impl (p₀ p₁ : pred' α) : pred' α := lifted₂ implies p₀ p₁ @[lifted_fn, reducible] def v_eq : var α β → var α β → pred' α := lifted₂ eq @[lifted_fn, reducible] def p_equiv : pred' α → pred' α → pred' α := v_eq def p_entails (p₀ p₁ : pred' α) : Prop := ⊩ p_impl p₀ p₁ def p_not (p : pred' α) : pred' α := lifted₁ not p def p_exists {β : Sort u'} {t : Sort u} (P : t → pred' β) : pred' β := ⟨λ x, ∃ y, x ⊨ P y⟩ def p_forall {t : Sort u} {β : Sort u'} (P : t → pred' β) : pred' β := ⟨ λ x, ∀ y, x ⊨ P y ⟩ notation `∃∃` binders `, ` r:(scoped P, p_exists P) := r notation `∀∀` binders `, ` r:(scoped P, p_forall P) := r infixl ` ⋁ `:65 := p_or infixl ` ⋀ `:70 := p_and infixr ` ⟶ `:60 := p_impl precedence ≡:55 infixr ` ≡ ` := p_equiv infix ` ⟹ `:60 := p_entails -- notation `⦃ `:max act ` ⦄`:0 := ew act -- Γ ⊢ p -- ∀ σ, σ ⊨ Γ → σ ⊨ p instance : has_neg (pred' α) := has_neg.mk p_not def ctx_impl (Γ p q : pred' α) : Prop := Γ ⊢ p ⟶ q instance var_functor {γ : Type _} : functor (var γ) := { map := λ α β f x, ⟨ λ y, f $ x.apply y ⟩ } instance var_has_seq {γ : Type _} : has_seq (var γ) := { seq := λ α β f x, ⟨ λ s, f.apply s (x.apply s) ⟩ } instance var_has_pure {γ : Type _} : has_pure (var γ) := { pure := λ α x, ⟨ λ _, x ⟩ } instance var_applicative {α : Type u} : applicative (var α) := { ..predicate.var_functor , ..predicate.var_has_seq , ..predicate.var_has_pure } instance var_has_bind {γ : Type _} : has_bind (var γ) := { bind := λ α x ⟨ m ⟩ f, ⟨ λ i, (f $ m i).apply i ⟩ } instance var_monad {γ : Type _} : monad (var γ) := { ..predicate.var_applicative , ..predicate.var_has_bind } @[lifted_fn, reducible] def v_lt {β : Type _} [has_lt β] : var α β → var α β → pred' α := lifted₂ (<) @[lifted_fn, reducible] def v_wf_r [has_well_founded β] : var α β → var α β → pred' α := lifted₂ has_well_founded.r @[lifted_fn, reducible] def v_le {β : Type _} [has_le β] : var α β → var α β 
→ pred' α := lifted₂ (≤) @[lifted_fn, reducible] def v_mem {β : Type _} {γ : Type _} [has_mem β γ] : var α β → var α γ → pred' α := lifted₂ (∈) infix ` ≃ `:75 := v_eq infix ` ∊ `:75 := v_mem infix ` ≺ `:75 := v_lt infix ` ≼ `:75 := v_le infix ` ≺≺ `:75 := v_wf_r infix ` << `:50 := has_well_founded.r def var_seq : var σ (α → β) → var σ α → var σ β | ⟨ f ⟩ ⟨ x ⟩ := ⟨ λ i, f i (x i) ⟩ instance val_to_var_coe : has_coe β (var α β) := { coe := λ x, ⟨ λ _, x ⟩ } instance option_val_to_var_coe {β} : has_coe β (var α (option β)) := { coe := λ x, ↑(some x) } instance var_coe_to_fun : has_coe_to_fun (var σ $ α → β) := { F := λ _, var σ α → var σ β , coe := var_seq } def var_coe_to_fun₂ : has_coe_to_fun (var σ $ α → β → γ) := { F := λ _, var σ α → var σ β → var σ γ , coe := λ f x₀ x₁, f x₀ x₁ } def var_coe_to_fun₃ {α₀ α₁ α₂} : has_coe_to_fun (var σ $ α₀ → α₁ → α₂ → β) := { F := λ _, var σ α₀ → var σ α₁ → var σ α₂ → var σ β , coe := λ f x₀ x₁ x₂, f x₀ x₁ x₂ } def var_coe_to_fun₄ {α₀ α₁ α₂ α₃} : has_coe_to_fun (var σ $ α₀ → α₁ → α₂ → α₃ → β) := { F := λ _, var σ α₀ → var σ α₁ → var σ α₂ → var σ α₃ → var σ β , coe := λ f x₀ x₁ x₂ x₃, f x₀ x₁ x₂ x₃ } abbreviation val_to_var : β → var α β := coe notation `⟪ ` x ` ⟫` := (⟨ x ⟩ : var _ _) notation `⟪ ` t, x ` ⟫` := (@val_to_var t _ x) def proj : var β γ → var α β → var α γ | ⟨p⟩ ⟨f⟩ := ⟨p∘f⟩ infix ` ! `:90 := proj @[simp, predicate, reducible] def contramap (p : pred' α) (f : β → α) : pred' β := p ! ⟨ f ⟩ infixr ` '∘ `:90 := contramap def whole : var α α := ⟨ @id α ⟩ end predicate
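-- A minimal usage sketch of the combinators above (our own example, not part
-- of the original file): we lift two state predicates over ℕ, combine them
-- with ⋀, and discharge both conjuncts definitionally.
open predicate

def x_pos : pred' ℕ := ⟨ λ n, 0 < n ⟩
def x_even : pred' ℕ := ⟨ λ n, n % 2 = 0 ⟩

example : (2 : ℕ) ⊨ (x_pos ⋀ x_even) := ⟨ dec_trivial, rfl ⟩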
lemma stutter_reduced_suffix: "\<natural> (suffix k (\<natural>\<sigma>)) = suffix k (\<natural>\<sigma>)"
proof (rule stutter_free_reduced)
  have "stutter_free (\<natural>\<sigma>)" by (rule stutter_reduced_stutter_free)
  thus "stutter_free (suffix k (\<natural>\<sigma>))" by (rule stutter_free_suffix)
qed
theorem great_Picard: assumes "open M" "z \<in> M" "a \<noteq> b" and holf: "f holomorphic_on (M - {z})" and fab: "\<And>w. w \<in> M - {z} \<Longrightarrow> f w \<noteq> a \<and> f w \<noteq> b" obtains l where "(f \<longlongrightarrow> l) (at z) \<or> ((inverse \<circ> f) \<longlongrightarrow> l) (at z)"
// Copyright 2015 Yu Jing<[email protected]> #include "rdsextr/parr_events.h" #include <cstdio> #include <cstring> #include <cmath> #include <ctime> // std::time #include <pthread.h> // thread #include <unistd.h> // usleep #include <string> #include <mutex> // std::lock_guard , mutex #include <queue> // queue #include <vector> #include <set> #include <map> #include <thread> //#include <boost/random/linear_congruential.hpp> //#include <boost/random/uniform_real.hpp> //#include <boost/random/variate_generator.hpp> #include "argcv/util/timer.h" #include "rdsextr/gnode.h" #include "rdsextr/util.h" #include "ThreadPool.h" #include "leveldb/db.h" #include "leveldb/cache.h" #include "leveldb/options.h" #include "leveldb/write_batch.h" #define VERBOSE_PATH 1 namespace rdsextr { using std::vector; using std::set; using std::map; using std::string; // exact results // for control group std::mutex path_debug_mtx; // currently no escape void complete_discov(size_t curr_node, set<size_t> escape_nodes, vector<size_t> path_nodes, int len, double cwp, RGrapgh * _G) { path_nodes.push_back(curr_node); //escape_nodes.insert(curr_node); if(len > 1) { GNode * _node = _G -> get_node(curr_node); int cnt_discov = 0; double sum_wp = 0.0; for(auto & id_weight : _node->weights) { sum_wp += id_weight.second; } sum_wp = sum_wp <= 0 ? 0.0001 : sum_wp; for(auto & id_weight : _node->weights) { size_t nxid = id_weight.first; //if(escape_nodes.find(nxid) == escape_nodes.end()) { complete_discov(nxid, escape_nodes, path_nodes, len - 1, cwp * id_weight.second / sum_wp, _G); cnt_discov++; //} } if(cnt_discov == 0) { // terminate discovery before T comes #if VERBOSE_PATH if(path_nodes.size() > 1) { path_debug_mtx.lock(); printf("[C:PATH:M] "); for(auto id: path_nodes) { printf("%lu -> ", id); } printf("\n"); path_debug_mtx.unlock(); } #endif _G->append_path(path_nodes, cwp); } } else { #if VERBOSE_PATH if(path_nodes.size() > 1) { path_debug_mtx.lock(); printf("[C:PATH:E] "); for(auto id: path_nodes) { printf("%lu -> ", id); } printf("\n"); path_debug_mtx.unlock(); } #endif _G->append_path(path_nodes, cwp); } } bool complete_path_generator_event_ctl(RGrapgh * _G , size_t nodeid_start, size_t nodeid_end) { set<size_t> escape_nodes; vector<size_t> path_nodes; for(size_t i = nodeid_start; i < nodeid_end ; i++ ){ complete_discov(i, escape_nodes, path_nodes, _G->T, 1, _G); } return true; } // currently no escape void random_discov(size_t curr_node, set<size_t> escape_nodes, vector<size_t> path_nodes, int len, RGrapgh * _G, int64_t threshold) { path_nodes.push_back(curr_node); //escape_nodes.insert(curr_node); if(len > 1) { GNode * _node = _G -> get_node(curr_node); int cnt_discov = 0; for(auto & id_weight : _node->weights) { size_t nxid = id_weight.first; //if(escape_nodes.find(nxid) == escape_nodes.end()) { if(rand() <= threshold) { random_discov(nxid, escape_nodes, path_nodes, len - 1, _G, threshold); cnt_discov++; } //} } if(cnt_discov == 0) { // terminate discovery before T comes #if VERBOSE_PATH if(path_nodes.size() > 1) { path_debug_mtx.lock(); printf("[R:PATH:M] "); for(auto id: path_nodes) { printf("%lu -> ", id); } printf("\n"); path_debug_mtx.unlock(); } #endif _G->append_path(path_nodes); } } else { #if VERBOSE_PATH if(path_nodes.size() > 1) { path_debug_mtx.lock(); printf("[R:PATH:E] "); for(auto id: path_nodes) { printf("%lu -> ", id); } printf("\n"); path_debug_mtx.unlock(); } #endif _G->append_path(path_nodes); } } bool random_path_generator_event_ctl(RGrapgh * _G , size_t nodeid_start, size_t nodeid_end) { 
set<size_t> escape_nodes; vector<size_t> path_nodes; int64_t threshold = RAND_MAX * _G->CTL_Q; for(size_t i = nodeid_start; i < nodeid_end ; i++ ){ random_discov(i, escape_nodes, path_nodes, _G->T, _G, threshold); } return true; } bool path_sim_calculator_event_ctl(RGrapgh * _G , size_t nodeid_start, size_t nodeid_end){ map<string,string> data_to_save; leveldb::DB* db = _G->db; for(size_t i = nodeid_start; i < nodeid_end ; i++ ){ map<size_t, double> nodes_with_weight_in_same_path; GNode * _current_node = _G->get_node(i); std::vector<size_t> * _current_path = &(_current_node->path); double sum_wp = 0.0; for(vector<size_t>::iterator iter = _current_path->begin(); iter != _current_path->end(); iter++) { sum_wp += (_G->get_path(*iter)->weight); } sum_wp = sum_wp >= 0.0 ? sum_wp : 0.0001; // key : node id , value : path id size for(vector<size_t>::iterator iter = _current_path->begin(); iter != _current_path->end(); iter++) { GPath * _p = _G->get_path(*iter); assert(_p != nullptr); vector<size_t> * _node_v = &(_p->node_v); double cwp = _p->weight / sum_wp; //nodeset.insert(_G->get_path(*iter)->node.begin(),_G->get_path(*iter)->node.end()); for(vector<size_t>::const_iterator it = _node_v->begin(); it != _node_v->end(); it++) { if(nodes_with_weight_in_same_path.find(*it) == nodes_with_weight_in_same_path.end()) nodes_with_weight_in_same_path[*it]=0; nodes_with_weight_in_same_path[*it] += cwp; //printf("%lu => %lu, weight: %f curr: %f\n", i, *it, cwp, nodes_with_weight_in_same_path[*it]); } } MinHeap<std::pair<size_t,double>> top_D_nodes(_G->D,rdsextr::pair_compare_by_value<size_t, double>); for (std::map<size_t,double>::iterator it=nodes_with_weight_in_same_path.begin(); it!=nodes_with_weight_in_same_path.end(); ++it) top_D_nodes.push(*it); std::pair<size_t,double> val; vector<std::pair<size_t,double>> top_node_vect; while(top_D_nodes.pop(val)) { top_node_vect.push_back(val); } vector_reverse(top_node_vect); std::string similar_path_list_key(_G->get_similar_path_filename()); similar_path_list_key += std::to_string(i); std::string similar_structure_list_key(_G->get_similar_structure_filename()); similar_structure_list_key += std::to_string(i); std::string similar_path_list_val(""); std::string similar_structure_list_val(""); if(top_node_vect.size() > 0 ) { //similar_path_list_val += std::to_string(top_node_vect[0].first) + ":"+ std::to_string((double)top_node_vect[0].second/_G->R); similar_path_list_val += std::to_string(top_node_vect[0].first) + ":"+ double2string((double)top_node_vect[0].second); similar_structure_list_val += std::to_string(top_node_vect[0].second); for(size_t ix = 1 ; ix < top_node_vect.size(); ix++ ) { similar_path_list_val += " "; //similar_path_list_val += std::to_string(top_node_vect[ix].first) + ":"+ std::to_string((double)top_node_vect[ix].second/_G->R); similar_path_list_val += std::to_string(top_node_vect[ix].first) + ":"+ double2string((double)top_node_vect[ix].second); similar_structure_list_val += " "; similar_structure_list_val += std::to_string(top_node_vect[ix].second); } for(size_t ix = top_node_vect.size() ; ix < _G->D; ix++ ) { similar_structure_list_val += " 0"; } } else { similar_structure_list_val += "0"; for(size_t ix = 1 ; ix < _G->D; ix++ ) { similar_structure_list_val += " 0"; } } // printf("~~~~~~~~%s\n", similar_path_list_val.c_str()); //ldb_set(db,similar_path_list_key,similar_path_list_val); //ldb_set(db,similar_structure_list_key,similar_structure_list_val); data_to_save.insert(std::make_pair(similar_path_list_key,similar_path_list_val)); 
data_to_save.insert(std::make_pair(similar_structure_list_key,similar_structure_list_val)); // printf("======%s\n", data_to_save[similar_path_list_key].c_str()); if(i % 100000 == 0 && data_to_save.size() > 0) { ldb_batch_add(db,data_to_save); data_to_save.clear(); } } if(data_to_save.size() > 0) { ldb_batch_add(db,data_to_save); } return true; } }
subroutine dkqg_tbqdk_v(p,msq) implicit none ************************************************************************ * Author: R.K. Ellis * * January, 2012. * * calculate the element squared and subtraction terms * * for the contribution of virtual corrections to the process * * * * [nwz=+1] * * q(-p1) +g(-p2)=nu(p3)+e+(p4)+b(p5)+bb(p6)+q'(p7) * * * * [nwz=-1] * * q(-p1) +g(-p2)=e-(p3)+v~(p4)+b~(p5)+b(p6)+q'(p7) * * * * Top is kept strictly on-shell although all spin correlations * * are retained. * * * * NOTE: this routine is a replacement for dkqg_tbqdk_v_old.f, * * including the effect of the b-quark mass. In the massless * * case it is approximately 10% faster than that routine * * * ************************************************************************ include 'constants.f' include 'ewcouple.f' include 'qcdcouple.f' include 'masses.f' include 'ckm.f' include 'nwz.f' integer j,k,hb,hc,ht,ha,h2 double precision msq(-nf:nf,-nf:nf),p(mxpart,4) double precision fac,msq_qg,msq_gq,msq_qbg,msq_gqb double complex prop double complex mtop(2,2),mtopv(2,2),manti(2,2),mantiv(2,2), & mqg(2,2,2),mgq(2,2,2),mqbg(2,2,2),mgqb(2,2,2), & mtotqg(2,2,2),mtotgq(2,2,2), & mtotqbg(2,2,2),mtotgqb(2,2,2), & mtotqgv(2,2,2),mtotgqv(2,2,2), & mtotqbgv(2,2,2),mtotgqbv(2,2,2) C----set all elements to zero msq(:,:)=0d0 if (nwz .eq. +1) then call singletoponshell(1,2,7,p,0,mqg) call singletoponshell(2,1,7,p,0,mgq) call singletoponshell(7,2,1,p,0,mqbg) call singletoponshell(7,1,2,p,0,mgqb) call tdecay(p,3,4,5,mtop) call tdecay_v(p,3,4,5,mtopv) else call singleatoponshell(1,2,7,p,0,mqg) call singleatoponshell(2,1,7,p,0,mgq) call singleatoponshell(7,2,1,p,0,mqbg) call singleatoponshell(7,1,2,p,0,mgqb) call adecay(p,3,4,5,manti) call adecay_v(p,3,4,5,mantiv) endif c--- q-g amplitudes do hb=1,2 do h2=1,2 do hc=1,2 mtotqg(hb,h2,hc)=czip mtotgq(hb,h2,hc)=czip mtotqbg(hb,h2,hc)=czip mtotgqb(hb,h2,hc)=czip mtotqgv(hb,h2,hc)=czip mtotgqv(hb,h2,hc)=czip mtotqbgv(hb,h2,hc)=czip mtotgqbv(hb,h2,hc)=czip if (nwz .eq. +1) then do ht=1,2 mtotqg(hb,h2,hc)=mtotqg(hb,h2,hc) & +mtop(hb,ht)*mqg(ht,h2,hc) mtotgq(hb,h2,hc)=mtotgq(hb,h2,hc) & +mtop(hb,ht)*mgq(ht,h2,hc) mtotqbg(hb,h2,hc)=mtotqbg(hb,h2,hc) & +mtop(hb,ht)*mqbg(ht,h2,hc) mtotgqb(hb,h2,hc)=mtotgqb(hb,h2,hc) & +mtop(hb,ht)*mgqb(ht,h2,hc) mtotqgv(hb,h2,hc)=mtotqgv(hb,h2,hc) & +mtopv(hb,ht)*mqg(ht,h2,hc) mtotgqv(hb,h2,hc)=mtotgqv(hb,h2,hc) & +mtopv(hb,ht)*mgq(ht,h2,hc) mtotqbgv(hb,h2,hc)=mtotqbgv(hb,h2,hc) & +mtopv(hb,ht)*mqbg(ht,h2,hc) mtotgqbv(hb,h2,hc)=mtotgqbv(hb,h2,hc) & +mtopv(hb,ht)*mgqb(ht,h2,hc) enddo else do ha=1,2 mtotqg(hb,h2,hc)=mtotqg(hb,h2,hc) & +mqg(hb,h2,ha)*manti(ha,hc) mtotgq(hb,h2,hc)=mtotgq(hb,h2,hc) & +mgq(hb,h2,ha)*manti(ha,hc) mtotqbg(hb,h2,hc)=mtotqbg(hb,h2,hc) & +mqbg(hb,h2,ha)*manti(ha,hc) mtotgqb(hb,h2,hc)=mtotgqb(hb,h2,hc) & +mgqb(hb,h2,ha)*manti(ha,hc) mtotqgv(hb,h2,hc)=mtotqgv(hb,h2,hc) & +mqg(hb,h2,ha)*mantiv(ha,hc) mtotgqv(hb,h2,hc)=mtotgqv(hb,h2,hc) & +mgq(hb,h2,ha)*mantiv(ha,hc) mtotqbgv(hb,h2,hc)=mtotqbgv(hb,h2,hc) & +mqbg(hb,h2,ha)*mantiv(ha,hc) mtotgqbv(hb,h2,hc)=mtotgqbv(hb,h2,hc) & +mgqb(hb,h2,ha)*mantiv(ha,hc) enddo endif enddo enddo enddo prop=dcmplx(zip,mt*twidth) fac=V*xn*gwsq**4*gsq/abs(prop)**2*ason2pi*CF c--- include factor for hadronic decays c if ((case .eq. 'tt_bbh') .or. (case .eq. 
'tt_hdk')) fac=2d0*xn*fac msq_qg=0d0 msq_gq=0d0 msq_qbg=0d0 msq_gqb=0d0 do hb=1,2 do h2=1,2 do hc=1,2 msq_qg=msq_qg+fac*aveqg & *dble(dconjg(mtotqg(hb,h2,hc))*mtotqgv(hb,h2,hc)) msq_gq=msq_gq+fac*aveqg & *dble(dconjg(mtotgq(hb,h2,hc))*mtotgqv(hb,h2,hc)) msq_qbg=msq_qbg+fac*aveqg & *dble(dconjg(mtotqbg(hb,h2,hc))*mtotqbgv(hb,h2,hc)) msq_gqb=msq_gqb+fac*aveqg & *dble(dconjg(mtotgqb(hb,h2,hc))*mtotgqbv(hb,h2,hc)) enddo enddo enddo C---fill qb-q, gg and q-qb elements do j=-nf,nf do k=-nf,nf if ((j .gt. 0) .and. (k .eq. 0)) then msq(j,k)=Vsum(j)*msq_qg elseif ((j .lt. 0) .and. (k .eq. 0)) then msq(j,k)=Vsum(j)*msq_qbg elseif ((j .eq. 0) .and. (k .gt. 0)) then msq(j,k)=Vsum(k)*msq_gq elseif ((j .eq. 0) .and. (k .lt. 0)) then msq(j,k)=Vsum(k)*msq_gqb endif enddo enddo return end
# Week 4. Training Issues

In this part, we will formally set up a simple but powerful classification network to recognize the digits 0-9 in the MNIST dataset. Yep, we will build a classification network and train it from scratch, and we will introduce some techniques to improve the performance of the trained model.

This part is designed and completed by **Jiaxin Zhuang ([email protected])** and Feifei Xue ([email protected]); if you have some questions about this part or think there are still some things to improve, don't hesitate to email us or add our WeChat.

# Outline

1. Outline
    1. Required modules (if you use your own computer, just pip install them!)
    2. Common setup
2. Classification network
    1. Short introduction of MNIST
    2. Define a feedforward neural network
3. Training, including defining a model, a loss function, a metric and data augmentation for the training data
    1. Pre-set hyper-parameters
    2. Initialize model parameters
    3. Repeat over a certain number of epochs
        1. Shuffle the whole training data
        2. For each mini-batch of data
            1. load the mini-batch data
            2. compute the gradient of the loss over the parameters
            3. update the parameters with gradient descent
    4. Save the model
4. Training advanced
    1. l2_norm
    2. dropout
    3. batch_normalization
    4. data augmentation
5. Visualization of the training and validation phases
    1. Add a tensorboardX writer to write summaries into tensorboard
    2. Download your file locally
    3. Run tensorboard on your PC and open http://localhost:6666 to browse the tensorboard
6. Gradients
    1. Gradient vanishing
    2. Gradient exploding

```python
%load_ext autoreload
%autoreload 2
```

# 1.1 Required Modules

[numpy](https://docs.scipy.org/doc/numpy-1.13.0/user/whatisnumpy.html): NumPy is the fundamental package for scientific computing in Python.

[pytorch](https://pytorch.org/docs/stable/index.html): An end-to-end deep learning platform.

[torchvision](https://pytorch.org/docs/stable/torchvision/index.html): This package consists of popular datasets, model architectures, and common image transformations for computer vision.

[tensorflow](https://www.tensorflow.org/guide): An open source machine learning framework.

[tensorboard](https://www.tensorflow.org/guide/summaries_and_tensorboard): A suite of visualization tools that makes it easier to understand, debug, and optimize TensorFlow programs.

[tensorboardX](https://tensorboardx.readthedocs.io/en/latest/tensorboard.html): Tensorboard for Pytorch.

[matplotlib](https://matplotlib.org/): A Python 2D plotting library which produces publication-quality figures in a variety of hardcopy formats and interactive environments across platforms.

# 1.2 Common Setup

```python
# Load all necessary modules here, for clearness
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
# from torchvision.datasets import MNIST
import torchvision
from torchvision import transforms
from torch.optim import lr_scheduler
from tensorboardX import SummaryWriter
from collections import OrderedDict
import matplotlib.pyplot as plt
from tqdm import tqdm
```

```python
# Whether to put data on the GPU, depending on whether a GPU is available
# cuda = torch.cuda.is_available()
# In case the default gpu does not have enough space, you can choose which device to use
# torch.cuda.set_device(device) # device: id
# Since there aren't enough GPUs in the lab for everyone, we stick to CPU computation
cuda = torch.device('cpu')
```
# 2. Classification Model

We will define a simple feedforward neural network to classify MNIST.

## 2.1 Short Introduction of MNIST

The MNIST database (Modified National Institute of Standards and Technology database) is a large database of handwritten digits that is commonly used for training various image processing systems. The MNIST database contains 60,000 training images and 10,000 testing images; each class has roughly 6,000 training images and 1,000 test images. Each image is 28x28 pixels, and they look like the images below.

# 2.2 Define a FeedForward Neural Network

We will define a feedforward neural network with **3 hidden layers**. Each layer is followed by an activation function; we will try **sigmoid** and **relu** respectively. For simplicity, each hidden layer has the same number of neurons. In practice, however, we would often use different numbers of neurons in different hidden layers.

## 2.2.1 Activation Function

There are many useful activation functions and you can choose one of them to use. Usually we use **relu** as the activation function of our network.

#### 2.2.1.1 ReLU

Applies the rectified linear unit function element-wise:

\begin{equation} ReLU(x) = max(0, x) \end{equation}

#### 2.2.1.2 Sigmoid

Applies the element-wise function:

\begin{equation} Sigmoid(x)=\frac{1}{1+e^{-x}} \end{equation}

## 2.2.2 Network's Input and Output

Inputs: for every batch

>\[batchSize, channels, height, width\] -> [B,C,H,W]

Outputs: prediction scores for each image, e.g. \[0.001, 0.0034, ..., 0.3\]

> \[batchSize, classes\]

Network Structure

```
Inputs               Linear/Function        Output
[128, 1, 28, 28]  -> Linear(28*28, 100)  -> [128, 100]  # first hidden layer
                  -> ReLU                -> [128, 100]  # relu activation function, or sigmoid
                  -> Linear(100, 100)    -> [128, 100]  # second hidden layer
                  -> ReLU                -> [128, 100]  # relu activation function, or sigmoid
                  -> Linear(100, 100)    -> [128, 100]  # third hidden layer
                  -> ReLU                -> [128, 100]  # relu activation function, or sigmoid
                  -> Linear(100, 10)     -> [128, 10]   # classification layer
```
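As a quick sanity check of the two activations defined in 2.2.1 (a minimal sketch of our own using `torch.nn.functional`, not part of the original notebook):

```python
import torch
import torch.nn.functional as F

x = torch.linspace(-5, 5, 5)  # a few sample inputs
print(F.relu(x))              # max(0, x): negatives are zeroed out
print(torch.sigmoid(x))       # 1 / (1 + exp(-x)): squashed into (0, 1)
```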
```python
class FeedForwardNeuralNetwork(nn.Module):
    """
    Inputs               Linear/Function        Output
    [128, 1, 28, 28]  -> Linear(28*28, 100)  -> [128, 100]  # first hidden layer
                      -> ReLU                -> [128, 100]  # relu activation function, or sigmoid
                      -> Linear(100, 100)    -> [128, 100]  # second hidden layer
                      -> ReLU                -> [128, 100]  # relu activation function, or sigmoid
                      -> Linear(100, 100)    -> [128, 100]  # third hidden layer
                      -> ReLU                -> [128, 100]  # relu activation function, or sigmoid
                      -> Linear(100, 10)     -> [128, 10]   # classification layer
    """
    def __init__(self, input_size, hidden_size, output_size, activation_function='RELU'):
        super(FeedForwardNeuralNetwork, self).__init__()
        self.use_dropout = False
        self.use_bn = False
        self.hidden1 = nn.Linear(input_size, hidden_size)   # Linear function 1: 784 --> 100
        self.hidden2 = nn.Linear(hidden_size, hidden_size)  # Linear function 2: 100 --> 100
        self.hidden3 = nn.Linear(hidden_size, hidden_size)  # Linear function 3: 100 --> 100
        # Linear function 4 (readout): 100 --> 10
        self.classification_layer = nn.Linear(hidden_size, output_size)
        self.dropout = nn.Dropout(p=0.5)               # Dropout with prob = 0.5
        self.hidden1_bn = nn.BatchNorm1d(hidden_size)  # Batch Normalization
        self.hidden2_bn = nn.BatchNorm1d(hidden_size)
        self.hidden3_bn = nn.BatchNorm1d(hidden_size)

        # Non-linearity
        if activation_function == 'SIGMOID':
            self.activation_function1 = nn.Sigmoid()
            self.activation_function2 = nn.Sigmoid()
            self.activation_function3 = nn.Sigmoid()
        elif activation_function == 'RELU':
            self.activation_function1 = nn.ReLU()
            self.activation_function2 = nn.ReLU()
            self.activation_function3 = nn.ReLU()

    def forward(self, x):
        """Defines the computation performed at every call.
        Should be overridden by all subclasses.

        Args:
            x: [batch_size, channel, height, width], input for network

        Returns:
            out: [batch_size, n_classes], output from network
        """
        x = x.view(x.size(0), -1)  # flatten x into [128, 784]
        out = self.hidden1(x)
        out = self.activation_function1(out)  # Non-linearity 1
        if self.use_bn == True:
            out = self.hidden1_bn(out)
        out = self.hidden2(out)
        out = self.activation_function2(out)
        if self.use_bn == True:
            out = self.hidden2_bn(out)
        out = self.hidden3(out)
        if self.use_bn == True:
            out = self.hidden3_bn(out)
        out = self.activation_function3(out)
        if self.use_dropout == True:
            out = self.dropout(out)
        out = self.classification_layer(out)
        return out

    def set_use_dropout(self, use_dropout):
        """Whether to use dropout. Auxiliary function for our exp, not necessary.

        Args:
            use_dropout: True, False
        """
        self.use_dropout = use_dropout

    def set_use_bn(self, use_bn):
        """Whether to use batch normalization. Auxiliary function for our exp, not necessary.

        Args:
            use_bn: True, False
        """
        self.use_bn = use_bn

    def get_grad(self):
        """Return average grad for hidden2, hidden3. Auxiliary function for our exp, not necessary.
        """
        hidden2_average_grad = np.mean(np.sqrt(np.square(self.hidden2.weight.grad.detach().numpy())))
        hidden3_average_grad = np.mean(np.sqrt(np.square(self.hidden3.weight.grad.detach().numpy())))
        return hidden2_average_grad, hidden3_average_grad
```

# 3. Training

We define the training function here. Additionally, the hyper-parameters, loss function and metric are included here too.

## 3.1 Pre-set hyper-parameters

We set the hyper-parameters as below. The hyper-parameters include the following:

* learning rate: usually we start from a fairly large lr like 1e-1, 1e-2, 1e-3, and decay it as training progresses.
* n_epochs: the number of training epochs must be set large enough so the model has time to converge. Usually, we set a fairly large number of epochs for the first training run.
* batch_size: usually, a bigger batch size means better usage of the GPU, and the model needs fewer epochs to converge. Powers of 2 are used, e.g. 2, 4, 8, 16, 32, 64, 128, 256.

```python
### Hyper parameters
batch_size = 128      # batch size is 128
n_epochs = 5          # train for 5 epochs
learning_rate = 0.01  # learning rate is 0.01
input_size = 28*28    # input image has size 28x28
hidden_size = 100     # hidden neurons are 100 for each layer
output_size = 10      # classes of prediction
l2_norm = 0           # do not use l2 penalty
dropout = False       # do not use dropout
get_grad = False      # do not obtain grad
```

```python
# create a model object
model = FeedForwardNeuralNetwork(input_size=input_size, hidden_size=hidden_size, output_size=output_size)
# Cross entropy
loss_fn = torch.nn.CrossEntropyLoss()
# l2_norm can be done in SGD
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, weight_decay=l2_norm)
```

## 3.2 Initialize model parameters

PyTorch provides a default initialization (**uniform initialization**) for linear layers, but there are still other useful initialization methods. Read more about initialization from this [link](https://pytorch.org/docs/stable/_modules/torch/nn/init.html)

```
torch.nn.init.normal_
torch.nn.init.uniform_
torch.nn.init.constant_
torch.nn.init.eye_
torch.nn.init.xavier_uniform_
torch.nn.init.xavier_normal_
torch.nn.init.kaiming_uniform_
```

```python
def show_weight_bias(model):
    """Show the weight and bias distributions of every layer in the model.
    !!YOU CAN READ THIS CODE LATER!!
""" # Create a figure and a set of subplots fig, axs = plt.subplots(2,3, sharey=False, tight_layout=True) # weight and bias for every hidden layer h1_w = model.hidden1.weight.detach().numpy().flatten() h1_b = model.hidden1.bias.detach().numpy().flatten() h2_w = model.hidden2.weight.detach().numpy().flatten() h2_b = model.hidden2.bias.detach().numpy().flatten() h3_w = model.hidden3.weight.detach().numpy().flatten() h3_b = model.hidden3.bias.detach().numpy().flatten() axs[0,0].hist(h1_w) axs[0,1].hist(h2_w) axs[0,2].hist(h3_w) axs[1,0].hist(h1_b) axs[1,1].hist(h2_b) axs[1,2].hist(h3_b) # set title for every sub plots axs[0,0].set_title('hidden1_weight') axs[0,1].set_title('hidden2_weight') axs[0,2].set_title('hidden3_weight') axs[1,0].set_title('hidden1_bias') axs[1,1].set_title('hidden2_bias') axs[1,2].set_title('hidden3_bias') ``` ```python # Show default initialization for every hidden layer by pytorch # it's uniform distribution show_weight_bias(model) ``` ```python # If you want to use other intialization method, you can use code below # and define your initialization below def weight_bias_reset(model): """Custom initialization, you can use your favorable initialization method. """ for m in model.modules(): if isinstance(m, nn.Linear): # initialize linear layer with mean and std mean, std = 0, 0.1 # Initialization method torch.nn.init.normal_(m.weight, mean, std) torch.nn.init.normal_(m.bias, mean, std) # Another way to initialize # m.weight.data.normal_(mean, std) # m.bias.data.normal_(mean, std) ``` ```python weight_bias_reset(model) # reset parameters for each hidden layer show_weight_bias(model) # show weight and bias distribution, normal distribution now. ``` ## 作业1 使用 torch.nn.init.constant, torch.nn.init.xavier_uniform_, torch.nn.init_xavier_normal_去重写初始化函数,使用对应函数初始化模型,并且使用show_weight_bias显示模型隐藏层的参数分布。此处应该有6个cell作答。!!不必初始化bias!! ```python # TODO def weight_bias_reset_constant(model): """Constant initalization """ for m in model.modules(): if isinstance(m, nn.Linear): # remove pass and code here pass ``` ```python # TODO # Reset parameters and show their distribution ``` ```python # TODO def weight_bias_reset_xavier_uniform(model): """xaveir_uniform, gain=1 """ for m in model.modules(): if isinstance(m, nn.Linear): # remove pass and code here pass ``` ```python # TODO # Reset parameters and show their distribution ``` ```python # TODO def weight_bias_reset_kaiming_uniform(model): """kaiming_uniform, a=0,model='fan_in', non_linearity='relu' """ for m in model.modules(): if isinstance(m, nn.Linear): # remove pass and code here pass ``` ```python # TODO # Reset parameters and show their distribution ``` ## 3.3 Repeat over certain numbers of epoch * Shuffle whole training data ```shuffle train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, **kwargs) ``` * For each mini-batch data * load mini-batch data ``` for batch_idx, (data, target) in enumerate(train_loader): \ ... ``` * compute gradient of loss over parameters ``` output = net(data) # make prediction loss = loss_fn(output, target) # compute loss loss.backward() # compute gradient of loss over parameters ``` * update parameters with gradient descent ``` optimzer.step() # update parameters with gradient descent ``` ### 3.3.1 Shuffle whole traning data #### 3.3.1.1 Data Loading Please pay attention to data augmentation. Read more data augmentation method from this [link](https://pytorch.org/docs/stable/torchvision/transforms.html). 
``` torchvision.transforms.RandomVerticalFlip torchvision.transforms.RandomHorizontalFlip ... ``` ```python # define method of preprocessing data for evaluating train_transform = transforms.Compose([ transforms.ToTensor(), # Convert a PIL Image or numpy.ndarray to tensor. # Normalize a tensor image with mean 0.1307 and standard deviation 0.3081 transforms.Normalize((0.1307,), (0.3081,)) ]) test_transform = transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,)) ]) ``` ```python # use MNIST provided by torchvision # torchvision.datasets provide MNIST dataset for classification train_dataset = torchvision.datasets.MNIST(root='./data', train=True, transform=train_transform, download=True) test_dataset = torchvision.datasets.MNIST(root='./data', train=False, transform=test_transform, download=False) ``` ```python # pay attention to this, train_dataset doesn't load any data # It just defined some method and store some message to preprocess data train_dataset ``` Dataset MNIST Number of datapoints: 60000 Split: train Root Location: ./data Transforms (if any): Compose( ToTensor() Normalize(mean=(0.1307,), std=(0.3081,)) ) Target Transforms (if any): None ```python # Data loader. # Combines a dataset and a sampler, # and provides single- or multi-process iterators over the dataset. train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=False) test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False) ``` ```python # functions to show an image def imshow(img): """show some imgs in datasets !!YOU CAN READ THIS CODE LATER!! """ npimg = img.numpy() # convert tensor to numpy plt.imshow(np.transpose(npimg, (1, 2, 0))) # [channel, height, width] -> [height, width, channel] plt.show() ``` ```python # get some random training images by batch dataiter = iter(train_loader) images, labels = dataiter.next() # get a batch of images # show images imshow(torchvision.utils.make_grid(images)) ``` ### 3.3.2 & 3.3.3 compute gradient of loss over parameters & update parameters with gradient descent ```python def train(train_loader, model, loss_fn, optimizer, get_grad=False): """train model using loss_fn and optimizer. When thid function is called, model trains for one epoch. Args: train_loader: train data model: prediction model loss_fn: loss function to judge the distance between target and outputs optimizer: optimize the loss function get_grad: True, False Returns: total_loss: loss average_grad2: average grad for hidden 2 in this epoch average_grad3: average grad for hidden 3 in this epoch """ # set the module in training model, affecting module e.g., Dropout, BatchNorm, etc. 
    model.train()

    total_loss = 0
    grad_2 = 0.0  # accumulate grad for the hidden 2 layer
    grad_3 = 0.0  # accumulate grad for the hidden 3 layer

    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()            # clear gradients of all optimized torch.Tensors
        outputs = model(data)            # make predictions
        loss = loss_fn(outputs, target)  # compute loss
        total_loss += loss.item()        # accumulate every batch loss in an epoch
        loss.backward()                  # compute gradient of loss over parameters

        if get_grad == True:
            g2, g3 = model.get_grad()    # get grad for hidden 2 and 3 layers in this batch
            grad_2 += g2                 # accumulate grad for hidden 2
            grad_3 += g3                 # accumulate grad for hidden 3

        optimizer.step()                 # update parameters with gradient descent

    # note: we divide by the number of batches, len(train_loader),
    # not by batch_idx, which stops at len(train_loader) - 1
    average_loss = total_loss / len(train_loader)  # average loss in this epoch
    average_grad2 = grad_2 / len(train_loader)     # average grad for hidden 2 in this epoch
    average_grad3 = grad_3 / len(train_loader)     # average grad for hidden 3 in this epoch

    return average_loss, average_grad2, average_grad3
```

```python
def evaluate(loader, model, loss_fn):
    """Test the model's prediction performance on loader. When this function
    is called, the model is evaluated.

    Args:
        loader: data for evaluation
        model: prediction model
        loss_fn: loss function to judge the distance between target and outputs

    Returns:
        total_loss
        accuracy
    """
    # context manager that disables gradient computation
    with torch.no_grad():
        # set the module to evaluation mode
        model.eval()

        correct = 0.0   # count correctly classified samples
        total_loss = 0  # accumulate loss

        for batch_idx, (data, target) in enumerate(loader):
            outputs = model(data)  # make predictions
            # return the maximum value of each row of the input tensor in the
            # given dimension dim; the second return value is the index location
            # of each maximum value found (argmax)
            _, predicted = torch.max(outputs, 1)
            # Detach: returns a new Tensor, detached from the current graph.
            # The result will never require gradient.
            correct += (predicted == target).sum().detach().numpy()
            loss = loss_fn(outputs, target)  # compute loss
            total_loss += loss.item()        # accumulate every batch loss in an epoch

        accuracy = correct*100.0 / len(loader.dataset)  # accuracy in an epoch

    return total_loss, accuracy
```

Define the function fit, which uses train and evaluate above.

```python
def fit(train_loader, val_loader, model, loss_fn, optimizer, n_epochs, get_grad=False):
    """Train and validate the model here; we use train to train the model and
    evaluate to assess its prediction performance.

    Args:
        train_loader: train data
        val_loader: validation data
        model: prediction model
        loss_fn: loss function to judge the distance between target and outputs
        optimizer: optimizes the loss function
        n_epochs: training epochs
        get_grad: whether to get the grads of the hidden2 and hidden3 layers

    Returns:
        train_accs: accuracy over the n_epochs, a list
        train_losses: loss over the n_epochs, a list
    """
    grad_2 = []        # save grad for hidden 2 every epoch
    grad_3 = []        # save grad for hidden 3 every epoch
    train_accs = []    # save train accuracy every epoch
    train_losses = []  # save train loss every epoch

    for epoch in range(n_epochs):  # train for n_epochs
        # train the model on the training dataset, optimize the loss function and update the model parameters
        train_loss, average_grad2, average_grad3 = train(train_loader, model, loss_fn, optimizer, get_grad)

        # evaluate the model performance on the train dataset
        _, train_accuracy = evaluate(train_loader, model, loss_fn)
        message = 'Epoch: {}/{}. Train set: Average loss: {:.4f}, Accuracy: {:.4f}'.format(epoch+1, \
                    n_epochs, train_loss, train_accuracy)
        print(message)

        # save loss, accuracy, grad
        train_accs.append(train_accuracy)
        train_losses.append(train_loss)
        grad_2.append(average_grad2)
        grad_3.append(average_grad3)

        # evaluate the model performance on the val dataset
        val_loss, val_accuracy = evaluate(val_loader, model, loss_fn)
        message = 'Epoch: {}/{}. Validation set: Average loss: {:.4f}, Accuracy: {:.4f}'.format(epoch+1, \
                    n_epochs, val_loss, val_accuracy)
        print(message)

    # Whether to plot the grads
    if get_grad == True:
        fig, ax = plt.subplots()  # add a set of subplots to this figure
        ax.plot(grad_2, label='Gradient for Hidden 2 Layer')  # plot grad 2
        ax.plot(grad_3, label='Gradient for Hidden 3 Layer')  # plot grad 3
        plt.ylim(top=0.004)
        # place a legend on the axes
        legend = ax.legend(loc='best', shadow=True, fontsize='x-large')

    return train_accs, train_losses
```

```python
def show_curve(ys, title):
    """Plot the curve of loss or accuracy.
    !!YOU CAN READ THIS LATER, if you are interested!!

    Args:
        ys: loss or acc list
        title: Loss or Accuracy
    """
    x = np.array(range(len(ys)))
    y = np.array(ys)
    plt.plot(x, y, c='b')
    plt.axis()
    plt.title('{} Curve:'.format(title))
    plt.xlabel('Epoch')
    plt.ylabel('{} Value'.format(title))
    plt.show()
```

## Assignment 2

1. Run the fit function once and, based on the training-set accuracy at the end of training, answer: has the model been trained to the point of overfitting?
2. Use the provided show_curve function to plot how the loss and accuracy change during training.

Hints: because Jupyter keeps variables in context, the model and optimizer need to be re-declared. You can use the following code to redefine the model and optimizer. Note that the default initialization is used here.

```python
### Hyper parameters
batch_size = 128      # batch size is 128
n_epochs = 5          # train for 5 epochs
learning_rate = 0.01  # learning rate is 0.01
input_size = 28*28    # input image has size 28x28
hidden_size = 100     # hidden neurons are 100 for each layer
output_size = 10      # classes of prediction
l2_norm = 0           # do not use l2 penalty
dropout = False       # do not use dropout
get_grad = False      # do not obtain grad

# declare a model
model = FeedForwardNeuralNetwork(input_size=input_size, hidden_size=hidden_size, output_size=output_size)
# Cross entropy
loss_fn = torch.nn.CrossEntropyLoss()
# l2_norm can be done in SGD
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, weight_decay=l2_norm)
```

```python
train_accs, train_losses = fit(train_loader, test_loader, model, loss_fn, optimizer, n_epochs, get_grad)
```

```python
# TODO
# show curve
```

## Assignment 3

1. Set n_epochs to 10 and observe whether the model can reach overfitting on the training set; plot it with show_curve.
2. If we want the model to overfit the training set within 5 epochs, we can achieve this by adjusting the learning rate appropriately. Choose a suitable learning rate, train the model, and use show_curve to plot the results and validate your learning rate.

Hints: because Jupyter keeps variables in context, the model and optimizer need to be re-declared. You can use the following code to redefine the model and optimizer. Note that the default initialization is used here.

```python
### Hyper parameters
batch_size = 128      # batch size is 128
n_epochs = 5          # train for 5 epochs
learning_rate = 0.01  # learning rate is 0.01
input_size = 28*28    # input image has size 28x28
hidden_size = 100     # hidden neurons are 100 for each layer
output_size = 10      # classes of prediction
l2_norm = 0           # do not use l2 penalty
dropout = False       # do not use dropout
get_grad = False      # do not obtain grad

# declare a model
model = FeedForwardNeuralNetwork(input_size=input_size, hidden_size=hidden_size, output_size=output_size)
# Cross entropy
loss_fn = torch.nn.CrossEntropyLoss()
# l2_norm can be done in SGD
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, weight_decay=l2_norm)
```

```python
# TODO
# 3.1 Train
```

```python
# TODO
# 3.1 show_curve
```

```python
# TODO
# 3.2 Train
```

```python
# TODO
# 3.2 show_curve
```

### 3.4 Save model

PyTorch provides two kinds of methods to save a model. We recommend the method that only saves parameters,
### 3.4 save model

PyTorch provides two ways to save a model. We recommend the one that saves only the parameters, because it is more flexible and does not rely on a fixed model definition. When saving parameters, we save not only the **learnable parameters in the model**, but also the **learnable parameters in the optimizer**.

A common PyTorch convention is to save models using either a .pt or .pth file extension.

Read more about saving and loading from this [link](https://pytorch.org/tutorials/beginner/saving_loading_models.html)

```python
# show parameters in model

# Print model's state_dict
print("Model's state_dict:")
for param_tensor in model.state_dict():
    print(param_tensor, "\t", model.state_dict()[param_tensor].size())

# Print optimizer's state_dict
print("\nOptimizer's state_dict:")
for var_name in optimizer.state_dict():
    print(var_name, "\t", optimizer.state_dict()[var_name])
```

    Model's state_dict:
    hidden1.weight 	 torch.Size([100, 784])
    hidden1.bias 	 torch.Size([100])
    hidden2.weight 	 torch.Size([100, 100])
    hidden2.bias 	 torch.Size([100])
    hidden3.weight 	 torch.Size([100, 100])
    hidden3.bias 	 torch.Size([100])
    classification_layer.weight 	 torch.Size([10, 100])
    classification_layer.bias 	 torch.Size([10])
    hidden1_bn.weight 	 torch.Size([100])
    hidden1_bn.bias 	 torch.Size([100])
    hidden1_bn.running_mean 	 torch.Size([100])
    hidden1_bn.running_var 	 torch.Size([100])
    hidden1_bn.num_batches_tracked 	 torch.Size([])
    hidden2_bn.weight 	 torch.Size([100])
    hidden2_bn.bias 	 torch.Size([100])
    hidden2_bn.running_mean 	 torch.Size([100])
    hidden2_bn.running_var 	 torch.Size([100])
    hidden2_bn.num_batches_tracked 	 torch.Size([])
    hidden3_bn.weight 	 torch.Size([100])
    hidden3_bn.bias 	 torch.Size([100])
    hidden3_bn.running_mean 	 torch.Size([100])
    hidden3_bn.running_var 	 torch.Size([100])
    hidden3_bn.num_batches_tracked 	 torch.Size([])

    Optimizer's state_dict:
    state 	 {}
    param_groups 	 [{'lr': 0.01, 'momentum': 0, 'dampening': 0, 'weight_decay': 0, 'nesterov': False, 'params': [...]}]

```python
# save model
save_path = './model.pt'
torch.save(model.state_dict(), save_path)
```

```python
# load parameters from the file
saved_parameters = torch.load(save_path)
print(saved_parameters)
```

    OrderedDict([('hidden1.weight', tensor([[-0.0056,  0.0096,  0.0256,  ...]])),
                 ('hidden1.bias', tensor([...])),
                 ...
                 ('hidden3_bn.num_batches_tracked', tensor(0))])
    (full tensor printout omitted)
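To resume training later, a common PyTorch pattern (a sketch, not part of the original notebook) is to checkpoint the optimizer state together with the model parameters:

```python
# Hypothetical checkpoint helper: save model and optimizer state together
# so that training can be resumed later.
checkpoint_path = './checkpoint.pt'
torch.save({
    'model_state_dict': model.state_dict(),
    'optimizer_state_dict': optimizer.state_dict(),
}, checkpoint_path)

checkpoint = torch.load(checkpoint_path)
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
```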
```python
# initialize a model with the saved parameters
new_model = FeedForwardNeuralNetwork(input_size, hidden_size, output_size)
new_model.load_state_dict(saved_parameters)
```

## Assignment 4

Use the `evaluate` function to measure new_model's accuracy and loss on test_loader.

```python
# TODO
# test your model's prediction performance
# new_test_loss, new_test_accuracy = evaluate(test_loader, new_model, loss_fn)
# message = 'Average loss: {:.4f}, Accuracy: {:.4f}'.format(new_test_loss, new_test_accuracy)
# print(message)
```

## 4. Training Advanced

### 4.1 l2_norm

We can add the regularization term below to the loss through the $weight\_decay$ argument of the **SGD optimizer**:

\begin{equation}
L_{\mathrm{norm}} = \sum_{i=1}^{m}{\theta_{i}^{2}}
\end{equation}

Set l2_norm=0.01, then train and observe:

```python
### Hyper parameters
batch_size = 128
n_epochs = 5
learning_rate = 0.01
input_size = 28*28
hidden_size = 100
output_size = 10
l2_norm = 0.01    # use the l2 penalty
get_grad = False

# declare a model
model = FeedForwardNeuralNetwork(input_size=input_size, hidden_size=hidden_size, output_size=output_size)
# Cross entropy
loss_fn = torch.nn.CrossEntropyLoss()
# l2_norm can be done in SGD
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, weight_decay=l2_norm)
```

```python
train_accs, train_losses = fit(train_loader, test_loader, model, loss_fn, optimizer, n_epochs, get_grad)
```

    Epoch: 1/5. Train set: Average loss: 1.9009, Accuracy: 74.3017
    Epoch: 1/5. Validation set: Average loss: 74.4788, Accuracy: 74.6000
    Epoch: 2/5. Train set: Average loss: 0.6102, Accuracy: 87.1717
    Epoch: 2/5. Validation set: Average loss: 34.3180, Accuracy: 87.7700
    Epoch: 3/5. Train set: Average loss: 0.3998, Accuracy: 89.2317
    Epoch: 3/5. Validation set: Average loss: 28.3469, Accuracy: 89.4900
    Epoch: 4/5. Train set: Average loss: 0.3501, Accuracy: 90.2517
    Epoch: 4/5. Validation set: Average loss: 25.7806, Accuracy: 90.4200
    Epoch: 5/5. Train set: Average loss: 0.3222, Accuracy: 90.9783
    Epoch: 5/5. Validation set: Average loss: 23.9897, Accuracy: 91.0600

## Assignment 5

Think about how the weight of the regularization term in the loss affects training. Train the model with l2_norm = 1.

Hints: because Jupyter keeps variables in a shared context, the model and optimizer need to be re-declared. You can use the following code to redefine them. Note that the default initialization is used here.

```python
# TODO
### Hyper parameters

# declare a model

# l2_norm can be done in SGD
```

```python
# TODO
# Train
```

### 4.2 dropout

During training, dropout randomly zeroes some of the elements of the input tensor with probability p, using samples from a Bernoulli distribution. Each channel is zeroed out independently on every forward call.
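A minimal sketch of this behavior with `torch.nn.Dropout` directly (the tutorial's model toggles dropout via `set_use_dropout`, whose definition is not shown in this section):

```python
import torch
import torch.nn as nn

drop = nn.Dropout(p=0.5)  # each element is zeroed with probability 0.5
x = torch.ones(8)

drop.train()    # training mode: elements are dropped and the survivors
print(drop(x))  # are scaled by 1 / (1 - p) = 2.0

drop.eval()     # evaluation mode: dropout acts as the identity
print(drop(x))  # prints all ones
```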
Hints: because Jupyter keeps variables in a shared context, the model and optimizer need to be re-declared. You can use the following code to redefine them. Note that the default initialization is used here.

```python
### Hyper parameters
batch_size = 128
n_epochs = 5
learning_rate = 0.01
input_size = 28*28
hidden_size = 100
output_size = 10
l2_norm = 0       # do not use the l2 penalty
get_grad = False

# declare a model
model = FeedForwardNeuralNetwork(input_size=input_size, hidden_size=hidden_size, output_size=output_size)
# Cross entropy
loss_fn = torch.nn.CrossEntropyLoss()
# l2_norm can be done in SGD
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, weight_decay=l2_norm)
```

```python
# Set dropout to True with probability p = 0.5
model.set_use_dropout(True)
```

```python
train_accs, train_losses = fit(train_loader, test_loader, model, loss_fn, optimizer, n_epochs, get_grad)
```

    Epoch: 1/5. Train set: Average loss: 1.8824, Accuracy: 80.4967
    Epoch: 1/5. Validation set: Average loss: 69.8040, Accuracy: 81.0000
    Epoch: 2/5. Train set: Average loss: 0.7348, Accuracy: 87.4400
    Epoch: 2/5. Validation set: Average loss: 33.8798, Accuracy: 87.6500
    Epoch: 3/5. Train set: Average loss: 0.5086, Accuracy: 89.5917
    Epoch: 3/5. Validation set: Average loss: 27.1355, Accuracy: 89.5500
    Epoch: 4/5. Train set: Average loss: 0.4285, Accuracy: 90.8233
    Epoch: 4/5. Validation set: Average loss: 23.7725, Accuracy: 90.8500
    Epoch: 5/5. Train set: Average loss: 0.3815, Accuracy: 91.8033
    Epoch: 5/5. Validation set: Average loss: 21.2870, Accuracy: 91.8900

### 4.3 batch_normalization

Batch normalization is a technique for improving the performance and stability of artificial neural networks:

\begin{equation}
y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} \cdot \gamma + \beta,
\end{equation}

where $\gamma$ and $\beta$ are learnable parameters.

Hints: because Jupyter keeps variables in a shared context, the model and optimizer need to be re-declared. You can use the following code to redefine them. Note that the default initialization is used here.

```python
### Hyper parameters
batch_size = 128
n_epochs = 5
learning_rate = 0.01
input_size = 28*28
hidden_size = 100
output_size = 10
l2_norm = 0       # do not use the l2 penalty
get_grad = False

# declare a model
model = FeedForwardNeuralNetwork(input_size=input_size, hidden_size=hidden_size, output_size=output_size)
# Cross entropy
loss_fn = torch.nn.CrossEntropyLoss()
# l2_norm can be done in SGD
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, weight_decay=l2_norm)
```

```python
model.set_use_bn(True)
```

```python
model.use_bn
```

    True

```python
train_accs, train_losses = fit(train_loader, test_loader, model, loss_fn, optimizer, n_epochs, get_grad)
```

    Epoch: 1/5. Train set: Average loss: 1.0694, Accuracy: 91.1317
    Epoch: 1/5. Validation set: Average loss: 35.9896, Accuracy: 91.4500
    Epoch: 2/5. Train set: Average loss: 0.3393, Accuracy: 94.3667
    Epoch: 2/5. Validation set: Average loss: 19.5889, Accuracy: 94.1300
    Epoch: 3/5. Train set: Average loss: 0.2161, Accuracy: 95.6933
    Epoch: 3/5. Validation set: Average loss: 14.6173, Accuracy: 95.3200
    Epoch: 4/5. Train set: Average loss: 0.1645, Accuracy: 96.5617
    Epoch: 4/5. Validation set: Average loss: 12.0746, Accuracy: 95.8500
    Epoch: 5/5. Train set: Average loss: 0.1339, Accuracy: 97.1417
    Epoch: 5/5. Validation set: Average loss: 10.6426, Accuracy: 96.2500
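A minimal sketch of the formula above using `torch.nn.BatchNorm1d` on its own (standalone, not tied to the tutorial's model): in training mode the layer normalizes with the current batch's statistics and updates its running estimates; in evaluation mode it uses the stored `running_mean` / `running_var`.

```python
import torch
import torch.nn as nn

bn = nn.BatchNorm1d(num_features=3)  # gamma = bn.weight, beta = bn.bias
x = torch.randn(16, 3) * 5 + 2       # batch with nonzero mean and non-unit variance

bn.train()
y = bn(x)                            # normalized with this batch's statistics
print(y.mean(dim=0))                 # approximately 0 per feature
print(y.std(dim=0, unbiased=False))  # approximately 1 per feature

bn.eval()                            # now uses the running statistics instead
print(bn.running_mean, bn.running_var)
```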
### 4.4 data augmentation

Data augmentation can enrich the training data and thereby improve generalization on the test set.

```python
# only add random horizontal flip
train_transform_1 = transforms.Compose([
    transforms.RandomHorizontalFlip(),
    # Convert a PIL Image or numpy.ndarray to tensor
    transforms.ToTensor(),
    # Normalize a tensor image with mean and standard deviation
    transforms.Normalize((0.1307,), (0.3081,))
])

# only add random crop
train_transform_2 = transforms.Compose([
    transforms.RandomCrop(size=[28,28], padding=4),
    # Convert a PIL Image or numpy.ndarray to tensor
    transforms.ToTensor(),
    # Normalize a tensor image with mean and standard deviation
    transforms.Normalize((0.1307,), (0.3081,))
])

# add random horizontal flip and random crop
train_transform_3 = transforms.Compose([
    transforms.RandomHorizontalFlip(),
    transforms.RandomCrop(size=[28,28], padding=4),
    # Convert a PIL Image or numpy.ndarray to tensor
    transforms.ToTensor(),
    # Normalize a tensor image with mean and standard deviation
    transforms.Normalize((0.1307,), (0.3081,))
])
```

```python
# reload train_loader using train_transform_1
train_dataset_1 = torchvision.datasets.MNIST(root='./data',
                                             train=True,
                                             transform=train_transform_1,
                                             download=False)

train_loader_1 = torch.utils.data.DataLoader(dataset=train_dataset_1,
                                             batch_size=batch_size,
                                             shuffle=True)
```

```python
print(train_dataset_1)
```

    Dataset MNIST
        Number of datapoints: 60000
        Split: train
        Root Location: ./data
        Transforms (if any): Compose(
                                 RandomHorizontalFlip(p=0.5)
                                 ToTensor()
                                 Normalize(mean=(0.1307,), std=(0.3081,))
                             )
        Target Transforms (if any): None

```python
### Hyper parameters
batch_size = 128
n_epochs = 5
learning_rate = 0.01
input_size = 28*28
hidden_size = 100
output_size = 10
l2_norm = 0       # do not use the l2 penalty
get_grad = False

# declare a model
model = FeedForwardNeuralNetwork(input_size=input_size, hidden_size=hidden_size, output_size=output_size)
# Cross entropy
loss_fn = torch.nn.CrossEntropyLoss()
# l2_norm can be done in SGD
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, weight_decay=l2_norm)
```

```python
train_accs, train_losses = fit(train_loader_1, test_loader, model, loss_fn, optimizer, n_epochs, get_grad)
```

    Epoch: 1/5. Train set: Average loss: 2.0063, Accuracy: 68.9483
    Epoch: 1/5. Validation set: Average loss: 91.0255, Accuracy: 69.8400
    Epoch: 2/5. Train set: Average loss: 0.7965, Accuracy: 79.6317
    Epoch: 2/5. Validation set: Average loss: 49.2182, Accuracy: 80.1000
    Epoch: 3/5. Train set: Average loss: 0.5844, Accuracy: 82.9250
    Epoch: 3/5. Validation set: Average loss: 40.5368, Accuracy: 83.4900
    Epoch: 4/5. Train set: Average loss: 0.5082, Accuracy: 85.2550
    Epoch: 4/5. Validation set: Average loss: 35.7995, Accuracy: 85.8400
    Epoch: 5/5. Train set: Average loss: 0.4541, Accuracy: 86.4417
    Epoch: 5/5. Validation set: Average loss: 32.2371, Accuracy: 87.0600

## Assignment 6

Use the provided train_transform_2 and train_transform_3 to reload train_loader, and train with fit.

Hints: because Jupyter keeps variables in a shared context, the model and optimizer need to be re-declared. Note that the default initialization is used here.

```python
# TODO
# train_transform_2
```

```python
# TODO
# train_transform_3
```

## 5. Visualization of the training and validation phase

We can use TensorBoard to visualize the training and test phase. You can find an example [here](https://github.com/lanpa/tensorboardX).

## 6. Gradient explosion and vanishing

We have embedded code that records the gradients of the hidden2 and hidden3 layers. By observing how these gradients change, we can see whether they are healthy or not.
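The model's `get_grad` method is defined earlier in the notebook. As a rough sketch of what such a helper might look like (an assumption, since the implementation is not shown in this section), one can summarize a layer's gradients by the mean absolute value of its weight gradients after `loss.backward()`:

```python
# Hypothetical helper mirroring model.get_grad(); call it after loss.backward().
# Assumes the model has nn.Linear attributes named hidden2 and hidden3.
def get_layer_grads(model):
    g2 = model.hidden2.weight.grad.abs().mean().item()
    g3 = model.hidden3.weight.grad.abs().mean().item()
    return g2, g3
```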
To plot the gradient changes, you need to **set get_grad=True** when calling the **fit function**.

```python
### Hyper parameters
batch_size = 128
n_epochs = 15
learning_rate = 0.01
input_size = 28*28
hidden_size = 100
output_size = 10
l2_norm = 0       # do not use the l2 penalty
get_grad = True

# declare a model
model = FeedForwardNeuralNetwork(input_size=input_size, hidden_size=hidden_size, output_size=output_size)
# Cross entropy
loss_fn = torch.nn.CrossEntropyLoss()
# l2_norm can be done in SGD
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, weight_decay=l2_norm)
```

```python
fit(train_loader, test_loader, model, loss_fn, optimizer, n_epochs, get_grad)
```

### 6.1.1 Gradient Vanishing

Set learning_rate to a tiny value, here 1e-20:

```python
### Hyper parameters
batch_size = 128
n_epochs = 15
learning_rate = 1e-20
input_size = 28*28
hidden_size = 100
output_size = 10
l2_norm = 0       # do not use the l2 penalty
get_grad = True

# declare a model
model = FeedForwardNeuralNetwork(input_size=input_size, hidden_size=hidden_size, output_size=output_size)
# Cross entropy
loss_fn = torch.nn.CrossEntropyLoss()
# l2_norm can be done in SGD
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, weight_decay=l2_norm)
```

```python
fit(train_loader, test_loader, model, loss_fn, optimizer, n_epochs, get_grad=get_grad)
```

### 6.1.2 Gradient Explosion

#### 6.1.2.1 learning rate

Set a large learning rate, here 1.0168:

```python
### Hyper parameters
batch_size = 128
n_epochs = 15
learning_rate = 1.0168
input_size = 28*28
hidden_size = 100
output_size = 10
l2_norm = 0       # do not use the l2 penalty
get_grad = True

# declare a model
model = FeedForwardNeuralNetwork(input_size=input_size, hidden_size=hidden_size, output_size=output_size)
# Cross entropy
loss_fn = torch.nn.CrossEntropyLoss()
# l2_norm can be done in SGD
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, weight_decay=l2_norm)
```

```python
fit(train_loader, test_loader, model, loss_fn, optimizer, n_epochs, get_grad=True)
```

#### 6.1.2.2 normalization for input data

#### 6.1.2.3 unsuitable weight initialization

```python
### Hyper parameters
batch_size = 128
n_epochs = 15
learning_rate = 1
input_size = 28*28
hidden_size = 100
output_size = 10
l2_norm = 0       # do not use the l2 penalty
get_grad = True

# declare a model
model = FeedForwardNeuralNetwork(input_size=input_size, hidden_size=hidden_size, output_size=output_size)
# Cross entropy
loss_fn = torch.nn.CrossEntropyLoss()
# l2_norm can be done in SGD
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, weight_decay=l2_norm)
```

```python
# re-initialize parameters with an unsuitably large scale
def wrong_weight_bias_reset(model):
    """Use a normal distribution with mean=0, std=1 to initialize the model's parameters."""
    for m in model.modules():
        if isinstance(m, nn.Linear):
            # initialize linear layers with the given mean and std
            mean, std = 0, 1
            # initialization method
            torch.nn.init.normal_(m.weight, mean, std)
            torch.nn.init.normal_(m.bias, mean, std)
```

```python
wrong_weight_bias_reset(model)
show_weight_bias(model)
```

```python
fit(train_loader, test_loader, model, loss_fn, optimizer, n_epochs, get_grad=True)
```

## References

1. [Training a Classifier](https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html)
2. [Save Model and Load Model](https://pytorch.org/tutorials/beginner/saving_loading_models.html)
3. [Visualize your training phase](https://github.com/lanpa/tensorboardX)
4. [Exploding and Vanishing Gradients](http://www.cs.toronto.edu/~rgrosse/courses/csc321_2017/readings/L15%20Exploding%20and%20Vanishing%20Gradients.pdf)
5. [Gradient disappearance and gradient explosion in neural network training](https://bzdww.com/article/19659/)
6. [tensorboardX](https://github.com/lanpa/tensorboardX)
from __future__ import print_function, absolute_import

import torch
import numpy as np

__all__ = ["angledifference", "accuracy", "cal_dice", "cal_all_metric"]


def angledifference(cosine_similarity):
    """Mean angular difference (in radians) computed from cosine similarities."""
    if torch.is_tensor(cosine_similarity):
        angdiff = torch.acos(cosine_similarity.clamp(-1, 1))
    else:
        angdiff = np.arccos(cosine_similarity)
    return angdiff.mean()


def cal_dice(input, target):
    """Dice coefficient between input and target, both binarized at 0.5."""
    smooth = 1e-3
    if torch.is_tensor(input):
        input_var = input.to(torch.float32)
    else:
        input_var = input.astype(np.float32)
    input_cal = 1.0 * (input_var > 0.5)
    target_cal = 1.0 * (target > 0.5)
    intersection = input_cal * target_cal
    dice = (2 * intersection.sum() + smooth) / (input_cal.sum() + target_cal.sum() + smooth)
    return dice


def cal_all_metric(input, target):
    """Calculate all metrics used in the segmentation task: Dice, precision, recall."""
    smooth = 1
    if torch.is_tensor(input):
        input_var = input.to(torch.float32)
    else:
        input_var = input.astype(np.float32)
    intersection_sum = (input_var * target).sum()
    input_sum = input_var.sum()
    target_sum = target.sum()
    dice = (2 * intersection_sum + smooth) / (input_sum + target_sum + smooth)
    precision = (intersection_sum + smooth) / (input_sum + smooth)
    recall = (intersection_sum + smooth) / (target_sum + smooth)
    return (dice, precision, recall)


def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k."""
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        correct_k = correct[:k].view(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
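# A minimal usage sketch for the accuracy helper above, with made-up tensors
# to illustrate the expected shapes (not part of the original module).
if __name__ == "__main__":
    output = torch.tensor([[0.1, 0.9, 0.0, 0.0, 0.0],
                           [0.8, 0.1, 0.1, 0.0, 0.0],
                           [0.0, 0.0, 0.2, 0.7, 0.1],
                           [0.3, 0.3, 0.4, 0.0, 0.0]])
    target = torch.tensor([1, 0, 3, 1])
    top1, top3 = accuracy(output, target, topk=(1, 3))
    print(top1, top3)  # top-1 = 75%, top-3 = 100% for these tensors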
[STATEMENT] lemma canon_rewriter_unique: assumes "rb_aux_inv1 bs" and "is_canon_rewriter rword (set bs) u a" and "is_canon_rewriter rword (set bs) u b" shows "a = b" [PROOF STATE] proof (prove) goal (1 subgoal): 1. a = b [PROOF STEP] proof - [PROOF STATE] proof (state) goal (1 subgoal): 1. a = b [PROOF STEP] from assms(1) [PROOF STATE] proof (chain) picking this: rb_aux_inv1 bs [PROOF STEP] have "inj_on lt (set bs)" [PROOF STATE] proof (prove) using this: rb_aux_inv1 bs goal (1 subgoal): 1. inj_on lt (set bs) [PROOF STEP] by (rule rb_aux_inv1_lt_inj_on) [PROOF STATE] proof (state) this: inj_on lt (set bs) goal (1 subgoal): 1. a = b [PROOF STEP] moreover [PROOF STATE] proof (state) this: inj_on lt (set bs) goal (1 subgoal): 1. a = b [PROOF STEP] from rword(1) assms(2, 3) [PROOF STATE] proof (chain) picking this: is_rewrite_ord rword is_canon_rewriter rword (set bs) u a is_canon_rewriter rword (set bs) u b [PROOF STEP] have "lt a = lt b" [PROOF STATE] proof (prove) using this: is_rewrite_ord rword is_canon_rewriter rword (set bs) u a is_canon_rewriter rword (set bs) u b goal (1 subgoal): 1. lt a = lt b [PROOF STEP] by (rule is_rewrite_ord_canon_rewriterD2) [PROOF STATE] proof (state) this: lt a = lt b goal (1 subgoal): 1. a = b [PROOF STEP] moreover [PROOF STATE] proof (state) this: lt a = lt b goal (1 subgoal): 1. a = b [PROOF STEP] from assms(2) [PROOF STATE] proof (chain) picking this: is_canon_rewriter rword (set bs) u a [PROOF STEP] have "a \<in> set bs" [PROOF STATE] proof (prove) using this: is_canon_rewriter rword (set bs) u a goal (1 subgoal): 1. a \<in> set bs [PROOF STEP] by (rule is_canon_rewriterD1) [PROOF STATE] proof (state) this: a \<in> set bs goal (1 subgoal): 1. a = b [PROOF STEP] moreover [PROOF STATE] proof (state) this: a \<in> set bs goal (1 subgoal): 1. a = b [PROOF STEP] from assms(3) [PROOF STATE] proof (chain) picking this: is_canon_rewriter rword (set bs) u b [PROOF STEP] have "b \<in> set bs" [PROOF STATE] proof (prove) using this: is_canon_rewriter rword (set bs) u b goal (1 subgoal): 1. b \<in> set bs [PROOF STEP] by (rule is_canon_rewriterD1) [PROOF STATE] proof (state) this: b \<in> set bs goal (1 subgoal): 1. a = b [PROOF STEP] ultimately [PROOF STATE] proof (chain) picking this: inj_on lt (set bs) lt a = lt b a \<in> set bs b \<in> set bs [PROOF STEP] show ?thesis [PROOF STATE] proof (prove) using this: inj_on lt (set bs) lt a = lt b a \<in> set bs b \<in> set bs goal (1 subgoal): 1. a = b [PROOF STEP] by (rule inj_onD) [PROOF STATE] proof (state) this: a = b goal: No subgoals! [PROOF STEP] qed
import .lovelib

/-! # LoVe Demo 6: Monads

Monads are an important functional programming abstraction. They generalize
computation with side effects. Haskell has shown that they can be used very
successfully to write imperative programs. For us, they are interesting in
their own right and for two more reasons:

* They provide a nice example of axiomatic reasoning.

* They are needed for programming Lean itself (metaprogramming, lecture 7). -/

set_option pp.beta true
set_option pp.generalized_field_notation false

namespace LoVe

/-! ## Introductory Example

Consider the following programming task:

    Implement a function `sum_2_5_7 ns` that sums up the second, fifth, and
    seventh items of a list `ns` of natural numbers. Use `option ℕ` for the
    result so that if the list has fewer than seven elements, you can return
    `option.none`.

A straightforward solution follows: -/

def sum_2_5_7 (ns : list ℕ) : option ℕ :=
match list.nth ns 1 with
| option.none    := option.none
| option.some n2 :=
  match list.nth ns 4 with
  | option.none    := option.none
  | option.some n5 :=
    match list.nth ns 6 with
    | option.none    := option.none
    | option.some n7 := option.some (n2 + n5 + n7)
    end
  end
end

/-! The code is ugly, because of all the pattern matching on options.

We can put all the ugliness in one function, which we call `connect`: -/

def connect {α : Type} {β : Type} : option α → (α → option β) → option β
| option.none     f := option.none
| (option.some a) f := f a

def sum_2_5_7₂ (ns : list ℕ) : option ℕ :=
connect (list.nth ns 1)
  (λn2, connect (list.nth ns 4)
    (λn5, connect (list.nth ns 6)
      (λn7, option.some (n2 + n5 + n7))))

/-! Instead of defining `connect` ourselves, we can use Lean's predefined
general `bind` operation. We can also use `pure` instead of `option.some`: -/

#check bind

def sum_2_5_7₃ (ns : list ℕ) : option ℕ :=
bind (list.nth ns 1)
  (λn2, bind (list.nth ns 4)
    (λn5, bind (list.nth ns 6)
      (λn7, pure (n2 + n5 + n7))))

/-! Syntactic sugar:

    `ma >>= f` := `bind ma f` -/

#check (>>=)

def sum_2_5_7₄ (ns : list ℕ) : option ℕ :=
list.nth ns 1 >>= λn2,
list.nth ns 4 >>= λn5,
list.nth ns 6 >>= λn7,
pure (n2 + n5 + n7)

/-! Syntactic sugar:

    `do a ← ma, t` := `ma >>= (λa, t)`
    `do ma, t`     := `ma >>= (λ_, t)` -/

def sum_2_5_7₅ (ns : list ℕ) : option ℕ :=
do n2 ← list.nth ns 1,
do n5 ← list.nth ns 4,
do n7 ← list.nth ns 6,
pure (n2 + n5 + n7)

/-! The `do`s can be combined: -/

def sum_2_5_7₆ (ns : list ℕ) : option ℕ :=
do n2 ← list.nth ns 1,
   n5 ← list.nth ns 4,
   n7 ← list.nth ns 6,
   pure (n2 + n5 + n7)

/-! Although the notation has an imperative flavor, the function is a pure
functional program.

## Two Operations and Three Laws

The `option` type constructor is an example of a monad. In general, a
__monad__ is a type constructor `m` that depends on some type parameter `α`
(i.e., `m α`) equipped with two distinguished operations:

    `pure {α : Type} : α → m α`
    `bind {α β : Type} : m α → (α → m β) → m β`

For `option`:

    `pure` := `option.some`
    `bind` := `connect`

Intuitively, we can think of a monad as a "box":

* `pure` puts the data into the box.

* `bind` allows us to access the data in the box and modify it (possibly even
  changing its type, since the result is an `m β` monad, not a `m α` monad).

There is no general way to extract the data from the monad, i.e., to obtain an
`α` from an `m α`.

To summarize, `pure a` provides no side effect and simply provides a box
containing the value `a`, whereas `bind ma f` (also written `ma >>= f`)
executes `ma`, then executes `f` with the boxed result `a` of `ma`.
The option monad is only one instance among many.

Type          | Effect
------------- | --------------------------------------------------------------
`id α`        | no effects
`option α`    | simple exceptions
`σ → α × σ`   | threading through a state of type `σ`
`set α`       | nondeterministic computation returning `α` values
`t → α`       | reading elements of type `t` (e.g., a configuration)
`ℕ × α`       | adjoining running time (e.g., to model algorithmic complexity)
`string × α`  | adjoining text output (e.g., for logging)
`prob α`      | probability (e.g., using random number generators)
`io α`        | interaction with the operating system
`tactic α`    | interaction with the proof assistant

All of the type constructors `m` above are parameterized by a type `α`. Some
effects can be combined (e.g., `option (t → α)`).

Some effects are not executable (e.g., `set α`, `prob α`). They are
nonetheless useful for modeling programs abstractly in the logic.

Specific monads may provide a way to extract the boxed value stored in the
monad without `bind`'s requirement of putting it back in a monad.

Monads have several benefits, including:

* They provide the convenient and highly readable `do` notation.

* They support generic operations, such as
  `mmap {α β : Type} : (α → m β) → list α → m (list β)`, which work uniformly
  across all monads.

The `bind` and `pure` operations are normally required to obey three laws.
Pure data as the first program can be simplified away:

    do a' ← pure a,
       f a'
  =
    f a

Pure data as the second program can be simplified away:

    do a ← ma,
       pure a
  =
    ma

Nested programs `ma`, `f`, `g` can be flattened using this associativity rule:

    do b ← do { a ← ma, f a },
       g b
  =
    do a ← ma,
       b ← f a,
       g b

## A Type Class of Monads

Monads are a mathematical structure, so we capture them as a type class. We
can think of a type class as a structure that is parameterized by a type, or
here, by a type constructor `m : Type → Type`. -/

@[class] structure lawful_monad (m : Type → Type)
  extends has_bind m, has_pure m :=
(pure_bind {α β : Type} (a : α) (f : α → m β) :
  (pure a >>= f) = f a)
(bind_pure {α : Type} (ma : m α) :
  (ma >>= pure) = ma)
(bind_assoc {α β γ : Type} (f : α → m β) (g : β → m γ) (ma : m α) :
  ((ma >>= f) >>= g) = (ma >>= (λa, f a >>= g)))

#print monad
#print is_lawful_monad

/-! Step by step:

* We are creating a structure parameterized by a unary type constructor `m`.

* The structure inherits the fields, and any syntactic sugar, from structures
  called `has_bind` and `has_pure`, which provide the `bind` and `pure`
  operations on `m` and some syntactic sugar.

* The definition adds three fields to those already provided by `has_bind`
  and `has_pure`, to store the proofs of the laws.

To instantiate this definition with a concrete monad, we must supply the type
constructor `m` (e.g., `option`), `bind` and `pure` operators, and proofs of
the laws.

## No Effects

Our first monad is the trivial monad `m := id` (i.e., `m := λα. α`). -/

def id.pure {α : Type} : α → id α :=
id

def id.bind {α β : Type} : id α → (α → id β) → id β
| a f := f a

@[instance] def id.lawful_monad : lawful_monad id :=
{ pure       := @id.pure,
  bind       := @id.bind,
  pure_bind  :=
    begin
      intros α β a f,
      refl
    end,
  bind_pure  :=
    begin
      intros α ma,
      refl
    end,
  bind_assoc :=
    begin
      intros α β γ f g ma,
      refl
    end }

/-! ## Basic Exceptions

As we saw above, the option type provides a basic exception mechanism. -/
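/-! As a quick sanity check (not part of the original demo), the first law,
and the second law specialized to `option.some`, hold definitionally for
Lean's built-in `option` monad: -/

example {α β : Type} (a : α) (f : α → option β) :
  (pure a >>= f) = f a :=
rfl

example {α : Type} (a : α) :
  (option.some a >>= pure) = option.some a :=
rfl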
def option.pure {α : Type} : α → option α :=
option.some

def option.bind {α β : Type} : option α → (α → option β) → option β
| option.none     f := option.none
| (option.some a) f := f a

@[instance] def option.lawful_monad : lawful_monad option :=
{ pure       := @option.pure,
  bind       := @option.bind,
  pure_bind  :=
    begin
      intros α β a f,
      refl
    end,
  bind_pure  :=
    begin
      intros α ma,
      cases' ma,
      { refl },
      { refl }
    end,
  bind_assoc :=
    begin
      intros α β γ f g ma,
      cases' ma,
      { refl },
      { refl }
    end }

def option.throw {α : Type} : option α :=
option.none

def option.catch {α : Type} : option α → option α → option α
| option.none     ma' := ma'
| (option.some a) _   := option.some a

@[instance] def option.has_orelse : has_orelse option :=
{ orelse := @option.catch }

/-! ## Mutable State

The state monad provides an abstraction corresponding to a mutable state.
Some compilers recognize the state monad and produce efficient imperative
code. -/

def action (σ α : Type) : Type :=
σ → α × σ

def action.read {σ : Type} : action σ σ
| s := (s, s)

def action.write {σ : Type} (s : σ) : action σ unit
| _ := ((), s)

def action.pure {σ α : Type} (a : α) : action σ α
| s := (a, s)

def action.bind {σ : Type} {α β : Type} (ma : action σ α) (f : α → action σ β) :
  action σ β
| s :=
  match ma s with
  | (a, s') := f a s'
  end

@[instance] def action.lawful_monad {σ : Type} : lawful_monad (action σ) :=
{ pure       := @action.pure σ,
  bind       := @action.bind σ,
  pure_bind  :=
    begin
      intros α β a f,
      apply funext,
      intro s,
      refl
    end,
  bind_pure  :=
    begin
      intros α ma,
      apply funext,
      intro s,
      simp [action.bind],
      cases' ma s,
      refl
    end,
  bind_assoc :=
    begin
      intros α β γ f g ma,
      apply funext,
      intro s,
      simp [action.bind],
      cases' ma s,
      refl
    end }

def increasingly : list ℕ → action ℕ (list ℕ)
| []        := pure []
| (n :: ns) :=
  do
    prev ← action.read,
    if n < prev then
      increasingly ns
    else
      do
        action.write n,
        ns' ← increasingly ns,
        pure (n :: ns')

#eval increasingly [1, 2, 3, 2] 0
#eval increasingly [1, 2, 3, 2, 4, 5, 2] 0

/-! ## Nondeterminism

The set monad stores an arbitrary, possibly infinite number of `α` values. -/

#check set

def set.pure {α : Type} : α → set α
| a := {a}

def set.bind {α β : Type} : set α → (α → set β) → set β
| A f := {b | ∃a, a ∈ A ∧ b ∈ f a}

@[instance] def set.lawful_monad : lawful_monad set :=
{ pure       := @set.pure,
  bind       := @set.bind,
  pure_bind  :=
    begin
      intros α β a f,
      simp [set.pure, set.bind]
    end,
  bind_pure  :=
    begin
      intros α ma,
      simp [set.pure, set.bind]
    end,
  bind_assoc :=
    begin
      intros α β γ f g ma,
      simp [set.pure, set.bind],
      apply set.ext,
      simp,
      tautology
    end }

/-! `tautology` performs elimination of the logical symbols `∧`, `∨`, `↔`,
and `∃` in hypotheses and introduction of `∧`, `↔`, and `∃` in the
conclusion, until all the emerging subgoals can be trivially proved (e.g.,
by `refl`).

## A Generic Algorithm: Iteration over a List

We consider a generic effectful program `mmap` that iterates over a list and
applies a function `f` to each element.
-/ def nths_fine {α : Type} (xss : list (list α)) (n : ℕ) : list (option α) := list.map (λxs, list.nth xs n) xss #eval nths_fine [[11, 12, 13, 14], [21, 22, 23]] 2 #eval nths_fine [[11, 12, 13, 14], [21, 22, 23]] 3 def mmap {m : Type → Type} [lawful_monad m] {α β : Type} (f : α → m β) : list α → m (list β) | [] := pure [] | (a :: as) := do b ← f a, bs ← mmap as, pure (b :: bs) def nths_coarse {α : Type} (xss : list (list α)) (n : ℕ) : option (list α) := mmap (λxs, list.nth xs n) xss #eval nths_coarse [[11, 12, 13, 14], [21, 22, 23]] 2 #eval nths_coarse [[11, 12, 13, 14], [21, 22, 23]] 3 lemma mmap_append {m : Type → Type} [lawful_monad m] {α β : Type} (f : α → m β) : ∀as as' : list α, mmap f (as ++ as') = do bs ← mmap f as, bs' ← mmap f as', pure (bs ++ bs') | [] _ := by simp [mmap, lawful_monad.bind_pure, lawful_monad.pure_bind] | (a :: as) as' := by simp [mmap, mmap_append as as', lawful_monad.pure_bind, lawful_monad.bind_assoc] end LoVe
function [ cnt, S, bad_idx ] = paramStats( annotation_struct, options, verbose )
%PARAMSTATS Collects statistics about possible positions of each landmark
%point (component)
%   Detailed explanation goes here
%
%   INPUT:
%       image       ... array of cells; each cell contains a bbox found by the detector and a filename
%       bw          ... vector [w; h] containing the size of the base window.
%       components  ... matrix of size 2xM, where M is the number of components [w0 ... wM-1; h0 ... hM-1]. The order of components is important!
%       bw_margin   ... vector [w; h], base window margin in percent of the original bbox.
%       image_path  ... path to the directory containing the image database. Must contain subdir /mgt with xml annotations.
%       verbose     ... true for image output with landmarks and components depicted
%
%   OUTPUT:
%       cnt         ... count of images that passed the test
%       S           ... matrix 4xM [x0_min; y0_min; x0_max; y0_max; ... xM-1_min; yM-1_min; xM-1_max; yM-1_max], bbox for each component
%       bad_idx     ... indices into the image array of images that did not pass this test
%
% 16-07-10 Michal Uricar
% 10-04-11 Michal Uricar, estimation of S0 fixed (face center instead of nose)
% 12-07-11 Michal Uricar, corners dataset
% 21-03-12 Michal Uricar, LFW annotation in one file

    if (nargin < 3)
        verbose = false;
    end;

    bad_idx = [];
    % count of components
    M = size(options.components, 2);
    cnt = 0;
    S = [inf(2, M); -inf(2, M)];

    % for each image in the database
    N = annotation_struct.N;
    for i = 1 : N
        [Iframe, Annotation, I, itmp.bbox, bbox, OrigPoints] = getImageFrame(options, i, annotation_struct);

        % !!! Annotation is empty at i == 578 !!!
        if (isempty(Annotation))
            bad_idx = [bad_idx i];
            continue;
        end;

        Points = prepareS0gt(Annotation.P, options);   % transform the nose to the center of the face
        Points = Points(:, options.comselect);         % extract relevant points only (name list in options.compnames)
        Points(:, M) = Annotation.P(:, 10);            % copy back the original nose position

        %% Check for each component whether it fits in the normalized frame
        flag = true;
        for j = 1 : M
            if ( ((Points(1, j) - options.components(1, j)/2) < 0) || ((Points(1, j) + options.components(1, j)/2) > options.bw(1)) || ...
                 ((Points(2, j) - options.components(2, j)/2) < 0) || ((Points(2, j) + options.components(2, j)/2) > options.bw(2)) )
                flag = false;
            end;
        end;

        % increase the number of valid images if flag is true
        if (flag)
            cnt = cnt + 1;
            fprintf('%.2f%% - Passed: tested image no.%d file %s...\n', i*100/N, i, Annotation.image.filename);
            %% recompute AABB for S
            for j = 1 : M
                bb = [Points(1, j) - options.components(1, j)/2 Points(2, j) - options.components(2, j)/2 ...
Points(1, j) + options.components(1, j)/2 Points(2, j) + options.components(2, j)/2 ]; S(1, j) = min(S(1, j), bb(1)); S(2, j) = min(S(2, j), bb(2)); S(3, j) = max(S(3, j), bb(3)); S(4, j) = max(S(4, j), bb(4)); end; else fprintf('%.2f%% - Not passed: tested image no.%d file %s...\n', i*100/N, i, Annotation.image.filename); bad_idx = [bad_idx i]; end; %% Visualization if (verbose) aabb = makeAABB(itmp.bbox); aabb2 = makeAABB(bbox); % show original image with annotation figure(1); subplot(2, 2, 1); imshow(I, []); hold on; % show ground truth points % for k = 1 : M plot(OrigPoints(1, :), OrigPoints(2, :), 'r.', 'LineWidth', 2, 'MarkerSize', 10); % end; % show aabb found by detector line([aabb(1,:) aabb(1,1)], [aabb(2,:) aabb(2,1)], 'color', 'b'); line([aabb2(1,:) aabb2(1,1)], [aabb2(2,:) aabb2(2,1)], 'color', 'y'); hold off; % show normalized frame subplot(2, 2, 2) imshow(Iframe, []); hold on; for k = 1 : M plot(Points(1, k), Points(2, k), 'r.', 'LineWidth', 2, 'MarkerSize', 10); bb = makeAABB([Points(1, k) - options.components(1, k)/2 Points(2, k) - options.components(2, k)/2 ... Points(1, k) + options.components(1, k)/2 Points(2, k) + options.components(2, k)/2 ]); line([bb(1,:) bb(1,1)], [bb(2,:) bb(2,1)], 'color', 'y'); end; hold off; % show recomputed S also if (max(S(1, :)) < inf && min(S(3, :)) > -inf) subplot(2, 2, 3) imshow(Iframe, []); hold on; for k = 1 : M plot(Points(1, k), Points(2, k), 'r.', 'LineWidth', 2, 'MarkerSize', 10); bb = makeAABB(S(:, k)); line([bb(1,:) bb(1,1)], [bb(2,:) bb(2,1)], 'color', 'b'); bb = makeAABB([Points(1, k) - options.components(1, k)/2 Points(2, k) - options.components(2, k)/2 ... Points(1, k) + options.components(1, k)/2 Points(2, k) + options.components(2, k)/2 ]); line([bb(1,:) bb(1,1)], [bb(2,:) bb(2,1)], 'color', 'y'); end; hold off; end; % saveas(gcf, ['./img/' 'bw_' Annotation.image.filename]); % close gcf; end; end; end
/*
   Copyright [2019, 2020] [IBM Corporation]
   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at
       http://www.apache.org/licenses/LICENSE-2.0
   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

#ifndef __CCPM_AREATOP_ALLOCATOR_H__
#define __CCPM_AREATOP_ALLOCATOR_H__

#include "atomic_word.h"
#include "list_item.h"
#include <ccpm/interfaces.h>
#include <common/byte_span.h>
#include <gsl/pointers>
#include <array>
#include <cstddef>
#include <ios> // ios_base::fmtflags, ostream
#include <vector>

namespace ccpm
{
	struct level_hints
	{
	private:
		/* Each level has a list for every possible number of contiguous free
		 * elements.
		 * Since "contiguous" free elements do not span a word, that is one list
		 * for every possible contiguous size in a word.
		 * The list at element n locates an area_ctl which has maximal runs of
		 * exactly n+1 free elements.
		 */
		using free_ctls_t = std::array<list_item, alloc_states_per_word>;
		free_ctls_t _free_ctls;
		free_ctls_t::size_type find_free_ctl_ix(unsigned min_run_length) const;
		free_ctls_t::size_type tier_ix_from_run_length(unsigned run_length) const;
	public:
		unsigned _ct_alloc_probe_success;
		unsigned _ct_alloc_probe_failure;
		unsigned _ct_subdivision;
	public:
		static constexpr auto size() { return alloc_states_per_word; }

		/* Return the smallest tier index which has an area with
		 * run length >= min_run_length.
		 * If there is no such tier, return sub_states_per_word.
		 */
		auto find_free_ctl(unsigned min_run_length_) const -> const list_item *
		{
			return & _free_ctls[find_free_ctl_ix(min_run_length_)];
		}
		auto find_free_ctl(unsigned min_run_length_) -> list_item *
		{
			return & _free_ctls[find_free_ctl_ix(min_run_length_)];
		}
		const auto *tier_from_run_length(unsigned run_length_) const
		{
			return & _free_ctls[tier_ix_from_run_length(run_length_)];
		}
		auto *tier_from_run_length(unsigned run_length_)
		{
			return & _free_ctls[tier_ix_from_run_length(run_length_)];
		}
		const auto *tier_end() const
		{
			return &_free_ctls[alloc_states_per_word];
		}

		/* Find the longest run length less than mac_run_length.
		 * Used for tracing, when find_free_ctl_ix has failed to
		 * find a long enough run.
		 */
		free_ctls_t::size_type find_mac_ctl_ix(unsigned mac_run_length) const;

		level_hints()
			: _free_ctls()
			, _ct_alloc_probe_success(0)
			, _ct_alloc_probe_failure(0)
			, _ct_subdivision(0)
		{}

		using level_ix_t = std::uint8_t;
		void print(
			std::ostream &o_
			, level_ix_t level_
			, std::ios_base::fmtflags // size_format_
		) const;
	};

	struct area_ctl;

	/*
	 * Location of non-persisted items for a single "region" managed by the
	 * crash-consistent allocator. Persisted items are kept in an area_ctl,
	 * which is in persistent memory.
*/ struct area_top { private: using level_ix_t = std::uint8_t; using byte_span = common::byte_span; using persist_type = gsl::not_null<ccpm::persister *>; area_ctl *_ctl; std::size_t _bytes_free; bool _all_restored; unsigned _trace_level; std::ostream *_o; using level_hints_vec = std::vector<level_hints>; level_hints_vec _level; unsigned _ct_allocation; byte_span _region; /* for get_region only */ area_top( // persist_type persist area_ctl *ctl , unsigned trace_level , byte_span iov , std::ostream &o ); area_top(const area_top &) = delete; area_top &operator=(const area_top &) = delete; void allocate_strategy_1( persist_type persist_ , void * & ptr_ , std::size_t bytes , std::size_t alignment , level_hints_vec::iterator level_ , unsigned run_length ); bool allocate_recovery_1(); bool allocate_recovery_2(persist_type persist, level_hints_vec::iterator level); bool trace_coarse() const { return 0 < _trace_level; } bool trace_fine() const { return 1 < _trace_level; } public: /* Initial area_ctl */ explicit area_top( persist_type persist , byte_span iov , unsigned trace_level , std::ostream &o ); /* Restored area_ctl */ explicit area_top( persist_type persist , byte_span iov , const ownership_callback_t &resolver , unsigned trace_level , std::ostream &o ); ~area_top(); bool includes(const void *addr) const; /* Free byte count. Required by users */ std::size_t bytes_free() const; byte_span get_region() const { return _region; } void allocate( persist_type persist , void * & ptr, std::size_t bytes , std::size_t alignment ); void deallocate( persist_type persist , void * & ptr, std::size_t bytes ); void print( std::ostream &o , std::ios_base::fmtflags size_format ) const; void print_ctls( std::ostream *o_ , std::ios_base::fmtflags format_ ) const; level_ix_t height() const { return level_ix_t(_level.size()); } void set_root(const byte_span & iov, persist_type persist); byte_span get_root() const; /* * called by area_ctl to add area_ctl a, at level level_ix, with a longest * free run (consecutive free elements) of free_run, to _level, which is the * non-persistent catalog of area_ctl items. */ void remove_from_chain(area_ctl *a, level_ix_t level_ix, unsigned longest_run); void restore_to_chain(area_ctl *a, level_ix_t level_ix, unsigned run_length); bool contains(const void *p) const; bool is_in_chain( const area_ctl *a , level_ix_t level_ix , unsigned run_length ) const; }; } #endif
Require Import FcEtt.sigs. Require Import FcEtt.utils. Require Import FcEtt.ett_inf_cs. Require Import FcEtt.ett_ind. Require Import FcEtt.imports. Require Import FcEtt.tactics. Require Import FcEtt.erase_syntax. Require Import FcEtt.ext_red. (* weakening for available cos *) Require Import FcEtt.fc_invert FcEtt.fc_unique. Require Import FcEtt.ett_par. Require Import FcEtt.toplevel. Require Import FcEtt.fc_context_fv. Module erase (wf : fc_wf_sig) (weak : fc_weak_sig) (subst : fc_subst_sig) (e_invert : ext_invert_sig). Include e_invert. Module e_red := ext_red e_invert. Import e_red. Import wf weak subst. Module invert := fc_invert wf weak subst. Module unique := fc_unique wf subst. Import invert unique. Set Implicit Arguments. Set Bullet Behavior "Strict Subproofs". Lemma Path_erase : forall T a, Path T a -> Path T (erase a). Proof. induction 1; try destruct rho; simpl; auto. autorewcs. eauto with lc. Qed. Hint Constructors Typing PropWff Iso DefEq Ctx. Ltac dispatch_rho := match goal with | H11 : ∀ x : atom, ¬ x `in` ?L0 → RhoCheck ?rho x (erase_tm (open_tm_wrt_tm ?b1 (a_Var_f x))) |- ∀ x : atom, ¬ x `in` ?LL → RhoCheck ?rho x (open_tm_wrt_tm (erase_tm ?b1) (a_Var_f x)) => let Fr := fresh in let r' := fresh in intros x; intros; assert (FrL0 : x `notin` L0); eauto; move: (H11 x FrL0) => r'; autorewcshyp r'; rewrite -open_tm_erase_tm in r'; eapply r' end. (* ------------------------------------------ *) Lemma erase_mutual : (forall G a A, AnnTyping G a A -> Typing (erase_context G) (erase a) (erase A)) /\ (forall G phi, AnnPropWff G phi -> PropWff (erase_context G) (erase phi)) /\ (forall G D g p1 p2, AnnIso G D g p1 p2 -> Iso (erase_context G) D (erase_constraint p1) (erase_constraint p2)) /\ (forall G D g a b, AnnDefEq G D g a b -> forall A, AnnTyping G a A -> DefEq (erase_context G) D (erase a) (erase b) (erase A)) /\ (forall G, AnnCtx G -> Ctx (erase_context G) /\ forall c t, binds c t G -> binds c (erase_sort t) (erase_context G)). Proof. eapply ann_typing_wff_iso_defeq_mutual. all: intros; split_hyp; try solve [simpl; split_hyp; auto; eauto 2]. - eapply E_Var; auto. rewrite -[Tm (erase _)]/(erase_sort (Tm _)) /erase_context. apply binds_map_2. auto. - simpl. pick fresh x and apply E_Pi; auto. replace (a_Var_f x) with (erase (a_Var_f x)); auto. rewrite open_tm_erase_tm. rewrite_env (erase_context ((x ~ Tm A) ++ G)). eapply H. auto. - simpl. pick fresh x and apply E_Abs; auto. assert (FrL : x `notin` L). auto. pose (J := H0 x FrL). rewrite <- open_tm_erase_tm in J. rewrite <- open_tm_erase_tm in J. unfold erase_context in J. rewrite map_app in J. simpl in J. auto. assert (FrL : x `notin` L). auto. move: (r x FrL) => r'. autorewcshyp r'. rewrite -open_tm_erase_tm in r'. eapply r'. - rewrite -open_tm_erase_tm. simpl in H. simpl. destruct rho; simpl; eauto. - (* cast *) simpl. autorewcs. eapply E_Conv; eauto 1. rewrite <- erase_dom. pose KA := AnnTyping_regularity a0. clearbody KA. eapply (H0 a_Star). auto. - simpl. pick fresh x and apply E_CPi; eauto. autorewcs. rewrite (open_co_erase_tm2 (g_Var_f x)). rewrite_env (erase_context ((x ~ Co phi) ++ G)). eauto. - pick fresh x and apply E_CAbs; auto. assert (FrL : x `notin` L). auto. pose (J := H0 x FrL). rewrite (open_co_erase_tm2 (g_Var_f x)). rewrite (open_co_erase_tm2 (g_Var_f x)). auto. - rewrite -(open_co_erase_tm2 _ _ g_Triv) /=. pose K := AnnTyping_regularity a0. clearbody K. inversion K. inversion H4. subst. eapply E_CApp. simpl in H. eauto. rewrite <- erase_dom. eapply H0; eauto. - simpl. eapply E_Const; eauto. unfold toplevel. 
unfold erase_sig. replace (Cs (erase_tm A)) with (erase_csort (Cs A)). eapply binds_map. auto. auto. - simpl. eapply E_Fam; eauto. unfold toplevel. unfold erase_sig. replace (Ax (erase_tm a) (erase_tm A)) with (erase_csort (Ax a A)); auto. - simpl. econstructor; autorewcs. + eauto. + autorewcshyp e. by rewrite e. + eapply Typing_regularity; eauto 1. - assert (Ctx (erase_context G)). eauto. simpl in *. inversion a1. inversion a2. subst. eapply E_PropCong. eapply H; eauto. rewrite H10. eapply H0; eauto. - destruct (AnnDefEq_regularity a) as [S1 [S2 [g' [AT1 [AT2 _]]]]]. inversion AT1. inversion AT2. subst. destruct phi1. destruct phi2. simpl in *. eapply E_CPiFst. eapply (H a_Star); eauto. - eapply sym_iso. auto. - simpl. rewrite e. rewrite e0. inversion a0. inversion H0. subst. simpl in *. eapply E_IsoConv; eauto 1. eapply (H a_Star). eapply AnnTyping_regularity. eauto. inversion H1. subst. eapply E_Wff; eauto 1. eapply E_Conv; eauto 1. eapply E_Sym. eapply DefEq_weaken_available. eapply (H a_Star). eauto 1. eapply AnnTyping_regularity. eauto. eapply E_Conv; eauto 1. eapply E_Sym. eapply DefEq_weaken_available. eapply (H a_Star). eauto 1. eapply AnnTyping_regularity. eauto. - pose K:= (binds_to_AnnPropWff _ _ _ _ a0 b0). clearbody K. inversion K. subst. resolve_unique_nosubst. pose M := H1 c (Co (Eq a b A0)) b0. eapply E_Assn; eauto. - simpl. resolve_unique_nosubst. subst. eapply E_Refl; auto. - resolve_unique_nosubst. assert (K :Ctx (erase_context G)) . eauto. pose R1 := AnnTyping_regularity a0. pose R2 := AnnTyping_regularity a1. simpl. rewrite -e. eapply E_Refl; eauto. - eapply E_Sym. resolve_unique_nosubst. pose R1 := AnnTyping_regularity a0. pose R2 := AnnTyping_regularity a1. pose K1 := H1 a_Star R1. clearbody K1. simpl in K1. pose K2 := H2 B a0. clearbody K2. eapply DefEq_conv. eauto. rewrite <- erase_dom. auto. - (* trans *) destruct (AnnDefEq_regularity a0) as [S1 [S2 [g4 [T1 [T2 DE]]]]]. destruct (AnnDefEq_regularity a2) as [S1' [S2' [g4' [T1' [T2' DE']]]]]. resolve_unique_nosubst. resolve_unique_nosubst. resolve_unique_nosubst. resolve_unique_nosubst. eapply E_Trans. eauto. eapply DefEq_conv. eauto. rewrite <- erase_dom. eapply E_Sym. eapply (H3 a_Star). eapply AnnTyping_regularity. eauto. - simpl. assert (Ctx (erase_context G)). eauto. resolve_unique_nosubst. eapply E_Beta. auto. auto. rewrite e. eauto. eauto. - (* pi-cong*) assert (A = a_Star). eapply AnnTyping_unique; eauto. subst. simpl. inversion a1. subst. eapply (E_PiCong (L \u L0)); try solve [simpl in *; eauto 2]. + eapply (H a_Star). auto. + intros x Fr. assert (FrL : x `notin` L). auto. pose K := H0 x FrL a_Star. clearbody K. clear H0. rewrite -open_tm_erase_tm in K. simpl. simpl in K. have: a_Var_f x = erase (a_Var_f x) by done. move=> ->. rewrite (open_tm_erase_tm B3) e. rewrite -(open_tm_erase_tm B2). simpl. have: a_Var_f x = erase (a_Var_f x) by done. move=> ->. rewrite (open_tm_erase_tm B2). simpl. eapply K. eapply H8. auto. auto. + simpl in H1. eapply invert_a_Pi. eauto. - simpl. inversion H4. subst. simpl. eapply (E_AbsCong (L \u L0)) ; auto. intros x Fr. assert (FrL : x `notin` L). auto. assert (FrL0 : x `notin` L0). auto. assert (EQ: (erase (open_tm_wrt_tm b3 (a_Var_f x))) = (erase (open_tm_wrt_tm b2 (a_Var_f x)))). rewrite e. rewrite <- open_tm_erase_tm. rewrite <- open_tm_erase_tm. simpl. auto. auto. replace (a_Var_f x) with (erase (a_Var_f x)). rewrite open_tm_erase_tm. rewrite open_tm_erase_tm. rewrite open_tm_erase_tm. rewrite EQ. eapply (H0 x FrL (open_tm_wrt_tm B0 (a_Var_f x))). eapply H11; simpl; auto. 
simpl. auto. dispatch_rho. dispatch_rho. - simpl in *. resolve_unique_nosubst. destruct rho. + inversion a3. subst. rewrite <- open_tm_erase_tm. eapply E_AppCong. eapply (H (a_Pi Rel A0 B0)). eauto. eapply H0. auto. + inversion a3. subst. rewrite <- open_tm_erase_tm. move: (H _ H9) => h0. move: (H0 _ H10) => h1. move: (DefEq_regularity h1) => p1. inversion p1. eapply E_IAppCong; eauto. - simpl in *. destruct (AnnDefEq_regularity a) as [S1 [S2 [g' [TA1 [TA2 _]]]]]. inversion TA1. subst. resolve_unique_nosubst. inversion TA2. subst. simpl. eapply E_PiFst. eapply (H a_Star). eauto. - rewrite <- open_tm_erase_tm. rewrite <- open_tm_erase_tm. simpl in *. destruct (AnnDefEq_regularity a) as [S1 [S2 [g' [TA1 [TA2 _]]]]]. inversion TA1. assert (AnnTyping G (open_tm_wrt_tm B1 a1) a_Star). { pick fresh y. rewrite (tm_subst_tm_tm_intro y). replace a_Star with (tm_subst_tm_tm a1 y a_Star). eapply AnnTyping_tm_subst; auto. simpl. auto. auto. } resolve_unique_nosubst. eapply E_PiSnd; eauto 1. eapply (H a_Star). eauto. eapply (H0 A1). eauto. - (* CPiCong *) simpl. assert (a_Star = A). eapply (AnnTyping_unique a1). eauto. subst. clear H3. inversion a1. inversion a2. subst. eapply (E_CPiCong (L \u dom G \u L0 \u L1)); try solve [simpl in *; eauto 2]. + intros c Fr. assert (FrL : c `notin` L). auto. pose K := a0 c FrL. clearbody K. rewrite (open_co_erase_tm2 (g_Var_f c)). rewrite (open_co_erase_tm2 g_Triv). assert (EQ: (erase (open_tm_wrt_co B3 (g_Var_f c))) = (erase (open_tm_wrt_co B2 (g_Var_f c)))). rewrite e. rewrite <- open_co_erase_tm. rewrite <- open_co_erase_tm. auto. auto. rewrite <- (open_co_erase_tm2 g_Triv B3 (g_Var_f c)). rewrite (open_co_erase_tm2 (g_Var_f c)). rewrite EQ. eapply (H0 c FrL a_Star); auto. + simpl in H1. eapply invert_a_CPi. eauto. - simpl. inversion H5. subst. simpl. eapply (E_CAbsCong (L \u dom G \u L0)). + intros c Fr. assert (FrL : c `notin` L). auto. pose K := a0 c FrL. clearbody K. rewrite (open_co_erase_tm2 (g_Var_f c)). rewrite (open_co_erase_tm2 g_Triv). assert (EQ: (erase (open_tm_wrt_co a3 (g_Var_f c))) = (erase (open_tm_wrt_co a2 (g_Var_f c)))). rewrite e. rewrite <- open_co_erase_tm. rewrite <- open_co_erase_tm. auto. auto. rewrite <- (open_co_erase_tm2 g_Triv a3 (g_Var_f c)). rewrite (open_co_erase_tm2 (g_Var_f c)). rewrite EQ. rewrite (open_co_erase_tm2 (g_Var_f c) B0). eapply (H0 c FrL (open_tm_wrt_co B0 (g_Var_f c))). eauto. + simpl in H1. have CT: Ctx (erase_context G) by eauto 2. move: (Typing_regularity H1) => TCPi. destruct (invert_a_CPi TCPi) as (_ & _ & P). eauto. - simpl. inversion H5. subst. inversion a5. subst. resolve_unique_subst. resolve_unique_subst. inversion H6. subst. clear H6. clear H7. clear H11. inversion a6. subst. autorewcs. rewrite <- (open_co_erase_tm2 _ _ g_Triv). apply AnnDefEq_weaken_available in a0. apply AnnDefEq_weaken_available in a4. resolve_unique_subst. resolve_unique_subst. pose K := AnnTyping_regularity H9. clearbody K. inversion K. inversion H10. subst. pose K1 := AnnTyping_regularity H8. clearbody K1. inversion K1. inversion H12. subst. eapply E_CAppCong. move: (H _ H9) => h0. eapply h0. fold erase_tm. eapply DefEq_weaken_available. eauto. - simpl in H. rewrite <- (@open_co_erase_tm2 _ _ g_Triv). rewrite <- (@open_co_erase_tm2 _ _ g_Triv). simpl. destruct (AnnDefEq_regularity a0) as [S1 [S2 [g [AT1 [AT2 _]]]]]. inversion AT1. subst. inversion H6. subst. assert (AnnTyping G (open_tm_wrt_co B1 g2) a_Star). { pick fresh y. rewrite (co_subst_co_tm_intro y). replace a_Star with (co_subst_co_tm g2 y a_Star). 
eapply AnnTyping_co_subst; auto. simpl. eauto. simpl. auto. auto. } resolve_unique_nosubst. eapply E_CPiSnd. eapply (H a_Star). auto. rewrite -erase_dom. auto. inversion AT2. inversion H7. rewrite -erase_dom. auto. - destruct (AnnIso_regularity a1) as [W1 W2]. inversion W1. inversion W2. subst. resolve_unique_nosubst. eapply E_Cast. eauto. eauto. - destruct (AnnIso_regularity a0) as [W1 W2]. inversion W1. inversion W2. subst. move: (AnnTyping_regularity H5) => ?. resolve_unique_nosubst. simpl. eapply E_IsoSnd. eauto. - simpl in *. destruct (An_Abs_inversion H0) as (B0 & h0 & h1 & h2). subst. pick fresh x. destruct (h2 x ltac:(auto)) as [RC h3]. rewrite e in h3; auto. inversion h3. subst. have h4: AnnCtx G by eauto with ctx_wff. have h5: AnnCtx (nil ++ (x ~ Tm A) ++ G) by econstructor; eauto with ctx_wff. move: (AnnTyping_weakening a0 (x ~ Tm A) nil G eq_refl ltac:(auto)) => h0. simpl_env in h0. resolve_unique_subst. inversion H1. subst. apply open_tm_wrt_tm_inj in H6; auto. subst. simpl. destruct rho. + eapply E_EtaRel with (L := L \u {{x}}); auto. intros. replace (a_Var_f x0) with (erase (a_Var_f x0)). rewrite open_tm_erase_tm. rewrite e; auto. simpl; auto. + eapply E_EtaIrrel with (L := L \u {{x}}); auto. intros. replace (a_Var_f x0) with (erase (a_Var_f x0)). rewrite open_tm_erase_tm. rewrite e; auto. simpl; auto. - simpl in *. destruct (An_CAbs_inversion H0) as (B0 & h0 & h1). subst. pick fresh x. destruct (h1 x ltac:(auto)) as [RC h3]. rewrite e in h3; auto. inversion h3. subst. have h4: AnnCtx G by eauto with ctx_wff. have h5: AnnCtx (nil ++ (x ~ Co phi) ++ G) by econstructor; eauto with ctx_wff. move: (AnnTyping_weakening a0 (x ~ Co phi) nil G eq_refl ltac:(auto)) => h0. simpl_env in h0. clear h4 h5. resolve_unique_subst. inversion H1. subst. apply open_tm_wrt_co_inj in H4; auto. subst. simpl. eapply E_EtaC with (L := L \u {{x}}); auto. intros. erewrite open_co_erase_tm2. erewrite e; auto. (* Left/Right - simpl in *. have k0: AnnTyping G (a_App a rho b) (open_tm_wrt_tm B b). { eapply An_App. eauto. eauto. } move: (H3 _ k0) => h0. clear H3. move: (AnnTyping_regularity a0) => h1. move: (H4 _ h1) => h2. clear H4. rewrite erase_dom in h2. move: (H5 _ (AnnTyping_regularity k0)) => h3. clear H5. rewrite erase_dom in h3. ann_invert_clear. ann_invert_clear. resolve_unique_nosubst. resolve_unique_nosubst. assert (a_Pi rho A B = a_Pi rho A B0). eapply AnnTyping_unique. eauto. eauto. inversion H3. subst B0. repeat match goal with [ H : AnnTyping _ _ _ |- _ ] => clear H end. repeat match goal with [ H : AnnDefEq _ _ _ _ _ |- _ ] => clear H end. move: (DefEq_regularity h0) => Pw. inversion Pw. clear Pw. subst. remember (erase_context G) as G0. have Cx: Ctx G0. eauto. have ?: Typing G0 (erase_tm a') (a_Pi rho (erase_tm A) (erase_tm B)). { eapply E_Conv. eauto. auto. eauto using Typing_regularity. } have ?: DefEq G0 (dom G0) (erase_tm A) (erase_tm A') a_Star. { eapply E_PiFst. eauto. } have ?: Typing G0 (erase_tm b') (erase_tm A). { eapply E_Conv. eauto. eauto. eauto using Typing_regularity. } destruct rho. + simpl in *. match goal with [ H8 : Typing _ (a_App (erase_tm a) Rel (erase_tm b)) _ |- _ ] => move: (invert_a_App_Rel Cx H8) => [A3 [B3 hyp]] end. split_hyp. match goal with [ H7 : Typing _ (a_App (erase_tm a') Rel (erase_tm b')) _ |- _ ] => move: (invert_a_App_Rel Cx H7) => [A4 [B4 hyp]] end. split_hyp. eapply E_LeftRel with (b:=erase_tm b) (b':=erase_tm b'). ++ eapply Path_erase. eauto. ++ eapply Path_erase. eauto. ++ auto. ++ auto. ++ auto. ++ auto. ++ autorewcs. rewrite open_tm_erase_tm. 
auto. ++ eapply E_Trans with (a1 := (open_tm_wrt_tm (erase B') (erase b'))). autorewcs. repeat rewrite open_tm_erase_tm. auto. eapply E_Sym. eapply E_PiSnd with (B1:=erase B)(B2:=erase B'). eauto. eapply E_Refl. eauto. + simpl in *. match goal with [ H8 : Typing _ (a_App (erase_tm a) _ a_Bullet) _ |- _ ] => move: (invert_a_App_Irrel Cx H8) => [A3 [B3 [b3 hyp]]] end. split_hyp. match goal with [ H7 : Typing _ (a_App (erase_tm a') _ a_Bullet) _ |- _ ] => move: (invert_a_App_Irrel Cx H7) => [A4 [B4 [b4 hyp]]] end. split_hyp. eapply E_LeftIrrel with (b:=erase_tm b) (b':=erase_tm b'). ++ eapply Path_erase. eauto. ++ eapply Path_erase. eauto. ++ auto. ++ auto. ++ auto. ++ auto. ++ autorewcs. rewrite open_tm_erase_tm. auto. ++ eapply E_Trans with (a1 := (open_tm_wrt_tm (erase B') (erase b'))). autorewcs. repeat rewrite open_tm_erase_tm. auto. eapply E_Sym. eapply E_PiSnd with (B1:=erase B)(B2:=erase B'). eauto. eapply E_Refl. eauto. - simpl in *. have k0: AnnTyping G (a_App a Rel b) (open_tm_wrt_tm B b). { eapply An_App. eauto. eauto. } move: (H3 _ k0) => h0. clear H3. move: (AnnTyping_regularity a0) => h1. move: (H4 _ h1) => h2. clear H4. rewrite erase_dom in h2. move: (AnnTyping_regularity a2) => h3. move: (H5 _ (AnnTyping_regularity k0)) => h4. clear H5. rewrite erase_dom in h4. ann_invert_clear. resolve_unique_nosubst. repeat match goal with [ H : AnnTyping _ _ _ |- _ ] => clear H end. repeat match goal with [ H : AnnDefEq _ _ _ _ _ |- _ ] => clear H end. move: (DefEq_regularity h0) => Pw. inversion Pw. clear Pw. subst. remember (erase_context G) as G0. have Cx: Ctx G0. eauto. simpl in *. have ?: Typing G0 (erase_tm a') (a_Pi Rel (erase_tm A) (erase_tm B)). { eapply E_Conv. eauto. auto. eauto using Typing_regularity. } have ?: DefEq G0 (dom G0) (erase_tm A) (erase_tm A') a_Star. { eapply E_PiFst. eauto. } have ?: Typing G0 (erase_tm b') (erase_tm A). { eapply E_Conv. eauto. eauto. eauto using Typing_regularity. } match goal with [ H8 : Typing _ (a_App (erase_tm a) Rel (erase_tm b)) _ |- _ ] => move: (invert_a_App_Rel Cx H8) => [A3 [B3 hyp]] end. split_hyp. match goal with [ H7 : Typing _ (a_App (erase_tm a') Rel (erase_tm b')) _ |- _ ] => move: (invert_a_App_Rel Cx H7) => [A4 [B4 hyp]] end. split_hyp. eapply E_Right with (a:=erase_tm a) (a':=erase_tm a'). ++ eapply Path_erase. eauto. ++ eapply Path_erase. eauto. ++ eauto. ++ auto. ++ auto. ++ auto. ++ autorewcs. rewrite open_tm_erase_tm. auto. ++ eapply E_Trans with (a1 := (open_tm_wrt_tm (erase B') (erase b'))). autorewcs. repeat rewrite open_tm_erase_tm. auto. eapply E_Sym. eapply E_PiSnd with (B1:=erase B)(B2:=erase B'). eauto. eapply E_Refl. eauto. - simpl in *. have k0: AnnTyping G (a_CApp a g) (open_tm_wrt_co B g). eauto. move: (H3 _ k0) => h0. clear H3. move: (AnnTyping_regularity a0) => h1. move: (H4 _ h1) => h2. clear H4. rewrite erase_dom in h2. move: (H5 _ (AnnTyping_regularity k0)) => h3. clear H5. rewrite erase_dom in h3. move: (AnnDefEq_regularity a7) => [T1 [T2 [s hyp]]]. split_hyp. repeat ann_invert_clear. resolve_unique_nosubst. resolve_unique_nosubst. resolve_unique_subst. resolve_unique_subst. resolve_unique_subst. invert_syntactic_equality. move: (H1 _ ltac:(eauto)) => h4. clear H1. move: (H2 _ ltac:(eauto)) => h5. clear H2. rewrite erase_dom in h4. rewrite erase_dom in h5. repeat match goal with [ H : AnnTyping _ _ _ |- _ ] => clear H end. repeat match goal with [ H : AnnDefEq _ _ _ _ _ |- _ ] => clear H end. move: (DefEq_regularity h0) => Pw. inversion Pw. clear Pw. subst. remember (erase_context G) as G0. 
have Cx: Ctx G0. eauto. simpl in *. have ?: Typing G0 (erase_tm a') (a_CPi (Eq (erase_tm a10) (erase_tm b) (erase_tm A1)) (erase_tm B0)). { eapply E_Conv. eauto using Typing_regularity. eapply E_Sym. auto. eauto using Typing_regularity. } match goal with [ H8 : Typing _ (a_CApp (erase_tm a) g_Triv) _ |- _ ] => move: (invert_a_CApp H8) => [a5 [b5 [A5 [B5 hyp]]]] end. split_hyp. match goal with [ H7 : Typing _ (a_CApp (erase_tm a') g_Triv) _ |- _ ] => move: (invert_a_CApp H7) => [a4 [b4 [A4 [B4 hyp]]]] end. split_hyp. eapply E_CLeft. ++ eapply Path_erase. eauto. ++ eapply Path_erase. eauto. ++ auto. ++ auto. ++ auto. ++ autorewcs. erewrite -> open_co_erase_tm2 with (g:=g_Triv). eauto. *) - rewrite <- dom_map with (f:=erase_sort) in n. unfold erase_context in *. split. eapply E_ConsTm; auto. intros. destruct (@binds_cons_1 _ c x _ (Tm A) G H2) as [[E1 E2] | E3]. + subst. simpl. eauto. + simpl. eapply binds_cons_3. auto. - rewrite <- dom_map with (f:=erase_sort) in n. unfold erase_context in *. split. eapply E_ConsCo; auto. intros. destruct (@binds_cons_1 _ c0 c _ (Co phi) G H2) as [[E1 E2] | E3]. + subst. simpl. eauto. + simpl. eapply binds_cons_3. auto. Qed. Definition AnnTyping_erase : (forall G a A, AnnTyping G a A -> Typing (erase_context G) (erase a) (erase A)) := first erase_mutual. Definition AnnPropWff_erase : (forall G phi, AnnPropWff G phi -> PropWff (erase_context G) (erase phi)) := second erase_mutual. Definition AnnIso_erase : (forall G D g p1 p2, AnnIso G D g p1 p2 -> Iso (erase_context G) D (erase_constraint p1) (erase_constraint p2)) := third erase_mutual. Definition AnnDefEq_erase : (forall G D g a b, AnnDefEq G D g a b -> forall A, AnnTyping G a A -> DefEq (erase_context G) D (erase a) (erase b) (erase A)) := fourth erase_mutual. Definition AnnCtx_erase : (forall G, AnnCtx G -> Ctx (erase_context G) /\ forall c t, binds c t G -> binds c (erase_sort t) (erase_context G)) := fifth erase_mutual. Lemma erasure_a_Star : forall G a A, AnnTyping G a A -> erase A = a_Star -> exists a', erase a = erase a' /\ AnnTyping G a' a_Star. Proof. intros G a A H H0. remember (g_Refl2 A a_Star (g_Refl a_Star)) as g. pose K := AnnTyping_regularity H. have L: AnnCtx G by eauto with ctx_wff. assert (AnnDefEq G (dom G) g A a_Star). { rewrite Heqg. eauto. } assert (AnnTyping G a_Star a_Star). eauto. exists (a_Conv a g). repeat split. eauto. Qed. Lemma erasure_cvt : forall G a A, AnnTyping G a A -> forall B, erase A = erase B -> AnnTyping G B a_Star -> exists a', erase a = erase a' /\ AnnTyping G a' B. Proof. intros G a A H B e TB. pose K := AnnTyping_regularity H. clearbody K. remember (g_Refl2 A B (g_Refl a_Star)) as g. assert (AnnDefEq G (dom G) g A B). { rewrite Heqg. eapply An_EraseEq. eauto. eauto. eauto. eapply An_Refl. eapply An_Star. eauto with ctx_wff. } remember (a_Conv a (g_Refl2 A B (g_Refl a_Star))) as a0'. assert (ATA' : AnnTyping G a0' B). { rewrite Heqa0'. rewrite <- Heqg. eapply An_Conv. eauto. eauto. eauto. } exists (a_Conv a g). eauto. Qed. Lemma AnnDefEq_invertb : forall G D g a b, AnnDefEq G D g a b -> exists A b' g, AnnTyping G a A /\ AnnTyping G b' A /\ erase b' = erase b /\ AnnDefEq G D g b b'. Proof. intros G D g a b DE. destruct (AnnDefEq_regularity DE) as [SA [SB [g4 [AT0' [ATB0' SAB]]]]]. exists SA. eexists. eexists. assert (AnnTyping G (a_Conv b (g_Sym g4)) SA). { eapply An_Conv. eapply ATB0'. eapply An_Sym. eapply AnnTyping_regularity. eauto. eapply AnnTyping_regularity. eauto. eapply An_Refl. eapply An_Star. eauto with ctx_wff. eauto. eapply AnnTyping_regularity. 
eauto. } split. auto. split. eauto. split. simpl. auto. eapply An_EraseEq. eauto. eauto. simpl. eauto. eapply An_Sym. eapply AnnTyping_regularity. eauto. eapply AnnTyping_regularity. eauto. eapply An_Refl. eapply An_Star. eauto with ctx_wff. eauto. Qed. (* ----------------------------------------------------------- *) Lemma erasure_AnnDefEq : forall G D g A'' B'' S A' B', AnnDefEq G D g A'' B'' -> AnnTyping G A'' S -> erase S = a_Star -> erase A'' = erase A' -> erase B'' = erase B' -> AnnTyping G A' a_Star -> AnnTyping G B' a_Star -> exists g', AnnDefEq G D g' A' B'. Proof. intros G D g A'' B'' S A' B' H H0 H1 H2 H3 H4 H5. destruct (AnnDefEq_invertb H) as (S' & b'' & g' & TA'' & Tb' & Eb' & DEB). resolve_unique_nosubst. move :(AnnTyping_regularity H0) => R0. move :(AnnTyping_regularity Tb') => R1. have CTX : AnnCtx G by eauto with ctx_wff. assert (TEMP : exists g, AnnDefEq G D g A' A''). { eexists. eapply An_EraseEq. eauto. eauto. eauto. eapply An_EraseEq. eapply An_Star. auto. eauto. eauto. eapply An_Refl. eauto. } destruct TEMP as (gA & DEA). assert (TEMP : exists g, AnnDefEq G D g b'' B'). { eexists. eapply An_EraseEq. eauto. eauto. autorewcs. congruence. eapply An_EraseEq. eauto. eauto. eauto. eapply An_Refl. eauto. } destruct TEMP as (gB & DEB2). destruct (An_Trans' DEA H) as [gX TR1]. destruct (An_Trans' TR1 DEB) as [gX2 TR2]. destruct (An_Trans' TR2 DEB2) as [gX3 TR3]. exists gX3. exact TR3. Qed. Lemma AnnDefEq_invert_a_Star : forall G0 D g1 A1' A2' S, AnnDefEq G0 D g1 A1' A2' -> AnnTyping G0 A1' S -> erase S = a_Star -> exists A1'', exists A2'', exists g, erase A1'' = erase A1' /\ erase A2'' = erase A2' /\ AnnDefEq G0 D g A1'' A2'' /\ AnnTyping G0 A1'' a_Star /\ AnnTyping G0 A2'' a_Star. Proof. intros G0 D g1 A1' A2' S DE T EA3. destruct (erasure_a_Star T EA3) as (A1'' & EA1'' & TA1'). assert (exists g, AnnDefEq G0 D g A1'' A1'). { eexists. eapply An_EraseEq with (A := a_Star); eauto 1. assert (AnnCtx G0). eauto with ctx_wff. eapply An_EraseEq with (A := a_Star). eauto. eapply AnnTyping_regularity; eauto 1. eauto. eapply An_Refl. eauto. } destruct H as [g2 DE1]. destruct (An_Trans' DE1 DE) as [g3 DE2]. destruct (AnnDefEq_invertb DE2) as (A1''' & A2'' & g4 & ? & T2 & E1 & DE3). resolve_unique_nosubst. destruct (An_Trans' DE2 DE3) as [g5 DE4]. exists A1'', A2'', g5. repeat split; eauto. Qed. Lemma Path_to_Path : forall a0, lc_tm a0 -> forall T a, Path T a -> erase a0 = a -> Path T a0. Proof. intros a0. induction a0. all: intros LC T1 a P H. all: inversion P; subst. all: try destruct rho; simpl in *; try done. all: lc_inversion c. all: invert_syntactic_equality. all: econstructor; eauto. Qed. (* ----------------------------------------------------------- *) Lemma annotation_mutual : (forall G a A, Typing G a A -> forall G0, erase_context G0 = G -> AnnCtx G0 -> exists a0 A0, (erase a0) = a /\ (erase A0) = A /\ AnnTyping G0 a0 A0) /\ (forall G phi, PropWff G phi -> forall G0, erase_context G0 = G -> AnnCtx G0 -> exists phi0, erase_constraint phi0 = phi /\ AnnPropWff G0 phi0) /\ (forall G D p1 p2, Iso G D p1 p2 -> forall G0, erase_context G0 = G -> AnnCtx G0 -> exists g0 p1' p2', (erase_constraint p1') = p1 /\ (erase_constraint p2') = p2 /\ AnnIso G0 D g0 p1' p2') /\ (forall G D a b A, DefEq G D a b A -> forall G0, erase_context G0 = G -> AnnCtx G0 -> exists g a0 b0 A0, (erase a0) = a /\ (erase b0) = b /\ (erase A0) = A /\ AnnDefEq G0 D g a0 b0 /\ AnnTyping G0 a0 A0 /\ AnnTyping G0 b0 A0) /\ (forall G, Ctx G -> True). Proof. eapply typing_wff_iso_defeq_mutual; intros; auto. 
- exists a_Star. exists a_Star. repeat split. auto. - rename H0 into EQ. unfold erase_context in EQ. rewrite <- EQ in b. apply binds_map_3 in b. destruct b as [s' [EQ2 b]]. destruct s'; simpl in EQ2; inversion EQ2. exists (a_Var_f x). exists A0. unfold erase_context. simpl. split; auto. - (* E_Pi *) clear t. clear t0. pick fresh x. assert (FrL : x `notin` L). auto. destruct (H0 G0 H1 H2) as [A0 [S0 [EQ1 [EQ2 AT]]]]. clear H0. destruct (erasure_a_Star AT EQ2) as [A0' [EQ3 AS]]. assert (EQA : erase A0' = A). rewrite <- EQ3. auto. assert (AN: AnnCtx ((x ~ Tm A0') ++ G0)). eauto with ctx_wff. assert (E : erase_context ([(x, Tm A0')] ++ G0) = [(x, Tm A)] ++ G). { unfold erase_context. simpl in *. unfold erase_context in H1. congruence. } destruct (H x FrL _ E AN) as [B0 [S [E2 [E3 AT2]]]]. clear H. clear E. clear AN. destruct (erasure_a_Star AT2 E3) as [B0' [E4 AT4]]. exists (a_Pi rho A0' (close_tm_wrt_tm x B0')). exists a_Star. repeat split. { simpl. f_equal; auto. autorewcs. rewrite <- (close_tm_erase_tm x B0'). rewrite <- E4. rewrite E2. simpl. rewrite close_tm_wrt_tm_open_tm_wrt_tm; auto. } { eapply An_Pi_exists with (x:=x); eauto. autorewrite with lngen. fsetdec. rewrite open_tm_wrt_tm_close_tm_wrt_tm. eauto. } - (* E_Abs *) destruct (H0 G0 H1 H2) as [A0 [s0 [E1 [E2 AT]]]]. clear H0. destruct (erasure_a_Star AT E2) as [A0' [EQ3 AS]]. assert (EQA : erase A0' = A). rewrite <- EQ3. auto. pick fresh x. assert (FrL : x `notin` L). auto. assert (AN: AnnCtx ((x ~ Tm A0') ++ G0)). eauto with ctx_wff. assert (E : erase_context ([(x, Tm A0')] ++ G0) = [(x, Tm A)] ++ G). rewrite <- H1. unfold erase_context. simpl in *. congruence. destruct (H x FrL _ E AN) as [b0 [B0 [E3 [E4 AT_2]]]]. clear H. clear E. exists (a_Abs rho A0' (close_tm_wrt_tm x b0)). exists (a_Pi rho A0' (close_tm_wrt_tm x B0)). split. simpl in *. subst. f_equal. (* Little hack because we need a better control of how simpl simplifies erase (and its monomorphic versions) *) set (k := close_tm_erase_tm). simpl in k. unfold close_tm_wrt_tm. rewrite <- k; auto. rewrite E3. (* assert (k' : forall x', close_tm_wrt_tm_rec 0 x x' = close_tm_wrt_tm x x') by done; rewrite_and_clear k'. *) rewrite close_tm_wrt_tm_open_tm_wrt_tm; auto. split. simpl. subst. f_equal. autorewcs. congruence. (* FIXME: general solution *) (* have: (forall x (t : tm), close_tm x t = close_tm_rec 0 x t) by reflexivity. move=> ->.*) rewrite <- close_tm_erase_tm. rewrite E4. simpl. (* assert (k' : forall x', close_tm_wrt_tm_rec 0 x x' = close_tm_wrt_tm x x') by done; rewrite_and_clear k'. *) rewrite close_tm_wrt_tm_open_tm_wrt_tm; auto. apply An_Abs_exists with (x := x); auto. apply notin_union_3; auto. apply notin_union_3; auto. autorewrite with lngen; auto. autorewrite with lngen; auto. rewrite open_tm_wrt_tm_close_tm_wrt_tm; auto. rewrite open_tm_wrt_tm_close_tm_wrt_tm; auto. rewrite open_tm_wrt_tm_close_tm_wrt_tm; auto. autorewcs. rewrite E3. eapply r; auto. - (* E_App *) destruct (H G0 H1 H2) as [a0 [AB0 [F1 [F2 Ty2]]]]. clear H. destruct (H0 G0 H1 H2) as [b0 [A0 [M1 [M2 Ty3]]]]. clear H0. assert (K : AnnTyping G0 AB0 a_Star). eapply AnnTyping_regularity; eauto. destruct (erase_pi F2 K) as [PA [PB [EAB [EPA [EPB TYB]]]]]. inversion TYB. subst. (* remember (g_Refl2 AB0 (a_Pi PA PB) (g_Refl a_Star)) as g. assert (AnnDefEq G0 empty g AB0 (a_Pi PA PB)). { rewrite Heqg. eapply An_EraseEq. eauto. eauto. eauto. eauto. } remember (a_Conv a0 (g_Refl2 AB0 (a_Pi PA PB) (g_Refl a_Star))) as a0'. assert (ATA' : AnnTyping G0 a0' (a_Pi PA PB)). { rewrite Heqa0'. eapply An_Conv. 
eauto. eauto. eauto. } *) assert (N : AnnTyping G0 A0 a_Star). eapply AnnTyping_regularity; eauto. destruct (erasure_cvt Ty2 EAB) as [a0' [g ATA']]; eauto. destruct (erasure_cvt Ty3 (symmetry EPA)) as [b0' [g' ATB']]; eauto. exists (a_App a0' Rel b0'). exists (open_tm_wrt_tm PB b0'). simpl. rewrite <- open_tm_erase_tm. simpl in *. repeat split. congruence. congruence. eauto. - (* E_IApp case *) destruct (H G0 H1 H2) as [a0 [AB0 [F1 [F2 Ty2]]]]. clear H. destruct (H0 G0 H1 H2) as [b0 [A0 [M1 [M2 Ty3]]]]. clear H0. assert (K : AnnTyping G0 AB0 a_Star). eapply AnnTyping_regularity; eauto. destruct (erase_pi F2 K) as [PA [PB [EAB [EPA [EPB TYB]]]]]. inversion TYB. subst. assert (N : AnnTyping G0 A0 a_Star). eapply AnnTyping_regularity; eauto. destruct (erasure_cvt Ty2 EAB) as [a0' [g ATA']]; eauto. destruct (erasure_cvt Ty3 (symmetry EPA)) as [b0' [g' ATB']]; eauto. exists (a_App a0' Irrel b0'). exists (open_tm_wrt_tm PB b0'). simpl. rewrite <- open_tm_erase_tm. simpl in *. repeat split. congruence. congruence. eauto. - (* ex_conv case *) destruct (H G0 H2) as [a0 [A0 [E1 [E2 Ty]]]]; auto. clear H. destruct (H0 G0 H2 H3) as [g [A0' [B0' [S [Ea [Eb [Es [DE [Z Z']]]]]]]]]; auto; clear H0. subst. replace a_Star with (erase a_Star) in Es; [|simpl;auto]. destruct (erasure_cvt Z Es) as [A0'' [AS1 AS2]]. eapply An_Star. assumption. assert (Ea' : erase A0 = erase A0''). rewrite -AS1. auto. destruct (erasure_cvt Ty Ea') as [a'' [Ea0 Ta0]]. eauto. destruct (AnnDefEq_invertb DE) as [SA [B0'' [g5 [AT1 [AT2 [Eb SS]]]]]]. resolve_unique_nosubst. destruct (erasure_a_Star AT2 Es) as (B0 & EB0 & TB0). pose A0S := AnnTyping_regularity Ty. clearbody A0S. rewrite -erase_dom in DE. assert (E1 :exists g, AnnDefEq G0 (dom G0) g A0 A0'). { eexists. eapply An_EraseEq. eauto. eauto. eauto. eapply An_EraseEq. eauto. eapply AnnTyping_regularity. eauto. eauto. eapply An_Refl. eauto. } destruct E1. assert (E2 : exists g, AnnDefEq G0 (dom G0) g A0' A0''). { eexists. eapply An_EraseEq. eauto. eapply AnnTyping_regularity. eauto. eauto. eapply An_EraseEq. eapply AnnTyping_regularity. eauto. eapply An_Star. eauto. eauto. eapply An_Refl. eauto. } destruct E2. assert (E3 : exists g, AnnDefEq G0 (dom G0) g A0'' B0''). { destruct (An_Sym' H0). rewrite -erase_dom in SS. destruct (An_Trans' DE SS); try eassumption. eapply An_Trans' with (a1 := A0'); try eassumption. } destruct E3 as [g'' EQ]. assert (E4 : exists g, AnnDefEq G0 (dom G0) g B0'' B0). { eexists. eapply An_EraseEq. eauto. eauto. eauto. eapply An_EraseEq. eapply AnnTyping_regularity. eauto. eapply An_Star. eauto. eauto. eapply An_Refl; eauto. } destruct E4 as (gg & EE). destruct (An_Trans' EQ EE). eexists (a_Conv a'' x1). eexists B0. split. eauto. split. congruence. eapply An_Conv. eauto. eauto. eauto. - (* CPi *) pick fresh c. assert (FrL : c `notin` L). auto. destruct (H0 G0 H1 H2) as [phi0 [EQ1 AT]]. clear H0. assert (AN: AnnCtx ((c ~ Co phi0) ++ G0)). eauto with ctx_wff. assert (E : erase_context ([(c, Co phi0)] ++ G0) = [(c, Co phi)] ++ G). unfold erase_context. simpl. rewrite EQ1. unfold erase_context in H1. rewrite H1. auto. destruct (H c FrL _ E AN) as [b0 [S0 [E2 [E3 AT2]]]]. clear H. clear E. clear AN. destruct (erasure_a_Star AT2) as [b0' [EB N1]]; eauto. exists (a_CPi phi0 (close_tm_wrt_co c b0')). exists a_Star. split. simpl. f_equal. auto. autorewcs. rewrite <- close_co_erase_tm. rewrite <- EB. rewrite E2. simpl. rewrite close_tm_wrt_co_open_tm_wrt_co; auto. split. auto. eapply An_CPi_exists with (c := c); eauto. apply notin_union_3; auto. 
pose K := fv_co_co_tm_close_tm_wrt_co b0' c. clearbody K. unfold AtomSetImpl.Equal in K. rewrite K. fsetdec. rewrite open_tm_wrt_co_close_tm_wrt_co. auto. - (* abs *) destruct (H0 G0 H1 H2) as [A0 [E1 AT]]. clear H0. clear t. pick fresh x. assert (FrL : x `notin` L). auto. assert (AN: AnnCtx ((x ~ Co A0) ++ G0)). eauto with ctx_wff. assert (E : erase_context ([(x, Co A0)] ++ G0) = [(x, Co phi)] ++ G). rewrite <- H1. unfold erase_context. simpl. rewrite E1. auto. destruct (H x FrL _ E AN) as [b0 [B0 [E3 [E4 AT_2]]]]. clear H. clear E. exists (a_CAbs A0 (close_tm_wrt_co x b0)). exists (a_CPi A0 (close_tm_wrt_co x B0)). split. simpl. subst. f_equal. autorewcs. rewrite <- close_co_erase_tm; auto. rewrite E3. simpl. rewrite close_tm_wrt_co_open_tm_wrt_co; auto. split. simpl. subst. f_equal. autorewcs. rewrite <- close_co_erase_tm. rewrite E4. simpl. rewrite close_tm_wrt_co_open_tm_wrt_co; auto. apply An_CAbs_exists with (c := x); auto. { apply notin_union_3; auto. apply notin_union_3; auto. pose K := fv_co_co_tm_close_tm_wrt_co b0 x. clearbody K. unfold AtomSetImpl.Equal in K. rewrite K. auto. pose K := fv_co_co_tm_close_tm_wrt_co B0 x. clearbody K. unfold AtomSetImpl.Equal in K. rewrite K. auto. } rewrite open_tm_wrt_co_close_tm_wrt_co; auto. rewrite open_tm_wrt_co_close_tm_wrt_co; auto. - (* CApp *) clear d. clear t. destruct (H G0 H1 H2) as [a0 [A0 [E1 [E2 Ty]]]]. clear H. destruct (H0 G0 H1 H2) as [g [A0' [B0' [Ea' [Eb DE ]]]]]. clear H0. destruct DE as [Eb0 [EA [EQ [AT _]]]]. pose K := AnnTyping_regularity Ty. destruct (erase_cpi E2 K) as [phi2 [B2 [E3 [Ep [EB2 AP]]]]]. destruct phi2. simpl in *. inversion Ep. clear Ep. subst. rename A1 into A2. rename a2 into A1. rename b0 into B. destruct (erasure_cvt Ty) with (B := a_CPi (Eq A1 B A2) B2) as [a0' [TA' EA']]; eauto. inversion AP. inversion H4. subst. inversion H6. subst. assert (K1 : exists g, AnnDefEq G0 (dom G0) g A1 B0'). { eapply An_Trans' with (a1 := A0'). eapply An_EraseEq. eauto. eauto. eauto. eapply An_EraseEq. eapply AnnTyping_regularity. eauto. eapply AnnTyping_regularity. eauto. eauto. eauto. rewrite -erase_dom in EQ. eapply EQ. } destruct K1. destruct (AnnDefEq_regularity H) as [C1 [C2 [gB [T1 [T2 DE2]]]]]. resolve_unique_subst. destruct (An_Sym' DE2). assert (K3 : exists g, AnnDefEq G0 (dom G0) g C2 B0). { eapply An_Trans' with (a1 := C1). eauto. eapply An_EraseEq. eapply AnnTyping_regularity. eauto. eapply AnnTyping_regularity. eauto. eauto. eauto. } destruct K3. assert (K4 : exists g, AnnDefEq G0 (dom G0) g B0' B). { eexists. eapply An_EraseEq. eapply T2. eapply H11. eauto. eauto. } destruct K4. assert (K2 : exists g, AnnDefEq G0 (dom G0) g A1 B). { eapply An_Trans' with (a1 := B0'). eauto. eauto. } destruct K2 as [g' Y]. exists (a_CApp a0' g'). exists (open_tm_wrt_co B2 g'). subst. simpl. rewrite <- open_co_erase_tm. rewrite no_co_in_erased_tm. repeat split. autorewcs. congruence. eauto. - exists (a_Const T). destruct (H0 nil eq_refl) as (a0 & A0 & E1 & E2 & Ty). auto. clear H0. unfold toplevel in b. unfold erase_sig in b. destruct (@binds_map_3 _ _ erase_csort T (Cs A) an_toplevel). (* destruct (@binds_map_3 _ _ T (Cs A) erase_csort an_toplevel b). *) eauto. split_hyp. destruct x; simpl in H0; inversion H0. subst. exists A1. simpl. split; eauto. split; eauto. econstructor; eauto. eapply an_toplevel_to_const; eauto. - destruct (H0 nil eq_refl) as (a0 & A0 & E1 & E2 & Ty). auto. unfold toplevel in b. unfold erase_sig in b. destruct (@binds_map_3 _ _ erase_csort F (Ax a A) an_toplevel). eauto. split_hyp. 
destruct x; inversion H3. exists (a_Fam F). exists A1. repeat split; auto. eapply An_Fam; eauto. eapply AnnTyping_regularity. eapply an_toplevel_closed. eauto. - destruct (H G0 H2 H3) as [a0 [A0 [E1 [E2 Ty]]]]. clear H. destruct (H0 G0 H2 H3) as [b0 [A1 [E3 [E4 TyB]]]]. clear H0. clear H1. subst. exists (Eq a0 b0 A0). simpl. split. auto. eauto. - (* PropCong *) clear d. clear d0. rename A1 into a0. rename A2 into b0. rename B1 into a1. rename B2 into b1. destruct (H G0 H1 H2) as [g0 [a0' [b0' [A' [Ea0 [Eb0 [EA0 [DE0 [T0 _]]]]]]]]]. clear H. destruct (H0 G0 H1 H2) as [g1 [a1' [b1' [B' [Ea1 [Eb1 [EA1 [DE1 [T1 _]]]]]]]]]. clear H0. move :(AnnTyping_regularity T0) => R0. move :(AnnTyping_regularity T1) => R1. assert (TEMP: exists g, AnnDefEq G0 (dom G0) g A' B'). { eexists. eapply An_EraseEq. eauto. eauto. autorewcs. congruence. eapply An_Refl. eauto. } destruct TEMP as (gX & EqA'B'). destruct (An_Sym' EqA'B') as (gY & EqB'A'). remember (a_Conv a1' gY) as a1''. assert (AnnTyping G0 a1'' A'). rewrite Heqa1''; eapply An_Conv; eassumption. assert (erase a1'' = a1). rewrite Heqa1''. simpl. autorewcs. congruence. assert (AnnPropWff G0 (Eq a0' a1'' A')). { econstructor. eauto. eauto. autorewcs. congruence. } (* Now need b0'' : A'. get it from a0' ~ b0' *) destruct (AnnDefEq_invertb DE0) as [AA0' [b0'' [gb0 [TA0 [TB0 [E DE0']]]]]]. resolve_unique_nosubst. (* Now we need b1'' : A' get it from a1' ~ b1' ?? *) assert (TEMP : exists g, AnnDefEq G0 D g a1'' a1'). { eexists. eapply An_EraseEq. eauto. eauto. autorewcs. congruence. eauto. } destruct TEMP as (gZ & Eqa1''a1'). destruct (An_Trans' Eqa1''a1' DE1) as (gY1 & Eqa1''b1'). destruct (AnnDefEq_invertb Eqa1''b1') as [AA1'' [b1'' [gb1 [TA1 [TB1 [E1 DE1']]]]]]. resolve_unique_nosubst. assert (AnnPropWff G0 (Eq b0'' b1'' A')). econstructor. eauto. eauto. autorewcs. congruence. assert (TEMP : exists g, AnnDefEq G0 D g a0' b0''). eapply (An_Trans' DE0 DE0'). destruct TEMP as [gY2 Eqa0'b0'']. assert (TEMP : exists g, AnnDefEq G0 D g a1'' b1''). eapply (An_Trans' Eqa1''b1' DE1'). destruct TEMP as [gY3 Eqa1''b1'']. eexists. exists (Eq a0' a1'' A'). exists (Eq b0'' b1'' A'). split. simpl. autorewcs. f_equal; auto. split. simpl. autorewcs. f_equal; try congruence. simpl. autorewcs. f_equal; auto. econstructor; eauto. - clear d. clear p0. clear p. destruct (H G0 H2 H3) as (g & A' & B' & S & EA & EB & ES & DE & TA & TB). clear H. destruct (H0 G0 H2 H3) as (phi0 & Ep0 & WF0). clear H0. destruct (H1 G0 H2 H3) as (phi1 & Ep1 & WF1). clear H1. destruct phi0 as [A1a A2a A'']. destruct phi1 as [A1b A2b B'']. simpl in Ep0. inversion Ep0. clear Ep0. simpl in Ep1. inversion Ep1. clear Ep1. inversion WF0. subst. inversion WF1. subst. move: (AnnTyping_regularity H8) => R1. move: (AnnTyping_regularity H9) => R2. move: (AnnTyping_regularity H11) => R3. move: (AnnTyping_regularity H12) => R4. destruct (AnnDefEq_invert_a_Star DE TA ES) as (A''' & B''' & g2 & EA2 & EB2 & DE2 & TAS & TBS). simpl in *. destruct (erasure_cvt H12) with (B:= A'') as (A2a' & E2a & T2a); eauto 1. (* p1 is (Eq A1a a2a' A''). Want other side to also have type A'' *) assert (TMP: exists g, AnnDefEq G0 D g A''' A''). { eexists. eapply An_EraseEq; eauto 1. congruence. eapply An_Refl; eauto 2. } destruct TMP as (ga & EAAA). (* convert type of A1b from B'' to A'' *) assert (TMP : exists g, AnnDefEq G0 D g B'' A''). { eexists. eapply An_Trans2 with (a1 := B'''). eapply An_EraseEq; eauto 1. congruence. eapply An_Refl; eauto 2. eapply An_Trans2 with (a1 := A'''). eapply An_Sym2; eauto 1. 
eapply An_EraseEq; eauto 1. congruence. eapply An_Refl; eauto 2. } destruct TMP as (gb & EBA). (* convert type of A2b from B to A'' *) assert (TMP : exists g, AnnDefEq G0 D g B A''). { eexists. eapply An_Trans2 with (a1 := B''); eauto 1. eapply An_EraseEq; eauto 1. eapply An_Refl; eauto 2. } destruct TMP as (gc & EBBA). eexists. exists (Eq A1a A2a A''). exists (Eq A1b A2b B''). repeat split; auto. simpl; auto. f_equal. congruence. congruence. eapply An_IsoConv. eapply An_Sym2. eauto 1. eapply An_Wff; eauto 1. eapply An_Wff; eauto 1. congruence. congruence. - (* CPiFst *) clear d. destruct (H G0 H0 H1) as [g [a0 [b0 [A0' [E1 [E2 [E3 [DE [Ty UT]]]]]]]]]. clear H. subst. destruct (AnnDefEq_regularity DE) as [A0 [B0 [g0 [TA0 [TB0 DE0]]]]]. destruct (erase_cpi E1 TA0) as [phi1' [B1' [E [Ephi [EB T1]]]]]. destruct (erase_cpi E2 TB0) as [phi2' [B2' [E' [Ephi' [EB' T2]]]]]. resolve_unique_nosubst. resolve_unique_nosubst. destruct (An_Refl_Star D E T1 Ty E3). assert (TB1 : AnnTyping G0 (a_Conv b0 (g_Sym g0)) A0'). { eapply An_Conv. eauto. eapply An_Sym. eapply AnnTyping_regularity. eauto. eapply AnnTyping_regularity. eauto. eapply An_Refl. eapply An_Star. eauto. eauto. eapply AnnTyping_regularity. eauto. } assert (E4 : erase (a_Conv b0 (g_Sym g0)) = erase (a_CPi phi2' B2')). { simpl. autorewcs. rewrite E'. auto. } destruct (An_Refl_Star D E4 T2 TB1 E3). assert (exists g, AnnDefEq G0 D g (a_CPi phi1' B1') (a_CPi phi2' B2')). { eapply erasure_AnnDefEq with (A'' := a0) (B'' := b0); auto. eauto. eauto. eauto. } destruct H2. destruct phi1. destruct phi2. eexists. exists phi1', phi2'. destruct phi1'. destruct phi2'. simpl in *. repeat split. congruence. congruence. eapply An_CPiFst. eapply H2. - (* assn *) rewrite <- H0 in b0. unfold erase_context in b0. destruct (binds_map_3 _ _ erase_sort c (Co (Eq a b A)) G0) as [s [E2 E3]]. eauto. destruct s; try (simpl in E2; inversion E2). destruct phi. simpl in E2. inversion E2. subst. clear E2. move: (binds_to_AnnPropWff _ _ _ _ H1 E3) => K. inversion K. subst. move: (AnnTyping_regularity H6) => TA1. move: (AnnTyping_regularity H7) => TB0. assert (exists g, AnnDefEq G0 (dom G0) g B A0). { eexists. eapply An_EraseEq; eauto 1. eapply An_Refl; eauto 2. } destruct H0 as [g' DE]. assert (AnnTyping G0 (a_Conv b1 g') A0). eapply An_Conv; eauto 1. eexists. exists a0, (a_Conv b1 g'), A0. repeat split. eapply An_Trans2 with (a1 := b1); eauto 1. eapply An_Assn; eauto. eapply An_EraseEq; eauto. eauto. eauto. - (* refl *) destruct (H G0 H0 H1) as [a0' [A0 [E1 [E2 Ty ]]]]. clear H. eexists. exists a0', a0', A0. repeat split; auto. eapply An_Refl. eauto. - (* sym *) destruct (H G0 H0 H1) as [g [a0 [b0 [A0 [E1 [E2 [E3 [DE [Ty TU]]]]]]]]]. clear H. destruct (AnnDefEq_invertb DE) as [A0' [b0' [g' [T1 [T2 [T3 T4]]]]]]. resolve_unique_nosubst. assert (exists g, AnnDefEq G0 D g b0' a0). { destruct (An_Sym' DE). destruct (An_Sym' T4). eapply (An_Trans' H2 H). } destruct H. eexists. exists b0'. exists a0. exists A0. repeat split; auto. congruence. eassumption. - (* Trans *) destruct (H G0 H1 H2) as (g0 & a' & a1' & A0 & E1 & E2 & E3 & DE & Ty & TyU). clear H. destruct (H0 G0 H1 H2) as (g1 & a1'' & b' & A1 & E4 & E5 & E6 & DE1 & Ty1 & TyU1). clear H0. destruct (AnnDefEq_invertb DE) as (A' & a1''' & g2 & T1 & T2 & E7 & DE2). destruct (AnnDefEq_invertb DE1) as (B' & b'' & g3 & T3 & T4 & E8 & DE3). subst. destruct (An_Trans' DE DE2). destruct (An_Trans' DE1 DE3). resolve_unique_nosubst. resolve_unique_nosubst. assert (exists g, AnnDefEq G0 D g a1''' a1''). { eexists. eapply An_EraseEq. 
eauto. eauto. autorewcs. congruence. eapply An_EraseEq. eapply AnnTyping_regularity. eauto. eapply AnnTyping_regularity. eauto. eauto. eauto. } destruct H1. destruct (An_Trans' H H1). destruct (An_Trans' H3 H0). destruct (AnnDefEq_invertb H4) as (? & b''' & ? & T3' & T4' & E8' & DE3'). resolve_unique_nosubst. eexists. exists a'. exists b'''. exists A0. repeat split; auto. congruence. eapply An_Trans2 with (a1 := b''); eauto 1. - (* step case *) destruct (H G0 H1 H2) as [a1' [A1 [E1 [E2 Ty]]]]. clear H. destruct (H0 G0 H1 H2) as [a2' [A2 [E1' [E2' Ty']]]]. clear H0. subst. assert (exists g, AnnDefEq G0 D g A2 A1). { eexists. eapply An_EraseEq. eapply AnnTyping_regularity. eauto. eapply AnnTyping_regularity. eauto. eauto 1. eapply An_Refl. eauto 2. } destruct H. assert (AnnTyping G0 (a_Conv a2' x) A1). { eapply An_Conv; eauto 1. eapply AnnDefEq_weaken_available; eauto 1. eapply AnnTyping_regularity. eauto. } eexists. exists a1', (a_Conv a2' x), A1. repeat split; eauto 1. eapply An_Beta; eauto 1. - (* pi-cong *) clear d. clear d0. clear H1. rename H2 into H1. rename H3 into H2. rename H4 into H3. rename H5 into H4. destruct (H G0 H3 H4) as (g1 & A1' & A2' & S & EA1 & EA2 & EA3 & DE & T & U). clear H. clear H1. clear H2. destruct (AnnDefEq_invert_a_Star DE T EA3) as (A1'' & A2'' & g5 & EA5 & EA4 & DE4 & TA1' & TA2'). assert (erase A1'' = A1). congruence. assert (erase A2'' = A2). congruence. clear dependent A1'. clear dependent A2'. clear dependent S. pick fresh x1. assert (FrL : x1 `notin` L). auto. assert (CTX1 : AnnCtx ([(x1, Tm A1'')] ++ G0)). eauto with ctx_wff. destruct (H0 x1 FrL ([(x1,Tm A1'')] ++ G0)) as (g2 & B1' & B2' & S & EB1 & EB2 & ES & DEB & DT & _); auto. { simpl. autorewcs. congruence. } clear H0. destruct (AnnDefEq_invert_a_Star DEB DT ES) as (B1'' & B2'' & g6 & EB3 & EB4 & DE5 & TB1' & TB2'); auto. assert (erase B1'' = open_tm_wrt_tm B1 (a_Var_f x1)). congruence. assert (erase B2'' = open_tm_wrt_tm B2 (a_Var_f x1)). congruence. clear dependent B1'. clear dependent B2'. clear dependent S. pick fresh x2. remember (close_tm_wrt_tm x1 B2'') as CB2. remember (open_tm_wrt_tm CB2 (a_Conv (a_Var_f x2) (g_Sym g5))) as B3. assert (CTX2 : AnnCtx ([(x2, Tm A2'')] ++ G0)). eauto with ctx_wff. assert (CTX3 : AnnCtx ([(x2, Tm A2'')] ++ [(x1, Tm A1'')] ++ G0)). { eapply An_ConsTm; eauto with ctx_wff. eapply (AnnTyping_weakening _ [(x1, Tm A1'')] nil); simpl; eauto with ctx_wff. } assert (AnnTyping G0 (a_Pi rho A1'' (close_tm_wrt_tm x1 B1'')) a_Star). { eapply An_Pi_exists with (x := x1). autorewrite with lngen. clear dependent x2. auto. autorewrite with lngen. auto. auto. } assert (AnnTyping G0 (a_Pi rho A2'' (close_tm_wrt_tm x2 B3)) a_Star). { eapply An_Pi_exists with (x := x2). autorewrite with lngen. auto. rewrite HeqB3. rewrite HeqCB2. autorewrite with lngen. rewrite -tm_subst_tm_tm_spec. replace a_Star with (tm_subst_tm_tm (a_Conv (a_Var_f x2) (g_Sym g5)) x1 a_Star); [|simpl; auto]. eapply AnnTyping_tm_subst; eauto. eapply AnnTyping_weakening with (F := ([(x1, Tm A1'')])); eauto. eapply An_ConsTm; eauto. eapply AnnTyping_weakening with (F := nil); eauto. eapply An_Conv; eauto. eapply AnnDefEq_weakening with (F := nil)(G0 := G0). eapply (fourth ann_weaken_available_mutual) with (D := dom G0). eapply AnnDefEq_weaken_available. eauto. simpl. clear Fr. clear Fr0. fsetdec. eauto. simpl_env. auto. eapply AnnTyping_weakening with (F := nil); eauto. eauto. 
} exists (g_PiCong rho g5 (close_co_wrt_tm x1 g6)), (a_Pi rho A1'' (close_tm_wrt_tm x1 B1'')), (a_Pi rho A2'' (close_tm_wrt_tm x2 B3)), a_Star. repeat split; auto. + simpl. rewrite <- close_tm_erase_tm; auto. rewrite H0. simpl. rewrite close_tm_wrt_tm_open_tm_wrt_tm; auto. rewrite -H. auto. + simpl. f_equal. auto. rewrite <- close_tm_erase_tm; auto. rewrite HeqB3. rewrite HeqCB2. rewrite <- open_tm_erase_tm. rewrite <- close_tm_erase_tm. rewrite H2. simpl. rewrite close_tm_wrt_tm_open_tm_wrt_tm; auto. rewrite close_tm_wrt_tm_open_tm_wrt_tm; auto. autorewrite with lngen. apply notin_remove_2. pose KK := fv_tm_tm_tm_open_tm_wrt_tm_upper B2 (a_Var_f x1). clearbody KK. unfold AtomSetImpl.Subset in KK. unfold not. intros NN. apply KK in NN. apply notin_union in NN. inversion NN. clear KK. simpl. auto. auto. + eapply An_PiCong_exists with (x1 := x1) (x2 := x2) (B2 := CB2); auto. ++ rewrite HeqCB2. autorewrite with lngen. auto. ++ rewrite HeqB3. rewrite HeqCB2. autorewrite with lngen. apply notin_union; auto. ++ rewrite HeqCB2. autorewrite with lngen. auto. ++ autorewrite with lngen. auto. ++ rewrite HeqCB2. autorewrite with lngen. move: (AnnDefEq_context_fv DE5) => /= ?. clear Fr Fr0. apply An_Pi_exists with (x:=x1). +++ apply notin_union. inversion CTX1. auto. autorewrite with lngen. fsetdec. +++ autorewrite with lngen. auto. +++ auto. - (* abs-cong *) clear d. rename H1 into H3. rename H2 into H4. destruct (H0 G0 H3 H4) as (A1' & S1 & EA1 & ES & AT). clear H0. subst. destruct (erasure_a_Star AT ES) as (A1 & EA5 & AT1). (* destruct (AnnDefEq_invert_a_Star DE AT ES) as (A1 & A2 & gg & EA5 & EA6 & H & AT1 & AT2). rewrite -EA5. rewrite -EA6. rewrite -EA5 in H0. clear dependent A1'. clear dependent A2'. clear dependent S. *) pick fresh x1. assert (FrL : x1 `notin` L). auto. destruct (H x1 FrL ([(x1,Tm A1)] ++ G0)) as (g2 & b1' & b2' & B' & EB1 & EB2 & S & DEB & TB & TB2); auto. simpl. autorewcs. congruence. pick fresh x2. remember (close_tm_wrt_tm x1 b2') as b2''. remember (g_Refl A1) as gg. assert (AnnDefEq G0 D gg A1 A1). { rewrite Heqgg. eauto 3. } remember (open_tm_wrt_tm b2'' (a_Conv (a_Var_f x2) (g_Sym gg))) as b3. remember (open_tm_wrt_tm (close_tm_wrt_tm x1 B') (a_Conv (a_Var_f x2) (g_Sym gg))) as B3. assert (AnnTyping G0 (a_Abs rho A1 (close_tm_wrt_tm x1 b1')) (a_Pi rho A1 (close_tm_wrt_tm x1 B'))). { eapply An_Abs_exists with (x := x1). + autorewrite with lngen. clear dependent x2. auto. + auto. + autorewrite with lngen. auto. + autorewrite with lngen. autorewcs. rewrite EB1. auto. } assert (CTX2 : AnnCtx ([(x2, Tm A1)] ++ G0)). eauto with ctx_wff. assert (CTX3 : AnnCtx ([(x2, Tm A1)] ++ [(x1, Tm A1)] ++ G0)). { eapply An_ConsTm; eauto. eapply (AnnTyping_weakening _ [(x1, Tm A1)] nil); simpl; eauto with ctx_wff. } assert (AnnTyping G0 (a_Abs rho A1 (close_tm_wrt_tm x2 b3)) (a_Pi rho A1 (close_tm_wrt_tm x2 B3))). { eapply An_Abs_exists with (x := x2). + autorewrite with lngen. auto. + auto. + rewrite Heqb3. rewrite HeqB3. rewrite Heqb2''. autorewrite with lngen. rewrite (tm_subst_tm_tm_intro x1). rewrite -(tm_subst_tm_tm_spec B'). eapply AnnTyping_tm_subst; eauto 1. autorewrite with lngen. eapply AnnTyping_weakening; eauto 1. eapply An_ConsTm; eauto 1. eapply AnnTyping_weakening with (F:=nil); eauto 1. simpl. eauto. eapply An_Conv. eapply An_Var; eauto. eapply An_Sym2. eapply AnnDefEq_weakening with (F:=nil); eauto 1. simpl. eapply (fourth ann_weaken_available_mutual) with (D:= dom G0). eapply AnnDefEq_weaken_available. eauto. clear Fr Fr0. fsetdec. 
eapply AnnTyping_weakening with (F:=nil); eauto 1. autorewrite with lngen. eauto. + rewrite Heqb3. rewrite Heqb2''. autorewrite with lngen. rewrite (tm_subst_tm_tm_intro x1); auto. autorewrite with lngen. autorewcs. rewrite -subst_tm_erase_tm; auto. simpl. autorewcs. rewrite EB2. rewrite -(tm_subst_tm_tm_intro x1); auto. autorewrite with lngen. auto. } assert (TMP: exists g, AnnDefEq G0 D g (a_Pi rho A1 (close_tm_wrt_tm x1 B')) (a_Pi rho A1 (close_tm_wrt_tm x2 B3))). { eexists. eapply An_PiCong_exists with (x1:=x1) (x2:=x2) (B2 := close_tm_wrt_tm x1 B') (g1:= gg) (g2 := (close_co_wrt_tm x1 (g_Refl B'))). + simpl. autorewrite with lngen. clear Fr0. auto. + autorewrite with lngen. apply notin_union. auto. rewrite Heqgg. auto. + auto. + autorewrite with lngen. eapply An_Refl. eapply AnnTyping_regularity. eauto 1. + autorewrite with lngen. auto. + eapply AnnTyping_regularity. eauto 1. + eapply AnnTyping_regularity. eauto 1. + autorewrite with lngen. move: (AnnTyping_context_fv TB) => /= ?. clear Fr Fr0. apply An_Pi_exists with (x := x1). apply notin_union. inversion CTX3. inversion H7. auto. autorewrite with lngen. fsetdec. autorewrite with lngen. eapply AnnTyping_regularity. eauto. inversion CTX2. auto. } destruct TMP as [gpi Epipi]. assert (AnnTyping G0 (a_Conv (a_Abs rho A1 (close_tm_wrt_tm x2 b3)) (g_Sym gpi)) (a_Pi rho A1 (close_tm_wrt_tm x1 B'))). { eapply An_Conv. eauto 1. eapply An_Sym2. eapply AnnDefEq_weaken_available; eauto 1. eapply AnnTyping_regularity. eauto 1. } eexists. exists (a_Abs rho A1 (close_tm_wrt_tm x1 b1')), (a_Conv (a_Abs rho A1 (close_tm_wrt_tm x2 b3)) (g_Sym gpi)), (a_Pi rho A1 (close_tm_wrt_tm x1 B')). repeat split; eauto 1. { simpl. f_equal. rewrite <- close_tm_erase_tm; auto. rewrite EB1. simpl. rewrite close_tm_wrt_tm_open_tm_wrt_tm; auto. } { simpl. f_equal. auto. rewrite <- close_tm_erase_tm; auto. rewrite Heqb3. rewrite Heqb2''. rewrite <- open_tm_erase_tm. rewrite <- close_tm_erase_tm. rewrite EB2. simpl. rewrite close_tm_wrt_tm_open_tm_wrt_tm; auto. rewrite close_tm_wrt_tm_open_tm_wrt_tm; auto. autorewrite with lngen. apply notin_remove_2. pose KK := fv_tm_tm_tm_open_tm_wrt_tm_upper b2 (a_Var_f x1). clearbody KK. unfold AtomSetImpl.Subset in KK. unfold not. intros NN. apply KK in NN. apply notin_union in NN. inversion NN. clear KK. simpl. auto. auto. } { simpl. f_equal. autorewcs. congruence. autorewcs. rewrite -close_tm_erase_tm; auto. rewrite S. simpl. rewrite close_tm_wrt_tm_open_tm_wrt_tm; auto. } { eapply An_Trans2 with (a1 := (a_Abs rho A1 (close_tm_wrt_tm x2 b3))). { eapply An_AbsCong_exists with (x1:=x1)(x2:=x2)(b2 := b2'') (g1 := gg) (g2 := (close_co_wrt_tm x1 g2)) (B := a_Pi rho A1 (close_tm_wrt_tm x1 B')). + rewrite Heqb2''. autorewrite with lngen. auto. + rewrite Heqb3. rewrite Heqb2''. autorewrite with lngen. apply notin_union; auto. rewrite Heqgg. auto. + auto. + rewrite Heqb2''. autorewrite with lngen. auto. + autorewrite with lngen. auto. + auto. + auto. + autorewrite with lngen. autorewcs. rewrite EB1. auto. + rewrite Heqb3. rewrite Heqb2''. autorewrite with lngen. rewrite (tm_subst_tm_tm_intro x1); auto. autorewrite with lngen. autorewcs. rewrite -subst_tm_erase_tm; auto. simpl. autorewcs. rewrite EB2. rewrite -(tm_subst_tm_tm_intro x1); auto. autorewrite with lngen. auto. + rewrite Heqb2''. autorewrite with lngen. clear Fr Fr0. move: (AnnTyping_context_fv TB2) => /= ?. inversion CTX3. inversion H8. subst. eapply An_Abs_exists with (x:= x1). autorewrite with lngen. fsetdec. auto. autorewrite with lngen. auto. autorewrite with lngen. 
{ apply An_Abs_inversion in H2. destruct H2 as [BB [h0 [h1 h2]]]. move: (h2 x1 ltac:(auto)) => [h3 _]. rewrite <- open_tm_erase_tm in h3. rewrite <- close_tm_erase_tm in h3. rewrite <- open_tm_erase_tm in h3. rewrite <- close_tm_erase_tm in h3. simpl in h3. replace (a_Var_f x2) with (erase_tm (a_Var_f x2)) in h3. replace (a_Var_f x1) with (erase_tm (a_Var_f x1)) in h3. autorewcshyp h3. rewrite close_tm_erase_tm in h3. rewrite open_tm_erase_tm in h3. replace (a_Var_f x2) with (erase_tm (a_Var_f x2)) in h3. rewrite close_tm_erase_tm in h3. rewrite open_tm_erase_tm in h3. simpl in h3. rewrite close_tm_wrt_tm_open_tm_wrt_tm in h3. rewrite open_tm_wrt_tm_close_tm_wrt_tm in h3. auto. autorewrite with lngen. move: (AnnTyping_context_fv TB2) => [h5 _]. simpl in h5. rewrite h5. simpl in H10. fsetdec. auto. auto. auto. } } eapply An_EraseEq; eauto 1. eapply An_Sym. eauto 1. eapply AnnTyping_regularity. eauto 1. eapply AnnTyping_regularity. eauto 1. eapply An_Refl; eauto 2. eapply AnnDefEq_weaken_available; eauto 1. } Unshelve. eauto. eauto. - (* appcong *) destruct (H G0 H1 H2) as [g1 [a1' [b1' [AB1 [EA1 [EA2 [ET1 [DE1 [TAB1 _]]]]]]]]]. clear H. destruct (H0 G0 H1 H2) as [g2 [a2' [b2' [A1 [EA3 [EA4 [ET2 [DE2 [TA1 _]]]]]]]]]. clear H0. move: (AnnTyping_regularity TAB1) => TPi. destruct (erase_pi ET1 TPi) as (A' & B' & E1 & E2 & E3 & TP). inversion TP. subst. destruct (AnnDefEq_regularity DE2) as (A2' & B2' & g3 & ? & Tb2' & DEa2b1). resolve_unique_nosubst. destruct (erasure_cvt TAB1 E1) as (a1'' & E5 & Ta1''); eauto. assert (exists g, AnnDefEq G0 D g a1'' a1'). { eexists. eapply An_EraseEq. eauto. eauto. autorewcs. congruence. eapply An_EraseEq. eapply AnnTyping_regularity. eauto. eapply AnnTyping_regularity. eauto. eauto. eapply An_Refl. eauto. } destruct H as [g4 DEa1''a1']. move: (An_Trans2 DEa1''a1' DE1) => DE4. destruct (erasure_cvt TA1) with (B := A') as (a2'' & E4 & Ta2''); eauto. assert (exists g, AnnDefEq G0 D g a2'' a2'). { eexists. eapply An_EraseEq. eauto. eauto. autorewcs. congruence. eapply An_EraseEq. eapply AnnTyping_regularity. eauto. eapply AnnTyping_regularity. eauto. eauto. eapply An_Refl. eauto. } destruct H as [g5 DEa2''a2']. move: (An_Trans2 DEa2''a2' DE2) => DE3. destruct (AnnDefEq_invertb DE4) as (AB1' & b1'' & g6 & TA1' & TB1' & EB & DE5). resolve_unique_nosubst. destruct (AnnDefEq_invertb DE3) as (A1'' & b2'' & g7 & TA1'' & TB2'' & EB1 & DE6). resolve_unique_nosubst. assert (TT : AnnTyping G0 (a_App a1'' Rel a2'') (open_tm_wrt_tm B' a2'')). { eapply An_App. eauto. eauto. } assert (AnnTyping G0 (a_App b1'' Rel b2'') (open_tm_wrt_tm B' b2'')). { eapply An_App. eauto. eauto. } assert (exists g, AnnDefEq G0 D g a2'' b2''). { eexists. eapply An_Trans2. eauto. eauto. } destruct H0 as [g8 Eab]. assert (exists g, AnnDefEq G0 D g (open_tm_wrt_tm B' a2'') (open_tm_wrt_tm B' b2'')). { eexists. eapply An_PiSnd; eauto 1. eapply An_Refl. eapply AnnTyping_regularity. eauto 1. } destruct H0 as [g9 HBB]. assert (AnnTyping G0 (a_Conv (a_App b1'' Rel b2'') (g_Sym g9)) (open_tm_wrt_tm B' a2'')). { eapply An_Conv; eauto 1. eapply An_Sym2. eapply AnnDefEq_weaken_available; eauto 1. eapply AnnTyping_regularity. eauto. } eexists. exists (a_App a1'' Rel a2''). exists (a_Conv (a_App b1'' Rel b2'') (g_Sym g9)). exists (open_tm_wrt_tm B' a2''). repeat split. simpl. autorewcs. congruence. simpl. autorewcs. congruence. rewrite -open_tm_erase_tm. f_equal. auto. { eapply An_Trans2 with (a1 := (a_App b1'' Rel b2'')). eapply An_AppCong; eauto 1. eapply An_Trans2 with (a1 := b1'); eauto 2. 
eapply AnnDefEq_weaken_available; eauto 1. eapply An_EraseEq; eauto 2. eapply An_Sym. eapply AnnTyping_regularity. eauto. eapply AnnTyping_regularity. eauto. eapply An_Refl. eauto 2. eapply AnnDefEq_weaken_available; eauto 1. } eauto. eauto. - (* iappcong *) destruct (H G0 H1 H2) as [g1 [a1' [b1' [AB1 [EA1 [EA2 [ET1 [DE1 [TAB1 _]]]]]]]]]. clear H. destruct (H0 G0 H1 H2) as (a2' & A1 & EA3 & ET2 & TA1). clear H0. move: (AnnTyping_regularity TAB1) => TPi. destruct (erase_pi ET1 TPi) as (A' & B' & E1 & E2 & E3 & TP). inversion TP. subst. destruct (erasure_cvt TAB1 E1) as (a1'' & E5 & Ta1''); eauto. assert (exists g, AnnDefEq G0 D g a1'' a1'). { eexists. eapply An_EraseEq. eauto. eauto. autorewcs. congruence. eapply An_EraseEq. eapply AnnTyping_regularity. eauto. eapply AnnTyping_regularity. eauto. eauto. eapply An_Refl. eauto. } destruct H as [g4 DEa1''a1']. move: (An_Trans2 DEa1''a1' DE1) => DE4. destruct (erasure_cvt TA1) with (B := A') as (a2'' & E4 & Ta2''); eauto. assert (exists g, AnnDefEq G0 D g a2'' a2'). { eexists. eapply An_EraseEq. eauto. eauto. autorewcs. congruence. eapply An_EraseEq. eapply AnnTyping_regularity. eauto. eapply AnnTyping_regularity. eauto. eauto. eapply An_Refl. eauto. } destruct H as [g5 DEa2''a2']. destruct (AnnDefEq_invertb DE4) as (AB1' & b1'' & g6 & TA1' & TB1' & EB & DE5). resolve_unique_nosubst. assert (TT : AnnTyping G0 (a_App a1'' Irrel a2'') (open_tm_wrt_tm B' a2'')). { eapply An_App. eauto. eauto. } assert (AnnTyping G0 (a_App b1'' Irrel a2'') (open_tm_wrt_tm B' a2'')). { eapply An_App. eauto. eauto. } eexists. exists (a_App a1'' Irrel a2''). exists (a_App b1'' Irrel a2''). exists (open_tm_wrt_tm B' a2''). repeat split. simpl. autorewcs. congruence. simpl. autorewcs. congruence. rewrite -open_tm_erase_tm. f_equal. auto. { eapply An_Trans2 with (a1 := (a_App b1'' Irrel a2'')). eapply An_AppCong; eauto 1. eapply An_Trans2 with (a1 := b1'); eauto 2. eapply An_Refl; eauto 2. eapply An_Refl; eauto 2. eapply AnnTyping_regularity. eauto 2. eapply An_Refl; eauto 2. } eauto. eauto. - destruct (H G0 H0 H1) as [g [AB1 [AB2 [S1 [E1 [E2 [E3 [DE [T1 _]]]]]]]]]. clear H. destruct (AnnDefEq_regularity DE) as [S2 [S2' [g1 [T2 [T3 DE2]]]]]. resolve_unique_nosubst. destruct (erase_pi E1 T1) as [A1' [B1' [F1 [F2 [F3 AT]]]]]. destruct (erase_pi E2 T3) as [A2' [B2' [F1' [F2' [F3' AT']]]]]. subst. destruct (erasure_AnnDefEq DE T1 E3 F1 F1' AT AT') as (g2 & DE3). inversion AT. inversion AT'. subst. eexists. exists A1', A2', a_Star. repeat split. eauto. eauto. auto. - (* PiSnd *) clear d. clear d0. destruct (H G0 H1 H2) as [g [AB1 [AB2 [S1 [E1 [E2 [E3 [DE1 [AT1 _]]]]]]]]]. clear H. destruct (H0 G0 H1 H2) as [g1 [a1' [a2' [S2 [E1' [E2' [E3' [DE2 [AT2 _]]]]]]]]]. clear H0. destruct (AnnDefEq_regularity DE1) as [SS1 [SS2 [g4 [T3 [T4 DE3]]]]]. destruct (erase_pi E1 T3) as [A14 [A24 [F1 [F2 [F3 AT]]]]]. destruct (erase_pi E2 T4) as [A15 [A25 [F1' [F2' [F3' AT']]]]]. inversion AT. subst. inversion AT'. subst. (* Get equality between Pi types *) destruct (erasure_AnnDefEq DE1 AT1 E3 F1 F1' AT) as (g6 & DE5). eauto. resolve_unique_nosubst. (* a1 of domain type A14 *) destruct (erasure_cvt AT2 (symmetry F2)) as [a1 [EA1 TA1]]. eauto. (* a2 of domain type A15 *) destruct (AnnDefEq_invertb DE2) as (S3 & a2'' & g7 & T5 & T6 & ? & DE6). resolve_unique_nosubst. destruct (erasure_cvt T6 (symmetry F2)) as [a2''' [EA2 TA2]]. eauto. assert (AnnDefEq G0 D (g_PiFst g6) A14 A15). { eapply An_PiFst. eauto. } remember (a_Conv a2''' (g_PiFst g6)) as a2. assert (AnnTyping G0 a2 A15). { rewrite Heqa2. 
eapply An_Conv; eauto. eapply AnnDefEq_weaken_available. eauto. } (* a1 ~ a2 *) assert (TEMP : exists g, AnnDefEq G0 D g a1 a1'). { eexists. eapply An_EraseEq; eauto 1. eapply An_EraseEq; eauto 1. eapply AnnTyping_regularity; eauto. eapply An_Refl. eauto. } destruct TEMP as (? & Ha1a1'). assert (TEMP : exists g, AnnDefEq G0 D g a2'' a2'''). { eexists. eapply An_EraseEq; eauto 1. eapply An_EraseEq; eauto 1. eapply AnnTyping_regularity; eauto. eapply An_Refl. eauto. } destruct TEMP as (? & Ha2''a2'''). assert (TEMP : exists g, AnnDefEq G0 D g a2''' a2). { rewrite Heqa2. eexists. eapply An_EraseEq. eauto. rewrite -Heqa2. eauto. eauto. eapply AnnDefEq_weaken_available. eauto. } destruct TEMP as (? & Ha2'''a2). move: (An_Trans2 Ha1a1' DE2) => Ha1a2'. move: (An_Trans2 Ha1a2' DE6) => Ha1a2''. move: (An_Trans2 Ha1a2'' Ha2''a2''') => Ha1a2'''. move: (An_Trans2 Ha1a2''' Ha2'''a2) => Ha1a2. eexists. exists (open_tm_wrt_tm A24 a1). exists (open_tm_wrt_tm A25 a2). exists a_Star. repeat split. rewrite <- open_tm_erase_tm. congruence. rewrite <- open_tm_erase_tm. rewrite Heqa2. simpl. f_equal. autorewcs. congruence. eapply An_PiSnd; eauto. pick fresh x2 for (L \u fv_tm_tm_tm A24). rewrite (tm_subst_tm_tm_intro x2); auto. replace a_Star with (tm_subst_tm_tm a1 x2 a_Star). eapply AnnTyping_tm_subst. eapply H4. auto. auto. simpl. auto. pick fresh x2 for (L0 \u fv_tm_tm_tm A25). rewrite (tm_subst_tm_tm_intro x2); auto. replace a_Star with (tm_subst_tm_tm a2 x2 a_Star). eapply AnnTyping_tm_subst. eapply H3. auto. auto. simpl. auto. - (* CPiCong *) idtac. rename A into B1. rename B into B2. clear H1. rename H2 into H1. rename H3 into H2. rename H4 into H3. rename H5 into H4. clear d. clear i. destruct (H G0 H3 H4) as (g1 & phi1' & phi2' & EP1 & EP2 & IP). clear H. clear H1 H2. rename H3 into H1. rename H4 into H2. destruct (AnnIso_regularity IP) as [WFF1 WFF2]. inversion WFF1. inversion WFF2. subst. move: (AnnTyping_regularity H) => ?. move: (AnnTyping_regularity H7) => ?. move: (AnnTyping_regularity H3) => ?. move: (AnnTyping_regularity H8) => ?. assert (exists g, AnnDefEq G0 D g A0 B0). { eexists. eapply An_EraseEq; eauto 1. eauto. } destruct H1 as [g2 EA0B0]. assert (exists g, AnnDefEq G0 D g A B). { eexists. eapply An_EraseEq; eauto 1. eauto. } destruct H1 as [g3 EAB]. pick fresh x1. assert (FrL : x1 `notin` L). auto. assert (CTX1 : AnnCtx ([(x1, Co (Eq a b A))] ++ G0)). eauto with ctx_wff. destruct (H0 x1 FrL ([(x1,Co (Eq a b A))] ++ G0)) as (g4 & B1' & B2' & S & EB1 & EB2 & ES & DEB & DT & _); auto. clear H0. destruct (AnnDefEq_invert_a_Star DEB DT ES) as (B1'' & B2'' & g6 & EB3 & EB4 & DE5 & TB1' & TB2'); auto. assert (erase B1'' = open_tm_wrt_co B1 (g_Var_f x1)). congruence. assert (erase B2'' = open_tm_wrt_co B2 (g_Var_f x1)). congruence. clear dependent B1'. clear dependent B2'. clear dependent S. pose AVOID := erase B2''. pick fresh x2. remember (close_tm_wrt_co x1 B2'') as CB2. remember (open_tm_wrt_co CB2 (g_Cast (g_Var_f x2) (g_Sym g1))) as B3. assert (CTX2 : AnnCtx ([(x2, Co (Eq a0 b0 A0))] ++ G0)). eauto with ctx_wff. assert (CTX3 : AnnCtx ([(x2, Co (Eq a0 b0 A0))] ++ [(x1, Co (Eq a b A))] ++ G0)). { eapply An_ConsCo; eauto. eapply (AnnPropWff_weakening _ [(x1, Co (Eq a b A))] nil); simpl; eauto. } assert (AnnTyping G0 (a_CPi (Eq a b A) (close_tm_wrt_co x1 B1'')) a_Star). { eapply An_CPi_exists with (c := x1). autorewrite with lngen. clear dependent x2. auto. autorewrite with lngen. auto. autorewrite with lngen. eauto. 
} assert (AnnTyping G0 (a_CPi (Eq a0 b0 A0) (close_tm_wrt_co x2 B3)) a_Star). { eapply An_CPi_exists with (c := x2). autorewrite with lngen. auto. eauto. rewrite HeqB3. rewrite HeqCB2. autorewrite with lngen. rewrite -co_subst_co_tm_spec. replace a_Star with (co_subst_co_tm (g_Cast (g_Var_f x2) (g_Sym g1)) x1 a_Star); [|simpl; auto]. eapply AnnTyping_co_subst with (D := dom ([(x2, Co (Eq a0 b0 A0))] ++ G0)); eauto. eapply AnnTyping_weakening with (F := ([(x1, Co (Eq a b A))])); eauto 1. eapply An_ConsCo; eauto. eapply AnnPropWff_weakening with (F := nil); eauto. eapply An_Cast; eauto 2. eapply An_Assn; eauto. simpl. simpl_env. eapply AnnIso_weakening with (F := nil)(G0 := G0). eapply (third ann_weaken_available_mutual) with (D := dom G0). eapply AnnIso_weaken_available. eauto. simpl. clear Fr Fr0. fsetdec. eauto. simpl_env. auto. } exists (g_CPiCong g1 (close_co_wrt_co x1 g6)), (a_CPi (Eq a b A) (close_tm_wrt_co x1 B1'')), (a_CPi (Eq a0 b0 A0) (close_tm_wrt_co x2 B3)), a_Star. repeat split. + simpl. rewrite <- close_co_erase_tm; auto. rewrite H0. simpl. rewrite close_tm_wrt_co_open_tm_wrt_co; auto. + simpl. f_equal. rewrite <- close_co_erase_tm; auto. rewrite HeqB3. rewrite HeqCB2. rewrite <- (open_co_erase_tm2 _ _ (g_Var_f x2)). simpl. rewrite close_tm_wrt_co_open_tm_wrt_co. rewrite <- close_co_erase_tm. rewrite H1. simpl. rewrite close_tm_wrt_co_open_tm_wrt_co. auto. clear Fr0. auto. rewrite <- close_co_erase_tm. autorewrite with lngen. apply notin_remove_2. auto. + eapply An_CPiCong_exists with (c1 := x1) (c2 := x2) (B2 := CB2). ++ auto. ++ rewrite HeqCB2. autorewrite with lngen. auto. ++ rewrite HeqB3. rewrite HeqCB2. autorewrite with lngen. apply notin_union; auto. ++ rewrite HeqCB2. autorewrite with lngen. auto. ++ rewrite HeqB3. rewrite HeqCB2. autorewrite with lngen. auto. ++ auto. ++ auto. ++ rewrite HeqCB2. autorewrite with lngen. clear Fr Fr0. move: (AnnDefEq_context_fv DE5) => /= ?. inversion CTX1. subst. eapply An_CPi_exists with (c:=x1). autorewrite with lngen. fsetdec. auto. autorewrite with lngen. auto. + auto. + auto. - (* CAbsCong *) rename a into B1. rename b into B2. rename B into S. (*clear H1. rename H2 into H1. rename H3 into H2.*) destruct (H0 G0 H1 H2) as (phi1' & EP1 & WFF1). clear H0. inversion WFF1. subst. move: (AnnTyping_regularity H0) => ?. move: (AnnTyping_regularity H3) => ?. assert (exists g, AnnDefEq G0 D g A B). { eexists. eapply An_EraseEq; eauto 1. eauto. } destruct H1 as [g3 EAB]. pick fresh x1. assert (FrL : x1 `notin` L). auto. assert (CTX1 : AnnCtx ([(x1, Co (Eq a b A))] ++ G0)). eauto with ctx_wff. destruct (H x1 FrL ([(x1,Co (Eq a b A))] ++ G0)) as (g4 & B1' & B2' & C1 & EB1 & EB2 & ES & DEB & DT & DU); auto. clear H. destruct (AnnDefEq_regularity DEB) as (? & C2 & g & ? & TB2 & DEC). resolve_unique_nosubst. resolve_unique_nosubst. pose AVOID := erase B2'. pick fresh x2. remember (close_tm_wrt_co x1 B2') as CB2. have refl: exists g, AnnIso G0 D g (Eq a b A) (Eq a b A). { eexists. apply An_PropCong. eapply An_Refl. eassumption. eapply An_Refl. eassumption. apply WFF1. apply WFF1. } destruct refl as [g1 refl]. remember (open_tm_wrt_co CB2 (g_Cast (g_Var_f x2) (g_Sym g1))) as B3. remember (open_tm_wrt_co (close_tm_wrt_co x1 C1) (g_Cast (g_Var_f x2) (g_Sym g1))) as C3. assert (CTX2 : AnnCtx ([(x2, Co (Eq a b A))] ++ G0)). eauto 2 with ctx_wff. assert (CTX3 : AnnCtx ([(x2, Co (Eq a b A))] ++ [(x1, Co (Eq a b A))] ++ G0)). { eapply An_ConsCo; eauto 1. eapply (AnnPropWff_weakening _ [(x1, Co (Eq a b A))] nil); simpl; eauto. 
} assert (AnnTyping G0 (a_CAbs (Eq a b A) (close_tm_wrt_co x1 B1')) (a_CPi (Eq a b A) (close_tm_wrt_co x1 C1))). { eapply An_CAbs_exists with (c := x1). autorewrite with lngen. clear dependent x2. apply notin_union; auto. auto. autorewrite with lngen. auto. } assert (AnnTyping G0 (a_CAbs (Eq a b A) (close_tm_wrt_co x2 B3)) (a_CPi (Eq a b A) (close_tm_wrt_co x2 C3))). { eapply An_CAbs_exists with (c := x2). autorewrite with lngen. auto. eauto. rewrite HeqB3. rewrite HeqCB2. rewrite HeqC3. autorewrite with lngen. rewrite -co_subst_co_tm_spec. rewrite -co_subst_co_tm_spec. eapply AnnTyping_co_subst with (D := dom ([(x2, Co (Eq a b A))] ++ G0)); eauto. eapply AnnTyping_weakening with (F := ([(x1, Co (Eq a b A))])); eauto 1. eapply An_ConsCo; eauto. eapply AnnPropWff_weakening with (F := nil); eauto. eapply An_Cast; eauto 2. eapply An_Assn; eauto. simpl; eauto 2. simpl_env. eapply AnnIso_weakening with (F := nil)(G0 := G0). eapply (third ann_weaken_available_mutual) with (D := dom G0). eapply AnnIso_weaken_available. eauto. simpl. clear Fr Fr0. fsetdec. eauto. simpl_env. auto. } assert (exists g, AnnDefEq ([(x1, Co (Eq a b A))] ++ G0) (dom G0) g C1 C1). { eexists. eapply An_Refl. eapply AnnTyping_regularity. eauto 1. } destruct H5 as [ grefl EC1C1]. assert (exists g, AnnDefEq G0 (dom G0) g (a_CPi (Eq a b A) (close_tm_wrt_co x1 C1)) (a_CPi (Eq a b A) (close_tm_wrt_co x2 C3))). { eexists. eapply An_CPiCong_exists with (c1 := x1) (c2 := x2) (B2 := close_tm_wrt_co x1 C1) (g3 := close_co_wrt_co x1 grefl). + eapply AnnIso_weaken_available. eauto 1. + simpl. autorewrite with lngen. clear Fr0. auto. + autorewrite with lngen. apply notin_union. pose M := AnnIso_context_fv refl. clearbody M. destruct M as [_ [h4 _]]. unfold "[<=]" in h4. move => h6. have h1: x2 `notin` dom G0; auto. auto 3. + autorewrite with lngen. eauto 1. + rewrite HeqC3. autorewrite with lngen. auto. + eapply AnnTyping_regularity; eauto 1. + eapply AnnTyping_regularity; eauto 1. + autorewrite with lngen. clear Fr Fr0. move: (AnnTyping_context_fv DT) => /= ?. inversion CTX3. inversion H8. subst. eapply An_CPi_exists with (c:=x1). autorewrite with lngen. fsetdec. auto. autorewrite with lngen. eapply AnnTyping_regularity. eauto. } destruct H5 as [g5 Epipi]. assert (AnnTyping G0 (a_Conv (a_CAbs (Eq a b A) (close_tm_wrt_co x2 B3)) (g_Sym g5)) (a_CPi (Eq a b A) (close_tm_wrt_co x1 C1))). { eapply An_Conv; eauto 1. eapply An_Sym2; auto. eapply AnnTyping_regularity; eauto 1. } eexists. exists (a_CAbs (Eq a b A) (close_tm_wrt_co x1 B1')), (a_Conv (a_CAbs (Eq a b A) (close_tm_wrt_co x2 B3)) (g_Sym g5)), (a_CPi (Eq a b A) (close_tm_wrt_co x1 C1)). repeat split. + simpl. rewrite <- close_co_erase_tm; auto. rewrite EB1. simpl. rewrite close_tm_wrt_co_open_tm_wrt_co; auto. + simpl. f_equal. rewrite <- close_co_erase_tm; auto. rewrite HeqB3. rewrite HeqCB2. rewrite <- (open_co_erase_tm2 _ _ (g_Var_f x2)). simpl. rewrite close_tm_wrt_co_open_tm_wrt_co. rewrite <- close_co_erase_tm. rewrite EB2. simpl. rewrite close_tm_wrt_co_open_tm_wrt_co. auto. clear Fr0. auto. rewrite <- close_co_erase_tm. autorewrite with lngen. apply notin_remove_2. auto. + simpl. f_equal. rewrite <- close_co_erase_tm; auto. rewrite ES. simpl. rewrite close_tm_wrt_co_open_tm_wrt_co; auto. + eapply An_Trans2 with (a1 := (a_CAbs (Eq a b A)(close_tm_wrt_co x2 B3))). eapply An_CAbsCong_exists with (c1 := x1) (c2 := x2) (a2 := CB2) (g3 := close_co_wrt_co x1 g4) (B := a_CPi (Eq a b A) (close_tm_wrt_co x1 C1)); eauto 1. ++ rewrite HeqCB2. autorewrite with lngen. auto. 
++ autorewrite with lngen. apply notin_union. pose M := AnnIso_context_fv refl. clearbody M. destruct M as [_ [h4 _]]. unfold "[<=]" in h4. move => h6. have h1: x2 `notin` dom G0; auto. rewrite HeqCB2. autorewrite with lngen. auto 3. ++ rewrite HeqCB2. autorewrite with lngen. auto. ++ rewrite HeqB3. rewrite HeqCB2. autorewrite with lngen. auto. ++ autorewrite with lngen. clear Fr Fr0. subst CB2. inversion CTX3. inversion H9. subst. eapply An_CAbs_exists with (c:=x1). autorewrite with lngen. fsetdec. auto. autorewrite with lngen. auto. ++ eapply An_EraseEq; eauto 1. eapply An_Sym2; eauto 1. + eauto 1. + eauto 1. Unshelve. eauto 1. eauto 1. - (* CAppCong *) clear d. destruct (H G0 H1 H2) as [g1 [a1' [b1' [AB1 [EA1 [EA2 [ET1 [DE1 [TAB1 _]]]]]]]]]. clear H. move: (AnnTyping_regularity TAB1) => TPi. destruct (erase_cpi ET1 TPi) as (A' & B' & E1 & E2 & E3 & TP). inversion TP. destruct A' as [a2'' b2'']. simpl in E2. inversion E2. clear E2. inversion H5. destruct (H0 G0 H1 H2) as [g2 [a2' [b2' [A1' [EA3 [EA4 [ET2 [DE2 [TA1 _]]]]]]]]]. clear H0. subst. move: (AnnTyping_regularity H14) => SA1. move: (AnnTyping_regularity H15) => ?. move: (AnnTyping_regularity TA1) => SA1'. (* Make sure func has the cpi-type *) destruct (erasure_cvt TAB1 E1) as (a1'' & E5 & Ta1''); eauto. assert (exists g, AnnDefEq G0 D g a1'' a1'). { eexists. eapply An_EraseEq. eauto. eauto. autorewcs. congruence. eapply An_EraseEq. eapply AnnTyping_regularity. eauto. eapply AnnTyping_regularity. eauto. eauto. eapply An_Refl. eauto. } destruct H as [g4 DEa1''a1']. move: (An_Trans2 DEa1''a1' DE1) => DE4. (* Find the coercion that corresponds to that prop. *) destruct (AnnDefEq_regularity DE2) as (? & B2' & g3 & ? & Tb2' & DEa2b1). move: (AnnTyping_regularity Tb2') => ?. resolve_unique_nosubst. assert (exists g, AnnDefEq G0 (dom G0) g a2'' a2'). { eexists. eapply An_EraseEq; eauto 1. eapply An_EraseEq; eauto 1. eapply An_Refl. eauto. } destruct H as [g5 Ea2''a2']. assert (exists g, AnnDefEq G0 (dom G0) g A0 B1). { eexists. eapply An_EraseEq; eauto 1. eapply An_Refl. eauto. } destruct H as [g6 EA1B1]. assert (exists g, AnnDefEq G0 (dom G0) g A1' A0). { eexists. eapply An_EraseEq; eauto 1. eapply An_Refl. eauto. } destruct H as [g7 EA1'A1]. (* Make b'' have the same type as b2' *) assert (exists g, AnnDefEq G0 (dom G0) g b2' b2''). { eexists. eapply An_EraseEq; eauto 1. eapply An_Trans2 with (a1 := A1'). eapply An_Sym2; eauto 1. eapply An_Trans2 with (a1 := A0); eauto 1. } destruct H as [g8 Eb2'b2'']. rewrite -erase_dom in DE2. move: (An_Trans2 Ea2''a2' (An_Trans2 DE2 Eb2'b2'')) => Ea2''b2''. remember (g_Trans g5 (g_Trans g2 g8)) as g9. (* Find b1' type as a CPi type *) destruct (AnnDefEq_invertb DE4) as (? & b1'' & g10 & ? & Tb1'' & E6 & DEBB). resolve_unique_nosubst. assert (TT : AnnTyping G0 (a_CApp a1'' g9) (open_tm_wrt_co B' g9)). { eapply An_CApp. eauto. eauto. } assert (AnnTyping G0 (a_CApp b1'' g9) (open_tm_wrt_co B' g9)). { eapply An_CApp. eauto. eauto. } eexists. exists (a_CApp a1'' g9). exists (a_CApp b1'' g9). exists (open_tm_wrt_co B' g9). repeat split. simpl. f_equal. eauto 1. simpl. f_equal. eauto 2. rewrite <- (open_co_erase_tm2 _ _ g_Triv). auto. eapply An_CAppCong; eauto 2. eapply An_Trans2 with (a1 := b1'); eauto 1. eapply An_Refl. eapply AnnTyping_regularity; eauto 1. assumption. assumption. - (* CPiSnd *) clear d. clear d1. clear d0. rename a1' into b1. rename a2' into b2. rename A' into B. destruct (H G0 H2 H3) as [g [AB1 [AB2 [S1 [E1 [E2 [E3 [DE1 [T1 _]]]]]]]]]. clear H. 
destruct (H0 G0 H2 H3) as [g1 [a1' [a2' [A' [EA11 [EA21 [E31 [DEA [T1A _]]]]]]]]]. clear H0. destruct (H1 G0 H2 H3) as [g1' [b1' [b2' [B' [EA11' [EA21' [E31' [DEA' [T1A' _]]]]]]]]]. clear H1. destruct (AnnDefEq_regularity DE1) as [S1' [S2' [g4 [T3 [T4 DE3]]]]]. destruct (AnnDefEq_regularity DEA) as [S1'' [S2'' [g5 [T3' [T4' DE3']]]]]. destruct (AnnDefEq_regularity DEA') as [S1''' [S2''' [g5' [T3'' [T4'' DE3'']]]]]. resolve_unique_nosubst. resolve_unique_nosubst. resolve_unique_nosubst. move: (AnnTyping_regularity T1A) => ?. move: (AnnTyping_regularity T1A') => ?. move: (AnnTyping_regularity T4) => ?. move: (AnnTyping_regularity T4') => ?. destruct (erase_cpi E1 T1) as [phi1' [B1' [F1 [F2 [F3 AT]]]]]. destruct (erase_cpi E2 T4) as [phi2' [B2' [F1' [F2' [F3' AT']]]]]. destruct phi1' as [a1'' a2'' A'']. simpl in F2. inversion F2. clear F2. destruct phi2' as [b1'' b2'' B'']. simpl in F2'. inversion F2'. clear F2'. destruct (erasure_AnnDefEq DE1 T1 E3 F1 F1' AT AT') as [g2 DE2]. inversion AT. inversion AT'. inversion H10. inversion H15. subst. (* Have the equality between the CPi types. Now we need to get the coercions to match them. *) assert (TMP : exists g, AnnDefEq G0 D g a1'' a1'). { eexists. eapply An_EraseEq; eauto 1. eapply An_EraseEq. eapply AnnTyping_regularity; eauto 1. eapply AnnTyping_regularity; eauto 1. eauto 1. eapply An_Refl; eauto 2. } destruct TMP as [g3 Ea1''a1']. assert (TMP : exists g, AnnDefEq G0 (dom G0) g A'' B4). { eexists. eapply An_EraseEq. eapply AnnTyping_regularity; eauto 1. eapply AnnTyping_regularity; eauto 1. eauto 1. eapply An_Refl; eauto 2. } destruct TMP as [g6 EA''B4]. assert (TMP : exists g, AnnDefEq G0 (dom G0) g A' A''). { eexists. eapply An_EraseEq. eapply AnnTyping_regularity; eauto 1. eapply AnnTyping_regularity; eauto 1. eauto 1. eapply An_Refl; eauto 2. } destruct TMP as [g7 EA'A'']. move: (An_Trans2 (An_Sym2 DE3') (An_Trans2 EA'A'' EA''B4)) => ?. assert (TMP : exists g, AnnDefEq G0 D g a2' a2''). { eexists. eapply An_EraseEq; eauto 1. } destruct TMP as [g8 Ea2'a2'']. move: (AnnDefEq_weaken_available Ea1''a1') => y. rewrite erase_dom in y. move: (AnnDefEq_weaken_available Ea2'a2'') => x. rewrite erase_dom in x. move: (An_Trans2 y (An_Trans2 DEA x)) => Ea1''a2''. assert (TMP : exists g, AnnDefEq G0 D g b1'' b1'). { eexists. eapply An_EraseEq; eauto 1. eapply An_EraseEq. eapply AnnTyping_regularity; eauto 1. eapply AnnTyping_regularity; eauto 1. eauto 1. eapply An_Refl; eauto 2. } destruct TMP as [g9 Eb1'Eb1'']. (* WANT S''' B5 *) assert (TMP : exists g, AnnDefEq G0 (dom G0) g B'' B5). { eexists. eapply An_EraseEq. eapply AnnTyping_regularity; eauto 1. eapply AnnTyping_regularity; eauto 1. eauto 1. eapply An_Refl; eauto 2. } destruct TMP as [g10 EB''B5]. assert (TMP : exists g, AnnDefEq G0 (dom G0) g B' B''). { eexists. eapply An_EraseEq. eapply AnnTyping_regularity; eauto 1. eapply AnnTyping_regularity; eauto 1. eauto 1. eapply An_Refl; eauto 2. } destruct TMP as [g11 EB'B'']. move: (An_Trans2 (An_Sym2 DE3'') (An_Trans2 EB'B'' EB''B5)) => ?. assert (TMP : exists g, AnnDefEq G0 D g b2' b2''). { eexists. eapply An_EraseEq; eauto 1. } destruct TMP as [g12 Eb2'b2'']. assert (TMP : exists g, AnnDefEq G0 D g b1'' b1'). { eexists. eapply An_EraseEq; eauto 1. eapply An_EraseEq. eapply AnnTyping_regularity; eauto 1. eapply AnnTyping_regularity; eauto 1. eauto 1. eapply An_Refl; eauto 2. } destruct TMP as [g13 Eb1''b1']. move: (AnnDefEq_weaken_available Eb2'b2'') => y1. rewrite erase_dom in y1. move: (AnnDefEq_weaken_available Eb1''b1') => x1. 
rewrite erase_dom in x1. move: (An_Trans2 x1 (An_Trans2 DEA' y1)) => Eb1''b2''. clear x1. clear y1. eexists. exists (open_tm_wrt_co B1' (g_Trans g3 (g_Trans g1 g8))), (open_tm_wrt_co B2' (g_Trans g13 (g_Trans g1' g12))), a_Star. repeat split. + simpl. rewrite <- open_co_erase_tm2 with (g := g_Triv). auto. + simpl. rewrite <- open_co_erase_tm2 with (g := g_Triv). auto. + eapply An_CPiSnd; eauto. rewrite erase_dom. auto. rewrite erase_dom. auto. + pick fresh x1 for (L \u fv_co_co_tm B1'). rewrite (co_subst_co_tm_intro x1). replace a_Star with (co_subst_co_tm (g_Trans g3 (g_Trans g1 g8)) x1 a_Star). eapply AnnTyping_co_subst. eauto. eauto. simpl. auto. auto. + pick fresh x1 for (L0 \u fv_co_co_tm B2'). rewrite (co_subst_co_tm_intro x1). replace a_Star with (co_subst_co_tm (g_Trans g13 (g_Trans g1' g12)) x1 a_Star). eapply AnnTyping_co_subst. eapply H16; eauto 1. eauto 1. simpl. auto. auto. - (* Cast *) clear i. clear d. destruct (H G0 H1 H2) as [g [a0' [b0' [A0' [EA [EB [S2 [DE [T1 _]]]]]]]]]. clear H. destruct (H0 G0 H1 H2) as [g1 [phi' [phi2' [EP1 [EP2 IP]]]]]. clear H0. destruct (AnnIso_regularity IP) as [WFF1 WFF2]. inversion WFF1. inversion WFF2. subst. move: (AnnTyping_regularity H) => ?. move: (AnnTyping_regularity H0) => ?. move: (AnnTyping_regularity H6) => ?. move: (AnnTyping_regularity H7) => ?. assert (EA0A1 : AnnDefEq G0 D (g_IsoSnd g1) A0 A1). { eapply An_IsoSnd. eauto. } assert (exists g, AnnDefEq G0 D g B B0). { eapply (erasure_AnnDefEq EA0A1); eauto 1. } destruct H1 as [g2 EBB0]. destruct (AnnDefEq_regularity DE) as [C [D1 [g3 [TC [TD CD]]]]]. simpl in EP1. inversion EP1. simpl in EP2. inversion EP2. subst. clear EP2. clear EP1. resolve_unique_nosubst. assert (exists g, AnnDefEq G0 D g a0 a0'). { eexists. eapply An_EraseEq. eauto. eauto. eauto. eapply An_EraseEq. eapply AnnTyping_regularity. eauto. eapply AnnTyping_regularity. eauto. eauto. eapply An_Refl. eauto. } destruct H1 as [g4 Ea0a0']. assert (exists g, AnnDefEq G0 D g B0 A1). { eexists. eapply An_EraseEq. eauto. eapply AnnTyping_regularity. eauto. eauto. eapply An_Refl. eauto. } destruct H1 as [g5 EB0A1]. assert (exists g, AnnDefEq G0 D g A0 A0'). { eexists. eapply An_EraseEq. eauto. eapply AnnTyping_regularity. eauto. eauto. eauto. } destruct H1 as [g6 EA0A0']. move: (An_Trans2 (An_Trans2 EB0A1 (An_Sym2 EA0A1)) EA0A0') => EB0A0'. move: (An_Trans2 (AnnDefEq_weaken_available EB0A0') CD) => EB0D1. move: (An_Trans2 (AnnDefEq_weaken_available EBB0) EB0D1) => EBD1. assert (exists g, AnnDefEq G0 D g b0 b0'). { eexists. eapply An_EraseEq. eauto. eauto. eauto. eauto. } destruct H1 as [g7 Eb0b0']. (* assert (exists g, AnnIso G0 D g (Eq a0 b0 A0) (Eq a1 (a_Conv b1 g5) A0)) *) eexists. exists a1, (a_Conv b1 g5), A1. repeat split. eapply An_Trans2 with (a1 := b1). eapply (An_Cast _ _ _ _ _ _ _ _ _ _ _ IP); eauto 1. eapply An_EraseEq. eauto 1. eapply An_Conv with (B := A1); eauto 1. eapply AnnDefEq_weaken_available; eauto 1. simpl. auto. eapply AnnDefEq_weaken_available; eauto 1. eauto 1. eapply An_Conv with (B := A1); eauto 1. eapply AnnDefEq_weaken_available; eauto 1. Unshelve. Focus 2. eapply (An_Trans2 (An_Trans2 Ea0a0' DE) (An_Sym2 Eb0b0')). - (* EqConv *) clear d. clear d0. destruct (H G0 H1 H2) as [g [a0' [b0' [A0' [EA [EB [S2 [DE [T1 U1]]]]]]]]]. clear H. destruct (H0 G0 H1 H2) as [g1 [A' [B' [S' [EP1 [EP2 [ES [DE2 [T2 U2]]]]]]]]]. clear H0. subst. rewrite -erase_dom in DE2. assert (exists g, AnnDefEq G0 D g A0' A'). { eexists. eapply An_EraseEq. eauto. eapply AnnTyping_regularity. eauto. eauto. eauto. eapply An_EraseEq. 
eapply An_Star. eauto 1. eapply AnnTyping_regularity. eauto. autorewcs. eauto 1. eapply An_Refl. eauto. } destruct H as [g2 EA0'A']. move: (An_Trans2 (AnnDefEq_weaken_available EA0'A') DE2) => EA0'B'. move: (AnnTyping_regularity T1) => TA0'. destruct (AnnDefEq_invertb EA0'B') as (S'' & B'' & g3 & TS & TB & EB & DB'B''). resolve_unique_nosubst. move: (An_Trans2 EA0'B' DB'B'') => EA0'B''. assert (exists g, AnnDefEq G0 D g (a_Conv a0' (g_Trans (g_Trans g2 g1) g3)) a0'). { eexists. eapply An_EraseEq; eauto 1. eapply An_Conv; eauto 1. eapply An_Sym2. eauto. } destruct H as [g4 Ea0']. eexists. exists (a_Conv a0' (g_Trans (g_Trans g2 g1) g3)), (a_Conv b0' (g_Trans (g_Trans g2 g1) g3)), B''. repeat split; auto. eapply An_Trans2 with (a1 := a0'); eauto 1. eapply An_Trans2 with (a1 := b0'); eauto 1. eapply An_EraseEq; eauto 1. eapply An_Conv; eauto 1. eapply An_Conv; eauto 1. eapply An_Conv; eauto 1. - clear i. destruct (H G0 H0 H1) as [g1 [phi' [phi2' [EP1 [EP2 IP]]]]]. clear H. destruct (AnnIso_regularity IP) as [WFF1 WFF2]. inversion WFF1. inversion WFF2. subst. move: (AnnTyping_regularity H) => ?. move: (AnnTyping_regularity H6) => ?. simpl in EP1. inversion EP1. simpl in EP2. inversion EP2. subst. clear EP2. clear EP1. eexists. exists A0, A1, a_Star. repeat split; eauto 1. eapply An_IsoSnd. eauto. - destruct (H _ H0 H1) as (a0 & A0 & E1 & E2 & AT). clear H. move: (AnnTyping_regularity AT) => h0. destruct (erase_pi E2 h0) as (A1 & B1 & E3 & E4 & E5 & AT1). have h1: (exists g, AnnDefEq G0 (dom G0) g A0 (a_Pi Rel A1 B1)). { eexists. eapply An_EraseEq; eauto. } move: h1 => [g TT]. have h1: AnnTyping G0 (a_Conv a0 g) (a_Pi Rel A1 B1) by eauto. subst. have h2: erase a0 = erase (a_Conv a0 g) by simpl; auto. pick fresh y. move: (e y ltac:(auto)) => e0. rewrite h2 in e0. replace (a_App (erase (a_Conv a0 g)) Rel (a_Var_f y)) with (erase (a_App (a_Conv a0 g) Rel (a_Var_f y))) in e0. move: (An_Pi_inversion AT1) => h3. split_hyp. eexists. exists (a_Abs Rel A1 (close_tm_wrt_tm y (a_App (a_Conv a0 g) Rel (a_Var_f y)))). exists (a_Conv a0 g). exists (a_Pi Rel A1 B1). split. replace (erase (a_Abs Rel A1 (close_tm_wrt_tm y (a_App (a_Conv a0 g) Rel (a_Var_f y))))) with (a_UAbs Rel (erase (close_tm_wrt_tm y (a_App (a_Conv a0 g) Rel (a_Var_f y))))). autorewcs. rewrite -close_tm_erase_tm. rewrite -e0. autorewrite with lngen. auto. simpl; auto. repeat split; simpl; eauto 2. eapply An_Eta with (L := L \u dom G0 \u {{y}} ). eauto. intros. rewrite -tm_subst_tm_tm_spec. simpl. rewrite tm_subst_tm_tm_fresh_eq; auto. rewrite tm_subst_tm_co_fresh_eq; auto. destruct eq_dec; try done. eapply (@An_Abs_exists y); autorewrite with lngen; eauto 2. + fsetdec. + econstructor. eapply AnnTyping_weakening with (F:=nil); eauto with ctx_wff. simpl_env; eauto. + simpl; auto. - destruct (H _ H0 H1) as (a0 & A0 & E1 & E2 & AT). clear H. move: (AnnTyping_regularity AT) => h0. destruct (erase_pi E2 h0) as (A1 & B1 & E3 & E4 & E5 & AT1). have h1: (exists g, AnnDefEq G0 (dom G0) g A0 (a_Pi Irrel A1 B1)). { eexists. eapply An_EraseEq; eauto. } move: h1 => [g TT]. have h1: AnnTyping G0 (a_Conv a0 g) (a_Pi Irrel A1 B1) by eauto. subst. have h2: erase a0 = erase (a_Conv a0 g) by simpl; auto. pick fresh y. move: (e y ltac:(auto)) => e0. rewrite h2 in e0. replace (a_App (erase (a_Conv a0 g)) Irrel a_Bullet) with (erase (a_App (a_Conv a0 g) Irrel (a_Var_f y))) in e0. move: (An_Pi_inversion AT1) => h3. split_hyp. eexists. exists (a_Abs Irrel A1 (close_tm_wrt_tm y (a_App (a_Conv a0 g) Irrel (a_Var_f y)))). exists (a_Conv a0 g). 
exists (a_Pi Irrel A1 B1). split. replace (erase (a_Abs Irrel A1 (close_tm_wrt_tm y (a_App (a_Conv a0 g) Irrel (a_Var_f y))))) with (a_UAbs Irrel (erase (close_tm_wrt_tm y (a_App (a_Conv a0 g) Irrel (a_Var_f y))))). autorewcs. rewrite -close_tm_erase_tm. simpl. simpl in e0. rewrite -e0. autorewrite with lngen. auto. simpl; auto. repeat split; simpl; eauto 2. eapply An_Eta with (L := L \u dom G0 \u {{y}} ). eauto. intros. rewrite -tm_subst_tm_tm_spec. simpl. rewrite tm_subst_tm_tm_fresh_eq; auto. rewrite tm_subst_tm_co_fresh_eq; auto. destruct eq_dec; try done. eapply (@An_Abs_exists y); autorewrite with lngen; eauto 2. + fsetdec. + econstructor. eapply AnnTyping_weakening with (F:=nil); eauto with ctx_wff. simpl_env; eauto. + simpl; auto. constructor. simpl. apply union_notin_iff. split. apply fv_tm_erase_tm. fsetdec. eauto. + simpl. auto. - destruct (H _ H0 H1) as (a0 & A0 & E1 & E2 & AT). clear H. move: (AnnTyping_regularity AT) => h0. destruct (erase_cpi E2 h0) as (A1 & B1 & E3 & E4 & E5 & AT1). have h1: (exists g, AnnDefEq G0 (dom G0) g A0 (a_CPi A1 B1)). { eexists. eapply An_EraseEq; eauto. } move: h1 => [g TT]. have h1: AnnTyping G0 (a_Conv a0 g) (a_CPi A1 B1) by eauto. subst. have h2: erase a0 = erase (a_Conv a0 g) by simpl; auto. pick fresh y. move: (e y ltac:(auto)) => e0. rewrite h2 in e0. replace (a_CApp (erase (a_Conv a0 g)) g_Triv) with (erase (a_CApp (a_Conv a0 g) g_Triv)) in e0; eauto. move: (An_CPi_inversion AT1) => h3. split_hyp. eexists. exists (a_CAbs A1 (close_tm_wrt_co y (a_CApp (a_Conv a0 g) (g_Var_f y)))). exists (a_Conv a0 g). exists (a_CPi A1 B1). split. replace (erase (a_CAbs A1 (close_tm_wrt_co y (a_CApp (a_Conv a0 g) (g_Var_f y))))) with (a_UCAbs (erase (close_tm_wrt_co y (a_CApp (a_Conv a0 g) (g_Var_f y))))). autorewcs. rewrite -close_co_erase_tm. simpl. simpl in e0. rewrite -e0. autorewrite with lngen. auto. simpl; auto. repeat split; simpl; eauto 2. eapply An_EtaC with (L := L \u dom G0 \u {{y}} ). eauto. intros. rewrite -co_subst_co_tm_spec. simpl. rewrite co_subst_co_tm_fresh_eq; auto. rewrite co_subst_co_co_fresh_eq; auto. auto. destruct eq_dec; try done. eapply (@An_CAbs_exists y); autorewrite with lngen; eauto 2. + fsetdec. + destruct A1. eapply An_CApp. eapply AnnTyping_weakening with (F:=nil); eauto with ctx_wff. eapply An_Assn; eauto. Qed. (* --------------------------------------------------------------------- *) (* - (* LeftRel *) destruct (H _ H5 H6) as (a0 & AB & hyp). split_hyp. clear H. destruct (H0 _ H5 H6) as (b0 & B0 & hyp). split_hyp. clear H0. destruct (H1 _ H5 H6) as (a0' & AB' & hyp). split_hyp. clear H1. destruct (H2 _ H5 H6) as (b0' & B0' & hyp). split_hyp. clear H2. destruct (H3 _ H5 H6) as (gg & ab & ab' & s & hyp). split_hyp. clear H3. destruct (H4 _ H5 H6) as (gB & Ba & Ba' & s1 & hyp). split_hyp. clear H4. have ?: AnnTyping G0 AB a_Star. eauto using AnnTyping_regularity. have ?: AnnTyping G0 AB' a_Star. eauto using AnnTyping_regularity. match goal with [H : erase AB = a_Pi Rel A B, H1 : AnnTyping G0 AB a_Star |- _ ] => destruct (erase_pi H H1) as (A1 & B1 & hyp1); split_hyp end. match goal with [H : erase AB' = a_Pi Rel A B, H1 : AnnTyping G0 AB' a_Star |- _ ] => destruct (erase_pi H H1) as (A1' & B1' & hyp1); split_hyp end. match goal with [H4: erase ab = a_App a Rel b, H8 : AnnTyping G0 ab s |- _ ] => destruct (erase_app_Rel H4 H8) as (a1 & b1 & gab & C & h0 & h1 & h2 & h3 & h4); inversion h3 end. 
match goal with [H4: erase ab' = a_App a' Rel b', H8 : AnnTyping G0 ab' s |- _ ] => destruct (erase_app_Rel H4 H8) as (a1' & b1' & gab' & C' & h0' & h1' & h2' & h3' & h4'); inversion h3' end. match goal with [H: AnnTyping G0 (a_Pi Rel A1 B1) a_Star |- _ ] => inversion H end. match goal with [H: AnnTyping G0 (a_Pi Rel A1' B1') a_Star |- _ ] => inversion H end. subst. have ?: AnnCtx G0 by eauto with ctx_wff. have: exists g, AnnDefEq G0 (dom G0) g B0 A1. { eexists. eapply An_EraseEq. eauto using AnnTyping_regularity. eauto. eauto. eauto. } move => [g1 ?]. have: exists g, AnnDefEq G0 (dom G0) g B0 A1'. { eexists. eapply An_EraseEq. eauto using AnnTyping_regularity. eauto. eauto. eauto. } move => [g1' ?]. (* need to cast in B1 *) pick fresh x. have ?: AnnCtx ([(x, Tm B0)] ++ G0). { econstructor. eauto. eauto using AnnTyping_regularity. fsetdec_fast. } have ?: AnnTyping ([(x, Tm B0)] ++ G0) A1 a_Star. { eapply AnnTyping_weakening with (F:=nil). eauto. auto. auto. } remember (close_tm_wrt_tm x (open_tm_wrt_tm B1 (a_Conv (a_Var_f x) g1))) as B11. have h5: AnnTyping G0 (a_Pi Rel B0 B11) a_Star. { rewrite HeqB11. eapply An_Pi_exists2 with (x:=x). autorewrite with lngen. auto. autorewrite with lngen. eapply (@AnnTyping_tm_subst_nondep (L \u {{x}} \u dom G0)). econstructor. econstructor. econstructor. auto. eauto using AnnTyping_regularity. auto. auto. eapply AnnDefEq_weaken_available. eapply AnnDefEq_weakening with (F:=nil). eauto. auto. auto. auto. intros. eapply AnnTyping_weakening. match goal with [H44 : ∀ x : atom, ¬ x `in` L → AnnTyping ([(x, _)] ++ _) (open_tm_wrt_tm _ (a_Var_f x)) a_Star |- _ ] => eapply H44 end. auto. auto. econstructor. auto. auto. simpl. auto. } inversion h5. (* pi type coercions *) have: exists g, AnnDefEq G0 (dom G0) g AB (a_Pi Rel B0 B11). { eexists. eapply An_EraseEq. eauto. eauto. rewrite HeqB11. simpl. autorewcs. rewrite -close_tm_erase_tm. rewrite -open_tm_erase_tm. simpl. rewrite close_tm_wrt_tm_open_tm_wrt_tm. auto. auto. apply fv_tm_erase_tm. auto. eauto. } move => [ga0 ?]. have: exists g, AnnDefEq G0 (dom G0) g AB' (a_Pi Rel B0 B11). { eexists. eapply An_EraseEq. eauto. eauto. rewrite HeqB11. simpl. autorewcs. rewrite -close_tm_erase_tm. rewrite -open_tm_erase_tm. simpl. rewrite close_tm_wrt_tm_open_tm_wrt_tm. auto. auto. apply fv_tm_erase_tm. auto. eauto. } move => [ga0' ?]. have ?: AnnTyping G0 (a_App (a_Conv a0 ga0) Rel b0) (open_tm_wrt_tm B11 b0). { econstructor. eapply An_Conv. eauto. eauto. auto. auto. } have: exists g, AnnDefEq G0 (dom G0) g B0' B0. { eexists. eapply An_EraseEq. eauto using AnnTyping_regularity. eauto using AnnTyping_regularity. eauto. eauto. } move => [gb0' ?]. have ?: AnnTyping G0 (a_App (a_Conv a0' ga0') Rel (a_Conv b0' gb0')) (open_tm_wrt_tm B11 (a_Conv b0' gb0')). { econstructor. eapply An_Conv. eauto. eauto. auto. eapply An_Conv. eauto. eauto. eauto using AnnTyping_regularity. } have: exists g, AnnDefEq G0 D g (a_App (a_Conv a0 ga0) Rel b0) (a_App a1' Rel b1'). { eexists. eapply An_Trans2 with (a1 := a_App a1 Rel b1). { eapply An_EraseEq. eauto. econstructor. eauto. eauto. simpl. f_equal. auto. auto. eapply An_Trans2 with (a1 := s). eapply An_EraseEq. eauto using AnnTyping_regularity. eauto using AnnTyping_regularity. autorewcs. match goal with [ H : erase s = _ |- _ ] => rewrite H end. rewrite -open_tm_erase_tm. f_equal. rewrite HeqB11. rewrite -close_tm_erase_tm. rewrite -open_tm_erase_tm. simpl. rewrite close_tm_wrt_tm_open_tm_wrt_tm. auto. apply fv_tm_erase_tm. auto. eauto. eauto. } eapply An_Trans2 with (a1 := ab). 
{ eapply An_EraseEq. eauto. eauto. eauto. eapply An_Sym2. eauto. } eapply An_Trans2 with (a1 := ab'). eauto. { eapply An_EraseEq. eauto. eauto. eauto. eauto. } } move => [g ?]. have: exists g, AnnDefEq G0 (dom G0) g (open_tm_wrt_tm B3 b1') (open_tm_wrt_tm B11 (a_Conv b0' gb0')). { eexists. eapply An_Trans2 with (a1 := s). eapply An_Sym2. eauto. eapply An_Trans2 with (a1 := Ba). { eapply An_EraseEq. eauto using AnnTyping_regularity. eauto using AnnTyping_regularity. autorewcs. match goal with [ H : erase Ba = _ |- _ ] => rewrite H end. auto. eapply An_EraseEq. eauto. eauto using AnnTyping_regularity. auto. eauto. } eapply An_Trans2 with (a1 := Ba'). { match goal with [ H : AnnDefEq G0 _ _ Ba Ba' |- _ ] => rewrite -erase_dom in H end. eauto. } eapply An_EraseEq. eauto using AnnTyping_regularity. eauto using AnnTyping_regularity. rewrite HeqB11. autorewcs. rewrite -open_tm_erase_tm. simpl. autorewcs. rewrite -close_tm_erase_tm. rewrite -open_tm_erase_tm. simpl. rewrite close_tm_wrt_tm_open_tm_wrt_tm. auto. apply fv_tm_erase_tm. auto. eapply An_EraseEq. eauto using AnnTyping_regularity. eauto. auto. eauto. } move => [? ?]. have: exists g, AnnDefEq G0 D g (a_App a1' Rel b1') (a_App (a_Conv a0' ga0') Rel (a_Conv b0' gb0')). { eexists. eapply An_EraseEq. eauto. eauto. simpl. f_equal. eauto. eauto. eauto. } move => [? ?]. have: exists g, AnnDefEq G0 D g (a_App (a_Conv a0 ga0) Rel b0) (a_App (a_Conv a0' ga0') Rel (a_Conv b0' gb0')). { eexists. eapply An_Trans2. eauto. eauto. } move => [? ?]. have LC: lc_tm (a_Conv a0 ga0). eauto using AnnTyping_lc1. move: (Path_to_Path LC p eq_refl) => P. have LC':lc_tm (a_Conv a0' ga0'). eauto using AnnTyping_lc1. move: (Path_to_Path LC' p0 eq_refl) => P'. have ?: AnnTyping G0 (a_Conv a0 ga0) (a_Pi Rel B0 B11). { eapply An_Conv; eauto. } eexists. exists (a_Conv a0 ga0). exists (a_Conv a0' ga0'). exists (a_Pi Rel B0 B11). repeat split. + rewrite HeqB11. simpl. autorewcs. rewrite -close_tm_erase_tm. rewrite -open_tm_erase_tm. simpl. rewrite close_tm_wrt_tm_open_tm_wrt_tm. auto. apply fv_tm_erase_tm. auto. + eapply An_Left2; eauto. + eauto. + eauto. - (* LeftIrrel *) destruct (H _ H5 H6) as (a0 & AB & hyp). split_hyp. clear H. destruct (H0 _ H5 H6) as (b0 & B0 & hyp). split_hyp. clear H0. destruct (H1 _ H5 H6) as (a0' & AB' & hyp). split_hyp. clear H1. destruct (H2 _ H5 H6) as (b0' & B0' & hyp). split_hyp. clear H2. destruct (H3 _ H5 H6) as (gg & ab & ab' & s & hyp). split_hyp. clear H3. destruct (H4 _ H5 H6) as (gB & Ba & Ba' & s1 & hyp). split_hyp. clear H4. have ?: AnnTyping G0 AB a_Star. eauto using AnnTyping_regularity. have ?: AnnTyping G0 AB' a_Star. eauto using AnnTyping_regularity. match goal with [H : erase AB = a_Pi _ A B, H1 : AnnTyping G0 AB a_Star |- _ ] => destruct (erase_pi H H1) as (A1 & B1 & hyp1); split_hyp end. match goal with [H : erase AB' = a_Pi _ A B, H1 : AnnTyping G0 AB' a_Star |- _ ] => destruct (erase_pi H H1) as (A1' & B1' & hyp1); split_hyp end. match goal with [H4: erase ab = a_App a _ _, H8 : AnnTyping G0 ab s |- _ ] => destruct (erase_app_Irrel H4 H8) as (a1 & b1 & gab & C & ? & ? & h3 & ?); inversion h3 end. match goal with [H4: erase ab' = a_App a' _ _, H8 : AnnTyping G0 ab' s |- _ ] => destruct (erase_app_Irrel H4 H8) as (a1' & b1' & gab' & C' & ? & ? & h3' & ?) ; inversion h3' end. match goal with [H: AnnTyping G0 (a_Pi _ A1 B1) a_Star |- _ ] => inversion H end. match goal with [H: AnnTyping G0 (a_Pi _ A1' B1') a_Star |- _ ] => inversion H end. subst. have ?: AnnCtx G0 by eauto with ctx_wff. 
have: exists g, AnnDefEq G0 (dom G0) g B0 A1. { eexists. eapply An_EraseEq. eauto using AnnTyping_regularity. eauto. eauto. eauto. } move => [g1 ?]. have: exists g, AnnDefEq G0 (dom G0) g B0 A1'. { eexists. eapply An_EraseEq. eauto using AnnTyping_regularity. eauto. eauto. eauto. } move => [g1' ?]. (* need to cast in B1 *) pick fresh x. have ?: AnnCtx ([(x, Tm B0)] ++ G0). { econstructor. eauto. eauto using AnnTyping_regularity. fsetdec_fast. } have ?: AnnTyping ([(x, Tm B0)] ++ G0) A1 a_Star. { eapply AnnTyping_weakening with (F:=nil). eauto. auto. auto. } remember (close_tm_wrt_tm x (open_tm_wrt_tm B1 (a_Conv (a_Var_f x) g1))) as B11. have h5: AnnTyping G0 (a_Pi Irrel B0 B11) a_Star. { rewrite HeqB11. eapply An_Pi_exists2 with (x:=x). autorewrite with lngen. auto. autorewrite with lngen. eapply (@AnnTyping_tm_subst_nondep (L \u {{x}} \u dom G0)). econstructor. econstructor. econstructor. auto. eauto using AnnTyping_regularity. auto. auto. eapply AnnDefEq_weaken_available. eapply AnnDefEq_weakening with (F:=nil). eauto. auto. auto. auto. intros. eapply AnnTyping_weakening. match goal with [H44 : ∀ x : atom, ¬ x `in` L → AnnTyping ([(x, _)] ++ _) (open_tm_wrt_tm _ (a_Var_f x)) a_Star |- _ ] => eapply H44 end. auto. auto. econstructor. auto. auto. simpl. auto. } inversion h5. (* pi type coercions *) have: exists g, AnnDefEq G0 (dom G0) g AB (a_Pi Irrel B0 B11). { eexists. eapply An_EraseEq. eauto. eauto. rewrite HeqB11. simpl. autorewcs. rewrite -close_tm_erase_tm. rewrite -open_tm_erase_tm. simpl. rewrite close_tm_wrt_tm_open_tm_wrt_tm. auto. auto. apply fv_tm_erase_tm. auto. eauto. } move => [ga0 ?]. have: exists g, AnnDefEq G0 (dom G0) g AB' (a_Pi Irrel B0 B11). { eexists. eapply An_EraseEq. eauto. eauto. rewrite HeqB11. simpl. autorewcs. rewrite -close_tm_erase_tm. rewrite -open_tm_erase_tm. simpl. rewrite close_tm_wrt_tm_open_tm_wrt_tm. auto. auto. apply fv_tm_erase_tm. auto. eauto. } move => [ga0' ?]. have ?: AnnTyping G0 (a_App (a_Conv a0 ga0) Irrel b0) (open_tm_wrt_tm B11 b0). { econstructor. eapply An_Conv. eauto. eauto. auto. auto. } have: exists g, AnnDefEq G0 (dom G0) g B0' B0. { eexists. eapply An_EraseEq. eauto using AnnTyping_regularity. eauto using AnnTyping_regularity. eauto. eauto. } move => [gb0' ?]. have ?: AnnTyping G0 (a_App (a_Conv a0' ga0') Irrel (a_Conv b0' gb0')) (open_tm_wrt_tm B11 (a_Conv b0' gb0')). { econstructor. eapply An_Conv. eauto. eauto. auto. eapply An_Conv. eauto. eauto. eauto using AnnTyping_regularity. } have: exists g, AnnDefEq G0 D g (a_App (a_Conv a0 ga0) Irrel b0) (a_App a1' Irrel b1'). { eexists. eapply An_Trans2 with (a1 := a_App a1 Irrel b1). { eapply An_EraseEq. eauto. econstructor. eauto. eauto. simpl. f_equal. auto. auto. eapply An_Trans2 with (a1 := s). eapply An_EraseEq. eauto using AnnTyping_regularity. eauto using AnnTyping_regularity. autorewcs. match goal with [ H : erase s = _ |- _ ] => rewrite H end. rewrite -open_tm_erase_tm. f_equal. rewrite HeqB11. rewrite -close_tm_erase_tm. rewrite -open_tm_erase_tm. simpl. rewrite close_tm_wrt_tm_open_tm_wrt_tm. auto. apply fv_tm_erase_tm. auto. eauto. eauto. } eapply An_Trans2 with (a1 := ab). { eapply An_EraseEq. eauto. eauto. eauto. eapply An_Sym2. eauto. } eapply An_Trans2 with (a1 := ab'). eauto. { eapply An_EraseEq. eauto. eauto. eauto. eauto. } } move => [g ?]. have: exists g, AnnDefEq G0 (dom G0) g (open_tm_wrt_tm B3 b1') (open_tm_wrt_tm B11 (a_Conv b0' gb0')). { eexists. eapply An_Trans2 with (a1 := s). eapply An_Sym2. eauto. eapply An_Trans2 with (a1 := Ba). { eapply An_EraseEq. 
eauto using AnnTyping_regularity. eauto using AnnTyping_regularity. autorewcs. match goal with [ H : erase Ba = _ |- _ ] => rewrite H end. auto. eapply An_EraseEq. eauto. eauto using AnnTyping_regularity. auto. eauto. } eapply An_Trans2 with (a1 := Ba'). { match goal with [ H : AnnDefEq G0 _ _ Ba Ba' |- _ ] => rewrite -erase_dom in H end. eauto. } eapply An_EraseEq. eauto using AnnTyping_regularity. eauto using AnnTyping_regularity. rewrite HeqB11. autorewcs. rewrite -open_tm_erase_tm. simpl. autorewcs. rewrite -close_tm_erase_tm. rewrite -open_tm_erase_tm. simpl. rewrite close_tm_wrt_tm_open_tm_wrt_tm. auto. apply fv_tm_erase_tm. auto. eapply An_EraseEq. eauto using AnnTyping_regularity. eauto. auto. eauto. } move => [? ?]. have: exists g, AnnDefEq G0 D g (a_App a1' Irrel b1') (a_App (a_Conv a0' ga0') Irrel (a_Conv b0' gb0')). { eexists. eapply An_EraseEq. eauto. eauto. simpl. f_equal. eauto. eauto. } move => [? ?]. have: exists g, AnnDefEq G0 D g (a_App (a_Conv a0 ga0) Irrel b0) (a_App (a_Conv a0' ga0') Irrel (a_Conv b0' gb0')). { eexists. eapply An_Trans2. eauto. eauto. } move => [? ?]. have LC: lc_tm (a_Conv a0 ga0). eauto using AnnTyping_lc1. move: (Path_to_Path LC p eq_refl) => P. have LC':lc_tm (a_Conv a0' ga0'). eauto using AnnTyping_lc1. move: (Path_to_Path LC' p0 eq_refl) => P'. have ?: AnnTyping G0 (a_Conv a0 ga0) (a_Pi Irrel B0 B11). { eapply An_Conv; eauto. } eexists. exists (a_Conv a0 ga0). exists (a_Conv a0' ga0'). exists (a_Pi Irrel B0 B11). repeat split. + rewrite HeqB11. simpl. autorewcs. rewrite -close_tm_erase_tm. rewrite -open_tm_erase_tm. simpl. rewrite close_tm_wrt_tm_open_tm_wrt_tm. auto. apply fv_tm_erase_tm. auto. + eapply An_Left2; eauto. + eauto. + eauto. - (* Right *) destruct (H _ H5 H6) as (a0 & AB & hyp). split_hyp. clear H. destruct (H0 _ H5 H6) as (b0 & B0 & hyp). split_hyp. clear H0. destruct (H1 _ H5 H6) as (a0' & AB' & hyp). split_hyp. clear H1. destruct (H2 _ H5 H6) as (b0' & B0' & hyp). split_hyp. clear H2. destruct (H3 _ H5 H6) as (gg & ab & ab' & s & hyp). split_hyp. clear H3. destruct (H4 _ H5 H6) as (gB & Ba & Ba' & s1 & hyp). split_hyp. clear H4. have ?: AnnTyping G0 AB a_Star. eauto using AnnTyping_regularity. have ?: AnnTyping G0 AB' a_Star. eauto using AnnTyping_regularity. match goal with [H : erase AB = a_Pi _ A B, H1 : AnnTyping G0 AB a_Star |- _ ] => destruct (erase_pi H H1) as (A1 & B1 & hyp1); split_hyp end. match goal with [H : erase AB' = a_Pi _ A B, H1 : AnnTyping G0 AB' a_Star |- _ ] => destruct (erase_pi H H1) as (A1' & B1' & hyp1); split_hyp end. match goal with [H4: erase ab = a_App a _ _, H8 : AnnTyping G0 ab s |- _ ] => destruct (erase_app_Rel H4 H8) as (a1 & b1 & gab & C & ? & ? & ? & h3 & ?); inversion h3 end. match goal with [H4: erase ab' = a_App a' _ _, H8 : AnnTyping G0 ab' s |- _ ] => destruct (erase_app_Rel H4 H8) as (a1' & b1' & gab' & C' & ? & ? & ? & h3' & ?) ; inversion h3' end. match goal with [H: AnnTyping G0 (a_Pi _ A1 B1) a_Star |- _ ] => inversion H end. match goal with [H: AnnTyping G0 (a_Pi _ A1' B1') a_Star |- _ ] => inversion H end. subst. have ?: AnnCtx G0 by eauto with ctx_wff. have: exists g, AnnDefEq G0 (dom G0) g B0 A1. { eexists. eapply An_EraseEq. eauto using AnnTyping_regularity. eauto. eauto. eauto. } move => [g1 ?]. have: exists g, AnnDefEq G0 (dom G0) g B0 A1'. { eexists. eapply An_EraseEq. eauto using AnnTyping_regularity. eauto. eauto. eauto. } move => [g1' ?]. (* need to cast in B1 *) pick fresh x. have ?: AnnCtx ([(x, Tm B0)] ++ G0). { econstructor. eauto. eauto using AnnTyping_regularity. 
fsetdec_fast. } have ?: AnnTyping ([(x, Tm B0)] ++ G0) A1 a_Star. { eapply AnnTyping_weakening with (F:=nil). eauto. auto. auto. } remember (close_tm_wrt_tm x (open_tm_wrt_tm B1 (a_Conv (a_Var_f x) g1))) as B11. have h5: AnnTyping G0 (a_Pi Rel B0 B11) a_Star. { rewrite HeqB11. eapply An_Pi_exists2 with (x:=x). autorewrite with lngen. auto. autorewrite with lngen. eapply (@AnnTyping_tm_subst_nondep (L \u {{x}} \u dom G0)). econstructor. econstructor. econstructor. auto. eauto using AnnTyping_regularity. auto. auto. eapply AnnDefEq_weaken_available. eapply AnnDefEq_weakening with (F:=nil). eauto. auto. auto. auto. intros. eapply AnnTyping_weakening. match goal with [H44 : ∀ x : atom, ¬ x `in` L → AnnTyping ([(x, _)] ++ _) (open_tm_wrt_tm _ (a_Var_f x)) a_Star |- _ ] => eapply H44 end. auto. auto. econstructor. auto. auto. simpl. auto. } inversion h5. (* pi type coercions *) have: exists g, AnnDefEq G0 (dom G0) g AB (a_Pi Rel B0 B11). { eexists. eapply An_EraseEq. eauto. eauto. rewrite HeqB11. simpl. autorewcs. rewrite -close_tm_erase_tm. rewrite -open_tm_erase_tm. simpl. rewrite close_tm_wrt_tm_open_tm_wrt_tm. auto. auto. apply fv_tm_erase_tm. auto. eauto. } move => [ga0 ?]. have: exists g, AnnDefEq G0 (dom G0) g AB' (a_Pi Rel B0 B11). { eexists. eapply An_EraseEq. eauto. eauto. rewrite HeqB11. simpl. autorewcs. rewrite -close_tm_erase_tm. rewrite -open_tm_erase_tm. simpl. rewrite close_tm_wrt_tm_open_tm_wrt_tm. auto. auto. apply fv_tm_erase_tm. auto. eauto. } move => [ga0' ?]. have ?: AnnTyping G0 (a_App (a_Conv a0 ga0) Rel b0) (open_tm_wrt_tm B11 b0). { econstructor. eapply An_Conv. eauto. eauto. auto. auto. } have: exists g, AnnDefEq G0 (dom G0) g B0' B0. { eexists. eapply An_EraseEq. eauto using AnnTyping_regularity. eauto using AnnTyping_regularity. eauto. eauto. } move => [gb0' ?]. have ?: AnnTyping G0 (a_App (a_Conv a0' ga0') Rel (a_Conv b0' gb0')) (open_tm_wrt_tm B11 (a_Conv b0' gb0')). { econstructor. eapply An_Conv. eauto. eauto. auto. eapply An_Conv. eauto. eauto. eauto using AnnTyping_regularity. } have: exists g, AnnDefEq G0 D g (a_App (a_Conv a0 ga0) Rel b0) (a_App a1' Rel b1'). { eexists. eapply An_Trans2 with (a1 := a_App a1 Rel b1). { eapply An_EraseEq. eauto. econstructor. eauto. eauto. simpl. f_equal. auto. auto. eapply An_Trans2 with (a1 := s). eapply An_EraseEq. eauto using AnnTyping_regularity. eauto using AnnTyping_regularity. autorewcs. match goal with [ H : erase s = _ |- _ ] => rewrite H end. rewrite -open_tm_erase_tm. f_equal. rewrite HeqB11. rewrite -close_tm_erase_tm. rewrite -open_tm_erase_tm. simpl. rewrite close_tm_wrt_tm_open_tm_wrt_tm. auto. apply fv_tm_erase_tm. auto. eauto. eauto. } eapply An_Trans2 with (a1 := ab). { eapply An_EraseEq. eauto. eauto. eauto. eapply An_Sym2. eauto. } eapply An_Trans2 with (a1 := ab'). eauto. { eapply An_EraseEq. eauto. eauto. eauto. eauto. } } move => [g ?]. have: exists g, AnnDefEq G0 (dom G0) g (open_tm_wrt_tm B3 b1') (open_tm_wrt_tm B11 (a_Conv b0' gb0')). { eexists. eapply An_Trans2 with (a1 := s). eapply An_Sym2. eauto. eapply An_Trans2 with (a1 := Ba). { eapply An_EraseEq. eauto using AnnTyping_regularity. eauto using AnnTyping_regularity. autorewcs. match goal with [ H : erase Ba = _ |- _ ] => rewrite H end. auto. eapply An_EraseEq. eauto. eauto using AnnTyping_regularity. auto. eauto. } eapply An_Trans2 with (a1 := Ba'). { match goal with [ H : AnnDefEq G0 _ _ Ba Ba' |- _ ] => rewrite -erase_dom in H end. eauto. } eapply An_EraseEq. eauto using AnnTyping_regularity. eauto using AnnTyping_regularity. rewrite HeqB11. 
autorewcs. rewrite -open_tm_erase_tm. simpl. autorewcs. rewrite -close_tm_erase_tm. rewrite -open_tm_erase_tm. simpl. rewrite close_tm_wrt_tm_open_tm_wrt_tm. auto. apply fv_tm_erase_tm. auto. eapply An_EraseEq. eauto using AnnTyping_regularity. eauto. auto. eauto. } move => [? ?]. have: exists g, AnnDefEq G0 D g (a_App a1' Rel b1') (a_App (a_Conv a0' ga0') Rel (a_Conv b0' gb0')). { eexists. eapply An_EraseEq. eauto. eauto. simpl. f_equal. eauto. eauto. eauto. } move => [? ?]. have: exists g, AnnDefEq G0 D g (a_App (a_Conv a0 ga0) Rel b0) (a_App (a_Conv a0' ga0') Rel (a_Conv b0' gb0')). { eexists. eapply An_Trans2. eauto. eauto. } move => [? ?]. have LC: lc_tm (a_Conv a0 ga0). eauto using AnnTyping_lc1. move: (Path_to_Path LC p eq_refl) => P. have LC':lc_tm (a_Conv a0' ga0'). eauto using AnnTyping_lc1. move: (Path_to_Path LC' p0 eq_refl) => P'. have ?: AnnTyping G0 (a_Conv a0 ga0) (a_Pi Rel B0 B11). { eapply An_Conv; eauto. } eexists. exists b0. exists (a_Conv b0' gb0'). exists B0. repeat split. + eapply An_Right2 with (a := (a_Conv a0 ga0)) (a' := (a_Conv a0' ga0')) (A := B0) (A' := B0); try eassumption. eapply An_Conv. eauto. eauto. eauto using AnnTyping_regularity. eapply An_Refl. eauto using AnnTyping_regularity. + eauto. + eauto. - (* CLeft. *) destruct (H _ H3 H4) as (a0 & AB & hyp). split_hyp. clear H. destruct (H0 _ H3 H4) as (a0' & AB' & hyp). split_hyp. clear H0. destruct (H1 _ H3 H4) as (gg & a10 & a20 & A0 & hyp). split_hyp. clear H1. destruct (H2 _ H3 H4) as (gAB & ab & ab' & Ba & hyp). split_hyp. clear H2. have ?: AnnTyping G0 AB a_Star. eauto using AnnTyping_regularity. have ?: AnnTyping G0 AB' a_Star. eauto using AnnTyping_regularity. match goal with [H : erase AB = a_CPi _ B, H1 : AnnTyping G0 AB a_Star |- _ ] => destruct (erase_cpi H H1) as (A1 & B1 & hyp1); split_hyp end. match goal with [H : erase AB' = a_CPi _ B, H1 : AnnTyping G0 AB' a_Star |- _ ] => destruct (erase_cpi H H1) as (A1' & B1' & hyp1); split_hyp end. match goal with [H4: erase ab = a_CApp a _ , H8 : AnnTyping G0 ab Ba |- _ ] => destruct (erase_capp H8 H4) as (a3 & g2 & g3 & C & ? & ? & h3 & ?); inversion h3 end. match goal with [H4: erase ab' = a_CApp a' _ , H8 : AnnTyping G0 ab' Ba |- _ ] => destruct (erase_capp H8 H4) as (a3' & g2' & g3' & C' & ? & ? & h3' & ?) ; inversion h3' end. match goal with [H: AnnTyping G0 (a_CPi _ B1) a_Star |- _ ] => inversion H end. match goal with [H: AnnTyping G0 (a_CPi _ B1') a_Star |- _ ] => inversion H end. have ?: AnnCtx G0 by eauto with ctx_wff. have ?: AnnPropWff G0 (Eq a10 a20 A0) by eauto. subst. match goal with [H: AnnDefEq G0 (dom (erase_context G0)) _ _ _ |- _ ]=> rewrite -erase_dom in H end. remember (Eq a10 a20 A0) as phi0. have: exists g, AnnIso G0 (dom G0) g phi0 A1. { eapply An_IsoRefl2_derivable. auto. auto. rewrite Heqphi0. simpl. auto. } move => [g1 ?]. have: exists g, AnnIso G0 (dom G0) g phi0 A1'. { eapply An_IsoRefl2_derivable. auto. auto. rewrite Heqphi0. simpl. auto. } move => [g1' ?]. (* need to cast in B1 *) pick fresh x. have ?: AnnCtx ([(x, Co phi0)] ++ G0). { econstructor. eauto. eauto using AnnTyping_regularity. fsetdec_fast. } have ?: AnnPropWff ([(x, Co phi0)] ++ G0) A1. { eapply AnnPropWff_weakening with (F:=nil). eauto. auto. auto. } remember (close_tm_wrt_co x (open_tm_wrt_co B1 (g_Cast (g_Var_f x) g1))) as B11. have h5: AnnTyping G0 (a_CPi phi0 B11) a_Star. { rewrite HeqB11. eapply An_CPi_exists2 with (c:=x). autorewrite with lngen. auto. autorewrite with lngen. destruct A1. 
eapply (@AnnTyping_co_subst_nondep (L \u {{x}} \u dom G0) ((x ~ Co phi0) ++ G0) (dom ((x ~ Co phi0) ++ G0))). econstructor. econstructor. econstructor. auto. eauto using AnnPropWff_regularity. auto. rewrite Heqphi0. auto. eauto. eapply AnnIso_weaken_available. eapply AnnIso_weakening with (F:=nil). match goal with [ H : AnnIso G0 (dom G0) g1 phi0 _ |- _] => move: H => h0; rewrite Heqphi0 in h0 end. eapply h0. eauto. auto. intros. eapply AnnTyping_weakening. match goal with [H44 : ∀ x : atom, ¬ x `in` L → AnnTyping ([(x, _)] ++ _) (open_tm_wrt_co _ (g_Var_f x)) a_Star |- _ ] => eapply H44 end. auto. auto. econstructor. auto. auto. simpl. auto. } inversion h5. (* pi type coercions *) have: exists g, AnnDefEq G0 (dom G0) g AB (a_CPi phi0 B11). { eexists. eapply An_EraseEq. eauto. eauto. rewrite HeqB11. simpl. autorewcs. rewrite -close_co_erase_tm. rewrite <- open_co_erase_tm2 with (g:= (g_Var_f x)). simpl. rewrite close_tm_wrt_co_open_tm_wrt_co. rewrite Heqphi0. simpl. auto. apply fv_co_erase_tm. auto. eauto. } move => [ga0 ?]. have: exists g, AnnDefEq G0 (dom G0) g AB' (a_CPi phi0 B11). { eexists. eapply An_EraseEq. eauto. eauto. rewrite HeqB11. simpl. autorewcs. rewrite -close_co_erase_tm. rewrite <- open_co_erase_tm2 with (g := (g_Var_f x)). simpl. rewrite close_tm_wrt_co_open_tm_wrt_co. rewrite Heqphi0. simpl. auto. apply fv_co_erase_tm. auto. eauto. } move => [ga0' ?]. have ?: AnnTyping G0 (a_CApp (a_Conv a0 ga0) gg) (open_tm_wrt_co B11 gg). { match goal with [ H : AnnDefEq G0 (dom G0) ga0 AB (a_CPi phi0 B11) |- _] => move: H => h0; rewrite Heqphi0 in h0 end. econstructor. eapply An_Conv. eauto. eauto. rewrite <- Heqphi0. auto. auto. } have ?: AnnTyping G0 (a_CApp (a_Conv a0' ga0') gg) (open_tm_wrt_co B11 gg). { match goal with [ H : AnnDefEq G0 (dom G0) ga0' AB' (a_CPi phi0 B11) |- _] => move: H => h0; rewrite Heqphi0 in h0 end. move: (AnnDefEq_regularity h0) => [C1 [C2 [gC [? [h2 ?]]]]]. inversion h2. subst. econstructor. eapply An_Conv. eauto. eauto. eauto. auto. } have: exists g, AnnDefEq G0 D g (a_CApp (a_Conv a0 ga0) gg) (a_CApp a3' g2'). { eexists. eapply An_Trans2 with (a1 := a_CApp a3 g2). { eapply An_EraseEq. eauto. econstructor. eauto. eauto. simpl. f_equal. auto. eapply An_Trans2 with (a1 := Ba). eapply An_EraseEq. eauto using AnnTyping_regularity. eauto using AnnTyping_regularity. autorewcs. match goal with [ H : erase Ba = _ |- _ ] => rewrite H end. rewrite <- open_co_erase_tm2 with (g:= g_Triv). rewrite HeqB11. rewrite -close_co_erase_tm. rewrite <- open_co_erase_tm2 with (g:= g_Var_f x). simpl. rewrite close_tm_wrt_co_open_tm_wrt_co. auto. apply fv_co_erase_tm. auto. eauto. eauto. } eapply An_Trans2 with (a1 := ab). { eapply An_EraseEq. eauto. eauto. eauto. eapply An_Sym2. eauto. } eapply An_Trans2 with (a1 := ab'). eauto. { eapply An_EraseEq. eauto. eauto. eauto. eauto. } } move => [g ?]. have: exists g, AnnDefEq G0 (dom G0) g (open_tm_wrt_co B2 g2') (open_tm_wrt_co B11 gg). { eexists. eapply An_Trans2 with (a1 := Ba). eapply An_Sym2. eauto. eapply An_EraseEq. eauto using AnnTyping_regularity. eauto using AnnTyping_regularity. rewrite HeqB11. autorewcs. rewrite <- open_co_erase_tm2 with (g := g_Triv). simpl. autorewcs. rewrite -close_co_erase_tm. rewrite <- open_co_erase_tm2 with (g:= g_Var_f x). simpl. rewrite close_tm_wrt_co_open_tm_wrt_co. auto. apply fv_co_erase_tm. auto. eauto. } move => [? ?]. have: exists g, AnnDefEq G0 D g (a_CApp a3' g2') (a_CApp (a_Conv a0' ga0') gg). { eexists. eapply An_EraseEq. eauto. eauto. simpl. f_equal. eauto. eauto. } move => [? ?]. 
have: exists g, AnnDefEq G0 D g (a_CApp (a_Conv a0 ga0) gg) (a_CApp (a_Conv a0' ga0') gg). { eexists. eapply An_Trans2. eauto. eauto. } move => [? ?]. have LC: lc_tm (a_Conv a0 ga0). eauto using AnnTyping_lc1. move: (Path_to_Path LC p eq_refl) => P. have LC':lc_tm (a_Conv a0' ga0'). eauto using AnnTyping_lc1. move: (Path_to_Path LC' p0 eq_refl) => P'. have ?: AnnTyping G0 (a_Conv a0 ga0) (a_CPi phi0 B11). { eapply An_Conv; eauto. } eexists. exists (a_Conv a0 ga0). exists (a_Conv a0' ga0'). exists (a_CPi phi0 B11). repeat split. + rewrite HeqB11. rewrite Heqphi0. simpl. autorewcs. f_equal. rewrite -close_co_erase_tm. rewrite <- open_co_erase_tm2 with (g:= g_Var_f x). simpl. rewrite close_tm_wrt_co_open_tm_wrt_co. auto. apply fv_co_erase_tm. auto. + eapply An_CLeft2; try eassumption. ++ subst. eauto. ++ subst. eauto. + eauto. + eauto. *) End erase.
lemma real_lim:
  fixes l::complex
  assumes "(f \<longlongrightarrow> l) F"
    and "\<not> trivial_limit F"
    and "eventually P F"
    and "\<And>a. P a \<Longrightarrow> f a \<in> \<real>"
  shows "l \<in> \<real>"
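(* The lemma above is stated without its proof in this extract.  A minimal proof
   sketch, assuming the HOL-Analysis facts Lim_in_closed_set and
   closed_complex_Reals are available in this context, could look as follows:
   membership in the closed set of reals transfers from the eventual values of f
   to the limit l. *)
proof (rule Lim_in_closed_set [OF closed_complex_Reals _ assms(2,1)])
  show "eventually (\<lambda>x. f x \<in> \<real>) F"
    using assms(3,4) eventually_mono by blast
qed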
This tool makes it possible to check all your WiNRADiO G3 series software installations, detect errors, or repair the XRS subsystem. In most cases you can use the report to diagnose and rectify the problem on your side (for example, problems caused by a corrupted software installation are usually easy to fix by simply reinstalling the software). If you are unable to solve the problem, you may wish to send us the diagnostics report produced by this tool so that we can assist you.

The tool detects the following categories of errors:

1. Driver errors (drivers not found in the system directory, not loaded in memory, or having invalid registry entries).
2. WiNRADiO API errors (dynamic link libraries not found or failing to open).
3. Application installation errors (miscellaneous files such as fonts or control applets not installed properly).
4. XRS subsystem errors (ambiguous path to XRS plugins or XRS path variable not found).
5. Windows-related problems (insufficient user rights to access drivers or Win API calls failing).

Errors 1-3 should be resolved by WiNRADiO software reinstallation. Error 4 can be resolved by WiNRADiO software reinstallation or by using the 'XRS repair' function. Error 5 may indicate Windows problems, and Windows reinstallation may be required.

To generate and send us a diagnostics report:

1. Close your WiNRADiO receiver application if it is currently running.
2. Run the diagnostic program.
3. Generate the report by pressing "Get info".
4. Select "Copy report" from the File menu. This will copy the report to the Clipboard.
5. Insert the Clipboard information into your email message to us (using Shift+Ins).

Please note that this diagnostics tool assumes that you are using the latest available receiver application. Before using it, we recommend that you upgrade your receiver application to the latest available version.

Note: After downloading the ZIP file, unzip it to a new folder and run the EXE file.
(* Property from Productive Use of Failure in Inductive Proof,
   Andrew Ireland and Alan Bundy, JAR 1996.
   This Isabelle theory is produced using the TIP tool offered at the following website:
     https://github.com/tip-org/tools
   This file was originally provided as part of TIP benchmark at the following website:
     https://github.com/tip-org/benchmarks
   Yutaka Nagashima at CIIRC, CTU changed the TIP output theory file slightly
   to make it compatible with Isabelle2017. *)
theory TIP_prop_25
  imports "../../Test_Base"
begin

datatype 'a list = nil2 | cons2 "'a" "'a list"

datatype Nat = Z | S "Nat"

fun x :: "'a list => 'a list => 'a list" where
  "x (nil2) z = z"
| "x (cons2 z2 xs) z = cons2 z2 (x xs z)"

fun length :: "'a list => Nat" where
  "length (nil2) = Z"
| "length (cons2 z xs) = S (length xs)"

fun even :: "Nat => bool" where
  "even (Z) = True"
| "even (S (Z)) = False"
| "even (S (S z2)) = even z2"

fun t2 :: "Nat => Nat => Nat" where
  "t2 (Z) z = z"
| "t2 (S z2) z = S (t2 z2 z)"

theorem property0 :
  "((even (length (x y z))) = (even (t2 (length z) (length y))))"
  oops

end
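The `oops` above deliberately leaves property0 unproved, as is usual for TIP benchmark files. For reference, one possible proof sketch is given below; it is unverified, and the helper lemmas (len_append, t2_Z, t2_S, t2_comm) are illustrative names that would have to be placed inside the theory before the theorem:

lemma len_append: "length (x y z) = t2 (length y) (length z)"
  by (induct y) auto

lemma t2_Z: "t2 m Z = m"
  by (induct m) auto

lemma t2_S: "t2 m (S n) = S (t2 m n)"
  by (induct m) auto

lemma t2_comm: "t2 m n = t2 n m"
  by (induct m) (simp_all add: t2_Z t2_S)

theorem property0: "((even (length (x y z))) = (even (t2 (length z) (length y))))"
  by (simp add: len_append t2_comm)

The idea is that x is list append, length distributes over append via t2 (addition on the unary Nat), and t2 is commutative, so both sides compute the parity of the same number.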
function marginal = marginal_nodes(engine, nodes, t, fam)
% MARGINAL_NODES Compute the marginal on the specified query nodes (bk)
%
% marginal = marginal_nodes(engine, i, t)
% returns Pr(X(i,t) | Y(1:T)), where X(i,t) is the i'th node in the t'th slice.
%
% marginal = marginal_nodes(engine, query, t)
% returns Pr(X(query(1),t), ... X(query(end),t) | Y(1:T)),
% where 't' specifies the time slice of the earliest node in the query.
% 'query' cannot span more than 2 time slices.
%
% Example:
% Consider a DBN with 2 nodes per slice.
% Then t=2, nodes=[1 3] refers to node 1 in slice 2 and node 1 in slice 3.

if nargin < 3, t = 1; end
if nargin < 4, fam = 0; else fam = 1; end

% clpot{t} contains slice t-1 and t
% Example
% clpot #: 1    2    3
% slices:  1    1,2  2,3
% For filtering, we must take care not to take future evidence into account.
% For smoothing, clpot{1} does not exist.

bnet = bnet_from_engine(engine);
ss = length(bnet.intra);

if t < engine.T
  slice = t+1;
  nodes2 = nodes;
else % earliest t is T, so all nodes fit in one slice
  slice = engine.T;
  nodes2 = nodes + ss;
end

c = clq_containing_nodes(engine.jtree_engine, nodes2, fam);
assert(c >= 1);
%disp(['computing marginal on ' num2str(nodes) ' t = ' num2str(t)]);
%disp(['using ' num2str(nodes2) ' slice = ' num2str(slice) 'clq = ' num2str(c)]);

bigpot = engine.clpot{c, slice};
pot = marginalize_pot(bigpot, nodes2, engine.maximize);
marginal = pot_to_marginal(pot);

% we convert the domain to the unrolled numbering system
% so that update_ess extracts the right evidence.
marginal.domain = nodes+(t-1)*ss;
module Numeric.Rational where open import Prelude open import Numeric.Nat.GCD open import Numeric.Nat.GCD.Extended open import Numeric.Nat.GCD.Properties open import Numeric.Nat.Prime open import Numeric.Nat.Prime.Properties open import Numeric.Nat.Divide open import Numeric.Nat.Divide.Properties open import Numeric.Nat.Properties open import Tactic.Nat open import Tactic.Nat.Coprime record Rational : Set where no-eta-equality constructor ratio field numerator : Nat denominator : Nat ⦃ d>0 ⦄ : NonZero denominator n⊥d : Coprime numerator denominator open Rational public using (numerator; denominator) infixl 7 mkratio syntax mkratio p q = p :/ q mkratio : (p q : Nat) {{_ : NonZero q}} → Rational mkratio p q = gcdReduce-r p q λ p′ q′ _ _ _ prf → ratio p′ q′ prf mkratio-sound : (p q : Nat) {{_ : NonZero q}} → p * denominator (mkratio p q) ≡ q * numerator (mkratio p q) mkratio-sound p q with gcd p q ... | gcd-res d (is-gcd (factor! p′) (factor! q′) _) = auto NonZeroQ : Rational → Set NonZeroQ x = NonZero (numerator x) infixl 6 _+Q_ _-Q_ infixl 7 _*Q_ _/Q_ _+Q_ : Rational → Rational → Rational ratio n₁ d₁ n₁/d₁ +Q ratio n₂ d₂ n₂/d₂ = gcdReduce d₁ d₂ λ d₁′ d₂′ g eq₁ eq₂ d₁′/d₂′ → gcdReduce-r (n₁ * d₂′ + n₂ * d₁′) g λ s′ g′ g₁ eqs eqg s′/g′ → let instance _ = mul-nonzero d₁′ d₂′ _ = mul-nonzero (d₁′ * d₂′) g′ in ratio s′ (d₁′ * d₂′ * g′) $ let[ _ := lemma s′ n₁ d₁ n₂ d₂ d₁′ d₂′ g g₁ eqs eq₁ n₁/d₁ d₁′/d₂′ ] let[ _ := lemma s′ n₂ d₂ n₁ d₁ d₂′ d₁′ g g₁ (by eqs) eq₂ n₂/d₂ auto-coprime ] auto-coprime where lemma : ∀ s′ n₁ d₁ n₂ d₂ d₁′ d₂′ g g₁ → s′ * g₁ ≡ n₁ * d₂′ + n₂ * d₁′ → d₁′ * g ≡ d₁ → Coprime n₁ d₁ → Coprime d₁′ d₂′ → Coprime s′ d₁′ lemma s′ n₁ d₁ n₂ d₂ d₁′ d₂′ g g₁ eqs refl n₁/d₁ d₁′/d₂′ = coprimeByPrimes s′ d₁′ λ p isP p|s′ p|d₁′ → let p|n₁d₂′ : p Divides (n₁ * d₂′) p|n₁d₂′ = divides-sub-r {n₁ * d₂′} {n₂ * d₁′} (transport (p Divides_) eqs (divides-mul-l g₁ p|s′)) (divides-mul-r n₂ p|d₁′) p|d₁ : p Divides d₁ p|d₁ = divides-mul-l g p|d₁′ p/n₁ : Coprime p n₁ p/n₁ = case prime-coprime/divide p n₁ isP of λ where (left p/n₁) → p/n₁ (right p|n₁) → ⊥-elim (prime-divide-coprime p n₁ d₁ isP n₁/d₁ p|n₁ p|d₁) p|d₂′ : p Divides d₂′ p|d₂′ = coprime-divide-mul-l p n₁ d₂′ p/n₁ p|n₁d₂′ in divide-coprime p d₁′ d₂′ d₁′/d₂′ p|d₁′ p|d₂′ -- Specification for addition slowAddQ : Rational → Rational → Rational slowAddQ (ratio p q _) (ratio p₁ q₁ _) = mkratio (p * q₁ + p₁ * q) (q * q₁) ⦃ mul-nonzero q q₁ ⦄ _-Q_ : Rational → Rational → Rational ratio p q _ -Q ratio p₁ q₁ _ = mkratio (p * q₁ - p₁ * q) (q * q₁) ⦃ mul-nonzero q q₁ ⦄ -- Fast multiplication based on the same technique as the fast addition, except it's much -- simpler for multiplication. 
_*Q_ : Rational → Rational → Rational ratio n₁ d₁ _ *Q ratio n₂ d₂ _ = gcdReduce-r n₁ d₂ λ n₁′ d₂′ g₁ n₁′g₁=n₁ d₂′g₁=d₂ _ → gcdReduce-r n₂ d₁ λ n₂′ d₁′ g₂ n₂′g₂=n₂ d₁′g₂=d₁ _ → let instance _ = mul-nonzero d₁′ d₂′ in ratio (n₁′ * n₂′) (d₁′ * d₂′) $ case₄ n₁′g₁=n₁ , d₂′g₁=d₂ , n₂′g₂=n₂ , d₁′g₂=d₁ of λ where refl refl refl refl → auto-coprime -- Specification for multiplication slowMulQ : Rational → Rational → Rational slowMulQ (ratio p q _) (ratio p₁ q₁ _) = mkratio (p * p₁) (q * q₁) {{mul-nonzero q q₁}} recip : (x : Rational) {{_ : NonZeroQ x}} → Rational recip (ratio 0 q eq) {{}} recip (ratio (suc p) q eq) = ratio q (suc p) auto-coprime _/Q_ : (x y : Rational) {{_ : NonZeroQ y}} → Rational x /Q y = x *Q recip y instance FracQ : Fractional Rational Fractional.Constraint FracQ _ y = NonZeroQ y Fractional._/_ FracQ x y = x /Q y {-# DISPLAY _+Q_ a b = a + b #-} {-# DISPLAY _-Q_ a b = a - b #-} {-# DISPLAY _*Q_ a b = a * b #-} {-# DISPLAY ratio a b refl = a / b #-} instance NumberRational : Number Rational Number.Constraint NumberRational _ = ⊤ fromNat {{NumberRational}} n = n :/ 1 SemiringRational : Semiring Rational zro {{SemiringRational}} = 0 :/ 1 one {{SemiringRational}} = 1 :/ 1 _+_ {{SemiringRational}} = _+Q_ _*_ {{SemiringRational}} = _*Q_ ShowRational : Show Rational showsPrec {{ShowRational}} _ (ratio p 1 _) = shows p showsPrec {{ShowRational}} _ (ratio p q _) = shows p ∘ showString "/" ∘ shows q -- Ordering -- private module _ {p q eq p₁ q₁ eq₁} {{_ : NonZero q}} {{_ : NonZero q₁}} where ratio-inj₁ : ratio p q eq ≡ ratio p₁ q₁ eq₁ → p ≡ p₁ ratio-inj₁ refl = refl ratio-inj₂ : ratio p q eq ≡ ratio p₁ q₁ eq₁ → q ≡ q₁ ratio-inj₂ refl = refl cong-ratio : ∀ {p q eq p₁ q₁ eq₁} {nzq : NonZero q} {nzq₁ : NonZero q₁} → p ≡ p₁ → q ≡ q₁ → ratio p q ⦃ nzq ⦄ eq ≡ ratio p₁ q₁ ⦃ nzq₁ ⦄ eq₁ cong-ratio {q = zero} {nzq = ()} cong-ratio {q = suc q} refl refl = ratio _ _ $≡ smashed instance EqRational : Eq Rational _==_ {{EqRational}} (ratio p q prf) (ratio p₁ q₁ prf₁) with p == p₁ | q == q₁ ... | no p≠p₁ | _ = no (p≠p₁ ∘ ratio-inj₁) ... | yes _ | no q≠q₁ = no (q≠q₁ ∘ ratio-inj₂) ... | yes p=p₁ | yes q=q₁ = yes (cong-ratio p=p₁ q=q₁) data LessQ (x y : Rational) : Set where lessQ : numerator x * denominator y < numerator y * denominator x → LessQ x y private lem-unique : ∀ n₁ d₁ n₂ d₂ ⦃ _ : NonZero d₁ ⦄ ⦃ _ : NonZero d₂ ⦄ → Coprime n₁ d₁ → Coprime n₂ d₂ → n₁ * d₂ ≡ n₂ * d₁ → n₁ ≡ n₂ × d₁ ≡ d₂ lem-unique n₁ d₁ n₂ d₂ n₁⊥d₁ n₂⊥d₂ eq = let n₁|n₂ : n₁ Divides n₂ n₁|n₂ = coprime-divide-mul-r n₁ n₂ d₁ n₁⊥d₁ (factor d₂ (by eq)) n₂|n₁ : n₂ Divides n₁ n₂|n₁ = coprime-divide-mul-r n₂ n₁ d₂ n₂⊥d₂ (factor d₁ (by eq)) d₁|d₂ : d₁ Divides d₂ d₁|d₂ = coprime-divide-mul-r d₁ d₂ n₁ auto-coprime (factor n₂ (by eq)) d₂|d₁ : d₂ Divides d₁ d₂|d₁ = coprime-divide-mul-r d₂ d₁ n₂ auto-coprime (factor n₁ (by eq)) in divides-antisym n₁|n₂ n₂|n₁ , divides-antisym d₁|d₂ d₂|d₁ compareQ : ∀ x y → Comparison LessQ x y compareQ (ratio n₁ d₁ n₁⊥d₁) (ratio n₂ d₂ n₂⊥d₂) = case compare (n₁ * d₂) (n₂ * d₁) of λ where (less lt) → less (lessQ lt) (equal eq) → equal (uncurry cong-ratio (lem-unique n₁ d₁ n₂ d₂ n₁⊥d₁ n₂⊥d₂ eq)) (greater gt) → greater (lessQ gt) instance OrdQ : Ord Rational OrdQ = defaultOrd compareQ
```python
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

import sys
sys.path.append('../../pyutils')

import metrics
import utils
```

# When $p$ is much bigger than $N$

High variance and overfitting are a major concern in this setting. Simple, highly regularized models are often used.

Let's suppose we are trying to fit a linear model. With $p \ll N$, we can identify as many coefficients as we want without shrinkage. With $p = N$, we can identify some non-zero coefficients with moderate shrinkage. With $p \gg N$, even if there are many non-zero coefficients, we have no hope of finding them all: we need to shrink a lot.

# Diagonal LDA and Nearest Shrunken Centroids

The simplest form of regularization assumes that the features are independent within each class (the within-class covariance matrix is diagonal). It greatly reduces the number of parameters and often results in an effective and interpretable classifier.

The discriminant score for class $k$ is:

$$\theta_k(x) = - \sum_{j=1}^p \frac{(x_j - \bar{x}_{kj})^2}{s_j^2} + 2 \log \pi_k$$

with $s_j$ the within-class standard deviation for feature $j$, and:

$$\bar{x}_{kj} = \frac{1}{N_k} \sum_{i \in C_k} x_{ij}$$

We call $\bar{x}_k$ the centroid of class $k$. Diagonal LDA can be seen as a nearest-centroid classifier with appropriate standardization.

To regularize in a way that drops out features, we can shrink the classwise mean toward the overall mean for each feature separately. This method is called Nearest Shrunken Centroids (NSC). Let

$$d_{kj} = \frac{\bar{x}_{kj} - \bar{x}_j}{m_k(s_j + s_0)}$$

with $m_k^2 = 1/N_k - 1/N$ and $s_0$ a small positive constant. We can shrink $d_{kj}$ toward zero using soft thresholding:

$$d'_{kj} = \text{sign}(d_{kj})(|d_{kj}| - \Delta)_+$$

with $\Delta$ a parameter to be determined. The shrunken centroids are obtained by:

$$\bar{x}'_{kj} = \bar{x}_j + m_k(s_j + s_0)d'_{kj}$$

We use the shrunken centroids $\bar{x}'_{kj}$ instead of the original $\bar{x}_{kj}$ in the discriminant score.

# Linear Classifiers with Quadratic Regularization

## Regularized Discriminant Analysis

LDA involves the inversion of a $p \times p$ within-class covariance matrix $\hat{\Sigma}$. When $p > N$, this matrix is singular. RDA solves the issue by shrinking $\hat{\Sigma}$ towards its diagonal:

$$\hat{\Sigma}(\gamma) = \gamma \hat{\Sigma} + (1 - \gamma) \text{diag}(\hat{\Sigma})$$

## Logistic Regression with Quadratic Regularization

The multiclass logistic model is expressed as:

$$P(G=k|X=x) = \frac{\exp (\beta_{k0} + x^T \beta_k)}{\sum_{l=1}^K \exp (\beta_{l0} + x^T \beta_l)}$$

This has $K$ coefficient vectors $\beta_k$. We regularize the fit by maximizing the penalized log-likelihood:

$$\max_{ \{ \beta_{0k}, \beta_k \}_1^K} \left[ \sum_{i=1}^N \log P(g_i|x_i) - \frac{\lambda}{2} \sum_{k=1}^K ||\beta_k||_2^2 \right]$$

## The Support Vector Classifier

When $p > N$, the classes are perfectly separable, unless there are identical feature vectors in different classes. Surprisingly, the unregularized SVC often works about as well as the best regularized version.

## Feature Selection

RDA, regularized logistic regression and the SVC shrink weights toward zero, but they keep all features. Recursive feature elimination removes features with small weights and retrains the classifier. All three approaches can be kernelized to increase model complexity. With $p > N$ overfitting is always a danger, and yet using kernels may sometimes give better results.
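To make the NSC updates concrete, here is a minimal numpy sketch of the shrunken-centroid computation. The function name and synthetic data are my own, and I set $s_0$ to the median of the $s_j$ (a common heuristic); in practice $\Delta$ would be chosen by cross-validation.

```python
import numpy as np

def nsc_shrunken_centroids(X, y, delta, s0=None):
    """Shrunken class centroids x'_kj for a given threshold delta (NSC)."""
    classes = np.unique(y)
    N, p = X.shape
    xbar = X.mean(axis=0)                       # overall mean per feature
    ss = np.zeros(p)                            # pooled within-class sum of squares
    for k in classes:
        Xk = X[y == k]
        ss += ((Xk - Xk.mean(axis=0)) ** 2).sum(axis=0)
    s = np.sqrt(ss / (N - len(classes)))        # within-class std s_j
    if s0 is None:
        s0 = np.median(s)                       # common heuristic for s_0
    centroids = {}
    for k in classes:
        Xk = X[y == k]
        Nk = len(Xk)
        mk = np.sqrt(1.0 / Nk - 1.0 / N)
        d = (Xk.mean(axis=0) - xbar) / (mk * (s + s0))
        d_shrunk = np.sign(d) * np.maximum(np.abs(d) - delta, 0.0)   # soft threshold
        centroids[k] = xbar + mk * (s + s0) * d_shrunk               # x'_kj
    return centroids

# Tiny synthetic check: two classes, first three features carry signal.
rng = np.random.default_rng(0)
X = rng.normal(size=(100, 20))
y = rng.integers(0, 2, size=100)
X[y == 1, :3] += 2.0
shrunk = nsc_shrunken_centroids(X, y, delta=1.0)
```

For a large enough `delta`, the two shrunken centroids coincide on the noise features, so those features drop out of the discriminant score.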
## Computational shortcuts when $p \gg N$

Instead of working with the matrix $X \in \mathbb{R}^{N \times p}$, we can work with a matrix of size $N \times N$, using the SVD:

$$
\begin{equation}
\begin{split}
X & = UDV^T \\
& = RV^T
\end{split}
\end{equation}
$$

with $R \in \mathbb{R}^{N \times N}$:

$$R = UD$$

We can usually work with $R$ instead of $X$. For example, consider the estimates from a ridge regression:

$$\hat{\beta} = (X^TX + \delta I)^{-1}X^T y$$

We can instead compute $\hat{\theta}$, the ridge-regression estimate based on the pairs $(r_i, y_i)$, and then recover $\hat{\beta} = V \hat{\theta}$. This idea can be generalized to any linear model with a quadratic penalty on the weights.
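A quick numerical check of this shortcut (the dimensions and the penalty $\delta$ below are arbitrary choices of mine): the ridge fit on $R = UD$, mapped back through $V$, matches the direct $p \times p$ solve.

```python
import numpy as np

rng = np.random.default_rng(0)
N, p, delta = 50, 500, 1.0
X = rng.normal(size=(N, p))
y = rng.normal(size=N)

# Direct ridge estimate: a p x p solve.
beta_direct = np.linalg.solve(X.T @ X + delta * np.eye(p), X.T @ y)

# Shortcut: thin SVD X = U D V^T, then work with R = UD (N x N).
U, d, Vt = np.linalg.svd(X, full_matrices=False)
R = U * d                                          # R = UD
theta = np.linalg.solve(R.T @ R + delta * np.eye(N), R.T @ y)
beta_svd = Vt.T @ theta                            # beta = V theta

print(np.allclose(beta_direct, beta_svd))          # True up to numerical error
```

The saving is that the linear solve is $N \times N$ instead of $p \times p$, which matters precisely when $p \gg N$.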
# Linear classifiers with $L_1$ Regularization

The lasso for linear regression is:

$$\min_{\beta} \frac{1}{2} \sum_{i=1}^N \left( y_i - \beta_0 - \sum_{j=1}^p x_{ij}\beta_j \right) ^2 + \lambda \sum_{j=1}^p |\beta_j|$$

The $L_1$ penalty causes a subset of the $\hat{\beta}_j$ to be exactly zero for a sufficiently large value of $\lambda$, and hence performs feature selection. When $p > N$, as $\lambda \to 0$ the model fits the data perfectly. Moreover, when $p > N$ the number of non-zero coefficients is at most $N$ for any value of $\lambda$.

Linear regression can be applied to two-class classification using $\pm 1$ as labels and taking the sign of the prediction. A more natural approach is to use the lasso penalty on logistic regression. We can use a symmetric multinomial logistic regression model and maximize the penalized log-likelihood:

$$\max_{ \{ \beta_{0k}, \beta_k \}_1^K} \left[ \sum_{i=1}^N \log P(g_i|x_i) - \lambda \sum_{k=1}^K \sum_{j=1}^p |\beta_{kj}| \right]$$

The lasso tends to encourage a sparse solution, while ridge tends to shrink the coefficients of correlated variables toward each other. The elastic net penalty is a compromise:

$$\sum_{j=1}^p \left( \alpha |\beta_j| + (1 - \alpha) \beta_j^2 \right)$$

with $\alpha \in [0,1]$ a parameter that determines the mix of the penalties. The logistic regression problem above with the elastic net penalty becomes:

$$\max_{ \{ \beta_{0k}, \beta_k \}_1^K} \left[ \sum_{i=1}^N \log P(g_i|x_i) - \lambda \sum_{k=1}^K \sum_{j=1}^p (\alpha|\beta_{kj}| + (1 - \alpha) \beta_{kj}^2) \right]$$

## The Fused Lasso

The fused lasso is a method that tends to smooth the coefficients uniformly. We add a penalty that takes into account the ordering of the features:

$$\min_{\beta} \sum_{i=1}^N \left( y_i - \beta_0 - \sum_{j=1}^p x_{ij}\beta_j \right) ^2 + \lambda_1 \sum_{j=1}^p |\beta_j| + \lambda_2 \sum_{j=1}^{p-1} |\beta_{j+1} - \beta_j|$$

# Classification When Features are Unavailable

Instead of working with features, we can work with an $N \times N$ proximity matrix, interpreting the proximities as inner products. Such a matrix can be treated as a kernel matrix $K$ and used with kernel methods such as the SVM.

## Classification and Other Methods Using Kernels

There are a number of other classifiers, besides the SVM, that can be implemented using only inner-product matrices. This also implies they can be kernelized like the SVM.

For nearest-neighbor classification, we can transform inner products into distances:

$$||x_i - x_{i'}||^2 = \langle x_i, x_i \rangle + \langle x_{i'}, x_{i'} \rangle - 2 \langle x_i, x_{i'} \rangle$$

For nearest-centroid classification, with training pairs $(x_i, g_i)$ and class centroids $\bar{x}_k$, we can compute the distance of a test point to each centroid:

$$||x_0 - \bar{x}_k||^2 = \langle x_0, x_0 \rangle - \frac{2}{N_k} \sum_{g_i=k} \langle x_0, x_i \rangle + \frac{1}{N_k^2} \sum_{g_i=k} \sum_{g_{i'}=k} \langle x_i, x_{i'} \rangle$$

We can also perform kernel PCA. Let $X$ be the centered data matrix, with SVD decomposition:

$$X=UDV^T$$

We get the matrix of principal components $Z$:

$$Z = UD$$

When $K=XX^T$, it follows that $K=UD^2U^T$, and hence we can compute $Z$ from the eigendecomposition of $K$. If $X$ is not centered, we need to use the double-centered kernel instead:

$$\tilde{K} = (I-M)K(I-M)$$

with $M = \frac{1}{N} 1 1^T$.

But there are some things that we cannot do with kernels:

- We cannot standardize the variables.
- We cannot assess directly the contribution of individual variables (i.e. we cannot use the lasso penalty).
- We cannot separate the good variables from the noise: they all get an equal say.
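Here is a minimal numpy illustration of the kernel PCA identity above: the components recovered from the double-centered Gram matrix $\tilde{K}$ agree, up to column signs, with $Z = UD$ from the SVD of the centered data (synthetic data of my choosing).

```python
import numpy as np

rng = np.random.default_rng(0)
N, p = 40, 300
X = rng.normal(size=(N, p))

# Principal components from the inner-product matrix only.
K = X @ X.T
M = np.ones((N, N)) / N
I = np.eye(N)
K_tilde = (I - M) @ K @ (I - M)                     # double-centered kernel

evals, evecs = np.linalg.eigh(K_tilde)              # K~ = U D^2 U^T
order = np.argsort(evals)[::-1]
evals, evecs = evals[order], evecs[:, order]
Z_kernel = evecs * np.sqrt(np.maximum(evals, 0.0))  # Z = UD

# Same components from the SVD of the centered data matrix.
Xc = X - X.mean(axis=0)
U, d, Vt = np.linalg.svd(Xc, full_matrices=False)
Z_svd = U * d

# Leading columns agree up to sign.
print(np.allclose(np.abs(Z_kernel[:, :5]), np.abs(Z_svd[:, :5]), atol=1e-8))
```

Note that the computation on the left never touches the $p$ individual features, which is exactly why the limitations listed above apply.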
# High-Dimensional Regression: Supervised Principal Components

PCA is an effective method for finding linear combinations of features that exhibit large variation in the data. Supervised PCA finds linear combinations that have both high variance and a significant correlation with the outcome.

Supervised PCA can be related to latent-variable modeling. Suppose we have a response variable $Y$ related to an underlying latent variable $U$ by a linear model:

$$Y = \beta_0 + \beta_1 U + \varepsilon$$

We have measurements on a set of features $X_j$, $j \in \mathcal{P}$:

$$X_j = \alpha_{0j} + \alpha_{1j}U + \varepsilon_j, \quad j \in \mathcal{P}$$

We also have many additional features $X_k$, $k \notin \mathcal{P}$, which are independent of $U$. Estimation is a three-step process, similar to supervised PCA:

- Estimate the set $\mathcal{P}$.
- Given $\hat{\mathcal{P}}$, estimate $U$.
- Perform a regression fit to estimate $\beta_0$ and $\beta_1$.

# Feature Assessment and the Multiple-Testing Problem

Feature assessment assesses the significance of each feature; it is closely related to multiple hypothesis testing.

Suppose we have a dataset of $N$ observations, each with $M$ features, separated into $K=2$ groups. To identify which features are informative about the group, we construct a two-sample t-statistic for each feature:

$$t_j = \frac{\bar{x}_{2j} - \bar{x}_{1j}}{\text{se}_j}$$

where:

$$\bar{x}_{kj} = \frac{1}{N_k} \sum_{i \in C_k} x_{ij}$$

$\text{se}_j$ is the pooled within-group standard error for feature $j$:

$$\text{se}_j = \hat{\sigma}_j \sqrt{\frac{1}{N_1} + \frac{1}{N_2}}$$

$$\hat{\sigma}_j^2 = \frac{1}{N_1 + N_2 - 2} \left( \sum_{i \in C_1} (x_{ij} - \bar{x}_{1j})^2 + \sum_{i \in C_2} (x_{ij} - \bar{x}_{2j})^2 \right)$$

We could consider any value greater than 2 in absolute value to be significantly large. However, with $M$ large, we would expect many large values to occur by chance. Assessing the results for all $M$ features simultaneously is the multiple-testing problem.

We can compute a p-value for each feature:

$$p_j = \frac{1}{K} \sum_{k=1}^K I(|t_j^k| > |t_j|)$$

where $t_j^k$ is the t-statistic computed from the $k$-th of $K$ random permutations of the sample labels. Using p-values, we can test the hypotheses:

$H_{0j} = $ the label has no effect on feature $j$.

$H_{1j} = $ the label has an effect on feature $j$.

We reject $H_{0j}$ at level $\alpha$ if $p_j < \alpha$.

Let $A_j$ be the event that $H_{0j}$ is falsely rejected, so that $P(A_j) = \alpha$. The family-wise error rate (FWER) is the probability of at least one false rejection.

## The False Discovery Rate

Possible outcomes from $M$ hypothesis tests:

|&nbsp;|Called not significant|Called significant|Total|
|---|---|---|---|
|$H_0$ True| $U$ | $V$ | $M_0$|
|$H_0$ False|$T$|$S$|$M_1$|
|Total|$M-R$|$R$|$M$|

The false discovery rate is:

$$\text{FDR} = E(V/R)$$

The expectation is taken over the sampled data.

## Asymmetric Cutpoints and the SAM Procedure

In the previous approaches, we used the absolute value of $t_j$, hence applying the same cutpoint to both positive and negative values. Significance Analysis of Microarrays (SAM) derives separate cutpoints for the positive and negative values.
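To illustrate the permutation p-values defined in the feature-assessment section, here is a minimal numpy sketch (the helper name, the synthetic data, and the number of permutations $K$ are my own choices):

```python
import numpy as np

def two_sample_t(X, labels):
    """Pooled-variance two-sample t-statistics, one per feature (column)."""
    X1, X2 = X[labels == 1], X[labels == 2]
    n1, n2 = len(X1), len(X2)
    sigma2 = (((X1 - X1.mean(axis=0)) ** 2).sum(axis=0)
              + ((X2 - X2.mean(axis=0)) ** 2).sum(axis=0)) / (n1 + n2 - 2)
    se = np.sqrt(sigma2) * np.sqrt(1.0 / n1 + 1.0 / n2)
    return (X2.mean(axis=0) - X1.mean(axis=0)) / se

rng = np.random.default_rng(0)
N1, N2, M = 20, 20, 1000
labels = np.r_[np.ones(N1, dtype=int), 2 * np.ones(N2, dtype=int)]
X = rng.normal(size=(N1 + N2, M))
X[labels == 2, :10] += 1.0                 # ten truly informative features

t_obs = two_sample_t(X, labels)
K = 500                                    # number of label permutations
exceed = np.zeros(M)
for _ in range(K):
    exceed += np.abs(two_sample_t(X, rng.permutation(labels))) > np.abs(t_obs)
p_values = exceed / K
print((p_values[:10] < 0.05).sum(), "of the 10 signal features flagged at alpha = 0.05")
```

With $M = 1000$ null features, rejecting at $\alpha = 0.05$ per feature would flag about 50 noise features by chance, which is the multiple-testing problem the FDR is designed to address.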
lemma countable_disjoint_open_subsets: fixes \<F> :: "'a::second_countable_topology set set" assumes "\<And>S. S \<in> \<F> \<Longrightarrow> open S" and pw: "pairwise disjnt \<F>" shows "countable \<F>"
#redirect Rec Pool Lodge
Formal statement is: lemma gcd_bezout_sum_nat: fixes a::nat assumes "a * x + b * y = d" shows "gcd a b dvd d" Informal statement is: If $a$, $b$, $x$, $y$, $d$ are natural numbers with $a x + b y = d$, then $\gcd(a, b)$ divides $d$.
(* * © 2020 Massachusetts Institute of Technology. * MIT Proprietary, Subject to FAR52.227-11 Patent Rights - Ownership by the Contractor (May 2014) * SPDX-License-Identifier: MIT * *) From Coq Require Import List. From SPICY Require Import MyPrelude Maps ChMaps Messages Keys Automation Tactics Simulation AdversaryUniverse ModelCheck.ProtocolFunctions . From SPICY Require IdealWorld RealWorld. Import IdealWorld.IdealNotations RealWorld.RealWorldNotations. From Frap Require Import Sets. Module Foo <: Sets.EMPTY. End Foo. Module Import SN := Sets.SetNotations(Foo). Set Implicit Arguments. Open Scope protocol_scope. Module AvgSalaryProtocol. (* Start with two users, as that is the minimum for any interesting protocol *) Notation USR1 := 0. Notation USR2 := 1. Notation USR3 := 2. Notation USR4 := 3. Section IW. Import IdealWorld. (* Set up initial communication channels so each user can talk directly to the other *) Notation pCH14 := 0. Notation pCH24 := 1. Notation pCH34 := 2. Notation CH14 := (# pCH14). Notation CH24 := (# pCH24). Notation CH34 := (# pCH34). (* This is the initial channel vector, each channel should be represented and start with * no messages. *) Notation empty_chs := (#0 #+ (CH14, []) #+ (CH24, []) #+ (CH34, [])). Notation PERMS1 := ($0 $+ (pCH14, writer)). Notation PERMS2 := ($0 $+ (pCH24, writer)). Notation PERMS3 := ($0 $+ (pCH34, writer)). Notation PERMS4 := ($0 $+ (pCH14, reader) $+ (pCH24, reader) $+ (pCH34, reader)). (* Fill in the users' protocol specifications here, adding additional users as needed. * Note that all users must return an element of the same type, and that type needs to * be one of: ... *) Notation ideal_users := [ mkiUsr USR1 PERMS1 ( _ <- Send (Content 1) CH14 ; @Return (Base Nat) 1 ) ; mkiUsr USR2 PERMS2 ( _ <- Send (Content 1) CH24 ; @Return (Base Nat) 1 ) ; mkiUsr USR3 PERMS3 ( _ <- Send (Content 1) CH34 ; @Return (Base Nat) 1 ) ; mkiUsr USR4 PERMS4 ( m1 <- @Recv Nat CH14 ; m2 <- @Recv Nat CH24 ; m3 <- @Recv Nat CH34 ; @Return (Base Nat) (let c1 := extractContent m1 in let c2 := extractContent m2 in let c3 := extractContent m3 in (c1 + c2 + c3) / 3) ) ]. (* This is where the entire specification universe gets assembled. It is unlikely anything * will need to change here. *) Definition ideal_univ_start := mkiU empty_chs ideal_users. End IW. Section RW. Import RealWorld. Import RealWorld.message. (* Key management needs to be bootstrapped. Since all honest users must only send signed * messages, we need some way of initially distributing signing keys in order to be able * to begin secure communication. This is analagous in the real world where we need to * have some sort of trust relationship in order to distribute trusted keys. * * Here, each user has a public asymmetric signing key. *) Notation KID1 := 0. Notation KID2 := 1. Notation KID3 := 2. Notation KID4 := 3. Notation KEYS := [ skey KID1 ; skey KID2 ; skey KID3; ekey KID4 ]. Notation KEYS1 := ($0 $+ (KID1, true) $+ (KID4, false)). Notation KEYS2 := ($0 $+ (KID2, true) $+ (KID4, false)). Notation KEYS3 := ($0 $+ (KID3, true) $+ (KID4, false)). Notation KEYS4 := ($0 $+ (KID1, false) $+ (KID2, false) $+ (KID3, false) $+ (KID4, true) ). 
Notation real_users := [ (* User 1 implementation *) MkRUserSpec USR1 KEYS1 ( c <- SignEncrypt KID1 KID4 USR4 (Content 1) ; _ <- Send USR4 c ; ret 1 ) ; (* User 2 implementation *) MkRUserSpec USR2 KEYS2 ( c <- SignEncrypt KID2 KID4 USR4 (Content 1) ; _ <- Send USR4 c ; ret 1 ) ; (* User 3 implementation *) MkRUserSpec USR3 KEYS3 ( c <- SignEncrypt KID3 KID4 USR4 (Content 1) ; _ <- Send USR4 c ; ret 1 ) ; (* Server implementation *) MkRUserSpec USR4 KEYS4 ( salC1 <- @Recv Nat (SignedEncrypted KID1 KID4 true) ; salC2 <- @Recv Nat (SignedEncrypted KID2 KID4 true) ; salC3 <- @Recv Nat (SignedEncrypted KID3 KID4 true) ; sal1 <- Decrypt salC1 ; sal2 <- Decrypt salC2 ; sal3 <- Decrypt salC3 ; ret (let s1 := extractContent sal1 in let s2 := extractContent sal2 in let s3 := extractContent sal3 in (s1 + s2 + s3) / 3 ) ) ]. (* Here is where we put the implementation universe together. Like above, it is * unlikely anything will need to change here. *) Definition real_univ_start := mkrU (mkKeys KEYS) real_users. End RW. (* These are here to help the proof automation. Don't change. *) #[export] Hint Unfold real_univ_start ideal_univ_start : user_build. #[export] Hint Extern 0 (IdealWorld.lstep_universe _ _ _) => progress(autounfold with user_build; simpl) : core. End AvgSalaryProtocol.
import separation_world.level4 -- hide /- # Level 5: Every T₂ space is also T₁ -/ variables {X : Type} -- hide variables [topological_space X] -- hide namespace topological_space -- hide open set -- hide /- Lemma If X is a T₂ (Hausdorff) topological space, then X is also a T₁ (Fréchet) space, i.e. every singleton is a closed set. -/ lemma T2_space.T1_space [T2_space X]: T1_space X := begin exact {t1 := λ x y hxy, let ⟨U, V, hU, hV, hUV, hh⟩ := T2_space.t2 x y hxy in ⟨U, hU, hh.1, not.imp (not_not.mpr hh.2) (λ c, (subset_compl_iff_disjoint.2 hUV) c)⟩ } end end topological_space -- hide
This editor can edit this entry and tell us a bit about themselves by clicking the Edit icon. 20081020 22:18:24 Welcome to the Wiki: Howdy Mr. or Ms. Fire, and welcome to the wiki! You might want to check out the importance of using your RealName, just so we can get to know you (or not: it's your choice, but people are pretty friendly here). My name's Evan, pleased to meet you! Thanks for adding the recent reviews. Keep in mind that this is your wiki too, so if you think an entry is too slanted in a particular direction, hit the Edit button and rewrite the entry. A good technique is to respect the opinion of the other editor (who you disagree with) and create a new entry that shows both your opinions. For instance, on the Pizza Hut Express entry, you could hit Edit and rewrite it so it reflects both your opinion (that it's tasty) and that others feel it's too greasy, but that it's better than the pizzas at the Silo (thus adding Patrick's opinion). You aren't limited to just adding comments: it is your wiki too, and you have a full say in what is in the entries themselves (just like everybody else). Once again, welcome to the wiki! Users/JabberWokky Evan JabberWokky Edwards
= = = 1990s = = =
idF : a -> a idF = id extensionality : (f : a -> b) -> (g : a -> b) -> ((x : a) -> f x = g x) -> f = g extensionality f g = believe_me leftIdPoint : (f : a -> b) -> (x : a) -> idF (f x) = f x leftIdPoint f x = Refl leftId : (f : a -> b) -> (idF . f = f) leftId f = extensionality (idF . f) f (leftIdPoint f)
State Before: α : Sort u β : Sort v γ : Sort w x : α y : (fun x => β) x f : α ≃ β ⊢ ↑f x = y ↔ x = ↑f.symm y State After: α : Sort u β : Sort v γ : Sort w x : α y : (fun x => β) x f : α ≃ β ⊢ ↑f x = ↑f (↑f.symm y) ↔ x = ↑f.symm y Tactic: conv_lhs => rw [← apply_symm_apply f y] State Before: α : Sort u β : Sort v γ : Sort w x : α y : (fun x => β) x f : α ≃ β ⊢ ↑f x = ↑f (↑f.symm y) ↔ x = ↑f.symm y State After: no goals Tactic: rw [apply_eq_iff_eq]
Load LFindLoad. From lfind Require Import LFind. From QuickChick Require Import QuickChick. From adtind Require Import goal33. Derive Show for natural. Derive Arbitrary for natural. Instance Dec_Eq_natural : Dec_Eq natural. Proof. dec_eq. Qed. Lemma conj12eqsynthconj4 : forall (lv0 : natural) (lv1 : natural), (@eq natural (plus lv0 lv1) (plus lv1 (plus Zero lv0))). Admitted. QuickChick conj12eqsynthconj4.
function ap_multi = compute_AP_multiCam(good_image, junk_image, index, queryCam, testCam) good_cam = testCam(good_image); good_cam_uni = unique(good_cam); ap_multi = zeros(1, 6); % on the same camera good_cam_now = queryCam; ngood = length(junk_image); junk_image_now = good_image; good_image_now = junk_image; old_recall = 0; old_precision = 1.0; ap = 0; intersect_size = 0; j = 0; good_now = 0; for n = 1:length(index) flag = 0; if ~isempty(find(good_image_now == index(n), 1)) flag = 1; % good image good_now = good_now+1; end if ~isempty(find(junk_image_now == index(n), 1)) continue; % junk image end if flag == 1%good intersect_size = intersect_size + 1; end if ngood == 0 ap_multi(good_cam_now) = 0; break; end recall = intersect_size/ngood; precision = intersect_size/(j + 1); ap = ap + (recall - old_recall)*((old_precision+precision)/2); old_recall = recall; old_precision = precision; j = j+1; if good_now == ngood ap_multi(good_cam_now) = ap; break; end end for k = 1:length(good_cam_uni) good_cam_now = good_cam_uni(k); ngood = length(find(good_cam == good_cam_now)); pos_junk = find(good_cam ~= good_cam_now); junk_image_now = [junk_image good_image(pos_junk)]; pos_good = find(good_cam == good_cam_now); good_image_now = good_image(pos_good); old_recall = 0; old_precision = 1.0; ap = 0; intersect_size = 0; j = 0; good_now = 0; for n = 1:length(index) flag = 0; if ~isempty(find(good_image_now == index(n), 1)) flag = 1; % good image good_now = good_now+1; end if ~isempty(find(junk_image_now == index(n), 1)) continue; % junk image end if flag == 1%good intersect_size = intersect_size + 1; end recall = intersect_size/ngood; precision = intersect_size/(j + 1); ap = ap + (recall - old_recall)*((old_precision+precision)/2); old_recall = recall; old_precision = precision; j = j+1; if good_now == ngood ap_multi(good_cam_now) = ap; break; end end end end
""" Struct for European Option euOption=EuropeanOptionND(T::num1,K::num2,isCall::Bool=true) where {num1 <: Number,num2 <: Number} Where:\n T = Time to maturity of the Option. K = Strike Price of the Option. isCall = true for CALL, false for PUT. """ mutable struct EuropeanOptionND{num1 <: Number, num2 <: Number, numtype <: Number} <: EuropeanBasketPayoff{numtype} T::num1 K::num2 isCall::Bool function EuropeanOptionND(T::num1, K::num2, isCall::Bool = true) where {num1 <: Number, num2 <: Number} if T <= 0.0 error("Time to Maturity must be positive") elseif K <= 0.0 error("Strike Price must be positive") else zero_typed = zero(num1) + zero(num2) return new{num1, num2, typeof(zero_typed)}(T, K, isCall) end end end export EuropeanOptionND; function payoff(S::Array{abstractMatrix}, euPayoff::EuropeanOptionND, rfCurve::abstractZeroRateCurve, T1::num2 = maturity(euPayoff)) where {abstractMatrix <: AbstractMatrix{num}, num2 <: Number} where {abstractZeroRateCurve <: AbstractZeroRateCurve, num <: Number} r = rfCurve.r T = euPayoff.T iscall = euPayoff.isCall ? 1 : -1 (Nsim, NStep) = size(S[1]) NStep -= 1 index1 = round(Int, T / T1 * NStep) + 1 K = euPayoff.K #ST_all=[ sum(x_i[j,index1] for x_i in S) for j in 1:Nsim] ST_all = S[1][:, index1] for i = 2:length(S) ST_all += S[i][:, index1] end payoff2 = max.(iscall * (ST_all .- K), 0.0) return payoff2 * exp(-integral(r, T)) end
function d = fd05 ( p ) %*****************************************************************************80 % %% FD05 is a signed distance function for the cylinder with a hole. % % Modified: % % 15 September 2005 % % Author: % % John Burkardt % % Parameters: % % Input, real P(N,3), one or more points. % % Output, real D(N), the signed distance of each point to the boundary of the region. % r = sqrt ( p(:,1).^2 + p(:,2).^2 ); z = p(:,3); d1 = r - 1.0; d2 = z - 1.0; d3 = - z - 1.0; d4 = sqrt ( d1.^2 + d2.^2 ); d5 = sqrt ( d1.^2 + d3.^2 ); d = dintersect ( dintersect ( d1, d2 ), d3 ); ix = ( 0.0 < d1 ) & ( 0.0 < d2 ); d(ix) = d4(ix); ix = ( 0.0 < d1 ) & ( 0.0 < d3 ); d(ix) = d5(ix); d = ddiff ( d, dsphere ( p, 0.0, 0.0, 0.0, 0.5 ) ); return end
program sssimp c c This example program is intended to illustrate the c simplest case of using ARPACK in considerable detail. c This code may be used to understand basic usage of ARPACK c and as a template for creating an interface to ARPACK. c c This code shows how to use ARPACK to find a few eigenvalues c (lambda) and corresponding eigenvectors (x) for the standard c eigenvalue problem: c c A*x = lambda*x c c where A is an n by n real symmetric matrix. c c The main points illustrated here are c c 1) How to declare sufficient memory to find NEV c eigenvalues of largest magnitude. Other options c are available. c c 2) Illustration of the reverse communication interface c needed to utilize the top level ARPACK routine SSAUPD c that computes the quantities needed to construct c the desired eigenvalues and eigenvectors(if requested). c c 3) How to extract the desired eigenvalues and eigenvectors c using the ARPACK routine SSEUPD. c c The only thing that must be supplied in order to use this c routine on your problem is to change the array dimensions c appropriately, to specify WHICH eigenvalues you want to compute c and to supply a matrix-vector product c c w <- Av c c in place of the call to AV( ) below. c c Once usage of this routine is understood, you may wish to explore c the other available options to improve convergence, to solve generalized c problems, etc. Look at the file ex-sym.doc in DOCUMENTS directory. c This codes implements c c\Example-1 c ... Suppose we want to solve A*x = lambda*x in regular mode, c where A is derived from the central difference discretization c of the 2-dimensional Laplacian on the unit square with c zero Dirichlet boundary condition. c ... OP = A and B = I. c ... Assume "call av (n,x,y)" computes y = A*x c ... Use mode 1 of SSAUPD. c c\BeginLib c c\Routines called: c ssaupd ARPACK reverse communication interface routine. c sseupd ARPACK routine that returns Ritz values and (optionally) c Ritz vectors. c snrm2 Level 1 BLAS that computes the norm of a vector. c saxpy Level 1 BLAS that computes y <- alpha*x+y. c c\Author c Richard Lehoucq c Danny Sorensen c Chao Yang c Dept. of Computational & c Applied Mathematics c Rice University c Houston, Texas c c\SCCS Information: @(#) c FILE: ssimp.F SID: 2.6 DATE OF SID: 10/17/00 RELEASE: 2 c c\Remarks c 1. None c c\EndLib c c----------------------------------------------------------------------- c c %------------------------------------------------------% c | Storage Declarations: | c | | c | The maximum dimensions for all arrays are | c | set here to accommodate a problem size of | c | N .le. MAXN | c | | c | NEV is the number of eigenvalues requested. | c | See specifications for ARPACK usage below. | c | | c | NCV is the largest number of basis vectors that will | c | be used in the Implicitly Restarted Arnoldi | c | Process. Work per major iteration is | c | proportional to N*NCV*NCV. | c | | c | You must set: | c | | c | MAXN: Maximum dimension of the A allowed. | c | MAXNEV: Maximum NEV allowed. | c | MAXNCV: Maximum NCV allowed. 
| c %------------------------------------------------------% c integer maxn, maxnev, maxncv, ldv parameter (maxn=256, maxnev=10, maxncv=25, $ ldv=maxn ) c c %--------------% c | Local Arrays | c %--------------% c Real & v(ldv,maxncv), workl(maxncv*(maxncv+8)), & workd(3*maxn), d(maxncv,2), resid(maxn), & ax(maxn) logical select(maxncv) integer iparam(11), ipntr(11) c c %---------------% c | Local Scalars | c %---------------% c character bmat*1, which*2 integer ido, n, nev, ncv, lworkl, info, ierr, & j, nx, ishfts, maxitr, mode1, nconv logical rvec Real & tol, sigma c c %------------% c | Parameters | c %------------% c Real & zero parameter (zero = 0.0E+0) c c %-----------------------------% c | BLAS & LAPACK routines used | c %-----------------------------% c Real & snrm2 external snrm2, saxpy c c %--------------------% c | Intrinsic function | c %--------------------% c intrinsic abs c c %-----------------------% c | Executable Statements | c %-----------------------% c c %-------------------------------------------------% c | The following include statement and assignments | c | initiate trace output from the internal | c | actions of ARPACK. See debug.doc in the | c | DOCUMENTS directory for usage. Initially, the | c | most useful information will be a breakdown of | c | time spent in the various stages of computation | c | given by setting msaupd = 1. | c %-------------------------------------------------% c include 'debug.h' ndigit = -3 logfil = 6 msgets = 0 msaitr = 0 msapps = 0 msaupd = 1 msaup2 = 0 mseigt = 0 mseupd = 0 c c %-------------------------------------------------% c | The following sets dimensions for this problem. | c %-------------------------------------------------% c nx = 10 n = nx*nx c c %-----------------------------------------------% c | | c | Specifications for ARPACK usage are set | c | below: | c | | c | 1) NEV = 4 asks for 4 eigenvalues to be | c | computed. | c | | c | 2) NCV = 20 sets the length of the Arnoldi | c | factorization | c | | c | 3) This is a standard problem | c | (indicated by bmat = 'I') | c | | c | 4) Ask for the NEV eigenvalues of | c | largest magnitude | c | (indicated by which = 'LM') | c | See documentation in SSAUPD for the | c | other options SM, LA, SA, LI, SI. | c | | c | Note: NEV and NCV must satisfy the following | c | conditions: | c | NEV <= MAXNEV | c | NEV + 1 <= NCV <= MAXNCV | c %-----------------------------------------------% c nev = 4 ncv = 20 bmat = 'I' which = 'LM' c if ( n .gt. maxn ) then print *, ' ERROR with _SSIMP: N is greater than MAXN ' go to 9000 else if ( nev .gt. maxnev ) then print *, ' ERROR with _SSIMP: NEV is greater than MAXNEV ' go to 9000 else if ( ncv .gt. maxncv ) then print *, ' ERROR with _SSIMP: NCV is greater than MAXNCV ' go to 9000 end if c c %-----------------------------------------------------% c | | c | Specification of stopping rules and initial | c | conditions before calling SSAUPD | c | | c | TOL determines the stopping criterion. | c | | c | Expect | c | abs(lambdaC - lambdaT) < TOL*abs(lambdaC) | c | computed true | c | | c | If TOL .le. 0, then TOL <- macheps | c | (machine precision) is used. | c | | c | IDO is the REVERSE COMMUNICATION parameter | c | used to specify actions to be taken on return | c | from SSAUPD. (See usage below.) | c | | c | It MUST initially be set to 0 before the first | c | call to SSAUPD. 
| c | | c | INFO on entry specifies starting vector information | c | and on return indicates error codes | c | | c | Initially, setting INFO=0 indicates that a | c | random starting vector is requested to | c | start the ARNOLDI iteration. Setting INFO to | c | a nonzero value on the initial call is used | c | if you want to specify your own starting | c | vector (This vector must be placed in RESID.) | c | | c | The work array WORKL is used in SSAUPD as | c | workspace. Its dimension LWORKL is set as | c | illustrated below. | c | | c %-----------------------------------------------------% c lworkl = ncv*(ncv+8) tol = zero info = 0 ido = 0 c c %---------------------------------------------------% c | Specification of Algorithm Mode: | c | | c | This program uses the exact shift strategy | c | (indicated by setting PARAM(1) = 1). | c | IPARAM(3) specifies the maximum number of Arnoldi | c | iterations allowed. Mode 1 of SSAUPD is used | c | (IPARAM(7) = 1). All these options can be changed | c | by the user. For details see the documentation in | c | SSAUPD. | c %---------------------------------------------------% c ishfts = 1 maxitr = 300 mode1 = 1 c iparam(1) = ishfts c iparam(3) = maxitr c iparam(7) = mode1 c c %------------------------------------------------% c | M A I N L O O P (Reverse communication loop) | c %------------------------------------------------% c 10 continue c c %---------------------------------------------% c | Repeatedly call the routine SSAUPD and take | c | actions indicated by parameter IDO until | c | either convergence is indicated or maxitr | c | has been exceeded. | c %---------------------------------------------% c call ssaupd ( ido, bmat, n, which, nev, tol, resid, & ncv, v, ldv, iparam, ipntr, workd, workl, & lworkl, info ) c if (ido .eq. -1 .or. ido .eq. 1) then c c %--------------------------------------% c | Perform matrix vector multiplication | c | y <--- OP*x | c | The user should supply his/her own | c | matrix vector multiplication routine | c | here that takes workd(ipntr(1)) as | c | the input, and return the result to | c | workd(ipntr(2)). | c %--------------------------------------% c call av (nx, workd(ipntr(1)), workd(ipntr(2))) c c %-----------------------------------------% c | L O O P B A C K to call SSAUPD again. | c %-----------------------------------------% c go to 10 c end if c c %----------------------------------------% c | Either we have convergence or there is | c | an error. | c %----------------------------------------% c if ( info .lt. 0 ) then c c %--------------------------% c | Error message. Check the | c | documentation in SSAUPD. | c %--------------------------% c print *, ' ' print *, ' Error with _saupd, info = ', info print *, ' Check documentation in _saupd ' print *, ' ' c else c c %-------------------------------------------% c | No fatal errors occurred. | c | Post-Process using SSEUPD. | c | | c | Computed eigenvalues may be extracted. | c | | c | Eigenvectors may be also computed now if | c | desired. (indicated by rvec = .true.) | c | | c | The routine SSEUPD now called to do this | c | post processing (Other modes may require | c | more complicated post processing than | c | mode1.) | c | | c %-------------------------------------------% c rvec = .true. 
c call sseupd ( rvec, 'All', select, d, v, ldv, sigma, & bmat, n, which, nev, tol, resid, ncv, v, ldv, & iparam, ipntr, workd, workl, lworkl, ierr ) c c %----------------------------------------------% c | Eigenvalues are returned in the first column | c | of the two dimensional array D and the | c | corresponding eigenvectors are returned in | c | the first NCONV (=IPARAM(5)) columns of the | c | two dimensional array V if requested. | c | Otherwise, an orthogonal basis for the | c | invariant subspace corresponding to the | c | eigenvalues in D is returned in V. | c %----------------------------------------------% c if ( ierr .ne. 0) then c c %------------------------------------% c | Error condition: | c | Check the documentation of SSEUPD. | c %------------------------------------% c print *, ' ' print *, ' Error with _seupd, info = ', ierr print *, ' Check the documentation of _seupd. ' print *, ' ' c else c nconv = iparam(5) do 20 j=1, nconv c c %---------------------------% c | Compute the residual norm | c | | c | || A*x - lambda*x || | c | | c | for the NCONV accurately | c | computed eigenvalues and | c | eigenvectors. (iparam(5) | c | indicates how many are | c | accurate to the requested | c | tolerance) | c %---------------------------% c call av(nx, v(1,j), ax) call saxpy(n, -d(j,1), v(1,j), 1, ax, 1) d(j,2) = snrm2(n, ax, 1) d(j,2) = d(j,2) / abs(d(j,1)) c 20 continue c c %-----------------------------% c | Display computed residuals. | c %-----------------------------% c call smout(6, nconv, 2, d, maxncv, -6, & 'Ritz values and relative residuals') end if c c %-------------------------------------------% c | Print additional convergence information. | c %-------------------------------------------% c if ( info .eq. 1) then print *, ' ' print *, ' Maximum number of iterations reached.' print *, ' ' else if ( info .eq. 3) then print *, ' ' print *, ' No shifts could be applied during implicit', & ' Arnoldi update, try increasing NCV.' print *, ' ' end if c print *, ' ' print *, ' _SSIMP ' print *, ' ====== ' print *, ' ' print *, ' Size of the matrix is ', n print *, ' The number of Ritz values requested is ', nev print *, ' The number of Arnoldi vectors generated', & ' (NCV) is ', ncv print *, ' What portion of the spectrum: ', which print *, ' The number of converged Ritz values is ', & nconv print *, ' The number of Implicit Arnoldi update', & ' iterations taken is ', iparam(3) print *, ' The number of OP*x is ', iparam(9) print *, ' The convergence criterion is ', tol print *, ' ' c end if c c %---------------------------% c | Done with program sssimp. | c %---------------------------% c 9000 continue c end c c ------------------------------------------------------------------ c matrix vector subroutine c c The matrix used is the 2 dimensional discrete Laplacian on unit c square with zero Dirichlet boundary condition. c c Computes w <--- OP*v, where OP is the nx*nx by nx*nx block c tridiagonal matrix c c | T -I | c |-I T -I | c OP = | -I T | c | ... -I| c | -I T| c c The subroutine TV is called to computed y<---T*x. 
c subroutine av (nx, v, w) integer nx, j, lo, n2 Real & v(nx*nx), w(nx*nx), one, h2 parameter ( one = 1.0E+0 ) c call tv(nx,v(1),w(1)) call saxpy(nx, -one, v(nx+1), 1, w(1), 1) c do 10 j = 2, nx-1 lo = (j-1)*nx call tv(nx, v(lo+1), w(lo+1)) call saxpy(nx, -one, v(lo-nx+1), 1, w(lo+1), 1) call saxpy(nx, -one, v(lo+nx+1), 1, w(lo+1), 1) 10 continue c lo = (nx-1)*nx call tv(nx, v(lo+1), w(lo+1)) call saxpy(nx, -one, v(lo-nx+1), 1, w(lo+1), 1) c c Scale the vector w by (1/h^2), where h is the mesh size c n2 = nx*nx h2 = one / real((nx+1)*(nx+1)) call sscal(n2, one/h2, w, 1) return end c c------------------------------------------------------------------- subroutine tv (nx, x, y) c integer nx, j Real & x(nx), y(nx), dd, dl, du c Real & one, four parameter (one = 1.0E+0, four = 4.0E+0) c c Compute the matrix vector multiplication y<---T*x c where T is a nx by nx tridiagonal matrix with DD on the c diagonal, DL on the subdiagonal, and DU on the superdiagonal. c c dd = four dl = -one du = -one c y(1) = dd*x(1) + du*x(2) do 10 j = 2,nx-1 y(j) = dl*x(j-1) + dd*x(j) + du*x(j+1) 10 continue y(nx) = dl*x(nx-1) + dd*x(nx) return end
{-# OPTIONS --cubical --no-import-sorts --safe #-} module Cubical.Data.Prod where open import Cubical.Data.Prod.Base public open import Cubical.Data.Prod.Properties public
State Before: F : Type ?u.19004 α : Type u_2 β : Type u_1 γ : Type ?u.19013 inst✝³ : LinearOrderedField α inst✝² : ConditionallyCompleteLinearOrderedField β inst✝¹ : ConditionallyCompleteLinearOrderedField γ inst✝ : Archimedean α ⊢ inducedMap α β 1 = 1 State After: no goals Tactic: exact_mod_cast inducedMap_rat α β 1
#BeautifulPainMovie (#Redha) is the first Malaysian film to tackle the subject of autism. In her feature directing debut, Tunku Mona Riza delivers a quiet, compassionate look at a condition which afflicts 1 in 160 children, according to the World Health Organization. Upon learning that their only son, Danial, has autism, the world of his mother, Alina, and his father, Razlan, suddenly crumbles. Razlan is devastated by his son's condition, and his inability to accept that their son is different from the rest of the children causes friction within the family, but Alina's perseverance and maternal instinct help them wade through the difficult times of raising Danial. With hardly any knowledge of the condition, they struggle to confront the harsh realities and challenges of raising an autistic child. With Sasha, her sister and close friend, by her side, the family may have found a way to improve Danial's quality of life, until a tragic accident causes them to re-think their strategy. Beautiful Pain was chosen by the National Film Development Corporation of Malaysia (Finas) as the official Malaysian entry for the Best Foreign Language Film at the 89th Academy Awards, but unfortunately it was not nominated. Some of the major scenes in Redha were shot on location on a beautiful beach on Terengganu's Redang Island. It is also worth mentioning that Danial's parents in the movie, Alina and Razlan, are played by June Lojong and Namron, who are husband and wife in real life.
Require Import VST.floyd.proofauto. Require Import cprogs.mytest_prog. Require Import cprogs.mytest_def. Require Import FloydSeq.AClight. Import AClightNotations. Require Import CSplit.strong. Instance CompSpecs : compspecs. make_compspecs prog. Defined. Definition Vprog : varspecs. mk_varspecs prog. Defined. Definition f_is_leap_year_spec_annotation := ANNOTATION_WITH year : Z, (( PROP (Int.min_signed <= year <= Int.max_signed) LOCAL (temp _year (Vint (Int.repr year))) SEP ()), ( PROP () LOCAL (temp ret_temp (Val.of_bool (is_leap_year year))) SEP ())). Definition f_is_leap_year_spec_complex := ltac:(uncurry_funcspec f_is_leap_year_spec_annotation). Definition f_is_leap_year_funsig: funsig := (((_year, tint) :: nil), tint). Definition is_leap_year_spec := ltac:(make_funcspec _is_leap_year f_is_leap_year_funsig f_is_leap_year_spec_complex). Definition Gprog : funspecs := ltac:(with_library prog [is_leap_year_spec]). Print semax. Ltac forward_setx := ensure_normal_ret_assert; (* hoist_later_in_pre; *) match goal with | |- semax ?Delta ((PROPx ?P (LOCALx ?Q (SEPx ?R)))) (Sset _ ?e) _ => eapply semax_PTree_set; [ reflexivity | reflexivity | check_cast_assignment | solve_msubst_eval; simplify_casts; reflexivity | first [ quick_typecheck3 | pre_entailer; try solve [entailer!]] ] end. Lemma append_verif: semax_body Vprog Gprog f_is_leap_year is_leap_year_spec. Proof. leaf_function. floyd.forward.start_function. check_precondition. eapply semax_seq'. { (* forward1 ((Sset _a (Ebinop Oadd (Etempvar _year tint) (Econst_int (Int.repr (Zpos xH)) tint) tint))). *) apply semax_derives. eapply semax_PTree_set. [ reflexivity | reflexivity | check_cast_assignment | solve_msubst_eval; simplify_casts; reflexivity | first [ quick_typecheck3 | pre_entailer; try solve [entailer!]] ] forward_setx. clear_Delta_specs. no_loads_expr (Ebinop Oadd (Etempvar _year tint) (Econst_int (Int.repr (Zpos xH)) tint) tint) false. forward.ensure_normal_ret_assert. Locate hoist_later_in_pre. } [ forward1 c | fwd_result; Intros; abbreviate_semax; try fwd_skip ] Locate forward. forward.forward. forward_setx. Locate forward_setx. apply semax_derives. forward_setx. forward. admit. forward. admit. forward. admit. forward. admit. forward_if. admit.
State Before: b u v w : Ordinal hvb : v < b hw : w < b ^ u ⊢ b ^ u * v + w < b ^ succ u State After: case h.e'_4 b u v w : Ordinal hvb : v < b hw : w < b ^ u ⊢ b ^ succ u = b ^ u * b Tactic: convert (opow_mul_add_lt_opow_mul_succ v hw).trans_le (mul_le_mul_left' (succ_le_of_lt hvb) _) using 1 State Before: case h.e'_4 b u v w : Ordinal hvb : v < b hw : w < b ^ u ⊢ b ^ succ u = b ^ u * b State After: no goals Tactic: exact opow_succ b u
open import Data.Product using ( ∃ ; _×_ ; _,_ ) open import Relation.Binary.PropositionalEquality using ( _≡_ ; refl ) open import Relation.Nullary using ( ¬_ ) open import Relation.Unary using ( _∈_ ; ∅ ) open import Web.Semantic.DL.Signature using ( Signature ; CN ; RN ) open import Web.Semantic.Util using ( Setoid ; Subset ; _∘_ ; False ) module Web.Semantic.DL.TBox.Interp where data Interp (Σ : Signature) : Set₁ where interp : (Δ : Set) → (_≈_ : Δ → Δ → Set) → (ref : ∀ {x} → (x ≈ x)) → (sym : ∀ {x y} → (x ≈ y) → (y ≈ x)) → (trans : ∀ {x y z} → (x ≈ y) → (y ≈ z) → (x ≈ z)) → (con : CN Σ → Subset Δ) → (rol : RN Σ → Subset (Δ × Δ)) → (con-≈ : ∀ {x y} c → (x ∈ con c) → (x ≈ y) → (y ∈ con c)) → (rol-≈ : ∀ {w x y z} r → (w ≈ x) → ((x , y) ∈ rol r) → (y ≈ z) → ((w , z) ∈ rol r)) → Interp Σ Δ : ∀ {Σ} → Interp Σ → Set Δ (interp Δ _≈_ ref sym trans con rol con-≈ rol-≈) = Δ _⊨_≈_ : ∀ {Σ} → (I : Interp Σ) → (Δ I) → (Δ I) → Set _⊨_≈_ (interp Δ _≈_ ref sym trans con rol con-≈ rol-≈) = _≈_ ≈-refl : ∀ {Σ} → (I : Interp Σ) → ∀ {x} → (I ⊨ x ≈ x) ≈-refl (interp Δ _≈_ ref sym trans con rol con-≈ rol-≈) = ref ≈-sym : ∀ {Σ} → (I : Interp Σ) → ∀ {x y} → (I ⊨ x ≈ y) → (I ⊨ y ≈ x) ≈-sym (interp Δ _≈_ ref sym trans con rol con-≈ rol-≈) = sym ≈-trans : ∀ {Σ} → (I : Interp Σ) → ∀ {x y z} → (I ⊨ x ≈ y) → (I ⊨ y ≈ z) → (I ⊨ x ≈ z) ≈-trans (interp Δ _≈_ ref sym trans con rol con-≈ rol-≈) = trans con : ∀ {Σ} → (I : Interp Σ) → CN Σ → Subset (Δ I) con (interp Δ _≈_ ref sym trans con rol con-≈ rol-≈) = con rol : ∀ {Σ} → (I : Interp Σ) → RN Σ → Subset (Δ I × Δ I) rol (interp Δ _≈_ ref sym trans con rol con-≈ rol-≈) = rol con-≈ : ∀ {Σ} → (I : Interp Σ) → ∀ {x y} c → (x ∈ con I c) → (I ⊨ x ≈ y) → (y ∈ con I c) con-≈ (interp Δ _≈_ ref sym trans con rol con-≈ rol-≈) = con-≈ rol-≈ : ∀ {Σ} → (I : Interp Σ) → ∀ {w x y z} r → (I ⊨ w ≈ x) → ((x , y) ∈ rol I r) → (I ⊨ y ≈ z) → ((w , z) ∈ rol I r) rol-≈ (interp Δ _≈_ ref sym trans con rol con-≈ rol-≈) = rol-≈ _⊨_≉_ : ∀ {Σ} → (I : Interp Σ) → (Δ I) → (Δ I) → Set I ⊨ x ≉ y = ¬(I ⊨ x ≈ y) emp : ∀ {Σ} → Interp Σ emp = interp False (λ ()) (λ {}) (λ {}) (λ {}) (λ c → ∅) (λ r → ∅) (λ {}) (λ {}) ≈-refl′ : ∀ {Σ} (I : Interp Σ) → ∀ {x y} → (x ≡ y) → (I ⊨ x ≈ y) ≈-refl′ I refl = ≈-refl I
[STATEMENT] lemma pow_res_classes_semialg: assumes "S \<in> pow_res_classes n" shows "is_semialgebraic 1 (to_R1` S)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. is_semialgebraic 1 ((\<lambda>a. [a]) ` S) [PROOF STEP] using pow_res_classes_univ_semialg assms(1) is_univ_semialgebraicE [PROOF STATE] proof (prove) using this: ?S \<in> pow_res_classes ?n \<Longrightarrow> is_univ_semialgebraic ?S S \<in> pow_res_classes n is_univ_semialgebraic ?S \<Longrightarrow> is_semialgebraic 1 ((\<lambda>a. [a]) ` ?S) goal (1 subgoal): 1. is_semialgebraic 1 ((\<lambda>a. [a]) ` S) [PROOF STEP] by blast
factIter <- function(n) { f = 1 if (n > 1) { for (i in 2:n) f <- f * i } f }
Formal statement is: lemma contour_integrable_holomorphic_simple: assumes fh: "f holomorphic_on S" and os: "open S" and g: "valid_path g" "path_image g \<subseteq> S" shows "f contour_integrable_on g" Informal statement is: If $f$ is holomorphic on an open set $S$ and $g$ is a valid path whose image lies in $S$, then $f$ is contour integrable along $g$.
(* *********************************************************************) (* *) (* The Compcert verified compiler *) (* *) (* Xavier Leroy, INRIA Paris-Rocquencourt *) (* *) (* Copyright Institut National de Recherche en Informatique et en *) (* Automatique. All rights reserved. This file is distributed *) (* under the terms of the INRIA Non-Commercial License Agreement. *) (* *) (* *********************************************************************) (** Instruction selection *) (** The instruction selection pass recognizes opportunities for using combined arithmetic and logical operations and addressing modes offered by the target processor. For instance, the expression [x + 1] can take advantage of the "immediate add" instruction of the processor, and on the PowerPC, the expression [(x >> 6) & 0xFF] can be turned into a "rotate and mask" instruction. Instruction selection proceeds by bottom-up rewriting over expressions. The source language is Cminor and the target language is CminorSel. *) Require String. Require Import Coqlib Maps. Require Import AST Errors Integers Globalenvs Switch. Require Cminor. Require Import Op CminorSel. Require Import SelectOp SplitLong SelectLong SelectDiv. Require Machregs. Local Open Scope cminorsel_scope. Local Open Scope error_monad_scope. (** Conversion of conditions *) Function condexpr_of_expr (e: expr) : condexpr := match e with | Eop (Ocmp c) el => CEcond c el | Econdition a b c => CEcondition a (condexpr_of_expr b) (condexpr_of_expr c) | Elet a b => CElet a (condexpr_of_expr b) | _ => CEcond (Ccompuimm Cne Int.zero) (e ::: Enil) end. (** Conversion of loads and stores *) Definition load (chunk: memory_chunk) (e1: expr) := match addressing chunk e1 with | (mode, args) => Eload chunk mode args end. Definition store (chunk: memory_chunk) (e1 e2: expr) := match addressing chunk e1 with | (mode, args) => Sstore chunk mode args e2 end. (** Instruction selection for operator applications. Most of the work is done by the processor-specific smart constructors defined in modules [SelectOp] and [SelectLong]. *) Section SELECTION. Definition globdef := AST.globdef Cminor.fundef unit. Variable defmap: PTree.t globdef. Context {hf: helper_functions}. Definition sel_constant (cst: Cminor.constant) : expr := match cst with | Cminor.Ointconst n => Eop (Ointconst n) Enil | Cminor.Ofloatconst f => Eop (Ofloatconst f) Enil | Cminor.Osingleconst f => Eop (Osingleconst f) Enil | Cminor.Olongconst n => longconst n | Cminor.Oaddrsymbol id ofs => addrsymbol id ofs | Cminor.Oaddrstack ofs => addrstack ofs end. 
Definition sel_unop (op: Cminor.unary_operation) (arg: expr) : expr := match op with | Cminor.Ocast8unsigned => cast8unsigned arg | Cminor.Ocast8signed => cast8signed arg | Cminor.Ocast16unsigned => cast16unsigned arg | Cminor.Ocast16signed => cast16signed arg | Cminor.Onegint => negint arg | Cminor.Onotint => notint arg | Cminor.Onegf => negf arg | Cminor.Oabsf => absf arg | Cminor.Onegfs => negfs arg | Cminor.Oabsfs => absfs arg | Cminor.Osingleoffloat => singleoffloat arg | Cminor.Ofloatofsingle => floatofsingle arg | Cminor.Ointoffloat => intoffloat arg | Cminor.Ointuoffloat => intuoffloat arg | Cminor.Ofloatofint => floatofint arg | Cminor.Ofloatofintu => floatofintu arg | Cminor.Ointofsingle => intofsingle arg | Cminor.Ointuofsingle => intuofsingle arg | Cminor.Osingleofint => singleofint arg | Cminor.Osingleofintu => singleofintu arg | Cminor.Onegl => negl arg | Cminor.Onotl => notl arg | Cminor.Ointoflong => intoflong arg | Cminor.Olongofint => longofint arg | Cminor.Olongofintu => longofintu arg | Cminor.Olongoffloat => longoffloat arg | Cminor.Olonguoffloat => longuoffloat arg | Cminor.Ofloatoflong => floatoflong arg | Cminor.Ofloatoflongu => floatoflongu arg | Cminor.Olongofsingle => longofsingle arg | Cminor.Olonguofsingle => longuofsingle arg | Cminor.Osingleoflong => singleoflong arg | Cminor.Osingleoflongu => singleoflongu arg end. Definition sel_binop (op: Cminor.binary_operation) (arg1 arg2: expr) : expr := match op with | Cminor.Oadd => add arg1 arg2 | Cminor.Osub => sub arg1 arg2 | Cminor.Omul => mul arg1 arg2 | Cminor.Odiv => divs arg1 arg2 | Cminor.Odivu => divu arg1 arg2 | Cminor.Omod => mods arg1 arg2 | Cminor.Omodu => modu arg1 arg2 | Cminor.Oand => and arg1 arg2 | Cminor.Oor => or arg1 arg2 | Cminor.Oxor => xor arg1 arg2 | Cminor.Oshl => shl arg1 arg2 | Cminor.Oshr => shr arg1 arg2 | Cminor.Oshru => shru arg1 arg2 | Cminor.Oaddf => addf arg1 arg2 | Cminor.Osubf => subf arg1 arg2 | Cminor.Omulf => mulf arg1 arg2 | Cminor.Odivf => divf arg1 arg2 | Cminor.Oaddfs => addfs arg1 arg2 | Cminor.Osubfs => subfs arg1 arg2 | Cminor.Omulfs => mulfs arg1 arg2 | Cminor.Odivfs => divfs arg1 arg2 | Cminor.Oaddl => addl arg1 arg2 | Cminor.Osubl => subl arg1 arg2 | Cminor.Omull => mull arg1 arg2 | Cminor.Odivl => divls arg1 arg2 | Cminor.Odivlu => divlu arg1 arg2 | Cminor.Omodl => modls arg1 arg2 | Cminor.Omodlu => modlu arg1 arg2 | Cminor.Oandl => andl arg1 arg2 | Cminor.Oorl => orl arg1 arg2 | Cminor.Oxorl => xorl arg1 arg2 | Cminor.Oshll => shll arg1 arg2 | Cminor.Oshrl => shrl arg1 arg2 | Cminor.Oshrlu => shrlu arg1 arg2 | Cminor.Ocmp c => comp c arg1 arg2 | Cminor.Ocmpu c => compu c arg1 arg2 | Cminor.Ocmpf c => compf c arg1 arg2 | Cminor.Ocmpfs c => compfs c arg1 arg2 | Cminor.Ocmpl c => cmpl c arg1 arg2 | Cminor.Ocmplu c => cmplu c arg1 arg2 end. (** Conversion from Cminor expression to Cminorsel expressions *) Fixpoint sel_expr (a: Cminor.expr) : expr := match a with | Cminor.Evar id => Evar id | Cminor.Econst cst => sel_constant cst | Cminor.Eunop op arg => sel_unop op (sel_expr arg) | Cminor.Ebinop op arg1 arg2 => sel_binop op (sel_expr arg1) (sel_expr arg2) | Cminor.Eload chunk addr => load chunk (sel_expr addr) end. Fixpoint sel_exprlist (al: list Cminor.expr) : exprlist := match al with | nil => Enil | a :: bl => Econs (sel_expr a) (sel_exprlist bl) end. (** Recognition of immediate calls and calls to built-in functions that should be inlined *) Inductive call_kind : Type := | Call_default | Call_imm (id: ident) | Call_builtin (ef: external_function). 
Definition expr_is_addrof_ident (e: Cminor.expr) : option ident := match e with | Cminor.Econst (Cminor.Oaddrsymbol id ofs) => if Ptrofs.eq ofs Ptrofs.zero then Some id else None | _ => None end. Definition classify_call (e: Cminor.expr) : call_kind := match expr_is_addrof_ident e with | None => Call_default | Some id => match defmap!id with | Some(Gfun(External ef)) => if ef_inline ef then Call_builtin ef else Call_imm id | _ => Call_imm id end end. (** Builtin arguments and results *) Definition sel_builtin_arg (e: Cminor.expr) (c: builtin_arg_constraint): AST.builtin_arg expr := let e' := sel_expr e in let ba := builtin_arg e' in if builtin_arg_ok ba c then ba else BA e'. Fixpoint sel_builtin_args (el: list Cminor.expr) (cl: list builtin_arg_constraint): list (AST.builtin_arg expr) := match el with | nil => nil | e :: el => sel_builtin_arg e (List.hd OK_default cl) :: sel_builtin_args el (List.tl cl) end. Definition sel_builtin_res (optid: option ident) : builtin_res ident := match optid with | None => BR_none | Some id => BR id end. (** Conversion of Cminor [switch] statements to decision trees. *) Parameter compile_switch: Z -> nat -> table -> comptree. Section SEL_SWITCH. Variable make_cmp_eq: expr -> Z -> expr. Variable make_cmp_ltu: expr -> Z -> expr. Variable make_sub: expr -> Z -> expr. Variable make_to_int: expr -> expr. Fixpoint sel_switch (arg: nat) (t: comptree): exitexpr := match t with | CTaction act => XEexit act | CTifeq key act t' => XEcondition (condexpr_of_expr (make_cmp_eq (Eletvar arg) key)) (XEexit act) (sel_switch arg t') | CTiflt key t1 t2 => XEcondition (condexpr_of_expr (make_cmp_ltu (Eletvar arg) key)) (sel_switch arg t1) (sel_switch arg t2) | CTjumptable ofs sz tbl t' => XElet (make_sub (Eletvar arg) ofs) (XEcondition (condexpr_of_expr (make_cmp_ltu (Eletvar O) sz)) (XEjumptable (make_to_int (Eletvar O)) tbl) (sel_switch (S arg) t')) end. End SEL_SWITCH. Definition sel_switch_int := sel_switch (fun arg n => comp Ceq arg (Eop (Ointconst (Int.repr n)) Enil)) (fun arg n => compu Clt arg (Eop (Ointconst (Int.repr n)) Enil)) (fun arg ofs => sub arg (Eop (Ointconst (Int.repr ofs)) Enil)) (fun arg => arg). Definition sel_switch_long := sel_switch (fun arg n => cmpl Ceq arg (longconst (Int64.repr n))) (fun arg n => cmplu Clt arg (longconst (Int64.repr n))) (fun arg ofs => subl arg (longconst (Int64.repr ofs))) lowlong. (** Conversion from Cminor statements to Cminorsel statements. 
*) Fixpoint sel_stmt (s: Cminor.stmt) : res stmt := match s with | Cminor.Sskip => OK Sskip | Cminor.Sassign id e => OK (Sassign id (sel_expr e)) | Cminor.Sstore chunk addr rhs => OK (store chunk (sel_expr addr) (sel_expr rhs)) | Cminor.Scall optid sg fn args => OK (match classify_call fn with | Call_default => Scall optid sg (inl _ (sel_expr fn)) (sel_exprlist args) | Call_imm id => Scall optid sg (inr _ id) (sel_exprlist args) | Call_builtin ef => Sbuiltin (sel_builtin_res optid) ef (sel_builtin_args args (Machregs.builtin_constraints ef)) end) | Cminor.Sbuiltin optid ef args => OK (Sbuiltin (sel_builtin_res optid) ef (sel_builtin_args args (Machregs.builtin_constraints ef))) | Cminor.Stailcall sg fn args => OK (match classify_call fn with | Call_imm id => Stailcall sg (inr _ id) (sel_exprlist args) | _ => Stailcall sg (inl _ (sel_expr fn)) (sel_exprlist args) end) | Cminor.Sseq s1 s2 => do s1' <- sel_stmt s1; do s2' <- sel_stmt s2; OK (Sseq s1' s2') | Cminor.Sifthenelse e ifso ifnot => do ifso' <- sel_stmt ifso; do ifnot' <- sel_stmt ifnot; OK (Sifthenelse (condexpr_of_expr (sel_expr e)) ifso' ifnot') | Cminor.Sloop body => do body' <- sel_stmt body; OK (Sloop body') | Cminor.Sblock body => do body' <- sel_stmt body; OK (Sblock body') | Cminor.Sexit n => OK (Sexit n) | Cminor.Sswitch false e cases dfl => let t := compile_switch Int.modulus dfl cases in if validate_switch Int.modulus dfl cases t then OK (Sswitch (XElet (sel_expr e) (sel_switch_int O t))) else Error (msg "Selection: bad switch (int)") | Cminor.Sswitch true e cases dfl => let t := compile_switch Int64.modulus dfl cases in if validate_switch Int64.modulus dfl cases t then OK (Sswitch (XElet (sel_expr e) (sel_switch_long O t))) else Error (msg "Selection: bad switch (long)") | Cminor.Sreturn None => OK (Sreturn None) | Cminor.Sreturn (Some e) => OK (Sreturn (Some (sel_expr e))) | Cminor.Slabel lbl body => do body' <- sel_stmt body; OK (Slabel lbl body') | Cminor.Sgoto lbl => OK (Sgoto lbl) end. End SELECTION. (** Conversion of functions. *) Definition sel_function (dm: PTree.t globdef) (hf: helper_functions) (f: Cminor.function) : res function := do body' <- sel_stmt dm f.(Cminor.fn_body); OK (mkfunction f.(Cminor.fn_sig) f.(Cminor.fn_params) f.(Cminor.fn_vars) f.(Cminor.fn_stackspace) body'). Definition sel_fundef (dm: PTree.t globdef) (hf: helper_functions) (f: Cminor.fundef) : res fundef := transf_partial_fundef (sel_function dm hf) f. (** Setting up the helper functions. *) (** We build a partial mapping from global identifiers to their definitions, restricting ourselves to the globals we are interested in, namely the external function declarations that are marked as runtime library helpers. This ensures that the mapping remains small and that [lookup_helper] below is efficient. *) Definition globdef_of_interest (gd: globdef) : bool := match gd with | Gfun (External (EF_runtime name sg)) => true | _ => false end. Definition record_globdefs (defmap: PTree.t globdef) : PTree.t globdef := PTree.fold (fun m id gd => if globdef_of_interest gd then PTree.set id gd m else m) defmap (PTree.empty globdef). Definition lookup_helper_aux (name: String.string) (sg: signature) (res: option ident) (id: ident) (gd: globdef) := match gd with | Gfun (External (EF_runtime name' sg')) => if String.string_dec name name' && signature_eq sg sg' then Some id else res | _ => res end. 
Definition lookup_helper (globs: PTree.t globdef) (name: String.string) (sg: signature) : res ident := match PTree.fold (lookup_helper_aux name sg) globs None with | Some id => OK id | None => Error (MSG name :: MSG ": missing or incorrect declaration" :: nil) end. Local Open Scope string_scope. Definition get_helpers (defmap: PTree.t globdef) : res helper_functions := let globs := record_globdefs defmap in do i64_dtos <- lookup_helper globs "__i64_dtos" sig_f_l ; do i64_dtou <- lookup_helper globs "__i64_dtou" sig_f_l ; do i64_stod <- lookup_helper globs "__i64_stod" sig_l_f ; do i64_utod <- lookup_helper globs "__i64_utod" sig_l_f ; do i64_stof <- lookup_helper globs "__i64_stof" sig_l_s ; do i64_utof <- lookup_helper globs "__i64_utof" sig_l_s ; do i64_sdiv <- lookup_helper globs "__i64_sdiv" sig_ll_l ; do i64_udiv <- lookup_helper globs "__i64_udiv" sig_ll_l ; do i64_smod <- lookup_helper globs "__i64_smod" sig_ll_l ; do i64_umod <- lookup_helper globs "__i64_umod" sig_ll_l ; do i64_shl <- lookup_helper globs "__i64_shl" sig_li_l ; do i64_shr <- lookup_helper globs "__i64_shr" sig_li_l ; do i64_sar <- lookup_helper globs "__i64_sar" sig_li_l ; do i64_umulh <- lookup_helper globs "__i64_umulh" sig_ll_l ; do i64_smulh <- lookup_helper globs "__i64_smulh" sig_ll_l ; OK (mk_helper_functions i64_dtos i64_dtou i64_stod i64_utod i64_stof i64_utof i64_sdiv i64_udiv i64_smod i64_umod i64_shl i64_shr i64_sar i64_umulh i64_smulh). (** Conversion of programs. *) Definition sel_program (p: Cminor.program) : res program := let dm := prog_defmap p in do hf <- get_helpers dm; transform_partial_program (sel_fundef dm hf) p.
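To make the call classification above concrete outside of Coq, here is a minimal Haskell sketch under simplified, assumed types (Ident, ExtFun, GlobDef and the definition map are stand-ins, not CompCert's); the authoritative definition is classify_call above, which additionally requires the callee expression to be the address of a symbol at offset zero. import qualified Data.Map as Map type Ident = String data ExtFun = ExtFun { efName :: String, efInline :: Bool } data GlobDef = GFunExternal ExtFun | GFunInternal | GVar data CallKind = CallDefault | CallImm Ident | CallBuiltin ExtFun -- 'Nothing' models a callee that is not "address of symbol at offset 0"; -- such calls stay indirect. Otherwise the global environment decides. classifyCall :: Map.Map Ident GlobDef -> Maybe Ident -> CallKind classifyCall defmap callee = case callee of Nothing -> CallDefault Just i -> case Map.lookup i defmap of Just (GFunExternal ef) | efInline ef -> CallBuiltin ef -- inlinable builtin _ -> CallImm i -- plain direct call Note that an unknown or non-inlinable symbol still becomes a direct (CallImm) call, exactly as in the Coq match.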
/- Copyright (c) 2018 Simon Hudon. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Mario Carneiro, Johannes Hölzl, Simon Hudon, Kenny Lau -/ import data.multiset.basic import control.traversable.lemmas import control.traversable.instances /-! # Functoriality of `multiset`. -/ universes u namespace multiset open list instance : functor multiset := { map := @map } @[simp] lemma fmap_def {α' β'} {s : multiset α'} (f : α' → β') : f <$> s = s.map f := rfl instance : is_lawful_functor multiset := by refine { .. }; intros; simp open is_lawful_traversable is_comm_applicative variables {F : Type u → Type u} [applicative F] [is_comm_applicative F] variables {α' β' : Type u} (f : α' → F β') def traverse : multiset α' → F (multiset β') := quotient.lift (functor.map coe ∘ traversable.traverse f) begin introv p, unfold function.comp, induction p, case perm.nil { refl }, case perm.cons { have : multiset.cons <$> f p_x <*> (coe <$> traverse f p_l₁) = multiset.cons <$> f p_x <*> (coe <$> traverse f p_l₂), { rw [p_ih] }, simpa with functor_norm }, case perm.swap { have : (λa b (l:list β'), (↑(a :: b :: l) : multiset β')) <$> f p_y <*> f p_x = (λa b l, ↑(a :: b :: l)) <$> f p_x <*> f p_y, { rw [is_comm_applicative.commutative_map], congr, funext a b l, simpa [flip] using perm.swap b a l }, simp [(∘), this] with functor_norm }, case perm.trans { simp [*] } end instance : monad multiset := { pure := λ α x, x ::ₘ 0, bind := @bind, .. multiset.functor } @[simp] lemma pure_def {α} : (pure : α → multiset α) = (λ x, x ::ₘ 0) := rfl @[simp] lemma bind_def {α β} : (>>=) = @bind α β := rfl instance : is_lawful_monad multiset := { bind_pure_comp_eq_map := λ α β f s, multiset.induction_on s rfl $ λ a s ih, by simp, pure_bind := λ α β x f, by simp, bind_assoc := @bind_assoc } open functor open traversable is_lawful_traversable @[simp] lemma lift_coe {α β : Type*} (x : list α) (f : list α → β) (h : ∀ a b : list α, a ≈ b → f a = f b) : quotient.lift f h (x : multiset α) = f x := quotient.lift_mk _ _ _ @[simp] lemma map_comp_coe {α β} (h : α → β) : functor.map h ∘ coe = (coe ∘ functor.map h : list α → multiset β) := by funext; simp [functor.map] lemma id_traverse {α : Type*} (x : multiset α) : traverse id.mk x = x := quotient.induction_on x begin intro, simp [traverse], refl end lemma comp_traverse {G H : Type* → Type*} [applicative G] [applicative H] [is_comm_applicative G] [is_comm_applicative H] {α β γ : Type*} (g : α → G β) (h : β → H γ) (x : multiset α) : traverse (comp.mk ∘ functor.map h ∘ g) x = comp.mk (functor.map (traverse h) (traverse g x)) := quotient.induction_on x (by intro; simp [traverse,comp_traverse] with functor_norm; simp [(<$>),(∘)] with functor_norm) lemma map_traverse {G : Type* → Type*} [applicative G] [is_comm_applicative G] {α β γ : Type*} (g : α → G β) (h : β → γ) (x : multiset α) : functor.map (functor.map h) (traverse g x) = traverse (functor.map h ∘ g) x := quotient.induction_on x (by intro; simp [traverse] with functor_norm; rw [is_lawful_functor.comp_map, map_traverse]) lemma traverse_map {G : Type* → Type*} [applicative G] [is_comm_applicative G] {α β γ : Type*} (g : α → β) (h : β → G γ) (x : multiset α) : traverse h (map g x) = traverse (h ∘ g) x := quotient.induction_on x (by intro; simp [traverse]; rw [← traversable.traverse_map h g]; [ refl, apply_instance ]) lemma naturality {G H : Type* → Type*} [applicative G] [applicative H] [is_comm_applicative G] [is_comm_applicative H] (eta : applicative_transformation G H) {α β : Type*} (f : α → G 
β) (x : multiset α) : eta (traverse f x) = traverse (@eta _ ∘ f) x := quotient.induction_on x (by intro; simp [traverse,is_lawful_traversable.naturality] with functor_norm) end multiset
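For orientation: the only step in the quotient.lift defining traverse above that genuinely needs is_comm_applicative, rather than a plain applicative, is the perm.swap case. There the two list-level results -- cons <$> f x <*> (cons <$> f y <*> traverse f l) -- cons <$> f y <*> (cons <$> f x <*> traverse f l) coincide as multisets, since x :: y :: l and y :: x :: l are permutations of each other, but the effects of f x and f y run in opposite orders, and only a commutative applicative licenses exchanging them (via is_comm_applicative.commutative_map, as in the proof).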
[STATEMENT] lemma fset_sum_ge_elem: "finite xs \<Longrightarrow> x \<in> xs \<Longrightarrow> (\<Sum>u\<in>xs. (f::'a \<Rightarrow> nat) u) \<ge> f x" [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>finite xs; x \<in> xs\<rbrakk> \<Longrightarrow> f x \<le> sum f xs [PROOF STEP] by (simp add: sum_nonneg_leq_bound)
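In conventional notation, the lemma states that for a finite set $X$ and a natural-number-valued $f$, \[ x \in X \;\Longrightarrow\; f(x) \le \sum_{u \in X} f(u), \] which holds because $\sum_{u \in X} f(u) = f(x) + \sum_{u \in X \setminus \{x\}} f(u)$ and the second summand is nonnegative.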
If $f$ is a function from a topological space to a normed vector space and $f$ converges to $l$, then any bound on the norm of $f$ is also a bound on the norm of $l$.
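Spelled out with the bound made explicit (the informal sentence leaves it implicit, and for simplicity we quantify over all points, although only an eventual bound is needed), the claim is \[ f \to l \ \text{ and } \ \forall x.\ \|f(x)\| \le e \quad\Longrightarrow\quad \|l\| \le e, \] which follows because the norm is continuous and the set $\{v : \|v\| \le e\}$ is closed.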
% The OxyGEN project by Protofy.xyz % Together against Covid-19 % V4.0 rev.17/03/2020 % % Website/blog: oxygen.protofy.xyz % Contact: [email protected] %GEOMETRICAL PARAMETERS clear; %Length from the bearing to the hinge l0 = 27.5; %Length of the vertical wall that supports the hinge h0 = 6.5; %Length of the vertical support of the bearing hb = 4.0; %Bearing radius br = 1.1; %Array of different max and min positions of the bearing h1a = [9, 10, 11]; h2a = [20, 20, 20]; %Number of cycles per cam turn (1 to 3). nc = 1; %Minimum radius of the camshaft rmin = 5.5; %BREATHING CYCLE PARAMETERS %Duration of the inhale cycle / duration of the whole cycle lambda1 = 0.500; lambda2 = 0.100; %Soft transition between inhale and exhale cycles dpsi21 = 0.11; dpsi12 = 0.15; %Adjust parameters of the inhale curve ga1 = 3.8; gb1 = .9; ff1 = 50; %Adjust parameters of the exhale curve ga2 = 3.5; gb2 = .70; ff2 = 55; %GENERATION OF THE CURVES AND THE CAMSHAFT %Generation of the geometry l = sqrt(l0^2+hb^2); for i = 1:numel(h1a); ymin(i) = h1a(i); ymax(i) = h2a(i); alphamin(i) = acos((h0 - h1a(i)) / l); alphamax(i) = acos((h0 - h2a(i)) / l); xmin(i) = l*sin(alphamin(i)); xmax(i) = l*sin(alphamax(i)); d(i) = sqrt((xmin(i)-xmax(i))^2 + (ymin(i)-ymax(i))^2); alphatan(i) = (alphamin(i) + alphamax(i)) / 2; xtan(i) = l*sin(alphatan(i)); ytan(i) = h0 - l*cos(alphatan(i)); xsup(i) = xtan(i) + (d(i)/2)*cos(alphatan(i)); xinf(i) = xtan(i) - (d(i)/2)*cos(alphatan(i)); ysup(i) = ytan(i) + (d(i)/2)*sin(alphatan(i)); yinf(i) = ytan(i) - (d(i)/2)*sin(alphatan(i)); xcam(i) = xtan(i) + (d(i)/2 + rmin + br)*cos(alphatan(i)); ycam(i) = ytan(i) + (d(i)/2 + rmin + br)*sin(alphatan(i)); end; %Time increment dt = 0.01; %Time coordinate during the whole cycle theta = 0:dt:2*pi; %Generation of the soft transition between inhale and exhale curves psi1 = []; psi2 = []; for i = 1:numel(theta); if theta(i) < (lambda2-dpsi21/2)*2*pi; psi1(i) = 0; elseif theta(i) < (lambda2+dpsi21/2)*2*pi; psi1(i) = 0.5 + 0.5*cos((theta(i)-(lambda2+dpsi21/2)*2*pi)/(2*dpsi21)); else %remaining range, theta(i) >= (lambda2+dpsi21/2)*2*pi ('else' takes no condition in MATLAB; the stray comparison here was dead code) psi1(i) = 1; end; psi2(i) = 1 - psi1(i); end; %Generation of the complete breathing cycle rho(theta) rho = []; rho1 = []; rho2 = []; rho2next = []; rhomin = 1000; rhomax = 0; for i = 1:numel(theta); %Inhale curve rho1(i) = ff1*gampdf(theta(i), ga1, gb1); rho1next(i) = ff1*gampdf(theta(i)+2*pi, ga1, gb1); %Exhale curve rho2(i) = ff2*gampdf(theta(i), ga2, gb2); rho2next(i) = ff2*gampdf(theta(i)+2*pi, ga2, gb2); rho(i) = psi1(i)*rho1(i) + psi2(i)*rho1next(i); %Capturing min and max in order to generate the normalized curve if rho(i) > rhomax rhomax = rho(i); end; if rho(i) < rhomin rhomin = rho(i); end; end; %Generation of a normalized curve and camshaft rhonorm = []; rhocam = []; for i = 1:numel(theta); rhonorm(i) = (rho(i)-rhomin)/(rhomax-rhomin); for n = 1:numel(d) rhocam(n,i) = rmin + rhonorm(i)*d(n); end; end; %Generation of the first derivative of the camshaft geometry to analyze and %validate the design drho = []; drhonorm = []; drhocam = []; a = rmin; b = 1/dt; for i = 1 : numel(rho)-1; drho(i) = b*(rho(i+1)-rho(i)); drhonorm(i) = b*(rhonorm(i+1)-rhonorm(i)); drhocam(i) = b*(rhocam(i+1)-rhocam(i)) + a; end; drho(numel(rho)) = b*(rho(1)-rho(numel(rho))); drhonorm(numel(rhonorm)) = b*(rhonorm(1)-rhonorm(numel(rhonorm))); drhocam(numel(rhocam)) = b*(rhocam(1)-rhocam(numel(rhocam))) + a; %X and Y coordinates during two cycles (for 2-cycle camshaft plot) theta2 = [theta/2, (2*pi+theta)/2]; rhocam2 = [rhocam, rhocam]; drhocam2 = [drho, drho]; %X 
and Y coordinates during three cycles (for 3-cycle camshaft plot) theta3 = [theta/3, (2*pi+theta)/3, (4*pi+theta)/3]; rhocam3 = [rhocam, rhocam, rhocam]; drhocam3 = [drho, drho, drho]; %GENERATION OF THE PLOTS %Trying to print it out in real size (FAIL) %set(gcf,'PaperUnits','centimeters'); %set(gcf,'PaperSize',[42 29.7]); fpos = figure('Name', 'Dimensions', 'Units', 'centimeters', 'NumberTitle', 'off'); set(gcf,'PaperUnits','centimeters', 'PaperSize', [42/2 27.9/2]); set(gcf, 'units', 'centimeters', 'position', [0, 0, 28, 20]); fpos = gcf; hold on xlim([-5 35]); ylim([0 30]); for i = 1:numel(xmin) plot([0, xmin(i)], [h0, ymin(i)], '-x') plot([0, xmax(i)], [h0, ymax(i)], '-x') plot([0, xtan(i)], [h0, ytan(i)], '--xb') plot([xsup(i), xinf(i)], [ysup(i), yinf(i)], '--xr') scatter(xcam(i), ycam(i)) end; hold off %Plot of the breathing cycle interpolation by curves fcycle = figure('Name', 'Breath Cycle curves', 'Units', 'centimeters', 'NumberTitle', 'off'); hold on plot(theta, psi1, '-k') plot(theta, psi2, '-k') plot(theta, rho1, '-g') plot(theta, rho1next, '-g') plot(theta, rho2, '-g') plot(theta, rho2next, '-g') %plot(theta, drho, '-r') plot(theta, rho, '-b') hold off set(gcf,'PaperUnits','centimeters', 'PaperSize', [42/2 27.9/2]); set(gcf, 'units', 'centimeters', 'position', [0, 0, 27.9, 27.9]); fcam3 = gcf; %Plot of the normalized breathing cycle and first derivative fnorm = figure('Name', 'Normalized Breath Cycle', 'Units', 'centimeters', 'NumberTitle', 'off'); hold on %plot(theta, drhonorm, '-r') plot(theta, rhonorm, '-b') hold off set(gcf,'PaperUnits','centimeters', 'PaperSize', [42/2 27.9/2]); set(gcf, 'units', 'centimeters', 'position', [0, 0, 27.9, 27.9]); fcam3 = gcf; %Plot of a 1-cycle camshaft for n = 1:numel(d) fcam1 = figure('Name', '1-Cycle Camshaft', 'Units', 'centimeters', 'NumberTitle', 'off'); %hold on %polarplot(theta, drhocam, '-r') polar(theta, rhocam(n,:), '-b') %rlim([0 25]); hold off set(gcf,'PaperUnits','centimeters', 'PaperSize', [42 27.9]); set(gcf, 'units', 'centimeters', 'position', [0, 0, 27.9, 27.9]); fcam3 = gcf; end % %Plot of a 2-cycle camshaft % % fcam2 = figure('Name', '2-Cycle', 'Units', 'centimeters', 'NumberTitle', 'off'); % % %hold on; % %polar(theta, drhocam2, '-r') % polar(theta2, rhocam2, '-b') % %hold off; % % set(gcf,'PaperUnits','centimeters', 'PaperSize', [42 27.9]); % set(gcf, 'units', 'centimeters', 'position', [0, 0, 27.9, 27.9]); % fcam3 = gcf; % % % %Plot of a 3-cycle camshaft % % fcam3 = figure('Name', '3-Cycle', 'Units', 'centimeters', 'NumberTitle', 'off'); % % %hold on; % %polar(theta,drhocam3) % polar(theta3, rhocam3) % %hold off; % % set(gcf,'PaperUnits','centimeters', 'PaperSize', [42 27.9]); % set(gcf, 'units', 'centimeters', 'position', [0, 0, 27.9, 27.9]); % fcam3 = gcf; % The OxyGEN project by Protofy.xyz % Together against Covid-19 % V4.0 rev.17/03/2020 % % Website/blog: oxygen.protofy.xyz % Contact: [email protected]
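In formulas, the script builds each cam profile in three steps, where $\rho_1(\theta) = \mathit{ff}_1 \cdot \gamma(\theta;\, \mathit{ga}_1, \mathit{gb}_1)$ is the gamma-pdf inhale curve, $\psi_1$ the smooth transition weight, and $d$ the follower travel for the chosen $(h_1, h_2)$ pair (as written, the exhale family $\rho_2$ is computed for plotting only): \[ \rho(\theta) = \psi_1(\theta)\,\rho_1(\theta) + \bigl(1-\psi_1(\theta)\bigr)\,\rho_1(\theta+2\pi), \qquad \hat\rho = \frac{\rho - \rho_{\min}}{\rho_{\max} - \rho_{\min}}, \qquad r_{\mathrm{cam}}(\theta) = r_{\min} + \hat\rho(\theta)\,d. \]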
-- check that reorder works correctly import System.Concurrency.Queue main : IO () main = do q <- makeQueue let val1 = 1 let val2 = 2 let val3 = 3 -- enqueue 2 values, which go on the rear stack enqueue q val1 enqueue q val2 -- dequeue to reorder (Just val1') <- dequeue q | Nothing => putStrLn "ERROR: First two values disappeared." if val1' == val2 then putStrLn "ERROR: Queue behaved like a stack (broken reorder)." else do enqueue q val3 -- should go on rear; front contains val2 (Just val2') <- dequeue q | Nothing => putStrLn "ERROR: Second value disappeared." if val2' /= val2 then putStrLn "ERROR: Second value changed." else do (Just val3') <- dequeue q | Nothing => putStrLn "ERROR: Third value disappeared." if (val1 == val1') && (val2 == val2') && (val3 == val3') then putStrLn "Success!" else putStrLn "ERROR: Got different values back."
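The test above reads most easily against a model of the structure it exercises. Here is a minimal Haskell sketch (not the System.Concurrency.Queue implementation) of the standard two-stack queue whose "reorder" step the test targets: data Queue a = Queue { front :: [a], rear :: [a] } emptyQueue :: Queue a emptyQueue = Queue [] [] -- enqueue pushes onto the rear stack in O(1) enqueue :: a -> Queue a -> Queue a enqueue x (Queue f r) = Queue f (x : r) -- dequeue pops the front stack; only when the front is empty is the rear -- reversed onto the front (the "reorder" step) dequeue :: Queue a -> Maybe (a, Queue a) dequeue (Queue [] []) = Nothing dequeue (Queue [] r) = dequeue (Queue (reverse r) []) dequeue (Queue (x:f) r) = Just (x, Queue f r) Dropping the reverse in the reorder step would hand back the most recently enqueued element first, i.e. stack (LIFO) behaviour, which is exactly what the val1' == val2 check in the test guards against.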
module Luau.Var.ToString where open import Agda.Builtin.String using (String) open import Luau.Var using (Var) varToString : Var → String varToString x = x
module example include("propwing.jl") import CCBlade using PyPlot close("all") # ------- common parameters (standard metric units) ------ rho = 1.225 # air density Vinf = 10.0 # freestream speed J = 0.5 # prop advance ratio xtowing = 18.0 # distance from prop to wing (for wake contraction) - assuming it is the same for all props etaprop_center = [40.0/94, 70.0/94] # spanwise location of prop (center) cw = 1.0 # 1 if clockwise, -1 if counter-clockwise (when viewed from upstream) gamma = 3*pi/180 # flight path angle alpha_wing_relative_to_fuselage = 2*pi/180 # angle of attack of wing relative to fuselage reference line alpha = gamma + alpha_wing_relative_to_fuselage # angle of attack # --------- propeller -------- # geometry Rhub = 3*.5 Rtip = 3*3.0 r = 3*[0.526, 0.628, 0.729, 0.831, 0.9132, 0.9586, 1.0332, 1.1128, 1.1925, 1.2722, 1.3519, 1.4316, 1.5114, 1.5911, 1.6708, 1.7505, 1.8302, 1.9099, 1.9896, 2.0693, 2.1490, 2.2287, 2.3084, 2.3881, 2.4678, 2.5475, 2.6273, 2.7070, 2.7867, 2.8661, 2.9410] chord_prop = 3*[0.6270, 0.6255, 0.6231, 0.6199, 0.6165, 0.6125, 0.6054, 0.5973, 0.5887, 0.5794, 0.5695, 0.5590, 0.5479, 0.5362, 0.5240, 0.5111, 0.4977, 0.4836, 0.4689, 0.4537, 0.4379, 0.4214, 0.4044, 0.3867, 0.3685, 0.3497, 0.3303, 0.3103, 0.2897, 0.2618, 0.1920] twist_prop = pi/180.0*[40.2273, 38.7657, 37.3913, 36.0981, 34.8803, 33.5899, 31.6400, 29.7730, 28.0952, 26.5833, 25.2155, 23.9736, 22.8421, 21.8075, 20.8586, 19.9855, 19.1800, 18.4347, 17.7434, 17.1005, 16.5013, 15.9417, 15.4179, 14.9266, 14.4650, 14.0306, 13.6210, 13.2343, 12.8685, 12.5233, 12.2138] B = 3 # number of blades aftype = CCBlade.af_from_aerodynfile("NACA64_A17.dat") n = length(r) af = Array{CCBlade.AirfoilData}(n) for i = 1:n af[i] = aftype end Dprop = Rtip*2 # propeller diameter Omega = Vinf/(J*Dprop)*2*pi # rotation rate uwake, vwake, T, Q, reff = propwing.propanalysis(Rhub, Rtip, r, chord_prop, twist_prop, B, af, Vinf, Omega, rho, xtowing) # --------- wing geometry ------- sectionspan = [29, 65] chord_wing = [43, 26, 11] twist_wing = [0, 0, 0]*pi/180 tc = [0.13, 0.12, 0.11] sweep = [25, 30]*pi/180 dihedral = [0, 0] N = [29, 65] # L, Di, CP, cl, cllocal, Vinfeff = propwing.winganalysis(sectionspan, chord_wing, twist_wing, tc, sweep, dihedral, N, alpha, rho, Vinf, reff, uwake, vwake, etaprop_center, cw) # println(L) # println(Di) # # figure() # plot(CP.y, cl) # plot(CP.y, cllocal) # # # ------ viscous drag ----- # # TODO # Dp = 0.0 # D = Di + Dp # # # ------- add prop thrust to lift -------- # L += T*sin(gamma) # add prop thrust to lift # thrust_ratio = T*cos(gamma)/D # must be larger than 1 # -------- diameter checking -------- nd = 100 dratio = linspace(0.02, 0.8, nd) CLvec = zeros(nd) CDivec = zeros(nd) etaprop_center = [40.5828/94] # spanwise location of prop (center) for i = 1:nd rprop = reff/reff[end]*dratio[i]/2*sum(sectionspan) L, Di, CP, cl, cllocal, Vinfeff, alphaeff = propwing.winganalysis(sectionspan, chord_wing, twist_wing, tc, sweep, dihedral, N, alpha, rho, Vinf, rprop, uwake, vwake, etaprop_center, cw) q = 0.5*rho*Vinf^2 S = 2*sum(CP.chord.*CP.ds) CLvec[i] = L/(q*S) CDivec[i] = Di/(q*S) end # println(CLvec) # println(CDivec) CLbad = [0.382905, 0.386424, 0.386774, 0.386081, 0.389872, 0.390167, 0.392373, 0.39475, 0.394911, 0.397192, 0.397608, 0.397071, 0.400073, 0.400052, 0.399881, 0.402262, 0.40314, 0.405263, 0.406519, 0.406577, 0.409258, 0.410152, 0.409989, 0.41318, 0.414047, 0.415491, 0.417441, 0.417875, 0.420247, 0.421134, 0.42098, 0.42363, 0.424618, 0.425658, 0.428337, 0.429073, 0.430862, 0.432741, 0.433258, 
0.436002, 0.437109, 0.437732, 0.44078, 0.441917, 0.443226, 0.445345, 0.445991, 0.448085, 0.449637, 0.450339, 0.453482, 0.454775, 0.456139, 0.458766, 0.459777, 0.461669, 0.463841, 0.464765, 0.467362, 0.46903, 0.470428, 0.473236, 0.474219, 0.475866, 0.478265, 0.479343, 0.481874, 0.483983, 0.485173, 0.488129, 0.489759, 0.491443, 0.494142, 0.495331, 0.497667, 0.500125, 0.501265, 0.504214, 0.506247, 0.507659, 0.51031, 0.511974, 0.514132, 0.516882, 0.51815, 0.520937, 0.523536, 0.525125, 0.52798, 0.530019, 0.53199, 0.534845, 0.536503, 0.539055, 0.541797, 0.543271, 0.546195, 0.548757, 0.550623, 0.553543] CDibad = [0.00588463, 0.00587591, 0.00587341, 0.00590396, 0.00588026, 0.00587782, 0.00588183, 0.00585698, 0.00585025, 0.0058566, 0.00585454, 0.005875, 0.00586265, 0.00586892, 0.00589654, 0.00588845, 0.00588124, 0.00588864, 0.00588322, 0.00589091, 0.00588995, 0.00588725, 0.00590671, 0.00589429, 0.00588971, 0.00589854, 0.00589212, 0.00589244, 0.00589664, 0.00589763, 0.00591354, 0.00591262, 0.00591137, 0.00592136, 0.00591176, 0.00591071, 0.00591893, 0.00591281, 0.00591794, 0.00591655, 0.00591542, 0.00592514, 0.00591515, 0.0059112, 0.00592028, 0.00591486, 0.00591883, 0.00592354, 0.0059209, 0.00592647, 0.00591972, 0.00591554, 0.00591953, 0.00591197, 0.00591032, 0.00591382, 0.0059059, 0.00590737, 0.00590384, 0.00589778, 0.00589761, 0.00588931, 0.00588846, 0.00589156, 0.00588417, 0.00588266, 0.00587735, 0.00586829, 0.00586781, 0.00585689, 0.00584926, 0.00584727, 0.00583654, 0.00583216, 0.00582569, 0.00581349, 0.00580967, 0.00579741, 0.00578652, 0.00578435, 0.00577362, 0.0057639, 0.00575583, 0.00574088, 0.00573432, 0.00572027, 0.00570297, 0.00569357, 0.00567768, 0.00566346, 0.00565299, 0.0056353, 0.00562337, 0.00560788, 0.00558971, 0.00557922, 0.00555981, 0.00554011, 0.00552628, 0.00550529] rc("figure", figsize=(3.5, 2.6)) rc("font", size=10.0) #, family="CMU Serif") rc("lines", linewidth=1.5) rc("legend", frameon=false) rc("axes.spines", right=false, top=false) rc("figure.subplot", left=0.18, bottom=0.16, top=0.97, right=0.95) rc("axes", color_cycle=["348ABD", "A60628", "009E73"]) figure() plot(dratio, CLbad) plot(dratio, CLvec, linewidth=1) xlabel("diameter/semispan") ylabel(L"C_L") xlim([0, 0.3]) ylim([0.38, 0.44]) legend(["original", "smooth"], loc="upper left") # savefig("../MDAO_paper/figures/smoothCL.pdf", transparent=true) figure() plot(dratio, CDibad*10000) plot(dratio, CDivec*10000, linewidth=1) xlabel("diameter/semispan") ylabel(L"$C_{Di}$ (counts)") xlim([0, 0.8]) legend(["original", "smooth"]) # savefig("../MDAO_paper/figures/smoothCDi.pdf", transparent=true) end
lemma cdiv_in_iff' [simp]: "c \<noteq> 0 \<Longrightarrow> (\<lambda>x. f x / c) \<in> L F (g) \<longleftrightarrow> f \<in> L F (g)"
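Assuming L F (g) here denotes Isabelle's generic Landau symbol along the filter F (big-O and its relatives), this is the familiar invariance of asymptotic classes under scaling by a nonzero constant: \[ c \ne 0 \;\Longrightarrow\; \Bigl(\frac{f}{c} \in O(g) \iff f \in O(g)\Bigr), \] since $\|f(x)/c\| \le K\,\|g(x)\|$ holds if and only if $\|f(x)\| \le |c|\,K\,\|g(x)\|$.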
Full Description: This MailWraps® magnetic mailbox cover is pre-cut to fit a standard (T1) rural mailbox 6 1/2" wide x 19" deep. UV-printed for vivid color reproduction and exceptional durability. Made in USA. Mailbox not included.
State Before: R : Type u S : Type v a✝ b c d : R n✝ m : ℕ inst✝ : Semiring R p q r : R[X] n : ℕ a : R ⊢ degree (↑C a * X ^ n) ≤ ↑n State After: R : Type u S : Type v a✝ b c d : R n✝ m : ℕ inst✝ : Semiring R p q r : R[X] n : ℕ a : R ⊢ degree (↑(monomial n) a) ≤ ↑n Tactic: rw [C_mul_X_pow_eq_monomial] State Before: R : Type u S : Type v a✝ b c d : R n✝ m : ℕ inst✝ : Semiring R p q r : R[X] n : ℕ a : R ⊢ degree (↑(monomial n) a) ≤ ↑n State After: no goals Tactic: apply degree_monomial_le
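In conventional notation, the two proof steps are: rewrite $C(a)\cdot X^n$ as the monomial $\operatorname{monomial}(n, a)$, then apply the degree bound for monomials, \[ \deg\bigl(C(a)\cdot X^n\bigr) = \deg\bigl(\operatorname{monomial}(n, a)\bigr) \le n, \] with equality exactly when $a \ne 0$ (for $a = 0$ the degree is $\bot$).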
//================================================================================================== /** Copyright 2016 Numscale SAS Distributed under the Boost Software License, Version 1.0. (See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt) **/ //================================================================================================== #ifndef BOOST_SIMD_ARCH_X86_AVX_SIMD_FUNCTION_DEINTERLEAVE_HPP_INCLUDED #define BOOST_SIMD_ARCH_X86_AVX_SIMD_FUNCTION_DEINTERLEAVE_HPP_INCLUDED #include <boost/simd/detail/overload.hpp> #include <boost/simd/function/bitwise_cast.hpp> #include <boost/simd/function/slice.hpp> #include <boost/simd/function/combine.hpp> #include <boost/simd/function/deinterleave_first.hpp> #include <boost/simd/function/deinterleave_second.hpp> #include <boost/simd/detail/dispatch/meta/as_floating.hpp> #include <array> namespace boost { namespace simd { namespace ext { namespace bd = boost::dispatch; namespace bs = boost::simd; BOOST_DISPATCH_OVERLOAD ( deinterleave_ , (typename A0) , bs::avx_ , bs::pack_<bd::type64_<A0>, bs::avx_> , bs::pack_<bd::type64_<A0>, bs::avx_> ) { BOOST_FORCEINLINE std::array<A0,2> operator()(A0 const& a0, A0 const& a1 ) const BOOST_NOEXCEPT { auto b0 = bitwise_cast<bd::as_floating_t<A0>>(a0); auto b1 = bitwise_cast<bd::as_floating_t<A0>>(a1); auto p0 = _mm256_permute2f128_pd(b0,b1,0x20); auto p1 = _mm256_permute2f128_pd(b0,b1,0x31); A0 f = bitwise_cast<A0>( _mm256_unpacklo_pd( p0, p1 ) ); A0 s = bitwise_cast<A0>( _mm256_unpackhi_pd( p0, p1 ) ); return {f,s}; } }; BOOST_DISPATCH_OVERLOAD ( deinterleave_ , (typename A0) , bs::avx_ , bs::pack_<bd::type32_<A0>, bs::avx_> , bs::pack_<bd::type32_<A0>, bs::avx_> ) { BOOST_FORCEINLINE std::array<A0,2> operator()(A0 const& a0, A0 const& a1 ) const BOOST_NOEXCEPT { auto b0 = bitwise_cast<bd::as_floating_t<A0>>(a0); auto b1 = bitwise_cast<bd::as_floating_t<A0>>(a1); auto x = _mm256_permute2f128_ps(b0,b1,0x20); auto y = _mm256_permute2f128_ps(b0,b1,0x31); auto u0 = _mm256_unpacklo_ps(x,y); auto u1 = _mm256_unpackhi_ps(x,y); A0 f = bitwise_cast<A0>( _mm256_unpacklo_ps( u0, u1 ) ); A0 s = bitwise_cast<A0>( _mm256_unpackhi_ps( u0, u1 ) ); return {f,s}; } }; BOOST_DISPATCH_OVERLOAD ( deinterleave_ , (typename A0) , bs::avx_ , bs::pack_<bd::integer_<A0>, bs::avx_> , bs::pack_<bd::integer_<A0>, bs::avx_> ) { BOOST_FORCEINLINE std::array<A0,2> operator()(A0 const& a0, A0 const& a1 ) const BOOST_NOEXCEPT { auto s00 = slice_low(a0), s01 = slice_high(a0); auto s10 = slice_low(a1), s11 = slice_high(a1); auto f = combine(deinterleave_first (s00,s01), deinterleave_first (s10,s11)); auto s = combine(deinterleave_second(s00,s01), deinterleave_second(s10,s11)); return {f,s}; } }; } } } #endif
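A worked instance of the 64-bit overload may help; write the interleaved inputs as a0 = (x0, y0, x1, y1) and a1 = (x2, y2, x3, y3). _mm256_permute2f128_pd with selector 0x20 concatenates the low 128-bit lanes, giving p0 = (x0, y0, x2, y2), while 0x31 concatenates the high lanes, giving p1 = (x1, y1, x3, y3); the per-lane unpacklo/unpackhi then produce f = (x0, x1, x2, x3) and s = (y0, y1, y2, y3), i.e. the first and second deinterleaved streams returned above.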
theory Conntrack_State_Transform imports "Common_Primitive_Matcher" "../Semantics_Ternary/Semantics_Ternary" begin text\<open>The following function assumes that the packet is in a certain state.\<close> fun ctstate_assume_state :: "ctstate \<Rightarrow> 'i::len common_primitive match_expr \<Rightarrow> 'i common_primitive match_expr" where "ctstate_assume_state s (Match (CT_State x)) = (if s \<in> x then MatchAny else MatchNot MatchAny)" | "ctstate_assume_state s (Match m) = Match m" | "ctstate_assume_state s (MatchNot m) = MatchNot (ctstate_assume_state s m)" | "ctstate_assume_state _ MatchAny = MatchAny" | "ctstate_assume_state s (MatchAnd m1 m2) = MatchAnd (ctstate_assume_state s m1) (ctstate_assume_state s m2)" lemma ctstate_assume_state: "p_tag_ctstate p = s \<Longrightarrow> matches (common_matcher, \<alpha>) (ctstate_assume_state s m) a p \<longleftrightarrow> matches (common_matcher, \<alpha>) m a p" apply(rule matches_iff_apply_f) by(induction m rule: ctstate_assume_state.induct) (simp_all) definition ctstate_assume_new :: "'i::len common_primitive rule list \<Rightarrow> 'i common_primitive rule list" where "ctstate_assume_new \<equiv> optimize_matches (ctstate_assume_state CT_New)" lemma ctstate_assume_new_simple_ruleset: "simple_ruleset rs \<Longrightarrow> simple_ruleset (ctstate_assume_new rs)" by (simp add: ctstate_assume_new_def optimize_matches_simple_ruleset) text\<open>Usually, the interesting part of a firewall is only about the rules for setting up connections. That means, we mostly only care about packets in state @{const CT_New}. Use the function @{const ctstate_assume_new} to remove all state matching and just care about the connection setup. \<close> corollary ctstate_assume_new: "p_tag_ctstate p = CT_New \<Longrightarrow> approximating_bigstep_fun (common_matcher, \<alpha>) p (ctstate_assume_new rs) s = approximating_bigstep_fun (common_matcher, \<alpha>) p rs s" unfolding ctstate_assume_new_def apply(rule optimize_matches) apply(simp add: ctstate_assume_state) done text\<open>If we assume the CT State is @{const CT_New}, we can also assume that the TCP SYN flag (@{const ipt_tcp_syn}) is set.\<close> fun ipt_tcp_flags_assume_flag :: "ipt_tcp_flags \<Rightarrow> 'i::len common_primitive match_expr \<Rightarrow> 'i common_primitive match_expr" where "ipt_tcp_flags_assume_flag flg (Match (L4_Flags x)) = (if ipt_tcp_flags_equal x flg then MatchAny else (case match_tcp_flags_conjunct_option x flg of None \<Rightarrow> MatchNot MatchAny | Some f3 \<Rightarrow> Match (L4_Flags f3)))" | "ipt_tcp_flags_assume_flag flg (Match m) = Match m" | "ipt_tcp_flags_assume_flag flg (MatchNot m) = MatchNot (ipt_tcp_flags_assume_flag flg m)" | "ipt_tcp_flags_assume_flag _ MatchAny = MatchAny" | "ipt_tcp_flags_assume_flag flg (MatchAnd m1 m2) = MatchAnd (ipt_tcp_flags_assume_flag flg m1) (ipt_tcp_flags_assume_flag flg m2)" lemma ipt_tcp_flags_assume_flag: assumes "match_tcp_flags flg (p_tcp_flags p)" shows "matches (common_matcher, \<alpha>) (ipt_tcp_flags_assume_flag flg m) a p \<longleftrightarrow> matches (common_matcher, \<alpha>) m a p" proof(rule matches_iff_apply_f) show "ternary_ternary_eval (map_match_tac common_matcher p (ipt_tcp_flags_assume_flag flg m)) = ternary_ternary_eval (map_match_tac common_matcher p m)" using assms proof(induction m rule: ipt_tcp_flags_assume_flag.induct) case (1 flg x) thus ?case apply(simp add: ipt_tcp_flags_equal del: match_tcp_flags.simps) apply(cases "match_tcp_flags_conjunct_option x flg") apply(simp) using match_tcp_flags_conjunct_option_None 
bool_to_ternary_simps(2) apply metis apply(simp) apply(drule_tac pkt="(p_tcp_flags p)" in match_tcp_flags_conjunct_option_Some) by simp qed(simp_all del: match_tcp_flags.simps) qed definition ipt_tcp_flags_assume_syn :: "'i::len common_primitive rule list \<Rightarrow> 'i common_primitive rule list" where "ipt_tcp_flags_assume_syn \<equiv> optimize_matches (ipt_tcp_flags_assume_flag ipt_tcp_syn)" lemma ipt_tcp_flags_assume_syn_simple_ruleset: "simple_ruleset rs \<Longrightarrow> simple_ruleset (ipt_tcp_flags_assume_syn rs)" by (simp add: ipt_tcp_flags_assume_syn_def optimize_matches_simple_ruleset) corollary ipt_tcp_flags_assume_syn: "match_tcp_flags ipt_tcp_syn (p_tcp_flags p) \<Longrightarrow> approximating_bigstep_fun (common_matcher, \<alpha>) p (ipt_tcp_flags_assume_syn rs) s = approximating_bigstep_fun (common_matcher, \<alpha>) p rs s" unfolding ipt_tcp_flags_assume_syn_def apply(rule optimize_matches) apply(simp add: ipt_tcp_flags_assume_flag) done definition packet_assume_new :: "'i::len common_primitive rule list \<Rightarrow> 'i common_primitive rule list" where "packet_assume_new \<equiv> ctstate_assume_new \<circ> ipt_tcp_flags_assume_syn" lemma packet_assume_new_simple_ruleset: "simple_ruleset rs \<Longrightarrow> simple_ruleset (packet_assume_new rs)" by (simp add: packet_assume_new_def ipt_tcp_flags_assume_syn_simple_ruleset ctstate_assume_new_simple_ruleset) corollary packet_assume_new: "match_tcp_flags ipt_tcp_syn (p_tcp_flags p) \<Longrightarrow> p_tag_ctstate p = CT_New \<Longrightarrow> approximating_bigstep_fun (common_matcher, \<alpha>) p (packet_assume_new rs) s = approximating_bigstep_fun (common_matcher, \<alpha>) p rs s" unfolding packet_assume_new_def by (simp add: ctstate_assume_new ipt_tcp_flags_assume_syn) end
%??Section 11.1.2.1 on the evolution of anti-construct state agreement marking in Baltic and Slavic struck me as highly speculative; it does not include a single example from real texts from the earlier stages of Baltic or Slavic! The same observation pertains to the following section on Germanic. In my view, this largely invalidates the whole discussion. In particular, the empirical justification for the following claim on p. 248: “The three most important results of this study are ... (3) the diachronic attestation of contrastive-focus constructions with phrasally embedded adjectival modifiers as a common source of innovative adjective attribution marking devices in the northern Eurasian languages” — is not provided. %??By the way, on the history of “long” versus “short” adjectives in Russian, see works by Karin Larsen (2005, 2006, 2007). \is{grammaticalization|(} \chapter[The evolution of attribution marking]{The evolution of attribution marking in northern Eurasian languages} %%% Attribution marking devices were typologized in Part~II (Typology) and their geographic distribution across the genealogical entities of northern Eurasia was presented in Part~III (Synchrony). The present, diachronic part focuses on linguistic changes which led to the emergence of the attested synchronic diversity within the northern Eurasian area. \largerpage[-1] Not all attested changes are investigated in equal depth in each genealogical unit. Special focus lies on the grammaticalization of attributive markers from attributive nominalizers in the Saamic\il{Saamic languages} and Finnic\il{Finnic languages} branches of Uralic as well as in the Baltic,\il{Baltic languages} Slavic\il{Slavic languages} and Germanic\il{Germanic languages} branches of Indo-European. Different types of adjective attribution marking have been grammaticalized from attributive nominalizers\is{attributive nominalization} in different languages of the area and during different periods of time. Up to now, these diachronic patterns have not been systematically investigated from a cross-linguistic perspective. The parallel evolution of attributive nominalizers and other adjective attribution marking devices is interesting not only from a general typological perspective. The linguistic interference zone between Uralic and Indo-European in northeastern Europe exhibits a relatively high degree of diversity from a synchronic point of view (see Chapter~\ref{areality}). Consequently, it appears that the synchronically and diachronically attested developments have to be described in areal linguistic terms and provide further evidence for establishing a Northern European \textit{Sprachbund}.\is{Sprachbund} \is{attributive nominalization|(} \section[Attributive nominalizers]{The emergence of attributive nominalizers} %%% Attributive nominalization as a special subtype of dependent\hyp{}marking attributive state (see \S\ref{attr nmlz}) is not synchronically attested as a default licenser of the attributive connection of adjectives in any language of northern Eurasia. However, in several languages of the area, attributive constructions with nominalizers constitute a special type of noun phrases characterized earlier as attributive apposition. A typical example is \ili{Udmurt} (Uralic) where an adjectival attribute equipped with an article is marked for contrastive focus (see \S\ref{udmurt synchr}). 
The only two Northern Eurasian languages exhibiting attributive nominalization as a default attribution marking device synchronically are \ili{Albanian} proper and \ili{Arvanitika} from the Albanian\il{Albanian languages} branch (Indo-European). The marker, however, is used only in a circumfixed\is{position!circumfixed} construction together with the inherited \isi{head\hyp{}driven agreement}. Attributive nominalizers are also documented in historical stages of several Indo-European branches, such as Baltic,\il{Baltic languages} Slavic\il{Slavic languages} and Germanic.\il{Germanic languages} But even here, these markers are not the default devices. Instead, attributive articles compete with other attributive markers and are restricted to emphatically marked noun phrases. In several of these \ili{Indo-European languages}, however, the articles have evolved into new default types of attribution marking. A prototypical example of attribution marking originating from an attributive article is anti\hyp{}construct state agreement marking in \ili{Russian} (see \S\ref{russian synchr}). In other languages, the former attributive article is still traceable as a secondary type of attribution marking, as in the modern \ili{Baltic languages}. Here, the attributive article also evolved into an anti\hyp{}construct state agreement marker but it is still restricted to a semantically defined subset of noun phrases (see \S\ref{baltic synchr}). \is{attributive article|(} \ia{Himmelmann, Nikolaus|(} The synchrony and diachrony of attributive articles have also been dealt with in a cross-linguistic investigation of grammaticalized adnominal D(eictic) elements by \citet{himmelmann1997}. Himmelmann assumes that attributive articles (“linking articles” in his terminology) originally occurred in appositional nominal expressions. These “linking constructions” are characterized as complex noun phrases in which the attribute occurs as a syntactically independent nominal expression. The “linking article” (i.e., \textit{attributive article} in terms of the present typology) serves as a nominalizer and licenses the attribute as a syntagma of its own \citep[188]{himmelmann1997}. \is{attributive article|)} The diachronic data from several Indo-European,\il{Indo-European languages} Uralic\il{Uralic languages} and \ili{Turkic languages} presented in the following sections support Himmelmann's conclusions about a common source of attributive marking originating from pronouns or other deictic elements used as attributive nominalizers. \ia{Himmelmann, Nikolaus|)} \il{Uralic languages|(} \il{Turkic languages|(} \is{juxtaposition|(} \subsection{Attributive nominalizers in Uralic and Turkic} \label{uralic-turkic diachr} %%% Juxtaposition has been the prototype of adjective attributive marking in all Turkic and most Uralic languages and likely goes back to the proto-stages of these families (cf.~\citealt[80–81]{decsy1990} for Uralic and \citealt[75–76]{decsy1998} for Turkic). However, as the result of a secondary development in some branches of Uralic and Turkic, an attributive nominalizer grammaticalized. Synchronically, it occurs as a minor attribution marking device in specially marked noun phrase types in several languages of these two families.%does {decsy1998} actually say anything specific about agreement? In the Saamic\il{Saamic languages} and Finnic\il{Finnic languages} branches of Uralic, juxtaposition has been replaced completely by new adjective attribution marking devices. 
In \ili{Proto\hyp{}Saamic} the prototypical attributive connector of adjectives was probably anti\hyp{}construct state marking. A comparison of synchronic evidence across modern Saamic languages makes this reconstruction very likely \citep{riesler2006b}. However, the modern \ili{Saamic languages} show a strong tendency to abandon the anti\hyp{}construct state marker and re-introduce the morphologically unmarked adjective attribution marking device juxtaposition. In \ili{Proto\hyp{}Finnic}, the original Uralic type has also been lost and has now been replaced by \isi{head\hyp{}driven agreement} marking of attributive adjectives. In \S\ref{Finnic diachr} and \ref{saamic diachr}, the emergence of agreement in Finnic and anti\hyp{}construct state marking in Saamic will be explored and described as a possible result of the grammaticalization of attributive nominalizers. \is{juxtaposition|)} \il{Udmurt|(} Since the emergence of attributive nominalizers in Udmurt (and other modern Uralic languages) probably reflects structurally similar stages of development to those assumed for \ili{Proto\hyp{}Saamic} and \ili{Proto\hyp{}Finnic}, the Udmurt case will be described in depth in the following sections. \subsubsection{The contrastive focus marker in Udmurt} \label{udmurt diachr} %%% Synchronic data from Udmurt illustrates the emergence of an attributive article and might even indicate how this attribution marker has been generalized as an anti\hyp{}construct state marker. The use of the 3\textsuperscript{rd} person possessive suffix as a contrastive focus marker in Udmurt was exemplified in \S\ref{udmurt synchr} on the synchrony of attribution marking in Permic.\il{Permic languages} In the following sections, the etymological source and the evolution of this contrastive focus construction will be illustrated with the help of further examples. As in several other Uralic languages, the 3\textsuperscript{rd} person singular possessive suffix in Udmurt is often used as a definite-like marker. Grammatical descriptions of Udmurt use different terms to define the function of this formative, for example as “determinative” \citep{kelmakov-etal1999}, “contrastive-deictic” \citep{alatyrev1970}, “anaphorical-emphasizing” \citep{kiekbaev1965}, or simply “definite” \citep{winkler2001}. The suffix is characterized in the following as “quasi-definite” since Udmurt (like most other Uralic languages) has no morphologized feature \textsc{species}. The use of the marker is obviously determined by the referential status of the noun phrase, but it does not occur obligatorily in definite noun phrases. Since the rules for definiteness marking are not the subject of the present investigation, the formative in definite-like constructions will simply be referred to as \textit{determinative suffix}, which is also consistent with some of the grammatical descriptions mentioned above (e.g., \citealt{kelmakov-etal1999}).\is{species marking!definite} Besides its function as a possessive marker, the 3\textsuperscript{rd} person singular possessive suffix occurs not only in quasi-definite noun phrases but is even used as an (attributive) nominalizer and as a marker of contrastive focus on adjectives. From a synchronic point of view, the functions of \textsc{poss:3sg} in the different non-possessive uses are probably better analyzed as belonging to different grammatical categories. Consequently, different glosses (such as \textsc{poss, def, nmlz, contr}) should be applied. 
However, in order to illustrate the similar historical source of the synchronically differentiated grammatical meanings, one and the same gloss (i.e., \textsc{poss:3sg}) is used in the following examples. %%% \begin{exe} \ex {\upshape Possessive and non-possessive functions of (historical) \textsc{poss:3sg}} \begin{xlist} \ex {\upshape Possessive marking} \label{udmurt possmarking} \begin{xlist} \ex \gll gurt\textbf{-ėz}\\ house-\textsc{poss:3sg}\\ \glt ‘her/his/its house’ \ex \gll gurt-jos-a\textbf{-z}\\ house-\textsc{pl}-\textsc{ill}-\textsc{poss:3sg}\\ \glt ‘into her/his/its houses’ \end{xlist} \ex {\upshape “Determinative” marking} \begin{xlist} \ex \gll gurt\textbf{-ėz}\\ house-\textsc{poss:3sg}\\ \glt ‘this house’ \ex \gll gurt-jos-a\textbf{-z}\\ house-\textsc{pl}-\textsc{ill}-\textsc{poss:3sg}\\ \glt ‘into these houses’ \end{xlist} \ex {\upshape Attributive nominalization} \label{udmurt diachr nomzr} \begin{xlist} \ex {\upshape Demonstrative}\\ \label{udmurt diachr dem-nomzr} \gll ta\textbf{-iz} / so\textbf{-iz}\\ \textsc{dem:prox}-\textsc{poss:3sg} {} \textsc{dem:dist}-\textsc{poss:3sg}\\ \glt ‘this one over here’ / ‘that one over there’ %%% \newpage \ex {\upshape Possessor noun phrase}\\ \label{udmurt diachr gen-nomzr} \gll Ivan-len\textbf{-ėz}\\ Ivan-\textsc{gen}-\textsc{poss:3sg}\\ \glt ‘the one of Ivan (Ivan's)’ \ex {\upshape Adjective}\\ \label{udmurt diachr adj-nomzr} \gll badǯ́ym\textbf{-ėz}\\ big-\textsc{poss:3sg}\\ \glt ‘the big one’ \end{xlist} \ex {\upshape Contrastive focus marking} \label{udmurt diachr contr} \begin{xlist} \ex \gll badǯ́ym\textbf{-ėz} gurt\\ big-\textsc{poss:3sg} house\\ \glt ‘a/the \textsc{big} house’ \ex \gll badǯ́ym-jos-a\textbf{-z} gurt-jos-y\\ big-\textsc{pl}-\textsc{ill}-\textsc{poss:3sg} house-\textsc{pl}-\textsc{ill}\\ \glt ‘into (the) \textsc{big} houses’ \end{xlist} \end{xlist} \end{exe} %%% The use of the suffix \textit{-ėz} as a marker of contrastive focus is obviously connected to its other non-possessive functions. The order of examples (\ref{udmurt possmarking}–\ref{udmurt diachr contr}) probably reflects the functional expansion of the original possessive marker to a “determinative” marker on noun phrases and a contrastive focus marker on adjectives. The clue for understanding this development is the use of the suffix \textit{-ėz} as an attributive nominalizer in \isi{headless noun phrase}s, as shown in (\ref{udmurt diachr nomzr}). Here, the determinative suffix is used as a true attributive nominalizer to mark a demonstrative (\ref{udmurt diachr dem-nomzr}), a possessor noun\is{adnominal modifier!possessor noun} (\ref{udmurt diachr gen-nomzr}) or an adjective (\ref{udmurt diachr adj-nomzr}) as modifiers by projecting a full (headless) noun phrase. Note, however, that headless adjectives, demonstratives,\is{adnominal modifier!demonstrative} and noun possessors (in genitive)\is{adnominal modifier!possessor noun} are not obligatorily marked by means of attributive nominalization in Udmurt. The marker is used in order to emphasize the property denoted by the attribute and to contrast it to other properties of the same set. The emphasizing function of the determinative suffix, finally, is the link to its use as a contrastive focus marker on adjectives. 
It seems clear that these contrastive focus constructions originate from appositional constructions of nouns with emphasized headless attributes, illustrated in (\ref{udmurt apposition}).\is{headless noun phrase}\footnote{The zero-morpheme (equipped with the nominalizer Ø-\textsc{nmlz}) in (\ref{udmurt apposition}) is only presented for a better illustration of the empty head position to which the (nominalized) adjective moves in this appositional noun phrase.} %%% \begin{exe} \ex {\ob}\textsubscript{\upshape NP} {\ob}\textsubscript{\upshape NP'} \textsubscript{\upshape A}big \textsubscript{\upshape HEAD}Ø-\textsc{\upshape nmlz}{\cb} \textsubscript{\upshape N}house{\cb} \label{udmurt apposition} \end{exe} %%% The agreement patterns in noun phrases with attributes in contrastive focus provide the best evidence for this assumption. In their default use, attributive adjectives (as well as other modifiers) do not show agreement with the head noun. However, when the attribute is marked for contrastive focus (by means of the attributive nominalizer \textsc{attr} $\Leftarrow$ \textsc{poss:3sg}), case and number marking spread to the adjective, as in the minimally contrastive examples in (\ref{udmurt examples}). %%% \begin{exe} \ex {\upshape Juxtaposition\is{juxtaposition} versus anti\hyp{}construct state agreement marking (i.e., in contrastive focus) \citep{kelmakov-etal1999,winkler2001}} \label{udmurt examples} \begin{xlist} \ex {\upshape Adjective attribute} \begin{xlist} \ex \gll badǯ́ym / badǯ́ym\textbf{-ėz} gurt\\ big {} big-\textsc{attr} house\\ \glt ‘big house’ : ‘\textsc{big} house’ \ex \gll badǯ́ym / badǯ́ym\textbf{-jos-a-z} gurt-jos-y\\ big {} big-\textsc{pl}-\textsc{ill}-\textsc{attr} house-\textsc{pl}-\textsc{ill}\\ \glt ‘to (the) big houses’ : ‘to (the) \textsc{big} houses’ \end{xlist} %%% \ex {\upshape Possessor noun attribute}\is{adnominal modifier!possessor noun}\footnote{Note that the cross-referencing possessive agreement marker does not occur with a genitive construction in contrastive focus \citep[81]{kelmakov-etal1999}.} \begin{xlist} \ex \gll Ivan-len / Ivan-len\textbf{-ėz} gurt-ėz\\ Ivan-\textsc{gen} {} Ivan-\textsc{gen}-\textsc{attr} house-\textsc{poss:3sg}\\ \glt ‘Ivan's house’ : ‘\textsc{Ivan's} house’ \ex \gll Ivan-len / Ivan\textbf{-jos-a-z-len} gurt-jos-a-z\\ Ivan-\textsc{gen} {} Ivan-\textsc{pl}-\textsc{ill}-\textsc{attr}-\textsc{gen} house-\textsc{pl}-\textsc{ill}-\textsc{poss:3sg}\\ \glt ‘to Ivan's houses’ : ‘to \textsc{Ivan's} houses’ \end{xlist} \ex {\upshape Demonstrative attribute} \label{udmurt det dem} \begin{xlist} \ex \gll so / so\textbf{-iz} gurt\\ \textsc{dem:dist} {} \textsc{dem:dist}-\textsc{attr} house\\ \glt ‘that house’ : ‘\textsc{that} house’ \ex \gll ta / ta\textbf{-os-a-z} gurt-jos-y\\ \textsc{dem:prox} {} \textsc{dem:prox}-\textsc{pl}-\textsc{ill}-\textsc{attr} house-\textsc{pl}-\textsc{ill}\\ \glt ‘to these houses’ : ‘to \textsc{these} houses’ \end{xlist} \end{xlist} \end{exe} %POSS as an agreement category %%% \newpage %longdistance Following the intuition of the authors of grammatical descriptions of Udmurt, however, one could also analyze these constructions as true noun phrases with a syntactic structure as in (\ref{udmurt notapposition}) (as opposed to \ref{udmurt apposition}) where the original nominalizer of the attribute in the \isi{headless noun phrase} became a dependent\hyp{}marking attributive construct device linking the attribute in contrastive focus to the semantic head ‘house’ in the noun phrase. 
%%% \begin{exe} \ex[?]{ {\ob}\textsubscript{\upshape NP} \textsubscript{\upshape A}big-\textsc{\upshape contr} \textsubscript{\upshape HEAD}house{\cb} } \label{udmurt notapposition} \end{exe} %%% Even if head\hyp{}driven number and case agreement is involved in attribution marking of adjectives in contrastive focus, Udmurt is better analyzed as a language exhibiting an attributive appositional construction rather than anti\hyp{}construct state agreement marking. The agreement and anti\hyp{}construct state marking formatives are not fused and agreement marking occurs only indirectly as the result of the nominalization of the appositional headless adjective.\is{headless noun phrase} \il{Udmurt|)} \subsubsection[Possessive suffixes as attributive nominalizers]{Possessive suffixes as attributive nominalizers in other Uralic and in Turkic languages} %%% Non-possessive uses of 3\textsuperscript{rd} person singular possessive suffixes similar to Udmurt are well attested in several Uralic and Turkic languages.\footnote{In several languages, even the 2\textsuperscript{nd} person singular possessive occurs in the same function.} In descriptions of these languages, the marker is often characterized as “emphatic-definite” or simply “definite” (cf.~\citealt[148]{tauli1966}; \citealt{kunnap2004}). But obviously this is greatly oversimplified. It is especially unclear what it would mean to mark an adjectival modifier as “definite”.\is{species marking!definite} Besides in Udmurt, the use of the (historical) 3\textsuperscript{rd} person singular possessive suffix as a marker of contrastive focus is similarly regular (though less systematically described) in the other \ili{Permic languages} (cf.~\citealt[67]{serebrennikov1963}).%this reference is not so good \il{Mari languages|(} In the Mari languages, which belong to the Volgaic\il{Volgaic languages} branch of Uralic, the possessive suffix is also commonly used as a determinative suffix for nouns (cf.~\citealt[75–76]{alhoniemi1993}). The regular use of the formative to derive a certain set of “determinative” or contrastively focused demonstratives and quantifiers in Mari (\ref{mari example}, similar to the \ili{Udmurt} example (\ref{udmurt det dem}) on page~\pageref{udmurt det dem}) gives at least some evidence that the Mari languages have (or had) an attributive nominalizer in contrastive focus constructions as well.\footnote{The homophonous focus \isi{clitic} \textit{=že} in Eastern Mari (\textit{təi=že kuze ilaš tüŋalat?} ‘And how are \textsc{you} going to live?’ \citealt[80]{alhoniemi1993}) is most likely not cognate with the 3\textsuperscript{rd} person singular possessive suffix but borrowed from the formally and functionally similar focus marker in \ili{Russian}.}%show on the basis of the declension forms that it is poss and not =že, e.g., because case follows it %%% \begin{exe} \ex \langinfo{Eastern Mari}{Uralic}{\citealt{alhoniemi1993}} \label{mari example} \begin{xlist} \ex {\upshape “Short” demonstratives (i.e., unmarked)} \begin{xlist} \ex tide {\upshape ‘this’ /} tudo {\upshape ‘that’ (82)} \end{xlist} %%% \ex {\upshape “Long” demonstratives (i.e., in contrastive focus)} \begin{xlist} \ex tide\textbf{-že} {\upshape ‘this one’ /} tudo\textbf{-že} {\upshape ‘that one’ (82)}%is p. 82 (and 80) above correct? 
\end{xlist} %%% \ex {\upshape Quantifiers in contrastive focus}\\ \gll Tə̂nar\textbf{-žə̂}-m mə̂j nalam, Tə̂nar\textbf{-žə̂}-m tə̂j.\\ so.much-\textsc{poss:3sg}-\textsc{acc} I take, so.much-\textsc{poss:3sg}-\textsc{acc} you\\ \glt ‘So much I will take, so much you.’ (76) \end{xlist} \end{exe} %%% \il{Chuvash|(} A similar use of the (historical) 3\textsuperscript{rd} person singular possessive suffix as a marker of contrastive focus in the Turkic language Chuvash has been shown in \S\ref{chuvash synchr}. Interestingly, the Turkic language Chuvash and the Uralic languages Eastern\il{Eastern Mari} and \ili{Western Mari} as well as \ili{Udmurt} are among the core members of the \isi{Volga-Kama area}.\footnote{Other core members of the Volga-Kama \isi{Sprachbund} area are the Turkic languages \ili{Tatar} and \ili{Bashkir}. The Uralic languages Mordvin\il{Mordvin languages} and \ili{Komi-Permyak} are considered peripheral members \citep{helimski2005}.} The languages of this linguistic area show linguistic convergence on several levels of their grammars. In all Uralic and Turkic languages of that area, at least the “emphatic-definite” use of the 3\textsuperscript{rd} person singular possessive suffix is attested. Thus, it cannot be ruled out that the evolving attributive nominalizer in Chuvash, \ili{Udmurt} and the Mari languages has been borrowed in either direction.\is{species marking!definite}%mention ÿsker? \il{Mari languages|)} \il{Tungusic languages|(} The phenomenon might even reflect a much older and more widespread feature of a larger subarea of northern Eurasia including at least Tungusic. As demonstrated in the synchronic \S\ref{tungusic synchr} on Tungusic, similar constructions with the 3\textsuperscript{rd} person singular possessive suffix also seem to regularly occur in this family. Even in other languages of the area, examples of the use of the 3\textsuperscript{rd} person singular possessive suffix as an attributive nominalizer (though not on adjectives) are attested. Example (\ref{mongolian nmlz}) illustrates the use of the 3\textsuperscript{rd} person singular possessive suffix as an attributive nominalizer of pronouns in Khalkha Mongolian. %%% \begin{exe} \ex {\upshape Attributive nominalization in \langinfo{Khalkha}{Mongolic}{\citealt[6]{pavlov1985}}} \label{mongolian nmlz} \begin{xlist} \ex olan {\upshape ‘much’}~– olan\textbf{-ki} {\upshape ‘what is in majority; the largest part’} \ex numaj {\upshape ‘much’}~– numajj\textbf{-i} {\upshape ‘what is in majority; the largest part’} \end{xlist} \end{exe} %%% Note also that the (historical) 3\textsuperscript{rd} person singular possessive suffix occurs in practically all Turkic languages in lexicalized local and temporal attributes. (\ref{chuvash examples}) are examples from Chuvash. 
%%% \begin{exe} \ex {\upshape Attributive nominalization in \langinfo{Chuvash}{Turkic}{\citealt[67–68]{benzing1963}}} \label{chuvash examples} \begin{xlist} \ex \gll śul-χi\\ year-\textsc{loc:poss:3sg}\\ \glt ‘yearly, annual’ (originally ‘what is in a year’) \ex \gll yal-t-i\\ village-\textsc{loc-poss:3sg}\\ \glt ‘local’ (originally ‘what is in a village’) \ex \gll kil-t-i\\ home-\textsc{loc-poss:3sg}\\ \glt ‘domestic’ (originally ‘what is in the home’) \end{xlist} \end{exe} %%% It remains unclear whether the evolution of attributive nominalization and contrastive focus marking of attributive adjectives occurs independently in certain branches or areal groupings across Indo-European, Uralic, Turkic and Tungusic or goes back to a general northern Eurasian areal tendency. \il{Tungusic languages|)} \il{Chuvash|)} \il{Uralic languages|)} \il{Turkic languages|)} \il{Indo-European languages|(} \subsection{Attributive nominalizers in Indo-European} \label{ie diachr} %%% \is{species marking!definite|(} \il{Baltic languages|(} \il{Slavic languages|(} \subsubsection[Baltic and Slavic]{Attributive articles and the emergence of anti\hyp{}construct state agreement marking in Baltic and Slavic} \label{slavic diachr} %%% %The choice of one of these markers versus the other is normally connected to the definiteness or indefiniteness of the noun phrase in the Old Slavic languages. %In \ili{Old Bulgarian} attested definite noun phrases in which the adjective is marked with the short-suffix %restrictions determined by the semantics of the noun, the referential status of the whole NP, but most clearly by the semantics of the adjective: thus relational adjectives only rarely form long forms, and possessive adjectives in particular %occur exclusively in the short form %Another exception are nominalized adjectives in \isi{headless noun phrase}s which occur most often in the long form. (Mendoza) %It is thus questionable whether or not the original function... really was marking of definiteness %%Def marker: regular expression of anaphoric definiteness of a noun phrase; has no further functions \ili{Russian} is the only Slavic language exhibiting anti\hyp{}construct state agreement marking as the default and only type of attributive connection of adjectives (\textit{xorošij} \textsc{attr:nom.m.sg} ‘good’ versus \textit{xoroš} \textsc{pred:nom.m.sg}, see also \S\ref{russian synchr}). The Russian construction where attributive adjectives are obligatorily equipped with special anti\hyp{}construct state agreement suffixes resembles a construction in the closely related Baltic languages. In the latter, however, the occurrence of anti\hyp{}construct state agreement marking is usually described as being restricted to definite noun phrases. The competition between complex attributive agreement and “pure” agreement marking was already characteristic of \ili{Old Baltic languages} (cf.~\ili{Lithuanian} \textit{geràsis} versus \textit{g{\~e}ras}, \ili{Latvian} \textit{labais} versus \textit{labs} ‘good’) and \ili{Old Slavic languages} (cf.~\ili{Old Bulgarian} \textit{dobrъjь} versus \textit{dobrъ} ‘good’). Old Slavic and \ili{Old Baltic languages} are thus similar to modern \ili{Lithuanian} and modern \ili{Latvian} in exhibiting two types of adjective attribution marking suffixes in different functions. In the Slavic and Indo-European linguistic traditions, adjectives equipped with anti\hyp{}construct state agreement marking are normally referred to as “long-form adjectives” (contrasted with “short-form adjectives”). 
Other commonly used terms for the anti\hyp{}construct state agreement markers are “pronominal”, “complex” or “compound” agreement suffixes. Analogously, the two inflectional paradigms of long- versus short-form adjectives equipped with number, gender, and case agreement values are normally labeled “long-form, pronominal, complex, or compound” versus “short-form” adjective declension. Obviously, these terms describe the form or the origin of the formative rather than its function and are thus of little use for typological comparison. As in the modern Baltic languages, the markers are sometimes also labeled “definite” agreement suffixes in Old Slavic.\il{Old Slavic languages} As will be shown below, the notion of “definiteness” does not exactly cover the functionality of the marker in Old Slavic either. The corresponding attributive constructions in modern Slavic and Baltic languages have already been dealt with in the synchronic part of this investigation (especially \S\S\ref{slavic synchr}, \ref{baltic synchr}). In the present chapter, the origin and development of anti\hyp{}construct state agreement marking in Baltic and Slavic along two possible grammaticalization paths (see \S\ref{2paths} below) will be discussed. It will be argued that these constructions arose from attributive articles which originally marked contrastive focus of the attribute, rather than from nominal relative constructions. Before dealing with the syntactic evolution of the attributive constructions in Slavic and Baltic, the etymology of the formative (which is similar for both scenarios) will be sketched in the following short section.
\subsubsection{Etymology of the formative}
Whereas the “pure” agreement declension (of the so-called short-forms) of adjectives continues the \ili{Proto\hyp{}Indo\hyp{}European} default type of adjective attribution marking, the anti\hyp{}construct (long-form) agreement suffixes, as in \ili{Lithuanian} \textit{geràs-is žmõgus}, \ili{Latvian} \textit{laba-is cilvēks}, or \ili{Old Bulgarian} \textit{dobrъ-jь človekъ} ‘the good person’, arose as a result of a phonological merger between the short-form agreement suffixes of the adjective and a pronominal stem reconstructed as \ili{Proto\hyp{}Baltic\slash{}Slavic} \textit{*-jĭ/jь-}. This pronominal part of the long-form agreement suffix likely goes back to a pronominal stem reconstructed as \ili{Proto\hyp{}Indo-European} \textit{*i̭o-} \citep[61]{wissemann1958}. The anti\hyp{}construct state agreement marker in Baltic\slash{}Slavic could thus be cognate with relative markers in other Indo-European languages, such as Old Indo-Aryan\il{Old Indo-Aryan languages} \textit{yá-h}, Old Iranian\il{Old Iranian languages} \textit{yō}, or \ili{Ancient Greek} \textit{hós} \citep[53]{heinrichs1954}. An alternative etymology has been suggested by Mikkola (\citeyear[52]{mikkola1950}; see also \citealt[102]{leskien1871}; \citealt[164–165]{leskien1919}; \citealt[19ff.]{wijk1935}). Mikkola believes that \ili{Proto\hyp{}Baltic\slash{}Slavic} \textit{*-jь-} was an anaphoric marker which goes back to the 3\textsuperscript{rd} person singular pronoun (cf.~\ili{Lithuanian} \textit{jìs}, \textit{jõ} \textsc{3sg:gen} or \ili{Old Bulgarian} \textit{jь}, \textit{jego} \textsc{3sg:gen}).
The phonological merger of Indo-European \textit{*is} \textsc{3sg.m} with \textit{*i̭os} \textsc{m} ‘which’ in Baltic\slash{}Slavic \citep[21 Footnote 8]{schmidt1959} makes this explanation possible from the point of view of sound correspondences. The terminus post quem of the innovative attribution marking in Baltic and Slavic can be determined relatively easily. Different phonological and morphological developments of the long-form agreement suffixes in Baltic and Slavic imply that the phonological merger of the adjective and the formative \textit{*-jь-} took place independently in Old Slavic\il{Old Slavic languages} and Old Baltic\il{Old Baltic languages} \citep[64–65]{koch1992}. It is not certain whether the Baltic and Slavic branches of Indo-European go back to a common \ili{Proto\hyp{}Baltic\slash{}Slavic} proto-form or have to be reconstructed as independent Indo-European daughter languages. If the latter proves to be the case, the rise of anti\hyp{}construct state agreement marking could be a parallel, contact-induced development (as stated, for example, by \citealt[77]{pohl1980}). Since the reconstruction of proto-languages is not an aim of this investigation and since the developments in Baltic and Slavic are similar from a chronological, functional and (Indo-European) etymological point of view, discussing the rise of anti\hyp{}construct state agreement marking in Baltic and Slavic together in the same section is justified.
\subsubsection{Evolution of the construction}
It is commonly assumed that the function of the long-form suffix on the adjective in Old Baltic\il{Old Baltic languages} and Old Slavic\il{Old Slavic languages} was to mark the noun phrase as definite. This opinion is repeated by practically all authors of comparative grammars and reference books of the Baltic\slash{}Slavic languages as well as in works dealing specifically with adjectives and noun phrase syntax of these languages (cf.~\citealt[211]{mendoza2004} with references). However, definite nouns are not obligatorily modified by long-form adjectives in Old Slavic.\il{Old Slavic languages} Furthermore, nominalized (headless) adjectives\is{headless noun phrase} are normally equipped with long-form suffixes, regardless of the referential status of the noun phrase as definite or indefinite. The analysis of the long-form adjective suffix as a definite marker might thus not be as straightforward as it appears in the reference books. \citet[214–215]{mendoza2004} connects the original distribution of long- versus short-forms to contrastive focus marking, i.e., to the restrictive versus non\hyp{}restrictive semantics of the attribute, instead of the referential status of the modified noun. A similar argument is made by \citet{tolstoj1957}, who likewise sees the main function of the long-form adjectives in setting a certain property of a referent apart from the properties of similar referents. The later re-interpretation of such “restrictive” (i.e., contrastive focus) expressions as definite and even the generalization of the original restrictive adjective marker to a marker of anaphoric reference of the modified noun seem functionally plausible. There is no indication, however, that the long-form agreement suffixes morphologized into true definite markers in the \ili{Old Slavic languages}.
Even in the modern stages of the South Slavic languages \ili{Slovenian} and \ili{Serbo-Croatian}, where remnants of the two different adjective inflections still occur, the so-called definite (long-form) declension of adjectives is semantically restricted to certain adjectival subclasses (see \S\ref{s-slavic synchr}). Furthermore, in \ili{Bulgarian} and \ili{Macedonian}, which are the only modern Slavic languages exhibiting a fully morphologized category \textsc{species}, the corresponding definite marking does not originate from the long-form adjectives. This is true despite the fact that the long-form agreement marking in \ili{Old Bulgarian} (i.e., the ancestor language of Modern \ili{Bulgarian} and Modern \ili{Macedonian}) is attested to have almost grammaticalized into a marker of anaphoric reference of the noun phrase. Note also that even the morphological status of the so-called definite adjectives in the modern Baltic languages has been doubted. It has sometimes been argued that the long-form adjective in \ili{Lithuanian} might convey emphasis rather than definiteness, at least in certain expressions (cf.~\citealt[181–182]{kramsky1972}). Even though the suffixes marking long-form agreement in Old Baltic\il{Old Baltic languages} and Old Slavic\il{Old Slavic languages} show some functional extension to markers of anaphoric reference or even definiteness of the noun phrase, this development is secondary. The original function of the long-form agreement suffixes was to mark an adjectival attribute in an emphatic or contrastive focus construction. Consequently, the suffix \textit{*-jь-} in \ili{Proto\hyp{}Baltic\slash{}Slavic} has to be analyzed as an attribution marker on the adjective rather than as a marker of definiteness of the modified noun.
\is{species marking!definite|)}
Leaving aside the question of the further development of the anti\hyp{}construct state agreement marker \textit{*-jь-} in the individual Baltic and Slavic languages, the following sections discuss two opposing theories about its original function and its assumed functional development:
%%%
\begin{itemize}
\item \textbf{Scenario 1:} The formative \textsc{attr} arose from a relative pronoun, hence:\\
\textsc{dem $\Rightarrow$ rel $\Rightarrow$ attr}
\item \textbf{Scenario 2:} The formative \textsc{attr} arose from an attributive article, hence:\\
\textsc{dem $\Rightarrow$ nmlz $\Rightarrow$ attr}
\label{2paths}
\end{itemize}
%%%
\subsubsection{Scenario 1: Nominal relative constructions in \ili{Proto\hyp{}Baltic\slash{}Slavic}}
According to the first theory, the attributive marker in Baltic and Slavic originates from a relative pronoun. This theory has been widely accepted since Delbrück's and Brugmann's statements on the question (cf.~\citealt[432–433]{delbruck1893}; \citealt[331, 344]{brugmann-etal1916}). Their argumentation has been taken up and augmented with new data by \citet{schmidt1959}, \citet{koch1992,koch1999} and others. Koch argues that a reflex of the \ili{Proto\hyp{}Indo-European} relative pronoun \textit{*(h)i̭o-} is attested as an attributive marker of adjectival, possessive,\is{adnominal modifier!possessor noun} and adverbial modifiers\is{adnominal modifier!adverbial phrase} of nouns in \ili{Proto\hyp{}Baltic\slash{}Slavic}. He describes the constructions in which these attributes occur as “nominal relative constructions” \citep[470, passim]{koch1999}.
The most substantial part of Koch's argumentation is the similar use of cognate relative pronouns as polyfunctional markers in relative constructions, as attested in Old Iranian\il{Old Iranian languages} and the \ili{Old Indo-Aryan languages}.
%%%
\il{Old Persian|(}
\begin{exe}
\ex {\upshape Ezafe in \langinfo{Old Persian}{Indo-European}{\citealt{meillet1931}, here cited after \citealt[4]{samvelian2007b}}} \label{ez oldpersian}
\begin{xlist}
\ex {\ob}kāra {\ob}\textbf{hya} manā{\upshape ]]}
\glt ‘my army’ (lit. ‘army which is mine’)
%%%
\ex {\ob}kāsaka {\ob}\textbf{hya} kapautaka{\upshape ]]}
\glt ‘the blue stone’ (lit. ‘stone which is blue’)
\ex vivānam jatā utā avam {\ob}kāram {\ob}\textbf{hya} dārayavahaus xšāyaθiyhyā{\upshape ]]}
\glt ‘Beat Vivâna and his army which declares itself as a proponent of the king Darius.’
\end{xlist}
\end{exe}
%%%
Koch's (\citeyear[53, passim]{koch1992}) main arguments for the old age of the relative function of \textit{*(h)i̭o-} in \ili{Proto\hyp{}Indo-European} are found in attested cognate markers. In several Indo-European languages, the historical \textit{*(h)i̭o-} pronoun marks relative constructions similar to those in the Old Persian examples (\ref{ez oldpersian}). However, Koch does not disprove the assumption that the relative function of the pronoun derives from deictic-anaphoric marking by means of a demonstrative. In fact, the Old Persian examples (\ref{ez oldpersian}) clearly show verb-less relative constructions linked to the head noun with an attributive article.
\il{Old Persian|)}
Furthermore, it is not certain whether the old pronoun (or article) \textit{*(h)i̭o-} was inherited into \ili{Proto\hyp{}Baltic\slash{}Slavic}. The pronominal stem is attested in Baltic and Slavic only as the base of some derived connectors \citep[56]{heinrichs1954}. Even though the etymological pronoun seems to be preserved in the stem of the \ili{Old Bulgarian} relative marker \textit{jь-že}, the relative function of this marker is clearly contributed by the emphatic particle \textit{-že} \citep[56]{heinrichs1954}. The old relative pronoun seems to be completely lost in Old Baltic,\il{Old Baltic languages} where different relative markers occur (as in \ili{Lithuanian} \textit{ku\~rs} $\Leftarrow$ \textit{kurìs}, \ili{Latvian} \textit{kuŕš}, as noted by \citealt[15]{schmidt1959}). \citet[468, 470]{koch1999} dates the original relative construction back to an early \ili{Pre-Proto\hyp{}Baltic\slash{}Slavic} age. According to him, the relative pronoun did not agree in case with the head noun in the inherited Indo-European relative construction (\ref{koch rel}). Such morpho-syntactic behavior would in fact be expected from a true relative pronoun. But according to Koch's reconstruction (\ref{koch nomzr}), case agreement between a head noun and a relative pronoun was already present in \ili{Proto\hyp{}Baltic\slash{}Slavic}.
Finally, the long-form agreement inflection arose independently as a result of the phonological merger of the adjective and the original pronoun in Old Baltic\il{Old Baltic languages} and Old Slavic\il{Old Slavic languages} (\ref{koch attr}). Most crucial in this reconstruction is the fact that the assumed original relative pronoun obviously never marked a true relative clause construction in \ili{Proto\hyp{}Baltic\slash{}Slavic}.
%%%
\begin{exe}
\ex \label{koch rel}
\begin{xlist}
\ex {\upshape Nominal relative constructions in Pre-Proto\hyp{}Baltic\slash{}Slavic \citep[468]{koch1999}}\footnote{The example is glossed in accordance with Koch; a translation is missing in the source.}\\
\glll *dråugås gīvås jås {\upshape /} *dråugåm gīvås jås\\
friend:\textsc{nom} good:\textsc{nom} \textsc{rel:nom} / friend:\textsc{acc} good:\textsc{nom} \textsc{rel:nom}\\
N\textsubscript{nom} A\textsubscript{nom} \textsc{rel}\textsubscript{nom} { } N\textsubscript{acc} A\textsubscript{nom} \textsc{rel}\textsubscript{nom}\\
%%%
\ex {\upshape \ili{Proto\hyp{}Baltic\slash{}Slavic} attributive article}\\
\label{koch nomzr}
\glll *dråugås gīvås-jås {\upshape /} *dråugåm gīvåm-jåm\\
friend:\textsc{nom} good-\textsc{nmlz:nom} / friend:\textsc{acc} good-\textsc{nmlz:acc}\\
N\textsubscript{nom} A\textsubscript{nom}-\textsc{nmlz}\textsubscript{nom} { } N\textsubscript{acc} A\textsubscript{acc}-\textsc{nmlz}\textsubscript{acc}\\
%%%
\ex {\upshape Old Baltic/Old Slavic anti\hyp{}construct state agreement marking}\\
\label{koch attr}
\glll *dråugås gīvå-jås {\upshape /} *dråugåm gīvå-jåm\\
friend:\textsc{nom} good-\textsc{attr:nom} / friend:\textsc{acc} good-\textsc{attr:acc}\\
N\textsubscript{nom} A-\textsc{attr}\textsubscript{nom} { } N\textsubscript{acc} A-\textsc{attr}\textsubscript{acc}\\
\end{xlist}
\end{exe}
%%%
This assumed development presupposes the transition of original “nominal relative constructions” in \ili{Pre-Proto\hyp{}Baltic\slash{}Slavic} (stage 1) to a construction with an attributive article (\textsc{nmlz}) in \ili{Proto\hyp{}Baltic\slash{}Slavic} as an intermediate stage (2). The anti\hyp{}construct (“long-form”, i.e., \textsc{attr}) agreement marking arose as a last stage (3) in Old Baltic\il{Old Baltic languages} and Old Slavic.\il{Old Slavic languages}
\largerpage
%%%
\begin{itemize}
\item Stage 1 [\textsubscript{NP} \textsubscript{HEAD}N {\ob}\textsubscript{ATTRIBUTE(CLAUSE)} A\textsubscript{[+agr]} \textsc{rel}\textsubscript{[-agr]}{\upshape ]]}
\item Stage 2 [\textsubscript{NP} \textsubscript{HEAD}N {\ob}\textsubscript{ATTRIBUTE(NP')} A\textsubscript{[+agr]}-\textsc{nmlz}\textsubscript{[+agr]}{\upshape ]]}
\item Stage 3 [\textsubscript{NP} \textsubscript{HEAD}N \textsubscript{ATTRIBUTE(A)}A-\textsc{attr}\textsubscript{[+agr]}{\cb}
\end{itemize}
%%%
Koch's reconstruction provides no conclusive arguments for the existence of “nominal relative constructions” marked with a relative pronoun \textit{*(h)i̭o-} in \ili{Pre-Proto\hyp{}Baltic\slash{}Slavic}. Theoretically, the attributive nominalization construction (stage 2) could be much older and be the primary one in Indo-European. The corresponding “nominal relative constructions” in Indo-Aryan\il{Indo-Aryan languages} and Iranian\il{Iranian languages} might just as well originate from attributive nominalization constructions.
The Indo-European relative pronoun \textit{*(h)i̭o-} would then go back to a deictic pronoun, probably \textit{*i-} ($\Rightarrow$ \ili{Latin}, \ili{Gothic} \textit{is} \textsc{dem}), which was used as an attributive article as early as in \ili{Proto\hyp{}Indo-European}.
The question of constituent order is left open in Koch's reconstruction. In the examples (\ref{koch rel}–\ref{koch nomzr}), the attribute is marked by a postposed pronoun and follows the noun. In the Old Iranian and Old Indo-Aryan languages, in which the assumed cognate relative pronoun is attested, the attribute also follows the noun, but the relative marker occurs between the constituents. Already in Old Slavic and Old Baltic, the adjective predominantly preceded the noun, although postposed adjectives are attested, especially in emphatic expressions, up to the present day. The constituent order change from Indo-European NA to Baltic\slash{}Slavic AN is thus unproblematic.
\subsubsection{Scenario 2: Attributive nominalizing constructions in \ili{Proto\hyp{}Baltic\slash{}Slavic}}
According to the second idea about the emergence of the long-form adjectives in Baltic\slash{}Slavic, the attributive marker was originally an article. One opponent of the “relative” theory is van Wijk, who believes
%%%
\begin{quote}
[\dots] dass wir fürs Slavische vollständig auskommen ohne die Annahme relativer Pronominalformen vom idg. Stamme \textit{i̭e/i̭o-}, und dass dasselbe für das Baltische gilt.\footnote{[\dots] that there is no need whatsoever in the case of Slavic to assume the existence of relative pronominal forms going back to the Indo-European stem \textit{i̭e/i̭o-} and that the same is true for Baltic.} \citep[28]{wijk1935}
\end{quote}
%%%
Leaving open whether an attributive article or a relative pronoun constitutes the ultimate origin of the anti\hyp{}construct state agreement in \ili{Pre-Proto\hyp{}Baltic\slash{}Slavic}, Koch's reconstruction would in fact be compatible with van Wijk's “article theory”. The attributive nominalizing construction with the pronominal marker \textit{*-jь-} as an attributive article in \ili{Proto\hyp{}Baltic\slash{}Slavic} is clearly reflected in stage 2 of Koch's reconstruction (\ref{koch nomzr}). The final stage 3, in which the attributive nominalizer becomes an anti\hyp{}construct state marker, is essentially identical to the development assumed by \citet{wijk1935}. The most plausible functional explanation of the grammaticalization of the pronominal marker \textit{*-jь-} into an attributive article is formulated by Wissemann (\citeyear{wissemann1958}). He argues that the original function of the anti\hyp{}construct (“long-form”) agreement suffixes was that of a “Gelenkspartikel”\is{attributive article} \citep[76]{wissemann1958}, i.e., an \textit{attributive article} or \textit{attributive nominalizer} in terms of the present study. Wissemann also shows that the function as an anaphoric (“quasi-definite”) noun phrase marker is secondary.\is{species marking!definite} Another argument in favor of the attributive nominalizing function of the \ili{Proto\hyp{}Baltic\slash{}Slavic} attributive article \textit{*-jь-} can be found in its polyfunctional use with different types of attributes.
Besides marking the attributive connection of (emphasized) adjectives and participles, the article also served to mark some non-adjectival (and originally non-agreeing) attributes, such as adverbial phrases\is{adnominal modifier!adverbial phrase} and noun phrases marked with genitive.\is{adnominal modifier!possessor noun} \citet[467–468]{koch1999} gives a list of lexicalized attributive expressions in which \textit{*-jь-} occurs as an attributive marker. These examples of frozen nominalizations present evidence of the original attributive nominalizing function of the \ili{Proto\hyp{}Baltic\slash{}Slavic} article.
%%%
\begin{exe}
\ex
\begin{xlist}
\ex {\upshape Attribution of adverbial phrases}
\begin{xlist}
\ex {\upshape \ili{Old Bulgarian}}\\
utrějь {\upshape ‘tomorrow (attr.)’ $\leftarrow$} (j)utrě {\upshape ‘morning’}
\ex {\upshape \ili{Old Bulgarian}}\\
vьnějь {\upshape ‘outside (attr.)’ $\leftarrow$} vьně {\upshape ‘(on the) outside’}\\
bezumajь {\upshape ‘ignorant’ $\leftarrow$} bez uma {\upshape ‘without mind’}
\ex {\upshape \ili{Old Bulgarian}}\\
nabožijo̜jь {\upshape ‘pleasing to God (attr.)’ $\leftarrow$} na božijo̜ {\upshape ‘pleasing to God’}
\end{xlist}
%%%
\ex {\upshape Attribution of noun phrases in genitive (attested only in Baltic)}
\begin{xlist}
\ex {\upshape \ili{Lithuanian}}\\
di\~evojis {\upshape ‘god-like (attr.)’ $\leftarrow$} di\~evo {\upshape \textsc{gen.sg} $\leftarrow$} di\~evas {\upshape \textsc{nom.sg} ‘God’}
\ex {\upshape \ili{Lithuanian}}\\
pači\~u̜jis {\upshape ‘belonging to (attr.)’ $\leftarrow$} pači\~u̜ {\upshape \textsc{gen.pl} $\leftarrow$} pàts {\upshape \textsc{nom.pl} ‘self’}
\end{xlist}
\end{xlist}
\end{exe}
%%%
Koch's examples thus provide the best arguments for the opposite assumption, namely that attributive nominalizing constructions are the source of the marker. This contradicts his own suggestion that anti\hyp{}construct state agreement marking in Baltic\slash{}Slavic originates from nominal relative constructions; in other words:
%%%
\begin{itemize}
\item \textbf{Scenario 2:} \textsc{dem $\Rightarrow$ nmlz $\Rightarrow$ attr}
\end{itemize}
%%%
\is{species marking!definite|(}
\il{Germanic languages|(}
\subsubsection[Germanic]{Attributive nominalizers and the emergence of anti\hyp{}construct state agreement marking in Germanic}
\label{germanic diachr}
%%%
As in the Baltic\slash{}Slavic languages, the emergence of attributive nominalizers in Germanic is functionally connected to the rise of definiteness marking. In the modern Baltic and some South Slavic languages, the occurrence of anti\hyp{}construct state agreement marking is restricted to (semantically) definite noun phrases. This functional division between “true” \isi{head\hyp{}driven agreement} and anti\hyp{}construct state agreement marking was already characteristic of all Old Baltic\il{Old Baltic languages} and \ili{Old Slavic languages}. As in \ili{Proto\hyp{}Baltic\slash{}Slavic}, a secondary inflectional paradigm of adjectives was innovated in \ili{Proto\hyp{}Germanic}. This so-called weak adjective declension has often been described as the first definite marking device in Germanic (e.g., by \citealt{heinrichs1954} and \citealt[170]{ringe2006}) because its use was restricted to (semantically) definite noun phrases. Semantic definiteness, however, was never marked obligatorily in any of the \ili{Old Germanic languages}. Even though demonstrative pronouns were sometimes used in semantically definite phrases, definite markers had not yet been grammaticalized in the Old Germanic varieties.
Examples from Old Germanic text sources show that the use of both demonstratives and “weak adjectives” in definite phrases was optional (cf.~\citealt{philippi1997}; \citealt{heinrichs1954}).
\il{Baltic languages|)}
\il{Slavic languages|)}
Only the modern Germanic languages exhibit true definite markers and thus a grammaticalized feature \textsc{species}. But the so-called definite articles of the modern Germanic languages originate from etymological sources which are different from those of the older anti\hyp{}construct state agreement marking suffixes. Following \citet[267–268]{riesler2006a}, the rise of the Germanic “weak” adjective declension is here explained as a result of attributive nominalization.
%%%
\begin{exe}
\ex {\upshape “Strong” and “weak” agreement in Proto\hyp{}Germanic \citep[169]{ringe2006}}
\begin{xlist}
\ex {\upshape Head\hyp{}driven (“strong”) agreement}\\
\gll *k\textsuperscript{w}ik\textsuperscript{w}a-\\
quick:\textsc{m.sg.nom-}\\
%%%
\ex {\upshape Anti\hyp{}construct state (“weak”) agreement}\\
\gll *k\textsuperscript{w}ik\textsuperscript{w}a\textbf{-n-}\\
quick:\textsc{m.sg.nom}\textbf{\textsc{-nmlz-}}\\
\glt ‘quick’
\end{xlist}
\end{exe}
%%%
The \ili{Pre-Proto\hyp{}Germanic} formative marking “weak” agreement is sometimes described as an “individualizing” or “nominalizing” suffix of nominals (i.e., adjectives and, perhaps, nouns as well). These functions are reflected in (nick)names such as \ili{Ancient Greek} \textit{ágáthōn} ‘the Good’ ($\leftarrow$ \textit{ágáthós} ‘good’) or \ili{Latin} \textit{Catō} ‘the Shrewd’ ($\leftarrow$ \textit{catus} ‘shrewd’), which are derived from nominals equipped with the cognate suffix \textit{*-n-} \citep[170]{ringe2006}.\footnote{Names such as Latin \textit{Marcus Catō, Ovidius Nasō} are interpreted as ‘Marcus the cunning’ and ‘Ovidius the nose’ \citep[6–7]{nocentini1996}.} Some scholars have reconstructed a pronominal stem extension \textit{*-en-/-on-} as the origin of the suffix (for example \citealt[52]{mikkola1950} and \citealt[67]{heinrichs1954}). Others have expressed doubt about the pronominal origin of this marker (for example \citealt[21 Footnote 6]{schmidt1959}). But even without a definitively reconstructed etymology of the formative, the construction clearly shows similarities with the attributive nominalization of adjectives in \ili{Proto\hyp{}Baltic\slash{}Slavic}. It thus seems relatively safe to follow Mikkola (\citeyear{mikkola1950}) and Heinrichs (\citeyear{heinrichs1954}) in assuming that the weak adjective declension in Germanic goes back to a construction with an attributive nominalizer. \citet[170]{ringe2006} finds it “reasonable to hypothesize that the \textit{n-}stem suffix of the weak adjective paradigm was originally a definite article”. But this hypothesis must be rejected because the marker was never obligatory in definite contexts. As in Baltic\il{Baltic languages} and Slavic,\il{Slavic languages} it seems much more plausible to assume that the article was never a true definiteness marker.
It can rather be assumed that the key to understanding the origin of the “weak” adjective declension in Germanic is the nominalizing function of the \textit{article},\is{attributive article} which originally marked an (emphatically contrasted) adjective as an appositional attribute.\is{contrastive focus}
\is{species marking!definite|)}
The rise of anti\hyp{}construct state agreement marking of attributive adjectives in Germanic thus followed a similar grammaticalization path as in Baltic\il{Baltic languages} and Slavic.\il{Slavic languages}\footnote{The zero-morpheme (equipped with the nominalizer Ø-\textsc{nmlz}) in (\ref{germanic gram1}) and the following examples is presented only to better illustrate the empty head position to which the (nominalized) adjective moves in the appositional noun phrase.}
%%%
\begin{exe}
\ex {\upshape Grammaticalization of anti\hyp{}construct state agreement in Germanic} \label{germanic gram1}
\begin{xlist}
\ex \label{germanic1} {\upshape Stage 1}
\begin{xlist}
\ex {\upshape Agreement marking (default)}\\
{\ob}\textsubscript{\upshape NP} \textsubscript{\upshape A}big{\rm -\textsc{agr}} \textsubscript{\upshape N}house{\cb}
%%%
\ex {\upshape Attributive apposition (emphatic)}\\ \label{germanic art1}
{\ob}\textsubscript{\upshape NP} {\ob}\textsubscript{\upshape NP'} \textsubscript{\upshape A}big \textsubscript{\upshape HEAD}{\rm Ø-\textsc{nmlz}}{\cb} \textsubscript{\upshape N}house{\upshape ]]}
\end{xlist}
%%%
\ex \label{germanic2} {\upshape Stage 2}
\begin{xlist}
\ex {\upshape Agreement marking (default)}\\
{\ob}\textsubscript{\upshape NP} \textsubscript{\upshape A}big{\rm -\textsc{agr}} \textsubscript{\upshape N}house{\cb}
%%%
\ex {\upshape Agreement marking (emphatic)}\\ \label{germanic ACAgr}
{\ob}\textsubscript{\upshape NP} \textsubscript{\upshape A}big{\rm -\textsc{agr:contr}} \textsubscript{\upshape N}house{\cb}
\end{xlist}
%%%
\ex \label{germanic3} {\upshape Stage 3}
\begin{xlist}
\ex {\upshape Agreement marking (default)}\\
{\ob}\textsubscript{\upshape NP} \textsubscript{\upshape A}big{\rm -\textsc{agr:attr}} \textsubscript{\upshape N}house{\cb}
\end{xlist}
\end{xlist}
\end{exe}
%%%
During Stage 1 (\ref{germanic1}), the attributive nominalizer (i.e., the pronominal stem extension \textit{*-en-/-on-}) competed with the default adjective attribution marking device (i.e., the inherited Indo-European \isi{head\hyp{}driven agreement}) but was restricted to emphatic attributive appositional constructions. This stage can be dated back to \ili{Proto\hyp{}Germanic} at the latest. In all \ili{Old Germanic languages}, the original attributive appositional construction was reanalyzed\is{re-analysis} as a true noun phrase in which the former attributive nominalizer marks an adjective in contrastive focus. During Stage 2 (\ref{germanic2}), this secondary attribution marking device still competed with the default adjective attribution marking device (i.e., \isi{head\hyp{}driven agreement}). The competition between the two different adjective attribution marking devices was resolved during Stage 3 (\ref{germanic3}). This stage is reflected by the modern \ili{West Germanic languages}, where only one type of adjective attribution marking occurs.
Because agreement inflection of adjectives in the modern \ili{West Germanic languages} (except in \ili{English}) marks only attributive but not predicative adjectives,\is{predicative marking} this adjective attribution marking device has been characterized as anti\hyp{}construct state agreement (see \S\ref{w-germanic synchr}).
\is{species marking!definite|(}
\subsection{Definite noun phrases in Germanic}
%%%
In the previous section, it was shown that the grammaticalization of the feature \textsc{species} (definiteness) in Germanic is a relatively recent phenomenon which is not directly connected to the rise of attributive nominalization and anti\hyp{}construct state agreement marking (so-called “weak” or “definite” agreement). Even though anti\hyp{}construct state agreement usually occurred in semantically definite noun phrases, true definite markers evolved much later. The etymological sources of the definite markers were local-deictic (demonstrative) pronouns: \ili{Proto\hyp{}Germanic} \textit{*sa, *sō, *þat}, in North Germanic also \textit{en, enn, et} \citep[15]{heinrichs1954}. Interestingly, the definite markers evolving from the first set of \ili{Proto\hyp{}Germanic} demonstratives were also first used as attribution markers of adjectives \citep{gamillscheg1937, nocentini1996}. Later, the use of the articles was extended from appositional (nominalized) adjectives to whole noun phrases \citep[63]{philippi1997}. If the grammaticalization path illustrated in (\ref{germanic gram1}) is extended by one more stage, the evolution of definiteness marking in Germanic can be included as well. Note that the additional developments in the grammaticalization path (\ref{germanic gram2}) are also partly connected to adjective attribution.
\newpage
%%%
\begin{exe}
\il{West Germanic languages}
\ex {\upshape Grammaticalization of definiteness marking in West Germanic} \label{germanic gram2}
\begin{xlist}
\ex {\upshape Stage 3}
\begin{xlist}
\ex {\upshape Agreement marking (default)}\\
{\ob}\textsubscript{\upshape NP} \textsubscript{\upshape A}big{\rm -\textsc{agr:attr}} \textsubscript{\upshape N}house{\cb}
%%%
\ex {\upshape Attributive apposition (emphatic)}\\ \label{germanic art2}
{\ob}\textsubscript{\upshape NP} {\ob}\textsubscript{\upshape NP'} \textsubscript{\upshape ART}the \textsubscript{\upshape A}big{\rm -\textsc{agr:attr}} \textsubscript{\upshape HEAD}{\rm Ø}{\cb} \textsubscript{\upshape N}house{\cb}
\end{xlist}
\ex {\upshape Stage 4}
\begin{xlist}
\ex {\upshape Definiteness marking}\\ \label{germanic def}
{\ob}\textsubscript{\upshape NP} \textsubscript{\upshape DEF}the \textsubscript{\upshape A}big{\rm -\textsc{agr:attr}} \textsubscript{\upshape N}house{\cb}
\end{xlist}
\end{xlist}
\end{exe}
%%%
Note that an attributive apposition construction for marking emphasis occurs twice in the illustrated grammaticalization path (\ref{germanic gram2}). In Stage 1 (\ref{germanic art1}), the attributive nominalizer is the pronominal stem extension \textit{*-en-/-on-}, which becomes the anti\hyp{}construct state agreement marker in the following stage (\ref{germanic ACAgr}). The second attributive nominalizer in Stage 3 (\ref{germanic art2}) is the demonstrative pronoun, which becomes the definite marker in the following stage (\ref{germanic def}). These two attributive nominalizers have different etymological sources and attach to different positions inside the noun phrase, but they are functional equivalents.
Stage 4 in example (\ref{germanic gram2}) did not fully affect North Germanic.\il{North Germanic languages} Instead, the \ili{Old North Germanic languages} (Old East\il{Old East Norse} and \ili{Old West Norse}) grammaticalized definite markers from the demonstratives \textit{en, enn, et} \citep[15]{heinrichs1954}. These markers are the complete morpho-syntactic opposite of their West Germanic counterparts:\il{West Germanic languages} unlike the preposed, phonologically free definite marker of West Germanic, all modern North Germanic\il{North Germanic languages} standard languages exhibit a postposed definite noun inflection. The different morpho-syntactic realization of the general Germanic tendency towards grammaticalization of definiteness is best explained as contact-induced change due to Saamic\il{Saamic languages} influence in North Germanic \citep{kusmenko2008}.
%%%
\begin{exe}
\ex {\upshape Grammaticalization of definiteness marking in Germanic} \label{germanic gram3}
\begin{xlist}
\ex {\upshape Stage 4}
\begin{xlist}
\ex {\upshape Definiteness marking (West Germanic)}\\
{\ob}\textsubscript{\upshape NP} \textsubscript{\upshape DEF}the \textsubscript{\upshape A}big{\rm -\textsc{agr:attr}} \textsubscript{\upshape N}house{\cb}
%%%
\ex {\upshape Definiteness marking (North Germanic)}\\ \label{ngermanic def}
{\ob}\textsubscript{\upshape NP} \textsubscript{\upshape ATTR:AGR}the\textsubscript{\upshape agr:attr} \textsubscript{\upshape A}big{\rm -\textsc{agr:attr}} \textsubscript{\upshape N}house{\rm -\textsc{def}}{\cb}
\end{xlist}
\end{xlist}
\end{exe}
%%%
Note that in the North Germanic Stage 4 (\ref{ngermanic def}), the former preposed nominalizer (article) did not grammaticalize into a true definite marker as in West Germanic but into an anti\hyp{}construct state agreement marker. The noun phrase structure is thus different from Stage 3 (\ref{germanic art2}) because the attributive apposition of the nominalized headless adjective\is{headless noun phrase} is lost and the semantic head of the overall noun phrase is syntactically reunited with its adjectival modifier. Synchronic data from different North Germanic\il{North Germanic languages} varieties reflect intermediate stages in the evolution of definite noun phrase structure. This cross-linguistic variation is most likely the result of competing grammaticalization of a preposed article and a postposed definite inflection \citep{dahl2003}. Like all modern \ili{West Germanic languages},\footnote{In English, the noun phrase structure is similar in principle, with the exception of adjectives in headless noun phrases, which are obligatorily nominalized: \textit{the good \textbf{one}}; see also \S\ref{w-germanic synchr}.} the Western Jutlandic\il{Danish!W-Jutlandic} dialect of Danish exhibits phrasal definite marking by means of a phonologically free and preposed definite article.
%%%
\begin{exe}
\il{Danish!W-Jutlandic}
\ex {\upshape W-Jutlandic}\footnote{The examples are constructed according to \citet{lund1932}, cf.~also \citet[121–122]{delsing1993} and \citet{dahl2003}.}
\begin{xlist}
\ex de korn {\upshape [\textsc{def} corn]}
\ex de god (et) {\upshape [\textsc{def} good:\textsc{agr} (\textsc{nmlz:agr})]}
\ex de god korn {\upshape [\textsc{def} good:\textsc{agr} corn]}
\end{xlist}
\end{exe}
%%%
In several of the northernmost North Germanic\il{North Germanic languages} varieties, definiteness is also marked phrasally, but by means of a phonologically bound and postposed formative.
Consequently, the phrasal definite marker attaches as a suffix to definite nouns and definite headless adjectives\is{headless noun phrase} alike. Note also that adjectives are incorporated into (or compounded with) the head noun.
%%%
\begin{exe}
\il{Swedish!Västerbotten}
\ex {\upshape Västerbotten Swedish}\footnote{The examples are constructed according to \citet{astrom1893}, cf.~also \citet[122–123]{delsing1993} and \citet{dahl2003}.}
\begin{xlist}
\ex korn-e {\upshape [corn-\textsc{def}]}
\ex god-e {\upshape [good-\textsc{def}]}
\ex god-korn-e {\upshape [good-corn-\textsc{def}]}
\end{xlist}
\end{exe}
%%%
\il{Swedish|(}
In the North Germanic languages \ili{Norwegian},\footnote{New- and Dano-Norwegian\il{Norwegian!New-Norwegian}\il{Norwegian!Dano-Norwegian}} Swedish, and \ili{Faroese}, the definite marker is an inflectional suffix as in the Västerbotten dialect of Swedish,\il{Swedish!Västerbotten} i.e., phonologically bound and postposed. The formative is, however, exclusively a noun marker and does not show up on adjectives in definite \isi{headless noun phrase}s. The latter are not overtly marked as definite but show circum-positioned definite agreement marking by means of a preposed attributive article and definite agreement inflection.
%%%
\begin{exe}
\il{Swedish}
\ex {\upshape Swedish (personal knowledge)}
\begin{xlist}
\ex[]{ korn-et {\upshape [corn-\textsc{def}]} }
\ex[]{ det god-a korn-et {\upshape [\textsc{nmlz:agr} good-\textsc{agr} corn-\textsc{def}]} }
\ex[]{ det god-a {\upshape [\textsc{nmlz:agr} good-\textsc{agr}]} }
\ex[*]{ det korn-et {\upshape [\textsc{def} corn-\textsc{def}]} }
\end{xlist}
\end{exe}
%%%
\il{Danish|(}
\il{Icelandic|(}
In Danish (\ref{danish examples}) and (colloquial) Icelandic (\ref{icelandic examples}), the definite marker has two allomorphs: an inflectional noun suffix as in Swedish (i.e., phonologically bound and postposed) and a definite article as in the \ili{West Germanic languages} (i.e., phonologically free and preposed). Interestingly, the allomorphy of the definite marker in Danish and Icelandic is triggered by the part-of-speech membership of the host: whereas the bound allomorph selects for nouns, the free form selects for adjectives.
\il{Swedish|)}
%%%
\begin{exe}
\ex {\upshape Danish (personal knowledge)} \label{danish examples}
\begin{xlist}
\ex[]{ korn-et {\upshape [corn-\textsc{def}]} }
\ex[]{ det god-e korn {\upshape [\textsc{def} good-\textsc{agr} corn]} }
\ex[]{ det god-e {\upshape [\textsc{def} good-\textsc{agr}]} }
\ex[*]{ det god-e korn-et {\upshape [\textsc{def} good-\textsc{agr} corn-\textsc{def}]} }
\end{xlist}
\end{exe}
%%%
\begin{table}[b]
\begin{tabular}{lccc}
\lsptoprule
&\textsc{utr} &\textsc{n} &\textsc{pl}\\
\midrule
\textsc{def} &-en [den] &-et [det] &-{Ø} [de]\\
\lspbottomrule
\end{tabular}
\caption[Paradigm of \textsc{def} in Danish]{Paradigm of the definite marker in Danish (personal knowledge).
Note that the choice of whether the suffix or the free form constitutes the base morpheme or the allomorph seems arbitrary.} \label{danish defallomorph}
\end{table}
\il{Danish|)}
%%%
\begin{exe}
\ex {\upshape Icelandic (personal knowledge)} \label{icelandic examples}
\begin{xlist}
\ex[]{ korn-ið {\upshape [corn-\textsc{def}]} }
\ex[]{ hið goð-a {\upshape [\textsc{def} good-\textsc{agr}]} }
\ex[]{ hið goð-a korn {\upshape [\textsc{def} good-\textsc{agr} corn]} }
\ex[*]{ hið goð-a korn-ið {\upshape [\textsc{def} good-\textsc{agr} corn-\textsc{def}]} }
\end{xlist}
\end{exe}
\il{Icelandic|)}
\il{North Germanic languages|(}
\is{buffer zone|(}
\subsection{“Double definiteness” and a “buffer zone” in North Germanic}
\label{buffer}
%%%
The geographic distribution of the different morpho-syntactic types of definiteness marking across North Germanic reveals interesting areal patterns. The occurrence of adjective incorporation coincides with the area in which the preposed article is missing. Both features are characteristic of the northeastern periphery of North Germanic (\citealt{delsing1996b}, cf.~also \citealt{riesler2001a,riesler2002a}). The structural connection between adjective incorporation and the missing preposed article is obvious: the construction with the compounded (incorporated) adjective in definite noun phrases replaces the corresponding construction with the preposed article in those dialects where a preposed article has not (yet) been developed from the former demonstrative. The northeastern North Germanic data thus reflects an early Stage 3 in the illustrated grammaticalization path (\ref{germanic3}). The northeastern North Germanic dialect area constitutes the innovation center of the grammaticalization of a (suffixed) inflectional category \textsc{species} (definiteness). The southwestern North Germanic dialects, located at the very opposite geographic periphery, exhibit the structural mirror image of northeastern North Germanic; their direction of evolution is almost identical to the situation in West Germanic.\il{West Germanic languages} Dahl describes the phrasal definite markers in the southwestern and northeastern North Germanic dialects as the result of structurally and geographically opposed processes of grammatical change.
%%%
\begin{quote}
[T]he variation we can see in the attributive constructions is the result of the competition between them about the same territory. \citep[147]{dahl2003}
\end{quote}
%%%
The “competition” between northeastern and southwestern grammaticalization tendencies in Germanic is not restricted to definite marking. Several grammatical categories which developed as the result of common Germanic (or even Indo-European) tendencies have grammaticalized into non-fusional (analytic) constructions in West Germanic\il{West Germanic languages} but into concatenative (synthetic) constructions in North Germanic. Language contact with neighboring \ili{Uralic languages} would offer the most plausible explanation for these structurally differentiated developments inside the Germanic branch. Consequently, Kusmenko (\citeyear{kusmenko2008}) proposed a model explaining the morphological fusion of definiteness and other innovative North Germanic categories as the result of interference features during the language shift of the assimilated Saami of mediaeval Scandinavia.
A direct connection between language contact and the rise of adjective incorporation and the missing preposed adjective article in the northeastern North Germanic varieties was also suggested by \citet{riesler2001a,riesler2002a}. But even if this idea cannot be proven correct, the historical connection between missing preposed adjective articles, adjective incorporation and the morpho-syntactic type of definiteness marking (i.e., morphologically fused and postposed) in the northeastern North Germanic dialect area is obvious. Saamic\il{Saamic languages} influence (causing the morphological fusion of postposed definiteness marking) would thus at least be an indirect trigger of these areal grammaticalization phenomena in North Germanic, which can be described as a “buffer zone” \citep{stilo2005}.\footnote{Stilo coined the term for a similar language area between competing grammaticalization tendencies due to contact-induced changes in the Southern Caucasus.\is{Caucasus} The parallel between Stilo's “buffer zone” and Dahl's (\citeyear{dahl2003}) “competing” morpho-syntactic types in the North Germanic languages was first mentioned to the author by Tania Kuteva (p.c.).\ia{Kuteva, Tania} But neither Dahl nor Kuteva drew contact-linguistic implications in the North Germanic case. The idea of the North Germanic “buffer zone” as an indirect result of contact-induced changes was first mentioned by \citet{riesler2006a}.}
\begin{table}[t]
\newcommand{\noarrow}[1]{\multicolumn{1}{p{1.8cm}}{#1}}
\resizebox{\textwidth}{!}{
\begin{tabular}{c@{\,$\Rightarrow$\,}c@{\,$\Rightarrow$\,}c@{\,$\Rightarrow$\,}c@{\,$\Rightarrow$\,}cl}
\lsptoprule
\noarrow{}&\noarrow{Proto-\newline Germanic}&\noarrow{Old \newline Germanic}&\noarrow{Modern \newline Germanic}\\
\midrule
\textsc{dem1}&\textsc{art1}&\textsc{attr}&\textsc{agr}& Ø &English, (W-Jutlandic)\\
\textsc{dem1}&\textsc{art1}&\textsc{attr}&\noarrow{\hspace*{3mm}\textsc{agr}}&\noarrow{}&W+N-Germanic\\
\noarrow{}\\
\noarrow{}&\noarrow{}&\textsc{dem2}&\textsc{art2}&\textsc{def1}&W(+N)-Germanic\\
\noarrow{}&\noarrow{}&\textsc{dem2}&\noarrow{\hspace*{2.3mm}\textsc{art2}}& \noarrow{} &N-Germanic\\
\noarrow{}&\noarrow{}&\noarrow{\hspace*{2.2mm}\textsc{dem2}}&\noarrow{} & \noarrow{} &Västerbotten Swedish\\
\noarrow{}\\
\noarrow{}&\noarrow{}&\textsc{dem3}&\noarrow{\hspace*{2mm}\textsc{def2}}&\noarrow{} &N-Germanic\\
\lspbottomrule
\end{tabular}
}
\caption[Article grammaticalization cycle in Germanic]{Article grammaticalization cycle in Germanic languages (adapted from \citealt[272]{riesler2006a}).}
\end{table}
\is{species marking!definite|)}
\il{North Germanic languages|)}
\il{Germanic languages|)}
\il{Indo-European languages|)}
\is{buffer zone|)}
\subsection[Attributive nominalization and anti\hyp{}construct state]{Attributive nominalization and the grammaticalization of anti\hyp{}construct state (agreement) marking}
%%%
The previous sections described how anti\hyp{}construct state agreement marking arose in the Baltic,\il{Baltic languages} Slavic\il{Slavic languages} and Germanic\il{Germanic languages} branches of Indo-European. Structurally similar developments were also described for \ili{Udmurt} from the Permic branch of Uralic, for \ili{Chuvash}, and for other so-called \ili{Uralo-Altaic languages} in \S\ref{uralic-turkic diachr}.
The emergence of attributive nominalizers as secondary attribution markers seems to reflect a general tendency in several branches of the \il{Indo-European languages}Indo-European, \il{Uralic languages}Uralic and \il{Turkic languages}Turkic language families. The etymological source of the attributive nominalizer in all of these languages is either a local-deictic determiner or the 3\textsuperscript{rd} person possessive marker with “determinative” functions.
\il{Lezgic languages|(}
Synchronic data from several languages of the Lezgic (Daghestanian) branch of Nakh-Daghestanian (see \S\ref{lezgian synchr}) seem to reflect a similar grammaticalization path from deictics to attributive nominalizers. Most Lezgic languages sampled for the present study have \isi{juxtaposition} as the default adjective attribution marking device. Attributive nominalization also occurs in most languages of this branch but is restricted to \isi{headless noun phrase}s. The attributive nominalizer is a stem augment \textit{-tV- / -dV-} which could be connected historically to the deictic pronouns occurring with similar shapes in these languages. In \ili{Budukh}, the cognate suffix \textit{-ti} is not used as an attributive nominalizer but to emphasize “a high degree of quality”, cf.~\textit{godak} ‘short’ : \textit{godak-ti} ‘very short’ \citep[267]{alekseev1994b}. In \ili{Rutul}, the cognate marker \textit{-d} is used by default as an anti\hyp{}construct state marker on attributive adjectives \citep[224]{alekseev1994a}. A different but nevertheless related function of the cognate marker is attested in \ili{Archi}, where the suffix \textit{-t̄u} derives adjectives from nouns, adverbs and postpositions \citep[318]{kibrik1994b}.
\ia{Himmelmann, Nikolaus|(}
The data from Lezgic deserves further investigation, but it suggests a pattern where the dependent\hyp{}marking attributive state evolves from attributive nominalization. It is also obvious that the attributive nominalizers in Uralic\il{Uralic languages} and Turkic\il{Turkic languages} have evolved along a grammaticalization path similar to the one described for several Indo-European\il{Indo-European languages} (and other) languages by \citet{himmelmann1997}. However, important differences between Himmelmann's “linking articles”\is{attributive article} and the attributive nominalizers described here are (1) the origin of the Uralic and Turkic nominalizers from person-deictic rather than from local-deictic markers and (2) the inflectional use of the markers in Uralic and Turkic as compared to their original adnominal use in Indo-European.
\il{Lezgic languages|)}
\is{species marking!definite|(}
The data from Uralic\il{Uralic languages} and Turkic\il{Turkic languages} is especially interesting, since it contradicts Himmelmann's (\citeyear[220–221]{himmelmann1997}) assumption that a functional convergence between attributive nominalizers with a person-deictic and those with a local-deictic etymological source is unlikely to occur. Of central importance to Himmelmann's analyses is the “anamnestic” use of the deictic markers from which the articles are grammaticalized. According to Himmelmann, the use of “D[eictic] elements” to refer to properties the speaker believes to be well known to her/his interlocutor is the most relevant precondition for their further grammaticalization into articles and definite markers. Whereas the anamnestic use is inherent in (local-deictic) demonstratives, the same is not true for (person-deictic) possessive markers.
The further grammaticalization of demonstratives into functional determinative elements (like articles and definiteness markers in several \ili{Indo-European languages}) is accompanied by a functional extension from an original “anamnestic” use to an associative-anaphoric use of the markers. This is in contrast to the further grammaticalization of possessive markers into functional determinative elements (like attributive articles and quasi-definiteness markers in certain \ili{Uralic languages}), which is accompanied by a functional extension from an original associative-anaphoric use to an “anamnestic” use.
%%%
\begin{quote}
D\hyp{}Elemente breiten sich von pragmatisch\hyp{}definiten Kontexten auf semantisch\hyp{}definite aus, während Possessivpronomina sich umgekehrt von einem semantisch\hyp{}definiten Kontext auf einen bzw. mehrere pragmatisch\hyp{}definite Kontexte ausdehnen.\footnote{D\hyp{}Elements extend from pragmatically definite contexts to semantically definite contexts, whereas possessive pronouns extend in the opposite direction, from one semantically definite context to one or more pragmatically definite contexts.} \citep[221]{himmelmann1997}
\end{quote}
%%%
Himmelmann's thesis regarding the opposite functional extension of person-deictics might still be valid and compatible with the Uralic and Turkic data. In those Uralic\il{Uralic languages} and \ili{Turkic languages} with attested attributive nominalization, the definite function of the possessive marker is also always present. It can therefore be assumed that the definite (or quasi-definite) use of the marker obligatorily occurs as an intermediate step during the grammaticalization of possessive markers to attributive nominalizers.
\ia{Himmelmann, Nikolaus|)}
\is{species marking!definite|)}
%%%
\il{Uralic languages}
\il{Turkic languages}
\begin{itemize}
\item \textbf{Person-deictic source} (Uralic, Turkic)\\
\textsc{poss $\Rightarrow$ def $\Rightarrow$ nmlz}
\end{itemize}
%%%
In the Indo-European languages with attributive articles, such an intermediate step is probably not necessary.
\il{Indo-European languages}
\begin{itemize}
\item \textbf{Local-deictic source} (Indo-European)\\
\textsc{dem ($\Rightarrow$ def) $\Rightarrow$ nmlz}
\end{itemize}
%%%
In fact, in the West Germanic\il{West Germanic languages} and \ili{South Slavic languages}, definite markers evolved from attributive nominalizers, not vice versa.\is{species marking!definite}
%%%
\il{West Germanic languages}
\il{South Slavic languages}
\begin{itemize}
\item \textbf{Local-deictic source (West Germanic, South Slavic)}\\
\textsc{dem $\Rightarrow$ nmlz ($\Rightarrow$ def)}
\end{itemize}
%%%
This observation will be taken up again. If the tentative observation on the languages with “grammaticalized person-deictic elements” (i.e., possessive markers as attributive nominalizers) proves right, it would imply the following implicational universal:
%%%
\begin{exe}
\ex {\upshape \textbf{Implicational universal}}\label{universal}\\
\textit{Possessive markers develop into attributive nominalizers only in languages in which similar possessive markers are already used as markers of (quasi-) definiteness.}
\end{exe}
%%%
Whereas the etymology and the evolution of attribution markers in Indo\hyp{}European have been described (more or less systematically) by different authors, much less has been written about the emergence of attribution markers in the different Uralic and Turkic languages.
The emergence of anti\hyp{}construct state marking in Saamic, which has not been described at all, appears to be especially interesting in this respect.
\is{attributive nominalization|)}
\il{Saamic languages|(}
\section[Anti\hyp{}construct state in Saamic]{The emergence of anti\hyp{}construct state marking in Saamic}
\label{saamic diachr}
%%%
In \S\ref{udmurt diachr}, it was shown that the contrastive focus marker in \ili{Udmurt} most likely evolved from an attributive article. \citet{riesler2006b} suggested that a similar construction was the ultimate source of anti\hyp{}construct state marking in the languages of the relatively closely related Saamic branch of Uralic. Since this theory about the rise of attribution marking in Saamic is based on a controversial idea, it calls for a relatively detailed discussion, which will be presented in the following sections. In \S\ref{saami synchr}, it was shown that adjectives in all Saamic languages are normally marked morpho-syntactically by means of differentiated attributive and predicative state markers. Even though the system of attributive and \isi{predicative marking} is highly irregular in the Saamic languages, it can be shown that the attributive forms of adjectives are prototypically marked with a suffix, \ili{Northern Saami} \textit{-s}. This suffix constitutes a prototypical example of an anti\hyp{}construct state marker, i.e., a dependent\hyp{}marking attributive morpheme. The origin of anti\hyp{}construct state marking in Saamic is controversial. The suffix \textit{-s} is definitely not inherited from \ili{Proto\hyp{}Uralic}. It is probably not borrowed from any of the known current or historical contact languages of Saamic either. Considering this, as well as the fact that Saamic is a rare instance among the northern Eurasian languages in exhibiting anti\hyp{}construct state marking on adjectives, it is surprising that relatively little attention has been paid to explaining its origin.
\subsection{State of research}
%%%
The proposed theories explaining the origin of the anti\hyp{}construct state marker on adjectives in Saamic can be summarized as follows:
%%%
\begin{enumerate}
\item Grammatical borrowing from Indo-European
\item Functional extension of an adjective-derivational marker
\item Grammaticalization from an attributive nominalizer\is{attributive nominalization}
\end{enumerate}
%%%
The idea of a grammaticalization from an attributive nominalizer, presented by \citet{nielsen1933} and \citet{atanyi1943}, is the only contribution to the subject spelled out in any detail. Interestingly enough, the idea has been rejected as “hardly convincing” (my translation) in a one-sentence statement in Korhonen's (\citeyear{korhonen-m1981}) historical grammar of Saami. Korhonen's judgement that the origin of the attributive suffix in Saamic is still unclear \citep[246]{korhonen-m1981} seems to reflect the state of research to this day. None of the three hypotheses mentioned above has been discussed seriously in Saami or Uralic historical linguistics.\footnote{An exception is a short article by \citet{sarv-m2001}, who presents the different ideas but does not come to conclusive results.} All three hypotheses will be evaluated in the following.
\subsubsection{Grammatical borrowing from Indo-European}
%%%
Trond Trosterud\ia{Trosterud, Trond} (p.c.) has suggested that the attributive suffix in Saamic originates from an ending typical of \ili{Proto\hyp{}Germanic} loan adjectives in Saami.
The Saamic suffix \textit{-s} would then reflect the (pre-rhotacism) form of the \ili{Proto\hyp{}Germanic} case suffix \textit{-R} for masculine nominative singular, which was adopted into \ili{Proto\hyp{}Saamic} together with loan adjectives. According to this hypothesis (which is not discussed in any publication so far), the adjective ending \mbox{\textit{-s}} occurred originally on Germanic loan adjectives but was later generalized and used with inherited adjectives as well. In fact, a considerable number of Germanic\il{Germanic languages} loan adjectives with the corresponding ending \textit{-s} <~\ili{Proto\hyp{}North Germanic} \textit{-R} \textsc{m.nom.sg} are attested in Saamic, for instance:
%%%
\begin{itemize}
\item \ili{Northern Saami} \textit{smáves} ‘small’ $\Leftarrow$ \ili{Proto\hyp{}Saamic} \textit{*smāv̀e̮} <~\ili{Proto\hyp{}North Germanic}; cf.~\ili{Old Norse} \textit{smalr} \textsc{m} (or a more recent North Germanic\il{North Germanic languages} borrowing; cf.~\ili{Swedish} \textit{sm\aa}; \citealt[263]{sammallahti1998b})
\item \ili{Lule Saami} \textit{riukas} ‘far-reaching’ <~\ili{Proto\hyp{}North Germanic}, cf.~\ili{Old Norse} \textit{drùgr}, \ili{Norwegian} \textit{drjug} \citep[267]{qvigstad1893}
\item \ili{Lule Saami} \textit{lines} ‘soft, yielding, mild’ <~\ili{Proto\hyp{}North Germanic}, cf.~\ili{Old Norse} \textit{linr}, \ili{Norwegian} \textit{lin} \citep[218]{qvigstad1893}
\item \ili{Northern Saami} \textit{luov\.{o}s $\sim$ luovus} ‘loose, not tied’ $\Leftarrow$ \ili{Proto\hyp{}Saamic} \textit{*luovōs $\sim$ *luove̮s} <~\ili{Proto\hyp{}North Germanic} \textit{*lauss} \textsc{m} (where the suffix \textit{-R} is assimilated into /s/) \citep[264]{sammallahti1998b}
\item \ili{Northern Saami} \textit{suohtas} ‘fun, nice’ $\Leftarrow$ \ili{Proto\hyp{}Saamic} \textit{*suohte̮s} <~\ili{Proto\hyp{}Germanic} \textit{*swōtu-} \citep[264]{sammallahti1998b}, cf.~\ili{Old Norse} \textit{*søtr} \textsc{m}
\item \ili{Northern Saami} \textit{viiddis} ‘wide, extensive’ $\Leftarrow$ \ili{Proto\hyp{}Saamic} \textit{*vij{\dh}ēs} <~\ili{Proto\hyp{}North Germanic} \citep[148–149]{lehtiranta1989}, cf.~\ili{Old Norse} \textit{v\'i{\dh}r} \textsc{m}.
\end{itemize}
%%%
The sound change of \ili{Proto\hyp{}Germanic} \textit{*-z} $\Rightarrow$ \ili{Proto\hyp{}North Germanic} \textit{-R} ($\Rightarrow$ \ili{Common North Germanic} \textit{-r}) took place around 500 AD. The hypothesis of the loan origin of the Saamic attributive suffix presupposes that the corresponding suffix in Germanic\il{Germanic languages} had a sound value [-z] (or ?[-s]). The exact sound value of \textit{-R}, however, is not at all certain. What is commonly accepted is that the sound was phonologically distinguished from /r/ \citep{skold1954}.

From the point of view of its etymology, the adjective ending \textit{-s} is identical to the ending \textit{-s} of some borrowed \ili{Proto\hyp{}Germanic} nouns, such as \ili{Proto\hyp{}Saamic} \textit{*vālās}, cf.~\ili{Northern Saami} \textit{fàlis} ‘whale’ <~\ili{Proto\hyp{}North Germanic}, cf.~\ili{Old Norse} \textit{hvalr}, cf.~\ili{Norwegian} \textit{hval} (\citealt[144]{qvigstad1893}; \citealt[144–145]{lehtiranta1989}) or \ili{Proto\hyp{}Saamic} \textit{*kāllēs}, cf.~\ili{Northern Saami} \textit{gállis} ‘old man’ <~\ili{Proto\hyp{}Germanic} \textit{*karilaz} \textsc{m} \citep[44–45]{lehtiranta1989}. The ending \textit{-s} in disyllabic nominals is thus an indicator that the word in question might belong to the layer of \ili{Proto\hyp{}North Germanic} borrowings in Saamic.
\is{predicative marking|(}
In many instances of Germanic loan adjectives, however, the ending \textit{-s} marks only the predicative and not the attributive form; consider (from the list above):
%%%
\begin{itemize}
\item \ili{Northern Saami} \textit{smávva} [small.\textsc{attr}] $\leftarrow$ \textit{smáves} ‘small’
\item \ili{Lule Saami} \textit{riuka} [far-reaching.\textsc{attr}] $\leftarrow$ \textit{riukas} ‘far-reaching’
\item \ili{Lule Saami} \textit{littna} [soft.\textsc{attr}] $\leftarrow$ \textit{lines} ‘soft’
\end{itemize}
%%%
Other loan adjectives have identical forms with the ending \textit{-s} in both predicative and attributive function:
%%%
\begin{itemize}
\item \ili{Northern Saami} \textit{luov\.{o}s $\sim$ luovus} ‘loose’
\item \ili{Northern Saami} \textit{suohtas} ‘fun, nice’
\item \ili{Northern Saami} \textit{viiddis} ‘wide, extensive’
\end{itemize}
%%%
It is unclear whether the Germanic\il{Germanic languages} loan adjectives ending in \textit{-s} regularly occurred in both attributive and predicative positions already in \ili{Proto\hyp{}Saamic}, whether the ending \textit{-s} expanded from predicative to attributive forms, or vice versa. The relatively regular occurrence of the ending \textit{-s} in the predicative forms suggests that the corresponding Germanic\il{Germanic languages} loan adjectives also ending in \textit{-s} were originally used to denote predicates rather than attributes. This seems reasonable from the point of view of the morpho-semantics of the borrowed Germanic\il{Germanic languages} adjectives as well. The ending \textit{-R} ($\Leftarrow$ \textit{*-z}) marks masculine nominals only in the so-called strong declension and thus more likely occurred on predicative adjectives, which normally denote temporary properties. Attributive adjectives in Germanic,\il{Germanic languages} by contrast, could be marked either by means of \isi{head\hyp{}driven agreement} (“strong declension”) or anti\hyp{}construct state agreement (“weak declension”) depending on the semantic or referential status of the attribute. An adjective denoting a permanent property was normally marked with the anti\hyp{}construct state agreement suffix (see \S\ref{germanic diachr}). Consequently, the Saamic ending \textit{-s} could have been borrowed exclusively from “strong” adjectives in masculine nominative singular, the only form which had the ending \textit{-R} ($\Leftarrow$ \textit{*-z}) in \ili{Proto\hyp{}North Germanic}. It is thus doubtful that precisely the borrowed forms with \textit{-s} would have been generalized as attributive forms by bilingual speakers in the assumed Saamic-Germanic language contact situation.\footnote{There is no doubt that language contact between speakers of Proto\hyp{}Saamic and Proto\hyp{}North Germanic took place; cf.~\citealt{kusmenko2008}. It is, however, rather irrelevant to the case described here which contact scenario has to be assumed: borrowing proper or shift-induced interference in the Saamic L2 of original Germanic speakers.} It should therefore be assumed that the Germanic loan etymology of certain adjectives in Saamic does not provide a clue to the origin of the attributive suffix.

Another problem with the hypothesis of the Germanic origin of the Saamic adjective ending \textit{-s} might be the class of inherited Saamic adjectives which also have the ending \textit{-s} when used predicatively.
Consider the following examples:
%%%
\begin{itemize}
\item \ili{Northern Saami} \textit{báhkas} ‘hot’ $\leftarrow$ \textit{báhkka} [hot.\textsc{attr}] $\Leftarrow$ \ili{Proto\hyp{}Saamic} \textit{*pāh\-ke̮s} $\Leftarrow$ \ili{Pre-Proto\hyp{}Saamic} \textit{*pakka-} ‘hot; cold’; cf.~Finnish \textit{pakkanen} ‘frost’ \citep[230]{sammallahti1998b}
\item \ili{Northern Saami} \textit{garas} ‘hard’ $\leftarrow$ \textit{garra} [hard.\textsc{attr}] $\Leftarrow$ \ili{Proto\hyp{}Saamic} \textit{*ke̮\`re̮-} $\Leftarrow$ \ili{Pre-Proto\hyp{}Saamic} \textit{*kiri-}; cf.~Finnish \textit{kireä} ‘tight, tense’ \citep[242]{sammallahti1998b}
\item \ili{Northern Saami} \textit{o{\dh}as} ‘new’ $\leftarrow$ \textit{o{\dh}{\dh}a} [new.\textsc{attr}] $\Leftarrow$ \ili{Proto\hyp{}Saamic} \textit{*o\`{\dh}e̮-}\\ \citep[258]{sammallahti1998b}.
\end{itemize}
%%%
Since the most typical \ili{Proto\hyp{}Saamic} root can be reconstructed as an open disyllabic,\footnote{Cf.~the list of reconstructed Proto\hyp{}Saamic lexemes in \citet{lehtiranta1989}.} the ending \textit{-s} of these predicative adjectives could not have belonged to the root originally. The ending-less attributive forms in the examples above would then reflect the original adjective roots, characterized as disyllabics with an open second syllable. According to the \ili{Proto\hyp{}Saamic} morpho-phonological rules, the stem consonant center exhibits the strong grade before an open second syllable, unlike the predicative forms, which have a closed second syllable ending in \textit{-s} and show the weak grade of the consonant center. The same morpho-phonological rule applies to loan adjectives with ending-less attributive forms (like ‘small’ in \ili{Northern Saami}: \textit{smávva} [small.\textsc{attr}] $\leftarrow$ \textit{smáves}).

If one adopts the idea of \textit{-s} originally being a Germanic\il{Germanic languages} case suffix, the attributive forms of the loan adjectives in Saamic can only be derived from the strong-declension forms of Germanic\il{Germanic languages} predicative adjectives and not from attributive adjectives. In the case of the inherited Saamic adjectives, however, it is usually assumed that the predicative ending \textit{-s} is derivational (see also the following paragraph). This assumption presupposes that the ending-less (attributive) adjective is the base form from which the predicative form is derived by means of the derivational ending \textit{-s}.
%But this would likewise fit the morpho-phonological rules with weak consonant stem grades in the attributive forms.
\is{predicative marking|)}

\subsubsection{Functional extension of an adjective-derivational marker}
According to \citet[96]{bergsland1946}, the origin of the attributive suffix \textit{-s} in Saamic is identical with that of the synchronically homophonous adjective derivational suffix \textit{-s} originating from a lative case marker. Cognate formatives deriving adjectives from nouns occur in other \ili{Uralic languages}, like \ili{Hungarian} \textit{erős} ‘powerful, strong’ ($\leftarrow$ \textit{erő} ‘power, strength’), \textit{kékes} ‘bluish’ ($\leftarrow$ \textit{kék} ‘blue’). The development of local case expressions to adjectives is semantically plausible and could in principle be assumed for Saamic.
The local case suffix was probably first used as an adverbalizer of nominal stems and became a true adjectivizer at a later stage, hence:\is{adjective derivation}
%%%
\begin{itemize}
\item \textsc{lative case} $\Rightarrow$ \textsc{adverbalizer} $\Rightarrow$ \textsc{adjectivizer}
\end{itemize}
%%%
The intermediate stage in the assumed development from a local case expression to an adjective is reflected in place adverbs like \ili{Northern Saami} \textit{guhkás} ‘(going) far’ $\Leftarrow$ \ili{Proto\hyp{}Saamic} \textit{*kuhkā-se̮} \citep[246]{sammallahti1998b} and probably also in other adverbal derivations, like the collective numerals on \textit{-s}, cf.~\ili{Northern Saami} \textit{golmmas} ‘a group of three’ $\leftarrow$ \textit{golbma} ‘three’.

\is{predicative marking|(}
Since predicative adjectives are not the subject of this investigation, it is sufficient to observe that both the assumed (inherited) lative derivation and the assumed suffix borrowing are possible scenarios which do not necessarily exclude each other. As a result of these developments, a lexically defined subclass of adjectives with predicative forms on \textit{-s} arose in \ili{Common Saamic} (or earlier). The marker of this class of adjectives, the ending \textit{-s}, is either:
%%%
\begin{itemize}
\item borrowed from \ili{Proto\hyp{}North Germanic} \textit{-R} \textsc{m.nom.sg},
\item derived (historically) from the \textsc{lative case}, %give the etymology of the LATIVE
\item or the result of a merger of both developments.
\end{itemize}
%%%
The adjective class characterized by predicative forms on \textit{-s} (which has more or less regular ending-less attributive forms) is clearly identifiable in all modern Saamic languages.

Bergsland's (\citeyear[96]{bergsland1946}) suggestion that the similar ending \textit{-s} in the attributive forms of certain adjectives likewise goes back to the Uralic lative case suffix is relevant to the present investigation. As can be deduced from his statement that the attributive suffix \textit{-s} is “originally a Finno-Volgaic lative suffix”, \citet[71]{sammallahti1998b} agrees with Bergsland's explanation. \citet{judakin1997} also argues along these lines. The adjective ending \textit{-s}, which is the basis for Bergsland's and Sammallahti's argumentation, marks the predicative form of some adjectives and the attributive form of others. There are only a few adjectives which have the ending \textit{-s} in both predicative and attributive forms. Neither Bergsland\ia{Bergsland, Knut} nor Sammallahti\ia{Sammallahti, Pekka} discusses the question as to whether the assumed lative derivation originally occurred: a) on predicative adjectives, b) on attributive adjectives, or c) on both forms simultaneously.

A cross-comparison of cognate forms of attributive and predicative adjectives in different Saamic languages suggests that adjectives with similar predicative and attributive forms with \textit{-s} form a minor class which very likely arose as the result of a secondary development. Cross-comparison can also provide evidence for separate etymologies of the two homophonous predicative and attributive endings \textit{-s}. The lative derivational suffix can only be the source of the suffix \textit{-s} which is homophonous on predicative and attributive adjectives in the modern West Saamic languages. The original attributive adjective suffix, however, should be reconstructed as a (phonetically palatalized) suffix *[-sʲV\textsubscript{[+front]}], with the sibilant preceding a front vowel.
In the easternmost \ili{Kola Saami languages}, the attributive suffix \textit{-s'} has a palatalized coda and is clearly distinct from the non-palatalized \textit{-s} on predicative adjectives, as well as from the (cognate) lative adverbalizer \textit{-s}.
%%%
\il{Kildin Saami} \il{Northern Saami}
\begin{exe}
\settowidth\jamwidth{(Northern Saami)}
\ex
\begin{xlist}
\ex {\upshape Adjective stem ‘long (pred.)’}\\
\textit{guhkki} \jambox{ {\upshape Northern Saami} }
\textit{kuhk'} \jambox{ {\upshape Kildin Saami} }
%%%
\ex {\upshape Adverb ‘(going) far’}\\
{\upshape (adverbalizer suffix (non-palatalized) $\Leftarrow$ \textit{*-s})}\\
\textit{guhkás} \jambox{ {\upshape Northern Saami} }
\textit{kugkas} \jambox{ {\upshape Kildin Saami} }
%%%
\ex {\upshape Attributive form ‘long (attr.)’}\\
{\upshape (attributive suffix (palatalized) $\Leftarrow$ \textit{*-s'})}\\
\textit{guhkes} \jambox{ {\upshape Northern Saami} }
\textit{kugk'es'} \jambox{ {\upshape Kildin Saami} }
\end{xlist}
\end{exe}
\is{predicative marking|)}

\is{attributive nominalization|(}
\subsubsection{Grammaticalization from an attributive nominalizer}
%%%
A different hypothesis about the origin of the attributive forms in Saamic has been proposed by József Budenz (\citeyear{budenz1870}; according to \citealt{atanyi1943}), who believed that the suffix \textit{-s} represents the original possessive suffix 3\textsuperscript{rd} person singular. Budenz does not give any evidence specifically for Saami. He simply assumes that the determinative function of the possessive suffix, a similar use of which he observed in different Uralic\il{Uralic languages} and \ili{Turkic languages} (see \S\ref{uralic-turkic diachr}), caused the development in Saami. Budenz's idea was taken up specifically for Saamic by István Atányi (\citeyear{atanyi1942}, reprinted in \citealt{atanyi1943}). Atányi also refers to Nielsen (\citeyear{nielsen1933}, reprinted in \citealt{nielsen1945b}), who had a similar idea (probably independently of Budenz, whom he does not refer to).

This hypothesis on the origin of the attributive forms in Saamic perfectly accounts for the different phonological shapes of the (historical) adjectivizer\is{adjective derivation} \mbox{\textit{*-s}} and the attributive suffix \textit{-s} ($\Rightarrow$ E-Saamic \textit{-s'}). According to this theory, recently taken up again by \citet{riesler2006b}, the attributive suffix \textit{-s/-s'} reflects an old 3\textsuperscript{rd} person singular possessive suffix which was used as an attributive article on contrastively emphasized adjectives. The reconstructed \ili{Proto\hyp{}Saamic} forms of the possessive marker \textit{*-sē} \citep[73]{sammallahti1998b} versus the adjectivizer\is{adjective derivation} \textit{*-se̮} are consistent with the synchronic findings. The different phonological form of the two suffixes (/-s\textsuperscript{j}/ versus /-s/) in the \ili{Kola Saami languages} and the phonological merger of both suffixes (non-palatalized /-s/) in the western Saamic languages can be accounted for by a regular sound law: in the \ili{Kola Saami languages} the apocope of etymologically front vowels (\textit{*i, *e}) is reflected by the palatalization of the consonant preceding the lost vowel. Apocope of non-front vowels (like \textit{*-se̮}) did not affect the quality of the consonant. This sound law does not apply to the western Saamic languages, which do not exhibit (phonological) palatalization, and consequently consonants preceding etymologically front and back vowels are non-palatalized.
%%%
\begin{exe}
\settowidth\jamwidth{(Northern Saami)}
\ex
\begin{xlist}
\ex {\upshape ‘guest’ (not possessed)}
\begin{xlist}
\ex[*]{kuasse \jambox{ {\upshape \ili{Proto\hyp{}Saamic}} }}
\ex[]{kuss' \jambox{ {\upshape \ili{Kildin Saami}} }}
\ex[]{guossi \jambox{ {\upshape \ili{Northern Saami}} }}
\end{xlist}
%%%
\ex {\upshape ‘her/his/its guest’ (marked with \textsc{poss:3sg} suffix)}
\begin{xlist}
\ex[*]{kuasse-sē \jambox{ {\upshape \ili{Proto\hyp{}Saamic}} }}
\ex[]{kuss'es' \jambox{ {\upshape \ili{Kildin Saami}} }}
\ex[]{guossis \jambox{ {\upshape \ili{Northern Saami}} }}
\end{xlist}
\end{xlist}
\end{exe}
%%%
Besides the overall irregularity of attributive marking in all Saamic languages (see \S\ref{saami synchr}), the different morpho-phonological behavior of the nominal stems to which \textsc{poss:3sg} and \textsc{attr} attach appears to be an argument against this reconstruction.
%%%
\il{Kildin Saami} \il{Northern Saami}
\begin{exe}
\settowidth\jamwidth{(Northern Saami)}
\ex {\upshape Strong (\textsc{str}) and weak (\textsc{wk}) consonant grade in adjectives and nouns}
\begin{xlist}
\parbox{8.2cm}{
\ex \glll kugk'{\upshape (\textsc{wk})}-es' suhk{\upshape (\textsc{str})} \\
\textit{guhke}(\textsc{wk})\textit{-s} \textit{suohkku}(\textsc{str}) \\
long-\textsc{attr} stocking\\
\glt ‘the long stocking’
} \parbox{3cm}{\upshape Kildin Saami\\Northern Saami\\~\\} \\
\parbox{8.2cm}{
\ex \glll suhk{\upshape (\textsc{str})}-es' lī kuhk'{\upshape (\textsc{str})} \\
\textit{suohkku}(\textsc{str})\textit{-s} \textit{lea} \textit{guhkki}(\textsc{str})\\
stocking-\textsc{poss:3sg} is long.\textsc{pred.}\\
\glt ‘her stocking is long’
} \parbox{3cm}{\upshape Kildin Saami\\Northern Saami\\~\\} \\
\parbox{8.2cm}{
\ex \glll kugk'{\upshape (\textsc{wk})}-es' sugk{\upshape (\textsc{wk})}-es't \\
\textit{guhke}(\textsc{wk})\textit{-s} \textit{suohku}(\textsc{wk})\textit{-s} \\
long-\textsc{attr} stocking-\textsc{loc.sg}\\
\glt ‘in the long stocking’
} \parbox{3cm}{\upshape Kildin Saami\\Northern Saami\\~\\}
\end{xlist}
\end{exe}
%%%
A noun marked for possession is in the strong consonant grade. An adjective marked for attribution is always in the weak grade. In the example above, the strong grade of the consonant (orthographically represented as \textit{hk} in Kildin Saami and \textit{hkk} in \ili{Northern Saami}) occurs in the nominative case of the bare or possessive-marked noun (\textit{suhk/suohkku, suhkes'/suohkkus}) as well as in the predicative form\is{predicative marking} of the adjective (\textit{kuhk'/kuhkki}). The attributive form of the adjective (\textit{kugk'/guhkes}) and the noun stem hosting the locative suffix (\textit{sugkes't/suohkus}) are in the weak grade.

Historically, consonant gradation was a purely phonological process where the strong consonant grade always occurred before the open final syllable of a disyllabic word. The stem consonant was phonetically shortened when the final open syllable was closed due to inflectional processes. Consonant gradation was later morphologized due to phonological attrition and the loss of certain inflectional suffixes. From a synchronic point of view, the consonant gradation rules account for the weak consonant grade in the attributive form of the adjective but not for the strong grade in the noun with possessive marking. The \ili{Northern Saami} words \textit{suohkku} ‘stocking’ and \textit{guhkki} ‘long (pred.)’ have open second syllables and hence strong consonant stems (here a consonant cluster, the first part of which is a geminate /\=CC/).
The second syllable in both forms is closed: \textit{suohkkus} /suoh:.ku-s/ marked with the possessive suffix and \textit{guhkis} /kuh.ki-s/ marked with the attributive suffix. However, the consonant stem of the noun \textit{suohkkus} remains strong (/\=CC/) even before the syllable-closing suffix, whereas the geminate part of the cluster is shortened (/CC/) in the adjective \textit{guhkis}.

It is important to note that the possessive suffix is reconstructed as \ili{Proto\hyp{}Saamic} \textit{*-sē} \citep[73]{sammallahti1998b} and thus originally had a different syllable structure. The formative obviously did not close the second syllable in \ili{Proto\hyp{}Saamic}, as in **/kuh:.ke.-sē/ and **/suoh:.ku.-sē/.\footnote{Note that these invented examples in simplified transcriptions serve the purpose of illustration (and are hence marked with **). The stem of the adjective ‘long’ is reconstructed as Proto\hyp{}Saamic \textit{*ku\`{h}kē} \citep[246]{sammallahti1998b}. The noun ‘stocking’ is a loan word (cf.~\ili{Swedish} (dialectal) \textit{sokk}, \ili{Finnish} \textit{sukka}) and might not be reconstructable for Proto\hyp{}Saamic.} From a diachronic point of view, the consonant gradation rules would thus account for the strong consonant grade in the noun marked with a possessive suffix but not for the weak grade in the attributive adjective.

\largerpage
Two possible scenarios could explain the different consonant grades in the noun and the adjective marked by means of \textit{-s} $\Leftarrow$ \textit{*-sē}.
%%%
\begin{itemize}
\item Following Nielsen (\citeyear{nielsen1945b}), the possessive marker in its function as attributive nominalizer was originally attached to a genitive (i.e., weak stem) form of the adjective. The weak consonant stem was thus triggered by the genitive suffix, reconstructed as \ili{Pre-Proto\hyp{}Saamic} \textit{*-n} $\Rightarrow$ \ili{Proto\hyp{}Saamic} \mbox{*-Ø} \citep[65]{sammallahti1998b} and preceding the attributive marker. The date of the morphologization of stem gradation would not be relevant for this explanation.
\item The other possible explanation presupposes a relatively late date for the morphologization of stem gradation, i.e., not earlier than the apocope of the possessive marker's final vowel (\textit{-s $\Leftarrow$ *-sē}). If the possessive marker was not a true suffix but a phonological word on its own by the time stem gradation was morphologized in Saamic, the marker would have remained outside the phonological domains of its host word and would not have been able to trigger stem gradation on the latter.
\end{itemize}
%%%
Since genitive (or “possessor case”) marking on attributive adjectives is attested in other northern Eurasian languages, as in both Yukaghir\il{Yukaghir languages} (see \S\ref{yukagir synchr}) and \ili{Lezgic languages} (see \S\ref{lezgian synchr}), Nielsen's assumption that the 3\textsuperscript{rd} singular possessive marker was originally attached to an attributive form of adjectives (or other nominals) in the genitive is possible in principle. Yet there is no evidence that genitive attribution marking on adjectives ever occurred regularly in Saamic or even in other \ili{Uralic languages}.\footnote{The defective agreement paradigm\is{agreement marking!defective agreement paradigm} of pronouns (and even sometimes adjectives) with the genitive singular form in all cases except nominative singular can scarcely be connected to Nielsen's idea.
As an anti\hyp{}construct state marker, the “genitive” should occur throughout the whole paradigm, including the nominative singular.} Furthermore, the functional side of the assumed development, in which an adjective would have been marked by two attributive markers (genitive + attributive nominalizer) simultaneously, would also need some further clarification.

The second hypothesis, that the possessive marker never triggered stem gradation, could also account for the weak consonant grade in adjectives (remember that the weak grade seemed to contradict the stem gradation rules from a historical point of view). In certain respects, the possessive marker behaves like a free pronoun rather than like an affix: the possessive marker shows pronominal agreement (and hosts the agreement suffixes which co-reference the number of the possessor) but the marker itself is hosted by an inflected noun (marked for number and case of the possessed). Note also that the possessive inflection is morpho-syntactically different from case and number inflection in the closely related \ili{Finnic languages}. Only the latter features trigger noun phrase internal agreement.

Only the 3\textsuperscript{rd} person singular possessive marker was used as an attributive nominalizer. Since this marker was hosted by uninflected adjectives, it is reasonable to assume that at one point the nominalizing possessive marker behaved differently from true possessive markers. The attributive nominalizer might thus have become a true phonologically bound formative earlier than the homophonous possessive marker. As a result of the apocope of the suffix-final vowel, the second syllable in the attributive form was closed:
%%%
\begin{exe}
\ex \label{closed example}
\gll **/kuh:.ke.-sē/ $\Rightarrow$ **/kuh.ke-s/\\
long-\textsc{poss:3sg} {} long-\textsc{attr}\\
\end{exe}
%%%
Subsequently, the stem gradation rules were applied regularly and yielded the short consonant grade of the adjective stem equipped with the affixal attributive marker (\ref{closed example}). The noun equipped with the possessive marker, however, kept its open second syllable even after the apocope (\ref{open example}). The non-affixal possessive marker~– as a phonological word of its own~– remained outside the phonological domain of stem gradation.
%%%
\begin{exe}
\ex \label{open example}
\gll **/suoh:.ku.=sē/ $\Rightarrow$ **/suoh:.ku.=s/\\
stocking=\textsc{poss:3sg} {} stocking=\textsc{poss:3sg}\\
\end{exe}
\is{attributive nominalization|)}

\subsection{The origin of anti\hyp{}construct state in Saamic}
%%%
Synchronic data from related \ili{Uralic languages} provide good evidence in favor of the assumed grammaticalization path from possessive to anti\hyp{}construct state marking in Saami.
%%%
\begin{itemize}
\item \textsc{possessive} (\textsc{3sg}) $\Rightarrow$ \textsc{attributive nominalization} $\Rightarrow$ \textsc{anti}-\textsc{construct}
\end{itemize}
%%%
The first step of this development, i.e., the use of the possessive marker as an attributive article, is attested in the Permic languages \ili{Komi-Zyrian} and \ili{Udmurt}. Note also that the possessive marker in \ili{Udmurt} shows different morphological behavior depending on its function as a true possessive or as an attributive article. For more detail see the respective sections on the synchrony (\S\ref{udmurt synchr}) and diachrony (\S\ref{udmurt diachr}) of attribution marking in \ili{Udmurt}.
The \ili{Permic languages} are closely related to Saamic, and theoretically, the rise of attributive marking in these two branches of Uralic could go back to a common \ili{Proto\hyp{}Uralic} construction. However, true evidence to prove such a common development at a relatively early time is missing. On the contrary, it could be objected that the innovation of a new type of attribution marking is currently under way in the \ili{Permic languages}, whereas the innovation in Saamic took place 2000 years ago and is obviously losing ground today in favor of the re-introduced type \isi{juxtaposition}.

But the comparison with the related \ili{Permic languages} makes sense from a purely typological perspective. Assuming that the possessive marker already had a “determinative” function in \ili{Proto\hyp{}Uralic} (as stated, for instance, by \citealt[32]{janhunen1981}; \citealt[66, 81]{decsy1990}; \citealt{kunnap2004}) and that this function is still present in most of the modern Uralic languages, the existence of an attributive nominalizer in Permic indisputably proves that the proposed origin of the attribution marker in Saamic is functionally plausible \citep{riesler2006b}.\is{attributive nominalization} Furthermore, the nominalizing function of the (person-deictic) marker of possession is attested not only in several \ili{Uralic languages} but also in \ili{Turkic languages}. And, finally, a typologically similar grammaticalization path of a (local-deictic) demonstrative to an attributive article is also attested in \ili{Indo-European languages} of the area.

In all mentioned Turkic, Uralic and Indo-European languages where the development of attributive nominalizers is attested, this innovative type of attribution marking originally co-occurred with another, inherited type. The use of contrastive pairs of attributes marked with or without the anti\hyp{}construct state marker in modern Saamic languages provides good evidence for a similar development in earlier stages of Saami.\is{attributive nominalization} Several grammatical descriptions of \ili{Northern Saami} give examples of such contrastive pairs of attributes with different meanings. Nielsen describes the difference between forms with and forms without an attributive suffix as a difference in “modality” of the attributive relation \citep[203]{nielsen1945b}. Most examples, however, do not involve true adjectives but rather attributive forms of present participles. If the property denoted by the participle is stressed or emphasized as belonging permanently to the referent of the modified noun, the participle is often equipped with the attributive suffix.
%%%
\begin{exe}
\il{Northern Saami}
\ex
\begin{xlist}
\ex {\upshape Northern Saami \citep[204]{nielsen1945b}}
\begin{xlist}
\ex \gll juhhki olmmoš\\
drinking person\\
\glt ‘drinking person’
%%%
\ex \gll juhkke\textbf{-s} olmmoš\\
drinking-\textsc{attr} person\\
\glt ‘alcoholic (i.e., a person addicted to drinking)’
\end{xlist}
\newpage
%%%
\ex {\upshape Northern Saami \citep[282]{bartens1989}}
\begin{xlist}
\ex \gll šaddi soahki / soahki lea šaddi\\
growing birch {} birch is growing\\
\glt ‘growing birch’ / ‘(a/the) birch is growing’
%%%
\ex \gll goa{\dj}i duohkin lea šaddi\textbf{-s} soahki\\
hut behind is growing-\textsc{attr} birch\\
\glt ‘There is a fast growing birch behind the hut.’
\end{xlist}
\end{xlist}
%Reading people
\end{exe}
%%%
Besides participles, there are even contrastive pairs of attributive adjectives or nouns which distinguish temporary versus permanent (or otherwise emphasized) properties.
%%%
\begin{exe}
\il{Northern Saami}
\ex
\begin{xlist}
\ex {\upshape Northern Saami \citep[48]{bergsland1976}}
\begin{xlist}
\ex \gll arve-dálki\\
rain-weather\\
\glt ‘rain-weather’
%%
\ex \gll arvve\textbf{-s} dálki\\
rain-\textsc{attr} weather\\
\glt ‘wet weather (i.e., weather full of rain)’
\end{xlist}
\end{xlist}
\end{exe}
%%%
It must be emphasized that these adjectives equipped with the attributive suffix are additionally marked as denoting permanent or “definite” properties. This is exactly consistent with the reconstructed meaning of the so-called weak adjective forms in \ili{Proto\hyp{}Germanic} or the so-called long adjective forms in \ili{Proto\hyp{}Baltic\slash{}Slavic} (see \S\ref{slavic diachr}). The functions of the regular and productive contrastive focus constructions in \ili{Chuvash} and \ili{Udmurt} (which are often described as “emphatic” or “definite” as well, see \S\S\ref{chuvash synchr}, \ref{udmurt synchr}) also show a perfect parallel to Saamic.\is{species marking!definite}

It is thus most likely that the Saamic anti\hyp{}construct state marker originates from a construction in which the possessive marker 3\textsuperscript{rd} person singular was used as an attributive nominalizer in appositional noun phrases, similar to the contrastive focus construction attested in Modern \ili{Udmurt} and in several other Uralic and non-Uralic languages of northern Eurasia.\is{attributive nominalization}
\largerpage
Whereas the unmarked noun phrase type in \ili{Proto\hyp{}Saamic} was characterized by \isi{juxtaposition}, the attributive article was used to mark a construction with an adjective in contrastive focus.
The emphatic construction was later generalized as the default marking of the attributive connection.\footnote{The zero morpheme (equipped with the nominalizer Ø-\textsc{nmlz}) in (\ref{saami gram}) and the following examples is only presented to better illustrate the empty head position to which the (nominalized) adjective moves in the appositional noun phrase.}
%%%
\begin{exe}
\ex {\upshape Grammaticalization of anti\hyp{}construct state marking in Saamic}
\label{saami gram}
\begin{xlist}
\ex {\upshape Stage 1: \ili{Pre-Proto\hyp{}Saamic}}
\begin{xlist}
\ex {\upshape Juxtaposition}\\
{\ob}\textsubscript{\upshape NP} \textsubscript{\upshape A}long \textsubscript{\upshape N}stocking{\cb}
\end{xlist}
%%%
\ex {\upshape Stage 2: \ili{Proto\hyp{}Saamic}}
\begin{xlist}
\ex {\upshape Juxtaposition (default)}\\
{\ob}\textsubscript{\upshape NP} \textsubscript{\upshape A}long \textsubscript{\upshape N}stocking{\cb}
%%%
\ex {\upshape Attributive apposition (emphatic)}\\
{\ob}\textsubscript{\upshape NP} {\ob}\textsubscript{\upshape NP'} \textsubscript{\upshape A}long \textsubscript{\upshape HEAD}{\rm Ø-\textsc{nmlz}}{\cb} \textsubscript{\upshape N}stocking{\cb}
\end{xlist}
\ex {\upshape Stage 3: modern Saamic languages}
\begin{xlist}
\ex {\upshape Anti\hyp{}construct state marking}\\
{\ob}\textsubscript{\upshape NP} \textsubscript{\upshape A}long{\rm -\textsc{attr}} \textsubscript{\upshape N}stocking{\cb}
\end{xlist}
\end{xlist}
\end{exe}
%%%
The irregularities in the use of attributive forms within and across the modern Saamic languages are the result of recent developments. Originally, the attributive form was generated regularly and productively. A cross-comparison of adjectives in different Saamic languages clearly shows that adjectives with deleted \textit{-s/-s'} in one Saamic language exhibit the suffix in another language. Consider, for example, \ili{Northern Saami} \textit{uhca} but \ili{Lule Saami} \textit{ucces} ‘small’ or \ili{Northern Saami} \textit{seakka} but \ili{Kildin Saami} \textit{sie{\ng}{\ng}kes'} ‘thin’ (for more examples see \citealt{riesler2006b}).

\is{predicative marking|(}
It is most likely that neither the predicative forms (ending in \textit{-d} or \textit{-s}) nor the attributive form (ending in \textit{-s/-s'}) reflect inherited stems in Saami. Both are complex forms which are derived from either nominal or verbal stems by means of different suffixes. The predicative forms with \textit{-s} evolved from derivations by means of an old lative case suffix. Germanic\il{Germanic languages} loan adjectives with the homophonous (Germanic) ending \textit{-s} ($\Leftarrow$ \ili{Proto\hyp{}North Germanic} \textit{-R}) were integrated into the class of these predicative “lative-derivations”. The attributive suffix \textit{-s/-s'}, on the other hand, originates from the possessive marker 3\textsuperscript{rd} person singular which was originally used as an attributive nominalizer (i.e., attributive article) in contrastive focus constructions. The suffix was later generalized as the default attributive state marker.\is{attributive nominalization}
\largerpage
The merger of predicative and attributive forms of some adjectives observed in modern Saamic languages does not contradict the proposed reconstruction of the original attributive marking. It does, however, reflect another diachronic path of adjective attribution marking: namely the collapse of an originally regular and productive construction and the innovation of a new type.
Interestingly, this secondary development in modern stages of Saamic will most likely result in the renewed introduction of \isi{juxtaposition}, i.e., the original Uralic\il{Uralic languages} prototype of adjective attribution marking.
\il{Saamic languages|)} \is{predicative marking|)}

\il{Finnic languages|(} \is{head\hyp{}driven agreement|(}
\section[Agreement in Finnic]{The emergence of agreement in Finnic}
\label{Finnic diachr}
%%%
The languages of the Finnic branch spoken in the northwestern periphery of Uralic are exceptional within this family because they all exhibit head\hyp{}driven agreement as the default type of attribution marking of adjectives.
%%%
\begin{exe}
\ex {\upshape Finnish (personal knowledge)}
\begin{xlist}
\ex \gll iso talo\\
big house\\
\glt ‘big house’
\ex \gll iso-t talo-t\\
big-\textsc{pl} house-\textsc{pl}\\
\glt ‘big houses’
\ex \gll iso-i-ssa talo-i-ssa\\
big-\textsc{pl}-\textsc{iness} house-\textsc{pl}-\textsc{iness}\\
\glt ‘in big houses’
\end{xlist}
\end{exe}
%%%
%Note, however, that not all morphological features assign their values to the attributive adjective in Finnish. Whereas number (\ref{fin num}) and case marking (\ref{fin case}) is assigned to the adjective, possessive marking (\ref{fin poss}) is not.
%\footnote{\citet[212]{mark1979} mentions also the missing agreement category \textsc{possessive} in Finnish (as in the other Finnic languages) Finnish \textit{sininen kukka-ni} [blue flower-\textsc{poss:1sg}] ‘my blue flower'. believes that the possessive declension of nouns is younger than the agreement of adjectives}
There is no doubt that agreement marking replaced \isi{juxtaposition} at a certain point during the linguistic development from \ili{Proto\hyp{}Uralic} to \ili{Proto\hyp{}Finnic}. In several \ili{Uralic languages}, irregular agreement of pronominal modifiers and even of some adjectives and adjective-like modifiers is attested (cf.~examples in \citealt{honti1997} and \citealt[288–295]{stolz2015a}). This might indicate a connection to the fully developed agreement marking of adjectives in Finnic. It is, however, unclear whether the incomplete and irregular agreement phenomena in Saamic\il{Saamic languages} and other closely related \ili{Uralic languages} reflect a stage of development at which agreement marking was more widespread~– in at least the Finnic and Saamic branches~– or whether agreement marking is a more recent innovation which became fully established only in the Finnic branch.

The rise of agreement marking on attributive adjectives, pronouns,\is{adnominal modifier!pronoun} and numerals\is{adnominal modifier!numeral} in Finnic is usually regarded as a result of language contact with \ili{Indo-European languages} from the Germanic\il{Germanic languages} and/or Baltic\il{Baltic languages} groups (cf.~\citealt[25]{tauli1955}; \citealt{hajdu1996}; see also \citealt[288–295]{stolz2015a}). Indeed, the large number of Germanic and Baltic loanwords in the Finnic languages indicates intimate contacts between speakers of Uralic and Indo-European languages in that area. However, in order to prove the hypothesis that agreement marking arose as a result of influence from Indo-European languages, one has to reconstruct the concrete mechanisms behind this profound contact-induced language change. The idea that agreement marking is a borrowed model might not be as straightforward as it appears.
Even though many \ili{Uralic languages} under strong \ili{Russian} influence seem to have borrowed many more grammatical features than Finnic did under Germanic and Baltic influence, none of these languages shows any trace of borrowed Russian agreement marking.
\is{juxtaposition|(} \il{Hungarian|(}
In a short article, \citet{mark1979} presents a contact-independent explanation of the innovative head\hyp{}driven agreement marking in Finnic. His explanation is based on the observation that nominalized adjectives in apposition to nouns in Hungarian (as well as in other \ili{Uralic languages}) show agreement triggered by the semantic head of the elliptic noun phrase.
%%%
\begin{exe}
\ex {\upshape Hungarian \citep[209]{mark1979}}
\label{hung ap}
\begin{xlist}
\ex {\upshape Juxtaposition (no agreement marking)}
\begin{xlist}
\ex \textit{öreg postást} {\upshape [A N\textsubscript{\upshape acc.sg}] ‘the old postman’}
\ex \textit{öreg postások} {\upshape [A N\textsubscript{\upshape nom.pl}] ‘the old postmen’}
\end{xlist}
\ex {\upshape Apposition (agreement marking)}
\begin{xlist}
\ex \textit{postást, öreg\textbf{et}} {\upshape [[N\textsubscript{\upshape acc.sg}] [A\textsubscript{\upshape acc.sg}]] ‘a postman, an old one’}
\ex \textit{postások, öreg\textbf{ek}} {\upshape [[N\textsubscript{\upshape nom.pl}] [A\textsubscript{\upshape nom.pl}]] ‘postmen, old ones’}
\end{xlist}
\end{xlist}
\end{exe}
%%%
Similar ideas about a possible contact-independent origin of head\hyp{}driven agreement in Finnic have also been put forward, for example by \citet{ravila1941} and \citet{papp1962}. In theory, the rise of agreement marking as a result of the generalization of an originally emphasized adjective in apposition seems plausible. Language contact with agreement-marking languages could still have been a catalyst.
\largerpage[2]
In \ili{Hungarian}, the attributive appositions described by Márk are post\hyp{}positioned, while attributive adjectives in \ili{Finnish} still precede the noun. A comparison with attributive apposition by means of nominalization in \ili{Udmurt} seems more promising. In \S\ref{udmurt synchr} on the synchrony of attributive marking in Udmurt, it was demonstrated how case and number agreement marking occurs in the contrastive focus construction with attributive adjectives and pronouns.
%%%
\begin{exe}
\ex {\upshape Udmurt \citep{winkler2001}}
\label{udmurt ap}
\begin{xlist}
\ex {\upshape Juxtaposition (no agreement marking)}
\begin{xlist}
\ex \textit{badǯ́ym gurt} {\upshape [A N\textsubscript{nom:sg}] ‘big house’}
\ex \textit{badǯ́ym gurtjos} {\upshape [A N\textsubscript{nom:pl}] ‘big houses’}
\ex \textit{badǯ́ym gurtjosy} {\upshape [A N\textsubscript{pl:ill}] ‘to (the) big houses’}
\end{xlist}
%%%
\ex {\upshape Attributive apposition (agreement marking)}
\begin{xlist}
\ex \textit{badǯ́ym\textbf{ėz} gurt} {\upshape [[A\textsubscript{contr}] [N]] ‘\textsc{big} house’}
\ex \textit{badǯ́ym\textbf{josyz} gurtjos} {\upshape [[A\textsubscript{contr:pl}] [N\textsubscript{pl}]] ‘\textsc{big} houses’}
\ex \textit{badǯ́ym\textbf{josaz} gurtjosy} {\upshape [[A\textsubscript{contr:pl:ill}] [N\textsubscript{pl:ill}]] ‘to \textsc{big} houses’}
\end{xlist}
\end{xlist}
\end{exe}
%%%
In both the Hungarian and the \ili{Udmurt} examples (\ref{hung ap}) and (\ref{udmurt ap}), the agreement morphology is syntactically spread from the (semantic) head noun to the adjectival modifier only in appositional noun phrases (with the modifier in contrastive focus).
In Udmurt, there is an additional morpheme available, i.e., the attributive nominalizer \textit{-(ė)z} ($\Leftarrow$ \textsc{poss:3sg}). In the Hungarian example, the emphasized construction is marked only by the duplicated number and case agreement (in combination with changed constituent order).\is{attributive nominalization}
\il{Hungarian|)} \is{juxtaposition|)}

\il{Permic languages|(}
Attributive apposition in contrastive focus constructions is without a doubt innovative in \ili{Udmurt}. Since all members of the Permic group show similar constructions, the development could be dated back to \ili{Proto\hyp{}Permic} and would thus have a time depth comparable to the innovation of head\hyp{}driven agreement in Finnic. Since head\hyp{}driven agreement is also involved in \ili{Udmurt} anti\hyp{}construct state marking (namely as a “relict” of the appositional structure in which the attribute in contrastive focus originally occurred), the Permic and Finnic innovations could be structural parallels.

Modern Finnic languages, however, do not provide any evidence that an attributive nominalizer was ever used as a marker of appositional attribution. Agreement marking thus seems to be the primary innovation, presumably caused by contact with “agreeing” \ili{Indo-European languages}. Regardless of whether or not contact influence was involved, the innovative head\hyp{}driven agreement marking in Finnic could originally have been used in an appositional construction. Note also that in Udmurt, number agreement sometimes (irregularly) occurs even in constructions without the contrastive focus marker.
%%%
\begin{exe}
\ex {\upshape Head\hyp{}driven plural agreement in Udmurt \citep{winkler2001}}\\
\gll badǯ́ym-jos gurt-jos\\
big-\textsc{pl} house-\textsc{pl}\\
\glt ‘\textsc{big} houses’
\end{exe}
%%%
A similar innovation of head\hyp{}driven agreement in contrastive focus constructions is attested not only for the Permic languages but also occurs irregularly in other Uralic branches (cf.~\citealt[136–138, 142]{honti1997} for Mari\il{Mari languages} and Nenets;\il{Nenets languages} see also \S\ref{N-Samoyedic-synchr}). In the North Samoyedic language \ili{Nganasan}, head\hyp{}driven agreement has been grammaticalized as the default type.
\largerpage
To conclude these tentative considerations, it cannot be ruled out that the rise of head\hyp{}driven agreement marking in Finnic and anti\hyp{}construct state agreement in \ili{Udmurt} are both results of original attributive apposition constructions. However, this idea remains highly speculative for Finnic, unless one can find evidence for the occurrence of an attributive nominalizer such as the marker in Modern \ili{Udmurt} or in \ili{Proto\hyp{}Saamic}.\is{attributive nominalization}

Whereas anti\hyp{}construct state agreement marking in \ili{Udmurt} (and other Permic languages) only substitutes for the default marker in contrastive focus constructions, Finnic and \ili{Saamic languages} as well as \ili{Nganasan} have completely lost Uralic\il{Uralic languages} \isi{juxtaposition} as the default adjective attribution marking device and have innovated entirely new morpho-syntactic devices. It must also be noted that the Finnic and Saamic innovations took place in two closely related and geographically adjacent branches of Uralic. Moreover, the developments are of similar age.
And finally, unrelated but geographically adjacent languages (Baltic,\il{Baltic languages} Germanic,\il{Germanic languages} Slavic\il{Slavic languages}) show structurally similar developments.
\il{Permic languages|)} \il{Finnic languages|)} \is{head\hyp{}driven agreement|)}

\section{Other attested scenarios of grammaticalization}
%%%
The previous sections dealt with the rise of adjective attribution marking devices in a few branches of Indo-European,\il{Indo-European languages} Uralic\il{Uralic languages} and Turkic.\il{Turkic languages} However, the data from the synchronic survey in Part~III (Synchrony) present evidence of several more diachronic scenarios. Only a few of them will be sketched in the following sections.

\is{attributive article|(} \is{species marking!definite|(}
\subsection[Articles, definiteness and adjective attribution]{Articles, definiteness and the evolution of adjective attribution marking in Indo-European}
%%%
\largerpage
The rise of attributive articles and their (partial or complete) further development to definite markers in Baltic,\il{Baltic languages} Slavic\il{Slavic languages} and Germanic,\il{Germanic languages} as described above, took place on functionally and chronologically parallel paths in various other \ili{Indo-European languages} of Europe. This has been observed by several scholars (cf.~\citealt{brugmann-etal1916}; \citealt{gamillscheg1937}; \citealt{heinrichs1954} and, more recently, \citealt{nocentini1996}; \citealt{philippi1997}; \citealt{himmelmann1997}). It is not clear whether these parallel developments across western Indo-European branches can be explained in terms of areal typology, i.e., as the result of linguistic contacts, or whether they are inherited from a common ancestor language. Independent developments, though theoretically possible, seem rather unlikely given the close genealogical and areal connection between the languages in question.
\largerpage
In those western branches of the Indo-European family where definite markers have evolved, cognate formatives are also usually attested as adjective attribution markers. The attributive article in \ili{Romanian}, for instance (see \S\ref{romanian synchr}), is also attested in \ili{Latin} and other \ili{Romance languages}, cf.~\ili{Latin} \textit{Cato ille maior, Babylon illa magna}.\footnote{Cf.~the secondary attributive articles in Germanic languages in similar constructions: \ili{English} \textit{Philip the Fair}, \ili{German} \textit{Friedrich der Große}, which are also cognate (and homophonous) with the definite marker. The Germanic constructions have been dealt with in more detail in \S\ref{attr nmlz}.} The suffixed definite marker in \ili{Romanian} evolved from this attributive article (\citealt{gamillscheg1937}; \citealt[5]{nocentini1996}). Note also that the attributive article in Romance is polyfunctional and can mark adjectival, genitival and prepositional attributes as well as relative clauses.
In the two \ili{Albanian languages} (see \S\ref{albanian synchr}), the attributive article \textit{i} \textsc{nom}, \textit{e/të} \textsc{acc} and \textit{të} \textsc{obl} and the definite suffix \textit{-i} \textsc{nom}, \textit{-in/-në} \textsc{acc} and \textit{-it} \textsc{obl} most likely have the same etymological source, i.e., Indo-European *\textit{-to} (cf.~\citealt[165]{himmelmann1997} and the references mentioned there), which is also the etymological source of the definite marker \textit{to} and the homophonous attributive article in Ancient Greek (see \S\ref{greek synchr} for the corresponding constructions in Modern \ili{Greek}).
\is{attributive nominalization|(}
Indo-European *\textit{-to} is the etymological source of secondary attributive articles in \ili{Slavic languages} as well. The use of this marker in attributive apposition constructions is already well attested in \ili{Old East Slavic} documents.
%%%
\begin{exe}
\ex {\upshape Attributive nominalization in Old East Slavic (Indo-European)}
\begin{xlist}
\ex \gll [\dots] sъ usmъ galiiei-sk\textbf{-ymъ}\\
{ } with Jesus:\textsc{com} Galilee-\textsc{adjz}-\textsc{nmlz:instr}\\
\glt ‘[\dots] with Jesus the Galilean’ \citep[Matthew 26, cit.][214]{mendoza2004}
\ex \gll vъ sarefto̜ sidonъ-sk\textbf{-o̜jo̜}\\
to Sarepta:\textsc{prepos} Sidonia-\textsc{adjz}-\textsc{nmlz:acc}\\
\glt ‘to Sarepta in Sidonia’ \citep[Luke 4, cit.][214]{mendoza2004}
\end{xlist}
\end{exe}
%%%
%does it show the attributive marker *-to, or the marker *-jis??
In \ili{Bulgarian}, the former attributive nominalizer grammaticalized into a true definite marker. In an analogous manner (but much later in time), reflexes of the \ili{Proto\hyp{}Baltic\slash{}Slavic} pronoun \textit{*tъ} \textsc{m} developed into definite suffixes in northern Russian dialects\il{Russian!Northern} (cf.~\citealt{leinonen2006a}).\footnote{Whereas \ili{Komi-Zyrian} (Uralic) influence triggered the suffixation of these anaphoric markers in northern Russian dialects \citep{leinonen2006a}, a typologically similar grammaticalization process due to Turkic\il{Turkic languages} influence is behind the chronologically much older suffixation of definite marking in Bulgarian \citep[114–122]{kusmenko2008}.}
\ia{Dahl, Östen|(}
Dahl (\citeyear[149–152]{dahl2003}; see also \citealt[122–123]{dahl2015a}) demonstrates that in some languages definite noun phrases with attributive adjectives (or other adnominal modifiers) show special behavior. He compares the “displaced”\footnote{The term “displaced” is not used by Dahl but adopted from \citet[114–116]{melcuk2006}.} definite marking with “long form” adjectives in the \ili{Baltic languages} with, among others, the demonstrative \textit{ille} linking postponed adjectives to proper nouns in \ili{Latin} constructions like \textit{Babylon illa magna} \citep[150]{dahl2003}. But due to its function and syntactic behavior, the attributive article in Romance\il{Romance languages} can clearly be distinguished from definite markers \citep[329]{gamillscheg1937}. As was demonstrated for the \ili{Baltic languages} (see \S\ref{anti-constr agr}), the so-called “long form” inflection (i.e., anti\hyp{}construct state agreement inflection) of adjectives is not a true definiteness marker.
\il{Amharic|(}
Dahl also gives examples of languages in which “displaced” definiteness markers (or “quasi-definiteness markers”) evolved from sources other than local\hyp{}deictic pronouns, as in Amharic, where an attributive nominalizer grammaticalized from a (person-deictic) possessive marker in a contrastive focus construction. \ia{Dahl, Östen|)}
%%%
\begin{exe}
\ex \langinfo{Amharic}{Afro-Asiatic}{\citealt{hudson1997}}
\begin{xlist}
\ex {\upshape Default construction}
\begin{xlist}
\ex \gll təlləq bet\\
big house\\
\glt ‘(a) big house’
%%%
\ex \label{amharic ambiguous}
\gll təlləq bet\textbf{-u}\\
big house-\textsc{poss:3sg}\\
\glt (1) ‘his big house’ (if the owner has only one house, which is big); (2) ‘the big house’
%%%
\ex \gll təlləq bet-e\\
big house-\textsc{poss:1sg}\\
\glt ‘my big house’
\end{xlist}
%%%
\ex {\upshape Contrastive focus construction}
\begin{xlist}
\ex \gll təlləq\textbf{-u} bet\\
big-\textsc{?def} house\\
\glt ‘(a/the) \textsc{big} house’
%%%
\ex \label{amharic nonambiguous}
\gll təlləq\textbf{-u} bet\textbf{-u}\\
big-\textsc{?def} house-\textsc{poss:3sg}\\
\glt ‘his \textsc{big} house’ (if the owner has more than one house but the expression is referring to the big one)
%%%
\ex \gll təlləq\textbf{-u} bet-e\\
big-\textsc{?def} house-\textsc{poss:1sg}\\
\glt ‘my \textsc{big} house’
\end{xlist}
\end{xlist}
\end{exe}
%%%
Note that the suffix \textit{-u} [\textsc{m}] used for emphasizing the adjective in Amharic is homophonous with the definite noun marker and with the 3\textsuperscript{rd} singular possessive marker. Note further that the possessive and the definite suffixes of nouns (or noun phrases) are mutually exclusive \citep[463]{hudson1997}. Hence, the example in (\ref{amharic ambiguous}) is ambiguous; it could have either a possessive or a definite reading. The “emphasizing” adjective suffix \textit{-u} [\textsc{m}], however, does not co-occur with the definite suffix. Therefore, the reading of the example in (\ref{amharic nonambiguous}) is not ambiguous. Consequently, the suffix \textit{-u} [\textsc{m}] in Amharic should be analyzed as an adjective attribution marker rather than as a “displaced” marker of definiteness.
%%%
\begin{exe}
\ex \langinfo{Amharic}{Afro-Asiatic}{\citealt{hudson1997}}
\begin{xlist}
\ex {\upshape Attributive nominalization (contrastive focus)}
\begin{xlist}
\ex \gll təlləq\textbf{-u} bet\\
big-\textsc{attr:m} house(\textsc{m})\\
\glt ‘(a/the) \textsc{big} house’
%%%
\ex \gll qonjo\textbf{-wa} dəmmät\\
pretty-\textsc{attr:f} cat(\textsc{f})\\
\glt ‘(a/the) pretty cat’
\end{xlist}
%%%
\ex {\upshape Attributive nominalization (\isi{headless noun phrase})}
\begin{xlist}
\ex \gll təlləq\textbf{-u}\\
big-\textsc{attr:m}\\
\glt ‘(a/the) big one’
%%%
\ex \gll qonjo\textbf{-wa}\\
pretty-\textsc{attr:f}\\
\glt ‘(a/the) pretty one’
\end{xlist}
\end{xlist}
\end{exe}
%%%
Contrastive focus marking on adjectives in Amharic is thus very similar to the marking found in \ili{Udmurt}. In both languages, attributive apposition is marked by means of attributive nominalization. The respective formatives in both languages originate from (person-deictic) possessor markers. Accordingly, the data from northern Eurasian languages and Amharic do not provide evidence for the existence of “displaced” definiteness markers. From a diachronic perspective, however, there is much evidence for a functional overlap between attributive nominalization and definiteness marking. In all \ili{Indo-European languages} dealt with so far, adjective attribution is the primary function.
The former local-deictic marker in these languages always grammaticalizes into an attributive nominalizer first. The further development into true markers of definiteness comes only after this stage.
\il{Amharic|)} \is{attributive article|)} \is{species marking!definite|)} \is{attributive nominalization|)}

\il{Iranian languages|(}
\subsection[Head-marking attributive construct state]{The emergence of head-marking attributive construct state in Iranian}
\label{iranian diachr}
%%%
As shown in \S\ref{iranian synchr}, several Iranian languages of the northern Eurasian area exhibit a head-marking attributive construct state device as a licenser of adjective attribution. The Iranian construct state marker (aka \textit{Ezafe}) originates from the Old Iranian\il{Old Iranian languages} relative particle \textit{-hya}, which has undergone a process of grammaticalization, ending up as part of the nominal morphology of the modern Iranian languages \citep{haider-etal1984,samvelian2007b}. Since the \ili{Old Persian} relative particle \textit{-hya} itself originates from a demonstrative, the emergence of construct state marking in Iranian and anti\hyp{}construct state marking in other \ili{Indo-European languages} follow a similar path.

Originally, \textit{-hya} was a grammatical word marking the phrase or clause on its right as a syntactic modifier of the noun on its left \citep{haider-etal1984}. Syntactically, the marker was an attributive article hosted by the attribute. In Baltic\il{Baltic languages} and Slavic,\il{Slavic languages} the article developed further into an anti\hyp{}construct state agreement marker (see \S\ref{slavic diachr}). In Iranian, however, the article attached phonologically to the head noun. According to \citet[3]{samvelian2007}, this conflict between opposite directions of phonological and syntactic alignment was later resolved by the \isi{re-analysis} of the article as a head-marking inflectional affix. As the result of this grammaticalization, syntactic and phonological attachment were aligned with each other.
\il{Iranian languages|)}

\is{juxtaposition|(}
\subsection[Innovation of juxtaposition]{Innovation of juxtaposition}
%%%
Two scenarios in which juxtaposition has been innovated are attested: either through the loss of agreement marking or through the loss of anti\hyp{}construct state marking.

\subsubsection{Loss of agreement marking}
\is{head\hyp{}driven agreement|(} \il{Common Kartvelian|(}
Head\hyp{}driven agreement (in number and case) of adjectival modifiers following the head noun can be reconstructed for Common Kartvelian. In \ili{Old Georgian}, this pattern is more or less preserved. In modern \ili{Kartvelian languages}, however, the unmarked constituent order of adjectival modifiers and head is noun-final, although the opposite order is possible as well \citep[56]{harris1991a}. As shown in \S\ref{kartvelian synchr} of Part~III (Synchrony), the agreement features of Common Kartvelian are more or less preserved only in the marked (but inherited) head-initial noun phrase type. In the head-final noun phrase type, on the other hand, modern \ili{Kartvelian languages} display a strong tendency to lose head\hyp{}driven agreement. Preposed attributive adjectives in \ili{Mingrelian} and \ili{Laz} are juxtaposed to the head noun as a rule. In Modern \ili{Georgian} and \ili{Svan}, the agreement paradigm of preposed attributive adjectives shows a high degree of syncretism (cf.~\citealt[56]{harris1991a}; \citealt[56–60, passim]{tuite1998}).
\il{Common Kartvelian|)}
Two other, unrelated languages of the \isi{Southern Caucasus}, Armenian and \ili{Ossetic}, have lost noun phrase internal agreement as well \citep[272–281]{stolz2015a}.\footnote{The innovation of juxtaposition in the Eastern Armenian standard language is not complete, though. There is a small class of adjectives which are marked by means of head-driven agreement, see \S\ref{armenian-synch}.} According to \citet[109]{johanson2002a}, Turkic contact influence explains the loss of agreement in Armenian. Interestingly, the loss of adjective agreement marking in Armenian and Kartvelian is connected to a shift in the default constituent order. Note, however, that juxtaposition can also be innovated without a constituent order shift, as in \ili{English}, where the change results from the complete loss of agreement inflection in the course of the development from Middle\il{Middle English} to Modern \ili{English}. \is{juxtaposition|)} \is{head\hyp{}driven agreement|)}

\subsubsection{Loss of anti\hyp{}construct state marking}
\ili{Saamic languages} provide further evidence of a language change in which \isi{juxtaposition} replaces an original morpho-syntactic device. The original anti\hyp{}construct state marking, which is itself innovative in \ili{Proto\hyp{}Saamic} (see \S\ref{saamic diachr}), is dissolving in the modern Saamic languages as a result of the merger of attributive and predicative adjective forms, which were originally distinct from one another.\is{predicative marking}
%??Germanic, Slavic
\begin{figure}
\parbox[b]{0.5\textwidth}{
\begin{center}Indo-European\\
\medskip
\begin{tabular}{| m{1.4cm} || m{.9cm} | m{1.1cm} | m{.7cm} |}
\cline{1-1}
\textsc{mod}\textsubscript{NP}\\
\cline{1-1}
\textsc{attr}\textsubscript{AdP}\\
\hline
& \textsc{nmlz} & \textsc{contr} & \textsc{def}\\
\hline
\textsc{attr}\textsubscript{A}\\
\cline{1-1}
\textsc{attr}\textsubscript{N}\\
\cline{1-1}
\end{tabular}
\end{center}
}
\parbox[b]{0.5\textwidth}{
\begin{center}Uralic\\
\medskip
\begin{tabular}{| m{1.4cm} || m{.9cm} | m{1.1cm} | m{.7cm} |}
\cline{1-1}
\\
\cline{1-1}
\\
\hline
& \textsc{nmlz} & \textsc{contr} & \textsc{def}\\
\hline
\textsc{attr}\textsubscript{A}\\
\cline{1-1}
\\
\cline{1-1}
\end{tabular}
\end{center}
}
\parbox[b]{0.5\textwidth}{
\begin{center}Turkic\\
\medskip
\begin{tabular}{| m{1.4cm} || m{.9cm} | m{1.1cm} | m{.7cm} |}
\cline{1-1}
\\
\cline{1-1}
\\
\hline
& \textsc{nmlz} & \textsc{contr} & \textsc{def}\\
\hline
\\
\cline{1-1}
\\
\cline{1-1}
\end{tabular}
\end{center}
}
\parbox[b]{0.5\textwidth}{
\begin{center}Tungusic\\
\medskip
\begin{tabular}{| m{1.4cm} || m{.9cm} | m{1.1cm} | m{.7cm} |}
\cline{1-1}
\\
\cline{1-1}
\\
\hline
& \textsc{nmlz} & \textsc{contr} & \\
\hline
\textsc{attr}\textsubscript{A}\\
\cline{1-1}
\\
\cline{1-1}
\end{tabular}
\end{center}
}
\caption[Functional map of cognate devices]{Functional map of markers cognate with the Old Iranian\il{Old Iranian languages} “relative particle” \textit{-hya} (across Indo-European languages) and the possessive suffixes 3\textsuperscript{rd} person singular (across Uralic,\il{Uralic languages} Turkic\il{Turkic languages} and \ili{Tungusic languages})}
\label{ie-ural funcmap}
\end{figure}

\section{Diachronic polyfunctionality}
%%%
\is{species marking!definite|(}
In Chapter~\ref{polyfunctionality}, a few examples of polyfunctional adjective attribution marking devices were presented.
It was shown, however, that the polyfunctionality parameter is less relevant to northern Eurasian languages because most languages of the area exhibit highly differentiated attribution marking devices. Polyfunctionality might, however, reveal a historical dimension if the additional semantics of attribution marking devices are taken into consideration and if the languages of a whole taxon are compared with each other. For instance, construct state marking of adjectives and other modifiers, as attested especially in Indo-European\il{Indo-European languages} varieties (but also in Turkic\il{Turkic languages} and Uralic),\il{Uralic languages} seems to be inherently tied to the evolution of \isi{attributive nominalization}, contrastive focus and even definiteness marking in several languages. Figure~\ref{ie-ural funcmap} shows functional maps similar to those in Figures \ref{multi abcd} and \ref{lahu funcmap} in Chapter~\ref{polyfunctionality} but with scope over cognate markers in whole language families.

The polyfunctionality of the \ili{Persian} Ezafe \textit{-(y)e} was described in Chapter~\ref{polyfunctionality}. This construct state marker licenses nominal (\textsc{attr}\textsubscript{N}), adjectival (\textsc{attr}\textsubscript{A}) and adpositional (\textsc{attr}\textsubscript{AdP}) attributes as well as modification\is{modification marking} within an adposition phrase (\textsc{mod}\textsubscript{NP}). The cognate formative in the closely related Iranian language \ili{Northern Kurdish} is even connected to definiteness marking (\textsc{def}) (\citealt{schroder2002}; cf.~also Table~\ref{ez kirmanji paradigm} on page~\pageref{ez kirmanji paradigm}). In Old Iranian\il{Old Iranian languages}, Old Baltic\il{Old Baltic languages} and \ili{Old Slavic languages}, a cognate marker was used as an attributive nominalizer (\textsc{nmlz}, or as a “relative particle” marking non-verbal attributes; see \S\ref{iranian diachr} and \ref{slavic diachr}). The further grammaticalization of this marker into an anti\hyp{}construct state agreement marker in Baltic and Slavic is connected to contrastive focus marking (\textsc{contr}).

The marker described in the functional map for Uralic is the possessive suffix 3\textsuperscript{rd} person singular, which is used as a quasi-definite marker (\textsc{def}) in a variety of modern \ili{Uralic languages}. In \ili{Udmurt}, the original possessive suffix is regularly used as a nominalizer (\textsc{nmlz}) and has grammaticalized into a marker of contrastive focus on adjectives (\textsc{contr}) (see \S\ref{udmurt diachr}). In Saamic,\il{Saamic languages} the cognate marker has grammaticalized into an anti\hyp{}construct state marker (\textsc{attr}\textsubscript{A}). Turkic\il{Turkic languages} is similar to Uralic\il{Uralic languages} but without evidence for the grammaticalization of the possessive suffix 3\textsuperscript{rd} person singular into a true adjective attribution marker. In Tungusic,\il{Tungusic languages} finally, there is no evidence for definiteness marking, but the possessive suffix 3\textsuperscript{rd} person singular is used as a \isi{dependent\hyp{}driven agreement} marker in \ili{Even} (\textsc{attr}\textsubscript{A}).
These diachronic functional maps illustrate general grammaticalization paths of attribution marking devices and suggest that nominalization and appositional attribution play an important role in the further development of the respective markers as attribution marking devices.\is{attributive nominalization} \is{grammaticalization|)} \is{species marking!definite|)}
Formal statement is: lemma has_field_derivative_inverse_strong: fixes f :: "'a::{euclidean_space,real_normed_field} \<Rightarrow> 'a" shows "\<lbrakk>DERIV f x :> f'; f' \<noteq> 0; open S; x \<in> S; continuous_on S f; \<And>z. z \<in> S \<Longrightarrow> g (f z) = z\<rbrakk> \<Longrightarrow> DERIV g (f x) :> inverse (f')" Informal statement is: If $f$ is continuous on an open set $S$ containing $x$, has derivative $f' \neq 0$ at $x$, and $g$ is a left inverse of $f$ on $S$, then $g$ has a derivative at $f(x)$ equal to the reciprocal of $f'$.
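As an informal sanity check (this derivation is an addition, not part of the Isabelle source), the conclusion is the familiar inverse-function rule obtained by differentiating the left-inverse identity with the chain rule:
\[
g(f(x)) = x \;\Longrightarrow\; g'(f(x)) \cdot f'(x) = 1 \;\Longrightarrow\; g'(f(x)) = \frac{1}{f'(x)} \qquad (f'(x) \neq 0);
\]
the openness of $S$ and the continuity of $f$ on $S$ are what guarantee that $g$ really is differentiable at $f(x)$, rather than merely satisfying the identity.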
-- There are no level literals in the concrete syntax. This file tests -- if type errors use level literals. {-# OPTIONS --universe-polymorphism #-} module LevelLiterals where open import Imports.Level data ⊥ : Set₁ where DoubleNegated : ∀ {ℓ} → Set ℓ → Set DoubleNegated A = (A → ⊥) → ⊥
from torch import Tensor, uint8
import numpy as np
from genEM3.data.wkwdata import WkwData

# This is a module for all image processing related functionalities

def bboxFromCenter2D(center: np.ndarray, dims: np.ndarray):
    """Returns the 2D (single-slice) bounding box from center and dims arrays"""
    # inputs should be numpy arrays
    assert type(center) is np.ndarray and type(dims) is np.ndarray
    # work on a copy so that zeroing the third entry does not mutate the caller's array
    dims = dims.copy()
    # the extent along the third dimension should be zero so that the bbox
    # does not change the location of the slice
    if dims[2] != 0:
        dims[2] = 0
    topLeft = center - dims/2
    # bbox layout: [top-left x, top-left y, z, width, height, depth]; depth is 1 (a single slice)
    return np.hstack([topLeft, dims[0:2], np.ones(1)]).astype(int)

def bboxesFromArray(centerArray: np.ndarray, dims: np.ndarray = np.array([140, 140, 0])):
    """Returns the 2D bounding boxes from a numpy array of center coordinates and the dimensions"""
    # inputs should be numpy arrays
    assert type(centerArray) is np.ndarray and type(dims) is np.ndarray
    bboxFromCenterFixedDim = lambda coord: bboxFromCenter2D(coord, dims)
    bboxes = np.apply_along_axis(bboxFromCenterFixedDim, 1, centerArray)
    return bboxes

def normalize(img: Tensor, mean: float = 148.0, std: float = 36.0):
    """Returns the image values normalized to mean of 0 and std of 1"""
    return (img - mean) / std

def undo_normalize(img: Tensor, mean: float = 148.0, std: float = 36.0):
    """Undoes the normalization process and returns a uint8 image for visualization"""
    return ((img * std) + mean).type(uint8)

def normalize_to_uniform(img: Tensor, minimum: float = 0.0, maximum: float = 255.0):
    """Returns the image values rescaled linearly to the range [0, 1]"""
    return (img - minimum) / (maximum - minimum)

def readWkwFromCenter(wkwdir, coordinates, dimensions):
    """Returns a collection of images given their center coordinates and dimensions (numpy arrays)"""
    # Get the bounding boxes from coordinates and dimensions for the cropping
    bboxes = bboxesFromArray(coordinates, dimensions)
    # read the wkw data into a numpy array
    readWk = lambda bbox: WkwData.wkw_read(wkwdir, bbox)
    images = np.apply_along_axis(readWk, 1, bboxes).squeeze(4).astype('double')
    return images
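A minimal usage sketch for the bounding-box helpers above (the center coordinates are hypothetical, invented purely for illustration; readWkwFromCenter is omitted because it needs an actual wkw dataset on disk):

import numpy as np

# three made-up patch centers in (x, y, z) voxel coordinates
centers = np.array([[500, 600, 10],
                    [750, 420, 10],
                    [300, 900, 11]])

# 140 x 140 single-slice boxes around each center
bboxes = bboxesFromArray(centers, dims=np.array([140, 140, 0]))
print(bboxes[0])  # [430 530 10 140 140 1] -> top-left x/y, z, width, height, depth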
section \<open>Preliminaries\<close> theory Prelim imports "Fresh_Identifiers.Fresh_String" "Bounded_Deducibility_Security.Trivia" begin subsection \<open>The basic types\<close> (* This version of string is needed for code generation: *) definition "emptyStr = STR ''''" (* The users of the system: *) datatype name = Nam String.literal definition "emptyName \<equiv> Nam emptyStr" datatype inform = Info String.literal definition "emptyInfo \<equiv> Info emptyStr" datatype user = Usr name inform fun nameUser where "nameUser (Usr name info) = name" fun infoUser where "infoUser (Usr name info) = info" definition "emptyUser \<equiv> Usr emptyName emptyInfo" typedecl raw_data code_printing type_constructor raw_data \<rightharpoonup> (Scala) "java.io.File" (* Images (currently, pdf, to be changed): *) datatype img = emptyImg | Imag raw_data (* Visibility outside the current api: either friends-only or public (i.e., exportable outside to the other apis): *) datatype vis = Vsb String.literal (* Accepted values: friend and public *) abbreviation "FriendV \<equiv> Vsb (STR ''friend'')" (* abbreviation "InternalV \<equiv> Vsb (STR ''internal'')" *) abbreviation "PublicV \<equiv> Vsb (STR ''public'')" fun stringOfVis where "stringOfVis (Vsb str) = str" (* A post consists of a string for title, a string for its text, a (possibly empty) image and a visibility specification: *) datatype title = Tit String.literal definition "emptyTitle \<equiv> Tit emptyStr" datatype "text" = Txt String.literal definition "emptyText \<equiv> Txt emptyStr" datatype post = Pst title "text" img (* vis *) (* Getters: *) fun titlePost where "titlePost (Pst title text img) = title" fun textPost where "textPost (Pst title text img) = text" fun imgPost where "imgPost (Pst title text img) = img" (* fun visPost where "visPost (Pst title text img vis) = vis" *) (* Setters: *) fun setTitlePost where "setTitlePost (Pst title text img) title' = Pst title' text img" fun setTextPost where "setTextPost(Pst title text img) text' = Pst title text' img" fun setImgPost where "setImgPost (Pst title text img) img' = Pst title text img'" (* fun setVisPost where "setVisPost (Pst title text img vis) vis' = Pst title text img vis'" *) (* *) definition emptyPost :: post where "emptyPost \<equiv> Pst emptyTitle emptyText emptyImg" (* FriendV" *) (* initially set to the lowest visibility: friend *) lemma titlePost_emptyPost[simp]: "titlePost emptyPost = emptyTitle" and textPost_emptyPost[simp]: "textPost emptyPost = emptyText" and imgPost_emptyPost[simp]: "imgPost emptyPost = emptyImg" (* and visPost_emptyPost[simp]: "visPost emptyPost = FriendV" *) unfolding emptyPost_def by simp_all lemma set_get_post[simp]: "titlePost (setTitlePost ntc title) = title" "titlePost (setTextPost ntc text) = titlePost ntc" "titlePost (setImgPost ntc img) = titlePost ntc" (* "titlePost (setVisPost ntc vis) = titlePost ntc" *) (* *) "textPost (setTitlePost ntc title) = textPost ntc" "textPost (setTextPost ntc text) = text" "textPost (setImgPost ntc img) = textPost ntc" (* "textPost (setVisPost ntc vis) = textPost ntc" *) (* *) "imgPost (setTitlePost ntc title) = imgPost ntc" "imgPost (setTextPost ntc text) = imgPost ntc" "imgPost (setImgPost ntc img) = img" (* "imgPost (setVisPost ntc vis) = imgPost ntc" *) (* *) (* "visPost (setTitlePost ntc title) = visPost ntc" "visPost (setTextPost ntc text) = visPost ntc" "visPost (setImgPost ntc img) = visPost ntc" "visPost (setVisPost ntc vis) = vis" *) (* *) by(cases ntc, auto)+ lemma setTextPost_absorb[simp]: "setTitlePost 
(setTitlePost pst tit) tit1 = setTitlePost pst tit1" "setTextPost (setTextPost pst txt) txt1 = setTextPost pst txt1" "setImgPost (setImgPost pst img) img1 = setImgPost pst img1" (* "setVisPost (setVisPost pst vis) vis1 = setVisPost pst vis1" *) by (cases pst, auto)+ datatype password = Psw String.literal definition "emptyPass \<equiv> Psw emptyStr" datatype salt = Slt String.literal definition "emptySalt \<equiv> Slt emptyStr" (* Information associated to requests for registration: both for users and apis *) datatype requestInfo = ReqInfo String.literal definition "emptyRequestInfo \<equiv> ReqInfo emptyStr" subsection \<open>Identifiers\<close> datatype apiID = Aid String.literal datatype userID = Uid String.literal datatype postID = Pid String.literal definition "emptyApiID \<equiv> Aid emptyStr" definition "emptyUserID \<equiv> Uid emptyStr" definition "emptyPostID \<equiv> Pid emptyStr" (* *) fun apiIDAsStr where "apiIDAsStr (Aid str) = str" definition "getFreshApiID apiIDs \<equiv> Aid (fresh (set (map apiIDAsStr apiIDs)) (STR ''1''))" lemma ApiID_apiIDAsStr[simp]: "Aid (apiIDAsStr apiID) = apiID" by (cases apiID) auto lemma member_apiIDAsStr_iff[simp]: "str \<in> apiIDAsStr ` apiIDs \<longleftrightarrow> Aid str \<in> apiIDs" by (metis ApiID_apiIDAsStr image_iff apiIDAsStr.simps) lemma getFreshApiID: "\<not> getFreshApiID apiIDs \<in>\<in> apiIDs" using fresh_notIn[of "set (map apiIDAsStr apiIDs)"] unfolding getFreshApiID_def by auto (* *) fun userIDAsStr where "userIDAsStr (Uid str) = str" definition "getFreshUserID userIDs \<equiv> Uid (fresh (set (map userIDAsStr userIDs)) (STR ''2''))" lemma UserID_userIDAsStr[simp]: "Uid (userIDAsStr userID) = userID" by (cases userID) auto lemma member_userIDAsStr_iff[simp]: "str \<in> userIDAsStr ` (set userIDs) \<longleftrightarrow> Uid str \<in>\<in> userIDs" by (metis UserID_userIDAsStr image_iff userIDAsStr.simps) lemma getFreshUserID: "\<not> getFreshUserID userIDs \<in>\<in> userIDs" using fresh_notIn[of "set (map userIDAsStr userIDs)"] unfolding getFreshUserID_def by auto (* *) fun postIDAsStr where "postIDAsStr (Pid str) = str" definition "getFreshPostID postIDs \<equiv> Pid (fresh (set (map postIDAsStr postIDs)) (STR ''3''))" lemma PostID_postIDAsStr[simp]: "Pid (postIDAsStr postID) = postID" by (cases postID) auto lemma member_postIDAsStr_iff[simp]: "str \<in> postIDAsStr ` (set postIDs) \<longleftrightarrow> Pid str \<in>\<in> postIDs" by (metis PostID_postIDAsStr image_iff postIDAsStr.simps) lemma getFreshPostID: "\<not> getFreshPostID postIDs \<in>\<in> postIDs" using fresh_notIn[of "set (map postIDAsStr postIDs)"] unfolding getFreshPostID_def by auto end
{-# OPTIONS --cubical --no-import-sorts --safe #-} module Cubical.ZCohomology.MayerVietorisUnreduced where open import Cubical.ZCohomology.Base open import Cubical.ZCohomology.Properties open import Cubical.ZCohomology.GroupStructure open import Cubical.Foundations.HLevels open import Cubical.Foundations.Function open import Cubical.Foundations.Prelude open import Cubical.Foundations.Structure open import Cubical.Foundations.Isomorphism open import Cubical.Foundations.GroupoidLaws open import Cubical.Data.Sigma open import Cubical.HITs.Pushout open import Cubical.HITs.Sn open import Cubical.HITs.S1 open import Cubical.HITs.Susp open import Cubical.HITs.SetTruncation renaming (rec to sRec ; rec2 to sRec2 ; elim to sElim ; elim2 to sElim2) open import Cubical.HITs.PropositionalTruncation renaming (rec to pRec ; elim to pElim ; elim2 to pElim2 ; ∥_∥ to ∥_∥₁ ; ∣_∣ to ∣_∣₁) open import Cubical.Data.Nat open import Cubical.Algebra.Group open import Cubical.HITs.Truncation renaming (elim to trElim ; map to trMap ; rec to trRec ; elim3 to trElim3) open GroupHom module MV {ℓ ℓ' ℓ''} (A : Type ℓ) (B : Type ℓ') (C : Type ℓ'') (f : C → A) (g : C → B) where -- Proof from Brunerie 2016. -- We first define the three morphisms involved: i, Δ and d. private i* : (n : ℕ) → coHom n (Pushout f g) → coHom n A × coHom n B i* n = sRec (isSet× setTruncIsSet setTruncIsSet) λ δ → ∣ (λ x → δ (inl x)) ∣₂ , ∣ (λ x → δ (inr x)) ∣₂ iIsHom : (n : ℕ) → isGroupHom (coHomGr n (Pushout f g)) (×coHomGr n A B) (i* n) iIsHom n = sElim2 (λ _ _ → isOfHLevelPath 2 (isSet× setTruncIsSet setTruncIsSet) _ _) λ _ _ → refl i : (n : ℕ) → GroupHom (coHomGr n (Pushout f g)) (×coHomGr n A B) GroupHom.fun (i n) = i* n GroupHom.isHom (i n) = iIsHom n private distrLem : (n : ℕ) (x y z w : coHomK n) → (x +[ n ]ₖ y) -[ n ]ₖ (z +[ n ]ₖ w) ≡ (x -[ n ]ₖ z) +[ n ]ₖ (y -[ n ]ₖ w) distrLem n x y z w = cong (λ z → (x +[ n ]ₖ y) +[ n ]ₖ z) (-distrₖ n z w) ∙∙ sym (assocₖ n x y ((-[ n ]ₖ z) +[ n ]ₖ (-[ n ]ₖ w))) ∙∙ cong (λ y → x +[ n ]ₖ y) (commₖ n y ((-[ n ]ₖ z) +[ n ]ₖ (-[ n ]ₖ w)) ∙ sym (assocₖ n _ _ _)) ∙∙ assocₖ n _ _ _ ∙∙ cong (λ y → (x -[ n ]ₖ z) +[ n ]ₖ y) (commₖ n (-[ n ]ₖ w) y) Δ' : (n : ℕ) → coHom n A × coHom n B → coHom n C Δ' n (α , β) = coHomFun n f α -[ n ]ₕ coHomFun n g β Δ'-isMorph : (n : ℕ) → isGroupHom (×coHomGr n A B) (coHomGr n C) (Δ' n) Δ'-isMorph n = prodElim2 (λ _ _ → isOfHLevelPath 2 setTruncIsSet _ _ ) λ f' x1 g' x2 i → ∣ (λ x → distrLem n (f' (f x)) (g' (f x)) (x1 (g x)) (x2 (g x)) i) ∣₂ Δ : (n : ℕ) → GroupHom (×coHomGr n A B) (coHomGr n C) GroupHom.fun (Δ n) = Δ' n GroupHom.isHom (Δ n) = Δ'-isMorph n d-pre : (n : ℕ) → (C → coHomK n) → Pushout f g → coHomK (suc n) d-pre n γ (inl x) = 0ₖ (suc n) d-pre n γ (inr x) = 0ₖ (suc n) d-pre n γ (push a i) = Kn→ΩKn+1 n (γ a) i dHomHelper : (n : ℕ) (h l : C → coHomK n) (x : Pushout f g) → d-pre n (λ x → h x +[ n ]ₖ l x) x ≡ d-pre n h x +[ suc n ]ₖ d-pre n l x dHomHelper n h l (inl x) = sym (rUnitₖ (suc n) (0ₖ (suc n))) dHomHelper n h l (inr x) = sym (lUnitₖ (suc n) (0ₖ (suc n))) dHomHelper n h l (push a i) j = hcomp (λ k → λ { (i = i0) → rUnitₖ (suc n) (0ₖ (suc n)) (~ j) ; (i = i1) → lUnitₖ (suc n) (0ₖ (suc n)) (~ j) ; (j = i0) → Kn→ΩKn+1-hom n (h a) (l a) (~ k) i ; (j = i1) → cong₂Funct (λ x y → x +[ (suc n) ]ₖ y) (Kn→ΩKn+1 n (h a)) (Kn→ΩKn+1 n (l a)) (~ k) i }) (hcomp (λ k → λ { (i = i0) → rUnitₖ (suc n) (0ₖ (suc n)) (~ j) ; (i = i1) → lUnitₖ (suc n) (Kn→ΩKn+1 n (l a) k) (~ j)}) (hcomp (λ k → λ { (i = i0) → rUnitₖ (suc n) (0ₖ (suc n)) (~ j) ; (i = i1) → lUnitₖ≡rUnitₖ (suc n) (~ k) (~ j) 
; (j = i0) → Kn→ΩKn+1 n (h a) i ; (j = i1) → (Kn→ΩKn+1 n (h a) i) +[ (suc n) ]ₖ coHom-pt (suc n)}) (rUnitₖ (suc n) (Kn→ΩKn+1 n (h a) i) (~ j)))) dIsHom : (n : ℕ) → isGroupHom (coHomGr n C) (coHomGr (suc n) (Pushout f g)) (sRec setTruncIsSet λ a → ∣ d-pre n a ∣₂) dIsHom n = sElim2 (λ _ _ → isOfHLevelPath 2 setTruncIsSet _ _) λ f g i → ∣ funExt (λ x → dHomHelper n f g x) i ∣₂ d : (n : ℕ) → GroupHom (coHomGr n C) (coHomGr (suc n) (Pushout f g)) GroupHom.fun (d n) = sRec setTruncIsSet λ a → ∣ d-pre n a ∣₂ GroupHom.isHom (d n) = dIsHom n -- The long exact sequence Im-d⊂Ker-i : (n : ℕ) (x : ⟨ (coHomGr (suc n) (Pushout f g)) ⟩) → isInIm (coHomGr n C) (coHomGr (suc n) (Pushout f g)) (d n) x → isInKer (coHomGr (suc n) (Pushout f g)) (×coHomGr (suc n) A B) (i (suc n)) x Im-d⊂Ker-i n = sElim (λ _ → isSetΠ λ _ → isOfHLevelPath 2 (isSet× setTruncIsSet setTruncIsSet) _ _) λ a → pRec (isOfHLevelPath' 1 (isSet× setTruncIsSet setTruncIsSet) _ _) (sigmaElim (λ _ → isOfHLevelPath 2 (isSet× setTruncIsSet setTruncIsSet) _ _) λ δ b i → sRec (isSet× setTruncIsSet setTruncIsSet) (λ δ → ∣ (λ x → δ (inl x)) ∣₂ , ∣ (λ x → δ (inr x)) ∣₂ ) (b (~ i))) Ker-i⊂Im-d : (n : ℕ) (x : ⟨ coHomGr (suc n) (Pushout f g) ⟩) → isInKer (coHomGr (suc n) (Pushout f g)) (×coHomGr (suc n) A B) (i (suc n)) x → isInIm (coHomGr n C) (coHomGr (suc n) (Pushout f g)) (d n) x Ker-i⊂Im-d n = sElim (λ _ → isSetΠ λ _ → isProp→isSet propTruncIsProp) λ a p → pRec {A = (λ x → a (inl x)) ≡ λ _ → 0ₖ (suc n)} (isProp→ propTruncIsProp) (λ p1 → pRec propTruncIsProp λ p2 → ∣ ∣ (λ c → ΩKn+1→Kn n (sym (cong (λ F → F (f c)) p1) ∙∙ cong a (push c) ∙∙ cong (λ F → F (g c)) p2)) ∣₂ , cong ∣_∣₂ (funExt (λ δ → helper a p1 p2 δ)) ∣₁) (Iso.fun PathIdTrunc₀Iso (cong fst p)) (Iso.fun PathIdTrunc₀Iso (cong snd p)) where helper : (F : (Pushout f g) → coHomK (suc n)) (p1 : Path (_ → coHomK (suc n)) (λ a₁ → F (inl a₁)) (λ _ → coHom-pt (suc n))) (p2 : Path (_ → coHomK (suc n)) (λ a₁ → F (inr a₁)) (λ _ → coHom-pt (suc n))) → (δ : Pushout f g) → d-pre n (λ c → ΩKn+1→Kn n ((λ i₁ → p1 (~ i₁) (f c)) ∙∙ cong F (push c) ∙∙ cong (λ F → F (g c)) p2)) δ ≡ F δ helper F p1 p2 (inl x) = sym (cong (λ f → f x) p1) helper F p1 p2 (inr x) = sym (cong (λ f → f x) p2) helper F p1 p2 (push a i) j = hcomp (λ k → λ { (i = i0) → p1 (~ j) (f a) ; (i = i1) → p2 (~ j) (g a) ; (j = i0) → Iso.rightInv (Iso-Kn-ΩKn+1 n) ((λ i₁ → p1 (~ i₁) (f a)) ∙∙ cong F (push a) ∙∙ cong (λ F₁ → F₁ (g a)) p2) (~ k) i ; (j = i1) → F (push a i)}) (doubleCompPath-filler (sym (cong (λ F → F (f a)) p1)) (cong F (push a)) (cong (λ F → F (g a)) p2) (~ j) i) open GroupHom Im-i⊂Ker-Δ : (n : ℕ) (x : ⟨ ×coHomGr n A B ⟩) → isInIm (coHomGr n (Pushout f g)) (×coHomGr n A B) (i n) x → isInKer (×coHomGr n A B) (coHomGr n C) (Δ n) x Im-i⊂Ker-Δ n (Fa , Fb) = sElim {B = λ Fa → (Fb : _) → isInIm (coHomGr n (Pushout f g)) (×coHomGr n A B) (i n) (Fa , Fb) → isInKer (×coHomGr n A B) (coHomGr n C) (Δ n) (Fa , Fb)} (λ _ → isSetΠ2 λ _ _ → isOfHLevelPath 2 setTruncIsSet _ _) (λ Fa → sElim (λ _ → isSetΠ λ _ → isOfHLevelPath 2 setTruncIsSet _ _) λ Fb → pRec (setTruncIsSet _ _) (sigmaElim (λ x → isProp→isSet (setTruncIsSet _ _)) λ Fd p → helper n Fa Fb Fd p)) Fa Fb where helper : (n : ℕ) (Fa : A → coHomK n) (Fb : B → coHomK n) (Fd : (Pushout f g) → coHomK n) → (fun (i n) ∣ Fd ∣₂ ≡ (∣ Fa ∣₂ , ∣ Fb ∣₂)) → (fun (Δ n)) (∣ Fa ∣₂ , ∣ Fb ∣₂) ≡ 0ₕ n helper n Fa Fb Fd p = cong (fun (Δ n)) (sym p) ∙∙ (λ i → ∣ (λ x → Fd (inl (f x))) ∣₂ -[ n ]ₕ ∣ (λ x → Fd (push x (~ i))) ∣₂ ) ∙∙ rCancelₕ n ∣ (λ x → Fd (inl (f x))) ∣₂ Ker-Δ⊂Im-i : (n : ℕ) (a : ⟨ ×coHomGr n A B 
⟩) → isInKer (×coHomGr n A B) (coHomGr n C) (Δ n) a → isInIm (coHomGr n (Pushout f g)) (×coHomGr n A B) (i n) a Ker-Δ⊂Im-i n = prodElim (λ _ → isSetΠ (λ _ → isProp→isSet propTruncIsProp)) (λ Fa Fb p → pRec propTruncIsProp (λ q → ∣ ∣ helpFun Fa Fb q ∣₂ , refl ∣₁) (helper Fa Fb p)) where helper : (Fa : A → coHomK n) (Fb : B → coHomK n) → fun (Δ n) (∣ Fa ∣₂ , ∣ Fb ∣₂) ≡ 0ₕ n → ∥ Path (_ → _) (λ c → Fa (f c)) (λ c → Fb (g c)) ∥₁ helper Fa Fb p = Iso.fun PathIdTrunc₀Iso (sym (cong ∣_∣₂ (funExt (λ x → sym (assocₖ n _ _ _) ∙∙ cong (λ y → Fa (f x) +[ n ]ₖ y) (lCancelₖ n (Fb (g x))) ∙∙ rUnitₖ n (Fa (f x))))) ∙∙ cong (λ x → x +[ n ]ₕ ∣ (λ c → Fb (g c)) ∣₂) p ∙∙ lUnitₕ n _) helpFun : (Fa : A → coHomK n) (Fb : B → coHomK n) → ((λ c → Fa (f c)) ≡ (λ c → Fb (g c))) → Pushout f g → coHomK n helpFun Fa Fb p (inl x) = Fa x helpFun Fa Fb p (inr x) = Fb x helpFun Fa Fb p (push a i) = p i a private distrHelper : (n : ℕ) (p q : _) → ΩKn+1→Kn n p +[ n ]ₖ (-[ n ]ₖ ΩKn+1→Kn n q) ≡ ΩKn+1→Kn n (p ∙ sym q) distrHelper n p q = cong (λ x → ΩKn+1→Kn n p +[ n ]ₖ x) helper ∙ sym (ΩKn+1→Kn-hom n _ _) where helper : -[ n ]ₖ ΩKn+1→Kn n q ≡ ΩKn+1→Kn n (sym q) helper = sym (rUnitₖ n _) ∙∙ cong (λ x → (-[ n ]ₖ (ΩKn+1→Kn n q)) +[ n ]ₖ x) (sym helper2) ∙∙ (assocₖ n _ _ _ ∙∙ cong (λ x → x +[ n ]ₖ (ΩKn+1→Kn n (sym q))) (lCancelₖ n _) ∙∙ lUnitₖ n _) where helper2 : ΩKn+1→Kn n q +[ n ]ₖ (ΩKn+1→Kn n (sym q)) ≡ coHom-pt n helper2 = sym (ΩKn+1→Kn-hom n q (sym q)) ∙∙ cong (ΩKn+1→Kn n) (rCancel q) ∙∙ ΩKn+1→Kn-refl n Ker-d⊂Im-Δ : (n : ℕ) (a : coHom n C) → isInKer (coHomGr n C) (coHomGr (suc n) (Pushout f g)) (d n) a → isInIm (×coHomGr n A B) (coHomGr n C) (Δ n) a Ker-d⊂Im-Δ n = sElim (λ _ → isOfHLevelΠ 2 λ _ → isOfHLevelSuc 1 propTruncIsProp) λ Fc p → pRec propTruncIsProp (λ p → ∣ (∣ (λ a → ΩKn+1→Kn n (cong (λ f → f (inl a)) p)) ∣₂ , ∣ (λ b → ΩKn+1→Kn n (cong (λ f → f (inr b)) p)) ∣₂) , Iso.inv (PathIdTrunc₀Iso) ∣ funExt (λ c → helper2 Fc p c) ∣₁ ∣₁) (Iso.fun (PathIdTrunc₀Iso) p) where helper2 : (Fc : C → coHomK n) (p : d-pre n Fc ≡ (λ _ → coHom-pt (suc n))) (c : C) → ΩKn+1→Kn n (λ i₁ → p i₁ (inl (f c))) -[ n ]ₖ (ΩKn+1→Kn n (λ i₁ → p i₁ (inr (g c)))) ≡ Fc c helper2 Fc p c = distrHelper n _ _ ∙∙ cong (ΩKn+1→Kn n) helper3 ∙∙ Iso.leftInv (Iso-Kn-ΩKn+1 n) (Fc c) where helper3 : (λ i₁ → p i₁ (inl (f c))) ∙ sym (λ i₁ → p i₁ (inr (g c))) ≡ Kn→ΩKn+1 n (Fc c) helper3 = cong ((λ i₁ → p i₁ (inl (f c))) ∙_) (lUnit _) ∙ sym (PathP→compPathR (cong (λ f → cong f (push c)) p)) Im-Δ⊂Ker-d : (n : ℕ) (a : coHom n C) → isInIm (×coHomGr n A B) (coHomGr n C) (Δ n) a → isInKer (coHomGr n C) (coHomGr (suc n) (Pushout f g)) (d n) a Im-Δ⊂Ker-d n = sElim (λ _ → isOfHLevelΠ 2 λ _ → isOfHLevelPath 2 setTruncIsSet _ _) λ Fc → pRec (isOfHLevelPath' 1 setTruncIsSet _ _) (sigmaProdElim (λ _ → isOfHLevelPath 2 setTruncIsSet _ _) λ Fa Fb p → pRec (isOfHLevelPath' 1 setTruncIsSet _ _) (λ q → ((λ i → fun (d n) ∣ (q (~ i)) ∣₂) ∙ dΔ-Id n Fa Fb)) (Iso.fun (PathIdTrunc₀Iso) p)) where d-preLeftId : (n : ℕ) (Fa : A → coHomK n)(d : (Pushout f g)) → d-pre n (Fa ∘ f) d ≡ 0ₖ (suc n) d-preLeftId n Fa (inl x) = Kn→ΩKn+1 n (Fa x) d-preLeftId n Fa (inr x) = refl d-preLeftId n Fa (push a i) j = Kn→ΩKn+1 n (Fa (f a)) (j ∨ i) d-preRightId : (n : ℕ) (Fb : B → coHomK n) (d : (Pushout f g)) → d-pre n (Fb ∘ g) d ≡ 0ₖ (suc n) d-preRightId n Fb (inl x) = refl d-preRightId n Fb (inr x) = sym (Kn→ΩKn+1 n (Fb x)) d-preRightId n Fb (push a i) j = Kn→ΩKn+1 n (Fb (g a)) (~ j ∧ i) dΔ-Id : (n : ℕ) (Fa : A → coHomK n) (Fb : B → coHomK n) → fun (d n) (fun (Δ n) (∣ Fa ∣₂ , ∣ Fb ∣₂)) ≡ 0ₕ (suc n) dΔ-Id n Fa 
Fb = -distrLemma n (suc n) (d n) ∣ Fa ∘ f ∣₂ ∣ Fb ∘ g ∣₂ ∙∙ (λ i → ∣ (λ x → d-preLeftId n Fa x i) ∣₂ -[ (suc n) ]ₕ ∣ (λ x → d-preRightId n Fb x i) ∣₂) ∙∙ rCancelₕ (suc n) (0ₕ (suc n))
@testset "Pairwise" begin Γ = pairwise(GaussianVariogram(), Matrix(1.0I, 3, 3)) @test eltype(Γ) == Float64 @test issymmetric(Γ) Γ_f = pairwise(GaussianVariogram(range=1f0, sill=1f0, nugget=0f0), Matrix(1.0f0I, 3, 3)) @test eltype(Γ_f) == Float32 @test issymmetric(Γ_f) grid = RegularGrid{Float64}(10, 10) Γ = pairwise(GaussianVariogram(), grid, 1:5) @test size(Γ) == (5, 5) @test issymmetric(Γ) Γ = pairwise(GaussianVariogram(), grid, 1:3, 7:10) @test size(Γ) == (3, 4) @test all(Γ .> 0) end
Bursting with joy, this arrangement of the familiar song is malleted throughout. In addition to malleting the bells, the piece includes indications for ringers to click their own mallets together and to cross click mallets with their neighbors. An optional percussion part adds even more joy to this delightful arrangement.
module Control.Comonad.Env import public Control.Comonad.Env.Env as Control.Comonad.Env import public Control.Comonad.Env.Interface as Control.Comonad.Env
Require Import SpecDeps. Require Import RData. Require Import EventReplay. Require Import MoverTypes. Require Import Constants. Require Import CommonLib. Require Import AbsAccessor.Spec. Local Open Scope Z_scope. Section SpecLow. Definition rec_granule_measure_spec0 (rd: Pointer) (rec: Pointer) (data_size: Z64) (adt: RData) : option RData := match rd, rec, data_size with | (_rd_base, _rd_ofst), (_rec_base, _rec_ofst), VZ64 _data_size => when adt == measurement_extend_rec_header_spec (_rd_base, _rd_ofst) (_rec_base, _rec_ofst) adt; when adt == measurement_extend_rec_regs_spec (_rd_base, _rd_ofst) (_rec_base, _rec_ofst) adt; when adt == measurement_extend_rec_pstate_spec (_rd_base, _rd_ofst) (_rec_base, _rec_ofst) adt; when adt == measurement_extend_rec_sysregs_spec (_rd_base, _rd_ofst) (_rec_base, _rec_ofst) adt; Some adt end . End SpecLow.
Formal statement is: lemma of_nat_monom: "of_nat n = monom (of_nat n) 0" Informal statement is: The polynomial $n$ is equal to the monomial $nx^0$.
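For readability (a gloss added here, not part of the source): in HOL's polynomial library $\mathrm{monom}\;c\;k$ denotes the polynomial $c\,x^{k}$, so the lemma states
\[
\mathrm{of\_nat}\;n \;=\; n \cdot x^{0},
\]
i.e., the canonical embedding of the natural number $n$ into the polynomial ring yields the constant polynomial $n$.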
# magrittr provides the %>% pipe used throughout this file
util_libraries = c('moments', 'nortest', 'car', 'magrittr')
invisible(lapply(util_libraries, require, character.only = TRUE))

'%nin%' <- function(x, y) !('%in%'(x, y))

#' algorithmically applies the Box-Cox transform to bring the skew to 0
#' originally from my personal stash of macros
#' https://github.com/markanewman/mnmacros/blob/master/R/apply_bcskew0.r
apply_bcskew0 <- function(x) {
  stopifnot(!(missing(x) || is.null(x)))
  stopifnot(is.vector(x) && is.numeric(x))
  lx <- length(x)
  stopifnot(8 <= lx && lx <= 46340)
  stopifnot(x %>% unique() %>% length() > 1)
  lb <- -5
  ub <- 5
  step <- 1
  bestlamda <- NA
  bestskew <- Inf
  bestx <- x
  if((mx <- min(x)) <= 0) { x <- x - mx + 1 }
  for(i in 0:5) {
    for(lamda in seq(lb, ub, step) %>% round(i)) {
      x2 <- if(lamda == 0) { log(x) } else { x^lamda }
      cs <- (x2 %>% agostino.test())$statistic[1] %>% abs()
      if(cs < bestskew) {
        bestskew <- cs
        bestlamda <- lamda
        bestx <- x2
      }
    }
    lb <- bestlamda - step
    ub <- bestlamda + step
    step <- step/10
  }
  attr(bestx, 'lamda') <- bestlamda
  bestx
}

#' converts p-values into the more familiar '< .05' format
#' originally from my personal stash of copy/paste macros
apa_pvalue <- function(pv) {
  ifelse(
    pv < .001,
    '< .001',
    ifelse(
      pv < .05,
      sprintf('= %.3f', pv),
      '> .05'))
}

#' runs the 5 standard univariate normality tests on every column in the data
test_univariate_normality <- function(data) {
  univariate <- function(data, column) {
    t1 = ad.test(data[, column])
    t2 = cvm.test(data[, column])
    t3 = lillie.test(data[, column])
    t4 = pearson.test(data[, column])
    t5 = sf.test(data[, column])
    t2s = function(t) { sprintf('%.3f (p = %.2f)', unname(t$statistic), unname(t$p.value)) }
    c(t2s(t1), t2s(t2), t2s(t3), t2s(t4), t2s(t5))
  }
  columns <- colnames(data)
  cl <- length(columns)
  tmp <- vector(mode = "list", length = cl)
  for(i in 1:cl) {
    tmp[[i]] <- univariate(data, columns[i])
  }
  normality <- do.call(rbind, tmp)
  rownames(normality) <- columns
  colnames(normality) <- c('Anderson Darling', 'Cramer von Mises', 'Kolmogorov Smirnov', 'Pearson Chi Square', 'Shapiro Francia')
  normality
}

#' pulls out the (m)anova values into a table
#' https://stackoverflow.com/questions/25898691
lm_to_anova <- function(model, multivariate = F, type = c('II')) {
  tests <- c('Pillai', 'Wilks', 'Hotelling-Lawley', 'Roy')
  suppress <- c('(Intercept)', 'Residuals')
  outtests <- car:::print.Anova.mlm
  body(outtests)[[16]] <- quote(invisible(tests))
  body(outtests)[[15]] <- NULL
  mvf <- function(test_name) {
    result <- Anova(
      model,
      type = type,
      multivariate = multivariate,
      test.statistic = test_name)
    if('Anova.mlm' %in% class(result)) result <- outtests(result)
    result <- result[row.names(result) %nin% suppress,]
    row.names(result) <- sprintf('%s %s', test_name, row.names(result))
    result
  }
  tab <- lapply(tests, mvf)
  tab <- do.call(rbind, tab)
  tab
}
Hey there! I'm Antonio and I'd love to find a new friend in your furry friends. I've grown up around animals all my life and they have always been part of the family to me. Aside from the standard dogs and cats, my mom has had a horse for almost half my life, and a few years ago we were lucky enough to move to a new place in Boxford with a barn and plenty of room for new friends! So now we have 2 dogs, 2 indoor cats and 1 barn cat, 2 horses, 2 goats, 8 chickens and 8 more that belong to a friend, and we just started fostering a donkey (I think he's staying!). I do landscaping during the spring, summer and fall, but the winter is really slow for me, so any extra income helps this time of year, and it's always great to meet new furry friends!
[STATEMENT] lemma inj_dual_iff: "(\<partial> x = \<partial> y) = (x = y)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. (\<partial> x = \<partial> y) = (x = y) [PROOF STEP] by (meson inj_dual injD)
\section{Class Structure}
\begin{definition}
For $i,j\in I$, we say $i$ leads to $j$ (or sometimes $i\rightarrow j$) if $\mathbb P_i[\exists n,\,X_n=j]>0$.\\
We say $i$ communicates with $j$ (or sometimes $i\leftrightarrow j$) if $i\rightarrow j$ and $j\rightarrow i$.
\end{definition}
This definition, as are most definitions in maths, is motivated by a theorem.
\begin{theorem}
For $i\neq j$ the following are equivalent:\\
(a) $i\rightarrow j$.\\
(b) $p_{i_1i_2}\cdots p_{i_{n-1}i_n}>0$ for some $i_1,\ldots,i_n$ with $i_1=i$ and $i_n=j$.\\
(c) $p_{ij}^{(n)}>0$ for some $n$.
\end{theorem}
\begin{proof}
Quite obvious.
\end{proof}
\begin{proposition}
The relation $\leftrightarrow$ is an equivalence relation.
\end{proposition}
\begin{proof}
Reflexivity and symmetry are immediate from the definition. Transitivity follows from the preceding theorem.
\end{proof}
\begin{definition}
The equivalence classes of $\leftrightarrow$ are called the communicating classes of the Markov chain.\\
A Markov chain is irreducible if it has only one communicating class.
\end{definition}
\begin{definition}
A subset $C\subset I$ of the state space is closed if $i\in C$ and $i\rightarrow j$ imply $j\in C$.\\
A state $i\in I$ is absorbing if $\{i\}$ is closed.
\end{definition}
\begin{example}
Take the Markov chain with transition matrix
$$\begin{pmatrix}
1/2&1/2&&&&\\
&&1&&&\\
1/3&&&1/3&1/3&\\
&&&1/2&1/2&\\
&&&&&1\\
&&&&1&
\end{pmatrix}$$
By observation, the communicating classes are $\{1,2,3\},\{4\},\{5,6\}$ and only $\{5,6\}$ is closed.
\end{example}
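The communicating classes in the example can also be checked mechanically. The following sketch (an illustration added here, assuming only NumPy; not part of the notes) computes them as the classes of the mutual-reachability relation and tests each class for closedness:

import numpy as np

# transition matrix of the six-state example above
P = np.array([
    [1/2, 1/2, 0,   0,   0,   0  ],
    [0,   0,   1,   0,   0,   0  ],
    [1/3, 0,   0,   1/3, 1/3, 0  ],
    [0,   0,   0,   1/2, 1/2, 0  ],
    [0,   0,   0,   0,   0,   1  ],
    [0,   0,   0,   0,   1,   0  ],
])
n = len(P)

reach = np.eye(n, dtype=bool) | (P > 0)   # i -> j in at most one step
for _ in range(n):                        # transitive closure by iterated squaring
    reach = reach | ((reach.astype(int) @ reach.astype(int)) > 0)

comm = reach & reach.T                    # i <-> j: mutual reachability
classes = sorted({frozenset((np.flatnonzero(row) + 1).tolist()) for row in comm}, key=min)

for c in classes:
    inside = [s - 1 for s in c]
    outside = [j for j in range(n) if j + 1 not in c]
    closed = not (P[np.ix_(inside, outside)] > 0).any()
    print(sorted(c), 'closed' if closed else 'not closed')
# prints: [1, 2, 3] not closed / [4] not closed / [5, 6] closed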
How can repayment be made? Repayment may be made by demand draft or by depositing funds in ICICI Bank's account. What documents are acceptable as proof of dealing in commodities? A margin of 25%-40% must be maintained depending on the commodity. If the price of a commodity falls, a margin call notice is issued. When does the Bank charge 'penal interest'? Penal interest is charged if you fail to repay the loan plus interest by the end of the tenure of that tranche.
no <- c(1:3)
name <- c("Monu", "Himanshu", "Palash")
marks <- c(75, 76, 77)
dframe <- data.frame(no, name, marks)
dframe

# use a name that does not mask base::colnames()
col_names <- names(dframe)
col_names

# print the class of each column of the data frame
for (i in dframe) {
  print(class(i))
}
(* Title: JinjaThreads/Framework/FWBisimDeadlock.thy Author: Andreas Lochbihler *) section \<open>Preservation of deadlock across bisimulations\<close> theory FWBisimDeadlock imports FWBisimulation FWDeadlock begin context FWdelay_bisimulation_obs begin lemma actions_ok1_ex_actions_ok2: assumes "r1.actions_ok s1 t ta1" and "ta1 \<sim>m ta2" obtains s2 where "r2.actions_ok s2 t ta2" proof - let ?s2 = "(locks s1, (\<lambda>t. map_option (\<lambda>(x1, ln). (SOME x2. if final1 x1 then final2 x2 else \<not> final2 x2, ln)) (thr s1 t), undefined), wset s1, interrupts s1)" from \<open>ta1 \<sim>m ta2\<close> have "\<lbrace>ta1\<rbrace>\<^bsub>c\<^esub> = \<lbrace>ta2\<rbrace>\<^bsub>c\<^esub>" by(simp add: ta_bisim_def) with \<open>r1.actions_ok s1 t ta1\<close> have cao1: "r1.cond_action_oks s1 t \<lbrace>ta2\<rbrace>\<^bsub>c\<^esub>" by auto have "r2.cond_action_oks ?s2 t \<lbrace>ta2\<rbrace>\<^bsub>c\<^esub>" unfolding r2.cond_action_oks_conv_set proof fix ct assume "ct \<in> set \<lbrace>ta2\<rbrace>\<^bsub>c\<^esub>" with cao1 have "r1.cond_action_ok s1 t ct" unfolding r1.cond_action_oks_conv_set by auto thus "r2.cond_action_ok ?s2 t ct" using ex_final1_conv_ex_final2 by(cases ct)(fastforce intro: someI_ex[where P=final2])+ qed hence "r2.actions_ok ?s2 t ta2" using assms by(auto simp add: ta_bisim_def split del: if_split elim: rev_iffD1[OF _ thread_oks_bisim_inv]) thus thesis by(rule that) qed lemma actions_ok2_ex_actions_ok1: assumes "r2.actions_ok s2 t ta2" and "ta1 \<sim>m ta2" obtains s1 where "r1.actions_ok s1 t ta1" using FWdelay_bisimulation_obs.actions_ok1_ex_actions_ok2[OF FWdelay_bisimulation_obs_flip] assms unfolding flip_simps . lemma ex_actions_ok1_conv_ex_actions_ok2: "ta1 \<sim>m ta2 \<Longrightarrow> (\<exists>s1. r1.actions_ok s1 t ta1) \<longleftrightarrow> (\<exists>s2. r2.actions_ok s2 t ta2)" by(metis actions_ok1_ex_actions_ok2 actions_ok2_ex_actions_ok1) end context FWdelay_bisimulation_diverge begin lemma no_\<tau>Move1_\<tau>s_to_no_\<tau>Move2: fixes no_\<tau>moves1 no_\<tau>moves2 defines "no_\<tau>moves1 \<equiv> \<lambda>s1 t. wset s1 t = None \<and> (\<exists>x. thr s1 t = \<lfloor>(x, no_wait_locks)\<rfloor> \<and> (\<forall>x' m'. \<not> r1.silent_move t (x, shr s1) (x', m')))" defines "no_\<tau>moves2 \<equiv> \<lambda>s2 t. wset s2 t = None \<and> (\<exists>x. thr s2 t = \<lfloor>(x, no_wait_locks)\<rfloor> \<and> (\<forall>x' m'. \<not> r2.silent_move t (x, shr s2) (x', m')))" assumes mbisim: "s1 \<approx>m (ls2, (ts2, m2), ws2, is2)" shows "\<exists>ts2'. r2.mthr.silent_moves (ls2, (ts2, m2), ws2, is2) (ls2, (ts2', m2), ws2, is2) \<and> (\<forall>t. no_\<tau>moves1 s1 t \<longrightarrow> no_\<tau>moves2 (ls2, (ts2', m2), ws2, is2) t) \<and> s1 \<approx>m (ls2, (ts2', m2), ws2, is2)" proof - from mbisim have "finite (dom (thr s1))" by(simp add: mbisim_def) hence "finite {t. no_\<tau>moves1 s1 t}" unfolding no_\<tau>moves1_def by-(rule finite_subset, auto) thus ?thesis using \<open>s1 \<approx>m (ls2, (ts2, m2), ws2, is2)\<close> proof(induct A\<equiv>"{t. no_\<tau>moves1 s1 t}" arbitrary: s1 ts2 rule: finite_induct) case empty from \<open>{} = {t. no_\<tau>moves1 s1 t}\<close>[symmetric] have "no_\<tau>moves1 s1 = (\<lambda>t. False)" by(auto intro: ext) thus ?case using \<open>s1 \<approx>m (ls2, (ts2, m2), ws2, is2)\<close> by auto next case (insert t A) note mbisim = \<open>s1 \<approx>m (ls2, (ts2, m2), ws2, is2)\<close> from \<open>insert t A = {t. 
no_\<tau>moves1 s1 t}\<close> have "no_\<tau>moves1 s1 t" by auto then obtain x1 where ts1t: "thr s1 t = \<lfloor>(x1, no_wait_locks)\<rfloor>" and ws1t: "wset s1 t = None" and \<tau>1: "\<And>x1m1'. \<not> r1.silent_move t (x1, shr s1) x1m1'" by(auto simp add: no_\<tau>moves1_def) from ts1t mbisim obtain x2 where ts2t: "ts2 t = \<lfloor>(x2, no_wait_locks)\<rfloor>" and "t \<turnstile> (x1, shr s1) \<approx> (x2, m2)" by(auto dest: mbisim_thrD1) from mbisim ws1t have "ws2 t = None" by(simp add: mbisim_def) let ?s1 = "(locks s1, ((thr s1)(t := None), shr s1), wset s1, interrupts s1)" let ?s2 = "(ls2, (ts2(t := None), m2), ws2, is2)" from \<open>insert t A = {t. no_\<tau>moves1 s1 t}\<close> \<open>t \<notin> A\<close> have A: "A = {t. no_\<tau>moves1 ?s1 t}" by(auto simp add: no_\<tau>moves1_def) have "?s1 \<approx>m ?s2" proof(rule mbisimI) from mbisim show "finite (dom (thr ?s1))" "locks ?s1 = locks ?s2" "wset ?s1 = wset ?s2" "interrupts ?s1 = interrupts ?s2" by(simp_all add: mbisim_def) next from mbisim_wset_thread_ok1[OF mbisim] ws1t show "wset_thread_ok (wset ?s1) (thr ?s1)" by(auto intro!: wset_thread_okI dest: wset_thread_okD split: if_split_asm) next fix t' assume "thr ?s1 t' = None" with mbisim_thrNone_eq[OF mbisim, of t'] show "thr ?s2 t' = None" by auto next fix t' x1 ln assume "thr ?s1 t' = \<lfloor>(x1, ln)\<rfloor>" hence "thr s1 t' = \<lfloor>(x1, ln)\<rfloor>" "t' \<noteq> t" by(auto split: if_split_asm) with mbisim_thrD1[OF mbisim \<open>thr s1 t' = \<lfloor>(x1, ln)\<rfloor>\<close>] mbisim show "\<exists>x2. thr ?s2 t' = \<lfloor>(x2, ln)\<rfloor> \<and> t' \<turnstile> (x1, shr ?s1) \<approx> (x2, shr ?s2) \<and> (wset ?s2 t' = None \<or> x1 \<approx>w x2)" by(auto simp add: mbisim_def) qed with A have "\<exists>ts2'. r2.mthr.silent_moves ?s2 (ls2, (ts2', m2), ws2, is2) \<and> (\<forall>t. no_\<tau>moves1 ?s1 t \<longrightarrow> no_\<tau>moves2 (ls2, (ts2', m2), ws2, is2) t) \<and> ?s1 \<approx>m (ls2, (ts2', m2), ws2, is2)" by(rule insert) then obtain ts2' where "r2.mthr.silent_moves ?s2 (ls2, (ts2', m2), ws2, is2)" and no_\<tau>: "\<And>t. no_\<tau>moves1 ?s1 t \<Longrightarrow> no_\<tau>moves2 (ls2, (ts2', m2), ws2, is2) t" and "?s1 \<approx>m (ls2, (ts2', m2), ws2, is2)" by auto let ?s2' = "(ls2, (ts2'(t \<mapsto> (x2, no_wait_locks)), m2), ws2, is2)" from ts2t have "ts2(t \<mapsto> (x2, no_wait_locks)) = ts2" by(auto intro: ext) with r2.\<tau>mRedT_add_thread_inv[OF \<open>r2.mthr.silent_moves ?s2 (ls2, (ts2', m2), ws2, is2)\<close>, of t "(x2, no_wait_locks)"] have "r2.mthr.silent_moves (ls2, (ts2, m2), ws2, is2) ?s2'" by simp from no_\<tau>move1_\<tau>s_to_no_\<tau>move2[OF \<open>t \<turnstile> (x1, shr s1) \<approx> (x2, m2)\<close> \<tau>1] obtain x2' m2' where "r2.silent_moves t (x2, m2) (x2', m2')" and "\<And>x2'' m2''. 
\<not> r2.silent_move t (x2', m2') (x2'', m2'')" and "t \<turnstile> (x1, shr s1) \<approx> (x2', m2')" by auto let ?s2'' = "(ls2, (ts2'(t \<mapsto> (x2', no_wait_locks)), m2'), ws2, is2)" from red2_rtrancl_\<tau>_heapD[OF \<open>r2.silent_moves t (x2, m2) (x2', m2')\<close> \<open>t \<turnstile> (x1, shr s1) \<approx> (x2, m2)\<close>] have "m2' = m2" by simp with \<open>r2.silent_moves t (x2, m2) (x2', m2')\<close> have "r2.silent_moves t (x2, shr ?s2') (x2', m2)" by simp hence "r2.mthr.silent_moves ?s2' (redT_upd_\<epsilon> ?s2' t x2' m2)" by(rule red2_rtrancl_\<tau>_into_RedT_\<tau>)(auto simp add: \<open>ws2 t = None\<close> intro: \<open>t \<turnstile> (x1, shr s1) \<approx> (x2, m2)\<close>) also have "redT_upd_\<epsilon> ?s2' t x2' m2 = ?s2''" using \<open>m2' = m2\<close> by(auto simp add: fun_eq_iff redT_updLns_def finfun_Diag_const2 o_def) finally (back_subst) have "r2.mthr.silent_moves (ls2, (ts2, m2), ws2, is2) ?s2''" using \<open>r2.mthr.silent_moves (ls2, (ts2, m2), ws2, is2) ?s2'\<close> by-(rule rtranclp_trans) moreover { fix t' assume no_\<tau>1: "no_\<tau>moves1 s1 t'" have "no_\<tau>moves2 ?s2'' t'" proof(cases "t' = t") case True thus ?thesis using \<open>ws2 t = None\<close> \<open>\<And>x2'' m2''. \<not> r2.silent_move t (x2', m2') (x2'', m2'')\<close> by(simp add: no_\<tau>moves2_def) next case False with no_\<tau>1 have "no_\<tau>moves1 ?s1 t'" by(simp add: no_\<tau>moves1_def) hence "no_\<tau>moves2 (ls2, (ts2', m2), ws2, is2) t'" by(rule \<open>no_\<tau>moves1 ?s1 t' \<Longrightarrow> no_\<tau>moves2 (ls2, (ts2', m2), ws2, is2) t'\<close>) with False \<open>m2' = m2\<close> show ?thesis by(simp add: no_\<tau>moves2_def) qed } moreover have "s1 \<approx>m ?s2''" proof(rule mbisimI) from mbisim show "finite (dom (thr s1))" "locks s1 = locks ?s2''" "wset s1 = wset ?s2''" "interrupts s1 = interrupts ?s2''" by(simp_all add: mbisim_def) next from mbisim show "wset_thread_ok (wset s1) (thr s1)" by(rule mbisim_wset_thread_ok1) next fix t' assume "thr s1 t' = None" hence "thr ?s1 t' = None" "t' \<noteq> t" using ts1t by auto with mbisim_thrNone_eq[OF \<open>?s1 \<approx>m (ls2, (ts2', m2), ws2, is2)\<close>, of t'] show "thr ?s2'' t' = None" by simp next fix t' x1' ln' assume "thr s1 t' = \<lfloor>(x1', ln')\<rfloor>" show "\<exists>x2. thr ?s2'' t' = \<lfloor>(x2, ln')\<rfloor> \<and> t' \<turnstile> (x1', shr s1) \<approx> (x2, shr ?s2'') \<and> (wset ?s2'' t' = None \<or> x1' \<approx>w x2)" proof(cases "t = t'") case True with \<open>thr s1 t' = \<lfloor>(x1', ln')\<rfloor>\<close> ts1t \<open>t \<turnstile> (x1, shr s1) \<approx> (x2', m2')\<close> \<open>m2' = m2\<close> \<open>ws2 t = None\<close> show ?thesis by auto next case False with mbisim_thrD1[OF \<open>?s1 \<approx>m (ls2, (ts2', m2), ws2, is2)\<close>, of t' x1' ln'] \<open>thr s1 t' = \<lfloor>(x1', ln')\<rfloor>\<close> \<open>m2' = m2\<close> mbisim show ?thesis by(auto simp add: mbisim_def) qed qed ultimately show ?case unfolding \<open>m2' = m2\<close> by blast qed qed lemma no_\<tau>Move2_\<tau>s_to_no_\<tau>Move1: fixes no_\<tau>moves1 no_\<tau>moves2 defines "no_\<tau>moves1 \<equiv> \<lambda>s1 t. wset s1 t = None \<and> (\<exists>x. thr s1 t = \<lfloor>(x, no_wait_locks)\<rfloor> \<and> (\<forall>x' m'. \<not> r1.silent_move t (x, shr s1) (x', m')))" defines "no_\<tau>moves2 \<equiv> \<lambda>s2 t. wset s2 t = None \<and> (\<exists>x. thr s2 t = \<lfloor>(x, no_wait_locks)\<rfloor> \<and> (\<forall>x' m'. 
\<not> r2.silent_move t (x, shr s2) (x', m')))" assumes "(ls1, (ts1, m1), ws1, is1) \<approx>m s2" shows "\<exists>ts1'. r1.mthr.silent_moves (ls1, (ts1, m1), ws1, is1) (ls1, (ts1', m1), ws1, is1) \<and> (\<forall>t. no_\<tau>moves2 s2 t \<longrightarrow> no_\<tau>moves1 (ls1, (ts1', m1), ws1, is1) t) \<and> (ls1, (ts1', m1), ws1, is1) \<approx>m s2" using assms FWdelay_bisimulation_diverge.no_\<tau>Move1_\<tau>s_to_no_\<tau>Move2[OF FWdelay_bisimulation_diverge_flip] unfolding flip_simps by blast lemma deadlock_mbisim_not_final_thread_pres: assumes dead: "t \<in> r1.deadlocked s1 \<or> r1.deadlock s1" and nfin: "r1.not_final_thread s1 t" and fin: "r1.final_thread s1 t \<Longrightarrow> r2.final_thread s2 t" and mbisim: "s1 \<approx>m s2" shows "r2.not_final_thread s2 t" proof - from nfin obtain x1 ln where "thr s1 t = \<lfloor>(x1, ln)\<rfloor>" by cases auto with mbisim obtain x2 where "thr s2 t = \<lfloor>(x2, ln)\<rfloor>" "t \<turnstile> (x1, shr s1) \<approx> (x2, shr s2)" "wset s1 t = None \<or> x1 \<approx>w x2" by(auto dest: mbisim_thrD1) show "r2.not_final_thread s2 t" proof(cases "wset s1 t = None \<and> ln = no_wait_locks") case False with \<open>r1.not_final_thread s1 t\<close> \<open>thr s1 t = \<lfloor>(x1, ln)\<rfloor>\<close> \<open>thr s2 t = \<lfloor>(x2, ln)\<rfloor>\<close> mbisim show ?thesis by cases(auto simp add: mbisim_def r2.not_final_thread_iff) next case True with \<open>r1.not_final_thread s1 t\<close> \<open>thr s1 t = \<lfloor>(x1, ln)\<rfloor>\<close> have "\<not> final1 x1" by(cases) auto have "\<not> final2 x2" proof assume "final2 x2" with final2_simulation[OF \<open>t \<turnstile> (x1, shr s1) \<approx> (x2, shr s2)\<close>] obtain x1' m1' where "r1.silent_moves t (x1, shr s1) (x1', m1')" "t \<turnstile> (x1', m1') \<approx> (x2, shr s2)" "final1 x1'" by auto from \<open>r1.silent_moves t (x1, shr s1) (x1', m1')\<close> have "x1' = x1" proof(cases rule: converse_rtranclpE2[consumes 1, case_names refl step]) case (step x1'' m1'') from \<open>r1.silent_move t (x1, shr s1) (x1'', m1'')\<close> have "t \<turnstile> (x1, shr s1) -1-\<epsilon>\<rightarrow> (x1'', m1'')" by(auto dest: r1.silent_tl) hence "r1.redT s1 (t, \<epsilon>) (redT_upd_\<epsilon> s1 t x1'' m1'')" using \<open>thr s1 t = \<lfloor>(x1, ln)\<rfloor>\<close> True by -(erule r1.redT_normal, auto simp add: redT_updLns_def finfun_Diag_const2 o_def redT_updWs_def) hence False using dead by(auto intro: r1.deadlock_no_red r1.red_no_deadlock) thus ?thesis .. qed simp with \<open>\<not> final1 x1\<close> \<open>final1 x1'\<close> show False by simp qed thus ?thesis using \<open>thr s2 t = \<lfloor>(x2, ln)\<rfloor>\<close> by(auto simp add: r2.not_final_thread_iff) qed qed lemma deadlocked1_imp_\<tau>s_deadlocked2: assumes mbisim: "s1 \<approx>m s2" and dead: "t \<in> r1.deadlocked s1" shows "\<exists>s2'. r2.mthr.silent_moves s2 s2' \<and> t \<in> r2.deadlocked s2' \<and> s1 \<approx>m s2'" proof - from mfinal1_inv_simulation[OF mbisim] obtain ls2 ts2 m2 ws2 is2 where red1: "r2.mthr.silent_moves s2 (ls2, (ts2, m2), ws2, is2)" and "s1 \<approx>m (ls2, (ts2, m2), ws2, is2)" and "m2 = shr s2" and fin: "\<And>t. r1.final_thread s1 t \<Longrightarrow> r2.final_thread (ls2, (ts2, m2), ws2, is2) t" by fastforce from no_\<tau>Move1_\<tau>s_to_no_\<tau>Move2[OF \<open>s1 \<approx>m (ls2, (ts2, m2), ws2, is2)\<close>] obtain ts2' where red2: "r2.mthr.silent_moves (ls2, (ts2, m2), ws2, is2) (ls2, (ts2', m2), ws2, is2)" and no_\<tau>: "\<And>t x1 x2 x2' m2'. 
\<lbrakk> wset s1 t = None; thr s1 t = \<lfloor>(x1, no_wait_locks)\<rfloor>; ts2' t = \<lfloor>(x2, no_wait_locks)\<rfloor>; \<And>x' m'. r1.silent_move t (x1, shr s1) (x', m') \<Longrightarrow> False \<rbrakk> \<Longrightarrow> \<not> r2.silent_move t (x2, m2) (x2', m2')" and mbisim: "s1 \<approx>m (ls2, (ts2', m2), ws2, is2)" by fastforce from mbisim have mbisim_eqs: "ls2 = locks s1" "ws2 = wset s1" "is2 = interrupts s1" by(simp_all add: mbisim_def) let ?s2 = "(ls2, (ts2', m2), ws2, is2)" from red2 have fin': "\<And>t. r1.final_thread s1 t \<Longrightarrow> r2.final_thread ?s2 t" by(rule r2.\<tau>mRedT_preserves_final_thread)(rule fin) from dead have "t \<in> r2.deadlocked ?s2" proof(coinduct) case (deadlocked t) thus ?case proof(cases rule: r1.deadlocked_elims) case (lock x1) hence csmw: "\<And>LT. r1.can_sync t x1 (shr s1) LT \<Longrightarrow> \<exists>lt\<in>LT. r1.must_wait s1 t lt (r1.deadlocked s1 \<union> r1.final_threads s1)" by blast from \<open>thr s1 t = \<lfloor>(x1, no_wait_locks)\<rfloor>\<close> mbisim obtain x2 where "ts2' t = \<lfloor>(x2, no_wait_locks)\<rfloor>" and bisim: "t \<turnstile> (x1, shr s1) \<approx> (x2, m2)" by(auto dest: mbisim_thrD1) note \<open>ts2' t = \<lfloor>(x2, no_wait_locks)\<rfloor>\<close> moreover { from \<open>r1.must_sync t x1 (shr s1)\<close> obtain ta1 x1' m1' where r1: "t \<turnstile> (x1, shr s1) -1-ta1\<rightarrow> (x1', m1')" and s1': "\<exists>s1'. r1.actions_ok s1' t ta1" by(fastforce elim: r1.must_syncE) have "\<not> \<tau>move1 (x1, shr s1) ta1 (x1', m1')" (is "\<not> ?\<tau>") proof assume "?\<tau>" hence "ta1 = \<epsilon>" by(rule r1.silent_tl) with r1 have "r1.can_sync t x1 (shr s1) {}" by(auto intro!: r1.can_syncI simp add: collect_locks_def collect_interrupts_def) from csmw[OF this] show False by blast qed from simulation1[OF bisim r1 this] obtain x2' m2' x2'' m2'' ta2 where r2: "r2.silent_moves t (x2, m2) (x2', m2')" and r2': "t \<turnstile> (x2', m2') -2-ta2\<rightarrow> (x2'', m2'')" and \<tau>2: "\<not> \<tau>move2 (x2', m2') ta2 (x2'', m2'')" and bisim': "t \<turnstile> (x1', m1') \<approx> (x2'', m2'')" and tasim: "ta1 \<sim>m ta2" by auto from r2 have "\<exists>ta2 x2' m2' s2'. t \<turnstile> (x2, m2) -2-ta2\<rightarrow> (x2', m2') \<and> r2.actions_ok s2' t ta2" proof(cases rule: converse_rtranclpE2[consumes 1, case_names base step]) case base from r2'[folded base] s1'[unfolded ex_actions_ok1_conv_ex_actions_ok2[OF tasim]] show ?thesis by blast next case (step x2''' m2''') hence "t \<turnstile> (x2, m2) -2-\<epsilon>\<rightarrow> (x2''', m2''')" by(auto dest: r2.silent_tl) moreover have "r2.actions_ok (undefined, (undefined, undefined), Map.empty, undefined) t \<epsilon>" by auto ultimately show ?thesis by-(rule exI conjI|assumption)+ qed hence "r2.must_sync t x2 m2" unfolding r2.must_sync_def2 . 
} moreover { fix LT assume "r2.can_sync t x2 m2 LT" then obtain ta2 x2' m2' where r2: "t \<turnstile> (x2, m2) -2-ta2\<rightarrow> (x2', m2')" and LT: "LT = collect_locks \<lbrace>ta2\<rbrace>\<^bsub>l\<^esub> <+> collect_cond_actions \<lbrace>ta2\<rbrace>\<^bsub>c\<^esub> <+> collect_interrupts \<lbrace>ta2\<rbrace>\<^bsub>i\<^esub>" by(auto elim: r2.can_syncE) from \<open>wset s1 t = None\<close> \<open>thr s1 t = \<lfloor>(x1, no_wait_locks)\<rfloor>\<close> \<open>ts2' t = \<lfloor>(x2, no_wait_locks)\<rfloor>\<close> have "\<not> r2.silent_move t (x2, m2) (x2', m2')" proof(rule no_\<tau>) fix x1' m1' assume "r1.silent_move t (x1, shr s1) (x1', m1')" hence "t \<turnstile> (x1, shr s1) -1-\<epsilon>\<rightarrow> (x1', m1')" by(auto dest: r1.silent_tl) hence "r1.can_sync t x1 (shr s1) {}" by(auto intro: r1.can_syncI simp add: collect_locks_def collect_interrupts_def) with csmw[OF this] show False by blast qed with r2 have "\<not> \<tau>move2 (x2, m2) ta2 (x2', m2')" by auto from simulation2[OF bisim r2 this] obtain x1' m1' x1'' m1'' ta1 where \<tau>r1: "r1.silent_moves t (x1, shr s1) (x1', m1')" and r1: "t \<turnstile> (x1', m1') -1-ta1\<rightarrow> (x1'', m1'')" and n\<tau>1: "\<not> \<tau>move1 (x1', m1') ta1 (x1'', m1'')" and bisim': "t \<turnstile> (x1'', m1'') \<approx> (x2', m2')" and tlsim: "ta1 \<sim>m ta2" by auto from \<tau>r1 obtain [simp]: "x1' = x1" "m1' = shr s1" proof(cases rule: converse_rtranclpE2[consumes 1, case_names refl step]) case (step X M) from \<open>r1.silent_move t (x1, shr s1) (X, M)\<close> have "t \<turnstile> (x1, shr s1) -1-\<epsilon>\<rightarrow> (X, M)" by(auto dest: r1.silent_tl) hence "r1.can_sync t x1 (shr s1) {}" by(auto intro: r1.can_syncI simp add: collect_locks_def collect_interrupts_def) with csmw[OF this] have False by blast thus ?thesis .. 
qed blast from tlsim LT have "LT = collect_locks \<lbrace>ta1\<rbrace>\<^bsub>l\<^esub> <+> collect_cond_actions \<lbrace>ta1\<rbrace>\<^bsub>c\<^esub> <+> collect_interrupts \<lbrace>ta1\<rbrace>\<^bsub>i\<^esub>" by(auto simp add: ta_bisim_def) with r1 have "r1.can_sync t x1 (shr s1) LT" by(auto intro: r1.can_syncI) from csmw[OF this] obtain lt where lt: "lt \<in> LT" and mw: "r1.must_wait s1 t lt (r1.deadlocked s1 \<union> r1.final_threads s1)" by blast have subset: "r1.deadlocked s1 \<union> r1.final_threads s1 \<subseteq> r1.deadlocked s1 \<union> r2.deadlocked s2 \<union> r2.final_threads ?s2" by(auto dest: fin') from mw have "r2.must_wait ?s2 t lt (r1.deadlocked s1 \<union> r2.deadlocked ?s2 \<union> r2.final_threads ?s2)" proof(cases rule: r1.must_wait_elims) case lock thus ?thesis by(auto simp add: mbisim_eqs dest!: fin') next case (join t') from \<open>r1.not_final_thread s1 t'\<close> obtain x1 ln where "thr s1 t' = \<lfloor>(x1, ln)\<rfloor>" by cases auto with mbisim obtain x2 where "ts2' t' = \<lfloor>(x2, ln)\<rfloor>" "t' \<turnstile> (x1, shr s1) \<approx> (x2, m2)" by(auto dest: mbisim_thrD1) show ?thesis proof(cases "wset s1 t' = None \<and> ln = no_wait_locks") case False with \<open>r1.not_final_thread s1 t'\<close> \<open>thr s1 t' = \<lfloor>(x1, ln)\<rfloor>\<close> \<open>ts2' t' = \<lfloor>(x2, ln)\<rfloor>\<close> \<open>lt = Inr (Inl t')\<close> join show ?thesis by(auto simp add: mbisim_eqs r2.not_final_thread_iff r1.final_thread_def) next case True with \<open>r1.not_final_thread s1 t'\<close> \<open>thr s1 t' = \<lfloor>(x1, ln)\<rfloor>\<close> have "\<not> final1 x1" by(cases) auto with join \<open>thr s1 t' = \<lfloor>(x1, ln)\<rfloor>\<close> have "t' \<in> r1.deadlocked s1" by(auto simp add: r1.final_thread_def) have "\<not> final2 x2" proof assume "final2 x2" with final2_simulation[OF \<open>t' \<turnstile> (x1, shr s1) \<approx> (x2, m2)\<close>] obtain x1' m1' where "r1.silent_moves t' (x1, shr s1) (x1', m1')" and "t' \<turnstile> (x1', m1') \<approx> (x2, m2)" "final1 x1'" by auto from \<open>r1.silent_moves t' (x1, shr s1) (x1', m1')\<close> have "x1' = x1" proof(cases rule: converse_rtranclpE2[consumes 1, case_names refl step]) case (step x1'' m1'') from \<open>r1.silent_move t' (x1, shr s1) (x1'', m1'')\<close> have "t' \<turnstile> (x1, shr s1) -1-\<epsilon>\<rightarrow> (x1'', m1'')" by(auto dest: r1.silent_tl) hence "r1.redT s1 (t', \<epsilon>) (redT_upd_\<epsilon> s1 t' x1'' m1'')" using \<open>thr s1 t' = \<lfloor>(x1, ln)\<rfloor>\<close> True by -(erule r1.redT_normal, auto simp add: redT_updLns_def redT_updWs_def finfun_Diag_const2 o_def) hence False using \<open>t' \<in> r1.deadlocked s1\<close> by(rule r1.red_no_deadlock) thus ?thesis .. 
qed simp with \<open>\<not> final1 x1\<close> \<open>final1 x1'\<close> show False by simp qed thus ?thesis using \<open>ts2' t' = \<lfloor>(x2, ln)\<rfloor>\<close> join by(auto simp add: r2.not_final_thread_iff r1.final_thread_def) qed next case (interrupt t') have "r2.all_final_except ?s2 (r1.deadlocked s1 \<union> r2.deadlocked ?s2 \<union> r2.final_threads ?s2)" proof(rule r2.all_final_exceptI) fix t'' assume "r2.not_final_thread ?s2 t''" then obtain x2 ln where "thr ?s2 t'' = \<lfloor>(x2, ln)\<rfloor>" and fin: "\<not> final2 x2 \<or> ln \<noteq> no_wait_locks \<or> wset ?s2 t'' \<noteq> None" by(auto simp add: r2.not_final_thread_iff) from \<open>thr ?s2 t'' = \<lfloor>(x2, ln)\<rfloor>\<close> mbisim obtain x1 where ts1t'': "thr s1 t'' = \<lfloor>(x1, ln)\<rfloor>" and bisim'': "t'' \<turnstile> (x1, shr s1) \<approx> (x2, shr ?s2)" by(auto dest: mbisim_thrD2) have "r1.not_final_thread s1 t''" proof(cases "wset ?s2 t'' = None \<and> ln = no_wait_locks") case True with fin have "\<not> final2 x2" by simp hence "\<not> final1 x1" proof(rule contrapos_nn) assume "final1 x1" with final1_simulation[OF bisim''] obtain x2' m2' where \<tau>s2: "r2.silent_moves t'' (x2, shr ?s2) (x2', m2')" and bisim''': "t'' \<turnstile> (x1, shr s1) \<approx> (x2', m2')" and "final2 x2'" by auto from \<tau>s2 have "x2' = x2" proof(cases rule: converse_rtranclpE2[consumes 1, case_names refl step]) case refl thus ?thesis by simp next case (step x2'' m2'') from True have "wset s1 t'' = None" "thr s1 t'' = \<lfloor>(x1, no_wait_locks)\<rfloor>" "ts2' t'' = \<lfloor>(x2, no_wait_locks)\<rfloor>" using ts1t'' \<open>thr ?s2 t'' = \<lfloor>(x2, ln)\<rfloor>\<close> mbisim by(simp_all add: mbisim_def) hence no_\<tau>2: "\<not> r2.silent_move t'' (x2, m2) (x2'', m2'')" proof(rule no_\<tau>) fix x1' m1' assume "r1.silent_move t'' (x1, shr s1) (x1', m1')" with \<open>final1 x1\<close> show False by(auto dest: r1.final_no_red) qed with \<open>r2.silent_move t'' (x2, shr ?s2) (x2'', m2'')\<close> have False by simp thus ?thesis .. qed with \<open>final2 x2'\<close> show "final2 x2" by simp qed with ts1t'' show ?thesis .. next case False with ts1t'' mbisim show ?thesis by(auto simp add: r1.not_final_thread_iff mbisim_def) qed with \<open>r1.all_final_except s1 (r1.deadlocked s1 \<union> r1.final_threads s1)\<close> have "t'' \<in> r1.deadlocked s1 \<union> r1.final_threads s1" by(rule r1.all_final_exceptD) thus "t'' \<in> r1.deadlocked s1 \<union> r2.deadlocked ?s2 \<union> r2.final_threads ?s2" by(auto dest: fin' simp add: mbisim_eqs) qed thus ?thesis using interrupt mbisim by(auto simp add: mbisim_def) qed hence "\<exists>lt\<in>LT. r2.must_wait ?s2 t lt (r1.deadlocked s1 \<union> r2.deadlocked ?s2 \<union> r2.final_threads ?s2)" using \<open>lt \<in> LT\<close> by blast } moreover from mbisim \<open>wset s1 t = None\<close> have "wset ?s2 t = None" by(simp add: mbisim_def) ultimately have ?Lock by simp thus ?thesis .. 
next case (wait x1 ln) from mbisim \<open>thr s1 t = \<lfloor>(x1, ln)\<rfloor>\<close> obtain x2 where "ts2' t = \<lfloor>(x2, ln)\<rfloor>" by(auto dest: mbisim_thrD1) moreover have "r2.all_final_except ?s2 (r1.deadlocked s1)" proof(rule r2.all_final_exceptI) fix t assume "r2.not_final_thread ?s2 t" then obtain x2 ln where "ts2' t = \<lfloor>(x2, ln)\<rfloor>" by(auto simp add: r2.not_final_thread_iff) with mbisim obtain x1 where "thr s1 t = \<lfloor>(x1, ln)\<rfloor>" "t \<turnstile> (x1, shr s1) \<approx> (x2, m2)" by(auto dest: mbisim_thrD2) hence "r1.not_final_thread s1 t" using \<open>r2.not_final_thread ?s2 t\<close> \<open>ts2' t = \<lfloor>(x2, ln)\<rfloor>\<close> mbisim fin'[of t] by(cases "wset s1 t")(auto simp add: r1.not_final_thread_iff r2.not_final_thread_iff mbisim_def r1.final_thread_def r2.final_thread_def) with \<open>r1.all_final_except s1 (r1.deadlocked s1)\<close> show "t \<in> r1.deadlocked s1" by(rule r1.all_final_exceptD) qed hence "r2.all_final_except ?s2 (r1.deadlocked s1 \<union> r2.deadlocked ?s2)" by(rule r2.all_final_except_mono') blast moreover from \<open>waiting (wset s1 t)\<close> mbisim have "waiting (wset ?s2 t)" by(simp add: mbisim_def) ultimately have ?Wait by simp thus ?thesis by blast next case (acquire x1 ln l t') from mbisim \<open>thr s1 t = \<lfloor>(x1, ln)\<rfloor>\<close> obtain x2 where "ts2' t = \<lfloor>(x2, ln)\<rfloor>" by(auto dest: mbisim_thrD1) moreover from \<open>t' \<in> r1.deadlocked s1 \<or> r1.final_thread s1 t'\<close> have "(t' \<in> r1.deadlocked s1 \<or> t' \<in> r2.deadlocked ?s2) \<or> r2.final_thread ?s2 t'" by(blast dest: fin') moreover from mbisim \<open>has_lock (locks s1 $ l) t'\<close> have "has_lock (locks ?s2 $ l) t'" by(simp add: mbisim_def) ultimately have ?Acquire using \<open>0 < ln $ l\<close> \<open>t \<noteq> t'\<close> \<open>\<not> waiting (wset s1 t)\<close> mbisim by(auto simp add: mbisim_def) thus ?thesis by blast qed qed with red1 red2 mbisim show ?thesis by(blast intro: rtranclp_trans) qed lemma deadlocked2_imp_\<tau>s_deadlocked1: "\<lbrakk> s1 \<approx>m s2; t \<in> r2.deadlocked s2 \<rbrakk> \<Longrightarrow> \<exists>s1'. r1.mthr.silent_moves s1 s1' \<and> t \<in> r1.deadlocked s1' \<and> s1' \<approx>m s2" using FWdelay_bisimulation_diverge.deadlocked1_imp_\<tau>s_deadlocked2[OF FWdelay_bisimulation_diverge_flip] unfolding flip_simps . lemma deadlock1_imp_\<tau>s_deadlock2: assumes mbisim: "s1 \<approx>m s2" and dead: "r1.deadlock s1" shows "\<exists>s2'. r2.mthr.silent_moves s2 s2' \<and> r2.deadlock s2' \<and> s1 \<approx>m s2'" proof(cases "\<exists>t. r1.not_final_thread s1 t") case True then obtain t where nfin: "r1.not_final_thread s1 t" .. from mfinal1_inv_simulation[OF mbisim] obtain ls2 ts2 m2 ws2 is2 where red1: "r2.mthr.silent_moves s2 (ls2, (ts2, m2), ws2, is2)" and "s1 \<approx>m (ls2, (ts2, m2), ws2, is2)" and "m2 = shr s2" and fin: "\<And>t. r1.final_thread s1 t \<Longrightarrow> r2.final_thread (ls2, (ts2, m2), ws2, is2) t" by fastforce from no_\<tau>Move1_\<tau>s_to_no_\<tau>Move2[OF \<open>s1 \<approx>m (ls2, (ts2, m2), ws2, is2)\<close>] obtain ts2' where red2: "r2.mthr.silent_moves (ls2, (ts2, m2), ws2, is2) (ls2, (ts2', m2), ws2, is2)" and no_\<tau>: "\<And>t x1 x2 x2' m2'. \<lbrakk> wset s1 t = None; thr s1 t = \<lfloor>(x1, no_wait_locks)\<rfloor>; ts2' t = \<lfloor>(x2, no_wait_locks)\<rfloor>; \<And>x' m'. 
r1.silent_move t (x1, shr s1) (x', m') \<Longrightarrow> False \<rbrakk> \<Longrightarrow> \<not> r2.silent_move t (x2, m2) (x2', m2')" and mbisim: "s1 \<approx>m (ls2, (ts2', m2), ws2, is2)" by fastforce from mbisim have mbisim_eqs: "ls2 = locks s1" "ws2 = wset s1" "is2 = interrupts s1" by(simp_all add: mbisim_def) let ?s2 = "(ls2, (ts2', m2), ws2, is2)" from red2 have fin': "\<And>t. r1.final_thread s1 t \<Longrightarrow> r2.final_thread ?s2 t" by(rule r2.\<tau>mRedT_preserves_final_thread)(rule fin) have "r2.deadlock ?s2" proof(rule r2.deadlockI, goal_cases) case (1 t x2) note ts2t = \<open>thr ?s2 t = \<lfloor>(x2, no_wait_locks)\<rfloor>\<close> with mbisim obtain x1 where ts1t: "thr s1 t = \<lfloor>(x1, no_wait_locks)\<rfloor>" and bisim: "t \<turnstile> (x1, shr s1) \<approx> (x2, m2)" by(auto dest: mbisim_thrD2) from \<open>wset ?s2 t = None\<close> mbisim have ws1t: "wset s1 t = None" by(simp add: mbisim_def) have "\<not> final1 x1" proof assume "final1 x1" with ts1t ws1t have "r1.final_thread s1 t" by(simp add: r1.final_thread_def) hence "r2.final_thread ?s2 t" by(rule fin') with \<open>\<not> final2 x2\<close> ts2t \<open>wset ?s2 t = None\<close> show False by(simp add: r2.final_thread_def) qed from r1.deadlockD1[OF dead ts1t this \<open>wset s1 t = None\<close>] have ms: "r1.must_sync t x1 (shr s1)" and csmw: "\<And>LT. r1.can_sync t x1 (shr s1) LT \<Longrightarrow> \<exists>lt\<in>LT. r1.must_wait s1 t lt (dom (thr s1))" by blast+ { from \<open>r1.must_sync t x1 (shr s1)\<close> obtain ta1 x1' m1' where r1: "t \<turnstile> (x1, shr s1) -1-ta1\<rightarrow> (x1', m1')" and s1': "\<exists>s1'. r1.actions_ok s1' t ta1" by(fastforce elim: r1.must_syncE) have "\<not> \<tau>move1 (x1, shr s1) ta1 (x1', m1')" (is "\<not> ?\<tau>") proof assume "?\<tau>" hence "ta1 = \<epsilon>" by(rule r1.silent_tl) with r1 have "r1.can_sync t x1 (shr s1) {}" by(auto intro!: r1.can_syncI simp add: collect_locks_def collect_interrupts_def) from csmw[OF this] show False by blast qed from simulation1[OF bisim r1 this] obtain x2' m2' x2'' m2'' ta2 where r2: "r2.silent_moves t (x2, m2) (x2', m2')" and r2': "t \<turnstile> (x2', m2') -2-ta2\<rightarrow> (x2'', m2'')" and bisim': "t \<turnstile> (x1', m1') \<approx> (x2'', m2'')" and tasim: "ta1 \<sim>m ta2" by auto from r2 have "\<exists>ta2 x2' m2' s2'. t \<turnstile> (x2, m2) -2-ta2\<rightarrow> (x2', m2') \<and> r2.actions_ok s2' t ta2" proof(cases rule: converse_rtranclpE2[consumes 1, case_names base step]) case base from r2'[folded base] s1'[unfolded ex_actions_ok1_conv_ex_actions_ok2[OF tasim]] show ?thesis by blast next case (step x2''' m2''') hence "t \<turnstile> (x2, m2) -2-\<epsilon>\<rightarrow> (x2''', m2''')" by(auto dest: r2.silent_tl) moreover have "r2.actions_ok (undefined, (undefined, undefined), Map.empty, undefined) t \<epsilon>" by auto ultimately show ?thesis by-(rule exI conjI|assumption)+ qed hence "r2.must_sync t x2 m2" unfolding r2.must_sync_def2 . 
} moreover { fix LT assume "r2.can_sync t x2 m2 LT" then obtain ta2 x2' m2' where r2: "t \<turnstile> (x2, m2) -2-ta2\<rightarrow> (x2', m2')" and LT: "LT = collect_locks \<lbrace>ta2\<rbrace>\<^bsub>l\<^esub> <+> collect_cond_actions \<lbrace>ta2\<rbrace>\<^bsub>c\<^esub> <+> collect_interrupts \<lbrace>ta2\<rbrace>\<^bsub>i\<^esub>" by(auto elim: r2.can_syncE) from ts2t have "ts2' t = \<lfloor>(x2, no_wait_locks)\<rfloor>" by simp with ws1t ts1t have "\<not> r2.silent_move t (x2, m2) (x2', m2')" proof(rule no_\<tau>) fix x1' m1' assume "r1.silent_move t (x1, shr s1) (x1', m1')" hence "t \<turnstile> (x1, shr s1) -1-\<epsilon>\<rightarrow> (x1', m1')" by(auto dest: r1.silent_tl) hence "r1.can_sync t x1 (shr s1) {}" by(auto intro: r1.can_syncI simp add: collect_locks_def collect_interrupts_def) with csmw[OF this] show False by blast qed with r2 have "\<not> \<tau>move2 (x2, m2) ta2 (x2', m2')" by auto from simulation2[OF bisim r2 this] obtain x1' m1' x1'' m1'' ta1 where \<tau>r1: "r1.silent_moves t (x1, shr s1) (x1', m1')" and r1: "t \<turnstile> (x1', m1') -1-ta1\<rightarrow> (x1'', m1'')" and n\<tau>1: "\<not> \<tau>move1 (x1', m1') ta1 (x1'', m1'')" and bisim': "t \<turnstile> (x1'', m1'') \<approx> (x2', m2')" and tlsim: "ta1 \<sim>m ta2" by auto from \<tau>r1 obtain [simp]: "x1' = x1" "m1' = shr s1" proof(cases rule: converse_rtranclpE2[consumes 1, case_names refl step]) case (step X M) from \<open>r1.silent_move t (x1, shr s1) (X, M)\<close> have "t \<turnstile> (x1, shr s1) -1-\<epsilon>\<rightarrow> (X, M)" by(auto dest: r1.silent_tl) hence "r1.can_sync t x1 (shr s1) {}" by(auto intro: r1.can_syncI simp add: collect_locks_def collect_interrupts_def) with csmw[OF this] have False by blast thus ?thesis .. qed blast from tlsim LT have "LT = collect_locks \<lbrace>ta1\<rbrace>\<^bsub>l\<^esub> <+> collect_cond_actions \<lbrace>ta1\<rbrace>\<^bsub>c\<^esub> <+> collect_interrupts \<lbrace>ta1\<rbrace>\<^bsub>i\<^esub>" by(auto simp add: ta_bisim_def) with r1 have "r1.can_sync t x1 (shr s1) LT" by(auto intro: r1.can_syncI) from csmw[OF this] obtain lt where lt: "lt \<in> LT" "r1.must_wait s1 t lt (dom (thr s1))" by blast from \<open>r1.must_wait s1 t lt (dom (thr s1))\<close> have "r2.must_wait ?s2 t lt (dom (thr ?s2))" proof(cases rule: r1.must_wait_elims) case (lock l) with mbisim_dom_eq[OF mbisim] show ?thesis by(auto simp add: mbisim_eqs) next case (join t') from dead deadlock_mbisim_not_final_thread_pres[OF _ \<open>r1.not_final_thread s1 t'\<close> fin' mbisim] have "r2.not_final_thread ?s2 t'" by auto thus ?thesis using join mbisim_dom_eq[OF mbisim] by auto next case (interrupt t') have "r2.all_final_except ?s2 (dom (thr ?s2))" by(auto intro!: r2.all_final_exceptI) with interrupt show ?thesis by(auto simp add: mbisim_eqs) qed with lt have "\<exists>lt\<in>LT. 
r2.must_wait ?s2 t lt (dom (thr ?s2))" by blast } ultimately show ?case by fastforce next case (2 t x2 ln l) note dead moreover from mbisim \<open>thr ?s2 t = \<lfloor>(x2, ln)\<rfloor>\<close> obtain x1 where "thr s1 t = \<lfloor>(x1, ln)\<rfloor>" by(auto dest: mbisim_thrD2) moreover note \<open>0 < ln $ l\<close> moreover from \<open>\<not> waiting (wset ?s2 t)\<close> mbisim have "\<not> waiting (wset s1 t)" by(simp add: mbisim_def) ultimately obtain l' t' where "0 < ln $ l'" "t \<noteq> t'" "thr s1 t' \<noteq> None" "has_lock (locks s1 $ l') t'" by(rule r1.deadlockD2) thus ?case using mbisim_thrNone_eq[OF mbisim, of t'] mbisim by(auto simp add: mbisim_def) next case (3 t x2 w) from mbisim_thrD2[OF mbisim this] obtain x1 where "thr s1 t = \<lfloor>(x1, no_wait_locks)\<rfloor>" by auto with dead have "wset s1 t \<noteq> \<lfloor>PostWS w\<rfloor>" by(rule r1.deadlockD3[rule_format]) with mbisim show ?case by(simp add: mbisim_def) qed with red1 red2 mbisim show ?thesis by(blast intro: rtranclp_trans) next case False hence "r1.mfinal s1" by(auto intro: r1.mfinalI simp add: r1.not_final_thread_iff) from mfinal1_simulation[OF mbisim this] obtain s2' where "\<tau>mRed2 s2 s2'" "s1 \<approx>m s2'" "r2.mfinal s2'" "shr s2' = shr s2" by blast thus ?thesis by(blast intro: r2.mfinal_deadlock) qed lemma deadlock2_imp_\<tau>s_deadlock1: "\<lbrakk> s1 \<approx>m s2; r2.deadlock s2 \<rbrakk> \<Longrightarrow> \<exists>s1'. r1.mthr.silent_moves s1 s1' \<and> r1.deadlock s1' \<and> s1' \<approx>m s2" using FWdelay_bisimulation_diverge.deadlock1_imp_\<tau>s_deadlock2[OF FWdelay_bisimulation_diverge_flip] unfolding flip_simps . lemma deadlocked'1_imp_\<tau>s_deadlocked'2: "\<lbrakk> s1 \<approx>m s2; r1.deadlocked' s1 \<rbrakk> \<Longrightarrow> \<exists>s2'. r2.mthr.silent_moves s2 s2' \<and> r2.deadlocked' s2' \<and> s1 \<approx>m s2'" unfolding r1.deadlock_eq_deadlocked'[symmetric] r2.deadlock_eq_deadlocked'[symmetric] by(rule deadlock1_imp_\<tau>s_deadlock2) lemma deadlocked'2_imp_\<tau>s_deadlocked'1: "\<lbrakk> s1 \<approx>m s2; r2.deadlocked' s2 \<rbrakk> \<Longrightarrow> \<exists>s1'. r1.mthr.silent_moves s1 s1' \<and> r1.deadlocked' s1' \<and> s1' \<approx>m s2" unfolding r1.deadlock_eq_deadlocked'[symmetric] r2.deadlock_eq_deadlocked'[symmetric] by(rule deadlock2_imp_\<tau>s_deadlock1) end context FWbisimulation begin lemma mbisim_final_thread_preserve1: assumes mbisim: "s1 \<approx>m s2" and fin: "r1.final_thread s1 t" shows "r2.final_thread s2 t" proof - from fin obtain x1 where ts1t: "thr s1 t = \<lfloor>(x1, no_wait_locks)\<rfloor>" and fin1: "final1 x1" and ws1t: "wset s1 t = None" by(auto elim: r1.final_threadE) from mbisim ts1t obtain x2 where ts2t: "thr s2 t = \<lfloor>(x2, no_wait_locks)\<rfloor>" and bisim: "t \<turnstile> (x1, shr s1) \<approx> (x2, shr s2)" by(auto dest: mbisim_thrD1) note ts2t moreover from fin1 bisim have "final2 x2" by(auto dest: bisim_final) moreover from mbisim ws1t have "wset s2 t = None" by(simp add: mbisim_def) ultimately show ?thesis by(rule r2.final_threadI) qed lemma mbisim_final_thread_preserve2: "\<lbrakk> s1 \<approx>m s2; r2.final_thread s2 t \<rbrakk> \<Longrightarrow> r1.final_thread s1 t" using FWbisimulation.mbisim_final_thread_preserve1[OF FWbisimulation_flip] unfolding flip_simps . 
lemma mbisim_final_thread_inv: "s1 \<approx>m s2 \<Longrightarrow> r1.final_thread s1 t \<longleftrightarrow> r2.final_thread s2 t" by(blast intro: mbisim_final_thread_preserve1 mbisim_final_thread_preserve2) lemma mbisim_not_final_thread_inv: assumes bisim: "mbisim s1 s2" shows "r1.not_final_thread s1 = r2.not_final_thread s2" proof(rule ext) fix t show "r1.not_final_thread s1 t = r2.not_final_thread s2 t" proof(cases "thr s1 t") case None with mbisim_thrNone_eq[OF bisim, of t] have "thr s2 t = None" by simp with None show ?thesis by(auto elim!: r2.not_final_thread.cases r1.not_final_thread.cases intro: r2.not_final_thread.intros r1.not_final_thread.intros) next case (Some a) then obtain x1 ln where tst1: "thr s1 t = \<lfloor>(x1, ln)\<rfloor>" by(cases a) auto from mbisim_thrD1[OF bisim tst1] obtain x2 where tst2: "thr s2 t = \<lfloor>(x2, ln)\<rfloor>" and bisimt: "t \<turnstile> (x1, shr s1) \<approx> (x2, shr s2)" by blast from bisim have "wset s2 = wset s1" by(simp add: mbisim_def) with tst2 tst1 bisim_final[OF bisimt] show ?thesis by(simp add: r1.not_final_thread_conv r2.not_final_thread_conv)(rule mbisim_final_thread_inv[OF bisim]) qed qed lemma mbisim_deadlocked_preserve2: "\<lbrakk> s1 \<approx>m s2; t \<in> r2.deadlocked s2 \<rbrakk> \<Longrightarrow> t \<in> r1.deadlocked s1" using FWbisimulation.mbisim_deadlocked_preserve1[OF FWbisimulation_flip] unfolding flip_simps . lemma mbisim_deadlocked_inv: "s1 \<approx>m s2 \<Longrightarrow> r1.deadlocked s1 = r2.deadlocked s2" by(blast intro!: mbisim_deadlocked_preserve1 mbisim_deadlocked_preserve2) end (* Nice to have, but not needed any more *) context FWbisimulation begin lemma bisim_can_sync_preserve1: assumes bisim: "t \<turnstile> (x1, m1) \<approx> (x2, m2)" and cs: "t \<turnstile> \<langle>x1, m1\<rangle> LT \<wrong>1" shows "t \<turnstile> \<langle>x2, m2\<rangle> LT \<wrong>2" proof - from cs obtain ta1 x1' m1' where red1: "t \<turnstile> (x1, m1) -1-ta1\<rightarrow> (x1', m1')" and LT: "LT = collect_locks \<lbrace>ta1\<rbrace>\<^bsub>l\<^esub> <+> collect_cond_actions \<lbrace>ta1\<rbrace>\<^bsub>c\<^esub> <+> collect_interrupts \<lbrace>ta1\<rbrace>\<^bsub>i\<^esub>" by(rule r1.can_syncE) from bisimulation.simulation1[OF bisimulation_axioms, OF bisim red1] obtain x2' ta2 m2' where red2: "t \<turnstile> (x2, m2) -2-ta2\<rightarrow> (x2', m2')" and tasim: "ta1 \<sim>m ta2" by fastforce from tasim LT have "LT = collect_locks \<lbrace>ta2\<rbrace>\<^bsub>l\<^esub> <+> collect_cond_actions \<lbrace>ta2\<rbrace>\<^bsub>c\<^esub> <+> collect_interrupts \<lbrace>ta2\<rbrace>\<^bsub>i\<^esub>" by(auto simp add: ta_bisim_def) with red2 show ?thesis by(rule r2.can_syncI) qed lemma bisim_can_sync_preserve2: "\<lbrakk> t \<turnstile> (x1, m1) \<approx> (x2, m2); t \<turnstile> \<langle>x2, m2\<rangle> LT \<wrong>2 \<rbrakk> \<Longrightarrow> t \<turnstile> \<langle>x1, m1\<rangle> LT \<wrong>1" using FWbisimulation.bisim_can_sync_preserve1[OF FWbisimulation_flip] unfolding flip_simps . 
lemma bisim_can_sync_inv: "t \<turnstile> (x1, m1) \<approx> (x2, m2) \<Longrightarrow> t \<turnstile> \<langle>x1, m1\<rangle> LT \<wrong>1 \<longleftrightarrow> t \<turnstile> \<langle>x2, m2\<rangle> LT \<wrong>2" by(blast intro: bisim_can_sync_preserve1 bisim_can_sync_preserve2) lemma bisim_must_sync_preserve1: assumes bisim: "t \<turnstile> (x1, m1) \<approx> (x2, m2)" and ms: "t \<turnstile> \<langle>x1, m1\<rangle> \<wrong>1" shows "t \<turnstile> \<langle>x2, m2\<rangle> \<wrong>2" proof - from ms obtain ta1 x1' m1' where red1: "t \<turnstile> (x1, m1) -1-ta1\<rightarrow> (x1', m1')" and s1': "\<exists>s1'. r1.actions_ok s1' t ta1" by(fastforce elim: r1.must_syncE) from bisimulation.simulation1[OF bisimulation_axioms, OF bisim red1] obtain x2' ta2 m2' where red2: "t \<turnstile> (x2, m2) -2-ta2\<rightarrow> (x2', m2')" and tasim: "ta1 \<sim>m ta2" by fastforce from ex_actions_ok1_conv_ex_actions_ok2[OF tasim, of t] s1' red2 show ?thesis unfolding r2.must_sync_def2 by blast qed lemma bisim_must_sync_preserve2: "\<lbrakk> t \<turnstile> (x1, m1) \<approx> (x2, m2); t \<turnstile> \<langle>x2, m2\<rangle> \<wrong>2 \<rbrakk> \<Longrightarrow> t \<turnstile> \<langle>x1, m1\<rangle> \<wrong>1" using FWbisimulation.bisim_must_sync_preserve1[OF FWbisimulation_flip] unfolding flip_simps . lemma bisim_must_sync_inv: "t \<turnstile> (x1, m1) \<approx> (x2, m2) \<Longrightarrow> t \<turnstile> \<langle>x1, m1\<rangle> \<wrong>1 \<longleftrightarrow> t \<turnstile> \<langle>x2, m2\<rangle> \<wrong>2" by(blast intro: bisim_must_sync_preserve1 bisim_must_sync_preserve2) end end
lemma abs_triangle_half_l:
  fixes y :: "'a::linordered_field"
  assumes "abs (x - y) < e / 2"
    and "abs (x' - y) < e / 2"
  shows "abs (x - x') < e"
  using assms by linarith
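The lemma is the standard triangle-inequality halving argument; as a one-line check (plain algebra, nothing Isabelle-specific):

\[ |x - x'| = |(x - y) - (x' - y)| \le |x - y| + |x' - y| < \tfrac{e}{2} + \tfrac{e}{2} = e. \]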
/** @file */
#ifndef __CCL_NEUTRINOS_H_INCLUDED__
#define __CCL_NEUTRINOS_H_INCLUDED__

#include <gsl/gsl_spline.h>
#include <gsl/gsl_const_mksa.h>

// maximum number of species
#define CCL_MAX_NU_SPECIES 3

// limits for the precomputed spline of the phase-space
// integral in mnu/T
#define CCL_NU_MNUT_MIN 1e-4
#define CCL_NU_MNUT_MAX 500
// and number of points
#define CCL_NU_MNUT_N 1000

// The combination of constants required in Omeganuh2
#define NU_CONST ( \
  8. * pow(M_PI,5) *pow((ccl_constants.KBOLTZ/ ccl_constants.HPLANCK),3)* \
  ccl_constants.KBOLTZ/(15. *pow( ccl_constants.CLIGHT,3))* \
  (8. * M_PI * ccl_constants.GNEWT) / \
  (3. * 100.*100.*1000.*1000. /ccl_constants.MPC_TO_METER /ccl_constants.MPC_TO_METER * ccl_constants.CLIGHT * ccl_constants.CLIGHT))

CCL_BEGIN_DECLS

typedef enum ccl_neutrino_mass_splits{
  ccl_nu_normal=0,
  ccl_nu_inverted=1,
  ccl_nu_equal=2,
  ccl_nu_sum=3,
  ccl_nu_single=4
} ccl_neutrino_mass_splits;

/**
 * Returns the density of massive neutrinos at a scale factor a.
 * Users are encouraged to access this quantity via the function ccl_omega_x.
 * @param a Scale factor
 * @param N_nu_mass Number of species with a non-negligible neutrino mass.
 * @param mnu Pointer to array containing the neutrino masses (can be 0).
 * @param T_CMB Temperature of the CMB
 * @param status Status flag. 0 if there are no errors, nonzero otherwise.
 * For specific cases see documentation for ccl_error.c
 * @return OmNuh2 Fractional energy density of neutrinos with masses mnu, multiplied by h squared.
 */
double ccl_Omeganuh2(double a, int N_nu_mass, double* mnu, double T_CMB, int * status);

/**
 * Returns the masses of the neutrino species for a given total density.
 * @param OmNuh2 Fractional energy density of neutrinos, multiplied by h squared (can be 0).
 * @param mass_split How the total mass is distributed over the species (normal, inverted, equal, sum or single).
 * @param T_CMB Temperature of the CMB
 * @param status Status flag. 0 if there are no errors, nonzero otherwise.
 * For specific cases see documentation for ccl_error.c
 * @return Mnu Pointer to an array of neutrino masses [eV].
 */
double* ccl_nu_masses(double OmNuh2, ccl_neutrino_mass_splits mass_split, double T_CMB, int * status);

CCL_END_DECLS

#endif
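A minimal calling sketch for the declaration above; the mass values, scale factor, and include path are illustrative assumptions, not part of the header:

// Hedged usage sketch: all numeric inputs below are invented for illustration.
#include <stdio.h>
#include "ccl_neutrinos.h"   // assumed include path

int main(void) {
  int status = 0;                        // 0 = no error (see ccl_error.c)
  double mnu[3] = {0.02, 0.02, 0.02};    // three massive species, masses in eV (illustrative)
  double omnuh2 = ccl_Omeganuh2(1.0 /* a: today */, 3, mnu, 2.725 /* T_CMB in K */, &status);
  if (status == 0)
    printf("Omega_nu h^2 today = %g\n", omnuh2);
  return status;
}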
Require Import Crypto.Specific.Framework.RawCurveParameters. Require Import Crypto.Util.LetIn. (*** Modulus : 2^129 - 25 Base: 32 ***) Definition curve : CurveParameters := {| sz := 5%nat; base := 32; bitwidth := 32; s := 2^129; c := [(1, 25)]; carry_chains := None; a24 := None; coef_div_modulus := None; goldilocks := None; karatsuba := None; montgomery := true; freeze := Some false; ladderstep := false; mul_code := None; square_code := None; upper_bound_of_exponent_loose := None; upper_bound_of_exponent_tight := None; allowable_bit_widths := None; freeze_extra_allowable_bit_widths := None; modinv_fuel := None |}. Ltac extra_prove_mul_eq _ := idtac. Ltac extra_prove_square_eq _ := idtac.
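For orientation, the parameters above are mutually consistent; a quick arithmetic check (a sketch, not part of the generated code):

\[ p = s - 25 = 2^{129} - 25, \qquad \mathtt{sz} \cdot \mathtt{base} = 5 \cdot 32 = 160 \ge 129, \qquad 2^{129} \equiv 25 \pmod{p}, \]

so five 32-bit limbs suffice to represent field elements, and the congruence records how the pair (s, c) encodes the modulus.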
State Before: α : Type u_1 β : Type ?u.930878 γ : Type u_2 γ₂ : Type ?u.930884 δ : Type ?u.930887 ι : Sort y s✝ t u : Set α inst✝¹⁵ : TopologicalSpace α inst✝¹⁴ : MeasurableSpace α inst✝¹³ : OpensMeasurableSpace α inst✝¹² : TopologicalSpace β inst✝¹¹ : MeasurableSpace β inst✝¹⁰ : OpensMeasurableSpace β inst✝⁹ : TopologicalSpace γ inst✝⁸ : MeasurableSpace γ inst✝⁷ : BorelSpace γ inst✝⁶ : TopologicalSpace γ₂ inst✝⁵ : MeasurableSpace γ₂ inst✝⁴ : BorelSpace γ₂ inst✝³ : MeasurableSpace δ α' : Type ?u.930980 inst✝² : TopologicalSpace α' inst✝¹ : MeasurableSpace α' f g : α → γ s : Set α inst✝ : (j : α) → Decidable (j ∈ s) hf : ContinuousOn f s hg : ContinuousOn g (sᶜ) hs : MeasurableSet s ⊢ Measurable (Set.piecewise s f g) State After: α : Type u_1 β : Type ?u.930878 γ : Type u_2 γ₂ : Type ?u.930884 δ : Type ?u.930887 ι : Sort y s✝ t✝ u : Set α inst✝¹⁵ : TopologicalSpace α inst✝¹⁴ : MeasurableSpace α inst✝¹³ : OpensMeasurableSpace α inst✝¹² : TopologicalSpace β inst✝¹¹ : MeasurableSpace β inst✝¹⁰ : OpensMeasurableSpace β inst✝⁹ : TopologicalSpace γ inst✝⁸ : MeasurableSpace γ inst✝⁷ : BorelSpace γ inst✝⁶ : TopologicalSpace γ₂ inst✝⁵ : MeasurableSpace γ₂ inst✝⁴ : BorelSpace γ₂ inst✝³ : MeasurableSpace δ α' : Type ?u.930980 inst✝² : TopologicalSpace α' inst✝¹ : MeasurableSpace α' f g : α → γ s : Set α inst✝ : (j : α) → Decidable (j ∈ s) hf : ContinuousOn f s hg : ContinuousOn g (sᶜ) hs : MeasurableSet s t : Set γ ht : IsOpen t ⊢ MeasurableSet (Set.piecewise s f g ⁻¹' t) Tactic: refine' measurable_of_isOpen fun t ht => _ State Before: α : Type u_1 β : Type ?u.930878 γ : Type u_2 γ₂ : Type ?u.930884 δ : Type ?u.930887 ι : Sort y s✝ t✝ u : Set α inst✝¹⁵ : TopologicalSpace α inst✝¹⁴ : MeasurableSpace α inst✝¹³ : OpensMeasurableSpace α inst✝¹² : TopologicalSpace β inst✝¹¹ : MeasurableSpace β inst✝¹⁰ : OpensMeasurableSpace β inst✝⁹ : TopologicalSpace γ inst✝⁸ : MeasurableSpace γ inst✝⁷ : BorelSpace γ inst✝⁶ : TopologicalSpace γ₂ inst✝⁵ : MeasurableSpace γ₂ inst✝⁴ : BorelSpace γ₂ inst✝³ : MeasurableSpace δ α' : Type ?u.930980 inst✝² : TopologicalSpace α' inst✝¹ : MeasurableSpace α' f g : α → γ s : Set α inst✝ : (j : α) → Decidable (j ∈ s) hf : ContinuousOn f s hg : ContinuousOn g (sᶜ) hs : MeasurableSet s t : Set γ ht : IsOpen t ⊢ MeasurableSet (Set.piecewise s f g ⁻¹' t) State After: α : Type u_1 β : Type ?u.930878 γ : Type u_2 γ₂ : Type ?u.930884 δ : Type ?u.930887 ι : Sort y s✝ t✝ u : Set α inst✝¹⁵ : TopologicalSpace α inst✝¹⁴ : MeasurableSpace α inst✝¹³ : OpensMeasurableSpace α inst✝¹² : TopologicalSpace β inst✝¹¹ : MeasurableSpace β inst✝¹⁰ : OpensMeasurableSpace β inst✝⁹ : TopologicalSpace γ inst✝⁸ : MeasurableSpace γ inst✝⁷ : BorelSpace γ inst✝⁶ : TopologicalSpace γ₂ inst✝⁵ : MeasurableSpace γ₂ inst✝⁴ : BorelSpace γ₂ inst✝³ : MeasurableSpace δ α' : Type ?u.930980 inst✝² : TopologicalSpace α' inst✝¹ : MeasurableSpace α' f g : α → γ s : Set α inst✝ : (j : α) → Decidable (j ∈ s) hf : ContinuousOn f s hg : ContinuousOn g (sᶜ) hs : MeasurableSet s t : Set γ ht : IsOpen t ⊢ MeasurableSet (f ⁻¹' t ∩ s ∪ g ⁻¹' t \ s) Tactic: rw [piecewise_preimage, Set.ite] State Before: α : Type u_1 β : Type ?u.930878 γ : Type u_2 γ₂ : Type ?u.930884 δ : Type ?u.930887 ι : Sort y s✝ t✝ u : Set α inst✝¹⁵ : TopologicalSpace α inst✝¹⁴ : MeasurableSpace α inst✝¹³ : OpensMeasurableSpace α inst✝¹² : TopologicalSpace β inst✝¹¹ : MeasurableSpace β inst✝¹⁰ : OpensMeasurableSpace β inst✝⁹ : TopologicalSpace γ inst✝⁸ : MeasurableSpace γ inst✝⁷ : BorelSpace γ inst✝⁶ : TopologicalSpace γ₂ inst✝⁵ : MeasurableSpace γ₂ inst✝⁴ : BorelSpace 
γ₂ inst✝³ : MeasurableSpace δ α' : Type ?u.930980 inst✝² : TopologicalSpace α' inst✝¹ : MeasurableSpace α' f g : α → γ s : Set α inst✝ : (j : α) → Decidable (j ∈ s) hf : ContinuousOn f s hg : ContinuousOn g (sᶜ) hs : MeasurableSet s t : Set γ ht : IsOpen t ⊢ MeasurableSet (f ⁻¹' t ∩ s ∪ g ⁻¹' t \ s) State After: case h₁ α : Type u_1 β : Type ?u.930878 γ : Type u_2 γ₂ : Type ?u.930884 δ : Type ?u.930887 ι : Sort y s✝ t✝ u : Set α inst✝¹⁵ : TopologicalSpace α inst✝¹⁴ : MeasurableSpace α inst✝¹³ : OpensMeasurableSpace α inst✝¹² : TopologicalSpace β inst✝¹¹ : MeasurableSpace β inst✝¹⁰ : OpensMeasurableSpace β inst✝⁹ : TopologicalSpace γ inst✝⁸ : MeasurableSpace γ inst✝⁷ : BorelSpace γ inst✝⁶ : TopologicalSpace γ₂ inst✝⁵ : MeasurableSpace γ₂ inst✝⁴ : BorelSpace γ₂ inst✝³ : MeasurableSpace δ α' : Type ?u.930980 inst✝² : TopologicalSpace α' inst✝¹ : MeasurableSpace α' f g : α → γ s : Set α inst✝ : (j : α) → Decidable (j ∈ s) hf : ContinuousOn f s hg : ContinuousOn g (sᶜ) hs : MeasurableSet s t : Set γ ht : IsOpen t ⊢ MeasurableSet (f ⁻¹' t ∩ s) case h₂ α : Type u_1 β : Type ?u.930878 γ : Type u_2 γ₂ : Type ?u.930884 δ : Type ?u.930887 ι : Sort y s✝ t✝ u : Set α inst✝¹⁵ : TopologicalSpace α inst✝¹⁴ : MeasurableSpace α inst✝¹³ : OpensMeasurableSpace α inst✝¹² : TopologicalSpace β inst✝¹¹ : MeasurableSpace β inst✝¹⁰ : OpensMeasurableSpace β inst✝⁹ : TopologicalSpace γ inst✝⁸ : MeasurableSpace γ inst✝⁷ : BorelSpace γ inst✝⁶ : TopologicalSpace γ₂ inst✝⁵ : MeasurableSpace γ₂ inst✝⁴ : BorelSpace γ₂ inst✝³ : MeasurableSpace δ α' : Type ?u.930980 inst✝² : TopologicalSpace α' inst✝¹ : MeasurableSpace α' f g : α → γ s : Set α inst✝ : (j : α) → Decidable (j ∈ s) hf : ContinuousOn f s hg : ContinuousOn g (sᶜ) hs : MeasurableSet s t : Set γ ht : IsOpen t ⊢ MeasurableSet (g ⁻¹' t \ s) Tactic: apply MeasurableSet.union State Before: case h₁ α : Type u_1 β : Type ?u.930878 γ : Type u_2 γ₂ : Type ?u.930884 δ : Type ?u.930887 ι : Sort y s✝ t✝ u : Set α inst✝¹⁵ : TopologicalSpace α inst✝¹⁴ : MeasurableSpace α inst✝¹³ : OpensMeasurableSpace α inst✝¹² : TopologicalSpace β inst✝¹¹ : MeasurableSpace β inst✝¹⁰ : OpensMeasurableSpace β inst✝⁹ : TopologicalSpace γ inst✝⁸ : MeasurableSpace γ inst✝⁷ : BorelSpace γ inst✝⁶ : TopologicalSpace γ₂ inst✝⁵ : MeasurableSpace γ₂ inst✝⁴ : BorelSpace γ₂ inst✝³ : MeasurableSpace δ α' : Type ?u.930980 inst✝² : TopologicalSpace α' inst✝¹ : MeasurableSpace α' f g : α → γ s : Set α inst✝ : (j : α) → Decidable (j ∈ s) hf : ContinuousOn f s hg : ContinuousOn g (sᶜ) hs : MeasurableSet s t : Set γ ht : IsOpen t ⊢ MeasurableSet (f ⁻¹' t ∩ s) State After: case h₁.intro.intro α : Type u_1 β : Type ?u.930878 γ : Type u_2 γ₂ : Type ?u.930884 δ : Type ?u.930887 ι : Sort y s✝ t✝ u✝ : Set α inst✝¹⁵ : TopologicalSpace α inst✝¹⁴ : MeasurableSpace α inst✝¹³ : OpensMeasurableSpace α inst✝¹² : TopologicalSpace β inst✝¹¹ : MeasurableSpace β inst✝¹⁰ : OpensMeasurableSpace β inst✝⁹ : TopologicalSpace γ inst✝⁸ : MeasurableSpace γ inst✝⁷ : BorelSpace γ inst✝⁶ : TopologicalSpace γ₂ inst✝⁵ : MeasurableSpace γ₂ inst✝⁴ : BorelSpace γ₂ inst✝³ : MeasurableSpace δ α' : Type ?u.930980 inst✝² : TopologicalSpace α' inst✝¹ : MeasurableSpace α' f g : α → γ s : Set α inst✝ : (j : α) → Decidable (j ∈ s) hf : ContinuousOn f s hg : ContinuousOn g (sᶜ) hs : MeasurableSet s t : Set γ ht : IsOpen t u : Set α u_open : IsOpen u hu : f ⁻¹' t ∩ s = u ∩ s ⊢ MeasurableSet (f ⁻¹' t ∩ s) Tactic: rcases _root_.continuousOn_iff'.1 hf t ht with ⟨u, u_open, hu⟩ State Before: case h₁.intro.intro α : Type u_1 β : Type ?u.930878 γ : Type u_2 γ₂ 
: Type ?u.930884 δ : Type ?u.930887 ι : Sort y s✝ t✝ u✝ : Set α inst✝¹⁵ : TopologicalSpace α inst✝¹⁴ : MeasurableSpace α inst✝¹³ : OpensMeasurableSpace α inst✝¹² : TopologicalSpace β inst✝¹¹ : MeasurableSpace β inst✝¹⁰ : OpensMeasurableSpace β inst✝⁹ : TopologicalSpace γ inst✝⁸ : MeasurableSpace γ inst✝⁷ : BorelSpace γ inst✝⁶ : TopologicalSpace γ₂ inst✝⁵ : MeasurableSpace γ₂ inst✝⁴ : BorelSpace γ₂ inst✝³ : MeasurableSpace δ α' : Type ?u.930980 inst✝² : TopologicalSpace α' inst✝¹ : MeasurableSpace α' f g : α → γ s : Set α inst✝ : (j : α) → Decidable (j ∈ s) hf : ContinuousOn f s hg : ContinuousOn g (sᶜ) hs : MeasurableSet s t : Set γ ht : IsOpen t u : Set α u_open : IsOpen u hu : f ⁻¹' t ∩ s = u ∩ s ⊢ MeasurableSet (f ⁻¹' t ∩ s) State After: case h₁.intro.intro α : Type u_1 β : Type ?u.930878 γ : Type u_2 γ₂ : Type ?u.930884 δ : Type ?u.930887 ι : Sort y s✝ t✝ u✝ : Set α inst✝¹⁵ : TopologicalSpace α inst✝¹⁴ : MeasurableSpace α inst✝¹³ : OpensMeasurableSpace α inst✝¹² : TopologicalSpace β inst✝¹¹ : MeasurableSpace β inst✝¹⁰ : OpensMeasurableSpace β inst✝⁹ : TopologicalSpace γ inst✝⁸ : MeasurableSpace γ inst✝⁷ : BorelSpace γ inst✝⁶ : TopologicalSpace γ₂ inst✝⁵ : MeasurableSpace γ₂ inst✝⁴ : BorelSpace γ₂ inst✝³ : MeasurableSpace δ α' : Type ?u.930980 inst✝² : TopologicalSpace α' inst✝¹ : MeasurableSpace α' f g : α → γ s : Set α inst✝ : (j : α) → Decidable (j ∈ s) hf : ContinuousOn f s hg : ContinuousOn g (sᶜ) hs : MeasurableSet s t : Set γ ht : IsOpen t u : Set α u_open : IsOpen u hu : f ⁻¹' t ∩ s = u ∩ s ⊢ MeasurableSet (u ∩ s) Tactic: rw [hu] State Before: case h₁.intro.intro α : Type u_1 β : Type ?u.930878 γ : Type u_2 γ₂ : Type ?u.930884 δ : Type ?u.930887 ι : Sort y s✝ t✝ u✝ : Set α inst✝¹⁵ : TopologicalSpace α inst✝¹⁴ : MeasurableSpace α inst✝¹³ : OpensMeasurableSpace α inst✝¹² : TopologicalSpace β inst✝¹¹ : MeasurableSpace β inst✝¹⁰ : OpensMeasurableSpace β inst✝⁹ : TopologicalSpace γ inst✝⁸ : MeasurableSpace γ inst✝⁷ : BorelSpace γ inst✝⁶ : TopologicalSpace γ₂ inst✝⁵ : MeasurableSpace γ₂ inst✝⁴ : BorelSpace γ₂ inst✝³ : MeasurableSpace δ α' : Type ?u.930980 inst✝² : TopologicalSpace α' inst✝¹ : MeasurableSpace α' f g : α → γ s : Set α inst✝ : (j : α) → Decidable (j ∈ s) hf : ContinuousOn f s hg : ContinuousOn g (sᶜ) hs : MeasurableSet s t : Set γ ht : IsOpen t u : Set α u_open : IsOpen u hu : f ⁻¹' t ∩ s = u ∩ s ⊢ MeasurableSet (u ∩ s) State After: no goals Tactic: exact u_open.measurableSet.inter hs State Before: case h₂ α : Type u_1 β : Type ?u.930878 γ : Type u_2 γ₂ : Type ?u.930884 δ : Type ?u.930887 ι : Sort y s✝ t✝ u : Set α inst✝¹⁵ : TopologicalSpace α inst✝¹⁴ : MeasurableSpace α inst✝¹³ : OpensMeasurableSpace α inst✝¹² : TopologicalSpace β inst✝¹¹ : MeasurableSpace β inst✝¹⁰ : OpensMeasurableSpace β inst✝⁹ : TopologicalSpace γ inst✝⁸ : MeasurableSpace γ inst✝⁷ : BorelSpace γ inst✝⁶ : TopologicalSpace γ₂ inst✝⁵ : MeasurableSpace γ₂ inst✝⁴ : BorelSpace γ₂ inst✝³ : MeasurableSpace δ α' : Type ?u.930980 inst✝² : TopologicalSpace α' inst✝¹ : MeasurableSpace α' f g : α → γ s : Set α inst✝ : (j : α) → Decidable (j ∈ s) hf : ContinuousOn f s hg : ContinuousOn g (sᶜ) hs : MeasurableSet s t : Set γ ht : IsOpen t ⊢ MeasurableSet (g ⁻¹' t \ s) State After: case h₂.intro.intro α : Type u_1 β : Type ?u.930878 γ : Type u_2 γ₂ : Type ?u.930884 δ : Type ?u.930887 ι : Sort y s✝ t✝ u✝ : Set α inst✝¹⁵ : TopologicalSpace α inst✝¹⁴ : MeasurableSpace α inst✝¹³ : OpensMeasurableSpace α inst✝¹² : TopologicalSpace β inst✝¹¹ : MeasurableSpace β inst✝¹⁰ : OpensMeasurableSpace β inst✝⁹ : TopologicalSpace γ 
inst✝⁸ : MeasurableSpace γ inst✝⁷ : BorelSpace γ inst✝⁶ : TopologicalSpace γ₂ inst✝⁵ : MeasurableSpace γ₂ inst✝⁴ : BorelSpace γ₂ inst✝³ : MeasurableSpace δ α' : Type ?u.930980 inst✝² : TopologicalSpace α' inst✝¹ : MeasurableSpace α' f g : α → γ s : Set α inst✝ : (j : α) → Decidable (j ∈ s) hf : ContinuousOn f s hg : ContinuousOn g (sᶜ) hs : MeasurableSet s t : Set γ ht : IsOpen t u : Set α u_open : IsOpen u hu : g ⁻¹' t ∩ sᶜ = u ∩ sᶜ ⊢ MeasurableSet (g ⁻¹' t \ s) Tactic: rcases _root_.continuousOn_iff'.1 hg t ht with ⟨u, u_open, hu⟩ State Before: case h₂.intro.intro α : Type u_1 β : Type ?u.930878 γ : Type u_2 γ₂ : Type ?u.930884 δ : Type ?u.930887 ι : Sort y s✝ t✝ u✝ : Set α inst✝¹⁵ : TopologicalSpace α inst✝¹⁴ : MeasurableSpace α inst✝¹³ : OpensMeasurableSpace α inst✝¹² : TopologicalSpace β inst✝¹¹ : MeasurableSpace β inst✝¹⁰ : OpensMeasurableSpace β inst✝⁹ : TopologicalSpace γ inst✝⁸ : MeasurableSpace γ inst✝⁷ : BorelSpace γ inst✝⁶ : TopologicalSpace γ₂ inst✝⁵ : MeasurableSpace γ₂ inst✝⁴ : BorelSpace γ₂ inst✝³ : MeasurableSpace δ α' : Type ?u.930980 inst✝² : TopologicalSpace α' inst✝¹ : MeasurableSpace α' f g : α → γ s : Set α inst✝ : (j : α) → Decidable (j ∈ s) hf : ContinuousOn f s hg : ContinuousOn g (sᶜ) hs : MeasurableSet s t : Set γ ht : IsOpen t u : Set α u_open : IsOpen u hu : g ⁻¹' t ∩ sᶜ = u ∩ sᶜ ⊢ MeasurableSet (g ⁻¹' t \ s) State After: case h₂.intro.intro α : Type u_1 β : Type ?u.930878 γ : Type u_2 γ₂ : Type ?u.930884 δ : Type ?u.930887 ι : Sort y s✝ t✝ u✝ : Set α inst✝¹⁵ : TopologicalSpace α inst✝¹⁴ : MeasurableSpace α inst✝¹³ : OpensMeasurableSpace α inst✝¹² : TopologicalSpace β inst✝¹¹ : MeasurableSpace β inst✝¹⁰ : OpensMeasurableSpace β inst✝⁹ : TopologicalSpace γ inst✝⁸ : MeasurableSpace γ inst✝⁷ : BorelSpace γ inst✝⁶ : TopologicalSpace γ₂ inst✝⁵ : MeasurableSpace γ₂ inst✝⁴ : BorelSpace γ₂ inst✝³ : MeasurableSpace δ α' : Type ?u.930980 inst✝² : TopologicalSpace α' inst✝¹ : MeasurableSpace α' f g : α → γ s : Set α inst✝ : (j : α) → Decidable (j ∈ s) hf : ContinuousOn f s hg : ContinuousOn g (sᶜ) hs : MeasurableSet s t : Set γ ht : IsOpen t u : Set α u_open : IsOpen u hu : g ⁻¹' t ∩ sᶜ = u ∩ sᶜ ⊢ MeasurableSet (u ∩ sᶜ) Tactic: rw [diff_eq_compl_inter, inter_comm, hu] State Before: case h₂.intro.intro α : Type u_1 β : Type ?u.930878 γ : Type u_2 γ₂ : Type ?u.930884 δ : Type ?u.930887 ι : Sort y s✝ t✝ u✝ : Set α inst✝¹⁵ : TopologicalSpace α inst✝¹⁴ : MeasurableSpace α inst✝¹³ : OpensMeasurableSpace α inst✝¹² : TopologicalSpace β inst✝¹¹ : MeasurableSpace β inst✝¹⁰ : OpensMeasurableSpace β inst✝⁹ : TopologicalSpace γ inst✝⁸ : MeasurableSpace γ inst✝⁷ : BorelSpace γ inst✝⁶ : TopologicalSpace γ₂ inst✝⁵ : MeasurableSpace γ₂ inst✝⁴ : BorelSpace γ₂ inst✝³ : MeasurableSpace δ α' : Type ?u.930980 inst✝² : TopologicalSpace α' inst✝¹ : MeasurableSpace α' f g : α → γ s : Set α inst✝ : (j : α) → Decidable (j ∈ s) hf : ContinuousOn f s hg : ContinuousOn g (sᶜ) hs : MeasurableSet s t : Set γ ht : IsOpen t u : Set α u_open : IsOpen u hu : g ⁻¹' t ∩ sᶜ = u ∩ sᶜ ⊢ MeasurableSet (u ∩ sᶜ) State After: no goals Tactic: exact u_open.measurableSet.inter hs.compl
[STATEMENT] lemma eval_member: "member xq = eval (pred_of_seq xq)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. member xq = eval (pred_of_seq xq) [PROOF STEP] proof (induct xq) [PROOF STATE] proof (state) goal (3 subgoals): 1. member Empty = eval (pred_of_seq Empty) 2. \<And>x1 x2. member (Insert x1 x2) = eval (pred_of_seq (Insert x1 x2)) 3. \<And>x1 xq. member xq = eval (pred_of_seq xq) \<Longrightarrow> member (Join x1 xq) = eval (pred_of_seq (Join x1 xq)) [PROOF STEP] case Empty [PROOF STATE] proof (state) this: goal (3 subgoals): 1. member Empty = eval (pred_of_seq Empty) 2. \<And>x1 x2. member (Insert x1 x2) = eval (pred_of_seq (Insert x1 x2)) 3. \<And>x1 xq. member xq = eval (pred_of_seq xq) \<Longrightarrow> member (Join x1 xq) = eval (pred_of_seq (Join x1 xq)) [PROOF STEP] show ?case [PROOF STATE] proof (prove) goal (1 subgoal): 1. member Empty = eval (pred_of_seq Empty) [PROOF STEP] by (auto simp add: fun_eq_iff elim: botE) [PROOF STATE] proof (state) this: member Empty = eval (pred_of_seq Empty) goal (2 subgoals): 1. \<And>x1 x2. member (Insert x1 x2) = eval (pred_of_seq (Insert x1 x2)) 2. \<And>x1 xq. member xq = eval (pred_of_seq xq) \<Longrightarrow> member (Join x1 xq) = eval (pred_of_seq (Join x1 xq)) [PROOF STEP] next [PROOF STATE] proof (state) goal (2 subgoals): 1. \<And>x1 x2. member (Insert x1 x2) = eval (pred_of_seq (Insert x1 x2)) 2. \<And>x1 xq. member xq = eval (pred_of_seq xq) \<Longrightarrow> member (Join x1 xq) = eval (pred_of_seq (Join x1 xq)) [PROOF STEP] case Insert [PROOF STATE] proof (state) this: goal (2 subgoals): 1. \<And>x1 x2. member (Insert x1 x2) = eval (pred_of_seq (Insert x1 x2)) 2. \<And>x1 xq. member xq = eval (pred_of_seq xq) \<Longrightarrow> member (Join x1 xq) = eval (pred_of_seq (Join x1 xq)) [PROOF STEP] show ?case [PROOF STATE] proof (prove) goal (1 subgoal): 1. member (Insert x1_ x2_) = eval (pred_of_seq (Insert x1_ x2_)) [PROOF STEP] by (auto simp add: fun_eq_iff elim: supE singleE intro: supI1 supI2 singleI) [PROOF STATE] proof (state) this: member (Insert x1_ x2_) = eval (pred_of_seq (Insert x1_ x2_)) goal (1 subgoal): 1. \<And>x1 xq. member xq = eval (pred_of_seq xq) \<Longrightarrow> member (Join x1 xq) = eval (pred_of_seq (Join x1 xq)) [PROOF STEP] next [PROOF STATE] proof (state) goal (1 subgoal): 1. \<And>x1 xq. member xq = eval (pred_of_seq xq) \<Longrightarrow> member (Join x1 xq) = eval (pred_of_seq (Join x1 xq)) [PROOF STEP] case Join [PROOF STATE] proof (state) this: member xq_ = eval (pred_of_seq xq_) goal (1 subgoal): 1. \<And>x1 xq. member xq = eval (pred_of_seq xq) \<Longrightarrow> member (Join x1 xq) = eval (pred_of_seq (Join x1 xq)) [PROOF STEP] then [PROOF STATE] proof (chain) picking this: member xq_ = eval (pred_of_seq xq_) [PROOF STEP] show ?case [PROOF STATE] proof (prove) using this: member xq_ = eval (pred_of_seq xq_) goal (1 subgoal): 1. member (Join x1_ xq_) = eval (pred_of_seq (Join x1_ xq_)) [PROOF STEP] by (auto simp add: fun_eq_iff elim: supE intro: supI1 supI2) [PROOF STATE] proof (state) this: member (Join x1_ xq_) = eval (pred_of_seq (Join x1_ xq_)) goal: No subgoals! [PROOF STEP] qed
If $a \neq 0$, then the Euclidean relation between $x$ and $y$ is the same as the Euclidean relation between $x$ and $a y$.
If $f(x)$ converges to $L$ as $x \to \infty$ along the reals, then the sequence $f(n)$ converges to $L$ as $n \to \infty$.
Require Export NFA. Section Reversing. Context {State Symbol : Type}. Definition Word := @Word Symbol. Hypothesis State_eq_dec : forall (x1 x2:State), { x1 = x2 } + { x1 <> x2 }. Hypothesis Symbol_eq_dec : forall (x1 x2:Symbol), { x1 = x2 } + { x1 <> x2 }. Definition NFA := @NFA State Symbol. Definition ext_transitionf := ext_transitionf State_eq_dec Symbol_eq_dec. Definition nfa_accepts := nfa_accepts State_eq_dec Symbol_eq_dec. (* Reverses word *) Fixpoint rev (w:Word) := match w with | a::w => rev w ++ [a] | nil => nil end. (* Reverses NFA *) Fixpoint rev_nfa (g:NFA) := match g with | start q::g => accept q::rev_nfa g | accept q::g => start q::rev_nfa g | transition q1 a q2::g => transition q2 a q1::rev_nfa g | x::g => x::rev_nfa g | nil => nil end. (* Distribution of word reversion *) Lemma rev_distr w1 w2 : rev (w1 ++ w2) = rev w2 ++ rev w1. Proof. induction w1 as [|a w1 IH]. symmetry; apply app_nil_r. simpl. rewrite IH, app_assoc_reverse. intuition. Qed. (* A word reversed twice is equal to itself *) Lemma rev_twice w : rev (rev w) = w. Proof. induction w as [|a w IH]. intuition. simpl. rewrite rev_distr, IH. intuition. Qed. (* The resulting states are the same *) Lemma rev_states g q : In q (states (rev_nfa g)) -> In q (states g). Proof. intro H. induction g as [|c g IH]. contradiction. destruct c. 1-4: try destruct H; subst. 1,4,6: left; intuition. 1-4: try right; intuition. destruct H as [H|[H|H]]. 1,3: right. 1,3: subst; left; intuition. right; intuition. Qed. (* The resulting accept states are the original start states *) Lemma rev_start_states g : accept_states (rev_nfa g) = start_states g. Proof. induction g as [|c g IH]. intuition. destruct c; simpl; rewrite IH; intuition. Qed. (* The resulting start states are the original accept states *) Lemma rev_accept_states g : start_states (rev_nfa g) = accept_states g. Proof. induction g as [|c g IH]. intuition. destruct c; simpl; rewrite IH; intuition. Qed. (* The transitions go reversed *) Lemma rev_transition g q1 a q2 : In (transition q1 a q2) g <-> In (transition q2 a q1) (rev_nfa g). Proof. induction g as [|c g IH]. intuition. destruct c; simpl. 1-4: split; intros [H|H]; try discriminate; intuition. split; intros [H|H]. 1,3: inversion H; subst; intuition. 1,2: intuition. Qed. (* Same for paths *) Lemma rev_path g q1 q2 w : path g q1 q2 w <-> path (rev_nfa g) q2 q1 (rev w). Proof. split; intro H. - induction H. constructor. simpl. pose proof (path_trans_inv1 (rev_nfa g) q3 q2 q1 (rev w) a). apply H1. 2: apply rev_transition in H. 1,2: intuition. - rewrite <- rev_twice; remember (rev w) as w'; clear Heqw' w. induction H. constructor. simpl. pose proof (path_trans_inv1 g q3 q2 q1 (rev w) a). apply H1. 2: apply rev_transition. 1,2: intuition. Qed. (* And for the extended transition function *) Lemma rev_ext_transitionf g q1 q2 w : In q1 (ext_transitionf g [q2] w) <-> In q2 (ext_transitionf (rev_nfa g) [q1] (rev w)). Proof. split; intro H. - apply path_ext_transitionf; apply path_ext_transitionf, rev_path in H; intuition. - apply path_ext_transitionf; apply path_ext_transitionf, rev_path in H; intuition. Qed. (* The reversed language *) Lemma rev_language g w : nfa_accepts g w <-> nfa_accepts (rev_nfa g) (rev w). Proof. unfold nfa_accepts, NFA.nfa_accepts, has_accept_state; split; intros [q [H H0]]. - apply ext_transitionf_singleton in H; destruct H as [q0 [H H1]]. apply path_ext_transitionf in H1. apply rev_path in H1. pose proof (path_ext_transitionf State_eq_dec Symbol_eq_dec (rev_nfa g) q q0 (rev w)) as H2. apply H2 in H1. 
exists q0; split. 2: rewrite rev_start_states. apply ext_transitionf_generalize with q. rewrite rev_accept_states. 1-3: intuition. - apply ext_transitionf_singleton in H; destruct H as [q0 [H H1]]. apply path_ext_transitionf in H1. apply rev_path in H1. pose proof (path_ext_transitionf State_eq_dec Symbol_eq_dec g q q0 w) as H2. apply H2 in H1. exists q0; split. 2: rewrite <- rev_accept_states. apply ext_transitionf_generalize with q. rewrite <- rev_start_states. 1-3: intuition. Qed. End Reversing.
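In conventional automata notation (the notation here is mine, not the Coq development's), rev_language says the construction realizes the reversed language:

\[ L(\mathrm{rev\_nfa}(g)) = \{\, \mathrm{rev}(w) \mid w \in L(g) \,\}, \]

which follows from rev_path — a run $q_1 \xrightarrow{w} q_2$ in $g$ is exactly a run $q_2 \xrightarrow{\mathrm{rev}(w)} q_1$ in $\mathrm{rev\_nfa}(g)$ — together with the swap of start and accept states established by rev_start_states and rev_accept_states.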
||| A Reversed List module Data.SnocList import Decidable.Equality import Data.List %default total infixl 7 <>< infixr 6 <>> ||| 'fish': Action of lists on snoc-lists public export (<><) : SnocList a -> List a -> SnocList a sx <>< [] = sx sx <>< (x :: xs) = sx :< x <>< xs ||| 'chips': Action of snoc-lists on lists public export (<>>) : SnocList a -> List a -> List a Lin <>> xs = xs (sx :< x) <>> xs = sx <>> x :: xs Cast (SnocList a) (List a) where cast sx = sx <>> [] Cast (List a) (SnocList a) where cast xs = Lin <>< xs ||| Transform to a list but keeping the contents in the spine order (term depth). public export asList : SnocList type -> List type asList = (reverse . cast) public export Eq a => Eq (SnocList a) where (==) Lin Lin = True (==) (sx :< x) (sy :< y) = x == y && sx == sy (==) _ _ = False public export Ord a => Ord (SnocList a) where compare Lin Lin = EQ compare Lin (sx :< x) = LT compare (sx :< x) Lin = GT compare (sx :< x) (sy :< y) = case compare sx sy of EQ => compare x y c => c ||| True iff input is Lin public export isLin : SnocList a -> Bool isLin Lin = True isLin (sx :< x) = False ||| True iff input is (:<) public export isSnoc : SnocList a -> Bool isSnoc Lin = False isSnoc (sx :< x) = True public export (++) : (sx, sy : SnocList a) -> SnocList a (++) sx Lin = sx (++) sx (sy :< y) = (sx ++ sy) :< y public export length : SnocList a -> Nat length Lin = Z length (sx :< x) = length sx + 1 export Show a => Show (SnocList a) where show xs = "[< " ++ show' "" xs ++ "]" where show' : String -> SnocList a -> String show' acc Lin = acc show' acc (Lin :< x)= show x ++ acc show' acc (xs :< x) = show' (", " ++ show x ++ acc) xs public export Functor SnocList where map f Lin = Lin map f (sx :< x) = (map f sx) :< (f x) public export Semigroup (SnocList a) where (<+>) = (++) public export Monoid (SnocList a) where neutral = Lin ||| Check if something is a member of a list using the default Boolean equality. public export elem : Eq a => a -> SnocList a -> Bool elem x Lin = False elem x (sx :< y) = x == y || elem x sx
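A small worked reduction, following the defining equations of <>< and <>> above (the numerals are illustrative):

\[ \mathsf{Lin} \mathbin{<><} [1,2,3] = ((\mathsf{Lin} :< 1) :< 2) :< 3, \qquad (((\mathsf{Lin} :< 1) :< 2) :< 3) \mathbin{<>>} [\,] = [1,2,3], \]

so casting a List to a SnocList and back is the identity, while asList returns the elements outermost-first ([3,2,1] here), i.e. in spine order.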
/* multifit_nlinear/svd.c * * Copyright (C) 2016 Patrick Alken * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 3 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ /* * This module handles the solution of the linear least squares * system: * * [ J ] dx = - [ f ] * [ sqrt(mu)*D ] [ 0 ] * * using an SVD approach. The system above is transformed to "standard form" * via: * * J~ = J D^{-1} * dx~ = D dx * * so that * * [ J~ ] dx~ = - [ f ] * [ sqrt(mu)*I ] [ 0 ] * * can be solved with a standard SVD method, and then dx is recovered * from dx~ via: dx = D^{-1} dx~ */ #include <config.h> #include <gsl/gsl_math.h> #include <gsl/gsl_vector.h> #include <gsl/gsl_matrix.h> #include <gsl/gsl_linalg.h> #include <gsl/gsl_multifit_nlinear.h> #include <gsl/gsl_errno.h> #include <gsl/gsl_blas.h> typedef struct { size_t n; /* number of residuals */ size_t p; /* number of parameters */ gsl_matrix *U; /* U factor of J, n-by-p */ gsl_matrix *V; /* V factor of J, p-by-p */ gsl_vector *S; /* singular values, size p */ gsl_vector *workp; /* workspace, length p */ double mu; /* LM parameter */ } svd_state_t; static int svd_init(const void * vtrust_state, void * vstate); static int svd_presolve(const double mu, const void * vtrust_state, void * vstate); static int svd_solve(const gsl_vector * f, gsl_vector *x, const void * vtrust_state, void *vstate); static int svd_rcond(double * rcond, void * vstate); static void * svd_alloc (const size_t n, const size_t p) { svd_state_t *state; (void)n; state = calloc(1, sizeof(svd_state_t)); if (state == NULL) { GSL_ERROR_NULL ("failed to allocate svd state", GSL_ENOMEM); } state->U = gsl_matrix_alloc(n, p); if (state->U == NULL) { GSL_ERROR_NULL ("failed to allocate space for U", GSL_ENOMEM); } state->V = gsl_matrix_alloc(p, p); if (state->V == NULL) { GSL_ERROR_NULL ("failed to allocate space for V", GSL_ENOMEM); } state->S = gsl_vector_alloc(p); if (state->S == NULL) { GSL_ERROR_NULL ("failed to allocate space for S", GSL_ENOMEM); } state->workp = gsl_vector_alloc(p); if (state->workp == NULL) { GSL_ERROR_NULL ("failed to allocate space for workp", GSL_ENOMEM); } state->mu = 0.0; state->n = n; state->p = p; return state; } static void svd_free(void *vstate) { svd_state_t *state = (svd_state_t *) vstate; if (state->U) gsl_matrix_free(state->U); if (state->V) gsl_matrix_free(state->V); if (state->S) gsl_vector_free(state->S); if (state->workp) gsl_vector_free(state->workp); free(state); } /* compute svd of J */ static int svd_init(const void * vtrust_state, void * vstate) { int status; const gsl_multifit_nlinear_trust_state *trust_state = (const gsl_multifit_nlinear_trust_state *) vtrust_state; svd_state_t *state = (svd_state_t *) vstate; size_t i; gsl_matrix_set_zero(state->U); /* compute U = J D^{-1} */ for (i = 0; i < state->p; ++i) { gsl_vector_const_view Ji = gsl_matrix_const_column(trust_state->J, i); gsl_vector_view ui = gsl_matrix_column(state->U, i); double di = 
gsl_vector_get(trust_state->diag, i); gsl_blas_daxpy(1.0 / di, &Ji.vector, &ui.vector); } status = gsl_linalg_SV_decomp(state->U, state->V, state->S, state->workp); return status; } static int svd_presolve(const double mu, const void * vtrust_state, void * vstate) { svd_state_t *state = (svd_state_t *) vstate; state->mu = mu; (void)vtrust_state; return GSL_SUCCESS; } static int svd_solve(const gsl_vector * f, gsl_vector *x, const void * vtrust_state, void *vstate) { int status = GSL_SUCCESS; const gsl_multifit_nlinear_trust_state *trust_state = (const gsl_multifit_nlinear_trust_state *) vtrust_state; svd_state_t *state = (svd_state_t *) vstate; const size_t p = state->p; const double tol = GSL_DBL_EPSILON; const double s0 = gsl_vector_get(state->S, 0); size_t j; /* compute workp = - U^T f */ gsl_blas_dgemv(CblasTrans, -1.0, state->U, f, 0.0, state->workp); /* * compute: * * workp = sum_i s_i / (s_i^2 + mu) (-u_i^T f) */ if (state->mu == 0.0) { /* * compute Gauss-Newton direction by solving * J x = -f */ for (j = 0; j < p; ++j) { double sj = gsl_vector_get(state->S, j); double *ptr = gsl_vector_ptr(state->workp, j); double alpha; if (sj <= tol * s0) alpha = 0.0; else alpha = 1.0 / sj; *ptr *= alpha; } } else { /* * solve: * * [ J D^{-1} ] (D x) = -[ f ] * [ sqrt(mu) I ] [ 0 ] * * using SVD factorization of J D^{-1} */ for (j = 0; j < p; ++j) { double sj = gsl_vector_get(state->S, j); double *ptr = gsl_vector_ptr(state->workp, j); *ptr *= sj / (sj*sj + state->mu); } } /* compute: x = V * workp */ gsl_blas_dgemv(CblasNoTrans, 1.0, state->V, state->workp, 0.0, x); /* compute D^{-1} x */ gsl_vector_div(x, trust_state->diag); return status; } static int svd_rcond(double * rcond, void * vstate) { int status = GSL_SUCCESS; svd_state_t *state = (svd_state_t *) vstate; double smax = gsl_vector_get(state->S, 0); double smin = gsl_vector_get(state->S, state->p - 1); *rcond = smin / smax; return status; } static const gsl_multifit_nlinear_solver svd_type = { "svd", svd_alloc, svd_init, svd_presolve, svd_solve, svd_rcond, svd_free }; const gsl_multifit_nlinear_solver *gsl_multifit_nlinear_solver_svd = &svd_type;
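The comment block at the top of the file describes the transformed system; concretely, with the SVD $\tilde J = U\Sigma V^T$ computed in svd_init, the damped least-squares step implemented in svd_solve is (standard Levenberg–Marquardt algebra, restated here for reference):

\[ \widetilde{\delta x} = -\,V\,(\Sigma^2 + \mu I)^{-1}\,\Sigma\,U^T f = -\sum_i \frac{s_i}{s_i^2 + \mu}\,(u_i^T f)\,v_i, \qquad \delta x = D^{-1}\,\widetilde{\delta x}. \]

For $\mu = 0$ the factor $s_i/(s_i^2+\mu)$ degenerates to $1/s_i$, with singular values below $\mathrm{tol}\cdot s_0$ truncated to avoid amplifying noise — exactly the two branches in svd_solve.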
/* * Copyright 2019 Applied Research Center for Computer Networks * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cstdint> #include <cstddef> #include <functional> // hash #include <ostream> #include <typeinfo> #include <boost/core/demangle.hpp> #include "type_fwd.hh" #include "../lib/bits.hpp" namespace runos { namespace oxm { class type { uint16_t _ns; uint8_t _id:7; bool _maskable:1; uint16_t _nbits; const std::type_info* _cpptype; public: using value_type = bits<>; using mask_type = bits<>; constexpr type(uint16_t ns, uint8_t id, bool maskable, uint16_t nbits, const std::type_info* cpptype = nullptr) noexcept : _ns{ns} , _id{id} , _maskable{maskable} , _nbits{nbits} , _cpptype{cpptype} { } constexpr uint16_t ns() const noexcept { return _ns; } constexpr uint8_t id() const noexcept { return _id; } constexpr size_t nbits() const noexcept { return _nbits; } constexpr bool maskable() const noexcept { return _maskable; } constexpr size_t nbytes() const noexcept { return (nbits() / 8) + ( nbits() % 8 ? 1 : 0 ); } friend constexpr bool operator==(const type& lhs, const type& rhs) noexcept { return (lhs.ns() == rhs.ns()) && (lhs.id() == rhs.id()); } friend constexpr bool operator!=(const type& lhs, const type& rhs) noexcept { return ! (lhs == rhs); } friend std::ostream& operator<<(std::ostream& out, const type t) { if (not t._cpptype) { return out << "oxm::type{ns=" << t.ns() << ", id=" << unsigned(t.id()) << ", mask=" << t.maskable() << ", nbits=" << t.nbits() << "}"; } else { return out << boost::core::demangle(t._cpptype->name()) << "{}"; } } }; template< class Final, uint16_t NS, uint8_t ID, size_t NBITS, class ValueType, class MaskType = ValueType, bool HASMASK = false > struct define_type : type { using value_type = ValueType; using mask_type = MaskType; constexpr define_type() : type{ NS, ID, HASMASK, NBITS, &typeid(Final) } { } }; } // namespace oxm } // namespace runos namespace std { template<> struct hash<runos::oxm::type> { size_t operator() (runos::oxm::type t) const noexcept { return std::hash<uint64_t>() (static_cast<uint64_t>(t.ns()) << 8ULL | t.id()); } }; }
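A sketch of how a concrete OXM field type would be declared with define_type; the namespace, id, and include path below are invented for illustration, not real OXM assignments:

// Hedged sketch; assumes the header above is available as "type.hh".
#include <cstdint>
#include "type.hh"

// Hypothetical 16-bit EtherType-like field.
struct eth_type_field
    : runos::oxm::define_type<eth_type_field,
                              0x8000,        // ns  -- illustrative
                              5,             // id  -- illustrative
                              16,            // nbits
                              std::uint16_t> {};

// eth_type_field{}.nbytes() == 2 and maskable() == false (HASMASK defaults to false);
// the std::hash<runos::oxm::type> specialization lets field instances key unordered
// containers, e.g. std::unordered_map<runos::oxm::type, Handler>.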
fl fswrite servo.f \ \ waitcnt ( n1 n2 -- n1 ) \ wait until n1, add n2 to n1 [ifndef waitcnt : waitcnt _xasm2>1 h1F1 _cnip ; ] \ a cog special register [ifndef ctra h1F8 wconstant ctra ] \ a cog special register [ifndef ctrb h1F9 wconstant ctrb ] \ a cog special register [ifndef frqa h1FA wconstant frqa ] \ a cog special register [ifndef frqb h1FB wconstant frqb ] \ a cog special register [ifndef phsa h1FC wconstant phsa ] \ a cog special register [ifndef phsb h1FD wconstant phsb ] \ \ Servo motors are controlled by pulse width modulation (pwm). \ Every 20 milliseconds a pulse of length 0.75 milliseconds \ to 2.25 milliseconds determines the position of the servo motor. \ \ Example1: 0.75ms \ ---_____________________________________________________________________________---________ \ \ Example2: 1.5ms \ ------__________________________________________________________________________------_____ \ \ Example3: 2.25ms \ ---------_______________________________________________________________________---------__ \ \ \ Each servo motor requires calibration, so we will set the absolute minimum to 0.5 ms and the \ absolute maximum to 2.5 ms. Depending on the servos you are using, you may want to adjust these \ values. \ \ \ This driver will drive 16 servos per cog; it uses the 2 counters in a time domain multiplexed \ mode to generate the pulses. \ \ Each counter is set to single ended PWM/NCO mode. In this mode the counter drives an io pin. \ The value of the pin is controlled by bit 31 of the phsa/phsb registers. These registers are \ incremented every cycle of the system clock. For an 80 MHz system clock this provides \ 12.5 ns resolution. \ \ Without intervention the phsa/phsb registers would generate a square wave with the cycle time \ being more than 50 seconds \ ----------------------____________________----------------------____________________ \ \ By starting the phsa/phsb register at the appropriate place in the cycle, and by resetting \ it to the same position every 20 ms, we generate a pwm signal on the io pin. \ \ Since there are 2 counters we can do this with 2 pins at a time. \ \ And since we only need intervention during the hi time of the signal, we can interleave the \ processing so that each counter will address 8 io pins. \ \ This is done by taking the 20ms cycle, splitting it into 8 2.5ms sections, and driving the \ corresponding pin hi in that portion of the cycle.
\ \ This means the absolute maximum pulse width we can set will be 2.5ms \ \ 0ms 5ms 10ms 15ms 20ms \ ------__________________________________________________________________________------_____ ctra pin n1 \ __________------___________________________________________________________________________ ctra pin n1 + 2 \ ____________________------_________________________________________________________________ ctra pin n1 + 4 \ ______________________________------_______________________________________________________ ctra pin n1 + 6 \ ________________________________________------_____________________________________________ ctra pin n1 + 8 \ __________________________________________________------___________________________________ ctra pin n1 + 10 \ ____________________________________________________________------_________________________ ctra pin n1 + 12 \ ______________________________________________________________________------_______________ ctra pin n1 + 14 \ ------__________________________________________________________________________------_____ ctrb pin n1 + 1 \ __________------___________________________________________________________________________ ctrb pin n1 + 3 \ ____________________------_________________________________________________________________ ctrb pin n1 + 5 \ ______________________________------_______________________________________________________ ctrb pin n1 + 7 \ ________________________________________------_____________________________________________ ctrb pin n1 + 9 \ __________________________________________________------___________________________________ ctrb pin n1 + 11 \ ____________________________________________________________------_________________________ ctrb pin n1 + 13 \ ______________________________________________________________________------_______________ ctrb pin n1 + 15 \ \ This constant defines the number of times per second a pulse is generated \ for the servo motor. \ d_50 wconstant sm_cyclefreq \ \ The number of clock cycles in one pulse sequence of 20ms \ clkfreq sm_cyclefreq u/ constant sm_cyclecnt \ \ The number of cycles in 2.5ms, 1/8 of the 20 ms cycle. However, an effect \ of this is that the maximum pulse time is 2.5ms less the time it takes to \ process a cycle. Thus the absolute maximum time for a pulse width is \ approximately 2.39ms to 2.4ms on an 80 MHz system. \ sm_cyclecnt 8 u/ constant sm_cyclecnt/8 \ \ These constants define the absolute minimum and maximum lengths of time \ for the pulse. The maximum can never be more than the time slot. \ \ The minimum was set to 500usec and the maximum to 2500usec, \ but someone managed to turn their servo into a continuous rotation servo \ without intending to do so. \ So the defaults are set to 750usec and 2250usec. \ clkfreq d_750 d_1_000_000 u*/ constant sm_minhi clkfreq d_2_250 d_1_000_000 u*/ sm_cyclecnt/8 min constant sm_maxhi \ \ This variable defines which IO pins are enabled as outputs \ by the servo driver; the default is 0 - 27 since 30 & 31 are used \ for the serial port, and 28 & 29 for the eeprom \ \ variable sm_enable h_0FFF_FFFF sm_enable L! variable sm_enable h_0000_000F sm_enable L!
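Plugging the file's running assumption of an 80 MHz system clock into the constants just defined (a sanity check, not additional code):

\[ \mathtt{sm\_cyclecnt} = \frac{80{,}000{,}000}{50} = 1{,}600{,}000 \text{ ticks} \;(= 20\,\mathrm{ms}), \qquad \mathtt{sm\_cyclecnt/8} = 200{,}000 \text{ ticks} \;(= 2.5\,\mathrm{ms}), \]
\[ \mathtt{sm\_minhi} = 80\,\mathrm{MHz} \cdot 750\,\mu\mathrm{s} = 60{,}000 \text{ ticks}, \qquad \mathtt{sm\_maxhi} = 80\,\mathrm{MHz} \cdot 2250\,\mu\mathrm{s} = 180{,}000 \text{ ticks} < 200{,}000, \]

so the sm_setpos word defined below maps its 0–10,000 input linearly onto [60,000, 180,000] ticks; e.g. u = 5,000 gives 120,000 ticks, a 1.5 ms pulse.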
\
\ this array defines the pulse hi times; it is initialized 1/2 way between
\ the min and max
\
lockdict variable sm_hitime d_124 allot freedict \ 32 longs
\
\ this array defines the minimum pulse hi time; it is initialized to the minimum
\
lockdict variable sm_minhitime d_124 allot freedict \ 32 longs
\
\ this array defines the maximum pulse hi time; it is initialized to the maximum
\
lockdict variable sm_maxhitime d_124 allot freedict \ 32 longs
\
\ _sm_idx ( n1 addr -- addr ) calculates the array offset for the particular servo
: _sm_idx
swap 0 max d_31 min 4* + ;

\ sm_setpos ( n1 u -- ) n1 is an integer between 0 and 31, u is an unsigned integer between 0 and 10,000
: sm_setpos
0 max d_10_000 min                          \ ( n1 u -- )
over dup sm_minhitime _sm_idx L@            \ ( n1 u n1 minhitime -- )
swap sm_maxhitime _sm_idx L@                \ ( n1 u minhitime maxhitime -- )
over -                                      \ ( n1 u minhitime range -- )
rot d_10_000 u*/ +                          \ ( n1 pos -- )
swap sm_hitime _sm_idx L! ;

\ sm_servo ( n1 -- ) this cog will drive servos n1 to n1 + 15
: sm_servo
4 state andnC! c" SERVO" cds W!
0 max h18 min                               \ ( n1 -- )
dup h_10 bounds do
  sm_enable L@ 1 i lshift and if i dup pinlo pinout then
loop                                        \ ( n1 -- )
1 frqa COG!  1 frqb COG!
\ counter set to drive pin n1, single ended nco/pwm mode
dup h_1000_0000 +
\ the offset into the array defining the pulse width
swap 4* sm_hitime +                         \ ( ctrn1 hitimeoffset -- )
cnt COG@ sm_cyclecnt +                      \ ( ctrn1 hitimeoffset nextcycletime -- )
begin
  h10 0 do
    0 phsa COG!  0 phsb COG!
    \ counter a set to drive pin n1+i, counter b set to drive pin n1+i+1,
    \ both single ended nco/pwm mode
    \ ( ctrn1 hitimeoffset nextcycletime -- )
    rot dup i + dup ctra COG! 1 + ctrb COG!     \ ( hitimeoffset nextcycletime ctrn1 -- )
    rot dup i 2 lshift + dup L@ negate phsa COG! 4 + L@ negate phsb COG!
                                            \ ( nextcycletime ctrn1 hitimeoffset -- )
    rot                                     \ ( ctrn1 hitimeoffset nextcycletime -- )
    \ wait for the next 2.5 ms time slot
    sm_cyclecnt/8 waitcnt
  2 +loop
0 until ;

\ _pos? ( n1 n2 -- n3 n4 ) n3 clocks hi per cycle for pin n1, n4 clocks hi per cycle for pin n2
: _pos?
\ increment counter while the pulse is hi for pin n2
h_6800_0000 + ctra COG! 1 frqa COG!
\ increment counter while the pulse is hi for pin n1
h_6800_0000 + ctrb COG! 1 frqb COG!
\ zero counts, wait one cycle and get counts
cnt COG@ 0 phsb COG! 0 phsa COG! sm_cyclecnt + 0 waitcnt
phsb COG@ phsa COG@
\ number of clocks each pin was hi in one cycle
rot drop ;                                  \ ( n1 n2 -- timen1 timen2 )

\ pos? ( n1 n2 -- ) display for pin n1 and pin n2 the number of usec hi per cycle
: pos?
2dup _pos? >r                               \ ( n1 n2 timen1 -- ) timen2 saved on the return stack
rot <# # # #> .cstr h2D emit
hF4240 clkfreq u*/ <# # # # # #> .cstr space
r> swap <# # # #> .cstr h2D emit
hF4240 clkfreq u*/ <# # # # # #> .cstr space ;

\ sm_setminmax ( min max n1 -- ) sets calibration parameters for each servo
: sm_setminmax
swap sm_maxhi min over sm_maxhitime _sm_idx L!
swap sm_minhi max swap sm_minhitime _sm_idx L! ;

\ _sm_cal2 ( n1 -- ) show current positions for servos n1 and n1+1 if their pins are enabled
: _sm_cal2
sm_enable L@ over 3 swap lshift and if
  dup 1+ pos? hD emit
else
  drop
then ;

\ _sm_cal1 ( servo key delta -- servo key ) nudge the stored hi time by delta, clamped to the absolute limits
: _sm_cal1
rot tuck                                    \ ( key servo delta servo -- )
sm_hitime _sm_idx dup                       \ ( key servo delta addr addr -- )
L@ rot + sm_minhi max sm_maxhi min swap L!
                                            \ ( key servo -- )
swap ;
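\
\ Usage sketch (editor's example; assumes a driver cog for pins 0 - 15 is
\ already running, e.g. started as in sm_start_servos below, and that pos?
\ is run from a cog whose counters are otherwise free):
\
\   d_5 0 sm_setpos          \ servo 5 to its calibrated minimum hi time
\   d_5 d_5000 sm_setpos     \ servo 5 to the middle of its calibrated range
\   d_5 d_10_000 sm_setpos   \ servo 5 to its calibrated maximum
\   d_4 d_5 pos?             \ print pin number and measured usec hi for pins 4 and 5
\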
\ _sm_cal ( n1 -- n1 ) n1 - the servo, 0 - 31
: _sm_cal
begin
  key
  dup h61 = if h-2710 _sm_cal1 else         \ a: -10000
  dup h73 = if h-3E8 _sm_cal1 else          \ s: -1000
  dup h64 = if h-64 _sm_cal1 else           \ d: -100
  dup h66 = if h-10 _sm_cal1 else           \ f: -16
  dup h68 = if h10 _sm_cal1 else            \ h: +16
  dup h6A = if h64 _sm_cal1 else            \ j: +100
  dup h6B = if h3E8 _sm_cal1 else           \ k: +1000
  dup h6C = if h2710 _sm_cal1 thens         \ l: +10000
  over _sm_cal2
  hD =                                      \ done on enter
until ;

\ sm_calibrate ( n1 -- ) n1 - servo
: sm_calibrate
begin
  ." a - <<<< s - <<< d - << f - < h - > j - >> k - >>> l - >>>>" cr
  ." Move servo to leftmost position then hit enter" cr
  _sm_cal cr
  dup sm_hitime _sm_idx L@ swap             \ ( left n1 -- )
  ." Move servo to rightmost position then hit enter" cr
  _sm_cal cr
  dup sm_hitime _sm_idx L@ swap             \ ( left right n1 -- )
  dup >r sm_setminmax r>                    \ ( n1 -- )
  ." 0 - Leftmost 1 - Center 2 - Rightmost r - Recalibrate ESC - Done" cr
  0
  begin
    drop key
    dup h30 = if over 0 sm_setpos else
    dup h31 = if over h1388 sm_setpos else
    dup h32 = if over h2710 sm_setpos thens
    over _sm_cal2
    dup h1B = over h72 = or                 \ leave on ESC or 'r'
  until
  h1B =                                     \ done on ESC, recalibrate on 'r'
until
cr
dup sm_minhitime _sm_idx L@ .
dup sm_maxhitime _sm_idx L@ .
. cr ;

\
\ sm_start_servos ( -- ) runs the servo drivers: cog 1 drives servos on io pins
\ 0 - 15; uncomment the second cogx line below for pins 16 - 31 on cog 2
\ modify after calibrating servos
: sm_start_servos
\ initialize arrays, default calibration
32 0 do
  sm_minhi i sm_minhitime _sm_idx L!
  sm_maxhi i sm_maxhitime _sm_idx L!
loop
\ calibration for servos
105000 200000 0 sm_setminmax
\ start each servo at the middle of its calibrated range
32 0 do
  i sm_minhitime _sm_idx L@ i sm_maxhitime _sm_idx L@ over - 2/ + i sm_hitime _sm_idx L!
loop
1 cogreset h_10 delms c" 0 sm_servo" 1 cogx
\ 1 cogreset h_10 delms c" d_16 sm_servo" 2 cogx
10 delms ;
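\
\ Typical bring-up (editor's sketch, using only words defined above; the key
\ map in _sm_cal is a/s/d/f to nudge the hi time down, h/j/k/l to nudge it up,
\ enter to accept):
\
\   sm_start_servos        \ start cog 1 driving servos 0 - 15 with default limits
\   0 sm_calibrate         \ interactively find and store min/max for servo 0
\   0 d_2500 sm_setpos     \ then move servo 0 to 25% of its calibrated travel
\
...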
proposition compact_eq_seq_compact_metric: "compact (S :: 'a::metric_space set) \<longleftrightarrow> seq_compact S"