Dataset: AI4M (column "text", string lengths 0 to 3.34M)
#pragma once #include "common/to_do.h" #include "open_file.h" #include "trees/regular_file.h" #include <gsl/span> namespace dogbox::tree { enum class read_caching { none, one_piece }; inline std::ostream &operator<<(std::ostream &out, read_caching const printed) { switch (printed) { case read_caching::none: return out << "none"; case read_caching::one_piece: return out << "one_piece"; } TO_DO(); } constexpr dogbox::tree::read_caching all_read_caching_modes[] = { dogbox::tree::read_caching::none, dogbox::tree::read_caching::one_piece}; size_t read_file(open_file &file, sqlite3 &database, regular_file::length_type const offset, gsl::span<std::byte> const into, read_caching const caching); }
/- Copyright (c) 2022 Joseph Hua. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Scott Morrison, Bhavik Mehta, Johan Commelin, Reid Barton, Rob Lewis, Joseph Hua -/ import category_theory.functor.reflects_isomorphisms import category_theory.limits.shapes.terminal /-! # Algebras of endofunctors This file defines (co)algebras of an endofunctor, and provides the category instance for them. It also defines the forgetful functor from the category of (co)algebras. It is shown that the structure map of the initial algebra of an endofunctor is an isomorphism. Furthermore, it is shown that for an adjunction `F ⊣ G` the category of algebras over `F` is equivalent to the category of coalgebras over `G`. ## TODO * Prove the dual result about the structure map of the terminal coalgebra of an endofunctor. * Prove that if the countable infinite product over the powers of the endofunctor exists, then algebras over the endofunctor coincide with algebras over the free monad on the endofunctor. -/ universes v u namespace category_theory namespace endofunctor variables {C : Type u} [category.{v} C] /-- An algebra of an endofunctor; `str` stands for "structure morphism" -/ structure algebra (F : C ⥤ C) := (A : C) (str : F.obj A ⟶ A) instance [inhabited C] : inhabited (algebra (𝟭 C)) := ⟨⟨ default , 𝟙 _ ⟩⟩ namespace algebra variables {F : C ⥤ C} (A : algebra F) {A₀ A₁ A₂ : algebra F} /- ``` str F A₀ -----> A₀ | | F f | | f V V F A₁ -----> A₁ str ``` -/ /-- A morphism between algebras of endofunctor `F` -/ @[ext] structure hom (A₀ A₁ : algebra F) := (f : A₀.1 ⟶ A₁.1) (h' : F.map f ≫ A₁.str = A₀.str ≫ f . obviously) restate_axiom hom.h' attribute [simp, reassoc] hom.h namespace hom /-- The identity morphism of an algebra of endofunctor `F` -/ def id : hom A A := { f := 𝟙 _ } instance : inhabited (hom A A) := ⟨{ f := 𝟙 _ }⟩ /-- The composition of morphisms between algebras of endofunctor `F` -/ def comp (f : hom A₀ A₁) (g : hom A₁ A₂) : hom A₀ A₂ := { f := f.1 ≫ g.1 } end hom instance (F : C ⥤ C) : category_struct (algebra F) := { hom := hom, id := hom.id, comp := @hom.comp _ _ _ } @[simp] lemma id_eq_id : algebra.hom.id A = 𝟙 A := rfl @[simp] lemma id_f : (𝟙 _ : A ⟶ A).1 = 𝟙 A.1 := rfl variables {A₀ A₁ A₂} (f : A₀ ⟶ A₁) (g : A₁ ⟶ A₂) @[simp] lemma comp_eq_comp : algebra.hom.comp f g = f ≫ g := rfl @[simp] lemma comp_f : (f ≫ g).1 = f.1 ≫ g.1 := rfl /-- Algebras of an endofunctor `F` form a category -/ instance (F : C ⥤ C) : category (algebra F) := {} /-- To construct an isomorphism of algebras, it suffices to give an isomorphism of the As which commutes with the structure morphisms. -/ @[simps] def iso_mk (h : A₀.1 ≅ A₁.1) (w : F.map h.hom ≫ A₁.str = A₀.str ≫ h.hom) : A₀ ≅ A₁ := { hom := { f := h.hom }, inv := { f := h.inv, h' := by { rw [h.eq_comp_inv, category.assoc, ←w, ←functor.map_comp_assoc], simp } } } /-- The forgetful functor from the category of algebras, forgetting the algebraic structure. -/ @[simps] def forget (F : C ⥤ C) : algebra F ⥤ C := { obj := λ A, A.1, map := λ A B f, f.1 } /-- An algebra morphism with an underlying isomorphism hom in `C` is an algebra isomorphism. 
-/ lemma iso_of_iso (f : A₀ ⟶ A₁) [is_iso f.1] : is_iso f := ⟨⟨{ f := inv f.1, h' := by { rw [is_iso.eq_comp_inv f.1, category.assoc, ← f.h], simp } }, by tidy⟩⟩ instance forget_reflects_iso : reflects_isomorphisms (forget F) := { reflects := λ A B, iso_of_iso } instance forget_faithful : faithful (forget F) := {} /-- An algebra morphism with an underlying epimorphism hom in `C` is an algebra epimorphism. -/ lemma epi_of_epi {X Y : algebra F} (f : X ⟶ Y) [h : epi f.1] : epi f := (forget F).epi_of_epi_map h /-- An algebra morphism with an underlying monomorphism hom in `C` is an algebra monomorphism. -/ lemma mono_of_mono {X Y : algebra F} (f : X ⟶ Y) [h : mono f.1] : mono f := (forget F).mono_of_mono_map h /-- From a natural transformation `α : G → F` we get a functor from algebras of `F` to algebras of `G`. -/ @[simps] def functor_of_nat_trans {F G : C ⥤ C} (α : G ⟶ F) : algebra F ⥤ algebra G := { obj := λ A, { A := A.1, str := α.app A.1 ≫ A.str }, map := λ A₀ A₁ f, { f := f.1 } } /-- The identity transformation induces the identity endofunctor on the category of algebras. -/ @[simps {rhs_md := semireducible}] def functor_of_nat_trans_id : functor_of_nat_trans (𝟙 F) ≅ 𝟭 _ := nat_iso.of_components (λ X, iso_mk (iso.refl _) (by { dsimp, simp, })) (λ X Y f, by { ext, dsimp, simp }) /-- A composition of natural transformations gives the composition of corresponding functors. -/ @[simps {rhs_md := semireducible}] def functor_of_nat_trans_comp {F₀ F₁ F₂ : C ⥤ C} (α : F₀ ⟶ F₁) (β : F₁ ⟶ F₂) : functor_of_nat_trans (α ≫ β) ≅ functor_of_nat_trans β ⋙ functor_of_nat_trans α := nat_iso.of_components (λ X, iso_mk (iso.refl _) (by { dsimp, simp })) (λ X Y f, by { ext, dsimp, simp }) /-- If `α` and `β` are two equal natural transformations, then the functors of algebras induced by them are isomorphic. We define it like this as opposed to using `eq_to_iso` so that the components are nicer to prove lemmas about. -/ @[simps {rhs_md := semireducible}] def functor_of_nat_trans_eq {F G : C ⥤ C} {α β : F ⟶ G} (h : α = β) : functor_of_nat_trans α ≅ functor_of_nat_trans β := nat_iso.of_components (λ X, iso_mk (iso.refl _) (by { dsimp, simp [h] })) (λ X Y f, by { ext, dsimp, simp }) /-- Naturally isomorphic endofunctors give equivalent categories of algebras. Furthermore, they are equivalent as categories over `C`, that is, we have `equiv_of_nat_iso h ⋙ forget = forget`. -/ @[simps] def equiv_of_nat_iso {F G : C ⥤ C} (α : F ≅ G) : algebra F ≌ algebra G := { functor := functor_of_nat_trans α.inv, inverse := functor_of_nat_trans α.hom, unit_iso := functor_of_nat_trans_id.symm ≪≫ functor_of_nat_trans_eq (by simp) ≪≫ functor_of_nat_trans_comp _ _, counit_iso := (functor_of_nat_trans_comp _ _).symm ≪≫ functor_of_nat_trans_eq (by simp) ≪≫ functor_of_nat_trans_id }. 
namespace initial variables {A} (h : limits.is_initial A) /-- The inverse of the structure map of an initial algebra -/ @[simp] def str_inv : A.1 ⟶ F.obj A.1 := (h.to ⟨ F.obj A.1 , F.map A.str ⟩).1 lemma left_inv' : (⟨str_inv h ≫ A.str⟩ : A ⟶ A) = 𝟙 A := limits.is_initial.hom_ext h _ (𝟙 A) lemma left_inv : str_inv h ≫ A.str = 𝟙 _ := congr_arg hom.f (left_inv' h) lemma right_inv : A.str ≫ str_inv h = 𝟙 _ := by { rw [str_inv, ← (h.to ⟨ F.obj A.1 , F.map A.str ⟩).h, ← F.map_id, ← F.map_comp], congr, exact (left_inv h) } /-- The structure map of the inital algebra is an isomorphism, hence endofunctors preserve their initial algebras -/ end initial end algebra /-- A coalgebra of an endofunctor; `str` stands for "structure morphism" -/ structure coalgebra (F : C ⥤ C) := (V : C) (str : V ⟶ F.obj V) instance [inhabited C] : inhabited (coalgebra (𝟭 C)) := ⟨⟨ default , 𝟙 _ ⟩⟩ namespace coalgebra variables {F : C ⥤ C} (V : coalgebra F) {V₀ V₁ V₂ : coalgebra F} /- ``` str V₀ -----> F V₀ | | f | | F f V V V₁ -----> F V₁ str ``` -/ /-- A morphism between coalgebras of an endofunctor `F` -/ @[ext] structure hom (V₀ V₁ : coalgebra F) := (f : V₀.1 ⟶ V₁.1) (h' : V₀.str ≫ F.map f = f ≫ V₁.str . obviously) restate_axiom hom.h' attribute [simp, reassoc] hom.h namespace hom /-- The identity morphism of an algebra of endofunctor `F` -/ def id : hom V V := { f := 𝟙 _ } instance : inhabited (hom V V) := ⟨{ f := 𝟙 _ }⟩ /-- The composition of morphisms between algebras of endofunctor `F` -/ def comp (f : hom V₀ V₁) (g : hom V₁ V₂) : hom V₀ V₂ := { f := f.1 ≫ g.1 } end hom instance (F : C ⥤ C) : category_struct (coalgebra F) := { hom := hom, id := hom.id, comp := @hom.comp _ _ _ } @[simp] lemma id_eq_id : coalgebra.hom.id V = 𝟙 V := rfl @[simp] lemma id_f : (𝟙 _ : V ⟶ V).1 = 𝟙 V.1 := rfl variables {V₀ V₁ V₂} (f : V₀ ⟶ V₁) (g : V₁ ⟶ V₂) @[simp] lemma comp_eq_comp : coalgebra.hom.comp f g = f ≫ g := rfl @[simp] lemma comp_f : (f ≫ g).1 = f.1 ≫ g.1 := rfl /-- Coalgebras of an endofunctor `F` form a category -/ instance (F : C ⥤ C) : category (coalgebra F) := {} /-- To construct an isomorphism of coalgebras, it suffices to give an isomorphism of the Vs which commutes with the structure morphisms. -/ @[simps] def iso_mk (h : V₀.1 ≅ V₁.1) (w : V₀.str ≫ F.map h.hom = h.hom ≫ V₁.str ) : V₀ ≅ V₁ := { hom := { f := h.hom }, inv := { f := h.inv, h' := by { rw [h.eq_inv_comp, ← category.assoc, ←w, category.assoc, ← functor.map_comp], simp only [iso.hom_inv_id, functor.map_id, category.comp_id] } } } /-- The forgetful functor from the category of coalgebras, forgetting the coalgebraic structure. -/ @[simps] def forget (F : C ⥤ C) : coalgebra F ⥤ C := { obj := λ A, A.1, map := λ A B f, f.1 } /-- A coalgebra morphism with an underlying isomorphism hom in `C` is a coalgebra isomorphism. -/ lemma iso_of_iso (f : V₀ ⟶ V₁) [is_iso f.1] : is_iso f := ⟨⟨{ f := inv f.1, h' := by { rw [is_iso.eq_inv_comp f.1, ← category.assoc, ← f.h, category.assoc], simp } }, by tidy⟩⟩ instance forget_reflects_iso : reflects_isomorphisms (forget F) := { reflects := λ A B, iso_of_iso } instance forget_faithful : faithful (forget F) := {} /-- An algebra morphism with an underlying epimorphism hom in `C` is an algebra epimorphism. -/ lemma epi_of_epi {X Y : coalgebra F} (f : X ⟶ Y) [h : epi f.1] : epi f := (forget F).epi_of_epi_map h /-- An algebra morphism with an underlying monomorphism hom in `C` is an algebra monomorphism. 
-/ lemma mono_of_mono {X Y : coalgebra F} (f : X ⟶ Y) [h : mono f.1] : mono f := (forget F).mono_of_mono_map h /-- From a natural transformation `α : F → G` we get a functor from coalgebras of `F` to coalgebras of `G`. -/ @[simps] def functor_of_nat_trans {F G : C ⥤ C} (α : F ⟶ G) : coalgebra F ⥤ coalgebra G := { obj := λ V, { V := V.1, str := V.str ≫ α.app V.1 }, map := λ V₀ V₁ f, { f := f.1, h' := by rw [category.assoc, ← α.naturality, ← category.assoc, f.h, category.assoc] } } /-- The identity transformation induces the identity endofunctor on the category of coalgebras. -/ @[simps {rhs_md := semireducible}] def functor_of_nat_trans_id : functor_of_nat_trans (𝟙 F) ≅ 𝟭 _ := nat_iso.of_components (λ X, iso_mk (iso.refl _) (by { dsimp, simp, })) (λ X Y f, by { ext, dsimp, simp }) /-- A composition of natural transformations gives the composition of corresponding functors. -/ @[simps {rhs_md := semireducible}] def functor_of_nat_trans_comp {F₀ F₁ F₂ : C ⥤ C} (α : F₀ ⟶ F₁) (β : F₁ ⟶ F₂) : functor_of_nat_trans (α ≫ β) ≅ functor_of_nat_trans α ⋙ functor_of_nat_trans β := nat_iso.of_components (λ X, iso_mk (iso.refl _) (by { dsimp, simp })) (λ X Y f, by { ext, dsimp, simp }) /-- If `α` and `β` are two equal natural transformations, then the functors of coalgebras induced by them are isomorphic. We define it like this as opposed to using `eq_to_iso` so that the components are nicer to prove lemmas about. -/ @[simps {rhs_md := semireducible}] def functor_of_nat_trans_eq {F G : C ⥤ C} {α β : F ⟶ G} (h : α = β) : functor_of_nat_trans α ≅ functor_of_nat_trans β := nat_iso.of_components (λ X, iso_mk (iso.refl _) (by { dsimp, simp [h] })) (λ X Y f, by { ext, dsimp, simp }) /-- Naturally isomorphic endofunctors give equivalent categories of coalgebras. Furthermore, they are equivalent as categories over `C`, that is, we have `equiv_of_nat_iso h ⋙ forget = forget`. -/ @[simps] def equiv_of_nat_iso {F G : C ⥤ C} (α : F ≅ G) : coalgebra F ≌ coalgebra G := { functor := functor_of_nat_trans α.hom, inverse := functor_of_nat_trans α.inv, unit_iso := functor_of_nat_trans_id.symm ≪≫ functor_of_nat_trans_eq (by simp) ≪≫ functor_of_nat_trans_comp _ _, counit_iso := (functor_of_nat_trans_comp _ _).symm ≪≫ functor_of_nat_trans_eq (by simp) ≪≫ functor_of_nat_trans_id }. end coalgebra namespace adjunction variables {F : C ⥤ C} {G : C ⥤ C} lemma algebra.hom_equiv_naturality_str (adj : F ⊣ G) (A₁ A₂ : algebra F) (f : A₁ ⟶ A₂) : (adj.hom_equiv A₁.A A₁.A) A₁.str ≫ G.map f.f = f.f ≫ (adj.hom_equiv A₂.A A₂.A) A₂.str := by { rw [← adjunction.hom_equiv_naturality_right, ← adjunction.hom_equiv_naturality_left, f.h] } lemma coalgebra.hom_equiv_naturality_str_symm (adj : F ⊣ G) (V₁ V₂ : coalgebra G) (f : V₁ ⟶ V₂) : F.map f.f ≫ ((adj.hom_equiv V₂.V V₂.V).symm) V₂.str = ((adj.hom_equiv V₁.V V₁.V).symm) V₁.str ≫ f.f := by { rw [← adjunction.hom_equiv_naturality_left_symm, ← adjunction.hom_equiv_naturality_right_symm, f.h] } /-- Given an adjunction `F ⊣ G`, the functor that associates to an algebra over `F` a coalgebra over `G` defined via adjunction applied to the structure map. -/ def algebra.to_coalgebra_of (adj : F ⊣ G) : algebra F ⥤ coalgebra G := { obj := λ A, { V := A.1, str := (adj.hom_equiv A.1 A.1).to_fun A.2 }, map := λ A₁ A₂ f, { f := f.1, h' := (algebra.hom_equiv_naturality_str adj A₁ A₂ f) } } /-- Given an adjunction `F ⊣ G`, the functor that associates to a coalgebra over `G` an algebra over `F` defined via adjunction applied to the structure map. 
-/ def coalgebra.to_algebra_of (adj : F ⊣ G) : coalgebra G ⥤ algebra F := { obj := λ V, { A := V.1, str := (adj.hom_equiv V.1 V.1).inv_fun V.2 }, map := λ V₁ V₂ f, { f := f.1, h' := (coalgebra.hom_equiv_naturality_str_symm adj V₁ V₂ f) } } /-- Given an adjunction, assigning to an algebra over the left adjoint a coalgebra over its right adjoint and going back is isomorphic to the identity functor. -/ def alg_coalg_equiv.unit_iso (adj : F ⊣ G) : 𝟭 (algebra F) ≅ (algebra.to_coalgebra_of adj) ⋙ (coalgebra.to_algebra_of adj) := { hom := { app := λ A, { f := (𝟙 A.1), h' := by { erw [F.map_id, category.id_comp, category.comp_id], apply (adj.hom_equiv _ _).left_inv A.str } }, naturality' := λ A₁ A₂ f, by { ext1, dsimp, erw [category.id_comp, category.comp_id], refl } }, inv := { app := λ A, { f := (𝟙 A.1), h' := by { erw [F.map_id, category.id_comp, category.comp_id], apply ((adj.hom_equiv _ _).left_inv A.str).symm } }, naturality' := λ A₁ A₂ f, by { ext1, dsimp, erw [category.comp_id, category.id_comp], refl } }, hom_inv_id' := by { ext, exact category.comp_id _ }, inv_hom_id' := by { ext, exact category.comp_id _ } } /-- Given an adjunction, assigning to a coalgebra over the right adjoint an algebra over the left adjoint and going back is isomorphic to the identity functor. -/ def alg_coalg_equiv.counit_iso (adj : F ⊣ G) : (coalgebra.to_algebra_of adj) ⋙ (algebra.to_coalgebra_of adj) ≅ 𝟭 (coalgebra G) := { hom := { app := λ V, { f := (𝟙 V.1), h' := by { dsimp, erw [G.map_id, category.id_comp, category.comp_id], apply (adj.hom_equiv _ _).right_inv V.str } }, naturality' := λ V₁ V₂ f, by { ext1, dsimp, erw [category.comp_id, category.id_comp], refl, } }, inv := { app := λ V, { f := (𝟙 V.1), h' := by { dsimp, rw [G.map_id, category.comp_id, category.id_comp], apply ((adj.hom_equiv _ _).right_inv V.str).symm } }, naturality' := λ V₁ V₂ f, by { ext1, dsimp, erw [category.comp_id, category.id_comp], refl } }, hom_inv_id' := by { ext, exact category.comp_id _ }, inv_hom_id' := by { ext, exact category.comp_id _ } } /-- If `F` is left adjoint to `G`, then the category of algebras over `F` is equivalent to the category of coalgebras over `G`. -/ def algebra_coalgebra_equiv (adj : F ⊣ G) : algebra F ≌ coalgebra G := { functor := algebra.to_coalgebra_of adj, inverse := coalgebra.to_algebra_of adj, unit_iso := alg_coalg_equiv.unit_iso adj, counit_iso := alg_coalg_equiv.counit_iso adj, functor_unit_iso_comp' := λ A, by { ext, exact category.comp_id _ } } end adjunction end endofunctor end category_theory
doit3times : (f : x -> x) -> (n : x) -> x doit3times f = f . f . f test_doit3times1 : doit3times (\n => n + 2) 3 = 9 test_doit3times1 = Refl test_doit3times2 : doit3times (\b => not b) True = False test_doit3times2 = Refl fltr : (test : a -> Bool) -> (l : List a) -> List a fltr test [] = [] fltr test (x :: xs) = if test x then x :: fltr test xs else fltr test xs evenb : (n : Nat) -> Bool evenb Z = True evenb (S (S k)) = evenb k evenb k = False test_filter1 : fltr Main.evenb [1, 2, 3, 4] = [2, 4] test_filter1 = Refl test_filter2 : fltr (\l => length l == 1) [[1, 2], [3], [4], [5, 6, 7], [], [8]] = [[3], [4], [8]] test_filter2 = Refl oddb : (n : Nat) -> Bool oddb Z = False oddb (S Z) = True oddb (S (S k)) = oddb k count_oddmembers : (l : List Nat) -> Nat count_oddmembers l = length $ fltr oddb l test_count_oddmembers1 : count_oddmembers [1, 0, 3, 1, 4, 5] = 4 test_count_oddmembers1 = Refl test_count_oddmembers2 : count_oddmembers [0, 2, 4] = 0 test_count_oddmembers2 = Refl test_count_oddmembers3 : count_oddmembers [] = 0 test_count_oddmembers3 = Refl test_anon_fn : doit3times (\n => mult n n) 2 = 256 test_anon_fn = Refl filter_even_gt7 : (l : List Nat) -> List Nat filter_even_gt7 = filter evenb . filter (\n => n > 7) test_filter_even_gt7_1 : filter_even_gt7 [1, 2, 6, 9, 10, 3, 12, 8] = [10, 12, 8] test_filter_even_gt7_1 = Refl test_filter_even_gt7_2 : filter_even_gt7 [5, 2, 6, 19, 129] = [] test_filter_even_gt7_2 = Refl partn : (test : a -> Bool) -> (l : List a) -> (List a, List a) partn test [] = ([], []) partn test (x :: xs) = let (ts, fs) = partn test xs in if test x then (x :: ts, fs) else (ts, x :: fs) test_partition1 : partn Main.oddb [1, 2, 3, 4, 5] = ([1, 3, 5], [2, 4]) test_partition1 = Refl test_partition2 : partn (\x => False) [5, 9, 0] = ([], [5, 9, 0]) test_partition2 = Refl test_map1 : map (\x => 3 + x) [2, 0, 2] = [5, 3, 5] test_map1 = Refl test_map2 : map Main.oddb [2, 1, 2, 5] = [False, True, False, True] test_map2 = Refl test_map3 : map (\n => [evenb n, oddb n]) [2, 1, 2, 5] = [[True, False], [False, True], [True, False], [False, True]] test_map3 = Refl
[STATEMENT] lemma (in fbrelation_pair) fbrelation_fcomp: "fbrelation (r\<^sub>1 \<circ>\<^sub>\<bullet> r\<^sub>2)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. fbrelation (r\<^sub>1 \<circ>\<^sub>\<bullet> r\<^sub>2) [PROOF STEP] using r\<^sub>1.fbrelation_axioms r\<^sub>2.fbrelation_axioms [PROOF STATE] proof (prove) using this: fbrelation r\<^sub>1 fbrelation r\<^sub>2 goal (1 subgoal): 1. fbrelation (r\<^sub>1 \<circ>\<^sub>\<bullet> r\<^sub>2) [PROOF STEP] by auto
Load MyLists. Search (nat -> nat -> bool). Fixpoint count (v:nat) (s:bag) : nat := match s with | [] => 0 | cons x s' => match (Nat.eqb x v) with | true => 1 + (count v s') | false => count v s' end end. Example test_count1: count 1 [1;2;3;1;4;1] = 3. Proof. simpl. reflexivity. Qed. Example test_count2: count 6 [1;2;3;1;4;1] = 0. Proof. simpl. reflexivity. Qed. Definition sum : bag -> bag -> bag := app. Example test_sum1: count 1 (sum [1;2;3] [1;4;1]) = 3. Proof. simpl. reflexivity. Qed. Definition add (v:nat) (s:bag) : bag := cons v s. Example test_add1: count 1 (add 1 [1;4;1]) = 3. Proof. simpl. reflexivity. Qed. Example test_add2: count 5 (add 1 [1;4;1]) = 0. Proof. simpl. reflexivity. Qed. Definition member (v:nat) (s:bag) : bool := negb (Nat.leb (count v s) 0). Example test_member1: member 1 [1;4;1] = true. Proof. simpl. reflexivity. Qed. Example test_member2: member 2 [1;4;1] = false. Proof. simpl. reflexivity. Qed.
State Before: R : Type u_1 inst✝ : CommRing R I : Ideal R ⊢ ∀ (a : R), a ∈ I → ↑(RingHom.comp (Quotient.mk (map C I)) C) a = 0 State After: R : Type u_1 inst✝ : CommRing R I : Ideal R a : R ha : a ∈ I ⊢ ↑(RingHom.comp (Quotient.mk (map C I)) C) a = 0 Tactic: intro a ha State Before: R : Type u_1 inst✝ : CommRing R I : Ideal R a : R ha : a ∈ I ⊢ ↑(RingHom.comp (Quotient.mk (map C I)) C) a = 0 State After: R : Type u_1 inst✝ : CommRing R I : Ideal R a : R ha : a ∈ I ⊢ ↑C a ∈ map C I Tactic: rw [RingHom.comp_apply, Quotient.eq_zero_iff_mem] State Before: R : Type u_1 inst✝ : CommRing R I : Ideal R a : R ha : a ∈ I ⊢ ↑C a ∈ map C I State After: no goals Tactic: exact mem_map_of_mem _ ha
lemma differentiable_cnj_iff: "(\<lambda>z. cnj (f z)) differentiable at x within A \<longleftrightarrow> f differentiable at x within A"
#ifndef NNQS_OPTIMIZERS_MODIFIEDADAM_HPP #define NNQS_OPTIMIZERS_MODIFIEDADAM_HPP #include <Eigen/Dense> #include <type_traits> #include "Utilities/type_traits.hpp" #include "Optimizers/Optimizer.hpp" namespace yannq { template <typename T> class ModifiedAdam : public OptimizerGeometry<T> { public: using typename OptimizerGeometry<T>::RealT; using typename OptimizerGeometry<T>::Vector; using typename OptimizerGeometry<T>::RealVector; private: const double alpha_; const double beta_; int t; Vector first_; RealVector second_; public: static constexpr double DEFAULT_PARAMS[] = {1e-3, 0.95}; static nlohmann::json defaultParams() { return nlohmann::json { {"name", "ModifiedAdam"}, {"alhpa", DEFAULT_PARAMS[0]}, {"beta", DEFAULT_PARAMS[1]}, }; } nlohmann::json params() const override { return nlohmann::json { {"name", "ModifiedAdam"}, {"alhpa", alpha_}, {"beta", beta_}, }; } ModifiedAdam(const nlohmann::json& params) : alpha_(params.value("alpha", DEFAULT_PARAMS[0])), beta_(params.value("beta", DEFAULT_PARAMS[1])), t{} { } ModifiedAdam(double alpha = DEFAULT_PARAMS[0], double beta = DEFAULT_PARAMS[1]) : alpha_(alpha), beta_(beta), t{} { } Vector getUpdate(const Vector& grad, const Vector& oloc) override { const double eps = 1e-5; auto norm = [](T x) -> RealT { return std::norm(x); }; if(t ==0) { first_ = oloc; second_ = RealVector::Zero(oloc.rows()); } else { Vector delta = oloc - first_; first_ += beta_*delta; RealVector g2 = delta.unaryExpr(norm); second_ *= (1-beta_); second_ += (1-beta_)*beta_*g2; } //RealVector denom = second_.unaryExpr([eps](RealT x)->RealT { return x + eps; }); RealVector denom = second_.array() + std::max(1.0*pow(0.9,t),eps); ++t; return -alpha_*grad.cwiseQuotient(denom); } }; template<typename T> constexpr double ModifiedAdam<T>::DEFAULT_PARAMS[]; } //namespace yannq #endif//NNQS_OPTIMIZERS_MODIFIEDADAM_HPP
Rowson writes that "in general one would assume that whatever advantage White has would be revealed most clearly in symmetrical positions." Accordingly, Watson, Suba, Evans, and the eminent player and theorist Aron Nimzowitsch (1886–1935) have all argued that it is in Black's interest to avoid symmetry. Nonetheless, even symmetrical opening lines sometimes illustrate the tenuous nature of White's advantage, in several respects.
Load LFindLoad. From lfind Require Import LFind. From QuickChick Require Import QuickChick. From adtind Require Import goal33. Derive Show for natural. Derive Arbitrary for natural. Instance Dec_Eq_natural : Dec_Eq natural. Proof. dec_eq. Qed. Lemma conj15eqsynthconj5 : forall (lv0 : natural) (lv1 : natural), (@eq natural (Succ (plus lv0 lv1)) (plus (Succ lv1) lv0)). Admitted. QuickChick conj15eqsynthconj5.
#include <string_view> #include <type_traits> #include <utility> namespace typeindex::detail{ /// \brief constexpr string length constexpr std::size_t strlen(char const* const str)noexcept{ auto iter = str; while(*iter != '\0') ++iter; return static_cast< std::size_t >(iter - str); } /// \brief A simple compile time string template < std::size_t N > struct ct_string{ /// \brief Copys c-string into array data template < std::size_t ... I > constexpr ct_string( char const* str, std::index_sequence< I ... > )noexcept: data{ *(str + I) ... } {} /// \brief The storage char const data[N]; /// \brief Use object as string_view constexpr std::string_view operator()()const noexcept{ return {data, N}; } }; /// \brief Returns the function name inclusive its template parameter T /// as text template < typename T > constexpr char const* r()noexcept{ // Adjust this line for your compiler return __PRETTY_FUNCTION__; } /// \brief A dummy type whichs text representation is known struct ref; using namespace std::literals::string_view_literals; /// \brief The expected text representation of struct ref constexpr auto ref_text = "typeindex::detail::ref"sv; /// \brief Owner of the type text representation of r< ref > constexpr ct_string< strlen(r< ref >()) > ref_data {r< ref >(), std::make_index_sequence< strlen(r< ref >()) >()}; /// \brief Length of text before the actual type begins constexpr std::size_t leading = ref_data().find(ref_text); static_assert(leading != std::string_view::npos, "Compiler not supported"); /// \brief Length of text which is not part of the actual type constexpr std::size_t overhead = ref_data().size() - ref_text.size(); /// \brief Owner of a type text representation template < typename T > struct raw_data{ /// \brief Length of the text representation of T static constexpr std::size_t size = strlen(r< T >() + overhead); /// \brief The text representation itself static constexpr ct_string< size > data {r< T >() + leading, std::make_index_sequence< size >()}; }; /// \brief Text representation of T template < typename T > constexpr auto raw_name = raw_data< T >::data(); /// \brief Cultivated version of the text representation of T template < typename T > constexpr std::string_view pretty_name()noexcept{ // Build a new ct_string object with a cultivated version of raw_name // (currently raw_name is pretty enough on all tested plattforms) return raw_name< T >; } template <typename ...> struct is_valid { static constexpr bool value = true; }; template <bool condition> struct when{}; template < typename T, typename = void > struct tag_of: tag_of< T, when< true > >{}; template < typename T, bool condition > struct tag_of< T, when< condition > >{ using type = void; }; template < typename T > struct tag_of< T, when< is_valid< typename T::type_tag >::value > >{ using type = typename T::type_tag; }; template < typename T > struct tag_of< T const >: tag_of< T >{}; template < typename T > struct tag_of< T volatile >: tag_of< T >{}; template < typename T > struct tag_of< T const volatile >: tag_of< T >{}; template < typename T > struct tag_of< T& >: tag_of< T >{}; template < typename T > struct tag_of< T&& >: tag_of< T >{}; template < typename T > using tag_of_t = typename tag_of< T >::type; /// \brief Unique type to identify a typeindex::type struct type_tag; template < typename > constexpr bool true_c = true; } namespace typeindex{ /// \brief Value representation class of a type at compile time template < typename T > struct type{ /// \brief Used by is_a_type using type_tag = detail::type_tag; // 
constexpr type()noexcept = default; // template < // typename Type, // typename = std::enable_if_t< is_a_type< Type > > > // constexpr type(Type)noexcept{} }; /// \brief true if Type< T > is a type representation, false otherwise template < typename Type, typename = void > constexpr bool is_a_type = std::is_same_v< detail::type_tag, detail::tag_of_t< Type > >; /// \brief Type trait to get the type member as type template < typename Type, typename = void > struct unpack_type: unpack_type< Type, detail::when< true > >{ static_assert(is_a_type< Type >); }; /// \brief Type trait to get the type member as type template < typename T, bool condition > struct unpack_type< type< T >, detail::when< condition > >{ using type = T; }; /// \brief Type trait to get the type template < typename Type, bool condition > struct unpack_type< Type, detail::when< condition > >{ using type = typename Type::type; }; template < typename T > struct unpack_type< T const >: unpack_type< T >{}; template < typename T > struct unpack_type< T volatile >: unpack_type< T >{}; template < typename T > struct unpack_type< T const volatile >: unpack_type< T >{}; template < typename T > struct unpack_type< T& >: unpack_type< T >{}; template < typename T > struct unpack_type< T&& >: unpack_type< T >{}; /// \brief Type trait to get the type from a type representator type template < typename T > using unpack_type_t = typename unpack_type< T >::type; /// \brief Value representation of a type at compile time template < typename T, typename = void > constexpr auto type_c = type< T >{}; /// \brief Use encapsulated type template < typename Type > constexpr auto type_c< Type, std::enable_if_t< is_a_type< Type > > > = type< unpack_type_t< Type > >{}; /// \brief Make a compile time value representation of a type with cvr /// stripped template < typename T, typename = std::enable_if_t< !is_a_type< T > > > constexpr type< std::remove_cv_t< std::remove_reference_t< T > > > typeid_(T&&)noexcept{ return{}; } /// \brief Overload if type is already a typeindex::type template < typename Type, typename = std::enable_if_t< is_a_type< Type > > > constexpr type< std::remove_cv_t< std::remove_reference_t< unpack_type_t< Type > > > > typeid_(Type&&)noexcept{ return{}; } /// \brief Make a compile time value representation of a type template < typename T, typename = std::enable_if_t< !is_a_type< T > > > constexpr type< T&& > typeid_with_cvr(T&&)noexcept{ return{}; } /// \brief Overload if type is already a typeindex::type template < typename Type, typename = std::enable_if_t< is_a_type< Type > > > constexpr type< unpack_type_t< Type > > typeid_with_cvr(Type&&)noexcept{ return{}; } /// \brief Callable Converter template < typename Converter > struct meta_call_with_cvr{ /// \brief Call the Converter's data function with the type of the /// given agument template < typename T > constexpr typename Converter::data_type operator()(T&& v)const noexcept{ return Converter::data(typeid_with_cvr(static_cast< T&& >(v))); } }; /// \brief Callable Converter template < typename Converter > struct meta_call{ /// \brief Call the Converter's data function with the type of the /// given agument template < typename T > constexpr typename Converter::data_type operator()(T&& v)const noexcept{ return Converter::data(typeid_(static_cast< T&& >(v))); } }; /// \brief The raw name of a type template < typename T, typename = void > constexpr std::string_view name_c = detail::raw_name< T >; /// \brief Use encapsulated type template < typename Type > constexpr std::string_view name_c< 
Type, std::enable_if_t< is_a_type< Type > > > = name_c< unpack_type_t< Type > >; /// \brief Convert class for name_c struct name_t{ using data_type = std::string_view; template < typename T > static constexpr data_type data(type< T >)noexcept{ return name_c< T >; } }; /// \brief Callable with object, returns its types name_c constexpr meta_call< name_t > name{}; /// \brief Callable with object, returns its types name_c constexpr meta_call_with_cvr< name_t > name_with_cvr{}; /// \brief The pretty name of a type template < typename T, typename = void > constexpr std::string_view pretty_name_c = detail::pretty_name< T >(); /// \brief Use encapsulated type template < typename Type > constexpr std::string_view pretty_name_c< Type, std::enable_if_t< is_a_type< Type > > > = pretty_name_c< unpack_type_t< Type > >; /// \brief Convert class for pretty_name_c struct pretty_name_t{ using data_type = std::string_view; template < typename T > static constexpr data_type data(type< T >)noexcept{ return pretty_name_c< T >; } }; /// \brief Callable with object, returns its types pretty_name_c constexpr meta_call< pretty_name_t > pretty_name{}; /// \brief Callable with object, returns its types pretty_name_c constexpr meta_call_with_cvr< pretty_name_t > pretty_name_with_cvr{}; /// \brief Pair of name and pretty name struct combined{ /// \brief Default constructor constexpr combined()noexcept = default; /// \brief Construct by a string_view template < typename T > constexpr combined(type< T >)noexcept : name(name_c< T >) , pretty_name(pretty_name_c< T >) {} /// \brief Runtime representation of the type std::string_view name; /// \brief Pretty runtime representation of the type std::string_view pretty_name; }; /// \brief The pretty name of a type template < typename T, typename = void > constexpr combined combined_name_c = combined(type_c< T >); /// \brief Use encapsulated type template < typename Type > constexpr combined combined_name_c< Type, std::enable_if_t< is_a_type< Type > > > = combined_name_c< unpack_type_t< Type > >; /// \brief Convert class for combined_name_c struct combined_name_t{ using data_type = combined; template < typename T > static constexpr data_type data(type< T >)noexcept{ return combined_name_c< T >; } }; /// \brief Callable with object, returns its types combined_name_c constexpr meta_call< combined_name_t > combined_name{}; /// \brief Callable with object, returns its types combined_name_c constexpr meta_call_with_cvr< combined_name_t > combined_name_with_cvr{}; constexpr bool operator==(combined const& a, combined const& b)noexcept{ return a.name == b.name; } constexpr bool operator!=(combined const& a, combined const& b)noexcept{ return a.name != b.name; } constexpr bool operator<(combined const& a, combined const& b)noexcept{ return a.name < b.name; } constexpr bool operator>(combined const& a, combined const& b)noexcept{ return a.name > b.name; } constexpr bool operator<=(combined const& a, combined const& b)noexcept{ return a.name <= b.name; } constexpr bool operator>=(combined const& a, combined const& b)noexcept{ return a.name >= b.name; } /// \brief A runtime type index template < typename Converter > struct basic_type_index{ public: /// \brief Default constructor constexpr basic_type_index()noexcept = default; /// \brief Construct by a string_view template < typename T > constexpr basic_type_index(type< T > t)noexcept : data(Converter::data(t)) {} /// \brief Runtime representation of the type typename Converter::data_type data; }; template < typename Converter > constexpr 
bool operator==( basic_type_index< Converter > const& a, basic_type_index< Converter > const& b )noexcept{ return a.data == b.data; } template < typename Converter > constexpr bool operator!=( basic_type_index< Converter > const& a, basic_type_index< Converter > const& b )noexcept{ return a.data != b.data; } template < typename Converter > constexpr bool operator<( basic_type_index< Converter > const& a, basic_type_index< Converter > const& b )noexcept{ return a.data < b.data; } template < typename Converter > constexpr bool operator>( basic_type_index< Converter > const& a, basic_type_index< Converter > const& b )noexcept{ return a.data > b.data; } template < typename Converter > constexpr bool operator<=( basic_type_index< Converter > const& a, basic_type_index< Converter > const& b )noexcept{ return a.data <= b.data; } template < typename Converter > constexpr bool operator>=( basic_type_index< Converter > const& a, basic_type_index< Converter > const& b )noexcept{ return a.data >= b.data; } /// \brief Use the raw name of the type as runtime representation using type_index = basic_type_index< name_t >; /// \brief Use the pretty name of the type as runtime representation using pretty_type_index = basic_type_index< pretty_name_t >; /// \brief Use the name of the type as runtime representation, but also /// provide the pretty_name using combined_type_index = basic_type_index< combined_name_t >; } #include <functional> namespace std{ /// \brief Hash by name, ignore pretty name template <> struct hash< typeindex::combined >{ std::size_t operator()(typeindex::combined data)const noexcept(noexcept(hash< std::string_view >()(data.name))){ return hash< std::string_view >()(data.name); } }; /// \brief Hash is the same as the hash of its name template < typename Converter > struct hash< typeindex::basic_type_index< Converter > >{ constexpr std::size_t operator()( typeindex::basic_type_index< Converter > index )const noexcept(noexcept( hash< typename Converter::data_type >()(index.data) )){ return hash< typename Converter::data_type >()(index.data); } }; } #include <iostream> #include <iomanip> #include <boost/hana.hpp> /// \brief Every typeindex::type is a type representator template < typename T > constexpr bool typeindex::is_a_type< T, std::enable_if_t< boost::hana::is_a< boost::hana::type_tag, T > > > = true; template < typename > struct A{}; template < typename, typename > struct B{}; template < typename ... 
> struct C{}; int main(){ using namespace typeindex; std::cout << std::boolalpha; std::cout << detail::ref_data() << '\n'; std::cout << is_a_type< bool > << '\n'; std::cout << is_a_type< type< bool > > << '\n'; std::cout << is_a_type< type< type< bool > > > << '\n'; std::cout << is_a_type< boost::hana::basic_type< bool > > << '\n'; std::cout << name_c< type< type< bool& > > > << '\n'; std::cout << name_c< boost::hana::basic_type< char& > > << '\n'; std::cout << name_c< signed char& > << '\n'; std::cout << name_c< unsigned char > << '\n'; std::cout << name_c< int > << '\n'; std::cout << name_c< unsigned int > << '\n'; std::cout << name_c< float > << '\n'; std::cout << name_c< double > << '\n'; std::cout << name(type_c< float const >) << '\n'; std::cout << name(boost::hana::type_c< long const& >) << '\n'; std::cout << name(false) << '\n'; std::cout << name('a') << '\n'; std::cout << name_with_cvr(type_c< float const >) << '\n'; std::cout << name_with_cvr(boost::hana::type_c< long const& >) << '\n'; std::cout << name_with_cvr(false) << '\n'; std::cout << name_with_cvr('a') << '\n'; // std::cout << name< tmpwrap< A > > << '\n'; // std::cout << name< tmpwrap< B > > << '\n'; // std::cout << name< tmpwrap< C > > << '\n'; // std::cout << pretty_template_name< A > << '\n'; // std::cout << pretty_template_name< B > << '\n'; // std::cout << pretty_template_name< C > << '\n'; }
The modulus of a complex number is less than or equal to the modulus of the sum of that number and another complex number, plus the modulus of the second complex number.
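For clarity, here is the same statement written out as a formula together with the one-line reasoning behind it (a sketch reconstructed from the informal statement above, not the source's own formalisation): for complex numbers $z$ and $w$,

$$\lvert z\rvert \;=\; \lvert (z + w) + (-w)\rvert \;\le\; \lvert z + w\rvert + \lvert -w\rvert \;=\; \lvert z + w\rvert + \lvert w\rvert,$$

i.e. the claim is just the triangle inequality applied to $z + w$ and $-w$.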
(* Calls to external decision procedures *) Require Export ZArith. Require Export Classical. (* Zenon *) (* Copyright 2004 INRIA *) Lemma zenon_nottrue : (~True -> False). Proof. tauto. Qed. Lemma zenon_noteq : forall (T : Type) (t : T), ((t <> t) -> False). Proof. tauto. Qed. Lemma zenon_and : forall P Q : Prop, (P -> Q -> False) -> (P /\ Q -> False). Proof. tauto. Qed. Lemma zenon_or : forall P Q : Prop, (P -> False) -> (Q -> False) -> (P \/ Q -> False). Proof. tauto. Qed. Lemma zenon_imply : forall P Q : Prop, (~P -> False) -> (Q -> False) -> ((P -> Q) -> False). Proof. tauto. Qed. Lemma zenon_equiv : forall P Q : Prop, (~P -> ~Q -> False) -> (P -> Q -> False) -> ((P <-> Q) -> False). Proof. tauto. Qed. Lemma zenon_notand : forall P Q : Prop, (~P -> False) -> (~Q -> False) -> (~(P /\ Q) -> False). Proof. tauto. Qed. Lemma zenon_notor : forall P Q : Prop, (~P -> ~Q -> False) -> (~(P \/ Q) -> False). Proof. tauto. Qed. Lemma zenon_notimply : forall P Q : Prop, (P -> ~Q -> False) -> (~(P -> Q) -> False). Proof. tauto. Qed. Lemma zenon_notequiv : forall P Q : Prop, (~P -> Q -> False) -> (P -> ~Q -> False) -> (~(P <-> Q) -> False). Proof. tauto. Qed. Lemma zenon_ex : forall (T : Type) (P : T -> Prop), (forall z : T, ((P z) -> False)) -> ((exists x : T, (P x)) -> False). Proof. firstorder. Qed. Lemma zenon_all : forall (T : Type) (P : T -> Prop) (t : T), ((P t) -> False) -> ((forall x : T, (P x)) -> False). Proof. firstorder. Qed. Lemma zenon_notex : forall (T : Type) (P : T -> Prop) (t : T), (~(P t) -> False) -> (~(exists x : T, (P x)) -> False). Proof. firstorder. Qed. Lemma zenon_notall : forall (T : Type) (P : T -> Prop), (forall z : T, (~(P z) -> False)) -> (~(forall x : T, (P x)) -> False). Proof. intros T P Ha Hb. apply Hb. intro. apply NNPP. exact (Ha x). Qed. Lemma zenon_equal_base : forall (T : Type) (f : T), f = f. Proof. auto. Qed. Lemma zenon_equal_step : forall (S T : Type) (fa fb : S -> T) (a b : S), (fa = fb) -> (a <> b -> False) -> ((fa a) = (fb b)). Proof. intros. rewrite (NNPP (a = b)). congruence. auto. Qed. Lemma zenon_pnotp : forall P Q : Prop, (P = Q) -> (P -> ~Q -> False). Proof. intros P Q Ha. rewrite Ha. auto. Qed. Lemma zenon_notequal : forall (T : Type) (a b : T), (a = b) -> (a <> b -> False). Proof. auto. Qed. Ltac zenon_intro id := intro id || let nid := fresh in (intro nid; clear nid) . Definition zenon_and_s := fun P Q a b => zenon_and P Q b a. Definition zenon_or_s := fun P Q a b c => zenon_or P Q b c a. Definition zenon_imply_s := fun P Q a b c => zenon_imply P Q b c a. Definition zenon_equiv_s := fun P Q a b c => zenon_equiv P Q b c a. Definition zenon_notand_s := fun P Q a b c => zenon_notand P Q b c a. Definition zenon_notor_s := fun P Q a b => zenon_notor P Q b a. Definition zenon_notimply_s := fun P Q a b => zenon_notimply P Q b a. Definition zenon_notequiv_s := fun P Q a b c => zenon_notequiv P Q b c a. Definition zenon_ex_s := fun T P a b => zenon_ex T P b a. Definition zenon_notall_s := fun T P a b => zenon_notall T P b a. Definition zenon_pnotp_s := fun P Q a b c => zenon_pnotp P Q c a b. Definition zenon_notequal_s := fun T a b x y => zenon_notequal T a b y x. (* Ergo *) Set Implicit Arguments. Section congr. Variable t:Type. Lemma ergo_eq_concat_1 : forall (P:t -> Prop) (x y:t), P x -> x = y -> P y. Proof. intros; subst; auto. Qed. Lemma ergo_eq_concat_2 : forall (P:t -> t -> Prop) (x1 x2 y1 y2:t), P x1 x2 -> x1 = y1 -> x2 = y2 -> P y1 y2. Proof. intros; subst; auto. Qed. End congr.
Formal statement is: lemma INF_Lim: fixes X :: "nat \<Rightarrow> 'a::{complete_linorder,linorder_topology}" assumes dec: "decseq X" and l: "X \<longlonglongrightarrow> l" shows "(INF n. X n) = l" Informal statement is: If $X$ is a decreasing sequence in a complete linear order (with the order topology) that converges to $l$, then $\inf_{n \in \mathbb{N}} X_n = l$.
function [V, S, varexp, w, Yhat] = plssquash(X, Y, varargin) % Decomposes data X into K components that are ordered in their % covariance with Y, designed to predict orthogonal parts of Y % % :Usage: % :: % % [V, S, varexp, w, Yhat] = plssquash(X, Y, varargin) % % :Optional Inputs: % - case {'noplot'}, turn off plotting % - case 'ndims', save only first ndims (K) vectors % % :Outputs: % % **V:** % 'eigenvetors', or weights, on data (columns) % % **S:** % score matrix, N x K % % **varexp:** % sqrt(r-square) with first k components predicting Y % % **w:** % V*b, integrated weights. for predicting new data, pred = X*w % % **Yhat:** % X*V*b, or S*b % % .. % Tor Wager, 9/12/09 % .. doplot = 1; ndims = length(Y) - 1; for i = 1:length(varargin) if ischar(varargin{i}) switch varargin{i} case {'noplot'}, doplot = 0; case 'ndims', ndims = varargin{i + 1}; otherwise, warning(['Unknown input string option:' varargin{i}]); end end end % univariate covariance-based weights % imperfect prediction X = X - repmat(mean(X), size(X, 1), 1); %scale(X); Y = scale(Y); % % neither V nor S appears to have orthogonal columns, though many are % later columns, as df in Y is approached, appear to be highly colinear % this may be because all variance in Y is basically explained... clear V % V is voxel weights for each component 1:K clear S rY = Y; % Y values to successively predict; intialize to Y for i = 1:ndims %ceil(length(Y)./2) % V = voxel weights, based on univariate relationship with rY V(:,i) = (X' * rY); %.^ .5; V(:,i) = V(:,i) ./ norm(V(:,i)); % score matrix, N x K % these will ultimately be predictors for Y % chosen to maximize predictive power with few orthogonal components S = X*V; rY = Y - S * pinv(S) * Y; b = pinv(S) * Y; Yhat = S * b; rsq(i) = 1 - var(rY) ./ var(Y); if doplot create_figure('Fit'); plot(Yhat, Y, 'kx'); refline; rsq_adj(i) = 1 - (1-rsq(i)) * ((size(Y,1)-1) ./ (size(Y,1)-i-1)); title(sprintf('cum. r-squared = %3.1f, adj = %3.1f', 100*rsq(i), 100*rsq_adj(i))); drawnow; pause(.3) end end varexp = sqrt(rsq); b = pinv(S) * Y; Yhat = S * b; w = V * b; % final weight vector %prediction = mean(training) + data*S*b %Yhat = X * V * b; %figure; plot(Yhat, Y, 'kx') end
lemma norm_add_less: "norm x < r \<Longrightarrow> norm y < s \<Longrightarrow> norm (x + y) < r + s"
""" modularity(g, c, distmx=weights(g), γ=1.0) Return a value representing Newman's modularity `Q` for the undirected and directed graph `g` given the partitioning vector `c`. This method also supports weighted graphs if the distance matrix is provided. Modularity ``Q`` for undirected graph: ```math Q = \\frac{1}{2m} \\sum_{c} \\left( e_{c} - \\gamma \\frac{K_c^2}{2m} \\right) ``` Modularity ``Q`` for directed graph: ```math Q = \\frac{1}{m} \\sum_{c} \\left( e_{c} - \\gamma \\frac{K_c^{in} K_c^{out}}{m} \\right) ``` where: - ``m``: total number of edges in the network - ``e_c``: number of edges in community ``c`` - ``K_c``: sum of the degrees of the nodes in community ``c`` or the sum of the weighted degree of the nodes in community ``c`` when the graph is weighted. ``K_c^{in}`` sum of the in-degrees of the nodes in community ``c``. ### Optional Arguments - `distmx=weights(g)`: distance matrix for weighted graphs - `γ=1.0`: where `γ > 0` is a resolution parameter. When the modularity is used to find communities structure in networks (i.e with [Louvain's method for community detection](https://en.wikipedia.org/wiki/Louvain_Modularity)), higher resolutions lead to more communities, while lower resolutions lead to fewer communities. Where `γ=1.0` it lead to the traditional definition of the modularity. ### References - M. E. J. Newman and M. Girvan. "Finding and evaluating community structure in networks". Phys. Rev. E 69, 026113 (2004). [(arXiv)](https://arxiv.org/abs/cond-mat/0308217) - J. Reichardt and S. Bornholdt. "Statistical mechanics of community detection". Phys. Rev. E 74, 016110 (2006). [(arXiv)](https://arxiv.org/abs/cond-mat/0603718) - E. A. Leicht and M. E. J. Newman. "Community structure in directed networks". Physical Review Letter, 100:118703, (2008). [(arXiv)](https://arxiv.org/pdf/0709.4500.pdf) # Examples ```jldoctest julia> using LightGraphs julia> barbell = blockdiag(complete_graph(3), complete_graph(3)); julia> add_edge!(barbell, 1, 4); julia> modularity(barbell, [1, 1, 1, 2, 2, 2]) 0.35714285714285715 julia> modularity(barbell, [1, 1, 1, 2, 2, 2], γ=0.5) 0.6071428571428571 julia> using SimpleWeightedGraphs julia> triangle = SimpleWeightedGraph(3); julia> add_edge!(triangle, 1, 2, 1); julia> add_edge!(triangle, 2, 3, 1); julia> add_edge!(triangle, 3, 1, 1); julia> barbell = blockdiag(triangle, triangle); julia> add_edge!(barbell, 1, 4, 5); # this edge has a weight of 5 julia> modularity(barbell, [1, 1, 1, 2, 2, 2]) 0.045454545454545456 ``` """ function modularity( g::AbstractGraph, c::AbstractVector{<:Integer}; distmx::AbstractArray{<:Number}=weights(g), γ=1.0 ) m = sum([distmx[src(e), dst(e)] for e in edges(g)]) m = is_directed(g) ? m : 2 * m m == 0 && return 0. nc = maximum(c) kin = zeros(Float32, nc) kout = zeros(Float32, nc) Q = 0.0 for u in vertices(g) for v in neighbors(g, u) c1 = c[u] c2 = c[v] if c1 == c2 Q += distmx[u,v] end kout[c1] += distmx[u,v] kin[c2] += distmx[u,v] end end Q = Q * m @inbounds for i = 1:nc Q -= γ * kin[i] * kout[i] end return Q / m^2 end
module Data.CatQueue import Data.List.Properties import Interfaces.Verified %hide Prelude.List.reverse %hide Prelude.Strings.reverse %hide Prelude.Strings.null %hide Prelude.Strings.(++) %default total %access public export data CatQueue a = MkCatQueue (List a) (List a) data NonEmptyCatQueue : CatQueue a -> Type where IsNonEmptyCatQueue : Either (NonEmpty x) (NonEmpty y) -> NonEmptyCatQueue (MkCatQueue x y) data EmptyCatQueue : CatQueue a -> Type where IsEmptyCatQueue : EmptyCatQueue (MkCatQueue [] []) data SingletonCatQueue : CatQueue a -> Type where IsSingletonCatQueueLeft : SingletonCatQueue (MkCatQueue [x] []) IsSingletonCatQueueRight : SingletonCatQueue (MkCatQueue [] [x]) -------------------------------------------------------------------------------- -- Implementations -------------------------------------------------------------------------------- Uninhabited (NonEmptyCatQueue (MkCatQueue [] [])) where uninhabited (IsNonEmptyCatQueue (Left IsNonEmpty)) impossible uninhabited (IsNonEmptyCatQueue (Right IsNonEmpty)) impossible Show q => Show (CatQueue q) where show (MkCatQueue xs ys) = "MkCatQueue " <+> show xs <+> " " <+> show ys Eq q => Eq (CatQueue q) where (==) (MkCatQueue xs ys) (MkCatQueue zs ws) = xs <+> reverse ys == zs <+> reverse ws (/=) x y = not (x == y) Ord q => Ord (CatQueue q) where compare (MkCatQueue [] []) (MkCatQueue [] []) = EQ compare (MkCatQueue [] []) _ = LT compare _ (MkCatQueue [] []) = GT compare (MkCatQueue xs ys) (MkCatQueue zs ws) = compare (xs <+> reverse ys) (zs <+> reverse ws) Semigroup (CatQueue q) where (<+>) (MkCatQueue xs ys) (MkCatQueue zs ws) = MkCatQueue ((xs <+> reverse ys) <+> (zs <+> reverse ws)) [] Monoid (CatQueue q) where neutral = MkCatQueue [] [] Functor CatQueue where map f (MkCatQueue xs ys) = MkCatQueue (map f xs) (map f ys) Foldable CatQueue where foldr f init (MkCatQueue xs ys) = foldr f init (xs <+> reverse ys) foldl f init (MkCatQueue xs ys) = foldl f init (xs <+> reverse ys) -------------------------------------------------------------------------------- -- Verified implementations -------------------------------------------------------------------------------- -- TODO: how do I formally write that this is an axiom? private axiomCatQueueSame : (xs : List a) -> (ys : List a) -> MkCatQueue xs ys = MkCatQueue (xs <+> reverse ys) [] axiomCatQueueSame xs ys = ?axiomCatQueueSame_rhs -- However, we can't use this axiom due to some limitations of ITT. Even though we have an axiom saying that these -- two CatQueues are the same, we can contradict this axiom easily: private inconsistentAxiom : MkCatQueue xs ys = MkCatQueue (xs <+> reverse ys) [] -> Void inconsistentAxiom = case axiomCatQueueSame [] [1] of Refl impossible -- This could be viewed as a reasonable compromise, since we're only using it internally to prove that other -- properties hold. 
But it severely limits the ability to provide proofs for this type of data structure, specifically -- any data structure where two different constructions are considered equal VerifiedFunctor CatQueue where functorIdentity (MkCatQueue xs ys) = rewrite functorIdentity xs in rewrite functorIdentity ys in Refl functorComposition (MkCatQueue xs ys) g1 g2 = rewrite functorComposition xs g1 g2 in rewrite functorComposition ys g1 g2 in Refl VerifiedSemigroup (CatQueue q) where semigroupOpIsAssociative (MkCatQueue ls ls') (MkCatQueue cs cs') (MkCatQueue rs rs') = rewrite appendAssociative (ls ++ reverse' [] ls') ((cs ++ reverse' [] cs') ++ rs ++ reverse' [] rs') [] in rewrite appendNilRightNeutral ((ls ++ reverse' [] ls') ++ cs ++ reverse' [] cs') in rewrite appendNilRightNeutral ((ls ++ reverse' [] ls') ++ (cs ++ reverse' [] cs') ++ rs ++ reverse' [] rs') in rewrite appendAssociative (ls ++ reverse' [] ls') (cs ++ reverse' [] cs') (rs ++ reverse' [] rs') in Refl VerifiedMonoid (CatQueue q) where monoidNeutralIsNeutralL (MkCatQueue xs ys) = rewrite appendNilRightNeutral (xs ++ reverse' [] ys) in rewrite axiomCatQueueSame xs ys in Refl monoidNeutralIsNeutralR (MkCatQueue xs ys) = rewrite axiomCatQueueSame xs ys in Refl -------------------------------------------------------------------------------- -- Functions -------------------------------------------------------------------------------- -- | Test whether a queue is empty. -- | -- | Running time: `O(1)` null : CatQueue a -> Bool null (MkCatQueue [] []) = True null _ = False -- | Append an element to the end of the queue, creating a new queue. -- | -- | Running time: `O(1)` snoc : CatQueue a -> a -> CatQueue a snoc (MkCatQueue l r) a = MkCatQueue l (a :: r) -- | Decompose a queue into a `Tuple` of the first element and the rest of the queue. -- | -- | Running time: `O(1)` -- | -- | Note that any single operation may run in `O(n)`. uncons : (q : CatQueue a) -> {auto prf : NonEmptyCatQueue q} -> (a, CatQueue a) uncons (MkCatQueue [] []) {prf} = absurd prf uncons (MkCatQueue [] (x :: xs)) = uncons_prf (x :: xs) {prf' = reverseNonEmpty (x :: xs) IsNonEmpty} where uncons_prf : (l : List a) -> {auto prf : NonEmpty l} -> {prf' : NonEmpty (reverse l)} -> (a, CatQueue a) uncons_prf l = assert_total $ uncons (MkCatQueue (reverse l) []) uncons (MkCatQueue (x :: xs) ys) = (x, (MkCatQueue xs ys)) -- | Decompose a queue into a `Tuple` of the first element and the rest of the queue. -- | -- | Running time: `O(1)` -- | -- | Note that any single operation may run in `O(n)`. 
uncons' : CatQueue a -> Maybe (a, (CatQueue a)) uncons' (MkCatQueue [] []) = Nothing uncons' (MkCatQueue [] r) = assert_total $ uncons' (MkCatQueue (reverse r) []) uncons' (MkCatQueue (a :: as) r) = Just (a, (MkCatQueue as r)) -------------------------------------------------------------------------------- -- Properties -------------------------------------------------------------------------------- snocNotEmpty : (x : a) -> (q : CatQueue a) -> {auto prf : EmptyCatQueue q} -> NonEmptyCatQueue (snoc q x) snocNotEmpty x q@(MkCatQueue [] []) = the (NonEmptyCatQueue (snoc q x)) (IsNonEmptyCatQueue (Right IsNonEmpty)) singletonNonEmpty : SingletonCatQueue q -> NonEmptyCatQueue q singletonNonEmpty IsSingletonCatQueueLeft = IsNonEmptyCatQueue (Left IsNonEmpty) singletonNonEmpty IsSingletonCatQueueRight = IsNonEmptyCatQueue (Right IsNonEmpty) snocEmptySingleton : (x : a) -> (q : CatQueue a) -> {auto prf : EmptyCatQueue q} -> SingletonCatQueue (snoc q x) snocEmptySingleton x (MkCatQueue [] []) = IsSingletonCatQueueRight nonEmptyNotNull : (q : CatQueue a) -> {auto prf : NonEmptyCatQueue q} -> null q = False nonEmptyNotNull (MkCatQueue [] []) {prf} = absurd prf nonEmptyNotNull (MkCatQueue (x :: xs) []) = Refl nonEmptyNotNull (MkCatQueue [] (y :: ys)) = Refl nonEmptyNotNull (MkCatQueue (x :: xs) (y :: ys)) = Refl Uninhabited (False = True) where uninhabited Refl impossible nullDecidable : (q : CatQueue a) -> Dec (null q = True) nullDecidable (MkCatQueue [] []) = Yes Refl nullDecidable (MkCatQueue (x::xs) []) = No uninhabited nullDecidable (MkCatQueue [] (x::xs)) = No uninhabited nullDecidable (MkCatQueue (x::xs) (y::ys)) = No uninhabited
YouTube is the most popular video platform, and it is free to use, free to share on, and free to upload to (anyone can watch, although only registered users can upload). Sometimes, though, you will run into an error message saying "This video is not available in your country." This happens when a government or the channel itself restricts a video to particular countries, so viewers everywhere else only see that message. Let's look at several ways to get around the 'this video is not available in your country' error so it no longer stops you from watching videos online.

Using a VPN is one of the easiest and most effective fixes. This guide uses PD Proxy VPN, but any VPN of your choice will do (just make sure you pick one with high-speed servers). First, download PD Proxy and get a premium account. Next, select a server in a country where the video is available and click Connect. Then open the video page and enjoy the video without any restriction.

Another option is a browser extension called Hola. Download the Hola extension for Chrome, Firefox, or another browser, then click the Hola icon located above the bookmarks bar. You will see a set of country flags; select a country where the video is available and reload the page. The video should now play normally, and the same approach can also serve as a fix for this error on Android.

Downloading the blocked YouTube video is another workaround. Watching a video online uses roughly the same data as downloading it, so it can be better to save it and watch it later when you are offline. Click the download button, select the video quality, and the video will be saved so you can watch it offline without hitting the error in the middle.

One of the simplest tricks is to modify the video URL. Open the blocked video and, in the address bar, replace /watch?v= with /v/. The resulting embed-style URL often bypasses the country restriction, so you can watch the video without any problem (a short sketch of this rewrite appears after this article).

Finally, you can use YouTube proxies. Since uploaders sometimes restrict a video to a specific country, routing your request through a proxy in that country gives you access for free. This method is much like the VPN method, except there is no need to install any additional software.

You can get YouTube proxies from free proxy list pages. Hopefully this article helps you watch blocked YouTube videos online; the same methods also help with related errors, such as videos not being available in your country in the Play Store. Start using the methods above to fix blocked videos in your country. We hope you liked the article and now understand how to watch YouTube videos that are blocked in your country!
On February 23, 2008, Fey hosted the first episode of SNL after the 2007–2008 Writers Guild of America strike. For this appearance, she was nominated for an Emmy in the category of Individual Performance in a Variety or Music Program. Fey hosted SNL for a second time on April 10, 2010, and for her appearance she received an Emmy nomination for Outstanding Guest Actress in a Comedy Series.
{-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE RebindableSyntax #-} {-# LANGUAGE TypeSynonymInstances #-} module DSLsofMath.W06 where import DSLsofMath.FunExp hiding (eval, derive) import DSLsofMath.W05 import DSLsofMath.Algebra import Prelude hiding (Num(..),(/),(^),Fractional(..),Floating(..)) import Prelude (abs) import Data.Complex () evalAll :: Transcendental a => FunExp -> [a -> a] evalAll e = (evalFunExp e) : evalAll (derive e) help a b as bs = as * (b : bs) + (a : as) * bs mulStream :: Ring a => Stream a -> Stream a -> Stream a mulStream [] _ = [] mulStream _ [] = [] mulStream (a : as) (b : bs) = (a*b) : (as * (b : bs) + (a : as) * bs) type Stream a = [a] instance Additive a => Additive (Stream a) where zero = repeat zero (+) = addStream instance AddGroup a => AddGroup (Stream a) where negate = negStream instance Ring a => Multiplicative (Stream a) where one = one : zero (*) = mulStream addStream :: Additive a => Stream a -> Stream a -> Stream a addStream = zipWithLonger (+) negStream :: AddGroup a => Stream a -> Stream a negStream = map negate type Taylor a = Stream a toMaclaurin :: Ring a => PowerSeries a -> Taylor a toMaclaurin (Poly as) = zipWith (*) as factorials fromMaclaurin :: Field a => Taylor a -> PowerSeries a fromMaclaurin as = Poly (zipWith (/) as factorials) factorials :: Ring a => [a] factorials = factorialsFrom 0 1 factorialsFrom :: Ring a => a -> a -> [a] factorialsFrom n factn = factn : factorialsFrom (n+1) (factn * (n + 1)) ex3, ex4 :: (Eq a, Field a) => Taylor a ex3 = toMaclaurin (x^3 + two * x) ex4 = toMaclaurin sinx ida a = toMaclaurin (evalP (X :+: Const a)) d f a = take 10 (toMaclaurin (evalP (f (X :+: Const a)))) dP f a = toMaclaurin (f (idx + Poly [a])) integT :: a -> Taylor a -> Taylor a integT = (:) integ :: Field a => a -> PowerSeries a -> PowerSeries a integ a0 (Poly as) = Poly (integL a0 as) integL :: Field a => a -> [a] -> [a] integL c cs = c : zipWith (/) cs oneUp type PS a = PowerSeries a solve :: Field a => a -> (PS a -> PS a) -> PS a solve f0 g = f -- solves |f' = g f|, |f 0 = f0| where f = integ f0 (g f) idx :: Field a => PS a idx = solve 0 (\_f -> 1) -- \(f'(x) = 1\), \(f(0) = 0\) expx :: Field a => PS a expx = solve 1 (\f -> f) -- \(f'(x) = f(x)\), \(f(0) = 1\) expf :: Field a => a -> a expf = evalPS 100 expx testExp :: Double testExp = maximum (map diff [0,0.001..1::Double]) where diff = abs . 
(expf - exp) -- using the function instance for |exp| testExpUnits :: Double testExpUnits = testExp / epsilon epsilon :: Double -- one bit of |Double| precision epsilon = last (takeWhile (\x -> 1 + x /= 1) (iterate (/2) 1)) sinx, cosx :: Field a => PS a sinx = integ 0 cosx cosx = integ 1 (-sinx) sinf, cosf :: Field a => a -> a sinf = evalPS 100 sinx cosf = evalPS 100 cosx sx,cx::[Double] sx = 0 : 1 : neg 0 : frac (neg 1) 6 : error "TODO" cx = 1 : neg 0 : frac (neg 1) 2 : 0 : error "TODO" instance (Eq a, Transcendental a) => Transcendental (PowerSeries a) where pi = Poly [pi] exp = expPS sin = sinPS cos = cosPS expPS, sinPS, cosPS :: (Eq a, Transcendental a) => PS a -> PS a expPS as = integ (exp (val as)) (expPS as * deriv as) sinPS as = integ (sin (val as)) (cosPS as * deriv as) cosPS as = integ (cos (val as)) (-sinPS as * deriv as) val :: Additive a => PS a -> a val (Poly (a:_)) = a val _ = zero evalP :: (Eq r, Transcendental r) => FunExp -> PS r evalP (Const x) = Poly [fromRational (toRational x)] evalP (e1 :+: e2) = evalP e1 + evalP e2 evalP (e1 :*: e2) = evalP e1 * evalP e2 evalP X = idx evalP (Negate e) = negate (evalP e) evalP (Recip e) = recip (evalP e) evalP (Exp e) = exp (evalP e) evalP (Sin e) = sin (evalP e) evalP (Cos e) = cos (evalP e) evalFunExp :: Transcendental a => FunExp -> a -> a evalFunExp (Const alpha) = const (fromRational (toRational alpha)) evalFunExp X = id evalFunExp (e1 :+: e2) = evalFunExp e1 + evalFunExp e2 evalFunExp (e1 :*: e2) = evalFunExp e1 * evalFunExp e2 evalFunExp (Exp e) = exp (evalFunExp e) evalFunExp (Sin e) = sin (evalFunExp e) evalFunExp (Cos e) = cos (evalFunExp e) evalFunExp (Recip e) = recip (evalFunExp e) evalFunExp (Negate e) = negate (evalFunExp e) derive (Const _) = Const 0 derive X = Const 1 derive (e1 :+: e2) = derive e1 :+: derive e2 derive (e1 :*: e2) = (derive e1 :*: e2) :+: (e1 :*: derive e2) derive (Recip e) = let re = Recip e in Negate (re:*:re) :*: derive e derive (Negate e) = Negate (derive e) derive (Exp e) = Exp e :*: derive e derive (Sin e) = Cos e :*: derive e derive (Cos e) = Const (-1) :*: Sin e :*: derive e instance Additive FunExp where (+) = (:+:) zero = Const 0 instance AddGroup FunExp where negate x = Const (-1) * x instance Multiplicative FunExp where (*) = (:*:) one = Const 1 instance MulGroup FunExp where recip = Recip instance Transcendental FunExp where pi = Const pi exp = Exp sin = Sin cos = Cos instance Additive a => Additive (a, a) where (f, f') + (g, g') = (f + g, f' + g') zero = (zero, zero) instance AddGroup a => AddGroup (a, a) where negate (f, f') = (negate f, negate f') instance Ring a => Multiplicative (a,a) where (f, f') * (g, g') = (f * g, f' * g + f * g') one = (one,zero) instance Field a => MulGroup (a, a) where (f, f') / (g, g') = (f / g, (f' * g - g' * f) / (g * g)) instance Transcendental a => Transcendental (a, a) where pi = (pi, zero) exp (f, f') = (exp f, (exp f) * f') sin (f, f') = (sin f, cos f * f') cos (f, f') = (cos f, -(sin f) * f')
using Revise using MathOptInterface const MOI = MathOptInterface export YasolVariable, YasolConstraint # JUMP extensions # variable extension struct YasolVariable info::JuMP.VariableInfo quantifier::String block::Int64 end function JuMP.build_variable( _error::Function, info::JuMP.VariableInfo, ::Type{YasolVariable}; quantifier::String, block::Int64, kwargs..., ) return YasolVariable( info, quantifier, block, ) end function JuMP.add_variable( model::JuMP.Model, yasolVar::YasolVariable, name::String, ) var = JuMP.add_variable( model, JuMP.ScalarVariable(yasolVar.info), name, ) # add variable attributes to variable MOI.set(model, YasolSolver.VariableAttribute("quantifier"), var, yasolVar.quantifier) MOI.set(model, YasolSolver.VariableAttribute("block"), var, yasolVar.block) # print warning, if variable in first block is not existential if(yasolVar.block == 1 && yasolVar.quantifier != "exists") @error string("Variables in the first block need to be existential! Please add a dummy variable!") return end # check if quantifier is "exists" or "all" if((yasolVar.quantifier != "exists") && (yasolVar.quantifier != "all")) @error string("Variable quantifier has to be either 'exists' or 'all'!") end # check if block is an integer if(!isinteger(yasolVar.block)) @error string("Variable blocks need to be of type integer!") end return var end # constraint extension struct YasolConstraint f::AffExpr s::MOI.AbstractScalarSet quantifier::String end function JuMP.build_constraint( _error::Function, f::AffExpr, s::MOI.AbstractScalarSet, ::Type{YasolConstraint}; quantifier::String, ) return YasolConstraint(f, s, quantifier) end function JuMP.add_constraint( model::Model, yasolCon::YasolConstraint, name::String, ) con = JuMP.add_constraint( model, ScalarConstraint(yasolCon.f, yasolCon.s), name, ) # add constarint attributes to constraint MOI.set(model, YasolSolver.ConstraintAttribute("quantifier"), con, yasolCon.quantifier) # check if quantifier is "exists" or "all" if((yasolCon.quantifier != "exists") && (yasolCon.quantifier != "all")) @error string("Constraint quantifier has to be either 'exists' or 'all'!") end return con end
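A hedged usage sketch of the JuMP extensions defined above (not taken from the package documentation): the optimizer constructor name `YasolSolver.Optimizer` is an assumption, and setting the custom variable/constraint attributes requires the Yasol backend to actually be attached; only the `YasolVariable`/`YasolConstraint` keyword interface is taken from the code itself.

using JuMP, YasolSolver

# Assumption: the solver is exposed as YasolSolver.Optimizer; the exact
# constructor and options may differ in the real package.
model = Model(YasolSolver.Optimizer)

# Existential variable in the first block (the first block must be "exists").
@variable(model, 0 <= x <= 1, YasolVariable, quantifier = "exists", block = 1)

# Universally quantified variable in a later block.
@variable(model, 0 <= y <= 1, YasolVariable, quantifier = "all", block = 2)

# A quantified constraint built through the YasolConstraint extension.
@constraint(model, x + y <= 1, YasolConstraint, quantifier = "exists")

The extra keyword arguments (`quantifier`, `block`) are forwarded by JuMP's macros to the `build_variable`/`build_constraint` methods defined above, which is the standard JuMP extension mechanism.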
module Prelude where open import Agda.Primitive using (Level; lzero; lsuc) renaming (_⊔_ to lmax) -- empty type data ⊥ : Set where -- from false, derive whatever abort : ∀ {C : Set} → ⊥ → C abort () -- unit data ⊤ : Set where <> : ⊤ -- sums data _+_ (A B : Set) : Set where Inl : A → A + B Inr : B → A + B -- pairs infixr 1 _,_ record Σ {l1 l2 : Level} (A : Set l1) (B : A → Set l2) : Set (lmax l1 l2) where constructor _,_ field π1 : A π2 : B π1 open Σ public syntax Σ A (\ x -> B) = Σ[ x ∈ A ] B _×_ : {l1 : Level} {l2 : Level} → (Set l1) → (Set l2) → Set (lmax l1 l2) A × B = Σ A λ _ → B infixr 1 _×_ -- equality data _==_ {l : Level} {A : Set l} (M : A) : A → Set l where refl : M == M infixr 9 _==_ {-# BUILTIN EQUALITY _==_ #-} {-# BUILTIN REFL refl #-} _·_ : {l : Level} {α : Set l} {x y z : α} → x == y → y == z → x == z refl · refl = refl -- β: ! (refl m) == refl m ! : {l : Level} {α : Set l} {x y : α} → x == y → y == x ! refl = refl -- β: (ap f (refl m)) == refl (f m) ap1 : {l1 l2 : Level} {α : Set l1} {β : Set l2} {x y : α} (F : α → β) → x == y → F x == F y ap1 F refl = refl -- β? : tr β (refl x) y == y tr : {l1 l2 : Level} {α : Set l1} {x y : α} (B : α → Set l2) → x == y → B x → B y tr B refl x₁ = x₁ ap2 : {l1 l2 l3 : Level} {A : Set l1} {B : Set l2} {C : Set l3} {M N : A} {M' N' : B} (f : A -> B -> C) -> M == N -> M' == N' -> (f M M') == (f N N') ap2 f refl refl = refl infix 2 _■ infixr 2 _=<_>_ _=<_>_ : {l : Level} {A : Set l} (x : A) {y z : A} → x == y → y == z → x == z _ =< p1 > p2 = p1 · p2 _■ : {l : Level} {A : Set l} (x : A) → x == x _■ _ = refl -- options data Maybe (A : Set) : Set where Some : A → Maybe A None : Maybe A -- the some constructor is injective. perhaps unsurprisingly. someinj : {A : Set} {x y : A} → Some x == Some y → x == y someinj refl = refl -- order data Order : Set where Less : Order Equal : Order Greater : Order -- function extensionality postulate funext : {A : Set} {B : A → Set} {f g : (x : A) → (B x)} → ((x : A) → f x == g x) → f == g
module WaterReservoir using Interpolations include("reservoir-geometry.jl") include("inflow.jl") include("outlet.jl") Base.@kwdef struct ReservoirModel{G<:ReservoirGeometry,I<:Inflow,O<:Outlet} geometry::G inflow::I outlet::O z₀::Float64 = 0.0 end Base.@kwdef struct Simulation{M <: ReservoirModel} model::M Δt::Float64 n::Int end function run(sim) # Result storage t = Vector{Float64}(undef, sim.n + 1) z = similar(t) Qout = similar(t) # Initial conditions t[1] = 0.0 z[1] = sim.model.z₀ Qout[1] = discharge(sim.model.outlet, z = z[1]) # Iterations for i = 1:sim.n Qin = discharge(sim.model.inflow, t[i]) S = surface_area(sim.model.geometry, z[i]) t[i+1] = i * sim.Δt z[i+1] = z[i] + sim.Δt * (Qin - Qout[i]) / S Qout[i+1] = discharge(sim.model.outlet, z = z[i+1]) end return t, z, Qout end end # module
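A hedged sketch of how the module above might be driven end to end, assuming it has been loaded (e.g. via `include`). The concrete geometry, inflow and outlet types below are hypothetical stand-ins for the ones defined in the included reservoir-geometry.jl, inflow.jl and outlet.jl files; only the `ReservoirModel`/`Simulation`/`run` interface and the `surface_area`/`discharge` call shapes are taken from the module itself.

# Hypothetical component types implementing the interface used by `run`:
# surface_area(geometry, z), discharge(inflow, t) and discharge(outlet; z).
struct FlatTank <: WaterReservoir.ReservoirGeometry
    area::Float64
end
WaterReservoir.surface_area(g::FlatTank, z) = g.area

struct ConstantInflow <: WaterReservoir.Inflow
    Q::Float64
end
WaterReservoir.discharge(i::ConstantInflow, t) = i.Q

struct LinearOutlet <: WaterReservoir.Outlet
    k::Float64
end
WaterReservoir.discharge(o::LinearOutlet; z) = o.k * max(z, 0.0)

# Build the model and run a forward-Euler simulation of 1000 one-minute steps.
model = WaterReservoir.ReservoirModel(
    geometry = FlatTank(100.0),      # m^2
    inflow   = ConstantInflow(1.0),  # m^3/s
    outlet   = LinearOutlet(0.5),    # hypothetical linear outlet law
    z₀       = 0.0,
)
sim = WaterReservoir.Simulation(model = model, Δt = 60.0, n = 1000)
t, z, Qout = WaterReservoir.run(sim)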
lemma LIMSEQ_ignore_initial_segment: "f \<longlonglongrightarrow> a \<Longrightarrow> (\<lambda>n. f (n + k)) \<longlonglongrightarrow> a"
From Coq Require Export List. Import ListNotations. Local Open Scope list_scope. Require Import Nat. Require Import Psatz. Require Import ZArith. Require Import Common. Require Import Basic. Require Import Lt. Import Common. Local Open Scope nat_scope. Fixpoint plus (b b' : bilist) : bilist := match b with | [] => b' | h :: t => let l := plus t b' in let (p, r) := split (length l - (length t)) l in match h with | E1 => (incr1 p) ++ r | E2 => (incr2 p) ++ r end end. Example test_plus_1 : plus [E1] [E1] = [E2]. Proof. auto. Qed. Example test_plus_2 : plus [E1] [E2] = [E1;E1]. Proof. auto. Qed. Example test_plus_3 : plus [E2] [E2] = [E1;E2]. Proof. auto. Qed. Example test_plus_4 : plus [E1] [E1;E1] = [E1;E2]. Proof. auto. Qed. Example test_plus_5 : plus [E1] [E2;E2] = [E1;E1;E1]. Proof. auto. Qed. Example test_plus_6 : plus [E2;E2] [E2;E2] = [E2;E1;E2]. Proof. auto. Qed. Example test_plus_7: plus [E1;E1] [E1] = [E1;E2]. Proof. auto. Qed. Example test_plus_8 : plus [E2;E2] [E1] = [E1;E1;E1]. Proof. auto. Qed. Example test_plus_9 : plus [E1;E2] [E2;E2] = [E1;E2;E2]. Proof. auto. Qed. Example test_plus_10 : plus [E1; E1] [E1; E1; E1] = [E1;E2;E2]. Proof. auto. Qed. Example test_plus_11 : plus [E1; E2] [E1; E2; E1] = [E2;E2;E1]. Proof. auto. Qed. Example test_plus_12 : plus [E2;E2] [E1;E2] = [E1;E2;E2]. Proof. auto. Qed. Example test_plus_13 : plus [E1; E1;E1] [ E1; E1] = [E1;E2;E2]. Proof. auto. Qed. Example test_plus_14 : plus [E1; E2;E1] [E1; E2] = [E2;E2;E1]. Proof. auto. Qed. Lemma incr_plus_r: forall b : bilist, incr1 b = plus [E1] b. Proof. induction b. auto. simpl. simpl in IHb. rewrite skipn_n in IHb. rewrite PeanoNat.Nat.sub_0_r with (length b) in IHb. simpl in IHb. rewrite firstn_n. rewrite <- PeanoNat.Nat.sub_0_r with (length b). rewrite skipn_n. rewrite app_nil_r. auto. Qed. Lemma incr_plus_l: forall b : bilist, incr1 b = plus b [E1]. Proof. induction b using list_ind_length. auto. destruct b. auto. simpl. remember (length (incr1 b0) =? length b0). destruct b1; destruct b. - rewrite <- H. symmetry in Heqb1. apply PeanoNat.Nat.eqb_eq in Heqb1. rewrite Heqb1. rewrite PeanoNat.Nat.sub_diag. rewrite firstn_O. rewrite skipn_O. simpl. auto. auto. - rewrite <- H. symmetry in Heqb1. apply PeanoNat.Nat.eqb_eq in Heqb1. rewrite Heqb1. rewrite PeanoNat.Nat.sub_diag. rewrite firstn_O. rewrite skipn_O. simpl. auto. auto. - rewrite <- H. symmetry in Heqb1. apply PeanoNat.Nat.eqb_neq in Heqb1. enough (S (length b0) = length (incr1 b0)). rewrite <- H0. replace (S (length b0) - length b0) with 1. replace ((firstn 1 (incr1 b0))) with [E1]. replace (skipn 1 (incr1 b0)) with (tl ((incr1 b0))). simpl. auto. rewrite skipn_1. auto. destruct b0. simpl. auto. simpl. destruct b; destruct (length (incr1 b0) =? length b0); simpl; try lia; auto. apply forall_head. destruct b0. simpl. auto. simpl. destruct b; destruct (length (incr1 b0) =? length b0); simpl; try lia; auto. apply incr1_cons1. lia. lia. apply incr1_length_plus. enough (length b0 <= length (incr1 b0)). lia. apply incr1_length. auto. - rewrite <- H. symmetry in Heqb1. apply PeanoNat.Nat.eqb_neq in Heqb1. enough (S (length b0) = length (incr1 b0)). rewrite <- H0. replace (S (length b0) - length b0) with 1. replace ((firstn 1 (incr1 b0))) with [E1]. replace (skipn 1 (incr1 b0)) with (tl ((incr1 b0))). simpl. rewrite head_tail with (l:=incr1 b0) at 1. replace (firstn 1 (incr1 b0)) with [E1]. auto. apply forall_head. lia. apply incr1_cons1. lia. lia. apply skipn_1. lia. apply forall_head. lia. apply incr1_cons1. lia. lia. apply incr1_length_plus. 
enough (length b0 <= length (incr1 b0)). lia. apply incr1_length. auto. Qed. Lemma plus_nil: forall b, plus b [] = b. Proof. induction b. auto. simpl. destruct a. rewrite ?IHb. rewrite ?PeanoNat.Nat.sub_diag. simpl. auto. rewrite ?IHb. rewrite ?PeanoNat.Nat.sub_diag. simpl. auto. Qed. Lemma plus_len: forall b b', length b <= length (plus b b'). Proof. intros. generalize dependent b'. induction b using list_ind_length; intros. simpl. lia. destruct b. simpl. lia. simpl. destruct b. - rewrite app_length. rewrite skipn_length. replace (length (plus b0 b') - (length (plus b0 b') - length b0)) with (length b0). remember (firstn (length (plus b0 b') - length b0) (plus b0 b')). destruct l. simpl. lia. simpl. destruct b; destruct (length (incr1 l) =? length l); simpl; lia. enough (length b0 <= length (plus b0 b')). lia. apply H. simpl. lia. - rewrite app_length. rewrite skipn_length. replace (length (plus b0 b') - (length (plus b0 b') - length b0)) with (length b0). remember (firstn (length (plus b0 b') - length b0) (plus b0 b')). destruct l. simpl. lia. simpl. destruct b; destruct (length (incr2 l) =? length l); simpl; destruct l; simpl length; lia. enough (length b0 <= length (plus b0 b')). lia. apply H. simpl. lia. Qed. Lemma lt_plus: forall b b', 0 < length b' -> binary_lt b (plus b b'). Proof. intros. generalize dependent b'. induction b using list_ind_length; intros. simpl. destruct b'. inversion H. constructor. simpl. lia. destruct b. simpl. destruct b'. inversion H0. constructor. simpl. lia. simpl. destruct b. - remember (length (plus b0 b') - length b0) as n. remember (plus b0 b') as x. remember (iter_incr1_tail x (length b0)). assert (iter (2 ^ length b0) incr1 x = incr1 (firstn (length x - length b0) x) ++ skipn (length x - length b0) x). apply a. rewrite Heqx. apply binary_lt_length. apply H. simpl. lia. destruct b'. inversion H0. simpl. lia. rewrite <- Heqn in H1. rewrite <- H1. clear a Heqa. assert (E1 :: b0 = iter (2 ^ length b0) incr1 b0). rewrite iter_incr1_tail1. auto. rewrite H2. apply binary_lt_incr1_iter_hom. rewrite Heqx. apply H. simpl. lia. destruct b'. inversion H0. simpl. lia. - remember (length (plus b0 b') - length b0) as n. remember (plus b0 b') as x. remember (iter_incr1_tail x (length b0)). assert (iter (2 ^ length b0 + 2 ^ length b0) incr1 x = incr2 (firstn (length x - length b0) x) ++ skipn (length x - length b0) x). apply a. rewrite Heqx. apply binary_lt_length. apply H. simpl. lia. destruct b'. inversion H0. simpl. lia. rewrite <- Heqn in H1. rewrite <- H1. clear a Heqa. assert (E2 :: b0 = iter (2 ^ length b0 + 2 ^ length b0) incr1 b0). rewrite iter_incr1_tail2. auto. rewrite H2. apply binary_lt_incr1_iter_hom. rewrite Heqx. apply H. simpl. lia. destruct b'. inversion H0. simpl. lia. Qed. Lemma plus_incr1_left: forall b b', plus (incr1 b) b' = incr1 (plus b b') . Proof. intros. generalize dependent b'. induction b using bilist_lt_ind. intros. simpl. replace (length b' - 0) with (length b'). rewrite firstn_all. rewrite skipn_all. rewrite ?app_nil_r. auto. lia. intros. destruct b. simpl. replace (length b' - 0) with (length b'). rewrite firstn_all. rewrite skipn_all. rewrite ?app_nil_r. auto. lia. simpl. remember (length (incr1 b0) =? length b0). remember (length (plus b0 b') - length b0). remember (plus b0 b') as x. destruct b; destruct b1. - simpl. rewrite H. rewrite <- Heqx. remember ((length (incr1 x) - length (incr1 b0))) as m. remember (iter_incr1_tail x (length b0)). clear Heqa. rewrite <- Heqn in a. 
assert (iter (2 ^ length b0) incr1 x = incr1 (firstn n x) ++ skipn n x). apply a. rewrite Heqx. apply plus_len. rewrite <- H0. remember (iter_incr1_tail (incr1 x) (length (incr1 b0))). clear Heqa0. rewrite <- Heqm in a0. assert (iter (2 ^ length (incr1 b0)) incr1 (incr1 x) = incr1 (firstn m (incr1 x)) ++ skipn m (incr1 x)). apply a0. rewrite Heqx. destruct b'. rewrite plus_nil. lia. apply incr1_length_hom. apply lt_plus. simpl. lia. clear a H0 a0 H1. remember (iter_incr1_tail (incr1 x) (length (incr1 b0))). assert (iter (2 ^ length (incr1 b0)) incr1 (incr1 x) = incr1 (firstn (length (incr1 x) - length (incr1 b0)) (incr1 x)) ++ skipn (length (incr1 x) - length (incr1 b0)) (incr1 x)). apply a. rewrite Heqx. destruct b'. rewrite plus_nil. lia. apply incr1_length_hom. apply lt_plus. simpl. lia. rewrite <- Heqm in H0. rewrite <- H0. replace (length (incr1 b0)) with (length b0). rewrite <- iterS. simpl. auto. apply PeanoNat.Nat.eqb_eq. rewrite Heqb1. apply PeanoNat.Nat.eqb_sym. constructor. lia. - simpl. remember (length (plus (tl (incr1 b0)) b') - length (tl (incr1 b0))) as m. remember ((plus (tl (incr1 b0)) b')). destruct b0. + simpl in Heqb. simpl in Heqm. simpl in Heqx. simpl in Heqn. rewrite Heqx. rewrite Heqb. replace m with n. subst. replace (length b' - 0) with (length b') by lia. rewrite firstn_all2. rewrite skipn_all2. rewrite ?app_nil_r. rewrite incr2_incr1. auto. lia. lia. subst. auto. + rewrite <- incr1_tl_comm_E2 in Heqb. rewrite H in Heqb. rewrite <- incr1_tl_comm_E2 in Heqm. simpl in Heqm. simpl in Heqb. remember (iter_incr1_tail b (length (incr1 b1))). assert (iter (2 ^ length (incr1 b1) + 2 ^ length (incr1 b1)) incr1 b = incr2 (firstn (length b - length (incr1 b1)) b) ++ skipn (length b - length (incr1 b1)) b). apply a. rewrite Heqb. destruct b'. rewrite plus_nil. lia. apply incr1_length_hom. apply lt_plus. simpl. lia. rewrite <- Heqm in H0. rewrite <- H0. clear a Heqa H0. remember (iter_incr1_tail x (length (b0 :: b1))). assert (iter (2 ^ length (b0 :: b1)) incr1 x = incr1 (firstn (length x - length (b0 :: b1)) x) ++ skipn (length x - length (b0 :: b1)) x). apply a. rewrite Heqx. apply plus_len. rewrite <- Heqn in H0. rewrite <- H0. simpl. simpl in Heqx. replace b0 with E2 in Heqx. clear a Heqa H0. remember (plus b1 b') as y. remember (iter_incr1_tail y (length b1)). assert (iter (2 ^ length b1 + 2 ^ length b1) incr1 y = incr2 (firstn (length y - length b1) y) ++ skipn (length y - length b1) y). apply a. rewrite Heqy. apply plus_len. rewrite <- H0 in Heqx. clear a Heqa H0. rewrite Heqx. rewrite Heqb. replace (2 ^ length b1 + (2 ^ length b1 + 0)) with (2 ^ length b1 + 2 ^ length b1 ). replace (length (incr1 b1)) with (S (length b1)). simpl. replace (2 ^ length b1 + 0) with (2 ^ length b1 ). rewrite iter_plus. rewrite <- iterS. simpl. rewrite <- iterS. simpl. auto. lia. apply incr1_length_plus. apply incr1_cons2_inv. enough (length (b0 :: b1) < length (incr1 (b0 :: b1))). apply incr1_cons2 in H0. inversion H0. auto. enough (length (b0 :: b1) <= length (incr1 (b0 :: b1))). symmetry in Heqb1. apply PeanoNat.Nat.eqb_neq in Heqb1. lia. apply incr1_length. lia. enough (Forall (eq E2) (b0 :: b1)). inversion H1. auto. apply incr1_cons2. enough (length (b0 :: b1) <= length (incr1 (b0 :: b1))). symmetry in Heqb1. apply PeanoNat.Nat.eqb_neq in Heqb1. lia. apply incr1_length. simpl. lia. apply incr1_cons2. enough (length (b0 :: b1) <= length (incr1 (b0 :: b1))). symmetry in Heqb1. apply PeanoNat.Nat.eqb_neq in Heqb1. lia. apply incr1_length. simpl. constructor 1. simpl. lia. simpl. lia. 
apply incr1_cons2. enough (length (b0 :: b1) <= length (incr1 (b0 :: b1))). symmetry in Heqb1. apply PeanoNat.Nat.eqb_neq in Heqb1. lia. apply incr1_length. - simpl. remember (length (plus (incr1 b0) b') - length (incr1 b0)) as m. rewrite H. rewrite <- Heqx. remember (iter_incr1_tail x (length b0)). assert (iter (2 ^ length b0 + 2 ^ length b0) incr1 x = incr2 (firstn (length x - length b0) x) ++ skipn (length x - length b0) x). apply a. rewrite Heqx. apply plus_len. rewrite <- Heqn in H0. rewrite <- H0. clear a Heqa H0. rewrite H in Heqm. rewrite <- Heqx in Heqm. remember (iter_incr1_tail (incr1 x) (length (incr1 b0))). assert (iter (2 ^ length (incr1 b0) + 2 ^ length (incr1 b0)) incr1 (incr1 x) = incr2 (firstn (length (incr1 x) - length (incr1 b0)) (incr1 x)) ++ skipn (length (incr1 x) - length (incr1 b0)) (incr1 x)). apply a. rewrite Heqx. destruct b'. rewrite plus_nil. lia. apply incr1_length_hom. apply lt_plus. simpl. lia. rewrite <- Heqm in H0. rewrite <- H0. rewrite <- iterS. simpl. replace (length (incr1 b0)) with (length b0). auto. apply PeanoNat.Nat.eqb_eq. rewrite Heqb1. apply PeanoNat.Nat.eqb_sym. constructor. lia. constructor. lia. - simpl. rewrite H. rewrite <- Heqx. remember (length (incr1 x) - length (incr1 b0)) as m. remember (iter_incr1_tail x (length b0)). assert (iter (2 ^ length b0 + 2 ^ length b0) incr1 x = incr2 (firstn (length x - length b0) x) ++ skipn (length x - length b0) x). apply a. rewrite Heqx. apply plus_len. rewrite <- Heqn in H0. rewrite <- H0. clear a Heqa H0. remember (iter_incr1_tail (incr1 x) (length (incr1 b0))). assert (iter (2 ^ length (incr1 b0)) incr1 (incr1 x) = incr1 (firstn (length (incr1 x) - length (incr1 b0)) (incr1 x)) ++ skipn (length (incr1 x) - length (incr1 b0)) (incr1 x)). apply a. rewrite Heqx. destruct b'. rewrite plus_nil. lia. apply incr1_length_hom. apply lt_plus. simpl. lia. rewrite <- Heqm in H0. rewrite <- H0. replace (length (incr1 b0)) with (S (length b0)). simpl. replace (2 ^ length b0 + (2 ^ length b0 + 0)) with (2 ^ length b0 + 2 ^ length b0 ). rewrite <- iterS. simpl. auto. lia. apply incr1_length_plus. enough (length b0 <= length (incr1 b0)). symmetry in Heqb1. apply PeanoNat.Nat.eqb_neq in Heqb1. lia. apply incr1_length. constructor. lia. Qed. Lemma plus_incr1_right: forall b b', plus b (incr1 b') = incr1 (plus b b') . Proof. intros. generalize dependent b'. induction b using bilist_lt_ind. intros. simpl. auto. intros. destruct b. simpl. auto. simpl. rewrite H. remember (plus b0 b') as x. destruct b. - remember ((length (incr1 x) - length b0)) as m. remember (length x - length b0) as n. remember (iter_incr1_tail x (length b0)). clear Heqa. assert (iter (2 ^ length b0) incr1 x = incr1 (firstn (length x - length b0) x) ++ skipn (length x - length b0) x). apply a. rewrite Heqx. apply plus_len. rewrite <- Heqn in H0. rewrite <- H0. clear a H0. remember (iter_incr1_tail (incr1 x) (length b0)). assert (iter (2 ^ length b0) incr1 (incr1 x) = incr1 (firstn (length (incr1 x) - length b0) (incr1 x)) ++ skipn (length (incr1 x) - length b0) (incr1 x)). apply a. rewrite Heqx. destruct b'. rewrite plus_nil. apply incr1_length. apply binary_lt_length. apply binary_lt_trans with (b:=(plus b0 (b :: b'))). apply lt_plus. simpl. lia. apply lt_incr1. rewrite <- Heqm in H0. rewrite <- H0. clear a Heqa H0. rewrite <- iterS. simpl. auto. - remember ((length (incr1 x) - length b0)) as m. remember (length x - length b0) as n. remember (iter_incr1_tail x (length b0)). clear Heqa. 
assert (iter (2 ^ length b0 + 2 ^ length b0) incr1 x = incr2 (firstn (length x - length b0) x) ++ skipn (length x - length b0) x). apply a. rewrite Heqx. apply plus_len. rewrite <- Heqn in H0. rewrite <- H0. clear a H0. remember (iter_incr1_tail (incr1 x) (length b0)). assert (iter (2 ^ length b0 + 2 ^ length b0) incr1 (incr1 x) = incr2 (firstn (length (incr1 x) - length b0) (incr1 x)) ++ skipn (length (incr1 x) - length b0) (incr1 x)). apply a. rewrite Heqx. destruct b'. rewrite plus_nil. apply incr1_length. apply binary_lt_length. apply binary_lt_trans with (b:=(plus b0 (b :: b'))). apply lt_plus. simpl. lia. apply lt_incr1. rewrite <- Heqm in H0. rewrite <- H0. clear a Heqa H0. rewrite <- iterS. simpl. auto. - constructor. lia. Qed. Theorem plus_comm: forall b b' : bilist, plus b b' = plus b' b. Proof. intros. generalize dependent b'. induction b using bilist_incr_ind. intros. rewrite plus_nil. simpl. auto. intros. enough (forall b b', plus (incr1 b) b' = incr1 (plus b b')). enough (forall b b', plus b (incr1 b') = incr1 (plus b b')). rewrite H. rewrite IHb. rewrite H0. auto. intros. apply plus_incr1_right. intros. apply plus_incr1_left. Qed. Fixpoint plus' (b b' : bilist) : bilist := match b with | [] => b' | E1 :: t => iter (pow 2 (length t)) incr1 (plus' t b') | E2 :: t => iter (pow 2 (length t) + pow 2 (length t)) incr1 (plus' t b') end. Lemma plus'_0_l: forall b, plus' [] b = b . Proof. simpl. auto. Qed. Lemma iter_incr1_next_digit: forall b, iter (2 ^ length b) incr1 b = E1 :: b /\ iter (2 ^ length b + 2 ^ length b) incr1 b = E2 :: b. Proof. intros. remember (iter_incr1_tail b (length b)). assert (iter (2 ^ length b) incr1 b = incr1 (firstn (length b - length b) b) ++ skipn (length b - length b) b /\ iter (2 ^ length b + 2 ^ length b) incr1 b = incr2 (firstn (length b - length b) b) ++ skipn (length b - length b) b). apply a. lia. clear Heqa a. inversion_clear H. rewrite H0. rewrite H1. replace (length b - length b) with 0 by lia. simpl. auto. Qed. Lemma plus'_0_r: forall b, plus' b [] = b . Proof. induction b using list_ind_length. auto. destruct b; auto. simpl. destruct b. - rewrite H; [| simpl; lia]. apply iter_incr1_next_digit. - rewrite H; [| simpl; lia]. apply iter_incr1_next_digit. Qed. Lemma plus_plus': forall b b', plus b b' = plus' b b'. Proof. intros. generalize dependent b'. induction b using list_ind_length; intros. simpl. auto. destruct b; auto. simpl. rewrite <- H. remember (plus b0 b'). remember (iter_incr1_tail b1 (length b0)). enough (length b0 <= length b1). apply a in H0. inversion_clear H0. rewrite H1, H2. auto. rewrite Heqb1. apply plus_len. simpl. lia. Qed.
! This approximation is not valid for cells which have emission in them, but ! that's fine, because if they had emission in them we wouldn't be calling this ! here module grid_pda use core_lib use type_grid_cell use grid_physics use grid_geometry use dust_main use grid_io use grid_pda_geometry implicit none save private public :: solve_pda real(dp), parameter :: tolerance_iter = 1.e-4 ! energy calculation convergence criterion real(dp), parameter :: tolerance_exact = 1.e-5 ! energy calculation convergence criterion real(dp), parameter :: threshold_pda = 0.005 ! maximum number of photons required to use PDA real(dp), allocatable :: e_mean(:) contains real(dp) elemental function difference_ratio(a, b) implicit none real(dp), intent(in) :: a, b difference_ratio = max(a/b, b/a) end function difference_ratio subroutine update_specific_energy(ic) implicit none integer,intent(in) :: ic integer :: id real(dp) :: s_prev, s, smin, smax do id=1,n_dust s = specific_energy(ic, id) smin = d(id)%specific_energy(1) smax = d(id)%specific_energy(d(id)%n_e) if(e_mean(ic) < smin / kappa_planck(id, smin)) then call warn("update_specific_energy", "specific energy in PDA below minimum allowed by dust type - resetting") s = smin else if (e_mean(ic) > smax / kappa_planck(id, smax)) then call warn("update_specific_energy", "specific energy in PDA above maximum allowed by dust type - resetting") s = smax else do s_prev = s s = e_mean(ic) * kappa_planck(id, s) if(difference_ratio(s, s_prev) - 1._dp < 1.e-5_dp) exit end do end if specific_energy(ic, id) = s end do end subroutine update_specific_energy subroutine update_e_mean(ic) implicit none integer,intent(in) :: ic integer :: id e_mean(ic) = 0. if(sum(density(ic, :)) > 0._dp) then do id=1,n_dust e_mean(ic) = e_mean(ic) + density(ic,id) * specific_energy(ic,id) / kappa_planck(id, specific_energy(ic,id)) end do e_mean(ic) = e_mean(ic) / sum(density(ic, :)) end if end subroutine update_e_mean subroutine solve_pda() implicit none real(dp),allocatable :: specific_energy_prev(:,:) logical,allocatable :: do_pda(:) real(dp) :: maxdiff real(dp) :: mean_n_photons real(dp) :: tolerance integer :: ic integer :: ipda type(grid_cell), allocatable :: pda_cells(:) integer,allocatable :: id_pda_cell(:) mean_n_photons = sum(n_photons) / size(n_photons) allocate(specific_energy_prev(geo%n_cells, n_dust)) allocate(do_pda(geo%n_cells)) do_pda = n_photons < max(30,ceiling(threshold_pda*mean_n_photons)) .and. sum(density, dim=2) > 0._dp call check_allowed_pda(do_pda) if(.not.any(do_pda)) then write(*,'(" [pda] not necessary for this iteration")') return end if if(count(do_pda) < 10000) then write(*,'(" [pda] fewer than 10,000 PDA cells - using Gauss pivot method")') tolerance = tolerance_exact else write(*,'(" [pda] more than 10,000 PDA cells - using iterative method")') tolerance = tolerance_iter end if allocate(e_mean(geo%n_cells)) do ic=1,geo%n_cells call update_e_mean(ic) end do ! 
Precompute the cells where the PDA will be computed allocate(pda_cells(count(do_pda))) allocate(id_pda_cell(geo%n_cells)) id_pda_cell = -1 ipda = 0 do ic=1,geo%n_cells if(do_pda(ic)) then ipda = ipda + 1 pda_cells(ipda) = new_grid_cell(ic, geo) id_pda_cell(ic) = ipda end if end do specific_energy_prev = specific_energy do specific_energy_prev = specific_energy if(count(do_pda) < 10000) then call solve_pda_indiv_exact(pda_cells, id_pda_cell) else call solve_pda_indiv_iterative(pda_cells) end if maxdiff = maxval(abs(specific_energy - specific_energy_prev) / specific_energy_prev) write(*,'(" [pda] maximum energy difference: ", ES9.2)') maxdiff if(maxdiff < tolerance) exit end do write(*,'(" [pda] converged")') deallocate(do_pda) deallocate(specific_energy_prev) deallocate(e_mean) call update_energy_abs_tot() call check_energy_abs() end subroutine solve_pda real(dp) function dtau_rosseland(cell, idir) implicit none type(grid_cell), intent(in) :: cell integer,intent(in) :: idir integer :: id dtau_rosseland = 0._dp do id=1,n_dust dtau_rosseland = dtau_rosseland + density(cell%ic,id) * chi_rosseland(id, specific_energy(cell%ic,id)) * cell_width(cell,idir) end do end function dtau_rosseland subroutine solve_pda_indiv_exact(pda_cells, id_pda_cell) implicit none type(grid_cell),intent(in) :: pda_cells(:) integer,intent(in) :: id_pda_cell(:) ! Which cells should be used for the PDA integer :: direction, wall type(grid_cell) :: curr, next real(dp) :: dtau_ross_curr, dtau_ross_next, dtau_sum integer :: ic real(dp) :: coefficient real(dp),allocatable :: a(:,:), b(:) integer :: id_curr, id_next do id_curr=1,size(pda_cells) ic = pda_cells(id_curr)%ic call update_e_mean(ic) end do allocate(a(size(pda_cells), size(pda_cells)), b(size(pda_cells))) a = 0._dp b = 0._dp do id_curr=1,size(pda_cells) curr = pda_cells(id_curr) do wall = 1, geo%n_dim * 2 direction = int((wall+1)/2) next = next_cell(curr, wall) dtau_ross_curr = dtau_rosseland(curr, direction) dtau_ross_next = dtau_rosseland(next, direction) dtau_sum = dtau_ross_curr + dtau_ross_next ! If the optical depth is too small, we have to reset it to avoid ! issues. if(dtau_sum < 1e-100_dp) dtau_sum = 1e-100_dp coefficient = 1. / dtau_sum / cell_width(curr, direction) coefficient = coefficient * geometrical_factor(wall, curr) a(id_curr, id_curr) = a(id_curr, id_curr) - coefficient if(id_pda_cell(next%ic) > 0) then id_next = id_pda_cell(next%ic) a(id_next, id_curr) = coefficient else b(id_curr) = b(id_curr) - coefficient * e_mean(next%ic) end if end do end do call lineq_gausselim(a, b) do id_curr=1,size(pda_cells) ic = pda_cells(id_curr)%ic e_mean(ic) = b(id_curr) call update_specific_energy(ic) end do deallocate(a, b) end subroutine solve_pda_indiv_exact subroutine solve_pda_indiv_iterative(pda_cells) implicit none type(grid_cell),intent(in) :: pda_cells(:) ! Which cells should be used for the PDA integer :: direction, wall type(grid_cell) :: curr, next real(dp) :: dtau_ross_curr, dtau_ross_next real(dp) :: coefficient real(dp) :: a, b integer :: id_curr, ic real(dp) :: max_e_diff, e_diff, e_new do id_curr=1,size(pda_cells) ic = pda_cells(id_curr)%ic call update_e_mean(ic) end do do max_e_diff = 0. do id_curr=1,size(pda_cells) curr = pda_cells(id_curr) a = 0._dp b = 0._dp do wall = 1, geo%n_dim * 2 direction = int((wall+1)/2) next = next_cell(curr, wall) dtau_ross_curr = dtau_rosseland(curr, direction) dtau_ross_next = dtau_rosseland(next, direction) coefficient = 1. 
/ (dtau_ross_curr + dtau_ross_next) / cell_width(curr, direction) coefficient = coefficient * geometrical_factor(wall, curr) a = a - coefficient b = b - coefficient * e_mean(next%ic) end do e_new = b/a e_diff = abs(e_new - e_mean(curr%ic)) / e_mean(curr%ic) if(e_diff > max_e_diff) max_e_diff = e_diff e_mean(curr%ic) = e_new end do if(max_e_diff < tolerance_iter) exit end do do id_curr=1,size(pda_cells) ic = pda_cells(id_curr)%ic call update_specific_energy(ic) end do end subroutine solve_pda_indiv_iterative end module grid_pda
[STATEMENT] lemma lift_clear_vars : "vars (liftPoly i j (p::real mpoly)) \<inter> {i..<i + j} = {}" [PROOF STATE] proof (prove) goal (1 subgoal): 1. vars (liftPoly i j p) \<inter> {i..<i + j} = {} [PROOF STEP] proof(induction p rule: mpoly_induct) [PROOF STATE] proof (state) goal (2 subgoals): 1. \<And>m a. vars (liftPoly i j (MPoly_Type.monom m a)) \<inter> {i..<i + j} = {} 2. \<And>p1 p2 m a. \<lbrakk>vars (liftPoly i j p1) \<inter> {i..<i + j} = {}; vars (liftPoly i j p2) \<inter> {i..<i + j} = {}; p2 = MPoly_Type.monom m a; m \<notin> monomials p1; a \<noteq> 0\<rbrakk> \<Longrightarrow> vars (liftPoly i j (p1 + p2)) \<inter> {i..<i + j} = {} [PROOF STEP] case (monom m a) [PROOF STATE] proof (state) this: goal (2 subgoals): 1. \<And>m a. vars (liftPoly i j (MPoly_Type.monom m a)) \<inter> {i..<i + j} = {} 2. \<And>p1 p2 m a. \<lbrakk>vars (liftPoly i j p1) \<inter> {i..<i + j} = {}; vars (liftPoly i j p2) \<inter> {i..<i + j} = {}; p2 = MPoly_Type.monom m a; m \<notin> monomials p1; a \<noteq> 0\<rbrakk> \<Longrightarrow> vars (liftPoly i j (p1 + p2)) \<inter> {i..<i + j} = {} [PROOF STEP] then [PROOF STATE] proof (chain) picking this: [PROOF STEP] show ?case [PROOF STATE] proof (prove) goal (1 subgoal): 1. vars (liftPoly i j (MPoly_Type.monom m a)) \<inter> {i..<i + j} = {} [PROOF STEP] unfolding lift_vars_monom [PROOF STATE] proof (prove) goal (1 subgoal): 1. (\<lambda>x. if i \<le> x then x + j else x) ` vars (MPoly_Type.monom m a) \<inter> {i..<i + j} = {} [PROOF STEP] by auto [PROOF STATE] proof (state) this: vars (liftPoly i j (MPoly_Type.monom m a)) \<inter> {i..<i + j} = {} goal (1 subgoal): 1. \<And>p1 p2 m a. \<lbrakk>vars (liftPoly i j p1) \<inter> {i..<i + j} = {}; vars (liftPoly i j p2) \<inter> {i..<i + j} = {}; p2 = MPoly_Type.monom m a; m \<notin> monomials p1; a \<noteq> 0\<rbrakk> \<Longrightarrow> vars (liftPoly i j (p1 + p2)) \<inter> {i..<i + j} = {} [PROOF STEP] next [PROOF STATE] proof (state) goal (1 subgoal): 1. \<And>p1 p2 m a. \<lbrakk>vars (liftPoly i j p1) \<inter> {i..<i + j} = {}; vars (liftPoly i j p2) \<inter> {i..<i + j} = {}; p2 = MPoly_Type.monom m a; m \<notin> monomials p1; a \<noteq> 0\<rbrakk> \<Longrightarrow> vars (liftPoly i j (p1 + p2)) \<inter> {i..<i + j} = {} [PROOF STEP] case (sum p1 p2 m a) [PROOF STATE] proof (state) this: vars (liftPoly i j p1) \<inter> {i..<i + j} = {} vars (liftPoly i j p2) \<inter> {i..<i + j} = {} p2 = MPoly_Type.monom m a m \<notin> monomials p1 a \<noteq> 0 goal (1 subgoal): 1. \<And>p1 p2 m a. \<lbrakk>vars (liftPoly i j p1) \<inter> {i..<i + j} = {}; vars (liftPoly i j p2) \<inter> {i..<i + j} = {}; p2 = MPoly_Type.monom m a; m \<notin> monomials p1; a \<noteq> 0\<rbrakk> \<Longrightarrow> vars (liftPoly i j (p1 + p2)) \<inter> {i..<i + j} = {} [PROOF STEP] then [PROOF STATE] proof (chain) picking this: vars (liftPoly i j p1) \<inter> {i..<i + j} = {} vars (liftPoly i j p2) \<inter> {i..<i + j} = {} p2 = MPoly_Type.monom m a m \<notin> monomials p1 a \<noteq> 0 [PROOF STEP] show ?case [PROOF STATE] proof (prove) using this: vars (liftPoly i j p1) \<inter> {i..<i + j} = {} vars (liftPoly i j p2) \<inter> {i..<i + j} = {} p2 = MPoly_Type.monom m a m \<notin> monomials p1 a \<noteq> 0 goal (1 subgoal): 1. 
vars (liftPoly i j (p1 + p2)) \<inter> {i..<i + j} = {} [PROOF STEP] using vars_lift_add[of i j p1 p2] [PROOF STATE] proof (prove) using this: vars (liftPoly i j p1) \<inter> {i..<i + j} = {} vars (liftPoly i j p2) \<inter> {i..<i + j} = {} p2 = MPoly_Type.monom m a m \<notin> monomials p1 a \<noteq> 0 vars (liftPoly i j (p1 + p2)) \<subseteq> vars (liftPoly i j p1) \<union> vars (liftPoly i j p2) goal (1 subgoal): 1. vars (liftPoly i j (p1 + p2)) \<inter> {i..<i + j} = {} [PROOF STEP] by blast [PROOF STATE] proof (state) this: vars (liftPoly i j (p1 + p2)) \<inter> {i..<i + j} = {} goal: No subgoals! [PROOF STEP] qed
% Chapter Template \chapter{Related Work} % Main chapter title \label{Chapter8} % Change X to a consecutive number; for referencing this chapter elsewhere, use \ref{ChapterX} There has been a lot of research on big data analytics. Open source tools like Hadoop and Spark have made scalable, distributed computation available to the general public. In addition, thanks to progress in big data storage, with systems like Cassandra, hosting internal storage has never been easier. However, there is not much work on using container orchestration for big data analysis systems: both fields have been studied extensively, but little work combines them. The work presented by \parencite{smack} is probably the closest approach to the system I present here. The main differences are in the actor system and the orchestration platform: instead of using Mesos for orchestration and Akka for actors, I preferred Kubernetes with container microservices instead of actors. The stack described there also uses Spark Streaming to deploy a Lambda architecture, which could be done in the current infrastructure to extend the capabilities of the system. The main reason for avoiding Spark Streaming is that its micro-batching approach could mean losing tweets during the normalization process. Furthermore, the reason to avoid Akka for actors was that it offers features similar to those of Kafka combined with microservices in Kubernetes; adding Akka would therefore make the system more complex and harder to maintain. There has also been earlier work on making big data analytics more scalable, such as this paper from 2014 \parencite{scalableHadoop}. That proposal is agnostic about how such systems are deployed, leaving deployment to the developer; since containers and container-orchestrated systems were not popular at the time, they are not considered. Its main approach to scaling is the use of different Hadoop components. Currently, Spark is preferred over Hadoop, given the increased performance made possible by its memory-based approach to MapReduce. In \parencite{lambda2014}, we see a similar approach using Hadoop and Pig in the batch layer instead of Cassandra and Spark. They also incorporate a streaming layer for real-time analysis, making use of the lambda infrastructure. In \parencite{streamingAnalysis}, we can see Zeppelin used as a dashboard interface, similar to our work. However, none of these systems uses the container-orchestrated approach, and so they lack the scalability and reliability benefits it provides.
%!TEX root = ../dissertation.tex \chapter{Environment Overview} \label{sec:network_overview} There are many applicability domains and different methods for creating \gls{IoT} networks. Some provide direct connectivity of nodes to the Internet, while others provide a common gateway for interfacing with external networks. This main difference has profound implications for the type of devices and protocols used for communication. In the following paragraphs, a short overview of these two types of architectures is presented. \section{Large Area Networks} \begin{figure}[h] \centering \includegraphics[width=0.85\linewidth]{figures/Network_Overview_Sparse.pdf} \caption{IoT Large Area Network Overview.} \label{fig:net_overview_large} \end{figure} In this type of architecture, the network nodes transmit sensing information to global access points without requiring inter-device communication. The access points can range from routers to GSM towers or even satellites. The outgoing messages are then transmitted through the Internet to external data processing facilities, where large amounts of data from many sources are converted into useful information on demand. This information can then be accessed by the users from outside the site, also through the Internet. This type of deployment could be used, for example, in Smart Cities, where there is the need to cover a large area with sensors. A typical scenario would be to collect information on the city garbage bins to plan dynamic collection routes based on the status of each bin. \section{Personal Area Networks} \begin{figure}[h] \centering \includegraphics[width=0.8\linewidth]{figures/Network_Overview.pdf} \caption{IoT Personal Area Network Overview.} \label{fig:net_overview_small} \end{figure} In this type of architecture, the sensing or actuating nodes belong to a very constrained network with specific protocols and header compression mechanisms, requiring an interface device -- the border router -- in order to communicate with external networks. After reaching the external network, incoming messages are processed to convert sensor data into useful information, which is then stored or used to trigger events. This information can then be accessed by users either on the same network or by making requests through the Internet. This type of deployment could be used, for example, in a home intrusion system, where the network nodes would form a sensor network that propagates events in case of an intrusion, and the additional infrastructure would be in charge of receiving these events and notifying the police authorities.\\ Given the power-aware focus of our work, the personal area network architecture allows a greater reduction in resource consumption because of its communication model. Furthermore, since the back-end facilities are usually present on the same site as the network, there is the possibility of adding external components to allow the inclusion of our solution. With these two aspects in mind, in our work we focus on scenarios where network nodes are not directly connected to the Internet and require additional network components for proper communication.
using YaoZX using Test @testset "push gate" begin include("push_gate.jl") end @testset "to qbir" begin include("toqbir.jl") end @testset "simplification" begin include("simplification.jl") end
theory BTree_Set imports BTree "HOL-Data_Structures.Set_Specs" begin section "Set interpretation" subsection "Auxiliary functions" fun split_half:: "('a btree\<times>'a) list \<Rightarrow> (('a btree\<times>'a) list \<times> ('a btree\<times>'a) list)" where "split_half xs = (take (length xs div 2) xs, drop (length xs div 2) xs)" lemma drop_not_empty: "xs \<noteq> [] \<Longrightarrow> drop (length xs div 2) xs \<noteq> []" apply(induction xs) apply(auto split!: list.splits) done lemma split_half_not_empty: "length xs \<ge> 1 \<Longrightarrow> \<exists>ls sub sep rs. split_half xs = (ls,(sub,sep)#rs)" using drop_not_empty by (metis (no_types, opaque_lifting) drop0 drop_eq_Nil eq_snd_iff hd_Cons_tl le_trans not_one_le_zero split_half.simps) subsection "The split function locale" text "Here, we abstract away the inner workings of the split function for B-tree operations." (* TODO what if we define a function "list_split" that returns a split list for mapping arbitrary f (separators) and g (subtrees) s.th. f :: 'a \<Rightarrow> ('b::linorder) and g :: 'a \<Rightarrow> 'a btree this would allow for key,pointer pairs to be inserted into the tree *) (* TODO what if the keys are the pointers? *) locale split = fixes split :: "('a btree\<times>'a::linorder) list \<Rightarrow> 'a \<Rightarrow> (('a btree\<times>'a) list \<times> ('a btree\<times>'a) list)" assumes split_req: "\<lbrakk>split xs p = (ls,rs)\<rbrakk> \<Longrightarrow> xs = ls @ rs" "\<lbrakk>split xs p = (ls@[(sub,sep)],rs); sorted_less (separators xs)\<rbrakk> \<Longrightarrow> sep < p" "\<lbrakk>split xs p = (ls,(sub,sep)#rs); sorted_less (separators xs)\<rbrakk> \<Longrightarrow> p \<le> sep" begin lemmas split_conc = split_req(1) lemmas split_sorted = split_req(2,3) lemma [termination_simp]:"(ls, (sub, sep) # rs) = split ts y \<Longrightarrow> size sub < Suc (size_list (\<lambda>x. Suc (size (fst x))) ts + size l)" using split_conc[of ts y ls "(sub,sep)#rs"] by auto fun invar_inorder where "invar_inorder k t = (bal t \<and> root_order k t)" definition "empty_btree = Leaf" subsection "Membership" fun isin:: "'a btree \<Rightarrow> 'a \<Rightarrow> bool" where "isin (Leaf) y = False" | "isin (Node ts t) y = ( case split ts y of (_,(sub,sep)#rs) \<Rightarrow> ( if y = sep then True else isin sub y ) | (_,[]) \<Rightarrow> isin t y )" subsection "Insertion" text "The insert function requires an auxiliary data structure and auxiliary invariant functions." 
datatype 'b up\<^sub>i = T\<^sub>i "'b btree" | Up\<^sub>i "'b btree" 'b "'b btree" fun order_up\<^sub>i where "order_up\<^sub>i k (T\<^sub>i sub) = order k sub" | "order_up\<^sub>i k (Up\<^sub>i l a r) = (order k l \<and> order k r)" fun root_order_up\<^sub>i where "root_order_up\<^sub>i k (T\<^sub>i sub) = root_order k sub" | "root_order_up\<^sub>i k (Up\<^sub>i l a r) = (order k l \<and> order k r)" fun height_up\<^sub>i where "height_up\<^sub>i (T\<^sub>i t) = height t" | "height_up\<^sub>i (Up\<^sub>i l a r) = max (height l) (height r)" fun bal_up\<^sub>i where "bal_up\<^sub>i (T\<^sub>i t) = bal t" | "bal_up\<^sub>i (Up\<^sub>i l a r) = (height l = height r \<and> bal l \<and> bal r)" fun inorder_up\<^sub>i where "inorder_up\<^sub>i (T\<^sub>i t) = inorder t" | "inorder_up\<^sub>i (Up\<^sub>i l a r) = inorder l @ [a] @ inorder r" text "The following function merges two nodes and returns separately split nodes if an overflow occurs" fun node\<^sub>i:: "nat \<Rightarrow> ('a btree \<times> 'a) list \<Rightarrow> 'a btree \<Rightarrow> 'a up\<^sub>i" where "node\<^sub>i k ts t = ( if length ts \<le> 2*k then T\<^sub>i (Node ts t) else ( case split_half ts of (ls, (sub,sep)#rs) \<Rightarrow> Up\<^sub>i (Node ls sub) sep (Node rs t) ) )" lemma nodei_ti_simp: "node\<^sub>i k ts t = T\<^sub>i x \<Longrightarrow> x = Node ts t" apply (cases "length ts \<le> 2*k") apply (auto split!: list.splits) done fun ins:: "nat \<Rightarrow> 'a \<Rightarrow> 'a btree \<Rightarrow> 'a up\<^sub>i" where "ins k x Leaf = (Up\<^sub>i Leaf x Leaf)" | "ins k x (Node ts t) = ( case split ts x of (ls,(sub,sep)#rs) \<Rightarrow> (if sep = x then T\<^sub>i (Node ts t) else (case ins k x sub of Up\<^sub>i l a r \<Rightarrow> node\<^sub>i k (ls @ (l,a)#(r,sep)#rs) t | T\<^sub>i a \<Rightarrow> T\<^sub>i (Node (ls @ (a,sep) # rs) t))) | (ls, []) \<Rightarrow> (case ins k x t of Up\<^sub>i l a r \<Rightarrow> node\<^sub>i k (ls@[(l,a)]) r | T\<^sub>i a \<Rightarrow> T\<^sub>i (Node ls a) ) )" fun tree\<^sub>i::"'a up\<^sub>i \<Rightarrow> 'a btree" where "tree\<^sub>i (T\<^sub>i sub) = sub" | "tree\<^sub>i (Up\<^sub>i l a r) = (Node [(l,a)] r)" fun insert::"nat \<Rightarrow> 'a \<Rightarrow> 'a btree \<Rightarrow> 'a btree" where "insert k x t = tree\<^sub>i (ins k x t)" subsection "Deletion" text "The following deletion method is inspired by Bayer (70) and Fielding (80). Rather than stealing only a single node from the neighbour, the neighbour is fully merged with the potentially underflowing node. If the resulting node is still larger than allowed, the merged node is split again, using the rules known from insertion splits. If the resulting node has admissable size, it is simply kept in the tree." fun rebalance_middle_tree where "rebalance_middle_tree k ls Leaf sep rs Leaf = ( Node (ls@(Leaf,sep)#rs) Leaf )" | "rebalance_middle_tree k ls (Node mts mt) sep rs (Node tts tt) = ( if length mts \<ge> k \<and> length tts \<ge> k then Node (ls@(Node mts mt,sep)#rs) (Node tts tt) else ( case rs of [] \<Rightarrow> ( case node\<^sub>i k (mts@(mt,sep)#tts) tt of T\<^sub>i u \<Rightarrow> Node ls u | Up\<^sub>i l a r \<Rightarrow> Node (ls@[(l,a)]) r) | (Node rts rt,rsep)#rs \<Rightarrow> ( case node\<^sub>i k (mts@(mt,sep)#rts) rt of T\<^sub>i u \<Rightarrow> Node (ls@(u,rsep)#rs) (Node tts tt) | Up\<^sub>i l a r \<Rightarrow> Node (ls@(l,a)#(r,rsep)#rs) (Node tts tt)) ))" text "Deletion" text "All trees are merged with the right neighbour on underflow. 
Obviously for the last tree this would not work since it has no right neighbour. Therefore this tree, as the only exception, is merged with the left neighbour. However since we it does not make a difference, we treat the situation as if the second to last tree underflowed." fun rebalance_last_tree where "rebalance_last_tree k ts t = ( case last ts of (sub,sep) \<Rightarrow> rebalance_middle_tree k (butlast ts) sub sep [] t )" text "Rather than deleting the minimal key from the right subtree, we remove the maximal key of the left subtree. This is due to the fact that the last tree can easily be accessed and the left neighbour is way easier to access than the right neighbour, it resides in the same pair as the separating element to be removed." fun split_max where "split_max k (Node ts t) = (case t of Leaf \<Rightarrow> ( let (sub,sep) = last ts in (Node (butlast ts) sub, sep) )| _ \<Rightarrow> case split_max k t of (sub, sep) \<Rightarrow> (rebalance_last_tree k ts sub, sep) )" fun del where "del k x Leaf = Leaf" | "del k x (Node ts t) = ( case split ts x of (ls,[]) \<Rightarrow> rebalance_last_tree k ls (del k x t) | (ls,(sub,sep)#rs) \<Rightarrow> ( if sep \<noteq> x then rebalance_middle_tree k ls (del k x sub) sep rs t else if sub = Leaf then Node (ls@rs) t else let (sub_s, max_s) = split_max k sub in rebalance_middle_tree k ls sub_s max_s rs t ) )" fun reduce_root where "reduce_root Leaf = Leaf" | "reduce_root (Node ts t) = (case ts of [] \<Rightarrow> t | _ \<Rightarrow> (Node ts t) )" fun delete where "delete k x t = reduce_root (del k x t)" text "An invariant for intermediate states at deletion. In particular we allow for an underflow to 0 subtrees." fun almost_order where "almost_order k Leaf = True" | "almost_order k (Node ts t) = ( (length ts \<le> 2*k) \<and> (\<forall>s \<in> set (subtrees ts). order k s) \<and> order k t )" text "A recursive property of the \"spine\" we want to walk along for splitting off the maximum of the left subtree." fun nonempty_lasttreebal where "nonempty_lasttreebal Leaf = True" | "nonempty_lasttreebal (Node ts t) = ( (\<exists>ls tsub tsep. ts = (ls@[(tsub,tsep)]) \<and> height tsub = height t) \<and> nonempty_lasttreebal t )" subsection "Proofs of functional correctness" lemma split_set: assumes "split ts z = (ls,(a,b)#rs)" shows "(a,b) \<in> set ts" and "(x,y) \<in> set ls \<Longrightarrow> (x,y) \<in> set ts" and "(x,y) \<in> set rs \<Longrightarrow> (x,y) \<in> set ts" and "set ls \<union> set rs \<union> {(a,b)} = set ts" and "\<exists>x \<in> set ts. 
b \<in> Basic_BNFs.snds x" using split_conc assms by fastforce+ lemma split_length: "split ts x = (ls, rs) \<Longrightarrow> length ls + length rs = length ts" by (auto dest: split_conc) text "Isin proof" thm isin_simps (* copied from comment in List_Ins_Del *) lemma sorted_ConsD: "sorted_less (y # xs) \<Longrightarrow> x \<le> y \<Longrightarrow> x \<notin> set xs" by (auto simp: sorted_Cons_iff) lemma sorted_snocD: "sorted_less (xs @ [y]) \<Longrightarrow> y \<le> x \<Longrightarrow> x \<notin> set xs" by (auto simp: sorted_snoc_iff) lemmas isin_simps2 = sorted_lems sorted_ConsD sorted_snocD (*-----------------------------*) lemma isin_sorted: "sorted_less (xs@a#ys) \<Longrightarrow> (x \<in> set (xs@a#ys)) = (if x < a then x \<in> set xs else x \<in> set (a#ys))" by (auto simp: isin_simps2) (* lift to split *) lemma isin_sorted_split: assumes "sorted_less (inorder (Node ts t))" and "split ts x = (ls, rs)" shows "x \<in> set (inorder (Node ts t)) = (x \<in> set (inorder_list rs @ inorder t))" proof (cases ls) case Nil then have "ts = rs" using assms by (auto dest!: split_conc) then show ?thesis by simp next case Cons then obtain ls' sub sep where ls_tail_split: "ls = ls' @ [(sub,sep)]" by (metis list.simps(3) rev_exhaust surj_pair) then have "sep < x" using split_req(2)[of ts x ls' sub sep rs] using sorted_inorder_separators[OF assms(1)] using assms by simp then show ?thesis using assms(1) split_conc[OF assms(2)] ls_tail_split using isin_sorted[of "inorder_list ls' @ inorder sub" sep "inorder_list rs @ inorder t" x] by auto qed lemma isin_sorted_split_right: assumes "split ts x = (ls, (sub,sep)#rs)" and "sorted_less (inorder (Node ts t))" and "sep \<noteq> x" shows "x \<in> set (inorder_list ((sub,sep)#rs) @ inorder t) = (x \<in> set (inorder sub))" proof - from assms have "x < sep" proof - from assms have "sorted_less (separators ts)" by (simp add: sorted_inorder_separators) then show ?thesis using split_req(3) using assms by fastforce qed moreover have "sorted_less (inorder_list ((sub,sep)#rs) @ inorder t)" using assms sorted_wrt_append split_conc by fastforce ultimately show ?thesis using isin_sorted[of "inorder sub" "sep" "inorder_list rs @ inorder t" x] by simp qed theorem isin_set_inorder: "sorted_less (inorder t) \<Longrightarrow> isin t x = (x \<in> set (inorder t))" proof(induction t x rule: isin.induct) case (2 ts t x) then obtain ls rs where list_split: "split ts x = (ls, rs)" by (meson surj_pair) then have list_conc: "ts = ls @ rs" using split_conc by auto show ?case proof (cases rs) case Nil then have "isin (Node ts t) x = isin t x" by (simp add: list_split) also have "\<dots> = (x \<in> set (inorder t))" using "2.IH"(1) list_split Nil using "2.prems" sorted_inorder_induct_last by auto also have "\<dots> = (x \<in> set (inorder (Node ts t)))" using isin_sorted_split[of ts t x ls rs] using "2.prems" list_split list_conc Nil by simp finally show ?thesis . 
next case (Cons a list) then obtain sub sep where a_split: "a = (sub,sep)" by (cases a) then show ?thesis proof (cases "x = sep") case True then show ?thesis using list_conc Cons a_split list_split by auto next case False then have "isin (Node ts t) x = isin sub x" using list_split Cons a_split False by auto also have "\<dots> = (x \<in> set (inorder sub))" using "2.IH"(2) using "2.prems" False a_split list_conc list_split local.Cons sorted_inorder_induct_subtree by fastforce also have "\<dots> = (x \<in> set (inorder (Node ts t)))" using isin_sorted_split[OF "2.prems" list_split] using isin_sorted_split_right "2.prems" list_split Cons a_split False by simp finally show ?thesis . qed qed qed auto (* TODO way to use this for custom case distinction? *) lemma node\<^sub>i_cases: "length xs \<le> k \<or> (\<exists>ls sub sep rs. split_half xs = (ls,(sub,sep)#rs))" proof - have "\<not> length xs \<le> k \<Longrightarrow> length xs \<ge> 1" by linarith then show ?thesis using split_half_not_empty by blast qed lemma root_order_tree\<^sub>i: "root_order_up\<^sub>i (Suc k) t = root_order (Suc k) (tree\<^sub>i t)" apply (cases t) apply auto done lemma node\<^sub>i_root_order: assumes "length ts > 0" and "length ts \<le> 4*k+1" and "\<forall>x \<in> set (subtrees ts). order k x" and "order k t" shows "root_order_up\<^sub>i k (node\<^sub>i k ts t)" proof (cases "length ts \<le> 2*k") case True then show ?thesis using assms by (simp add: node\<^sub>i.simps) next case False then obtain ls sub sep rs where split_half_ts: "take (length ts div 2) ts = ls" "drop (length ts div 2) ts = (sub,sep)#rs" using split_half_not_empty[of ts] by auto then have length_rs: "length rs = length ts - (length ts div 2) - 1" using length_drop by (metis One_nat_def add_diff_cancel_right' list.size(4)) also have "\<dots> \<le> 4*k - ((4*k + 1) div 2)" using assms(2) by simp also have "\<dots> = 2*k" by auto finally have "length rs \<le> 2*k" by simp moreover have "length rs \<ge> k" using False length_rs by simp moreover have "set ((sub,sep)#rs) \<subseteq> set ts" by (metis split_half_ts(2) set_drop_subset) ultimately have o_r: "order k sub" "order k (Node rs t)" using split_half_ts assms by auto moreover have "length ls \<ge> k" using length_take assms split_half_ts False by auto moreover have "length ls \<le> 2*k" using assms(2) split_half_ts by auto ultimately have o_l: "order k (Node ls sub)" using set_take_subset assms split_half_ts by fastforce from o_r o_l show ?thesis by (simp add: node\<^sub>i.simps False split_half_ts) qed lemma node\<^sub>i_order_helper: assumes "length ts \<ge> k" and "length ts \<le> 4*k+1" and "\<forall>x \<in> set (subtrees ts). order k x" and "order k t" shows "case (node\<^sub>i k ts t) of T\<^sub>i t \<Rightarrow> order k t | _ \<Rightarrow> True" proof (cases "length ts \<le> 2*k") case True then show ?thesis using assms by (simp add: node\<^sub>i.simps) next case False then obtain sub sep rs where "drop (length ts div 2) ts = (sub,sep)#rs" using split_half_not_empty[of ts] by auto then show ?thesis using assms by (simp add: node\<^sub>i.simps) qed lemma node\<^sub>i_order: assumes "length ts \<ge> k" and "length ts \<le> 4*k+1" and "\<forall>x \<in> set (subtrees ts). 
order k x" and "order k t" shows "order_up\<^sub>i k (node\<^sub>i k ts t)" apply(cases "node\<^sub>i k ts t") using node\<^sub>i_root_order node\<^sub>i_order_helper assms apply fastforce apply (metis node\<^sub>i_root_order assms(2,3,4) le0 length_greater_0_conv list.size(3) node\<^sub>i.simps order_up\<^sub>i.simps(2) root_order_up\<^sub>i.simps(2) up\<^sub>i.distinct(1)) done (* explicit proof *) lemma ins_order: "order k t \<Longrightarrow> order_up\<^sub>i k (ins k x t)" proof(induction k x t rule: ins.induct) case (2 k x ts t) then obtain ls rs where split_res: "split ts x = (ls, rs)" by (meson surj_pair) then have split_app: "ls@rs = ts" using split_conc by simp show ?case proof (cases rs) case Nil then have "order_up\<^sub>i k (ins k x t)" using 2 split_res by simp then show ?thesis using Nil 2 split_app split_res Nil node\<^sub>i_order by (auto split!: up\<^sub>i.splits simp del: node\<^sub>i.simps) next case (Cons a list) then obtain sub sep where a_prod: "a = (sub, sep)" by (cases a) then show ?thesis proof (cases "x = sep") case True then show ?thesis using 2 a_prod Cons split_res by simp next case False then have "order_up\<^sub>i k (ins k x sub)" using "2.IH"(2) "2.prems" a_prod local.Cons split_app split_res by auto then show ?thesis using 2 split_app Cons length_append node\<^sub>i_order a_prod split_res by (auto split!: up\<^sub>i.splits simp del: node\<^sub>i.simps simp add: order_impl_root_order) qed qed qed simp (* notice this is almost a duplicate of ins_order *) lemma ins_root_order: assumes "root_order k t" shows "root_order_up\<^sub>i k (ins k x t)" proof(cases t) case (Node ts t) then obtain ls rs where split_res: "split ts x = (ls, rs)" by (meson surj_pair) then have split_app: "ls@rs = ts" using split_conc by fastforce show ?thesis proof (cases rs) case Nil then have "order_up\<^sub>i k (ins k x t)" using Node assms split_res by (simp add: ins_order) then show ?thesis using Nil Node split_app split_res assms node\<^sub>i_root_order by (auto split!: up\<^sub>i.splits simp del: node\<^sub>i.simps simp add: order_impl_root_order) next case (Cons a list) then obtain sub sep where a_prod: "a = (sub, sep)" by (cases a) then show ?thesis proof (cases "x = sep") case True then show ?thesis using assms Node a_prod Cons split_res by simp next case False then have "order_up\<^sub>i k (ins k x sub)" using Node a_prod assms ins_order local.Cons split_app by auto then show ?thesis using assms split_app Cons length_append Node node\<^sub>i_root_order a_prod split_res by (auto split!: up\<^sub>i.splits simp del: node\<^sub>i.simps simp add: order_impl_root_order) qed qed qed simp lemma height_list_split: "height_up\<^sub>i (Up\<^sub>i (Node ls a) b (Node rs t)) = height (Node (ls@(a,b)#rs) t) " by (induction ls) (auto simp add: max.commute) lemma node\<^sub>i_height: "height_up\<^sub>i (node\<^sub>i k ts t) = height (Node ts t)" proof(cases "length ts \<le> 2*k") case False then obtain ls sub sep rs where split_half_ts: "split_half ts = (ls, (sub, sep) # rs)" by (meson node\<^sub>i_cases) then have "node\<^sub>i k ts t = Up\<^sub>i (Node ls (sub)) sep (Node rs t)" using False by simp then show ?thesis using split_half_ts by (metis append_take_drop_id fst_conv height_list_split snd_conv split_half.elims) qed simp lemma bal_up\<^sub>i_tree: "bal_up\<^sub>i t = bal (tree\<^sub>i t)" apply(cases t) apply auto done lemma bal_list_split: "bal (Node (ls@(a,b)#rs) t) \<Longrightarrow> bal_up\<^sub>i (Up\<^sub>i (Node ls a) b (Node rs t))" by (auto simp add: image_constant_conv) lemma 
node\<^sub>i_bal: assumes "bal (Node ts t)" shows "bal_up\<^sub>i (node\<^sub>i k ts t)" using assms proof(cases "length ts \<le> 2*k") case False then obtain ls sub sep rs where split_half_ts: "split_half ts = (ls, (sub, sep) # rs)" by (meson node\<^sub>i_cases) then have "bal (Node (ls@(sub,sep)#rs) t)" using assms append_take_drop_id[where n="length ts div 2" and xs=ts] by auto then show ?thesis using split_half_ts assms False by (auto simp del: bal.simps bal_up\<^sub>i.simps dest!: bal_list_split[of ls sub sep rs t]) qed simp lemma height_up\<^sub>i_merge: "height_up\<^sub>i (Up\<^sub>i l a r) = height t \<Longrightarrow> height (Node (ls@(t,x)#rs) tt) = height (Node (ls@(l,a)#(r,x)#rs) tt)" by simp lemma ins_height: "height_up\<^sub>i (ins k x t) = height t" proof(induction k x t rule: ins.induct) case (2 k x ts t) then obtain ls rs where split_list: "split ts x = (ls,rs)" by (meson surj_pair) then have split_append: "ls@rs = ts" using split_conc by auto then show ?case proof (cases rs) case Nil then have height_sub: "height_up\<^sub>i (ins k x t) = height t" using 2 by (simp add: split_list) then show ?thesis proof (cases "ins k x t") case (T\<^sub>i a) then have "height (Node ts t) = height (Node ts a)" using height_sub by simp then show ?thesis using T\<^sub>i Nil split_list split_append by simp next case (Up\<^sub>i l a r) then have "height (Node ls t) = height (Node (ls@[(l,a)]) r)" using height_btree_order height_sub by (induction ls) auto then show ?thesis using 2 Nil split_list Up\<^sub>i split_append by (simp del: node\<^sub>i.simps add: node\<^sub>i_height) qed next case (Cons a list) then obtain sub sep where a_split: "a = (sub,sep)" by (cases a) then show ?thesis proof (cases "x = sep") case True then show ?thesis using Cons a_split 2 split_list by (simp del: height_btree.simps) next case False then have height_sub: "height_up\<^sub>i (ins k x sub) = height sub" by (metis "2.IH"(2) a_split Cons split_list) then show ?thesis proof (cases "ins k x sub") case (T\<^sub>i a) then have "height a = height sub" using height_sub by auto then have "height (Node (ls@(sub,sep)#rs) t) = height (Node (ls@(a,sep)#rs) t)" by auto then show ?thesis using T\<^sub>i height_sub False Cons 2 split_list a_split split_append by (auto simp add: image_Un max.commute finite_set_ins_swap) next case (Up\<^sub>i l a r) then have "height (Node (ls@(sub,sep)#list) t) = height (Node (ls@(l,a)#(r,sep)#list) t)" using height_up\<^sub>i_merge height_sub by fastforce then show ?thesis using Up\<^sub>i False Cons 2 split_list a_split split_append by (auto simp del: node\<^sub>i.simps simp add: node\<^sub>i_height image_Un max.commute finite_set_ins_swap) qed qed qed qed simp (* the below proof is overly complicated as a number of lemmas regarding height are missing *) lemma ins_bal: "bal t \<Longrightarrow> bal_up\<^sub>i (ins k x t)" proof(induction k x t rule: ins.induct) case (2 k x ts t) then obtain ls rs where split_res: "split ts x = (ls, rs)" by (meson surj_pair) then have split_app: "ls@rs = ts" using split_conc by fastforce show ?case proof (cases rs) case Nil then show ?thesis proof (cases "ins k x t") case (T\<^sub>i a) then have "bal (Node ls a)" unfolding bal.simps by (metis "2.IH"(1) "2.prems" append_Nil2 bal.simps(2) bal_up\<^sub>i.simps(1) height_up\<^sub>i.simps(1) ins_height local.Nil split_app split_res) then show ?thesis using Nil T\<^sub>i 2 split_res by simp next case (Up\<^sub>i l a r) then have "(\<forall>x\<in>set (subtrees (ls@[(l,a)])). bal x)" "(\<forall>x\<in>set (subtrees ls). 
height r = height x)" using 2 Up\<^sub>i Nil split_res split_app by simp_all (metis height_up\<^sub>i.simps(2) ins_height max_def) then show ?thesis unfolding ins.simps using Up\<^sub>i Nil 2 split_res by (simp del: node\<^sub>i.simps add: node\<^sub>i_bal) qed next case (Cons a list) then obtain sub sep where a_prod: "a = (sub, sep)" by (cases a) then show ?thesis proof (cases "x = sep") case True then show ?thesis using a_prod 2 split_res Cons by simp next case False then have "bal_up\<^sub>i (ins k x sub)" using 2 split_res using a_prod local.Cons split_app by auto show ?thesis proof (cases "ins k x sub") case (T\<^sub>i x1) then have "height x1 = height t" by (metis "2.prems" a_prod add_diff_cancel_left' bal_split_left(1) bal_split_left(2) height_bal_tree height_up\<^sub>i.simps(1) ins_height local.Cons plus_1_eq_Suc split_app) then show ?thesis using split_app Cons T\<^sub>i 2 split_res a_prod by auto next case (Up\<^sub>i l a r) (* The only case where explicit reasoning is required - likely due to the insertion of 2 elements in the list *) then have "\<forall>x \<in> set (subtrees (ls@(l,a)#(r,sep)#list)). bal x" using Up\<^sub>i split_app Cons 2 \<open>bal_up\<^sub>i (ins k x sub)\<close> by auto moreover have "\<forall>x \<in> set (subtrees (ls@(l,a)#(r,sep)#list)). height x = height t" using False Up\<^sub>i split_app Cons 2 \<open>bal_up\<^sub>i (ins k x sub)\<close> ins_height split_res a_prod apply auto by (metis height_up\<^sub>i.simps(2) sup.idem sup_nat_def) ultimately show ?thesis using Up\<^sub>i Cons 2 split_res a_prod by (simp del: node\<^sub>i.simps add: node\<^sub>i_bal) qed qed qed qed simp (* ins acts as ins_list wrt inorder *) (* "simple enough" to be automatically solved *) lemma node\<^sub>i_inorder: "inorder_up\<^sub>i (node\<^sub>i k ts t) = inorder (Node ts t)" apply(cases "length ts \<le> 2*k") apply (auto split!: list.splits) (* we want to only transform in one direction here.. 
*) supply R = sym[OF append_take_drop_id, of "map _ ts" "(length ts div 2)"] thm R apply(subst R) apply (simp del: append_take_drop_id add: take_map drop_map) done corollary node\<^sub>i_inorder_simps: "node\<^sub>i k ts t = T\<^sub>i t' \<Longrightarrow> inorder t' = inorder (Node ts t)" "node\<^sub>i k ts t = Up\<^sub>i l a r \<Longrightarrow> inorder l @ a # inorder r = inorder (Node ts t)" apply (metis inorder_up\<^sub>i.simps(1) node\<^sub>i_inorder) by (metis append_Cons inorder_up\<^sub>i.simps(2) node\<^sub>i_inorder self_append_conv2) lemma ins_sorted_inorder: "sorted_less (inorder t) \<Longrightarrow> (inorder_up\<^sub>i (ins k (x::('a::linorder)) t)) = ins_list x (inorder t)" apply(induction k x t rule: ins.induct) using split_axioms apply (auto split!: prod.splits list.splits up\<^sub>i.splits simp del: node\<^sub>i.simps simp add: node\<^sub>i_inorder node\<^sub>i_inorder_simps) (* from here on we prefer an explicit proof, showing how to apply the IH *) oops (* specialize ins_list_sorted since it is cumbersome to express "inorder_list ts" as "xs @ [a]" and always having to use the implicit properties of split*) lemma ins_list_split: assumes "split ts x = (ls, rs)" and "sorted_less (inorder (Node ts t))" shows "ins_list x (inorder (Node ts t)) = inorder_list ls @ ins_list x (inorder_list rs @ inorder t)" proof (cases ls) case Nil then show ?thesis using assms by (auto dest!: split_conc) next case Cons then obtain ls' sub sep where ls_tail_split: "ls = ls' @ [(sub,sep)]" by (metis list.distinct(1) rev_exhaust surj_pair) moreover have "sep < x" using split_req(2)[of ts x ls' sub sep rs] using sorted_inorder_separators using assms(1) assms(2) ls_tail_split by auto moreover have "sorted_less (inorder_list ls)" using assms sorted_wrt_append split_conc by fastforce ultimately show ?thesis using assms(2) split_conc[OF assms(1)] using ins_list_sorted[of "inorder_list ls' @ inorder sub" sep] by auto qed lemma ins_list_split_right_general: assumes "split ts x = (ls, (sub,sep)#rs)" and "sorted_less (inorder_list ts)" and "sep \<noteq> x" shows "ins_list x (inorder_list ((sub,sep)#rs) @ zs) = ins_list x (inorder sub) @ sep # inorder_list rs @ zs" proof - from assms have "x < sep" proof - from assms have "sorted_less (separators ts)" by (simp add: sorted_inorder_list_separators) then show ?thesis using split_req(3) using assms by fastforce qed moreover have "sorted_less (inorder_pair (sub,sep))" by (metis (no_types, lifting) assms(1) assms(2) concat.simps(2) concat_append list.simps(9) map_append sorted_wrt_append split_conc) ultimately show ?thesis using ins_list_sorted[of "inorder sub" "sep"] by auto qed (* this fits the actual use cases better *) corollary ins_list_split_right: assumes "split ts x = (ls, (sub,sep)#rs)" and "sorted_less (inorder (Node ts t))" and "sep \<noteq> x" shows "ins_list x (inorder_list ((sub,sep)#rs) @ inorder t) = ins_list x (inorder sub) @ sep # inorder_list rs @ inorder t" using assms sorted_wrt_append split.ins_list_split_right_general split_axioms by fastforce (* a simple lemma, missing from the standard as of now *) lemma ins_list_idem_eq_isin: "sorted_less xs \<Longrightarrow> x \<in> set xs \<longleftrightarrow> (ins_list x xs = xs)" apply(induction xs) apply auto done lemma ins_list_contains_idem: "\<lbrakk>sorted_less xs; x \<in> set xs\<rbrakk> \<Longrightarrow> (ins_list x xs = xs)" using ins_list_idem_eq_isin by auto declare node\<^sub>i.simps [simp del] declare node\<^sub>i_inorder [simp add] lemma ins_inorder: "sorted_less (inorder t) 
\<Longrightarrow> (inorder_up\<^sub>i (ins k x t)) = ins_list x (inorder t)" proof(induction k x t rule: ins.induct) case (1 k x) then show ?case by auto next case (2 k x ts t) then obtain ls rs where list_split: "split ts x = (ls,rs)" by (cases "split ts x") then have list_conc: "ts = ls@rs" using split.split_conc split_axioms by blast then show ?case proof (cases rs) case Nil then show ?thesis proof (cases "ins k x t") case (T\<^sub>i a) then have IH:"inorder a = ins_list x (inorder t)" using "2.IH"(1) "2.prems" list_split local.Nil sorted_inorder_induct_last by auto have "inorder_up\<^sub>i (ins k x (Node ts t)) = inorder_list ls @ inorder a" using list_split T\<^sub>i Nil by (auto simp add: list_conc) also have "\<dots> = inorder_list ls @ (ins_list x (inorder t))" by (simp add: IH) also have "\<dots> = ins_list x (inorder (Node ts t))" using ins_list_split using "2.prems" list_split Nil by auto finally show ?thesis . next case (Up\<^sub>i l a r) then have IH:"inorder_up\<^sub>i (Up\<^sub>i l a r) = ins_list x (inorder t)" using "2.IH"(1) "2.prems" list_split local.Nil sorted_inorder_induct_last by auto have "inorder_up\<^sub>i (ins k x (Node ts t)) = inorder_list ls @ inorder_up\<^sub>i (Up\<^sub>i l a r)" using list_split Up\<^sub>i Nil by (auto simp add: list_conc) also have "\<dots> = inorder_list ls @ ins_list x (inorder t)" using IH by simp also have "\<dots> = ins_list x (inorder (Node ts t))" using ins_list_split using "2.prems" list_split local.Nil by auto finally show ?thesis . qed next case (Cons h list) then obtain sub sep where h_split: "h = (sub,sep)" by (cases h) then have sorted_inorder_sub: "sorted_less (inorder sub)" using "2.prems" list_conc local.Cons sorted_inorder_induct_subtree by fastforce then show ?thesis proof(cases "x = sep") case True then have "x \<in> set (inorder (Node ts t))" using list_conc h_split Cons by simp then have "ins_list x (inorder (Node ts t)) = inorder (Node ts t)" using "2.prems" ins_list_contains_idem by blast also have "\<dots> = inorder_up\<^sub>i (ins k x (Node ts t))" using list_split h_split Cons True by auto finally show ?thesis by simp next case False then show ?thesis proof (cases "ins k x sub") case (T\<^sub>i a) then have IH:"inorder a = ins_list x (inorder sub)" using "2.IH"(2) "2.prems" list_split Cons sorted_inorder_sub h_split False by auto have "inorder_up\<^sub>i (ins k x (Node ts t)) = inorder_list ls @ inorder a @ sep # inorder_list list @ inorder t" using h_split False list_split T\<^sub>i Cons by simp also have "\<dots> = inorder_list ls @ ins_list x (inorder sub) @ sep # inorder_list list @ inorder t" using IH by simp also have "\<dots> = ins_list x (inorder (Node ts t))" using ins_list_split ins_list_split_right using list_split "2.prems" Cons h_split False by auto finally show ?thesis . next case (Up\<^sub>i l a r) then have IH:"inorder_up\<^sub>i (Up\<^sub>i l a r) = ins_list x (inorder sub)" using "2.IH"(2) False h_split list_split local.Cons sorted_inorder_sub by auto have "inorder_up\<^sub>i (ins k x (Node ts t)) = inorder_list ls @ inorder l @ a # inorder r @ sep # inorder_list list @ inorder t" using h_split False list_split Up\<^sub>i Cons by simp also have "\<dots> = inorder_list ls @ ins_list x (inorder sub) @ sep # inorder_list list @ inorder t" using IH by simp also have "\<dots> = ins_list x (inorder (Node ts t))" using ins_list_split ins_list_split_right using list_split "2.prems" Cons h_split False by auto finally show ?thesis . 
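(* in both the T\<^sub>i and the Up\<^sub>i case the spliced-in result of ins agrees with ins_list on the inorder, which closes the Cons case and, with it, the induction *)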
qed qed qed qed declare node\<^sub>i.simps [simp add] declare node\<^sub>i_inorder [simp del] thm ins.induct thm btree.induct (* wrapped up insert invariants *) lemma tree\<^sub>i_bal: "bal_up\<^sub>i u \<Longrightarrow> bal (tree\<^sub>i u)" apply(cases u) apply(auto) done lemma tree\<^sub>i_order: "\<lbrakk>k > 0; root_order_up\<^sub>i k u\<rbrakk> \<Longrightarrow> root_order k (tree\<^sub>i u)" apply(cases u) apply(auto simp add: order_impl_root_order) done lemma tree\<^sub>i_inorder: "inorder_up\<^sub>i u = inorder (tree\<^sub>i u)" apply (cases u) apply auto done lemma insert_bal: "bal t \<Longrightarrow> bal (insert k x t)" using ins_bal by (simp add: tree\<^sub>i_bal) lemma insert_order: "\<lbrakk>k > 0; root_order k t\<rbrakk> \<Longrightarrow> root_order k (insert k x t)" using ins_root_order by (simp add: tree\<^sub>i_order) lemma insert_inorder: "sorted_less (inorder t) \<Longrightarrow> inorder (insert k x t) = ins_list x (inorder t)" using ins_inorder by (simp add: tree\<^sub>i_inorder) text "Deletion proofs" thm list.simps lemma rebalance_middle_tree_height: assumes "height t = height sub" and "case rs of (rsub,rsep) # list \<Rightarrow> height rsub = height t | [] \<Rightarrow> True" shows "height (rebalance_middle_tree k ls sub sep rs t) = height (Node (ls@(sub,sep)#rs) t)" proof (cases "height t") case 0 then have "t = Leaf" "sub = Leaf" using height_Leaf assms by auto then show ?thesis by simp next case (Suc nat) then obtain tts tt where t_node: "t = Node tts tt" using height_Leaf by (cases t) simp then obtain mts mt where sub_node: "sub = Node mts mt" using assms by (cases sub) simp then show ?thesis proof (cases "length mts \<ge> k \<and> length tts \<ge> k") case False then show ?thesis proof (cases rs) case Nil then have "height_up\<^sub>i (node\<^sub>i k (mts@(mt,sep)#tts) tt) = height (Node (mts@(mt,sep)#tts) tt)" using node\<^sub>i_height by blast also have "\<dots> = max (height t) (height sub)" by (metis assms(1) height_up\<^sub>i.simps(2) height_list_split sub_node t_node) finally have height_max: "height_up\<^sub>i (node\<^sub>i k (mts @ (mt, sep) # tts) tt) = max (height t) (height sub)" by simp then show ?thesis proof (cases "node\<^sub>i k (mts@(mt,sep)#tts) tt") case (T\<^sub>i u) then have "height u = max (height t) (height sub)" using height_max by simp then have "height (Node ls u) = height (Node (ls@[(sub,sep)]) t)" by (induction ls) (auto simp add: max.commute) then show ?thesis using Nil False T\<^sub>i by (simp add: sub_node t_node) next case (Up\<^sub>i l a r) then have "height (Node (ls@[(sub,sep)]) t) = height (Node (ls@[(l,a)]) r)" using assms(1) height_max by (induction ls) auto then show ?thesis using Up\<^sub>i Nil sub_node t_node by auto qed next case (Cons a list) then obtain rsub rsep where a_split: "a = (rsub, rsep)" by (cases a) then obtain rts rt where r_node: "rsub = Node rts rt" using assms(2) Cons height_Leaf Suc by (cases rsub) simp_all then have "height_up\<^sub>i (node\<^sub>i k (mts@(mt,sep)#rts) rt) = height (Node (mts@(mt,sep)#rts) rt)" using node\<^sub>i_height by blast also have "\<dots> = max (height rsub) (height sub)" by (metis r_node height_up\<^sub>i.simps(2) height_list_split max.commute sub_node) finally have height_max: "height_up\<^sub>i (node\<^sub>i k (mts @ (mt, sep) # rts) rt) = max (height rsub) (height sub)" by simp then show ?thesis proof (cases "node\<^sub>i k (mts@(mt,sep)#rts) rt") case (T\<^sub>i u) then have "height u = max (height rsub) (height sub)" using height_max by simp then show ?thesis using 
T\<^sub>i False Cons r_node a_split sub_node t_node by auto next case (Up\<^sub>i l a r) then have height_max: "max (height l) (height r) = max (height rsub) (height sub)" using height_max by auto then show ?thesis using Cons a_split r_node Up\<^sub>i sub_node t_node by auto qed qed qed (simp add: sub_node t_node) qed lemma rebalance_last_tree_height: assumes "height t = height sub" and "ts = list@[(sub,sep)]" shows "height (rebalance_last_tree k ts t) = height (Node ts t)" using rebalance_middle_tree_height assms by auto lemma split_max_height: assumes "split_max k t = (sub,sep)" and "nonempty_lasttreebal t" and "t \<noteq> Leaf" shows "height sub = height t" using assms proof(induction t arbitrary: k sub sep) case Node1: (Node tts tt) then obtain ls tsub tsep where tts_split: "tts = ls@[(tsub,tsep)]" by auto then show ?case proof (cases tt) case Leaf then have "height (Node (ls@[(tsub,tsep)]) tt) = max (height (Node ls tsub)) (Suc (height tt))" using height_btree_last height_btree_order by metis moreover have "split_max k (Node tts tt) = (Node ls tsub, tsep)" using Leaf Node1 tts_split by auto ultimately show ?thesis using Leaf Node1 height_Leaf max_def by auto next case Node2: (Node l a) then obtain subsub subsep where sub_split: "split_max k tt = (subsub,subsep)" by (cases "split_max k tt") then have "height subsub = height tt" using Node1 Node2 by auto moreover have "split_max k (Node tts tt) = (rebalance_last_tree k tts subsub, subsep)" using Node1 Node2 tts_split sub_split by auto ultimately show ?thesis using rebalance_last_tree_height Node1 Node2 by auto qed qed auto lemma order_bal_nonempty_lasttreebal: "\<lbrakk>k > 0; root_order k t; bal t\<rbrakk> \<Longrightarrow> nonempty_lasttreebal t" proof(induction k t rule: order.induct) case (2 k ts t) then have "length ts > 0" by auto then obtain ls tsub tsep where ts_split: "ts = (ls@[(tsub,tsep)])" by (metis eq_fst_iff length_greater_0_conv snoc_eq_iff_butlast) moreover have "height tsub = height t" using "2.prems"(3) ts_split by auto moreover have "nonempty_lasttreebal t" using 2 order_impl_root_order by auto ultimately show ?case by simp qed simp lemma bal_sub_height: "bal (Node (ls@a#rs) t) \<Longrightarrow> (case rs of [] \<Rightarrow> True | (sub,sep)#_ \<Rightarrow> height sub = height t)" by (cases rs) (auto) lemma del_height: "\<lbrakk>k > 0; root_order k t; bal t\<rbrakk> \<Longrightarrow> height (del k x t) = height t" proof(induction k x t rule: del.induct) case (2 k x ts t) then obtain ls list where list_split: "split ts x = (ls, list)" by (cases "split ts x") then show ?case proof(cases list) case Nil then have "height (del k x t) = height t" using 2 list_split order_bal_nonempty_lasttreebal by (simp add: order_impl_root_order) moreover obtain lls sub sep where "ls = lls@[(sub,sep)]" using split_conc 2 list_split Nil by (metis append_Nil2 nonempty_lasttreebal.simps(2) order_bal_nonempty_lasttreebal) moreover have "Node ls t = Node ts t" using split_conc Nil list_split by auto ultimately show ?thesis using rebalance_last_tree_height 2 list_split Nil split_conc by (auto simp add: max.assoc sup_nat_def max_def) next case (Cons a rs) then have rs_height: "case rs of [] \<Rightarrow> True | (rsub,rsep)#_ \<Rightarrow> height rsub = height t" (* notice the difference if rsub and t are switched *) using "2.prems"(3) bal_sub_height list_split split_conc by blast from Cons obtain sub sep where a_split: "a = (sub,sep)" by (cases a) consider (sep_n_x) "sep \<noteq> x" | (sep_x_Leaf) "sep = x \<and> sub = Leaf" | (sep_x_Node) "sep 
= x \<and> (\<exists>ts t. sub = Node ts t)" using btree.exhaust by blast then show ?thesis proof cases case sep_n_x have height_t_sub: "height t = height sub" using "2.prems"(3) a_split list_split local.Cons split.split_set(1) split_axioms by fastforce have height_t_del: "height (del k x sub) = height t" by (metis "2.IH"(2) "2.prems"(1) "2.prems"(2) "2.prems"(3) a_split bal.simps(2) list_split local.Cons order_impl_root_order root_order.simps(2) sep_n_x some_child_sub(1) split_set(1)) then have "height (rebalance_middle_tree k ls (del k x sub) sep rs t) = height (Node (ls@((del k x sub),sep)#rs) t)" using rs_height rebalance_middle_tree_height by simp also have "\<dots> = height (Node (ls@(sub,sep)#rs) t)" using height_t_sub "2.prems" height_t_del by auto also have "\<dots> = height (Node ts t)" using 2 a_split sep_n_x list_split Cons split_set(1) split_conc by auto finally show ?thesis using sep_n_x Cons a_split list_split 2 by simp next case sep_x_Leaf then have "height (Node ts t) = height (Node (ls@rs) t)" using bal_split_last(2) "2.prems"(3) a_split list_split Cons split_conc by metis then show ?thesis using a_split list_split Cons sep_x_Leaf 2 by auto next case sep_x_Node then obtain sts st where sub_node: "sub = Node sts st" by blast obtain sub_s max_s where sub_split: "split_max k sub = (sub_s, max_s)" by (cases "split_max k sub") then have "height sub_s = height t" by (metis "2.prems"(1) "2.prems"(2) "2.prems"(3) a_split bal.simps(2) btree.distinct(1) list_split Cons order_bal_nonempty_lasttreebal order_impl_root_order root_order.simps(2) some_child_sub(1) split_set(1) split_max_height sub_node) then have "height (rebalance_middle_tree k ls sub_s max_s rs t) = height (Node (ls@(sub_s,sep)#rs) t)" using rs_height rebalance_middle_tree_height by simp also have "\<dots> = height (Node ts t)" using 2 a_split sep_x_Node list_split Cons split_set(1) \<open>height sub_s = height t\<close> by (auto simp add: split_conc[of ts]) finally show ?thesis using sep_x_Node Cons a_split list_split 2 sub_node sub_split by auto qed qed qed simp (* proof for inorders *) (* note: this works (as it should, since there is not even recursion involved) automatically. 
*yay* *) lemma rebalance_middle_tree_inorder: assumes "height t = height sub" and "case rs of (rsub,rsep) # list \<Rightarrow> height rsub = height t | [] \<Rightarrow> True" shows "inorder (rebalance_middle_tree k ls sub sep rs t) = inorder (Node (ls@(sub,sep)#rs) t)" apply(cases sub; cases t) using assms apply (auto split!: btree.splits up\<^sub>i.splits list.splits simp del: node\<^sub>i.simps simp add: node\<^sub>i_inorder_simps ) done lemma rebalance_last_tree_inorder: assumes "height t = height sub" and "ts = list@[(sub,sep)]" shows "inorder (rebalance_last_tree k ts t) = inorder (Node ts t)" using rebalance_middle_tree_inorder assms by auto lemma butlast_inorder_app_id: "xs = xs' @ [(sub,sep)] \<Longrightarrow> inorder_list xs' @ inorder sub @ [sep] = inorder_list xs" by simp lemma split_max_inorder: assumes "nonempty_lasttreebal t" and "t \<noteq> Leaf" shows "inorder_pair (split_max k t) = inorder t" using assms proof (induction k t rule: split_max.induct) case (1 k ts t) then show ?case proof (cases t) case Leaf then have "ts = butlast ts @ [last ts]" using "1.prems"(1) by auto moreover obtain sub sep where "last ts = (sub,sep)" by fastforce ultimately show ?thesis using Leaf apply (auto split!: prod.splits btree.splits) by (simp add: butlast_inorder_app_id) next case (Node tts tt) then have IH: "inorder_pair (split_max k t) = inorder t" using "1.IH" "1.prems"(1) by auto obtain sub sep where split_sub_sep: "split_max k t = (sub,sep)" by fastforce then have height_sub: "height sub = height t" by (metis "1.prems"(1) Node btree.distinct(1) nonempty_lasttreebal.simps(2) split_max_height) have "inorder_pair (split_max k (Node ts t)) = inorder (rebalance_last_tree k ts sub) @ [sep]" using Node 1 split_sub_sep by auto also have "\<dots> = inorder_list ts @ inorder sub @ [sep]" using rebalance_last_tree_inorder height_sub "1.prems" by (auto simp del: rebalance_last_tree.simps) also have "\<dots> = inorder (Node ts t)" using IH split_sub_sep by simp finally show ?thesis . qed qed simp lemma height_bal_subtrees_merge: "\<lbrakk>height (Node as a) = height (Node bs b); bal (Node as a); bal (Node bs b)\<rbrakk> \<Longrightarrow> \<forall>x \<in> set (subtrees as) \<union> {a}. height x = height b" by (metis Suc_inject Un_iff bal.simps(2) height_bal_tree singletonD) lemma bal_list_merge: assumes "bal_up\<^sub>i (Up\<^sub>i (Node as a) x (Node bs b))" shows "bal (Node (as@(a,x)#bs) b)" proof - have "\<forall>x\<in>set (subtrees (as @ (a, x) # bs)). bal x" using subtrees_split assms by auto moreover have "bal b" using assms by auto moreover have "\<forall>x\<in>set (subtrees as) \<union> {a} \<union> set (subtrees bs). 
height x = height b" using assms height_bal_subtrees_merge unfolding bal_up\<^sub>i.simps by blast ultimately show ?thesis by auto qed lemma node\<^sub>i_bal_up\<^sub>i: assumes "bal_up\<^sub>i (node\<^sub>i k ts t)" shows "bal (Node ts t)" using assms proof(cases "length ts \<le> 2*k") case False then obtain ls sub sep rs where split_list: "split_half ts = (ls, (sub,sep)#rs)" using node\<^sub>i_cases by blast then have "node\<^sub>i k ts t = Up\<^sub>i (Node ls sub) sep (Node rs t)" using False by auto moreover have "ts = ls@(sub,sep)#rs" by (metis append_take_drop_id fst_conv local.split_list snd_conv split_half.elims) ultimately show ?thesis using bal_list_merge[of ls sub sep rs t] assms by (simp del: bal.simps bal_up\<^sub>i.simps) qed simp lemma node\<^sub>i_bal_simp: "bal_up\<^sub>i (node\<^sub>i k ts t) = bal (Node ts t)" using node\<^sub>i_bal node\<^sub>i_bal_up\<^sub>i by blast lemma rebalance_middle_tree_bal: "bal (Node (ls@(sub,sep)#rs) t) \<Longrightarrow> bal (rebalance_middle_tree k ls sub sep rs t)" proof (cases t) case t_node: (Node tts tt) assume assms: "bal (Node (ls @ (sub, sep) # rs) t)" then obtain mts mt where sub_node: "sub = Node mts mt" by (cases sub) (auto simp add: t_node) have sub_heights: "height sub = height t" "bal sub" "bal t" using assms by auto show ?thesis proof (cases "length mts \<ge> k \<and> length tts \<ge> k") case True then show ?thesis using t_node sub_node assms by (auto simp del: bal.simps) next case False then show ?thesis proof (cases rs) case Nil have "height_up\<^sub>i (node\<^sub>i k (mts@(mt,sep)#tts) tt) = height (Node (mts@(mt,sep)#tts) tt)" using node\<^sub>i_height by blast also have "\<dots> = Suc (height tt)" by (metis height_bal_tree height_up\<^sub>i.simps(2) height_list_split max.idem sub_heights(1) sub_heights(3) sub_node t_node) also have "\<dots> = height t" using height_bal_tree sub_heights(3) t_node by fastforce finally have "height_up\<^sub>i (node\<^sub>i k (mts@(mt,sep)#tts) tt) = height t" by simp moreover have "bal_up\<^sub>i (node\<^sub>i k (mts@(mt,sep)#tts) tt)" by (metis bal_list_merge bal_up\<^sub>i.simps(2) node\<^sub>i_bal sub_heights(1) sub_heights(2) sub_heights(3) sub_node t_node) ultimately show ?thesis apply (cases "node\<^sub>i k (mts@(mt,sep)#tts) tt") using assms Nil sub_node t_node by auto next case (Cons r rs) then obtain rsub rsep where r_split: "r = (rsub,rsep)" by (cases r) then have rsub_height: "height rsub = height t" "bal rsub" using assms Cons by auto then obtain rts rt where r_node: "rsub = (Node rts rt)" apply(cases rsub) using t_node by simp have "height_up\<^sub>i (node\<^sub>i k (mts@(mt,sep)#rts) rt) = height (Node (mts@(mt,sep)#rts) rt)" using node\<^sub>i_height by blast also have "\<dots> = Suc (height rt)" by (metis Un_iff \<open>height rsub = height t\<close> assms bal.simps(2) bal_split_last(1) height_bal_tree height_up\<^sub>i.simps(2) height_list_split list.set_intros(1) Cons max.idem r_node r_split set_append some_child_sub(1) sub_heights(1) sub_node) also have "\<dots> = height rsub" using height_bal_tree r_node rsub_height(2) by fastforce finally have 1: "height_up\<^sub>i (node\<^sub>i k (mts@(mt,sep)#rts) rt) = height rsub" . 
moreover have 2: "bal_up\<^sub>i (node\<^sub>i k (mts@(mt,sep)#rts) rt)" by (metis bal_list_merge bal_up\<^sub>i.simps(2) node\<^sub>i_bal r_node rsub_height(1) rsub_height(2) sub_heights(1) sub_heights(2) sub_node) ultimately show ?thesis proof (cases "node\<^sub>i k (mts@(mt,sep)#rts) rt") case (T\<^sub>i u) then have "bal (Node (ls@(u,rsep)#rs) t)" using 1 2 Cons assms t_node subtrees_split sub_heights r_split rsub_height unfolding bal.simps by (auto simp del: height_btree.simps) then show ?thesis using Cons assms t_node sub_node r_split r_node False T\<^sub>i by (auto simp del: node\<^sub>i.simps bal.simps) next case (Up\<^sub>i l a r) then have "bal (Node (ls@(l,a)#(r,rsep)#rs) t)" using 1 2 Cons assms t_node subtrees_split sub_heights r_split rsub_height unfolding bal.simps by (auto simp del: height_btree.simps) then show ?thesis using Cons assms t_node sub_node r_split r_node False Up\<^sub>i by (auto simp del: node\<^sub>i.simps bal.simps) qed qed qed qed (simp add: height_Leaf) lemma rebalance_last_tree_bal: "\<lbrakk>bal (Node ts t); ts \<noteq> []\<rbrakk> \<Longrightarrow> bal (rebalance_last_tree k ts t)" using rebalance_middle_tree_bal append_butlast_last_id[of ts] apply(cases "last ts") apply(auto simp del: bal.simps rebalance_middle_tree.simps) done lemma split_max_bal: assumes "bal t" and "t \<noteq> Leaf" and "nonempty_lasttreebal t" shows "bal (fst (split_max k t))" using assms proof(induction k t rule: split_max.induct) case (1 k ts t) then show ?case proof (cases t) case Leaf then obtain sub sep where last_split: "last ts = (sub,sep)" using 1 by auto then have "height sub = height t" using 1 by auto then have "bal (Node (butlast ts) sub)" using 1 last_split by auto then show ?thesis using 1 Leaf last_split by auto next case (Node tts tt) then obtain sub sep where t_split: "split_max k t = (sub,sep)" by (cases "split_max k t") then have "height sub = height t" using 1 Node by (metis btree.distinct(1) nonempty_lasttreebal.simps(2) split_max_height) moreover have "bal sub" using "1.IH" "1.prems"(1) "1.prems"(3) Node t_split by fastforce ultimately have "bal (Node ts sub)" using 1 t_split Node by auto then show ?thesis using rebalance_last_tree_bal t_split Node 1 by (auto simp del: bal.simps rebalance_middle_tree.simps) qed qed simp lemma del_bal: assumes "k > 0" and "root_order k t" and "bal t" shows "bal (del k x t)" using assms proof(induction k x t rule: del.induct) case (2 k x ts t) then obtain ls rs where list_split: "split ts x = (ls,rs)" by (cases "split ts x") then show ?case proof (cases rs) case Nil then have "bal (del k x t)" using 2 list_split by (simp add: order_impl_root_order) moreover have "height (del k x t) = height t" using 2 del_height by (simp add: order_impl_root_order) moreover have "ts \<noteq> []" using 2 by auto ultimately have "bal (rebalance_last_tree k ts (del k x t))" using 2 Nil order_bal_nonempty_lasttreebal rebalance_last_tree_bal by simp then have "bal (rebalance_last_tree k ls (del k x t))" using list_split split_conc Nil by fastforce then show ?thesis using 2 list_split Nil by auto next case (Cons r rs) then obtain sub sep where r_split: "r = (sub,sep)" by (cases r) then have sub_height: "height sub = height t" "bal sub" using 2 Cons list_split split_set(1) by fastforce+ consider (sep_n_x) "sep \<noteq> x" | (sep_x_Leaf) "sep = x \<and> sub = Leaf" | (sep_x_Node) "sep = x \<and> (\<exists>ts t. 
sub = Node ts t)" using btree.exhaust by blast then show ?thesis proof cases case sep_n_x then have "bal (del k x sub)" "height (del k x sub) = height sub" using sub_height apply (metis "2.IH"(2) "2.prems"(1) "2.prems"(2) list_split local.Cons order_impl_root_order r_split root_order.simps(2) some_child_sub(1) split_set(1)) by (metis "2.prems"(1) "2.prems"(2) list_split Cons order_impl_root_order r_split root_order.simps(2) some_child_sub(1) del_height split_set(1) sub_height(2)) moreover have "bal (Node (ls@(sub,sep)#rs) t)" using "2.prems"(3) list_split Cons r_split split_conc by blast ultimately have "bal (Node (ls@(del k x sub,sep)#rs) t)" using bal_substitute_subtree[of ls sub sep rs t "del k x sub"] by metis then have "bal (rebalance_middle_tree k ls (del k x sub) sep rs t)" using rebalance_middle_tree_bal[of ls "del k x sub" sep rs t k] by metis then show ?thesis using 2 list_split Cons r_split sep_n_x by auto next case sep_x_Leaf moreover have "bal (Node (ls@rs) t)" using bal_split_last(1) list_split split_conc r_split by (metis "2.prems"(3) Cons) ultimately show ?thesis using 2 list_split Cons r_split by auto next case sep_x_Node then obtain sts st where sub_node: "sub = Node sts st" by auto then obtain sub_s max_s where sub_split: "split_max k sub = (sub_s, max_s)" by (cases "split_max k sub") then have "height sub_s = height sub" using split_max_height by (metis "2.prems"(1) "2.prems"(2) btree.distinct(1) list_split Cons order_bal_nonempty_lasttreebal order_impl_root_order r_split root_order.simps(2) some_child_sub(1) split_set(1) sub_height(2) sub_node) moreover have "bal sub_s" using split_max_bal by (metis "2.prems"(1) "2.prems"(2) btree.distinct(1) fst_conv list_split local.Cons order_bal_nonempty_lasttreebal order_impl_root_order r_split root_order.simps(2) some_child_sub(1) split_set(1) sub_height(2) sub_node sub_split) moreover have "bal (Node (ls@(sub,sep)#rs) t)" using "2.prems"(3) list_split Cons r_split split_conc by blast ultimately have "bal (Node (ls@(sub_s,sep)#rs) t)" using bal_substitute_subtree[of ls sub sep rs t "sub_s"] by metis then have "bal (Node (ls@(sub_s,max_s)#rs) t)" using bal_substitute_separator by metis then have "bal (rebalance_middle_tree k ls sub_s max_s rs t)" using rebalance_middle_tree_bal[of ls sub_s max_s rs t k] by metis then show ?thesis using 2 list_split Cons r_split sep_x_Node sub_node sub_split by auto qed qed qed simp lemma rebalance_middle_tree_order: assumes "almost_order k sub" and "\<forall>s \<in> set (subtrees (ls@rs)). 
order k s" "order k t" and "case rs of (rsub,rsep) # list \<Rightarrow> height rsub = height t | [] \<Rightarrow> True" and "length (ls@(sub,sep)#rs) \<le> 2*k" and "height sub = height t" shows "almost_order k (rebalance_middle_tree k ls sub sep rs t)" proof(cases t) case Leaf then have "sub = Leaf" using height_Leaf assms by auto then show ?thesis using Leaf assms by auto next case t_node: (Node tts tt) then obtain mts mt where sub_node: "sub = Node mts mt" using assms by (cases sub) (auto) then show ?thesis proof(cases "length mts \<ge> k \<and> length tts \<ge> k") case True then have "order k sub" using assms by (simp add: sub_node) then show ?thesis using True t_node sub_node assms by auto next case False then show ?thesis proof (cases rs) case Nil have "order_up\<^sub>i k (node\<^sub>i k (mts@(mt,sep)#tts) tt)" using node\<^sub>i_order[of k "mts@(mt,sep)#tts" tt] assms(1,3) t_node sub_node by (auto simp del: order_up\<^sub>i.simps node\<^sub>i.simps) then show ?thesis apply(cases "node\<^sub>i k (mts@(mt,sep)#tts) tt") using assms t_node sub_node False Nil apply (auto simp del: node\<^sub>i.simps) done next case (Cons r rs) then obtain rsub rsep where r_split: "r = (rsub,rsep)" by (cases r) then have rsub_height: "height rsub = height t" using assms Cons by auto then obtain rts rt where r_node: "rsub = (Node rts rt)" apply(cases rsub) using t_node by simp have "order_up\<^sub>i k (node\<^sub>i k (mts@(mt,sep)#rts) rt)" using node\<^sub>i_order[of k "mts@(mt,sep)#rts" rt] assms(1,2) t_node sub_node r_node r_split Cons by (auto simp del: order_up\<^sub>i.simps node\<^sub>i.simps) then show ?thesis apply(cases "node\<^sub>i k (mts@(mt,sep)#rts) rt") using assms t_node sub_node False Cons r_split r_node apply (auto simp del: node\<^sub>i.simps) done qed qed qed (* we have to proof the order invariant once for an underflowing last tree *) lemma rebalance_middle_tree_last_order: assumes "almost_order k t" and "\<forall>s \<in> set (subtrees (ls@(sub,sep)#rs)). order k s" and "rs = []" and "length (ls@(sub,sep)#rs) \<le> 2*k" and "height sub = height t" shows "almost_order k (rebalance_middle_tree k ls sub sep rs t)" proof (cases t) case Leaf then have "sub = Leaf" using height_Leaf assms by auto then show ?thesis using Leaf assms by auto next case t_node: (Node tts tt) then obtain mts mt where sub_node: "sub = Node mts mt" using assms by (cases sub) (auto) then show ?thesis proof(cases "length mts \<ge> k \<and> length tts \<ge> k") case True then have "order k sub" using assms by (simp add: sub_node) then show ?thesis using True t_node sub_node assms by auto next case False have "order_up\<^sub>i k (node\<^sub>i k (mts@(mt,sep)#tts) tt)" using node\<^sub>i_order[of k "mts@(mt,sep)#tts" tt] assms t_node sub_node by (auto simp del: order_up\<^sub>i.simps node\<^sub>i.simps) then show ?thesis apply(cases "node\<^sub>i k (mts@(mt,sep)#tts) tt") using assms t_node sub_node False Nil apply (auto simp del: node\<^sub>i.simps) done qed qed lemma rebalance_last_tree_order: assumes "ts = ls@[(sub,sep)]" and "\<forall>s \<in> set (subtrees (ts)). 
order k s" "almost_order k t" and "length ts \<le> 2*k" and "height sub = height t" shows "almost_order k (rebalance_last_tree k ts t)" using rebalance_middle_tree_last_order assms by auto lemma split_max_order: assumes "order k t" and "t \<noteq> Leaf" and "nonempty_lasttreebal t" shows "almost_order k (fst (split_max k t))" using assms proof(induction k t rule: split_max.induct) case (1 k ts t) then obtain ls sub sep where ts_not_empty: "ts = ls@[(sub,sep)]" by auto then show ?case proof (cases t) case Leaf then show ?thesis using ts_not_empty 1 by auto next case (Node) then obtain s_sub s_max where sub_split: "split_max k t = (s_sub, s_max)" by (cases "split_max k t") moreover have "height sub = height s_sub" by (metis "1.prems"(3) Node Pair_inject append1_eq_conv btree.distinct(1) nonempty_lasttreebal.simps(2) split_max_height sub_split ts_not_empty) ultimately have "almost_order k (rebalance_last_tree k ts s_sub)" using rebalance_last_tree_order[of ts ls sub sep k s_sub] 1 ts_not_empty Node sub_split by force then show ?thesis using Node 1 sub_split by auto qed qed simp lemma del_order: assumes "k > 0" and "root_order k t" and "bal t" shows "almost_order k (del k x t)" using assms proof (induction k x t rule: del.induct) case (2 k x ts t) then obtain ls list where list_split: "split ts x = (ls, list)" by (cases "split ts x") then show ?case proof (cases list) case Nil then have "almost_order k (del k x t)" using 2 list_split by (simp add: order_impl_root_order) moreover obtain lls lsub lsep where ls_split: "ls = lls@[(lsub,lsep)]" using 2 Nil list_split by (metis append_Nil2 nonempty_lasttreebal.simps(2) order_bal_nonempty_lasttreebal split_conc) moreover have "height t = height (del k x t)" using del_height 2 by (simp add: order_impl_root_order) moreover have "length ls = length ts" using Nil list_split by (auto dest: split_length) ultimately have "almost_order k (rebalance_last_tree k ls (del k x t))" using rebalance_last_tree_order[of ls lls lsub lsep k "del k x t"] by (metis "2.prems"(2) "2.prems"(3) Un_iff append_Nil2 bal.simps(2) list_split Nil root_order.simps(2) singletonI split_conc subtrees_split) then show ?thesis using 2 list_split Nil by auto next case (Cons r rs) from Cons obtain sub sep where r_split: "r = (sub,sep)" by (cases r) have inductive_help: "case rs of [] \<Rightarrow> True | (rsub,rsep)#_ \<Rightarrow> height rsub = height t" "\<forall>s\<in>set (subtrees (ls @ rs)). order k s" "Suc (length (ls @ rs)) \<le> 2 * k" "order k t" using Cons r_split "2.prems" list_split split_set by (auto dest: split_conc split!: list.splits) consider (sep_n_x) "sep \<noteq> x" | (sep_x_Leaf) "sep = x \<and> sub = Leaf" | (sep_x_Node) "sep = x \<and> (\<exists>ts t. 
sub = Node ts t)" using btree.exhaust by blast then show ?thesis proof cases case sep_n_x then have "almost_order k (del k x sub)" using 2 list_split Cons r_split order_impl_root_order by (metis bal.simps(2) root_order.simps(2) some_child_sub(1) split_set(1)) moreover have "height (del k x sub) = height t" by (metis "2.prems"(1) "2.prems"(2) "2.prems"(3) bal.simps(2) list_split Cons order_impl_root_order r_split root_order.simps(2) some_child_sub(1) del_height split_set(1)) ultimately have "almost_order k (rebalance_middle_tree k ls (del k x sub) sep rs t)" using rebalance_middle_tree_order[of k "del k x sub" ls rs t sep] using inductive_help using Cons r_split sep_n_x list_split by auto then show ?thesis using 2 Cons r_split sep_n_x list_split by auto next case sep_x_Leaf then have "almost_order k (Node (ls@rs) t)" using inductive_help by auto then show ?thesis using 2 Cons r_split sep_x_Leaf list_split by auto next case sep_x_Node then obtain sts st where sub_node: "sub = Node sts st" by auto then obtain sub_s max_s where sub_split: "split_max k sub = (sub_s, max_s)" by (cases "split_max k sub") then have "height sub_s = height t" using split_max_height by (metis "2.prems"(1) "2.prems"(2) "2.prems"(3) bal.simps(2) btree.distinct(1) list_split Cons order_bal_nonempty_lasttreebal order_impl_root_order r_split root_order.simps(2) some_child_sub(1) split_set(1) sub_node) moreover have "almost_order k sub_s" using split_max_order by (metis "2.prems"(1) "2.prems"(2) "2.prems"(3) bal.simps(2) btree.distinct(1) fst_conv list_split local.Cons order_bal_nonempty_lasttreebal order_impl_root_order r_split root_order.simps(2) some_child_sub(1) split_set(1) sub_node sub_split) ultimately have "almost_order k (rebalance_middle_tree k ls sub_s max_s rs t)" using rebalance_middle_tree_order[of k sub_s ls rs t max_s] inductive_help by auto then show ?thesis using 2 Cons r_split list_split sep_x_Node sub_split by auto qed qed qed simp (* sortedness of delete by inorder *) (* generalize del_list_sorted since its cumbersome to express inorder_list ts as xs @ [a] note that the proof scheme is almost identical to ins_list_sorted *) thm del_list_sorted lemma del_list_split: assumes "split ts x = (ls, rs)" and "sorted_less (inorder (Node ts t))" shows "del_list x (inorder (Node ts t)) = inorder_list ls @ del_list x (inorder_list rs @ inorder t)" proof (cases ls) case Nil then show ?thesis using assms by (auto dest!: split_conc) next case Cons then obtain ls' sub sep where ls_tail_split: "ls = ls' @ [(sub,sep)]" by (metis list.distinct(1) rev_exhaust surj_pair) moreover have "sep < x" using split_req(2)[of ts x ls' sub sep rs] using assms(1) assms(2) ls_tail_split sorted_inorder_separators by blast moreover have "sorted_less (inorder_list ls)" using assms sorted_wrt_append split_conc by fastforce ultimately show ?thesis using assms(2) split_conc[OF assms(1)] using del_list_sorted[of "inorder_list ls' @ inorder sub" sep] by auto qed (* del sorted requires sortedness of the full list so we need to change the right specialization a bit *) lemma del_list_split_right: assumes "split ts x = (ls, (sub,sep)#rs)" and "sorted_less (inorder (Node ts t))" and "sep \<noteq> x" shows "del_list x (inorder_list ((sub,sep)#rs) @ inorder t) = del_list x (inorder sub) @ sep # inorder_list rs @ inorder t" proof - from assms have "x < sep" proof - from assms have "sorted_less (separators ts)" using sorted_inorder_separators by blast then show ?thesis using split_req(3) using assms by fastforce qed moreover have "sorted_less (inorder 
sub @ sep # inorder_list rs @ inorder t)" using assms sorted_wrt_append[where xs="inorder_list ls"] by (auto dest!: split_conc) ultimately show ?thesis using del_list_sorted[of "inorder sub" "sep"] by auto qed thm del_list_idem lemma del_inorder: assumes "k > 0" and "root_order k t" and "bal t" and "sorted_less (inorder t)" shows "inorder (del k x t) = del_list x (inorder t)" using assms proof (induction k x t rule: del.induct) case (2 k x ts t) then obtain ls rs where list_split: "split ts x = (ls, rs)" by (meson surj_pair) then have list_conc: "ts = ls @ rs" using split.split_conc split_axioms by blast show ?case proof (cases rs) case Nil then have IH: "inorder (del k x t) = del_list x (inorder t)" by (metis "2.IH"(1) "2.prems" bal.simps(2) list_split order_impl_root_order root_order.simps(2) sorted_inorder_induct_last) have "inorder (del k x (Node ts t)) = inorder (rebalance_last_tree k ts (del k x t))" using list_split Nil list_conc by auto also have "\<dots> = inorder_list ts @ inorder (del k x t)" proof - obtain ts' sub sep where ts_split: "ts = ts' @ [(sub, sep)]" by (meson "2.prems"(1) "2.prems"(2) "2.prems"(3) nonempty_lasttreebal.simps(2) order_bal_nonempty_lasttreebal) then have "height sub = height t" using "2.prems"(3) by auto moreover have "height t = height (del k x t)" by (metis "2.prems"(1) "2.prems"(2) "2.prems"(3) bal.simps(2) del_height order_impl_root_order root_order.simps(2)) ultimately show ?thesis using rebalance_last_tree_inorder using ts_split by auto qed also have "\<dots> = inorder_list ts @ del_list x (inorder t)" using IH by blast also have "\<dots> = del_list x (inorder (Node ts t))" using "2.prems"(4) list_conc list_split Nil del_list_split by auto finally show ?thesis . next case (Cons h rs) then obtain sub sep where h_split: "h = (sub,sep)" by (cases h) then have node_sorted_split: "sorted_less (inorder (Node (ls@(sub,sep)#rs) t))" "root_order k (Node (ls@(sub,sep)#rs) t)" "bal (Node (ls@(sub,sep)#rs) t)" using "2.prems" h_split list_conc Cons by blast+ consider (sep_n_x) "sep \<noteq> x" | (sep_x_Leaf) "sep = x \<and> sub = Leaf" | (sep_x_Node) "sep = x \<and> (\<exists>ts t. sub = Node ts t)" using btree.exhaust by blast then show ?thesis proof cases case sep_n_x then have IH: "inorder (del k x sub) = del_list x (inorder sub)" by (metis "2.IH"(2) "2.prems"(1) "2.prems"(2) bal.simps(2) bal_split_left(1) h_split list_split local.Cons node_sorted_split(1) node_sorted_split(3) order_impl_root_order root_order.simps(2) some_child_sub(1) sorted_inorder_induct_subtree split_set(1)) from sep_n_x have "inorder (del k x (Node ts t)) = inorder (rebalance_middle_tree k ls (del k x sub) sep rs t)" using list_split Cons h_split by auto also have "\<dots> = inorder (Node (ls@(del k x sub, sep)#rs) t)" proof - have "height t = height (del k x sub)" using del_height using order_impl_root_order "2.prems" by (auto simp add: order_impl_root_order Cons list_conc h_split) moreover have "case rs of [] \<Rightarrow> True | (rsub, rsep) # list \<Rightarrow> height rsub = height t" using "2.prems"(3) bal_sub_height list_conc Cons by blast ultimately show ?thesis using rebalance_middle_tree_inorder by simp qed also have "\<dots> = inorder_list ls @ del_list x (inorder sub) @ sep # inorder_list rs @ inorder t" using IH by simp also have "\<dots> = del_list x (inorder (Node ts t))" using del_list_split[of ts x ls "(sub,sep)#rs" t] using del_list_split_right[of ts x ls sub sep rs t] using list_split list_conc h_split Cons "2.prems"(4) sep_n_x by auto finally show ?thesis . 
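(* remaining cases below: if sep = x and sub is a Leaf, the pair is simply dropped; if sub is a Node, sep is replaced by the maximum of sub obtained via split_max, and rebalance_middle_tree leaves the inorder unchanged *)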
next case sep_x_Leaf then have "del_list x (inorder (Node ts t)) = inorder (Node (ls@rs) t)" using list_conc h_split Cons using del_list_split[OF list_split "2.prems"(4)] by simp also have "\<dots> = inorder (del k x (Node ts t))" using list_split sep_x_Leaf list_conc h_split Cons by auto finally show ?thesis by simp next case sep_x_Node obtain ssub ssep where split_split: "split_max k sub = (ssub, ssep)" by fastforce from sep_x_Node have "x = sep" by simp then have "del_list x (inorder (Node ts t)) = inorder_list ls @ inorder sub @ inorder_list rs @ inorder t" using list_split list_conc h_split Cons "2.prems"(4) using del_list_split[OF list_split "2.prems"(4)] using del_list_sorted1[of "inorder sub" sep "inorder_list rs @ inorder t" x] sorted_wrt_append by auto also have "\<dots> = inorder_list ls @ inorder_pair (split_max k sub) @ inorder_list rs @ inorder t" using sym[OF split_max_inorder[of sub k]] using order_bal_nonempty_lasttreebal[of k sub] "2.prems" list_conc h_split Cons sep_x_Node by (auto simp del: split_max.simps simp add: order_impl_root_order) also have "\<dots> = inorder_list ls @ inorder ssub @ ssep # inorder_list rs @ inorder t" using split_split by auto also have "\<dots> = inorder (rebalance_middle_tree k ls ssub ssep rs t)" proof - have "height t = height ssub" using split_max_height by (metis "2.prems"(1,2,3) bal.simps(2) btree.distinct(1) h_split list_split local.Cons order_bal_nonempty_lasttreebal order_impl_root_order root_order.simps(2) sep_x_Node some_child_sub(1) split_set(1) split_split) moreover have "case rs of [] \<Rightarrow> True | (rsub, rsep) # list \<Rightarrow> height rsub = height t" using "2.prems"(3) bal_sub_height list_conc local.Cons by blast ultimately show ?thesis using rebalance_middle_tree_inorder by auto qed also have "\<dots> = inorder (del k x (Node ts t))" using list_split sep_x_Node list_conc h_split Cons split_split by auto finally show ?thesis by simp qed qed qed auto lemma reduce_root_order: "\<lbrakk>k > 0; almost_order k t\<rbrakk> \<Longrightarrow> root_order k (reduce_root t)" apply(cases t) apply(auto split!: list.splits simp add: order_impl_root_order) done lemma reduce_root_bal: "bal (reduce_root t) = bal t" apply(cases t) apply(auto split!: list.splits) done lemma reduce_root_inorder: "inorder (reduce_root t) = inorder t" apply (cases t) apply (auto split!: list.splits) done lemma delete_order: "\<lbrakk>k > 0; bal t; root_order k t\<rbrakk> \<Longrightarrow> root_order k (delete k x t)" using del_order by (simp add: reduce_root_order) lemma delete_bal: "\<lbrakk>k > 0; bal t; root_order k t\<rbrakk> \<Longrightarrow> bal (delete k x t)" using del_bal by (simp add: reduce_root_bal) lemma delete_inorder: "\<lbrakk>k > 0; bal t; root_order k t; sorted_less (inorder t)\<rbrakk> \<Longrightarrow> inorder (delete k x t) = del_list x (inorder t)" using del_inorder by (simp add: reduce_root_inorder) (* TODO (opt) runtime wrt runtime of split *) (* we are interested in a) number of comparisons b) number of fetches c) number of writes *) (* a) is dependent on t_split, the remainder is not (we assume the number of fetches and writes for split fun is 0 *) (* TODO simpler induction schemes /less boilerplate isabelle/src/HOL/ex/Induction_Schema *) subsection "Set specification by inorder" interpretation S_ordered: Set_by_Ordered where empty = empty_btree and insert = "insert (Suc k)" and delete = "delete (Suc k)" and isin = "isin" and inorder = "inorder" and inv = "invar_inorder (Suc k)" proof (standard, goal_cases) case (2 s x) then show 
?case by (simp add: isin_set_inorder) next case (3 s x) then show ?case using insert_inorder by simp next case (4 s x) then show ?case using delete_inorder by auto next case (6 s x) then show ?case using insert_order insert_bal by auto next case (7 s x) then show ?case using delete_order delete_bal by auto qed (simp add: empty_btree_def)+ (* if we remove this, it is not possible to remove the simp rules in subsequent contexts... *) declare node\<^sub>i.simps[simp del] end end
Require Import VST.msl.log_normalize. Require Import VST.msl.alg_seplog. Require Export VST.veric.Clight_base. Require Import VST.veric.compcert_rmaps. Require Import VST.veric.mpred. Require Import VST.veric.tycontext. Require Import VST.veric.expr2. Require Import VST.veric.binop_lemmas2. Local Open Scope pred. Definition tc_expr {CS: compspecs} (Delta: tycontext) (e: expr) : environ -> mpred:= fun rho => denote_tc_assert (typecheck_expr Delta e) rho. Definition tc_exprlist {CS: compspecs} (Delta: tycontext) (t : list type) (e: list expr) : environ -> mpred := fun rho => denote_tc_assert (typecheck_exprlist Delta t e) rho. Definition tc_lvalue {CS: compspecs} (Delta: tycontext) (e: expr) : environ -> mpred := fun rho => denote_tc_assert (typecheck_lvalue Delta e) rho. Definition tc_temp_id {CS: compspecs} (id : positive) (ty : type) (Delta : tycontext) (e : expr) : environ -> mpred := fun rho => denote_tc_assert (typecheck_temp_id id ty Delta e) rho. Definition tc_expropt {CS: compspecs} Delta (e: option expr) (t: type) : environ -> mpred := match e with None => `!!(t=Tvoid) | Some e' => tc_expr Delta (Ecast e' t) end. Definition tc_temp_id_load id tfrom Delta v : environ -> mpred := fun rho => !! (exists tto, (temp_types Delta) ! id = Some tto /\ tc_val tto (eval_cast tfrom tto (v rho))). Lemma extend_prop: forall P, boxy extendM (prop P). Proof. intros. hnf. apply pred_ext. intros ? ?. apply H; auto. apply extendM_refl. repeat intro. apply H. Qed. Hint Resolve extend_prop. Lemma extend_tc_temp_id_load : forall id tfrom Delta v rho, boxy extendM (tc_temp_id_load id tfrom Delta v rho). Proof. intros. unfold tc_temp_id_load. auto. Qed. Lemma extend_tc_andp: forall {CS: compspecs} A B rho, boxy extendM (denote_tc_assert A rho) -> boxy extendM (denote_tc_assert B rho) -> boxy extendM (denote_tc_assert (tc_andp A B) rho). Proof. intros. rewrite denote_tc_assert_andp. apply boxy_andp; auto. apply extendM_refl. Qed. Lemma extend_tc_bool: forall {CS: compspecs} A B rho, boxy extendM (denote_tc_assert (tc_bool A B) rho). Proof. intros. destruct A; simpl; apply extend_prop. Qed. Lemma extend_tc_int_or_ptr_type: forall {CS: compspecs} A rho, boxy extendM (denote_tc_assert (tc_int_or_ptr_type A) rho). Proof. intros. apply extend_tc_bool. Qed. Lemma extend_tc_Zge: forall {CS: compspecs} v i rho, boxy extendM (denote_tc_assert (tc_Zge v i) rho). Proof. intros. induction v; simpl; unfold_lift; simpl; unfold denote_tc_Zle; try apply extend_prop; repeat match goal with |- boxy _ (match ?A with _ => _ end) => destruct A end; try apply extend_prop. Qed. Lemma extend_tc_Zle: forall {CS: compspecs} v i rho, boxy extendM (denote_tc_assert (tc_Zle v i) rho). Proof. intros. induction v; simpl; unfold_lift; simpl; unfold denote_tc_Zge; try apply extend_prop; repeat match goal with |- boxy _ (match ?A with _ => _ end) => destruct A end; try apply extend_prop. Qed. Lemma extend_tc_iszero: forall {CS: compspecs} v rho, boxy extendM (denote_tc_assert (tc_iszero v) rho). Proof. intros. rewrite denote_tc_assert_iszero. destruct (eval_expr v rho); apply extend_prop. Qed. Lemma extend_valid_pointer': forall a b, boxy extendM (valid_pointer' a b). Proof. intros. apply boxy_i; intros. apply extendM_refl. unfold valid_pointer' in *. simpl in *. destruct a; simpl in *; auto. forget (b0, Ptrofs.unsigned i + b) as p. destruct (w @ p) eqn:?H; try contradiction. destruct H as [w2 ?]. apply (resource_at_join _ _ _ p) in H. rewrite H1 in H. inv H; auto. clear - H0 RJ. eapply join_nonidentity; eauto. destruct H as [w2 ?]. 
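(* remaining resource case: specialize the join to the address p, rewrite with the resource found there, and invert it *)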
apply (resource_at_join _ _ _ p) in H. rewrite H1 in H. inv H; auto. Qed. Lemma extend_andp: forall P Q, boxy extendM P -> boxy extendM Q -> boxy extendM (andp P Q). Proof. intros. apply boxy_i; intros. apply extendM_refl. destruct H2; split; eapply boxy_e; eauto. Qed. Lemma extend_orp: forall P Q, boxy extendM P -> boxy extendM Q -> boxy extendM (orp P Q). Proof. intros. apply boxy_i; intros. apply extendM_refl. destruct H2; [left|right]; eapply boxy_e; eauto. Qed. Lemma extend_tc_test_eq: forall {CS: compspecs} e1 e2 rho, boxy extendM (denote_tc_assert (tc_test_eq e1 e2) rho). Proof. intros. rewrite denote_tc_assert_test_eq'. apply boxy_i; intros. apply extendM_refl. simpl in *. super_unfold_lift. unfold denote_tc_test_eq in *. destruct (eval_expr e1 rho); auto; destruct (eval_expr e2 rho); auto. + destruct H0; split; auto. destruct H1 as [H1|H1]; [left|right]; apply (boxy_e _ _ (extend_valid_pointer' _ _) _ w' H H1). + destruct H0; split; auto. destruct H1 as [H1|H1]; [left|right]; apply (boxy_e _ _ (extend_valid_pointer' _ _) _ w' H H1). + unfold test_eq_ptrs in *. simple_if_tac; (eapply boxy_e; [apply extend_andp; try apply extend_orp; apply extend_valid_pointer' | apply H | apply H0]). Qed. Lemma extend_tc_test_order: forall {CS: compspecs} e1 e2 rho, boxy extendM (denote_tc_assert (tc_test_order e1 e2) rho). Proof. intros. rewrite denote_tc_assert_test_order'. apply boxy_i; intros. apply extendM_refl. simpl in *. super_unfold_lift. unfold denote_tc_test_order in *. destruct (eval_expr e1 rho); auto; destruct (eval_expr e2 rho); auto. + unfold test_order_ptrs in *. simple_if_tac; auto. eapply boxy_e; [apply extend_andp; eapply extend_orp; apply extend_valid_pointer' | apply H | apply H0]. Qed. Lemma extend_isCastResultType: forall {CS: compspecs} t t' v rho, boxy extendM (denote_tc_assert (isCastResultType t t' v) rho). Proof. intros. unfold isCastResultType; destruct (classify_cast t t'); repeat apply extend_tc_andp; try match goal with |- context [eqb_type _ _] => destruct (eqb_type t t') end; repeat match goal with | |- boxy _ (match ?A with _ => _ end) => destruct A | |- boxy _ (denote_tc_assert (if ?A then _ else _) rho) => destruct A | |- boxy _ (denote_tc_assert (match t' with _ => _ end) rho) => destruct t' as [ | [ | | | ] [ | ] ? | [ | ] ? | [ | ] ? | | | | | ] end; repeat apply extend_tc_andp; try apply extend_prop; try simple apply extend_tc_bool; try simple apply extend_tc_Zge; try simple apply extend_tc_Zle; try simple apply extend_tc_iszero; try simple apply extend_tc_test_eq; try simple apply extend_tc_test_order. Qed. Lemma extend_tc_temp_id: forall {CS: compspecs} id ty Delta e rho, boxy extendM (tc_temp_id id ty Delta e rho). Proof. intros. unfold tc_temp_id. unfold typecheck_temp_id. destruct ((temp_types Delta) ! id) as [? | ]; repeat apply extend_tc_andp; try apply extend_prop; try simple apply extend_tc_bool. apply extend_isCastResultType. Qed. Lemma extend_tc_samebase: forall {CS: compspecs} e1 e2 rho, boxy extendM (denote_tc_assert (tc_samebase e1 e2) rho). Proof. intros. unfold denote_tc_assert; simpl. unfold_lift. destruct (eval_expr e1 rho), (eval_expr e2 rho); apply extend_prop. Qed. Lemma extend_tc_nonzero: forall {CS: compspecs} v rho, boxy extendM (denote_tc_assert (tc_nonzero v) rho). Proof. intros. rewrite denote_tc_assert_nonzero. destruct (eval_expr v rho); apply extend_prop. Qed. Lemma extend_tc_nodivover: forall {CS: compspecs} e1 e2 rho, boxy extendM (denote_tc_assert (tc_nodivover e1 e2) rho). Proof. intros. rewrite denote_tc_assert_nodivover. 
destruct (eval_expr e1 rho); try apply extend_prop; destruct (eval_expr e2 rho); try apply extend_prop. Qed. Lemma extend_tc_nosignedover: forall op {CS: compspecs} e1 e2 rho, boxy extendM (denote_tc_assert (tc_nosignedover op e1 e2) rho). Proof. intros. unfold denote_tc_assert. unfold_lift. unfold denote_tc_nosignedover. destruct (eval_expr e1 rho); try apply extend_prop; destruct (eval_expr e2 rho); try apply extend_prop. Qed. Lemma extend_tc_nobinover: forall op {CS: compspecs} e1 e2 rho, boxy extendM (denote_tc_assert (tc_nobinover op e1 e2) rho). Proof. intros. unfold tc_nobinover. unfold if_expr_signed. destruct (typeof e1); try apply extend_prop. destruct s; try apply extend_prop. destruct (eval_expr e1 any_environ); try apply extend_prop; destruct (eval_expr e2 any_environ); try apply extend_prop; try apply extend_tc_nosignedover; simple_if_tac; try apply extend_prop; try apply extend_tc_nosignedover. destruct (eval_expr e1 any_environ); try apply extend_prop; destruct (eval_expr e2 any_environ); try apply extend_prop; try apply extend_tc_nosignedover; try destruct s; try apply extend_prop; try apply extend_tc_nosignedover. all: simple_if_tac; try apply extend_prop; try apply extend_tc_nosignedover. Qed. Lemma boxy_orp {A} `{H : ageable A}: forall (M: modality) , reflexive _ (app_mode M) -> forall P Q, boxy M P -> boxy M Q -> boxy M (P || Q). Proof. destruct M; intros. simpl in *. apply boxy_i; intros; auto. destruct H4; [left|right]; eapply boxy_e; eauto. Qed. Lemma extend_tc_orp: forall {CS: compspecs} A B rho, boxy extendM (denote_tc_assert A rho) -> boxy extendM (denote_tc_assert B rho) -> boxy extendM (denote_tc_assert (tc_orp A B) rho). Proof. intros. rewrite denote_tc_assert_orp. apply boxy_orp; auto. apply extendM_refl. Qed. Lemma extend_tc_ilt: forall {CS: compspecs} e i rho, boxy extendM (denote_tc_assert (tc_ilt e i) rho). Proof. intros. rewrite denote_tc_assert_ilt'. simpl. unfold_lift. destruct (eval_expr e rho); try apply extend_prop. Qed. Lemma extend_tc_llt: forall {CS: compspecs} e i rho, boxy extendM (denote_tc_assert (tc_llt e i) rho). Proof. intros. rewrite denote_tc_assert_llt'. simpl. unfold_lift. destruct (eval_expr e rho); try apply extend_prop. Qed. Lemma extend_tc_andp': forall {CS: compspecs} A B rho, boxy extendM (denote_tc_assert A rho) -> boxy extendM (denote_tc_assert B rho) -> boxy extendM (denote_tc_assert (tc_andp' A B) rho). Proof. intros. apply boxy_andp; auto. apply extendM_refl. Qed. Ltac extend_tc_prover := match goal with | |- _ => solve [immediate] | |- _ => apply extend_prop | |- _ => first [ simple apply extend_tc_bool | simple apply extend_tc_int_or_ptr_type | simple apply extend_tc_andp | simple apply extend_tc_andp' | simple apply extend_tc_Zge | simple apply extend_tc_Zle | simple apply extend_tc_iszero | simple apply extend_tc_nonzero | simple apply extend_tc_nodivover | simple apply extend_tc_nobinover | simple apply extend_tc_samebase | simple apply extend_tc_ilt | simple apply extend_tc_llt | simple apply extend_isCastResultType | simple apply extend_tc_test_eq | simple apply extend_tc_test_order] | |- boxy _ (denote_tc_assert (if ?A then _ else _) _) => destruct A | |- boxy _ (denote_tc_assert match tc_bool ?A _ with _ => _ end _) => destruct A | |- boxy _ (denote_tc_assert match ?A with Some _ => _ | None => _ end _) => destruct A end. 
Lemma extend_tc_binop: forall {CS: compspecs} Delta e1 e2 b t rho, boxy extendM (denote_tc_assert (typecheck_expr Delta e1) rho) -> boxy extendM (denote_tc_assert (typecheck_expr Delta e2) rho) -> boxy extendM (denote_tc_assert (isBinOpResultType b e1 e2 t) rho). Proof. intros. destruct b; unfold isBinOpResultType, tc_int_or_ptr_type, check_pp_int; match goal with | |- context [classify_add] => destruct (classify_add (typeof e1) (typeof e2)) eqn:C | |- context [classify_sub] => destruct (classify_sub (typeof e1) (typeof e2)) eqn:C | |- context [classify_cmp] => destruct (classify_cmp (typeof e1) (typeof e2)) eqn:C | |- context [classify_shift] => destruct (classify_shift (typeof e1) (typeof e2)) eqn:C | |- _ => idtac end; repeat extend_tc_prover; destruct (typeof e1) as [ | [ | | | ] [ | ] ? | [ | ] ? | [ | ] ? | | | | | ]; destruct (typeof e2) as [ | [ | | | ] [ | ] ? | [ | ] ? | [ | ] ? | | | | | ]; try inv C; try apply extend_prop; unfold binarithType, classify_binarith; repeat extend_tc_prover. Qed. Lemma extend_tc_expr: forall {CS: compspecs} Delta e rho, boxy extendM (tc_expr Delta e rho) with extend_tc_lvalue: forall {CS: compspecs} Delta e rho, boxy extendM (tc_lvalue Delta e rho). Proof. * clear extend_tc_expr. intros. unfold tc_expr. unfold tc_lvalue in extend_tc_lvalue. induction e; simpl; try pose proof (extend_tc_lvalue CS Delta e rho); clear extend_tc_lvalue; try solve [ repeat extend_tc_prover; try destruct t as [ | [ | | | ] [ | ] ? | [ | ] ? | [ | ] ? | | | | | ]; simpl; repeat extend_tc_prover ]. + (* unop *) repeat extend_tc_prover. destruct u; simpl; repeat extend_tc_prover; destruct (typeof e) as [ | [ | | | ] [ | ] ? | [ | ] ? | [ | ] ? | | | | | ]; simpl; repeat extend_tc_prover. unfold denote_tc_assert. unfold_lift. apply extend_tc_nosignedover. unfold denote_tc_assert. unfold_lift. apply extend_tc_nosignedover. unfold denote_tc_assert. unfold_lift. apply extend_tc_nosignedover. unfold denote_tc_assert. unfold_lift. apply extend_tc_nosignedover. + repeat extend_tc_prover. eapply extend_tc_binop; eauto. + destruct t as [ | [ | | | ] [ | ] ? | [ | ] ? | [ | ] ? | | | | | ]; repeat extend_tc_prover; destruct (typeof e) as [ | [ | | | ] [ | ] ? | [ | ] ? | [ | ] ? | | | | | ]; simpl; repeat extend_tc_prover. * clear extend_tc_lvalue. intros. unfold tc_expr in *. unfold tc_lvalue. induction e; simpl; try specialize (extend_tc_expr CS Delta e rho); repeat extend_tc_prover; destruct (typeof e) as [ | [ | | | ] [ | ] ? | [ | ] ? | [ | ] ? | | | | | ]; simpl; repeat extend_tc_prover. Qed. Lemma extend_tc_exprlist: forall {CS: compspecs} Delta t e rho, boxy extendM (tc_exprlist Delta t e rho). Proof. intros. unfold tc_exprlist. revert e; induction t; destruct e; intros; simpl; auto; try apply extend_prop. repeat apply extend_tc_andp; auto. apply extend_tc_expr. try simple apply extend_isCastResultType. Qed. Lemma extend_tc_expropt: forall {CS: compspecs} Delta e t rho, boxy extendM (tc_expropt Delta e t rho). Proof. intros. unfold tc_expropt. destruct e. + apply extend_tc_expr. + apply extend_prop. Qed. Hint Resolve extend_tc_expr extend_tc_temp_id extend_tc_temp_id_load extend_tc_exprlist extend_tc_expropt extend_tc_lvalue. Hint Resolve (@extendM_refl rmap _ _ _ _ _).
This Jupyter Notebook calculates the FRC between two 2D images. The basic requirement is for the images to be square, regardless of their size. The first cell loads the libraries needed for the computation. ```python import sys import imageio import numpy as np import secondary_utils as su import matplotlib.pyplot as plt from matplotlib.pyplot import figure import numpy.fft as fft import matplotlib.font_manager as font_manager from scipy.interpolate import interp1d from skimage.filters import window #from astropy.io import fits ``` Two important functions: - openimage (quite self-explanatory) - rings: generates the array of indices to be used to calculate the FRC. From the analytic ring geometry it returns the coordinates of the pixels that fall within each ring. ```python def openimage(path): return(imageio.imread(path)) def rings(x): shape = np.shape(x) nr,nc = shape nrdc = np.floor(nr/2) ncdc = np.floor(nc/2) r = np.arange(nr)-nrdc c = np.arange(nc)-ncdc [R,C] = np.meshgrid(r,c) index = np.round(np.sqrt(R**2+C**2)) indexf = np.floor(np.sqrt(R**2+C**2)) indexC = np.ceil(np.sqrt(R**2+C**2)) print(np.shape(index)) maxindex = nr/2 output = np.zeros(int(maxindex),dtype = complex) print('performed by index method') indices = [] indicesf, indicesC = [], [] for i in np.arange(int(maxindex)): indicesf.append(np.where(indexf == i)) indicesC.append(np.where(indexC == i)) for i in np.arange(int(maxindex)): output[i] = (sum(x[indicesf[i]])+sum(x[indicesC[i]]))/2 return output ``` The following two functions are the core: - apply_hanning_2d to apply the Hann window to the Fourier-transformed images in order to reduce the noise - FRC: Calculates the Fourier Ring Correlation through the formula: \begin{equation}\text{FRC}_{12}(r_{i})=\frac{\underset{r\in r_{i}}{\sum}F_{1}(r)\cdot F_{2}(r)^{*}}{\sqrt{\underset{r\in r_{i}}{\sum}|F_{1}|^{2}(r)\cdot\underset{r\in r_{i}}{\sum}|F_{2}|^{2}(r)}}\end{equation} ```python def apply_hanning_2d(img): wimage = img * window('hann', img.shape) print("Hann applied") return(wimage) def FRC(i1, i2,hanning): ''' Performing the Fourier transform of input images to determine the FRC ''' I1 = fft.fftshift(fft.fft2(i1)) I2 = fft.fftshift(fft.fft2(i2)) if hanning==True: I1 = apply_hanning_2d(I1) I2 = apply_hanning_2d(I2) C = rings(I1*np.conjugate(I2)) C = np.real(C) C1 = rings(np.abs(I1)**2) C2 = rings(np.abs(I2)**2) C = C.astype(np.float64) C1 = np.real(C1).astype(np.float64) C2 = np.real(C2).astype(np.float64) FSC = abs(C)/np.sqrt(C1*C2) x_fsc = np.arange(np.shape(C)[0])/(np.shape(i1)[0]/2) r = np.arange(np.shape(i1)[0]/2) # array (0:1:L/2-1) n = 2*np.pi*r # perimeter of r's from above n[0] = 1 eps = np.finfo(float).eps #t1 = np.divide(np.ones(np.shape(n)),n+eps) inv_sqrt_n = np.divide(np.ones(np.shape(n)),np.sqrt(n)) # 1/sqrt(n) x_T = r/(np.shape(i1)[0]/2) #one bit T1 = (0.5+2.4142*inv_sqrt_n)/(1.5+1.4142*inv_sqrt_n) #half bit T2 = (0.4142+2.287*inv_sqrt_n)/ (1.4142+1.287*inv_sqrt_n) return (x_fsc, FSC, x_T, T1,T2) def gauss_noise(image, mean=0, var=0.001): ''' Add Gaussian noise mean: mean value var: variance ''' image = np.array(image, dtype=float) noise = np.random.normal(mean, var ** 0.5, image.shape) out = image + noise if out.min() < 0: low_clip = -1. else: low_clip = 0.
out = np.clip(out, low_clip, 1.0) out = np.uint8(out*255) return out #lena = su.imageio_imread('./demo_images/514/Result of Siemens0.jpg') lena = su.imageio_imread('./demo_images/barques.jpg') lena = lena[:, :,0] lena = lena[750:1250,1500:2000] print(np.max(lena)) lena = lena.astype(float) lena = su.normalize_data_ab(0, 1, lena) ``` 244 Calculate the FRC and the threshold curve through the half-bit formula ```python xf, frc, x_T, T1,T2 = FRC(Image1, Image2,hanning=False) ``` (500, 500) performed by index method (500, 500) performed by index method (500, 500) performed by index method Plot everything! ```python plt.xlim(-0.01, 0.51) plt.plot(xf[:-1]/2, frc[:-1], label = 'FRC', color='black') #plt.plot(x_T[:-1], T1[:-1], label = 'one-bit', color='blue') plt.plot(x_T[:-1], T2[:-1], label = 'half-bit', color='red') plt.xticks(np.arange(0, 0.6, step=0.1)) plt.grid(linestyle='dotted', color='black', alpha=0.5) plt.ylabel("FRC($r_i$)") plt.xlabel("1/resolution") plt.title('Posizione 0') plt.legend() plt.show() ``` ```python ''' Image1 = gauss_noise(lena,0,0.001) Image2 = gauss_noise(lena,0,0.001) f, axrr = plt.subplots(1, 2) axrr[0].imshow(Image1, cmap='Greys_r') axrr[0].set_title('Measurement 1') axrr[1].imshow(Image2, cmap='Greys_r') axrr[1].set_title('Measurement 2') plt.show() xf, frc, x_T, T1,T2 = FRC(Image1, Image2,hanning=False) plt.xlim(-0.01, 0.51) plt.plot(xf[:-1]/2, frc[:-1], label = 'FRC', color='black') plt.plot(x_T[:-1], T1[:-1], label = 'one-bit', color='blue') plt.plot(x_T[:-1], T2[:-1], label = 'half-bit', color='red') plt.xticks(np.arange(0, 0.6, step=0.1)) plt.grid(linestyle='dotted', color='black', alpha=0.5) plt.ylabel("FRC($r_i$)") plt.xlabel("1/resolution") plt.title('Posizione 0') plt.legend() plt.show() ''' fig, axs = plt.subplots(2, 2, figsize=(15, 15)) fig.subplots_adjust(left=0.01, bottom=0.06, right=0.99, top=0.65, wspace=0.12) plt.rcParams["font.family"] = "serif" plt.style.use('classic') font = font_manager.FontProperties(family='serif', size=14) fig.patch.set_facecolor('white') coordinates = [(0,0),(0,1),(1,0),(1,1)] variation = [0.001,0.01,0.1,1] for i in range(4): Image1 = gauss_noise(lena,0,variation[i]) Image2 = gauss_noise(lena,0,variation[i]) xf, frc, x_T, T1,T2 = FRC(Image1, Image2,hanning=False) axs[coordinates[i]].plot(xf[:-1]/2, frc[:-1], label = 'FRC', color='black') axs[coordinates[i]].plot(x_T[:-1], T2[:-1], label = 'half-bit', color='red') axs[coordinates[i]].set_xlim(-0.01, 0.51) axs[coordinates[i]].set_xticks(np.arange(0, 0.6, step=0.1)) axs[coordinates[i]].grid(linestyle='dotted', color='black', alpha=0.5) axs[coordinates[i]].set_ylabel("FRC($r_i$)",fontname="serif") axs[coordinates[i]].set_xlabel("1/resolution",fontname="serif") axs[coordinates[i]].set_title('Noise variance: '+str(variation[i]),fontname="serif") axs[coordinates[i]].legend() file_path = "/Users/oriolsansplanell/Dropbox/Aplicaciones/Overleaf/Thesis/Figures/Fourier/" file_name = "4-example-frc" plt.savefig(file_path+file_name+".pdf", bbox_inches='tight') plt.show() fig=figure(figsize=(8,4)) plt.rcParams["font.family"] = "serif" plt.style.use('classic') font = font_manager.FontProperties(family='serif', size=12) fig.patch.set_facecolor('white') Image1 = gauss_noise(lena,0,0.001) Image2 = gauss_noise(lena,0,0.001) xf, frc, x_T, T1,T2 = FRC(Image1, Image2,hanning=False) plt.plot(xf[:-1]/2, frc[:-1], label = 'FRC', color='black') plt.plot(x_T[:-1], T2[:-1], label = 'half-bit', color='red') plt.xlim(-0.01, 0.51) plt.xticks(np.arange(0, 0.6, step=0.1)) plt.grid(linestyle='dotted', 
color='black', alpha=0.5) plt.ylabel("FRC($r_i$)",fontname="serif") plt.xlabel("1/resolution",fontname="serif") plt.title('Noise variance: '+str(variation[i]),fontname="serif") plt.legend() file_path = "/Users/oriolsansplanell/Dropbox/Aplicaciones/Overleaf/Thesis/Figures/Fourier/" file_name = "FRC-T" plt.savefig(file_path+file_name+".pdf", bbox_inches='tight') plt.show() ``` ```python for i in range(50): Image1 = openimage("Res-pos-0-0.tif")[300:-200+10*i,300:-200+10*i] Image2 = openimage("Res-pos-0-1.tif")[300:-200+10*i,300:-200+10*i] ''' f, axrr = plt.subplots(1, 2) axrr[0].imshow(Image1, cmap='Greys_r') axrr[0].set_title('Measurement 1') axrr[1].imshow(Image2, cmap='Greys_r') axrr[1].set_title('Measurement 2') plt.show() ''' xf, frc, x_T, T1,T2 = FRC(Image1, Image2,hanning=False) plt.xlim(-0.01, 0.51) plt.plot(xf[:-1]/2, frc[:-1], label = 'FRC', color='black') plt.plot(x_T[:-1], T1[:-1], label = 'one-bit', color='blue') plt.plot(x_T[:-1], T2[:-1], label = 'half-bit', color='red') plt.xticks(np.arange(0, 0.6, step=0.1)) plt.grid(linestyle='dotted', color='black', alpha=0.5) plt.ylabel("FRC($r_i$)") plt.xlabel("1/resolution") plt.title('Posizione 2') plt.legend() plt.show() plt.close() ``` ```python plt.xlim(-0.01, 0.51) plt.plot(xf[:-1]/2, frc[:-1], label = 'FRC', color='black') plt.plot(x_T[:-1], T2[:-1], label = 'half-bit', color='red') plt.xticks(np.arange(0, 0.6, step=0.1)) plt.grid(linestyle='dotted', color='black', alpha=0.5) plt.ylabel("FRC($r_i$)") plt.xlabel("1/resolution") plt.title('Spada Indiana 1 +/-') plt.legend() plt.show() ``` ```python plt.xlim(-0.01, 0.51) plt.plot(xf[:-1]/2, frc[:-1], label = 'FRC', color='black') plt.plot(x_T[:-1], T2[:-1], label = 'half-bit', color='red') plt.xticks(np.arange(0, 0.6, step=0.1)) plt.grid(linestyle='dotted', color='black', alpha=0.5) plt.ylabel("FRC($r_i$)") plt.xlabel("1/resolution") plt.title('Spada Indiana 1 0/0') plt.legend() plt.show() ``` ```python plt.xlim(-0.01, 0.51) plt.plot(xf[:-1]/2, frc[:-1], label = 'FRC', color='black') plt.plot(x_T[:-1], T2[:-1], label = 'half-bit', color='red') plt.xticks(np.arange(0, 0.6, step=0.1)) plt.grid(linestyle='dotted', color='black', alpha=0.5) plt.ylabel("FRC($r_i$)") plt.xlabel("1/resolution") plt.title('Spada Indiana original') plt.legend() plt.show() ``` ```python plt.xlim(-0.01, 0.51) plt.plot(xf[:-1]/2, frc[:-1], label = 'FRC', color='black') plt.plot(x_T[:-1], T2[:-1], label = 'half-bit', color='red') plt.xticks(np.arange(0, 0.6, step=0.1)) plt.grid(linestyle='dotted', color='black', alpha=0.5) plt.ylabel("FRC($r_i$)") plt.xlabel("1/resolution") plt.title('Spada Italiana 1 aprox') plt.legend() plt.show() ``` ```python ```
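As a possible follow-up to the analysis above (not part of the original notebook), the spatial frequency at which the FRC curve first drops below the half-bit threshold is commonly read off as a resolution estimate. The sketch below assumes that `xf`, `frc`, `x_T` and `T2` come from a previous `FRC(...)` call and mirrors the axis scaling used in the plots above; the helper name `estimate_resolution_cutoff` is introduced here only for illustration.

```python
import numpy as np

def estimate_resolution_cutoff(xf, frc, x_T, T2):
    """Return the first spatial frequency where the FRC drops below the half-bit curve.

    The threshold curve is interpolated onto the FRC frequency axis so the two
    curves can be compared point by point. Returns None if no crossing is found.
    """
    freqs = np.asarray(xf[:-1]) / 2          # same rescaling as used in the plots above
    frc_vals = np.asarray(frc[:-1])
    thr_vals = np.interp(freqs, np.asarray(x_T[:-1]), np.asarray(T2[:-1]))
    below = np.where(frc_vals < thr_vals)[0]
    return freqs[below[0]] if below.size else None

cutoff = estimate_resolution_cutoff(xf, frc, x_T, T2)
if cutoff is not None and cutoff > 0:
    print(f"FRC/half-bit crossing at {cutoff:.3f} cycles/pixel, i.e. ~{1 / cutoff:.1f} pixels")
else:
    print("No crossing found: the FRC stays above the half-bit threshold")
```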
## Functions # Creating functions is the main strength of a programming language, # even in a statistics-oriented language such as R. Previously we dealt with the # different types of object that we can find in R. In this unit we will focus on # understanding the structure and behaviour of functions, as well as on writing # and debugging them. ## Why write functions? # The first reason for writing functions is convenience. # This is true in situations where we must carry out a procedure that requires # running commands in a specific order, or re-run a series of commands that is # far back in the history and that we know we will need again. In cases like # these, writing a function that contains those steps and that we can use over # and over again can save us time and effort. A clear example of this is the # function 'sort', which runs the instructions needed to order a vector. x <- sample(1:15) # If we generate a vector by sampling the values from 1 to 15 sort (x) # running 'sort' returns this vector in order. # Inside this function are the instructions needed to perform the task, # and they will run without us having to do it step by step. sort # Later on we will come back to the structure of functions, their anatomy, # to understand exactly what each of their components is. # The second reason, which follows from the first, is generalisation. # We will often write a function to handle a particular case, a specific # data set or situation. However, insofar as the functions we write turn out # to be useful, it is important to be able to move from applying them to fixed # values to using variables that we can change in each case. Let us look at an # example: creating a function that lets us compute the area of a triangle. # The problem posed is the area of a triangle with a base of 3 cm and a height # of 4. The simplest way to estimate it is, directly, 4 * 3 / 2 # However, if we need to find the area of several triangles, we can create the # function 'area', using base and height as variables. area <- function(a, b) { a * b / 2 } area (4, 3) # We can use it with the measurements of our triangle area (12, 10) # or of any other one. # In this case, it is very easy to see that we moved from the particular case # of one pair of base and height values to a function that runs for any pair of # values. In this sense, the abstraction is similar to what one sees in # mathematics: moving from particular values to symbols that can be replaced in # an equation by known values, so that the function can be applied to any # similar situation. # Another, more illustrative example can be the following: given a vector `x` # with 15 elements, we can compute its mean as follows: x <- rpois(15, 4) p <- sum(x) / 15 p # This code only works for `x` or another vector with 15 elements. If we want # to adapt this code to compute the mean of any vector, regardless of its # length, we will have to replace this value with the length of a generic # vector, using `length(x)`.
p <- sum(x) / length(x) # In this case, even without creating a function, we have gained in # abstraction, since we left behind the concrete case in which `x` has 15 # elements and we can now use this line of code for any possible vector `x`. # But we still depend on this line of code, which we may have forgotten or # which may be far back in the history, and finding it again in order to run it # can be a waste of time and effort. Not to mention that the vector necessarily # has to be called `x` for it to work. In this case it may be simpler to create # a function that performs the desired operation. f <- function(v) { p <- sum(v) / length(v) p } f(rpois(19, 23)) # And we know it returns a result for any vector # Creating a function for such a simple operation may seem wasteful. That is # true, but it should also be kept in mind that new capabilities and details # can be added to the function. Consider the situation in which our vector # contains NAs: x <- rpois(20, 10) x[c(3, 6, 7)] <- NA f(x) # In that case, we will have to add a command to discard those values inside # the function itself, for example by creating an object 'v' within it that is # our vector without the NAs. f <- function(v) { v <- v[!is.na(v)] p <- sum(v) / length(v) p } f(x) # As a final detail, functions do not have to represent an abstraction or # generalisation of an operation from a concrete case. # It is possible to create functions without arguments, things as simple as: g <- function() print("Hola mundo") g() # In the next lesson we will explore the components that make up functions, # in a sort of anatomy lesson for them.
function a = moler3 ( m, n ) %*****************************************************************************80 % %% MOLER3 returns the MOLER3 matrix. % % Formula: % % if ( I == J ) % A(I,J) = I % else % A(I,J) = min(I,J) - 2 % % Example: % % N = 5 % % 1 -1 -1 -1 -1 % -1 2 0 0 0 % -1 0 3 1 1 % -1 0 1 4 2 % -1 0 1 2 5 % % Properties: % % A is integral, therefore det ( A ) is integral, and % det ( A ) * inverse ( A ) is integral. % % A is positive definite. % % A is symmetric: A' = A. % % Because A is symmetric, it is normal. % % Because A is normal, it is diagonalizable. % % A has a simple Cholesky factorization. % % A has one small eigenvalue. % % The family of matrices is nested as a function of N. % % Licensing: % % This code is distributed under the GNU LGPL license. % % Modified: % % 16 October 2007 % % Author: % % John Burkardt % % Parameters: % % Input, integer M, N, the number of rows and columns of A. % % Output, real A(M,N), the matrix. % for i = 1 : m for j = 1 : n if ( i == j ) a(i,j) = i; else a(i,j) = min ( i, j ) - 2; end end end return end
#include <NTL/ZZVec.h> #include <NTL/new.h> NTL_START_IMPL void ZZVec::SetSize(long n, long d) { if (n < 0 || d <= 0) Error("bad args to ZZVec::SetSize()"); if (v) Error("illegal ZZVec initialization"); len = n; bsize = d; if (n == 0) return; v = (ZZ*) NTL_MALLOC(n, sizeof(ZZ), 0); if (!v) Error("out of memory in ZZVec::SetSize()"); long i = 0; long m; long j; while (i < n) { m = ZZ_BlockConstructAlloc(v[i], d, n-i); for (j = 1; j < m; j++) ZZ_BlockConstructSet(v[i], v[i+j], j); i += m; } } void ZZVec::kill() { long n = len; len = 0; bsize = 0; if (n == 0) return; long i = 0; long m; while (i < n) { m = ZZ_BlockDestroy(v[i]); i += m; } free(v); v = 0; } ZZVec& ZZVec::operator=(const ZZVec& a) { if (this == &a) return *this; kill(); SetSize(a.len, a.bsize); long i; for (i = 0; i < a.len; i++) v[i] = (a.v)[i]; return *this; } ZZVec::ZZVec(const ZZVec& a) { v = 0; len = 0; bsize = 0; SetSize(a.len, a.bsize); long i; for (i = 0; i < a.len; i++) v[i] = (a.v)[i]; } void ZZVec::swap_impl(ZZVec& x, ZZVec& y) { ZZ* t1; long t2; t1 = x.v; x.v = y.v; y.v = t1; t2 = x.len; x.len = y.len; y.len = t2; t2 = x.bsize; x.bsize = y.bsize; y.bsize = t2; } NTL_END_IMPL
If $g$ converges to $a$ and $f - g$ converges to $0$, then $f$ converges to $a$.
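A short justification sketch (using only the standard sum rule for limits): write $f = g + (f - g)$; since $g \to a$ and $f - g \to 0$, the sum rule gives $f \to a + 0 = a$.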
[STATEMENT] lemma composition_of_tangle_diagrams: assumes "is_tangle_diagram x" and "is_tangle_diagram y" and "(domain_wall y = codomain_wall x)" shows "is_tangle_diagram (x \<circ> y)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. is_tangle_diagram (x \<circ> y) [PROOF STEP] using comp_of_tangle_dgms [PROOF STATE] proof (prove) using this: \<lbrakk>is_tangle_diagram ?y; is_tangle_diagram ?x \<and> codomain_wall ?x = domain_wall ?y\<rbrakk> \<Longrightarrow> is_tangle_diagram (?x \<circ> ?y) goal (1 subgoal): 1. is_tangle_diagram (x \<circ> y) [PROOF STEP] using assms [PROOF STATE] proof (prove) using this: \<lbrakk>is_tangle_diagram ?y; is_tangle_diagram ?x \<and> codomain_wall ?x = domain_wall ?y\<rbrakk> \<Longrightarrow> is_tangle_diagram (?x \<circ> ?y) is_tangle_diagram x is_tangle_diagram y domain_wall y = codomain_wall x goal (1 subgoal): 1. is_tangle_diagram (x \<circ> y) [PROOF STEP] by auto
! { dg-do compile } ! ! PR fortran/45170 ! PR fortran/52158 ! ! Contributed by Damian Rouson module speaker_class type speaker contains procedure :: speak end type contains function speak(this) class(speaker) ,intent(in) :: this character(:) ,allocatable :: speak end function subroutine say_something(somebody) class(speaker) :: somebody print *,somebody%speak() end subroutine end module
My father, when he was young, was interested in becoming a musician. His father (my grandad) said no. So he became an engineer instead, but his deep love and longing for music never waned. My mom, also, was on her way to becoming a piano virtuoso, but had to change course due to life and work circumstances. This was all before I was born. Right before my first birthday, my father passed away from liver cancer at the age of 49. It was his last wish that my mom and I go to New York for me to live, go to school and thrive. Michael Jackson. There will never be another. I still can't really wrap my head around the fact that he's gone. That, my friends, I cannot say :) ...But, the second most fun thing I've ever done is play on a float raft to thousands in a New York City parade. I'd love to be in a cereal commercial. Lol. I don't know why, but it's been a silly dream of mine. Or a makeup commercial. Maybe that's more suitable. I'd love to go on an arena tour with another artist. I sang the National Anthem to 30,000 people at Staples Center and it was quite a rush. I can't imagine what singing my own songs to that many people - who know the words - might feel like.
State Before: R : Type ?u.12477 inst✝ : CommMonoidWithZero R n✝ p : R k n a b : ℕ hab : coprime a b hn : IsPrimePow n ⊢ n ∣ a * b ↔ n ∣ a ∨ n ∣ b State After: case inl R : Type ?u.12477 inst✝ : CommMonoidWithZero R n✝ p : R k n b : ℕ hn : IsPrimePow n hab : coprime 0 b ⊢ n ∣ 0 * b ↔ n ∣ 0 ∨ n ∣ b case inr R : Type ?u.12477 inst✝ : CommMonoidWithZero R n✝ p : R k n a b : ℕ hab : coprime a b hn : IsPrimePow n ha : a ≠ 0 ⊢ n ∣ a * b ↔ n ∣ a ∨ n ∣ b Tactic: rcases eq_or_ne a 0 with (rfl | ha) State Before: case inr R : Type ?u.12477 inst✝ : CommMonoidWithZero R n✝ p : R k n a b : ℕ hab : coprime a b hn : IsPrimePow n ha : a ≠ 0 ⊢ n ∣ a * b ↔ n ∣ a ∨ n ∣ b State After: case inr.inl R : Type ?u.12477 inst✝ : CommMonoidWithZero R n✝ p : R k n a : ℕ hn : IsPrimePow n ha : a ≠ 0 hab : coprime a 0 ⊢ n ∣ a * 0 ↔ n ∣ a ∨ n ∣ 0 case inr.inr R : Type ?u.12477 inst✝ : CommMonoidWithZero R n✝ p : R k n a b : ℕ hab : coprime a b hn : IsPrimePow n ha : a ≠ 0 hb : b ≠ 0 ⊢ n ∣ a * b ↔ n ∣ a ∨ n ∣ b Tactic: rcases eq_or_ne b 0 with (rfl | hb) State Before: case inr.inr R : Type ?u.12477 inst✝ : CommMonoidWithZero R n✝ p : R k n a b : ℕ hab : coprime a b hn : IsPrimePow n ha : a ≠ 0 hb : b ≠ 0 ⊢ n ∣ a * b ↔ n ∣ a ∨ n ∣ b State After: case inr.inr R : Type ?u.12477 inst✝ : CommMonoidWithZero R n✝ p : R k n a b : ℕ hab : coprime a b hn : IsPrimePow n ha : a ≠ 0 hb : b ≠ 0 ⊢ n ∣ a * b → n ∣ a ∨ n ∣ b Tactic: refine' ⟨_, fun h => Or.elim h (fun i => i.trans ((@dvd_mul_right a b a hab).mpr (dvd_refl a))) fun i => i.trans ((@dvd_mul_left a b b hab.symm).mpr (dvd_refl b))⟩ State Before: case inr.inr R : Type ?u.12477 inst✝ : CommMonoidWithZero R n✝ p : R k n a b : ℕ hab : coprime a b hn : IsPrimePow n ha : a ≠ 0 hb : b ≠ 0 ⊢ n ∣ a * b → n ∣ a ∨ n ∣ b State After: case inr.inr.intro.intro.intro.intro R : Type ?u.12477 inst✝ : CommMonoidWithZero R n p✝ : R k✝ a b : ℕ hab : coprime a b ha : a ≠ 0 hb : b ≠ 0 p k : ℕ hp : Prime p left✝ : 0 < k hn : IsPrimePow (p ^ k) ⊢ p ^ k ∣ a * b → p ^ k ∣ a ∨ p ^ k ∣ b Tactic: obtain ⟨p, k, hp, _, rfl⟩ := (isPrimePow_nat_iff _).1 hn State Before: case inr.inr.intro.intro.intro.intro R : Type ?u.12477 inst✝ : CommMonoidWithZero R n p✝ : R k✝ a b : ℕ hab : coprime a b ha : a ≠ 0 hb : b ≠ 0 p k : ℕ hp : Prime p left✝ : 0 < k hn : IsPrimePow (p ^ k) ⊢ p ^ k ∣ a * b → p ^ k ∣ a ∨ p ^ k ∣ b State After: case inr.inr.intro.intro.intro.intro R : Type ?u.12477 inst✝ : CommMonoidWithZero R n p✝ : R k✝ a b : ℕ hab : coprime a b ha : a ≠ 0 hb : b ≠ 0 p k : ℕ hp : Prime p left✝ : 0 < k hn : IsPrimePow (p ^ k) ⊢ k ≤ ↑(factorization a) p + ↑(factorization b) p → k ≤ ↑(factorization a) p ∨ k ≤ ↑(factorization b) p Tactic: simp only [hp.pow_dvd_iff_le_factorization (mul_ne_zero ha hb), Nat.factorization_mul ha hb, hp.pow_dvd_iff_le_factorization ha, hp.pow_dvd_iff_le_factorization hb, Pi.add_apply, Finsupp.coe_add] State Before: case inr.inr.intro.intro.intro.intro R : Type ?u.12477 inst✝ : CommMonoidWithZero R n p✝ : R k✝ a b : ℕ hab : coprime a b ha : a ≠ 0 hb : b ≠ 0 p k : ℕ hp : Prime p left✝ : 0 < k hn : IsPrimePow (p ^ k) ⊢ k ≤ ↑(factorization a) p + ↑(factorization b) p → k ≤ ↑(factorization a) p ∨ k ≤ ↑(factorization b) p State After: case inr.inr.intro.intro.intro.intro R : Type ?u.12477 inst✝ : CommMonoidWithZero R n p✝ : R k✝ a b : ℕ hab : coprime a b ha : a ≠ 0 hb : b ≠ 0 p k : ℕ hp : Prime p left✝ : 0 < k hn : IsPrimePow (p ^ k) this : ↑(factorization a) p = 0 ∨ ↑(factorization b) p = 0 ⊢ k ≤ ↑(factorization a) p + ↑(factorization b) p → k ≤ ↑(factorization a) p ∨ k ≤ ↑(factorization b) p 
Tactic: have : a.factorization p = 0 ∨ b.factorization p = 0 := by rw [← Finsupp.not_mem_support_iff, ← Finsupp.not_mem_support_iff, ← not_and_or, ← Finset.mem_inter] intro t simpa using (Nat.factorization_disjoint_of_coprime hab).le_bot t State Before: case inr.inr.intro.intro.intro.intro R : Type ?u.12477 inst✝ : CommMonoidWithZero R n p✝ : R k✝ a b : ℕ hab : coprime a b ha : a ≠ 0 hb : b ≠ 0 p k : ℕ hp : Prime p left✝ : 0 < k hn : IsPrimePow (p ^ k) this : ↑(factorization a) p = 0 ∨ ↑(factorization b) p = 0 ⊢ k ≤ ↑(factorization a) p + ↑(factorization b) p → k ≤ ↑(factorization a) p ∨ k ≤ ↑(factorization b) p State After: no goals Tactic: cases' this with h h <;> simp [h, imp_or] State Before: case inl R : Type ?u.12477 inst✝ : CommMonoidWithZero R n✝ p : R k n b : ℕ hn : IsPrimePow n hab : coprime 0 b ⊢ n ∣ 0 * b ↔ n ∣ 0 ∨ n ∣ b State After: case inl R : Type ?u.12477 inst✝ : CommMonoidWithZero R n✝ p : R k n b : ℕ hn : IsPrimePow n hab : b = 1 ⊢ n ∣ 0 * b ↔ n ∣ 0 ∨ n ∣ b Tactic: simp only [Nat.coprime_zero_left] at hab State Before: case inl R : Type ?u.12477 inst✝ : CommMonoidWithZero R n✝ p : R k n b : ℕ hn : IsPrimePow n hab : b = 1 ⊢ n ∣ 0 * b ↔ n ∣ 0 ∨ n ∣ b State After: no goals Tactic: simp [hab, Finset.filter_singleton, not_isPrimePow_one] State Before: case inr.inl R : Type ?u.12477 inst✝ : CommMonoidWithZero R n✝ p : R k n a : ℕ hn : IsPrimePow n ha : a ≠ 0 hab : coprime a 0 ⊢ n ∣ a * 0 ↔ n ∣ a ∨ n ∣ 0 State After: case inr.inl R : Type ?u.12477 inst✝ : CommMonoidWithZero R n✝ p : R k n a : ℕ hn : IsPrimePow n ha : a ≠ 0 hab : a = 1 ⊢ n ∣ a * 0 ↔ n ∣ a ∨ n ∣ 0 Tactic: simp only [Nat.coprime_zero_right] at hab State Before: case inr.inl R : Type ?u.12477 inst✝ : CommMonoidWithZero R n✝ p : R k n a : ℕ hn : IsPrimePow n ha : a ≠ 0 hab : a = 1 ⊢ n ∣ a * 0 ↔ n ∣ a ∨ n ∣ 0 State After: no goals Tactic: simp [hab, Finset.filter_singleton, not_isPrimePow_one] State Before: R : Type ?u.12477 inst✝ : CommMonoidWithZero R n p✝ : R k✝ a b : ℕ hab : coprime a b ha : a ≠ 0 hb : b ≠ 0 p k : ℕ hp : Prime p left✝ : 0 < k hn : IsPrimePow (p ^ k) ⊢ ↑(factorization a) p = 0 ∨ ↑(factorization b) p = 0 State After: R : Type ?u.12477 inst✝ : CommMonoidWithZero R n p✝ : R k✝ a b : ℕ hab : coprime a b ha : a ≠ 0 hb : b ≠ 0 p k : ℕ hp : Prime p left✝ : 0 < k hn : IsPrimePow (p ^ k) ⊢ ¬p ∈ (factorization a).support ∩ (factorization b).support Tactic: rw [← Finsupp.not_mem_support_iff, ← Finsupp.not_mem_support_iff, ← not_and_or, ← Finset.mem_inter] State Before: R : Type ?u.12477 inst✝ : CommMonoidWithZero R n p✝ : R k✝ a b : ℕ hab : coprime a b ha : a ≠ 0 hb : b ≠ 0 p k : ℕ hp : Prime p left✝ : 0 < k hn : IsPrimePow (p ^ k) ⊢ ¬p ∈ (factorization a).support ∩ (factorization b).support State After: R : Type ?u.12477 inst✝ : CommMonoidWithZero R n p✝ : R k✝ a b : ℕ hab : coprime a b ha : a ≠ 0 hb : b ≠ 0 p k : ℕ hp : Prime p left✝ : 0 < k hn : IsPrimePow (p ^ k) t : p ∈ (factorization a).support ∩ (factorization b).support ⊢ False Tactic: intro t State Before: R : Type ?u.12477 inst✝ : CommMonoidWithZero R n p✝ : R k✝ a b : ℕ hab : coprime a b ha : a ≠ 0 hb : b ≠ 0 p k : ℕ hp : Prime p left✝ : 0 < k hn : IsPrimePow (p ^ k) t : p ∈ (factorization a).support ∩ (factorization b).support ⊢ False State After: no goals Tactic: simpa using (Nat.factorization_disjoint_of_coprime hab).le_bot t
(* * Copyright 2018, Data61 * Commonwealth Scientific and Industrial Research Organisation (CSIRO) * ABN 41 687 119 230. * * This software may be distributed and modified according to the terms of * the BSD 2-Clause license. Note that NO WARRANTY is provided. * See "LICENSE_BSD2.txt" for details. * * @TAG(DATA61_BSD) *) theory Shallow_Tac imports Shallow "Cogent.ML_Old" begin locale shallow = value_sem context shallow begin ML \<open> val scorres_abs_assmsN = "scorres_abs_assms" fun SOLVE_GOAL tac = tac THEN_MAYBE no_tac infix 1 XTHEN fun t1 XTHEN t2 = (DETERM t1) THEN (DETERM t2) fun add_thm thm atts name lthy = Local_Theory.notes [((Binding.name name, atts), [([thm], atts)])] lthy |> #2 fun mk_goal ctxt str = let val prop = Syntax.parse_term ctxt str |> Syntax.check_term ctxt val ctxt = Variable.auto_fixes prop ctxt in (ctxt, prop) end fun mk_scorres_nm fn_name = "scorres_" ^ fn_name fun abs_thm_name abs_name = mk_scorres_nm abs_name ^ "_assm" fun gen_scorres_abs_thm lthy Aname abs_name = let val prop = betapply (Syntax.read_term lthy ("\<lambda>(n::string). scorres " ^ Aname ^ "." ^ abs_name ^ " (AFun n ts) \<gamma> \<xi>"), HOLogic.mk_string abs_name) val ctxt = Variable.auto_fixes prop lthy val thm = Goal.prove ctxt [] [] (HOLogic.mk_Trueprop prop) (fn _ => Skip_Proof.cheat_tac ctxt 1) (* FIXME: def and proof instead *) val thm' = hd ((map (Goal.norm_result lthy) o Proof_Context.export ctxt lthy) [thm]) in (abs_thm_name abs_name, thm') end fun gen_scorres_abs_assms Aname abs_names lthy = let val thms = map (gen_scorres_abs_thm lthy Aname) abs_names; val athms = map #2 thms val atts = []; val lthy = fold (fn (name, thm) => add_thm thm atts name) thms lthy; val lthy = Local_Theory.notes [((Binding.name scorres_abs_assmsN, atts), [(athms, atts)])] lthy in lthy end fun gen_scorres_lemma skip Aname Dname generic_step fn_name (result_thms, callee_thms, lthy) = let val str = "valRel \<xi> v v' \<Longrightarrow> " ^ "scorres (" ^ Aname ^ "." ^ fn_name ^ " (shallow_tac__var v)) " ^ "(specialise ts " ^ Dname ^ "." ^ fn_name ^ ") [v'] \<xi>" val nm = mk_scorres_nm fn_name val _ = tracing (nm ^ ": \"" ^ str ^ "\"") val (ctxt, prop) = mk_goal lthy str val unfold_A = Proof_Context.get_thms lthy (Aname ^ "." ^ fn_name ^ "_def") val unfold_D = Proof_Context.get_thms lthy (Dname ^ "." ^ fn_name ^ "_def") val thm = Goal.prove_future ctxt [] [] prop (fn _ => fn st => let val start = Timing.start () in case st |> (if skip then Skip_Proof.cheat_tac ctxt 1 else ((resolve_tac ctxt result_thms 1 THEN atac 1) ORELSE (Local_Defs.unfold_tac ctxt unfold_D XTHEN Local_Defs.unfold_tac ctxt unfold_A XTHEN simp_tac (put_simpset HOL_basic_ss ctxt addsimps @{thms specialise.simps list.map fun_app_def}) 1 XTHEN REPEAT_DETERM ( (*(print_tac ctxt "" THEN no_tac) ORELSE*) generic_step ctxt 1 ORELSE resolve_tac ctxt callee_thms 1 )))) |> Seq.pull of NONE => Seq.empty | SOME (t, ts) => (tracing (nm ^ ": " ^ Timing.message (Timing.result start)); Seq.cons t ts) end) val thm = Simplifier.rewrite_rule lthy @{thms shallow_tac__var_def} thm val thm' = hd ((map (Goal.norm_result lthy) o Proof_Context.export ctxt lthy) [thm]) val (ctxt, callee_prop) = mk_goal lthy ("Trueprop (scorres " ^ Aname ^ "." ^ fn_name ^ " " ^ "(Fun " ^ Dname ^ "." 
^ fn_name ^ " ts) \<gamma> \<xi>)") val callee_thm = Goal.prove ctxt [] [] callee_prop (K (rtac @{thm scorres_fun} 1 THEN asm_full_simp_tac (lthy delsimprocs [Simplifier.the_simproc lthy "Product_Type.unit_eq"] addsimps [thm']) 1)) val callee_thm = hd ((map (Goal.norm_result lthy) o Proof_Context.export ctxt lthy) [callee_thm]) val callee_fun_app = callee_thm RS @{thm scorres_app} val lthy' = add_thm thm' [] nm lthy in (thm'::result_thms, callee_thm::callee_fun_app::callee_thms, lthy') end fun gen_scorres_lemmas skip fun_thms Aname Dname generic_step fn_names lthy = fold (gen_scorres_lemma skip Aname Dname generic_step) fn_names ([], fun_thms, lthy) |> #3 fun gen_scorres_lemmas' skip Absname Aname Dname fn_anames fn_dnames lthy = let val ([(_, abs_thms)], lthy) = if null fn_anames then ([("", [])], lthy) else gen_scorres_abs_assms Absname fn_anames lthy; val abs_fun_app = abs_thms RL @{thms scorres_app} val read_buckets = maps (fn n => Proof_Context.get_thms lthy n handle ERROR _ => []) (* Prioritise flattened cases over everything else, as the unflattened case rule might work locally even if the shallow representation is flattened *) val flat_case_net = Tactic.build_net (read_buckets ["scorres_flat_cases"]) (* flat_case lemmas have some cruft like "if tag_1 = ''Tag1''" in their assumptions, and applying the rule should instantiate tag_1 to a constant string, so we want to do just enough simplification to check whether two strings are equal and commit to one branch of the if. *) (* Need to mess a bit with the simpset, can't just use the lthy context in full_simp_tac: apparently it's not a super context? *) val flat_case_simp_ss = simpset_of ((clear_simpset @{context}) addsimps @{thms char.inject list.inject if_True if_False HOL.simp_thms}) fun flat_case_tac ctxt = resolve_from_net_tac ctxt flat_case_net THEN_ALL_NEW (full_simp_tac (put_simpset flat_case_simp_ss ctxt)) val step_net = Tactic.build_net (@{thms scorres_simple_step} @ read_buckets ["scorres_cases", "scorres_esacs", "scorres_cons", "scorres_structs"]) val step_simp_net = Tactic.build_net @{thms scorres_var scorres_app[OF scorres_var] scorres_lit} val struct_op_net = Tactic.build_net @{thms scorres_take scorres_put scorres_member} val struct_field_net = Tactic.build_net (read_buckets ["scorres_rec_fields"]) fun generic_step ctxt n = flat_case_tac ctxt n ORELSE (resolve_from_net_tac ctxt step_net n) ORELSE (resolve_from_net_tac ctxt step_simp_net n THEN SOLVE_GOAL (full_simp_tac ctxt n)) ORELSE (resolve_from_net_tac ctxt struct_op_net n THEN resolve_from_net_tac ctxt struct_field_net n) val lthy = gen_scorres_lemmas skip (abs_thms @ abs_fun_app) Aname Dname generic_step fn_dnames lthy; in lthy end val gen_scorres_lemmas = gen_scorres_lemmas' false \<close> (* TODO: - make \<xi> a definition - get rid of cheat_tac, using above to prove AFuns *) end end
If $w$ is inside the circle of radius $r$ centered at $z$, then $w$ is not on the circle.
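A one-line justification sketch: being inside the circle means $\lvert w - z \rvert < r$, while lying on the circle means $\lvert w - z \rvert = r$, and these two conditions cannot hold simultaneously.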
Require Import Arith. Require Import List. Import ListNotations. Require Import StructTact.StructTactics. Set Implicit Arguments. Lemma leb_false_lt : forall m n, leb m n = false -> n < m. Proof. induction m; intros. - discriminate. - simpl in *. break_match; subst; auto with arith. Qed. Lemma leb_true_le : forall m n, leb m n = true -> m <= n. Proof. induction m; intros. - auto with arith. - simpl in *. break_match; subst; auto with arith. discriminate. Qed. Lemma ltb_false_le : forall m n, m <? n = false -> n <= m. Proof. induction m; intros; destruct n; try discriminate; auto with arith. Qed. Lemma ltb_true_lt : forall m n, m <? n = true -> m < n. induction m; intros; destruct n; try discriminate; auto with arith. Qed. Ltac do_bool := repeat match goal with | [ H : beq_nat _ _ = true |- _ ] => apply beq_nat_true in H | [ H : beq_nat _ _ = false |- _ ] => apply beq_nat_false in H | [ H : andb _ _ = true |- _ ] => apply Bool.andb_true_iff in H | [ H : andb _ _ = false |- _ ] => apply Bool.andb_false_iff in H | [ H : orb _ _ = true |- _ ] => apply Bool.orb_prop in H | [ H : negb _ = true |- _ ] => apply Bool.negb_true_iff in H | [ H : negb _ = false |- _ ] => apply Bool.negb_false_iff in H | [ H : PeanoNat.Nat.ltb _ _ = true |- _ ] => apply ltb_true_lt in H | [ H : PeanoNat.Nat.ltb _ _ = false |- _ ] => apply ltb_false_le in H | [ H : leb _ _ = true |- _ ] => apply leb_true_le in H | [ H : leb _ _ = false |- _ ] => apply leb_false_lt in H | [ |- andb _ _ = true ]=> apply Bool.andb_true_iff | [ |- andb _ _ = false ] => apply Bool.andb_false_iff | [ |- leb _ _ = true ] => apply leb_correct | [ |- _ <> false ] => apply Bool.not_false_iff_true | [ |- beq_nat _ _ = false ] => apply beq_nat_false_iff | [ |- beq_nat _ _ = true ] => apply beq_nat_true_iff end. Definition null {A : Type} (xs : list A) : bool := match xs with | [] => true | _ => false end. Lemma null_sound : forall A (l : list A), null l = true -> l = []. Proof. destruct l; simpl in *; auto; discriminate. Qed. Lemma null_false_neq_nil : forall A (l : list A), null l = false -> l <> []. Proof. destruct l; simpl in *; auto; discriminate. Qed.
module Expr data Expr : Type where Value : (a:Int) -> Expr Plus : Expr -> Expr -> Expr Minus : Expr -> Expr -> Expr Mult : Expr -> Expr -> Expr evaluate : Expr -> Int evaluate (Value x) = x evaluate (Plus x y) = (evaluate x) + (evaluate y) evaluate (Minus x y) = (evaluate x) - (evaluate y) evaluate (Mult x y) = (evaluate x) * (evaluate y)
If $f$ satisfies the convexity inequality for all $t \in (0,1)$, then $f$ is convex on $A$.
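A short justification sketch, assuming the inequality is the usual $f(t x + (1 - t) y) \le t f(x) + (1 - t) f(y)$: at the endpoints it is trivial, since for $t = 0$ it reads $f(y) \le f(y)$ and for $t = 1$ it reads $f(x) \le f(x)$; hence validity on $(0,1)$ extends to all of $[0,1]$, which is convexity on $A$.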
\documentclass[11pt,a4paper]{article} %\usepackage{harvard} \usepackage[margin=1.5cm, bottom=2cm]{geometry} \usepackage{graphicx} \usepackage{tabularx} \usepackage{color} \usepackage{caption} \usepackage{tabu} \usepackage{gensymb} \usepackage{enumitem} \captionsetup[figure]{labelfont=bf} \captionsetup[table]{labelfont=bf} \title{\vspace{-2em}Virtual and Augmented Reality} \author{pbqk24} %\date{} \begin{document} \maketitle \vspace{-3em} \section*{Problem 1} The following functional interfaces were created: \begin{table}[h!] \caption{Functional interfaces implemented and their inputs/outputs} \label{table_functional_interfaces} \begin{tabu} to 1.0\linewidth {|X[l]|X[l]|X[l]|} \hline \textbf{Functional interface}&\textbf{Inputs}&\textbf{Outputs}\\ \hline Euler angle $\rightarrow$ quaternion conversion&Euler angles in radians of format $(x, y, z)$&A quaternion of format $(w, x, y, z)$\\ \hline Quaternion $\rightarrow$ Euler angle conversion&A quaternion of format $(w, x, y, z)$&Euler angles in radians of format $(x, y, z)$\\ \hline Quaternion conjugate calculation&A quaternion of format $(w, x, y, z)$& The conjugate of the input: $(w, -x, -y, -z)$\\ \hline Quaternion product calculation&Two quaternions $a, b$ of format $(w, x, y, z)$&The product of $a$ and $b$ of format $(w, x, y, z)$\\ \hline \end{tabu}\\ \end{table} \noindent Similar descriptions for each function are also included in the code. \section*{Problem 3} Several different alpha values were investigated for the tilt-corrected orientation tracking. They are listed in Table \ref{table_alpha_tilt} below, along with a description of the findings: \begin{table}[h!] \caption{Effect of Alpha Values on Drift Compensation in Tilt-Corrected Orientation Tracking} \label{table_alpha_tilt} \begin{tabu} to 1.0\linewidth {|r|X[l]|} \hline alpha&Description\\ \hline $0.01$&Minimal change from simple dead reckoning filter. Slightly reduces the manifestation of gimbal lock in $X$ and $Y$ axes during the $-90\degree$ rotation around $Y$. $X$ and $Y$ components are marginally further from $0$ at the end of the sequence.\\ \hline $0.03$&Slight increase in the manifestation of gimbal lock compared to an alpha of $0.01$. $X$ and $Y$ components converge marginally closer to $0$ than with an alpha of $0.01$ or with the dead reckoning filter, although the change is extremely subtle.\\ \hline $0.05$&Significantly reduced gimbal lock manifestation when $Y \simeq \pm90\degree$. $X$, $Y$ and $Z$ all converge to $0$ at the end of the sequence; however, the last rotation of $90\degree$ in $Z$ has decayed to $\sim45\degree$.\\ \hline $0.10$&Complete decay of all angles to $0$ after $t\sim8s$. Before this the $90\degree$ rotation around $X$ occurs, which reaches $90\degree$ then quickly falls towards $0$, instead of staying $\sim90\degree$ for a few seconds as expected. The orientation tracking has completely failed, as the (imperfect) corrections introduced by the tilt correction completely overpower any actual rotations registered after the first few seconds. This effect becomes more extreme as alpha is further increased.\\ \hline \end{tabu} \end{table} \noindent An alpha value of $0.03$ was chosen as the best value, as this causes the $X$ and $Y$ components to be closest to $0$ at the end of the sequence without decaying the tracking of any of the $\pm90\degree$ rotations. \section*{Problem 4} As for Problem 3 above, several alpha values were investigated for the yaw-corrected orientation tracking.
They are listed in Table \ref{table_alpha_yaw} below, along with a description of the findings. All investigation was done with an alpha of $0.03$ for tilt-correction; the alpha listed below is specifically the alpha used for yaw-correction. \begin{table}[h!] \caption{Effect of Alpha Values on Drift Compensation in Yaw-Corrected Orientation Tracking} \label{table_alpha_yaw} \begin{tabu} to 1.0\linewidth {|r|X[l]|} \hline alpha&Description\\ \hline $0.0001$&The $Z$ component is slightly closer to $0$ at the end of the sequence, but there is still significant drift present. Thus, the yaw correction being applied is not strong enough, and a higher alpha value should be used.\\ \hline $0.0002$&At this alpha value the $Z$ component is reduced to 0 at the end of the sequence, without decaying the $90\degree$ rotations or causing any visible inconsistencies in the graph. This produces the best results of all alpha values investigated.\\ \hline $0.0005$&The yaw drift is over-compensated, resulting in a significant drift in the $Z$ component at the end of the sequence, reaching $\sim\frac{-\pi}{2}$. This decay of the $Z$ component is noticeable from the end of the $-90\degree$ rotation around $Y$ at $t\sim16s$.\\ \hline $0.0010$&Similar results as with alpha $=0.0005$, but more extreme. The $Z$ rotation is decayed by $\sim\frac{-\pi}{2}$ from $t\sim16s$ to the end of the sequence.\\ \hline $0.0100$&The decay of $\sim\frac{-\pi}{2}$ in the $Z$ component is constant starting from $t\sim2s$. Significant noise is present in the $Z$ rotation, making it rapidly `jitter.'\\ \hline $0.1000$&The $Z$ rotation shows the same general shape as for alpha $=0.01$, but the amount of noise is significantly larger. There are also occasional jumps from $-\pi$ to $+\pi$, for example when the headset is rotated by $-90\degree$ around the $Z$ axis.\\ \hline \end{tabu} \end{table} \noindent An alpha value of $0.0002$ was chosen as the best value, as this caused the $Z$ component to converge with $X$ and $Y$ at $0$ at the end of the sequence. Additionally, with this alpha there is no noticeable distortion or over-correction at any point during the sequence. \section*{Problem 5} Note: the 3D plots produced for Problems 5 and 6 skip data points in order to render in the requested full-speed and half-speed. For full-speed, only every 25\textsuperscript{th} orientation is plotted, while for half-speed every 12\textsuperscript{th} orientation is plotted (these were the highest resolutions that worked for these speed constraints on my laptop). This is due to the rendering capabilities of Matplotlib, and the speeds may vary slightly depending on your hardware. \begin{figure}[h!] \centering \includegraphics[width=1.0\linewidth]{figures/Orientation_Tracking_2D} \caption{2D Visualization of Various Methods for Head Orientation Tracking} \label{2D_Orientation} \end{figure} Figure \ref{2D_Orientation} shows 2D plots of the results of head orientation tracking for the three different methods: simple dead reckoning (gyroscope integration), tilt correction (gyroscope + accelerometer), yaw correction (gyroscope + accelerometer + magnetometer). The $x$ axis represents time in seconds, and the $y$ axis of each plot represents the Euler angle for each axis in degrees, where $Z$ is vertical (the axis of yaw), $Y$ is horizontal (the axis of pitch), and $X$ is depth (the axis of roll). The simple dead reckoning and tilt-corrected orientation tracking approaches produce very similar results.
Because the simple dead reckoning suffers from very little drift in the $X$ and $Y$ axes, there is little change once this drift is corrected for. The tilt correction does slightly improve the results however. As expected, the most stable method is the yaw-corrected head orientation tracking. This can be clearly seen by looking at the $Z$ component values at the end of the sequences. In both the simple dead reckoning and tilt-corrected orientation tracking approaches the $Z$ component suffers from gyroscopic drift, which is successfully corrected for by the yaw correction. This can also be clearly seen in the 3D plot shown in Figure \ref{3D_Orientation}. This is a snapshot from the end of the sequence of rotations, and thus the gyroscopic drift is relatively high. In this image both the simple dead reckoning and tilt correction suffer from yaw drift, while the yaw corrected tracking does not (most noticeable when comparing the $Y$ component vectors). This further showcases the improved stability of orientation tracking achieved by yaw correction. \begin{figure}[h!] \centering \includegraphics[width=1.0\linewidth]{figures/Orientation_Tracking_3D} \caption{Example 3D Result of Head Orientation Tracking} \label{3D_Orientation} \end{figure} \section*{Problem 6} In order to try to achieve positional tracking using the IMU, I double integrated the accelerometer readings and applied a two-bar kinematic head model. This assumes that the head is locked to the neck, the neck can be rotated about its base (at the joint to the torso), and the torso can be rotated around the waist. This was applied as follows: \begin{enumerate}[noitemsep] \item Convert acceleration readings to global acceleration by rotating the readings by the current head orientation estimate: $a_{global}=q*(0, a_x, a_y, a_z)*q^{-1}$ \item Extract linear acceleration by subtracting gravity (1) from the $Z$ component \item Integrate the linear acceleration into velocity and convert into angular velocity as $\omega=\frac{a_l*\Delta t}{l_t}$, where $a_l$ is linear acceleration, $\Delta t$ is the time since the last time step, and $l_t$ is an estimated length of the torso, e.g. $l_t=0.8m$. \textbf{Note:} this makes the assumption that the linear acceleration is caused by a rotation of the body, rather than the neck and body combined \item Add the previous angular velocity reading to produce the current angular velocity \item Apply the simple dead reckoning filter to get an estimate for the orientation of the torso, $q_1$ \item Calculate the position of the torso relative to the origin (the waist) as $r_1=q_1*(0,0,l_t)*q_1^{-1}$ \item Apply the simple kinematic head model to get the position of the head relative to the torso, $r$, as $r=q*(0,0,l_n)*q^{-1}$, where $l_n$ is the estimated length of the neck, e.g. $l_n=0.15m$, and $q$ is the current estimated head orientation \item Compose the two positions to get the global position of the head as $p=r_1+r$ \end{enumerate} This approach constrains the position of the head to what could physically be reached by a person anchored at the waist, such as when sitting down. Even with these constraints and the short duration of the data sequence (around 28s) there is significant drift in the positional tracking. One example of this is shown in Figure \ref{Position_Y_Rotation}. This shows a snapshot of the head position tracking for all three methods of orientation tracking, shortly after the $+90\degree$ rotation about the $Y$ axis ($t\sim12s$). 
In this image the gyroscope integration based method shows some drift in the $X$ and $Y$ components, and very high drift in $Z$ (see also Figure \ref{Position_2D}). The other tracking methods show slight drift, as they have a lower $X$ position than would be expected, but fare much better than the gyroscope integration. This is likely due to the poor performance of the gyroscope integration orientation tracking resulting in much larger errors due to the quadratic growth of drift error. \begin{figure}[h!] \centering \includegraphics[width=1.0\linewidth]{figures/Position_Tracking_First_Y_Rotation} \caption{Example 3D Result of Various Methods for Head Position Tracking} \label{Position_Y_Rotation} \end{figure} Figure \ref{Position_2D} shows the tri-axial position tracking output for the three types of tracking. The left image shows gyroscopic integration position tracking. The right image shows tilt-corrected and yaw-corrected tracking, which produced extremely similar results. The gyroscope results are vaguely similar to the other methods until $t\sim5s$, where the positions all enter sinusoidal patterns. This is obviously incorrect, and such tracking would likely make a user of the system ill very quickly. As previously mentioned, this drastic difference in positional tracking performance is likely caused by the poor performance of gyroscopic integration for orientation tracking, and the compounding of errors amplified by integration which are not corrected for. \begin{figure}[h!] \centering \begin{minipage}{0.45\linewidth} \includegraphics[width=0.9\linewidth]{figures/Position_Tracking_2D_Gyro} \end{minipage} \begin{minipage}{0.45\linewidth} \includegraphics[width=0.9\linewidth]{figures/Position_Tracking_2D_Yaw_Tilt} \end{minipage} \caption{2D Visualization of Various Methods for Head Position Tracking} \label{Position_2D} \end{figure} All methods of orientation tracking show significant drift in all three axes in positional tracking. This is likely caused by two main factors. Firstly, there is the large assumption made that the linear acceleration corresponds to rotation of the torso about the waist, rather than the neck around the base of the neck (or likely a combination of the two). This assumption was made in order to simplify the kinematic system, and justified by the fact that the neck often only slightly moves around its base, and most of the movement of the head relative to the waist is due to a rotation of the torso. Secondly, a large amount of error is introduced by the double integration of the accelerometer readings. Numerical integration by itself produces inaccurate results, and this is only made worse by applying double integration to already noisy data (as the accelerometer itself has inherent noise, and the local to global frame conversion is based on an imperfect head orientation estimate). All these sources of noise compound to produce potentially massive errors in the result, which are propagated in each time step of the tracking. Thus, any positional tracking based only on the IMU with no external frame of reference is going to suffer from large drift and propagated errors. \end{document}
(* Author: Tobias Nipkow *) theory Def_Init_Big imports Def_Init_Exp Def_Init begin subsection "Initialization-Sensitive Big Step Semantics" inductive big_step :: "(com \<times> state option) \<Rightarrow> state option \<Rightarrow> bool" (infix "\<Rightarrow>" 55) where None: "(c,None) \<Rightarrow> None" | Skip: "(SKIP,s) \<Rightarrow> s" | AssignNone: "aval a s = None \<Longrightarrow> (x ::= a, Some s) \<Rightarrow> None" | Assign: "aval a s = Some i \<Longrightarrow> (x ::= a, Some s) \<Rightarrow> Some(s(x := Some i))" | Seq: "(c\<^sub>1,s\<^sub>1) \<Rightarrow> s\<^sub>2 \<Longrightarrow> (c\<^sub>2,s\<^sub>2) \<Rightarrow> s\<^sub>3 \<Longrightarrow> (c\<^sub>1;;c\<^sub>2,s\<^sub>1) \<Rightarrow> s\<^sub>3" | IfNone: "bval b s = None \<Longrightarrow> (IF b THEN c\<^sub>1 ELSE c\<^sub>2,Some s) \<Rightarrow> None" | IfTrue: "\<lbrakk> bval b s = Some True; (c\<^sub>1,Some s) \<Rightarrow> s' \<rbrakk> \<Longrightarrow> (IF b THEN c\<^sub>1 ELSE c\<^sub>2,Some s) \<Rightarrow> s'" | IfFalse: "\<lbrakk> bval b s = Some False; (c\<^sub>2,Some s) \<Rightarrow> s' \<rbrakk> \<Longrightarrow> (IF b THEN c\<^sub>1 ELSE c\<^sub>2,Some s) \<Rightarrow> s'" | WhileNone: "bval b s = None \<Longrightarrow> (WHILE b DO c,Some s) \<Rightarrow> None" | WhileFalse: "bval b s = Some False \<Longrightarrow> (WHILE b DO c,Some s) \<Rightarrow> Some s" | WhileTrue: "\<lbrakk> bval b s = Some True; (c,Some s) \<Rightarrow> s'; (WHILE b DO c,s') \<Rightarrow> s'' \<rbrakk> \<Longrightarrow> (WHILE b DO c,Some s) \<Rightarrow> s''" lemmas big_step_induct = big_step.induct[split_format(complete)] subsection "Soundness wrt Big Steps" text\<open>Note the special form of the induction because one of the arguments of the inductive predicate is not a variable but the term \<^term>\<open>Some s\<close>:\<close> theorem Sound: "\<lbrakk> (c,Some s) \<Rightarrow> s'; D A c A'; A \<subseteq> dom s \<rbrakk> \<Longrightarrow> \<exists> t. s' = Some t \<and> A' \<subseteq> dom t" proof (induction c "Some s" s' arbitrary: s A A' rule:big_step_induct) case AssignNone thus ?case by auto (metis aval_Some option.simps(3) subset_trans) next case Seq thus ?case by auto metis next case IfTrue thus ?case by auto blast next case IfFalse thus ?case by auto blast next case IfNone thus ?case by auto (metis bval_Some option.simps(3) order_trans) next case WhileNone thus ?case by auto (metis bval_Some option.simps(3) order_trans) next case (WhileTrue b s c s' s'') from \<open>D A (WHILE b DO c) A'\<close> obtain A' where "D A c A'" by blast then obtain t' where "s' = Some t'" "A \<subseteq> dom t'" by (metis D_incr WhileTrue(3,7) subset_trans) from WhileTrue(5)[OF this(1) WhileTrue(6) this(2)] show ?case . qed auto corollary sound: "\<lbrakk> D (dom s) c A'; (c,Some s) \<Rightarrow> s' \<rbrakk> \<Longrightarrow> s' \<noteq> None" by (metis Sound not_Some_eq subset_refl) end
-- import category_theory.limits.terminal -- import category_theory.tactics.obviously -- open category_theory -- universes u v -- namespace category_theory.limits -- variables {C : Type u} [𝒞 : category.{u v} C] -- include 𝒞 -- structure is_zero (t : C) := -- (lift : ∀ (s : C), s ⟶ t) -- (uniq_lift' : ∀ (s : C) (m : s ⟶ t), m = lift s . obviously) -- (desc : ∀ (s : C), t ⟶ s) -- (uniq_desc' : ∀ (s : C) (m : t ⟶ s), m = desc s . obviously) -- namespace is_zero -- def to_is_initial {t : C} (Z : is_zero.{u v} t) : is_initial.{u v} t := { desc := Z.desc, uniq' := Z.uniq_desc' } -- def to_is_terminal {t : C} (Z : is_zero.{u v} t) : is_terminal.{u v} t := { lift := Z.lift, uniq' := Z.uniq_lift' } -- end is_zero -- restate_axiom is_zero.uniq_lift' -- restate_axiom is_zero.uniq_desc' -- attribute [search,elim] is_zero.uniq_lift is_zero.uniq_desc -- @[extensionality] lemma is_zero.ext {X : C} (P Q : is_zero.{u v} X) : P = Q := -- begin tactic.unfreeze_local_instances, cases P, cases Q, congr, obviously, end -- instance hom_to_zero_subsingleton (X Z : C) (B : is_zero.{u v} Z) : subsingleton (X ⟶ Z) := -- limits.hom_to_terminal_subsingleton X Z B.to_is_terminal -- instance hom_from_zero_subsingleton (Z X : C) (B : is_zero.{u v} Z) : subsingleton (Z ⟶ X) := -- limits.hom_from_initial_subsingleton Z X B.to_is_initial -- variable (C) -- class has_zero_object := -- (zero : C) -- (is_zero : is_zero.{u v} zero) -- end category_theory.limits -- namespace category_theory.limits -- def zero_object := has_zero_object.zero.{u v} -- variables {C : Type u} [𝒞 : category.{u v} C] -- include 𝒞 -- variables [has_zero_object.{u v} C] -- def zero_is_zero : is_zero.{u v} (zero_object.{u v} C) := has_zero_object.is_zero C -- instance has_initial_object_of_has_zero : has_initial_object.{u v} C := -- { initial := zero_object.{u v} C, -- is_initial := zero_is_zero.to_is_initial } -- instance has_terminal_object_of_has_zero: has_terminal_object.{u v} C := -- { terminal := zero_object.{u v} C, -- is_terminal := zero_is_zero.to_is_terminal } -- def zero_morphism (X Y : C) : X ⟶ Y := (zero_is_zero.lift.{u v} X) ≫ (zero_is_zero.desc.{u v} Y) -- instance hom_has_zero (X Y : C) : _root_.has_zero (X ⟶ Y) := { zero := zero_morphism X Y } -- @[extensionality] lemma ext.out (Y : C) (f g : zero_object.{u v} C ⟶ Y) : f = g := -- begin -- rw (initial.universal_property).uniq _ f, -- rw (initial.universal_property).uniq _ g, -- end -- @[extensionality] lemma ext.in (Y : C) (f g : Y ⟶ zero_object.{u v} C) : f = g := -- begin -- rw (terminal.universal_property).uniq _ f, -- rw (terminal.universal_property).uniq _ g, -- end -- @[simp] lemma zero_morphism_left {X Y Z : C} (f : Y ⟶ Z) : (zero_morphism X Y) ≫ f = zero_morphism X Z := -- begin -- unfold zero_morphism, -- rw category.assoc, -- congr, -- tidy, -- end -- @[simp] lemma zero_morphism_right {X Y Z : C} (f : X ⟶ Y) : f ≫ (zero_morphism Y Z) = zero_morphism X Z := -- begin -- unfold zero_morphism, -- rw ← category.assoc, -- congr, -- tidy, -- end -- end category_theory.limits
State Before: α : Type u_1 β : Type ?u.49680 γ : Type ?u.49683 f : α → α H : ∀ (x : α), f x = x o : Part α ⊢ map f o = o State After: α : Type u_1 β : Type ?u.49680 γ : Type ?u.49683 f : α → α H : ∀ (x : α), f x = x o : Part α ⊢ map id o = o Tactic: rw [show f = id from funext H] State Before: α : Type u_1 β : Type ?u.49680 γ : Type ?u.49683 f : α → α H : ∀ (x : α), f x = x o : Part α ⊢ map id o = o State After: no goals Tactic: exact id_map o
import streamlit as st import pandas as pd import numpy as np import sqlite3 from PIL import Image #Create a SQL connection to our SQLite database con = sqlite3.connect("chemistry.db") df = pd.read_sql_query('SELECT * from chemistry', con) con.close() st.title('Product Identification based on the Reactants of a Chemical Reaction') st.sidebar.title('Chemistry Project by Vedang Dwivedi, XII') #image = Image.open('https://github.com/VijayDwivedi-ml/timeseries/blob/main/vedang_pic_final.JPG') #st.sidebar.image('image', width = 100) select1 = st.sidebar.selectbox('Reactant Selection', ['Alkyne','Alkene','Alkanes','Alcohol','Alcohol(Secondary)','Aldehyde','Ketone','Carboxylic Acid','Ether','Amine', ' ']) st.write('You Selected Reactant:', select1) st.write('\n') st.write('\n') st.write('\n') df2 = df[df['Reactant1'] == select1] st.write(df2) st.write('\n') st.write('\n') st.write('\n') st.write('\n') st.write('\n') st.write('\n') select2 = st.sidebar.selectbox('Process-Reagent Selection', ['Hydrogenation','Halogenation','Ozonolysis','Diboration','Dehydration','Oxidation','Grignard Reagent','Tollens Reagent','Fehlings Reagent', ' ']) st.write('You Selected Reactant and Process-Reagent:', select1, ',', select2) st.write('\n') st.write('\n') st.write('\n') df3 = df2[df2['Process_Reagent'] == select2] st.write(df3) st.write('\n') st.write('\n') st.write('\n') st.write('\n') st.write('\n') st.write('\n') select3 = st.sidebar.selectbox('Product Selection', ['Alkene', 'Halo Alkane', 'Aldehyde', 'Alcohol', 'Alkane',' ']) st.write('You Selected Reactant, Process-Reagent and Product:', select1, ',', select2, ', ', select3) st.write('\n') st.write('\n') st.write('\n') df4 = df3[df3['Product1'] == select3] st.write(df4) st.write('\n') st.write('\n') st.write('\n') st.write('\n') st.write('\n') st.write('\n') st.write("##################################################################")
/* linalg/trimult.c * * Copyright (C) 2019 Patrick Alken * * This is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 3, or (at your option) any * later version. * * This source is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * This module contains code to compute L^T L where L is a lower triangular matrix */ #include <config.h> #include <gsl/gsl_math.h> #include <gsl/gsl_errno.h> #include <gsl/gsl_vector.h> #include <gsl/gsl_matrix.h> #include <gsl/gsl_blas.h> #include <gsl/gsl_linalg.h> #include "recurse.h" static int triangular_multsymm_L2(CBLAS_UPLO_t Uplo, gsl_matrix * T); static int triangular_multsymm_L3(CBLAS_UPLO_t Uplo, gsl_matrix * T); static int triangular_mult_L2(CBLAS_UPLO_t Uplo, gsl_matrix * A); static int triangular_mult_L3(CBLAS_UPLO_t Uplo, gsl_matrix * A); int gsl_linalg_tri_LTL(gsl_matrix * L) { return triangular_multsymm_L3(CblasLower, L); } int gsl_linalg_tri_UL(gsl_matrix * LU) { return triangular_mult_L3(CblasUpper, LU); } /* triangular_multsymm_L2() Compute L^T L or U U^T Inputs: Uplo - CblasUpper or CblasLower T - on output the upper (or lower) part of T is replaced by L^T L or U U^T Return: success/error Notes: 1) Based on LAPACK routine DLAUU2 using Level 2 BLAS */ static int triangular_multsymm_L2(CBLAS_UPLO_t Uplo, gsl_matrix * T) { const size_t N = T->size1; if (N != T->size2) { GSL_ERROR ("matrix must be square", GSL_ENOTSQR); } else { gsl_vector_view v1, v2; size_t i; if (Uplo == CblasUpper) { } else { for (i = 0; i < N; ++i) { double Tii = gsl_matrix_get(T, i, i); if (i < N - 1) { double tmp; v1 = gsl_matrix_subcolumn(T, i, i, N - i); gsl_blas_ddot(&v1.vector, &v1.vector, &tmp); gsl_matrix_set(T, i, i, tmp); if (i > 0) { gsl_matrix_view m = gsl_matrix_submatrix(T, i + 1, 0, N - i - 1, i); v1 = gsl_matrix_subcolumn(T, i, i + 1, N - i - 1); v2 = gsl_matrix_subrow(T, i, 0, i); gsl_blas_dgemv(CblasTrans, 1.0, &m.matrix, &v1.vector, Tii, &v2.vector); } } else { v1 = gsl_matrix_row(T, N - 1); gsl_blas_dscal(Tii, &v1.vector); } } } return GSL_SUCCESS; } } /* triangular_multsymm_L3() Compute L^T L or U U^T Inputs: Uplo - CblasUpper or CblasLower T - on output the upper (or lower) part of T is replaced by L^T L or U U^T Return: success/error Notes: 1) Based on ReLAPACK routine DLAUUM using Level 3 BLAS */ static int triangular_multsymm_L3(CBLAS_UPLO_t Uplo, gsl_matrix * T) { const size_t N = T->size1; if (N != T->size2) { GSL_ERROR ("matrix must be square", GSL_ENOTSQR); } else if (N <= CROSSOVER_TRIMULT) { return triangular_multsymm_L2(Uplo, T); } else { /* partition matrix: * * T11 T12 * T21 T22 * * where T11 is N1-by-N1 */ int status; const size_t N1 = GSL_LINALG_SPLIT(N); const size_t N2 = N - N1; gsl_matrix_view T11 = gsl_matrix_submatrix(T, 0, 0, N1, N1); gsl_matrix_view T12 = gsl_matrix_submatrix(T, 0, N1, N1, N2); gsl_matrix_view T21 = gsl_matrix_submatrix(T, N1, 0, N2, N1); gsl_matrix_view T22 = gsl_matrix_submatrix(T, N1, N1, N2, N2); /* recursion on T11 */ status = triangular_multsymm_L3(Uplo, &T11.matrix); if (status) return status; if (Uplo == 
CblasLower) { /* T11 += T21^T T21 */ gsl_blas_dsyrk(Uplo, CblasTrans, 1.0, &T21.matrix, 1.0, &T11.matrix); /* T21 = T22^T * T21 */ gsl_blas_dtrmm(CblasLeft, Uplo, CblasTrans, CblasNonUnit, 1.0, &T22.matrix, &T21.matrix); } else { /* T11 += T12 T12^T */ gsl_blas_dsyrk(Uplo, CblasNoTrans, 1.0, &T12.matrix, 1.0, &T11.matrix); /* T12 = T12 * T22^T */ gsl_blas_dtrmm(CblasRight, Uplo, CblasTrans, CblasNonUnit, 1.0, &T22.matrix, &T12.matrix); } /* recursion on T22 */ status = triangular_multsymm_L3(Uplo, &T22.matrix); if (status) return status; return GSL_SUCCESS; } } /* triangular_mult_L2() Compute U L or L U Inputs: Uplo - CblasUpper or CblasLower (for the first triangular factor) A - on input, matrix in LU format; on output, U L or L U Return: success/error */ static int triangular_mult_L2(CBLAS_UPLO_t Uplo, gsl_matrix * A) { const size_t N = A->size1; if (N != A->size2) { GSL_ERROR ("matrix must be square", GSL_ENOTSQR); } else { size_t i; /* quick return */ if (N == 1) return GSL_SUCCESS; if (Uplo == CblasUpper) { /* compute U * L and store in A */ for (i = 0; i < N; ++i) { double * Aii = gsl_matrix_ptr(A, i, i); double Uii = *Aii; if (i < N - 1) { gsl_vector_view lb = gsl_matrix_subcolumn(A, i, i + 1, N - i - 1); gsl_vector_view ur = gsl_matrix_subrow(A, i, i + 1, N - i - 1); double tmp; gsl_blas_ddot(&lb.vector, &ur.vector, &tmp); *Aii += tmp; if (i > 0) { gsl_matrix_view U_TR = gsl_matrix_submatrix(A, 0, i + 1, i, N - i - 1); gsl_matrix_view L_BL = gsl_matrix_submatrix(A, i + 1, 0, N - i - 1, i); gsl_vector_view ut = gsl_matrix_subcolumn(A, i, 0, i); gsl_vector_view ll = gsl_matrix_subrow(A, i, 0, i); gsl_blas_dgemv(CblasTrans, 1.0, &L_BL.matrix, &ur.vector, Uii, &ll.vector); gsl_blas_dgemv(CblasNoTrans, 1.0, &U_TR.matrix, &lb.vector, 1.0, &ut.vector); } } else { gsl_vector_view v = gsl_matrix_subrow(A, N - 1, 0, N - 1); gsl_blas_dscal(Uii, &v.vector); } } } else { } return GSL_SUCCESS; } } /* triangular_mult_L3() Compute U L or L U Inputs: Uplo - CblasUpper or CblasLower (for the first triangular factor) A - on input, matrix in LU format; on output, U L or L U Return: success/error */ static int triangular_mult_L3(CBLAS_UPLO_t Uplo, gsl_matrix * A) { const size_t N = A->size1; if (N != A->size2) { GSL_ERROR ("matrix must be square", GSL_ENOTSQR); } else if (N <= CROSSOVER_TRIMULT) { return triangular_mult_L2(Uplo, A); } else { /* partition matrix: * * A11 A12 * A21 A22 * * where A11 is N1-by-N1 */ int status; const size_t N1 = GSL_LINALG_SPLIT(N); const size_t N2 = N - N1; gsl_matrix_view A11 = gsl_matrix_submatrix(A, 0, 0, N1, N1); gsl_matrix_view A12 = gsl_matrix_submatrix(A, 0, N1, N1, N2); gsl_matrix_view A21 = gsl_matrix_submatrix(A, N1, 0, N2, N1); gsl_matrix_view A22 = gsl_matrix_submatrix(A, N1, N1, N2, N2); /* recursion on A11 */ status = triangular_mult_L3(Uplo, &A11.matrix); if (status) return status; if (Uplo == CblasLower) { } else { /* form U * L */ /* A11 += A12 A21 */ gsl_blas_dgemm(CblasNoTrans, CblasNoTrans, 1.0, &A12.matrix, &A21.matrix, 1.0, &A11.matrix); /* A12 = A12 * L22 */ gsl_blas_dtrmm(CblasRight, CblasLower, CblasNoTrans, CblasUnit, 1.0, &A22.matrix, &A12.matrix); /* A21 = U22 * A21 */ gsl_blas_dtrmm(CblasLeft, CblasUpper, CblasNoTrans, CblasNonUnit, 1.0, &A22.matrix, &A21.matrix); } /* recursion on A22 */ status = triangular_mult_L3(Uplo, &A22.matrix); if (status) return status; return GSL_SUCCESS; } }
{-# OPTIONS --cubical --safe #-} module Cubical.HITs.Truncation.Properties where open import Cubical.Foundations.Prelude open import Cubical.Foundations.Function open import Cubical.Foundations.Equiv open import Cubical.Foundations.Isomorphism open import Cubical.Foundations.HLevels open import Cubical.Foundations.PathSplitEquiv open isPathSplitEquiv open import Cubical.Modalities.Everything open Modality open import Cubical.Data.Empty as ⊥ using (⊥) open import Cubical.Data.Nat hiding (elim) open import Cubical.Data.NatMinusOne as ℕ₋₁ hiding (1+_) open import Cubical.Data.NatMinusTwo as ℕ₋₂ hiding (-1+_) open import Cubical.HITs.Sn open import Cubical.HITs.Susp open import Cubical.HITs.Nullification as Null hiding (rec; elim) open import Cubical.HITs.Truncation.Base open import Cubical.HITs.PropositionalTruncation as PropTrunc renaming (∥_∥ to ∥_∥₋₁; ∣_∣ to ∣_∣₋₁; squash to squash₋₁) using () open import Cubical.HITs.SetTruncation as SetTrunc using (∥_∥₀; ∣_∣₀; squash₀) open import Cubical.HITs.GroupoidTruncation as GpdTrunc using (∥_∥₁; ∣_∣₁; squash₁) open import Cubical.HITs.2GroupoidTruncation as 2GpdTrunc using (∥_∥₂; ∣_∣₂; squash₂) private variable ℓ ℓ' : Level A : Type ℓ sphereFill : (n : ℕ₋₁) (f : S n → A) → Type _ sphereFill {A = A} n f = Σ[ top ∈ A ] ((x : S n) → top ≡ f x) isSphereFilled : ℕ₋₁ → Type ℓ → Type ℓ isSphereFilled n A = (f : S n → A) → sphereFill n f isSphereFilledTrunc : {n : ℕ} → isSphereFilled (-1+ n) (hLevelTrunc n A) isSphereFilledTrunc {n = zero} f = hub f , ⊥.elim isSphereFilledTrunc {n = suc n} f = hub f , spoke f isSphereFilled→isOfHLevelSuc : {n : ℕ} → isSphereFilled (ℕ→ℕ₋₁ n) A → isOfHLevel (suc n) A isSphereFilled→isOfHLevelSuc {A = A} {zero} h x y = sym (snd (h f) north) ∙ snd (h f) south where f : Susp ⊥ → A f north = x f south = y f (merid () i) isSphereFilled→isOfHLevelSuc {A = A} {suc n} h x y = isSphereFilled→isOfHLevelSuc (helper h x y) where helper : isSphereFilled (ℕ→ℕ₋₁ (suc n)) A → (x y : A) → isSphereFilled (ℕ→ℕ₋₁ n) (x ≡ y) helper h x y f = l , r where f' : Susp (S (ℕ→ℕ₋₁ n)) → A f' north = x f' south = y f' (merid u i) = f u i u : sphereFill (ℕ→ℕ₋₁ (suc n)) f' u = h f' z : A z = fst u p : z ≡ x p = snd u north q : z ≡ y q = snd u south l : x ≡ y l = sym p ∙ q r : (s : S (ℕ→ℕ₋₁ n)) → l ≡ f s r s i j = hcomp (λ k → λ { (i = i0) → compPath-filler (sym p) q k j ; (i = i1) → snd u (merid s j) k ; (j = i0) → p (k ∨ (~ i)) ; (j = i1) → q k }) (p ((~ i) ∧ (~ j))) isOfHLevel→isSphereFilled : {n : ℕ} → isOfHLevel n A → isSphereFilled (-1+ n) A isOfHLevel→isSphereFilled {A = A} {zero} h f = fst h , λ _ → snd h _ isOfHLevel→isSphereFilled {A = A} {suc zero} h f = f north , λ _ → h _ _ isOfHLevel→isSphereFilled {A = A} {suc (suc n)} h = helper λ x y → isOfHLevel→isSphereFilled (h x y) where helper : {n : ℕ} → ((x y : A) → isSphereFilled (-1+ n) (x ≡ y)) → isSphereFilled (suc₋₁ (-1+ n)) A helper {n = n} h f = l , r where l : A l = f north f' : S (-1+ n) → f north ≡ f south f' x i = f (merid x i) h' : sphereFill (-1+ n) f' h' = h (f north) (f south) f' r : (x : S (suc₋₁ (-1+ n))) → l ≡ f x r north = refl r south = h' .fst r (merid x i) j = hcomp (λ k → λ { (i = i0) → f north ; (i = i1) → h' .snd x (~ k) j ; (j = i0) → f north ; (j = i1) → f (merid x i) }) (f (merid x (i ∧ j))) -- isNull (S n) A ≃ (isSphereFilled n A) × (∀ (x y : A) → isSphereFilled n (x ≡ y)) isOfHLevel→isSnNull : {n : ℕ} → isOfHLevel n A → isNull (S (-1+ n)) A fst (sec (isOfHLevel→isSnNull h)) f = fst (isOfHLevel→isSphereFilled h f) snd (sec (isOfHLevel→isSnNull h)) f i s = snd 
(isOfHLevel→isSphereFilled h f) s i fst (secCong (isOfHLevel→isSnNull h) x y) p = fst (isOfHLevel→isSphereFilled (isOfHLevelPath _ h x y) (funExt⁻ p)) snd (secCong (isOfHLevel→isSnNull h) x y) p i j s = snd (isOfHLevel→isSphereFilled (isOfHLevelPath _ h x y) (funExt⁻ p)) s i j isSnNull→isOfHLevel : {n : ℕ} → isNull (S (-1+ n)) A → isOfHLevel n A isSnNull→isOfHLevel {n = zero} nA = fst (sec nA) ⊥.rec , λ y → fst (secCong nA _ y) (funExt ⊥.elim) isSnNull→isOfHLevel {n = suc n} nA = isSphereFilled→isOfHLevelSuc (λ f → fst (sec nA) f , λ s i → snd (sec nA) f i s) isOfHLevelTrunc : (n : ℕ) → isOfHLevel n (hLevelTrunc n A) isOfHLevelTrunc zero = hub ⊥.rec , λ _ → ≡hub ⊥.rec isOfHLevelTrunc (suc n) = isSphereFilled→isOfHLevelSuc isSphereFilledTrunc -- isOfHLevelTrunc n = isSnNull→isOfHLevel isNull-Null -- hLevelTrunc n is a modality rec : {n : ℕ} {B : Type ℓ'} → (isOfHLevel n B) → (g : (a : A) → B) → (hLevelTrunc n A → B) rec {B = B} h = Null.elim {B = λ _ → B} λ x → isOfHLevel→isSnNull h elim : {n : ℕ} {B : hLevelTrunc n A → Type ℓ'} (hB : (x : hLevelTrunc n A) → isOfHLevel n (B x)) (g : (a : A) → B (∣ a ∣)) (x : hLevelTrunc n A) → B x elim hB = Null.elim (λ x → isOfHLevel→isSnNull (hB x)) elim2 : {n : ℕ} {B : hLevelTrunc n A → hLevelTrunc n A → Type ℓ'} (hB : ((x y : hLevelTrunc n A) → isOfHLevel n (B x y))) (g : (a b : A) → B ∣ a ∣ ∣ b ∣) (x y : hLevelTrunc n A) → B x y elim2 {n = n} hB g = elim (λ _ → isOfHLevelPi n (λ _ → hB _ _)) (λ a → elim (λ _ → hB _ _) (λ b → g a b)) elim3 : {n : ℕ} {B : (x y z : hLevelTrunc n A) → Type ℓ'} (hB : ((x y z : hLevelTrunc n A) → isOfHLevel n (B x y z))) (g : (a b c : A) → B (∣ a ∣) ∣ b ∣ ∣ c ∣) (x y z : hLevelTrunc n A) → B x y z elim3 {n = n} hB g = elim2 (λ _ _ → isOfHLevelPi n (hB _ _)) (λ a b → elim (λ _ → hB _ _ _) (λ c → g a b c)) HLevelTruncModality : ∀ {ℓ} (n : ℕ) → Modality ℓ isModal (HLevelTruncModality n) = isOfHLevel n isModalIsProp (HLevelTruncModality n) = isPropIsOfHLevel n ◯ (HLevelTruncModality n) = hLevelTrunc n ◯-isModal (HLevelTruncModality n) = isOfHLevelTrunc n η (HLevelTruncModality n) = ∣_∣ ◯-elim (HLevelTruncModality n) = elim ◯-elim-β (HLevelTruncModality n) = λ _ _ _ → refl ◯-=-isModal (HLevelTruncModality n) = isOfHLevelPath n (isOfHLevelTrunc n) idemTrunc : (n : ℕ) → isOfHLevel n A → A ≃ (hLevelTrunc n A) idemTrunc n hA = ∣_∣ , isModalToIsEquiv (HLevelTruncModality n) hA -- equivalences to prop/set/groupoid truncations propTrunc≃Trunc-1 : ∥ A ∥₋₁ ≃ ∥ A ∥ -1 propTrunc≃Trunc-1 = isoToEquiv (iso (PropTrunc.elim (λ _ → isOfHLevelTrunc 1) ∣_∣) (elim (λ _ → squash₋₁) ∣_∣₋₁) (elim (λ _ → isOfHLevelPath 1 (isOfHLevelTrunc 1) _ _) (λ _ → refl)) (PropTrunc.elim (λ _ → isOfHLevelPath 1 squash₋₁ _ _) (λ _ → refl))) setTrunc≃Trunc0 : ∥ A ∥₀ ≃ ∥ A ∥ 0 setTrunc≃Trunc0 = isoToEquiv (iso (SetTrunc.elim (λ _ → isOfHLevelTrunc 2) ∣_∣) (elim (λ _ → squash₀) ∣_∣₀) (elim (λ _ → isOfHLevelPath 2 (isOfHLevelTrunc 2) _ _) (λ _ → refl)) (SetTrunc.elim (λ _ → isOfHLevelPath 2 squash₀ _ _) (λ _ → refl))) groupoidTrunc≃Trunc1 : ∥ A ∥₁ ≃ ∥ A ∥ 1 groupoidTrunc≃Trunc1 = isoToEquiv (iso (GpdTrunc.elim (λ _ → isOfHLevelTrunc 3) ∣_∣) (elim (λ _ → squash₁) ∣_∣₁) (elim (λ _ → isOfHLevelPath 3 (isOfHLevelTrunc 3) _ _) (λ _ → refl)) (GpdTrunc.elim (λ _ → isOfHLevelPath 3 squash₁ _ _) (λ _ → refl))) 2GroupoidTrunc≃Trunc2 : ∥ A ∥₂ ≃ ∥ A ∥ 2 2GroupoidTrunc≃Trunc2 = isoToEquiv (iso (2GpdTrunc.elim (λ _ → isOfHLevelTrunc 4) ∣_∣) (elim (λ _ → squash₂) ∣_∣₂) (elim (λ _ → isOfHLevelPath 4 (isOfHLevelTrunc 4) _ _) (λ _ → refl)) (2GpdTrunc.elim (λ _ → isOfHLevelPath 4 squash₂ 
_ _) (λ _ → refl))) ---- ∥ Ω A ∥ ₙ ≡ Ω ∥ A ∥ₙ₊₁ ---- {- Proofs of Theorem 7.3.12. and Corollary 7.3.13. in the HoTT book -} private {- We define the fibration P to show a more general result -} P : ∀ {ℓ} {B : Type ℓ}{n : ℕ₋₂} → ∥ B ∥ (suc₋₂ n) → ∥ B ∥ (suc₋₂ n) → Type ℓ P x y = fst (P₁ x y) where P₁ : ∀ {ℓ} {B : Type ℓ} {n : ℕ₋₂} → ∥ B ∥ (suc₋₂ n) → ∥ B ∥ (suc₋₂ n) → (HLevel ℓ (2+ n)) P₁ {ℓ} {n = n} x y = elim2 (λ _ _ → isOfHLevelHLevel (2+ n)) (λ a b → ∥ a ≡ b ∥ n , isOfHLevelTrunc (2+ n)) x y {- We will need P to be of hLevel n + 3 -} hLevelP : ∀{ℓ} {n : ℕ₋₂} {B : Type ℓ} (a b : ∥ B ∥ (suc₋₂ n)) → isOfHLevel (2+ (suc₋₂ n)) (P a b ) hLevelP {n = n} = elim2 (λ x y → isProp→isOfHLevelSuc (2+ n) (isPropIsOfHLevel (2+ suc₋₂ n)) ) (λ a b → isOfHLevelSuc (2+ n) (isOfHLevelTrunc (2+ n))) {- decode function from P x y to x ≡ y -} decode-fun : ∀ {ℓ} {B : Type ℓ} {n : ℕ₋₂} (x y : ∥ B ∥ (suc₋₂ n)) → P x y → x ≡ y decode-fun {B = B} {n = n} = elim2 (λ u v → isOfHLevelPi (2+ suc₋₂ n) (λ _ → isOfHLevelSuc (2+ suc₋₂ n) (isOfHLevelTrunc (2+ suc₋₂ n)) u v)) decode* where decode* : ∀ {ℓ} {B : Type ℓ} {n : ℕ₋₂}(u v : B) → (P {n = n} ∣ u ∣ ∣ v ∣) → _≡_ {A = ∥ B ∥ (suc₋₂ n)} ∣ u ∣ ∣ v ∣ decode* {B = B} {n = neg2} u v = rec ( isOfHLevelTrunc (suc zero) ∣ u ∣ ∣ v ∣ , λ _ → isOfHLevelSuc (suc zero) (isOfHLevelTrunc (suc zero)) _ _ _ _ ) (λ p → cong (λ z → ∣ z ∣) p) decode* {n = ℕ₋₂.-1+ n} u v = rec (isOfHLevelTrunc (suc (suc n)) ∣ u ∣ ∣ v ∣) (λ p → cong (λ z → ∣ z ∣) p) {- auxilliary function r used to define encode -} r : ∀ {ℓ} {B : Type ℓ} {m : ℕ₋₂} (u : ∥ B ∥ (suc₋₂ m)) → P u u r {m = m} = elim (λ x → hLevelP x x) (λ a → ∣ refl ∣) {- encode function from x ≡ y to P x y -} encode-fun : ∀ {ℓ} {B : Type ℓ} {n : ℕ₋₂} (x y : ∥ B ∥ (suc₋₂ n)) → x ≡ y → P x y encode-fun x y p = transport (λ i → P x (p i)) (r x) {- We need the following two lemmas on the functions behaviour for refl -} dec-refl : ∀ {ℓ} {B : Type ℓ} {n : ℕ₋₂} (x : ∥ B ∥ (suc₋₂ n)) → decode-fun x x (r x) ≡ refl {x = x} dec-refl {B = B} {n = neg2} = elim (λ x → isOfHLevelSuc (suc zero) (isOfHLevelSuc (suc zero) (isOfHLevelTrunc (suc zero)) x x) _ _) (λ a → refl) dec-refl {n = ℕ₋₂.-1+ n} = elim (λ x → isOfHLevelSuc (suc n) (isOfHLevelSuc (suc n) (isOfHLevelTrunc (suc (suc n)) x x) (decode-fun x x (r x)) refl)) (λ c → refl) enc-refl : ∀ {ℓ} {B : Type ℓ} {n : ℕ₋₂} (x : ∥ B ∥ (suc₋₂ n)) → encode-fun x x refl ≡ r x enc-refl x j = transp (λ i → P x (refl {x = x} i)) j (r x) {- decode-fun is a right-inverse -} P-rinv : ∀ {ℓ} {B : Type ℓ} {n : ℕ₋₂} (u v : ∥ B ∥ (suc₋₂ n)) → (x : _≡_ {A = ∥ B ∥ (suc₋₂ n)} u v) → decode-fun u v (encode-fun u v x) ≡ x P-rinv {ℓ = ℓ} {B = B} {n = n} u v = J (λ y p → decode-fun u y (encode-fun u y p) ≡ p) ((λ i → (decode-fun u u (enc-refl u i))) ∙ dec-refl u) {- decode-fun is a left-inverse -} P-linv : ∀ {ℓ} {B : Type ℓ} {n : ℕ₋₂} (u v : ∥ B ∥ (suc₋₂ n )) → (x : P u v) → encode-fun u v (decode-fun u v x) ≡ x P-linv {n = n} = elim2 (λ x y → isOfHLevelPi (2+ suc₋₂ n) (λ z → isOfHLevelSuc (2+ suc₋₂ n) (hLevelP x y) _ _)) helper where helper : ∀ {ℓ} {B : Type ℓ} {n : ℕ₋₂} (a b : B) (x : P {n = n} ∣ a ∣ ∣ b ∣) → encode-fun ∣ a ∣ ∣ b ∣ (decode-fun ∣ a ∣ ∣ b ∣ x) ≡ x helper {n = neg2} a b = elim (λ x → ( sym (isOfHLevelTrunc zero .snd (encode-fun ∣ a ∣ ∣ b ∣ (decode-fun ∣ a ∣ ∣ b ∣ x))) ∙ (isOfHLevelTrunc zero .snd x) , λ y → isOfHLevelSuc (suc zero) (isOfHLevelSuc zero (isOfHLevelTrunc {A = a ≡ b} zero)) _ _ _ _ )) (J (λ y p → encode-fun ∣ a ∣ ∣ y ∣ ((decode-fun ∣ a ∣ ∣ y ∣) ∣ p ∣) ≡ ∣ p ∣) (enc-refl ∣ a ∣)) helper {n = ℕ₋₂.-1+ n} a b = 
elim (λ x → hLevelP {n = ℕ₋₂.-1+ n} ∣ a ∣ ∣ b ∣ _ _) (J (λ y p → encode-fun {n = ℕ₋₂.-1+ n} ∣ a ∣ ∣ y ∣ ((decode-fun ∣ a ∣ ∣ y ∣) ∣ p ∣) ≡ ∣ p ∣) (enc-refl ∣ a ∣)) {- The final Iso established -} IsoFinal : ∀ {ℓ} {B : Type ℓ} {n : ℕ₋₂} (x y : ∥ B ∥ (suc₋₂ n)) → Iso (x ≡ y) (P x y) IsoFinal x y = iso (encode-fun x y ) (decode-fun x y) (P-linv x y) (P-rinv x y) PathIdTrunc : {a b : A} (n : ℕ₋₂) → (_≡_ {A = ∥ A ∥ (suc₋₂ n)} ∣ a ∣ ∣ b ∣) ≡ (∥ a ≡ b ∥ n) PathIdTrunc {a = a} {b = b} n = isoToPath (IsoFinal {n = n} ∣ a ∣ ∣ b ∣) PathΩ : {a : A} (n : ℕ₋₂) → (_≡_ {A = ∥ A ∥ (suc₋₂ n)} ∣ a ∣ ∣ a ∣) ≡ (∥ a ≡ a ∥ n) PathΩ {a = a} n = PathIdTrunc {a = a} {b = a} n
lemma homeomorphic_affinity: fixes S :: "'a::real_normed_vector set" assumes "c \<noteq> 0" shows "S homeomorphic ((\<lambda>x. a + c *\<^sub>R x) ` S)"
{-# OPTIONS --cubical --no-import-sorts --safe #-} module Cubical.Talks.DURG where open import Cubical.Algebra.Group open import Cubical.Foundations.Prelude open import Cubical.Foundations.HLevels open import Cubical.Foundations.Isomorphism open import Cubical.Foundations.Univalence open import Cubical.Foundations.Equiv open import Cubical.Functions.FunExtEquiv open import Cubical.Data.Sigma open import Cubical.Data.Empty open import Cubical.Data.Unit open import Cubical.Data.Nat open import Cubical.Relation.Binary open import Cubical.DStructures.Base open import Cubical.DStructures.Meta.Properties open import Cubical.DStructures.Meta.Isomorphism open import Cubical.DStructures.Structures.XModule private variable ℓ ℓ' ℓ'' ℓ₁ ℓ₁' ℓ₁'' ℓ₂ ℓA ℓ≅A ℓA' ℓ≅A' ℓB ℓB' ℓ≅B' ℓ≅B ℓC ℓ≅C ℓ≅ᴰ ℓP : Level {- Goals of the project: - define strict 2-groups - define crossed modules - prove their equivalence - do something with the classifying space perspective on groups Problems: - performance - the maps going back and forth were fine, but the identity types stating that these maps are inverse to each other were too complex How did we solve this? - Copatterns - Ulrik's idea: displayed univalent reflexive graphs - Provide a fiberwise characterization of the identity types of a type family to obtain a characterization of the identity types of the total space - Avoid equality on objects in proofs - Modular and abstract -} -- DEFINITION -- - URG structure -- - alternative constructors record URGStr' (A : Type ℓA) (ℓ≅A : Level) : Type (ℓ-max ℓA (ℓ-suc ℓ≅A)) where no-eta-equality constructor urgstr' field _≅_ : Rel A A ℓ≅A ρ : isRefl _≅_ uni : isUnivalent _≅_ ρ -- substituted version record URGStr'' (A : Type ℓA) (ℓ≅A : Level) : Type (ℓ-max ℓA (ℓ-suc ℓ≅A)) where field -- a binary relation _≅_ : A → A → Type ℓ≅A -- a witness of reflexivity ρ : (a : A) → a ≅ a -- these two fields induce a map that turns -- a path into a proof the endpoints are related ≡→≅ : {a a' : A} → a ≡ a' → a ≅ a' ≡→≅ {a} {a'} p = subst (λ z → a ≅ z) p (ρ a) field -- that natural map is a fiberwise equivalence uni : (a a' : A) → isEquiv (≡→≅ {a} {a'}) -- alternatively, we could ask for any fiberwise equivalence uni' = (a a' : A) → (a ≡ a') ≃ (a ≅ a') -- another alternative: all ≅-singletons should be contractible contrRelSingl' = (a : A) → isContr (Σ[ a' ∈ A ] (a ≅ a')) -- We can prove that these are equivalent: -- uni ↔ uni' ↔ contrRelSingl' -- This gives rise to alternative constructors for URGs: make-𝒮' : {A : Type ℓA} {_≅_ : Rel A A ℓ≅A} (ρ : isRefl _≅_) (contrTotal : contrRelSingl _≅_) → URGStr A ℓ≅A make-𝒮' {_≅_ = _≅_} ρ contrTotal = urgstr _≅_ ρ (contrRelSingl→isUnivalent _≅_ ρ contrTotal) -- EXAMPLES -- - groups -- - univalent categories -- - observational equality on ℕ -- - universe -- - identity types -- The SIP for groups produces a URG structure on the type of groups 𝒮-group' : (ℓ : Level) → URGStr (Group {ℓ}) ℓ 𝒮-group' ℓ .URGStr._≅_ = GroupEquiv 𝒮-group' ℓ .URGStr.ρ = idGroupEquiv 𝒮-group' ℓ .URGStr.uni = isUnivalent'→isUnivalent GroupEquiv idGroupEquiv λ G H → invEquiv (GroupPath G H) -- Every univalent Category induces a URG on its type of objects open import Cubical.Categories.Category renaming (isUnivalent to isUnivalentCat) Cat→𝒮 : (𝒞 : Precategory ℓ ℓ') → (uni : isUnivalentCat 𝒞) → URGStr (𝒞 .ob) ℓ' Cat→𝒮 𝒞 uni = urgstr (CatIso {𝒞 = 𝒞}) idCatIso λ x y → isUnivalentCat.univ uni x y -- observational equality on ℕ ℕ-≅ : ℕ → ℕ → Type ℓ-zero ℕ-≅ 0 0 = Unit ℕ-≅ 0 (suc _) = ⊥ ℕ-≅ (suc _) 0 = ⊥ ℕ-≅ (suc n) (suc m) = ℕ-≅ n m -- 
observational equality on ℕ is a URG 𝒮-Nat' : URGStr ℕ ℓ-zero 𝒮-Nat' = {!!} where import Cubical.DStructures.Structures.Nat using (𝒮-Nat) -- equivalences determine a URG on any universe 𝒮-universe : URGStr (Type ℓ) ℓ 𝒮-universe = make-𝒮 {_≅_ = _≃_} idEquiv λ A → isContrRespectEquiv (Σ-cong-equiv-snd (λ A' → isoToEquiv (equivInv A' A))) (equivContr' A) where module _ (A : Type ℓ) where equivInv : (A' : Type ℓ) → Iso (A ≃ A') (A' ≃ A) Iso.fun (equivInv A') = invEquiv Iso.inv (equivInv A') = invEquiv Iso.leftInv (equivInv A') = λ e → equivEq (invEquiv (invEquiv e)) e (funExt (λ x → refl)) Iso.rightInv (equivInv A') = λ e → equivEq (invEquiv (invEquiv e)) e (funExt (λ x → refl)) equivContr' : isContr (Σ[ A' ∈ Type ℓ ] A' ≃ A) equivContr' = EquivContr A -- trivially, a type is a URGStr with the relation given by its identity types 𝒮-type : (A : Type ℓ) → URGStr A ℓ 𝒮-type A = make-𝒮 {_≅_ = _≡_} (λ _ → refl) isContrSingl -- THEOREM: -- uniqueness of small URGs 𝒮-uniqueness' : (A : Type ℓA) → isContr (URGStr A ℓA) 𝒮-uniqueness' = {!!} where import Cubical.DStructures.Structures.Type using (𝒮-uniqueness) -- DEFINITION -- - displayed URG record URGStrᴰ' {A : Type ℓA} (𝒮-A : URGStr A ℓ≅A) (B : A → Type ℓB) (ℓ≅ᴰ : Level) : Type (ℓ-max (ℓ-max (ℓ-max ℓA ℓB) ℓ≅A) (ℓ-suc ℓ≅ᴰ)) where no-eta-equality constructor urgstrᴰ' open URGStr 𝒮-A field _≅ᴰ⟨_⟩_ : {a a' : A} → B a → a ≅ a' → B a' → Type ℓ≅ᴰ ρᴰ : {a : A} → isRefl _≅ᴰ⟨ ρ a ⟩_ uniᴰ : {a : A} → isUnivalent _≅ᴰ⟨ ρ a ⟩_ ρᴰ -- Of course, this also has the alternative constructor make-𝒮ᴰ -- using that the uniᴰ field follows from uniᴰ' = {a : A} → (b : B a) → isContr (Σ[ b' ∈ B a ] b ≅ᴰ⟨ ρ a ⟩ b') -- EXAMPLE -- - pointedness displayed over the universe 𝒮ᴰ-pointed : {ℓ : Level} → URGStrᴰ (𝒮-universe {ℓ}) (λ A → A) ℓ 𝒮ᴰ-pointed {ℓ} = make-𝒮ᴰ (λ a e b → equivFun e a ≡ b) (λ _ → refl) p where p : (A : Type ℓ) (a : A) → isContr (Σ[ b ∈ A ] a ≡ b) p _ a = isContrSingl a -- THEOREM -- Every DURG on a type family B induces -- a URG on the total space Σ[ a ∈ A ] B a ∫⟨_⟩'_ : {A : Type ℓA} (𝒮-A : URGStr A ℓ≅A) {B : A → Type ℓB} (𝒮ᴰ-B : URGStrᴰ 𝒮-A B ℓ≅B) → URGStr (Σ A B) (ℓ-max ℓ≅A ℓ≅B) ∫⟨_⟩'_ = {!!} {- B ∫ | ↦ A × B A -} -- EXAMPLE -- A characterization of the identity types of pointed types 𝒮-pointed : {ℓ : Level} → URGStr (Σ[ A ∈ Type ℓ ] A) ℓ 𝒮-pointed = ∫⟨ 𝒮-universe ⟩ 𝒮ᴰ-pointed -- EXAMPLE -- - constant DURG -- - URG product -- - URG structure on pairs of groups 𝒮ᴰ-const : {A : Type ℓA} (𝒮-A : URGStr A ℓ≅A) {B : Type ℓB} (𝒮-B : URGStr B ℓ≅B) → URGStrᴰ 𝒮-A (λ _ → B) ℓ≅B 𝒮ᴰ-const {A = A} 𝒮-A {B} 𝒮-B = urgstrᴰ (λ b _ b' → b ≅ b') ρ uni where open URGStr 𝒮-B _×𝒮_ : {A : Type ℓA} (𝒮-A : URGStr A ℓ≅A) {B : Type ℓB} (𝒮-B : URGStr B ℓ≅B) → URGStr (A × B) (ℓ-max ℓ≅A ℓ≅B) _×𝒮_ 𝒮-A 𝒮-B = ∫⟨ 𝒮-A ⟩ (𝒮ᴰ-const 𝒮-A 𝒮-B) {- const B ∫ A, B ↦ | ↦ A × B A -} -- EXAMPLE -- Group Homomorphisms displayed over pairs of groups 𝒮ᴰ-G²\F' : URGStrᴰ (𝒮-group' ℓ ×𝒮 𝒮-group' ℓ') (λ (G , H) → GroupHom G H) (ℓ-max ℓ ℓ') 𝒮ᴰ-G²\F' = make-𝒮ᴰ (λ {(G , H)} {(G' , H')} f (eG , eH) f' → (g : ⟨ G ⟩) → GroupEquiv.eq eH .fst ((f .fun) g) ≡ (f' .fun) (GroupEquiv.eq eG .fst g)) (λ _ _ → refl) λ (G , H) f → isContrRespectEquiv (Σ-cong-equiv-snd (λ f' → isoToEquiv (invIso (GroupMorphismExtIso f f')))) (isContrSingl f) where open GroupHom {- The displayed relation is defined as f ≅⟨ eG , eH ⟩ f = commutativity of f G --------> H | | eG | | eH | | G'--------> H' f' Reflexivity is trivial Univalence follows from contractibility of Σ[ (f' , _) ∈ GroupHom G H ] (f ∼ f') for all (f , _) ∈ GroupHom G H -} {- Overview 
of Crossed Modules and Strict 2-Groups Definition: Crossed module - group action α of G₀ on H - homomorphism φ : H → G₀ - equivariance condition (g : G₀) → (h : H) → φ (g α h) ≡ g + (φ h) - g - peiffer condition (h h' : ⟨ H ⟩) → (φ h) α h' ≡ h + h' - h Definition: Strict 2-Group - internal category in the category of groups This means - split mono ι with two retractions ι : G₀ ↔ G : σ τ₁ - vertical composition operation which satisfies the interchange law _∘⟨_⟩_ : (g f : G₁) → isComposable g f → G₁ - equivalent to type of vertical compositions on internal reflexive graph: PFG (a b : G₁) → ι(σ b) + a - ι(τ a) - ι(σ b) + b + ι(τ a) ≡ b + a Produce this tree of displayed structures: PFXM PFG VertComp | | / | | / isEquivar isSecRet | | | | B B | | | | isAction isSecRet | | | | LAS F B F×B \ | | / \ | | / \ | / / \ | / / Grp | | Grp use the next result to display propositions like isAction, isEquivariant and isSecRet -} -- THEOREM -- Subtypes have a simple DURG structure given by 𝟙 -- This makes it easy to impose axioms on a structure Subtype→Sub-𝒮ᴰ : {A : Type ℓA} → (P : A → hProp ℓP) → (𝒮-A : URGStr A ℓ≅A) → URGStrᴰ 𝒮-A (λ a → P a .fst) ℓ-zero Subtype→Sub-𝒮ᴰ P 𝒮-A = make-𝒮ᴰ (λ _ _ _ → Unit) (λ _ → tt) (λ a p → isContrRespectEquiv (invEquiv (Σ-contractSnd (λ _ → isContrUnit))) (inhProp→isContr p (P a .snd))) -- EXAMPLE -- isAction axioms on pairs of groups together with a left action structure module _ (ℓ ℓ' : Level) where ℓℓ' = ℓ-max ℓ ℓ' open import Cubical.DStructures.Structures.Action 𝒮ᴰ-G²Las\Action' : URGStrᴰ (𝒮-G²Las ℓ ℓ') (λ ((G , H) , _α_) → IsGroupAction G H _α_) ℓ-zero 𝒮ᴰ-G²Las\Action' = Subtype→Sub-𝒮ᴰ (λ ((G , H) , _α_) → IsGroupAction G H _α_ , isPropIsGroupAction G H _α_) (𝒮-G²Las ℓ ℓ') 𝒮-G²LasAction' : URGStr (Action ℓ ℓ') (ℓ-max ℓ ℓ') 𝒮-G²LasAction' = ∫⟨ 𝒮-G²Las ℓ ℓ' ⟩ 𝒮ᴰ-G²Las\Action' {- -- THEOREM -- DURGs can be lifted to be displayed over the total space of -- another DURG on the same base URG B | B C Lift C \ / ↦ | A A -} VerticalLift-𝒮ᴰ' : {A : Type ℓA} (𝒮-A : URGStr A ℓ≅A) {B : A → Type ℓB} (𝒮ᴰ-B : URGStrᴰ 𝒮-A B ℓ≅B) {C : A → Type ℓC} (𝒮ᴰ-C : URGStrᴰ 𝒮-A C ℓ≅C) → URGStrᴰ (∫⟨ 𝒮-A ⟩ 𝒮ᴰ-C) (λ (a , _) → B a) ℓ≅B VerticalLift-𝒮ᴰ' {ℓ≅B = ℓ≅B} 𝒮-A {B = B} 𝒮ᴰ-B 𝒮ᴰ-C = urgstrᴰ (λ b (pA , _) b' → b ≅ᴰ⟨ pA ⟩ b') ρᴰ uniᴰ where open URGStrᴰ 𝒮ᴰ-B {- -- THEOREM -- A tower of two DURGs can be reassociated C | B split B × C | ↦ | A A (but C depends on B) -} splitTotal-𝒮ᴰ' : {A : Type ℓA} (𝒮-A : URGStr A ℓ≅A) {B : A → Type ℓB} (𝒮ᴰ-B : URGStrᴰ 𝒮-A B ℓ≅B) {C : Σ A B → Type ℓC} (𝒮ᴰ-C : URGStrᴰ (∫⟨ 𝒮-A ⟩ 𝒮ᴰ-B) C ℓ≅C) → URGStrᴰ 𝒮-A (λ a → Σ[ b ∈ B a ] C (a , b)) (ℓ-max ℓ≅B ℓ≅C) splitTotal-𝒮ᴰ' {A = A} 𝒮-A {B} 𝒮ᴰ-B {C} 𝒮ᴰ-C = make-𝒮ᴰ (λ (b , c) eA (b' , c') → Σ[ eB ∈ b B≅ᴰ⟨ eA ⟩ b' ] c ≅ᴰ⟨ eA , eB ⟩ c') (λ (b , c) → Bρᴰ b , ρᴰ c) {!!} where open URGStrᴰ 𝒮ᴰ-C open URGStr 𝒮-A _B≅ᴰ⟨_⟩_ = URGStrᴰ._≅ᴰ⟨_⟩_ 𝒮ᴰ-B Bρᴰ = URGStrᴰ.ρᴰ 𝒮ᴰ-B Buniᴰ = URGStrᴰ.uniᴰ 𝒮ᴰ-B {- -- THEOREM -- two DURGs over the same URGs can be combined B | B C Lift C split B × C \ / ↦ | ↦ | A A A -} combine-𝒮ᴰ' : {A : Type ℓA} {𝒮-A : URGStr A ℓ≅A} {B : A → Type ℓB} {C : A → Type ℓC} (𝒮ᴰ-B : URGStrᴰ 𝒮-A B ℓ≅B) (𝒮ᴰ-C : URGStrᴰ 𝒮-A C ℓ≅C) → URGStrᴰ 𝒮-A (λ a → B a × C a) (ℓ-max ℓ≅B ℓ≅C) combine-𝒮ᴰ' {𝒮-A = 𝒮-A} 𝒮ᴰ-B 𝒮ᴰ-C = splitTotal-𝒮ᴰ 𝒮-A 𝒮ᴰ-B (VerticalLift-𝒮ᴰ 𝒮-A 𝒮ᴰ-C 𝒮ᴰ-B) -- REMARK: DURG is equivalent to URG + morphism of URG via fibrant replacement module _ (C : Type ℓ) where dispTypeIso : Iso (C → Type ℓ) (Σ[ X ∈ Type ℓ ] (X → C)) Iso.fun dispTypeIso D = (Σ[ c ∈ C ] D c) , fst Iso.inv dispTypeIso (X , F) c = Σ[ x ∈ X ] F x ≡ c 
Iso.leftInv dispTypeIso = {!!} Iso.rightInv dispTypeIso = {!!} -- → combine is pullback in the (∞,1)-topos of DURGs {- With these operations we can construct the entire tree, but how to get equivalences? PFXM PFG VertComp | | / | | / isEquivar isSecRet | | | | B B | | | | isAction isSecRet | | | | LAS F B F×B \ | | / \ | | / \ | / / \ | / / Grp | | Grp -- For URGs: relational isomorphisms -} record RelIso' {A : Type ℓA} (_≅_ : Rel A A ℓ≅A) {A' : Type ℓA'} (_≅'_ : Rel A' A' ℓ≅A') : Type (ℓ-max (ℓ-max ℓA ℓA') (ℓ-max ℓ≅A ℓ≅A')) where constructor reliso' field fun : A → A' inv : A' → A rightInv : (a' : A') → fun (inv a') ≅' a' leftInv : (a : A) → inv (fun a) ≅ a RelIso→Iso' : {A : Type ℓA} {A' : Type ℓA'} (_≅_ : Rel A A ℓ≅A) (_≅'_ : Rel A' A' ℓ≅A') {ρ : isRefl _≅_} {ρ' : isRefl _≅'_} (uni : isUnivalent _≅_ ρ) (uni' : isUnivalent _≅'_ ρ') (f : RelIso _≅_ _≅'_) → Iso A A' Iso.fun (RelIso→Iso' _ _ _ _ f) = RelIso.fun f Iso.inv (RelIso→Iso' _ _ _ _ f) = RelIso.inv f Iso.rightInv (RelIso→Iso' _ _≅'_ {ρ' = ρ'} _ uni' f) a' = invEquiv (≡→R _≅'_ ρ' , uni' (RelIso.fun f (RelIso.inv f a')) a') .fst (RelIso.rightInv f a') Iso.leftInv (RelIso→Iso' _≅_ _ {ρ = ρ} uni _ f) a = invEquiv (≡→R _≅_ ρ , uni (RelIso.inv f (RelIso.fun f a)) a) .fst (RelIso.leftInv f a) {- For DURGs: pull back one of the DURGs along an equivalence and show that there is a fiberwise relational isomorphism between B and f*B' B f*B' B' | / | | / | A ≃ A' f -} 𝒮ᴰ-*-Iso-Over→TotalIso : {A : Type ℓA} {𝒮-A : URGStr A ℓ≅A} {A' : Type ℓA'} {𝒮-A' : URGStr A' ℓ≅A'} (ℱ : Iso A A') {B : A → Type ℓB} (𝒮ᴰ-B : URGStrᴰ 𝒮-A B ℓ≅B) {B' : A' → Type ℓB'} (𝒮ᴰ-B' : URGStrᴰ 𝒮-A' B' ℓ≅B') (𝒢 : 𝒮ᴰ-♭PIso (Iso.fun ℱ) 𝒮ᴰ-B 𝒮ᴰ-B') → Iso (Σ A B) (Σ A' B') 𝒮ᴰ-*-Iso-Over→TotalIso ℱ 𝒮ᴰ-B 𝒮ᴰ-B' 𝒢 = RelFiberIsoOver→Iso ℱ (𝒮ᴰ→relFamily 𝒮ᴰ-B) (𝒮ᴰ-B .uniᴰ) (𝒮ᴰ→relFamily 𝒮ᴰ-B') (𝒮ᴰ-B' .uniᴰ) 𝒢 where open URGStrᴰ {- Let's apply this machinery to our tower of DURGs. -} import Cubical.DStructures.Equivalences.GroupSplitEpiAction import Cubical.DStructures.Equivalences.PreXModReflGraph import Cubical.DStructures.Equivalences.XModPeifferGraph import Cubical.DStructures.Equivalences.PeifferGraphS2G {- DISCUSSION - alternate definition of URGs - how to layer the cake - uniformity, abstraction, no equality on objects, results transferrable across proof assistants and type theories - unlike displayed categories not limited to 1-truncated types and type families - easy to set up - associates the other way compared to SNS - every SNS gives DURG on the URG of the universe (not implemented) OTHER THINGS WE DID - Define (n,k)-groups - Display homomorphisms of (n,k)-groups over pairs of such groups - prove the equivalence of (0,1)-groups and axiomatic groups via EM-spaces FUTURE WORK - construct more operations - use reflection to automate steps - construct URG on the type of URG or even DURG structures - meta-theory - model of type theory - more higher group theory ... -}
(* Authors: Hanna Lachnitt, TU Wien, [email protected] Anthony Bordg, University of Cambridge, [email protected] *) section \<open>The Deutsch Algorithm\<close> theory Deutsch imports More_Tensor Measurement begin text \<open> Given a function $f:{0,1}\mapsto {0,1}$, Deutsch's algorithm decides if this function is constant or balanced with a single $f(x)$ circuit to evaluate the function for multiple values of $x$ simultaneously. The algorithm makes use of quantum parallelism and quantum interference. \<close> text \<open> A constant function with values in {0,1} returns either always 0 or always 1. A balanced function is 0 for half of the inputs and 1 for the other half. \<close> locale deutsch = fixes f:: "nat \<Rightarrow> nat" assumes dom: "f \<in> ({0,1} \<rightarrow>\<^sub>E {0,1})" context deutsch begin definition is_swap:: bool where "is_swap = (\<forall>x \<in> {0,1}. f x = 1 - x)" lemma is_swap_values: assumes "is_swap" shows "f 0 = 1" and "f 1 = 0" using assms is_swap_def by auto lemma is_swap_sum_mod_2: assumes "is_swap" shows "(f 0 + f 1) mod 2 = 1" using assms is_swap_def by simp definition const:: "nat \<Rightarrow> bool" where "const n = (\<forall>x \<in> {0,1}.(f x = n))" definition is_const:: "bool" where "is_const \<equiv> const 0 \<or> const 1" definition is_balanced:: "bool" where "is_balanced \<equiv> (\<forall>x \<in> {0,1}.(f x = x)) \<or> is_swap" lemma f_values: "(f 0 = 0 \<or> f 0 = 1) \<and> (f 1 = 0 \<or> f 1 = 1)" using dom by auto lemma f_cases: shows "is_const \<or> is_balanced" using dom is_balanced_def const_def is_const_def is_swap_def f_values by auto lemma const_0_sum_mod_2: assumes "const 0" shows "(f 0 + f 1) mod 2 = 0" using assms const_def by simp lemma const_1_sum_mod_2: assumes "const 1" shows "(f 0 + f 1) mod 2 = 0" using assms const_def by simp lemma is_const_sum_mod_2: assumes "is_const" shows "(f 0 + f 1) mod 2 = 0" using assms is_const_def const_0_sum_mod_2 const_1_sum_mod_2 by auto lemma id_sum_mod_2: assumes "f = id" shows "(f 0 + f 1) mod 2 = 1" using assms by simp lemma is_balanced_sum_mod_2: assumes "is_balanced" shows "(f 0 + f 1) mod 2 = 1" using assms is_balanced_def id_sum_mod_2 is_swap_sum_mod_2 by auto lemma f_ge_0: "\<forall> x. (f x \<ge> 0)" by simp end (* context deutsch *) text \<open>The Deutsch's Transform @{text U\<^sub>f}.\<close> definition (in deutsch) deutsch_transform:: "complex Matrix.mat" ("U\<^sub>f") where "U\<^sub>f \<equiv> mat_of_cols_list 4 [[1 - f(0), f(0), 0, 0], [f(0), 1 - f(0), 0, 0], [0, 0, 1 - f(1), f(1)], [0, 0, f(1), 1 - f(1)]]" lemma (in deutsch) deutsch_transform_dim [simp]: shows "dim_row U\<^sub>f = 4" and "dim_col U\<^sub>f = 4" by (auto simp add: deutsch_transform_def mat_of_cols_list_def) lemma (in deutsch) deutsch_transform_coeff_is_zero [simp]: shows "U\<^sub>f $$ (0,2) = 0" and "U\<^sub>f $$ (0,3) = 0" and "U\<^sub>f $$ (1,2) = 0" and "U\<^sub>f $$(1,3) = 0" and "U\<^sub>f $$ (2,0) = 0" and "U\<^sub>f $$(2,1) = 0" and "U\<^sub>f $$ (3,0) = 0" and "U\<^sub>f $$ (3,1) = 0" using deutsch_transform_def by auto lemma (in deutsch) deutsch_transform_coeff [simp]: shows "U\<^sub>f $$ (0,1) = f(0)" and "U\<^sub>f $$ (1,0) = f(0)" and "U\<^sub>f $$(2,3) = f(1)" and "U\<^sub>f $$ (3,2) = f(1)" and "U\<^sub>f $$ (0,0) = 1 - f(0)" and "U\<^sub>f $$(1,1) = 1 - f(0)" and "U\<^sub>f $$ (2,2) = 1 - f(1)" and "U\<^sub>f $$ (3,3) = 1 - f(1)" using deutsch_transform_def by auto abbreviation (in deutsch) V\<^sub>f:: "complex Matrix.mat" where "V\<^sub>f \<equiv> Matrix.mat 4 4 (\<lambda>(i,j). 
if i=0 \<and> j=0 then 1 - f(0) else (if i=0 \<and> j=1 then f(0) else (if i=1 \<and> j=0 then f(0) else (if i=1 \<and> j=1 then 1 - f(0) else (if i=2 \<and> j=2 then 1 - f(1) else (if i=2 \<and> j=3 then f(1) else (if i=3 \<and> j=2 then f(1) else (if i=3 \<and> j=3 then 1 - f(1) else 0))))))))" lemma (in deutsch) deutsch_transform_alt_rep_coeff_is_zero [simp]: shows "V\<^sub>f $$ (0,2) = 0" and "V\<^sub>f $$ (0,3) = 0" and "V\<^sub>f $$ (1,2) = 0" and "V\<^sub>f $$(1,3) = 0" and "V\<^sub>f $$ (2,0) = 0" and "V\<^sub>f $$(2,1) = 0" and "V\<^sub>f $$ (3,0) = 0" and "V\<^sub>f $$ (3,1) = 0" by auto lemma (in deutsch) deutsch_transform_alt_rep_coeff [simp]: shows "V\<^sub>f $$ (0,1) = f(0)" and "V\<^sub>f $$ (1,0) = f(0)" and "V\<^sub>f $$(2,3) = f(1)" and "V\<^sub>f $$ (3,2) = f(1)" and "V\<^sub>f $$ (0,0) = 1 - f(0)" and "V\<^sub>f $$(1,1) = 1 - f(0)" and "V\<^sub>f $$ (2,2) = 1 - f(1)" and "V\<^sub>f $$ (3,3) = 1 - f(1)" by auto lemma (in deutsch) deutsch_transform_alt_rep: shows "U\<^sub>f = V\<^sub>f" proof show c0:"dim_row U\<^sub>f = dim_row V\<^sub>f" by simp show c1:"dim_col U\<^sub>f = dim_col V\<^sub>f" by simp fix i j:: nat assume "i < dim_row V\<^sub>f" and "j < dim_col V\<^sub>f" then have "i < 4" and "j < 4" by auto thus "U\<^sub>f $$ (i,j) = V\<^sub>f $$ (i,j)" by (smt deutsch_transform_alt_rep_coeff deutsch_transform_alt_rep_coeff_is_zero deutsch_transform_coeff deutsch_transform_coeff_is_zero set_4_disj) qed text \<open>@{text U\<^sub>f} is a gate.\<close> lemma (in deutsch) transpose_of_deutsch_transform: shows "(U\<^sub>f)\<^sup>t = U\<^sub>f" proof show "dim_row (U\<^sub>f\<^sup>t) = dim_row U\<^sub>f" by simp show "dim_col (U\<^sub>f\<^sup>t) = dim_col U\<^sub>f" by simp fix i j:: nat assume "i < dim_row U\<^sub>f" and "j < dim_col U\<^sub>f" thus "U\<^sub>f\<^sup>t $$ (i, j) = U\<^sub>f $$ (i, j)" by (auto simp add: transpose_mat_def) (metis deutsch_transform_coeff(1-4) deutsch_transform_coeff_is_zero set_4_disj) qed lemma (in deutsch) adjoint_of_deutsch_transform: shows "(U\<^sub>f)\<^sup>\<dagger> = U\<^sub>f" proof show "dim_row (U\<^sub>f\<^sup>\<dagger>) = dim_row U\<^sub>f" by simp show "dim_col (U\<^sub>f\<^sup>\<dagger>) = dim_col U\<^sub>f" by simp fix i j:: nat assume "i < dim_row U\<^sub>f" and "j < dim_col U\<^sub>f" thus "U\<^sub>f\<^sup>\<dagger> $$ (i, j) = U\<^sub>f $$ (i, j)" by (auto simp add: dagger_def) (metis complex_cnj_of_nat complex_cnj_zero deutsch_transform_coeff deutsch_transform_coeff_is_zero set_4_disj) qed lemma (in deutsch) deutsch_transform_is_gate: shows "gate 2 U\<^sub>f" proof show "dim_row U\<^sub>f = 2\<^sup>2" by simp show "square_mat U\<^sub>f" by simp show "unitary U\<^sub>f" proof- have "U\<^sub>f * U\<^sub>f = 1\<^sub>m (dim_col U\<^sub>f)" proof show "dim_row (U\<^sub>f * U\<^sub>f) = dim_row (1\<^sub>m (dim_col U\<^sub>f))" by simp next show "dim_col (U\<^sub>f * U\<^sub>f) = dim_col (1\<^sub>m (dim_col U\<^sub>f))" by simp next fix i j:: nat assume "i < dim_row (1\<^sub>m (dim_col U\<^sub>f))" and "j < dim_col (1\<^sub>m (dim_col U\<^sub>f))" then show "(U\<^sub>f * U\<^sub>f) $$ (i,j) = 1\<^sub>m (dim_col U\<^sub>f) $$ (i, j)" apply (auto simp add: deutsch_transform_alt_rep one_mat_def times_mat_def) apply (auto simp: scalar_prod_def) using f_values by auto qed thus ?thesis by (simp add: adjoint_of_deutsch_transform unitary_def) qed qed text \<open> Two qubits are prepared. The first one in the state $|0\rangle$, the second one in the state $|1\rangle$. 
\<close> abbreviation zero where "zero \<equiv> unit_vec 2 0" abbreviation one where "one \<equiv> unit_vec 2 1" lemma ket_zero_is_state: shows "state 1 |zero\<rangle>" by (simp add: state_def ket_vec_def cpx_vec_length_def numerals(2)) lemma ket_one_is_state: shows "state 1 |one\<rangle>" by (simp add: state_def ket_vec_def cpx_vec_length_def numerals(2)) lemma ket_zero_to_mat_of_cols_list [simp]: "|zero\<rangle> = mat_of_cols_list 2 [[1, 0]]" by (auto simp add: ket_vec_def mat_of_cols_list_def) lemma ket_one_to_mat_of_cols_list [simp]: "|one\<rangle> = mat_of_cols_list 2 [[0, 1]]" apply (auto simp add: ket_vec_def unit_vec_def mat_of_cols_list_def) using less_2_cases by fastforce text \<open> Applying the Hadamard gate to the state $|0\rangle$ results in the new state @{term "\<psi>\<^sub>0\<^sub>0"} = $\dfrac {(|0\rangle + |1\rangle)} {\sqrt 2 }$ \<close> abbreviation \<psi>\<^sub>0\<^sub>0:: "complex Matrix.mat" where "\<psi>\<^sub>0\<^sub>0 \<equiv> mat_of_cols_list 2 [[1/sqrt(2), 1/sqrt(2)]]" lemma H_on_ket_zero: shows "(H * |zero\<rangle>) = \<psi>\<^sub>0\<^sub>0" proof fix i j:: nat assume "i < dim_row \<psi>\<^sub>0\<^sub>0" and "j < dim_col \<psi>\<^sub>0\<^sub>0" then have "i \<in> {0,1} \<and> j = 0" by (simp add: mat_of_cols_list_def less_2_cases) then show "(H * |zero\<rangle>) $$ (i,j) = \<psi>\<^sub>0\<^sub>0 $$ (i,j)" by (auto simp add: mat_of_cols_list_def times_mat_def scalar_prod_def H_def) next show "dim_row (H * |zero\<rangle>) = dim_row \<psi>\<^sub>0\<^sub>0" by (simp add: H_def mat_of_cols_list_def) show "dim_col (H * |zero\<rangle>) = dim_col \<psi>\<^sub>0\<^sub>0" by (simp add: H_def mat_of_cols_list_def) qed text \<open> Applying the Hadamard gate to the state $|0\rangle$ results in the new state @{text \<psi>\<^sub>0\<^sub>1} = $\dfrac {(|0\rangle - |1\rangle)} {\sqrt 2}$. \<close> abbreviation \<psi>\<^sub>0\<^sub>1:: "complex Matrix.mat" where "\<psi>\<^sub>0\<^sub>1 \<equiv> mat_of_cols_list 2 [[1/sqrt(2), -1/sqrt(2)]]" lemma H_on_ket_one: shows "(H * |one\<rangle>) = \<psi>\<^sub>0\<^sub>1" proof fix i j:: nat assume "i < dim_row \<psi>\<^sub>0\<^sub>1" and "j < dim_col \<psi>\<^sub>0\<^sub>1" then have "i \<in> {0,1} \<and> j = 0" by (simp add: mat_of_cols_list_def less_2_cases) then show "(H * |one\<rangle>) $$ (i,j) = \<psi>\<^sub>0\<^sub>1 $$ (i,j)" by (auto simp add: mat_of_cols_list_def times_mat_def scalar_prod_def H_def ket_vec_def) next show "dim_row (H * |one\<rangle>) = dim_row \<psi>\<^sub>0\<^sub>1" by (simp add: H_def mat_of_cols_list_def) show "dim_col (H * |one\<rangle>) = dim_col \<psi>\<^sub>0\<^sub>1" by (simp add: H_def mat_of_cols_list_def ket_vec_def) qed lemma H_on_ket_one_is_state: shows "state 1 (H * |one\<rangle>)" using H_is_gate ket_one_is_state by simp text\<open> Then, the state @{text \<psi>\<^sub>1} = $\dfrac {(|00\rangle - |01\rangle + |10\rangle - |11\rangle)} {2} $ is obtained by taking the tensor product of the states @{text \<psi>\<^sub>0\<^sub>0} = $\dfrac {(|0\rangle + |1\rangle)} {\sqrt 2} $ and @{text \<psi>\<^sub>0\<^sub>1} = $\dfrac {(|0\rangle - |1\rangle)} {\sqrt 2} $. 
\<close> abbreviation \<psi>\<^sub>1:: "complex Matrix.mat" where "\<psi>\<^sub>1 \<equiv> mat_of_cols_list 4 [[1/2, -1/2, 1/2, -1/2]]" lemma \<psi>\<^sub>0_to_\<psi>\<^sub>1: shows "(\<psi>\<^sub>0\<^sub>0 \<Otimes> \<psi>\<^sub>0\<^sub>1) = \<psi>\<^sub>1" proof fix i j:: nat assume "i < dim_row \<psi>\<^sub>1" and "j < dim_col \<psi>\<^sub>1" then have "i \<in> {0,1,2,3}" and "j = 0" using mat_of_cols_list_def by auto moreover have "complex_of_real (sqrt 2) * complex_of_real (sqrt 2) = 2" by (metis mult_2_right numeral_Bit0 of_real_mult of_real_numeral real_sqrt_four real_sqrt_mult) ultimately show "(\<psi>\<^sub>0\<^sub>0 \<Otimes> \<psi>\<^sub>0\<^sub>1) $$ (i,j) = \<psi>\<^sub>1 $$ (i,j)" using mat_of_cols_list_def by auto next show "dim_row (\<psi>\<^sub>0\<^sub>0 \<Otimes> \<psi>\<^sub>0\<^sub>1) = dim_row \<psi>\<^sub>1" by (simp add: mat_of_cols_list_def) show "dim_col (\<psi>\<^sub>0\<^sub>0 \<Otimes> \<psi>\<^sub>0\<^sub>1) = dim_col \<psi>\<^sub>1" by (simp add: mat_of_cols_list_def) qed lemma \<psi>\<^sub>1_is_state: shows "state 2 \<psi>\<^sub>1" proof show "dim_col \<psi>\<^sub>1 = 1" by (simp add: Tensor.mat_of_cols_list_def) show "dim_row \<psi>\<^sub>1 = 2\<^sup>2" by (simp add: Tensor.mat_of_cols_list_def) show "\<parallel>Matrix.col \<psi>\<^sub>1 0\<parallel> = 1" using H_on_ket_one_is_state H_on_ket_zero_is_state state.is_normal tensor_state2 \<psi>\<^sub>0_to_\<psi>\<^sub>1 H_on_ket_one H_on_ket_zero by force qed text \<open> Next, the gate @{text U\<^sub>f} is applied to the state @{text \<psi>\<^sub>1} = $\dfrac {(|00\rangle - |01\rangle + |10\rangle - |11\rangle)} {2}$ and @{text \<psi>\<^sub>2}= $\dfrac {(|0f(0)\oplus 0\rangle - |0 f(0) \oplus 1\rangle + |1 f(1)\oplus 0\rangle - |1f(1)\oplus 1\rangle)} {2}$ is obtained. This simplifies to @{text \<psi>\<^sub>2}= $\dfrac {(|0f(0)\rangle - |0 \overline{f(0)} \rangle + |1 f(1)\rangle - |1\overline{f(1)}\rangle)} {2}$ \<close> abbreviation (in deutsch) \<psi>\<^sub>2:: "complex Matrix.mat" where "\<psi>\<^sub>2 \<equiv> mat_of_cols_list 4 [[(1 - f(0))/2 - f(0)/2, f(0)/2 - (1 - f(0))/2, (1 - f(1))/2 - f(1)/2, f(1)/2 - (1- f(1))/2]]" lemma (in deutsch) \<psi>\<^sub>1_to_\<psi>\<^sub>2: shows "U\<^sub>f * \<psi>\<^sub>1 = \<psi>\<^sub>2" proof fix i j:: nat assume "i < dim_row \<psi>\<^sub>2 " and "j < dim_col \<psi>\<^sub>2" then have asm:"i \<in> {0,1,2,3} \<and> j = 0 " by (auto simp add: mat_of_cols_list_def) then have "i < dim_row U\<^sub>f \<and> j < dim_col \<psi>\<^sub>1" using deutsch_transform_def mat_of_cols_list_def by auto then have "(U\<^sub>f * \<psi>\<^sub>1) $$ (i, j) = (\<Sum> k \<in> {0 ..< dim_vec \<psi>\<^sub>1}. (Matrix.row U\<^sub>f i) $ k * (Matrix.col \<psi>\<^sub>1 j) $ k)" apply (auto simp add: times_mat_def scalar_prod_def). 
thus "(U\<^sub>f * \<psi>\<^sub>1) $$ (i, j) = \<psi>\<^sub>2 $$ (i, j)" using mat_of_cols_list_def deutsch_transform_def asm by auto next show "dim_row (U\<^sub>f * \<psi>\<^sub>1) = dim_row \<psi>\<^sub>2" by (simp add: mat_of_cols_list_def) show "dim_col (U\<^sub>f * \<psi>\<^sub>1) = dim_col \<psi>\<^sub>2" by (simp add: mat_of_cols_list_def) qed lemma (in deutsch) \<psi>\<^sub>2_is_state: shows "state 2 \<psi>\<^sub>2" proof show "dim_col \<psi>\<^sub>2 = 1" by (simp add: Tensor.mat_of_cols_list_def) show "dim_row \<psi>\<^sub>2 = 2\<^sup>2" by (simp add: Tensor.mat_of_cols_list_def) show "\<parallel>Matrix.col \<psi>\<^sub>2 0\<parallel> = 1" using gate_on_state_is_state \<psi>\<^sub>1_is_state deutsch_transform_is_gate \<psi>\<^sub>1_to_\<psi>\<^sub>2 state_def by (metis (no_types, lifting)) qed lemma H_tensor_Id_1: defines d:"v \<equiv> mat_of_cols_list 4 [[1/sqrt(2), 0, 1/sqrt(2), 0], [0, 1/sqrt(2), 0, 1/sqrt(2)], [1/sqrt(2), 0, -1/sqrt(2), 0], [0, 1/sqrt(2), 0, -1/sqrt(2)]]" shows "(H \<Otimes> Id 1) = v" proof show "dim_col (H \<Otimes> Id 1) = dim_col v" by (simp add: d H_def Id_def mat_of_cols_list_def) show "dim_row (H \<Otimes> Id 1) = dim_row v" by (simp add: d H_def Id_def mat_of_cols_list_def) fix i j:: nat assume "i < dim_row v" and "j < dim_col v" then have "i \<in> {0..<4} \<and> j \<in> {0..<4}" by (auto simp add: d mat_of_cols_list_def) thus "(H \<Otimes> Id 1) $$ (i, j) = v $$ (i, j)" by (auto simp add: d Id_def H_def mat_of_cols_list_def) qed lemma H_tensor_Id_1_is_gate: shows "gate 2 (H \<Otimes> Id 1)" proof show "dim_row (H \<Otimes> Quantum.Id 1) = 2\<^sup>2" using H_tensor_Id_1 by (simp add: mat_of_cols_list_def) show "square_mat (H \<Otimes> Quantum.Id 1)" using H_is_gate id_is_gate tensor_gate_sqr_mat by blast show "unitary (H \<Otimes> Quantum.Id 1)" using H_is_gate gate_def id_is_gate tensor_gate by blast qed text \<open> Applying the Hadamard gate to the first qubit of @{text \<psi>\<^sub>2} results in @{text \<psi>\<^sub>3} = $\pm |f(0)\oplus f(1)\rangle \left[ \dfrac {(|0\rangle - |1\rangle)} {\sqrt 2}\right]$ \<close> abbreviation (in deutsch) \<psi>\<^sub>3:: "complex Matrix.mat" where "\<psi>\<^sub>3 \<equiv> mat_of_cols_list 4 [[(1-f(0))/(2*sqrt(2)) - f(0)/(2*sqrt(2)) + (1-f(1))/(2*sqrt(2)) - f(1)/(2*sqrt(2)), f(0)/(2*sqrt(2)) - (1-f(0))/(2*sqrt(2)) + (f(1)/(2*sqrt(2)) - (1-f(1))/(2*sqrt(2))), (1-f(0))/(2*sqrt(2)) - f(0)/(2*sqrt(2)) - (1-f(1))/(2*sqrt(2)) + f(1)/(2*sqrt(2)), f(0)/(2*sqrt(2)) - (1-f(0))/(2*sqrt(2)) - f(1)/(2*sqrt(2)) + (1-f(1))/(2*sqrt(2))]]" lemma (in deutsch) \<psi>\<^sub>2_to_\<psi>\<^sub>3: shows "(H \<Otimes> Id 1) * \<psi>\<^sub>2 = \<psi>\<^sub>3" proof fix i j:: nat assume "i < dim_row \<psi>\<^sub>3" and "j < dim_col \<psi>\<^sub>3" then have a0:"i \<in> {0,1,2,3} \<and> j = 0" by (auto simp add: mat_of_cols_list_def) then have "i < dim_row (H \<Otimes> Id 1) \<and> j < dim_col \<psi>\<^sub>2" using mat_of_cols_list_def H_tensor_Id_1 by auto then have "((H \<Otimes> Id 1)*\<psi>\<^sub>2) $$ (i,j) = (\<Sum> k \<in> {0 ..< dim_vec \<psi>\<^sub>2}. 
(Matrix.row (H \<Otimes> Id 1) i) $ k * (Matrix.col \<psi>\<^sub>2 j) $ k)" by (auto simp: times_mat_def scalar_prod_def) thus "((H \<Otimes> Id 1) * \<psi>\<^sub>2) $$ (i, j) = \<psi>\<^sub>3 $$ (i, j)" using mat_of_cols_list_def H_tensor_Id_1 a0 f_ge_0 by (auto simp: diff_divide_distrib) next show "dim_row ((H \<Otimes> Id 1) * \<psi>\<^sub>2) = dim_row \<psi>\<^sub>3" using H_tensor_Id_1 mat_of_cols_list_def by simp show "dim_col ((H \<Otimes> Id 1) * \<psi>\<^sub>2) = dim_col \<psi>\<^sub>3" using H_tensor_Id_1 mat_of_cols_list_def by simp qed lemma (in deutsch) \<psi>\<^sub>3_is_state: shows "state 2 \<psi>\<^sub>3" proof- have "gate 2 (H \<Otimes> Id 1)" using H_tensor_Id_1_is_gate by simp thus "state 2 \<psi>\<^sub>3" using \<psi>\<^sub>2_is_state \<psi>\<^sub>2_to_\<psi>\<^sub>3 by (metis gate_on_state_is_state) qed text \<open> Finally, all steps are put together. The result depends on the function f. If f is constant the first qubit of $\pm |f(0)\oplus f(1)\rangle \left[ \dfrac {(|0\rangle - |1\rangle)} {\sqrt 2}\right]$ is 0, if it is is\_balanced it is 1. The algorithm only uses one evaluation of f(x) and will always succeed. \<close> definition (in deutsch) deutsch_algo:: "complex Matrix.mat" where "deutsch_algo \<equiv> (H \<Otimes> Id 1) * (U\<^sub>f * ((H * |zero\<rangle>) \<Otimes> (H * |one\<rangle>)))" lemma (in deutsch) deutsch_algo_result [simp]: shows "deutsch_algo = \<psi>\<^sub>3" using deutsch_algo_def H_on_ket_zero H_on_ket_one \<psi>\<^sub>0_to_\<psi>\<^sub>1 \<psi>\<^sub>1_to_\<psi>\<^sub>2 \<psi>\<^sub>2_to_\<psi>\<^sub>3 by simp lemma (in deutsch) deutsch_algo_result_is_state: shows "state 2 deutsch_algo" using \<psi>\<^sub>3_is_state by simp text \<open> If the function is constant then the measurement of the first qubit should result in the state $|0\rangle$ with probability 1. \<close> lemma (in deutsch) prob0_deutsch_algo_const: assumes "is_const" shows "prob0 2 deutsch_algo 0 = 1" proof - have "{k| k::nat. (k < 4) \<and> \<not> select_index 2 0 k} = {0,1}" using select_index_def by auto then have "prob0 2 deutsch_algo 0 = (\<Sum>j\<in>{0,1}. (cmod(deutsch_algo $$ (j,0)))\<^sup>2)" using deutsch_algo_result_is_state prob0_def by simp thus "prob0 2 deutsch_algo 0 = 1" using assms is_const_def const_def by auto qed lemma (in deutsch) prob1_deutsch_algo_const: assumes "is_const" shows "prob1 2 deutsch_algo 0 = 0" using assms prob0_deutsch_algo_const prob_sum_is_one[of "2" "deutsch_algo" "0"] deutsch_algo_result_is_state by simp text \<open> If the function is balanced the measurement of the first qubit should result in the state $|1\rangle$ with probability 1. \<close> lemma (in deutsch) prob0_deutsch_algo_balanced: assumes "is_balanced" shows "prob0 2 deutsch_algo 0 = 0" proof- have "{k| k::nat. (k < 4) \<and> \<not> select_index 2 0 k} = {0,1}" using select_index_def by auto then have "prob0 2 deutsch_algo 0 = (\<Sum>j \<in> {0,1}. (cmod(deutsch_algo $$ (j,0)))\<^sup>2)" using deutsch_algo_result_is_state prob0_def by simp thus "prob0 2 deutsch_algo 0 = 0" using is_swap_values assms is_balanced_def by auto qed lemma (in deutsch) prob1_deutsch_algo_balanced: assumes "is_balanced" shows "prob1 2 deutsch_algo 0 = 1" using assms prob0_deutsch_algo_balanced prob_sum_is_one[of "2" "deutsch_algo" "0"] deutsch_algo_result_is_state by simp text \<open>Eventually, the measurement of the first qubit results in $f(0)\oplus f(1)$. 
\<close> definition (in deutsch) deutsch_algo_eval:: "real" where "deutsch_algo_eval \<equiv> prob1 2 deutsch_algo 0" lemma (in deutsch) sum_mod_2_cases: shows "(f 0 + f 1) mod 2 = 0 \<longrightarrow> is_const" and "(f 0 + f 1) mod 2 = 1 \<longrightarrow> is_balanced" using f_cases is_balanced_sum_mod_2 is_const_sum_mod_2 by auto lemma (in deutsch) deutsch_algo_eval_is_sum_mod_2: shows "deutsch_algo_eval = (f 0 + f 1) mod 2" using deutsch_algo_eval_def f_cases is_const_sum_mod_2 is_balanced_sum_mod_2 prob1_deutsch_algo_const prob1_deutsch_algo_balanced by auto text \<open> If the algorithm returns 0 then one concludes that the input function is constant and if it returns 1 then the function is balanced. \<close> theorem (in deutsch) deutsch_algo_is_correct: shows "deutsch_algo_eval = 0 \<longrightarrow> is_const" and "deutsch_algo_eval = 1 \<longrightarrow> is_balanced" using deutsch_algo_eval_is_sum_mod_2 sum_mod_2_cases by auto end
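The theory above is purely symbolic. As a quick cross-check (not part of the original Isabelle development, and using my own NumPy variable names), the same circuit $(H \otimes Id\,1)\,U_f\,(H|0\rangle \otimes H|1\rangle)$ can be simulated numerically; the probability of measuring 1 on the first qubit comes out as $f(0) \oplus f(1)$ for all four Boolean functions:

```python
import numpy as np

# Not part of the Isabelle theory above: a small NumPy sanity check of the
# Deutsch circuit (H (x) Id) * U_f * (H|0> (x) H|1>).  Variable names are mine.
H = np.array([[1.0, 1.0], [1.0, -1.0]]) / np.sqrt(2)
I2 = np.eye(2)
X = np.array([[0.0, 1.0], [1.0, 0.0]])
ket0 = np.array([1.0, 0.0])
ket1 = np.array([0.0, 1.0])

def U_f(f):
    """Oracle |x>|y> -> |x>|y XOR f(x)>, with the first qubit as the first kron factor."""
    U = np.zeros((4, 4))
    U[:2, :2] = I2 if f(0) == 0 else X
    U[2:, 2:] = I2 if f(1) == 0 else X
    return U

def prob_first_qubit_is_one(f):
    psi = np.kron(H @ ket0, H @ ket1)   # psi_1
    psi = U_f(f) @ psi                  # psi_2
    psi = np.kron(H, I2) @ psi          # psi_3
    return float(np.sum(np.abs(psi[2:]) ** 2))  # weight on |10> and |11>

for f in (lambda x: 0, lambda x: 1, lambda x: x, lambda x: 1 - x):
    print(f(0), f(1), round(prob_first_qubit_is_one(f), 6))  # last column = f(0) XOR f(1)
```

The two constant functions print probability 0 and the two balanced functions print probability 1, matching the `prob0_deutsch_algo_const` and `prob1_deutsch_algo_balanced` lemmas.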
If $f$ is analytic on a set $S$, then $f$ is analytic on any subset $T$ of $S$.
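A worked restatement (my own phrasing, not from the source): being analytic on a set means being analytic at every point of it, so every point of $T$ is also a point of $S$ and inherits the same local power-series expansion, giving

$$\big(f \text{ analytic on } S\big) \;\wedge\; \big(T \subseteq S\big) \;\Longrightarrow\; f \text{ analytic on } T.$$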
After Maiden Castle, Wheeler turned his attention to France, where the archaeological investigation of Iron Age sites had lagged behind developments in Britain. There, he oversaw a series of surveys and excavations with the aid of Leslie Scott, beginning with a survey tour of Brittany in the winter of 1936–37. After this, Wheeler decided to excavate the oppidum at Camp d'<unk>, near <unk>, Finistère. In addition to bringing many British archaeologists to work on the site, he hired six local Breton workmen to assist the project, coming to the belief that the oppidum had been erected by local Iron Age tribes to defend themselves from the Roman invasion led by Julius Caesar. Meanwhile, Scott had been placed in charge of an excavation at the smaller nearby hill fort of <unk>, near Quimper. In July 1939, the project focused its attention on Normandy, with excavations beginning at the Iron Age hill forts of Camp de Canada and <unk>. They were brought to an abrupt halt in September 1939 as the Second World War broke out in Europe, and the team evacuated back to Britain. Wheeler's excavation report, co-written with Katherine Richardson, was eventually published as Hill-forts of Northern France in 1957.
From Perennial.program_proof Require Import grove_prelude. From Goose.github_com.mit_pdos.gokv Require Import pb. From Perennial.program_proof.pb Require Export ghost_proof. (* Ownership/repr predicates for owning a ghost replica. The state of a replica is split into the core Replica state, the extra state that a Primary has, and the extra state that a Committer has. A committer is just a replica that also keeps information about what part of the log has been committed. *) Section replica_ghost_defns. Context `{!heapGS Σ}. Context `{!urpcregG Σ}. Context `{!pb_ghostG Σ}. Implicit Type γ:pb_names. Record Replica := mkReplica { opLog : list u8; cn : u64; }. Definition own_Replica_ghost (rid:u64) γ (r:Replica) : iProp Σ := "Haccepted" ∷ accepted_ptsto γ r.(cn) rid r.(opLog) ∗ "HacceptedUnused" ∷ ([∗ set] cn_some ∈ (fin_to_set u64), ⌜int.Z cn_some ≤ int.Z r.(cn)⌝ ∨ accepted_ptsto γ cn_some rid [] ) ∗ "#Hproposal_lb" ∷ proposal_lb_fancy γ r.(cn) r.(opLog) . (* A primary is a replica with some more stuff; technically, the rid from the replica is not necessary to have a primary*) Record PrimaryExtra := mkPrimaryExtra { conf : list u64; matchIdx : list u64; }. Definition own_Primary_ghost γ (r:Replica) (p:PrimaryExtra) : iProp Σ := "HprimaryOwnsProposal" ∷ proposal_ptsto γ r.(cn) r.(opLog) ∗ "#HconfPtsto" ∷ config_ptsto γ r.(cn) p.(conf) ∗ "#HmatchIdxAccepted" ∷ [∗ list] _ ↦ rid;j ∈ p.(conf); p.(matchIdx), accepted_lb γ r.(cn) rid (take (int.nat j) r.(opLog)) . Record CommitterExtra := mkCommitterExtra { commitIdx : u64; }. Definition own_Committer_ghost γ (r:Replica) (c:CommitterExtra) : iProp Σ := "#Hcommit_lb" ∷ commit_lb_by γ r.(cn) (take (int.nat c.(commitIdx)) r.(opLog)) ∗ "%HcommitLeLogLen" ∷ ⌜int.Z c.(commitIdx) <= length r.(opLog)⌝ . End replica_ghost_defns.
C BSTRED.F - Gateway function for SLICOT model reduction routine C AB09HD.F C C RELEASE 2.0 of SLICOT Model and Controller Reduction Toolbox. C Based on SLICOT RELEASE 5.7. Copyright (c) 2002-2020 NICONET e.V. C C Matlab call: C [Ar,Br,Cr,Dr,HSV,info] = BSTRED(meth,A,B,C,D,tol,discr,ord,alpha,beta) C C Purpose: C To find a reduced order state-space system Gr = (Ar,Br,Cr,Dr) C from a continuous- or discrete-time original system G = (A,B,C,D) C using the balanced stochastic truncation (BST) or the balanced C stochastic singular perturbation approximation (BS-SPA) methods. C The order of the reduced model is determined either by the number C of stochastic Hankel-singular values HSV greater than tol or C by the desired order ord. C C Input parameters: C meth - method flag to specify the basic model reduction method; C Allowed values for meth are: C meth = 1 : BST method with balancing; C meth = 2 : BST method (no balancing); C meth = 3 : BS-SPA method with balancing; C meth = 4 : BS-SPA (no balancing). C A,B, C C,D - state-space system matrices of size N-by-N, N-by-M, P-by-N, C and P-by-M, respectively. C tol - (optional) tolerance vector for determining the order of C reduced system, of the form [tol1, tol2], where: C tol1 specifies the tolerance for model reduction. C Default: tol1 = NS*epsilon_machine, where NS is the C order of the alpha-stable part of G. C tol2 specifies the tolerance for computing a minimal C realization when meth = 3 or 4. C Default: tol2 = NS*epsilon_machine. C discr - (optional) type of system: C = 0 : continuous-time (default); C = 1 : discrete-time. C ord - (optional) desired order of reduced system. C Default: ord = -1 (order determined automatically). C alpha - (optional) stability boundary for the eigenvalues of A. C Default: -sqrt(epsilon_machine) for continuous-time; C 1.0-sqrt(epsilon_machine) for discrete-time. C beta - (optional) absolute/relative error weighting parameter. C beta must be positive if D has not a full row rank. C Default: 0 (pure relative method). C C Output parameters: C Ar, Br, C Cr, Dr - matrices of the reduced system. C HSV - Hankel singular values of the alpha-stable part. C info - warning message code: C info = 1 - selected order greater than the order C of a minimal realization; C info = 2 - selected order corresponds to repeated singular C values, which are neither all included nor all C excluded from the reduced model; C info = 3 - selected order less than the order of C the unstable part. C C Contributors: C D. Sima, University of Bucharest, and C A. Varga, German Aerospace Center, C DLR Oberpfaffenhofen, March 2001. C C Revisions: C V. Sima, Research Institute for Informatics, Bucharest, June 2001, C Apr. 2009, Dec. 2012. C C ********************************************************************* C SUBROUTINE MEXFUNCTION( NLHS, PLHS, NRHS, PRHS ) C C .. Parameters .. DOUBLE PRECISION ONE, TWO, ZERO PARAMETER ( ONE = 1.0D0, TWO = 2.0D0, ZERO = 0.0D0 ) C C .. Mex-file interface parameters .. INTEGER PLHS(*), PRHS(*) INTEGER*4 NLHS, NRHS C C .. Mex-file integer functions .. INTEGER mxCreateDoubleMatrix, mxGetPr INTEGER*4 mxGetM, mxGetN, mxIsNumeric, mxIsComplex C C .. Scalar parameters used by SLICOT subroutines .. CHARACTER DICO, EQUIL, JOB, ORDSEL INTEGER INFO, IWARN, LDA, LDB, LDC, LDD, LDWORK, M, N, $ NR, NS, P DOUBLE PRECISION ALPHA, BETA, TOL1, TOL2 C C .. Allocatable arrays .. C !Fortran 90/95 (Fixed dimensions should be used with Fortran 77.) 
DOUBLE PRECISION, ALLOCATABLE:: A(:,:), B(:,:), C(:,:), D(:,:), $ DWORK(:), HSV(:) INTEGER, ALLOCATABLE:: IWORK(:) LOGICAL, ALLOCATABLE:: BWORK(:) C C .. Local variables and constant dimension arrays .. CHARACTER*120 TEXT LOGICAL DISCR INTEGER I, M1, MB, METH, N1, N2, N3, P1 DOUBLE PRECISION DUM, TOL(2) C C .. External functions .. LOGICAL LSAME DOUBLE PRECISION DLAMCH EXTERNAL LSAME, DLAMCH C C .. External Subroutines .. EXTERNAL AB09HD, DLACPY C C .. Intrinsic functions .. INTRINSIC MAX, MIN, SQRT C C Check for proper number of arguments. C IF( NRHS.LT.5 ) THEN CALL mexErrMsgTxt $ ( 'BSTRED requires at least 5 input arguments' ) ELSE IF( NLHS.GT.6 ) THEN CALL mexErrMsgTxt $ ( 'BSTRED requires at most 6 output arguments' ) END IF C C Check dimensions of input parameters and read/set scalar parameters. C C meth C IF( mxGetM( PRHS(1) ).NE.1 .OR. mxGetN( PRHS(1) ).NE.1 ) THEN CALL mexErrMsgTxt( 'METH must be a scalar' ) END IF IF( mxIsNumeric( PRHS(1) ).EQ.0 .OR. $ mxIsComplex( PRHS(1) ).EQ.1 ) THEN CALL mexErrMsgTxt( 'METH must be an integer scalar' ) END IF CALL mxCopyPtrToReal8( mxGetPr( PRHS(1) ), DUM, 1 ) METH = DUM IF( METH.LE.0 .OR. METH.GT.4 ) THEN CALL mexErrMsgTxt $ ( 'METH has 1 ... 4 the only admissible values' ) END IF IF( METH.EQ.1 ) THEN JOB = 'B' ELSE IF( METH.EQ.2 ) THEN JOB = 'F' ELSE IF( METH.EQ.3 ) THEN JOB = 'S' ELSE JOB = 'P' END IF C C A(NxN), B(NxM), C(PxN), D(PxM) C N = mxGetM( PRHS(2) ) M = mxGetN( PRHS(3) ) P = mxGetM( PRHS(4) ) N1 = mxGetN( PRHS(2) ) N2 = mxGetM( PRHS(3) ) N3 = mxGetN( PRHS(4) ) P1 = mxGetM( PRHS(5) ) M1 = mxGetN( PRHS(5) ) C IF( N1.NE.N ) THEN CALL mexErrMsgTxt $ ( 'A must be a square matrix' ) END IF IF( N2.NE.N ) THEN CALL mexErrMsgTxt $ ( 'B must have the same row dimension as A' ) END IF IF( N3.NE.N ) THEN CALL mexErrMsgTxt $ ( 'C must have the same column dimension as A' ) END IF IF( P1.NE.P ) THEN CALL mexErrMsgTxt $ ( 'D must have the same row dimension as C' ) END IF IF( M1.NE.M ) THEN CALL mexErrMsgTxt $ ( 'D must have the same column dimension as B' ) END IF IF( mxIsNumeric( PRHS(2) ).EQ.0 .OR. $ mxIsComplex( PRHS(2) ).EQ.1 ) THEN CALL mexErrMsgTxt( 'A must be a real matrix' ) END IF IF( mxIsNumeric( PRHS(3) ).EQ.0 .OR. $ mxIsComplex( PRHS(3) ).EQ.1 ) THEN CALL mexErrMsgTxt( 'B must be a real matrix' ) END IF IF( mxIsNumeric( PRHS(4) ).EQ.0 .OR. $ mxIsComplex( PRHS(4) ).EQ.1 ) THEN CALL mexErrMsgTxt( 'C must be a real matrix' ) END IF IF( mxIsNumeric( PRHS(5) ).EQ.0 .OR. $ mxIsComplex( PRHS(5) ).EQ.1 ) THEN CALL mexErrMsgTxt( 'D must be a real matrix' ) END IF IF( M.LE.0 ) THEN CALL mexErrMsgTxt( 'The system has no inputs' ) END IF IF( P.LE.0 ) THEN CALL mexErrMsgTxt( 'The system has no outputs' ) END IF C C tol(1x2) C TOL1 = ZERO TOL2 = ZERO IF( NRHS.GT.5 ) THEN I = mxGetM( PRHS(6) )*mxGetN( PRHS(6) ) IF( I.GT.2 ) THEN CALL mexErrMsgTxt $ ( 'TOL must be a vector with at most 2 elements' ) END IF IF( mxIsNumeric( PRHS(6) ).EQ.0 .OR. $ mxIsComplex( PRHS(6) ).EQ.1 ) THEN CALL mexErrMsgTxt( 'TOL must be a real vector' ) END IF CALL mxCopyPtrToReal8( mxGetPr( PRHS(6) ), TOL, I ) IF( I.GT.0 ) TOL1 = TOL(1) IF( I.GT.1 ) TOL2 = TOL(2) END IF C C discr C DICO = 'C' IF( NRHS.GT.6 ) THEN IF( mxGetM( PRHS(7) ).NE.1 .OR. mxGetN( PRHS(7) ).NE.1 ) THEN CALL mexErrMsgTxt( 'DISCR must be a scalar' ) END IF IF( mxIsNumeric( PRHS(7) ).EQ.0 .OR. 
$ mxIsComplex( PRHS(7) ).EQ.1 ) THEN CALL mexErrMsgTxt( 'DISCR must be an integer scalar 0 or 1' ) END IF CALL mxCopyPtrToReal8( mxGetPr( PRHS(7) ), DUM, 1 ) IF( DUM.NE.ZERO ) DICO = 'D' END IF DISCR = LSAME( DICO, 'D' ) C C ord C ORDSEL = 'A' NR = 0 IF( NRHS.GT.7 ) THEN IF( mxGetM( PRHS(8) ).NE.1 .OR. mxGetN( PRHS(8) ).NE.1 ) THEN CALL mexErrMsgTxt( 'ORD must be a scalar' ) END IF IF( mxIsNumeric( PRHS(8) ).EQ.0 .OR. $ mxIsComplex( PRHS(8) ).EQ.1 ) THEN CALL mexErrMsgTxt( 'ORD must be an integer scalar' ) END IF CALL mxCopyPtrToReal8( mxGetPr( PRHS(8) ), DUM, 1 ) IF( DUM.GE.ZERO ) THEN ORDSEL = 'F' NR = DUM NR = MIN( N, NR ) END IF END IF C C alpha C IF( NRHS.GT.8 ) THEN IF( mxGetM( PRHS(9) ).NE.1 .OR. mxGetN( PRHS(9) ).NE.1 ) THEN CALL mexErrMsgTxt( 'ALPHA must be a scalar' ) END IF IF( mxIsNumeric( PRHS(9) ).EQ.0 .OR. $ mxIsComplex( PRHS(9) ).EQ.1 ) THEN CALL mexErrMsgTxt( 'ALPHA must be a real scalar' ) END IF CALL mxCopyPtrToReal8( mxGetPr( PRHS(9) ), ALPHA, 1 ) IF( DISCR ) THEN IF( ALPHA.EQ.ONE ) ALPHA = ONE - SQRT( DLAMCH( 'E' ) ) ELSE IF( ALPHA.EQ.ZERO ) ALPHA = -SQRT( DLAMCH( 'E' ) ) END IF ELSE ALPHA = -SQRT( DLAMCH( 'E' ) ) IF( DISCR ) ALPHA = ONE + ALPHA END IF C C beta C BETA = ZERO IF( NRHS.GT.9 ) THEN IF( mxGetM( PRHS(10) ).NE.1 .OR. mxGetN( PRHS(10) ).NE.1 ) THEN CALL mexErrMsgTxt( 'BETA must be a scalar' ) END IF IF( mxIsNumeric( PRHS(10) ).EQ.0 .OR. $ mxIsComplex( PRHS(10) ).EQ.1 ) THEN CALL mexErrMsgTxt( 'BETA must be a real scalar' ) END IF CALL mxCopyPtrToReal8( mxGetPr( PRHS(10) ), BETA, 1 ) END IF C C Determine the lenghts of working arrays. C MB = M IF( BETA.NE.ZERO ) MB = M + P LDWORK = 2*N*N + MB*(N+P) + MAX( 2, N*(MAX( N, MB, P )+5), $ 2*N*P + MAX( P*(MB+2), 10*N*(N+1) ) ) C LDA = MAX( 1, N ) LDB = MAX( 1, N ) LDC = MAX( 1, P ) LDD = MAX( 1, P ) C C Allocate variable dimension local arrays. C !Fortran 90/95 ALLOCATE ( A( LDA, MAX( 1, N ) ), B( LDB, MAX( 1, M ) ), $ C( LDC, MAX( 1, N ) ), D( LDD, MAX( 1, M ) ), $ DWORK( LDWORK ), HSV( MAX( 1, N ) ), $ IWORK( MAX( 1, 2*N ) ), BWORK( MAX( 1, 2*N ) ) ) C C Copy inputs from MATLAB workspace to locally allocated arrays. C CALL mxCopyPtrToReal8( mxGetPr( PRHS(2) ), A, N*N ) CALL mxCopyPtrToReal8( mxGetPr( PRHS(3) ), B, N*M ) CALL mxCopyPtrToReal8( mxGetPr( PRHS(4) ), C, P*N ) CALL mxCopyPtrToReal8( mxGetPr( PRHS(5) ), D, P*M ) C C Do the actual computations. C EQUIL = 'S' CALL AB09HD( DICO, JOB, EQUIL, ORDSEL, N, M, P, NR, $ ALPHA, BETA, A, LDA, B, LDB, C, LDC, D, LDD, $ NS, HSV, TOL1, TOL2, IWORK, DWORK, LDWORK, $ BWORK, IWARN, INFO ) C C Copy output to MATLAB workspace. C IF( INFO.EQ.0 ) THEN IF( NLHS.GE.1 ) THEN PLHS(1) = mxCreateDoubleMatrix( NR, NR, 0 ) IF( NR.LT.N .AND. NR.GT.0 ) $ CALL DLACPY( 'F', NR, NR, A, LDA, A, NR ) CALL mxCopyReal8ToPtr( A, mxGetPr( PLHS(1) ), NR*NR ) END IF IF( NLHS.GE.2 ) THEN PLHS(2) = mxCreateDoubleMatrix( NR, M, 0 ) IF( NR.LT.N .AND. NR.GT.0 ) $ CALL DLACPY( 'F', NR, M, B, LDB, B, NR ) CALL mxCopyReal8ToPtr( B, mxGetPr( PLHS(2) ), NR*M ) END IF IF( NLHS.GE.3 ) THEN PLHS(3) = mxCreateDoubleMatrix( P, NR, 0 ) CALL mxCopyReal8ToPtr( C, mxGetPr( PLHS(3) ), P*NR ) END IF IF( NLHS.GE.4 ) THEN PLHS(4) = mxCreateDoubleMatrix( P, M, 0 ) CALL mxCopyReal8ToPtr( D, mxGetPr( PLHS(4) ), P*M ) END IF C IF( NLHS.GE.5 ) THEN PLHS(5) = mxCreateDoubleMatrix( NS, 1, 0 ) CALL mxCopyReal8ToPtr( HSV, mxGetPr( PLHS(5) ), NS ) END IF IF( NLHS.GE.6 ) THEN PLHS(6) = mxCreateDoubleMatrix( 1, 1, 0 ) HSV(1) = IWARN CALL mxCopyReal8ToPtr( HSV, mxGetPr( PLHS(6) ), 1 ) END IF C END IF C C Deallocate local arrays. 
C !Fortran 90/95 C DEALLOCATE ( A, B, C, D, HSV, IWORK, DWORK, BWORK ) C C Error handling. C IF( INFO.NE.0 ) THEN WRITE( TEXT,'( " INFO = ", I4, " ON EXIT FROM AB09HD" )' ) INFO CALL mexErrMsgTxt( TEXT ) END IF C RETURN C *** Last line of BSTRED *** END
# U of U IS Deep Learning Study Group - Notes #1 #### Author: Brian Sheng #### Art Credit: Stephen Vickers ```python from IPython.display import YouTubeVideo import tensorflow as tf import sympy as sp from sympy import Matrix sp.init_printing("latex") ``` Deep Learning has been getting a lot of attention lately. The question is why... and does it live up to the hype? Well, you may know that our ability to train and run bigger neural networks due to our increase in compute power since days past is a big part of it. **Note**: This notebook explains things from the ground up, and you may be familiar with some of these concepts already. The intention of the simplistic explanations is not to insult or condescend, but to couch things in as simple terms as possible, but no simpler. As a result, plain English is often used to introduce heady concepts in layman's terms. Feel free to skim through concepts you're already solidly familiar with, but please be open to alternative perspectives that may be enlightening. If you want the abridged version of everything, I'd suggest reading the headings, reading around the *bolded statements*, looking at the visuals, reading the formulas, and reading things that are indented in HTML quotes style like this: > This is HTML quote style At the same time it's important to know that I do not hold a Ph.D in Computer Science, nor am I currently an industy leader in AI, so what is written here is not dogma. I am simply a determined amateur who is doing his best to fill his knowledge gaps and do some good science. Many of the makers of these resources *are* experts in their field however, and I would advise that you go through them: ## Additional Learning Resources **The de facto Deep Learning textbook from Goodfellow et al (free!)**: http://deeplearningbook.org/ **An easier online Deep Learning textbook (free!)**: http://neuralnetworksanddeeplearning.com/ **Hands-On Machine Learning with Scikit-Learn and TensorFlow: Concepts, Tools, and Techniques to Build Intelligent Systems (~$32)**: https://www.amazon.com/gp/product/1491962291 **Hands-On Machine Learning Github (free code and Jupyter Notebooks!)**: https://github.com/ageron/handson-ml **How Deep Neural Networks Work**: https://www.youtube.com/watch?v=ILsA4nyG7I0 **How Convolutional Neural Networks work**: https://www.youtube.com/watch?v=FmpDIaiMIeA **CS231N Winter 2016 Lectures from Andrej Karpathy at Stanford**: https://www.youtube.com/playlist?list=PLkt2uSq6rBVctENoVBg1TpCC7OQi31AlC **CS231N Winter 2016 Lecture Notes**: http://cs231n.github.io/ **ConvNetJS (neural nets in your browser with JavaScript!)**: http://cs.stanford.edu/people/karpathy/convnetjs/ **Hvass Labs TensorFlow Jupyter Notebook Tutorials with YouTube Videos**: https://github.com/Hvass-Labs/TensorFlow-Tutorials **Stanford's Deep Natural Language Processing Video Lectures**: https://www.youtube.com/playlist?list=PL3FW7Lu3i5Jsnh1rnUwq_TcylNr7EkRe6 **Berkeley's Deep Reinforcement Learning Course (even more learning resources recommended there!)**: http://rll.berkeley.edu/deeprlcourse/ **YouTube playlist for developing visual intution for linear algebra**: https://www.youtube.com/playlist?list=PLZHQObOWTQDPD3MizzM2xVFitgF8hE_ab **YouTube playlist for developing visual intution for calculus**: https://www.youtube.com/playlist?list=PLZHQObOWTQDMsr9K-rj53DwVRMYO3t5Yr **Backpropagation Example Python Code:** https://iamtrask.github.io/2017/03/21/synthetic-gradients/ https://iamtrask.github.io/2015/07/12/basic-python-network/ With that out of the way, 
let's proceed. ## Introduction and Learning Goals ### Introduction #### Why can we go deeper? Why are neural nets suddenly working better? In a nutshell: - Backpropagation has been around for a while - Neural nets are nothing new #### Why are neural nets hot right now? - More compute power - More data - Use of different neural net types - Convolutional Neural Net architectures exist that have ~10^2 layers (not all “layers” have neurons) - Vanilla Fully-Connected Nets usually don’t see much benefit from >3 layers depending on the application - Recurrent Net depth is <10^1 layers, usually more like 4 or 5. (due to nature of computation and exacerbated vanishing/exploding gradient problems) - Use of different units - ReLU for ConvNets, some RNNs, Fully Connected Nets - LSTMs and GRUs for RNNs - These units train better and/or address vanishing/exploding gradients - Better initialization - RBMs initially pre-trained and then stacked to create Deep Belief Networks - Turns out better initialization is all you really need (don’t need RBMs) *Citation: Andrej Karpathy’s CS231N lectures 4, 5, 6. - Small random values, normally or uniformly distributed depending on the network type, with std dev depending on number of inputs and outputs for a given neuron. *RBM = Restricted Boltzmann Machine - Better regularization (L2, Dropout, DropConnect, Early Stopping, Ensemble, Bagging) An *in-a-nutshell* statement on the current state of AI from DARPA can be found here: ```python YouTubeVideo('-O01G3tSYpU') ``` An *in-a-nutshell* explanation of deep neural nets can be found here: ```python YouTubeVideo('ILsA4nyG7I0') ``` ### Learning Goals for this Group - Develop a better intuition of how neural nets function from the ground up. - Develop an intuition for tuning deep neural net models in practice. - Develop skills for use in practice, research, etc. #### How to accomplish this? - Presentations from week to week. - Hopefully lecture notes and code to play with on Git service of our choosing. ## Effect of Depth & General Intuitions ### Networks Have Layers You likely know that the *Deep* in *Deep Learning* is due to the large number of layers in some networks that have performed quite well on modern AI tasks in recent years. For the sake of review and reference, in a nutshell, each layer is either an input, an output, or the result of an element-wise activation function that has been applied to the result of a matrix multiply followed by an addition operation with a *bias vector*. These output vectors are the *layers*, and the weights in the matrices are the connections between them. Layers that aren't the output or input layer are called *hidden* layers. Image credit: https://en.wikipedia.org/wiki/Artificial_neural_network ### Learning of Meaningful Features The meaningful *features* that a learning system is meant to grab onto are traditionally designed by human domain experts, resulting in very complicated operations on the input data, and complex functions being fed into each *layer* of the network. The general idea with neural nets is to instead allow a learning system to define its own features as it is trained on the data. Each layer can be seen as a **layer of abstraction**, with the network creating more complicated features from simpler features. This can be seen as building up more complicated concepts from simpler ones. This can be seen in convolutional neural nets, with the learned *filters* getting more complicated as we go from input layer to output layer. 
Image credit: <strong data-cite="goodfellow2016">(Goodfellow et al, 2016)</strong>

### Repeated Kernel Trick (Warp & Slice)

In the example of a 2d data space, and classifying inputs as one of two things, you can think of a neural network as repeatedly stretching, warping, and squashing the space of data to allow for the drawing of a flat plane to separate the data. It's like taking a sheet of rubber and warping it into some weird shape before freezing it and slicing through it with a knife. Each layer results in another warping of the sheet, and the final layer corresponds to the last warping, followed by a slice with a knife. The number of *neurons* and the type of *activation function* for a given layer will determine the warping type. You can see that warpings generally **get more complex with number of neurons**, and that **adding layers nests these warpings together**, feeding them into one another like Russian nesting dolls.

Image credit: <strong data-cite="karpathy-et-al2016">(Karpathy et al, 2016)</strong>

This can be seen in this interactive demo from Andrej Karpathy of such a neural net that runs in your browser: http://cs.stanford.edu/people/karpathy/convnetjs/demo/classify2d.html

Explanation of demo:

```python
YouTubeVideo('i94OvYb6noo?t=1h10m45s')
```

### Neural Nets as Universal Function Approximators

Neural Nets can be shown to approximate just about any arbitrary function given non-linear activation functions and enough units. Essentially, if you give neural nets enough capacity to learn and represent complex outputs, then you'll tend towards being able to output whatever arbitrary thing as long as it's a function of the input. See these chapters for much more detailed and rigorous explanations:

http://neuralnetworksanddeeplearning.com/chap4.html

http://www.deeplearningbook.org/contents/mlp.html

### Problems with Depth

The problem is that when you increase depth on a network you also make it considerably harder to train. This is because the way the network learns is tied to depth: parts of the network further removed from the output are less affected by the learning mechanism, which operates on a function of the output error of the network. Modern methods find ways around this with the aforementioned tweaks in our Introduction section to give *Deep Learning*, where we can effectively train very deep neural nets given some caveats (mainly convolutional neural nets it seems).

**I strongly recommend now playing with the aforementioned ConvNetJS two-class neural net classifier with visualization. It will give you intuition about the concepts and ideas discussed in this section.**

## Neural Nets as Composed Functions & Computational Graphs

Neural nets can be viewed as nested, or "composed", functions. They are functions of functions of functions of inputs. Looking at the top level of abstraction, they can be unhelpfully viewed as literal boxes of *magic*. This view of (unwitting) data being pulled into a box of magic to produce (suave) output is a bit too simplistic and doesn't really help us, so let's break it down a bit deeper.

### Computational Graphs

Taking the example of a neural net with one hidden layer, if we break down the magic box into the machines that it's made out of, we have a representation that looks like this:

If it looks like bedlam and chaos, that's because those little workers are computing something important in an inefficient way. We'll come back to that later.
You can see that there's a matrix multiply with bias and activation function to go from input to hidden layer, then another set of matrix multiply with bias and activation function machines to go from hidden layer to output. (On a related note, this YouTube playlist gives an excellent series of intuitive explanations of linear algebra concepts, enjoyable even for the seasoned pro: https://www.youtube.com/playlist?list=PLZHQObOWTQDPD3MizzM2xVFitgF8hE_ab) If you examine the first machine in the line, you'll see that matrix multiply and bias machines look like this: Let's clean things up a bit and go back to a more helpful version of *magic* boxes. In the process, we'll also break the operations down into their useful components, or smaller magic boxes. Here's the same matrix multiply and bias operation in terms of magic boxes: You can see that it's pretty much exactly what it says on the tin. It's the multiplication of some input **x** with some matrix **W<sub>1</sub>** using *****, followed by the addition of a bias **b<sub>1</sub>** using **+** to the result. If we keep the activation function interchangeable and leave it as a magic box labelled **f**, we can then represent our neural network as a series of magic boxes chained together like this. When you expand it all out, it looks something like this: This series of chained boxes that takes an input and spits out some outputs is a **computational graph**. In the case of neural nets, it has the property where you can always find a beginning input that depends only on itself, and also the property where you can't get caught in an infinite loop. This is called **acyclic dependency**, and will be a helpful property later. ### Optimal Substructure & Gradients of Composed Functions with the Chain Rule You may notice that each box in this graph may depend on other boxes feeding into their input. They are therefore *functions* of these other input boxes. These input boxes are also functions, so we have functions of functions. The end result of the graph is a nested or composed function. If you were to find the change of the output of this graph with respect to the change in some part further back in the graph, this would be the **derivative** of the output with respect to that part. Since these are vector or matrix shape parts, we get vector or matrix-shaped derivatives. These vector or matrix-shaped derivatives would be called our **gradients**. It is important to note another property of this computational graph. The gradients of the output with respect to each box happen to depend on each other. You can make gradients from other gradients. Solutions from smaller solution pieces. This is called **optimal substructure**, and it is also a helpful property for us. The fact that the whole computational graph is a bunch of functions composed together means that we compute these gradients using the **chain rule**. Borrowing from Wikipedia's chain rule explanation, in short, the derivative of an outer function $$F(x)=f(g(x))$$ with respect to an inner function's input (the single quote indicates a derivative of F(x)) **F'(x)** is computed by: $$F'(x) = f'(g(x))g'(x)$$ This means that the derivative of the outside function with respect to the inside function's input is equal to the derivative of the outside function with respect to the inside function multiplied by the derivative of inside function with respect to the inside function's input. 
Alternatively, if you set $z=F(x)=f(g(x))=f(y)$ and $y=g(x)$, then you can rewrite this as:

$$\frac{dz}{dx}=\frac{dz}{dy}\frac{dy}{dx}=F'(x) = f'(g(x))g'(x)$$

Where $\frac{dz}{dx}$ is the derivative of z with respect to x, $\frac{dz}{dy}$ is the derivative of z with respect to y, and $\frac{dy}{dx}$ is the derivative of y with respect to x. You can see that the $dy$ numerator and denominator appear to cancel, leaving $\frac{dz}{dx}$ as the result.

The notation $\frac{dz}{dx}$ for the derivative reflects the fact that it's *like* the ratio between a tiny change in $z$ caused by a tiny change $dx$ in its input $x$,

$$dz\approx z(x+dx)-z(x)$$

and that tiny change $dx$. When $dx$ is *infinitely small* (take the "limit" as it goes to zero), the derivative is *literally* that ratio and we get:

$$\frac{dz}{dx}=\lim_{dx \to 0}\frac{z(x+dx)-z(x)}{dx}$$

If the function has multiple inputs, then you're taking a *partial derivative* when you take the derivative w.r.t. that input while holding everything else constant. The notation for that looks like:

$$\frac{\partial z}{\partial u}=\frac{\partial z}{\partial x}\frac{\partial x}{\partial u},\quad z=f(x,y),\quad x=g(u,v)$$

You can see that we multiply gradients together with other gradients to get our final gradients of the output with respect to each box output or input (e.g. **W<sub>1</sub>** is a box input, $\boldsymbol{f}(\boldsymbol{W_1 x}+\boldsymbol{b_1})$ is the output of the first **f** box). Chain rule is described here: https://en.wikipedia.org/wiki/Chain_rule. If this is your first exposure to calculus and you've never heard of the chain rule before, then this YouTube playlist may also prove useful (The Essence of Calculus): https://www.youtube.com/playlist?list=PLZHQObOWTQDMsr9K-rj53DwVRMYO3t5Yr

The video from that playlist related to the Chain Rule is here:

```python
YouTubeVideo('YG15m2VwSjA')
```

How the gradients pass through split inputs on addition and multiplication is covered here (related to the Backpropagation section): http://cs231n.github.io/optimization-2/

Now, why do we want the gradient? Well, since that output *J* indicates how bad our neural net is doing on the training data, we want to make it as small as possible. Some parts that go into *J* can't be changed. The parts that can be changed are called the **parameters** of our model: **W<sub>1</sub>**, **b<sub>1</sub>**, **W<sub>2</sub>**, and **b<sub>2</sub>**, which are free to vary during the training process. We use the gradient of *J* with respect to these parameters to change them during training. This moves them in a direction that minimizes the loss *J* on the training data. This process is Gradient Descent, and is covered in the next section.

## Gradient Descent & Backpropagation

### Gradient Descent

So, you have a neural net that has an associated loss function. To get it to learn, we minimize this loss function with respect to the training data, fitting it to the data. The general way to do this is with any number of modified versions of *Gradient Descent*. Looking at the motivation section of the Wikipedia page for the gradient: https://en.wikipedia.org/wiki/Gradient

> *Consider a surface whose height above sea level at point (x, y) is H(x, y).
The gradient of H at a point is a vector pointing in the direction of the **steepest slope or grade at that point.** The steepness of the slope at that point is given by the magnitude of the gradient vector.*

Image credit: https://en.wikipedia.org/wiki/Gradient_descent

Subtracting the gradient from its associated vector or function is like heading away from the direction of *steepest ascent*. You are heading in the direction of *steepest descent*, hence **Gradient Descent**. Intuitively, if you keep heading in the direction of steepest descent, eventually you will reach a locally flat spot or region. You will arrive at a *local minimum* of the function. See the Wikipedia page for more info: https://en.wikipedia.org/wiki/Gradient_descent

The general formula of interest is:

$$u_{t+1}=u_{t}-\alpha \nabla f(u_t)$$

Where $u$ is a parameter of the function $f(u)$, $u_{t+1}$ is the new value of $u$, $u_t$ is the old value of $u$, $\alpha$ is a scalar real number value, and $\nabla f(u_t)$ is the gradient of $f(u_t)$. The important thing to keep in mind here is that $u$ can be any arbitrarily shaped matrix, vector, or tensor, but the shapes of $u_{t+1}$, $u_{t}$, and $\nabla f(u_t)$ must be the same for gradient descent to work. That seems a bit obvious, but this will be important later.

### Computing Gradients

The naive way to compute the gradients we need for our parameters is to repeatedly apply the chain rule over and over again. This, however, results in us repeating ourselves much more than necessary. Let's look at the example where we need to compute the gradient of *J* w.r.t. (with respect to) **W<sub>1</sub>**, **b<sub>1</sub>**, **W<sub>2</sub>**, and **b<sub>2</sub>**, as in our computational graph for a one hidden layer neural net with activation functions on the hidden and output layers.
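Before assembling the gradients for the full network, here is a minimal, standalone sketch (mine, not from the original notebook) of the update rule above, applied to a simple quadratic whose gradient is known in closed form:

```python
import numpy as np

# A minimal sketch (not from the original notebook) of u_{t+1} = u_t - alpha * grad f(u_t),
# applied to f(u) = 0.5 * ||u - c||^2, whose gradient is simply (u - c).
c = np.array([3.0, -1.0, 2.0])

def grad_f(u):
    return u - c

alpha = 0.1                     # learning rate
u = np.zeros_like(c)            # arbitrary starting point, same shape as the gradient
for t in range(200):
    u = u - alpha * grad_f(u)   # the gradient descent update

print(u)                        # approaches the minimizer c = [3., -1., 2.]
```

Note that `u` and `grad_f(u)` have the same shape throughout, which is exactly the shape constraint pointed out above.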
As for taking the derivative (differentiation) w.r.t a vector, the following video should outline the idea behind it: ```python YouTubeVideo('iWxY7VdcSH8') ``` Differentiation of a vector $\boldsymbol{f}$ w.r.t to another vector $\boldsymbol{x}$ gives the Jacobian matrix of $\boldsymbol{f}$ w.r.t $\boldsymbol{x}$, $\frac{\partial \boldsymbol{f}}{\partial \boldsymbol{x}}$, which is of the form: $$ \begin{bmatrix} \frac{\partial f_1}{\partial x_1} & \dots & \frac{\partial f_1}{\partial x_m} \\ \vdots & \ddots & \vdots \\ \frac{\partial f_n}{\partial x_1} & \dots & \frac{\partial f_n}{\partial x_m} \end{bmatrix} $$ This gives us this horrid mess of formulas from the Chain Rule, which we'll stick here for the sake of reference: $\boldsymbol{x}=$ Inputs $\boldsymbol{z_1}=\boldsymbol{W_1 x}+\boldsymbol{b_1}=$ Input values to "hidden" activation function $\boldsymbol{a}=\boldsymbol{f}(\boldsymbol{z_1})=$ Hidden Activations $\boldsymbol{z_2}=\boldsymbol{W_2 a}+\boldsymbol{b_2}=$ Inputs to "output" activation function $\boldsymbol{y ̂}=\boldsymbol{f}(\boldsymbol{z_2})=$Output Activations $$ \frac{\partial J}{\partial \boldsymbol{b_2}} = \frac{\partial J}{ \partial \boldsymbol{y ̂ }} \frac{\partial \boldsymbol{y ̂ }}{\partial \boldsymbol{z_2} } \frac{\partial \boldsymbol{z_2}}{\partial \boldsymbol{b_2} } $$ $$ \frac{\partial J}{\partial \boldsymbol{y ̂}} \frac{\partial \boldsymbol{y ̂}}{\partial\boldsymbol{z_2}} = \frac{\partial J}{\partial \boldsymbol{y ̂}} \odot \frac{\partial \boldsymbol{y ̂}}{\partial\boldsymbol{z_2}} = \boldsymbol{\delta_2} $$ Where $\odot$ is **element-wise multiplication**, a.k.a the Hadamard Product: https://en.wikipedia.org/wiki/Hadamard_product_(matrices), and $\frac{\partial \boldsymbol{y ̂ }}{\partial \boldsymbol{z_2} }$ is the derivative of the output activation function w.r.t its inputs. $$ \frac{\partial \boldsymbol{z_2}}{\partial \boldsymbol{b_2}}=\frac{\partial}{\partial \boldsymbol{b_2}}\left( \boldsymbol{w_2 a}+\boldsymbol{b_2}\right)=\boldsymbol{1} $$ Where $\boldsymbol{1}$ is the ones vector. (i.e. $\boldsymbol{1}=\begin{bmatrix} 1 \\ ... \\ 1 \end{bmatrix}$ for some arbitrary length) In the case of $\frac{\partial \boldsymbol{z_2}}{\partial \boldsymbol{b_2}}$ , the length of the vector is the same as the length of $\boldsymbol{b_2}$. 
$$ \frac{\partial J}{\partial \boldsymbol{b_2}} = \boldsymbol{\delta_2} \odot \boldsymbol{1}=\boldsymbol{\delta_2} $$ $$ \frac{\partial J}{\partial \boldsymbol{ w_2 }} = \frac{\partial J}{ \partial \boldsymbol{y ̂ }} \frac{\partial \boldsymbol{y ̂ }}{\partial \boldsymbol{z_2} } \frac{\partial \boldsymbol{z_2}}{\partial \boldsymbol{w_2} } = \boldsymbol{\delta_2} \frac{\partial \boldsymbol{z_2}}{\partial \boldsymbol{w_2}} $$ $$ \frac{\partial \boldsymbol{z_2}}{\partial \boldsymbol{w_2}} = \frac{\partial}{\partial \boldsymbol{w_2}} \left( \boldsymbol{w_2 a}+\boldsymbol{b_2}\right) = \boldsymbol{a}^T$$ $$\frac{\partial J}{\partial \boldsymbol{ w_2 }} = \boldsymbol{\delta_2} \otimes \boldsymbol{a} = \boldsymbol{\delta_2}\boldsymbol{a}^T $$ Where $\otimes$ is the **outer (or tensor) product**: https://en.wikipedia.org/wiki/Outer_product $$\frac{\partial J}{\partial \boldsymbol{b_1}} = \frac{\partial J}{\partial \boldsymbol{y ̂}} \frac{\partial \boldsymbol{y ̂}}{\partial\boldsymbol{z_2}} \frac{\partial \boldsymbol{z_2}}{\partial \boldsymbol{a} } \frac{\partial \boldsymbol{a}}{\partial \boldsymbol{z_1} } \frac{\partial \boldsymbol{z_1}}{\partial \boldsymbol{b_1} } = \boldsymbol{\delta_2} \frac{\partial \boldsymbol{z_2}}{\partial \boldsymbol{a} } \frac{\partial \boldsymbol{a}}{\partial \boldsymbol{z_1} } \frac{\partial \boldsymbol{z_1}}{\partial \boldsymbol{b_1}} $$ Where $\frac{\partial \boldsymbol{a}}{\partial \boldsymbol{z_1} }$ is the derivative of the hidden activation function w.r.t. its inputs. $$ \frac{\partial \boldsymbol{z_2}}{\partial \boldsymbol{a}} = \frac{\partial}{\partial \boldsymbol{a}} \left( \boldsymbol{w_2 a}+\boldsymbol{b_2}\right) = \boldsymbol{w_2}^T $$ $$ \frac{\partial \boldsymbol{z_1}}{\partial \boldsymbol{b_1}} = \frac{\partial}{\partial \boldsymbol{b_1}} \left( \boldsymbol{w_1 x}+\boldsymbol{b_1}\right) = \boldsymbol{1} $$ $$\boldsymbol{\delta_2} \frac{\partial \boldsymbol{z_2}}{\partial \boldsymbol{a} } = \langle \boldsymbol{w_2},\boldsymbol{\delta_2}\rangle = \boldsymbol{w_2} \cdot \boldsymbol{\delta_2} = \boldsymbol{w_2}^T\boldsymbol{\delta_2} $$ Where $\langle \boldsymbol{w_2},\boldsymbol{\delta_2}\rangle$ is the **inner product** (in this case, called the **dot product**) of the matrices $\boldsymbol{w_2}$ and $\boldsymbol{\delta_2}$ : https://en.wikipedia.org/wiki/Inner_product_space https://en.wikipedia.org/wiki/Dot_product $$ \frac{\partial J}{\partial \boldsymbol{b_1}} = \boldsymbol{\delta_2} \frac{\partial \boldsymbol{z_2}}{\partial \boldsymbol{a} } \frac{\partial \boldsymbol{a}}{\partial \boldsymbol{z_1} } \frac{\partial \boldsymbol{z_1}}{\partial \boldsymbol{b_1}} = \langle \boldsymbol{w_2},\boldsymbol{\delta_2}\rangle \odot \frac{\partial \boldsymbol{a}}{\partial \boldsymbol{z_1} } \odot \boldsymbol{1} = \boldsymbol{\delta_1} \odot \boldsymbol{1} = \boldsymbol{\delta_1} $$ $$ \frac{\partial J}{\partial \boldsymbol{ w_1 }} = \frac{\partial J}{\partial \boldsymbol{y ̂}} \frac{\partial \boldsymbol{y ̂}}{\partial\boldsymbol{z_2}} \frac{\partial \boldsymbol{z_2}}{\partial \boldsymbol{a} } \frac{\partial \boldsymbol{a}}{\partial \boldsymbol{z_1} } \frac{\partial \boldsymbol{z_1}}{\partial \boldsymbol{w_1}} = \boldsymbol{\delta_1} \frac{\partial \boldsymbol{z_1}}{\partial \boldsymbol{ w_1 }} $$ $$ \frac{\partial \boldsymbol{z_1}}{\partial \boldsymbol{ w_1 }} = \frac{\partial}{\partial \boldsymbol{ w_1 }} \left( \boldsymbol{w_1 x}+\boldsymbol{b_1}\right) = \boldsymbol{x}^T $$ $$ \frac{\partial J}{\partial \boldsymbol{w_1}} = \boldsymbol{\delta_1} \otimes 
\boldsymbol{x} = \boldsymbol{\delta_1} \boldsymbol{x}^T $$

So, that was most likely a bit overwhelming. We'll go through that in a more intuitive manner in a bit, but now you have the formulas for reference later. You can see there's a lot of repeated work in these formulas. Terms like $\frac{\partial J}{\partial \boldsymbol{y ̂}} \frac{\partial \boldsymbol{y ̂}}{\partial\boldsymbol{z_2}} \frac{\partial \boldsymbol{z_2}}{\partial \boldsymbol{a} } \frac{\partial \boldsymbol{a}}{\partial \boldsymbol{z_1} }$ contain other terms, such as $\frac{\partial J}{ \partial \boldsymbol{y ̂ }} \frac{\partial \boldsymbol{y ̂ }}{\partial \boldsymbol{z_2} } $. If you were to simply calculate everything straight-ahead without reusing work, you'd be repeating a lot of derivative calculations unnecessarily. If we go back to the factory analogy with the workers computing the gradients, you can see it goes right back to bedlam and chaos very quickly if we use the straight-ahead method. This is where backpropagation comes in.

### The Essence of Backpropagation

Backpropagation takes advantage of two properties of the computational graphs of neural nets to do these calculations without having to recalculate anything. The properties of **acyclic dependency** and **optimal substructure** allow for a fancy thing called **Dynamic Programming** to be applied to the problem of calculating gradients. In this case, Dynamic Programming just means that we make sure to start with a calculation that is self-contained and doesn't depend on stuff we don't have (the beginning/end of the graph), then move on to using that result to calculate things that depend on it, then continue onto things that depend on that next result and so on. We start from the *beginning* to calculate that *initial thing* and work our way through building *things* that depend on that *initial thing*, then building *new things* that depend on those *things* and so on, all the while making sure to compute everything without repeating work unnecessarily.

With this in mind, we can see that:

* The *things* we're building up are the gradients of the loss function output $J$ w.r.t. each part.
* Our beginning is at the loss function output $J$.
* We can determine an order to do the calculations in by performing a **topological sort** on the computational graph, or just keep track of what we've computed and check if we've computed something already before doing it, which is called **memoization**.
* Once we've computed everything that we need to, we're done.

You can see the **acyclic dependency** and **optimal substructure** of the problem in our diagram of the interdependencies of the gradient from earlier:

### Backpropagation from the Top-Down: Keeping Track of Shape

Let's walk through the example with one hidden layer we had before, and go through the formulas in more depth. One thing that tends to trip people up is keeping track of the shapes of all the vectors and matrices. The shape of the matrices/tensors during feedforward and backprop is important to keep in mind. Things get confusing when you mix it up. It's easy to get lost in the abstract world of formulas, resulting in getting all the parts that you need for computation without knowing how to put them together to make actual gradients. So, let's go through the process *backwards* and figure out how things are supposed to fit together, and what shape everything is, before slowly revealing more detail about the parts we're assembling.

Remember that formula for gradient descent?
$$u_{t+1}=u_{t}-\alpha \nabla f(u_t)$$

Well, we need to make sure that $u_{t}$ and $\nabla f(u_t)$ are the same shape. If we look at some operations available for us to play with, we can see some preserve shape while others do not. Let's set up some weight matrices and bias vectors to look at while we go through this example. We'll use the SymPy Python library for its array data structures and operations, and print things out as we go along.

We'll use a neural network that looks like the following figure: This network has 3 input units, 4 hidden units, and 2 output units. If we use column vectors in our matrix multiplications, we'll have two weight matrices and two bias vectors for our input-to-hidden and hidden-to-output mappings. The input-to-hidden weight matrix and bias vector will be $\boldsymbol{w_1}$ and $\boldsymbol{b_1}$, and the hidden-to-output weight matrix and bias vector will be $\boldsymbol{w_2}$ and $\boldsymbol{b_2}$, as before.

#### Non-Square Matrices as Transformations Between Dimensions (Shape Changing)

A matrix can be viewed as encoding a linear function, and non-square matrices can be viewed as encoding linear functions between dimensions. If you're operating on column vectors, then the number of rows encodes the output dimensionality, and the number of columns is the input dimensionality. So a matrix like the following one encodes a transformation from 4-dimensional vectors to 2-dimensional vectors.

$$\begin{bmatrix} 1 & 1 & 1 & 1\\ 1 & 1 & 1 & 1 \end{bmatrix}$$

A matrix multiply between a 4D vector and such a matrix will produce a 2D vector.

$$\begin{bmatrix} 1 & 1 & 1 & 1\\ 2 & 2 & 2 & 2 \end{bmatrix}\begin{bmatrix} 2 \\ 2 \\ 2 \\ 2 \end{bmatrix}=\begin{bmatrix} 8 \\ 16 \end{bmatrix}$$

You can see that a non-square matrix multiplication of this kind can be seen as a magic box that takes in an input of one shape and produces an output of another shape. The video below gives a more in-depth understanding of how this is the case:

```python
YouTubeVideo('v8VSDg_WQlA')
```

So, looking at that neural net figure again, we'll have a 3D input vector, which must map to a 4D hidden vector, which must in turn map to a 2D output vector. This means we'll need a (4,3) (or $4\times3$ depending on your notation) shaped matrix for the input-to-hidden map and a (2,4) shaped matrix for the hidden-to-output map. Our $b_1$ and $b_2$ bias vectors get added in at the hidden and output layers, so they must be 4D and 2D themselves. Let's make the variables for these in SymPy and go through a bunch of symbolic computations with matrices so we can get an idea of how things interact.

```python
def names_2_sympy_str_list(shape, array_name):
    """Takes a shape tuple or list, and a string for the array name, and makes
    strings formatted for creating sympy symbols of the form:

        {array_name}_{indices of element in matrix}

    Returns:
        A list of strings of the form:
        [{array_name}({rows+1}\,(1:{shape[1]})), {array_name}({rows+1}\,(1:{shape[1]})), ... ]
        to be fed to the sympy.symbols(...) function to create symbols to be fed
        into a symbolic matrix.
    """
    return ['{{{0}}}_{{({1}\,(1:{2}))}}'.format(array_name, rows+1, shape[1]+1)
            for rows in range(shape[0])]

def string_list_to_sympy_matrix(sympy_string_list):
    """Takes a list of strings for feeding into sympy.symbols(...), feeds it to
    sympy.symbols(...) and sympy.Matrix(...), and returns the resulting symbolic matrix.
    """
    return Matrix(sp.symbols(sympy_string_list))

def sympy_matrices_from_names(shapes, names):
    """Takes a list of matrix shape tuples and a list of names for the arrays (strings)
    and creates symbolic matrices of the form:

        |name_11 name_12|
        |name_21 name_22|
        |name_31 name_32|

    Returns:
        A list of matrices to be unpacked, or a single matrix if only
        one array is given.
    """
    assert(isinstance(shapes, (tuple, list)) and isinstance(names, (str, tuple, list)))
    if isinstance(names, str):
        # Single name given: build one matrix directly from the single shape tuple.
        string_list_input = names_2_sympy_str_list(shapes, names)
        return string_list_to_sympy_matrix(string_list_input)
    else:
        assert(len(shapes) == len(names))
        string_lists = [names_2_sympy_str_list(arr, nam)
                        for arr, nam in zip(shapes, names)]
        matrices = [string_list_to_sympy_matrix(string_list)
                    for string_list in string_lists]
        return matrices if len(matrices) > 1 else matrices[0]

parameter_and_variable_shapes = ((2,1), (2,1), (2,4), (4,1), (4,1), (4,3), (3,1), (4,1),)
parameter_and_variable_names = ('b_2', '\delta_2', 'w_2', 'b_1', '\delta_1', 'w_1', 'x', 'a')

(b_2, delta_2, w_2, b_1,
 delta_1, w_1, input_x,
 hidden_activations) = sympy_matrices_from_names(parameter_and_variable_shapes,
                                                 parameter_and_variable_names)

display(b_2, w_2, b_1, w_1, input_x, hidden_activations)
```

We'll have activation functions applied to the results of the matrix multiply and biasing on the hidden and output layers, but let's focus on the shape first. In order to increment the $\boldsymbol{b_2}$ vector, we'll need one of the same size to subtract from it. Same goes for $\boldsymbol{w_2}$, $\boldsymbol{b_1}$, and $\boldsymbol{w_1}$. We want something that looks like this:

$$\boldsymbol{b_2}-\Delta \boldsymbol{b_2} = \begin{bmatrix} b_{2(1,1)} \\ b_{2(2,1)} \end{bmatrix} - \begin{bmatrix} \Delta b_{2(1,1)} \\ \Delta b_{2(2,1)} \end{bmatrix} $$

If we refer to the formulas from applying the chain rule earlier, we see this needs to depend on the derivative of the loss $J$ w.r.t. $\hat{\boldsymbol{y}}$, $\frac{\partial J}{ \partial \hat{\boldsymbol{y}} }$, which will be a vector in the shape of $\hat{\boldsymbol{y}}$ due to the way that derivatives w.r.t. a vector work. Activation functions are element-wise, and thus shape-preserving, so their derivatives also preserve shape. The output activation function derivative w.r.t. its input, $\frac{\partial \hat{\boldsymbol{y}} }{\partial \boldsymbol{z_2} }$, will therefore be the same shape as $\boldsymbol{b_2}$ too.

$$ \frac{\partial J}{\partial \boldsymbol{b_2}} = \frac{\partial J}{ \partial \hat{\boldsymbol{y}} } \frac{\partial \hat{\boldsymbol{y}} }{\partial \boldsymbol{z_2} } \frac{\partial \boldsymbol{z_2}}{\partial \boldsymbol{b_2} } $$

The activation function and its derivative are like magic boxes that operate element-wise on inputs, leaving shapes unchanged. As for that last piece, from differentiation rules, we can clearly see:

$$ \frac{\partial \boldsymbol{z_2}}{\partial \boldsymbol{b_2}}=\frac{\partial}{\partial \boldsymbol{b_2}}\left( \boldsymbol{w_2 a}+\boldsymbol{b_2}\right)=\boldsymbol{1} $$

Where $\boldsymbol{1}$ is the same shape as $\boldsymbol{b_2}$. So, we see that we essentially use just the first two pieces, and it's quite easy to put a vector together shaped like $\boldsymbol{b_2}$ from them. Everything is already the same shape as $\boldsymbol{b_2}$, so we just multiply $\frac{\partial J}{ \partial \hat{\boldsymbol{y}} }$ and $ \frac{\partial \hat{\boldsymbol{y}} }{\partial \boldsymbol{z_2} }$ together element-wise.
The magic box analogy for that would be something like this: If we multiply everything together, we get this thing, which we can multiply by some $\alpha$ and subtract from $\boldsymbol{b_2}$. It pops up again and ends up backpropagating through the gradients, so we'll call it $\boldsymbol{\delta_2}$. $$ \frac{\partial J}{\partial \boldsymbol{b_2}} = \frac{\partial J}{\partial \boldsymbol{y ̂}} \frac{\partial \boldsymbol{y ̂}}{\partial\boldsymbol{z_2}} = \frac{\partial J}{\partial \boldsymbol{y ̂}} \odot \frac{\partial \boldsymbol{y ̂}}{\partial\boldsymbol{z_2}} = \boldsymbol{\delta_2} $$ $$ \boldsymbol{b_2} - \Delta \boldsymbol{b_2} = \boldsymbol{b_2} - \alpha \boldsymbol{\delta_2} $$ Where $\odot$ is element-wise multiplication, aka the Hadamard Product: https://en.wikipedia.org/wiki/Hadamard_product_(matrices). So, everything's been a convenient shape so far, and we've only use shape-preserving operations to assemble the pieces we need and combine them together. What about for the next parameter set in line, $\boldsymbol{w_2}$? If we take a look at the Chain Rule equations again, we see that we need to use this $\boldsymbol{\delta_2}$ to make the gradient $\frac{\partial J}{\partial \boldsymbol{ w_2 }}$ to increment $\boldsymbol{w_2}$. $$ \frac{\partial J}{\partial \boldsymbol{ w_2 }} = \frac{\partial J}{ \partial \boldsymbol{y ̂ }} \frac{\partial \boldsymbol{y ̂ }}{\partial \boldsymbol{z_2} } \frac{\partial \boldsymbol{z_2}}{\partial \boldsymbol{w_2} } = \boldsymbol{\delta_2} \frac{\partial \boldsymbol{z_2}}{\partial \boldsymbol{w_2}} $$ Now, $\boldsymbol{b_2}$ looks like this: ```python display(b_2) ``` Keeping in mind that $\boldsymbol{\delta_2}$ is the same shape as $\boldsymbol{b_2}$, $\boldsymbol{w_2}$ looks like this: ```python display(w_2) ``` How is this supposed to work out? They're not the same shape! We can't hope to get there with shape-preserving operations! Well, we don't have to preserve shape, we just need to put things together from other things that they depend on. You can have more than one thing in $\boldsymbol{w_2}$ depend on a single item in $\boldsymbol{b_2}$. Specifically, differentiation rules show that we'll be using the *Sum Rule* in addition to the Chain Rule and company. This means we'll need to multiply things together, sum them, and stick them in matrices. That sounds a lot like matrix multiplication... because it *is* matrix multiplication. We'll specifically be doing a special kind of matrix multiply that's called the *outer product* or *tensor product* (https://en.wikipedia.org/wiki/Outer_product). It will take two vectors and make a matrix out of them. It's like making a transformation between dimensions using two vectors. Like making a function from two vectors. In magic box terms, that looks a bit like this: **Note: Transpose and Outer Product functions make functions from their inputs. Since having an arrow to another magic box would be confusing, we show the functions being operated on as being enclosed in bubbles. A magic box enters another one as a bubble, and exits as a modified version of itself. A pair of pegs gets combined to make a peg mapping magic box, and exits the magic box in a bubble. 
** If we look at the Chain Rule formulas again, we see that we need to multiply our $\boldsymbol{\delta_2}$ by the derivative $\frac{\partial \boldsymbol{z_2}}{\partial \boldsymbol{w_2}}$:

$$ \frac{\partial J}{\partial \boldsymbol{ w_2 }} = \frac{\partial J}{ \partial \hat{\boldsymbol{y}} } \frac{\partial \hat{\boldsymbol{y}} }{\partial \boldsymbol{z_2} } \frac{\partial \boldsymbol{z_2}}{\partial \boldsymbol{w_2} } = \boldsymbol{\delta_2} \frac{\partial \boldsymbol{z_2}}{\partial \boldsymbol{w_2}} $$

Applying differentiation rules, we see that it's equal to the **activations of the hidden layer** $\boldsymbol{a}$.

$$ \frac{\partial \boldsymbol{z_2}}{\partial \boldsymbol{w_2}} = \frac{\partial}{\partial \boldsymbol{w_2}} \left( \boldsymbol{w_2 a}+\boldsymbol{b_2}\right) = \boldsymbol{a}$$

Now, $\boldsymbol{a}$ is the same shape, (4,1), as $\boldsymbol{b_1}$, which looks like this:

```python
display(b_1)
```

$\boldsymbol{b_2}$ is shaped like (2,1) and looks like this:

```python
display(b_2)
```

Can we make a (2,4) shape matrix from a (2,1) and a (4,1)? Intuitively, if you have something of the shape output by a matrix multiply, and something of the shape going into a matrix multiply, you should be able to reconstruct a matrix of the same shape used in the multiply. With the outer product, we can do exactly that and make a matrix from two vectors:

```python
display(delta_2, hidden_activations)
display(delta_2*hidden_activations.T)
```

This is the right shape, and actually gives us our gradient for incrementing $\boldsymbol{w_2}$, but let's zoom in a bit and take a look at why. Looking at the forward pass, we can examine what $\boldsymbol{z_2}=\boldsymbol{w_2 a}+\boldsymbol{b_2}$ looks like:

```python
w_2*hidden_activations+b_2
```

Hmm, those expressions look very similar. If you *remove the addition symbols* and *get rid of the biases and $w_2$ factors*, it looks like you could multiply $\boldsymbol{\delta_2}$ by each column to get the same result as the outer product between $\boldsymbol{\delta_2}$ and $\boldsymbol{a}$. You can see that, as mentioned in the lecture notes for CS231N, the pieces of $\boldsymbol{\delta_2}$ are distributed amongst its dependents in $\boldsymbol{a}$, **splitting along sum gates**.

Now we need to backpropagate our gradients through the hidden layer to the biases and weights on the other side. If we look at a matrix transpose operation as taking a transformation between dimensions and flipping it, then we can use $\boldsymbol{w_2}$ to at least get our gradients in the shape we want. Essentially, with the matrix transpose, we have another magic box that takes in magic boxes, and spits out new magic boxes.
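A small numeric check of that shape-flipping idea (mine, not from the original notebook): if $\boldsymbol{w_2}$ maps (4,1) vectors to (2,1) vectors, then $\boldsymbol{w_2}^T$ maps (2,1) vectors back to (4,1) vectors, which is exactly the shape we need to push $\boldsymbol{\delta_2}$ back through the hidden layer:

```python
import numpy as np

# w_2 maps (4, 1) -> (2, 1), so w_2.T maps (2, 1) -> (4, 1).  Values are random
# placeholders and names carry an _np suffix so they don't clash with the SymPy
# variables above; only the shapes matter here.
w_2_np = np.random.randn(2, 4)
delta_2_np = np.random.randn(2, 1)

back = w_2_np.T @ delta_2_np
print(back.shape)   # (4, 1), the shape of b_1 and of the hidden layer
```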
Our equations of interest (again from the Chain Rule formulas): $$\frac{\partial J}{\partial \boldsymbol{b_1}} = \frac{\partial J}{\partial \boldsymbol{y ̂}} \frac{\partial \boldsymbol{y ̂}}{\partial\boldsymbol{z_2}} \frac{\partial \boldsymbol{z_2}}{\partial \boldsymbol{a} } \frac{\partial \boldsymbol{a}}{\partial \boldsymbol{z_1} } \frac{\partial \boldsymbol{z_1}}{\partial \boldsymbol{b_1} } = \boldsymbol{\delta_2} \frac{\partial \boldsymbol{z_2}}{\partial \boldsymbol{a} } \frac{\partial \boldsymbol{a}}{\partial \boldsymbol{z_1} } \frac{\partial \boldsymbol{z_1}}{\partial \boldsymbol{b_1}} $$ $$ \frac{\partial \boldsymbol{z_2}}{\partial \boldsymbol{a}} = \frac{\partial}{\partial \boldsymbol{a}} \left( \boldsymbol{w_2 a}+\boldsymbol{b_2}\right) = \boldsymbol{w_2}^T $$ $$ \frac{\partial \boldsymbol{z_1}}{\partial \boldsymbol{b_1}} = \frac{\partial}{\partial \boldsymbol{b_1}} \left( \boldsymbol{w_1 x}+\boldsymbol{b_1}\right) = \boldsymbol{1} $$ $$\boldsymbol{\delta_2} \frac{\partial \boldsymbol{z_2}}{\partial \boldsymbol{a} } = \langle \boldsymbol{w_2},\boldsymbol{\delta_2}\rangle = \boldsymbol{w_2} \cdot \boldsymbol{\delta_2} = \boldsymbol{w_2}^T\boldsymbol{\delta_2} $$ Where $\langle \boldsymbol{w_2},\boldsymbol{\delta_2}\rangle$ is the **inner product** (in this case, called the **dot product**) of the matrices $\boldsymbol{w_2}$ and $\boldsymbol{\delta_2}$ : https://en.wikipedia.org/wiki/Inner_product_space https://en.wikipedia.org/wiki/Dot_product We can see that we have $\boldsymbol{w_2}^T$ as a factor, which is (4,2) shaped, and looks like this: ```python display(w_2.T) ``` $\boldsymbol{\delta_2}$ is (2,1) shaped, and looks like this: ```python display(delta_2) ``` If we multiply them together as $\boldsymbol{w_2}^T\boldsymbol{\delta_2}$, we get: ```python w_2.T*delta_2 ``` Which is (4,1) shaped, just what we need for incrementing the bias vector $\boldsymbol{b_1}$. Except for one element-wise factor, that is. We need the derivative of the hidden layer's activation w.r.t its input. Once we multiply that through, we'll have our $\boldsymbol{\delta_1}$ which is the gradient that we can use to move our $\boldsymbol{b_1}$ in the direction of *goodness*. $$ \frac{\partial J}{\partial \boldsymbol{b_1}} = \boldsymbol{\delta_2} \frac{\partial \boldsymbol{z_2}}{\partial \boldsymbol{a} } \frac{\partial \boldsymbol{a}}{\partial \boldsymbol{z_1} } \frac{\partial \boldsymbol{z_1}}{\partial \boldsymbol{b_1}} = \langle \boldsymbol{w_2},\boldsymbol{\delta_2}\rangle \odot \frac{\partial \boldsymbol{a}}{\partial \boldsymbol{z_1} } \odot \boldsymbol{1} = \boldsymbol{\delta_1} \odot \boldsymbol{1} = \boldsymbol{\delta_1} $$ Now we need to compute our $\frac{\partial J}{\partial \boldsymbol{ w_1 }}$ gradient for incrementing our weight matrix $\boldsymbol{ w_1 }$. Another peek at our formulas shows that we have one more shape-changing outer product to go. 
$$ \frac{\partial J}{\partial \boldsymbol{w_1}} = \frac{\partial J}{\partial \hat{\boldsymbol{y}}} \frac{\partial \hat{\boldsymbol{y}}}{\partial \boldsymbol{z_2}} \frac{\partial \boldsymbol{z_2}}{\partial \boldsymbol{a}} \frac{\partial \boldsymbol{a}}{\partial \boldsymbol{z_1}} \frac{\partial \boldsymbol{z_1}}{\partial \boldsymbol{w_1}} = \boldsymbol{\delta_1} \frac{\partial \boldsymbol{z_1}}{\partial \boldsymbol{w_1}} $$

$$ \frac{\partial \boldsymbol{z_1}}{\partial \boldsymbol{w_1}} = \frac{\partial}{\partial \boldsymbol{w_1}} \left( \boldsymbol{w_1 x}+\boldsymbol{b_1}\right) = \boldsymbol{x}^T $$

$$ \frac{\partial J}{\partial \boldsymbol{w_1}} = \boldsymbol{\delta_1} \otimes \boldsymbol{x} = \boldsymbol{\delta_1} \boldsymbol{x}^T $$

We'll multiply that out, and you can again see that distribution of gradients through summing operations at work here:

```python
display(delta_1, input_x)
display(delta_1*input_x.T)
```

That last matrix is our gradient $\frac{\partial J}{\partial \boldsymbol{w_1}}$, and we're now done! Just for reference, here's the corresponding feedforward portion $\boldsymbol{z_1}$ so you can see the distribution of the gradient through sum gates again:

```python
display(w_1*input_x+b_1)
```

That concludes this example, but keep in mind that backpropagating through more layers simply requires repeating the last few steps until you have all the gradients you need. You're after those $\boldsymbol{\delta}$ vectors, and the rest is rather simple.

Now, we're still missing a few element-wise factors when we look only at the shapes this way, so let's fill those in. The Chain Rule says we need two more kinds of derivative to complete all the gradients of a one-hidden-layer network: the activation function derivatives and the cost function's derivative w.r.t. $\hat{\boldsymbol{y}}$. We pretty much know those once we know which activation and cost functions we're using. We'll use sigmoid with MSE for the sake of finishing this example, but keep in mind that you can use ReLU or Softmax activations, and Cross-Entropy loss too:

* https://en.wikipedia.org/wiki/Rectifier_(neural_networks)
* https://en.wikipedia.org/wiki/Softmax_function
* https://en.wikipedia.org/wiki/Cross_entropy

MSE (written here as a sum of squared errors with a $\frac{1}{2}$ factor for convenience; the constant $\frac{1}{m}$ is dropped since it only rescales the gradient) gives the loss function:

$$ J = \frac{1}{2} \sum^{m}_{i=1} (\boldsymbol{y}-\hat{\boldsymbol{y}})^2 $$

Differentiation rules tell us to differentiate each of the $m$ squared terms, bring down the power of 2 to cancel the $\frac{1}{2}$ factor, and bring out the negative sign (from the inner derivative of $-\hat{\boldsymbol{y}}$) due to the Chain Rule.
This results in a derivative that looks like:

$$ \frac{\partial J}{\partial \hat{\boldsymbol{y}}} = -(\boldsymbol{y}-\hat{\boldsymbol{y}}) = (\hat{\boldsymbol{y}}-\boldsymbol{y}) $$

Sigmoid units have the form:

$$ \boldsymbol{\sigma}(\boldsymbol{z})=\frac{1}{1+e^{-\boldsymbol{z}}} $$

Which has the derivative (see http://mathworld.wolfram.com/SigmoidFunction.html):

$$ \frac{\partial \boldsymbol{a}}{\partial \boldsymbol{z}} = \frac{\partial \boldsymbol{\sigma}(\boldsymbol{z})}{\partial \boldsymbol{z}} = \boldsymbol{\sigma}(\boldsymbol{z})(1-\boldsymbol{\sigma}(\boldsymbol{z})) $$

Plug those into your equations (with the sigmoid derivative evaluated at $\boldsymbol{z_2}$ for the output layer and at $\boldsymbol{z_1}$ for the hidden layer) and you get:

$$ \frac{\partial J}{\partial \boldsymbol{b_2}} = \frac{\partial J}{\partial \hat{\boldsymbol{y}}} \frac{\partial \hat{\boldsymbol{y}}}{\partial \boldsymbol{z_2}} = \frac{\partial J}{\partial \hat{\boldsymbol{y}}} \odot \frac{\partial \hat{\boldsymbol{y}}}{\partial \boldsymbol{z_2}} = (\hat{\boldsymbol{y}}-\boldsymbol{y}) \odot \left[\boldsymbol{\sigma}(\boldsymbol{z_2})(1-\boldsymbol{\sigma}(\boldsymbol{z_2}))\right] = \boldsymbol{\delta_2} $$

$$ \frac{\partial J}{\partial \boldsymbol{w_2}} = \boldsymbol{\delta_2} \otimes \boldsymbol{a} = \boldsymbol{\delta_2}\boldsymbol{a}^T = \left[(\hat{\boldsymbol{y}}-\boldsymbol{y}) \odot \boldsymbol{\sigma}(\boldsymbol{z_2})(1-\boldsymbol{\sigma}(\boldsymbol{z_2}))\right] \otimes \boldsymbol{a} $$

$$ \frac{\partial J}{\partial \boldsymbol{b_1}} = \boldsymbol{\delta_2} \frac{\partial \boldsymbol{z_2}}{\partial \boldsymbol{a}} \frac{\partial \boldsymbol{a}}{\partial \boldsymbol{z_1}} \frac{\partial \boldsymbol{z_1}}{\partial \boldsymbol{b_1}} = \langle \boldsymbol{w_2},\boldsymbol{\delta_2}\rangle \odot \left[\boldsymbol{\sigma}(\boldsymbol{z_1})(1-\boldsymbol{\sigma}(\boldsymbol{z_1}))\right] = \boldsymbol{\delta_1} $$

$$ \frac{\partial J}{\partial \boldsymbol{w_1}} = \boldsymbol{\delta_1} \otimes \boldsymbol{x} = \boldsymbol{\delta_1} \boldsymbol{x}^T $$

That wraps up this example and this notebook! I would highly recommend checking out the code from Andrew Trask, which packages backpropagation up in an object-oriented fashion, tying into the computational graph structure we discussed and connecting to the one TensorFlow actually uses under the hood:

https://iamtrask.github.io/2017/03/21/synthetic-gradients/

We'll be moving on to Optimization and Regularization next!
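As an appendix to this example, here is a compact end-to-end sketch in NumPy that strings the four gradients above together and adds a finite-difference sanity check. Everything in it is illustrative rather than the notebook's actual setup: the layer sizes (3 inputs, 4 hidden units, 2 outputs), the random values, and the helper names are all made up.

```python
import numpy as np

# End-to-end sketch of the four gradients summarized above, with sigmoid
# activations and the 0.5 * sum-of-squares loss. All sizes and values are
# placeholders; the notebook's own w_1, b_1, w_2, b_2 and inputs live in
# earlier cells.
rng = np.random.default_rng(0)
x = rng.normal(size=(3, 1))
y = np.array([[1.0], [0.0]])
w_1, b_1 = rng.normal(size=(4, 3)), rng.normal(size=(4, 1))
w_2, b_2 = rng.normal(size=(2, 4)), rng.normal(size=(2, 1))

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

# Forward pass
z_1 = w_1 @ x + b_1
a = sigmoid(z_1)
z_2 = w_2 @ a + b_2
y_hat = sigmoid(z_2)

# Backward pass: the two delta vectors, then the two outer products
delta_2 = (y_hat - y) * sigmoid(z_2) * (1 - sigmoid(z_2))        # dJ/db_2
grad_w2 = delta_2 @ a.T                                          # dJ/dw_2
delta_1 = (w_2.T @ delta_2) * sigmoid(z_1) * (1 - sigmoid(z_1))  # dJ/db_1
grad_w1 = delta_1 @ x.T                                          # dJ/dw_1

# Sanity check: compare one entry of grad_w1 against a finite difference
# of the loss J = 0.5 * sum((y - y_hat)^2).
def loss(w1):
    hidden = sigmoid(w1 @ x + b_1)
    return 0.5 * np.sum((y - sigmoid(w_2 @ hidden + b_2)) ** 2)

eps = 1e-6
bump = np.zeros_like(w_1)
bump[0, 0] = eps
numeric = (loss(w_1 + bump) - loss(w_1 - bump)) / (2 * eps)
print(np.isclose(grad_w1[0, 0], numeric))                        # True
```

A spot check like this is a cheap way to catch transposition mistakes when implementing backprop by hand.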
[STATEMENT] lemma proj7_addpre: fixes dsn dsk flag hops nhip pre npre shows "\<pi>\<^sub>7(addpre v npre) = \<pi>\<^sub>7(v) \<union> npre" [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<pi>\<^sub>7 (addpre v npre) = \<pi>\<^sub>7 v \<union> npre [PROOF STEP] unfolding addpre_def [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<pi>\<^sub>7 (let (dsn, dsk, flag, hops, nhip, pre) = v in (dsn, dsk, flag, hops, nhip, pre \<union> npre)) = \<pi>\<^sub>7 v \<union> npre [PROOF STEP] by (cases v) simp
Require Import Arith Bool List. Require Import Omega. Set Implicit Arguments. Lemma seq_minimum : forall m n x, In x (seq n m) -> n <= x. Proof. induction m. - simpl. intro; contradiction. - intros n x. simpl. intro. destruct H; try omega. apply IHm in H. omega. Qed. Lemma seq_nodup : forall m n, NoDup (seq n m). Proof. induction m; intros; simpl; constructor. - unfold not; intro. apply seq_minimum in H. omega. - auto. Qed. Lemma count_occ_app A dec : forall (x : A) (ys zs : list A), count_occ dec (ys ++ zs) x = count_occ dec ys x + count_occ dec zs x. Proof. induction ys; auto. intro zs. simpl. rewrite (IHys zs). destruct (dec a x); omega. Qed. Fixpoint scan_left (A B : Type) (f : A -> B -> A) (bs : list B) (a : A): list A := match bs with | nil => a :: nil | b :: bs' => let a' := f a b in a :: scan_left f bs' a' end. Lemma scanl_exist_tl A B : forall (f : A -> B -> A) bs a, exists as', scan_left f bs a = a :: as'. Proof. intros. destruct bs. - simpl. exists nil. reflexivity. - simpl. exists (scan_left f bs (f a b)). reflexivity. Qed. Lemma foldl_scanl_last A B : forall f (bs : list B) (a : A) d, fold_left f bs a = last (scan_left f bs a) d. Proof. induction bs as [| b bs' IHbs']. - intros a d. reflexivity. - intros. simpl fold_left. specialize (IHbs' (f a b) d). rewrite IHbs'. simpl scan_left. remember (scanl_exist_tl f bs' (f a b)) as Extl; clear HeqExtl. destruct Extl. rewrite H. reflexivity. Qed. Fixpoint rep A (a : A) (n : nat): list A := match n with | O => nil | S n' => a :: rep a n' end. Lemma rep_length A a n : length (@rep A a n) = n. Proof. induction n; simpl; auto. Qed. Lemma rep_in A a n : forall x, In x (@rep A a n) -> a = x. Proof. induction n; simpl; intuition. Qed. Lemma rep_count A dec a n : count_occ dec (@rep A a n) a = n. Proof. induction n; auto. simpl. case (dec a a); intro; auto. exfalso; apply n0. reflexivity. Qed. Lemma rep_count_0 A dec a b n : a <> b -> count_occ dec (@rep A a n) b = 0. Proof. intro a_ne_b. induction n; auto. simpl. case (dec a b); intro; try contradiction; auto. Qed.
/* * Copyright (c) 2015 Juniper Networks, Inc. All rights reserved. */ #include "base/os.h" #include <boost/intrusive/avl_set.hpp> #include <boost/functional/hash.hpp> #include <boost/bind.hpp> #include <boost/uuid/uuid.hpp> #include <boost/uuid/uuid_io.hpp> #include <tbb/atomic.h> #include "db/db.h" #include "db/db_table.h" #include "db/db_entry.h" #include "db/db_client.h" #include "db/db_partition.h" #include "db/db_table_walker.h" #include "base/time_util.h" #include "base/task.h" #include "base/test/task_test_util.h" #include "base/logging.h" #include "base/task_annotations.h" #include "base/string_util.h" #include "testing/gunit.h" #include <cmn/agent_cmn.h> #include <cmn/index_vector.h> #include <oper_db.h> #include "test/test_cmn_util.h" #define FIND_COUNT (40 * 1000) struct VlanTableReqKey : public AgentOperDBKey { VlanTableReqKey(int id) : uuid_(MakeUuid(id)) {} VlanTableReqKey(const boost::uuids::uuid &u) : uuid_(u) {} boost::uuids::uuid uuid_; }; struct VlanTableReqData : public AgentOperDBData { VlanTableReqData(std::string desc) : AgentOperDBData(Agent::GetInstance(), NULL), description_(desc) {} std::string description_; }; class Vlan : public AgentRefCount<Vlan>, public AgentOperDBEntry { public: Vlan(const boost::uuids::uuid &u) : type_(0), uuid_(u) { } Vlan(int id) : type_(0), uuid_(MakeUuid(id)) { } Vlan(const boost::uuids::uuid &u, std::string desc) : type_(0), uuid_(u), description_(desc) { } ~Vlan() { } bool DBEntrySandesh(Sandesh *sresp, std::string &name) const { return true; } uint32_t GetRefCount() const { return AgentRefCount<Vlan>::GetRefCount(); } bool IsLess(const DBEntry &rhs) const { const Vlan &intf = static_cast<const Vlan &>(rhs); if (type_ != intf.type_) { return type_ < intf.type_; } return Cmp(rhs); } virtual bool Cmp(const DBEntry &rhs) const { const Vlan &intf=static_cast<const Vlan &>(rhs); return uuid_ < intf.uuid_; } void SetKey(const DBRequestKey *key) { const VlanTableReqKey *k = static_cast<const VlanTableReqKey *>(key); uuid_ = k->uuid_; } std::string ToString() const { return "Vlan"; } virtual KeyPtr GetDBRequestKey() const { VlanTableReqKey *key = new VlanTableReqKey(uuid_); return KeyPtr(key); } const boost::uuids::uuid &get_uuid() const { return uuid_; } void set_description(const std::string &descr) { description_ = descr; } private: int type_; boost::uuids::uuid uuid_; std::string description_; DISALLOW_COPY_AND_ASSIGN(Vlan); }; class VlanTable : public AgentOperDBTable { public: VlanTable(DB *db) : AgentOperDBTable(db, "__vlan__.0") { } ~VlanTable() { } // Alloc a derived DBEntry virtual std::auto_ptr<DBEntry> AllocEntry(const DBRequestKey *key) const { const VlanTableReqKey *vkey = static_cast<const VlanTableReqKey *>(key); Vlan *vlan = new Vlan(vkey->uuid_); return std::auto_ptr<DBEntry>(vlan); }; size_t Hash(const DBEntry *entry) const { return 0; } size_t Hash(const DBRequestKey *key) const { return 0; } DBEntry *OperDBAdd(const DBRequest *req) { const VlanTableReqKey *key = static_cast<const VlanTableReqKey *> (req->key.get()); const VlanTableReqData *data = static_cast<const VlanTableReqData *> (req->data.get()); Vlan *vlan = new Vlan(key->uuid_); vlan->set_description(data->description_); return vlan; }; bool OperDBOnChange(DBEntry *entry, const DBRequest *req) { return true; }; bool OperDBDelete(DBEntry *entry, const DBRequest *req) { return true; } static DBTableBase *CreateTable(DB *db, const std::string &name) { VlanTable *table = new VlanTable(db); table->Init(); return table; } uint64_t FindScale(int id, uint32_t count, 
bool use_key, bool do_lock){ boost::uuids::uuid u = MakeUuid(id); uint64_t start = ClockMonotonicUsec(); if (use_key) { for (uint32_t i = 0; i < count; i++) { VlanTableReqKey key(u); if (do_lock) { assert(DBTable::Find(&key) != NULL); } else { assert(DBTable::FindNoLock(&key) != NULL); } } } else { for (uint32_t i = 0; i < count; i++) { Vlan entry(u); if (do_lock) { assert(DBTable::Find(&entry) != NULL); } else { assert(DBTable::FindNoLock(&entry) != NULL); } } } return ClockMonotonicUsec() - start; } DISALLOW_COPY_AND_ASSIGN(VlanTable); }; struct PortInfo input[] = { {"vnet1", 1, "1.1.1.1", "00:00:01:01:01:01", 1, 1}, }; class DBTest : public ::testing::Test { public: DBTest() { table_ = static_cast<DBTable *>(db_.CreateTable("db.test.vlan.0")); vlan_table_ = static_cast<VlanTable *>(table_); scheduler_ = TaskScheduler::GetInstance(); } virtual void SetUp() { agent_ = Agent::GetInstance(); DBRequest addReq; addReq.key.reset(new VlanTableReqKey(101)); addReq.data.reset(new VlanTableReqData("DB Test Vlan")); addReq.oper = DBRequest::DB_ENTRY_ADD_CHANGE; table_->Enqueue(&addReq); task_util::WaitForIdle(); CreateVmportEnv(input, 1); task_util::WaitForIdle(); } virtual void TearDown() { DBRequest delReq; delReq.key.reset(new VlanTableReqKey(101)); delReq.oper = DBRequest::DB_ENTRY_DELETE; table_->Enqueue(&delReq); task_util::WaitForIdle(); DeleteVmportEnv(input, 1, true); task_util::WaitForIdle(); } protected: DB db_; DBTable *table_; VlanTable *vlan_table_; Agent *agent_; TaskScheduler *scheduler_; }; tbb::atomic<uint32_t> scale_count_; class ScaleTask : public Task { public: ScaleTask(VlanTable *table, bool use_key, bool do_lock, int thread_count, uint32_t find_count) : Task(TaskScheduler::GetInstance()->GetTaskId(kTaskFlowEvent), -1), table_(table), do_lock_(do_lock), use_key_(use_key), thread_count_(thread_count), find_count_(find_count) { } ~ScaleTask() { } bool Run() { scale_count_++; while (scale_count_ != thread_count_); uint64_t delay = table_->FindScale(101, find_count_, use_key_, do_lock_); std::cout << "Use-Key " << use_key_ << " Do-Lock " << do_lock_ << " Time is " << delay << " usec" << std::endl; return true; } std::string Description() const { return "ScaleTask"; } private: VlanTable *table_; bool do_lock_; bool use_key_; uint32_t thread_count_; uint32_t find_count_; }; // Find routine tests TEST_F(DBTest, Find) { ConcurrencyScope scope(kTaskFlowEvent); VlanTableReqKey key(101); EXPECT_TRUE(table_->Find(&key) != NULL); EXPECT_TRUE(table_->FindNoLock(&key) != NULL); Vlan entry(101); EXPECT_TRUE(table_->Find(&entry) != NULL); EXPECT_TRUE(table_->FindNoLock(&entry) != NULL); } // Find routine tests TEST_F(DBTest, VlanScaleNoTask) { ConcurrencyScope scope(kTaskFlowEvent); uint32_t count = FIND_COUNT; uint64_t key_with_lock = vlan_table_->FindScale(101, count,true, true); uint64_t key_no_lock = vlan_table_->FindScale(101, count, true, false); uint64_t entry_with_lock = vlan_table_->FindScale(101, count, false, true); uint64_t entry_no_lock = vlan_table_->FindScale(101, count, false, false); std::cout << "Lookup with Key with lock : " << key_with_lock << " usec" << std::endl; std::cout << "Lookup with Key no lock : " << key_no_lock << " usec" << std::endl; std::cout << "Lookup with Entry with lock : " << entry_with_lock << " usec" << std::endl; std::cout << "Lookup with Entry no lock : " << entry_no_lock << " usec" << std::endl; } TEST_F(DBTest, VlanScaleTask) { ConcurrencyScope scope(kTaskFlowEvent); scale_count_ = 0; uint32_t count = FIND_COUNT/4; scheduler_->Enqueue(new 
ScaleTask(vlan_table_, true, true, 4, count)); scheduler_->Enqueue(new ScaleTask(vlan_table_, true, true, 4, count)); scheduler_->Enqueue(new ScaleTask(vlan_table_, true, true, 4, count)); scheduler_->Enqueue(new ScaleTask(vlan_table_, true, true, 4, count)); task_util::WaitForIdle(); scale_count_ = 0; scheduler_->Enqueue(new ScaleTask(vlan_table_, true, false, 4, count)); scheduler_->Enqueue(new ScaleTask(vlan_table_, true, false, 4, count)); scheduler_->Enqueue(new ScaleTask(vlan_table_, true, false, 4, count)); scheduler_->Enqueue(new ScaleTask(vlan_table_, true, false, 4, count)); task_util::WaitForIdle(); scale_count_ = 0; scheduler_->Enqueue(new ScaleTask(vlan_table_, false, true, 4, count)); scheduler_->Enqueue(new ScaleTask(vlan_table_, false, true, 4, count)); scheduler_->Enqueue(new ScaleTask(vlan_table_, false, true, 4, count)); scheduler_->Enqueue(new ScaleTask(vlan_table_, false, true, 4, count)); task_util::WaitForIdle(); scale_count_ = 0; scheduler_->Enqueue(new ScaleTask(vlan_table_, false, false, 4, count)); scheduler_->Enqueue(new ScaleTask(vlan_table_, false, false, 4, count)); scheduler_->Enqueue(new ScaleTask(vlan_table_, false, false, 4, count)); scheduler_->Enqueue(new ScaleTask(vlan_table_, false, false, 4, count)); task_util::WaitForIdle(); } static uint64_t FindInterfaceScale(DBTable *table, uint32_t id, uint32_t count, bool use_key, bool do_lock){ uint64_t start = ClockMonotonicUsec(); boost::uuids::uuid u = MakeUuid(id); if (use_key) { for (uint32_t i = 0; i < count; i++) { VmInterfaceKey key(VmInterfaceKey(AgentKey::RESYNC, u, "")); if (do_lock) { assert(table->Find(&key) != NULL); } else { assert(table->FindNoLock(&key) != NULL); } } } else { for (uint32_t i = 0; i < count; i++) { VmInterface entry(u, "", true, boost::uuids::nil_uuid()); if (do_lock) { assert(table->Find(&entry) != NULL); } else { assert(table->FindNoLock(&entry) != NULL); } } } return ClockMonotonicUsec() - start; } class ScaleInterfaceTask : public Task { public: ScaleInterfaceTask(DBTable *table, bool use_key, bool do_lock, int thread_count, uint32_t find_count) : Task(TaskScheduler::GetInstance()->GetTaskId(kTaskFlowEvent), -1), table_(table), do_lock_(do_lock), use_key_(use_key), thread_count_(thread_count), find_count_(find_count) { } ~ScaleInterfaceTask() { } bool Run() { scale_count_++; while (scale_count_ != thread_count_); uint64_t delay = FindInterfaceScale(table_, 1, find_count_, use_key_, do_lock_); std::cout << "Use-Key " << use_key_ << " Do-Lock " << do_lock_ << " Time is " << delay << " usec" << std::endl; return true; } std::string Description() const { return "ScaleInterfaceTask"; } private: DBTable *table_; bool do_lock_; bool use_key_; uint32_t thread_count_; uint32_t find_count_; }; TEST_F(DBTest, ScaleVmInterface) { ConcurrencyScope scope(kTaskFlowUpdate); DBTable *table = static_cast<DBTable *>(Agent::GetInstance()->interface_table()); uint32_t count = FIND_COUNT; uint64_t key_with_lock = FindInterfaceScale(table, 1, count, true, true); uint64_t key_no_lock = FindInterfaceScale(table, 1, count, true, false); uint64_t entry_with_lock = FindInterfaceScale(table, 1, count, false, true); uint64_t entry_no_lock = FindInterfaceScale(table, 1, count, false, false); std::cout << "Lookup with Key with lock : " << key_with_lock << " usec" << std::endl; std::cout << "Lookup with Key no lock : " << key_no_lock << " usec" << std::endl; std::cout << "Lookup with Entry with lock : " << entry_with_lock << " usec" << std::endl; std::cout << "Lookup with Entry no lock : " << entry_no_lock 
<< " usec" << std::endl; } TEST_F(DBTest, ScaleTaskVmInterface) { ConcurrencyScope scope(kTaskFlowUpdate); scale_count_ = 0; uint32_t count = FIND_COUNT/4; DBTable *table = static_cast<DBTable *>(Agent::GetInstance()->interface_table()); scheduler_->Enqueue(new ScaleInterfaceTask(table, true, true, 4, count)); scheduler_->Enqueue(new ScaleInterfaceTask(table, true, true, 4, count)); scheduler_->Enqueue(new ScaleInterfaceTask(table, true, true, 4, count)); scheduler_->Enqueue(new ScaleInterfaceTask(table, true, true, 4, count)); task_util::WaitForIdle(); scale_count_ = 0; scheduler_->Enqueue(new ScaleInterfaceTask(table, true, false, 4, count)); scheduler_->Enqueue(new ScaleInterfaceTask(table, true, false, 4, count)); scheduler_->Enqueue(new ScaleInterfaceTask(table, true, false, 4, count)); scheduler_->Enqueue(new ScaleInterfaceTask(table, true, false, 4, count)); task_util::WaitForIdle(); scale_count_ = 0; scheduler_->Enqueue(new ScaleInterfaceTask(table, false, true, 4, count)); scheduler_->Enqueue(new ScaleInterfaceTask(table, false, true, 4, count)); scheduler_->Enqueue(new ScaleInterfaceTask(table, false, true, 4, count)); scheduler_->Enqueue(new ScaleInterfaceTask(table, false, true, 4, count)); task_util::WaitForIdle(); scale_count_ = 0; scheduler_->Enqueue(new ScaleInterfaceTask(table, false, false, 4, count)); scheduler_->Enqueue(new ScaleInterfaceTask(table, false, false, 4, count)); scheduler_->Enqueue(new ScaleInterfaceTask(table, false, false, 4, count)); scheduler_->Enqueue(new ScaleInterfaceTask(table, false, false, 4, count)); task_util::WaitForIdle(); } void RegisterFactory() { DB::RegisterFactory("db.test.vlan.0", &VlanTable::CreateTable); } int main(int argc, char **argv) { GETUSERARGS(); ::testing::InitGoogleTest(&argc, argv); RegisterFactory(); client = TestInit(init_file, ksync_init, false, false, false); bool ret = RUN_ALL_TESTS(); usleep(10000); TestShutdown(); delete client; return ret; }
# Assumed dependencies, not shown in the original snippet: `@unpack` is provided
# by UnPack.jl (and re-exported by Parameters.jl), `ifft` by FFTW.jl, and
# `GeneralParams` is defined elsewhere in the project.
using FFTW, UnPack

# Append the complex-conjugate mirror of rows 2:N1 (reversed first column,
# rotated remaining block), then apply the inverse FFT.
function ffti(dat, parameters::GeneralParams)
    @unpack N1, M2 = parameters
    dat = vcat(dat, [conj(reverse(dat[2:N1, 1], dims=1)) conj(rotr90(rotr90(dat[2:N1, 2:M2])))])
    dat = ifft(dat)
    return dat
end
State Before: α : Type u β : Type v R r : α → α → Prop l l₁ l₂ : List α a b x y : α ⊢ Chain' R [x, y] ↔ R x y State After: no goals Tactic: simp only [chain'_singleton, chain'_cons, and_true_iff]
[GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a : Ordinal.{v} ⊢ lift (succ a) = succ (lift a) [PROOFSTEP] rw [← add_one_eq_succ, lift_add, lift_one] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a : Ordinal.{v} ⊢ lift a + 1 = succ (lift a) [PROOFSTEP] rfl [GOAL] α✝ : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s : β → β → Prop t : γ → γ → Prop a✝ b c : Ordinal.{u} α : Type u r : α → α → Prop hr : IsWellOrder α r β₁ : Type u s₁ : β₁ → β₁ → Prop hs₁ : IsWellOrder β₁ s₁ β₂ : Type u s₂ : β₂ → β₂ → Prop hs₂ : IsWellOrder β₂ s₂ x✝ : type r + type s₁ ≤ type r + type s₂ f : Sum.Lex r s₁ ≼i Sum.Lex r s₂ a : α ⊢ ↑f (Sum.inl a) = Sum.inl a [PROOFSTEP] simpa only [InitialSeg.trans_apply, InitialSeg.leAdd_apply] using @InitialSeg.eq _ _ _ _ _ ((InitialSeg.leAdd r s₁).trans f) (InitialSeg.leAdd r s₂) a [GOAL] α✝ : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s : β → β → Prop t : γ → γ → Prop a b c : Ordinal.{u} α : Type u r : α → α → Prop hr : IsWellOrder α r β₁ : Type u s₁ : β₁ → β₁ → Prop hs₁ : IsWellOrder β₁ s₁ β₂ : Type u s₂ : β₂ → β₂ → Prop hs₂ : IsWellOrder β₂ s₂ x✝ : type r + type s₁ ≤ type r + type s₂ f : Sum.Lex r s₁ ≼i Sum.Lex r s₂ fl : ∀ (a : α), ↑f (Sum.inl a) = Sum.inl a ⊢ (b : β₁) → { b' // ↑f (Sum.inr b) = Sum.inr b' } [PROOFSTEP] intro b [GOAL] α✝ : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s : β → β → Prop t : γ → γ → Prop a b✝ c : Ordinal.{u} α : Type u r : α → α → Prop hr : IsWellOrder α r β₁ : Type u s₁ : β₁ → β₁ → Prop hs₁ : IsWellOrder β₁ s₁ β₂ : Type u s₂ : β₂ → β₂ → Prop hs₂ : IsWellOrder β₂ s₂ x✝ : type r + type s₁ ≤ type r + type s₂ f : Sum.Lex r s₁ ≼i Sum.Lex r s₂ fl : ∀ (a : α), ↑f (Sum.inl a) = Sum.inl a b : β₁ ⊢ { b' // ↑f (Sum.inr b) = Sum.inr b' } [PROOFSTEP] cases e : f (Sum.inr b) [GOAL] case inl α✝ : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s : β → β → Prop t : γ → γ → Prop a b✝ c : Ordinal.{u} α : Type u r : α → α → Prop hr : IsWellOrder α r β₁ : Type u s₁ : β₁ → β₁ → Prop hs₁ : IsWellOrder β₁ s₁ β₂ : Type u s₂ : β₂ → β₂ → Prop hs₂ : IsWellOrder β₂ s₂ x✝ : type r + type s₁ ≤ type r + type s₂ f : Sum.Lex r s₁ ≼i Sum.Lex r s₂ fl : ∀ (a : α), ↑f (Sum.inl a) = Sum.inl a b : β₁ val✝ : α e : ↑f (Sum.inr b) = Sum.inl val✝ ⊢ { b' // Sum.inl val✝ = Sum.inr b' } [PROOFSTEP] rw [← fl] at e [GOAL] case inl α✝ : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s : β → β → Prop t : γ → γ → Prop a b✝ c : Ordinal.{u} α : Type u r : α → α → Prop hr : IsWellOrder α r β₁ : Type u s₁ : β₁ → β₁ → Prop hs₁ : IsWellOrder β₁ s₁ β₂ : Type u s₂ : β₂ → β₂ → Prop hs₂ : IsWellOrder β₂ s₂ x✝ : type r + type s₁ ≤ type r + type s₂ f : Sum.Lex r s₁ ≼i Sum.Lex r s₂ fl : ∀ (a : α), ↑f (Sum.inl a) = Sum.inl a b : β₁ val✝ : α e : ↑f (Sum.inr b) = ↑f (Sum.inl val✝) ⊢ { b' // Sum.inl val✝ = Sum.inr b' } [PROOFSTEP] have := f.inj' e [GOAL] case inl α✝ : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s : β → β → Prop t : γ → γ → Prop a b✝ c : Ordinal.{u} α : Type u r : α → α → Prop hr : IsWellOrder α r β₁ : Type u s₁ : β₁ → β₁ → Prop hs₁ : IsWellOrder β₁ s₁ β₂ : Type u s₂ : β₂ → β₂ → Prop hs₂ : IsWellOrder β₂ s₂ x✝ : type r + type s₁ ≤ type r + type s₂ f : Sum.Lex r s₁ ≼i Sum.Lex r s₂ fl : ∀ (a : α), ↑f (Sum.inl a) = Sum.inl a b : β₁ val✝ : α e : ↑f (Sum.inr b) = ↑f (Sum.inl val✝) this : Sum.inr b = Sum.inl val✝ ⊢ { b' // Sum.inl val✝ = Sum.inr b' } [PROOFSTEP] contradiction [GOAL] case inr α✝ : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → 
Prop s : β → β → Prop t : γ → γ → Prop a b✝ c : Ordinal.{u} α : Type u r : α → α → Prop hr : IsWellOrder α r β₁ : Type u s₁ : β₁ → β₁ → Prop hs₁ : IsWellOrder β₁ s₁ β₂ : Type u s₂ : β₂ → β₂ → Prop hs₂ : IsWellOrder β₂ s₂ x✝ : type r + type s₁ ≤ type r + type s₂ f : Sum.Lex r s₁ ≼i Sum.Lex r s₂ fl : ∀ (a : α), ↑f (Sum.inl a) = Sum.inl a b : β₁ val✝ : β₂ e : ↑f (Sum.inr b) = Sum.inr val✝ ⊢ { b' // Sum.inr val✝ = Sum.inr b' } [PROOFSTEP] exact ⟨_, rfl⟩ [GOAL] α✝ : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s : β → β → Prop t : γ → γ → Prop a b c : Ordinal.{u} α : Type u r : α → α → Prop hr : IsWellOrder α r β₁ : Type u s₁ : β₁ → β₁ → Prop hs₁ : IsWellOrder β₁ s₁ β₂ : Type u s₂ : β₂ → β₂ → Prop hs₂ : IsWellOrder β₂ s₂ x✝ : type r + type s₁ ≤ type r + type s₂ f : Sum.Lex r s₁ ≼i Sum.Lex r s₂ fl : ∀ (a : α), ↑f (Sum.inl a) = Sum.inl a this : (b : β₁) → { b' // ↑f (Sum.inr b) = Sum.inr b' } g : β₁ → β₂ := fun b => ↑(this b) fr : ∀ (b : β₁), ↑f (Sum.inr b) = Sum.inr (g b) x y : β₁ h : g x = g y ⊢ x = y [PROOFSTEP] injection f.inj' (by rw [fr, fr, h] : f (Sum.inr x) = f (Sum.inr y)) [GOAL] α✝ : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s : β → β → Prop t : γ → γ → Prop a b c : Ordinal.{u} α : Type u r : α → α → Prop hr : IsWellOrder α r β₁ : Type u s₁ : β₁ → β₁ → Prop hs₁ : IsWellOrder β₁ s₁ β₂ : Type u s₂ : β₂ → β₂ → Prop hs₂ : IsWellOrder β₂ s₂ x✝ : type r + type s₁ ≤ type r + type s₂ f : Sum.Lex r s₁ ≼i Sum.Lex r s₂ fl : ∀ (a : α), ↑f (Sum.inl a) = Sum.inl a this : (b : β₁) → { b' // ↑f (Sum.inr b) = Sum.inr b' } g : β₁ → β₂ := fun b => ↑(this b) fr : ∀ (b : β₁), ↑f (Sum.inr b) = Sum.inr (g b) x y : β₁ h : g x = g y ⊢ ↑f (Sum.inr x) = ↑f (Sum.inr y) [PROOFSTEP] rw [fr, fr, h] [GOAL] α✝ : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s : β → β → Prop t : γ → γ → Prop a✝ b✝ c : Ordinal.{u} α : Type u r : α → α → Prop hr : IsWellOrder α r β₁ : Type u s₁ : β₁ → β₁ → Prop hs₁ : IsWellOrder β₁ s₁ β₂ : Type u s₂ : β₂ → β₂ → Prop hs₂ : IsWellOrder β₂ s₂ x✝ : type r + type s₁ ≤ type r + type s₂ f : Sum.Lex r s₁ ≼i Sum.Lex r s₂ fl : ∀ (a : α), ↑f (Sum.inl a) = Sum.inl a this : (b : β₁) → { b' // ↑f (Sum.inr b) = Sum.inr b' } g : β₁ → β₂ := fun b => ↑(this b) fr : ∀ (b : β₁), ↑f (Sum.inr b) = Sum.inr (g b) a b : β₁ ⊢ s₂ (↑{ toFun := g, inj' := (_ : ∀ (x y : β₁), g x = g y → x = y) } a) (↑{ toFun := g, inj' := (_ : ∀ (x y : β₁), g x = g y → x = y) } b) ↔ s₁ a b [PROOFSTEP] simpa only [Sum.lex_inr_inr, fr, InitialSeg.coe_coe_fn, Embedding.coeFn_mk] using @RelEmbedding.map_rel_iff _ _ _ _ f.toRelEmbedding (Sum.inr a) (Sum.inr b) [GOAL] α✝ : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s : β → β → Prop t : γ → γ → Prop a✝ b✝ c : Ordinal.{u} α : Type u r : α → α → Prop hr : IsWellOrder α r β₁ : Type u s₁ : β₁ → β₁ → Prop hs₁ : IsWellOrder β₁ s₁ β₂ : Type u s₂ : β₂ → β₂ → Prop hs₂ : IsWellOrder β₂ s₂ x✝ : type r + type s₁ ≤ type r + type s₂ f : Sum.Lex r s₁ ≼i Sum.Lex r s₂ fl : ∀ (a : α), ↑f (Sum.inl a) = Sum.inl a this : (b : β₁) → { b' // ↑f (Sum.inr b) = Sum.inr b' } g : β₁ → β₂ := fun b => ↑(this b) fr : ∀ (b : β₁), ↑f (Sum.inr b) = Sum.inr (g b) a : β₁ b : β₂ H : s₂ b (↑{ toEmbedding := { toFun := g, inj' := (_ : ∀ (x y : β₁), g x = g y → x = y) }, map_rel_iff' := (_ : ∀ (a b : β₁), s₂ (↑{ toFun := g, inj' := (_ : ∀ (x y : β₁), g x = g y → x = y) } a) (↑{ toFun := g, inj' := (_ : ∀ (x y : β₁), g x = g y → x = y) } b) ↔ s₁ a b) } a) ⊢ ∃ a', ↑{ toEmbedding := { toFun := g, inj' := (_ : ∀ (x y : β₁), g x = g y → x = y) }, map_rel_iff' := (_ : ∀ (a b : β₁), s₂ (↑{ 
toFun := g, inj' := (_ : ∀ (x y : β₁), g x = g y → x = y) } a) (↑{ toFun := g, inj' := (_ : ∀ (x y : β₁), g x = g y → x = y) } b) ↔ s₁ a b) } a' = b [PROOFSTEP] rcases f.init (by rw [fr] <;> exact Sum.lex_inr_inr.2 H) with ⟨a' | a', h⟩ [GOAL] α✝ : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s : β → β → Prop t : γ → γ → Prop a✝ b✝ c : Ordinal.{u} α : Type u r : α → α → Prop hr : IsWellOrder α r β₁ : Type u s₁ : β₁ → β₁ → Prop hs₁ : IsWellOrder β₁ s₁ β₂ : Type u s₂ : β₂ → β₂ → Prop hs₂ : IsWellOrder β₂ s₂ x✝ : type r + type s₁ ≤ type r + type s₂ f : Sum.Lex r s₁ ≼i Sum.Lex r s₂ fl : ∀ (a : α), ↑f (Sum.inl a) = Sum.inl a this : (b : β₁) → { b' // ↑f (Sum.inr b) = Sum.inr b' } g : β₁ → β₂ := fun b => ↑(this b) fr : ∀ (b : β₁), ↑f (Sum.inr b) = Sum.inr (g b) a : β₁ b : β₂ H : s₂ b (↑{ toEmbedding := { toFun := g, inj' := (_ : ∀ (x y : β₁), g x = g y → x = y) }, map_rel_iff' := (_ : ∀ (a b : β₁), s₂ (↑{ toFun := g, inj' := (_ : ∀ (x y : β₁), g x = g y → x = y) } a) (↑{ toFun := g, inj' := (_ : ∀ (x y : β₁), g x = g y → x = y) } b) ↔ s₁ a b) } a) ⊢ Sum.Lex r s₂ ?m.57687 (↑f ?m.57686) [PROOFSTEP] rw [fr] [GOAL] α✝ : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s : β → β → Prop t : γ → γ → Prop a✝ b✝ c : Ordinal.{u} α : Type u r : α → α → Prop hr : IsWellOrder α r β₁ : Type u s₁ : β₁ → β₁ → Prop hs₁ : IsWellOrder β₁ s₁ β₂ : Type u s₂ : β₂ → β₂ → Prop hs₂ : IsWellOrder β₂ s₂ x✝ : type r + type s₁ ≤ type r + type s₂ f : Sum.Lex r s₁ ≼i Sum.Lex r s₂ fl : ∀ (a : α), ↑f (Sum.inl a) = Sum.inl a this : (b : β₁) → { b' // ↑f (Sum.inr b) = Sum.inr b' } g : β₁ → β₂ := fun b => ↑(this b) fr : ∀ (b : β₁), ↑f (Sum.inr b) = Sum.inr (g b) a : β₁ b : β₂ H : s₂ b (↑{ toEmbedding := { toFun := g, inj' := (_ : ∀ (x y : β₁), g x = g y → x = y) }, map_rel_iff' := (_ : ∀ (a b : β₁), s₂ (↑{ toFun := g, inj' := (_ : ∀ (x y : β₁), g x = g y → x = y) } a) (↑{ toFun := g, inj' := (_ : ∀ (x y : β₁), g x = g y → x = y) } b) ↔ s₁ a b) } a) ⊢ Sum.Lex r s₂ ?m.57687 (Sum.inr (g ?m.57775)) [PROOFSTEP] exact Sum.lex_inr_inr.2 H [GOAL] case intro.inl α✝ : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s : β → β → Prop t : γ → γ → Prop a✝ b✝ c : Ordinal.{u} α : Type u r : α → α → Prop hr : IsWellOrder α r β₁ : Type u s₁ : β₁ → β₁ → Prop hs₁ : IsWellOrder β₁ s₁ β₂ : Type u s₂ : β₂ → β₂ → Prop hs₂ : IsWellOrder β₂ s₂ x✝ : type r + type s₁ ≤ type r + type s₂ f : Sum.Lex r s₁ ≼i Sum.Lex r s₂ fl : ∀ (a : α), ↑f (Sum.inl a) = Sum.inl a this : (b : β₁) → { b' // ↑f (Sum.inr b) = Sum.inr b' } g : β₁ → β₂ := fun b => ↑(this b) fr : ∀ (b : β₁), ↑f (Sum.inr b) = Sum.inr (g b) a : β₁ b : β₂ H : s₂ b (↑{ toEmbedding := { toFun := g, inj' := (_ : ∀ (x y : β₁), g x = g y → x = y) }, map_rel_iff' := (_ : ∀ (a b : β₁), s₂ (↑{ toFun := g, inj' := (_ : ∀ (x y : β₁), g x = g y → x = y) } a) (↑{ toFun := g, inj' := (_ : ∀ (x y : β₁), g x = g y → x = y) } b) ↔ s₁ a b) } a) a' : α h : ↑f (Sum.inl a') = Sum.inr b ⊢ ∃ a', ↑{ toEmbedding := { toFun := g, inj' := (_ : ∀ (x y : β₁), g x = g y → x = y) }, map_rel_iff' := (_ : ∀ (a b : β₁), s₂ (↑{ toFun := g, inj' := (_ : ∀ (x y : β₁), g x = g y → x = y) } a) (↑{ toFun := g, inj' := (_ : ∀ (x y : β₁), g x = g y → x = y) } b) ↔ s₁ a b) } a' = b [PROOFSTEP] rw [fl] at h [GOAL] case intro.inl α✝ : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s : β → β → Prop t : γ → γ → Prop a✝ b✝ c : Ordinal.{u} α : Type u r : α → α → Prop hr : IsWellOrder α r β₁ : Type u s₁ : β₁ → β₁ → Prop hs₁ : IsWellOrder β₁ s₁ β₂ : Type u s₂ : β₂ → β₂ → Prop hs₂ : IsWellOrder β₂ s₂ x✝ : type r + type s₁ ≤ 
type r + type s₂ f : Sum.Lex r s₁ ≼i Sum.Lex r s₂ fl : ∀ (a : α), ↑f (Sum.inl a) = Sum.inl a this : (b : β₁) → { b' // ↑f (Sum.inr b) = Sum.inr b' } g : β₁ → β₂ := fun b => ↑(this b) fr : ∀ (b : β₁), ↑f (Sum.inr b) = Sum.inr (g b) a : β₁ b : β₂ H : s₂ b (↑{ toEmbedding := { toFun := g, inj' := (_ : ∀ (x y : β₁), g x = g y → x = y) }, map_rel_iff' := (_ : ∀ (a b : β₁), s₂ (↑{ toFun := g, inj' := (_ : ∀ (x y : β₁), g x = g y → x = y) } a) (↑{ toFun := g, inj' := (_ : ∀ (x y : β₁), g x = g y → x = y) } b) ↔ s₁ a b) } a) a' : α h : Sum.inl a' = Sum.inr b ⊢ ∃ a', ↑{ toEmbedding := { toFun := g, inj' := (_ : ∀ (x y : β₁), g x = g y → x = y) }, map_rel_iff' := (_ : ∀ (a b : β₁), s₂ (↑{ toFun := g, inj' := (_ : ∀ (x y : β₁), g x = g y → x = y) } a) (↑{ toFun := g, inj' := (_ : ∀ (x y : β₁), g x = g y → x = y) } b) ↔ s₁ a b) } a' = b [PROOFSTEP] cases h [GOAL] case intro.inr α✝ : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s : β → β → Prop t : γ → γ → Prop a✝ b✝ c : Ordinal.{u} α : Type u r : α → α → Prop hr : IsWellOrder α r β₁ : Type u s₁ : β₁ → β₁ → Prop hs₁ : IsWellOrder β₁ s₁ β₂ : Type u s₂ : β₂ → β₂ → Prop hs₂ : IsWellOrder β₂ s₂ x✝ : type r + type s₁ ≤ type r + type s₂ f : Sum.Lex r s₁ ≼i Sum.Lex r s₂ fl : ∀ (a : α), ↑f (Sum.inl a) = Sum.inl a this : (b : β₁) → { b' // ↑f (Sum.inr b) = Sum.inr b' } g : β₁ → β₂ := fun b => ↑(this b) fr : ∀ (b : β₁), ↑f (Sum.inr b) = Sum.inr (g b) a : β₁ b : β₂ H : s₂ b (↑{ toEmbedding := { toFun := g, inj' := (_ : ∀ (x y : β₁), g x = g y → x = y) }, map_rel_iff' := (_ : ∀ (a b : β₁), s₂ (↑{ toFun := g, inj' := (_ : ∀ (x y : β₁), g x = g y → x = y) } a) (↑{ toFun := g, inj' := (_ : ∀ (x y : β₁), g x = g y → x = y) } b) ↔ s₁ a b) } a) a' : β₁ h : ↑f (Sum.inr a') = Sum.inr b ⊢ ∃ a', ↑{ toEmbedding := { toFun := g, inj' := (_ : ∀ (x y : β₁), g x = g y → x = y) }, map_rel_iff' := (_ : ∀ (a b : β₁), s₂ (↑{ toFun := g, inj' := (_ : ∀ (x y : β₁), g x = g y → x = y) } a) (↑{ toFun := g, inj' := (_ : ∀ (x y : β₁), g x = g y → x = y) } b) ↔ s₁ a b) } a' = b [PROOFSTEP] rw [fr] at h [GOAL] case intro.inr α✝ : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s : β → β → Prop t : γ → γ → Prop a✝ b✝ c : Ordinal.{u} α : Type u r : α → α → Prop hr : IsWellOrder α r β₁ : Type u s₁ : β₁ → β₁ → Prop hs₁ : IsWellOrder β₁ s₁ β₂ : Type u s₂ : β₂ → β₂ → Prop hs₂ : IsWellOrder β₂ s₂ x✝ : type r + type s₁ ≤ type r + type s₂ f : Sum.Lex r s₁ ≼i Sum.Lex r s₂ fl : ∀ (a : α), ↑f (Sum.inl a) = Sum.inl a this : (b : β₁) → { b' // ↑f (Sum.inr b) = Sum.inr b' } g : β₁ → β₂ := fun b => ↑(this b) fr : ∀ (b : β₁), ↑f (Sum.inr b) = Sum.inr (g b) a : β₁ b : β₂ H : s₂ b (↑{ toEmbedding := { toFun := g, inj' := (_ : ∀ (x y : β₁), g x = g y → x = y) }, map_rel_iff' := (_ : ∀ (a b : β₁), s₂ (↑{ toFun := g, inj' := (_ : ∀ (x y : β₁), g x = g y → x = y) } a) (↑{ toFun := g, inj' := (_ : ∀ (x y : β₁), g x = g y → x = y) } b) ↔ s₁ a b) } a) a' : β₁ h : Sum.inr (g a') = Sum.inr b ⊢ ∃ a', ↑{ toEmbedding := { toFun := g, inj' := (_ : ∀ (x y : β₁), g x = g y → x = y) }, map_rel_iff' := (_ : ∀ (a b : β₁), s₂ (↑{ toFun := g, inj' := (_ : ∀ (x y : β₁), g x = g y → x = y) } a) (↑{ toFun := g, inj' := (_ : ∀ (x y : β₁), g x = g y → x = y) } b) ↔ s₁ a b) } a' = b [PROOFSTEP] exact ⟨a', Sum.inr.inj h⟩ [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b c : Ordinal.{u_4} ⊢ a + b = a + c ↔ b = c [PROOFSTEP] simp only [le_antisymm_iff, add_le_add_iff_left] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a 
b c : Ordinal.{u_4} ⊢ a + b < a + c ↔ b < c [PROOFSTEP] rw [← not_le, ← not_le, add_le_add_iff_left] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} ⊢ a + ↑0 ≤ b + ↑0 ↔ a ≤ b [PROOFSTEP] simp [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} n : ℕ ⊢ a + ↑(n + 1) ≤ b + ↑(n + 1) ↔ a ≤ b [PROOFSTEP] simp only [nat_cast_succ, add_succ, add_succ, succ_le_succ_iff, add_le_add_iff_right] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} n : ℕ ⊢ a + ↑n = b + ↑n ↔ a = b [PROOFSTEP] simp only [le_antisymm_iff, add_le_add_iff_right] [GOAL] α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop a b : Ordinal.{u_4} α : Type u_4 r : α → α → Prop x✝¹ : IsWellOrder α r β : Type u_4 s : β → β → Prop x✝ : IsWellOrder β s ⊢ type r + type s = 0 ↔ type r = 0 ∧ type s = 0 [PROOFSTEP] simp_rw [← type_sum_lex, type_eq_zero_iff_isEmpty] [GOAL] α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop a b : Ordinal.{u_4} α : Type u_4 r : α → α → Prop x✝¹ : IsWellOrder α r β : Type u_4 s : β → β → Prop x✝ : IsWellOrder β s ⊢ IsEmpty (α ⊕ β) ↔ IsEmpty α ∧ IsEmpty β [PROOFSTEP] exact isEmpty_sum [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u_4} ⊢ pred (succ o) = o [PROOFSTEP] have h : ∃ a, succ o = succ a := ⟨_, rfl⟩ [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u_4} h : ∃ a, succ o = succ a ⊢ pred (succ o) = o [PROOFSTEP] simpa only [pred, dif_pos h] using (succ_injective <| Classical.choose_spec h).symm [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u_4} h : ∃ a, o = succ a ⊢ pred o ≤ o [PROOFSTEP] let ⟨a, e⟩ := h [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u_4} h : ∃ a, o = succ a a : Ordinal.{u_4} e : o = succ a ⊢ pred o ≤ o [PROOFSTEP] rw [e, pred_succ] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u_4} h : ∃ a, o = succ a a : Ordinal.{u_4} e : o = succ a ⊢ a ≤ succ a [PROOFSTEP] exact le_succ a [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u_4} h : ¬∃ a, o = succ a ⊢ pred o ≤ o [PROOFSTEP] rw [pred, dif_neg h] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u_4} e : pred o = o x✝ : ∃ a, o = succ a a : Ordinal.{u_4} e' : o = succ a ⊢ False [PROOFSTEP] rw [e', pred_succ] at e [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u_4} x✝ : ∃ a, o = succ a a : Ordinal.{u_4} e : a = succ a e' : o = succ a ⊢ False [PROOFSTEP] exact (lt_succ a).ne e [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u_4} ⊢ pred o = o ↔ ∀ (a : Ordinal.{u_4}), o ≠ succ a [PROOFSTEP] simpa using pred_eq_iff_not_succ [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u_4} ⊢ pred o < o ↔ ¬pred o = o [PROOFSTEP] simp only [le_antisymm_iff, pred_le_self, true_and_iff, not_le] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → 
Prop t : γ → γ → Prop o : Ordinal.{u_4} x✝ : ∃ a, o = succ a a : Ordinal.{u_4} e : o = succ a ⊢ succ (pred o) = o [PROOFSTEP] simp only [e, pred_succ] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} h : ∃ a, b = succ a ⊢ a < pred b ↔ succ a < b [PROOFSTEP] let ⟨c, e⟩ := h [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} h : ∃ a, b = succ a c : Ordinal.{u_4} e : b = succ c ⊢ a < pred b ↔ succ a < b [PROOFSTEP] rw [e, pred_succ, succ_lt_succ_iff] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} h : ¬∃ a, b = succ a ⊢ a < pred b ↔ succ a < b [PROOFSTEP] simp only [pred, dif_neg h, succ_lt_of_not_succ h] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{v} x✝ : ∃ a, lift o = succ a a : Ordinal.{max v u} h : lift o = succ a b : Ordinal.{v} e : lift b = a ⊢ lift o = lift (succ b) [PROOFSTEP] rw [h, ← e, lift_succ] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{v} x✝ : ∃ a, o = succ a a : Ordinal.{v} h : o = succ a ⊢ lift o = succ (lift a) [PROOFSTEP] simp only [h, lift_succ] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{v} h : ∃ a, o = succ a ⊢ lift (pred o) = pred (lift o) [PROOFSTEP] cases' h with a e [GOAL] case intro α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o a : Ordinal.{v} e : o = succ a ⊢ lift (pred o) = pred (lift o) [PROOFSTEP] simp only [e, pred_succ, lift_succ] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{v} h : ¬∃ a, o = succ a ⊢ lift (pred o) = pred (lift o) [PROOFSTEP] rw [pred_eq_iff_not_succ.2 h, pred_eq_iff_not_succ.2 (mt lift_is_succ.1 h)] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u_4} h : IsLimit o a : Ordinal.{u_4} ⊢ a < o ↔ ∃ x, x < o ∧ a < x [PROOFSTEP] simpa only [not_ball, not_le, bex_def] using not_congr (@limit_le _ h a) [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u_4} ⊢ lift o = 0 ↔ o = 0 [PROOFSTEP] simpa only [lift_zero] using @lift_inj o 0 [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u_4} H : ∀ (a : Ordinal.{max u_4 u_5}), a < lift o → succ a < lift o a : Ordinal.{u_4} h : a < o ⊢ lift (succ a) < lift o [PROOFSTEP] simpa only [lift_succ] using H _ (lift_lt.2 h) [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u_4} H : ∀ (a : Ordinal.{u_4}), a < o → succ a < o a : Ordinal.{max u_4 u_5} h : a < lift o ⊢ succ a < lift o [PROOFSTEP] obtain ⟨a', rfl⟩ := lift_down h.le [GOAL] case intro α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u_4} H : ∀ (a : Ordinal.{u_4}), a < o → succ a < o a' : Ordinal.{u_4} h : lift a' < lift o ⊢ succ (lift a') < lift o [PROOFSTEP] rw [← lift_succ, lift_lt] [GOAL] case intro α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u_4} H : ∀ (a : Ordinal.{u_4}), a < o → succ a < o a' : Ordinal.{u_4} h : lift a' < lift o ⊢ succ a' < o [PROOFSTEP] exact H a' (lift_lt.1 h) [GOAL] α : Type u_1 β : 
Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u_4} h : IsLimit o ⊢ 1 < o [PROOFSTEP] simpa only [succ_zero] using h.2 _ h.pos [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop C : Ordinal.{?u.99300} → Sort u_4 o✝ : Ordinal.{?u.99300} H₁ : C 0 H₂ : (o : Ordinal.{?u.99300}) → C o → C (succ o) H₃ : (o : Ordinal.{?u.99300}) → IsLimit o → ((o' : Ordinal.{?u.99300}) → o' < o → C o') → C o o : Ordinal.{?u.99300} IH : (y : Ordinal.{?u.99300}) → y < o → C y o0 : o = 0 ⊢ C o [PROOFSTEP] rw [o0] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop C : Ordinal.{?u.99300} → Sort u_4 o✝ : Ordinal.{?u.99300} H₁ : C 0 H₂ : (o : Ordinal.{?u.99300}) → C o → C (succ o) H₃ : (o : Ordinal.{?u.99300}) → IsLimit o → ((o' : Ordinal.{?u.99300}) → o' < o → C o') → C o o : Ordinal.{?u.99300} IH : (y : Ordinal.{?u.99300}) → y < o → C y o0 : o = 0 ⊢ C 0 [PROOFSTEP] exact H₁ [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop C : Ordinal.{?u.99300} → Sort u_4 o✝ : Ordinal.{?u.99300} H₁ : C 0 H₂ : (o : Ordinal.{?u.99300}) → C o → C (succ o) H₃ : (o : Ordinal.{?u.99300}) → IsLimit o → ((o' : Ordinal.{?u.99300}) → o' < o → C o') → C o o : Ordinal.{?u.99300} IH : (y : Ordinal.{?u.99300}) → y < o → C y o0 : ¬o = 0 h : ∃ a, o = succ a ⊢ C o [PROOFSTEP] rw [← succ_pred_iff_is_succ.2 h] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop C : Ordinal.{?u.99300} → Sort u_4 o✝ : Ordinal.{?u.99300} H₁ : C 0 H₂ : (o : Ordinal.{?u.99300}) → C o → C (succ o) H₃ : (o : Ordinal.{?u.99300}) → IsLimit o → ((o' : Ordinal.{?u.99300}) → o' < o → C o') → C o o : Ordinal.{?u.99300} IH : (y : Ordinal.{?u.99300}) → y < o → C y o0 : ¬o = 0 h : ∃ a, o = succ a ⊢ C (succ (pred o)) [PROOFSTEP] exact H₂ _ (IH _ <| pred_lt_iff_is_succ.2 h) [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop C : Ordinal.{u_4} → Sort u_5 H₁ : C 0 H₂ : (o : Ordinal.{u_4}) → C o → C (succ o) H₃ : (o : Ordinal.{u_4}) → IsLimit o → ((o' : Ordinal.{u_4}) → o' < o → C o') → C o ⊢ limitRecOn 0 H₁ H₂ H₃ = H₁ [PROOFSTEP] rw [limitRecOn, lt_wf.fix_eq, dif_pos rfl] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop C : Ordinal.{u_4} → Sort u_5 H₁ : C 0 H₂ : (o : Ordinal.{u_4}) → C o → C (succ o) H₃ : (o : Ordinal.{u_4}) → IsLimit o → ((o' : Ordinal.{u_4}) → o' < o → C o') → C o ⊢ Eq.mpr (_ : C 0 = C 0) H₁ = H₁ [PROOFSTEP] rfl [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop C : Ordinal.{u_4} → Sort u_5 o : Ordinal.{u_4} H₁ : C 0 H₂ : (o : Ordinal.{u_4}) → C o → C (succ o) H₃ : (o : Ordinal.{u_4}) → IsLimit o → ((o' : Ordinal.{u_4}) → o' < o → C o') → C o ⊢ limitRecOn (succ o) H₁ H₂ H₃ = H₂ o (limitRecOn o H₁ H₂ H₃) [PROOFSTEP] have h : ∃ a, succ o = succ a := ⟨_, rfl⟩ [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop C : Ordinal.{u_4} → Sort u_5 o : Ordinal.{u_4} H₁ : C 0 H₂ : (o : Ordinal.{u_4}) → C o → C (succ o) H₃ : (o : Ordinal.{u_4}) → IsLimit o → ((o' : Ordinal.{u_4}) → o' < o → C o') → C o h : ∃ a, succ o = succ a ⊢ limitRecOn (succ o) H₁ H₂ H₃ = H₂ o (limitRecOn o H₁ H₂ H₃) [PROOFSTEP] rw [limitRecOn, lt_wf.fix_eq, dif_neg (succ_ne_zero o), dif_pos h] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop C : Ordinal.{u_4} → Sort 
u_5 o : Ordinal.{u_4} H₁ : C 0 H₂ : (o : Ordinal.{u_4}) → C o → C (succ o) H₃ : (o : Ordinal.{u_4}) → IsLimit o → ((o' : Ordinal.{u_4}) → o' < o → C o') → C o h : ∃ a, succ o = succ a ⊢ Eq.mpr (_ : C (succ o) = C (succ (pred (succ o)))) (H₂ (pred (succ o)) ((fun y x => WellFounded.fix lt_wf (fun o IH => if o0 : o = 0 then Eq.mpr (_ : C o = C 0) H₁ else if h : ∃ a, o = succ a then Eq.mpr (_ : C o = C (succ (pred o))) (H₂ (pred o) (IH (pred o) (_ : pred o < o))) else H₃ o (_ : o ≠ 0 ∧ ∀ (a : Ordinal.{u_4}), a < o → succ a < o) IH) y) (pred (succ o)) (_ : pred (succ o) < succ o))) = H₂ o (limitRecOn o H₁ H₂ H₃) [PROOFSTEP] generalize limitRecOn.proof_2 (succ o) h = h₂ [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop C : Ordinal.{u_4} → Sort u_5 o : Ordinal.{u_4} H₁ : C 0 H₂ : (o : Ordinal.{u_4}) → C o → C (succ o) H₃ : (o : Ordinal.{u_4}) → IsLimit o → ((o' : Ordinal.{u_4}) → o' < o → C o') → C o h : ∃ a, succ o = succ a h₂ : C (succ o) = C (succ (pred (succ o))) ⊢ Eq.mpr h₂ (H₂ (pred (succ o)) ((fun y x => WellFounded.fix lt_wf (fun o IH => if o0 : o = 0 then Eq.mpr (_ : C o = C 0) H₁ else if h : ∃ a, o = succ a then Eq.mpr (_ : C o = C (succ (pred o))) (H₂ (pred o) (IH (pred o) (_ : pred o < o))) else H₃ o (_ : o ≠ 0 ∧ ∀ (a : Ordinal.{u_4}), a < o → succ a < o) IH) y) (pred (succ o)) (_ : pred (succ o) < succ o))) = H₂ o (limitRecOn o H₁ H₂ H₃) [PROOFSTEP] generalize limitRecOn.proof_3 (succ o) h = h₃ [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop C : Ordinal.{u_4} → Sort u_5 o : Ordinal.{u_4} H₁ : C 0 H₂ : (o : Ordinal.{u_4}) → C o → C (succ o) H₃ : (o : Ordinal.{u_4}) → IsLimit o → ((o' : Ordinal.{u_4}) → o' < o → C o') → C o h : ∃ a, succ o = succ a h₂ : C (succ o) = C (succ (pred (succ o))) h₃ : pred (succ o) < succ o ⊢ Eq.mpr h₂ (H₂ (pred (succ o)) ((fun y x => WellFounded.fix lt_wf (fun o IH => if o0 : o = 0 then Eq.mpr (_ : C o = C 0) H₁ else if h : ∃ a, o = succ a then Eq.mpr (_ : C o = C (succ (pred o))) (H₂ (pred o) (IH (pred o) (_ : pred o < o))) else H₃ o (_ : o ≠ 0 ∧ ∀ (a : Ordinal.{u_4}), a < o → succ a < o) IH) y) (pred (succ o)) h₃)) = H₂ o (limitRecOn o H₁ H₂ H₃) [PROOFSTEP] revert h₂ h₃ [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop C : Ordinal.{u_4} → Sort u_5 o : Ordinal.{u_4} H₁ : C 0 H₂ : (o : Ordinal.{u_4}) → C o → C (succ o) H₃ : (o : Ordinal.{u_4}) → IsLimit o → ((o' : Ordinal.{u_4}) → o' < o → C o') → C o h : ∃ a, succ o = succ a ⊢ ∀ (h₂ : C (succ o) = C (succ (pred (succ o)))) (h₃ : pred (succ o) < succ o), Eq.mpr h₂ (H₂ (pred (succ o)) ((fun y x => WellFounded.fix lt_wf (fun o IH => if o0 : o = 0 then Eq.mpr (_ : C o = C 0) H₁ else if h : ∃ a, o = succ a then Eq.mpr (_ : C o = C (succ (pred o))) (H₂ (pred o) (IH (pred o) (_ : pred o < o))) else H₃ o (_ : o ≠ 0 ∧ ∀ (a : Ordinal.{u_4}), a < o → succ a < o) IH) y) (pred (succ o)) h₃)) = H₂ o (limitRecOn o H₁ H₂ H₃) [PROOFSTEP] generalize e : pred (succ o) = o' [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop C : Ordinal.{u_4} → Sort u_5 o : Ordinal.{u_4} H₁ : C 0 H₂ : (o : Ordinal.{u_4}) → C o → C (succ o) H₃ : (o : Ordinal.{u_4}) → IsLimit o → ((o' : Ordinal.{u_4}) → o' < o → C o') → C o h : ∃ a, succ o = succ a o' : Ordinal.{u_4} e : pred (succ o) = o' ⊢ ∀ (h₂ : C (succ o) = C (succ o')) (h₃ : o' < succ o), Eq.mpr h₂ (H₂ o' ((fun y x => WellFounded.fix lt_wf (fun o IH => if o0 : o = 0 then Eq.mpr (_ : C o = C 0) H₁ else 
if h : ∃ a, o = succ a then Eq.mpr (_ : C o = C (succ (pred o))) (H₂ (pred o) (IH (pred o) (_ : pred o < o))) else H₃ o (_ : o ≠ 0 ∧ ∀ (a : Ordinal.{u_4}), a < o → succ a < o) IH) y) o' h₃)) = H₂ o (limitRecOn o H₁ H₂ H₃) [PROOFSTEP] intros [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop C : Ordinal.{u_4} → Sort u_5 o : Ordinal.{u_4} H₁ : C 0 H₂ : (o : Ordinal.{u_4}) → C o → C (succ o) H₃ : (o : Ordinal.{u_4}) → IsLimit o → ((o' : Ordinal.{u_4}) → o' < o → C o') → C o h : ∃ a, succ o = succ a o' : Ordinal.{u_4} e : pred (succ o) = o' h₂✝ : C (succ o) = C (succ o') h₃✝ : o' < succ o ⊢ Eq.mpr h₂✝ (H₂ o' ((fun y x => WellFounded.fix lt_wf (fun o IH => if o0 : o = 0 then Eq.mpr (_ : C o = C 0) H₁ else if h : ∃ a, o = succ a then Eq.mpr (_ : C o = C (succ (pred o))) (H₂ (pred o) (IH (pred o) (_ : pred o < o))) else H₃ o (_ : o ≠ 0 ∧ ∀ (a : Ordinal.{u_4}), a < o → succ a < o) IH) y) o' h₃✝)) = H₂ o (limitRecOn o H₁ H₂ H₃) [PROOFSTEP] rw [pred_succ] at e [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop C : Ordinal.{u_4} → Sort u_5 o : Ordinal.{u_4} H₁ : C 0 H₂ : (o : Ordinal.{u_4}) → C o → C (succ o) H₃ : (o : Ordinal.{u_4}) → IsLimit o → ((o' : Ordinal.{u_4}) → o' < o → C o') → C o h : ∃ a, succ o = succ a o' : Ordinal.{u_4} e : o = o' h₂✝ : C (succ o) = C (succ o') h₃✝ : o' < succ o ⊢ Eq.mpr h₂✝ (H₂ o' ((fun y x => WellFounded.fix lt_wf (fun o IH => if o0 : o = 0 then Eq.mpr (_ : C o = C 0) H₁ else if h : ∃ a, o = succ a then Eq.mpr (_ : C o = C (succ (pred o))) (H₂ (pred o) (IH (pred o) (_ : pred o < o))) else H₃ o (_ : o ≠ 0 ∧ ∀ (a : Ordinal.{u_4}), a < o → succ a < o) IH) y) o' h₃✝)) = H₂ o (limitRecOn o H₁ H₂ H₃) [PROOFSTEP] subst o' [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop C : Ordinal.{u_4} → Sort u_5 o : Ordinal.{u_4} H₁ : C 0 H₂ : (o : Ordinal.{u_4}) → C o → C (succ o) H₃ : (o : Ordinal.{u_4}) → IsLimit o → ((o' : Ordinal.{u_4}) → o' < o → C o') → C o h : ∃ a, succ o = succ a h₂✝ : C (succ o) = C (succ o) h₃✝ : o < succ o ⊢ Eq.mpr h₂✝ (H₂ o ((fun y x => WellFounded.fix lt_wf (fun o IH => if o0 : o = 0 then Eq.mpr (_ : C o = C 0) H₁ else if h : ∃ a, o = succ a then Eq.mpr (_ : C o = C (succ (pred o))) (H₂ (pred o) (IH (pred o) (_ : pred o < o))) else H₃ o (_ : o ≠ 0 ∧ ∀ (a : Ordinal.{u_4}), a < o → succ a < o) IH) y) o h₃✝)) = H₂ o (limitRecOn o H₁ H₂ H₃) [PROOFSTEP] rfl [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop C : Ordinal.{u_4} → Sort u_5 o : Ordinal.{u_4} H₁ : C 0 H₂ : (o : Ordinal.{u_4}) → C o → C (succ o) H₃ : (o : Ordinal.{u_4}) → IsLimit o → ((o' : Ordinal.{u_4}) → o' < o → C o') → C o h : IsLimit o ⊢ limitRecOn o H₁ H₂ H₃ = H₃ o h fun x _h => limitRecOn x H₁ H₂ H₃ [PROOFSTEP] rw [limitRecOn, lt_wf.fix_eq, dif_neg h.1, dif_neg (not_succ_of_isLimit h)] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop C : Ordinal.{u_4} → Sort u_5 o : Ordinal.{u_4} H₁ : C 0 H₂ : (o : Ordinal.{u_4}) → C o → C (succ o) H₃ : (o : Ordinal.{u_4}) → IsLimit o → ((o' : Ordinal.{u_4}) → o' < o → C o') → C o h : IsLimit o ⊢ (H₃ o (_ : o ≠ 0 ∧ ∀ (a : Ordinal.{u_4}), a < o → succ a < o) fun y x => WellFounded.fix lt_wf (fun o IH => if o0 : o = 0 then Eq.mpr (_ : C o = C 0) H₁ else if h : ∃ a, o = succ a then Eq.mpr (_ : C o = C (succ (pred o))) (H₂ (pred o) (IH (pred o) (_ : pred o < o))) else H₃ o (_ : o ≠ 0 ∧ ∀ (a : Ordinal.{u_4}), a < o → succ a < o) IH) y) 
= H₃ o h fun x _h => limitRecOn x H₁ H₂ H₃ [PROOFSTEP] rfl [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{?u.103711} ⊢ o < type fun x x_1 => x < x_1 [PROOFSTEP] rw [type_lt] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{?u.103711} ⊢ o < succ o [PROOFSTEP] exact lt_succ o [GOAL] α✝ : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s : β → β → Prop t : γ → γ → Prop α : Type u_4 r : α → α → Prop wo : IsWellOrder α r h : ∀ (a : Ordinal.{u_4}), a < type r → succ a < type r x : α ⊢ ∃ y, r x y [PROOFSTEP] use enum r (succ (typein r x)) (h _ (typein_lt_type r x)) [GOAL] case h α✝ : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s : β → β → Prop t : γ → γ → Prop α : Type u_4 r : α → α → Prop wo : IsWellOrder α r h : ∀ (a : Ordinal.{u_4}), a < type r → succ a < type r x : α ⊢ r x (enum r (succ (typein r x)) (_ : succ (typein r x) < type r)) [PROOFSTEP] convert (enum_lt_enum (typein_lt_type r x) (h _ (typein_lt_type r x))).mpr (lt_succ _) [GOAL] case h.e'_1 α✝ : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s : β → β → Prop t : γ → γ → Prop α : Type u_4 r : α → α → Prop wo : IsWellOrder α r h : ∀ (a : Ordinal.{u_4}), a < type r → succ a < type r x : α ⊢ x = enum r (typein r x) (_ : typein r x < type r) [PROOFSTEP] rw [enum_typein] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u_4} ho : ∀ (a : Ordinal.{u_4}), a < o → succ a < o ⊢ ∀ (a : Ordinal.{u_4}), (a < type fun a b => a < b) → succ a < type fun a b => a < b [PROOFSTEP] rwa [type_lt] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α → α → Prop s : β → β → Prop t : γ → γ → Prop r : α → α → Prop inst✝ : IsWellOrder α r hr : IsLimit (type r) x : α ⊢ Bounded r {x} [PROOFSTEP] refine' ⟨enum r (succ (typein r x)) (hr.2 _ (typein_lt_type r x)), _⟩ [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α → α → Prop s : β → β → Prop t : γ → γ → Prop r : α → α → Prop inst✝ : IsWellOrder α r hr : IsLimit (type r) x : α ⊢ ∀ (b : α), b ∈ {x} → r b (enum r (succ (typein r x)) (_ : succ (typein r x) < type r)) [PROOFSTEP] intro b hb [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α → α → Prop s : β → β → Prop t : γ → γ → Prop r : α → α → Prop inst✝ : IsWellOrder α r hr : IsLimit (type r) x b : α hb : b ∈ {x} ⊢ r b (enum r (succ (typein r x)) (_ : succ (typein r x) < type r)) [PROOFSTEP] rw [mem_singleton_iff.1 hb] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α → α → Prop s : β → β → Prop t : γ → γ → Prop r : α → α → Prop inst✝ : IsWellOrder α r hr : IsLimit (type r) x b : α hb : b ∈ {x} ⊢ r x (enum r (succ (typein r x)) (_ : succ (typein r x) < type r)) [PROOFSTEP] nth_rw 1 [← enum_typein r x] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α → α → Prop s : β → β → Prop t : γ → γ → Prop r : α → α → Prop inst✝ : IsWellOrder α r hr : IsLimit (type r) x b : α hb : b ∈ {x} ⊢ r (enum r (typein r x) (_ : typein r x < type r)) (enum r (succ (typein r x)) (_ : succ (typein r x) < type r)) [PROOFSTEP] rw [@enum_lt_enum _ r] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α → α → Prop s : β → β → Prop t : γ → γ → Prop r : α → α → Prop inst✝ : IsWellOrder α r hr : IsLimit (type r) x b : α hb : b ∈ {x} ⊢ typein r x < succ (typein r x) [PROOFSTEP] apply lt_succ [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} ⊢ type (Subrel (fun x x_1 => x < x_1) {o' | o' < o}) = lift o [PROOFSTEP] 
refine' Quotient.inductionOn o _ [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} ⊢ ∀ (a : WellOrder), type (Subrel (fun x x_1 => x < x_1) {o' | o' < Quotient.mk isEquivalent a}) = lift (Quotient.mk isEquivalent a) [PROOFSTEP] rintro ⟨α, r, wo⟩ [GOAL] case mk α✝ : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} α : Type u r : α → α → Prop wo : IsWellOrder α r ⊢ type (Subrel (fun x x_1 => x < x_1) {o' | o' < Quotient.mk isEquivalent { α := α, r := r, wo := wo }}) = lift (Quotient.mk isEquivalent { α := α, r := r, wo := wo }) [PROOFSTEP] skip [GOAL] case mk α✝ : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} α : Type u r : α → α → Prop wo : IsWellOrder α r ⊢ type (Subrel (fun x x_1 => x < x_1) {o' | o' < Quotient.mk isEquivalent { α := α, r := r, wo := wo }}) = lift (Quotient.mk isEquivalent { α := α, r := r, wo := wo }) [PROOFSTEP] apply Quotient.sound [GOAL] case mk.a α✝ : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} α : Type u r : α → α → Prop wo : IsWellOrder α r ⊢ { α := ↑{o' | o' < Quotient.mk isEquivalent { α := α, r := r, wo := wo }}, r := Subrel (fun x x_1 => x < x_1) {o' | o' < Quotient.mk isEquivalent { α := α, r := r, wo := wo }}, wo := (_ : IsWellOrder (↑{o' | o' < Quotient.mk isEquivalent { α := α, r := r, wo := wo }}) (Subrel (fun x x_1 => x < x_1) {o' | o' < Quotient.mk isEquivalent { α := α, r := r, wo := wo }})) } ≈ { α := ULift { α := α, r := r, wo := wo }.α, r := ULift.down ⁻¹'o { α := α, r := r, wo := wo }.r, wo := (_ : IsWellOrder (ULift { α := α, r := r, wo := wo }.α) (ULift.down ⁻¹'o { α := α, r := r, wo := wo }.r)) } [PROOFSTEP] constructor [GOAL] case mk.a.val α✝ : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} α : Type u r : α → α → Prop wo : IsWellOrder α r ⊢ Subrel (fun x x_1 => x < x_1) {o' | o' < Quotient.mk isEquivalent { α := α, r := r, wo := wo }} ≃r ULift.down ⁻¹'o { α := α, r := r, wo := wo }.r [PROOFSTEP] refine' ((RelIso.preimage Equiv.ulift r).trans (enumIso r).symm).symm [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} ⊢ #↑{o' | o' < o} = Cardinal.lift (card o) [PROOFSTEP] rw [lift_card, ← type_subrel_lt, card_type] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop f : Ordinal.{u_4} → Ordinal.{u_5} H : IsNormal f o : Ordinal.{u_4} h : IsLimit o a : Ordinal.{u_5} ⊢ ¬a < f o ↔ ¬∃ b, b < o ∧ a < f b [PROOFSTEP] simpa only [exists_prop, not_exists, not_and, not_lt] using H.2 _ h a [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop f : Ordinal.{u_4} → Ordinal.{u_5} H : IsNormal f a b : Ordinal.{u_4} ⊢ f a = f b ↔ a = b [PROOFSTEP] simp only [le_antisymm_iff, H.le_iff] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop f : Ordinal.{u_4} → Ordinal.{u_5} o : Ordinal.{u_5} H : IsNormal f p : Set Ordinal.{u_4} p0 : Set.Nonempty p b : Ordinal.{u_4} H₂ : ∀ (o : Ordinal.{u_4}), b ≤ o ↔ ∀ (a : Ordinal.{u_4}), a ∈ p → a ≤ o h : ∀ (a : Ordinal.{u_4}), a ∈ p → f a ≤ o ⊢ f b ≤ o [PROOFSTEP] induction b using limitRecOn with | H₁ => cases' p0 with x px have := Ordinal.le_zero.1 ((H₂ _).1 (Ordinal.zero_le _) _ px) rw [this] at px exact h _ px | H₂ S _ => rcases not_ball.1 (mt (H₂ 
S).2 <| (lt_succ S).not_le) with ⟨a, h₁, h₂⟩ exact (H.le_iff.2 <| succ_le_of_lt <| not_le.1 h₂).trans (h _ h₁) | H₃ S L _ => refine' (H.2 _ L _).2 fun a h' => _ rcases not_ball.1 (mt (H₂ a).2 h'.not_le) with ⟨b, h₁, h₂⟩ exact (H.le_iff.2 <| (not_le.1 h₂).le).trans (h _ h₁) [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop f : Ordinal.{u_4} → Ordinal.{u_5} o : Ordinal.{u_5} H : IsNormal f p : Set Ordinal.{u_4} p0 : Set.Nonempty p b : Ordinal.{u_4} H₂ : ∀ (o : Ordinal.{u_4}), b ≤ o ↔ ∀ (a : Ordinal.{u_4}), a ∈ p → a ≤ o h : ∀ (a : Ordinal.{u_4}), a ∈ p → f a ≤ o ⊢ f b ≤ o [PROOFSTEP] induction b using limitRecOn with | H₁ => cases' p0 with x px have := Ordinal.le_zero.1 ((H₂ _).1 (Ordinal.zero_le _) _ px) rw [this] at px exact h _ px | H₂ S _ => rcases not_ball.1 (mt (H₂ S).2 <| (lt_succ S).not_le) with ⟨a, h₁, h₂⟩ exact (H.le_iff.2 <| succ_le_of_lt <| not_le.1 h₂).trans (h _ h₁) | H₃ S L _ => refine' (H.2 _ L _).2 fun a h' => _ rcases not_ball.1 (mt (H₂ a).2 h'.not_le) with ⟨b, h₁, h₂⟩ exact (H.le_iff.2 <| (not_le.1 h₂).le).trans (h _ h₁) [GOAL] case H₁ α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop f : Ordinal.{u_4} → Ordinal.{u_5} o : Ordinal.{u_5} H : IsNormal f p : Set Ordinal.{u_4} p0 : Set.Nonempty p h : ∀ (a : Ordinal.{u_4}), a ∈ p → f a ≤ o H₂ : ∀ (o : Ordinal.{u_4}), 0 ≤ o ↔ ∀ (a : Ordinal.{u_4}), a ∈ p → a ≤ o ⊢ f 0 ≤ o [PROOFSTEP] | H₁ => cases' p0 with x px have := Ordinal.le_zero.1 ((H₂ _).1 (Ordinal.zero_le _) _ px) rw [this] at px exact h _ px [GOAL] case H₁ α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop f : Ordinal.{u_4} → Ordinal.{u_5} o : Ordinal.{u_5} H : IsNormal f p : Set Ordinal.{u_4} p0 : Set.Nonempty p h : ∀ (a : Ordinal.{u_4}), a ∈ p → f a ≤ o H₂ : ∀ (o : Ordinal.{u_4}), 0 ≤ o ↔ ∀ (a : Ordinal.{u_4}), a ∈ p → a ≤ o ⊢ f 0 ≤ o [PROOFSTEP] cases' p0 with x px [GOAL] case H₁.intro α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop f : Ordinal.{u_4} → Ordinal.{u_5} o : Ordinal.{u_5} H : IsNormal f p : Set Ordinal.{u_4} h : ∀ (a : Ordinal.{u_4}), a ∈ p → f a ≤ o H₂ : ∀ (o : Ordinal.{u_4}), 0 ≤ o ↔ ∀ (a : Ordinal.{u_4}), a ∈ p → a ≤ o x : Ordinal.{u_4} px : x ∈ p ⊢ f 0 ≤ o [PROOFSTEP] have := Ordinal.le_zero.1 ((H₂ _).1 (Ordinal.zero_le _) _ px) [GOAL] case H₁.intro α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop f : Ordinal.{u_4} → Ordinal.{u_5} o : Ordinal.{u_5} H : IsNormal f p : Set Ordinal.{u_4} h : ∀ (a : Ordinal.{u_4}), a ∈ p → f a ≤ o H₂ : ∀ (o : Ordinal.{u_4}), 0 ≤ o ↔ ∀ (a : Ordinal.{u_4}), a ∈ p → a ≤ o x : Ordinal.{u_4} px : x ∈ p this : x = 0 ⊢ f 0 ≤ o [PROOFSTEP] rw [this] at px [GOAL] case H₁.intro α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop f : Ordinal.{u_4} → Ordinal.{u_5} o : Ordinal.{u_5} H : IsNormal f p : Set Ordinal.{u_4} h : ∀ (a : Ordinal.{u_4}), a ∈ p → f a ≤ o H₂ : ∀ (o : Ordinal.{u_4}), 0 ≤ o ↔ ∀ (a : Ordinal.{u_4}), a ∈ p → a ≤ o x : Ordinal.{u_4} px : 0 ∈ p this : x = 0 ⊢ f 0 ≤ o [PROOFSTEP] exact h _ px [GOAL] case H₂ α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop f : Ordinal.{u_4} → Ordinal.{u_5} o : Ordinal.{u_5} H : IsNormal f p : Set Ordinal.{u_4} p0 : Set.Nonempty p h : ∀ (a : Ordinal.{u_4}), a ∈ p → f a ≤ o S : Ordinal.{u_4} a✝ : (∀ (o : Ordinal.{u_4}), S ≤ o ↔ ∀ (a : Ordinal.{u_4}), a ∈ p → a ≤ o) → f S ≤ o H₂ : ∀ (o : Ordinal.{u_4}), succ 
S ≤ o ↔ ∀ (a : Ordinal.{u_4}), a ∈ p → a ≤ o ⊢ f (succ S) ≤ o [PROOFSTEP] | H₂ S _ => rcases not_ball.1 (mt (H₂ S).2 <| (lt_succ S).not_le) with ⟨a, h₁, h₂⟩ exact (H.le_iff.2 <| succ_le_of_lt <| not_le.1 h₂).trans (h _ h₁) [GOAL] case H₂ α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop f : Ordinal.{u_4} → Ordinal.{u_5} o : Ordinal.{u_5} H : IsNormal f p : Set Ordinal.{u_4} p0 : Set.Nonempty p h : ∀ (a : Ordinal.{u_4}), a ∈ p → f a ≤ o S : Ordinal.{u_4} a✝ : (∀ (o : Ordinal.{u_4}), S ≤ o ↔ ∀ (a : Ordinal.{u_4}), a ∈ p → a ≤ o) → f S ≤ o H₂ : ∀ (o : Ordinal.{u_4}), succ S ≤ o ↔ ∀ (a : Ordinal.{u_4}), a ∈ p → a ≤ o ⊢ f (succ S) ≤ o [PROOFSTEP] rcases not_ball.1 (mt (H₂ S).2 <| (lt_succ S).not_le) with ⟨a, h₁, h₂⟩ [GOAL] case H₂.intro.intro α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop f : Ordinal.{u_4} → Ordinal.{u_5} o : Ordinal.{u_5} H : IsNormal f p : Set Ordinal.{u_4} p0 : Set.Nonempty p h : ∀ (a : Ordinal.{u_4}), a ∈ p → f a ≤ o S : Ordinal.{u_4} a✝ : (∀ (o : Ordinal.{u_4}), S ≤ o ↔ ∀ (a : Ordinal.{u_4}), a ∈ p → a ≤ o) → f S ≤ o H₂ : ∀ (o : Ordinal.{u_4}), succ S ≤ o ↔ ∀ (a : Ordinal.{u_4}), a ∈ p → a ≤ o a : Ordinal.{u_4} h₁ : a ∈ p h₂ : ¬a ≤ S ⊢ f (succ S) ≤ o [PROOFSTEP] exact (H.le_iff.2 <| succ_le_of_lt <| not_le.1 h₂).trans (h _ h₁) [GOAL] case H₃ α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop f : Ordinal.{u_4} → Ordinal.{u_5} o : Ordinal.{u_5} H : IsNormal f p : Set Ordinal.{u_4} p0 : Set.Nonempty p h : ∀ (a : Ordinal.{u_4}), a ∈ p → f a ≤ o S : Ordinal.{u_4} L : IsLimit S a✝ : ∀ (o' : Ordinal.{u_4}), o' < S → (∀ (o : Ordinal.{u_4}), o' ≤ o ↔ ∀ (a : Ordinal.{u_4}), a ∈ p → a ≤ o) → f o' ≤ o H₂ : ∀ (o : Ordinal.{u_4}), S ≤ o ↔ ∀ (a : Ordinal.{u_4}), a ∈ p → a ≤ o ⊢ f S ≤ o [PROOFSTEP] | H₃ S L _ => refine' (H.2 _ L _).2 fun a h' => _ rcases not_ball.1 (mt (H₂ a).2 h'.not_le) with ⟨b, h₁, h₂⟩ exact (H.le_iff.2 <| (not_le.1 h₂).le).trans (h _ h₁) [GOAL] case H₃ α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop f : Ordinal.{u_4} → Ordinal.{u_5} o : Ordinal.{u_5} H : IsNormal f p : Set Ordinal.{u_4} p0 : Set.Nonempty p h : ∀ (a : Ordinal.{u_4}), a ∈ p → f a ≤ o S : Ordinal.{u_4} L : IsLimit S a✝ : ∀ (o' : Ordinal.{u_4}), o' < S → (∀ (o : Ordinal.{u_4}), o' ≤ o ↔ ∀ (a : Ordinal.{u_4}), a ∈ p → a ≤ o) → f o' ≤ o H₂ : ∀ (o : Ordinal.{u_4}), S ≤ o ↔ ∀ (a : Ordinal.{u_4}), a ∈ p → a ≤ o ⊢ f S ≤ o [PROOFSTEP] refine' (H.2 _ L _).2 fun a h' => _ [GOAL] case H₃ α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop f : Ordinal.{u_4} → Ordinal.{u_5} o : Ordinal.{u_5} H : IsNormal f p : Set Ordinal.{u_4} p0 : Set.Nonempty p h : ∀ (a : Ordinal.{u_4}), a ∈ p → f a ≤ o S : Ordinal.{u_4} L : IsLimit S a✝ : ∀ (o' : Ordinal.{u_4}), o' < S → (∀ (o : Ordinal.{u_4}), o' ≤ o ↔ ∀ (a : Ordinal.{u_4}), a ∈ p → a ≤ o) → f o' ≤ o H₂ : ∀ (o : Ordinal.{u_4}), S ≤ o ↔ ∀ (a : Ordinal.{u_4}), a ∈ p → a ≤ o a : Ordinal.{u_4} h' : a < S ⊢ f a ≤ o [PROOFSTEP] rcases not_ball.1 (mt (H₂ a).2 h'.not_le) with ⟨b, h₁, h₂⟩ [GOAL] case H₃.intro.intro α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop f : Ordinal.{u_4} → Ordinal.{u_5} o : Ordinal.{u_5} H : IsNormal f p : Set Ordinal.{u_4} p0 : Set.Nonempty p h : ∀ (a : Ordinal.{u_4}), a ∈ p → f a ≤ o S : Ordinal.{u_4} L : IsLimit S a✝ : ∀ (o' : Ordinal.{u_4}), o' < S → (∀ (o : Ordinal.{u_4}), o' ≤ o ↔ ∀ (a : Ordinal.{u_4}), a ∈ p → a ≤ 
o) → f o' ≤ o H₂ : ∀ (o : Ordinal.{u_4}), S ≤ o ↔ ∀ (a : Ordinal.{u_4}), a ∈ p → a ≤ o a : Ordinal.{u_4} h' : a < S b : Ordinal.{u_4} h₁ : b ∈ p h₂ : ¬b ≤ a ⊢ f a ≤ o [PROOFSTEP] exact (H.le_iff.2 <| (not_le.1 h₂).le).trans (h _ h₁) [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop f : Ordinal.{u_4} → Ordinal.{u_5} o : Ordinal.{u_5} H : IsNormal f p : Set α p0 : Set.Nonempty p g : α → Ordinal.{u_4} b : Ordinal.{u_4} H₂ : ∀ (o : Ordinal.{u_4}), b ≤ o ↔ ∀ (a : α), a ∈ p → g a ≤ o ⊢ f b ≤ o ↔ ∀ (a : α), a ∈ p → f (g a) ≤ o [PROOFSTEP] simpa [H₂] using H.le_set (g '' p) (p0.image g) b [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b c : Ordinal.{u_4} h : IsLimit b H : ∀ (b' : Ordinal.{u_4}), b' < b → a + b' ≤ c ⊢ ¬c < a + b [PROOFSTEP] induction a using inductionOn with | H α r => induction b using inductionOn with | H β s => intro l suffices ∀ x : β, Sum.Lex r s (Sum.inr x) (enum _ _ l) by -- Porting note: `revert` & `intro` is required because `cases'` doesn't replace -- `enum _ _ l` in `this`. revert this; cases' enum _ _ l with x x <;> intro this · cases this (enum s 0 h.pos) · exact irrefl _ (this _) intro x rw [← typein_lt_typein (Sum.Lex r s), typein_enum] have := H _ (h.2 _ (typein_lt_type s x)) rw [add_succ, succ_le_iff] at this refine' (RelEmbedding.ofMonotone (fun a => _) fun a b => _).ordinal_type_le.trans_lt this · rcases a with ⟨a | b, h⟩ · exact Sum.inl a · exact Sum.inr ⟨b, by cases h; assumption⟩ · rcases a with ⟨a | a, h₁⟩ <;> rcases b with ⟨b | b, h₂⟩ <;> cases h₁ <;> cases h₂ <;> rintro ⟨⟩ <;> constructor <;> assumption [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b c : Ordinal.{u_4} h : IsLimit b H : ∀ (b' : Ordinal.{u_4}), b' < b → a + b' ≤ c ⊢ ¬c < a + b [PROOFSTEP] induction a using inductionOn with | H α r => induction b using inductionOn with | H β s => intro l suffices ∀ x : β, Sum.Lex r s (Sum.inr x) (enum _ _ l) by -- Porting note: `revert` & `intro` is required because `cases'` doesn't replace -- `enum _ _ l` in `this`. revert this; cases' enum _ _ l with x x <;> intro this · cases this (enum s 0 h.pos) · exact irrefl _ (this _) intro x rw [← typein_lt_typein (Sum.Lex r s), typein_enum] have := H _ (h.2 _ (typein_lt_type s x)) rw [add_succ, succ_le_iff] at this refine' (RelEmbedding.ofMonotone (fun a => _) fun a b => _).ordinal_type_le.trans_lt this · rcases a with ⟨a | b, h⟩ · exact Sum.inl a · exact Sum.inr ⟨b, by cases h; assumption⟩ · rcases a with ⟨a | a, h₁⟩ <;> rcases b with ⟨b | b, h₂⟩ <;> cases h₁ <;> cases h₂ <;> rintro ⟨⟩ <;> constructor <;> assumption [GOAL] case H α✝ : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s : β → β → Prop t : γ → γ → Prop b c : Ordinal.{u_4} h : IsLimit b α : Type u_4 r : α → α → Prop inst✝ : IsWellOrder α r H : ∀ (b' : Ordinal.{u_4}), b' < b → type r + b' ≤ c ⊢ ¬c < type r + b [PROOFSTEP] | H α r => induction b using inductionOn with | H β s => intro l suffices ∀ x : β, Sum.Lex r s (Sum.inr x) (enum _ _ l) by -- Porting note: `revert` & `intro` is required because `cases'` doesn't replace -- `enum _ _ l` in `this`. 
revert this; cases' enum _ _ l with x x <;> intro this · cases this (enum s 0 h.pos) · exact irrefl _ (this _) intro x rw [← typein_lt_typein (Sum.Lex r s), typein_enum] have := H _ (h.2 _ (typein_lt_type s x)) rw [add_succ, succ_le_iff] at this refine' (RelEmbedding.ofMonotone (fun a => _) fun a b => _).ordinal_type_le.trans_lt this · rcases a with ⟨a | b, h⟩ · exact Sum.inl a · exact Sum.inr ⟨b, by cases h; assumption⟩ · rcases a with ⟨a | a, h₁⟩ <;> rcases b with ⟨b | b, h₂⟩ <;> cases h₁ <;> cases h₂ <;> rintro ⟨⟩ <;> constructor <;> assumption [GOAL] case H α✝ : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s : β → β → Prop t : γ → γ → Prop b c : Ordinal.{u_4} h : IsLimit b α : Type u_4 r : α → α → Prop inst✝ : IsWellOrder α r H : ∀ (b' : Ordinal.{u_4}), b' < b → type r + b' ≤ c ⊢ ¬c < type r + b [PROOFSTEP] induction b using inductionOn with | H β s => intro l suffices ∀ x : β, Sum.Lex r s (Sum.inr x) (enum _ _ l) by -- Porting note: `revert` & `intro` is required because `cases'` doesn't replace -- `enum _ _ l` in `this`. revert this; cases' enum _ _ l with x x <;> intro this · cases this (enum s 0 h.pos) · exact irrefl _ (this _) intro x rw [← typein_lt_typein (Sum.Lex r s), typein_enum] have := H _ (h.2 _ (typein_lt_type s x)) rw [add_succ, succ_le_iff] at this refine' (RelEmbedding.ofMonotone (fun a => _) fun a b => _).ordinal_type_le.trans_lt this · rcases a with ⟨a | b, h⟩ · exact Sum.inl a · exact Sum.inr ⟨b, by cases h; assumption⟩ · rcases a with ⟨a | a, h₁⟩ <;> rcases b with ⟨b | b, h₂⟩ <;> cases h₁ <;> cases h₂ <;> rintro ⟨⟩ <;> constructor <;> assumption [GOAL] case H α✝ : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s : β → β → Prop t : γ → γ → Prop b c : Ordinal.{u_4} h : IsLimit b α : Type u_4 r : α → α → Prop inst✝ : IsWellOrder α r H : ∀ (b' : Ordinal.{u_4}), b' < b → type r + b' ≤ c ⊢ ¬c < type r + b [PROOFSTEP] induction b using inductionOn with | H β s => intro l suffices ∀ x : β, Sum.Lex r s (Sum.inr x) (enum _ _ l) by -- Porting note: `revert` & `intro` is required because `cases'` doesn't replace -- `enum _ _ l` in `this`. revert this; cases' enum _ _ l with x x <;> intro this · cases this (enum s 0 h.pos) · exact irrefl _ (this _) intro x rw [← typein_lt_typein (Sum.Lex r s), typein_enum] have := H _ (h.2 _ (typein_lt_type s x)) rw [add_succ, succ_le_iff] at this refine' (RelEmbedding.ofMonotone (fun a => _) fun a b => _).ordinal_type_le.trans_lt this · rcases a with ⟨a | b, h⟩ · exact Sum.inl a · exact Sum.inr ⟨b, by cases h; assumption⟩ · rcases a with ⟨a | a, h₁⟩ <;> rcases b with ⟨b | b, h₂⟩ <;> cases h₁ <;> cases h₂ <;> rintro ⟨⟩ <;> constructor <;> assumption [GOAL] case H.H α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop c : Ordinal.{u_4} α : Type u_4 r : α → α → Prop inst✝¹ : IsWellOrder α r β : Type u_4 s : β → β → Prop inst✝ : IsWellOrder β s h : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r + b' ≤ c ⊢ ¬c < type r + type s [PROOFSTEP] | H β s => intro l suffices ∀ x : β, Sum.Lex r s (Sum.inr x) (enum _ _ l) by -- Porting note: `revert` & `intro` is required because `cases'` doesn't replace -- `enum _ _ l` in `this`. 
revert this; cases' enum _ _ l with x x <;> intro this · cases this (enum s 0 h.pos) · exact irrefl _ (this _) intro x rw [← typein_lt_typein (Sum.Lex r s), typein_enum] have := H _ (h.2 _ (typein_lt_type s x)) rw [add_succ, succ_le_iff] at this refine' (RelEmbedding.ofMonotone (fun a => _) fun a b => _).ordinal_type_le.trans_lt this · rcases a with ⟨a | b, h⟩ · exact Sum.inl a · exact Sum.inr ⟨b, by cases h; assumption⟩ · rcases a with ⟨a | a, h₁⟩ <;> rcases b with ⟨b | b, h₂⟩ <;> cases h₁ <;> cases h₂ <;> rintro ⟨⟩ <;> constructor <;> assumption [GOAL] case H.H α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop c : Ordinal.{u_4} α : Type u_4 r : α → α → Prop inst✝¹ : IsWellOrder α r β : Type u_4 s : β → β → Prop inst✝ : IsWellOrder β s h : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r + b' ≤ c ⊢ ¬c < type r + type s [PROOFSTEP] intro l [GOAL] case H.H α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop c : Ordinal.{u_4} α : Type u_4 r : α → α → Prop inst✝¹ : IsWellOrder α r β : Type u_4 s : β → β → Prop inst✝ : IsWellOrder β s h : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r + b' ≤ c l : c < type r + type s ⊢ False [PROOFSTEP] suffices ∀ x : β, Sum.Lex r s (Sum.inr x) (enum _ _ l) by -- Porting note: `revert` & `intro` is required because `cases'` doesn't replace -- `enum _ _ l` in `this`. revert this; cases' enum _ _ l with x x <;> intro this · cases this (enum s 0 h.pos) · exact irrefl _ (this _) [GOAL] α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop c : Ordinal.{u_4} α : Type u_4 r : α → α → Prop inst✝¹ : IsWellOrder α r β : Type u_4 s : β → β → Prop inst✝ : IsWellOrder β s h : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r + b' ≤ c l : c < type r + type s this : ∀ (x : β), Sum.Lex r s (Sum.inr x) (enum (Sum.Lex r s) c l) ⊢ False [PROOFSTEP] revert this [GOAL] α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop c : Ordinal.{u_4} α : Type u_4 r : α → α → Prop inst✝¹ : IsWellOrder α r β : Type u_4 s : β → β → Prop inst✝ : IsWellOrder β s h : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r + b' ≤ c l : c < type r + type s ⊢ (∀ (x : β), Sum.Lex r s (Sum.inr x) (enum (Sum.Lex r s) c l)) → False [PROOFSTEP] cases' enum _ _ l with x x [GOAL] case inl α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop c : Ordinal.{u_4} α : Type u_4 r : α → α → Prop inst✝¹ : IsWellOrder α r β : Type u_4 s : β → β → Prop inst✝ : IsWellOrder β s h : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r + b' ≤ c l : c < type r + type s x : α ⊢ (∀ (x_1 : β), Sum.Lex r s (Sum.inr x_1) (Sum.inl x)) → False [PROOFSTEP] intro this [GOAL] case inr α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop c : Ordinal.{u_4} α : Type u_4 r : α → α → Prop inst✝¹ : IsWellOrder α r β : Type u_4 s : β → β → Prop inst✝ : IsWellOrder β s h : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r + b' ≤ c l : c < type r + type s x : β ⊢ (∀ (x_1 : β), Sum.Lex r s (Sum.inr x_1) (Sum.inr x)) → False [PROOFSTEP] intro this [GOAL] case inl α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop c : Ordinal.{u_4} α : Type u_4 r : α → α → Prop inst✝¹ : IsWellOrder α r β : Type u_4 s : β → β → Prop inst✝ : IsWellOrder β s h : 
IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r + b' ≤ c l : c < type r + type s x : α this : ∀ (x_1 : β), Sum.Lex r s (Sum.inr x_1) (Sum.inl x) ⊢ False [PROOFSTEP] cases this (enum s 0 h.pos) [GOAL] case inr α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop c : Ordinal.{u_4} α : Type u_4 r : α → α → Prop inst✝¹ : IsWellOrder α r β : Type u_4 s : β → β → Prop inst✝ : IsWellOrder β s h : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r + b' ≤ c l : c < type r + type s x : β this : ∀ (x_1 : β), Sum.Lex r s (Sum.inr x_1) (Sum.inr x) ⊢ False [PROOFSTEP] exact irrefl _ (this _) [GOAL] case H.H α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop c : Ordinal.{u_4} α : Type u_4 r : α → α → Prop inst✝¹ : IsWellOrder α r β : Type u_4 s : β → β → Prop inst✝ : IsWellOrder β s h : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r + b' ≤ c l : c < type r + type s ⊢ ∀ (x : β), Sum.Lex r s (Sum.inr x) (enum (Sum.Lex r s) c l) [PROOFSTEP] intro x [GOAL] case H.H α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop c : Ordinal.{u_4} α : Type u_4 r : α → α → Prop inst✝¹ : IsWellOrder α r β : Type u_4 s : β → β → Prop inst✝ : IsWellOrder β s h : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r + b' ≤ c l : c < type r + type s x : β ⊢ Sum.Lex r s (Sum.inr x) (enum (Sum.Lex r s) c l) [PROOFSTEP] rw [← typein_lt_typein (Sum.Lex r s), typein_enum] [GOAL] case H.H α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop c : Ordinal.{u_4} α : Type u_4 r : α → α → Prop inst✝¹ : IsWellOrder α r β : Type u_4 s : β → β → Prop inst✝ : IsWellOrder β s h : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r + b' ≤ c l : c < type r + type s x : β ⊢ typein (Sum.Lex r s) (Sum.inr x) < c [PROOFSTEP] have := H _ (h.2 _ (typein_lt_type s x)) [GOAL] case H.H α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop c : Ordinal.{u_4} α : Type u_4 r : α → α → Prop inst✝¹ : IsWellOrder α r β : Type u_4 s : β → β → Prop inst✝ : IsWellOrder β s h : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r + b' ≤ c l : c < type r + type s x : β this : type r + succ (typein s x) ≤ c ⊢ typein (Sum.Lex r s) (Sum.inr x) < c [PROOFSTEP] rw [add_succ, succ_le_iff] at this [GOAL] case H.H α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop c : Ordinal.{u_4} α : Type u_4 r : α → α → Prop inst✝¹ : IsWellOrder α r β : Type u_4 s : β → β → Prop inst✝ : IsWellOrder β s h : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r + b' ≤ c l : c < type r + type s x : β this : type r + typein s x < c ⊢ typein (Sum.Lex r s) (Sum.inr x) < c [PROOFSTEP] refine' (RelEmbedding.ofMonotone (fun a => _) fun a b => _).ordinal_type_le.trans_lt this [GOAL] case H.H.refine'_1 α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop c : Ordinal.{u_4} α : Type u_4 r : α → α → Prop inst✝¹ : IsWellOrder α r β : Type u_4 s : β → β → Prop inst✝ : IsWellOrder β s h : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r + b' ≤ c l : c < type r + type s x : β this : type r + typein s x < c a : ↑{b | Sum.Lex r s b (Sum.inr x)} ⊢ α ⊕ ↑{b | s b x} [PROOFSTEP] rcases a with ⟨a | b, h⟩ [GOAL] case H.H.refine'_1.mk.inl α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : 
α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop c : Ordinal.{u_4} α : Type u_4 r : α → α → Prop inst✝¹ : IsWellOrder α r β : Type u_4 s : β → β → Prop inst✝ : IsWellOrder β s h✝ : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r + b' ≤ c l : c < type r + type s x : β this : type r + typein s x < c a : α h : Sum.inl a ∈ {b | Sum.Lex r s b (Sum.inr x)} ⊢ α ⊕ ↑{b | s b x} [PROOFSTEP] exact Sum.inl a [GOAL] case H.H.refine'_1.mk.inr α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop c : Ordinal.{u_4} α : Type u_4 r : α → α → Prop inst✝¹ : IsWellOrder α r β : Type u_4 s : β → β → Prop inst✝ : IsWellOrder β s h✝ : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r + b' ≤ c l : c < type r + type s x : β this : type r + typein s x < c b : β h : Sum.inr b ∈ {b | Sum.Lex r s b (Sum.inr x)} ⊢ α ⊕ ↑{b | s b x} [PROOFSTEP] exact Sum.inr ⟨b, by cases h; assumption⟩ [GOAL] α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop c : Ordinal.{u_4} α : Type u_4 r : α → α → Prop inst✝¹ : IsWellOrder α r β : Type u_4 s : β → β → Prop inst✝ : IsWellOrder β s h✝ : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r + b' ≤ c l : c < type r + type s x : β this : type r + typein s x < c b : β h : Sum.inr b ∈ {b | Sum.Lex r s b (Sum.inr x)} ⊢ b ∈ {b | s b x} [PROOFSTEP] cases h [GOAL] case inr α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop c : Ordinal.{u_4} α : Type u_4 r : α → α → Prop inst✝¹ : IsWellOrder α r β : Type u_4 s : β → β → Prop inst✝ : IsWellOrder β s h : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r + b' ≤ c l : c < type r + type s x : β this : type r + typein s x < c b : β h✝ : s b x ⊢ b ∈ {b | s b x} [PROOFSTEP] assumption [GOAL] case H.H.refine'_2 α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop c : Ordinal.{u_4} α : Type u_4 r : α → α → Prop inst✝¹ : IsWellOrder α r β : Type u_4 s : β → β → Prop inst✝ : IsWellOrder β s h : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r + b' ≤ c l : c < type r + type s x : β this : type r + typein s x < c a b : ↑{b | Sum.Lex r s b (Sum.inr x)} ⊢ Subrel (Sum.Lex r s) {b | Sum.Lex r s b (Sum.inr x)} a b → Sum.Lex r (Subrel s {b | s b x}) ((fun a => Subtype.casesOn a fun val h => Sum.casesOn (motive := fun x_1 => x_1 ∈ {b | Sum.Lex r s b (Sum.inr x)} → α ⊕ ↑{b | s b x}) val (fun a h => Sum.inl a) (fun b h => Sum.inr { val := b, property := (_ : b ∈ {b | s b x}) }) h) a) ((fun a => Subtype.casesOn a fun val h => Sum.casesOn (motive := fun x_1 => x_1 ∈ {b | Sum.Lex r s b (Sum.inr x)} → α ⊕ ↑{b | s b x}) val (fun a h => Sum.inl a) (fun b h => Sum.inr { val := b, property := (_ : b ∈ {b | s b x}) }) h) b) [PROOFSTEP] rcases a with ⟨a | a, h₁⟩ [GOAL] case H.H.refine'_2.mk.inl α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop c : Ordinal.{u_4} α : Type u_4 r : α → α → Prop inst✝¹ : IsWellOrder α r β : Type u_4 s : β → β → Prop inst✝ : IsWellOrder β s h : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r + b' ≤ c l : c < type r + type s x : β this : type r + typein s x < c b : ↑{b | Sum.Lex r s b (Sum.inr x)} a : α h₁ : Sum.inl a ∈ {b | Sum.Lex r s b (Sum.inr x)} ⊢ Subrel (Sum.Lex r s) {b | Sum.Lex r s b (Sum.inr x)} { val := Sum.inl a, property := h₁ } b → Sum.Lex r (Subrel s {b | s b x}) ((fun a => Subtype.casesOn a fun val h => 
Sum.casesOn (motive := fun x_1 => x_1 ∈ {b | Sum.Lex r s b (Sum.inr x)} → α ⊕ ↑{b | s b x}) val (fun a h => Sum.inl a) (fun b h => Sum.inr { val := b, property := (_ : b ∈ {b | s b x}) }) h) { val := Sum.inl a, property := h₁ }) ((fun a => Subtype.casesOn a fun val h => Sum.casesOn (motive := fun x_1 => x_1 ∈ {b | Sum.Lex r s b (Sum.inr x)} → α ⊕ ↑{b | s b x}) val (fun a h => Sum.inl a) (fun b h => Sum.inr { val := b, property := (_ : b ∈ {b | s b x}) }) h) b) [PROOFSTEP] rcases b with ⟨b | b, h₂⟩ [GOAL] case H.H.refine'_2.mk.inr α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop c : Ordinal.{u_4} α : Type u_4 r : α → α → Prop inst✝¹ : IsWellOrder α r β : Type u_4 s : β → β → Prop inst✝ : IsWellOrder β s h : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r + b' ≤ c l : c < type r + type s x : β this : type r + typein s x < c b : ↑{b | Sum.Lex r s b (Sum.inr x)} a : β h₁ : Sum.inr a ∈ {b | Sum.Lex r s b (Sum.inr x)} ⊢ Subrel (Sum.Lex r s) {b | Sum.Lex r s b (Sum.inr x)} { val := Sum.inr a, property := h₁ } b → Sum.Lex r (Subrel s {b | s b x}) ((fun a => Subtype.casesOn a fun val h => Sum.casesOn (motive := fun x_1 => x_1 ∈ {b | Sum.Lex r s b (Sum.inr x)} → α ⊕ ↑{b | s b x}) val (fun a h => Sum.inl a) (fun b h => Sum.inr { val := b, property := (_ : b ∈ {b | s b x}) }) h) { val := Sum.inr a, property := h₁ }) ((fun a => Subtype.casesOn a fun val h => Sum.casesOn (motive := fun x_1 => x_1 ∈ {b | Sum.Lex r s b (Sum.inr x)} → α ⊕ ↑{b | s b x}) val (fun a h => Sum.inl a) (fun b h => Sum.inr { val := b, property := (_ : b ∈ {b | s b x}) }) h) b) [PROOFSTEP] rcases b with ⟨b | b, h₂⟩ [GOAL] case H.H.refine'_2.mk.inl.mk.inl α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop c : Ordinal.{u_4} α : Type u_4 r : α → α → Prop inst✝¹ : IsWellOrder α r β : Type u_4 s : β → β → Prop inst✝ : IsWellOrder β s h : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r + b' ≤ c l : c < type r + type s x : β this : type r + typein s x < c a : α h₁ : Sum.inl a ∈ {b | Sum.Lex r s b (Sum.inr x)} b : α h₂ : Sum.inl b ∈ {b | Sum.Lex r s b (Sum.inr x)} ⊢ Subrel (Sum.Lex r s) {b | Sum.Lex r s b (Sum.inr x)} { val := Sum.inl a, property := h₁ } { val := Sum.inl b, property := h₂ } → Sum.Lex r (Subrel s {b | s b x}) ((fun a => Subtype.casesOn a fun val h => Sum.casesOn (motive := fun x_1 => x_1 ∈ {b | Sum.Lex r s b (Sum.inr x)} → α ⊕ ↑{b | s b x}) val (fun a h => Sum.inl a) (fun b h => Sum.inr { val := b, property := (_ : b ∈ {b | s b x}) }) h) { val := Sum.inl a, property := h₁ }) ((fun a => Subtype.casesOn a fun val h => Sum.casesOn (motive := fun x_1 => x_1 ∈ {b | Sum.Lex r s b (Sum.inr x)} → α ⊕ ↑{b | s b x}) val (fun a h => Sum.inl a) (fun b h => Sum.inr { val := b, property := (_ : b ∈ {b | s b x}) }) h) { val := Sum.inl b, property := h₂ }) [PROOFSTEP] cases h₁ [GOAL] case H.H.refine'_2.mk.inl.mk.inr α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop c : Ordinal.{u_4} α : Type u_4 r : α → α → Prop inst✝¹ : IsWellOrder α r β : Type u_4 s : β → β → Prop inst✝ : IsWellOrder β s h : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r + b' ≤ c l : c < type r + type s x : β this : type r + typein s x < c a : α h₁ : Sum.inl a ∈ {b | Sum.Lex r s b (Sum.inr x)} b : β h₂ : Sum.inr b ∈ {b | Sum.Lex r s b (Sum.inr x)} ⊢ Subrel (Sum.Lex r s) {b | Sum.Lex r s b (Sum.inr x)} { val := Sum.inl a, property := h₁ } { val := Sum.inr b, property := h₂ } → 
Sum.Lex r (Subrel s {b | s b x}) ((fun a => Subtype.casesOn a fun val h => Sum.casesOn (motive := fun x_1 => x_1 ∈ {b | Sum.Lex r s b (Sum.inr x)} → α ⊕ ↑{b | s b x}) val (fun a h => Sum.inl a) (fun b h => Sum.inr { val := b, property := (_ : b ∈ {b | s b x}) }) h) { val := Sum.inl a, property := h₁ }) ((fun a => Subtype.casesOn a fun val h => Sum.casesOn (motive := fun x_1 => x_1 ∈ {b | Sum.Lex r s b (Sum.inr x)} → α ⊕ ↑{b | s b x}) val (fun a h => Sum.inl a) (fun b h => Sum.inr { val := b, property := (_ : b ∈ {b | s b x}) }) h) { val := Sum.inr b, property := h₂ }) [PROOFSTEP] cases h₁ [GOAL] case H.H.refine'_2.mk.inr.mk.inl α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop c : Ordinal.{u_4} α : Type u_4 r : α → α → Prop inst✝¹ : IsWellOrder α r β : Type u_4 s : β → β → Prop inst✝ : IsWellOrder β s h : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r + b' ≤ c l : c < type r + type s x : β this : type r + typein s x < c a : β h₁ : Sum.inr a ∈ {b | Sum.Lex r s b (Sum.inr x)} b : α h₂ : Sum.inl b ∈ {b | Sum.Lex r s b (Sum.inr x)} ⊢ Subrel (Sum.Lex r s) {b | Sum.Lex r s b (Sum.inr x)} { val := Sum.inr a, property := h₁ } { val := Sum.inl b, property := h₂ } → Sum.Lex r (Subrel s {b | s b x}) ((fun a => Subtype.casesOn a fun val h => Sum.casesOn (motive := fun x_1 => x_1 ∈ {b | Sum.Lex r s b (Sum.inr x)} → α ⊕ ↑{b | s b x}) val (fun a h => Sum.inl a) (fun b h => Sum.inr { val := b, property := (_ : b ∈ {b | s b x}) }) h) { val := Sum.inr a, property := h₁ }) ((fun a => Subtype.casesOn a fun val h => Sum.casesOn (motive := fun x_1 => x_1 ∈ {b | Sum.Lex r s b (Sum.inr x)} → α ⊕ ↑{b | s b x}) val (fun a h => Sum.inl a) (fun b h => Sum.inr { val := b, property := (_ : b ∈ {b | s b x}) }) h) { val := Sum.inl b, property := h₂ }) [PROOFSTEP] cases h₁ [GOAL] case H.H.refine'_2.mk.inr.mk.inr α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop c : Ordinal.{u_4} α : Type u_4 r : α → α → Prop inst✝¹ : IsWellOrder α r β : Type u_4 s : β → β → Prop inst✝ : IsWellOrder β s h : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r + b' ≤ c l : c < type r + type s x : β this : type r + typein s x < c a : β h₁ : Sum.inr a ∈ {b | Sum.Lex r s b (Sum.inr x)} b : β h₂ : Sum.inr b ∈ {b | Sum.Lex r s b (Sum.inr x)} ⊢ Subrel (Sum.Lex r s) {b | Sum.Lex r s b (Sum.inr x)} { val := Sum.inr a, property := h₁ } { val := Sum.inr b, property := h₂ } → Sum.Lex r (Subrel s {b | s b x}) ((fun a => Subtype.casesOn a fun val h => Sum.casesOn (motive := fun x_1 => x_1 ∈ {b | Sum.Lex r s b (Sum.inr x)} → α ⊕ ↑{b | s b x}) val (fun a h => Sum.inl a) (fun b h => Sum.inr { val := b, property := (_ : b ∈ {b | s b x}) }) h) { val := Sum.inr a, property := h₁ }) ((fun a => Subtype.casesOn a fun val h => Sum.casesOn (motive := fun x_1 => x_1 ∈ {b | Sum.Lex r s b (Sum.inr x)} → α ⊕ ↑{b | s b x}) val (fun a h => Sum.inl a) (fun b h => Sum.inr { val := b, property := (_ : b ∈ {b | s b x}) }) h) { val := Sum.inr b, property := h₂ }) [PROOFSTEP] cases h₁ [GOAL] case H.H.refine'_2.mk.inl.mk.inl.sep α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop c : Ordinal.{u_4} α : Type u_4 r : α → α → Prop inst✝¹ : IsWellOrder α r β : Type u_4 s : β → β → Prop inst✝ : IsWellOrder β s h : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r + b' ≤ c l : c < type r + type s x : β this : type r + typein s x < c a b : α h₂ : Sum.inl b ∈ {b | Sum.Lex r s b (Sum.inr x)} ⊢ Subrel 
(Sum.Lex r s) {b | Sum.Lex r s b (Sum.inr x)} { val := Sum.inl a, property := (_ : Sum.Lex r s (Sum.inl a) (Sum.inr x)) } { val := Sum.inl b, property := h₂ } → Sum.Lex r (Subrel s {b | s b x}) ((fun a => Subtype.casesOn a fun val h => Sum.casesOn (motive := fun x_1 => x_1 ∈ {b | Sum.Lex r s b (Sum.inr x)} → α ⊕ ↑{b | s b x}) val (fun a h => Sum.inl a) (fun b h => Sum.inr { val := b, property := (_ : b ∈ {b | s b x}) }) h) { val := Sum.inl a, property := (_ : Sum.Lex r s (Sum.inl a) (Sum.inr x)) }) ((fun a => Subtype.casesOn a fun val h => Sum.casesOn (motive := fun x_1 => x_1 ∈ {b | Sum.Lex r s b (Sum.inr x)} → α ⊕ ↑{b | s b x}) val (fun a h => Sum.inl a) (fun b h => Sum.inr { val := b, property := (_ : b ∈ {b | s b x}) }) h) { val := Sum.inl b, property := h₂ }) [PROOFSTEP] cases h₂ [GOAL] case H.H.refine'_2.mk.inl.mk.inr.sep α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop c : Ordinal.{u_4} α : Type u_4 r : α → α → Prop inst✝¹ : IsWellOrder α r β : Type u_4 s : β → β → Prop inst✝ : IsWellOrder β s h : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r + b' ≤ c l : c < type r + type s x : β this : type r + typein s x < c a : α b : β h₂ : Sum.inr b ∈ {b | Sum.Lex r s b (Sum.inr x)} ⊢ Subrel (Sum.Lex r s) {b | Sum.Lex r s b (Sum.inr x)} { val := Sum.inl a, property := (_ : Sum.Lex r s (Sum.inl a) (Sum.inr x)) } { val := Sum.inr b, property := h₂ } → Sum.Lex r (Subrel s {b | s b x}) ((fun a => Subtype.casesOn a fun val h => Sum.casesOn (motive := fun x_1 => x_1 ∈ {b | Sum.Lex r s b (Sum.inr x)} → α ⊕ ↑{b | s b x}) val (fun a h => Sum.inl a) (fun b h => Sum.inr { val := b, property := (_ : b ∈ {b | s b x}) }) h) { val := Sum.inl a, property := (_ : Sum.Lex r s (Sum.inl a) (Sum.inr x)) }) ((fun a => Subtype.casesOn a fun val h => Sum.casesOn (motive := fun x_1 => x_1 ∈ {b | Sum.Lex r s b (Sum.inr x)} → α ⊕ ↑{b | s b x}) val (fun a h => Sum.inl a) (fun b h => Sum.inr { val := b, property := (_ : b ∈ {b | s b x}) }) h) { val := Sum.inr b, property := h₂ }) [PROOFSTEP] cases h₂ [GOAL] case H.H.refine'_2.mk.inr.mk.inl.inr α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop c : Ordinal.{u_4} α : Type u_4 r : α → α → Prop inst✝¹ : IsWellOrder α r β : Type u_4 s : β → β → Prop inst✝ : IsWellOrder β s h : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r + b' ≤ c l : c < type r + type s x : β this : type r + typein s x < c a : β b : α h₂ : Sum.inl b ∈ {b | Sum.Lex r s b (Sum.inr x)} h✝ : s a x ⊢ Subrel (Sum.Lex r s) {b | Sum.Lex r s b (Sum.inr x)} { val := Sum.inr a, property := (_ : Sum.Lex r s (Sum.inr a) (Sum.inr x)) } { val := Sum.inl b, property := h₂ } → Sum.Lex r (Subrel s {b | s b x}) ((fun a => Subtype.casesOn a fun val h => Sum.casesOn (motive := fun x_1 => x_1 ∈ {b | Sum.Lex r s b (Sum.inr x)} → α ⊕ ↑{b | s b x}) val (fun a h => Sum.inl a) (fun b h => Sum.inr { val := b, property := (_ : b ∈ {b | s b x}) }) h) { val := Sum.inr a, property := (_ : Sum.Lex r s (Sum.inr a) (Sum.inr x)) }) ((fun a => Subtype.casesOn a fun val h => Sum.casesOn (motive := fun x_1 => x_1 ∈ {b | Sum.Lex r s b (Sum.inr x)} → α ⊕ ↑{b | s b x}) val (fun a h => Sum.inl a) (fun b h => Sum.inr { val := b, property := (_ : b ∈ {b | s b x}) }) h) { val := Sum.inl b, property := h₂ }) [PROOFSTEP] cases h₂ [GOAL] case H.H.refine'_2.mk.inr.mk.inr.inr α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop c : Ordinal.{u_4} α : Type u_4 r : α → α → Prop inst✝¹ 
: IsWellOrder α r β : Type u_4 s : β → β → Prop inst✝ : IsWellOrder β s h : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r + b' ≤ c l : c < type r + type s x : β this : type r + typein s x < c a b : β h₂ : Sum.inr b ∈ {b | Sum.Lex r s b (Sum.inr x)} h✝ : s a x ⊢ Subrel (Sum.Lex r s) {b | Sum.Lex r s b (Sum.inr x)} { val := Sum.inr a, property := (_ : Sum.Lex r s (Sum.inr a) (Sum.inr x)) } { val := Sum.inr b, property := h₂ } → Sum.Lex r (Subrel s {b | s b x}) ((fun a => Subtype.casesOn a fun val h => Sum.casesOn (motive := fun x_1 => x_1 ∈ {b | Sum.Lex r s b (Sum.inr x)} → α ⊕ ↑{b | s b x}) val (fun a h => Sum.inl a) (fun b h => Sum.inr { val := b, property := (_ : b ∈ {b | s b x}) }) h) { val := Sum.inr a, property := (_ : Sum.Lex r s (Sum.inr a) (Sum.inr x)) }) ((fun a => Subtype.casesOn a fun val h => Sum.casesOn (motive := fun x_1 => x_1 ∈ {b | Sum.Lex r s b (Sum.inr x)} → α ⊕ ↑{b | s b x}) val (fun a h => Sum.inl a) (fun b h => Sum.inr { val := b, property := (_ : b ∈ {b | s b x}) }) h) { val := Sum.inr b, property := h₂ }) [PROOFSTEP] cases h₂ [GOAL] case H.H.refine'_2.mk.inl.mk.inl.sep.sep α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop c : Ordinal.{u_4} α : Type u_4 r : α → α → Prop inst✝¹ : IsWellOrder α r β : Type u_4 s : β → β → Prop inst✝ : IsWellOrder β s h : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r + b' ≤ c l : c < type r + type s x : β this : type r + typein s x < c a b : α ⊢ Subrel (Sum.Lex r s) {b | Sum.Lex r s b (Sum.inr x)} { val := Sum.inl a, property := (_ : Sum.Lex r s (Sum.inl a) (Sum.inr x)) } { val := Sum.inl b, property := (_ : Sum.Lex r s (Sum.inl b) (Sum.inr x)) } → Sum.Lex r (Subrel s {b | s b x}) ((fun a => Subtype.casesOn a fun val h => Sum.casesOn (motive := fun x_1 => x_1 ∈ {b | Sum.Lex r s b (Sum.inr x)} → α ⊕ ↑{b | s b x}) val (fun a h => Sum.inl a) (fun b h => Sum.inr { val := b, property := (_ : b ∈ {b | s b x}) }) h) { val := Sum.inl a, property := (_ : Sum.Lex r s (Sum.inl a) (Sum.inr x)) }) ((fun a => Subtype.casesOn a fun val h => Sum.casesOn (motive := fun x_1 => x_1 ∈ {b | Sum.Lex r s b (Sum.inr x)} → α ⊕ ↑{b | s b x}) val (fun a h => Sum.inl a) (fun b h => Sum.inr { val := b, property := (_ : b ∈ {b | s b x}) }) h) { val := Sum.inl b, property := (_ : Sum.Lex r s (Sum.inl b) (Sum.inr x)) }) [PROOFSTEP] rintro ⟨⟩ [GOAL] case H.H.refine'_2.mk.inl.mk.inr.sep.inr α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop c : Ordinal.{u_4} α : Type u_4 r : α → α → Prop inst✝¹ : IsWellOrder α r β : Type u_4 s : β → β → Prop inst✝ : IsWellOrder β s h : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r + b' ≤ c l : c < type r + type s x : β this : type r + typein s x < c a : α b : β h✝ : s b x ⊢ Subrel (Sum.Lex r s) {b | Sum.Lex r s b (Sum.inr x)} { val := Sum.inl a, property := (_ : Sum.Lex r s (Sum.inl a) (Sum.inr x)) } { val := Sum.inr b, property := (_ : Sum.Lex r s (Sum.inr b) (Sum.inr x)) } → Sum.Lex r (Subrel s {b | s b x}) ((fun a => Subtype.casesOn a fun val h => Sum.casesOn (motive := fun x_1 => x_1 ∈ {b | Sum.Lex r s b (Sum.inr x)} → α ⊕ ↑{b | s b x}) val (fun a h => Sum.inl a) (fun b h => Sum.inr { val := b, property := (_ : b ∈ {b | s b x}) }) h) { val := Sum.inl a, property := (_ : Sum.Lex r s (Sum.inl a) (Sum.inr x)) }) ((fun a => Subtype.casesOn a fun val h => Sum.casesOn (motive := fun x_1 => x_1 ∈ {b | Sum.Lex r s b (Sum.inr x)} → α ⊕ ↑{b | s b x}) val (fun a h => Sum.inl a) (fun b h => 
Sum.inr { val := b, property := (_ : b ∈ {b | s b x}) }) h) { val := Sum.inr b, property := (_ : Sum.Lex r s (Sum.inr b) (Sum.inr x)) }) [PROOFSTEP] rintro ⟨⟩ [GOAL] case H.H.refine'_2.mk.inr.mk.inl.inr.sep α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop c : Ordinal.{u_4} α : Type u_4 r : α → α → Prop inst✝¹ : IsWellOrder α r β : Type u_4 s : β → β → Prop inst✝ : IsWellOrder β s h : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r + b' ≤ c l : c < type r + type s x : β this : type r + typein s x < c a : β b : α h✝ : s a x ⊢ Subrel (Sum.Lex r s) {b | Sum.Lex r s b (Sum.inr x)} { val := Sum.inr a, property := (_ : Sum.Lex r s (Sum.inr a) (Sum.inr x)) } { val := Sum.inl b, property := (_ : Sum.Lex r s (Sum.inl b) (Sum.inr x)) } → Sum.Lex r (Subrel s {b | s b x}) ((fun a => Subtype.casesOn a fun val h => Sum.casesOn (motive := fun x_1 => x_1 ∈ {b | Sum.Lex r s b (Sum.inr x)} → α ⊕ ↑{b | s b x}) val (fun a h => Sum.inl a) (fun b h => Sum.inr { val := b, property := (_ : b ∈ {b | s b x}) }) h) { val := Sum.inr a, property := (_ : Sum.Lex r s (Sum.inr a) (Sum.inr x)) }) ((fun a => Subtype.casesOn a fun val h => Sum.casesOn (motive := fun x_1 => x_1 ∈ {b | Sum.Lex r s b (Sum.inr x)} → α ⊕ ↑{b | s b x}) val (fun a h => Sum.inl a) (fun b h => Sum.inr { val := b, property := (_ : b ∈ {b | s b x}) }) h) { val := Sum.inl b, property := (_ : Sum.Lex r s (Sum.inl b) (Sum.inr x)) }) [PROOFSTEP] rintro ⟨⟩ [GOAL] case H.H.refine'_2.mk.inr.mk.inr.inr.inr α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop c : Ordinal.{u_4} α : Type u_4 r : α → α → Prop inst✝¹ : IsWellOrder α r β : Type u_4 s : β → β → Prop inst✝ : IsWellOrder β s h : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r + b' ≤ c l : c < type r + type s x : β this : type r + typein s x < c a b : β h✝¹ : s a x h✝ : s b x ⊢ Subrel (Sum.Lex r s) {b | Sum.Lex r s b (Sum.inr x)} { val := Sum.inr a, property := (_ : Sum.Lex r s (Sum.inr a) (Sum.inr x)) } { val := Sum.inr b, property := (_ : Sum.Lex r s (Sum.inr b) (Sum.inr x)) } → Sum.Lex r (Subrel s {b | s b x}) ((fun a => Subtype.casesOn a fun val h => Sum.casesOn (motive := fun x_1 => x_1 ∈ {b | Sum.Lex r s b (Sum.inr x)} → α ⊕ ↑{b | s b x}) val (fun a h => Sum.inl a) (fun b h => Sum.inr { val := b, property := (_ : b ∈ {b | s b x}) }) h) { val := Sum.inr a, property := (_ : Sum.Lex r s (Sum.inr a) (Sum.inr x)) }) ((fun a => Subtype.casesOn a fun val h => Sum.casesOn (motive := fun x_1 => x_1 ∈ {b | Sum.Lex r s b (Sum.inr x)} → α ⊕ ↑{b | s b x}) val (fun a h => Sum.inl a) (fun b h => Sum.inr { val := b, property := (_ : b ∈ {b | s b x}) }) h) { val := Sum.inr b, property := (_ : Sum.Lex r s (Sum.inr b) (Sum.inr x)) }) [PROOFSTEP] rintro ⟨⟩ [GOAL] case H.H.refine'_2.mk.inl.mk.inl.sep.sep.inl α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop c : Ordinal.{u_4} α : Type u_4 r : α → α → Prop inst✝¹ : IsWellOrder α r β : Type u_4 s : β → β → Prop inst✝ : IsWellOrder β s h : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r + b' ≤ c l : c < type r + type s x : β this : type r + typein s x < c a b : α h✝ : r a b ⊢ Sum.Lex r (Subrel s {b | s b x}) ((fun a => Subtype.casesOn a fun val h => Sum.casesOn (motive := fun x_1 => x_1 ∈ {b | Sum.Lex r s b (Sum.inr x)} → α ⊕ ↑{b | s b x}) val (fun a h => Sum.inl a) (fun b h => Sum.inr { val := b, property := (_ : b ∈ {b | s b x}) }) h) { val := Sum.inl a, property := (_ : 
Sum.Lex r s (Sum.inl a) (Sum.inr x)) }) ((fun a => Subtype.casesOn a fun val h => Sum.casesOn (motive := fun x_1 => x_1 ∈ {b | Sum.Lex r s b (Sum.inr x)} → α ⊕ ↑{b | s b x}) val (fun a h => Sum.inl a) (fun b h => Sum.inr { val := b, property := (_ : b ∈ {b | s b x}) }) h) { val := Sum.inl b, property := (_ : Sum.Lex r s (Sum.inl b) (Sum.inr x)) }) [PROOFSTEP] constructor [GOAL] case H.H.refine'_2.mk.inl.mk.inr.sep.inr.sep α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop c : Ordinal.{u_4} α : Type u_4 r : α → α → Prop inst✝¹ : IsWellOrder α r β : Type u_4 s : β → β → Prop inst✝ : IsWellOrder β s h : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r + b' ≤ c l : c < type r + type s x : β this : type r + typein s x < c a : α b : β h✝ : s b x ⊢ Sum.Lex r (Subrel s {b | s b x}) ((fun a => Subtype.casesOn a fun val h => Sum.casesOn (motive := fun x_1 => x_1 ∈ {b | Sum.Lex r s b (Sum.inr x)} → α ⊕ ↑{b | s b x}) val (fun a h => Sum.inl a) (fun b h => Sum.inr { val := b, property := (_ : b ∈ {b | s b x}) }) h) { val := Sum.inl a, property := (_ : Sum.Lex r s (Sum.inl a) (Sum.inr x)) }) ((fun a => Subtype.casesOn a fun val h => Sum.casesOn (motive := fun x_1 => x_1 ∈ {b | Sum.Lex r s b (Sum.inr x)} → α ⊕ ↑{b | s b x}) val (fun a h => Sum.inl a) (fun b h => Sum.inr { val := b, property := (_ : b ∈ {b | s b x}) }) h) { val := Sum.inr b, property := (_ : Sum.Lex r s (Sum.inr b) (Sum.inr x)) }) [PROOFSTEP] constructor [GOAL] case H.H.refine'_2.mk.inr.mk.inr.inr.inr.inr α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop c : Ordinal.{u_4} α : Type u_4 r : α → α → Prop inst✝¹ : IsWellOrder α r β : Type u_4 s : β → β → Prop inst✝ : IsWellOrder β s h : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r + b' ≤ c l : c < type r + type s x : β this : type r + typein s x < c a b : β h✝² : s a x h✝¹ : s b x h✝ : s a b ⊢ Sum.Lex r (Subrel s {b | s b x}) ((fun a => Subtype.casesOn a fun val h => Sum.casesOn (motive := fun x_1 => x_1 ∈ {b | Sum.Lex r s b (Sum.inr x)} → α ⊕ ↑{b | s b x}) val (fun a h => Sum.inl a) (fun b h => Sum.inr { val := b, property := (_ : b ∈ {b | s b x}) }) h) { val := Sum.inr a, property := (_ : Sum.Lex r s (Sum.inr a) (Sum.inr x)) }) ((fun a => Subtype.casesOn a fun val h => Sum.casesOn (motive := fun x_1 => x_1 ∈ {b | Sum.Lex r s b (Sum.inr x)} → α ⊕ ↑{b | s b x}) val (fun a h => Sum.inl a) (fun b h => Sum.inr { val := b, property := (_ : b ∈ {b | s b x}) }) h) { val := Sum.inr b, property := (_ : Sum.Lex r s (Sum.inr b) (Sum.inr x)) }) [PROOFSTEP] constructor [GOAL] case H.H.refine'_2.mk.inl.mk.inl.sep.sep.inl.h α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop c : Ordinal.{u_4} α : Type u_4 r : α → α → Prop inst✝¹ : IsWellOrder α r β : Type u_4 s : β → β → Prop inst✝ : IsWellOrder β s h : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r + b' ≤ c l : c < type r + type s x : β this : type r + typein s x < c a b : α h✝ : r a b ⊢ r a b [PROOFSTEP] assumption [GOAL] case H.H.refine'_2.mk.inr.mk.inr.inr.inr.inr.h α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop c : Ordinal.{u_4} α : Type u_4 r : α → α → Prop inst✝¹ : IsWellOrder α r β : Type u_4 s : β → β → Prop inst✝ : IsWellOrder β s h : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r + b' ≤ c l : c < type r + type s x : β this : type r + typein s x < c a b : β h✝² : s a x h✝¹ : s b x h✝ 
: s a b ⊢ Subrel s {b | s b x} { val := a, property := (_ : a ∈ {b | s b x}) } { val := b, property := (_ : b ∈ {b | s b x}) } [PROOFSTEP] assumption [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} h : b ≤ a ⊢ b + (a - b) ≤ a [PROOFSTEP] rcases zero_or_succ_or_limit (a - b) with (e | ⟨c, e⟩ | l) [GOAL] case inl α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} h : b ≤ a e : a - b = 0 ⊢ b + (a - b) ≤ a [PROOFSTEP] simp only [e, add_zero, h] [GOAL] case inr.inl.intro α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} h : b ≤ a c : Ordinal.{u_4} e : a - b = succ c ⊢ b + (a - b) ≤ a [PROOFSTEP] rw [e, add_succ, succ_le_iff, ← lt_sub, e] [GOAL] case inr.inl.intro α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} h : b ≤ a c : Ordinal.{u_4} e : a - b = succ c ⊢ c < succ c [PROOFSTEP] exact lt_succ c [GOAL] case inr.inr α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} h : b ≤ a l : IsLimit (a - b) ⊢ b + (a - b) ≤ a [PROOFSTEP] exact (add_le_of_limit l).2 fun c l => (lt_sub.1 l).le [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b c : Ordinal.{u_4} h : b ≤ a ⊢ c ≤ a - b ↔ b + c ≤ a [PROOFSTEP] rw [← add_le_add_iff_left b, Ordinal.add_sub_cancel_of_le h] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a : Ordinal.{u_4} ⊢ a - 0 = a [PROOFSTEP] simpa only [zero_add] using add_sub_cancel 0 a [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a : Ordinal.{u_4} ⊢ 0 - a = 0 [PROOFSTEP] rw [← Ordinal.le_zero] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a : Ordinal.{u_4} ⊢ 0 - a ≤ 0 [PROOFSTEP] apply sub_le_self [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a : Ordinal.{u_4} ⊢ a - a = 0 [PROOFSTEP] simpa only [add_zero] using add_sub_cancel a 0 [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} h : a - b = 0 ⊢ a ≤ b [PROOFSTEP] simpa only [h, add_zero] using le_add_sub a b [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} h : a ≤ b ⊢ a - b = 0 [PROOFSTEP] rwa [← Ordinal.le_zero, sub_le, add_zero] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b c d : Ordinal.{u_4} ⊢ a - b - c ≤ d ↔ a - (b + c) ≤ d [PROOFSTEP] rw [sub_le, sub_le, sub_le, add_assoc] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b c : Ordinal.{u_4} ⊢ a + b - (a + c) = b - c [PROOFSTEP] rw [← sub_sub, add_sub_cancel] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} l : IsLimit a h : b < a ⊢ b + 0 < a [PROOFSTEP] rwa [add_zero] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} l : IsLimit a h✝ : b < a c : Ordinal.{u_4} h : c < a - b ⊢ succ c < a - b [PROOFSTEP] rw [lt_sub, add_succ] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} l : IsLimit a h✝ : b < a c : 
Ordinal.{u_4} h : c < a - b ⊢ succ (b + c) < a [PROOFSTEP] exact l.2 _ (lt_sub.1 h) [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ⊢ 1 + ω = ω [PROOFSTEP] refine' le_antisymm _ (le_add_left _ _) [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ⊢ 1 + ω ≤ ω [PROOFSTEP] rw [omega, ← lift_one.{_, 0}, ← lift_add, lift_le, ← type_unit, ← type_sum_lex] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ⊢ type (Sum.Lex EmptyRelation fun x x_1 => x < x_1) ≤ type fun x x_1 => x < x_1 [PROOFSTEP] refine' ⟨RelEmbedding.collapse (RelEmbedding.ofMonotone _ _)⟩ [GOAL] case refine'_1 α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ⊢ Unit ⊕ ℕ → ℕ [PROOFSTEP] apply Sum.rec [GOAL] case refine'_1.inl α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ⊢ Unit → ℕ case refine'_1.inr α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ⊢ ℕ → ℕ [PROOFSTEP] exact fun _ => 0 [GOAL] case refine'_1.inr α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ⊢ ℕ → ℕ [PROOFSTEP] exact Nat.succ [GOAL] case refine'_2 α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ⊢ ∀ (a b : Unit ⊕ ℕ), Sum.Lex EmptyRelation (fun x x_1 => x < x_1) a b → Sum.rec (fun x => 0) Nat.succ a < Sum.rec (fun x => 0) Nat.succ b [PROOFSTEP] intro a b [GOAL] case refine'_2 α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Unit ⊕ ℕ ⊢ Sum.Lex EmptyRelation (fun x x_1 => x < x_1) a b → Sum.rec (fun x => 0) Nat.succ a < Sum.rec (fun x => 0) Nat.succ b [PROOFSTEP] cases a <;> cases b <;> intro H <;> cases' H with _ _ H _ _ H <;> [exact H.elim; exact Nat.succ_pos _; exact Nat.succ_lt_succ H] [GOAL] case refine'_2 α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Unit ⊕ ℕ ⊢ Sum.Lex EmptyRelation (fun x x_1 => x < x_1) a b → Sum.rec (fun x => 0) Nat.succ a < Sum.rec (fun x => 0) Nat.succ b [PROOFSTEP] cases a [GOAL] case refine'_2.inl α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop b : Unit ⊕ ℕ val✝ : Unit ⊢ Sum.Lex EmptyRelation (fun x x_1 => x < x_1) (Sum.inl val✝) b → Sum.rec (fun x => 0) Nat.succ (Sum.inl val✝) < Sum.rec (fun x => 0) Nat.succ b [PROOFSTEP] cases b [GOAL] case refine'_2.inr α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop b : Unit ⊕ ℕ val✝ : ℕ ⊢ Sum.Lex EmptyRelation (fun x x_1 => x < x_1) (Sum.inr val✝) b → Sum.rec (fun x => 0) Nat.succ (Sum.inr val✝) < Sum.rec (fun x => 0) Nat.succ b [PROOFSTEP] cases b [GOAL] case refine'_2.inl.inl α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop val✝¹ val✝ : Unit ⊢ Sum.Lex EmptyRelation (fun x x_1 => x < x_1) (Sum.inl val✝¹) (Sum.inl val✝) → Sum.rec (fun x => 0) Nat.succ (Sum.inl val✝¹) < Sum.rec (fun x => 0) Nat.succ (Sum.inl val✝) [PROOFSTEP] intro H [GOAL] case refine'_2.inl.inr α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop val✝¹ : Unit val✝ : ℕ ⊢ Sum.Lex EmptyRelation (fun x x_1 => x < x_1) (Sum.inl val✝¹) (Sum.inr val✝) → Sum.rec (fun x => 0) Nat.succ (Sum.inl val✝¹) < Sum.rec (fun x => 0) Nat.succ (Sum.inr val✝) [PROOFSTEP] intro H [GOAL] case refine'_2.inr.inl α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α 
→ Prop s : β → β → Prop t : γ → γ → Prop val✝¹ : ℕ val✝ : Unit ⊢ Sum.Lex EmptyRelation (fun x x_1 => x < x_1) (Sum.inr val✝¹) (Sum.inl val✝) → Sum.rec (fun x => 0) Nat.succ (Sum.inr val✝¹) < Sum.rec (fun x => 0) Nat.succ (Sum.inl val✝) [PROOFSTEP] intro H [GOAL] case refine'_2.inr.inr α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop val✝¹ val✝ : ℕ ⊢ Sum.Lex EmptyRelation (fun x x_1 => x < x_1) (Sum.inr val✝¹) (Sum.inr val✝) → Sum.rec (fun x => 0) Nat.succ (Sum.inr val✝¹) < Sum.rec (fun x => 0) Nat.succ (Sum.inr val✝) [PROOFSTEP] intro H [GOAL] case refine'_2.inl.inl α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop val✝¹ val✝ : Unit H : Sum.Lex EmptyRelation (fun x x_1 => x < x_1) (Sum.inl val✝¹) (Sum.inl val✝) ⊢ Sum.rec (fun x => 0) Nat.succ (Sum.inl val✝¹) < Sum.rec (fun x => 0) Nat.succ (Sum.inl val✝) [PROOFSTEP] cases' H with _ _ H _ _ H [GOAL] case refine'_2.inl.inr α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop val✝¹ : Unit val✝ : ℕ H : Sum.Lex EmptyRelation (fun x x_1 => x < x_1) (Sum.inl val✝¹) (Sum.inr val✝) ⊢ Sum.rec (fun x => 0) Nat.succ (Sum.inl val✝¹) < Sum.rec (fun x => 0) Nat.succ (Sum.inr val✝) [PROOFSTEP] cases' H with _ _ H _ _ H [GOAL] case refine'_2.inr.inl α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop val✝¹ : ℕ val✝ : Unit H : Sum.Lex EmptyRelation (fun x x_1 => x < x_1) (Sum.inr val✝¹) (Sum.inl val✝) ⊢ Sum.rec (fun x => 0) Nat.succ (Sum.inr val✝¹) < Sum.rec (fun x => 0) Nat.succ (Sum.inl val✝) [PROOFSTEP] cases' H with _ _ H _ _ H [GOAL] case refine'_2.inr.inr α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop val✝¹ val✝ : ℕ H : Sum.Lex EmptyRelation (fun x x_1 => x < x_1) (Sum.inr val✝¹) (Sum.inr val✝) ⊢ Sum.rec (fun x => 0) Nat.succ (Sum.inr val✝¹) < Sum.rec (fun x => 0) Nat.succ (Sum.inr val✝) [PROOFSTEP] cases' H with _ _ H _ _ H [GOAL] case refine'_2.inl.inl.inl α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop val✝¹ val✝ a₁✝ a₂✝ : Unit H : EmptyRelation a₁✝ a₂✝ ⊢ Sum.rec (fun x => 0) Nat.succ (Sum.inl val✝¹) < Sum.rec (fun x => 0) Nat.succ (Sum.inl val✝) [PROOFSTEP] exact H.elim [GOAL] case refine'_2.inl.inr.sep α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop val✝¹ : Unit val✝ : ℕ a✝ : Unit ⊢ Sum.rec (fun x => 0) Nat.succ (Sum.inl val✝¹) < Sum.rec (fun x => 0) Nat.succ (Sum.inr val✝) [PROOFSTEP] exact Nat.succ_pos _ [GOAL] case refine'_2.inr.inr.inr α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop val✝¹ val✝ : ℕ H : val✝¹ < val✝ ⊢ Sum.rec (fun x => 0) Nat.succ (Sum.inr val✝¹) < Sum.rec (fun x => 0) Nat.succ (Sum.inr val✝) [PROOFSTEP] exact Nat.succ_lt_succ H [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u_4} h : ω ≤ o ⊢ 1 + o = o [PROOFSTEP] rw [← Ordinal.add_sub_cancel_of_le h, ← add_assoc, one_add_omega] [GOAL] α✝ : Type u_1 β✝ : Type u_2 γ✝ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t✝ : γ✝ → γ✝ → Prop a✝ b✝ c : Ordinal.{u} x✝² x✝¹ x✝ : WellOrder α : Type u r : α → α → Prop wo✝² : IsWellOrder α r β : Type u s : β → β → Prop wo✝¹ : IsWellOrder β s γ : Type u t : γ → γ → Prop wo✝ : IsWellOrder γ t a b : (γ × β) × α ⊢ Prod.Lex t (Prod.Lex s r) (↑(prodAssoc γ β α) a) (↑(prodAssoc γ β α) b) ↔ Prod.Lex (Prod.Lex t s) r a b [PROOFSTEP] rcases a with ⟨⟨a₁, a₂⟩, a₃⟩ [GOAL] 
case mk.mk α✝ : Type u_1 β✝ : Type u_2 γ✝ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t✝ : γ✝ → γ✝ → Prop a b✝ c : Ordinal.{u} x✝² x✝¹ x✝ : WellOrder α : Type u r : α → α → Prop wo✝² : IsWellOrder α r β : Type u s : β → β → Prop wo✝¹ : IsWellOrder β s γ : Type u t : γ → γ → Prop wo✝ : IsWellOrder γ t b : (γ × β) × α a₃ : α a₁ : γ a₂ : β ⊢ Prod.Lex t (Prod.Lex s r) (↑(prodAssoc γ β α) ((a₁, a₂), a₃)) (↑(prodAssoc γ β α) b) ↔ Prod.Lex (Prod.Lex t s) r ((a₁, a₂), a₃) b [PROOFSTEP] rcases b with ⟨⟨b₁, b₂⟩, b₃⟩ [GOAL] case mk.mk.mk.mk α✝ : Type u_1 β✝ : Type u_2 γ✝ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t✝ : γ✝ → γ✝ → Prop a b c : Ordinal.{u} x✝² x✝¹ x✝ : WellOrder α : Type u r : α → α → Prop wo✝² : IsWellOrder α r β : Type u s : β → β → Prop wo✝¹ : IsWellOrder β s γ : Type u t : γ → γ → Prop wo✝ : IsWellOrder γ t a₃ : α a₁ : γ a₂ : β b₃ : α b₁ : γ b₂ : β ⊢ Prod.Lex t (Prod.Lex s r) (↑(prodAssoc γ β α) ((a₁, a₂), a₃)) (↑(prodAssoc γ β α) ((b₁, b₂), b₃)) ↔ Prod.Lex (Prod.Lex t s) r ((a₁, a₂), a₃) ((b₁, b₂), b₃) [PROOFSTEP] simp [Prod.lex_def, and_or_left, or_assoc, and_assoc] [GOAL] α✝ : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s : β → β → Prop t : γ → γ → Prop a✝ : Ordinal.{u} α : Type u r : α → α → Prop x✝ : IsWellOrder α r a b : α × PUnit ⊢ r (↑(prodPUnit α) a) (↑(prodPUnit α) b) ↔ Prod.Lex r EmptyRelation a b [PROOFSTEP] rcases a with ⟨a, ⟨⟨⟩⟩⟩ [GOAL] case mk.unit α✝ : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s : β → β → Prop t : γ → γ → Prop a✝ : Ordinal.{u} α : Type u r : α → α → Prop x✝ : IsWellOrder α r b : α × PUnit a : α ⊢ r (↑(prodPUnit α) (a, PUnit.unit)) (↑(prodPUnit α) b) ↔ Prod.Lex r EmptyRelation (a, PUnit.unit) b [PROOFSTEP] rcases b with ⟨b, ⟨⟨⟩⟩⟩ [GOAL] case mk.unit.mk.unit α✝ : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s : β → β → Prop t : γ → γ → Prop a✝ : Ordinal.{u} α : Type u r : α → α → Prop x✝ : IsWellOrder α r a b : α ⊢ r (↑(prodPUnit α) (a, PUnit.unit)) (↑(prodPUnit α) (b, PUnit.unit)) ↔ Prod.Lex r EmptyRelation (a, PUnit.unit) (b, PUnit.unit) [PROOFSTEP] simp only [Prod.lex_def, EmptyRelation, and_false_iff, or_false_iff] [GOAL] case mk.unit.mk.unit α✝ : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s : β → β → Prop t : γ → γ → Prop a✝ : Ordinal.{u} α : Type u r : α → α → Prop x✝ : IsWellOrder α r a b : α ⊢ r (↑(prodPUnit α) (a, PUnit.unit)) (↑(prodPUnit α) (b, PUnit.unit)) ↔ r a b [PROOFSTEP] rfl [GOAL] α✝ : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s : β → β → Prop t : γ → γ → Prop a✝ : Ordinal.{u} α : Type u r : α → α → Prop x✝ : IsWellOrder α r a b : PUnit × α ⊢ r (↑(punitProd α) a) (↑(punitProd α) b) ↔ Prod.Lex EmptyRelation r a b [PROOFSTEP] rcases a with ⟨⟨⟨⟩⟩, a⟩ [GOAL] case mk.unit α✝ : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s : β → β → Prop t : γ → γ → Prop a✝ : Ordinal.{u} α : Type u r : α → α → Prop x✝ : IsWellOrder α r b : PUnit × α a : α ⊢ r (↑(punitProd α) (PUnit.unit, a)) (↑(punitProd α) b) ↔ Prod.Lex EmptyRelation r (PUnit.unit, a) b [PROOFSTEP] rcases b with ⟨⟨⟨⟩⟩, b⟩ [GOAL] case mk.unit.mk.unit α✝ : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s : β → β → Prop t : γ → γ → Prop a✝ : Ordinal.{u} α : Type u r : α → α → Prop x✝ : IsWellOrder α r a b : α ⊢ r (↑(punitProd α) (PUnit.unit, a)) (↑(punitProd α) (PUnit.unit, b)) ↔ Prod.Lex EmptyRelation r (PUnit.unit, a) (PUnit.unit, b) [PROOFSTEP] simp only [Prod.lex_def, EmptyRelation, false_or_iff] [GOAL] case mk.unit.mk.unit α✝ : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s : β → β → 
Prop t : γ → γ → Prop a✝ : Ordinal.{u} α : Type u r : α → α → Prop x✝ : IsWellOrder α r a b : α ⊢ r (↑(punitProd α) (PUnit.unit, a)) (↑(punitProd α) (PUnit.unit, b)) ↔ True ∧ r a b [PROOFSTEP] simp only [eq_self_iff_true, true_and_iff] [GOAL] case mk.unit.mk.unit α✝ : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s : β → β → Prop t : γ → γ → Prop a✝ : Ordinal.{u} α : Type u r : α → α → Prop x✝ : IsWellOrder α r a b : α ⊢ r (↑(punitProd α) (PUnit.unit, a)) (↑(punitProd α) (PUnit.unit, b)) ↔ r a b [PROOFSTEP] rfl [GOAL] α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r : α✝ → α✝ → Prop s : β✝ → β✝ → Prop t : γ → γ → Prop a b : Ordinal.{u_4} α : Type u_4 x✝³ : α → α → Prop x✝² : IsWellOrder α x✝³ β : Type u_4 x✝¹ : β → β → Prop x✝ : IsWellOrder β x✝¹ ⊢ type x✝³ * type x✝¹ = 0 ↔ type x✝³ = 0 ∨ type x✝¹ = 0 [PROOFSTEP] simp_rw [← type_prod_lex, type_eq_zero_iff_isEmpty] [GOAL] α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r : α✝ → α✝ → Prop s : β✝ → β✝ → Prop t : γ → γ → Prop a b : Ordinal.{u_4} α : Type u_4 x✝³ : α → α → Prop x✝² : IsWellOrder α x✝³ β : Type u_4 x✝¹ : β → β → Prop x✝ : IsWellOrder β x✝¹ ⊢ IsEmpty (β × α) ↔ IsEmpty α ∨ IsEmpty β [PROOFSTEP] rw [or_comm] [GOAL] α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r : α✝ → α✝ → Prop s : β✝ → β✝ → Prop t : γ → γ → Prop a b : Ordinal.{u_4} α : Type u_4 x✝³ : α → α → Prop x✝² : IsWellOrder α x✝³ β : Type u_4 x✝¹ : β → β → Prop x✝ : IsWellOrder β x✝¹ ⊢ IsEmpty (β × α) ↔ IsEmpty β ∨ IsEmpty α [PROOFSTEP] exact isEmpty_prod [GOAL] α✝ : Type u_1 β✝ : Type u_2 γ✝ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t✝ : γ✝ → γ✝ → Prop a b c : Ordinal.{u} x✝² x✝¹ x✝ : WellOrder α : Type u r : α → α → Prop wo✝² : IsWellOrder α r β : Type u s : β → β → Prop wo✝¹ : IsWellOrder β s γ : Type u t : γ → γ → Prop wo✝ : IsWellOrder γ t ⊢ ∀ {a b : (β ⊕ γ) × α}, Sum.Lex (Prod.Lex s r) (Prod.Lex t r) (↑(sumProdDistrib β γ α) a) (↑(sumProdDistrib β γ α) b) ↔ Prod.Lex (Sum.Lex s t) r a b [PROOFSTEP] rintro ⟨a₁ | a₁, a₂⟩ ⟨b₁ | b₁, b₂⟩ [GOAL] case mk.inl.mk.inl α✝ : Type u_1 β✝ : Type u_2 γ✝ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t✝ : γ✝ → γ✝ → Prop a b c : Ordinal.{u} x✝² x✝¹ x✝ : WellOrder α : Type u r : α → α → Prop wo✝² : IsWellOrder α r β : Type u s : β → β → Prop wo✝¹ : IsWellOrder β s γ : Type u t : γ → γ → Prop wo✝ : IsWellOrder γ t a₂ : α a₁ : β b₂ : α b₁ : β ⊢ Sum.Lex (Prod.Lex s r) (Prod.Lex t r) (↑(sumProdDistrib β γ α) (Sum.inl a₁, a₂)) (↑(sumProdDistrib β γ α) (Sum.inl b₁, b₂)) ↔ Prod.Lex (Sum.Lex s t) r (Sum.inl a₁, a₂) (Sum.inl b₁, b₂) [PROOFSTEP] simp only [Prod.lex_def, Sum.lex_inl_inl, Sum.Lex.sep, Sum.lex_inr_inl, Sum.lex_inr_inr, sumProdDistrib_apply_left, sumProdDistrib_apply_right] [GOAL] case mk.inl.mk.inr α✝ : Type u_1 β✝ : Type u_2 γ✝ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t✝ : γ✝ → γ✝ → Prop a b c : Ordinal.{u} x✝² x✝¹ x✝ : WellOrder α : Type u r : α → α → Prop wo✝² : IsWellOrder α r β : Type u s : β → β → Prop wo✝¹ : IsWellOrder β s γ : Type u t : γ → γ → Prop wo✝ : IsWellOrder γ t a₂ : α a₁ : β b₂ : α b₁ : γ ⊢ Sum.Lex (Prod.Lex s r) (Prod.Lex t r) (↑(sumProdDistrib β γ α) (Sum.inl a₁, a₂)) (↑(sumProdDistrib β γ α) (Sum.inr b₁, b₂)) ↔ Prod.Lex (Sum.Lex s t) r (Sum.inl a₁, a₂) (Sum.inr b₁, b₂) [PROOFSTEP] simp only [Prod.lex_def, Sum.lex_inl_inl, Sum.Lex.sep, Sum.lex_inr_inl, Sum.lex_inr_inr, sumProdDistrib_apply_left, sumProdDistrib_apply_right] [GOAL] case mk.inr.mk.inl α✝ : Type u_1 β✝ : Type u_2 γ✝ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t✝ : γ✝ → γ✝ → Prop a b c : Ordinal.{u} x✝² x✝¹ x✝ : WellOrder α : 
Type u r : α → α → Prop wo✝² : IsWellOrder α r β : Type u s : β → β → Prop wo✝¹ : IsWellOrder β s γ : Type u t : γ → γ → Prop wo✝ : IsWellOrder γ t a₂ : α a₁ : γ b₂ : α b₁ : β ⊢ Sum.Lex (Prod.Lex s r) (Prod.Lex t r) (↑(sumProdDistrib β γ α) (Sum.inr a₁, a₂)) (↑(sumProdDistrib β γ α) (Sum.inl b₁, b₂)) ↔ Prod.Lex (Sum.Lex s t) r (Sum.inr a₁, a₂) (Sum.inl b₁, b₂) [PROOFSTEP] simp only [Prod.lex_def, Sum.lex_inl_inl, Sum.Lex.sep, Sum.lex_inr_inl, Sum.lex_inr_inr, sumProdDistrib_apply_left, sumProdDistrib_apply_right] [GOAL] case mk.inr.mk.inr α✝ : Type u_1 β✝ : Type u_2 γ✝ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t✝ : γ✝ → γ✝ → Prop a b c : Ordinal.{u} x✝² x✝¹ x✝ : WellOrder α : Type u r : α → α → Prop wo✝² : IsWellOrder α r β : Type u s : β → β → Prop wo✝¹ : IsWellOrder β s γ : Type u t : γ → γ → Prop wo✝ : IsWellOrder γ t a₂ : α a₁ : γ b₂ : α b₁ : γ ⊢ Sum.Lex (Prod.Lex s r) (Prod.Lex t r) (↑(sumProdDistrib β γ α) (Sum.inr a₁, a₂)) (↑(sumProdDistrib β γ α) (Sum.inr b₁, b₂)) ↔ Prod.Lex (Sum.Lex s t) r (Sum.inr a₁, a₂) (Sum.inr b₁, b₂) [PROOFSTEP] simp only [Prod.lex_def, Sum.lex_inl_inl, Sum.Lex.sep, Sum.lex_inr_inl, Sum.lex_inr_inr, sumProdDistrib_apply_left, sumProdDistrib_apply_right] [GOAL] case mk.inl.mk.inl α✝ : Type u_1 β✝ : Type u_2 γ✝ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t✝ : γ✝ → γ✝ → Prop a b c : Ordinal.{u} x✝² x✝¹ x✝ : WellOrder α : Type u r : α → α → Prop wo✝² : IsWellOrder α r β : Type u s : β → β → Prop wo✝¹ : IsWellOrder β s γ : Type u t : γ → γ → Prop wo✝ : IsWellOrder γ t a₂ : α a₁ : β b₂ : α b₁ : β ⊢ s a₁ b₁ ∨ a₁ = b₁ ∧ r a₂ b₂ ↔ s a₁ b₁ ∨ Sum.inl a₁ = Sum.inl b₁ ∧ r a₂ b₂ [PROOFSTEP] simp only [Sum.inl.inj_iff, Sum.inr.inj_iff, true_or_iff, false_and_iff, false_or_iff] [GOAL] case mk.inl.mk.inr α✝ : Type u_1 β✝ : Type u_2 γ✝ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t✝ : γ✝ → γ✝ → Prop a b c : Ordinal.{u} x✝² x✝¹ x✝ : WellOrder α : Type u r : α → α → Prop wo✝² : IsWellOrder α r β : Type u s : β → β → Prop wo✝¹ : IsWellOrder β s γ : Type u t : γ → γ → Prop wo✝ : IsWellOrder γ t a₂ : α a₁ : β b₂ : α b₁ : γ ⊢ True ↔ True ∨ False ∧ r a₂ b₂ [PROOFSTEP] simp only [Sum.inl.inj_iff, Sum.inr.inj_iff, true_or_iff, false_and_iff, false_or_iff] [GOAL] case mk.inr.mk.inl α✝ : Type u_1 β✝ : Type u_2 γ✝ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t✝ : γ✝ → γ✝ → Prop a b c : Ordinal.{u} x✝² x✝¹ x✝ : WellOrder α : Type u r : α → α → Prop wo✝² : IsWellOrder α r β : Type u s : β → β → Prop wo✝¹ : IsWellOrder β s γ : Type u t : γ → γ → Prop wo✝ : IsWellOrder γ t a₂ : α a₁ : γ b₂ : α b₁ : β ⊢ False ↔ False ∨ False ∧ r a₂ b₂ [PROOFSTEP] simp only [Sum.inl.inj_iff, Sum.inr.inj_iff, true_or_iff, false_and_iff, false_or_iff] [GOAL] case mk.inr.mk.inr α✝ : Type u_1 β✝ : Type u_2 γ✝ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t✝ : γ✝ → γ✝ → Prop a b c : Ordinal.{u} x✝² x✝¹ x✝ : WellOrder α : Type u r : α → α → Prop wo✝² : IsWellOrder α r β : Type u s : β → β → Prop wo✝¹ : IsWellOrder β s γ : Type u t : γ → γ → Prop wo✝ : IsWellOrder γ t a₂ : α a₁ : γ b₂ : α b₁ : γ ⊢ t a₁ b₁ ∨ a₁ = b₁ ∧ r a₂ b₂ ↔ t a₁ b₁ ∨ Sum.inr a₁ = Sum.inr b₁ ∧ r a₂ b₂ [PROOFSTEP] simp only [Sum.inl.inj_iff, Sum.inr.inj_iff, true_or_iff, false_and_iff, false_or_iff] [GOAL] α✝ : Type u_1 β✝ : Type u_2 γ✝ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t✝ : γ✝ → γ✝ → Prop c a b : Ordinal.{u} x✝³ x✝² x✝¹ : WellOrder α : Type u r : α → α → Prop wo✝² : IsWellOrder α r β : Type u s : β → β → Prop wo✝¹ : IsWellOrder β s x✝ : Quotient.mk isEquivalent { α := α, r := r, wo := wo✝² } ≤ 
Quotient.mk isEquivalent { α := β, r := s, wo := wo✝¹ } γ : Type u t : γ → γ → Prop wo✝ : IsWellOrder γ t f : r ≼i s ⊢ Quotient.mk isEquivalent { α := γ, r := t, wo := wo✝ } * Quotient.mk isEquivalent { α := α, r := r, wo := wo✝² } ≤ Quotient.mk isEquivalent { α := γ, r := t, wo := wo✝ } * Quotient.mk isEquivalent { α := β, r := s, wo := wo✝¹ } [PROOFSTEP] refine' (RelEmbedding.ofMonotone (fun a : α × γ => (f a.1, a.2)) fun a b h => _).ordinal_type_le [GOAL] α✝ : Type u_1 β✝ : Type u_2 γ✝ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t✝ : γ✝ → γ✝ → Prop c a✝ b✝ : Ordinal.{u} x✝³ x✝² x✝¹ : WellOrder α : Type u r : α → α → Prop wo✝² : IsWellOrder α r β : Type u s : β → β → Prop wo✝¹ : IsWellOrder β s x✝ : Quotient.mk isEquivalent { α := α, r := r, wo := wo✝² } ≤ Quotient.mk isEquivalent { α := β, r := s, wo := wo✝¹ } γ : Type u t : γ → γ → Prop wo✝ : IsWellOrder γ t f : r ≼i s a b : α × γ h : Prod.Lex r t a b ⊢ Prod.Lex s t ((fun a => (↑f a.fst, a.snd)) a) ((fun a => (↑f a.fst, a.snd)) b) [PROOFSTEP] cases' h with a₁ b₁ a₂ b₂ h' a b₁ b₂ h' [GOAL] case left α✝ : Type u_1 β✝ : Type u_2 γ✝ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t✝ : γ✝ → γ✝ → Prop c a b : Ordinal.{u} x✝³ x✝² x✝¹ : WellOrder α : Type u r : α → α → Prop wo✝² : IsWellOrder α r β : Type u s : β → β → Prop wo✝¹ : IsWellOrder β s x✝ : Quotient.mk isEquivalent { α := α, r := r, wo := wo✝² } ≤ Quotient.mk isEquivalent { α := β, r := s, wo := wo✝¹ } γ : Type u t : γ → γ → Prop wo✝ : IsWellOrder γ t f : r ≼i s a₁ : α b₁ : γ a₂ : α b₂ : γ h' : r a₁ a₂ ⊢ Prod.Lex s t ((fun a => (↑f a.fst, a.snd)) (a₁, b₁)) ((fun a => (↑f a.fst, a.snd)) (a₂, b₂)) [PROOFSTEP] exact Prod.Lex.left _ _ (f.toRelEmbedding.map_rel_iff.2 h') [GOAL] case right α✝ : Type u_1 β✝ : Type u_2 γ✝ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t✝ : γ✝ → γ✝ → Prop c a✝ b : Ordinal.{u} x✝³ x✝² x✝¹ : WellOrder α : Type u r : α → α → Prop wo✝² : IsWellOrder α r β : Type u s : β → β → Prop wo✝¹ : IsWellOrder β s x✝ : Quotient.mk isEquivalent { α := α, r := r, wo := wo✝² } ≤ Quotient.mk isEquivalent { α := β, r := s, wo := wo✝¹ } γ : Type u t : γ → γ → Prop wo✝ : IsWellOrder γ t f : r ≼i s a : α b₁ b₂ : γ h' : t b₁ b₂ ⊢ Prod.Lex s t ((fun a => (↑f a.fst, a.snd)) (a, b₁)) ((fun a => (↑f a.fst, a.snd)) (a, b₂)) [PROOFSTEP] exact Prod.Lex.right _ h' [GOAL] α✝ : Type u_1 β✝ : Type u_2 γ✝ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t✝ : γ✝ → γ✝ → Prop c a b : Ordinal.{u} x✝³ x✝² x✝¹ : WellOrder α : Type u r : α → α → Prop wo✝² : IsWellOrder α r β : Type u s : β → β → Prop wo✝¹ : IsWellOrder β s x✝ : Quotient.mk isEquivalent { α := α, r := r, wo := wo✝² } ≤ Quotient.mk isEquivalent { α := β, r := s, wo := wo✝¹ } γ : Type u t : γ → γ → Prop wo✝ : IsWellOrder γ t f : r ≼i s ⊢ Function.swap (fun x x_1 => x * x_1) (Quotient.mk isEquivalent { α := γ, r := t, wo := wo✝ }) (Quotient.mk isEquivalent { α := α, r := r, wo := wo✝² }) ≤ Function.swap (fun x x_1 => x * x_1) (Quotient.mk isEquivalent { α := γ, r := t, wo := wo✝ }) (Quotient.mk isEquivalent { α := β, r := s, wo := wo✝¹ }) [PROOFSTEP] refine' (RelEmbedding.ofMonotone (fun a : γ × α => (a.1, f a.2)) fun a b h => _).ordinal_type_le [GOAL] α✝ : Type u_1 β✝ : Type u_2 γ✝ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t✝ : γ✝ → γ✝ → Prop c a✝ b✝ : Ordinal.{u} x✝³ x✝² x✝¹ : WellOrder α : Type u r : α → α → Prop wo✝² : IsWellOrder α r β : Type u s : β → β → Prop wo✝¹ : IsWellOrder β s x✝ : Quotient.mk isEquivalent { α := α, r := r, wo := wo✝² } ≤ Quotient.mk isEquivalent { α := β, r := s, wo := wo✝¹ } γ : 
Type u t : γ → γ → Prop wo✝ : IsWellOrder γ t f : r ≼i s a b : γ × α h : Prod.Lex t r a b ⊢ Prod.Lex t s ((fun a => (a.fst, ↑f a.snd)) a) ((fun a => (a.fst, ↑f a.snd)) b) [PROOFSTEP] cases' h with a₁ b₁ a₂ b₂ h' a b₁ b₂ h' [GOAL] case left α✝ : Type u_1 β✝ : Type u_2 γ✝ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t✝ : γ✝ → γ✝ → Prop c a b : Ordinal.{u} x✝³ x✝² x✝¹ : WellOrder α : Type u r : α → α → Prop wo✝² : IsWellOrder α r β : Type u s : β → β → Prop wo✝¹ : IsWellOrder β s x✝ : Quotient.mk isEquivalent { α := α, r := r, wo := wo✝² } ≤ Quotient.mk isEquivalent { α := β, r := s, wo := wo✝¹ } γ : Type u t : γ → γ → Prop wo✝ : IsWellOrder γ t f : r ≼i s a₁ : γ b₁ : α a₂ : γ b₂ : α h' : t a₁ a₂ ⊢ Prod.Lex t s ((fun a => (a.fst, ↑f a.snd)) (a₁, b₁)) ((fun a => (a.fst, ↑f a.snd)) (a₂, b₂)) [PROOFSTEP] exact Prod.Lex.left _ _ h' [GOAL] case right α✝ : Type u_1 β✝ : Type u_2 γ✝ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t✝ : γ✝ → γ✝ → Prop c a✝ b : Ordinal.{u} x✝³ x✝² x✝¹ : WellOrder α : Type u r : α → α → Prop wo✝² : IsWellOrder α r β : Type u s : β → β → Prop wo✝¹ : IsWellOrder β s x✝ : Quotient.mk isEquivalent { α := α, r := r, wo := wo✝² } ≤ Quotient.mk isEquivalent { α := β, r := s, wo := wo✝¹ } γ : Type u t : γ → γ → Prop wo✝ : IsWellOrder γ t f : r ≼i s a : γ b₁ b₂ : α h' : r b₁ b₂ ⊢ Prod.Lex t s ((fun a => (a.fst, ↑f a.snd)) (a, b₁)) ((fun a => (a.fst, ↑f a.snd)) (a, b₂)) [PROOFSTEP] exact Prod.Lex.right _ (f.toRelEmbedding.map_rel_iff.2 h') [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} hb : 0 < b ⊢ a ≤ a * b [PROOFSTEP] convert mul_le_mul_left' (one_le_iff_pos.2 hb) a [GOAL] case h.e'_3 α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} hb : 0 < b ⊢ a = a * 1 [PROOFSTEP] rw [mul_one a] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} hb : 0 < b ⊢ a ≤ b * a [PROOFSTEP] convert mul_le_mul_right' (one_le_iff_pos.2 hb) a [GOAL] case h.e'_3 α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} hb : 0 < b ⊢ a = 1 * a [PROOFSTEP] rw [one_mul a] [GOAL] α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop α β : Type u_4 r : α → α → Prop s : β → β → Prop inst✝¹ : IsWellOrder α r inst✝ : IsWellOrder β s c : Ordinal.{u_4} h : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r * b' ≤ c l : c < type r * type s ⊢ False [PROOFSTEP] suffices ∀ a b, Prod.Lex s r (b, a) (enum _ _ l) by cases' enum _ _ l with b a exact irrefl _ (this _ _) [GOAL] α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop α β : Type u_4 r : α → α → Prop s : β → β → Prop inst✝¹ : IsWellOrder α r inst✝ : IsWellOrder β s c : Ordinal.{u_4} h : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r * b' ≤ c l : c < type r * type s this : ∀ (a : α) (b : β), Prod.Lex s r (b, a) (enum (Prod.Lex s r) c l) ⊢ False [PROOFSTEP] cases' enum _ _ l with b a [GOAL] case mk α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop α β : Type u_4 r : α → α → Prop s : β → β → Prop inst✝¹ : IsWellOrder α r inst✝ : IsWellOrder β s c : Ordinal.{u_4} h : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r * b' ≤ c l : c < type r * type s this : ∀ (a : α) (b : β), Prod.Lex s r (b, a) (enum (Prod.Lex s r) c l) b : β a : α ⊢ 
False [PROOFSTEP] exact irrefl _ (this _ _) [GOAL] α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop α β : Type u_4 r : α → α → Prop s : β → β → Prop inst✝¹ : IsWellOrder α r inst✝ : IsWellOrder β s c : Ordinal.{u_4} h : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r * b' ≤ c l : c < type r * type s ⊢ ∀ (a : α) (b : β), Prod.Lex s r (b, a) (enum (Prod.Lex s r) c l) [PROOFSTEP] intro a b [GOAL] α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop α β : Type u_4 r : α → α → Prop s : β → β → Prop inst✝¹ : IsWellOrder α r inst✝ : IsWellOrder β s c : Ordinal.{u_4} h : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r * b' ≤ c l : c < type r * type s a : α b : β ⊢ Prod.Lex s r (b, a) (enum (Prod.Lex s r) c l) [PROOFSTEP] rw [← typein_lt_typein (Prod.Lex s r), typein_enum] [GOAL] α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop α β : Type u_4 r : α → α → Prop s : β → β → Prop inst✝¹ : IsWellOrder α r inst✝ : IsWellOrder β s c : Ordinal.{u_4} h : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r * b' ≤ c l : c < type r * type s a : α b : β ⊢ typein (Prod.Lex s r) (b, a) < c [PROOFSTEP] have := H _ (h.2 _ (typein_lt_type s b)) [GOAL] α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop α β : Type u_4 r : α → α → Prop s : β → β → Prop inst✝¹ : IsWellOrder α r inst✝ : IsWellOrder β s c : Ordinal.{u_4} h : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r * b' ≤ c l : c < type r * type s a : α b : β this : type r * succ (typein s b) ≤ c ⊢ typein (Prod.Lex s r) (b, a) < c [PROOFSTEP] rw [mul_succ] at this [GOAL] α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop α β : Type u_4 r : α → α → Prop s : β → β → Prop inst✝¹ : IsWellOrder α r inst✝ : IsWellOrder β s c : Ordinal.{u_4} h : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r * b' ≤ c l : c < type r * type s a : α b : β this : type r * typein s b + type r ≤ c ⊢ typein (Prod.Lex s r) (b, a) < c [PROOFSTEP] have := ((add_lt_add_iff_left _).2 (typein_lt_type _ a)).trans_le this [GOAL] α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop α β : Type u_4 r : α → α → Prop s : β → β → Prop inst✝¹ : IsWellOrder α r inst✝ : IsWellOrder β s c : Ordinal.{u_4} h : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r * b' ≤ c l : c < type r * type s a : α b : β this✝ : type r * typein s b + type r ≤ c this : type r * typein s b + typein r a < c ⊢ typein (Prod.Lex s r) (b, a) < c [PROOFSTEP] refine' (RelEmbedding.ofMonotone (fun a => _) fun a b => _).ordinal_type_le.trans_lt this [GOAL] case refine'_1 α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop α β : Type u_4 r : α → α → Prop s : β → β → Prop inst✝¹ : IsWellOrder α r inst✝ : IsWellOrder β s c : Ordinal.{u_4} h : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r * b' ≤ c l : c < type r * type s a✝ : α b : β this✝ : type r * typein s b + type r ≤ c this : type r * typein s b + typein r a✝ < c a : ↑{b_1 | Prod.Lex s r b_1 (b, a✝)} ⊢ ↑{b_1 | s b_1 b} × α ⊕ ↑{b | r b a✝} [PROOFSTEP] rcases a with ⟨⟨b', a'⟩, h⟩ [GOAL] case refine'_1.mk.mk α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop α β : Type u_4 r : α → α → Prop s : β → β → Prop 
inst✝¹ : IsWellOrder α r inst✝ : IsWellOrder β s c : Ordinal.{u_4} h✝ : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r * b' ≤ c l : c < type r * type s a : α b : β this✝ : type r * typein s b + type r ≤ c this : type r * typein s b + typein r a < c b' : β a' : α h : (b', a') ∈ {b_1 | Prod.Lex s r b_1 (b, a)} ⊢ ↑{b_1 | s b_1 b} × α ⊕ ↑{b | r b a} [PROOFSTEP] by_cases e : b = b' [GOAL] case pos α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop α β : Type u_4 r : α → α → Prop s : β → β → Prop inst✝¹ : IsWellOrder α r inst✝ : IsWellOrder β s c : Ordinal.{u_4} h✝ : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r * b' ≤ c l : c < type r * type s a : α b : β this✝ : type r * typein s b + type r ≤ c this : type r * typein s b + typein r a < c b' : β a' : α h : (b', a') ∈ {b_1 | Prod.Lex s r b_1 (b, a)} e : b = b' ⊢ ↑{b_1 | s b_1 b} × α ⊕ ↑{b | r b a} [PROOFSTEP] refine' Sum.inr ⟨a', _⟩ [GOAL] case pos α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop α β : Type u_4 r : α → α → Prop s : β → β → Prop inst✝¹ : IsWellOrder α r inst✝ : IsWellOrder β s c : Ordinal.{u_4} h✝ : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r * b' ≤ c l : c < type r * type s a : α b : β this✝ : type r * typein s b + type r ≤ c this : type r * typein s b + typein r a < c b' : β a' : α h : (b', a') ∈ {b_1 | Prod.Lex s r b_1 (b, a)} e : b = b' ⊢ a' ∈ {b | r b a} [PROOFSTEP] subst e [GOAL] case pos α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop α β : Type u_4 r : α → α → Prop s : β → β → Prop inst✝¹ : IsWellOrder α r inst✝ : IsWellOrder β s c : Ordinal.{u_4} h✝ : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r * b' ≤ c l : c < type r * type s a : α b : β this✝ : type r * typein s b + type r ≤ c this : type r * typein s b + typein r a < c a' : α h : (b, a') ∈ {b_1 | Prod.Lex s r b_1 (b, a)} ⊢ a' ∈ {b | r b a} [PROOFSTEP] cases' h with _ _ _ _ h _ _ _ h [GOAL] case pos.left α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop α β : Type u_4 r : α → α → Prop s : β → β → Prop inst✝¹ : IsWellOrder α r inst✝ : IsWellOrder β s c : Ordinal.{u_4} h✝ : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r * b' ≤ c l : c < type r * type s a : α b : β this✝ : type r * typein s b + type r ≤ c this : type r * typein s b + typein r a < c a' : α h : s b b ⊢ a' ∈ {b | r b a} [PROOFSTEP] exact (irrefl _ h).elim [GOAL] case pos.right α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop α β : Type u_4 r : α → α → Prop s : β → β → Prop inst✝¹ : IsWellOrder α r inst✝ : IsWellOrder β s c : Ordinal.{u_4} h✝ : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r * b' ≤ c l : c < type r * type s a : α b : β this✝ : type r * typein s b + type r ≤ c this : type r * typein s b + typein r a < c a' : α h : r a' a ⊢ a' ∈ {b | r b a} [PROOFSTEP] exact h [GOAL] case neg α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop α β : Type u_4 r : α → α → Prop s : β → β → Prop inst✝¹ : IsWellOrder α r inst✝ : IsWellOrder β s c : Ordinal.{u_4} h✝ : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r * b' ≤ c l : c < type r * type s a : α b : β this✝ : type r * typein s b + type r ≤ c this : type r * typein s b + typein r a < c b' : β a' : α h : (b', a') ∈ {b_1 | Prod.Lex s r b_1 (b, 
a)} e : ¬b = b' ⊢ ↑{b_1 | s b_1 b} × α ⊕ ↑{b | r b a} [PROOFSTEP] refine' Sum.inl (⟨b', _⟩, a') [GOAL] case neg α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop α β : Type u_4 r : α → α → Prop s : β → β → Prop inst✝¹ : IsWellOrder α r inst✝ : IsWellOrder β s c : Ordinal.{u_4} h✝ : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r * b' ≤ c l : c < type r * type s a : α b : β this✝ : type r * typein s b + type r ≤ c this : type r * typein s b + typein r a < c b' : β a' : α h : (b', a') ∈ {b_1 | Prod.Lex s r b_1 (b, a)} e : ¬b = b' ⊢ b' ∈ {b_1 | s b_1 b} [PROOFSTEP] cases' h with _ _ _ _ h _ _ _ h [GOAL] case neg.left α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop α β : Type u_4 r : α → α → Prop s : β → β → Prop inst✝¹ : IsWellOrder α r inst✝ : IsWellOrder β s c : Ordinal.{u_4} h✝ : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r * b' ≤ c l : c < type r * type s a : α b : β this✝ : type r * typein s b + type r ≤ c this : type r * typein s b + typein r a < c b' : β a' : α e : ¬b = b' h : s b' b ⊢ b' ∈ {b_1 | s b_1 b} [PROOFSTEP] exact h [GOAL] case neg.right α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop α β : Type u_4 r : α → α → Prop s : β → β → Prop inst✝¹ : IsWellOrder α r inst✝ : IsWellOrder β s c : Ordinal.{u_4} h✝ : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r * b' ≤ c l : c < type r * type s a : α b : β this✝ : type r * typein s b + type r ≤ c this : type r * typein s b + typein r a < c a' : α e : ¬b = b h : r a' a ⊢ b ∈ {b_1 | s b_1 b} [PROOFSTEP] exact (e rfl).elim [GOAL] case refine'_2 α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop α β : Type u_4 r : α → α → Prop s : β → β → Prop inst✝¹ : IsWellOrder α r inst✝ : IsWellOrder β s c : Ordinal.{u_4} h : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r * b' ≤ c l : c < type r * type s a✝ : α b✝ : β this✝ : type r * typein s b✝ + type r ≤ c this : type r * typein s b✝ + typein r a✝ < c a b : ↑{b | Prod.Lex s r b (b✝, a✝)} ⊢ Subrel (Prod.Lex s r) {b | Prod.Lex s r b (b✝, a✝)} a b → Sum.Lex (Prod.Lex (Subrel s {b | s b b✝}) r) (Subrel r {b | r b a✝}) ((fun a => Subtype.casesOn a fun val h => Prod.casesOn (motive := fun x => x ∈ {b | Prod.Lex s r b (b✝, a✝)} → ↑{b | s b b✝} × α ⊕ ↑{b | r b a✝}) val (fun b' a' h => if e : b✝ = b' then Sum.inr { val := a', property := (_ : a' ∈ {b | r b a✝}) } else Sum.inl ({ val := b', property := (_ : b' ∈ {b | s b b✝}) }, a')) h) a) ((fun a => Subtype.casesOn a fun val h => Prod.casesOn (motive := fun x => x ∈ {b | Prod.Lex s r b (b✝, a✝)} → ↑{b | s b b✝} × α ⊕ ↑{b | r b a✝}) val (fun b' a' h => if e : b✝ = b' then Sum.inr { val := a', property := (_ : a' ∈ {b | r b a✝}) } else Sum.inl ({ val := b', property := (_ : b' ∈ {b | s b b✝}) }, a')) h) b) [PROOFSTEP] rcases a with ⟨⟨b₁, a₁⟩, h₁⟩ [GOAL] case refine'_2.mk.mk α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop α β : Type u_4 r : α → α → Prop s : β → β → Prop inst✝¹ : IsWellOrder α r inst✝ : IsWellOrder β s c : Ordinal.{u_4} h : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r * b' ≤ c l : c < type r * type s a : α b✝ : β this✝ : type r * typein s b✝ + type r ≤ c this : type r * typein s b✝ + typein r a < c b : ↑{b | Prod.Lex s r b (b✝, a)} b₁ : β a₁ : α h₁ : (b₁, a₁) ∈ {b | Prod.Lex s r b (b✝, a)} ⊢ Subrel (Prod.Lex s 
r) {b | Prod.Lex s r b (b✝, a)} { val := (b₁, a₁), property := h₁ } b → Sum.Lex (Prod.Lex (Subrel s {b | s b b✝}) r) (Subrel r {b | r b a}) ((fun a_2 => Subtype.casesOn a_2 fun val h => Prod.casesOn (motive := fun x => x ∈ {b | Prod.Lex s r b (b✝, a)} → ↑{b | s b b✝} × α ⊕ ↑{b | r b a}) val (fun b' a' h => if e : b✝ = b' then Sum.inr { val := a', property := (_ : a' ∈ {b | r b a}) } else Sum.inl ({ val := b', property := (_ : b' ∈ {b | s b b✝}) }, a')) h) { val := (b₁, a₁), property := h₁ }) ((fun a_2 => Subtype.casesOn a_2 fun val h => Prod.casesOn (motive := fun x => x ∈ {b | Prod.Lex s r b (b✝, a)} → ↑{b | s b b✝} × α ⊕ ↑{b | r b a}) val (fun b' a' h => if e : b✝ = b' then Sum.inr { val := a', property := (_ : a' ∈ {b | r b a}) } else Sum.inl ({ val := b', property := (_ : b' ∈ {b | s b b✝}) }, a')) h) b) [PROOFSTEP] rcases b with ⟨⟨b₂, a₂⟩, h₂⟩ [GOAL] case refine'_2.mk.mk.mk.mk α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop α β : Type u_4 r : α → α → Prop s : β → β → Prop inst✝¹ : IsWellOrder α r inst✝ : IsWellOrder β s c : Ordinal.{u_4} h : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r * b' ≤ c l : c < type r * type s a : α b : β this✝ : type r * typein s b + type r ≤ c this : type r * typein s b + typein r a < c b₁ : β a₁ : α h₁ : (b₁, a₁) ∈ {b_1 | Prod.Lex s r b_1 (b, a)} b₂ : β a₂ : α h₂ : (b₂, a₂) ∈ {b_1 | Prod.Lex s r b_1 (b, a)} ⊢ Subrel (Prod.Lex s r) {b_1 | Prod.Lex s r b_1 (b, a)} { val := (b₁, a₁), property := h₁ } { val := (b₂, a₂), property := h₂ } → Sum.Lex (Prod.Lex (Subrel s {b_1 | s b_1 b}) r) (Subrel r {b | r b a}) ((fun a_2 => Subtype.casesOn a_2 fun val h => Prod.casesOn (motive := fun x => x ∈ {b_1 | Prod.Lex s r b_1 (b, a)} → ↑{b_1 | s b_1 b} × α ⊕ ↑{b | r b a}) val (fun b' a' h => if e : b = b' then Sum.inr { val := a', property := (_ : a' ∈ {b | r b a}) } else Sum.inl ({ val := b', property := (_ : b' ∈ {b_1 | s b_1 b}) }, a')) h) { val := (b₁, a₁), property := h₁ }) ((fun a_2 => Subtype.casesOn a_2 fun val h => Prod.casesOn (motive := fun x => x ∈ {b_1 | Prod.Lex s r b_1 (b, a)} → ↑{b_1 | s b_1 b} × α ⊕ ↑{b | r b a}) val (fun b' a' h => if e : b = b' then Sum.inr { val := a', property := (_ : a' ∈ {b | r b a}) } else Sum.inl ({ val := b', property := (_ : b' ∈ {b_1 | s b_1 b}) }, a')) h) { val := (b₂, a₂), property := h₂ }) [PROOFSTEP] intro h [GOAL] case refine'_2.mk.mk.mk.mk α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop α β : Type u_4 r : α → α → Prop s : β → β → Prop inst✝¹ : IsWellOrder α r inst✝ : IsWellOrder β s c : Ordinal.{u_4} h✝ : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r * b' ≤ c l : c < type r * type s a : α b : β this✝ : type r * typein s b + type r ≤ c this : type r * typein s b + typein r a < c b₁ : β a₁ : α h₁ : (b₁, a₁) ∈ {b_1 | Prod.Lex s r b_1 (b, a)} b₂ : β a₂ : α h₂ : (b₂, a₂) ∈ {b_1 | Prod.Lex s r b_1 (b, a)} h : Subrel (Prod.Lex s r) {b_1 | Prod.Lex s r b_1 (b, a)} { val := (b₁, a₁), property := h₁ } { val := (b₂, a₂), property := h₂ } ⊢ Sum.Lex (Prod.Lex (Subrel s {b_1 | s b_1 b}) r) (Subrel r {b | r b a}) ((fun a_1 => Subtype.casesOn a_1 fun val h => Prod.casesOn (motive := fun x => x ∈ {b_1 | Prod.Lex s r b_1 (b, a)} → ↑{b_1 | s b_1 b} × α ⊕ ↑{b | r b a}) val (fun b' a' h => if e : b = b' then Sum.inr { val := a', property := (_ : a' ∈ {b | r b a}) } else Sum.inl ({ val := b', property := (_ : b' ∈ {b_1 | s b_1 b}) }, a')) h) { val := (b₁, a₁), property := h₁ }) ((fun a_1 => Subtype.casesOn 
a_1 fun val h => Prod.casesOn (motive := fun x => x ∈ {b_1 | Prod.Lex s r b_1 (b, a)} → ↑{b_1 | s b_1 b} × α ⊕ ↑{b | r b a}) val (fun b' a' h => if e : b = b' then Sum.inr { val := a', property := (_ : a' ∈ {b | r b a}) } else Sum.inl ({ val := b', property := (_ : b' ∈ {b_1 | s b_1 b}) }, a')) h) { val := (b₂, a₂), property := h₂ }) [PROOFSTEP] by_cases e₁ : b = b₁ [GOAL] case pos α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop α β : Type u_4 r : α → α → Prop s : β → β → Prop inst✝¹ : IsWellOrder α r inst✝ : IsWellOrder β s c : Ordinal.{u_4} h✝ : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r * b' ≤ c l : c < type r * type s a : α b : β this✝ : type r * typein s b + type r ≤ c this : type r * typein s b + typein r a < c b₁ : β a₁ : α h₁ : (b₁, a₁) ∈ {b_1 | Prod.Lex s r b_1 (b, a)} b₂ : β a₂ : α h₂ : (b₂, a₂) ∈ {b_1 | Prod.Lex s r b_1 (b, a)} h : Subrel (Prod.Lex s r) {b_1 | Prod.Lex s r b_1 (b, a)} { val := (b₁, a₁), property := h₁ } { val := (b₂, a₂), property := h₂ } e₁ : b = b₁ ⊢ Sum.Lex (Prod.Lex (Subrel s {b_1 | s b_1 b}) r) (Subrel r {b | r b a}) ((fun a_1 => Subtype.casesOn a_1 fun val h => Prod.casesOn (motive := fun x => x ∈ {b_1 | Prod.Lex s r b_1 (b, a)} → ↑{b_1 | s b_1 b} × α ⊕ ↑{b | r b a}) val (fun b' a' h => if e : b = b' then Sum.inr { val := a', property := (_ : a' ∈ {b | r b a}) } else Sum.inl ({ val := b', property := (_ : b' ∈ {b_1 | s b_1 b}) }, a')) h) { val := (b₁, a₁), property := h₁ }) ((fun a_1 => Subtype.casesOn a_1 fun val h => Prod.casesOn (motive := fun x => x ∈ {b_1 | Prod.Lex s r b_1 (b, a)} → ↑{b_1 | s b_1 b} × α ⊕ ↑{b | r b a}) val (fun b' a' h => if e : b = b' then Sum.inr { val := a', property := (_ : a' ∈ {b | r b a}) } else Sum.inl ({ val := b', property := (_ : b' ∈ {b_1 | s b_1 b}) }, a')) h) { val := (b₂, a₂), property := h₂ }) [PROOFSTEP] by_cases e₂ : b = b₂ [GOAL] case neg α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop α β : Type u_4 r : α → α → Prop s : β → β → Prop inst✝¹ : IsWellOrder α r inst✝ : IsWellOrder β s c : Ordinal.{u_4} h✝ : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r * b' ≤ c l : c < type r * type s a : α b : β this✝ : type r * typein s b + type r ≤ c this : type r * typein s b + typein r a < c b₁ : β a₁ : α h₁ : (b₁, a₁) ∈ {b_1 | Prod.Lex s r b_1 (b, a)} b₂ : β a₂ : α h₂ : (b₂, a₂) ∈ {b_1 | Prod.Lex s r b_1 (b, a)} h : Subrel (Prod.Lex s r) {b_1 | Prod.Lex s r b_1 (b, a)} { val := (b₁, a₁), property := h₁ } { val := (b₂, a₂), property := h₂ } e₁ : ¬b = b₁ ⊢ Sum.Lex (Prod.Lex (Subrel s {b_1 | s b_1 b}) r) (Subrel r {b | r b a}) ((fun a_1 => Subtype.casesOn a_1 fun val h => Prod.casesOn (motive := fun x => x ∈ {b_1 | Prod.Lex s r b_1 (b, a)} → ↑{b_1 | s b_1 b} × α ⊕ ↑{b | r b a}) val (fun b' a' h => if e : b = b' then Sum.inr { val := a', property := (_ : a' ∈ {b | r b a}) } else Sum.inl ({ val := b', property := (_ : b' ∈ {b_1 | s b_1 b}) }, a')) h) { val := (b₁, a₁), property := h₁ }) ((fun a_1 => Subtype.casesOn a_1 fun val h => Prod.casesOn (motive := fun x => x ∈ {b_1 | Prod.Lex s r b_1 (b, a)} → ↑{b_1 | s b_1 b} × α ⊕ ↑{b | r b a}) val (fun b' a' h => if e : b = b' then Sum.inr { val := a', property := (_ : a' ∈ {b | r b a}) } else Sum.inl ({ val := b', property := (_ : b' ∈ {b_1 | s b_1 b}) }, a')) h) { val := (b₂, a₂), property := h₂ }) [PROOFSTEP] by_cases e₂ : b = b₂ [GOAL] case pos α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop α β 
: Type u_4 r : α → α → Prop s : β → β → Prop inst✝¹ : IsWellOrder α r inst✝ : IsWellOrder β s c : Ordinal.{u_4} h✝ : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r * b' ≤ c l : c < type r * type s a : α b : β this✝ : type r * typein s b + type r ≤ c this : type r * typein s b + typein r a < c b₁ : β a₁ : α h₁ : (b₁, a₁) ∈ {b_1 | Prod.Lex s r b_1 (b, a)} b₂ : β a₂ : α h₂ : (b₂, a₂) ∈ {b_1 | Prod.Lex s r b_1 (b, a)} h : Subrel (Prod.Lex s r) {b_1 | Prod.Lex s r b_1 (b, a)} { val := (b₁, a₁), property := h₁ } { val := (b₂, a₂), property := h₂ } e₁ : b = b₁ e₂ : b = b₂ ⊢ Sum.Lex (Prod.Lex (Subrel s {b_1 | s b_1 b}) r) (Subrel r {b | r b a}) ((fun a_1 => Subtype.casesOn a_1 fun val h => Prod.casesOn (motive := fun x => x ∈ {b_1 | Prod.Lex s r b_1 (b, a)} → ↑{b_1 | s b_1 b} × α ⊕ ↑{b | r b a}) val (fun b' a' h => if e : b = b' then Sum.inr { val := a', property := (_ : a' ∈ {b | r b a}) } else Sum.inl ({ val := b', property := (_ : b' ∈ {b_1 | s b_1 b}) }, a')) h) { val := (b₁, a₁), property := h₁ }) ((fun a_1 => Subtype.casesOn a_1 fun val h => Prod.casesOn (motive := fun x => x ∈ {b_1 | Prod.Lex s r b_1 (b, a)} → ↑{b_1 | s b_1 b} × α ⊕ ↑{b | r b a}) val (fun b' a' h => if e : b = b' then Sum.inr { val := a', property := (_ : a' ∈ {b | r b a}) } else Sum.inl ({ val := b', property := (_ : b' ∈ {b_1 | s b_1 b}) }, a')) h) { val := (b₂, a₂), property := h₂ }) [PROOFSTEP] substs b₁ b₂ [GOAL] case pos α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop α β : Type u_4 r : α → α → Prop s : β → β → Prop inst✝¹ : IsWellOrder α r inst✝ : IsWellOrder β s c : Ordinal.{u_4} h✝ : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r * b' ≤ c l : c < type r * type s a : α b : β this✝ : type r * typein s b + type r ≤ c this : type r * typein s b + typein r a < c a₁ a₂ : α h₁ : (b, a₁) ∈ {b_1 | Prod.Lex s r b_1 (b, a)} h₂ : (b, a₂) ∈ {b_1 | Prod.Lex s r b_1 (b, a)} h : Subrel (Prod.Lex s r) {b_1 | Prod.Lex s r b_1 (b, a)} { val := (b, a₁), property := h₁ } { val := (b, a₂), property := h₂ } ⊢ Sum.Lex (Prod.Lex (Subrel s {b_1 | s b_1 b}) r) (Subrel r {b | r b a}) ((fun a_1 => Subtype.casesOn a_1 fun val h => Prod.casesOn (motive := fun x => x ∈ {b_1 | Prod.Lex s r b_1 (b, a)} → ↑{b_1 | s b_1 b} × α ⊕ ↑{b | r b a}) val (fun b' a' h => if e : b = b' then Sum.inr { val := a', property := (_ : a' ∈ {b | r b a}) } else Sum.inl ({ val := b', property := (_ : b' ∈ {b_1 | s b_1 b}) }, a')) h) { val := (b, a₁), property := h₁ }) ((fun a_1 => Subtype.casesOn a_1 fun val h => Prod.casesOn (motive := fun x => x ∈ {b_1 | Prod.Lex s r b_1 (b, a)} → ↑{b_1 | s b_1 b} × α ⊕ ↑{b | r b a}) val (fun b' a' h => if e : b = b' then Sum.inr { val := a', property := (_ : a' ∈ {b | r b a}) } else Sum.inl ({ val := b', property := (_ : b' ∈ {b_1 | s b_1 b}) }, a')) h) { val := (b, a₂), property := h₂ }) [PROOFSTEP] simpa only [subrel_val, Prod.lex_def, @irrefl _ s _ b, true_and_iff, false_or_iff, eq_self_iff_true, dif_pos, Sum.lex_inr_inr] using h [GOAL] case neg α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop α β : Type u_4 r : α → α → Prop s : β → β → Prop inst✝¹ : IsWellOrder α r inst✝ : IsWellOrder β s c : Ordinal.{u_4} h✝ : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r * b' ≤ c l : c < type r * type s a : α b : β this✝ : type r * typein s b + type r ≤ c this : type r * typein s b + typein r a < c b₁ : β a₁ : α h₁ : (b₁, a₁) ∈ {b_1 | Prod.Lex s r b_1 (b, a)} b₂ : β a₂ : α h₂ : (b₂, a₂) ∈ 
{b_1 | Prod.Lex s r b_1 (b, a)} h : Subrel (Prod.Lex s r) {b_1 | Prod.Lex s r b_1 (b, a)} { val := (b₁, a₁), property := h₁ } { val := (b₂, a₂), property := h₂ } e₁ : b = b₁ e₂ : ¬b = b₂ ⊢ Sum.Lex (Prod.Lex (Subrel s {b_1 | s b_1 b}) r) (Subrel r {b | r b a}) ((fun a_1 => Subtype.casesOn a_1 fun val h => Prod.casesOn (motive := fun x => x ∈ {b_1 | Prod.Lex s r b_1 (b, a)} → ↑{b_1 | s b_1 b} × α ⊕ ↑{b | r b a}) val (fun b' a' h => if e : b = b' then Sum.inr { val := a', property := (_ : a' ∈ {b | r b a}) } else Sum.inl ({ val := b', property := (_ : b' ∈ {b_1 | s b_1 b}) }, a')) h) { val := (b₁, a₁), property := h₁ }) ((fun a_1 => Subtype.casesOn a_1 fun val h => Prod.casesOn (motive := fun x => x ∈ {b_1 | Prod.Lex s r b_1 (b, a)} → ↑{b_1 | s b_1 b} × α ⊕ ↑{b | r b a}) val (fun b' a' h => if e : b = b' then Sum.inr { val := a', property := (_ : a' ∈ {b | r b a}) } else Sum.inl ({ val := b', property := (_ : b' ∈ {b_1 | s b_1 b}) }, a')) h) { val := (b₂, a₂), property := h₂ }) [PROOFSTEP] subst b₁ [GOAL] case neg α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop α β : Type u_4 r : α → α → Prop s : β → β → Prop inst✝¹ : IsWellOrder α r inst✝ : IsWellOrder β s c : Ordinal.{u_4} h✝ : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r * b' ≤ c l : c < type r * type s a : α b : β this✝ : type r * typein s b + type r ≤ c this : type r * typein s b + typein r a < c a₁ : α b₂ : β a₂ : α h₂ : (b₂, a₂) ∈ {b_1 | Prod.Lex s r b_1 (b, a)} e₂ : ¬b = b₂ h₁ : (b, a₁) ∈ {b_1 | Prod.Lex s r b_1 (b, a)} h : Subrel (Prod.Lex s r) {b_1 | Prod.Lex s r b_1 (b, a)} { val := (b, a₁), property := h₁ } { val := (b₂, a₂), property := h₂ } ⊢ Sum.Lex (Prod.Lex (Subrel s {b_1 | s b_1 b}) r) (Subrel r {b | r b a}) ((fun a_1 => Subtype.casesOn a_1 fun val h => Prod.casesOn (motive := fun x => x ∈ {b_1 | Prod.Lex s r b_1 (b, a)} → ↑{b_1 | s b_1 b} × α ⊕ ↑{b | r b a}) val (fun b' a' h => if e : b = b' then Sum.inr { val := a', property := (_ : a' ∈ {b | r b a}) } else Sum.inl ({ val := b', property := (_ : b' ∈ {b_1 | s b_1 b}) }, a')) h) { val := (b, a₁), property := h₁ }) ((fun a_1 => Subtype.casesOn a_1 fun val h => Prod.casesOn (motive := fun x => x ∈ {b_1 | Prod.Lex s r b_1 (b, a)} → ↑{b_1 | s b_1 b} × α ⊕ ↑{b | r b a}) val (fun b' a' h => if e : b = b' then Sum.inr { val := a', property := (_ : a' ∈ {b | r b a}) } else Sum.inl ({ val := b', property := (_ : b' ∈ {b_1 | s b_1 b}) }, a')) h) { val := (b₂, a₂), property := h₂ }) [PROOFSTEP] simp only [subrel_val, Prod.lex_def, e₂, Prod.lex_def, dif_pos, subrel_val, eq_self_iff_true, or_false_iff, dif_neg, not_false_iff, Sum.lex_inr_inl, false_and_iff] at h ⊢ [GOAL] case neg α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop α β : Type u_4 r : α → α → Prop s : β → β → Prop inst✝¹ : IsWellOrder α r inst✝ : IsWellOrder β s c : Ordinal.{u_4} h✝ : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r * b' ≤ c l : c < type r * type s a : α b : β this✝ : type r * typein s b + type r ≤ c this : type r * typein s b + typein r a < c a₁ : α b₂ : β a₂ : α h₂ : (b₂, a₂) ∈ {b_1 | Prod.Lex s r b_1 (b, a)} e₂ : ¬b = b₂ h₁ : (b, a₁) ∈ {b_1 | Prod.Lex s r b_1 (b, a)} h : s b b₂ ⊢ False [PROOFSTEP] cases' h₂ with _ _ _ _ h₂_h h₂_h <;> [exact asymm h h₂_h; exact e₂ rfl] -- Porting note: `cc` hadn't ported yet. 
[GOAL] case neg α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop α β : Type u_4 r : α → α → Prop s : β → β → Prop inst✝¹ : IsWellOrder α r inst✝ : IsWellOrder β s c : Ordinal.{u_4} h✝ : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r * b' ≤ c l : c < type r * type s a : α b : β this✝ : type r * typein s b + type r ≤ c this : type r * typein s b + typein r a < c a₁ : α b₂ : β a₂ : α h₂ : (b₂, a₂) ∈ {b_1 | Prod.Lex s r b_1 (b, a)} e₂ : ¬b = b₂ h₁ : (b, a₁) ∈ {b_1 | Prod.Lex s r b_1 (b, a)} h : s b b₂ ⊢ False [PROOFSTEP] cases' h₂ with _ _ _ _ h₂_h h₂_h [GOAL] case neg.left α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop α β : Type u_4 r : α → α → Prop s : β → β → Prop inst✝¹ : IsWellOrder α r inst✝ : IsWellOrder β s c : Ordinal.{u_4} h✝ : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r * b' ≤ c l : c < type r * type s a : α b : β this✝ : type r * typein s b + type r ≤ c this : type r * typein s b + typein r a < c a₁ : α b₂ : β a₂ : α e₂ : ¬b = b₂ h₁ : (b, a₁) ∈ {b_1 | Prod.Lex s r b_1 (b, a)} h : s b b₂ h₂_h : s b₂ b ⊢ False [PROOFSTEP] exact asymm h h₂_h [GOAL] case neg.right α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop α β : Type u_4 r : α → α → Prop s : β → β → Prop inst✝¹ : IsWellOrder α r inst✝ : IsWellOrder β s c : Ordinal.{u_4} h✝¹ : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r * b' ≤ c l : c < type r * type s a : α b : β this✝ : type r * typein s b + type r ≤ c this : type r * typein s b + typein r a < c a₁ a₂ : α h₁ : (b, a₁) ∈ {b_1 | Prod.Lex s r b_1 (b, a)} e₂ : ¬b = b h : s b b h✝ : r a₂ a ⊢ False [PROOFSTEP] exact e₂ rfl [GOAL] case pos α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop α β : Type u_4 r : α → α → Prop s : β → β → Prop inst✝¹ : IsWellOrder α r inst✝ : IsWellOrder β s c : Ordinal.{u_4} h✝ : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r * b' ≤ c l : c < type r * type s a : α b : β this✝ : type r * typein s b + type r ≤ c this : type r * typein s b + typein r a < c b₁ : β a₁ : α h₁ : (b₁, a₁) ∈ {b_1 | Prod.Lex s r b_1 (b, a)} b₂ : β a₂ : α h₂ : (b₂, a₂) ∈ {b_1 | Prod.Lex s r b_1 (b, a)} h : Subrel (Prod.Lex s r) {b_1 | Prod.Lex s r b_1 (b, a)} { val := (b₁, a₁), property := h₁ } { val := (b₂, a₂), property := h₂ } e₁ : ¬b = b₁ e₂ : b = b₂ ⊢ Sum.Lex (Prod.Lex (Subrel s {b_1 | s b_1 b}) r) (Subrel r {b | r b a}) ((fun a_1 => Subtype.casesOn a_1 fun val h => Prod.casesOn (motive := fun x => x ∈ {b_1 | Prod.Lex s r b_1 (b, a)} → ↑{b_1 | s b_1 b} × α ⊕ ↑{b | r b a}) val (fun b' a' h => if e : b = b' then Sum.inr { val := a', property := (_ : a' ∈ {b | r b a}) } else Sum.inl ({ val := b', property := (_ : b' ∈ {b_1 | s b_1 b}) }, a')) h) { val := (b₁, a₁), property := h₁ }) ((fun a_1 => Subtype.casesOn a_1 fun val h => Prod.casesOn (motive := fun x => x ∈ {b_1 | Prod.Lex s r b_1 (b, a)} → ↑{b_1 | s b_1 b} × α ⊕ ↑{b | r b a}) val (fun b' a' h => if e : b = b' then Sum.inr { val := a', property := (_ : a' ∈ {b | r b a}) } else Sum.inl ({ val := b', property := (_ : b' ∈ {b_1 | s b_1 b}) }, a')) h) { val := (b₂, a₂), property := h₂ }) [PROOFSTEP] simp [e₂, dif_neg e₁, show b₂ ≠ b₁ from e₂ ▸ e₁] [GOAL] case neg α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop α β : Type u_4 r : α → α → Prop s : β → β → Prop inst✝¹ : IsWellOrder α r inst✝ : IsWellOrder β s c 
: Ordinal.{u_4} h✝ : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r * b' ≤ c l : c < type r * type s a : α b : β this✝ : type r * typein s b + type r ≤ c this : type r * typein s b + typein r a < c b₁ : β a₁ : α h₁ : (b₁, a₁) ∈ {b_1 | Prod.Lex s r b_1 (b, a)} b₂ : β a₂ : α h₂ : (b₂, a₂) ∈ {b_1 | Prod.Lex s r b_1 (b, a)} h : Subrel (Prod.Lex s r) {b_1 | Prod.Lex s r b_1 (b, a)} { val := (b₁, a₁), property := h₁ } { val := (b₂, a₂), property := h₂ } e₁ : ¬b = b₁ e₂ : ¬b = b₂ ⊢ Sum.Lex (Prod.Lex (Subrel s {b_1 | s b_1 b}) r) (Subrel r {b | r b a}) ((fun a_1 => Subtype.casesOn a_1 fun val h => Prod.casesOn (motive := fun x => x ∈ {b_1 | Prod.Lex s r b_1 (b, a)} → ↑{b_1 | s b_1 b} × α ⊕ ↑{b | r b a}) val (fun b' a' h => if e : b = b' then Sum.inr { val := a', property := (_ : a' ∈ {b | r b a}) } else Sum.inl ({ val := b', property := (_ : b' ∈ {b_1 | s b_1 b}) }, a')) h) { val := (b₁, a₁), property := h₁ }) ((fun a_1 => Subtype.casesOn a_1 fun val h => Prod.casesOn (motive := fun x => x ∈ {b_1 | Prod.Lex s r b_1 (b, a)} → ↑{b_1 | s b_1 b} × α ⊕ ↑{b | r b a}) val (fun b' a' h => if e : b = b' then Sum.inr { val := a', property := (_ : a' ∈ {b | r b a}) } else Sum.inl ({ val := b', property := (_ : b' ∈ {b_1 | s b_1 b}) }, a')) h) { val := (b₂, a₂), property := h₂ }) [PROOFSTEP] simpa only [dif_neg e₁, dif_neg e₂, Prod.lex_def, subrel_val, Subtype.mk_eq_mk, Sum.lex_inl_inl] using h [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b c : Ordinal.{u_4} h : IsLimit b H : ∀ (b' : Ordinal.{u_4}), b' < b → a * b' ≤ c ⊢ ¬c < a * b [PROOFSTEP] induction a using inductionOn with | H α r => induction b using inductionOn with | H β s => exact mul_le_of_limit_aux h H [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b c : Ordinal.{u_4} h : IsLimit b H : ∀ (b' : Ordinal.{u_4}), b' < b → a * b' ≤ c ⊢ ¬c < a * b [PROOFSTEP] induction a using inductionOn with | H α r => induction b using inductionOn with | H β s => exact mul_le_of_limit_aux h H [GOAL] case H α✝ : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s : β → β → Prop t : γ → γ → Prop b c : Ordinal.{u_4} h : IsLimit b α : Type u_4 r : α → α → Prop inst✝ : IsWellOrder α r H : ∀ (b' : Ordinal.{u_4}), b' < b → type r * b' ≤ c ⊢ ¬c < type r * b [PROOFSTEP] | H α r => induction b using inductionOn with | H β s => exact mul_le_of_limit_aux h H [GOAL] case H α✝ : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s : β → β → Prop t : γ → γ → Prop b c : Ordinal.{u_4} h : IsLimit b α : Type u_4 r : α → α → Prop inst✝ : IsWellOrder α r H : ∀ (b' : Ordinal.{u_4}), b' < b → type r * b' ≤ c ⊢ ¬c < type r * b [PROOFSTEP] induction b using inductionOn with | H β s => exact mul_le_of_limit_aux h H [GOAL] case H α✝ : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s : β → β → Prop t : γ → γ → Prop b c : Ordinal.{u_4} h : IsLimit b α : Type u_4 r : α → α → Prop inst✝ : IsWellOrder α r H : ∀ (b' : Ordinal.{u_4}), b' < b → type r * b' ≤ c ⊢ ¬c < type r * b [PROOFSTEP] induction b using inductionOn with | H β s => exact mul_le_of_limit_aux h H [GOAL] case H.H α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop c : Ordinal.{u_4} α : Type u_4 r : α → α → Prop inst✝¹ : IsWellOrder α r β : Type u_4 s : β → β → Prop inst✝ : IsWellOrder β s h : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r * b' ≤ c ⊢ ¬c < type r * type s [PROOFSTEP] | H β s => exact mul_le_of_limit_aux h H [GOAL] 
case H.H α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s✝ : β✝ → β✝ → Prop t : γ → γ → Prop c : Ordinal.{u_4} α : Type u_4 r : α → α → Prop inst✝¹ : IsWellOrder α r β : Type u_4 s : β → β → Prop inst✝ : IsWellOrder β s h : IsLimit (type s) H : ∀ (b' : Ordinal.{u_4}), b' < type s → type r * b' ≤ c ⊢ ¬c < type r * type s [PROOFSTEP] exact mul_le_of_limit_aux h H [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a : Ordinal.{u_4} h : 0 < a b : Ordinal.{u_4} ⊢ (fun x x_1 => x * x_1) a b < (fun x x_1 => x * x_1) a (succ b) [PROOFSTEP] dsimp only [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a : Ordinal.{u_4} h : 0 < a b : Ordinal.{u_4} ⊢ a * b < a * succ b [PROOFSTEP] rw [mul_succ] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a : Ordinal.{u_4} h : 0 < a b : Ordinal.{u_4} ⊢ a * b < a * b + a [PROOFSTEP] simpa only [add_zero] using (add_lt_add_iff_left (a * b)).2 h [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b c : Ordinal.{u_4} h : IsLimit c ⊢ a < b * c ↔ ∃ c', c' < c ∧ a < b * c' [PROOFSTEP] simpa only [not_ball, not_le, bex_def] using not_congr (@mul_le_of_limit b c a h) [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} h₁ : 0 < a h₂ : 0 < b ⊢ 0 < a * b [PROOFSTEP] simpa only [mul_zero] using mul_lt_mul_of_pos_left h₂ h₁ [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} ⊢ a ≠ 0 → b ≠ 0 → a * b ≠ 0 [PROOFSTEP] simpa only [Ordinal.pos_iff_ne_zero] using mul_pos [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} l : IsLimit a b0 : 0 < b ⊢ IsLimit (a * b) [PROOFSTEP] rcases zero_or_succ_or_limit b with (rfl | ⟨b, rfl⟩ | lb) [GOAL] case inl α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a : Ordinal.{u_4} l : IsLimit a b0 : 0 < 0 ⊢ IsLimit (a * 0) [PROOFSTEP] exact b0.false.elim [GOAL] case inr.inl.intro α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a : Ordinal.{u_4} l : IsLimit a b : Ordinal.{u_4} b0 : 0 < succ b ⊢ IsLimit (a * succ b) [PROOFSTEP] rw [mul_succ] [GOAL] case inr.inl.intro α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a : Ordinal.{u_4} l : IsLimit a b : Ordinal.{u_4} b0 : 0 < succ b ⊢ IsLimit (a * b + a) [PROOFSTEP] exact add_isLimit _ l [GOAL] case inr.inr α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} l : IsLimit a b0 : 0 < b lb : IsLimit b ⊢ IsLimit (a * b) [PROOFSTEP] exact mul_isLimit l.pos lb [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a : Ordinal.{u_4} ⊢ 0 • a = a * ↑0 [PROOFSTEP] rw [zero_smul, Nat.cast_zero, mul_zero] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop n : ℕ a : Ordinal.{u_4} ⊢ (n + 1) • a = a * ↑(n + 1) [PROOFSTEP] rw [succ_nsmul', Nat.cast_add, mul_add, Nat.cast_one, mul_one, smul_eq_mul n] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} h : b ≠ 0 ⊢ succ a ≤ b * succ a [PROOFSTEP] simpa only [succ_zero, one_mul] using mul_le_mul_right' (succ_le_of_lt (Ordinal.pos_iff_ne_zero.2 h)) (succ a) 
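-- A short, self-contained usage sketch, assuming `import Mathlib.SetTheory.Ordinal.Arithmetic`
-- and the lemma names as they appear in this trace (`mul_le_of_limit`, `mul_div_cancel`);
-- the binder shapes are read off the goals shown here, and a given Mathlib revision may
-- phrase them slightly differently.
import Mathlib.SetTheory.Ordinal.Arithmetic

-- Multiplication by a limit ordinal `b` stays below `c` iff every smaller multiple does:
-- this is the statement established via `mul_le_of_limit_aux` in the trace above.
example (a b c : Ordinal) (h : Ordinal.IsLimit b) :
    a * b ≤ c ↔ ∀ b' < b, a * b' ≤ c :=
  Ordinal.mul_le_of_limit h

-- Division on the right cancels a nonzero left factor, matching the `b * a / b = a`
-- goal discharged with `mul_add_div` further below in the trace.
example (a b : Ordinal) (h : b ≠ 0) : b * a / b = a :=
  Ordinal.mul_div_cancel a h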
[GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} h : b ≠ 0 ⊢ a < b * succ (a / b) [PROOFSTEP] rw [div_def a h] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} h : b ≠ 0 ⊢ a < b * succ (sInf {o | a < b * succ o}) [PROOFSTEP] exact csInf_mem (div_nonempty h) [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} h : b ≠ 0 ⊢ a < b * (a / b) + b [PROOFSTEP] simpa only [mul_succ] using lt_mul_succ_div a h [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b c : Ordinal.{u_4} b0 : b ≠ 0 h : a < b * succ c ⊢ a / b ≤ c [PROOFSTEP] rw [div_def a b0] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b c : Ordinal.{u_4} b0 : b ≠ 0 h : a < b * succ c ⊢ sInf {o | a < b * succ o} ≤ c [PROOFSTEP] exact csInf_le' h [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b c : Ordinal.{u_4} h : c ≠ 0 ⊢ a < b / c ↔ c * succ a ≤ b [PROOFSTEP] rw [← not_le, div_le h, not_lt] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop b c : Ordinal.{u_4} h : c ≠ 0 ⊢ 0 < b / c ↔ c ≤ b [PROOFSTEP] simp [lt_div h] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b c : Ordinal.{u_4} c0 : c ≠ 0 ⊢ a ≤ b / c ↔ c * a ≤ b [PROOFSTEP] induction a using limitRecOn with | H₁ => simp only [mul_zero, Ordinal.zero_le] | H₂ _ _ => rw [succ_le_iff, lt_div c0] | H₃ _ h₁ h₂ => revert h₁ h₂ simp (config := { contextual := true }) only [mul_le_of_limit, limit_le, iff_self_iff, forall_true_iff] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b c : Ordinal.{u_4} c0 : c ≠ 0 ⊢ a ≤ b / c ↔ c * a ≤ b [PROOFSTEP] induction a using limitRecOn with | H₁ => simp only [mul_zero, Ordinal.zero_le] | H₂ _ _ => rw [succ_le_iff, lt_div c0] | H₃ _ h₁ h₂ => revert h₁ h₂ simp (config := { contextual := true }) only [mul_le_of_limit, limit_le, iff_self_iff, forall_true_iff] [GOAL] case H₁ α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop b c : Ordinal.{u_4} c0 : c ≠ 0 ⊢ 0 ≤ b / c ↔ c * 0 ≤ b [PROOFSTEP] | H₁ => simp only [mul_zero, Ordinal.zero_le] [GOAL] case H₁ α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop b c : Ordinal.{u_4} c0 : c ≠ 0 ⊢ 0 ≤ b / c ↔ c * 0 ≤ b [PROOFSTEP] simp only [mul_zero, Ordinal.zero_le] [GOAL] case H₂ α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop b c : Ordinal.{u_4} c0 : c ≠ 0 o✝ : Ordinal.{u_4} a✝ : o✝ ≤ b / c ↔ c * o✝ ≤ b ⊢ succ o✝ ≤ b / c ↔ c * succ o✝ ≤ b [PROOFSTEP] | H₂ _ _ => rw [succ_le_iff, lt_div c0] [GOAL] case H₂ α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop b c : Ordinal.{u_4} c0 : c ≠ 0 o✝ : Ordinal.{u_4} a✝ : o✝ ≤ b / c ↔ c * o✝ ≤ b ⊢ succ o✝ ≤ b / c ↔ c * succ o✝ ≤ b [PROOFSTEP] rw [succ_le_iff, lt_div c0] [GOAL] case H₃ α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop b c : Ordinal.{u_4} c0 : c ≠ 0 o✝ : Ordinal.{u_4} h₁ : IsLimit o✝ h₂ : ∀ (o' : Ordinal.{u_4}), o' < o✝ → (o' ≤ b / c ↔ c * o' ≤ b) ⊢ o✝ ≤ b / c ↔ c * o✝ ≤ b [PROOFSTEP] | H₃ _ h₁ h₂ => revert h₁ h₂ simp (config := { contextual := true }) only [mul_le_of_limit, 
limit_le, iff_self_iff, forall_true_iff] [GOAL] case H₃ α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop b c : Ordinal.{u_4} c0 : c ≠ 0 o✝ : Ordinal.{u_4} h₁ : IsLimit o✝ h₂ : ∀ (o' : Ordinal.{u_4}), o' < o✝ → (o' ≤ b / c ↔ c * o' ≤ b) ⊢ o✝ ≤ b / c ↔ c * o✝ ≤ b [PROOFSTEP] revert h₁ h₂ [GOAL] case H₃ α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop b c : Ordinal.{u_4} c0 : c ≠ 0 o✝ : Ordinal.{u_4} ⊢ IsLimit o✝ → (∀ (o' : Ordinal.{u_4}), o' < o✝ → (o' ≤ b / c ↔ c * o' ≤ b)) → (o✝ ≤ b / c ↔ c * o✝ ≤ b) [PROOFSTEP] simp (config := { contextual := true }) only [mul_le_of_limit, limit_le, iff_self_iff, forall_true_iff] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b c : Ordinal.{u_4} h : a ≤ b * c b0 : b = 0 ⊢ a / b ≤ c [PROOFSTEP] simp only [b0, div_zero, Ordinal.zero_le] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} b0 : b = 0 ⊢ b * (a / b) ≤ a [PROOFSTEP] simp only [b0, zero_mul, Ordinal.zero_le] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} b0 : b ≠ 0 c : Ordinal.{u_4} ⊢ (b * a + c) / b = a + c / b [PROOFSTEP] apply le_antisymm [GOAL] case a α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} b0 : b ≠ 0 c : Ordinal.{u_4} ⊢ (b * a + c) / b ≤ a + c / b [PROOFSTEP] apply (div_le b0).2 [GOAL] case a α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} b0 : b ≠ 0 c : Ordinal.{u_4} ⊢ b * a + c < b * succ (a + c / b) [PROOFSTEP] rw [mul_succ, mul_add, add_assoc, add_lt_add_iff_left] [GOAL] case a α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} b0 : b ≠ 0 c : Ordinal.{u_4} ⊢ c < b * (c / b) + b [PROOFSTEP] apply lt_mul_div_add _ b0 [GOAL] case a α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} b0 : b ≠ 0 c : Ordinal.{u_4} ⊢ a + c / b ≤ (b * a + c) / b [PROOFSTEP] rw [le_div b0, mul_add, add_le_add_iff_left] [GOAL] case a α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} b0 : b ≠ 0 c : Ordinal.{u_4} ⊢ b * (c / b) ≤ c [PROOFSTEP] apply mul_div_le [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} h : a < b ⊢ a / b = 0 [PROOFSTEP] rw [← Ordinal.le_zero, div_le <| Ordinal.pos_iff_ne_zero.1 <| (Ordinal.zero_le _).trans_lt h] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} h : a < b ⊢ a < b * succ 0 [PROOFSTEP] simpa only [succ_zero, mul_one] using h [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} b0 : b ≠ 0 ⊢ b * a / b = a [PROOFSTEP] simpa only [add_zero, zero_div] using mul_add_div a b0 0 [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a : Ordinal.{u_4} ⊢ a / 1 = a [PROOFSTEP] simpa only [one_mul] using mul_div_cancel a Ordinal.one_ne_zero [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a : Ordinal.{u_4} h : a ≠ 0 ⊢ a / a = 1 [PROOFSTEP] simpa only [mul_one] using mul_div_cancel 1 h [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → 
Prop s : β → β → Prop t : γ → γ → Prop a b c : Ordinal.{u_4} a0 : a = 0 ⊢ a * (b - c) = a * b - a * c [PROOFSTEP] simp only [a0, zero_mul, sub_self] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b c : Ordinal.{u_4} a0 : ¬a = 0 d : Ordinal.{u_4} ⊢ a * (b - c) ≤ d ↔ a * b - a * c ≤ d [PROOFSTEP] rw [sub_le, ← le_div a0, sub_le, ← le_div a0, mul_add_div _ a0] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} ⊢ IsLimit (a + b) ↔ IsLimit b ∨ b = 0 ∧ IsLimit a [PROOFSTEP] constructor [GOAL] case mp α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} ⊢ IsLimit (a + b) → IsLimit b ∨ b = 0 ∧ IsLimit a [PROOFSTEP] intro h [GOAL] case mpr α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} ⊢ IsLimit b ∨ b = 0 ∧ IsLimit a → IsLimit (a + b) [PROOFSTEP] intro h [GOAL] case mp α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} h : IsLimit (a + b) ⊢ IsLimit b ∨ b = 0 ∧ IsLimit a [PROOFSTEP] by_cases h' : b = 0 [GOAL] case pos α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} h : IsLimit (a + b) h' : b = 0 ⊢ IsLimit b ∨ b = 0 ∧ IsLimit a [PROOFSTEP] rw [h', add_zero] at h [GOAL] case pos α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} h : IsLimit a h' : b = 0 ⊢ IsLimit b ∨ b = 0 ∧ IsLimit a [PROOFSTEP] right [GOAL] case pos.h α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} h : IsLimit a h' : b = 0 ⊢ b = 0 ∧ IsLimit a [PROOFSTEP] exact ⟨h', h⟩ [GOAL] case neg α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} h : IsLimit (a + b) h' : ¬b = 0 ⊢ IsLimit b ∨ b = 0 ∧ IsLimit a [PROOFSTEP] left [GOAL] case neg.h α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} h : IsLimit (a + b) h' : ¬b = 0 ⊢ IsLimit b [PROOFSTEP] rw [← add_sub_cancel a b] [GOAL] case neg.h α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} h : IsLimit (a + b) h' : ¬b = 0 ⊢ IsLimit (a + b - a) [PROOFSTEP] apply sub_isLimit h [GOAL] case neg.h α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} h : IsLimit (a + b) h' : ¬b = 0 ⊢ a < a + b [PROOFSTEP] suffices : a + 0 < a + b [GOAL] case neg.h α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} h : IsLimit (a + b) h' : ¬b = 0 this : a + 0 < a + b ⊢ a < a + b case this α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} h : IsLimit (a + b) h' : ¬b = 0 ⊢ a + 0 < a + b [PROOFSTEP] simpa only [add_zero] using this [GOAL] case this α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} h : IsLimit (a + b) h' : ¬b = 0 ⊢ a + 0 < a + b [PROOFSTEP] rwa [add_lt_add_iff_left, Ordinal.pos_iff_ne_zero] [GOAL] case mpr α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} h : IsLimit b ∨ b = 0 ∧ IsLimit a ⊢ IsLimit (a + b) [PROOFSTEP] rcases h with (h | ⟨rfl, h⟩) [GOAL] case mpr.inl α : 
Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} h : IsLimit b ⊢ IsLimit (a + b) case mpr.inr.intro α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a : Ordinal.{u_4} h : IsLimit a ⊢ IsLimit (a + 0) [PROOFSTEP] exact add_isLimit a h [GOAL] case mpr.inr.intro α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a : Ordinal.{u_4} h : IsLimit a ⊢ IsLimit (a + 0) [PROOFSTEP] simpa only [add_zero] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a c b : Ordinal.{u_4} x✝ : a ∣ a * b + c d : Ordinal.{u_4} e : a * b + c = a * d ⊢ c = a * (d - b) [PROOFSTEP] rw [mul_sub, ← e, add_sub_cancel] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a c b : Ordinal.{u_4} x✝ : a ∣ c d : Ordinal.{u_4} e : c = a * d ⊢ a ∣ a * b + c [PROOFSTEP] rw [e, ← mul_add] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a c b : Ordinal.{u_4} x✝ : a ∣ c d : Ordinal.{u_4} e : c = a * d ⊢ a ∣ a * (b + d) [PROOFSTEP] apply dvd_mul_right [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a : Ordinal.{u_4} a0 : a ≠ 0 b : Ordinal.{u_4} ⊢ a * (a * b / a) = a * b [PROOFSTEP] rw [mul_div_cancel _ a0] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a x✝ : Ordinal.{u_4} b0 : x✝ ≠ 0 b : Ordinal.{u_4} e : x✝ = a * b ⊢ a ≤ x✝ [PROOFSTEP] subst e [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} b0 : a * b ≠ 0 ⊢ a ≤ a * b [PROOFSTEP] simpa only [mul_one] using mul_le_mul_left' (one_le_iff_ne_zero.2 fun h : b = 0 => by simp only [h, mul_zero, Ne] at b0 ) a [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} b0 : a * b ≠ 0 h : b = 0 ⊢ False [PROOFSTEP] simp only [h, mul_zero, Ne] at b0 [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} h₁ : a ∣ b h₂ : b ∣ a a0 : a = 0 ⊢ a = b [PROOFSTEP] subst a [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop b : Ordinal.{u_4} h₁ : 0 ∣ b h₂ : b ∣ 0 ⊢ 0 = b [PROOFSTEP] exact (eq_zero_of_zero_dvd h₁).symm [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} h₁ : a ∣ b h₂ : b ∣ a a0 : ¬a = 0 b0 : b = 0 ⊢ a = b [PROOFSTEP] subst b [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a : Ordinal.{u_4} a0 : ¬a = 0 h₁ : a ∣ 0 h₂ : 0 ∣ a ⊢ a = 0 [PROOFSTEP] exact eq_zero_of_zero_dvd h₂ [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a : Ordinal.{u_4} ⊢ a % 0 = a [PROOFSTEP] simp only [mod_def, div_zero, zero_mul, sub_zero] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} h : a < b ⊢ a % b = a [PROOFSTEP] simp only [mod_def, div_eq_zero_of_lt h, mul_zero, sub_zero] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop b : Ordinal.{u_4} ⊢ 0 % b = 0 [PROOFSTEP] simp only [mod_def, zero_div, mul_zero, sub_self] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} h : b ≠ 0 ⊢ b * (a / 
b) + a % b < b * (a / b) + b [PROOFSTEP] rw [div_add_mod] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} h : b ≠ 0 ⊢ a < b * (a / b) + b [PROOFSTEP] exact lt_mul_div_add a h [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a : Ordinal.{u_4} a0 : a = 0 ⊢ a % a = 0 [PROOFSTEP] simp only [a0, zero_mod] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a : Ordinal.{u_4} a0 : ¬a = 0 ⊢ a % a = 0 [PROOFSTEP] simp only [mod_def, div_self a0, mul_one, sub_self] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a : Ordinal.{u_4} ⊢ a % 1 = 0 [PROOFSTEP] simp only [mod_def, div_one, one_mul, sub_self] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} H : a % b = 0 ⊢ a = b * (a / b) [PROOFSTEP] simpa [H] using (div_add_mod a b).symm [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b : Ordinal.{u_4} H : b ∣ a ⊢ a % b = 0 [PROOFSTEP] rcases H with ⟨c, rfl⟩ [GOAL] case intro α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop b c : Ordinal.{u_4} ⊢ b * c % b = 0 [PROOFSTEP] rcases eq_or_ne b 0 with (rfl | hb) [GOAL] case intro.inl α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop c : Ordinal.{u_4} ⊢ 0 * c % 0 = 0 [PROOFSTEP] simp [GOAL] case intro.inr α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop b c : Ordinal.{u_4} hb : b ≠ 0 ⊢ b * c % b = 0 [PROOFSTEP] simp [mod_def, hb] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop x y z : Ordinal.{u_4} ⊢ (x * y + z) % x = z % x [PROOFSTEP] rcases eq_or_ne x 0 with rfl | hx [GOAL] case inl α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop y z : Ordinal.{u_4} ⊢ (0 * y + z) % 0 = z % 0 [PROOFSTEP] simp [GOAL] case inr α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop x y z : Ordinal.{u_4} hx : x ≠ 0 ⊢ (x * y + z) % x = z % x [PROOFSTEP] rwa [mod_def, mul_add_div, mul_add, ← sub_sub, add_sub_cancel, mod_def] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop x y : Ordinal.{u_4} ⊢ x * y % x = 0 [PROOFSTEP] simpa using mul_add_mod_self x y 0 [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b c : Ordinal.{u_4} h : c ∣ b ⊢ a % b % c = a % c [PROOFSTEP] nth_rw 2 [← div_add_mod a b] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a b c : Ordinal.{u_4} h : c ∣ b ⊢ a % b % c = (b * (a / b) + a % b) % c [PROOFSTEP] rcases h with ⟨d, rfl⟩ [GOAL] case intro α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop a c d : Ordinal.{u_4} ⊢ a % (c * d) % c = (c * d * (a / (c * d)) + a % (c * d)) % c [PROOFSTEP] rw [mul_assoc, mul_add_mod_self] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u r : ι → ι → Prop inst✝ : IsWellOrder ι r o : Ordinal.{u} ho : type r = o f : (a : Ordinal.{u}) → a < o → α i : ι ⊢ typein r i < o [PROOFSTEP] rw [← ho] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u r : ι → ι → Prop inst✝ : IsWellOrder 
ι r o : Ordinal.{u} ho : type r = o f : (a : Ordinal.{u}) → a < o → α i : ι ⊢ typein r i < type r [PROOFSTEP] exact typein_lt_type r i [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u_4 r : ι → ι → Prop inst✝ : IsWellOrder ι r f : ι → α i : ι ⊢ bfamilyOfFamily' r f (typein r i) (_ : typein r i < type r) = f i [PROOFSTEP] simp only [bfamilyOfFamily', enum_typein] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u r : ι → ι → Prop inst✝ : IsWellOrder ι r o : Ordinal.{u} ho : type r = o f : (a : Ordinal.{u}) → a < o → α i : Ordinal.{u} hi : i < o ⊢ i < type r [PROOFSTEP] rwa [ho] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u r : ι → ι → Prop inst✝ : IsWellOrder ι r o : Ordinal.{u} ho : type r = o f : (a : Ordinal.{u}) → a < o → α i : Ordinal.{u} hi : i < o ⊢ familyOfBFamily' r ho f (enum r i (_ : i < type r)) = f i hi [PROOFSTEP] simp only [familyOfBFamily', typein_enum] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{?u.274180} f : (a : Ordinal.{?u.274180}) → a < o → α i : Ordinal.{?u.274180} hi : i < o ⊢ i < type fun x x_1 => x < x_1 [PROOFSTEP] convert hi [GOAL] case h.e'_4 α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{?u.274180} f : (a : Ordinal.{?u.274180}) → a < o → α i : Ordinal.{?u.274180} hi : i < o ⊢ (type fun x x_1 => x < x_1) = o [PROOFSTEP] exact type_lt _ [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u r : ι → ι → Prop inst✝ : IsWellOrder ι r o : Ordinal.{u} ho : type r = o f : (a : Ordinal.{u}) → a < o → α ⊢ range (familyOfBFamily' r ho f) = brange o f [PROOFSTEP] refine' Set.ext fun a => ⟨_, _⟩ [GOAL] case refine'_1 α : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u r : ι → ι → Prop inst✝ : IsWellOrder ι r o : Ordinal.{u} ho : type r = o f : (a : Ordinal.{u}) → a < o → α a : α ⊢ a ∈ range (familyOfBFamily' r ho f) → a ∈ brange o f [PROOFSTEP] rintro ⟨b, rfl⟩ [GOAL] case refine'_1.intro α : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u r : ι → ι → Prop inst✝ : IsWellOrder ι r o : Ordinal.{u} ho : type r = o f : (a : Ordinal.{u}) → a < o → α b : ι ⊢ familyOfBFamily' r ho f b ∈ brange o f [PROOFSTEP] apply mem_brange_self [GOAL] case refine'_2 α : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u r : ι → ι → Prop inst✝ : IsWellOrder ι r o : Ordinal.{u} ho : type r = o f : (a : Ordinal.{u}) → a < o → α a : α ⊢ a ∈ brange o f → a ∈ range (familyOfBFamily' r ho f) [PROOFSTEP] rintro ⟨i, hi, rfl⟩ [GOAL] case refine'_2.intro.intro α : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u r : ι → ι → Prop inst✝ : IsWellOrder ι r o : Ordinal.{u} ho : type r = o f : (a : Ordinal.{u}) → a < o → α i : Ordinal.{u} hi : i < o ⊢ f i hi ∈ range (familyOfBFamily' r ho f) [PROOFSTEP] exact ⟨_, familyOfBFamily'_enum _ _ _ _ _⟩ [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u r : ι → ι → Prop inst✝ : IsWellOrder ι r f : ι → α ⊢ brange (type r) (bfamilyOfFamily' r f) = range f [PROOFSTEP] refine' Set.ext fun a => ⟨_, _⟩ [GOAL] case refine'_1 α : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α → α → 
Prop s : β → β → Prop t : γ → γ → Prop ι : Type u r : ι → ι → Prop inst✝ : IsWellOrder ι r f : ι → α a : α ⊢ a ∈ brange (type r) (bfamilyOfFamily' r f) → a ∈ range f [PROOFSTEP] rintro ⟨i, hi, rfl⟩ [GOAL] case refine'_1.intro.intro α : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u r : ι → ι → Prop inst✝ : IsWellOrder ι r f : ι → α i : Ordinal.{u} hi : i < type r ⊢ bfamilyOfFamily' r f i hi ∈ range f [PROOFSTEP] apply mem_range_self [GOAL] case refine'_2 α : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u r : ι → ι → Prop inst✝ : IsWellOrder ι r f : ι → α a : α ⊢ a ∈ range f → a ∈ brange (type r) (bfamilyOfFamily' r f) [PROOFSTEP] rintro ⟨b, rfl⟩ [GOAL] case refine'_2.intro α : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u r : ι → ι → Prop inst✝ : IsWellOrder ι r f : ι → α b : ι ⊢ f b ∈ brange (type r) (bfamilyOfFamily' r f) [PROOFSTEP] exact ⟨_, _, bfamilyOfFamily'_typein _ _ _⟩ [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u_4} ho : o ≠ 0 c : α ⊢ (brange o fun x x => c) = {c} [PROOFSTEP] rw [← range_familyOfBFamily] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u_4} ho : o ≠ 0 c : α ⊢ range (familyOfBFamily o fun x x => c) = {c} [PROOFSTEP] exact @Set.range_const _ o.out.α (out_nonempty_iff_ne_zero.2 ho) c [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{max u v} ⊢ ord (iSup (succ ∘ card ∘ f)) ∈ upperBounds (range f) [PROOFSTEP] rintro a ⟨i, rfl⟩ [GOAL] case intro α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{max u v} i : ι ⊢ f i ≤ ord (iSup (succ ∘ card ∘ f)) [PROOFSTEP] exact le_of_lt (Cardinal.lt_ord.2 ((lt_succ _).trans_le (le_ciSup (Cardinal.bddAbove_range.{_, v} _) _))) [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{max u v} a : Ordinal.{max u v} ⊢ (∀ (x : Ordinal.{max u v}), x ∈ range f → x ≤ a) ↔ ∀ (i : ι), f i ≤ a [PROOFSTEP] simp [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{max u v} a : Ordinal.{max u v} ⊢ a < sup f ↔ ∃ i, a < f i [PROOFSTEP] simpa only [not_forall, not_le] using not_congr (@sup_le_iff.{_, v} _ f a) [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{max u v} hf : ∀ (i : ι), f i ≠ sup f a : Ordinal.{max u v} hao : a < sup f ⊢ succ a < sup f [PROOFSTEP] by_contra' hoa [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{max u v} hf : ∀ (i : ι), f i ≠ sup f a : Ordinal.{max u v} hao : a < sup f hoa : sup f ≤ succ a ⊢ False [PROOFSTEP] exact hao.not_le (sup_le fun i => le_of_lt_succ <| (lt_of_le_of_ne (le_sup _ _) (hf i)).trans_le hoa) [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{max u v} ⊢ sup f = 0 ↔ ∀ (i : ι), f i = 0 [PROOFSTEP] refine' ⟨fun h i => _, fun h => le_antisymm (sup_le fun i => Ordinal.le_zero.2 (h i)) (Ordinal.zero_le _)⟩ [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{max u 
v} h : sup f = 0 i : ι ⊢ f i = 0 [PROOFSTEP] rw [← Ordinal.le_zero, ← h] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{max u v} h : sup f = 0 i : ι ⊢ f i ≤ sup f [PROOFSTEP] exact le_sup f i [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop f : Ordinal.{max u v} → Ordinal.{max u w} H : IsNormal f ι : Type u g : ι → Ordinal.{max u v} inst✝ : Nonempty ι a : Ordinal.{max u w} ⊢ f (Ordinal.sup g) ≤ a ↔ Ordinal.sup (f ∘ g) ≤ a [PROOFSTEP] rw [sup_le_iff] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop f : Ordinal.{max u v} → Ordinal.{max u w} H : IsNormal f ι : Type u g : ι → Ordinal.{max u v} inst✝ : Nonempty ι a : Ordinal.{max u w} ⊢ f (Ordinal.sup g) ≤ a ↔ ∀ (i : ι), (f ∘ g) i ≤ a [PROOFSTEP] simp only [comp] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop f : Ordinal.{max u v} → Ordinal.{max u w} H : IsNormal f ι : Type u g : ι → Ordinal.{max u v} inst✝ : Nonempty ι a : Ordinal.{max u w} ⊢ f (Ordinal.sup g) ≤ a ↔ ∀ (i : ι), f (g i) ≤ a [PROOFSTEP] rw [H.le_set' Set.univ Set.univ_nonempty g] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop f : Ordinal.{max u v} → Ordinal.{max u w} H : IsNormal f ι : Type u g : ι → Ordinal.{max u v} inst✝ : Nonempty ι a : Ordinal.{max u w} ⊢ (∀ (a_1 : ι), a_1 ∈ Set.univ → f (g a_1) ≤ a) ↔ ∀ (i : ι), f (g i) ≤ a [PROOFSTEP] simp [sup_le_iff] [GOAL] case H₂ α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop f : Ordinal.{max u v} → Ordinal.{max u w} H : IsNormal f ι : Type u g : ι → Ordinal.{max u v} inst✝ : Nonempty ι a : Ordinal.{max u w} ⊢ ∀ (o : Ordinal.{max u v}), Ordinal.sup g ≤ o ↔ ∀ (a : ι), a ∈ Set.univ → g a ≤ o [PROOFSTEP] simp [sup_le_iff] [GOAL] α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r : α✝ → α✝ → Prop s : β✝ → β✝ → Prop t : γ → γ → Prop α : Type u β : Type v f : α ⊕ β → Ordinal.{max (max u v) w} ⊢ sup f = max (sup fun a => f (Sum.inl a)) (sup fun b => f (Sum.inr b)) [PROOFSTEP] apply (sup_le_iff.2 _).antisymm (max_le_iff.2 ⟨_, _⟩) [GOAL] α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r : α✝ → α✝ → Prop s : β✝ → β✝ → Prop t : γ → γ → Prop α : Type u β : Type v f : α ⊕ β → Ordinal.{max (max u v) w} ⊢ ∀ (i : α ⊕ β), f i ≤ max (sup fun a => f (Sum.inl a)) (sup fun b => f (Sum.inr b)) [PROOFSTEP] rintro (i | i) [GOAL] case inl α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r : α✝ → α✝ → Prop s : β✝ → β✝ → Prop t : γ → γ → Prop α : Type u β : Type v f : α ⊕ β → Ordinal.{max (max u v) w} i : α ⊢ f (Sum.inl i) ≤ max (sup fun a => f (Sum.inl a)) (sup fun b => f (Sum.inr b)) [PROOFSTEP] exact le_max_of_le_left (le_sup _ i) [GOAL] case inr α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r : α✝ → α✝ → Prop s : β✝ → β✝ → Prop t : γ → γ → Prop α : Type u β : Type v f : α ⊕ β → Ordinal.{max (max u v) w} i : β ⊢ f (Sum.inr i) ≤ max (sup fun a => f (Sum.inl a)) (sup fun b => f (Sum.inr b)) [PROOFSTEP] exact le_max_of_le_right (le_sup _ i) [GOAL] α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r : α✝ → α✝ → Prop s : β✝ → β✝ → Prop t : γ → γ → Prop α : Type u β : Type v f : α ⊕ β → Ordinal.{max (max u v) w} ⊢ (sup fun a => f (Sum.inl a)) ≤ sup f α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r : α✝ → α✝ → Prop s : β✝ → β✝ → Prop t : γ → γ → Prop α : Type u β : Type v f : α ⊕ β → Ordinal.{max (max u v) w} ⊢ (sup fun b => f (Sum.inr b)) ≤ sup f [PROOFSTEP] all_goals apply 
sup_le_of_range_subset.{_, max u v, w} rintro i ⟨a, rfl⟩ apply mem_range_self [GOAL] α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r : α✝ → α✝ → Prop s : β✝ → β✝ → Prop t : γ → γ → Prop α : Type u β : Type v f : α ⊕ β → Ordinal.{max (max u v) w} ⊢ (sup fun a => f (Sum.inl a)) ≤ sup f [PROOFSTEP] apply sup_le_of_range_subset.{_, max u v, w} [GOAL] α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r : α✝ → α✝ → Prop s : β✝ → β✝ → Prop t : γ → γ → Prop α : Type u β : Type v f : α ⊕ β → Ordinal.{max (max u v) w} ⊢ (range fun a => f (Sum.inl a)) ⊆ range f [PROOFSTEP] rintro i ⟨a, rfl⟩ [GOAL] case intro α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r : α✝ → α✝ → Prop s : β✝ → β✝ → Prop t : γ → γ → Prop α : Type u β : Type v f : α ⊕ β → Ordinal.{max (max u v) w} a : α ⊢ (fun a => f (Sum.inl a)) a ∈ range f [PROOFSTEP] apply mem_range_self [GOAL] α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r : α✝ → α✝ → Prop s : β✝ → β✝ → Prop t : γ → γ → Prop α : Type u β : Type v f : α ⊕ β → Ordinal.{max (max u v) w} ⊢ (sup fun b => f (Sum.inr b)) ≤ sup f [PROOFSTEP] apply sup_le_of_range_subset.{_, max u v, w} [GOAL] α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r : α✝ → α✝ → Prop s : β✝ → β✝ → Prop t : γ → γ → Prop α : Type u β : Type v f : α ⊕ β → Ordinal.{max (max u v) w} ⊢ (range fun b => f (Sum.inr b)) ⊆ range f [PROOFSTEP] rintro i ⟨a, rfl⟩ [GOAL] case intro α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r : α✝ → α✝ → Prop s : β✝ → β✝ → Prop t : γ → γ → Prop α : Type u β : Type v f : α ⊕ β → Ordinal.{max (max u v) w} a : β ⊢ (fun b => f (Sum.inr b)) a ∈ range f [PROOFSTEP] apply mem_range_self [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s✝ : β → β → Prop t : γ → γ → Prop s : Set Ordinal.{u} hs : Small.{u, u + 1} ↑s a : Ordinal.{u} ha : a ∈ s ⊢ a ≤ sup fun x => ↑(↑(equivShrink ↑s).symm x) [PROOFSTEP] convert le_sup.{u, u} (fun x => ((@equivShrink s hs).symm x).val) ((@equivShrink s hs) ⟨a, ha⟩) [GOAL] case h.e'_3 α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s✝ : β → β → Prop t : γ → γ → Prop s : Set Ordinal.{u} hs : Small.{u, u + 1} ↑s a : Ordinal.{u} ha : a ∈ s ⊢ a = ↑(↑(equivShrink ↑s).symm (↑(equivShrink ↑s) { val := a, property := ha })) [PROOFSTEP] rw [symm_apply_apply] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} f : (Quotient.out o).α → ↑(Iio o) := fun x => { val := typein (fun x x_1 => x < x_1) x, property := (_ : typein (fun x x_1 => x < x_1) x < o) } b : ↑(Iio o) ⊢ ↑b < type fun x x_1 => x < x_1 [PROOFSTEP] rw [type_lt] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} f : (Quotient.out o).α → ↑(Iio o) := fun x => { val := typein (fun x x_1 => x < x_1) x, property := (_ : typein (fun x x_1 => x < x_1) x < o) } b : ↑(Iio o) ⊢ ↑b < o [PROOFSTEP] exact b.prop [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} ⊢ Small.{u, u + 1} ↑(Iic o) [PROOFSTEP] rw [← Iio_succ] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} ⊢ Small.{u, u + 1} ↑(Iio (succ o)) [PROOFSTEP] infer_instance [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s✝ : β → β → Prop t : γ → γ → Prop s : Set Cardinal.{u} hs : BddAbove s a : Ordinal.{u} ⊢ ord (sSup s) ≤ a ↔ sSup (ord '' s) ≤ a [PROOFSTEP] rw [csSup_le_iff' (bddAbove_iff_small.2 (@small_image _ _ _ s (Cardinal.bddAbove_iff_small.1 hs))), ord_le, csSup_le_iff' hs] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → 
α → Prop s✝ : β → β → Prop t : γ → γ → Prop s : Set Cardinal.{u} hs : BddAbove s a : Ordinal.{u} ⊢ (∀ (x : Cardinal.{u}), x ∈ s → x ≤ card a) ↔ ∀ (x : Ordinal.{u}), x ∈ ord '' s → x ≤ a [PROOFSTEP] simp [ord_le] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Sort u_4 f : ι → Cardinal.{u_5} hf : BddAbove (range f) ⊢ ord (iSup f) = ⨆ (i : ι), ord (f i) [PROOFSTEP] unfold iSup [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Sort u_4 f : ι → Cardinal.{u_5} hf : BddAbove (range f) ⊢ ord (sSup (range f)) = sSup (range fun i => ord (f i)) [PROOFSTEP] convert sSup_ord hf [GOAL] case h.e'_3.h.e'_3 α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Sort u_4 f : ι → Cardinal.{u_5} hf : BddAbove (range f) ⊢ (range fun i => ord (f i)) = ord '' range f [PROOFSTEP] conv_lhs => change range (ord ∘ f) [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Sort u_4 f : ι → Cardinal.{u_5} hf : BddAbove (range f) | range fun i => ord (f i) [PROOFSTEP] change range (ord ∘ f) [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Sort u_4 f : ι → Cardinal.{u_5} hf : BddAbove (range f) | range fun i => ord (f i) [PROOFSTEP] change range (ord ∘ f) [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Sort u_4 f : ι → Cardinal.{u_5} hf : BddAbove (range f) | range fun i => ord (f i) [PROOFSTEP] change range (ord ∘ f) [GOAL] case h.e'_3.h.e'_3 α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Sort u_4 f : ι → Cardinal.{u_5} hf : BddAbove (range f) ⊢ range (ord ∘ f) = ord '' range f [PROOFSTEP] rw [range_comp] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι ι' : Type u r : ι → ι → Prop r' : ι' → ι' → Prop inst✝¹ : IsWellOrder ι r inst✝ : IsWellOrder ι' r' o : Ordinal.{u} ho : type r = o ho' : type r' = o f : (a : Ordinal.{u}) → a < o → Ordinal.{max u v} i : ι ⊢ familyOfBFamily' r ho f i ≤ sup (familyOfBFamily' r' ho' f) [PROOFSTEP] cases' typein_surj r' (by rw [ho', ← ho] exact typein_lt_type r i) with j hj [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι ι' : Type u r : ι → ι → Prop r' : ι' → ι' → Prop inst✝¹ : IsWellOrder ι r inst✝ : IsWellOrder ι' r' o : Ordinal.{u} ho : type r = o ho' : type r' = o f : (a : Ordinal.{u}) → a < o → Ordinal.{max u v} i : ι ⊢ ?m.307394 < type r' [PROOFSTEP] rw [ho', ← ho] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι ι' : Type u r : ι → ι → Prop r' : ι' → ι' → Prop inst✝¹ : IsWellOrder ι r inst✝ : IsWellOrder ι' r' o : Ordinal.{u} ho : type r = o ho' : type r' = o f : (a : Ordinal.{u}) → a < o → Ordinal.{max u v} i : ι ⊢ ?m.307394 < type r α : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι ι' : Type u r : ι → ι → Prop r' : ι' → ι' → Prop inst✝¹ : IsWellOrder ι r inst✝ : IsWellOrder ι' r' o : Ordinal.{u} ho : type r = o ho' : type r' = o f : (a : Ordinal.{u}) → a < o → Ordinal.{max u v} i : ι ⊢ Ordinal.{u} α : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι ι' : Type u r : ι → ι → Prop r' : ι' → ι' → Prop inst✝¹ : IsWellOrder ι r inst✝ : IsWellOrder ι' r' o : Ordinal.{u} ho : type r = o ho' : type r' = o f : (a : 
Ordinal.{u}) → a < o → Ordinal.{max u v} i : ι ⊢ Ordinal.{u} [PROOFSTEP] exact typein_lt_type r i [GOAL] case intro α : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι ι' : Type u r : ι → ι → Prop r' : ι' → ι' → Prop inst✝¹ : IsWellOrder ι r inst✝ : IsWellOrder ι' r' o : Ordinal.{u} ho : type r = o ho' : type r' = o f : (a : Ordinal.{u}) → a < o → Ordinal.{max u v} i : ι j : ι' hj : typein r' j = typein r i ⊢ familyOfBFamily' r ho f i ≤ sup (familyOfBFamily' r' ho' f) [PROOFSTEP] simp_rw [familyOfBFamily', ← hj] [GOAL] case intro α : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι ι' : Type u r : ι → ι → Prop r' : ι' → ι' → Prop inst✝¹ : IsWellOrder ι r inst✝ : IsWellOrder ι' r' o : Ordinal.{u} ho : type r = o ho' : type r' = o f : (a : Ordinal.{u}) → a < o → Ordinal.{max u v} i : ι j : ι' hj : typein r' j = typein r i ⊢ f (typein r' j) (_ : typein r' j < o) ≤ sup fun i => f (typein r' i) (_ : typein r' i < o) [PROOFSTEP] apply le_sup [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι ι' : Type u r : ι → ι → Prop r' : ι' → ι' → Prop inst✝¹ : IsWellOrder ι r inst✝ : IsWellOrder ι' r' o : Ordinal.{u} ho : type r = o ho' : type r' = o f : (a : Ordinal.{u}) → a < o → Ordinal.{max u v} ⊢ range (familyOfBFamily' r ho f) = range (familyOfBFamily' r' ho' f) [PROOFSTEP] simp [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} f : (a : Ordinal.{u}) → a < o → Ordinal.{max u v} ⊢ sSup (brange o f) = bsup o f [PROOFSTEP] congr [GOAL] case e_a α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} f : (a : Ordinal.{u}) → a < o → Ordinal.{max u v} ⊢ brange o f = range (familyOfBFamily o f) [PROOFSTEP] rw [range_familyOfBFamily] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u r : ι → ι → Prop inst✝ : IsWellOrder ι r f : ι → Ordinal.{max u v} ⊢ bsup (type r) (bfamilyOfFamily' r f) = sup f [PROOFSTEP] simp only [← sup_eq_bsup' r, enum_typein, familyOfBFamily', bfamilyOfFamily'] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u r r' : ι → ι → Prop inst✝¹ : IsWellOrder ι r inst✝ : IsWellOrder ι r' f : ι → Ordinal.{max u v} ⊢ bsup (type r) (bfamilyOfFamily' r f) = bsup (type r') (bfamilyOfFamily' r' f) [PROOFSTEP] rw [bsup_eq_sup', bsup_eq_sup'] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o₁ o₂ : Ordinal.{u} f : (a : Ordinal.{u}) → a < o₁ → Ordinal.{max u v} ho : o₁ = o₂ ⊢ bsup o₁ f = bsup o₂ fun a h => f a (_ : a < o₁) [PROOFSTEP] subst ho [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o₁ : Ordinal.{u} f : (a : Ordinal.{u}) → a < o₁ → Ordinal.{max u v} ⊢ bsup o₁ f = bsup o₁ fun a h => f a (_ : a < o₁) [PROOFSTEP] rfl [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} f : (a : Ordinal.{u}) → a < o → Ordinal.{max u v} a : Ordinal.{max u v} h : ∀ (i : (Quotient.out o).α), familyOfBFamily o f i ≤ a i : Ordinal.{u} hi : i < o ⊢ f i hi ≤ a [PROOFSTEP] rw [← familyOfBFamily_enum o f] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} f : (a : Ordinal.{u}) → a < o → Ordinal.{max u v} a : Ordinal.{max u v} h : ∀ (i : (Quotient.out o).α), 
familyOfBFamily o f i ≤ a i : Ordinal.{u} hi : i < o ⊢ familyOfBFamily o f (enum (fun x x_1 => x < x_1) i (_ : i < type fun x x_1 => x < x_1)) ≤ a [PROOFSTEP] exact h _ [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} f : (a : Ordinal.{u}) → a < o → Ordinal.{max u v} a : Ordinal.{max u v} ⊢ a < bsup o f ↔ ∃ i hi, a < f i hi [PROOFSTEP] simpa only [not_forall, not_le] using not_congr (@bsup_le_iff.{_, v} _ f a) [GOAL] α✝ : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s : β → β → Prop t : γ → γ → Prop f : Ordinal.{max u v} → Ordinal.{max u w} H : IsNormal f o : Ordinal.{u} α : Type u r : α → α → Prop x✝ : IsWellOrder α r g : (a : Ordinal.{u}) → a < type r → Ordinal.{max u v} h : type r ≠ 0 ⊢ f (Ordinal.bsup (type r) g) = Ordinal.bsup (type r) fun a h => f (g a h) [PROOFSTEP] haveI := type_ne_zero_iff_nonempty.1 h [GOAL] α✝ : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s : β → β → Prop t : γ → γ → Prop f : Ordinal.{max u v} → Ordinal.{max u w} H : IsNormal f o : Ordinal.{u} α : Type u r : α → α → Prop x✝ : IsWellOrder α r g : (a : Ordinal.{u}) → a < type r → Ordinal.{max u v} h : type r ≠ 0 this : Nonempty α ⊢ f (Ordinal.bsup (type r) g) = Ordinal.bsup (type r) fun a h => f (g a h) [PROOFSTEP] rw [← sup_eq_bsup' r, IsNormal.sup.{_, v, w} H, ← sup_eq_bsup' r] [GOAL] α✝ : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s : β → β → Prop t : γ → γ → Prop f : Ordinal.{max u v} → Ordinal.{max u w} H : IsNormal f o : Ordinal.{u} α : Type u r : α → α → Prop x✝ : IsWellOrder α r g : (a : Ordinal.{u}) → a < type r → Ordinal.{max u v} h : type r ≠ 0 this : Nonempty α ⊢ Ordinal.sup (f ∘ familyOfBFamily' r ?ho g) = Ordinal.sup (familyOfBFamily' r ?ho fun a h => f (g a h)) [PROOFSTEP] rfl [GOAL] case ho α✝ : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s : β → β → Prop t : γ → γ → Prop f : Ordinal.{max u v} → Ordinal.{max u w} H : IsNormal f o : Ordinal.{u} α : Type u r : α → α → Prop x✝ : IsWellOrder α r g : (a : Ordinal.{u}) → a < type r → Ordinal.{max u v} h : type r ≠ 0 this : Nonempty α ⊢ type r = type r [PROOFSTEP] rfl [GOAL] case ho α✝ : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s : β → β → Prop t : γ → γ → Prop f : Ordinal.{max u v} → Ordinal.{max u w} H : IsNormal f o : Ordinal.{u} α : Type u r : α → α → Prop x✝ : IsWellOrder α r g : (a : Ordinal.{u}) → a < type r → Ordinal.{max u v} h : type r ≠ 0 this : Nonempty α ⊢ type r = type r [PROOFSTEP] rfl [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} f : (a : Ordinal.{u}) → a < o → Ordinal.{max u v} hf : ∀ {i : Ordinal.{u}} (h : i < o), f i h ≠ bsup o f a : Ordinal.{max u v} ⊢ a < bsup o f → succ a < bsup o f [PROOFSTEP] rw [← sup_eq_bsup] at * [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} f : (a : Ordinal.{u}) → a < o → Ordinal.{max u v} hf : ∀ {i : Ordinal.{u}} (h : i < o), f i h ≠ sup (familyOfBFamily o f) a : Ordinal.{max u v} ⊢ a < sup (familyOfBFamily o f) → succ a < sup (familyOfBFamily o f) [PROOFSTEP] exact sup_not_succ_of_ne_sup fun i => hf _ [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u_4} f : (a : Ordinal.{u_4}) → a < o → Ordinal.{max u_5 u_4} ⊢ bsup o f = 0 ↔ ∀ (i : Ordinal.{u_4}) (hi : i < o), f i hi = 0 [PROOFSTEP] refine' ⟨fun h i hi => _, fun h => le_antisymm (bsup_le fun i hi => Ordinal.le_zero.2 (h i hi)) (Ordinal.zero_le _)⟩ 
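The trace here (continuing into the next block) proves that a bounded supremum vanishes exactly when every member of the family does. The statement and tactic script can be reassembled from the [GOAL]/[PROOFSTEP] entries into a standalone sketch; the import path, the universe setup, and the explicit bsup.{u, v} annotation are editorial assumptions, while the tactics and lemma names (refine', bsup_le, le_bsup, Ordinal.le_zero, Ordinal.zero_le) are taken from the trace itself. In Mathlib the finished result is presumably exposed as `Ordinal.bsup_eq_zero_iff`.

import Mathlib.SetTheory.Ordinal.Arithmetic

open Ordinal

universe u v

-- Standalone restatement of the goal above; the proof replays the recorded steps:
-- split the iff, then reduce `f i hi = 0` to `f i hi ≤ bsup o f` and close with `le_bsup`.
example {o : Ordinal.{u}} {f : ∀ a, a < o → Ordinal.{max u v}} :
    bsup.{u, v} o f = 0 ↔ ∀ i hi, f i hi = 0 := by
  refine' ⟨fun h i hi => _,
    fun h => le_antisymm (bsup_le fun i hi => Ordinal.le_zero.2 (h i hi)) (Ordinal.zero_le _)⟩
  rw [← Ordinal.le_zero, ← h]
  exact le_bsup f i hi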
[GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u_4} f : (a : Ordinal.{u_4}) → a < o → Ordinal.{max u_5 u_4} h : bsup o f = 0 i : Ordinal.{u_4} hi : i < o ⊢ f i hi = 0 [PROOFSTEP] rw [← Ordinal.le_zero, ← h] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u_4} f : (a : Ordinal.{u_4}) → a < o → Ordinal.{max u_5 u_4} h : bsup o f = 0 i : Ordinal.{u_4} hi : i < o ⊢ f i hi ≤ bsup o f [PROOFSTEP] exact le_bsup f i hi [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop f : (a : Ordinal.{u_4}) → a < 1 → Ordinal.{max u_4 u_5} ⊢ bsup 1 f = f 0 (_ : 0 < 1) [PROOFSTEP] simp_rw [← sup_eq_bsup, sup_unique, familyOfBFamily, familyOfBFamily', typein_one_out] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} o' : Ordinal.{v} f : (a : Ordinal.{u}) → a < o → Ordinal.{max (max u v) w} g : (a : Ordinal.{v}) → a < o' → Ordinal.{max (max u v) w} h : brange o f ⊆ brange o' g i : Ordinal.{u} hi : i < o ⊢ f i hi ≤ bsup o' g [PROOFSTEP] obtain ⟨j, hj, hj'⟩ := h ⟨i, hi, rfl⟩ [GOAL] case intro.intro α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} o' : Ordinal.{v} f : (a : Ordinal.{u}) → a < o → Ordinal.{max (max u v) w} g : (a : Ordinal.{v}) → a < o' → Ordinal.{max (max u v) w} h : brange o f ⊆ brange o' g i : Ordinal.{u} hi : i < o j : Ordinal.{v} hj : j < o' hj' : g j hj = f i hi ⊢ f i hi ≤ bsup o' g [PROOFSTEP] rw [← hj'] [GOAL] case intro.intro α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} o' : Ordinal.{v} f : (a : Ordinal.{u}) → a < o → Ordinal.{max (max u v) w} g : (a : Ordinal.{v}) → a < o' → Ordinal.{max (max u v) w} h : brange o f ⊆ brange o' g i : Ordinal.{u} hi : i < o j : Ordinal.{v} hj : j < o' hj' : g j hj = f i hi ⊢ g j hj ≤ bsup o' g [PROOFSTEP] apply le_bsup [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{max u v} a : Ordinal.{max v u} ⊢ lsub f ≤ a ↔ ∀ (i : ι), f i < a [PROOFSTEP] convert sup_le_iff.{_, v} (f := succ ∘ f) (a := a) using 2 -- Porting note: `comp_apply` is required. 
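The step just above begins the characterisation lsub f ≤ a ↔ ∀ i, f i < a, and the blocks that follow compare sup and lsub directly. Below is a minimal usage sketch, assuming a Mathlib import and reusing names that appear verbatim in the trace (lsub_le_iff, sup_le_lsub, lsub_le_sup_succ); Order.succ is the ordinal successor written succ throughout the trace, and the universe annotations mirror the trace's own `.{_, v}` convention.

import Mathlib.SetTheory.Ordinal.Arithmetic

open Ordinal

universe u v

-- `lsub` is a least strict upper bound: bounding it is the same as strictly
-- bounding every member of the family.
example {ι : Type u} {f : ι → Ordinal.{max u v}} {a : Ordinal.{max u v}} :
    lsub.{u, v} f ≤ a ↔ ∀ i, f i < a :=
  lsub_le_iff

-- Hence `lsub` is squeezed between `sup` and its successor, the two bounds the
-- trace below case-splits on.
example {ι : Type u} (f : ι → Ordinal.{max u v}) :
    sup.{u, v} f ≤ lsub.{u, v} f ∧ lsub.{u, v} f ≤ Order.succ (sup.{u, v} f) :=
  ⟨sup_le_lsub.{_, v} f, lsub_le_sup_succ f⟩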
[GOAL] case h.e'_2.h.a α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{max u v} a : Ordinal.{max v u} a✝ : ι ⊢ f a✝ < a ↔ (succ ∘ f) a✝ ≤ a [PROOFSTEP] simp only [comp_apply, succ_le_iff] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{max u v} a : Ordinal.{max v u} ⊢ a < lsub f ↔ ∃ i, a ≤ f i [PROOFSTEP] simpa only [not_forall, not_lt, not_le] using not_congr (@lsub_le_iff.{_, v} _ f a) [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{max u v} ⊢ sup f = lsub f ∨ succ (sup f) = lsub f [PROOFSTEP] cases' eq_or_lt_of_le (sup_le_lsub.{_, v} f) with h h [GOAL] case inl α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{max u v} h : sup f = lsub f ⊢ sup f = lsub f ∨ succ (sup f) = lsub f [PROOFSTEP] exact Or.inl h [GOAL] case inr α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{max u v} h : sup f < lsub f ⊢ sup f = lsub f ∨ succ (sup f) = lsub f [PROOFSTEP] exact Or.inr ((succ_le_of_lt h).antisymm (lsub_le_sup_succ f)) [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{max u v} ⊢ succ (sup f) ≤ lsub f ↔ ∃ i, f i = sup f [PROOFSTEP] refine' ⟨fun h => _, _⟩ [GOAL] case refine'_1 α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{max u v} h : succ (sup f) ≤ lsub f ⊢ ∃ i, f i = sup f [PROOFSTEP] by_contra' hf [GOAL] case refine'_1 α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{max u v} h : succ (sup f) ≤ lsub f hf : ∀ (i : ι), f i ≠ sup f ⊢ False [PROOFSTEP] exact (succ_le_iff.1 h).ne ((sup_le_lsub f).antisymm (lsub_le (ne_sup_iff_lt_sup.1 hf))) [GOAL] case refine'_2 α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{max u v} ⊢ (∃ i, f i = sup f) → succ (sup f) ≤ lsub f [PROOFSTEP] rintro ⟨_, hf⟩ [GOAL] case refine'_2.intro α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{max u v} w✝ : ι hf : f w✝ = sup f ⊢ succ (sup f) ≤ lsub f [PROOFSTEP] rw [succ_le_iff, ← hf] [GOAL] case refine'_2.intro α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{max u v} w✝ : ι hf : f w✝ = sup f ⊢ f w✝ < lsub f [PROOFSTEP] exact lt_lsub _ _ [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{max u v} ⊢ sup f = lsub f ↔ ∀ (a : Ordinal.{max v u}), a < lsub f → succ a < lsub f [PROOFSTEP] refine' ⟨fun h => _, fun hf => le_antisymm (sup_le_lsub f) (lsub_le fun i => _)⟩ [GOAL] case refine'_1 α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{max u v} h : sup f = lsub f ⊢ ∀ (a : Ordinal.{max v u}), a < lsub f → succ a < lsub f [PROOFSTEP] rw [← h] [GOAL] case refine'_1 α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{max u v} h : sup f = lsub f ⊢ ∀ (a : Ordinal.{max v u}), a < sup f → succ a < sup f [PROOFSTEP] exact fun a => sup_not_succ_of_ne_sup fun i => 
(lsub_le_iff.1 (le_of_eq h.symm) i).ne [GOAL] case refine'_2 α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{max u v} hf : ∀ (a : Ordinal.{max v u}), a < lsub f → succ a < lsub f i : ι ⊢ f i < sup f [PROOFSTEP] by_contra' hle [GOAL] case refine'_2 α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{max u v} hf : ∀ (a : Ordinal.{max v u}), a < lsub f → succ a < lsub f i : ι hle : sup f ≤ f i ⊢ False [PROOFSTEP] have heq := (sup_succ_eq_lsub f).2 ⟨i, le_antisymm (le_sup _ _) hle⟩ [GOAL] case refine'_2 α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{max u v} hf : ∀ (a : Ordinal.{max v u}), a < lsub f → succ a < lsub f i : ι hle : sup f ≤ f i heq : succ (sup f) = lsub f ⊢ False [PROOFSTEP] have := hf _ (by rw [← heq] exact lt_succ (sup f)) [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{max u v} hf : ∀ (a : Ordinal.{max v u}), a < lsub f → succ a < lsub f i : ι hle : sup f ≤ f i heq : succ (sup f) = lsub f ⊢ ?m.327001 < lsub f [PROOFSTEP] rw [← heq] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{max u v} hf : ∀ (a : Ordinal.{max v u}), a < lsub f → succ a < lsub f i : ι hle : sup f ≤ f i heq : succ (sup f) = lsub f ⊢ ?m.327001 < succ (sup f) α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{max u v} hf : ∀ (a : Ordinal.{max v u}), a < lsub f → succ a < lsub f i : ι hle : sup f ≤ f i heq : succ (sup f) = lsub f ⊢ Ordinal.{max v u} [PROOFSTEP] exact lt_succ (sup f) [GOAL] case refine'_2 α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{max u v} hf : ∀ (a : Ordinal.{max v u}), a < lsub f → succ a < lsub f i : ι hle : sup f ≤ f i heq : succ (sup f) = lsub f this : succ (sup f) < lsub f ⊢ False [PROOFSTEP] rw [heq] at this [GOAL] case refine'_2 α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{max u v} hf : ∀ (a : Ordinal.{max v u}), a < lsub f → succ a < lsub f i : ι hle : sup f ≤ f i heq : succ (sup f) = lsub f this : lsub f < lsub f ⊢ False [PROOFSTEP] exact this.false [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{max u v} h : sup f = lsub f i : ι ⊢ f i < sup f [PROOFSTEP] rw [h] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{max u v} h : sup f = lsub f i : ι ⊢ f i < lsub f [PROOFSTEP] apply lt_lsub [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u_4 h : IsEmpty ι f : ι → Ordinal.{max u_5 u_4} ⊢ lsub f = 0 [PROOFSTEP] rw [← Ordinal.le_zero, lsub_le_iff] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u_4 h : IsEmpty ι f : ι → Ordinal.{max u_5 u_4} ⊢ ∀ (i : ι), f i < 0 [PROOFSTEP] exact h.elim [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{max u v} ⊢ lsub f = 0 ↔ IsEmpty ι [PROOFSTEP] refine' ⟨fun h => ⟨fun i => _⟩, fun h => @lsub_empty _ h _⟩ [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α 
→ Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{max u v} h : lsub f = 0 i : ι ⊢ False [PROOFSTEP] have := @lsub_pos.{_, v} _ ⟨i⟩ f [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{max u v} h : lsub f = 0 i : ι this : 0 < lsub f ⊢ False [PROOFSTEP] rw [h] at this [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{max u v} h : lsub f = 0 i : ι this : 0 < 0 ⊢ False [PROOFSTEP] exact this.false [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u ι' : Type v f : ι → Ordinal.{max (max u v) w} g : ι' → Ordinal.{max (max u v) w} h : range f ⊆ range g ⊢ range (succ ∘ f) ⊆ range (succ ∘ g) [PROOFSTEP] convert Set.image_subset succ h [GOAL] case h.e'_3 α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u ι' : Type v f : ι → Ordinal.{max (max u v) w} g : ι' → Ordinal.{max (max u v) w} h : range f ⊆ range g ⊢ range (succ ∘ f) = succ '' range f [PROOFSTEP] apply Set.range_comp [GOAL] case h.e'_4 α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u ι' : Type v f : ι → Ordinal.{max (max u v) w} g : ι' → Ordinal.{max (max u v) w} h : range f ⊆ range g ⊢ range (succ ∘ g) = succ '' range g [PROOFSTEP] apply Set.range_comp [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} ⊢ o ≤ lsub fun i => typein (fun x x_1 => x < x_1) i [PROOFSTEP] by_contra' h [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} h : (lsub fun i => typein (fun x x_1 => x < x_1) i) < o ⊢ False [PROOFSTEP] conv_rhs at h => rw [← type_lt o] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} h : (lsub fun i => typein (fun x x_1 => x < x_1) i) < o | o [PROOFSTEP] rw [← type_lt o] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} h : (lsub fun i => typein (fun x x_1 => x < x_1) i) < o | o [PROOFSTEP] rw [← type_lt o] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} h : (lsub fun i => typein (fun x x_1 => x < x_1) i) < o | o [PROOFSTEP] rw [← type_lt o] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} h : (lsub fun i => typein (fun x x_1 => x < x_1) i) < type fun x x_1 => x < x_1 ⊢ False [PROOFSTEP] simpa [typein_enum] using lt_lsub.{u, u} (typein (· < ·)) (enum (· < ·) _ h) [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} ho : ∀ (a : Ordinal.{u}), a < o → succ a < o ⊢ sup (typein fun x x_1 => x < x_1) = o [PROOFSTEP] rw [(sup_eq_lsub_iff_succ.{u, u} (typein (· < ·))).2] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} ho : ∀ (a : Ordinal.{u}), a < o → succ a < o ⊢ lsub (typein fun x x_1 => x < x_1) = o [PROOFSTEP] rw [lsub_typein o] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} ho : ∀ (a : Ordinal.{u}), a < o → succ a < o ⊢ ∀ (a : Ordinal.{u}), a < lsub (typein fun x x_1 => x < x_1) → succ a < lsub (typein fun x x_1 => x < x_1) [PROOFSTEP] rw [lsub_typein o] [GOAL] α : 
Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} ho : ∀ (a : Ordinal.{u}), a < o → succ a < o ⊢ ∀ (a : Ordinal.{u}), a < o → succ a < o [PROOFSTEP] assumption [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} ⊢ sup (typein fun x x_1 => x < x_1) = o [PROOFSTEP] cases' sup_eq_lsub_or_sup_succ_eq_lsub.{u, u} (typein ((· < ·) : (succ o).out.α → (succ o).out.α → Prop)) with h h [GOAL] case inl α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} h : sup (typein fun x x_1 => x < x_1) = lsub (typein fun x x_1 => x < x_1) ⊢ sup (typein fun x x_1 => x < x_1) = o [PROOFSTEP] rw [sup_eq_lsub_iff_succ] at h [GOAL] case inl α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} h : ∀ (a : Ordinal.{u}), a < lsub (typein fun x x_1 => x < x_1) → succ a < lsub (typein fun x x_1 => x < x_1) ⊢ sup (typein fun x x_1 => x < x_1) = o [PROOFSTEP] simp only [lsub_typein] at h [GOAL] case inl α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} h : ∀ (a : Ordinal.{u}), a < succ o → succ a < succ o ⊢ sup (typein fun x x_1 => x < x_1) = o [PROOFSTEP] exact (h o (lt_succ o)).false.elim [GOAL] case inr α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} h : succ (sup (typein fun x x_1 => x < x_1)) = lsub (typein fun x x_1 => x < x_1) ⊢ sup (typein fun x x_1 => x < x_1) = o [PROOFSTEP] rw [← succ_eq_succ_iff, h] [GOAL] case inr α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} h : succ (sup (typein fun x x_1 => x < x_1)) = lsub (typein fun x x_1 => x < x_1) ⊢ lsub (typein fun x x_1 => x < x_1) = succ o [PROOFSTEP] apply lsub_typein [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι ι' : Type u r : ι → ι → Prop r' : ι' → ι' → Prop inst✝¹ : IsWellOrder ι r inst✝ : IsWellOrder ι' r' o : Ordinal.{u} ho : type r = o ho' : type r' = o f : (a : Ordinal.{u}) → a < o → Ordinal.{max u v} ⊢ lsub (familyOfBFamily' r ho f) = lsub (familyOfBFamily' r' ho' f) [PROOFSTEP] rw [lsub_eq_blsub', lsub_eq_blsub'] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u r r' : ι → ι → Prop inst✝¹ : IsWellOrder ι r inst✝ : IsWellOrder ι r' f : ι → Ordinal.{max u v} ⊢ blsub (type r) (bfamilyOfFamily' r f) = blsub (type r') (bfamilyOfFamily' r' f) [PROOFSTEP] rw [blsub_eq_lsub', blsub_eq_lsub'] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o₁ o₂ : Ordinal.{u} f : (a : Ordinal.{u}) → a < o₁ → Ordinal.{max u v} ho : o₁ = o₂ ⊢ blsub o₁ f = blsub o₂ fun a h => f a (_ : a < o₁) [PROOFSTEP] subst ho [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o₁ : Ordinal.{u} f : (a : Ordinal.{u}) → a < o₁ → Ordinal.{max u v} ⊢ blsub o₁ f = blsub o₁ fun a h => f a (_ : a < o₁) [PROOFSTEP] rfl [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} f : (a : Ordinal.{u}) → a < o → Ordinal.{max u v} a : Ordinal.{max u v} ⊢ blsub o f ≤ a ↔ ∀ (i : Ordinal.{u}) (h : i < o), f i h < a [PROOFSTEP] convert bsup_le_iff.{_, v} (f := fun a ha => succ (f a ha)) (a := a) using 2 [GOAL] case h.e'_2.h.a α : Type u_1 β : Type u_2 γ : Type u_3 r : α → 
α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} f : (a : Ordinal.{u}) → a < o → Ordinal.{max u v} a : Ordinal.{max u v} a✝ : Ordinal.{u} ⊢ (∀ (h : a✝ < o), f a✝ h < a) ↔ ∀ (h : a✝ < o), succ (f a✝ h) ≤ a [PROOFSTEP] simp_rw [succ_le_iff] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} f : (b : Ordinal.{u}) → b < o → Ordinal.{max u v} a : Ordinal.{max u v} ⊢ a < blsub o f ↔ ∃ i hi, a ≤ f i hi [PROOFSTEP] simpa only [not_forall, not_lt, not_le] using not_congr (@blsub_le_iff.{_, v} _ f a) [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} f : (a : Ordinal.{u}) → a < o → Ordinal.{max u v} ⊢ bsup o f = blsub o f ∨ succ (bsup o f) = blsub o f [PROOFSTEP] rw [← sup_eq_bsup, ← lsub_eq_blsub] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} f : (a : Ordinal.{u}) → a < o → Ordinal.{max u v} ⊢ sup (familyOfBFamily o f) = lsub (familyOfBFamily o f) ∨ succ (sup (familyOfBFamily o f)) = lsub (familyOfBFamily o f) [PROOFSTEP] exact sup_eq_lsub_or_sup_succ_eq_lsub _ [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} f : (a : Ordinal.{u}) → a < o → Ordinal.{max u v} ⊢ succ (bsup o f) ≤ blsub o f ↔ ∃ i hi, f i hi = bsup o f [PROOFSTEP] refine' ⟨fun h => _, _⟩ [GOAL] case refine'_1 α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} f : (a : Ordinal.{u}) → a < o → Ordinal.{max u v} h : succ (bsup o f) ≤ blsub o f ⊢ ∃ i hi, f i hi = bsup o f [PROOFSTEP] by_contra' hf [GOAL] case refine'_1 α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} f : (a : Ordinal.{u}) → a < o → Ordinal.{max u v} h : succ (bsup o f) ≤ blsub o f hf : ∀ (i : Ordinal.{u}) (hi : i < o), f i hi ≠ bsup o f ⊢ False [PROOFSTEP] exact ne_of_lt (succ_le_iff.1 h) (le_antisymm (bsup_le_blsub f) (blsub_le (lt_bsup_of_ne_bsup.1 hf))) [GOAL] case refine'_2 α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} f : (a : Ordinal.{u}) → a < o → Ordinal.{max u v} ⊢ (∃ i hi, f i hi = bsup o f) → succ (bsup o f) ≤ blsub o f [PROOFSTEP] rintro ⟨_, _, hf⟩ [GOAL] case refine'_2.intro.intro α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} f : (a : Ordinal.{u}) → a < o → Ordinal.{max u v} w✝¹ : Ordinal.{u} w✝ : w✝¹ < o hf : f w✝¹ w✝ = bsup o f ⊢ succ (bsup o f) ≤ blsub o f [PROOFSTEP] rw [succ_le_iff, ← hf] [GOAL] case refine'_2.intro.intro α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} f : (a : Ordinal.{u}) → a < o → Ordinal.{max u v} w✝¹ : Ordinal.{u} w✝ : w✝¹ < o hf : f w✝¹ w✝ = bsup o f ⊢ f w✝¹ w✝ < blsub o f [PROOFSTEP] exact lt_blsub _ _ _ [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} f : (a : Ordinal.{u}) → a < o → Ordinal.{max u v} ⊢ bsup o f = blsub o f ↔ ∀ (a : Ordinal.{max u v}), a < blsub o f → succ a < blsub o f [PROOFSTEP] rw [← sup_eq_bsup, ← lsub_eq_blsub] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} f : (a : Ordinal.{u}) → a < o → Ordinal.{max u v} ⊢ sup (familyOfBFamily o f) = lsub (familyOfBFamily o f) ↔ ∀ (a : Ordinal.{max u v}), a < lsub (familyOfBFamily o f) → succ a < lsub 
(familyOfBFamily o f) [PROOFSTEP] apply sup_eq_lsub_iff_succ [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} f : (a : Ordinal.{u}) → a < o → Ordinal.{max u v} h : bsup o f = blsub o f i : Ordinal.{u} ⊢ ∀ (hi : i < o), f i hi < bsup o f [PROOFSTEP] rw [h] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} f : (a : Ordinal.{u}) → a < o → Ordinal.{max u v} h : bsup o f = blsub o f i : Ordinal.{u} ⊢ ∀ (hi : i < o), f i hi < blsub o f [PROOFSTEP] apply lt_blsub [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} ho : IsLimit o f : (a : Ordinal.{u}) → a < o → Ordinal.{max u v} hf : ∀ (a : Ordinal.{u}) (ha : a < o), f a ha < f (succ a) (_ : succ a < o) ⊢ bsup o f = blsub o f [PROOFSTEP] rw [bsup_eq_blsub_iff_lt_bsup] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} ho : IsLimit o f : (a : Ordinal.{u}) → a < o → Ordinal.{max u v} hf : ∀ (a : Ordinal.{u}) (ha : a < o), f a ha < f (succ a) (_ : succ a < o) ⊢ ∀ (i : Ordinal.{u}) (hi : i < o), f i hi < bsup o f [PROOFSTEP] exact fun i hi => (hf i hi).trans_le (le_bsup f _ _) [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u_4} f : (a : Ordinal.{u_4}) → a < o → Ordinal.{max u_5 u_4} ⊢ blsub o f = 0 ↔ o = 0 [PROOFSTEP] rw [← lsub_eq_blsub, lsub_eq_zero_iff] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u_4} f : (a : Ordinal.{u_4}) → a < o → Ordinal.{max u_5 u_4} ⊢ IsEmpty (Quotient.out o).α ↔ o = 0 [PROOFSTEP] exact out_empty_iff_eq_zero [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop f : (a : Ordinal.{u_4}) → a < 0 → Ordinal.{max u_4 u_5} ⊢ blsub 0 f = 0 [PROOFSTEP] rw [blsub_eq_zero_iff] [GOAL] α✝ : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s : β → β → Prop t : γ → γ → Prop α : Type u r : α → α → Prop inst✝ : IsWellOrder α r f : (a : Ordinal.{u}) → a < type r → Ordinal.{max u v} o : Ordinal.{max u v} ⊢ blsub (type r) f ≤ o ↔ (lsub fun a => f (typein r a) (_ : typein r a < type r)) ≤ o [PROOFSTEP] rw [blsub_le_iff, lsub_le_iff] [GOAL] α✝ : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s : β → β → Prop t : γ → γ → Prop α : Type u r : α → α → Prop inst✝ : IsWellOrder α r f : (a : Ordinal.{u}) → a < type r → Ordinal.{max u v} o : Ordinal.{max u v} ⊢ (∀ (i : Ordinal.{u}) (h : i < type r), f i h < o) ↔ ∀ (i : α), f (typein r i) (_ : typein r i < type r) < o [PROOFSTEP] exact ⟨fun H b => H _ _, fun H i h => by simpa only [typein_enum] using H (enum r i h)⟩ [GOAL] α✝ : Type u_1 β : Type u_2 γ : Type u_3 r✝ : α✝ → α✝ → Prop s : β → β → Prop t : γ → γ → Prop α : Type u r : α → α → Prop inst✝ : IsWellOrder α r f : (a : Ordinal.{u}) → a < type r → Ordinal.{max u v} o : Ordinal.{max u v} H : ∀ (i : α), f (typein r i) (_ : typein r i < type r) < o i : Ordinal.{u} h : i < type r ⊢ f i h < o [PROOFSTEP] simpa only [typein_enum] using H (enum r i h) [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} o' : Ordinal.{v} f : (a : Ordinal.{u}) → a < o → Ordinal.{max (max u v) w} g : (a : Ordinal.{v}) → a < o' → Ordinal.{max (max u v) w} h : brange o f ⊆ brange o' g a : Ordinal.{max (max u v) w} x✝ : a ∈ brange o fun a ha => succ (f a ha) b : Ordinal.{u} hb : b < o hb' : 
(fun a ha => succ (f a ha)) b hb = a ⊢ a ∈ brange o' fun a ha => succ (g a ha) [PROOFSTEP] obtain ⟨c, hc, hc'⟩ := h ⟨b, hb, rfl⟩ [GOAL] case intro.intro α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} o' : Ordinal.{v} f : (a : Ordinal.{u}) → a < o → Ordinal.{max (max u v) w} g : (a : Ordinal.{v}) → a < o' → Ordinal.{max (max u v) w} h : brange o f ⊆ brange o' g a : Ordinal.{max (max u v) w} x✝ : a ∈ brange o fun a ha => succ (f a ha) b : Ordinal.{u} hb : b < o hb' : (fun a ha => succ (f a ha)) b hb = a c : Ordinal.{v} hc : c < o' hc' : g c hc = f b hb ⊢ a ∈ brange o' fun a ha => succ (g a ha) [PROOFSTEP] simp_rw [← hc'] at hb' [GOAL] case intro.intro α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} o' : Ordinal.{v} f : (a : Ordinal.{u}) → a < o → Ordinal.{max (max u v) w} g : (a : Ordinal.{v}) → a < o' → Ordinal.{max (max u v) w} h : brange o f ⊆ brange o' g a : Ordinal.{max (max u v) w} x✝ : a ∈ brange o fun a ha => succ (f a ha) b : Ordinal.{u} hb : b < o c : Ordinal.{v} hc : c < o' hc' : g c hc = f b hb hb' : succ (g c hc) = a ⊢ a ∈ brange o' fun a ha => succ (g a ha) [PROOFSTEP] exact ⟨c, hc, hb'⟩ [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o o' : Ordinal.{max u v} f : (a : Ordinal.{max u v}) → a < o → Ordinal.{max u v w} hf : ∀ {i j : Ordinal.{max u v}} (hi : i < o) (hj : j < o), i ≤ j → f i hi ≤ f j hj g : (a : Ordinal.{max u v}) → a < o' → Ordinal.{max u v} hg : blsub o' g = o a : Ordinal.{max u v} ha : a < o' ⊢ g a ha < o [PROOFSTEP] rw [← hg] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o o' : Ordinal.{max u v} f : (a : Ordinal.{max u v}) → a < o → Ordinal.{max u v w} hf : ∀ {i j : Ordinal.{max u v}} (hi : i < o) (hj : j < o), i ≤ j → f i hi ≤ f j hj g : (a : Ordinal.{max u v}) → a < o' → Ordinal.{max u v} hg : blsub o' g = o a : Ordinal.{max u v} ha : a < o' ⊢ g a ha < blsub o' g [PROOFSTEP] apply lt_blsub [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o o' : Ordinal.{max u v} f : (a : Ordinal.{max u v}) → a < o → Ordinal.{max u v w} hf : ∀ {i j : Ordinal.{max u v}} (hi : i < o) (hj : j < o), i ≤ j → f i hi ≤ f j hj g : (a : Ordinal.{max u v}) → a < o' → Ordinal.{max u v} hg : blsub o' g = o ⊢ (bsup o' fun a ha => f (g a ha) (_ : g a ha < o)) = bsup o f [PROOFSTEP] apply le_antisymm [GOAL] case a α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o o' : Ordinal.{max u v} f : (a : Ordinal.{max u v}) → a < o → Ordinal.{max u v w} hf : ∀ {i j : Ordinal.{max u v}} (hi : i < o) (hj : j < o), i ≤ j → f i hi ≤ f j hj g : (a : Ordinal.{max u v}) → a < o' → Ordinal.{max u v} hg : blsub o' g = o ⊢ (bsup o' fun a ha => f (g a ha) (_ : g a ha < o)) ≤ bsup o f [PROOFSTEP] refine' bsup_le fun i hi => _ [GOAL] case a α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o o' : Ordinal.{max u v} f : (a : Ordinal.{max u v}) → a < o → Ordinal.{max u v w} hf : ∀ {i j : Ordinal.{max u v}} (hi : i < o) (hj : j < o), i ≤ j → f i hi ≤ f j hj g : (a : Ordinal.{max u v}) → a < o' → Ordinal.{max u v} hg : blsub o' g = o ⊢ bsup o f ≤ bsup o' fun a ha => f (g a ha) (_ : g a ha < o) [PROOFSTEP] refine' bsup_le fun i hi => _ [GOAL] case a α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o o' : Ordinal.{max u v} f : (a : 
Ordinal.{max u v}) → a < o → Ordinal.{max u v w} hf : ∀ {i j : Ordinal.{max u v}} (hi : i < o) (hj : j < o), i ≤ j → f i hi ≤ f j hj g : (a : Ordinal.{max u v}) → a < o' → Ordinal.{max u v} hg : blsub o' g = o i : Ordinal.{max u v} hi : i < o' ⊢ f (g i hi) (_ : g i hi < o) ≤ bsup o f [PROOFSTEP] apply le_bsup [GOAL] case a α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o o' : Ordinal.{max u v} f : (a : Ordinal.{max u v}) → a < o → Ordinal.{max u v w} hf : ∀ {i j : Ordinal.{max u v}} (hi : i < o) (hj : j < o), i ≤ j → f i hi ≤ f j hj g : (a : Ordinal.{max u v}) → a < o' → Ordinal.{max u v} hg : blsub o' g = o i : Ordinal.{max u v} hi : i < o ⊢ f i hi ≤ bsup o' fun a ha => f (g a ha) (_ : g a ha < o) [PROOFSTEP] rw [← hg, lt_blsub_iff] at hi [GOAL] case a α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o o' : Ordinal.{max u v} f : (a : Ordinal.{max u v}) → a < o → Ordinal.{max u v w} hf : ∀ {i j : Ordinal.{max u v}} (hi : i < o) (hj : j < o), i ≤ j → f i hi ≤ f j hj g : (a : Ordinal.{max u v}) → a < o' → Ordinal.{max u v} hg : blsub o' g = o i : Ordinal.{max u v} hi✝ : i < o hi : ∃ i_1 hi, i ≤ g i_1 hi ⊢ f i hi✝ ≤ bsup o' fun a ha => f (g a ha) (_ : g a ha < o) [PROOFSTEP] rcases hi with ⟨j, hj, hj'⟩ [GOAL] case a.intro.intro α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o o' : Ordinal.{max u v} f : (a : Ordinal.{max u v}) → a < o → Ordinal.{max u v w} hf : ∀ {i j : Ordinal.{max u v}} (hi : i < o) (hj : j < o), i ≤ j → f i hi ≤ f j hj g : (a : Ordinal.{max u v}) → a < o' → Ordinal.{max u v} hg : blsub o' g = o i : Ordinal.{max u v} hi : i < o j : Ordinal.{max u v} hj : j < o' hj' : i ≤ g j hj ⊢ f i hi ≤ bsup o' fun a ha => f (g a ha) (_ : g a ha < o) [PROOFSTEP] exact (hf _ _ hj').trans (le_bsup _ _ _) [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o o' : Ordinal.{max u v} f : (a : Ordinal.{max u v}) → a < o → Ordinal.{max u v w} hf : ∀ {i j : Ordinal.{max u v}} (hi : i < o) (hj : j < o), i ≤ j → f i hi ≤ f j hj g : (a : Ordinal.{max u v}) → a < o' → Ordinal.{max u v} hg : blsub o' g = o a : Ordinal.{max u v} ha : a < o' ⊢ g a ha < o [PROOFSTEP] rw [← hg] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o o' : Ordinal.{max u v} f : (a : Ordinal.{max u v}) → a < o → Ordinal.{max u v w} hf : ∀ {i j : Ordinal.{max u v}} (hi : i < o) (hj : j < o), i ≤ j → f i hi ≤ f j hj g : (a : Ordinal.{max u v}) → a < o' → Ordinal.{max u v} hg : blsub o' g = o a : Ordinal.{max u v} ha : a < o' ⊢ g a ha < blsub o' g [PROOFSTEP] apply lt_blsub [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop f : Ordinal.{u} → Ordinal.{max u v} H : IsNormal f o : Ordinal.{u} h : IsLimit o ⊢ (Ordinal.bsup o fun x x_1 => f x) = f o [PROOFSTEP] rw [← IsNormal.bsup.{u, u, v} H (fun x _ => x) h.1, bsup_id_limit h.2] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop f : Ordinal.{u} → Ordinal.{max u v} H : IsNormal f o : Ordinal.{u} h : IsLimit o ⊢ (blsub o fun x x_1 => f x) = f o [PROOFSTEP] rw [← IsNormal.bsup_eq.{u, v} H h, bsup_eq_blsub_of_lt_succ_limit h] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop f : Ordinal.{u} → Ordinal.{max u v} H : IsNormal f o : Ordinal.{u} h : IsLimit o ⊢ ∀ (a : Ordinal.{u}), a < o → f a < f (succ a) [PROOFSTEP] exact fun a _ => H.1 a 
[GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop f : Ordinal.{u} → Ordinal.{max u v} x✝ : (∀ (a : Ordinal.{u}), f a < f (succ a)) ∧ ∀ (o : Ordinal.{u}), IsLimit o → (bsup o fun x x_1 => f x) = f o h₁ : ∀ (a : Ordinal.{u}), f a < f (succ a) h₂ : ∀ (o : Ordinal.{u}), IsLimit o → (bsup o fun x x_1 => f x) = f o o : Ordinal.{u} ho : IsLimit o a : Ordinal.{max u v} ⊢ f o ≤ a ↔ ∀ (b : Ordinal.{u}), b < o → f b ≤ a [PROOFSTEP] rw [← h₂ o ho] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop f : Ordinal.{u} → Ordinal.{max u v} x✝ : (∀ (a : Ordinal.{u}), f a < f (succ a)) ∧ ∀ (o : Ordinal.{u}), IsLimit o → (bsup o fun x x_1 => f x) = f o h₁ : ∀ (a : Ordinal.{u}), f a < f (succ a) h₂ : ∀ (o : Ordinal.{u}), IsLimit o → (bsup o fun x x_1 => f x) = f o o : Ordinal.{u} ho : IsLimit o a : Ordinal.{max u v} ⊢ (bsup o fun x x_1 => f x) ≤ a ↔ ∀ (b : Ordinal.{u}), b < o → f b ≤ a [PROOFSTEP] exact bsup_le_iff [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop f : Ordinal.{u} → Ordinal.{max u v} ⊢ IsNormal f ↔ (∀ (a : Ordinal.{u}), f a < f (succ a)) ∧ ∀ (o : Ordinal.{u}), IsLimit o → (blsub o fun x x_1 => f x) = f o [PROOFSTEP] rw [isNormal_iff_lt_succ_and_bsup_eq.{u, v}, and_congr_right_iff] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop f : Ordinal.{u} → Ordinal.{max u v} ⊢ (∀ (a : Ordinal.{u}), f a < f (succ a)) → ((∀ (o : Ordinal.{u}), IsLimit o → (bsup o fun x x_1 => f x) = f o) ↔ ∀ (o : Ordinal.{u}), IsLimit o → (blsub o fun x x_1 => f x) = f o) [PROOFSTEP] intro h [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop f : Ordinal.{u} → Ordinal.{max u v} h : ∀ (a : Ordinal.{u}), f a < f (succ a) ⊢ (∀ (o : Ordinal.{u}), IsLimit o → (bsup o fun x x_1 => f x) = f o) ↔ ∀ (o : Ordinal.{u}), IsLimit o → (blsub o fun x x_1 => f x) = f o [PROOFSTEP] constructor [GOAL] case mp α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop f : Ordinal.{u} → Ordinal.{max u v} h : ∀ (a : Ordinal.{u}), f a < f (succ a) ⊢ (∀ (o : Ordinal.{u}), IsLimit o → (bsup o fun x x_1 => f x) = f o) → ∀ (o : Ordinal.{u}), IsLimit o → (blsub o fun x x_1 => f x) = f o [PROOFSTEP] intro H o ho [GOAL] case mpr α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop f : Ordinal.{u} → Ordinal.{max u v} h : ∀ (a : Ordinal.{u}), f a < f (succ a) ⊢ (∀ (o : Ordinal.{u}), IsLimit o → (blsub o fun x x_1 => f x) = f o) → ∀ (o : Ordinal.{u}), IsLimit o → (bsup o fun x x_1 => f x) = f o [PROOFSTEP] intro H o ho [GOAL] case mp α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop f : Ordinal.{u} → Ordinal.{max u v} h : ∀ (a : Ordinal.{u}), f a < f (succ a) H : ∀ (o : Ordinal.{u}), IsLimit o → (bsup o fun x x_1 => f x) = f o o : Ordinal.{u} ho : IsLimit o ⊢ (blsub o fun x x_1 => f x) = f o [PROOFSTEP] have := H o ho [GOAL] case mpr α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop f : Ordinal.{u} → Ordinal.{max u v} h : ∀ (a : Ordinal.{u}), f a < f (succ a) H : ∀ (o : Ordinal.{u}), IsLimit o → (blsub o fun x x_1 => f x) = f o o : Ordinal.{u} ho : IsLimit o ⊢ (bsup o fun x x_1 => f x) = f o [PROOFSTEP] have := H o ho [GOAL] case mp α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop f : Ordinal.{u} → Ordinal.{max u v} h : ∀ (a : Ordinal.{u}), f a 
< f (succ a) H : ∀ (o : Ordinal.{u}), IsLimit o → (bsup o fun x x_1 => f x) = f o o : Ordinal.{u} ho : IsLimit o this : (bsup o fun x x_1 => f x) = f o ⊢ (blsub o fun x x_1 => f x) = f o [PROOFSTEP] rwa [← bsup_eq_blsub_of_lt_succ_limit ho fun a _ => h a] at * [GOAL] case mpr α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop f : Ordinal.{u} → Ordinal.{max u v} h : ∀ (a : Ordinal.{u}), f a < f (succ a) H : ∀ (o : Ordinal.{u}), IsLimit o → (blsub o fun x x_1 => f x) = f o o : Ordinal.{u} ho : IsLimit o this : (blsub o fun x x_1 => f x) = f o ⊢ (bsup o fun x x_1 => f x) = f o [PROOFSTEP] rwa [← bsup_eq_blsub_of_lt_succ_limit ho fun a _ => h a] at * [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop f g : Ordinal.{u} → Ordinal.{u} hf : IsNormal f hg : IsNormal g h : f = g ⊢ f 0 = g 0 ∧ ∀ (a : Ordinal.{u}), f a = g a → f (succ a) = g (succ a) [PROOFSTEP] simp [h] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop f g : Ordinal.{u} → Ordinal.{u} hf : IsNormal f hg : IsNormal g x✝ : f 0 = g 0 ∧ ∀ (a : Ordinal.{u}), f a = g a → f (succ a) = g (succ a) h₁ : f 0 = g 0 h₂ : ∀ (a : Ordinal.{u}), f a = g a → f (succ a) = g (succ a) a : Ordinal.{u} ⊢ f a = g a [PROOFSTEP] induction' a using limitRecOn with _ _ _ ho H [GOAL] case H₁ α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop f g : Ordinal.{u} → Ordinal.{u} hf : IsNormal f hg : IsNormal g x✝ : f 0 = g 0 ∧ ∀ (a : Ordinal.{u}), f a = g a → f (succ a) = g (succ a) h₁ : f 0 = g 0 h₂ : ∀ (a : Ordinal.{u}), f a = g a → f (succ a) = g (succ a) ⊢ f 0 = g 0 case H₂ α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop f g : Ordinal.{u} → Ordinal.{u} hf : IsNormal f hg : IsNormal g x✝ : f 0 = g 0 ∧ ∀ (a : Ordinal.{u}), f a = g a → f (succ a) = g (succ a) h₁ : f 0 = g 0 h₂ : ∀ (a : Ordinal.{u}), f a = g a → f (succ a) = g (succ a) o✝ : Ordinal.{u} a✝ : f o✝ = g o✝ ⊢ f (succ o✝) = g (succ o✝) case H₃ α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop f g : Ordinal.{u} → Ordinal.{u} hf : IsNormal f hg : IsNormal g x✝ : f 0 = g 0 ∧ ∀ (a : Ordinal.{u}), f a = g a → f (succ a) = g (succ a) h₁ : f 0 = g 0 h₂ : ∀ (a : Ordinal.{u}), f a = g a → f (succ a) = g (succ a) o✝ : Ordinal.{u} ho : IsLimit o✝ H : ∀ (o' : Ordinal.{u}), o' < o✝ → f o' = g o' ⊢ f o✝ = g o✝ [PROOFSTEP] any_goals solve_by_elim [GOAL] case H₁ α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop f g : Ordinal.{u} → Ordinal.{u} hf : IsNormal f hg : IsNormal g x✝ : f 0 = g 0 ∧ ∀ (a : Ordinal.{u}), f a = g a → f (succ a) = g (succ a) h₁ : f 0 = g 0 h₂ : ∀ (a : Ordinal.{u}), f a = g a → f (succ a) = g (succ a) ⊢ f 0 = g 0 [PROOFSTEP] solve_by_elim [GOAL] case H₂ α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop f g : Ordinal.{u} → Ordinal.{u} hf : IsNormal f hg : IsNormal g x✝ : f 0 = g 0 ∧ ∀ (a : Ordinal.{u}), f a = g a → f (succ a) = g (succ a) h₁ : f 0 = g 0 h₂ : ∀ (a : Ordinal.{u}), f a = g a → f (succ a) = g (succ a) o✝ : Ordinal.{u} a✝ : f o✝ = g o✝ ⊢ f (succ o✝) = g (succ o✝) [PROOFSTEP] solve_by_elim [GOAL] case H₃ α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop f g : Ordinal.{u} → Ordinal.{u} hf : IsNormal f hg : IsNormal g x✝ : f 0 = g 0 ∧ ∀ (a : Ordinal.{u}), f a = g a → f (succ a) = g (succ a) h₁ : f 0 = g 0 h₂ : ∀ (a : 
Ordinal.{u}), f a = g a → f (succ a) = g (succ a) o✝ : Ordinal.{u} ho : IsLimit o✝ H : ∀ (o' : Ordinal.{u}), o' < o✝ → f o' = g o' ⊢ f o✝ = g o✝ [PROOFSTEP] solve_by_elim [GOAL] case H₃ α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop f g : Ordinal.{u} → Ordinal.{u} hf : IsNormal f hg : IsNormal g x✝ : f 0 = g 0 ∧ ∀ (a : Ordinal.{u}), f a = g a → f (succ a) = g (succ a) h₁ : f 0 = g 0 h₂ : ∀ (a : Ordinal.{u}), f a = g a → f (succ a) = g (succ a) o✝ : Ordinal.{u} ho : IsLimit o✝ H : ∀ (o' : Ordinal.{u}), o' < o✝ → f o' = g o' ⊢ f o✝ = g o✝ [PROOFSTEP] rw [← IsNormal.bsup_eq.{u, u} hf ho, ← IsNormal.bsup_eq.{u, u} hg ho] [GOAL] case H₃ α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop f g : Ordinal.{u} → Ordinal.{u} hf : IsNormal f hg : IsNormal g x✝ : f 0 = g 0 ∧ ∀ (a : Ordinal.{u}), f a = g a → f (succ a) = g (succ a) h₁ : f 0 = g 0 h₂ : ∀ (a : Ordinal.{u}), f a = g a → f (succ a) = g (succ a) o✝ : Ordinal.{u} ho : IsLimit o✝ H : ∀ (o' : Ordinal.{u}), o' < o✝ → f o' = g o' ⊢ (Ordinal.bsup o✝ fun x x_1 => f x) = Ordinal.bsup o✝ fun x x_1 => g x [PROOFSTEP] congr [GOAL] case H₃.e_f α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop f g : Ordinal.{u} → Ordinal.{u} hf : IsNormal f hg : IsNormal g x✝ : f 0 = g 0 ∧ ∀ (a : Ordinal.{u}), f a = g a → f (succ a) = g (succ a) h₁ : f 0 = g 0 h₂ : ∀ (a : Ordinal.{u}), f a = g a → f (succ a) = g (succ a) o✝ : Ordinal.{u} ho : IsLimit o✝ H : ∀ (o' : Ordinal.{u}), o' < o✝ → f o' = g o' ⊢ (fun x x_1 => f x) = fun x x_1 => g x [PROOFSTEP] ext b hb [GOAL] case H₃.e_f.h.h α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop f g : Ordinal.{u} → Ordinal.{u} hf : IsNormal f hg : IsNormal g x✝ : f 0 = g 0 ∧ ∀ (a : Ordinal.{u}), f a = g a → f (succ a) = g (succ a) h₁ : f 0 = g 0 h₂ : ∀ (a : Ordinal.{u}), f a = g a → f (succ a) = g (succ a) o✝ : Ordinal.{u} ho : IsLimit o✝ H : ∀ (o' : Ordinal.{u}), o' < o✝ → f o' = g o' b : Ordinal.{u} hb : b < o✝ ⊢ f b = g b [PROOFSTEP] exact H b hb [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o₁ : Ordinal.{u_4} o₂ : Ordinal.{u_5} op : {a : Ordinal.{u_4}} → a < o₁ → {b : Ordinal.{u_5}} → b < o₂ → Ordinal.{max (max u_4 u_5) u_6} a : Ordinal.{u_4} b : Ordinal.{u_5} ha : a < o₁ hb : b < o₂ ⊢ op ha hb < blsub₂ o₁ o₂ fun {a} => op [PROOFSTEP] convert lt_lsub _ (Prod.mk (enum (· < ·) a (by rwa [type_lt])) (enum (· < ·) b (by rwa [type_lt]))) [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o₁ : Ordinal.{u_4} o₂ : Ordinal.{u_5} op : {a : Ordinal.{u_4}} → a < o₁ → {b : Ordinal.{u_5}} → b < o₂ → Ordinal.{max (max u_4 u_5) u_6} a : Ordinal.{u_4} b : Ordinal.{u_5} ha : a < o₁ hb : b < o₂ ⊢ a < type fun x x_1 => x < x_1 [PROOFSTEP] rwa [type_lt] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o₁ : Ordinal.{u_4} o₂ : Ordinal.{u_5} op : {a : Ordinal.{u_4}} → a < o₁ → {b : Ordinal.{u_5}} → b < o₂ → Ordinal.{max (max u_4 u_5) u_6} a : Ordinal.{u_4} b : Ordinal.{u_5} ha : a < o₁ hb : b < o₂ ⊢ b < type fun x x_1 => x < x_1 [PROOFSTEP] rwa [type_lt] [GOAL] case h.e'_3 α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o₁ : Ordinal.{u_4} o₂ : Ordinal.{u_5} op : {a : Ordinal.{u_4}} → a < o₁ → {b : Ordinal.{u_5}} → b < o₂ → Ordinal.{max (max u_4 u_5) u_6} a : Ordinal.{u_4} b : Ordinal.{u_5} ha : a < o₁ hb : b < o₂ 
⊢ op ha hb = (fun {a} => op) (_ : typein (fun x x_1 => x < x_1) (enum (fun x x_1 => x < x_1) a (_ : a < type fun x x_1 => x < x_1), enum (fun x x_1 => x < x_1) b (_ : b < type fun x x_1 => x < x_1)).fst < o₁) (_ : typein (fun x x_1 => x < x_1) (enum (fun x x_1 => x < x_1) a (_ : a < type fun x x_1 => x < x_1), enum (fun x x_1 => x < x_1) b (_ : b < type fun x x_1 => x < x_1)).snd < o₂) [PROOFSTEP] simp only [typein_enum] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{max u v} a : Ordinal.{max u v} H : ∀ (b : Ordinal.{max u v}), b < a → ∃ i, f i = b ⊢ a ≤ mex f [PROOFSTEP] by_contra' h [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{max u v} a : Ordinal.{max u v} H : ∀ (b : Ordinal.{max u v}), b < a → ∃ i, f i = b h : mex f < a ⊢ False [PROOFSTEP] exact mex_not_mem_range f (H _ h) [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{max u v} ⊢ ∀ (i : ι), f i ≠ mex f [PROOFSTEP] simpa using mex_not_mem_range.{_, v} f [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u_4 f : ι → Ordinal.{max u_5 u_4} a : Ordinal.{max u_5 u_4} ha : ∀ (i : ι), f i ≠ a ⊢ a ∈ (range f)ᶜ [PROOFSTEP] simp [ha] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u_4 f : ι → Ordinal.{max u_5 u_4} a : Ordinal.{max u_4 u_5} ha : a < mex f ⊢ ∃ i, f i = a [PROOFSTEP] by_contra' ha' [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u_4 f : ι → Ordinal.{max u_5 u_4} a : Ordinal.{max u_4 u_5} ha : a < mex f ha' : ∀ (i : ι), f i ≠ a ⊢ False [PROOFSTEP] exact ha.not_le (mex_le_of_ne ha') [GOAL] α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r : α✝ → α✝ → Prop s : β✝ → β✝ → Prop t : γ → γ → Prop α β : Type u f : α → Ordinal.{max u v} g : β → Ordinal.{max u v} h : range f ⊆ range g ⊢ mex f ≤ mex g [PROOFSTEP] refine' mex_le_of_ne fun i hi => _ [GOAL] α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r : α✝ → α✝ → Prop s : β✝ → β✝ → Prop t : γ → γ → Prop α β : Type u f : α → Ordinal.{max u v} g : β → Ordinal.{max u v} h : range f ⊆ range g i : α hi : f i = mex g ⊢ False [PROOFSTEP] cases' h ⟨i, rfl⟩ with j hj [GOAL] case intro α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r : α✝ → α✝ → Prop s : β✝ → β✝ → Prop t : γ → γ → Prop α β : Type u f : α → Ordinal.{max u v} g : β → Ordinal.{max u v} h : range f ⊆ range g i : α hi : f i = mex g j : β hj : g j = f i ⊢ False [PROOFSTEP] rw [← hj] at hi [GOAL] case intro α✝ : Type u_1 β✝ : Type u_2 γ : Type u_3 r : α✝ → α✝ → Prop s : β✝ → β✝ → Prop t : γ → γ → Prop α β : Type u f : α → Ordinal.{max u v} g : β → Ordinal.{max u v} h : range f ⊆ range g i : α j : β hi : g j = mex g hj : g j = f i ⊢ False [PROOFSTEP] exact ne_mex g j hi [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{u} ⊢ mex f < ord (succ #ι) [PROOFSTEP] by_contra' h [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{u} h : ord (succ #ι) ≤ mex f ⊢ False [PROOFSTEP] apply (lt_succ #ι).not_le [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{u} h : ord (succ #ι) ≤ mex f ⊢ succ #ι ≤ #ι [PROOFSTEP] have H := fun a => exists_of_lt_mex 
((typein_lt_self a).trans_le h) [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{u} h : ord (succ #ι) ≤ mex f H : ∀ (a : (Quotient.out (ord (succ #ι))).α), ∃ i, f i = typein (fun x x_1 => x < x_1) a ⊢ succ #ι ≤ #ι [PROOFSTEP] let g : (succ #ι).ord.out.α → ι := fun a => Classical.choose (H a) [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{u} h : ord (succ #ι) ≤ mex f H : ∀ (a : (Quotient.out (ord (succ #ι))).α), ∃ i, f i = typein (fun x x_1 => x < x_1) a g : (Quotient.out (ord (succ #ι))).α → ι := fun a => choose (_ : ∃ i, f i = typein (fun x x_1 => x < x_1) a) ⊢ succ #ι ≤ #ι [PROOFSTEP] have hg : Injective g := fun a b h' => by have Hf : ∀ x, f (g x) = typein ((· < ·) : (succ #ι).ord.out.α → (succ #ι).ord.out.α → Prop) x := fun a => Classical.choose_spec (H a) apply_fun f at h' rwa [Hf, Hf, typein_inj] at h' [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{u} h : ord (succ #ι) ≤ mex f H : ∀ (a : (Quotient.out (ord (succ #ι))).α), ∃ i, f i = typein (fun x x_1 => x < x_1) a g : (Quotient.out (ord (succ #ι))).α → ι := fun a => choose (_ : ∃ i, f i = typein (fun x x_1 => x < x_1) a) a b : (Quotient.out (ord (succ #ι))).α h' : g a = g b ⊢ a = b [PROOFSTEP] have Hf : ∀ x, f (g x) = typein ((· < ·) : (succ #ι).ord.out.α → (succ #ι).ord.out.α → Prop) x := fun a => Classical.choose_spec (H a) [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{u} h : ord (succ #ι) ≤ mex f H : ∀ (a : (Quotient.out (ord (succ #ι))).α), ∃ i, f i = typein (fun x x_1 => x < x_1) a g : (Quotient.out (ord (succ #ι))).α → ι := fun a => choose (_ : ∃ i, f i = typein (fun x x_1 => x < x_1) a) a b : (Quotient.out (ord (succ #ι))).α h' : g a = g b Hf : ∀ (x : (Quotient.out (ord (succ #ι))).α), f (g x) = typein (fun x x_1 => x < x_1) x ⊢ a = b [PROOFSTEP] apply_fun f at h' [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{u} h : ord (succ #ι) ≤ mex f H : ∀ (a : (Quotient.out (ord (succ #ι))).α), ∃ i, f i = typein (fun x x_1 => x < x_1) a g : (Quotient.out (ord (succ #ι))).α → ι := fun a => choose (_ : ∃ i, f i = typein (fun x x_1 => x < x_1) a) a b : (Quotient.out (ord (succ #ι))).α Hf : ∀ (x : (Quotient.out (ord (succ #ι))).α), f (g x) = typein (fun x x_1 => x < x_1) x h' : f (g a) = f (g b) ⊢ a = b [PROOFSTEP] rwa [Hf, Hf, typein_inj] at h' [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{u} h : ord (succ #ι) ≤ mex f H : ∀ (a : (Quotient.out (ord (succ #ι))).α), ∃ i, f i = typein (fun x x_1 => x < x_1) a g : (Quotient.out (ord (succ #ι))).α → ι := fun a => choose (_ : ∃ i, f i = typein (fun x x_1 => x < x_1) a) hg : Injective g ⊢ succ #ι ≤ #ι [PROOFSTEP] convert Cardinal.mk_le_of_injective hg [GOAL] case h.e'_3 α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal.{u} h : ord (succ #ι) ≤ mex f H : ∀ (a : (Quotient.out (ord (succ #ι))).α), ∃ i, f i = typein (fun x x_1 => x < x_1) a g : (Quotient.out (ord (succ #ι))).α → ι := fun a => choose (_ : ∃ i, f i = typein (fun x x_1 => x < x_1) a) hg : Injective g ⊢ succ #ι = #(Quotient.out (ord (succ #ι))).α [PROOFSTEP] rw [Cardinal.mk_ord_out (succ #ι)] [GOAL] α : Type u_1 β : 
Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u_4} f : (a : Ordinal.{u_4}) → a < o → Ordinal.{max u_4 u_5} ⊢ ¬bmex o f ∈ brange o f [PROOFSTEP] rw [← range_familyOfBFamily] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u_4} f : (a : Ordinal.{u_4}) → a < o → Ordinal.{max u_4 u_5} ⊢ ¬bmex o f ∈ range (familyOfBFamily o f) [PROOFSTEP] apply mex_not_mem_range [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u_4} f : (a : Ordinal.{u_4}) → a < o → Ordinal.{max u_4 u_5} a : Ordinal.{max u_4 u_5} H : ∀ (b : Ordinal.{max u_4 u_5}), b < a → ∃ i hi, f i hi = b ⊢ a ≤ bmex o f [PROOFSTEP] by_contra' h [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u_4} f : (a : Ordinal.{u_4}) → a < o → Ordinal.{max u_4 u_5} a : Ordinal.{max u_4 u_5} H : ∀ (b : Ordinal.{max u_4 u_5}), b < a → ∃ i hi, f i hi = b h : bmex o f < a ⊢ False [PROOFSTEP] exact bmex_not_mem_brange f (H _ h) [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} f : (a : Ordinal.{u}) → a < o → Ordinal.{max u v} i : Ordinal.{u} hi : i < o ⊢ f i hi ≠ bmex o f [PROOFSTEP] convert (config := { transparency := .default }) ne_mex.{_, v} (familyOfBFamily o f) (enum (· < ·) i (by rwa [type_lt])) using 2 -- Porting note: `familyOfBFamily_enum` → `typein_enum` [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} f : (a : Ordinal.{u}) → a < o → Ordinal.{max u v} i : Ordinal.{u} hi : i < o ⊢ i < type fun x x_1 => x < x_1 [PROOFSTEP] rwa [type_lt] [GOAL] case h.e'_2.h.e'_1 α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} f : (a : Ordinal.{u}) → a < o → Ordinal.{max u v} i : Ordinal.{u} hi : i < o ⊢ i = typein (fun x x_1 => x < x_1) (enum (fun x x_1 => x < x_1) i (_ : i < type fun x x_1 => x < x_1)) [PROOFSTEP] rw [typein_enum] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u_4} f : (a : Ordinal.{u_4}) → a < o → Ordinal.{max u_4 u_5} a : Ordinal.{max u_5 u_4} ha : a < bmex o f ⊢ ∃ i hi, f i hi = a [PROOFSTEP] cases' exists_of_lt_mex ha with i hi [GOAL] case intro α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u_4} f : (a : Ordinal.{u_4}) → a < o → Ordinal.{max u_4 u_5} a : Ordinal.{max u_5 u_4} ha : a < bmex o f i : (Quotient.out o).α hi : familyOfBFamily o f i = a ⊢ ∃ i hi, f i hi = a [PROOFSTEP] exact ⟨_, typein_lt_self i, hi⟩ [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o o' : Ordinal.{u} f : (a : Ordinal.{u}) → a < o → Ordinal.{max u v} g : (a : Ordinal.{u}) → a < o' → Ordinal.{max u v} h : brange o f ⊆ brange o' g ⊢ range (familyOfBFamily o f) ⊆ range (familyOfBFamily o' g) [PROOFSTEP] rwa [range_familyOfBFamily, range_familyOfBFamily] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} f : (a : Ordinal.{u}) → a < o → Ordinal.{u} ⊢ bmex o f < ord (succ (card o)) [PROOFSTEP] rw [← mk_ordinal_out] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop o : Ordinal.{u} f : (a : Ordinal.{u}) → a < o → Ordinal.{u} ⊢ bmex o f < ord (succ #(Quotient.out o).α) [PROOFSTEP] exact mex_lt_ord_succ_mk 
(familyOfBFamily o f) [GOAL] S : Set Ordinal.{u} hS : Unbounded (fun x x_1 => x < x_1) S o : Ordinal.{u} ⊢ enumOrd S o ∈ S ∩ Ici (blsub o fun c x => enumOrd S c) [PROOFSTEP] rw [enumOrd_def'] [GOAL] S : Set Ordinal.{u} hS : Unbounded (fun x x_1 => x < x_1) S o : Ordinal.{u} ⊢ sInf (S ∩ Ici (blsub o fun a x => enumOrd S a)) ∈ S ∩ Ici (blsub o fun c x => enumOrd S c) [PROOFSTEP] exact csInf_mem (enumOrd_def'_nonempty hS _) [GOAL] S : Set Ordinal.{u} o : Ordinal.{u} ⊢ enumOrd S o = sInf (S ∩ {b | ∀ (c : Ordinal.{u}), c < o → enumOrd S c < b}) [PROOFSTEP] rw [enumOrd_def'] [GOAL] S : Set Ordinal.{u} o : Ordinal.{u} ⊢ sInf (S ∩ Ici (blsub o fun a x => enumOrd S a)) = sInf (S ∩ {b | ∀ (c : Ordinal.{u}), c < o → enumOrd S c < b}) [PROOFSTEP] congr [GOAL] case e_a.e_a S : Set Ordinal.{u} o : Ordinal.{u} ⊢ Ici (blsub o fun a x => enumOrd S a) = {b | ∀ (c : Ordinal.{u}), c < o → enumOrd S c < b} [PROOFSTEP] ext [GOAL] case e_a.e_a.h S : Set Ordinal.{u} o x✝ : Ordinal.{u} ⊢ x✝ ∈ Ici (blsub o fun a x => enumOrd S a) ↔ x✝ ∈ {b | ∀ (c : Ordinal.{u}), c < o → enumOrd S c < b} [PROOFSTEP] exact ⟨fun h a hao => (lt_blsub.{u, u} _ _ hao).trans_le h, blsub_le⟩ [GOAL] S : Set Ordinal.{u} f : Ordinal.{u_1} → Ordinal.{u_1} hf : StrictMono f o : Ordinal.{u_1} ⊢ enumOrd (range f) o = f o [PROOFSTEP] apply Ordinal.induction o [GOAL] S : Set Ordinal.{u} f : Ordinal.{u_1} → Ordinal.{u_1} hf : StrictMono f o : Ordinal.{u_1} ⊢ ∀ (j : Ordinal.{u_1}), (∀ (k : Ordinal.{u_1}), k < j → enumOrd (range f) k = f k) → enumOrd (range f) j = f j [PROOFSTEP] intro a H [GOAL] S : Set Ordinal.{u} f : Ordinal.{u_1} → Ordinal.{u_1} hf : StrictMono f o a : Ordinal.{u_1} H : ∀ (k : Ordinal.{u_1}), k < a → enumOrd (range f) k = f k ⊢ enumOrd (range f) a = f a [PROOFSTEP] rw [enumOrd_def a] [GOAL] S : Set Ordinal.{u} f : Ordinal.{u_1} → Ordinal.{u_1} hf : StrictMono f o a : Ordinal.{u_1} H : ∀ (k : Ordinal.{u_1}), k < a → enumOrd (range f) k = f k ⊢ sInf (range f ∩ {b | ∀ (c : Ordinal.{u_1}), c < a → enumOrd (range f) c < b}) = f a [PROOFSTEP] have Hfa : f a ∈ range f ∩ {b | ∀ c, c < a → enumOrd (range f) c < b} := ⟨mem_range_self a, fun b hb => by rw [H b hb] exact hf hb⟩ [GOAL] S : Set Ordinal.{u} f : Ordinal.{u_1} → Ordinal.{u_1} hf : StrictMono f o a : Ordinal.{u_1} H : ∀ (k : Ordinal.{u_1}), k < a → enumOrd (range f) k = f k b : Ordinal.{u_1} hb : b < a ⊢ enumOrd (range f) b < f a [PROOFSTEP] rw [H b hb] [GOAL] S : Set Ordinal.{u} f : Ordinal.{u_1} → Ordinal.{u_1} hf : StrictMono f o a : Ordinal.{u_1} H : ∀ (k : Ordinal.{u_1}), k < a → enumOrd (range f) k = f k b : Ordinal.{u_1} hb : b < a ⊢ f b < f a [PROOFSTEP] exact hf hb [GOAL] S : Set Ordinal.{u} f : Ordinal.{u_1} → Ordinal.{u_1} hf : StrictMono f o a : Ordinal.{u_1} H : ∀ (k : Ordinal.{u_1}), k < a → enumOrd (range f) k = f k Hfa : f a ∈ range f ∩ {b | ∀ (c : Ordinal.{u_1}), c < a → enumOrd (range f) c < b} ⊢ sInf (range f ∩ {b | ∀ (c : Ordinal.{u_1}), c < a → enumOrd (range f) c < b}) = f a [PROOFSTEP] refine' (csInf_le' Hfa).antisymm ((le_csInf_iff'' ⟨_, Hfa⟩).2 _) [GOAL] S : Set Ordinal.{u} f : Ordinal.{u_1} → Ordinal.{u_1} hf : StrictMono f o a : Ordinal.{u_1} H : ∀ (k : Ordinal.{u_1}), k < a → enumOrd (range f) k = f k Hfa : f a ∈ range f ∩ {b | ∀ (c : Ordinal.{u_1}), c < a → enumOrd (range f) c < b} ⊢ ∀ (b : Ordinal.{u_1}), b ∈ range f ∩ {b | ∀ (c : Ordinal.{u_1}), c < a → enumOrd (range f) c < b} → f a ≤ b [PROOFSTEP] rintro _ ⟨⟨c, rfl⟩, hc : ∀ b < a, enumOrd (range f) b < f c⟩ [GOAL] case intro.intro S : Set Ordinal.{u} f : Ordinal.{u_1} → Ordinal.{u_1} hf : StrictMono 
f o a : Ordinal.{u_1} H : ∀ (k : Ordinal.{u_1}), k < a → enumOrd (range f) k = f k Hfa : f a ∈ range f ∩ {b | ∀ (c : Ordinal.{u_1}), c < a → enumOrd (range f) c < b} c : Ordinal.{u_1} hc : ∀ (b : Ordinal.{u_1}), b < a → enumOrd (range f) b < f c ⊢ f a ≤ f c [PROOFSTEP] rw [hf.le_iff_le] [GOAL] case intro.intro S : Set Ordinal.{u} f : Ordinal.{u_1} → Ordinal.{u_1} hf : StrictMono f o a : Ordinal.{u_1} H : ∀ (k : Ordinal.{u_1}), k < a → enumOrd (range f) k = f k Hfa : f a ∈ range f ∩ {b | ∀ (c : Ordinal.{u_1}), c < a → enumOrd (range f) c < b} c : Ordinal.{u_1} hc : ∀ (b : Ordinal.{u_1}), b < a → enumOrd (range f) b < f c ⊢ a ≤ c [PROOFSTEP] contrapose! hc [GOAL] case intro.intro S : Set Ordinal.{u} f : Ordinal.{u_1} → Ordinal.{u_1} hf : StrictMono f o a : Ordinal.{u_1} H : ∀ (k : Ordinal.{u_1}), k < a → enumOrd (range f) k = f k Hfa : f a ∈ range f ∩ {b | ∀ (c : Ordinal.{u_1}), c < a → enumOrd (range f) c < b} c : Ordinal.{u_1} hc : c < a ⊢ ∃ b, b < a ∧ f c ≤ enumOrd (range f) b [PROOFSTEP] exact ⟨c, hc, (H c hc).ge⟩ [GOAL] S : Set Ordinal.{u} ⊢ enumOrd Set.univ = id [PROOFSTEP] rw [← range_id] [GOAL] S : Set Ordinal.{u} ⊢ enumOrd (range id) = id [PROOFSTEP] exact enumOrd_range strictMono_id [GOAL] S : Set Ordinal.{u} ⊢ enumOrd S 0 = sInf S [PROOFSTEP] rw [enumOrd_def] [GOAL] S : Set Ordinal.{u} ⊢ sInf (S ∩ {b | ∀ (c : Ordinal.{u}), c < 0 → enumOrd S c < b}) = sInf S [PROOFSTEP] simp [Ordinal.not_lt_zero] [GOAL] S : Set Ordinal.{u} a b : Ordinal.{u} hS : Unbounded (fun x x_1 => x < x_1) S ha : a ∈ S hb : enumOrd S b < a ⊢ enumOrd S (succ b) ≤ a [PROOFSTEP] rw [enumOrd_def] [GOAL] S : Set Ordinal.{u} a b : Ordinal.{u} hS : Unbounded (fun x x_1 => x < x_1) S ha : a ∈ S hb : enumOrd S b < a ⊢ sInf (S ∩ {b_1 | ∀ (c : Ordinal.{u}), c < succ b → enumOrd S c < b_1}) ≤ a [PROOFSTEP] exact csInf_le' ⟨ha, fun c hc => ((enumOrd_strictMono hS).monotone (le_of_lt_succ hc)).trans_lt hb⟩ [GOAL] S✝ : Set Ordinal.{u} S T : Set Ordinal.{u_1} hS : Unbounded (fun x x_1 => x < x_1) S hST : S ⊆ T a : Ordinal.{u_1} ⊢ enumOrd T a ≤ enumOrd S a [PROOFSTEP] apply Ordinal.induction a [GOAL] S✝ : Set Ordinal.{u} S T : Set Ordinal.{u_1} hS : Unbounded (fun x x_1 => x < x_1) S hST : S ⊆ T a : Ordinal.{u_1} ⊢ ∀ (j : Ordinal.{u_1}), (∀ (k : Ordinal.{u_1}), k < j → enumOrd T k ≤ enumOrd S k) → enumOrd T j ≤ enumOrd S j [PROOFSTEP] intro b H [GOAL] S✝ : Set Ordinal.{u} S T : Set Ordinal.{u_1} hS : Unbounded (fun x x_1 => x < x_1) S hST : S ⊆ T a b : Ordinal.{u_1} H : ∀ (k : Ordinal.{u_1}), k < b → enumOrd T k ≤ enumOrd S k ⊢ enumOrd T b ≤ enumOrd S b [PROOFSTEP] rw [enumOrd_def] [GOAL] S✝ : Set Ordinal.{u} S T : Set Ordinal.{u_1} hS : Unbounded (fun x x_1 => x < x_1) S hST : S ⊆ T a b : Ordinal.{u_1} H : ∀ (k : Ordinal.{u_1}), k < b → enumOrd T k ≤ enumOrd S k ⊢ sInf (T ∩ {b_1 | ∀ (c : Ordinal.{u_1}), c < b → enumOrd T c < b_1}) ≤ enumOrd S b [PROOFSTEP] exact csInf_le' ⟨hST (enumOrd_mem hS b), fun c h => (H c h).trans_lt (enumOrd_strictMono hS h)⟩ [GOAL] S : Set Ordinal.{u} hS : Unbounded (fun x x_1 => x < x_1) S s : Ordinal.{u} hs : s ∈ S ⊢ enumOrd S (sSup {a | enumOrd S a ≤ s}) = s [PROOFSTEP] apply le_antisymm [GOAL] case a S : Set Ordinal.{u} hS : Unbounded (fun x x_1 => x < x_1) S s : Ordinal.{u} hs : s ∈ S ⊢ enumOrd S (sSup {a | enumOrd S a ≤ s}) ≤ s [PROOFSTEP] rw [enumOrd_def] [GOAL] case a S : Set Ordinal.{u} hS : Unbounded (fun x x_1 => x < x_1) S s : Ordinal.{u} hs : s ∈ S ⊢ sInf (S ∩ {b | ∀ (c : Ordinal.{u}), c < sSup {a | enumOrd S a ≤ s} → enumOrd S c < b}) ≤ s [PROOFSTEP] refine' csInf_le' ⟨hs, fun a ha => 
_⟩ [GOAL] case a S : Set Ordinal.{u} hS : Unbounded (fun x x_1 => x < x_1) S s : Ordinal.{u} hs : s ∈ S a : Ordinal.{u} ha : a < sSup {a | enumOrd S a ≤ s} ⊢ enumOrd S a < s [PROOFSTEP] have : enumOrd S 0 ≤ s := by rw [enumOrd_zero] exact csInf_le' hs [GOAL] S : Set Ordinal.{u} hS : Unbounded (fun x x_1 => x < x_1) S s : Ordinal.{u} hs : s ∈ S a : Ordinal.{u} ha : a < sSup {a | enumOrd S a ≤ s} ⊢ enumOrd S 0 ≤ s [PROOFSTEP] rw [enumOrd_zero] [GOAL] S : Set Ordinal.{u} hS : Unbounded (fun x x_1 => x < x_1) S s : Ordinal.{u} hs : s ∈ S a : Ordinal.{u} ha : a < sSup {a | enumOrd S a ≤ s} ⊢ sInf S ≤ s [PROOFSTEP] exact csInf_le' hs [GOAL] case a S : Set Ordinal.{u} hS : Unbounded (fun x x_1 => x < x_1) S s : Ordinal.{u} hs : s ∈ S a : Ordinal.{u} ha : a < sSup {a | enumOrd S a ≤ s} this : enumOrd S 0 ≤ s ⊢ enumOrd S a < s [PROOFSTEP] rcases flip exists_lt_of_lt_csSup ha ⟨0, this⟩ with ⟨b, hb, hab⟩ [GOAL] case a.intro.intro S : Set Ordinal.{u} hS : Unbounded (fun x x_1 => x < x_1) S s : Ordinal.{u} hs : s ∈ S a : Ordinal.{u} ha : a < sSup {a | enumOrd S a ≤ s} this : enumOrd S 0 ≤ s b : Ordinal.{u} hb : b ∈ {a | enumOrd S a ≤ s} hab : a < b ⊢ enumOrd S a < s [PROOFSTEP] exact (enumOrd_strictMono hS hab).trans_le hb [GOAL] case a S : Set Ordinal.{u} hS : Unbounded (fun x x_1 => x < x_1) S s : Ordinal.{u} hs : s ∈ S ⊢ s ≤ enumOrd S (sSup {a | enumOrd S a ≤ s}) [PROOFSTEP] by_contra' h [GOAL] case a S : Set Ordinal.{u} hS : Unbounded (fun x x_1 => x < x_1) S s : Ordinal.{u} hs : s ∈ S h : enumOrd S (sSup {a | enumOrd S a ≤ s}) < s ⊢ False [PROOFSTEP] exact (le_csSup ⟨s, fun a => (lt_wf.self_le_of_strictMono (enumOrd_strictMono hS) a).trans⟩ (enumOrd_succ_le hS hs h)).not_lt (lt_succ _) [GOAL] S : Set Ordinal.{u} hS : Unbounded (fun x x_1 => x < x_1) S ⊢ range (enumOrd S) = S [PROOFSTEP] rw [range_eq_iff] [GOAL] S : Set Ordinal.{u} hS : Unbounded (fun x x_1 => x < x_1) S ⊢ (∀ (a : Ordinal.{u}), enumOrd S a ∈ S) ∧ ∀ (b : Ordinal.{u}), b ∈ S → ∃ a, enumOrd S a = b [PROOFSTEP] exact ⟨enumOrd_mem hS, enumOrd_surjective hS⟩ [GOAL] S : Set Ordinal.{u} f : Ordinal.{u} → Ordinal.{u} hS : Unbounded (fun x x_1 => x < x_1) S ⊢ StrictMono f ∧ range f = S ↔ f = enumOrd S [PROOFSTEP] constructor [GOAL] case mp S : Set Ordinal.{u} f : Ordinal.{u} → Ordinal.{u} hS : Unbounded (fun x x_1 => x < x_1) S ⊢ StrictMono f ∧ range f = S → f = enumOrd S [PROOFSTEP] rintro ⟨h₁, h₂⟩ [GOAL] case mp.intro S : Set Ordinal.{u} f : Ordinal.{u} → Ordinal.{u} hS : Unbounded (fun x x_1 => x < x_1) S h₁ : StrictMono f h₂ : range f = S ⊢ f = enumOrd S [PROOFSTEP] rwa [← lt_wf.eq_strictMono_iff_eq_range h₁ (enumOrd_strictMono hS), range_enumOrd hS] [GOAL] case mpr S : Set Ordinal.{u} f : Ordinal.{u} → Ordinal.{u} hS : Unbounded (fun x x_1 => x < x_1) S ⊢ f = enumOrd S → StrictMono f ∧ range f = S [PROOFSTEP] rintro rfl [GOAL] case mpr S : Set Ordinal.{u} hS : Unbounded (fun x x_1 => x < x_1) S ⊢ StrictMono (enumOrd S) ∧ range (enumOrd S) = S [PROOFSTEP] exact ⟨enumOrd_strictMono hS, range_enumOrd hS⟩ [GOAL] m : ℕ ⊢ 1 + ↑m = ↑(succ m) [PROOFSTEP] rw [← Nat.cast_one, ← Nat.cast_add, add_comm] [GOAL] m : ℕ ⊢ ↑(m + 1) = ↑(succ m) [PROOFSTEP] rfl [GOAL] m : ℕ ⊢ ↑(m * 0) = ↑m * ↑0 [PROOFSTEP] simp [GOAL] m n : ℕ ⊢ ↑(m * (n + 1)) = ↑m * ↑(n + 1) [PROOFSTEP] rw [Nat.mul_succ, Nat.cast_add, nat_cast_mul m n, Nat.cast_succ, mul_add_one] [GOAL] m n : ℕ ⊢ ↑m ≤ ↑n ↔ m ≤ n [PROOFSTEP] rw [← Cardinal.ord_nat, ← Cardinal.ord_nat, Cardinal.ord_le_ord, Cardinal.natCast_le] [GOAL] m n : ℕ ⊢ ↑m < ↑n ↔ m < n [PROOFSTEP] simp only [lt_iff_le_not_le, 
nat_cast_le] [GOAL] m n : ℕ ⊢ ↑m = ↑n ↔ m = n [PROOFSTEP] simp only [le_antisymm_iff, nat_cast_le] [GOAL] m n : ℕ ⊢ ↑(m - n) = ↑m - ↑n [PROOFSTEP] cases' le_total m n with h h [GOAL] case inl m n : ℕ h : m ≤ n ⊢ ↑(m - n) = ↑m - ↑n [PROOFSTEP] rw [tsub_eq_zero_iff_le.2 h, Ordinal.sub_eq_zero_iff_le.2 (nat_cast_le.2 h)] [GOAL] case inl m n : ℕ h : m ≤ n ⊢ ↑0 = 0 [PROOFSTEP] rfl [GOAL] case inr m n : ℕ h : n ≤ m ⊢ ↑(m - n) = ↑m - ↑n [PROOFSTEP] apply (add_left_cancel n).1 [GOAL] case inr m n : ℕ h : n ≤ m ⊢ ↑n + ↑(m - n) = ↑n + (↑m - ↑n) [PROOFSTEP] rw [← Nat.cast_add, add_tsub_cancel_of_le h, Ordinal.add_sub_cancel_of_le (nat_cast_le.2 h)] [GOAL] m n : ℕ ⊢ ↑(m / n) = ↑m / ↑n [PROOFSTEP] rcases eq_or_ne n 0 with (rfl | hn) [GOAL] case inl m : ℕ ⊢ ↑(m / 0) = ↑m / ↑0 [PROOFSTEP] simp [GOAL] case inr m n : ℕ hn : n ≠ 0 ⊢ ↑(m / n) = ↑m / ↑n [PROOFSTEP] have hn' := nat_cast_ne_zero.2 hn [GOAL] case inr m n : ℕ hn : n ≠ 0 hn' : ↑n ≠ 0 ⊢ ↑(m / n) = ↑m / ↑n [PROOFSTEP] apply le_antisymm [GOAL] case inr.a m n : ℕ hn : n ≠ 0 hn' : ↑n ≠ 0 ⊢ ↑(m / n) ≤ ↑m / ↑n [PROOFSTEP] rw [le_div hn', ← nat_cast_mul, nat_cast_le, mul_comm] [GOAL] case inr.a m n : ℕ hn : n ≠ 0 hn' : ↑n ≠ 0 ⊢ m / n * n ≤ m [PROOFSTEP] apply Nat.div_mul_le_self [GOAL] case inr.a m n : ℕ hn : n ≠ 0 hn' : ↑n ≠ 0 ⊢ ↑m / ↑n ≤ ↑(m / n) [PROOFSTEP] rw [div_le hn', ← add_one_eq_succ, ← Nat.cast_succ, ← nat_cast_mul, nat_cast_lt, mul_comm, ← Nat.div_lt_iff_lt_mul (Nat.pos_of_ne_zero hn)] [GOAL] case inr.a m n : ℕ hn : n ≠ 0 hn' : ↑n ≠ 0 ⊢ m / n < Nat.succ (m / n) [PROOFSTEP] apply Nat.lt_succ_self [GOAL] m n : ℕ ⊢ ↑(m % n) = ↑m % ↑n [PROOFSTEP] rw [← add_left_cancel, div_add_mod, ← nat_cast_div, ← nat_cast_mul, ← Nat.cast_add, Nat.div_add_mod] [GOAL] ⊢ lift ↑0 = ↑0 [PROOFSTEP] simp [GOAL] n : ℕ ⊢ lift ↑(n + 1) = ↑(n + 1) [PROOFSTEP] simp [lift_nat_cast n] [GOAL] o : Ordinal.{u} h : o < ω ⊢ o < ord ℵ₀ [PROOFSTEP] rcases Ordinal.lt_lift_iff.1 h with ⟨o, rfl, h'⟩ [GOAL] case intro.intro o : Ordinal.{0} h' : o < type fun x x_1 => x < x_1 h : Ordinal.lift o < ω ⊢ Ordinal.lift o < ord ℵ₀ [PROOFSTEP] rw [lt_ord, ← lift_card, lift_lt_aleph0, ← typein_enum (· < ·) h'] [GOAL] case intro.intro o : Ordinal.{0} h' : o < type fun x x_1 => x < x_1 h : Ordinal.lift o < ω ⊢ card (typein (fun x x_1 => x < x_1) (enum (fun x x_1 => x < x_1) o h')) < ℵ₀ [PROOFSTEP] exact lt_aleph0_iff_fintype.2 ⟨Set.fintypeLTNat _⟩ [GOAL] c : Cardinal.{u_1} h : ℵ₀ ≤ c ⊢ c + 1 = c [PROOFSTEP] rw [add_comm, ← card_ord c, ← card_one, ← card_add, one_add_of_omega_le] [GOAL] c : Cardinal.{u_1} h : ℵ₀ ≤ c ⊢ ω ≤ ord c [PROOFSTEP] rwa [← ord_aleph0, ord_le_ord] [GOAL] a b c : Ordinal.{u} h : IsLimit c ⊢ a < b + c ↔ ∃ c', c' < c ∧ a < b + c' [PROOFSTEP] have := IsNormal.bsup_eq.{u, u} (add_isNormal b) h [GOAL] a b c : Ordinal.{u} h : IsLimit c this : (bsup c fun x x_1 => (fun x x_2 => x + x_2) b x) = (fun x x_1 => x + x_1) b c ⊢ a < b + c ↔ ∃ c', c' < c ∧ a < b + c' [PROOFSTEP] dsimp only at this [GOAL] a b c : Ordinal.{u} h : IsLimit c this : (bsup c fun x x_1 => b + x) = b + c ⊢ a < b + c ↔ ∃ c', c' < c ∧ a < b + c' [PROOFSTEP] rw [← this, lt_bsup, bex_def] [GOAL] o : Ordinal.{u_1} ⊢ o < ω ↔ ∃ n, o = ↑n [PROOFSTEP] simp_rw [← Cardinal.ord_aleph0, Cardinal.lt_ord, lt_aleph0, card_eq_nat] [GOAL] ⊢ 1 < ω [PROOFSTEP] simpa only [Nat.cast_one] using nat_lt_omega 1 [GOAL] o : Ordinal.{u_1} h : o < ω ⊢ succ o < ω [PROOFSTEP] let ⟨n, e⟩ := lt_omega.1 h [GOAL] o : Ordinal.{u_1} h : o < ω n : ℕ e : o = ↑n ⊢ succ o < ω [PROOFSTEP] rw [e] [GOAL] o : Ordinal.{u_1} h : o < ω n : ℕ e : o = ↑n ⊢ succ ↑n < 
ω [PROOFSTEP] exact nat_lt_omega (n + 1) [GOAL] o : Ordinal.{u_1} H : ∀ (n : ℕ), ↑n ≤ o a : Ordinal.{u_1} h : a < ω ⊢ a < o [PROOFSTEP] let ⟨n, e⟩ := lt_omega.1 h [GOAL] o : Ordinal.{u_1} H : ∀ (n : ℕ), ↑n ≤ o a : Ordinal.{u_1} h : a < ω n : ℕ e : a = ↑n ⊢ a < o [PROOFSTEP] rw [e, ← succ_le_iff] [GOAL] o : Ordinal.{u_1} H : ∀ (n : ℕ), ↑n ≤ o a : Ordinal.{u_1} h : a < ω n : ℕ e : a = ↑n ⊢ succ ↑n ≤ o [PROOFSTEP] exact H (n + 1) [GOAL] a : Ordinal.{u_1} ⊢ IsLimit a ↔ a ≠ 0 ∧ ω ∣ a [PROOFSTEP] refine' ⟨fun l => ⟨l.1, ⟨a / ω, le_antisymm _ (mul_div_le _ _)⟩⟩, fun h => _⟩ [GOAL] case refine'_1 a : Ordinal.{u_1} l : IsLimit a ⊢ a ≤ ω * (a / ω) [PROOFSTEP] refine' (limit_le l).2 fun x hx => le_of_lt _ [GOAL] case refine'_1 a : Ordinal.{u_1} l : IsLimit a x : Ordinal.{u_1} hx : x < a ⊢ x < ω * (a / ω) [PROOFSTEP] rw [← div_lt omega_ne_zero, ← succ_le_iff, le_div omega_ne_zero, mul_succ, add_le_of_limit omega_isLimit] [GOAL] case refine'_1 a : Ordinal.{u_1} l : IsLimit a x : Ordinal.{u_1} hx : x < a ⊢ ∀ (b' : Ordinal.{u_1}), b' < ω → ω * (x / ω) + b' ≤ a [PROOFSTEP] intro b hb [GOAL] case refine'_1 a : Ordinal.{u_1} l : IsLimit a x : Ordinal.{u_1} hx : x < a b : Ordinal.{u_1} hb : b < ω ⊢ ω * (x / ω) + b ≤ a [PROOFSTEP] rcases lt_omega.1 hb with ⟨n, rfl⟩ [GOAL] case refine'_1.intro a : Ordinal.{u_1} l : IsLimit a x : Ordinal.{u_1} hx : x < a n : ℕ hb : ↑n < ω ⊢ ω * (x / ω) + ↑n ≤ a [PROOFSTEP] exact (add_le_add_right (mul_div_le _ _) _).trans (lt_sub.1 <| nat_lt_limit (sub_isLimit l hx) _).le [GOAL] case refine'_2 a : Ordinal.{u_1} h : a ≠ 0 ∧ ω ∣ a ⊢ IsLimit a [PROOFSTEP] rcases h with ⟨a0, b, rfl⟩ [GOAL] case refine'_2.intro.intro b : Ordinal.{u_1} a0 : ω * b ≠ 0 ⊢ IsLimit (ω * b) [PROOFSTEP] refine' mul_isLimit_left omega_isLimit (Ordinal.pos_iff_ne_zero.2 <| mt _ a0) [GOAL] case refine'_2.intro.intro b : Ordinal.{u_1} a0 : ω * b ≠ 0 ⊢ b = 0 → ω * b = 0 [PROOFSTEP] intro e [GOAL] case refine'_2.intro.intro b : Ordinal.{u_1} a0 : ω * b ≠ 0 e : b = 0 ⊢ ω * b = 0 [PROOFSTEP] simp only [e, mul_zero] [GOAL] a b c : Ordinal.{u_1} ba : b + a = a l : IsLimit c IH : ∀ (c' : Ordinal.{u_1}), c' < c → (a + b) * succ c' = a * succ c' + b c' : Ordinal.{u_1} h : c' < c ⊢ (a + b) * c' ≤ a * c [PROOFSTEP] apply (mul_le_mul_left' (le_succ c') _).trans [GOAL] a b c : Ordinal.{u_1} ba : b + a = a l : IsLimit c IH : ∀ (c' : Ordinal.{u_1}), c' < c → (a + b) * succ c' = a * succ c' + b c' : Ordinal.{u_1} h : c' < c ⊢ (a + b) * succ c' ≤ a * c [PROOFSTEP] rw [IH _ h] [GOAL] a b c : Ordinal.{u_1} ba : b + a = a l : IsLimit c IH : ∀ (c' : Ordinal.{u_1}), c' < c → (a + b) * succ c' = a * succ c' + b c' : Ordinal.{u_1} h : c' < c ⊢ a * succ c' + b ≤ a * c [PROOFSTEP] apply (add_le_add_left _ _).trans [GOAL] a b c : Ordinal.{u_1} ba : b + a = a l : IsLimit c IH : ∀ (c' : Ordinal.{u_1}), c' < c → (a + b) * succ c' = a * succ c' + b c' : Ordinal.{u_1} h : c' < c ⊢ a * succ c' + ?m.440069 ≤ a * c [PROOFSTEP] rw [← mul_succ] [GOAL] a b c : Ordinal.{u_1} ba : b + a = a l : IsLimit c IH : ∀ (c' : Ordinal.{u_1}), c' < c → (a + b) * succ c' = a * succ c' + b c' : Ordinal.{u_1} h : c' < c ⊢ a * succ (succ c') ≤ a * c [PROOFSTEP] exact mul_le_mul_left' (succ_le_of_lt <| l.2 _ h) _ [GOAL] a b c : Ordinal.{u_1} ba : b + a = a l : IsLimit c IH : ∀ (c' : Ordinal.{u_1}), c' < c → (a + b) * succ c' = a * succ c' + b c' : Ordinal.{u_1} h : c' < c ⊢ b ≤ a [PROOFSTEP] rw [← ba] [GOAL] a b c : Ordinal.{u_1} ba : b + a = a l : IsLimit c IH : ∀ (c' : Ordinal.{u_1}), c' < c → (a + b) * succ c' = a * succ c' + b c' : Ordinal.{u_1} h : c' < c ⊢ b ≤ 
b + a [PROOFSTEP] exact le_add_right _ _ [GOAL] a b c : Ordinal.{u_1} ba : b + a = a ⊢ (a + b) * succ c = a * succ c + b [PROOFSTEP] induction c using limitRecOn with | H₁ => simp only [succ_zero, mul_one] | H₂ c IH => rw [mul_succ, IH, ← add_assoc, add_assoc _ b, ba, ← mul_succ] | H₃ c l IH => -- Porting note: Unused. -- have := add_mul_limit_aux ba l IHrw [mul_succ, add_mul_limit_aux ba l IH, mul_succ, add_assoc] [GOAL] a b c : Ordinal.{u_1} ba : b + a = a ⊢ (a + b) * succ c = a * succ c + b [PROOFSTEP] induction c using limitRecOn with | H₁ => simp only [succ_zero, mul_one] | H₂ c IH => rw [mul_succ, IH, ← add_assoc, add_assoc _ b, ba, ← mul_succ] | H₃ c l IH => -- Porting note: Unused. -- have := add_mul_limit_aux ba l IHrw [mul_succ, add_mul_limit_aux ba l IH, mul_succ, add_assoc] [GOAL] case H₁ a b : Ordinal.{u_1} ba : b + a = a ⊢ (a + b) * succ 0 = a * succ 0 + b [PROOFSTEP] | H₁ => simp only [succ_zero, mul_one] [GOAL] case H₁ a b : Ordinal.{u_1} ba : b + a = a ⊢ (a + b) * succ 0 = a * succ 0 + b [PROOFSTEP] simp only [succ_zero, mul_one] [GOAL] case H₂ a b : Ordinal.{u_1} ba : b + a = a c : Ordinal.{u_1} IH : (a + b) * succ c = a * succ c + b ⊢ (a + b) * succ (succ c) = a * succ (succ c) + b [PROOFSTEP] | H₂ c IH => rw [mul_succ, IH, ← add_assoc, add_assoc _ b, ba, ← mul_succ] [GOAL] case H₂ a b : Ordinal.{u_1} ba : b + a = a c : Ordinal.{u_1} IH : (a + b) * succ c = a * succ c + b ⊢ (a + b) * succ (succ c) = a * succ (succ c) + b [PROOFSTEP] rw [mul_succ, IH, ← add_assoc, add_assoc _ b, ba, ← mul_succ] [GOAL] case H₃ a b : Ordinal.{u_1} ba : b + a = a c : Ordinal.{u_1} l : IsLimit c IH : ∀ (o' : Ordinal.{u_1}), o' < c → (a + b) * succ o' = a * succ o' + b ⊢ (a + b) * succ c = a * succ c + b [PROOFSTEP] | H₃ c l IH => -- Porting note: Unused. 
-- have := add_mul_limit_aux ba l IHrw [mul_succ, add_mul_limit_aux ba l IH, mul_succ, add_assoc] [GOAL] case H₃ a b : Ordinal.{u_1} ba : b + a = a c : Ordinal.{u_1} l : IsLimit c IH : ∀ (o' : Ordinal.{u_1}), o' < c → (a + b) * succ o' = a * succ o' + b ⊢ (a + b) * succ c = a * succ c + b [PROOFSTEP] rw [mul_succ, add_mul_limit_aux ba l IH, mul_succ, add_assoc] [GOAL] a b c : Ordinal.{u_1} hb : 0 < b h : ∀ (d : Ordinal.{u_1}), d < b → a + d < c ⊢ a + b ≤ c [PROOFSTEP] have H : a + (c - a) = c := Ordinal.add_sub_cancel_of_le (by rw [← add_zero a] exact (h _ hb).le) [GOAL] a b c : Ordinal.{u_1} hb : 0 < b h : ∀ (d : Ordinal.{u_1}), d < b → a + d < c ⊢ a ≤ c [PROOFSTEP] rw [← add_zero a] [GOAL] a b c : Ordinal.{u_1} hb : 0 < b h : ∀ (d : Ordinal.{u_1}), d < b → a + d < c ⊢ a + 0 ≤ c [PROOFSTEP] exact (h _ hb).le [GOAL] a b c : Ordinal.{u_1} hb : 0 < b h : ∀ (d : Ordinal.{u_1}), d < b → a + d < c H : a + (c - a) = c ⊢ a + b ≤ c [PROOFSTEP] rw [← H] [GOAL] a b c : Ordinal.{u_1} hb : 0 < b h : ∀ (d : Ordinal.{u_1}), d < b → a + d < c H : a + (c - a) = c ⊢ a + b ≤ a + (c - a) [PROOFSTEP] apply add_le_add_left _ a [GOAL] a b c : Ordinal.{u_1} hb : 0 < b h : ∀ (d : Ordinal.{u_1}), d < b → a + d < c H : a + (c - a) = c ⊢ b ≤ c - a [PROOFSTEP] by_contra' hb [GOAL] a b c : Ordinal.{u_1} hb✝ : 0 < b h : ∀ (d : Ordinal.{u_1}), d < b → a + d < c H : a + (c - a) = c hb : c - a < b ⊢ False [PROOFSTEP] exact (h _ hb).ne H [GOAL] f : Ordinal.{u} → Ordinal.{u} hf : IsNormal f ⊢ Ordinal.sup (f ∘ Nat.cast) = f ω [PROOFSTEP] rw [← sup_nat_cast, IsNormal.sup.{0, u, u} hf] [GOAL] o : Ordinal.{u_1} ⊢ (sup fun n => o * ↑n) = o * ω [PROOFSTEP] rcases eq_zero_or_pos o with (rfl | ho) [GOAL] case inl ⊢ (sup fun n => 0 * ↑n) = 0 * ω [PROOFSTEP] rw [zero_mul] [GOAL] case inl ⊢ (sup fun n => 0 * ↑n) = 0 [PROOFSTEP] exact sup_eq_zero_iff.2 fun n => zero_mul (n : Ordinal) [GOAL] case inr o : Ordinal.{u_1} ho : 0 < o ⊢ (sup fun n => o * ↑n) = o * ω [PROOFSTEP] exact (mul_isNormal ho).apply_omega [GOAL] α : Type u r : α → α → Prop a b : α h : Acc r a ⊢ rank h = sup fun b => succ (rank (_ : Acc r ↑b)) [PROOFSTEP] change (Acc.intro a fun _ => h.inv).rank = _ [GOAL] α : Type u r : α → α → Prop a b : α h : Acc r a ⊢ rank (_ : Acc (fun x => r x) a) = sup fun b => succ (rank (_ : Acc r ↑b)) [PROOFSTEP] rfl [GOAL] α : Type u r : α → α → Prop a b : α hb : Acc r b h : r a b ⊢ succ (rank (_ : Acc r a)) ≤ rank hb [PROOFSTEP] rw [hb.rank_eq] [GOAL] α : Type u r : α → α → Prop a b : α hb : Acc r b h : r a b ⊢ succ (rank (_ : Acc r a)) ≤ sup fun b_1 => succ (rank (_ : Acc r ↑b_1)) [PROOFSTEP] refine' le_trans _ (Ordinal.le_sup _ ⟨a, h⟩) [GOAL] α : Type u r : α → α → Prop a b : α hb : Acc r b h : r a b ⊢ succ (rank (_ : Acc r a)) ≤ succ (rank (_ : Acc r ↑{ val := a, property := h })) [PROOFSTEP] rfl [GOAL] α : Type u r : α → α → Prop a b : α hwf : WellFounded r ⊢ rank hwf a = sup fun b => succ (rank hwf ↑b) [PROOFSTEP] rw [rank, Acc.rank_eq] [GOAL] α : Type u r : α → α → Prop a b : α hwf : WellFounded r ⊢ (sup fun b => succ (Acc.rank (_ : Acc r ↑b))) = sup fun b => succ (rank hwf ↑b) [PROOFSTEP] rfl
context("String Interpolation") test_that("str_interp works with default env", { subject <- "statistics" number <- 7 floating <- 6.656 expect_equal( str_interp("A ${subject}. B $[d]{number}. C $[.2f]{floating}."), "A statistics. B 7. C 6.66." ) expect_equal( str_interp("Pi is approximately $[.5f]{pi}"), "Pi is approximately 3.14159" ) }) test_that("str_interp works with lists and data frames.", { expect_equal( str_interp( "One value, ${value1}, and then another, ${value2*2}.", list(value1 = 10, value2 = 20) ), "One value, 10, and then another, 40." ) expect_equal( str_interp( "Values are $[.2f]{max(Sepal.Width)} and $[.2f]{min(Sepal.Width)}.", iris ), "Values are 4.40 and 2.00." ) }) test_that("str_interp works with nested expressions", { amount <- 1337 expect_equal( str_interp("Works with } nested { braces too: $[.2f]{{{2 + 2}*{amount}}}"), "Works with } nested { braces too: 5348.00" ) }) test_that("str_interp works in the absense of placeholders", { expect_equal( str_interp("A quite static string here."), "A quite static string here." ) }) test_that("str_interp fails when encountering nested placeholders", { msg <- "This will never see the light of day" num <- 1.2345 expect_error( str_interp("${${msg}}"), "Invalid template string for interpolation" ) expect_error( str_interp("$[.2f]{${msg}}"), "Invalid template string for interpolation" ) }) test_that("str_interp fails when input is not a character string", { expect_error(str_interp(3L)) }) test_that("str_interp formats list independetly of other placeholders", { a_list <- c("item1", "item2", "item3") other <- "1" extract <- function(text) regmatches(text, regexpr("xx[^x]+xx", text)) from_list <- extract(str_interp("list: xx${a_list}xx")) from_both <- extract(str_interp("list: xx${a_list}xx, and another ${other}")) expect_equal(from_list, from_both) })
# Solution {-}

Stationary Gaussian random process

```python
from sympy import symbols, factor

s = symbols('s')
Sy = factor((-s**2 + 1)/((-s**2)**2 + 20*(-s**2) + 64))
Sy
```

$\displaystyle - \frac{\left(s - 1\right) \left(s + 1\right)}{\left(s - 4\right) \left(s - 2\right) \left(s + 2\right) \left(s + 4\right)}$

```python
Gs = (s + 1)/(s**2 + 6*s + 8)
Gs
```

$\displaystyle \frac{s + 1}{s^{2} + 6 s + 8}$

Let the KF state vector consist of the phase variables, i.e. $r$ and $\dot{r}$. Then the KF measurement equation will be:

\begin{equation}
z =
\begin{bmatrix}
1 & 1 \\
\end{bmatrix}
\begin{bmatrix}
r\\
\dot{r}\\
\end{bmatrix}
+ v
\end{equation}

with the corresponding scalar differential equation given as:

\begin{equation}
\ddot{r} + 6\dot{r} + 8r = u(t)
\end{equation}

\begin{equation}
\begin{bmatrix}
\dot{r}\\
\ddot{r}\\
\end{bmatrix}
=
\begin{bmatrix}
0 & 1 \\
-8 & -6 \\
\end{bmatrix}
\begin{bmatrix}
r\\
\dot{r}\\
\end{bmatrix}
+
\begin{bmatrix}
0\\
1\\
\end{bmatrix}
u(t)
\end{equation}
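As a sanity check — a minimal sketch added here, not part of the original solution — one can verify symbolically that the shaping filter reproduces the given spectrum, $G(s)G(-s) = S_y(s)$ (assuming unit-intensity white noise $u$), and collect the state-space matrices written above:

```python
# Minimal verification sketch (assumes unit-intensity white noise driving G(s)).
import numpy as np
from sympy import symbols, simplify

s = symbols('s')
Sy = (-s**2 + 1) / ((-s**2)**2 + 20*(-s**2) + 64)   # given spectral density S_y(s)
Gs = (s + 1) / (s**2 + 6*s + 8)                     # shaping filter G(s)

# G(s)*G(-s) should equal S_y(s) exactly.
print(simplify(Gs * Gs.subs(s, -s) - Sy))           # prints 0

# Phase-variable state-space form used above: xdot = F x + G u,  z = H x + v
F = np.array([[0.0, 1.0],
              [-8.0, -6.0]])
G = np.array([[0.0],
              [1.0]])
H = np.array([[1.0, 1.0]])
print(F, G, H, sep="\n\n")
```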
State Before: α : Type u_2 β : Type u_1 γ : Type ?u.400090 ι : Type ?u.400093 inst✝³ : Countable ι m : MeasurableSpace α μ : Measure α inst✝² : TopologicalSpace β inst✝¹ : TopologicalSpace γ f g : α → β inst✝ : PseudoMetrizableSpace β ν : Measure α ⊢ AEStronglyMeasurable f (μ + ν) ↔ AEStronglyMeasurable f μ ∧ AEStronglyMeasurable f ν State After: α : Type u_2 β : Type u_1 γ : Type ?u.400090 ι : Type ?u.400093 inst✝³ : Countable ι m : MeasurableSpace α μ : Measure α inst✝² : TopologicalSpace β inst✝¹ : TopologicalSpace γ f g : α → β inst✝ : PseudoMetrizableSpace β ν : Measure α ⊢ AEStronglyMeasurable f (bif true then μ else ν) ∧ AEStronglyMeasurable f (bif false then μ else ν) ↔ AEStronglyMeasurable f μ ∧ AEStronglyMeasurable f ν Tactic: rw [← sum_cond, aestronglyMeasurable_sum_measure_iff, Bool.forall_bool, and_comm] State Before: α : Type u_2 β : Type u_1 γ : Type ?u.400090 ι : Type ?u.400093 inst✝³ : Countable ι m : MeasurableSpace α μ : Measure α inst✝² : TopologicalSpace β inst✝¹ : TopologicalSpace γ f g : α → β inst✝ : PseudoMetrizableSpace β ν : Measure α ⊢ AEStronglyMeasurable f (bif true then μ else ν) ∧ AEStronglyMeasurable f (bif false then μ else ν) ↔ AEStronglyMeasurable f μ ∧ AEStronglyMeasurable f ν State After: no goals Tactic: rfl
import os

import pandas as pd
import numpy as np
from io import open
from xmltodict import parse as xml_parse

from src.pandas.dataframe import delta

COLUMN_NAME_CADENCE_RATE = 'Cadence-rate'
COLUMN_NAME_DELTA_T = 'Time-delta'
COLUMN_NAME_EXT_SPEED = 'Ext.Speed'
COLUMN_NAME_ACCELERATION = 'Acceleration'
COLUMN_NAME_SPEED = 'Speed'
COLUMN_NAME_CADENCE = 'Cadence'
COLUMN_NAME_WATTS = 'Ext.Watts'


class Tcx(object):
    def __init__(self, xmldict: dict):
        self._dict = xmldict

    def to_dataframe(self) -> pd.DataFrame:
        def prepare_tcx(df: pd.DataFrame) -> pd.DataFrame:
            """
            Used if a key is unknown or differs per TCX implementation,
            like <TCX> with or without namespace.
            :param df:
            :return:
            """
            def first_dict_value(d: dict):
                return list(d.values()).pop()

            def find_value_by_key_containing(d: dict, key_token: str):
                first_value = [d[k] for k in d.keys() if key_token in k].pop()
                return first_value

            df['DistanceMeters'] = df['DistanceMeters'].apply(lambda x: float(x))

            df[COLUMN_NAME_WATTS] = [find_value_by_key_containing(first_dict_value(extension_dict), 'Watts')
                                     for extension_dict in df['Extensions']]
            df[COLUMN_NAME_WATTS] = df[COLUMN_NAME_WATTS].apply(lambda x: float(x))

            df[COLUMN_NAME_EXT_SPEED] = [find_value_by_key_containing(first_dict_value(extension_dict), 'Speed')
                                         for extension_dict in df['Extensions']]
            df[COLUMN_NAME_EXT_SPEED] = df[COLUMN_NAME_EXT_SPEED].apply(lambda x: float(x))

            # Distance delta
            df['DistanceMeters-delta'] = delta(df['DistanceMeters'], np.subtract)

            # Time delta
            ## Time -> pd.Timestamp
            df['Time'] = df['Time'].apply(lambda t: pd.to_datetime(t))
            ## Time[i+1] - Time[i] type = pd.Timedelta
            df[COLUMN_NAME_DELTA_T] = delta(df['Time'], np.subtract)

            ## Speed [km/h] = distance [meter] / time-delta [second] * 3.6
            df[COLUMN_NAME_SPEED] = (df['DistanceMeters-delta'] /
                                     df[COLUMN_NAME_DELTA_T].apply(lambda td: td.total_seconds())) * 3.6

            # delta Speed
            speed_delta: pd.Series = delta(df[COLUMN_NAME_SPEED], np.subtract)
            df[COLUMN_NAME_ACCELERATION] = (speed_delta /
                                            df[COLUMN_NAME_DELTA_T].apply(lambda td: td.total_seconds())) * 3.6

            ## cadence
            df[COLUMN_NAME_CADENCE] = df[COLUMN_NAME_CADENCE].apply(lambda x: float(x))

            # delta Cadence
            cadence_delta: pd.Series = delta(df[COLUMN_NAME_CADENCE], np.subtract)
            df[COLUMN_NAME_CADENCE_RATE] = (cadence_delta /
                                            df[COLUMN_NAME_DELTA_T].apply(lambda td: td.total_seconds()))

            return df

        trackpoints: dict = self._dict['TrainingCenterDatabase']['Activities']['Activity']['Lap']['Track']
        list_of_trackpoint_dicts = list(trackpoints.values())[0]
        df: pd.DataFrame = pd.DataFrame.from_records(list_of_trackpoint_dicts)
        return prepare_tcx(df)

    @staticmethod
    def read_tcx(file_path: str):
        def read_xml(file_path: str) -> dict:
            project_root_dir = os.path.abspath('.')
            abs_file_path = os.path.join(project_root_dir, file_path)
            with open(abs_file_path, mode='r', encoding='utf-8') as f:
                content = f.read()
            return xml_parse(content)  # read xml to dict

        return Tcx(read_xml(file_path))
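A short usage sketch for the class above — the file path and the printed columns are illustrative assumptions, not part of the original module:

```python
# Hypothetical usage of the Tcx helper above; 'data/ride.tcx' is an illustrative
# path, not a file that ships with the original project.
tcx = Tcx.read_tcx('data/ride.tcx')    # parse the TCX file into a nested dict
df = tcx.to_dataframe()                # one row per trackpoint, with derived columns

# Columns computed by prepare_tcx from the raw trackpoints:
print(df[[COLUMN_NAME_SPEED, COLUMN_NAME_ACCELERATION, COLUMN_NAME_CADENCE_RATE]].head())
```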
Formal statement is: lemma le_measureD2: "A \<le> B \<Longrightarrow> space A = space B \<Longrightarrow> sets A \<le> sets B" Informal statement is: If $A \leq B$, then $A$ and $B$ have the same underlying set and $A$ is a sub-algebra of $B$.
function asa066_test ( )

%*****************************************************************************80
%
%% ASA066_TEST tests ASA066.
%
%  Licensing:
%
%    This code is distributed under the GNU LGPL license.
%
%  Modified:
%
%    14 February 2003
%
%  Author:
%
%    John Burkardt
%
  timestamp ( );
  fprintf ( 1, '\n' );
  fprintf ( 1, 'ASA066_TEST:\n' );
  fprintf ( 1, '  MATLAB version\n' );
  fprintf ( 1, '  Test the ASA066 library.\n' );

  asa066_test01 ( );
  asa066_test02 ( );
  asa066_test03 ( );
%
%  Terminate.
%
  fprintf ( 1, '\n' );
  fprintf ( 1, 'ASA066_TEST:\n' );
  fprintf ( 1, '  Normal end of execution.\n' );
  fprintf ( 1, '\n' );
  timestamp ( );

  return
end
[STATEMENT] lemma (in digraph) digraph_axioms'[dg_cs_intros]: assumes "\<alpha>' = \<alpha>" shows "digraph \<alpha>' \<CC>" [PROOF STATE] proof (prove) goal (1 subgoal): 1. digraph \<alpha>' \<CC> [PROOF STEP] unfolding assms [PROOF STATE] proof (prove) goal (1 subgoal): 1. digraph \<alpha> \<CC> [PROOF STEP] by (rule digraph_axioms)
[GOAL] α : Type u_1 inst✝² : PartialOrder α inst✝¹ : SuccOrder α inst✝ : IsSuccArchimedean α r : α → α → Prop n m : α h : ∀ (i : α), i ∈ Ico n m → r i (succ i) hnm : n ≤ m ⊢ ReflTransGen r n m [PROOFSTEP] revert h [GOAL] α : Type u_1 inst✝² : PartialOrder α inst✝¹ : SuccOrder α inst✝ : IsSuccArchimedean α r : α → α → Prop n m : α hnm : n ≤ m ⊢ (∀ (i : α), i ∈ Ico n m → r i (succ i)) → ReflTransGen r n m [PROOFSTEP] refine' Succ.rec _ _ hnm [GOAL] case refine'_1 α : Type u_1 inst✝² : PartialOrder α inst✝¹ : SuccOrder α inst✝ : IsSuccArchimedean α r : α → α → Prop n m : α hnm : n ≤ m ⊢ (∀ (i : α), i ∈ Ico n n → r i (succ i)) → ReflTransGen r n n [PROOFSTEP] intro _ [GOAL] case refine'_1 α : Type u_1 inst✝² : PartialOrder α inst✝¹ : SuccOrder α inst✝ : IsSuccArchimedean α r : α → α → Prop n m : α hnm : n ≤ m h✝ : ∀ (i : α), i ∈ Ico n n → r i (succ i) ⊢ ReflTransGen r n n [PROOFSTEP] exact ReflTransGen.refl [GOAL] case refine'_2 α : Type u_1 inst✝² : PartialOrder α inst✝¹ : SuccOrder α inst✝ : IsSuccArchimedean α r : α → α → Prop n m : α hnm : n ≤ m ⊢ ∀ (n_1 : α), n ≤ n_1 → ((∀ (i : α), i ∈ Ico n n_1 → r i (succ i)) → ReflTransGen r n n_1) → (∀ (i : α), i ∈ Ico n (succ n_1) → r i (succ i)) → ReflTransGen r n (succ n_1) [PROOFSTEP] intro m hnm ih h [GOAL] case refine'_2 α : Type u_1 inst✝² : PartialOrder α inst✝¹ : SuccOrder α inst✝ : IsSuccArchimedean α r : α → α → Prop n m✝ : α hnm✝ : n ≤ m✝ m : α hnm : n ≤ m ih : (∀ (i : α), i ∈ Ico n m → r i (succ i)) → ReflTransGen r n m h : ∀ (i : α), i ∈ Ico n (succ m) → r i (succ i) ⊢ ReflTransGen r n (succ m) [PROOFSTEP] have : ReflTransGen r n m := ih fun i hi => h i ⟨hi.1, hi.2.trans_le <| le_succ m⟩ [GOAL] case refine'_2 α : Type u_1 inst✝² : PartialOrder α inst✝¹ : SuccOrder α inst✝ : IsSuccArchimedean α r : α → α → Prop n m✝ : α hnm✝ : n ≤ m✝ m : α hnm : n ≤ m ih : (∀ (i : α), i ∈ Ico n m → r i (succ i)) → ReflTransGen r n m h : ∀ (i : α), i ∈ Ico n (succ m) → r i (succ i) this : ReflTransGen r n m ⊢ ReflTransGen r n (succ m) [PROOFSTEP] cases' (le_succ m).eq_or_lt with hm hm [GOAL] case refine'_2.inl α : Type u_1 inst✝² : PartialOrder α inst✝¹ : SuccOrder α inst✝ : IsSuccArchimedean α r : α → α → Prop n m✝ : α hnm✝ : n ≤ m✝ m : α hnm : n ≤ m ih : (∀ (i : α), i ∈ Ico n m → r i (succ i)) → ReflTransGen r n m h : ∀ (i : α), i ∈ Ico n (succ m) → r i (succ i) this : ReflTransGen r n m hm : m = succ m ⊢ ReflTransGen r n (succ m) [PROOFSTEP] rwa [← hm] [GOAL] case refine'_2.inr α : Type u_1 inst✝² : PartialOrder α inst✝¹ : SuccOrder α inst✝ : IsSuccArchimedean α r : α → α → Prop n m✝ : α hnm✝ : n ≤ m✝ m : α hnm : n ≤ m ih : (∀ (i : α), i ∈ Ico n m → r i (succ i)) → ReflTransGen r n m h : ∀ (i : α), i ∈ Ico n (succ m) → r i (succ i) this : ReflTransGen r n m hm : m < succ m ⊢ ReflTransGen r n (succ m) [PROOFSTEP] exact this.tail (h m ⟨hnm, hm⟩) [GOAL] α : Type u_1 inst✝² : PartialOrder α inst✝¹ : SuccOrder α inst✝ : IsSuccArchimedean α r : α → α → Prop n m : α h : ∀ (i : α), i ∈ Ico m n → r (succ i) i hmn : m ≤ n ⊢ ReflTransGen r n m [PROOFSTEP] rw [← reflTransGen_swap] [GOAL] α : Type u_1 inst✝² : PartialOrder α inst✝¹ : SuccOrder α inst✝ : IsSuccArchimedean α r : α → α → Prop n m : α h : ∀ (i : α), i ∈ Ico m n → r (succ i) i hmn : m ≤ n ⊢ ReflTransGen (swap r) m n [PROOFSTEP] exact reflTransGen_of_succ_of_le (swap r) h hmn [GOAL] α : Type u_1 inst✝² : LinearOrder α inst✝¹ : SuccOrder α inst✝ : IsSuccArchimedean α r : α → α → Prop n m : α hr : Reflexive r h1 : ∀ (i : α), i ∈ Ico n m → r i (succ i) h2 : ∀ (i : α), i ∈ Ico m n → r (succ i) i ⊢ TransGen r 
n m [PROOFSTEP] rcases eq_or_ne m n with (rfl | hmn) [GOAL] case inl α : Type u_1 inst✝² : LinearOrder α inst✝¹ : SuccOrder α inst✝ : IsSuccArchimedean α r : α → α → Prop m : α hr : Reflexive r h1 : ∀ (i : α), i ∈ Ico m m → r i (succ i) h2 : ∀ (i : α), i ∈ Ico m m → r (succ i) i ⊢ TransGen r m m [PROOFSTEP] exact TransGen.single (hr m) [GOAL] case inr α : Type u_1 inst✝² : LinearOrder α inst✝¹ : SuccOrder α inst✝ : IsSuccArchimedean α r : α → α → Prop n m : α hr : Reflexive r h1 : ∀ (i : α), i ∈ Ico n m → r i (succ i) h2 : ∀ (i : α), i ∈ Ico m n → r (succ i) i hmn : m ≠ n ⊢ TransGen r n m [PROOFSTEP] exact transGen_of_succ_of_ne r h1 h2 hmn.symm
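The goal/proofstep traces above prove three Mathlib-style facts about succ-Archimedean orders: stepping through `Ico n m` along `succ` yields a `ReflTransGen` chain, the reversed variant follows via `reflTransGen_swap`, and adding reflexivity gives a `TransGen` conclusion in a linear order. A minimal Lean 4 sketch of the statements only (the primed names and the umbrella `import Mathlib` are assumptions for illustration, not the verbatim Mathlib declarations; proofs are elided with `sorry`):

import Mathlib

open Order Relation

-- Sketch of the first trace: walk upward from n to m along succ-steps over Set.Ico n m.
theorem reflTransGen_of_succ_of_le' {α : Type*} [PartialOrder α] [SuccOrder α]
    [IsSuccArchimedean α] (r : α → α → Prop) {n m : α}
    (h : ∀ i ∈ Set.Ico n m, r i (succ i)) (hnm : n ≤ m) :
    ReflTransGen r n m := sorry

-- Sketch of the second trace: the reversed variant, reduced to the first via reflTransGen_swap.
theorem reflTransGen_of_succ_of_ge' {α : Type*} [PartialOrder α] [SuccOrder α]
    [IsSuccArchimedean α] (r : α → α → Prop) {n m : α}
    (h : ∀ i ∈ Set.Ico m n, r (succ i) i) (hmn : m ≤ n) :
    ReflTransGen r n m := sorry

-- Sketch of the third trace: with a reflexive relation, a TransGen conclusion in a linear order.
theorem transGen_of_succ_of_reflexive' {α : Type*} [LinearOrder α] [SuccOrder α]
    [IsSuccArchimedean α] (r : α → α → Prop) {n m : α} (hr : Reflexive r)
    (h1 : ∀ i ∈ Set.Ico n m, r i (succ i)) (h2 : ∀ i ∈ Set.Ico m n, r (succ i) i) :
    TransGen r n m := sorry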
theory Lists1_6 imports Main begin primrec sum :: "nat list \<Rightarrow> nat" where "sum [] = 0" | "sum (x#xs) = x + (sum xs)" value "sum [1::nat,2]" primrec flatten :: "'a list list \<Rightarrow> 'a list" where "flatten [] = []" | "flatten (x#xs) = x @ (flatten xs)" value "flatten []" value "flatten [[]]" value "flatten [[],[1,2],[1,3]]" lemma "sum [2::nat, 4, 8] = 14" by auto lemma "flatten [[2::nat, 3], [4, 5], [7, 9]] = [2::nat,3,4,5,7,9]" by auto lemma "length (flatten xs) = sum (map length xs)" apply (induct xs) apply simp+ done lemma sum_append: "sum (xs @ ys) = sum xs + sum ys" apply (induct xs) apply simp apply (induct ys) apply simp+ done lemma flatten_append: "flatten (xs @ ys) = flatten xs @ flatten ys" apply (induct xs) apply (induct ys) apply simp+ done lemma "flatten (map rev (rev xs)) = rev (flatten xs)" apply (induct xs) apply (simp add:flatten_append)+ done lemma "flatten (rev (map rev xs)) = rev (flatten xs)" apply (induct xs) apply (simp add:flatten_append)+ done lemma "list_all (list_all P) xs = list_all P (flatten xs)" apply (induct xs) apply simp+ done lemma "flatten (rev xs) = flatten xs" quickcheck oops lemma "sum (rev xs) = sum xs" apply (induct xs) apply (simp add:sum_append)+ done lemma "list_all (\<lambda>x. x\<ge>1) xs \<longrightarrow> length xs \<le> sum xs" apply (induct xs) apply auto done primrec list_exists:: "('a \<Rightarrow> bool) \<Rightarrow> 'a list \<Rightarrow> bool" where "list_exists P [] = False" | "list_exists P (x#xs) = (P x \<or> list_exists P xs)" value "list_exists (\<lambda>x. x=3) []" value "list_exists (\<lambda>x. x=3) [3]" value "list_exists (\<lambda>x. x=3) [1::int,3]" lemma "list_exists (\<lambda>n. n < 3) [4::nat, 3, 7] = False" by auto lemma "list_exists (\<lambda>n. n < 4) [4::nat, 3, 7] = True" by auto lemma list_exists_append: "list_exists P (xs@ys) = (list_exists P xs \<or> list_exists P ys)" apply (induct ys) apply simp apply (induct xs) apply simp apply auto done lemma "list_exists (list_exists P) xs = list_exists P (flatten xs)" apply (induct xs) apply (simp add:list_exists_append)+ done definition list_exists2 :: "('a \<Rightarrow> bool) \<Rightarrow> 'a list \<Rightarrow> bool" where "list_exists2 P xs == \<not> list_all (\<lambda>x. \<not> P x) xs" lemma "list_exists P xs = list_exists2 P xs" apply (induct xs) apply (simp add:list_exists2_def)+ done end
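As a quick executable cross-check of the Isabelle lemmas above, here is a small Python sketch (illustrative only, not part of the theory; the helper names mirror the Isabelle functions) that spot-checks the sum/flatten identities and the counterexample that quickcheck reports:

from itertools import chain

def nat_sum(xs):            # Isabelle: sum
    return sum(xs)

def flatten(xss):           # Isabelle: flatten
    return list(chain.from_iterable(xss))

xss = [[2, 3], [4, 5], [7, 9]]
assert nat_sum([2, 4, 8]) == 14
assert flatten(xss) == [2, 3, 4, 5, 7, 9]
# length (flatten xs) = sum (map length xs)
assert len(flatten(xss)) == nat_sum(map(len, xss))
# sum (rev xs) = sum xs
assert nat_sum(list(reversed([2, 4, 8]))) == nat_sum([2, 4, 8])
# flatten (rev xs) = flatten xs is false, which is why the lemma above ends in oops
assert flatten(list(reversed(xss))) != flatten(xss)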
# functions that implement transformations using the hpsModel import numpy as np from scipy.interpolate import interp1d def hpsTimeScale(hfreq, hmag, stocEnv, timeScaling): """ Time scaling of the harmonic plus stochastic representation hfreq, hmag: harmonic frequencies and magnitudes, stocEnv: residual envelope timeScaling: scaling factors, in time-value pairs returns yhfreq, yhmag, ystocEnv: hps output representation """ if (timeScaling.size % 2 != 0): # raise exception if array not even length raise ValueError("Time scaling array does not have an even size") L = hfreq[:,0].size # number of input frames maxInTime = max(timeScaling[::2]) # maximum value used as input times maxOutTime = max(timeScaling[1::2]) # maximum value used in output times outL = int(L*maxOutTime/maxInTime) # number of output frames inFrames = (L-1)*timeScaling[::2]/maxInTime # input time values in frames outFrames = outL*timeScaling[1::2]/maxOutTime # output time values in frames timeScalingEnv = interp1d(outFrames, inFrames, fill_value=0) # interpolation function indexes = timeScalingEnv(np.arange(outL)) # generate frame indexes for the output yhfreq = hfreq[int(round(indexes[0])),:] # first output frame yhmag = hmag[int(round(indexes[0])),:] # first output frame ystocEnv = stocEnv[int(round(indexes[0])),:] # first output frame for l in indexes[1:]: # iterate over all output frame indexes yhfreq = np.vstack((yhfreq, hfreq[int(round(l)),:])) # get the closest input frame yhmag = np.vstack((yhmag, hmag[int(round(l)),:])) # get the closest input frame ystocEnv = np.vstack((ystocEnv, stocEnv[int(round(l)),:])) # get the closest input frame return yhfreq, yhmag, ystocEnv def hpsMorph(hfreq1, hmag1, stocEnv1, hfreq2, hmag2, stocEnv2, hfreqIntp, hmagIntp, stocIntp): """ Morph between two sounds using the harmonic plus stochastic model hfreq1, hmag1, stocEnv1: hps representation of sound 1 hfreq2, hmag2, stocEnv2: hps representation of sound 2 hfreqIntp: interpolation factor between the harmonic frequencies of the two sounds, 0 is sound 1 and 1 is sound 2 (time,value pairs) hmagIntp: interpolation factor between the harmonic magnitudes of the two sounds, 0 is sound 1 and 1 is sound 2 (time,value pairs) stocIntp: interpolation factor between the stochastic representation of the two sounds, 0 is sound 1 and 1 is sound 2 (time,value pairs) returns yhfreq, yhmag, ystocEnv: hps output representation """ if (hfreqIntp.size % 2 != 0): # raise exception if array not even length raise ValueError("Harmonic frequencies interpolation array does not have an even size") if (hmagIntp.size % 2 != 0): # raise exception if array not even length raise ValueError("Harmonic magnitudes interpolation does not have an even size") if (stocIntp.size % 2 != 0): # raise exception if array not even length raise ValueError("Stochastic component array does not have an even size") L1 = hfreq1[:,0].size # number of frames of sound 1 L2 = hfreq2[:,0].size # number of frames of sound 2 hfreqIntp[::2] = (L1-1)*hfreqIntp[::2]/hfreqIntp[-2] # normalize input values hmagIntp[::2] = (L1-1)*hmagIntp[::2]/hmagIntp[-2] # normalize input values stocIntp[::2] = (L1-1)*stocIntp[::2]/stocIntp[-2] # normalize input values hfreqIntpEnv = interp1d(hfreqIntp[0::2], hfreqIntp[1::2], fill_value=0) # interpolation function hfreqIndexes = hfreqIntpEnv(np.arange(L1)) # generate frame indexes for the output hmagIntpEnv = interp1d(hmagIntp[0::2], hmagIntp[1::2], fill_value=0) # interpolation function hmagIndexes = hmagIntpEnv(np.arange(L1)) # generate frame indexes for the output 
    stocIntpEnv = interp1d(stocIntp[0::2], stocIntp[1::2], fill_value=0)    # interpolation function
    stocIndexes = stocIntpEnv(np.arange(L1))                                # generate frame indexes for the output
    yhfreq = np.zeros_like(hfreq1)                                          # create empty output matrix
    yhmag = np.zeros_like(hmag1)                                            # create empty output matrix
    ystocEnv = np.zeros_like(stocEnv1)                                      # create empty output matrix
    for l in range(L1):                                                     # generate morphed frames
        # identify harmonics that are present in both frames
        harmonics = np.intersect1d(np.array(np.nonzero(hfreq1[l,:]), dtype=int)[0], np.array(np.nonzero(hfreq2[int(round(L2*l/float(L1))),:]), dtype=int)[0])
        # interpolate the frequencies of the existing harmonics
        yhfreq[l,harmonics] = (1-hfreqIndexes[l])* hfreq1[l,harmonics] + hfreqIndexes[l] * hfreq2[int(round(L2*l/float(L1))),harmonics]
        # interpolate the magnitudes of the existing harmonics
        yhmag[l,harmonics] = (1-hmagIndexes[l])* hmag1[l,harmonics] + hmagIndexes[l] * hmag2[int(round(L2*l/float(L1))),harmonics]
        # interpolate the stochastic envelopes of both frames
        ystocEnv[l,:] = (1-stocIndexes[l])* stocEnv1[l,:] + stocIndexes[l] * stocEnv2[int(round(L2*l/float(L1))),:]
    return yhfreq, yhmag, ystocEnv
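A minimal usage sketch for hpsTimeScale on synthetic data, assuming the functions above are in scope (e.g. imported from the same module); the array shapes imitate what an HPS analysis would produce, and all values here are made up purely for illustration:

import numpy as np

L, nH, nStoc = 100, 10, 20                                   # frames, harmonics, stochastic coefficients (illustrative sizes)
hfreq = 440.0 * np.arange(1, nH + 1) * np.ones((L, nH))      # flat harmonic series on a 440 Hz fundamental
hmag = -20.0 * np.ones((L, nH))                              # constant harmonic magnitudes (dB)
stocEnv = -60.0 * np.ones((L, nStoc))                        # flat residual envelope

timeScaling = np.array([0.0, 0.0, 1.0, 2.0])                 # (input time, output time) pairs: stretch to twice the duration
yhfreq, yhmag, ystocEnv = hpsTimeScale(hfreq, hmag, stocEnv, timeScaling)
print(yhfreq.shape)                                          # about twice as many frames as the input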
Load LFindLoad. From lfind Require Import LFind. From QuickChick Require Import QuickChick. From adtind Require Import goal33. Derive Show for natural. Derive Arbitrary for natural. Instance Dec_Eq_natural : Dec_Eq natural. Proof. dec_eq. Qed. Lemma conj3synthconj4 : forall (lv0 : natural) (lv1 : natural) (lv2 : natural), (@eq natural (plus lv0 Zero) (plus lv1 (Succ lv2))). Admitted. QuickChick conj3synthconj4.
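The admitted conjecture above is false, which is what the QuickChick call is meant to expose; a tiny Python sketch of the same counterexample search over small naturals (purely illustrative and independent of the Coq development):

from itertools import product

def plus(a, b):                 # Peano-style plus on Python ints
    return a + b

def succ(a):
    return a + 1

# conj3synthconj4 claims: forall lv0 lv1 lv2, plus lv0 Zero = plus lv1 (Succ lv2)
counterexamples = [(a, b, c) for a, b, c in product(range(3), repeat=3)
                   if plus(a, 0) != plus(b, succ(c))]
print(counterexamples[0])       # (0, 0, 0): 0 <> 1, so the conjecture is refuted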
(* Title: HOL/Auth/n_flash_nodata_cub_lemma_on_inv__23.thy Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences *) header{*The n_flash_nodata_cub Protocol Case Study*} theory n_flash_nodata_cub_lemma_on_inv__23 imports n_flash_nodata_cub_base begin section{*All lemmas on causal relation between inv__23 and some rule r*} lemma n_PI_Remote_GetVsinv__23: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_PI_Remote_Get src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_PI_Remote_Get src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__23 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_PI_Remote_GetXVsinv__23: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_PI_Remote_GetX src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_PI_Remote_GetX src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__23 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_PI_Remote_PutXVsinv__23: assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_PI_Remote_PutX dst)" and a2: "(\<exists> p__Inv4. 
p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain dst where a1:"dst\<le>N\<and>r=n_PI_Remote_PutX dst" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__23 p__Inv4" apply fastforce done have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(dst=p__Inv4)" have "?P3 s" apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''Cmd'')) (Const UNI_PutX)) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''Proc'') p__Inv4) ''CacheState'')) (Const CACHE_E))))" in exI, auto) done then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(dst~=p__Inv4)" have "?P3 s" apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''Proc'') dst) ''CacheState'')) (Const CACHE_E)) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''Cmd'')) (Const UNI_PutX))))" in exI, auto) done then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_NakVsinv__23: assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Nak dst)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Nak dst" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__23 p__Inv4" apply fastforce done have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(dst=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(dst~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_Get_Nak__part__0Vsinv__23: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Nak__part__0 src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Nak__part__0 src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__23 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_Get_Nak__part__1Vsinv__23: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Nak__part__1 src)" and a2: "(\<exists> p__Inv4. 
p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Nak__part__1 src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__23 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_Get_Nak__part__2Vsinv__23: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Nak__part__2 src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Nak__part__2 src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__23 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_Get_Get__part__0Vsinv__23: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Get__part__0 src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Get__part__0 src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__23 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_Get_Get__part__1Vsinv__23: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Get__part__1 src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Get__part__1 src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__23 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_Get_Put_HeadVsinv__23: assumes a1: "(\<exists> src. 
src\<le>N\<and>r=n_NI_Local_Get_Put_Head N src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Put_Head N src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__23 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_Get_PutVsinv__23: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Put src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Put src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__23 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_Get_Put_DirtyVsinv__23: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Put_Dirty src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Put_Dirty src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__23 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Remote_Get_NakVsinv__23: assumes a1: "(\<exists> src dst. src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_Get_Nak src dst)" and a2: "(\<exists> p__Inv4. 
p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src dst where a1:"src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_Get_Nak src dst" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__23 p__Inv4" apply fastforce done have "(src=p__Inv4\<and>dst~=p__Inv4)\<or>(src~=p__Inv4\<and>dst=p__Inv4)\<or>(src~=p__Inv4\<and>dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4\<and>dst~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4\<and>dst=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4\<and>dst~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Remote_Get_PutVsinv__23: assumes a1: "(\<exists> src dst. src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_Get_Put src dst)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src dst where a1:"src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_Get_Put src dst" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__23 p__Inv4" apply fastforce done have "(src=p__Inv4\<and>dst~=p__Inv4)\<or>(src~=p__Inv4\<and>dst=p__Inv4)\<or>(src~=p__Inv4\<and>dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4\<and>dst~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4\<and>dst=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4\<and>dst~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_Nak__part__0Vsinv__23: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__0 src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__0 src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__23 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_Nak__part__1Vsinv__23: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__1 src)" and a2: "(\<exists> p__Inv4. 
p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__1 src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__23 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_Nak__part__2Vsinv__23: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__2 src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__2 src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__23 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_GetX__part__0Vsinv__23: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_GetX__part__0 src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_GetX__part__0 src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__23 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_GetX__part__1Vsinv__23: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_GetX__part__1 src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_GetX__part__1 src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__23 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_1Vsinv__23: assumes a1: "(\<exists> src. 
src\<le>N\<and>r=n_NI_Local_GetX_PutX_1 N src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_1 N src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__23 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P3 s" apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Field (Ident ''Sta'') ''WbMsg'') ''Cmd'')) (Const WB_Wb)) (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''Dirty'')) (Const false))))" in exI, auto) done then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_2Vsinv__23: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_2 N src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_2 N src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__23 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P3 s" apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Field (Ident ''Sta'') ''WbMsg'') ''Cmd'')) (Const WB_Wb)) (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''Dirty'')) (Const false))))" in exI, auto) done then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_3Vsinv__23: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_3 N src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_3 N src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__23 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P3 s" apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Field (Ident ''Sta'') ''WbMsg'') ''Cmd'')) (Const WB_Wb)) (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''Dirty'')) (Const false))))" in exI, auto) done then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_4Vsinv__23: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_4 N src)" and a2: "(\<exists> p__Inv4. 
p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_4 N src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__23 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P3 s" apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Field (Ident ''Sta'') ''WbMsg'') ''Cmd'')) (Const WB_Wb)) (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''Dirty'')) (Const false))))" in exI, auto) done then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_5Vsinv__23: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_5 N src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_5 N src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__23 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P3 s" apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Field (Ident ''Sta'') ''WbMsg'') ''Cmd'')) (Const WB_Wb)) (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''Dirty'')) (Const false))))" in exI, auto) done then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_6Vsinv__23: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_6 N src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_6 N src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__23 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P3 s" apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Field (Ident ''Sta'') ''WbMsg'') ''Cmd'')) (Const WB_Wb)) (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''Dirty'')) (Const false))))" in exI, auto) done then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_7__part__0Vsinv__23: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_7__part__0 N src)" and a2: "(\<exists> p__Inv4. 
p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_7__part__0 N src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__23 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P3 s" apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Field (Ident ''Sta'') ''WbMsg'') ''Cmd'')) (Const WB_Wb)) (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''Dirty'')) (Const false))))" in exI, auto) done then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_7__part__1Vsinv__23: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_7__part__1 N src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_7__part__1 N src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__23 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P3 s" apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Field (Ident ''Sta'') ''WbMsg'') ''Cmd'')) (Const WB_Wb)) (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''Dirty'')) (Const false))))" in exI, auto) done then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_7_NODE_Get__part__0Vsinv__23: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_7_NODE_Get__part__0 N src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_7_NODE_Get__part__0 N src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__23 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P3 s" apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Field (Ident ''Sta'') ''WbMsg'') ''Cmd'')) (Const WB_Wb)) (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''Dirty'')) (Const false))))" in exI, auto) done then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_7_NODE_Get__part__1Vsinv__23: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_7_NODE_Get__part__1 N src)" and a2: "(\<exists> p__Inv4. 
p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_7_NODE_Get__part__1 N src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__23 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P3 s" apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Field (Ident ''Sta'') ''WbMsg'') ''Cmd'')) (Const WB_Wb)) (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''Dirty'')) (Const false))))" in exI, auto) done then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_8_HomeVsinv__23: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_8_Home N src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_8_Home N src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__23 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P3 s" apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''HomeShrSet'')) (Const true)))" in exI, auto) done then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_8_Home_NODE_GetVsinv__23: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_8_Home_NODE_Get N src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_8_Home_NODE_Get N src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__23 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P3 s" apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''HomeShrSet'')) (Const true)))" in exI, auto) done then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_8Vsinv__23: assumes a1: "(\<exists> src pp. src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_8 N src pp)" and a2: "(\<exists> p__Inv4. 
p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src pp where a1:"src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_8 N src pp" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__23 p__Inv4" apply fastforce done have "(src=p__Inv4\<and>pp~=p__Inv4)\<or>(src~=p__Inv4\<and>pp=p__Inv4)\<or>(src~=p__Inv4\<and>pp~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4\<and>pp~=p__Inv4)" have "?P3 s" apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Field (Ident ''Sta'') ''WbMsg'') ''Cmd'')) (Const WB_Wb)) (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''Dirty'')) (Const false))))" in exI, auto) done then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4\<and>pp=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4\<and>pp~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_8_NODE_GetVsinv__23: assumes a1: "(\<exists> src pp. src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_8_NODE_Get N src pp)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src pp where a1:"src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_8_NODE_Get N src pp" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__23 p__Inv4" apply fastforce done have "(src=p__Inv4\<and>pp~=p__Inv4)\<or>(src~=p__Inv4\<and>pp=p__Inv4)\<or>(src~=p__Inv4\<and>pp~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4\<and>pp~=p__Inv4)" have "?P3 s" apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Field (Ident ''Sta'') ''WbMsg'') ''Cmd'')) (Const WB_Wb)) (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''Dirty'')) (Const false))))" in exI, auto) done then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4\<and>pp=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4\<and>pp~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_9__part__0Vsinv__23: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_9__part__0 N src)" and a2: "(\<exists> p__Inv4. 
p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_9__part__0 N src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__23 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P3 s" apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Field (Ident ''Sta'') ''WbMsg'') ''Cmd'')) (Const WB_Wb)) (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''Dirty'')) (Const false))))" in exI, auto) done then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_9__part__1Vsinv__23: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_9__part__1 N src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_9__part__1 N src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__23 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P3 s" apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Field (Ident ''Sta'') ''WbMsg'') ''Cmd'')) (Const WB_Wb)) (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''Dirty'')) (Const false))))" in exI, auto) done then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_10_HomeVsinv__23: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_10_Home N src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_10_Home N src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__23 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P3 s" apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''HomeShrSet'')) (Const true)))" in exI, auto) done then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_10Vsinv__23: assumes a1: "(\<exists> src pp. src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_10 N src pp)" and a2: "(\<exists> p__Inv4. 
p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src pp where a1:"src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_10 N src pp" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__23 p__Inv4" apply fastforce done have "(src=p__Inv4\<and>pp~=p__Inv4)\<or>(src~=p__Inv4\<and>pp=p__Inv4)\<or>(src~=p__Inv4\<and>pp~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4\<and>pp~=p__Inv4)" have "?P3 s" apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Field (Ident ''Sta'') ''WbMsg'') ''Cmd'')) (Const WB_Wb)) (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''Dirty'')) (Const false))))" in exI, auto) done then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4\<and>pp=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4\<and>pp~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Local_GetX_PutX_11Vsinv__23: assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_11 N src)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_11 N src" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__23 p__Inv4" apply fastforce done have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4)" have "?P3 s" apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Field (Ident ''Sta'') ''WbMsg'') ''Cmd'')) (Const WB_Wb)) (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''Local'')) (Const true))))" in exI, auto) done then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Remote_GetX_NakVsinv__23: assumes a1: "(\<exists> src dst. src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_GetX_Nak src dst)" and a2: "(\<exists> p__Inv4. 
p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src dst where a1:"src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_GetX_Nak src dst" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__23 p__Inv4" apply fastforce done have "(src=p__Inv4\<and>dst~=p__Inv4)\<or>(src~=p__Inv4\<and>dst=p__Inv4)\<or>(src~=p__Inv4\<and>dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4\<and>dst~=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4\<and>dst=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4\<and>dst~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Remote_GetX_PutXVsinv__23: assumes a1: "(\<exists> src dst. src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_GetX_PutX src dst)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain src dst where a1:"src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_GetX_PutX src dst" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__23 p__Inv4" apply fastforce done have "(src=p__Inv4\<and>dst~=p__Inv4)\<or>(src~=p__Inv4\<and>dst=p__Inv4)\<or>(src~=p__Inv4\<and>dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(src=p__Inv4\<and>dst~=p__Inv4)" have "?P3 s" apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''Proc'') dst) ''CacheState'')) (Const CACHE_E)) (eqn (IVar (Field (Field (Ident ''Sta'') ''WbMsg'') ''Cmd'')) (Const WB_Wb))))" in exI, auto) done then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4\<and>dst=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(src~=p__Inv4\<and>dst~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Remote_PutVsinv__23: assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_Put dst)" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Remote_Put dst" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__23 p__Inv4" apply fastforce done have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(dst=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(dst~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_Remote_PutXVsinv__23: assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_PutX dst)" and a2: "(\<exists> p__Inv4. 
p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Remote_PutX dst" apply fastforce done from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__23 p__Inv4" apply fastforce done have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(dst=p__Inv4)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(dst~=p__Inv4)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_NI_WbVsinv__23: assumes a1: "(r=n_NI_Wb )" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__23 p__Inv4" apply fastforce done have "?P1 s" proof(cut_tac a1 a2 , auto) qed then show "invHoldForRule s f r (invariants N)" by auto qed lemma n_NI_Remote_GetX_PutX_HomeVsinv__23: assumes a1: "\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_GetX_PutX_Home dst" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_PI_Local_GetX_PutX__part__0Vsinv__23: assumes a1: "r=n_PI_Local_GetX_PutX__part__0 " and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_InvAck_3Vsinv__23: assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_InvAck_3 N src" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_InvAck_1Vsinv__23: assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_InvAck_1 N src" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_PI_Local_GetX_GetX__part__1Vsinv__23: assumes a1: "r=n_PI_Local_GetX_GetX__part__1 " and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_PI_Local_GetX_GetX__part__0Vsinv__23: assumes a1: "r=n_PI_Local_GetX_GetX__part__0 " and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_PI_Remote_ReplaceVsinv__23: assumes a1: "\<exists> src. src\<le>N\<and>r=n_PI_Remote_Replace src" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_PI_Local_ReplaceVsinv__23: assumes a1: "r=n_PI_Local_Replace " and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_InvAck_existsVsinv__23: assumes a1: "\<exists> src pp. src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_InvAck_exists src pp" and a2: "(\<exists> p__Inv4. 
p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_Remote_Get_Put_HomeVsinv__23: assumes a1: "\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_Get_Put_Home dst" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_InvVsinv__23: assumes a1: "\<exists> dst. dst\<le>N\<and>r=n_NI_Inv dst" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_PI_Local_PutXVsinv__23: assumes a1: "r=n_PI_Local_PutX " and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_PI_Local_Get_PutVsinv__23: assumes a1: "r=n_PI_Local_Get_Put " and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_ShWbVsinv__23: assumes a1: "r=n_NI_ShWb N " and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_PI_Local_GetX_PutX_HeadVld__part__0Vsinv__23: assumes a1: "r=n_PI_Local_GetX_PutX_HeadVld__part__0 N " and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_ReplaceVsinv__23: assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_Replace src" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_Remote_GetX_Nak_HomeVsinv__23: assumes a1: "\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_GetX_Nak_Home dst" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_Local_PutXAcksDoneVsinv__23: assumes a1: "r=n_NI_Local_PutXAcksDone " and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_PI_Local_GetX_PutX__part__1Vsinv__23: assumes a1: "r=n_PI_Local_GetX_PutX__part__1 " and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_Remote_Get_Nak_HomeVsinv__23: assumes a1: "\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_Get_Nak_Home dst" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_InvAck_exists_HomeVsinv__23: assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_InvAck_exists_Home src" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_Replace_HomeVsinv__23: assumes a1: "r=n_NI_Replace_Home " and a2: "(\<exists> p__Inv4. 
p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_Local_PutVsinv__23: assumes a1: "r=n_NI_Local_Put " and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_Nak_ClearVsinv__23: assumes a1: "r=n_NI_Nak_Clear " and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_PI_Local_Get_GetVsinv__23: assumes a1: "r=n_PI_Local_Get_Get " and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_Nak_HomeVsinv__23: assumes a1: "r=n_NI_Nak_Home " and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_InvAck_2Vsinv__23: assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_InvAck_2 N src" and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_PI_Local_GetX_PutX_HeadVld__part__1Vsinv__23: assumes a1: "r=n_PI_Local_GetX_PutX_HeadVld__part__1 N " and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_NI_FAckVsinv__23: assumes a1: "r=n_NI_FAck " and a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__23 p__Inv4)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done end
[STATEMENT]
lemma (in Ring) ds2_components:"\<lbrakk>R module M1; R module M2; R module M; M1 \<Oplus>\<^bsub>R,M\<^esub> M2; a \<in> carrier M\<rbrakk> \<Longrightarrow> \<exists>a1\<in>carrier M1. \<exists>a2\<in>carrier M2. a = a1 \<plusminus>\<^bsub>M\<^esub> a2"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
 1. \<lbrakk>R module M1; R module M2; R module M; M1 \<Oplus>\<^bsub>R,M\<^esub> M2; a \<in> carrier M\<rbrakk> \<Longrightarrow> \<exists>a1\<in>carrier M1. \<exists>a2\<in>carrier M2. a = a1 \<plusminus>\<^bsub>M\<^esub> a2
[PROOF STEP]
by (simp add:ds2_def)
# Complete multipartite graphs. BindGlobal("CompleteMultipartiteGraph", function(arg) local sizes, dp, F, G, m, n; F := function(x, y) return x[1] <> y[1]; end; if Length(arg) = 0 then Error("at least one argument expected"); return fail; elif Length(arg) = 1 then sizes := arg[1]; dp := DirectProduct(List(sizes, i -> SymmetricGroup(i))); return Graph(dp, Union(List([1..Length(sizes)], i -> List([1..sizes[i]], j -> [i, j]))), OnSum(dp), F, true); else m := arg[1]; n := arg[2]; return Graph(WreathProductSymmetricGroups(n, m), Cartesian([1..m], [1..n]), function(x, g) local y; y := (n*(x[1]-1)+x[2])^g - 1; return [Int(y/n)+1, y mod n + 1]; end, F, true); fi; end); # Cycle graphs. BindGlobal("CycleGraph", n -> Graph(DihedralGroup(IsPermGroup, 2*n), [1..n], OnPoints, function(x, y) return (x-y) mod n in [1,n-1]; end, true)); # Cocktail party graphs. BindGlobal("CocktailPartyGraph", n -> CompleteMultipartiteGraph(n, 2) ); # Paley graphs. # For q = 1 (mod 4) a prime power, the graph is strongly regular. # For q = 3 (mod 4) a prime power, the graph is directed. BindGlobal("PaleyGraph", function(q) local dp; dp := DirectProduct(FieldAdditionPermutationGroup(q), Group(GeneratorsOfGroup(FieldMultiplicationPermutationGroup(q))[1]^2)); return Graph(dp, Elements(GF(q)), OnPaley(q, dp), function(x, y) return IsOne((x-y)^((q-1)/2)); end, true); end); # Latin square graphs. BindGlobal("LatinSquareGraph", function(arg) local dim, dp, G, invt, vcs; if Length(arg) = 0 then Error("at least one argument expected"); return fail; fi; G := arg[1]; if Length(arg) > 1 then invt := arg[2]; else invt := true; fi; if IsGroup(G) then dp := DirectProduct(G, G); if invt then vcs := Cartesian(G, G); else vcs := [[One(G), One(G)]]; fi; return Graph(dp, vcs, OnLatinSquare(dp), function(x, y) return x <> y and (x[1] = y[1] or x[2] = y[2] or x[1]*x[2] = y[1]*y[2]); end, invt); else dim := DimensionsMat(G); return AdjFunGraph(Cartesian([1..dim[1]], [1..dim[2]]), function(x, y) return x <> y and (x[1] = y[1] or x[2] = y[2] or G[x[1]][x[2]] = G[y[1]][y[2]]); end); fi; end); # Complete Taylor graphs, i.e. complete bipartite graphs minus a matching. BindGlobal("CompleteTaylorGraph", function(n) local G; G := EdgeOrbitsGraph(Group([(1,2)(n+1,n+2), PermList(Concatenation([2..n], [1], [n+2..2*n], [n+1])), PermList(Concatenation([n+1..2*n], [1..n]))]), [1, n+2]); AssignVertexNames(G, Cartesian([1, 2], [1..n])); return G; end);
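For reference, the strong regularity mentioned in the comment above PaleyGraph can be made precise. The parameters below are the standard ones for Paley graphs and are added here for orientation; they are not taken from the source file:

\[
  (v,\; k,\; \lambda,\; \mu) \;=\; \Bigl(q,\; \tfrac{q-1}{2},\; \tfrac{q-5}{4},\; \tfrac{q-1}{4}\Bigr),
  \qquad q \equiv 1 \pmod 4 \text{ a prime power.}
\]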
module X

public export
data Ty : Type where
  A : Ty
  B : Ty

public export
f : (d : Ty) => Nat -> Ty
f _ = d

g : f 0 = B
g = Refl
Require Import msl.msl_direct. Require Import overlapping. Require Import heap_model. Require Import graph. Require Import msl_ext. Require Import ramify_tactics. Require Import FunctionalExtensionality. Require Import NPeano. Require Import List. Require Import utilities. Local Open Scope pred. Instance natEqDec : EqDec nat := { t_eq_dec := eq_nat_dec }. Definition trinode x d l r := !!(3 | x) && (mapsto x d) * (mapsto (x+1) l) * (mapsto (x+2) r). Section PointwiseGraph. Variable pg : @PreGraph nat nat natEqDec. Variable bi : BiGraph pg. Axiom valid_not_zero: forall x, @valid nat nat natEqDec pg x -> x <> 0. Definition gValid (x : adr) : Prop := @valid nat nat natEqDec pg x. Definition graph_fun (Q: adr -> pred world) (x: adr) := (!!(x = 0) && emp) || (EX d:adr, EX l:adr, EX r:adr, !!(gamma bi x = (d, l, r) /\ gValid x) && trinode x d l r ⊗ (Q l) ⊗ (Q r)). Lemma graph_fun_covariant : covariant graph_fun. Proof. unfold graph_fun. apply covariant_orp. apply covariant_const. apply covariant_exp; intro d. apply covariant_exp; intro l. apply covariant_exp; intro r. repeat apply covariant_ocon. apply covariant_andp. apply covariant_const. repeat apply covariant_sepcon; apply covariant_const. apply covariant_const'. apply covariant_const'. Qed. Definition graph := corec graph_fun. Lemma graph_unfold: forall x, graph x = (!!(x = 0) && emp) || (EX d:adr, EX l:adr, EX r:adr, !!(gamma bi x = (d, l, r) /\ gValid x) && trinode x d l r ⊗ (graph l) ⊗ (graph r)). Proof. intros. unfold graph at 1. rewrite corec_fold_unfold. trivial. apply graph_fun_covariant. Qed. Definition dag_fun (Q: adr -> pred world) (x: adr) := (!!(x = 0) && emp) || (EX d:adr, EX l:adr, EX r:adr, !!(gamma bi x = (d, l, r) /\ gValid x) && trinode x d l r * ((Q l) ⊗ (Q r))). Lemma dag_fun_covariant : covariant dag_fun. Proof. unfold dag_fun. apply covariant_orp. apply covariant_const. apply covariant_exp. intro d. apply covariant_exp. intro l. apply covariant_exp. intro r. apply covariant_sepcon. apply covariant_const. apply covariant_ocon; apply covariant_const'. Qed. Definition dag := corec dag_fun. Lemma dag_unfold: forall x, dag x = (!!(x = 0) && emp) || (EX d:adr, EX l:adr, EX r:adr, !!(gamma bi x = (d, l, r) /\ gValid x) && trinode x d l r * ((dag l) ⊗ (dag r))). Proof. intros. unfold dag at 1. rewrite corec_fold_unfold. trivial. apply dag_fun_covariant. Qed. (* Lemma dag_eq_graph: forall x, dag x |-- graph x && !!(graph_is_acyclic (reachable_subgraph pg (x :: nil))). *) Fixpoint graphs (l : list adr) := match l with | nil => emp | v :: l' => graph v ⊗ graphs l' end. Fixpoint dags (l : list adr) := match l with | nil => emp | v :: l' => dag v ⊗ dags l' end. Definition graph_maps (v : adr) : pred world := let (dl, r) := gamma bi v in let (d, l) := dl in trinode v d l r. Lemma precise_graph_maps: forall v, precise (graph_maps v). Proof. intro. unfold graph_maps. destruct (gamma bi v) as [dl r]. destruct dl as [d l]. unfold trinode. apply precise_sepcon. apply precise_mapsto. apply precise_sepcon. apply precise_mapsto. apply precise_andp_right. apply precise_mapsto. Qed. Lemma joinable_mapsto: forall w x y a b, x <> y -> (mapsto x a * TT)%pred w -> (mapsto y b * TT)%pred w -> (mapsto x a * mapsto y b * TT)%pred w. Proof. intros. destruct H0 as [p [q [? [? ?]]]]. generalize H2; intro Hmap1. destruct p as [fp xp] eqn:? . destruct H1 as [m [n [? [? ?]]]]. generalize H4; intro Hmap2. destruct m as [fm xm] eqn:? . hnf in H2. simpl in H2. hnf in H4. simpl in H4. destruct H2 as [? [? ?]]. destruct H4 as [? [? ?]]. 
remember (fun xx : adr => if eq_nat_dec xx x then Some a else (if eq_nat_dec xx y then Some b else None)) as f. assert (finMap f). exists (x :: y :: nil). intro z. intros. rewrite Heqf. destruct (eq_nat_dec z x). rewrite e in *. exfalso. apply H10. apply in_eq. destruct (eq_nat_dec z y). rewrite e in *. exfalso. apply H10. apply in_cons. apply in_eq. trivial. remember (exist (finMap (B:=adr)) f H10) as ff. assert (join p m ff). rewrite Heqw0, Heqw1, Heqff. hnf; simpl. intro z. destruct (eq_nat_dec z x). rewrite e in *. rewrite H7. generalize (H8 x H); intro HS. rewrite HS. rewrite Heqf. destruct (eq_nat_dec x x). apply lower_None2. exfalso; auto. destruct (eq_nat_dec z y). rewrite e in *. rewrite H9. generalize (H6 y n0); intro HS. rewrite HS. rewrite Heqf. destruct (eq_nat_dec y x). intuition. destruct (eq_nat_dec y y). apply lower_None1. intuition. specialize (H6 z n0). specialize (H8 z n1). rewrite H6, H8. rewrite Heqf. destruct (eq_nat_dec z x). intuition. destruct (eq_nat_dec z y). intuition. apply lower_None1. rewrite <- Heqw0 in *. rewrite <- Heqw1 in *. destruct (join_together H0 H1 H11) as [qn ?]. exists ff, qn. repeat split; auto. exists p, m. split; auto. Qed. Lemma graph_path_tri_in: forall p x y P, pg |= p is x ~o~> y satisfying P -> graph x |-- graph_maps y * TT. Proof. induction p; intros; destruct H as [[? ?] [? ?]]. inversion H. simpl in H; inversion H; subst. clear H. destruct p. simpl in H0; inversion H0; subst; clear H0. repeat intro. rewrite graph_unfold in H. destruct H as [[? ?] | ?]. hnf in H; apply valid_not_zero in H1; intuition. destruct H as [d [l [r ?]]]. destruct_ocon H h. destruct_ocon H4 i. destruct H8 as [[? ?] ?]. try_join i3 h3 i3h3. exists i12, i3h3. split; auto. split; auto. hnf. destruct (gamma bi y) as [dl rr]. destruct dl as [dd ll]. inversion H8; auto. assert (pg |= n :: p is n ~o~> y satisfying P). split; [split; [simpl; auto | auto] | split; [destruct H1; auto | repeat intro; apply H2; apply in_cons; auto]]. repeat intro. rewrite graph_unfold in H3. destruct H3 as [[? ?] | [d [l [r ?]]]]. hnf in H3. destruct H1 as [[? ?] ?]. apply valid_not_zero in H1. intuition. destruct_ocon H3 h. destruct_ocon H6 i. destruct H10 as [[? ?] ?]. unfold gamma in H10. destruct_sepcon H13 j. destruct_sepcon H14 k. destruct H1 as [[? [? ?]] ?]. revert H10. case_eq (biEdge bi x); intros. inversion H21; subst. generalize (biEdge_only2 bi _ _ _ _ H10 H19); intros. destruct H22; subst. specialize (IHp _ _ _ H _ H11). destruct IHp as [l1 [l2 [? [? ?]]]]. try_join i2 i3 i23'. equate_join i23 i23'. try_join l2 i1 l2i1. try_join l2i1 h3 l2i1h3. exists l1, l2i1h3. split; auto. specialize (IHp _ _ _ H _ H7). destruct IHp as [l1 [l2 [? [? ?]]]]. try_join h2 h3 h23'. equate_join h23 h23'. try_join l2 h1 l2h1. exists l1, l2h1. split; auto. Qed. Lemma graph_reachable_tri_in: forall x y, reachable pg x y -> graph x |-- graph_maps y * TT. Proof. intros; destruct H; apply graph_path_tri_in in H; trivial. Qed. Lemma graph_path_in: forall p x y P, pg |= p is x ~o~> y satisfying P -> graph x |-- EX v : adr, (mapsto y v * TT). Proof. intros. generalize (graph_path_tri_in _ _ _ _ H). intros. repeat intro. specialize (H0 a H1). destruct_sepcon H0 i. hnf in H2. destruct (gamma bi y) as [dl r] in H2. destruct dl as [d l]. destruct_sepcon H2 j. destruct_sepcon H4 k. destruct H6. try_join k2 j2 k2j2. try_join k2j2 i2 k2j2i2. exists d, k1, k2j2i2. split; auto. Qed. Lemma graph_reachable_in: forall x y, reachable pg x y -> graph x |-- EX v : adr, (mapsto y v * TT). Proof. 
intros; destruct H; apply graph_path_in in H; trivial. Qed. Section ConstructReachable. Definition explode (x : adr) (w : world) (H : (graph x * TT)%pred w) : {l : adr & {r : adr | biEdge bi x = (l, r) /\ gValid x /\ (graph l * TT)%pred w /\ (graph r * TT)%pred w}} + {x = 0}. destruct (eq_nat_dec x 0). right; auto. left. rewrite graph_unfold in H. assert (((EX d : adr, (EX l : adr, (EX r : adr, !!(gamma bi x = (d, l, r) /\ gValid x) && trinode x d l r ⊗ graph l ⊗ graph r))) * TT)%pred w) as S. destruct_sepcon H h. hnf in H0. destruct H0. destruct H0. hnf in H0. exfalso; auto. exists h1, h2; split; auto. clear H. remember (gamma bi x). destruct p as [[d l] r]. exists l, r. destruct_sepcon S h. destruct H0 as [dd [ll [rr ?]]]. destruct_ocon H0 i. destruct_ocon H4 j. destruct H8 as [[? ?] ?]. injection H8; intros; subst; clear H8. unfold gamma in Heqp. destruct (biEdge bi x). injection Heqp; intros; subst; clear Heqp. repeat split; auto. apply valid_not_zero in H10. try_join j2 j3 j23'. equate_join j23 j23'. try_join j1 i3 j1i3. try_join j1i3 h2 j1i3h2. exists j23, j1i3h2. repeat split; auto. try_join i2 i3 i23'; equate_join i23 i23'. try_join i1 h2 i1h2. exists i23, i1h2. repeat split; auto. Defined. Definition twoSubTrees (x : adr) (w : world) (m : {l : adr & {r : adr | biEdge bi x = (l, r) /\ gValid x /\ (graph l * TT)%pred w /\ (graph r * TT)%pred w}}) : list {t : adr | (graph t * TT)%pred w }. destruct m as [l [r [? [? [? ?]]]]]. remember (exist (fun t => (graph t * TT)%pred w) l H1) as sl. remember (exist (fun t => (graph t * TT)%pred w) r H2) as sr. remember (sl :: sr :: nil) as lt; apply lt. Defined. Definition graph_sig_fun (w : world) := fun f => (graph f * TT)%pred w. Definition fetch (w : world) := proj1_sig (P := graph_sig_fun w). Fixpoint removeTree (w : world) (x : adr) (l : list {t : adr | (graph t * TT)%pred w}) := match l with | nil => nil | y :: ttl => if eq_nat_dec x (proj1_sig y) then removeTree w x ttl else y :: removeTree w x ttl end. Lemma remove_tree_len_le: forall w x l, length (removeTree w x l) <= length l. Proof. induction l; simpl. trivial. destruct (eq_nat_dec x (proj1_sig a)); simpl; omega. Qed. Lemma remove_tree_sublist: forall w x l, Sublist (removeTree w x l) l. Proof. induction l; hnf; intros; simpl in *; auto. destruct a as [t Ht]; simpl in H. destruct (eq_nat_dec x t). subst. right. apply IHl; auto. destruct (in_inv H); [left | right]. auto. apply IHl; auto. Qed. Lemma remove_tree_not_in: forall w x l, ~ In x (map (fetch w) (removeTree w x l)). Proof. induction l; intro; simpl in * |-. apply H. destruct (eq_nat_dec x (proj1_sig a)). subst. apply IHl; auto. simpl in H. destruct H. intuition. apply IHl; auto. Qed. Lemma remove_tree_in: forall w x l y, y <> x -> In y (map (fetch w) l) -> In y (map (fetch w) (removeTree w x l)). Proof. induction l; intros; simpl in *. apply H0. destruct a as [a ?]. simpl in *. destruct (eq_nat_dec x a). destruct H0. subst. exfalso; apply H; auto. apply IHl; auto. simpl in *. destruct H0. left; auto. right. apply IHl; auto. Qed. Definition dup_input (w : world) := list {t : adr | (graph t * TT)%pred w}. Definition dupLength (w : world) (i : dup_input w) := length i. Definition dupOrder (w : world) (i1 i2 : dup_input w) := dupLength w i1 < dupLength w i2. Lemma dupOrder_wf': forall w len i, dupLength w i <= len -> Acc (dupOrder w) i. Proof. induction len; intros; constructor; intros; unfold dupOrder in * |-; [exfalso | apply IHlen]; intuition. Qed. Lemma dupOrder_wf (w : world) : well_founded (dupOrder w). Proof. 
red; intro; eapply dupOrder_wf'; eauto. Defined. Definition removeDup (w : world) : dup_input w -> dup_input w. refine ( Fix (dupOrder_wf w) (fun _ => dup_input w) (fun (inp : dup_input w) => match inp return ((forall inp2 : dup_input w, (dupOrder w) inp2 inp -> (dup_input w)) -> dup_input w) with | nil => fun _ => nil | x :: l => fun f => x :: (f (removeTree w (proj1_sig x) l) _) end)). apply le_lt_trans with (dupLength w l). apply remove_tree_len_le. simpl; apply lt_n_Sn. Defined. Lemma removeDup_unfold: forall w i, removeDup w i = match i with | nil => nil | x :: l => x :: removeDup w (removeTree w (proj1_sig x) l) end. Proof. intros. unfold removeDup at 1; rewrite Fix_eq. destruct i; auto. intros. assert (f = g) by (extensionality y; extensionality p; auto); subst; auto. Qed. Lemma remove_dup_len_le: forall w l, length (removeDup w l) <= length l. Proof. intros w l. remember (length l). assert (length l <= n) by omega. clear Heqn. revert H. revert l. induction n; intros; rewrite removeDup_unfold; destruct l; auto. inversion H. simpl. apply le_n_S. apply IHn. simpl in H; apply le_S_n in H. apply le_trans with (length l). apply remove_tree_len_le. auto. Qed. Lemma remove_dup_sublist: forall w l1 l2, Sublist l1 l2 -> Sublist (removeDup w l1) l2. Proof. intros w l1. remember (length l1). assert (length l1 <= n) by omega. clear Heqn. revert H. revert l1. induction n; intros; rewrite removeDup_unfold; destruct l1. apply Sublist_nil. inversion H. apply Sublist_nil. rewrite <- (app_nil_l (removeDup w (removeTree w (proj1_sig s) l1))). rewrite app_comm_cons. apply Sublist_app_2. repeat intro. apply in_inv in H1. destruct H1. subst. specialize (H0 a). apply H0. apply in_eq. inversion H1. apply IHn. simpl in H. apply le_trans with (length l1). apply remove_tree_len_le. apply le_S_n. apply H. apply Sublist_trans with l1. apply remove_tree_sublist. repeat intro. apply H0. apply in_cons. apply H1. Qed. Lemma remove_dup_in_inv: forall w x l, In x (map (fetch w) l) -> In x (map (fetch w) (removeDup w l)). Proof. intros w x l. remember (length l). assert (length l <= n) by omega. clear Heqn. revert l H. induction n; intros; rewrite removeDup_unfold; destruct l; auto. simpl in H. omega. destruct s as [s ?]. simpl in *. destruct (eq_nat_dec x s); destruct H0. left; auto. left; auto. exfalso; apply n0; auto. right. apply IHn. apply le_trans with (length l). apply remove_tree_len_le. omega. apply remove_tree_in; auto. Qed. Lemma remove_dup_nodup: forall w l, NoDup (map (fetch w) (removeDup w l)). Proof. intros w l. remember (length l). assert (length l <= n) by omega. clear Heqn. revert H. revert l. induction n; intros; rewrite removeDup_unfold; destruct l; simpl. apply NoDup_nil. inversion H. apply NoDup_nil. apply NoDup_cons. generalize (remove_tree_not_in w (proj1_sig s) l); intro. intro; apply H0; clear H0. apply (map_sublist _ _ (fetch w) (removeDup w (removeTree w (proj1_sig s) l)) (removeTree w (proj1_sig s) l)). apply remove_dup_sublist. apply Sublist_refl. auto. apply IHn. simpl in H. apply le_trans with (length l). apply remove_tree_len_le. apply le_S_n. apply H. Qed. Fixpoint appRemoveList (w : world) (lc la : list {t : adr | (graph t * TT)%pred w}) (lb : list adr) := match lb with | nil => removeDup w (lc ++ la) | x :: l => appRemoveList w lc (removeTree w x la) l end. Lemma remove_list_len_le: forall w lb lc la, length (appRemoveList w lc la lb) <= length lc + length la. Proof. induction lb; intros; simpl. apply le_trans with (length (lc ++ la)). apply remove_dup_len_le. 
rewrite app_length; trivial. apply le_trans with (length lc + length (removeTree w a la)). apply IHlb. apply plus_le_compat_l, remove_tree_len_le. Qed. Lemma remove_list_sublist: forall w lb lc la, Sublist (appRemoveList w lc la lb) (lc ++ la). Proof. induction lb; intros; hnf; intros; simpl in *; auto. apply (remove_dup_sublist w (lc ++ la) (lc ++ la)). apply Sublist_refl. apply H. specialize (IHlb lc (removeTree w a la) a0 H). destruct (in_app_or _ _ _ IHlb); apply in_or_app; [left | right]. auto. generalize (remove_tree_sublist w a la); intro Hr; hnf in Hr; apply Hr; auto. Qed. Lemma remove_list_no_dup: forall w lb lc la, NoDup (map (fetch w) (appRemoveList w lc la lb)). Proof. induction lb; intros; simpl. apply remove_dup_nodup. apply IHlb. Qed. Lemma remove_list_not_in: forall w lb lc la, exists ld, (appRemoveList w lc la lb = removeDup w (lc ++ ld)) /\ Sublist ld la /\ forall x, In x lb -> ~ In x (map (fetch w) ld). Proof. induction lb; intros; simpl. exists la. repeat split. apply Sublist_refl. intros. auto. destruct (IHlb lc (removeTree w a la)) as [ld [? [? ?]]]. exists ld. repeat split; auto. apply Sublist_trans with (removeTree w a la). auto. apply remove_tree_sublist. intro y; intros. destruct H2; auto. subst. apply map_sublist with (f := fetch w) in H0. intro. specialize (H0 y H2). apply remove_tree_not_in in H0. auto. Qed. Lemma remove_list_in: forall w lb lc la a x, x <> a -> (~ In x lb) -> In x (map (fetch w) la) -> In x (map (fetch w) (appRemoveList w lc (removeTree w a la) lb)). Proof. induction lb; intros; simpl. assert (In x (map (fetch w) (lc ++ removeTree w a la))). rewrite map_app. apply in_or_app; right. apply remove_tree_in; auto. apply remove_dup_in_inv. auto. apply IHlb. intro; apply H0; subst; apply in_eq. intro; apply H0; apply in_cons; auto. apply remove_tree_in; auto. Qed. Lemma remove_list_in_2: forall w lb lc la x, In x (map (fetch w) lc) -> In x (map (fetch w) (appRemoveList w lc la lb)). Proof. induction lb; intros; simpl. apply remove_dup_in_inv. rewrite map_app. apply in_or_app. left; auto. apply IHlb. auto. Qed. Definition graph_zero : forall (w : world), (graph 0 * TT)%pred w. intros. exists (core w), w; repeat split. apply core_unit. rewrite graph_unfold. left; hnf. split; auto. apply core_identity. Defined. Definition reach_input (w : world) := (nat * list {t : adr | (graph t * TT)%pred w} * list adr )%type. Definition lengthInput (w : world) (i : reach_input w) := match i with | (len, pr, re) => 2 * len + length pr - 2 * length re end. Definition inputOrder (w : world) (i1 i2 : reach_input w) := lengthInput w i1 < lengthInput w i2. Lemma inputOrder_wf': forall w len i, lengthInput w i <= len -> Acc (inputOrder w) i. Proof. induction len; intros; constructor; intros; unfold inputOrder in * |-; [exfalso | apply IHlen]; intuition. Qed. Lemma inputOrder_wf (w : world) : well_founded (inputOrder w). Proof. red; intro; eapply (inputOrder_wf' w); eauto. Defined. Definition extractReach (w : world) : reach_input w -> list adr. refine ( Fix (inputOrder_wf w) (fun _ => list adr) (fun (inp : reach_input w) => match inp return ((forall inp2, inputOrder w inp2 inp -> list adr) -> list adr) with | (_, nil, r) => fun _ => r | (len, g :: l, r) => fun f => if le_dec len (length r) then r else match explode (proj1_sig g) w (proj2_sig g) with | inleft hasNodes => let subT := twoSubTrees (proj1_sig g) w hasNodes in let m := (proj1_sig g) :: r in let newL := appRemoveList w l subT m in f (len, newL, m) _ | inright _ => f (len, l, r) _ end end)). 
destruct hasNodes as [leftT [rightT [? [? [? ?]]]]]. destruct g as [x ?]. simpl in subT. unfold newL, inputOrder, lengthInput. generalize (remove_list_len_le w m l subT); intro. apply le_lt_trans with (len + len + length l + length subT - S (length r + S (length r))). unfold m at 2. simpl length at 2. omega. simpl. omega. unfold inputOrder, lengthInput; simpl; repeat rewrite <- plus_n_O. omega. Defined. Lemma extractReach_unfold: forall w i, extractReach w i = match i with | (_, nil, r) => r | (len, g :: l, r) => if le_dec len (length r) then r else match explode (proj1_sig g) w (proj2_sig g) with | inleft hasNodes => let subT := twoSubTrees (proj1_sig g) w hasNodes in let newL := appRemoveList w l subT (proj1_sig g :: r) in extractReach w (len, newL, (proj1_sig g) :: r) | inright _ => extractReach w (len, l, r) end end. Proof. intros. destruct i as [[n prs] rslt]. unfold extractReach at 1; rewrite Fix_eq. destruct prs; simpl. auto. destruct (le_dec n (length rslt)). auto. destruct (explode (proj1_sig s) w (proj2_sig s)); unfold extractReach; auto. intros; assert (f = g) by (extensionality y; extensionality p; auto); subst; auto. Qed. Definition rch1 w (i : reach_input w) := match i with (n, _, _) => n end. Definition rch2 w (i : reach_input w) := match i with (_, l, _) => l end. Definition rch3 w (i: reach_input w) := match i with (_, _, result) => result end. Lemma extractReach_reachable: forall w (i : reach_input w) (x : adr), Forall (reachable pg x) (rch3 w i) -> Forall (fun y => reachable pg x y \/ y = 0) (map (fetch w) (rch2 w i)) -> Forall (reachable pg x) (extractReach w i). Proof. intros w i x; remember (lengthInput w i); assert (lengthInput w i <= n) by omega; clear Heqn; revert H x; revert i. induction n; intros; remember (extractReach w i) as result; rename Heqresult into H3; destruct i as [[len pr] rslt]; unfold rch2, rch3, lengthInput in *; simpl in *; rewrite extractReach_unfold in H3; destruct pr; simpl in H3. subst; apply H0; auto. destruct (le_dec len (length rslt)). subst; apply H0; auto. exfalso; omega. subst; apply H0; auto. destruct (le_dec len (length rslt)). subst; apply H0; auto. destruct s as [t Ht]; simpl in *. destruct (explode t w Ht). remember (twoSubTrees t w s) as subT. destruct s as [l [r [? [? [Gl Gr]]]]]. simpl in HeqsubT. assert (reachable pg x t) as HRt. destruct (Forall_inv H1); auto. apply valid_not_zero in g; exfalso; auto. rewrite H3; apply IHn; simpl. apply le_trans with (len + (len + 0) + length pr + length (removeTree w t subT) - S (length rslt + S (length rslt + 0))). generalize (remove_list_len_le w rslt pr (removeTree w t subT)); intros. omega. generalize (remove_tree_len_le w t subT); intros. rewrite HeqsubT in H2 at 2. simpl in H2. omega. apply Forall_cons; auto. assert (Sublist (appRemoveList w pr (removeTree w t subT) rslt) (pr ++ subT)). apply Sublist_trans with (pr ++ (removeTree w t subT)). apply remove_list_sublist. apply Sublist_app. apply Sublist_refl. apply remove_tree_sublist. generalize (map_sublist _ _ (fetch w) _ _ H2); intros; clear H2. rewrite map_app in H4. apply (Forall_sublist _ _ _ H4); clear H4. apply Forall_app. apply Forall_tl in H1; auto. rewrite HeqsubT; simpl. rewrite Forall_forall. intro y; intros. destruct (in_inv H2). subst. destruct Gl as [wl [_ [_ [? _]]]]. rewrite graph_unfold in H3. destruct H3 as [[? ?] | [dd [ll [rr ?]]]]; [right; auto| left]. destruct_ocon H3 h. destruct_ocon H6 j. destruct H10 as [[? ?] ?]. apply reachable_by_merge with t. auto. exists (t :: y :: nil). hnf. split; split; simpl; auto. 
split. unfold biEdge in e. split. apply reachable_foot_valid in HRt. auto. split. apply H12. destruct (@only_two_neighbours nat nat natEqDec pg bi t) as [v1 [v2 HHH]]. inversion e; subst; clear e. rewrite HHH. apply in_eq. auto. repeat intro. hnf. auto. clear H2. destruct (in_inv H4). subst. destruct Gr as [wr [_ [_ [? _]]]]. rewrite graph_unfold in H2. destruct H2 as [[? ?] | [dd [ll [rr ?]]]]; [right; auto| left]. destruct_ocon H2 h. destruct_ocon H6 j. destruct H10 as [[? ?] ?]. apply reachable_by_merge with t. auto. exists (t :: y :: nil). hnf. split; split; simpl; auto. split. unfold biEdge in e. split. apply reachable_foot_valid in HRt. auto. split. apply H12. destruct (@only_two_neighbours nat nat natEqDec pg bi t) as [v1 [v2 HHH]]. inversion e; subst; clear e. rewrite HHH. apply in_cons, in_eq. auto. repeat intro. hnf. auto. inversion H2. rewrite H3; apply IHn; simpl; clear IHn H3 result; auto. omega. rewrite Forall_forall in H1. rewrite Forall_forall. intros. apply H1. apply in_cons; auto. Qed. Lemma extractReach_nodup: forall w i, NoDup ((map (fetch w) (rch2 w i)) ++ (rch3 w i)) -> NoDup (extractReach w i). Proof. intros w i; remember (lengthInput w i); assert (lengthInput w i <= n) by omega; clear Heqn; revert H. revert i. induction n; intros; remember (extractReach w i) as result; rename Heqresult into H1; destruct i as [[len pr] rslt]; unfold rch3, lengthInput in *; simpl in *; rewrite extractReach_unfold in H1; destruct pr; simpl in H1. subst; auto. destruct (le_dec len (length rslt)). subst; apply NoDup_app_r in H0; auto. simpl in H; exfalso; omega. subst; auto. destruct (le_dec len (length rslt)). subst; apply NoDup_app_r in H0; auto. destruct s as [t Ht]. simpl in *. destruct (explode t w Ht). subst; apply IHn. simpl. repeat rewrite <- plus_n_O. apply le_trans with (len + len + length pr + length (twoSubTrees t w s) - S (length rslt + S (length rslt))). generalize (remove_list_len_le w rslt pr (removeTree w t (twoSubTrees t w s))); intro; generalize (remove_tree_len_le w t (twoSubTrees t w s)); intro; omega. destruct s as [leftT [rightT [? [? [? ?]]]]]; simpl; omega. simpl. generalize (NoDup_cons_1 _ _ _ H0); intro. apply NoDup_cons_2 in H0. rewrite NoDup_app_eq in H1. destruct H1 as [? [? ?]]. apply NoDup_app_inv. apply remove_list_no_dup. apply NoDup_cons. intro; apply H0. apply in_or_app. right; auto. auto. intros. destruct (remove_list_not_in w rslt pr (removeTree w t (twoSubTrees t w s))) as [la [? [? ?]]]. rewrite H5 in H4; clear H5. assert (Sublist (removeDup w (pr ++ la)) (pr ++ la)). apply remove_dup_sublist. apply Sublist_refl. apply map_sublist with (f := fetch w) in H5. specialize (H5 x H4). clear H4. rewrite map_app in H5. apply in_app_or in H5. intro. apply in_inv in H4. destruct H5, H4. subst. apply H0. apply in_or_app. left; auto. specialize (H3 x H5). apply H3; auto. subst. apply map_sublist with (f := fetch w) in H6. specialize (H6 x H5). generalize (remove_tree_not_in w x (twoSubTrees x w s)); intro. apply H4; auto. specialize (H7 x H4). auto. subst; apply IHn. omega. simpl. rewrite <- (app_nil_l ((map (fetch w) pr) ++ rslt)) in H0. rewrite app_comm_cons in H0. apply NoDup_app_r in H0. auto. Qed. Definition NotNone (w : world) (e : adr) : Prop := lookup_fpm w e <> None. Lemma extractReach_all_not_none: forall w i, Forall (NotNone w) (rch3 w i) -> Forall (NotNone w) (extractReach w i). Proof. intros w i; remember (lengthInput w i); assert (lengthInput w i <= n) by omega; clear Heqn; revert H. revert i. 
induction n; intros; remember (extractReach w i) as result; rename Heqresult into H3; destruct i as [[len pr] rslt]; unfold rch2, rch3, lengthInput in *; simpl in *; rewrite extractReach_unfold in H3; destruct pr; simpl in H3. subst; apply H0; auto. destruct (le_dec len (length rslt)). subst; auto. exfalso; omega. subst; auto. destruct (le_dec len (length rslt)). subst; auto. destruct s as [t Ht]. simpl in *. destruct (explode t w Ht). rewrite H3. apply IHn. repeat rewrite <- plus_n_O. simpl. apply le_trans with (len + len + length pr + length (twoSubTrees t w s) - S (length rslt + S (length rslt))). generalize (remove_list_len_le w rslt pr (removeTree w t (twoSubTrees t w s))); intro; generalize (remove_tree_len_le w t (twoSubTrees t w s)); intro; omega. destruct s as [leftT [rightT [? [? [? ?]]]]]; simpl; omega. constructor. clear H3. destruct s as [leftT [rightT [? [? [? ?]]]]]. generalize (valid_not_zero _ H2); intro. destruct_sepcon Ht w. rewrite graph_unfold in H7. destruct H7 as [[? ?] | [dd [ll [rr ?]]]]. exfalso; intuition. destruct_ocon H7 h. destruct_ocon H11 j. destruct H15. destruct_sepcon H17 k. destruct_sepcon H18 l. destruct H20 as [Hd [? [? ?]]]. apply lookup_fpm_join_sub with l1. try_join l2 k2 l2k2. try_join l2k2 j3 l2k2j3. try_join l2k2j3 h3 l2k2j3h3. try_join l2k2j3h3 w2 l2k2j3h3w2. exists l2k2j3h3w2. auto. rewrite H23. intro S; inversion S. auto. rewrite H3. apply IHn. omega. auto. Qed. Lemma extractReach_length_bound: forall w i, length (rch3 w i) <= rch1 w i -> length (extractReach w i) <= rch1 w i. Proof. intros w i; remember (lengthInput w i); assert (lengthInput w i <= n) by omega; clear Heqn; revert H. revert i. induction n; intros; remember (extractReach w i) as result; rename Heqresult into H3; destruct i as [[len pr] rslt]; unfold rch3, lengthInput in *; simpl in *; rewrite extractReach_unfold in H3; destruct pr; simpl in H3. subst; auto. simpl in H; exfalso; omega. subst; auto. destruct (le_dec len (length rslt)). subst; auto. destruct s as [t Ht]. simpl in *. destruct (explode t w Ht). specialize (IHn (len, appRemoveList w pr (removeTree w t (twoSubTrees t w s)) rslt, t :: rslt)). simpl in IHn. rewrite H3. apply IHn. repeat rewrite <- plus_n_O in *. apply le_trans with (len + len + length pr + length (twoSubTrees t w s) - S (length rslt + S (length rslt))). generalize (remove_list_len_le w rslt pr (removeTree w t (twoSubTrees t w s))); intro; generalize (remove_tree_len_le w t (twoSubTrees t w s)); intro; omega. destruct s as [leftT [rightT [? [? [? ?]]]]]; simpl; omega. omega. rewrite H3; apply IHn. omega. unfold rch1. omega. Qed. Lemma extractReach_sublist: forall w i, Sublist (rch3 w i) (extractReach w i). Proof. intros w i; remember (lengthInput w i); assert (lengthInput w i <= n) by omega; clear Heqn; revert H. revert i. induction n; intros; remember (extractReach w i) as result; rename Heqresult into H3; destruct i as [[len pr] rslt]; unfold rch3, lengthInput in *; simpl in *; rewrite extractReach_unfold in H3; destruct pr; simpl in H3. subst; apply Sublist_refl. destruct (le_dec len (length rslt)). subst. apply Sublist_refl. simpl in H; omega. subst; apply Sublist_refl. destruct (le_dec len (length rslt)). subst. apply Sublist_refl. destruct s as [t Ht]; simpl in *. destruct (explode t w Ht). specialize (IHn (len, appRemoveList w pr (removeTree w t (twoSubTrees t w s)) rslt, t :: rslt)). simpl in IHn. assert (Sublist (t :: rslt) (extractReach w (len, appRemoveList w pr (removeTree w t (twoSubTrees t w s)) rslt, t :: rslt))). apply IHn. 
repeat rewrite <- plus_n_O in *. apply le_trans with (len + len + length pr + length (twoSubTrees t w s) - S (length rslt + S (length rslt))). generalize (remove_list_len_le w rslt pr (removeTree w t (twoSubTrees t w s))); intro; generalize (remove_tree_len_le w t (twoSubTrees t w s)); intro; omega. destruct s as [leftT [rightT [? [? [? ?]]]]]; simpl; omega. rewrite <- H3 in H0. intro y; intros. apply (H0 y). apply in_cons; auto. specialize (IHn (len, pr, rslt)); simpl in IHn. rewrite H3. apply IHn. simpl in H; omega. Qed. Definition ProcessingInResult (l1 l2 : list adr) : Prop := forall x y, In x l1 -> reachable pg x y -> In y l2. Lemma PIR_cons: forall a l1 l2, (forall y, reachable pg a y -> In y l2) -> ProcessingInResult l1 l2 -> ProcessingInResult (a :: l1) l2. Proof. repeat intro; apply in_inv in H1; destruct H1. subst. apply H; auto. apply (H0 x); auto. Qed. Lemma PIR_sublist: forall l1 l2 l3, Sublist l1 l2 -> ProcessingInResult l2 l3 -> ProcessingInResult l1 l3. Proof. repeat intro. specialize (H x H1). apply (H0 x y); auto. Qed. Definition ResultInProcessing (l1 l2 : list adr) : Prop := forall x y, In x l1 -> edge pg x y -> In y l1 \/ In y l2. Lemma neg_eq_in_neq_nin: forall a (l : list adr) x, ~ (a = x \/ In x l) -> x <> a /\ ~ In x l. Proof. intros; split. destruct (eq_nat_dec x a); auto. destruct (in_dec eq_nat_dec x l); auto. Qed. Fixpoint findNotIn (l1 l2 l3: list adr) : (option adr * (list adr * list adr)) := match l1 with | nil => (None, (nil, nil)) | x :: l => if (in_dec eq_nat_dec x l2) then findNotIn l l2 (x :: l3) else (Some x, (rev l3, l)) end. Lemma find_not_in_none: forall l1 l2 l3, fst (findNotIn l1 l2 l3) = None -> Forall (fun m => In m l2) l1. Proof. induction l1; intros. apply Forall_nil. simpl in H. destruct (in_dec eq_nat_dec a l2). apply Forall_cons. auto. apply IHl1 with (a :: l3); auto. inversion H. Qed. Lemma find_not_in_some_explicit: forall l1 l2 l3 x li1 li2, findNotIn l1 l2 l3 = (Some x, (li1, li2)) -> (Forall (fun m => In m l2) l3) -> (~ In x li1) /\ (~ In x l2) /\ exists l4, li1 = rev l3 ++ l4 /\ Forall (fun m => In m l2) l4 /\ l1 = l4 ++ x :: li2. Proof. induction l1; intros; simpl in H. inversion H. destruct (in_dec eq_nat_dec a l2). assert (Forall (fun m : adr => In m l2) (a :: l3)) by (apply Forall_cons; auto). specialize (IHl1 l2 (a :: l3) x li1 li2 H H1). destruct IHl1 as [? [? [l4 [? [? ?]]]]]. split; auto. split; auto. exists (a :: l4). repeat split; auto. simpl in H4. rewrite <- app_assoc in H4. rewrite <- app_comm_cons in H4. rewrite app_nil_l in H4. auto. rewrite H6; apply app_comm_cons. inversion H. split. intro; apply n. rewrite Forall_forall in H0. apply (H0 a). rewrite H2. rewrite in_rev. auto. split. rewrite <- H2. auto. exists nil. repeat split; auto. rewrite app_nil_r. auto. Qed. Lemma find_not_in_some: forall l1 l2 x li1 li2, findNotIn l1 l2 nil = (Some x, (li1, li2)) -> Forall (fun m => In m l2) li1 /\ l1 = li1 ++ x :: li2 /\ ~ In x li1 /\ ~ In x l2. Proof. intros. assert (Forall (fun m : adr => In m l2) nil) by apply Forall_nil. destruct (find_not_in_some_explicit l1 l2 nil x li1 li2 H H0). destruct H2 as [? [l4 [? [? ?]]]]. simpl in H3. rewrite H3 in *. repeat split; auto. Qed. Lemma foot_none_nil: forall (l : list adr), foot l = None -> l = nil. Proof. induction l; intros; auto. simpl in H. destruct l. inversion H. specialize (IHl H). inversion IHl. Qed. 
Lemma reachable_by_path_split_dec: forall p a b P rslt, pg |= p is a ~o~> b satisfying P -> {Forall (fun m => In m (a :: rslt)) p} + {exists l1 l2 e1 s2, Forall (fun m => In m (a :: rslt)) l1 /\ pg |= l1 is a ~o~> e1 satisfying P /\ pg |= l2 is s2 ~o~> b satisfying P /\ edge pg e1 s2 /\ ~ In s2 (a::rslt) /\ p = l1 ++ l2 /\ ~ In s2 l1}. Proof. intros. remember (findNotIn p (a :: rslt) nil) as f. destruct f as [n [l1 l2]]. destruct n. right. apply eq_sym in Heqf. destruct (find_not_in_some _ _ _ _ _ Heqf) as [? [? [? ?]]]. exists l1, (a0 :: l2). rewrite Forall_forall in H0. destruct l1. rewrite app_nil_l in H1. generalize (reachable_by_path_head _ _ _ _ _ _ _ _ H); intro. rewrite H1 in *. simpl in H4. inversion H4. rewrite H6 in *. exfalso; apply H3; apply in_eq. generalize (reachable_by_path_head _ _ _ _ _ _ _ _ H); intro. rewrite <- app_comm_cons in H1. rewrite H1 in H4. simpl in H4. inversion H4. rewrite H6 in *. clear H4 H6 a1. remember (foot (a :: l1)). destruct o. exists n, a0. split. rewrite Forall_forall; auto. assert (paths_meet_at adr (a :: l1) (n :: a0 :: l2) n) by (repeat split; auto). assert (pg |= path_glue adr (a :: l1) (n :: a0 :: l2) is a ~o~> b satisfying P). unfold path_glue. simpl. rewrite <- H1. auto. destruct (reachable_by_path_split_glue _ _ _ _ _ _ _ _ _ _ H4 H5). clear H4 H5. split; auto. assert (paths_meet_at adr (n :: a0 :: nil) (a0 :: l2) a0) by repeat split. assert (pg |= path_glue adr (n :: a0 :: nil) (a0 :: l2) is n ~o~> b satisfying P). unfold path_glue. simpl. auto. destruct (reachable_by_path_split_glue _ _ _ _ _ _ _ _ _ _ H4 H5). clear H4 H5 H6 H7. split; auto. split. destruct H8. destruct H5. destruct H5. auto. split. auto. split; simpl; auto. apply eq_sym in Heqo. generalize (foot_none_nil (a :: l1) Heqo); intros. inversion H4. assert (fst (findNotIn p (a :: rslt) nil) = None) by (rewrite <- Heqf; simpl; auto). left. apply find_not_in_none with nil. auto. Qed. Lemma extractReach_contains_all: forall w i, ResultInProcessing (rch3 w i) (map (fetch w) (rch2 w i)) -> length (extractReach w i) < rch1 w i -> ProcessingInResult (map (fetch w) (rch2 w i)) (extractReach w i). Proof. intros w i; remember (lengthInput w i); assert (lengthInput w i <= n) by omega; clear Heqn; revert H. revert i. induction n; intros; remember (extractReach w i) as result; rename Heqresult into H3; destruct i as [[len pr] rslt]; unfold rch1, rch2, rch3, lengthInput in *; simpl in *; rewrite extractReach_unfold in H3; destruct pr; simpl in *. subst; omega. destruct (le_dec len (length rslt)). subst; omega. omega. repeat intro; inversion H2. destruct (le_dec len (length rslt)). subst. omega. destruct s as [t Ht]. simpl in *. destruct (explode t w Ht). assert (ProcessingInResult (map (fetch w) (appRemoveList w pr (removeTree w t (twoSubTrees t w s)) rslt)) result). specialize (IHn (len, appRemoveList w pr (removeTree w t (twoSubTrees t w s)) rslt, t :: rslt)). simpl in IHn. rewrite H3; apply IHn; clear IHn. apply le_trans with (len + len + length pr + length (twoSubTrees t w s) - S (length rslt + S (length rslt))). generalize (remove_list_len_le w rslt pr (removeTree w t (twoSubTrees t w s))); intro; generalize (remove_tree_len_le w t (twoSubTrees t w s)); intro; omega. destruct s as [leftT [rightT [? [? [? ?]]]]]; simpl; omega. repeat intro; destruct (in_dec eq_nat_dec y (t :: rslt)). left; auto. right. remember (twoSubTrees t w s) as subT. apply neg_eq_in_neq_nin in n1. destruct n1. apply in_inv in H2. destruct H2. rewrite <- H2 in *. clear H2. destruct H4 as [? [? ?]]. 
destruct s as [lT [rT [? [? [? ?]]]]]. simpl in *. unfold biEdge in e. destruct (@only_two_neighbours nat nat natEqDec pg bi t) as [v1 [v2 HHH]]. inversion e. rewrite H9 in *. rewrite H10 in *. clear H9 H10 v1 v2. rewrite HHH in H7. assert (In y (map (fetch w) subT)) by (rewrite HeqsubT; simpl in *; apply H7). apply remove_list_in; auto. specialize (H0 x y H2 H4). destruct H0. exfalso; apply H6; auto. apply in_inv in H0. destruct H0. exfalso. apply H5; auto. apply remove_list_in_2. auto. rewrite H3 in H1; auto. apply PIR_cons. intros. unfold reachable in H4. destruct H4 as [p ?]. destruct (reachable_by_path_split_dec _ _ _ _ rslt H4). rewrite Forall_forall in f. apply reachable_by_path_foot in H4. apply foot_in in H4. specialize (f _ H4). rewrite H3. apply (extractReach_sublist _ _ y). simpl. simpl in f. auto. destruct e as [l1 [l2 [e1 [s1 [? [? [? [? [? [? ?]]]]]]]]]]. rewrite Forall_forall in H5. destruct (eq_nat_dec e1 t). rewrite e in *; clear e e1. destruct H8 as [? [? ?]]. remember (twoSubTrees t w s) as subT. destruct s as [lT [rT [? [? [? ?]]]]]. simpl in *. unfold biEdge in e. destruct (@only_two_neighbours nat nat natEqDec pg bi t) as [v1 [v2 HHH]]. inversion e. rewrite H15 in *. rewrite H16 in *. clear H15 H16 v1 v2. rewrite HHH in H13. assert (In s1 (map (fetch w) subT)). rewrite HeqsubT. simpl map. auto. apply (H2 s1 y). apply neg_eq_in_neq_nin in H9. destruct H9. apply remove_list_in; auto. exists l2. auto. apply reachable_by_path_foot in H6. apply foot_in in H6. specialize (H5 e1 H6). apply in_inv in H5. destruct H5. exfalso; auto. specialize (H0 e1 s1 H5 H8). simpl in H9. apply neg_eq_in_neq_nin in H9; destruct H9. destruct H0. exfalso; auto. apply in_inv in H0. destruct H0. exfalso; auto. apply (H2 s1 y). apply remove_list_in_2. auto. exists l2. auto. apply PIR_sublist with (map (fetch w) (appRemoveList w pr (removeTree w t (twoSubTrees t w s)) rslt)); auto. intro y; intros. apply remove_list_in_2; auto. apply PIR_cons. intros. apply reachable_is_valid in H2. apply valid_not_zero in H2. omega. rewrite H3. specialize (IHn (len, pr, rslt)). simpl in IHn. apply IHn. omega. repeat intro. specialize (H0 x y H2 H4). destruct H0; [left | right]. auto. apply in_inv in H0. destruct H0. subst. destruct H4 as [? [? ?]]. apply valid_not_zero in H3. omega. auto. rewrite H3 in H1. auto. Qed. End ConstructReachable. Lemma graph_reachable_finite: forall x w, graph x w -> exists (l : list adr), NoDup l /\ forall y, (In y l -> reachable pg x y) /\ (~ In y l -> ~ reachable pg x y). Proof. intros. hnf. destruct (world_finite w) as [l ?]. generalize (core_unit w); intros. unfold unit_for in H1. apply join_comm in H1. assert ((graph x * TT)%pred w) by (exists w, (core w); repeat split; auto). remember (exist (graph_sig_fun w) x H2) as g. remember (length l, (g::nil), nil : list adr) as s. assert (Forall (reachable pg x) (extractReach w s)). apply extractReach_reachable. unfold rch3; rewrite Heqs; simpl; apply Forall_nil. unfold rch2; rewrite Heqs; rewrite Heqg; simpl. apply Forall_forall; intros. apply in_inv in H3; destruct H3. subst. assert (graph x0 w) by auto. rewrite graph_unfold in H. destruct H as [[? ?] | [dd [ll [rr ?]]]]. hnf in H; right; auto. left. apply reachable_by_reflexive. split. destruct_ocon H h. destruct_ocon H6 j. destruct H10 as [[? ?] ?]. apply H12. hnf; auto. inversion H3. exists (extractReach w s). assert (NoDup (extractReach w s)) as Hn. apply extractReach_nodup. rewrite Heqs. simpl. apply NoDup_cons. auto. apply NoDup_nil. split. auto. intros y; split; intros. 
rewrite Forall_forall in H3. apply H3. auto. intro. assert (Sublist (extractReach w s) l) as Hs. assert (Forall (NotNone w) (extractReach w s)). apply extractReach_all_not_none; rewrite Heqs; simpl; apply Forall_nil. rewrite Forall_forall in H6. unfold NotNone in H6. intro z; intros. rewrite H0. apply H6. auto. assert (In y l) as Hy. generalize (graph_reachable_in _ _ H5). intros. specialize (H6 w H). hnf in H6. destruct H6 as [b ?]. destruct_sepcon H6 h. destruct H7 as [? [? ?]]. assert (lookup_fpm w y <> None). apply lookup_fpm_join_sub with h1. exists h2; auto. rewrite H10. intro S; inversion S. rewrite <- H0 in H11. auto. apply H4. generalize (extractReach_length_bound w s); intro. rewrite Heqs in H6 at 1 2; simpl in H6. assert (0 <= length l) as S by omega; specialize (H6 S); clear S. apply le_lt_eq_dec in H6. destruct H6. assert (ResultInProcessing (rch3 w s) (map (fetch w) (rch2 w s))). rewrite Heqs; simpl. repeat intro. inversion H6. generalize (extractReach_contains_all w s H6 l0); intro. rewrite Heqs in H7 at 1. rewrite Heqg in H7. simpl in H7. apply (H7 x y); auto. apply in_eq. rewrite Heqs in e at 2; simpl in e. apply (sublist_reverse eq_nat_dec (extractReach w s) l Hn e Hs y Hy). Qed. End PointwiseGraph.
Load LFindLoad.
From lfind Require Import LFind.
From QuickChick Require Import QuickChick.
From adtind Require Import goal33.

Derive Show for natural.
Derive Arbitrary for natural.
Instance Dec_Eq_natural : Dec_Eq natural.
Proof. dec_eq. Qed.

Lemma conj17synthconj3 : forall (lv0 : natural) (lv1 : natural) (lv2 : natural),
  (@eq natural (plus lv0 (mult lv1 lv2)) (plus lv0 (mult lv1 lv2))).
Admitted.

QuickChick conj17synthconj3.
#ifndef __PROXIMALS__
#define __PROXIMALS__

#include <Eigen/Dense>
#include <algorithm>   // std::sort
#include <functional>  // std::greater
#include <iostream>    // std::cout (debug output in proximalL1Ball)
#include <limits>

namespace proxopp {

class proxOperator {
public:
    proxOperator() {}
    virtual ~proxOperator() {}

    // f(x)
    virtual float f(Eigen::VectorXf& x) {
        return 0.0f;
    }

    // prox_{\lambda f}(x)
    virtual Eigen::VectorXf operator()(Eigen::VectorXf& x, float lambda) {
        return x;
    }

    virtual Eigen::VectorXf operator()(Eigen::VectorXf& x) {
        return (*this)(x, 1.0f);
    }

    virtual Eigen::VectorXf prox(Eigen::VectorXf& x, float lambda) {
        return (*this)(x, lambda);
    }
};

// The proximal operator of the L1 norm
// soft_thresholding(x,lambda) = sign(x)*max(|x| - lambda, 0) (element-wise)
class softThresholdingOperator : public proxOperator {
public:
    float f(Eigen::VectorXf& x) override {
        return x.lpNorm<1>();
    }

    Eigen::VectorXf operator()(Eigen::VectorXf& x, float lambda) override {
        return x.array().sign()*((x.array().abs()-lambda).max(Eigen::ArrayXf::Zero(x.rows())));
    }
};

// The proximal operator of the (convex) indicator
// function of the set {x | Ax = b} is
// x - At(A At)^(-1)(Ax - b)
class proxLinearEquality : public proxOperator {
public:
    proxLinearEquality(Eigen::MatrixXf& A, Eigen::VectorXf& b) : _A(A), _b(b) {
        _Q  = A.transpose()*((A*A.transpose()).inverse());
        _Qb = _Q*_b;
        _QA = _Q*_A;
    }

    float f(Eigen::VectorXf& x) override {
        if((_A*x - _b).norm() < tol) {
            return 0;
        } else {
            return std::numeric_limits<float>::max();
        }
    }

    Eigen::VectorXf operator()(Eigen::VectorXf& x, float lambda) override {
        return (x - _QA*x + _Qb);
    }
private:
    Eigen::MatrixXf _A;
    Eigen::VectorXf _b;
    Eigen::MatrixXf _Q;
    Eigen::MatrixXf _QA;
    Eigen::VectorXf _Qb;

    const double tol = 1e-6;
};

// Proximal operator of the squared L2 norm
class proximalL2Square : public proxOperator {
public:
    float f(Eigen::VectorXf& x) override {
        return x.squaredNorm();
    }

    Eigen::VectorXf operator()(Eigen::VectorXf& x, float lambda) override {
        return x/(1.0f+lambda);
    }
};

// Proximal operator of euclidean norm
// Block thresholding
class proximalL2 : public proxOperator {
public:
    float f(Eigen::VectorXf& x) {
        return x.lpNorm<2>();
    }

    Eigen::VectorXf operator()(Eigen::VectorXf& x, float lambda) {
        const float norm2 = x.lpNorm<2>();
        if(lambda > norm2) return Eigen::VectorXf::Zero(x.rows());
        return x*(1 - lambda / norm2);
    }
};

// PROJECTION ON BALLS

// Projector onto L2 ball
class proximalL2Ball : public proxOperator {
public:
    proximalL2Ball(float lambda) : radius(lambda) {}

    float f(Eigen::VectorXf& x) {
        return x.lpNorm<2>() <= radius ? 0 : std::numeric_limits<float>::max();
    }

    Eigen::VectorXf operator()(Eigen::VectorXf& x, float lambda = 0.0) {
        float norm2 = x.lpNorm<2>();
        if(norm2 == 0.0f) return Eigen::VectorXf::Zero(x.rows());
        if(norm2 <= radius) return x; // already inside the ball: projection is the identity
        return x*radius/norm2;
    }
private:
    float radius;
};

// Fast Projection onto the L1 Ball using sorting
// Using algorithm from
// Held, M., Wolfe, P., Crowder, H.: Validation of subgradient optimization.
// Mathematical Programming 6, 62–88 (1974)
// Alternatively, see Condat, L.
// Fast Projection onto the Simplex and the L1-Ball
// https://www.gipsa-lab.grenoble-inp.fr/~laurent.condat/publis/Condat_simplexproj.pdf
// It corresponds to Algorithm 1
class proximalL1Ball : public proxOperator {
public:
    proximalL1Ball(float lambda) : radius(lambda) {}

    float f(Eigen::VectorXf& x) {
        return (x.lpNorm<1>() < radius) ? 0 : std::numeric_limits<float>::max();
    }

    Eigen::VectorXf operator()(Eigen::VectorXf& x, float lambda = 0.0f) {
        if(x.lpNorm<1>() < radius) return x;

        // Sort the absolute values in decreasing order.
        Eigen::ArrayXf u = Eigen::ArrayXf(x.array().abs());
        std::sort(u.data(), u.data()+u.size(), std::greater<float>());

        int k=1, K=1;
        float tau = 0.0f;

        // tau ends up as the partial mean for the largest k whose mean
        // stays below the k-th largest absolute value.
        for(k=1;k<=x.rows();k++) {
            float mean = 0.0f;
            for(int i = 0; i < k; i++) {
                mean += u[i];
            }
            mean = (mean-radius)/k;

            if(mean < u[k-1]) {
                K = k-1;
                tau = mean;
            }
        }

        std::cout << tau << std::endl; // debug output

        Eigen::VectorXf xtau = x.array().abs()-tau*Eigen::ArrayXf::Ones(x.rows());

        return x.array().sign()*(xtau).array().max(Eigen::VectorXf::Zero(x.rows()).array());
    }
private:
    float radius;
};

// Proximal operator of the indicator function of
// the set {x | max abs(x_i) <= lambda} (Linf ball)
class proximalLinfBall : public proxOperator {
public:
    proximalLinfBall(float lambda) : radius(lambda) {}

    float f(Eigen::VectorXf& x) override {
        return (x.lpNorm<Eigen::Infinity>() < radius) ? 0 : std::numeric_limits<float>::max();
    }

    Eigen::VectorXf operator()(Eigen::VectorXf& x, float lambda = 0.0f) override {
        // Clamp each coordinate to [-radius, radius].
        for(int i = 0; i < x.rows(); i++) {
            if(x[i] > radius) x[i] = radius;
            if(x[i] < -radius) x[i] = -radius;
        }
        return x;
    }
private:
    float radius;
};

// Projection on the intersection of L1 and Linf balls
// with binary search
class proximalL1LinfBall : public proxOperator {
public:
    proximalL1LinfBall(float radius_l1, float radius_linf) : r_l1(radius_l1), r_linf(radius_linf) {}

    float f(Eigen::VectorXf& x) {
        if(x.lpNorm<1>()<r_l1 && x.lpNorm<Eigen::Infinity>() < r_linf) return 0.0f;
        return std::numeric_limits<float>::max();
    }

    Eigen::VectorXf operator()(Eigen::VectorXf& x) {
        int n = x.rows();

        proximalLinfBall projLinf(r_linf);
        Eigen::VectorXf y = projLinf(x);

        if(y.lpNorm<1>() < r_l1) return y;

        const double eps = 1e-6;
        const int itermax = 50;

        int k = 0;
        float nu_l = 0.0f;
        float nu_r = x.array().abs().maxCoeff();

        Eigen::ArrayXf z1,z2;

        // Bisection on the shrinkage parameter nu, with nu_l < nu_r.
        while((k < itermax) && (nu_r - nu_l > eps)) {
            float nu_m = 0.5*(nu_l+nu_r);

            z1 = y.array().abs()-nu_m*Eigen::ArrayXf::Ones(n);
            z1 = z1.max(Eigen::ArrayXf::Zero(n));
            z2 = z1.min(r_linf*Eigen::ArrayXf::Ones(n));

            if(z2.matrix().lpNorm<1>() < r_l1) {
                nu_r = nu_m;
            } else {
                nu_l = nu_m;
            }
            k++;
        }

        return z2*x.array().sign();
    }
private:
    float r_l1;   // radius of L1 ball
    float r_linf; // radius of Linf ball (max coeff of abs values)
};

} // namespace proxopp
#endif
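A minimal usage sketch of the operators defined in the header above. It is not part of the original file; the header name "proximals.h" is an assumption, and the expected outputs in the comments follow directly from the formulas in the class comments.

// Usage sketch (assumes the header above is saved as "proximals.h").
#include <iostream>
#include <Eigen/Dense>
#include "proximals.h"

int main() {
    Eigen::VectorXf x(4);
    x << 3.0f, -0.5f, 0.2f, -2.0f;

    // Soft-thresholding: prox of lambda*||x||_1, shrinks each entry towards zero.
    proxopp::softThresholdingOperator soft;
    Eigen::VectorXf shrunk = soft(x, 1.0f);   // expected: (2, 0, 0, -1)

    // Projection onto the L2 ball of radius 1: x lies outside, so it is rescaled
    // to Euclidean norm 1.
    proxopp::proximalL2Ball ballProj(1.0f);
    Eigen::VectorXf projected = ballProj(x);

    std::cout << "soft-thresholded:   " << shrunk.transpose() << "\n"
              << "L2-ball projection: " << projected.transpose() << std::endl;
    return 0;
}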
module _ where

module A where

  infix 2 _↑
  infix 1 _↓_

  data D : Set where
    ●   : D
    _↑  : D → D
    _↓_ : D → D → D

module B where

  data D : Set where
    _↓_ : D → D → D

open A
open B

rejected : A.D
rejected = ● ↑ ↓ ●
(*************************************************************************** * Correctness of the CPS-transformation - Definitions * * Arthur Chargueraud, January 2009 * ***************************************************************************) Set Implicit Arguments. Require Export LibLN LibLogic LibFix. Implicit Types x y z : var. (* ********************************************************************** *) (* ********************************************************************** *) (** * Syntax of lambda-terms with constants *) (* ********************************************************************** *) (** Grammar of terms *) Inductive trm : Set := | trm_bvar : nat -> trm | trm_fvar : var -> trm | trm_cst : nat -> trm | trm_app : trm -> trm -> trm | trm_abs : trm -> trm. Instance trm_inhab : Inhab trm. Proof. intros. apply (Inhab_of_val (trm_bvar 0)). Qed. (* ********************************************************************** *) (** Opening of terms *) Fixpoint open_rec (k : nat) (u : trm) (t : trm) {struct t} : trm := match t with | trm_bvar i => If k = i then u else (trm_bvar i) | trm_fvar x => t | trm_cst k => t | trm_app t1 t2 => trm_app (open_rec k u t1) (open_rec k u t2) | trm_abs t1 => trm_abs (open_rec (S k) u t1) end. Definition open t u := open_rec 0 u t. Notation "{ k ~> u } t" := (open_rec k u t) (at level 67). Notation "t ^^ u" := (open t u) (at level 67). Notation "t ^ x" := (open t (trm_fvar x)). (* ********************************************************************** *) (** Closing of term *) Fixpoint close_var_rec (k : nat) (z : var) (t : trm) {struct t} : trm := match t with | trm_bvar i => trm_bvar i | trm_fvar x => If x = z then (trm_bvar k) else t | trm_cst k => t | trm_app t1 t2 => trm_app (close_var_rec k z t1) (close_var_rec k z t2) | trm_abs t1 => trm_abs (close_var_rec (S k) z t1) end. Definition close_var z t := close_var_rec 0 z t. (* ********************************************************************** *) (** Local closure of terms *) Inductive term : trm -> Prop := | term_var : forall x, term (trm_fvar x) | term_cst : forall k, term (trm_cst k) | term_app : forall t1 t2, term t1 -> term t2 -> term (trm_app t1 t2) | term_abs : forall L t1, (forall x, x \notin L -> term (t1 ^ x)) -> term (trm_abs t1). (* ********************************************************************** *) (** Body of an abstraction *) Definition body t := exists L, forall x, x \notin L -> term (t ^ x). (* ********************************************************************** *) (** Free variables of a term *) Fixpoint fv (t : trm) {struct t} : vars := match t with | trm_bvar i => \{} | trm_fvar x => \{x} | trm_cst k => \{} | trm_app t1 t2 => (fv t1) \u (fv t2) | trm_abs t1 => (fv t1) end. (* ********************************************************************** *) (** Substitution for a name *) Fixpoint subst (z : var) (u : trm) (t : trm) {struct t} : trm := match t with | trm_bvar i => t | trm_fvar x => If x = z then u else (trm_fvar x) | trm_cst k => t | trm_app t1 t2 => trm_app (subst z u t1) (subst z u t2) | trm_abs t1 => trm_abs (subst z u t1) end. Notation "[ z ~> u ] t" := (subst z u t) (at level 68). 
(* ********************************************************************** *) (* ********************************************************************** *) (** * Semantics *) (* ********************************************************************** *) (** Values *) Inductive value : trm -> Prop := | value_cst : forall k, value (trm_cst k) | value_abs : forall t1, term (trm_abs t1) -> value (trm_abs t1). (* ********************************************************************** *) (** Big-step reduction relation *) Inductive eval : trm -> trm -> Prop := | eval_val : forall t1, value t1 -> eval t1 t1 | eval_red : forall v2 t3 v3 t1 t2, eval t1 (trm_abs t3) -> eval t2 v2 -> eval (t3 ^^ v2) v3 -> eval (trm_app t1 t2) v3. (* ********************************************************************** *) (* ********************************************************************** *) (** * Transformation *) (* ********************************************************************** *) (** CPS transformation of terms *) Definition Cps (cps : trm -> trm) (t : trm) : trm := match t with | trm_bvar i => arbitrary | trm_fvar x => trm_abs (trm_app (trm_bvar 0) t) | trm_cst k => trm_abs (trm_app (trm_bvar 0) t) | trm_abs t1 => let x := var_gen (fv t1) in let t1' := close_var x (cps (t1 ^ x)) in trm_abs (trm_app (trm_bvar 0) (trm_abs t1')) | trm_app t1 t2 => let k := trm_abs (trm_app (trm_app (trm_bvar 1) (trm_bvar 0)) (trm_bvar 2)) in trm_abs (trm_app (cps t1) (trm_abs (trm_app (cps t2) k))) end. Definition cps := FixFun Cps. (* ********************************************************************** *) (** CPS transformation of values *) Definition cps_abs_body t1 := let x := var_gen (fv t1) in close_var x (cps (t1 ^ x)). Definition cpsval (t:trm) : trm := match t with | trm_cst k => t | trm_abs t1 => trm_abs (cps_abs_body t1) | _ => arbitrary end. (* ********************************************************************** *) (** Correctness of the CPS translation *) Definition trm_id := trm_abs (trm_bvar 0). Definition cps_correctness_statement := forall v t, eval t v -> value v -> eval (trm_app (cps t) trm_id) (cpsval v).
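Read back with named variables, the Cps functional defined above is the standard call-by-value CPS translation. The equations below are a reading of its de Bruijn encoding (bvar 0, 1, 2 in the application case correspond to v, f, k); they are an interpretation added here, not text from the source:

\begin{align*}
  \mathit{cps}(x)              &= \lambda k.\; k\, x \\
  \mathit{cps}(c)              &= \lambda k.\; k\, c \\
  \mathit{cps}(\lambda x.\, t) &= \lambda k.\; k\, (\lambda x.\; \mathit{cps}(t)) \\
  \mathit{cps}(t_1\, t_2)      &= \lambda k.\; \mathit{cps}(t_1)\; \bigl(\lambda f.\; \mathit{cps}(t_2)\; (\lambda v.\; (f\, v)\, k)\bigr)
\end{align*}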
[STATEMENT]
lemma compl: "\<forall>x \<in> carrier . \<forall>y\<in> carrier . x \<succeq> y \<or> y \<succeq> x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
 1. \<forall>x\<in>carrier. \<forall>y\<in>carrier. x \<succeq> y \<or> y \<succeq> x
[PROOF STEP]
by (metis refl_onD reflexivity total total_on_def)
State Before:
G : Type ?u.3529
S : Type u_1
inst✝ : Semigroup S
a✝ b c : S
hbc : Commute b c
a d : S
⊢ a * b * (c * d) = a * c * (b * d)
State After: no goals
Tactic: simp only [hbc.left_comm, mul_assoc]
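A worked version of the rewrite performed by the tactic above, derived from the goal and the hypothesis hbc : Commute b c (i.e. b * c = c * b); this derivation is added for clarity and is not part of the source record:

\begin{aligned}
  a \cdot b \cdot (c \cdot d) &= a \cdot (b \cdot (c \cdot d)) && \text{by mul\_assoc} \\
                              &= a \cdot (c \cdot (b \cdot d)) && \text{by hbc.left\_comm} \\
                              &= a \cdot c \cdot (b \cdot d)   && \text{by mul\_assoc}
\end{aligned}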
import make_ring import tactic.auto_cases open lean.parser (ident) open interactive (parse types.with_ident_list) namespace freealg /---------------------------------------------------------------------------------- Builds a 'free boolean algebra' whose elements are commutative sums of squarefree monomials in n indeterminates X₀, X₁, ... with coefficients mod 2. These elements are encoded internally as boolean vectors, via a map under which addition is 'xor' and multiplication is 'and', both coordinate-wise. ------------------------------------------------------------------------------------/ variables {α : Type*}[boolean_algebra α] def freealg : nat → Type -- base case is bool | 0 := bool -- inductive case is the coefficient on X_n and the coefficient on X_nᶜ -- where the value of the term is CX_n + DX_nᶜ -- this determines a region in the Venn diagram that represents the free boolean algebra | (n+1) := (freealg n) × (freealg n) section operations def zero : forall {n : nat}, (freealg n) | 0 := ff | (n+1) := (zero, zero) def one : forall {n : nat}, (freealg n) | 0 := tt | (n+1) := (one, one) def var : forall {n : nat} (i : nat), (i < n) → (freealg n) | 0 i Hi := false.elim (nat.not_lt_zero i Hi) | (n+1) 0 Hi := (one, zero) | (n+1) (i+1) Hi := let coeff : freealg n := var i (nat.lt_of_succ_lt_succ Hi) in (coeff, coeff) def symmdiff : forall {n : nat}, (freealg n) → (freealg n) → (freealg n) | 0 a b := bxor a b | (n+1) a b := (symmdiff a.1 b.1, symmdiff a.2 b.2) def inf : forall {n : nat}, (freealg n) → (freealg n) → (freealg n) | 0 a b := band a b | (n+1) a b := (inf a.1 b.1, inf a.2 b.2) def sup : forall {n : nat}, (freealg n) → (freealg n) → (freealg n) | 0 a b := bor a b | (n + 1) a b := (sup a.1 b.1, sup a.2 b.2) def compl : forall {n : nat}, (freealg n) → (freealg n) | _ a := (symmdiff one a) def sdiff : forall {n : nat}, (freealg n) → (freealg n) → (freealg n) | _ a b := (inf a (compl b)) def map : forall {n : nat} (V : vector α n), (freealg n) → α | 0 V ff := 0 | 0 V tt := 1 | (n+1) V a := (map V.tail a.1) * V.head + (map V.tail a.2) * (V.head + 1) end operations section conversion_lemmas -- Our proofs rely on having ring around -- TODO: do we need to have ring around (can we get rid of some notation) -- having ring makes it nicer to work with symmdiff/inf so we define sup in terms of symmdiff and inf. lemma sup_to_symmdiff_and_inf {n : nat} (a b : (freealg n)) : sup a b = (symmdiff (symmdiff a b) (inf a b)) := begin induction n, { unfold sup symmdiff inf, cases a; cases b; refl }, { unfold sup symmdiff inf, rewrite (n_ih a.1 b.1), rewrite (n_ih a.2 b.2), } end -- These are basic lemmas about how map transforms freealg equations into -- equations in the incoming boolean algebra lemma on_zero : forall {n : nat} (V : vector α n), 0 = map V zero | 0 V := rfl | (n+1) V := calc 0 = 0 * V.head + 0 * (V.head + 1) : by ring ... = (map V.tail zero) * V.head + (map V.tail zero) * (V.head + 1) : by rw on_zero lemma on_one : forall {n : nat} (V : vector α n), 1 = map V one | 0 V := rfl | (n+1) V := calc 1 = V.head + (V.head + 1) : (add_self_left _ _).symm ... = 1 * V.head + 1 * (V.head + 1) : by ring ... = (map V.tail one) * V.head + (map V.tail one) * (V.head + 1) : by rw on_one lemma on_var : forall {n : nat} (V : vector α n) (i : nat) (Hi : i < n), V.nth ⟨i, Hi⟩ = map V (var i Hi) | 0 V i Hi := false.elim (nat.not_lt_zero i Hi) | (n+1) V 0 Hi := calc V.nth ⟨0, Hi⟩ = V.head : by simp ... = 1 * V.head + 0 * (V.head + 1) : by ring ... 
= (map V.tail one) * V.head + (map V.tail zero) * (V.head + 1) : by rw [on_zero, on_one] | (n+1) V (i+1) Hi := let Hip : (i < n) := nat.lt_of_succ_lt_succ Hi, tail_var := map V.tail (var i Hip) in calc V.nth ⟨i + 1, Hi⟩ = V.tail.nth ⟨i, Hip⟩ : by rw [vector.nth_tail, fin.succ.equations._eqn_1] ... = tail_var : on_var _ _ _ ... = _ : (add_self_left (tail_var * V.head) _).symm ... = tail_var * V.head + tail_var * (V.head + 1) : by ring -- These two lemmas are useful when working with ring equations. lemma on_add : forall {n : nat} (V : vector α n) (a b : freealg n), (map V a) + (map V b) = map V (symmdiff a b) | 0 V a b := begin cases a; cases b; unfold map symmdiff bxor; ring, exact two_eq_zero, end | (n+1) V a b := begin unfold map symmdiff, rw [←on_add V.tail a.1 b.1, ←on_add V.tail a.2 b.2], ring, end lemma on_mul : forall {n : nat} (V : vector α n) (a b : freealg n), (map V a) * (map V b) = map V (inf a b) | 0 V a b := by cases a; cases b; unfold map inf band; ring | (n+1) V a b := begin unfold map inf, rw [←on_mul V.tail a.1 b.1, ←on_mul V.tail a.2 b.2,←expand_product], end -- These four lemmas are useful when ⊔ and ⊓, ᶜ, and \ are in the mix lemma on_inf : forall {n : nat} (V : vector α n) (a b : freealg n), (map V a) ⊓ (map V b) = map V (inf a b) := begin intros, change (map V a) * (map V b) = map V (inf a b), apply on_mul, end lemma on_sup : forall {n : nat} (V : vector α n) (a b : freealg n), (map V a) ⊔ (map V b) = map V (sup a b) := begin intros, rewrite sup_to_ring, rewrite sup_to_symmdiff_and_inf, rewrite <- on_add, rewrite <- on_add, rewrite <- on_mul, end lemma on_compl : forall {n : nat} (V : vector α n) (a : freealg n), (map V a)ᶜ = (map V (compl a)) := begin intros, rewrite compl_to_ring, unfold compl, rewrite <- on_add, rewrite <- on_one, ring, end lemma on_sdiff : forall {n : nat} (V : vector α n) (a b : freealg n), (map V a) \ (map V b) = (map V (sdiff a b)) := begin intros, rewrite diff_to_ring, unfold sdiff, rewrite <- on_mul, unfold compl, rewrite <- on_add, rewrite <- on_one, ring, end end conversion_lemmas section boolean_algebra_instance -- discharging (decidable true) and (decidable false) as that's easy meta def construct_decidable : tactic unit := tactic.focus1 $ do target <- tactic.target, match target with | `(decidable true) := `[apply is_true, trivial] | `(decidable false) := `[apply is_false, trivial] | _ := tactic.skip end -- ≤ def le : forall {n : nat}, (freealg n) → (freealg n) → Prop | 0 ff ff := true | 0 ff tt := true | 0 tt ff := false | 0 tt tt := true | (n+1) a b := le a.1 b.1 ∧ le a.2 b.2 -- < def lt {n : nat} (a b : freealg n) := le a b ∧ ¬ le b a -- So tauto can work instance le_decidable {n : nat} (a b : freealg n) : (decidable (le a b)) := begin -- discharge the base case induction n; cases a; cases b; unfold le; construct_decidable, -- discharge the inductive case simp, letI := (n_ih a_fst b_fst), letI := (n_ih a_snd b_snd), exact and.decidable, end meta def every_freealg_proof : tactic unit := do tactic.try $ `[unfold_projs], tactic.intros, nexpr <- tactic.find_assumption `(nat), tactic.induction nexpr, tactic.all_goals $ (tactic.focus1 $ do tactic.repeat $ (do freealg_expr <- tactic.to_expr ``(freealg _), freealg_var <- tactic.find_assumption freealg_expr, tactic.cases_core freealg_var, tactic.skip), ctx <- tactic.local_context, tactic.all_goals $ tactic.try $ `[simp only [prod.mk.inj_iff, le, lt, sup, inf, symmdiff, sdiff, compl] at *], tactic.all_goals $ tactic.try $ `[tauto]), tactic.skip instance freealg_as_boolalg (n : nat) : 
(boolean_algebra (freealg n)) := { sup := sup, le := le, lt := lt, le_refl := by every_freealg_proof, le_trans := by every_freealg_proof, lt_iff_le_not_le := by every_freealg_proof, le_antisymm := by every_freealg_proof, le_sup_left := by every_freealg_proof, le_sup_right := by every_freealg_proof, sup_le := by every_freealg_proof, inf := inf, inf_le_left := by every_freealg_proof, inf_le_right := by every_freealg_proof, le_inf := by every_freealg_proof, le_sup_inf := by every_freealg_proof, top := one, le_top := by every_freealg_proof, bot := zero, bot_le := by every_freealg_proof, compl := compl, sdiff := sdiff, inf_compl_le_bot := by every_freealg_proof, top_le_sup_compl := by every_freealg_proof, sdiff_eq := by every_freealg_proof, } end boolean_algebra_instance end /-namespace-/ freealg
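As an informal reading of map's defining clause above (added for orientation; here + and * denote the ring structure on α presumably supplied by the make_ring import, i.e. symmetric difference Δ and ⊓, so that v + 1 = vᶜ): for a vector V with head v and tail V',

    map V (a₁, a₂) = map V' a₁ * v + map V' a₂ * (v + 1) = (map V' a₁ ⊓ v) Δ (map V' a₂ ⊓ vᶜ)

so an element of freealg (n+1) is the Shannon expansion of a Boolean expression along its first variable. A freealg n value therefore amounts to a table of 2ⁿ coefficients in 𝔽₂, one per atom, and symmdiff and inf act on those tables coefficientwise, exactly as the lemmas on_add and on_mul record.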
// SPDX-License-Identifier: Apache-2.0 // Copyright 2020 - 2022 Pionix GmbH and Contributors to EVerest #include <ostream> #include <string> #include <boost/optional/optional.hpp> #include <nlohmann/json.hpp> #include <ocpp1_6/messages/GetConfiguration.hpp> #include <ocpp1_6/ocpp_types.hpp> #include <ocpp1_6/types.hpp> using json = nlohmann::json; namespace ocpp1_6 { std::string GetConfigurationRequest::get_type() const { return "GetConfiguration"; } void to_json(json& j, const GetConfigurationRequest& k) { // the required parts of the message j = json({}); // the optional parts of the message if (k.key) { if (j.size() == 0) { j = json{{"key", json::array()}}; } else { j["key"] = json::array(); } for (auto val : k.key.value()) { j["key"].push_back(val); } } } void from_json(const json& j, GetConfigurationRequest& k) { // the required parts of the message // the optional parts of the message if (j.contains("key")) { json arr = j.at("key"); std::vector<CiString50Type> vec; for (auto val : arr) { vec.push_back(val); } k.key.emplace(vec); } } /// \brief Writes the string representation of the given GetConfigurationRequest \p k to the given output stream \p os /// \returns an output stream with the GetConfigurationRequest written to std::ostream& operator<<(std::ostream& os, const GetConfigurationRequest& k) { os << json(k).dump(4); return os; } std::string GetConfigurationResponse::get_type() const { return "GetConfigurationResponse"; } void to_json(json& j, const GetConfigurationResponse& k) { // the required parts of the message j = json({}); // the optional parts of the message if (k.configurationKey) { if (j.size() == 0) { j = json{{"configurationKey", json::array()}}; } else { j["configurationKey"] = json::array(); } for (auto val : k.configurationKey.value()) { j["configurationKey"].push_back(val); } } if (k.unknownKey) { if (j.size() == 0) { j = json{{"unknownKey", json::array()}}; } else { j["unknownKey"] = json::array(); } for (auto val : k.unknownKey.value()) { j["unknownKey"].push_back(val); } } } void from_json(const json& j, GetConfigurationResponse& k) { // the required parts of the message // the optional parts of the message if (j.contains("configurationKey")) { json arr = j.at("configurationKey"); std::vector<KeyValue> vec; for (auto val : arr) { vec.push_back(val); } k.configurationKey.emplace(vec); } if (j.contains("unknownKey")) { json arr = j.at("unknownKey"); std::vector<CiString50Type> vec; for (auto val : arr) { vec.push_back(val); } k.unknownKey.emplace(vec); } } /// \brief Writes the string representation of the given GetConfigurationResponse \p k to the given output stream \p os /// \returns an output stream with the GetConfigurationResponse written to std::ostream& operator<<(std::ostream& os, const GetConfigurationResponse& k) { os << json(k).dump(4); return os; } } // namespace ocpp1_6
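For reference, the wire format these converters produce and consume is plain JSON. The sketch below is illustrative only (written in Python since this collection mixes languages): the field names are taken directly from the to_json/from_json functions above, while the concrete configuration key names are sample values and the KeyValue layout {key, readonly, value} comes from the OCPP 1.6 type definitions rather than the code shown here.

```python
import json

# Shape of a GetConfiguration.req: the "key" list is optional.
request = {"key": ["HeartbeatInterval", "MeterValueSampleInterval"]}

# Shape of a GetConfiguration.conf: both lists are optional.
# Each configurationKey entry is assumed to be a KeyValue object
# with required "key" and "readonly" fields and an optional "value".
response = {
    "configurationKey": [
        {"key": "HeartbeatInterval", "readonly": False, "value": "300"}
    ],
    "unknownKey": ["NotARealKey"],
}

# Serialize the same way operator<< above does (json(k).dump(4)).
print(json.dumps(request))
print(json.dumps(response, indent=4))
```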
-- In a mutual block, either all or none must have a MEASURE declaration. module _ where open import Common.Prelude mutual {-# MEASURE n #-} f : (n : Nat) → Nat f zero = zero f (suc n) = g n {-# MEASURE n #-} g : (n : Nat) → Nat g zero = zero g (suc n) = suc (f n)
/*=============================================================================
    Copyright (c) 2001-2007 Joel de Guzman

    Distributed under the Boost Software License, Version 1.0. (See accompanying
    file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
==============================================================================*/
#include <iostream>
#include <vector>
#include <algorithm>
#include <boost/detail/lightweight_test.hpp>
#include <boost/spirit/include/phoenix_statement.hpp>
#include <boost/spirit/include/phoenix_operator.hpp>
#include <boost/spirit/include/phoenix_core.hpp>

using namespace boost::phoenix;
using namespace boost::phoenix::arg_names;
using namespace std;

int main()
{
    int init[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
    vector<int> v(init, init+10);

    for_each(v.begin(), v.end(),
        switch_(_1)
        [
            // weird case, why not just use if(...), but valid, nonetheless
            case_<4>(cout << val("<4>") << endl)
        ]
    );

    cout << endl;

    for_each(v.begin(), v.end(),
        switch_(_1)
        [
            // weird case, but valid, nonetheless
            default_(cout << val("<any...>") << endl)
        ]
    );

    cout << endl;

    for_each(v.begin(), v.end(),
        switch_(_1)
        [
            case_<1>(cout << val("<1>") << endl),
            case_<2>(cout << val("<2>") << endl),
            case_<3>(cout << val("<3>") << endl),
            case_<4>(cout << val("<4>") << endl)
        ]
    );

    cout << endl;

    for_each(v.begin(), v.end(),
        switch_(_1)
        [
            case_<1>(cout << val("<1>") << endl),
            case_<2>(cout << val("<2>") << endl),
            case_<3>(cout << val("<3>") << endl),
            case_<4>(cout << val("<4>") << endl),
            default_(cout << val("<over 4>") << endl)
        ]
    );

    return boost::report_errors();
}
In 2012 Caroline Jaine’s piece “5 Migrants” was shortlisted for the Searle Award – it has now been published by Askance as “Getting There” and is available in full colour hard-back or a soft-back with a black & white interior. This work was drawn from five interviews with migrants living in the UK. It transcribes their words into a 184-page book, presenting the five journeys in the form of a puzzle. Jaine said, “I wanted the reader/viewer to be able to make choices, play with their assumptions, and reflect the choices made by the story-tellers”. The front cover image is of Jaine’s grandfather writing home from India. Other images used have been donated by the subjects, or are Jaine’s own photographs and paintings. Sorry, currently out of print. PAYMENT: Buying directly from Askance is done via PayPal. You do not need a PayPal account; you can use your credit or debit card as you would with any other online purchase.
[STATEMENT] lemma type_update_select_length2: assumes "type_update_select (TopType cts) = tm" "length cts = 2" "tm \<noteq> Bot" shows "\<exists>t1 t2. cts = [t1, t2] \<and> ct_eq t2 (TSome T_i32) \<and> tm = TopType [t1]" [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<exists>t1 t2. cts = [t1, t2] \<and> ct_eq t2 (TSome T_i32) \<and> tm = TopType [t1] [PROOF STEP] proof - [PROOF STATE] proof (state) goal (1 subgoal): 1. \<exists>t1 t2. cts = [t1, t2] \<and> ct_eq t2 (TSome T_i32) \<and> tm = TopType [t1] [PROOF STEP] obtain x y where cts_def:"cts = [x,y]" [PROOF STATE] proof (prove) goal (1 subgoal): 1. (\<And>x y. cts = [x, y] \<Longrightarrow> thesis) \<Longrightarrow> thesis [PROOF STEP] using assms(2) List.length_Suc_conv[of cts "Suc 0"] [PROOF STATE] proof (prove) using this: length cts = 2 (length cts = Suc (Suc 0)) = (\<exists>y ys. cts = y # ys \<and> length ys = Suc 0) goal (1 subgoal): 1. (\<And>x y. cts = [x, y] \<Longrightarrow> thesis) \<Longrightarrow> thesis [PROOF STEP] by (metis length_0_conv length_Suc_conv numeral_2_eq_2) [PROOF STATE] proof (state) this: cts = [x, y] goal (1 subgoal): 1. \<exists>t1 t2. cts = [t1, t2] \<and> ct_eq t2 (TSome T_i32) \<and> tm = TopType [t1] [PROOF STEP] moreover [PROOF STATE] proof (state) this: cts = [x, y] goal (1 subgoal): 1. \<exists>t1 t2. cts = [t1, t2] \<and> ct_eq t2 (TSome T_i32) \<and> tm = TopType [t1] [PROOF STEP] hence "consume (TopType [x,y]) [TSome T_i32] = tm" [PROOF STATE] proof (prove) using this: cts = [x, y] goal (1 subgoal): 1. consume (TopType [x, y]) [TSome T_i32] = tm [PROOF STEP] using assms(1,2) [PROOF STATE] proof (prove) using this: cts = [x, y] type_update_select (TopType cts) = tm length cts = 2 goal (1 subgoal): 1. consume (TopType [x, y]) [TSome T_i32] = tm [PROOF STEP] by simp [PROOF STATE] proof (state) this: consume (TopType [x, y]) [TSome T_i32] = tm goal (1 subgoal): 1. \<exists>t1 t2. cts = [t1, t2] \<and> ct_eq t2 (TSome T_i32) \<and> tm = TopType [t1] [PROOF STEP] moreover [PROOF STATE] proof (state) this: consume (TopType [x, y]) [TSome T_i32] = tm goal (1 subgoal): 1. \<exists>t1 t2. cts = [t1, t2] \<and> ct_eq t2 (TSome T_i32) \<and> tm = TopType [t1] [PROOF STEP] hence "ct_suffix [x,y] [TSome T_i32] \<or> ct_suffix [TSome T_i32] [x,y]" [PROOF STATE] proof (prove) using this: consume (TopType [x, y]) [TSome T_i32] = tm goal (1 subgoal): 1. ct_suffix [x, y] [TSome T_i32] \<or> ct_suffix [TSome T_i32] [x, y] [PROOF STEP] using assms(3) [PROOF STATE] proof (prove) using this: consume (TopType [x, y]) [TSome T_i32] = tm tm \<noteq> Bot goal (1 subgoal): 1. ct_suffix [x, y] [TSome T_i32] \<or> ct_suffix [TSome T_i32] [x, y] [PROOF STEP] by (metis consume.simps(2)) [PROOF STATE] proof (state) this: ct_suffix [x, y] [TSome T_i32] \<or> ct_suffix [TSome T_i32] [x, y] goal (1 subgoal): 1. \<exists>t1 t2. cts = [t1, t2] \<and> ct_eq t2 (TSome T_i32) \<and> tm = TopType [t1] [PROOF STEP] hence "ct_suffix [TSome T_i32] ([x]@[y])" [PROOF STATE] proof (prove) using this: ct_suffix [x, y] [TSome T_i32] \<or> ct_suffix [TSome T_i32] [x, y] goal (1 subgoal): 1. ct_suffix [TSome T_i32] ([x] @ [y]) [PROOF STEP] using assms(2) ct_suffix_length [PROOF STATE] proof (prove) using this: ct_suffix [x, y] [TSome T_i32] \<or> ct_suffix [TSome T_i32] [x, y] length cts = 2 ct_suffix ?ts ?ts' \<Longrightarrow> length ?ts \<le> length ?ts' goal (1 subgoal): 1. ct_suffix [TSome T_i32] ([x] @ [y]) [PROOF STEP] by fastforce [PROOF STATE] proof (state) this: ct_suffix [TSome T_i32] ([x] @ [y]) goal (1 subgoal): 1. 
\<exists>t1 t2. cts = [t1, t2] \<and> ct_eq t2 (TSome T_i32) \<and> tm = TopType [t1] [PROOF STEP] moreover [PROOF STATE] proof (state) this: ct_suffix [TSome T_i32] ([x] @ [y]) goal (1 subgoal): 1. \<exists>t1 t2. cts = [t1, t2] \<and> ct_eq t2 (TSome T_i32) \<and> tm = TopType [t1] [PROOF STEP] hence "ct_eq y (TSome T_i32)" [PROOF STATE] proof (prove) using this: ct_suffix [TSome T_i32] ([x] @ [y]) goal (1 subgoal): 1. ct_eq y (TSome T_i32) [PROOF STEP] by (metis ct_eq_commute ct_list_eq_def ct_suffix_cons2 list.rel_sel list.sel(1) list.simps(3) list.size(4)) [PROOF STATE] proof (state) this: ct_eq y (TSome T_i32) goal (1 subgoal): 1. \<exists>t1 t2. cts = [t1, t2] \<and> ct_eq t2 (TSome T_i32) \<and> tm = TopType [t1] [PROOF STEP] ultimately [PROOF STATE] proof (chain) picking this: cts = [x, y] consume (TopType [x, y]) [TSome T_i32] = tm ct_suffix [TSome T_i32] ([x] @ [y]) ct_eq y (TSome T_i32) [PROOF STEP] show ?thesis [PROOF STATE] proof (prove) using this: cts = [x, y] consume (TopType [x, y]) [TSome T_i32] = tm ct_suffix [TSome T_i32] ([x] @ [y]) ct_eq y (TSome T_i32) goal (1 subgoal): 1. \<exists>t1 t2. cts = [t1, t2] \<and> ct_eq t2 (TSome T_i32) \<and> tm = TopType [t1] [PROOF STEP] by auto [PROOF STATE] proof (state) this: \<exists>t1 t2. cts = [t1, t2] \<and> ct_eq t2 (TSome T_i32) \<and> tm = TopType [t1] goal: No subgoals! [PROOF STEP] qed
{-# OPTIONS --cubical --safe --no-import-sorts #-} module Cubical.Algebra.Module where open import Cubical.Algebra.Module.Base public
SbCl3
The complex conjugate of the inverse of a complex number is the inverse of the complex conjugate of that number.
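In symbols, a one-line justification (added for clarity; it uses the convention $0^{-1} = 0$ common in formal libraries): for $z \neq 0$,

$$\overline{z}\,\overline{z^{-1}} = \overline{z \, z^{-1}} = \overline{1} = 1, \qquad \text{hence} \qquad \overline{z^{-1}} = \left(\overline{z}\right)^{-1},$$

and the identity also holds at $z = 0$, since $\overline{0} = 0 = 0^{-1}$ under that convention.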
System: dS/dt = -bS, dI/dt = bI, with b positive (b = beta)

```python
from sympy import *
from sympy.abc import S,I,t,b
```

```python
# critical points
P=-b*S
Q=b*I
# set P(S,I)=0 and Q(S,I)=0
Peqn=Eq(P,0)
Qeqn=Eq(Q,0)
print(solve((Peqn,Qeqn),S,I))
# eigenvalues and eigenvectors
M=Matrix([[-b,0],[0,b]])
print(M.eigenvals())
pprint(M.eigenvects())
```

    {I: 0, S: 0}
    {-b: 1, b: 1}
    ⎡⎛       ⎡⎡1⎤⎤⎞  ⎛      ⎡⎡0⎤⎤⎞⎤
    ⎢⎜-b, 1, ⎢⎢ ⎥⎥⎟, ⎜b, 1, ⎢⎢ ⎥⎥⎟⎥
    ⎣⎝       ⎣⎣0⎦⎦⎠  ⎝      ⎣⎣1⎦⎦⎠⎦

The system has a critical point at the origin with eigenvalues -b and b. Since b is positive, the eigenvalues are real, distinct, and of opposite sign, so the critical point is a saddle point, which is unstable (the explicit solution sketched below confirms this). Because the system's critical point is a saddle, by Definition 9 the index of the critical point is -1, and by Theorem 1 there are therefore no limit cycles in the system, i.e. there is no isolated periodic solution.

```python
import matplotlib.pyplot as plt
import numpy as np
from scipy.integrate import odeint
import pylab as pl
import matplotlib
```

```python
def dx_dt(x,t):
    return [ -0.5*x[0] , 0.5*x[1] ]

# trajectories forward in time
ts=np.linspace(0,10,500)
ic=np.linspace(-1,1,6)
for r in ic:
    for s in ic:
        x0=[r,s]
        xs=odeint(dx_dt,x0,ts)
        plt.plot(xs[:,0],xs[:,1],"r-")
# trajectories backward in time
ts=np.linspace(0,-10,500)
ic=np.linspace(-1,1,6)
for r in ic:
    for s in ic:
        x0=[r,s]
        xs=odeint(dx_dt,x0,ts)
        plt.plot(xs[:,0],xs[:,1],"r-")
# axis labels and font style
plt.xlabel('S',fontsize=10)
plt.ylabel('I',fontsize=10)
plt.tick_params(labelsize=10)
plt.xlim(-1,1)
plt.ylim(-1,1)
# vector field
X,Y=np.mgrid[-1:1:20j,-1:1:20j]
u=-0.5*X
v=0.5*Y
pl.quiver(X,Y,u,v,color='b')
plt.show()
```

Analysis of the existence of bifurcations: the critical point of the system does not move as the value of b (beta) changes, since its location is independent of this parameter.
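A short analytic check to complement the numerical phase portrait (a sketch added for clarity; it uses only the equations already given): the system decouples, so

$$S(t) = S_0 e^{-bt}, \qquad I(t) = I_0 e^{bt}.$$

For $b > 0$, $S$ decays along the eigendirection $(1,0)$ and $I$ grows along $(0,1)$, which is exactly the saddle found above. Moreover $\frac{d}{dt}(SI) = -bSI + bSI = 0$, so the product $SI$ is conserved and every trajectory off the axes lies on a branch of the hyperbola $SI = S_0 I_0$; in particular no closed orbit is possible, consistent with the index argument ruling out limit cycles.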
Behaviour of the system when beta < 0

```python
# system dS/dt=-b*S, dI/dt=b*I with beta<0
def dx_dt(x,t):
    return [ 1*x[0] , -1*x[1] ]

# trajectories forward in time
ts=np.linspace(0,10,500)
ic=np.linspace(20000,100000,3)
for r in ic:
    for s in ic:
        x0=[r,s]
        xs=odeint(dx_dt,x0,ts)
        plt.plot(xs[:,0],xs[:,1],"-", color="orangered", lw=1.5)
# trajectories backward in time
ts=np.linspace(0,-10,500)
ic=np.linspace(20000,100000,3)
for r in ic:
    for s in ic:
        x0=[r,s]
        xs=odeint(dx_dt,x0,ts)
        plt.plot(xs[:,0],xs[:,1],"-", color="orangered", lw=1.5)
# axis labels and font style
plt.xlabel('S',fontsize=20)
plt.ylabel('I',fontsize=20)
plt.tick_params(labelsize=12)
plt.ticklabel_format(style="sci", scilimits=(0,0))
plt.xlim(0,100000)
plt.ylim(0,100000)
# vector field
X,Y=np.mgrid[0:100000:15j,0:100000:15j]
u=1*X
v=-1*Y
pl.quiver(X,Y,u,v,color='dimgray')
plt.show()
```

Behaviour of the system when beta > 0

```python
# system dS/dt=-b*S, dI/dt=b*I with beta>0
def dx_dt(x,t):
    return [ -1*x[0] , 1*x[1] ]

# trajectories forward in time
ts=np.linspace(0,10,500)
ic=np.linspace(0,100000,4)
for r in ic:
    for s in ic:
        x0=[r,s]
        xs=odeint(dx_dt,x0,ts)
        plt.plot(xs[:,0],xs[:,1],"r-")
# trajectories backward in time
ts=np.linspace(0,-10,500)
ic=np.linspace(0,100000,4)
for r in ic:
    for s in ic:
        x0=[r,s]
        xs=odeint(dx_dt,x0,ts)
        plt.plot(xs[:,0],xs[:,1],"r-")
# axis labels and font style
plt.xlabel('S',fontsize=10)
plt.ylabel('I',fontsize=10)
plt.tick_params(labelsize=10)
plt.xlim(0,100000)
plt.ylim(0,100000)
# vector field
X,Y=np.mgrid[0:100000:20j,0:100000:20j]
u=-1*X
v=1*Y
pl.quiver(X,Y,u,v,color='b')
plt.show()
```

Flipping the sign of beta reverses the direction of the flow in the system: S now grows while I decays.

```python

```
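A remark on why the two portraits are mirror images (a sketch added for clarity): changing the sign of $\beta$ is a time reversal. If $(S(t), I(t))$ solves $\frac{dS}{dt} = -\beta S$, $\frac{dI}{dt} = \beta I$, then $(S(-t), I(-t))$ solves the same system with $-\beta$ in place of $\beta$. Hence for $\beta < 0$ the eigenvalues of the linearization are $|\beta|$ in the $S$-direction and $-|\beta|$ in the $I$-direction: $S$ grows, $I$ decays, the arrows of the vector field reverse, and the hyperbolic trajectories $SI = \text{const}$ are traversed in the opposite sense, while the origin remains a saddle.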