Dataset: AI4M (column: text, string lengths 0–3.34M)
record R (A : Set) : Set where
  field f : A → A

open module R′ (@0 A : Set) (r : R A) = R {A = A} r
# United States calendars
# In the United States, if a holiday falls on Saturday, it's observed on the preceding Friday.
# If it falls on Sunday, it's observed on the next Monday.
function adjustweekendholidayUS(dt::Date)
    if dayofweek(dt) == Dates.Saturday
        return dt - Dates.Day(1)
    end
    if dayofweek(dt) == Dates.Sunday
        return dt + Dates.Day(1)
    end
    return dt
end

function isholiday(::USSettlement , dt::Date)
    const yy = Dates.year(dt)
    const mm = Dates.month(dt)
    const dd = Dates.day(dt)

    if (
        # New Year's Day
        adjustweekendholidayUS(Date(yy, 1, 1)) == dt ||
        # New Year's Day on the previous year when 1st Jan is Saturday
        (mm == 12 && dd == 31 && dayofweek(dt) == Friday) ||
        # Birthday of Martin Luther King, Jr.
        (yy >= 1983 && adjustweekendholidayUS(findweekday(Dates.Monday, yy, 1, 3, true)) == dt) ||
        # Washington's Birthday
        adjustweekendholidayUS(findweekday(Dates.Monday, yy, 2, 3, true)) == dt ||
        # Memorial Day
        adjustweekendholidayUS(findweekday(Dates.Monday, yy, 5, 1, false)) == dt ||
        # Independence Day
        adjustweekendholidayUS(Date(yy, 7, 4)) == dt ||
        # Labor Day
        adjustweekendholidayUS(findweekday(Dates.Monday, yy, 9, 1, true)) == dt ||
        # Columbus Day
        adjustweekendholidayUS(findweekday(Dates.Monday, yy, 10, 2, true)) == dt ||
        # Veterans Day
        adjustweekendholidayUS(Date(yy, 11, 11)) == dt ||
        # Thanksgiving Day
        adjustweekendholidayUS(findweekday(Dates.Thursday, yy, 11, 4, true)) == dt ||
        # Christmas
        adjustweekendholidayUS(Date(yy, 12, 25)) == dt
       )
        return true
    end

    return false
end

function isholiday(::USNYSE , dt::Date)
    const yy = Dates.year(dt)
    const mm = Dates.month(dt)
    const dd = Dates.day(dt)
    const dt_rata::Int = Dates.days(dt)
    const e_rata::Int = easter_rata(Dates.Year(yy))

    if (
        # New Year's Day
        adjustweekendholidayUS(Date(yy, 1, 1)) == dt ||
        # Birthday of Martin Luther King, Jr.
        (yy >= 1998 && adjustweekendholidayUS(findweekday(Dates.Monday, yy, 1, 3, true)) == dt) ||
        # Washington's Birthday
        adjustweekendholidayUS(findweekday(Dates.Monday, yy, 2, 3, true)) == dt ||
        # Good Friday
        dt_rata == ( e_rata - 2 ) ||
        # Memorial Day
        adjustweekendholidayUS(findweekday(Dates.Monday, yy, 5, 1, false)) == dt ||
        # Independence Day
        adjustweekendholidayUS(Date(yy, 7, 4)) == dt ||
        # Labor Day
        adjustweekendholidayUS(findweekday(Dates.Monday, yy, 9, 1, true)) == dt ||
        # Thanksgiving Day
        adjustweekendholidayUS(findweekday(Dates.Thursday, yy, 11, 4, true)) == dt ||
        # Christmas
        adjustweekendholidayUS(Date(yy, 12, 25)) == dt
       )
        return true
    end

    # Presidential election days
    if (yy <= 1968 || (yy <= 1980 && yy % 4 == 0)) && mm == 11 && dd <= 7 && Dates.istuesday(dt)
        return true
    end

    # Special Closings
    if (
        # Hurricane Sandy
        yy == 2012 && mm == 10 && (dd == 29 || dd == 30) ||
        # President Ford's funeral
        dt == Date(2007,1,2) ||
        # President Reagan's funeral
        dt == Date(2004,6,11) ||
        # Sep 11th
        yy == 2001 && mm == 9 && ( 11 <= dd && dd <= 14) ||
        # President Nixon's funeral
        dt == Date(1994,4,27) ||
        # Hurricane Gloria
        dt == Date(1985,9,27) ||
        # 1977 Blackout
        dt == Date(1977,7,14) ||
        # Funeral of former President Lyndon B. Johnson
        dt == Date(1973,1,25) ||
        # Funeral of former President Harry S. Truman
        dt == Date(1972,12,28) ||
        # National Day of Participation for the lunar exploration
        dt == Date(1969,7,21) ||
        # Eisenhower's funeral
        dt == Date(1969,3,31) ||
        # Heavy snow
        dt == Date(1969,2,10) ||
        # Day after Independence Day
        dt == Date(1968,7,5) ||
        # Paperwork Crisis
        yy == 1968 && dayofyear(dt) >= 163 && Dates.iswednesday(dt) ||
        # Mourning for Martin Luther King Jr
        dt == Date(1968,4,9) ||
        # President Kennedy's funeral
        dt == Date(1963,11,25) ||
        # Day before Decoration Day
        dt == Date(1961,5,29) ||
        # Day after Christmas
        dt == Date(1958,12,26) ||
        # Christmas Eve
        dt in [Date(1954,12,24), Date(1956,12,24), Date(1965,12,24)]
       )
        return true
    end

    return false
end

function isholiday(::USGovernmentBond , dt::Date)
    const yy = Dates.year(dt)
    const mm = Dates.month(dt)
    const dd = Dates.day(dt)
    const dt_rata::Int = Dates.days(dt)
    const e_rata::Int = easter_rata(Dates.Year(yy))

    if (
        # New Year's Day
        adjustweekendholidayUS(Date(yy, 1, 1)) == dt ||
        # Birthday of Martin Luther King, Jr.
        yy >= 1983 && adjustweekendholidayUS(findweekday(Dates.Monday, yy, 1, 3, true)) == dt ||
        # Washington's Birthday
        adjustweekendholidayUS(findweekday(Dates.Monday, yy, 2, 3, true)) == dt ||
        # Good Friday
        dt_rata == ( e_rata - 2 ) ||
        # Memorial Day
        adjustweekendholidayUS(findweekday(Dates.Monday, yy, 5, 1, false)) == dt ||
        # Independence Day
        adjustweekendholidayUS(Date(yy, 7, 4)) == dt ||
        # Labor Day
        adjustweekendholidayUS(findweekday(Dates.Monday, yy, 9, 1, true)) == dt ||
        # Columbus Day
        adjustweekendholidayUS(findweekday(Dates.Monday, yy, 10, 2, true)) == dt ||
        # Veterans Day
        adjustweekendholidayUS(Date(yy, 11, 11)) == dt ||
        # Thanksgiving Day
        adjustweekendholidayUS(findweekday(Dates.Thursday, yy, 11, 4, true)) == dt ||
        # Christmas
        adjustweekendholidayUS(Date(yy, 12, 25)) == dt
       )
        return true
    end

    return false
end
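As a quick illustration of the weekend-observance rule described in the comments above, the sketch below (dates chosen purely for illustration) calls adjustweekendholidayUS on a Sunday holiday, a Saturday holiday, and a weekday holiday; it assumes only the Dates standard library and the function defined above.

using Dates
# July 4, 2021 fell on a Sunday, so the observed date moves to Monday, July 5.
adjustweekendholidayUS(Date(2021, 7, 4))    # == Date(2021, 7, 5)
# December 25, 2021 fell on a Saturday, so the observed date moves back to Friday, December 24.
adjustweekendholidayUS(Date(2021, 12, 25))  # == Date(2021, 12, 24)
# A weekday holiday is returned unchanged (July 4, 2023 was a Tuesday).
adjustweekendholidayUS(Date(2023, 7, 4))    # == Date(2023, 7, 4)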
"Assesses model's accuracy on a test data set. Usage: src/model_results.r --test=<test> --out_dir=<out_dir> Options: --test=<test> Path (including filename) to test data (which needs to be saved as a feather file) --out_dir=<out_dir> Path to directory where the plots should be saved " -> doc library(tidyverse) library(docopt) set.seed(2019) opt <- docopt(doc) main <- function(test, out_dir) { # Load and Wrangle test data test_data <- read.csv(test) # Load Model and Predict final_model <- readRDS("results/final_model.rds") hd_predictions <- predict(final_model, test_data) %>% bind_cols(test_data) hd_predictions %>% metrics(truth = diagnosis, estimate = .pred_class) %>% filter(.metric == "accuracy") # Assess Model Accuracy confusion_mat <- hd_predictions %>% conf_mat(truth = diagnosis, estimate = .pred_class) ggsave(paste0(out_dir, "/confusion_matrix.png"), confusion_mat, width = 8, height = 10) } main(opt[["--test"]], opt[["--out_dir"]])
/- Copyright (c) 2016 Jeremy Avigad. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Jeremy Avigad ! This file was ported from Lean 3 source module data.list.sort ! leanprover-community/mathlib commit f694c7dead66f5d4c80f446c796a5aad14707f0e ! Please do not edit these lines, except to modify the commit id ! if you have ported upstream changes. -/ import Mathlib.Data.List.OfFn import Mathlib.Data.List.Perm /-! # Sorting algorithms on lists In this file we define `List.Sorted r l` to be an alias for `Pairwise r l`. This alias is preferred in the case that `r` is a `<` or `≤`-like relation. Then we define two sorting algorithms: `List.insertionSort` and `List.mergeSort`, and prove their correctness. -/ open List.Perm universe uu namespace List /-! ### The predicate `List.Sorted` -/ section Sorted variable {α : Type uu} {r : α → α → Prop} {a : α} {l : List α} /-- `Sorted r l` is the same as `Pairwise r l`, preferred in the case that `r` is a `<` or `≤`-like relation (transitive and antisymmetric or asymmetric) -/ def Sorted := @Pairwise #align list.sorted List.Sorted instance decidableSorted [DecidableRel r] (l : List α) : Decidable (Sorted r l) := List.instDecidablePairwise _ #align list.decidable_sorted List.decidableSorted protected theorem Sorted.le_of_lt [Preorder α] {l : List α} (h : l.Sorted (· < ·)) : l.Sorted (· ≤ ·) := h.imp le_of_lt protected theorem Sorted.lt_of_le [PartialOrder α] {l : List α} (h₁ : l.Sorted (· ≤ ·)) (h₂ : l.Nodup) : l.Sorted (· < ·) := h₁.imp₂ (fun _ _ => lt_of_le_of_ne) h₂ @[simp] theorem sorted_nil : Sorted r [] := Pairwise.nil #align list.sorted_nil List.sorted_nil theorem Sorted.of_cons : Sorted r (a :: l) → Sorted r l := Pairwise.of_cons #align list.sorted.of_cons List.Sorted.of_cons theorem Sorted.tail {r : α → α → Prop} {l : List α} (h : Sorted r l) : Sorted r l.tail := Pairwise.tail h #align list.sorted.tail List.Sorted.tail theorem rel_of_sorted_cons {a : α} {l : List α} : Sorted r (a :: l) → ∀ b ∈ l, r a b := rel_of_pairwise_cons #align list.rel_of_sorted_cons List.rel_of_sorted_cons @[simp] theorem sorted_cons {a : α} {l : List α} : Sorted r (a :: l) ↔ (∀ b ∈ l, r a b) ∧ Sorted r l := pairwise_cons #align list.sorted_cons List.sorted_cons protected theorem Sorted.nodup {r : α → α → Prop} [IsIrrefl α r] {l : List α} (h : Sorted r l) : Nodup l := Pairwise.nodup h #align list.sorted.nodup List.Sorted.nodup theorem eq_of_perm_of_sorted [IsAntisymm α r] {l₁ l₂ : List α} (p : l₁ ~ l₂) (s₁ : Sorted r l₁) (s₂ : Sorted r l₂) : l₁ = l₂ := by induction' s₁ with a l₁ h₁ s₁ IH generalizing l₂ · exact p.nil_eq · have : a ∈ l₂ := p.subset (mem_cons_self _ _) rcases mem_split this with ⟨u₂, v₂, rfl⟩ have p' := (perm_cons a).1 (p.trans perm_middle) obtain rfl := IH p' (s₂.sublist <| by simp) change a :: u₂ ++ v₂ = u₂ ++ ([a] ++ v₂) rw [← append_assoc] congr have : ∀ (x : α) (_ : x ∈ u₂), x = a := fun x m => antisymm ((pairwise_append.1 s₂).2.2 _ m a (mem_cons_self _ _)) (h₁ _ (by simp [m])) rw [(@eq_replicate _ a (length u₂ + 1) (a :: u₂)).2, (@eq_replicate _ a (length u₂ + 1) (u₂ ++ [a])).2] <;> constructor <;> simp [iff_true_intro this, or_comm] #align list.eq_of_perm_of_sorted List.eq_of_perm_of_sorted theorem sublist_of_subperm_of_sorted [IsAntisymm α r] {l₁ l₂ : List α} (p : l₁ <+~ l₂) (s₁ : l₁.Sorted r) (s₂ : l₂.Sorted r) : l₁ <+ l₂ := by let ⟨_, h, h'⟩ := p rwa [← eq_of_perm_of_sorted h (s₂.sublist h') s₁] #align list.sublist_of_subperm_of_sorted List.sublist_of_subperm_of_sorted @[simp 1100] --Porting note: 
higher priority for linter theorem sorted_singleton (a : α) : Sorted r [a] := pairwise_singleton _ _ #align list.sorted_singleton List.sorted_singleton theorem Sorted.rel_get_of_lt {l : List α} (h : l.Sorted r) {a b : Fin l.length} (hab : a < b) : r (l.get a) (l.get b) := List.pairwise_iff_get.1 h _ _ hab theorem Sorted.rel_nthLe_of_lt {l : List α} (h : l.Sorted r) {a b : ℕ} (ha : a < l.length) (hb : b < l.length) (hab : a < b) : r (l.nthLe a ha) (l.nthLe b hb) := List.pairwise_iff_get.1 h ⟨a, ha⟩ ⟨b, hb⟩ hab #align list.sorted.rel_nth_le_of_lt List.Sorted.rel_nthLe_of_lt theorem Sorted.rel_get_of_le [IsRefl α r] {l : List α} (h : l.Sorted r) {a b : Fin l.length} (hab : a ≤ b) : r (l.get a) (l.get b) := by rcases hab.eq_or_lt with (rfl | hlt) exacts [refl _, h.rel_get_of_lt hlt] theorem Sorted.rel_nthLe_of_le [IsRefl α r] {l : List α} (h : l.Sorted r) {a b : ℕ} (ha : a < l.length) (hb : b < l.length) (hab : a ≤ b) : r (l.nthLe a ha) (l.nthLe b hb) := h.rel_get_of_le hab #align list.sorted.rel_nth_le_of_le List.Sorted.rel_nthLe_of_le theorem Sorted.rel_of_mem_take_of_mem_drop {l : List α} (h : List.Sorted r l) {k : ℕ} {x y : α} (hx : x ∈ List.take k l) (hy : y ∈ List.drop k l) : r x y := by obtain ⟨⟨iy, hiy⟩, rfl⟩ := get_of_mem hy obtain ⟨⟨ix, hix⟩, rfl⟩ := get_of_mem hx rw [get_take', get_drop'] rw [length_take] at hix exact h.rel_nthLe_of_lt _ _ (ix.lt_add_right _ _ (lt_min_iff.mp hix).left) #align list.sorted.rel_of_mem_take_of_mem_drop List.Sorted.rel_of_mem_take_of_mem_drop end Sorted section Monotone variable {n : ℕ} {α : Type uu} [Preorder α] {f : Fin n → α} theorem sorted_ofFn_iff {r : α → α → Prop} : (ofFn f).Sorted r ↔ ((· < ·) ⇒ r) f f := by simp_rw [Sorted, pairwise_iff_get, length_ofFn, get_ofFn, Relator.LiftFun] exact Iff.symm (Fin.cast _).surjective.forall₂ /-- The list `List.ofFn f` is strictly sorted with respect to `(· ≤ ·)` if and only if `f` is strictly monotone. -/ @[simp] theorem sorted_lt_ofFn_iff : (ofFn f).Sorted (· < ·) ↔ StrictMono f := sorted_ofFn_iff /-- The list `List.ofFn f` is sorted with respect to `(· ≤ ·)` if and only if `f` is monotone. -/ @[simp] theorem sorted_le_ofFn_iff : (ofFn f).Sorted (· ≤ ·) ↔ Monotone f := sorted_ofFn_iff.trans monotone_iff_forall_lt.symm /-- A tuple is monotone if and only if the list obtained from it is sorted. -/ @[deprecated sorted_le_ofFn_iff] theorem monotone_iff_ofFn_sorted : Monotone f ↔ (ofFn f).Sorted (· ≤ ·) := sorted_le_ofFn_iff.symm #align list.monotone_iff_of_fn_sorted List.monotone_iff_ofFn_sorted /-- The list obtained from a monotone tuple is sorted. -/ alias sorted_le_ofFn_iff ↔ _ _root_.Monotone.ofFn_sorted #align list.monotone.of_fn_sorted Monotone.ofFn_sorted end Monotone section sort variable {α : Type uu} (r : α → α → Prop) [DecidableRel r] local infixl:50 " ≼ " => r /-! ### Insertion sort -/ section InsertionSort /-- `orderedInsert a l` inserts `a` into `l` at such that `orderedInsert a l` is sorted if `l` is. -/ @[simp] def orderedInsert (a : α) : List α → List α | [] => [a] | b :: l => if a ≼ b then a :: b :: l else b :: orderedInsert a l #align list.ordered_insert List.orderedInsert /-- `insertionSort l` returns `l` sorted using the insertion sort algorithm. 
-/ @[simp] def insertionSort : List α → List α | [] => [] | b :: l => orderedInsert r b (insertionSort l) #align list.insertion_sort List.insertionSort @[simp] theorem orderedInsert_nil (a : α) : [].orderedInsert r a = [a] := rfl #align list.ordered_insert_nil List.orderedInsert_nil theorem orderedInsert_length : ∀ (L : List α) (a : α), (L.orderedInsert r a).length = L.length + 1 | [], a => rfl | hd :: tl, a => by dsimp [orderedInsert] split_ifs <;> simp [orderedInsert_length tl] #align list.ordered_insert_length List.orderedInsert_length /-- An alternative definition of `orderedInsert` using `takeWhile` and `dropWhile`. -/ theorem orderedInsert_eq_take_drop (a : α) : ∀ l : List α, l.orderedInsert r a = (l.takeWhile fun b => ¬a ≼ b) ++ a :: l.dropWhile fun b => ¬a ≼ b | [] => rfl | b :: l => by dsimp only [orderedInsert] split_ifs with h <;> simp [takeWhile, dropWhile, *, orderedInsert_eq_take_drop a l] #align list.ordered_insert_eq_take_drop List.orderedInsert_eq_take_drop theorem insertionSort_cons_eq_take_drop (a : α) (l : List α) : insertionSort r (a :: l) = ((insertionSort r l).takeWhile fun b => ¬a ≼ b) ++ a :: (insertionSort r l).dropWhile fun b => ¬a ≼ b := orderedInsert_eq_take_drop r a _ #align list.insertion_sort_cons_eq_take_drop List.insertionSort_cons_eq_take_drop section Correctness open Perm theorem orderedInsert_count [DecidableEq α] (L : List α) (a b : α) : count a (L.orderedInsert r b) = count a L + if a = b then 1 else 0 := by rw [(L.perm_orderedInsert r b).count_eq, count_cons] split_ifs <;> simp only [Nat.succ_eq_add_one, add_zero] #align list.ordered_insert_count List.orderedInsert_count theorem perm_insertionSort : ∀ l : List α, insertionSort r l ~ l | [] => Perm.nil | b :: l => by simpa [insertionSort] using (perm_orderedInsert _ _ _).trans ((perm_insertionSort l).cons b) #align list.perm_insertion_sort List.perm_insertionSort variable {r} /-- If `l` is already `List.Sorted` with respect to `r`, then `insertionSort` does not change it. -/ theorem Sorted.insertionSort_eq : ∀ {l : List α} (_ : Sorted r l), insertionSort r l = l | [], _ => rfl | [a], _ => rfl | a :: b :: l, h => by rw [insertionSort, Sorted.insertionSort_eq, orderedInsert, if_pos] exacts[rel_of_sorted_cons h _ (mem_cons_self _ _), h.tail] #align list.sorted.insertion_sort_eq List.Sorted.insertionSort_eq section TotalAndTransitive variable [IsTotal α r] [IsTrans α r] theorem Sorted.orderedInsert (a : α) : ∀ l, Sorted r l → Sorted r (orderedInsert r a l) | [], _ => sorted_singleton a | b :: l, h => by by_cases h' : a ≼ b · -- Porting note: was -- `simpa [orderedInsert, h', h] using fun b' bm => trans h' (rel_of_sorted_cons h _ bm)` rw [List.orderedInsert, if_pos h', sorted_cons] exact ⟨forall_mem_cons.2 ⟨h', fun c hc => _root_.trans h' (rel_of_sorted_cons h _ hc)⟩, h⟩ · suffices ∀ b' : α, b' ∈ List.orderedInsert r a l → r b b' by simpa [orderedInsert, h', h.of_cons.orderedInsert a l] intro b' bm cases' show b' = a ∨ b' ∈ l by simpa using (perm_orderedInsert _ _ _).subset bm with be bm · subst b' exact (total_of r _ _).resolve_left h' · exact rel_of_sorted_cons h _ bm #align list.sorted.ordered_insert List.Sorted.orderedInsert variable (r) /-- The list `List.insertionSort r l` is `List.Sorted` with respect to `r`. -/ theorem sorted_insertionSort : ∀ l, Sorted r (insertionSort r l) | [] => sorted_nil | a :: l => (sorted_insertionSort l).orderedInsert a _ #align list.sorted_insertion_sort List.sorted_insertionSort end TotalAndTransitive end Correctness end InsertionSort /-! 
### Merge sort -/ section MergeSort -- TODO(Jeremy): observation: if instead we write (a :: (split l).1, b :: (split l).2), the -- equation compiler can't prove the third equation /-- Split `l` into two lists of approximately equal length. split [1, 2, 3, 4, 5] = ([1, 3, 5], [2, 4]) -/ @[simp] def split : List α → List α × List α | [] => ([], []) | a :: l => let (l₁, l₂) := split l (a :: l₂, l₁) #align list.split List.split theorem split_cons_of_eq (a : α) {l l₁ l₂ : List α} (h : split l = (l₁, l₂)) : split (a :: l) = (a :: l₂, l₁) := by rw [split, h] #align list.split_cons_of_eq List.split_cons_of_eq theorem length_split_le : ∀ {l l₁ l₂ : List α}, split l = (l₁, l₂) → length l₁ ≤ length l ∧ length l₂ ≤ length l | [], _, _, rfl => ⟨Nat.le_refl 0, Nat.le_refl 0⟩ | a :: l, l₁', l₂', h => by cases' e : split l with l₁ l₂ injection (split_cons_of_eq _ e).symm.trans h; substs l₁' l₂' cases' length_split_le e with h₁ h₂ exact ⟨Nat.succ_le_succ h₂, Nat.le_succ_of_le h₁⟩ #align list.length_split_le List.length_split_le theorem length_split_lt {a b} {l l₁ l₂ : List α} (h : split (a :: b :: l) = (l₁, l₂)) : length l₁ < length (a :: b :: l) ∧ length l₂ < length (a :: b :: l) := by cases' e : split l with l₁' l₂' injection (split_cons_of_eq _ (split_cons_of_eq _ e)).symm.trans h; substs l₁ l₂ cases' length_split_le e with h₁ h₂ exact ⟨Nat.succ_le_succ (Nat.succ_le_succ h₁), Nat.succ_le_succ (Nat.succ_le_succ h₂)⟩ #align list.length_split_lt List.length_split_lt theorem perm_split : ∀ {l l₁ l₂ : List α}, split l = (l₁, l₂) → l ~ l₁ ++ l₂ | [], _, _, rfl => Perm.refl _ | a :: l, l₁', l₂', h => by cases' e : split l with l₁ l₂ injection (split_cons_of_eq _ e).symm.trans h; substs l₁' l₂' exact ((perm_split e).trans perm_append_comm).cons a #align list.perm_split List.perm_split /-- Merge two sorted lists into one in linear time. merge [1, 2, 4, 5] [0, 1, 3, 4] = [0, 1, 1, 2, 3, 4, 4, 5] -/ def merge : List α → List α → List α | [], l' => l' | l, [] => l | a :: l, b :: l' => if a ≼ b then a :: merge l (b :: l') else b :: merge (a :: l) l' termination_by merge l₁ l₂ => length l₁ + length l₂ #align list.merge List.merge /-- Implementation of a merge sort algorithm to sort a list. 
-/ def mergeSort : List α → List α | [] => [] | [a] => [a] | a :: b :: l => by -- Porting note: rewrote to make `mergeSort_cons_cons` proof easier let ls := (split (a :: b :: l)) have e : split (a :: b :: l) = ⟨ls.1, ls.2⟩ := rfl have h := length_split_lt e have := h.1 have := h.2 exact merge r (mergeSort ls.1) (mergeSort ls.2) termination_by mergeSort l => length l #align list.merge_sort List.mergeSort @[nolint unusedHavesSuffices] --Porting note: false positive theorem mergeSort_cons_cons {a b} {l l₁ l₂ : List α} (h : split (a :: b :: l) = (l₁, l₂)) : mergeSort r (a :: b :: l) = merge r (mergeSort r l₁) (mergeSort r l₂) := by simp only [mergeSort, h] #align list.merge_sort_cons_cons List.mergeSort_cons_cons section Correctness theorem perm_merge : ∀ l l' : List α, merge r l l' ~ l ++ l' | [], [] => by simp [merge] | [], b :: l' => by simp [merge] | a :: l, [] => by simp [merge] | a :: l, b :: l' => by by_cases h : a ≼ b · simpa [merge, h] using perm_merge _ _ · suffices b :: merge r (a :: l) l' ~ a :: (l ++ b :: l') by simpa [merge, h] exact ((perm_merge _ _).cons _).trans ((swap _ _ _).trans (perm_middle.symm.cons _)) termination_by perm_merge l₁ l₂ => length l₁ + length l₂ #align list.perm_merge List.perm_merge theorem perm_mergeSort : ∀ l : List α, mergeSort r l ~ l | [] => by simp [mergeSort] | [a] => by simp [mergeSort] | a :: b :: l => by cases' e : split (a :: b :: l) with l₁ l₂ cases' length_split_lt e with h₁ h₂ rw [mergeSort_cons_cons r e] apply (perm_merge r _ _).trans exact ((perm_mergeSort l₁).append (perm_mergeSort l₂)).trans (perm_split e).symm termination_by perm_mergeSort l => length l #align list.perm_merge_sort List.perm_mergeSort @[simp] theorem length_mergeSort (l : List α) : (mergeSort r l).length = l.length := (perm_mergeSort r _).length_eq #align list.length_merge_sort List.length_mergeSort section TotalAndTransitive variable {r} [IsTotal α r] [IsTrans α r] theorem Sorted.merge : ∀ {l l' : List α}, Sorted r l → Sorted r l' → Sorted r (merge r l l') | [], [], _, _ => by simp [List.merge] | [], b :: l', _, h₂ => by simpa [List.merge] using h₂ | a :: l, [], h₁, _ => by simpa [List.merge] using h₁ | a :: l, b :: l', h₁, h₂ => by by_cases h : a ≼ b · suffices ∀ (b' : α) (_ : b' ∈ List.merge r l (b :: l')), r a b' by simpa [List.merge, h, h₁.of_cons.merge h₂] intro b' bm rcases show b' = b ∨ b' ∈ l ∨ b' ∈ l' by simpa [or_left_comm] using (perm_merge _ _ _).subset bm with (be | bl | bl') · subst b' assumption · exact rel_of_sorted_cons h₁ _ bl · exact _root_.trans h (rel_of_sorted_cons h₂ _ bl') · suffices ∀ (b' : α) (_ : b' ∈ List.merge r (a :: l) l'), r b b' by simpa [List.merge, h, h₁.merge h₂.of_cons] intro b' bm have ba : b ≼ a := (total_of r _ _).resolve_left h have : b' = a ∨ b' ∈ l ∨ b' ∈ l' := by simpa using (perm_merge _ _ _).subset bm rcases this with (be | bl | bl') · subst b' assumption · exact _root_.trans ba (rel_of_sorted_cons h₁ _ bl) · exact rel_of_sorted_cons h₂ _ bl' termination_by Sorted.merge l₁ l₂ _ _ => length l₁ + length l₂ #align list.sorted.merge List.Sorted.merge variable (r) theorem sorted_mergeSort : ∀ l : List α, Sorted r (mergeSort r l) | [] => by simp [mergeSort] | [a] => by simp [mergeSort] | a :: b :: l => by cases' e : split (a :: b :: l) with l₁ l₂ cases' length_split_lt e with h₁ h₂ rw [mergeSort_cons_cons r e] exact (sorted_mergeSort l₁).merge (sorted_mergeSort l₂) termination_by sorted_mergeSort l => length l #align list.sorted_merge_sort List.sorted_mergeSort theorem mergeSort_eq_self [IsAntisymm α r] {l : List α} : Sorted r l → 
mergeSort r l = l := eq_of_perm_of_sorted (perm_mergeSort _ _) (sorted_mergeSort _ _) #align list.merge_sort_eq_self List.mergeSort_eq_self theorem mergeSort_eq_insertionSort [IsAntisymm α r] (l : List α) : mergeSort r l = insertionSort r l := eq_of_perm_of_sorted ((perm_mergeSort r l).trans (perm_insertionSort r l).symm) (sorted_mergeSort r l) (sorted_insertionSort r l) #align list.merge_sort_eq_insertion_sort List.mergeSort_eq_insertionSort end TotalAndTransitive end Correctness @[simp] theorem mergeSort_nil : [].mergeSort r = [] := by rw [List.mergeSort] #align list.merge_sort_nil List.mergeSort_nil @[simp] theorem mergeSort_singleton (a : α) : [a].mergeSort r = [a] := by rw [List.mergeSort] #align list.merge_sort_singleton List.mergeSort_singleton end MergeSort end sort -- try them out! --#eval insertionSort (fun m n : ℕ => m ≤ n) [5, 27, 221, 95, 17, 43, 7, 2, 98, 567, 23, 12] --#eval mergeSort (fun m n : ℕ => m ≤ n) [5, 27, 221, 95, 17, 43, 7, 2, 98, 567, 23, 12] end List
lemma convex_singleton[intro,simp]: "convex {a}"
# plumber.R

#* Echo back the input
#* @param msg The message to echo
#* @get /echo
function(msg="") {
  list(msg = paste0("The message is: '", msg, "'"))
}

#* Return the sum of two numbers
#* @param a The first number to add
#* @param b The second number to add
#* @post /sum
function(a, b) {
  as.numeric(a) + as.numeric(b)
}
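A router file like this is typically served locally with something along the lines of plumber::plumb("plumber.R")$run(port = 8000) (the port is arbitrary); a GET request to /echo?msg=hello then returns the echoed message, and a POST to /sum with parameters a and b returns their numeric sum.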
import analysis.special_functions.exp_log

noncomputable theory

open set real

section rounding

/-- Addition of affine forms with error. -/
-- TODO: Move.
def log₂ (x : ℝ) := log x / log 2

-- TODO: This should happen over the dyadic rationals.
-- Or not, but prove a lemma that there exists a dyadic that casts to the computed real.
-- TODO: Move to dyadic.
def round_down (p : ℤ) (r : ℝ) : ℝ := ⌊r * 2 ^ p⌋ * 2 ^ -p

lemma round_down_zero (p : ℤ) : round_down p 0 = 0 :=
by simp [round_down]

def round_up (p : ℤ) (r : ℝ) : ℚ := ⌈r * 2 ^ p⌉ * 2 ^ -p

-- TODO: This should be shorter.
lemma round_up_mono (p : ℤ) (x y : ℝ) (h : x ≤ y) : round_up p x ≤ round_up p y :=
begin
  simp [round_up],
  refine (mul_le_mul_right _).2 _,
  { simp, exact (fpow_pos_of_pos (by linarith) p), },
  { simp, apply ceil_mono, refine (mul_le_mul_right _).2 h,
    exact fpow_pos_of_pos (by linarith) p, },
end

def truncate_down (p : ℕ) (r : ℝ) : ℝ := round_down (p - ⌊log₂ r⌋ - 1) r

lemma truncate_down_zero (p : ℕ) : truncate_down p 0 = 0 :=
by simp [truncate_down, round_down_zero]

def truncate_up (p : ℕ) (r : ℝ) : ℝ := round_up (p - ⌊log₂ r⌋ - 1) r

lemma truncate_up_mono (p : ℕ) (x y : ℝ) (h : x ≤ y) : truncate_up p x ≤ truncate_up p x :=
by simp [truncate_up, round_up_mono]

-- We need something like ⌈a * b⌉ ≥ ⌈a⌉ * b if b ≥ 1. Then we should
-- assume that p - ⌊log₂ r⌋ ≥ 1. That is not great, it holds regardless...
lemma truncate_up_ge (p : ℕ) (r : ℝ) : r ≤ truncate_up p r :=
begin
  simp [truncate_up, round_up],
  sorry,
end

-- Def trunc-bound-eucl (p. 152)
def truncate_with_error (p : ℕ) (r : ℝ) : ℝ × ℝ :=
let q := truncate_down p r, e := truncate_up p (abs (q - r)) in ⟨q, e⟩

lemma truncate_with_error_is_bound (p : ℕ) (r : ℝ) :
  ∃ e : ℝ, (truncate_with_error p r).1 = r + e ∧ abs e ≤ (truncate_with_error p r).2 :=
begin
  use [truncate_down p r - r],
  split,
  { ring, },
  { simp [truncate_with_error, truncate_up_ge], }
end

end rounding
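As a concrete instance of the two rounding operations defined above (numbers chosen arbitrarily), take p = 3 and r = 0.3:

round_down 3 0.3 = ⌊0.3 * 2^3⌋ * 2^(-3) = ⌊2.4⌋ / 8 = 2/8 = 0.25
round_up   3 0.3 = ⌈0.3 * 2^3⌉ * 2^(-3) = ⌈2.4⌉ / 8 = 3/8 = 0.375

So the rounded-down value sits just below r and the rounded-up value just above it, and both are dyadic rationals with denominator dividing 2^p.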
(* Copyright (c) 2012-2015, Robbert Krebbers. *) (* This file is distributed under the terms of the BSD license. *) Require Import String axiomatic_simple. Section gcd. Context `{EnvSpec K}. Hint Extern 10 (Some Readable ⊆ _) => transitivity (Some Writable): core. Hint Extern 0 (perm_locked _ = _) => apply perm_Readable_locked; auto : typeclass_instances. Hint Resolve ax_load' ax_var' assert_memext_l' assert_eval_int_cast_self' assert_memext_r' assert_and_l assert_singleton_eval assert_int_typed_eval assert_eval_singleton_r assert_eval_singleton_l assert_and_intro : exec. Ltac exec := repeat match goal with A := _ : assert _ |- _ => progress unfold A end; simpl; eauto 20 with exec. Definition gcd_stmt : stmt K := "l" :; if{load (var 1)} local{uintT} ( !(var 2 ::= ( var 0 ::= load (var 1) .{ArithOp ModOp} load (var 2),, var 1 ::= load (var 2),, load (var 0)));; goto "l" ) else skip. Lemma gcd_typed : (∅,∅,[uintT%T;uintT%T]) ⊢ gcd_stmt : (false,None). Proof. change false with (true && false). repeat (apply SLocal_typed || typed_constructor || constructor); try set_solver. apply base_binop_type_of_sound; simpl. by rewrite (idemp_L _), int_promote_int. Qed. Lemma gcd_correct Γ δ R J T C y z μ1 γ1 μ2 γ2 : sep_valid γ1 → Some Writable ⊆ perm_kind γ1 → sep_valid γ2 → Some Writable ⊆ perm_kind γ2 → J "l"%string ≡{Γ,δ} (∃ y' z', ⌜ Z.gcd y' z' = Z.gcd y z ⌝%Z ★ var 0 ↦{μ1,γ1} #intV{uintT} y' : uintT ★ var 1 ↦{μ2,γ2} #intV{uintT} z' : uintT)%A → Γ\ δ\ R\ J\ T\ C ⊨ₛ {{ var 0 ↦{μ1,γ1} #intV{uintT} y : uintT ★ var 1 ↦{μ2,γ2} #intV{uintT} z : uintT }} gcd_stmt {{ var 0 ↦{μ1,γ1} #intV{uintT} (Z.gcd y z) : uintT ★ var 1 ↦{μ2,γ2} #intV{uintT} 0 : uintT }}. Proof. intros ???? HJ; eapply ax_comp. { eapply ax_stmt_weaken_pre, ax_label; rewrite HJ. apply assert_exist_intro with y, assert_exist_intro with z. by rewrite assert_Prop_l by done. } eapply ax_stmt_weaken_pre; [by rewrite HJ|]. apply ax_stmt_exist_pre; intros y'; apply ax_stmt_exist_pre; intros z'. apply ax_stmt_Prop_pre; try set_solver; intros Hgcd. eapply ax_stmt_weaken_pre. { by rewrite (assert_singleton_int_typed' _ _ (var 0)), (assert_singleton_int_typed' _ _ (var 1)), (assoc (★)%A), (comm _ _ (⌜ int_typed z' _ ⌝)%A), <-!(assoc (★)%A). } apply ax_stmt_Prop_pre; try set_solver; intros Hz'. apply ax_stmt_Prop_pre; try set_solver; intros Hy'. eapply ax_if'' with (intV{uintT} z')%B; auto. { apply ax_expr_frame_l'. rewrite (assert_singleton_l _ _ (var 1)) at 1. apply ax_expr_exist_pre; intros a1. eapply ax_expr_weaken_post'; [by rewrite <-(assert_singleton_l_2 Γ _ (var 1) _ _ _ a1)|exec]. } { by eapply assert_exist_intro, assert_eval_int_unop'; auto using assert_memext_r', assert_eval_singleton_r. } { apply ax_stmt_Prop_pre; try set_solver; simpl; intros. apply ax_local. set (R' v := (var 0 ↦{false,perm_full} - : uintT%BT ★ R v↑)%A). set (J' l := (var 0 ↦{false,perm_full} - : uintT%BT ★ (J l)↑)%A). set (T' n := (var 0 ↦{false,perm_full} - : uintT%BT ★ (T n)↑)%A). set (C' mz := (var 0 ↦{false,perm_full} - : uintT%BT ★ (C mz)↑)%A). rewrite !assert_lift_sep, !assert_lift_singleton; simpl. apply ax_comp with ( var 0 ↦{false,perm_full} #intV{uintT} (y' `mod` z') : uintT ★ var 1 ↦{μ1,γ1} #intV{uintT} z' : uintT ★ var 2 ↦{μ2,γ2} #intV{uintT} (y' `mod` z') : uintT)%A. * eapply ax_do' with _ (inr (intV{uintT} (y' `mod` z'))); [|by rewrite <-!assert_unlock_sep, <-(assert_lock_singleton _ _ (var 2)), <-2!unlock_indep]. rewrite (assert_singleton_l_ _ _ (var 0)), assert_exist_sep. apply ax_expr_exist_pre; intros a_tmp. 
eapply ax_expr_weaken_post'; [by rewrite <-(assert_singleton_l_2 Γ _ (var 0) _ _ _ a_tmp)|]. rewrite <-!(assoc (★)%A); apply ax_expr_invariant_l'. rewrite (right_id _ (★)%A), (assert_singleton_l _ _ (var 1)), (assert_exist_sep (A:=ptr _)), (assert_sep_exist (A:=ptr _)). apply ax_expr_exist_pre; intros a_y. eapply ax_expr_weaken_post'; [by rewrite <-(assert_singleton_l_2 Γ _ (var 1) _ _ _ a_y)|]. rewrite !(assoc (★)%A), !(comm (★)%A _ (_ ∧ _)%A). rewrite <-!(assoc (★)%A); apply ax_expr_invariant_l'. rewrite (assert_singleton_l _ _ (var 2)), !(assert_sep_exist (A:=ptr _)). apply ax_expr_exist_pre; intros a_z. eapply ax_expr_weaken_post'; [by rewrite <-(assert_singleton_l_2 Γ _ (var 2) _ _ _ a_z)|]. rewrite <-!(comm (★)%A _ (var 2 ⇓ _ ∧ _)%A), !(assoc (★)%A). apply ax_expr_invariant_r'. set (A' := ((var 2 ⇓ inl a_z ∧ emp) ★ (var 1 ⇓ inl a_y ∧ emp) ★ var 0 ⇓ inl a_tmp ∧ emp)%A). rewrite <-!(assoc (★)%A). eapply ax_assign_r' with (%a_tmp ↦{false,perm_full} #intV{uintT} (y' `mod` z') : uintT ★ %a_y ↦{μ1,γ1} #intV{uintT} z' : uintT ★ %a_z ↦{μ2,γ2} #intV{uintT} z' : uintT)%A μ2 γ2 uintT%T a_z (intV{uintT} (y' `mod` z')) _; try by exec. { eapply ax_expr_comma' with (%a_tmp ↦{false,perm_full} #intV{uintT} (y' `mod` z') : uintT ★ %a_y ↦{μ1,γ1} #intV{uintT} y' : uintT ★ %a_z ↦{μ2,γ2} #intV{uintT} z' : uintT)%A (inr (intV{uintT} (y' `mod` z'))). { eapply ax_expr_weaken_post'; [by rewrite <-!assert_unlock_sep, <-(assert_lock_singleton _ _ (%a_tmp)), <-2!unlock_indep by done|]. apply ax_expr_invariant_r'. set (A'' := ((%a_y ↦{μ1,γ1} #intV{uintT} y' : uintT%BT ★ %a_z ↦{μ2,γ2} #intV{uintT} z' : uintT%BT) ★ A')%A). eapply ax_assign_r' with _ _ perm_full uintT%T _ (intV{uintT} (y' `mod` z')) _. * exec. * eapply ax_binop_r'; try by exec. eapply assert_eval_int_arithop'; try by exec. + by simpl; rewrite (idemp_L _), int_promote_int. + by simpl; rewrite int_pre_cast_self by done. + simpl; rewrite !int_pre_cast_self by done. apply int_typed_unsigned_nonneg in Hy'. apply int_typed_unsigned_nonneg in Hz'. by rewrite Z.rem_mod_nonneg by lia. * cut (int_typed (y' `mod` z') uintT); [exec|]. rewrite int_typed_spec_alt in Hz', Hy' |- *; simpl in *. split; [apply Z.mod_pos_bound;lia|]. transitivity z'; [apply Z.mod_pos_bound|]; lia. * rewrite <-(right_id _ (★)%A) at 1. apply assert_sep_preserving, assert_wand_intro; rewrite ?(left_id _ (★)%A); eauto using assert_exist_intro. * done. } eapply ax_expr_comma' with _ (inr (intV{uintT} z')); [|exec]. eapply ax_expr_weaken_post'; [by rewrite <-!assert_unlock_sep, <-(assert_lock_singleton _ _ (%a_y)), <-2!unlock_indep by done|]. apply ax_expr_frame_l', ax_expr_invariant_r'. set (A'' := (%a_z ↦{μ2,γ2} #intV{uintT} z' : uintT%BT ★ A')%A). eapply ax_assign_r' with _ _ γ1 uintT%T _ (intV{uintT} z') _; try exec. * rewrite <-(right_id _ (★)%A); apply assert_sep_preserving, assert_wand_intro; rewrite ?(left_id _ (★)%A); eauto using assert_exist_intro. } { rewrite !(assoc (★)%A), (comm (★)%A _ (%a_z ↦{_,_} _ : _)%A). eauto using assert_sep_preserving, assert_wand_intro, assert_exist_intro. } * eapply ax_stmt_weaken_pre, ax_goto. apply assert_sep_preserving; eauto using assert_exist_intro. rewrite HJ; repeat setoid_rewrite (assert_lift_exists Γ δ). apply assert_exist_intro with z', assert_exist_intro with (y' `mod` z')%Z. rewrite !assert_lift_sep, !assert_lift_singleton, stack_indep; simpl. assert (Z.gcd z' (y' `mod` z') = Z.gcd y z). { by rewrite Z.gcd_comm, Z.gcd_mod, Z.gcd_comm by lia. } by rewrite assert_Prop_l by auto using Z_mod_pos, Z.gcd_mod with lia. 
} { eapply ax_stmt_weaken_pre, ax_skip; simpl. apply assert_Prop_intro_l; intros ->. by rewrite Z.gcd_0_r_nonneg in Hgcd by eauto using int_typed_unsigned_nonneg; subst. } Qed. End gcd.
      integer e1,e4,e1p,e4p
      parameter(e1=5,e4=6,e1p=7,e4p=8)
import for_mathlib.group -- some stupid lemma about units import Spa.space import Huber_ring.localization /-! # Rational open subsets and their properties We define a preorder on `rational_open_data` that will be used when constructing the valuations on the stalks of the structure presheaf. -/ open_locale classical local attribute [instance] set.pointwise_mul_comm_semiring local attribute [instance] set.smul_set_action local postfix `⁺` : 66 := λ A : Huber_pair, A.plus namespace spa open set algebra variables {A : Huber_pair} namespace rational_open_data variables (r : rational_open_data A) /-- The preorder on rational open data. Due to limitations in the existing mathematical library, we cannot work with the “correct” preorder on rational open data. The “correct” preorder on rational open data would be: def correct_preorder : preorder (rational_open_data A) := { le := λ r1 r2, rational_open r1 ⊆ rational_open r2, le_refl := λ _ _, id, le_trans := λ _ _ _, subset.trans } One can prove (in maths) that r1 ≤ r2 iff there's a continuous R-algebra morphism of Huber pairs localization r2 → localization r1. I think the ← direction of this iff is straightforward (but I didn't think about it too carefully). However we definitely cannot prove the → direction of this iff in this repo yet because we don't have enough API for cont. Here is an indication of part of the problem. localization r2 is just A[1/r2.s]. But we cannot prove yet r2.s is invertible in localization.r1, even though we know it doesn't vanish anywhere on rational_open r2 and hence on rational_open r1, because the fact that it doesn't vanish anywhere on rational_open r1 only means that it's not in any prime ideal corresponding to a *continuous* valuation on localization r1 which is bounded by 1 on some + subring; one would now need to prove, at least, that every maximal ideal is the support of a continuous valuation, which is Wedhorn 7.52(2). This is not too bad -- but it is work that we have not yet done. However this is by no means the whole story; we would also need that r1.T is power-bounded in localization.r2 and this looks much worse: it's Wedhorn 7.52(1). Everything is do-able, but it's just *long*. Long as in "thousands more lines of code". We will need a good theory of primary and secondary specialisation of valuations and so on and so on. None of this is there at the time of writing, although I see no obstruction to putting it there, other than the fact that it would take weeks of work. We have to work with a weaker preorder then, because haven't made a good enough API for continuous valuations. We basically work with the preorder r1 ≤ r2 iff there's a continuous R-algebra map localization r2 → localization r1, i.e, we define our way around the problem. We are fortunate in that we can prove (in maths) that the projective limit over this preorder agrees with the projective limit over the correct preorder. 
-/ instance : preorder (rational_open_data A) := { le := λ r1 r2, ∃ k : A, r1.s * k = r2.s ∧ ∀ t₁ ∈ r1.T, ∃ t₂ ∈ r2.T, ∃ N : ℕ, r2.s ^ N * t₂ = r2.s ^ N * (t₁ * k), le_refl := λ r, ⟨1, mul_one _, λ t ht, ⟨t, ht, 0, by rw mul_one⟩⟩, le_trans := λ a b c ⟨k, hk, hab⟩ ⟨l, hl, hbc⟩, ⟨k * l, by rw [←mul_assoc, hk, hl], λ ta hta, begin rcases hab ta hta with ⟨tb, htb, Nab, h1⟩, rcases hbc tb htb with ⟨hc, htc, Nbc, h2⟩, refine ⟨hc, htc, (Nab + Nbc), _⟩, rw [←mul_assoc, pow_add, mul_assoc, h2, ←hl, mul_pow, mul_pow], rw (show b.s ^ Nab * l ^ Nab * (b.s ^ Nbc * l ^ Nbc * (tb * l)) = b.s ^ Nab * tb * (l ^ Nab * (b.s ^ Nbc * l ^ Nbc * l)), by ring), rw h1, ring end⟩ } lemma le_inter_left (r1 r2 : rational_open_data A) : r1 ≤ (inter r1 r2) := begin refine ⟨r2.s, rfl, _⟩, intros t1 ht1, refine ⟨t1 * r2.s, ⟨t1, mem_insert_of_mem _ ht1, r2.s, mem_insert_s _, rfl⟩, 0, by simp⟩, end lemma le_inter_right (r1 r2 : rational_open_data A) : r2 ≤ (inter r1 r2) := by { rw inter_symm, apply le_inter_left, } -- The preorder defined above is weaker than the preorder we're supposed to have but don't. -- However the projective limit we take over our preorder is provably (in maths) equal to -- the projective limit that we cannot even formalise. The thing we definitely need -- is that if r1 ≤ r2 then there's a map localization r1 → localization r2 /-- The localization of a Huber pair A at the rational open subset r = D(T,s) ⊆ spa(A). -/ def localization (r : rational_open_data A) := Huber_ring.away r.T r.s namespace localization /-- The ring structure on the localization at the rational open subset r = D(T,s) ⊆ spa(A). -/ instance : comm_ring (localization r) := by unfold localization; apply_instance /-- The basis of open subgroups of the localization at the rational open subset r = D(T,s) ⊆ spa(A). -/ instance : subgroups_basis (localization r) := Huber_ring.away.top_loc_basis r.T r.s r.Hopen /-- The topology on the localization at the rational open subset r = D(T,s) ⊆ spa(A). -/ instance : topological_space (localization r) := subgroups_basis.topology _ /-- The localization at the rational open subset r = D(T,s) ⊆ spa(A) is a topological ring. -/ instance : topological_ring (localization r) := ring_filter_basis.is_topological_ring _ rfl /-- The uniform structure on the localization at the rational open subset r = D(T,s) ⊆ spa(A). -/ instance (r : rational_open_data A) : uniform_space (rational_open_data.localization r) := topological_add_group.to_uniform_space _ /-- The localization at the rational open subset r = D(T,s) ⊆ spa(A) is a uniform additive group. -/ instance (rd : rational_open_data A): uniform_add_group (rational_open_data.localization rd) := topological_add_group_is_uniform /-- The localization at the rational open subset r = D(T,s) ⊆ spa(A) is a an algebra over A. -/ instance : algebra A (localization r) := Huber_ring.away.algebra r.T r.s /-- The coercion from a Huber pair A to the localization at the rational open subset r = D(T,s) ⊆ spa(A). 
-/ instance : has_coe A (localization r) := ⟨λ a, (of_id A (localization r) : A → localization r) a⟩ lemma nonarchimedean (r : rational_open_data A) : topological_add_group.nonarchimedean (localization r) := subgroups_basis.nonarchimedean set_option class.instance_max_depth 38 /--If A is a Huber pair, and r = D(T,s) a rational open subset of Spa(A), and coe is the localization map A → A(T/s), then `power_bounded_data r` is the set { coe(t)/s | t ∈ T } ⊆ A(T/s).-/ def power_bounded_data (r : rational_open_data A) : set (localization r) := let s_inv : localization r := ((localization.to_units ⟨r.s, ⟨1, by simp⟩⟩)⁻¹ : units (localization r)) in (s_inv • (coe : A → localization r) '' r.T) theorem power_bounded (r : rational_open_data A) : is_power_bounded_subset (power_bounded_data r) := begin suffices : is_bounded (ring.closure (power_bounded_data r)), { exact is_bounded.subset add_group.subset_closure this }, intros U hU, rcases subgroups_basis.mem_nhds_zero.mp hU with ⟨_, ⟨V, rfl⟩, hV⟩, refine ⟨_, mem_nhds_sets (subgroups_basis.is_op _ rfl (set.mem_range_self _)) _, _⟩, { exact V }, { erw submodule.mem_coe, convert submodule.zero_mem _ }, { intros v hv b hb, apply hV, rw [mul_comm, ← smul_eq_mul], rw submodule.mem_coe at hv ⊢, convert submodule.smul_mem _ _ hv, swap, { exact ⟨b, hb⟩ }, { refl } } end end localization /-- This auxilliary function produces r1.s as a unit in localization r2 -/ noncomputable def s_inv_aux (r1 r2 : rational_open_data A) (h : r1 ≤ r2) : units (localization r2) := @units.unit_of_mul_left_eq_unit _ _ ((of_id A (localization r2) : A → r2.localization) r1.s) ((of_id A (localization r2) : A → r2.localization) (classical.some h)) (localization.to_units (⟨r2.s, 1, by simp⟩ : powers r2.s)) begin rw [← alg_hom.map_mul, (classical.some_spec h).1], refl, end /-- The map A(T1/s1) -> A(T2/s2) coming from the inequality r1 ≤ r2 -/ noncomputable def localization_map {r1 r2 : rational_open_data A} (h : r1 ≤ r2) : localization r1 → localization r2 := Huber_ring.away.lift r1.T r1.s (of_id A (localization r2)) (s_inv_aux r1 r2 h) rfl /-- The induced map A(T1/s1) -> A(T2/s2) coming from the inequality r1 ≤ r2 is a ring homomorphism. -/ instance {r1 r2 : rational_open_data A} (h : r1 ≤ r2) : is_ring_hom (localization_map h) := by delta localization_map; apply_instance /- To prove continuity of the localisation map coming from r1 ≤ r2 we need to check that the image of T1/s1 under the localization map is power-bounded in the ring (localization r2). This is done in the following lemma. 
-/ local attribute [instance] set.pointwise_mul_comm_semiring local attribute [instance] set.smul_set_action set_option class.instance_max_depth 38 lemma localization_map_is_cts_aux {r1 r2 : rational_open_data A} (h : r1 ≤ r2) : is_power_bounded_subset ((s_inv_aux r1 r2 h)⁻¹.val • (λ (x : ↥A), to_fun (localization r2) x) '' r1.T) := begin refine power_bounded.subset _ (localization.power_bounded r2), intros x hx, rcases hx with ⟨_, ⟨t₁, ht₁, rfl⟩, rfl⟩, let h' := h, -- need it later rcases h with ⟨a, ha, h₂⟩, rcases h₂ t₁ ht₁ with ⟨t₂, ht₂, N, hN⟩, show ↑(s_inv_aux r1 r2 _)⁻¹ * to_fun (localization r2) t₁ ∈ localization.mk 1 ⟨r2.s, _⟩ • (of_id ↥A (localization r2)).to_fun '' r2.T, refine ⟨(of_id ↥A (localization r2)).to_fun t₂, ⟨t₂, ht₂, rfl⟩, _⟩, rw [←units.mul_left_inj (s_inv_aux r1 r2 h'), units.mul_inv_cancel_left], show to_fun (localization r2) t₁ = to_fun (localization r2) (r1.s) * (localization.mk 1 ⟨r2.s, _⟩ * to_fun (localization r2) t₂), rw [mul_comm, mul_assoc], rw ←units.mul_left_inj (localization.to_units (⟨r2.s, 1, by simp⟩ : powers r2.s)), rw ←mul_assoc, -- t1=s1*(1/s2 * t2) in r2 have : ↑(localization.to_units (⟨r2.s, 1, by simp⟩ : powers r2.s)) * localization.mk (1 : A) (⟨r2.s, 1, by simp⟩ : powers r2.s) = 1, convert units.mul_inv _, rw [this, one_mul], clear this, show to_fun (localization r2) r2.s * _ = _, rw ←units.mul_left_inj (localization.to_units (⟨r2.s ^ N, N, rfl⟩ : powers r2.s)), show to_fun (localization r2) (r2.s ^ N) * _ = to_fun (localization r2) (r2.s ^ N) * _, have hrh : is_ring_hom (to_fun (localization r2)) := begin change is_ring_hom ((of_id ↥A (localization r2)).to_fun), apply_instance, end, rw ←@is_ring_hom.map_mul _ _ _ _ (to_fun (localization r2)) hrh, rw ←@is_ring_hom.map_mul _ _ _ _ (to_fun (localization r2)) hrh, rw ←@is_ring_hom.map_mul _ _ _ _ (to_fun (localization r2)) hrh, rw ←@is_ring_hom.map_mul _ _ _ _ (to_fun (localization r2)) hrh, congr' 1, rw [←mul_assoc _ t₂, hN], rw ←ha, ring, end -- Continuity now follows from the universal property. lemma localization_map_is_cts {r1 r2 : rational_open_data A} (h : r1 ≤ r2) : continuous (localization_map h) := Huber_ring.away.lift_continuous r1.T r1.s (localization.nonarchimedean r2) (Huber_ring.away.of_continuous r2.T r2.s _) _ _ _ (localization_map_is_cts_aux h) lemma localization_map_is_uniform_continuous {r1 r2 : rational_open_data A} (h : r1 ≤ r2) : uniform_continuous (rational_open_data.localization_map h) := uniform_continuous_of_continuous (rational_open_data.localization_map_is_cts h) end rational_open_data -- namespace end spa
(* To be imported qualified. *) Require Import MathClasses.interfaces.abstract_algebra MathClasses.interfaces.universal_algebra MathClasses.theory.ua_homomorphisms MathClasses.misc.workaround_tactics MathClasses.categories.categories. Require MathClasses.categories.varieties MathClasses.categories.product MathClasses.theory.forget_algebra MathClasses.theory.forget_variety. Inductive op := mult | one. Definition sig: Signature := single_sorted_signature (λ o, match o with one => O | mult => 2%nat end). Section laws. Global Instance: SgOp (Term0 sig nat tt) := fun x => App sig _ _ _ (App sig _ _ _ (Op sig nat mult) x). Global Instance: MonUnit (Term0 sig nat tt) := Op sig nat one. Local Notation x := (Var sig nat 0%nat tt). Local Notation y := (Var sig nat 1%nat tt). Local Notation z := (Var sig nat 2%nat tt). Import notations. Inductive Laws: EqEntailment sig → Prop := | e_mult_assoc: Laws (x & (y & z) === (x & y) & z) | e_mult_1_l: Laws (mon_unit & x === x) | e_mult_1_r: Laws (x & mon_unit === x). End laws. Definition theory: EquationalTheory := Build_EquationalTheory sig Laws. Definition Object := varieties.Object theory. Local Hint Extern 3 => progress simpl : typeclass_instances. Definition forget: Object → setoids.Object := @product.project unit (λ _, setoids.Object) (λ _, _) _ (λ _, _) (λ _, _) (λ _, _) tt ∘ forget_algebra.object theory ∘ forget_variety.forget theory. (* todo: too ugly *) (* Now follow a series of encoding/decoding functions to convert between the specialized Monoid/Monoid_Morphism type classes and the universal Algebra/InVariety/HomoMorphism type classes instantiated with the above signature and theory. *) #[global] Instance encode_operations A `{!SgOp A} `{!MonUnit A}: AlgebraOps sig (λ _, A) := λ o, match o with mult => (&) | one => mon_unit: A end. Section decode_operations. Context `{AlgebraOps theory A}. Global Instance: MonUnit (A tt) := algebra_op one. Global Instance: SgOp (A tt) := algebra_op mult. End decode_operations. Section encode_variety_and_ops. Context A `{Monoid A}. Global Instance encode_algebra_and_ops: Algebra sig _. Proof. constructor. intro. apply _. intro o. destruct o; simpl; try apply _; unfold Proper; reflexivity. Qed. Global Instance encode_variety_and_ops: InVariety theory (λ _, A) | 10. Proof. constructor. apply _. intros ? [] ?; simpl; unfold algebra_op; simpl. apply associativity. apply left_identity. apply right_identity. Qed. Definition object: Object := varieties.object theory (λ _, A). End encode_variety_and_ops. Lemma encode_algebra_only `{!AlgebraOps theory A} `{∀ u, Equiv (A u)} `{!Monoid (A tt)}: Algebra theory A . Proof. constructor; intros []; apply _. Qed. Global Instance decode_variety_and_ops `{InVariety theory A}: Monoid (A tt) | 10. Proof with simpl; auto. pose proof (λ law lawgood x y z, variety_laws law lawgood (λ s n, match s with tt => match n with 0 => x | 1 => y | _ => z end end)) as laws. constructor. constructor. apply _. intro. apply_simplified (laws _ e_mult_assoc). apply_simplified (algebra_propers mult)... intro. apply_simplified (laws _ e_mult_1_l)... intro. apply_simplified (laws _ e_mult_1_r)... Qed. Lemma encode_morphism_only `{AlgebraOps theory A} `{∀ u, Equiv (A u)} `{AlgebraOps theory B} `{∀ u, Equiv (B u)} (f: ∀ u, A u → B u) `{!Monoid_Morphism (f tt)}: HomoMorphism sig A B f. Proof. pose proof (monmor_a (f:=f tt)). pose proof (monmor_b (f:=f tt)). constructor. intros []. apply _. intros []; simpl. apply preserves_sg_op. apply (@preserves_mon_unit (A tt) (B tt) _ _ _ _ _ _ (f tt)). apply _. 
apply encode_algebra_only. apply encode_algebra_only. Qed. Lemma encode_morphism_and_ops `{Monoid_Morphism A B f}: @HomoMorphism sig (λ _, A) (λ _, B) _ _ ( _) ( _) (λ _, f). Proof. intros. apply encode_morphism_only. assumption. Qed. Lemma decode_morphism_and_ops `{InVariety theory x} `{InVariety theory y} `{!HomoMorphism theory x y f}: Monoid_Morphism (f tt). Proof. constructor; try apply _. constructor; try apply _. apply (preserves theory x y f mult). apply (preserves theory x y f one). Qed. #[global] Instance id_monoid_morphism `{Monoid A}: Monoid_Morphism (@id A). Proof. repeat (split; try apply _); easy. Qed. (* Finally, we use these encoding/decoding functions to specialize some universal results: *) Section specialized. Context `{Equiv A}`{MonUnit A} `{SgOp A} `{Equiv B} `{MonUnit B} `{SgOp B} `{Equiv C} `{MonUnit C} `{SgOp C} (f : A → B) (g : B → C). Instance compose_monoid_morphism: Monoid_Morphism f → Monoid_Morphism g → Monoid_Morphism (g ∘ f). Proof. intros. pose proof (encode_morphism_and_ops (f:=f)) as P. pose proof (encode_morphism_and_ops (f:=g)) as Q. pose proof (@compose_homomorphisms theory _ _ _ _ _ _ _ _ _ _ _ P Q) as PP. pose proof (monmor_a (f:=f)). pose proof (monmor_b (f:=f)). pose proof (monmor_b (f:=g)). apply (@decode_morphism_and_ops _ _ _ _ _ _ _ _ _ PP). Qed. Lemma invert_monoid_morphism: ∀ `{!Inverse f}, Bijective f → Monoid_Morphism f → Monoid_Morphism (f⁻¹). Proof. intros. pose proof (encode_morphism_and_ops (f:=f)) as P. pose proof (@invert_homomorphism theory _ _ _ _ _ _ _ _ _ _ P) as Q. pose proof (monmor_a (f:=f)). pose proof (monmor_b (f:=f)). apply (@decode_morphism_and_ops _ _ _ _ _ _ _ _ _ Q). Qed. End specialized. #[global] Hint Extern 4 (Monoid_Morphism (_ ∘ _)) => class_apply @compose_monoid_morphism : typeclass_instances. #[global] Hint Extern 4 (Monoid_Morphism (_⁻¹)) => class_apply @invert_monoid_morphism : typeclass_instances.
Require Import Verdi.Verdi. Require Import Verdi.HandlerMonad. Require Import Verdi.NameOverlay. Require Import Verdi.TotalMapSimulations. Require Import Verdi.PartialMapSimulations. Require Import Verdi.DynamicNetLemmas. Require Import StructTact.Update. Require Import StructTact.Update2. Require Import StructTact.StructTactics. Require Import StructTact.ListUtil. Require Import TreeAux. Require Import FailureRecorderDynamic. Require Import FailureRecorderDynamicCorrect. Require Import TreeDynamic. Require Import Sumbool. Require Import Orders. Require Import MSetFacts. Require Import MSetProperties. Require Import FMapInterface. Require Import Sorting.Permutation. Require Import mathcomp.ssreflect.ssreflect. Require Import mathcomp.ssreflect.ssrbool. Local Arguments update {_} {_} _ _ _ _ _ : simpl never. Set Implicit Arguments. Module TreeCorrect (Import NT : NameType) (NOT : NameOrderedType NT) (NSet : MSetInterface.S with Module E := NOT) (NOTC : NameOrderedTypeCompat NT) (NMap : FMapInterface.S with Module E := NOTC) (Import RNT : RootNameType NT) (Import ANT : AdjacentNameType NT) (Import TA : TAux NT NOT NSet NOTC NMap). Module NSetFacts := Facts NSet. Module NSetProps := Properties NSet. Module NSetOrdProps := OrdProperties NSet. Require Import FMapFacts. Module NMapFacts := Facts NMap. Module FRC := FailureRecorderCorrect NT NOT NSet ANT. Module FR := FRC.FR. Module TR := Tree NT NOT NSet NOTC NMap RNT ANT TA. Import TR. Instance Tree_FailureRecorder_base_params_pt_map : BaseParamsPartialMap Tree_BaseParams FR.FailureRecorder_BaseParams := { pt_map_data := fun d => FR.mkData d.(adjacent) ; pt_map_input := fun _ => None ; pt_map_output := fun _ => None }. Instance Tree_FailureRecorder_name_tot_map : MultiParamsNameTotalMap Tree_MultiParams FR.FailureRecorder_MultiParams := { tot_map_name := id ; tot_map_name_inv := id ; }. Instance Tree_FailureRecorder_name_tot_map_bijective : MultiParamsNameTotalMapBijective Tree_FailureRecorder_name_tot_map := { tot_map_name_inv_inverse := fun _ => Logic.eq_refl ; tot_map_name_inverse_inv := fun _ => Logic.eq_refl }. Instance Tree_FailureRecorder_multi_params_pt_map : MultiParamsMsgPartialMap Tree_MultiParams FR.FailureRecorder_MultiParams := { pt_map_msg := fun m => match m with | Fail => Some FR.Fail | New => Some FR.New | _ => None end ; }. Instance Tree_FailureRecorder_multi_params_pt_map_congruency : MultiParamsPartialMapCongruency Tree_FailureRecorder_base_params_pt_map Tree_FailureRecorder_name_tot_map Tree_FailureRecorder_multi_params_pt_map := { pt_init_handlers_eq := fun _ => Logic.eq_refl ; pt_net_handlers_some := _ ; pt_net_handlers_none := _ ; pt_input_handlers_some := _ ; pt_input_handlers_none := _ }. Proof. - move => me src mg st mg' H_eq. rewrite /pt_mapped_net_handlers. repeat break_let. case H_n: net_handlers => [[out st'] ps]. rewrite /= /runGenHandler_ignore /= in Heqp H_n. repeat break_let. repeat tuple_inversion. unfold id in *. destruct u, u0, st'. by net_handler_cases; FR.net_handler_cases; simpl in *; congruence. - move => me src mg st out st' ps H_eq H_eq'. rewrite /= /runGenHandler_ignore /= in H_eq'. repeat break_let. repeat tuple_inversion. destruct u, st'. by net_handler_cases; simpl in *; congruence. - move => me inp st inp' H_eq. rewrite /pt_mapped_input_handlers. repeat break_let. case H_i: input_handlers => [[out st'] ps]. rewrite /= /runGenHandler_ignore /= in Heqp H_i. repeat break_let. repeat tuple_inversion. destruct u. by io_handler_cases. - move => me inp st out st' ps H_eq H_eq'. 
rewrite /= /runGenHandler_ignore /= in H_eq'. repeat break_let. repeat tuple_inversion. destruct u, st'. io_handler_cases; simpl in *; try by congruence. rewrite /level_adjacent NSet.fold_spec /flip /=. elim: NSet.elements => //=. move => n l IH. rewrite /flip /= /level_fold. rewrite (@fold_left_level_fold_eq Tree_TreeMsg). by rewrite filterMap_app /= IH. Qed. Instance Tree_FailureRecorder_fail_msg_params_pt_map_congruency : FailMsgParamsPartialMapCongruency Tree_FailMsgParams FR.FailureRecorder_FailMsgParams Tree_FailureRecorder_multi_params_pt_map := { pt_fail_msg_fst_snd := Logic.eq_refl }. Instance Tree_FailureRecorder_name_overlay_params_tot_map_congruency : NameOverlayParamsTotalMapCongruency Tree_NameOverlayParams FR.FailureRecorder_NameOverlayParams Tree_FailureRecorder_name_tot_map := { tot_adjacent_to_fst_snd := fun _ _ => conj (fun H => H) (fun H => H) }. Instance Tree_FailureRecorder_new_msg_params_pt_map_congruency : NewMsgParamsPartialMapCongruency Tree_NewMsgParams FR.FailureRecorder_NewMsgParams Tree_FailureRecorder_multi_params_pt_map := { pt_new_msg_fst_snd := Logic.eq_refl }. Theorem Tree_Failed_pt_mapped_simulation_star_1 : forall net failed tr, @step_ordered_dynamic_failure_star _ _ _ Tree_NewMsgParams Tree_FailMsgParams step_ordered_dynamic_failure_init (failed, net) tr -> @step_ordered_dynamic_failure_star _ _ _ FR.FailureRecorder_NewMsgParams FR.FailureRecorder_FailMsgParams step_ordered_dynamic_failure_init (failed, pt_map_odnet net) (filterMap pt_map_trace_ev tr). Proof. move => onet failed tr H_st. apply step_ordered_dynamic_failure_pt_mapped_simulation_star_1 in H_st. by rewrite map_id in H_st. Qed. Lemma Tree_node_not_adjacent_self_lift : forall net failed n, (In n (odnwNodes (pt_map_odnet net)) -> ~ In n failed -> forall d, odnwState (pt_map_odnet net) n = Some d -> ~ NSet.In n (FR.adjacent d)) -> (In n (odnwNodes net) -> ~ In n failed -> forall d, odnwState net n = Some d -> ~ NSet.In n d.(adjacent)). Proof. move => net failed n H_p H_in H_in' d H_eq. rewrite /= /id /= map_id H_eq /= in H_p. have H_p' := H_p H_in H_in' {| FR.adjacent := d.(adjacent) |}. exact: H_p'. Qed. Lemma Tree_node_not_adjacent_self : forall net failed tr, step_ordered_dynamic_failure_star step_ordered_dynamic_failure_init (failed, net) tr -> forall n, In n (odnwNodes net) -> ~ In n failed -> forall d, odnwState net n = Some d -> ~ NSet.In n d.(adjacent). Proof. move => net failed tr H_st n H_n H_f d H_eq. have H_st' := Tree_Failed_pt_mapped_simulation_star_1 H_st. have H_inv' := @FRC.Failure_node_not_adjacent_self _ _ _ H_st' n. eapply Tree_node_not_adjacent_self_lift in H_inv'; eauto. Qed. Lemma Tree_not_failed_no_fail : forall onet failed tr, step_ordered_dynamic_failure_star step_ordered_dynamic_failure_init (failed, onet) tr -> forall n, In n (odnwNodes onet) -> ~ In n failed -> forall n', ~ In Fail (onet.(odnwPackets) n n'). Proof. move => net failed tr H_st n H_n H_f n'. have H_st' := Tree_Failed_pt_mapped_simulation_star_1 H_st. have IH := FRC.Failure_not_failed_no_fail H_st'. rewrite /= map_id /id /= in IH. have IH' := IH _ H_n H_f n'. move => H_in. case: IH'. move: H_in. apply: in_msg_filterMap_pt_map_msg. exact: pt_fail_msg_fst_snd. Qed. Lemma Tree_in_after_all_fail_new : forall net failed tr, step_ordered_dynamic_failure_star step_ordered_dynamic_failure_init (failed, net) tr -> forall n, In n (odnwNodes net) -> ~ In n failed -> forall (n' : name), before_all New Fail (net.(odnwPackets) n' n). Proof. move => net failed tr H_st n H_n H_f n'. 
have H_st' := Tree_Failed_pt_mapped_simulation_star_1 H_st. have IH := FRC.Failure_in_after_all_fail_new H_st'. rewrite /= map_id /id /= in IH. have IH' := IH _ H_n H_f n'. move: IH'. exact: in_all_before_pt_map_msg. Qed. Lemma Tree_pt_map_msg_injective : forall m0 m1 m2 : msg, pt_map_msg m0 = Some m2 -> pt_map_msg m1 = Some m2 -> m0 = m1. Proof. by case => [|lvo'|]; case => [|lvo''|] => //=; case. Qed. Lemma Tree_le_one_new : forall net failed tr, step_ordered_dynamic_failure_star step_ordered_dynamic_failure_init (failed, net) tr -> forall n, In n (odnwNodes net) -> ~ In n failed -> forall (n' : name), count_occ Msg_eq_dec (net.(odnwPackets) n' n) New <= 1. Proof. move => net failed tr H_st n H_n H_f n'. have H_st' := Tree_Failed_pt_mapped_simulation_star_1 H_st. have IH := FRC.Failure_le_one_new H_st'. rewrite /= map_id /id /= in IH. have IH' := IH _ H_n H_f n'. move: IH'. set c1 := count_occ _ _ _. set c2 := count_occ _ _ _. suff H_suff: c1 = c2 by rewrite H_suff. rewrite /c1 /c2 {c1 c2}. apply: count_occ_filterMap_pt_map_msg_eq => //. exact: Tree_pt_map_msg_injective. Qed. Lemma Tree_le_one_fail : forall net failed tr, step_ordered_dynamic_failure_star step_ordered_dynamic_failure_init (failed, net) tr -> forall n, In n (odnwNodes net) -> ~ In n failed -> forall (n' : name), count_occ Msg_eq_dec (net.(odnwPackets) n' n) Fail <= 1. Proof. move => net failed tr H_st n H_n H_f n'. have H_st' := Tree_Failed_pt_mapped_simulation_star_1 H_st. have IH := FRC.Failure_le_one_fail H_st'. rewrite /= map_id /id /= in IH. have IH' := IH _ H_n H_f n'. move: IH'. set c1 := count_occ _ _ _. set c2 := count_occ _ _ _. suff H_suff: c1 = c2 by rewrite H_suff. rewrite /c1 /c2 {c1 c2}. apply: count_occ_filterMap_pt_map_msg_eq => //. exact: Tree_pt_map_msg_injective. Qed. Lemma Tree_in_new_failed_incoming_fail : forall onet failed tr, step_ordered_dynamic_failure_star step_ordered_dynamic_failure_init (failed, onet) tr -> forall n, In n (odnwNodes onet) -> ~ In n failed -> forall n', In n' failed -> In New (onet.(odnwPackets) n' n) -> In Fail (onet.(odnwPackets) n' n). Proof. move => net failed tr H_st n H_n H_f n' H_f' H_in. have H_st' := Tree_Failed_pt_mapped_simulation_star_1 H_st. have H_inv' := FRC.Failure_in_new_failed_incoming_fail H_st'. rewrite /= map_id /id /= in H_inv'. have IH := H_inv' _ H_n H_f _ H_f'. move: IH. set in_pt := In FR.Fail _. move => IH. suff H_suff: in_pt. move: H_suff. apply: in_filterMap_pt_map_msg_in_msg => //. exact: Tree_pt_map_msg_injective. apply: IH. move: H_in. exact: in_msg_filterMap_pt_map_msg. Qed. Lemma Tree_in_adj_adjacent_to : forall onet failed tr, step_ordered_dynamic_failure_star step_ordered_dynamic_failure_init (failed, onet) tr -> forall n n', In n (odnwNodes onet) -> ~ In n failed -> forall d, onet.(odnwState) n = Some d -> NSet.In n' d.(adjacent) -> adjacent_to n' n. Proof. move => net failed tr H_st n n' H_n H_f d H_eq. have H_st' := Tree_Failed_pt_mapped_simulation_star_1 H_st. have H_inv' := @FRC.Failure_in_adj_adjacent_to _ _ _ H_st' n n'. rewrite /= map_id /id /= H_eq in H_inv'. have H_inv'' := H_inv' H_n H_f {| FR.adjacent := d.(adjacent) |}. exact: H_inv''. Qed. 
Lemma Tree_in_adj_or_incoming_fail : forall onet failed tr, step_ordered_dynamic_failure_star step_ordered_dynamic_failure_init (failed, onet) tr -> forall n n', In n (odnwNodes onet) -> ~ In n failed -> forall d, onet.(odnwState) n = Some d -> NSet.In n' d.(adjacent) -> (In n' (odnwNodes onet) /\ ~ In n' failed) \/ (In n' (odnwNodes onet) /\ In n' failed /\ In Fail (onet.(odnwPackets) n' n)). Proof. move => net failed tr H_st n n' H_n H_f d H_eq. have H_st' := Tree_Failed_pt_mapped_simulation_star_1 H_st. have H_inv' := @FRC.Failure_in_adj_or_incoming_fail _ _ _ H_st' n n'. rewrite /= map_id /id /= H_eq in H_inv'. have H_inv'' := H_inv' H_n H_f {| FR.adjacent := d.(adjacent) |}. rewrite /= in H_inv''. move => H_ins. case (H_inv'' (Logic.eq_refl _) H_ins) => H_in. break_and. by left. break_and. right. split => //. split => //. move: H1. apply: in_filterMap_pt_map_msg_in_msg => //. exact: Tree_pt_map_msg_injective. Qed. Lemma Tree_new_incoming_not_in_adj : forall net failed tr, step_ordered_dynamic_failure_star step_ordered_dynamic_failure_init (failed, net) tr -> forall n, In n (odnwNodes net) -> ~ In n failed -> forall (n' : name), In New (net.(odnwPackets) n' n) -> forall d, net.(odnwState) n = Some d -> ~ NSet.In n' d.(adjacent). Proof. move => net failed tr H_st n H_n H_f n' H_in d H_eq. have H_st' := Tree_Failed_pt_mapped_simulation_star_1 H_st. have H_inv' := @FRC.Failure_new_incoming_not_in_adj _ _ _ H_st' n _ _ n' _ {| FR.adjacent := d.(adjacent) |}. rewrite /= map_id /id /= H_eq in H_inv'. apply: H_inv' => //. move: H_in. exact: in_msg_filterMap_pt_map_msg. Qed. Lemma Tree_adjacent_to_no_incoming_new_n_adjacent : forall net failed tr, step_ordered_dynamic_failure_star step_ordered_dynamic_failure_init (failed, net) tr -> forall n n', In n net.(odnwNodes) -> ~ In n failed -> In n' net.(odnwNodes) -> ~ In n' failed -> adjacent_to n' n -> forall d, odnwState net n = Some d -> ~ In New (odnwPackets net n' n) -> NSet.In n' (adjacent d). Proof. move => net failed tr H_st n n' H_n H_f H_n' H_f' H_adj d H_eq H_in. have H_st' := Tree_Failed_pt_mapped_simulation_star_1 H_st. have H_inv' := @FRC.Failure_adjacent_to_no_incoming_new_n_adjacent _ _ _ H_st' n n'. rewrite /= map_id /id /= H_eq in H_inv'. have H_inv'' := H_inv' H_n H_f H_n' H_f' H_adj {| FR.adjacent := d.(adjacent) |}. apply: H_inv'' => //. move => H_in'. case: H_in. apply: in_filterMap_pt_map_msg_in_msg => //. exact: Tree_pt_map_msg_injective. Qed. Lemma Tree_incoming_fail_then_incoming_new_or_in_adjacent : forall net failed tr, step_ordered_dynamic_failure_star step_ordered_dynamic_failure_init (failed, net) tr -> forall n, In n net.(odnwNodes) -> ~ In n failed -> forall n', In Fail (net.(odnwPackets) n' n) -> forall d, net.(odnwState) n = Some d -> (In New (net.(odnwPackets) n' n) /\ ~ NSet.In n' d.(adjacent)) \/ (~ In New (net.(odnwPackets) n' n) /\ NSet.In n' d.(adjacent)). Proof. move => net failed tr H_st n H_n H_f n' H_in d H_eq. have H_st' := Tree_Failed_pt_mapped_simulation_star_1 H_st. have H_inv' := @FRC.Failure_incoming_fail_then_incoming_new_or_in_adjacent _ _ _ H_st' n. rewrite /= map_id /id /= H_eq in H_inv'. have H_inv'' := H_inv' H_n H_f n' _ {| FR.adjacent := d.(adjacent) |} (Logic.eq_refl _). move: H_inv''. set f_in := In FR.Fail _. move => H_inv''. suff H_suff: f_in. concludes. case: H_inv'' => H_inv''. break_and. left. split => //. move: H. apply: in_filterMap_pt_map_msg_in_msg => //. exact: Tree_pt_map_msg_injective. break_and. right. split => //. move => H_in'. case: H. move: H_in'. 
exact: in_msg_filterMap_pt_map_msg. rewrite /f_in. move: H_in. exact: in_msg_filterMap_pt_map_msg. Qed. Lemma Tree_incoming_fail_then_new_or_adjacent : forall net failed tr, step_ordered_dynamic_failure_star step_ordered_dynamic_failure_init (failed, net) tr -> forall n, In n net.(odnwNodes) -> ~ In n failed -> forall n', In Fail (net.(odnwPackets) n' n) -> forall d, net.(odnwState) n = Some d -> In New (net.(odnwPackets) n' n) \/ NSet.In n' (adjacent d). Proof. move => net failed tr H_st. move => n H_in_n H_in_f n' H_in d H_eq. have H_or := Tree_incoming_fail_then_incoming_new_or_in_adjacent H_st _ H_in_n H_in_f _ H_in H_eq. break_or_hyp; break_and; first by left. by right. Qed. Lemma Tree_head_fail_then_adjacent : forall net failed tr, step_ordered_dynamic_failure_star step_ordered_dynamic_failure_init (failed, net) tr -> forall n, In n net.(odnwNodes) -> ~ In n failed -> forall n', head (net.(odnwPackets) n' n) = Some Fail -> forall d, net.(odnwState) n = Some d -> NSet.In n' d.(adjacent). Proof. move => net failed tr H_st n H_n H_f n' H_eq d H_eq'. have H_st' := Tree_Failed_pt_mapped_simulation_star_1 H_st. have H_inv' := @FRC.Failure_head_fail_then_adjacent _ _ _ H_st' n. rewrite /= map_id /id /= H_eq' in H_inv'. have H_inv'' := H_inv' H_n H_f n' _ {| FR.adjacent := d.(adjacent) |} (Logic.eq_refl _). apply: H_inv''. move: H_eq. exact: hd_error_filterMap_pt_map_msg. Qed. Lemma Tree_adjacent_or_incoming_new_reciprocal : forall net failed tr, step_ordered_dynamic_failure_star step_ordered_dynamic_failure_init (failed, net) tr -> forall n n', In n net.(odnwNodes) -> ~ In n failed -> In n' net.(odnwNodes) -> ~ In n' failed -> forall d0, odnwState net n = Some d0 -> forall d1, odnwState net n' = Some d1 -> (NSet.In n' d0.(adjacent) \/ In New (net.(odnwPackets) n' n)) -> NSet.In n d1.(adjacent) \/ In New (net.(odnwPackets) n n'). Proof. move => net failed tr H_st n n' H_n H_f H_n' H_f' d H_eq d' H_eq'. have H_st' := Tree_Failed_pt_mapped_simulation_star_1 H_st. have H_inv' := @FRC.Failure_adjacent_or_incoming_new_reciprocal _ _ _ H_st' n n'. rewrite /= map_id /id /= H_eq H_eq' in H_inv'. have H_inv'' := H_inv' H_n H_f H_n' H_f' {| FR.adjacent := d.(adjacent) |} (Logic.eq_refl _) {| FR.adjacent := d'.(adjacent) |} (Logic.eq_refl _). rewrite /= in H_inv''. move => H_in. move: H_inv''. set inn := In FR.New _. set inn' := In FR.New _. move => H_inv''. case: H_in => H_in. have H_or: NSet.In n' d.(adjacent) \/ inn by left. concludes. case: H_inv'' => H_inv''; first by left. right. move: H_inv''. apply: in_filterMap_pt_map_msg_in_msg => //. exact: Tree_pt_map_msg_injective. suff H_suff: inn. have H_or: NSet.In n' (adjacent d) \/ inn by right. concludes. case: H_inv'' => H_inv''; first by left. right. move: H_inv''. apply: in_filterMap_pt_map_msg_in_msg => //. exact: Tree_pt_map_msg_injective. move: H_in. exact: in_msg_filterMap_pt_map_msg. Qed. Lemma Tree_adjacent_then_adjacent_or_new_incoming : forall net failed tr, step_ordered_dynamic_failure_star step_ordered_dynamic_failure_init (failed, net) tr -> forall n n', In n net.(odnwNodes) -> ~ In n failed -> In n' net.(odnwNodes) -> ~ In n' failed -> forall d0, odnwState net n = Some d0 -> forall d1, odnwState net n' = Some d1 -> NSet.In n' d0.(adjacent) -> NSet.In n d1.(adjacent) \/ In New (net.(odnwPackets) n n'). Proof. move => net failed tr H_st n n' H_n H_f H_n' H_f' d H_eq d' H_eq' H_ins. have H_st' := Tree_Failed_pt_mapped_simulation_star_1 H_st. have H_inv' := @FRC.Failure_adjacent_then_adjacent_or_new_incoming _ _ _ H_st' n n'. 
rewrite /= map_id /id /= H_eq H_eq' in H_inv'. have H_inv'' := H_inv' H_n H_f H_n' H_f' {| FR.adjacent := d.(adjacent) |} (Logic.eq_refl _) {| FR.adjacent := d'.(adjacent) |} (Logic.eq_refl _). rewrite /= in H_inv''. concludes. break_or_hyp; first by left. right. move: H. apply: in_filterMap_pt_map_msg_in_msg => //. exact: Tree_pt_map_msg_injective. Qed. Lemma Tree_fail_head_no_new : forall net failed tr, step_ordered_dynamic_failure_star step_ordered_dynamic_failure_init (failed, net) tr -> forall n, In n net.(odnwNodes) -> ~ In n failed -> forall n', head (net.(odnwPackets) n' n) = Some Fail -> ~ In New (net.(odnwPackets) n' n). Proof. move => net failed tr H_st n H_n H_f n' H_eq. have H_st' := Tree_Failed_pt_mapped_simulation_star_1 H_st. have H_inv' := @FRC.Failure_fail_head_no_new _ _ _ H_st' n. rewrite /= map_id /id /= in H_inv'. have H_inv'' := H_inv' H_n H_f n'. move => H_in. move: H_inv''. set hde := hd_error _ = _. move => H_inv''. suff H_suff: hde. concludes. case: H_inv''. move: H_in. exact: in_msg_filterMap_pt_map_msg. move: H_eq. exact: hd_error_filterMap_pt_map_msg. Qed. Lemma Tree_failed_adjacent_fail : forall net failed tr, step_ordered_dynamic_failure_star step_ordered_dynamic_failure_init (failed, net) tr -> forall n, In n net.(odnwNodes) -> ~ In n failed -> forall n', In n' failed -> forall d0, odnwState net n = Some d0 -> (NSet.In n' d0.(adjacent) \/ In New (net.(odnwPackets) n' n)) -> In Fail (net.(odnwPackets) n' n). Proof. move => net failed tr H_st n H_n H_f n' H_f' d H_eq H_or. have H_st' := Tree_Failed_pt_mapped_simulation_star_1 H_st. have H_inv' := @FRC.Failure_failed_adjacent_fail _ _ _ H_st' n. rewrite /= map_id /id /= H_eq in H_inv'. have H_inv'' := H_inv' H_n H_f _ H_f' {| FR.adjacent := d.(adjacent) |} (Logic.eq_refl _). rewrite /= in H_inv''. move: H_inv''. set inn := In FR.Fail _. move => H_inv''. suff H_suff: inn. move: H_suff. apply: in_filterMap_pt_map_msg_in_msg => //. exact: Tree_pt_map_msg_injective. apply: H_inv''. case: H_or => H_or; first by left. right. move: H_or. exact: in_msg_filterMap_pt_map_msg. Qed. Lemma Tree_in_new_then_adjacent : forall net failed tr, step_ordered_dynamic_failure_star step_ordered_dynamic_failure_init (failed, net) tr -> forall n, In n net.(odnwNodes) -> ~ In n failed -> forall n', In New (odnwPackets net n' n) -> adjacent_to n' n. Proof. move => net failed tr H_st n H_n H_f n' H_in. have H_st' := Tree_Failed_pt_mapped_simulation_star_1 H_st. have H_inv' := @FRC.Failure_in_new_then_adjacent _ _ _ H_st' n. rewrite /= map_id /id /= in H_inv'. apply: (H_inv' H_n H_f n'). move: H_in. exact: in_msg_filterMap_pt_map_msg. Qed. Lemma Tree_inactive_not_in_adjacent : forall net failed tr, step_ordered_dynamic_failure_star step_ordered_dynamic_failure_init (failed, net) tr -> forall n, In n net.(odnwNodes) -> ~ In n failed -> forall n', ~ In n' (odnwNodes net) -> forall d0, odnwState net n = Some d0 -> ~ NSet.In n' d0.(adjacent). Proof. move => net failed tr H_st n H_in H_f n' H_n' d0 H_eq. have H_st' := Tree_Failed_pt_mapped_simulation_star_1 H_st. have H_inv' := @FRC.Failure_inactive_not_in_adjacent _ _ _ H_st' n _ _ n' _ {| FR.adjacent := d0.(adjacent) |}. rewrite /= map_id /id /= H_eq /= in H_inv'. by repeat concludes. Qed. Lemma Tree_self_channel_empty : forall onet failed tr, step_ordered_dynamic_failure_star step_ordered_dynamic_failure_init (failed, onet) tr -> forall n, onet.(odnwPackets) n n = []. Proof. move => net failed tr H. change net with (snd (failed, net)). remember step_ordered_dynamic_failure_init as y in *. 
move: Heqy. induction H using refl_trans_1n_trace_n1_ind => H_init {failed}; first by rewrite H_init /step_ordered_failure_init /=. concludes. match goal with | [ H : step_ordered_dynamic_failure _ _ _ |- _ ] => invc H end; rewrite /=. - move => n. case (name_eq_dec h n) => H_dec; last first. rewrite collate_ls_neq_to //. by rewrite collate_neq. find_reverse_rewrite. rewrite collate_ls_not_in; last by apply: not_in_not_in_filter_rel; eauto using in_remove_all_was_in. rewrite collate_map2snd_not_in; last by eauto using in_remove_all_was_in. by find_higher_order_rewrite. - find_apply_lem_hyp net_handlers_NetHandler. net_handler_cases => //=. * rewrite /update2 /=. break_if; last by find_higher_order_rewrite. break_and; repeat find_rewrite. by find_higher_order_rewrite. * rewrite /update2 /=. break_if; last by find_higher_order_rewrite. break_and; repeat find_rewrite. by find_higher_order_rewrite. * rewrite /update2 /=. break_if; last by find_higher_order_rewrite. break_and; repeat find_rewrite. by find_higher_order_rewrite. * rewrite /update2 /=. break_if; last by find_higher_order_rewrite. break_and; repeat find_rewrite. by find_higher_order_rewrite. * rewrite /update2 /=. break_if; last by find_higher_order_rewrite. break_and; repeat find_rewrite. by find_higher_order_rewrite. * rewrite /update2 /=. break_if; last by find_higher_order_rewrite. break_and; repeat find_rewrite. by find_higher_order_rewrite. * rewrite /update2 /=. break_if; last by find_higher_order_rewrite. break_and; repeat find_rewrite. by find_higher_order_rewrite. * rewrite /update2 /=. break_if; last by find_higher_order_rewrite. break_and; repeat find_rewrite. by find_higher_order_rewrite. * rewrite /update2 /=. break_if; first by break_and; subst; repeat find_higher_order_rewrite. by break_if; first by break_and; subst; break_or_hyp. * rewrite /update2 /=. break_if; last by find_higher_order_rewrite. break_and; repeat find_rewrite. by find_higher_order_rewrite. * rewrite /update2 /=. break_if; first by break_and; subst; repeat find_higher_order_rewrite. by break_if; first by break_and; subst; break_or_hyp. - find_apply_lem_hyp input_handlers_IOHandler. io_handler_cases => //=. case (name_eq_dec h n) => H_dec; last by rewrite collate_neq. subst. have H_ins := Tree_node_not_adjacent_self H H3 H2 H4. rewrite /level_adjacent NSet.fold_spec /flip /=. have H_ins': ~ In n (NSet.elements d.(adjacent)). move => H_ins'. case: H_ins. by apply NSetFacts.elements_2; auto. elim: NSet.elements H_ins' => //=. move => n' ns IH H_in. have H_neq: n' <> n by auto. have H_in': ~ In n ns by auto. rewrite (@fold_left_level_fold_eq Tree_TreeMsg) /=. rewrite collate_app /=. rewrite /update2. break_if; first by break_and; subst. by rewrite IH. - move => n. case (name_eq_dec h n) => H_dec; last by rewrite collate_neq; first by find_higher_order_rewrite. find_reverse_rewrite. rewrite collate_map2snd_not_related //. exact: adjacent_to_irreflexive. Qed. Lemma Tree_inactive_no_incoming : forall onet failed tr, step_ordered_dynamic_failure_star step_ordered_dynamic_failure_init (failed, onet) tr -> forall n, ~ In n (odnwNodes onet) -> forall n', onet.(odnwPackets) n' n = []. Proof. move => net failed tr H. change net with (snd (failed, net)). remember step_ordered_dynamic_failure_init as y in *. move: Heqy. induction H using refl_trans_1n_trace_n1_ind => H_init {failed}; first by rewrite H_init. concludes. match goal with | [ H : step_ordered_dynamic_failure _ _ _ |- _ ] => invc H end; rewrite /=. - move => n H_in n'. have H_neq: h <> n by auto. 
have H_not_in: ~ In n net0.(odnwNodes) by auto. rewrite collate_ls_neq_to //. case (name_eq_dec h n') => H_dec. rewrite -H_dec. rewrite collate_map2snd_not_in; last by eauto using in_remove_all_was_in. by auto. rewrite collate_neq //. by auto. - find_apply_lem_hyp net_handlers_NetHandler. net_handler_cases => //=. * rewrite /update2 /=. break_if; break_and; last by eauto. by repeat find_rewrite; eauto. * rewrite /update2 /=. break_if; break_and; last by eauto. by repeat find_rewrite; eauto. * rewrite /update2 /=. break_if; break_and; last by eauto. by repeat find_rewrite; eauto. * rewrite /update2 /=. break_if; break_and; last by eauto. by repeat find_rewrite; eauto. * rewrite /update2 /=. break_if; break_and; last by eauto. by repeat find_rewrite; eauto. * rewrite /update2 /=. break_if; break_and; last by eauto. by repeat find_rewrite; eauto. * rewrite /update2 /=. break_if; break_and; last by eauto. by repeat find_rewrite; eauto. * rewrite /update2 /=. break_if; break_and; last by eauto. by repeat find_rewrite; eauto. * rewrite /update2 /=. break_if; first by break_and; subst; rewrite (@ordered_dynamic_no_outgoing_uninitialized _ _ _ _ Tree_FailMsgParams _ _ _ H) in H5. break_if; first by break_and; subst; rewrite (@ordered_dynamic_no_outgoing_uninitialized _ _ _ _ Tree_FailMsgParams _ _ _ H) in H5. by rewrite IHrefl_trans_1n_trace1. * rewrite /update2 /=. break_if; break_and; last by eauto. by repeat find_rewrite; eauto. * rewrite /update2 /=. break_if; first by break_and; subst; rewrite (@ordered_dynamic_no_outgoing_uninitialized _ _ _ _ Tree_FailMsgParams _ _ _ H) in H5. break_if; first by break_and; subst; rewrite (@ordered_dynamic_no_outgoing_uninitialized _ _ _ _ Tree_FailMsgParams _ _ _ H) in H5. by rewrite IHrefl_trans_1n_trace1. - find_apply_lem_hyp input_handlers_IOHandler. io_handler_cases => //=. * by auto. * case (name_eq_dec h n') => H_dec; last by rewrite collate_neq // IHrefl_trans_1n_trace1. subst. have H_ins: ~ NSet.In n d.(adjacent). move => H_ins. have H_or := Tree_in_adj_or_incoming_fail H _ H3 H2 H4 H_ins. by break_or_hyp; break_and. rewrite /level_adjacent NSet.fold_spec /flip /=. have H_ins': ~ In n (NSet.elements d.(adjacent)). move => H_ins'. case: H_ins. by apply NSetFacts.elements_2; auto. elim: NSet.elements H_ins' => /=; first by move => H_in; rewrite IHrefl_trans_1n_trace1. move => n0 ns IH H_in. have H_neq: n0 <> n by auto. have H_in': ~ In n ns by auto. rewrite (@fold_left_level_fold_eq Tree_TreeMsg) /=. rewrite collate_app /=. rewrite /update2. break_if; first by break_and; subst. by rewrite IH. * by auto. * by auto. * by auto. - move => n H_in n'. have H_neq: h <> n by move => H_eq; rewrite -H_eq in H_in. case (name_eq_dec h n') => H_dec. rewrite -H_dec. rewrite collate_map2snd_not_in; last by eauto using in_remove_all_was_in. by auto. rewrite collate_neq //. by auto. Qed. (* bfs_net_ok_root_levels_empty *) Lemma Tree_root_levels_empty : forall net failed tr, step_ordered_dynamic_failure_star step_ordered_dynamic_failure_init (failed, net) tr -> forall n, In n net.(odnwNodes) -> ~ In n failed -> root n -> forall d, net.(odnwState) n = Some d -> d.(levels) = NMap.empty lv. Proof. move => net failed tr H. change failed with (fst (failed, net)). change net with (snd (failed, net)) at 1 3. remember step_ordered_dynamic_failure_init as y in *. move: Heqy. induction H using refl_trans_1n_trace_n1_ind => H_init {failed}; first by rewrite H_init. concludes. match goal with | [ H : step_ordered_dynamic_failure _ _ _ |- _ ] => invc H end; simpl in *. 
- move => n H_in_n H_in_f H_r d H_d. destruct_update; first by find_injection. break_or_hyp => //. by eauto. - find_apply_lem_hyp net_handlers_NetHandler. net_handler_cases => //=. * destruct_update; last by eauto. find_injection. find_rewrite. by eauto. * by destruct_update; eauto. * by destruct_update; eauto. * destruct_update; last by eauto. find_injection. by eauto. * by destruct_update; eauto. * by destruct_update; eauto. * by destruct_update; eauto. * by destruct_update; eauto. * destruct_update; last by eauto. find_injection. find_rewrite. by eauto. * by destruct_update; eauto. * by destruct_update; eauto. - find_apply_lem_hyp input_handlers_IOHandler. io_handler_cases => //=; try by eauto. * destruct_update; last by eauto. find_injection. by eauto. * by destruct_update; eauto. * by destruct_update; eauto. * destruct_update; last by eauto. find_injection. by eauto. * by destruct_update; eauto. - move => n H_in_n H_in_f H_r d H_eq. have H_neq: h <> n by auto. have H_in: ~ In n failed by auto. by eauto. Qed. (* bfs_net_ok_root_levels_bot *) Lemma Tree_root_levels_bot : forall net failed tr, step_ordered_dynamic_failure_star step_ordered_dynamic_failure_init (failed, net) tr -> forall n, In n net.(odnwNodes) -> ~ In n failed -> root n -> forall d, net.(odnwState) n = Some d -> forall n', NMap.find n' d.(levels) = None. Proof. move => net failed tr H_st. move => n H_in_n H_in_f H_r d H_d n'. have H_emp := Tree_root_levels_empty H_st H_in_n H_in_f H_r H_d. rewrite H_emp /=. apply NMapFacts.not_find_in_iff. move => H_in. by apply NMapFacts.empty_in_iff in H_in. Qed. (* in_after_all_fail_status *) Lemma Tree_in_after_all_fail_level : forall net failed tr, step_ordered_dynamic_failure_star step_ordered_dynamic_failure_init (failed, net) tr -> forall (n : name), In n net.(odnwNodes) -> ~ In n failed -> forall n', In n' net.(odnwNodes) -> forall lvo', before_all (Level lvo') Fail (net.(odnwPackets) n' n). Proof. move => net failed tr H. change failed with (fst (failed, net)). change net with (snd (failed, net)) at 1 3 4. remember step_ordered_dynamic_failure_init as y in *. move: Heqy. induction H using refl_trans_1n_trace_n1_ind => H_init {failed}; first by rewrite H_init. concludes. match goal with | [ H : step_ordered_dynamic_failure _ _ _ |- _ ] => invc H end; simpl in *. - move => n H_n H_f n' H_n' lvo'. break_or_hyp; break_or_hyp. * rewrite collate_ls_not_in; last by apply: not_in_not_in_filter_rel; eauto using in_remove_all_was_in. rewrite collate_map2snd_not_in; last by eauto using in_remove_all_was_in. by rewrite (Tree_self_channel_empty H). * rewrite collate_ls_not_in; last by apply: not_in_not_in_filter_rel; eauto using in_remove_all_was_in. case (adjacent_to_dec n' n) => H_dec; last first. rewrite collate_map2snd_not_related //. by rewrite (@ordered_dynamic_no_outgoing_uninitialized _ _ _ _ Tree_FailMsgParams _ _ _ H). have H_nd := @ordered_dynamic_nodes_no_dup _ _ _ _ Tree_FailMsgParams _ _ _ H. rewrite collate_map2snd_not_in_related //. rewrite (@ordered_dynamic_no_outgoing_uninitialized _ _ _ _ Tree_FailMsgParams _ _ _ H) //=. by left. * have H_neq: n <> n' by move => H_eq; find_reverse_rewrite. case (adjacent_to_dec n n') => H_dec; last first. rewrite collate_ls_not_related //. rewrite collate_neq //. by rewrite (Tree_inactive_no_incoming H). case (in_dec name_eq_dec n' failed) => H_dec'; last first. have H_nd := @ordered_dynamic_nodes_no_dup _ _ _ _ Tree_FailMsgParams _ _ _ H. rewrite collate_ls_live_related //. rewrite collate_neq //. 
rewrite (Tree_inactive_no_incoming H) //=. by left. rewrite collate_ls_in_remove_all //. rewrite collate_neq //. by rewrite (Tree_inactive_no_incoming H). * have H_neq: h <> n by move => H_eq; find_reverse_rewrite. have H_neq': h <> n' by move => H_eq; repeat find_rewrite. rewrite collate_ls_neq_to //. rewrite collate_neq //. by eauto. - find_apply_lem_hyp net_handlers_NetHandler. net_handler_cases => //=; unfold update2 in *; break_if; break_and; subst_max; try by eauto. * have IH := IHrefl_trans_1n_trace1 _ H11 H13 _ H14 lvo'. find_rewrite. case: IH => IH; first exact: before_all_not_in_1. by break_and. * have IH := IHrefl_trans_1n_trace1 _ H12 H14 _ H15 lvo'. find_rewrite. case: IH => IH; first exact: before_all_not_in_1. by break_and. * have IH := IHrefl_trans_1n_trace1 _ H12 H14 _ H15 lvo'. find_rewrite. case: IH => IH; first exact: before_all_not_in_1. by break_and. * have IH := IHrefl_trans_1n_trace1 _ H6 H8 _ H9 lvo'. find_rewrite. case: IH => IH; first exact: before_all_not_in_1. by break_and. * have IH := IHrefl_trans_1n_trace1 _ H0 H8 _ H9 lvo'. find_rewrite. case: IH => IH; first exact: before_all_not_in_1. by break_and. * have IH := IHrefl_trans_1n_trace1 _ H6 H8 _ H9 lvo'. find_rewrite. case: IH => IH; first exact: before_all_not_in_1. by break_and. * have IH := IHrefl_trans_1n_trace1 _ H12 H14 _ H15 lvo'. find_rewrite. case: IH => IH; first exact: before_all_not_in_1. by break_and. * have IH := IHrefl_trans_1n_trace1 _ H12 H14 _ H15 lvo'. find_rewrite. case: IH => IH; first exact: before_all_not_in_1. by break_and. * have IH := IHrefl_trans_1n_trace1 _ H11 H13 _ H3 lvo'. have H_neq: n <> n'. move => H_eq. rewrite H_eq in H5. by rewrite (Tree_self_channel_empty H) in H5. break_if; first by break_and. apply: before_all_not_in_2. move => H_in. apply in_app_or in H_in. case: H_in => H_in; last by case: H_in. contradict H_in. by apply: Tree_not_failed_no_fail; eauto. * break_if; last by eauto. break_and; subst. have H_neq: n <> n' by break_or_hyp; auto. have IH := IHrefl_trans_1n_trace1 _ H11 H13 _ H14 lvo'. find_rewrite. simpl in *. break_or_hyp; last by break_and. exact: before_all_not_in_1. * have IH := IHrefl_trans_1n_trace1 _ H12 H14 _ H15 lvo'. find_rewrite. simpl in *. break_or_hyp; last by break_and. exact: before_all_not_in_1. * have H_neq: n <> n'. move => H_eq. rewrite H_eq in H5. by rewrite (Tree_self_channel_empty H) in H5. break_if; first by break_and; subst_max. apply: before_all_not_in_2. move => H_in. apply in_app_or in H_in. case: H_in => H_in; last by case: H_in. contradict H_in. by apply: Tree_not_failed_no_fail; eauto. * break_if; last by eauto. break_and; subst. have H_neq: n <> n' by break_or_hyp; auto. have IH := IHrefl_trans_1n_trace1 _ H7 H9 _ H10 lvo'. find_rewrite. simpl in *. break_or_hyp; last by break_and. exact: before_all_not_in_1. - find_apply_lem_hyp input_handlers_IOHandler. io_handler_cases => //=; try by eauto. have IH := IHrefl_trans_1n_trace1 _ H11 H13 _ H14 lvo'. case (name_eq_dec h n') => H_dec; last by rewrite collate_neq. subst. have H_f := Tree_not_failed_no_fail H _ H3 H2 n. apply before_all_not_in_2. rewrite /level_adjacent NSet.fold_spec /flip /=. move => H_in. case: H_f. move: H_in. elim: (NSet.elements _) => //=. rewrite /flip /= /level_fold. move => n'' ns IH' H_in. rewrite (@fold_left_level_fold_eq Tree_TreeMsg) in H_in. rewrite (@fold_left_level_fold_eq Tree_TreeMsg) in IH'. rewrite app_nil_r in IH'. rewrite collate_app in H_in. apply IH'. rewrite /tree_level /= in H_in. 
update2_destruct_hyp; find_inversion; subst; rewrite_update2. - apply in_app_or in H_in. by case: H_in => H_in; last by case: H_in. - have H_neq: n'' <> n by move => H_eq; find_rewrite. rewrite_update2. by find_rewrite. - move => n H_n H_f n' H_n' lvo'. have H_neq: h <> n by auto. have H_f': ~ In n failed by auto. case (name_eq_dec h n') => H_dec; last first. rewrite collate_neq //. by eauto. subst_max. case (adjacent_to_dec n' n) => H_dec; last first. rewrite collate_map2snd_not_related //. by eauto. rewrite collate_map2snd_not_in_related //. apply: before_all_neq_append => //. by eauto. exact: @ordered_dynamic_nodes_no_dup _ _ _ _ Tree_FailMsgParams _ _ _ H. Qed. Lemma Tree_in_level_adjacent_or_incoming_new : forall net failed tr, step_ordered_dynamic_failure_star step_ordered_dynamic_failure_init (failed, net) tr -> forall n, In n net.(odnwNodes) -> ~ In n failed -> forall n', In n' net.(odnwNodes) -> forall lvo', In (Level lvo') (net.(odnwPackets) n' n) -> forall d, net.(odnwState) n = Some d -> NSet.In n' d.(adjacent) \/ In New (net.(odnwPackets) n' n). Proof. move => net failed tr H. change failed with (fst (failed, net)). change net with (snd (failed, net)) at 1 3 4 5 6. remember step_ordered_dynamic_failure_init as y in *. move: Heqy. induction H using refl_trans_1n_trace_n1_ind => H_init {failed}; first by rewrite H_init. concludes. match goal with | [ H : step_ordered_dynamic_failure _ _ _ |- _ ] => invcs H end. - move => n H_n H_f n' H_n' lvo'. break_or_hyp; break_or_hyp. * rewrite collate_ls_not_in; last by apply: not_in_not_in_filter_rel; eauto using in_remove_all_was_in. rewrite collate_map2snd_not_in; last by eauto using in_remove_all_was_in. by rewrite (Tree_self_channel_empty H). * rewrite collate_ls_not_in; last by apply: not_in_not_in_filter_rel; eauto using in_remove_all_was_in. case (adjacent_to_dec n' n) => H_dec; last first. rewrite collate_map2snd_not_related //. by rewrite (@ordered_dynamic_no_outgoing_uninitialized _ _ _ _ Tree_FailMsgParams _ _ _ H). have H_nd := @ordered_dynamic_nodes_no_dup _ _ _ _ Tree_FailMsgParams _ _ _ H. rewrite collate_map2snd_not_in_related //. rewrite (@ordered_dynamic_no_outgoing_uninitialized _ _ _ _ Tree_FailMsgParams _ _ _ H) //=. move => H_or. by break_or_hyp. * have H_neq: n <> n' by move => H_eq; find_reverse_rewrite. case (adjacent_to_dec n n') => H_dec; last first. rewrite collate_ls_not_related //. rewrite collate_neq //. by rewrite (Tree_inactive_no_incoming H). case (in_dec name_eq_dec n' failed) => H_dec'; last first. have H_nd := @ordered_dynamic_nodes_no_dup _ _ _ _ Tree_FailMsgParams _ _ _ H. rewrite collate_ls_live_related //. rewrite collate_neq //. rewrite (Tree_inactive_no_incoming H) //=. move => H_or. by break_or_hyp. rewrite collate_ls_in_remove_all //. rewrite collate_neq //. by rewrite (Tree_inactive_no_incoming H). * have H_neq: h <> n by move => H_eq; find_reverse_rewrite. have H_neq': h <> n' by move => H_eq; repeat find_rewrite. rewrite collate_ls_neq_to //. rewrite collate_neq //. rewrite_update. by eauto. - intros. find_apply_lem_hyp net_handlers_NetHandler. net_handler_cases => //=; simpl in *. + destruct (name_eq_dec to n), (name_eq_dec from n'); subst; rewrite_update; rewrite_update2; find_inversion. * assert (H_in: In (Level lvo') (odnwPackets net0 n' n)). { repeat find_rewrite. auto with datatypes. } eapply IHrefl_trans_1n_trace1 with (d:=d) in H_in; eauto. break_or_hyp; last by find_rewrite; simpl in *; break_or_hyp; last by right. exfalso. 
match goal with | [ H : refl_trans_1n_trace _ _ (?failed, net0) ?tr |- _ ] => assert (H_step: step_ordered_dynamic_failure_star step_ordered_dynamic_failure_init (failed, net0) tr) by auto; pose proof (Tree_in_after_all_fail_level H_step n) as H_after; simpl in H_after end. assert (before_all (Level lvo') Fail (odnwPackets net0 n' n)) by eauto. find_rewrite. simpl in *; break_or_hyp; break_and; by auto. * match goal with | [H : In (Level lvo') (odnwPackets _ n' n) |- _ ] => eapply IHrefl_trans_1n_trace1 with (d:=d) in H; auto end. break_or_hyp. -- left. find_rewrite. apply NSet.remove_spec; by auto. -- by auto. * by eauto. * by eauto. + (* Fail case with broadcast = false *) destruct (name_eq_dec from n'), (name_eq_dec to n); subst; rewrite_update2; rewrite_update; try eauto. * exfalso. assert (before_all (Level lvo') Fail (odnwPackets net0 n' n)) by eauto using Tree_in_after_all_fail_level. repeat find_rewrite. find_eapply_lem_hyp before_all_head_not_in; congruence. * find_inversion. match goal with | [H : In (Level lvo') (odnwPackets _ n' n) |- _ ] => eapply IHrefl_trans_1n_trace1 with (d:=d) in H; auto end. break_or_hyp; [left|by auto]. find_injection. repeat find_rewrite. apply NSet.remove_spec; by auto. + (* Fail case with broadcast = true (same proof) *) destruct (name_eq_dec from n'), (name_eq_dec to n); subst; rewrite_update2; rewrite_update; try eauto. * exfalso. assert (before_all (Level lvo') Fail (odnwPackets net0 n' n)) by eauto using Tree_in_after_all_fail_level. repeat find_rewrite. find_eapply_lem_hyp before_all_head_not_in; congruence. * find_inversion. match goal with | [H : In (Level lvo') (odnwPackets _ n' n) |- _ ] => eapply IHrefl_trans_1n_trace1 with (d:=d) in H; auto end. break_or_hyp; [left|by auto]. find_rewrite. apply NSet.remove_spec; by auto. + destruct (name_eq_dec from n'), (name_eq_dec to n); subst; rewrite_update2; rewrite_update; eauto. * assert (NSet.In n' (adjacent d) \/ In New (odnwPackets net0 n' n)). by (eapply IHrefl_trans_1n_trace1; eauto; repeat find_rewrite; eauto with datatypes). break_or_hyp; [left|]; first by find_inversion. find_rewrite. simpl in *. by break_or_hyp; last by right. * assert (NSet.In n' (adjacent d) \/ In New (odnwPackets net0 n' n)). by (eapply IHrefl_trans_1n_trace1; eauto; repeat find_rewrite; eauto with datatypes). break_or_hyp. -- left. by find_inversion. -- find_rewrite. right. by eauto using In_cons_neq. + destruct (name_eq_dec from n'), (name_eq_dec to n); subst; rewrite_update2; rewrite_update; try find_injection; eauto. * assert (NSet.In n' (adjacent d) \/ In New (odnwPackets net0 n' n)). by (eapply IHrefl_trans_1n_trace1; eauto; repeat find_rewrite; eauto with datatypes). break_or_hyp; [left|]; first by repeat find_rewrite. find_rewrite. simpl in *. by break_or_hyp; last by right. * assert (NSet.In n' (adjacent d) \/ In New (odnwPackets net0 n' n)). by (eapply IHrefl_trans_1n_trace1; eauto; repeat find_rewrite; eauto with datatypes). break_or_hyp. -- repeat find_rewrite. auto. -- find_rewrite. right. eauto || eapply In_cons_neq; eauto || congruence. + destruct (name_eq_dec from n'), (name_eq_dec to n); subst; rewrite_update2; rewrite_update; try find_injection; eauto. * assert (NSet.In n' (adjacent d) \/ In New (odnwPackets net0 n' n)). by (eapply IHrefl_trans_1n_trace1; eauto; repeat find_rewrite; eauto with datatypes). break_or_hyp; [left|]; first by repeat find_rewrite. find_rewrite. simpl in *. by break_or_hyp; last by right. * assert (NSet.In n' (adjacent d) \/ In New (odnwPackets net0 n' n)). 
by (eapply IHrefl_trans_1n_trace1; eauto; repeat find_rewrite; eauto with datatypes). break_or_hyp. -- repeat find_rewrite. auto. -- find_rewrite. right. eauto || eapply In_cons_neq; eauto || congruence. + destruct (name_eq_dec from n'), (name_eq_dec to n); subst; rewrite_update2; rewrite_update; try find_injection; eauto. * assert (NSet.In n' (adjacent d) \/ In New (odnwPackets net0 n' n)). by (eapply IHrefl_trans_1n_trace1; eauto; repeat find_rewrite; eauto with datatypes). break_or_hyp; [left|]; first by repeat find_rewrite. find_rewrite. simpl in *. by break_or_hyp; last by right. * assert (NSet.In n' (adjacent d) \/ In New (odnwPackets net0 n' n)). by (eapply IHrefl_trans_1n_trace1; eauto; repeat find_rewrite; eauto with datatypes). break_or_hyp. -- repeat find_rewrite. auto. -- find_rewrite. right. eauto || eapply In_cons_neq; eauto || congruence. + destruct (name_eq_dec from n'), (name_eq_dec to n); subst; rewrite_update2; rewrite_update; try find_injection; eauto. * assert (NSet.In n' (adjacent d) \/ In New (odnwPackets net0 n' n)). by (eapply IHrefl_trans_1n_trace1; eauto; repeat find_rewrite; eauto with datatypes). break_or_hyp; [left|]; first by repeat find_rewrite. find_rewrite. simpl in *. by break_or_hyp; last by right. * assert (NSet.In n' (adjacent d) \/ In New (odnwPackets net0 n' n)). by (eapply IHrefl_trans_1n_trace1; eauto; repeat find_rewrite; eauto with datatypes). break_or_hyp. -- repeat find_rewrite. auto. -- find_rewrite. right. eauto || eapply In_cons_neq; eauto || congruence. + destruct (name_eq_dec from n'), (name_eq_dec to n); subst; rewrite_update2; rewrite_update; try find_injection; eauto. * assert (n' <> n). { intro H_eq; subst. rewrite_update2. match goal with | [ H : context[ _ ++ [Level (Some 0)] ] |- _ ] => eapply Tree_self_channel_empty with (n := n) in H; simpl in H; symmetry in H end. rewrite_update2. eapply app_cons_not_nil; eauto. } rewrite_update2; rewrite_update. assert (NSet.In n' (adjacent d) \/ In New (odnwPackets net0 n' n)). { eapply IHrefl_trans_1n_trace1 with (lvo':=lvo'); eauto. find_rewrite. auto with datatypes. } repeat find_rewrite. by auto with set. * assert (NSet.In n' (adjacent d) \/ In New (odnwPackets net0 n' n)) by eauto. break_or_hyp; [left|by auto]. find_rewrite. by auto with set. * move {H1}. case (name_eq_dec from n) => H_dec. subst_max. case (name_eq_dec to n') => H_dec. subst_max. rewrite_update2. have H_in: In New (odnwPackets net0 n n') by find_rewrite; left. have H_or: NSet.In n d.(adjacent) \/ In New (odnwPackets net0 n n') by right. have H_rec := Tree_adjacent_or_incoming_new_reciprocal H _ H3 H2 H0 H7 H4 H10 H_or. case: H_rec => H_rec; first by left. right. apply in_or_app. by left. rewrite_update2. by eauto. rewrite_update2. by eauto. + destruct (name_eq_dec from n'), (name_eq_dec to n); subst; rewrite_update2; rewrite_update; try find_injection; eauto. * assert (NSet.In n' (adjacent d) \/ In New (odnwPackets net0 n' n)). { eapply IHrefl_trans_1n_trace1 with (lvo':=lvo'); eauto. find_rewrite. auto with datatypes. } break_or_hyp; repeat find_rewrite; by auto with set. * assert (NSet.In n' (adjacent d) \/ In New (odnwPackets net0 n' n)) by eauto. break_or_hyp; repeat find_rewrite; by auto with set. + destruct (name_eq_dec from n'), (name_eq_dec to n); subst; rewrite_update2; rewrite_update; try find_injection; eauto. * assert (n' <> n). { intro H_eq; subst. rewrite_update2. match goal with | [ H : context[ _ ++ _ ] |- _ ] => eapply Tree_self_channel_empty with (n := n) in H; simpl in H; symmetry in H end. 
rewrite_update2. eapply app_cons_not_nil; eauto. } rewrite_update2; rewrite_update. assert (NSet.In n' (adjacent d) \/ In New (odnwPackets net0 n' n)). { eapply IHrefl_trans_1n_trace1 with (lvo':=lvo'); eauto. rewrite_update. find_rewrite. auto with datatypes. } break_or_hyp; [left|]; first by repeat find_rewrite; auto with set. repeat find_rewrite. by left; auto with set. * assert (NSet.In n' (adjacent d) \/ In New (odnwPackets net0 n' n)) by eauto. break_or_hyp; [left|by auto]. find_rewrite. by auto with set. * move {H1}. case (name_eq_dec from n) => H_dec. subst_max. case (name_eq_dec to n') => H_dec'. subst_max. rewrite_update2. have H_in: In New (odnwPackets net0 n n') by find_rewrite; left. have H_or: NSet.In n d.(adjacent) \/ In New (odnwPackets net0 n n') by right. have H_rec := Tree_adjacent_or_incoming_new_reciprocal H _ H3 H2 H0 H7 H4 H10 H_or. case: H_rec => H_rec; first by left. right. apply in_or_app. by left. rewrite_update2. by eauto. rewrite_update2. by eauto. - find_apply_lem_hyp input_handlers_IOHandler. io_handler_cases => //=; simpl in *; eauto. * eapply IHrefl_trans_1n_trace1; eauto. destruct (name_eq_dec h n); rewrite_update; try find_injection; auto. * destruct (name_eq_dec n n'); subst; rewrite_update; try find_inversion; [match goal with | [ H : In (Level lvo') ?chan, H' : refl_trans_1n_trace _ _ (_, ?net) _ |- _ ] => assert (H_empty: odnwPackets net n' n' = []) by eauto using Tree_self_channel_empty; simpl in H_empty; repeat find_rewrite; exfalso; auto with datatypes end|]. destruct (name_eq_dec h n'); subst; rewrite_update; try find_inversion; repeat find_rewrite. -- cut (NSet.In n' (adjacent d0) \/ In New (odnwPackets net0 n' n)); try by intuition eauto using collate_in_in. match goal with | [ H : In (Level lvo') (collate _ _ _ ?sends _ _) |- _ ] => destruct (In_dec name_eq_dec n (map fst sends)); [eauto|erewrite collate_not_in_eq in H; eauto] end. eapply Tree_adjacent_or_incoming_new_reciprocal; eauto. unfold level_adjacent, flip, level_fold in *. find_rewrite_lem NSet.fold_spec. find_apply_lem_hyp in_map_iff. break_exists_name pkt; break_and. find_eapply_lem_hyp in_fold_left_by_cons_in; (repeat decide equality || auto using name_eq_dec). break_or_hyp => //=. break_exists; break_and. cut (InA eq (fst pkt) (NSet.elements (adjacent d))); cut (In (fst pkt) (NSet.elements (adjacent d))); by auto with set || by repeat find_rewrite. -- destruct (name_eq_dec h n); subst; rewrite_update; try find_inversion. ** assert (NSet.In n' (adjacent d) \/ In New (odnwPackets net0 n' n)). { eapply IHrefl_trans_1n_trace1; eauto. erewrite <- collate_neq; eauto. } break_or_hyp; find_rewrite; auto using collate_in_in. ** assert (NSet.In n' (adjacent d0) \/ In New (odnwPackets net0 n' n)). { eapply IHrefl_trans_1n_trace1; eauto. erewrite <- collate_neq; eauto. } break_or_hyp; find_rewrite; auto using collate_in_in. * eapply IHrefl_trans_1n_trace1; eauto. destruct (name_eq_dec h n); rewrite_update; try find_injection; auto. * eapply IHrefl_trans_1n_trace1; eauto. destruct (name_eq_dec h n); rewrite_update; try find_injection; auto. * eapply IHrefl_trans_1n_trace1; eauto. destruct (name_eq_dec h n); rewrite_update; try find_injection; auto. - intros. cut (NSet.In n' (adjacent d) \/ In New (odnwPackets net0 n' n)); try by intuition auto using collate_in_in. eapply IHrefl_trans_1n_trace1 with (lvo':=lvo'); eauto. eapply collate_map2snd_in_neq_in_before; eauto || congruence. Qed. 
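(* The next lemma establishes the ordering before_all New (Level lvo'): in the channel from any node n' to a live node n, no New message occurs after a Level message. This ordering is used below in the proof of Tree_level_head_in_adjacent. *)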
Lemma Tree_in_before_all_new_level : forall net failed tr, step_ordered_dynamic_failure_star step_ordered_dynamic_failure_init (failed, net) tr -> forall n, In n net.(odnwNodes) -> ~ In n failed -> forall n', In n' net.(odnwNodes) -> forall lvo', before_all New (Level lvo') (net.(odnwPackets) n' n). Proof. move => net failed tr H_step. change failed with (fst (failed, net)). change net with (snd (failed, net)) at 1 3 4. remember step_ordered_dynamic_failure_init as y in *. move: Heqy. induction H_step using refl_trans_1n_trace_n1_ind => H_init; first by rewrite H_init. concludes. match goal with | [ H : step_ordered_dynamic_failure _ _ _ |- _ ] => invc H end; simpl in *. - move => n H_n H_f n' H_n' lvo'. break_or_hyp; break_or_hyp. * have H_rel: ~ adjacent_to n n by apply adjacent_to_irreflexive. rewrite collate_ls_not_related //. rewrite collate_map2snd_not_related //. by rewrite (@ordered_dynamic_no_outgoing_uninitialized _ _ _ _ Tree_FailMsgParams _ _ _ H_step1). * rewrite collate_ls_not_in; last by apply: not_in_not_in_filter_rel; eauto using in_remove_all_was_in. case (adjacent_to_dec n' n) => H_dec; last first. rewrite collate_map2snd_not_related //. by rewrite (@ordered_dynamic_no_outgoing_uninitialized _ _ _ _ Tree_FailMsgParams _ _ _ H_step1). have H_nd := @ordered_dynamic_nodes_no_dup _ _ _ _ Tree_FailMsgParams _ _ _ H_step1. rewrite collate_map2snd_not_in_related //. rewrite (@ordered_dynamic_no_outgoing_uninitialized _ _ _ _ Tree_FailMsgParams _ _ _ H_step1) //=. by left. * have H_neq: n <> n' by move => H_eq; subst_max. case (adjacent_to_dec n n') => H_dec; last first. rewrite collate_ls_not_related //. rewrite collate_neq //. by rewrite (Tree_inactive_no_incoming H_step1). case (in_dec name_eq_dec n' failed0) => H_dec'; last first. have H_nd := @ordered_dynamic_nodes_no_dup _ _ _ _ Tree_FailMsgParams _ _ _ H_step1. rewrite collate_ls_live_related //. rewrite collate_neq //. rewrite (Tree_inactive_no_incoming H_step1) //=. by left. rewrite collate_ls_in_remove_all //. rewrite collate_neq //. by rewrite (Tree_inactive_no_incoming H_step1). * have H_neq: h <> n by move => H_eq; find_reverse_rewrite. have H_neq': h <> n' by move => H_eq; repeat find_rewrite. rewrite collate_ls_neq_to //. rewrite collate_neq //. by eauto. - find_apply_lem_hyp net_handlers_NetHandler. net_handler_cases => //=; simpl in *; update2_destruct_max_simplify; repeat find_rewrite; auto; try tuple_inversion. * have IH := IHH_step1 _ H9 H11 _ H12 lvo'. find_rewrite. case: IH => IH; first exact: before_all_not_in_1. by break_and. * have IH := IHH_step1 _ H10 H12 _ H13 lvo'. find_rewrite. case: IH => IH; first exact: before_all_not_in_1. by break_and. * have IH := IHH_step1 _ H10 H12 _ H13 lvo'. find_rewrite. case: IH => IH; first exact: before_all_not_in_1. by break_and. * have IH := IHH_step1 _ H1 H0 _ H7 lvo'. find_rewrite. case: IH => IH; first exact: before_all_not_in_1. by break_and. * have IH := IHH_step1 _ H1 H0 _ H7 lvo'. find_rewrite. case: IH => IH; first exact: before_all_not_in_1. by break_and. * have IH := IHH_step1 _ H1 H0 _ H7 lvo'. find_rewrite. case: IH => IH; first exact: before_all_not_in_1. by break_and. * have IH := IHH_step1 _ H1 H0 _ H13 lvo'. find_rewrite. case: IH => IH; first exact: before_all_not_in_1. by break_and. * have IH := IHH_step1 _ H1 H0 _ H13 lvo'. find_rewrite. case: IH => IH; first exact: before_all_not_in_1. by break_and. * have H_neq: n <> n'. move => H_eq. subst_max. by find_rewrite_lem (Tree_self_channel_empty H_step1). move {H_step2}. rewrite_update2. 
have IH := IHH_step1 _ H9 H11 _ H12 lvo'. exact: before_all_neq_append. * move {H_step2}. destruct_update2; last by eauto. tuple_inversion. have IH := IHH_step1 _ H9 H11 _ H12 lvo'. find_rewrite. case: IH => IH; first exact: before_all_not_in_1. by break_and. * have IH := IHH_step1 _ H10 H12 _ H13 lvo'. find_rewrite. case: IH => IH; first exact: before_all_not_in_1. by break_and. * move {H_step2}. destruct_update2. + tuple_inversion. by find_rewrite_lem (Tree_self_channel_empty H_step1). + have H_neq: n <> n' by move => H_eq; subst_max. have IH := IHH_step1 _ H5 H7 _ H1 lvo'. exact: before_all_neq_append. * move {H_step2}. destruct_update2; last by eauto. tuple_inversion. have IH := IHH_step1 _ H5 H7 _ H8 lvo'. find_rewrite. case: IH => IH; first exact: before_all_not_in_1. by break_and. - find_apply_lem_hyp input_handlers_IOHandler. io_handler_cases => //=. * by eauto. * case (name_eq_dec h n') => H_dec; last by rewrite collate_neq; eauto. subst_max. have H_adj_in := Tree_in_adj_adjacent_to H_step1 _ H1 H0 H2. have H_adj_in_elts: forall k, In k (NSet.elements d.(adjacent)) -> adjacent_to k n'. move => k H_in. have H_adj_in_spec := NSet.elements_spec1 d.(adjacent) k. apply H_adj_in. apply H_adj_in_spec. apply InA_alt. by exists k. case (adjacent_to_dec n n') => H_adj. + case (in_dec Msg_eq_dec New (net0.(odnwPackets) n n')) => H_in. have H_n := Tree_new_incoming_not_in_adj H_step1 _ H1 H0 H_in H2. have H_inn: ~ In n (NSet.elements d.(adjacent)). move => H_inn. case: H_n. apply NSet.elements_spec1. apply InA_alt. by exists n. move: H_inn. rewrite /level_adjacent NSet.fold_spec /flip /=. elim: NSet.elements => //=; first by eauto. move => k ns IH H_inn. rewrite (@fold_left_level_fold_eq Tree_TreeMsg) /= {2}/level_fold /=. rewrite collate_app /=. have H_neq: k <> n by auto. have H_nin: ~ In n ns by auto. rewrite_update2. exact: IH. have H_adj_new := Tree_adjacent_to_no_incoming_new_n_adjacent H_step1 H12 H0 H9 H11 H_adj H2 H_in. apply NSet.elements_spec1 in H_adj_new. have H_nd := NSet.elements_spec2w d.(adjacent). apply InA_alt in H_adj_new. break_exists. break_and. have H_eq_x: x = n by []. subst_max. find_apply_lem_hyp in_split. break_exists. find_rewrite. have H_not_in_1: ~ In n x. move => H_nx. apply NoDupA_swap in H_nd; last exact: eq_equivalence. inversion H_nd; subst_max. case: H14. apply InA_alt. exists n. split => //. apply in_or_app. by left. have H_not_in_2: ~ In n x0. move => H_nx. apply NoDupA_swap in H_nd; last exact: eq_equivalence. inversion H_nd; subst_max. case: H14. apply InA_alt. exists n. split => //. apply in_or_app. by right. rewrite /level_adjacent NSet.fold_spec /flip /=. rewrite H8 /= fold_left_app /= {2}/level_fold /= (@fold_left_level_fold_eq Tree_TreeMsg). rewrite collate_not_in; last first. simpl. move: H_not_in_2 {H_nd H8}. elim: x0 => //=. move => k ns IH H_inn. rewrite (@fold_left_level_fold_eq Tree_TreeMsg) /= map_app /=. move => H_k. find_apply_lem_hyp in_app_or. break_or_hyp; last by simpl in *; break_or_hyp; eauto. by concludes. set e := ((n, _)). set l := fold_left _ _ _. have ->: e :: l = [e] ++ l by []. rewrite collate_not_in_rest; last first. rewrite /l {l} /=. move: H_not_in_1 {H_nd H8}. elim: x => //=. move => k ns IH H_inn. rewrite (@fold_left_level_fold_eq Tree_TreeMsg) /= map_app /=. move => H_k. find_apply_lem_hyp in_app_or. break_or_hyp; last by simpl in *; break_or_hyp; eauto. by concludes. rewrite /=. rewrite_update2. by apply: before_all_neq_append; eauto. + rewrite collate_not_in_eq; first by eauto. unfold level_adjacent. 
rewrite NSet.fold_spec /flip /=. move: H_adj_in_elts. elim: (NSet.elements _) => //=; first by auto. move => k ns IH H_adj_in_elts. rewrite /level_fold /=. rewrite (@fold_left_level_fold_eq Tree_TreeMsg) /=. rewrite map_app /=. move => H_in. apply in_app_or in H_in. case: H_in => H_in; last first. case: H_in => H_in //. subst_max. contradict H_adj. apply H_adj_in_elts. by left. contradict H_in. apply IH. move => k' H_in_k'. apply H_adj_in_elts. by right. * by eauto. * by eauto. * by eauto. - move => n H_n H_f n' H_n' lvo'. have H_neq: h <> n by auto. have H_in: ~ In n failed0 by auto. case (name_eq_dec h n') => H_dec; last by rewrite collate_neq; eauto. subst_max. case (adjacent_to_dec n' n) => H_dec'; last by rewrite collate_map2snd_not_related; eauto. have H_nd := @ordered_dynamic_nodes_no_dup _ _ _ _ Tree_FailMsgParams _ _ _ H_step1. rewrite collate_map2snd_not_in_related //. by apply: before_all_neq_append; eauto. Qed. (* bfs_net_ok_notin_adj_not_sent_status *) Lemma Tree_notin_adjacent_not_sent_level : forall net failed tr, step_ordered_dynamic_failure_star step_ordered_dynamic_failure_init (failed, net) tr -> forall n, In n net.(odnwNodes) -> ~ In n failed -> forall n', In n' net.(odnwNodes) -> ~ In n' failed -> forall d, net.(odnwState) n = Some d -> ~ NSet.In n' d.(adjacent) -> forall lvo', ~ In (Level lvo') (net.(odnwPackets) n n'). Proof. move => net failed tr H_step. change failed with (fst (failed, net)). change net with (snd (failed, net)) at 1 3 5 6. remember step_ordered_dynamic_failure_init as y in *. move: Heqy. induction H_step using refl_trans_1n_trace_n1_ind => H_init; first by rewrite H_init. concludes. match goal with | [ H : step_ordered_dynamic_failure _ _ _ |- _ ] => invc H end; simpl in *. - move => n H_n H_f n' H_n' H_f' d H_d H_ins lvo'. break_or_hyp; break_or_hyp. * have H_rel: ~ adjacent_to n n by apply adjacent_to_irreflexive. rewrite collate_ls_not_related //. rewrite collate_map2snd_not_related //. by rewrite (@ordered_dynamic_no_outgoing_uninitialized _ _ _ _ Tree_FailMsgParams _ _ _ H_step1). * rewrite_update. have H_neq: n' <> n by move => H_eq; subst_max. case (adjacent_to_dec n' n) => H_dec; last first. rewrite collate_ls_not_related //. rewrite collate_neq //. by rewrite (Tree_inactive_no_incoming H_step1). case (in_dec name_eq_dec n' failed0) => H_dec'; first by rewrite collate_ls_in_remove_all. have H_nd := @ordered_dynamic_nodes_no_dup _ _ _ _ Tree_FailMsgParams _ _ _ H_step1. rewrite collate_ls_live_related //. rewrite collate_neq //. rewrite (Tree_inactive_no_incoming H_step1) //=. move => H_in. by case: H_in. * rewrite_update. find_injection. simpl in *. have H_neq: n <> n' by move => H_eq; subst_max. rewrite collate_ls_neq_to //. case (adjacent_to_dec n n') => H_adj. have H_nd := @ordered_dynamic_nodes_no_dup _ _ _ _ Tree_FailMsgParams _ _ _ H_step1. rewrite collate_map2snd_not_in_related //. rewrite (@ordered_dynamic_no_outgoing_uninitialized _ _ _ _ Tree_FailMsgParams _ _ _ H_step1) //=. move => H_in. by break_or_hyp. rewrite collate_map2snd_not_related //. by rewrite (@ordered_dynamic_no_outgoing_uninitialized _ _ _ _ Tree_FailMsgParams _ _ _ H_step1). * have H_neq: h <> n by move => H_eq; subst_max. have H_neq': h <> n' by move => H_eq; subst_max. rewrite collate_ls_neq_to //. rewrite collate_neq //. rewrite_update. by eauto. - find_apply_lem_hyp net_handlers_NetHandler. 
net_handler_cases => //=; simpl in *; update2_destruct_max_simplify; update_destruct_max_simplify; repeat find_rewrite; auto; try tuple_inversion; try find_injection; repeat find_rewrite. * by find_rewrite_lem (Tree_self_channel_empty H_step1). * have H_bef := Tree_in_after_all_fail_level H_step1 _ H1 H0 _ H9 lvo'. find_rewrite. simpl in *. break_or_hyp => //. by break_and. * case (name_eq_dec from n) => H_dec. subst_max. by find_rewrite_lem (Tree_self_channel_empty H_step1). case (name_eq_dec from n') => H_dec'. subst_max. have H_f := Tree_not_failed_no_fail H_step1 _ H12 H13 n. find_rewrite. case: H_f. by left. have H_ins: ~ NSet.In n' d.(adjacent). move => H_ins. case: H15. exact: NSetFacts.remove_2. by have IH := IHH_step1 _ H9 H11 _ H12 H13 _ H2 H_ins lvo'. * by eauto. * by find_rewrite_lem (Tree_self_channel_empty H_step1). * have H_f := Tree_not_failed_no_fail H_step1 _ H10 H12 n'. find_rewrite. by case: H_f; left. * case (name_eq_dec from n') => H_dec. subst_max. have H_f := Tree_not_failed_no_fail H_step1 _ H13 H14 n. find_rewrite. by case: H_f; left. have H_ins: ~ NSet.In n' d.(adjacent). move => H_ins. case: H16. by auto with set. by eauto. * by eauto. * by find_rewrite_lem (Tree_self_channel_empty H_step1). * have H_f := Tree_not_failed_no_fail H_step1 _ H10 H12 n'. find_rewrite. by case: H_f; left. * case (name_eq_dec from n') => H_dec. subst_max. have H_f := Tree_not_failed_no_fail H_step1 _ H13 H14 n. find_rewrite. by case: H_f; left. have H_ins: ~ NSet.In n' d.(adjacent). move => H_ins. case: H16. by auto with set. by eauto. * by eauto. * by find_rewrite_lem (Tree_self_channel_empty H_step1). * have IH := IHH_step1 _ H4 H6 _ H7 H8 _ H9 H10 x. find_rewrite. by case: IH; left. * by eauto. * by eauto. * by find_rewrite_lem (Tree_self_channel_empty H_step1). * have IH := IHH_step1 _ H H6 _ H7 H8 _ H9 H10 lvo'. find_rewrite. case: IH. by right. * by eauto. * by eauto. * by find_rewrite_lem (Tree_self_channel_empty H_step1). * have IH := IHH_step1 _ H4 H6 _ H7 H8 _ H9 H10 (Some x). find_rewrite. by case: IH; left. * by eauto. * by eauto. * by find_rewrite_lem (Tree_self_channel_empty H_step1). * have IH := IHH_step1 _ H10 H12 _ H13 H14 _ H15 H16 lvo'. find_rewrite. by case: IH; right. * by eauto. * by eauto. * by find_rewrite_lem (Tree_self_channel_empty H_step1). * have IH := IHH_step1 _ H10 H12 _ H13 H14 _ H15 H16 lvo'. find_rewrite. by case: IH; right. * by eauto. * by eauto. * by auto with set. * by auto. * have H_neq: from <> n' by move => H_eq; subst_max. rewrite_update2. have H_ins: ~ NSet.In n' d.(adjacent). move => H_ins. case: H15. by auto with set. by eauto. * have IH := IHH_step1 _ H9 H11 _ H12 H13 _ H14 H15 lvo'. update2_destruct_max_simplify => //. find_injection. find_rewrite. simpl in *. case: IH. by right. * by find_rewrite_lem (Tree_self_channel_empty H_step1). * have IH := IHH_step1 _ H10 H12 _ H13 H14 _ H15 H16 lvo'. find_rewrite. case: IH. by right. * case (name_eq_dec from n') => H_dec. subst_max. by auto with set. have H_ins: ~ NSet.In n' d.(adjacent) by auto with set. by eauto. * by eauto. * by auto with set. * by auto. * have H_neq: from <> n' by move => H_eq; subst_max. rewrite_update2. have H_ins: ~ NSet.In n' d.(adjacent). move => H_ins. case: H15. by auto with set. by eauto. * case (name_eq_dec from n') => H_dec. subst_max. rewrite_update2. by eauto. have IH := IHH_step1 _ H5 H7 _ H8 H9 _ H10 H11 lvo'. update2_destruct_max_simplify => //. find_injection. find_rewrite. simpl in *. case: IH. by right. - find_apply_lem_hyp input_handlers_IOHandler. 
io_handler_cases => //=; simpl in *; update_destruct_max_simplify; repeat find_rewrite; auto; try tuple_inversion; try find_injection; repeat find_rewrite. * by eauto. * by eauto. * have IH := IHH_step1 _ H9 H11 _ H12 H13 _ H2 H15 lvo'. have H_ins: ~ In n' (NSet.elements d.(adjacent)). move => H_ins. case: H15. have H_adj_in_spec := NSet.elements_spec1 d.(adjacent) n'. apply H_adj_in_spec. apply InA_alt. by exists n'. contradict H16. move: H_ins. rewrite /level_adjacent NSet.fold_spec /flip /=. elim: NSet.elements => //=. move => k ns IH' H_in. have H_neq: k <> n' by auto. rewrite (@fold_left_level_fold_eq Tree_TreeMsg) /=. rewrite {2}/level_fold /= collate_app /=. rewrite_update2. have H_in': ~ In n' ns by auto. exact: IH'. * rewrite collate_neq // in H16. by eauto. * by eauto. * by eauto. * by eauto. * by eauto. * by eauto. * by eauto. - move => n H_n H_f n' H_n' H_f' d H_d H_ins lvo'. have H_neq: h <> n by auto. rewrite collate_neq //. have H_fn: ~ In n failed0 by auto. have H_fn': ~ In n' failed0 by auto. by eauto. Qed. Lemma Tree_level_head_in_adjacent : forall net failed tr, step_ordered_dynamic_failure_star step_ordered_dynamic_failure_init (failed, net) tr -> forall n, In n net.(odnwNodes) -> ~ In n failed -> forall n', In n' net.(odnwNodes) -> forall lvo', head (net.(odnwPackets) n' n) = Some (Level lvo') -> forall d, net.(odnwState) n = Some d -> NSet.In n' d.(adjacent). Proof. move => net failed tr H_step. change failed with (fst (failed, net)). change net with (snd (failed, net)) at 1 3 4 5. remember step_ordered_dynamic_failure_init as y in *. move: Heqy. induction H_step using refl_trans_1n_trace_n1_ind => H_init; first by rewrite H_init. concludes. match goal with | [ H : step_ordered_dynamic_failure _ _ _ |- _ ] => invc H end; simpl in *. - move => n H_n H_f n' H_n' lvo'. break_or_hyp; break_or_hyp. * rewrite collate_ls_not_in; last by apply: not_in_not_in_filter_rel; eauto using in_remove_all_was_in. rewrite collate_map2snd_not_in; last by eauto using in_remove_all_was_in. by rewrite (Tree_self_channel_empty H_step1). * rewrite collate_ls_not_in; last by apply: not_in_not_in_filter_rel; eauto using in_remove_all_was_in. case (adjacent_to_dec n' n) => H_dec; last first. rewrite collate_map2snd_not_related //. by rewrite (@ordered_dynamic_no_outgoing_uninitialized _ _ _ _ Tree_FailMsgParams _ _ _ H_step1). have H_nd := @ordered_dynamic_nodes_no_dup _ _ _ _ Tree_FailMsgParams _ _ _ H_step1. rewrite collate_map2snd_not_in_related //. by rewrite (@ordered_dynamic_no_outgoing_uninitialized _ _ _ _ Tree_FailMsgParams _ _ _ H_step1). * have H_neq: n <> n' by move => H_eq; find_reverse_rewrite. case (adjacent_to_dec n n') => H_dec; last first. rewrite collate_ls_not_related //. rewrite collate_neq //. by rewrite (Tree_inactive_no_incoming H_step1). case (in_dec name_eq_dec n' failed0) => H_dec'; last first. have H_nd := @ordered_dynamic_nodes_no_dup _ _ _ _ Tree_FailMsgParams _ _ _ H_step1. rewrite collate_ls_live_related //. rewrite collate_neq //. by rewrite (Tree_inactive_no_incoming H_step1). rewrite collate_ls_in_remove_all //. rewrite collate_neq //. by rewrite (Tree_inactive_no_incoming H_step1). * have H_neq: h <> n by move => H_eq; find_reverse_rewrite. have H_neq': h <> n' by move => H_eq; repeat find_rewrite. rewrite collate_ls_neq_to //. rewrite collate_neq //. rewrite_update. by eauto. - find_apply_lem_hyp net_handlers_NetHandler. net_handler_cases => //=; simpl in *; update2_destruct_max_simplify; repeat find_rewrite; auto; try tuple_inversion. 
* have H_bef := Tree_in_after_all_fail_level H_step1 _ H9 H11 _ H12 lvo'. find_rewrite. simpl in *. case: H_bef => H_bef; last by case: H_bef. destruct ms => //. simpl in *. find_inversion. by case: H_bef; left. * have H_neq: ~ (from = n' /\ to = n) by move => H_neq; break_and; subst_max. case (name_eq_dec from n') => H_dec. subst_max. have H_neq': to <> n by auto. rewrite_update. by eauto. case (name_eq_dec to n) => H_dec'. subst_max. have H_neq': from <> n' by auto. rewrite_update. find_injection. find_rewrite. by apply NSetFacts.remove_2; eauto. rewrite_update. by eauto. * rewrite_update. find_injection. have H_bef := Tree_in_after_all_fail_level H_step1 _ H10 H12 _ H13 lvo'. find_rewrite. simpl in *. case: H_bef => H_bef; last by case: H_bef; break_and. destruct ms => //. simpl in *. find_injection. by case: H_bef; left. * have H_neq: ~ (from = n' /\ to = n) by move => H_neq; break_and; subst_max. case (name_eq_dec from n') => H_dec. subst_max. have H_neq': to <> n by auto. rewrite_update. by eauto. case (name_eq_dec to n) => H_dec'. subst_max. have H_neq': from <> n' by auto. rewrite_update. find_injection. find_rewrite. by apply NSetFacts.remove_2; eauto. rewrite_update. by eauto. * rewrite_update. find_injection. have H_bef := Tree_in_after_all_fail_level H_step1 _ H10 H12 _ H13 lvo'. find_rewrite. simpl in *. case: H_bef => H_bef; last by case: H_bef; break_and. destruct ms => //. simpl in *. find_injection. by case: H_bef; left. * have H_neq: ~ (from = n' /\ to = n) by move => H_neq; break_and; subst_max. case (name_eq_dec from n') => H_dec. subst_max. have H_neq': to <> n by auto. rewrite_update. by eauto. case (name_eq_dec to n) => H_dec'. subst_max. have H_neq': from <> n' by auto. rewrite_update. find_injection. find_rewrite. by apply NSetFacts.remove_2; eauto. rewrite_update. by eauto. * rewrite_update. find_injection. have H_hd: hd_error (odnwPackets net0 n' n) = Some (Level x) by find_rewrite. by have IH := IHH_step1 _ H4 H6 _ H7 _ H_hd _ H2. * have H_neq: ~ (from = n' /\ to = n) by move => H_neq; break_and; subst_max. case (name_eq_dec from n') => H_dec. subst_max. have H_neq': to <> n by auto. rewrite_update. by eauto. case (name_eq_dec to n) => H_dec'. subst_max. have H_neq': from <> n' by auto. rewrite_update. find_injection. by eauto. rewrite_update. by eauto. * rewrite_update. find_injection. find_rewrite. have H_hd: hd_error (odnwPackets net0 n' n) = Some (Level (Some x)) by find_rewrite. by have IH := IHH_step1 _ H1 H0 _ H7 _ H_hd _ H2. * have H_neq: ~ (from = n' /\ to = n) by move => H_neq; break_and; subst_max. case (name_eq_dec from n') => H_dec. subst_max. have H_neq': to <> n by auto. rewrite_update. by eauto. case (name_eq_dec to n) => H_dec'. subst_max. have H_neq': from <> n' by auto. rewrite_update. find_injection. find_rewrite. by eauto. rewrite_update. by eauto. * rewrite_update. find_injection. find_rewrite. have H_hd: hd_error (odnwPackets net0 n' n) = Some (Level (Some x)) by find_rewrite. by have IH := IHH_step1 _ H1 H0 _ H7 _ H_hd _ H2. * have H_neq: ~ (from = n' /\ to = n) by move => H_neq; break_and; subst_max. case (name_eq_dec from n') => H_dec. subst_max. have H_neq': to <> n by auto. rewrite_update. by eauto. case (name_eq_dec to n) => H_dec'. subst_max. have H_neq': from <> n' by auto. rewrite_update. find_injection. find_rewrite. by eauto. rewrite_update. by eauto. * rewrite_update. find_injection. find_rewrite. have H_hd: hd_error (odnwPackets net0 n' n) = Some (Level None) by find_rewrite. by have IH := IHH_step1 _ H1 H0 _ H13 _ H_hd _ H2. 
* have H_neq: ~ (from = n' /\ to = n) by move => H_neq; break_and; subst_max. case (name_eq_dec from n') => H_dec. subst_max. have H_neq': to <> n by auto. rewrite_update. by eauto. case (name_eq_dec to n) => H_dec'. subst_max. have H_neq': from <> n' by auto. rewrite_update. find_injection. find_rewrite. by eauto. rewrite_update. by eauto. * rewrite_update. find_injection. find_rewrite. have H_hd: hd_error (odnwPackets net0 n' n) = Some (Level None) by find_rewrite. by have IH := IHH_step1 _ H1 H0 _ H13 _ H_hd _ H2. * have H_neq: ~ (from = n' /\ to = n) by move => H_neq; break_and; subst_max. case (name_eq_dec from n') => H_dec. subst_max. have H_neq': to <> n by auto. rewrite_update. by eauto. case (name_eq_dec to n) => H_dec'. subst_max. have H_neq': from <> n' by auto. rewrite_update. find_injection. find_rewrite. by eauto. rewrite_update. by eauto. * update_destruct_max_simplify. find_injection. by find_rewrite_lem (Tree_self_channel_empty H_step1). update2_destruct_max_simplify; first by find_injection. have H_in_new: In New (odnwPackets net0 n n') by find_rewrite; left. have H_adj := Tree_in_new_then_adjacent H_step1 _ H1 H0 _ H_in_new. apply adjacent_to_symmetric in H_adj. case (in_dec Msg_eq_dec New (odnwPackets net0 n' n)) => H_in; last exact: (Tree_adjacent_to_no_incoming_new_n_adjacent H_step1 H9 H11 H1 H0 H_adj H14 H_in). have IH := IHH_step1 _ H9 H11 _ H1 lvo'. have H_bef := Tree_in_before_all_new_level H_step1 _ H9 H11 _ H1 lvo'. destruct (odnwPackets net0 n' n) => //. simpl in *. find_injection. case: H_in => H_in //. break_or_hyp => //. by break_and. * case (name_eq_dec to n) => H_dec. subst_max. rewrite_update. find_injection. find_rewrite. case (name_eq_dec from n') => H_dec'; first exact: NSetFacts.add_1. apply NSetFacts.add_2. update2_destruct_max_simplify. find_injection. by find_rewrite. by eauto. rewrite_update. update2_destruct_max_simplify. find_injection. find_rewrite. find_injection. by find_rewrite. by eauto. * rewrite_update. find_injection. find_rewrite. exact: NSetFacts.add_1. * update_destruct_max_simplify. find_injection. find_rewrite. apply NSetFacts.add_2. by eauto. by eauto. * update_destruct_max_simplify. find_injection. by find_rewrite_lem (Tree_self_channel_empty H_step1). update2_destruct_max_simplify. find_injection. by find_rewrite_lem (Tree_self_channel_empty H_step1). have H_in_new: In New (odnwPackets net0 n n') by find_rewrite; left. have H_adj := Tree_in_new_then_adjacent H_step1 _ H1 H0 _ H_in_new. apply adjacent_to_symmetric in H_adj. case (in_dec Msg_eq_dec New (odnwPackets net0 n' n)) => H_in; last exact: (Tree_adjacent_to_no_incoming_new_n_adjacent H_step1 H5 H7 H1 H0 H_adj H10 H_in). have IH := IHH_step1 _ H5 H7 _ H8 lvo'. have H_bef := Tree_in_before_all_new_level H_step1 _ H5 H7 _ H1 lvo'. destruct (odnwPackets net0 n' n) => //. simpl in *. find_injection. case: H_in => H_in //. break_or_hyp => //. by break_and. * have H_neq: from <> to. move => H_eq; find_rewrite. subst_max. by find_rewrite_lem (Tree_self_channel_empty H_step1). case (name_eq_dec from n') => H_dec. subst_max. update_destruct_max_simplify. find_injection. rewrite_update2. find_rewrite. by auto with set. rewrite_update2. by eauto. rewrite_update2. update_destruct_max_simplify. find_injection. find_rewrite. have IH := IHH_step1 _ H5 H7 _ H8 lvo' H9 _ H2. by auto with set. by eauto. - find_apply_lem_hyp input_handlers_IOHandler. io_handler_cases => //=; simpl in *; update_destruct_max_simplify; repeat find_rewrite; try find_injection; try by eauto. 
* case (name_eq_dec n n') => H_dec. subst_max. find_rewrite. have H_ins := Tree_node_not_adjacent_self H_step1 H1 H0 H2. suff H_suff: In n' (NSet.elements d.(adjacent)). have H_adj_in_spec := NSet.elements_spec1 d.(adjacent) n'. apply H_adj_in_spec. apply InA_alt. by exists n'. have H_ins': ~ In n' (NSet.elements d.(adjacent)). move => H_in. case: H_ins. have H_adj_in_spec := NSet.elements_spec1 d.(adjacent) n'. apply H_adj_in_spec. apply InA_alt. by exists n'. move: H_ins' H13. rewrite /level_adjacent /= NSet.fold_spec /flip /=. elim: NSet.elements => //=. move => H_ins' H_hd. by find_rewrite_lem (Tree_self_channel_empty H_step1). rewrite {3}/level_fold /=. move => n ns IH H_in. have H_neq: n <> n' by auto. rewrite (@fold_left_level_fold_eq Tree_TreeMsg) /=. rewrite collate_not_in_rest //=; last by move => H_neq'; break_or_hyp. move => H_in'. right. by eauto. rewrite collate_neq // in H13. find_rewrite. by eauto. * case (name_eq_dec h n') => H_dec. subst_max. case (adjacent_to_dec n n') => H_adj; last first. have H_ins: ~ NSet.In n d.(adjacent). move => H_ins. case: H_adj. exact: (Tree_in_adj_adjacent_to H_step1 _ H12 H0 H2 H_ins). have H_inl := Tree_notin_adjacent_not_sent_level H_step1 _ H1 H0 H9 H11 H2 H_ins lvo'. have H_ins': ~ In n (NSet.elements d.(adjacent)). move => H_ins'. case: H_ins. apply NSetFacts.elements_2. apply InA_alt. by exists n. move: H13 H_ins'. rewrite /level_adjacent NSet.fold_spec /flip /=. elim: NSet.elements => //=. move => H_hd H_ins'. destruct (odnwPackets net0 n' n); simpl in * => //. find_injection. case: H_inl. by left. move => k ns IH H_ins' H_in_k. have H_neq: k <> n by auto. have H_nin: ~ In n ns by auto. apply: IH => //. rewrite {2}/level_fold /= in H_ins'. rewrite (@fold_left_level_fold_eq Tree_TreeMsg) in H_ins'. by rewrite collate_not_in_rest //= in H_ins'; last by move => H_neq'; break_or_hyp. case (in_dec Msg_eq_dec New (net0.(odnwPackets) n' n)) => H_in; last first. apply adjacent_to_symmetric in H_adj. exact: (Tree_adjacent_to_no_incoming_new_n_adjacent H_step1 H9 H11 H1 H0 H_adj H14 H_in). have H_bef := Tree_in_before_all_new_level H_step1 _ H9 H11 _ H1 lvo'. contradict H13. rewrite /level_adjacent NSet.fold_spec /flip /=. elim: NSet.elements => //=. move => H_hd. destruct (odnwPackets net0 n' n) => //. simpl in *. find_injection. case: H_in => H_in //. case: H_bef => H_bef //. by break_and. move => k ns IH. rewrite {2}/level_fold /= (@fold_left_level_fold_eq Tree_TreeMsg) /=. case (name_eq_dec n k) => H_dec_nk; last first. rewrite collate_not_in_rest //=. move => H_neq. by break_or_hyp. subst_max. rewrite collate_app /=. rewrite_update2. move: IH. set l := fold_left _ _ _. have H_in_in := (@collate_in_in _ _ name_eq_dec l n' _ _ _ _ H_in). move: H_in_in. set ls := collate _ _ _ _ _ _. by destruct ls. rewrite collate_neq // in H13. by eauto. - move => n H_n H_f n' H_n' lvo'. have H_neq: h <> n by auto. have H_in: ~ In n failed0 by auto. case (name_eq_dec h n') => H_dec. subst_max. case (adjacent_to_dec n' n) => H_adj; last by rewrite collate_map2snd_not_related; eauto. have H_nd := @ordered_dynamic_nodes_no_dup _ _ _ _ Tree_FailMsgParams _ _ _ H_step1. rewrite collate_map2snd_not_in_related //. move => H_hd. apply: (IHH_step1 _ _ _ _ _ lvo') => //. by destruct (odnwPackets net0 n' n). rewrite collate_neq //. by eauto. Qed. 
(* bfs_net_ok_notins_levels_bot *) Lemma Tree_notins_levels_bot : forall net failed tr, step_ordered_dynamic_failure_star step_ordered_dynamic_failure_init (failed, net) tr -> forall n, In n net.(odnwNodes) -> ~ In n failed -> forall d, net.(odnwState) n = Some d -> forall n', ~ NSet.In n' d.(adjacent) -> NMap.find n' d.(levels) = None. Proof. move => net failed tr H_step. change failed with (fst (failed, net)). change net with (snd (failed, net)) at 1 3. remember step_ordered_dynamic_failure_init as y in *. move: Heqy. induction H_step using refl_trans_1n_trace_n1_ind => H_init; first by rewrite H_init. concludes. match goal with | [ H : step_ordered_dynamic_failure _ _ _ |- _ ] => invc H end; simpl in *. - move => n H_n H_f d H_d n' H_ins. break_or_hyp; rewrite_update; first find_injection; simpl in *. * apply NMapFacts.not_find_in_iff. by NMapFacts.map_iff. * by eauto. - find_apply_lem_hyp net_handlers_NetHandler. net_handler_cases => //=; simpl in *; update_destruct_max_simplify; repeat find_rewrite; try find_injection; repeat find_rewrite; try by eauto. * have H_emp := (Tree_root_levels_empty H_step1) _ H9 H11 H4 _ H2. repeat find_rewrite. apply NMapFacts.not_find_in_iff. by NMapFacts.map_iff. * case (name_eq_dec from n') => H_dec. subst_max. apply NMapFacts.not_find_in_iff. NMapFacts.map_iff. move => H_and. by break_and. apply NMapFacts.not_find_in_iff. NMapFacts.map_iff. move => H_and. break_and. have H_ins: ~ NSet.In n' d.(adjacent). move => H_ins. case: H14. by auto with set. have IH := IHH_step1 _ H10 H12 _ H2 _ H_ins. by apply NMapFacts.not_find_in_iff in IH. * case (name_eq_dec from n') => H_dec. subst_max. apply NMapFacts.not_find_in_iff. NMapFacts.map_iff. move => H_or. by break_and. apply NMapFacts.not_find_in_iff. NMapFacts.map_iff. move => H_and. break_and. have H_ins: ~ NSet.In n' d.(adjacent). move => H_ins. case: H14. by auto with set. have IH := IHH_step1 _ H1 H0 _ H2 _ H_ins. by apply NMapFacts.not_find_in_iff in IH. * case (in_dec name_eq_dec from (odnwNodes net0)) => H_in; last by rewrite (@ordered_dynamic_no_outgoing_uninitialized _ _ _ _ Tree_FailMsgParams _ _ _ H_step1) in H3. have H_hd := Tree_level_head_in_adjacent H_step1 _ H1 H0 from H_in. Admitted. (* bfs_net_ok_root_status_in_queue *) Lemma Tree_root_incoming_level_0 : forall net failed tr, step_ordered_dynamic_failure_star step_ordered_dynamic_failure_init (failed, net) tr -> forall n, In n net.(odnwNodes) -> ~ In n failed -> root n -> forall n' lvo', In (Level lvo') (net.(odnwPackets) n n') -> lvo' = Some 0. Proof. Admitted. Lemma Tree_root_broadcast_false : forall net failed tr, step_ordered_dynamic_failure_star step_ordered_dynamic_failure_init (failed, net) tr -> forall n, In n net.(odnwNodes) -> ~ In n failed -> root n -> forall d, net.(odnwState) n = Some d -> d.(broadcast) = false. Proof. Admitted. (* bfs_net_ok_notin_adj_find_none *) Lemma Tree_notin_adjacent_find_none : forall net failed tr, step_ordered_dynamic_failure_star step_ordered_dynamic_failure_init (failed, net) tr -> forall n, In n net.(odnwNodes) -> ~ In n failed -> forall n', In n' net.(odnwNodes) -> ~ In n' failed -> forall d, net.(odnwState) n = Some d -> forall d', net.(odnwState) n' = Some d' -> ~ NSet.In n' d.(adjacent) -> NMap.find n d'.(levels) = None. Proof. Admitted. 
(* bfs_net_ok_root_have_level *) Lemma Tree_root_have_level : forall net failed tr, step_ordered_dynamic_failure_star step_ordered_dynamic_failure_init (failed, net) tr -> forall n, In n net.(odnwNodes) -> ~ In n failed -> forall n', In n' net.(odnwNodes) -> ~ In n' failed -> forall d, net.(odnwState) n = Some d -> forall d', net.(odnwState) n' = Some d' -> root n -> NSet.In n' d.(adjacent) -> (count_occ msg_eq_dec (net.(odnwPackets) n n') (Level (Some 0)) = 1 /\ NMap.find n d'.(levels) = None) \/ (~ In (Level (Some 0)) (net.(odnwPackets) n n') /\ NMap.find n d'.(levels) = Some 0). Proof. Admitted. Corollary Tree_root_have_level_incoming : forall net failed tr, step_ordered_dynamic_failure_star step_ordered_dynamic_failure_init (failed, net) tr -> forall n, In n net.(odnwNodes) -> ~ In n failed -> forall n', In n' net.(odnwNodes) -> ~ In n' failed -> forall d, net.(odnwState) n = Some d -> forall d', net.(odnwState) n' = Some d' -> root n -> NSet.In n' d.(adjacent) -> In (Level (Some 0)) (net.(odnwPackets) n n') \/ NMap.find n d'.(levels) = Some 0. Proof. Admitted. (* nonroot_have_level *) Lemma Tree_nonroot_have_level : forall net failed tr, step_ordered_dynamic_failure_star step_ordered_dynamic_failure_init (failed, net) tr -> forall n, In n net.(odnwNodes) -> ~ In n failed -> forall n', In n' net.(odnwNodes) -> ~ In n' failed -> forall d, net.(odnwState) n = Some d -> forall d', net.(odnwState) n' = Some d' -> ~ root n -> ~ root n' -> NSet.In n' d.(adjacent) -> forall lv', level d.(adjacent) d.(levels) = Some lv' -> d.(broadcast) = true \/ (In (Level (Some lv')) (net.(odnwPackets) n n') /\ (forall lvo5, lvo5 <> Some lv' -> In (Level lvo5) (net.(odnwPackets) n n') -> before (Level lvo5) (Level (Some lv')) (net.(odnwPackets) n n'))) \/ (NMap.find n d'.(levels) = Some lv' /\ forall lvo5, ~ In (Level lvo5) (net.(odnwPackets) n n')). Proof. Admitted. Lemma Tree_level_gt_zero : forall net failed tr, step_ordered_dynamic_failure_star step_ordered_dynamic_failure_init (failed, net) tr -> forall n, In n net.(odnwNodes) -> ~ In n failed -> forall d, net.(odnwState) n = Some d -> forall lv', level d.(adjacent) d.(levels) = Some lv' -> lv' > 0. Proof. Admitted. Lemma Tree_levels_some_in_adj : forall net failed tr, step_ordered_dynamic_failure_star step_ordered_dynamic_failure_init (failed, net) tr -> forall n, In n net.(odnwNodes) -> ~ In n failed -> forall d, net.(odnwState) n = Some d -> forall n' lv', NMap.find n' d.(levels) = Some lv' -> NSet.In n' d.(adjacent). Proof. Admitted. (* status_0_in_queue_then_root *) Lemma Tree_level_0_incoming_then_root : forall net failed tr, step_ordered_dynamic_failure_star step_ordered_dynamic_failure_init (failed, net) tr -> forall n, In n net.(odnwNodes) -> ~ In n failed -> forall n', In (Level (Some 0)) (net.(odnwPackets) n' n) -> root n'. Proof. Admitted. Lemma Tree_find_level_0_then_root : forall net failed tr, step_ordered_dynamic_failure_star step_ordered_dynamic_failure_init (failed, net) tr -> forall n, In n net.(odnwNodes) -> ~ In n failed -> forall d, net.(odnwState) n = Some d -> forall n', NMap.find n' d.(levels) = Some 0 -> root n'. Proof. Admitted. End TreeCorrect.
import M4R.Set.Finite.Finset namespace List open M4R def range' : Nat → Nat → List Nat | s, 0 => [] | s, (n+1) => s::range' (s+1) n @[simp] theorem range_zero : range 0 = [] := rfl @[simp] theorem range'_empty (n : Nat) : range' n 0 = [] := rfl @[simp] theorem range'_singleton (n : Nat) : range' n 1 = [n] := rfl theorem range_core_range' : ∀ s n : Nat, rangeAux s (range' s n) = range' 0 (n + s) | 0 , n => rfl | (s+1), n => by have : n+(s+1) = n+1+s := Nat.add_right_comm n s 1 rw [this] exact range_core_range' s (n+1) theorem range_eq_range' (n : Nat) : range n = range' 0 n := (range_core_range' n 0).trans (by rw [Nat.zero_add]) theorem range'_start (s n : Nat) : range' s n.succ = [s] ++ range' s.succ n := rfl theorem range_start (n : Nat) : range n.succ = [0] ++ range' 1 n := by rw [range_eq_range', range'_start] @[simp] theorem range'_append : ∀ s m n : Nat, range' s m ++ range' (s+m) n = range' s (n+m) | s, 0 , n => rfl | s, m+1, n => by have : s :: (range' (s+1) m ++ range' (s+m+1) n) = s :: range' (s+1) (n+m) := by rw [Nat.add_right_comm, range'_append] exact this theorem range'_succ (s n : Nat) : range' s n.succ = range' s n ++ [s+n] := by rw [Nat.succ_eq_add_one, Nat.add_comm n 1]; exact (range'_append s n 1).symm theorem range_succ (n : Nat) : range n.succ = range n ++ [n] := by have := range'_succ 0 n; rw [←range_eq_range', ←range_eq_range', Nat.zero_add] at this exact this theorem range_add (a : Nat) : ∀ b, range (a + b) = range a ++ (range b).map (a + ·) | 0 => by rw [Nat.add_zero, range_zero, map_nil, append_nil] | b + 1 => by rw [Nat.add_succ, range_succ, range_add a b, range_succ, map_append, map_singleton, append_assoc] @[simp] theorem mem_range' : ∀ {m s n : Nat}, m ∈ range' s n ↔ s ≤ m ∧ m < s + n | m, s, Nat.zero => by rw [range'_empty, mem_nil_iff, false_iff, not_and_iff_or_not, Nat.add_zero, Nat.not_le, Nat.not_lt]; exact (Nat.le_or_lt s m).comm | m, s, Nat.succ n => by have : m = s → m < s + n + 1 := (· ▸ Nat.lt_succ_of_le (Nat.le_add_right _ _)) have l : m = s ∨ s + 1 ≤ m ↔ s ≤ m := by conv => rhs rw [Nat.le_iff_eq_or_lt, Eq.comm]; exact Iff.rfl simp only [range', mem_cons_iff, mem_range', or_and_distrib_left, or_iff_right_of_imp this, l, Nat.add_right_comm] exact Iff.rfl @[simp] theorem mem_range {m n : Nat} : m ∈ range n ↔ m < n := by simp only [range_eq_range', mem_range', Nat.zero_le, true_and, Nat.zero_add]; exact Iff.rfl inductive chain (R : α → α → Prop) : α → List α → Prop | nil {a : α} : chain R a [] | cons : ∀ {a b : α} {l : List α}, R a b → chain R b l → chain R a (b::l) variable {R : α → α → Prop} @[simp] theorem chain_cons {a b : α} {l : List α} : chain R a (b::l) ↔ R a b ∧ chain R b l := ⟨fun p => by cases p with | cons n p => exact ⟨n, p⟩, fun ⟨n, p⟩ => p.cons n⟩ theorem chain.imp' {S : α → α → Prop} (HRS : ∀ ⦃a b⦄, R a b → S a b) {a b : α} (Hab : ∀ ⦃c⦄, R a c → S b c) {l : List α} (p : chain R a l) : chain S b l := by induction p generalizing b with | nil => constructor | cons r c ih => exact chain.cons (Hab r) (ih (@HRS _)) theorem chain.imp {S : α → α → Prop} (H : ∀ a b, R a b → S a b) {a : α} {l : List α} (p : chain R a l) : chain S a l := p.imp' H (H a) theorem chain_of_pairwise {a : α} {l : List α} (p : Pairwise R (a::l)) : chain R a l := by let ⟨r, p'⟩ := Pairwise.consIff.mp p induction p' generalizing a with | nil => constructor | cons r' p ih => simp only [chain_cons, forall_mem_cons] at r exact chain_cons.mpr ⟨r.left, ih (Pairwise.consIff.mp p).right r'⟩ theorem chain_iff_pairwise (tr : ∀ ⦃x y z⦄, R x y → R y z → R x z) {a : α} {l : List α} : 
chain R a l ↔ Pairwise R (a::l) := ⟨fun c => by induction c with | nil => exact Pairwise.singleton _ _ | cons r p ih => exact Pairwise.cons (fun x hx => Or.elim (eq_or_mem_of_mem_cons hx) (fun h => by rw [h]; exact r) (fun h => tr r ((Pairwise.consIff.mp ih).left x h))) ih, chain_of_pairwise⟩ theorem chain_succ_range' : ∀ s n : Nat, chain (fun a b => b = Nat.succ a) s (range' (s+1) n) | s, 0 => chain.nil | s, (n+1) => (chain_succ_range' (s+1) n).cons rfl theorem chain_lt_range' (s n : Nat) : chain (fun a b => a < b) s (range' (s+1) n) := (chain_succ_range' s n).imp (fun a b e => e.symm ▸ Nat.lt_succ_self _) theorem pairwise_lt_range' : ∀ s n : Nat, Pairwise (fun a b => a < b) (range' s n) | s, 0 => Pairwise.nil | s, (n+1) => (chain_iff_pairwise (by exact fun a b c => Nat.lt_trans)).mp (chain_lt_range' s n) theorem nodup_range' (s n : Nat) : nodup (range' s n) := (pairwise_lt_range' s n).imp fun _ _ => Nat.ne_of_lt theorem nodup_range (n : Nat) : nodup (range n) := by simp only [range_eq_range', nodup_range'] def antidiagonal (n : Nat) : List (Nat × Nat) := (range (n+1)).map fun i => (i, n - i) theorem nodup_antidiagonal (n : Nat) : nodup (antidiagonal n) := nodup_map_on (fun _ _ _ _ hxy => by simp only [congrArg Prod.fst hxy]) (nodup_range (n+1)) end List namespace M4R /- range n => [0, ..., n-1] e.g. : range 0 => [], range 1 => [0], range 2 => [0, 1], range 3 => [0, 1, 2] range m, n => [m, m+1, ..., m+n-1] e.g. : range 2 0 => [], range 2 1 => [2], range 2 2 => [2, 3], range 2 3 => [2, 3, 4] -/ namespace UnorderedList def range (n : Nat) : UnorderedList Nat := List.range n def range' (m n : Nat) : UnorderedList Nat := List.range' m n def antidiagonal (n : Nat) : UnorderedList (Nat × Nat) := List.antidiagonal n def toInt (l : UnorderedList Nat) : UnorderedList Int := l.map Int.ofNat theorem range_eq_range' (n : Nat) : range n = range' 0 n := Quot.sound (by rw [List.range_eq_range' n]; exact Perm.refl _) namespace range' @[simp] theorem zero (n : Nat) : range' n 0 = 0 := rfl @[simp] theorem singleton (n : Nat) : range' n 1 = UnorderedList.singleton n := rfl theorem append (s m n : Nat) : range' s m + range' (s+m) n = range' s (n+m) := congrArg List.to_UnorderedList (List.range'_append s m n) theorem start (s n : Nat) : range' s n.succ = UnorderedList.singleton s + range' s.succ n := rfl theorem succ (s n : Nat) : range' s n.succ = range' s n + UnorderedList.singleton (s+n) := congrArg List.to_UnorderedList (List.range'_succ s n) @[simp] theorem mem_range' {m s n : Nat} : m ∈ range' s n ↔ s ≤ m ∧ m < s + n := List.mem_range' end range' namespace range @[simp] theorem zero : range 0 = 0 := rfl theorem succ (n : Nat) : range n.succ = range n + UnorderedList.singleton n := congrArg List.to_UnorderedList (List.range_succ n) theorem start (n : Nat) : range n.succ = UnorderedList.singleton 0 + range' 1 n := congrArg List.to_UnorderedList (List.range_start n) theorem add (a b : Nat) : range (a + b) = range a + (range b).map (a + ·) := congrArg List.to_UnorderedList (List.range_add a b) @[simp] theorem mem_range {m n : Nat} : m ∈ range n ↔ m < n := List.mem_range end range end UnorderedList namespace Finset def range (n : Nat) : Finset Nat := ⟨UnorderedList.range n, List.nodup_range n⟩ def range' (m n : Nat) : Finset Nat := ⟨UnorderedList.range' m n, List.nodup_range' m n⟩ theorem range_eq_range' (n : Nat) : range n = range' 0 n := by apply Finset.val_inj.mp exact UnorderedList.range_eq_range' n def antidiagonal (n : Nat) : Finset (Nat × Nat) := ⟨UnorderedList.antidiagonal n, List.nodup_antidiagonal 
n⟩ @[simp] theorem antidiagonal.zero : antidiagonal 0 = Finset.singleton (0, 0) := rfl def toInt (f : Finset Nat) : Finset Int := f.map_inj (fun _ _ => congrArg Int.toNat : Function.injective Int.ofNat) namespace range' @[simp] theorem zero (n : Nat) : range' n 0 = ∅ := rfl @[simp] theorem singleton (n : Nat) : range' n 1 = Finset.singleton n := rfl @[simp] theorem mem_range' {m s n : Nat} : m ∈ range' s n ↔ s ≤ m ∧ m < s + n := UnorderedList.range'.mem_range' theorem not_mem_front {s n} : s ∉ range' s.succ n := by rw [mem_range', not_and_iff_or_not, Nat.not_le, Nat.not_lt] exact Or.inl (Nat.lt_succ_self s) theorem not_mem_back {s n} : s+n ∉ range' s n := by rw [mem_range', not_and_iff_or_not, Nat.not_le, Nat.not_lt] exact Or.inr (Nat.le_refl _) theorem start (s n : Nat) : range' s n.succ = (range' s.succ n).cons s not_mem_front := rfl theorem succ (s n : Nat) : range' s n.succ = (range' s n).cons (s+n) not_mem_back := Finset.ext fun _ => by rw [Finset.mem_cons, mem_range', mem_range', or_and_distrib_left, or_iff_right_of_imp (· ▸ Nat.le_add_right s n), Nat.add_succ, ←Nat.le_iff_eq_or_lt, Nat.lt_succ_if_le]; exact Iff.rfl theorem append (s m n : Nat) : range' s m ∪ range' (s + m) n = range' s (m + n) := by have := UnorderedList.range'.append s m n apply Finset.ext; intro k rw [Finset.mem_union, mem_range', mem_range', mem_range'] exact ⟨fun h => h.elim (fun ⟨h₁, h₂⟩ => ⟨h₁, Nat.add_assoc _ _ _ ▸ Nat.lt_add_right _ _ _ h₂⟩) (fun ⟨h₁, h₂⟩ => ⟨Nat.le_trans (Nat.le_add_right s m) h₁, Nat.add_assoc _ _ _ ▸ h₂⟩), fun ⟨h₁, h₂⟩ => Or.elim (Nat.le_or_lt (s + m) k) (fun h => Or.inr ⟨h, Nat.add_assoc _ _ _ ▸ h₂⟩) (fun h => Or.inl ⟨h₁, h⟩)⟩ theorem disjoint (s₁ n₁ s₂ n₂ : Nat) : disjoint (range' s₁ n₁) (range' s₂ n₂) ↔ s₁ + n₁ ≤ s₂ ∨ s₂ + n₂ ≤ s₁ ∨ n₁ = 0 ∨ n₂ = 0 := by simp only [Finset.disjoint] have : (range' s₁ n₁ ∩ range' s₂ n₂ = ∅) ↔ ∀ a, (a < s₁ ∨ s₁ + n₁ ≤ a) ∨ a < s₂ ∨ s₂ + n₂ ≤ a := by simp only [Finset.ext_iff, Finset.mem_empty, iff_false, Finset.mem_inter, not_and_iff_or_not, mem_range', not_and_iff_or_not, Nat.not_le, Nat.not_lt]; exact Iff.rfl rw [this] exact ⟨fun h => by have h₁ := h s₁ have h₂ := h s₂ simp only [Nat.lt_irrefl, false_or, Nat.add_le] at h₁ h₂ exact Or.elim h₁ (fun h₁ => Or.inr (Or.inr (Or.inl h₁))) (Or.elim · (fun h₁ => Or.elim h₂ (Or.elim · (absurd ⟨h₁, ·⟩ Nat.lt_not_symm) Or.inl) (fun h₂ => Or.inr (Or.inr (Or.inr h₂)))) (fun h₁ => Or.inr (Or.inl h₁))), fun h a => by cases h with | inl h => byCases h' : s₁ + n₁ ≤ a; { exact Or.inl (Or.inr h') } { exact Or.inr (Or.inl (Nat.lt_of_lt_of_le (Nat.not_le.mp h') h)) } | inr h => cases h with | inl h => byCases h' : s₂ + n₂ ≤ a; { exact Or.inr (Or.inr h') } { exact Or.inl (Or.inl (Nat.lt_of_lt_of_le (Nat.not_le.mp h') h)) } | inr h => cases h with | inl h => rw [h, Nat.add_zero]; exact Or.inl (Nat.le_or_lt s₁ a).comm | inr h => rw [h, Nat.add_zero]; exact Or.inr (Nat.le_or_lt s₂ a).comm⟩ end range' namespace range @[simp] theorem zero : range 0 = ∅ := rfl theorem not_mem_back {n} : n ∉ range n := by have := @range'.not_mem_back 0 n rw [Nat.zero_add, ←range_eq_range'] at this exact this theorem succ (n : Nat) : range n.succ = (range n).cons n not_mem_back := Finset.ext fun x => by rw [range_eq_range', range'.succ, Finset.mem_cons, Finset.mem_cons, Nat.zero_add, range_eq_range']; exact Iff.rfl theorem start (n : Nat) : range n.succ = (range' 1 n).cons 0 range'.not_mem_front := by rw [range_eq_range', range'.start] @[simp] theorem mem_range {m n : Nat} : m ∈ range n ↔ m < n := UnorderedList.range.mem_range theorem append (m n : 
Nat) : range m ∪ range' m n = range (m + n) := by have := range'.append 0 m n simp only [Nat.zero_add, ←range_eq_range'] at this exact this end range end Finset end M4R
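An aside, not part of the M4R source above: the `range`/`range'`/`antidiagonal` behaviour spelled out in the file's comment can be sanity-checked directly with `#eval` on the underlying `List` definitions from that file. The expected outputs noted below are derived from the informal description, not copied from a run, and assume those definitions are in scope.
#eval List.range' 2 3     -- expected: [2, 3, 4]   (range' m n = [m, m+1, ..., m+n-1])
#eval List.range 4        -- expected: [0, 1, 2, 3]
#eval List.antidiagonal 3 -- expected: [(0, 3), (1, 2), (2, 1), (3, 0)]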
import Mathlib.Tactic.ApplyWith example (f : ∀ x : Nat, x = x → α) : α := by apply (config := {}) f apply rfl apply 1 example (f : ∀ x : Nat, x = x → α) : α := by apply (config := { newGoals := .nonDependentOnly }) f apply @rfl _ 1 example (f : ∀ x : Nat, x = x → α) : α := by apply (config := { newGoals := .all }) f apply 1 apply rfl
[STATEMENT] lemma "- (2*i) + 3 + (2*i + 4) = (0::int)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. - (2 * i) + 3 + (2 * i + 4) = 0 [PROOF STEP] apply simp [PROOF STATE] proof (prove) goal (1 subgoal): 1. False [PROOF STEP] oops
[STATEMENT] lemma clop_idem_var [simp]: "cl_op (cl_op x) = cl_op x" [PROOF STATE] proof (prove) goal (1 subgoal): 1. cl_op (cl_op x) = cl_op x [PROOF STEP] by (simp add: order.antisym clop_ext clop_wtrans)
include("test_utils.jl") include("test_metrics.jl") include("test_simplices.jl") include("test_cell_complex.jl") include("test_mesh.jl") include("test_operators.jl") include("test_laplacian_rectangle.jl")
State Before: x : ℝ hx : x ≠ 0 y : ℝ≥0∞ ⊢ (fun y => y ^ x) (y ^ x⁻¹) = y State After: no goals Tactic: simp_rw [← rpow_mul, _root_.inv_mul_cancel hx, rpow_one]
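Editorial aside, not part of the trace above: the recorded `simp_rw` step is the rpow cancellation `(y ^ x⁻¹) ^ x = y ^ (x⁻¹ * x) = y ^ 1 = y` for `x ≠ 0`. A minimal standalone sketch of the same fact follows; the lemma names `ENNReal.rpow_mul`, `inv_mul_cancel₀` and `ENNReal.rpow_one` are taken from recent Mathlib and may differ from the version this trace was produced with.
import Mathlib
open scoped ENNReal

-- (y ^ x⁻¹) ^ x = y ^ (x⁻¹ * x) = y ^ 1 = y, assuming x ≠ 0
example (x : ℝ) (hx : x ≠ 0) (y : ℝ≥0∞) : (y ^ x⁻¹) ^ x = y := by
  rw [← ENNReal.rpow_mul, inv_mul_cancel₀ hx, ENNReal.rpow_one]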
import Cat

def main : IO Unit := IO.println s!"lif sux"

theorem easy : True := by
  -- both the nested `have` and the main goal are `True`, so `trivial` closes each
  have nested (n : Nat) : True := by trivial
  trivial
lemma bool.eq_iff : ∀ (x y : bool), x = y ↔ (x = tt ∧ y = tt) ∨ (x = ff ∧ y = ff) | ff ff := ⟨(λ _, or.inr ⟨rfl,rfl⟩),(λ _, rfl)⟩ | ff tt := { mp := λ h, bool.no_confusion h, mpr := λ h, or.elim h (λ k, k.left) (λ k, k.right.symm) } | tt ff := { mp := λ h, bool.no_confusion h, mpr := λ h, or.elim h (λ k, k.right.symm) (λ k, k.left) } | tt tt := ⟨(λ _, or.inl ⟨rfl,rfl⟩),(λ _, rfl)⟩ lemma bool.neq_iff : ∀ (x y : bool), x ≠ y ↔ (x = tt ∧ y = ff) ∨ (x = ff ∧ y = tt) | ff ff := { mp := λ h, false.elim (h rfl), mpr := λ h, or.elim h (λ k, bool.no_confusion k.left) (λ k, bool.no_confusion k.right) } | ff tt := ⟨(λ _, or.inr ⟨rfl,rfl⟩),(λ _ h, bool.no_confusion h)⟩ | tt ff := ⟨(λ _, or.inl ⟨rfl,rfl⟩),(λ _ h, bool.no_confusion h)⟩ | tt tt := { mp := λ h, false.elim (h rfl), mpr := λ h, or.elim h (λ k, bool.no_confusion k.right) (λ k, bool.no_confusion k.left) } lemma eq_tt_of_not_eq_ff_safe : ∀ (x : bool), x ≠ ff → x = tt | ff h := false.elim $ h rfl | tt h := rfl lemma eq_ff_of_not_eq_tt_safe : ∀ (x : bool), x ≠ tt → x = ff | ff h := rfl | tt h := false.elim $ h rfl lemma neq_tt_iff : ∀ (x : bool), x ≠ tt ↔ x = ff | ff := ⟨(λ _, rfl),(λ _ h, bool.no_confusion h)⟩ | tt := ⟨(λ h, false.elim (h rfl)),(λ h, bool.no_confusion h)⟩ lemma neq_ff_iff : ∀ (x : bool), x ≠ ff ↔ x = tt | ff := ⟨(λ h, false.elim (h rfl)),(λ h, bool.no_confusion h)⟩ | tt := ⟨(λ _, rfl),(λ _ h, bool.no_confusion h)⟩ /- Lemmas on `bnot` -/ lemma eq_tt_of_bnot_eq_ff_safe : ∀ (x : bool), bnot x = ff → x = tt | tt h := rfl | ff h := bool.no_confusion h lemma eq_ff_of_bnot_eq_tt_safe : ∀ (x : bool), bnot x = tt → x = ff | tt h := bool.no_confusion h | ff h := rfl lemma bnot_eq_tt_iff : ∀ (x : bool), bnot x = tt ↔ x = ff | tt := ⟨λ h, bool.no_confusion h, λ h, bool.no_confusion h⟩ | ff := ⟨λ _, rfl, λ _, rfl⟩ lemma bnot_eq_ff_iff : ∀ (x : bool), bnot x = ff ↔ x = tt | tt := ⟨λ _, rfl, λ _, rfl⟩ | ff := ⟨λ h, bool.no_confusion h, λ h, bool.no_confusion h⟩ /- Lemmas on `bxor` -/ @[simp] lemma ff_bxor_safe : ∀ (a : bool), bxor ff a = a | ff := rfl | tt := rfl @[simp] lemma bxor_ff_safe : ∀ (a : bool), bxor a ff = a | ff := rfl | tt := rfl @[simp] lemma tt_bxor_safe : ∀ (a : bool), bxor tt a = bnot a | tt := rfl | ff := rfl @[simp] lemma bxor_tt_safe : ∀ (a : bool), bxor a tt = bnot a | tt := rfl | ff := rfl @[simp] lemma bxor_self_safe : ∀ (a : bool), bxor a a = ff | ff := rfl | tt := rfl @[simp] lemma bxor_comm : ∀ a b, bxor a b = bxor b a := by intros; cases b; cases a; refl @[simp] lemma bxor_assoc : ∀ a b c, bxor (bxor a b) c = bxor a (bxor b c) := by intros; cases c; cases b; cases a; refl lemma bxor_eq_tt_iff : ∀ a b, bxor a b = tt ↔ a ≠ b | ff ff := ⟨(λ h, bool.no_confusion h), (λ h, false.elim (h rfl))⟩ | ff tt := ⟨(λ _ h, bool.no_confusion h), (λ _, rfl)⟩ | tt ff := ⟨(λ _ h, bool.no_confusion h), (λ _, rfl)⟩ | tt tt := ⟨(λ h, bool.no_confusion h), (λ h, false.elim (h rfl))⟩ lemma bxor_eq_ff_iff : ∀ a b, bxor a b = ff ↔ a = b | ff ff := ⟨(λ _, rfl), (λ _, rfl)⟩ | ff tt := ⟨(λ h, bool.no_confusion h), (λ h, bool.no_confusion h)⟩ | tt ff := ⟨(λ h, bool.no_confusion h), (λ h, bool.no_confusion h)⟩ | tt tt := ⟨(λ _, rfl), (λ _, rfl)⟩
import free_pfpng.epi import free_pfpng.mono noncomputable theory open_locale classical big_operators open category_theory open opposite open category_theory.grothendieck_topology universe u lemma Condensed.is_zero_of_is_zero_obj (A : Condensed.{u} Ab.{u+1}) (hA : ∀ S : Profinite.{u}, limits.is_zero (A.val.obj (opposite.op S))) : limits.is_zero A := { unique_to := λ Y, nonempty.intro { default := 0, uniq := λ a, begin ext t : 3, apply (hA t.unop).eq_of_src, end }, unique_from := λ Y, nonempty.intro { default := 0, uniq := λ a, begin ext t : 3, apply (hA t.unop).eq_of_tgt end } } lemma Profinite.free_pfpng_eq_zero_of_empty (S : Profinite.{u}) [is_empty S] (a : S.free_pfpng) : a = 0 := begin let E : limits.cone ((S.fintype_diagram ⋙ free_pfpng_functor)) := ProFiltPseuNormGrp₁.bounded_cone ⟨Ab.explicit_limit_cone.{u u} _, Ab.explicit_limit_cone_is_limit _⟩, let hE : limits.is_limit E := ProFiltPseuNormGrp₁.bounded_cone_is_limit _, let ee : S.free_pfpng ≅ E.X := (limits.limit.is_limit _).cone_point_unique_up_to_iso hE, apply_fun ee.hom, swap, { intros x y h, apply_fun ee.inv at h, simpa using h }, rw ee.hom.map_zero, ext T t, obtain ⟨s⟩ := t, apply is_empty.elim _ (s : S), assumption end lemma Profinite.is_zero_of_empty (S : Profinite.{u}) [is_empty S] : limits.is_zero S.condensed_free_pfpng := begin apply Condensed.is_zero_of_is_zero_obj, intros T, dsimp [Profinite.condensed_free_pfpng], dsimp [CompHausFiltPseuNormGrp.presheaf], apply is_zero_Ab, rintros ⟨⟨f,hf⟩⟩, ext t, change f t = 0, apply Profinite.free_pfpng_eq_zero_of_empty, end lemma category_theory.abelian.is_iso_of_mono_of_is_zero {A : Type*} [category A] [abelian A] {X Y : A} (f : X ⟶ Y) [mono f] (hY : limits.is_zero Y) : is_iso f := begin use 0, simp, split, rw ← cancel_mono f, apply hY.eq_of_tgt, apply hY.eq_of_tgt, end instance Profinite.epi_free'_to_condensed_free_pfpng_of_empty (S : Profinite.{u}) [is_empty S] : epi S.free'_to_condensed_free_pfpng := begin suffices : is_iso S.free'_to_condensed_free_pfpng, { resetI, apply_instance }, apply category_theory.abelian.is_iso_of_mono_of_is_zero, apply Profinite.is_zero_of_empty, end -- Do a case split on `[nonempty S]` here. 
instance Profinite.epi_free'_to_condensed_free_pfpng (S : Profinite.{u}) : epi S.free'_to_condensed_free_pfpng := begin by_cases hS : nonempty S, { resetI, apply_instance }, simp only [not_nonempty_iff] at hS, resetI, apply_instance end instance Profinite.is_iso_free'_to_condensed_free_pfpng (S : Profinite.{u}) : is_iso S.free'_to_condensed_free_pfpng := is_iso_of_mono_of_epi _ def Profinite.free_to_pfpng (S : Profinite.{u}) : CondensedSet_to_Condensed_Ab.obj S.to_Condensed ⟶ S.condensed_free_pfpng := (Condensed_Ab_CondensedSet_adjunction.hom_equiv _ _).symm S.to_condensed_free_pfpng attribute [simps hom_app] AddCommGroup.free_iso_free' instance Profinite.is_iso_free_to_pfpng (S : Profinite.{u}) : is_iso S.free_to_pfpng := begin suffices : S.free_to_pfpng = (CondensedSet_to_Condensed_Ab_iso.app S.to_Condensed).hom ≫ S.free'_to_condensed_free_pfpng, { rw this, apply_instance }, rw [iso.app_hom], delta Profinite.free'_to_condensed_free_pfpng Profinite.free'_lift Profinite.free_to_pfpng CondensedSet_to_Condensed_Ab_iso Sheaf.adjunction Condensed_Ab_CondensedSet_adjunction Condensed_Ab_CondensedSet_adjunction', ext T : 4, dsimp only [adjunction.mk_of_hom_equiv_hom_equiv, functor.map_iso_hom, quiver.hom.forget_Ab, Sheaf.hom.comp_val, Condensed_Ab_to_CondensedSet_map, Sheaf.compose_equiv_symm_apply_val, presheaf_to_Sheaf_map_val, nat_trans.comp_app, iso_whisker_left_hom, iso_whisker_right_hom, whisker_left_app, whisker_right_app], rw [← nat_trans.comp_app, sheafify_map_sheafify_lift], congr' 4, clear T, ext T : 2, dsimp only [whiskering_right_map_app_app, whiskering_right_obj_map, nat_trans.comp_app, adjunction.whisker_right, adjunction.mk_of_unit_counit_hom_equiv_symm_apply, whisker_left_app, whisker_right_app, functor.associator_hom_app, functor.right_unitor_hom_app], erw [category.id_comp, category.id_comp, category.comp_id, category.comp_id], rw [← nat_trans.naturality_assoc], congr' 1, dsimp only [AddCommGroup.adj, AddCommGroup.adj', adjunction.mk_of_hom_equiv_hom_equiv, adjunction.of_nat_iso_left, adjunction.mk_of_hom_equiv_counit_app, equiv.inv_fun_as_coe, equiv.symm_trans_apply, iso.symm_hom, adjunction.equiv_homset_left_of_nat_iso_symm_apply], simp only [equiv.symm_symm], erw [← category.assoc, ← nat_trans.comp_app, iso.hom_inv_id, nat_trans.id_app, category.id_comp], end lemma free_pfpng_profinite_natural_map_aux (S T : Profinite.{u}) (f : S ⟶ T) : f ≫ T.to_free_pfpng = S.to_free_pfpng ≫ (ProFiltPseuNormGrp₁.level.obj 1).map ((Profinite.extend free_pfpng_functor).map f) := begin apply (limits.is_limit_of_preserves (ProFiltPseuNormGrp₁.level.obj 1) (limits.limit.is_limit _)).hom_ext, intros W, dsimp [Profinite.to_free_pfpng, Profinite.free_pfpng_level_iso, limits.is_limit.cone_point_unique_up_to_iso, limits.is_limit.map], simp only [category.assoc], erw (limits.is_limit_of_preserves (ProFiltPseuNormGrp₁.level.obj 1) (limits.limit.is_limit (T.fintype_diagram ⋙ free_pfpng_functor))).fac, erw limits.limit.lift_π, swap, apply_instance, simp only [← functor.map_comp, limits.limit.lift_π], dsimp [Profinite.change_cone], simp only [functor.map_comp], erw (limits.is_limit_of_preserves (ProFiltPseuNormGrp₁.level.obj 1) (limits.limit.is_limit (S.fintype_diagram ⋙ free_pfpng_functor))).fac_assoc, erw limits.limit.lift_π_assoc, ext, dsimp [Profinite.as_limit_cone, Fintype.free_pfpng_unit, free_pfpng.map, ProFiltPseuNormGrp₁.level], rcases x with ⟨x⟩, simp only [finset.filter_congr_decidable], erw [finset.sum_filter, finset.sum_ite, finset.sum_ite], simp only [finset.filter_congr_decidable, 
finset.sum_const, nat.smul_one_eq_coe, finset.sum_const_zero, add_zero], rw finset.filter_filter, split_ifs, { symmetry, norm_cast, rw finset.card_eq_one, use (W.comap f.2).proj a, rw finset.eq_singleton_iff_nonempty_unique_mem, split, { rw finset.filter_nonempty_iff, use (W.comap f.2).proj a, refine ⟨finset.mem_univ _, h, rfl⟩ }, { rintros ⟨q⟩ hq, simp only [finset.mem_filter, finset.mem_univ, true_and] at hq, erw hq.2 } }, { symmetry, norm_cast, simp only [finset.card_eq_zero], rw finset.filter_eq_empty_iff, rintros ⟨q⟩ -, push_neg, intros hh, rw ← hh at h, erw discrete_quotient.map_proj_apply at h, contrapose! h, let e : (W.comap f.2) → W := discrete_quotient.map (le_refl _), apply_fun e at h, exact h }, end def free_pfpng_profinite_natural_map : Profinite_to_Condensed ⋙ CondensedSet_to_Condensed_Ab ⟶ Profinite.extend free_pfpng_functor ⋙ PFPNG₁_to_CHFPNG₁ₑₗ ⋙ CHFPNG₁_to_CHFPNGₑₗ ⋙ CompHausFiltPseuNormGrp.to_Condensed := { app := λ X, X.free_to_pfpng, naturality' := λ S T f, begin -- we should be able to precompose with the natural map `S.to_Condensed ⟶ S.free'` -- how do we do that? -- Answer: use `adjunction.hom_equiv`. dsimp only [functor.comp_map], dsimp only [Profinite.free_to_pfpng], apply_fun (Condensed_Ab_CondensedSet_adjunction.hom_equiv _ _), simp only [adjunction.hom_equiv_unit, adjunction.hom_equiv_counit, functor.map_comp], simp only [nat_trans.naturality, category.assoc, nat_trans.naturality_assoc], dsimp only [Profinite.condensed_free_pfpng], have := Condensed_Ab_CondensedSet_adjunction.unit.naturality (Profinite_to_Condensed.map f), dsimp only [functor.comp_map] at this, slice_lhs 1 2 { rw ← this }, clear this, dsimp only [functor.id_map], simp only [category.assoc], have := Condensed_Ab_CondensedSet_adjunction.unit.naturality S.to_condensed_free_pfpng, dsimp only [functor.comp_map] at this, slice_rhs 1 2 { erw ← this }, clear this, dsimp only [functor.id_map], simp only [category.assoc], have := Condensed_Ab_CondensedSet_adjunction.right_triangle_components, slice_rhs 2 3 { erw this }, clear this, erw category.id_comp, slice_lhs 2 3 { erw ← nat_trans.naturality }, simp only [functor.id_map, category.assoc], have := Condensed_Ab_CondensedSet_adjunction.right_triangle_components, slice_lhs 3 4 { rw this }, clear this, erw category.comp_id, ext W ⟨t⟩ : 7, change W.unop ⟶ S at t, dsimp [Profinite.to_condensed_free_pfpng, CompHausFiltPseuNormGrp.level_Condensed_diagram_cocone, Ab.ulift, Profinite.to_free_pfpng_level], erw ← comp_apply, erw ← comp_apply, erw ← comp_apply, rw free_pfpng_profinite_natural_map_aux _ _ f, refl, end } instance free_pfpng_profinite_natural_map_is_iso : is_iso free_pfpng_profinite_natural_map := begin apply_with nat_iso.is_iso_of_is_iso_app { instances := ff }, intros X, apply X.is_iso_free_to_pfpng, end def free_pfpng_profinite_iso_aux : condensify (free_pfpng_functor ⋙ PFPNG₁_to_CHFPNG₁ₑₗ) ≅ ((Profinite.extend free_pfpng_functor ⋙ PFPNG₁_to_CHFPNG₁ₑₗ) ⋙ CHFPNG₁_to_CHFPNGₑₗ) ⋙ CompHausFiltPseuNormGrp.to_Condensed := iso_whisker_right (iso_whisker_right (Profinite.extend_commutes free_pfpng_functor PFPNG₁_to_CHFPNG₁ₑₗ).symm CHFPNG₁_to_CHFPNGₑₗ) CompHausFiltPseuNormGrp.to_Condensed /-- Prop 2.1 of Analytic.pdf -/ def free_pfpng_profinite_iso : condensify (free_pfpng_functor ⋙ PFPNG₁_to_CHFPNG₁ₑₗ) ≅ Profinite_to_Condensed ⋙ CondensedSet_to_Condensed_Ab := free_pfpng_profinite_iso_aux ≪≫ (as_iso free_pfpng_profinite_natural_map).symm
module Issue535 where data Nat : Set where zero : Nat suc : Nat → Nat data Vec A : Nat → Set where [] : Vec A zero _∷_ : ∀ {n} → A → Vec A n → Vec A (suc n) replicate : ∀ {A n} → A → Vec A n replicate {n = n} x = {!n!} replicate′ : ∀ {n A} → A → Vec A n replicate′ {n} x = {!n!} extlam : Nat → {n m : Nat} → Vec Nat n extlam = λ { x {m = m} → {!m!} }
! **************************************************************************** ! FILE: mpi_hello.f ! DESCRIPTION: ! MPI tutorial example code: Simple hello world program ! AUTHOR: Blaise Barney ! LAST REVISED: 03/05/10 ! **************************************************************************** program hello ! include 'mpif.h' use mpi parameter (MASTER = 0) integer numtasks, taskid, len, ierr character(MPI_MAX_PROCESSOR_NAME) hostname call MPI_INIT(ierr) call MPI_COMM_SIZE(MPI_COMM_WORLD, numtasks, ierr) call MPI_COMM_RANK(MPI_COMM_WORLD, taskid, ierr) call MPI_GET_PROCESSOR_NAME(hostname, len, ierr) write(*,20) taskid, hostname if (taskid .eq. MASTER) then write(*,30) numtasks end if call MPI_FINALIZE(ierr) 20 format('Hello from task ',I2,' on ',A48) 30 format('MASTER: Number of MPI tasks is: ',I2) end
lemma integrable_on_localized_vector_derivative: "(\<lambda>x. f (g x) * vector_derivative g (at x within {a..b})) integrable_on {a..b} \<longleftrightarrow> (\<lambda>x. f (g x) * vector_derivative g (at x)) integrable_on {a..b}"
State Before: p x : ℝ ⊢ ∀ (x : ℝ), ‖↑x‖ = abs (x - ↑(round x)) State After: ⊢ ∀ (x : ℝ), ‖↑x‖ = abs (x - ↑(round x)) Tactic: clear! x p State Before: ⊢ ∀ (x : ℝ), ‖↑x‖ = abs (x - ↑(round x)) State After: x : ℝ ⊢ ‖↑x‖ = abs (x - ↑(round x)) Tactic: intros x State Before: x : ℝ ⊢ ‖↑x‖ = abs (x - ↑(round x)) State After: x : ℝ ⊢ sInf (norm '' {m | ↑m = ↑x}) = min (fract x) (1 - fract x) Tactic: rw [quotient_norm_eq, abs_sub_round_eq_min] State Before: x : ℝ ⊢ sInf (norm '' {m | ↑m = ↑x}) = min (fract x) (1 - fract x) State After: x : ℝ h₁ : BddBelow (abs '' {m | ↑m = ↑x}) ⊢ sInf (norm '' {m | ↑m = ↑x}) = min (fract x) (1 - fract x) Tactic: have h₁ : BddBelow (abs '' { m : ℝ | (m : AddCircle (1 : ℝ)) = x }) := ⟨0, by simp [mem_lowerBounds]⟩ State Before: x : ℝ h₁ : BddBelow (abs '' {m | ↑m = ↑x}) ⊢ sInf (norm '' {m | ↑m = ↑x}) = min (fract x) (1 - fract x) State After: x : ℝ h₁ : BddBelow (abs '' {m | ↑m = ↑x}) h₂ : Set.Nonempty (abs '' {m | ↑m = ↑x}) ⊢ sInf (norm '' {m | ↑m = ↑x}) = min (fract x) (1 - fract x) Tactic: have h₂ : (abs '' { m : ℝ | (m : AddCircle (1 : ℝ)) = x }).Nonempty := ⟨|x|, ⟨x, rfl, rfl⟩⟩ State Before: x : ℝ h₁ : BddBelow (abs '' {m | ↑m = ↑x}) h₂ : Set.Nonempty (abs '' {m | ↑m = ↑x}) ⊢ sInf (norm '' {m | ↑m = ↑x}) = min (fract x) (1 - fract x) State After: case a x : ℝ h₁ : BddBelow (abs '' {m | ↑m = ↑x}) h₂ : Set.Nonempty (abs '' {m | ↑m = ↑x}) ⊢ sInf (norm '' {m | ↑m = ↑x}) ≤ min (fract x) (1 - fract x) case a x : ℝ h₁ : BddBelow (abs '' {m | ↑m = ↑x}) h₂ : Set.Nonempty (abs '' {m | ↑m = ↑x}) ⊢ min (fract x) (1 - fract x) ≤ sInf (norm '' {m | ↑m = ↑x}) Tactic: apply le_antisymm State Before: p x : ℝ this : ∀ (x : ℝ), ‖↑x‖ = abs (x - ↑(round x)) ⊢ ‖↑x‖ = abs (x - ↑(round (p⁻¹ * x)) * p) State After: case inl x : ℝ this : ∀ (x : ℝ), ‖↑x‖ = abs (x - ↑(round x)) ⊢ ‖↑x‖ = abs (x - ↑(round (0⁻¹ * x)) * 0) case inr p x : ℝ this : ∀ (x : ℝ), ‖↑x‖ = abs (x - ↑(round x)) hp : p ≠ 0 ⊢ ‖↑x‖ = abs (x - ↑(round (p⁻¹ * x)) * p) Tactic: rcases eq_or_ne p 0 with (rfl | hp) State Before: case inr p x : ℝ this : ∀ (x : ℝ), ‖↑x‖ = abs (x - ↑(round x)) hp : p ≠ 0 ⊢ ‖↑x‖ = abs (x - ↑(round (p⁻¹ * x)) * p) State After: case inr p x : ℝ this : ∀ (x : ℝ), ‖↑x‖ = abs (x - ↑(round x)) hp : p ≠ 0 ⊢ ‖↑x‖ = abs (x - ↑(round (p⁻¹ * x)) * p) Tactic: intros State Before: case inr p x : ℝ this : ∀ (x : ℝ), ‖↑x‖ = abs (x - ↑(round x)) hp : p ≠ 0 ⊢ ‖↑x‖ = abs (x - ↑(round (p⁻¹ * x)) * p) State After: case inr p x : ℝ this : ∀ (x : ℝ), ‖↑x‖ = abs (x - ↑(round x)) hp : p ≠ 0 hx : ‖↑(p⁻¹ * x)‖ = abs p⁻¹ * ‖↑x‖ ⊢ ‖↑x‖ = abs (x - ↑(round (p⁻¹ * x)) * p) Tactic: have hx := norm_coe_mul p x p⁻¹ State Before: case inr p x : ℝ this : ∀ (x : ℝ), ‖↑x‖ = abs (x - ↑(round x)) hp : p ≠ 0 hx : ‖↑(p⁻¹ * x)‖ = abs p⁻¹ * ‖↑x‖ ⊢ ‖↑x‖ = abs (x - ↑(round (p⁻¹ * x)) * p) State After: case inr p x : ℝ this : ∀ (x : ℝ), ‖↑x‖ = abs (x - ↑(round x)) hp : p ≠ 0 hx : abs p * ‖↑(p⁻¹ * x)‖ = ‖↑x‖ ⊢ ‖↑x‖ = abs (x - ↑(round (p⁻¹ * x)) * p) Tactic: rw [abs_inv, eq_inv_mul_iff_mul_eq₀ ((not_congr abs_eq_zero).mpr hp)] at hx State Before: case inr p x : ℝ this : ∀ (x : ℝ), ‖↑x‖ = abs (x - ↑(round x)) hp : p ≠ 0 hx : abs p * ‖↑(p⁻¹ * x)‖ = ‖↑x‖ ⊢ ‖↑x‖ = abs (x - ↑(round (p⁻¹ * x)) * p) State After: no goals Tactic: rw [← hx, inv_mul_cancel hp, this, ← abs_mul, mul_sub, mul_inv_cancel_left₀ hp, mul_comm p] State Before: case inl x : ℝ this : ∀ (x : ℝ), ‖↑x‖ = abs (x - ↑(round x)) ⊢ ‖↑x‖ = abs (x - ↑(round (0⁻¹ * x)) * 0) State After: no goals Tactic: simp State Before: x : ℝ ⊢ 0 ∈ lowerBounds (abs '' {m | ↑m = ↑x}) State After: no goals 
Tactic: simp [mem_lowerBounds] State Before: case a x : ℝ h₁ : BddBelow (abs '' {m | ↑m = ↑x}) h₂ : Set.Nonempty (abs '' {m | ↑m = ↑x}) ⊢ sInf (norm '' {m | ↑m = ↑x}) ≤ min (fract x) (1 - fract x) State After: case a x : ℝ h₁ : BddBelow (abs '' {m | ↑m = ↑x}) h₂ : Set.Nonempty (abs '' {m | ↑m = ↑x}) ⊢ ∀ (b : ℝ), b ∈ lowerBounds (abs '' {m | ↑m = ↑x}) → b ≤ fract x ∧ b ≤ 1 - fract x Tactic: simp_rw [Real.norm_eq_abs, csInf_le_iff h₁ h₂, le_min_iff] State Before: case a x : ℝ h₁ : BddBelow (abs '' {m | ↑m = ↑x}) h₂ : Set.Nonempty (abs '' {m | ↑m = ↑x}) ⊢ ∀ (b : ℝ), b ∈ lowerBounds (abs '' {m | ↑m = ↑x}) → b ≤ fract x ∧ b ≤ 1 - fract x State After: case a x : ℝ h₁ : BddBelow (abs '' {m | ↑m = ↑x}) h₂ : Set.Nonempty (abs '' {m | ↑m = ↑x}) b : ℝ h : b ∈ lowerBounds (abs '' {m | ↑m = ↑x}) ⊢ b ≤ fract x ∧ b ≤ 1 - fract x Tactic: intro b h State Before: case a x : ℝ h₁ : BddBelow (abs '' {m | ↑m = ↑x}) h₂ : Set.Nonempty (abs '' {m | ↑m = ↑x}) b : ℝ h : b ∈ lowerBounds (abs '' {m | ↑m = ↑x}) ⊢ b ≤ fract x ∧ b ≤ 1 - fract x State After: case a.refine'_1 x : ℝ h₁ : BddBelow (abs '' {m | ↑m = ↑x}) h₂ : Set.Nonempty (abs '' {m | ↑m = ↑x}) b : ℝ h : b ∈ lowerBounds (abs '' {m | ↑m = ↑x}) ⊢ fract x ∈ {m | ↑m = ↑x} case a.refine'_2 x : ℝ h₁ : BddBelow (abs '' {m | ↑m = ↑x}) h₂ : Set.Nonempty (abs '' {m | ↑m = ↑x}) b : ℝ h : b ∈ lowerBounds (abs '' {m | ↑m = ↑x}) ⊢ fract x - 1 ∈ {m | ↑m = ↑x} Tactic: refine' ⟨mem_lowerBounds.1 h _ ⟨fract x, _, abs_fract⟩, mem_lowerBounds.1 h _ ⟨fract x - 1, _, by rw [abs_sub_comm, abs_one_sub_fract]⟩⟩ State Before: x : ℝ h₁ : BddBelow (abs '' {m | ↑m = ↑x}) h₂ : Set.Nonempty (abs '' {m | ↑m = ↑x}) b : ℝ h : b ∈ lowerBounds (abs '' {m | ↑m = ↑x}) ⊢ abs (fract x - 1) = 1 - fract x State After: no goals Tactic: rw [abs_sub_comm, abs_one_sub_fract] State Before: case a.refine'_1 x : ℝ h₁ : BddBelow (abs '' {m | ↑m = ↑x}) h₂ : Set.Nonempty (abs '' {m | ↑m = ↑x}) b : ℝ h : b ∈ lowerBounds (abs '' {m | ↑m = ↑x}) ⊢ fract x ∈ {m | ↑m = ↑x} State After: no goals Tactic: simp only [mem_setOf, fract, sub_eq_self, QuotientAddGroup.mk_sub, QuotientAddGroup.eq_zero_iff, int_cast_mem_zmultiples_one] State Before: case a.refine'_2 x : ℝ h₁ : BddBelow (abs '' {m | ↑m = ↑x}) h₂ : Set.Nonempty (abs '' {m | ↑m = ↑x}) b : ℝ h : b ∈ lowerBounds (abs '' {m | ↑m = ↑x}) ⊢ fract x - 1 ∈ {m | ↑m = ↑x} State After: no goals Tactic: simp only [mem_setOf, fract, sub_eq_self, QuotientAddGroup.mk_sub, QuotientAddGroup.eq_zero_iff, int_cast_mem_zmultiples_one, sub_sub, (by norm_cast : (⌊x⌋ : ℝ) + 1 = (↑(⌊x⌋ + 1) : ℝ))] State Before: x : ℝ h₁ : BddBelow (abs '' {m | ↑m = ↑x}) h₂ : Set.Nonempty (abs '' {m | ↑m = ↑x}) b : ℝ h : b ∈ lowerBounds (abs '' {m | ↑m = ↑x}) ⊢ ↑⌊x⌋ + 1 = ↑(⌊x⌋ + 1) State After: no goals Tactic: norm_cast State Before: case a x : ℝ h₁ : BddBelow (abs '' {m | ↑m = ↑x}) h₂ : Set.Nonempty (abs '' {m | ↑m = ↑x}) ⊢ min (fract x) (1 - fract x) ≤ sInf (norm '' {m | ↑m = ↑x}) State After: case a x : ℝ h₁ : BddBelow (abs '' {m | ↑m = ↑x}) h₂ : Set.Nonempty (abs '' {m | ↑m = ↑x}) ⊢ ∀ (b : ℝ), b ∈ abs '' {m | ↑m = ↑x} → min (fract x) (1 - fract x) ≤ b Tactic: simp only [QuotientAddGroup.mk'_apply, Real.norm_eq_abs, le_csInf_iff h₁ h₂] State Before: case a x : ℝ h₁ : BddBelow (abs '' {m | ↑m = ↑x}) h₂ : Set.Nonempty (abs '' {m | ↑m = ↑x}) ⊢ ∀ (b : ℝ), b ∈ abs '' {m | ↑m = ↑x} → min (fract x) (1 - fract x) ≤ b State After: case a.intro.intro x : ℝ h₁ : BddBelow (abs '' {m | ↑m = ↑x}) h₂ : Set.Nonempty (abs '' {m | ↑m = ↑x}) b : ℝ hb : b ∈ {m | ↑m = ↑x} ⊢ min (fract x) (1 - fract x) ≤ abs b Tactic: 
rintro b' ⟨b, hb, rfl⟩ State Before: case a.intro.intro x : ℝ h₁ : BddBelow (abs '' {m | ↑m = ↑x}) h₂ : Set.Nonempty (abs '' {m | ↑m = ↑x}) b : ℝ hb : b ∈ {m | ↑m = ↑x} ⊢ min (fract x) (1 - fract x) ≤ abs b State After: case a.intro.intro x : ℝ h₁ : BddBelow (abs '' {m | ↑m = ↑x}) h₂ : Set.Nonempty (abs '' {m | ↑m = ↑x}) b : ℝ hb : ∃ k, ↑k = b - x ⊢ min (fract x) (1 - fract x) ≤ abs b Tactic: simp only [mem_setOf, QuotientAddGroup.eq_iff_sub_mem, mem_zmultiples_iff, smul_one_eq_coe] at hb State Before: case a.intro.intro x : ℝ h₁ : BddBelow (abs '' {m | ↑m = ↑x}) h₂ : Set.Nonempty (abs '' {m | ↑m = ↑x}) b : ℝ hb : ∃ k, ↑k = b - x ⊢ min (fract x) (1 - fract x) ≤ abs b State After: case a.intro.intro.intro x : ℝ h₁ : BddBelow (abs '' {m | ↑m = ↑x}) h₂ : Set.Nonempty (abs '' {m | ↑m = ↑x}) b : ℝ z : ℤ hz : ↑z = b - x ⊢ min (fract x) (1 - fract x) ≤ abs b Tactic: obtain ⟨z, hz⟩ := hb State Before: case a.intro.intro.intro x : ℝ h₁ : BddBelow (abs '' {m | ↑m = ↑x}) h₂ : Set.Nonempty (abs '' {m | ↑m = ↑x}) b : ℝ z : ℤ hz : ↑z = b - x ⊢ min (fract x) (1 - fract x) ≤ abs b State After: case a.intro.intro.intro x : ℝ h₁ : BddBelow (abs '' {m | ↑m = ↑x}) h₂ : Set.Nonempty (abs '' {m | ↑m = ↑x}) b : ℝ z : ℤ hz : ↑z = b - x ⊢ abs (b - ↑(round b)) ≤ abs b Tactic: rw [(by rw [hz]; abel : x = b - z), fract_sub_int, ← abs_sub_round_eq_min] State Before: case a.intro.intro.intro x : ℝ h₁ : BddBelow (abs '' {m | ↑m = ↑x}) h₂ : Set.Nonempty (abs '' {m | ↑m = ↑x}) b : ℝ z : ℤ hz : ↑z = b - x ⊢ abs (b - ↑(round b)) ≤ abs b State After: case h.e'_4.h.e'_3 x : ℝ h₁ : BddBelow (abs '' {m | ↑m = ↑x}) h₂ : Set.Nonempty (abs '' {m | ↑m = ↑x}) b : ℝ z : ℤ hz : ↑z = b - x ⊢ b = b - ↑0 Tactic: convert round_le b 0 State Before: case h.e'_4.h.e'_3 x : ℝ h₁ : BddBelow (abs '' {m | ↑m = ↑x}) h₂ : Set.Nonempty (abs '' {m | ↑m = ↑x}) b : ℝ z : ℤ hz : ↑z = b - x ⊢ b = b - ↑0 State After: no goals Tactic: simp State Before: x : ℝ h₁ : BddBelow (abs '' {m | ↑m = ↑x}) h₂ : Set.Nonempty (abs '' {m | ↑m = ↑x}) b : ℝ z : ℤ hz : ↑z = b - x ⊢ x = b - ↑z State After: x : ℝ h₁ : BddBelow (abs '' {m | ↑m = ↑x}) h₂ : Set.Nonempty (abs '' {m | ↑m = ↑x}) b : ℝ z : ℤ hz : ↑z = b - x ⊢ x = b - (b - x) Tactic: rw [hz] State Before: x : ℝ h₁ : BddBelow (abs '' {m | ↑m = ↑x}) h₂ : Set.Nonempty (abs '' {m | ↑m = ↑x}) b : ℝ z : ℤ hz : ↑z = b - x ⊢ x = b - (b - x) State After: no goals Tactic: abel
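As an aside (not part of the trace above): the statement being proved identifies the quotient norm on `AddCircle (1 : ℝ)` with the distance from a real representative to the nearest integer, ‖↑x‖ = |x - round x| = min (fract x) (1 - fract x). As a concrete check of the final equality, for x = 0.75 one has round x = 1 and fract x = 0.75, so |0.75 - 1| = 0.25 = min 0.75 (1 - 0.75).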
State Before: a b r q : Int h : 0 < b ⊢ a / b = q ∧ a % b = r ↔ r + b * q = a ∧ 0 ≤ r ∧ r < b State After: case mp a b r q : Int h : 0 < b ⊢ a / b = q ∧ a % b = r → r + b * q = a ∧ 0 ≤ r ∧ r < b case mpr a b r q : Int h : 0 < b ⊢ r + b * q = a ∧ 0 ≤ r ∧ r < b → a / b = q ∧ a % b = r Tactic: constructor State Before: case mp a b r q : Int h : 0 < b ⊢ a / b = q ∧ a % b = r → r + b * q = a ∧ 0 ≤ r ∧ r < b State After: case mp a b r q : Int h : 0 < b ⊢ a % b + b * (a / b) = a ∧ 0 ≤ a % b ∧ a % b < b Tactic: intro ⟨rfl, rfl⟩ State Before: case mp a b r q : Int h : 0 < b ⊢ a % b + b * (a / b) = a ∧ 0 ≤ a % b ∧ a % b < b State After: no goals Tactic: exact ⟨emod_add_ediv a b, emod_nonneg _ (Int.ne_of_gt h), emod_lt_of_pos _ h⟩ State Before: case mpr a b r q : Int h : 0 < b ⊢ r + b * q = a ∧ 0 ≤ r ∧ r < b → a / b = q ∧ a % b = r State After: case mpr a b r q : Int h : 0 < b hz : 0 ≤ r hb : r < b ⊢ (r + b * q) / b = q ∧ (r + b * q) % b = r Tactic: intro ⟨rfl, hz, hb⟩ State Before: case mpr a b r q : Int h : 0 < b hz : 0 ≤ r hb : r < b ⊢ (r + b * q) / b = q ∧ (r + b * q) % b = r State After: case mpr.left a b r q : Int h : 0 < b hz : 0 ≤ r hb : r < b ⊢ (r + b * q) / b = q case mpr.right a b r q : Int h : 0 < b hz : 0 ≤ r hb : r < b ⊢ (r + b * q) % b = r Tactic: constructor State Before: case mpr.left a b r q : Int h : 0 < b hz : 0 ≤ r hb : r < b ⊢ (r + b * q) / b = q State After: case mpr.left a b r q : Int h : 0 < b hz : 0 ≤ r hb : r < b ⊢ 0 + q = q Tactic: rw [Int.add_mul_ediv_left r q (Int.ne_of_gt h), ediv_eq_zero_of_lt hz hb] State Before: case mpr.left a b r q : Int h : 0 < b hz : 0 ≤ r hb : r < b ⊢ 0 + q = q State After: no goals Tactic: simp [Int.zero_add] State Before: case mpr.right a b r q : Int h : 0 < b hz : 0 ≤ r hb : r < b ⊢ (r + b * q) % b = r State After: no goals Tactic: rw [add_mul_emod_self_left, emod_eq_of_lt hz hb]
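Aside, not part of the trace above: the equivalence just proved characterises `/` and `%` (Euclidean division) by the defining properties r + b * q = a, 0 ≤ r and r < b. A concrete instance with a = 7 and b = 2 picks out q = 3 and r = 1, since 1 + 2 * 3 = 7; on literals this small the check should go through by `decide`.
example : (7 : Int) / 2 = 3 ∧ (7 : Int) % 2 = 1 := by decide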
def Nat.bit (b : Bool) (n : Nat) : Nat := cond b (2*n+1) (2*n) theorem Nat.bit_div_even (h : n % 2 = 0) : bit false (n / 2) = n := by simp [bit] have := Nat.div_add_mod n 2 simp [h] at this assumption theorem Nat.bit_div_odd (h : n % 2 ≠ 0) : bit true (n / 2) = n := by have h : n % 2 = 1 := by have := mod_lt n (by decide : 2 > 0) revert h this generalize n%2 = k match k with | 0 => decide | 1 => decide | n+2 => intros; contradiction simp [bit] have := Nat.div_add_mod n 2 simp [h] at this assumption theorem Nat.div2_lt (h : n ≠ 0) : n / 2 < n := by match n with | 1 => decide | 2 => decide | 3 => decide | n+4 => rw [div_eq, if_pos] refine succ_lt_succ (Nat.lt_trans ?_ (lt_succ_self _)) exact @div2_lt (n+2) (by simp_arith) simp_arith @[specialize] def Nat.binrec (motive : Nat → Sort u) (base : Unit → motive 0) (ind : (b : Bool) → (n : Nat) → (Unit → motive n) → motive (bit b n)) (n : Nat) : motive n := if h₁ : n = 0 then h₁ ▸ base () else if h₂ : n % 2 = 0 then bit_div_even h₂ ▸ ind false (n / 2) (fun _ => binrec motive base ind (n / 2)) else bit_div_odd h₂ ▸ ind true (n / 2) (fun _ => binrec motive base ind (n / 2)) termination_by _ n => n decreasing_by exact Nat.div2_lt h₁ theorem Nat.binind (motive : Nat → Prop) (base : motive 0) (ind : (b : Bool) → (n : Nat) → motive n → motive (bit b n)) (n : Nat) : motive n := binrec motive (fun _ => base) (fun b n ih => ind b n (ih ())) n set_option trace.compiler.ir.result true in def Nat.toBit (n : Nat) : List Bool := binrec (fun _ => List Bool) (fun _ => []) (fun b n ih => b :: ih ()) n #eval Nat.toBit 18
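An illustrative addition, not part of the file above: `toBit` lists the binary digits of its argument least-significant bit first, so the final `#eval` should print `[false, true, false, false, true]` for 18 = 0b10010. Since `binrec` is defined by well-founded recursion it does not reduce definitionally, so `native_decide` (rather than `rfl` or `decide`) is the convenient way to record this expectation as a test.
example : Nat.toBit 18 = [false, true, false, false, true] := by native_decide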
!* Copyright (c) 1998, NVIDIA CORPORATION. All rights reserved. !* !* Licensed under the Apache License, Version 2.0 (the "License"); !* you may not use this file except in compliance with the License. !* You may obtain a copy of the License at !* !* http://www.apache.org/licenses/LICENSE-2.0 !* !* Unless required by applicable law or agreed to in writing, software !* distributed under the License is distributed on an "AS IS" BASIS, !* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. !* See the License for the specific language governing permissions and !* limitations under the License. * Parallel do - simple schedule, large # of elements, > 2 processors program test parameter (NTESTS=102) integer expect(NTESTS) common/comp/ia(1000),ib(1000) call fill() call sub(NTESTS-1) !define elements 1-101, #102 is unchanged ! print 99, (ia(i), i=1,102) !99 format ((5x,'+',10(i3,','))) data expect / + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, + 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, + 91, 92, 93, 94, 95, 96, 97, 98, 99,100, +101, 0 / call check(ia, expect, NTESTS) end subroutine sub(n) common/comp/ia(1000),ib(1000) !$omp parallel do do i = 1, n ia(i) = ib(i) enddo !$omp endparalleldo end subroutine fill common/comp/ia(1000),ib(1000) do i = 1, 1000 ib(i) = iii(i) enddo end integer function iii(i) iii = i end
[STATEMENT] theorem secure: secure [PROOF STATE] proof (prove) goal (1 subgoal): 1. secure [PROOF STEP] apply (rule unwind_decomp_secure_graph[of Gr \<Delta>0]) [PROOF STATE] proof (prove) goal (4 subgoals): 1. \<forall>\<Delta>\<in>Domain Gr. \<exists>\<Delta>s\<subseteq>Domain Gr. (\<Delta>, \<Delta>s) \<in> Gr 2. \<Delta>0 \<in> Domain Gr 3. \<And>vl vl1. B vl vl1 \<Longrightarrow> \<Delta>0 istate vl istate vl1 4. \<And>\<Delta>. unwind_exit \<Delta> \<or> (\<forall>\<Delta>s. (\<Delta>, \<Delta>s) \<in> Gr \<longrightarrow> unwind_cont \<Delta> \<Delta>s) [PROOF STEP] unfolding Gr_def [PROOF STATE] proof (prove) goal (4 subgoals): 1. \<forall>\<Delta>\<in>Domain {(\<Delta>0, {\<Delta>0, \<Delta>1, \<Delta>2, \<Delta>31, \<Delta>32, \<Delta>4}), (\<Delta>1, {\<Delta>1}), (\<Delta>2, {\<Delta>2}), (\<Delta>31, {\<Delta>31, \<Delta>32}), (\<Delta>32, {\<Delta>2, \<Delta>32, \<Delta>4}), (\<Delta>4, {\<Delta>1, \<Delta>31, \<Delta>32, \<Delta>4})}. \<exists>\<Delta>s\<subseteq>Domain {(\<Delta>0, {\<Delta>0, \<Delta>1, \<Delta>2, \<Delta>31, \<Delta>32, \<Delta>4}), (\<Delta>1, {\<Delta>1}), (\<Delta>2, {\<Delta>2}), (\<Delta>31, {\<Delta>31, \<Delta>32}), (\<Delta>32, {\<Delta>2, \<Delta>32, \<Delta>4}), (\<Delta>4, {\<Delta>1, \<Delta>31, \<Delta>32, \<Delta>4})}. (\<Delta>, \<Delta>s) \<in> {(\<Delta>0, {\<Delta>0, \<Delta>1, \<Delta>2, \<Delta>31, \<Delta>32, \<Delta>4}), (\<Delta>1, {\<Delta>1}), (\<Delta>2, {\<Delta>2}), (\<Delta>31, {\<Delta>31, \<Delta>32}), (\<Delta>32, {\<Delta>2, \<Delta>32, \<Delta>4}), (\<Delta>4, {\<Delta>1, \<Delta>31, \<Delta>32, \<Delta>4})} 2. \<Delta>0 \<in> Domain {(\<Delta>0, {\<Delta>0, \<Delta>1, \<Delta>2, \<Delta>31, \<Delta>32, \<Delta>4}), (\<Delta>1, {\<Delta>1}), (\<Delta>2, {\<Delta>2}), (\<Delta>31, {\<Delta>31, \<Delta>32}), (\<Delta>32, {\<Delta>2, \<Delta>32, \<Delta>4}), (\<Delta>4, {\<Delta>1, \<Delta>31, \<Delta>32, \<Delta>4})} 3. \<And>vl vl1. B vl vl1 \<Longrightarrow> \<Delta>0 istate vl istate vl1 4. \<And>\<Delta>. unwind_exit \<Delta> \<or> (\<forall>\<Delta>s. (\<Delta>, \<Delta>s) \<in> {(\<Delta>0, {\<Delta>0, \<Delta>1, \<Delta>2, \<Delta>31, \<Delta>32, \<Delta>4}), (\<Delta>1, {\<Delta>1}), (\<Delta>2, {\<Delta>2}), (\<Delta>31, {\<Delta>31, \<Delta>32}), (\<Delta>32, {\<Delta>2, \<Delta>32, \<Delta>4}), (\<Delta>4, {\<Delta>1, \<Delta>31, \<Delta>32, \<Delta>4})} \<longrightarrow> unwind_cont \<Delta> \<Delta>s) [PROOF STEP] apply (simp, smt insert_subset order_refl) [PROOF STATE] proof (prove) goal (3 subgoals): 1. \<Delta>0 \<in> Domain {(\<Delta>0, {\<Delta>0, \<Delta>1, \<Delta>2, \<Delta>31, \<Delta>32, \<Delta>4}), (\<Delta>1, {\<Delta>1}), (\<Delta>2, {\<Delta>2}), (\<Delta>31, {\<Delta>31, \<Delta>32}), (\<Delta>32, {\<Delta>2, \<Delta>32, \<Delta>4}), (\<Delta>4, {\<Delta>1, \<Delta>31, \<Delta>32, \<Delta>4})} 2. \<And>vl vl1. B vl vl1 \<Longrightarrow> \<Delta>0 istate vl istate vl1 3. \<And>\<Delta>. unwind_exit \<Delta> \<or> (\<forall>\<Delta>s. 
(\<Delta>, \<Delta>s) \<in> {(\<Delta>0, {\<Delta>0, \<Delta>1, \<Delta>2, \<Delta>31, \<Delta>32, \<Delta>4}), (\<Delta>1, {\<Delta>1}), (\<Delta>2, {\<Delta>2}), (\<Delta>31, {\<Delta>31, \<Delta>32}), (\<Delta>32, {\<Delta>2, \<Delta>32, \<Delta>4}), (\<Delta>4, {\<Delta>1, \<Delta>31, \<Delta>32, \<Delta>4})} \<longrightarrow> unwind_cont \<Delta> \<Delta>s) [PROOF STEP] using istate_\<Delta>0 unwind_cont_\<Delta>0 unwind_cont_\<Delta>1 unwind_cont_\<Delta>31 unwind_cont_\<Delta>32 unwind_cont_\<Delta>2 unwind_cont_\<Delta>4 [PROOF STATE] proof (prove) using this: B ?vl ?vl1.0 \<Longrightarrow> \<Delta>0 istate ?vl istate ?vl1.0 unwind_cont \<Delta>0 {\<Delta>0, \<Delta>1, \<Delta>2, \<Delta>31, \<Delta>32, \<Delta>4} unwind_cont \<Delta>1 {\<Delta>1} unwind_cont \<Delta>31 {\<Delta>31, \<Delta>32} unwind_cont \<Delta>32 {\<Delta>2, \<Delta>32, \<Delta>4} unwind_cont \<Delta>2 {\<Delta>2} unwind_cont \<Delta>4 {\<Delta>1, \<Delta>31, \<Delta>32, \<Delta>4} goal (3 subgoals): 1. \<Delta>0 \<in> Domain {(\<Delta>0, {\<Delta>0, \<Delta>1, \<Delta>2, \<Delta>31, \<Delta>32, \<Delta>4}), (\<Delta>1, {\<Delta>1}), (\<Delta>2, {\<Delta>2}), (\<Delta>31, {\<Delta>31, \<Delta>32}), (\<Delta>32, {\<Delta>2, \<Delta>32, \<Delta>4}), (\<Delta>4, {\<Delta>1, \<Delta>31, \<Delta>32, \<Delta>4})} 2. \<And>vl vl1. B vl vl1 \<Longrightarrow> \<Delta>0 istate vl istate vl1 3. \<And>\<Delta>. unwind_exit \<Delta> \<or> (\<forall>\<Delta>s. (\<Delta>, \<Delta>s) \<in> {(\<Delta>0, {\<Delta>0, \<Delta>1, \<Delta>2, \<Delta>31, \<Delta>32, \<Delta>4}), (\<Delta>1, {\<Delta>1}), (\<Delta>2, {\<Delta>2}), (\<Delta>31, {\<Delta>31, \<Delta>32}), (\<Delta>32, {\<Delta>2, \<Delta>32, \<Delta>4}), (\<Delta>4, {\<Delta>1, \<Delta>31, \<Delta>32, \<Delta>4})} \<longrightarrow> unwind_cont \<Delta> \<Delta>s) [PROOF STEP] unfolding Gr_def [PROOF STATE] proof (prove) using this: B ?vl ?vl1.0 \<Longrightarrow> \<Delta>0 istate ?vl istate ?vl1.0 unwind_cont \<Delta>0 {\<Delta>0, \<Delta>1, \<Delta>2, \<Delta>31, \<Delta>32, \<Delta>4} unwind_cont \<Delta>1 {\<Delta>1} unwind_cont \<Delta>31 {\<Delta>31, \<Delta>32} unwind_cont \<Delta>32 {\<Delta>2, \<Delta>32, \<Delta>4} unwind_cont \<Delta>2 {\<Delta>2} unwind_cont \<Delta>4 {\<Delta>1, \<Delta>31, \<Delta>32, \<Delta>4} goal (3 subgoals): 1. \<Delta>0 \<in> Domain {(\<Delta>0, {\<Delta>0, \<Delta>1, \<Delta>2, \<Delta>31, \<Delta>32, \<Delta>4}), (\<Delta>1, {\<Delta>1}), (\<Delta>2, {\<Delta>2}), (\<Delta>31, {\<Delta>31, \<Delta>32}), (\<Delta>32, {\<Delta>2, \<Delta>32, \<Delta>4}), (\<Delta>4, {\<Delta>1, \<Delta>31, \<Delta>32, \<Delta>4})} 2. \<And>vl vl1. B vl vl1 \<Longrightarrow> \<Delta>0 istate vl istate vl1 3. \<And>\<Delta>. unwind_exit \<Delta> \<or> (\<forall>\<Delta>s. (\<Delta>, \<Delta>s) \<in> {(\<Delta>0, {\<Delta>0, \<Delta>1, \<Delta>2, \<Delta>31, \<Delta>32, \<Delta>4}), (\<Delta>1, {\<Delta>1}), (\<Delta>2, {\<Delta>2}), (\<Delta>31, {\<Delta>31, \<Delta>32}), (\<Delta>32, {\<Delta>2, \<Delta>32, \<Delta>4}), (\<Delta>4, {\<Delta>1, \<Delta>31, \<Delta>32, \<Delta>4})} \<longrightarrow> unwind_cont \<Delta> \<Delta>s) [PROOF STEP] by auto
/- Copyright (c) 2022 Kexing Ying. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Kexing Ying, Bhavik Mehta ! This file was ported from Lean 3 source module probability.cond_count ! leanprover-community/mathlib commit 117e93f82b5f959f8193857370109935291f0cc4 ! Please do not edit these lines, except to modify the commit id ! if you have ported upstream changes. -/ import Mathbin.Probability.ConditionalProbability /-! # Classical probability The classical formulation of probability states that the probability of an event occurring in a finite probability space is the ratio of that event to all possible events. This notion can be expressed with measure theory using the counting measure. In particular, given the sets `s` and `t`, we define the probability of `t` occurring in `s` to be `|s|⁻¹ * |s ∩ t|`. With this definition, we recover the probability over the entire sample space when `s = set.univ`. Classical probability is often used in combinatorics and we prove some useful lemmas in this file for that purpose. ## Main definition * `probability_theory.cond_count`: given a set `s`, `cond_count s` is the counting measure conditioned on `s`. This is a probability measure when `s` is finite and nonempty. ## Notes The original aim of this file is to provide a measure theoretic method of describing the probability that an element of a set `s` satisfies some predicate `P`. Our current formulation still allows us to describe this by abusing the definitional equality of sets and predicates by simply writing `cond_count s P`. We should avoid this, however, as none of the lemmas are written for predicates. -/ noncomputable section open ProbabilityTheory open MeasureTheory MeasurableSpace namespace ProbabilityTheory variable {Ω : Type _} [MeasurableSpace Ω] /-- Given a set `s`, `cond_count s` is the counting measure conditioned on `s`. In particular, `cond_count s t` is the proportion of `s` that is contained in `t`. This is a probability measure when `s` is finite and nonempty and is given by `probability_theory.cond_count_is_probability_measure`. 
-/ def condCount (s : Set Ω) : Measure Ω := Measure.count[|s] #align probability_theory.cond_count ProbabilityTheory.condCount @[simp] theorem condCount_empty_meas : (condCount ∅ : Measure Ω) = 0 := by simp [cond_count] #align probability_theory.cond_count_empty_meas ProbabilityTheory.condCount_empty_meas theorem condCount_empty {s : Set Ω} : condCount s ∅ = 0 := by simp #align probability_theory.cond_count_empty ProbabilityTheory.condCount_empty theorem finite_of_condCount_ne_zero {s t : Set Ω} (h : condCount s t ≠ 0) : s.Finite := by by_contra hs' simpa [cond_count, cond, measure.count_apply_infinite hs'] using h #align probability_theory.finite_of_cond_count_ne_zero ProbabilityTheory.finite_of_condCount_ne_zero theorem condCount_univ [Fintype Ω] {s : Set Ω} : condCount Set.univ s = Measure.count s / Fintype.card Ω := by rw [cond_count, cond_apply _ MeasurableSet.univ, ← ENNReal.div_eq_inv_mul, Set.univ_inter] congr rw [← Finset.coe_univ, measure.count_apply, finset.univ.tsum_subtype' fun _ => (1 : ENNReal)] · simp [Finset.card_univ] · exact (@Finset.coe_univ Ω _).symm ▸ MeasurableSet.univ #align probability_theory.cond_count_univ ProbabilityTheory.condCount_univ variable [MeasurableSingletonClass Ω] theorem condCountIsProbabilityMeasure {s : Set Ω} (hs : s.Finite) (hs' : s.Nonempty) : IsProbabilityMeasure (condCount s) := { measure_univ := by rw [cond_count, cond_apply _ hs.measurable_set, Set.inter_univ, ENNReal.inv_mul_cancel] · exact fun h => hs'.ne_empty <| measure.empty_of_count_eq_zero h · exact (measure.count_apply_lt_top.2 hs).Ne } #align probability_theory.cond_count_is_probability_measure ProbabilityTheory.condCountIsProbabilityMeasure theorem condCount_singleton (ω : Ω) (t : Set Ω) [Decidable (ω ∈ t)] : condCount {ω} t = if ω ∈ t then 1 else 0 := by rw [cond_count, cond_apply _ (measurable_set_singleton ω), measure.count_singleton, inv_one, one_mul] split_ifs · rw [(by simpa : ({ω} : Set Ω) ∩ t = {ω}), measure.count_singleton] · rw [(by simpa : ({ω} : Set Ω) ∩ t = ∅), measure.count_empty] #align probability_theory.cond_count_singleton ProbabilityTheory.condCount_singleton variable {s t u : Set Ω} theorem condCount_inter_self (hs : s.Finite) : condCount s (s ∩ t) = condCount s t := by rw [cond_count, cond_inter_self _ hs.measurable_set] #align probability_theory.cond_count_inter_self ProbabilityTheory.condCount_inter_self theorem condCount_self (hs : s.Finite) (hs' : s.Nonempty) : condCount s s = 1 := by rw [cond_count, cond_apply _ hs.measurable_set, Set.inter_self, ENNReal.inv_mul_cancel] · exact fun h => hs'.ne_empty <| measure.empty_of_count_eq_zero h · exact (measure.count_apply_lt_top.2 hs).Ne #align probability_theory.cond_count_self ProbabilityTheory.condCount_self theorem condCount_eq_one_of (hs : s.Finite) (hs' : s.Nonempty) (ht : s ⊆ t) : condCount s t = 1 := by haveI := cond_count_is_probability_measure hs hs' refine' eq_of_le_of_not_lt prob_le_one _ rw [not_lt, ← cond_count_self hs hs'] exact measure_mono ht #align probability_theory.cond_count_eq_one_of ProbabilityTheory.condCount_eq_one_of theorem pred_true_of_condCount_eq_one (h : condCount s t = 1) : s ⊆ t := by have hsf := finite_of_cond_count_ne_zero (by rw [h] exact one_ne_zero) rw [cond_count, cond_apply _ hsf.measurable_set, mul_comm] at h replace h := ENNReal.eq_inv_of_mul_eq_one_left h rw [inv_inv, measure.count_apply_finite _ hsf, measure.count_apply_finite _ (hsf.inter_of_left _), Nat.cast_inj] at h suffices s ∩ t = s by exact this ▸ fun x hx => hx.2 rw [← @Set.Finite.toFinset_inj _ _ _ 
(hsf.inter_of_left _) hsf] exact Finset.eq_of_subset_of_card_le (Set.Finite.toFinset_mono <| s.inter_subset_left t) h.ge #align probability_theory.pred_true_of_cond_count_eq_one ProbabilityTheory.pred_true_of_condCount_eq_one theorem condCount_eq_zero_iff (hs : s.Finite) : condCount s t = 0 ↔ s ∩ t = ∅ := by simp [cond_count, cond_apply _ hs.measurable_set, measure.count_apply_eq_top, Set.not_infinite.2 hs, measure.count_apply_finite _ (hs.inter_of_left _)] #align probability_theory.cond_count_eq_zero_iff ProbabilityTheory.condCount_eq_zero_iff theorem condCount_of_univ (hs : s.Finite) (hs' : s.Nonempty) : condCount s Set.univ = 1 := condCount_eq_one_of hs hs' s.subset_univ #align probability_theory.cond_count_of_univ ProbabilityTheory.condCount_of_univ theorem condCount_inter (hs : s.Finite) : condCount s (t ∩ u) = condCount (s ∩ t) u * condCount s t := by by_cases hst : s ∩ t = ∅ · rw [hst, cond_count_empty_meas, measure.coe_zero, Pi.zero_apply, MulZeroClass.zero_mul, cond_count_eq_zero_iff hs, ← Set.inter_assoc, hst, Set.empty_inter] rw [cond_count, cond_count, cond_apply _ hs.measurable_set, cond_apply _ hs.measurable_set, cond_apply _ (hs.inter_of_left _).MeasurableSet, mul_comm _ (measure.count (s ∩ t)), ← mul_assoc, mul_comm _ (measure.count (s ∩ t)), ← mul_assoc, ENNReal.mul_inv_cancel, one_mul, mul_comm, Set.inter_assoc] · rwa [← measure.count_eq_zero_iff] at hst · exact (measure.count_apply_lt_top.2 <| hs.inter_of_left _).Ne #align probability_theory.cond_count_inter ProbabilityTheory.condCount_inter theorem condCount_inter' (hs : s.Finite) : condCount s (t ∩ u) = condCount (s ∩ u) t * condCount s u := by rw [← Set.inter_comm] exact cond_count_inter hs #align probability_theory.cond_count_inter' ProbabilityTheory.condCount_inter' theorem condCount_union (hs : s.Finite) (htu : Disjoint t u) : condCount s (t ∪ u) = condCount s t + condCount s u := by rw [cond_count, cond_apply _ hs.measurable_set, cond_apply _ hs.measurable_set, cond_apply _ hs.measurable_set, Set.inter_union_distrib_left, measure_union, mul_add] exacts[htu.mono inf_le_right inf_le_right, (hs.inter_of_left _).MeasurableSet] #align probability_theory.cond_count_union ProbabilityTheory.condCount_union theorem condCount_compl (t : Set Ω) (hs : s.Finite) (hs' : s.Nonempty) : condCount s t + condCount s (tᶜ) = 1 := by rw [← cond_count_union hs disjoint_compl_right, Set.union_compl_self, (cond_count_is_probability_measure hs hs').measure_univ] #align probability_theory.cond_count_compl ProbabilityTheory.condCount_compl theorem condCount_disjoint_union (hs : s.Finite) (ht : t.Finite) (hst : Disjoint s t) : condCount s u * condCount (s ∪ t) s + condCount t u * condCount (s ∪ t) t = condCount (s ∪ t) u := by rcases s.eq_empty_or_nonempty with (rfl | hs') <;> rcases t.eq_empty_or_nonempty with (rfl | ht') · simp · simp [cond_count_self ht ht'] · simp [cond_count_self hs hs'] rw [cond_count, cond_count, cond_count, cond_apply _ hs.measurable_set, cond_apply _ ht.measurable_set, cond_apply _ (hs.union ht).MeasurableSet, cond_apply _ (hs.union ht).MeasurableSet, cond_apply _ (hs.union ht).MeasurableSet] conv_lhs => rw [Set.union_inter_cancel_left, Set.union_inter_cancel_right, mul_comm (measure.count (s ∪ t))⁻¹, mul_comm (measure.count (s ∪ t))⁻¹, ← mul_assoc, ← mul_assoc, mul_comm _ (measure.count s), mul_comm _ (measure.count t), ← mul_assoc, ← mul_assoc] rw [ENNReal.mul_inv_cancel, ENNReal.mul_inv_cancel, one_mul, one_mul, ← add_mul, ← measure_union, Set.union_inter_distrib_right, mul_comm] exacts[hst.mono inf_le_left 
inf_le_left, (ht.inter_of_left _).MeasurableSet, measure.count_ne_zero ht', (measure.count_apply_lt_top.2 ht).Ne, measure.count_ne_zero hs', (measure.count_apply_lt_top.2 hs).Ne] #align probability_theory.cond_count_disjoint_union ProbabilityTheory.condCount_disjoint_union /-- A version of the law of total probability for counting probabilities. -/ theorem condCount_add_compl_eq (u t : Set Ω) (hs : s.Finite) : condCount (s ∩ u) t * condCount s u + condCount (s ∩ uᶜ) t * condCount s (uᶜ) = condCount s t := by conv_rhs => rw [(by simp : s = s ∩ u ∪ s ∩ uᶜ), ← cond_count_disjoint_union (hs.inter_of_left _) (hs.inter_of_left _) (disjoint_compl_right.mono inf_le_right inf_le_right)] simp [cond_count_inter_self hs] #align probability_theory.cond_count_add_compl_eq ProbabilityTheory.condCount_add_compl_eq end ProbabilityTheory
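A small worked instance of the definition above (purely illustrative, with a finite `s`): take $s = \{1,\dots,6\}$ and $t$ the set of even numbers, so that $\mathrm{cond\_count}\ s\ t = (\mathrm{count}\ s)^{-1}\cdot \mathrm{count}(s \cap t) = \tfrac{1}{6}\cdot 3 = \tfrac{1}{2}$, the classical probability of an even outcome. In the same notation, `cond_count_add_compl_eq` is the law of total probability $\mathbb{P}(t) = \mathbb{P}(t \mid u)\,\mathbb{P}(u) + \mathbb{P}(t \mid u^{c})\,\mathbb{P}(u^{c})$ specialised to the counting measure conditioned on `s`.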
lemmas scaleR_left_imp_eq = real_vector.scale_left_imp_eq
/- Copyright (c) 2019 Neil Strickland. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Neil Strickland -/ import algebra.group_with_zero.power import algebra.big_operators.order import algebra.big_operators.ring import algebra.big_operators.intervals /-! # Partial sums of geometric series This file determines the values of the geometric series $\sum_{i=0}^{n-1} x^i$ and $\sum_{i=0}^{n-1} x^i y^{n-1-i}$ and variants thereof. ## Main definitions * `geom_sum` defines for each $x$ in a semiring and each natural number $n$ the partial sum $\sum_{i=0}^{n-1} x^i$ of the geometric series. * `geom_sum₂` defines for each $x,y$ in a semiring and each natural number $n$ the partial sum $\sum_{i=0}^{n-1} x^i y^{n-1-i}$ of the geometric series. ## Main statements * `geom_sum_Ico` proves that $\sum_{i=m}^{n-1} x^i=\frac{x^n-x^m}{x-1}$ in a division ring. * `geom_sum₂_Ico` proves that $\sum_{i=m}^{n-1} x^i y^{n-1-i}=\frac{x^n-y^{n-m}x^m}{x-y}$ in a field. Several variants are recorded, generalising in particular to the case of a noncommutative ring in which `x` and `y` commute. Even versions not using division or subtraction, valid in each semiring, are recorded. -/ universe u variable {α : Type u} open finset opposite open_locale big_operators /-- Sum of the finite geometric series $\sum_{i=0}^{n-1} x^i$. -/ def geom_sum [semiring α] (x : α) (n : ℕ) := ∑ i in range n, x ^ i theorem geom_sum_def [semiring α] (x : α) (n : ℕ) : geom_sum x n = ∑ i in range n, x ^ i := rfl @[simp] theorem geom_sum_zero [semiring α] (x : α) : geom_sum x 0 = 0 := rfl @[simp] theorem geom_sum_one [semiring α] (x : α) : geom_sum x 1 = 1 := by { rw [geom_sum_def, sum_range_one, pow_zero] } @[simp] lemma op_geom_sum [ring α] (x : α) (n : ℕ) : op (geom_sum x n) = geom_sum (op x) n := by simp [geom_sum_def] /-- Sum of the finite geometric series $\sum_{i=0}^{n-1} x^i y^{n-1-i}$. -/ def geom_sum₂ [semiring α] (x y : α) (n : ℕ) := ∑ i in range n, x ^ i * (y ^ (n - 1 - i)) theorem geom_sum₂_def [semiring α] (x y : α) (n : ℕ) : geom_sum₂ x y n = ∑ i in range n, x ^ i * y ^ (n - 1 - i) := rfl @[simp] theorem geom_sum₂_zero [semiring α] (x y : α) : geom_sum₂ x y 0 = 0 := rfl @[simp] theorem geom_sum₂_one [semiring α] (x y : α) : geom_sum₂ x y 1 = 1 := by { have : 1 - 1 - 0 = 0 := rfl, rw [geom_sum₂_def, sum_range_one, this, pow_zero, pow_zero, mul_one] } @[simp] lemma op_geom_sum₂ [ring α] (x y : α) (n : ℕ) : op (geom_sum₂ x y n) = geom_sum₂ (op y) (op x) n := begin simp only [geom_sum₂_def, op_sum, op_mul, units.op_pow], rw ← sum_range_reflect, refine sum_congr rfl (λ j j_in, _), rw [mem_range, nat.lt_iff_add_one_le] at j_in, congr, apply nat.sub_sub_self, exact nat.le_sub_right_of_add_le j_in end @[simp] theorem geom_sum₂_with_one [semiring α] (x : α) (n : ℕ) : geom_sum₂ x 1 n = geom_sum x n := sum_congr rfl (λ i _, by { rw [one_pow, mul_one] }) /-- $x^n-y^n = (x-y) \sum x^ky^{n-1-k}$ reformulated without `-` signs. 
-/ protected theorem commute.geom_sum₂_mul_add [semiring α] {x y : α} (h : commute x y) (n : ℕ) : (geom_sum₂ (x + y) y n) * x + y ^ n = (x + y) ^ n := begin let f := λ (m i : ℕ), (x + y) ^ i * y ^ (m - 1 - i), change (∑ i in range n, (f n) i) * x + y ^ n = (x + y) ^ n, induction n with n ih, { rw [range_zero, sum_empty, zero_mul, zero_add, pow_zero, pow_zero] }, { have f_last : f (n + 1) n = (x + y) ^ n := by { dsimp [f], rw [nat.sub_sub, nat.add_comm, nat.sub_self, pow_zero, mul_one] }, have f_succ : ∀ i, i ∈ range n → f (n + 1) i = y * f n i := λ i hi, by { dsimp [f], have : commute y ((x + y) ^ i) := (h.symm.add_right (commute.refl y)).pow_right i, rw [← mul_assoc, this.eq, mul_assoc, ← pow_succ y (n - 1 - i)], congr' 2, rw [nat.add_sub_cancel, nat.sub_sub, add_comm 1 i], have : i + 1 + (n - (i + 1)) = n := nat.add_sub_of_le (mem_range.mp hi), rw [add_comm (i + 1)] at this, rw [← this, nat.add_sub_cancel, add_comm i 1, ← add_assoc, nat.add_sub_cancel] }, rw [pow_succ (x + y), add_mul, sum_range_succ_comm, add_mul, f_last, add_assoc], rw (((commute.refl x).add_right h).pow_right n).eq, congr' 1, rw [sum_congr rfl f_succ, ← mul_sum, pow_succ y, mul_assoc, ← mul_add y, ih] } end theorem geom_sum₂_self {α : Type*} [comm_ring α] (x : α) (n : ℕ) : geom_sum₂ x x n = n * x ^ (n-1) := calc ∑ i in finset.range n, x ^ i * x ^ (n - 1 - i) = ∑ i in finset.range n, x ^ (i + (n - 1 - i)) : by simp_rw [← pow_add] ... = ∑ i in finset.range n, x ^ (n - 1) : finset.sum_congr rfl (λ i hi, congr_arg _ $ nat.add_sub_cancel' $ nat.le_pred_of_lt $ finset.mem_range.1 hi) ... = (finset.range n).card • (x ^ (n - 1)) : finset.sum_const _ ... = n * x ^ (n - 1) : by rw [finset.card_range, nsmul_eq_mul] /-- $x^n-y^n = (x-y) \sum x^ky^{n-1-k}$ reformulated without `-` signs. -/ theorem geom_sum₂_mul_add [comm_semiring α] (x y : α) (n : ℕ) : (geom_sum₂ (x + y) y n) * x + y ^ n = (x + y) ^ n := (commute.all x y).geom_sum₂_mul_add n theorem geom_sum_mul_add [semiring α] (x : α) (n : ℕ) : (geom_sum (x + 1) n) * x + 1 = (x + 1) ^ n := begin have := (commute.one_right x).geom_sum₂_mul_add n, rw [one_pow, geom_sum₂_with_one] at this, exact this end protected theorem commute.geom_sum₂_mul [ring α] {x y : α} (h : commute x y) (n : ℕ) : (geom_sum₂ x y n) * (x - y) = x ^ n - y ^ n := begin have := (h.sub_left (commute.refl y)).geom_sum₂_mul_add n, rw [sub_add_cancel] at this, rw [← this, add_sub_cancel] end lemma commute.mul_neg_geom_sum₂ [ring α] {x y : α} (h : commute x y) (n : ℕ) : (y - x) * (geom_sum₂ x y n) = y ^ n - x ^ n := begin rw ← op_inj_iff, simp only [op_mul, op_sub, op_geom_sum₂, units.op_pow], exact (commute.op h.symm).geom_sum₂_mul n end lemma commute.mul_geom_sum₂ [ring α] {x y : α} (h : commute x y) (n : ℕ) : (x - y) * (geom_sum₂ x y n) = x ^ n - y ^ n := by rw [← neg_sub (y ^ n), ← h.mul_neg_geom_sum₂, ← neg_mul_eq_neg_mul_symm, neg_sub] theorem geom_sum₂_mul [comm_ring α] (x y : α) (n : ℕ) : (geom_sum₂ x y n) * (x - y) = x ^ n - y ^ n := (commute.all x y).geom_sum₂_mul n theorem geom_sum_mul [ring α] (x : α) (n : ℕ) : (geom_sum x n) * (x - 1) = x ^ n - 1 := begin have := (commute.one_right x).geom_sum₂_mul n, rw [one_pow, geom_sum₂_with_one] at this, exact this end lemma mul_geom_sum [ring α] (x : α) (n : ℕ) : (x - 1) * (geom_sum x n) = x ^ n - 1 := begin rw ← op_inj_iff, simpa using geom_sum_mul (op x) n, end theorem geom_sum_mul_neg [ring α] (x : α) (n : ℕ) : (geom_sum x n) * (1 - x) = 1 - x ^ n := begin have := congr_arg has_neg.neg (geom_sum_mul x n), rw [neg_sub, ← mul_neg_eq_neg_mul_symm, neg_sub] at 
this, exact this end lemma mul_neg_geom_sum [ring α] (x : α) (n : ℕ) : (1 - x) * (geom_sum x n) = 1 - x ^ n := begin rw ← op_inj_iff, simpa using geom_sum_mul_neg (op x) n, end protected theorem commute.geom_sum₂ [division_ring α] {x y : α} (h' : commute x y) (h : x ≠ y) (n : ℕ) : (geom_sum₂ x y n) = (x ^ n - y ^ n) / (x - y) := have x - y ≠ 0, by simp [*, -sub_eq_add_neg, sub_eq_iff_eq_add] at *, by rw [← h'.geom_sum₂_mul, mul_div_cancel _ this] theorem geom₂_sum [field α] {x y : α} (h : x ≠ y) (n : ℕ) : (geom_sum₂ x y n) = (x ^ n - y ^ n) / (x - y) := (commute.all x y).geom_sum₂ h n theorem geom_sum_eq [division_ring α] {x : α} (h : x ≠ 1) (n : ℕ) : (geom_sum x n) = (x ^ n - 1) / (x - 1) := have x - 1 ≠ 0, by simp [*, -sub_eq_add_neg, sub_eq_iff_eq_add] at *, by rw [← geom_sum_mul, mul_div_cancel _ this] protected theorem commute.mul_geom_sum₂_Ico [ring α] {x y : α} (h : commute x y) {m n : ℕ} (hmn : m ≤ n) : (x - y) * (∑ i in finset.Ico m n, x ^ i * y ^ (n - 1 - i)) = x ^ n - x ^ m * y ^ (n - m) := begin rw [sum_Ico_eq_sub _ hmn, ← geom_sum₂_def], have : ∑ k in range m, x ^ k * y ^ (n - 1 - k) = ∑ k in range m, x ^ k * (y ^ (n - m) * y ^ (m - 1 - k)), { refine sum_congr rfl (λ j j_in, _), rw ← pow_add, congr, rw [mem_range, nat.lt_iff_add_one_le, add_comm] at j_in, have h' : n - m + (m - (1 + j)) = n - (1 + j) := nat.sub_add_sub_cancel hmn j_in, rw [nat.sub_sub m, h', nat.sub_sub] }, rw this, simp_rw pow_mul_comm y (n-m) _, simp_rw ← mul_assoc, rw [← sum_mul, ← geom_sum₂_def, mul_sub, h.mul_geom_sum₂, ← mul_assoc, h.mul_geom_sum₂, sub_mul, ← pow_add, nat.add_sub_of_le hmn, sub_sub_sub_cancel_right (x ^ n) (x ^ m * y ^ (n - m)) (y ^ n)], end protected theorem commute.geom_sum₂_succ_eq {α : Type u} [ring α] {x y : α} (h : commute x y) {n : ℕ} : geom_sum₂ x y (n + 1) = x ^ n + y * (geom_sum₂ x y n) := begin simp_rw [geom_sum₂, mul_sum, sum_range_succ_comm, nat.add_succ_sub_one, add_zero, nat.sub_self, pow_zero, mul_one, add_right_inj, ←mul_assoc, (h.symm.pow_right _).eq, mul_assoc, ←pow_succ], refine sum_congr rfl (λ i hi, _), suffices : n - 1 - i + 1 = n - i, { rw this }, cases n, { exact absurd (list.mem_range.mp hi) i.not_lt_zero }, { rw [nat.sub_add_eq_add_sub (nat.le_pred_of_lt (list.mem_range.mp hi)), nat.sub_add_cancel (nat.succ_le_iff.mpr n.succ_pos)] }, end theorem geom_sum₂_succ_eq {α : Type u} [comm_ring α] (x y : α) {n : ℕ} : geom_sum₂ x y (n + 1) = x ^ n + y * (geom_sum₂ x y n) := (commute.all x y).geom_sum₂_succ_eq theorem mul_geom_sum₂_Ico [comm_ring α] (x y : α) {m n : ℕ} (hmn : m ≤ n) : (x - y) * (∑ i in finset.Ico m n, x ^ i * y ^ (n - 1 - i)) = x ^ n - x ^ m * y ^ (n - m) := (commute.all x y).mul_geom_sum₂_Ico hmn protected theorem commute.geom_sum₂_Ico_mul [ring α] {x y : α} (h : commute x y) {m n : ℕ} (hmn : m ≤ n) : (∑ i in finset.Ico m n, x ^ i * y ^ (n - 1 - i)) * (x - y) = x ^ n - y ^ (n - m) * x ^ m := begin rw ← op_inj_iff, simp only [op_sub, op_mul, units.op_pow, op_sum], have : ∑ k in Ico m n, op y ^ (n - 1 - k) * op x ^ k = ∑ k in Ico m n, op x ^ k * op y ^ (n - 1 - k), { refine sum_congr rfl (λ k k_in, _), apply commute.pow_pow (commute.op h.symm) }, rw this, exact (commute.op h).mul_geom_sum₂_Ico hmn end theorem geom_sum_Ico_mul [ring α] (x : α) {m n : ℕ} (hmn : m ≤ n) : (∑ i in finset.Ico m n, x ^ i) * (x - 1) = x^n - x^m := by rw [sum_Ico_eq_sub _ hmn, ← geom_sum_def, ← geom_sum_def, sub_mul, geom_sum_mul, geom_sum_mul, sub_sub_sub_cancel_right] theorem geom_sum_Ico_mul_neg [ring α] (x : α) {m n : ℕ} (hmn : m ≤ n) : (∑ i in finset.Ico m n, x ^ i) * (1 - x) 
= x^m - x^n := by rw [sum_Ico_eq_sub _ hmn, ← geom_sum_def, ← geom_sum_def, sub_mul, geom_sum_mul_neg, geom_sum_mul_neg, sub_sub_sub_cancel_left] protected theorem commute.geom_sum₂_Ico [division_ring α] {x y : α} (h : commute x y) (hxy : x ≠ y) {m n : ℕ} (hmn : m ≤ n) : ∑ i in finset.Ico m n, x ^ i * y ^ (n - 1 - i) = (x ^ n - y ^ (n - m) * x ^ m ) / (x - y) := have x - y ≠ 0, by simp [*, -sub_eq_add_neg, sub_eq_iff_eq_add] at *, by rw [← h.geom_sum₂_Ico_mul hmn, mul_div_cancel _ this] theorem geom_sum₂_Ico [field α] {x y : α} (hxy : x ≠ y) {m n : ℕ} (hmn : m ≤ n) : ∑ i in finset.Ico m n, x ^ i * y ^ (n - 1 - i) = (x ^ n - y ^ (n - m) * x ^ m ) / (x - y) := (commute.all x y).geom_sum₂_Ico hxy hmn theorem geom_sum_Ico [division_ring α] {x : α} (hx : x ≠ 1) {m n : ℕ} (hmn : m ≤ n) : ∑ i in finset.Ico m n, x ^ i = (x ^ n - x ^ m) / (x - 1) := by simp only [sum_Ico_eq_sub _ hmn, (geom_sum_def _ _).symm, geom_sum_eq hx, div_sub_div_same, sub_sub_sub_cancel_right] theorem geom_sum_Ico' [division_ring α] {x : α} (hx : x ≠ 1) {m n : ℕ} (hmn : m ≤ n) : ∑ i in finset.Ico m n, x ^ i = (x ^ m - x ^ n) / (1 - x) := by { simp only [geom_sum_Ico hx hmn], convert neg_div_neg_eq (x^m - x^n) (1-x); abel } lemma geom_sum_inv [division_ring α] {x : α} (hx1 : x ≠ 1) (hx0 : x ≠ 0) (n : ℕ) : (geom_sum x⁻¹ n) = (x - 1)⁻¹ * (x - x⁻¹ ^ n * x) := have h₁ : x⁻¹ ≠ 1, by rwa [inv_eq_one_div, ne.def, div_eq_iff_mul_eq hx0, one_mul], have h₂ : x⁻¹ - 1 ≠ 0, from mt sub_eq_zero.1 h₁, have h₃ : x - 1 ≠ 0, from mt sub_eq_zero.1 hx1, have h₄ : x * (x ^ n)⁻¹ = (x ^ n)⁻¹ * x := nat.rec_on n (by simp) (λ n h, by rw [pow_succ, mul_inv_rev', ←mul_assoc, h, mul_assoc, mul_inv_cancel hx0, mul_assoc, inv_mul_cancel hx0]), begin rw [geom_sum_eq h₁, div_eq_iff_mul_eq h₂, ← mul_right_inj' h₃, ← mul_assoc, ← mul_assoc, mul_inv_cancel h₃], simp [mul_add, add_mul, mul_inv_cancel hx0, mul_assoc, h₄, sub_eq_add_neg, add_comm, add_left_comm], end variables {β : Type*} theorem ring_hom.map_geom_sum [semiring α] [semiring β] (x : α) (n : ℕ) (f : α →+* β) : f (geom_sum x n) = geom_sum (f x) n := by simp [geom_sum_def, f.map_sum] theorem ring_hom.map_geom_sum₂ [semiring α] [semiring β] (x y : α) (n : ℕ) (f : α →+* β) : f (geom_sum₂ x y n) = geom_sum₂ (f x) (f y) n := by simp [geom_sum₂_def, f.map_sum]
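A quick numerical check of the closed forms above (illustration only): with $x = 2$ and $n = 4$, `geom_sum 2 4` $= 1 + 2 + 4 + 8 = 15 = \frac{2^4 - 1}{2 - 1}$, as `geom_sum_eq` predicts, and with $n = 3$, `geom_sum₂ x y 3` $= y^2 + xy + x^2$, so `geom_sum₂_mul` specialises to the familiar factorisation $(x^2 + xy + y^2)(x - y) = x^3 - y^3$.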
/- Copyright (c) 2020 Kevin Lacker. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Kevin Lacker -/ import tactic.ring import data.nat.prime /-! # IMO 1959 Q1 Prove that the fraction `(21n+4)/(14n+3)` is irreducible for every natural number `n`. Since Lean doesn't have a concept of "irreducible fractions" per se, we just formalize this as saying the numerator and denominator are relatively prime. -/ open nat lemma calculation (n k : ℕ) (h1 : k ∣ 21 * n + 4) (h2 : k ∣ 14 * n + 3) : k ∣ 1 := have h3 : k ∣ 2 * (21 * n + 4), from h1.mul_left 2, have h4 : k ∣ 3 * (14 * n + 3), from h2.mul_left 3, have h5 : 3 * (14 * n + 3) = 2 * (21 * n + 4) + 1, by ring, (nat.dvd_add_right h3).mp (h5 ▸ h4) theorem imo1959_q1 : ∀ n : ℕ, coprime (21 * n + 4) (14 * n + 3) := assume n, coprime_of_dvd' $ λ k hp h1 h2, calculation n k h1 h2
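The arithmetic behind `calculation`, spelled out: $3(14n + 3) = 42n + 9 = 2(21n + 4) + 1$ (the identity `h5`), so any common divisor $k$ of $21n + 4$ and $14n + 3$ also divides $3(14n + 3) - 2(21n + 4) = 1$; hence the numerator and denominator are coprime and the fraction is already in lowest terms.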
import hilbert.wr.ka_bot import hilbert.wr.proofs.ka namespace clfrags namespace hilbert namespace wr namespace ka_bot theorem kab₁_ka {a b c d e : Prop} (h₁ : ka d e (ka a b bot)) : ka d e (ka a b c) := have h₂ : ka d e (ka d b bot), from ka.ka₇ h₁, have h₃ : ka d (ka d e b) bot, from ka.ka₄ h₂, have h₄ : ka d (ka d e b) c, from kab₁ h₃, have h₅ : ka d e a, from ka.ka₆ h₁, have h₆ : ka d e (ka d b c), from ka.ka₄' h₄, show ka d e (ka a b c), from ka.ka₅ h₅ h₆ theorem b₁ {a : Prop} (h₁ : bot) : a := have h₂ : ka bot bot bot, from ka.ka₁ h₁ h₁, have h₃ : ka bot bot a, from kab₁ h₂, have h₄ : ka bot a bot, from ka.ka₃ h₃, have h₅ : ka bot a a, from kab₁ h₄, show a, from ka.ka₂ h₅ end ka_bot end wr end hilbert end clfrags
[STATEMENT] lemma real_average_minus_first [simp]: "(a + b) / 2 - a = (b - a) / 2" for a b :: real [PROOF STATE] proof (prove) goal (1 subgoal): 1. (a + b) / 2 - a = (b - a) / 2 [PROOF STEP] by simp
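Written out, the underlying algebra is $\frac{a+b}{2} - a = \frac{a + b - 2a}{2} = \frac{b - a}{2}$, which is why `simp` closes the goal directly.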
/- Copyright (c) 2022 Heather Macbeth. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Heather Macbeth ! This file was ported from Lean 3 source module analysis.inner_product_space.l2_space ! leanprover-community/mathlib commit 46b633fd842bef9469441c0209906f6dddd2b4f5 ! Please do not edit these lines, except to modify the commit id ! if you have ported upstream changes. -/ import Mathbin.Analysis.InnerProductSpace.Projection import Mathbin.Analysis.NormedSpace.LpSpace import Mathbin.Analysis.InnerProductSpace.PiL2 /-! # Hilbert sum of a family of inner product spaces Given a family `(G : ι → Type*) [Π i, inner_product_space 𝕜 (G i)]` of inner product spaces, this file equips `lp G 2` with an inner product space structure, where `lp G 2` consists of those dependent functions `f : Π i, G i` for which `∑' i, ‖f i‖ ^ 2`, the sum of the norms-squared, is summable. This construction is sometimes called the *Hilbert sum* of the family `G`. By choosing `G` to be `ι → 𝕜`, the Hilbert space `ℓ²(ι, 𝕜)` may be seen as a special case of this construction. We also define a *predicate* `is_hilbert_sum 𝕜 G V`, where `V : Π i, G i →ₗᵢ[𝕜] E`, expressing that `V` is an `orthogonal_family` and that the associated map `lp G 2 →ₗᵢ[𝕜] E` is surjective. ## Main definitions * `orthogonal_family.linear_isometry`: Given a Hilbert space `E`, a family `G` of inner product spaces and a family `V : Π i, G i →ₗᵢ[𝕜] E` of isometric embeddings of the `G i` into `E` with mutually-orthogonal images, there is an induced isometric embedding of the Hilbert sum of `G` into `E`. * `is_hilbert_sum`: Given a Hilbert space `E`, a family `G` of inner product spaces and a family `V : Π i, G i →ₗᵢ[𝕜] E` of isometric embeddings of the `G i` into `E`, `is_hilbert_sum 𝕜 G V` means that `V` is an `orthogonal_family` and that the above linear isometry is surjective. * `is_hilbert_sum.linear_isometry_equiv`: If a Hilbert space `E` is a Hilbert sum of the inner product spaces `G i` with respect to the family `V : Π i, G i →ₗᵢ[𝕜] E`, then the corresponding `orthogonal_family.linear_isometry` can be upgraded to a `linear_isometry_equiv`. * `hilbert_basis`: We define a *Hilbert basis* of a Hilbert space `E` to be a structure whose single field `hilbert_basis.repr` is an isometric isomorphism of `E` with `ℓ²(ι, 𝕜)` (i.e., the Hilbert sum of `ι` copies of `𝕜`). This parallels the definition of `basis`, in `linear_algebra.basis`, as an isomorphism of an `R`-module with `ι →₀ R`. * `hilbert_basis.has_coe_to_fun`: More conventionally a Hilbert basis is thought of as a family `ι → E` of vectors in `E` satisfying certain properties (orthonormality, completeness). We obtain this interpretation of a Hilbert basis `b` by defining `⇑b`, of type `ι → E`, to be the image under `b.repr` of `lp.single 2 i (1:𝕜)`. This parallels the definition `basis.has_coe_to_fun` in `linear_algebra.basis`. * `hilbert_basis.mk`: Make a Hilbert basis of `E` from an orthonormal family `v : ι → E` of vectors in `E` whose span is dense. This parallels the definition `basis.mk` in `linear_algebra.basis`. * `hilbert_basis.mk_of_orthogonal_eq_bot`: Make a Hilbert basis of `E` from an orthonormal family `v : ι → E` of vectors in `E` whose span has trivial orthogonal complement. ## Main results * `lp.inner_product_space`: Construction of the inner product space instance on the Hilbert sum `lp G 2`. 
Note that from the file `analysis.normed_space.lp_space`, the space `lp G 2` already held a normed space instance (`lp.normed_space`), and if each `G i` is a Hilbert space (i.e., complete), then `lp G 2` was already known to be complete (`lp.complete_space`). So the work here is to define the inner product and show it is compatible. * `orthogonal_family.range_linear_isometry`: Given a family `G` of inner product spaces and a family `V : Π i, G i →ₗᵢ[𝕜] E` of isometric embeddings of the `G i` into `E` with mutually-orthogonal images, the image of the embedding `orthogonal_family.linear_isometry` of the Hilbert sum of `G` into `E` is the closure of the span of the images of the `G i`. * `hilbert_basis.repr_apply_apply`: Given a Hilbert basis `b` of `E`, the entry `b.repr x i` of `x`'s representation in `ℓ²(ι, 𝕜)` is the inner product `⟪b i, x⟫`. * `hilbert_basis.has_sum_repr`: Given a Hilbert basis `b` of `E`, a vector `x` in `E` can be expressed as the "infinite linear combination" `∑' i, b.repr x i • b i` of the basis vectors `b i`, with coefficients given by the entries `b.repr x i` of `x`'s representation in `ℓ²(ι, 𝕜)`. * `exists_hilbert_basis`: A Hilbert space admits a Hilbert basis. ## Keywords Hilbert space, Hilbert sum, l2, Hilbert basis, unitary equivalence, isometric isomorphism -/ open IsROrC Submodule Filter open BigOperators NNReal ENNReal Classical ComplexConjugate Topology noncomputable section variable {ι : Type _} variable {𝕜 : Type _} [IsROrC 𝕜] {E : Type _} variable [NormedAddCommGroup E] [InnerProductSpace 𝕜 E] [cplt : CompleteSpace E] variable {G : ι → Type _} [∀ i, NormedAddCommGroup (G i)] [∀ i, InnerProductSpace 𝕜 (G i)] -- mathport name: «expr⟪ , ⟫» local notation "⟪" x ", " y "⟫" => @inner 𝕜 _ _ x y -- mathport name: «exprℓ²( , )» notation "ℓ²(" ι ", " 𝕜 ")" => lp (fun i : ι => 𝕜) 2 /-! 
### Inner product space structure on `lp G 2` -/ namespace lp theorem summable_inner (f g : lp G 2) : Summable fun i => ⟪f i, g i⟫ := by -- Apply the Direct Comparison Test, comparing with ∑' i, ‖f i‖ * ‖g i‖ (summable by Hölder) refine' summable_of_norm_bounded (fun i => ‖f i‖ * ‖g i‖) (lp.summable_mul _ f g) _ · rw [Real.isConjugateExponent_iff] <;> norm_num intro i -- Then apply Cauchy-Schwarz pointwise exact norm_inner_le_norm _ _ #align lp.summable_inner lp.summable_inner instance : InnerProductSpace 𝕜 (lp G 2) := { lp.normedSpace with inner := fun f g => ∑' i, ⟪f i, g i⟫ norm_sq_eq_inner := fun f => by calc ‖f‖ ^ 2 = ‖f‖ ^ (2 : ℝ≥0∞).toReal := by norm_cast _ = ∑' i, ‖f i‖ ^ (2 : ℝ≥0∞).toReal := (lp.norm_rpow_eq_tsum _ f) _ = ∑' i, ‖f i‖ ^ 2 := by norm_cast _ = ∑' i, re ⟪f i, f i⟫ := by simp only [@norm_sq_eq_inner 𝕜] _ = re (∑' i, ⟪f i, f i⟫) := (is_R_or_C.re_clm.map_tsum _).symm _ = _ := by congr · norm_num · exact summable_inner f f conj_symm := fun f g => by calc conj _ = conj (∑' i, ⟪g i, f i⟫) := by congr _ = ∑' i, conj ⟪g i, f i⟫ := is_R_or_C.conj_cle.map_tsum _ = ∑' i, ⟪f i, g i⟫ := by simp only [inner_conj_symm] _ = _ := by congr add_left := fun f₁ f₂ g => by calc _ = ∑' i, ⟪(f₁ + f₂) i, g i⟫ := _ _ = ∑' i, ⟪f₁ i, g i⟫ + ⟪f₂ i, g i⟫ := by simp only [inner_add_left, Pi.add_apply, coe_fn_add] _ = (∑' i, ⟪f₁ i, g i⟫) + ∑' i, ⟪f₂ i, g i⟫ := (tsum_add _ _) _ = _ := by congr · congr · exact summable_inner f₁ g · exact summable_inner f₂ g smul_left := fun f g c => by calc _ = ∑' i, ⟪c • f i, g i⟫ := _ _ = ∑' i, conj c * ⟪f i, g i⟫ := by simp only [inner_smul_left] _ = conj c * ∑' i, ⟪f i, g i⟫ := tsum_mul_left _ = _ := _ · simp only [coe_fn_smul, Pi.smul_apply] · congr } theorem inner_eq_tsum (f g : lp G 2) : ⟪f, g⟫ = ∑' i, ⟪f i, g i⟫ := rfl #align lp.inner_eq_tsum lp.inner_eq_tsum theorem hasSum_inner (f g : lp G 2) : HasSum (fun i => ⟪f i, g i⟫) ⟪f, g⟫ := (summable_inner f g).HasSum #align lp.has_sum_inner lp.hasSum_inner theorem inner_single_left (i : ι) (a : G i) (f : lp G 2) : ⟪lp.single 2 i a, f⟫ = ⟪a, f i⟫ := by refine' (has_sum_inner (lp.single 2 i a) f).unique _ convert hasSum_ite_eq i ⟪a, f i⟫ ext j rw [lp.single_apply] split_ifs · subst h · simp #align lp.inner_single_left lp.inner_single_left theorem inner_single_right (i : ι) (a : G i) (f : lp G 2) : ⟪f, lp.single 2 i a⟫ = ⟪f i, a⟫ := by simpa [inner_conj_symm] using congr_arg conj (@inner_single_left _ 𝕜 _ _ _ _ i a f) #align lp.inner_single_right lp.inner_single_right end lp /-! ### Identification of a general Hilbert space `E` with a Hilbert sum -/ namespace OrthogonalFamily variable {V : ∀ i, G i →ₗᵢ[𝕜] E} (hV : OrthogonalFamily 𝕜 G V) include cplt hV protected theorem summable_of_lp (f : lp G 2) : Summable fun i => V i (f i) := by rw [hV.summable_iff_norm_sq_summable] convert(lp.memℓp f).Summable _ · norm_cast · norm_num #align orthogonal_family.summable_of_lp OrthogonalFamily.summable_of_lp /-- A mutually orthogonal family of subspaces of `E` induce a linear isometry from `lp 2` of the subspaces into `E`. 
-/ protected def linearIsometry : lp G 2 →ₗᵢ[𝕜] E where toFun f := ∑' i, V i (f i) map_add' f g := by simp only [tsum_add (hV.summable_of_lp f) (hV.summable_of_lp g), lp.coeFn_add, Pi.add_apply, LinearIsometry.map_add] map_smul' c f := by simpa only [LinearIsometry.map_smul, Pi.smul_apply, lp.coeFn_smul] using tsum_const_smul c (hV.summable_of_lp f) norm_map' f := by classical -- needed for lattice instance on `finset ι`, for `filter.at_top_ne_bot` have H : 0 < (2 : ℝ≥0∞).toReal := by norm_num suffices ‖∑' i : ι, V i (f i)‖ ^ (2 : ℝ≥0∞).toReal = ‖f‖ ^ (2 : ℝ≥0∞).toReal by exact Real.rpow_left_injOn H.ne' (norm_nonneg _) (norm_nonneg _) this refine' tendsto_nhds_unique _ (lp.hasSum_norm H f) convert(hV.summable_of_lp f).HasSum.norm.rpow_const (Or.inr H.le) ext s exact_mod_cast (hV.norm_sum f s).symm #align orthogonal_family.linear_isometry OrthogonalFamily.linearIsometry protected theorem linearIsometry_apply (f : lp G 2) : hV.LinearIsometry f = ∑' i, V i (f i) := rfl #align orthogonal_family.linear_isometry_apply OrthogonalFamily.linearIsometry_apply protected theorem hasSum_linearIsometry (f : lp G 2) : HasSum (fun i => V i (f i)) (hV.LinearIsometry f) := (hV.summable_of_lp f).HasSum #align orthogonal_family.has_sum_linear_isometry OrthogonalFamily.hasSum_linearIsometry @[simp] protected theorem linearIsometry_apply_single {i : ι} (x : G i) : hV.LinearIsometry (lp.single 2 i x) = V i x := by rw [hV.linear_isometry_apply, ← tsum_ite_eq i (V i x)] congr ext j rw [lp.single_apply] split_ifs · subst h · simp #align orthogonal_family.linear_isometry_apply_single OrthogonalFamily.linearIsometry_apply_single @[simp] protected theorem linearIsometry_apply_dfinsupp_sum_single (W₀ : Π₀ i : ι, G i) : hV.LinearIsometry (W₀.Sum (lp.single 2)) = W₀.Sum fun i => V i := by have : hV.linear_isometry (∑ i in W₀.support, lp.single 2 i (W₀ i)) = ∑ i in W₀.support, hV.linear_isometry (lp.single 2 i (W₀ i)) := hV.linear_isometry.to_linear_map.map_sum simp (config := { contextual := true }) [Dfinsupp.sum, this] #align orthogonal_family.linear_isometry_apply_dfinsupp_sum_single OrthogonalFamily.linearIsometry_apply_dfinsupp_sum_single /-- The canonical linear isometry from the `lp 2` of a mutually orthogonal family of subspaces of `E` into E, has range the closure of the span of the subspaces. -/ protected theorem range_linearIsometry [∀ i, CompleteSpace (G i)] : hV.LinearIsometry.toLinearMap.range = (⨆ i, (V i).toLinearMap.range).topologicalClosure := by refine' le_antisymm _ _ · rintro x ⟨f, rfl⟩ refine' mem_closure_of_tendsto (hV.has_sum_linear_isometry f) (eventually_of_forall _) intro s rw [SetLike.mem_coe] refine' sum_mem _ intro i hi refine' mem_supr_of_mem i _ exact LinearMap.mem_range_self _ (f i) · apply topological_closure_minimal · refine' supᵢ_le _ rintro i x ⟨x, rfl⟩ use lp.single 2 i x exact hV.linear_isometry_apply_single x exact hV.linear_isometry.isometry.uniform_inducing.is_complete_range.is_closed #align orthogonal_family.range_linear_isometry OrthogonalFamily.range_linearIsometry end OrthogonalFamily section IsHilbertSum variable (𝕜 G) (V : ∀ i, G i →ₗᵢ[𝕜] E) (F : ι → Submodule 𝕜 E) include cplt /-- Given a family of Hilbert spaces `G : ι → Type*`, a Hilbert sum of `G` consists of a Hilbert space `E` and an orthogonal family `V : Π i, G i →ₗᵢ[𝕜] E` such that the induced isometry `Φ : lp G 2 → E` is surjective. 
Keeping in mind that `lp G 2` is "the" external Hilbert sum of `G : ι → Type*`, this is analogous to `direct_sum.is_internal`, except that we don't express it in terms of actual submodules. -/ @[protect_proj] structure IsHilbertSum : Prop where ofSurjective :: OrthogonalFamily : OrthogonalFamily 𝕜 G V surjective_isometry : Function.Surjective OrthogonalFamily.LinearIsometry #align is_hilbert_sum IsHilbertSum variable {𝕜 G V} /-- If `V : Π i, G i →ₗᵢ[𝕜] E` is an orthogonal family such that the supremum of the ranges of `V i` is dense, then `(E, V)` is a Hilbert sum of `G`. -/ theorem IsHilbertSum.mk [∀ i, CompleteSpace <| G i] (hVortho : OrthogonalFamily 𝕜 G V) (hVtotal : ⊤ ≤ (⨆ i, (V i).toLinearMap.range).topologicalClosure) : IsHilbertSum 𝕜 G V := { OrthogonalFamily := hVortho surjective_isometry := by rw [← LinearIsometry.coe_toLinearMap] exact linear_map.range_eq_top.mp (eq_top_iff.mpr <| hVtotal.trans_eq hVortho.range_linear_isometry.symm) } #align is_hilbert_sum.mk IsHilbertSum.mk /-- This is `orthogonal_family.is_hilbert_sum` in the case of actual inclusions from subspaces. -/ theorem IsHilbertSum.mkInternal [∀ i, CompleteSpace <| F i] (hFortho : OrthogonalFamily 𝕜 (fun i => F i) fun i => (F i).subtypeₗᵢ) (hFtotal : ⊤ ≤ (⨆ i, F i).topologicalClosure) : IsHilbertSum 𝕜 (fun i => F i) fun i => (F i).subtypeₗᵢ := IsHilbertSum.mk hFortho (by simpa [subtypeₗᵢ_to_linear_map, range_subtype] using hFtotal) #align is_hilbert_sum.mk_internal IsHilbertSum.mkInternal /-- *A* Hilbert sum `(E, V)` of `G` is canonically isomorphic to *the* Hilbert sum of `G`, i.e `lp G 2`. Note that this goes in the opposite direction from `orthogonal_family.linear_isometry`. -/ noncomputable def IsHilbertSum.linearIsometryEquiv (hV : IsHilbertSum 𝕜 G V) : E ≃ₗᵢ[𝕜] lp G 2 := LinearIsometryEquiv.symm <| LinearIsometryEquiv.ofSurjective hV.OrthogonalFamily.LinearIsometry hV.surjective_isometry #align is_hilbert_sum.linear_isometry_equiv IsHilbertSum.linearIsometryEquiv /-- In the canonical isometric isomorphism between a Hilbert sum `E` of `G` and `lp G 2`, a vector `w : lp G 2` is the image of the infinite sum of the associated elements in `E`. -/ protected theorem IsHilbertSum.linearIsometryEquiv_symm_apply (hV : IsHilbertSum 𝕜 G V) (w : lp G 2) : hV.LinearIsometryEquiv.symm w = ∑' i, V i (w i) := by simp [IsHilbertSum.linearIsometryEquiv, OrthogonalFamily.linearIsometry_apply] #align is_hilbert_sum.linear_isometry_equiv_symm_apply IsHilbertSum.linearIsometryEquiv_symm_apply /-- In the canonical isometric isomorphism between a Hilbert sum `E` of `G` and `lp G 2`, a vector `w : lp G 2` is the image of the infinite sum of the associated elements in `E`, and this sum indeed converges. -/ protected theorem IsHilbertSum.hasSum_linearIsometryEquiv_symm (hV : IsHilbertSum 𝕜 G V) (w : lp G 2) : HasSum (fun i => V i (w i)) (hV.LinearIsometryEquiv.symm w) := by simp [IsHilbertSum.linearIsometryEquiv, OrthogonalFamily.hasSum_linearIsometry] #align is_hilbert_sum.has_sum_linear_isometry_equiv_symm IsHilbertSum.hasSum_linearIsometryEquiv_symm /-- In the canonical isometric isomorphism between a Hilbert sum `E` of `G : ι → Type*` and `lp G 2`, an "elementary basis vector" in `lp G 2` supported at `i : ι` is the image of the associated element in `E`. 
-/ @[simp] protected theorem IsHilbertSum.linearIsometryEquiv_symm_apply_single (hV : IsHilbertSum 𝕜 G V) {i : ι} (x : G i) : hV.LinearIsometryEquiv.symm (lp.single 2 i x) = V i x := by simp [IsHilbertSum.linearIsometryEquiv, OrthogonalFamily.linearIsometry_apply_single] #align is_hilbert_sum.linear_isometry_equiv_symm_apply_single IsHilbertSum.linearIsometryEquiv_symm_apply_single /-- In the canonical isometric isomorphism between a Hilbert sum `E` of `G : ι → Type*` and `lp G 2`, a finitely-supported vector in `lp G 2` is the image of the associated finite sum of elements of `E`. -/ @[simp] protected theorem IsHilbertSum.linearIsometryEquiv_symm_apply_dfinsupp_sum_single (hV : IsHilbertSum 𝕜 G V) (W₀ : Π₀ i : ι, G i) : hV.LinearIsometryEquiv.symm (W₀.Sum (lp.single 2)) = W₀.Sum fun i => V i := by simp [IsHilbertSum.linearIsometryEquiv, OrthogonalFamily.linearIsometry_apply_dfinsupp_sum_single] #align is_hilbert_sum.linear_isometry_equiv_symm_apply_dfinsupp_sum_single IsHilbertSum.linearIsometryEquiv_symm_apply_dfinsupp_sum_single /-- In the canonical isometric isomorphism between a Hilbert sum `E` of `G : ι → Type*` and `lp G 2`, a finitely-supported vector in `lp G 2` is the image of the associated finite sum of elements of `E`. -/ @[simp] protected theorem IsHilbertSum.linearIsometryEquiv_apply_dfinsupp_sum_single (hV : IsHilbertSum 𝕜 G V) (W₀ : Π₀ i : ι, G i) : (hV.LinearIsometryEquiv (W₀.Sum fun i => V i) : ∀ i, G i) = W₀ := by rw [← hV.linear_isometry_equiv_symm_apply_dfinsupp_sum_single] rw [LinearIsometryEquiv.apply_symm_apply] ext i simp (config := { contextual := true }) [Dfinsupp.sum, lp.single_apply] #align is_hilbert_sum.linear_isometry_equiv_apply_dfinsupp_sum_single IsHilbertSum.linearIsometryEquiv_apply_dfinsupp_sum_single /-- Given a total orthonormal family `v : ι → E`, `E` is a Hilbert sum of `λ i : ι, 𝕜` relative to the family of linear isometries `λ i, λ k, k • v i`. -/ theorem Orthonormal.isHilbertSum {v : ι → E} (hv : Orthonormal 𝕜 v) (hsp : ⊤ ≤ (span 𝕜 (Set.range v)).topologicalClosure) : IsHilbertSum 𝕜 (fun i : ι => 𝕜) fun i => LinearIsometry.toSpanSingleton 𝕜 E (hv.1 i) := IsHilbertSum.mk hv.OrthogonalFamily (by convert hsp simp [← LinearMap.span_singleton_eq_range, ← Submodule.span_unionᵢ]) #align orthonormal.is_hilbert_sum Orthonormal.isHilbertSum theorem Submodule.isHilbertSumOrthogonal (K : Submodule 𝕜 E) [hK : CompleteSpace K] : IsHilbertSum 𝕜 (fun b => ↥(cond b K Kᗮ)) fun b => (cond b K Kᗮ).subtypeₗᵢ := by have : ∀ b, CompleteSpace ↥(cond b K Kᗮ) := by intro b cases b <;> first |exact orthogonal.complete_space K|assumption refine' IsHilbertSum.mkInternal _ K.orthogonal_family_self _ refine' le_trans _ (Submodule.le_topologicalClosure _) rw [supᵢ_bool_eq, cond, cond] refine' Codisjoint.top_le _ exact submodule.is_compl_orthogonal_of_complete_space.codisjoint #align submodule.is_hilbert_sum_orthogonal Submodule.isHilbertSumOrthogonal end IsHilbertSum /-! ### Hilbert bases -/ section variable (ι) (𝕜) (E) /-- A Hilbert basis on `ι` for an inner product space `E` is an identification of `E` with the `lp` space `ℓ²(ι, 𝕜)`. -/ structure HilbertBasis where ofRepr :: repr : E ≃ₗᵢ[𝕜] ℓ²(ι, 𝕜) #align hilbert_basis HilbertBasis end namespace HilbertBasis instance {ι : Type _} : Inhabited (HilbertBasis ι 𝕜 ℓ²(ι, 𝕜)) := ⟨of_repr (LinearIsometryEquiv.refl 𝕜 _)⟩ /-- `b i` is the `i`th basis vector. 
-/ instance : CoeFun (HilbertBasis ι 𝕜 E) fun _ => ι → E where coe b i := b.repr.symm (lp.single 2 i (1 : 𝕜)) @[simp] protected theorem repr_symm_single (b : HilbertBasis ι 𝕜 E) (i : ι) : b.repr.symm (lp.single 2 i (1 : 𝕜)) = b i := rfl #align hilbert_basis.repr_symm_single HilbertBasis.repr_symm_single @[simp] protected theorem repr_self (b : HilbertBasis ι 𝕜 E) (i : ι) : b.repr (b i) = lp.single 2 i (1 : 𝕜) := by rw [← b.repr_symm_single, LinearIsometryEquiv.apply_symm_apply] #align hilbert_basis.repr_self HilbertBasis.repr_self protected theorem repr_apply_apply (b : HilbertBasis ι 𝕜 E) (v : E) (i : ι) : b.repr v i = ⟪b i, v⟫ := by rw [← b.repr.inner_map_map (b i) v, b.repr_self, lp.inner_single_left] simp #align hilbert_basis.repr_apply_apply HilbertBasis.repr_apply_apply @[simp] protected theorem orthonormal (b : HilbertBasis ι 𝕜 E) : Orthonormal 𝕜 b := by rw [orthonormal_iff_ite] intro i j rw [← b.repr.inner_map_map (b i) (b j), b.repr_self, b.repr_self, lp.inner_single_left, lp.single_apply] simp #align hilbert_basis.orthonormal HilbertBasis.orthonormal protected theorem hasSum_repr_symm (b : HilbertBasis ι 𝕜 E) (f : ℓ²(ι, 𝕜)) : HasSum (fun i => f i • b i) (b.repr.symm f) := by suffices H : (fun i : ι => f i • b i) = fun b_1 : ι => b.repr.symm.to_continuous_linear_equiv ((fun i : ι => lp.single 2 i (f i)) b_1) · rw [H] have : HasSum (fun i : ι => lp.single 2 i (f i)) f := lp.hasSum_single ENNReal.two_ne_top f exact (↑b.repr.symm.to_continuous_linear_equiv : ℓ²(ι, 𝕜) →L[𝕜] E).HasSum this ext i apply b.repr.injective letI : NormedSpace 𝕜 ↥(lp (fun i : ι => 𝕜) 2) := by infer_instance have : lp.single 2 i (f i * 1) = f i • lp.single 2 i 1 := lp.single_smul 2 i (1 : 𝕜) (f i) rw [mul_one] at this rw [LinearIsometryEquiv.map_smul, b.repr_self, ← this, LinearIsometryEquiv.coe_toContinuousLinearEquiv] exact (b.repr.apply_symm_apply (lp.single 2 i (f i))).symm #align hilbert_basis.has_sum_repr_symm HilbertBasis.hasSum_repr_symm protected theorem hasSum_repr (b : HilbertBasis ι 𝕜 E) (x : E) : HasSum (fun i => b.repr x i • b i) x := by simpa using b.has_sum_repr_symm (b.repr x) #align hilbert_basis.has_sum_repr HilbertBasis.hasSum_repr @[simp] protected theorem dense_span (b : HilbertBasis ι 𝕜 E) : (span 𝕜 (Set.range b)).topologicalClosure = ⊤ := by classical rw [eq_top_iff] rintro x - refine' mem_closure_of_tendsto (b.has_sum_repr x) (eventually_of_forall _) intro s simp only [SetLike.mem_coe] refine' sum_mem _ rintro i - refine' smul_mem _ _ _ exact subset_span ⟨i, rfl⟩ #align hilbert_basis.dense_span HilbertBasis.dense_span protected theorem hasSum_inner_mul_inner (b : HilbertBasis ι 𝕜 E) (x y : E) : HasSum (fun i => ⟪x, b i⟫ * ⟪b i, y⟫) ⟪x, y⟫ := by convert(b.has_sum_repr y).mapL (innerSL _ x) ext i rw [innerSL_apply, b.repr_apply_apply, inner_smul_right, mul_comm] #align hilbert_basis.has_sum_inner_mul_inner HilbertBasis.hasSum_inner_mul_inner protected theorem summable_inner_mul_inner (b : HilbertBasis ι 𝕜 E) (x y : E) : Summable fun i => ⟪x, b i⟫ * ⟪b i, y⟫ := (b.hasSum_inner_mul_inner x y).Summable #align hilbert_basis.summable_inner_mul_inner HilbertBasis.summable_inner_mul_inner protected theorem tsum_inner_mul_inner (b : HilbertBasis ι 𝕜 E) (x y : E) : (∑' i, ⟪x, b i⟫ * ⟪b i, y⟫) = ⟪x, y⟫ := (b.hasSum_inner_mul_inner x y).tsum_eq #align hilbert_basis.tsum_inner_mul_inner HilbertBasis.tsum_inner_mul_inner -- Note : this should be `b.repr` composed with an identification of `lp (λ i : ι, 𝕜) p` with -- `pi_Lp p (λ i : ι, 𝕜)` (in this case with `p = 2`), but we don't have this yet (July 
2022). /-- A finite Hilbert basis is an orthonormal basis. -/ protected def toOrthonormalBasis [Fintype ι] (b : HilbertBasis ι 𝕜 E) : OrthonormalBasis ι 𝕜 E := OrthonormalBasis.mk b.Orthonormal (by refine' Eq.ge _ have := (span 𝕜 (finset.univ.image b : Set E)).closed_of_finiteDimensional simpa only [Finset.coe_image, Finset.coe_univ, Set.image_univ, HilbertBasis.dense_span] using this.submodule_topological_closure_eq.symm) #align hilbert_basis.to_orthonormal_basis HilbertBasis.toOrthonormalBasis @[simp] theorem coe_toOrthonormalBasis [Fintype ι] (b : HilbertBasis ι 𝕜 E) : (b.toOrthonormalBasis : ι → E) = b := OrthonormalBasis.coe_mk _ _ #align hilbert_basis.coe_to_orthonormal_basis HilbertBasis.coe_toOrthonormalBasis protected theorem hasSum_orthogonalProjection {U : Submodule 𝕜 E} [CompleteSpace U] (b : HilbertBasis ι 𝕜 U) (x : E) : HasSum (fun i => ⟪(b i : E), x⟫ • b i) (orthogonalProjection U x) := by simpa only [b.repr_apply_apply, inner_orthogonalProjection_eq_of_mem_left] using b.has_sum_repr (orthogonalProjection U x) #align hilbert_basis.has_sum_orthogonal_projection HilbertBasis.hasSum_orthogonalProjection theorem finite_spans_dense (b : HilbertBasis ι 𝕜 E) : (⨆ J : Finset ι, span 𝕜 (J.image b : Set E)).topologicalClosure = ⊤ := eq_top_iff.mpr <| b.dense_span.ge.trans (by simp_rw [← Submodule.span_unionᵢ] exact topological_closure_mono (span_mono <| set.range_subset_iff.mpr fun i => Set.mem_unionᵢ_of_mem {i} <| finset.mem_coe.mpr <| Finset.mem_image_of_mem _ <| Finset.mem_singleton_self i)) #align hilbert_basis.finite_spans_dense HilbertBasis.finite_spans_dense variable {v : ι → E} (hv : Orthonormal 𝕜 v) include hv cplt /-- An orthonormal family of vectors whose span is dense in the whole module is a Hilbert basis. -/ protected def mk (hsp : ⊤ ≤ (span 𝕜 (Set.range v)).topologicalClosure) : HilbertBasis ι 𝕜 E := HilbertBasis.of_repr <| (hv.IsHilbertSum hsp).LinearIsometryEquiv #align hilbert_basis.mk HilbertBasis.mk theorem Orthonormal.linearIsometryEquiv_symm_apply_single_one (h i) : (hv.IsHilbertSum h).LinearIsometryEquiv.symm (lp.single 2 i 1) = v i := by rw [IsHilbertSum.linearIsometryEquiv_symm_apply_single, LinearIsometry.toSpanSingleton_apply, one_smul] #align orthonormal.linear_isometry_equiv_symm_apply_single_one Orthonormal.linearIsometryEquiv_symm_apply_single_one @[simp] protected theorem coe_mk (hsp : ⊤ ≤ (span 𝕜 (Set.range v)).topologicalClosure) : ⇑(HilbertBasis.mk hv hsp) = v := by apply funext <| Orthonormal.linearIsometryEquiv_symm_apply_single_one hv hsp #align hilbert_basis.coe_mk HilbertBasis.coe_mk /-- An orthonormal family of vectors whose span has trivial orthogonal complement is a Hilbert basis. -/ protected def mkOfOrthogonalEqBot (hsp : (span 𝕜 (Set.range v))ᗮ = ⊥) : HilbertBasis ι 𝕜 E := HilbertBasis.mk hv (by rw [← orthogonal_orthogonal_eq_closure, ← eq_top_iff, orthogonal_eq_top_iff, hsp]) #align hilbert_basis.mk_of_orthogonal_eq_bot HilbertBasis.mkOfOrthogonalEqBot @[simp] protected theorem coe_of_orthogonal_eq_bot_mk (hsp : (span 𝕜 (Set.range v))ᗮ = ⊥) : ⇑(HilbertBasis.mkOfOrthogonalEqBot hv hsp) = v := HilbertBasis.coe_mk hv _ #align hilbert_basis.coe_of_orthogonal_eq_bot_mk HilbertBasis.coe_of_orthogonal_eq_bot_mk omit hv -- Note : this should be `b.repr` composed with an identification of `lp (λ i : ι, 𝕜) p` with -- `pi_Lp p (λ i : ι, 𝕜)` (in this case with `p = 2`), but we don't have this yet (July 2022). /-- An orthonormal basis is an Hilbert basis. 
-/ protected def OrthonormalBasis.toHilbertBasis [Fintype ι] (b : OrthonormalBasis ι 𝕜 E) : HilbertBasis ι 𝕜 E := HilbertBasis.mk b.Orthonormal <| by simpa only [← OrthonormalBasis.coe_toBasis, b.to_basis.span_eq, eq_top_iff] using @subset_closure E _ _ #align orthonormal_basis.to_hilbert_basis OrthonormalBasis.toHilbertBasis @[simp] theorem OrthonormalBasis.coe_toHilbertBasis [Fintype ι] (b : OrthonormalBasis ι 𝕜 E) : (b.toHilbertBasis : ι → E) = b := HilbertBasis.coe_mk _ _ #align orthonormal_basis.coe_to_hilbert_basis OrthonormalBasis.coe_toHilbertBasis /-- A Hilbert space admits a Hilbert basis extending a given orthonormal subset. -/ theorem Orthonormal.exists_hilbertBasis_extension {s : Set E} (hs : Orthonormal 𝕜 (coe : s → E)) : ∃ (w : Set E)(b : HilbertBasis w 𝕜 E), s ⊆ w ∧ ⇑b = (coe : w → E) := let ⟨w, hws, hw_ortho, hw_max⟩ := exists_maximal_orthonormal hs ⟨w, HilbertBasis.mkOfOrthogonalEqBot hw_ortho (by simpa [maximal_orthonormal_iff_orthogonal_complement_eq_bot hw_ortho] using hw_max), hws, HilbertBasis.coe_of_orthogonal_eq_bot_mk _ _⟩ #align orthonormal.exists_hilbert_basis_extension Orthonormal.exists_hilbertBasis_extension variable (𝕜 E) /-- A Hilbert space admits a Hilbert basis. -/ theorem exists_hilbertBasis : ∃ (w : Set E)(b : HilbertBasis w 𝕜 E), ⇑b = (coe : w → E) := let ⟨w, hw, hw', hw''⟩ := (orthonormal_empty 𝕜 E).exists_hilbertBasis_extension ⟨w, hw, hw''⟩ #align exists_hilbert_basis exists_hilbertBasis end HilbertBasis
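Two standard consequences, stated informally in the notation of this file (a reading aid, assuming the usual mathlib convention that the inner product is conjugate-linear in its first argument): for a Hilbert basis $b$ of `E`, `hilbert_basis.has_sum_inner_mul_inner` gives $\langle x, y\rangle = \sum_i \langle x, b\,i\rangle\,\langle b\,i, y\rangle$, and taking $y = x$ yields Parseval's identity $\|x\|^2 = \sum_i |\langle b\,i, x\rangle|^2$.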
Formal statement is: lemma countably_compactI: assumes "\<And>C. \<forall>t\<in>C. open t \<Longrightarrow> s \<subseteq> \<Union>C \<Longrightarrow> countable C \<Longrightarrow> (\<exists>C'\<subseteq>C. finite C' \<and> s \<subseteq> \<Union>C')" shows "countably_compact s" Informal statement is: If every countable open cover of $s$ has a finite subcover, then $s$ is countably compact.
theory func_cor_other imports func_cor_lemma begin section \<open>Functional correctness of Schedule\<close> thm Schedule_def lemma Schedule_satRG_h1: "\<Gamma> \<turnstile>\<^sub>I Some (IF \<exists>y. \<acute>cur = Some y THEN \<acute>thd_state := \<acute>thd_state(the \<acute>cur := READY);; Basic (cur_update Map.empty) FI;; Basic (cur_update (\<lambda>_. Some t));; \<acute>thd_state := \<acute>thd_state (t := RUNNING)) sat\<^sub>p [\<lbrace>\<acute>inv\<rbrace> \<inter> \<lbrace>\<acute>thd_state t = READY\<rbrace> \<inter> {V}, {(s, t). s = t}, UNIV, \<lbrace>\<acute>(Pair V) \<in> Schedule_guar\<rbrace> \<inter> \<lbrace>\<acute>inv\<rbrace>]" apply(case_tac "\<lbrace>\<acute>inv\<rbrace> \<inter> \<lbrace>\<acute>thd_state t = READY\<rbrace> \<inter> {V} = {}") using Emptyprecond apply auto[1] apply simp apply(case_tac "\<exists>y. cur V = Some y") apply(rule Seq[where mid = "{V\<lparr>thd_state := (thd_state V)(the (cur V) := READY), cur := Some t\<rparr>}"]) apply(rule Seq[where mid = "{V\<lparr>thd_state := (thd_state V)(the (cur V) := READY), cur := None\<rparr>}"]) apply(rule Cond) apply(simp add:stable_def) apply(rule Seq[where mid = "{V\<lparr>thd_state := (thd_state V)(the (cur V) := READY)\<rparr>}"]) apply(rule Basic) apply auto[1] apply(simp add:stable_def)+ apply(rule Basic) apply auto[1] apply(simp add:stable_def)+ apply(simp add:Skip_def) apply(rule Basic) apply(simp add:stable_def)+ apply(rule Basic) apply auto[1] apply(simp add:stable_def)+ apply(rule Basic) apply(simp add:Schedule_guar_def) apply(subgoal_tac "inv (V\<lparr>cur := Some t, thd_state := (thd_state V)(the (cur V) := READY, t := RUNNING)\<rparr>) \<and> (\<forall>x. (V, V\<lparr>cur := Some t, thd_state := (thd_state V)(the (cur V) := READY, t := RUNNING)\<rparr>) \<in> lvars_nochange_rel x)") apply simp apply(rule conjI) apply(simp add:inv_def) apply clarify apply(rule conjI) apply(simp add:inv_cur_def) apply force apply(simp add:inv_thd_waitq_def inv_cur_def) apply (metis Thread_State_Type.distinct(3) Thread_State_Type.distinct(6)) apply auto[1] using lvars_nochange_rel_def lvars_nochange_def apply simp apply(simp add: stable_def)+ apply(rule Seq[where mid = "{V\<lparr>cur := Some t\<rparr>}"]) apply(rule Seq[where mid = "{V}"]) apply(rule Cond) apply(simp add:stable_def) apply(rule Seq[where mid = "{}"]) apply(rule Basic) apply auto[1] apply(simp add:stable_def)+ apply(rule Basic) apply auto[1] apply(simp add:stable_def)+ apply(simp add:Skip_def) apply(rule Basic) apply(simp add:stable_def)+ apply(rule Basic) apply auto[1] apply(simp add:stable_def)+ apply(rule Basic) apply(simp add:Schedule_guar_def) apply(subgoal_tac "inv (V\<lparr>cur := Some t, thd_state := (thd_state V)(t := RUNNING)\<rparr>) \<and> (\<forall>x. 
(V, V\<lparr>cur := Some t, thd_state := (thd_state V)(t := RUNNING)\<rparr>) \<in> lvars_nochange_rel x)") apply simp apply(rule conjI) apply(simp add:inv_def) apply clarify apply(rule conjI) apply(simp add:inv_cur_def) apply(simp add:inv_thd_waitq_def) apply auto[1] apply auto[1] using lvars_nochange_rel_def lvars_nochange_def apply simp apply(simp add:stable_def)+ done lemma Schedule_satRG: "\<Gamma> (Schedule t) \<turnstile> Schedule_RGCond t" apply(simp add:Evt_sat_RG_def) apply (simp add: Schedule_def Schedule_RGCond_def) apply(rule BasicEvt) apply(simp add:body_def Pre\<^sub>f_def Post\<^sub>f_def guard_def Rely\<^sub>f_def Guar\<^sub>f_def getrgformula_def) apply(rule Await) using stable_inv_sched_rely1 apply simp using stable_inv_sched_rely1 apply simp using Schedule_satRG_h1 apply simp apply(simp add:Pre\<^sub>f_def Rely\<^sub>f_def getrgformula_def) using stable_inv_sched_rely1 apply simp by(simp add:Guar\<^sub>f_def getrgformula_def Schedule_guar_def) section \<open>Functional correctness of Tick\<close> lemma Tick_satRG: "\<Gamma> Tick \<turnstile> Tick_RGCond" apply(simp add:Evt_sat_RG_def) apply (simp add: Tick_def Tick_RGCond_def Tick_rely_def Tick_guar_def) apply(rule BasicEvt) apply(simp add:body_def Pre\<^sub>f_def Post\<^sub>f_def guard_def Rely\<^sub>f_def Guar\<^sub>f_def getrgformula_def) apply(rule Basic) apply simp using lvars_nochange_rel_def lvars_nochange_def apply simp apply auto[1] apply(simp add:stable_def)+ apply(simp add: stable_def Pre\<^sub>f_def getrgformula_def Rely\<^sub>f_def) apply auto[1] by (simp add: Guar\<^sub>f_def getrgformula_def) end
chapter \<open>Basic Control Flow\<close> theory PhiSem_CF_Basic imports PhiSem_Generic_Boolean begin section \<open>Instructions\<close> subsection \<open>Non-Branching Selection\<close> definition op_sel :: "TY \<Rightarrow> (VAL \<times> VAL \<times> VAL, VAL) proc'" where "op_sel TY = \<phi>M_caseV (\<lambda>vc. \<phi>M_caseV (\<lambda>va vb. \<phi>M_getV bool V_bool.dest vc (\<lambda>c. \<phi>M_getV TY id va (\<lambda>a. \<phi>M_getV TY id vb (\<lambda>b. Return (\<phi>arg (if c then b else a)))))))" subsection \<open>Branch\<close> definition op_if :: "'ret proc \<Rightarrow> 'ret proc \<Rightarrow> (VAL,'ret::VALs) proc'" where "op_if brT brF v = \<phi>M_getV bool V_bool.dest v (\<lambda>c. (if c then brT else brF))" subsection \<open>While Loop\<close> inductive SemDoWhile :: "VAL proc \<Rightarrow> resource \<Rightarrow> unit comp \<Rightarrow> bool" where "Success (\<phi>arg (V_bool.mk False)) res \<in> f s \<Longrightarrow> SemDoWhile f s (Success (\<phi>arg ()) res)" | "Success (\<phi>arg (V_bool.mk True)) res \<in> f s \<Longrightarrow> SemDoWhile f res s'' \<Longrightarrow> SemDoWhile f s s''" | "Abnormality v e \<in> f s \<Longrightarrow> SemDoWhile f s (Abnormality v e)" | "NonTerm \<in> f s \<Longrightarrow> SemDoWhile f s NonTerm" | "AssumptionBroken \<in> f s \<Longrightarrow> SemDoWhile f s AssumptionBroken" | "Invalid \<in> f s \<Longrightarrow> SemDoWhile f s Invalid" lemma "\<nexists> y. SemDoWhile ((\<lambda>res. Return (\<phi>arg (V_bool.mk True)) res) :: VAL proc) res y" apply rule apply (elim exE) subgoal for y apply (induct "((\<lambda>res. Return (\<phi>arg (V_bool.mk True)) (res::resource)) :: VAL proc)" res y rule: SemDoWhile.induct) apply (simp_all add: Return_def det_lift_def) . . definition op_do_while :: " VAL proc \<Rightarrow> unit proc" where "op_do_while f s = Collect (SemDoWhile f s)" (* lemma SemDoWhile_deterministic: assumes "SemDoWhile c s s1" and "SemDoWhile c s s2" shows "s1 = s2" proof - have "SemDoWhile c s s1 \<Longrightarrow> (\<forall>s2. SemDoWhile c s s2 \<longrightarrow> s1 = s2)" apply (induct rule: SemDoWhile.induct) by (subst SemDoWhile.simps, simp)+ thus ?thesis using assms by simp qed lemma SemDoWhile_deterministic2: "SemDoWhile body s x \<Longrightarrow> The ( SemDoWhile body s) = x" using SemDoWhile_deterministic by blast *) subsection \<open>Recursion\<close> inductive SemRec :: "(('a,'a) proc' \<Rightarrow> ('a,'a) proc') \<Rightarrow> 'a \<phi>arg \<Rightarrow> resource \<Rightarrow> 'a comp set \<Rightarrow> bool" where SemRec_I0: "(\<And>g. F g x res = y) \<Longrightarrow> SemRec F x res y" | SemRec_IS: "SemRec (F o F) x res y \<Longrightarrow> SemRec F x res y" definition op_fix_point :: "(('a,'a) proc' \<Rightarrow> ('a,'a) proc') \<Rightarrow> ('a,'a) proc'" where "op_fix_point F x s = (if (\<exists>t. SemRec F x s t) then The (SemRec F x s) else {})" subsubsection \<open>Simple Properties\<close> lemma SemRec_IR: "SemRec F x r y \<Longrightarrow> SemRec (F o F) x r y" by (induct rule: SemRec.induct, rule SemRec_I0, simp) lemma SemRec_deterministic: assumes "SemRec c s r s1" and "SemRec c s r s2" shows "s1 = s2" proof - have "SemRec c s r s1 \<Longrightarrow> (\<forall>s2. 
SemRec c s r s2 \<longrightarrow> s1 = s2)" apply (induct rule: SemRec.induct) apply clarify subgoal for F a b y s2 apply (rotate_tac 1) apply (induct rule: SemRec.induct) by auto apply clarify apply (blast intro: SemRec_IR) done thus ?thesis using assms by simp qed lemma SemRec_deterministic2: " SemRec body s r x \<Longrightarrow> The (SemRec body s r) = x" using SemRec_deterministic by (metis theI_unique) section \<open>Abstraction of Procedures\<close> subsubsection \<open>Syntax for Annotations\<close> consts Invariant :: \<open>bool \<Rightarrow> bool\<close> ("Inv: _" [100] 36) consts Guard :: \<open>bool \<Rightarrow> bool\<close> ("Guard: _" [100] 36) consts End :: \<open>bool \<Rightarrow> bool\<close> ("End: _" [100] 36) subsection \<open>Branch-like\<close> lemma op_sel_\<phi>app: \<open> \<phi>SemType (a \<Ztypecolon> A) TY \<Longrightarrow> \<phi>SemType (b \<Ztypecolon> B) TY \<Longrightarrow> \<p>\<r>\<o>\<c> op_sel TY (\<phi>V_pair rawc (\<phi>V_pair rawb rawa)) \<lbrace> a \<Ztypecolon> \<v>\<a>\<l>[rawa] A\<heavy_comma> b \<Ztypecolon> \<v>\<a>\<l>[rawb] B\<heavy_comma> c \<Ztypecolon> \<v>\<a>\<l>[rawc] \<bool> \<longmapsto> (if c then a else b) \<Ztypecolon> \<v>\<a>\<l> (if c then A else B) \<rbrace>\<close> unfolding op_sel_def by (cases rawc; cases rawb; cases rawa; cases c; simp add: \<phi>SemType_def subset_iff, rule, rule, rule, simp add: \<phi>expns WT_bool, blast, rule, simp add: \<phi>expns WT_bool, rule, simp add: \<phi>expns WT_bool, rule, simp add: \<phi>expns WT_bool) lemma branch_\<phi>app: \<open> (\<p>\<r>\<e>\<m>\<i>\<s>\<e> C \<longrightarrow> \<p>\<r>\<o>\<c> br\<^sub>T \<lbrace> X \<longmapsto> Y\<^sub>T \<rbrace> \<t>\<h>\<r>\<o>\<w>\<s> E\<^sub>T ) \<Longrightarrow> (\<p>\<r>\<e>\<m>\<i>\<s>\<e> \<not> C \<longrightarrow> \<p>\<r>\<o>\<c> br\<^sub>F \<lbrace> X \<longmapsto> Y\<^sub>F \<rbrace> \<t>\<h>\<r>\<o>\<w>\<s> E\<^sub>F ) \<Longrightarrow> (\<And>v. If C (Y\<^sub>T v) (Y\<^sub>F v) \<i>\<m>\<p>\<l>\<i>\<e>\<s> Y v @action invoke_branch_convergence) \<Longrightarrow> \<p>\<r>\<o>\<c> op_if br\<^sub>T br\<^sub>F rawc \<lbrace> X\<heavy_comma> C \<Ztypecolon> \<v>\<a>\<l>[rawc] \<bool> \<longmapsto> Y \<rbrace> \<t>\<h>\<r>\<o>\<w>\<s> (\<lambda>e. (E\<^sub>T e \<s>\<u>\<b>\<j> C) + (E\<^sub>F e \<s>\<u>\<b>\<j> \<not> C)) \<close> unfolding op_if_def Premise_def Action_Tag_def apply (cases rawc; cases C; simp; rule; simp add: \<phi>expns WT_bool) using \<phi>CONSEQ view_shift_by_implication view_shift_refl by blast+ proc "if": assumes C: \<open>\<p>\<r>\<o>\<c> cond \<lbrace> X \<longmapsto> X1\<heavy_comma> \<v>\<a>\<l> C \<Ztypecolon> \<bool> \<rbrace> \<t>\<h>\<r>\<o>\<w>\<s> E \<close> and brT: \<open>\<p>\<r>\<e>\<m>\<i>\<s>\<e> C \<longrightarrow> \<p>\<r>\<o>\<c> brT \<lbrace> X1 \<longmapsto> Y\<^sub>T \<rbrace> \<t>\<h>\<r>\<o>\<w>\<s> E\<^sub>T \<close> and brF: \<open>\<p>\<r>\<e>\<m>\<i>\<s>\<e> \<not> C \<longrightarrow> \<p>\<r>\<o>\<c> brF \<lbrace> X1 \<longmapsto> Y\<^sub>F \<rbrace> \<t>\<h>\<r>\<o>\<w>\<s> E\<^sub>F \<close> and BC: \<open>(\<And>v. If C (Y\<^sub>T v) (Y\<^sub>F v) \<i>\<m>\<p>\<l>\<i>\<e>\<s> Y v @action invoke_branch_convergence)\<close> input \<open>X\<close> output \<open>Y\<close> throws \<open>E + E\<^sub>T + E\<^sub>F\<close> \<medium_left_bracket> C branch brT brF BC \<medium_right_bracket>. . subsection \<open>Loops\<close> lemma "__DoWhile__rule_\<phi>app": " \<p>\<r>\<o>\<c> body \<lbrace> X x \<s>\<u>\<b>\<j> x. P x \<longmapsto> (\<exists>*x'. 
X x' \<heavy_comma> \<v>\<a>\<l> P x' \<Ztypecolon> \<bool>) \<rbrace> \<t>\<h>\<r>\<o>\<w>\<s> E \<Longrightarrow> \<p>\<r>\<o>\<c> op_do_while body \<lbrace> X x \<s>\<u>\<b>\<j> x. P x \<longmapsto> X x' \<s>\<u>\<b>\<j> x'. \<not> P x' \<rbrace> \<t>\<h>\<r>\<o>\<w>\<s> E " unfolding op_do_while_def \<phi>Procedure_def apply (simp add: subset_iff LooseStateSpec_expn') apply (rule allI impI conjI)+ subgoal for comp R s apply (rotate_tac 2) apply (induct body comp s rule: SemDoWhile.induct; clarsimp simp add: \<phi>expns times_list_def) apply fastforce subgoal premises prems for res f s s'' c u v proof - have t1: \<open>\<exists>c. (\<exists>fic. (\<exists>u v. fic = u * v \<and> u \<in> R \<and> v \<in> X c \<and> u ## v) \<and> s \<in> INTERP_RES fic) \<and> P c\<close> using prems(5) prems(6) prems(7) prems(8) prems(9) by blast show ?thesis apply (insert \<open>\<forall>_ _. (\<exists>_. _) \<longrightarrow> _\<close>[THEN spec[where x=s], THEN spec[where x=R], THEN mp, OF t1]) using prems(1) prems(3) by fastforce qed apply fastforce by blast . proc (nodef) do_while: assumes \<open>\<p>\<a>\<r>\<a>\<m> ( X' x \<s>\<u>\<b>\<j> x. Inv: invariant x \<and> Guard: cond x)\<close> and V: \<open>X \<i>\<m>\<p>\<l>\<i>\<e>\<s> ( X' x \<s>\<u>\<b>\<j> x. invariant x \<and> cond x) \<a>\<n>\<d> Any @action ToSA\<close> assumes B: \<open>\<forall>x. \<p>\<r>\<e>\<m>\<i>\<s>\<e> cond x \<longrightarrow> \<p>\<r>\<e>\<m>\<i>\<s>\<e> invariant x \<longrightarrow> \<p>\<r>\<o>\<c> body \<lbrace> X' x \<longmapsto> (X' x'\<heavy_comma> \<v>\<a>\<l> cond x' \<Ztypecolon> \<bool> \<s>\<u>\<b>\<j> x'. invariant x') \<rbrace> \<t>\<h>\<r>\<o>\<w>\<s> E \<close> input \<open>X\<close> output \<open>X' x' \<s>\<u>\<b>\<j> x'. invariant x' \<and> \<not> cond x'\<close> throws E \<medium_left_bracket> V[unfolded Action_Tag_def] "__DoWhile__rule_\<phi>app"[where P=cond and X=\<open>\<lambda>x'. X' x' \<s>\<u>\<b>\<j> invariant x'\<close>, simplified] \<medium_left_bracket> B \<medium_right_bracket>. \<medium_right_bracket> by simp . proc while: assumes \<open>\<p>\<a>\<r>\<a>\<m> ( X x \<s>\<u>\<b>\<j> x. Inv: invariant x \<and> Guard: cond x)\<close> assumes V[unfolded Action_Tag_def]: "X' \<i>\<m>\<p>\<l>\<i>\<e>\<s> ((X x \<r>\<e>\<m>\<a>\<i>\<n>\<s> R) \<s>\<u>\<b>\<j> x. invariant x) \<a>\<n>\<d> Any @action ToSA" and C: "\<forall>x. \<p>\<r>\<e>\<m>\<i>\<s>\<e> invariant x \<longrightarrow> \<p>\<r>\<o>\<c> Cond \<lbrace> R\<heavy_comma> X x \<longmapsto> R\<heavy_comma> X x'\<heavy_comma> \<v>\<a>\<l> cond x' \<Ztypecolon> \<bool> \<s>\<u>\<b>\<j> x'. invariant x' \<rbrace> \<t>\<h>\<r>\<o>\<w>\<s> E1" and B: "\<forall>x. \<p>\<r>\<e>\<m>\<i>\<s>\<e> invariant x \<longrightarrow> \<p>\<r>\<e>\<m>\<i>\<s>\<e> cond x \<longrightarrow> \<p>\<r>\<o>\<c> Body \<lbrace> R\<heavy_comma> X x \<longmapsto> R\<heavy_comma> X x' \<s>\<u>\<b>\<j> x'. invariant x' \<rbrace> \<t>\<h>\<r>\<o>\<w>\<s> E2" input \<open>X'\<close> output \<open>R\<heavy_comma> X x \<s>\<u>\<b>\<j> x. invariant x \<and> \<not> cond x\<close> throws \<open>E1 + E2\<close> \<medium_left_bracket> V C branch \<medium_left_bracket> do_while \<open>R\<heavy_comma> X vars \<s>\<u>\<b>\<j> vars. Inv: invariant vars \<and> Guard: cond vars\<close> \<medium_left_bracket> B C \<medium_right_bracket>. \<medium_right_bracket>. \<medium_left_bracket> \<medium_right_bracket> for \<open>R\<heavy_comma> X vars \<s>\<u>\<b>\<j> vars. invariant vars \<and> \<not> cond vars\<close> .. \<medium_right_bracket> .. . 
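The while procedure above is derived by evaluating the guard once and, when it holds, entering the annotated do-while whose body re-runs Body followed by the guard. A minimal Python sketch of that derivation, with plain state-transforming functions standing in for the φ-procedures (all names here are illustrative):

from typing import Callable, Tuple, TypeVar

S = TypeVar("S")

def do_while(body: Callable[[S], Tuple[S, bool]], s: S) -> S:
    # run the body at least once, repeating while it reports True
    # (the shape of op_do_while / the __DoWhile__ rule)
    s, again = body(s)
    while again:
        s, again = body(s)
    return s

def while_loop(cond: Callable[[S], bool], body: Callable[[S], S], s: S) -> S:
    # while built from do-while: test the guard up front, then loop a body
    # that ends by re-evaluating the guard
    if not cond(s):
        return s
    def step(st: S) -> Tuple[S, bool]:
        st = body(st)
        return st, cond(st)
    return do_while(step, s)

assert while_loop(lambda n: n < 5, lambda n: n + 1, 0) == 5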
(* We fail to infer the abstraction of the loop guard automatically but require users to give by an annotation. The main difficulty is about the nondeterminancy in higher-order unification. In \<^term>\<open>cond x' \<Ztypecolon> \<bool>\<close> in the above rule, both \<open>cond\<close> and \<open>x'\<close> are schematic variables, which means we cannot determine either of them via unification. Even though the abstract state \<open>x'\<close> may be determined possibly in the unification of \<open>X x'\<close>, to infer \<open>cond x'\<close> it is still a problem especially when \<open>x'\<close> is not a variable but a compounded term and its expression may be shattered in and mixed up with the expression of \<open>cond\<close> after simplifications like beta reduction, causing it is very difficult to recover the actual abstract guard \<open>cond\<close> from the reduced composition \<open>cond x'\<close>. *) subsection \<open>Recursion\<close> lemma "__op_recursion_simp__": "(\<And>g x' v'. (\<And>x'' v''. \<p>\<r>\<o>\<c> g v'' \<lbrace> X x'' v'' \<longmapsto> \<lambda>ret. Y x'' ret \<rbrace> \<t>\<h>\<r>\<o>\<w>\<s> E x'') \<Longrightarrow> \<p>\<r>\<o>\<c> F g v' \<lbrace> X x' v' \<longmapsto> \<lambda>ret. Y x' ret \<rbrace> \<t>\<h>\<r>\<o>\<w>\<s> E x' ) \<Longrightarrow> \<forall>x v. \<p>\<r>\<o>\<c> op_fix_point F v \<lbrace> X x v \<longmapsto> \<lambda>ret. Y x ret \<rbrace> \<t>\<h>\<r>\<o>\<w>\<s> E x" unfolding op_fix_point_def \<phi>Procedure_def atomize_all apply (clarsimp simp add: SemRec_deterministic2 del: subsetI) subgoal for x v comp a R apply (rotate_tac 1) apply (induct rule: SemRec.induct) subgoal premises prems for F v res y using prems(3)[of \<open>\<lambda>_ _. {AssumptionBroken}\<close> x v, simplified, THEN spec[where x=res], THEN spec[where x=R], THEN mp, OF prems(2), unfolded prems(1)] . by simp . text \<open>Instead, we use a variant of the above rule which in addition annotates the names of the values.\<close> lemma "__op_recursion__": "(\<And>g x' (v':: 'a::VALs \<phi>arg <named> 'names). P x' \<Longrightarrow> PROP Labelled label (HIDDEN_PREM (\<And>x'' (v''::'a \<phi>arg <named> 'names). P x'' \<Longrightarrow> \<p>\<r>\<o>\<c> g (case_named id v'') \<lbrace> case_named (X x'') v'' \<longmapsto> \<lambda>ret. Y x'' ret \<rbrace> \<t>\<h>\<r>\<o>\<w>\<s> E x'')) \<Longrightarrow> \<p>\<r>\<o>\<c> F g (case_named id v') \<lbrace> case_named (X x') v' \<longmapsto> \<lambda>ret. Y x' ret \<rbrace> \<t>\<h>\<r>\<o>\<w>\<s> E x' ) \<Longrightarrow> PROP Pure.prop ( P x \<Longrightarrow> \<p>\<r>\<o>\<c> op_fix_point F v \<lbrace> X x v \<longmapsto> \<lambda>ret. Y x ret \<rbrace> \<t>\<h>\<r>\<o>\<w>\<s> E x )" unfolding op_fix_point_def \<phi>Procedure_def atomize_all \<phi>arg_forall \<phi>arg_All HIDDEN_PREM_def Pure.prop_def apply (clarsimp simp add: SemRec_deterministic2 del: subsetI) subgoal for comp a R apply (rotate_tac 2) apply (induct rule: SemRec.induct) subgoal premises prems for F v res y using prems(3)[OF prems(4), of \<open>\<lambda>_ _. {AssumptionBroken}\<close> v, simplified, THEN spec[where x=res], THEN spec[where x=R], THEN mp, OF prems(2), unfolded prems(1)] . by simp . ML_file \<open>library/basic_recursion.ML\<close> attribute_setup recursive = \<open>Scan.repeat (Scan.lift Parse.term) >> (fn vars => Phi_Modifier.wrap_to_attribute (fn (ctxt,sequent) => case Phi_Toplevel.name_of_the_building_procedure ctxt of NONE => error "Name binding of the recursive procedure is mandatory." 
| SOME b => ( if Binding.is_empty b then error "Name binding of the recursive procedure is mandatory." else if null vars then tracing "You may want to use syntax \<open>recursive vars\<close> to indicate \ \which variables are varied between the recursive callings." else (); PhiSem_Control_Flow.basic_recursive_mod Syntax.read_terms b vars (ctxt,sequent) ) ))\<close> end
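op_fix_point and the __op_recursion__ rule above read a recursive procedure as the fixed point of a functional F over procedures. The following Python fixpoint combinator is a sketch of that reading; the factorial functional is only an illustration, not part of the theory.

from typing import Callable

def fix(F: Callable[[Callable], Callable]) -> Callable:
    # the recursive function obtained by unfolding F on demand,
    # in the spirit of op_fix_point F
    def rec(*args):
        return F(rec)(*args)
    return rec

fact = fix(lambda self: lambda n: 1 if n == 0 else n * self(n - 1))
assert fact(5) == 120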
import Mathlib.Topology.Basic variable [TopologicalSpace X] [TopologicalSpace Y] example : Continuous (id : X → X) := by continuity example {f : X → Y} {g : Y → X} (hf : Continuous f) (hg : Continuous g) : Continuous (fun x => f (g x)) := by continuity example {f : X → Y} {g : Y → X} (hf : Continuous f) (hg : Continuous g) : Continuous (f ∘ g ∘ f) := by continuity example {f : X → Y} {g : Y → X} (hf : Continuous f) (hg : Continuous g) : Continuous (f ∘ g) := by continuity example (y : Y) : Continuous (fun (_ : X) ↦ y) := by continuity example {f : Y → Y} (y : Y) : Continuous (f ∘ (fun (_ : X) => y)) := by continuity example {g : X → X} (y : Y) : Continuous ((fun _ ↦ y) ∘ g) := by continuity example {f : X → Y} (x : X) : Continuous (fun (_ : X) ↦ f x) := by continuity -- Todo: more interesting examples when more algebra is ported -- Porting note: port the tests from mathlib3 once we have the necessary theory files /- Todo: restore this test example [TopologicalSpace X] [TopologicalSpace Y] (f₁ f₂ : X → Y) (hf₁ : Continuous f₁) (hf₂ : Continuous f₂) (g : Y → ℝ) (hg : Continuous g) : Continuous (fun x => (max (g (f₁ x)) (g (f₂ x))) + 1) := by continuity -/
On 6 June 1942, she took on 84 survivors of the carrier Yorktown, which sank in the aftermath of the Battle of Midway. During the summer of 1942, she operated out of the South Pacific. On 3 August 1942, she, along with the minesweepers Gamble and Tracy, was laying mines in Segond Channel, Espiritu Santo. The destroyer Tucker entered the strait on escort patrol, having not been notified of the minefield, when she struck one of the mines and sank. Breese, which was moored in the channel, rendered aid. On 30 September 1942, she was on a nighttime exercise off Espiritu Santo when she was damaged in a collision with the cruiser San Francisco. She carried out minesweeping duties during the consolidation of the Solomon Islands from 1–13 May 1943, where she was assigned to Task Group 36.5 alongside Gamble, Preble, and Radford. They laid mines in Blackett Strait to guard the western approaches to Kula Gulf.
(* Copyright 2016 University of Luxembourg This file is part of our formalization of Platzer's "A Complete Uniform Substitution Calculus for Differential Dynamic Logic" available here: http://arxiv.org/pdf/1601.06183.pdf (July 27, 2016). We refer to this formalization as DdlCoq here. DdlCoq is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. DdlCoq is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with DdlCoq. If not, see <http://www.gnu.org/licenses/>. authors: Vincent Rahli Marcus Völp Ivana Vukotic *) Require Export list_util. Require Export vec_util. Require Export reals_util. Require Export symbol_lemmas. Require Export Derive. Require Export deriv_util. Require Export state. (** This file contains some lemmas about smooth functions with multiple arguments. Beside that, this file introduces definitions for semantics of Terms, Formulas and Programs, as well as definition of interpretation. This file works with functions with multiple arguments *) (** definition of symbols which works with functions with multiple arguments *) Inductive Symbol := (* terms *) | SymbolFunction (f : FunctionSymbol) (n : nat) : Symbol | SymbolDotTerm (n : nat) : Symbol (* formulas *) | SymbolPredicate (f : PredicateSymbol) (n : nat) : Symbol | SymbolQuantifier (f : QuantifierSymbol) : Symbol | SymbolDotForm : Symbol (* ODEs *) (* provides the bound variables & dynamic semantics of ODE constants *) | SymbolODE (c : ODEConst) : Symbol (* programs *) | SymbolProgramConst (a : ProgramConstName) : Symbol. (* Record listn (T : Type) (n : nat) := mk_listn { listn_l : list T; listn_cond : Datatypes.length listn_l = n }. *) (** function is n derivable if it's n derivable in every point *) Definition ex_derive_all_n (f : R -> R) := forall n pt, ex_derive_n f n pt. (** partial derivative of variables in state (st stands for state here) *) Fixpoint partial_derive (f : state -> R) (l : list Assign) : state -> R := match l with | [] => f | v :: l => fun (st : state) => Derive (fun X : R => partial_derive f l (upd_state st v X)) (st v) end. Require Export FunctionalExtensionality. Lemma upd_state_var_ext : forall s v, upd_state_var s v (s (KAssignVar v)) = s. Proof. introv. apply functional_extensionality; introv. unfold upd_state_var, upd_state; dest_cases w. Qed. Lemma upd_state_ext : forall s v, upd_state s v (s v) = s. Proof. introv. apply functional_extensionality; introv. unfold upd_state_var, upd_state; dest_cases w. Qed. Lemma upd_state_var_twice : forall s v z w, upd_state_var (upd_state_var s v z) v w = upd_state_var s v w. Proof. introv. apply functional_extensionality; introv. unfold upd_state_var, upd_state. dest_cases w. Qed. Hint Rewrite upd_state_var_twice. Lemma upd_state_twice : forall s v z w, upd_state (upd_state s v z) v w = upd_state s v w. Proof. introv. apply functional_extensionality; introv. unfold upd_state. dest_cases w. Qed. Hint Rewrite upd_state_twice. Lemma partial_derive_st_as_Derive_n : forall (l : list Assign) f v s, (forall x, List.In x l -> x = v) -> partial_derive f l s = Derive_n (fun r => f (upd_state s v r)) (List.length l) (s v). Proof. induction l; introv imp; simpl in *. 
{ rewrite upd_state_ext; auto. } { dLin_hyp h; subst. apply Derive_ext; introv. rewrite (IHl f v); auto. autorewrite with core. apply Derive_n_ext; introv. autorewrite with core; auto. } Qed. Definition ex_partial_derive_st_l (f : state -> R) (st : state) (l : list Assign) (v : Assign) := ex_derive_R (fun X => partial_derive f l (upd_state st v X)) (st v). (** This says that the (n+1)th partial derivative of f exists *) Definition ex_nth_partial_derive_st (n : nat) (f : state -> R) := forall (st : state) (l : list Assign) (v : Assign), List.length l = n -> ex_partial_derive_st_l f st l v. Definition ex_all_partial_derive_st (f : state -> R) := forall n, ex_nth_partial_derive_st n f. Lemma partial_derive_st_ext : forall f g l st, (forall s, f s = g s) -> partial_derive f l st = partial_derive g l st. Proof. induction l; introv imp; simpl; auto. apply Derive_ext; introv. apply IHl; auto. Qed. Lemma ex_all_partial_derive_st_ext : forall F G, (forall s, F s = G s) -> ex_all_partial_derive_st F -> ex_all_partial_derive_st G. Proof. introv imp d len; simpl. eapply ex_derive_ext;[|apply (d n);exact len]. simpl; introv. apply partial_derive_st_ext; auto. Qed. (* Definition partial_derive_st_upd : forall f l s s', partial_derive f l s = partial_derive (fun s' => f (upd_state_st s s' l)) l s'. Proof. induction l; introv; simpl; autorewrite with core; auto. Abort.*) Definition ex_partial_derive (f : state -> R) (v : Assign) (l : list Assign) : Prop := forall pt s, ex_derive_R (fun X => partial_derive f l (upd_state s v X)) pt. (** This is a variant of [ex_all_partial_derive_st] where the point can be anything. We prove below in [ex_partial_derive_st_pt_eq_all] that both definitions are equivalent. *) Definition ex_partial_derive_st_pt (f : state -> R) : Prop := forall v l, ex_partial_derive f v l. Lemma ex_partial_derive_st_pt_eq_all : forall (f : state -> R), ex_partial_derive_st_pt f <-> ex_all_partial_derive_st f. Proof. introv; split; intro h. { introv len; subst. unfold ex_partial_derive_st_l; simpl; apply h. } { repeat introv; simpl in *. pose proof (h (List.length l) (upd_state s v pt) l v eq_refl) as h. unfold ex_partial_derive_st_l in h; simpl in h. autorewrite with core in h. eapply ex_derive_ext;[|exact h]; clear h; simpl; introv. autorewrite with core; auto. } Qed. Definition Vector := Vector.t. (* This is essentially stating that [f] is smooth. It derives [f]'s arguments through the interpretation function [G]. Does that even make sense? Try to prove [ex_derive_func 2 plus_vector], where plus_vector is defined from Rplus. (see below) T is really Term, but what do we really care what the type is? *) Definition ex_partial_derive_st_func {m : nat} (f : Vector R m -> R) := forall (T : Type) (d : T) (* forall non-empty T *) (ts : Vector T m) (G : state -> T -> R), (forall t, Vector.In t ts -> ex_all_partial_derive_st (fun s => G s t)) -> ex_all_partial_derive_st (fun s => f (Vector.map (G s) ts)). (* Here's a slightly stronger version where the arguments have to only be derivable on sublists. We show below in [smooth_fun_T_eq] that it is equivalent to the vectorial version [smooth_fun] *) Definition smooth_fun_T {m : nat} (f : Vector R m -> R) := forall (T : Type) (d : T) (* forall non-empty T *) (ts : Vector T m) (G : state -> T -> R) (v : Assign) (l : list Assign), (forall t w l', Vector.In t ts -> sublist (w :: l') (v :: l) -> ex_partial_derive (fun s => G s t) w l') -> ex_partial_derive (fun s => f (Vector.map (G s) ts)) v l. (* A stronger variant of ex_partial_derive_st_func. 
Here we ask for the arguments to be derivable on the same states and variables. Is that enough? Probably not because I can't prove it for plus in dynamic_semantics_prop.v! (Lemma ex_partial_derive_st_func_l_plus_vector) *) Definition ex_partial_derive_st_func_l {m : nat} (f : Vector R m -> R) := forall (T : Type) (d : T) (* forall non-empty T *) (ts : Vector T m) (G : state -> T -> R) (st : state) (l : list Assign) (v : Assign), (forall t, Vector.In t ts -> ex_partial_derive_st_l (fun s => G s t) st l v) -> ex_partial_derive_st_l (fun s => f (Vector.map (G s) ts)) st l v. Definition revApp {A B} (s : A) (f : A -> B) : B := f s. (* This is an alternative definition of ex_partial_derive_st_func that is not parametrized over a non-empty type and an interpretation-like function (the G in ex_partial_derive_st_func) but instead directly provides a vector of interpretations. We show below in ex_partial_derive_vec_st_func_eq that the two definitions are equivalent (obviously! :P). *) Definition ex_partial_derive_vec_st_func {m : nat} (f : Vector R m -> R) := forall (Is : Vector (state -> R) m), (forall i, Vector.In i Is -> ex_all_partial_derive_st i) -> ex_all_partial_derive_st (fun s => f (Vector.map (revApp s) Is)). Definition smooth_fun {m : nat} (f : Vector.t R m -> R) := forall (Is : Vector.t (state -> R) m) (v : Assign) (l : list Assign), (forall i w l', Vector.In i Is -> sublist (w :: l') (v :: l) -> ex_partial_derive i w l') -> ex_partial_derive (fun s => f (Vector.map (revApp s) Is)) v l. Lemma ex_partial_derive_vec_st_func_eq : forall {m} (f : Vector.t R m -> R), ex_partial_derive_vec_st_func f <-> ex_partial_derive_st_func f. Proof. introv; split; introv exd. { introv d cond. unfold ex_partial_derive_vec_st_func in exd. pose proof (exd (Vector.map (fun a s => G s a) ts)) as q; clear exd. autodimp q hyp. { introv j; apply in_vec_map in j; exrepnd; subst; auto. } eapply ex_all_partial_derive_st_ext in q; [|introv;rewrite vec_map_map; unfold compose; reflexivity]. auto. } { introv cond. unfold ex_partial_derive_st_func in exd. apply (exd (state -> R) (fun _ => 0) Is (fun s i => i s)); auto. } Qed. Lemma ex_partial_derive_ext : forall f g v l, (forall s, f s = g s) -> ex_partial_derive f v l -> ex_partial_derive g v l. Proof. introv e pd; introv; simpl in *. eapply ex_derive_ext;[|apply pd]; simpl; introv. apply partial_derive_st_ext; auto. Qed. Lemma smooth_fun_T_eq : forall {m} (f : Vector.t R m -> R), smooth_fun f <-> smooth_fun_T f. Proof. introv; split; introv exd. { introv d cond; introv; simpl in *. pose proof (exd (Vector.map (fun a s => G s a) ts) v l) as q; clear exd. autodimp q hyp. { introv j; apply in_vec_map in j; exrepnd; subst; auto. } eapply ex_derive_ext;[|apply (q pt s)]; simpl; introv; clear q. apply partial_derive_st_ext; introv. rewrite vec_map_map; unfold compose; auto. } { introv cond; introv; simpl in *. apply (exd (state -> R) (fun _ => 0) Is (fun s i => i s)); auto. } Qed. Definition plus_vector : Vector.t R 2%nat -> R := fun v : Vector.t R 2%nat => match v with | Vector.cons x 1 (Vector.cons y 0 Vector.nil) => Rplus x y | _ => R0 end. (* see ex_partial_derive_st_func_test_plus in dynamic_semantics_props, which is proved *) Lemma ex_partial_derive_st_func_test : ex_partial_derive_st_func plus_vector. Proof. introv d; introv. unfold plus_vector; simpl. apply (Vector.caseS' ts). introv; simpl. apply (Vector.caseS' t). introv; simpl. clear ts t. 
apply (@Vector.case0 T (fun t0 => (forall t : T, Vector.In t (Vector.cons T h 1 (Vector.cons T h0 0 t0)) -> ex_all_partial_derive_st (fun s : state => G s t)) -> ex_all_partial_derive_st (fun s : state => match Vector.map (G s) t0 return R with | @Vector.nil _ => G s h + G s h0 | @Vector.cons _ _ _ _ => 0 end))). simpl. clear t0. introv ih. rename h into x. rename h0 into y. pose proof (ih x) as h1. pose proof (ih y) as h2. clear ih. autodimp h1 hyp;[constructor|]. autodimp h2 hyp;[repeat constructor|]. (* unfold ex_partial_derive_st in *. unfold ex_nth_partial_derive_st in *. introv H1. unfold ex_derive_R in *. unfold ex_derive in *. (* I need instance of type T (I can not rewrite, apply of pose proof)*) *) Abort. (** interpretation of function with multiple arguments *) Record interpretation_function (n : nat) := MkInterpFun { interp_fun_f : Vector.t R n -> R; interp_fun_cond : smooth_fun_T interp_fun_f }. (** definition of semantics for Terms *) Definition TermSem := state -> R. (** definition of semantics for Formulas *) Definition FormulaSem := state -> Prop. (** definition of semantics for Formulas which return False and True, respectively *) Definition FalseFormulaSem : FormulaSem := fun _ => False. Definition TrueFormulaSem : FormulaSem := fun _ => True. (** definition of semantics for Programs **) Definition ProgramSem := state -> state -> Prop. (** definition of semantics for Programs which return False and True, respectively *) Definition FalseProgramSem : ProgramSem := fun _ _ => False. Definition TrueProgramSem : ProgramSem := fun _ _ => True. (** definition of semantics for Differential Programs *) Definition ODESem := state -> state. (*Definition ODESem := R -> (R -> state) -> Prop.*) (*Definition ODEConstSem := state -> state.*) (*Definition ODEConstSem := (R -> state) -> R -> state.*) (* Could we do that instead: Definition ODESem := forall r : R, (preal_upto r -> state) -> Prop. Definition ODEConstSem := forall (r : R), (preal_upto r -> state) -> preal_upto r -> state.*) (* [R -> state] is a stateflow. Do we have to limit the stateflow in time? Right now we do with the argument [R]. *) Definition interpQuantExt (F : FormulaSem -> FormulaSem) := forall f g v w, (forall a, v a = w a) -> (forall s, f s <-> g s) -> (F f v <-> F g w). (** interpretation of quantifier *) Record interpretation_quantifier := MkInterpQuant { interp_quant_f : FormulaSem -> FormulaSem; interp_quant_ext : interpQuantExt interp_quant_f }. Definition interpOdeExt (l : list Assign) (F : ODESem) := forall (v w : state) a, (forall a, v a = w a) -> F v a = F w a. Record interpretation_ode := MkInterpODE { interp_ode_bv : list Assign; interp_ode_dm : ODESem; interp_ode_cond : interpOdeExt interp_ode_bv interp_ode_dm }. (** definition of interpretation *) Definition interpretation := forall f : Symbol, match f return Type with | SymbolFunction _ n => interpretation_function n | SymbolDotTerm n => R | SymbolPredicate _ n => Vector.t R n -> Prop | SymbolQuantifier _ => interpretation_quantifier | SymbolDotForm => FormulaSem | SymbolODE _ => interpretation_ode | SymbolProgramConst _ => ProgramSem end. Ltac constant_derivable := let eps := fresh "eps" in let h := fresh "h" in let d := fresh "d" in let q := fresh "q" in let z := fresh "z" in try (unfold derivable_pt, derivable_pt_abs, derivable_pt_lim); autorewrite with core; exists R0; intros eps h; exists R1pos; intros d q z; autorewrite with core; rewrite zero_div_is_zero; auto; rewrite Rabs_R0; auto. Lemma deriv_constant : forall c, derivable (fun X => c). 
Proof. intros c; intros x. reg. Qed. Lemma ex_all_partial_derive_st_implies_l_pt : forall (f : state -> R) v l, ex_all_partial_derive_st f -> ex_partial_derive f v l. Proof. introv h; introv; simpl in *. apply ex_partial_derive_st_pt_eq_all in h. apply h. Qed.
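The Coq Fixpoint partial_derive above iterates a one-variable derivative along a list of assignments, each taken at the value the current state gives that variable. Below is a numerical Python sketch of the same recursion, using a central finite difference as a stand-in for Coquelicot's Derive; the state is a plain dict and everything here is illustrative, not the formal development.

from typing import Callable, Dict, List

State = Dict[str, float]

def upd_state(st: State, v: str, x: float) -> State:
    s = dict(st)
    s[v] = x
    return s

def derive(g: Callable[[float], float], x: float, h: float = 1e-5) -> float:
    # central finite-difference approximation of Derive g x
    return (g(x + h) - g(x - h)) / (2 * h)

def partial_derive(f: Callable[[State], float], vs: List[str], st: State) -> float:
    # partial_derive f [] st        = f st
    # partial_derive f (v :: l) st  = Derive (fun X => partial_derive f l (upd_state st v X)) (st v)
    if not vs:
        return f(st)
    v, rest = vs[0], vs[1:]
    return derive(lambda x: partial_derive(f, rest, upd_state(st, v, x)), st[v])

f = lambda s: s["x"] ** 2 * s["y"]                           # d^2 f / dx dy = 2 x
print(partial_derive(f, ["x", "y"], {"x": 3.0, "y": 5.0}))   # approximately 6.0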
(* Title: HOL/Conditionally_Complete_Lattices.thy Author: Amine Chaieb and L C Paulson, University of Cambridge Author: Johannes Hölzl, TU München Author: Luke S. Serafin, Carnegie Mellon University *) section \<open>Conditionally-complete Lattices\<close> theory Conditionally_Complete_Lattices imports Finite_Set Lattices_Big Set_Interval begin locale preordering_bdd = preordering begin definition bdd :: \<open>'a set \<Rightarrow> bool\<close> where unfold: \<open>bdd A \<longleftrightarrow> (\<exists>M. \<forall>x \<in> A. x \<^bold>\<le> M)\<close> lemma empty [simp, intro]: \<open>bdd {}\<close> by (simp add: unfold) lemma I [intro]: \<open>bdd A\<close> if \<open>\<And>x. x \<in> A \<Longrightarrow> x \<^bold>\<le> M\<close> using that by (auto simp add: unfold) lemma E: assumes \<open>bdd A\<close> obtains M where \<open>\<And>x. x \<in> A \<Longrightarrow> x \<^bold>\<le> M\<close> using assms that by (auto simp add: unfold) lemma I2: \<open>bdd (f ` A)\<close> if \<open>\<And>x. x \<in> A \<Longrightarrow> f x \<^bold>\<le> M\<close> using that by (auto simp add: unfold) lemma mono: \<open>bdd A\<close> if \<open>bdd B\<close> \<open>A \<subseteq> B\<close> using that by (auto simp add: unfold) lemma Int1 [simp]: \<open>bdd (A \<inter> B)\<close> if \<open>bdd A\<close> using mono that by auto lemma Int2 [simp]: \<open>bdd (A \<inter> B)\<close> if \<open>bdd B\<close> using mono that by auto end context preorder begin sublocale bdd_above: preordering_bdd \<open>(\<le>)\<close> \<open>(<)\<close> defines bdd_above_primitive_def: bdd_above = bdd_above.bdd .. sublocale bdd_below: preordering_bdd \<open>(\<ge>)\<close> \<open>(>)\<close> defines bdd_below_primitive_def: bdd_below = bdd_below.bdd .. lemma bdd_above_def: \<open>bdd_above A \<longleftrightarrow> (\<exists>M. \<forall>x \<in> A. x \<le> M)\<close> by (fact bdd_above.unfold) lemma bdd_below_def: \<open>bdd_below A \<longleftrightarrow> (\<exists>M. \<forall>x \<in> A. M \<le> x)\<close> by (fact bdd_below.unfold) lemma bdd_aboveI: "(\<And>x. x \<in> A \<Longrightarrow> x \<le> M) \<Longrightarrow> bdd_above A" by (fact bdd_above.I) lemma bdd_belowI: "(\<And>x. x \<in> A \<Longrightarrow> m \<le> x) \<Longrightarrow> bdd_below A" by (fact bdd_below.I) lemma bdd_aboveI2: "(\<And>x. x \<in> A \<Longrightarrow> f x \<le> M) \<Longrightarrow> bdd_above (f`A)" by (fact bdd_above.I2) lemma bdd_belowI2: "(\<And>x. 
x \<in> A \<Longrightarrow> m \<le> f x) \<Longrightarrow> bdd_below (f`A)" by (fact bdd_below.I2) lemma bdd_above_empty: "bdd_above {}" by (fact bdd_above.empty) lemma bdd_below_empty: "bdd_below {}" by (fact bdd_below.empty) lemma bdd_above_mono: "bdd_above B \<Longrightarrow> A \<subseteq> B \<Longrightarrow> bdd_above A" by (fact bdd_above.mono) lemma bdd_below_mono: "bdd_below B \<Longrightarrow> A \<subseteq> B \<Longrightarrow> bdd_below A" by (fact bdd_below.mono) lemma bdd_above_Int1: "bdd_above A \<Longrightarrow> bdd_above (A \<inter> B)" by (fact bdd_above.Int1) lemma bdd_above_Int2: "bdd_above B \<Longrightarrow> bdd_above (A \<inter> B)" by (fact bdd_above.Int2) lemma bdd_below_Int1: "bdd_below A \<Longrightarrow> bdd_below (A \<inter> B)" by (fact bdd_below.Int1) lemma bdd_below_Int2: "bdd_below B \<Longrightarrow> bdd_below (A \<inter> B)" by (fact bdd_below.Int2) lemma bdd_above_Ioo [simp, intro]: "bdd_above {a <..< b}" by (auto simp add: bdd_above_def intro!: exI[of _ b] less_imp_le) lemma bdd_above_Ico [simp, intro]: "bdd_above {a ..< b}" by (auto simp add: bdd_above_def intro!: exI[of _ b] less_imp_le) lemma bdd_above_Iio [simp, intro]: "bdd_above {..< b}" by (auto simp add: bdd_above_def intro: exI[of _ b] less_imp_le) lemma bdd_above_Ioc [simp, intro]: "bdd_above {a <.. b}" by (auto simp add: bdd_above_def intro: exI[of _ b] less_imp_le) lemma bdd_above_Icc [simp, intro]: "bdd_above {a .. b}" by (auto simp add: bdd_above_def intro: exI[of _ b] less_imp_le) lemma bdd_above_Iic [simp, intro]: "bdd_above {.. b}" by (auto simp add: bdd_above_def intro: exI[of _ b] less_imp_le) lemma bdd_below_Ioo [simp, intro]: "bdd_below {a <..< b}" by (auto simp add: bdd_below_def intro!: exI[of _ a] less_imp_le) lemma bdd_below_Ioc [simp, intro]: "bdd_below {a <.. b}" by (auto simp add: bdd_below_def intro!: exI[of _ a] less_imp_le) lemma bdd_below_Ioi [simp, intro]: "bdd_below {a <..}" by (auto simp add: bdd_below_def intro: exI[of _ a] less_imp_le) lemma bdd_below_Ico [simp, intro]: "bdd_below {a ..< b}" by (auto simp add: bdd_below_def intro: exI[of _ a] less_imp_le) lemma bdd_below_Icc [simp, intro]: "bdd_below {a .. 
b}" by (auto simp add: bdd_below_def intro: exI[of _ a] less_imp_le) lemma bdd_below_Ici [simp, intro]: "bdd_below {a ..}" by (auto simp add: bdd_below_def intro: exI[of _ a] less_imp_le) end context order_top begin lemma bdd_above_top [simp, intro!]: "bdd_above A" by (rule bdd_aboveI [of _ top]) simp end context order_bot begin lemma bdd_below_bot [simp, intro!]: "bdd_below A" by (rule bdd_belowI [of _ bot]) simp end lemma bdd_above_image_mono: "mono f \<Longrightarrow> bdd_above A \<Longrightarrow> bdd_above (f`A)" by (auto simp: bdd_above_def mono_def) lemma bdd_below_image_mono: "mono f \<Longrightarrow> bdd_below A \<Longrightarrow> bdd_below (f`A)" by (auto simp: bdd_below_def mono_def) lemma bdd_above_image_antimono: "antimono f \<Longrightarrow> bdd_below A \<Longrightarrow> bdd_above (f`A)" by (auto simp: bdd_above_def bdd_below_def antimono_def) lemma bdd_below_image_antimono: "antimono f \<Longrightarrow> bdd_above A \<Longrightarrow> bdd_below (f`A)" by (auto simp: bdd_above_def bdd_below_def antimono_def) lemma fixes X :: "'a::ordered_ab_group_add set" shows bdd_above_uminus[simp]: "bdd_above (uminus ` X) \<longleftrightarrow> bdd_below X" and bdd_below_uminus[simp]: "bdd_below (uminus ` X) \<longleftrightarrow> bdd_above X" using bdd_above_image_antimono[of uminus X] bdd_below_image_antimono[of uminus "uminus`X"] using bdd_below_image_antimono[of uminus X] bdd_above_image_antimono[of uminus "uminus`X"] by (auto simp: antimono_def image_image) context lattice begin lemma bdd_above_insert [simp]: "bdd_above (insert a A) = bdd_above A" by (auto simp: bdd_above_def intro: le_supI2 sup_ge1) lemma bdd_below_insert [simp]: "bdd_below (insert a A) = bdd_below A" by (auto simp: bdd_below_def intro: le_infI2 inf_le1) lemma bdd_finite [simp]: assumes "finite A" shows bdd_above_finite: "bdd_above A" and bdd_below_finite: "bdd_below A" using assms by (induct rule: finite_induct, auto) lemma bdd_above_Un [simp]: "bdd_above (A \<union> B) = (bdd_above A \<and> bdd_above B)" proof assume "bdd_above (A \<union> B)" thus "bdd_above A \<and> bdd_above B" unfolding bdd_above_def by auto next assume "bdd_above A \<and> bdd_above B" then obtain a b where "\<forall>x\<in>A. x \<le> a" "\<forall>x\<in>B. x \<le> b" unfolding bdd_above_def by auto hence "\<forall>x \<in> A \<union> B. x \<le> sup a b" by (auto intro: Un_iff le_supI1 le_supI2) thus "bdd_above (A \<union> B)" unfolding bdd_above_def .. qed lemma bdd_below_Un [simp]: "bdd_below (A \<union> B) = (bdd_below A \<and> bdd_below B)" proof assume "bdd_below (A \<union> B)" thus "bdd_below A \<and> bdd_below B" unfolding bdd_below_def by auto next assume "bdd_below A \<and> bdd_below B" then obtain a b where "\<forall>x\<in>A. a \<le> x" "\<forall>x\<in>B. b \<le> x" unfolding bdd_below_def by auto hence "\<forall>x \<in> A \<union> B. inf a b \<le> x" by (auto intro: Un_iff le_infI1 le_infI2) thus "bdd_below (A \<union> B)" unfolding bdd_below_def .. qed lemma bdd_above_image_sup[simp]: "bdd_above ((\<lambda>x. sup (f x) (g x)) ` A) \<longleftrightarrow> bdd_above (f`A) \<and> bdd_above (g`A)" by (auto simp: bdd_above_def intro: le_supI1 le_supI2) lemma bdd_below_image_inf[simp]: "bdd_below ((\<lambda>x. inf (f x) (g x)) ` A) \<longleftrightarrow> bdd_below (f`A) \<and> bdd_below (g`A)" by (auto simp: bdd_below_def intro: le_infI1 le_infI2) lemma bdd_below_UN[simp]: "finite I \<Longrightarrow> bdd_below (\<Union>i\<in>I. A i) = (\<forall>i \<in> I. 
bdd_below (A i))" by (induction I rule: finite.induct) auto lemma bdd_above_UN[simp]: "finite I \<Longrightarrow> bdd_above (\<Union>i\<in>I. A i) = (\<forall>i \<in> I. bdd_above (A i))" by (induction I rule: finite.induct) auto end text \<open> To avoid name classes with the \<^class>\<open>complete_lattice\<close>-class we prefix \<^const>\<open>Sup\<close> and \<^const>\<open>Inf\<close> in theorem names with c. \<close> class conditionally_complete_lattice = lattice + Sup + Inf + assumes cInf_lower: "x \<in> X \<Longrightarrow> bdd_below X \<Longrightarrow> Inf X \<le> x" and cInf_greatest: "X \<noteq> {} \<Longrightarrow> (\<And>x. x \<in> X \<Longrightarrow> z \<le> x) \<Longrightarrow> z \<le> Inf X" assumes cSup_upper: "x \<in> X \<Longrightarrow> bdd_above X \<Longrightarrow> x \<le> Sup X" and cSup_least: "X \<noteq> {} \<Longrightarrow> (\<And>x. x \<in> X \<Longrightarrow> x \<le> z) \<Longrightarrow> Sup X \<le> z" begin lemma cSup_upper2: "x \<in> X \<Longrightarrow> y \<le> x \<Longrightarrow> bdd_above X \<Longrightarrow> y \<le> Sup X" by (metis cSup_upper order_trans) lemma cInf_lower2: "x \<in> X \<Longrightarrow> x \<le> y \<Longrightarrow> bdd_below X \<Longrightarrow> Inf X \<le> y" by (metis cInf_lower order_trans) lemma cSup_mono: "B \<noteq> {} \<Longrightarrow> bdd_above A \<Longrightarrow> (\<And>b. b \<in> B \<Longrightarrow> \<exists>a\<in>A. b \<le> a) \<Longrightarrow> Sup B \<le> Sup A" by (metis cSup_least cSup_upper2) lemma cInf_mono: "B \<noteq> {} \<Longrightarrow> bdd_below A \<Longrightarrow> (\<And>b. b \<in> B \<Longrightarrow> \<exists>a\<in>A. a \<le> b) \<Longrightarrow> Inf A \<le> Inf B" by (metis cInf_greatest cInf_lower2) lemma cSup_subset_mono: "A \<noteq> {} \<Longrightarrow> bdd_above B \<Longrightarrow> A \<subseteq> B \<Longrightarrow> Sup A \<le> Sup B" by (metis cSup_least cSup_upper subsetD) lemma cInf_superset_mono: "A \<noteq> {} \<Longrightarrow> bdd_below B \<Longrightarrow> A \<subseteq> B \<Longrightarrow> Inf B \<le> Inf A" by (metis cInf_greatest cInf_lower subsetD) lemma cSup_eq_maximum: "z \<in> X \<Longrightarrow> (\<And>x. x \<in> X \<Longrightarrow> x \<le> z) \<Longrightarrow> Sup X = z" by (intro order.antisym cSup_upper[of z X] cSup_least[of X z]) auto lemma cInf_eq_minimum: "z \<in> X \<Longrightarrow> (\<And>x. x \<in> X \<Longrightarrow> z \<le> x) \<Longrightarrow> Inf X = z" by (intro order.antisym cInf_lower[of z X] cInf_greatest[of X z]) auto lemma cSup_le_iff: "S \<noteq> {} \<Longrightarrow> bdd_above S \<Longrightarrow> Sup S \<le> a \<longleftrightarrow> (\<forall>x\<in>S. x \<le> a)" by (metis order_trans cSup_upper cSup_least) lemma le_cInf_iff: "S \<noteq> {} \<Longrightarrow> bdd_below S \<Longrightarrow> a \<le> Inf S \<longleftrightarrow> (\<forall>x\<in>S. a \<le> x)" by (metis order_trans cInf_lower cInf_greatest) lemma cSup_eq_non_empty: assumes 1: "X \<noteq> {}" assumes 2: "\<And>x. x \<in> X \<Longrightarrow> x \<le> a" assumes 3: "\<And>y. (\<And>x. x \<in> X \<Longrightarrow> x \<le> y) \<Longrightarrow> a \<le> y" shows "Sup X = a" by (intro 3 1 order.antisym cSup_least) (auto intro: 2 1 cSup_upper) lemma cInf_eq_non_empty: assumes 1: "X \<noteq> {}" assumes 2: "\<And>x. x \<in> X \<Longrightarrow> a \<le> x" assumes 3: "\<And>y. (\<And>x. x \<in> X \<Longrightarrow> y \<le> x) \<Longrightarrow> y \<le> a" shows "Inf X = a" by (intro 3 1 order.antisym cInf_greatest) (auto intro: 2 1 cInf_lower) lemma cInf_cSup: "S \<noteq> {} \<Longrightarrow> bdd_below S \<Longrightarrow> Inf S = Sup {x. 
\<forall>s\<in>S. x \<le> s}" by (rule cInf_eq_non_empty) (auto intro!: cSup_upper cSup_least simp: bdd_below_def) lemma cSup_cInf: "S \<noteq> {} \<Longrightarrow> bdd_above S \<Longrightarrow> Sup S = Inf {x. \<forall>s\<in>S. s \<le> x}" by (rule cSup_eq_non_empty) (auto intro!: cInf_lower cInf_greatest simp: bdd_above_def) lemma cSup_insert: "X \<noteq> {} \<Longrightarrow> bdd_above X \<Longrightarrow> Sup (insert a X) = sup a (Sup X)" by (intro cSup_eq_non_empty) (auto intro: le_supI2 cSup_upper cSup_least) lemma cInf_insert: "X \<noteq> {} \<Longrightarrow> bdd_below X \<Longrightarrow> Inf (insert a X) = inf a (Inf X)" by (intro cInf_eq_non_empty) (auto intro: le_infI2 cInf_lower cInf_greatest) lemma cSup_singleton [simp]: "Sup {x} = x" by (intro cSup_eq_maximum) auto lemma cInf_singleton [simp]: "Inf {x} = x" by (intro cInf_eq_minimum) auto lemma cSup_insert_If: "bdd_above X \<Longrightarrow> Sup (insert a X) = (if X = {} then a else sup a (Sup X))" using cSup_insert[of X] by simp lemma cInf_insert_If: "bdd_below X \<Longrightarrow> Inf (insert a X) = (if X = {} then a else inf a (Inf X))" using cInf_insert[of X] by simp lemma le_cSup_finite: "finite X \<Longrightarrow> x \<in> X \<Longrightarrow> x \<le> Sup X" proof (induct X arbitrary: x rule: finite_induct) case (insert x X y) then show ?case by (cases "X = {}") (auto simp: cSup_insert intro: le_supI2) qed simp lemma cInf_le_finite: "finite X \<Longrightarrow> x \<in> X \<Longrightarrow> Inf X \<le> x" proof (induct X arbitrary: x rule: finite_induct) case (insert x X y) then show ?case by (cases "X = {}") (auto simp: cInf_insert intro: le_infI2) qed simp lemma cSup_eq_Sup_fin: "finite X \<Longrightarrow> X \<noteq> {} \<Longrightarrow> Sup X = Sup_fin X" by (induct X rule: finite_ne_induct) (simp_all add: cSup_insert) lemma cInf_eq_Inf_fin: "finite X \<Longrightarrow> X \<noteq> {} \<Longrightarrow> Inf X = Inf_fin X" by (induct X rule: finite_ne_induct) (simp_all add: cInf_insert) lemma cSup_atMost[simp]: "Sup {..x} = x" by (auto intro!: cSup_eq_maximum) lemma cSup_greaterThanAtMost[simp]: "y < x \<Longrightarrow> Sup {y<..x} = x" by (auto intro!: cSup_eq_maximum) lemma cSup_atLeastAtMost[simp]: "y \<le> x \<Longrightarrow> Sup {y..x} = x" by (auto intro!: cSup_eq_maximum) lemma cInf_atLeast[simp]: "Inf {x..} = x" by (auto intro!: cInf_eq_minimum) lemma cInf_atLeastLessThan[simp]: "y < x \<Longrightarrow> Inf {y..<x} = y" by (auto intro!: cInf_eq_minimum) lemma cInf_atLeastAtMost[simp]: "y \<le> x \<Longrightarrow> Inf {y..x} = y" by (auto intro!: cInf_eq_minimum) lemma cINF_lower: "bdd_below (f ` A) \<Longrightarrow> x \<in> A \<Longrightarrow> \<Sqinter>(f ` A) \<le> f x" using cInf_lower [of _ "f ` A"] by simp lemma cINF_greatest: "A \<noteq> {} \<Longrightarrow> (\<And>x. x \<in> A \<Longrightarrow> m \<le> f x) \<Longrightarrow> m \<le> \<Sqinter>(f ` A)" using cInf_greatest [of "f ` A"] by auto lemma cSUP_upper: "x \<in> A \<Longrightarrow> bdd_above (f ` A) \<Longrightarrow> f x \<le> \<Squnion>(f ` A)" using cSup_upper [of _ "f ` A"] by simp lemma cSUP_least: "A \<noteq> {} \<Longrightarrow> (\<And>x. 
x \<in> A \<Longrightarrow> f x \<le> M) \<Longrightarrow> \<Squnion>(f ` A) \<le> M" using cSup_least [of "f ` A"] by auto lemma cINF_lower2: "bdd_below (f ` A) \<Longrightarrow> x \<in> A \<Longrightarrow> f x \<le> u \<Longrightarrow> \<Sqinter>(f ` A) \<le> u" by (auto intro: cINF_lower order_trans) lemma cSUP_upper2: "bdd_above (f ` A) \<Longrightarrow> x \<in> A \<Longrightarrow> u \<le> f x \<Longrightarrow> u \<le> \<Squnion>(f ` A)" by (auto intro: cSUP_upper order_trans) lemma cSUP_const [simp]: "A \<noteq> {} \<Longrightarrow> (\<Squnion>x\<in>A. c) = c" by (intro order.antisym cSUP_least) (auto intro: cSUP_upper) lemma cINF_const [simp]: "A \<noteq> {} \<Longrightarrow> (\<Sqinter>x\<in>A. c) = c" by (intro order.antisym cINF_greatest) (auto intro: cINF_lower) lemma le_cINF_iff: "A \<noteq> {} \<Longrightarrow> bdd_below (f ` A) \<Longrightarrow> u \<le> \<Sqinter>(f ` A) \<longleftrightarrow> (\<forall>x\<in>A. u \<le> f x)" by (metis cINF_greatest cINF_lower order_trans) lemma cSUP_le_iff: "A \<noteq> {} \<Longrightarrow> bdd_above (f ` A) \<Longrightarrow> \<Squnion>(f ` A) \<le> u \<longleftrightarrow> (\<forall>x\<in>A. f x \<le> u)" by (metis cSUP_least cSUP_upper order_trans) lemma less_cINF_D: "bdd_below (f`A) \<Longrightarrow> y < (\<Sqinter>i\<in>A. f i) \<Longrightarrow> i \<in> A \<Longrightarrow> y < f i" by (metis cINF_lower less_le_trans) lemma cSUP_lessD: "bdd_above (f`A) \<Longrightarrow> (\<Squnion>i\<in>A. f i) < y \<Longrightarrow> i \<in> A \<Longrightarrow> f i < y" by (metis cSUP_upper le_less_trans) lemma cINF_insert: "A \<noteq> {} \<Longrightarrow> bdd_below (f ` A) \<Longrightarrow> \<Sqinter>(f ` insert a A) = inf (f a) (\<Sqinter>(f ` A))" by (simp add: cInf_insert) lemma cSUP_insert: "A \<noteq> {} \<Longrightarrow> bdd_above (f ` A) \<Longrightarrow> \<Squnion>(f ` insert a A) = sup (f a) (\<Squnion>(f ` A))" by (simp add: cSup_insert) lemma cINF_mono: "B \<noteq> {} \<Longrightarrow> bdd_below (f ` A) \<Longrightarrow> (\<And>m. m \<in> B \<Longrightarrow> \<exists>n\<in>A. f n \<le> g m) \<Longrightarrow> \<Sqinter>(f ` A) \<le> \<Sqinter>(g ` B)" using cInf_mono [of "g ` B" "f ` A"] by auto lemma cSUP_mono: "A \<noteq> {} \<Longrightarrow> bdd_above (g ` B) \<Longrightarrow> (\<And>n. n \<in> A \<Longrightarrow> \<exists>m\<in>B. f n \<le> g m) \<Longrightarrow> \<Squnion>(f ` A) \<le> \<Squnion>(g ` B)" using cSup_mono [of "f ` A" "g ` B"] by auto lemma cINF_superset_mono: "A \<noteq> {} \<Longrightarrow> bdd_below (g ` B) \<Longrightarrow> A \<subseteq> B \<Longrightarrow> (\<And>x. x \<in> B \<Longrightarrow> g x \<le> f x) \<Longrightarrow> \<Sqinter>(g ` B) \<le> \<Sqinter>(f ` A)" by (rule cINF_mono) auto lemma cSUP_subset_mono: "\<lbrakk>A \<noteq> {}; bdd_above (g ` B); A \<subseteq> B; \<And>x. 
x \<in> A \<Longrightarrow> f x \<le> g x\<rbrakk> \<Longrightarrow> \<Squnion> (f ` A) \<le> \<Squnion> (g ` B)" by (rule cSUP_mono) auto lemma less_eq_cInf_inter: "bdd_below A \<Longrightarrow> bdd_below B \<Longrightarrow> A \<inter> B \<noteq> {} \<Longrightarrow> inf (Inf A) (Inf B) \<le> Inf (A \<inter> B)" by (metis cInf_superset_mono lattice_class.inf_sup_ord(1) le_infI1) lemma cSup_inter_less_eq: "bdd_above A \<Longrightarrow> bdd_above B \<Longrightarrow> A \<inter> B \<noteq> {} \<Longrightarrow> Sup (A \<inter> B) \<le> sup (Sup A) (Sup B) " by (metis cSup_subset_mono lattice_class.inf_sup_ord(1) le_supI1) lemma cInf_union_distrib: "A \<noteq> {} \<Longrightarrow> bdd_below A \<Longrightarrow> B \<noteq> {} \<Longrightarrow> bdd_below B \<Longrightarrow> Inf (A \<union> B) = inf (Inf A) (Inf B)" by (intro order.antisym le_infI cInf_greatest cInf_lower) (auto intro: le_infI1 le_infI2 cInf_lower) lemma cINF_union: "A \<noteq> {} \<Longrightarrow> bdd_below (f ` A) \<Longrightarrow> B \<noteq> {} \<Longrightarrow> bdd_below (f ` B) \<Longrightarrow> \<Sqinter> (f ` (A \<union> B)) = \<Sqinter> (f ` A) \<sqinter> \<Sqinter> (f ` B)" using cInf_union_distrib [of "f ` A" "f ` B"] by (simp add: image_Un) lemma cSup_union_distrib: "A \<noteq> {} \<Longrightarrow> bdd_above A \<Longrightarrow> B \<noteq> {} \<Longrightarrow> bdd_above B \<Longrightarrow> Sup (A \<union> B) = sup (Sup A) (Sup B)" by (intro order.antisym le_supI cSup_least cSup_upper) (auto intro: le_supI1 le_supI2 cSup_upper) lemma cSUP_union: "A \<noteq> {} \<Longrightarrow> bdd_above (f ` A) \<Longrightarrow> B \<noteq> {} \<Longrightarrow> bdd_above (f ` B) \<Longrightarrow> \<Squnion> (f ` (A \<union> B)) = \<Squnion> (f ` A) \<squnion> \<Squnion> (f ` B)" using cSup_union_distrib [of "f ` A" "f ` B"] by (simp add: image_Un) lemma cINF_inf_distrib: "A \<noteq> {} \<Longrightarrow> bdd_below (f`A) \<Longrightarrow> bdd_below (g`A) \<Longrightarrow> \<Sqinter> (f ` A) \<sqinter> \<Sqinter> (g ` A) = (\<Sqinter>a\<in>A. inf (f a) (g a))" by (intro order.antisym le_infI cINF_greatest cINF_lower2) (auto intro: le_infI1 le_infI2 cINF_greatest cINF_lower le_infI) lemma SUP_sup_distrib: "A \<noteq> {} \<Longrightarrow> bdd_above (f`A) \<Longrightarrow> bdd_above (g`A) \<Longrightarrow> \<Squnion> (f ` A) \<squnion> \<Squnion> (g ` A) = (\<Squnion>a\<in>A. sup (f a) (g a))" by (intro order.antisym le_supI cSUP_least cSUP_upper2) (auto intro: le_supI1 le_supI2 cSUP_least cSUP_upper le_supI) lemma cInf_le_cSup: "A \<noteq> {} \<Longrightarrow> bdd_above A \<Longrightarrow> bdd_below A \<Longrightarrow> Inf A \<le> Sup A" by (auto intro!: cSup_upper2[of "SOME a. a \<in> A"] intro: someI cInf_lower) context fixes f :: "'a \<Rightarrow> 'b::conditionally_complete_lattice" assumes "mono f" begin lemma mono_cInf: "\<lbrakk>bdd_below A; A\<noteq>{}\<rbrakk> \<Longrightarrow> f (Inf A) \<le> (INF x\<in>A. f x)" by (simp add: \<open>mono f\<close> conditionally_complete_lattice_class.cINF_greatest cInf_lower monoD) lemma mono_cSup: "\<lbrakk>bdd_above A; A\<noteq>{}\<rbrakk> \<Longrightarrow> (SUP x\<in>A. f x) \<le> f (Sup A)" by (simp add: \<open>mono f\<close> conditionally_complete_lattice_class.cSUP_least cSup_upper monoD) lemma mono_cINF: "\<lbrakk>bdd_below (A`I); I\<noteq>{}\<rbrakk> \<Longrightarrow> f (INF i\<in>I. A i) \<le> (INF x\<in>I. 
f (A x))" by (simp add: \<open>mono f\<close> conditionally_complete_lattice_class.cINF_greatest cINF_lower monoD) lemma mono_cSUP: "\<lbrakk>bdd_above (A`I); I\<noteq>{}\<rbrakk> \<Longrightarrow> (SUP x\<in>I. f (A x)) \<le> f (SUP i\<in>I. A i)" by (simp add: \<open>mono f\<close> conditionally_complete_lattice_class.cSUP_least cSUP_upper monoD) end end instance complete_lattice \<subseteq> conditionally_complete_lattice by standard (auto intro: Sup_upper Sup_least Inf_lower Inf_greatest) lemma cSup_eq: fixes a :: "'a :: {conditionally_complete_lattice, no_bot}" assumes upper: "\<And>x. x \<in> X \<Longrightarrow> x \<le> a" assumes least: "\<And>y. (\<And>x. x \<in> X \<Longrightarrow> x \<le> y) \<Longrightarrow> a \<le> y" shows "Sup X = a" proof cases assume "X = {}" with lt_ex[of a] least show ?thesis by (auto simp: less_le_not_le) qed (intro cSup_eq_non_empty assms) lemma cInf_eq: fixes a :: "'a :: {conditionally_complete_lattice, no_top}" assumes upper: "\<And>x. x \<in> X \<Longrightarrow> a \<le> x" assumes least: "\<And>y. (\<And>x. x \<in> X \<Longrightarrow> y \<le> x) \<Longrightarrow> y \<le> a" shows "Inf X = a" proof cases assume "X = {}" with gt_ex[of a] least show ?thesis by (auto simp: less_le_not_le) qed (intro cInf_eq_non_empty assms) class conditionally_complete_linorder = conditionally_complete_lattice + linorder begin lemma less_cSup_iff: "X \<noteq> {} \<Longrightarrow> bdd_above X \<Longrightarrow> y < Sup X \<longleftrightarrow> (\<exists>x\<in>X. y < x)" by (rule iffI) (metis cSup_least not_less, metis cSup_upper less_le_trans) lemma cInf_less_iff: "X \<noteq> {} \<Longrightarrow> bdd_below X \<Longrightarrow> Inf X < y \<longleftrightarrow> (\<exists>x\<in>X. x < y)" by (rule iffI) (metis cInf_greatest not_less, metis cInf_lower le_less_trans) lemma cINF_less_iff: "A \<noteq> {} \<Longrightarrow> bdd_below (f`A) \<Longrightarrow> (\<Sqinter>i\<in>A. f i) < a \<longleftrightarrow> (\<exists>x\<in>A. f x < a)" using cInf_less_iff[of "f`A"] by auto lemma less_cSUP_iff: "A \<noteq> {} \<Longrightarrow> bdd_above (f`A) \<Longrightarrow> a < (\<Squnion>i\<in>A. f i) \<longleftrightarrow> (\<exists>x\<in>A. a < f x)" using less_cSup_iff[of "f`A"] by auto lemma less_cSupE: assumes "y < Sup X" "X \<noteq> {}" obtains x where "x \<in> X" "y < x" by (metis cSup_least assms not_le that) lemma less_cSupD: "X \<noteq> {} \<Longrightarrow> z < Sup X \<Longrightarrow> \<exists>x\<in>X. z < x" by (metis less_cSup_iff not_le_imp_less bdd_above_def) lemma cInf_lessD: "X \<noteq> {} \<Longrightarrow> Inf X < z \<Longrightarrow> \<exists>x\<in>X. x < z" by (metis cInf_less_iff not_le_imp_less bdd_below_def) lemma complete_interval: assumes "a < b" and "P a" and "\<not> P b" shows "\<exists>c. a \<le> c \<and> c \<le> b \<and> (\<forall>x. a \<le> x \<and> x < c \<longrightarrow> P x) \<and> (\<forall>d. (\<forall>x. a \<le> x \<and> x < d \<longrightarrow> P x) \<longrightarrow> d \<le> c)" proof (rule exI [where x = "Sup {d. \<forall>x. a \<le> x \<and> x < d \<longrightarrow> P x}"], auto) show "a \<le> Sup {d. \<forall>c. a \<le> c \<and> c < d \<longrightarrow> P c}" by (rule cSup_upper, auto simp: bdd_above_def) (metis \<open>a < b\<close> \<open>\<not> P b\<close> linear less_le) next show "Sup {d. \<forall>c. a \<le> c \<and> c < d \<longrightarrow> P c} \<le> b" apply (rule cSup_least) apply auto apply (metis less_le_not_le) apply (metis \<open>a<b\<close> \<open>\<not> P b\<close> linear less_le) done next fix x assume x: "a \<le> x" and lt: "x < Sup {d. \<forall>c. 
a \<le> c \<and> c < d \<longrightarrow> P c}" show "P x" apply (rule less_cSupE [OF lt], auto) apply (metis less_le_not_le) apply (metis x) done next fix d assume 0: "\<forall>x. a \<le> x \<and> x < d \<longrightarrow> P x" thus "d \<le> Sup {d. \<forall>c. a \<le> c \<and> c < d \<longrightarrow> P c}" by (rule_tac cSup_upper, auto simp: bdd_above_def) (metis \<open>a<b\<close> \<open>\<not> P b\<close> linear less_le) qed end instance complete_linorder < conditionally_complete_linorder .. lemma cSup_eq_Max: "finite (X::'a::conditionally_complete_linorder set) \<Longrightarrow> X \<noteq> {} \<Longrightarrow> Sup X = Max X" using cSup_eq_Sup_fin[of X] by (simp add: Sup_fin_Max) lemma cInf_eq_Min: "finite (X::'a::conditionally_complete_linorder set) \<Longrightarrow> X \<noteq> {} \<Longrightarrow> Inf X = Min X" using cInf_eq_Inf_fin[of X] by (simp add: Inf_fin_Min) lemma cSup_lessThan[simp]: "Sup {..<x::'a::{conditionally_complete_linorder, no_bot, dense_linorder}} = x" by (auto intro!: cSup_eq_non_empty intro: dense_le) lemma cSup_greaterThanLessThan[simp]: "y < x \<Longrightarrow> Sup {y<..<x::'a::{conditionally_complete_linorder, dense_linorder}} = x" by (auto intro!: cSup_eq_non_empty intro: dense_le_bounded) lemma cSup_atLeastLessThan[simp]: "y < x \<Longrightarrow> Sup {y..<x::'a::{conditionally_complete_linorder, dense_linorder}} = x" by (auto intro!: cSup_eq_non_empty intro: dense_le_bounded) lemma cInf_greaterThan[simp]: "Inf {x::'a::{conditionally_complete_linorder, no_top, dense_linorder} <..} = x" by (auto intro!: cInf_eq_non_empty intro: dense_ge) lemma cInf_greaterThanAtMost[simp]: "y < x \<Longrightarrow> Inf {y<..x::'a::{conditionally_complete_linorder, dense_linorder}} = y" by (auto intro!: cInf_eq_non_empty intro: dense_ge_bounded) lemma cInf_greaterThanLessThan[simp]: "y < x \<Longrightarrow> Inf {y<..<x::'a::{conditionally_complete_linorder, dense_linorder}} = y" by (auto intro!: cInf_eq_non_empty intro: dense_ge_bounded) lemma Inf_insert_finite: fixes S :: "'a::conditionally_complete_linorder set" shows "finite S \<Longrightarrow> Inf (insert x S) = (if S = {} then x else min x (Inf S))" by (simp add: cInf_eq_Min) lemma Sup_insert_finite: fixes S :: "'a::conditionally_complete_linorder set" shows "finite S \<Longrightarrow> Sup (insert x S) = (if S = {} then x else max x (Sup S))" by (simp add: cSup_insert sup_max) lemma finite_imp_less_Inf: fixes a :: "'a::conditionally_complete_linorder" shows "\<lbrakk>finite X; x \<in> X; \<And>x. x\<in>X \<Longrightarrow> a < x\<rbrakk> \<Longrightarrow> a < Inf X" by (induction X rule: finite_induct) (simp_all add: cInf_eq_Min Inf_insert_finite) lemma finite_less_Inf_iff: fixes a :: "'a :: conditionally_complete_linorder" shows "\<lbrakk>finite X; X \<noteq> {}\<rbrakk> \<Longrightarrow> a < Inf X \<longleftrightarrow> (\<forall>x \<in> X. a < x)" by (auto simp: cInf_eq_Min) lemma finite_imp_Sup_less: fixes a :: "'a::conditionally_complete_linorder" shows "\<lbrakk>finite X; x \<in> X; \<And>x. x\<in>X \<Longrightarrow> a > x\<rbrakk> \<Longrightarrow> a > Sup X" by (induction X rule: finite_induct) (simp_all add: cSup_eq_Max Sup_insert_finite) lemma finite_Sup_less_iff: fixes a :: "'a :: conditionally_complete_linorder" shows "\<lbrakk>finite X; X \<noteq> {}\<rbrakk> \<Longrightarrow> a > Sup X \<longleftrightarrow> (\<forall>x \<in> X. a > x)" by (auto simp: cSup_eq_Max) class linear_continuum = conditionally_complete_linorder + dense_linorder + assumes UNIV_not_singleton: "\<exists>a b::'a. 
a \<noteq> b" begin lemma ex_gt_or_lt: "\<exists>b. a < b \<or> b < a" by (metis UNIV_not_singleton neq_iff) end instantiation nat :: conditionally_complete_linorder begin definition "Sup (X::nat set) = (if X={} then 0 else Max X)" definition "Inf (X::nat set) = (LEAST n. n \<in> X)" lemma bdd_above_nat: "bdd_above X \<longleftrightarrow> finite (X::nat set)" proof assume "bdd_above X" then obtain z where "X \<subseteq> {.. z}" by (auto simp: bdd_above_def) then show "finite X" by (rule finite_subset) simp qed simp instance proof fix x :: nat fix X :: "nat set" show "Inf X \<le> x" if "x \<in> X" "bdd_below X" using that by (simp add: Inf_nat_def Least_le) show "x \<le> Inf X" if "X \<noteq> {}" "\<And>y. y \<in> X \<Longrightarrow> x \<le> y" using that unfolding Inf_nat_def ex_in_conv[symmetric] by (rule LeastI2_ex) show "x \<le> Sup X" if "x \<in> X" "bdd_above X" using that by (auto simp add: Sup_nat_def bdd_above_nat) show "Sup X \<le> x" if "X \<noteq> {}" "\<And>y. y \<in> X \<Longrightarrow> y \<le> x" proof - from that have "bdd_above X" by (auto simp: bdd_above_def) with that show ?thesis by (simp add: Sup_nat_def bdd_above_nat) qed qed end lemma Inf_nat_def1: fixes K::"nat set" assumes "K \<noteq> {}" shows "Inf K \<in> K" by (auto simp add: Min_def Inf_nat_def) (meson LeastI assms bot.extremum_unique subsetI) lemma Sup_nat_empty [simp]: "Sup {} = (0::nat)" by (auto simp add: Sup_nat_def) instantiation int :: conditionally_complete_linorder begin definition "Sup (X::int set) = (THE x. x \<in> X \<and> (\<forall>y\<in>X. y \<le> x))" definition "Inf (X::int set) = - (Sup (uminus ` X))" instance proof { fix x :: int and X :: "int set" assume "X \<noteq> {}" "bdd_above X" then obtain x y where "X \<subseteq> {..y}" "x \<in> X" by (auto simp: bdd_above_def) then have *: "finite (X \<inter> {x..y})" "X \<inter> {x..y} \<noteq> {}" and "x \<le> y" by (auto simp: subset_eq) have "\<exists>!x\<in>X. (\<forall>y\<in>X. y \<le> x)" proof { fix z assume "z \<in> X" have "z \<le> Max (X \<inter> {x..y})" proof cases assume "x \<le> z" with \<open>z \<in> X\<close> \<open>X \<subseteq> {..y}\<close> *(1) show ?thesis by (auto intro!: Max_ge) next assume "\<not> x \<le> z" then have "z < x" by simp also have "x \<le> Max (X \<inter> {x..y})" using \<open>x \<in> X\<close> *(1) \<open>x \<le> y\<close> by (intro Max_ge) auto finally show ?thesis by simp qed } note le = this with Max_in[OF *] show ex: "Max (X \<inter> {x..y}) \<in> X \<and> (\<forall>z\<in>X. z \<le> Max (X \<inter> {x..y}))" by auto fix z assume *: "z \<in> X \<and> (\<forall>y\<in>X. y \<le> z)" with le have "z \<le> Max (X \<inter> {x..y})" by auto moreover have "Max (X \<inter> {x..y}) \<le> z" using * ex by auto ultimately show "z = Max (X \<inter> {x..y})" by auto qed then have "Sup X \<in> X \<and> (\<forall>y\<in>X. y \<le> Sup X)" unfolding Sup_int_def by (rule theI') } note Sup_int = this { fix x :: int and X :: "int set" assume "x \<in> X" "bdd_above X" then show "x \<le> Sup X" using Sup_int[of X] by auto } note le_Sup = this { fix x :: int and X :: "int set" assume "X \<noteq> {}" "\<And>y. y \<in> X \<Longrightarrow> y \<le> x" then show "Sup X \<le> x" using Sup_int[of X] by (auto simp: bdd_above_def) } note Sup_le = this { fix x :: int and X :: "int set" assume "x \<in> X" "bdd_below X" then show "Inf X \<le> x" using le_Sup[of "-x" "uminus ` X"] by (auto simp: Inf_int_def) } { fix x :: int and X :: "int set" assume "X \<noteq> {}" "\<And>y. 
y \<in> X \<Longrightarrow> x \<le> y" then show "x \<le> Inf X" using Sup_le[of "uminus ` X" "-x"] by (force simp: Inf_int_def) } qed end lemma interval_cases: fixes S :: "'a :: conditionally_complete_linorder set" assumes ivl: "\<And>a b x. a \<in> S \<Longrightarrow> b \<in> S \<Longrightarrow> a \<le> x \<Longrightarrow> x \<le> b \<Longrightarrow> x \<in> S" shows "\<exists>a b. S = {} \<or> S = UNIV \<or> S = {..<b} \<or> S = {..b} \<or> S = {a<..} \<or> S = {a..} \<or> S = {a<..<b} \<or> S = {a<..b} \<or> S = {a..<b} \<or> S = {a..b}" proof - define lower upper where "lower = {x. \<exists>s\<in>S. s \<le> x}" and "upper = {x. \<exists>s\<in>S. x \<le> s}" with ivl have "S = lower \<inter> upper" by auto moreover have "\<exists>a. upper = UNIV \<or> upper = {} \<or> upper = {.. a} \<or> upper = {..< a}" proof cases assume *: "bdd_above S \<and> S \<noteq> {}" from * have "upper \<subseteq> {.. Sup S}" by (auto simp: upper_def intro: cSup_upper2) moreover from * have "{..< Sup S} \<subseteq> upper" by (force simp add: less_cSup_iff upper_def subset_eq Ball_def) ultimately have "upper = {.. Sup S} \<or> upper = {..< Sup S}" unfolding ivl_disj_un(2)[symmetric] by auto then show ?thesis by auto next assume "\<not> (bdd_above S \<and> S \<noteq> {})" then have "upper = UNIV \<or> upper = {}" by (auto simp: upper_def bdd_above_def not_le dest: less_imp_le) then show ?thesis by auto qed moreover have "\<exists>b. lower = UNIV \<or> lower = {} \<or> lower = {b ..} \<or> lower = {b <..}" proof cases assume *: "bdd_below S \<and> S \<noteq> {}" from * have "lower \<subseteq> {Inf S ..}" by (auto simp: lower_def intro: cInf_lower2) moreover from * have "{Inf S <..} \<subseteq> lower" by (force simp add: cInf_less_iff lower_def subset_eq Ball_def) ultimately have "lower = {Inf S ..} \<or> lower = {Inf S <..}" unfolding ivl_disj_un(1)[symmetric] by auto then show ?thesis by auto next assume "\<not> (bdd_below S \<and> S \<noteq> {})" then have "lower = UNIV \<or> lower = {}" by (auto simp: lower_def bdd_below_def not_le dest: less_imp_le) then show ?thesis by auto qed ultimately show ?thesis unfolding greaterThanAtMost_def greaterThanLessThan_def atLeastAtMost_def atLeastLessThan_def by (metis inf_bot_left inf_bot_right inf_top.left_neutral inf_top.right_neutral) qed lemma cSUP_eq_cINF_D: fixes f :: "_ \<Rightarrow> 'b::conditionally_complete_lattice" assumes eq: "(\<Squnion>x\<in>A. f x) = (\<Sqinter>x\<in>A. f x)" and bdd: "bdd_above (f ` A)" "bdd_below (f ` A)" and a: "a \<in> A" shows "f a = (\<Sqinter>x\<in>A. f x)" apply (rule antisym) using a bdd apply (auto simp: cINF_lower) apply (metis eq cSUP_upper) done lemma cSUP_UNION: fixes f :: "_ \<Rightarrow> 'b::conditionally_complete_lattice" assumes ne: "A \<noteq> {}" "\<And>x. x \<in> A \<Longrightarrow> B(x) \<noteq> {}" and bdd_UN: "bdd_above (\<Union>x\<in>A. f ` B x)" shows "(\<Squnion>z \<in> \<Union>x\<in>A. B x. f z) = (\<Squnion>x\<in>A. \<Squnion>z\<in>B x. f z)" proof - have bdd: "\<And>x. x \<in> A \<Longrightarrow> bdd_above (f ` B x)" using bdd_UN by (meson UN_upper bdd_above_mono) obtain M where "\<And>x y. x \<in> A \<Longrightarrow> y \<in> B(x) \<Longrightarrow> f y \<le> M" using bdd_UN by (auto simp: bdd_above_def) then have bdd2: "bdd_above ((\<lambda>x. \<Squnion>z\<in>B x. f z) ` A)" unfolding bdd_above_def by (force simp: bdd cSUP_le_iff ne(2)) have "(\<Squnion>z \<in> \<Union>x\<in>A. B x. f z) \<le> (\<Squnion>x\<in>A. \<Squnion>z\<in>B x. 
f z)" using assms by (fastforce simp add: intro!: cSUP_least intro: cSUP_upper2 simp: bdd2 bdd) moreover have "(\<Squnion>x\<in>A. \<Squnion>z\<in>B x. f z) \<le> (\<Squnion> z \<in> \<Union>x\<in>A. B x. f z)" using assms by (fastforce simp add: intro!: cSUP_least intro: cSUP_upper simp: image_UN bdd_UN) ultimately show ?thesis by (rule order_antisym) qed lemma cINF_UNION: fixes f :: "_ \<Rightarrow> 'b::conditionally_complete_lattice" assumes ne: "A \<noteq> {}" "\<And>x. x \<in> A \<Longrightarrow> B(x) \<noteq> {}" and bdd_UN: "bdd_below (\<Union>x\<in>A. f ` B x)" shows "(\<Sqinter>z \<in> \<Union>x\<in>A. B x. f z) = (\<Sqinter>x\<in>A. \<Sqinter>z\<in>B x. f z)" proof - have bdd: "\<And>x. x \<in> A \<Longrightarrow> bdd_below (f ` B x)" using bdd_UN by (meson UN_upper bdd_below_mono) obtain M where "\<And>x y. x \<in> A \<Longrightarrow> y \<in> B(x) \<Longrightarrow> f y \<ge> M" using bdd_UN by (auto simp: bdd_below_def) then have bdd2: "bdd_below ((\<lambda>x. \<Sqinter>z\<in>B x. f z) ` A)" unfolding bdd_below_def by (force simp: bdd le_cINF_iff ne(2)) have "(\<Sqinter>z \<in> \<Union>x\<in>A. B x. f z) \<le> (\<Sqinter>x\<in>A. \<Sqinter>z\<in>B x. f z)" using assms by (fastforce simp add: intro!: cINF_greatest intro: cINF_lower simp: bdd2 bdd) moreover have "(\<Sqinter>x\<in>A. \<Sqinter>z\<in>B x. f z) \<le> (\<Sqinter>z \<in> \<Union>x\<in>A. B x. f z)" using assms by (fastforce simp add: intro!: cINF_greatest intro: cINF_lower2 simp: bdd bdd_UN bdd2) ultimately show ?thesis by (rule order_antisym) qed lemma cSup_abs_le: fixes S :: "('a::{linordered_idom,conditionally_complete_linorder}) set" shows "S \<noteq> {} \<Longrightarrow> (\<And>x. x\<in>S \<Longrightarrow> \<bar>x\<bar> \<le> a) \<Longrightarrow> \<bar>Sup S\<bar> \<le> a" apply (auto simp add: abs_le_iff intro: cSup_least) by (metis bdd_aboveI cSup_upper neg_le_iff_le order_trans) end
import os import os.path as osp import copy import yaml import numpy as np from ast import literal_eval from libs.utils.collections import AttrDict __C = AttrDict() cfg = __C # ---------------------------------------------------------------------------- # # Misc options # --------------------------------------------------------------------------- # # Device for training or testing # E.g., 'cuda' for using GPU, 'cpu' for using CPU __C.DEVICE = 'cuda' # Pixel mean values (BGR order) as a list __C.PIXEL_MEANS = np.array([[[0.485, 0.456, 0.406]]]) # Pixel std values (BGR order) as a list __C.PIXEL_STDS = np.array([[[0.229, 0.224, 0.225]]]) # Calculation the model flops and params __C.CALC_FLOPS = True # Directory for saving checkpoints and loggers __C.CKPT = 'ckpts/pose' # Display the log per iteration __C.DISPLAY_ITER = 20 # Root directory of project __C.ROOT_DIR = osp.abspath(osp.join(osp.dirname(__file__), '..')) # Data directory __C.DATA_DIR = osp.abspath(osp.join(__C.ROOT_DIR, 'data')) # ---------------------------------------------------------------------------- # # Optimizer options # ---------------------------------------------------------------------------- # __C.SOLVER = AttrDict() # Optimizer type __C.SOLVER.OPTIMIZER = 'adam' # learning rate __C.SOLVER.LR = 0.001 # learning rate adjusting schedule __C.SOLVER.LR_SCHEDULE = 'step' # Momentum in sgd __C.SOLVER.MOMENTUM = 0.9 # Number of iter adjusts the lr __C.SOLVER.UPDATE_ITER = 10000 # Decay rate in adjusting lr __C.SOLVER.UPDATE_RATE = 0.8 # ---------------------------------------------------------------------------- # # Pose options # ---------------------------------------------------------------------------- # __C.POSE = AttrDict() # Type of pose estimation model __C.POSE.TYPE = 'paf' # Number of key points __C.POSE.NUM_KPTS = 14 # Number of pafs __C.POSE.NUM_PAFS = 22 # (11 * 2) # Confidence threshold of key points __C.POSE.KPT_THRESH = 0.1 # The size of image inputted pose model __C.POSE.SCALE = (512, 512) # Connections of limbs __C.POSE.LIMBS = [[1,2], [2,3], [4,5], [5,6], [14,1], [14,4], [7,8], [8,9], [10,11], [11,12], [13,14]] # Head's key points connection __C.POSE.HEAD = [13, 14] # Body's key points connection __C.POSE.BODY = [[1,2], [2,3], [4,5], [5,6], [14,1], [14,4], [7,8], [8,9], [10,11], [11,12]] # limb width for judge the point in one limb or not __C.POSE.LIMB_WIDTH = 2.5 # variance for paf to generate Gaussian heat map __C.POSE.GAUSSIAN_VAR = 1.1 # ---------------------------------------------------------------------------- # # Pose options # ---------------------------------------------------------------------------- # __C.RNN = AttrDict() # Number of classes __C.RNN.NUM_CLASSES = 9 # Feature number of rnn input, default is 30 represents 10 limbs and 20 angles __C.RNN.DIM_IN = 30 # Target delay __C.RNN.TARGET_DELAY = 15 # time step __C.RNN.TIME_STEP = 1350 # 90*15 for video which is 15 fps # traffic police pose English name __C.RNN.GESTURES = {0: "--", 1: "STOP", 2: "MOVE STRAIGHT", 3: "LEFT TURN", 4: "LEFT TURN WAITING", 5: "RIGHT TURN", 6: "LANG CHANGING", 7: "SLOW DOWN", 8: "PULL OVER"} # --------------------------------------------------------------------------- # # Test options # --------------------------------------------------------------------------- # __C.TEST = AttrDict() # Test data path __C.TEST.DATA_PATH = 'rnn/test' # Key points coordinates save path __C.TEST.SAVE_PATH = 'paf_features' # Model weights path __C.TEST.WEIGHTS = 'weights/mypaf.pth' # dir of npy files for training __C.TEST.NPY_DIR = 
'rnn/test_npy' # dir of annotations files for training __C.TEST.CSV_DIR = 'rnn/test_csv' # test batch size __C.TEST.BATCH_SIZE = 2 # --------------------------------------------------------------------------- # # Train options # --------------------------------------------------------------------------- # __C.TRAIN = AttrDict() # Test data path __C.TRAIN.DATA_PATH = 'ai_challenger' # Key points coordinates save path __C.TRAIN.SAVE_PATH = 'paf_features' # Model weights path __C.TRAIN.WEIGHTS = '' # Image scale during train __C.TRAIN.SCALE = (512, 512) # Snapshot iteration __C.TRAIN.SNAPSHOT = 10000 # Iterations for training __C.TRAIN.ITERS = 40000 # batch size __C.TRAIN.BATCH_SIZE = 4 # number of threads used loading data __C.TRAIN.LOAD_THREADS = 4 # dir of npy files for training __C.TRAIN.NPY_DIR = 'rnn/train_npy' # dir of annotations files for training __C.TRAIN.CSV_DIR = 'rnn/train_csv' # ---------------------------------------------------------------------------- # # Deprecated options # If an option is removed from the code and you don't want to break existing # yaml configs, you can add the full config key as a string to the set below. # ---------------------------------------------------------------------------- # _DEPCRECATED_KEYS = set() # ---------------------------------------------------------------------------- # # Renamed options # If you rename a config option, record the mapping from the old name to the new # name in the dictionary below. Optionally, if the type also changed, you can # make the value a tuple that specifies first the renamed key and then # instructions for how to edit the config file. # ---------------------------------------------------------------------------- # _RENAMED_KEYS = { 'EXAMPLE.RENAMED.KEY': 'EXAMPLE.KEY', # Dummy example to follow 'PIXEL_MEAN': 'PIXEL_MEANS', 'PIXEL_STD': 'PIXEL_STDS', } def _merge_a_into_b(a, b, stack=None): """Merge config dictionary a into config dictionary b, clobbering the options in b whenever they are also specified in a. """ assert isinstance(a, AttrDict), \ '`a` (cur type {}) must be an instance of {}'.format(type(a), AttrDict) assert isinstance(b, AttrDict), \ '`b` (cur type {}) must be an instance of {}'.format(type(b), AttrDict) for k, v_ in a.items(): full_key = '.'.join(stack) + '.' + k if stack is not None else k # a must specify keys that are in b if k not in b: if _key_is_deprecated(full_key): continue elif _key_is_renamed(full_key): _raise_key_rename_error(full_key) else: raise KeyError('Non-existent config key: {}'.format(full_key)) v = copy.deepcopy(v_) v = _decode_cfg_value(v) v = _check_and_coerce_cfg_value_type(v, b[k], k, full_key) # Recursively merge dicts if isinstance(v, AttrDict): try: stack_push = [k] if stack is None else stack + [k] _merge_a_into_b(v, b[k], stack=stack_push) except BaseException: raise else: b[k] = v def merge_cfg_from_file(filename): """Load a config file and merge it into the default options.""" with open(filename, 'r') as f: yaml_cfg = AttrDict(yaml.load(f)) _merge_a_into_b(yaml_cfg, __C) # update_cfg() def merge_cfg_from_cfg(cfg_other): """Merge `cfg_other` into the global config.""" _merge_a_into_b(cfg_other, __C) def merge_cfg_from_list(cfg_list): """Merge config keys, values in a list (e.g., from command line) into the global config. For example, `cfg_list = ['TEST.NMS', 0.5]`. 
""" assert len(cfg_list) % 2 == 0 for full_key, v in zip(cfg_list[0::2], cfg_list[1::2]): if _key_is_deprecated(full_key): continue if _key_is_renamed(full_key): _raise_key_rename_error(full_key) key_list = full_key.split('.') d = __C for subkey in key_list[:-1]: assert subkey in d, 'Non-existent key: {}'.format(full_key) d = d[subkey] subkey = key_list[-1] assert subkey in d, 'Non-existent key: {}'.format(full_key) value = _decode_cfg_value(v) value = _check_and_coerce_cfg_value_type( value, d[subkey], subkey, full_key ) d[subkey] = value def _decode_cfg_value(v): """Decodes a raw config value (e.g., from a yaml config files or command line argument) into a Python object. """ # Configs parsed from raw yaml will contain dictionary keys that need to be # converted to AttrDict objects if isinstance(v, dict): return AttrDict(v) # All remaining processing is only applied to strings if not isinstance(v, str): return v # Try to interpret `v` as a: # string, number, tuple, list, dict, boolean, or None try: v = literal_eval(v) # The following two excepts allow v to pass through when it represents a # string. # # Longer explanation: # The type of v is always a string (before calling literal_eval), but # sometimes it *represents* a string and other times a data structure, like # a list. In the case that v represents a string, what we got back from the # yaml parser is 'foo' *without quotes* (so, not '"foo"'). literal_eval is # ok with '"foo"', but will raise a ValueError if given 'foo'. In other # cases, like paths (v = 'foo/bar' and not v = '"foo/bar"'), literal_eval # will raise a SyntaxError. except ValueError: pass except SyntaxError: pass return v def _check_and_coerce_cfg_value_type(value_a, value_b, key, full_key): """Checks that `value_a`, which is intended to replace `value_b` is of the right type. The type is correct if it matches exactly or is one of a few cases in which the type can be easily coerced. """ # The types must match (with some exceptions) type_b = type(value_b) type_a = type(value_a) if type_a is type_b: return value_a # Exceptions: numpy arrays, strings, tuple<->list if isinstance(value_b, np.ndarray): value_a = np.array(value_a, dtype=value_b.dtype) elif isinstance(value_b, str): value_a = str(value_a) elif isinstance(value_a, tuple) and isinstance(value_b, list): value_a = list(value_a) elif isinstance(value_a, list) and isinstance(value_b, tuple): value_a = tuple(value_a) else: raise ValueError( 'Type mismatch ({} vs. {}) with values ({} vs. {}) for config ' 'key: {}'.format(type_b, type_a, value_b, value_a, full_key) ) return value_a def _key_is_deprecated(full_key): if full_key in _DEPCRECATED_KEYS: return True return False def _key_is_renamed(full_key): return full_key in _RENAMED_KEYS def _raise_key_rename_error(full_key): new_key = _RENAMED_KEYS[full_key] if isinstance(new_key, tuple): msg = ' Note: ' + new_key[1] new_key = new_key[0] else: msg = '' raise KeyError( 'Key {} was renamed to {}; please update your config.{}'. format(full_key, new_key, msg) )
module time_mgt contains subroutine timestamp() use, intrinsic :: iso_fortran_env implicit none character(len=8) :: ampm integer(kind=int32) :: d integer(kind=int32) :: h integer(kind=int32) :: m integer(kind=int32) :: mm character(len=9), parameter, dimension(12) :: month = (/ & 'January ', 'February ', & 'March ', 'April ', & 'May ', 'June ', & 'July ', 'August ', & 'September', 'October ', & 'November ', 'December '/) integer(kind=4) :: n integer(kind=4) :: s integer(kind=4) :: values(8) integer(kind=4) :: y call date_and_time(values=values) y = values(1) m = values(2) d = values(3) h = values(5) n = values(6) s = values(7) mm = values(8) if (h < 12) then ampm = 'AM' else if (h == 12) then if (n == 0 .and. s == 0) then ampm = 'Noon' else ampm = 'PM' end if else h = h - 12 if (h < 12) then ampm = 'PM' else if (h == 12) then if (n == 0 .and. s == 0) then ampm = 'Midnight' else ampm = 'AM' end if end if end if write (*, '(i2,1x,a,1x,i4,2x,i2,a1,i2.2,a1,i2.2,a1,i3.3,1x,a)') & d, trim(month(m)), y, h, ':', n, ':', s, '.', mm, trim(ampm) return end subroutine end module program main use, intrinsic :: iso_fortran_env use time_mgt use mpi_f08 implicit none integer, parameter :: n=1000000 integer(kind=int32) :: i integer(kind=int32) :: ierror ! To control errors in MPI calls integer(kind=int32) :: rank ! Unique number received by each process integer(kind=int32) :: num_proc ! Total number of processes real(kind=real64) :: wtime real(kind=real64), allocatable, dimension(:) :: array ! Initialize MPI. This must be the first MPI call call MPI_Init(ierror) ! Get the number of processes call MPI_Comm_size(MPI_COMM_WORLD, num_proc, ierror) ! Get the individual process rank call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierror) if (rank == 0) then wtime = MPI_Wtime() call timestamp() ! Only rank = 0 print this write (*, '(a)') '' write (*, '(a,i2,2x,a)') 'RANK:', rank, ' Master process reporting:' write (*, '(a,i2,2x,a,i3)') 'RANK:', rank, & ' The number of MPI processes is ', num_proc else ! Every MPI process will print this message. write (*, '(a,i2,2x,a,i8)') 'RANK:', rank, & ' Allocating array of size:', rank*n ! Each rank will allocate an array of a different size allocate (array(rank*n)) do i = 1, size(array) array(i) = log(real(rank))+sqrt(real(i)) end do ! Reporting sum of array write (*, '(a,i2,2x,a,e12.3)') 'RANK:', rank, ' Sum of array:', sum(array) end if if (rank == 0) then write (*, '(a)') '' write (*, '(a,i2,2x,a)') 'RANK:', rank, ' Master process reporting:' write (*, '(a,i2,2x,a)') 'RANK:', rank, & ' Normal end of execution for master' wtime = MPI_Wtime() - wtime write (*, '(a)') '' write (*, '(a,i2,2x,a,g14.6,a)') & 'RANK:', rank, ' Elapsed wall clock time = ', wtime, ' seconds.' write (*, '(a)') '' end if ! No more MPI calls after Finalize call MPI_Finalize(ierror) ! Ranks are intrinsic to each process and this conditional is legal if (rank == 0) then write (*, '(a)') '' write (*, '(a,i2,2x,a)') 'RANK:', rank, ' Master process reporting:' write (*, '(a,i2,2x,a)') 'RANK:', rank, ' Normal end of execution for all' call timestamp() end if stop end program
theory first_incompleteness_par imports embedding begin nitpick_params[assms=true, user_axioms=true, show_all, expect=genuine, format = 3] section \<open>First Incompleteness Theorem (paraconsistent scenario)\<close> (* Here we reconstruct the proofs of Gödel's first theorem (file "first_incompleteness_class.thy") but this time employing the paraconsistent logic (R)mbC (featuring a paraconsistent negation). *) abbreviation neg :: "wo\<Rightarrow>wo" ("\<^bold>\<not>_" [54] 55) where "\<^bold>\<not>\<phi> \<equiv> \<^bold>\<not>\<^sup>p\<phi>" (* negation is paraconsistent *) abbreviation circ :: "wo\<Rightarrow>wo" ("\<^bold>\<circ>_" [54] 55) where "\<^bold>\<circ>\<phi> \<equiv> \<^bold>\<circ>\<^sup>m\<^sup>b\<^sup>c\<phi>" (* logic is (R)mbC *) (* We employ the model finder Nitpick to verify that the problem is not trivial: *) lemma "\<forall>P. [\<^bold>\<turnstile> P] \<or> [\<^bold>\<turnstile> \<^bold>\<not>P]" nitpick[card w=2] oops (* countermodel found *) lemma "\<exists>P. \<sim>[\<^bold>\<turnstile> P] \<and> \<sim>[\<^bold>\<turnstile> \<^bold>\<not>P]" nitpick[card w=1] oops (* countermodel found *) (* If F is consistent, then F \<turnstile>/ G\<^sub>(\<^sub>F\<^sub>) *) lemma non_provable: fixes G assumes "[\<^bold>\<turnstile> G \<^bold>\<leftrightarrow> \<^bold>\<not>\<^bold>\<box>G]" (* G's fixed-point *) and "[\<^bold>\<turnstile> \<^bold>\<circ>\<^bold>\<box>G]" (* \<^bold>\<circ>-consistency of \<^bold>\<box>G *) shows "\<sim>[\<^bold>\<turnstile> G]" proof assume Gprov: "[\<^bold>\<turnstile> G]" (* assume G were provable *) hence "[\<^bold>\<turnstile> \<^bold>\<box>G]" by simp (* by necessitation *) hence negGprov: "[\<^bold>\<turnstile> \<^bold>\<not>G]" (* by contraposition and \<dots>*) using assms(1) assms(2) by blast (*\<dots>using G's fixed-point with \<^bold>\<circ>\<^bold>\<box>G *) from Gprov negGprov have "[\<^bold>\<turnstile> \<^bold>\<bottom>]" using assms(2) (* using \<^bold>\<circ>\<^bold>\<box>G \<dots>*) by presburger (*\<dots> the solver can derive \<^bold>\<bottom> *) thus False by simp qed (* If F is \<^bold>*-consistent, then F \<turnstile>/ \<^bold>\<not>G\<^sub>(\<^sub>F\<^sub>) *) lemma non_refutable_v1: fixes G assumes "[\<^bold>\<turnstile> G \<^bold>\<leftrightarrow> \<^bold>\<not>\<^bold>\<box>G]" (* G's fixed-point *) and "\<forall>\<phi>. \<sim>[\<^bold>\<turnstile> \<^bold>\<not>\<phi> \<^bold>\<and> \<^bold>\<box>\<phi>]" (* \<^bold>*-consistency *) and "[\<^bold>\<turnstile> \<^bold>\<circ>G]" (* \<^bold>\<circ>-consistency of G *) shows "\<sim>[\<^bold>\<turnstile> \<^bold>\<not>G]" proof assume negGprov: "[\<^bold>\<turnstile> \<^bold>\<not>G]" (* assume G were refutable *) hence provGprov: "[\<^bold>\<turnstile> \<^bold>\<box>G]" (* by contraposition and \<dots>*) using assms(1) assms(3) by blast (*\<dots>using G's fixed-point with \<^bold>\<circ>G *) have Gcons: "\<sim>[\<^bold>\<turnstile> \<^bold>\<not>G \<^bold>\<and> \<^bold>\<box>G]" using assms(2) by (rule allE) (*G is *-con.*) from negGprov provGprov have "[\<^bold>\<turnstile> \<^bold>\<not>G \<^bold>\<and> \<^bold>\<box>G]" by simp (* by \<^bold>\<and>-intr.*) hence "[\<^bold>\<turnstile> \<^bold>\<bottom>]" using Gcons by simp (* using \<^bold>*-consistency of G *) thus False by simp qed (* If F is S-consistent, then F \<turnstile>/ \<^bold>\<not>G\<^sub>(\<^sub>F\<^sub>) *) lemma non_refutable_v2: fixes G assumes "[\<^bold>\<turnstile> G \<^bold>\<leftrightarrow> \<^bold>\<not>\<^bold>\<box>G]" (* G's fixed-point *) and "\<forall>\<phi>. 
[\<^bold>\<turnstile> \<^bold>\<box>\<phi>]\<longrightarrow>[\<^bold>\<turnstile> \<phi>]" (* S-consistency *) and "[\<^bold>\<turnstile> \<^bold>\<circ>G]" (* \<^bold>\<circ>-consistency of G*) shows "\<sim>[\<^bold>\<turnstile> \<^bold>\<not>G]" proof assume negGprov: "[\<^bold>\<turnstile> \<^bold>\<not>G]" (* assume G were refutable *) hence provGprov: "[\<^bold>\<turnstile> \<^bold>\<box>G]" (* by contraposition and \<dots>*) using assms(1) assms(3) by blast (*\<dots>using G's fixed-point with \<^bold>\<circ>G *) have "[\<^bold>\<turnstile> \<^bold>\<box>G] \<longrightarrow> [\<^bold>\<turnstile> G]" using assms(2) by (rule allE) (* G is S-con.*) hence Gprov: "[\<^bold>\<turnstile> G]" using provGprov by (rule mp) (* by modus ponens *) from Gprov negGprov have "[\<^bold>\<turnstile> \<^bold>\<bottom>]" using assms(3) (* using \<^bold>\<circ>G \<dots>*) by simp (*\<dots> by LFI definition of \<^bold>\<bottom> *) thus False by simp qed section \<open>Experiments with other variants\<close> abbreviation "P_consistency_a \<equiv> \<sim>[\<^bold>\<turnstile> \<^bold>\<box>\<^bold>\<bottom>]" abbreviation "P_consistency_b_par \<equiv> [\<^bold>\<turnstile> \<^bold>\<not>\<^bold>\<box>\<^bold>\<bottom>]" abbreviation "circ_consistency \<equiv> \<forall>\<phi>. [\<^bold>\<turnstile> \<^bold>\<circ>\<phi>]" (* If F is P-consistent-a, then F \<turnstile>/ \<^bold>\<not>G\<^sub>F *) lemma non_refutable_v3: fixes G assumes "[\<^bold>\<turnstile> G \<^bold>\<leftrightarrow> \<^bold>\<not>\<^bold>\<box>G]" and P_consistency_a and "[\<^bold>\<turnstile> \<^bold>\<circ>G]" shows "\<sim>[\<^bold>\<turnstile> \<^bold>\<not>G]" using assms by meson (* If F is P-consistent-b, then F \<turnstile>/ \<^bold>\<not>G\<^sub>F *) lemma non_refutable_v4: fixes G assumes "[\<^bold>\<turnstile> G \<^bold>\<leftrightarrow> \<^bold>\<not>\<^bold>\<box>G]" and P_consistency_b_par and "[\<^bold>\<turnstile> \<^bold>\<circ>G]" shows "\<sim>[\<^bold>\<turnstile> \<^bold>\<not>G]" (* sledgehammer[prover=remote_leo3,verbose](assms) (*Leo-III reports a proof*)*) oops (* TODO: prove or refute*) (* If F is \<^bold>\<circ>-consistent, then F \<turnstile>/ \<^bold>\<not>G\<^sub>F *) lemma non_refutable_v5: fixes G assumes "[\<^bold>\<turnstile> G \<^bold>\<leftrightarrow> \<^bold>\<not>\<^bold>\<box>G]" and circ_consistency and "[\<^bold>\<turnstile> \<^bold>\<circ>G]" shows "\<sim>[\<^bold>\<turnstile> \<^bold>\<not>G]" nitpick oops (* countermodel found *) end
#ifndef TEST_UNIT_MATH_PRIM_SCAL_PROB_HPP #define TEST_UNIT_MATH_PRIM_SCAL_PROB_HPP #include <boost/math/distributions.hpp> #include <gtest/gtest.h> #include <algorithm> #include <vector> /** * Uses a chi-squared test to assert that a vector of observed counts * is consistent with a vector of expected counts. Useful for testing RNGs. */ void assert_chi_squared(const std::vector<int>& counts, const std::vector<double>& expected, double tolerance) { int bins = counts.size(); EXPECT_EQ(bins, expected.size()); double chi = 0; for (int i = 0; i < bins; ++i) { double discrepancy = expected[i] - counts[i]; chi += discrepancy * discrepancy / expected[i]; } boost::math::chi_squared dist(bins - 1); double chi_threshold = quantile(complement(dist, tolerance)); EXPECT_TRUE(chi < chi_threshold); } /** * Like assert_matches_quantiles, but the bins are not necessarily * equiprobable. Assert that approximately proportions[i] of the * samples are in bin i, which has lower bound bin_boundaries[i-1] and * upper bound bin_boundaries[i], using a chi-squared goodness of fit * test. bin_boundaries is assumed sorted in increasing order. **/ void assert_matches_bins(const std::vector<double>& samples, const std::vector<double>& bin_boundaries, const std::vector<double>& proportions, double tolerance) { ASSERT_GT(samples.size(), 0); int N = samples.size(); std::vector<double> mysamples = samples; std::sort(mysamples.begin(), mysamples.end()); ASSERT_GT(bin_boundaries.size(), 0); ASSERT_TRUE(bin_boundaries.size() == proportions.size()); int K = bin_boundaries.size(); std::vector<double> expected; for (int i = 0; i < K; i++) { ASSERT_TRUE(proportions[i] >= 0 && proportions[i] <= 1); expected.push_back(proportions[i] * N); } std::vector<int> counts(K); size_t current_index = 0; for (int i = 0; i < N; ++i) { while (mysamples[i] >= bin_boundaries[current_index]) { ++current_index; EXPECT_TRUE(current_index < bin_boundaries.size()); } ++counts[current_index]; } assert_chi_squared(counts, expected, tolerance); } /** * From a collection of samples and a list of quantiles, assumed * ordered, assert that the samples resemble draws from a distribution * with those quantiles, using a chi_squared goodness of fit * test. That is, assert that the samples are approximately evenly * distributed among the quantiles.size() equiprobable bins, who's * upper bounds are given in quantiles in increasing order. */ void assert_matches_quantiles(const std::vector<double>& samples, const std::vector<double>& quantiles, double tolerance) { int K = quantiles.size(); std::vector<double> proportions; for (int i = 0; i < K; ++i) proportions.push_back(1.0 / K); assert_matches_bins(samples, quantiles, proportions, tolerance); } #endif
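For readers who want to see the same goodness-of-fit idea outside the gtest harness, here is a small, self-contained Python analogue (an illustration of the technique only, not the C++ API above): draw samples, bin them at equiprobable quantiles, and compare observed against expected counts with a chi-squared threshold.

# Python analogue of assert_matches_quantiles above, for illustration only.
# Bins are equiprobable under N(0, 1); 1e-6 plays the role of 'tolerance'.
import numpy as np
from scipy import stats

rng = np.random.default_rng(1234)
samples = rng.normal(size=10_000)

K = 10                                        # number of equiprobable bins
edges = stats.norm.ppf(np.arange(1, K) / K)   # K-1 interior bin boundaries
counts, _ = np.histogram(samples, bins=np.concatenate(([-np.inf], edges, [np.inf])))
expected = np.full(K, len(samples) / K)

chi2 = np.sum((counts - expected) ** 2 / expected)
threshold = stats.chi2.ppf(1 - 1e-6, df=K - 1)
assert chi2 < threshold, "samples inconsistent with the target quantiles"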
section variable A : Type variable f : A → A variable P : A → Prop variable h : ∀ x, P x → P (f x) include h -- Show the following: example : ∀ y, P y → P (f (f y)) := begin intros, have h1 : P y → P (f y), from h y, have h2 : P (f y), from h1 a, have h3 : P (f y) → P (f (f y)), from h (f y), have h4 : P (f (f y)), from h3 h2, assumption end end section variable U : Type variables A B : U → Prop example : (∀ x, A x ∧ B x) → ∀ x, A x := begin intro, intro, exact (a x).left end end section variable U : Type variables A B C : U → Prop variable h1 : ∀ x, A x ∨ B x variable h2 : ∀ x, A x → C x variable h3 : ∀ x, B x → C x include h1 h2 h3 example : ∀ x, C x := begin intro x, have h4, from h1 x, cases h4, exact h2 x h4, exact h3 x h4 end end open classical -- not needed, but you can use it -- This is an exercise from Chapter 4. Use it as an axiom here. axiom not_iff_not_self (P : Prop) : ¬ (P ↔ ¬ P) example (Q : Prop) : ¬ (Q ↔ ¬ Q) := not_iff_not_self Q section variable Person : Type variable shaves : Person → Person → Prop variable barber : Person variable h : ∀ x, shaves barber x ↔ ¬ shaves x x include Person shaves barber h -- Show the following: example : false := begin have h1, from h barber, have h2, from iff.elim_left h1, have h3, from iff.elim_right h1, have h5, from assume h4 : shaves barber barber, show false, from (h2 h4) h4, have h6, from by_contradiction (assume h4 : ¬ shaves barber barber, show false, from h4 (h3 h4)), exact h5 h6 end end
/- Copyright (c) 2019 Johan Commelin. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Riccardo Brasca, Johan Commelin -/ import data.polynomial.field_division import field_theory.minpoly.basic import ring_theory.algebraic /-! # Minimal polynomials on an algebra over a field This file specializes the theory of minpoly to the setting of field extensions and derives some well-known properties, amongst which the fact that minimal polynomials are irreducible, and uniquely determined by their defining property. -/ open_locale classical polynomial open polynomial set function minpoly namespace minpoly variables {A B : Type*} variables (A) [field A] section ring variables [ring B] [algebra A B] (x : B) /-- If an element `x` is a root of a nonzero polynomial `p`, then the degree of `p` is at least the degree of the minimal polynomial of `x`. See also `gcd_domain_degree_le_of_ne_zero` which relaxes the assumptions on `A` in exchange for stronger assumptions on `B`. -/ lemma degree_le_of_ne_zero {p : A[X]} (pnz : p ≠ 0) (hp : polynomial.aeval x p = 0) : degree (minpoly A x) ≤ degree p := calc degree (minpoly A x) ≤ degree (p * C (leading_coeff p)⁻¹) : min A x (monic_mul_leading_coeff_inv pnz) (by simp [hp]) ... = degree p : degree_mul_leading_coeff_inv p pnz lemma ne_zero_of_finite_field_extension (e : B) [finite_dimensional A B] : minpoly A e ≠ 0 := minpoly.ne_zero $ is_integral_of_noetherian (is_noetherian.iff_fg.2 infer_instance) _ /-- The minimal polynomial of an element `x` is uniquely characterized by its defining property: if there is another monic polynomial of minimal degree that has `x` as a root, then this polynomial is equal to the minimal polynomial of `x`. See also `minpoly.gcd_unique` which relaxes the assumptions on `A` in exchange for stronger assumptions on `B`. -/ lemma unique {p : A[X]} (pmonic : p.monic) (hp : polynomial.aeval x p = 0) (pmin : ∀ q : A[X], q.monic → polynomial.aeval x q = 0 → degree p ≤ degree q) : p = minpoly A x := begin have hx : is_integral A x := ⟨p, pmonic, hp⟩, symmetry, apply eq_of_sub_eq_zero, by_contra hnz, have := degree_le_of_ne_zero A x hnz (by simp [hp]), contrapose! this, apply degree_sub_lt _ (ne_zero hx), { rw [(monic hx).leading_coeff, pmonic.leading_coeff] }, { exact le_antisymm (min A x pmonic hp) (pmin (minpoly A x) (monic hx) (aeval A x)) } end /-- If an element `x` is a root of a polynomial `p`, then the minimal polynomial of `x` divides `p`. See also `minpoly.gcd_domain_dvd` which relaxes the assumptions on `A` in exchange for stronger assumptions on `B`. -/ lemma dvd {p : A[X]} (hp : polynomial.aeval x p = 0) : minpoly A x ∣ p := begin by_cases hp0 : p = 0, { simp only [hp0, dvd_zero] }, have hx : is_integral A x, { rw ← is_algebraic_iff_is_integral, exact ⟨p, hp0, hp⟩ }, rw ← dvd_iff_mod_by_monic_eq_zero (monic hx), by_contra hnz, have := degree_le_of_ne_zero A x hnz _, { contrapose! 
this, exact degree_mod_by_monic_lt _ (monic hx) }, { rw ← mod_by_monic_add_div p (monic hx) at hp, simpa using hp } end lemma dvd_map_of_is_scalar_tower (A K : Type*) {R : Type*} [comm_ring A] [field K] [comm_ring R] [algebra A K] [algebra A R] [algebra K R] [is_scalar_tower A K R] (x : R) : minpoly K x ∣ (minpoly A x).map (algebra_map A K) := by { refine minpoly.dvd K x _, rw [aeval_map_algebra_map, minpoly.aeval] } lemma dvd_map_of_is_scalar_tower' (R : Type*) {S : Type*} (K L : Type*) [comm_ring R] [comm_ring S] [field K] [comm_ring L] [algebra R S] [algebra R K] [algebra S L] [algebra K L] [algebra R L] [is_scalar_tower R K L] [is_scalar_tower R S L] (s : S): minpoly K (algebra_map S L s) ∣ (map (algebra_map R K) (minpoly R s)) := begin apply minpoly.dvd K (algebra_map S L s), rw [← map_aeval_eq_aeval_map, minpoly.aeval, map_zero], rw [← is_scalar_tower.algebra_map_eq, ← is_scalar_tower.algebra_map_eq] end /-- If `y` is a conjugate of `x` over a field `K`, then it is a conjugate over a subring `R`. -/ lemma aeval_of_is_scalar_tower (R : Type*) {K T U : Type*} [comm_ring R] [field K] [comm_ring T] [algebra R K] [algebra K T] [algebra R T] [is_scalar_tower R K T] [comm_semiring U] [algebra K U] [algebra R U] [is_scalar_tower R K U] (x : T) (y : U) (hy : polynomial.aeval y (minpoly K x) = 0) : polynomial.aeval y (minpoly R x) = 0 := aeval_map_algebra_map K y (minpoly R x) ▸ eval₂_eq_zero_of_dvd_of_eval₂_eq_zero (algebra_map K U) y (minpoly.dvd_map_of_is_scalar_tower R K x) hy variables {A x} theorem eq_of_irreducible_of_monic [nontrivial B] {p : A[X]} (hp1 : _root_.irreducible p) (hp2 : polynomial.aeval x p = 0) (hp3 : p.monic) : p = minpoly A x := let ⟨q, hq⟩ := dvd A x hp2 in eq_of_monic_of_associated hp3 (monic ⟨p, ⟨hp3, hp2⟩⟩) $ mul_one (minpoly A x) ▸ hq.symm ▸ associated.mul_left _ $ associated_one_iff_is_unit.2 $ (hp1.is_unit_or_is_unit hq).resolve_left $ not_is_unit A x lemma eq_of_irreducible [nontrivial B] {p : A[X]} (hp1 : _root_.irreducible p) (hp2 : polynomial.aeval x p = 0) : p * C p.leading_coeff⁻¹ = minpoly A x := begin have : p.leading_coeff ≠ 0 := leading_coeff_ne_zero.mpr hp1.ne_zero, apply eq_of_irreducible_of_monic, { exact associated.irreducible ⟨⟨C p.leading_coeff⁻¹, C p.leading_coeff, by rwa [←C_mul, inv_mul_cancel, C_1], by rwa [←C_mul, mul_inv_cancel, C_1]⟩, rfl⟩ hp1 }, { rw [aeval_mul, hp2, zero_mul] }, { rwa [polynomial.monic, leading_coeff_mul, leading_coeff_C, mul_inv_cancel] }, end /-- If `y` is the image of `x` in an extension, their minimal polynomials coincide. We take `h : y = algebra_map L T x` as an argument because `rw h` typically fails since `is_integral R y` depends on y. 
-/ lemma eq_of_algebra_map_eq {K S T : Type*} [field K] [comm_ring S] [comm_ring T] [algebra K S] [algebra K T] [algebra S T] [is_scalar_tower K S T] (hST : function.injective (algebra_map S T)) {x : S} {y : T} (hx : is_integral K x) (h : y = algebra_map S T x) : minpoly K x = minpoly K y := minpoly.unique _ _ (minpoly.monic hx) (by rw [h, aeval_algebra_map_apply, minpoly.aeval, ring_hom.map_zero]) (λ q q_monic root_q, minpoly.min _ _ q_monic ((aeval_algebra_map_eq_zero_iff_of_injective hST).mp (h ▸ root_q : polynomial.aeval (algebra_map S T x) q = 0))) lemma add_algebra_map {B : Type*} [comm_ring B] [algebra A B] {x : B} (hx : is_integral A x) (a : A) : minpoly A (x + (algebra_map A B a)) = (minpoly A x).comp (X - C a) := begin refine (minpoly.unique _ _ ((minpoly.monic hx).comp_X_sub_C _) _ (λ q qmo hq, _)).symm, { simp [aeval_comp] }, { have : (polynomial.aeval x) (q.comp (X + C a)) = 0 := by simpa [aeval_comp] using hq, have H := minpoly.min A x (qmo.comp_X_add_C _) this, rw [degree_eq_nat_degree qmo.ne_zero, degree_eq_nat_degree ((minpoly.monic hx).comp_X_sub_C _).ne_zero, with_bot.coe_le_coe, nat_degree_comp, nat_degree_X_sub_C, mul_one], rwa [degree_eq_nat_degree (minpoly.ne_zero hx), degree_eq_nat_degree (qmo.comp_X_add_C _).ne_zero, with_bot.coe_le_coe, nat_degree_comp, nat_degree_X_add_C, mul_one] at H } end lemma sub_algebra_map {B : Type*} [comm_ring B] [algebra A B] {x : B} (hx : is_integral A x) (a : A) : minpoly A (x - (algebra_map A B a)) = (minpoly A x).comp (X + C a) := by simpa [sub_eq_add_neg] using add_algebra_map hx (-a) section alg_hom_fintype /-- A technical finiteness result. -/ noncomputable def fintype.subtype_prod {E : Type*} {X : set E} (hX : X.finite) {L : Type*} (F : E → multiset L) : fintype (Π x : X, {l : L // l ∈ F x}) := let hX := finite.fintype hX in by exactI pi.fintype variables (F E K : Type*) [field F] [ring E] [comm_ring K] [is_domain K] [algebra F E] [algebra F K] [finite_dimensional F E] /-- Function from Hom_K(E,L) to pi type Π (x : basis), roots of min poly of x -/ -- Marked as `noncomputable!` since this definition takes multiple seconds to compile, -- and isn't very computable in practice (since neither `finrank` nor `fin_basis` are). noncomputable! def roots_of_min_poly_pi_type (φ : E →ₐ[F] K) (x : range (finite_dimensional.fin_basis F E : _ → E)) : {l : K // l ∈ (((minpoly F x.1).map (algebra_map F K)).roots : multiset K)} := ⟨φ x, by rw [mem_roots_map (minpoly.ne_zero_of_finite_field_extension F x.val), subtype.val_eq_coe, ←aeval_def, aeval_alg_hom_apply, minpoly.aeval, map_zero]⟩ lemma aux_inj_roots_of_min_poly : injective (roots_of_min_poly_pi_type F E K) := begin intros f g h, suffices : (f : E →ₗ[F] K) = g, { rwa fun_like.ext'_iff at this ⊢ }, rw funext_iff at h, exact linear_map.ext_on (finite_dimensional.fin_basis F E).span_eq (λ e he, subtype.ext_iff.mp (h ⟨e, he⟩)), end /-- Given field extensions `E/F` and `K/F`, with `E/F` finite, there are finitely many `F`-algebra homomorphisms `E →ₐ[K] K`. -/ noncomputable instance alg_hom.fintype : fintype (E →ₐ[F] K) := @fintype.of_injective _ _ (fintype.subtype_prod (finite_range (finite_dimensional.fin_basis F E)) (λ e, ((minpoly F e).map (algebra_map F K)).roots)) _ (aux_inj_roots_of_min_poly F E K) end alg_hom_fintype variables (B) [nontrivial B] /-- If `B/K` is a nontrivial algebra over a field, and `x` is an element of `K`, then the minimal polynomial of `algebra_map K B x` is `X - C x`. 
-/ lemma eq_X_sub_C (a : A) : minpoly A (algebra_map A B a) = X - C a := eq_X_sub_C_of_algebra_map_inj a (algebra_map A B).injective lemma eq_X_sub_C' (a : A) : minpoly A a = X - C a := eq_X_sub_C A a variables (A) /-- The minimal polynomial of `0` is `X`. -/ @[simp] lemma zero : minpoly A (0:B) = X := by simpa only [add_zero, C_0, sub_eq_add_neg, neg_zero, ring_hom.map_zero] using eq_X_sub_C B (0:A) /-- The minimal polynomial of `1` is `X - 1`. -/ @[simp] lemma one : minpoly A (1:B) = X - 1 := by simpa only [ring_hom.map_one, C_1, sub_eq_add_neg] using eq_X_sub_C B (1:A) end ring section is_domain variables [ring B] [is_domain B] [algebra A B] variables {A} {x : B} /-- A minimal polynomial is prime. -/ lemma prime (hx : is_integral A x) : prime (minpoly A x) := begin refine ⟨ne_zero hx, not_is_unit A x, _⟩, rintros p q ⟨d, h⟩, have : polynomial.aeval x (p*q) = 0 := by simp [h, aeval A x], replace : polynomial.aeval x p = 0 ∨ polynomial.aeval x q = 0 := by simpa, exact or.imp (dvd A x) (dvd A x) this end /-- If `L/K` is a field extension and an element `y` of `K` is a root of the minimal polynomial of an element `x ∈ L`, then `y` maps to `x` under the field embedding. -/ lemma root {x : B} (hx : is_integral A x) {y : A} (h : is_root (minpoly A x) y) : algebra_map A B y = x := have key : minpoly A x = X - C y := eq_of_monic_of_associated (monic hx) (monic_X_sub_C y) (associated_of_dvd_dvd ((irreducible_X_sub_C y).dvd_symm (irreducible hx) (dvd_iff_is_root.2 h)) (dvd_iff_is_root.2 h)), by { have := aeval A x, rwa [key, alg_hom.map_sub, aeval_X, aeval_C, sub_eq_zero, eq_comm] at this } /-- The constant coefficient of the minimal polynomial of `x` is `0` if and only if `x = 0`. -/ @[simp] lemma coeff_zero_eq_zero (hx : is_integral A x) : coeff (minpoly A x) 0 = 0 ↔ x = 0 := begin split, { intro h, have zero_root := zero_is_root_of_coeff_zero_eq_zero h, rw ← root hx zero_root, exact ring_hom.map_zero _ }, { rintro rfl, simp } end /-- The minimal polynomial of a nonzero element has nonzero constant coefficient. -/ lemma coeff_zero_ne_zero (hx : is_integral A x) (h : x ≠ 0) : coeff (minpoly A x) 0 ≠ 0 := by { contrapose! h, simpa only [hx, coeff_zero_eq_zero] using h } end is_domain end minpoly
(* This file is generated by Why3's Coq driver *) (* Beware! Only edit allowed sections below *) Require Import ZArith. Require Import Rbase. Require int.Int. (* Why3 assumption *) Definition unit := unit. Parameter t : Type. Parameter f: t -> t. Parameter x0: t. Parameter iter: Z -> t -> t. Axiom iter_0 : forall (x:t), ((iter 0%Z x) = x). Axiom iter_s : forall (k:Z) (x:t), (0%Z < k)%Z -> ((iter k x) = (iter (k - 1%Z)%Z (f x))). Axiom iter_1 : forall (x:t), ((iter 1%Z x) = (f x)). Axiom iter_s2 : forall (k:Z) (x:t), (0%Z < k)%Z -> ((iter k x) = (f (iter (k - 1%Z)%Z x))). Parameter mu: Z. Parameter lambda: Z. Axiom mu_range : (0%Z <= mu)%Z. Axiom lambda_range : (1%Z <= lambda)%Z. Axiom distinct : forall (i:Z) (j:Z), ((0%Z <= i)%Z /\ (i < (mu + lambda)%Z)%Z) -> (((0%Z <= j)%Z /\ (j < (mu + lambda)%Z)%Z) -> ((~ (i = j)) -> ~ ((iter i x0) = (iter j x0)))). Axiom cycle : forall (n:Z), (mu <= n)%Z -> ((iter (n + lambda)%Z x0) = (iter n x0)). Axiom cycle_induction : forall (n:Z), (mu <= n)%Z -> forall (k:Z), (0%Z <= k)%Z -> ((iter (n + (lambda * k)%Z)%Z x0) = (iter n x0)). (* Why3 assumption *) Inductive ref (a:Type) := | mk_ref : a -> ref a. Implicit Arguments mk_ref. (* Why3 assumption *) Definition contents (a:Type)(v:(ref a)): a := match v with | (mk_ref x) => x end. Implicit Arguments contents. Parameter dist: Z -> Z -> Z. Axiom dist_def : forall (i:Z) (j:Z), (mu <= i)%Z -> ((mu <= j)%Z -> ((0%Z <= (dist i j))%Z /\ (((iter (i + (dist i j))%Z x0) = (iter j x0)) /\ forall (k:Z), (0%Z <= k)%Z -> (((iter (i + k)%Z x0) = (iter j x0)) -> ((dist i j) <= k)%Z)))). (* Why3 assumption *) Definition rel(t2:t) (t1:t): Prop := exists i:Z, (t1 = (iter i x0)) /\ ((t2 = (iter (i + 1%Z)%Z x0)) /\ (((1%Z <= i)%Z /\ (i <= (mu + lambda)%Z)%Z) /\ ((mu <= i)%Z -> ((dist ((2%Z * i)%Z + 2%Z)%Z (i + 1%Z)%Z) < (dist (2%Z * i)%Z i))%Z))). (* Why3 goal *) Theorem WP_parameter_tortoise_hare : forall (hare:t) (tortoise:t), (exists t1:Z, ((1%Z <= t1)%Z /\ (t1 <= (mu + lambda)%Z)%Z) /\ ((tortoise = (iter t1 x0)) /\ ((hare = (iter (2%Z * t1)%Z x0)) /\ forall (i:Z), ((1%Z <= i)%Z /\ (i < t1)%Z) -> ~ ((iter i x0) = (iter (2%Z * i)%Z x0))))) -> ((~ (tortoise = hare)) -> forall (tortoise1:t), (tortoise1 = (f tortoise)) -> forall (hare1:t), (hare1 = (f (f hare))) -> (rel tortoise1 tortoise)). (* YOU MAY EDIT THE PROOF BELOW *) intuition. clear H2. destruct H as (i, (h1, (h2, (h3, h4)))). red. exists i; intuition. subst. rewrite iter_s2 with (k := (i+1)%Z). apply f_equal. ring_simplify (i+1-1)%Z; auto. omega. assert (mu1: (mu <= 2*i+2)%Z) by omega. assert (mu2: (mu <= i+1)%Z) by omega. generalize (dist_def (2*i+2) (i+1) mu1 mu2)%Z. intros (d1, (d2, d3)). clear mu1 mu2. assert (mu1: (mu <= 2*i)%Z) by omega. generalize (dist_def (2*i) i mu1 H3)%Z. intros (d'1, (d'2, d'3)). apply Zle_lt_trans with (dist (2 * i) i - 1)%Z. apply d3. assert (case: (dist (2*i) i = 0 \/ dist (2*i) i > 0)%Z) by omega. destruct case. rewrite H4 in d'2. subst. absurd (iter i x0 = iter (2 * i) x0)%Z; auto. symmetry. ring_simplify (2*i+0)%Z in d'2. auto. omega. rewrite iter_s2; try omega. rewrite iter_s2 with (k:=(i+1)%Z); try omega. apply f_equal. ring_simplify (i+1-1)%Z. ring_simplify (2 * i + 2 + (dist (2 * i) i - 1) - 1)%Z. auto. omega. Qed.
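The Why3-generated Coq proof above concerns Floyd's tortoise-and-hare cycle detection: `iter`, `mu`, and `lambda` describe the rho-shaped orbit of `f` from `x0`, and the verification condition relates one loop iteration to the `rel` relation used in the termination argument. As a point of reference only (not code extracted from the Why3 development), a minimal Python sketch of the classic algorithm:

# Minimal sketch of Floyd's tortoise-and-hare cycle detection, mirroring the
# abstract setting of the proof above (a function f iterated from x0).
# Illustration only; not extracted from the Why3/Coq development.
def tortoise_hare(f, x0):
    # Phase 1: advance at speeds 1 and 2 until the pointers meet
    # (the loop whose progress the proof above reasons about).
    tortoise, hare = f(x0), f(f(x0))
    while tortoise != hare:
        tortoise = f(tortoise)
        hare = f(f(hare))

    # Phase 2: locate mu, the index where the cycle starts.
    mu, tortoise = 0, x0
    while tortoise != hare:
        tortoise, hare = f(tortoise), f(hare)
        mu += 1

    # Phase 3: measure lambda, the cycle length.
    lam, hare = 1, f(tortoise)
    while tortoise != hare:
        hare = f(hare)
        lam += 1
    return mu, lam

# Example: iterating x -> (x*x + 1) mod 255 from 3 eventually enters a cycle.
print(tortoise_hare(lambda x: (x * x + 1) % 255, 3))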
Require Export SpecSemantics.
Require Export BoilerplateSemantics.

Set Implicit Arguments.

(******************************************************************************)
(* Progress *)
(******************************************************************************)

Lemma can_form_tarr {Γ t T1 T2} (v: Value t) (wt: Typing Γ t (tarr T1 T2)) :
  ∃ t2, t = abs T1 t2.
Proof. depind wt; try contradiction; exists t; reflexivity. Qed.

Lemma progress {t U} (wt: Typing empty t U) :
  Value t ∨ ∃ t', red t t'.
Proof with try (subst; eauto using red).
  depind wt; simpl; auto.
  - inversion H.
  - destruct IHwt1 as [v1|[t1' r1]]...
    destruct IHwt2 as [v2|[t2' r2]]...
    destruct (can_form_tarr v1 wt1)...
Qed.

(******************************************************************************)
(* Preservation *)
(******************************************************************************)

Lemma preservation {Γ t U} (wt: Typing Γ t U) :
  ∀ {t'}, red t t' → Typing Γ t' U.
Proof.
  induction wt; intros t' r; inversion r; subst; eauto using Typing.
  - inversion wt1; eauto using subst_evar_typing.
Qed.
module Everything where

import Relation.Ternary.Separation

-- The syntax and interpreter of LTLC
import Typed.LTLC

-- The syntax and interpreter of LTLC with strong updatable references
import Typed.LTLCRef

-- The syntax of a session typed language
import Sessions.Syntax

-- ... and its semantics
import Sessions.Semantics.Runtime
import Sessions.Semantics.Commands
import Sessions.Semantics.Expr
import Sessions.Semantics.Communication
import Sessions.Semantics.Process

-- the paper's session-typed program example
import Examples
[STATEMENT] lemma is_sup_join [intro?]: "is_sup x y (x \<squnion> y)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. is_sup x y (x \<squnion> y) [PROOF STEP] proof (unfold join_def) [PROOF STATE] proof (state) goal (1 subgoal): 1. is_sup x y (The (is_sup x y)) [PROOF STEP] from ex_sup [PROOF STATE] proof (chain) picking this: \<exists>sup. is_sup ?x ?y sup [PROOF STEP] obtain sup where "is_sup x y sup" [PROOF STATE] proof (prove) using this: \<exists>sup. is_sup ?x ?y sup goal (1 subgoal): 1. (\<And>sup. is_sup x y sup \<Longrightarrow> thesis) \<Longrightarrow> thesis [PROOF STEP] .. [PROOF STATE] proof (state) this: is_sup x y sup goal (1 subgoal): 1. is_sup x y (The (is_sup x y)) [PROOF STEP] then [PROOF STATE] proof (chain) picking this: is_sup x y sup [PROOF STEP] show "is_sup x y (THE sup. is_sup x y sup)" [PROOF STATE] proof (prove) using this: is_sup x y sup goal (1 subgoal): 1. is_sup x y (THE sup. is_sup x y sup) [PROOF STEP] by (rule theI) (rule is_sup_uniq [OF _ \<open>is_sup x y sup\<close>]) [PROOF STATE] proof (state) this: is_sup x y (THE sup. is_sup x y sup) goal: No subgoals! [PROOF STEP] qed
lemma rcis_zero_mod [simp]: "rcis 0 a = 0"
%% Copyright (C) 2016 Lagu %% Copyright (C) 2017-2019 Colin B. Macdonald %% %% This file is part of OctSymPy. %% %% OctSymPy is free software; you can redistribute it and/or modify %% it under the terms of the GNU General Public License as published %% by the Free Software Foundation; either version 3 of the License, %% or (at your option) any later version. %% %% This software is distributed in the hope that it will be useful, %% but WITHOUT ANY WARRANTY; without even the implied warranty %% of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See %% the GNU General Public License for more details. %% %% You should have received a copy of the GNU General Public %% License along with this software; see the file COPYING. %% If not, see <http://www.gnu.org/licenses/>. %% -*- texinfo -*- %% @documentencoding UTF-8 %% @deftypemethod @@sym {@var{L} =} chol (@var{A}) %% Cholesky factorization of symbolic symmetric matrix. %% %% Returns a lower-triangular matrix @var{L}, such that @code{L*L'} %% is matrix @var{A}. The matrix @var{A} must be symmetric %% positive-definite. Example: %% @example %% @group %% A = sym([1 2 4; 2 13 23; 4 23 43]) %% @result{} A = (sym 3×3 matrix) %% %% ⎡1 2 4 ⎤ %% ⎢ ⎥ %% ⎢2 13 23⎥ %% ⎢ ⎥ %% ⎣4 23 43⎦ %% %% L = chol(A) %% @result{} L = (sym 3×3 matrix) %% %% ⎡1 0 0 ⎤ %% ⎢ ⎥ %% ⎢2 3 0 ⎥ %% ⎢ ⎥ %% ⎣4 5 √2⎦ %% %% L*L' %% @result{} (sym 3×3 matrix) %% %% ⎡1 2 4 ⎤ %% ⎢ ⎥ %% ⎢2 13 23⎥ %% ⎢ ⎥ %% ⎣4 23 43⎦ %% @end group %% @end example %% %% @seealso{chol, @@sym/qr, @@sym/lu} %% @end deftypemethod function y = chol(x) if (nargin == 2) error('Operation not supported yet.'); elseif (nargin > 2) print_usage (); end y = pycall_sympy__ ('return _ins[0].cholesky(),', x); end %!error <must be> chol (sym ([1 2; 3 4])); %!error <must be square> chol (sym ([1 2; 3 4; 5 6])); %!test %! A = chol(hilb(sym(2))); %! B = [[1 0]; sym(1)/2 sqrt(sym(3))/6]; %! assert( isequal( A, B ))
ui = plot_dict(simParams)
title = """<h1 class="text-white">Simulation parameters</h1>"""
content!(w, "div#content", title*ui)
{- Byzantine Fault Tolerant Consensus Verification in Agda, version 0.9. Copyright (c) 2020 Oracle and/or its affiliates. Licensed under the Universal Permissive License v 1.0 as shown at https://opensource.oracle.com/licenses/upl -} open import Optics.All open import LibraBFT.Prelude open import LibraBFT.Hash open import LibraBFT.Lemmas open import LibraBFT.Base.KVMap open import LibraBFT.Base.PKCS open import LibraBFT.Abstract.Types open EpochConfig open import LibraBFT.Impl.NetworkMsg open import LibraBFT.Impl.Consensus.Types open import LibraBFT.Impl.Util.Crypto open import LibraBFT.Impl.Handle sha256 sha256-cr open import LibraBFT.Concrete.System.Parameters open import LibraBFT.Yasm.Base open import LibraBFT.Yasm.AvailableEpochs using (AvailableEpochs ; lookup'; lookup'') open import LibraBFT.Yasm.System ConcSysParms open import LibraBFT.Yasm.Properties ConcSysParms -- In this module, we define two "implementation obligations" -- (ImplObligationᵢ for i ∈ {1 , 2}), which are predicates over -- reachable states of a system defined by -- 'LibraBFT.Concrete.System.Parameters'. These two properties relate -- votes sent by the same sender, ensuring that if they are for the -- same epoch and round, then they vote for the same blockID; the -- first relates a vote output by the handler to a vote sent -- previously, and the second relates two votes both sent by the -- handler. -- -- We then prove that, if an implementation satisfies these two -- semantic obligations, along with a structural one about messages -- sent by honest peers in the implementation, then the implemenation -- satisfies the LibraBFT.Abstract.Properties.VotesOnce invariant. module LibraBFT.Concrete.Properties.VotesOnce where -- TODO-3: This may not be the best way to state the implementation obligation. Why not reduce -- this as much as possible before giving the obligation to the implementation? For example, this -- will still require the implementation to deal with hash collisons (v and v' could be different, -- but yield the same bytestring and therefore same signature). Also, avoid the need for the -- implementation to reason about messages sent by step-cheat, or give it something to make this -- case easy to eliminate. ImplObligation₁ : Set₁ ImplObligation₁ = ∀{e pid sndr s' outs pk}{pre : SystemState e} → ReachableSystemState pre -- For any honest call to /handle/ or /init/, → StepPeerState pid (availEpochs pre) (msgPool pre) (Map-lookup pid (peerStates pre)) s' outs → ∀{v m v' m'} → Meta-Honest-PK pk -- For signed every vote v of every outputted message → v ⊂Msg m → m ∈ outs → (sig : WithVerSig pk v) -- If v is really new and valid -- Note that this does not directly exclude possibility of previous message with -- same signature, but sent by someone else. We could prove it implies it though. → ¬ (MsgWithSig∈ pk (ver-signature sig) (msgPool pre)) → ValidPartForPK (availEpochs pre) v pk -- And if there exists another v' that has been sent before → v' ⊂Msg m' → (sndr , m') ∈ (msgPool pre) → WithVerSig pk v' -- If v and v' share the same epoch and round → (v ^∙ vEpoch) ≡ (v' ^∙ vEpoch) → (v ^∙ vProposed ∙ biRound) ≡ (v' ^∙ vProposed ∙ biRound) ---------------------------------------------------------- -- Then an honest implemenation promises v and v' vote for the same blockId. 
→ (v ^∙ vProposed ∙ biId) ≡ (v' ^∙ vProposed ∙ biId) ImplObligation₂ : Set₁ ImplObligation₂ = ∀{e pid s' outs pk}{pre : SystemState e} → ReachableSystemState pre -- For any honest call to /handle/ or /init/, → StepPeerState pid (availEpochs pre) (msgPool pre) (Map-lookup pid (peerStates pre)) s' outs → ∀{v m v' m'} → Meta-Honest-PK pk -- For every vote v represented in a message output by the call → v ⊂Msg m → m ∈ outs → (sig : WithVerSig pk v) -- If v is really new and valid → ¬ (MsgWithSig∈ pk (ver-signature sig) (msgPool pre)) → ValidPartForPK (availEpochs pre) v pk -- And if there exists another v' that is also new and valid → v' ⊂Msg m' → m' ∈ outs → (sig' : WithVerSig pk v') → ¬ (MsgWithSig∈ pk (ver-signature sig') (msgPool pre)) → ValidPartForPK (availEpochs pre) v' pk -- If v and v' share the same epoch and round → (v ^∙ vEpoch) ≡ (v' ^∙ vEpoch) → (v ^∙ vProposed ∙ biRound) ≡ (v' ^∙ vProposed ∙ biRound) ---------------------------------------------------------- -- Then, an honest implemenation promises v and v' vote for the same blockId. → (v ^∙ vProposed ∙ biId) ≡ (v' ^∙ vProposed ∙ biId) -- Next, we prove that, given the necessary obligations, module Proof (sps-corr : StepPeerState-AllValidParts) (Impl-VO1 : ImplObligation₁) (Impl-VO2 : ImplObligation₂) where -- Any reachable state satisfies the VO rule for any epoch in the system. module _ {e}(st : SystemState e)(r : ReachableSystemState st)(eid : Fin e) where -- Bring in 'unwind', 'ext-unforgeability' and friends open Structural sps-corr -- Bring in ConcSystemState open import LibraBFT.Concrete.System sps-corr open PerState st r open PerEpoch eid open import LibraBFT.Abstract.Obligations.VotesOnce 𝓔 Hash _≟Hash_ (ConcreteVoteEvidence 𝓔) as VO -- The VO proof is done by induction on the execution trace leading to 'st'. In -- Agda, this is 'r : RechableSystemState st' above. We will use induction to -- construct a predicate Pred'' below, which holds for every state on the trace. private -- First we specify the predicate we need: it relates two votes verified -- by the same public key, such that both are elements of the same message pool Pred'' : PK → Vote → Vote → SentMessages → Set Pred'' pk v v' pool = Meta-Honest-PK pk → (ver : WithVerSig pk v) → MsgWithSig∈ pk (ver-signature ver) pool → (ver' : WithVerSig pk v') → MsgWithSig∈ pk (ver-signature ver') pool → v ^∙ vEpoch ≡ v' ^∙ vEpoch → v ^∙ vRound ≡ v' ^∙ vRound → v ^∙ vProposedId ≡ v' ^∙ vProposedId -- Usually, we want to universally quantify Pred'' over arbitrary votes and pks Pred' : SentMessages → Set Pred' pool = ∀{pk}{v v' : Vote} → Pred'' pk v v' pool -- Finally, we state Pred' in terms of SystemSate Pred : ∀{e} → SystemState e → Set Pred = Pred' ∘ msgPool ------------------- -- * Base Case * -- ------------------- -- Pred above is trivially true for the initial state: there are no messages in the pool Pred₀ : Pred initialState Pred₀ _ _ () -------------------------------------------------- -- * Inductive Case: New Epochs in the System * -- -------------------------------------------------- -- Because pushEpoch does not alter the msgPool, the proof is trivial. Pred𝓔 : ∀{e}{st : SystemState e}(𝓔 : EpochConfigFor e) → Pred st → Pred (pushEpoch 𝓔 st) Pred𝓔 𝓔 p = p ---------------------------------------------- -- * Inductive Case: Transition by a Peer * -- ---------------------------------------------- -- From this point onwards, it might be easier to read this proof starting at 'voo' -- at the end of the file. Next, we provide an overview the proof. 
-- -- We wish to prove that, for any two votes v and v' cast by an honest α in the message pool of -- a state st, if v and v' have equal rounds and epochs, then they vote for the same block. As -- we have seen above, the base case and the case for a new epoch in the system are -- trivial. Next, we look at the PeerStep case. -- -- The induction hypothesis tells us that the property holds in the pre-state. Next, we reason -- about the post-state. We start by analyzing whether v and v' have been sent as outputs of -- the PeerStep under scrutiny or were already in the pool before (captured by the PredStep -- function). There are four possibilities: -- -- i) v and v' were aleady present in the msgPool before: use induction hypothesis. -- ii) v and v' are both in the output produced by the PeerStep under scrutiny. -- iii) v was present before, but v' is new. -- iv) v' was present before, but v is new. -- -- Case (i) is trivial; cases (iii) and (iv) are symmetric and reduce to an implementation -- obligation (Impl-VO1) and case (ii) reduces to a different implementation obligation (Impl-VO2). -- -- The proofs of cases (iii) and (iv) are in PredStep-wlog-ht and PredStep-wlog-ht'. The 'ht' -- suffix refers to 'Here-There' as in one vote is "here" and the other is old, or "there". We -- first analyze whether the new vote is really new or a replay; sps-cor provides us this -- information. If the new vote is, in fact, a replay of an old message, we have two old -- messages and can call the induction hypothesis. If it is really new, we must rely on the -- implementation obligation. But to do so, we must prove that the old vote was also sent by -- the same peer. We can see that is the case by reasoning about PK-inj and IsValidEpochMember. -- -- Finally, the proof of case (ii) also branches on whether either of the "new" votes -- are replays or are really new. In case at least one is a replay we fallback to cases (iii) and (iv) -- or just call the induction hypothesis when both are replays. -- When both votes are in fact new, we rely on Impl-VO2 to conclude. -- -- In both PredSetp-wlog-ht and PredStep-wlog-hh, we must eliminate the possibility of -- either vote being produced by a cheat step. This is easy because we received -- a proof that the PK in question is honest, hence, it must be the case that a cheat -- step is at most replaying these votes, not producing them. Producing them would -- require the cheater to forge a signature. This is the purpose of the isCheat constraint. PredStep-wlog-ht' : ∀{e pid pid' s' outs pk}{pre : SystemState e} → ReachableSystemState pre → Pred pre → StepPeerState pid (availEpochs pre) (msgPool pre) (Map-lookup pid (peerStates pre)) s' outs → ∀{v m v' m'} → v ⊂Msg m → m ∈ outs → v' ⊂Msg m' → (pid' , m') ∈ msgPool pre → WithVerSig pk v → WithVerSig pk v' → Meta-Honest-PK pk → (v ^∙ vEpoch) ≡ (v' ^∙ vEpoch) → (v ^∙ vProposed ∙ biRound) ≡ (v' ^∙ vProposed ∙ biRound) → (v ^∙ vProposed ∙ biId) ≡ (v' ^∙ vProposed ∙ biId) PredStep-wlog-ht' {pre = pre} preach hip ps {v} v⊂m m∈outs v'⊂m' m'∈pool ver ver' hpk eids≡ r≡ -- (1) The first step is branching on whether 'v' above is a /new/ vote or not. 
-- (1.1) If it's new: with sps-corr preach hpk ps m∈outs v⊂m ver ...| inj₁ (vValid , vNew) with honestPartValid preach hpk v'⊂m' m'∈pool ver' ...| v'Old , vOldValid with sameHonestSig⇒sameVoteData hpk ver' (msgSigned v'Old) (sym (msgSameSig v'Old)) ...| inj₁ abs = ⊥-elim (meta-sha256-cr abs) ...| inj₂ refl = Impl-VO1 preach ps hpk v⊂m m∈outs ver vNew vValid (msg⊆ v'Old) (msg∈pool v'Old) (msgSigned v'Old) eids≡ r≡ -- (1.1) If 'v' is not new, then there exists a msg sent with the -- same signature. PredStep-wlog-ht' preach hip ps {v} v⊂m m∈outs v'⊂m' m'∈pool ver ver' hpk e≡ r≡ | inj₂ vOld with honestPartValid preach hpk v'⊂m' m'∈pool ver' ...| sv' , _ = hip hpk ver vOld ver' sv' e≡ r≡ -- Here we prove a modified version of Pred'' where we assume w.l.o.g that -- one vote is sent by "pstep" and another was present in the prestate. PredStep-wlog-ht : ∀{e pid st' outs}{pre : SystemState e} → ReachableSystemState pre → (pstep : StepPeer pre pid st' outs) → Pred pre → ∀{pk v v'} -- Below is a inline expansion of "Pred'' pk v v' (msgPool (StepPeer-post pstep))", -- but with the added information that one vote (v) was sent by pstep whereas the -- other (v') was in the pool of the prestate. → let pool = msgPool (StepPeer-post pstep) in Meta-Honest-PK pk → (ver : WithVerSig pk v )(sv : MsgWithSig∈ pk (ver-signature ver ) pool) → (msgSender sv , msgWhole sv) ∈ List-map (pid ,_) outs → (ver' : WithVerSig pk v')(sv' : MsgWithSig∈ pk (ver-signature ver') pool) → (msgSender sv' , msgWhole sv') ∈ msgPool pre → v ^∙ vEpoch ≡ v' ^∙ vEpoch → v ^∙ vRound ≡ v' ^∙ vRound → v ^∙ vProposedId ≡ v' ^∙ vProposedId PredStep-wlog-ht preach (step-cheat fm isCheat) hip hpk ver sv (here refl) ver' sv' furtherBack' epoch≡ r≡ with isCheat (msg⊆ sv) (msgSigned sv) ...| inj₁ abs = ⊥-elim (hpk abs) -- The key was honest by hypothesis. ...| inj₂ sentb4 -- the cheater replayed the message; which means the message was sent before this -- step; hence, call induction hypothesis. with msgSameSig sv ...| refl = hip hpk ver sentb4 ver' (MsgWithSig∈-transp sv' furtherBack') epoch≡ r≡ PredStep-wlog-ht preach (step-honest x) hip hpk ver sv thisStep ver' sv' furtherBack' epoch≡ r≡ with sameHonestSig⇒sameVoteData hpk ver (msgSigned sv) (sym (msgSameSig sv)) ...| inj₁ abs = ⊥-elim (meta-sha256-cr abs) ...| inj₂ refl with sameHonestSig⇒sameVoteData hpk ver' (msgSigned sv') (sym (msgSameSig sv')) ...| inj₁ abs = ⊥-elim (meta-sha256-cr abs) ...| inj₂ refl = PredStep-wlog-ht' preach hip x (msg⊆ sv) (Any-map (cong proj₂) (Any-map⁻ thisStep)) (msg⊆ sv') furtherBack' (msgSigned sv) (msgSigned sv') hpk epoch≡ r≡ -- Analogous to PredStep-wlog-ht', but here we must reason about two messages that are in the -- outputs of a step. PredStep-hh' : ∀{e pid s' outs pk}{pre : SystemState e} → ReachableSystemState pre → Pred pre → StepPeerState pid (availEpochs pre) (msgPool pre) (Map-lookup pid (peerStates pre)) s' outs → ∀{v m v' m'} → v ⊂Msg m → m ∈ outs → v' ⊂Msg m' → m' ∈ outs → WithVerSig pk v → WithVerSig pk v' → Meta-Honest-PK pk → (v ^∙ vEpoch) ≡ (v' ^∙ vEpoch) → (v ^∙ vProposed ∙ biRound) ≡ (v' ^∙ vProposed ∙ biRound) → (v ^∙ vProposed ∙ biId) ≡ (v' ^∙ vProposed ∙ biId) -- Since the step is from an honest peer, we can check whether the messages are in fact -- new or not. 
PredStep-hh' preach hip ps {v} v⊂m m∈outs v'⊂m' m'∈outs ver ver' hpk e≡ r≡ with sps-corr preach hpk ps m∈outs v⊂m ver | sps-corr preach hpk ps m'∈outs v'⊂m' ver' -- (A) Both are old: call induction hypothesis ...| inj₂ vOld | inj₂ v'Old = hip hpk ver vOld ver' v'Old e≡ r≡ -- (B) One is new, one is old: use PredStep-wlog-ht' PredStep-hh' preach hip ps {v} v⊂m m∈outs v'⊂m' m'∈outs ver ver' hpk e≡ r≡ | inj₁ (vValid , vNew) | inj₂ v'Old with sameHonestSig⇒sameVoteData hpk ver' (msgSigned v'Old) (sym (msgSameSig v'Old)) ...| inj₁ abs = ⊥-elim (meta-sha256-cr abs) ...| inj₂ refl = PredStep-wlog-ht' preach hip ps v⊂m m∈outs (msg⊆ v'Old) (msg∈pool v'Old) ver (msgSigned v'Old) hpk e≡ r≡ -- (C) One is old, one is new: use PredStep-wlog-ht' PredStep-hh' preach hip ps {v} v⊂m m∈outs v'⊂m' m'∈outs ver ver' hpk e≡ r≡ | inj₂ vOld | inj₁ (v'Valid , v'New) with sameHonestSig⇒sameVoteData hpk ver (msgSigned vOld) (sym (msgSameSig vOld)) ...| inj₁ abs = ⊥-elim (meta-sha256-cr abs) ...| inj₂ refl = sym (PredStep-wlog-ht' preach hip ps v'⊂m' m'∈outs (msg⊆ vOld) (msg∈pool vOld) ver' (msgSigned vOld) hpk (sym e≡) (sym r≡)) -- (D) Finally, both votes are new in this step. The proof is then trivially -- forwarded to the implementation obligation. PredStep-hh' preach hip ps {v} v⊂m m∈outs v'⊂m' m'∈outs ver ver' hpk e≡ r≡ | inj₁ (vValid , vNew) | inj₁ (v'Valid , v'New) = Impl-VO2 preach ps hpk v⊂m m∈outs ver vNew vValid v'⊂m' m'∈outs ver' v'New v'Valid e≡ r≡ PredStep-hh : ∀{e pid st' outs}{pre : SystemState e} → ReachableSystemState pre → (pstep : StepPeer pre pid st' outs) → Pred pre → ∀{pk v v'} → let pool = msgPool (StepPeer-post pstep) in Meta-Honest-PK pk → (ver : WithVerSig pk v )(sv : MsgWithSig∈ pk (ver-signature ver ) pool) → (msgSender sv , msgWhole sv) ∈ List-map (pid ,_) outs → (ver' : WithVerSig pk v')(sv' : MsgWithSig∈ pk (ver-signature ver') pool) → (msgSender sv' , msgWhole sv') ∈ List-map (pid ,_) outs → v ^∙ vEpoch ≡ v' ^∙ vEpoch → v ^∙ vRound ≡ v' ^∙ vRound → v ^∙ vProposedId ≡ v' ^∙ vProposedId PredStep-hh preach (step-cheat fm isCheat) hip hpk ver sv (here refl) ver' sv' (here refl) epoch≡ r≡ with isCheat (msg⊆ sv) (msgSigned sv) ...| inj₁ abs = ⊥-elim (hpk abs) -- The key was honest by hypothesis. ...| inj₂ sentb4 with isCheat (msg⊆ sv') (msgSigned sv') ...| inj₁ abs = ⊥-elim (hpk abs) -- The key was honest by hypothesis. 
...| inj₂ sentb4' with msgSameSig sv | msgSameSig sv' ...| refl | refl = hip hpk ver sentb4 ver' sentb4' epoch≡ r≡ PredStep-hh preach (step-honest x) hip hpk ver sv thisStep ver' sv' thisStep' epoch≡ r≡ with sameHonestSig⇒sameVoteData hpk ver (msgSigned sv) (sym (msgSameSig sv)) ...| inj₁ abs = ⊥-elim (meta-sha256-cr abs) ...| inj₂ refl with sameHonestSig⇒sameVoteData hpk ver' (msgSigned sv') (sym (msgSameSig sv')) ...| inj₁ abs = ⊥-elim (meta-sha256-cr abs) ...| inj₂ refl = PredStep-hh' preach hip x (msg⊆ sv ) (Any-map (cong proj₂) (Any-map⁻ thisStep)) (msg⊆ sv') (Any-map (cong proj₂) (Any-map⁻ thisStep')) (msgSigned sv) (msgSigned sv') hpk epoch≡ r≡ PredStep : ∀{e pid st' outs}{pre : SystemState e} → ReachableSystemState pre → (pstep : StepPeer pre pid st' outs) → Pred pre → Pred (StepPeer-post pstep) PredStep {e} {pid} {st'} {outs} {pre} preach pstep hip hpk ver sv ver' sv' epoch≡ r≡ -- First we check when have the votes been sent: with Any-++⁻ (List-map (pid ,_) outs) {msgPool pre} (msg∈pool sv) | Any-++⁻ (List-map (pid ,_) outs) {msgPool pre} (msg∈pool sv') -- (A) Neither vote has been sent by the step under scrutiny: invoke inductive hypothesis ...| inj₂ furtherBack | inj₂ furtherBack' = hip hpk ver (MsgWithSig∈-transp sv furtherBack) ver' (MsgWithSig∈-transp sv' furtherBack') epoch≡ r≡ -- (B) One vote was cast here; the other was cast in the past. PredStep {e} {pid} {st'} {outs} {pre} preach pstep hip hpk ver sv ver' sv' epoch≡ r≡ | inj₁ thisStep | inj₂ furtherBack' = PredStep-wlog-ht preach pstep hip hpk ver sv thisStep ver' sv' furtherBack' epoch≡ r≡ -- (C) Symmetric to (B) PredStep {e} {pid} {st'} {outs} {pre} preach pstep hip hpk ver sv ver' sv' epoch≡ r≡ | inj₂ furtherBack | inj₁ thisStep' = sym (PredStep-wlog-ht preach pstep hip hpk ver' sv' thisStep' ver sv furtherBack (sym epoch≡) (sym r≡)) -- (D) Both votes were cast here PredStep {e} {pid} {st'} {outs} {pre} preach pstep hip hpk ver sv ver' sv' epoch≡ r≡ | inj₁ thisStep | inj₁ thisStep' = PredStep-hh preach pstep hip hpk ver sv thisStep ver' sv' thisStep' epoch≡ r≡ voo : VO.Type ConcSystemState voo hpk refl sv refl sv' round≡ with Step*-Step-fold Pred (λ {e} {st} _ → Pred𝓔 {e} {st}) PredStep Pred₀ r ...| res with vmsg≈v (vmFor sv) | vmsg≈v (vmFor sv') ...| refl | refl = res hpk (vmsgSigned (vmFor sv)) (mkMsgWithSig∈ (nm (vmFor sv)) (cv (vmFor sv)) (cv∈nm (vmFor sv)) _ (nmSentByAuth sv) (vmsgSigned (vmFor sv)) refl) (vmsgSigned (vmFor sv')) (mkMsgWithSig∈ (nm (vmFor sv')) (cv (vmFor sv')) (cv∈nm (vmFor sv')) _ (nmSentByAuth sv') (vmsgSigned (vmFor sv')) refl) (trans (vmsgEpoch (vmFor sv)) (sym (vmsgEpoch (vmFor sv')))) round≡
function [ icpPoseNew ] = poseSparsification_yq( savePoseName, saveIndexName, icpPose, expDis )
%POSESPARSIFICATION_YQ Sparsify an ICP pose trajectory by a minimum spacing.
%   Keeps the first pose, then every pose whose distance from the most
%   recently kept pose exceeds expDis. The distance is computed from
%   columns 4 and 8 of each pose row (presumably the x/y translation
%   entries of the flattened pose). The kept poses are written to
%   savePoseName and their zero-based row indices to saveIndexName,
%   both tab-delimited.
% YQ dataset, data preparation
% same for park dataset

keepIndex = [1];
icpPoseNew(1,:) = icpPose(1,:); % for plotting
cnt = 1;
for i = 2:length(icpPose)
    pose1 = [icpPose(i,4), icpPose(i,8)];
    pose2 = [icpPose(keepIndex(cnt),4), icpPose(keepIndex(cnt),8)];
    dis = norm(pose1 - pose2);
    if dis > expDis
        cnt = cnt + 1;
        keepIndex(cnt,:) = i;
        icpPoseNew(cnt,:) = icpPose(i,:);
    end
end

keepIndex = keepIndex - 1; % from zero
length(keepIndex)

% save the index to the file
dlmwrite(saveIndexName, keepIndex, 'delimiter', '\t');
dlmwrite(savePoseName, icpPoseNew, 'delimiter', '\t');

end
import LMT variable {I} [Nonempty I] {E} [Nonempty E] [Nonempty (A I E)] example {a1 a2 a3 : A I E} : ((a3).read i3) ≠ (v1) → (a3) = ((a3).write i3 (v1)) → False := by arr
[STATEMENT] lemma standard_decomp_SucE: assumes "finite X" and "U \<subseteq> X" and "h \<in> P[X]" and "h \<noteq> (0::_ \<Rightarrow>\<^sub>0 'a::{comm_ring_1,ring_no_zero_divisors})" obtains ps where "valid_decomp X ps" and "cone_decomp (cone (h, U)) ps" and "standard_decomp (Suc (poly_deg h)) ps" and "is_monomial h \<Longrightarrow> punit.lc h = 1 \<Longrightarrow> monomial_decomp ps" and "homogeneous h \<Longrightarrow> hom_decomp ps" [PROOF STATE] proof (prove) goal (1 subgoal): 1. (\<And>ps. \<lbrakk>valid_decomp X ps; cone_decomp (cone (h, U)) ps; standard_decomp (Suc (poly_deg h)) ps; \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ps; homogeneous h \<Longrightarrow> hom_decomp ps\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis [PROOF STEP] proof - [PROOF STATE] proof (state) goal (1 subgoal): 1. (\<And>ps. \<lbrakk>valid_decomp X ps; cone_decomp (cone (h, U)) ps; standard_decomp (Suc (poly_deg h)) ps; \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ps; homogeneous h \<Longrightarrow> hom_decomp ps\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis [PROOF STEP] from assms(2, 1) [PROOF STATE] proof (chain) picking this: U \<subseteq> X finite X [PROOF STEP] have "finite U" [PROOF STATE] proof (prove) using this: U \<subseteq> X finite X goal (1 subgoal): 1. finite U [PROOF STEP] by (rule finite_subset) [PROOF STATE] proof (state) this: finite U goal (1 subgoal): 1. (\<And>ps. \<lbrakk>valid_decomp X ps; cone_decomp (cone (h, U)) ps; standard_decomp (Suc (poly_deg h)) ps; \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ps; homogeneous h \<Longrightarrow> hom_decomp ps\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis [PROOF STEP] thus ?thesis [PROOF STATE] proof (prove) using this: finite U goal (1 subgoal): 1. thesis [PROOF STEP] using assms(2) that [PROOF STATE] proof (prove) using this: finite U U \<subseteq> X \<lbrakk>valid_decomp X ?ps13; cone_decomp (cone (h, U)) ?ps13; standard_decomp (Suc (poly_deg h)) ?ps13; \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ?ps13; homogeneous h \<Longrightarrow> hom_decomp ?ps13\<rbrakk> \<Longrightarrow> thesis goal (1 subgoal): 1. thesis [PROOF STEP] proof (induct U arbitrary: thesis rule: finite_induct) [PROOF STATE] proof (state) goal (2 subgoals): 1. \<And>thesis. \<lbrakk>{} \<subseteq> X; \<And>ps. \<lbrakk>valid_decomp X ps; cone_decomp (cone (h, {})) ps; standard_decomp (Suc (poly_deg h)) ps; \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ps; homogeneous h \<Longrightarrow> hom_decomp ps\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis 2. \<And>x F thesis. \<lbrakk>finite F; x \<notin> F; \<And>thesis. \<lbrakk>F \<subseteq> X; \<And>ps. \<lbrakk>valid_decomp X ps; cone_decomp (cone (h, F)) ps; standard_decomp (Suc (poly_deg h)) ps; \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ps; homogeneous h \<Longrightarrow> hom_decomp ps\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis; insert x F \<subseteq> X; \<And>ps. 
\<lbrakk>valid_decomp X ps; cone_decomp (cone (h, insert x F)) ps; standard_decomp (Suc (poly_deg h)) ps; \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ps; homogeneous h \<Longrightarrow> hom_decomp ps\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis [PROOF STEP] case empty [PROOF STATE] proof (state) this: {} \<subseteq> X \<lbrakk>valid_decomp X ?ps13; cone_decomp (cone (h, {})) ?ps13; standard_decomp (Suc (poly_deg h)) ?ps13; \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ?ps13; homogeneous h \<Longrightarrow> hom_decomp ?ps13\<rbrakk> \<Longrightarrow> thesis goal (2 subgoals): 1. \<And>thesis. \<lbrakk>{} \<subseteq> X; \<And>ps. \<lbrakk>valid_decomp X ps; cone_decomp (cone (h, {})) ps; standard_decomp (Suc (poly_deg h)) ps; \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ps; homogeneous h \<Longrightarrow> hom_decomp ps\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis 2. \<And>x F thesis. \<lbrakk>finite F; x \<notin> F; \<And>thesis. \<lbrakk>F \<subseteq> X; \<And>ps. \<lbrakk>valid_decomp X ps; cone_decomp (cone (h, F)) ps; standard_decomp (Suc (poly_deg h)) ps; \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ps; homogeneous h \<Longrightarrow> hom_decomp ps\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis; insert x F \<subseteq> X; \<And>ps. \<lbrakk>valid_decomp X ps; cone_decomp (cone (h, insert x F)) ps; standard_decomp (Suc (poly_deg h)) ps; \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ps; homogeneous h \<Longrightarrow> hom_decomp ps\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis [PROOF STEP] from assms(3, 4) [PROOF STATE] proof (chain) picking this: h \<in> P[X] h \<noteq> 0 [PROOF STEP] have "valid_decomp X [(h, {})]" [PROOF STATE] proof (prove) using this: h \<in> P[X] h \<noteq> 0 goal (1 subgoal): 1. valid_decomp X [(h, {})] [PROOF STEP] by (simp add: valid_decomp_def) [PROOF STATE] proof (state) this: valid_decomp X [(h, {})] goal (2 subgoals): 1. \<And>thesis. \<lbrakk>{} \<subseteq> X; \<And>ps. \<lbrakk>valid_decomp X ps; cone_decomp (cone (h, {})) ps; standard_decomp (Suc (poly_deg h)) ps; \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ps; homogeneous h \<Longrightarrow> hom_decomp ps\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis 2. \<And>x F thesis. \<lbrakk>finite F; x \<notin> F; \<And>thesis. \<lbrakk>F \<subseteq> X; \<And>ps. \<lbrakk>valid_decomp X ps; cone_decomp (cone (h, F)) ps; standard_decomp (Suc (poly_deg h)) ps; \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ps; homogeneous h \<Longrightarrow> hom_decomp ps\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis; insert x F \<subseteq> X; \<And>ps. \<lbrakk>valid_decomp X ps; cone_decomp (cone (h, insert x F)) ps; standard_decomp (Suc (poly_deg h)) ps; \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ps; homogeneous h \<Longrightarrow> hom_decomp ps\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis [PROOF STEP] moreover [PROOF STATE] proof (state) this: valid_decomp X [(h, {})] goal (2 subgoals): 1. \<And>thesis. \<lbrakk>{} \<subseteq> X; \<And>ps. 
\<lbrakk>valid_decomp X ps; cone_decomp (cone (h, {})) ps; standard_decomp (Suc (poly_deg h)) ps; \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ps; homogeneous h \<Longrightarrow> hom_decomp ps\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis 2. \<And>x F thesis. \<lbrakk>finite F; x \<notin> F; \<And>thesis. \<lbrakk>F \<subseteq> X; \<And>ps. \<lbrakk>valid_decomp X ps; cone_decomp (cone (h, F)) ps; standard_decomp (Suc (poly_deg h)) ps; \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ps; homogeneous h \<Longrightarrow> hom_decomp ps\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis; insert x F \<subseteq> X; \<And>ps. \<lbrakk>valid_decomp X ps; cone_decomp (cone (h, insert x F)) ps; standard_decomp (Suc (poly_deg h)) ps; \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ps; homogeneous h \<Longrightarrow> hom_decomp ps\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis [PROOF STEP] note cone_decomp_singleton [PROOF STATE] proof (state) this: cone_decomp (cone (?t, ?U)) [(?t, ?U)] goal (2 subgoals): 1. \<And>thesis. \<lbrakk>{} \<subseteq> X; \<And>ps. \<lbrakk>valid_decomp X ps; cone_decomp (cone (h, {})) ps; standard_decomp (Suc (poly_deg h)) ps; \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ps; homogeneous h \<Longrightarrow> hom_decomp ps\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis 2. \<And>x F thesis. \<lbrakk>finite F; x \<notin> F; \<And>thesis. \<lbrakk>F \<subseteq> X; \<And>ps. \<lbrakk>valid_decomp X ps; cone_decomp (cone (h, F)) ps; standard_decomp (Suc (poly_deg h)) ps; \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ps; homogeneous h \<Longrightarrow> hom_decomp ps\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis; insert x F \<subseteq> X; \<And>ps. \<lbrakk>valid_decomp X ps; cone_decomp (cone (h, insert x F)) ps; standard_decomp (Suc (poly_deg h)) ps; \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ps; homogeneous h \<Longrightarrow> hom_decomp ps\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis [PROOF STEP] moreover [PROOF STATE] proof (state) this: cone_decomp (cone (?t, ?U)) [(?t, ?U)] goal (2 subgoals): 1. \<And>thesis. \<lbrakk>{} \<subseteq> X; \<And>ps. \<lbrakk>valid_decomp X ps; cone_decomp (cone (h, {})) ps; standard_decomp (Suc (poly_deg h)) ps; \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ps; homogeneous h \<Longrightarrow> hom_decomp ps\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis 2. \<And>x F thesis. \<lbrakk>finite F; x \<notin> F; \<And>thesis. \<lbrakk>F \<subseteq> X; \<And>ps. \<lbrakk>valid_decomp X ps; cone_decomp (cone (h, F)) ps; standard_decomp (Suc (poly_deg h)) ps; \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ps; homogeneous h \<Longrightarrow> hom_decomp ps\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis; insert x F \<subseteq> X; \<And>ps. 
\<lbrakk>valid_decomp X ps; cone_decomp (cone (h, insert x F)) ps; standard_decomp (Suc (poly_deg h)) ps; \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ps; homogeneous h \<Longrightarrow> hom_decomp ps\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis [PROOF STEP] have "standard_decomp (Suc (poly_deg h)) [(h, {})]" [PROOF STATE] proof (prove) goal (1 subgoal): 1. standard_decomp (Suc (poly_deg h)) [(h, {})] [PROOF STEP] by (rule standard_decomp_Nil) (simp add: pos_decomp_def) [PROOF STATE] proof (state) this: standard_decomp (Suc (poly_deg h)) [(h, {})] goal (2 subgoals): 1. \<And>thesis. \<lbrakk>{} \<subseteq> X; \<And>ps. \<lbrakk>valid_decomp X ps; cone_decomp (cone (h, {})) ps; standard_decomp (Suc (poly_deg h)) ps; \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ps; homogeneous h \<Longrightarrow> hom_decomp ps\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis 2. \<And>x F thesis. \<lbrakk>finite F; x \<notin> F; \<And>thesis. \<lbrakk>F \<subseteq> X; \<And>ps. \<lbrakk>valid_decomp X ps; cone_decomp (cone (h, F)) ps; standard_decomp (Suc (poly_deg h)) ps; \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ps; homogeneous h \<Longrightarrow> hom_decomp ps\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis; insert x F \<subseteq> X; \<And>ps. \<lbrakk>valid_decomp X ps; cone_decomp (cone (h, insert x F)) ps; standard_decomp (Suc (poly_deg h)) ps; \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ps; homogeneous h \<Longrightarrow> hom_decomp ps\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis [PROOF STEP] ultimately [PROOF STATE] proof (chain) picking this: valid_decomp X [(h, {})] cone_decomp (cone (?t, ?U)) [(?t, ?U)] standard_decomp (Suc (poly_deg h)) [(h, {})] [PROOF STEP] show ?case [PROOF STATE] proof (prove) using this: valid_decomp X [(h, {})] cone_decomp (cone (?t, ?U)) [(?t, ?U)] standard_decomp (Suc (poly_deg h)) [(h, {})] goal (1 subgoal): 1. thesis [PROOF STEP] by (rule empty) (simp_all add: monomial_decomp_def hom_decomp_def) [PROOF STATE] proof (state) this: thesis goal (1 subgoal): 1. \<And>x F thesis. \<lbrakk>finite F; x \<notin> F; \<And>thesis. \<lbrakk>F \<subseteq> X; \<And>ps. \<lbrakk>valid_decomp X ps; cone_decomp (cone (h, F)) ps; standard_decomp (Suc (poly_deg h)) ps; \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ps; homogeneous h \<Longrightarrow> hom_decomp ps\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis; insert x F \<subseteq> X; \<And>ps. \<lbrakk>valid_decomp X ps; cone_decomp (cone (h, insert x F)) ps; standard_decomp (Suc (poly_deg h)) ps; \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ps; homogeneous h \<Longrightarrow> hom_decomp ps\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis [PROOF STEP] next [PROOF STATE] proof (state) goal (1 subgoal): 1. \<And>x F thesis. \<lbrakk>finite F; x \<notin> F; \<And>thesis. \<lbrakk>F \<subseteq> X; \<And>ps. \<lbrakk>valid_decomp X ps; cone_decomp (cone (h, F)) ps; standard_decomp (Suc (poly_deg h)) ps; \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ps; homogeneous h \<Longrightarrow> hom_decomp ps\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis; insert x F \<subseteq> X; \<And>ps. 
\<lbrakk>valid_decomp X ps; cone_decomp (cone (h, insert x F)) ps; standard_decomp (Suc (poly_deg h)) ps; \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ps; homogeneous h \<Longrightarrow> hom_decomp ps\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis [PROOF STEP] case (insert x U) [PROOF STATE] proof (state) this: finite U x \<notin> U \<lbrakk>U \<subseteq> X; \<And>ps. \<lbrakk>valid_decomp X ps; cone_decomp (cone (h, U)) ps; standard_decomp (Suc (poly_deg h)) ps; \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ps; homogeneous h \<Longrightarrow> hom_decomp ps\<rbrakk> \<Longrightarrow> ?thesis13\<rbrakk> \<Longrightarrow> ?thesis13 insert x U \<subseteq> X \<lbrakk>valid_decomp X ?ps13; cone_decomp (cone (h, insert x U)) ?ps13; standard_decomp (Suc (poly_deg h)) ?ps13; \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ?ps13; homogeneous h \<Longrightarrow> hom_decomp ?ps13\<rbrakk> \<Longrightarrow> thesis goal (1 subgoal): 1. \<And>x F thesis. \<lbrakk>finite F; x \<notin> F; \<And>thesis. \<lbrakk>F \<subseteq> X; \<And>ps. \<lbrakk>valid_decomp X ps; cone_decomp (cone (h, F)) ps; standard_decomp (Suc (poly_deg h)) ps; \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ps; homogeneous h \<Longrightarrow> hom_decomp ps\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis; insert x F \<subseteq> X; \<And>ps. \<lbrakk>valid_decomp X ps; cone_decomp (cone (h, insert x F)) ps; standard_decomp (Suc (poly_deg h)) ps; \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ps; homogeneous h \<Longrightarrow> hom_decomp ps\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis [PROOF STEP] from insert.prems(1) [PROOF STATE] proof (chain) picking this: insert x U \<subseteq> X [PROOF STEP] have "x \<in> X" and "U \<subseteq> X" [PROOF STATE] proof (prove) using this: insert x U \<subseteq> X goal (1 subgoal): 1. x \<in> X &&& U \<subseteq> X [PROOF STEP] by simp_all [PROOF STATE] proof (state) this: x \<in> X U \<subseteq> X goal (1 subgoal): 1. \<And>x F thesis. \<lbrakk>finite F; x \<notin> F; \<And>thesis. \<lbrakk>F \<subseteq> X; \<And>ps. \<lbrakk>valid_decomp X ps; cone_decomp (cone (h, F)) ps; standard_decomp (Suc (poly_deg h)) ps; \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ps; homogeneous h \<Longrightarrow> hom_decomp ps\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis; insert x F \<subseteq> X; \<And>ps. \<lbrakk>valid_decomp X ps; cone_decomp (cone (h, insert x F)) ps; standard_decomp (Suc (poly_deg h)) ps; \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ps; homogeneous h \<Longrightarrow> hom_decomp ps\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis [PROOF STEP] from this(2) [PROOF STATE] proof (chain) picking this: U \<subseteq> X [PROOF STEP] obtain ps where 0: "valid_decomp X ps" and 1: "cone_decomp (cone (h, U)) ps" and 2: "standard_decomp (Suc (poly_deg h)) ps" and 3: "is_monomial h \<Longrightarrow> punit.lc h = 1 \<Longrightarrow> monomial_decomp ps" and 4: "homogeneous h \<Longrightarrow> hom_decomp ps" [PROOF STATE] proof (prove) using this: U \<subseteq> X goal (1 subgoal): 1. (\<And>ps. 
\<lbrakk>valid_decomp X ps; cone_decomp (cone (h, U)) ps; standard_decomp (Suc (poly_deg h)) ps; \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ps; homogeneous h \<Longrightarrow> hom_decomp ps\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis [PROOF STEP] by (rule insert.hyps) blast [PROOF STATE] proof (state) this: valid_decomp X ps cone_decomp (cone (h, U)) ps standard_decomp (Suc (poly_deg h)) ps \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ps homogeneous h \<Longrightarrow> hom_decomp ps goal (1 subgoal): 1. \<And>x F thesis. \<lbrakk>finite F; x \<notin> F; \<And>thesis. \<lbrakk>F \<subseteq> X; \<And>ps. \<lbrakk>valid_decomp X ps; cone_decomp (cone (h, F)) ps; standard_decomp (Suc (poly_deg h)) ps; \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ps; homogeneous h \<Longrightarrow> hom_decomp ps\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis; insert x F \<subseteq> X; \<And>ps. \<lbrakk>valid_decomp X ps; cone_decomp (cone (h, insert x F)) ps; standard_decomp (Suc (poly_deg h)) ps; \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ps; homogeneous h \<Longrightarrow> hom_decomp ps\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis [PROOF STEP] let ?x = "monomial (1::'a) (Poly_Mapping.single x (Suc 0))" [PROOF STATE] proof (state) goal (1 subgoal): 1. \<And>x F thesis. \<lbrakk>finite F; x \<notin> F; \<And>thesis. \<lbrakk>F \<subseteq> X; \<And>ps. \<lbrakk>valid_decomp X ps; cone_decomp (cone (h, F)) ps; standard_decomp (Suc (poly_deg h)) ps; \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ps; homogeneous h \<Longrightarrow> hom_decomp ps\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis; insert x F \<subseteq> X; \<And>ps. \<lbrakk>valid_decomp X ps; cone_decomp (cone (h, insert x F)) ps; standard_decomp (Suc (poly_deg h)) ps; \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ps; homogeneous h \<Longrightarrow> hom_decomp ps\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis [PROOF STEP] have "?x \<noteq> 0" [PROOF STATE] proof (prove) goal (1 subgoal): 1. monomial (1::'a) (monomial (Suc 0) x) \<noteq> 0 [PROOF STEP] by (simp add: monomial_0_iff) [PROOF STATE] proof (state) this: monomial (1::'a) (monomial (Suc 0) x) \<noteq> 0 goal (1 subgoal): 1. \<And>x F thesis. \<lbrakk>finite F; x \<notin> F; \<And>thesis. \<lbrakk>F \<subseteq> X; \<And>ps. \<lbrakk>valid_decomp X ps; cone_decomp (cone (h, F)) ps; standard_decomp (Suc (poly_deg h)) ps; \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ps; homogeneous h \<Longrightarrow> hom_decomp ps\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis; insert x F \<subseteq> X; \<And>ps. \<lbrakk>valid_decomp X ps; cone_decomp (cone (h, insert x F)) ps; standard_decomp (Suc (poly_deg h)) ps; \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ps; homogeneous h \<Longrightarrow> hom_decomp ps\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis [PROOF STEP] with assms(4) [PROOF STATE] proof (chain) picking this: h \<noteq> 0 monomial (1::'a) (monomial (Suc 0) x) \<noteq> 0 [PROOF STEP] have deg: "poly_deg (?x * h) = Suc (poly_deg h)" [PROOF STATE] proof (prove) using this: h \<noteq> 0 monomial (1::'a) (monomial (Suc 0) x) \<noteq> 0 goal (1 subgoal): 1. 
poly_deg (monomial (1::'a) (monomial (Suc 0) x) * h) = Suc (poly_deg h) [PROOF STEP] by (simp add: poly_deg_times poly_deg_monomial deg_pm_single) [PROOF STATE] proof (state) this: poly_deg (monomial (1::'a) (monomial (Suc 0) x) * h) = Suc (poly_deg h) goal (1 subgoal): 1. \<And>x F thesis. \<lbrakk>finite F; x \<notin> F; \<And>thesis. \<lbrakk>F \<subseteq> X; \<And>ps. \<lbrakk>valid_decomp X ps; cone_decomp (cone (h, F)) ps; standard_decomp (Suc (poly_deg h)) ps; \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ps; homogeneous h \<Longrightarrow> hom_decomp ps\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis; insert x F \<subseteq> X; \<And>ps. \<lbrakk>valid_decomp X ps; cone_decomp (cone (h, insert x F)) ps; standard_decomp (Suc (poly_deg h)) ps; \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ps; homogeneous h \<Longrightarrow> hom_decomp ps\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis [PROOF STEP] define qs where "qs = [(?x * h, insert x U)]" [PROOF STATE] proof (state) this: qs = [(monomial (1::'a) (monomial (Suc 0) x) * h, insert x U)] goal (1 subgoal): 1. \<And>x F thesis. \<lbrakk>finite F; x \<notin> F; \<And>thesis. \<lbrakk>F \<subseteq> X; \<And>ps. \<lbrakk>valid_decomp X ps; cone_decomp (cone (h, F)) ps; standard_decomp (Suc (poly_deg h)) ps; \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ps; homogeneous h \<Longrightarrow> hom_decomp ps\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis; insert x F \<subseteq> X; \<And>ps. \<lbrakk>valid_decomp X ps; cone_decomp (cone (h, insert x F)) ps; standard_decomp (Suc (poly_deg h)) ps; \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ps; homogeneous h \<Longrightarrow> hom_decomp ps\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis [PROOF STEP] show ?case [PROOF STATE] proof (prove) goal (1 subgoal): 1. thesis [PROOF STEP] proof (rule insert.prems) [PROOF STATE] proof (state) goal (5 subgoals): 1. valid_decomp X ?ps13 2. cone_decomp (cone (h, insert x U)) ?ps13 3. standard_decomp (Suc (poly_deg h)) ?ps13 4. \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ?ps13 5. homogeneous h \<Longrightarrow> hom_decomp ?ps13 [PROOF STEP] from \<open>x \<in> X\<close> [PROOF STATE] proof (chain) picking this: x \<in> X [PROOF STEP] have "?x \<in> P[X]" [PROOF STATE] proof (prove) using this: x \<in> X goal (1 subgoal): 1. monomial (1::'a) (monomial (Suc 0) x) \<in> P[X] [PROOF STEP] by (intro Polys_closed_monomial PPs_closed_single) [PROOF STATE] proof (state) this: monomial (1::'a) (monomial (Suc 0) x) \<in> P[X] goal (5 subgoals): 1. valid_decomp X ?ps13 2. cone_decomp (cone (h, insert x U)) ?ps13 3. standard_decomp (Suc (poly_deg h)) ?ps13 4. \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ?ps13 5. homogeneous h \<Longrightarrow> hom_decomp ?ps13 [PROOF STEP] hence "?x * h \<in> P[X]" [PROOF STATE] proof (prove) using this: monomial (1::'a) (monomial (Suc 0) x) \<in> P[X] goal (1 subgoal): 1. monomial (1::'a) (monomial (Suc 0) x) * h \<in> P[X] [PROOF STEP] using assms(3) [PROOF STATE] proof (prove) using this: monomial (1::'a) (monomial (Suc 0) x) \<in> P[X] h \<in> P[X] goal (1 subgoal): 1. 
monomial (1::'a) (monomial (Suc 0) x) * h \<in> P[X] [PROOF STEP] by (rule Polys_closed_times) [PROOF STATE] proof (state) this: monomial (1::'a) (monomial (Suc 0) x) * h \<in> P[X] goal (5 subgoals): 1. valid_decomp X ?ps13 2. cone_decomp (cone (h, insert x U)) ?ps13 3. standard_decomp (Suc (poly_deg h)) ?ps13 4. \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ?ps13 5. homogeneous h \<Longrightarrow> hom_decomp ?ps13 [PROOF STEP] moreover [PROOF STATE] proof (state) this: monomial (1::'a) (monomial (Suc 0) x) * h \<in> P[X] goal (5 subgoals): 1. valid_decomp X ?ps13 2. cone_decomp (cone (h, insert x U)) ?ps13 3. standard_decomp (Suc (poly_deg h)) ?ps13 4. \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ?ps13 5. homogeneous h \<Longrightarrow> hom_decomp ?ps13 [PROOF STEP] from \<open>?x \<noteq> 0\<close> assms(4) [PROOF STATE] proof (chain) picking this: monomial (1::'a) (monomial (Suc 0) x) \<noteq> 0 h \<noteq> 0 [PROOF STEP] have "?x * h \<noteq> 0" [PROOF STATE] proof (prove) using this: monomial (1::'a) (monomial (Suc 0) x) \<noteq> 0 h \<noteq> 0 goal (1 subgoal): 1. monomial (1::'a) (monomial (Suc 0) x) * h \<noteq> 0 [PROOF STEP] by (rule times_not_zero) [PROOF STATE] proof (state) this: monomial (1::'a) (monomial (Suc 0) x) * h \<noteq> 0 goal (5 subgoals): 1. valid_decomp X ?ps13 2. cone_decomp (cone (h, insert x U)) ?ps13 3. standard_decomp (Suc (poly_deg h)) ?ps13 4. \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ?ps13 5. homogeneous h \<Longrightarrow> hom_decomp ?ps13 [PROOF STEP] ultimately [PROOF STATE] proof (chain) picking this: monomial (1::'a) (monomial (Suc 0) x) * h \<in> P[X] monomial (1::'a) (monomial (Suc 0) x) * h \<noteq> 0 [PROOF STEP] have "valid_decomp X qs" [PROOF STATE] proof (prove) using this: monomial (1::'a) (monomial (Suc 0) x) * h \<in> P[X] monomial (1::'a) (monomial (Suc 0) x) * h \<noteq> 0 goal (1 subgoal): 1. valid_decomp X qs [PROOF STEP] using insert.hyps(1) \<open>x \<in> X\<close> \<open>U \<subseteq> X\<close> [PROOF STATE] proof (prove) using this: monomial (1::'a) (monomial (Suc 0) x) * h \<in> P[X] monomial (1::'a) (monomial (Suc 0) x) * h \<noteq> 0 finite U x \<in> X U \<subseteq> X goal (1 subgoal): 1. valid_decomp X qs [PROOF STEP] by (simp add: qs_def valid_decomp_def) [PROOF STATE] proof (state) this: valid_decomp X qs goal (5 subgoals): 1. valid_decomp X ?ps13 2. cone_decomp (cone (h, insert x U)) ?ps13 3. standard_decomp (Suc (poly_deg h)) ?ps13 4. \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp ?ps13 5. homogeneous h \<Longrightarrow> hom_decomp ?ps13 [PROOF STEP] with 0 [PROOF STATE] proof (chain) picking this: valid_decomp X ps valid_decomp X qs [PROOF STEP] show "valid_decomp X (ps @ qs)" [PROOF STATE] proof (prove) using this: valid_decomp X ps valid_decomp X qs goal (1 subgoal): 1. valid_decomp X (ps @ qs) [PROOF STEP] by (rule valid_decomp_append) [PROOF STATE] proof (state) this: valid_decomp X (ps @ qs) goal (4 subgoals): 1. cone_decomp (cone (h, insert x U)) (ps @ qs) 2. standard_decomp (Suc (poly_deg h)) (ps @ qs) 3. \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp (ps @ qs) 4. homogeneous h \<Longrightarrow> hom_decomp (ps @ qs) [PROOF STEP] next [PROOF STATE] proof (state) goal (4 subgoals): 1. cone_decomp (cone (h, insert x U)) (ps @ qs) 2. standard_decomp (Suc (poly_deg h)) (ps @ qs) 3. 
\<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp (ps @ qs) 4. homogeneous h \<Longrightarrow> hom_decomp (ps @ qs) [PROOF STEP] show "cone_decomp (cone (h, insert x U)) (ps @ qs)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. cone_decomp (cone (h, insert x U)) (ps @ qs) [PROOF STEP] proof (rule cone_decomp_append) [PROOF STATE] proof (state) goal (3 subgoals): 1. direct_decomp (cone (h, insert x U)) [?S1.0, ?S2.0] 2. cone_decomp ?S1.0 ps 3. cone_decomp ?S2.0 qs [PROOF STEP] show "direct_decomp (cone (h, insert x U)) [cone (h, U), cone (?x * h, insert x U)]" [PROOF STATE] proof (prove) goal (1 subgoal): 1. direct_decomp (cone (h, insert x U)) [cone (h, U), cone (monomial (1::'a) (monomial (Suc 0) x) * h, insert x U)] [PROOF STEP] using insert.hyps(2) [PROOF STATE] proof (prove) using this: x \<notin> U goal (1 subgoal): 1. direct_decomp (cone (h, insert x U)) [cone (h, U), cone (monomial (1::'a) (monomial (Suc 0) x) * h, insert x U)] [PROOF STEP] by (rule direct_decomp_cone_insert) [PROOF STATE] proof (state) this: direct_decomp (cone (h, insert x U)) [cone (h, U), cone (monomial (1::'a) (monomial (Suc 0) x) * h, insert x U)] goal (2 subgoals): 1. cone_decomp (cone (h, U)) ps 2. cone_decomp (cone (monomial (1::'a) (monomial (Suc 0) x) * h, insert x U)) qs [PROOF STEP] next [PROOF STATE] proof (state) goal (2 subgoals): 1. cone_decomp (cone (h, U)) ps 2. cone_decomp (cone (monomial (1::'a) (monomial (Suc 0) x) * h, insert x U)) qs [PROOF STEP] show "cone_decomp (cone (?x * h, insert x U)) qs" [PROOF STATE] proof (prove) goal (1 subgoal): 1. cone_decomp (cone (monomial (1::'a) (monomial (Suc 0) x) * h, insert x U)) qs [PROOF STEP] by (simp add: qs_def cone_decomp_singleton) [PROOF STATE] proof (state) this: cone_decomp (cone (monomial (1::'a) (monomial (Suc 0) x) * h, insert x U)) qs goal (1 subgoal): 1. cone_decomp (cone (h, U)) ps [PROOF STEP] qed (fact 1) [PROOF STATE] proof (state) this: cone_decomp (cone (h, insert x U)) (ps @ qs) goal (3 subgoals): 1. standard_decomp (Suc (poly_deg h)) (ps @ qs) 2. \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp (ps @ qs) 3. homogeneous h \<Longrightarrow> hom_decomp (ps @ qs) [PROOF STEP] next [PROOF STATE] proof (state) goal (3 subgoals): 1. standard_decomp (Suc (poly_deg h)) (ps @ qs) 2. \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp (ps @ qs) 3. homogeneous h \<Longrightarrow> hom_decomp (ps @ qs) [PROOF STEP] from standard_decomp_singleton[of "?x * h" "insert x U"] [PROOF STATE] proof (chain) picking this: standard_decomp (poly_deg (monomial (1::'a) (monomial (Suc 0) x) * h)) [(monomial (1::'a) (monomial (Suc 0) x) * h, insert x U)] [PROOF STEP] have "standard_decomp (Suc (poly_deg h)) qs" [PROOF STATE] proof (prove) using this: standard_decomp (poly_deg (monomial (1::'a) (monomial (Suc 0) x) * h)) [(monomial (1::'a) (monomial (Suc 0) x) * h, insert x U)] goal (1 subgoal): 1. standard_decomp (Suc (poly_deg h)) qs [PROOF STEP] by (simp add: deg qs_def) [PROOF STATE] proof (state) this: standard_decomp (Suc (poly_deg h)) qs goal (3 subgoals): 1. standard_decomp (Suc (poly_deg h)) (ps @ qs) 2. \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp (ps @ qs) 3. 
homogeneous h \<Longrightarrow> hom_decomp (ps @ qs) [PROOF STEP] with 2 [PROOF STATE] proof (chain) picking this: standard_decomp (Suc (poly_deg h)) ps standard_decomp (Suc (poly_deg h)) qs [PROOF STEP] show "standard_decomp (Suc (poly_deg h)) (ps @ qs)" [PROOF STATE] proof (prove) using this: standard_decomp (Suc (poly_deg h)) ps standard_decomp (Suc (poly_deg h)) qs goal (1 subgoal): 1. standard_decomp (Suc (poly_deg h)) (ps @ qs) [PROOF STEP] by (rule standard_decomp_append) [PROOF STATE] proof (state) this: standard_decomp (Suc (poly_deg h)) (ps @ qs) goal (2 subgoals): 1. \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp (ps @ qs) 2. homogeneous h \<Longrightarrow> hom_decomp (ps @ qs) [PROOF STEP] next [PROOF STATE] proof (state) goal (2 subgoals): 1. \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp (ps @ qs) 2. homogeneous h \<Longrightarrow> hom_decomp (ps @ qs) [PROOF STEP] assume "is_monomial h" and "punit.lc h = 1" [PROOF STATE] proof (state) this: is_monomial h lcf h = (1::'a) goal (2 subgoals): 1. \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp (ps @ qs) 2. homogeneous h \<Longrightarrow> hom_decomp (ps @ qs) [PROOF STEP] hence "monomial_decomp ps" [PROOF STATE] proof (prove) using this: is_monomial h lcf h = (1::'a) goal (1 subgoal): 1. monomial_decomp ps [PROOF STEP] by (rule 3) [PROOF STATE] proof (state) this: monomial_decomp ps goal (2 subgoals): 1. \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp (ps @ qs) 2. homogeneous h \<Longrightarrow> hom_decomp (ps @ qs) [PROOF STEP] moreover [PROOF STATE] proof (state) this: monomial_decomp ps goal (2 subgoals): 1. \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp (ps @ qs) 2. homogeneous h \<Longrightarrow> hom_decomp (ps @ qs) [PROOF STEP] have "monomial_decomp qs" [PROOF STATE] proof (prove) goal (1 subgoal): 1. monomial_decomp qs [PROOF STEP] proof - [PROOF STATE] proof (state) goal (1 subgoal): 1. monomial_decomp qs [PROOF STEP] have "is_monomial (?x * h)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. is_monomial (monomial (1::'a) (monomial (Suc 0) x) * h) [PROOF STEP] by (metis \<open>is_monomial h\<close> is_monomial_monomial monomial_is_monomial mult.commute mult.right_neutral mult_single) [PROOF STATE] proof (state) this: is_monomial (monomial (1::'a) (monomial (Suc 0) x) * h) goal (1 subgoal): 1. monomial_decomp qs [PROOF STEP] thus ?thesis [PROOF STATE] proof (prove) using this: is_monomial (monomial (1::'a) (monomial (Suc 0) x) * h) goal (1 subgoal): 1. monomial_decomp qs [PROOF STEP] by (simp add: monomial_decomp_def qs_def lc_times \<open>punit.lc h = 1\<close>) [PROOF STATE] proof (state) this: monomial_decomp qs goal: No subgoals! [PROOF STEP] qed [PROOF STATE] proof (state) this: monomial_decomp qs goal (2 subgoals): 1. \<lbrakk>is_monomial h; lcf h = (1::'a)\<rbrakk> \<Longrightarrow> monomial_decomp (ps @ qs) 2. homogeneous h \<Longrightarrow> hom_decomp (ps @ qs) [PROOF STEP] ultimately [PROOF STATE] proof (chain) picking this: monomial_decomp ps monomial_decomp qs [PROOF STEP] show "monomial_decomp (ps @ qs)" [PROOF STATE] proof (prove) using this: monomial_decomp ps monomial_decomp qs goal (1 subgoal): 1. monomial_decomp (ps @ qs) [PROOF STEP] by (simp only: monomial_decomp_append_iff) [PROOF STATE] proof (state) this: monomial_decomp (ps @ qs) goal (1 subgoal): 1. 
homogeneous h \<Longrightarrow> hom_decomp (ps @ qs) [PROOF STEP] next [PROOF STATE] proof (state) goal (1 subgoal): 1. homogeneous h \<Longrightarrow> hom_decomp (ps @ qs) [PROOF STEP] assume "homogeneous h" [PROOF STATE] proof (state) this: homogeneous h goal (1 subgoal): 1. homogeneous h \<Longrightarrow> hom_decomp (ps @ qs) [PROOF STEP] hence "hom_decomp ps" [PROOF STATE] proof (prove) using this: homogeneous h goal (1 subgoal): 1. hom_decomp ps [PROOF STEP] by (rule 4) [PROOF STATE] proof (state) this: hom_decomp ps goal (1 subgoal): 1. homogeneous h \<Longrightarrow> hom_decomp (ps @ qs) [PROOF STEP] moreover [PROOF STATE] proof (state) this: hom_decomp ps goal (1 subgoal): 1. homogeneous h \<Longrightarrow> hom_decomp (ps @ qs) [PROOF STEP] from \<open>homogeneous h\<close> [PROOF STATE] proof (chain) picking this: homogeneous h [PROOF STEP] have "hom_decomp qs" [PROOF STATE] proof (prove) using this: homogeneous h goal (1 subgoal): 1. hom_decomp qs [PROOF STEP] by (simp add: hom_decomp_def qs_def homogeneous_times) [PROOF STATE] proof (state) this: hom_decomp qs goal (1 subgoal): 1. homogeneous h \<Longrightarrow> hom_decomp (ps @ qs) [PROOF STEP] ultimately [PROOF STATE] proof (chain) picking this: hom_decomp ps hom_decomp qs [PROOF STEP] show "hom_decomp (ps @ qs)" [PROOF STATE] proof (prove) using this: hom_decomp ps hom_decomp qs goal (1 subgoal): 1. hom_decomp (ps @ qs) [PROOF STEP] by (simp only: hom_decomp_append_iff) [PROOF STATE] proof (state) this: hom_decomp (ps @ qs) goal: No subgoals! [PROOF STEP] qed [PROOF STATE] proof (state) this: thesis goal: No subgoals! [PROOF STEP] qed [PROOF STATE] proof (state) this: thesis goal: No subgoals! [PROOF STEP] qed
/- Lemmas about nth -/ universe u namespace list theorem nth_is_none_bound {α:Type} {l : list α} {i:ℕ} (pr : l.nth i = none) : i ≥ l.length := begin revert i, induction l, case nil { intros i pr, apply nat.zero_le, }, case cons h r ind { intros i pr, cases i; simp at pr, contradiction, exact nat.succ_le_succ (ind pr), } end theorem nth_is_some_bound {α:Type} {l : list α} {i:ℕ} {e : α} (pr : l.nth i = some e) : i < l.length := begin revert i, induction l, case nil { intros i pr, simp at pr, contradiction, }, case cons h r ind { intros i pr, cases i, { apply nat.zero_lt_succ, }, { apply nat.succ_le_succ (ind pr), } } end theorem nth_mem {α:Type} {l : list α} {i:ℕ} {e : α} (pr : l.nth i = some e) : e ∈ l := begin revert i, induction l, case nil { intros i pr, simp at pr, contradiction, }, case cons h r ind { intros i pr, cases i; simp at pr, case nat.zero { simp [option.some.inj pr], }, case nat.succ i { exact or.inr (ind pr), } } end lemma nth_mem_len {A} {x : A} {xs : list A} (H : x ∈ xs) : ∃ n, xs.nth n = some x := begin induction xs, case nil { simp at H, contradiction, }, case cons h r ind { simp at H, cases H with here there, { apply exists.intro 0, simp [here], }, { apply exists.elim (ind there), intros n pr, constructor, change (nth (h :: r) (nat.succ n) = some x), exact pr, } }, end lemma nth_append_left {α:Type u} {x : α} (xs ys : list α) {i : ℕ} (pr : xs.nth i = some x) : (xs ++ ys).nth i = some x := begin revert i, induction xs, case nil { intros i pr, contradiction, }, case cons h r ind { intros i pr, cases i with i, { exact pr, }, { exact ind pr, }, } end end list
#ifndef BOOST_METAPARSE_GETTING_STARTED_6_HPP #define BOOST_METAPARSE_GETTING_STARTED_6_HPP // Automatically generated header file // Definitions before section 5.2.4 #include "5_2_4.hpp" // Definitions of section 5.2.4 #include <boost/metaparse/foldl_start_with_parser.hpp> using exp_parser11 = build_parser< foldl_start_with_parser< sequence<plus_token, int_token>, /* apply this parser repeatedly */ int_token, /* use this parser to get the initial value */ boost::mpl::quote2<sum_items> /* use this function to add a new value to the summary */ > >; // query: // exp_parser11::apply<BOOST_METAPARSE_STRING("1 + 2 + 3 + 4")>::type #endif
Formal statement is: lemma tl_cCons [simp]: "tl (x ## xs) = xs" Informal statement is: Taking the tail of the list obtained by prepending an element x to a list xs (with ##) gives back the original list xs.
/- Copyright (c) 2022 Scott Morrison. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Patrick Massot, Scott Morrison ! This file was ported from Lean 3 source module tactic.assert_exists ! leanprover-community/mathlib commit 90367774bb3afc7bdc1e9acbc770970042378306 ! Please do not edit these lines, except to modify the commit id ! if you have ported upstream changes. -/ import Mathbin.Tactic.Core import Mathbin.Tactic.Lint.Basic /-! # User commands for assert the (non-)existence of declaration or instances. These commands are used to enforce the independence of different parts of mathlib. ## Implementation notes This file provides two linters that verify that things we assert do not _yet_ exist do _eventually_ exist. This works by creating declarations of the form: * ``assert_not_exists._checked.<uniq> : name := `foo`` for `assert_not_exists foo` * `assert_no_instance._checked.<uniq> := t` for `assert_instance t` These declarations are then picked up by the linter and analyzed accordingly. The `_` in the `_checked` prefix should hide them from doc-gen. -/ section /- ./././Mathport/Syntax/Translate/Tactic/Mathlib/Core.lean:38:34: unsupported: setup_tactic_parser -/ open Tactic /-- `assert_exists n` is a user command that asserts that a declaration named `n` exists in the current import scope. Be careful to use names (e.g. `rat`) rather than notations (e.g. `ℚ`). -/ @[user_command] unsafe def assert_exists (_ : parse <| tk "assert_exists") : lean.parser Unit := do let decl ← ident let d ← get_decl decl return () #align assert_exists assert_exists /-- `assert_not_exists n` is a user command that asserts that a declaration named `n` *does not exist* in the current import scope. Be careful to use names (e.g. `rat`) rather than notations (e.g. `ℚ`). It may be used (sparingly!) in mathlib to enforce plans that certain files are independent of each other. If you encounter an error on an `assert_not_exists` command while developing mathlib, it is probably because you have introduced new import dependencies to a file. In this case, you should refactor your work (for example by creating new files rather than adding imports to existing files). You should *not* delete the `assert_not_exists` statement without careful discussion ahead of time. -/ @[user_command] unsafe def assert_not_exists (_ : parse <| tk "assert_not_exists") : lean.parser Unit := do let decl ← ident let ff ← succeeds (get_decl decl) | fail f! "Declaration {decl} is not allowed to exist in this file." let n ← tactic.mk_fresh_name let marker := `assert_not_exists._checked.append (decl.append n) add_decl (declaration.defn marker [] q(Name) q(decl) default tt) pure () #align assert_not_exists assert_not_exists /-- A linter for checking that the declarations marked `assert_not_exists` eventually exist. -/ unsafe def assert_not_exists.linter : linter where test d := do let n := d.to_name let tt ← pure (`assert_not_exists._checked.isPrefixOfₓ n) | pure none let declaration.defn _ _ q(Name) val _ _ ← pure d let n ← tactic.eval_expr Name val let tt ← succeeds (get_decl n) | pure (some (f! "`{n}` does not ever exist").toString) pure none auto_decls := true no_errors_found := "All `assert_not_exists` declarations eventually exist." errors_found := "The following declarations used in `assert_not_exists` never exist; perhaps there is a typo." 
is_fast := true #align assert_not_exists.linter assert_not_exists.linter /-- `assert_instance e` is a user command that asserts that an instance `e` is available in the current import scope. Example usage: ``` assert_instance semiring ℕ ``` -/ @[user_command] unsafe def assert_instance (_ : parse <| tk "assert_instance") : lean.parser Unit := do let q ← texpr let e ← i_to_expr q mk_instance e return () #align assert_instance assert_instance /-- `assert_no_instance e` is a user command that asserts that an instance `e` *is not available* in the current import scope. It may be used (sparingly!) in mathlib to enforce plans that certain files are independent of each other. If you encounter an error on an `assert_no_instance` command while developing mathlib, it is probably because you have introduced new import dependencies to a file. In this case, you should refactor your work (for example by creating new files rather than adding imports to existing files). You should *not* delete the `assert_no_instance` statement without careful discussion ahead of time. Example usage: ``` assert_no_instance linear_ordered_field ℚ ``` -/ @[user_command] unsafe def assert_no_instance (_ : parse <| tk "assert_no_instance") : lean.parser Unit := do let q ← texpr let e ← i_to_expr q let i ← try_core (mk_instance e) match i with | none => do let n ← tactic.mk_fresh_name let e_str ← toString <$> pp e let marker := (`assert_no_instance._checked.mk_string e_str).append n let et ← infer_type e let tt ← succeeds (get_decl marker) | add_decl (declaration.defn marker [] et e default tt) pure () | some i => (throwError "Instance `{(← i)} : {← e}` is not allowed to be found in this file." : tactic Unit) #align assert_no_instance assert_no_instance /-- A linter for checking that the declarations marked `assert_no_instance` eventually exist. -/ unsafe def assert_no_instance.linter : linter where test d := do let n := d.to_name let tt ← pure (`assert_no_instance._checked.isPrefixOfₓ n) | pure none let declaration.defn _ _ _ val _ _ ← pure d let tt ← succeeds (tactic.mk_instance val) | (some ∘ format.to_string) <$> f!"No instance of `{← val}`" pure none auto_decls := true no_errors_found := "All `assert_no_instance` instances eventually exist." errors_found := "The following typeclass instances used in `assert_no_instance` never exist; perhaps they " ++ "are missing?" is_fast := false #align assert_no_instance.linter assert_no_instance.linter end
import logic.nontrivial import algebra.order.ring import data.nat.basic /-! ### Test `nontriviality` with inequality hypotheses -/ example {R : Type} [ordered_ring R] {a : R} (h : 0 < a) : 0 < a := begin nontriviality, guard_hyp _inst : nontrivial R, assumption, end /-! ### Test `nontriviality` with equality or non-strict inequality goals -/ example {R : Type} [comm_ring R] {r s : R} : r * s = s * r := begin nontriviality, guard_hyp _inst : nontrivial R, apply mul_comm, end /-! ### Test deducing `nontriviality` by instance search -/ example {R : Type} [ordered_ring R] : 0 ≤ (1 : R) := begin nontriviality R, guard_hyp _inst : nontrivial R, exact zero_le_one, end example {R : Type} [ordered_ring R] : 0 ≤ (1 : R) := begin nontriviality ℕ, guard_hyp _inst : nontrivial ℕ, exact zero_le_one, end example {R : Type} [ordered_ring R] : 0 ≤ (2 : R) := begin success_if_fail { nontriviality punit }, exact zero_le_two, end example {R : Type} [ordered_ring R] {a : R} (h : 0 < a) : 2 ∣ 4 := begin nontriviality R, guard_hyp _inst : nontrivial R, dec_trivial end /-! Test using `@[nontriviality]` lemmas in `nontriviality and custom `simp` lemmas -/ def empty_or_univ {α : Type*} (s : set α) : Prop := s = ∅ ∨ s = set.univ lemma subsingleton.set_empty_or_univ {α} [subsingleton α] (s : set α) : s = ∅ ∨ s = set.univ := subsingleton.set_cases (or.inl rfl) (or.inr rfl) s lemma subsingleton.set_empty_or_univ' {α} [subsingleton α] (s : set α) : empty_or_univ s := subsingleton.set_empty_or_univ s example {α : Type*} (s : set α) (hs : s = ∅ ∪ set.univ) : empty_or_univ s := begin success_if_fail { nontriviality α }, rw [set.empty_union] at hs, exact or.inr hs end section local attribute [nontriviality] subsingleton.set_empty_or_univ example {α : Type*} (s : set α) (hs : s = ∅ ∪ set.univ) : empty_or_univ s := begin success_if_fail { nontriviality α }, nontriviality α using [subsingleton.set_empty_or_univ'], rw [set.empty_union] at hs, exact or.inr hs end end local attribute [nontriviality] subsingleton.set_empty_or_univ' example {α : Type*} (s : set α) (hs : s = ∅ ∪ set.univ) : empty_or_univ s := begin nontriviality α, rw [set.empty_union] at hs, exact or.inr hs end /-! Test with nonatomic type argument -/ example (α : ℕ → Type) (a b : α 0) (h : a = b) : a = b := begin nontriviality α 0 using [nat.zero_lt_one], guard_hyp _inst : nontrivial (α 0), exact h end
lemma topological_basis_iff: assumes "\<And>B'. B' \<in> B \<Longrightarrow> open B'" shows "topological_basis B \<longleftrightarrow> (\<forall>O'. open O' \<longrightarrow> (\<forall>x\<in>O'. \<exists>B'\<in>B. x \<in> B' \<and> B' \<subseteq> O'))" (is "_ \<longleftrightarrow> ?rhs")
import topology.algebra.infinite_sum import topology.instances.nnreal open_locale big_operators nnreal lemma tsum_abs_eq_coe_tsum_nnabs {α : Type*} (f : α → ℝ) : (∑' i, abs (f i)) = ∑' i, real.nnabs (f i) := by simp only [real.coe_nnabs] open nnreal finset lemma has_sum_nat_add_iff'' {f : ℕ → ℝ≥0} (k : ℕ) {a : ℝ≥0} : has_sum (λ n, f (n + k)) a ↔ has_sum f (a + ∑ i in range k, f i) := begin unfold has_sum, rw ← tendsto_coe, rw ← tendsto_coe, simp only [coe_sum], convert _root_.has_sum_nat_add_iff k, refl, classical, rw nnreal.coe_add, simp only [coe_sum], apply_instance, end . lemma sum_add_tsum_nat_add' {f : ℕ → ℝ≥0} (k : ℕ) (h : summable f) : (∑ i in range k, f i) + (∑' i, f (i + k)) = (∑' i, f i) := by simpa [add_comm] using ((has_sum_nat_add_iff'' k).1 ((nnreal.summable_nat_add_iff k).2 h).has_sum).unique h.has_sum lemma tsum_eq_zero {ι} (f : ι → ℝ≥0) (h : ∀ b, f b = 0) : (∑' b, f b) = 0 := by simp only [h, tsum_zero] #lint- only unused_arguments def_lemma doc_blame
using Enzyme

function main()
    # Speelpenning's product: writes y[1] = prod(x) in place.
    function speelpenning(y::AbstractVector{VT}, x::AbstractVector{VT}) where {VT}
        y[1] = reduce(*, x)
        return nothing
    end

    y = [0.0]
    n = 10
    x = [i/(1.0+i) for i in 1:n]
    speelpenning(y,x)

    # Differentiate with Enzyme using Duplicated shadows: dy seeds the output
    # sensitivity and dx accumulates the gradient of y[1] with respect to x.
    dx = zeros(n)
    dy = [1.0]
    autodiff(speelpenning, Duplicated(y,dy), Duplicated(x,dx))

    y = [0.0]
    speelpenning(y,x)

    # The analytic partial derivative of prod(x) w.r.t. x[i] is prod(x)/x[i],
    # so errg accumulates the absolute error of the AD gradient.
    errg = 0.0
    for (i, v) in enumerate(x)
        errg += abs(dx[i]-y[1]/v)
    end

    # prod(i/(1+i) for i in 1:n) telescopes to 1/(n+1), hence the value check.
    return (y[1]-1/(1.0+n)), errg
end
------------------------------------------------------------ -- -- ShowExample.hs -- Code sample accompanying topic 1.3.4 "Type classes" -- See README.md for details -- -- Fundamentals of Practical Haskell Programming -- By Richard Cook -- ------------------------------------------------------------ import Data.Complex import Data.Ratio main :: IO () main = do print 5 print 5.0 print "five" print (5 :+ 6) print (5 % 6)
Require Import ZArith ROmega. (* Submitted by Yegor Bryukhov (BZ#922) *) Open Scope Z_scope. (* First a simplified version used during debug of romega on Test46 *) Lemma Test46_simplified : forall v1 v2 v5 : Z, 0 = v2 + v5 -> 0 < v5 -> 0 < v2 -> 4*v2 <> 5*v1. intros. romega. Qed. (* The complete problem *) Lemma Test46 : forall v1 v2 v3 v4 v5 : Z, ((2 * v4) + (5)) + (8 * v2) <= ((4 * v4) + (3 * v4)) + (5 * v4) -> 9 * v4 > (1 * v4) + ((2 * v1) + (0 * v2)) -> ((9 * v3) + (2 * v5)) + (5 * v2) = 3 * v4 -> 0 > 6 * v1 -> (0 * v3) + (6 * v2) <> 2 -> (0 * v3) + (5 * v5) <> ((4 * v2) + (8 * v2)) + (2 * v5) -> 7 * v3 > 5 * v5 -> 0 * v4 >= ((5 * v1) + (4 * v1)) + ((6 * v5) + (3 * v5)) -> 7 * v2 = ((3 * v2) + (6 * v5)) + (7 * v2) -> 0 * v3 > 7 * v1 -> 9 * v2 < 9 * v5 -> (2 * v3) + (8 * v1) <= 5 * v4 -> 5 * v2 = ((5 * v1) + (0 * v5)) + (1 * v2) -> 0 * v5 <= 9 * v2 -> ((7 * v1) + (1 * v3)) + ((2 * v3) + (1 * v3)) >= ((6 * v5) + (4)) + ((1) + (9)) -> False. intros. romega. Qed.
subroutine hello() PRINT *, "Hello World!" END subroutine goodbye() PRINT *, "Goodbye World!" END
```python
import numpy as np
import sympy
import control as co

# We tell Python to treat "s" as a SymPy symbol.
# We have to make it rational!
s = sympy.Symbol('s', rational=True)

# An example transfer function
K = (s+3)/(3*s**3 + 2*s**2 + 1*s + 1)

'''
Here is where it gets dirty.
We convert the expression to a Polynomial object in SymPy and then extract its
coefficients. The list of coefficients we get back is full of sympy.core.numbers
objects -- numbers, but SymPy's numbers -- so we then convert each one of them
into a plain Python float.
'''

# We get the numerator and denominator
num_list, den_list = [[float(i) for i in sympy.Poly(i, s).all_coeffs()] for i in K.as_numer_denom()]

# Now num_list and den_list are simple float lists and we put them into the
# TransferFunction() constructor from the control module
trf = co.TransferFunction(num_list, den_list)
```

```python
trf
```

$$\frac{s + 3}{3 s^3 + 2 s^2 + s + 1}$$

```python
import sympy as sp

s = sp.Symbol('s', rational=True)
eq = 1/(2*s**2 + 3*s + 4)
top, bot = [[float(i) for i in sp.Poly(i, s).all_coeffs()] for i in eq.as_numer_denom()]

import control as co
co.TransferFunction(top, bot)
```

$$\frac{1}{2 s^2 + 3 s + 4}$$
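The coefficient-extraction trick in the notebook above is easy to wrap into a reusable helper. A minimal sketch follows; the function name `sympy_to_tf` is mine, not part of either library, and it assumes (as the notebook does) that the expression is a ratio of polynomials in `s`:

```python
import sympy as sp
import control as co

def sympy_to_tf(expr, s):
    """Convert a SymPy rational expression in `s` to a control.TransferFunction.

    sympy.Poly(...).all_coeffs() returns SymPy numbers, so each coefficient is
    cast to a plain Python float before being handed to the control module.
    """
    num, den = expr.as_numer_denom()
    num_coeffs = [float(c) for c in sp.Poly(num, s).all_coeffs()]
    den_coeffs = [float(c) for c in sp.Poly(den, s).all_coeffs()]
    return co.TransferFunction(num_coeffs, den_coeffs)

# Example: the same transfer function as in the first cell above.
s = sp.Symbol('s', rational=True)
K = (s + 3) / (3*s**3 + 2*s**2 + s + 1)
print(sympy_to_tf(K, s))
```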
Picturesque Children of the Allies; J. Salmon, 1916
[STATEMENT] lemma "A \<and> B \<longrightarrow> B \<and> A" [PROOF STATE] proof (prove) goal (1 subgoal): 1. A \<and> B \<longrightarrow> B \<and> A [PROOF STEP] proof [PROOF STATE] proof (state) goal (1 subgoal): 1. A \<and> B \<Longrightarrow> B \<and> A [PROOF STEP] assume "A \<and> B" [PROOF STATE] proof (state) this: A \<and> B goal (1 subgoal): 1. A \<and> B \<Longrightarrow> B \<and> A [PROOF STEP] then [PROOF STATE] proof (chain) picking this: A \<and> B [PROOF STEP] show "B \<and> A" [PROOF STATE] proof (prove) using this: A \<and> B goal (1 subgoal): 1. B \<and> A [PROOF STEP] proof [PROOF STATE] proof (state) goal (1 subgoal): 1. \<lbrakk>A; B\<rbrakk> \<Longrightarrow> B \<and> A [PROOF STEP] assume B A [PROOF STATE] proof (state) this: B A goal (1 subgoal): 1. \<lbrakk>A; B\<rbrakk> \<Longrightarrow> B \<and> A [PROOF STEP] then [PROOF STATE] proof (chain) picking this: B A [PROOF STEP] show ?thesis [PROOF STATE] proof (prove) using this: B A goal (1 subgoal): 1. B \<and> A [PROOF STEP] .. [PROOF STATE] proof (state) this: B \<and> A goal: No subgoals! [PROOF STEP] qed [PROOF STATE] proof (state) this: B \<and> A goal: No subgoals! [PROOF STEP] qed
"""input.py Class to read in input files for the muspinsim script """ import re from io import StringIO import numpy as np from collections import namedtuple from muspinsim.input.keyword import ( InputKeywords, MuSpinEvaluateKeyword, MuSpinCouplingKeyword, ) from muspinsim.input.larkeval import LarkExpressionError class MuSpinInputError(Exception): pass MuSpinInputValue = namedtuple("MuSpinInputValue", ["name", "args", "value"]) # Experiment defaults as .in files _exp_defaults = { "alc": """ polarization longitudinal y_axis integral x_axis field """, "zero_field": """ field 0.0 polarization transverse x_axis time y_axis asymmetry """, } class MuSpinInput(object): def __init__(self, fs=None): """Read in an input file Read in an input file from an opened file stream Arguments: fs {TextIOBase} -- I/O stream (should be file, can be StringIO) """ self._keywords = {} self._variables = {} self._fitting_info = {"fit": False, "data": None, "method": None, "rtol": None} if fs is not None: lines = fs.readlines() # Split lines in blocks raw_blocks = {} curr_block = None block_line_nums = {} indre = re.compile("(\\s+)[^\\s]") indent = None for i, l in enumerate(lines): # Remove any comments l = l.split("#", 1)[0] if l.strip() == "": continue # It's a comment m = indre.match(l) if m: if indent is None: indent = m.groups()[0] if m.groups()[0] != indent: raise RuntimeError("Invalid indent in input file") else: try: raw_blocks[curr_block].append(l.strip()) except KeyError: raise RuntimeError("Badly formatted input file") else: curr_block = l.strip() raw_blocks[curr_block] = [] block_line_nums[curr_block] = i + 1 indent = None # Reset for each block # A special case: if there are fitting variables, we need to know # right away self._load_fitting_kw(raw_blocks) # Another special case: if the "experiment" keyword is present, # use it to set some defaults try: block = raw_blocks.pop("experiment") kw = InputKeywords["experiment"](block) exptype = kw.evaluate()[0] if len(exptype) > 1: raise MuSpinInputError( "Can not define more than one experiment type" ) elif len(exptype) == 1: try: mock_i = MuSpinInput(StringIO(_exp_defaults[exptype[0]])) self._keywords.update(mock_i._keywords) except KeyError: raise MuSpinInputError("Invalid experiment type defined") except KeyError: pass # Now parse errors_found = [] for header, block in raw_blocks.items(): try: hsplit = header.split() name = hsplit[0] args = hsplit[1:] try: KWClass = InputKeywords[name] except KeyError: raise MuSpinInputError( "Invalid keyword {0} found in input file".format(name) ) if issubclass(KWClass, MuSpinEvaluateKeyword): kw = KWClass(block, args=args, variables=self._variables) else: kw = KWClass(block, args=args) kwid = kw.id if name != kwid: self._keywords[name] = self._keywords.get(name, {}) self._keywords[name][kwid] = kw else: self._keywords[name] = kw except LarkExpressionError as e: errors_found += [ "Error occurred when parsing keyword {0}" " (block starting at line {1}):\n{2}".format( name, block_line_nums[header], str(e) ) ] if errors_found: raise MuSpinInputError( "Found {0} Errors whilst trying to parse input file: " "\n\n{1}".format(len(errors_found), "\n\n".join(errors_found)) ) @property def variables(self): return {**self._variables} @property def fitting_info(self): return {**self._fitting_info} def evaluate(self, **variables): """Produce a full dictionary with a value for every input keyword, interpreted given the variable values that have been passed.""" result = {"couplings": {}, "fitting_info": self.fitting_info} for name, 
KWClass in InputKeywords.items(): if issubclass(KWClass, MuSpinCouplingKeyword): if name in self._keywords: for kwid, kw in self._keywords[name].items(): val = MuSpinInputValue( name, kw.arguments, kw.evaluate(**variables) ) result["couplings"][kwid] = val else: if name in self._keywords: kw = self._keywords[name] v = variables if issubclass(KWClass, MuSpinEvaluateKeyword) else {} val = kw.evaluate(**v) result[name] = MuSpinInputValue(name, kw.arguments, val) elif KWClass.default is not None: kw = KWClass() val = np.array(kw.evaluate()) result[name] = MuSpinInputValue(name, kw.arguments, val) return result def _load_fitting_kw(self, raw_blocks): """Special case: handling of all the fitting related keywords and information.""" try: block = raw_blocks.pop("fitting_variables") kw = InputKeywords["fitting_variables"](block) self._variables = {v.name: v for v in kw.evaluate()} except KeyError: pass if len(self._variables) == 0: return self._fitting_info["fit"] = True try: block = raw_blocks.pop("fitting_data") kw = InputKeywords["fitting_data"](block) self._fitting_info["data"] = np.array(kw.evaluate()) except KeyError: raise MuSpinInputError( "Fitting variables defined without defining" " a set of data to fit" ) block = raw_blocks.pop("fitting_tolerance", []) kw = InputKeywords["fitting_tolerance"](block) self._fitting_info["rtol"] = float(kw.evaluate()[0][0]) block = raw_blocks.pop("fitting_method", []) kw = InputKeywords["fitting_method"](block) self._fitting_info["method"] = kw.evaluate()[0][0]
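A rough usage sketch for the `MuSpinInput` parser defined above, mainly to make the expected file layout concrete: an unindented keyword line followed by consistently indented value lines. The keyword names (`experiment`, `field`) appear in the defaults and code above, but the exact value syntax accepted by each `InputKeywords` entry is not visible here, so treat the numbers as placeholders:

```python
from io import StringIO

# Hypothetical minimal input: block headers unindented, values indented.
example = StringIO(
    "experiment\n"
    "    zero_field\n"
    "field\n"
    "    2.0\n"
)

inp = MuSpinInput(example)     # the class defined above; an open file works too
params = inp.evaluate()        # dict of MuSpinInputValue tuples, plus 'couplings' and 'fitting_info'
print(params["fitting_info"])  # -> {'fit': False, ...} since no fitting_variables block was given
```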
/- Copyright (c) 2017 Johannes Hölzl. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Johannes Hölzl, Yury G. Kudryashov, Scott Morrison -/ import data.finsupp import ring_theory.algebra import linear_algebra.star_algebra import topology.algebra.infinite_sum import data.monoid_algebra /-! # Monoid star algebras This is formally parallel to monoid_star_algebra, but instead of taking finite formal combinations of the generators, we take L^1 combinations. We take the measure to be the sum over all elements. This makes the convolution product definable by restricting the sum ∑_x,y f(x) g(y) to x*y = a. Ideally we would take the definitions from monoid_algebra, interpreted formally, and check the summability conditions. This requires generalizing finsupp to summable or some stronger norm condition (we could also use ℓ_2). NOTE: we are trying to use summable as defined in topology.algebra.infinite_sum but this is NOT the same as ℓ_1 whose definition involves the sum of the norm of the elements. So we would rather use l1_space but using the discreteness to define the convolution. ## Implementation note Unfortunately because additive and multiplicative structures both appear in both cases, it doesn't appear to be possible to make much use of `to_additive`, and we just settle for saying everything twice. Similarly, I attempted to just define `add_monoid_star_algebra k G := monoid_star_algebra k (multiplicative G)`, but the definitional equality `multiplicative G = G` leaks through everywhere, and seems impossible to use. -/ noncomputable theory open_locale classical open finset finsupp universes u₁ u₂ u₃ variables (k : Type u₁) (G : Type u₂) section variables [add_comm_monoid k] [topological_space k] [monoid G] /-- The monoid algebra over a semiring `k` generated by the monoid `G`. It is the type of finite formal `k`-linear combinations of terms of `G`, endowed with the convolution product. -/ -- this is not right but we want a similar class of summable functions. @[derive [inhabited, add_comm_monoid]] def monoid_star_algebra : Type (max u₁ u₂) := G →₀ k end namespace monoid_star_algebra variables {k G} local attribute [reducible] monoid_star_algebra section variables [add_comm_monoid k] [topological_space k] [monoid G] /-- The product of `f g : monoid_star_algebra k G` is the summable function whose value at `a` is the sum of `f x * g y` over all pairs `x, y` such that `x * y = a`. (Think of the group ring of a group.) 
-/ def mul_def1 (f g : monoid_star_algebra k G) := (f.sum $ λa₁ b₁, g.sum $ λa₂ b₂, single (a₁ * a₂) (b₁ * b₂)) #check mul_def1 lemma mul_convergence (f g : monoid_star_algebra k G) (hf : summable f) (hg : summable g) : summable (mul_def1 f g) := begin unfold summable at *, unfold mul_def1, rcases hf, rcases hg, end instance : has_mul (monoid_star_algebra k G) := ⟨λf g, f.sum $ λa₁ b₁, g.sum $ λa₂ b₂, single (a₁ * a₂) (b₁ * b₂)⟩ lemma mul_def {f g : monoid_star_algebra k G} : f * g = (f.sum $ λa₁ b₁, g.sum $ λa₂ b₂, single (a₁ * a₂) (b₁ * b₂)) := rfl lemma mul_apply (f g : monoid_star_algebra k G) (x : G) : (f * g) x = (f.sum $ λa₁ b₁, g.sum $ λa₂ b₂, if a₁ * a₂ = x then b₁ * b₂ else 0) := begin rw [mul_def], simp only [finsupp.sum_apply, single_apply], end lemma mul_apply_antidiagonal (f g : monoid_star_algebra k G) (x : G) (s : finset (G × G)) (hs : ∀ {p : G × G}, p ∈ s ↔ p.1 * p.2 = x) : (f * g) x = s.sum (λ p, f p.1 * g p.2) := let F : G × G → k := λ p, if p.1 * p.2 = x then f p.1 * g p.2 else 0 in calc (f * g) x = (f.support.sum $ λ a₁, g.support.sum $ λ a₂, F (a₁, a₂)) : mul_apply f g x ... = (f.support.product g.support).sum F : finset.sum_product.symm ... = ((f.support.product g.support).filter (λ p : G × G, p.1 * p.2 = x)).sum (λ p, f p.1 * g p.2) : (finset.sum_filter _ _).symm ... = (s.filter (λ p : G × G, p.1 ∈ f.support ∧ p.2 ∈ g.support)).sum (λ p, f p.1 * g p.2) : sum_congr (by { ext, simp [hs, and_comm] }) (λ _ _, rfl) ... = s.sum (λ p, f p.1 * g p.2) : sum_subset (filter_subset _) $ λ p hps hp, begin simp only [mem_filter, mem_support_iff, not_and, not_not] at hp ⊢, by_cases h1 : f p.1 = 0, { rw [h1, zero_mul] }, { rw [hp hps h1, mul_zero] } end end section variables [semiring k] [monoid G] lemma support_mul (a b : monoid_star_algebra k G) : (a * b).support ⊆ a.support.bind (λa₁, b.support.bind $ λa₂, {a₁ * a₂}) := subset.trans support_sum $ bind_mono $ assume a₁ _, subset.trans support_sum $ bind_mono $ assume a₂ _, support_single_subset /-- The unit of the multiplication is `single 1 1`, i.e. the function that is `1` at `1` and zero elsewhere. -/ instance : has_one (monoid_star_algebra k G) := ⟨single 1 1⟩ lemma one_def : (1 : monoid_star_algebra k G) = single 1 1 := rfl -- TODO: the simplifier unfolds 0 in the instance proof! 
protected lemma zero_mul (f : monoid_star_algebra k G) : 0 * f = 0 := by simp only [mul_def, sum_zero_index] protected lemma mul_zero (f : monoid_star_algebra k G) : f * 0 = 0 := by simp only [mul_def, sum_zero_index, sum_zero] private lemma left_distrib (a b c : monoid_star_algebra k G) : a * (b + c) = a * b + a * c := by simp only [mul_def, sum_add_index, mul_add, mul_zero, single_zero, single_add, eq_self_iff_true, forall_true_iff, forall_3_true_iff, sum_add] private lemma right_distrib (a b c : monoid_star_algebra k G) : (a + b) * c = a * c + b * c := by simp only [mul_def, sum_add_index, add_mul, mul_zero, zero_mul, single_zero, single_add, eq_self_iff_true, forall_true_iff, forall_3_true_iff, sum_zero, sum_add] instance : semiring (monoid_star_algebra k G) := { one := 1, mul := (*), one_mul := assume f, by simp only [mul_def, one_def, sum_single_index, zero_mul, single_zero, sum_zero, zero_add, one_mul, sum_single], mul_one := assume f, by simp only [mul_def, one_def, sum_single_index, mul_zero, single_zero, sum_zero, add_zero, mul_one, sum_single], zero_mul := monoid_star_algebra.zero_mul, mul_zero := monoid_star_algebra.mul_zero, mul_assoc := assume f g h, by simp only [mul_def, sum_sum_index, sum_zero_index, sum_add_index, sum_single_index, single_zero, single_add, eq_self_iff_true, forall_true_iff, forall_3_true_iff, add_mul, mul_add, add_assoc, mul_assoc, zero_mul, mul_zero, sum_zero, sum_add], left_distrib := left_distrib, right_distrib := right_distrib, .. finsupp.add_comm_monoid } @[simp] lemma single_mul_single {a₁ a₂ : G} {b₁ b₂ : k} : (single a₁ b₁ : monoid_star_algebra k G) * single a₂ b₂ = single (a₁ * a₂) (b₁ * b₂) := (sum_single_index (by simp only [zero_mul, single_zero, sum_zero])).trans (sum_single_index (by rw [mul_zero, single_zero])) @[simp] lemma single_pow {a : G} {b : k} : ∀ n : ℕ, (single a b : monoid_star_algebra k G)^n = single (a^n) (b ^ n) | 0 := rfl | (n+1) := by simp only [pow_succ, single_pow n, single_mul_single] section variables (k G) /-- Embedding of a monoid into its monoid algebra. -/ def of : G →* monoid_star_algebra k G := { to_fun := λ a, single a 1, map_one' := rfl, map_mul' := λ a b, by rw [single_mul_single, one_mul] } end @[simp] lemma of_apply (a : G) : of k G a = single a 1 := rfl lemma mul_single_apply_aux (f : monoid_star_algebra k G) {r : k} {x y z : G} (H : ∀ a, a * x = z ↔ a = y) : (f * single x r) z = f y * r := have A : ∀ a₁ b₁, (single x r).sum (λ a₂ b₂, ite (a₁ * a₂ = z) (b₁ * b₂) 0) = ite (a₁ * x = z) (b₁ * r) 0, from λ a₁ b₁, sum_single_index $ by simp, calc (f * single x r) z = sum f (λ a b, if (a = y) then (b * r) else 0) : -- different `decidable` instances make it not trivial by { simp only [mul_apply, A, H], congr, funext, split_ifs; refl } ... = if y ∈ f.support then f y * r else 0 : f.support.sum_ite_eq' _ _ ... = f y * r : by split_ifs with h; simp at h; simp [h] lemma mul_single_one_apply (f : monoid_star_algebra k G) (r : k) (x : G) : (f * single 1 r) x = f x * r := f.mul_single_apply_aux $ λ a, by rw [mul_one] lemma single_mul_apply_aux (f : monoid_star_algebra k G) {r : k} {x y z : G} (H : ∀ a, x * a = y ↔ a = z) : (single x r * f) y = r * f z := have f.sum (λ a b, ite (x * a = y) (0 * b) 0) = 0, by simp, calc (single x r * f) y = sum f (λ a b, ite (x * a = y) (r * b) 0) : (mul_apply _ _ _).trans $ sum_single_index this ... = f.sum (λ a b, ite (a = z) (r * b) 0) : by { simp only [H], congr, ext; split_ifs; refl } ... = if z ∈ f.support then (r * f z) else 0 : f.support.sum_ite_eq' _ _ ... 
= _ : by split_ifs with h; simp at h; simp [h] lemma single_one_mul_apply (f : monoid_star_algebra k G) (r : k) (x : G) : (single 1 r * f) x = r * f x := f.single_mul_apply_aux $ λ a, by rw [one_mul] end instance [comm_semiring k] [comm_monoid G] : comm_semiring (monoid_star_algebra k G) := { mul_comm := assume f g, begin simp only [mul_def, finsupp.sum, mul_comm], rw [finset.sum_comm], simp only [mul_comm] end, .. monoid_star_algebra.semiring } instance [ring k] : has_neg (monoid_star_algebra k G) := by apply_instance instance [ring k] [monoid G] : ring (monoid_star_algebra k G) := { neg := has_neg.neg, add_left_neg := add_left_neg, .. monoid_star_algebra.semiring } instance [comm_ring k] [comm_monoid G] : comm_ring (monoid_star_algebra k G) := { mul_comm := mul_comm, .. monoid_star_algebra.ring} instance [semiring k] : has_scalar k (monoid_star_algebra k G) := finsupp.has_scalar instance [semiring k] : semimodule k (monoid_star_algebra k G) := finsupp.semimodule G k instance [ring k] : module k (monoid_star_algebra k G) := by {delta monoid_star_algebra, apply_instance } lemma single_one_comm [comm_semiring k] [monoid G] (r : k) (f : monoid_star_algebra k G) : single 1 r * f = f * single 1 r := by { ext, rw [single_one_mul_apply, mul_single_one_apply, mul_comm] } instance [comm_semiring k] [monoid G] : algebra k (monoid_star_algebra k G) := { to_fun := single 1, map_one' := rfl, map_mul' := λ x y, by rw [single_mul_single, one_mul], map_zero' := single_zero, map_add' := λ x y, single_add, smul_def' := λ r a, ext (λ _, smul_apply.trans (single_one_mul_apply _ _ _).symm), commutes' := λ r f, ext $ λ _, by rw [single_one_mul_apply, mul_single_one_apply, mul_comm] } @[simp] lemma coe_algebra_map [comm_semiring k] [monoid G] : (algebra_map k (monoid_star_algebra k G) : k → monoid_star_algebra k G) = single 1 := rfl lemma single_eq_algebra_map_mul_of [comm_semiring k] [monoid G] (a : G) (b : k) : single a b = (algebra_map k (monoid_star_algebra k G) : k → monoid_star_algebra k G) b * of k G a := by simp instance [group G] [semiring k] : distrib_mul_action G (monoid_star_algebra k G) := finsupp.comap_distrib_mul_action_self section lift variables (k G) [comm_semiring k] [monoid G] (R : Type u₃) [semiring R] [algebra k R] /-- Any monoid homomorphism `G →* R` can be lifted to an algebra homomorphism `monoid_star_algebra k G →ₐ[k] R`. 
-/ def lift : (G →* R) ≃ (monoid_star_algebra k G →ₐ[k] R) := { inv_fun := λ f, (f : monoid_star_algebra k G →* R).comp (of k G), to_fun := λ F, { to_fun := λ f, f.sum (λ a b, b • F a), map_one' := by { rw [one_def, sum_single_index, one_smul, F.map_one], apply zero_smul }, map_mul' := begin intros f g, rw [mul_def, finsupp.sum_mul, finsupp.sum_sum_index]; try { intros, simp only [zero_smul, add_smul], done }, refine finset.sum_congr rfl (λ a ha, _), simp only [], rw [finsupp.mul_sum, finsupp.sum_sum_index]; try { intros, simp only [zero_smul, add_smul], done }, refine finset.sum_congr rfl (λ a' ha', _), simp only [], rw [sum_single_index, F.map_mul, algebra.mul_smul_comm, algebra.smul_mul_assoc, smul_smul, mul_comm], apply zero_smul end, map_zero' := sum_zero_index, map_add' := λ f g, by rw [sum_add_index]; intros; simp only [zero_smul, add_smul], commutes' := λ r, by rw [coe_algebra_map, sum_single_index, F.map_one, algebra.smul_def, mul_one]; apply zero_smul }, left_inv := λ f, begin ext x, simp [sum_single_index] end, right_inv := λ F, begin ext f, conv_rhs { rw ← f.sum_single }, simp [← F.map_smul, finsupp.sum, ← F.map_sum] end } variables {k G R} lemma lift_apply (F : G →* R) (f : monoid_star_algebra k G) : lift k G R F f = f.sum (λ a b, b • F a) := rfl @[simp] lemma lift_symm_apply (F : monoid_star_algebra k G →ₐ[k] R) (x : G) : (lift k G R).symm F x = F (single x 1) := rfl lemma lift_of (F : G →* R) (x) : lift k G R F (of k G x) = F x := by rw [of_apply, ← lift_symm_apply, equiv.symm_apply_apply] @[simp] lemma lift_single (F : G →* R) (a b) : lift k G R F (single a b) = b • F a := by rw [single_eq_algebra_map_mul_of, ← algebra.smul_def, alg_hom.map_smul, lift_of] lemma lift_unique' (F : monoid_star_algebra k G →ₐ[k] R) : F = lift k G R ((F : monoid_star_algebra k G →* R).comp (of k G)) := ((lift k G R).apply_symm_apply F).symm /-- Decomposition of a `k`-algebra homomorphism from `monoid_star_algebra k G` by its values on `F (single a 1)`. -/ lemma lift_unique (F : monoid_star_algebra k G →ₐ[k] R) (f : monoid_star_algebra k G) : F f = f.sum (λ a b, b • F (single a 1)) := by conv_lhs { rw lift_unique' F, simp [lift_apply] } /-- A `k`-algebra homomorphism from `monoid_star_algebra k G` is uniquely defined by its values on the functions `single a 1`. -/ lemma alg_hom_ext ⦃φ₁ φ₂ : monoid_star_algebra k G →ₐ[k] R⦄ (h : ∀ x, φ₁ (single x 1) = φ₂ (single x 1)) : φ₁ = φ₂ := (lift k G R).symm.injective $ monoid_hom.ext h end lift section variables (k) /-- When `V` is a `k[G]`-module, multiplication by a group element `g` is a `k`-linear map. -/ def group_smul.linear_map [group G] [comm_ring k] (V : Type u₃) [add_comm_group V] [module (monoid_star_algebra k G) V] (g : G) : (module.restrict_scalars k (monoid_star_algebra k G) V) →ₗ[k] (module.restrict_scalars k (monoid_star_algebra k G) V) := { to_fun := λ v, (single g (1 : k) • v : V), map_add' := λ x y, smul_add (single g (1 : k)) x y, map_smul' := λ c x, by simp only [module.restrict_scalars_smul_def, coe_algebra_map, ←mul_smul, single_one_comm], }. 
@[simp] lemma group_smul.linear_map_apply [group G] [comm_ring k] (V : Type u₃) [add_comm_group V] [module (monoid_star_algebra k G) V] (g : G) (v : V) : (group_smul.linear_map k V g) v = (single g (1 : k) • v : V) := rfl section variables {k} variables [group G] [comm_ring k] {V : Type u₃} {gV : add_comm_group V} {mV : module (monoid_star_algebra k G) V} {W : Type u₃} {gW : add_comm_group W} {mW : module (monoid_star_algebra k G) W} (f : (module.restrict_scalars k (monoid_star_algebra k G) V) →ₗ[k] (module.restrict_scalars k (monoid_star_algebra k G) W)) (h : ∀ (g : G) (v : V), f (single g (1 : k) • v : V) = (single g (1 : k) • (f v) : W)) include h /-- Build a `k[G]`-linear map from a `k`-linear map and evidence that it is `G`-equivariant. -/ def equivariant_of_linear_of_comm : V →ₗ[monoid_star_algebra k G] W := { to_fun := f, map_add' := λ v v', by simp, map_smul' := λ c v, begin apply finsupp.induction c, { simp, }, { intros g r c' nm nz w, rw [add_smul, linear_map.map_add, w, add_smul, add_left_inj, single_eq_algebra_map_mul_of, ←smul_smul, ←smul_smul], erw [f.map_smul, h g v], refl, } end, } @[simp] lemma equivariant_of_linear_of_comm_apply (v : V) : (equivariant_of_linear_of_comm f h) v = f v := rfl end end universe ui variable {ι : Type ui} lemma prod_single [comm_semiring k] [comm_monoid G] {s : finset ι} {a : ι → G} {b : ι → k} : s.prod (λi, single (a i) (b i)) = single (s.prod a) (s.prod b) := finset.induction_on s rfl $ λ a s has ih, by rw [prod_insert has, ih, single_mul_single, prod_insert has, prod_insert has] section -- We now prove some additional statements that hold for group algebras. variables [semiring k] [group G] @[simp] lemma mul_single_apply (f : monoid_star_algebra k G) (r : k) (x y : G) : (f * single x r) y = f (y * x⁻¹) * r := f.mul_single_apply_aux $ λ a, eq_mul_inv_iff_mul_eq.symm @[simp] lemma single_mul_apply (r : k) (x : G) (f : monoid_star_algebra k G) (y : G) : (single x r * f) y = r * f (x⁻¹ * y) := f.single_mul_apply_aux $ λ z, eq_inv_mul_iff_mul_eq.symm lemma mul_apply_left (f g : monoid_star_algebra k G) (x : G) : (f * g) x = (f.sum $ λ a b, b * (g (a⁻¹ * x))) := calc (f * g) x = sum f (λ a b, (single a (f a) * g) x) : by rw [← finsupp.sum_apply, ← finsupp.sum_mul, f.sum_single] ... = _ : by simp only [single_mul_apply, finsupp.sum] -- If we'd assumed `comm_semiring`, we could deduce this from `mul_apply_left`. lemma mul_apply_right (f g : monoid_star_algebra k G) (x : G) : (f * g) x = (g.sum $ λa b, (f (x * a⁻¹)) * b) := calc (f * g) x = sum g (λ a b, (f * single a (g a)) x) : by rw [← finsupp.sum_apply, ← finsupp.mul_sum, g.sum_single] ... = _ : by simp only [mul_single_apply, finsupp.sum] end end monoid_star_algebra section variables [semiring k] /-- The monoid algebra over a semiring `k` generated by the additive monoid `G`. It is the type of finite formal `k`-linear combinations of terms of `G`, endowed with the convolution product. -/ @[derive [inhabited, add_comm_monoid]] def add_monoid_star_algebra := G →₀ k end namespace add_monoid_star_algebra variables {k G} local attribute [reducible] add_monoid_star_algebra section variables [semiring k] [add_monoid G] /-- The product of `f g : add_monoid_star_algebra k G` is the finitely supported function whose value at `a` is the sum of `f x * g y` over all pairs `x, y` such that `x + y = a`. (Think of the product of multivariate polynomials where `α` is the additive monoid of monomial exponents.) 
-/ instance : has_mul (add_monoid_star_algebra k G) := ⟨λf g, f.sum $ λa₁ b₁, g.sum $ λa₂ b₂, single (a₁ + a₂) (b₁ * b₂)⟩ lemma mul_def {f g : add_monoid_star_algebra k G} : f * g = (f.sum $ λa₁ b₁, g.sum $ λa₂ b₂, single (a₁ + a₂) (b₁ * b₂)) := rfl lemma mul_apply (f g : add_monoid_star_algebra k G) (x : G) : (f * g) x = (f.sum $ λa₁ b₁, g.sum $ λa₂ b₂, if a₁ + a₂ = x then b₁ * b₂ else 0) := begin rw [mul_def], simp only [finsupp.sum_apply, single_apply], end lemma support_mul (a b : add_monoid_star_algebra k G) : (a * b).support ⊆ a.support.bind (λa₁, b.support.bind $ λa₂, {a₁ + a₂}) := subset.trans support_sum $ bind_mono $ assume a₁ _, subset.trans support_sum $ bind_mono $ assume a₂ _, support_single_subset /-- The unit of the multiplication is `single 1 1`, i.e. the function that is `1` at `0` and zero elsewhere. -/ instance : has_one (add_monoid_star_algebra k G) := ⟨single 0 1⟩ lemma one_def : (1 : add_monoid_star_algebra k G) = single 0 1 := rfl -- TODO: the simplifier unfolds 0 in the instance proof! protected lemma zero_mul (f : add_monoid_star_algebra k G) : 0 * f = 0 := by simp only [mul_def, sum_zero_index] protected lemma mul_zero (f : add_monoid_star_algebra k G) : f * 0 = 0 := by simp only [mul_def, sum_zero_index, sum_zero] private lemma left_distrib (a b c : add_monoid_star_algebra k G) : a * (b + c) = a * b + a * c := by simp only [mul_def, sum_add_index, mul_add, mul_zero, single_zero, single_add, eq_self_iff_true, forall_true_iff, forall_3_true_iff, sum_add] private lemma right_distrib (a b c : add_monoid_star_algebra k G) : (a + b) * c = a * c + b * c := by simp only [mul_def, sum_add_index, add_mul, mul_zero, zero_mul, single_zero, single_add, eq_self_iff_true, forall_true_iff, forall_3_true_iff, sum_zero, sum_add] instance : semiring (add_monoid_star_algebra k G) := { one := 1, mul := (*), one_mul := assume f, by simp only [mul_def, one_def, sum_single_index, zero_mul, single_zero, sum_zero, zero_add, one_mul, sum_single], mul_one := assume f, by simp only [mul_def, one_def, sum_single_index, mul_zero, single_zero, sum_zero, add_zero, mul_one, sum_single], zero_mul := add_monoid_star_algebra.zero_mul, mul_zero := add_monoid_star_algebra.mul_zero, mul_assoc := assume f g h, by simp only [mul_def, sum_sum_index, sum_zero_index, sum_add_index, sum_single_index, single_zero, single_add, eq_self_iff_true, forall_true_iff, forall_3_true_iff, add_mul, mul_add, add_assoc, mul_assoc, zero_mul, mul_zero, sum_zero, sum_add], left_distrib := left_distrib, right_distrib := right_distrib, .. finsupp.add_comm_monoid } lemma single_mul_single {a₁ a₂ : G} {b₁ b₂ : k} : (single a₁ b₁ : add_monoid_star_algebra k G) * single a₂ b₂ = single (a₁ + a₂) (b₁ * b₂) := (sum_single_index (by simp only [zero_mul, single_zero, sum_zero])).trans (sum_single_index (by rw [mul_zero, single_zero])) section variables (k G) /-- Embedding of a monoid into its monoid algebra. 
-/ def of : multiplicative G →* add_monoid_star_algebra k G := { to_fun := λ a, single a 1, map_one' := rfl, map_mul' := λ a b, by { rw [single_mul_single, one_mul], refl } } end @[simp] lemma of_apply (a : G) : of k G a = single a 1 := rfl lemma mul_single_apply_aux (f : add_monoid_star_algebra k G) (r : k) (x y z : G) (H : ∀ a, a + x = z ↔ a = y) : (f * single x r) z = f y * r := have A : ∀ a₁ b₁, (single x r).sum (λ a₂ b₂, ite (a₁ + a₂ = z) (b₁ * b₂) 0) = ite (a₁ + x = z) (b₁ * r) 0, from λ a₁ b₁, sum_single_index $ by simp, calc (f * single x r) z = sum f (λ a b, if (a = y) then (b * r) else 0) : -- different `decidable` instances make it not trivial by { simp only [mul_apply, A, H], congr, funext, split_ifs; refl } ... = if y ∈ f.support then f y * r else 0 : f.support.sum_ite_eq' _ _ ... = f y * r : by split_ifs with h; simp at h; simp [h] lemma mul_single_zero_apply (f : add_monoid_star_algebra k G) (r : k) (x : G) : (f * single 0 r) x = f x * r := f.mul_single_apply_aux r _ _ _ $ λ a, by rw [add_zero] lemma single_mul_apply_aux (f : add_monoid_star_algebra k G) (r : k) (x y z : G) (H : ∀ a, x + a = y ↔ a = z) : (single x r * f) y = r * f z := have f.sum (λ a b, ite (x + a = y) (0 * b) 0) = 0, by simp, calc (single x r * f) y = sum f (λ a b, ite (x + a = y) (r * b) 0) : (mul_apply _ _ _).trans $ sum_single_index this ... = f.sum (λ a b, ite (a = z) (r * b) 0) : by { simp only [H], congr, ext; split_ifs; refl } ... = if z ∈ f.support then (r * f z) else 0 : f.support.sum_ite_eq' _ _ ... = _ : by split_ifs with h; simp at h; simp [h] lemma single_zero_mul_apply (f : add_monoid_star_algebra k G) (r : k) (x : G) : (single 0 r * f) x = r * f x := f.single_mul_apply_aux r _ _ _ $ λ a, by rw [zero_add] end instance [comm_semiring k] [add_comm_monoid G] : comm_semiring (add_monoid_star_algebra k G) := { mul_comm := assume f g, begin simp only [mul_def, finsupp.sum, mul_comm], rw [finset.sum_comm], simp only [add_comm] end, .. add_monoid_star_algebra.semiring } instance [ring k] : has_neg (add_monoid_star_algebra k G) := by apply_instance instance [ring k] [add_monoid G] : ring (add_monoid_star_algebra k G) := { neg := has_neg.neg, add_left_neg := add_left_neg, .. add_monoid_star_algebra.semiring } instance [comm_ring k] [add_comm_monoid G] : comm_ring (add_monoid_star_algebra k G) := { mul_comm := mul_comm, .. add_monoid_star_algebra.ring} instance [semiring k] : has_scalar k (add_monoid_star_algebra k G) := finsupp.has_scalar instance [semiring k] : semimodule k (add_monoid_star_algebra k G) := finsupp.semimodule G k instance [ring k] : module k (add_monoid_star_algebra k G) := by { delta add_monoid_star_algebra, apply_instance } instance [comm_semiring k] [add_monoid G] : algebra k (add_monoid_star_algebra k G) := { to_fun := single 0, map_one' := rfl, map_mul' := λ x y, by rw [single_mul_single, zero_add], map_zero' := single_zero, map_add' := λ x y, single_add, smul_def' := λ r a, by { ext x, exact smul_apply.trans (single_zero_mul_apply _ _ _).symm }, commutes' := λ r f, show single 0 r * f = f * single 0 r, by ext; rw [single_zero_mul_apply, mul_single_zero_apply, mul_comm] } @[simp] lemma coe_algebra_map [comm_semiring k] [add_monoid G] : (algebra_map k (add_monoid_star_algebra k G) : k → add_monoid_star_algebra k G) = single 0 := rfl /-- Any monoid homomorphism `multiplicative G →* R` can be lifted to an algebra homomorphism `add_monoid_star_algebra k G →ₐ[k] R`. 
-/ def lift [comm_semiring k] [add_monoid G] {R : Type u₃} [semiring R] [algebra k R] : (multiplicative G →* R) ≃ (add_monoid_star_algebra k G →ₐ[k] R) := { inv_fun := λ f, ((f : add_monoid_star_algebra k G →+* R) : add_monoid_star_algebra k G →* R).comp (of k G), to_fun := λ F, { to_fun := λ f, f.sum (λ a b, b • F a), map_one' := by { rw [one_def, sum_single_index, one_smul], erw [F.map_one], apply zero_smul }, map_mul' := begin intros f g, rw [mul_def, finsupp.sum_mul, finsupp.sum_sum_index]; try { intros, simp only [zero_smul, add_smul], done }, refine finset.sum_congr rfl (λ a ha, _), simp only [], rw [finsupp.mul_sum, finsupp.sum_sum_index]; try { intros, simp only [zero_smul, add_smul], done }, refine finset.sum_congr rfl (λ a' ha', _), simp only [], rw [sum_single_index], erw [F.map_mul], rw [algebra.mul_smul_comm, algebra.smul_mul_assoc, smul_smul, mul_comm], apply zero_smul end, map_zero' := sum_zero_index, map_add' := λ f g, by rw [sum_add_index]; intros; simp only [zero_smul, add_smul], commutes' := λ r, begin rw [coe_algebra_map, sum_single_index], erw [F.map_one], rw [algebra.smul_def, mul_one], apply zero_smul end, }, left_inv := λ f, begin ext x, simp [sum_single_index] end, right_inv := λ F, begin ext f, conv_rhs { rw ← f.sum_single }, simp [← F.map_smul, finsupp.sum, ← F.map_sum] end } -- It is hard to state the equivalent of `distrib_mul_action G (monoid_star_algebra k G)` -- because we've never discussed actions of additive groups. universe ui variable {ι : Type ui} lemma prod_single [comm_semiring k] [add_comm_monoid G] {s : finset ι} {a : ι → G} {b : ι → k} : s.prod (λi, single (a i) (b i)) = single (s.sum a) (s.prod b) := finset.induction_on s rfl $ λ a s has ih, by rw [prod_insert has, ih, single_mul_single, sum_insert has, prod_insert has] end add_monoid_star_algebra
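The `mul_convergence` lemma in the `monoid_star_algebra` file above is left unfinished. A standard way to obtain summability of the product (under a norm-summability hypothesis, in line with the header's remark that mathlib's `summable` is weaker than ℓ¹) is the classical bound: with the product defined pointwise by

$$ (f * g)(a) \;=\; \sum_{x y = a} f(x)\, g(y), $$

one rearranges the double sum over $G \times G$ along the fibres of $(x, y) \mapsto x y$ to get

$$ \sum_{a \in G} \bigl| (f*g)(a) \bigr| \;\le\; \sum_{a \in G} \sum_{x y = a} |f(x)|\,|g(y)| \;=\; \Bigl(\sum_{x} |f(x)|\Bigr)\Bigl(\sum_{y} |g(y)|\Bigr), $$

so norm-summability of $f$ and $g$ gives norm-summability of $f * g$ (Young's inequality for $\ell^1(G)$).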
/- Copyright (c) 2018 Patrick Massot. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Patrick Massot, Johannes Hölzl ! This file was ported from Lean 3 source module topology.algebra.group_completion ! leanprover-community/mathlib commit f47581155c818e6361af4e4fda60d27d020c226b ! Please do not edit these lines, except to modify the commit id ! if you have ported upstream changes. -/ import Mathbin.Topology.Algebra.UniformGroup import Mathbin.Topology.Algebra.UniformMulAction import Mathbin.Topology.UniformSpace.Completion /-! # Completion of topological groups: > THIS FILE IS SYNCHRONIZED WITH MATHLIB4. > Any changes to this file require a corresponding PR to mathlib4. This files endows the completion of a topological abelian group with a group structure. More precisely the instance `uniform_space.completion.add_group` builds an abelian group structure on the completion of an abelian group endowed with a compatible uniform structure. Then the instance `uniform_space.completion.uniform_add_group` proves this group structure is compatible with the completed uniform structure. The compatibility condition is `uniform_add_group`. ## Main declarations: Beyond the instances explained above (that don't have to be explicitly invoked), the main constructions deal with continuous group morphisms. * `add_monoid_hom.extension`: extends a continuous group morphism from `G` to a complete separated group `H` to `completion G`. * `add_monoid_hom.completion`: promotes a continuous group morphism from `G` to `H` into a continuous group morphism from `completion G` to `completion H`. -/ noncomputable section variable {M R α β : Type _} section Group open UniformSpace CauchyFilter Filter Set variable [UniformSpace α] instance [Zero α] : Zero (Completion α) := ⟨(0 : α)⟩ instance [Neg α] : Neg (Completion α) := ⟨Completion.map (fun a => -a : α → α)⟩ instance [Add α] : Add (Completion α) := ⟨Completion.map₂ (· + ·)⟩ instance [Sub α] : Sub (Completion α) := ⟨Completion.map₂ Sub.sub⟩ /- warning: uniform_space.completion.coe_zero -> UniformSpace.Completion.coe_zero is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : Zero.{u1} α], Eq.{succ u1} (UniformSpace.Completion.{u1} α _inst_1) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (UniformSpace.Completion.{u1} α _inst_1) (HasLiftT.mk.{succ u1, succ u1} α (UniformSpace.Completion.{u1} α _inst_1) (CoeTCₓ.coe.{succ u1, succ u1} α (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.hasCoeT.{u1} α _inst_1))) (OfNat.ofNat.{u1} α 0 (OfNat.mk.{u1} α 0 (Zero.zero.{u1} α _inst_2)))) (OfNat.ofNat.{u1} (UniformSpace.Completion.{u1} α _inst_1) 0 (OfNat.mk.{u1} (UniformSpace.Completion.{u1} α _inst_1) 0 (Zero.zero.{u1} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.hasZero.{u1} α _inst_1 _inst_2)))) but is expected to have type forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : Zero.{u1} α], Eq.{succ u1} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.coe'.{u1} α _inst_1 (OfNat.ofNat.{u1} α 0 (Zero.toOfNat0.{u1} α _inst_2))) (OfNat.ofNat.{u1} (UniformSpace.Completion.{u1} α _inst_1) 0 (Zero.toOfNat0.{u1} (UniformSpace.Completion.{u1} α _inst_1) (instZeroCompletion.{u1} α _inst_1 _inst_2))) Case conversion may be inaccurate. Consider using '#align uniform_space.completion.coe_zero UniformSpace.Completion.coe_zeroₓ'. 
-/ @[norm_cast] theorem UniformSpace.Completion.coe_zero [Zero α] : ((0 : α) : Completion α) = 0 := rfl #align uniform_space.completion.coe_zero UniformSpace.Completion.coe_zero end Group namespace UniformSpace.Completion open UniformSpace section Zero instance [UniformSpace α] [MonoidWithZero M] [Zero α] [MulActionWithZero M α] [UniformContinuousConstSMul M α] : MulActionWithZero M (Completion α) := { Completion.mulAction M α with smul := (· • ·) smul_zero := fun r => by rw [← coe_zero, ← coe_smul, MulActionWithZero.smul_zero r] zero_smul := ext' (continuous_const_smul _) continuous_const fun a => by rw [← coe_smul, zero_smul, coe_zero] } end Zero section UniformAddGroup variable [UniformSpace α] [AddGroup α] [UniformAddGroup α] /- warning: uniform_space.completion.coe_neg -> UniformSpace.Completion.coe_neg is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : AddGroup.{u1} α] [_inst_3 : UniformAddGroup.{u1} α _inst_1 _inst_2] (a : α), Eq.{succ u1} (UniformSpace.Completion.{u1} α _inst_1) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (UniformSpace.Completion.{u1} α _inst_1) (HasLiftT.mk.{succ u1, succ u1} α (UniformSpace.Completion.{u1} α _inst_1) (CoeTCₓ.coe.{succ u1, succ u1} α (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.hasCoeT.{u1} α _inst_1))) (Neg.neg.{u1} α (SubNegMonoid.toHasNeg.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2)) a)) (Neg.neg.{u1} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.hasNeg.{u1} α _inst_1 (SubNegMonoid.toHasNeg.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (UniformSpace.Completion.{u1} α _inst_1) (HasLiftT.mk.{succ u1, succ u1} α (UniformSpace.Completion.{u1} α _inst_1) (CoeTCₓ.coe.{succ u1, succ u1} α (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.hasCoeT.{u1} α _inst_1))) a)) but is expected to have type forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : AddGroup.{u1} α] [_inst_3 : UniformAddGroup.{u1} α _inst_1 _inst_2] (a : α), Eq.{succ u1} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.coe'.{u1} α _inst_1 (Neg.neg.{u1} α (NegZeroClass.toNeg.{u1} α (SubNegZeroMonoid.toNegZeroClass.{u1} α (SubtractionMonoid.toSubNegZeroMonoid.{u1} α (AddGroup.toSubtractionMonoid.{u1} α _inst_2)))) a)) (Neg.neg.{u1} (UniformSpace.Completion.{u1} α _inst_1) (instNegCompletion.{u1} α _inst_1 (NegZeroClass.toNeg.{u1} α (SubNegZeroMonoid.toNegZeroClass.{u1} α (SubtractionMonoid.toSubNegZeroMonoid.{u1} α (AddGroup.toSubtractionMonoid.{u1} α _inst_2))))) (UniformSpace.Completion.coe'.{u1} α _inst_1 a)) Case conversion may be inaccurate. Consider using '#align uniform_space.completion.coe_neg UniformSpace.Completion.coe_negₓ'. 
-/ @[norm_cast] theorem coe_neg (a : α) : ((-a : α) : Completion α) = -a := (map_coe uniformContinuous_neg a).symm #align uniform_space.completion.coe_neg UniformSpace.Completion.coe_neg /- warning: uniform_space.completion.coe_sub -> UniformSpace.Completion.coe_sub is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : AddGroup.{u1} α] [_inst_3 : UniformAddGroup.{u1} α _inst_1 _inst_2] (a : α) (b : α), Eq.{succ u1} (UniformSpace.Completion.{u1} α _inst_1) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (UniformSpace.Completion.{u1} α _inst_1) (HasLiftT.mk.{succ u1, succ u1} α (UniformSpace.Completion.{u1} α _inst_1) (CoeTCₓ.coe.{succ u1, succ u1} α (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.hasCoeT.{u1} α _inst_1))) (HSub.hSub.{u1, u1, u1} α α α (instHSub.{u1} α (SubNegMonoid.toHasSub.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) a b)) (HSub.hSub.{u1, u1, u1} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.{u1} α _inst_1) (instHSub.{u1} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.hasSub.{u1} α _inst_1 (SubNegMonoid.toHasSub.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2)))) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (UniformSpace.Completion.{u1} α _inst_1) (HasLiftT.mk.{succ u1, succ u1} α (UniformSpace.Completion.{u1} α _inst_1) (CoeTCₓ.coe.{succ u1, succ u1} α (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.hasCoeT.{u1} α _inst_1))) a) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (UniformSpace.Completion.{u1} α _inst_1) (HasLiftT.mk.{succ u1, succ u1} α (UniformSpace.Completion.{u1} α _inst_1) (CoeTCₓ.coe.{succ u1, succ u1} α (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.hasCoeT.{u1} α _inst_1))) b)) but is expected to have type forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : AddGroup.{u1} α] [_inst_3 : UniformAddGroup.{u1} α _inst_1 _inst_2] (a : α) (b : α), Eq.{succ u1} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.coe'.{u1} α _inst_1 (HSub.hSub.{u1, u1, u1} α α α (instHSub.{u1} α (SubNegMonoid.toSub.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) a b)) (HSub.hSub.{u1, u1, u1} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.{u1} α _inst_1) (instHSub.{u1} (UniformSpace.Completion.{u1} α _inst_1) (instSubCompletion.{u1} α _inst_1 (SubNegMonoid.toSub.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2)))) (UniformSpace.Completion.coe'.{u1} α _inst_1 a) (UniformSpace.Completion.coe'.{u1} α _inst_1 b)) Case conversion may be inaccurate. Consider using '#align uniform_space.completion.coe_sub UniformSpace.Completion.coe_subₓ'. 
-/ @[norm_cast] theorem coe_sub (a b : α) : ((a - b : α) : Completion α) = a - b := (map₂_coe_coe a b Sub.sub uniformContinuous_sub).symm #align uniform_space.completion.coe_sub UniformSpace.Completion.coe_sub /- warning: uniform_space.completion.coe_add -> UniformSpace.Completion.coe_add is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : AddGroup.{u1} α] [_inst_3 : UniformAddGroup.{u1} α _inst_1 _inst_2] (a : α) (b : α), Eq.{succ u1} (UniformSpace.Completion.{u1} α _inst_1) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (UniformSpace.Completion.{u1} α _inst_1) (HasLiftT.mk.{succ u1, succ u1} α (UniformSpace.Completion.{u1} α _inst_1) (CoeTCₓ.coe.{succ u1, succ u1} α (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.hasCoeT.{u1} α _inst_1))) (HAdd.hAdd.{u1, u1, u1} α α α (instHAdd.{u1} α (AddZeroClass.toHasAdd.{u1} α (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))))) a b)) (HAdd.hAdd.{u1, u1, u1} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.{u1} α _inst_1) (instHAdd.{u1} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.hasAdd.{u1} α _inst_1 (AddZeroClass.toHasAdd.{u1} α (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2)))))) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (UniformSpace.Completion.{u1} α _inst_1) (HasLiftT.mk.{succ u1, succ u1} α (UniformSpace.Completion.{u1} α _inst_1) (CoeTCₓ.coe.{succ u1, succ u1} α (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.hasCoeT.{u1} α _inst_1))) a) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (UniformSpace.Completion.{u1} α _inst_1) (HasLiftT.mk.{succ u1, succ u1} α (UniformSpace.Completion.{u1} α _inst_1) (CoeTCₓ.coe.{succ u1, succ u1} α (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.hasCoeT.{u1} α _inst_1))) b)) but is expected to have type forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : AddGroup.{u1} α] [_inst_3 : UniformAddGroup.{u1} α _inst_1 _inst_2] (a : α) (b : α), Eq.{succ u1} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.coe'.{u1} α _inst_1 (HAdd.hAdd.{u1, u1, u1} α α α (instHAdd.{u1} α (AddZeroClass.toAdd.{u1} α (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))))) a b)) (HAdd.hAdd.{u1, u1, u1} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.{u1} α _inst_1) (instHAdd.{u1} (UniformSpace.Completion.{u1} α _inst_1) (instAddCompletion.{u1} α _inst_1 (AddZeroClass.toAdd.{u1} α (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2)))))) (UniformSpace.Completion.coe'.{u1} α _inst_1 a) (UniformSpace.Completion.coe'.{u1} α _inst_1 b)) Case conversion may be inaccurate. Consider using '#align uniform_space.completion.coe_add UniformSpace.Completion.coe_addₓ'. 
-/ @[norm_cast] theorem coe_add (a b : α) : ((a + b : α) : Completion α) = a + b := (map₂_coe_coe a b (· + ·) uniformContinuous_add).symm #align uniform_space.completion.coe_add UniformSpace.Completion.coe_add instance : AddMonoid (Completion α) := { Completion.hasZero, Completion.hasAdd with zero_add := fun a => Completion.induction_on a (isClosed_eq (continuous_map₂ continuous_const continuous_id) continuous_id) fun a => show 0 + (a : Completion α) = a by rw_mod_cast [zero_add] add_zero := fun a => Completion.induction_on a (isClosed_eq (continuous_map₂ continuous_id continuous_const) continuous_id) fun a => show (a : Completion α) + 0 = a by rw_mod_cast [add_zero] add_assoc := fun a b c => Completion.induction_on₃ a b c (isClosed_eq (continuous_map₂ (continuous_map₂ continuous_fst (continuous_fst.comp continuous_snd)) (continuous_snd.comp continuous_snd)) (continuous_map₂ continuous_fst (continuous_map₂ (continuous_fst.comp continuous_snd) (continuous_snd.comp continuous_snd)))) fun a b c => show (a : Completion α) + b + c = a + (b + c) by repeat' rw_mod_cast [add_assoc] nsmul := (· • ·) nsmul_zero := fun a => Completion.induction_on a (isClosed_eq continuous_map continuous_const) fun a => by rw [← coe_smul, ← coe_zero, zero_smul] nsmul_succ := fun n a => Completion.induction_on a (isClosed_eq continuous_map <| continuous_map₂ continuous_id continuous_map) fun a => by rw_mod_cast [succ_nsmul] } instance : SubNegMonoid (Completion α) := { Completion.addMonoid, Completion.hasNeg, Completion.hasSub with sub_eq_add_neg := fun a b => Completion.induction_on₂ a b (isClosed_eq (continuous_map₂ continuous_fst continuous_snd) (continuous_map₂ continuous_fst (Completion.continuous_map.comp continuous_snd))) fun a b => by exact_mod_cast congr_arg coe (sub_eq_add_neg a b) zsmul := (· • ·) zsmul_zero' := fun a => Completion.induction_on a (isClosed_eq continuous_map continuous_const) fun a => by rw_mod_cast [zero_smul] rfl zsmul_succ' := fun n a => Completion.induction_on a (isClosed_eq continuous_map <| continuous_map₂ continuous_id continuous_map) fun a => by rw_mod_cast [show Int.ofNat n.succ • a = a + Int.ofNat n • a from SubNegMonoid.zsmul_succ' n a] zsmul_neg' := fun n a => Completion.induction_on a (isClosed_eq continuous_map <| Completion.continuous_map.comp continuous_map) fun a => by rw [← coe_smul, ← coe_smul, ← coe_neg, show -[n+1] • a = -((n.succ : ℤ) • a) from SubNegMonoid.zsmul_neg' n a] } instance : AddGroup (Completion α) := { Completion.subNegMonoid with add_left_neg := fun a => Completion.induction_on a (isClosed_eq (continuous_map₂ Completion.continuous_map continuous_id) continuous_const) fun a => show -(a : Completion α) + a = 0 by rw_mod_cast [add_left_neg] rfl } instance : UniformAddGroup (Completion α) := ⟨uniformContinuous_map₂ Sub.sub⟩ instance {M} [Monoid M] [DistribMulAction M α] [UniformContinuousConstSMul M α] : DistribMulAction M (Completion α) := { Completion.mulAction M α with smul := (· • ·) smul_add := fun r x y => induction_on₂ x y (isClosed_eq ((continuous_fst.add continuous_snd).const_smul _) ((continuous_fst.const_smul _).add (continuous_snd.const_smul _))) fun a b => by simp only [← coe_add, ← coe_smul, smul_add] smul_zero := fun r => by rw [← coe_zero, ← coe_smul, smul_zero r] } /- warning: uniform_space.completion.to_compl -> UniformSpace.Completion.toCompl is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : AddGroup.{u1} α] [_inst_3 : UniformAddGroup.{u1} α _inst_1 _inst_2], AddMonoidHom.{u1, 
u1} α (UniformSpace.Completion.{u1} α _inst_1) (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u1} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.addMonoid.{u1} α _inst_1 _inst_2 _inst_3)) but is expected to have type forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : AddGroup.{u1} α] [_inst_3 : UniformAddGroup.{u1} α _inst_1 _inst_2], AddMonoidHom.{u1, u1} α (UniformSpace.Completion.{u1} α _inst_1) (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u1} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.instAddMonoidCompletion.{u1} α _inst_1 _inst_2 _inst_3)) Case conversion may be inaccurate. Consider using '#align uniform_space.completion.to_compl UniformSpace.Completion.toComplₓ'. -/ /-- The map from a group to its completion as a group hom. -/ @[simps] def toCompl : α →+ Completion α where toFun := coe map_add' := coe_add map_zero' := coe_zero #align uniform_space.completion.to_compl UniformSpace.Completion.toCompl /- warning: uniform_space.completion.continuous_to_compl -> UniformSpace.Completion.continuous_toCompl is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : AddGroup.{u1} α] [_inst_3 : UniformAddGroup.{u1} α _inst_1 _inst_2], Continuous.{u1, u1} α (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u1} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.uniformSpace.{u1} α _inst_1)) (coeFn.{succ u1, succ u1} (AddMonoidHom.{u1, u1} α (UniformSpace.Completion.{u1} α _inst_1) (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u1} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.addMonoid.{u1} α _inst_1 _inst_2 _inst_3))) (fun (_x : AddMonoidHom.{u1, u1} α (UniformSpace.Completion.{u1} α _inst_1) (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u1} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.addMonoid.{u1} α _inst_1 _inst_2 _inst_3))) => α -> (UniformSpace.Completion.{u1} α _inst_1)) (AddMonoidHom.hasCoeToFun.{u1, u1} α (UniformSpace.Completion.{u1} α _inst_1) (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u1} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.addMonoid.{u1} α _inst_1 _inst_2 _inst_3))) (UniformSpace.Completion.toCompl.{u1} α _inst_1 _inst_2 _inst_3)) but is expected to have type forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : AddGroup.{u1} α] [_inst_3 : UniformAddGroup.{u1} α _inst_1 _inst_2], Continuous.{u1, u1} α (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u1} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.uniformSpace.{u1} α _inst_1)) (FunLike.coe.{succ u1, succ u1, succ u1} (AddMonoidHom.{u1, u1} α (UniformSpace.Completion.{u1} α _inst_1) (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u1} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.instAddMonoidCompletion.{u1} α 
_inst_1 _inst_2 _inst_3))) α (fun (_x : α) => (fun ([email protected]._hyg.403 : α) => UniformSpace.Completion.{u1} α _inst_1) _x) (AddHomClass.toFunLike.{u1, u1, u1} (AddMonoidHom.{u1, u1} α (UniformSpace.Completion.{u1} α _inst_1) (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u1} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.instAddMonoidCompletion.{u1} α _inst_1 _inst_2 _inst_3))) α (UniformSpace.Completion.{u1} α _inst_1) (AddZeroClass.toAdd.{u1} α (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2)))) (AddZeroClass.toAdd.{u1} (UniformSpace.Completion.{u1} α _inst_1) (AddMonoid.toAddZeroClass.{u1} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.instAddMonoidCompletion.{u1} α _inst_1 _inst_2 _inst_3))) (AddMonoidHomClass.toAddHomClass.{u1, u1, u1} (AddMonoidHom.{u1, u1} α (UniformSpace.Completion.{u1} α _inst_1) (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u1} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.instAddMonoidCompletion.{u1} α _inst_1 _inst_2 _inst_3))) α (UniformSpace.Completion.{u1} α _inst_1) (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u1} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.instAddMonoidCompletion.{u1} α _inst_1 _inst_2 _inst_3)) (AddMonoidHom.addMonoidHomClass.{u1, u1} α (UniformSpace.Completion.{u1} α _inst_1) (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u1} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.instAddMonoidCompletion.{u1} α _inst_1 _inst_2 _inst_3))))) (UniformSpace.Completion.toCompl.{u1} α _inst_1 _inst_2 _inst_3)) Case conversion may be inaccurate. Consider using '#align uniform_space.completion.continuous_to_compl UniformSpace.Completion.continuous_toComplₓ'. 
-/ theorem continuous_toCompl : Continuous (toCompl : α → Completion α) := continuous_coe α #align uniform_space.completion.continuous_to_compl UniformSpace.Completion.continuous_toCompl variable (α) /- warning: uniform_space.completion.dense_inducing_to_compl -> UniformSpace.Completion.denseInducing_toCompl is a dubious translation: lean 3 declaration is forall (α : Type.{u1}) [_inst_1 : UniformSpace.{u1} α] [_inst_2 : AddGroup.{u1} α] [_inst_3 : UniformAddGroup.{u1} α _inst_1 _inst_2], DenseInducing.{u1, u1} α (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u1} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.uniformSpace.{u1} α _inst_1)) (coeFn.{succ u1, succ u1} (AddMonoidHom.{u1, u1} α (UniformSpace.Completion.{u1} α _inst_1) (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u1} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.addMonoid.{u1} α _inst_1 _inst_2 _inst_3))) (fun (_x : AddMonoidHom.{u1, u1} α (UniformSpace.Completion.{u1} α _inst_1) (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u1} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.addMonoid.{u1} α _inst_1 _inst_2 _inst_3))) => α -> (UniformSpace.Completion.{u1} α _inst_1)) (AddMonoidHom.hasCoeToFun.{u1, u1} α (UniformSpace.Completion.{u1} α _inst_1) (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u1} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.addMonoid.{u1} α _inst_1 _inst_2 _inst_3))) (UniformSpace.Completion.toCompl.{u1} α _inst_1 _inst_2 _inst_3)) but is expected to have type forall (α : Type.{u1}) [_inst_1 : UniformSpace.{u1} α] [_inst_2 : AddGroup.{u1} α] [_inst_3 : UniformAddGroup.{u1} α _inst_1 _inst_2], DenseInducing.{u1, u1} α (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u1} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.uniformSpace.{u1} α _inst_1)) (FunLike.coe.{succ u1, succ u1, succ u1} (AddMonoidHom.{u1, u1} α (UniformSpace.Completion.{u1} α _inst_1) (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u1} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.instAddMonoidCompletion.{u1} α _inst_1 _inst_2 _inst_3))) α (fun (_x : α) => (fun ([email protected]._hyg.403 : α) => UniformSpace.Completion.{u1} α _inst_1) _x) (AddHomClass.toFunLike.{u1, u1, u1} (AddMonoidHom.{u1, u1} α (UniformSpace.Completion.{u1} α _inst_1) (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u1} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.instAddMonoidCompletion.{u1} α _inst_1 _inst_2 _inst_3))) α (UniformSpace.Completion.{u1} α _inst_1) (AddZeroClass.toAdd.{u1} α (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2)))) (AddZeroClass.toAdd.{u1} (UniformSpace.Completion.{u1} α _inst_1) (AddMonoid.toAddZeroClass.{u1} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.instAddMonoidCompletion.{u1} α _inst_1 _inst_2 _inst_3))) (AddMonoidHomClass.toAddHomClass.{u1, u1, u1} 
theorem denseInducing_toCompl : DenseInducing (toCompl : α → Completion α) :=
  denseInducing_coe
#align uniform_space.completion.dense_inducing_to_compl UniformSpace.Completion.denseInducing_toCompl

variable {α}

end UniformAddGroup

section UniformAddCommGroup

variable [UniformSpace α] [AddCommGroup α] [UniformAddGroup α]

instance : AddCommGroup (Completion α) :=
  { Completion.addGroup with
    add_comm := fun a b =>
      Completion.induction_on₂ a b
        (isClosed_eq (continuous_map₂ continuous_fst continuous_snd)
          (continuous_map₂ continuous_snd continuous_fst))
        fun x y => by
          change ↑x + ↑y = ↑y + ↑x
          rw [← coe_add, ← coe_add, add_comm] }

instance [Semiring R] [Module R α] [UniformContinuousConstSMul R α] : Module R (Completion α) :=
  { Completion.distribMulAction, Completion.mulActionWithZero with
    smul := (· • ·)
    add_smul := fun a b =>
      ext' (continuous_const_smul _) ((continuous_const_smul _).add (continuous_const_smul _))
        fun x => by
          norm_cast
          rw [add_smul] }

end UniformAddCommGroup

end UniformSpace.Completion

section AddMonoidHom

variable [UniformSpace α] [AddGroup α] [UniformAddGroup α] [UniformSpace β] [AddGroup β]
  [UniformAddGroup β]

open UniformSpace UniformSpace.Completion
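-- Illustrative check (a small sketch added for exposition, not part of the original port): the
-- bundled hom `toCompl` agrees with the coercion into the completion and is additive, which is
-- all the extension machinery below relies on.  It only uses `AddMonoidHom.map_add`.
example (a b : α) : toCompl (a + b) = toCompl a + toCompl b :=
  toCompl.map_add a b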
/-- Extension to the completion of a continuous group hom. -/
def AddMonoidHom.extension [CompleteSpace β] [SeparatedSpace β] (f : α →+ β) (hf : Continuous f) :
    Completion α →+ β :=
  have hf : UniformContinuous f := uniformContinuous_addMonoidHom_of_continuous hf
  { toFun := Completion.extension f
    map_zero' := by rw [← coe_zero, extension_coe hf, f.map_zero]
    map_add' := fun a b =>
      Completion.induction_on₂ a b
        (isClosed_eq (continuous_extension.comp continuous_add)
          ((continuous_extension.comp continuous_fst).add
            (continuous_extension.comp continuous_snd)))
        fun a b => by
          rw_mod_cast [extension_coe hf, extension_coe hf, extension_coe hf, f.map_add] }
#align add_monoid_hom.extension AddMonoidHom.extension
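-- Sanity-check sketch (added for exposition): by construction, the function underlying
-- `f.extension hf` is `Completion.extension f`, so the two should agree definitionally, assuming
-- the usual `FunLike` coercion for `AddMonoidHom` unfolds by `rfl`.
example [CompleteSpace β] [SeparatedSpace β] (f : α →+ β) (hf : Continuous f) :
    ⇑(f.extension hf) = Completion.extension f :=
  rfl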
theorem AddMonoidHom.extension_coe [CompleteSpace β] [SeparatedSpace β] (f : α →+ β)
    (hf : Continuous f) (a : α) : f.extension hf a = f a :=
  extension_coe (uniformContinuous_addMonoidHom_of_continuous hf) a
#align add_monoid_hom.extension_coe AddMonoidHom.extension_coe
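-- Usage sketch (added for exposition): `extension_coe` is the key compatibility fact, saying the
-- extended hom agrees with `f` on elements coming from `α`.
example [CompleteSpace β] [SeparatedSpace β] (f : α →+ β) (hf : Continuous f) (a : α) :
    f.extension hf a = f a :=
  f.extension_coe hf a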
@[continuity]
theorem AddMonoidHom.continuous_extension [CompleteSpace β] [SeparatedSpace β] (f : α →+ β)
    (hf : Continuous f) : Continuous (f.extension hf) :=
  continuous_extension
#align add_monoid_hom.continuous_extension AddMonoidHom.continuous_extension
/-- Completion of a continuous group hom, as a group hom. -/
def AddMonoidHom.completion (f : α →+ β) (hf : Continuous f) : Completion α →+ Completion β :=
  (toCompl.comp f).extension (continuous_toCompl.comp hf)
#align add_monoid_hom.completion AddMonoidHom.completion
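-- Definitional sketch (added for exposition): `f.completion hf` is, by definition, the extension
-- of `toCompl.comp f` along the completion of `α`, so the equation below holds by `rfl`.
example (f : α →+ β) (hf : Continuous f) :
    f.completion hf = (toCompl.comp f).extension (continuous_toCompl.comp hf) :=
  rfl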
@[continuity]
theorem AddMonoidHom.continuous_completion (f : α →+ β) (hf : Continuous f) :
    Continuous (f.completion hf : Completion α → Completion β) :=
  continuous_map
#align add_monoid_hom.continuous_completion AddMonoidHom.continuous_completion
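-- Combination sketch (added for exposition): completed homs interact with the topological
-- structure on `Completion β`; here a pointwise sum is continuous.  This assumes the
-- `ContinuousAdd (Completion β)` instance coming from its `UniformAddGroup` structure
-- established earlier in this file.
example (f g : α →+ β) (hf : Continuous f) (hg : Continuous g) :
    Continuous fun x => f.completion hf x + g.completion hg x :=
  (f.continuous_completion hf).add (g.continuous_completion hg)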
theorem AddMonoidHom.completion_coe (f : α →+ β) (hf : Continuous f) (a : α) :
    f.completion hf a = f a :=
  map_coe (uniformContinuous_addMonoidHom_of_continuous hf) a
#align add_monoid_hom.completion_coe AddMonoidHom.completion_coe
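-- Usage sketch (added for exposition): on elements of `α`, the completed hom is just `f`
-- followed by the canonical map into `Completion β`.
example (f : α →+ β) (hf : Continuous f) (a : α) :
    f.completion hf a = (f a : Completion β) :=
  f.completion_coe hf a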
(UniformSpace.Completion.addMonoid.{u1} α _inst_1 _inst_2 _inst_3)) (AddMonoid.toAddZeroClass.{u2} (UniformSpace.Completion.{u2} β _inst_4) (UniformSpace.Completion.addMonoid.{u2} β _inst_4 _inst_5 _inst_6))) (AddMonoidHom.hasZero.{u1, u2} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.{u2} β _inst_4) (AddMonoid.toAddZeroClass.{u1} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.addMonoid.{u1} α _inst_1 _inst_2 _inst_3)) (AddMonoid.toAddZeroClass.{u2} (UniformSpace.Completion.{u2} β _inst_4) (UniformSpace.Completion.addMonoid.{u2} β _inst_4 _inst_5 _inst_6)))))) but is expected to have type forall {α : Type.{u2}} {β : Type.{u1}} [_inst_1 : UniformSpace.{u2} α] [_inst_2 : AddGroup.{u2} α] [_inst_3 : UniformAddGroup.{u2} α _inst_1 _inst_2] [_inst_4 : UniformSpace.{u1} β] [_inst_5 : AddGroup.{u1} β] [_inst_6 : UniformAddGroup.{u1} β _inst_4 _inst_5], Eq.{max (succ u2) (succ u1)} (AddMonoidHom.{u2, u1} (UniformSpace.Completion.{u2} α _inst_1) (UniformSpace.Completion.{u1} β _inst_4) (AddMonoid.toAddZeroClass.{u2} (UniformSpace.Completion.{u2} α _inst_1) (UniformSpace.Completion.instAddMonoidCompletion.{u2} α _inst_1 _inst_2 _inst_3)) (AddMonoid.toAddZeroClass.{u1} (UniformSpace.Completion.{u1} β _inst_4) (UniformSpace.Completion.instAddMonoidCompletion.{u1} β _inst_4 _inst_5 _inst_6))) (AddMonoidHom.completion.{u2, u1} α β _inst_1 _inst_2 _inst_3 _inst_4 _inst_5 _inst_6 (OfNat.ofNat.{max u2 u1} (AddMonoidHom.{u2, u1} α β (AddMonoid.toAddZeroClass.{u2} α (SubNegMonoid.toAddMonoid.{u2} α (AddGroup.toSubNegMonoid.{u2} α _inst_2))) (AddMonoid.toAddZeroClass.{u1} β (SubNegMonoid.toAddMonoid.{u1} β (AddGroup.toSubNegMonoid.{u1} β _inst_5)))) 0 (Zero.toOfNat0.{max u2 u1} (AddMonoidHom.{u2, u1} α β (AddMonoid.toAddZeroClass.{u2} α (SubNegMonoid.toAddMonoid.{u2} α (AddGroup.toSubNegMonoid.{u2} α _inst_2))) (AddMonoid.toAddZeroClass.{u1} β (SubNegMonoid.toAddMonoid.{u1} β (AddGroup.toSubNegMonoid.{u1} β _inst_5)))) (instZeroAddMonoidHom.{u2, u1} α β (AddMonoid.toAddZeroClass.{u2} α (SubNegMonoid.toAddMonoid.{u2} α (AddGroup.toSubNegMonoid.{u2} α _inst_2))) (AddMonoid.toAddZeroClass.{u1} β (SubNegMonoid.toAddMonoid.{u1} β (AddGroup.toSubNegMonoid.{u1} β _inst_5)))))) (continuous_const.{u1, u2} α β (UniformSpace.toTopologicalSpace.{u2} α _inst_1) (UniformSpace.toTopologicalSpace.{u1} β _inst_4) ((AddZeroClass.toZero.{u1} β (AddMonoid.toAddZeroClass.{u1} β (SubNegMonoid.toAddMonoid.{u1} β (AddGroup.toSubNegMonoid.{u1} β _inst_5)))).0))) (OfNat.ofNat.{max u2 u1} (AddMonoidHom.{u2, u1} (UniformSpace.Completion.{u2} α _inst_1) (UniformSpace.Completion.{u1} β _inst_4) (AddMonoid.toAddZeroClass.{u2} (UniformSpace.Completion.{u2} α _inst_1) (UniformSpace.Completion.instAddMonoidCompletion.{u2} α _inst_1 _inst_2 _inst_3)) (AddMonoid.toAddZeroClass.{u1} (UniformSpace.Completion.{u1} β _inst_4) (UniformSpace.Completion.instAddMonoidCompletion.{u1} β _inst_4 _inst_5 _inst_6))) 0 (Zero.toOfNat0.{max u2 u1} (AddMonoidHom.{u2, u1} (UniformSpace.Completion.{u2} α _inst_1) (UniformSpace.Completion.{u1} β _inst_4) (AddMonoid.toAddZeroClass.{u2} (UniformSpace.Completion.{u2} α _inst_1) (UniformSpace.Completion.instAddMonoidCompletion.{u2} α _inst_1 _inst_2 _inst_3)) (AddMonoid.toAddZeroClass.{u1} (UniformSpace.Completion.{u1} β _inst_4) (UniformSpace.Completion.instAddMonoidCompletion.{u1} β _inst_4 _inst_5 _inst_6))) (instZeroAddMonoidHom.{u2, u1} (UniformSpace.Completion.{u2} α _inst_1) (UniformSpace.Completion.{u1} β _inst_4) (AddMonoid.toAddZeroClass.{u2} 
(UniformSpace.Completion.{u2} α _inst_1) (UniformSpace.Completion.instAddMonoidCompletion.{u2} α _inst_1 _inst_2 _inst_3)) (AddMonoid.toAddZeroClass.{u1} (UniformSpace.Completion.{u1} β _inst_4) (UniformSpace.Completion.instAddMonoidCompletion.{u1} β _inst_4 _inst_5 _inst_6))))) Case conversion may be inaccurate. Consider using '#align add_monoid_hom.completion_zero AddMonoidHom.completion_zeroₓ'. -/ theorem AddMonoidHom.completion_zero : (0 : α →+ β).Completion continuous_const = 0 := by ext x apply completion.induction_on x · apply isClosed_eq ((0 : α →+ β).continuous_completion continuous_const) simp [continuous_const] · intro a simp [(0 : α →+ β).completion_coe continuous_const, coe_zero] #align add_monoid_hom.completion_zero AddMonoidHom.completion_zero /- warning: add_monoid_hom.completion_add -> AddMonoidHom.completion_add is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : AddGroup.{u1} α] [_inst_3 : UniformAddGroup.{u1} α _inst_1 _inst_2] {γ : Type.{u2}} [_inst_7 : AddCommGroup.{u2} γ] [_inst_8 : UniformSpace.{u2} γ] [_inst_9 : UniformAddGroup.{u2} γ _inst_8 (AddCommGroup.toAddGroup.{u2} γ _inst_7)] (f : AddMonoidHom.{u1, u2} α γ (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u2} γ (SubNegMonoid.toAddMonoid.{u2} γ (AddGroup.toSubNegMonoid.{u2} γ (AddCommGroup.toAddGroup.{u2} γ _inst_7))))) (g : AddMonoidHom.{u1, u2} α γ (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u2} γ (SubNegMonoid.toAddMonoid.{u2} γ (AddGroup.toSubNegMonoid.{u2} γ (AddCommGroup.toAddGroup.{u2} γ _inst_7))))) (hf : Continuous.{u1, u2} α γ (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u2} γ _inst_8) (coeFn.{max (succ u2) (succ u1), max (succ u1) (succ u2)} (AddMonoidHom.{u1, u2} α γ (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u2} γ (SubNegMonoid.toAddMonoid.{u2} γ (AddGroup.toSubNegMonoid.{u2} γ (AddCommGroup.toAddGroup.{u2} γ _inst_7))))) (fun (_x : AddMonoidHom.{u1, u2} α γ (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u2} γ (SubNegMonoid.toAddMonoid.{u2} γ (AddGroup.toSubNegMonoid.{u2} γ (AddCommGroup.toAddGroup.{u2} γ _inst_7))))) => α -> γ) (AddMonoidHom.hasCoeToFun.{u1, u2} α γ (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u2} γ (SubNegMonoid.toAddMonoid.{u2} γ (AddGroup.toSubNegMonoid.{u2} γ (AddCommGroup.toAddGroup.{u2} γ _inst_7))))) f)) (hg : Continuous.{u1, u2} α γ (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u2} γ _inst_8) (coeFn.{max (succ u2) (succ u1), max (succ u1) (succ u2)} (AddMonoidHom.{u1, u2} α γ (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u2} γ (SubNegMonoid.toAddMonoid.{u2} γ (AddGroup.toSubNegMonoid.{u2} γ (AddCommGroup.toAddGroup.{u2} γ _inst_7))))) (fun (_x : AddMonoidHom.{u1, u2} α γ (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u2} γ (SubNegMonoid.toAddMonoid.{u2} γ (AddGroup.toSubNegMonoid.{u2} γ (AddCommGroup.toAddGroup.{u2} γ 
_inst_7))))) => α -> γ) (AddMonoidHom.hasCoeToFun.{u1, u2} α γ (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u2} γ (SubNegMonoid.toAddMonoid.{u2} γ (AddGroup.toSubNegMonoid.{u2} γ (AddCommGroup.toAddGroup.{u2} γ _inst_7))))) g)), Eq.{max (succ u2) (succ u1)} (AddMonoidHom.{u1, u2} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.{u2} γ _inst_8) (AddMonoid.toAddZeroClass.{u1} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.addMonoid.{u1} α _inst_1 _inst_2 _inst_3)) (AddMonoid.toAddZeroClass.{u2} (UniformSpace.Completion.{u2} γ _inst_8) (UniformSpace.Completion.addMonoid.{u2} γ _inst_8 (AddCommGroup.toAddGroup.{u2} γ _inst_7) _inst_9))) (AddMonoidHom.completion.{u1, u2} α γ _inst_1 _inst_2 _inst_3 _inst_8 (AddCommGroup.toAddGroup.{u2} γ _inst_7) _inst_9 (HAdd.hAdd.{max u2 u1, max u2 u1, max u2 u1} (AddMonoidHom.{u1, u2} α γ (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u2} γ (SubNegMonoid.toAddMonoid.{u2} γ (AddGroup.toSubNegMonoid.{u2} γ (AddCommGroup.toAddGroup.{u2} γ _inst_7))))) (AddMonoidHom.{u1, u2} α γ (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u2} γ (SubNegMonoid.toAddMonoid.{u2} γ (AddGroup.toSubNegMonoid.{u2} γ (AddCommGroup.toAddGroup.{u2} γ _inst_7))))) (AddMonoidHom.{u1, u2} α γ (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u2} γ (SubNegMonoid.toAddMonoid.{u2} γ (AddGroup.toSubNegMonoid.{u2} γ (AddCommGroup.toAddGroup.{u2} γ _inst_7))))) (instHAdd.{max u2 u1} (AddMonoidHom.{u1, u2} α γ (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u2} γ (SubNegMonoid.toAddMonoid.{u2} γ (AddGroup.toSubNegMonoid.{u2} γ (AddCommGroup.toAddGroup.{u2} γ _inst_7))))) (AddMonoidHom.hasAdd.{u1, u2} α γ (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddCommGroup.toAddCommMonoid.{u2} γ _inst_7))) f g) (Continuous.add.{u1, u2} α γ (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u2} γ _inst_8) (AddZeroClass.toHasAdd.{u2} γ (AddMonoid.toAddZeroClass.{u2} γ (SubNegMonoid.toAddMonoid.{u2} γ (AddGroup.toSubNegMonoid.{u2} γ (AddCommGroup.toAddGroup.{u2} γ _inst_7))))) (TopologicalAddGroup.to_continuousAdd.{u2} γ (UniformSpace.toTopologicalSpace.{u2} γ _inst_8) (AddCommGroup.toAddGroup.{u2} γ _inst_7) (UniformAddGroup.to_topologicalAddGroup.{u2} γ _inst_8 (AddCommGroup.toAddGroup.{u2} γ _inst_7) _inst_9)) (coeFn.{max (succ u2) (succ u1), max (succ u1) (succ u2)} (AddMonoidHom.{u1, u2} α γ (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u2} γ (SubNegMonoid.toAddMonoid.{u2} γ (AddGroup.toSubNegMonoid.{u2} γ (AddCommGroup.toAddGroup.{u2} γ _inst_7))))) (fun (_x : AddMonoidHom.{u1, u2} α γ (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u2} γ (SubNegMonoid.toAddMonoid.{u2} γ (AddGroup.toSubNegMonoid.{u2} γ (AddCommGroup.toAddGroup.{u2} γ _inst_7))))) => α -> γ) (AddMonoidHom.hasCoeToFun.{u1, u2} α γ (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α 
(AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u2} γ (SubNegMonoid.toAddMonoid.{u2} γ (AddGroup.toSubNegMonoid.{u2} γ (AddCommGroup.toAddGroup.{u2} γ _inst_7))))) f) (fun (x : α) => coeFn.{max (succ u2) (succ u1), max (succ u1) (succ u2)} (AddMonoidHom.{u1, u2} α γ (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u2} γ (AddCommMonoid.toAddMonoid.{u2} γ (AddCommGroup.toAddCommMonoid.{u2} γ _inst_7)))) (fun (_x : AddMonoidHom.{u1, u2} α γ (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u2} γ (AddCommMonoid.toAddMonoid.{u2} γ (AddCommGroup.toAddCommMonoid.{u2} γ _inst_7)))) => α -> γ) (AddMonoidHom.hasCoeToFun.{u1, u2} α γ (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u2} γ (AddCommMonoid.toAddMonoid.{u2} γ (AddCommGroup.toAddCommMonoid.{u2} γ _inst_7)))) g x) hf hg)) (HAdd.hAdd.{max u2 u1, max u2 u1, max u2 u1} (AddMonoidHom.{u1, u2} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.{u2} γ _inst_8) (AddMonoid.toAddZeroClass.{u1} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.addMonoid.{u1} α _inst_1 _inst_2 _inst_3)) (AddMonoid.toAddZeroClass.{u2} (UniformSpace.Completion.{u2} γ _inst_8) (UniformSpace.Completion.addMonoid.{u2} γ _inst_8 (AddCommGroup.toAddGroup.{u2} γ _inst_7) _inst_9))) (AddMonoidHom.{u1, u2} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.{u2} γ _inst_8) (AddMonoid.toAddZeroClass.{u1} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.addMonoid.{u1} α _inst_1 _inst_2 _inst_3)) (AddMonoid.toAddZeroClass.{u2} (UniformSpace.Completion.{u2} γ _inst_8) (UniformSpace.Completion.addMonoid.{u2} γ _inst_8 (AddCommGroup.toAddGroup.{u2} γ _inst_7) _inst_9))) (AddMonoidHom.{u1, u2} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.{u2} γ _inst_8) (AddMonoid.toAddZeroClass.{u1} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.addMonoid.{u1} α _inst_1 _inst_2 _inst_3)) (AddMonoid.toAddZeroClass.{u2} (UniformSpace.Completion.{u2} γ _inst_8) (UniformSpace.Completion.addMonoid.{u2} γ _inst_8 (AddCommGroup.toAddGroup.{u2} γ _inst_7) _inst_9))) (instHAdd.{max u2 u1} (AddMonoidHom.{u1, u2} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.{u2} γ _inst_8) (AddMonoid.toAddZeroClass.{u1} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.addMonoid.{u1} α _inst_1 _inst_2 _inst_3)) (AddMonoid.toAddZeroClass.{u2} (UniformSpace.Completion.{u2} γ _inst_8) (UniformSpace.Completion.addMonoid.{u2} γ _inst_8 (AddCommGroup.toAddGroup.{u2} γ _inst_7) _inst_9))) (AddMonoidHom.hasAdd.{u1, u2} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.{u2} γ _inst_8) (AddMonoid.toAddZeroClass.{u1} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.addMonoid.{u1} α _inst_1 _inst_2 _inst_3)) (AddCommGroup.toAddCommMonoid.{u2} (UniformSpace.Completion.{u2} γ _inst_8) (UniformSpace.Completion.addCommGroup.{u2} γ _inst_8 _inst_7 _inst_9)))) (AddMonoidHom.completion.{u1, u2} α γ _inst_1 _inst_2 _inst_3 _inst_8 (AddCommGroup.toAddGroup.{u2} γ _inst_7) _inst_9 f hf) (AddMonoidHom.completion.{u1, u2} α γ _inst_1 _inst_2 _inst_3 _inst_8 (AddCommGroup.toAddGroup.{u2} γ _inst_7) _inst_9 g hg)) but is expected to have type forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : 
AddGroup.{u1} α] [_inst_3 : UniformAddGroup.{u1} α _inst_1 _inst_2] {γ : Type.{u2}} [_inst_7 : AddCommGroup.{u2} γ] [_inst_8 : UniformSpace.{u2} γ] [_inst_9 : UniformAddGroup.{u2} γ _inst_8 (AddCommGroup.toAddGroup.{u2} γ _inst_7)] (f : AddMonoidHom.{u1, u2} α γ (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u2} γ (SubNegMonoid.toAddMonoid.{u2} γ (AddGroup.toSubNegMonoid.{u2} γ (AddCommGroup.toAddGroup.{u2} γ _inst_7))))) (g : AddMonoidHom.{u1, u2} α γ (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u2} γ (SubNegMonoid.toAddMonoid.{u2} γ (AddGroup.toSubNegMonoid.{u2} γ (AddCommGroup.toAddGroup.{u2} γ _inst_7))))) (hf : Continuous.{u1, u2} α γ (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u2} γ _inst_8) (FunLike.coe.{max (succ u1) (succ u2), succ u1, succ u2} (AddMonoidHom.{u1, u2} α γ (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u2} γ (SubNegMonoid.toAddMonoid.{u2} γ (AddGroup.toSubNegMonoid.{u2} γ (AddCommGroup.toAddGroup.{u2} γ _inst_7))))) α (fun (_x : α) => (fun ([email protected]._hyg.403 : α) => γ) _x) (AddHomClass.toFunLike.{max u1 u2, u1, u2} (AddMonoidHom.{u1, u2} α γ (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u2} γ (SubNegMonoid.toAddMonoid.{u2} γ (AddGroup.toSubNegMonoid.{u2} γ (AddCommGroup.toAddGroup.{u2} γ _inst_7))))) α γ (AddZeroClass.toAdd.{u1} α (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2)))) (AddZeroClass.toAdd.{u2} γ (AddMonoid.toAddZeroClass.{u2} γ (SubNegMonoid.toAddMonoid.{u2} γ (AddGroup.toSubNegMonoid.{u2} γ (AddCommGroup.toAddGroup.{u2} γ _inst_7))))) (AddMonoidHomClass.toAddHomClass.{max u1 u2, u1, u2} (AddMonoidHom.{u1, u2} α γ (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u2} γ (SubNegMonoid.toAddMonoid.{u2} γ (AddGroup.toSubNegMonoid.{u2} γ (AddCommGroup.toAddGroup.{u2} γ _inst_7))))) α γ (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u2} γ (SubNegMonoid.toAddMonoid.{u2} γ (AddGroup.toSubNegMonoid.{u2} γ (AddCommGroup.toAddGroup.{u2} γ _inst_7)))) (AddMonoidHom.addMonoidHomClass.{u1, u2} α γ (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u2} γ (SubNegMonoid.toAddMonoid.{u2} γ (AddGroup.toSubNegMonoid.{u2} γ (AddCommGroup.toAddGroup.{u2} γ _inst_7))))))) f)) (hg : Continuous.{u1, u2} α γ (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u2} γ _inst_8) (FunLike.coe.{max (succ u1) (succ u2), succ u1, succ u2} (AddMonoidHom.{u1, u2} α γ (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u2} γ (SubNegMonoid.toAddMonoid.{u2} γ (AddGroup.toSubNegMonoid.{u2} γ (AddCommGroup.toAddGroup.{u2} γ _inst_7))))) α (fun (_x : α) => (fun ([email protected]._hyg.403 : α) => γ) _x) (AddHomClass.toFunLike.{max u1 u2, u1, u2} (AddMonoidHom.{u1, u2} α γ (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α 
_inst_2))) (AddMonoid.toAddZeroClass.{u2} γ (SubNegMonoid.toAddMonoid.{u2} γ (AddGroup.toSubNegMonoid.{u2} γ (AddCommGroup.toAddGroup.{u2} γ _inst_7))))) α γ (AddZeroClass.toAdd.{u1} α (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2)))) (AddZeroClass.toAdd.{u2} γ (AddMonoid.toAddZeroClass.{u2} γ (SubNegMonoid.toAddMonoid.{u2} γ (AddGroup.toSubNegMonoid.{u2} γ (AddCommGroup.toAddGroup.{u2} γ _inst_7))))) (AddMonoidHomClass.toAddHomClass.{max u1 u2, u1, u2} (AddMonoidHom.{u1, u2} α γ (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u2} γ (SubNegMonoid.toAddMonoid.{u2} γ (AddGroup.toSubNegMonoid.{u2} γ (AddCommGroup.toAddGroup.{u2} γ _inst_7))))) α γ (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u2} γ (SubNegMonoid.toAddMonoid.{u2} γ (AddGroup.toSubNegMonoid.{u2} γ (AddCommGroup.toAddGroup.{u2} γ _inst_7)))) (AddMonoidHom.addMonoidHomClass.{u1, u2} α γ (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u2} γ (SubNegMonoid.toAddMonoid.{u2} γ (AddGroup.toSubNegMonoid.{u2} γ (AddCommGroup.toAddGroup.{u2} γ _inst_7))))))) g)), Eq.{max (succ u1) (succ u2)} (AddMonoidHom.{u1, u2} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.{u2} γ _inst_8) (AddMonoid.toAddZeroClass.{u1} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.instAddMonoidCompletion.{u1} α _inst_1 _inst_2 _inst_3)) (AddMonoid.toAddZeroClass.{u2} (UniformSpace.Completion.{u2} γ _inst_8) (UniformSpace.Completion.instAddMonoidCompletion.{u2} γ _inst_8 (AddCommGroup.toAddGroup.{u2} γ _inst_7) _inst_9))) (AddMonoidHom.completion.{u1, u2} α γ _inst_1 _inst_2 _inst_3 _inst_8 (AddCommGroup.toAddGroup.{u2} γ _inst_7) _inst_9 (HAdd.hAdd.{max u1 u2, max u1 u2, max u1 u2} (AddMonoidHom.{u1, u2} α γ (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u2} γ (SubNegMonoid.toAddMonoid.{u2} γ (AddGroup.toSubNegMonoid.{u2} γ (AddCommGroup.toAddGroup.{u2} γ _inst_7))))) (AddMonoidHom.{u1, u2} α γ (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u2} γ (SubNegMonoid.toAddMonoid.{u2} γ (AddGroup.toSubNegMonoid.{u2} γ (AddCommGroup.toAddGroup.{u2} γ _inst_7))))) (AddMonoidHom.{u1, u2} α γ (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u2} γ (SubNegMonoid.toAddMonoid.{u2} γ (AddGroup.toSubNegMonoid.{u2} γ (AddCommGroup.toAddGroup.{u2} γ _inst_7))))) (instHAdd.{max u1 u2} (AddMonoidHom.{u1, u2} α γ (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u2} γ (SubNegMonoid.toAddMonoid.{u2} γ (AddGroup.toSubNegMonoid.{u2} γ (AddCommGroup.toAddGroup.{u2} γ _inst_7))))) (AddMonoidHom.add.{u1, u2} α γ (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddCommGroup.toAddCommMonoid.{u2} γ _inst_7))) f g) (Continuous.add.{u2, u1} α γ (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u2} γ _inst_8) (AddZeroClass.toAdd.{u2} γ (AddMonoid.toAddZeroClass.{u2} γ (SubNegMonoid.toAddMonoid.{u2} γ 
(AddGroup.toSubNegMonoid.{u2} γ (AddCommGroup.toAddGroup.{u2} γ _inst_7))))) (TopologicalAddGroup.toContinuousAdd.{u2} γ (UniformSpace.toTopologicalSpace.{u2} γ _inst_8) (AddCommGroup.toAddGroup.{u2} γ _inst_7) (UniformAddGroup.to_topologicalAddGroup.{u2} γ _inst_8 (AddCommGroup.toAddGroup.{u2} γ _inst_7) _inst_9)) (FunLike.coe.{max (succ u1) (succ u2), succ u1, succ u2} (AddMonoidHom.{u1, u2} α γ (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u2} γ (SubNegMonoid.toAddMonoid.{u2} γ (AddGroup.toSubNegMonoid.{u2} γ (AddCommGroup.toAddGroup.{u2} γ _inst_7))))) α (fun (_x : α) => (fun ([email protected]._hyg.403 : α) => γ) _x) (AddHomClass.toFunLike.{max u1 u2, u1, u2} (AddMonoidHom.{u1, u2} α γ (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u2} γ (SubNegMonoid.toAddMonoid.{u2} γ (AddGroup.toSubNegMonoid.{u2} γ (AddCommGroup.toAddGroup.{u2} γ _inst_7))))) α γ (AddZeroClass.toAdd.{u1} α (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2)))) (AddZeroClass.toAdd.{u2} γ (AddMonoid.toAddZeroClass.{u2} γ (SubNegMonoid.toAddMonoid.{u2} γ (AddGroup.toSubNegMonoid.{u2} γ (AddCommGroup.toAddGroup.{u2} γ _inst_7))))) (AddMonoidHomClass.toAddHomClass.{max u1 u2, u1, u2} (AddMonoidHom.{u1, u2} α γ (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u2} γ (SubNegMonoid.toAddMonoid.{u2} γ (AddGroup.toSubNegMonoid.{u2} γ (AddCommGroup.toAddGroup.{u2} γ _inst_7))))) α γ (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u2} γ (SubNegMonoid.toAddMonoid.{u2} γ (AddGroup.toSubNegMonoid.{u2} γ (AddCommGroup.toAddGroup.{u2} γ _inst_7)))) (AddMonoidHom.addMonoidHomClass.{u1, u2} α γ (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u2} γ (SubNegMonoid.toAddMonoid.{u2} γ (AddGroup.toSubNegMonoid.{u2} γ (AddCommGroup.toAddGroup.{u2} γ _inst_7))))))) f) (fun (x : α) => FunLike.coe.{max (succ u1) (succ u2), succ u1, succ u2} (AddMonoidHom.{u1, u2} α γ (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u2} γ (AddCommMonoid.toAddMonoid.{u2} γ (AddCommGroup.toAddCommMonoid.{u2} γ _inst_7)))) α (fun (_x : α) => (fun ([email protected]._hyg.2391 : α) => γ) _x) (AddHomClass.toFunLike.{max u1 u2, u1, u2} (AddMonoidHom.{u1, u2} α γ (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u2} γ (AddCommMonoid.toAddMonoid.{u2} γ (AddCommGroup.toAddCommMonoid.{u2} γ _inst_7)))) α γ (AddZeroClass.toAdd.{u1} α (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2)))) (AddZeroClass.toAdd.{u2} γ (AddMonoid.toAddZeroClass.{u2} γ (AddCommMonoid.toAddMonoid.{u2} γ (AddCommGroup.toAddCommMonoid.{u2} γ _inst_7)))) (AddMonoidHomClass.toAddHomClass.{max u1 u2, u1, u2} (AddMonoidHom.{u1, u2} α γ (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u2} γ (AddCommMonoid.toAddMonoid.{u2} γ (AddCommGroup.toAddCommMonoid.{u2} γ _inst_7)))) α γ (AddMonoid.toAddZeroClass.{u1} α 
(SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u2} γ (AddCommMonoid.toAddMonoid.{u2} γ (AddCommGroup.toAddCommMonoid.{u2} γ _inst_7))) (AddMonoidHom.addMonoidHomClass.{u1, u2} α γ (AddMonoid.toAddZeroClass.{u1} α (SubNegMonoid.toAddMonoid.{u1} α (AddGroup.toSubNegMonoid.{u1} α _inst_2))) (AddMonoid.toAddZeroClass.{u2} γ (AddCommMonoid.toAddMonoid.{u2} γ (AddCommGroup.toAddCommMonoid.{u2} γ _inst_7)))))) g x) hf hg)) (HAdd.hAdd.{max u1 u2, max u1 u2, max u1 u2} (AddMonoidHom.{u1, u2} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.{u2} γ _inst_8) (AddMonoid.toAddZeroClass.{u1} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.instAddMonoidCompletion.{u1} α _inst_1 _inst_2 _inst_3)) (AddMonoid.toAddZeroClass.{u2} (UniformSpace.Completion.{u2} γ _inst_8) (UniformSpace.Completion.instAddMonoidCompletion.{u2} γ _inst_8 (AddCommGroup.toAddGroup.{u2} γ _inst_7) _inst_9))) (AddMonoidHom.{u1, u2} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.{u2} γ _inst_8) (AddMonoid.toAddZeroClass.{u1} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.instAddMonoidCompletion.{u1} α _inst_1 _inst_2 _inst_3)) (AddMonoid.toAddZeroClass.{u2} (UniformSpace.Completion.{u2} γ _inst_8) (UniformSpace.Completion.instAddMonoidCompletion.{u2} γ _inst_8 (AddCommGroup.toAddGroup.{u2} γ _inst_7) _inst_9))) (AddMonoidHom.{u1, u2} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.{u2} γ _inst_8) (AddMonoid.toAddZeroClass.{u1} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.instAddMonoidCompletion.{u1} α _inst_1 _inst_2 _inst_3)) (AddMonoid.toAddZeroClass.{u2} (UniformSpace.Completion.{u2} γ _inst_8) (UniformSpace.Completion.instAddMonoidCompletion.{u2} γ _inst_8 (AddCommGroup.toAddGroup.{u2} γ _inst_7) _inst_9))) (instHAdd.{max u1 u2} (AddMonoidHom.{u1, u2} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.{u2} γ _inst_8) (AddMonoid.toAddZeroClass.{u1} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.instAddMonoidCompletion.{u1} α _inst_1 _inst_2 _inst_3)) (AddMonoid.toAddZeroClass.{u2} (UniformSpace.Completion.{u2} γ _inst_8) (UniformSpace.Completion.instAddMonoidCompletion.{u2} γ _inst_8 (AddCommGroup.toAddGroup.{u2} γ _inst_7) _inst_9))) (AddMonoidHom.add.{u1, u2} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.{u2} γ _inst_8) (AddMonoid.toAddZeroClass.{u1} (UniformSpace.Completion.{u1} α _inst_1) (UniformSpace.Completion.instAddMonoidCompletion.{u1} α _inst_1 _inst_2 _inst_3)) (AddCommGroup.toAddCommMonoid.{u2} (UniformSpace.Completion.{u2} γ _inst_8) (UniformSpace.Completion.instAddCommGroupCompletion.{u2} γ _inst_8 _inst_7 _inst_9)))) (AddMonoidHom.completion.{u1, u2} α γ _inst_1 _inst_2 _inst_3 _inst_8 (AddCommGroup.toAddGroup.{u2} γ _inst_7) _inst_9 f hf) (AddMonoidHom.completion.{u1, u2} α γ _inst_1 _inst_2 _inst_3 _inst_8 (AddCommGroup.toAddGroup.{u2} γ _inst_7) _inst_9 g hg)) Case conversion may be inaccurate. Consider using '#align add_monoid_hom.completion_add AddMonoidHom.completion_addₓ'. 
-/ theorem AddMonoidHom.completion_add {γ : Type _} [AddCommGroup γ] [UniformSpace γ] [UniformAddGroup γ] (f g : α →+ γ) (hf : Continuous f) (hg : Continuous g) : (f + g).Completion (hf.add hg) = f.Completion hf + g.Completion hg := by have hfg := hf.add hg ext x apply completion.induction_on x · exact isClosed_eq ((f + g).continuous_completion hfg) ((f.continuous_completion hf).add (g.continuous_completion hg)) · intro a simp [(f + g).completion_coe hfg, coe_add, f.completion_coe hf, g.completion_coe hg] #align add_monoid_hom.completion_add AddMonoidHom.completion_add end AddMonoidHom
[STATEMENT] lemma sttp_of_comb_nil [simp]: "sttp_of (comb_nil sttp) = sttp" [PROOF STATE] proof (prove) goal (1 subgoal): 1. snd (comb_nil sttp) = sttp [PROOF STEP] by (simp add: comb_nil_def)
Private valet companies take up public parking spaces for profit. They cause traffic to back up on West Davis at Bishop during peak times. And they perpetuate the perception that valet is the only option for parking in Bishop Arts. Those are the complaints of some Oak Cliff neighbors who want City Council to review the practices of valet companies in Bishop Arts.

Amy Cowan of Go Oak Cliff, who is a co-owner of Oddfellows, began circulating a survey/petition last week asking neighbors their level of support for the current parking situation, with options including:

- I like the valet the way it is now.
- I want all valet removed.
- Valet is fine, but needs to be relocated immediately.
- Valet should only be allowed to operate in private lots.

City Councilman Scott Griggs has said business and property owners are coming up with a plan to move the valet stand from the corner of Bishop at Seventh, where it blocks public parking spaces and unnecessarily holds up traffic. The stand would move to Bishop Avenue between Seventh and Eighth. In that case, at least some of the on-street parking on Bishop wouldn't be available for self-parking while the valet companies operate.

Like so many, I enjoy Bishop Arts. I was pulling into a spot on Seventh, the first spot away from Bishop that didn't have a cone blocking it. A valet attendant bolted over stating it was for valet only and wanted me to move or leave my keys with him; I refused and stated that it is a public, city-owned space. He backed away but was obviously irritated. It's bad enough that in so many places (Mia's on Lemmon) you have to surrender your keys in a private lot, but when it's a public space, it's even more egregious.
[STATEMENT] lemma p_eq2Q[rule_format]: "normalizeQ x \<noteq> [] \<longrightarrow> C (list2FWpolicy (normalizeQ x)) = C x \<longrightarrow> list2policy (map C (rev (normalizeQ x))) = C x" [PROOF STATE] proof (prove) goal (1 subgoal): 1. normalizeQ x \<noteq> [] \<longrightarrow> C (list2FWpolicy (normalizeQ x)) = C x \<longrightarrow> list2policy (map C (rev (normalizeQ x))) = C x [PROOF STEP] by (simp add: p_eq)
Reassignment of a struct's members with different input
theory hoare_logic_test imports "~~/src/HOL/Hoare/Separation" begin lemma "VARS x {P} SKIP {P}" apply vcg done lemma "VARS x {x=5} x:=7 {x=5}" apply vcg apply auto sorry lemma "VARS x {P} SKIP;SKIP {P}" apply vcg done lemma "VARS x {P} IF R THEN SKIP ELSE SKIP FI {Q}" apply vcg sorry lemma "VARS x {P} WHILE S INV{I} DO SKIP OD {Q}" apply vcg sorry end
theory Check imports Submission begin theorem big_ls: "(c,s) \<Rightarrow> t \<Longrightarrow> \<exists>sts. ls c s sts t" by (rule Submission.big_ls) theorem ls_big: "ls c s ss t \<Longrightarrow> (c,s) \<Rightarrow> t" by (rule Submission.ls_big) lemma ls_step: "\<lbrakk>ls c s ss t; c \<noteq> SKIP\<rbrakk> \<Longrightarrow> (case ss of [] \<Rightarrow> (c,s) \<rightarrow> (SKIP,t) | (x#_) \<Rightarrow> \<exists>c'. (c,s) \<rightarrow> (c',x))" by (rule Submission.ls_step) lemma ls_ls: "\<lbrakk>ls c s\<^sub>1 (s\<^sub>2#ss) s\<^sub>3; (c,s\<^sub>1) \<rightarrow> (c',s\<^sub>2)\<rbrakk> \<Longrightarrow> ls c' s\<^sub>2 ss s\<^sub>3" by (rule Submission.ls_ls) theorem ls_steps: "ls c s\<^sub>1 (ss\<^sub>1@[s\<^sub>2]@ss\<^sub>2) t \<Longrightarrow> \<exists>c'. (c,s\<^sub>1) \<rightarrow>* (c',s\<^sub>2)" by (rule Submission.ls_steps) end
/- Copyright (c) 2015 Microsoft Corporation. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Leonardo de Moura, Jeremy Avigad, Mario Carneiro -/ import data.list.sort import data.nat.gcd import data.nat.sqrt import tactic.norm_num import tactic.wlog /-! # Prime numbers This file deals with prime numbers: natural numbers `p ≥ 2` whose only divisors are `p` and `1`. ## Important declarations All the following declarations exist in the namespace `nat`. - `prime`: the predicate that expresses that a natural number `p` is prime - `primes`: the subtype of natural numbers that are prime - `min_fac n`: the minimal prime factor of a natural number `n ≠ 1` - `exists_infinite_primes`: Euclid's theorem that there exist infinitely many prime numbers - `factors n`: the prime factorization of `n` - `factors_unique`: uniqueness of the prime factorisation -/ open bool subtype open_locale nat namespace nat /-- `prime p` means that `p` is a prime number, that is, a natural number at least 2 whose only divisors are `p` and `1`. -/ @[pp_nodot] def prime (p : ℕ) := 2 ≤ p ∧ ∀ m ∣ p, m = 1 ∨ m = p theorem prime.two_le {p : ℕ} : prime p → 2 ≤ p := and.left theorem prime.one_lt {p : ℕ} : prime p → 1 < p := prime.two_le instance prime.one_lt' (p : ℕ) [hp : _root_.fact p.prime] : _root_.fact (1 < p) := ⟨hp.1.one_lt⟩ lemma prime.ne_one {p : ℕ} (hp : p.prime) : p ≠ 1 := ne.symm $ ne_of_lt hp.one_lt theorem prime_def_lt {p : ℕ} : prime p ↔ 2 ≤ p ∧ ∀ m < p, m ∣ p → m = 1 := and_congr_right $ λ p2, forall_congr $ λ m, ⟨λ h l d, (h d).resolve_right (ne_of_lt l), λ h d, (le_of_dvd (le_of_succ_le p2) d).lt_or_eq_dec.imp_left (λ l, h l d)⟩ theorem prime_def_lt' {p : ℕ} : prime p ↔ 2 ≤ p ∧ ∀ m, 2 ≤ m → m < p → ¬ m ∣ p := prime_def_lt.trans $ and_congr_right $ λ p2, forall_congr $ λ m, ⟨λ h m2 l d, not_lt_of_ge m2 ((h l d).symm ▸ dec_trivial), λ h l d, begin rcases m with _|_|m, { rw eq_zero_of_zero_dvd d at p2, revert p2, exact dec_trivial }, { refl }, { exact (h dec_trivial l).elim d } end⟩ theorem prime_def_le_sqrt {p : ℕ} : prime p ↔ 2 ≤ p ∧ ∀ m, 2 ≤ m → m ≤ sqrt p → ¬ m ∣ p := prime_def_lt'.trans $ and_congr_right $ λ p2, ⟨λ a m m2 l, a m m2 $ lt_of_le_of_lt l $ sqrt_lt_self p2, λ a, have ∀ {m k}, m ≤ k → 1 < m → p ≠ m * k, from λ m k mk m1 e, a m m1 (le_sqrt.2 (e.symm ▸ nat.mul_le_mul_left m mk)) ⟨k, e⟩, λ m m2 l ⟨k, e⟩, begin cases (le_total m k) with mk km, { exact this mk m2 e }, { rw [mul_comm] at e, refine this km (lt_of_mul_lt_mul_right _ (zero_le m)) e, rwa [one_mul, ← e] } end⟩ theorem prime_of_coprime (n : ℕ) (h1 : 1 < n) (h : ∀ m < n, m ≠ 0 → n.coprime m) : prime n := begin refine prime_def_lt.mpr ⟨h1, λ m mlt mdvd, _⟩, have hm : m ≠ 0, { rintro rfl, rw zero_dvd_iff at mdvd, exact mlt.ne' mdvd }, exact (h m mlt hm).symm.eq_one_of_dvd mdvd, end section /-- This instance is slower than the instance `decidable_prime` defined below, but has the advantage that it works in the kernel for small values. If you need to prove that a particular number is prime, in any case you should not use `dec_trivial`, but rather `by norm_num`, which is much faster. 
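For instance, a concrete goal such as `nat.prime 101` is the kind of statement best closed with `by norm_num` rather than by asking the kernel to evaluate a decidability instance through `dec_trivial`.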
-/ local attribute [instance] def decidable_prime_1 (p : ℕ) : decidable (prime p) := decidable_of_iff' _ prime_def_lt' lemma prime.ne_zero {n : ℕ} (h : prime n) : n ≠ 0 := by { rintro rfl, revert h, dec_trivial } theorem prime.pos {p : ℕ} (pp : prime p) : 0 < p := lt_of_succ_lt pp.one_lt theorem not_prime_zero : ¬ prime 0 := by simp [prime] theorem not_prime_one : ¬ prime 1 := by simp [prime] theorem prime_two : prime 2 := dec_trivial end theorem prime.pred_pos {p : ℕ} (pp : prime p) : 0 < pred p := lt_pred_iff.2 pp.one_lt theorem succ_pred_prime {p : ℕ} (pp : prime p) : succ (pred p) = p := succ_pred_eq_of_pos pp.pos theorem dvd_prime {p m : ℕ} (pp : prime p) : m ∣ p ↔ m = 1 ∨ m = p := ⟨λ d, pp.2 m d, λ h, h.elim (λ e, e.symm ▸ one_dvd _) (λ e, e.symm ▸ dvd_rfl)⟩ theorem dvd_prime_two_le {p m : ℕ} (pp : prime p) (H : 2 ≤ m) : m ∣ p ↔ m = p := (dvd_prime pp).trans $ or_iff_right_of_imp $ not.elim $ ne_of_gt H theorem prime_dvd_prime_iff_eq {p q : ℕ} (pp : p.prime) (qp : q.prime) : p ∣ q ↔ p = q := dvd_prime_two_le qp (prime.two_le pp) theorem prime.not_dvd_one {p : ℕ} (pp : prime p) : ¬ p ∣ 1 | d := (not_le_of_gt pp.one_lt) $ le_of_dvd dec_trivial d theorem not_prime_mul {a b : ℕ} (a1 : 1 < a) (b1 : 1 < b) : ¬ prime (a * b) := λ h, ne_of_lt (nat.mul_lt_mul_of_pos_left b1 (lt_of_succ_lt a1)) $ by simpa using (dvd_prime_two_le h a1).1 (dvd_mul_right _ _) lemma not_prime_mul' {a b n : ℕ} (h : a * b = n) (h₁ : 1 < a) (h₂ : 1 < b) : ¬ prime n := by { rw ← h, exact not_prime_mul h₁ h₂ } section min_fac private lemma min_fac_lemma (n k : ℕ) (h : ¬ n < k * k) : sqrt n - k < sqrt n + 2 - k := (tsub_lt_tsub_iff_right $ le_sqrt.2 $ le_of_not_gt h).2 $ nat.lt_add_of_pos_right dec_trivial /-- If `n < k * k`, then `min_fac_aux n k = n`, if `k | n`, then `min_fac_aux n k = k`. Otherwise, `min_fac_aux n k = min_fac_aux n (k+2)` using well-founded recursion. If `n` is odd and `1 < n`, then `min_fac_aux n 3` is the smallest prime factor of `n`. -/ def min_fac_aux (n : ℕ) : ℕ → ℕ | k := if h : n < k * k then n else if k ∣ n then k else have _, from min_fac_lemma n k h, min_fac_aux (k + 2) using_well_founded {rel_tac := λ _ _, `[exact ⟨_, measure_wf (λ k, sqrt n + 2 - k)⟩]} /-- Returns the smallest prime factor of `n ≠ 1`.
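As a worked illustration of the equations below: `min_fac 12 = 2` (since `12` is even), while `min_fac 35 = 5` (the auxiliary search rejects `3` and accepts `5`). By convention `min_fac 1 = 1`, and `min_fac 0 = 2` since every prime divides `0`.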
-/ def min_fac : ℕ → ℕ | 0 := 2 | 1 := 1 | (n+2) := if 2 ∣ n then 2 else min_fac_aux (n + 2) 3 @[simp] theorem min_fac_zero : min_fac 0 = 2 := rfl @[simp] theorem min_fac_one : min_fac 1 = 1 := rfl theorem min_fac_eq : ∀ n, min_fac n = if 2 ∣ n then 2 else min_fac_aux n 3 | 0 := by simp | 1 := by simp [show 2≠1, from dec_trivial]; rw min_fac_aux; refl | (n+2) := have 2 ∣ n + 2 ↔ 2 ∣ n, from (nat.dvd_add_iff_left (by refl)).symm, by simp [min_fac, this]; congr private def min_fac_prop (n k : ℕ) := 2 ≤ k ∧ k ∣ n ∧ ∀ m, 2 ≤ m → m ∣ n → k ≤ m theorem min_fac_aux_has_prop {n : ℕ} (n2 : 2 ≤ n) (nd2 : ¬ 2 ∣ n) : ∀ k i, k = 2*i+3 → (∀ m, 2 ≤ m → m ∣ n → k ≤ m) → min_fac_prop n (min_fac_aux n k) | k := λ i e a, begin rw min_fac_aux, by_cases h : n < k*k; simp [h], { have pp : prime n := prime_def_le_sqrt.2 ⟨n2, λ m m2 l d, not_lt_of_ge l $ lt_of_lt_of_le (sqrt_lt.2 h) (a m m2 d)⟩, from ⟨n2, dvd_rfl, λ m m2 d, le_of_eq ((dvd_prime_two_le pp m2).1 d).symm⟩ }, have k2 : 2 ≤ k, { subst e, exact dec_trivial }, by_cases dk : k ∣ n; simp [dk], { exact ⟨k2, dk, a⟩ }, { refine have _, from min_fac_lemma n k h, min_fac_aux_has_prop (k+2) (i+1) (by simp [e, left_distrib]) (λ m m2 d, _), cases nat.eq_or_lt_of_le (a m m2 d) with me ml, { subst me, contradiction }, apply (nat.eq_or_lt_of_le ml).resolve_left, intro me, rw [← me, e] at d, change 2 * (i + 2) ∣ n at d, have := dvd_of_mul_right_dvd d, contradiction } end using_well_founded {rel_tac := λ _ _, `[exact ⟨_, measure_wf (λ k, sqrt n + 2 - k)⟩]} theorem min_fac_has_prop {n : ℕ} (n1 : n ≠ 1) : min_fac_prop n (min_fac n) := begin by_cases n0 : n = 0, {simp [n0, min_fac_prop, ge]}, have n2 : 2 ≤ n, { revert n0 n1, rcases n with _|_|_; exact dec_trivial }, simp [min_fac_eq], by_cases d2 : 2 ∣ n; simp [d2], { exact ⟨le_refl _, d2, λ k k2 d, k2⟩ }, { refine min_fac_aux_has_prop n2 d2 3 0 rfl (λ m m2 d, (nat.eq_or_lt_of_le m2).resolve_left (mt _ d2)), exact λ e, e.symm ▸ d } end theorem min_fac_dvd (n : ℕ) : min_fac n ∣ n := if n1 : n = 1 then by simp [n1] else (min_fac_has_prop n1).2.1 theorem min_fac_prime {n : ℕ} (n1 : n ≠ 1) : prime (min_fac n) := let ⟨f2, fd, a⟩ := min_fac_has_prop n1 in prime_def_lt'.2 ⟨f2, λ m m2 l d, not_le_of_gt l (a m m2 (d.trans fd))⟩ theorem min_fac_le_of_dvd {n : ℕ} : ∀ {m : ℕ}, 2 ≤ m → m ∣ n → min_fac n ≤ m := by by_cases n1 : n = 1; [exact λ m m2 d, n1.symm ▸ le_trans dec_trivial m2, exact (min_fac_has_prop n1).2.2] theorem min_fac_pos (n : ℕ) : 0 < min_fac n := by by_cases n1 : n = 1; [exact n1.symm ▸ dec_trivial, exact (min_fac_prime n1).pos] theorem min_fac_le {n : ℕ} (H : 0 < n) : min_fac n ≤ n := le_of_dvd H (min_fac_dvd n) theorem le_min_fac {m n : ℕ} : n = 1 ∨ m ≤ min_fac n ↔ ∀ p, prime p → p ∣ n → m ≤ p := ⟨λ h p pp d, h.elim (by rintro rfl; cases pp.not_dvd_one d) (λ h, le_trans h $ min_fac_le_of_dvd pp.two_le d), λ H, or_iff_not_imp_left.2 $ λ n1, H _ (min_fac_prime n1) (min_fac_dvd _)⟩ theorem le_min_fac' {m n : ℕ} : n = 1 ∨ m ≤ min_fac n ↔ ∀ p, 2 ≤ p → p ∣ n → m ≤ p := ⟨λ h p (pp:1<p) d, h.elim (by rintro rfl; cases not_le_of_lt pp (le_of_dvd dec_trivial d)) (λ h, le_trans h $ min_fac_le_of_dvd pp d), λ H, le_min_fac.2 (λ p pp d, H p pp.two_le d)⟩ theorem prime_def_min_fac {p : ℕ} : prime p ↔ 2 ≤ p ∧ min_fac p = p := ⟨λ pp, ⟨pp.two_le, let ⟨f2, fd, a⟩ := min_fac_has_prop $ ne_of_gt pp.one_lt in ((dvd_prime pp).1 fd).resolve_left (ne_of_gt f2)⟩, λ ⟨p2, e⟩, e ▸ min_fac_prime (ne_of_gt p2)⟩ /-- This instance is faster in the virtual machine than `decidable_prime_1`, but slower in the kernel. 
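It decides `prime p` via `prime_def_min_fac`, i.e. by checking `2 ≤ p` and `min_fac p = p`, so compiled evaluation amounts to trial division by `2` and by odd candidates up to `sqrt p`.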
If you need to prove that a particular number is prime, in any case you should not use `dec_trivial`, but rather `by norm_num`, which is much faster. -/ instance decidable_prime (p : ℕ) : decidable (prime p) := decidable_of_iff' _ prime_def_min_fac theorem not_prime_iff_min_fac_lt {n : ℕ} (n2 : 2 ≤ n) : ¬ prime n ↔ min_fac n < n := (not_congr $ prime_def_min_fac.trans $ and_iff_right n2).trans $ (lt_iff_le_and_ne.trans $ and_iff_right $ min_fac_le $ le_of_succ_le n2).symm lemma min_fac_le_div {n : ℕ} (pos : 0 < n) (np : ¬ prime n) : min_fac n ≤ n / min_fac n := match min_fac_dvd n with | ⟨0, h0⟩ := absurd pos $ by rw [h0, mul_zero]; exact dec_trivial | ⟨1, h1⟩ := begin rw mul_one at h1, rw [prime_def_min_fac, not_and_distrib, ← h1, eq_self_iff_true, not_true, or_false, not_le] at np, rw [le_antisymm (le_of_lt_succ np) (succ_le_of_lt pos), min_fac_one, nat.div_one] end | ⟨(x+2), hx⟩ := begin conv_rhs { congr, rw hx }, rw [nat.mul_div_cancel_left _ (min_fac_pos _)], exact min_fac_le_of_dvd dec_trivial ⟨min_fac n, by rwa mul_comm⟩ end end /-- The square of the smallest prime factor of a composite number `n` is at most `n`. -/ lemma min_fac_sq_le_self {n : ℕ} (w : 0 < n) (h : ¬ prime n) : (min_fac n)^2 ≤ n := have t : (min_fac n) ≤ (n/min_fac n) := min_fac_le_div w h, calc (min_fac n)^2 = (min_fac n) * (min_fac n) : sq (min_fac n) ... ≤ (n/min_fac n) * (min_fac n) : nat.mul_le_mul_right (min_fac n) t ... ≤ n : div_mul_le_self n (min_fac n) @[simp] lemma min_fac_eq_one_iff {n : ℕ} : min_fac n = 1 ↔ n = 1 := begin split, { intro h, by_contradiction hn, have := min_fac_prime hn, rw h at this, exact not_prime_one this, }, { rintro rfl, refl, } end @[simp] lemma min_fac_eq_two_iff (n : ℕ) : min_fac n = 2 ↔ 2 ∣ n := begin split, { intro h, convert min_fac_dvd _, rw h, }, { intro h, have ub := min_fac_le_of_dvd (le_refl 2) h, have lb := min_fac_pos n, apply ub.eq_or_lt.resolve_right (λ h', _), have := le_antisymm (nat.succ_le_of_lt lb) (lt_succ_iff.mp h'), rw [eq_comm, nat.min_fac_eq_one_iff] at this, subst this, exact not_lt_of_le (le_of_dvd zero_lt_one h) one_lt_two } end end min_fac theorem exists_dvd_of_not_prime {n : ℕ} (n2 : 2 ≤ n) (np : ¬ prime n) : ∃ m, m ∣ n ∧ m ≠ 1 ∧ m ≠ n := ⟨min_fac n, min_fac_dvd _, ne_of_gt (min_fac_prime (ne_of_gt n2)).one_lt, ne_of_lt $ (not_prime_iff_min_fac_lt n2).1 np⟩ theorem exists_dvd_of_not_prime2 {n : ℕ} (n2 : 2 ≤ n) (np : ¬ prime n) : ∃ m, m ∣ n ∧ 2 ≤ m ∧ m < n := ⟨min_fac n, min_fac_dvd _, (min_fac_prime (ne_of_gt n2)).two_le, (not_prime_iff_min_fac_lt n2).1 np⟩ theorem exists_prime_and_dvd {n : ℕ} (n2 : 2 ≤ n) : ∃ p, prime p ∧ p ∣ n := ⟨min_fac n, min_fac_prime (ne_of_gt n2), min_fac_dvd _⟩ /-- Euclid's theorem on the **infinitude of primes**. Here given in the form: for every `n`, there exists a prime number `p ≥ n`. -/ theorem exists_infinite_primes (n : ℕ) : ∃ p, n ≤ p ∧ prime p := let p := min_fac (n! + 1) in have f1 : n! 
+ 1 ≠ 1, from ne_of_gt $ succ_lt_succ $ factorial_pos _, have pp : prime p, from min_fac_prime f1, have np : n ≤ p, from le_of_not_ge $ λ h, have h₁ : p ∣ n!, from dvd_factorial (min_fac_pos _) h, have h₂ : p ∣ 1, from (nat.dvd_add_iff_right h₁).2 (min_fac_dvd _), pp.not_dvd_one h₂, ⟨p, np, pp⟩ lemma prime.eq_two_or_odd {p : ℕ} (hp : prime p) : p = 2 ∨ p % 2 = 1 := p.mod_two_eq_zero_or_one.imp_left (λ h, ((hp.2 2 (dvd_of_mod_eq_zero h)).resolve_left dec_trivial).symm) theorem coprime_of_dvd {m n : ℕ} (H : ∀ k, prime k → k ∣ m → ¬ k ∣ n) : coprime m n := begin have g1 : 1 ≤ gcd m n, { refine nat.succ_le_of_lt (pos_iff_ne_zero.mpr (λ g0, _)), rw [eq_zero_of_gcd_eq_zero_left g0, eq_zero_of_gcd_eq_zero_right g0] at H, exact H 2 prime_two (dvd_zero _) (dvd_zero _) }, rw [coprime_iff_gcd_eq_one, eq_comm], refine g1.lt_or_eq.resolve_left (λ g2, _), obtain ⟨p, hp, hpdvd⟩ := exists_prime_and_dvd (succ_le_of_lt g2), apply H p hp; apply dvd_trans hpdvd, { exact gcd_dvd_left _ _ }, { exact gcd_dvd_right _ _ } end theorem coprime_of_dvd' {m n : ℕ} (H : ∀ k, prime k → k ∣ m → k ∣ n → k ∣ 1) : coprime m n := coprime_of_dvd $ λk kp km kn, not_le_of_gt kp.one_lt $ le_of_dvd zero_lt_one $ H k kp km kn theorem factors_lemma {k} : (k+2) / min_fac (k+2) < k+2 := div_lt_self dec_trivial (min_fac_prime dec_trivial).one_lt /-- `factors n` is the prime factorization of `n`, listed in increasing order. -/ def factors : ℕ → list ℕ | 0 := [] | 1 := [] | n@(k+2) := let m := min_fac n in have n / m < n := factors_lemma, m :: factors (n / m) @[simp] lemma factors_zero : factors 0 = [] := by rw factors @[simp] lemma factors_one : factors 1 = [] := by rw factors lemma prime_of_mem_factors : ∀ {n p}, p ∈ factors n → prime p | 0 := by simp | 1 := by simp | n@(k+2) := λ p h, let m := min_fac n in have n / m < n := factors_lemma, have h₁ : p = m ∨ p ∈ (factors (n / m)) := (list.mem_cons_iff _ _ _).1 (by rwa [factors] at h), or.cases_on h₁ (λ h₂, h₂.symm ▸ min_fac_prime dec_trivial) prime_of_mem_factors lemma prod_factors : ∀ {n}, 0 < n → list.prod (factors n) = n | 0 := by simp | 1 := by simp | n@(k+2) := λ h, let m := min_fac n in have n / m < n := factors_lemma, show (factors n).prod = n, from have h₁ : 0 < n / m := nat.pos_of_ne_zero $ λ h, have n = 0 * m := (nat.div_eq_iff_eq_mul_left (min_fac_pos _) (min_fac_dvd _)).1 h, by rw zero_mul at this; exact (show k + 2 ≠ 0, from dec_trivial) this, by rw [factors, list.prod_cons, prod_factors h₁, nat.mul_div_cancel' (min_fac_dvd _)] lemma factors_prime {p : ℕ} (hp : nat.prime p) : p.factors = [p] := begin have : p = (p - 2) + 2 := (tsub_eq_iff_eq_add_of_le hp.1).mp rfl, rw [this, nat.factors], simp only [eq.symm this], have : nat.min_fac p = p := (nat.prime_def_min_fac.mp hp).2, split, { exact this, }, { simp only [this, nat.factors, nat.div_self (nat.prime.pos hp)], }, end lemma factors_chain : ∀ {n a}, (∀ p, prime p → p ∣ n → a ≤ p) → list.chain (≤) a (factors n) | 0 := λ a h, by simp | 1 := λ a h, by simp | n@(k+2) := λ a h, let m := min_fac n in have n / m < n := factors_lemma, begin rw factors, refine list.chain.cons ((le_min_fac.2 h).resolve_left dec_trivial) (factors_chain _), exact λ p pp d, min_fac_le_of_dvd pp.two_le (d.trans $ div_dvd_of_dvd $ min_fac_dvd _), end lemma factors_chain_2 (n) : list.chain (≤) 2 (factors n) := factors_chain $ λ p pp _, pp.two_le lemma factors_chain' (n) : list.chain' (≤) (factors n) := @list.chain'.tail _ _ (_::_) (factors_chain_2 _) lemma factors_sorted (n : ℕ) : list.sorted (≤) (factors n) := (list.chain'_iff_pairwise (@le_trans _ _)).1 
(factors_chain' _) /-- `factors` can be constructed inductively by extracting `min_fac`, for sufficiently large `n`. -/ lemma factors_add_two (n : ℕ) : factors (n+2) = min_fac (n+2) :: factors ((n+2) / min_fac (n+2)) := by rw factors @[simp] lemma factors_eq_nil (n : ℕ) : n.factors = [] ↔ n = 0 ∨ n = 1 := begin split; intro h, { rcases n with (_ | _ | n), { exact or.inl rfl }, { exact or.inr rfl }, { rw factors at h, injection h }, }, { rcases h with (rfl | rfl), { exact factors_zero }, { exact factors_one }, } end lemma eq_of_perm_factors {a b : ℕ} (ha : 0 < a) (hb : 0 < b) (h : a.factors ~ b.factors) : a = b := by simpa [prod_factors ha, prod_factors hb] using list.perm.prod_eq h lemma eq_of_count_factors_eq {a b : ℕ} (ha : 0 < a) (hb : 0 < b) (h : ∀ p : ℕ, list.count p a.factors = list.count p b.factors) : a = b := eq_of_perm_factors ha hb (list.perm_iff_count.mpr h) theorem prime.coprime_iff_not_dvd {p n : ℕ} (pp : prime p) : coprime p n ↔ ¬ p ∣ n := ⟨λ co d, pp.not_dvd_one $ co.dvd_of_dvd_mul_left (by simp [d]), λ nd, coprime_of_dvd $ λ m m2 mp, ((prime_dvd_prime_iff_eq m2 pp).1 mp).symm ▸ nd⟩ theorem prime.dvd_iff_not_coprime {p n : ℕ} (pp : prime p) : p ∣ n ↔ ¬ coprime p n := iff_not_comm.2 pp.coprime_iff_not_dvd theorem prime.not_coprime_iff_dvd {m n : ℕ} : ¬ coprime m n ↔ ∃p, prime p ∧ p ∣ m ∧ p ∣ n := begin apply iff.intro, { intro h, exact ⟨min_fac (gcd m n), min_fac_prime h, ((min_fac_dvd (gcd m n)).trans (gcd_dvd_left m n)), ((min_fac_dvd (gcd m n)).trans (gcd_dvd_right m n))⟩ }, { intro h, cases h with p hp, apply nat.not_coprime_of_dvd_of_dvd (prime.one_lt hp.1) hp.2.1 hp.2.2 } end theorem prime.dvd_mul {p m n : ℕ} (pp : prime p) : p ∣ m * n ↔ p ∣ m ∨ p ∣ n := ⟨λ H, or_iff_not_imp_left.2 $ λ h, (pp.coprime_iff_not_dvd.2 h).dvd_of_dvd_mul_left H, or.rec (λ h : p ∣ m, h.mul_right _) (λ h : p ∣ n, h.mul_left _)⟩ theorem prime.not_dvd_mul {p m n : ℕ} (pp : prime p) (Hm : ¬ p ∣ m) (Hn : ¬ p ∣ n) : ¬ p ∣ m * n := mt pp.dvd_mul.1 $ by simp [Hm, Hn] theorem prime.dvd_of_dvd_pow {p m n : ℕ} (pp : prime p) (h : p ∣ m^n) : p ∣ m := begin induction n with n IH, { exact pp.not_dvd_one.elim h }, { rw pow_succ at h, exact (pp.dvd_mul.1 h).elim id IH } end lemma prime.pow_dvd_of_dvd_mul_right {p n a b : ℕ} (hp : p.prime) (h : p ^ n ∣ a * b) (hpb : ¬ p ∣ b) : p ^ n ∣ a := begin induction n with n ih, { simp only [one_dvd, pow_zero] }, { rw [pow_succ'] at *, rcases ih ((dvd_mul_right _ _).trans h) with ⟨c, rfl⟩, rw [mul_assoc] at h, rcases hp.dvd_mul.1 (nat.dvd_of_mul_dvd_mul_left (pow_pos hp.pos _) h) with ⟨d, rfl⟩|⟨d, rfl⟩, { rw [← mul_assoc], exact dvd_mul_right _ _ }, { exact (hpb (dvd_mul_right _ _)).elim } } end lemma prime.pow_dvd_of_dvd_mul_left {p n a b : ℕ} (hp : p.prime) (h : p ^ n ∣ a * b) (hpb : ¬ p ∣ a) : p ^ n ∣ b := by rw [mul_comm] at h; exact hp.pow_dvd_of_dvd_mul_right h hpb lemma prime.pow_not_prime {x n : ℕ} (hn : 2 ≤ n) : ¬ (x ^ n).prime := λ hp, (hp.2 x $ dvd_trans ⟨x, sq _⟩ (pow_dvd_pow _ hn)).elim (λ hx1, hp.ne_one $ hx1.symm ▸ one_pow _) (λ hxn, lt_irrefl x $ calc x = x ^ 1 : (pow_one _).symm ... < x ^ n : nat.pow_right_strict_mono (hxn.symm ▸ hp.two_le) hn ... 
= x : hxn.symm) lemma prime.pow_not_prime' {x : ℕ} : ∀ {n : ℕ}, n ≠ 1 → ¬ (x ^ n).prime | 0 := λ _, not_prime_one | 1 := λ h, (h rfl).elim | (n+2) := λ _, prime.pow_not_prime le_add_self lemma prime.eq_one_of_pow {x n : ℕ} (h : (x ^ n).prime) : n = 1 := not_imp_not.mp prime.pow_not_prime' h lemma prime.pow_eq_iff {p a k : ℕ} (hp : p.prime) : a ^ k = p ↔ a = p ∧ k = 1 := begin refine ⟨_, λ h, by rw [h.1, h.2, pow_one]⟩, rintro rfl, rw [hp.eq_one_of_pow, eq_self_iff_true, and_true, pow_one], end lemma prime.mul_eq_prime_sq_iff {x y p : ℕ} (hp : p.prime) (hx : x ≠ 1) (hy : y ≠ 1) : x * y = p ^ 2 ↔ x = p ∧ y = p := ⟨λ h, have pdvdxy : p ∣ x * y, by rw h; simp [sq], begin wlog := hp.dvd_mul.1 pdvdxy using x y, cases case with a ha, have hap : a ∣ p, from ⟨y, by rwa [ha, sq, mul_assoc, nat.mul_right_inj hp.pos, eq_comm] at h⟩, exact ((nat.dvd_prime hp).1 hap).elim (λ _, by clear_aux_decl; simp [*, sq, nat.mul_right_inj hp.pos] at * {contextual := tt}) (λ _, by clear_aux_decl; simp [*, sq, mul_comm, mul_assoc, nat.mul_right_inj hp.pos, nat.mul_right_eq_self_iff hp.pos] at * {contextual := tt}) end, λ ⟨h₁, h₂⟩, h₁.symm ▸ h₂.symm ▸ (sq _).symm⟩ lemma prime.dvd_factorial : ∀ {n p : ℕ} (hp : prime p), p ∣ n! ↔ p ≤ n | 0 p hp := iff_of_false hp.not_dvd_one (not_le_of_lt hp.pos) | (n+1) p hp := begin rw [factorial_succ, hp.dvd_mul, prime.dvd_factorial hp], exact ⟨λ h, h.elim (le_of_dvd (succ_pos _)) le_succ_of_le, λ h, (_root_.lt_or_eq_of_le h).elim (or.inr ∘ le_of_lt_succ) (λ h, or.inl $ by rw h)⟩ end theorem prime.coprime_pow_of_not_dvd {p m a : ℕ} (pp : prime p) (h : ¬ p ∣ a) : coprime a (p^m) := (pp.coprime_iff_not_dvd.2 h).symm.pow_right _ theorem coprime_primes {p q : ℕ} (pp : prime p) (pq : prime q) : coprime p q ↔ p ≠ q := pp.coprime_iff_not_dvd.trans $ not_congr $ dvd_prime_two_le pq pp.two_le theorem coprime_pow_primes {p q : ℕ} (n m : ℕ) (pp : prime p) (pq : prime q) (h : p ≠ q) : coprime (p^n) (q^m) := ((coprime_primes pp pq).2 h).pow _ _ theorem coprime_or_dvd_of_prime {p} (pp : prime p) (i : ℕ) : coprime p i ∨ p ∣ i := by rw [pp.dvd_iff_not_coprime]; apply em lemma coprime_of_lt_prime {n p} (n_pos : 0 < n) (hlt : n < p) (pp : prime p) : coprime p n := (coprime_or_dvd_of_prime pp n).resolve_right $ λ h, lt_le_antisymm hlt (le_of_dvd n_pos h) lemma eq_or_coprime_of_le_prime {n p} (n_pos : 0 < n) (hle : n ≤ p) (pp : prime p) : p = n ∨ coprime p n := hle.eq_or_lt.imp eq.symm (λ h, coprime_of_lt_prime n_pos h pp) theorem dvd_prime_pow {p : ℕ} (pp : prime p) {m i : ℕ} : i ∣ (p^m) ↔ ∃ k ≤ m, i = p^k := begin induction m with m IH generalizing i, { simp }, by_cases p ∣ i, { cases h with a e, subst e, rw [pow_succ, nat.mul_dvd_mul_iff_left pp.pos, IH], split; intro h; rcases h with ⟨k, h, e⟩, { exact ⟨succ k, succ_le_succ h, by rw [e, pow_succ]; refl⟩ }, cases k with k, { apply pp.not_dvd_one.elim, rw [← pow_zero, ← e], apply dvd_mul_right }, { refine ⟨k, le_of_succ_le_succ h, _⟩, rwa [mul_comm, pow_succ', nat.mul_left_inj pp.pos] at e } }, { split; intro d, { rw (pp.coprime_pow_of_not_dvd h).eq_one_of_dvd d, exact ⟨0, zero_le _, (pow_zero p).symm⟩ }, { rcases d with ⟨k, l, rfl⟩, exact pow_dvd_pow _ l } } end /-- If `p` is prime, and `a` doesn't divide `p^k`, but `a` does divide `p^(k+1)` then `a = p^(k+1)`. 
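For example, with `p = 2` and `k = 2`: among the divisors `1, 2, 4, 8` of `2^3 = 8`, only `8` fails to divide `2^2 = 4`, so such an `a` must be `8 = 2^(2+1)`.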
-/ lemma eq_prime_pow_of_dvd_least_prime_pow {a p k : ℕ} (pp : prime p) (h₁ : ¬(a ∣ p^k)) (h₂ : a ∣ p^(k+1)) : a = p^(k+1) := begin obtain ⟨l, ⟨h, rfl⟩⟩ := (dvd_prime_pow pp).1 h₂, congr, exact le_antisymm h (not_le.1 ((not_congr (pow_dvd_pow_iff_le_right (prime.one_lt pp))).1 h₁)), end lemma ne_one_iff_exists_prime_dvd : ∀ {n}, n ≠ 1 ↔ ∃ p : ℕ, p.prime ∧ p ∣ n | 0 := by simpa using (Exists.intro 2 nat.prime_two) | 1 := by simp [nat.not_prime_one] | (n+2) := let a := n+2 in let ha : a ≠ 1 := nat.succ_succ_ne_one n in begin simp only [true_iff, ne.def, not_false_iff, ha], exact ⟨a.min_fac, nat.min_fac_prime ha, a.min_fac_dvd⟩, end lemma eq_one_iff_not_exists_prime_dvd {n : ℕ} : n = 1 ↔ ∀ p : ℕ, p.prime → ¬p ∣ n := by simpa using not_iff_not.mpr ne_one_iff_exists_prime_dvd section open list lemma mem_list_primes_of_dvd_prod {p : ℕ} (hp : prime p) : ∀ {l : list ℕ}, (∀ p ∈ l, prime p) → p ∣ prod l → p ∈ l | [] := λ h₁ h₂, absurd h₂ (prime.not_dvd_one hp) | (q :: l) := λ h₁ h₂, have h₃ : p ∣ q * prod l := @prod_cons _ _ l q ▸ h₂, have hq : prime q := h₁ q (mem_cons_self _ _), or.cases_on ((prime.dvd_mul hp).1 h₃) (λ h, by rw [prime.dvd_iff_not_coprime hp, coprime_primes hp hq, ne.def, not_not] at h; exact h ▸ mem_cons_self _ _) (λ h, have hl : ∀ p ∈ l, prime p := λ p hlp, h₁ p ((mem_cons_iff _ _ _).2 (or.inr hlp)), (mem_cons_iff _ _ _).2 (or.inr (mem_list_primes_of_dvd_prod hl h))) lemma mem_factors_iff_dvd {n p : ℕ} (hn : 0 < n) (hp : prime p) : p ∈ factors n ↔ p ∣ n := ⟨λ h, prod_factors hn ▸ list.dvd_prod h, λ h, mem_list_primes_of_dvd_prod hp (@prime_of_mem_factors n) ((prod_factors hn).symm ▸ h)⟩ lemma dvd_of_mem_factors {n p : ℕ} (h : p ∈ n.factors) : p ∣ n := begin rcases n.eq_zero_or_pos with rfl | hn, { exact dvd_zero p }, { rwa ←mem_factors_iff_dvd hn (prime_of_mem_factors h) } end lemma mem_factors {n p} (hn : 0 < n) : p ∈ factors n ↔ prime p ∧ p ∣ n := ⟨λ h, ⟨prime_of_mem_factors h, (mem_factors_iff_dvd hn $ prime_of_mem_factors h).mp h⟩, λ ⟨hprime, hdvd⟩, (mem_factors_iff_dvd hn hprime).mpr hdvd⟩ lemma factors_subset_right {n k : ℕ} (h : k ≠ 0) : n.factors ⊆ (n * k).factors := begin cases n, { rw zero_mul, refl }, cases n, { rw factors_one, apply list.nil_subset }, intros p hp, rw mem_factors succ_pos' at hp, rw mem_factors (nat.mul_pos succ_pos' (nat.pos_of_ne_zero h)), exact ⟨hp.1, hp.2.mul_right k⟩, end lemma factors_subset_of_dvd {n k : ℕ} (h : n ∣ k) (h' : k ≠ 0) : n.factors ⊆ k.factors := begin obtain ⟨a, rfl⟩ := h, exact factors_subset_right (right_ne_zero_of_mul h'), end lemma perm_of_prod_eq_prod : ∀ {l₁ l₂ : list ℕ}, prod l₁ = prod l₂ → (∀ p ∈ l₁, prime p) → (∀ p ∈ l₂, prime p) → l₁ ~ l₂ | [] [] _ _ _ := perm.nil | [] (a :: l) h₁ h₂ h₃ := have ha : a ∣ 1 := @prod_nil ℕ _ ▸ h₁.symm ▸ (@prod_cons _ _ l a).symm ▸ dvd_mul_right _ _, absurd ha (prime.not_dvd_one (h₃ a (mem_cons_self _ _))) | (a :: l) [] h₁ h₂ h₃ := have ha : a ∣ 1 := @prod_nil ℕ _ ▸ h₁ ▸ (@prod_cons _ _ l a).symm ▸ dvd_mul_right _ _, absurd ha (prime.not_dvd_one (h₂ a (mem_cons_self _ _))) | (a :: l₁) (b :: l₂) h hl₁ hl₂ := have hl₁' : ∀ p ∈ l₁, prime p := λ p hp, hl₁ p (mem_cons_of_mem _ hp), have hl₂' : ∀ p ∈ (b :: l₂).erase a, prime p := λ p hp, hl₂ p (mem_of_mem_erase hp), have ha : a ∈ (b :: l₂) := mem_list_primes_of_dvd_prod (hl₁ a (mem_cons_self _ _)) hl₂ (h ▸ by rw prod_cons; exact dvd_mul_right _ _), have hb : b :: l₂ ~ a :: (b :: l₂).erase a := perm_cons_erase ha, have hl : prod l₁ = prod ((b :: l₂).erase a) := (nat.mul_right_inj (prime.pos (hl₁ a (mem_cons_self _ _)))).1 $ by rwa [← prod_cons, ← 
prod_cons, ← hb.prod_eq], perm.trans ((perm_of_prod_eq_prod hl hl₁' hl₂').cons _) hb.symm /-- **Fundamental theorem of arithmetic**-/ lemma factors_unique {n : ℕ} {l : list ℕ} (h₁ : prod l = n) (h₂ : ∀ p ∈ l, prime p) : l ~ factors n := begin refine perm_of_prod_eq_prod _ h₂ (λ p, prime_of_mem_factors), rw h₁, refine (prod_factors (nat.pos_of_ne_zero _)).symm, rintro rfl, rw prod_eq_zero_iff at h₁, exact prime.ne_zero (h₂ 0 h₁) rfl, end lemma prime.factors_pow {p : ℕ} (hp : p.prime) (n : ℕ) : (p ^ n).factors = list.repeat p n := begin symmetry, rw ← list.repeat_perm, apply nat.factors_unique (list.prod_repeat p n), intros q hq, rwa eq_of_mem_repeat hq, end /-- For positive `a` and `b`, the prime factors of `a * b` are the union of those of `a` and `b` -/ lemma perm_factors_mul_of_pos {a b : ℕ} (ha : 0 < a) (hb : 0 < b) : (a * b).factors ~ a.factors ++ b.factors := begin refine (factors_unique _ _).symm, { rw [list.prod_append, prod_factors ha, prod_factors hb] }, { intros p hp, rw list.mem_append at hp, cases hp; exact prime_of_mem_factors hp }, end /-- For coprime `a` and `b`, the prime factors of `a * b` are the union of those of `a` and `b` -/ lemma perm_factors_mul_of_coprime {a b : ℕ} (hab : coprime a b) : (a * b).factors ~ a.factors ++ b.factors := begin rcases a.eq_zero_or_pos with rfl | ha, { simp [(coprime_zero_left _).mp hab] }, rcases b.eq_zero_or_pos with rfl | hb, { simp [(coprime_zero_right _).mp hab] }, exact perm_factors_mul_of_pos ha hb, end /-- For positive `a` and `b`, the power of `p` in `a * b` is the sum of the powers in `a` and `b` -/ lemma count_factors_mul_of_pos {p a b : ℕ} (ha : 0 < a) (hb : 0 < b) : list.count p (a * b).factors = list.count p a.factors + list.count p b.factors := by rw [perm_iff_count.mp (perm_factors_mul_of_pos ha hb) p, count_append] /-- For coprime `a` and `b`, the power of `p` in `a * b` is the sum of the powers in `a` and `b` -/ lemma count_factors_mul_of_coprime {p a b : ℕ} (hab : coprime a b) : list.count p (a * b).factors = list.count p a.factors + list.count p b.factors := by rw [perm_iff_count.mp (perm_factors_mul_of_coprime hab) p, count_append] /-- For any `p`, the power of `p` in `n^k` is `k` times the power in `n` -/ lemma factors_count_pow {n k p : ℕ} : count p (n ^ k).factors = k * count p n.factors := begin induction k with k IH, { simp }, rcases n.eq_zero_or_pos with rfl | hn, { simp [zero_pow (succ_pos k), count_nil, factors_zero, mul_zero] }, rw [pow_succ n k, perm_iff_count.mp (perm_factors_mul_of_pos hn (pow_pos hn k)) p], rw [list.count_append, IH, add_comm, mul_comm, ←mul_succ (count p n.factors) k, mul_comm], end end lemma succ_dvd_or_succ_dvd_of_succ_sum_dvd_mul {p : ℕ} (p_prime : prime p) {m n k l : ℕ} (hpm : p ^ k ∣ m) (hpn : p ^ l ∣ n) (hpmn : p ^ (k+l+1) ∣ m*n) : p ^ (k+1) ∣ m ∨ p ^ (l+1) ∣ n := have hpd : p^(k+l)*p ∣ m*n, by rwa pow_succ' at hpmn, have hpd2 : p ∣ (m*n) / p ^ (k+l), from dvd_div_of_mul_dvd hpd, have hpd3 : p ∣ (m*n) / (p^k * p^l), by simpa [pow_add] using hpd2, have hpd4 : p ∣ (m / p^k) * (n / p^l), by simpa [nat.div_mul_div hpm hpn] using hpd3, have hpd5 : p ∣ (m / p^k) ∨ p ∣ (n / p^l), from (prime.dvd_mul p_prime).1 hpd4, suffices p^k*p ∣ m ∨ p^l*p ∣ n, by rwa [pow_succ', pow_succ'], hpd5.elim (assume : p ∣ m / p ^ k, or.inl $ mul_dvd_of_dvd_div hpm this) (assume : p ∣ n / p ^ l, or.inr $ mul_dvd_of_dvd_div hpn this) /-- The type of prime numbers -/ def primes := {p : ℕ // p.prime} namespace primes instance : has_repr nat.primes := ⟨λ p, repr p.val⟩ instance inhabited_primes : inhabited primes := 
⟨⟨2, prime_two⟩⟩ instance coe_nat : has_coe nat.primes ℕ := ⟨subtype.val⟩ theorem coe_nat_inj (p q : nat.primes) : (p : ℕ) = (q : ℕ) → p = q := λ h, subtype.eq h end primes instance monoid.prime_pow {α : Type*} [monoid α] : has_pow α primes := ⟨λ x p, x^p.val⟩ end nat /-! ### Primality prover -/ open norm_num namespace tactic namespace norm_num lemma is_prime_helper (n : ℕ) (h₁ : 1 < n) (h₂ : nat.min_fac n = n) : nat.prime n := nat.prime_def_min_fac.2 ⟨h₁, h₂⟩ lemma min_fac_bit0 (n : ℕ) : nat.min_fac (bit0 n) = 2 := by simp [nat.min_fac_eq, show 2 ∣ bit0 n, by simp [bit0_eq_two_mul n]] /-- A predicate representing partial progress in a proof of `min_fac`. -/ def min_fac_helper (n k : ℕ) : Prop := 0 < k ∧ bit1 k ≤ nat.min_fac (bit1 n) theorem min_fac_helper.n_pos {n k : ℕ} (h : min_fac_helper n k) : 0 < n := pos_iff_ne_zero.2 $ λ e, by rw e at h; exact not_le_of_lt (nat.bit1_lt h.1) h.2 lemma min_fac_ne_bit0 {n k : ℕ} : nat.min_fac (bit1 n) ≠ bit0 k := begin rw bit0_eq_two_mul, refine (λ e, absurd ((nat.dvd_add_iff_right _).2 (dvd_trans ⟨_, e⟩ (nat.min_fac_dvd _))) _); simp end lemma min_fac_helper_0 (n : ℕ) (h : 0 < n) : min_fac_helper n 1 := begin refine ⟨zero_lt_one, lt_of_le_of_ne _ min_fac_ne_bit0.symm⟩, rw nat.succ_le_iff, refine lt_of_le_of_ne (nat.min_fac_pos _) (λ e, nat.not_prime_one _), rw e, exact nat.min_fac_prime (nat.bit1_lt h).ne', end lemma min_fac_helper_1 {n k k' : ℕ} (e : k + 1 = k') (np : nat.min_fac (bit1 n) ≠ bit1 k) (h : min_fac_helper n k) : min_fac_helper n k' := begin rw ← e, refine ⟨nat.succ_pos _, (lt_of_le_of_ne (lt_of_le_of_ne _ _ : k+1+k < _) min_fac_ne_bit0.symm : bit0 (k+1) < _)⟩, { rw add_right_comm, exact h.2 }, { rw add_right_comm, exact np.symm } end lemma min_fac_helper_2 (n k k' : ℕ) (e : k + 1 = k') (np : ¬ nat.prime (bit1 k)) (h : min_fac_helper n k) : min_fac_helper n k' := begin refine min_fac_helper_1 e _ h, intro e₁, rw ← e₁ at np, exact np (nat.min_fac_prime $ ne_of_gt $ nat.bit1_lt h.n_pos) end lemma min_fac_helper_3 (n k k' c : ℕ) (e : k + 1 = k') (nc : bit1 n % bit1 k = c) (c0 : 0 < c) (h : min_fac_helper n k) : min_fac_helper n k' := begin refine min_fac_helper_1 e _ h, refine mt _ (ne_of_gt c0), intro e₁, rw [← nc, ← nat.dvd_iff_mod_eq_zero, ← e₁], apply nat.min_fac_dvd end lemma min_fac_helper_4 (n k : ℕ) (hd : bit1 n % bit1 k = 0) (h : min_fac_helper n k) : nat.min_fac (bit1 n) = bit1 k := by { rw ← nat.dvd_iff_mod_eq_zero at hd, exact le_antisymm (nat.min_fac_le_of_dvd (nat.bit1_lt h.1) hd) h.2 } lemma min_fac_helper_5 (n k k' : ℕ) (e : bit1 k * bit1 k = k') (hd : bit1 n < k') (h : min_fac_helper n k) : nat.min_fac (bit1 n) = bit1 n := begin refine (nat.prime_def_min_fac.1 (nat.prime_def_le_sqrt.2 ⟨nat.bit1_lt h.n_pos, _⟩)).2, rw ← e at hd, intros m m2 hm md, have := le_trans h.2 (le_trans (nat.min_fac_le_of_dvd m2 md) hm), rw nat.le_sqrt at this, exact not_le_of_lt hd this end /-- Given `e` a natural numeral and `d : nat` a factor of it, return `⊢ ¬ prime e`. -/ meta def prove_non_prime (e : expr) (n d₁ : ℕ) : tactic expr := do let e₁ := reflect d₁, c ← mk_instance_cache `(nat), (c, p₁) ← prove_lt_nat c `(1) e₁, let d₂ := n / d₁, let e₂ := reflect d₂, (c, e', p) ← prove_mul_nat c e₁ e₂, guard (e' =ₐ e), (c, p₂) ← prove_lt_nat c `(1) e₂, return $ `(@nat.not_prime_mul').mk_app [e₁, e₂, e, p, p₁, p₂] /-- Given `a`,`a1 := bit1 a`, `n1` the value of `a1`, `b` and `p : min_fac_helper a b`, returns `(c, ⊢ min_fac a1 = c)`. 
-/ meta def prove_min_fac_aux (a a1 : expr) (n1 : ℕ) : instance_cache → expr → expr → tactic (instance_cache × expr × expr) | ic b p := do k ← b.to_nat, let k1 := bit1 k, let b1 := `(bit1:ℕ→ℕ).mk_app [b], if n1 < k1*k1 then do (ic, e', p₁) ← prove_mul_nat ic b1 b1, (ic, p₂) ← prove_lt_nat ic a1 e', return (ic, a1, `(min_fac_helper_5).mk_app [a, b, e', p₁, p₂, p]) else let d := k1.min_fac in if to_bool (d < k1) then do let k' := k+1, let e' := reflect k', (ic, p₁) ← prove_succ ic b e', p₂ ← prove_non_prime b1 k1 d, prove_min_fac_aux ic e' $ `(min_fac_helper_2).mk_app [a, b, e', p₁, p₂, p] else do let nc := n1 % k1, (ic, c, pc) ← prove_div_mod ic a1 b1 tt, if nc = 0 then return (ic, b1, `(min_fac_helper_4).mk_app [a, b, pc, p]) else do (ic, p₀) ← prove_pos ic c, let k' := k+1, let e' := reflect k', (ic, p₁) ← prove_succ ic b e', prove_min_fac_aux ic e' $ `(min_fac_helper_3).mk_app [a, b, e', c, p₁, pc, p₀, p] /-- Given `a` a natural numeral, returns `(b, ⊢ min_fac a = b)`. -/ meta def prove_min_fac (ic : instance_cache) (e : expr) : tactic (instance_cache × expr × expr) := match match_numeral e with | match_numeral_result.zero := return (ic, `(2:ℕ), `(nat.min_fac_zero)) | match_numeral_result.one := return (ic, `(1:ℕ), `(nat.min_fac_one)) | match_numeral_result.bit0 e := return (ic, `(2), `(min_fac_bit0).mk_app [e]) | match_numeral_result.bit1 e := do n ← e.to_nat, c ← mk_instance_cache `(nat), (c, p) ← prove_pos c e, let a1 := `(bit1:ℕ→ℕ).mk_app [e], prove_min_fac_aux e a1 (bit1 n) c `(1) (`(min_fac_helper_0).mk_app [e, p]) | _ := failed end /-- A partial proof of `factors`. Asserts that `l` is a sorted list of primes, lower bounded by a prime `p`, which multiplies to `n`. -/ def factors_helper (n p : ℕ) (l : list ℕ) : Prop := p.prime → list.chain (≤) p l ∧ (∀ a ∈ l, nat.prime a) ∧ list.prod l = n lemma factors_helper_nil (a : ℕ) : factors_helper 1 a [] := λ pa, ⟨list.chain.nil, by rintro _ ⟨⟩, list.prod_nil⟩ lemma factors_helper_cons' (n m a b : ℕ) (l : list ℕ) (h₁ : b * m = n) (h₂ : a ≤ b) (h₃ : nat.min_fac b = b) (H : factors_helper m b l) : factors_helper n a (b :: l) := λ pa, have pb : b.prime, from nat.prime_def_min_fac.2 ⟨le_trans pa.two_le h₂, h₃⟩, let ⟨f₁, f₂, f₃⟩ := H pb in ⟨list.chain.cons h₂ f₁, λ c h, h.elim (λ e, e.symm ▸ pb) (f₂ _), by rw [list.prod_cons, f₃, h₁]⟩ lemma factors_helper_cons (n m a b : ℕ) (l : list ℕ) (h₁ : b * m = n) (h₂ : a < b) (h₃ : nat.min_fac b = b) (H : factors_helper m b l) : factors_helper n a (b :: l) := factors_helper_cons' _ _ _ _ _ h₁ h₂.le h₃ H lemma factors_helper_sn (n a : ℕ) (h₁ : a < n) (h₂ : nat.min_fac n = n) : factors_helper n a [n] := factors_helper_cons _ _ _ _ _ (mul_one _) h₁ h₂ (factors_helper_nil _) lemma factors_helper_same (n m a : ℕ) (l : list ℕ) (h : a * m = n) (H : factors_helper m a l) : factors_helper n a (a :: l) := λ pa, factors_helper_cons' _ _ _ _ _ h (le_refl _) (nat.prime_def_min_fac.1 pa).2 H pa lemma factors_helper_same_sn (a : ℕ) : factors_helper a a [a] := factors_helper_same _ _ _ _ (mul_one _) (factors_helper_nil _) lemma factors_helper_end (n : ℕ) (l : list ℕ) (H : factors_helper n 2 l) : nat.factors n = l := let ⟨h₁, h₂, h₃⟩ := H nat.prime_two in have _, from (list.chain'_iff_pairwise (@le_trans _ _)).1 (@list.chain'.tail _ _ (_::_) h₁), (list.eq_of_perm_of_sorted (nat.factors_unique h₃ h₂) this (nat.factors_sorted _)).symm /-- Given `n` and `a` natural numerals, returns `(l, ⊢ factors_helper n a l)`. 
-/ meta def prove_factors_aux : instance_cache → expr → expr → ℕ → ℕ → tactic (instance_cache × expr × expr) | c en ea n a := let b := n.min_fac in if b < n then do let m := n / b, (c, em) ← c.of_nat m, if b = a then do (c, _, p₁) ← prove_mul_nat c ea em, (c, l, p₂) ← prove_factors_aux c em ea m a, pure (c, `(%%ea::%%l:list ℕ), `(factors_helper_same).mk_app [en, em, ea, l, p₁, p₂]) else do (c, eb) ← c.of_nat b, (c, _, p₁) ← prove_mul_nat c eb em, (c, p₂) ← prove_lt_nat c ea eb, (c, _, p₃) ← prove_min_fac c eb, (c, l, p₄) ← prove_factors_aux c em eb m b, pure (c, `(%%eb::%%l : list ℕ), `(factors_helper_cons).mk_app [en, em, ea, eb, l, p₁, p₂, p₃, p₄]) else if b = a then pure (c, `([%%ea] : list ℕ), `(factors_helper_same_sn).mk_app [ea]) else do (c, p₁) ← prove_lt_nat c ea en, (c, _, p₂) ← prove_min_fac c en, pure (c, `([%%en] : list ℕ), `(factors_helper_sn).mk_app [en, ea, p₁, p₂]) /-- Evaluates the `prime` and `min_fac` functions. -/ @[norm_num] meta def eval_prime : expr → tactic (expr × expr) | `(nat.prime %%e) := do n ← e.to_nat, match n with | 0 := false_intro `(nat.not_prime_zero) | 1 := false_intro `(nat.not_prime_one) | _ := let d₁ := n.min_fac in if d₁ < n then prove_non_prime e n d₁ >>= false_intro else do let e₁ := reflect d₁, c ← mk_instance_cache `(ℕ), (c, p₁) ← prove_lt_nat c `(1) e₁, (c, e₁, p) ← prove_min_fac c e, true_intro $ `(is_prime_helper).mk_app [e, p₁, p] end | `(nat.min_fac %%e) := do ic ← mk_instance_cache `(ℕ), prod.snd <$> prove_min_fac ic e | `(nat.factors %%e) := do n ← e.to_nat, match n with | 0 := pure (`(@list.nil ℕ), `(nat.factors_zero)) | 1 := pure (`(@list.nil ℕ), `(nat.factors_one)) | _ := do c ← mk_instance_cache `(ℕ), (c, l, p) ← prove_factors_aux c e `(2) n 2, pure (l, `(factors_helper_end).mk_app [e, l, p]) end | _ := failed end norm_num end tactic namespace nat theorem prime_three : prime 3 := by norm_num /-- See note [fact non-instances].-/ lemma fact_prime_two : fact (prime 2) := ⟨prime_two⟩ /-- See note [fact non-instances].-/ lemma fact_prime_three : fact (prime 3) := ⟨prime_three⟩ end nat namespace nat /-- The only prime divisor of positive prime power `p^k` is `p` itself -/ lemma prime_pow_prime_divisor {p k : ℕ} (hk : 0 < k) (hp: prime p) : (p^k).factors.to_finset = {p} := by rw [hp.factors_pow, list.to_finset_repeat_of_ne_zero hk.ne'] lemma mem_factors_mul_of_pos {a b : ℕ} (ha : 0 < a) (hb : 0 < b) (p : ℕ) : p ∈ (a * b).factors ↔ p ∈ a.factors ∨ p ∈ b.factors := begin rw [mem_factors (mul_pos ha hb), mem_factors ha, mem_factors hb, ←and_or_distrib_left], simpa only [and.congr_right_iff] using prime.dvd_mul end /-- If `a`,`b` are positive the prime divisors of `(a * b)` are the union of those of `a` and `b` -/ lemma factors_mul_of_pos {a b : ℕ} (ha : 0 < a) (hb : 0 < b) : (a * b).factors.to_finset = a.factors.to_finset ∪ b.factors.to_finset := by { ext p, simp only [finset.mem_union, list.mem_to_finset, mem_factors_mul_of_pos ha hb p] } /-- The sets of factors of coprime `a` and `b` are disjoint -/ lemma coprime_factors_disjoint {a b : ℕ} (hab: a.coprime b) : list.disjoint a.factors b.factors := begin intros q hqa hqb, apply not_prime_one, rw ←(eq_one_of_dvd_coprimes hab (dvd_of_mem_factors hqa) (dvd_of_mem_factors hqb)), exact prime_of_mem_factors hqa end lemma factors_mul_of_coprime {a b : ℕ} (hab : coprime a b) (p:ℕ): p ∈ (a * b).factors ↔ p ∈ a.factors ∪ b.factors := begin rcases a.eq_zero_or_pos with rfl | ha, { simp [(coprime_zero_left _).mp hab] }, rcases b.eq_zero_or_pos with rfl | hb, { simp [(coprime_zero_right _).mp hab] }, rw 
[mem_factors_mul_of_pos ha hb p, list.mem_union] end open list /-- For `b > 0`, the power of `p` in `a * b` is at least that in `a` -/ lemma le_factors_count_mul_left {p a b : ℕ} (hb : 0 < b) : list.count p a.factors ≤ list.count p (a * b).factors := begin rcases a.eq_zero_or_pos with rfl | ha, { simp }, { rw [perm.count_eq (perm_factors_mul_of_pos ha hb) p, count_append p], simp }, end /-- For `a > 0`, the power of `p` in `a * b` is at least that in `b` -/ lemma le_factors_count_mul_right {p a b : ℕ} (ha : 0 < a) : list.count p b.factors ≤ list.count p (a * b).factors := by { rw mul_comm, apply le_factors_count_mul_left ha } /-- If `p` is a prime factor of `a` then `p` is also a prime factor of `a * b` for any `b > 0` -/ lemma mem_factors_mul_left {p a b : ℕ} (hpa : p ∈ a.factors) (hb : 0 < b) : p ∈ (a*b).factors := by { rw ←list.count_pos, exact gt_of_ge_of_gt (le_factors_count_mul_left hb) (count_pos.mpr hpa) } /-- If `p` is a prime factor of `b` then `p` is also a prime factor of `a * b` for any `a > 0` -/ lemma mem_factors_mul_right {p a b : ℕ} (hpb : p ∈ b.factors) (ha : 0 < a) : p ∈ (a*b).factors := by { rw mul_comm, exact mem_factors_mul_left hpb ha } /-- If `p` is a prime factor of `a` then the power of `p` in `a` is the same that in `a * b`, for any `b` coprime to `a`. -/ lemma factors_count_eq_of_coprime_left {p a b : ℕ} (hab : coprime a b) (hpa : p ∈ a.factors) : list.count p (a * b).factors = list.count p a.factors := begin rw count_factors_mul_of_coprime hab, simpa only [count_eq_zero_of_not_mem (coprime_factors_disjoint hab hpa)], end /-- If `p` is a prime factor of `b` then the power of `p` in `b` is the same that in `a * b`, for any `a` coprime to `b`. -/ lemma factors_count_eq_of_coprime_right {p a b : ℕ} (hab : coprime a b) (hpb : p ∈ b.factors) : list.count p (a * b).factors = list.count p b.factors := by { rw mul_comm, exact factors_count_eq_of_coprime_left (coprime_comm.mp hab) hpb } end nat
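The Lean row above ends by installing a `norm_num` extension (`eval_prime`, backed by `prove_min_fac` and `prove_factors_aux`) that decides primality and evaluates `nat.min_fac` and `nat.factors` on numerals. The following minimal sketch shows how such goals are typically discharged; it assumes a Lean 3 mathlib of roughly the same vintage as this sample, where the extension ships with `data.nat.prime`, and the concrete numerals are only illustrative.

-- Usage sketch for the `norm_num` primality extension defined above.
-- Assumes Lean 3 mathlib with the extension available via `data.nat.prime`;
-- the numerals below are illustrative.
import data.nat.prime
import tactic.norm_num

-- `eval_prime` proves primality of a numeral via `is_prime_helper`.
example : nat.prime 97 := by norm_num

-- It refutes primality by exhibiting a factor (here 91 = 7 * 13, found by
-- `prove_non_prime`).
example : ¬ nat.prime 91 := by norm_num

-- `prove_min_fac` evaluates `nat.min_fac` on numerals.
example : nat.min_fac 91 = 7 := by norm_num

-- `prove_factors_aux` evaluates `nat.factors` to a sorted list of primes.
example : nat.factors 60 = [2, 2, 3, 5] := by norm_num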
lemma measure_empty[simp]: "measure M {} = 0"
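The single Isabelle line above records the basic fact that every measure assigns `0` to the empty set. For comparison with the surrounding Lean rows, here is a hedged Lean 3 analogue; the lemma name `measure_theory.measure_empty` and the import path are assumptions that may differ between mathlib versions.

-- Lean 3 analogue of the Isabelle fragment `measure M {} = 0` above.
-- Assumes mathlib's `measure_theory.measure_empty` (a `simp` lemma) under
-- this import path; both may vary across mathlib versions.
import measure_theory.measure.measure_space

open measure_theory

example {α : Type*} [measurable_space α] (μ : measure α) : μ ∅ = 0 :=
measure_empty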
(* Title: HOL/Analysis/Borel_Space.thy Author: Johannes Hölzl, TU München Author: Armin Heller, TU München *) section \<open>Borel Space\<close> theory Borel_Space imports Measurable Derivative Ordered_Euclidean_Space Extended_Real_Limits begin lemma is_interval_real_ereal_oo: "is_interval (real_of_ereal ` {N<..<M::ereal})" by (auto simp: real_atLeastGreaterThan_eq) lemma sets_Collect_eventually_sequentially[measurable]: "(\<And>i. {x\<in>space M. P x i} \<in> sets M) \<Longrightarrow> {x\<in>space M. eventually (P x) sequentially} \<in> sets M" unfolding eventually_sequentially by simp lemma topological_basis_trivial: "topological_basis {A. open A}" by (auto simp: topological_basis_def) proposition open_prod_generated: "open = generate_topology {A \<times> B | A B. open A \<and> open B}" proof - have "{A \<times> B :: ('a \<times> 'b) set | A B. open A \<and> open B} = ((\<lambda>(a, b). a \<times> b) ` ({A. open A} \<times> {A. open A}))" by auto then show ?thesis by (auto intro: topological_basis_prod topological_basis_trivial topological_basis_imp_subbasis) qed proposition mono_on_imp_deriv_nonneg: assumes mono: "mono_on f A" and deriv: "(f has_real_derivative D) (at x)" assumes "x \<in> interior A" shows "D \<ge> 0" proof (rule tendsto_lowerbound) let ?A' = "(\<lambda>y. y - x) ` interior A" from deriv show "((\<lambda>h. (f (x + h) - f x) / h) \<longlongrightarrow> D) (at 0)" by (simp add: field_has_derivative_at has_field_derivative_def) from mono have mono': "mono_on f (interior A)" by (rule mono_on_subset) (rule interior_subset) show "eventually (\<lambda>h. (f (x + h) - f x) / h \<ge> 0) (at 0)" proof (subst eventually_at_topological, intro exI conjI ballI impI) have "open (interior A)" by simp hence "open ((+) (-x) ` interior A)" by (rule open_translation) also have "((+) (-x) ` interior A) = ?A'" by auto finally show "open ?A'" . next from \<open>x \<in> interior A\<close> show "0 \<in> ?A'" by auto next fix h assume "h \<in> ?A'" hence "x + h \<in> interior A" by auto with mono' and \<open>x \<in> interior A\<close> show "(f (x + h) - f x) / h \<ge> 0" by (cases h rule: linorder_cases[of _ 0]) (simp_all add: divide_nonpos_neg divide_nonneg_pos mono_onD field_simps) qed qed simp proposition mono_on_ctble_discont: fixes f :: "real \<Rightarrow> real" fixes A :: "real set" assumes "mono_on f A" shows "countable {a\<in>A. \<not> continuous (at a within A) f}" proof - have mono: "\<And>x y. x \<in> A \<Longrightarrow> y \<in> A \<Longrightarrow> x \<le> y \<Longrightarrow> f x \<le> f y" using \<open>mono_on f A\<close> by (simp add: mono_on_def) have "\<forall>a \<in> {a\<in>A. \<not> continuous (at a within A) f}. \<exists>q :: nat \<times> rat. (fst q = 0 \<and> of_rat (snd q) < f a \<and> (\<forall>x \<in> A. x < a \<longrightarrow> f x < of_rat (snd q))) \<or> (fst q = 1 \<and> of_rat (snd q) > f a \<and> (\<forall>x \<in> A. x > a \<longrightarrow> f x > of_rat (snd q)))" proof (clarsimp simp del: One_nat_def) fix a assume "a \<in> A" assume "\<not> continuous (at a within A) f" thus "\<exists>q1 q2. q1 = 0 \<and> real_of_rat q2 < f a \<and> (\<forall>x\<in>A. x < a \<longrightarrow> f x < real_of_rat q2) \<or> q1 = 1 \<and> f a < real_of_rat q2 \<and> (\<forall>x\<in>A. a < x \<longrightarrow> real_of_rat q2 < f x)" proof (auto simp add: continuous_within order_tendsto_iff eventually_at) fix l assume "l < f a" then obtain q2 where q2: "l < of_rat q2" "of_rat q2 < f a" using of_rat_dense by blast assume * [rule_format]: "\<forall>d>0. \<exists>x\<in>A. 
x \<noteq> a \<and> dist x a < d \<and> \<not> l < f x" from q2 have "real_of_rat q2 < f a \<and> (\<forall>x\<in>A. x < a \<longrightarrow> f x < real_of_rat q2)" proof auto fix x assume "x \<in> A" "x < a" with q2 *[of "a - x"] show "f x < real_of_rat q2" apply (auto simp add: dist_real_def not_less) apply (subgoal_tac "f x \<le> f xa") by (auto intro: mono) qed thus ?thesis by auto next fix u assume "u > f a" then obtain q2 where q2: "f a < of_rat q2" "of_rat q2 < u" using of_rat_dense by blast assume *[rule_format]: "\<forall>d>0. \<exists>x\<in>A. x \<noteq> a \<and> dist x a < d \<and> \<not> u > f x" from q2 have "real_of_rat q2 > f a \<and> (\<forall>x\<in>A. x > a \<longrightarrow> f x > real_of_rat q2)" proof auto fix x assume "x \<in> A" "x > a" with q2 *[of "x - a"] show "f x > real_of_rat q2" apply (auto simp add: dist_real_def) apply (subgoal_tac "f x \<ge> f xa") by (auto intro: mono) qed thus ?thesis by auto qed qed hence "\<exists>g :: real \<Rightarrow> nat \<times> rat . \<forall>a \<in> {a\<in>A. \<not> continuous (at a within A) f}. (fst (g a) = 0 \<and> of_rat (snd (g a)) < f a \<and> (\<forall>x \<in> A. x < a \<longrightarrow> f x < of_rat (snd (g a)))) | (fst (g a) = 1 \<and> of_rat (snd (g a)) > f a \<and> (\<forall>x \<in> A. x > a \<longrightarrow> f x > of_rat (snd (g a))))" by (rule bchoice) then guess g .. hence g: "\<And>a x. a \<in> A \<Longrightarrow> \<not> continuous (at a within A) f \<Longrightarrow> x \<in> A \<Longrightarrow> (fst (g a) = 0 \<and> of_rat (snd (g a)) < f a \<and> (x < a \<longrightarrow> f x < of_rat (snd (g a)))) | (fst (g a) = 1 \<and> of_rat (snd (g a)) > f a \<and> (x > a \<longrightarrow> f x > of_rat (snd (g a))))" by auto have "inj_on g {a\<in>A. \<not> continuous (at a within A) f}" proof (auto simp add: inj_on_def) fix w z assume 1: "w \<in> A" and 2: "\<not> continuous (at w within A) f" and 3: "z \<in> A" and 4: "\<not> continuous (at z within A) f" and 5: "g w = g z" from g [OF 1 2 3] g [OF 3 4 1] 5 show "w = z" by auto qed thus ?thesis by (rule countableI') qed lemma mono_on_ctble_discont_open: fixes f :: "real \<Rightarrow> real" fixes A :: "real set" assumes "open A" "mono_on f A" shows "countable {a\<in>A. \<not>isCont f a}" proof - have "{a\<in>A. \<not>isCont f a} = {a\<in>A. \<not>(continuous (at a within A) f)}" by (auto simp add: continuous_within_open [OF _ \<open>open A\<close>]) thus ?thesis apply (elim ssubst) by (rule mono_on_ctble_discont, rule assms) qed lemma mono_ctble_discont: fixes f :: "real \<Rightarrow> real" assumes "mono f" shows "countable {a. \<not> isCont f a}" using assms mono_on_ctble_discont [of f UNIV] unfolding mono_on_def mono_def by auto lemma has_real_derivative_imp_continuous_on: assumes "\<And>x. x \<in> A \<Longrightarrow> (f has_real_derivative f' x) (at x)" shows "continuous_on A f" apply (intro differentiable_imp_continuous_on, unfold differentiable_on_def) using assms differentiable_at_withinI real_differentiable_def by blast lemma continuous_interval_vimage_Int: assumes "continuous_on {a::real..b} g" and mono: "\<And>x y. 
a \<le> x \<Longrightarrow> x \<le> y \<Longrightarrow> y \<le> b \<Longrightarrow> g x \<le> g y" assumes "a \<le> b" "(c::real) \<le> d" "{c..d} \<subseteq> {g a..g b}" obtains c' d' where "{a..b} \<inter> g -` {c..d} = {c'..d'}" "c' \<le> d'" "g c' = c" "g d' = d" proof- let ?A = "{a..b} \<inter> g -` {c..d}" from IVT'[of g a c b, OF _ _ \<open>a \<le> b\<close> assms(1)] assms(4,5) obtain c'' where c'': "c'' \<in> ?A" "g c'' = c" by auto from IVT'[of g a d b, OF _ _ \<open>a \<le> b\<close> assms(1)] assms(4,5) obtain d'' where d'': "d'' \<in> ?A" "g d'' = d" by auto hence [simp]: "?A \<noteq> {}" by blast define c' where "c' = Inf ?A" define d' where "d' = Sup ?A" have "?A \<subseteq> {c'..d'}" unfolding c'_def d'_def by (intro subsetI) (auto intro: cInf_lower cSup_upper) moreover from assms have "closed ?A" using continuous_on_closed_vimage[of "{a..b}" g] by (subst Int_commute) simp hence c'd'_in_set: "c' \<in> ?A" "d' \<in> ?A" unfolding c'_def d'_def by ((intro closed_contains_Inf closed_contains_Sup, simp_all)[])+ hence "{c'..d'} \<subseteq> ?A" using assms by (intro subsetI) (auto intro!: order_trans[of c "g c'" "g x" for x] order_trans[of "g x" "g d'" d for x] intro!: mono) moreover have "c' \<le> d'" using c'd'_in_set(2) unfolding c'_def by (intro cInf_lower) auto moreover have "g c' \<le> c" "g d' \<ge> d" apply (insert c'' d'' c'd'_in_set) apply (subst c''(2)[symmetric]) apply (auto simp: c'_def intro!: mono cInf_lower c'') [] apply (subst d''(2)[symmetric]) apply (auto simp: d'_def intro!: mono cSup_upper d'') [] done with c'd'_in_set have "g c' = c" "g d' = d" by auto ultimately show ?thesis using that by blast qed subsection \<open>Generic Borel spaces\<close> definition\<^marker>\<open>tag important\<close> (in topological_space) borel :: "'a measure" where "borel = sigma UNIV {S. open S}" abbreviation "borel_measurable M \<equiv> measurable M borel" lemma in_borel_measurable: "f \<in> borel_measurable M \<longleftrightarrow> (\<forall>S \<in> sigma_sets UNIV {S. open S}. f -` S \<inter> space M \<in> sets M)" by (auto simp add: measurable_def borel_def) lemma in_borel_measurable_borel: "f \<in> borel_measurable M \<longleftrightarrow> (\<forall>S \<in> sets borel. f -` S \<inter> space M \<in> sets M)" by (auto simp add: measurable_def borel_def) lemma space_borel[simp]: "space borel = UNIV" unfolding borel_def by auto lemma space_in_borel[measurable]: "UNIV \<in> sets borel" unfolding borel_def by auto lemma sets_borel: "sets borel = sigma_sets UNIV {S. open S}" unfolding borel_def by (rule sets_measure_of) simp lemma measurable_sets_borel: "\<lbrakk>f \<in> measurable borel M; A \<in> sets M\<rbrakk> \<Longrightarrow> f -` A \<in> sets borel" by (drule (1) measurable_sets) simp lemma pred_Collect_borel[measurable (raw)]: "Measurable.pred borel P \<Longrightarrow> {x. P x} \<in> sets borel" unfolding borel_def pred_def by auto lemma borel_open[measurable (raw generic)]: assumes "open A" shows "A \<in> sets borel" proof - have "A \<in> {S. open S}" unfolding mem_Collect_eq using assms . 
thus ?thesis unfolding borel_def by auto qed lemma borel_closed[measurable (raw generic)]: assumes "closed A" shows "A \<in> sets borel" proof - have "space borel - (- A) \<in> sets borel" using assms unfolding closed_def by (blast intro: borel_open) thus ?thesis by simp qed lemma borel_singleton[measurable]: "A \<in> sets borel \<Longrightarrow> insert x A \<in> sets (borel :: 'a::t1_space measure)" unfolding insert_def by (rule sets.Un) auto lemma sets_borel_eq_count_space: "sets (borel :: 'a::{countable, t2_space} measure) = count_space UNIV" proof - have "(\<Union>a\<in>A. {a}) \<in> sets borel" for A :: "'a set" by (intro sets.countable_UN') auto then show ?thesis by auto qed lemma borel_comp[measurable]: "A \<in> sets borel \<Longrightarrow> - A \<in> sets borel" unfolding Compl_eq_Diff_UNIV by simp lemma borel_measurable_vimage: fixes f :: "'a \<Rightarrow> 'x::t2_space" assumes borel[measurable]: "f \<in> borel_measurable M" shows "f -` {x} \<inter> space M \<in> sets M" by simp lemma borel_measurableI: fixes f :: "'a \<Rightarrow> 'x::topological_space" assumes "\<And>S. open S \<Longrightarrow> f -` S \<inter> space M \<in> sets M" shows "f \<in> borel_measurable M" unfolding borel_def proof (rule measurable_measure_of, simp_all) fix S :: "'x set" assume "open S" thus "f -` S \<inter> space M \<in> sets M" using assms[of S] by simp qed lemma borel_measurable_const: "(\<lambda>x. c) \<in> borel_measurable M" by auto lemma borel_measurable_indicator: assumes A: "A \<in> sets M" shows "indicator A \<in> borel_measurable M" unfolding indicator_def [abs_def] using A by (auto intro!: measurable_If_set) lemma borel_measurable_count_space[measurable (raw)]: "f \<in> borel_measurable (count_space S)" unfolding measurable_def by auto lemma borel_measurable_indicator'[measurable (raw)]: assumes [measurable]: "{x\<in>space M. f x \<in> A x} \<in> sets M" shows "(\<lambda>x. indicator (A x) (f x)) \<in> borel_measurable M" unfolding indicator_def[abs_def] by (auto intro!: measurable_If) lemma borel_measurable_indicator_iff: "(indicator A :: 'a \<Rightarrow> 'x::{t1_space, zero_neq_one}) \<in> borel_measurable M \<longleftrightarrow> A \<inter> space M \<in> sets M" (is "?I \<in> borel_measurable M \<longleftrightarrow> _") proof assume "?I \<in> borel_measurable M" then have "?I -` {1} \<inter> space M \<in> sets M" unfolding measurable_def by auto also have "?I -` {1} \<inter> space M = A \<inter> space M" unfolding indicator_def [abs_def] by auto finally show "A \<inter> space M \<in> sets M" . next assume "A \<inter> space M \<in> sets M" moreover have "?I \<in> borel_measurable M \<longleftrightarrow> (indicator (A \<inter> space M) :: 'a \<Rightarrow> 'x) \<in> borel_measurable M" by (intro measurable_cong) (auto simp: indicator_def) ultimately show "?I \<in> borel_measurable M" by auto qed lemma borel_measurable_subalgebra: assumes "sets N \<subseteq> sets M" "space N = space M" "f \<in> borel_measurable N" shows "f \<in> borel_measurable M" using assms unfolding measurable_def by auto lemma borel_measurable_restrict_space_iff_ereal: fixes f :: "'a \<Rightarrow> ereal" assumes \<Omega>[measurable, simp]: "\<Omega> \<inter> space M \<in> sets M" shows "f \<in> borel_measurable (restrict_space M \<Omega>) \<longleftrightarrow> (\<lambda>x. f x * indicator \<Omega> x) \<in> borel_measurable M" by (subst measurable_restrict_space_iff) (auto simp: indicator_def if_distrib[where f="\<lambda>x. 
a * x" for a] cong del: if_weak_cong) lemma borel_measurable_restrict_space_iff_ennreal: fixes f :: "'a \<Rightarrow> ennreal" assumes \<Omega>[measurable, simp]: "\<Omega> \<inter> space M \<in> sets M" shows "f \<in> borel_measurable (restrict_space M \<Omega>) \<longleftrightarrow> (\<lambda>x. f x * indicator \<Omega> x) \<in> borel_measurable M" by (subst measurable_restrict_space_iff) (auto simp: indicator_def if_distrib[where f="\<lambda>x. a * x" for a] cong del: if_weak_cong) lemma borel_measurable_restrict_space_iff: fixes f :: "'a \<Rightarrow> 'b::real_normed_vector" assumes \<Omega>[measurable, simp]: "\<Omega> \<inter> space M \<in> sets M" shows "f \<in> borel_measurable (restrict_space M \<Omega>) \<longleftrightarrow> (\<lambda>x. indicator \<Omega> x *\<^sub>R f x) \<in> borel_measurable M" by (subst measurable_restrict_space_iff) (auto simp: indicator_def if_distrib[where f="\<lambda>x. x *\<^sub>R a" for a] ac_simps cong del: if_weak_cong) lemma cbox_borel[measurable]: "cbox a b \<in> sets borel" by (auto intro: borel_closed) lemma box_borel[measurable]: "box a b \<in> sets borel" by (auto intro: borel_open) lemma borel_compact: "compact (A::'a::t2_space set) \<Longrightarrow> A \<in> sets borel" by (auto intro: borel_closed dest!: compact_imp_closed) lemma borel_sigma_sets_subset: "A \<subseteq> sets borel \<Longrightarrow> sigma_sets UNIV A \<subseteq> sets borel" using sets.sigma_sets_subset[of A borel] by simp lemma borel_eq_sigmaI1: fixes F :: "'i \<Rightarrow> 'a::topological_space set" and X :: "'a::topological_space set set" assumes borel_eq: "borel = sigma UNIV X" assumes X: "\<And>x. x \<in> X \<Longrightarrow> x \<in> sets (sigma UNIV (F ` A))" assumes F: "\<And>i. i \<in> A \<Longrightarrow> F i \<in> sets borel" shows "borel = sigma UNIV (F ` A)" unfolding borel_def proof (intro sigma_eqI antisym) have borel_rev_eq: "sigma_sets UNIV {S::'a set. open S} = sets borel" unfolding borel_def by simp also have "\<dots> = sigma_sets UNIV X" unfolding borel_eq by simp also have "\<dots> \<subseteq> sigma_sets UNIV (F`A)" using X by (intro sigma_algebra.sigma_sets_subset[OF sigma_algebra_sigma_sets]) auto finally show "sigma_sets UNIV {S. open S} \<subseteq> sigma_sets UNIV (F`A)" . show "sigma_sets UNIV (F`A) \<subseteq> sigma_sets UNIV {S. open S}" unfolding borel_rev_eq using F by (intro borel_sigma_sets_subset) auto qed auto lemma borel_eq_sigmaI2: fixes F :: "'i \<Rightarrow> 'j \<Rightarrow> 'a::topological_space set" and G :: "'l \<Rightarrow> 'k \<Rightarrow> 'a::topological_space set" assumes borel_eq: "borel = sigma UNIV ((\<lambda>(i, j). G i j)`B)" assumes X: "\<And>i j. (i, j) \<in> B \<Longrightarrow> G i j \<in> sets (sigma UNIV ((\<lambda>(i, j). F i j) ` A))" assumes F: "\<And>i j. (i, j) \<in> A \<Longrightarrow> F i j \<in> sets borel" shows "borel = sigma UNIV ((\<lambda>(i, j). F i j) ` A)" using assms by (intro borel_eq_sigmaI1[where X="(\<lambda>(i, j). G i j) ` B" and F="(\<lambda>(i, j). F i j)"]) auto lemma borel_eq_sigmaI3: fixes F :: "'i \<Rightarrow> 'j \<Rightarrow> 'a::topological_space set" and X :: "'a::topological_space set set" assumes borel_eq: "borel = sigma UNIV X" assumes X: "\<And>x. x \<in> X \<Longrightarrow> x \<in> sets (sigma UNIV ((\<lambda>(i, j). F i j) ` A))" assumes F: "\<And>i j. (i, j) \<in> A \<Longrightarrow> F i j \<in> sets borel" shows "borel = sigma UNIV ((\<lambda>(i, j). F i j) ` A)" using assms by (intro borel_eq_sigmaI1[where X=X and F="(\<lambda>(i, j). 
F i j)"]) auto lemma borel_eq_sigmaI4: fixes F :: "'i \<Rightarrow> 'a::topological_space set" and G :: "'l \<Rightarrow> 'k \<Rightarrow> 'a::topological_space set" assumes borel_eq: "borel = sigma UNIV ((\<lambda>(i, j). G i j)`A)" assumes X: "\<And>i j. (i, j) \<in> A \<Longrightarrow> G i j \<in> sets (sigma UNIV (range F))" assumes F: "\<And>i. F i \<in> sets borel" shows "borel = sigma UNIV (range F)" using assms by (intro borel_eq_sigmaI1[where X="(\<lambda>(i, j). G i j) ` A" and F=F]) auto lemma borel_eq_sigmaI5: fixes F :: "'i \<Rightarrow> 'j \<Rightarrow> 'a::topological_space set" and G :: "'l \<Rightarrow> 'a::topological_space set" assumes borel_eq: "borel = sigma UNIV (range G)" assumes X: "\<And>i. G i \<in> sets (sigma UNIV (range (\<lambda>(i, j). F i j)))" assumes F: "\<And>i j. F i j \<in> sets borel" shows "borel = sigma UNIV (range (\<lambda>(i, j). F i j))" using assms by (intro borel_eq_sigmaI1[where X="range G" and F="(\<lambda>(i, j). F i j)"]) auto theorem second_countable_borel_measurable: fixes X :: "'a::second_countable_topology set set" assumes eq: "open = generate_topology X" shows "borel = sigma UNIV X" unfolding borel_def proof (intro sigma_eqI sigma_sets_eqI) interpret X: sigma_algebra UNIV "sigma_sets UNIV X" by (rule sigma_algebra_sigma_sets) simp fix S :: "'a set" assume "S \<in> Collect open" then have "generate_topology X S" by (auto simp: eq) then show "S \<in> sigma_sets UNIV X" proof induction case (UN K) then have K: "\<And>k. k \<in> K \<Longrightarrow> open k" unfolding eq by auto from ex_countable_basis obtain B :: "'a set set" where B: "\<And>b. b \<in> B \<Longrightarrow> open b" "\<And>X. open X \<Longrightarrow> \<exists>b\<subseteq>B. (\<Union>b) = X" and "countable B" by (auto simp: topological_basis_def) from B(2)[OF K] obtain m where m: "\<And>k. k \<in> K \<Longrightarrow> m k \<subseteq> B" "\<And>k. k \<in> K \<Longrightarrow> \<Union>(m k) = k" by metis define U where "U = (\<Union>k\<in>K. m k)" with m have "countable U" by (intro countable_subset[OF _ \<open>countable B\<close>]) auto have "\<Union>U = (\<Union>A\<in>U. A)" by simp also have "\<dots> = \<Union>K" unfolding U_def UN_simps by (simp add: m) finally have "\<Union>U = \<Union>K" . have "\<forall>b\<in>U. \<exists>k\<in>K. b \<subseteq> k" using m by (auto simp: U_def) then obtain u where u: "\<And>b. b \<in> U \<Longrightarrow> u b \<in> K" and "\<And>b. b \<in> U \<Longrightarrow> b \<subseteq> u b" by metis then have "(\<Union>b\<in>U. u b) \<subseteq> \<Union>K" "\<Union>U \<subseteq> (\<Union>b\<in>U. u b)" by auto then have "\<Union>K = (\<Union>b\<in>U. u b)" unfolding \<open>\<Union>U = \<Union>K\<close> by auto also have "\<dots> \<in> sigma_sets UNIV X" using u UN by (intro X.countable_UN' \<open>countable U\<close>) auto finally show "\<Union>K \<in> sigma_sets UNIV X" . 
qed auto qed (auto simp: eq intro: generate_topology.Basis) lemma borel_eq_closed: "borel = sigma UNIV (Collect closed)" unfolding borel_def proof (intro sigma_eqI sigma_sets_eqI, safe) fix x :: "'a set" assume "open x" hence "x = UNIV - (UNIV - x)" by auto also have "\<dots> \<in> sigma_sets UNIV (Collect closed)" by (force intro: sigma_sets.Compl simp: \<open>open x\<close>) finally show "x \<in> sigma_sets UNIV (Collect closed)" by simp next fix x :: "'a set" assume "closed x" hence "x = UNIV - (UNIV - x)" by auto also have "\<dots> \<in> sigma_sets UNIV (Collect open)" by (force intro: sigma_sets.Compl simp: \<open>closed x\<close>) finally show "x \<in> sigma_sets UNIV (Collect open)" by simp qed simp_all proposition borel_eq_countable_basis: fixes B::"'a::topological_space set set" assumes "countable B" assumes "topological_basis B" shows "borel = sigma UNIV B" unfolding borel_def proof (intro sigma_eqI sigma_sets_eqI, safe) interpret countable_basis "open" B using assms by (rule countable_basis_openI) fix X::"'a set" assume "open X" from open_countable_basisE[OF this] obtain B' where B': "B' \<subseteq> B" "X = \<Union> B'" . then show "X \<in> sigma_sets UNIV B" by (blast intro: sigma_sets_UNION \<open>countable B\<close> countable_subset) next fix b assume "b \<in> B" hence "open b" by (rule topological_basis_open[OF assms(2)]) thus "b \<in> sigma_sets UNIV (Collect open)" by auto qed simp_all lemma borel_measurable_continuous_on_restrict: fixes f :: "'a::topological_space \<Rightarrow> 'b::topological_space" assumes f: "continuous_on A f" shows "f \<in> borel_measurable (restrict_space borel A)" proof (rule borel_measurableI) fix S :: "'b set" assume "open S" with f obtain T where "f -` S \<inter> A = T \<inter> A" "open T" by (metis continuous_on_open_invariant) then show "f -` S \<inter> space (restrict_space borel A) \<in> sets (restrict_space borel A)" by (force simp add: sets_restrict_space space_restrict_space) qed lemma borel_measurable_continuous_onI: "continuous_on UNIV f \<Longrightarrow> f \<in> borel_measurable borel" by (drule borel_measurable_continuous_on_restrict) simp lemma borel_measurable_continuous_on_if: "A \<in> sets borel \<Longrightarrow> continuous_on A f \<Longrightarrow> continuous_on (- A) g \<Longrightarrow> (\<lambda>x. if x \<in> A then f x else g x) \<in> borel_measurable borel" by (auto simp add: measurable_If_restrict_space_iff Collect_neg_eq intro!: borel_measurable_continuous_on_restrict) lemma borel_measurable_continuous_countable_exceptions: fixes f :: "'a::t1_space \<Rightarrow> 'b::topological_space" assumes X: "countable X" assumes "continuous_on (- X) f" shows "f \<in> borel_measurable borel" proof (rule measurable_discrete_difference[OF _ X]) have "X \<in> sets borel" by (rule sets.countable[OF _ X]) auto then show "(\<lambda>x. if x \<in> X then undefined else f x) \<in> borel_measurable borel" by (intro borel_measurable_continuous_on_if assms continuous_intros) qed auto lemma borel_measurable_continuous_on: assumes f: "continuous_on UNIV f" and g: "g \<in> borel_measurable M" shows "(\<lambda>x. f (g x)) \<in> borel_measurable M" using measurable_comp[OF g borel_measurable_continuous_onI[OF f]] by (simp add: comp_def) lemma borel_measurable_continuous_on_indicator: fixes f g :: "'a::topological_space \<Rightarrow> 'b::real_normed_vector" shows "A \<in> sets borel \<Longrightarrow> continuous_on A f \<Longrightarrow> (\<lambda>x. 
indicator A x *\<^sub>R f x) \<in> borel_measurable borel" by (subst borel_measurable_restrict_space_iff[symmetric]) (auto intro: borel_measurable_continuous_on_restrict) lemma borel_measurable_Pair[measurable (raw)]: fixes f :: "'a \<Rightarrow> 'b::second_countable_topology" and g :: "'a \<Rightarrow> 'c::second_countable_topology" assumes f[measurable]: "f \<in> borel_measurable M" assumes g[measurable]: "g \<in> borel_measurable M" shows "(\<lambda>x. (f x, g x)) \<in> borel_measurable M" proof (subst borel_eq_countable_basis) let ?B = "SOME B::'b set set. countable B \<and> topological_basis B" let ?C = "SOME B::'c set set. countable B \<and> topological_basis B" let ?P = "(\<lambda>(b, c). b \<times> c) ` (?B \<times> ?C)" show "countable ?P" "topological_basis ?P" by (auto intro!: countable_basis topological_basis_prod is_basis) show "(\<lambda>x. (f x, g x)) \<in> measurable M (sigma UNIV ?P)" proof (rule measurable_measure_of) fix S assume "S \<in> ?P" then obtain b c where "b \<in> ?B" "c \<in> ?C" and S: "S = b \<times> c" by auto then have borel: "open b" "open c" by (auto intro: is_basis topological_basis_open) have "(\<lambda>x. (f x, g x)) -` S \<inter> space M = (f -` b \<inter> space M) \<inter> (g -` c \<inter> space M)" unfolding S by auto also have "\<dots> \<in> sets M" using borel by simp finally show "(\<lambda>x. (f x, g x)) -` S \<inter> space M \<in> sets M" . qed auto qed lemma borel_measurable_continuous_Pair: fixes f :: "'a \<Rightarrow> 'b::second_countable_topology" and g :: "'a \<Rightarrow> 'c::second_countable_topology" assumes [measurable]: "f \<in> borel_measurable M" assumes [measurable]: "g \<in> borel_measurable M" assumes H: "continuous_on UNIV (\<lambda>x. H (fst x) (snd x))" shows "(\<lambda>x. H (f x) (g x)) \<in> borel_measurable M" proof - have eq: "(\<lambda>x. H (f x) (g x)) = (\<lambda>x. (\<lambda>x. H (fst x) (snd x)) (f x, g x))" by auto show ?thesis unfolding eq by (rule borel_measurable_continuous_on[OF H]) auto qed subsection \<open>Borel spaces on order topologies\<close> lemma [measurable]: fixes a b :: "'a::linorder_topology" shows lessThan_borel: "{..< a} \<in> sets borel" and greaterThan_borel: "{a <..} \<in> sets borel" and greaterThanLessThan_borel: "{a<..<b} \<in> sets borel" and atMost_borel: "{..a} \<in> sets borel" and atLeast_borel: "{a..} \<in> sets borel" and atLeastAtMost_borel: "{a..b} \<in> sets borel" and greaterThanAtMost_borel: "{a<..b} \<in> sets borel" and atLeastLessThan_borel: "{a..<b} \<in> sets borel" unfolding greaterThanAtMost_def atLeastLessThan_def by (blast intro: borel_open borel_closed open_lessThan open_greaterThan open_greaterThanLessThan closed_atMost closed_atLeast closed_atLeastAtMost)+ lemma borel_Iio: "borel = sigma UNIV (range lessThan :: 'a::{linorder_topology, second_countable_topology} set set)" unfolding second_countable_borel_measurable[OF open_generated_order] proof (intro sigma_eqI sigma_sets_eqI) from countable_dense_setE guess D :: "'a set" . note D = this interpret L: sigma_algebra UNIV "sigma_sets UNIV (range lessThan)" by (rule sigma_algebra_sigma_sets) simp fix A :: "'a set" assume "A \<in> range lessThan \<union> range greaterThan" then obtain y where "A = {y <..} \<or> A = {..< y}" by blast then show "A \<in> sigma_sets UNIV (range lessThan)" proof assume A: "A = {y <..}" show ?thesis proof cases assume "\<forall>x>y. \<exists>d. y < d \<and> d < x" with D(2)[of "{y <..< x}" for x] have "\<forall>x>y. \<exists>d\<in>D. 
y < d \<and> d < x" by (auto simp: set_eq_iff) then have "A = UNIV - (\<Inter>d\<in>{d\<in>D. y < d}. {..< d})" by (auto simp: A) (metis less_asym) also have "\<dots> \<in> sigma_sets UNIV (range lessThan)" using D(1) by (intro L.Diff L.top L.countable_INT'') auto finally show ?thesis . next assume "\<not> (\<forall>x>y. \<exists>d. y < d \<and> d < x)" then obtain x where "y < x" "\<And>d. y < d \<Longrightarrow> \<not> d < x" by auto then have "A = UNIV - {..< x}" unfolding A by (auto simp: not_less[symmetric]) also have "\<dots> \<in> sigma_sets UNIV (range lessThan)" by auto finally show ?thesis . qed qed auto qed auto lemma borel_Ioi: "borel = sigma UNIV (range greaterThan :: 'a::{linorder_topology, second_countable_topology} set set)" unfolding second_countable_borel_measurable[OF open_generated_order] proof (intro sigma_eqI sigma_sets_eqI) from countable_dense_setE guess D :: "'a set" . note D = this interpret L: sigma_algebra UNIV "sigma_sets UNIV (range greaterThan)" by (rule sigma_algebra_sigma_sets) simp fix A :: "'a set" assume "A \<in> range lessThan \<union> range greaterThan" then obtain y where "A = {y <..} \<or> A = {..< y}" by blast then show "A \<in> sigma_sets UNIV (range greaterThan)" proof assume A: "A = {..< y}" show ?thesis proof cases assume "\<forall>x<y. \<exists>d. x < d \<and> d < y" with D(2)[of "{x <..< y}" for x] have "\<forall>x<y. \<exists>d\<in>D. x < d \<and> d < y" by (auto simp: set_eq_iff) then have "A = UNIV - (\<Inter>d\<in>{d\<in>D. d < y}. {d <..})" by (auto simp: A) (metis less_asym) also have "\<dots> \<in> sigma_sets UNIV (range greaterThan)" using D(1) by (intro L.Diff L.top L.countable_INT'') auto finally show ?thesis . next assume "\<not> (\<forall>x<y. \<exists>d. x < d \<and> d < y)" then obtain x where "x < y" "\<And>d. y > d \<Longrightarrow> x \<ge> d" by (auto simp: not_less[symmetric]) then have "A = UNIV - {x <..}" unfolding A Compl_eq_Diff_UNIV[symmetric] by auto also have "\<dots> \<in> sigma_sets UNIV (range greaterThan)" by auto finally show ?thesis . qed qed auto qed auto lemma borel_measurableI_less: fixes f :: "'a \<Rightarrow> 'b::{linorder_topology, second_countable_topology}" shows "(\<And>y. {x\<in>space M. f x < y} \<in> sets M) \<Longrightarrow> f \<in> borel_measurable M" unfolding borel_Iio by (rule measurable_measure_of) (auto simp: Int_def conj_commute) lemma borel_measurableI_greater: fixes f :: "'a \<Rightarrow> 'b::{linorder_topology, second_countable_topology}" shows "(\<And>y. {x\<in>space M. y < f x} \<in> sets M) \<Longrightarrow> f \<in> borel_measurable M" unfolding borel_Ioi by (rule measurable_measure_of) (auto simp: Int_def conj_commute) lemma borel_measurableI_le: fixes f :: "'a \<Rightarrow> 'b::{linorder_topology, second_countable_topology}" shows "(\<And>y. {x\<in>space M. f x \<le> y} \<in> sets M) \<Longrightarrow> f \<in> borel_measurable M" by (rule borel_measurableI_greater) (auto simp: not_le[symmetric]) lemma borel_measurableI_ge: fixes f :: "'a \<Rightarrow> 'b::{linorder_topology, second_countable_topology}" shows "(\<And>y. {x\<in>space M. y \<le> f x} \<in> sets M) \<Longrightarrow> f \<in> borel_measurable M" by (rule borel_measurableI_less) (auto simp: not_le[symmetric]) lemma borel_measurable_less[measurable]: fixes f :: "'a \<Rightarrow> 'b::{second_countable_topology, linorder_topology}" assumes "f \<in> borel_measurable M" assumes "g \<in> borel_measurable M" shows "{w \<in> space M. f w < g w} \<in> sets M" proof - have "{w \<in> space M. f w < g w} = (\<lambda>x. (f x, g x)) -` {x. 
fst x < snd x} \<inter> space M" by auto also have "\<dots> \<in> sets M" by (intro measurable_sets[OF borel_measurable_Pair borel_open, OF assms open_Collect_less] continuous_intros) finally show ?thesis . qed lemma fixes f :: "'a \<Rightarrow> 'b::{second_countable_topology, linorder_topology}" assumes f[measurable]: "f \<in> borel_measurable M" assumes g[measurable]: "g \<in> borel_measurable M" shows borel_measurable_le[measurable]: "{w \<in> space M. f w \<le> g w} \<in> sets M" and borel_measurable_eq[measurable]: "{w \<in> space M. f w = g w} \<in> sets M" and borel_measurable_neq: "{w \<in> space M. f w \<noteq> g w} \<in> sets M" unfolding eq_iff not_less[symmetric] by measurable lemma borel_measurable_SUP[measurable (raw)]: fixes F :: "_ \<Rightarrow> _ \<Rightarrow> _::{complete_linorder, linorder_topology, second_countable_topology}" assumes [simp]: "countable I" assumes [measurable]: "\<And>i. i \<in> I \<Longrightarrow> F i \<in> borel_measurable M" shows "(\<lambda>x. SUP i\<in>I. F i x) \<in> borel_measurable M" by (rule borel_measurableI_greater) (simp add: less_SUP_iff) lemma borel_measurable_INF[measurable (raw)]: fixes F :: "_ \<Rightarrow> _ \<Rightarrow> _::{complete_linorder, linorder_topology, second_countable_topology}" assumes [simp]: "countable I" assumes [measurable]: "\<And>i. i \<in> I \<Longrightarrow> F i \<in> borel_measurable M" shows "(\<lambda>x. INF i\<in>I. F i x) \<in> borel_measurable M" by (rule borel_measurableI_less) (simp add: INF_less_iff) lemma borel_measurable_cSUP[measurable (raw)]: fixes F :: "_ \<Rightarrow> _ \<Rightarrow> 'a::{conditionally_complete_linorder, linorder_topology, second_countable_topology}" assumes [simp]: "countable I" assumes [measurable]: "\<And>i. i \<in> I \<Longrightarrow> F i \<in> borel_measurable M" assumes bdd: "\<And>x. x \<in> space M \<Longrightarrow> bdd_above ((\<lambda>i. F i x) ` I)" shows "(\<lambda>x. SUP i\<in>I. F i x) \<in> borel_measurable M" proof cases assume "I = {}" then show ?thesis unfolding \<open>I = {}\<close> image_empty by simp next assume "I \<noteq> {}" show ?thesis proof (rule borel_measurableI_le) fix y have "{x \<in> space M. \<forall>i\<in>I. F i x \<le> y} \<in> sets M" by measurable also have "{x \<in> space M. \<forall>i\<in>I. F i x \<le> y} = {x \<in> space M. (SUP i\<in>I. F i x) \<le> y}" by (simp add: cSUP_le_iff \<open>I \<noteq> {}\<close> bdd cong: conj_cong) finally show "{x \<in> space M. (SUP i\<in>I. F i x) \<le> y} \<in> sets M" . qed qed lemma borel_measurable_cINF[measurable (raw)]: fixes F :: "_ \<Rightarrow> _ \<Rightarrow> 'a::{conditionally_complete_linorder, linorder_topology, second_countable_topology}" assumes [simp]: "countable I" assumes [measurable]: "\<And>i. i \<in> I \<Longrightarrow> F i \<in> borel_measurable M" assumes bdd: "\<And>x. x \<in> space M \<Longrightarrow> bdd_below ((\<lambda>i. F i x) ` I)" shows "(\<lambda>x. INF i\<in>I. F i x) \<in> borel_measurable M" proof cases assume "I = {}" then show ?thesis unfolding \<open>I = {}\<close> image_empty by simp next assume "I \<noteq> {}" show ?thesis proof (rule borel_measurableI_ge) fix y have "{x \<in> space M. \<forall>i\<in>I. y \<le> F i x} \<in> sets M" by measurable also have "{x \<in> space M. \<forall>i\<in>I. y \<le> F i x} = {x \<in> space M. y \<le> (INF i\<in>I. F i x)}" by (simp add: le_cINF_iff \<open>I \<noteq> {}\<close> bdd cong: conj_cong) finally show "{x \<in> space M. y \<le> (INF i\<in>I. F i x)} \<in> sets M" . 
qed qed lemma borel_measurable_lfp[consumes 1, case_names continuity step]: fixes F :: "('a \<Rightarrow> 'b) \<Rightarrow> ('a \<Rightarrow> 'b::{complete_linorder, linorder_topology, second_countable_topology})" assumes "sup_continuous F" assumes *: "\<And>f. f \<in> borel_measurable M \<Longrightarrow> F f \<in> borel_measurable M" shows "lfp F \<in> borel_measurable M" proof - { fix i have "((F ^^ i) bot) \<in> borel_measurable M" by (induct i) (auto intro!: *) } then have "(\<lambda>x. SUP i. (F ^^ i) bot x) \<in> borel_measurable M" by measurable also have "(\<lambda>x. SUP i. (F ^^ i) bot x) = (SUP i. (F ^^ i) bot)" by (auto simp add: image_comp) also have "(SUP i. (F ^^ i) bot) = lfp F" by (rule sup_continuous_lfp[symmetric]) fact finally show ?thesis . qed lemma borel_measurable_gfp[consumes 1, case_names continuity step]: fixes F :: "('a \<Rightarrow> 'b) \<Rightarrow> ('a \<Rightarrow> 'b::{complete_linorder, linorder_topology, second_countable_topology})" assumes "inf_continuous F" assumes *: "\<And>f. f \<in> borel_measurable M \<Longrightarrow> F f \<in> borel_measurable M" shows "gfp F \<in> borel_measurable M" proof - { fix i have "((F ^^ i) top) \<in> borel_measurable M" by (induct i) (auto intro!: * simp: bot_fun_def) } then have "(\<lambda>x. INF i. (F ^^ i) top x) \<in> borel_measurable M" by measurable also have "(\<lambda>x. INF i. (F ^^ i) top x) = (INF i. (F ^^ i) top)" by (auto simp add: image_comp) also have "\<dots> = gfp F" by (rule inf_continuous_gfp[symmetric]) fact finally show ?thesis . qed lemma borel_measurable_max[measurable (raw)]: "f \<in> borel_measurable M \<Longrightarrow> g \<in> borel_measurable M \<Longrightarrow> (\<lambda>x. max (g x) (f x) :: 'b::{second_countable_topology, linorder_topology}) \<in> borel_measurable M" by (rule borel_measurableI_less) simp lemma borel_measurable_min[measurable (raw)]: "f \<in> borel_measurable M \<Longrightarrow> g \<in> borel_measurable M \<Longrightarrow> (\<lambda>x. min (g x) (f x) :: 'b::{second_countable_topology, linorder_topology}) \<in> borel_measurable M" by (rule borel_measurableI_greater) simp lemma borel_measurable_Min[measurable (raw)]: "finite I \<Longrightarrow> (\<And>i. i \<in> I \<Longrightarrow> f i \<in> borel_measurable M) \<Longrightarrow> (\<lambda>x. Min ((\<lambda>i. f i x)`I) :: 'b::{second_countable_topology, linorder_topology}) \<in> borel_measurable M" proof (induct I rule: finite_induct) case (insert i I) then show ?case by (cases "I = {}") auto qed auto lemma borel_measurable_Max[measurable (raw)]: "finite I \<Longrightarrow> (\<And>i. i \<in> I \<Longrightarrow> f i \<in> borel_measurable M) \<Longrightarrow> (\<lambda>x. Max ((\<lambda>i. f i x)`I) :: 'b::{second_countable_topology, linorder_topology}) \<in> borel_measurable M" proof (induct I rule: finite_induct) case (insert i I) then show ?case by (cases "I = {}") auto qed auto lemma borel_measurable_sup[measurable (raw)]: "f \<in> borel_measurable M \<Longrightarrow> g \<in> borel_measurable M \<Longrightarrow> (\<lambda>x. sup (g x) (f x) :: 'b::{lattice, second_countable_topology, linorder_topology}) \<in> borel_measurable M" unfolding sup_max by measurable lemma borel_measurable_inf[measurable (raw)]: "f \<in> borel_measurable M \<Longrightarrow> g \<in> borel_measurable M \<Longrightarrow> (\<lambda>x. 
inf (g x) (f x) :: 'b::{lattice, second_countable_topology, linorder_topology}) \<in> borel_measurable M" unfolding inf_min by measurable lemma [measurable (raw)]: fixes f :: "nat \<Rightarrow> 'a \<Rightarrow> 'b::{complete_linorder, second_countable_topology, linorder_topology}" assumes "\<And>i. f i \<in> borel_measurable M" shows borel_measurable_liminf: "(\<lambda>x. liminf (\<lambda>i. f i x)) \<in> borel_measurable M" and borel_measurable_limsup: "(\<lambda>x. limsup (\<lambda>i. f i x)) \<in> borel_measurable M" unfolding liminf_SUP_INF limsup_INF_SUP using assms by auto lemma measurable_convergent[measurable (raw)]: fixes f :: "nat \<Rightarrow> 'a \<Rightarrow> 'b::{complete_linorder, second_countable_topology, linorder_topology}" assumes [measurable]: "\<And>i. f i \<in> borel_measurable M" shows "Measurable.pred M (\<lambda>x. convergent (\<lambda>i. f i x))" unfolding convergent_ereal by measurable lemma sets_Collect_convergent[measurable]: fixes f :: "nat \<Rightarrow> 'a \<Rightarrow> 'b::{complete_linorder, second_countable_topology, linorder_topology}" assumes f[measurable]: "\<And>i. f i \<in> borel_measurable M" shows "{x\<in>space M. convergent (\<lambda>i. f i x)} \<in> sets M" by measurable lemma borel_measurable_lim[measurable (raw)]: fixes f :: "nat \<Rightarrow> 'a \<Rightarrow> 'b::{complete_linorder, second_countable_topology, linorder_topology}" assumes [measurable]: "\<And>i. f i \<in> borel_measurable M" shows "(\<lambda>x. lim (\<lambda>i. f i x)) \<in> borel_measurable M" proof - have "\<And>x. lim (\<lambda>i. f i x) = (if convergent (\<lambda>i. f i x) then limsup (\<lambda>i. f i x) else (THE i. False))" by (simp add: lim_def convergent_def convergent_limsup_cl) then show ?thesis by simp qed lemma borel_measurable_LIMSEQ_order: fixes u :: "nat \<Rightarrow> 'a \<Rightarrow> 'b::{complete_linorder, second_countable_topology, linorder_topology}" assumes u': "\<And>x. x \<in> space M \<Longrightarrow> (\<lambda>i. u i x) \<longlonglongrightarrow> u' x" and u: "\<And>i. u i \<in> borel_measurable M" shows "u' \<in> borel_measurable M" proof - have "\<And>x. x \<in> space M \<Longrightarrow> u' x = liminf (\<lambda>n. u n x)" using u' by (simp add: lim_imp_Liminf[symmetric]) with u show ?thesis by (simp cong: measurable_cong) qed subsection \<open>Borel spaces on topological monoids\<close> lemma borel_measurable_add[measurable (raw)]: fixes f g :: "'a \<Rightarrow> 'b::{second_countable_topology, topological_monoid_add}" assumes f: "f \<in> borel_measurable M" assumes g: "g \<in> borel_measurable M" shows "(\<lambda>x. f x + g x) \<in> borel_measurable M" using f g by (rule borel_measurable_continuous_Pair) (intro continuous_intros) lemma borel_measurable_sum[measurable (raw)]: fixes f :: "'c \<Rightarrow> 'a \<Rightarrow> 'b::{second_countable_topology, topological_comm_monoid_add}" assumes "\<And>i. i \<in> S \<Longrightarrow> f i \<in> borel_measurable M" shows "(\<lambda>x. \<Sum>i\<in>S. f i x) \<in> borel_measurable M" proof cases assume "finite S" thus ?thesis using assms by induct auto qed simp lemma borel_measurable_suminf_order[measurable (raw)]: fixes f :: "nat \<Rightarrow> 'a \<Rightarrow> 'b::{complete_linorder, second_countable_topology, linorder_topology, topological_comm_monoid_add}" assumes f[measurable]: "\<And>i. f i \<in> borel_measurable M" shows "(\<lambda>x. suminf (\<lambda>i. 
f i x)) \<in> borel_measurable M" unfolding suminf_def sums_def[abs_def] lim_def[symmetric] by simp subsection \<open>Borel spaces on Euclidean spaces\<close> lemma borel_measurable_inner[measurable (raw)]: fixes f g :: "'a \<Rightarrow> 'b::{second_countable_topology, real_inner}" assumes "f \<in> borel_measurable M" assumes "g \<in> borel_measurable M" shows "(\<lambda>x. f x \<bullet> g x) \<in> borel_measurable M" using assms by (rule borel_measurable_continuous_Pair) (intro continuous_intros) notation eucl_less (infix "<e" 50) lemma box_oc: "{x. a <e x \<and> x \<le> b} = {x. a <e x} \<inter> {..b}" and box_co: "{x. a \<le> x \<and> x <e b} = {a..} \<inter> {x. x <e b}" by auto lemma eucl_ivals[measurable]: fixes a b :: "'a::ordered_euclidean_space" shows "{x. x <e a} \<in> sets borel" and "{x. a <e x} \<in> sets borel" and "{..a} \<in> sets borel" and "{a..} \<in> sets borel" and "{a..b} \<in> sets borel" and "{x. a <e x \<and> x \<le> b} \<in> sets borel" and "{x. a \<le> x \<and> x <e b} \<in> sets borel" unfolding box_oc box_co by (auto intro: borel_open borel_closed) lemma fixes i :: "'a::{second_countable_topology, real_inner}" shows hafspace_less_borel: "{x. a < x \<bullet> i} \<in> sets borel" and hafspace_greater_borel: "{x. x \<bullet> i < a} \<in> sets borel" and hafspace_less_eq_borel: "{x. a \<le> x \<bullet> i} \<in> sets borel" and hafspace_greater_eq_borel: "{x. x \<bullet> i \<le> a} \<in> sets borel" by simp_all lemma borel_eq_box: "borel = sigma UNIV (range (\<lambda> (a, b). box a b :: 'a :: euclidean_space set))" (is "_ = ?SIGMA") proof (rule borel_eq_sigmaI1[OF borel_def]) fix M :: "'a set" assume "M \<in> {S. open S}" then have "open M" by simp show "M \<in> ?SIGMA" apply (subst open_UNION_box[OF \<open>open M\<close>]) apply (safe intro!: sets.countable_UN' countable_PiE countable_Collect) apply (auto intro: countable_rat) done qed (auto simp: box_def) lemma halfspace_gt_in_halfspace: assumes i: "i \<in> A" shows "{x::'a. a < x \<bullet> i} \<in> sigma_sets UNIV ((\<lambda> (a, i). {x::'a::euclidean_space. x \<bullet> i < a}) ` (UNIV \<times> A))" (is "?set \<in> ?SIGMA") proof - interpret sigma_algebra UNIV ?SIGMA by (intro sigma_algebra_sigma_sets) simp_all have *: "?set = (\<Union>n. UNIV - {x::'a. x \<bullet> i < a + 1 / real (Suc n)})" proof (safe, simp_all add: not_less del: of_nat_Suc) fix x :: 'a assume "a < x \<bullet> i" with reals_Archimedean[of "x \<bullet> i - a"] obtain n where "a + 1 / real (Suc n) < x \<bullet> i" by (auto simp: field_simps) then show "\<exists>n. a + 1 / real (Suc n) \<le> x \<bullet> i" by (blast intro: less_imp_le) next fix x n have "a < a + 1 / real (Suc n)" by auto also assume "\<dots> \<le> x" finally show "a < x" . qed show "?set \<in> ?SIGMA" unfolding * by (auto intro!: Diff sigma_sets_Inter i) qed lemma borel_eq_halfspace_less: "borel = sigma UNIV ((\<lambda>(a, i). {x::'a::euclidean_space. x \<bullet> i < a}) ` (UNIV \<times> Basis))" (is "_ = ?SIGMA") proof (rule borel_eq_sigmaI2[OF borel_eq_box]) fix a b :: 'a have "box a b = {x\<in>space ?SIGMA. \<forall>i\<in>Basis. a \<bullet> i < x \<bullet> i \<and> x \<bullet> i < b \<bullet> i}" by (auto simp: box_def) also have "\<dots> \<in> sets ?SIGMA" by (intro sets.sets_Collect_conj sets.sets_Collect_finite_All sets.sets_Collect_const) (auto intro!: halfspace_gt_in_halfspace countable_PiE countable_rat) finally show "box a b \<in> sets ?SIGMA" . qed auto lemma borel_eq_halfspace_le: "borel = sigma UNIV ((\<lambda> (a, i). {x::'a::euclidean_space. 
x \<bullet> i \<le> a}) ` (UNIV \<times> Basis))" (is "_ = ?SIGMA") proof (rule borel_eq_sigmaI2[OF borel_eq_halfspace_less]) fix a :: real and i :: 'a assume "(a, i) \<in> UNIV \<times> Basis" then have i: "i \<in> Basis" by auto have *: "{x::'a. x\<bullet>i < a} = (\<Union>n. {x. x\<bullet>i \<le> a - 1/real (Suc n)})" proof (safe, simp_all del: of_nat_Suc) fix x::'a assume *: "x\<bullet>i < a" with reals_Archimedean[of "a - x\<bullet>i"] obtain n where "x \<bullet> i < a - 1 / (real (Suc n))" by (auto simp: field_simps) then show "\<exists>n. x \<bullet> i \<le> a - 1 / (real (Suc n))" by (blast intro: less_imp_le) next fix x::'a and n assume "x\<bullet>i \<le> a - 1 / real (Suc n)" also have "\<dots> < a" by auto finally show "x\<bullet>i < a" . qed show "{x. x\<bullet>i < a} \<in> ?SIGMA" unfolding * by (intro sets.countable_UN) (auto intro: i) qed auto lemma borel_eq_halfspace_ge: "borel = sigma UNIV ((\<lambda> (a, i). {x::'a::euclidean_space. a \<le> x \<bullet> i}) ` (UNIV \<times> Basis))" (is "_ = ?SIGMA") proof (rule borel_eq_sigmaI2[OF borel_eq_halfspace_less]) fix a :: real and i :: 'a assume i: "(a, i) \<in> UNIV \<times> Basis" have *: "{x::'a. x\<bullet>i < a} = space ?SIGMA - {x::'a. a \<le> x\<bullet>i}" by auto show "{x. x\<bullet>i < a} \<in> ?SIGMA" unfolding * using i by (intro sets.compl_sets) auto qed auto lemma borel_eq_halfspace_greater: "borel = sigma UNIV ((\<lambda> (a, i). {x::'a::euclidean_space. a < x \<bullet> i}) ` (UNIV \<times> Basis))" (is "_ = ?SIGMA") proof (rule borel_eq_sigmaI2[OF borel_eq_halfspace_le]) fix a :: real and i :: 'a assume "(a, i) \<in> (UNIV \<times> Basis)" then have i: "i \<in> Basis" by auto have *: "{x::'a. x\<bullet>i \<le> a} = space ?SIGMA - {x::'a. a < x\<bullet>i}" by auto show "{x. x\<bullet>i \<le> a} \<in> ?SIGMA" unfolding * by (intro sets.compl_sets) (auto intro: i) qed auto lemma borel_eq_atMost: "borel = sigma UNIV (range (\<lambda>a. {..a::'a::ordered_euclidean_space}))" (is "_ = ?SIGMA") proof (rule borel_eq_sigmaI4[OF borel_eq_halfspace_le]) fix a :: real and i :: 'a assume "(a, i) \<in> UNIV \<times> Basis" then have "i \<in> Basis" by auto then have *: "{x::'a. x\<bullet>i \<le> a} = (\<Union>k::nat. {.. (\<Sum>n\<in>Basis. (if n = i then a else real k)*\<^sub>R n)})" proof (safe, simp_all add: eucl_le[where 'a='a] split: if_split_asm) fix x :: 'a from real_arch_simple[of "Max ((\<lambda>i. x\<bullet>i)`Basis)"] guess k::nat .. then have "\<And>i. i \<in> Basis \<Longrightarrow> x\<bullet>i \<le> real k" by (subst (asm) Max_le_iff) auto then show "\<exists>k::nat. \<forall>ia\<in>Basis. ia \<noteq> i \<longrightarrow> x \<bullet> ia \<le> real k" by (auto intro!: exI[of _ k]) qed show "{x. x\<bullet>i \<le> a} \<in> ?SIGMA" unfolding * by (intro sets.countable_UN) auto qed auto lemma borel_eq_greaterThan: "borel = sigma UNIV (range (\<lambda>a::'a::ordered_euclidean_space. {x. a <e x}))" (is "_ = ?SIGMA") proof (rule borel_eq_sigmaI4[OF borel_eq_halfspace_le]) fix a :: real and i :: 'a assume "(a, i) \<in> UNIV \<times> Basis" then have i: "i \<in> Basis" by auto have "{x::'a. x\<bullet>i \<le> a} = UNIV - {x::'a. a < x\<bullet>i}" by auto also have *: "{x::'a. a < x\<bullet>i} = (\<Union>k::nat. {x. (\<Sum>n\<in>Basis. (if n = i then a else -real k) *\<^sub>R n) <e x})" using i proof (safe, simp_all add: eucl_less_def split: if_split_asm) fix x :: 'a from reals_Archimedean2[of "Max ((\<lambda>i. -x\<bullet>i)`Basis)"] guess k::nat .. 
note k = this { fix i :: 'a assume "i \<in> Basis" then have "-x\<bullet>i < real k" using k by (subst (asm) Max_less_iff) auto then have "- real k < x\<bullet>i" by simp } then show "\<exists>k::nat. \<forall>ia\<in>Basis. ia \<noteq> i \<longrightarrow> -real k < x \<bullet> ia" by (auto intro!: exI[of _ k]) qed finally show "{x. x\<bullet>i \<le> a} \<in> ?SIGMA" apply (simp only:) apply (intro sets.countable_UN sets.Diff) apply (auto intro: sigma_sets_top) done qed auto lemma borel_eq_lessThan: "borel = sigma UNIV (range (\<lambda>a::'a::ordered_euclidean_space. {x. x <e a}))" (is "_ = ?SIGMA") proof (rule borel_eq_sigmaI4[OF borel_eq_halfspace_ge]) fix a :: real and i :: 'a assume "(a, i) \<in> UNIV \<times> Basis" then have i: "i \<in> Basis" by auto have "{x::'a. a \<le> x\<bullet>i} = UNIV - {x::'a. x\<bullet>i < a}" by auto also have *: "{x::'a. x\<bullet>i < a} = (\<Union>k::nat. {x. x <e (\<Sum>n\<in>Basis. (if n = i then a else real k) *\<^sub>R n)})" using \<open>i\<in> Basis\<close> proof (safe, simp_all add: eucl_less_def split: if_split_asm) fix x :: 'a from reals_Archimedean2[of "Max ((\<lambda>i. x\<bullet>i)`Basis)"] guess k::nat .. note k = this { fix i :: 'a assume "i \<in> Basis" then have "x\<bullet>i < real k" using k by (subst (asm) Max_less_iff) auto then have "x\<bullet>i < real k" by simp } then show "\<exists>k::nat. \<forall>ia\<in>Basis. ia \<noteq> i \<longrightarrow> x \<bullet> ia < real k" by (auto intro!: exI[of _ k]) qed finally show "{x. a \<le> x\<bullet>i} \<in> ?SIGMA" apply (simp only:) apply (intro sets.countable_UN sets.Diff) apply (auto intro: sigma_sets_top ) done qed auto lemma borel_eq_atLeastAtMost: "borel = sigma UNIV (range (\<lambda>(a,b). {a..b} ::'a::ordered_euclidean_space set))" (is "_ = ?SIGMA") proof (rule borel_eq_sigmaI5[OF borel_eq_atMost]) fix a::'a have *: "{..a} = (\<Union>n::nat. {- real n *\<^sub>R One .. a})" proof (safe, simp_all add: eucl_le[where 'a='a]) fix x :: 'a from real_arch_simple[of "Max ((\<lambda>i. - x\<bullet>i)`Basis)"] guess k::nat .. note k = this { fix i :: 'a assume "i \<in> Basis" with k have "- x\<bullet>i \<le> real k" by (subst (asm) Max_le_iff) (auto simp: field_simps) then have "- real k \<le> x\<bullet>i" by simp } then show "\<exists>n::nat. \<forall>i\<in>Basis. - real n \<le> x \<bullet> i" by (auto intro!: exI[of _ k]) qed show "{..a} \<in> ?SIGMA" unfolding * by (intro sets.countable_UN) (auto intro!: sigma_sets_top) qed auto lemma borel_set_induct[consumes 1, case_names empty interval compl union]: assumes "A \<in> sets borel" assumes empty: "P {}" and int: "\<And>a b. a \<le> b \<Longrightarrow> P {a..b}" and compl: "\<And>A. A \<in> sets borel \<Longrightarrow> P A \<Longrightarrow> P (-A)" and un: "\<And>f. disjoint_family f \<Longrightarrow> (\<And>i. f i \<in> sets borel) \<Longrightarrow> (\<And>i. P (f i)) \<Longrightarrow> P (\<Union>i::nat. f i)" shows "P (A::real set)" proof - let ?G = "range (\<lambda>(a,b). {a..b::real})" have "Int_stable ?G" "?G \<subseteq> Pow UNIV" "A \<in> sigma_sets UNIV ?G" using assms(1) by (auto simp add: borel_eq_atLeastAtMost Int_stable_def) thus ?thesis proof (induction rule: sigma_sets_induct_disjoint) case (union f) from union.hyps(2) have "\<And>i. f i \<in> sets borel" by (auto simp: borel_eq_atLeastAtMost) with union show ?case by (auto intro: un) next case (basic A) then obtain a b where "A = {a .. 
b}" by auto then show ?case by (cases "a \<le> b") (auto intro: int empty) qed (auto intro: empty compl simp: Compl_eq_Diff_UNIV[symmetric] borel_eq_atLeastAtMost) qed lemma borel_sigma_sets_Ioc: "borel = sigma UNIV (range (\<lambda>(a, b). {a <.. b::real}))" proof (rule borel_eq_sigmaI5[OF borel_eq_atMost]) fix i :: real have "{..i} = (\<Union>j::nat. {-j <.. i})" by (auto simp: minus_less_iff reals_Archimedean2) also have "\<dots> \<in> sets (sigma UNIV (range (\<lambda>(i, j). {i<..j})))" by (intro sets.countable_nat_UN) auto finally show "{..i} \<in> sets (sigma UNIV (range (\<lambda>(i, j). {i<..j})))" . qed simp lemma eucl_lessThan: "{x::real. x <e a} = lessThan a" by (simp add: eucl_less_def lessThan_def) lemma borel_eq_atLeastLessThan: "borel = sigma UNIV (range (\<lambda>(a, b). {a ..< b :: real}))" (is "_ = ?SIGMA") proof (rule borel_eq_sigmaI5[OF borel_eq_lessThan]) have move_uminus: "\<And>x y::real. -x \<le> y \<longleftrightarrow> -y \<le> x" by auto fix x :: real have "{..<x} = (\<Union>i::nat. {-real i ..< x})" by (auto simp: move_uminus real_arch_simple) then show "{y. y <e x} \<in> ?SIGMA" by (auto intro: sigma_sets.intros(2-) simp: eucl_lessThan) qed auto lemma borel_measurable_halfspacesI: fixes f :: "'a \<Rightarrow> 'c::euclidean_space" assumes F: "borel = sigma UNIV (F ` (UNIV \<times> Basis))" and S_eq: "\<And>a i. S a i = f -` F (a,i) \<inter> space M" shows "f \<in> borel_measurable M = (\<forall>i\<in>Basis. \<forall>a::real. S a i \<in> sets M)" proof safe fix a :: real and i :: 'b assume i: "i \<in> Basis" and f: "f \<in> borel_measurable M" then show "S a i \<in> sets M" unfolding assms by (auto intro!: measurable_sets simp: assms(1)) next assume a: "\<forall>i\<in>Basis. \<forall>a. S a i \<in> sets M" then show "f \<in> borel_measurable M" by (auto intro!: measurable_measure_of simp: S_eq F) qed lemma borel_measurable_iff_halfspace_le: fixes f :: "'a \<Rightarrow> 'c::euclidean_space" shows "f \<in> borel_measurable M = (\<forall>i\<in>Basis. \<forall>a. {w \<in> space M. f w \<bullet> i \<le> a} \<in> sets M)" by (rule borel_measurable_halfspacesI[OF borel_eq_halfspace_le]) auto lemma borel_measurable_iff_halfspace_less: fixes f :: "'a \<Rightarrow> 'c::euclidean_space" shows "f \<in> borel_measurable M \<longleftrightarrow> (\<forall>i\<in>Basis. \<forall>a. {w \<in> space M. f w \<bullet> i < a} \<in> sets M)" by (rule borel_measurable_halfspacesI[OF borel_eq_halfspace_less]) auto lemma borel_measurable_iff_halfspace_ge: fixes f :: "'a \<Rightarrow> 'c::euclidean_space" shows "f \<in> borel_measurable M = (\<forall>i\<in>Basis. \<forall>a. {w \<in> space M. a \<le> f w \<bullet> i} \<in> sets M)" by (rule borel_measurable_halfspacesI[OF borel_eq_halfspace_ge]) auto lemma borel_measurable_iff_halfspace_greater: fixes f :: "'a \<Rightarrow> 'c::euclidean_space" shows "f \<in> borel_measurable M \<longleftrightarrow> (\<forall>i\<in>Basis. \<forall>a. {w \<in> space M. a < f w \<bullet> i} \<in> sets M)" by (rule borel_measurable_halfspacesI[OF borel_eq_halfspace_greater]) auto lemma borel_measurable_iff_le: "(f::'a \<Rightarrow> real) \<in> borel_measurable M = (\<forall>a. {w \<in> space M. f w \<le> a} \<in> sets M)" using borel_measurable_iff_halfspace_le[where 'c=real] by simp lemma borel_measurable_iff_less: "(f::'a \<Rightarrow> real) \<in> borel_measurable M = (\<forall>a. {w \<in> space M. 
f w < a} \<in> sets M)" using borel_measurable_iff_halfspace_less[where 'c=real] by simp lemma borel_measurable_iff_ge: "(f::'a \<Rightarrow> real) \<in> borel_measurable M = (\<forall>a. {w \<in> space M. a \<le> f w} \<in> sets M)" using borel_measurable_iff_halfspace_ge[where 'c=real] by simp lemma borel_measurable_iff_greater: "(f::'a \<Rightarrow> real) \<in> borel_measurable M = (\<forall>a. {w \<in> space M. a < f w} \<in> sets M)" using borel_measurable_iff_halfspace_greater[where 'c=real] by simp lemma borel_measurable_euclidean_space: fixes f :: "'a \<Rightarrow> 'c::euclidean_space" shows "f \<in> borel_measurable M \<longleftrightarrow> (\<forall>i\<in>Basis. (\<lambda>x. f x \<bullet> i) \<in> borel_measurable M)" proof safe assume f: "\<forall>i\<in>Basis. (\<lambda>x. f x \<bullet> i) \<in> borel_measurable M" then show "f \<in> borel_measurable M" by (subst borel_measurable_iff_halfspace_le) auto qed auto subsection "Borel measurable operators" lemma borel_measurable_norm[measurable]: "norm \<in> borel_measurable borel" by (intro borel_measurable_continuous_onI continuous_intros) lemma borel_measurable_sgn [measurable]: "(sgn::'a::real_normed_vector \<Rightarrow> 'a) \<in> borel_measurable borel" by (rule borel_measurable_continuous_countable_exceptions[where X="{0}"]) (auto intro!: continuous_on_sgn continuous_on_id) lemma borel_measurable_uminus[measurable (raw)]: fixes g :: "'a \<Rightarrow> 'b::{second_countable_topology, real_normed_vector}" assumes g: "g \<in> borel_measurable M" shows "(\<lambda>x. - g x) \<in> borel_measurable M" by (rule borel_measurable_continuous_on[OF _ g]) (intro continuous_intros) lemma borel_measurable_diff[measurable (raw)]: fixes f :: "'a \<Rightarrow> 'b::{second_countable_topology, real_normed_vector}" assumes f: "f \<in> borel_measurable M" assumes g: "g \<in> borel_measurable M" shows "(\<lambda>x. f x - g x) \<in> borel_measurable M" using borel_measurable_add [of f M "- g"] assms by (simp add: fun_Compl_def) lemma borel_measurable_times[measurable (raw)]: fixes f :: "'a \<Rightarrow> 'b::{second_countable_topology, real_normed_algebra}" assumes f: "f \<in> borel_measurable M" assumes g: "g \<in> borel_measurable M" shows "(\<lambda>x. f x * g x) \<in> borel_measurable M" using f g by (rule borel_measurable_continuous_Pair) (intro continuous_intros) lemma borel_measurable_prod[measurable (raw)]: fixes f :: "'c \<Rightarrow> 'a \<Rightarrow> 'b::{second_countable_topology, real_normed_field}" assumes "\<And>i. i \<in> S \<Longrightarrow> f i \<in> borel_measurable M" shows "(\<lambda>x. \<Prod>i\<in>S. f i x) \<in> borel_measurable M" proof cases assume "finite S" thus ?thesis using assms by induct auto qed simp lemma borel_measurable_dist[measurable (raw)]: fixes g f :: "'a \<Rightarrow> 'b::{second_countable_topology, metric_space}" assumes f: "f \<in> borel_measurable M" assumes g: "g \<in> borel_measurable M" shows "(\<lambda>x. dist (f x) (g x)) \<in> borel_measurable M" using f g by (rule borel_measurable_continuous_Pair) (intro continuous_intros) lemma borel_measurable_scaleR[measurable (raw)]: fixes g :: "'a \<Rightarrow> 'b::{second_countable_topology, real_normed_vector}" assumes f: "f \<in> borel_measurable M" assumes g: "g \<in> borel_measurable M" shows "(\<lambda>x. 
f x *\<^sub>R g x) \<in> borel_measurable M" using f g by (rule borel_measurable_continuous_Pair) (intro continuous_intros) lemma borel_measurable_uminus_eq [simp]: fixes f :: "'a \<Rightarrow> 'b::{second_countable_topology, real_normed_vector}" shows "(\<lambda>x. - f x) \<in> borel_measurable M \<longleftrightarrow> f \<in> borel_measurable M" (is "?l = ?r") proof assume ?l from borel_measurable_uminus[OF this] show ?r by simp qed auto lemma affine_borel_measurable_vector: fixes f :: "'a \<Rightarrow> 'x::real_normed_vector" assumes "f \<in> borel_measurable M" shows "(\<lambda>x. a + b *\<^sub>R f x) \<in> borel_measurable M" proof (rule borel_measurableI) fix S :: "'x set" assume "open S" show "(\<lambda>x. a + b *\<^sub>R f x) -` S \<inter> space M \<in> sets M" proof cases assume "b \<noteq> 0" with \<open>open S\<close> have "open ((\<lambda>x. (- a + x) /\<^sub>R b) ` S)" (is "open ?S") using open_affinity [of S "inverse b" "- a /\<^sub>R b"] by (auto simp: algebra_simps) hence "?S \<in> sets borel" by auto moreover from \<open>b \<noteq> 0\<close> have "(\<lambda>x. a + b *\<^sub>R f x) -` S = f -` ?S" apply auto by (rule_tac x="a + b *\<^sub>R f x" in image_eqI, simp_all) ultimately show ?thesis using assms unfolding in_borel_measurable_borel by auto qed simp qed lemma borel_measurable_const_scaleR[measurable (raw)]: "f \<in> borel_measurable M \<Longrightarrow> (\<lambda>x. b *\<^sub>R f x ::'a::real_normed_vector) \<in> borel_measurable M" using affine_borel_measurable_vector[of f M 0 b] by simp lemma borel_measurable_const_add[measurable (raw)]: "f \<in> borel_measurable M \<Longrightarrow> (\<lambda>x. a + f x ::'a::real_normed_vector) \<in> borel_measurable M" using affine_borel_measurable_vector[of f M a 1] by simp lemma borel_measurable_inverse[measurable (raw)]: fixes f :: "'a \<Rightarrow> 'b::real_normed_div_algebra" assumes f: "f \<in> borel_measurable M" shows "(\<lambda>x. inverse (f x)) \<in> borel_measurable M" apply (rule measurable_compose[OF f]) apply (rule borel_measurable_continuous_countable_exceptions[of "{0}"]) apply (auto intro!: continuous_on_inverse continuous_on_id) done lemma borel_measurable_divide[measurable (raw)]: "f \<in> borel_measurable M \<Longrightarrow> g \<in> borel_measurable M \<Longrightarrow> (\<lambda>x. f x / g x::'b::{second_countable_topology, real_normed_div_algebra}) \<in> borel_measurable M" by (simp add: divide_inverse) lemma borel_measurable_abs[measurable (raw)]: "f \<in> borel_measurable M \<Longrightarrow> (\<lambda>x. \<bar>f x :: real\<bar>) \<in> borel_measurable M" unfolding abs_real_def by simp lemma borel_measurable_nth[measurable (raw)]: "(\<lambda>x::real^'n. x $ i) \<in> borel_measurable borel" by (simp add: cart_eq_inner_axis) lemma convex_measurable: fixes A :: "'a :: euclidean_space set" shows "X \<in> borel_measurable M \<Longrightarrow> X ` space M \<subseteq> A \<Longrightarrow> open A \<Longrightarrow> convex_on A q \<Longrightarrow> (\<lambda>x. q (X x)) \<in> borel_measurable M" by (rule measurable_compose[where f=X and N="restrict_space borel A"]) (auto intro!: borel_measurable_continuous_on_restrict convex_on_continuous measurable_restrict_space2) lemma borel_measurable_ln[measurable (raw)]: assumes f: "f \<in> borel_measurable M" shows "(\<lambda>x. 
ln (f x :: real)) \<in> borel_measurable M" apply (rule measurable_compose[OF f]) apply (rule borel_measurable_continuous_countable_exceptions[of "{0}"]) apply (auto intro!: continuous_on_ln continuous_on_id) done lemma borel_measurable_log[measurable (raw)]: "f \<in> borel_measurable M \<Longrightarrow> g \<in> borel_measurable M \<Longrightarrow> (\<lambda>x. log (g x) (f x)) \<in> borel_measurable M" unfolding log_def by auto lemma borel_measurable_exp[measurable]: "(exp::'a::{real_normed_field,banach}\<Rightarrow>'a) \<in> borel_measurable borel" by (intro borel_measurable_continuous_onI continuous_at_imp_continuous_on ballI isCont_exp) lemma measurable_real_floor[measurable]: "(floor :: real \<Rightarrow> int) \<in> measurable borel (count_space UNIV)" proof - have "\<And>a x. \<lfloor>x\<rfloor> = a \<longleftrightarrow> (real_of_int a \<le> x \<and> x < real_of_int (a + 1))" by (auto intro: floor_eq2) then show ?thesis by (auto simp: vimage_def measurable_count_space_eq2_countable) qed lemma measurable_real_ceiling[measurable]: "(ceiling :: real \<Rightarrow> int) \<in> measurable borel (count_space UNIV)" unfolding ceiling_def[abs_def] by simp lemma borel_measurable_real_floor: "(\<lambda>x::real. real_of_int \<lfloor>x\<rfloor>) \<in> borel_measurable borel" by simp lemma borel_measurable_root [measurable]: "root n \<in> borel_measurable borel" by (intro borel_measurable_continuous_onI continuous_intros) lemma borel_measurable_sqrt [measurable]: "sqrt \<in> borel_measurable borel" by (intro borel_measurable_continuous_onI continuous_intros) lemma borel_measurable_power [measurable (raw)]: fixes f :: "_ \<Rightarrow> 'b::{power,real_normed_algebra}" assumes f: "f \<in> borel_measurable M" shows "(\<lambda>x. (f x) ^ n) \<in> borel_measurable M" by (intro borel_measurable_continuous_on [OF _ f] continuous_intros) lemma borel_measurable_Re [measurable]: "Re \<in> borel_measurable borel" by (intro borel_measurable_continuous_onI continuous_intros) lemma borel_measurable_Im [measurable]: "Im \<in> borel_measurable borel" by (intro borel_measurable_continuous_onI continuous_intros) lemma borel_measurable_of_real [measurable]: "(of_real :: _ \<Rightarrow> (_::real_normed_algebra)) \<in> borel_measurable borel" by (intro borel_measurable_continuous_onI continuous_intros) lemma borel_measurable_sin [measurable]: "(sin :: _ \<Rightarrow> (_::{real_normed_field,banach})) \<in> borel_measurable borel" by (intro borel_measurable_continuous_onI continuous_intros) lemma borel_measurable_cos [measurable]: "(cos :: _ \<Rightarrow> (_::{real_normed_field,banach})) \<in> borel_measurable borel" by (intro borel_measurable_continuous_onI continuous_intros) lemma borel_measurable_arctan [measurable]: "arctan \<in> borel_measurable borel" by (intro borel_measurable_continuous_onI continuous_intros) lemma\<^marker>\<open>tag important\<close> borel_measurable_complex_iff: "f \<in> borel_measurable M \<longleftrightarrow> (\<lambda>x. Re (f x)) \<in> borel_measurable M \<and> (\<lambda>x. Im (f x)) \<in> borel_measurable M" apply auto apply (subst fun_complex_eq) apply (intro borel_measurable_add) apply auto done lemma powr_real_measurable [measurable]: assumes "f \<in> measurable M borel" "g \<in> measurable M borel" shows "(\<lambda>x. 
f x powr g x :: real) \<in> measurable M borel" using assms by (simp_all add: powr_def) lemma measurable_of_bool[measurable]: "of_bool \<in> count_space UNIV \<rightarrow>\<^sub>M borel" by simp subsection "Borel space on the extended reals" lemma borel_measurable_ereal[measurable (raw)]: assumes f: "f \<in> borel_measurable M" shows "(\<lambda>x. ereal (f x)) \<in> borel_measurable M" using continuous_on_ereal f by (rule borel_measurable_continuous_on) (rule continuous_on_id) lemma borel_measurable_real_of_ereal[measurable (raw)]: fixes f :: "'a \<Rightarrow> ereal" assumes f: "f \<in> borel_measurable M" shows "(\<lambda>x. real_of_ereal (f x)) \<in> borel_measurable M" apply (rule measurable_compose[OF f]) apply (rule borel_measurable_continuous_countable_exceptions[of "{\<infinity>, -\<infinity> }"]) apply (auto intro: continuous_on_real simp: Compl_eq_Diff_UNIV) done lemma borel_measurable_ereal_cases: fixes f :: "'a \<Rightarrow> ereal" assumes f: "f \<in> borel_measurable M" assumes H: "(\<lambda>x. H (ereal (real_of_ereal (f x)))) \<in> borel_measurable M" shows "(\<lambda>x. H (f x)) \<in> borel_measurable M" proof - let ?F = "\<lambda>x. if f x = \<infinity> then H \<infinity> else if f x = - \<infinity> then H (-\<infinity>) else H (ereal (real_of_ereal (f x)))" { fix x have "H (f x) = ?F x" by (cases "f x") auto } with f H show ?thesis by simp qed lemma fixes f :: "'a \<Rightarrow> ereal" assumes f[measurable]: "f \<in> borel_measurable M" shows borel_measurable_ereal_abs[measurable(raw)]: "(\<lambda>x. \<bar>f x\<bar>) \<in> borel_measurable M" and borel_measurable_ereal_inverse[measurable(raw)]: "(\<lambda>x. inverse (f x) :: ereal) \<in> borel_measurable M" and borel_measurable_uminus_ereal[measurable(raw)]: "(\<lambda>x. - f x :: ereal) \<in> borel_measurable M" by (auto simp del: abs_real_of_ereal simp: borel_measurable_ereal_cases[OF f] measurable_If) lemma borel_measurable_uminus_eq_ereal[simp]: "(\<lambda>x. - f x :: ereal) \<in> borel_measurable M \<longleftrightarrow> f \<in> borel_measurable M" (is "?l = ?r") proof assume ?l from borel_measurable_uminus_ereal[OF this] show ?r by simp qed auto lemma set_Collect_ereal2: fixes f g :: "'a \<Rightarrow> ereal" assumes f: "f \<in> borel_measurable M" assumes g: "g \<in> borel_measurable M" assumes H: "{x \<in> space M. H (ereal (real_of_ereal (f x))) (ereal (real_of_ereal (g x)))} \<in> sets M" "{x \<in> space borel. H (-\<infinity>) (ereal x)} \<in> sets borel" "{x \<in> space borel. H (\<infinity>) (ereal x)} \<in> sets borel" "{x \<in> space borel. H (ereal x) (-\<infinity>)} \<in> sets borel" "{x \<in> space borel. H (ereal x) (\<infinity>)} \<in> sets borel" shows "{x \<in> space M. H (f x) (g x)} \<in> sets M" proof - let ?G = "\<lambda>y x. if g x = \<infinity> then H y \<infinity> else if g x = -\<infinity> then H y (-\<infinity>) else H y (ereal (real_of_ereal (g x)))" let ?F = "\<lambda>x. if f x = \<infinity> then ?G \<infinity> x else if f x = -\<infinity> then ?G (-\<infinity>) x else ?G (ereal (real_of_ereal (f x))) x" { fix x have "H (f x) (g x) = ?F x" by (cases "f x" "g x" rule: ereal2_cases) auto } note * = this from assms show ?thesis by (subst *) (simp del: space_borel split del: if_split) qed lemma borel_measurable_ereal_iff: shows "(\<lambda>x. ereal (f x)) \<in> borel_measurable M \<longleftrightarrow> f \<in> borel_measurable M" proof assume "(\<lambda>x. 
ereal (f x)) \<in> borel_measurable M" from borel_measurable_real_of_ereal[OF this] show "f \<in> borel_measurable M" by auto qed auto lemma borel_measurable_erealD[measurable_dest]: "(\<lambda>x. ereal (f x)) \<in> borel_measurable M \<Longrightarrow> g \<in> measurable N M \<Longrightarrow> (\<lambda>x. f (g x)) \<in> borel_measurable N" unfolding borel_measurable_ereal_iff by simp theorem borel_measurable_ereal_iff_real: fixes f :: "'a \<Rightarrow> ereal" shows "f \<in> borel_measurable M \<longleftrightarrow> ((\<lambda>x. real_of_ereal (f x)) \<in> borel_measurable M \<and> f -` {\<infinity>} \<inter> space M \<in> sets M \<and> f -` {-\<infinity>} \<inter> space M \<in> sets M)" proof safe assume *: "(\<lambda>x. real_of_ereal (f x)) \<in> borel_measurable M" "f -` {\<infinity>} \<inter> space M \<in> sets M" "f -` {-\<infinity>} \<inter> space M \<in> sets M" have "f -` {\<infinity>} \<inter> space M = {x\<in>space M. f x = \<infinity>}" "f -` {-\<infinity>} \<inter> space M = {x\<in>space M. f x = -\<infinity>}" by auto with * have **: "{x\<in>space M. f x = \<infinity>} \<in> sets M" "{x\<in>space M. f x = -\<infinity>} \<in> sets M" by simp_all let ?f = "\<lambda>x. if f x = \<infinity> then \<infinity> else if f x = -\<infinity> then -\<infinity> else ereal (real_of_ereal (f x))" have "?f \<in> borel_measurable M" using * ** by (intro measurable_If) auto also have "?f = f" by (auto simp: fun_eq_iff ereal_real) finally show "f \<in> borel_measurable M" . qed simp_all lemma borel_measurable_ereal_iff_Iio: "(f::'a \<Rightarrow> ereal) \<in> borel_measurable M \<longleftrightarrow> (\<forall>a. f -` {..< a} \<inter> space M \<in> sets M)" by (auto simp: borel_Iio measurable_iff_measure_of) lemma borel_measurable_ereal_iff_Ioi: "(f::'a \<Rightarrow> ereal) \<in> borel_measurable M \<longleftrightarrow> (\<forall>a. f -` {a <..} \<inter> space M \<in> sets M)" by (auto simp: borel_Ioi measurable_iff_measure_of) lemma vimage_sets_compl_iff: "f -` A \<inter> space M \<in> sets M \<longleftrightarrow> f -` (- A) \<inter> space M \<in> sets M" proof - { fix A assume "f -` A \<inter> space M \<in> sets M" moreover have "f -` (- A) \<inter> space M = space M - f -` A \<inter> space M" by auto ultimately have "f -` (- A) \<inter> space M \<in> sets M" by auto } from this[of A] this[of "-A"] show ?thesis by (metis double_complement) qed lemma borel_measurable_iff_Iic_ereal: "(f::'a\<Rightarrow>ereal) \<in> borel_measurable M \<longleftrightarrow> (\<forall>a. f -` {..a} \<inter> space M \<in> sets M)" unfolding borel_measurable_ereal_iff_Ioi vimage_sets_compl_iff[where A="{a <..}" for a] by simp lemma borel_measurable_iff_Ici_ereal: "(f::'a \<Rightarrow> ereal) \<in> borel_measurable M \<longleftrightarrow> (\<forall>a. f -` {a..} \<inter> space M \<in> sets M)" unfolding borel_measurable_ereal_iff_Iio vimage_sets_compl_iff[where A="{..< a}" for a] by simp lemma borel_measurable_ereal2: fixes f g :: "'a \<Rightarrow> ereal" assumes f: "f \<in> borel_measurable M" assumes g: "g \<in> borel_measurable M" assumes H: "(\<lambda>x. H (ereal (real_of_ereal (f x))) (ereal (real_of_ereal (g x)))) \<in> borel_measurable M" "(\<lambda>x. H (-\<infinity>) (ereal (real_of_ereal (g x)))) \<in> borel_measurable M" "(\<lambda>x. H (\<infinity>) (ereal (real_of_ereal (g x)))) \<in> borel_measurable M" "(\<lambda>x. H (ereal (real_of_ereal (f x))) (-\<infinity>)) \<in> borel_measurable M" "(\<lambda>x. H (ereal (real_of_ereal (f x))) (\<infinity>)) \<in> borel_measurable M" shows "(\<lambda>x. 
H (f x) (g x)) \<in> borel_measurable M" proof - let ?G = "\<lambda>y x. if g x = \<infinity> then H y \<infinity> else if g x = - \<infinity> then H y (-\<infinity>) else H y (ereal (real_of_ereal (g x)))" let ?F = "\<lambda>x. if f x = \<infinity> then ?G \<infinity> x else if f x = - \<infinity> then ?G (-\<infinity>) x else ?G (ereal (real_of_ereal (f x))) x" { fix x have "H (f x) (g x) = ?F x" by (cases "f x" "g x" rule: ereal2_cases) auto } note * = this from assms show ?thesis unfolding * by simp qed lemma [measurable(raw)]: fixes f :: "'a \<Rightarrow> ereal" assumes [measurable]: "f \<in> borel_measurable M" "g \<in> borel_measurable M" shows borel_measurable_ereal_add: "(\<lambda>x. f x + g x) \<in> borel_measurable M" and borel_measurable_ereal_times: "(\<lambda>x. f x * g x) \<in> borel_measurable M" by (simp_all add: borel_measurable_ereal2) lemma [measurable(raw)]: fixes f g :: "'a \<Rightarrow> ereal" assumes "f \<in> borel_measurable M" assumes "g \<in> borel_measurable M" shows borel_measurable_ereal_diff: "(\<lambda>x. f x - g x) \<in> borel_measurable M" and borel_measurable_ereal_divide: "(\<lambda>x. f x / g x) \<in> borel_measurable M" using assms by (simp_all add: minus_ereal_def divide_ereal_def) lemma borel_measurable_ereal_sum[measurable (raw)]: fixes f :: "'c \<Rightarrow> 'a \<Rightarrow> ereal" assumes "\<And>i. i \<in> S \<Longrightarrow> f i \<in> borel_measurable M" shows "(\<lambda>x. \<Sum>i\<in>S. f i x) \<in> borel_measurable M" using assms by (induction S rule: infinite_finite_induct) auto lemma borel_measurable_ereal_prod[measurable (raw)]: fixes f :: "'c \<Rightarrow> 'a \<Rightarrow> ereal" assumes "\<And>i. i \<in> S \<Longrightarrow> f i \<in> borel_measurable M" shows "(\<lambda>x. \<Prod>i\<in>S. f i x) \<in> borel_measurable M" using assms by (induction S rule: infinite_finite_induct) auto lemma borel_measurable_extreal_suminf[measurable (raw)]: fixes f :: "nat \<Rightarrow> 'a \<Rightarrow> ereal" assumes [measurable]: "\<And>i. f i \<in> borel_measurable M" shows "(\<lambda>x. (\<Sum>i. f i x)) \<in> borel_measurable M" unfolding suminf_def sums_def[abs_def] lim_def[symmetric] by simp subsection "Borel space on the extended non-negative reals" text \<open> \<^type>\<open>ennreal\<close> is a topological monoid, so no rules for plus are required, also all order statements are usually done on type classes. \<close> lemma measurable_enn2ereal[measurable]: "enn2ereal \<in> borel \<rightarrow>\<^sub>M borel" by (intro borel_measurable_continuous_onI continuous_on_enn2ereal) lemma measurable_e2ennreal[measurable]: "e2ennreal \<in> borel \<rightarrow>\<^sub>M borel" by (intro borel_measurable_continuous_onI continuous_on_e2ennreal) lemma borel_measurable_enn2real[measurable (raw)]: "f \<in> M \<rightarrow>\<^sub>M borel \<Longrightarrow> (\<lambda>x. 
enn2real (f x)) \<in> M \<rightarrow>\<^sub>M borel" unfolding enn2real_def[abs_def] by measurable definition\<^marker>\<open>tag important\<close> [simp]: "is_borel f M \<longleftrightarrow> f \<in> borel_measurable M" lemma is_borel_transfer[transfer_rule]: "rel_fun (rel_fun (=) pcr_ennreal) (=) is_borel is_borel" unfolding is_borel_def[abs_def] proof (safe intro!: rel_funI ext dest!: rel_fun_eq_pcr_ennreal[THEN iffD1]) fix f and M :: "'a measure" show "f \<in> borel_measurable M" if f: "enn2ereal \<circ> f \<in> borel_measurable M" using measurable_compose[OF f measurable_e2ennreal] by simp qed simp context includes ennreal.lifting begin lemma measurable_ennreal[measurable]: "ennreal \<in> borel \<rightarrow>\<^sub>M borel" unfolding is_borel_def[symmetric] by transfer simp lemma borel_measurable_ennreal_iff[simp]: assumes [simp]: "\<And>x. x \<in> space M \<Longrightarrow> 0 \<le> f x" shows "(\<lambda>x. ennreal (f x)) \<in> M \<rightarrow>\<^sub>M borel \<longleftrightarrow> f \<in> M \<rightarrow>\<^sub>M borel" proof safe assume "(\<lambda>x. ennreal (f x)) \<in> M \<rightarrow>\<^sub>M borel" then have "(\<lambda>x. enn2real (ennreal (f x))) \<in> M \<rightarrow>\<^sub>M borel" by measurable then show "f \<in> M \<rightarrow>\<^sub>M borel" by (rule measurable_cong[THEN iffD1, rotated]) auto qed measurable lemma borel_measurable_times_ennreal[measurable (raw)]: fixes f g :: "'a \<Rightarrow> ennreal" shows "f \<in> M \<rightarrow>\<^sub>M borel \<Longrightarrow> g \<in> M \<rightarrow>\<^sub>M borel \<Longrightarrow> (\<lambda>x. f x * g x) \<in> M \<rightarrow>\<^sub>M borel" unfolding is_borel_def[symmetric] by transfer simp lemma borel_measurable_inverse_ennreal[measurable (raw)]: fixes f :: "'a \<Rightarrow> ennreal" shows "f \<in> M \<rightarrow>\<^sub>M borel \<Longrightarrow> (\<lambda>x. inverse (f x)) \<in> M \<rightarrow>\<^sub>M borel" unfolding is_borel_def[symmetric] by transfer simp lemma borel_measurable_divide_ennreal[measurable (raw)]: fixes f :: "'a \<Rightarrow> ennreal" shows "f \<in> M \<rightarrow>\<^sub>M borel \<Longrightarrow> g \<in> M \<rightarrow>\<^sub>M borel \<Longrightarrow> (\<lambda>x. f x / g x) \<in> M \<rightarrow>\<^sub>M borel" unfolding divide_ennreal_def by simp lemma borel_measurable_minus_ennreal[measurable (raw)]: fixes f :: "'a \<Rightarrow> ennreal" shows "f \<in> M \<rightarrow>\<^sub>M borel \<Longrightarrow> g \<in> M \<rightarrow>\<^sub>M borel \<Longrightarrow> (\<lambda>x. f x - g x) \<in> M \<rightarrow>\<^sub>M borel" unfolding is_borel_def[symmetric] by transfer simp lemma borel_measurable_prod_ennreal[measurable (raw)]: fixes f :: "'c \<Rightarrow> 'a \<Rightarrow> ennreal" assumes "\<And>i. i \<in> S \<Longrightarrow> f i \<in> borel_measurable M" shows "(\<lambda>x. \<Prod>i\<in>S. f i x) \<in> borel_measurable M" using assms by (induction S rule: infinite_finite_induct) auto end hide_const (open) is_borel subsection \<open>LIMSEQ is borel measurable\<close> lemma borel_measurable_LIMSEQ_real: fixes u :: "nat \<Rightarrow> 'a \<Rightarrow> real" assumes u': "\<And>x. x \<in> space M \<Longrightarrow> (\<lambda>i. u i x) \<longlonglongrightarrow> u' x" and u: "\<And>i. u i \<in> borel_measurable M" shows "u' \<in> borel_measurable M" proof - have "\<And>x. x \<in> space M \<Longrightarrow> liminf (\<lambda>n. ereal (u n x)) = ereal (u' x)" using u' by (simp add: lim_imp_Liminf) moreover from u have "(\<lambda>x. liminf (\<lambda>n. 
ereal (u n x))) \<in> borel_measurable M" by auto ultimately show ?thesis by (simp cong: measurable_cong add: borel_measurable_ereal_iff) qed lemma borel_measurable_LIMSEQ_metric: fixes f :: "nat \<Rightarrow> 'a \<Rightarrow> 'b :: metric_space" assumes [measurable]: "\<And>i. f i \<in> borel_measurable M" assumes lim: "\<And>x. x \<in> space M \<Longrightarrow> (\<lambda>i. f i x) \<longlonglongrightarrow> g x" shows "g \<in> borel_measurable M" unfolding borel_eq_closed proof (safe intro!: measurable_measure_of) fix A :: "'b set" assume "closed A" have [measurable]: "(\<lambda>x. infdist (g x) A) \<in> borel_measurable M" proof (rule borel_measurable_LIMSEQ_real) show "\<And>x. x \<in> space M \<Longrightarrow> (\<lambda>i. infdist (f i x) A) \<longlonglongrightarrow> infdist (g x) A" by (intro tendsto_infdist lim) show "\<And>i. (\<lambda>x. infdist (f i x) A) \<in> borel_measurable M" by (intro borel_measurable_continuous_on[where f="\<lambda>x. infdist x A"] continuous_at_imp_continuous_on ballI continuous_infdist continuous_ident) auto qed show "g -` A \<inter> space M \<in> sets M" proof cases assume "A \<noteq> {}" then have "\<And>x. infdist x A = 0 \<longleftrightarrow> x \<in> A" using \<open>closed A\<close> by (simp add: in_closed_iff_infdist_zero) then have "g -` A \<inter> space M = {x\<in>space M. infdist (g x) A = 0}" by auto also have "\<dots> \<in> sets M" by measurable finally show ?thesis . qed simp qed auto lemma sets_Collect_Cauchy[measurable]: fixes f :: "nat \<Rightarrow> 'a => 'b::{metric_space, second_countable_topology}" assumes f[measurable]: "\<And>i. f i \<in> borel_measurable M" shows "{x\<in>space M. Cauchy (\<lambda>i. f i x)} \<in> sets M" unfolding metric_Cauchy_iff2 using f by auto lemma borel_measurable_lim_metric[measurable (raw)]: fixes f :: "nat \<Rightarrow> 'a \<Rightarrow> 'b::{banach, second_countable_topology}" assumes f[measurable]: "\<And>i. f i \<in> borel_measurable M" shows "(\<lambda>x. lim (\<lambda>i. f i x)) \<in> borel_measurable M" proof - define u' where "u' x = lim (\<lambda>i. if Cauchy (\<lambda>i. f i x) then f i x else 0)" for x then have *: "\<And>x. lim (\<lambda>i. f i x) = (if Cauchy (\<lambda>i. f i x) then u' x else (THE x. False))" by (auto simp: lim_def convergent_eq_Cauchy[symmetric]) have "u' \<in> borel_measurable M" proof (rule borel_measurable_LIMSEQ_metric) fix x have "convergent (\<lambda>i. if Cauchy (\<lambda>i. f i x) then f i x else 0)" by (cases "Cauchy (\<lambda>i. f i x)") (auto simp add: convergent_eq_Cauchy[symmetric] convergent_def) then show "(\<lambda>i. if Cauchy (\<lambda>i. f i x) then f i x else 0) \<longlonglongrightarrow> u' x" unfolding u'_def by (rule convergent_LIMSEQ_iff[THEN iffD1]) qed measurable then show ?thesis unfolding * by measurable qed lemma borel_measurable_suminf[measurable (raw)]: fixes f :: "nat \<Rightarrow> 'a \<Rightarrow> 'b::{banach, second_countable_topology}" assumes f[measurable]: "\<And>i. f i \<in> borel_measurable M" shows "(\<lambda>x. suminf (\<lambda>i. f i x)) \<in> borel_measurable M" unfolding suminf_def sums_def[abs_def] lim_def[symmetric] by simp lemma Collect_closed_imp_pred_borel: "closed {x. P x} \<Longrightarrow> Measurable.pred borel P" by (simp add: pred_def) (* Proof by Jeremy Avigad and Luke Serafin *) lemma isCont_borel_pred[measurable]: fixes f :: "'b::metric_space \<Rightarrow> 'a::metric_space" shows "Measurable.pred borel (isCont f)" proof (subst measurable_cong) let ?I = "\<lambda>j. inverse(real (Suc j))" show "isCont f x = (\<forall>i. 
\<exists>j. \<forall>y z. dist x y < ?I j \<and> dist x z < ?I j \<longrightarrow> dist (f y) (f z) \<le> ?I i)" for x unfolding continuous_at_eps_delta proof safe fix i assume "\<forall>e>0. \<exists>d>0. \<forall>y. dist y x < d \<longrightarrow> dist (f y) (f x) < e" moreover have "0 < ?I i / 2" by simp ultimately obtain d where d: "0 < d" "\<And>y. dist x y < d \<Longrightarrow> dist (f y) (f x) < ?I i / 2" by (metis dist_commute) then obtain j where j: "?I j < d" by (metis reals_Archimedean) show "\<exists>j. \<forall>y z. dist x y < ?I j \<and> dist x z < ?I j \<longrightarrow> dist (f y) (f z) \<le> ?I i" proof (safe intro!: exI[where x=j]) fix y z assume *: "dist x y < ?I j" "dist x z < ?I j" have "dist (f y) (f z) \<le> dist (f y) (f x) + dist (f z) (f x)" by (rule dist_triangle2) also have "\<dots> < ?I i / 2 + ?I i / 2" by (intro add_strict_mono d less_trans[OF _ j] *) also have "\<dots> \<le> ?I i" by (simp add: field_simps) finally show "dist (f y) (f z) \<le> ?I i" by simp qed next fix e::real assume "0 < e" then obtain n where n: "?I n < e" by (metis reals_Archimedean) assume "\<forall>i. \<exists>j. \<forall>y z. dist x y < ?I j \<and> dist x z < ?I j \<longrightarrow> dist (f y) (f z) \<le> ?I i" from this[THEN spec, of "Suc n"] obtain j where j: "\<And>y z. dist x y < ?I j \<Longrightarrow> dist x z < ?I j \<Longrightarrow> dist (f y) (f z) \<le> ?I (Suc n)" by auto show "\<exists>d>0. \<forall>y. dist y x < d \<longrightarrow> dist (f y) (f x) < e" proof (safe intro!: exI[of _ "?I j"]) fix y assume "dist y x < ?I j" then have "dist (f y) (f x) \<le> ?I (Suc n)" by (intro j) (auto simp: dist_commute) also have "?I (Suc n) < ?I n" by simp also note n finally show "dist (f y) (f x) < e" . qed simp qed qed (intro pred_intros_countable closed_Collect_all closed_Collect_le open_Collect_less Collect_closed_imp_pred_borel closed_Collect_imp open_Collect_conj continuous_intros) lemma isCont_borel: fixes f :: "'b::metric_space \<Rightarrow> 'a::metric_space" shows "{x. isCont f x} \<in> sets borel" by simp lemma is_real_interval: assumes S: "is_interval S" shows "\<exists>a b::real. S = {} \<or> S = UNIV \<or> S = {..<b} \<or> S = {..b} \<or> S = {a<..} \<or> S = {a..} \<or> S = {a<..<b} \<or> S = {a<..b} \<or> S = {a..<b} \<or> S = {a..b}" using S unfolding is_interval_1 by (blast intro: interval_cases) lemma real_interval_borel_measurable: assumes "is_interval (S::real set)" shows "S \<in> sets borel" proof - from assms is_real_interval have "\<exists>a b::real. S = {} \<or> S = UNIV \<or> S = {..<b} \<or> S = {..b} \<or> S = {a<..} \<or> S = {a..} \<or> S = {a<..<b} \<or> S = {a<..b} \<or> S = {a..<b} \<or> S = {a..b}" by auto then guess a .. then guess b .. 
thus ?thesis by auto qed text \<open>The next lemmas hold in any second countable linorder (including ennreal or ereal for instance), but in the current state they are restricted to reals.\<close> lemma borel_measurable_mono_on_fnc: fixes f :: "real \<Rightarrow> real" and A :: "real set" assumes "mono_on f A" shows "f \<in> borel_measurable (restrict_space borel A)" apply (rule measurable_restrict_countable[OF mono_on_ctble_discont[OF assms]]) apply (auto intro!: image_eqI[where x="{x}" for x] simp: sets_restrict_space) apply (auto simp add: sets_restrict_restrict_space continuous_on_eq_continuous_within cong: measurable_cong_sets intro!: borel_measurable_continuous_on_restrict intro: continuous_within_subset) done lemma borel_measurable_piecewise_mono: fixes f::"real \<Rightarrow> real" and C::"real set set" assumes "countable C" "\<And>c. c \<in> C \<Longrightarrow> c \<in> sets borel" "\<And>c. c \<in> C \<Longrightarrow> mono_on f c" "(\<Union>C) = UNIV" shows "f \<in> borel_measurable borel" by (rule measurable_piecewise_restrict[of C], auto intro: borel_measurable_mono_on_fnc simp: assms) lemma borel_measurable_mono: fixes f :: "real \<Rightarrow> real" shows "mono f \<Longrightarrow> f \<in> borel_measurable borel" using borel_measurable_mono_on_fnc[of f UNIV] by (simp add: mono_def mono_on_def) lemma measurable_bdd_below_real[measurable (raw)]: fixes F :: "'a \<Rightarrow> 'i \<Rightarrow> real" assumes [simp]: "countable I" and [measurable]: "\<And>i. i \<in> I \<Longrightarrow> F i \<in> M \<rightarrow>\<^sub>M borel" shows "Measurable.pred M (\<lambda>x. bdd_below ((\<lambda>i. F i x)`I))" proof (subst measurable_cong) show "bdd_below ((\<lambda>i. F i x)`I) \<longleftrightarrow> (\<exists>q\<in>\<int>. \<forall>i\<in>I. q \<le> F i x)" for x by (auto simp: bdd_below_def intro!: bexI[of _ "of_int (floor _)"] intro: order_trans of_int_floor_le) show "Measurable.pred M (\<lambda>w. \<exists>q\<in>\<int>. \<forall>i\<in>I. q \<le> F i w)" using countable_int by measurable qed lemma borel_measurable_cINF_real[measurable (raw)]: fixes F :: "_ \<Rightarrow> _ \<Rightarrow> real" assumes [simp]: "countable I" assumes F[measurable]: "\<And>i. i \<in> I \<Longrightarrow> F i \<in> borel_measurable M" shows "(\<lambda>x. INF i\<in>I. F i x) \<in> borel_measurable M" proof (rule measurable_piecewise_restrict) let ?\<Omega> = "{x\<in>space M. bdd_below ((\<lambda>i. F i x)`I)}" show "countable {?\<Omega>, - ?\<Omega>}" "space M \<subseteq> \<Union>{?\<Omega>, - ?\<Omega>}" "\<And>X. X \<in> {?\<Omega>, - ?\<Omega>} \<Longrightarrow> X \<inter> space M \<in> sets M" by auto fix X assume "X \<in> {?\<Omega>, - ?\<Omega>}" then show "(\<lambda>x. INF i\<in>I. F i x) \<in> borel_measurable (restrict_space M X)" proof safe show "(\<lambda>x. INF i\<in>I. F i x) \<in> borel_measurable (restrict_space M ?\<Omega>)" by (intro borel_measurable_cINF measurable_restrict_space1 F) (auto simp: space_restrict_space) show "(\<lambda>x. INF i\<in>I. F i x) \<in> borel_measurable (restrict_space M (-?\<Omega>))" proof (subst measurable_cong) fix x assume "x \<in> space (restrict_space M (-?\<Omega>))" then have "\<not> (\<forall>i\<in>I. - F i x \<le> y)" for y by (auto simp: space_restrict_space bdd_above_def bdd_above_uminus[symmetric]) then show "(INF i\<in>I. F i x) = - (THE x. False)" by (auto simp: space_restrict_space Inf_real_def Sup_real_def Least_def simp del: Set.ball_simps(10)) qed simp qed qed lemma borel_Ici: "borel = sigma UNIV (range (\<lambda>x::real. 
{x ..}))" proof (safe intro!: borel_eq_sigmaI1[OF borel_Iio]) fix x :: real have eq: "{..<x} = space (sigma UNIV (range atLeast)) - {x ..}" by auto show "{..<x} \<in> sets (sigma UNIV (range atLeast))" unfolding eq by (intro sets.compl_sets) auto qed auto lemma borel_measurable_pred_less[measurable (raw)]: fixes f :: "'a \<Rightarrow> 'b::{second_countable_topology, linorder_topology}" shows "f \<in> borel_measurable M \<Longrightarrow> g \<in> borel_measurable M \<Longrightarrow> Measurable.pred M (\<lambda>w. f w < g w)" unfolding Measurable.pred_def by (rule borel_measurable_less) no_notation eucl_less (infix "<e" 50) lemma borel_measurable_Max2[measurable (raw)]: fixes f::"_ \<Rightarrow> _ \<Rightarrow> 'a::{second_countable_topology, dense_linorder, linorder_topology}" assumes "finite I" and [measurable]: "\<And>i. f i \<in> borel_measurable M" shows "(\<lambda>x. Max{f i x |i. i \<in> I}) \<in> borel_measurable M" by (simp add: borel_measurable_Max[OF assms(1), where ?f=f and ?M=M] Setcompr_eq_image) lemma measurable_compose_n [measurable (raw)]: assumes "T \<in> measurable M M" shows "(T^^n) \<in> measurable M M" by (induction n, auto simp add: measurable_compose[OF _ assms]) lemma measurable_real_imp_nat: fixes f::"'a \<Rightarrow> nat" assumes [measurable]: "(\<lambda>x. real(f x)) \<in> borel_measurable M" shows "f \<in> measurable M (count_space UNIV)" proof - let ?g = "(\<lambda>x. real(f x))" have "\<And>(n::nat). ?g-`({real n}) \<inter> space M = f-`{n} \<inter> space M" by auto moreover have "\<And>(n::nat). ?g-`({real n}) \<inter> space M \<in> sets M" using assms by measurable ultimately have "\<And>(n::nat). f-`{n} \<inter> space M \<in> sets M" by simp then show ?thesis using measurable_count_space_eq2_countable by blast qed lemma measurable_equality_set [measurable]: fixes f g::"_\<Rightarrow> 'a::{second_countable_topology, t2_space}" assumes [measurable]: "f \<in> borel_measurable M" "g \<in> borel_measurable M" shows "{x \<in> space M. f x = g x} \<in> sets M" proof - define A where "A = {x \<in> space M. f x = g x}" define B where "B = {y. \<exists>x::'a. y = (x,x)}" have "A = (\<lambda>x. (f x, g x))-`B \<inter> space M" unfolding A_def B_def by auto moreover have "(\<lambda>x. (f x, g x)) \<in> borel_measurable M" by simp moreover have "B \<in> sets borel" unfolding B_def by (simp add: closed_diagonal) ultimately have "A \<in> sets M" by simp then show ?thesis unfolding A_def by simp qed lemma measurable_inequality_set [measurable]: fixes f g::"_ \<Rightarrow> 'a::{second_countable_topology, linorder_topology}" assumes [measurable]: "f \<in> borel_measurable M" "g \<in> borel_measurable M" shows "{x \<in> space M. f x \<le> g x} \<in> sets M" "{x \<in> space M. f x < g x} \<in> sets M" "{x \<in> space M. f x \<ge> g x} \<in> sets M" "{x \<in> space M. f x > g x} \<in> sets M" proof - define F where "F = (\<lambda>x. (f x, g x))" have * [measurable]: "F \<in> borel_measurable M" unfolding F_def by simp have "{x \<in> space M. f x \<le> g x} = F-`{(x, y) | x y. x \<le> y} \<inter> space M" unfolding F_def by auto moreover have "{(x, y) | x y. x \<le> (y::'a)} \<in> sets borel" using closed_subdiagonal borel_closed by blast ultimately show "{x \<in> space M. f x \<le> g x} \<in> sets M" using * by (metis (mono_tags, lifting) measurable_sets) have "{x \<in> space M. f x < g x} = F-`{(x, y) | x y. x < y} \<inter> space M" unfolding F_def by auto moreover have "{(x, y) | x y. 
x < (y::'a)} \<in> sets borel" using open_subdiagonal borel_open by blast ultimately show "{x \<in> space M. f x < g x} \<in> sets M" using * by (metis (mono_tags, lifting) measurable_sets) have "{x \<in> space M. f x \<ge> g x} = F-`{(x, y) | x y. x \<ge> y} \<inter> space M" unfolding F_def by auto moreover have "{(x, y) | x y. x \<ge> (y::'a)} \<in> sets borel" using closed_superdiagonal borel_closed by blast ultimately show "{x \<in> space M. f x \<ge> g x} \<in> sets M" using * by (metis (mono_tags, lifting) measurable_sets) have "{x \<in> space M. f x > g x} = F-`{(x, y) | x y. x > y} \<inter> space M" unfolding F_def by auto moreover have "{(x, y) | x y. x > (y::'a)} \<in> sets borel" using open_superdiagonal borel_open by blast ultimately show "{x \<in> space M. f x > g x} \<in> sets M" using * by (metis (mono_tags, lifting) measurable_sets) qed proposition measurable_limit [measurable]: fixes f::"nat \<Rightarrow> 'a \<Rightarrow> 'b::first_countable_topology" assumes [measurable]: "\<And>n::nat. f n \<in> borel_measurable M" shows "Measurable.pred M (\<lambda>x. (\<lambda>n. f n x) \<longlonglongrightarrow> c)" proof - obtain A :: "nat \<Rightarrow> 'b set" where A: "\<And>i. open (A i)" "\<And>i. c \<in> A i" "\<And>S. open S \<Longrightarrow> c \<in> S \<Longrightarrow> eventually (\<lambda>i. A i \<subseteq> S) sequentially" by (rule countable_basis_at_decseq) blast have [measurable]: "\<And>N i. (f N)-`(A i) \<inter> space M \<in> sets M" using A(1) by auto then have mes: "(\<Inter>i. \<Union>n. \<Inter>N\<in>{n..}. (f N)-`(A i) \<inter> space M) \<in> sets M" by blast have "(u \<longlonglongrightarrow> c) \<longleftrightarrow> (\<forall>i. eventually (\<lambda>n. u n \<in> A i) sequentially)" for u::"nat \<Rightarrow> 'b" proof assume "u \<longlonglongrightarrow> c" then have "eventually (\<lambda>n. u n \<in> A i) sequentially" for i using A(1)[of i] A(2)[of i] by (simp add: topological_tendstoD) then show "(\<forall>i. eventually (\<lambda>n. u n \<in> A i) sequentially)" by auto next assume H: "(\<forall>i. eventually (\<lambda>n. u n \<in> A i) sequentially)" show "(u \<longlonglongrightarrow> c)" proof (rule topological_tendstoI) fix S assume "open S" "c \<in> S" with A(3)[OF this] obtain i where "A i \<subseteq> S" using eventually_False_sequentially eventually_mono by blast moreover have "eventually (\<lambda>n. u n \<in> A i) sequentially" using H by simp ultimately show "\<forall>\<^sub>F n in sequentially. u n \<in> S" by (simp add: eventually_mono subset_eq) qed qed then have "{x. (\<lambda>n. f n x) \<longlonglongrightarrow> c} = (\<Inter>i. \<Union>n. \<Inter>N\<in>{n..}. (f N)-`(A i))" by (auto simp add: atLeast_def eventually_at_top_linorder) then have "{x \<in> space M. (\<lambda>n. f n x) \<longlonglongrightarrow> c} = (\<Inter>i. \<Union>n. \<Inter>N\<in>{n..}. (f N)-`(A i) \<inter> space M)" by auto then have "{x \<in> space M. (\<lambda>n. f n x) \<longlonglongrightarrow> c} \<in> sets M" using mes by simp then show ?thesis by auto qed lemma measurable_limit2 [measurable]: fixes u::"nat \<Rightarrow> 'a \<Rightarrow> real" assumes [measurable]: "\<And>n. u n \<in> borel_measurable M" "v \<in> borel_measurable M" shows "Measurable.pred M (\<lambda>x. (\<lambda>n. u n x) \<longlonglongrightarrow> v x)" proof - define w where "w = (\<lambda>n x. u n x - v x)" have [measurable]: "w n \<in> borel_measurable M" for n unfolding w_def by auto have "((\<lambda>n. u n x) \<longlonglongrightarrow> v x) \<longleftrightarrow> ((\<lambda>n. 
w n x) \<longlonglongrightarrow> 0)" for x unfolding w_def using Lim_null by auto then show ?thesis using measurable_limit by auto qed lemma measurable_P_restriction [measurable (raw)]: assumes [measurable]: "Measurable.pred M P" "A \<in> sets M" shows "{x \<in> A. P x} \<in> sets M" proof - have "A \<subseteq> space M" using sets.sets_into_space[OF assms(2)]. then have "{x \<in> A. P x} = A \<inter> {x \<in> space M. P x}" by blast then show ?thesis by auto qed lemma measurable_sum_nat [measurable (raw)]: fixes f :: "'c \<Rightarrow> 'a \<Rightarrow> nat" assumes "\<And>i. i \<in> S \<Longrightarrow> f i \<in> measurable M (count_space UNIV)" shows "(\<lambda>x. \<Sum>i\<in>S. f i x) \<in> measurable M (count_space UNIV)" proof cases assume "finite S" then show ?thesis using assms by induct auto qed simp lemma measurable_abs_powr [measurable]: fixes p::real assumes [measurable]: "f \<in> borel_measurable M" shows "(\<lambda>x. \<bar>f x\<bar> powr p) \<in> borel_measurable M" by simp text \<open>The next one is a variation around \<open>measurable_restrict_space\<close>.\<close> lemma measurable_restrict_space3: assumes "f \<in> measurable M N" and "f \<in> A \<rightarrow> B" shows "f \<in> measurable (restrict_space M A) (restrict_space N B)" proof - have "f \<in> measurable (restrict_space M A) N" using assms(1) measurable_restrict_space1 by auto then show ?thesis by (metis Int_iff funcsetI funcset_mem measurable_restrict_space2[of f, of "restrict_space M A", of B, of N] assms(2) space_restrict_space) qed lemma measurable_restrict_mono: assumes f: "f \<in> restrict_space M A \<rightarrow>\<^sub>M N" and "B \<subseteq> A" shows "f \<in> restrict_space M B \<rightarrow>\<^sub>M N" by (rule measurable_compose[OF measurable_restrict_space3 f]) (insert \<open>B \<subseteq> A\<close>, auto) text \<open>The next one is a variation around \<open>measurable_piecewise_restrict\<close>.\<close> lemma measurable_piecewise_restrict2: assumes [measurable]: "\<And>n. A n \<in> sets M" and "space M = (\<Union>(n::nat). A n)" "\<And>n. \<exists>h \<in> measurable M N. (\<forall>x \<in> A n. f x = h x)" shows "f \<in> measurable M N" proof (rule measurableI) fix B assume [measurable]: "B \<in> sets N" { fix n::nat obtain h where [measurable]: "h \<in> measurable M N" and "\<forall>x \<in> A n. f x = h x" using assms(3) by blast then have *: "f-`B \<inter> A n = h-`B \<inter> A n" by auto have "h-`B \<inter> A n = h-`B \<inter> space M \<inter> A n" using assms(2) sets.sets_into_space by auto then have "h-`B \<inter> A n \<in> sets M" by simp then have "f-`B \<inter> A n \<in> sets M" using * by simp } then have "(\<Union>n. f-`B \<inter> A n) \<in> sets M" by measurable moreover have "f-`B \<inter> space M = (\<Union>n. f-`B \<inter> A n)" using assms(2) by blast ultimately show "f-`B \<inter> space M \<in> sets M" by simp next fix x assume "x \<in> space M" then obtain n where "x \<in> A n" using assms(2) by blast obtain h where [measurable]: "h \<in> measurable M N" and "\<forall>x \<in> A n. f x = h x" using assms(3) by blast then have "f x = h x" using \<open>x \<in> A n\<close> by blast moreover have "h x \<in> space N" by (metis measurable_space \<open>x \<in> space M\<close> \<open>h \<in> measurable M N\<close>) ultimately show "f x \<in> space N" by simp qed end
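Informally, the half-space lemmas proved above (borel_eq_halfspace_le and borel_measurable_iff_halfspace_le, together with their <, ≥ and > variants) say that Borel measurability of a Euclidean-space-valued map can be checked coordinatewise on sub-level sets. In standard mathematical notation (a restatement only, using no symbols beyond the usual ones), for a measurable space (\Omega, \mathcal{A}) and f : \Omega \to \mathbb{R}^n:

\[
  f \in \mathcal{M}(\mathcal{A}, \mathcal{B}(\mathbb{R}^n))
  \iff
  \forall i \in \mathrm{Basis}.\ \forall a \in \mathbb{R}.\ \{\omega \in \Omega : f(\omega) \cdot i \le a\} \in \mathcal{A}.
\]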
During that year, SMS Viribus Unitis, the first of the four new dreadnoughts that made up the Tegetthoff class (the only dreadnoughts built for the Austro-Hungarian Navy), came into active service. With the commissioning of these dreadnoughts, Zrínyi and her sisters were moved from the 1st Division to the 2nd Division of the 1st Battle Squadron.
/-
Copyright (c) 2021 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mario Carneiro
-/
import Std.Linter
import Std.Tactic.NoMatch
import Std.Tactic.GuardExpr
import Std.Tactic.ByCases
import Std.Tactic.SeqFocus
import Std.Tactic.ShowTerm
import Std.Tactic.SimpTrace
import Lean.Elab.Tactic.ElabTerm
import Std.Lean.Meta.Basic
import Std.Lean.Tactic

namespace Std.Tactic

open Lean Parser.Tactic Elab Command Elab.Tactic Meta

/-- `exfalso` converts a goal `⊢ tgt` into `⊢ False` by applying `False.elim`. -/
macro "exfalso" : tactic => `(tactic| apply False.elim)

/--
`_` in tactic position acts like the `done` tactic: it fails and gives the list of goals
if there are any. It is useful as a placeholder after starting a tactic block such as `by _`
to make it syntactically correct and show the current goal.
-/
macro "_" : tactic => `(tactic| {})

@[inherit_doc failIfSuccess]
syntax (name := failIfSuccessConv) "fail_if_success " Conv.convSeq : conv

attribute [tactic failIfSuccessConv] evalFailIfSuccess

/-- We allow the `rfl` tactic to also use `Iff.rfl`. -/
-- `rfl` was defined earlier in Lean4, at src/Lean/Init/Tactics.lean
-- Later we want to allow `rfl` to use all relations marked with an attribute.
macro_rules | `(tactic| rfl) => `(tactic| exact Iff.rfl)

macro_rules | `(tactic| rfl) => `(tactic| exact HEq.rfl)

/-- `rwa` calls `rw`, then closes any remaining goals using `assumption`. -/
macro "rwa " rws:rwRuleSeq loc:(location)? : tactic =>
  `(tactic| (rw $rws:rwRuleSeq $[$loc:location]?; assumption))

/--
Like `exact`, but takes a list of terms and checks that all goals are discharged
after the tactic.
-/
elab (name := exacts) "exacts" "[" hs:term,* "]" : tactic => do
  for stx in hs.getElems do
    evalTactic (← `(tactic| exact $stx))
  evalTactic (← `(tactic| done))

/--
`by_contra h` proves `⊢ p` by contradiction, introducing a hypothesis `h : ¬p`
and proving `False`.
* If `p` is a negation `¬q`, `h : q` will be introduced instead of `¬¬q`.
* If `p` is decidable, it uses `Decidable.byContradiction` instead of `Classical.byContradiction`.
* If `h` is omitted, the introduced variable `_: ¬p` will be anonymous.
-/
macro (name := byContra) tk:"by_contra" e?:(ppSpace colGt binderIdent)? : tactic => do
  let e := match e? with
    | some e => match e with
      | `(binderIdent| $e:ident) => e
      | e => Unhygienic.run `(_%$e) -- HACK: hover fails without Unhygienic here
    | none => Unhygienic.run `(_%$tk)
  `(tactic| first
    | guard_target = Not _; intro $e:term
    | refine Decidable.byContradiction fun $e => ?_
    | refine Classical.byContradiction fun $e => ?_)

/--
`iterate n tac` runs `tac` exactly `n` times.
`iterate tac` runs `tac` repeatedly until failure.

To run multiple tactics, one can do `iterate (tac₁; tac₂; ⋯)` or
```lean
iterate
  tac₁
  tac₂
  ⋯
```
-/
syntax "iterate" (ppSpace num)? ppSpace tacticSeq : tactic

macro_rules
  | `(tactic| iterate $seq:tacticSeq) =>
    `(tactic| try ($seq:tacticSeq); iterate $seq:tacticSeq)
  | `(tactic| iterate $n $seq:tacticSeq) =>
    match n.1.toNat with
    | 0 => `(tactic| skip)
    | n+1 => `(tactic| ($seq:tacticSeq); iterate $(quote n) $seq:tacticSeq)

/--
`repeat' tac` runs `tac` on all of the goals to produce a new list of goals,
then runs `tac` again on all of those goals, and repeats until `tac` fails on
all remaining goals.
-/
elab "repeat' " tac:tacticSeq : tactic => do
  setGoals (← repeat' (evalTacticAtRaw tac) (← getGoals))

/--
`repeat1 tac` applies `tac` to main goal at least once. If the application succeeds,
the tactic is applied recursively to the generated subgoals until it eventually fails.
-/
macro "repeat1 " tac:tacticSeq : tactic => `(tactic| focus (($tac); repeat' $tac))

/-- `subst_eqs` applies `subst` to all equalities in the context as long as it makes progress. -/
elab "subst_eqs" : tactic => Elab.Tactic.liftMetaTactic1 (·.substEqs)

/-- `split_ands` applies `And.intro` until it does not make progress. -/
syntax "split_ands" : tactic
macro_rules | `(tactic| split_ands) => `(tactic| repeat' refine And.intro ?_ ?_)

/--
`fapply e` is like `apply e` but it adds goals in the order they appear,
rather than putting the dependent goals first.
-/
elab "fapply " e:term : tactic =>
  evalApplyLikeTactic (·.apply (cfg := {newGoals := .all})) e

/--
`eapply e` is like `apply e` but it does not add subgoals for variables that appear
in the types of other goals. Note that this can lead to a failure where there are no
goals remaining but there are still metavariables in the term:
```
example (h : ∀ x : Nat, x = x → True) : True := by
  eapply h
  rfl
  -- no goals
-- (kernel) declaration has metavariables '_example'
```
-/
elab "eapply " e:term : tactic =>
  evalApplyLikeTactic (·.apply (cfg := {newGoals := .nonDependentOnly})) e

/--
Tries to solve the goal using a canonical proof of `True`, or the `rfl` tactic.
Unlike `trivial` or `trivial'`, does not use the `contradiction` tactic.
-/
macro (name := triv) "triv" : tactic =>
  `(tactic| first | exact trivial | rfl | fail "triv tactic failed")

/-- `conv` tactic to close a goal using an equality theorem. -/
macro (name := Conv.exact) "exact" t:term : conv => `(conv| tactic => exact $t)
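The tactics defined above are easiest to read alongside a short usage sketch. The following examples are an editorial addition, not part of the original file; they assume the file above is available as an import (for instance as `Std.Tactic.Basic`) and use only core `Prop` and `Nat` goals.

-- Editor's sketch (assumes the tactic definitions above are imported).
-- `split_ands` breaks the conjunction into three goals; `exacts` closes them in order.
example (p q : Prop) (hp : p) (hq : q) : p ∧ q ∧ p := by
  split_ands
  exacts [hp, hq, hp]

-- `by_contra` falls back on `Decidable.byContradiction` here, since `Nat` equality is decidable.
example (n : Nat) : n + 0 = n := by
  by_contra h
  exact h (Nat.add_zero n)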
(*
 * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *)

theory int_promotion
imports "CParser.CTranslation"
begin

external_file "int_promotion.c"
install_C_file "int_promotion.c"

context int_promotion
begin

thm f_body_def

lemma "\<Gamma> \<turnstile> \<lbrace> True \<rbrace> \<acute>ret__int :== CALL f() \<lbrace> \<acute>ret__int = 1 \<rbrace>"
  apply vcg
  apply simp
  done

end
end
import .definitions3 .qi lemma exp.vcgen.extension {P: prop} {e: exp} {Q: propctx}: (P ⊢ e : Q) → (P ⊩ e : Q) := begin assume e_verified: P ⊢ e : Q, induction e_verified, case exp.vcgen.tru P y e' Q y_not_in_P e'_verified ih { apply exp.dvcgen.tru, from y_not_in_P, from ih }, case exp.vcgen.fals P y e' Q y_not_in_P e'_verified ih { apply exp.dvcgen.fals, from y_not_in_P, from ih }, case exp.vcgen.num P y n e' Q y_not_in_P e'_verified ih { apply exp.dvcgen.num, from y_not_in_P, from ih }, case exp.vcgen.func P f fx R S e₁ e₂ Q₁ Q₂ f_not_in_P fx_not_in_P f_neq_fx fx_in_R fv_R fv_S e₁_verified e₂_verified func_vc ih₁ ih₂ { apply exp.dvcgen.func, from f_not_in_P, from fx_not_in_P, from f_neq_fx, from fx_in_R, from fv_R, from fv_S, from ih₁, from ih₂, from vc_valid_from_inst_valid func_vc }, case exp.vcgen.unop op P e' x₁ y Q x_free_in_P y_not_in_P e'_verified vc_valid ih { apply exp.dvcgen.unop, from x_free_in_P, from y_not_in_P, from ih, from vc_valid_from_inst_valid vc_valid }, case exp.vcgen.binop op P e' x₁ x₂ y Q x₁_free_in_P x₂_free_in_P y_not_in_P e'_verified vc_valid ih { apply exp.dvcgen.binop, from x₁_free_in_P, from x₂_free_in_P, from y_not_in_P, from ih, from vc_valid_from_inst_valid vc_valid }, case exp.vcgen.app P y f e' x₁ Q f_free_in_P x₁_free_in_P y_not_in_P e'_verified vc_valid ih { apply exp.dvcgen.app, from f_free_in_P, from x₁_free_in_P, from y_not_in_P, from ih, from vc_valid_from_inst_valid vc_valid }, case exp.vcgen.ite P e₁ e₂ y Q₁ Q₂ y_free_in_P e₁_verified e₂_verified vc_valid ih₁ ih₂ { apply exp.dvcgen.ite, from y_free_in_P, from ih₁, from ih₂, from vc_valid_from_inst_valid vc_valid }, case exp.vcgen.return P y y_free_in_P { apply exp.dvcgen.return, from y_free_in_P } end lemma exp.dvcgen.return.inv {P: prop} {x: var} {Q: propctx}: (P ⊩ exp.return x : Q) → x ∈ FV P := assume return_verified: P ⊩ exp.return x : Q, begin cases return_verified, case exp.dvcgen.return x_free { show x ∈ FV P, from x_free } end lemma stack.dvcgen.top.inv {R: spec} {σ: env} {e: exp} {Q: propctx}: (⊩ₛ (R, σ, e) : Q) → ∃P Q₂, (⊩ σ: P) ∧ (FV R.to_prop ⊆ FV P) ∧ (σ ⊨ R.to_prop.to_vc) ∧ (R ⋀ P ⊩ e: Q₂) := assume top_verified: ⊩ₛ (R, σ, e) : Q, begin cases top_verified, case stack.dvcgen.top P Q env_verified fv_R R_valid e_verified { show ∃P Q₂, (⊩ σ: P) ∧ (FV R.to_prop ⊆ FV P) ∧ (σ ⊨ R.to_prop.to_vc) ∧ (R ⋀ P ⊩ e: Q₂), from exists.intro P (exists.intro Q ⟨env_verified, ⟨fv_R, ⟨R_valid, e_verified⟩⟩⟩) } end lemma env.dvcgen.inv {σ: env} {P: prop} {x: var} {v: value}: (⊩ σ : P) → (σ x = v) → ∃σ' Q', ⊩ (σ'[x↦v]) : Q' := assume env_verified: ⊩ σ : P, assume σ_x_is_v: σ x = v, show ∃σ' Q', ⊩ (σ'[x↦v]) : Q', by begin induction env_verified, case env.dvcgen.empty { from have env.apply env.empty x = none, by unfold env.apply, have some v = none, from eq.trans σ_x_is_v.symm this, show ∃σ' Q', ⊩ (σ'[x↦v]) : Q', from false.elim (option.no_confusion this) }, case env.dvcgen.tru σ' y Q y_not_in_σ' σ'_verified ih { from have env.apply (σ'[y↦value.true]) x = v, from σ_x_is_v, have h1: (if y = x ∧ option.is_none (σ'.apply x) then ↑value.true else σ'.apply x) = v, by { unfold env.apply at this, from this }, if h2: y = x ∧ option.is_none (σ'.apply x) then ( have (↑value.true) = ↑v, by { simp[h2] at h1, from h1 }, have v_is_true: v = value.true, from (option.some.inj this).symm, have x_not_in_σ': x ∉ σ', from h2.left ▸ y_not_in_σ', have ⊩ (σ'[x↦value.true]) : Q ⋀ x ≡ value.true, from env.dvcgen.tru x_not_in_σ' σ'_verified, have ⊩ (σ'[x↦v]) : Q ⋀ x ≡ value.true, from v_is_true.symm ▸ this, show ∃σ' Q', ⊩ (σ'[x↦v]) 
: Q', from exists.intro σ' (exists.intro (Q ⋀ x ≡ value.true) this) ) else ( have (σ'.apply x) = v, by { simp[h2] at h1, from h1 }, show ∃σ' Q', ⊩ (σ'[x↦v]) : Q', from ih this ) }, case env.dvcgen.fls σ' y Q y_not_in_σ' σ'_verified ih { from have env.apply (σ'[y↦value.false]) x = v, from σ_x_is_v, have h1: (if y = x ∧ option.is_none (σ'.apply x) then ↑value.false else σ'.apply x) = v, by { unfold env.apply at this, from this }, if h2: y = x ∧ option.is_none (σ'.apply x) then ( have (↑value.false) = ↑v, by { simp[h2] at h1, from h1 }, have v_is_false: v = value.false, from (option.some.inj this).symm, have x_not_in_σ': x ∉ σ', from h2.left ▸ y_not_in_σ', have ⊩ (σ'[x↦value.false]) : Q ⋀ x ≡ value.false, from env.dvcgen.fls x_not_in_σ' σ'_verified, have ⊩ (σ'[x↦v]) : Q ⋀ x ≡ value.false, from v_is_false.symm ▸ this, show ∃σ' Q', ⊩ (σ'[x↦v]) : Q', from exists.intro σ' (exists.intro (Q ⋀ x ≡ value.false) this) ) else ( have (σ'.apply x) = v, by { simp[h2] at h1, from h1 }, show ∃σ' Q', ⊩ (σ'[x↦v]) : Q', from ih this ) }, case env.dvcgen.num n σ' y Q y_not_in_σ' σ'_verified ih { from have env.apply (σ'[y↦value.num n]) x = v, from σ_x_is_v, have h1: (if y = x ∧ option.is_none (σ'.apply x) then ↑(value.num n) else σ'.apply x) = v, by { unfold env.apply at this, from this }, if h2: y = x ∧ option.is_none (σ'.apply x) then ( have ↑(value.num n) = ↑v, by { simp[h2] at h1, from h1 }, have v_is_num: v = value.num n, from (option.some.inj this).symm, have x_not_in_σ': x ∉ σ', from h2.left ▸ y_not_in_σ', have ⊩ (σ'[x↦value.num n]) : Q ⋀ x ≡ value.num n, from env.dvcgen.num x_not_in_σ' σ'_verified, have ⊩ (σ'[x↦v]) : Q ⋀ x ≡ value.num n, from v_is_num.symm ▸ this, show ∃σ' Q', ⊩ (σ'[x↦v]) : Q', from exists.intro σ' (exists.intro (Q ⋀ x ≡ value.num n) this) ) else ( have (σ'.apply x) = v, by { simp[h2] at h1, from h1 }, show ∃σ' Q', ⊩ (σ'[x↦v]) : Q', from ih this ) }, case env.dvcgen.func f σ₂ σ₁ g gx R S e Q₁ Q₂ Q₃ f_not_in_σ₁ g_not_in_σ₂ gx_not_in_σ₂ g_neq_gx σ₁_verified σ₂_verified x_free_in_R fv_R fv_S e_verified func_vc ih₁ ih₂ { from have env.apply (σ₁[f↦value.func g gx R S e σ₂]) x = v, from σ_x_is_v, have h1: (if f = x ∧ option.is_none (σ₁.apply x) then ↑(value.func g gx R S e σ₂) else σ₁.apply x) = v, by { unfold env.apply at this, from this }, if h2: f = x ∧ option.is_none (σ₁.apply x) then ( have ↑(value.func g gx R S e σ₂) = ↑v, by { simp[h2] at h1, from h1 }, have v_is_num: v = value.func g gx R S e σ₂, from (option.some.inj this).symm, have x_not_in_σ₁: x ∉ σ₁, from h2.left ▸ f_not_in_σ₁, have ⊩ (σ₁[x↦value.func g gx R S e σ₂]) : (Q₁ ⋀ x ≡ value.func g gx R S e σ₂ ⋀ prop.subst_env (σ₂[g↦value.func g gx R S e σ₂]) (prop.func g gx R (Q₃ (term.app g gx) ⋀ S))), from env.dvcgen.func x_not_in_σ₁ g_not_in_σ₂ gx_not_in_σ₂ g_neq_gx σ₁_verified σ₂_verified x_free_in_R fv_R fv_S e_verified func_vc, have ⊩ (σ₁[x↦v]) : (Q₁ ⋀ x ≡ value.func g gx R S e σ₂ ⋀ prop.subst_env (σ₂[g↦value.func g gx R S e σ₂]) (prop.func g gx R (Q₃ (term.app g gx) ⋀ S))), from v_is_num.symm ▸ this, show ∃σ₁ Q', ⊩ (σ₁[x↦v]) : Q', from exists.intro σ₁ (exists.intro (Q₁ ⋀ x ≡ value.func g gx R S e σ₂ ⋀ prop.subst_env (σ₂[g↦value.func g gx R S e σ₂]) (prop.func g gx R (Q₃ (term.app g gx) ⋀ S))) this) ) else ( have (σ₁.apply x) = v, by { simp[h2] at h1, from h1 }, show ∃σ₁ Q₁, ⊩ (σ₁[x↦v]) : Q₁, from ih₁ this ) } end lemma env.dvcgen.tru.inv {σ: env} {x: var} {Q: prop}: (⊩ (σ[x ↦ value.true]) : Q ⋀ x ≡ value.true) → x ∉ σ ∧ (⊩ σ : Q) := assume h: ⊩ (σ[x ↦ value.true]) : Q ⋀ x ≡ value.true, begin cases h, case env.dvcgen.tru h1 h2 { 
from ⟨h1, h2⟩ } end lemma env.dvcgen.fls.inv {σ: env} {x: var} {Q: prop}: (⊩ (σ[x ↦ value.false]) : Q ⋀ x ≡ value.false) → x ∉ σ ∧ (⊩ σ : Q) := assume h: ⊩ (σ[x ↦ value.false]) : Q ⋀ x ≡ value.false, begin cases h, case env.dvcgen.fls h1 h2 { from ⟨h1, h2⟩ } end lemma env.dvcgen.num.inv {σ: env} {x: var} {n: ℕ} {Q: prop}: (⊩ (σ[x ↦ value.num n]) : Q ⋀ x ≡ value.num n) → x ∉ σ ∧ (⊩ σ : Q) := assume h: ⊩ (σ[x ↦ value.num n]) : Q ⋀ x ≡ value.num n, begin cases h, case env.dvcgen.num h1 h2 { from ⟨h1, h2⟩ } end lemma env.dvcgen.func.inv {σ₁ σ₂: env} {f g x: var} {R S: spec} {e: exp} {Q: prop}: (⊩ (σ₁[f ↦ value.func g x R S e σ₂]) : Q) → ∃Q₁ Q₂ Q₃, f ∉ σ₁ ∧ g ∉ σ₂ ∧ x ∉ σ₂ ∧ g ≠ x ∧ (⊩ σ₁ : Q₁) ∧ (⊩ σ₂ : Q₂) ∧ x ∈ FV R.to_prop.to_vc ∧ FV R.to_prop ⊆ FV Q₂ ∪ { g, x } ∧ FV S.to_prop ⊆ FV Q₂ ∪ { g, x } ∧ (Q₂ ⋀ spec.func g x R S ⋀ R ⊩ e : Q₃) ∧ ⦃ prop.implies (Q₂ ⋀ spec.func g x R S ⋀ R ⋀ Q₃ (term.app g x)) S ⦄ ∧ (Q = (Q₁ ⋀ ((f ≡ (value.func g x R S e σ₂)) ⋀ prop.subst_env (σ₂[g↦value.func g x R S e σ₂]) (prop.func g x R (Q₃ (term.app g ↑x) ⋀ S))))) := assume h : ⊩ (σ₁[f ↦ value.func g x R S e σ₂]) : Q, begin cases h, case env.dvcgen.func Q₁ Q₂ Q₃ f_not_in_σ₁ g_not_in_σ₂ x_not_in_σ₂ g_neq_x σ₁_verified σ₂_verified x_free_in_R fv_R fv_S e_verified func_vc { from ⟨Q₁, ⟨Q₂, ⟨Q₃, ⟨f_not_in_σ₁, ⟨g_not_in_σ₂, ⟨x_not_in_σ₂, ⟨g_neq_x, ⟨σ₁_verified, ⟨σ₂_verified, ⟨x_free_in_R, ⟨fv_R, ⟨fv_S, ⟨e_verified, ⟨func_vc, rfl⟩⟩⟩⟩⟩⟩⟩⟩⟩⟩⟩⟩⟩⟩ } end lemma env.dvcgen.copy {σ₁ σ₂: env} {P₁ P₂} {x y: var} {v: value}: (⊩ σ₁ : P₁) → (y ∉ σ₁) → (⊩ (σ₂[x↦v]) : P₂) → ∃P₃, (⊩ (σ₁[y↦v]) : P₁ ⋀ P₃) := assume σ₁_verified: ⊩ σ₁ : P₁, assume y_not_in_σ₁: y ∉ σ₁, assume σ₂_xv_verified: ⊩ (σ₂[x↦v]) : P₂, show ∃P₃, (⊩ (σ₁[y↦v]) : P₁ ⋀ P₃), by begin cases σ₂_xv_verified, case env.dvcgen.tru { from have ⊩ (σ₁[y↦value.true]) : P₁ ⋀ y ≡ value.true, from env.dvcgen.tru y_not_in_σ₁ σ₁_verified, show ∃P₃, ⊩ (σ₁[y↦value.true]) : P₁ ⋀ P₃, from exists.intro (y ≡ value.true) this }, case env.dvcgen.fls { from have ⊩ (σ₁[y↦value.false]) : P₁ ⋀ y ≡ value.false, from env.dvcgen.fls y_not_in_σ₁ σ₁_verified, show ∃P₃, ⊩ (σ₁[y↦value.false]) : P₁ ⋀ P₃, from exists.intro (y ≡ value.false) this }, case env.dvcgen.num n { from have ⊩ (σ₁[y↦value.num n]) : P₁ ⋀ y ≡ value.num n, from env.dvcgen.num y_not_in_σ₁ σ₁_verified, show ∃P₃, ⊩ (σ₁[y↦value.num n]) : P₁ ⋀ P₃, from exists.intro (y ≡ value.num n) this }, case env.dvcgen.func σ₃ f fx R S e Q₃ Q₄ Q₂ x_not_in_σ₂ f_not_in_σ₃ fx_not_in_σ₃ f_neq_fx σ₂_verified σ₃_verified x_free_in_R fv_R fv_S e_verified func_vc { from have ⊩ (σ₁[y↦value.func f fx R S e σ₃]) : (P₁ ⋀ y ≡ value.func f fx R S e σ₃ ⋀ prop.subst_env (σ₃[f↦value.func f fx R S e σ₃]) (prop.func f fx R (Q₃ (term.app f fx) ⋀ S))), from env.dvcgen.func y_not_in_σ₁ f_not_in_σ₃ fx_not_in_σ₃ f_neq_fx σ₁_verified σ₃_verified x_free_in_R fv_R fv_S e_verified func_vc, show ∃P₃, ⊩ (σ₁[y↦value.func f fx R S e σ₃]) : P₁ ⋀ P₃, from exists.intro ( y ≡ value.func f fx R S e σ₃ ⋀ prop.subst_env (σ₃[f↦value.func f fx R S e σ₃]) (prop.func f fx R (Q₃ (term.app f fx) ⋀ S))) this } end lemma exp.dvcgen.inj {P: prop} {Q: propctx} {e: exp}: (P ⊩ e : Q) → ∀Q', (P ⊩ e : Q') → (Q = Q') := assume h1: P ⊩ e : Q, begin induction h1, intros Q' h2, cases h2, have : (Q_1 = Q_2), from ih_1 Q_2 a_3, rw[this], intros Q' h2, cases h2, have : (Q_1 = Q_2), from ih_1 Q_2 a_3, rw[this], intros Q' h2, cases h2, have : (Q_1 = Q_2), from ih_1 Q_2 a_3, rw[this], intros Q' h2, cases h2, have h3: (Q₁ = Q₁_1), from ih_1 Q₁_1 a_15, rw[←h3] at a_16, have : (Q₂ = Q₂_1), from ih_2 Q₂_1 a_16, 
rw[this], rw[h3], intros Q' h2, cases h2, have : (Q_1 = Q_2), from ih_1 Q_2 a_6, rw[this], intros Q' h2, cases h2, have : (Q_1 = Q_2), from ih_1 Q_2 a_8, rw[this], intros Q' h2, cases h2, have : (Q_1 = Q_2), from ih_1 Q_2 a_8, rw[this], intros Q' h2, cases h2, have : (Q₁ = Q₁_1), from ih_1 Q₁_1 a_5, rw[this], have : (Q₂ = Q₂_1), from ih_2 Q₂_1 a_6, rw[this], refl, intros Q' h2, cases h2, refl end lemma env.dvcgen.inj {P: prop} {σ: env}: (⊩ σ : P) → ∀Q, (⊩ σ : Q) → (P = Q) := assume h1: ⊩ σ : P, begin induction h1, intros Q h2, cases h2, refl, intros Q h2, cases h2, have : (Q = Q_1), from ih_1 Q_1 a_3, rw[this], refl, intros Q h2, cases h2, have : (Q = Q_1), from ih_1 Q_1 a_3, rw[this], refl, intros Q h2, cases h2, have : (Q = Q_1), from ih_1 Q_1 a_3, rw[this], refl, intros Q h2, cases h2, have h3: (Q₁ = Q₁_1), from ih_1 Q₁_1 a_15, rw[h3], have h4: (Q₂ = Q₂_1), from ih_2 Q₂_1 a_16, rw[←h4] at a_20, have : (Q₃ = Q₃_1), from exp.dvcgen.inj a_9 Q₃_1 a_20, rw[this], refl end lemma stack.dvcgen.inj {s: dstack} {Q₁: propctx}: (⊩ₛ s : Q₁) → ∀Q₂, (⊩ₛ s : Q₂) → (Q₁ = Q₂) := assume h1: ⊩ₛ s : Q₁, have ∀s' Q₂, (s = s') → (⊩ₛ s' : Q₂) → (Q₁ = Q₂), by begin cases h1, intros s' Q₂ h2 h3, cases h3, injection h2, have h4: (R = R_1), from h_1, have h6: (σ = σ_1), from h_2, have h7: (e = e_1), from h_3, have h8: (P = P_1), from env.dvcgen.inj a P_1 (h6.symm ▸ a_4), have : ↑R ⋀ P ⊩ e : Q_1, from h4.symm ▸ h7.symm ▸ h8.symm ▸ a_7, have h9: (Q = Q_1), from exp.dvcgen.inj a_3 Q_1 this, rw[←h8], rw[←h9], contradiction, intros s' Q₂ h2 h3, cases h3, contradiction, injection h2, have h4: (P₁ = P₁_1), from env.dvcgen.inj a_2 P₁_1 (h_3.symm ▸ a_17), rw[h4.symm] at a_24, rw[h_4.symm] at a_24, rw[h_5.symm] at a_24, rw[h_7.symm] at a_24, rw[h_6.symm] at a_24, rw[h_2.symm] at a_24, have h5: (Q₁_1 = Q₁), from exp.dvcgen.inj a_9 Q₁ a_24, rw[←h4], rw[←h_5], rw[←h_6], rw[←h_4], rw[h5] end, show ∀Q₂, (⊩ₛ s : Q₂) → (Q₁ = Q₂), from λQ₂ h1, (this s Q₂) rfl h1
Monument 113 depicts Ruler 2 participating in a scattering ritual.
function plot2DPlane(x1,x2,y1,y2,h_newfig)
if h_newfig
    figure;
    set(gcf,'position',[500,100,1000,650])
    set(gcf,'color','w');
end
set(gca,'visible','off')
mArrow2(x1,0,x2,0,{'color','k'});
mArrow2(0,y1,0,y2,{'color','k'});
xlim([x1 x2])
ylim([y1 y2])
for i = (x1+1):(x2-1)
    line([i i],[-0.05 0.05],'color','k')
    if i<=0
        t = text(i-0.2, -0.3,num2str(i));
    else
        t = text(i-0.05, -0.3,num2str(i));
    end
    t.FontSize = 15;
end
for i = (y1+1):(y2-1)
    line([-0.05 0.05],[i i],'color','k')
    if i<0
        t = text(-0.4, i,num2str(i));
    elseif i>0
        t = text(-0.3, i,num2str(i));
    end
    t.FontSize = 15;
end
t = text(x2*0.9,0.3,'x');
t.FontSize = 15;
t = text(0.2,y2*0.9,'y');
t.FontSize = 15;
axis square
[STATEMENT]
lemma Stable_reachable_AND_nmsg_0:
  "[|v \<in> V; w \<in> V|] ==> F \<in> Stable (reachable v \<inter> nmsg_eq 0 (v,w))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
 1. \<lbrakk>v \<in> V; w \<in> V\<rbrakk> \<Longrightarrow> F \<in> Stable (Reachability.reachable v \<inter> nmsg_eq 0 (v, w))
[PROOF STEP]
apply (subst lemma5)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
 1. \<lbrakk>v \<in> V; w \<in> V\<rbrakk> \<Longrightarrow> F \<in> Stable (nmsg_gte 0 (v, w) \<inter> nmsg_lte (Suc 0) (v, w) \<inter> (Reachability.reachable v \<inter> nmsg_lte 0 (v, w)))
[PROOF STEP]
apply (blast intro: MA5 Always_imp_Stable [THEN Stable_Int] MA6b)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
import Euclid.axioms

open Euclidean_plane

variables {point : Type} [Euclidean_plane point]

theorem prop1 (a b : point) : ∃ c, eqd a b a c → eqd a b b c := sorry

theorem prop2 (a b c : point) : ∃ d, eqd a d b c := sorry

theorem prop3 (a b c d : point) (h : ¬eqd a b c d) :
  (∃ x, B a x b → eqd a x c d) ∨ (∃ x, B c x d → eqd a b c x) := sorry

--theorem prop4
--theorem prop5
--theorem prop6

theorem prop7 (a b c d c' d' x : point) :
  eqd a c a c' → eqd b c b c' → B a d b → B a d' b → B c d x → B c' d' x →
  d ≠ x → d' ≠ x → c = c' :=
unique_tri a b c d c' d' x

--theorem prop8
--theorem prop9

theorem prop10 (a b : point) : ∃ c, B a c b → eqd a c b c := sorry

--theorem prop11
--theorem prop12
--theorem prop13
--theorem prop14
--theorem prop15
--theorem prop16
--theorem prop17
--theorem prop18
--theorem prop19
--theorem prop20
--theorem prop21
--theorem prop22
--theorem prop23
--theorem prop24
--theorem prop25
--theorem prop26
--theorem prop27
--theorem prop28
--theorem prop29

theorem prop30 (a b c d e f : point) {h1 : a ≠ b} {h2 : c ≠ d} {h3 : e ≠ f} :
  parallel a b c d h1 h2 → parallel a b e f h1 h3 → parallel c d e f h2 h3 := sorry
/-
Copyright (c) 2020 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov, Patrick Massot
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.topology.algebra.ordered
import Mathlib.data.set.intervals.proj_Icc
import Mathlib.PostPort

universes u_1 u_2

namespace Mathlib

/-!
# Projection onto a closed interval

In this file we prove that the projection `set.proj_Icc f a b h` is a quotient map,
and use it to show that `Icc_extend h f` is continuous if and only if `f` is continuous.
-/

theorem continuous_proj_Icc {α : Type u_1} [topological_space α] [linear_order α]
    [order_topology α] {a : α} {b : α} {h : a ≤ b} : continuous (set.proj_Icc a b h) :=
  continuous_subtype_mk (fun (x : α) => set.proj_Icc._proof_1 a b h x)
    (continuous.max continuous_const (continuous.min continuous_const continuous_id))

theorem quotient_map_proj_Icc {α : Type u_1} [topological_space α] [linear_order α]
    [order_topology α] {a : α} {b : α} {h : a ≤ b} : quotient_map (set.proj_Icc a b h) :=
  sorry

@[simp] theorem continuous_Icc_extend_iff {α : Type u_1} {β : Type u_2} [topological_space α]
    [linear_order α] [order_topology α] [topological_space β] {a : α} {b : α} {h : a ≤ b}
    {f : ↥(set.Icc a b) → β} : continuous (set.Icc_extend h f) ↔ continuous f :=
  iff.symm (quotient_map.continuous_iff quotient_map_proj_Icc)

theorem continuous.Icc_extend {α : Type u_1} {β : Type u_2} [topological_space α]
    [linear_order α] [order_topology α] [topological_space β] {a : α} {b : α} {h : a ≤ b}
    {f : ↥(set.Icc a b) → β} (hf : continuous f) : continuous (set.Icc_extend h f) :=
  continuous.comp hf continuous_proj_Icc
#pragma once
#include "IntervalGrad.h"
#ifndef _NOGSL
#include <gsl/gsl_vector.h>
#else
#include "CustomSolver.h"
#endif
#include "BooleanNodes.h"
#include "BooleanDAG.h"
#include "NodeVisitor.h"
#include "VarStore.h"
#include <map>
#include "FloatSupport.h"
#include <iostream>

using namespace std;

#define FINE_GRAIN_RANGES 1

class RangeDiff: NodeVisitor {
  FloatManager& floats;
  BooleanDAG& bdag;
  map<string, int> floatCtrls; // Maps float ctrl names to indices with grad vectors
  int nctrls; // number of float ctrls
  gsl_vector* ctrls; // ctrl values
  vector<IntervalGrad*> ranges; // Keeps track of ranges for each node
  vector<DistanceGrad*> distances; // Keeps track of distance metric for boolean nodes
  map<int, int> inputValues; // Maps node id to values set by the SAT solver
  double error = 0.0;
  gsl_vector* errorGrad;
  int DEFAULT_INP = -1;
  int assertCtr;
  bool foundFailure;
  static constexpr float ASSERT_PENALTY = 1.0;

public:
  int failedAssert;

  RangeDiff(BooleanDAG& bdag_p, FloatManager& _floats, const map<string, int>& floatCtrls_p);
  ~RangeDiff(void);

  virtual void visit( SRC_node& node );
  virtual void visit( DST_node& node );
  virtual void visit( CTRL_node& node );
  virtual void visit( PLUS_node& node );
  virtual void visit( TIMES_node& node );
  virtual void visit( ARRACC_node& node );
  virtual void visit( DIV_node& node );
  virtual void visit( MOD_node& node );
  virtual void visit( NEG_node& node );
  virtual void visit( CONST_node& node );
  virtual void visit( LT_node& node );
  virtual void visit( EQ_node& node );
  virtual void visit( AND_node& node );
  virtual void visit( OR_node& node );
  virtual void visit( NOT_node& node );
  virtual void visit( ARRASS_node& node );
  virtual void visit( UFUN_node& node );
  virtual void visit( TUPLE_R_node& node );
  virtual void visit( ASSERT_node& node );

  double run(const gsl_vector* ctrls_p, map<int, int>& inputValues_p, gsl_vector* errorGrad_p);

  void setrange(bool_node& bn, IntervalGrad* r) {
    ranges[bn.id] = r;
  }

  IntervalGrad* r(bool_node& bn) {
    IntervalGrad* interval = ranges[bn.id];
    if (interval == NULL) {
      gsl_vector* l = gsl_vector_alloc(nctrls);
      gsl_vector* h = gsl_vector_alloc(nctrls);
      interval = new IntervalGrad(0, 0, l, h);
      setrange(bn, interval);
    }
    return interval;
  }

  IntervalGrad* r(bool_node* bn) {
    return r(*bn);
  }

  void setdistance(bool_node& bn, DistanceGrad* d) {
    distances[bn.id] = d;
  }

  DistanceGrad* d(bool_node& bn) {
    DistanceGrad* dist = distances[bn.id];
    if (dist == NULL) {
      gsl_vector* g = gsl_vector_alloc(nctrls);
      dist = new DistanceGrad(0, g);
      setdistance(bn, dist);
    }
    return dist;
  }

  DistanceGrad* d(bool_node* bn) {
    return d(*bn);
  }

  void print() {
    for (int i = 0; i < bdag.size(); i++) {
      cout << bdag[i]->lprint() << " ";
      if (bdag[i]->getOtype() == OutType::FLOAT) {
        cout << r(bdag[i])->print() << endl;
      } else {
        cout << d(bdag[i])->print() << endl;
      }
    }
  }

  void printFull() {
    for (int i = 0; i < bdag.size(); i++) {
      cout << bdag[i]->lprint() << endl;
      if (bdag[i]->getOtype() == OutType::FLOAT) {
        cout << r(bdag[i])->printFull() << endl;
      } else {
        cout << d(bdag[i])->printFull() << endl;
      }
    }
  }

  bool isFloat(bool_node& bn) {
    return (bn.getOtype() == OutType::FLOAT);
  }

  bool isFloat(bool_node* bn) {
    return (bn->getOtype() == OutType::FLOAT);
  }

  int getInputValue(bool_node& bn) {
    if (inputValues.find(bn.id) != inputValues.end()) {
      int val = inputValues[bn.id];
      Assert(val == 0 || val == 1, "NYI: Integer values");
      return val;
    } else {
      return DEFAULT_INP;
    }
  }

  int getInputValue(bool_node* bn) {
    return getInputValue(*bn);
  }

  void computeError(double dist, int expected, gsl_vector* dg, bool_node& node, bool relax = false);
};
import combinatorics.simple_graph.coloring

--OUTPUT 1
theorem bipartite_iff_two_colorable {V : Type*} (G : simple_graph V) [fintype V] :
  (G.colorable 2) ↔ (∃ (A B : Type*) (h : (A ⊕ B) = V), G ≤ cast (congr_arg _ h) (complete_bipartite_graph A B)) :=
begin
  split,
  { assume h1 : G.colorable 2,
    have h2 : ∃ (A B : Type*) (h : (A ⊕ B) = V), G ≤ cast (congr_arg _ h) (complete_bipartite_graph A B), from sorry,
    show (∃ (A B : Type*) (h : (A ⊕ B) = V), G ≤ cast (congr_arg _ h) (complete_bipartite_graph A B)), from sorry,
  },
  { assume h1 : (∃ (A B : Type*) (h : (A ⊕ B) = V), G ≤ cast (congr_arg _ h) (complete_bipartite_graph A B)),
    have h2 : G.colorable 2, from sorry,
    show G.colorable 2, from sorry,
  }
end

/- FEW SHOT PROMPTS TO CODEX(START)
/--`theorem`
Power Set is Closed under Intersection
Let $S$ be a set.
Let $\powerset S$ be the power set of $S$.

Then:
:$\forall A, B \in \powerset S: A \cap B \in \powerset S$
`proof`
Let $A, B \in \powerset S$.

Then by the definition of power set, $A \subseteq S$ and $B \subseteq S$.

From Intersection is Subset we have that $A \cap B \subseteq A$.

It follows from Subset Relation is Transitive that $A \cap B \subseteq S$.

Thus $A \cap B \in \powerset S$ and closure is proved.
{{qed}}
-/
theorem power_set_intersection_closed {α : Type*} (S : set α) : ∀ A B ∈ 𝒫 S, (A ∩ B) ∈ 𝒫 S :=
begin
  assume (A : set α) (hA : A ∈ 𝒫 S) (B : set α) (hB : B ∈ 𝒫 S),
  have h1 : (A ⊆ S) ∧ (B ⊆ S), from sorry,
  have h2 : (A ∩ B) ⊆ A, from sorry,
  have h3 : (A ∩ B) ⊆ S, from sorry,
  show (A ∩ B) ∈ 𝒫 S, from sorry,
end

/--`theorem`
Square of Sum
:$\forall x, y \in \R: \paren {x + y}^2 = x^2 + 2 x y + y^2$
`proof`
Follows from the distribution of multiplication over addition:
{{begin-eqn}}
{{eqn | l = \left({x + y}\right)^2
      | r = \left({x + y}\right) \cdot \left({x + y}\right)
}}
{{eqn | r = x \cdot \left({x + y}\right) + y \cdot \left({x + y}\right)
      | c = Real Multiplication Distributes over Addition
}}
{{eqn | r = x \cdot x + x \cdot y + y \cdot x + y \cdot y
      | c = Real Multiplication Distributes over Addition
}}
{{eqn | r = x^2 + 2xy + y^2
      | c =
}}
{{end-eqn}}
{{qed}}
-/
theorem square_of_sum (x y : ℝ) : (x + y)^2 = (x^2 + 2*x*y + y^2) :=
begin
  calc (x + y)^2 = (x+y)*(x+y) : by sorry
  ... = x*(x+y) + y*(x+y) : by sorry
  ... = x*x + x*y + y*x + y*y : by sorry
  ... = x^2 + 2*x*y + y^2 : by sorry,
end

/--`theorem`
Identity of Group is Unique
Let $\struct {G, \circ}$ be a group. Then there is a unique identity element $e \in G$.
`proof`
From Group has Latin Square Property, there exists a unique $x \in G$ such that:
:$a x = b$

and there exists a unique $y \in G$ such that:
:$y a = b$

Setting $b = a$, this becomes:

There exists a unique $x \in G$ such that:
:$a x = a$

and there exists a unique $y \in G$ such that:
:$y a = a$

These $x$ and $y$ are both $e$, by definition of identity element.
{{qed}}
-/
theorem group_identity_unique {G : Type*} [group G] : ∃! e : G, ∀ a : G, e * a = a ∧ a * e = a :=
begin
  have h1 : ∀ a b : G, ∃! x : G, a * x = b, from sorry,
  have h2 : ∀ a b : G, ∃! y : G, y * a = b, from sorry,
  have h3 : ∀ a : G, ∃! x : G, a * x = a, from sorry,
  have h4 : ∀ a : G, ∃! y : G, y * a = a, from sorry,
  have h5 : ∀ a : G, classical.some (h3 a) = (1 : G), from sorry,
  have h6 : ∀ a : G, classical.some (h4 a) = (1 : G), from sorry,
  show ∃! e : G, ∀ a : G, e * a = a ∧ a * e = a, from by {
    use (1 : G),
    have h7 : ∀ e : G, (∀ a : G, e * a = a ∧ a * e = a) → e = 1, from by {
      assume (e : G) (h7 : ∀ a : G, e * a = a ∧ a * e = a),
      have h8 : ∀ a : G, e = classical.some (h3 a), from sorry,
      have h9 : ∀ a : G, e = classical.some (h4 a), from sorry,
      show e = (1 : G), from sorry,
    },
    sorry,
  }
end

/--`theorem`
Bipartite Graph is two colorable
Let $G$ be a graph. Then $G$ is 2-colorable if and only if $G$ is bipartite.
`proof`
Let $G$ be a 2-colorable graph, which means we can color every vertex either red or blue, and no edge will have both endpoints colored the same color. Let $A$ denote the subset of vertices colored red, and let $B$ denote the subset of vertices colored blue. Since all vertices of $A$ are red, there are no edges within $A$, and similarly for $B$. This implies that every edge has one endpoint in $A$ and the other in $B$, which means $G$ is bipartite.

Conversely, suppose $G$ is bipartite, that is, we can partition the vertices into two subsets $V_{1}, V_{2}$ such that every edge has one endpoint in $V_{1}$ and the other in $V_{2}$. Then coloring every vertex of $V_{1}$ red and every vertex of $V_{2}$ blue yields a valid coloring, so $G$ is 2-colorable.

QED
-/
theorem bipartite_iff_two_colorable {V : Type*} (G : simple_graph V) [fintype V] :
  (G.colorable 2) ↔ (∃ (A B : Type*) (h : (A ⊕ B) = V), G ≤ cast (congr_arg _ h) (complete_bipartite_graph A B)) :=
FEW SHOT PROMPTS TO CODEX(END)-/
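The `square_of_sum` prompt above leaves every `calc` step as `sorry`. As an editorial sketch (not part of the original record, and assuming mathlib is available, as the surrounding Lean 3 code already imports it), the whole identity can be discharged in one step with the `ring` tactic:

import data.real.basic
import tactic

-- Editor's sketch: the square-of-sum identity closed by `ring` (mathlib assumed).
example (x y : ℝ) : (x + y)^2 = (x^2 + 2*x*y + y^2) :=
by ring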
[STATEMENT]
lemma (in lbvs) phi_None [intro?]:
  "\<lbrakk> pc < length ins; c!pc = \<bottom> \<rbrakk> \<Longrightarrow> \<phi> ! pc = wtl (take pc ins) c 0 s0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
 1. \<lbrakk>pc < length ins; c ! pc = \<bottom>\<rbrakk> \<Longrightarrow> \<phi> ! pc = wtl (take pc ins) c 0 s\<^sub>0
[PROOF STEP]
by (simp add: phi_def)
[STATEMENT]
lemma prfx_Lam_iff:
  shows "u \<lesssim> Lam t \<longleftrightarrow> is_Lam u \<and> un_Lam u \<lesssim> t"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
 1. (u \<lesssim> \<^bold>\<lambda>\<^bold>[t\<^bold>]) = (is_Lam u \<and> un_Lam u \<lesssim> t)
[PROOF STEP]
using ide_char Arr_not_Nil Con_implies_is_Lam_iff_is_Lam Ide_implies_Arr is_Lam_def
[PROOF STATE]
proof (prove)
using this:
  ide ?t = Ide ?t
  Arr ?t \<Longrightarrow> ?t \<noteq> \<^bold>\<sharp>
  ?t \ ?u \<noteq> \<^bold>\<sharp> \<Longrightarrow> is_Lam ?t = is_Lam ?u
  Ide ?t \<Longrightarrow> Arr ?t
  is_Lam ?lambda = (\<exists>x3. ?lambda = \<^bold>\<lambda>\<^bold>[x3\<^bold>])
goal (1 subgoal):
 1. (u \<lesssim> \<^bold>\<lambda>\<^bold>[t\<^bold>]) = (is_Lam u \<and> un_Lam u \<lesssim> t)
[PROOF STEP]
by fastforce