Datasets: AI4M
Column: text — string lengths 0 to 3.34M
module Issue280 where

data PreModel (C : Set) (M : C → Set) : Set → Set where
  model : (c : C) → PreModel C M (M c)

reflect : (C : Set) (M : C → Set) → PreModel C M C → C
reflect .(M c) M (model c) = c
lemma holomorphic_onD [dest?]: "\<lbrakk>f holomorphic_on s; x \<in> s\<rbrakk> \<Longrightarrow> f field_differentiable (at x within s)"
#!/usr/bin/Rscript
# MIT License, John Williams, github @jaw-analytics
# takes 2 args, weight in kg, and calories consumed that day
# assumes user has R v.3.2.0 or greater installed and is on a unix/linux system
# usage ./weight_log.r 90 2000, where 90 is weight and 2000 is kcal consumed
args <- commandArgs(TRUE)
day <- format(Sys.Date(), "%m/%d")

# calculate BMR via Mifflin-St Jeor Equations:
# Mifflin, M. D., S. T. St Jeor, L. A. Hill, B. J. Scott, S. A. Daugherty, and Y. O. Koh. 1990.
# “A New Predictive Equation for Resting Energy Expenditure in Healthy Individuals.”
# The American Journal of Clinical Nutrition 51 (2): 241–47.
# m = mass in kg
# h = height in cm
# a = age in years
# g = gender, (-161 if female, +5 if male)
m <- as.numeric(args[1])
# change h to be your height in cm
h <- 171
# change a to your age in years
a <- 31
# change g to 5 if male, -161 if female
g <- 5
BMR <- (10.0*m + 6.25*h - 5.0*a + g)
# BMI from height in metres; uses h so it stays correct if the height above is changed
# (the original hard-coded 1.71^2, which matches only the default h = 171)
BMI <- m/((h/100)^2)

# log estimate of calories consumed
kcal <- as.numeric(args[2])
# log caloric balance for day
balance <- kcal - BMR

# write weight to log
# (log.txt is assumed to already contain a header row naming the columns read below)
cat(sprintf("%s\t%2.1f\t%2.2f\t%4.f\t%4.f\t%4.f\n", day, m, BMI, BMR, kcal, balance),
    file="log.txt", append=TRUE, sep="\t")

# read current log
log <- read.table(file="log.txt", header=TRUE, sep="\t")

# # # # # CREATE TIME SERIES
library(ggplot2)
library(forecast)
ts_weight  <- ts(log$Weight_kg)
ts_bmi     <- ts(log$BMI)
ts_balance <- ts(log$Kcal_Deficit)
ts_weight  <- HoltWinters(ts_weight, beta=FALSE, gamma=FALSE)
ts_bmi     <- HoltWinters(ts_bmi, beta=FALSE, gamma=FALSE)
ts_balance <- HoltWinters(ts_balance, beta=FALSE, gamma=FALSE)

# creates forecast. Days are set to (h=5); that can be changed for a longer forecast.
# If it is changed, correct the 'main' title in the plots below to reflect this.
weight_forecast  <- forecast.HoltWinters(ts_weight, h=5)
bmi_forecast     <- forecast.HoltWinters(ts_bmi, h=5)
balance_forecast <- forecast.HoltWinters(ts_balance, h=5)

# plot forecasts as weight_plots.svg
svg("weight_plots.svg")
par(mfrow=c(3,1))
plot.forecast(weight_forecast, main="5 Day Weight Forecast", xlab="Day", ylab="Weight (kg)")
plot.forecast(bmi_forecast, main="5 Day BMI Forecast", xlab="Day", ylab="BMI")
plot.forecast(balance_forecast, main="5 Day Caloric Deficit Forecast", xlab="Day",
              ylab="Energy Deficit (kcal)")
dev.off()
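For reference, here is a minimal sanity check of the Mifflin-St Jeor arithmetic used by the R sample above. It is an illustration only, written in plain Python and not part of the dataset row; the 90 kg weight and 2000 kcal intake are made-up inputs matching the script's usage comment.

def mifflin_st_jeor(mass_kg, height_cm, age_years, sex_term):
    # BMR in kcal/day; sex_term is +5 for male, -161 for female (Mifflin et al. 1990)
    return 10.0 * mass_kg + 6.25 * height_cm - 5.0 * age_years + sex_term

m, h, a, g = 90.0, 171.0, 31, 5            # same defaults as the script
bmr = mifflin_st_jeor(m, h, a, g)          # 1818.75 kcal/day
bmi = m / (h / 100.0) ** 2                 # about 30.8
balance = 2000 - bmr                       # about +181 kcal on a 2000 kcal day
print(f"BMR={bmr:.0f} kcal/day, BMI={bmi:.1f}, balance={balance:+.0f} kcal")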
import LMT

variable {I} [Nonempty I] {E} [Nonempty E] [Nonempty (A I E)]

example {a1 a2 a3 : A I E} :
    ((((a2).write i1 (v1)).write i2 (v1)).read i1) ≠ (v1) → False := by
  arr
[STATEMENT] lemma collect_pair_finite[intro]: notes finite_subset[intro] assumes "finite {x. P x}" "finite {x. Q x}" shows "finite {(x, y) . P x \<and> Q y \<and> R x y}" [PROOF STATE] proof (prove) goal (1 subgoal): 1. finite {(x, y). P x \<and> Q y \<and> R x y} [PROOF STEP] using assms [PROOF STATE] proof (prove) using this: finite {x. P x} finite {x. Q x} goal (1 subgoal): 1. finite {(x, y). P x \<and> Q y \<and> R x y} [PROOF STEP] proof - [PROOF STATE] proof (state) goal (1 subgoal): 1. \<lbrakk>finite {x. P x}; finite {x. Q x}\<rbrakk> \<Longrightarrow> finite {(x, y). P x \<and> Q y \<and> R x y} [PROOF STEP] from assms [PROOF STATE] proof (chain) picking this: finite {x. P x} finite {x. Q x} [PROOF STEP] have "finite {(x, y) . P x \<and> Q y}" [PROOF STATE] proof (prove) using this: finite {x. P x} finite {x. Q x} goal (1 subgoal): 1. finite {(x, y). P x \<and> Q y} [PROOF STEP] by auto [PROOF STATE] proof (state) this: finite {(x, y). P x \<and> Q y} goal (1 subgoal): 1. \<lbrakk>finite {x. P x}; finite {x. Q x}\<rbrakk> \<Longrightarrow> finite {(x, y). P x \<and> Q y \<and> R x y} [PROOF STEP] moreover [PROOF STATE] proof (state) this: finite {(x, y). P x \<and> Q y} goal (1 subgoal): 1. \<lbrakk>finite {x. P x}; finite {x. Q x}\<rbrakk> \<Longrightarrow> finite {(x, y). P x \<and> Q y \<and> R x y} [PROOF STEP] have "{(x, y) . P x \<and> (Q y \<and> R x y)} \<subseteq> {(x, y) . P x \<and> Q y}" [PROOF STATE] proof (prove) goal (1 subgoal): 1. {(x, y). P x \<and> Q y \<and> R x y} \<subseteq> {(x, y). P x \<and> Q y} [PROOF STEP] by auto [PROOF STATE] proof (state) this: {(x, y). P x \<and> Q y \<and> R x y} \<subseteq> {(x, y). P x \<and> Q y} goal (1 subgoal): 1. \<lbrakk>finite {x. P x}; finite {x. Q x}\<rbrakk> \<Longrightarrow> finite {(x, y). P x \<and> Q y \<and> R x y} [PROOF STEP] ultimately [PROOF STATE] proof (chain) picking this: finite {(x, y). P x \<and> Q y} {(x, y). P x \<and> Q y \<and> R x y} \<subseteq> {(x, y). P x \<and> Q y} [PROOF STEP] show ?thesis [PROOF STATE] proof (prove) using this: finite {(x, y). P x \<and> Q y} {(x, y). P x \<and> Q y \<and> R x y} \<subseteq> {(x, y). P x \<and> Q y} goal (1 subgoal): 1. finite {(x, y). P x \<and> Q y \<and> R x y} [PROOF STEP] by blast [PROOF STATE] proof (state) this: finite {(x, y). P x \<and> Q y \<and> R x y} goal: No subgoals! [PROOF STEP] qed
State Before: R : Type u S : Type v inst✝¹ : Ring R inst✝ : Ring S I✝ I' : Ideal R J✝ J' : Ideal S I : Ideal R J : Ideal S ⊢ IsPrime (prod I J) → I = ⊤ ∨ J = ⊤ State After: R : Type u S : Type v inst✝¹ : Ring R inst✝ : Ring S I✝ I' : Ideal R J✝ J' : Ideal S I : Ideal R J : Ideal S ⊢ I ≠ ⊤ ∧ J ≠ ⊤ → ¬IsPrime (prod I J) Tactic: contrapose! State Before: R : Type u S : Type v inst✝¹ : Ring R inst✝ : Ring S I✝ I' : Ideal R J✝ J' : Ideal S I : Ideal R J : Ideal S ⊢ I ≠ ⊤ ∧ J ≠ ⊤ → ¬IsPrime (prod I J) State After: R : Type u S : Type v inst✝¹ : Ring R inst✝ : Ring S I✝ I' : Ideal R J✝ J' : Ideal S I : Ideal R J : Ideal S ⊢ ¬1 ∈ I ∧ ¬1 ∈ J → ¬1 ∈ prod I J → ∃ x x_1 h, ¬x ∈ prod I J ∧ ¬x_1 ∈ prod I J Tactic: simp only [ne_top_iff_one, isPrime_iff, not_and, not_forall, not_or] State Before: R : Type u S : Type v inst✝¹ : Ring R inst✝ : Ring S I✝ I' : Ideal R J✝ J' : Ideal S I : Ideal R J : Ideal S ⊢ ¬1 ∈ I ∧ ¬1 ∈ J → ¬1 ∈ prod I J → ∃ x x_1 h, ¬x ∈ prod I J ∧ ¬x_1 ∈ prod I J State After: no goals Tactic: exact fun ⟨hI, hJ⟩ _ => ⟨⟨0, 1⟩, ⟨1, 0⟩, by simp, by simp [hJ], by simp [hI]⟩ State Before: R : Type u S : Type v inst✝¹ : Ring R inst✝ : Ring S I✝ I' : Ideal R J✝ J' : Ideal S I : Ideal R J : Ideal S x✝¹ : ¬1 ∈ I ∧ ¬1 ∈ J x✝ : ¬1 ∈ prod I J hI : ¬1 ∈ I hJ : ¬1 ∈ J ⊢ (0, 1) * (1, 0) ∈ prod I J State After: no goals Tactic: simp State Before: R : Type u S : Type v inst✝¹ : Ring R inst✝ : Ring S I✝ I' : Ideal R J✝ J' : Ideal S I : Ideal R J : Ideal S x✝¹ : ¬1 ∈ I ∧ ¬1 ∈ J x✝ : ¬1 ∈ prod I J hI : ¬1 ∈ I hJ : ¬1 ∈ J ⊢ ¬(0, 1) ∈ prod I J State After: no goals Tactic: simp [hJ] State Before: R : Type u S : Type v inst✝¹ : Ring R inst✝ : Ring S I✝ I' : Ideal R J✝ J' : Ideal S I : Ideal R J : Ideal S x✝¹ : ¬1 ∈ I ∧ ¬1 ∈ J x✝ : ¬1 ∈ prod I J hI : ¬1 ∈ I hJ : ¬1 ∈ J ⊢ ¬(1, 0) ∈ prod I J State After: no goals Tactic: simp [hI]
      PROGRAM WRFILE
      CHARACTER FNAME*80
      WRITE(*,*) 'Enter Filename'
      READ(*,*) FNAME
C     Use of NAME in OPEN is VMS Fortran extension
      OPEN (UNIT=8,NAME=FNAME,ERR=999)
      WRITE(8,100) FNAME
  100 FORMAT(1x,'Sample Output to File ',A80)
      STOP
  999 WRITE(*,*) 'Error opening file'
      STOP 2
      END
[STATEMENT] lemma lmember_Lazy_llist [code]: "lmember x (Lazy_llist xs) = (case xs () of None \<Rightarrow> False | Some (y, ys) \<Rightarrow> x = y \<or> lmember x ys)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. lmember x (Lazy_llist xs) = (case xs () of None \<Rightarrow> False | Some (y, ys) \<Rightarrow> x = y \<or> lmember x ys) [PROOF STEP] by(simp add: lmember_def)
/-
Copyright (c) 2021 OpenAI. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kunhao Zheng, Stanislas Polu, David Renshaw, OpenAI GPT-f
-/
import mathzoo.imports.miniF2F

open_locale nat rat real big_operators topological_space

theorem mathd_algebra_245 (x : ℝ) (h₀ : x ≠ 0) :
  (4 / x)⁻¹ * ((3 * x^3) / x)^2 * ((1 / (2 * x))⁻¹)^3 = 18 * x^8 :=
begin
  field_simp [h₀]; ring,
end
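A quick numeric spot-check of the identity proved in mathd_algebra_245 above; this Python snippet is illustrative commentary, not part of the dataset row. It verifies (4/x)⁻¹ · ((3x³)/x)² · ((1/(2x))⁻¹)³ = 18x⁸ at a few nonzero points.

def lhs(x):
    # left-hand side of the identity, for x != 0
    return (4 / x) ** -1 * ((3 * x**3) / x) ** 2 * ((1 / (2 * x)) ** -1) ** 3

for x in (0.5, 1.0, -2.0, 3.7):
    rhs = 18 * x**8
    assert abs(lhs(x) - rhs) < 1e-9 * max(1.0, abs(rhs))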
-- A Formal Proof of the Lovasz Local Lemma and Symmetric Lovasz Local Lemma -- This import covers everything we need; finsets, measure theory, ennreals, and probability theory. import probability.independence /- Since we are constantly dealing with finite sets, measures, and big products/intersections, it will make the proof much more readable if we open these libraries/locales. -/ open finset measure_theory open_locale big_operators /- If the events Eᵢ are all independent and occur with probability less than 1, then it's obviously true that one can avoid them all with nonzero probability, simply due to the fact that a product of positive quantities is positive. The Lovasz Local Lemma says the same holds if the events are "almost independent", a notion captured by some "pseudo-probabilities" X and a dependency digraph Γ. I'm following the proof from these notes (and, for readability, using it's notation as well): https://theory.stanford.edu/~jvondrak/MATH233A-2018/Math233-lec02.pdf To be more precise, here is the full theorem statement in English: Suppose we have a probability space Ω with probability measure ℙ, as well as events E₁,…,Eₙ. Let G be a dependency digraph for these events, and let Γ(i) be the neighborhood of Eᵢ in G. In other words, Γ(i) lists all other event indices j such that j ≠ i and Eᵢ depends on Eⱼ. Also, assume that we have real numbers X₁,…,Xₙ in the open interval (0, 1) such that, for each i, we have that ℙ(Eᵢ) ≤ Xᵢ * (∏ j, 1 - Xⱼ), where the product is taken over all j ∈ Γ(i). Given all of this, theorem says that we can avoid all the events; the probability of the intersection of their complements is nonzero. In particular, it is bounded from below by (∏ j, 1 - Xⱼ), where the product is taken over all j ∈ {1,…,n}. -/ theorem lovasz_local_lemma {Ω : Type*} [measurable_space Ω] {ℙ : measure Ω} [is_probability_measure ℙ] {n : ℕ} {E : fin n → set Ω} {h_events : ∀ i, measurable_set (E i)} {Γ : fin n → finset (fin n)} (h_no_self_loops : ∀ i, i ∉ Γ i) (h_dependency_digraph : ∀ i, ∀ J ⊆ ({i} ∪ (Γ i))ᶜ, probability_theory.indep_sets {E i} {⋂ j ∈ J, (E j)ᶜ} ℙ) {X : fin n → ennreal} (h_pseudo_probability : ∀ i, 0 < X i ∧ X i < 1) (h_independence_bound : ∀ i, ℙ (E i) ≤ X i * ∏ j in Γ i, (1 - X j)) : ℙ (⋂ i, (E i)ᶜ) ≠ 0 ∧ ∏ i, (1 - X i) ≤ ℙ (⋂ i, (E i)ᶜ) := begin /- To make life easier, we make a few local definitions: - Firstly, we extend the dependency digraph Γ to include self-loops; after all, nontrivial events are dependent on themselves. Call this new digraph Γ'. - Secondly, we define shorthand for the intersection of the complements some subset of our events, since we'll be using it a lot. - Thirdly, we define shorthand for the probability of the above intersection. -/ let Γ' : (fin n → finset (fin n)) := λ i, insert i (Γ i), let inter_over : (finset (fin n) → set Ω) := λ S, ⋂ i ∈ S, (E i)ᶜ, let P : (finset (fin n) → ennreal) := λ S, ℙ (inter_over S), /- We'll also prove a few helpful lemmas about these definitions and the definitions in the theorem statement. - 1. The probability of the empty intersection is 1. - 2. The intersection of more sets is smaller than the intersection of fewer sets. - 3. We have 0 < 1 - X i < 1 for all i. - 4. P finset.univ is the probability of the intersection of the complements. - 5. Given S, a ∈ S, and any set T, (S \ insert a T) is of course a subset of (S.erase a). We use this a lot. - 6. Given S, a ∈ S, and any set T, (S \ insert a T) is of course a strict subset of S. We also use this a lot. 
All of these lemmas have simple proofs, so I didn't think any annotations were necessary. -/ have P_empty_eq_one : P ∅ = 1 := begin have inter_over_empty_eq_univ : inter_over ∅ = set.univ := begin rw set.Inter_eq_univ, intro i, ext x, split, { intro _, exact set.mem_univ x, }, { intro _, rw set.mem_Inter, intro i_in_empty, exfalso, exact set.not_mem_empty i i_in_empty, }, end, simp only [P], rw inter_over_empty_eq_univ, exact measure_univ, end, have inter_subset_of_supset : ∀ M N : finset (fin n), N ⊆ M → inter_over M ⊆ inter_over N := begin intros M N N_subset_M, intros x hx, rw set.mem_Inter, intro i, rw set.mem_Inter, intro hi, rw set.mem_Inter at hx, specialize hx i, rw set.mem_Inter at hx, exact hx (mem_of_subset N_subset_M hi), end, have one_minus_pprob_is_pprob : ∀ i, 0 < 1 - X i ∧ 1 - X i < 1 := begin intro i, split, { rw tsub_pos_iff_lt, exact (h_pseudo_probability i).2, }, exact ennreal.sub_lt_self ennreal.one_ne_top one_ne_zero (ne_of_gt (h_pseudo_probability i).1), end, have P_univ_eq_prob_inter : P univ = ℙ (⋂ i, (E i)ᶜ) := begin simp only [P, inter_over], simp only [mem_univ, set.Inter_true], end, have sdiff_subset : ∀ S : finset (fin n), ∀ a ∈ S, ∀ T : finset (fin n), S \ insert a T ⊆ S.erase a := begin intros S a a_in_S T, rw ← sdiff_singleton_eq_erase, exact sdiff_subset_sdiff (subset_refl S) (singleton_subset_iff.2 (mem_insert_self a T)), end, have sdiff_ssubset : ∀ S : finset (fin n), ∀ a ∈ S, ∀ T : finset (fin n), S \ insert a T ⊂ S := begin intros S a a_in_S T, apply finset.ssubset_of_subset_of_ssubset, exact sdiff_subset _ _ a_in_S _, exact erase_ssubset a_in_S, end, /- The bulk of the work is done by the following lemma: For all S ⊆ {1,...,n}, we have that the probability of avoiding all events Eₐ for a ∈ S is nonzero, as well as the fact that the probability of avoiding all events Eₐ for a ∈ S is more than (1 - Xₐ) times the probability of avoiding all events Eᵢ for i ∈ S \ {a}. -/ have main_lemma : ∀ S : finset (fin n), P S ≠ 0 ∧ ∀ a ∈ S, P (S.erase a) * (1 - X a) ≤ P S := begin -- We go by strong induction on S; the predicate we're trying to prove is of course: let predicate : (finset (fin n) → Prop) := λ S, P S ≠ 0 ∧ ∀ a ∈ S, P (S.erase a) * (1 - X a) ≤ P S, -- As is typical in strong induction, the induction step absorbs the base case. have induction_step : ∀ S : finset (fin n), (∀ T ⊂ S, predicate T) → predicate S := begin -- Stop using predicate notation. simp only [predicate], clear predicate, -- Let S be arbitrary and assume the claim holds for all strictly smaller sets. intros S induction_hypothesis, /- We now prove the main part of our goal as a lemma, for convenience (it implies the other part). This section contains nearly all of the hard work for proving the Lovasz Local Lemma. -/ have main_inequality : ∀ a ∈ S, P (S.erase a) * (1 - X a) ≤ P S := begin -- Fortunately we can fix our element a right away. intros a a_in_S, -- First, we use independence and probability basics to get a lower bound on P S. have lower_bound : P (S.erase a) - ℙ (E a) * P (S \ Γ' a) ≤ P S := begin -- We pull (E a)ᶜ out of the intersection over S. have inter_over_S_split : inter_over S = (E a)ᶜ ∩ inter_over (S.erase a) := begin simp only [inter_over], rw [← insert_erase a_in_S, set_bInter_insert a (S.erase a), insert_erase a_in_S], end, -- Using the above, we pull (E a)ᶜ out and use complementary measure. 
have P_S_split : P S = P (S.erase a) - ℙ ((E a) ∩ inter_over (S.erase a)) := begin simp only [P], rw inter_over_S_split, symmetry, apply ennreal.sub_eq_of_eq_add, { exact ne_of_lt (measure_lt_top ℙ _), }, symmetry, rw [← set.diff_eq_compl_inter, set.inter_comm], apply measure_diff_add_inter, exact h_events a, end, -- We have a simple inequality arising from monotonicity of measure. have inequality : P (S.erase a) - ℙ ((E a) ∩ inter_over (S \ Γ' a)) ≤ P (S.erase a) - ℙ ((E a) ∩ inter_over (S.erase a)) := begin -- This immediately follows from lemma (2). have subset : (E a) ∩ inter_over (S.erase a) ⊆ (E a) ∩ inter_over (S \ Γ' a) := begin intros x hx, split, { exact hx.1, }, have subset := sdiff_subset S a a_in_S (Γ a), exact set.mem_of_subset_of_mem (inter_subset_of_supset (S.erase a) (S \ Γ' a) subset) hx.2, end, exact tsub_le_tsub_left (measure_theory.outer_measure.mono' ℙ.to_outer_measure subset) _, end, -- Finally, we use independence to separate (E a) from its independent sets as given by S \ Γ' a. have prob_inter_eq_prod : ℙ ((E a) ∩ inter_over (S \ Γ' a)) = ℙ (E a) * P (S \ Γ' a) := begin specialize h_dependency_digraph a (S \ Γ' a), have subset : S \ Γ' a ⊆ (Γ' a)ᶜ := begin rw sdiff_eq_inter_compl, intros x hx, rw mem_inter at hx, exact hx.2, end, exact probability_theory.indep_sets_singleton_iff.1 (h_dependency_digraph subset), end, -- We combine the above to complete the proof. rw ← P_S_split at inequality, rwa ← prob_inter_eq_prod, end, /- Now, we write P (S \ Γ' a) / P (S.erase a) as a telescoping product, and apply induction_hypothesis to each of the terms. In practice, we'll do this one term at a time using another induction. -/ have product_bound : P (S \ Γ' a) ≤ P (S.erase a) * (∏ i in (S ∩ (Γ a)), (1 - X i)⁻¹) := begin -- We go by induction on T; the predicate we're trying to prove is: let predicate : (finset (fin n) → Prop) := λ T, P (S \ Γ' a) ≤ P (S.erase a) * (∏ i in T, (1 - X i)⁻¹) * P (S \ Γ' a) * (P (S \ (insert a T)))⁻¹, have induction_lemma : predicate (S ∩ Γ a) := begin -- The base case is more or less immediate. have base_case : predicate ∅ := begin -- Stop using predicate notation. simp only [predicate], clear predicate, rw [ insert_eq, prod_empty, mul_one, union_empty, sdiff_singleton_eq_erase a S, mul_comm, ← mul_assoc, ennreal.inv_mul_cancel ], { rw one_mul, exact le_refl _, }, { exact (induction_hypothesis (S.erase a) (erase_ssubset a_in_S)).1, }, exact ne_of_lt (measure_lt_top ℙ _), end, -- The induction step makes use of induction_hypothesis to show the upper bound for one more term. have induction_step : ∀ b, ∀ T, b ∈ S ∩ Γ a → T ⊆ S ∩ Γ a → b ∉ T → predicate T → predicate (insert b T) := begin -- Stop using predicate notation. simp only [predicate], clear base_case, clear predicate, -- Let T and b be arbitrary such that b ∉ T, and assume the claim holds for S. intros b T b_in_S_cap_Gam_a T_subset b_notin_T ih_lem, -- First, we pull b out of the product. rw [prod_insert b_notin_T, mul_comm (1 - X b)⁻¹ _, mul_assoc], /- Next, (1 - X b)⁻¹ is lower bounded by the desired ratio; this follows from the inductive_hypothesis. Although this ends up being rather difficult in Lean, it's not anything mathematically interesting; it's an immediate application of the inductive hypothesis to (S \ insert a T) and basic inequality manipulation. So, I didn't feel it necessary to provide annotations here. 
-/ have ih_lower_bound : P (S \ insert a (insert b T)) * (P (S \ insert a T))⁻¹ ≤ (1 - X b)⁻¹ := begin specialize induction_hypothesis (S \ insert a T) (sdiff_ssubset S a a_in_S T), have induction_bound := induction_hypothesis.2 b, rw [ ennreal.le_inv_iff_mul_le, mul_assoc, mul_comm _ (1 - X b), ← mul_assoc, ← ennreal.le_inv_iff_mul_le, inv_inv ], repeat { rw insert_eq }, rw ← sdiff_insert at induction_bound, repeat { rw insert_eq at induction_bound }, rw [union_comm, union_assoc, union_comm T _], have b_in_set : b ∈ S \ ({a} ∪ T) := begin rw mem_sdiff, split, { rw mem_inter at b_in_S_cap_Gam_a, exact b_in_S_cap_Gam_a.1, }, rw not_mem_union, split, { rw not_mem_singleton, by_contradiction b_eq_a, rw b_eq_a at b_in_S_cap_Gam_a, exact (not_mem_mono (inter_subset_right S (Γ a)) (h_no_self_loops a)) b_in_S_cap_Gam_a, }, exact b_notin_T, end, exact induction_bound b_in_set, end, -- To make what follows easier, we move the term (1 - X b)⁻¹ all the way to the right. rw [mul_assoc, mul_assoc _ (1 - X b)⁻¹ _, mul_comm (1 - X b)⁻¹ _], repeat { rw ← mul_assoc }, -- We can use transitivity with the lemma's induction hypothesis to reduce the inequality. transitivity', exact ih_lem, -- We split a ratio (i.e. use a/c = a/b * b/c) to prepare the inequality for applying ih_lower_bound. rw ← mul_one (P (S \ Γ' a)), have ne_zero : P (S \ insert a (insert b T)) ≠ 0 := (induction_hypothesis (S \ insert a (insert b T)) (sdiff_ssubset S a a_in_S (insert b T))).1, have ne_top : P (S \ insert a (insert b T)) ≠ ⊤ := ne_of_lt (measure_lt_top ℙ _), nth_rewrite_lhs 0 [← ennreal.inv_mul_cancel ne_zero ne_top], rw mul_one, repeat { rw ← mul_assoc }, rw mul_assoc _ _ (P (S \ insert a T))⁻¹, -- Finally, we apply ih_lower_bound, which completes the induction step. exact ennreal.mul_le_mul (le_refl _) ih_lower_bound, end, -- Invoking the induction theorem for finite sets completes the proof. exact finset.induction_on' (S ∩ Γ a) base_case induction_step, end, -- Stop using predicate notation. simp only [predicate] at induction_lemma, clear predicate, -- The desired bound follows easily from the induction_lemma; we just need to cancel the division. have same_set : S \ (insert a (S ∩ Γ a)) = S \ Γ' a := begin simp only [Γ'], repeat { rw insert_eq }, rw [ union_distrib_left, sdiff_inter_distrib_right, sdiff_eq_empty_iff_subset.2 (subset_union_right _ _), empty_union _ ], end, rwa [same_set, mul_assoc, ennreal.mul_inv_cancel, mul_one] at induction_lemma, { have ssubset := finset.ssubset_of_subset_of_ssubset (sdiff_subset S a a_in_S (Γ a)) (erase_ssubset a_in_S), exact (induction_hypothesis (S \ Γ' a) ssubset).1, }, exact ne_of_lt (measure_lt_top ℙ _), end, -- The last big task is getting rid of the two products; we'll first need to combine them into one. have prod_cancel : (∏ i in Γ a, (1 - X i)) * (∏ i in S ∩ Γ a, (1 - X i)⁻¹) = ∏ i in Γ a \ S, (1 - X i) := begin have prod_split : (∏ i in Γ a, (1 - X i)) = (∏ i in Γ a \ S, (1 - X i)) * (∏ i in S ∩ Γ a, (1 - X i)) := begin rw [inter_comm, mul_comm], have piecewise_same := set.piecewise_same (↑S) (λ i, (1 - X i)), nth_rewrite 0 [← piecewise_same], rw [piecewise_coe, prod_piecewise], end, have cancel : ∀ i ∈ S ∩ Γ a, (1 - X i) * (1 - X i)⁻¹ = 1 := begin intros i _, rw ennreal.mul_inv_cancel, exact ne_of_gt (one_minus_pprob_is_pprob i).1, exact ne_of_lt (lt_trans (one_minus_pprob_is_pprob i).2 ennreal.one_lt_top), end, rw [prod_split, mul_assoc, ← prod_mul_distrib, prod_eq_one cancel, mul_one], end, -- And now we can of course upper bound this resulting product by 1. 
have prod_le_one : (∏ i in Γ a \ S, (1 - X i)) ≤ 1 := begin have le_one : ∀ i ∈ Γ a \ S, 1 - X i ≤ 1 := begin intros i _, exact le_of_lt (one_minus_pprob_is_pprob i).2, end, exact prod_le_one' le_one, end, -- Using the above work, we combine the independence and product bounds and then simplify the big products. have two_products := ennreal.mul_le_mul (h_independence_bound a) product_bound, rw mul_comm (P (S.erase a)) _ at two_products, repeat { rw mul_assoc at two_products }, rw [← mul_assoc _ _ (P (S.erase a)), prod_cancel] at two_products, -- From here, we can upper bound the product by 1 and conclude that P (S.erase a) * (1 - X a) ≤ P S. rw [mul_comm _ (P (S.erase a)), ← mul_assoc _ (P (S.erase a)) _, mul_comm _ (P (S.erase a))] at two_products, have no_products := ennreal.mul_le_mul (refl (P (S.erase a) * X a)) prod_le_one, rw mul_one at no_products, have final_inequality := le_trans two_products no_products, -- Combining the above with the lower bound from earlier, we complete the proof of the main inequality! rw ennreal.mul_sub, swap, { intros _ _, exact ne_of_lt (measure_lt_top ℙ _), }, rw mul_one, exact le_trans (tsub_le_tsub_left final_inequality (P (S.erase a))) lower_bound, end, -- Using the main part of our goal, we can now quickly prove the easier part of our goal (that P S ≠ 0). split, { by_cases S_nonempty : S = ∅, { rw [S_nonempty, P_empty_eq_one], exact one_ne_zero, }, cases nonempty_iff_ne_empty.2 S_nonempty with a a_in_S, apply ne_of_gt, specialize main_inequality a a_in_S, specialize induction_hypothesis (S.erase a) (erase_ssubset a_in_S), apply lt_of_le_of_lt', { exact main_inequality, }, exact ennreal.mul_pos induction_hypothesis.1 (ne_of_gt (one_minus_pprob_is_pprob a).1), }, -- Finally, we complete the proof; we've already finished proving the second part of the goal. exact main_inequality, end, -- Invoking the strong induction theorem for finite sets completes the proof. intro S, exact finset.strong_induction_on S induction_step, end, -- Now that we've proven the lemma, we can immediately conclude the first part the theorem. split, { have events_avoidable := (main_lemma univ).1, rwa P_univ_eq_prob_inter at events_avoidable, }, /- We'll now use induction to create a stronger version of our lemma. NOTE: Unfortunately, as tempting as it was to use finset.prod_range_induction, I couldn't find a good way to deal with the fact that E : fin n → set Ω rather than E : ℕ → set Ω. In particular, that theorem has a hypothesis "∀ (k : ℕ), s (k + 1) = s k * f k", and I couldn't think of a good way to expand E to domain ℕ without breaking this hypothesis condition for k = n. -/ have stronger_lemma : ∀ S : finset (fin n), P Sᶜ * ∏ i in S, (1 - X i) ≤ P univ := begin -- We go by induction on S; the predicate we're trying to prove is of course: let predicate : (finset (fin n) → Prop) := λ S, P Sᶜ * ∏ i in S, (1 - X i) ≤ P univ, have base_case : predicate ∅ := begin -- Stop using predicate notation. simp only [predicate], clear predicate, rw [finset.prod_empty, mul_one, compl_empty], exact le_refl _, end, have induction_step : ∀ a : fin n, ∀ S : finset (fin n), a ∉ S → predicate S → predicate (insert a S) := begin -- Stop using predicate notation. simp only [predicate], clear base_case, clear predicate, -- Let S and a be arbitrary such that a ∉ S, and assume the claim holds for S. intros a S a_notin_S induction_hypothesis, -- We pull an element out of Sᶜ, which is the same as adding an element to S, and apply the main lemma to it. 
specialize main_lemma Sᶜ, have main_lemma_ineq := main_lemma.2, clear main_lemma, specialize main_lemma_ineq a (mem_compl.2 a_notin_S), rw ← compl_insert at main_lemma_ineq, -- Now we turn the product over (insert a S) into the product over S times the term at a rw [prod_insert a_notin_S, ← mul_assoc], -- Transitivity and multiplicativity of ≤ for nonnegative reals completes the proof. exact le_trans (ennreal.mul_le_mul main_lemma_ineq (refl _)) induction_hypothesis, end, -- Invoking the induction theorem for finite sets completes the proof. exact finset.induction base_case induction_step, end, -- Finally, we can use this stronger version to conclude the proof of the theorem! specialize stronger_lemma univ, rwa [P_univ_eq_prob_inter, ← compl_empty, compl_involutive, P_empty_eq_one, one_mul] at stronger_lemma, end /- There is also a "symmetric" version of the theorem, which is typically the one used in practice since it only deals with an upper the number of other events that an event is dependent on rather than the specific events. To be precise, it says if each event is individually avoidable (probability strictly less than 1, call it p), each event depends on at most d other events, and ep(d + 1) ≤ 1 (where e is Euler's number), then the events are collectively avoidable; the probability of the intersection of their complements is nonzero. NOTE: I couldn't find anything on Euler's number in Lean, so I decided to just use the slightly tighter bound p ≤ (1 - 1/(d + 1))^d / (d + 1). Indeed ep(d + 1) ≤ 1, we have p(d + 1) ≤ e^(-1) ≤ e^(-d/(d + 1)). By a classical inequality, this is at most (1 - 1/(d + 1))^d, so our assumption is indeed stronger. -/ theorem symmetric_lovasz_local_lemma {Ω : Type*} [measurable_space Ω] {ℙ : measure Ω} [is_probability_measure ℙ] {n : ℕ} {E : fin n → set Ω} {h_events : ∀ i, measurable_set (E i)} {Γ : fin n → finset (fin n)} (h_no_self_loops : ∀ i, i ∉ Γ i) (h_dependency_digraph : ∀ i, ∀ J ⊆ ({i} ∪ (Γ i))ᶜ, probability_theory.indep_sets {E i} {⋂ j ∈ J, (E j)ᶜ} ℙ) (p : ennreal) (h_probability : 0 < p ∧ p < 1) (h_event_probability_bound : ∀ i, ℙ (E i) ≤ p) (d : ℕ) (h_d_pos : 1 ≤ d) (h_maximum_dependence : ∀ i, (Γ i).card ≤ d) (h_p_bound : p ≤ (d + 1)⁻¹ * (1 - (d + 1)⁻¹)^d) : ℙ (⋂ i, (E i)ᶜ) ≠ 0 := begin -- We take our pseudo-probabiliies to be Xᵢ = 1 / (d + 1) let X : fin n → ennreal := λ _, (d + 1)⁻¹, -- First, we need to show that X actually gives pseudo-probabilities. have h_pseudo_probability : ∀ i, 0 < X i ∧ X i < 1 := begin intro i, simp only [X], split, { simp, }, rw ennreal.inv_lt_one, apply lt_of_lt_of_le, exact ennreal.one_lt_two, rw ← one_add_one_eq_two, apply' add_le_add, { norm_cast, exact h_d_pos, }, exact le_refl _, end, -- The main work here is showing that X actually gives an independence bound; it uses a classical inequality. have h_independence_bound : ∀ i, ℙ (E i) ≤ X i * ∏ j in Γ i, (1 - X j) := begin intro i, transitivity', exact h_event_probability_bound i, -- We upper bound p using the h_p_bound, transitivity', exact h_p_bound, -- We now simplify the right-hand side using the definition of X. simp only [X], apply' ennreal.mul_le_mul, exact le_refl _, -- Finally, we're just left with a constant product over Γ i. rw prod_const, have le_one : 1 - (ennreal.has_coe.coe d + 1)⁻¹ ≤ 1 := by simp, exact ennreal.pow_le_pow_of_le_one le_one (h_maximum_dependence i), end, -- From here, it's just a direct aplication of the (asymmetric) Lovasz Local Lemma. 
have result := lovasz_local_lemma h_no_self_loops h_dependency_digraph h_pseudo_probability h_independence_bound, swap, exact h_events, exact result.1, end
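The closing comment of the symmetric lemma above replaces the usual condition e·p·(d+1) ≤ 1 with p ≤ (1 − 1/(d+1))^d / (d+1). As a small illustration (not part of the sample), the Python check below confirms numerically that the classical condition implies the one used in the formalisation, since 1/(e·(d+1)) ≤ (1 − 1/(d+1))^d / (d+1) for the values tried.

import math

for d in (1, 2, 3, 5, 10, 100):
    classical = 1.0 / (math.e * (d + 1))            # p allowed by e*p*(d+1) <= 1
    used_here = (1 - 1 / (d + 1)) ** d / (d + 1)    # p allowed by the hypothesis above
    assert classical <= used_here, (d, classical, used_here)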
(* Title: HOL/Proofs/ex/Proof_Terms.thy Author: Makarius Basic examples involving proof terms. *) theory Proof_Terms imports Main begin text \<open> Detailed proof information of a theorem may be retrieved as follows: \<close> lemma ex: "A \<and> B \<longrightarrow> B \<and> A" proof assume "A \<and> B" then obtain B and A .. then show "B \<and> A" .. qed ML_val \<open> (*proof body with digest*) val body = Proofterm.strip_thm (Thm.proof_body_of @{thm ex}); (*proof term only*) val prf = Proofterm.proof_of body; Pretty.writeln (Proof_Syntax.pretty_proof @{context} prf); (*all theorems used in the graph of nested proofs*) val all_thms = Proofterm.fold_body_thms (fn (name, _, _) => insert (op =) name) [body] []; \<close> text \<open> The result refers to various basic facts of Isabelle/HOL: @{thm [source] HOL.impI}, @{thm [source] HOL.conjE}, @{thm [source] HOL.conjI} etc. The combinator @{ML Proofterm.fold_body_thms} recursively explores the graph of the proofs of all theorems being used here. \<^medskip> Alternatively, we may produce a proof term manually, and turn it into a theorem as follows: \<close> ML_val \<open> val thy = @{theory}; val ctxt = @{context}; val prf = Proof_Syntax.read_proof thy true false "impI \<cdot> _ \<cdot> _ \<bullet> \ \ (\<^bold>\<lambda>H: _. \ \ conjE \<cdot> _ \<cdot> _ \<cdot> _ \<bullet> H \<bullet> \ \ (\<^bold>\<lambda>(H: _) Ha: _. conjI \<cdot> _ \<cdot> _ \<bullet> Ha \<bullet> H))"; val thm = prf |> Reconstruct.reconstruct_proof ctxt @{prop "A \<and> B \<longrightarrow> B \<and> A"} |> Proof_Checker.thm_of_proof thy |> Drule.export_without_context; \<close> end
[STATEMENT] lemma stc_divide [simp]: "\<lbrakk>x \<in> HFinite; y \<in> HFinite; stc y \<noteq> 0\<rbrakk> \<Longrightarrow> stc(x/y) = (stc x) / (stc y)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>x \<in> HFinite; y \<in> HFinite; stc y \<noteq> 0\<rbrakk> \<Longrightarrow> stc (x / y) = stc x / stc y [PROOF STEP] by (simp add: divide_inverse stc_mult stc_not_Infinitesimal HFinite_inverse stc_inverse)
Formal statement is: lemmas mult = big_mult small_big_mult big_small_mult small_mult Informal statement is: The mult lemma is a combination of the big_mult, small_big_mult, big_small_mult, and small_mult lemmas.
! Matt's bug in ac_implied_do.  It turned out that the rule just
! didn't have an action.  - Bryan R.
integer :: i
integer :: a(10)
a = (/ (i,i=1,10) /)
end
using luteos using SymPy # let # limit scope Ps = [P1(), P2(), P3()] # Range of polynomial order Ns = [9, 17, 33] # Range of grid size dim = 2 # Material mat = Material( ν=0.33, E=1.0, dim = dim ) ## Set up problem # Get functions for exact solution function ExactSol( Cstiff ) dim = 2 @syms x1 x2 u1 = (x2 - x2.^2) .* cos.(pi*x1) .* (1/2 - x1) .* (1 - x2/2) #10 * (x2 - x2.^2) .* sin.(pi*x1) .* (1 - x1) .* (1 - x2/2) u2 = 2 * (x1 - x1.^2) .* sin.(pi*x2) .* (1 - x2) .* (1 - x1/2) # 2 * (x1 - x1.^2) .* sin.(pi*x2) .* (1 - x2) .* (1 - x1/2) ∂u1∂x1 = diff( u1, x1 ) ∂u1∂x2 = diff( u1, x2 ) ∂u2∂x1 = diff( u2, x1 ) ∂u2∂x2 = diff( u2, x2 ) ∇u = [ ∂u1∂x1 ∂u2∂x1; ∂u1∂x2 ∂u2∂x2 ] ϵ = 1/2 * ( ∇u + ∇u.' ) σ = fill( 0*x1, dim, dim ) for ii in 1:dim, jj in 1:dim, kk in 1:dim, ll in 1:dim σ[ii,jj] += (Cstiff[ii,jj,kk,ll] * ϵ[kk,ll])[1,1,1,1] end F = fill( 0*x1, dim ) ∇σ = fill( 0*x1, dim, dim, dim ) for ii in 1:dim, jj in 1:dim ∇σ[ii,jj,1] = diff( σ[ii,jj], x1 ) ∇σ[ii,jj,2] = diff( σ[ii,jj], x2 ) end for ii in 1:dim, jj in 1:dim F[ii] -= ∇σ[ii,jj,jj] end u1func_org = lambdify( u1 ) u2func_org = lambdify( u2 ) F1func_org = lambdify( F[1] ) F2func_org = lambdify( F[2] ) σ1func_org = lambdify( σ[1] ) σ2func_org = lambdify( σ[2] ) σ4func_org = lambdify( σ[4] ) ϵ1func_org = lambdify( ϵ[1] ) ϵ2func_org = lambdify( ϵ[2] ) ϵ4func_org = lambdify( ϵ[4] ) # make sure you can call it with matrices u1func = x -> u1func_org.(x[:,1], x[:,2]) u2func = x -> u2func_org.(x[:,1], x[:,2]) F1func = x -> F1func_org.(x[:,1], x[:,2]) F2func = x -> F2func_org.(x[:,1], x[:,2]) σ1func = x -> σ1func_org.(x[:,1], x[:,2]) σ2func = x -> σ2func_org.(x[:,1], x[:,2]) σ4func = x -> σ4func_org.(x[:,1], x[:,2]) ϵ1func = x -> ϵ1func_org.(x[:,1], x[:,2]) ϵ2func = x -> ϵ2func_org.(x[:,1], x[:,2]) ϵ4func = x -> ϵ4func_org.(x[:,1], x[:,2]) return( u1func, u2func, F1func, F2func, σ1func, σ2func, σ4func, ϵ1func, ϵ2func, ϵ4func ) end ( u1func, u2func, F1func, F2func, σ1func, σ2func, σ4func, ϵ1func, ϵ2func, ϵ4func ) = ExactSol( mat.Cstiff ) # Setup boundary conditions function funcB( p::Array{Float64} ) return fill(0.0, size(p,1), 2) end funcNE = (p) -> [ σ1func(p) σ2func(p)] funcNW = (p) -> [-σ1func(p) -σ2func(p)] bctype = [1,2,1,2] # All Dirichlet # Setup source function source = (p) -> [F1func(p) F2func(p)] ## Compute error for different polynomial orders and grid sizes # Initialize arrays Err_uh1 = fill( 0.0, length(Ps), length(Ns) ) Err_uh2 = fill( 0.0, length(Ps), length(Ns) ) Err_σh1 = fill( 0.0, length(Ps), length(Ns) ) Err_σh2 = fill( 0.0, length(Ps), length(Ns) ) Err_σh4 = fill( 0.0, length(Ps), length(Ns) ) # Err_ϵh1 = fill( 0.0, length(Ps), length(Ns) ) # Err_ϵh2 = fill( 0.0, length(Ps), length(Ns) ) # Err_ϵh4 = fill( 0.0, length(Ps), length(Ns) ) # Loop over polynomial order and grid size for ii in 1:length(Ps), jj in 1:length(Ns) P = Ps[ii]; N = Ns[jj] println("P ", P.p) mesh = Mesh2D( "square", P, N = N) master = Master2D( P ) prob = Elas( @sprintf("Reg %i %i", P.p, N), mat, source, bctype, false, [funcB, funcNE, funcB, funcNW] ) (uhathTri, uh, σh) = hdgSolve( master, mesh, prob ) # Initialize arrays err_uh = fill( 0.0, dim ) err_σh = fill( 0.0, dim^2 ) # err_ϵh = fill( 0.0, dim^2 ) # preallocate jcwd = fill( 0.0, size(master.∇ϕ,2), size(master.∇ϕ,2) ) ∂ξ∂x = fill( 0.0, size(master.∇ϕ,2), dim^2 ) ∂x∂ξ = fill( 0.0, size(master.∇ϕ,2), dim^2 ) for kk in 1:size(mesh.t,1) # Compute Jacobians luteos.compJacob!( master, mesh.nodes[:,:,kk], ∂ξ∂x, jcwd, ∂x∂ξ ) # u Δuh1 = master.ϕ' * ( uh[:,1,kk] - u1func( mesh.nodes[:,:,kk] ) ) Δuh2 = master.ϕ' * 
( uh[:,2,kk] - u2func( mesh.nodes[:,:,kk] ) ) err_uh[1] += Δuh1' * jcwd * Δuh1 err_uh[2] += Δuh2' * jcwd * Δuh2 # σ Δσh1 = master.ϕ' * ( σh[:,1,kk] - σ1func( mesh.nodes[:,:,kk] ) ) Δσh2 = master.ϕ' * ( σh[:,2,kk] - σ2func( mesh.nodes[:,:,kk] ) ) Δσh4 = master.ϕ' * ( σh[:,4,kk] - σ4func( mesh.nodes[:,:,kk] ) ) err_σh[1] += Δσh1' * jcwd * Δσh1 err_σh[2] += Δσh2' * jcwd * Δσh2 err_σh[4] += Δσh4' * jcwd * Δσh4 # # ϵ # Δϵh1 = master.ϕ' * ( ϵh[:,1,kk] - ϵ1func( mesh.nodes[:,:,kk] ) ) # Δϵh2 = master.ϕ' * ( ϵh[:,2,kk] - ϵ2func( mesh.nodes[:,:,kk] ) ) # Δϵh4 = master.ϕ' * ( ϵh[:,4,kk] - ϵ4func( mesh.nodes[:,:,kk] ) ) # err_ϵh[1] += Δϵh1' * jcwd * Δϵh1 # err_ϵh[2] += Δϵh2' * jcwd * Δϵh2 # err_ϵh[4] += Δϵh4' * jcwd * Δϵh4 end Err_uh1[ii,jj] = sqrt(err_uh[1]) Err_uh2[ii,jj] = sqrt(err_uh[2]) Err_σh1[ii,jj] = sqrt(err_σh[1]) Err_σh2[ii,jj] = sqrt(err_σh[2]) Err_σh4[ii,jj] = sqrt(err_σh[4]) # Err_ϵh1[ii,jj] = sqrt(err_ϵh[1]) # Err_ϵh2[ii,jj] = sqrt(err_ϵh[2]) # Err_ϵh4[ii,jj] = sqrt(err_ϵh[4]) end # Compute convergence rates h = 1 ./ ( Ns - 1 ) conv_uh1 = (log.( Err_uh1[:,end-2]) - log.( Err_uh1[:,end] ) ) / (log.( h[end-2]) - log.( h[end] )); conv_uh2 = (log.( Err_uh2[:,end-2]) - log.( Err_uh2[:,end] ) ) / (log.( h[end-2]) - log.( h[end] )); conv_σh1 = (log.( Err_σh1[:,end-2]) - log.( Err_σh1[:,end] ) ) / (log.( h[end-2]) - log.( h[end] )); conv_σh2 = (log.( Err_σh2[:,end-2]) - log.( Err_σh2[:,end] ) ) / (log.( h[end-2]) - log.( h[end] )); conv_σh4 = (log.( Err_σh4[:,end-2]) - log.( Err_σh4[:,end] ) ) / (log.( h[end-2]) - log.( h[end] )); # conv_ϵh1 = (log.( Err_ϵh1[:,end-2]) - log.( Err_ϵh1[:,end] ) ) / (log.( h[end-2]) - log.( h[end] )); # conv_ϵh2 = (log.( Err_ϵh2[:,end-2]) - log.( Err_ϵh2[:,end] ) ) / (log.( h[end-2]) - log.( h[end] )); # conv_ϵh4 = (log.( Err_ϵh4[:,end-2]) - log.( Err_ϵh4[:,end] ) ) / (log.( h[end-2]) - log.( h[end] )); # Output to terminal @printf("\n") @printf(" Convergence rates for 2D Dirichlet/Neumann problem\n\n") @printf(" --------------------------------------------------\n\n") @printf( "P ") for jj in 1:size(Ps,1) @printf( " %6i", Ps[jj].p ) end @printf( "\n" ) # u @printf( "u₁ ") for jj in 1:size(Ps,1) @printf( " %6.4f", conv_uh1[jj] ) end @printf( "\n" ) @printf( "u₂ ") for jj in 1:size(Ps,1) @printf( " %6.4f", conv_uh2[jj] ) end @printf( "\n" ) # σ @printf( "σ₁ ") for jj in 1:length(Ps) @printf( " %6.4f", conv_σh1[jj] ) end @printf( "\n" ) @printf( "σ₂ ") for jj in 1:length(Ps) @printf( " %6.4f", conv_σh2[jj] ) end @printf( "\n" ) @printf( "σ₄ ") for jj in 1:length(Ps) @printf( " %6.4f", conv_σh4[jj] ) end @printf( "\n" ) # # # ϵ # @printf( "ϵ₁ ") # for jj in 1:length(Ps) # @printf( " %6.4f", conv_ϵh1[jj] ) # end # @printf( "\n" ) # @printf( "ϵ₂ ") # for jj in 1:length(Ps) # @printf( " %6.4f", conv_ϵh2[jj] ) # end # @printf( "\n" ) # @printf( "ϵ₄ ") # for jj in 1:length(Ps) # @printf( " %6.4f", conv_ϵh4[jj] ) # end # @printf( "\n" ) open("errors_Elas_Neumann2D.dat", "w") do f @printf(f, "P \t N \t E_uh1 \t E_uh2 \t E_σh1 \t E_σh2 \t E_σh4 \n")#\t E_ϵh1 \t E_ϵh2 \t E_ϵh4 \n")#\t E_J\n") for ii in 1:length(Ps), jj in 1:length(Ns) @printf(f, "%i \t %i \t %16.15e \t %16.15e \t %16.15e \t %16.15e \t %16.15e \n",#\t %16.15e \t %16.15e \t %16.15e\n", Ps[ii].p, Ns[jj], Err_uh1[ii,jj], Err_uh2[ii,jj], Err_σh1[ii,jj], Err_σh2[ii,jj], Err_σh4[ii,jj] )#, #Err_ϵh1[ii,jj], Err_ϵh2[ii,jj], Err_ϵh4[ii,jj] ) end end # end # limit scope
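The convergence rates at the end of the Julia sample above are the observed order of accuracy, i.e. the slope of log(error) against log(h) between two grids. A small Python sketch of that formula, with made-up error values chosen to illustrate third-order convergence:

import math

def convergence_rate(err_coarse, err_fine, h_coarse, h_fine):
    # slope of log(error) vs log(h), matching the conv_* lines in the Julia code
    return (math.log(err_coarse) - math.log(err_fine)) / (math.log(h_coarse) - math.log(h_fine))

# halving h from 1/8 to 1/16 while the error drops from 1e-3 to 1.25e-4
print(convergence_rate(1e-3, 1.25e-4, 1/8, 1/16))   # 3.0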
module DecidableMembership where open import OscarPrelude open import Membership open import Successor record DecidableMembership {ℓ} (m : Set ℓ) (M : Set ℓ) ⦃ _ : Membership m M ⦄ : Set (⊹ ℓ) where field _∈?_ : (x : m) → (X : M) → Dec $ x ∈ X field _∉?_ : (x : m) → (X : M) → Dec $ x ∉ X open DecidableMembership ⦃ … ⦄ public instance DecidableMembershipList : ∀ {ℓ} {A : Set ℓ} ⦃ _ : Eq A ⦄ → DecidableMembership A $ List A DecidableMembership._∈?_ (DecidableMembershipList {ℓ} {A}) = _∈List?_ where _∈List?_ : (a : A) → (xs : List A) → Dec (a ∈ xs) a ∈List? [] = no λ () a ∈List? (x ∷ xs) with a ≟ x … | yes a≡x rewrite a≡x = yes zero … | no a≢x with a ∈List? xs … | yes a∈xs = yes (⊹ a∈xs) … | no a∉xs = no (λ {zero → a≢x refl ; (suc a∈xs) → a∉xs a∈xs}) DecidableMembership._∉?_ (DecidableMembershipList {ℓ} {A}) x X with x ∈? X DecidableMembership._∉?_ (DecidableMembershipList {ℓ} {A}) x X | yes x∈X = no (λ x∉X → x∉X x∈X) DecidableMembership._∉?_ (DecidableMembershipList {ℓ} {A}) x X | no x∉X = yes x∉X
module Fast2haskell (
    Complex_type, Array_type, Assoc_type, Descr_type,
    abortstr, delay, fix, force, iff, iffrev, seQ,
    pair, strcmp, entier,
    land_i, lnot_i, lor_i, lshift_i, rshift_i,
    descr, destr_update, indassoc, lowbound, tabulate, upbound, update, valassoc)
  where {
    import Data.Bits;
    -- import Word2;
    import Data.Word;
    import Data.Complex; -- 1.3
    import Data.Array; -- 1.3
    -- import Data.Int ( Num(fromInt) );
    type Complex_type = Complex Double;
    type Array_type b = Array Int b;
    type Assoc_type a = (Int, a);
    type Descr_type = (Int,Int);
    abortstr str = error ("abort:"++str); -- abort (OtherError str);
    delay x = abortstr "delay not implemented";
    fix f = fix_f where {fix_f = f fix_f};
    force x = x; -- error "force not implemented";
    iff b x y = if b then x else y;
    iffrev y x b = if b then x else y;
    seQ x y = x `seq` y;
    pair [] = False;
    pair x = True;
    strcmp :: [Char] -> [Char] -> Bool;
    strcmp x y = x == y;
    entier x = fromIntegral (floor x);
    land_i :: Int -> Int -> Int;
    land_i x y = x .&. y;
    lnot_i :: Int -> Int;
    lnot_i x = complement x;
    lor_i :: Int -> Int -> Int;
    lor_i x y = x .|. y;
    lshift_i :: Int -> Int -> Int;
    lshift_i x y = x `shiftL` y;
    rshift_i :: Int -> Int -> Int;
    rshift_i x y = x `shiftR` y;
    write x = abortstr "write not implemented";
    descr l u = (l,u);
    destr_update ar i x = ar // [(i,x)];
    indassoc (i,v) = i;
    lowbound (l,u) = l;
    tabulate f (l,u) = listArray (l,u) [f i | i <- [l..u]];
    upbound (l,u) = u;
    update ar i x = ar // [(i,x)];
    valassoc (i,v) = v;
  }
theory ConcreteSemantics7_1_2_BigStep imports Main "~~/src/HOL/IMP/Com" begin section "6 Introduction" text \<open> When building upon any of those theories, for example when solving an exercise, the imports section needs to include "~~/src/HOL/IMP/T" where T is the name of the required theory \<close> section "7 IMP: A Simple Imperative Language" (* two styles of defining the semantics of a programming language: - big-step and - small-step operational semantics. *) (* As a smaller concrete example, we will apply our semantics to the concept of program equivalence. *) subsection "7.1 IMP Commands" text \<open> Before we jump into any formalization or define the abstract syntax of commands, we need to determine which constructs the language IMP should contain \<close> (* For an imperative language, we will want the basics such as assignments *) value "''x'' ::= Plus(V ''y'')(N 1);; ''y'' ::= N 2" (* value "x := y + 1; y := 2" *) text \<open> even the more concrete Isabelle notation above is occasionally somewhat cumbersome to use. one could write separate parsing/printing ML code that integrates with Isabelle and implements the concrete syntax of the language. \<close> text \<open> Therefore definitions and theorems about the core language only need to worry about one type of loop, while still supporting the full richness of a larger language. This significantly reduces proof size and effort for the theorems that we discuss in this book. \<close> subsection "7.2 Big-Step Semantics" text \<open> use a big-step operational semantics to give meaning to commands. In an operational semantics setting, the aim is to capture the meaning of a program as a relation that describes how a program executes easier to define and understand, \<close> subsubsection "7.2.1 Definition" text \<open> In big-step operational semantics, the relation to be defined is between program,initial state, and final state. Intermediate states during the execution of the program are not visible in the relation. \<close> (* Predicates in the big-step rules are called "judgements" *) text \<open> The idea is to have at least one rule per syntactic construct and to add further rules when case distinctions become necessary. \<close> inductive big_step :: "com \<times> state \<Rightarrow> state \<Rightarrow> bool" (infix "\<Rightarrow>" 55) where Skip: "(SKIP,s) \<Rightarrow> s" | Assign: "(x ::= a,s) \<Rightarrow> s(x := aval a s)" | Seq: "\<lbrakk> (c\<^sub>1,s\<^sub>1) \<Rightarrow> s\<^sub>2; (c\<^sub>2,s\<^sub>2) \<Rightarrow> s\<^sub>3 \<rbrakk> \<Longrightarrow> (c\<^sub>1;;c\<^sub>2, s\<^sub>1) \<Rightarrow> s\<^sub>3" | IfTrue: "\<lbrakk> bval b s; (c\<^sub>1,s) \<Rightarrow> t \<rbrakk> \<Longrightarrow> (IF b THEN c\<^sub>1 ELSE c\<^sub>2, s) \<Rightarrow> t" | IfFalse: "\<lbrakk> \<not>bval b s; (c\<^sub>2,s) \<Rightarrow> t \<rbrakk> \<Longrightarrow> (IF b THEN c\<^sub>1 ELSE c\<^sub>2, s) \<Rightarrow> t" | WhileFalse: "\<not>bval b s \<Longrightarrow> (WHILE b DO c,s) \<Rightarrow> s" | WhileTrue: "\<lbrakk> bval b s\<^sub>1; (c,s\<^sub>1) \<Rightarrow> s\<^sub>2; (WHILE b DO c, s\<^sub>2) \<Rightarrow> s\<^sub>3 \<rbrakk> \<Longrightarrow> (WHILE b DO c, s\<^sub>1) \<Rightarrow> s\<^sub>3" subsubsection "7.2.2 Deriving IMP Executions" text \<open> state the lemma with a schematic variable and let Isabelle compute its value as the proof progresses. 
\<close> schematic_goal ex: "(''x'' ::= N 5;; ''y'' ::= V ''x'', s) \<Rightarrow> ?t" apply(rule Seq) apply(rule Assign) apply simp apply(rule Assign) done (* I got theorem ex: (''x'' ::= N 5;; ''y'' ::= V ''x'', ?s) \<Rightarrow> ?s (''x'' := 5, ''y'' := aval (V ''x'') (?s(''x'' := 5))) In text, the author wrote that we get the expected (''x'' ::= N 5;; ''y'' ::= V ''x'', s) \<Rightarrow> s(''x'' := 5, ''y'' := 5) There seems be merely different. *) text\<open>We want to execute the big-step rules:\<close> code_pred big_step . text \<open>The introduction rules are good for automatically construction small program executions. The recursive cases may require backtracking, so we declare the set as unsafe intro rules.\<close> declare big_step.intros [intro] text\<open>The standard induction rule @{thm [display] big_step.induct [no_vars]}\<close> thm big_step.induct values "{t. (SKIP, \<lambda>_.0) \<Rightarrow> t}" text \<open> Functions cannot always easily be printed, but lists can be, \<close> values "{map t [''x'', ''y''] | t. (''x'' ::= N 2, \<lambda>_.0) \<Rightarrow> t}" text \<open> This section showed us how to construct program derivations and how to execute small IMP programs according to the big-step semantics. In the next section, we instead deconstruct executions that we know have happened and analyse all possible ways we could have gotten there \<close> subsubsection "7.2.3 Rule Inversion" text \<open> These inverted rules can be proved automatically by Isabelle from the original rules. Moreover, proof methods like auto and blast can be instructed to use both the introduction and the inverted rules automatically during proof search. \<close> inductive_cases SkipE[elim!]: "(SKIP,s) \<Rightarrow> t" thm SkipE inductive_cases AssignE[elim!]: "(x ::= a,s) \<Rightarrow> t" thm AssignE inductive_cases SeqE[elim!]: "(c1;;c2,s1) \<Rightarrow> s3" thm SeqE inductive_cases IfE[elim!]: "(IF b THEN c1 ELSE c2,s) \<Rightarrow> t" thm IfE inductive_cases WhileE[elim]: "(WHILE b DO c,s) \<Rightarrow> t" thm WhileE lemma "(IF b THEN SKIP ELSE SKIP, s) \<Rightarrow> t \<Longrightarrow> t = s" by blast lemma assumes "(IF b THEN SKIP ELSE SKIP, s) \<Rightarrow> t" shows "t = s" proof- from assms show ?thesis proof cases \<comment> \<open>inverting assms\<close> case IfTrue thm IfTrue thus ?thesis by blast next case IfFalse thus ?thesis by blast qed qed (* Using rule inversion to prove simplification rules: *) lemma assign_simp: "(x ::= a,s) \<Rightarrow> s' \<longleftrightarrow> (s' = s(x := aval a s))" by auto lemma "(c1;;c2;;c3, s) \<Rightarrow> s' \<longleftrightarrow> (c1;;(c2;;c3), s) \<Rightarrow> s'" proof assume "(c1;;c2;;c3, s) \<Rightarrow> s'" then obtain s1 s2 where c1: "(c1, s) \<Rightarrow> s1" and "(c2, s1) \<Rightarrow> s2" and "(c3, s2) \<Rightarrow> s'" by blast (* This method is not able to be used without the above SeqE. *) then have "(c2;;c3, s1) \<Rightarrow> s'" using Seq by auto then show " (c1;;(c2;;c3), s) \<Rightarrow> s'" using Seq c1 by auto next assume "(c1;;(c2;;c3), s) \<Rightarrow> s'" then show "(c1;;c2;;c3, s) \<Rightarrow> s'" by (meson Seq SeqE) qed text \<open> Big Stepの証明は基本的に上記の証明と代わりないが、オリジナルの証明の方がわかりやすい。 あと、私の証明では、`by (meson Seq SeqE)`を最後に使っているが、オリジナルではそうではない。 オリジナルのTheoryに入れられているいずれかのlemmaが効いているのであろうが、それはなにか。 \<close> subsubsection "7.2.4 Equivalence of Commands" (* semantic equivalence *) abbreviation equiv_c :: "com \<Rightarrow> com \<Rightarrow> bool" (infix "\<sim>" 50) where "c \<sim> c' \<equiv> (\<forall>s t. 
(c,s) \<Rightarrow> t = (c',s) \<Rightarrow> t)" text \<open> Big_Stepの証明とだいぶ違い、blastで済んでいるが、なぜだ。 \<close> lemma "(WHILE b DO c) \<sim> (IF b THEN c;; WHILE b DO c ELSE SKIP)" (is "?w \<sim> ?iw") proof - (* have "(?iw, s) \<Rightarrow> t" if assm: "(?w, s) \<Rightarrow> t" for s t sorry have "(?w, s) \<Rightarrow> t" if assm: "(?iw, s) \<Rightarrow> t" for s t sorry *) have "(?iw, s) \<Rightarrow> t" if assm: "(?w, s) \<Rightarrow> t" for s t proof - from assm show ?thesis proof cases case WhileFalse thus ?thesis (* using IfFalse Skip *) by blast (* ここで `using IfFalse Skip` が必要にったり明示されたりするのを避けるなら、 declare big_step.intros [intro] が必要 *) next case WhileTrue thus ?thesis by blast qed qed moreover have "(?w, s) \<Rightarrow> t" if assm: "(?iw, s) \<Rightarrow> t" for s t proof - from assm show ?thesis proof cases case IfFalse thus ?thesis by blast next case IfTrue thus ?thesis by blast qed qed ultimately show ?thesis by blast qed (* picking this: (WHILE b DO c, ?s) \<Rightarrow> ?t \<Longrightarrow> (IF b THEN c;; WHILE b DO c ELSE SKIP, ?s) \<Rightarrow> ?t (IF b THEN c;; WHILE b DO c ELSE SKIP, ?s) \<Rightarrow> ?t \<Longrightarrow> (WHILE b DO c, ?s) \<Rightarrow> ?t Illegal application of proof command in "chain" mode 多分Whileに関するEliminationRuleを入れていないからかな? => `proof -`としていなかったことが原因。 *) lemma "(c) \<sim> (IF b THEN c ELSE c)" (is "?w \<sim> ?iw") by blast (* proof - have "(?iw, s) \<Rightarrow> t" if assm: "(?w, s) \<Rightarrow> t" for s t proof cases case (bval b s) then show ?thesis sorry next case \<not> (bval b s) then show ?thesis sorry qed moreover have "(?w, s) \<Rightarrow> t" if assm: "(?iw, s) \<Rightarrow> t" for s t sorry ultimately show ?thesis by blast qed *) text\<open> This induction schema is almost perfect for our purposes, but our trick for reusing the tuple syntax means that the induction schema has two parameters instead of the \<open>c\<close>, \<open>s\<close>, and \<open>s'\<close> that we are likely to encounter. Splitting the tuple parameter fixes this: \<close> lemmas big_step_induct = big_step.induct[split_format(complete)] thm big_step_induct lemma sim_while_cong_aux: "(WHILE b DO c,s) \<Rightarrow> t \<Longrightarrow> c \<sim> c' \<Longrightarrow> (WHILE b DO c',s) \<Rightarrow> t" apply(induction "WHILE b DO c" s t arbitrary: b c rule: big_step_induct) apply (simp add: WhileFalse) apply (simp add: WhileTrue) done lemma sim_while_cong: "c \<sim> c' \<Longrightarrow> WHILE b DO c \<sim> WHILE b DO c'" using sim_while_cong_aux by auto text \<open>Command equivalence is an equivalence relation, i.e.\ it is reflexive, symmetric, and transitive. Because we used an abbreviation above, Isabelle derives this automatically.\<close> lemma sim_refl: "c \<sim> c" by simp lemma sim_sym: "(c \<sim> c') = (c' \<sim> c)" by auto lemma sim_trans: "c \<sim> c' \<Longrightarrow> c' \<sim> c'' \<Longrightarrow> c \<sim> c''" by auto subsubsection "7.2.5 Execution in IMP is Deterministic" (* determinism. *) theorem big_step_determ: "\<lbrakk> (c,s) \<Rightarrow> t; (c,s) \<Rightarrow> u \<rbrakk> \<Longrightarrow> u = t" apply(induction arbitrary: u rule:big_step_induct) apply(blast+) done (* テキストのFig. 7.4. と同じように各ケースを明記してもよいが、全部blastで解決されるため、`blast+`を使う` *) end
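To make the big-step rules in the Isabelle theory above concrete, here is a minimal big-step evaluator for an IMP-like fragment in Python. It is an illustrative sketch only (not derived from the theory): commands are plain tuples, states are dicts, and the branches mirror Skip, Assign, Seq, IfTrue/IfFalse and WhileTrue/WhileFalse.

def big_step(cmd, state):
    tag = cmd[0]
    if tag == "SKIP":                              # (SKIP, s) => s
        return state
    if tag == "ASSIGN":                            # (x ::= a, s) => s(x := aval a s)
        _, x, aexp = cmd
        return {**state, x: aexp(state)}
    if tag == "SEQ":                               # run c1, then c2 on the intermediate state
        _, c1, c2 = cmd
        return big_step(c2, big_step(c1, state))
    if tag == "IF":                                # IfTrue / IfFalse
        _, b, c1, c2 = cmd
        return big_step(c1 if b(state) else c2, state)
    if tag == "WHILE":                             # WhileTrue / WhileFalse
        _, b, c = cmd
        return big_step(cmd, big_step(c, state)) if b(state) else state
    raise ValueError(f"unknown command {tag!r}")

# ''x'' ::= N 5 ;; ''y'' ::= V ''x''   (the schematic_goal `ex` from the theory)
prog = ("SEQ", ("ASSIGN", "x", lambda s: 5), ("ASSIGN", "y", lambda s: s["x"]))
print(big_step(prog, {}))                          # {'x': 5, 'y': 5}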
/- Copyright (c) 2015, 2017 Jeremy Avigad. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Jeremy Avigad, Robert Y. Lewis, Johannes Hölzl, Mario Carneiro, Sébastien Gouëzel -/ import Mathlib.PrePort import Mathlib.Lean3Lib.init.default import Mathlib.data.real.ennreal import Mathlib.data.finset.intervals import Mathlib.topology.uniform_space.uniform_embedding import Mathlib.topology.uniform_space.pi import Mathlib.topology.uniform_space.uniform_convergence import Mathlib.PostPort universes u v u_1 l u_2 namespace Mathlib /-! # Extended metric spaces This file is devoted to the definition and study of `emetric_spaces`, i.e., metric spaces in which the distance is allowed to take the value ∞. This extended distance is called `edist`, and takes values in `ennreal`. Many definitions and theorems expected on emetric spaces are already introduced on uniform spaces and topological spaces. For example: open and closed sets, compactness, completeness, continuity and uniform continuity The class `emetric_space` therefore extends `uniform_space` (and `topological_space`). -/ /-- Characterizing uniformities associated to a (generalized) distance function `D` in terms of the elements of the uniformity. -/ theorem uniformity_dist_of_mem_uniformity {α : Type u} {β : Type v} [linear_order β] {U : filter (α × α)} (z : β) (D : α → α → β) (H : ∀ (s : set (α × α)), s ∈ U ↔ ∃ (ε : β), ∃ (H : ε > z), ∀ {a b : α}, D a b < ε → (a, b) ∈ s) : U = infi fun (ε : β) => infi fun (H : ε > z) => filter.principal (set_of fun (p : α × α) => D (prod.fst p) (prod.snd p) < ε) := sorry class has_edist (α : Type u_1) where edist : α → α → ennreal /-- Creating a uniform space from an extended distance. -/ def uniform_space_of_edist {α : Type u} (edist : α → α → ennreal) (edist_self : ∀ (x : α), edist x x = 0) (edist_comm : ∀ (x y : α), edist x y = edist y x) (edist_triangle : ∀ (x y z : α), edist x z ≤ edist x y + edist y z) : uniform_space α := uniform_space.of_core (uniform_space.core.mk (infi fun (ε : ennreal) => infi fun (H : ε > 0) => filter.principal (set_of fun (p : α × α) => edist (prod.fst p) (prod.snd p) < ε)) sorry sorry sorry) -- the uniform structure is embedded in the emetric space structure -- to avoid instance diamond issues. See Note [forgetful inheritance]. /-- Extended metric spaces, with an extended distance `edist` possibly taking the value ∞ Each emetric space induces a canonical `uniform_space` and hence a canonical `topological_space`. This is enforced in the type class definition, by extending the `uniform_space` structure. When instantiating an `emetric_space` structure, the uniformity fields are not necessary, they will be filled in by default. There is a default value for the uniformity, that can be substituted in cases of interest, for instance when instantiating an `emetric_space` structure on a product. 
Continuity of `edist` is proved in `topology.instances.ennreal` -/ class emetric_space (α : Type u) extends has_edist α, emetric_space.to_uniform_space._default #2 #1 #0 α _to_has_edist = id (uniform_space_of_edist edist #0 α _to_has_edist), uniform_space #2, uniform_space α where edist_self : ∀ (x : α), edist x x = 0 eq_of_edist_eq_zero : ∀ {x y : α}, edist x y = 0 → x = y edist_comm : ∀ (x y : α), edist x y = edist y x edist_triangle : ∀ (x y z : α), edist x z ≤ edist x y + edist y z to_uniform_space : uniform_space α uniformity_edist : autoParam (uniformity α = infi fun (ε : ennreal) => infi fun (H : ε > 0) => filter.principal (set_of fun (p : α × α) => edist (prod.fst p) (prod.snd p) < ε)) (Lean.Syntax.ident Lean.SourceInfo.none (String.toSubstring "Mathlib.control_laws_tac") (Lean.Name.mkStr (Lean.Name.mkStr Lean.Name.anonymous "Mathlib") "control_laws_tac") []) /- emetric spaces are less common than metric spaces. Therefore, we work in a dedicated namespace, while notions associated to metric spaces are mostly in the root namespace. -/ protected instance emetric_space.to_uniform_space' {α : Type u} [emetric_space α] : uniform_space α := emetric_space.to_uniform_space /-- Characterize the equality of points by the vanishing of their extended distance -/ @[simp] theorem edist_eq_zero {α : Type u} [emetric_space α] {x : α} {y : α} : edist x y = 0 ↔ x = y := { mp := eq_of_edist_eq_zero, mpr := fun (this : x = y) => this ▸ edist_self x } @[simp] theorem zero_eq_edist {α : Type u} [emetric_space α] {x : α} {y : α} : 0 = edist x y ↔ x = y := { mp := fun (h : 0 = edist x y) => eq_of_edist_eq_zero (Eq.symm h), mpr := fun (this : x = y) => this ▸ Eq.symm (edist_self x) } theorem edist_le_zero {α : Type u} [emetric_space α] {x : α} {y : α} : edist x y ≤ 0 ↔ x = y := iff.trans nonpos_iff_eq_zero edist_eq_zero /-- Triangle inequality for the extended distance -/ theorem edist_triangle_left {α : Type u} [emetric_space α] (x : α) (y : α) (z : α) : edist x y ≤ edist z x + edist z y := eq.mpr (id (Eq._oldrec (Eq.refl (edist x y ≤ edist z x + edist z y)) (edist_comm z x))) (edist_triangle x z y) theorem edist_triangle_right {α : Type u} [emetric_space α] (x : α) (y : α) (z : α) : edist x y ≤ edist x z + edist y z := eq.mpr (id (Eq._oldrec (Eq.refl (edist x y ≤ edist x z + edist y z)) (edist_comm y z))) (edist_triangle x z y) theorem edist_triangle4 {α : Type u} [emetric_space α] (x : α) (y : α) (z : α) (t : α) : edist x t ≤ edist x y + edist y z + edist z t := le_trans (edist_triangle x z t) (add_le_add_right (edist_triangle x y z) (edist z t)) /-- The triangle (polygon) inequality for sequences of points; `finset.Ico` version. -/ theorem edist_le_Ico_sum_edist {α : Type u} [emetric_space α] (f : ℕ → α) {m : ℕ} {n : ℕ} (h : m ≤ n) : edist (f m) (f n) ≤ finset.sum (finset.Ico m n) fun (i : ℕ) => edist (f i) (f (i + 1)) := sorry /-- The triangle (polygon) inequality for sequences of points; `finset.range` version. -/ theorem edist_le_range_sum_edist {α : Type u} [emetric_space α] (f : ℕ → α) (n : ℕ) : edist (f 0) (f n) ≤ finset.sum (finset.range n) fun (i : ℕ) => edist (f i) (f (i + 1)) := finset.Ico.zero_bot n ▸ edist_le_Ico_sum_edist f (nat.zero_le n) /-- A version of `edist_le_Ico_sum_edist` with each intermediate distance replaced with an upper estimate. 
-/ theorem edist_le_Ico_sum_of_edist_le {α : Type u} [emetric_space α] {f : ℕ → α} {m : ℕ} {n : ℕ} (hmn : m ≤ n) {d : ℕ → ennreal} (hd : ∀ {k : ℕ}, m ≤ k → k < n → edist (f k) (f (k + 1)) ≤ d k) : edist (f m) (f n) ≤ finset.sum (finset.Ico m n) fun (i : ℕ) => d i := sorry /-- A version of `edist_le_range_sum_edist` with each intermediate distance replaced with an upper estimate. -/ theorem edist_le_range_sum_of_edist_le {α : Type u} [emetric_space α] {f : ℕ → α} (n : ℕ) {d : ℕ → ennreal} (hd : ∀ {k : ℕ}, k < n → edist (f k) (f (k + 1)) ≤ d k) : edist (f 0) (f n) ≤ finset.sum (finset.range n) fun (i : ℕ) => d i := finset.Ico.zero_bot n ▸ edist_le_Ico_sum_of_edist_le (zero_le n) fun (_x : ℕ) (_x_1 : 0 ≤ _x) => hd /-- Two points coincide if their distance is `< ε` for all positive ε -/ theorem eq_of_forall_edist_le {α : Type u} [emetric_space α] {x : α} {y : α} (h : ∀ (ε : ennreal), ε > 0 → edist x y ≤ ε) : x = y := eq_of_edist_eq_zero (eq_of_le_of_forall_le_of_dense bot_le h) /-- Reformulation of the uniform structure in terms of the extended distance -/ theorem uniformity_edist {α : Type u} [emetric_space α] : uniformity α = infi fun (ε : ennreal) => infi fun (H : ε > 0) => filter.principal (set_of fun (p : α × α) => edist (prod.fst p) (prod.snd p) < ε) := emetric_space.uniformity_edist theorem uniformity_basis_edist {α : Type u} [emetric_space α] : filter.has_basis (uniformity α) (fun (ε : ennreal) => 0 < ε) fun (ε : ennreal) => set_of fun (p : α × α) => edist (prod.fst p) (prod.snd p) < ε := sorry /-- Characterization of the elements of the uniformity in terms of the extended distance -/ theorem mem_uniformity_edist {α : Type u} [emetric_space α] {s : set (α × α)} : s ∈ uniformity α ↔ ∃ (ε : ennreal), ∃ (H : ε > 0), ∀ {a b : α}, edist a b < ε → (a, b) ∈ s := filter.has_basis.mem_uniformity_iff uniformity_basis_edist /-- Given `f : β → ennreal`, if `f` sends `{i | p i}` to a set of positive numbers accumulating to zero, then `f i`-neighborhoods of the diagonal form a basis of `𝓤 α`. For specific bases see `uniformity_basis_edist`, `uniformity_basis_edist'`, `uniformity_basis_edist_nnreal`, and `uniformity_basis_edist_inv_nat`. -/ protected theorem emetric.mk_uniformity_basis {α : Type u} [emetric_space α] {β : Type u_1} {p : β → Prop} {f : β → ennreal} (hf₀ : ∀ (x : β), p x → 0 < f x) (hf : ∀ (ε : ennreal), 0 < ε → ∃ (x : β), ∃ (hx : p x), f x ≤ ε) : filter.has_basis (uniformity α) p fun (x : β) => set_of fun (p : α × α) => edist (prod.fst p) (prod.snd p) < f x := sorry /-- Given `f : β → ennreal`, if `f` sends `{i | p i}` to a set of positive numbers accumulating to zero, then closed `f i`-neighborhoods of the diagonal form a basis of `𝓤 α`. For specific bases see `uniformity_basis_edist_le` and `uniformity_basis_edist_le'`. 
-/ protected theorem emetric.mk_uniformity_basis_le {α : Type u} [emetric_space α] {β : Type u_1} {p : β → Prop} {f : β → ennreal} (hf₀ : ∀ (x : β), p x → 0 < f x) (hf : ∀ (ε : ennreal), 0 < ε → ∃ (x : β), ∃ (hx : p x), f x ≤ ε) : filter.has_basis (uniformity α) p fun (x : β) => set_of fun (p : α × α) => edist (prod.fst p) (prod.snd p) ≤ f x := sorry theorem uniformity_basis_edist_le {α : Type u} [emetric_space α] : filter.has_basis (uniformity α) (fun (ε : ennreal) => 0 < ε) fun (ε : ennreal) => set_of fun (p : α × α) => edist (prod.fst p) (prod.snd p) ≤ ε := emetric.mk_uniformity_basis_le (fun (_x : ennreal) => id) fun (ε : ennreal) (ε₀ : 0 < ε) => Exists.intro ε (Exists.intro ε₀ (le_refl ε)) theorem uniformity_basis_edist' {α : Type u} [emetric_space α] (ε' : ennreal) (hε' : 0 < ε') : filter.has_basis (uniformity α) (fun (ε : ennreal) => ε ∈ set.Ioo 0 ε') fun (ε : ennreal) => set_of fun (p : α × α) => edist (prod.fst p) (prod.snd p) < ε := sorry theorem uniformity_basis_edist_le' {α : Type u} [emetric_space α] (ε' : ennreal) (hε' : 0 < ε') : filter.has_basis (uniformity α) (fun (ε : ennreal) => ε ∈ set.Ioo 0 ε') fun (ε : ennreal) => set_of fun (p : α × α) => edist (prod.fst p) (prod.snd p) ≤ ε := sorry theorem uniformity_basis_edist_nnreal {α : Type u} [emetric_space α] : filter.has_basis (uniformity α) (fun (ε : nnreal) => 0 < ε) fun (ε : nnreal) => set_of fun (p : α × α) => edist (prod.fst p) (prod.snd p) < ↑ε := sorry theorem uniformity_basis_edist_inv_nat {α : Type u} [emetric_space α] : filter.has_basis (uniformity α) (fun (_x : ℕ) => True) fun (n : ℕ) => set_of fun (p : α × α) => edist (prod.fst p) (prod.snd p) < (↑n⁻¹) := sorry /-- Fixed size neighborhoods of the diagonal belong to the uniform structure -/ theorem edist_mem_uniformity {α : Type u} [emetric_space α] {ε : ennreal} (ε0 : 0 < ε) : (set_of fun (p : α × α) => edist (prod.fst p) (prod.snd p) < ε) ∈ uniformity α := iff.mpr mem_uniformity_edist (Exists.intro ε (Exists.intro ε0 fun (a b : α) => id)) namespace emetric theorem uniformity_has_countable_basis {α : Type u} [emetric_space α] : filter.is_countably_generated (uniformity α) := filter.is_countably_generated_of_seq (Exists.intro (fun (i : ℕ) => set_of fun (p : α × α) => edist (prod.fst p) (prod.snd p) < (↑i⁻¹)) (filter.has_basis.eq_infi uniformity_basis_edist_inv_nat)) /-- ε-δ characterization of uniform continuity on a set for emetric spaces -/ theorem uniform_continuous_on_iff {α : Type u} {β : Type v} [emetric_space α] [emetric_space β] {f : α → β} {s : set α} : uniform_continuous_on f s ↔ ∀ (ε : ennreal) (H : ε > 0), ∃ (δ : ennreal), ∃ (H : δ > 0), ∀ {a b : α}, a ∈ s → b ∈ s → edist a b < δ → edist (f a) (f b) < ε := filter.has_basis.uniform_continuous_on_iff uniformity_basis_edist uniformity_basis_edist /-- ε-δ characterization of uniform continuity on emetric spaces -/ theorem uniform_continuous_iff {α : Type u} {β : Type v} [emetric_space α] [emetric_space β] {f : α → β} : uniform_continuous f ↔ ∀ (ε : ennreal) (H : ε > 0), ∃ (δ : ennreal), ∃ (H : δ > 0), ∀ {a b : α}, edist a b < δ → edist (f a) (f b) < ε := filter.has_basis.uniform_continuous_iff uniformity_basis_edist uniformity_basis_edist /-- ε-δ characterization of uniform embeddings on emetric spaces -/ theorem uniform_embedding_iff {α : Type u} {β : Type v} [emetric_space α] [emetric_space β] {f : α → β} : uniform_embedding f ↔ function.injective f ∧ uniform_continuous f ∧ ∀ (δ : ennreal) (H : δ > 0), ∃ (ε : ennreal), ∃ (H : ε > 0), ∀ {a b : α}, edist (f a) (f b) < ε → edist a b < δ := sorry /-- A map 
between emetric spaces is a uniform embedding if and only if the edistance between `f x` and `f y` is controlled in terms of the distance between `x` and `y` and conversely. -/ theorem uniform_embedding_iff' {α : Type u} {β : Type v} [emetric_space α] [emetric_space β] {f : α → β} : uniform_embedding f ↔ (∀ (ε : ennreal) (H : ε > 0), ∃ (δ : ennreal), ∃ (H : δ > 0), ∀ {a b : α}, edist a b < δ → edist (f a) (f b) < ε) ∧ ∀ (δ : ennreal) (H : δ > 0), ∃ (ε : ennreal), ∃ (H : ε > 0), ∀ {a b : α}, edist (f a) (f b) < ε → edist a b < δ := sorry /-- ε-δ characterization of Cauchy sequences on emetric spaces -/ protected theorem cauchy_iff {α : Type u} [emetric_space α] {f : filter α} : cauchy f ↔ f ≠ ⊥ ∧ ∀ (ε : ennreal) (H : ε > 0), ∃ (t : set α), ∃ (H : t ∈ f), ∀ (x y : α), x ∈ t → y ∈ t → edist x y < ε := filter.has_basis.cauchy_iff uniformity_basis_edist /-- A very useful criterion to show that a space is complete is to show that all sequences which satisfy a bound of the form `edist (u n) (u m) < B N` for all `n m ≥ N` are converging. This is often applied for `B N = 2^{-N}`, i.e., with a very fast convergence to `0`, which makes it possible to use arguments of converging series, while this is impossible to do in general for arbitrary Cauchy sequences. -/ theorem complete_of_convergent_controlled_sequences {α : Type u} [emetric_space α] (B : ℕ → ennreal) (hB : ∀ (n : ℕ), 0 < B n) (H : ∀ (u : ℕ → α), (∀ (N n m : ℕ), N ≤ n → N ≤ m → edist (u n) (u m) < B N) → ∃ (x : α), filter.tendsto u filter.at_top (nhds x)) : complete_space α := uniform_space.complete_of_convergent_controlled_sequences uniformity_has_countable_basis (fun (n : ℕ) => set_of fun (p : α × α) => edist (prod.fst p) (prod.snd p) < B n) (fun (n : ℕ) => edist_mem_uniformity (hB n)) H /-- A sequentially complete emetric space is complete. -/ theorem complete_of_cauchy_seq_tendsto {α : Type u} [emetric_space α] : (∀ (u : ℕ → α), cauchy_seq u → ∃ (a : α), filter.tendsto u filter.at_top (nhds a)) → complete_space α := uniform_space.complete_of_cauchy_seq_tendsto uniformity_has_countable_basis /-- Expressing locally uniform convergence on a set using `edist`. -/ theorem tendsto_locally_uniformly_on_iff {α : Type u} {β : Type v} [emetric_space α] {ι : Type u_1} [topological_space β] {F : ι → β → α} {f : β → α} {p : filter ι} {s : set β} : tendsto_locally_uniformly_on F f p s ↔ ∀ (ε : ennreal) (H : ε > 0) (x : β) (H : x ∈ s), ∃ (t : set β), ∃ (H : t ∈ nhds_within x s), filter.eventually (fun (n : ι) => ∀ (y : β), y ∈ t → edist (f y) (F n y) < ε) p := sorry /-- Expressing uniform convergence on a set using `edist`. -/ theorem tendsto_uniformly_on_iff {α : Type u} {β : Type v} [emetric_space α] {ι : Type u_1} {F : ι → β → α} {f : β → α} {p : filter ι} {s : set β} : tendsto_uniformly_on F f p s ↔ ∀ (ε : ennreal), ε > 0 → filter.eventually (fun (n : ι) => ∀ (x : β), x ∈ s → edist (f x) (F n x) < ε) p := sorry /-- Expressing locally uniform convergence using `edist`. -/ theorem tendsto_locally_uniformly_iff {α : Type u} {β : Type v} [emetric_space α] {ι : Type u_1} [topological_space β] {F : ι → β → α} {f : β → α} {p : filter ι} : tendsto_locally_uniformly F f p ↔ ∀ (ε : ennreal) (H : ε > 0) (x : β), ∃ (t : set β), ∃ (H : t ∈ nhds x), filter.eventually (fun (n : ι) => ∀ (y : β), y ∈ t → edist (f y) (F n y) < ε) p := sorry /-- Expressing uniform convergence using `edist`. 
-/ theorem tendsto_uniformly_iff {α : Type u} {β : Type v} [emetric_space α] {ι : Type u_1} {F : ι → β → α} {f : β → α} {p : filter ι} : tendsto_uniformly F f p ↔ ∀ (ε : ennreal), ε > 0 → filter.eventually (fun (n : ι) => ∀ (x : β), edist (f x) (F n x) < ε) p := sorry end emetric /-- An emetric space is separated -/ protected instance to_separated {α : Type u} [emetric_space α] : separated_space α := iff.mpr separated_def fun (x y : α) (h : ∀ (r : set (α × α)), r ∈ uniformity α → (x, y) ∈ r) => eq_of_forall_edist_le fun (ε : ennreal) (ε0 : ε > 0) => le_of_lt (h (set_of fun (p : α × α) => edist (prod.fst p) (prod.snd p) < ε) (edist_mem_uniformity ε0)) /-- Auxiliary function to replace the uniformity on an emetric space with a uniformity which is equal to the original one, but maybe not defeq. This is useful if one wants to construct an emetric space with a specified uniformity. See Note [forgetful inheritance] explaining why having definitionally the right uniformity is often important. -/ def emetric_space.replace_uniformity {α : Type u_1} [U : uniform_space α] (m : emetric_space α) (H : uniformity α = uniformity α) : emetric_space α := emetric_space.mk edist_self eq_of_edist_eq_zero edist_comm edist_triangle U /-- The extended metric induced by an injective function taking values in an emetric space. -/ def emetric_space.induced {α : Type u_1} {β : Type u_2} (f : α → β) (hf : function.injective f) (m : emetric_space β) : emetric_space α := emetric_space.mk sorry sorry sorry sorry (uniform_space.comap f emetric_space.to_uniform_space) /-- Emetric space instance on subsets of emetric spaces -/ protected instance subtype.emetric_space {α : Type u_1} {p : α → Prop} [t : emetric_space α] : emetric_space (Subtype p) := emetric_space.induced coe sorry t /-- The extended distance on a subset of an emetric space is the restriction of the original distance, by definition -/ theorem subtype.edist_eq {α : Type u} [emetric_space α] {p : α → Prop} (x : Subtype p) (y : Subtype p) : edist x y = edist ↑x ↑y := rfl /-- The product of two emetric spaces, with the max distance, is an extended metric spaces. We make sure that the uniform structure thus constructed is the one corresponding to the product of uniform spaces, to avoid diamond problems. -/ protected instance prod.emetric_space_max {α : Type u} {β : Type v} [emetric_space α] [emetric_space β] : emetric_space (α × β) := emetric_space.mk sorry sorry sorry sorry prod.uniform_space theorem prod.edist_eq {α : Type u} {β : Type v} [emetric_space α] [emetric_space β] (x : α × β) (y : α × β) : edist x y = max (edist (prod.fst x) (prod.fst y)) (edist (prod.snd x) (prod.snd y)) := rfl /-- The product of a finite number of emetric spaces, with the max distance, is still an emetric space. This construction would also work for infinite products, but it would not give rise to the product topology. Hence, we only formalize it in the good situation of finitely many spaces. 
-/ protected instance emetric_space_pi {β : Type v} {π : β → Type u_1} [fintype β] [(b : β) → emetric_space (π b)] : emetric_space ((b : β) → π b) := emetric_space.mk sorry sorry sorry sorry (Pi.uniform_space fun (b : β) => π b) theorem edist_pi_def {β : Type v} {π : β → Type u_1} [fintype β] [(b : β) → emetric_space (π b)] (f : (b : β) → π b) (g : (b : β) → π b) : edist f g = finset.sup finset.univ fun (b : β) => edist (f b) (g b) := rfl @[simp] theorem edist_pi_const {α : Type u} {β : Type v} [emetric_space α] [fintype β] [Nonempty β] (a : α) (b : α) : (edist (fun (x : β) => a) fun (_x : β) => b) = edist a b := finset.sup_const finset.univ_nonempty (edist a b) namespace emetric /-- `emetric.ball x ε` is the set of all points `y` with `edist y x < ε` -/ def ball {α : Type u} [emetric_space α] (x : α) (ε : ennreal) : set α := set_of fun (y : α) => edist y x < ε @[simp] theorem mem_ball {α : Type u} [emetric_space α] {x : α} {y : α} {ε : ennreal} : y ∈ ball x ε ↔ edist y x < ε := iff.rfl theorem mem_ball' {α : Type u} [emetric_space α] {x : α} {y : α} {ε : ennreal} : y ∈ ball x ε ↔ edist x y < ε := eq.mpr (id (Eq._oldrec (Eq.refl (y ∈ ball x ε ↔ edist x y < ε)) (edist_comm x y))) (iff.refl (y ∈ ball x ε)) /-- `emetric.closed_ball x ε` is the set of all points `y` with `edist y x ≤ ε` -/ def closed_ball {α : Type u} [emetric_space α] (x : α) (ε : ennreal) : set α := set_of fun (y : α) => edist y x ≤ ε @[simp] theorem mem_closed_ball {α : Type u} [emetric_space α] {x : α} {y : α} {ε : ennreal} : y ∈ closed_ball x ε ↔ edist y x ≤ ε := iff.rfl theorem ball_subset_closed_ball {α : Type u} [emetric_space α] {x : α} {ε : ennreal} : ball x ε ⊆ closed_ball x ε := fun (y : α) => eq.mpr (id (imp_congr_eq (propext mem_ball) (propext mem_closed_ball))) fun (h : edist y x < ε) => le_of_lt h theorem pos_of_mem_ball {α : Type u} [emetric_space α] {x : α} {y : α} {ε : ennreal} (hy : y ∈ ball x ε) : 0 < ε := lt_of_le_of_lt (zero_le (edist y x)) hy theorem mem_ball_self {α : Type u} [emetric_space α] {x : α} {ε : ennreal} (h : 0 < ε) : x ∈ ball x ε := (fun (this : edist x x < ε) => this) (eq.mpr (id (Eq._oldrec (Eq.refl (edist x x < ε)) (edist_self x))) h) theorem mem_closed_ball_self {α : Type u} [emetric_space α] {x : α} {ε : ennreal} : x ∈ closed_ball x ε := (fun (this : edist x x ≤ ε) => this) (eq.mpr (id (Eq._oldrec (Eq.refl (edist x x ≤ ε)) (edist_self x))) bot_le) theorem mem_ball_comm {α : Type u} [emetric_space α] {x : α} {y : α} {ε : ennreal} : x ∈ ball y ε ↔ y ∈ ball x ε := sorry theorem ball_subset_ball {α : Type u} [emetric_space α] {x : α} {ε₁ : ennreal} {ε₂ : ennreal} (h : ε₁ ≤ ε₂) : ball x ε₁ ⊆ ball x ε₂ := fun (y : α) (yx : edist y x < ε₁) => lt_of_lt_of_le yx h theorem closed_ball_subset_closed_ball {α : Type u} [emetric_space α] {x : α} {ε₁ : ennreal} {ε₂ : ennreal} (h : ε₁ ≤ ε₂) : closed_ball x ε₁ ⊆ closed_ball x ε₂ := fun (y : α) (yx : edist y x ≤ ε₁) => le_trans yx h theorem ball_disjoint {α : Type u} [emetric_space α] {x : α} {y : α} {ε₁ : ennreal} {ε₂ : ennreal} (h : ε₁ + ε₂ ≤ edist x y) : ball x ε₁ ∩ ball y ε₂ = ∅ := sorry theorem ball_subset {α : Type u} [emetric_space α] {x : α} {y : α} {ε₁ : ennreal} {ε₂ : ennreal} (h : edist x y + ε₁ ≤ ε₂) (h' : edist x y < ⊤) : ball x ε₁ ⊆ ball y ε₂ := sorry theorem exists_ball_subset_ball {α : Type u} [emetric_space α] {x : α} {y : α} {ε : ennreal} (h : y ∈ ball x ε) : ∃ (ε' : ennreal), ∃ (H : ε' > 0), ball y ε' ⊆ ball x ε := sorry theorem ball_eq_empty_iff {α : Type u} [emetric_space α] {x : α} {ε : ennreal} : ball x ε = ∅ ↔ ε = 0 := sorry /-- 
Relation “two points are at a finite edistance” is an equivalence relation. -/ def edist_lt_top_setoid {α : Type u} [emetric_space α] : setoid α := setoid.mk (fun (x y : α) => edist x y < ⊤) sorry @[simp] theorem ball_zero {α : Type u} [emetric_space α] {x : α} : ball x 0 = ∅ := eq.mpr (id (Eq._oldrec (Eq.refl (ball x 0 = ∅)) (propext ball_eq_empty_iff))) (Eq.refl 0) theorem nhds_basis_eball {α : Type u} [emetric_space α] {x : α} : filter.has_basis (nhds x) (fun (ε : ennreal) => 0 < ε) (ball x) := nhds_basis_uniformity uniformity_basis_edist theorem nhds_basis_closed_eball {α : Type u} [emetric_space α] {x : α} : filter.has_basis (nhds x) (fun (ε : ennreal) => 0 < ε) (closed_ball x) := nhds_basis_uniformity uniformity_basis_edist_le theorem nhds_eq {α : Type u} [emetric_space α] {x : α} : nhds x = infi fun (ε : ennreal) => infi fun (H : ε > 0) => filter.principal (ball x ε) := filter.has_basis.eq_binfi nhds_basis_eball theorem mem_nhds_iff {α : Type u} [emetric_space α] {x : α} {s : set α} : s ∈ nhds x ↔ ∃ (ε : ennreal), ∃ (H : ε > 0), ball x ε ⊆ s := filter.has_basis.mem_iff nhds_basis_eball theorem is_open_iff {α : Type u} [emetric_space α] {s : set α} : is_open s ↔ ∀ (x : α) (H : x ∈ s), ∃ (ε : ennreal), ∃ (H : ε > 0), ball x ε ⊆ s := sorry theorem is_open_ball {α : Type u} [emetric_space α] {x : α} {ε : ennreal} : is_open (ball x ε) := iff.mpr is_open_iff fun (y : α) => exists_ball_subset_ball theorem is_closed_ball_top {α : Type u} [emetric_space α] {x : α} : is_closed (ball x ⊤) := sorry theorem ball_mem_nhds {α : Type u} [emetric_space α] (x : α) {ε : ennreal} (ε0 : 0 < ε) : ball x ε ∈ nhds x := mem_nhds_sets is_open_ball (mem_ball_self ε0) theorem ball_prod_same {α : Type u} {β : Type v} [emetric_space α] [emetric_space β] (x : α) (y : β) (r : ennreal) : set.prod (ball x r) (ball y r) = ball (x, y) r := set.ext fun (z : α × β) => iff.symm max_lt_iff theorem closed_ball_prod_same {α : Type u} {β : Type v} [emetric_space α] [emetric_space β] (x : α) (y : β) (r : ennreal) : set.prod (closed_ball x r) (closed_ball y r) = closed_ball (x, y) r := set.ext fun (z : α × β) => iff.symm max_le_iff /-- ε-characterization of the closure in emetric spaces -/ theorem mem_closure_iff {α : Type u} [emetric_space α] {x : α} {s : set α} : x ∈ closure s ↔ ∀ (ε : ennreal) (H : ε > 0), ∃ (y : α), ∃ (H : y ∈ s), edist x y < ε := sorry theorem tendsto_nhds {α : Type u} {β : Type v} [emetric_space α] {f : filter β} {u : β → α} {a : α} : filter.tendsto u f (nhds a) ↔ ∀ (ε : ennreal), ε > 0 → filter.eventually (fun (x : β) => edist (u x) a < ε) f := filter.has_basis.tendsto_right_iff nhds_basis_eball theorem tendsto_at_top {α : Type u} {β : Type v} [emetric_space α] [Nonempty β] [semilattice_sup β] {u : β → α} {a : α} : filter.tendsto u filter.at_top (nhds a) ↔ ∀ (ε : ennreal), ε > 0 → ∃ (N : β), ∀ (n : β), n ≥ N → edist (u n) a < ε := sorry /-- In an emetric space, Cauchy sequences are characterized by the fact that, eventually, the edistance between its elements is arbitrarily small -/ theorem cauchy_seq_iff {α : Type u} {β : Type v} [emetric_space α] [Nonempty β] [semilattice_sup β] {u : β → α} : cauchy_seq u ↔ ∀ (ε : ennreal), ε > 0 → ∃ (N : β), ∀ (m n : β), m ≥ N → n ≥ N → edist (u m) (u n) < ε := filter.has_basis.cauchy_seq_iff uniformity_basis_edist /-- A variation around the emetric characterization of Cauchy sequences -/ theorem cauchy_seq_iff' {α : Type u} {β : Type v} [emetric_space α] [Nonempty β] [semilattice_sup β] {u : β → α} : cauchy_seq u ↔ ∀ (ε : ennreal), ε > 0 → ∃ (N : β), ∀ (n : β), n ≥ 
N → edist (u n) (u N) < ε := filter.has_basis.cauchy_seq_iff' uniformity_basis_edist /-- A variation of the emetric characterization of Cauchy sequences that deals with `ℝ≥0` upper bounds. -/ theorem cauchy_seq_iff_nnreal {α : Type u} {β : Type v} [emetric_space α] [Nonempty β] [semilattice_sup β] {u : β → α} : cauchy_seq u ↔ ∀ (ε : nnreal), 0 < ε → ∃ (N : β), ∀ (n : β), N ≤ n → edist (u n) (u N) < ↑ε := filter.has_basis.cauchy_seq_iff' uniformity_basis_edist_nnreal theorem totally_bounded_iff {α : Type u} [emetric_space α] {s : set α} : totally_bounded s ↔ ∀ (ε : ennreal) (H : ε > 0), ∃ (t : set α), set.finite t ∧ s ⊆ set.Union fun (y : α) => set.Union fun (H : y ∈ t) => ball y ε := sorry theorem totally_bounded_iff' {α : Type u} [emetric_space α] {s : set α} : totally_bounded s ↔ ∀ (ε : ennreal) (H : ε > 0), ∃ (t : set α), ∃ (H : t ⊆ s), set.finite t ∧ s ⊆ set.Union fun (y : α) => set.Union fun (H : y ∈ t) => ball y ε := sorry /-- A compact set in an emetric space is separable, i.e., it is the closure of a countable set -/ theorem countable_closure_of_compact {α : Type u} [emetric_space α] {s : set α} (hs : is_compact s) : ∃ (t : set α), ∃ (H : t ⊆ s), set.countable t ∧ s = closure t := sorry -- assume e, finite_cover_balls_of_compact hs, protected instance topological_space.first_countable_topology (α : Type u) [emetric_space α] : topological_space.first_countable_topology α := uniform_space.first_countable_topology uniformity_has_countable_basis /-- A separable emetric space is second countable: one obtains a countable basis by taking the balls centered at points in a dense subset, and with rational radii. We do not register this as an instance, as there is already an instance going in the other direction from second countable spaces to separable spaces, and we want to avoid loops. -/ theorem second_countable_of_separable (α : Type u) [emetric_space α] [topological_space.separable_space α] : topological_space.second_countable_topology α := uniform_space.second_countable_of_separable uniformity_has_countable_basis /-- The diameter of a set in an emetric space, named `emetric.diam` -/ def diam {α : Type u} [emetric_space α] (s : set α) : ennreal := supr fun (x : α) => supr fun (H : x ∈ s) => supr fun (y : α) => supr fun (H : y ∈ s) => edist x y theorem diam_le_iff_forall_edist_le {α : Type u} [emetric_space α] {s : set α} {d : ennreal} : diam s ≤ d ↔ ∀ (x : α), x ∈ s → ∀ (y : α), y ∈ s → edist x y ≤ d := sorry /-- If two points belong to some set, their edistance is bounded by the diameter of the set -/ theorem edist_le_diam_of_mem {α : Type u} [emetric_space α] {x : α} {y : α} {s : set α} (hx : x ∈ s) (hy : y ∈ s) : edist x y ≤ diam s := iff.mp diam_le_iff_forall_edist_le (le_refl (diam s)) x hx y hy /-- If the distance between any two points in a set is bounded by some constant, this constant bounds the diameter. -/ theorem diam_le_of_forall_edist_le {α : Type u} [emetric_space α] {s : set α} {d : ennreal} (h : ∀ (x : α), x ∈ s → ∀ (y : α), y ∈ s → edist x y ≤ d) : diam s ≤ d := iff.mpr diam_le_iff_forall_edist_le h /-- The diameter of a subsingleton vanishes. 
-/ theorem diam_subsingleton {α : Type u} [emetric_space α] {s : set α} (hs : set.subsingleton s) : diam s = 0 := iff.mp nonpos_iff_eq_zero (diam_le_of_forall_edist_le fun (x : α) (hx : x ∈ s) (y : α) (hy : y ∈ s) => Eq.symm (hs hx hy) ▸ edist_self y ▸ le_refl (edist y y)) /-- The diameter of the empty set vanishes -/ @[simp] theorem diam_empty {α : Type u} [emetric_space α] : diam ∅ = 0 := diam_subsingleton set.subsingleton_empty /-- The diameter of a singleton vanishes -/ @[simp] theorem diam_singleton {α : Type u} [emetric_space α] {x : α} : diam (singleton x) = 0 := diam_subsingleton set.subsingleton_singleton theorem diam_eq_zero_iff {α : Type u} [emetric_space α] {s : set α} : diam s = 0 ↔ set.subsingleton s := sorry theorem diam_pos_iff {α : Type u} [emetric_space α] {s : set α} : 0 < diam s ↔ ∃ (x : α), ∃ (H : x ∈ s), ∃ (y : α), ∃ (H : y ∈ s), x ≠ y := sorry theorem diam_insert {α : Type u} [emetric_space α] {x : α} {s : set α} : diam (insert x s) = max (supr fun (y : α) => supr fun (H : y ∈ s) => edist x y) (diam s) := sorry theorem diam_pair {α : Type u} [emetric_space α] {x : α} {y : α} : diam (insert x (singleton y)) = edist x y := sorry theorem diam_triple {α : Type u} [emetric_space α] {x : α} {y : α} {z : α} : diam (insert x (insert y (singleton z))) = max (max (edist x y) (edist x z)) (edist y z) := sorry /-- The diameter is monotonous with respect to inclusion -/ theorem diam_mono {α : Type u} [emetric_space α] {s : set α} {t : set α} (h : s ⊆ t) : diam s ≤ diam t := diam_le_of_forall_edist_le fun (x : α) (hx : x ∈ s) (y : α) (hy : y ∈ s) => edist_le_diam_of_mem (h hx) (h hy) /-- The diameter of a union is controlled by the diameter of the sets, and the edistance between two points in the sets. -/ theorem diam_union {α : Type u} [emetric_space α] {x : α} {y : α} {s : set α} {t : set α} (xs : x ∈ s) (yt : y ∈ t) : diam (s ∪ t) ≤ diam s + edist x y + diam t := sorry theorem diam_union' {α : Type u} [emetric_space α] {s : set α} {t : set α} (h : set.nonempty (s ∩ t)) : diam (s ∪ t) ≤ diam s + diam t := sorry theorem diam_closed_ball {α : Type u} [emetric_space α] {x : α} {r : ennreal} : diam (closed_ball x r) ≤ bit0 1 * r := sorry theorem diam_ball {α : Type u} [emetric_space α] {x : α} {r : ennreal} : diam (ball x r) ≤ bit0 1 * r := le_trans (diam_mono ball_subset_closed_ball) diam_closed_ball end Mathlib
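/- A minimal sketch, not part of the ported file above and not Mathlib API: the
   polygon inequality `edist_le_Ico_sum_edist` above is left as `sorry` in the port,
   and the induction behind it is easy to lose in the port noise.  The same argument
   is spelled out here for a toy `Nat`-valued distance; the names `PolygonSketch`,
   `chain` and `polygon` are illustrative assumptions introduced only for this
   example, using only core Lean 4. -/
namespace PolygonSketch

/-- `chain d f n` is `d (f 0) (f 1) + ⋯ + d (f (n - 1)) (f n)`; it plays the role of
    `finset.sum (finset.range n) fun i => edist (f i) (f (i + 1))` above. -/
def chain {α : Type} (d : α → α → Nat) (f : Nat → α) : Nat → Nat
  | 0 => 0
  | n + 1 => chain d f n + d (f n) (f (n + 1))

/-- Toy polygon inequality: a reflexive-zero distance satisfying the triangle
    inequality is bounded by the sum of the consecutive distances along a chain. -/
theorem polygon {α : Type} (d : α → α → Nat)
    (d_self : ∀ x, d x x = 0)
    (d_tri : ∀ x y z, d x z ≤ d x y + d y z)
    (f : Nat → α) : ∀ n, d (f 0) (f n) ≤ chain d f n := by
  intro n
  induction n with
  | zero =>
    -- base case: `chain d f 0` reduces to `0`, and `d (f 0) (f 0) = 0` by `d_self`
    exact Nat.le_of_eq (d_self (f 0))
  | succ n ih =>
    show d (f 0) (f (n + 1)) ≤ chain d f (n + 1)
    calc d (f 0) (f (n + 1))
        ≤ d (f 0) (f n) + d (f n) (f (n + 1)) := d_tri (f 0) (f n) (f (n + 1))
      _ ≤ chain d f n + d (f n) (f (n + 1)) :=
          Nat.add_le_add_right ih (d (f n) (f (n + 1)))
      _ = chain d f (n + 1) := rfl

end PolygonSketch
-- In `ennreal`, the same induction, phrased over `finset.Ico`/`finset.range`, is
-- what a proof discharging the `sorry` above would carry out.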
-- This module defines namespace Nat -- `forallRange i n f` is true if f holds for all indices j from i to n-1. def forallRange (i:Nat) (n:Nat) (f: ∀(j:Nat), j < n → Bool) : Bool := if h:i < n then f i h && forallRange (i+1) n f else true termination_by forallRange i n f => n-i -- `forallRange` correctness theorem. theorem forallRangeImplies' (n i j : Nat) (f : ∀(k:Nat), k < n → Bool) (eq : i+j = n) (p : forallRange i n f = true) (k : Nat) (lb : i ≤ k) (ub : k < n) : f k ub = true := by revert i induction j with | zero => intro i eq p lb simp at eq simp [eq] at lb have pr := Nat.not_le_of_gt ub contradiction | succ j ind => intros i n_eq ltPred lb have i_lt_n : i < n := Nat.le_trans (Nat.succ_le_succ lb) ub unfold forallRange at ltPred simp [i_lt_n] at ltPred cases Nat.eq_or_lt_of_le lb with | inl hEq => apply Eq.subst apply ltPred.left simp only [hEq] | inr hLt => have succ_i_add_j : succ i + j = n := by simp [Nat.succ_add] exact n_eq apply ind (succ i) succ_i_add_j ltPred.right hLt -- Correctness theorem for `forallRange` theorem forallRangeImplies (p:forallRange i n f = true) {j:Nat} (lb:i ≤ j) (ub : j < n) : f j ub = true := let h : i+(n-i)=n := Nat.add_sub_of_le (Nat.le_trans lb (Nat.le_of_lt ub)) forallRangeImplies' n i (n-i) f h p j lb ub theorem lt_or_eq_of_succ {i j:Nat} (lt : i < Nat.succ j) : i < j ∨ i = j := match lt with | Nat.le.step m => Or.inl m | Nat.le.refl => Or.inr rfl -- Introduce strong induction principal for natural numbers. theorem strong_induction_on {p : Nat → Prop} (n:Nat) (h:∀n, (∀ m, m < n → p m) → p n) : p n := by suffices ∀n m, m < n → p m from this (succ n) n (Nat.lt_succ_self _) intros n induction n with | zero => intros m h contradiction | succ i ind => intros m h1 cases Nat.lt_or_eq_of_succ h1 with | inl is_lt => apply ind _ is_lt | inr is_eq => apply h rw [is_eq] apply ind end Nat -- Introduce strong induction principal for Fin. 
theorem Fin.strong_induction_on {P : Fin w → Prop} (i:Fin w) (ind : ∀(i:Fin w), (∀(j:Fin w), j < i → P j) → P i) : P i := by cases i with | mk i i_lt => revert i_lt apply @Nat.strong_induction_on (λi => ∀ (i_lt : i < w), P { val := i, isLt := i_lt }) intros j p j_lt_w apply ind ⟨j, j_lt_w⟩ intros z z_lt_j apply p _ z_lt_j namespace PEG inductive Expression (t : Type) (nt : Type) where | epsilon{} : Expression t nt | fail : Expression t nt | any : Expression t nt | terminal : t → Expression t nt | seq : (a b : nt) → Expression t nt | choice : (a b : nt) → Expression t nt | look : (a : nt) → Expression t nt | notP : (e : nt) → Expression t nt def Grammar (t nt : Type _) := nt → Expression t nt structure ProofRecord (nt : Type) where (leftnonterminal : nt) (success : Bool) (position : Nat) (lengthofspan : Nat) (subproof1index : Nat) (subproof2index : Nat) namespace ProofRecord def endposition {nt:Type} (r:ProofRecord nt) : Nat := r.position + r.lengthofspan inductive Result where | fail : Result | success : Nat → Result def record_result (r:ProofRecord nt) : Result := if r.success then Result.success r.lengthofspan else Result.fail end ProofRecord def PreProof (nt : Type) := Array (ProofRecord nt) def record_match [dnt : DecidableEq nt] (r:ProofRecord nt) (n:nt) (i:Nat) : Bool := r.leftnonterminal = n && r.position = i open Expression section well_formed variable {t nt : Type} variable [dt : DecidableEq t] variable [dnt : DecidableEq nt] variable (g : Grammar t nt) variable (s : Array t) def well_formed_record (p : PreProof nt) (i:Nat) (i_lt : i < p.size) (r : ProofRecord nt) : Bool := let n := r.leftnonterminal match g n with | epsilon _ _ => r.success ∧ r.lengthofspan = 0 | fail => ¬ r.success | any => if r.position < s.size then r.success && r.lengthofspan = 1 else ¬ r.success | terminal t => if r.position < s.size && s.getD r.position t = t then r.success && r.lengthofspan = 1 else ¬ r.success | seq a b => r.subproof1index < i && let r1 := p.getD r.subproof1index r record_match r1 a r.position && if r1.success then r.subproof2index < i && let r2 := p.getD r.subproof2index r record_match r2 b r1.endposition && if r2.success then r.success && r.endposition = r2.endposition else ¬r.success else ¬r.success | choice a b => r.subproof1index < i && let r1 := p.getD r.subproof1index r record_match r1 a r.position && if r1.success then r.success && r.lengthofspan = r1.lengthofspan else r.subproof2index < i && let r2 := p.getD r.subproof2index r record_match r2 b r.position && if r2.success then r.success && r.lengthofspan = r2.lengthofspan else ¬r.success | look a => r.subproof1index < i && let r1 := p.getD r.subproof1index r record_match r1 a r.position && if r1.success then r.success && r.lengthofspan = 0 else ¬r.success | notP a => r.subproof1index < i && let r1 := p.getD r.subproof1index r record_match r1 a r.position && if r1.success then ¬r.success else r.success && r.lengthofspan = 0 def well_formed_proof (p : PreProof nt) : Bool := Nat.forallRange 0 p.size (λi lt => well_formed_record g s p i lt (p.get ⟨i, lt⟩)) end well_formed def Proof {t} {nt} [DecidableEq t] [DecidableEq nt] (g:Grammar t nt) (s: Array t) := { p:PreProof nt // well_formed_proof g s p } namespace Proof variable {g:Grammar t nt} variable {s : Array t} variable [DecidableEq t] variable [DecidableEq nt] def size (p:Proof g s) := p.val.size def get (p:Proof g s) : Fin p.size → ProofRecord nt := p.val.get instance : CoeFun (Proof g s) (fun p => Fin p.size → ProofRecord nt) := ⟨fun p => p.get⟩ theorem has_well_formed_record 
(p:Proof g s) (i:Fin p.size) : well_formed_record g s p.val i.val i.isLt (p i) := Nat.forallRangeImplies p.property (Nat.zero_le i.val) i.isLt end Proof section correctness variable {g:Grammar t nt} variable {s : Array t} variable [h1:DecidableEq t] variable [h2:DecidableEq nt] -- Lemma to rewrite from dependent use of proof index to get-with-default theorem proof_get_to_getD (r:ProofRecord nt) (p:Proof g s) (i:Fin p.size) : p i = p.val.getD i.val r := by have isLt : i.val < Array.size p.val := i.isLt simp [Proof.get, Array.get, Array.getD, isLt ] apply congrArg apply Fin.eq_of_val_eq trivial theorem is_deterministic : forall (p q : Proof g s) (i: Fin p.size) (j: Fin q.size), (p i).leftnonterminal = (q j).leftnonterminal → (p i).position = (q j).position → (p i).record_result = (q j).record_result := by simp [ProofRecord.record_result] intros p q i0 induction i0 using Fin.strong_induction_on with | ind i ind => intros j eq_nt p_pos_eq_q_pos have p_def := p.has_well_formed_record i have q_def := q.has_well_formed_record j simp only [well_formed_record, eq_nt, p_pos_eq_q_pos] at p_def q_def generalize q_j_eq : q j = q_j generalize e_eq : g (q_j.leftnonterminal) = e simp only [q_j_eq, e_eq] at p_def q_def p_pos_eq_q_pos cases e with | epsilon => simp at p_def q_def simp [p_def, q_def] | fail => simp at p_def q_def simp [p_def, q_def] | any => simp at p_def split at p_def <;> simp [*] at p_def q_def <;> simp [p_def, q_def] | terminal t => simp at p_def split at p_def <;> simp [*] at p_def q_def <;> simp [p_def, q_def] | seq a b => simp [record_match, ProofRecord.endposition] at p_def q_def generalize p_sub1_eq : (p i).subproof1index = p_sub1 generalize p_sub2_eq : (p i).subproof2index = p_sub2 generalize q_sub1_eq : q_j.subproof1index = q_sub1 generalize q_sub2_eq : q_j.subproof2index = q_sub2 simp only [p_sub1_eq, p_sub2_eq] at p_def simp only [q_sub1_eq, q_sub2_eq] at q_def have ⟨p_sub1_bound, ⟨⟨p_sub1_nt, p_sub1_pos⟩, p_def⟩⟩ := p_def have ⟨q_sub1_bound, ⟨⟨q_sub1_nt, q_sub1_pos⟩, q_def⟩⟩ := q_def have ind1 := ind (Fin.mk p_sub1 (Nat.lt_trans p_sub1_bound i.isLt)) p_sub1_bound (Fin.mk q_sub1 (Nat.lt_trans q_sub1_bound j.isLt)) rw [proof_get_to_getD (p i) p, proof_get_to_getD q_j q ] at ind1 simp [*] at ind1 split at p_def case inr p_sub1_fail => split at q_def case inl q_sub1_success => simp [p_sub1_fail, q_sub1_success] at ind1 case inr q_sub1_fail => simp [p_sub1_fail, q_sub1_fail] at p_def q_def simp [p_def, q_def] case inl p_sub1_success => split at q_def case inr q_sub1_fail => simp [p_sub1_success, q_sub1_fail] at ind1 case inl q_sub1_success => simp [*] at ind1 p_def q_def have ⟨p_sub2_bound, ⟨⟨p_sub2_nt, p_sub2_pos⟩, p_def⟩⟩ := p_def have ⟨q_sub2_bound, ⟨⟨q_sub2_nt, q_sub2_pos⟩, q_def⟩⟩ := q_def -- Instantiate second invariant on subterm 2 have ind2 := ind (Fin.mk p_sub2 (Nat.lt_trans p_sub2_bound i.isLt)) p_sub2_bound (Fin.mk q_sub2 (Nat.lt_trans q_sub2_bound j.isLt)) rw [proof_get_to_getD (p i) p, proof_get_to_getD q_j q ] at ind2 simp [*] at ind2 split at p_def case inr p_sub2_fail => split at q_def case inl q_sub2_success => simp [*] at ind2 case inr q_sub2_fail => simp [*] at ind2 p_def q_def simp [p_def, q_def] case inl p_sub2_success => simp [p_sub2_success] at ind2 split at q_def case inr q_sub2_fail => simp [*] at ind2 case inl q_sub2_success => simp [*] at ind2 p_def q_def have ⟨p_success, p_pos⟩ := p_def simp only [p_pos_eq_q_pos, p_sub2_pos, ind1, ind2, Nat.add_assoc] at p_pos have p_pos' := Nat.add_left_cancel p_pos have ⟨q_success, q_pos⟩ := q_def simp only [q_sub1_pos, 
q_sub2_pos, Nat.add_assoc] at q_pos have q_pos' := Nat.add_left_cancel q_pos simp [p_success, q_success, p_pos', q_pos'] | choice => simp [record_match] at p_def q_def generalize p_sub1_eq : (p i).subproof1index = p_sub1 generalize p_sub2_eq : (p i).subproof2index = p_sub2 generalize q_sub1_eq : q_j.subproof1index = q_sub1 generalize q_sub2_eq : q_j.subproof2index = q_sub2 simp only [p_sub1_eq, p_sub2_eq] at p_def simp only [q_sub1_eq, q_sub2_eq] at q_def have ⟨p_sub1_bound, ⟨⟨p_sub1_nt, p_sub1_pos⟩, p_def⟩⟩ := p_def have ⟨q_sub1_bound, ⟨⟨q_sub1_nt, q_sub1_pos⟩, q_def⟩⟩ := q_def have ind1 := ind (Fin.mk p_sub1 (Nat.lt_trans p_sub1_bound i.isLt)) p_sub1_bound (Fin.mk q_sub1 (Nat.lt_trans q_sub1_bound j.isLt)) rw [proof_get_to_getD (p i) p, proof_get_to_getD q_j q ] at ind1 simp [*] at ind1 split at p_def case inl p_sub1_success => split at q_def case inr q_sub1_fail => simp [p_sub1_success, q_sub1_fail] at ind1 case inl q_sub1_success => simp [p_sub1_success, q_sub1_success] at ind1 p_def q_def simp [p_def, q_def, ind1] case inr p_sub1_fail => split at q_def case inl q_sub1_success => simp [p_sub1_fail, q_sub1_success] at ind1 case inr q_sub1_fail => simp [p_sub1_fail, q_sub1_fail] at p_def q_def have ⟨p_sub2_bound, ⟨⟨p_sub2_nt, p_sub2_pos⟩, p_def⟩⟩ := p_def have ⟨q_sub2_bound, ⟨⟨q_sub2_nt, q_sub2_pos⟩, q_def⟩⟩ := q_def -- Instantiate second invariant on subterm 2 have ind2 := ind (Fin.mk p_sub2 (Nat.lt_trans p_sub2_bound i.isLt)) p_sub2_bound (Fin.mk q_sub2 (Nat.lt_trans q_sub2_bound j.isLt)) rw [proof_get_to_getD (p i) p, proof_get_to_getD q_j q ] at ind2 simp [*] at ind2 split at p_def case inl p_sub2_success => split at q_def case inr q_sub2_fail => simp [p_sub2_success, q_sub2_fail] at ind2 case inl q_sub2_success => simp [p_sub2_success, q_sub2_success] at ind2 p_def q_def simp [p_def, q_def, ind2] case inr p_sub2_fail => split at q_def case inl q_sub2_success => simp [p_sub2_fail, q_sub2_success] at ind2 case inr q_sub2_fail => simp [p_sub2_fail, q_sub2_fail] at p_def q_def simp [p_def, q_def] | look => simp [record_match] at p_def q_def generalize p_sub1_eq : (p i).subproof1index = p_sub1 generalize q_sub1_eq : q_j.subproof1index = q_sub1 simp only [p_sub1_eq] at p_def simp only [q_sub1_eq] at q_def have ⟨p_sub1_bound, ⟨⟨p_sub1_nt, p_sub1_pos⟩, p_def⟩⟩ := p_def have ⟨q_sub1_bound, ⟨⟨q_sub1_nt, q_sub1_pos⟩, q_def⟩⟩ := q_def have ind1 := ind (Fin.mk p_sub1 (Nat.lt_trans p_sub1_bound i.isLt)) p_sub1_bound (Fin.mk q_sub1 (Nat.lt_trans q_sub1_bound j.isLt)) rw [proof_get_to_getD (p i) p, proof_get_to_getD q_j q ] at ind1 simp [*] at ind1 split at p_def case inl p_sub1_success => split at q_def case inr q_sub1_fail => simp [p_sub1_success, q_sub1_fail] at ind1 case inl q_sub1_success => simp [p_sub1_success, q_sub1_success] at ind1 p_def q_def simp [p_def, q_def, ind1] case inr p_sub1_fail => split at q_def case inl q_sub1_success => simp [p_sub1_fail, q_sub1_success] at ind1 case inr q_sub1_fail => simp [p_sub1_fail, q_sub1_fail] at ind1 p_def q_def simp [p_def, q_def, ind1] | notP => simp [record_match] at p_def q_def generalize p_sub1_eq : (p i).subproof1index = p_sub1 generalize q_sub1_eq : q_j.subproof1index = q_sub1 simp only [p_sub1_eq] at p_def simp only [q_sub1_eq] at q_def have ⟨p_sub1_bound, ⟨⟨p_sub1_nt, p_sub1_pos⟩, p_def⟩⟩ := p_def have ⟨q_sub1_bound, ⟨⟨q_sub1_nt, q_sub1_pos⟩, q_def⟩⟩ := q_def have ind1 := ind (Fin.mk p_sub1 (Nat.lt_trans p_sub1_bound i.isLt)) p_sub1_bound (Fin.mk q_sub1 (Nat.lt_trans q_sub1_bound j.isLt)) rw [proof_get_to_getD (p i) p, proof_get_to_getD q_j 
q ] at ind1 simp [*] at ind1 split at p_def case inl p_sub1_success => split at q_def case inr q_sub1_fail => simp [p_sub1_success, q_sub1_fail] at ind1 case inl q_sub1_success => simp [p_sub1_success, q_sub1_success] at ind1 p_def q_def simp [p_def, q_def, ind1] case inr p_sub1_fail => split at q_def case inl q_sub1_success => simp [p_sub1_fail, q_sub1_success] at ind1 case inr q_sub1_fail => simp [p_sub1_fail, q_sub1_fail] at ind1 p_def q_def simp [p_def, q_def, ind1] end correctness end PEG
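/- A small usage sketch, assuming the `Nat.forallRange` development above is in
   scope.  The namespace `ForallRangeSketch`, the bound `10` and the index `3` are
   illustrative choices, not part of the original file.  `forallRange i n f` folds a
   Boolean check over the index range `[i, n)`, and `forallRangeImplies` recovers the
   pointwise fact at any index in that range. -/
namespace ForallRangeSketch

-- Check `j + 1 ≤ 10` for every `j` in `[0, 5)`; expected to evaluate to `true`.
#eval Nat.forallRange 0 5 (fun j _ => decide (j + 1 ≤ 10))

-- From a successful range check, the correctness theorem extracts the fact at a
-- particular index, here `j = 3`.
example (f : ∀ j : Nat, j < 5 → Bool) (h : Nat.forallRange 0 5 f = true) :
    f 3 (by decide) = true :=
  Nat.forallRangeImplies h (Nat.zero_le 3) (by decide)

end ForallRangeSketch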
module DataDef where data ⊤ : Set where tt : ⊤ data ⊤' (x : ⊤) : Set where tt : ⊤' x data D {y : ⊤} (y' : ⊤' y) : Set data D {z} _ where postulate d : D {tt} tt
/- Copyright (c) 2020 Joseph Myers. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Author: Joseph Myers. -/ import Mathlib.PrePort import Mathlib.Lean3Lib.init.default import Mathlib.algebra.invertible import Mathlib.data.indicator_function import Mathlib.linear_algebra.affine_space.affine_map import Mathlib.linear_algebra.affine_space.affine_subspace import Mathlib.linear_algebra.finsupp import Mathlib.PostPort universes u_1 u_2 u_3 u_4 u_5 namespace Mathlib /-! # Affine combinations of points This file defines affine combinations of points. ## Main definitions * `weighted_vsub_of_point` is a general weighted combination of subtractions with an explicit base point, yielding a vector. * `weighted_vsub` uses an arbitrary choice of base point and is intended to be used when the sum of weights is 0, in which case the result is independent of the choice of base point. * `affine_combination` adds the weighted combination to the arbitrary base point, yielding a point rather than a vector, and is intended to be used when the sum of weights is 1, in which case the result is independent of the choice of base point. These definitions are for sums over a `finset`; versions for a `fintype` may be obtained using `finset.univ`, while versions for a `finsupp` may be obtained using `finsupp.support`. ## References * https://en.wikipedia.org/wiki/Affine_space -/ namespace finset /-- A weighted sum of the results of subtracting a base point from the given points, as a linear map on the weights. The main cases of interest are where the sum of the weights is 0, in which case the sum is independent of the choice of base point, and where the sum of the weights is 1, in which case the sum added to the base point is independent of the choice of base point. -/ def weighted_vsub_of_point {k : Type u_1} {V : Type u_2} {P : Type u_3} [ring k] [add_comm_group V] [module k V] [S : add_torsor V P] {ι : Type u_4} (s : finset ι) (p : ι → P) (b : P) : linear_map k (ι → k) V := finset.sum s fun (i : ι) => linear_map.smul_right (linear_map.proj i) (p i -ᵥ b) @[simp] theorem weighted_vsub_of_point_apply {k : Type u_1} {V : Type u_2} {P : Type u_3} [ring k] [add_comm_group V] [module k V] [S : add_torsor V P] {ι : Type u_4} (s : finset ι) (w : ι → k) (p : ι → P) (b : P) : coe_fn (weighted_vsub_of_point s p b) w = finset.sum s fun (i : ι) => w i • (p i -ᵥ b) := sorry /-- The weighted sum is independent of the base point when the sum of the weights is 0. -/ theorem weighted_vsub_of_point_eq_of_sum_eq_zero {k : Type u_1} {V : Type u_2} {P : Type u_3} [ring k] [add_comm_group V] [module k V] [S : add_torsor V P] {ι : Type u_4} (s : finset ι) (w : ι → k) (p : ι → P) (h : (finset.sum s fun (i : ι) => w i) = 0) (b₁ : P) (b₂ : P) : coe_fn (weighted_vsub_of_point s p b₁) w = coe_fn (weighted_vsub_of_point s p b₂) w := sorry /-- The weighted sum, added to the base point, is independent of the base point when the sum of the weights is 1. -/ theorem weighted_vsub_of_point_vadd_eq_of_sum_eq_one {k : Type u_1} {V : Type u_2} {P : Type u_3} [ring k] [add_comm_group V] [module k V] [S : add_torsor V P] {ι : Type u_4} (s : finset ι) (w : ι → k) (p : ι → P) (h : (finset.sum s fun (i : ι) => w i) = 1) (b₁ : P) (b₂ : P) : coe_fn (weighted_vsub_of_point s p b₁) w +ᵥ b₁ = coe_fn (weighted_vsub_of_point s p b₂) w +ᵥ b₂ := sorry /-- The weighted sum is unaffected by removing the base point, if present, from the set of points. 
-/ @[simp] theorem weighted_vsub_of_point_erase {k : Type u_1} {V : Type u_2} {P : Type u_3} [ring k] [add_comm_group V] [module k V] [S : add_torsor V P] {ι : Type u_4} (s : finset ι) (w : ι → k) (p : ι → P) (i : ι) : coe_fn (weighted_vsub_of_point (erase s i) p (p i)) w = coe_fn (weighted_vsub_of_point s p (p i)) w := sorry /-- The weighted sum is unaffected by adding the base point, whether or not present, to the set of points. -/ @[simp] theorem weighted_vsub_of_point_insert {k : Type u_1} {V : Type u_2} {P : Type u_3} [ring k] [add_comm_group V] [module k V] [S : add_torsor V P] {ι : Type u_4} (s : finset ι) (w : ι → k) (p : ι → P) (i : ι) : coe_fn (weighted_vsub_of_point (insert i s) p (p i)) w = coe_fn (weighted_vsub_of_point s p (p i)) w := sorry /-- The weighted sum is unaffected by changing the weights to the corresponding indicator function and adding points to the set. -/ theorem weighted_vsub_of_point_indicator_subset {k : Type u_1} {V : Type u_2} {P : Type u_3} [ring k] [add_comm_group V] [module k V] [S : add_torsor V P] {ι : Type u_4} (w : ι → k) (p : ι → P) (b : P) {s₁ : finset ι} {s₂ : finset ι} (h : s₁ ⊆ s₂) : coe_fn (weighted_vsub_of_point s₁ p b) w = coe_fn (weighted_vsub_of_point s₂ p b) (set.indicator (↑s₁) w) := sorry /-- A weighted sum, over the image of an embedding, equals a weighted sum with the same points and weights over the original `finset`. -/ theorem weighted_vsub_of_point_map {k : Type u_1} {V : Type u_2} {P : Type u_3} [ring k] [add_comm_group V] [module k V] [S : add_torsor V P] {ι : Type u_4} {ι₂ : Type u_5} (s₂ : finset ι₂) (e : ι₂ ↪ ι) (w : ι → k) (p : ι → P) (b : P) : coe_fn (weighted_vsub_of_point (map e s₂) p b) w = coe_fn (weighted_vsub_of_point s₂ (p ∘ ⇑e) b) (w ∘ ⇑e) := sorry /-- A weighted sum of the results of subtracting a default base point from the given points, as a linear map on the weights. This is intended to be used when the sum of the weights is 0; that condition is specified as a hypothesis on those lemmas that require it. -/ def weighted_vsub {k : Type u_1} {V : Type u_2} {P : Type u_3} [ring k] [add_comm_group V] [module k V] [S : add_torsor V P] {ι : Type u_4} (s : finset ι) (p : ι → P) : linear_map k (ι → k) V := weighted_vsub_of_point s p (Classical.choice sorry) /-- Applying `weighted_vsub` with given weights. This is for the case where a result involving a default base point is OK (for example, when that base point will cancel out later); a more typical use case for `weighted_vsub` would involve selecting a preferred base point with `weighted_vsub_eq_weighted_vsub_of_point_of_sum_eq_zero` and then using `weighted_vsub_of_point_apply`. -/ theorem weighted_vsub_apply {k : Type u_1} {V : Type u_2} {P : Type u_3} [ring k] [add_comm_group V] [module k V] [S : add_torsor V P] {ι : Type u_4} (s : finset ι) (w : ι → k) (p : ι → P) : coe_fn (weighted_vsub s p) w = finset.sum s fun (i : ι) => w i • (p i -ᵥ Classical.choice add_torsor.nonempty) := sorry /-- `weighted_vsub` gives the sum of the results of subtracting any base point, when the sum of the weights is 0. 
-/ theorem weighted_vsub_eq_weighted_vsub_of_point_of_sum_eq_zero {k : Type u_1} {V : Type u_2} {P : Type u_3} [ring k] [add_comm_group V] [module k V] [S : add_torsor V P] {ι : Type u_4} (s : finset ι) (w : ι → k) (p : ι → P) (h : (finset.sum s fun (i : ι) => w i) = 0) (b : P) : coe_fn (weighted_vsub s p) w = coe_fn (weighted_vsub_of_point s p b) w := weighted_vsub_of_point_eq_of_sum_eq_zero s w p h (Classical.choice weighted_vsub._proof_1) b /-- The `weighted_vsub` for an empty set is 0. -/ @[simp] theorem weighted_vsub_empty {k : Type u_1} {V : Type u_2} {P : Type u_3} [ring k] [add_comm_group V] [module k V] [S : add_torsor V P] {ι : Type u_4} (w : ι → k) (p : ι → P) : coe_fn (weighted_vsub ∅ p) w = 0 := sorry /-- The weighted sum is unaffected by changing the weights to the corresponding indicator function and adding points to the set. -/ theorem weighted_vsub_indicator_subset {k : Type u_1} {V : Type u_2} {P : Type u_3} [ring k] [add_comm_group V] [module k V] [S : add_torsor V P] {ι : Type u_4} (w : ι → k) (p : ι → P) {s₁ : finset ι} {s₂ : finset ι} (h : s₁ ⊆ s₂) : coe_fn (weighted_vsub s₁ p) w = coe_fn (weighted_vsub s₂ p) (set.indicator (↑s₁) w) := weighted_vsub_of_point_indicator_subset w p (Classical.choice weighted_vsub._proof_1) h /-- A weighted subtraction, over the image of an embedding, equals a weighted subtraction with the same points and weights over the original `finset`. -/ theorem weighted_vsub_map {k : Type u_1} {V : Type u_2} {P : Type u_3} [ring k] [add_comm_group V] [module k V] [S : add_torsor V P] {ι : Type u_4} {ι₂ : Type u_5} (s₂ : finset ι₂) (e : ι₂ ↪ ι) (w : ι → k) (p : ι → P) : coe_fn (weighted_vsub (map e s₂) p) w = coe_fn (weighted_vsub s₂ (p ∘ ⇑e)) (w ∘ ⇑e) := weighted_vsub_of_point_map s₂ e w p (Classical.choice weighted_vsub._proof_1) /-- A weighted sum of the results of subtracting a default base point from the given points, added to that base point, as an affine map on the weights. This is intended to be used when the sum of the weights is 1, in which case it is an affine combination (barycenter) of the points with the given weights; that condition is specified as a hypothesis on those lemmas that require it. -/ def affine_combination {k : Type u_1} {V : Type u_2} {P : Type u_3} [ring k] [add_comm_group V] [module k V] [S : add_torsor V P] {ι : Type u_4} (s : finset ι) (p : ι → P) : affine_map k (ι → k) P := affine_map.mk (fun (w : ι → k) => coe_fn (weighted_vsub_of_point s p (Classical.choice sorry)) w +ᵥ Classical.choice sorry) (weighted_vsub s p) sorry /-- The linear map corresponding to `affine_combination` is `weighted_vsub`. -/ @[simp] theorem affine_combination_linear {k : Type u_1} {V : Type u_2} {P : Type u_3} [ring k] [add_comm_group V] [module k V] [S : add_torsor V P] {ι : Type u_4} (s : finset ι) (p : ι → P) : affine_map.linear (affine_combination s p) = weighted_vsub s p := rfl /-- Applying `affine_combination` with given weights. This is for the case where a result involving a default base point is OK (for example, when that base point will cancel out later); a more typical use case for `affine_combination` would involve selecting a preferred base point with `affine_combination_eq_weighted_vsub_of_point_vadd_of_sum_eq_one` and then using `weighted_vsub_of_point_apply`. 
-/ theorem affine_combination_apply {k : Type u_1} {V : Type u_2} {P : Type u_3} [ring k] [add_comm_group V] [module k V] [S : add_torsor V P] {ι : Type u_4} (s : finset ι) (w : ι → k) (p : ι → P) : coe_fn (affine_combination s p) w = coe_fn (weighted_vsub_of_point s p (Classical.choice add_torsor.nonempty)) w +ᵥ Classical.choice add_torsor.nonempty := rfl /-- `affine_combination` gives the sum with any base point, when the sum of the weights is 1. -/ theorem affine_combination_eq_weighted_vsub_of_point_vadd_of_sum_eq_one {k : Type u_1} {V : Type u_2} {P : Type u_3} [ring k] [add_comm_group V] [module k V] [S : add_torsor V P] {ι : Type u_4} (s : finset ι) (w : ι → k) (p : ι → P) (h : (finset.sum s fun (i : ι) => w i) = 1) (b : P) : coe_fn (affine_combination s p) w = coe_fn (weighted_vsub_of_point s p b) w +ᵥ b := weighted_vsub_of_point_vadd_eq_of_sum_eq_one s w p h (Classical.choice affine_combination._proof_1) b /-- Adding a `weighted_vsub` to an `affine_combination`. -/ theorem weighted_vsub_vadd_affine_combination {k : Type u_1} {V : Type u_2} {P : Type u_3} [ring k] [add_comm_group V] [module k V] [S : add_torsor V P] {ι : Type u_4} (s : finset ι) (w₁ : ι → k) (w₂ : ι → k) (p : ι → P) : coe_fn (weighted_vsub s p) w₁ +ᵥ coe_fn (affine_combination s p) w₂ = coe_fn (affine_combination s p) (w₁ + w₂) := sorry /-- Subtracting two `affine_combination`s. -/ theorem affine_combination_vsub {k : Type u_1} {V : Type u_2} {P : Type u_3} [ring k] [add_comm_group V] [module k V] [S : add_torsor V P] {ι : Type u_4} (s : finset ι) (w₁ : ι → k) (w₂ : ι → k) (p : ι → P) : coe_fn (affine_combination s p) w₁ -ᵥ coe_fn (affine_combination s p) w₂ = coe_fn (weighted_vsub s p) (w₁ - w₂) := sorry /-- An `affine_combination` equals a point if that point is in the set and has weight 1 and the other points in the set have weight 0. -/ @[simp] theorem affine_combination_of_eq_one_of_eq_zero {k : Type u_1} {V : Type u_2} {P : Type u_3} [ring k] [add_comm_group V] [module k V] [S : add_torsor V P] {ι : Type u_4} (s : finset ι) (w : ι → k) (p : ι → P) {i : ι} (his : i ∈ s) (hwi : w i = 1) (hw0 : ∀ (i2 : ι), i2 ∈ s → i2 ≠ i → w i2 = 0) : coe_fn (affine_combination s p) w = p i := sorry /-- An affine combination is unaffected by changing the weights to the corresponding indicator function and adding points to the set. -/ theorem affine_combination_indicator_subset {k : Type u_1} {V : Type u_2} {P : Type u_3} [ring k] [add_comm_group V] [module k V] [S : add_torsor V P] {ι : Type u_4} (w : ι → k) (p : ι → P) {s₁ : finset ι} {s₂ : finset ι} (h : s₁ ⊆ s₂) : coe_fn (affine_combination s₁ p) w = coe_fn (affine_combination s₂ p) (set.indicator (↑s₁) w) := sorry /-- An affine combination, over the image of an embedding, equals an affine combination with the same points and weights over the original `finset`. -/ theorem affine_combination_map {k : Type u_1} {V : Type u_2} {P : Type u_3} [ring k] [add_comm_group V] [module k V] [S : add_torsor V P] {ι : Type u_4} {ι₂ : Type u_5} (s₂ : finset ι₂) (e : ι₂ ↪ ι) (w : ι → k) (p : ι → P) : coe_fn (affine_combination (map e s₂) p) w = coe_fn (affine_combination s₂ (p ∘ ⇑e)) (w ∘ ⇑e) := sorry /-- Suppose an indexed family of points is given, along with a subset of the index type. 
A vector can be expressed as `weighted_vsub_of_point` using a `finset` lying within that subset and with a given sum of weights if and only if it can be expressed as `weighted_vsub_of_point` with that sum of weights for the corresponding indexed family whose index type is the subtype corresponding to that subset. -/ theorem eq_weighted_vsub_of_point_subset_iff_eq_weighted_vsub_of_point_subtype {k : Type u_1} {V : Type u_2} {P : Type u_3} [ring k] [add_comm_group V] [module k V] [S : add_torsor V P] {ι : Type u_4} {v : V} {x : k} {s : set ι} {p : ι → P} {b : P} : (∃ (fs : finset ι), ∃ (hfs : ↑fs ⊆ s), ∃ (w : ι → k), ∃ (hw : (finset.sum fs fun (i : ι) => w i) = x), v = coe_fn (weighted_vsub_of_point fs p b) w) ↔ ∃ (fs : finset ↥s), ∃ (w : ↥s → k), ∃ (hw : (finset.sum fs fun (i : ↥s) => w i) = x), v = coe_fn (weighted_vsub_of_point fs (fun (i : ↥s) => p ↑i) b) w := sorry /-- Suppose an indexed family of points is given, along with a subset of the index type. A vector can be expressed as `weighted_vsub` using a `finset` lying within that subset and with sum of weights 0 if and only if it can be expressed as `weighted_vsub` with sum of weights 0 for the corresponding indexed family whose index type is the subtype corresponding to that subset. -/ theorem eq_weighted_vsub_subset_iff_eq_weighted_vsub_subtype (k : Type u_1) {V : Type u_2} {P : Type u_3} [ring k] [add_comm_group V] [module k V] [S : add_torsor V P] {ι : Type u_4} {v : V} {s : set ι} {p : ι → P} : (∃ (fs : finset ι), ∃ (hfs : ↑fs ⊆ s), ∃ (w : ι → k), ∃ (hw : (finset.sum fs fun (i : ι) => w i) = 0), v = coe_fn (weighted_vsub fs p) w) ↔ ∃ (fs : finset ↥s), ∃ (w : ↥s → k), ∃ (hw : (finset.sum fs fun (i : ↥s) => w i) = 0), v = coe_fn (weighted_vsub fs fun (i : ↥s) => p ↑i) w := eq_weighted_vsub_of_point_subset_iff_eq_weighted_vsub_of_point_subtype /-- Suppose an indexed family of points is given, along with a subset of the index type. A point can be expressed as an `affine_combination` using a `finset` lying within that subset and with sum of weights 1 if and only if it can be expressed an `affine_combination` with sum of weights 1 for the corresponding indexed family whose index type is the subtype corresponding to that subset. -/ theorem eq_affine_combination_subset_iff_eq_affine_combination_subtype (k : Type u_1) (V : Type u_2) {P : Type u_3} [ring k] [add_comm_group V] [module k V] [S : add_torsor V P] {ι : Type u_4} {p0 : P} {s : set ι} {p : ι → P} : (∃ (fs : finset ι), ∃ (hfs : ↑fs ⊆ s), ∃ (w : ι → k), ∃ (hw : (finset.sum fs fun (i : ι) => w i) = 1), p0 = coe_fn (affine_combination fs p) w) ↔ ∃ (fs : finset ↥s), ∃ (w : ↥s → k), ∃ (hw : (finset.sum fs fun (i : ↥s) => w i) = 1), p0 = coe_fn (affine_combination fs fun (i : ↥s) => p ↑i) w := sorry end finset namespace finset /-- The weights for the centroid of some points. -/ def centroid_weights (k : Type u_1) [division_ring k] {ι : Type u_4} (s : finset ι) : ι → k := function.const ι (↑(card s)⁻¹) /-- `centroid_weights` at any point. -/ @[simp] theorem centroid_weights_apply (k : Type u_1) [division_ring k] {ι : Type u_4} (s : finset ι) (i : ι) : centroid_weights k s i = (↑(card s)⁻¹) := rfl /-- `centroid_weights` equals a constant function. -/ theorem centroid_weights_eq_const (k : Type u_1) [division_ring k] {ι : Type u_4} (s : finset ι) : centroid_weights k s = function.const ι (↑(card s)⁻¹) := rfl /-- The weights in the centroid sum to 1, if the number of points, converted to `k`, is not zero. 
-/ theorem sum_centroid_weights_eq_one_of_cast_card_ne_zero {k : Type u_1} [division_ring k] {ι : Type u_4} (s : finset ι) (h : ↑(card s) ≠ 0) : (finset.sum s fun (i : ι) => centroid_weights k s i) = 1 := sorry /-- In the characteristic zero case, the weights in the centroid sum to 1 if the number of points is not zero. -/ theorem sum_centroid_weights_eq_one_of_card_ne_zero (k : Type u_1) [division_ring k] {ι : Type u_4} (s : finset ι) [char_zero k] (h : card s ≠ 0) : (finset.sum s fun (i : ι) => centroid_weights k s i) = 1 := sorry /-- In the characteristic zero case, the weights in the centroid sum to 1 if the set is nonempty. -/ theorem sum_centroid_weights_eq_one_of_nonempty (k : Type u_1) [division_ring k] {ι : Type u_4} (s : finset ι) [char_zero k] (h : finset.nonempty s) : (finset.sum s fun (i : ι) => centroid_weights k s i) = 1 := sum_centroid_weights_eq_one_of_card_ne_zero k s (ne_of_gt (iff.mpr card_pos h)) /-- In the characteristic zero case, the weights in the centroid sum to 1 if the number of points is `n + 1`. -/ theorem sum_centroid_weights_eq_one_of_card_eq_add_one (k : Type u_1) [division_ring k] {ι : Type u_4} (s : finset ι) [char_zero k] {n : ℕ} (h : card s = n + 1) : (finset.sum s fun (i : ι) => centroid_weights k s i) = 1 := sum_centroid_weights_eq_one_of_card_ne_zero k s (Eq.symm h ▸ nat.succ_ne_zero n) /-- The centroid of some points. Although defined for any `s`, this is intended to be used in the case where the number of points, converted to `k`, is not zero. -/ def centroid (k : Type u_1) {V : Type u_2} {P : Type u_3} [division_ring k] [add_comm_group V] [module k V] [add_torsor V P] {ι : Type u_4} (s : finset ι) (p : ι → P) : P := coe_fn (affine_combination s p) (centroid_weights k s) /-- The definition of the centroid. -/ theorem centroid_def (k : Type u_1) {V : Type u_2} {P : Type u_3} [division_ring k] [add_comm_group V] [module k V] [add_torsor V P] {ι : Type u_4} (s : finset ι) (p : ι → P) : centroid k s p = coe_fn (affine_combination s p) (centroid_weights k s) := rfl /-- The centroid of a single point. -/ @[simp] theorem centroid_singleton (k : Type u_1) {V : Type u_2} {P : Type u_3} [division_ring k] [add_comm_group V] [module k V] [add_torsor V P] {ι : Type u_4} (p : ι → P) (i : ι) : centroid k (singleton i) p = p i := sorry /-- The centroid of two points, expressed directly as adding a vector to a point. -/ theorem centroid_insert_singleton (k : Type u_1) {V : Type u_2} {P : Type u_3} [division_ring k] [add_comm_group V] [module k V] [add_torsor V P] {ι : Type u_4} [invertible (bit0 1)] (p : ι → P) (i₁ : ι) (i₂ : ι) : centroid k (insert i₁ (singleton i₂)) p = bit0 1⁻¹ • (p i₂ -ᵥ p i₁) +ᵥ p i₁ := sorry /-- The centroid of two points indexed by `fin 2`, expressed directly as adding a vector to the first point. -/ theorem centroid_insert_singleton_fin (k : Type u_1) {V : Type u_2} {P : Type u_3} [division_ring k] [add_comm_group V] [module k V] [add_torsor V P] [invertible (bit0 1)] (p : fin (bit0 1) → P) : centroid k univ p = bit0 1⁻¹ • (p 1 -ᵥ p 0) +ᵥ p 0 := sorry /-- A centroid, over the image of an embedding, equals a centroid with the same points and weights over the original `finset`. 
-/ theorem centroid_map (k : Type u_1) {V : Type u_2} {P : Type u_3} [division_ring k] [add_comm_group V] [module k V] [add_torsor V P] {ι : Type u_4} {ι₂ : Type u_5} (s₂ : finset ι₂) (e : ι₂ ↪ ι) (p : ι → P) : centroid k (map e s₂) p = centroid k s₂ (p ∘ ⇑e) := sorry /-- `centroid_weights` gives the weights for the centroid as a constant function, which is suitable when summing over the points whose centroid is being taken. This function gives the weights in a form suitable for summing over a larger set of points, as an indicator function that is zero outside the set whose centroid is being taken. In the case of a `fintype`, the sum may be over `univ`. -/ def centroid_weights_indicator (k : Type u_1) [division_ring k] {ι : Type u_4} (s : finset ι) : ι → k := set.indicator (↑s) (centroid_weights k s) /-- The definition of `centroid_weights_indicator`. -/ theorem centroid_weights_indicator_def (k : Type u_1) [division_ring k] {ι : Type u_4} (s : finset ι) : centroid_weights_indicator k s = set.indicator (↑s) (centroid_weights k s) := rfl /-- The sum of the weights for the centroid indexed by a `fintype`. -/ theorem sum_centroid_weights_indicator (k : Type u_1) [division_ring k] {ι : Type u_4} (s : finset ι) [fintype ι] : (finset.sum univ fun (i : ι) => centroid_weights_indicator k s i) = finset.sum s fun (i : ι) => centroid_weights k s i := Eq.symm (set.sum_indicator_subset (fun (i : ι) => centroid_weights k s i) (subset_univ s)) /-- In the characteristic zero case, the weights in the centroid indexed by a `fintype` sum to 1 if the number of points is not zero. -/ theorem sum_centroid_weights_indicator_eq_one_of_card_ne_zero (k : Type u_1) [division_ring k] {ι : Type u_4} (s : finset ι) [char_zero k] [fintype ι] (h : card s ≠ 0) : (finset.sum univ fun (i : ι) => centroid_weights_indicator k s i) = 1 := sorry /-- In the characteristic zero case, the weights in the centroid indexed by a `fintype` sum to 1 if the set is nonempty. -/ theorem sum_centroid_weights_indicator_eq_one_of_nonempty (k : Type u_1) [division_ring k] {ι : Type u_4} (s : finset ι) [char_zero k] [fintype ι] (h : finset.nonempty s) : (finset.sum univ fun (i : ι) => centroid_weights_indicator k s i) = 1 := sorry /-- In the characteristic zero case, the weights in the centroid indexed by a `fintype` sum to 1 if the number of points is `n + 1`. -/ theorem sum_centroid_weights_indicator_eq_one_of_card_eq_add_one (k : Type u_1) [division_ring k] {ι : Type u_4} (s : finset ι) [char_zero k] [fintype ι] {n : ℕ} (h : card s = n + 1) : (finset.sum univ fun (i : ι) => centroid_weights_indicator k s i) = 1 := sorry /-- The centroid as an affine combination over a `fintype`. -/ theorem centroid_eq_affine_combination_fintype (k : Type u_1) {V : Type u_2} {P : Type u_3} [division_ring k] [add_comm_group V] [module k V] [add_torsor V P] {ι : Type u_4} (s : finset ι) [fintype ι] (p : ι → P) : centroid k s p = coe_fn (affine_combination univ p) (centroid_weights_indicator k s) := affine_combination_indicator_subset (centroid_weights k s) p (subset_univ s) /-- An indexed family of points that is injective on the given `finset` has the same centroid as the image of that `finset`. This is stated in terms of a set equal to the image to provide control of definitional equality for the index type used for the centroid of the image. 
-/ theorem centroid_eq_centroid_image_of_inj_on (k : Type u_1) {V : Type u_2} {P : Type u_3} [division_ring k] [add_comm_group V] [module k V] [add_torsor V P] {ι : Type u_4} (s : finset ι) {p : ι → P} (hi : ∀ (i j : ι), i ∈ s → j ∈ s → p i = p j → i = j) {ps : set P} [fintype ↥ps] (hps : ps = p '' ↑s) : centroid k s p = centroid k univ fun (x : ↥ps) => ↑x := sorry /-- Two indexed families of points that are injective on the given `finset`s and with the same points in the image of those `finset`s have the same centroid. -/ theorem centroid_eq_of_inj_on_of_image_eq (k : Type u_1) {V : Type u_2} {P : Type u_3} [division_ring k] [add_comm_group V] [module k V] [add_torsor V P] {ι : Type u_4} (s : finset ι) {ι₂ : Type u_5} (s₂ : finset ι₂) {p : ι → P} (hi : ∀ (i j : ι), i ∈ s → j ∈ s → p i = p j → i = j) {p₂ : ι₂ → P} (hi₂ : ∀ (i j : ι₂), i ∈ s₂ → j ∈ s₂ → p₂ i = p₂ j → i = j) (he : p '' ↑s = p₂ '' ↑s₂) : centroid k s p = centroid k s₂ p₂ := sorry end finset /-- A `weighted_vsub` with sum of weights 0 is in the `vector_span` of an indexed family. -/ theorem weighted_vsub_mem_vector_span {k : Type u_1} {V : Type u_2} {P : Type u_3} [ring k] [add_comm_group V] [module k V] [add_torsor V P] {ι : Type u_4} {s : finset ι} {w : ι → k} (h : (finset.sum s fun (i : ι) => w i) = 0) (p : ι → P) : coe_fn (finset.weighted_vsub s p) w ∈ vector_span k (set.range p) := sorry /-- An `affine_combination` with sum of weights 1 is in the `affine_span` of an indexed family, if the underlying ring is nontrivial. -/ theorem affine_combination_mem_affine_span {k : Type u_1} {V : Type u_2} {P : Type u_3} [ring k] [add_comm_group V] [module k V] [add_torsor V P] {ι : Type u_4} [nontrivial k] {s : finset ι} {w : ι → k} (h : (finset.sum s fun (i : ι) => w i) = 1) (p : ι → P) : coe_fn (finset.affine_combination s p) w ∈ affine_span k (set.range p) := sorry /-- A vector is in the `vector_span` of an indexed family if and only if it is a `weighted_vsub` with sum of weights 0. -/ theorem mem_vector_span_iff_eq_weighted_vsub (k : Type u_1) {V : Type u_2} {P : Type u_3} [ring k] [add_comm_group V] [module k V] [add_torsor V P] {ι : Type u_4} {v : V} {p : ι → P} : v ∈ vector_span k (set.range p) ↔ ∃ (s : finset ι), ∃ (w : ι → k), ∃ (h : (finset.sum s fun (i : ι) => w i) = 0), v = coe_fn (finset.weighted_vsub s p) w := sorry /-- A point in the `affine_span` of an indexed family is an `affine_combination` with sum of weights 1. -/ theorem eq_affine_combination_of_mem_affine_span {k : Type u_1} {V : Type u_2} {P : Type u_3} [ring k] [add_comm_group V] [module k V] [add_torsor V P] {ι : Type u_4} {p1 : P} {p : ι → P} (h : p1 ∈ affine_span k (set.range p)) : ∃ (s : finset ι), ∃ (w : ι → k), ∃ (hw : (finset.sum s fun (i : ι) => w i) = 1), p1 = coe_fn (finset.affine_combination s p) w := sorry /-- A point is in the `affine_span` of an indexed family if and only if it is an `affine_combination` with sum of weights 1, provided the underlying ring is nontrivial. -/ theorem mem_affine_span_iff_eq_affine_combination (k : Type u_1) (V : Type u_2) {P : Type u_3} [ring k] [add_comm_group V] [module k V] [add_torsor V P] {ι : Type u_4} [nontrivial k] {p1 : P} {p : ι → P} : p1 ∈ affine_span k (set.range p) ↔ ∃ (s : finset ι), ∃ (w : ι → k), ∃ (hw : (finset.sum s fun (i : ι) => w i) = 1), p1 = coe_fn (finset.affine_combination s p) w := sorry /-- The centroid lies in the affine span if the number of points, converted to `k`, is not zero. 
-/ theorem centroid_mem_affine_span_of_cast_card_ne_zero {k : Type u_1} {V : Type u_2} {P : Type u_3} [division_ring k] [add_comm_group V] [module k V] [add_torsor V P] {ι : Type u_4} {s : finset ι} (p : ι → P) (h : ↑(finset.card s) ≠ 0) : finset.centroid k s p ∈ affine_span k (set.range p) := affine_combination_mem_affine_span (finset.sum_centroid_weights_eq_one_of_cast_card_ne_zero s h) p /-- In the characteristic zero case, the centroid lies in the affine span if the number of points is not zero. -/ theorem centroid_mem_affine_span_of_card_ne_zero (k : Type u_1) {V : Type u_2} {P : Type u_3} [division_ring k] [add_comm_group V] [module k V] [add_torsor V P] {ι : Type u_4} [char_zero k] {s : finset ι} (p : ι → P) (h : finset.card s ≠ 0) : finset.centroid k s p ∈ affine_span k (set.range p) := affine_combination_mem_affine_span (finset.sum_centroid_weights_eq_one_of_card_ne_zero k s h) p /-- In the characteristic zero case, the centroid lies in the affine span if the set is nonempty. -/ theorem centroid_mem_affine_span_of_nonempty (k : Type u_1) {V : Type u_2} {P : Type u_3} [division_ring k] [add_comm_group V] [module k V] [add_torsor V P] {ι : Type u_4} [char_zero k] {s : finset ι} (p : ι → P) (h : finset.nonempty s) : finset.centroid k s p ∈ affine_span k (set.range p) := affine_combination_mem_affine_span (finset.sum_centroid_weights_eq_one_of_nonempty k s h) p /-- In the characteristic zero case, the centroid lies in the affine span if the number of points is `n + 1`. -/ theorem centroid_mem_affine_span_of_card_eq_add_one (k : Type u_1) {V : Type u_2} {P : Type u_3} [division_ring k] [add_comm_group V] [module k V] [add_torsor V P] {ι : Type u_4} [char_zero k] {s : finset ι} (p : ι → P) {n : ℕ} (h : finset.card s = n + 1) : finset.centroid k s p ∈ affine_span k (set.range p) := affine_combination_mem_affine_span (finset.sum_centroid_weights_eq_one_of_card_eq_add_one k s h) p namespace affine_map -- TODO: define `affine_map.proj`, `affine_map.fst`, `affine_map.snd` /-- A weighted sum, as an affine map on the points involved. -/ def weighted_vsub_of_point {k : Type u_1} {V : Type u_2} (P : Type u_3) [comm_ring k] [add_comm_group V] [module k V] [add_torsor V P] {ι : Type u_4} (s : finset ι) (w : ι → k) : affine_map k ((ι → P) × P) V := mk (fun (p : (ι → P) × P) => coe_fn (finset.weighted_vsub_of_point s (prod.fst p) (prod.snd p)) w) (finset.sum s fun (i : ι) => w i • (linear_map.comp (linear_map.proj i) (linear_map.fst k (ι → V) V) - linear_map.snd k (ι → V) V)) sorry
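An illustration added alongside the ported declarations above (not part of the original file): unfolding `centroid_def`, the centroid is the affine combination taken with the constant weights `(card s)⁻¹`, so whenever the cardinality of `s` is nonzero in `k` the weights sum to 1, and membership of the centroid in the affine span (`centroid_mem_affine_span_of_cast_card_ne_zero`) is exactly `affine_combination_mem_affine_span` applied to that sum. In plain notation,

\[
  \operatorname{centroid}_k(s,p) \;=\; \text{the affine combination of } p \text{ over } s \text{ with weights } w_i = \frac{1}{|s|},
  \qquad
  \sum_{i \in s} w_i \;=\; |s| \cdot \frac{1}{|s|} \;=\; 1 \quad \text{in } k,\ \text{assuming } |s| \neq 0 \text{ in } k.
\]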
program exercise_3_4 ! sum of the array elements whose indices step by 3, starting from the 5th element implicit none character(*), parameter :: input_file = "../data/input.txt", output_file = "output.txt" integer ::In = 0, Out = 0, N=0 real(4) :: SumA=0 real(4), allocatable :: A (:) open (file=input_file, newunit=In) read (In, *) N ! a single number: the array size allocate (A(N)) read (In, *) A ! read the next N elements from the file close (In) SumA = Sum(A(5:N:3)) ! the right approach - saves memory: only every 3rd element ends up in cache; traverses from the 5th element up to element N with stride 3 open (file=output_file, newunit=Out) write (Out, '(F0.2)') A ! keep 2 digits after the decimal point write (Out, *) SumA close (Out) end program exercise_3_4
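A brief note on the array section used in the exercise above (added for illustration; not part of the original source): in Fortran, `A(5:N:3)` selects the elements with indices 5, 8, 11, ... up to at most N, so the computed quantity is

\[
  \text{SumA} \;=\; \sum_{\substack{j \ge 0 \\ 5 + 3j \le N}} A(5 + 3j).
\]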
/- Copyright (c) 2020 Johan Commelin. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Johan Commelin -/ import Mathlib.PrePort import Mathlib.Lean3Lib.init.default import Mathlib.data.mv_polynomial.default import Mathlib.data.fintype.card import Mathlib.PostPort universes u_1 u_3 u_2 namespace Mathlib /-! # Homogeneous polynomials A multivariate polynomial `φ` is homogeneous of degree `n` if all monomials occuring in `φ` have degree `n`. ## Main definitions/lemmas * `is_homogeneous φ n`: a predicate that asserts that `φ` is homogeneous of degree `n`. * `homogeneous_component n`: the additive morphism that projects polynomials onto their summand that is homogeneous of degree `n`. * `sum_homogeneous_component`: every polynomial is the sum of its homogeneous components -/ namespace mv_polynomial /- TODO * create definition for `∑ i in d.support, d i` * define graded rings, and show that mv_polynomial is an example -/ /-- A multivariate polynomial `φ` is homogeneous of degree `n` if all monomials occuring in `φ` have degree `n`. -/ def is_homogeneous {σ : Type u_1} {R : Type u_3} [comm_semiring R] (φ : mv_polynomial σ R) (n : ℕ) := ∀ {d : σ →₀ ℕ}, coeff d φ ≠ 0 → (finset.sum (finsupp.support d) fun (i : σ) => coe_fn d i) = n theorem is_homogeneous_monomial {σ : Type u_1} {R : Type u_3} [comm_semiring R] (d : σ →₀ ℕ) (r : R) (n : ℕ) (hn : (finset.sum (finsupp.support d) fun (i : σ) => coe_fn d i) = n) : is_homogeneous (monomial d r) n := sorry theorem is_homogeneous_C (σ : Type u_1) {R : Type u_3} [comm_semiring R] (r : R) : is_homogeneous (coe_fn C r) 0 := sorry theorem is_homogeneous_zero (σ : Type u_1) (R : Type u_3) [comm_semiring R] (n : ℕ) : is_homogeneous 0 n := fun (d : σ →₀ ℕ) (hd : coeff d 0 ≠ 0) => false.elim (hd (coeff_zero d)) theorem is_homogeneous_one (σ : Type u_1) (R : Type u_3) [comm_semiring R] : is_homogeneous 1 0 := is_homogeneous_C σ 1 theorem is_homogeneous_X {σ : Type u_1} (R : Type u_3) [comm_semiring R] (i : σ) : is_homogeneous (X i) 1 := sorry namespace is_homogeneous theorem coeff_eq_zero {σ : Type u_1} {R : Type u_3} [comm_semiring R] {φ : mv_polynomial σ R} {n : ℕ} (hφ : is_homogeneous φ n) (d : σ →₀ ℕ) (hd : (finset.sum (finsupp.support d) fun (i : σ) => coe_fn d i) ≠ n) : coeff d φ = 0 := eq.mp (Eq._oldrec (Eq.refl (¬coeff d φ ≠ 0)) (propext not_not)) (mt hφ hd) theorem inj_right {σ : Type u_1} {R : Type u_3} [comm_semiring R] {φ : mv_polynomial σ R} {m : ℕ} {n : ℕ} (hm : is_homogeneous φ m) (hn : is_homogeneous φ n) (hφ : φ ≠ 0) : m = n := sorry theorem add {σ : Type u_1} {R : Type u_3} [comm_semiring R] {φ : mv_polynomial σ R} {ψ : mv_polynomial σ R} {n : ℕ} (hφ : is_homogeneous φ n) (hψ : is_homogeneous ψ n) : is_homogeneous (φ + ψ) n := sorry theorem sum {σ : Type u_1} {R : Type u_3} [comm_semiring R] {ι : Type u_2} (s : finset ι) (φ : ι → mv_polynomial σ R) (n : ℕ) (h : ∀ (i : ι), i ∈ s → is_homogeneous (φ i) n) : is_homogeneous (finset.sum s fun (i : ι) => φ i) n := sorry theorem mul {σ : Type u_1} {R : Type u_3} [comm_semiring R] {φ : mv_polynomial σ R} {ψ : mv_polynomial σ R} {m : ℕ} {n : ℕ} (hφ : is_homogeneous φ m) (hψ : is_homogeneous ψ n) : is_homogeneous (φ * ψ) (m + n) := sorry theorem prod {σ : Type u_1} {R : Type u_3} [comm_semiring R] {ι : Type u_2} (s : finset ι) (φ : ι → mv_polynomial σ R) (n : ι → ℕ) (h : ∀ (i : ι), i ∈ s → is_homogeneous (φ i) (n i)) : is_homogeneous (finset.prod s fun (i : ι) => φ i) (finset.sum s fun (i : ι) => n i) := sorry theorem total_degree {σ : Type u_1} {R : Type u_3} 
[comm_semiring R] {φ : mv_polynomial σ R} {n : ℕ} (hφ : is_homogeneous φ n) (h : φ ≠ 0) : total_degree φ = n := sorry end is_homogeneous /-- `homogeneous_component n φ` is the part of `φ` that is homogeneous of degree `n`. See `sum_homogeneous_component` for the statement that `φ` is equal to the sum of all its homogeneous components. -/ def homogeneous_component {σ : Type u_1} {R : Type u_3} [comm_semiring R] (n : ℕ) : linear_map R (mv_polynomial σ R) (mv_polynomial σ R) := linear_map.comp (submodule.subtype (finsupp.supported R R (set_of fun (d : σ →₀ ℕ) => (finset.sum (finsupp.support d) fun (i : σ) => coe_fn d i) = n))) (finsupp.restrict_dom R R (set_of fun (d : σ →₀ ℕ) => (finset.sum (finsupp.support d) fun (i : σ) => coe_fn d i) = n)) theorem coeff_homogeneous_component {σ : Type u_1} {R : Type u_3} [comm_semiring R] (n : ℕ) (φ : mv_polynomial σ R) (d : σ →₀ ℕ) : coeff d (coe_fn (homogeneous_component n) φ) = ite ((finset.sum (finsupp.support d) fun (i : σ) => coe_fn d i) = n) (coeff d φ) 0 := sorry theorem homogeneous_component_apply {σ : Type u_1} {R : Type u_3} [comm_semiring R] (n : ℕ) (φ : mv_polynomial σ R) : coe_fn (homogeneous_component n) φ = finset.sum (finset.filter (fun (d : σ →₀ ℕ) => (finset.sum (finsupp.support d) fun (i : σ) => coe_fn d i) = n) (finsupp.support φ)) fun (d : σ →₀ ℕ) => monomial d (coeff d φ) := sorry theorem homogeneous_component_is_homogeneous {σ : Type u_1} {R : Type u_3} [comm_semiring R] (n : ℕ) (φ : mv_polynomial σ R) : is_homogeneous (coe_fn (homogeneous_component n) φ) n := sorry theorem homogeneous_component_zero {σ : Type u_1} {R : Type u_3} [comm_semiring R] (φ : mv_polynomial σ R) : coe_fn (homogeneous_component 0) φ = coe_fn C (coeff 0 φ) := sorry theorem homogeneous_component_eq_zero' {σ : Type u_1} {R : Type u_3} [comm_semiring R] (n : ℕ) (φ : mv_polynomial σ R) (h : ∀ (d : σ →₀ ℕ), d ∈ finsupp.support φ → (finset.sum (finsupp.support d) fun (i : σ) => coe_fn d i) ≠ n) : coe_fn (homogeneous_component n) φ = 0 := sorry theorem homogeneous_component_eq_zero {σ : Type u_1} {R : Type u_3} [comm_semiring R] (n : ℕ) (φ : mv_polynomial σ R) (h : total_degree φ < n) : coe_fn (homogeneous_component n) φ = 0 := sorry theorem sum_homogeneous_component {σ : Type u_1} {R : Type u_3} [comm_semiring R] (φ : mv_polynomial σ R) : (finset.sum (finset.range (total_degree φ + 1)) fun (i : ℕ) => coe_fn (homogeneous_component i) φ) = φ := sorry
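A small usage sketch for the declarations above (added for illustration; it follows the names and signatures as displayed, and the auto-ported Lean dialect may need the binders adjusted): `is_homogeneous_X` gives each variable degree 1 and `is_homogeneous.mul` adds degrees, so a product of two variables is homogeneous of degree 1 + 1.

example {σ : Type u_1} {R : Type u_3} [comm_semiring R] (i j : σ) :
  mv_polynomial.is_homogeneous (mv_polynomial.X i * mv_polynomial.X j : mv_polynomial σ R) (1 + 1) :=
mv_polynomial.is_homogeneous.mul (mv_polynomial.is_homogeneous_X R i) (mv_polynomial.is_homogeneous_X R j)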
Formal statement is: lemma uniformity_bot: "uniformity \<noteq> bot" Informal statement is: The uniformity filter of a uniform space is never the bottom (trivial) filter.
module modules_06_b implicit none private public b contains integer function b() b = 5 end function end module module modules_06_a use modules_06_b, only: b implicit none private public a contains integer function a() a = 3 + b() end function end module
theory Lappend_literates imports Main "$HIPSTER_HOME/IsaHipster" begin setup Tactic_Data.set_coinduct_sledgehammer codatatype (lset: 'a) Llist = lnull: LNil | LCons (lhd: 'a) (ltl: "'a Llist") where "ltl LNil = LNil" primcorec literates :: "('a \<Rightarrow> 'a) \<Rightarrow> 'a \<Rightarrow> 'a Llist" where "literates f x = LCons x (literates f (f x))" primcorec lappend :: "'a Llist \<Rightarrow> 'a Llist \<Rightarrow> 'a Llist" where "lnull xs \<Longrightarrow> lnull ys \<Longrightarrow> lnull (lappend xs ys)" | "lhd (lappend xs ys) = lhd (if lnull xs then ys else xs)" | "ltl (lappend xs ys) = (if lnull xs then ltl ys else lappend (ltl xs) ys)" (* Need obs to explore with literates *) datatype 'a Lst = Emp | Cons "'a" "'a Lst" lemma "\<exists>(n::integer). n < 0" using neg_less_0_iff_less zero_less_one by blast lemma integerz [simp]: "(\<forall>(n::integer). \<not> n \<le> 0) \<Longrightarrow> False" by blast fun obsLList :: "int \<Rightarrow> 'a Llist \<Rightarrow> 'a Lst" where "obsLList n s = (if (n \<le> 0) then Emp else Cons (lhd s) (obsLList (n - 1) (ltl s)))" (*hipster_obs Llist Lst obsLList literates lappend*) lemma lemma_a [thy_expl]: "lappend (literates z x2) y = literates z x2" apply (coinduction arbitrary: x2 y z rule: Lappend_literates.Llist.coinduct_strong) apply simp by auto lemma lemma_aa [thy_expl]: "lappend (LCons z (literates x2 x3)) y = LCons z (literates x2 x3)" apply (coinduction arbitrary: x2 x3 y z rule: Lappend_literates.Llist.coinduct_strong) by (simp_all add: lemma_a) theorem lappend_literates: "lappend (literates f x) xs = literates f x" (*by hipster_coinduct_sledgehammer works from the start *) by (simp add: lemma_a) end
lemma compactI: assumes "\<And>C. \<forall>t\<in>C. open t \<Longrightarrow> s \<subseteq> \<Union>C \<Longrightarrow> \<exists>C'. C' \<subseteq> C \<and> finite C' \<and> s \<subseteq> \<Union>C'" shows "compact s"
This is the final piece for my final major project, a hand-drawn animation examining the concept of nostalgia. 'Fragile' is a hand-drawn animation created for my final major project at London College of Fashion. It explores the concept of nostalgia and tells the story of a forgotten doll that comes to life. I animated the film, and David Marsh created the music.
lemma poly_reflect_poly_0 [simp]: "poly (reflect_poly p) 0 = lead_coeff p"
{-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE Strict #-} {-# LANGUAGE FlexibleContexts #-} module CPVO.IO.Fortran ( loadFile ) where --import Numeric.LinearAlgebra --import Numeric.LinearAlgebra.Data hiding (find) --import Data.List (findIndex,groupBy) --import Data.Maybe (fromJust) --import qualified Language.C.Inline as C loadFile :: IO () loadFile = do putStrLn "=======done : [email protected]"
Formal statement is: lemma closedin_path_component_locally_path_connected: assumes "locally path_connected S" shows "closedin (top_of_set S) (path_component_set S x)" Informal statement is: If $S$ is locally path-connected, then the path-component of $x$ in $S$ is closed in $S$.
State Before: n : ℕ c✝ c : Composition n ⊢ c = ones n ↔ length c = n State After: case mp n : ℕ c✝ c : Composition n ⊢ c = ones n → length c = n case mpr n : ℕ c✝ c : Composition n ⊢ length c = n → c = ones n Tactic: constructor State Before: case mp n : ℕ c✝ c : Composition n ⊢ c = ones n → length c = n State After: case mp n : ℕ c : Composition n ⊢ length (ones n) = n Tactic: rintro rfl State Before: case mp n : ℕ c : Composition n ⊢ length (ones n) = n State After: no goals Tactic: exact ones_length n State Before: case mpr n : ℕ c✝ c : Composition n ⊢ length c = n → c = ones n State After: case mpr n : ℕ c✝ c : Composition n ⊢ ¬c = ones n → ¬length c = n Tactic: contrapose State Before: case mpr n : ℕ c✝ c : Composition n ⊢ ¬c = ones n → ¬length c = n State After: case mpr n : ℕ c✝ c : Composition n H : ¬c = ones n length_n : length c = n ⊢ False Tactic: intro H length_n State Before: case mpr n : ℕ c✝ c : Composition n H : ¬c = ones n length_n : length c = n ⊢ False State After: case mpr n : ℕ c✝ c : Composition n H : ¬c = ones n length_n : length c = n ⊢ n < n Tactic: apply lt_irrefl n State Before: case mpr n : ℕ c✝ c : Composition n H : ¬c = ones n length_n : length c = n ⊢ n < n State After: no goals Tactic: calc n = ∑ i : Fin c.length, 1 := by simp [length_n] _ < ∑ i : Fin c.length, c.blocksFun i := by { obtain ⟨i, hi, i_blocks⟩ : ∃ i ∈ c.blocks, 1 < i := ne_ones_iff.1 H rw [← ofFn_blocksFun, mem_ofFn c.blocksFun, Set.mem_range] at hi obtain ⟨j : Fin c.length, hj : c.blocksFun j = i⟩ := hi rw [← hj] at i_blocks exact Finset.sum_lt_sum (fun i _ => by simp [blocksFun]) ⟨j, Finset.mem_univ _, i_blocks⟩ } _ = n := c.sum_blocksFun State Before: n : ℕ c✝ c : Composition n H : ¬c = ones n length_n : length c = n ⊢ n = ∑ i : Fin (length c), 1 State After: no goals Tactic: simp [length_n] State Before: n : ℕ c✝ c : Composition n H : ¬c = ones n length_n : length c = n i✝ : ℕ j : Fin (length c) i_blocks : 1 < blocksFun c j hj : blocksFun c j = i✝ i : Fin (length c) x✝ : i ∈ Finset.univ ⊢ 1 ≤ blocksFun c i State After: no goals Tactic: simp [blocksFun]
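Restating the calc step recorded in the trace above in plain notation (an added illustration, not part of the trace): under the assumptions `length c = n` and `c ≠ ones n`, some block of `c` has size greater than 1 while every block has size at least 1, which yields the contradiction

\[
  n \;=\; \sum_{i=1}^{\mathrm{length}\,c} 1 \;<\; \sum_{i=1}^{\mathrm{length}\,c} \mathrm{blocksFun}\,c\,i \;=\; n,
\]

so a composition of `n` whose length is `n` must be `ones n`.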
Require Import floyd.proofauto. Local Open Scope logic. Require Import tweetnacl20140427.split_array_lemmas. Require Import ZArith. Require Import tweetnacl20140427.tweetNaclBase. Require Import tweetnacl20140427.Salsa20. Require Import tweetnacl20140427.tweetnaclVerifiableC. Require Import tweetnacl20140427.verif_salsa_base. Require Import tweetnacl20140427.spec_salsa. Require Import veric.expr_lemmas3. Opaque Snuffle20. Opaque Snuffle.Snuffle. Opaque prepare_data. Opaque fcore_result. Lemma L32_spec_ok: semax_body SalsaVarSpecs SalsaFunSpecs f_L32 L32_spec. Proof. start_function. Time forward. (*8.8*) Time entailer!. (*0.8*) assert (W: Int.zwordsize = 32). reflexivity. assert (U: Int.unsigned Int.iwordsize=32). reflexivity. simpl. remember (Int.ltu c Int.iwordsize) as d. symmetry in Heqd. destruct d; simpl. { clear Heqd. remember (Int.ltu (Int.sub (Int.repr 32) c) Int.iwordsize) as z. symmetry in Heqz. destruct z. - simpl; split; trivial. split. 2: split; trivial. apply ltu_inv in Heqz. unfold Int.sub in *. rewrite (Int.unsigned_repr 32) in *; try (rewrite int_max_unsigned_eq; omega). rewrite Int.unsigned_repr in Heqz. 2: rewrite int_max_unsigned_eq; omega. unfold Int.rol, Int.shl, Int.shru. rewrite or_repr. rewrite Z.mod_small, W; simpl; try omega. rewrite Int.unsigned_repr. 2: rewrite int_max_unsigned_eq; omega. rewrite Int.and_mone. trivial. - apply ltu_false_inv in Heqz. rewrite U in *. unfold Int.sub in Heqz. rewrite (Int.unsigned_repr 32), Int.unsigned_repr in Heqz. omega. rewrite int_max_unsigned_eq; omega. rewrite int_max_unsigned_eq; omega. } { apply ltu_false_inv in Heqd. rewrite U in *. omega. } Time Qed. (*0.9*) Lemma ld32_spec_ok: semax_body SalsaVarSpecs SalsaFunSpecs f_ld32 ld32_spec. Proof. start_function. destruct B as (((b0, b1), b2), b3). simpl. specialize Byte_max_unsigned_Int_max_unsigned; intros BND. assert (RNG3:= Byte.unsigned_range_2 b3). assert (RNG2:= Byte.unsigned_range_2 b2). assert (RNG1:= Byte.unsigned_range_2 b1). assert (RNG0:= Byte.unsigned_range_2 b0). Time forward. (*1.8*) Time entailer!; omega. (*1.1*) Time forward. (*2*) Time entailer!; omega. (*1.1*) Time forward. (*1.1*) Time forward. (*2.2*) Time entailer!; omega. (*1.3*) Time forward. (*1.5*) drop_LOCAL 1%nat. Time forward. Time entailer!; omega. (*1.3*) Time forward. (*5.2*) Time entailer!. assert (WS: Int.zwordsize = 32). reflexivity. assert (TP: two_p 8 = Byte.max_unsigned + 1). reflexivity. assert (BMU: Byte.max_unsigned = 255). reflexivity. simpl. repeat rewrite Int.shifted_or_is_add; try repeat rewrite Int.unsigned_repr; try omega. f_equal. f_equal. simpl. rewrite Z.mul_add_distr_r. rewrite (Zmult_comm (Z.pow_pos 2 8)). rewrite (Zmult_comm (Z.pow_pos 2 16)). rewrite (Zmult_comm (Z.pow_pos 2 24)). simpl. repeat rewrite <- two_power_pos_correct. rewrite Z.mul_add_distr_r. rewrite Z.mul_add_distr_r. repeat rewrite <- Z.mul_assoc. rewrite <- Z.add_assoc. rewrite <- Z.add_assoc. rewrite Z.add_comm. f_equal. rewrite Z.add_comm. f_equal. rewrite Z.add_comm. f_equal. rewrite TP, BMU, Z.mul_add_distr_l, int_max_unsigned_eq. omega. rewrite TP, BMU, Z.mul_add_distr_l, int_max_unsigned_eq. omega. rewrite TP, BMU, Z.mul_add_distr_l, int_max_unsigned_eq. omega. Time Qed. (*6.7*) Fixpoint lendian (l:list byte): Z := match l with nil => 0 | h::t => Byte.unsigned h + 2^8 * lendian t end. Lemma lendian4 b0 b1 b2 b3: littleendian (b0,b1,b2,b3) = Int.repr(lendian [b0;b1;b2;b3]). Proof. simpl. rewrite Zplus_0_r. rewrite ! Z.mul_add_distr_l, ! (Z.mul_assoc _ (2^8)), <- ! Z.add_assoc; reflexivity. Qed. 
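(* Illustration added for this write-up, not part of the original development: a concrete instance of the little-endian fold [lendian] defined above, on some arbitrary byte values. Expected value: 1 + 2^8*0 + 2^16*0 + 2^24*2 = 33554433. *)
Example lendian_concrete_sketch :
  lendian [Byte.repr 1; Byte.repr 0; Byte.repr 0; Byte.repr 2] = 33554433.
Proof. reflexivity. Qed.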
Lemma lendian_nil: lendian [] = 0. Proof. reflexivity. Qed. Lemma lendian_singleton b: lendian [b] = Byte.unsigned b. Proof. simpl; omega. Qed. Lemma lendian_app: forall l1 l2, lendian (l1++l2) = lendian l1 + 2^(8*Zlength l1) * lendian l2. Proof. induction l1; intros. + rewrite Zlength_nil; simpl; omega. + simpl. rewrite IHl1. rewrite Zlength_cons; clear IHl1. rewrite ! Z.mul_add_distr_l, <- ! Z.add_assoc, Z.mul_assoc, Z.pow_pos_fold. f_equal. f_equal. rewrite <- Zpower_exp, <- Zmult_succ_r_reverse, Z.add_comm; trivial. omega. specialize (Zlength_nonneg l1); omega. Qed. Lemma lendian_range: forall l, 0 <= lendian l < 2^(8*Zlength l). Proof. induction l; simpl; intros. + omega. + rewrite Zlength_cons. destruct (Byte.unsigned_range a). assert (Z.pow_pos 2 8 = 256) by reflexivity. split. rewrite H1. apply Z.add_nonneg_nonneg; trivial; omega. rewrite <- Zmult_succ_r_reverse, Z.pow_add_r; [| specialize (Zlength_nonneg l); omega | omega ]. rewrite Z.mul_comm. change (Z.pow_pos 2 8) with (2^8). assert (Byte.unsigned a + lendian l * 2 ^ 8 < Byte.modulus + lendian l * 2 ^ 8). omega. eapply Z.lt_le_trans. apply H2. clear H2 H0. change Byte.modulus with 256. change (2^8) with 256. specialize (Z.mul_add_distr_r 1 (lendian l) 256). rewrite Z.mul_1_l. intros X; rewrite <- X; clear X. apply Zmult_le_compat_r; omega. Qed. Definition bendian l: Z := lendian (rev l). Lemma bendian_nil: bendian [] = 0. Proof. reflexivity. Qed. Lemma bendian_singleton b: bendian [b] = Byte.unsigned b. Proof. unfold bendian. simpl; omega. Qed. Lemma bendian_app l1 l2: bendian (l1++l2) = bendian l2 + 2^(8*Zlength l2) * bendian l1. Proof. unfold bendian. rewrite rev_app_distr, lendian_app, Zlength_rev; trivial. Qed. Lemma bendian_range l: 0 <= bendian l < 2^(8*Zlength l). Proof. unfold bendian. specialize (lendian_range (rev l)). rewrite Zlength_rev; trivial. Qed. Lemma Zlor_2powpos_add a b (n:positive) (B: 0<=b <Z.pow_pos 2 n): a * Z.pow_pos 2 n + b = Z.lor (a * Z.pow_pos 2 n) b. Proof. apply Byte.equal_same_bits; intros. rewrite Z.lor_spec. apply Byte.Z_add_is_or; trivial. intros. rewrite Z.pow_pos_fold in *. destruct (zlt j (Z.pos n)). + rewrite Z.mul_pow2_bits_low; simpl; trivial. + rewrite <- (positive_nat_Z n) in g, B. erewrite (Byte.Ztestbit_above _ b), andb_false_r. trivial. 2: eassumption. rewrite two_power_nat_equiv. apply B. Qed. Lemma Byte_unsigned_range_32 b: 0 <= Byte.unsigned b <= Int.max_unsigned. Proof. destruct (Byte.unsigned_range_2 b). specialize Byte_Int_max_unsigned; omega. Qed. Lemma Byte_unsigned_range_64 b: 0 <= Byte.unsigned b <= Int64.max_unsigned. Proof. destruct (Byte.unsigned_range_2 b). unfold Int64.max_unsigned; simpl. unfold Byte.max_unsigned in H0; simpl in H0; omega. Qed. Axiom myadmit: False. Lemma dl64_spec_ok: semax_body SalsaVarSpecs SalsaFunSpecs f_dl64 dl64_spec. Proof. start_function. destruct B as (((b0, b1), b2), b3). destruct C as (((c0, c1), c2), c3). unfold QuadByte2ValList; simpl. forward. simpl. rewrite Int.signed_repr. 2: rewrite int_min_signed_eq, int_max_signed_eq; omega. forward_for_simple_bound 8 (EX i:Z, (PROP () LOCAL (temp _x x; temp _u (Vlong (Int64.repr (bendian (sublist 0 i [b0;b1;b2;b3;c0;c1;c2;c3]))))) SEP (data_at Tsh (tarray tuchar 8) (map Vint (map Int.repr (map Byte.unsigned [b0;b1;b2;b3;c0;c1;c2;c3]))) x))). 1: solve [ entailer! ]. { rename H into I. forward. + entailer!. apply zero_ext_range'. change Int.zwordsize with 32; omega. + forward. entailer!. exfalso. (*tc_error tulong int*) apply myadmit. entailer!. clear H1 H0 H. f_equal. 
rewrite <- (sublist_rejoin 0 i (i+1)). 2: omega. 2: rewrite ! Zlength_cons, Zlength_nil; omega. rewrite pure_lemmas.sublist_singleton with (d:=Byte.zero). 2: rewrite ! Zlength_cons, Zlength_nil; omega. simpl. unfold Int64.or. rewrite Int64.shl_mul_two_p, (Int64.unsigned_repr 8). 2: unfold Int64.max_unsigned; simpl; omega. replace (Znth i [Byte.unsigned b0; Byte.unsigned b1; Byte.unsigned b2; Byte.unsigned b3; Byte.unsigned c0; Byte.unsigned c1; Byte.unsigned c2; Byte.unsigned c3] 0) with (Byte.unsigned (Znth i [b0; b1; b2; b3; c0; c1; c2; c3] Byte.zero)). 2: erewrite <- (Znth_map' Byte.unsigned) with (d:= Z.zero); [ reflexivity | apply I ]. rewrite zero_ext_inrange. 2: rewrite Int.unsigned_repr; [ apply Byte.unsigned_range_2 | apply Byte_unsigned_range_32 ]. rewrite Int.unsigned_repr. 2: apply Byte_unsigned_range_32. rewrite Int64.unsigned_repr. 2: apply Byte_unsigned_range_64. change (two_p 8) with 256. rewrite bendian_app, bendian_singleton. simpl. unfold Int64.mul. rewrite (Int64.unsigned_repr 256). 2: unfold Int64.max_unsigned; simpl; omega. rewrite Zplus_comm, Zmult_comm, Zlor_2powpos_add. 2: apply Byte.unsigned_range. f_equal. f_equal. remember (bendian (sublist 0 i [b0; b1; b2; b3; c0; c1; c2; c3])) as q. specialize (Int64.shifted_or_is_add (Int64.repr q) Int64.zero 8). change (two_p 8) with 256. rewrite Int64.unsigned_zero, Z.add_0_r. intros X; rewrite <- X, Int64.or_zero; clear X. 2: replace Int64.zwordsize with 64 by reflexivity; omega. 2: omega. rewrite Int64.shl_mul_two_p, (Int64.unsigned_repr 8). 2: unfold Int64.max_unsigned; simpl; omega. unfold Int64.mul. assert (Q: 0 <= q < 2^56). { specialize (bendian_range (sublist 0 i [b0; b1; b2; b3; c0; c1; c2; c3])). rewrite Zlength_sublist, Zminus_0_r, <- Heqq. intros. assert (2^(8 * i) <= 2^56) by (apply Z.pow_le_mono_r; omega). omega. omega. change (Zlength [b0; b1; b2; b3; c0; c1; c2; c3]) with 8; omega. } change (2^56) with 72057594037927936 in Q. change (two_p 8) with 256. change (Z.pow_pos 2 8) with 256. rewrite (Int64.unsigned_repr 256). 2: unfold Int64.max_unsigned; simpl; omega. rewrite (Int64.unsigned_repr q). 2: unfold Int64.max_unsigned; simpl; omega. rewrite Int64.unsigned_repr; trivial. unfold Int64.max_unsigned; simpl; omega. } forward. apply prop_right. clear H H0. unfold bendian. simpl. rewrite ! Z.mul_add_distr_l, ! (Z.mul_assoc _ (Z.pow_pos 2 8)), <- ! Z.add_assoc, ! Z.mul_0_r, Z.add_0_r. reflexivity. Qed. Lemma div_bound u n (N:1<n): 0 <= Int.unsigned u / n <= Int.max_unsigned. Proof. destruct (Int.unsigned_range u). split. apply Z_div_pos; try omega. assert (Int.unsigned u / n <Int.modulus). 2: unfold Int.max_unsigned; omega. apply Z.div_lt_upper_bound; try omega. specialize (Z.mul_lt_mono_nonneg 1 n (Int.unsigned u) (Int.modulus)). rewrite Z.mul_1_l. intros Q; apply Q; trivial. Qed. Lemma ST32_spec_ok: semax_body SalsaVarSpecs SalsaFunSpecs f_st32 st32_spec. Proof. start_function. remember (littleendian_invert u) as U. destruct U as [[[u0 u1] u2] u3]. Time forward_for_simple_bound 4 (EX i:Z, (PROP () LOCAL (temp _x x; temp _u (Vint (iterShr8 u (Z.to_nat i)))) SEP (data_at Tsh (tarray tuchar 4) (sublist 0 i (map Vint (map Int.repr (map Byte.unsigned ([u0;u1;u2;u3])))) ++ list_repeat (Z.to_nat(4-i)) Vundef) x))). { entailer!. } { rename H into I. Time assert_PROP (field_compatible (Tarray tuchar 4 noattr) [] x /\ isptr x) as FC_ptrX by solve [entailer!]. (*2.3*) destruct FC_ptrX as [FC ptrX]. Time forward. (*3.2*) Time forward. (*0.8*) rewrite Z.add_comm, Z2Nat.inj_add; try omega. Time entailer!. 
(*1.5*) unfold upd_Znth. autorewrite with sublist. rewrite field_at_data_at. simpl. unfold field_address. simpl. if_tac. 2: solve [contradiction]. replace (4 - (1 + i)) with (4-i-1) by omega. rewrite isptr_offset_val_zero; trivial. clear H. apply data_at_ext. rewrite Zplus_comm. assert (ZW: Int.zwordsize = 32) by reflexivity. assert (EIGHT: Int.unsigned (Int.repr 8) = 8). apply Int.unsigned_repr. rewrite int_max_unsigned_eq; omega. inv HeqU. clear - ZW EIGHT I. destruct (zeq i 0); subst; simpl. f_equal. f_equal. { rewrite Byte.unsigned_repr. rewrite (Fcore_Zaux.Zmod_mod_mult _ (2^8) (2^8)). 2: cbv; trivial. 2: cbv; intros; discriminate. rewrite (Fcore_Zaux.Zmod_mod_mult _ (2^16) (2^8)). 2: cbv; trivial. 2: cbv; intros; discriminate. rewrite <- (Int.zero_ext_mod 8). rewrite Int.repr_unsigned; trivial. rewrite ZW; omega. assert (0 <= ((Int.unsigned u mod Z.pow_pos 2 24) mod Z.pow_pos 2 16) mod Z.pow_pos 2 8 < Byte.modulus). apply Z_mod_lt. cbv; trivial. unfold Byte.max_unsigned. omega. } destruct (zeq i 1); subst; simpl. f_equal. f_equal. f_equal. { rewrite Byte.unsigned_repr. Focus 2. assert (0 <= (Int.unsigned u mod Z.pow_pos 2 24) mod Z.pow_pos 2 16 / Z.pow_pos 2 8 < Byte.modulus). Focus 2. unfold Byte.max_unsigned. omega. split. apply Z_div_pos. cbv; trivial. apply Z_mod_lt. cbv; trivial. apply Zdiv_lt_upper_bound. cbv; trivial. apply Z_mod_lt. cbv; trivial. apply Int.same_bits_eq. rewrite ZW; intros. rewrite Int.bits_zero_ext, Int.testbit_repr; try apply H. rewrite (Z.div_pow2_bits _ 8); try omega. rewrite (Int.Ztestbit_mod_two_p 16); try omega. rewrite (Int.Ztestbit_mod_two_p 24); try omega. rewrite Int.bits_shru; try omega. rewrite EIGHT, ZW. (* Ztest_Inttest.*) remember (zlt i 8). destruct s. repeat rewrite zlt_true. trivial. omega. omega. omega. rewrite zlt_false. trivial. omega. } destruct (zeq i 2); subst; simpl. f_equal. f_equal. f_equal. f_equal. { rewrite Byte.unsigned_repr. Focus 2. assert (0 <= Int.unsigned u mod Z.pow_pos 2 24 / Z.pow_pos 2 16 < Byte.modulus). 2: unfold Byte.max_unsigned; omega. split. apply Z_div_pos. cbv; trivial. apply Z_mod_lt. cbv; trivial. apply Zdiv_lt_upper_bound. cbv; trivial. apply Z_mod_lt. cbv; trivial. apply Int.same_bits_eq. rewrite ZW; intros. rewrite Int.bits_zero_ext, Int.testbit_repr; try apply H. rewrite Int.bits_shru; try omega. rewrite EIGHT, ZW. rewrite (Z.div_pow2_bits _ 16); try omega. rewrite (Int.Ztestbit_mod_two_p 24); try omega. (*rewrite Ztest_Inttest.*) remember (zlt i 8). destruct s. repeat rewrite zlt_true. rewrite Int.bits_shru, EIGHT, ZW. rewrite zlt_true. rewrite <- Z.add_assoc. reflexivity. omega. omega. omega. omega. rewrite zlt_false. trivial. omega. } destruct (zeq i 3); subst; simpl. + f_equal. f_equal. f_equal. f_equal. f_equal. rewrite Byte.unsigned_repr. Focus 2. assert (0 <= Int.unsigned u / Z.pow_pos 2 24 < Byte.modulus). 2: unfold Byte.max_unsigned; omega. split. apply Z_div_pos. cbv; trivial. apply Int.unsigned_range. apply Zdiv_lt_upper_bound. cbv; trivial. apply Int.unsigned_range. rewrite ! Int.shru_div_two_p. rewrite (Int.unsigned_repr 8); [| cbv; split; congruence ]. rewrite (Int.unsigned_repr (Int.unsigned u / two_p 8)), Zdiv.Zdiv_Zdiv; [ | cbv; congruence | cbv; congruence | ] . 2: apply div_bound; cbv; trivial. replace (two_p 8 * two_p 8)%Z with (two_p 16) by reflexivity. rewrite (Int.unsigned_repr (Int.unsigned u / two_p 16)), Zdiv.Zdiv_Zdiv; [ | cbv; congruence | cbv; congruence | ] . 2: apply div_bound; cbv; trivial. replace (two_p 16 * two_p 8)%Z with (two_p 24) by reflexivity. 
apply zero_ext_inrange. rewrite (Int.unsigned_repr (Int.unsigned u / Z.pow_pos 2 24)). 2: apply div_bound; cbv; trivial. assert (Int.unsigned u / Z.pow_pos 2 24 < two_p 8). 2: omega. apply Z.div_lt_upper_bound; trivial. apply Int.unsigned_range. + omega. } forward. Time Qed. (*4.9*) Fixpoint iter64Shr8 (u : int64) (n : nat) {struct n} : int64 := match n with | 0%nat => u | S n' => Int64.shru (iter64Shr8 u n') (Int64.repr 8) end. Definition iter64Shr8' (u : int64) (n : nat): int64 := Int64.shru u (Int64.mul (Int64.repr 8) (Int64.repr (Z.of_nat n))). Lemma iter64: forall n u (N: Z.of_nat n < 8), iter64Shr8 u n = iter64Shr8' u n. Proof. unfold iter64Shr8'. assert (W: Int64.iwordsize = Int64.repr 64) by reflexivity. induction n; simpl; intros. + rewrite Int64.mul_zero, Int64.shru_zero; trivial. + rewrite Zpos_P_of_succ_nat in *. rewrite IHn, Int64.shru_shru, Int64.mul_commut; clear IHn. - f_equal. specialize (Int64.mul_add_distr_l (Int64.repr (Z.of_nat n)) Int64.one (Int64.repr 8)). rewrite (Int64.mul_commut Int64.one), Int64.mul_one. intros X; rewrite <- X, Int64.mul_commut, Int64.add_unsigned; clear X. f_equal. f_equal. unfold Int64.one. rewrite 2 Int64.unsigned_repr; try reflexivity. unfold Int64.max_unsigned; simpl; omega. unfold Int64.max_unsigned; simpl; omega. - rewrite W, Int64.mul_signed, 2 Int64.signed_repr. unfold Int64.ltu. rewrite (Int64.unsigned_repr 64), if_true; trivial. rewrite Int64.unsigned_repr. omega. unfold Int64.max_unsigned; simpl; omega. unfold Int64.max_unsigned; simpl; omega. unfold Int64.min_signed, Int64.max_signed; simpl; omega. unfold Int64.min_signed, Int64.max_signed; simpl; omega. - rewrite W. unfold Int64.ltu. rewrite if_true; trivial. - rewrite W. unfold Int64.ltu. rewrite Int64.mul_signed, Int64.add_signed, if_true; trivial. rewrite (Int64.signed_repr 8). 2: unfold Int64.min_signed, Int64.max_signed; simpl; omega. rewrite (Int64.signed_repr (Z.of_nat n)). 2: unfold Int64.min_signed, Int64.max_signed; simpl; omega. rewrite Int64.signed_repr. 2: unfold Int64.min_signed, Int64.max_signed; simpl; omega. rewrite 2 Int64.unsigned_repr. omega. unfold Int64.max_unsigned; simpl; omega. unfold Int64.max_unsigned; simpl; omega. - omega. Qed. Lemma unsigned_repr' z (Q: 0 <= z < Byte.modulus): Byte.unsigned (Byte.repr z) = z. Proof. apply Byte.unsigned_repr. unfold Byte.max_unsigned. omega. Qed. Lemma shru_shru x n m (NM:Int64.unsigned n + Int64.unsigned m <= Int64.max_unsigned): Int64.shru (Int64.shru x n) m = Int64.shru x (Int64.add n m). Proof. rewrite 3 Int64.shru_div_two_p. f_equal. specialize (Int64.unsigned_range n). specialize (Int64.unsigned_range m). specialize (Int64.unsigned_range x). intros X M N. rewrite Int64.unsigned_repr, Zdiv_Zdiv, <- two_p_is_exp, Int64.add_unsigned, Int64.unsigned_repr; trivial; try apply two_p_gt_ZERO; try omega. split. apply Z_div_pos; trivial. apply two_p_gt_ZERO; try omega. omega. assert (Int64.unsigned x / two_p (Int64.unsigned n) < Int64.max_unsigned +1). 2: omega. specialize (two_p_gt_ZERO (Int64.unsigned n)); intros A. apply Z.div_lt_upper_bound. omega. eapply Z.lt_le_trans. apply X. unfold Int64.max_unsigned. replace (Int64.modulus - 1 + 1) with Int64.modulus by omega. specialize (Zmult_le_compat_l 1 (two_p (Int64.unsigned n)) Int64.modulus). rewrite Z.mul_1_r, Z.mul_comm. intros Y; apply Y; omega. Qed. (* Lemma TS64_spec_ok: semax_body SalsaVarSpecs SalsaFunSpecs f_ts64 ts64_spec. Proof. start_function. remember (bigendian64_invert u) as U. destruct U as [B C]. destruct B as [[[b3 b2] b1] b0]. 
destruct C as [[[c3 c2] c1] c0]. (* unfold littleendian64_invert in HeqU. simpl in HeqU.*) (*unfold Sfor. forward. forward_seq.*) (*Parameter Data: Z -> list val.*) (*assert_PROP (isptr x) by entailer!. rename H into isptrX.*) Time forward_for_simple_bound 8 (EX i:Z, (PROP () LOCAL (temp _x x; temp _u (Vlong (iter64Shr8 u (Z.to_nat i)))) SEP (data_at Tsh (tarray tuchar 8) (list_repeat (Z.to_nat(8-i)) Vundef ++ sublist (8-i) 8 (map Vint (map Int.repr (map Byte.unsigned ([b3;b2;b1;b0;c3;c2;c1;c0]))))) x))). { entailer!. } 2: solve [forward]. { rename H into I. Time assert_PROP (field_compatible (Tarray tuchar 8 noattr) [] x /\ isptr x) as FC_ptrX by solve [entailer!]. destruct FC_ptrX as [FC ptrX].x Definition typecheck_expr := fix typecheck_expr (CS : compspecs) (Delta : tycontext) (e : expr) {struct e} : tc_assert := let tcr := typecheck_expr CS Delta in match e with | Econst_int _ Tvoid => tc_FF (invalid_expression e) | Econst_int _ (Tint I8 _ _) => tc_FF (invalid_expression e) | Econst_int _ (Tint I16 _ _) => tc_FF (invalid_expression e) | Econst_int _ (Tint I32 _ _) => tc_TT | Econst_int _ (Tint IBool _ _) => tc_FF (invalid_expression e) | Econst_int _ (Tlong _ _) => tc_FF (invalid_expression e) | Econst_int _ (Tfloat _ _) => tc_FF (invalid_expression e) | Econst_int _ (Tpointer _ _) => tc_FF (invalid_expression e) | Econst_int _ (Tarray _ _ _) => tc_FF (invalid_expression e) | Econst_int _ (Tfunction _ _ _) => tc_FF (invalid_expression e) | Econst_int _ (Tstruct _ _) => tc_FF (invalid_expression e) | Econst_int _ (Tunion _ _) => tc_FF (invalid_expression e) | Econst_float _ Tvoid => tc_FF (invalid_expression e) | Econst_float _ (Tint _ _ _) => tc_FF (invalid_expression e) | Econst_float _ (Tlong _ _) => tc_FF (invalid_expression e) | Econst_float _ (Tfloat F32 _) => tc_FF (invalid_expression e) | Econst_float _ (Tfloat F64 _) => tc_TT | Econst_float _ (Tpointer _ _) => tc_FF (invalid_expression e) | Econst_float _ (Tarray _ _ _) => tc_FF (invalid_expression e) | Econst_float _ (Tfunction _ _ _) => tc_FF (invalid_expression e) | Econst_float _ (Tstruct _ _) => tc_FF (invalid_expression e) | Econst_float _ (Tunion _ _) => tc_FF (invalid_expression e) | Econst_single _ Tvoid => tc_FF (invalid_expression e) | Econst_single _ (Tint _ _ _) => tc_FF (invalid_expression e) | Econst_single _ (Tlong _ _) => tc_FF (invalid_expression e) | Econst_single _ (Tfloat F32 _) => tc_TT | Econst_single _ (Tfloat F64 _) => tc_FF (invalid_expression e) | Econst_single _ (Tpointer _ _) => tc_FF (invalid_expression e) | Econst_single _ (Tarray _ _ _) => tc_FF (invalid_expression e) | Econst_single _ (Tfunction _ _ _) => tc_FF (invalid_expression e) | Econst_single _ (Tstruct _ _) => tc_FF (invalid_expression e) | Econst_single _ (Tunion _ _) => tc_FF (invalid_expression e) | Econst_long _ _ => tc_FF (invalid_expression e) | Evar id ty => match access_mode ty with | By_value _ => tc_FF (deref_byvalue ty) | By_reference => match get_var_type Delta id with | Some ty' => tc_bool (eqb_type ty ty') (mismatch_context_type ty ty') | None => tc_FF (var_not_in_tycontext Delta id) end | By_copy => tc_FF (deref_byvalue ty) | By_nothing => tc_FF (deref_byvalue ty) end | Etempvar id ty => match (temp_types Delta) ! 
id with | Some ty' => if (is_neutral_cast (fst ty') ty || same_base_type (fst ty') ty)%bool then if snd ty' then tc_TT else tc_initialized id ty else tc_FF (mismatch_context_type ty (fst ty')) | None => tc_FF (var_not_in_tycontext Delta id) end | Ederef a ty => match access_mode ty with | By_value _ => tc_FF (deref_byvalue ty) | By_reference => tc_andp (tc_andp (typecheck_expr CS Delta a) (tc_bool (is_pointer_type (typeof a)) (op_result_type e))) (tc_isptr a) | By_copy => tc_FF (deref_byvalue ty) | By_nothing => tc_FF (deref_byvalue ty) end | Eaddrof a ty => tc_andp (typecheck_lvalue CS Delta a) (tc_bool (is_pointer_type ty) (op_result_type e)) | Eunop op a ty => tc_andp (isUnOpResultType op a ty) (tcr a) | Ebinop op a1 a2 ty => tc_andp (tc_andp (isBinOpResultType op a1 a2 ty) (tcr a1)) (tcr a2) | Ecast a ty => tc_andp (tcr a) (isCastResultType (typeof a) ty a) | Efield a i ty => match access_mode ty with | By_value _ => tc_FF (deref_byvalue ty) | By_reference => tc_andp (typecheck_lvalue CS Delta a) match typeof a with | Tvoid => tc_FF (invalid_field_access e) | Tint _ _ _ => tc_FF (invalid_field_access e) | Tlong _ _ => tc_FF (invalid_field_access e) | Tfloat _ _ => tc_FF (invalid_field_access e) | Tpointer _ _ => tc_FF (invalid_field_access e) | Tarray _ _ _ => tc_FF (invalid_field_access e) | Tfunction _ _ _ => tc_FF (invalid_field_access e) | Tstruct id _ => match cenv_cs ! id with | Some co => match Ctypes.field_offset cenv_cs i (co_members co) with | Errors.OK _ => tc_TT | Errors.Error _ => tc_FF (invalid_struct_field i id) end | None => tc_FF (invalid_composite_name id) end | Tunion id _ => match cenv_cs ! id with | Some _ => tc_TT | None => tc_FF (invalid_composite_name id) end end | By_copy => tc_FF (deref_byvalue ty) | By_nothing => tc_FF (deref_byvalue ty) end | Esizeof ty t => tc_andp (tc_bool (complete_type cenv_cs ty) (invalid_expression e)) (tc_bool (eqb_type t (Tint I32 Unsigned noattr)) (invalid_expression e)) | Ealignof ty t => tc_andp (tc_bool (complete_type cenv_cs ty) (invalid_expression e)) (tc_bool (eqb_type t (Tint I32 Unsigned noattr)) (invalid_expression e)) end with typecheck_lvalue (CS : compspecs) (Delta : tycontext) (e : expr) {struct e} : tc_assert := match e with | Econst_int _ _ => tc_FF (invalid_lvalue e) | Econst_float _ _ => tc_FF (invalid_lvalue e) | Econst_single _ _ => tc_FF (invalid_lvalue e) | Econst_long _ _ => tc_FF (invalid_lvalue e) | Evar id ty => match get_var_type Delta id with | Some ty' => tc_bool (eqb_type ty ty') (mismatch_context_type ty ty') | None => tc_FF (var_not_in_tycontext Delta id) end | Etempvar _ _ => tc_FF (invalid_lvalue e) | Ederef a _ => tc_andp (tc_andp (typecheck_expr CS Delta a) (tc_bool (is_pointer_type (typeof a)) (op_result_type e))) (tc_isptr a) | Eaddrof _ _ => tc_FF (invalid_lvalue e) | Eunop _ _ _ => tc_FF (invalid_lvalue e) | Ebinop _ _ _ _ => tc_FF (invalid_lvalue e) | Ecast _ _ => tc_FF (invalid_lvalue e) | Efield a i _ => tc_andp (typecheck_lvalue CS Delta a) match typeof a with | Tvoid => tc_FF (invalid_field_access e) | Tint _ _ _ => tc_FF (invalid_field_access e) | Tlong _ _ => tc_FF (invalid_field_access e) | Tfloat _ _ => tc_FF (invalid_field_access e) | Tpointer _ _ => tc_FF (invalid_field_access e) | Tarray _ _ _ => tc_FF (invalid_field_access e) | Tfunction _ _ _ => tc_FF (invalid_field_access e) | Tstruct id _ => match cenv_cs ! 
id with | Some co => match Ctypes.field_offset cenv_cs i (co_members co) with | Errors.OK _ => tc_TT | Errors.Error _ => tc_FF (invalid_struct_field i id) end | None => tc_FF (invalid_composite_name id) end | Tunion id _ => match cenv_cs ! id with | Some _ => tc_TT | None => tc_FF (invalid_composite_name id) end end | Esizeof _ _ => tc_FF (invalid_lvalue e) | Ealignof _ _ => tc_FF (invalid_lvalue e) end. set (e1:=(Ederef (Ebinop Oadd (Etempvar _x (tptr tuchar)) (Ebinop Osub (Econst_int (Int.repr 7) tint) (Etempvar _i tint) tint) (tptr tuchar)) tuchar)). set (e2:=(Ecast (Etempvar _u tulong) tuchar)). assert (XX: typeof e1 = tuchar) by reflexivity. set (TC:=tc_expr Delta (Ecast e2 tuchar)). cbv in TC. simpl in TC. Eval compute in (tc_expr Delta (Ecast e2 tuchar)). Time forward. apply andp_right. apply andp_right. solve [entailer!]. entailer. admit. (*!! typecheck_error (invalid_cast_result tuchar tuchar)*) solve [entailer!]. Time forward. entailer. admit. (*another tc_error*) rewrite Z.add_comm, Z2Nat.inj_add; try omega. Time entailer!. (*1.5*) unfold upd_Znth. clear H. autorewrite with sublist. replace (8 - (1 + i)) with (7-i) by omega. replace (7 - i + 1) with (8-i) by omega. replace (i+(8-i)) with 8 by omega. rewrite field_at_data_at. simpl. unfold field_address. simpl. if_tac. 2: solve [contradiction]. rewrite isptr_offset_val_zero; [| trivial]. clear H. apply data_at_ext. f_equal. rewrite <- (sublist_rejoin (7-i) (7-i+1) 8). 2: omega. 2: unfold Zlength; simpl; omega. rewrite pure_lemmas.sublist_singleton with (d:=Vundef); simpl. 2: unfold Zlength; simpl; omega. replace (7 - i + 1) with (8-i) by omega. f_equal. rewrite iter64; try rewrite Z2Nat.id; try omega. unfold iter64Shr8', Int64.shru. rewrite Int64.mul_signed. rewrite 2 Int64.signed_repr; try rewrite Z2Nat.id; try unfold Int64.min_signed, Int64.max_signed; simpl; try omega. rewrite (Int64.unsigned_repr (8 * i)). 2: unfold Int64.max_unsigned; simpl; omega. specialize (Int64.unsigned_range u); specialize (Z.pow_pos_nonneg 2 (8*i)); intros NN U. rewrite Int64.unsigned_repr. Focus 2. rewrite Z.shiftr_div_pow2 by omega. split. apply Z_div_pos; omega. assert (Int64.unsigned u / 2 ^ (8 * i) < Int64.modulus). 2: solve [unfold Int64.max_unsigned; omega]. apply Zdiv_lt_upper_bound. omega. assert (Int64.modulus <= Int64.modulus * 2 ^ (8 * i)). 2: omega. apply Z.le_mul_diag_r; omega. assert (ADD16: Int64.add (Int64.repr 8) (Int64.repr 8) = Int64.repr 16) by reflexivity. assert (ADD24: Int64.add (Int64.repr 8) (Int64.add (Int64.repr 8) (Int64.repr 8)) = Int64.repr 24) by reflexivity. assert (ADD32: Int64.add (Int64.repr 8) (Int64.add (Int64.repr 8) (Int64.add (Int64.repr 8) (Int64.repr 8))) = Int64.repr 32) by reflexivity. assert (ADD40: Int64.add (Int64.repr 8) (Int64.add (Int64.repr 8) (Int64.add (Int64.repr 8) (Int64.add (Int64.repr 8) (Int64.repr 8)))) = Int64.repr 40) by reflexivity. assert (ADD48: Int64.add (Int64.repr 8) (Int64.add (Int64.repr 8) (Int64.add (Int64.repr 8) (Int64.add (Int64.repr 8) (Int64.add (Int64.repr 8) (Int64.repr 8))))) = Int64.repr 48) by reflexivity. assert (ADD56: Int64.add (Int64.repr 8) (Int64.add (Int64.repr 8) (Int64.add (Int64.repr 8) (Int64.add (Int64.repr 8) (Int64.add (Int64.repr 8) (Int64.add (Int64.repr 8) (Int64.repr 8)))))) = Int64.repr 56) by reflexivity. assert (UBND: forall n m, Pos.add m n=64%positive -> 0 <= Int64.unsigned u / Z.pow_pos 2 n < Z.pow_pos 2 m). { intros. destruct (Int64.unsigned_range u). split. apply Z_div_pos; trivial. specialize (Fcore_Zaux.Zpower_pos_gt_0 2 n); omega. 
apply Zdiv_lt_upper_bound; trivial. specialize (Fcore_Zaux.Zpower_pos_gt_0 2 n); omega. rewrite <- Zpower_pos_is_exp, H. change Int64.modulus with (Z.pow_pos 2 64) in H1; trivial. }(* assert (B1: 0 <= Int64.unsigned u / Z.pow_pos 2 56 <= Byte.max_unsigned). { destruct (UBND 56 8)%positive. reflexivity. replace Byte.max_unsigned with (Z.pow_pos 2 8 -1). omega. reflexivity. }*) assert (UNS_B_I64: Byte.max_unsigned <= Int64.max_unsigned) by (cbv; congruence). assert (UNS_B_I: Byte.max_unsigned <= Int.max_unsigned) by (cbv; congruence). destruct (zeq i 0). { subst i; simpl in *. unfold Znth; simpl. unfold bigendian64_invert in HeqU; inv HeqU. rewrite Z.shiftr_0_r. unfold destruct (zeq i 7). { subst; simpl in *. unfold Znth; simpl. (*specialize (UBND 56 8)%positive. rewrite Z.pow_pos_fold in UBND.*) rewrite ! shru_shru, ADD56. + rewrite Int64.shru_div_two_p, (Int64.unsigned_repr 56), two_p_correct. 2: unfold Int64.max_unsigned; simpl; omega. rewrite Int64.unsigned_repr. * rewrite zero_ext_inrange. f_equal; f_equal. - unfold bigendian64_invert in HeqU; inv HeqU. rewrite Byte.unsigned_repr. reflexivity. change Byte.max_unsigned with (Z.pow_pos 2 8 -1). specialize (UBND 56 8 (eq_refl _))%positive; omega. - rewrite Int.unsigned_repr, two_p_equiv. specialize (UBND 56 8 (eq_refl _))%positive. rewrite ! Z.pow_pos_fold in UBND. omega. specialize (UBND 56 8 (eq_refl _))%positive. rewrite ! Z.pow_pos_fold in UBND. assert (2^8 < Int.max_unsigned) by (cbv; trivial). omega. * specialize (UBND 56 8 (eq_refl _))%positive. rewrite ! Z.pow_pos_fold in UBND. assert (2^8 < Int64.max_unsigned) by (cbv; trivial). omega. + rewrite ADD48. rewrite 2 Int64.unsigned_repr; unfold Int64.max_unsigned; simpl; omega. + rewrite ADD40. rewrite 2 Int64.unsigned_repr; unfold Int64.max_unsigned; simpl; omega. + rewrite ADD32. rewrite 2 Int64.unsigned_repr; unfold Int64.max_unsigned; simpl; omega. + rewrite ADD24. rewrite 2 Int64.unsigned_repr; unfold Int64.max_unsigned; simpl; omega. + rewrite ADD16. rewrite 2 Int64.unsigned_repr; unfold Int64.max_unsigned; simpl; omega. + rewrite ! Int64.unsigned_repr; unfold Int64.max_unsigned; simpl; omega. } destruct (zeq i 0). { subst; simpl in *. unfold Znth; simpl. f_equal. unfold bigendian64_invert in HeqU; inv HeqU. simpl. rewrite Byte.unsigned_repr. rewrite (Fcore_Zaux.Zmod_mod_mult _ (2^8) (2^8)). 2: cbv; trivial. 2: cbv; intros; discriminate. rewrite (Fcore_Zaux.Zmod_mod_mult _ (2^16) (2^8)). 2: cbv; trivial. 2: cbv; intros; discriminate. rewrite (Fcore_Zaux.Zmod_mod_mult _ (2^24) (2^8)). 2: cbv; trivial. 2: cbv; intros; discriminate. rewrite (Fcore_Zaux.Zmod_mod_mult _ (2^32) (2^8)). 2: cbv; trivial. 2: cbv; intros; discriminate. rewrite (Fcore_Zaux.Zmod_mod_mult _ (2^40) (2^8)). 2: cbv; trivial. 2: cbv; intros; discriminate. rewrite (Fcore_Zaux.Zmod_mod_mult _ (2^48) (2^8)). 2: cbv; trivial. 2: cbv; intros; discriminate. unfold Int.zero_ext. apply Int.eqm_samerepr. apply Int.eqm_same_bits. change Int.zwordsize with 32; intros. rewrite Int.Zzero_ext_spec. destruct (zlt i 8); subst; simpl. + destruct (zeq i 0); subst; simpl. remember u as uu. destruct uu; simpl. rewrite Int.unsigned_repr. unfold Z.odd. unfold Int64.unsigned, Int64.intval. simpl. remember (Int.unsigned (Int.repr (Int64.unsigned u))). destruct z. Int.eqm. apply Int.testbit specialize (Int.zero_ext_mod 8). Check Int64.zero_ext_mod. Require Import compcert.lib.Integers. intros. 
specialize (Int.equal_same_bits (Int.unsigned (Int.zero_ext 8 (Int.repr (Int64.unsigned u)))) (Int.unsigned (Int.repr (Int64.unsigned u mod 2 ^ 8)))). intros. unfold Int.zero_ext in *. rewrite Ztestbit_mod_two_p; auto. fold (testbit (zero_ext n x) i). destruct (zlt i zwordsize). rewrite bits_zero_ext; auto. rewrite bits_above. rewrite zlt_false; auto. omega. omega. omega. Qed. rewrite Int.repr_unsigned; trivial. rewrite ZW; omega. assert (0 <= ((Int.unsigned u mod Z.pow_pos 2 24) mod Z.pow_pos 2 16) mod Z.pow_pos 2 8 < Byte.modulus). apply Z_mod_lt. cbv; trivial. unfold Byte.max_unsigned. omega. } destruct (zeq i 6). { subst; simpl in *. unfold Znth; simpl. (*assert ((56 <= 56)%positive) by apply Pos.le_refl. specialize (B1 _ H); clear H. rewrite Z.pow_pos_fold in B1.*) rewrite ! shru_shru, ADD48. + rewrite Int64.shru_div_two_p, (Int64.unsigned_repr 48), two_p_correct. 2: unfold Int64.max_unsigned; simpl; omega. assert (QQ:= (UBND 48 16 (eq_refl _))%positive). rewrite ! Z.pow_pos_fold in QQ. rewrite Int64.unsigned_repr. Focus 2. assert (2 ^ 16 < Int64.max_unsigned) by (cbv; trivial). omega. f_equal; f_equal. unfold bigendian64_invert in HeqU; inv HeqU. simpl. destruct (Int64.unsigned_range u). destruct (zlt (Int64.unsigned u) (Z.pow_pos 2 56)). - rewrite Zmod_small by omega. rewrite ! Z.pow_pos_fold. assert (0<= Int64.unsigned u / 2 ^ 48 < 2^8). { split; try omega. apply Zdiv_lt_upper_bound; trivial. } (*rewrite Int.unsigned_repr. 2: change Byte.max_unsigned with (2^8-1) in UNS_B_I; omega.*) rewrite Byte.unsigned_repr. 2: change Byte.max_unsigned with (2^8-1); omega. rewrite zero_ext_inrange; trivial. rewrite Int.unsigned_repr. 2: change Byte.max_unsigned with (2^8-1) in UNS_B_I; omega. change (two_p 8) with (2^8); omega. - specialize (Fcore_Zaux.Zdiv_mod_mult (Int64.unsigned u) (Z.pow_pos 2 48) (Z.pow_pos 2 8)); intros. change ((Z.pow_pos 2 48 * Z.pow_pos 2 8)%Z) with (Z.pow_pos 2 56) in H1. rewrite H1. rewrite Byte.unsigned_repr. Focus 2. destruct (Z_mod_lt (Int64.unsigned u / Z.pow_pos 2 48) (Z.pow_pos 2 8)). cbv; trivial. change Byte.max_unsigned with (Z.pow_pos 2 8 -1). omega. unfold Int.zero_ext. clear - H1; rewrite int_max_unsigned_eq; split; try omega. specialize (Fcore_Zaux.Zpower_pos_gt_0 2 n); omega. rewrite <- Zpower_pos_is_exp, H. change Int64.modulus with (Z.pow_pos 2 64) in H1; trivial. rewrite (Zdiv_small (Int64.unsigned u mod Z.pow_pos 2 56)). Focus 2. specialize (Zmod_unique (Int64.unsigned u) (Z.pow_pos 2 56)); intros. rewrite Int.unsigned_repr. Focus 2. assert (2 ^ 16 < Int64.max_unsigned) by (cbv; trivial). omega. unfold Int.zero_ext. f_equal. f_equal. apply Byte.equal_same_bits; intros. rewrite Int.Zzero_ext_spec by omega. unfold bigendian64_invert in HeqU; inv HeqU. simpl. specialize (Zmod_recombine (Int64.unsigned u) (Z.pow_pos 2 8) (Z.pow_pos 2 48)). intros. replace (Z.pow_pos 2 8 * Z.pow_pos 2 48)%Z with (Z.pow_pos 2 56) in H0. rewrite H0. destruct (zlt i 8). rewrite <- (Byte.testbit_repr (Byte.unsigned b2)), Byte.repr_unsigned. unfold Byte.testbit. rewrite if_true. by omega. rewrite Int64.unsigned_repr. unfold Int.zero_ext. rewrite Int.unsigned_repr. unfold Int.zero_ext. f_equal. f_equal. rewrite Int64.unsigned_repr by omega. rewrite zero_ext_inrange. f_equal; f_equal. - unfold bigendian64_invert in HeqU; inv HeqU. rewrite Byte.unsigned_repr. reflexivity. rewrite Z.pow_pos_fold. omega. - rewrite Int.unsigned_repr. apply B1. omega. + rewrite ADD48. rewrite 2 Int64.unsigned_repr; unfold Int64.max_unsigned; simpl; omega. + rewrite ADD40. 
rewrite 2 Int64.unsigned_repr; unfold Int64.max_unsigned; simpl; omega. + rewrite ADD32. rewrite 2 Int64.unsigned_repr; unfold Int64.max_unsigned; simpl; omega. + rewrite ADD24. rewrite 2 Int64.unsigned_repr; unfold Int64.max_unsigned; simpl; omega. + rewrite ADD16. rewrite 2 Int64.unsigned_repr; unfold Int64.max_unsigned; simpl; omega. + rewrite ! Int64.unsigned_repr; unfold Int64.max_unsigned; simpl; omega. } + rewrite ! Int64.add_unsigned. rewrite ! Int64.unsigned_repr; simpl; unfold Int64.max_unsigned; simpl; try omega. + } rewrite two_p_correct. rewrite Z.pow_pos_fold in B1. omega. unfold Int64.max_unsigned; simpl; omega. rewrite Int64.shru_div_two_p. UNSB_I64. <- two_power_nat_two_p. omega. apply B1; apply Pos.le_refl. cbv. omega. admit. admit. admit. admit. admit. admit. admit. admit. admit. admit. } destruct (zeq i 6). { subst; simpl in *. unfold Znth; simpl. rewrite ! shru_shru. replace (Int64.add (Int64.repr 8) (Int64.add (Int64.repr 8) (Int64.add (Int64.repr 8) (Int64.add (Int64.repr 8) (Int64.add (Int64.repr 8) (Int64.repr 8)))))) with (Int64.repr 48) by reflexivity. rewrite zero_ext_inrange. f_equal; f_equal. unfold bigendian64_invert in HeqU; inv HeqU. rewrite Int64.shru_div_two_p. rewrite (Int64.unsigned_repr 48). rewrite Int64.unsigned_repr. rewrite Byte.unsigned_repr.x specialize (Fcore_Zaux.Zdiv_mod_mult (Int64.unsigned u) (Z.pow_pos 2 8) (Z.pow_pos 2 48) ). intros. replace (Z.pow_pos 2 8 * Z.pow_pos 2 48)%Z with (Z.pow_pos 2 56) in H by reflexivity. rewrite H. specialize (Fcore_Zaux.Zdiv_mod_mult). (Int64.unsigned u) (Z.pow_pos 2 40) (Z.pow_pos 2 8)). intros. replace (Z.pow_pos 2 40 * Z.pow_pos 2 8)%Z with (Z.pow_pos 2 48) in H0 by reflexivity. intros. replace (Z.pow_pos 2 48 * Z.pow_pos 2 8)%Z with (Z.pow_pos 2 56) in H by reflexivity. rewrite H. reflexivity. admit. admit. admit. admit. admit. admit. admit. admit. admit. admit. } unfold Int64.shru. simpl. ! Int64.add_unsigned. (Int64.unsigned_repr 8). rewrite if_false by omega. unfold Znth; simpl. rewrite if_false by omega. destruct (Int64.unsigned_range_2 u). unfold bigendian64_invert in HeqU. inv HeqU. assert (BMU: Byte.max_unsigned = 255) by reflexivity. assert (I64MU: Int64.max_unsigned = Z.pow 2 64 -1) by reflexivity. rewrite iter64. 2: rewrite Z2Nat.id; omega. unfold iter64Shr8'. rewrite Z2Nat.id; try omega. rewrite Int64.mul_signed. rewrite 2 Int64.signed_repr; try (unfold Int64.min_signed, Int64.max_signed; simpl; omega). rewrite Int64.shru_div_two_p, (Int64.unsigned_repr (8 * i)). 2: unfold Int64.max_unsigned; simpl; omega. assert (GT:= two_p_gt_ZERO (8*i)). assert (BND1: 0 <= Int64.unsigned u / Z.pow_pos 2 56 < Byte.modulus). { split. apply Z_div_pos; trivial. cbv; trivial. apply Z.div_lt_upper_bound. cbv; trivial. simpl in *. omega. } (* assert (BND1: 0 <= Int64.unsigned u / Z.pow_pos 2 56 < Byte.max_unsigned). { split. apply Z_div_pos; trivial. cbv; trivial. assert (Int64.unsigned u / Z.pow_pos 2 56 < Byte.modulus). 2: unfold Byte.max_unsigned; omega. apply Z.div_lt_upper_bound. cbv; trivial. simpl in *. omega. }*) (*assert (BND1: 0 <= Int64.unsigned u / Z.pow_pos 2 56 < Byte.modulus). { split. apply Z_div_pos; trivial. cbv; trivial. apply Z.div_lt_upper_bound. cbv; trivial. simpl in *. omega. }*) rewrite unsigned_repr'; trivial. (* rewrite Int64.unsigned_repr. Focus 2. split. apply Z_div_pos; trivial. omega. apply Z.div_le_upper_bound. omega. eapply Z.le_trans; eauto. specialize (Zmult_le_compat_r 1 (two_p (8 * i)) Int64.max_unsigned). simpl. intros Q; apply Q; omega.*) rewrite unsigned_repr'. Focus 2. 
split. apply Z_div_pos; trivial. cbv; trivial. apply Z_mod_lt. cbv; trivial. apply Z.div_lt_upper_bound. cbv; trivial. eapply Z.lt_le_trans. apply Z_mod_lt. cbv; trivial. cbv; congruence. rewrite unsigned_repr'. Focus 2. split. apply Z_div_pos; trivial. cbv; trivial. apply Z_mod_lt. cbv; trivial. apply Z.div_lt_upper_bound. cbv; trivial. eapply Z.lt_le_trans. apply Z_mod_lt. cbv; trivial. cbv; congruence. rewrite unsigned_repr'. Focus 2. split. apply Z_div_pos; trivial. cbv; trivial. apply Z_mod_lt. cbv; trivial. apply Z.div_lt_upper_bound. cbv; trivial. eapply Z.lt_le_trans. apply Z_mod_lt. cbv; trivial. cbv; congruence. rewrite unsigned_repr'. Focus 2. split. apply Z_div_pos; trivial. cbv; trivial. apply Z_mod_lt. cbv; trivial. apply Z.div_lt_upper_bound. cbv; trivial. eapply Z.lt_le_trans. apply Z_mod_lt. cbv; trivial. cbv; congruence. rewrite unsigned_repr'. Focus 2. split. apply Z_div_pos; trivial. cbv; trivial. apply Z_mod_lt. cbv; trivial. apply Z.div_lt_upper_bound. cbv; trivial. eapply Z.lt_le_trans. apply Z_mod_lt. cbv; trivial. cbv; congruence. rewrite unsigned_repr'. Focus 2. split. apply Z_div_pos; trivial. cbv; trivial. apply Z_mod_lt. cbv; trivial. apply Z.div_lt_upper_bound. cbv; trivial. eapply Z.lt_le_trans. apply Z_mod_lt. cbv; trivial. cbv; congruence. rewrite unsigned_repr'. Focus 2. split. apply Z_mod_lt. cbv; trivial. apply Z_mod_lt. cbv; trivial. assert (BND: 0 <= Int64.unsigned u / Z.pow_pos 2 56 <= Byte.max_unsigned). { unfold Byte.max_unsigned; omega. } assert (IMU: Int.max_unsigned = 4294967295) by reflexivity. destruct (zeq i 7). { subst; simpl in *. rewrite two_power_pos_correct, zero_ext_inrange. + rewrite Int64.unsigned_repr; trivial. split. apply Z_div_pos; trivial. cbv; trivial. apply Z.div_le_upper_bound. cbv; trivial. eapply Z.le_trans; eauto. + rewrite Int.unsigned_repr, Int64.unsigned_repr. apply BND. omega. rewrite Int64.unsigned_repr. omega. omega. } destruct (zeq i 6). { subst; simpl in *. rewrite two_power_pos_correct, zero_ext_inrange. specialize (Fcore_Zaux.Zdiv_mod_mult (Int64.unsigned u) (Z.pow_pos 2 48) (Z.pow_pos 2 8)). rewrite <- Zpower_pos_is_exp. intros Q. replace (Z.pow_pos 2 (48 + 8)) with (Z.pow_pos 2 56) in Q by reflexivity. rewrite Q. rewrite Zmod_small; trivial. f_equal. f_equal. simpl. reflexivity. rewrite Int.unsigned_repr; simpl in *; omega. } omega. replace Byte.modulus with (two_p 8) in BND1 unfold Byte.modulus in BND1. simpl in *. cbv. unfold Int.zero_ext. rewrite Int.unsigned_repr. apply Z.div_lt_upper_bound. cbv; trivial. eapply Z.lt_le_trans. apply Z_mod_lt. cbv; trivial. cbv; congruence. . omega. cbv. specialize (Zmult_le_compat_r 1 (two_p (8 * i)) Int64.max_unsigned). simpl. intros Q; apply Q; omega. rewrite zero_ext_inrange. Focus 2. rewrite Int.unsigned_repr. assert (Int64.unsigned u / two_p (8 * i) < two_p 8). 2: omega. apply Z.div_lt_upper_bound. omega. assert (Int64.max_unsigned < two_p (8 * i) * two_p 8). 2: omega. rewrite 2 two_p_equiv, Z.pow_mul_r, I64MU; try omega. specialize (Zpower_exp (2^8) i 1); rewrite Z.pow_1_r. intros Q; rewrite <- Q. simpl. omega. simpl. simpl in *. omega. split. apply Z_div_pos; trivial. cbv; trivial. assert (Int64.unsigned u / Z.pow_pos 2 56 < Byte.modulus). 2: unfold Byte.max_unsigned; omega. apply Z.div_lt_upper_bound. cbv; trivial. simpl in *. omega. destruct (zeq i 7). { subst. simpl in *. rewrite zero_ext_inrange. rewrite Byte.unsigned_repr. reflexivity. { split. apply Z_div_pos; trivial. cbv; trivial. assert (Int64.unsigned u / Z.pow_pos 2 56 < Byte.modulus). 
2: unfold Byte.max_unsigned; omega. apply Z.div_lt_upper_bound. cbv; trivial. simpl. omega. } eapply Z.le_trans; eauto. rewrite I64MU. simpl. clear. cbv. omega. unfold omega. simpl. omega. Zdiv_interval_2. destruct (zeq i 0); subst; simpl. rewrite Byte.unsigned_repr. admit. + f_equal. rewrite iter64. 2: rewrite Z2Nat.id; omega. unfold iter64Shr8'. rewrite Z2Nat.id; try omega. unfold Int64.mul. rewrite 2 Int64.unsigned_repr. 2: unfold Int64.max_unsigned; simpl; omega. 2: unfold Int64.max_unsigned; simpl; omega. rewrite Int64.shru_div_two_p. rewrite (Int64.unsigned_repr (8 * i)), two_p_equiv. 2: unfold Int64.max_unsigned; simpl; omega. assert (X: 0 < 2 ^ (8 * i)) by (apply Z.pow_pos_nonneg; omega). destruct (Int64.unsigned_range_2 u). assert (T: 0 <= Int64.unsigned u / 2 ^ (8 * i) <= 255). { split. apply Z_div_pos. omega. omega. apply Zdiv_le_upper_bound; trivial. eapply Z.le_trans. apply H0. unfold Int64.max_unsigned. rewrite Int64.modulus_power. replace (two_p Int64.zwordsize) with (2^64) by reflexivity. assert (2 ^ 64 < 255 * 2 ^ (8 * i)). 2: omega. specialize (Zmult_le_compat_l 1 (2 ^ (8 * i)) Int64.max_unsigned). rewrite Z.mul_1_r. intros Y; apply Y. omega. unfold Int64.max_unsigned; simpl; omega. } assert (Q: 0 <= Int64.unsigned u / 2 ^ (8 * i) <= Int64.max_unsigned). { split. apply Z_div_pos. omega. omega. apply Zdiv_le_upper_bound; trivial. eapply Z.le_trans. apply H0. specialize (Zmult_le_compat_l 1 (2 ^ (8 * i)) Int64.max_unsigned). rewrite Z.mul_1_r. intros Y; apply Y. omega. unfold Int64.max_unsigned; simpl; omega. } rewrite Int64.unsigned_repr; trivial. rewrite zero_ext_inrange. f_equal. admit. rewrite Int.unsigned_repr. replace (two_p 8 - 1) with 255 by reflexivity. replace (1 + (7 - i)) with (8-i) by omega. replace (i + (8 - i)) with 8 by omega. destruct (zeq i 0). { subst; unfold sublist; simpl. unfold littleendian64_invert in HeqU. inv HeqU. rewrite <- app_comm_cons. (sublist_app1 _ 0 i). 2: omega. 2: rewrite Zlength_sublist. omega. rewrite <- app_assoc. assert (ZW: Int.zwordsize = 32) by reflexivity. assert (EIGHT: Int.unsigned (Int.repr 8) = 8). apply Int.unsigned_repr. rewrite int_max_unsigned_eq; omega. inv HeqU. clear - ZW EIGHT I. simpl. destruct (zeq i 0); subst; simpl. f_equal. f_equal. { rewrite Byte.unsigned_repr. rewrite (Fcore_Zaux.Zmod_mod_mult _ (2^8) (2^8)). 2: cbv; trivial. 2: cbv; intros; discriminate. rewrite (Fcore_Zaux.Zmod_mod_mult _ (2^16) (2^8)). 2: cbv; trivial. 2: cbv; intros; discriminate. rewrite <- (Int.zero_ext_mod 8). rewrite Int.repr_unsigned; trivial. rewrite ZW; omega. assert (0 <= ((Int.unsigned u mod Z.pow_pos 2 24) mod Z.pow_pos 2 16) mod Z.pow_pos 2 8 < Byte.modulus). apply Z_mod_lt. cbv; trivial. unfold Byte.max_unsigned. omega. } destruct (zeq i 1); subst; simpl. f_equal. f_equal. f_equal. { rewrite Byte.unsigned_repr. Focus 2. assert (0 <= (Int.unsigned u mod Z.pow_pos 2 24) mod Z.pow_pos 2 16 / Z.pow_pos 2 8 < Byte.modulus). Focus 2. unfold Byte.max_unsigned. omega. split. apply Z_div_pos. cbv; trivial. apply Z_mod_lt. cbv; trivial. apply Zdiv_lt_upper_bound. cbv; trivial. apply Z_mod_lt. cbv; trivial. apply Int.same_bits_eq. rewrite ZW; intros. rewrite Int.bits_zero_ext, Int.testbit_repr; try apply H. rewrite (Z.div_pow2_bits _ 8); try omega. rewrite (Int.Ztestbit_mod_two_p 16); try omega. rewrite (Int.Ztestbit_mod_two_p 24); try omega. rewrite Int.bits_shru; try omega. rewrite EIGHT, ZW, Ztest_Inttest. remember (zlt i 8). destruct s. repeat rewrite zlt_true. trivial. omega. omega. omega. rewrite zlt_false. trivial. omega. 
} destruct (zeq i 2); subst; simpl. f_equal. f_equal. f_equal. f_equal. { rewrite Byte.unsigned_repr. Focus 2. assert (0 <= Int.unsigned u mod Z.pow_pos 2 24 / Z.pow_pos 2 16 < Byte.modulus). 2: unfold Byte.max_unsigned; omega. split. apply Z_div_pos. cbv; trivial. apply Z_mod_lt. cbv; trivial. apply Zdiv_lt_upper_bound. cbv; trivial. apply Z_mod_lt. cbv; trivial. apply Int.same_bits_eq. rewrite ZW; intros. rewrite Int.bits_zero_ext, Int.testbit_repr; try apply H. rewrite Int.bits_shru; try omega. rewrite EIGHT, ZW. rewrite (Z.div_pow2_bits _ 16); try omega. rewrite (Int.Ztestbit_mod_two_p 24); try omega. rewrite Ztest_Inttest. remember (zlt i 8). destruct s. repeat rewrite zlt_true. rewrite Int.bits_shru, EIGHT, ZW. rewrite zlt_true. rewrite <- Z.add_assoc. reflexivity. omega. omega. omega. omega. rewrite zlt_false. trivial. omega. } destruct (zeq i 3); subst; simpl. f_equal. f_equal. f_equal. f_equal. f_equal. { rewrite Byte.unsigned_repr. Focus 2. assert (0 <= Int.unsigned u / Z.pow_pos 2 24 < Byte.modulus). 2: unfold Byte.max_unsigned; omega. split. apply Z_div_pos. cbv; trivial. apply Int.unsigned_range. apply Zdiv_lt_upper_bound. cbv; trivial. apply Int.unsigned_range. apply Int.same_bits_eq. rewrite ZW; intros. rewrite Int.bits_zero_ext, Int.testbit_repr; try apply H. rewrite Int.bits_shru; try omega. rewrite EIGHT, ZW. rewrite (Z.div_pow2_bits _ 24); try omega. rewrite Ztest_Inttest. remember (zlt i 8). destruct s. rewrite zlt_true. rewrite Int.bits_shru, EIGHT, ZW. rewrite zlt_true. rewrite Int.bits_shru, EIGHT, ZW. rewrite zlt_true. repeat rewrite <- Z.add_assoc. reflexivity. omega. omega. omega. omega. omega. rewrite Int.bits_above. trivial. omega. } omega. } Time forward. (*1.6*) Time Qed. (*4.9*) unfold data_at_, field_at_. rewrite field_at_data_at. rewrite field_address_offset by auto with field_compatible. simpl. rewrite isptr_offset_val_zero. apply data_at_ext. unfold default_val. simpl. unfold tarray. simpl. destruct tv. reflexivity. cancel. rewrite unfold field_address; simpl. normalize. cancel. } forward_for (EX z:_, (PROP (0<= z <= 7 ) LOCAL (temp _i (Vint (Int.repr z)); temp _x x; temp _u (Vlong u)) SEP (data_at Tsh (tarray tuchar 8) (Data z) x))). { Exists 7. entailer!. admit. (*Data 7 = list_repeat 8 Vundef*) } eapply semax_for with (A:=Z)(v:= fun a => Val.of_bool (negb (Int.lt (Int.repr a) (Int.repr 0)))). solve [ reflexivity]. intros. solve [entailer!]. intros. entailer!. { intros i. simpl. normalize. rename H into I0. rename H0 into I7. apply negb_true_iff in I0. (* apply lt_repr_false in I0. 2: red; unfold Int.min_signed, Int.max_signed; simpl. 2: split; try omega. Focus 2. 2: red; unfold Int.min_signed, Int.max_signed; simpl; omega.*) forward. { apply andp_right. 2: solve [entailer]. apply andp_right. solve [entailer!]. entailer. admit. (*typecheck_error (invalid_cast_result tuchar tuchar)*) } forward. entailer. simpl. admit. (*typecheck_error (arg_type (Ebinop Oshr (Etempvar _u tulong) (Econst_int (Int.repr 8) tint) tulong))*) unfold arg_type. go_lower. entailer!. Search invalid_cast_result. unfold invalid_cast_result. typecheck_error. simpl. simpl. destruct (zlt { apply extract_exists_pre. intros i. Intros. rename H into I. cancel. Focus 2. eapply semax_for with (A:=Z). reflexivity. Ltac forward_for_simple_bound n Pre ::= check_Delta; repeat match goal with |- semax _ _ (Ssequence (Ssequence (Ssequence _ _) _) _) _ => apply -> seq_assoc; abbreviate_semax end. 
(* first [ match type of n with ?t => first [ unify t Z | elimtype (Type_of_bound_in_forward_for_should_be_Z_but_is t)] end; match type of Pre with ?t => first [unify t (environ -> mpred); fail 1 | elimtype (Type_of_invariant_in_forward_for_should_be_environ_arrow_mpred_but_is t)] end | simple eapply semax_seq'; [forward_for_simple_bound' n Pre | cbv beta; simpl update_tycon; abbreviate_semax ] | eapply semax_post_flipped'; [forward_for_simple_bound' n Pre | ] ].*) Time forward_for_simple_bound 8 (EX i:Z, (PROP () LOCAL (temp _x x; temp _u (Vlong (iter64Shr8 u (Z.to_nat i)))) SEP (data_at Tsh (tarray tuchar 8) (sublist 0 i (map Vint (map Int.repr (map Byte.unsigned ([w0;w1;w2;w3;u0;u1;u2;u3])))) ++ list_repeat (Z.to_nat(8-i)) Vundef) x))). { entailer!. } { rename H into I. Time assert_PROP (field_compatible (Tarray tuchar 4 noattr) [] x /\ isptr x) as FC_ptrX by solve [entailer!]. (*2.3*) destruct FC_ptrX as [FC ptrX]. Time forward. (*3.2*) Time forward. (*0.8*) rewrite Z.add_comm, Z2Nat.inj_add; try omega. Time entailer!. (*1.5*) unfold upd_Znth. autorewrite with sublist. rewrite field_at_data_at. simpl. unfold field_address. simpl. if_tac. 2: solve [contradiction]. replace (4 - (1 + i)) with (4-i-1) by omega. rewrite isptr_offset_val_zero; trivial. clear H. apply data_at_ext. rewrite Zplus_comm. assert (ZW: Int.zwordsize = 32) by reflexivity. assert (EIGHT: Int.unsigned (Int.repr 8) = 8). apply Int.unsigned_repr. rewrite int_max_unsigned_eq; omega. inv HeqU. clear - ZW EIGHT I. destruct (zeq i 0); subst; simpl. f_equal. f_equal. { rewrite Byte.unsigned_repr. rewrite (Fcore_Zaux.Zmod_mod_mult _ (2^8) (2^8)). 2: cbv; trivial. 2: cbv; intros; discriminate. rewrite (Fcore_Zaux.Zmod_mod_mult _ (2^16) (2^8)). 2: cbv; trivial. 2: cbv; intros; discriminate. rewrite <- (Int.zero_ext_mod 8). rewrite Int.repr_unsigned; trivial. rewrite ZW; omega. assert (0 <= ((Int.unsigned u mod Z.pow_pos 2 24) mod Z.pow_pos 2 16) mod Z.pow_pos 2 8 < Byte.modulus). apply Z_mod_lt. cbv; trivial. unfold Byte.max_unsigned. omega. } destruct (zeq i 1); subst; simpl. f_equal. f_equal. f_equal. { rewrite Byte.unsigned_repr. Focus 2. assert (0 <= (Int.unsigned u mod Z.pow_pos 2 24) mod Z.pow_pos 2 16 / Z.pow_pos 2 8 < Byte.modulus). Focus 2. unfold Byte.max_unsigned. omega. split. apply Z_div_pos. cbv; trivial. apply Z_mod_lt. cbv; trivial. apply Zdiv_lt_upper_bound. cbv; trivial. apply Z_mod_lt. cbv; trivial. apply Int.same_bits_eq. rewrite ZW; intros. rewrite Int.bits_zero_ext, Int.testbit_repr; try apply H. rewrite (Z.div_pow2_bits _ 8); try omega. rewrite (Int.Ztestbit_mod_two_p 16); try omega. rewrite (Int.Ztestbit_mod_two_p 24); try omega. rewrite Int.bits_shru; try omega. rewrite EIGHT, ZW, Ztest_Inttest. remember (zlt i 8). destruct s. repeat rewrite zlt_true. trivial. omega. omega. omega. rewrite zlt_false. trivial. omega. } destruct (zeq i 2); subst; simpl. f_equal. f_equal. f_equal. f_equal. { rewrite Byte.unsigned_repr. Focus 2. assert (0 <= Int.unsigned u mod Z.pow_pos 2 24 / Z.pow_pos 2 16 < Byte.modulus). 2: unfold Byte.max_unsigned; omega. split. apply Z_div_pos. cbv; trivial. apply Z_mod_lt. cbv; trivial. apply Zdiv_lt_upper_bound. cbv; trivial. apply Z_mod_lt. cbv; trivial. apply Int.same_bits_eq. rewrite ZW; intros. rewrite Int.bits_zero_ext, Int.testbit_repr; try apply H. rewrite Int.bits_shru; try omega. rewrite EIGHT, ZW. rewrite (Z.div_pow2_bits _ 16); try omega. rewrite (Int.Ztestbit_mod_two_p 24); try omega. rewrite Ztest_Inttest. remember (zlt i 8). destruct s. repeat rewrite zlt_true. 
rewrite Int.bits_shru, EIGHT, ZW. rewrite zlt_true. rewrite <- Z.add_assoc. reflexivity. omega. omega. omega. omega. rewrite zlt_false. trivial. omega. } destruct (zeq i 3); subst; simpl. f_equal. f_equal. f_equal. f_equal. f_equal. { rewrite Byte.unsigned_repr. Focus 2. assert (0 <= Int.unsigned u / Z.pow_pos 2 24 < Byte.modulus). 2: unfold Byte.max_unsigned; omega. split. apply Z_div_pos. cbv; trivial. apply Int.unsigned_range. apply Zdiv_lt_upper_bound. cbv; trivial. apply Int.unsigned_range. apply Int.same_bits_eq. rewrite ZW; intros. rewrite Int.bits_zero_ext, Int.testbit_repr; try apply H. rewrite Int.bits_shru; try omega. rewrite EIGHT, ZW. rewrite (Z.div_pow2_bits _ 24); try omega. rewrite Ztest_Inttest. remember (zlt i 8). destruct s. rewrite zlt_true. rewrite Int.bits_shru, EIGHT, ZW. rewrite zlt_true. rewrite Int.bits_shru, EIGHT, ZW. rewrite zlt_true. repeat rewrite <- Z.add_assoc. reflexivity. omega. omega. omega. omega. omega. rewrite Int.bits_above. trivial. omega. } omega. } Time forward. (*1.6*) Time Qed. (*4.9*) *) (* Definition L32_specZ := DECLARE _L32 WITH x : int, c: int PRE [ _x OF tuint, _c OF tint ] PROP () (*c=Int.zero doesn't seem to satisfy spec???*) LOCAL (temp _x (Vint x); temp _c (Vint Int.zero)) SEP () POST [ tuint ] PROP (True) LOCAL () SEP (). Definition LDZFunSpecs : funspecs := L32_specZ::nil. Lemma L32_specZ_ok: semax_body SalsaVarSpecs LDZFunSpecs f_L32 L32_specZ. Proof. start_function. name x' _x. name c' _c. forward. entailer. apply prop_right. assert (W: Int.zwordsize = 32). reflexivity. assert (U: Int.unsigned Int.iwordsize=32). reflexivity. (*remember (Int.eq c' Int.zero) as z. destruct z. apply binop_lemmas.int_eq_true in Heqz. subst. simpl. *) remember (Int.ltu (Int.repr 32) Int.iwordsize) as d. symmetry in Heqd. destruct d; simpl. Focus 2. apply ltu_false_inv in Heqd. rewrite U in *. rewrite Int.unsigned_repr in Heqd. 2: rewrite int_max_unsigned_eq; omega. clear Heqd. split; trivial. remember (Int.ltu (Int.sub (Int.repr 32) c') Int.iwordsize) as z. symmetry in Heqz. destruct z. Focus 2. apply ltu_false_inv in Heqz. rewrite U in *. unfold Int.sub in Heqz. rewrite (Int.unsigned_repr 32) in Heqz. rewrite Int.unsigned_repr in Heqz. omega. rewrite int_max_unsigned_eq; omega. rewrite int_max_unsigned_eq; omega. simpl; split; trivial. split; trivial. apply ltu_inv in Heqz. unfold Int.sub in *. rewrite (Int.unsigned_repr 32) in *; try (rewrite int_max_unsigned_eq; omega). rewrite Int.unsigned_repr in Heqz. 2: rewrite int_max_unsigned_eq; omega. unfold Int.rol, Int.shl, Int.shru. rewrite or_repr. assert (Int.unsigned c' mod Int.zwordsize = Int.unsigned c'). apply Zmod_small. rewrite W; omega. rewrite H0, W. f_equal. f_equal. f_equal. rewrite Int.unsigned_repr. 2: rewrite int_max_unsigned_eq; omega. rewrite Int.and_mone. trivial. Qed. *)
import to_mathlib.data.set.prod import to_mathlib.data.set.lattice import to_mathlib.data.nat.basic import to_mathlib.topology.constructions import to_mathlib.topology.germ import to_mathlib.topology.misc import indexing import notations open set filter prod topological_space open_locale topology unit_interval /-! Notes by Patrick: The goal of this file is to explore how to prove `exists_surrounding_loops` (or rather its version with `C = U = univ` which is the only needed case) in a way that uncouples the general topological argument from the things specific to loops. The general lemma is meant to be something like `inductive_construction'` below. -/ section inductive_construction /-! Notes by Patrick: In this section, I took lemmas that used to exist when I worked on the inductive construction refactor. In particular there is the lemma which can't quite be used to prove `inductive_htpy_construction`, namely `inductive_construction`. In that lemma, the covering is fixed. Lemma `inductive_construction'` combines this with an argument using local existence and exhaustions. A technical intermediate statement is `inductive_construction''`. -/ lemma index_type.tendsto_coe_at_top (N : ℕ) : tendsto (coe : ℕ → index_type N) at_top at_top := tendsto_at_top_at_top.mpr (λ i, ⟨indexing.to_nat i, λ n hn,(indexing.from_to i) ▸ indexing.coe_mono hn⟩) lemma locally_finite.exists_forall_eventually_of_indexing {α X ι : Type*} [topological_space X] [linear_order ι] [indexing ι] {f : ℕ → X → α} {V : ι → set X} (hV : locally_finite V) (h : ∀ n : ℕ, ∀ x ∉ V ((n + 1) : ℕ), f (n + 1) x = f n x) (h' : ∀ n : ℕ, ((n+1 : ℕ) : ι) = n → f (n + 1) = f n) : ∃ (F : X → α), ∀ (x : X), ∀ᶠ (n : ℕ) in filter.at_top, f n =ᶠ[𝓝 x] F := begin let π : ℕ → ι := indexing.from_nat, choose U hUx hU using hV, choose i₀ hi₀ using λ x, (hU x).bdd_above, let n₀ : X → ℕ := indexing.to_nat ∘ i₀, have key : ∀ {x} {n}, n ≥ n₀ x → ∀ {y}, y ∈ U x → f n y = f (n₀ x) y, { intros x n hn, rcases le_iff_exists_add.mp hn with ⟨k, rfl⟩, clear hn, intros y hy, induction k with k hk, { simp }, { rw ← hk, clear hk, have : ∀ n, π n < π (n+1) ∨ π n = π (n+1), exact λ n, lt_or_eq_of_le (indexing.mono_from n.le_succ), rcases this (n₀ x + k) with H | H ; clear this, { have ineq : π (n₀ x + k + 1) > i₀ x, { suffices : i₀ x ≤ π (n₀ x + k), from lt_of_le_of_lt this H, rw ← indexing.from_to (i₀ x), exact indexing.mono_from le_self_add }, apply h, rintro (hy' : y ∈ V (π (n₀ x + k + 1))), have := hi₀ x ⟨y, ⟨hy', hy⟩⟩, clear hy hy', exact lt_irrefl _ (lt_of_le_of_lt this ineq) }, { erw [← (h' _ H.symm)], refl } } }, refine ⟨λ x, f (n₀ x) x, λ x, _⟩, change ∀ᶠ (n : ℕ) in at_top, f n =ᶠ[𝓝 x] λ (y : X), f (n₀ y) y, apply (eventually_gt_at_top (n₀ x)).mono (λ n hn, _), apply mem_of_superset (hUx x) (λ y hy, _), change f n y = f (n₀ y) y, calc f n y = f (n₀ x) y : key hn.le hy ... = f (max (n₀ x) (n₀ y)) y : (key (le_max_left _ _) hy).symm ... 
= f (n₀ y) y : key (le_max_right _ _) (mem_of_mem_nhds $ hUx y) end lemma inductive_construction_alt {X Y : Type*} [topological_space X] {N : ℕ} {U K : index_type N → set X} (P₀ : Π x : X, germ (𝓝 x) Y → Prop) (P₁ : Π i : index_type N, Π x : X, germ (𝓝 x) Y → Prop) (U_fin : locally_finite U) (init : ∃ f : X → Y, ∀ x, P₀ x f) (ind : ∀ (i : index_type N) (f : X → Y), (∀ x, P₀ x f) → (∀ j < i, ∀ᶠ x near K j, P₁ j x f) → ∃ f' : X → Y, (∀ x, P₀ x f') ∧ (∀ j ≤ i, ∀ᶠ x near K j, P₁ j x f') ∧ ∀ x ∉ U i, f' x = f x) : ∃ f : X → Y, (∀ x, P₀ x f) ∧ ∀ j, ∀ᶠ x near K j, P₁ j x f := begin let P : ℕ → (X → Y) → Prop := λ n f, (∀ x, P₀ x f) ∧ ∀ j : index_type N, j ≤ n → ∀ᶠ x near K j, P₁ j x f, let Q : ℕ → (X → Y) → (X → Y) → Prop := λ n f f', ((((n+1:ℕ) : index_type N) = n) → f' = f) ∧ ∀ x ∉ U (n + 1 : ℕ), f' x = f x, obtain ⟨f, hf⟩ : ∃ f : ℕ → X → Y, ∀ n, P n (f n) ∧ Q n (f n) (f $ n + 1), { apply exists_by_induction', { dsimp [P], cases init with f₀ hf₀, rcases ind 0 f₀ hf₀ _ with ⟨f', h₀f', h₁f', hf'⟩, use [f', h₀f', h₁f'], simp [index_type.not_lt_zero] }, { rintros n f ⟨h₀f, h₁f⟩, rcases index_type.lt_or_eq_succ N n with hn | hn, { simp_rw index_type.le_or_lt_succ hn at h₁f, rcases ind (n+1 : ℕ) f h₀f h₁f with ⟨f', h₀f', h₁f', hf'⟩, exact ⟨f', ⟨h₀f', h₁f'⟩, ⟨λ hn', (hn.ne hn'.symm).elim, hf'⟩⟩ }, { simp only [hn] at h₁f, exact ⟨f, ⟨h₀f, h₁f⟩, λ hn, rfl, λ x hx, rfl⟩ } } }, dsimp only [P, Q] at hf, simp only [forall_and_distrib] at hf, rcases hf with ⟨⟨h₀f, h₁f⟩, hf, hf'⟩, rcases U_fin.exists_forall_eventually_of_indexing hf' hf with ⟨F, hF⟩, refine ⟨F, λ x, _, λ j, _⟩, { rcases (hF x).exists with ⟨n₀, hn₀⟩, simp only [germ.coe_eq.mpr hn₀.symm, h₀f n₀ x] }, apply eventually_nhds_set_iff.mpr, intros x hx, rcases ((hF x).and $ (filter.tendsto_at_top.mp (index_type.tendsto_coe_at_top N) j)).exists with ⟨n₀, hn₀, hn₀'⟩, apply ((eventually_nhds_set_iff.mp (h₁f _ _ hn₀') x hx).and $ eventually_eventually_eq_nhds.mpr hn₀).mono, rintros y ⟨hy, hy'⟩, rwa germ.coe_eq.mpr hy'.symm end lemma inductive_construction {X Y : Type*} [topological_space X] {N : ℕ} {U K : index_type N → set X} (P₀ P₁ : Π x : X, germ (𝓝 x) Y → Prop) (U_fin : locally_finite U) (K_cover : (⋃ i, K i) = univ) (init : ∃ f : X → Y, ∀ x, P₀ x f) (ind : ∀ (i : index_type N) (f : X → Y), (∀ x, P₀ x f) → (∀ᶠ x near ⋃ j < i, K j, P₁ x f) → ∃ f' : X → Y, (∀ x, P₀ x f') ∧ (∀ᶠ x near ⋃ j ≤ i, K j, P₁ x f') ∧ ∀ x ∉ U i, f' x = f x) : ∃ f : X → Y, ∀ x, P₀ x f ∧ P₁ x f := begin rcases inductive_construction_alt P₀ (λ j, P₁) U_fin init (by simpa only [eventually_nhds_set_Union₂] using ind) with ⟨f, h₀f, h₁f⟩, refine ⟨f, λ x, ⟨h₀f x, _⟩⟩, obtain ⟨j, hj⟩ : ∃ j, x ∈ K j, by simpa using (by simp [K_cover] : x ∈ ⋃ j, K j), exact (h₁f j).on_set _ hj end /-- We are given a suitably nice topological space `X` and three local constraints `P₀`,`P₀'` and `P₁` on maps from `X` to some type `Y`. All maps entering the discussion are required to statisfy `P₀` everywhere. The goal is to turn a map `f₀` satisfying `P₁` near a compact set `K` into one satisfying everywhere without changing `f₀` near `K`. The assumptions are: * For every `x` in `X` there is a map which satisfies `P₁` near `x` * One can patch two maps `f₁ f₂` satisfying `P₁` on open sets `U₁` and `U₂` respectively and such that `f₁` satisfies `P₀'` everywhere into a map satisfying `P₁` on `K₁ ∪ K₂` for any compact sets `Kᵢ ⊆ Uᵢ` and `P₀'` everywhere. 
-/ lemma inductive_construction'' {X Y : Type*} [emetric_space X] [locally_compact_space X] [second_countable_topology X] (P₀ P₀' P₁ : Π x : X, germ (𝓝 x) Y → Prop) {f₀ : X → Y} (hP₀f₀ : ∀ x, P₀ x f₀ ∧ P₀' x f₀ ) (loc : ∀ x, ∃ f : X → Y, (∀ x, P₀ x f) ∧ ∀ᶠ x' in 𝓝 x, P₁ x' f) (ind : ∀ {U₁ U₂ K₁ K₂ : set X} {f₁ f₂ : X → Y}, is_open U₁ → is_open U₂ → is_closed K₁ → is_closed K₂ → K₁ ⊆ U₁ → K₂ ⊆ U₂ → (∀ x, P₀ x f₁ ∧ P₀' x f₁) → (∀ x, P₀ x f₂) → (∀ x ∈ U₁, P₁ x f₁) → (∀ x ∈ U₂, P₁ x f₂) → ∃ f : X → Y, (∀ x, P₀ x f ∧ P₀' x f ) ∧ (∀ᶠ x near K₁ ∪ K₂, P₁ x f) ∧ (∀ᶠ x near K₁ ∪ U₂ᶜ, f x = f₁ x)) : ∃ f : X → Y, ∀ x, P₀ x f ∧ P₀' x f ∧ P₁ x f := begin let P : set X → Prop := λ U, ∃ f : X → Y, (∀ x, P₀ x f) ∧ (∀ x ∈ U, P₁ x f), have hP₁ : antitone P, { rintros U V hUV ⟨f, h, h'⟩, exact ⟨f, h, λ x hx, h' x (hUV hx)⟩ }, have hP₂ : P ∅, from ⟨f₀, λ x, (hP₀f₀ x).1, λ x h, h.elim⟩, have hP₃ : ∀ (x : X), x ∈ univ → (∃ (V : set X) (H : V ∈ 𝓝 x), P V), { rintros x -, rcases loc x with ⟨f, h₀f, h₁f⟩, exact ⟨_, h₁f, f, h₀f, λ x, id⟩ }, rcases exists_locally_finite_subcover_of_locally is_closed_univ hP₁ hP₂ hP₃ with ⟨K, (U : index_type 0 →set X) , K_cpct, U_op, hU, hKU, U_loc, hK⟩, simp_rw ← and_assoc, apply inductive_construction (λ x φ, P₀ x φ ∧ P₀' x φ) P₁ U_loc (eq_univ_of_univ_subset hK) ⟨f₀, hP₀f₀⟩, rintros (n : ℕ) f h₀f (h₁f : ∀ᶠ x near ⋃ j < n, K j, P₁ x f), have cpct : is_closed ⋃ j < n, K j, { rw show (⋃ j < n, K j) = ⋃ j ∈ finset.range n, K j, by simp only [finset.mem_range], apply (finset.range n).is_closed_bUnion _ (λ j _, (K_cpct j).is_closed) }, rcases hU n with ⟨f', h₀f', h₁f'⟩, rcases mem_nhds_set_iff_exists.mp h₁f with ⟨V, V_op, hKV, h₁V⟩, rcases ind V_op (U_op n) cpct (K_cpct n).is_closed hKV (hKU n) h₀f h₀f' h₁V h₁f' with ⟨F, h₀F, h₁F, hF⟩, simp_rw ← bUnion_le at h₁F, exact ⟨F, h₀F, h₁F, λ x hx, hF.on_set x (or.inr hx)⟩ end /-- We are given a suitably nice topological space `X` and two local constraints `P₀` and `P₁` on maps from `X` to some type `Y`. All maps entering the discussion are required to statisfy `P₀` everywhere. The goal is to turn a map `f₀` satisfying `P₁` near a compact set `K` into one satisfying everywhere without changing `f₀` near `K`. The assumptions are: * For every `x` in `X` there is a map which satisfies `P₁` near `x` * One can patch two maps `f₁ f₂` satisfying `P₁` on open sets `U₁` and `U₂` respectively into a map satisfying `P₁` on `K₁ ∪ K₂` for any compact sets `Kᵢ ⊆ Uᵢ`. This is deduced this version from the version where `K` is empty but adding some `P'₀`, see `inductive_construction''`. 
-/ lemma inductive_construction' {X Y : Type*} [emetric_space X] [locally_compact_space X] [second_countable_topology X] (P₀ P₁ : Π x : X, germ (𝓝 x) Y → Prop) {K : set X} (hK : is_closed K) {f₀ : X → Y} (hP₀f₀ : ∀ x, P₀ x f₀) (hP₁f₀ : ∀ᶠ x near K, P₁ x f₀) (loc : ∀ x, ∃ f : X → Y, (∀ x, P₀ x f) ∧ ∀ᶠ x' in 𝓝 x, P₁ x' f) (ind : ∀ {U₁ U₂ K₁ K₂ : set X} {f₁ f₂ : X → Y}, is_open U₁ → is_open U₂ → is_closed K₁ → is_closed K₂ → K₁ ⊆ U₁ → K₂ ⊆ U₂ → (∀ x, P₀ x f₁) → (∀ x, P₀ x f₂) → (∀ x ∈ U₁, P₁ x f₁) → (∀ x ∈ U₂, P₁ x f₂) → ∃ f : X → Y, (∀ x, P₀ x f) ∧ (∀ᶠ x near K₁ ∪ K₂, P₁ x f) ∧ (∀ᶠ x near K₁ ∪ U₂ᶜ, f x = f₁ x)) : ∃ f : X → Y, (∀ x, P₀ x f ∧ P₁ x f) ∧ ∀ᶠ x near K, f x = f₀ x := begin let P₀' : Π x : X, germ (𝓝 x) Y → Prop := restrict_germ_predicate (λ x φ, φ.value = f₀ x) K, have hf₀ : ∀ x, P₀ x f₀ ∧ P₀' x f₀, { exact λ x, ⟨hP₀f₀ x, λ hx, eventually_of_forall (λ x', rfl)⟩ }, have ind' : ∀ (U₁ U₂ K₁ K₂ : set X) {f₁ f₂ : X → Y}, is_open U₁ → is_open U₂ → is_closed K₁ → is_closed K₂ → K₁ ⊆ U₁ → K₂ ⊆ U₂ → (∀ x, P₀ x f₁ ∧ P₀' x f₁) → (∀ x, P₀ x f₂) → (∀ x ∈ U₁, P₁ x f₁) → (∀ x ∈ U₂, P₁ x f₂) → ∃ f : X → Y, (∀ x, P₀ x f ∧ P₀' x f ) ∧ (∀ᶠ x near K₁ ∪ K₂, P₁ x f) ∧ (∀ᶠ x near K₁ ∪ U₂ᶜ, f x = f₁ x), { intros U₁ U₂ K₁ K₂ f₁ f₂ U₁_op U₂_op K₁_cpct K₂_cpct hK₁U₁ hK₂U₂ hf₁ hf₂ hf₁U₁ hf₂U₂, obtain ⟨h₀f₁, h₀'f₁⟩ := forall_and_distrib.mp hf₁, rw forall_restrict_germ_predicate_iff at h₀'f₁, rcases (has_basis_nhds_set K).mem_iff.mp (hP₁f₀.germ_congr h₀'f₁) with ⟨U, ⟨U_op, hKU⟩, hU⟩, rcases ind (U_op.union U₁_op) U₂_op (hK.union K₁_cpct) K₂_cpct (union_subset_union hKU hK₁U₁) hK₂U₂ h₀f₁ hf₂ (λ x hx, hx.elim (λ hx, hU hx) (λ hx, hf₁U₁ x hx)) hf₂U₂ with ⟨f, h₀f, hf, h'f⟩, rw [union_assoc, eventually_nhds_set_union] at hf h'f, exact ⟨f, λ x, ⟨h₀f x, restrict_germ_predicate_congr (hf₁ x).2 h'f.1⟩, hf.2, h'f.2⟩ }, rcases inductive_construction'' P₀ P₀' P₁ hf₀ loc ind' with ⟨f, hf⟩, simp only [forall_and_distrib, forall_restrict_germ_predicate_iff ] at hf ⊢, exact ⟨f, ⟨hf.1, hf.2.2⟩, hf.2.1⟩ end end inductive_construction section htpy private noncomputable def T : ℕ → ℝ := λ n, nat.rec 0 (λ k x, x + 1/(2 : ℝ)^(k+1)) n open_locale big_operators -- Note this is more painful than Patrick hoped for. Maybe this should be the definition of T. 
private lemma T_eq (n : ℕ) : T n = 1- (1/(2: ℝ))^n := begin have : T n = ∑ k in finset.range n, 1/(2: ℝ)^(k+1), { induction n with n hn, { simp only [T, finset.range_zero, finset.sum_empty] }, change T n + _ = _, rw [hn, finset.sum_range_succ] }, simp_rw [this, ← one_div_pow, pow_succ, ← finset.mul_sum, geom_sum_eq (by norm_num : 1/(2:ℝ) ≠ 1) n], field_simp, norm_num, apply div_eq_of_eq_mul, apply neg_ne_zero.mpr, apply ne_of_gt, positivity, ring end private lemma T_lt (n : ℕ) : T n < 1 := begin rw T_eq, have : (0 : ℝ) < (1 / 2) ^ n, by positivity, linarith end private lemma T_lt_succ (n : ℕ) : T n < T (n+1) := lt_add_of_le_of_pos le_rfl (one_div_pos.mpr (pow_pos zero_lt_two _)) private lemma T_le_succ (n : ℕ) : T n ≤ T (n+1) := (T_lt_succ n).le private lemma T_succ_sub (n : ℕ) : T (n+1) - T n = 1/2^(n+1) := begin change T n + _ - T n = _, simp end private lemma mul_T_succ_sub (n : ℕ) : 2^(n+1)*(T (n+1) - T n) = 1 := begin rw T_succ_sub, field_simp end private lemma T_one : T 1 = 1/2 := by simp [T] private lemma not_T_succ_le (n : ℕ) : ¬ T (n + 1) ≤ 0 := begin rw [T_eq, not_le], have : (1 / (2 : ℝ)) ^ (n + 1) < 1, apply pow_lt_one ; norm_num, linarith, end lemma inductive_htpy_construction {X Y : Type*} [topological_space X] {N : ℕ} {U K : index_type N → set X} (P₀ P₁ : Π x : X, germ (𝓝 x) Y → Prop) (P₂ : Π p : ℝ × X, germ (𝓝 p) Y → Prop) (hP₂ : ∀ a b (p : ℝ × X) (f : ℝ × X → Y), P₂ (a*p.1+b, p.2) f → P₂ p (λ p : ℝ × X, f (a*p.1+b, p.2))) (U_fin : locally_finite U) (K_cover : (⋃ i, K i) = univ) {f₀ : X → Y} (init : ∀ x, P₀ x f₀) (ind : ∀ (i : index_type N) (f : X → Y), (∀ x, P₀ x f) → (∀ᶠ x near ⋃ j < i, K j, P₁ x f) → ∃ F : ℝ → X → Y, (∀ t, ∀ x, P₀ x $ F t) ∧ (∀ᶠ x near ⋃ j ≤ i, K j, P₁ x $ F 1) ∧ (∀ p, P₂ p ↿F) ∧ (∀ t, ∀ x ∉ U i, F t x = f x) ∧ (∀ᶠ t near Iic 0, F t = f) ∧ (∀ᶠ t near Ici 1, F t = F 1)) : ∃ F : ℝ → X → Y, F 0 = f₀ ∧ (∀ t x, P₀ x (F t)) ∧ (∀ x, P₁ x (F 1)) ∧ (∀ p, P₂ p ↿F) := begin let PP₀ : Π p : ℝ × X, germ (𝓝 p) Y → Prop := λ p φ, P₀ p.2 φ.slice_right ∧ sorry, sorry end end htpy
theory TopoS_Util imports Main begin lemma finite_ne_subset_induct [case_names singleton insert, consumes 2]: assumes "finite F" and "F \<noteq> {}" and "F \<subseteq> A" assumes "\<And>x. x \<in> A \<Longrightarrow> P {x}" and "\<And>x F. finite F \<Longrightarrow> F \<noteq> {} \<Longrightarrow> x \<in> A \<Longrightarrow> x \<notin> F \<Longrightarrow> P F \<Longrightarrow> P (insert x F)" shows "P F" using assms proof induct case empty then show ?case by simp next case (insert x F) then show ?case by cases auto qed (*lemma from afp collections Misc*) lemma set_union_code: "set xs \<union> set ys = set (xs @ ys)" by auto end
Require Export Iron.Language.SystemF2Store.Exp. (********************************************************************) (* Storeable values are the ones that we can keep directly in store bindings *) Inductive svalue := | SLoc : nat -> svalue | SLAM : exp -> svalue | SLam : ty -> exp -> svalue. Hint Constructors svalue. Definition takeSValueOfExp (xx : exp) : option svalue := match xx with | XLoc n => Some (SLoc n) | XLAM x => Some (SLAM x) | XLam t x => Some (SLam t x) | _ => None end. Definition expOfSValue (s: svalue) : exp := match s with | SLoc n => XLoc n | SLAM x => XLAM x | SLam t x => XLam t x end. Definition svalueOf (xx : exp) (sv : svalue) : Prop := takeSValueOfExp xx = Some sv. (* There is an expression for every store value *) Lemma exp_from_svalue : forall sv, exists v, svalueOf v sv. Proof. intros. destruct sv. exists (XLoc n). burn. exists (XLAM e). burn. exists (XLam t e). burn. Qed. Hint Resolve exp_from_svalue. (* There is a store value for every expression value. *) Lemma svalue_from_value : forall v, value v -> (exists sv, svalueOf v sv). Proof. intros. destruct v; nope. exists (SLoc n). burn. exists (SLAM v). burn. exists (SLam t v). burn. Qed. Hint Resolve svalue_from_value. Lemma svalue_of_expOfSValue : forall sv : svalue, svalueOf (expOfSValue sv) sv. Proof. intros. destruct sv; burn. Qed. Hint Resolve svalue_of_expOfSValue. Lemma svalueOf_is_expOfSValue : forall v sv , svalueOf v sv -> v = expOfSValue sv. Proof. intros. inverts H. destruct sv; destruct v; try burn; simpl in *; inverts H1; auto. Qed. Hint Resolve svalueOf_is_expOfSValue. Lemma svalueOf_forall_expOfSValue : forall vs svs , Forall2 svalueOf vs svs -> vs = map expOfSValue svs. Proof. intros. induction H; burn. Qed. Hint Resolve svalueOf_forall_expOfSValue.
# Standard Pooling Problem

## Objective and Prerequisites

One of the new features of Gurobi 9.0 is the addition of a bilinear solver, which can find optimal solutions to non-convex quadratic programming problems (i.e. QP, QCQP, MIQP and MIQCQP). This notebook shows you how to use this capability by working through an instance of the Standard Pooling Problem, one of the best-known bilinear programs. In addition, we introduce two alternative formulations that cast it as a quadratically constrained quadratic problem, so that their performance can be compared when calling the optimizer.

This modeling example is at the advanced level. To fully understand the content of this notebook, the reader should be familiar with the following:

- Python.
- Gurobi Python interface.
- Advanced knowledge of building mathematical optimization models. Typically, the constraints of these Jupyter Notebooks are complex or require advanced features of the Gurobi Python Interface.

**Note:** You can download the repository containing this and other examples by clicking [here](https://github.com/Gurobi/modeling-examples/archive/master.zip). In order to run this Jupyter Notebook properly, you must have a Gurobi license. If you do not have one, you can request an [evaluation license](https://www.gurobi.com/downloads/request-an-evaluation-license/?utm_source=Github&utm_medium=website_JupyterME&utm_campaign=CommercialDataScience) as a *commercial user*, or download a [free license](https://www.gurobi.com/academia/academic-program-and-licenses/?utm_source=Github&utm_medium=website_JupyterME&utm_campaign=AcademicDataScience) as an *academic user*.

---

## Motivation

The pooling problem is a challenging problem in the petrochemical refining, wastewater treatment and mining industries. This problem can be regarded as a generalization of the minimum-cost flow problem and the blending problem. It is indeed important because of the significant savings it can generate, so it comes as no surprise that it has been studied extensively since Haverly pointed out the non-linear structure of this problem in 1978 [5].

---

## Problem Description

The Minimum-Cost Flow Problem (MCFP) seeks to find the cheapest way of sending a certain amount of flow from a set of source nodes to a set of target nodes, possibly via transshipment nodes, in a directed capacitated network. The Blending Problem is a type of MCFP with only source and target nodes, where raw materials with different attribute qualities are blended together to create end products in such a way that their attribute qualities are within tolerances. The Pooling Problem combines features of both problems, as flow streams from different sources are mixed at intermediate pools and blended again at the target nodes. The non-linearity is in fact the direct result of considering pools, as the quality of a given attribute at a pool, defined as the weighted average of the qualities of the incoming streams, is an unknown quantity and thus needs to be captured by a decision variable.

We refer to this problem as the Standard Pooling Problem when the network can be represented by a tripartite graph, i.e. three disjoint sets of nodes such that no nodes within the same set are adjacent. In a nutshell, it can be stated as follows: Given a list of source nodes with raw materials containing known attribute qualities, what is the cheapest way of mixing these materials at intermediate pools so as to meet the demand and tolerances at multiple target nodes? (Gupte et al., 2017) [4].

Several different formulations for the Standard Pooling Problem and its extensions exist in the literature, which can be classified into two main categories: one that consists of flow and quality variables, and the other that uses flow proportions instead of quality variables. Both categories will be considered in this notebook.

---

## Problem Instance

As an illustrative example, we will solve the second Pooling Problem posed by Rehfeldt and Tisljar in 1997 and cited by Audet et al.
in 2004. To that end, let's declare the required data structures to represent this problem instance:

```python
from itertools import product

import gurobipy as gp
import numpy as np
import pandas as pd
from gurobipy import GRB

attrs = {'den', 'bnz', 'roz', 'moz'}

sources, cost, supply, content = gp.multidict({
    "s1": [49.2, 6097.56, {'den': 0.82, 'bnz': 3, 'roz': 99.2, 'moz': 90.5}],
    "s2": [62.0, 16129, {'den': 0.62, 'bnz': 0, 'roz': 87.9, 'moz': 83.5}],
    "s3": [300.0, 500, {'den': 0.75, 'bnz': 0, 'roz': 114, 'moz': 98.7}]
})

targets, price, demand, min_tol, max_tol = gp.multidict({
    "t1": [190, 500, {'den': 0.74, 'roz': 95, 'moz': 85}, {'den': 0.79}],
    "t2": [230, 500, {'den': 0.74, 'roz': 96, 'moz': 88}, {'den': 0.79, 'bnz': 0.9}],
    "t3": [150, 500, {'den': 0.74, 'roz': 91}, {'den': 0.79}]
})

pools, cap = gp.multidict({
    "p1": 1250,
    "p2": 1750
})

# The function `product` deploys the Cartesian product of elements in sets A and B
s2p = set(product(sources, pools))
p2t = set(product(pools, targets))
s2t = {("s1", "t2"), ("s2", "t1"), ("s2", "t3"), ("s3", "t1")}
```

---

## Solution Approach

Mathematical programming is a declarative approach where the modeler formulates a mathematical optimization model that captures the key aspects of a complex decision problem. The Gurobi Optimizer solves such models using state-of-the-art mathematics and computer science.

A mathematical optimization model has five components, namely:

- Sets and indices.
- Parameters.
- Decision variables.
- Objective function(s).
- Constraints.

A quadratic constraint that involves only products of disjoint pairs of variables is often called a bilinear constraint, and a model that contains bilinear constraints is often called a Bilinear Program. Bilinear constraints are a special case of non-convex quadratic constraints. This type of problem is typically solved using spatial Branch and Bound (sB&B). This algorithm explores the entire search space, so it provides a globally valid lower bound on the optimal objective value and, given enough time, it will find a globally optimal solution (subject to tolerances). The interested reader is referred to [references](#references) [3], [6] and [7].

We now present two alternative Bilinear Programs for the Standard Pooling Problem:

### P-formulation (Concentration)

#### Sets and Indices

$G=(V,E)$: Directed graph.

$i,j \in V$: Set of nodes.

$(i,j) \in E \subset V \times V$: Set of edges.

$N(i)^+ = \{j \in V \mid (i,j) \in E \}$: Set of successor nodes receiving outflow from node $i$.

$N(j)^- = \{i \in V \mid (i,j) \in E \}$: Set of predecessor nodes sending inflow to node $j$.

$k \in \text{Attrs}$: Set of attributes.

$s \in \text{Sources} \subset V$: Set of source nodes, i.e. $N(s)^-= \emptyset$.

$t \in \text{Targets} \subset V$: Set of target nodes, i.e. $N(t)^+= \emptyset$.

$p \in \text{Pools} = V \setminus (\text{Sources} \cup \text{Targets})$: Set of pools.

#### Parameters

$\text{Cost}_s \in \mathbb{R}^+$: Cost of acquiring one unit of raw material at source node $s$.

$\text{Supply}_s \in \mathbb{R}^+$: Maximum number of units of raw material available at source node $s$.

$\text{Content}_{s,k} \in \mathbb{R}^+$: Content of attribute $k$ in raw material at source node $s$.

$\text{Price}_t \in \mathbb{R}^+$: Price for selling one unit of final blend at target node $t$.

$\text{Demand}_t \in \mathbb{R}^+$: Minimum number of units required of final blend at target node $t$.

$\text{Min_tol}_{t,k} \in \mathbb{R}^+$: Minimum tolerance for attribute $k$ in final blend at target node $t$.
$\text{Max_tol}_{t,k} \in \mathbb{R}^+$: Maximum tolerance for attribute $k$ in final blend at target node $t$. $\text{Cap}_p \in \mathbb{R}^+$: Maximum Capacity to store intermediate blend at pool $p$. $\text{UB}_{i,j}\in \mathbb{R}^+$: Maximum flow from node $i$ to node $j$. #### Decision Variables $\text{flow}_{i,j} \in [0, \text{UB}_{i,j}]$: Flow from node $i$ to node $j$. $\text{quality}_{p,k} \in \mathbb{R}^+$: Concentration of attribute $k$ at pool $p$. #### Objective Function - **Profit**: Maximize total profits. \begin{equation} \text{Max} \quad Z = \sum_{t \in \text{Targets}}{\sum_{i \in N(t)^-}{\text{Price}_t \cdot \text{flow}_{i,t}}} - \sum_{s \in \text{Sources}}{\sum_{j \in N(s)^+}{\text{Cost}_s \cdot \text{flow}_{s,j}}} \tag{0} \end{equation} #### Constraints - **Flow conservation**: Total inflow of pool $p$ must be equal to its total outflow (nothing is stored in them). \begin{equation} \sum_{t \in N(p)^+}{\text{flow}_{p,t}} - \sum_{s \in N(p)^-}{\text{flow}_{s,p}} = 0 \quad \forall p \in \text{Pools} \tag{1} \end{equation} - **Source capacity**: Total outflow of source $s$ cannot exceed its capacity. \begin{equation} \sum_{j \in N(s)^+}{\text{flow}_{s,j}} \leq \text{Supply}_s \quad \forall s \in \text{Sources} \tag{2} \end{equation} - **Pool capacity**: Total outflow of pool $p$ cannot exceed its capacity. \begin{equation} \sum_{t \in N(p)^+}{\text{flow}_{p,t}} \leq \text{Cap}_p \quad \forall p \in \text{Pools} \tag{3} \end{equation} - **Target demand**: Total inflow of target $t$ must at least meet its minimum demand. \begin{equation} \sum_{i \in N(t)^-}{\text{flow}_{i,t}} \geq \text{Demand}_t \quad \forall t \in \text{Targets} \tag{4} \end{equation} - **Pool concentration**: Concentration of attribute $k$ at pool $p$ is expressed as the weighted average (linear blending) of the concentrations associated to the incoming flows (notice the bilinear terms on the right-hand side). \begin{equation} \sum_{s \in N(p)^-}{\text{Content}_{s,k} \cdot \text{flow}_{s,p}} = \text{quality}_{p,k} \cdot \sum_{t \in N(p)^+}{\text{flow}_{p,t}} \quad \forall (p,k) \in \text{Pools} \times \text{Attrs} \tag{5} \end{equation} - **Target tolerances**: Concentration of attribute $k$ at target $t$ is also the result of linear blending, and must be within tolerances (notice the bilinear terms on the second expression of the left-hand side). \begin{equation} \sum_{s \in N(t)^- \cap \text{Sources}}{\text{Content}_{s,k} \cdot \text{flow}_{s,t}}+ \sum_{p \in N(t)^- \cap \text{Pools}}{\text{quality}_{p,k} \cdot \text{flow}_{p,t}} \geq \text{Min_tol}_{t,k} \cdot \sum_{i \in N(t)^-}{\text{flow}_{i,t}} \quad \forall (t,k) \in \text{Targets} \times \text{Attrs} \tag{6.1} \end{equation} \begin{equation} \sum_{s \in N(t)^- \cap \text{Sources}}{\text{Content}_{s,k} \cdot \text{flow}_{s,t}}+ \sum_{p \in N(t)^- \cap \text{Pools}}{\text{quality}_{p,k} \cdot \text{flow}_{p,t}} \leq \text{Max_tol}_{t,k} \cdot \sum_{i \in N(t)^-}{\text{flow}_{i,t}} \quad \forall (t,k) \in \text{Targets} \times \text{Attrs} \tag{6.2} \end{equation} The number of bilinear terms in this formulation is proportional to the number of attributes. An alternative formulation relies on decision variables that represent fractions of flow instead of concentrations, so that the bilinear terms are no longer associated to the number of attributes. Two types of decision variables may be used: - fraction of total inflow at pool $p$, coming from source $s$. - fraction of total outflow at pool $p$, going to terminal $t$. 
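Concretely, under the first option the eliminated source-to-pool flow variables are recovered from the proportions and the pool throughput. The identity below is implicit in the constraints that follow and is stated here only to make the substitution explicit; it is exactly what introduces the bilinear products of the proportion-based formulation:

\begin{equation}
\text{flow}_{s,p} = \text{prop}_{s,p} \cdot \sum_{t \in N(p)^+}{\text{flow}_{p,t}} \quad \forall (s,p) \in E \cap \left( \text{Sources} \times \text{Pools} \right)
\end{equation}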
A model based on the first option is now presented: ### Q-formulation (Proportion) #### Decision Variables $\text{flow}_{i,j} \in [0, \text{UB}_{i,j}]$: Flow from node $i$ to node $j$. $\text{prop}_{s,p} \in \mathbb{R}^+$: fraction of total inflow at pool $p$, coming from source $s$. **Note:** The $\text{flow}$ variables from sources to pools are replaced by the $\text{prop}$ variables. #### Objective Function - **Profit**: Maximize total profits (notice the bilinear terms on the second expression). \begin{equation} \text{Max} \quad Z = \sum_{t \in \text{Targets}}{\sum_{i \in N(t)^-}{\text{Price}_t \cdot \text{flow}_{i,t}}} - \sum_{s \in \text{Sources}}{\text{cost}_s \cdot \left( \sum_{t \in N(s)^+ \cap \text{Targets}}{\text{flow}_{s,t}} + \sum_{p \in N(s)^+ \cap \text{Pools}}{\text{prop}_{s,p} \cdot \sum_{t \in N(p)^+}{\text{flow}_{p,t}}} \right) } \tag{0} \end{equation} #### Constraints - **Source capacity**: Total outflow of source $s$ cannot exceed its capacity (notice the bilinear terms on the first expression of the left-hand side). \begin{equation} \sum_{p \in N(s)^+ \cap \text{Pools}}{\text{prop}_{s,p} \cdot \sum_{t \in N(p)^+}{\text{flow}_{p,t}}} + \sum_{t \in N(s)^+ \cap \text{Targets}}{\text{flow}_{s,t}} \leq \text{Supply}_s \quad \forall s \in \text{Sources} \tag{1} \end{equation} - **Pool capacity**: Total outflow of pool $p$ cannot exceed its capacity. \begin{equation} \sum_{t \in N(p)^+}{\text{flow}_{p,t}} \leq \text{Cap}_p \quad \forall p \in \text{Pools} \tag{2} \end{equation} - **Target demand**: Total inflow of target $t$ must at least meet its minimum demand. \begin{equation} \sum_{i \in N(t)^-}{\text{flow}_{i,t}} \geq \text{Demand}_t \quad \forall t \in \text{Targets} \tag{3} \end{equation} - **Pool inflow**: The sum of the contributions of all incoming sources to pool $p$ must equal one. \begin{equation} \sum_{s \in N(p)^-}{\text{prop}_{s,p}} = 1 \quad \forall p \in \text{Pools} \tag{4} \end{equation} - **Target tolerances**: Concentration of attribute $k$ at target $t$ is also the result of linear blending, and must be within tolerances (notice the bilinear terms on the second expression of the left-hand side). \begin{equation} \sum_{s \in N(t)^- \cap \text{Sources}}{\text{Content}_{s,k} \cdot \text{flow}_{s,t}} + \sum_{p \in N(t)^- \cap \text{Pools}}{\text{flow}_{p,t} \cdot \sum_{s \in N(p)^-}{\text{content}_{s,k} \cdot \text{prop}_{s,p}}} \geq \text{Min_tol}_{t,k} \cdot \sum_{i \in N(t)^-}{\text{flow}_{i,t}} \\ \forall (t,k) \in \text{Targets} \times \text{Attrs} \tag{5.1} \end{equation} \begin{equation} \sum_{s \in N(t)^- \cap \text{Sources}}{\text{Content}_{s,k} \cdot \text{flow}_{s,t}} + \sum_{p \in N(t)^- \cap \text{Pools}}{\text{flow}_{p,t} \cdot \sum_{s \in N(p)^-}{\text{content}_{s,k} \cdot \text{prop}_{s,p}}} \leq \text{Max_tol}_{t,k} \cdot \sum_{i \in N(t)^-}{\text{flow}_{i,t}} \\ \forall (t,k) \in \text{Targets} \times \text{Attrs} \tag{5.2} \end{equation} One drawback is that if some of the source-to-pool edges have flow capacity, we need to define additional constraints instead of just specifying upper bounds on the associated decision variables. Such constraints can be defined with bilinear terms as follows: - **Flow limit**: Flow from source $s$ to pool $p$ cannot exceed the installed capacity (notice the bilinear terms on the left-hand side). In the P-formulation, declaring this is as easy as setting the upper bound of the associated $\text{flow}$ variable. 
However, this variable no longer exists in the Q-formulation so we need to model the capacity as a constraint. \begin{equation} \text{prop}_{s,p} \cdot \sum_{t \in N(p)^+}{\text{flow}_{p,t}} \leq \text{UB}_{s,p} \quad \forall (i,j) \in E \cap \left( \text{Sources} \times \text{Pools} \right) \mid \text{UB}_{i,j} < \infty \tag{6} \end{equation} --- ## Python Implementation Solving Bilinear Programs with Gurobi is as easy as configuring the global parameter `nonConvex`. When setting this parameter to a value of 2, non-convex quadratic problems are solved by means of translating them into bilinear form and applying sB&B. We will first deploy the P-formulation model, and then compare it with the Q-formulation model: ### P-formulation (Concentration) ```python p_pooling = gp.Model("Pooling") # Set global parameters p_pooling.params.nonConvex = 2 p_pooling.params.timelimit = 5*60 # time limit of 5 minutes # Declare decision variables # flow ik = p_pooling.addVars(s2t, name="Source2Target") ij = p_pooling.addVars(s2p, name="Source2Pool") jk = p_pooling.addVars(p2t, name="Pool2Target") ik["s1","t2"].ub = 750 ik["s3","t1"].ub = 750 # quality prop = p_pooling.addVars(pools, attrs, name="Proportion") # Deploy constraint sets # 1. Flow conservation p_pooling.addConstrs((ij.sum('*',j) == jk.sum(j,'*') for j in pools), name="Flow_conservation") # 2. Source capacity p_pooling.addConstrs((ij.sum(i,'*') + ik.sum(i,'*') <= supply[i] for i in sources), name="Source_capacity") # 3. Pool capacity p_pooling.addConstrs((jk.sum(j,'*') <= cap[j] for j in pools), name="Pool_capacity") # 4. Target demand p_pooling.addConstrs((ik.sum('*',k) + jk.sum('*',k) >= demand[k] for k in targets), name="Target_demand") # 5. Pool concentration p_pooling.addConstrs((gp.quicksum(content[i][attr]*ij[i,j] for i in sources if (i,j) in s2p) == prop[j,attr]*jk.sum(j,'*') for j in pools for attr in attrs), name="Pool_concentration") # 6.1 Target (min) tolerances p_pooling.addConstrs((gp.quicksum(content[i][attr]*ik[i,k] for i in sources if (i,k) in s2t) + gp.quicksum(prop[j,attr]*jk[j,k] for j in pools if (j,k) in p2t) >= min_tol[k][attr]*(ik.sum('*',k) + jk.sum('*',k)) for k in targets for attr in min_tol[k].keys()), name="Target_min_tolerances") # 6.2 Target (max) tolerances p_pooling.addConstrs((gp.quicksum(content[i][attr]*ik[i,k] for i in sources if (i,k) in s2t) + gp.quicksum(prop[j,attr]*jk[j,k] for j in pools if (j,k) in p2t) <= max_tol[k][attr]*(ik.sum('*',k) + jk.sum('*',k)) for k in targets for attr in max_tol[k].keys()), name="Target_max_tolerances") # Deploy Objective Function # 0. Total profit obj = gp.quicksum(price[k]*(ik.sum('*',k) + jk.sum('*',k)) for k in targets) \ - gp.quicksum(cost[i]*(ij.sum(i,'*') + ik.sum(i,'*')) for i in sources) p_pooling.setObjective(obj, GRB.MAXIMIZE) # Find the optimal solution p_pooling.optimize() ``` Using license file c:\gurobi\gurobi.lic Set parameter TokenServer to value SANTOS-SURFACE- Changed value of parameter nonConvex to 2 Prev: -1 Min: -1 Max: 2 Default: -1 Changed value of parameter timelimit to 300.0 Prev: inf Min: 0.0 Max: inf Default: inf Gurobi Optimizer version 9.0.0 build v9.0.0rc2 (win64) Optimize a model with 10 rows, 24 columns and 38 nonzeros Model fingerprint: 0x59d32e14 Model has 20 quadratic constraints Coefficient statistics: Matrix range [1e+00, 1e+00] QMatrix range [1e+00, 1e+00] QLMatrix range [1e-02, 1e+02] Objective range [5e+01, 3e+02] Bounds range [8e+02, 8e+02] RHS range [5e+02, 2e+04] Continuous model is non-convex -- solving as a MIP. 
Presolve removed 1 rows and 0 columns Presolve time: 0.02s Presolved: 125 rows, 49 columns, 311 nonzeros Presolved model has 24 bilinear constraint(s) Variable types: 49 continuous, 0 integer (0 binary) Root relaxation: objective 9.577646e+05, 56 iterations, 0.01 seconds Nodes | Current Node | Objective Bounds | Work Expl Unexpl | Obj Depth IntInf | Incumbent BestBd Gap | It/Node Time 0 0 957764.599 0 10 - 957764.599 - - 0s 0 0 957764.599 0 13 - 957764.599 - - 0s 0 0 878230.977 0 22 - 878230.977 - - 0s 0 0 878230.977 0 21 - 878230.977 - - 0s 0 0 878230.977 0 20 - 878230.977 - - 0s 0 2 878230.977 0 20 - 878230.977 - - 0s * 1927 1907 134 263730.11967 837698.434 218% 7.3 0s * 5021 3355 54 266377.71782 724146.388 172% 6.8 0s * 5593 3432 95 308249.57852 716081.192 132% 6.9 0s * 5599 3345 95 308249.57856 716081.192 132% 6.9 0s * 5609 3262 95 308249.57860 716081.192 132% 6.9 0s * 6033 3469 130 322769.88783 712632.245 121% 6.8 0s * 6047 3367 136 323738.64180 712632.245 120% 6.8 0s * 7343 3714 62 324011.27928 709866.667 119% 6.9 0s * 8515 4213 92 325287.96968 709866.667 118% 7.0 0s * 8517 4213 93 325287.96973 709866.667 118% 7.0 0s * 8521 4213 95 325287.96977 709866.667 118% 7.0 0s *12548 6096 83 325315.73755 707517.455 117% 7.7 1s *20116 7853 195 328873.95358 705636.713 115% 7.8 2s *20117 7849 195 329060.83777 705636.713 114% 7.8 2s *20118 7849 194 329070.27690 705636.713 114% 7.8 2s *20119 7839 193 329080.21283 705636.713 114% 7.8 2s *20120 7839 192 329090.67171 705636.713 114% 7.8 2s *20121 7839 191 329101.68105 705636.713 114% 7.8 2s *23551 6770 121 346978.40032 700617.165 102% 8.9 2s *23552 6770 121 346978.40257 700617.165 102% 8.9 2s *23557 6770 120 346978.40681 700617.165 102% 8.9 2s 64724 36164 infeasible 56 346978.407 666107.243 92.0% 7.4 5s 146554 98426 573450.731 61 10 346978.407 665919.393 91.9% 6.4 10s 217069 148294 infeasible 62 346978.407 665907.668 91.9% 6.4 15s 286106 196549 588780.141 100 7 346978.407 665905.832 91.9% 6.7 20s 367033 255876 470531.514 85 6 346978.407 665905.506 91.9% 6.7 25s 446831 315965 665905.382 75 9 346978.407 665905.463 91.9% 6.6 30s 523723 373196 574337.483 129 10 346978.407 665905.454 91.9% 6.7 35s 596512 430108 587451.543 157 10 346978.407 665905.452 91.9% 6.7 40s 669032 487207 588752.309 114 8 346978.407 665905.451 91.9% 6.7 45s 735697 539826 infeasible 147 346978.407 665905.450 91.9% 6.8 50s 804933 594427 587243.791 132 12 346978.407 665905.450 91.9% 6.8 55s 869363 644240 infeasible 91 346978.407 665905.449 91.9% 6.8 60s 925972 688706 587453.210 153 11 346978.407 665905.449 91.9% 6.8 65s 987758 737398 508376.862 143 12 346978.407 665905.449 91.9% 6.8 70s 1045671 782955 587448.100 162 13 346978.407 665905.448 91.9% 6.8 75s 1099488 824053 597466.701 81 13 346978.407 665905.448 91.9% 6.9 80s 1145517 860103 cutoff 100 346978.407 665905.448 91.9% 6.9 85s 1185587 891503 587466.790 119 9 346978.407 665905.448 91.9% 6.9 90s 1234648 929817 429795.325 96 13 346978.407 665905.448 91.9% 6.9 95s 1278698 965633 587456.630 149 9 346978.407 665905.448 91.9% 6.9 100s 1325849 1002349 493064.825 93 13 346978.407 665905.447 91.9% 6.9 105s 1372171 1038902 574392.037 150 18 346978.407 665905.447 91.9% 6.9 110s 1415976 1073276 587453.870 178 8 346978.407 665905.447 91.9% 6.9 115s 1452666 1102042 587408.017 185 13 346978.407 665905.447 91.9% 6.9 120s 1494087 1135484 518491.257 99 11 346978.407 665905.447 91.9% 6.9 125s 1533746 1166382 587455.978 158 11 346978.407 665905.447 91.9% 6.9 130s 1569354 1195994 591156.447 85 8 346978.407 665905.447 91.9% 6.9 135s 1602435 1223016 
infeasible 90 346978.407 665905.447 91.9% 6.9 140s 1629176 1244050 592746.319 90 15 346978.407 665905.447 91.9% 6.9 145s 1658759 1266971 587503.448 111 9 346978.407 665905.447 91.9% 6.9 150s 1691677 1293037 infeasible 198 346978.407 665905.447 91.9% 6.9 155s 1721818 1317169 587452.069 164 14 346978.407 665905.447 91.9% 6.9 160s 1751223 1340185 infeasible 192 346978.407 665905.446 91.9% 6.9 165s 1780045 1363452 585006.800 122 8 346978.407 665905.446 91.9% 6.9 170s 1808104 1386855 665905.446 85 15 346978.407 665905.446 91.9% 6.9 175s 1829333 1403671 589411.413 133 9 346978.407 665905.446 91.9% 6.9 180s 1853880 1422918 594834.193 85 14 346978.407 665905.446 91.9% 6.9 185s 1878017 1441690 587460.140 128 5 346978.407 665905.446 91.9% 6.9 190s 1897993 1457668 553412.444 143 7 346978.407 665905.446 91.9% 6.9 195s 1918186 1473566 587551.973 108 10 346978.407 665905.446 91.9% 6.9 200s 1935433 1487525 infeasible 86 346978.407 665905.446 91.9% 6.9 205s 1951817 1500274 cutoff 95 346978.407 665905.446 91.9% 6.9 210s 1966580 1512279 594738.299 103 6 346978.407 665905.446 91.9% 6.9 215s 1984393 1526167 588701.419 117 6 346978.407 665905.446 91.9% 6.9 220s 2003743 1541428 587107.111 117 8 346978.407 665905.446 91.9% 6.9 225s 2026379 1559255 587456.458 172 7 346978.407 665905.446 91.9% 6.9 230s 2047413 1576413 588788.175 104 8 346978.407 665905.446 91.9% 6.9 235s 2068624 1593485 665905.444 90 11 346978.407 665905.446 91.9% 6.9 240s 2090768 1610499 587457.490 137 8 346978.407 665905.446 91.9% 6.9 245s 2111937 1627414 infeasible 203 346978.407 665905.446 91.9% 6.9 250s 2125245 1638227 418041.761 87 9 346978.407 665905.446 91.9% 6.9 255s 2144493 1652753 587415.379 140 13 346978.407 665905.446 91.9% 6.9 260s 2164972 1669513 588880.129 107 8 346978.407 665905.446 91.9% 6.9 265s 2186542 1685907 infeasible 211 346978.407 665905.446 91.9% 6.9 270s 2207633 1701919 577032.735 107 15 346978.407 665905.446 91.9% 6.9 275s 2229326 1719066 587624.771 134 9 346978.407 665905.446 91.9% 6.9 280s 2249495 1735417 587458.057 137 9 346978.407 665905.445 91.9% 6.9 285s 2272006 1753436 589725.738 99 6 346978.407 665905.445 91.9% 6.9 290s 2294769 1771504 infeasible 94 346978.407 665905.445 91.9% 6.9 295s 2312724 1786109 597466.046 93 17 346978.407 665905.445 91.9% 6.9 300s Explored 2313831 nodes (16012361 simplex iterations) in 300.01 seconds Thread count was 8 (of 8 available processors) Solution count 10: 346978 346978 346978 ... 325316 Time limit reached Warning: max constraint violation (1.6281e-06) exceeds tolerance Best objective 3.469784068080e+05, best bound 6.659054453585e+05, gap 91.9155% The P-formulation for this instance has: - 24 decision variables. - 10 linear constraints. - 20 bilinear constraints. - a linear objective function. As can be seen, we still observe a gap of 64.82% after reaching the time limit of five minutes (at this point, the incumbent solution induces a total profit of 429,486.87 USD). In fact, even after 20 minutes the solver does not make much progress in closing the gap. ### Q-Formulation (proportion) Let's now see how the q-formulation model performs: ```python q_pooling = gp.Model("Pooling") # Set global parameters q_pooling.params.nonConvex = 2 q_pooling.params.timelimit = 5*60 # Declare decision variables # flow ik = q_pooling.addVars(s2t, name="Source2Target") jk = q_pooling.addVars(p2t, name="Pool2Target") ik["s1","t2"].ub = 750 ik["s3","t1"].ub = 750 # proportion p_ij = q_pooling.addVars(s2p, ub=1.0, name="Prop_Source2Pool") # Deploy constraint sets # 1. 
Source capacity q_pooling.addConstrs((gp.quicksum(p_ij[i,j]*jk.sum(j,'*') for j in pools if (i,j) in s2p) + ik.sum(i,'*') <= supply[i] for i in sources), name="Source_capacity") # 2. Pool capacity q_pooling.addConstrs((jk.sum(j,'*') <= cap[j] for j in pools), name="Pool_capacity") # 3. Target demand q_pooling.addConstrs((ik.sum('*',k) + jk.sum('*',k) >= demand[k] for k in targets), name="Target_demand") # 4. Pool inflow q_pooling.addConstrs((p_ij.sum('*',j) == 1 for j in pools), name="Pool_inflow") # 5.1 Target (min) tolerances q_pooling.addConstrs((gp.quicksum(content[i][attr]*ik[i,k] for i in sources if (i,k) in s2t) + gp.quicksum(content[i][attr]*p_ij[i,j]*jk[j,k] for (i,j) in s2p if (j,k) in p2t) >= min_tol[k][attr]*(ik.sum('*',k) + jk.sum('*',k)) for k in targets for attr in min_tol[k].keys()), name="Target_min_tolerances") # 5.2 Target (max) tolerances q_pooling.addConstrs((gp.quicksum(content[i][attr]*ik[i,k] for i in sources if (i,k) in s2t) + gp.quicksum(content[i][attr]*p_ij[i,j]*jk[j,k] for (i,j) in s2p if (j,k) in p2t) <= max_tol[k][attr]*(ik.sum('*',k) + jk.sum('*',k)) for k in targets for attr in max_tol[k].keys()), name="Target_max_tolerances") # Deploy Objective Function # 0. Total profit obj = gp.quicksum(price[k]*(ik.sum('*',k) + jk.sum('*',k)) for k in targets) \ - gp.quicksum(cost[i]*(gp.quicksum(p_ij[i,j]*jk.sum(j,'*') for j in pools if (i,j) in s2p) + ik.sum(i,'*')) for i in sources) q_pooling.setObjective(obj, GRB.MAXIMIZE) # Find the optimal solution q_pooling.optimize() ``` Changed value of parameter nonConvex to 2 Prev: -1 Min: -1 Max: 2 Default: -1 Changed value of parameter timelimit to 300.0 Prev: inf Min: 0.0 Max: inf Default: inf Gurobi Optimizer version 9.0.0 build v9.0.0rc2 (win64) Optimize a model with 7 rows, 16 columns and 22 nonzeros Model fingerprint: 0xfc755c91 Model has 18 quadratic objective terms Model has 15 quadratic constraints Coefficient statistics: Matrix range [1e+00, 1e+00] QMatrix range [6e-01, 1e+02] QLMatrix range [1e-02, 1e+02] Objective range [9e+01, 2e+02] QObjective range [1e+02, 6e+02] Bounds range [1e+00, 8e+02] RHS range [1e+00, 2e+03] QRHS range [5e+02, 2e+04] Continuous model is non-convex -- solving as a MIP. Presolve time: 0.01s Presolved: 95 rows, 35 columns, 315 nonzeros Presolved model has 18 bilinear constraint(s) Variable types: 35 continuous, 0 integer (0 binary) Root relaxation: objective 1.810743e+06, 36 iterations, 0.00 seconds Nodes | Current Node | Objective Bounds | Work Expl Unexpl | Obj Depth IntInf | Incumbent BestBd Gap | It/Node Time 0 0 1810743.26 0 13 - 1810743.26 - - 0s 0 0 865587.763 0 13 - 865587.763 - - 0s 0 0 595135.123 0 13 - 595135.123 - - 0s 0 0 564193.525 0 15 - 564193.525 - - 0s 0 0 556780.563 0 15 - 556780.563 - - 0s 0 0 549889.932 0 15 - 549889.932 - - 0s 0 0 549889.932 0 18 - 549889.932 - - 0s 0 0 549889.932 0 18 - 549889.932 - - 0s 0 2 549889.932 0 18 - 549889.932 - - 0s * 47 54 8 404852.41910 476226.236 17.6% 7.3 0s H 62 52 433077.56632 476226.236 10.0% 7.1 0s H 65 52 439182.58928 476226.236 8.43% 6.9 0s Cutting planes: RLT: 18 Explored 131 nodes (684 simplex iterations) in 0.20 seconds Thread count was 8 (of 8 available processors) Solution count 3: 439183 433078 404852 Optimal solution found (tolerance 1.00e-04) Best objective 4.391825892775e+05, best bound 4.392081187464e+05, gap 0.0058% The Q-formulation for this instance has: - 16 decision variables. - 7 linear constraints. - 15 bilinear constraints. - a bilinear objective function. 
Notice it has fewer decision variables and also fewer bilinear constraints. Now Gurobi was able to find the optimal solution of 439,182.59 USD in less than one second. --- ## Analysis Let's see the optimal flows found: ### Flow from Sources to Targets The following table determines the flows from source nodes to target nodes. For example, from source node s2 to target node t1 there is a flow of 966.7$\times 10^{2}$ bbl. ```python rows = sources.copy() columns = targets.copy() s2t_plan = pd.DataFrame(columns=columns, index=rows, data=0.0) for source, target in ik.keys(): if (abs(ik[source, target].x) > 1e-6): s2t_plan.loc[source, target] = np.round(ik[source, target].x, 1) s2t_plan ``` <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>t1</th> <th>t2</th> <th>t3</th> </tr> </thead> <tbody> <tr> <th>s1</th> <td>0.0</td> <td>0.0</td> <td>0.0</td> </tr> <tr> <th>s2</th> <td>966.7</td> <td>0.0</td> <td>200.0</td> </tr> <tr> <th>s3</th> <td>0.0</td> <td>0.0</td> <td>0.0</td> </tr> </tbody> </table> </div> ### Flow from Pools to Targets The following table defines the flows from pool nodes to target nodes. For example, from pool node p1 to target node t1 there is a flow of 92.8$\times 10^{2}$ bbl. ```python rows = pools.copy() columns = targets.copy() p2t_plan = pd.DataFrame(columns=columns, index=rows, data=0.0) for pool, target in jk.keys(): if (abs(jk[pool, target].x) > 1e-6): p2t_plan.loc[pool, target] = np.round(jk[pool, target].x, 1) p2t_plan ``` <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>t1</th> <th>t2</th> <th>t3</th> </tr> </thead> <tbody> <tr> <th>p1</th> <td>92.8</td> <td>990.6</td> <td>0.0</td> </tr> <tr> <th>p2</th> <td>1450.0</td> <td>0.0</td> <td>300.0</td> </tr> </tbody> </table> </div> ### Flow from Sources to Pools The following table shows the flows from source nodes to pool nodes. For example, from source node s2 to pool node p1 there is a flow of 258.3$\times 10^{2}$ bbl. ```python rows = sources.copy() columns = pools.copy() s2p_plan = pd.DataFrame(columns=columns, index=rows, data=0.0) for source, pool in p_ij.keys(): if (abs(p_ij[source, pool].x) > 1e-6): flow = p_ij[source, pool].x * p2t_plan.loc[pool,:].sum() s2p_plan.loc[source, pool] = np.round(flow, 1) s2p_plan ``` <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>p1</th> <th>p2</th> </tr> </thead> <tbody> <tr> <th>s1</th> <td>325.0</td> <td>1750.0</td> </tr> <tr> <th>s2</th> <td>258.3</td> <td>0.0</td> </tr> <tr> <th>s3</th> <td>500.0</td> <td>0.0</td> </tr> </tbody> </table> </div> --- ## Conclusions This notebook showed how easy it is to solve Bilinear Programs using Gurobi. It also highlighted the dramatic difference in performance of alternative formulations when solving challenging problems, such as the Standard Pooling Problem. 
It is thus of utmost importance to analyze carefully the context of the problem at hand, and to weigh the pros and cons of alternative models. --- <a id='references'></a> ## References 1. Alfaki, M. (2012). Models and solution methods for the pooling problem. 2. Audet, C., Brimberg, J., Hansen, P., Digabel, S. L., & Mladenović, N. (2004). Pooling problem: Alternate formulations and solution methods. Management science, 50(6), 761-776. 3. Dombrowski, J. (2015, June 07). McCormick envelopes. Retrieved from https://optimization.mccormick.northwestern.edu/index.php/McCormick_envelopes 4. Gupte, A., Ahmed, S., Dey, S. S., & Cheon, M. S. (2017). Relaxations and discretizations for the pooling problem. Journal of Global Optimization, 67(3), 631-669. 5. Haverly, C. A. (1978). Studies of the behavior of recursion for the pooling problem. Acm sigmap bulletin, (25), 19-28. 6. Liberti, L. (2008). Introduction to global optimization. Ecole Polytechnique. 7. Zhuang E. (2015, June 06). Spatial branch and bound method. Retrieved from https://optimization.mccormick.northwestern.edu/index.php/Spatial_branch_and_bound_method Copyright © 2020 Gurobi Optimization, LLC ```python ```
(******************************************************************* The trivial comprehension bicategory If we have a bicategory with products, then we obtain a comprehension bicategory. The fibration comes from the trivial displayed bicategory and for the comprehension pseudofunctor, we use that the bicategory has products. Contents 1. The comprehension pseudofunctor 2. Preservation of cartesian 1-cells 3. Preservation of (op)cartesian 2-cells 4. The comprehension bicategory *******************************************************************) Require Import UniMath.Foundations.All. Require Import UniMath.MoreFoundations.All. Require Import UniMath.CategoryTheory.Core.Categories. Require Import UniMath.Bicategories.Core.Bicat. Import Bicat.Notations. Require Import UniMath.Bicategories.Core.Invertible_2cells. Require Import UniMath.Bicategories.Morphisms.FullyFaithful. Require Import UniMath.Bicategories.Morphisms.Properties. Require Import UniMath.Bicategories.Morphisms.Properties.Projections. Require Import UniMath.Bicategories.DisplayedBicats.DispBicat. Require Import UniMath.Bicategories.DisplayedBicats.DispPseudofunctor. Require Import UniMath.Bicategories.DisplayedBicats.DispUnivalence. Require Import UniMath.Bicategories.DisplayedBicats.CleavingOfBicat. Require Import UniMath.Bicategories.DisplayedBicats.CartesianPseudoFunctor. Require Import UniMath.Bicategories.DisplayedBicats.Examples.Trivial. Require Import UniMath.Bicategories.DisplayedBicats.Examples.Codomain. Require Import UniMath.Bicategories.DisplayedBicats.ExamplesOfCleavings.TrivialCleaving. Require Import UniMath.Bicategories.DisplayedBicats.ExamplesOfCleavings.CodomainCleaving. Require Import UniMath.Bicategories.Limits.Products. Import Products.Notations. Require Import UniMath.Bicategories.Limits.Pullbacks. Require Import UniMath.Bicategories.PseudoFunctors.Display.PseudoFunctorBicat. Require Import UniMath.Bicategories.PseudoFunctors.PseudoFunctor. Require Import UniMath.Bicategories.PseudoFunctors.Examples.Identity. Require Import UniMath.Bicategories.Logic.ComprehensionBicat. Local Open Scope cat. Section TrivialCompBicat. Context (B : bicat_with_binprod). (** 1. The comprehension pseudofunctor *) Definition trivial_comprehension_data : disp_psfunctor_data (trivial_displayed_bicat B B) (cod_disp_bicat B) (id_psfunctor B). Proof. simple refine (_ ,, _ ,, _ ,, _ ,, _). - intros x y. use make_ar. + exact (x ⊗ y). + exact π₁. - intros x₁ x₂ f y₁ y₂ g. use make_disp_1cell_cod. + exact (f ⊗₁ g). + apply inv_of_invertible_2cell. apply pair_1cell_pr1. - intros x₁ x₂ f₁ f₂ α y₁ y₂ g₁ g₂ β. use make_disp_2cell_cod. + exact (α ⊗₂ β). + abstract (unfold coherent_homot ; cbn ; use vcomp_move_R_pM ; [ is_iso | ] ; cbn ; rewrite !vassocr ; use vcomp_move_L_Mp ; [ is_iso | ] ; cbn ; apply prod_2cell_pr1_alt). - intro ; intros ; simpl. simple refine (_ ,, _). + use make_disp_2cell_cod. * exact ((pair_1cell_id_id_invertible _ _ _)^-1). * abstract (unfold coherent_homot ; cbn ; refine (maponpaths _ (binprod_ump_2cell_pr1 _ _ _) @ _) ; rewrite !vassocr ; apply maponpaths_2 ; rewrite lwhisker_id2 ; rewrite !vassocl ; rewrite !(maponpaths (λ z, _ • z) (vassocr _ _ _)) ; rewrite linvunitor_lunitor ; rewrite id2_left ; apply runitor_rinvunitor). + use is_disp_invertible_2cell_cod. cbn. apply binprod_ump_2cell_invertible ; is_iso. - intro ; intros ; simpl. simple refine (_ ,, _). + use make_disp_2cell_cod. * apply pair_1cell_comp. 
* abstract (unfold coherent_homot ; cbn ; rewrite !vassocl ; etrans ; [ do 5 apply maponpaths ; apply binprod_ump_2cell_pr1 | ] ; rewrite !vassocr ; apply maponpaths_2 ; rewrite !vassocl ; etrans ; [ do 4 apply maponpaths ; rewrite !vassocr ; rewrite lassociator_rassociator ; rewrite id2_left ; apply idpath | ] ; etrans ; [ do 3 apply maponpaths ; rewrite !vassocr ; rewrite lwhisker_vcomp ; rewrite vcomp_linv ; rewrite lwhisker_id2 ; rewrite id2_left ; apply idpath | ] ; etrans ; [ do 2 apply maponpaths ; rewrite !vassocr ; rewrite rassociator_lassociator ; rewrite id2_left ; apply idpath | ] ; etrans ; [ apply maponpaths ; rewrite !vassocr ; rewrite rwhisker_vcomp ; rewrite vcomp_linv ; rewrite id2_rwhisker ; rewrite id2_left ; apply idpath | ] ; rewrite lassociator_rassociator ; rewrite lwhisker_id2 ; apply idpath). + use is_disp_invertible_2cell_cod. cbn. apply pair_1cell_comp_invertible. Defined. Definition trivial_comprehension_is_disp_psfunctor : is_disp_psfunctor (trivial_displayed_bicat B B) (cod_disp_bicat B) (id_psfunctor B) trivial_comprehension_data. Proof. repeat split ; intro ; intros ; (use subtypePath ; [ intro ; apply cellset_property | ]). - refine (_ @ !(transportb_cell_of_cod_over _ _)). apply pair_2cell_id_id. - refine (_ @ !(transportb_cell_of_cod_over _ _)). apply pair_2cell_comp. - refine (_ @ !(transportb_cell_of_cod_over (psfunctor_lunitor _ _) _)). apply binprod_lunitor. - refine (_ @ !(transportb_cell_of_cod_over (psfunctor_runitor _ _) _)). apply binprod_runitor. - refine (_ @ !(transportb_cell_of_cod_over (psfunctor_lassociator _ _ _ _) _)). apply binprod_lassociator. - refine (_ @ !(transportb_cell_of_cod_over (psfunctor_lwhisker _ _ _) _)). apply binprod_lwhisker. - refine (_ @ !(transportb_cell_of_cod_over (psfunctor_rwhisker _ _ _) _)). apply binprod_rwhisker. Qed. Definition trivial_comprehension : disp_psfunctor (trivial_displayed_bicat B B) (cod_disp_bicat B) (id_psfunctor B). Proof. simple refine (_ ,, _). - exact trivial_comprehension_data. - exact trivial_comprehension_is_disp_psfunctor. Defined. (** 2. Preservation of cartesian 1-cells *) Section GlobalCartesian. Context {b₁ b₂ : B} (f : b₁ --> b₂) {c₁ c₂ : B} (l : c₁ --> c₂) (Hl : left_adjoint_equivalence l). Let cone : pb_cone f π₁ := make_pb_cone (b₁ ⊗ c₁) π₁ (f ⊗₁ l) (inv_of_invertible_2cell (pair_1cell_pr1 B f l)). Let r : c₂ --> c₁ := left_adjoint_right_adjoint Hl. Let η : invertible_2cell (id₁ c₁) (l · left_adjoint_right_adjoint Hl) := left_equivalence_unit_iso Hl. Let ε : invertible_2cell (left_adjoint_right_adjoint Hl · l) (id₁ c₂) := left_equivalence_counit_iso Hl. Section AdjEquivToUMP1. Context (q : pb_cone f (π₁ : b₂ ⊗ c₂ --> b₂)). Local Definition adj_equiv_to_pb_ump_1_pb_1cell_map : q --> b₁ ⊗ c₁ := ⟨ pb_cone_pr1 q , pb_cone_pr2 q · π₂ · r ⟩. Local Notation "'φ'" := adj_equiv_to_pb_ump_1_pb_1cell_map. Local Definition adj_equiv_to_pb_ump_1_pb_1cell_π₁ : invertible_2cell (φ · π₁) (pb_cone_pr1 q) := prod_1cell_pr1 _ _ _. Local Notation "'φπ₁'" := adj_equiv_to_pb_ump_1_pb_1cell_π₁. Local Definition adj_equiv_to_pb_ump_1_pb_1cell_cell_π₁ : φ · (f ⊗₁ l) · π₁ ==> pb_cone_pr2 q · π₁ := rassociator _ _ _ • (_ ◃ pair_1cell_pr1 _ _ _) • lassociator _ _ _ • (prod_1cell_pr1 _ _ _ ▹ _) • pb_cone_cell _. Local Notation "'φπ₂_cell_π₁'" := adj_equiv_to_pb_ump_1_pb_1cell_cell_π₁. 
Local Definition adj_equiv_to_pb_ump_1_pb_1cell_cell_π₂ : φ · (f ⊗₁ l) · π₂ ==> pb_cone_pr2 q · π₂ := rassociator _ _ _ • (_ ◃ pair_1cell_pr2 _ _ _) • lassociator _ _ _ • (prod_1cell_pr2 _ _ _ ▹ _) • rassociator _ _ _ • (_ ◃ ε) • runitor _. Local Notation "'φπ₂_cell_π₂'" := adj_equiv_to_pb_ump_1_pb_1cell_cell_π₂. Local Definition adj_equiv_to_pb_ump_1_pb_1cell_cell : φ · (f ⊗₁ l) ==> pb_cone_pr2 q. Proof. use binprod_ump_2cell. - exact (pr2 (binprod_of B b₂ c₂)). - exact φπ₂_cell_π₁. - exact φπ₂_cell_π₂. Defined. Local Notation "'φπ₂_cell'" := adj_equiv_to_pb_ump_1_pb_1cell_cell. Local Definition adj_equiv_to_pb_ump_1_pb_1cell_invcell : invertible_2cell (φ · (f ⊗₁ l)) (pb_cone_pr2 q). Proof. use make_invertible_2cell. - exact φπ₂_cell. - use binprod_ump_2cell_invertible ; unfold adj_equiv_to_pb_ump_1_pb_1cell_cell_π₁ ; unfold adj_equiv_to_pb_ump_1_pb_1cell_cell_π₂. + is_iso. * apply pair_1cell_pr1. * apply prod_1cell_pr1. * apply pb_cone_cell. + is_iso. * apply pair_1cell_pr2. * apply prod_1cell_pr2. * apply ε. Defined. Local Notation "'φπ₂'" := adj_equiv_to_pb_ump_1_pb_1cell_invcell. Local Definition adj_equiv_to_pb_ump_1_pb_1cell_eq : φ ◃ pb_cone_cell cone = lassociator φ (pb_cone_pr1 cone) f • (φπ₁ ▹ f) • pb_cone_cell q • (φπ₂ ^-1 ▹ π₁) • rassociator φ (pb_cone_pr2 cone) π₁. Proof. cbn. rewrite !vassocl. refine (!_). etrans. { do 3 apply maponpaths. apply maponpaths_2. apply binprod_ump_2cell_pr1. } rewrite !vassocl. rewrite lassociator_rassociator. rewrite id2_right. etrans. { do 2 apply maponpaths. rewrite !vassocr. rewrite vcomp_rinv. rewrite id2_left. apply idpath. } rewrite !vassocl. etrans. { apply maponpaths. rewrite !vassocr. rewrite rwhisker_vcomp. rewrite vcomp_rinv. rewrite id2_rwhisker. rewrite id2_left. apply idpath. } rewrite !vassocr. rewrite lassociator_rassociator. rewrite id2_left. apply idpath. Qed. Definition adj_equiv_to_pb_ump_1_pb_1cell : pb_1cell q cone. Proof. use make_pb_1cell. - exact φ. - exact φπ₁. - exact φπ₂. - exact adj_equiv_to_pb_ump_1_pb_1cell_eq. Defined. End AdjEquivToUMP1. Definition adj_equiv_to_pb_ump_1 : pb_ump_1 cone. Proof. intro q. exact (adj_equiv_to_pb_ump_1_pb_1cell q). Defined. Section AdjEquivToUMP2. Context {q : B} {φ ψ : q --> cone} (α : φ · pb_cone_pr1 cone ==> ψ · pb_cone_pr1 cone) (β : φ · pb_cone_pr2 cone ==> ψ · pb_cone_pr2 cone) (p : (φ ◃ pb_cone_cell cone) • lassociator φ (pb_cone_pr2 cone) π₁ • (β ▹ π₁) • rassociator ψ (pb_cone_pr2 cone) π₁ = lassociator φ (pb_cone_pr1 cone) f • (α ▹ f) • rassociator ψ (pb_cone_pr1 cone) f • (ψ ◃ pb_cone_cell cone)). Let φπ₂ : φ · π₂ ==> ψ · π₂ := fully_faithful_1cell_inv_map (adj_equiv_fully_faithful Hl) (rassociator φ π₂ l • (φ ◃ (pair_1cell_pr2 B f l) ^-1) • lassociator φ (f ⊗₁ l) π₂ • (β ▹ π₂) • rassociator ψ (f ⊗₁ l) π₂ • (ψ ◃ pair_1cell_pr2 B f l) • lassociator ψ π₂ l). Definition adj_equiv_to_pb_ump_2_unique : isaprop (∑ (γ : φ ==> ψ), γ ▹ pb_cone_pr1 cone = α × γ ▹ pb_cone_pr2 cone = β). Proof. use invproofirrelevance. intros ζ₁ ζ₂. use subtypePath. { intro. apply isapropdirprod ; apply cellset_property. } use binprod_ump_2cell_unique. - exact (pr2 (binprod_of B b₁ c₁)). - exact α. - exact φπ₂. - exact (pr12 ζ₁). - use (fully_faithful_1cell_eq (adj_equiv_fully_faithful Hl)). refine (!_). etrans. { apply fully_faithful_1cell_inv_map_eq. } cbn -[η]. rewrite !vassocl. use vcomp_move_R_pM ; [ is_iso | ] ; cbn. rewrite rwhisker_rwhisker. rewrite !vassocr. apply maponpaths_2. rewrite !vassocl. use vcomp_move_R_pM ; [ is_iso | ] ; cbn. rewrite <- vcomp_whisker. rewrite !vassocr. 
apply maponpaths_2. rewrite !vassocl. use vcomp_move_R_pM ; [ is_iso | ] ; cbn. rewrite <- rwhisker_rwhisker_alt. apply maponpaths_2. apply maponpaths. exact (!(pr22 ζ₁)). - exact (pr12 ζ₂). - use (fully_faithful_1cell_eq (adj_equiv_fully_faithful Hl)). refine (!_). etrans. { apply fully_faithful_1cell_inv_map_eq. } cbn -[η]. rewrite !vassocl. use vcomp_move_R_pM ; [ is_iso | ] ; cbn. rewrite rwhisker_rwhisker. rewrite !vassocr. apply maponpaths_2. rewrite !vassocl. use vcomp_move_R_pM ; [ is_iso | ] ; cbn. rewrite <- vcomp_whisker. rewrite !vassocr. apply maponpaths_2. rewrite !vassocl. use vcomp_move_R_pM ; [ is_iso | ] ; cbn. rewrite <- rwhisker_rwhisker_alt. apply maponpaths_2. apply maponpaths. exact (!(pr22 ζ₂)). Qed. Definition adj_equiv_to_pb_ump_2_cell : φ ==> ψ. Proof. use binprod_ump_2cell. - exact (pr2 (binprod_of B b₁ c₁)). - exact α. - exact φπ₂. Defined. Definition adj_equiv_to_pb_ump_2_cell_pr1 : adj_equiv_to_pb_ump_2_cell ▹ pb_cone_pr1 cone = α. Proof. apply binprod_ump_2cell_pr1. Qed. Definition adj_equiv_to_pb_ump_2_cell_pr2 : adj_equiv_to_pb_ump_2_cell ▹ pb_cone_pr2 cone = β. Proof. use binprod_ump_2cell_unique. - exact (pr2 (binprod_of B b₂ c₂)). - exact (rassociator _ _ _ • (_ ◃ pair_1cell_pr1 _ _ _) • lassociator _ _ _ • (α ▹ f) • rassociator _ _ _ • (_ ◃ (pair_1cell_pr1 _ _ _)^-1) • lassociator _ _ _). - exact (β ▹ _). - cbn ; cbn in p. rewrite !vassocl. use vcomp_move_L_pM ; [ is_iso | ]. cbn. rewrite rwhisker_rwhisker. rewrite !vassocr. apply maponpaths_2. use vcomp_move_L_Mp ; [ is_iso | ]. cbn. rewrite vcomp_whisker. rewrite !vassocl. apply maponpaths. rewrite !vassocr. use vcomp_move_L_Mp ; [ is_iso | ]. cbn. rewrite <- rwhisker_rwhisker. do 2 apply maponpaths. apply binprod_ump_2cell_pr1. - cbn. use (vcomp_lcancel (lassociator _ _ _)) ; [ is_iso | ]. rewrite rwhisker_rwhisker. use (vcomp_lcancel (_ ◃ (pair_1cell_pr2 B f l)^-1)) ; [ is_iso | ]. rewrite !vassocr. rewrite <- vcomp_whisker. use (vcomp_lcancel (rassociator _ _ _)) ; [ is_iso | ]. rewrite !vassocr. rewrite <- rwhisker_rwhisker_alt. etrans. { do 3 apply maponpaths_2. apply maponpaths. apply binprod_ump_2cell_pr2. } do 3 (use vcomp_move_R_Mp ; [ is_iso | ]). apply fully_faithful_1cell_inv_map_eq. - rewrite !vassocl. use vcomp_move_L_pM ; [ is_iso | ]. cbn. use vcomp_move_L_pM ; [ is_iso ; apply pair_1cell_pr1 | ]. cbn. rewrite !vassocr. use vcomp_move_L_Mp ; [ is_iso | ]. cbn. exact p. - apply idpath. Qed. End AdjEquivToUMP2. Definition adj_equiv_to_pb_ump_2 : pb_ump_2 cone. Proof. intros q φ ψ α β p. use iscontraprop1. - exact (adj_equiv_to_pb_ump_2_unique α β). - exact (adj_equiv_to_pb_ump_2_cell α β ,, adj_equiv_to_pb_ump_2_cell_pr1 α β ,, adj_equiv_to_pb_ump_2_cell_pr2 α β p). Defined. Definition adj_equiv_to_pb : has_pb_ump cone. Proof. split. - exact adj_equiv_to_pb_ump_1. - exact adj_equiv_to_pb_ump_2. Defined. End GlobalCartesian. Definition global_cartesian_trivial_comprehension : global_cartesian_disp_psfunctor trivial_comprehension. Proof. intros b₁ b₂ f c₁ c₂ g Hg ; cbn in *. apply is_pb_to_cartesian_1cell. pose (g_equiv := trivial_cartesian_1cell_is_adj_equiv _ _ _ _ Hg). apply (adj_equiv_to_pb f g g_equiv). Defined. (** 3. Preservation of (op)cartesian 2-cells *) Definition local_cartesian_trivial_comprehension : local_cartesian_disp_psfunctor trivial_comprehension. Proof. intros b₁ b₂ f₁ f₂ α c₁ c₂ g₁ g₂ β Hβ ; cbn in *. apply is_cartesian_2cell_sfib_to_is_cartesian_2cell ; cbn. apply invertible_to_cartesian. refine (transportb is_invertible_2cell (pair_2cell_pr2 _ _ _) _). is_iso. 
- apply prod_1cell_pr2. - exact (trivial_cartesian_2cell_is_invertible _ _ _ _ Hβ). Defined. Definition local_opcartesian_trivial_comprehension : local_opcartesian_disp_psfunctor trivial_comprehension. Proof. intros b₁ b₂ f₁ f₂ α c₁ c₂ g₁ g₂ β Hβ ; cbn in *. apply is_opcartesian_2cell_sopfib_to_is_opcartesian_2cell ; cbn. apply invertible_to_opcartesian. refine (transportb is_invertible_2cell (pair_2cell_pr2 _ _ _) _). is_iso. - apply prod_1cell_pr2. - exact (trivial_opcartesian_2cell_is_invertible _ _ _ _ Hβ). Defined. (** 4. The comprehension bicategory *) Definition trivial_comprehension_bicat_structure : comprehension_bicat_structure B. Proof. use make_comprehension_bicat_structure. - exact (trivial_displayed_bicat B B). - exact trivial_comprehension. - exact (trivial_global_cleaving B B). - exact global_cartesian_trivial_comprehension. Defined. Definition trivial_comprehension_is_covariant : is_covariant trivial_comprehension_bicat_structure. Proof. repeat split. - exact (trivial_local_opcleaving B B). - exact (trivial_lwhisker_opcartesian B B). - exact (trivial_rwhisker_opcartesian B B). - exact local_opcartesian_trivial_comprehension. Defined. Definition trivial_comprehension_is_contravariant : is_contravariant trivial_comprehension_bicat_structure. Proof. repeat split. - exact (trivial_local_cleaving B B). - exact (trivial_lwhisker_cartesian B B). - exact (trivial_rwhisker_cartesian B B). - exact local_cartesian_trivial_comprehension. Defined. Definition trivial_comprehension_bicat : comprehension_bicat := _ ,, _ ,, trivial_comprehension_is_covariant. Definition trivial_contravariant_comprehension_bicat : contravariant_comprehension_bicat := _ ,, _ ,, trivial_comprehension_is_contravariant. End TrivialCompBicat.
import Iris.BI import Iris.Proofmode.Classes import Iris.Proofmode.Environments import Iris.Std namespace Iris.Proofmode open Iris.BI Iris.Std open BI /-- Introduce one or multiple let-bound variables. -/ scoped macro "intro_let " names:(colGt Lean.binderIdent)* : tactic => `( intro _ ; split ; rename_i $[$names]* ) -- proof mode theorem tac_start [BI PROP] (P : PROP) : envs_entails ⟨.nil, .nil⟩ P → ⊢ P := by simp only [envs_entails, of_envs, big_op] rw' [intuitionistically_True_emp, (left_id : emp ∗ _ ⊣⊢ _)] intro h exact h theorem tac_stop [BI PROP] {Γₚ Γₛ : Env PROP} (P : PROP) : let Ps := match Γₚ, Γₛ with | .nil, .nil => `[iprop| emp] | _ , .nil => `[iprop| □ [∧] Γₚ] | .nil, _ => `[iprop| [∗] Γₛ] | _ , _ => `[iprop| □ [∧] Γₚ ∗ [∗] Γₛ] (Ps ⊢ P) → envs_entails ⟨Γₚ, Γₛ⟩ P := by cases Γₚ <;> cases Γₛ all_goals simp [envs_entails, of_envs, big_op] intro Ps rw' [Ps] case cons.nil => rw' [(right_id : _ ∗ emp ⊣⊢ _)] all_goals rw' [intuitionistically_True_emp, (left_id : emp ∗ _ ⊣⊢ _)] theorem tac_clear [BI PROP] {Δ : Envs PROP} (i : EnvsIndex.of Δ) (Q : PROP) : let (p, P) := Δ.lookup i [TCIte p TCTrue (TCOr (Affine P) (Absorbing Q))] → envs_entails (Δ.delete true i) Q → envs_entails Δ Q := by intro_let p P h_lookup intro inst_affine_absorbing cases p all_goals cases inst_affine_absorbing simp only [envs_entails] intro h_entails rw' [envs_lookup_delete_sound true h_lookup, h_entails] simp only [bi_intuitionistically_if, ite_true, ite_false] rw' [sep_elim_r] -- pure theorem tac_pure_intro [BI PROP] {Δ : Envs PROP} {a : Bool} {φ : Prop} (Q : PROP) : [FromPure a Q φ] → [TCIte a (AffineEnv Δ.spatial) TCTrue] → φ → envs_entails Δ Q := by simp only [envs_entails] intro _ inst_affine_env hφ rw' [← from_pure] cases a case false => apply pure_intro exact hφ case true => cases inst_affine_env simp only [of_envs, bi_affinely_if] rw' [ affine, pure_True hφ, affinely_True_emp, affinely_emp] -- implication and wand theorem tac_impl_intro [BI PROP] {Δ : Envs PROP} {P Q : PROP} (R : PROP) : [FromImpl R P Q] → [TCIte Δ.spatial.isEmpty TCTrue (Persistent P)] → [FromAffinely P' P] → envs_entails (Δ.append false P') Q → envs_entails Δ R := by simp only [envs_entails] intro _ inst_pers _ h_entails rw' [← from_impl] cases h_empty : Δ.spatial.isEmpty <;> rw [h_empty] at inst_pers <;> cases inst_pers case false => apply impl_intro_l rw' [ envs_append_sound false P', (from_affinely : <affine>?true P ⊢ _), persistent_and_affinely_sep_l_1, wand_elim_r, h_entails] case true => rw' [envs_spatial_is_empty_intuitionistically h_empty] apply impl_intro_l rw' [ envs_append_sound false P', (from_affinely : <affine>?true P ⊢ _)] simp only [bi_intuitionistically] rw' [ ← affinely_and_lr, persistently_and_intuitionistically_sep_r, intuitionistically_elim, wand_elim_r, h_entails] theorem tac_impl_intro_intuitionistic [BI PROP] {Δ : Envs PROP} {P P' Q : PROP} (R : PROP) : [FromImpl R P Q] → [IntoPersistent false P P'] → envs_entails (Δ.append true P') Q → envs_entails Δ R := by simp only [envs_entails] intro _ _ h_entails rw' [← from_impl, envs_append_sound true P'] ; simp only apply impl_intro_l rw' [ persistently_if_intro_false P, into_persistent, persistently_and_intuitionistically_sep_l, wand_elim_r, h_entails] theorem tac_impl_intro_drop [BI PROP] {Δ : Envs PROP} {P Q : PROP} (R : PROP) : [FromImpl R P Q] → envs_entails Δ Q → envs_entails Δ R := by simp only [envs_entails] intro _ h_entails rw' [← from_impl] apply impl_intro_l rw' [and_elim_r, h_entails] theorem tac_wand_intro [BI PROP] {Δ : Envs PROP} {P Q : PROP} (R : PROP) : 
[FromWand R P Q] → envs_entails (Δ.append false P) Q → envs_entails Δ R := by simp only [envs_entails] intro _ h_entails rw' [ ← from_wand, envs_append_sound false P, h_entails] theorem tac_wand_intro_intuitionistic [BI PROP] {Δ : Envs PROP} {P P' Q : PROP} (R : PROP) : [FromWand R P Q] → [IntoPersistent false P P'] → [TCOr (Affine P) (Absorbing Q)] → envs_entails (Δ.append true P') Q → envs_entails Δ R := by simp only [envs_entails] intro _ _ inst_affine_absorbing h_entails rw' [← from_wand, envs_append_sound true P'] ; simp only apply wand_intro_l cases inst_affine_absorbing case a.l => rw' [ ← affine_affinely P, persistently_if_intro_false P, into_persistent, wand_elim_r, h_entails] case a.r => rw' [ persistently_if_intro_false P, into_persistent, ← absorbingly_intuitionistically_into_persistently, absorbingly_sep_l, wand_elim_r, h_entails, absorbing] -- specialize theorem tac_specialize [BI PROP] {Δ : Envs PROP} (rpPremise rpWand : Bool) (i j : EnvsIndex.of Δ) (h_ne : i.type = j.type → i.val ≠ j.val) {P2 : PROP} (R : PROP) : let (p, P1) := Δ.lookup i let Δ' := Δ.delete rpPremise i let j' := Δ.updateIndexAfterDelete rpPremise i j h_ne let (q, Q) := Δ'.lookup j' [IntoWand q p Q P1 P2] → envs_entails (Δ'.replace rpWand j' (p && q) P2) R → envs_entails Δ R := by intro_let p P1 h_lookup_i intro Δ' j' intro_let q Q h_lookup_j' simp only [envs_entails] intro _ h_entails rw' [ envs_lookup_delete_sound rpPremise h_lookup_i, envs_lookup_replace_sound rpWand (p && q) P2 h_lookup_j'] cases p case false => rw' [(IntoWand.into_wand : □?q Q ⊢ □?false P1 -∗ P2)] simp only [bi_intuitionistically_if, Bool.false_and, ite_false] rw' [(assoc : P1 ∗ _ ⊣⊢ _), !wand_elim_r, h_entails] case true => simp only [Bool.true_and, ← intuitionistically_if_intro_true] rw' [ ← intuitionistically_idemp, ← intuitionistically_if_idemp, intuitionistically_intuitionistically_if q, (IntoWand.into_wand : □?q Q ⊢ □?true P1 -∗ P2), (assoc : □?q □ P1 ∗ _ ⊣⊢ _), intuitionistically_if_sep_2, !wand_elim_r, h_entails] theorem tac_specialize_forall [BI PROP] {Δ : Envs PROP} (rpWand : Bool) (i : EnvsIndex.of Δ) {Φ : α → PROP} (Q : PROP) : let (p, P) := Δ.lookup i [IntoForall P Φ] → (∃ x, envs_entails (Δ.replace rpWand i p (Φ x)) Q) → envs_entails Δ Q := by intro_let p P h_lookup simp only [envs_entails] intro _ ⟨x, h_entails⟩ rw' [ envs_lookup_replace_sound rpWand p (Φ x) h_lookup, IntoForall.into_forall, forall_elim x, wand_elim_r, h_entails] -- forall theorem tac_forall_intro [BI PROP] {Δ : Envs PROP} {Ψ : α → PROP} (Q : PROP) : [FromForall Q Ψ] → (∀ a, envs_entails Δ `[iprop| Ψ a]) → envs_entails Δ Q := by simp only [envs_entails] intro _ h_entails rw' [← from_forall] apply forall_intro exact h_entails -- exist theorem tac_exist [BI PROP] {Δ : Envs PROP} {Φ : α → PROP} (P : PROP) : [FromExist P Φ] → (∃ a, envs_entails Δ `[iprop| Φ a]) → envs_entails Δ P := by simp only [envs_entails] intro _ ⟨a, h_entails⟩ rw' [← from_exist, ← exist_intro a, h_entails] theorem tac_exist_destruct [BI PROP] {Δ : Envs PROP} (i : EnvsIndex.of Δ) {Φ : α → PROP} (Q : PROP) : let (p, P) := Δ.lookup i [IntoExist P Φ] → (∀ a, envs_entails (Δ.replace true i p (Φ a)) Q) → envs_entails Δ Q := by intro_let p P h_lookup simp only [envs_entails, Envs.replace] intro _ h_entails rw' [ envs_lookup_delete_sound true h_lookup, into_exist, intuitionistically_if_exist, sep_exist_r] ; simp only apply exist_elim intro a rw' [ envs_append_sound p (Φ a), wand_elim_r, h_entails a] -- emp theorem tac_emp_intro [BI PROP] {Γₚ Γₛ : Env PROP} : [AffineEnv Γₛ] → envs_entails ⟨Γₚ, 
Γₛ⟩ `[iprop| emp] := by intro _ simp only [envs_entails, of_envs] rw' [ affinely_elim_emp, (affine : [∗] Γₛ.toList ⊢ emp), (left_id : emp ∗ _ ⊣⊢ _)] -- assumptions theorem tac_assumption_lean [BI PROP] {Δ : Envs PROP} {P : PROP} (Q : PROP) : (⊢ P) → [FromAssumption true P Q] → [TCIte Δ.spatial.isEmpty TCTrue (TCOr (Absorbing Q) (AffineEnv Δ.spatial))] → envs_entails Δ Q := by simp only [envs_entails] intro h_P _ inst_absorbing_affine_env rw' [ ← (left_id : emp ∗ of_envs Δ ⊣⊢ _), ← intuitionistically_emp, h_P, (from_assumption : □?true P ⊢ Q)] cases h_empty : Δ.spatial.isEmpty <;> rw [h_empty] at inst_absorbing_affine_env <;> cases inst_absorbing_affine_env case false.e inst_absorbing_affine_env => cases inst_absorbing_affine_env <;> rw' [!sep_elim_l] case true.t => rw' [envs_spatial_is_empty_intuitionistically h_empty, sep_elim_l] theorem tac_assumption [BI PROP] {Δ : Envs PROP} (i : EnvsIndex.of Δ) (Q : PROP) : let (p, P) := Δ.lookup i [FromAssumption p P Q] → let Δ' := Δ.delete true i [TCIte Δ'.spatial.isEmpty TCTrue (TCOr (Absorbing Q) (AffineEnv Δ'.spatial))] → envs_entails Δ Q := by intro_let p P h_lookup simp only [envs_entails] intro _ inst_absorbing_affine_env rw' [envs_lookup_delete_sound true h_lookup] cases h_empty : (Δ.delete true i).spatial.isEmpty <;> rw [h_empty] at inst_absorbing_affine_env <;> cases inst_absorbing_affine_env case false.e inst_absorbing_affine_env => rw' [(from_assumption : □?p P ⊢ Q)] cases inst_absorbing_affine_env <;> rw' [!sep_elim_l] case true.t => rw' [envs_spatial_is_empty_intuitionistically h_empty, sep_elim_l] exact from_assumption -- false theorem tac_ex_falso [BI PROP] {Δ : Envs PROP} (Q : PROP) : envs_entails Δ `[iprop| False] → envs_entails Δ Q := by simp only [envs_entails] intro h_entails rw' [h_entails] exact False_elim theorem tac_false_destruct [BI PROP] {Δ : Envs PROP} (i : EnvsIndex.of Δ) (Q : PROP) : let (_, P) := Δ.lookup i P = `[iprop| False] → envs_entails Δ Q := by intro_let p P h_lookup simp only [envs_entails] intro h_false rw' [ envs_lookup_delete_sound true h_lookup, intuitionistically_if_elim, h_false, sep_elim_l] exact False_elim -- moving between contexts theorem tac_pure [BI PROP] {Δ : Envs PROP} {φ : Prop} (i : EnvsIndex.of Δ) (Q : PROP) : let (p, P) := Δ.lookup i [IntoPure P φ] → [TCIte p TCTrue (TCOr (Affine P) (Absorbing Q))] → (φ → envs_entails (Δ.delete true i) Q) → envs_entails Δ Q := by intro_let p P h_lookup simp only [envs_entails] intro _ inst_affine_absorbing h_entails rw' [envs_lookup_delete_sound true h_lookup] cases p <;> simp only [bi_intuitionistically_if, ite_true, ite_false] <;> cases inst_affine_absorbing case false.e inst_affine_absorbing => cases inst_affine_absorbing case l => rw' [ ← affine_affinely P, into_pure, ← persistent_and_affinely_sep_l] apply pure_elim φ · exact and_elim_l · intro h_φ rw' [h_entails h_φ, and_elim_r] case r => rw' [ into_pure, persistent_absorbingly_affinely_2, absorbingly_sep_lr, ← persistent_and_affinely_sep_l] apply pure_elim_l intro h_φ rw' [h_entails h_φ, absorbing] case true.t => rw' [ into_pure, ← persistently_and_intuitionistically_sep_l, persistently_pure] apply pure_elim_l intro h_φ rw' [h_entails h_φ] theorem tac_intuitionistic [BI PROP] {Δ : Envs PROP} {P' : PROP} (i : EnvsIndex.of Δ) (Q : PROP) : let (p, P) := Δ.lookup i [IntoPersistent p P P'] → [TCIte p TCTrue (TCOr (Affine P) (Absorbing Q))] → envs_entails (Δ.replace true i true P') Q → envs_entails Δ Q := by intro_let p P h_lookup simp only [envs_entails] intro _ inst_affine_absorbing h_entails rw' 
[envs_lookup_replace_sound true true P' h_lookup] cases p <;> simp only [bi_intuitionistically_if, ite_true, ite_false, bi_intuitionistically] <;> cases inst_affine_absorbing case false inst_affine_absorbing => cases inst_affine_absorbing case l => rw' [ ← affine_affinely P, persistently_if_intro_false P, into_persistent, wand_elim_r, h_entails] case r => rw' [persistently_if_intro_false P, into_persistent] conv => lhs lhs rw [← absorbingly_intuitionistically_into_persistently] rw' [ absorbingly_sep_l, wand_elim_r, h_entails, absorbing] case true => rw' [ persistently_if_intro_true P, into_persistent, wand_elim_r, h_entails] theorem tac_spatial [BI PROP] {Δ : Envs PROP} {P' : PROP} (i : EnvsIndex.of Δ) (Q : PROP) : let (p, P) := Δ.lookup i [FromAffinely P' P p] → envs_entails (Δ.replace true i false P') Q → envs_entails Δ Q := by intro_let p P h_lookup simp only [envs_entails] intro _ h_entails rw' [envs_lookup_replace_sound true false P' h_lookup] cases p <;> simp only [bi_intuitionistically_if, ite_true, ite_false] case false => rw' [ affinely_if_intro_false P, from_affinely, wand_elim_r, h_entails] case true => rw' [ intuitionistically_affinely, affinely_if_intro_true P, from_affinely, wand_elim_r, h_entails] -- (separating) conjunction splitting theorem tac_and_split [BI PROP] {Δ : Envs PROP} {Q1 Q2 : PROP} (P : PROP) : [FromAnd P Q1 Q2] → envs_entails Δ Q1 → envs_entails Δ Q2 → envs_entails Δ P := by simp only [envs_entails] intro _ h_entails_1 h_entails_2 rw' [← from_and] apply and_intro · exact h_entails_1 · exact h_entails_2 theorem tac_sep_split [BI PROP] {Δ : Envs PROP} {Q1 Q2 : PROP} (mask : List Bool) (h : mask.length = Δ.spatial.length) (P : PROP) : let (Δ₁, Δ₂) := Δ.split mask h [FromSep P Q1 Q2] → envs_entails Δ₁ Q1 → envs_entails Δ₂ Q2 → envs_entails Δ P := by intro_let Δ₁ Δ₂ h_split simp only [envs_entails] intro _ h_entails_1 h_entails_2 rw' [ envs_split_sound h_split, ← from_sep, h_entails_1, h_entails_2] -- disjunction selection theorem tac_disjunction_l [BI PROP] {Δ : Envs PROP} {Q1 Q2 : PROP} (P : PROP) : [FromOr P Q1 Q2] → envs_entails Δ Q1 → envs_entails Δ P := by simp only [envs_entails] intro _ h_entails rw' [← from_or] apply or_intro_l' exact h_entails theorem tac_disjunction_r [BI PROP] {Δ : Envs PROP} {Q1 Q2 : PROP} (P : PROP) : [FromOr P Q1 Q2] → envs_entails Δ Q2 → envs_entails Δ P := by simp only [envs_entails] intro _ h_entails rw' [← from_or] apply or_intro_r' exact h_entails -- destruction class inductive IntoConjunction [BI PROP] (P : PROP) (P1 P2 : outParam PROP) : Bool → Type | and : [IntoAnd true P P1 P2] → IntoConjunction P P1 P2 true | sep : [IntoSep P P1 P2] → IntoConjunction P P1 P2 false attribute [instance] IntoConjunction.and attribute [instance] IntoConjunction.sep theorem tac_conjunction_destruct [BI PROP] {Δ : Envs PROP} {P1 P2 : PROP} (i : EnvsIndex.of Δ) (Q : PROP) : let (p, P) := Δ.lookup i [IntoConjunction P P1 P2 p] → envs_entails (Δ |>.delete true i |>.append p P1 |>.append p P2) Q → envs_entails Δ Q := by intro_let p P h_lookup simp only [envs_entails] intro inst_conjunction h_entails rw' [ envs_lookup_delete_sound true h_lookup, envs_append_sound p P1, envs_append_sound p P2] ; simp only cases p <;> simp only [bi_intuitionistically_if, ite_true, ite_false] <;> cases inst_conjunction case false.sep => rw' [ into_sep, (comm : P1 ∗ P2 ⊣⊢ _), ← (assoc : _ ⊣⊢ (P2 ∗ P1) ∗ _), wand_elim_r, wand_elim_r, h_entails] case true.and => rw' [intuitionistically_if_intro_true P, into_and] simp only [bi_intuitionistically_if, ite_true] rw' [ 
intuitionistically_and, and_sep_intuitionistically, (comm : □ P1 ∗ □ P2 ⊣⊢ _), ← (assoc : _ ⊣⊢ (□ P2 ∗ □ P1) ∗ _), wand_elim_r, wand_elim_r, h_entails] theorem tac_conjunction_destruct_choice [BI PROP] {Δ : Envs PROP} {P1 P2 : PROP} (i : EnvsIndex.of Δ) (d : Bool) (Q : PROP) : let (p, P) := Δ.lookup i [IntoAnd p P P1 P2] → envs_entails (if d then Δ.replace true i p P1 else Δ.replace true i p P2) Q → envs_entails Δ Q := by intro_let p P h_lookup simp only [envs_entails] intro _ h_entails cases d case false => rw' [ envs_lookup_replace_sound true p P2 h_lookup, into_and, and_elim_r, wand_elim_r, h_entails] case true => rw' [ envs_lookup_replace_sound true p P1 h_lookup, into_and, and_elim_l, wand_elim_r, h_entails] theorem tac_disjunction_destruct [BI PROP] {Δ : Envs PROP} {P1 P2 : PROP} (i : EnvsIndex.of Δ) (Q : PROP) : let (p, P) := Δ.lookup i [IntoOr P P1 P2] → envs_entails (Δ.replace true i p P1) Q → envs_entails (Δ.replace true i p P2) Q → envs_entails Δ Q := by intro_let p P h_lookup simp only [envs_entails] intro _ h_entails_1 h_entails_2 rw' [envs_lookup_delete_sound true h_lookup] ; simp only simp only [Envs.replace] at h_entails_1 simp only [Envs.replace] at h_entails_2 rw' [into_or, intuitionistically_if_or, sep_or_r] apply or_elim · rw' [envs_append_sound p P1, wand_elim_r, h_entails_1] · rw' [envs_append_sound p P2, wand_elim_r, h_entails_2] end Iris.Proofmode
library(ggplot2)
library(scales)
library(ggthemes)

source("./utils.r")

demog_data <- read.csv("./outputs/demog_summary.csv")
demog_data <- subset(demog_data, sex != "")
demog_data$sex <- factor(demog_data$sex, levels = c("M", "F"))
demog_data$dataset <- factor(
  demog_data$dataset,
  levels = c("camcan", "lemon", "chbp", "tuab"),
  labels = c("Cam-CAN\n(MEG)", "LEMON\n(EEG)", "CHBP\n(EEG)", "TUAB\n(EEG)"))

demog_summary1 <- do.call(rbind, lapply(
  split(demog_data, list(demog_data$sex, demog_data$dataset)),
  function(dat){
    with(dat,
         data.frame(dataset = dat$dataset[[1]],
                    sex = sex[[1]],
                    sub_count = length(sex),
                    sub_M = mean(age),
                    sub_min = min(age),
                    sub_max = max(age))
    )
}))

demog_summary2 <- do.call(rbind, lapply(
  split(demog_data, demog_data$dataset),
  function(dat) {
    with(dat,
         data.frame(dataset = dat$dataset[[1]],
                    count = nrow(dat),
                    min = min(age),
                    max = max(age),
                    SD = sd(age),
                    M = mean(age))
    )
}))

demog_out <- merge(demog_summary2, demog_summary1, by = "dataset")
is_num <- sapply(demog_out, is.numeric)
demog_out[, is_num] <- round(demog_out[, is_num], 1)
write.csv(demog_out, "./outputs/demog_summary_table.csv")

demog_count <- demog_out[seq(1, 8, 2), c("dataset", "count")]
demog_count$label <- paste('n =', demog_count$count)

fig <- ggplot(
  aes(x = age, color = sex, fill = sex),
  data = demog_data) +
  geom_density(alpha = 0.4, size = 1, trim = T) +
  geom_rug() +
  facet_wrap(.~dataset, ncol = 4) +
  theme_minimal(base_size = 22) +
  scale_color_solarized() +
  scale_fill_solarized() +
  labs(x = "Age [years]", y = "Density") +
  geom_text(x = 70, y = 0.026, size = 6, inherit.aes = F,
            mapping = aes(label = label), data = demog_count)
print(fig)

my_ggsave("./figures/fig_demographics", fig, dpi = 300, width = 10, height = 4.5)
id : Set → Set
id A = A
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <gsl/gsl_matrix_double.h>
#include <gsl/gsl_linalg.h>

#include "../include/run_svd.h"

int main(void)
{
    FILE *fp = fopen("./data/my_data.csv", "r");
    if (fp == NULL) {
        perror("Unable to open file!");
        exit(1);
    }

    /* Dimensions of the matrix stored in the CSV file. */
    int m = 4;
    int n = 5;

    char chunk[128];
    const char s[2] = ",";
    int i = 0;
    int j = 0;

    gsl_matrix *mat = gsl_matrix_alloc(m, n);

    /* Read the file line by line and fill the matrix in row-major order. */
    while (fgets(chunk, sizeof(chunk), fp) != NULL) {
        char *token;
        token = strtok(chunk, s);
        while (token != NULL) {
            double x = atof(token);
            gsl_matrix_set(mat, i, j, x);
            j = (j + 1) % n;
            if (j == 0) {
                i = (i + 1) % m;
            }
            token = strtok(NULL, s);
        }
    }

    printf("a_matrix\n");
    pretty_print(mat);

    int result = run_svd(mat);

    fclose(fp);
    return result;
}
```python
from luwiji.metrics import illustration
```

```python
illustration.type_of_mistake
```

### Accuracy
\begin{equation}
Accuracy = \frac{TP+TN}{TP+FP+TN+FN}
\end{equation}

How many predictions are correct out of all the data.

### Precision
\begin{equation}
Precision = \frac{TP}{TP+FP}
\end{equation}

- Goal: keep FP as small as possible
- Example:
    - classifying spam vs non-spam email
    - most of our email is non-spam, with only a few messages that are actually spam
    - it would be costly if a regular email were flagged as spam (False Positive)

### Recall / Sensitivity
\begin{equation}
Recall = \frac{TP}{TP+FN}
\end{equation}

- Goal: keep FN as small as possible
- Example:
    - classifying cancer vs non-cancer
    - most of our labels are non-cancer
    - it would be fatal if a patient has cancer but we predict non-cancer (False Negative)

### F1-Score
\begin{equation}
\frac{1}{F_1} = \frac{1}{2} \left(\frac{1}{Precision} + \frac{1}{Recall}\right)
\end{equation}

- the harmonic mean of precision and recall
- When is it used? When you want to balance precision and recall

### PR_AUC and ROC_AUC
We will cover these later, after the logistic regression material.
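To make these definitions concrete, here is a minimal sketch (not part of the original notebook) that computes the four metrics with scikit-learn on a small set of made-up labels; `y_true` and `y_pred` are purely illustrative.

```python
# Hypothetical labels: 1 = positive class, 0 = negative class
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score

y_true = [1, 0, 1, 1, 0, 0, 1, 0, 0, 1]
y_pred = [1, 0, 0, 1, 0, 1, 1, 0, 0, 0]

print("accuracy :", accuracy_score(y_true, y_pred))   # (TP + TN) / all predictions
print("precision:", precision_score(y_true, y_pred))  # TP / (TP + FP)
print("recall   :", recall_score(y_true, y_pred))     # TP / (TP + FN)
print("f1       :", f1_score(y_true, y_pred))         # harmonic mean of precision and recall
```

For these labels TP = 3, FP = 1, FN = 2 and TN = 4, so accuracy = 0.7, precision = 0.75, recall = 0.6 and F1 ≈ 0.67.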
Congratulations to the 6-4-3 DP Cougars 13U squad for claiming the championship title at this past weekend’s TBS Battle of the Bats event. The 13U Cougars finished the weekend with a perfect 4-0 record on their way to the title. The total team effort produced wins by scores of 12-1, 10-8, 5-2, and 13-3 (championship). Congrats again to the 13U Cougars players and coaches on a successful tournament performance!
{-# OPTIONS --safe #-} module Definition.Typed.EqRelInstance where open import Definition.Untyped open import Definition.Typed open import Definition.Typed.Properties open import Definition.Typed.Weakening open import Definition.Typed.Properties open import Definition.Typed.Reduction open import Definition.Typed.EqualityRelation open import Tools.Function Urefl : ∀ {r l Γ} → ⊢ Γ → Γ ⊢ (Univ r l) ≡ (Univ r l) ^ [ ! , next l ] Urefl {l = ⁰} ⊢Γ = refl (univ (univ 0<1 ⊢Γ)) Urefl {l = ¹} ⊢Γ = refl (Uⱼ ⊢Γ) instance eqRelInstance : EqRelSet eqRelInstance = eqRel _⊢_≡_^_ _⊢_≡_∷_^_ _⊢_≡_∷_^_ idᶠ idᶠ idᶠ univ un-univ≡ sym genSym genSym trans genTrans genTrans conv conv wkEq wkEqTerm wkEqTerm reduction reductionₜ Urefl (refl ∘ᶠ univ 0<1) (refl ∘ᶠ ℕⱼ) (refl ∘ᶠ Emptyⱼ) Π-cong ∃-cong (refl ∘ᶠ zeroⱼ) suc-cong (λ lF lG x x₁ x₂ x₃ x₄ x₅ → η-eq lF lG x x₁ x₂ x₅) genVar app-cong natrec-cong Emptyrec-cong Id-cong (λ ⊢Γ → Id-cong (refl (ℕⱼ ⊢Γ))) (λ ⊢Γ → Id-cong (refl (ℕⱼ ⊢Γ)) (refl (zeroⱼ ⊢Γ))) (λ ⊢Γ t → Id-cong (refl (ℕⱼ ⊢Γ)) (suc-cong t)) (λ ⊢Γ → Id-cong (refl (univ 0<1 ⊢Γ))) (λ ⊢Γ → Id-cong (refl (univ 0<1 ⊢Γ)) (refl (ℕⱼ ⊢Γ))) (λ ⊢A B → Id-cong (refl (univ 0<1 (wfEq (univ ⊢A)))) ⊢A B) cast-cong (λ ⊢Γ → cast-cong (refl (ℕⱼ ⊢Γ))) (λ ⊢Γ → cast-cong (refl (ℕⱼ ⊢Γ)) (refl (ℕⱼ ⊢Γ))) cast-cong (λ ⊢A ⊢P P → cast-cong (refl (ℕⱼ (wf (univ ⊢A)))) P) (λ ⊢A ⊢P P → cast-cong P (refl (ℕⱼ (wf (univ ⊢A))))) (λ ⊢A ⊢P P ⊢A' ⊢P' P' → cast-cong P P') (λ ⊢A ⊢P P ⊢A' ⊢P' P' → cast-cong P P') proof-irrelevance
using NonLinearReactionAdvectionDiffusionWithFrontData;
using Test
using Documenter
using SafeTestsets

@time @safetestset "Utility functions                        " begin include("utils.jl") end
@time @safetestset "Direct problem Jacobian on a static grid " begin include("direct_jacobian.jl") end
@time @safetestset "Direct problem Jacobian on a dynamic grid" begin include("direct_jacobian_nonuniform.jl") end
@time @safetestset "Model residual for the direct problem    " begin include("direct_check.jl") end
@time @safetestset "Adjoint problem Jacobian on a static grid" begin include("adjoint_jacobian.jl") end
@time @safetestset "Model residual for the adjoint problem   " begin include("adjoint_check.jl") end
@time @safetestset "Heterogeneity on exact data              " begin include("heterogeneity_check.jl") end
@time @safetestset "New delta-function approximation         " begin include("delta_check.jl") end
@time @safetestset "Degenerate roots                         " begin include("degenerated_check.jl") end
@time @safetestset "Gradient and functional                  " begin include("functional.jl") end
#include("abstract_types.jl")
import LMT variable {I} [Nonempty I] {E} [Nonempty E] [Nonempty (A I E)] example {a1 a2 a3 : A I E} : ((((a1).write i1 (v3)).write i2 (v3)).read i1) ≠ (v3) → False := by arr
# Phase 4: Data analysis

# FORECAST OF NET AVERAGE WAGE GROWTH IN SLOVENIA

# linear regression
model <- lm(POVPRECNA.MESECNA.PLACA.SLO ~ MESEC, data=mesecne.place.slo)

# forecast for the first month of each year
prihodnja.leta <- data.frame(MESEC = c("2014-01-01", "2015-01-01", "2016-01-01",
                                       "2017-01-01", "2018-01-01", "2019-01-01",
                                       "2020-01-01", "2021-01-01", "2022-01-01",
                                       "2023-01-01", "2024-01-01", "2025-01-01"))
prihodnja.leta$MESEC <- as.Date(prihodnja.leta$MESEC)
napoved.slo <- mutate(prihodnja.leta,
                      POVPRECNA.MESECNA.PLACA.SLO = predict(model, prihodnja.leta))

napoved.graf <- ggplot(mesecne.place.slo, aes(x = MESEC, y = POVPRECNA.MESECNA.PLACA.SLO)) +
  geom_point() +
  geom_smooth(method = 'lm', fullrange = TRUE, formula = y~x) +
  geom_point(data = napoved.slo, aes(x = MESEC, y = POVPRECNA.MESECNA.PLACA.SLO), color='red') +
  xlab("Leto") +
  ylab("Mesečna plača (€)") +
  ggtitle('Napoved mesečnih januarskih plač')
rulemsiDataObjRsync {
#Input parameters are:
#  Data object path
#  Optional flag for mode
#    IRODS_TO_IRODS
#    IRODS_TO_COLLECTION
#  Optional storage resource
#  Optional target collection
#Output parameters are:
#  status
#Output from running the example is:
#  Source object /tempZone/home/antoine/source.txt synchronized with destination object /tempZone/home/antoine/dest.txt
  msiDataObjRsync(*sourceObj,"IRODS_TO_IRODS",*destResc,*destObj,*status);
  writeLine("stdout","Source object *sourceObj synchronized with destination object *destObj");
  writeLine("stdout","status = *status");
}
INPUT *sourceObj="/tempZone/home/rods/synctest/source.txt",*destResc="demoResc",*destObj="/tempZone/home/rods/synctest/dest.txt"
OUTPUT ruleExecOut
z <- 0
repeat {
  print(z)
  z <- z + 1
  if (z >= 5) break  # a repeat loop only ends on an explicit break
}
lemma and_or_distrib_left (P Q R : Prop) : P ∧ (Q ∨ R) ↔ (P ∧ Q) ∨ (P ∧ R) :=
begin
  split,
  intro h1,
  cases h1,
  cases h1_right,
  left,
  split,
  exact h1_left,
  exact h1_right,
  right,
  split,
  exact h1_left,
  exact h1_right,
  intro h,
  split,
  repeat {cases h, repeat {cc}},
end
/- Copyright (c) 2017 Johannes Hölzl. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Johannes Hölzl, Mario Carneiro, Patrick Massot ! This file was ported from Lean 3 source module topology.uniform_space.basic ! leanprover-community/mathlib commit 195fcd60ff2bfe392543bceb0ec2adcdb472db4c ! Please do not edit these lines, except to modify the commit id ! if you have ported upstream changes. -/ import Mathbin.Order.Filter.SmallSets import Mathbin.Topology.SubsetProperties import Mathbin.Topology.NhdsSet /-! # Uniform spaces > THIS FILE IS SYNCHRONIZED WITH MATHLIB4. > Any changes to this file require a corresponding PR to mathlib4. Uniform spaces are a generalization of metric spaces and topological groups. Many concepts directly generalize to uniform spaces, e.g. * uniform continuity (in this file) * completeness (in `cauchy.lean`) * extension of uniform continuous functions to complete spaces (in `uniform_embedding.lean`) * totally bounded sets (in `cauchy.lean`) * totally bounded complete sets are compact (in `cauchy.lean`) A uniform structure on a type `X` is a filter `𝓤 X` on `X × X` satisfying some conditions which makes it reasonable to say that `∀ᶠ (p : X × X) in 𝓤 X, ...` means "for all p.1 and p.2 in X close enough, ...". Elements of this filter are called entourages of `X`. The two main examples are: * If `X` is a metric space, `V ∈ 𝓤 X ↔ ∃ ε > 0, { p | dist p.1 p.2 < ε } ⊆ V` * If `G` is an additive topological group, `V ∈ 𝓤 G ↔ ∃ U ∈ 𝓝 (0 : G), {p | p.2 - p.1 ∈ U} ⊆ V` Those examples are generalizations in two different directions of the elementary example where `X = ℝ` and `V ∈ 𝓤 ℝ ↔ ∃ ε > 0, { p | |p.2 - p.1| < ε } ⊆ V` which features both the topological group structure on `ℝ` and its metric space structure. Each uniform structure on `X` induces a topology on `X` characterized by > `nhds_eq_comap_uniformity : ∀ {x : X}, 𝓝 x = comap (prod.mk x) (𝓤 X)` where `prod.mk x : X → X × X := (λ y, (x, y))` is the partial evaluation of the product constructor. The dictionary with metric spaces includes: * an upper bound for `dist x y` translates into `(x, y) ∈ V` for some `V ∈ 𝓤 X` * a ball `ball x r` roughly corresponds to `uniform_space.ball x V := {y | (x, y) ∈ V}` for some `V ∈ 𝓤 X`, but the later is more general (it includes in particular both open and closed balls for suitable `V`). In particular we have: `is_open_iff_ball_subset {s : set X} : is_open s ↔ ∀ x ∈ s, ∃ V ∈ 𝓤 X, ball x V ⊆ s` The triangle inequality is abstracted to a statement involving the composition of relations in `X`. First note that the triangle inequality in a metric space is equivalent to `∀ (x y z : X) (r r' : ℝ), dist x y ≤ r → dist y z ≤ r' → dist x z ≤ r + r'`. Then, for any `V` and `W` with type `set (X × X)`, the composition `V ○ W : set (X × X)` is defined as `{ p : X × X | ∃ z, (p.1, z) ∈ V ∧ (z, p.2) ∈ W }`. In the metric space case, if `V = { p | dist p.1 p.2 ≤ r }` and `W = { p | dist p.1 p.2 ≤ r' }` then the triangle inequality, as reformulated above, says `V ○ W` is contained in `{p | dist p.1 p.2 ≤ r + r'}` which is the entourage associated to the radius `r + r'`. In general we have `mem_ball_comp (h : y ∈ ball x V) (h' : z ∈ ball y W) : z ∈ ball x (V ○ W)`. Note that this discussion does not depend on any axiom imposed on the uniformity filter, it is simply captured by the definition of composition. The uniform space axioms ask the filter `𝓤 X` to satisfy the following: * every `V ∈ 𝓤 X` contains the diagonal `id_rel = { p | p.1 = p.2 }`. 
This abstracts the fact that `dist x x ≤ r` for every non-negative radius `r` in the metric space case and also that `x - x` belongs to every neighborhood of zero in the topological group case. * `V ∈ 𝓤 X → prod.swap '' V ∈ 𝓤 X`. This is tightly related the fact that `dist x y = dist y x` in a metric space, and to continuity of negation in the topological group case. * `∀ V ∈ 𝓤 X, ∃ W ∈ 𝓤 X, W ○ W ⊆ V`. In the metric space case, it corresponds to cutting the radius of a ball in half and applying the triangle inequality. In the topological group case, it comes from continuity of addition at `(0, 0)`. These three axioms are stated more abstractly in the definition below, in terms of operations on filters, without directly manipulating entourages. ## Main definitions * `uniform_space X` is a uniform space structure on a type `X` * `uniform_continuous f` is a predicate saying a function `f : α → β` between uniform spaces is uniformly continuous : `∀ r ∈ 𝓤 β, ∀ᶠ (x : α × α) in 𝓤 α, (f x.1, f x.2) ∈ r` In this file we also define a complete lattice structure on the type `uniform_space X` of uniform structures on `X`, as well as the pullback (`uniform_space.comap`) of uniform structures coming from the pullback of filters. Like distance functions, uniform structures cannot be pushed forward in general. ## Notations Localized in `uniformity`, we have the notation `𝓤 X` for the uniformity on a uniform space `X`, and `○` for composition of relations, seen as terms with type `set (X × X)`. ## Implementation notes There is already a theory of relations in `data/rel.lean` where the main definition is `def rel (α β : Type*) := α → β → Prop`. The relations used in the current file involve only one type, but this is not the reason why we don't reuse `data/rel.lean`. We use `set (α × α)` instead of `rel α α` because we really need sets to use the filter library, and elements of filters on `α × α` have type `set (α × α)`. The structure `uniform_space X` bundles a uniform structure on `X`, a topology on `X` and an assumption saying those are compatible. This may not seem mathematically reasonable at first, but is in fact an instance of the forgetful inheritance pattern. See Note [forgetful inheritance] below. ## References The formalization uses the books: * [N. Bourbaki, *General Topology*][bourbaki1966] * [I. M. James, *Topologies and Uniformities*][james1999] But it makes a more systematic use of the filter library. -/ open Set Filter Classical open Classical Topology Filter /- ./././Mathport/Syntax/Translate/Basic.lean:334:40: warning: unsupported option eqn_compiler.zeta -/ set_option eqn_compiler.zeta true universe u /-! 
### Relations, seen as `set (α × α)` -/ variable {α : Type _} {β : Type _} {γ : Type _} {δ : Type _} {ι : Sort _} #print idRel /- /-- The identity relation, or the graph of the identity function -/ def idRel {α : Type _} := { p : α × α | p.1 = p.2 } #align id_rel idRel -/ #print mem_idRel /- @[simp] theorem mem_idRel {a b : α} : (a, b) ∈ @idRel α ↔ a = b := Iff.rfl #align mem_id_rel mem_idRel -/ #print idRel_subset /- @[simp] theorem idRel_subset {s : Set (α × α)} : idRel ⊆ s ↔ ∀ a, (a, a) ∈ s := by simp [subset_def] <;> exact forall_congr' fun a => by simp #align id_rel_subset idRel_subset -/ #print compRel /- /-- The composition of relations -/ def compRel {α : Type u} (r₁ r₂ : Set (α × α)) := { p : α × α | ∃ z : α, (p.1, z) ∈ r₁ ∧ (z, p.2) ∈ r₂ } #align comp_rel compRel -/ -- mathport name: uniformity.comp_rel scoped[uniformity] infixl:55 " ○ " => compRel #print mem_compRel /- @[simp] theorem mem_compRel {r₁ r₂ : Set (α × α)} {x y : α} : (x, y) ∈ r₁ ○ r₂ ↔ ∃ z, (x, z) ∈ r₁ ∧ (z, y) ∈ r₂ := Iff.rfl #align mem_comp_rel mem_compRel -/ #print swap_idRel /- @[simp] theorem swap_idRel : Prod.swap '' idRel = @idRel α := Set.ext fun ⟨a, b⟩ => by simp [image_swap_eq_preimage_swap] <;> exact eq_comm #align swap_id_rel swap_idRel -/ /- warning: monotone.comp_rel -> Monotone.compRel is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : Preorder.{u2} β] {f : β -> (Set.{u1} (Prod.{u1, u1} α α))} {g : β -> (Set.{u1} (Prod.{u1, u1} α α))}, (Monotone.{u2, u1} β (Set.{u1} (Prod.{u1, u1} α α)) _inst_1 (PartialOrder.toPreorder.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (CompleteSemilatticeInf.toPartialOrder.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (CompleteLattice.toCompleteSemilatticeInf.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (Order.Coframe.toCompleteLattice.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (CompleteDistribLattice.toCoframe.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (Set.completeBooleanAlgebra.{u1} (Prod.{u1, u1} α α)))))))) f) -> (Monotone.{u2, u1} β (Set.{u1} (Prod.{u1, u1} α α)) _inst_1 (PartialOrder.toPreorder.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (CompleteSemilatticeInf.toPartialOrder.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (CompleteLattice.toCompleteSemilatticeInf.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (Order.Coframe.toCompleteLattice.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (CompleteDistribLattice.toCoframe.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (Set.completeBooleanAlgebra.{u1} (Prod.{u1, u1} α α)))))))) g) -> (Monotone.{u2, u1} β (Set.{u1} (Prod.{u1, u1} α α)) _inst_1 (PartialOrder.toPreorder.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (CompleteSemilatticeInf.toPartialOrder.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (CompleteLattice.toCompleteSemilatticeInf.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (Order.Coframe.toCompleteLattice.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (CompleteDistribLattice.toCoframe.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (Set.completeBooleanAlgebra.{u1} (Prod.{u1, u1} α α)))))))) (fun (x : β) => compRel.{u1} α (f x) (g x))) but is expected to have type forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : Preorder.{u2} β] {f : β -> (Set.{u1} (Prod.{u1, u1} α α))} {g : β -> (Set.{u1} (Prod.{u1, u1} α α))}, (Monotone.{u2, u1} β (Set.{u1} (Prod.{u1, u1} α α)) _inst_1 (PartialOrder.toPreorder.{u1} (Set.{u1} (Prod.{u1, u1} α α)) 
(CompleteSemilatticeInf.toPartialOrder.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (CompleteLattice.toCompleteSemilatticeInf.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (Order.Coframe.toCompleteLattice.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (CompleteDistribLattice.toCoframe.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (Set.instCompleteBooleanAlgebraSet.{u1} (Prod.{u1, u1} α α)))))))) f) -> (Monotone.{u2, u1} β (Set.{u1} (Prod.{u1, u1} α α)) _inst_1 (PartialOrder.toPreorder.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (CompleteSemilatticeInf.toPartialOrder.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (CompleteLattice.toCompleteSemilatticeInf.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (Order.Coframe.toCompleteLattice.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (CompleteDistribLattice.toCoframe.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (Set.instCompleteBooleanAlgebraSet.{u1} (Prod.{u1, u1} α α)))))))) g) -> (Monotone.{u2, u1} β (Set.{u1} (Prod.{u1, u1} α α)) _inst_1 (PartialOrder.toPreorder.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (CompleteSemilatticeInf.toPartialOrder.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (CompleteLattice.toCompleteSemilatticeInf.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (Order.Coframe.toCompleteLattice.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (CompleteDistribLattice.toCoframe.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (Set.instCompleteBooleanAlgebraSet.{u1} (Prod.{u1, u1} α α)))))))) (fun (x : β) => compRel.{u1} α (f x) (g x))) Case conversion may be inaccurate. Consider using '#align monotone.comp_rel Monotone.compRelₓ'. -/ theorem Monotone.compRel [Preorder β] {f g : β → Set (α × α)} (hf : Monotone f) (hg : Monotone g) : Monotone fun x => f x ○ g x := fun a b h p ⟨z, h₁, h₂⟩ => ⟨z, hf h h₁, hg h h₂⟩ #align monotone.comp_rel Monotone.compRel #print compRel_mono /- @[mono] theorem compRel_mono {f g h k : Set (α × α)} (h₁ : f ⊆ h) (h₂ : g ⊆ k) : f ○ g ⊆ h ○ k := fun ⟨x, y⟩ ⟨z, h, h'⟩ => ⟨z, h₁ h, h₂ h'⟩ #align comp_rel_mono compRel_mono -/ #print prod_mk_mem_compRel /- theorem prod_mk_mem_compRel {a b c : α} {s t : Set (α × α)} (h₁ : (a, c) ∈ s) (h₂ : (c, b) ∈ t) : (a, b) ∈ s ○ t := ⟨c, h₁, h₂⟩ #align prod_mk_mem_comp_rel prod_mk_mem_compRel -/ #print id_compRel /- @[simp] theorem id_compRel {r : Set (α × α)} : idRel ○ r = r := Set.ext fun ⟨a, b⟩ => by simp #align id_comp_rel id_compRel -/ #print compRel_assoc /- theorem compRel_assoc {r s t : Set (α × α)} : r ○ s ○ t = r ○ (s ○ t) := by ext p <;> cases p <;> simp only [mem_compRel] <;> tauto #align comp_rel_assoc compRel_assoc -/ #print left_subset_compRel /- theorem left_subset_compRel {s t : Set (α × α)} (h : idRel ⊆ t) : s ⊆ s ○ t := fun ⟨x, y⟩ xy_in => ⟨y, xy_in, h <| rfl⟩ #align left_subset_comp_rel left_subset_compRel -/ #print right_subset_compRel /- theorem right_subset_compRel {s t : Set (α × α)} (h : idRel ⊆ s) : t ⊆ s ○ t := fun ⟨x, y⟩ xy_in => ⟨x, h <| rfl, xy_in⟩ #align right_subset_comp_rel right_subset_compRel -/ #print subset_comp_self /- theorem subset_comp_self {s : Set (α × α)} (h : idRel ⊆ s) : s ⊆ s ○ s := left_subset_compRel h #align subset_comp_self subset_comp_self -/ #print subset_iterate_compRel /- theorem subset_iterate_compRel {s t : Set (α × α)} (h : idRel ⊆ s) (n : ℕ) : t ⊆ ((· ○ ·) s^[n]) t := by induction' n with n ihn generalizing t exacts[subset.rfl, (right_subset_compRel h).trans ihn] #align subset_iterate_comp_rel 
subset_iterate_compRel -/ #print SymmetricRel /- /-- The relation is invariant under swapping factors. -/ def SymmetricRel (V : Set (α × α)) : Prop := Prod.swap ⁻¹' V = V #align symmetric_rel SymmetricRel -/ #print symmetrizeRel /- /-- The maximal symmetric relation contained in a given relation. -/ def symmetrizeRel (V : Set (α × α)) : Set (α × α) := V ∩ Prod.swap ⁻¹' V #align symmetrize_rel symmetrizeRel -/ #print symmetric_symmetrizeRel /- theorem symmetric_symmetrizeRel (V : Set (α × α)) : SymmetricRel (symmetrizeRel V) := by simp [SymmetricRel, symmetrizeRel, preimage_inter, inter_comm, ← preimage_comp] #align symmetric_symmetrize_rel symmetric_symmetrizeRel -/ #print symmetrizeRel_subset_self /- theorem symmetrizeRel_subset_self (V : Set (α × α)) : symmetrizeRel V ⊆ V := sep_subset _ _ #align symmetrize_rel_subset_self symmetrizeRel_subset_self -/ #print symmetrize_mono /- @[mono] theorem symmetrize_mono {V W : Set (α × α)} (h : V ⊆ W) : symmetrizeRel V ⊆ symmetrizeRel W := inter_subset_inter h <| preimage_mono h #align symmetrize_mono symmetrize_mono -/ #print SymmetricRel.mk_mem_comm /- theorem SymmetricRel.mk_mem_comm {V : Set (α × α)} (hV : SymmetricRel V) {x y : α} : (x, y) ∈ V ↔ (y, x) ∈ V := Set.ext_iff.1 hV (y, x) #align symmetric_rel.mk_mem_comm SymmetricRel.mk_mem_comm -/ #print SymmetricRel.eq /- theorem SymmetricRel.eq {U : Set (α × α)} (hU : SymmetricRel U) : Prod.swap ⁻¹' U = U := hU #align symmetric_rel.eq SymmetricRel.eq -/ /- warning: symmetric_rel.inter -> SymmetricRel.inter is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {U : Set.{u1} (Prod.{u1, u1} α α)} {V : Set.{u1} (Prod.{u1, u1} α α)}, (SymmetricRel.{u1} α U) -> (SymmetricRel.{u1} α V) -> (SymmetricRel.{u1} α (Inter.inter.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (Set.hasInter.{u1} (Prod.{u1, u1} α α)) U V)) but is expected to have type forall {α : Type.{u1}} {U : Set.{u1} (Prod.{u1, u1} α α)} {V : Set.{u1} (Prod.{u1, u1} α α)}, (SymmetricRel.{u1} α U) -> (SymmetricRel.{u1} α V) -> (SymmetricRel.{u1} α (Inter.inter.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (Set.instInterSet.{u1} (Prod.{u1, u1} α α)) U V)) Case conversion may be inaccurate. Consider using '#align symmetric_rel.inter SymmetricRel.interₓ'. -/ theorem SymmetricRel.inter {U V : Set (α × α)} (hU : SymmetricRel U) (hV : SymmetricRel V) : SymmetricRel (U ∩ V) := by rw [SymmetricRel, preimage_inter, hU.eq, hV.eq] #align symmetric_rel.inter SymmetricRel.inter #print UniformSpace.Core /- /-- This core description of a uniform space is outside of the type class hierarchy. It is useful for constructions of uniform spaces, when the topology is derived from the uniform space. 
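The fields `refl`, `symm` and `comp` below are the filter-level counterparts of the three axioms described in the module docstring.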
-/ structure UniformSpace.Core (α : Type u) where uniformity : Filter (α × α) refl : 𝓟 idRel ≤ uniformity symm : Tendsto Prod.swap uniformity uniformity comp : (uniformity.lift' fun s => s ○ s) ≤ uniformity #align uniform_space.core UniformSpace.Core -/ /- warning: uniform_space.core.mk' -> UniformSpace.Core.mk' is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} (U : Filter.{u1} (Prod.{u1, u1} α α)), (forall (r : Set.{u1} (Prod.{u1, u1} α α)), (Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) r U) -> (forall (x : α), Membership.Mem.{u1, u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) (Set.hasMem.{u1} (Prod.{u1, u1} α α)) (Prod.mk.{u1, u1} α α x x) r)) -> (forall (r : Set.{u1} (Prod.{u1, u1} α α)), (Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) r U) -> (Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) (Set.preimage.{u1, u1} (Prod.{u1, u1} α α) (Prod.{u1, u1} α α) (Prod.swap.{u1, u1} α α) r) U)) -> (forall (r : Set.{u1} (Prod.{u1, u1} α α)), (Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) r U) -> (Exists.{succ u1} (Set.{u1} (Prod.{u1, u1} α α)) (fun (t : Set.{u1} (Prod.{u1, u1} α α)) => Exists.{0} (Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) t U) (fun (H : Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) t U) => HasSubset.Subset.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (Set.hasSubset.{u1} (Prod.{u1, u1} α α)) (compRel.{u1} α t t) r)))) -> (UniformSpace.Core.{u1} α) but is expected to have type forall {α : Type.{u1}} (U : Filter.{u1} (Prod.{u1, u1} α α)), (forall (r : Set.{u1} (Prod.{u1, u1} α α)), (Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (instMembershipSetFilter.{u1} (Prod.{u1, u1} α α)) r U) -> (forall (x : α), Membership.mem.{u1, u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) (Set.instMembershipSet.{u1} (Prod.{u1, u1} α α)) (Prod.mk.{u1, u1} α α x x) r)) -> (forall (r : Set.{u1} (Prod.{u1, u1} α α)), (Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (instMembershipSetFilter.{u1} (Prod.{u1, u1} α α)) r U) -> (Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (instMembershipSetFilter.{u1} (Prod.{u1, u1} α α)) (Set.preimage.{u1, u1} (Prod.{u1, u1} α α) (Prod.{u1, u1} α α) (Prod.swap.{u1, u1} α α) r) U)) -> (forall (r : Set.{u1} (Prod.{u1, u1} α α)), (Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (instMembershipSetFilter.{u1} (Prod.{u1, u1} α α)) r U) -> (Exists.{succ u1} (Set.{u1} (Prod.{u1, u1} α α)) (fun (t : Set.{u1} (Prod.{u1, u1} α α)) => And (Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (instMembershipSetFilter.{u1} (Prod.{u1, u1} α α)) t U) (HasSubset.Subset.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (Set.instHasSubsetSet.{u1} (Prod.{u1, u1} α α)) (compRel.{u1} α t t) r)))) -> (UniformSpace.Core.{u1} α) Case conversion may be inaccurate. Consider using '#align uniform_space.core.mk' UniformSpace.Core.mk'ₓ'. -/ /-- An alternative constructor for `uniform_space.core`. 
This version unfolds various `filter`-related definitions. -/ def UniformSpace.Core.mk' {α : Type u} (U : Filter (α × α)) (refl : ∀ r ∈ U, ∀ (x), (x, x) ∈ r) (symm : ∀ r ∈ U, Prod.swap ⁻¹' r ∈ U) (comp : ∀ r ∈ U, ∃ t ∈ U, t ○ t ⊆ r) : UniformSpace.Core α := ⟨U, fun r ru => idRel_subset.2 (refl _ ru), symm, fun r ru => let ⟨s, hs, hsr⟩ := comp _ ru mem_of_superset (mem_lift' hs) hsr⟩ #align uniform_space.core.mk' UniformSpace.Core.mk' /- warning: uniform_space.core.mk_of_basis -> UniformSpace.Core.mkOfBasis is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} (B : FilterBasis.{u1} (Prod.{u1, u1} α α)), (forall (r : Set.{u1} (Prod.{u1, u1} α α)), (Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (FilterBasis.{u1} (Prod.{u1, u1} α α)) (FilterBasis.hasMem.{u1} (Prod.{u1, u1} α α)) r B) -> (forall (x : α), Membership.Mem.{u1, u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) (Set.hasMem.{u1} (Prod.{u1, u1} α α)) (Prod.mk.{u1, u1} α α x x) r)) -> (forall (r : Set.{u1} (Prod.{u1, u1} α α)), (Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (FilterBasis.{u1} (Prod.{u1, u1} α α)) (FilterBasis.hasMem.{u1} (Prod.{u1, u1} α α)) r B) -> (Exists.{succ u1} (Set.{u1} (Prod.{u1, u1} α α)) (fun (t : Set.{u1} (Prod.{u1, u1} α α)) => Exists.{0} (Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (FilterBasis.{u1} (Prod.{u1, u1} α α)) (FilterBasis.hasMem.{u1} (Prod.{u1, u1} α α)) t B) (fun (H : Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (FilterBasis.{u1} (Prod.{u1, u1} α α)) (FilterBasis.hasMem.{u1} (Prod.{u1, u1} α α)) t B) => HasSubset.Subset.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (Set.hasSubset.{u1} (Prod.{u1, u1} α α)) t (Set.preimage.{u1, u1} (Prod.{u1, u1} α α) (Prod.{u1, u1} α α) (Prod.swap.{u1, u1} α α) r))))) -> (forall (r : Set.{u1} (Prod.{u1, u1} α α)), (Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (FilterBasis.{u1} (Prod.{u1, u1} α α)) (FilterBasis.hasMem.{u1} (Prod.{u1, u1} α α)) r B) -> (Exists.{succ u1} (Set.{u1} (Prod.{u1, u1} α α)) (fun (t : Set.{u1} (Prod.{u1, u1} α α)) => Exists.{0} (Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (FilterBasis.{u1} (Prod.{u1, u1} α α)) (FilterBasis.hasMem.{u1} (Prod.{u1, u1} α α)) t B) (fun (H : Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (FilterBasis.{u1} (Prod.{u1, u1} α α)) (FilterBasis.hasMem.{u1} (Prod.{u1, u1} α α)) t B) => HasSubset.Subset.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (Set.hasSubset.{u1} (Prod.{u1, u1} α α)) (compRel.{u1} α t t) r)))) -> (UniformSpace.Core.{u1} α) but is expected to have type forall {α : Type.{u1}} (B : FilterBasis.{u1} (Prod.{u1, u1} α α)), (forall (r : Set.{u1} (Prod.{u1, u1} α α)), (Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (FilterBasis.{u1} (Prod.{u1, u1} α α)) (instMembershipSetFilterBasis.{u1} (Prod.{u1, u1} α α)) r B) -> (forall (x : α), Membership.mem.{u1, u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) (Set.instMembershipSet.{u1} (Prod.{u1, u1} α α)) (Prod.mk.{u1, u1} α α x x) r)) -> (forall (r : Set.{u1} (Prod.{u1, u1} α α)), (Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (FilterBasis.{u1} (Prod.{u1, u1} α α)) (instMembershipSetFilterBasis.{u1} (Prod.{u1, u1} α α)) r B) -> (Exists.{succ u1} (Set.{u1} (Prod.{u1, u1} α α)) (fun (t : Set.{u1} (Prod.{u1, u1} α α)) => And (Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (FilterBasis.{u1} (Prod.{u1, u1} α α)) (instMembershipSetFilterBasis.{u1} (Prod.{u1, u1} α α)) t B) (HasSubset.Subset.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (Set.instHasSubsetSet.{u1} (Prod.{u1, u1} 
α α)) t (Set.preimage.{u1, u1} (Prod.{u1, u1} α α) (Prod.{u1, u1} α α) (Prod.swap.{u1, u1} α α) r))))) -> (forall (r : Set.{u1} (Prod.{u1, u1} α α)), (Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (FilterBasis.{u1} (Prod.{u1, u1} α α)) (instMembershipSetFilterBasis.{u1} (Prod.{u1, u1} α α)) r B) -> (Exists.{succ u1} (Set.{u1} (Prod.{u1, u1} α α)) (fun (t : Set.{u1} (Prod.{u1, u1} α α)) => And (Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (FilterBasis.{u1} (Prod.{u1, u1} α α)) (instMembershipSetFilterBasis.{u1} (Prod.{u1, u1} α α)) t B) (HasSubset.Subset.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (Set.instHasSubsetSet.{u1} (Prod.{u1, u1} α α)) (compRel.{u1} α t t) r)))) -> (UniformSpace.Core.{u1} α) Case conversion may be inaccurate. Consider using '#align uniform_space.core.mk_of_basis UniformSpace.Core.mkOfBasisₓ'. -/ /-- Defining an `uniform_space.core` from a filter basis satisfying some uniformity-like axioms. -/ def UniformSpace.Core.mkOfBasis {α : Type u} (B : FilterBasis (α × α)) (refl : ∀ r ∈ B, ∀ (x), (x, x) ∈ r) (symm : ∀ r ∈ B, ∃ t ∈ B, t ⊆ Prod.swap ⁻¹' r) (comp : ∀ r ∈ B, ∃ t ∈ B, t ○ t ⊆ r) : UniformSpace.Core α where uniformity := B.filterₓ refl := B.HasBasis.ge_iff.mpr fun r ru => idRel_subset.2 <| refl _ ru symm := (B.HasBasis.tendsto_iffₓ B.HasBasis).mpr symm comp := (HasBasis.le_basis_iff (B.HasBasis.lift' (monotone_id.compRel monotone_id)) B.HasBasis).mpr comp #align uniform_space.core.mk_of_basis UniformSpace.Core.mkOfBasis #print UniformSpace.Core.toTopologicalSpace /- /-- A uniform space generates a topological space -/ def UniformSpace.Core.toTopologicalSpace {α : Type u} (u : UniformSpace.Core α) : TopologicalSpace α where IsOpen s := ∀ x ∈ s, { p : α × α | p.1 = x → p.2 ∈ s } ∈ u.uniformity isOpen_univ := by simp <;> intro <;> exact univ_mem isOpen_inter := fun s t hs ht x ⟨xs, xt⟩ => by filter_upwards [hs x xs, ht x xt] <;> simp (config := { contextual := true }) isOpen_unionₛ := fun s hs x ⟨t, ts, xt⟩ => by filter_upwards [hs t ts x xt]with p ph h using⟨t, ts, ph h⟩ #align uniform_space.core.to_topological_space UniformSpace.Core.toTopologicalSpace -/ #print UniformSpace.core_eq /- theorem UniformSpace.core_eq : ∀ {u₁ u₂ : UniformSpace.Core α}, u₁.uniformity = u₂.uniformity → u₁ = u₂ | ⟨u₁, _, _, _⟩, ⟨u₂, _, _, _⟩, rfl => by congr #align uniform_space.core_eq UniformSpace.core_eq -/ #print UniformSpace /- -- the topological structure is embedded in the uniform structure -- to avoid instance diamond issues. See Note [forgetful inheritance]. /-- A uniform space is a generalization of the "uniform" topological aspects of a metric space. It consists of a filter on `α × α` called the "uniformity", which satisfies properties analogous to the reflexivity, symmetry, and triangle properties of a metric. A metric space has a natural uniformity, and a uniform space has a natural topology. A topological group also has a natural uniformity, even when it is not metrizable. -/ class UniformSpace (α : Type u) extends TopologicalSpace α, UniformSpace.Core α where isOpen_uniformity : ∀ s, @IsOpen _ to_topological_space s ↔ ∀ x ∈ s, { p : α × α | p.1 = x → p.2 ∈ s } ∈ uniformity #align uniform_space UniformSpace -/ #print UniformSpace.mk' /- /-- Alternative constructor for `uniform_space α` when a topology is already given. 
-/ @[match_pattern] def UniformSpace.mk' {α} (t : TopologicalSpace α) (c : UniformSpace.Core α) (is_open_uniformity : ∀ s : Set α, IsOpen s ↔ ∀ x ∈ s, { p : α × α | p.1 = x → p.2 ∈ s } ∈ c.uniformity) : UniformSpace α := ⟨c, isOpen_uniformity⟩ #align uniform_space.mk' UniformSpace.mk' -/ #print UniformSpace.ofCore /- /-- Construct a `uniform_space` from a `uniform_space.core`. -/ def UniformSpace.ofCore {α : Type u} (u : UniformSpace.Core α) : UniformSpace α where toCore := u toTopologicalSpace := u.toTopologicalSpace isOpen_uniformity a := Iff.rfl #align uniform_space.of_core UniformSpace.ofCore -/ #print UniformSpace.ofCoreEq /- /-- Construct a `uniform_space` from a `u : uniform_space.core` and a `topological_space` structure that is equal to `u.to_topological_space`. -/ def UniformSpace.ofCoreEq {α : Type u} (u : UniformSpace.Core α) (t : TopologicalSpace α) (h : t = u.toTopologicalSpace) : UniformSpace α where toCore := u toTopologicalSpace := t isOpen_uniformity a := h.symm ▸ Iff.rfl #align uniform_space.of_core_eq UniformSpace.ofCoreEq -/ #print UniformSpace.toCore_toTopologicalSpace /- theorem UniformSpace.toCore_toTopologicalSpace (u : UniformSpace α) : u.toCore.toTopologicalSpace = u.toTopologicalSpace := topologicalSpace_eq <| funext fun s => by rw [UniformSpace.isOpen_uniformity, isOpen_mk] #align uniform_space.to_core_to_topological_space UniformSpace.toCore_toTopologicalSpace -/ #print uniformity /- /-- The uniformity is a filter on α × α (inferred from an ambient uniform space structure on α). -/ def uniformity (α : Type u) [UniformSpace α] : Filter (α × α) := (@UniformSpace.toCore α _).uniformity #align uniformity uniformity -/ -- mathport name: uniformity_of scoped[Topology] notation "𝓤[" u "]" => @uniformity hole! u #print uniformSpace_eq /- @[ext] theorem uniformSpace_eq : ∀ {u₁ u₂ : UniformSpace α}, 𝓤[u₁] = 𝓤[u₂] → u₁ = u₂ | UniformSpace.mk' t₁ u₁ o₁, UniformSpace.mk' t₂ u₂ o₂, h => by have : u₁ = u₂ := UniformSpace.core_eq h have : t₁ = t₂ := topologicalSpace_eq <| funext fun s => by rw [o₁, o₂] <;> simp [this] simp [*] #align uniform_space_eq uniformSpace_eq -/ #print UniformSpace.ofCoreEq_toCore /- theorem UniformSpace.ofCoreEq_toCore (u : UniformSpace α) (t : TopologicalSpace α) (h : t = u.toCore.toTopologicalSpace) : UniformSpace.ofCoreEq u.toCore t h = u := uniformSpace_eq rfl #align uniform_space.of_core_eq_to_core UniformSpace.ofCoreEq_toCore -/ #print UniformSpace.replaceTopology /- /-- Replace topology in a `uniform_space` instance with a propositionally (but possibly not definitionally) equal one. 
-/ @[reducible] def UniformSpace.replaceTopology {α : Type _} [i : TopologicalSpace α] (u : UniformSpace α) (h : i = u.toTopologicalSpace) : UniformSpace α := UniformSpace.ofCoreEq u.toCore i <| h.trans u.toCore_toTopologicalSpace.symm #align uniform_space.replace_topology UniformSpace.replaceTopology -/ #print UniformSpace.replaceTopology_eq /- theorem UniformSpace.replaceTopology_eq {α : Type _} [i : TopologicalSpace α] (u : UniformSpace α) (h : i = u.toTopologicalSpace) : u.replaceTopology h = u := u.ofCoreEq_toCore _ _ #align uniform_space.replace_topology_eq UniformSpace.replaceTopology_eq -/ /- warning: uniform_space.of_fun -> UniformSpace.ofFun is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : OrderedAddCommMonoid.{u2} β] (d : α -> α -> β), (forall (x : α), Eq.{succ u2} β (d x x) (OfNat.ofNat.{u2} β 0 (OfNat.mk.{u2} β 0 (Zero.zero.{u2} β (AddZeroClass.toHasZero.{u2} β (AddMonoid.toAddZeroClass.{u2} β (AddCommMonoid.toAddMonoid.{u2} β (OrderedAddCommMonoid.toAddCommMonoid.{u2} β _inst_1)))))))) -> (forall (x : α) (y : α), Eq.{succ u2} β (d x y) (d y x)) -> (forall (x : α) (y : α) (z : α), LE.le.{u2} β (Preorder.toLE.{u2} β (PartialOrder.toPreorder.{u2} β (OrderedAddCommMonoid.toPartialOrder.{u2} β _inst_1))) (d x z) (HAdd.hAdd.{u2, u2, u2} β β β (instHAdd.{u2} β (AddZeroClass.toHasAdd.{u2} β (AddMonoid.toAddZeroClass.{u2} β (AddCommMonoid.toAddMonoid.{u2} β (OrderedAddCommMonoid.toAddCommMonoid.{u2} β _inst_1))))) (d x y) (d y z))) -> (forall (ε : β), (GT.gt.{u2} β (Preorder.toLT.{u2} β (PartialOrder.toPreorder.{u2} β (OrderedAddCommMonoid.toPartialOrder.{u2} β _inst_1))) ε (OfNat.ofNat.{u2} β 0 (OfNat.mk.{u2} β 0 (Zero.zero.{u2} β (AddZeroClass.toHasZero.{u2} β (AddMonoid.toAddZeroClass.{u2} β (AddCommMonoid.toAddMonoid.{u2} β (OrderedAddCommMonoid.toAddCommMonoid.{u2} β _inst_1)))))))) -> (Exists.{succ u2} β (fun (δ : β) => Exists.{0} (GT.gt.{u2} β (Preorder.toLT.{u2} β (PartialOrder.toPreorder.{u2} β (OrderedAddCommMonoid.toPartialOrder.{u2} β _inst_1))) δ (OfNat.ofNat.{u2} β 0 (OfNat.mk.{u2} β 0 (Zero.zero.{u2} β (AddZeroClass.toHasZero.{u2} β (AddMonoid.toAddZeroClass.{u2} β (AddCommMonoid.toAddMonoid.{u2} β (OrderedAddCommMonoid.toAddCommMonoid.{u2} β _inst_1)))))))) (fun (H : GT.gt.{u2} β (Preorder.toLT.{u2} β (PartialOrder.toPreorder.{u2} β (OrderedAddCommMonoid.toPartialOrder.{u2} β _inst_1))) δ (OfNat.ofNat.{u2} β 0 (OfNat.mk.{u2} β 0 (Zero.zero.{u2} β (AddZeroClass.toHasZero.{u2} β (AddMonoid.toAddZeroClass.{u2} β (AddCommMonoid.toAddMonoid.{u2} β (OrderedAddCommMonoid.toAddCommMonoid.{u2} β _inst_1)))))))) => forall (x : β), (LT.lt.{u2} β (Preorder.toLT.{u2} β (PartialOrder.toPreorder.{u2} β (OrderedAddCommMonoid.toPartialOrder.{u2} β _inst_1))) x δ) -> (forall (y : β), (LT.lt.{u2} β (Preorder.toLT.{u2} β (PartialOrder.toPreorder.{u2} β (OrderedAddCommMonoid.toPartialOrder.{u2} β _inst_1))) y δ) -> (LT.lt.{u2} β (Preorder.toLT.{u2} β (PartialOrder.toPreorder.{u2} β (OrderedAddCommMonoid.toPartialOrder.{u2} β _inst_1))) (HAdd.hAdd.{u2, u2, u2} β β β (instHAdd.{u2} β (AddZeroClass.toHasAdd.{u2} β (AddMonoid.toAddZeroClass.{u2} β (AddCommMonoid.toAddMonoid.{u2} β (OrderedAddCommMonoid.toAddCommMonoid.{u2} β _inst_1))))) x y) ε)))))) -> (UniformSpace.{u1} α) but is expected to have type forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : OrderedAddCommMonoid.{u2} β] (d : α -> α -> β), (forall (x : α), Eq.{succ u2} β (d x x) (OfNat.ofNat.{u2} β 0 (Zero.toOfNat0.{u2} β (AddMonoid.toZero.{u2} β (AddCommMonoid.toAddMonoid.{u2} β 
(OrderedAddCommMonoid.toAddCommMonoid.{u2} β _inst_1)))))) -> (forall (x : α) (y : α), Eq.{succ u2} β (d x y) (d y x)) -> (forall (x : α) (y : α) (z : α), LE.le.{u2} β (Preorder.toLE.{u2} β (PartialOrder.toPreorder.{u2} β (OrderedAddCommMonoid.toPartialOrder.{u2} β _inst_1))) (d x z) (HAdd.hAdd.{u2, u2, u2} β β β (instHAdd.{u2} β (AddZeroClass.toAdd.{u2} β (AddMonoid.toAddZeroClass.{u2} β (AddCommMonoid.toAddMonoid.{u2} β (OrderedAddCommMonoid.toAddCommMonoid.{u2} β _inst_1))))) (d x y) (d y z))) -> (forall (ε : β), (GT.gt.{u2} β (Preorder.toLT.{u2} β (PartialOrder.toPreorder.{u2} β (OrderedAddCommMonoid.toPartialOrder.{u2} β _inst_1))) ε (OfNat.ofNat.{u2} β 0 (Zero.toOfNat0.{u2} β (AddMonoid.toZero.{u2} β (AddCommMonoid.toAddMonoid.{u2} β (OrderedAddCommMonoid.toAddCommMonoid.{u2} β _inst_1)))))) -> (Exists.{succ u2} β (fun (δ : β) => And (GT.gt.{u2} β (Preorder.toLT.{u2} β (PartialOrder.toPreorder.{u2} β (OrderedAddCommMonoid.toPartialOrder.{u2} β _inst_1))) δ (OfNat.ofNat.{u2} β 0 (Zero.toOfNat0.{u2} β (AddMonoid.toZero.{u2} β (AddCommMonoid.toAddMonoid.{u2} β (OrderedAddCommMonoid.toAddCommMonoid.{u2} β _inst_1)))))) (forall (x : β), (LT.lt.{u2} β (Preorder.toLT.{u2} β (PartialOrder.toPreorder.{u2} β (OrderedAddCommMonoid.toPartialOrder.{u2} β _inst_1))) x δ) -> (forall (y : β), (LT.lt.{u2} β (Preorder.toLT.{u2} β (PartialOrder.toPreorder.{u2} β (OrderedAddCommMonoid.toPartialOrder.{u2} β _inst_1))) y δ) -> (LT.lt.{u2} β (Preorder.toLT.{u2} β (PartialOrder.toPreorder.{u2} β (OrderedAddCommMonoid.toPartialOrder.{u2} β _inst_1))) (HAdd.hAdd.{u2, u2, u2} β β β (instHAdd.{u2} β (AddZeroClass.toAdd.{u2} β (AddMonoid.toAddZeroClass.{u2} β (AddCommMonoid.toAddMonoid.{u2} β (OrderedAddCommMonoid.toAddCommMonoid.{u2} β _inst_1))))) x y) ε)))))) -> (UniformSpace.{u1} α) Case conversion may be inaccurate. Consider using '#align uniform_space.of_fun UniformSpace.ofFunₓ'. -/ /-- Define a `uniform_space` using a "distance" function. The function can be, e.g., the distance in a (usual or extended) metric space or an absolute value on a ring. 
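For instance, on a linearly ordered field one can take `d x y = |x - y|`; the `half` hypothesis is then witnessed by `δ := ε / 2`.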
-/ def UniformSpace.ofFun {α β : Type _} [OrderedAddCommMonoid β] (d : α → α → β) (refl : ∀ x, d x x = 0) (symm : ∀ x y, d x y = d y x) (triangle : ∀ x y z, d x z ≤ d x y + d y z) (half : ∀ ε > (0 : β), ∃ δ > (0 : β), ∀ x < δ, ∀ y < δ, x + y < ε) : UniformSpace α := UniformSpace.ofCore { uniformity := ⨅ r > 0, 𝓟 { x | d x.1 x.2 < r } refl := le_infᵢ₂ fun r hr => principal_mono.2 <| idRel_subset.2 fun x => by simpa [refl] symm := tendsto_infᵢ_infᵢ fun r => tendsto_infᵢ_infᵢ fun _ => tendsto_principal_principal.2 fun x hx => by rwa [mem_set_of, symm] comp := le_infᵢ₂ fun r hr => let ⟨δ, h0, hδr⟩ := half r hr le_principal_iff.2 <| mem_of_superset (mem_lift' <| mem_infᵢ_of_mem δ <| mem_infᵢ_of_mem h0 <| mem_principal_self _) fun ⟨x, z⟩ ⟨y, h₁, h₂⟩ => (triangle _ _ _).trans_lt (hδr _ h₁ _ h₂) } #align uniform_space.of_fun UniformSpace.ofFun /- warning: uniform_space.has_basis_of_fun -> UniformSpace.hasBasis_ofFun is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : LinearOrderedAddCommMonoid.{u2} β], (Exists.{succ u2} β (fun (x : β) => LT.lt.{u2} β (Preorder.toLT.{u2} β (PartialOrder.toPreorder.{u2} β (OrderedAddCommMonoid.toPartialOrder.{u2} β (LinearOrderedAddCommMonoid.toOrderedAddCommMonoid.{u2} β _inst_1)))) (OfNat.ofNat.{u2} β 0 (OfNat.mk.{u2} β 0 (Zero.zero.{u2} β (AddZeroClass.toHasZero.{u2} β (AddMonoid.toAddZeroClass.{u2} β (AddCommMonoid.toAddMonoid.{u2} β (OrderedAddCommMonoid.toAddCommMonoid.{u2} β (LinearOrderedAddCommMonoid.toOrderedAddCommMonoid.{u2} β _inst_1)))))))) x)) -> (forall (d : α -> α -> β) (refl : forall (x : α), Eq.{succ u2} β (d x x) (OfNat.ofNat.{u2} β 0 (OfNat.mk.{u2} β 0 (Zero.zero.{u2} β (AddZeroClass.toHasZero.{u2} β (AddMonoid.toAddZeroClass.{u2} β (AddCommMonoid.toAddMonoid.{u2} β (OrderedAddCommMonoid.toAddCommMonoid.{u2} β (LinearOrderedAddCommMonoid.toOrderedAddCommMonoid.{u2} β _inst_1))))))))) (symm : forall (x : α) (y : α), Eq.{succ u2} β (d x y) (d y x)) (triangle : forall (x : α) (y : α) (z : α), LE.le.{u2} β (Preorder.toLE.{u2} β (PartialOrder.toPreorder.{u2} β (OrderedAddCommMonoid.toPartialOrder.{u2} β (LinearOrderedAddCommMonoid.toOrderedAddCommMonoid.{u2} β _inst_1)))) (d x z) (HAdd.hAdd.{u2, u2, u2} β β β (instHAdd.{u2} β (AddZeroClass.toHasAdd.{u2} β (AddMonoid.toAddZeroClass.{u2} β (AddCommMonoid.toAddMonoid.{u2} β (OrderedAddCommMonoid.toAddCommMonoid.{u2} β (LinearOrderedAddCommMonoid.toOrderedAddCommMonoid.{u2} β _inst_1)))))) (d x y) (d y z))) (half : forall (ε : β), (GT.gt.{u2} β (Preorder.toLT.{u2} β (PartialOrder.toPreorder.{u2} β (OrderedAddCommMonoid.toPartialOrder.{u2} β (LinearOrderedAddCommMonoid.toOrderedAddCommMonoid.{u2} β _inst_1)))) ε (OfNat.ofNat.{u2} β 0 (OfNat.mk.{u2} β 0 (Zero.zero.{u2} β (AddZeroClass.toHasZero.{u2} β (AddMonoid.toAddZeroClass.{u2} β (AddCommMonoid.toAddMonoid.{u2} β (OrderedAddCommMonoid.toAddCommMonoid.{u2} β (LinearOrderedAddCommMonoid.toOrderedAddCommMonoid.{u2} β _inst_1))))))))) -> (Exists.{succ u2} β (fun (δ : β) => Exists.{0} (GT.gt.{u2} β (Preorder.toLT.{u2} β (PartialOrder.toPreorder.{u2} β (OrderedAddCommMonoid.toPartialOrder.{u2} β (LinearOrderedAddCommMonoid.toOrderedAddCommMonoid.{u2} β _inst_1)))) δ (OfNat.ofNat.{u2} β 0 (OfNat.mk.{u2} β 0 (Zero.zero.{u2} β (AddZeroClass.toHasZero.{u2} β (AddMonoid.toAddZeroClass.{u2} β (AddCommMonoid.toAddMonoid.{u2} β (OrderedAddCommMonoid.toAddCommMonoid.{u2} β (LinearOrderedAddCommMonoid.toOrderedAddCommMonoid.{u2} β _inst_1))))))))) (fun (H : GT.gt.{u2} β (Preorder.toLT.{u2} β (PartialOrder.toPreorder.{u2} β 
(OrderedAddCommMonoid.toPartialOrder.{u2} β (LinearOrderedAddCommMonoid.toOrderedAddCommMonoid.{u2} β _inst_1)))) δ (OfNat.ofNat.{u2} β 0 (OfNat.mk.{u2} β 0 (Zero.zero.{u2} β (AddZeroClass.toHasZero.{u2} β (AddMonoid.toAddZeroClass.{u2} β (AddCommMonoid.toAddMonoid.{u2} β (OrderedAddCommMonoid.toAddCommMonoid.{u2} β (LinearOrderedAddCommMonoid.toOrderedAddCommMonoid.{u2} β _inst_1))))))))) => forall (x : β), (LT.lt.{u2} β (Preorder.toLT.{u2} β (PartialOrder.toPreorder.{u2} β (OrderedAddCommMonoid.toPartialOrder.{u2} β (LinearOrderedAddCommMonoid.toOrderedAddCommMonoid.{u2} β _inst_1)))) x δ) -> (forall (y : β), (LT.lt.{u2} β (Preorder.toLT.{u2} β (PartialOrder.toPreorder.{u2} β (OrderedAddCommMonoid.toPartialOrder.{u2} β (LinearOrderedAddCommMonoid.toOrderedAddCommMonoid.{u2} β _inst_1)))) y δ) -> (LT.lt.{u2} β (Preorder.toLT.{u2} β (PartialOrder.toPreorder.{u2} β (OrderedAddCommMonoid.toPartialOrder.{u2} β (LinearOrderedAddCommMonoid.toOrderedAddCommMonoid.{u2} β _inst_1)))) (HAdd.hAdd.{u2, u2, u2} β β β (instHAdd.{u2} β (AddZeroClass.toHasAdd.{u2} β (AddMonoid.toAddZeroClass.{u2} β (AddCommMonoid.toAddMonoid.{u2} β (OrderedAddCommMonoid.toAddCommMonoid.{u2} β (LinearOrderedAddCommMonoid.toOrderedAddCommMonoid.{u2} β _inst_1)))))) x y) ε)))))), Filter.HasBasis.{u1, succ u2} (Prod.{u1, u1} α α) β (uniformity.{u1} α (UniformSpace.ofFun.{u1, u2} α β (LinearOrderedAddCommMonoid.toOrderedAddCommMonoid.{u2} β _inst_1) d refl symm triangle half)) (LT.lt.{u2} β (Preorder.toLT.{u2} β (PartialOrder.toPreorder.{u2} β (OrderedAddCommMonoid.toPartialOrder.{u2} β (LinearOrderedAddCommMonoid.toOrderedAddCommMonoid.{u2} β _inst_1)))) (OfNat.ofNat.{u2} β 0 (OfNat.mk.{u2} β 0 (Zero.zero.{u2} β (AddZeroClass.toHasZero.{u2} β (AddMonoid.toAddZeroClass.{u2} β (AddCommMonoid.toAddMonoid.{u2} β (OrderedAddCommMonoid.toAddCommMonoid.{u2} β (LinearOrderedAddCommMonoid.toOrderedAddCommMonoid.{u2} β _inst_1))))))))) (fun (ε : β) => setOf.{u1} (Prod.{u1, u1} α α) (fun (x : Prod.{u1, u1} α α) => LT.lt.{u2} β (Preorder.toLT.{u2} β (PartialOrder.toPreorder.{u2} β (OrderedAddCommMonoid.toPartialOrder.{u2} β (LinearOrderedAddCommMonoid.toOrderedAddCommMonoid.{u2} β _inst_1)))) (d (Prod.fst.{u1, u1} α α x) (Prod.snd.{u1, u1} α α x)) ε))) but is expected to have type forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : LinearOrderedAddCommMonoid.{u2} β], (Exists.{succ u2} β (fun (x : β) => LT.lt.{u2} β (Preorder.toLT.{u2} β (PartialOrder.toPreorder.{u2} β (OrderedAddCommMonoid.toPartialOrder.{u2} β (LinearOrderedAddCommMonoid.toOrderedAddCommMonoid.{u2} β _inst_1)))) (OfNat.ofNat.{u2} β 0 (Zero.toOfNat0.{u2} β (AddMonoid.toZero.{u2} β (AddCommMonoid.toAddMonoid.{u2} β (LinearOrderedAddCommMonoid.toAddCommMonoid.{u2} β _inst_1))))) x)) -> (forall (d : α -> α -> β) (refl : forall (x : α), Eq.{succ u2} β (d x x) (OfNat.ofNat.{u2} β 0 (Zero.toOfNat0.{u2} β (AddMonoid.toZero.{u2} β (AddCommMonoid.toAddMonoid.{u2} β (LinearOrderedAddCommMonoid.toAddCommMonoid.{u2} β _inst_1)))))) (symm : forall (x : α) (y : α), Eq.{succ u2} β (d x y) (d y x)) (triangle : forall (x : α) (y : α) (z : α), LE.le.{u2} β (Preorder.toLE.{u2} β (PartialOrder.toPreorder.{u2} β (OrderedAddCommMonoid.toPartialOrder.{u2} β (LinearOrderedAddCommMonoid.toOrderedAddCommMonoid.{u2} β _inst_1)))) (d x z) (HAdd.hAdd.{u2, u2, u2} β β β (instHAdd.{u2} β (AddZeroClass.toAdd.{u2} β (AddMonoid.toAddZeroClass.{u2} β (AddCommMonoid.toAddMonoid.{u2} β (LinearOrderedAddCommMonoid.toAddCommMonoid.{u2} β _inst_1))))) (d x y) (d y z))) (half : forall (ε : β), (GT.gt.{u2} β 
(Preorder.toLT.{u2} β (PartialOrder.toPreorder.{u2} β (OrderedAddCommMonoid.toPartialOrder.{u2} β (LinearOrderedAddCommMonoid.toOrderedAddCommMonoid.{u2} β _inst_1)))) ε (OfNat.ofNat.{u2} β 0 (Zero.toOfNat0.{u2} β (AddMonoid.toZero.{u2} β (AddCommMonoid.toAddMonoid.{u2} β (LinearOrderedAddCommMonoid.toAddCommMonoid.{u2} β _inst_1)))))) -> (Exists.{succ u2} β (fun (δ : β) => And (GT.gt.{u2} β (Preorder.toLT.{u2} β (PartialOrder.toPreorder.{u2} β (OrderedAddCommMonoid.toPartialOrder.{u2} β (LinearOrderedAddCommMonoid.toOrderedAddCommMonoid.{u2} β _inst_1)))) δ (OfNat.ofNat.{u2} β 0 (Zero.toOfNat0.{u2} β (AddMonoid.toZero.{u2} β (AddCommMonoid.toAddMonoid.{u2} β (LinearOrderedAddCommMonoid.toAddCommMonoid.{u2} β _inst_1)))))) (forall (x : β), (LT.lt.{u2} β (Preorder.toLT.{u2} β (PartialOrder.toPreorder.{u2} β (OrderedAddCommMonoid.toPartialOrder.{u2} β (LinearOrderedAddCommMonoid.toOrderedAddCommMonoid.{u2} β _inst_1)))) x δ) -> (forall (y : β), (LT.lt.{u2} β (Preorder.toLT.{u2} β (PartialOrder.toPreorder.{u2} β (OrderedAddCommMonoid.toPartialOrder.{u2} β (LinearOrderedAddCommMonoid.toOrderedAddCommMonoid.{u2} β _inst_1)))) y δ) -> (LT.lt.{u2} β (Preorder.toLT.{u2} β (PartialOrder.toPreorder.{u2} β (OrderedAddCommMonoid.toPartialOrder.{u2} β (LinearOrderedAddCommMonoid.toOrderedAddCommMonoid.{u2} β _inst_1)))) (HAdd.hAdd.{u2, u2, u2} β β β (instHAdd.{u2} β (AddZeroClass.toAdd.{u2} β (AddMonoid.toAddZeroClass.{u2} β (AddCommMonoid.toAddMonoid.{u2} β (LinearOrderedAddCommMonoid.toAddCommMonoid.{u2} β _inst_1))))) x y) ε)))))), Filter.HasBasis.{u1, succ u2} (Prod.{u1, u1} α α) β (uniformity.{u1} α (UniformSpace.ofFun.{u1, u2} α β (LinearOrderedAddCommMonoid.toOrderedAddCommMonoid.{u2} β _inst_1) d refl symm triangle half)) (fun ([email protected]._hyg.4156 : β) => LT.lt.{u2} β (Preorder.toLT.{u2} β (PartialOrder.toPreorder.{u2} β (OrderedAddCommMonoid.toPartialOrder.{u2} β (LinearOrderedAddCommMonoid.toOrderedAddCommMonoid.{u2} β _inst_1)))) (OfNat.ofNat.{u2} β 0 (Zero.toOfNat0.{u2} β (AddMonoid.toZero.{u2} β (AddCommMonoid.toAddMonoid.{u2} β (LinearOrderedAddCommMonoid.toAddCommMonoid.{u2} β _inst_1))))) [email protected]._hyg.4156) (fun (ε : β) => setOf.{u1} (Prod.{u1, u1} α α) (fun (x : Prod.{u1, u1} α α) => LT.lt.{u2} β (Preorder.toLT.{u2} β (PartialOrder.toPreorder.{u2} β (OrderedAddCommMonoid.toPartialOrder.{u2} β (LinearOrderedAddCommMonoid.toOrderedAddCommMonoid.{u2} β _inst_1)))) (d (Prod.fst.{u1, u1} α α x) (Prod.snd.{u1, u1} α α x)) ε))) Case conversion may be inaccurate. Consider using '#align uniform_space.has_basis_of_fun UniformSpace.hasBasis_ofFunₓ'. 
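-/

/- Two quick sanity checks, stated as illustrative sketches that only use the declarations
above: membership in a composition of relations is witnessed by an intermediate point, and
a symmetric entourage can be traversed in either direction. -/
example {r₁ r₂ : Set (α × α)} {x y z : α} (h₁ : (x, z) ∈ r₁) (h₂ : (z, y) ∈ r₂) :
    (x, y) ∈ r₁ ○ r₂ :=
  prod_mk_mem_compRel h₁ h₂

example {V : Set (α × α)} (hV : SymmetricRel V) {x y : α} (h : (x, y) ∈ V) : (y, x) ∈ V :=
  hV.mk_mem_comm.1 h

/-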
-/ theorem UniformSpace.hasBasis_ofFun {α β : Type _} [LinearOrderedAddCommMonoid β] (h₀ : ∃ x : β, 0 < x) (d : α → α → β) (refl : ∀ x, d x x = 0) (symm : ∀ x y, d x y = d y x) (triangle : ∀ x y z, d x z ≤ d x y + d y z) (half : ∀ ε > (0 : β), ∃ δ > (0 : β), ∀ x < δ, ∀ y < δ, x + y < ε) : 𝓤[UniformSpace.ofFun d refl symm triangle half].HasBasis ((· < ·) (0 : β)) fun ε => { x | d x.1 x.2 < ε } := hasBasis_binfᵢ_principal' (fun ε₁ h₁ ε₂ h₂ => ⟨min ε₁ ε₂, lt_min h₁ h₂, fun _x hx => lt_of_lt_of_le hx (min_le_left _ _), fun _x hx => lt_of_lt_of_le hx (min_le_right _ _)⟩) h₀ #align uniform_space.has_basis_of_fun UniformSpace.hasBasis_ofFun section UniformSpace variable [UniformSpace α] -- mathport name: uniformity scoped[uniformity] notation "𝓤" => uniformity #print isOpen_uniformity /- theorem isOpen_uniformity {s : Set α} : IsOpen s ↔ ∀ x ∈ s, { p : α × α | p.1 = x → p.2 ∈ s } ∈ 𝓤 α := UniformSpace.isOpen_uniformity s #align is_open_uniformity isOpen_uniformity -/ /- warning: refl_le_uniformity -> refl_le_uniformity is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α], LE.le.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (Preorder.toLE.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (PartialOrder.toPreorder.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.partialOrder.{u1} (Prod.{u1, u1} α α)))) (Filter.principal.{u1} (Prod.{u1, u1} α α) (idRel.{u1} α)) (uniformity.{u1} α _inst_1) but is expected to have type forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α], LE.le.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (Preorder.toLE.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (PartialOrder.toPreorder.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.instPartialOrderFilter.{u1} (Prod.{u1, u1} α α)))) (Filter.principal.{u1} (Prod.{u1, u1} α α) (idRel.{u1} α)) (uniformity.{u1} α _inst_1) Case conversion may be inaccurate. Consider using '#align refl_le_uniformity refl_le_uniformityₓ'. 
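-/

/- Two immediate consequences, as illustrative sketches: `𝓤 α` is a filter on `α × α`, so the
trivial entourage `univ` always belongs to it, and `isOpen_uniformity` shows (vacuously) that
the empty set is open in the induced topology. -/
example : (univ : Set (α × α)) ∈ 𝓤 α :=
  univ_mem

example : IsOpen (∅ : Set α) :=
  isOpen_uniformity.2 fun x hx => absurd hx (not_mem_empty x)

/-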
-/ theorem refl_le_uniformity : 𝓟 idRel ≤ 𝓤 α := (@UniformSpace.toCore α _).refl #align refl_le_uniformity refl_le_uniformity #print uniformity.neBot /- instance uniformity.neBot [Nonempty α] : NeBot (𝓤 α) := diagonal_nonempty.principal_neBot.mono refl_le_uniformity #align uniformity.ne_bot uniformity.neBot -/ #print refl_mem_uniformity /- theorem refl_mem_uniformity {x : α} {s : Set (α × α)} (h : s ∈ 𝓤 α) : (x, x) ∈ s := refl_le_uniformity h rfl #align refl_mem_uniformity refl_mem_uniformity -/ #print mem_uniformity_of_eq /- theorem mem_uniformity_of_eq {x y : α} {s : Set (α × α)} (h : s ∈ 𝓤 α) (hx : x = y) : (x, y) ∈ s := refl_le_uniformity h hx #align mem_uniformity_of_eq mem_uniformity_of_eq -/ /- warning: symm_le_uniformity -> symm_le_uniformity is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α], LE.le.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (Preorder.toLE.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (PartialOrder.toPreorder.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.partialOrder.{u1} (Prod.{u1, u1} α α)))) (Filter.map.{u1, u1} (Prod.{u1, u1} α α) (Prod.{u1, u1} α α) (Prod.swap.{u1, u1} α α) (uniformity.{u1} α _inst_1)) (uniformity.{u1} α _inst_1) but is expected to have type forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α], LE.le.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (Preorder.toLE.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (PartialOrder.toPreorder.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.instPartialOrderFilter.{u1} (Prod.{u1, u1} α α)))) (Filter.map.{u1, u1} (Prod.{u1, u1} α α) (Prod.{u1, u1} α α) (Prod.swap.{u1, u1} α α) (uniformity.{u1} α _inst_1)) (uniformity.{u1} α _inst_1) Case conversion may be inaccurate. Consider using '#align symm_le_uniformity symm_le_uniformityₓ'. -/ theorem symm_le_uniformity : map (@Prod.swap α α) (𝓤 _) ≤ 𝓤 _ := (@UniformSpace.toCore α _).symm #align symm_le_uniformity symm_le_uniformity /- warning: comp_le_uniformity -> comp_le_uniformity is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α], LE.le.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (Preorder.toLE.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (PartialOrder.toPreorder.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.partialOrder.{u1} (Prod.{u1, u1} α α)))) (Filter.lift'.{u1, u1} (Prod.{u1, u1} α α) (Prod.{u1, u1} α α) (uniformity.{u1} α _inst_1) (fun (s : Set.{u1} (Prod.{u1, u1} α α)) => compRel.{u1} α s s)) (uniformity.{u1} α _inst_1) but is expected to have type forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α], LE.le.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (Preorder.toLE.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (PartialOrder.toPreorder.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.instPartialOrderFilter.{u1} (Prod.{u1, u1} α α)))) (Filter.lift'.{u1, u1} (Prod.{u1, u1} α α) (Prod.{u1, u1} α α) (uniformity.{u1} α _inst_1) (fun (s : Set.{u1} (Prod.{u1, u1} α α)) => compRel.{u1} α s s)) (uniformity.{u1} α _inst_1) Case conversion may be inaccurate. Consider using '#align comp_le_uniformity comp_le_uniformityₓ'. 
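-/

/- An illustrative sketch combining reflexivity and symmetry of the uniformity: every
entourage contains the diagonal, and pulling an entourage back along `Prod.swap` again
gives an entourage. -/
example {s : Set (α × α)} (hs : s ∈ 𝓤 α) :
    (∀ x : α, (x, x) ∈ s) ∧ Prod.swap ⁻¹' s ∈ 𝓤 α :=
  ⟨fun x => refl_mem_uniformity hs, symm_le_uniformity hs⟩

/-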
-/ theorem comp_le_uniformity : ((𝓤 α).lift' fun s : Set (α × α) => s ○ s) ≤ 𝓤 α := (@UniformSpace.toCore α _).comp #align comp_le_uniformity comp_le_uniformity #print tendsto_swap_uniformity /- theorem tendsto_swap_uniformity : Tendsto (@Prod.swap α α) (𝓤 α) (𝓤 α) := symm_le_uniformity #align tendsto_swap_uniformity tendsto_swap_uniformity -/ /- warning: comp_mem_uniformity_sets -> comp_mem_uniformity_sets is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] {s : Set.{u1} (Prod.{u1, u1} α α)}, (Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) s (uniformity.{u1} α _inst_1)) -> (Exists.{succ u1} (Set.{u1} (Prod.{u1, u1} α α)) (fun (t : Set.{u1} (Prod.{u1, u1} α α)) => Exists.{0} (Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) t (uniformity.{u1} α _inst_1)) (fun (H : Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) t (uniformity.{u1} α _inst_1)) => HasSubset.Subset.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (Set.hasSubset.{u1} (Prod.{u1, u1} α α)) (compRel.{u1} α t t) s))) but is expected to have type forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] {s : Set.{u1} (Prod.{u1, u1} α α)}, (Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (instMembershipSetFilter.{u1} (Prod.{u1, u1} α α)) s (uniformity.{u1} α _inst_1)) -> (Exists.{succ u1} (Set.{u1} (Prod.{u1, u1} α α)) (fun (t : Set.{u1} (Prod.{u1, u1} α α)) => And (Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (instMembershipSetFilter.{u1} (Prod.{u1, u1} α α)) t (uniformity.{u1} α _inst_1)) (HasSubset.Subset.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (Set.instHasSubsetSet.{u1} (Prod.{u1, u1} α α)) (compRel.{u1} α t t) s))) Case conversion may be inaccurate. Consider using '#align comp_mem_uniformity_sets comp_mem_uniformity_setsₓ'. -/ theorem comp_mem_uniformity_sets {s : Set (α × α)} (hs : s ∈ 𝓤 α) : ∃ t ∈ 𝓤 α, t ○ t ⊆ s := have : s ∈ (𝓤 α).lift' fun t : Set (α × α) => t ○ t := comp_le_uniformity hs (mem_lift'_sets <| monotone_id.compRel monotone_id).mp this #align comp_mem_uniformity_sets comp_mem_uniformity_sets #print eventually_uniformity_iterate_comp_subset /- /-- If `s ∈ 𝓤 α`, then for any natural `n`, for a subset `t` of a sufficiently small set in `𝓤 α`, we have `t ○ t ○ ... ○ t ⊆ s` (`n` compositions). -/ theorem eventually_uniformity_iterate_comp_subset {s : Set (α × α)} (hs : s ∈ 𝓤 α) (n : ℕ) : ∀ᶠ t in (𝓤 α).smallSets, ((· ○ ·) t^[n]) t ⊆ s := by suffices : ∀ᶠ t in (𝓤 α).smallSets, t ⊆ s ∧ ((· ○ ·) t^[n]) t ⊆ s exact (eventually_and.1 this).2 induction' n with n ihn generalizing s; · simpa rcases comp_mem_uniformity_sets hs with ⟨t, htU, hts⟩ refine' (ihn htU).mono fun U hU => _ rw [Function.iterate_succ_apply'] exact ⟨hU.1.trans <| (subset_comp_self <| refl_le_uniformity htU).trans hts, (compRel_mono hU.1 hU.2).trans hts⟩ #align eventually_uniformity_iterate_comp_subset eventually_uniformity_iterate_comp_subset -/ #print eventually_uniformity_comp_subset /- /-- If `s ∈ 𝓤 α`, then for any natural `n`, for a subset `t` of a sufficiently small set in `𝓤 α`, we have `t ○ t ⊆ s`. 
-/ theorem eventually_uniformity_comp_subset {s : Set (α × α)} (hs : s ∈ 𝓤 α) : ∀ᶠ t in (𝓤 α).smallSets, t ○ t ⊆ s := eventually_uniformity_iterate_comp_subset hs 1 #align eventually_uniformity_comp_subset eventually_uniformity_comp_subset -/ #print Filter.Tendsto.uniformity_trans /- /-- Relation `λ f g, tendsto (λ x, (f x, g x)) l (𝓤 α)` is transitive. -/ theorem Filter.Tendsto.uniformity_trans {l : Filter β} {f₁ f₂ f₃ : β → α} (h₁₂ : Tendsto (fun x => (f₁ x, f₂ x)) l (𝓤 α)) (h₂₃ : Tendsto (fun x => (f₂ x, f₃ x)) l (𝓤 α)) : Tendsto (fun x => (f₁ x, f₃ x)) l (𝓤 α) := by refine' le_trans (le_lift'.2 fun s hs => mem_map.2 _) comp_le_uniformity filter_upwards [h₁₂ hs, h₂₃ hs]with x hx₁₂ hx₂₃ using⟨_, hx₁₂, hx₂₃⟩ #align filter.tendsto.uniformity_trans Filter.Tendsto.uniformity_trans -/ #print Filter.Tendsto.uniformity_symm /- /-- Relation `λ f g, tendsto (λ x, (f x, g x)) l (𝓤 α)` is symmetric -/ theorem Filter.Tendsto.uniformity_symm {l : Filter β} {f : β → α × α} (h : Tendsto f l (𝓤 α)) : Tendsto (fun x => ((f x).2, (f x).1)) l (𝓤 α) := tendsto_swap_uniformity.comp h #align filter.tendsto.uniformity_symm Filter.Tendsto.uniformity_symm -/ #print tendsto_diag_uniformity /- /-- Relation `λ f g, tendsto (λ x, (f x, g x)) l (𝓤 α)` is reflexive. -/ theorem tendsto_diag_uniformity (f : β → α) (l : Filter β) : Tendsto (fun x => (f x, f x)) l (𝓤 α) := fun s hs => mem_map.2 <| univ_mem' fun x => refl_mem_uniformity hs #align tendsto_diag_uniformity tendsto_diag_uniformity -/ #print tendsto_const_uniformity /- theorem tendsto_const_uniformity {a : α} {f : Filter β} : Tendsto (fun _ => (a, a)) f (𝓤 α) := tendsto_diag_uniformity (fun _ => a) f #align tendsto_const_uniformity tendsto_const_uniformity -/ /- warning: symm_of_uniformity -> symm_of_uniformity is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] {s : Set.{u1} (Prod.{u1, u1} α α)}, (Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) s (uniformity.{u1} α _inst_1)) -> (Exists.{succ u1} (Set.{u1} (Prod.{u1, u1} α α)) (fun (t : Set.{u1} (Prod.{u1, u1} α α)) => Exists.{0} (Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) t (uniformity.{u1} α _inst_1)) (fun (H : Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) t (uniformity.{u1} α _inst_1)) => And (forall (a : α) (b : α), (Membership.Mem.{u1, u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) (Set.hasMem.{u1} (Prod.{u1, u1} α α)) (Prod.mk.{u1, u1} α α a b) t) -> (Membership.Mem.{u1, u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) (Set.hasMem.{u1} (Prod.{u1, u1} α α)) (Prod.mk.{u1, u1} α α b a) t)) (HasSubset.Subset.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (Set.hasSubset.{u1} (Prod.{u1, u1} α α)) t s)))) but is expected to have type forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] {s : Set.{u1} (Prod.{u1, u1} α α)}, (Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (instMembershipSetFilter.{u1} (Prod.{u1, u1} α α)) s (uniformity.{u1} α _inst_1)) -> (Exists.{succ u1} (Set.{u1} (Prod.{u1, u1} α α)) (fun (t : Set.{u1} (Prod.{u1, u1} α α)) => And (Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (instMembershipSetFilter.{u1} (Prod.{u1, u1} α α)) t (uniformity.{u1} α _inst_1)) (And (forall (a : α) (b : α), (Membership.mem.{u1, u1} 
(Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) (Set.instMembershipSet.{u1} (Prod.{u1, u1} α α)) (Prod.mk.{u1, u1} α α a b) t) -> (Membership.mem.{u1, u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) (Set.instMembershipSet.{u1} (Prod.{u1, u1} α α)) (Prod.mk.{u1, u1} α α b a) t)) (HasSubset.Subset.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (Set.instHasSubsetSet.{u1} (Prod.{u1, u1} α α)) t s)))) Case conversion may be inaccurate. Consider using '#align symm_of_uniformity symm_of_uniformityₓ'. -/ theorem symm_of_uniformity {s : Set (α × α)} (hs : s ∈ 𝓤 α) : ∃ t ∈ 𝓤 α, (∀ a b, (a, b) ∈ t → (b, a) ∈ t) ∧ t ⊆ s := have : preimage Prod.swap s ∈ 𝓤 α := symm_le_uniformity hs ⟨s ∩ preimage Prod.swap s, inter_mem hs this, fun a b ⟨h₁, h₂⟩ => ⟨h₂, h₁⟩, inter_subset_left _ _⟩ #align symm_of_uniformity symm_of_uniformity /- warning: comp_symm_of_uniformity -> comp_symm_of_uniformity is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] {s : Set.{u1} (Prod.{u1, u1} α α)}, (Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) s (uniformity.{u1} α _inst_1)) -> (Exists.{succ u1} (Set.{u1} (Prod.{u1, u1} α α)) (fun (t : Set.{u1} (Prod.{u1, u1} α α)) => Exists.{0} (Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) t (uniformity.{u1} α _inst_1)) (fun (H : Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) t (uniformity.{u1} α _inst_1)) => And (forall {a : α} {b : α}, (Membership.Mem.{u1, u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) (Set.hasMem.{u1} (Prod.{u1, u1} α α)) (Prod.mk.{u1, u1} α α a b) t) -> (Membership.Mem.{u1, u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) (Set.hasMem.{u1} (Prod.{u1, u1} α α)) (Prod.mk.{u1, u1} α α b a) t)) (HasSubset.Subset.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (Set.hasSubset.{u1} (Prod.{u1, u1} α α)) (compRel.{u1} α t t) s)))) but is expected to have type forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] {s : Set.{u1} (Prod.{u1, u1} α α)}, (Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (instMembershipSetFilter.{u1} (Prod.{u1, u1} α α)) s (uniformity.{u1} α _inst_1)) -> (Exists.{succ u1} (Set.{u1} (Prod.{u1, u1} α α)) (fun (t : Set.{u1} (Prod.{u1, u1} α α)) => And (Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (instMembershipSetFilter.{u1} (Prod.{u1, u1} α α)) t (uniformity.{u1} α _inst_1)) (And (forall {a : α} {b : α}, (Membership.mem.{u1, u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) (Set.instMembershipSet.{u1} (Prod.{u1, u1} α α)) (Prod.mk.{u1, u1} α α a b) t) -> (Membership.mem.{u1, u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) (Set.instMembershipSet.{u1} (Prod.{u1, u1} α α)) (Prod.mk.{u1, u1} α α b a) t)) (HasSubset.Subset.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (Set.instHasSubsetSet.{u1} (Prod.{u1, u1} α α)) (compRel.{u1} α t t) s)))) Case conversion may be inaccurate. Consider using '#align comp_symm_of_uniformity comp_symm_of_uniformityₓ'. 
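-/

/- A sketch of how the `tendsto` lemmas above combine: the relation of being uniformly close
along `l` is symmetric and transitive, so closeness of `f` to `g` and of `g` to `h` yields
closeness of `h` to `f`. -/
example {l : Filter β} {f g h : β → α} (hfg : Tendsto (fun x => (f x, g x)) l (𝓤 α))
    (hgh : Tendsto (fun x => (g x, h x)) l (𝓤 α)) :
    Tendsto (fun x => (h x, f x)) l (𝓤 α) :=
  (hfg.uniformity_trans hgh).uniformity_symm

/-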
-/ theorem comp_symm_of_uniformity {s : Set (α × α)} (hs : s ∈ 𝓤 α) : ∃ t ∈ 𝓤 α, (∀ {a b}, (a, b) ∈ t → (b, a) ∈ t) ∧ t ○ t ⊆ s := let ⟨t, ht₁, ht₂⟩ := comp_mem_uniformity_sets hs let ⟨t', ht', ht'₁, ht'₂⟩ := symm_of_uniformity ht₁ ⟨t', ht', ht'₁, Subset.trans (monotone_id.compRel monotone_id ht'₂) ht₂⟩ #align comp_symm_of_uniformity comp_symm_of_uniformity /- warning: uniformity_le_symm -> uniformity_le_symm is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α], LE.le.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (Preorder.toLE.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (PartialOrder.toPreorder.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.partialOrder.{u1} (Prod.{u1, u1} α α)))) (uniformity.{u1} α _inst_1) (Functor.map.{u1, u1} Filter.{u1} Filter.functor.{u1} (Prod.{u1, u1} α α) (Prod.{u1, u1} α α) (Prod.swap.{u1, u1} α α) (uniformity.{u1} α _inst_1)) but is expected to have type forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α], LE.le.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (Preorder.toLE.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (PartialOrder.toPreorder.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.instPartialOrderFilter.{u1} (Prod.{u1, u1} α α)))) (uniformity.{u1} α _inst_1) (Functor.map.{u1, u1} Filter.{u1} Filter.instFunctorFilter.{u1} (Prod.{u1, u1} α α) (Prod.{u1, u1} α α) (Prod.swap.{u1, u1} α α) (uniformity.{u1} α _inst_1)) Case conversion may be inaccurate. Consider using '#align uniformity_le_symm uniformity_le_symmₓ'. -/ theorem uniformity_le_symm : 𝓤 α ≤ @Prod.swap α α <$> 𝓤 α := by rw [map_swap_eq_comap_swap] <;> exact map_le_iff_le_comap.1 tendsto_swap_uniformity #align uniformity_le_symm uniformity_le_symm #print uniformity_eq_symm /- theorem uniformity_eq_symm : 𝓤 α = @Prod.swap α α <$> 𝓤 α := le_antisymm uniformity_le_symm symm_le_uniformity #align uniformity_eq_symm uniformity_eq_symm -/ #print comap_swap_uniformity /- @[simp] theorem comap_swap_uniformity : comap (@Prod.swap α α) (𝓤 α) = 𝓤 α := (congr_arg _ uniformity_eq_symm).trans <| comap_map Prod.swap_injective #align comap_swap_uniformity comap_swap_uniformity -/ #print symmetrize_mem_uniformity /- theorem symmetrize_mem_uniformity {V : Set (α × α)} (h : V ∈ 𝓤 α) : symmetrizeRel V ∈ 𝓤 α := by apply (𝓤 α).inter_sets h rw [← image_swap_eq_preimage_swap, uniformity_eq_symm] exact image_mem_map h #align symmetrize_mem_uniformity symmetrize_mem_uniformity -/ #print UniformSpace.hasBasis_symmetric /- /-- Symmetric entourages form a basis of `𝓤 α` -/ theorem UniformSpace.hasBasis_symmetric : (𝓤 α).HasBasis (fun s : Set (α × α) => s ∈ 𝓤 α ∧ SymmetricRel s) id := hasBasis_self.2 fun t t_in => ⟨symmetrizeRel t, symmetrize_mem_uniformity t_in, symmetric_symmetrizeRel t, symmetrizeRel_subset_self t⟩ #align uniform_space.has_basis_symmetric UniformSpace.hasBasis_symmetric -/ /- warning: uniformity_lift_le_swap -> uniformity_lift_le_swap is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : UniformSpace.{u1} α] {g : (Set.{u1} (Prod.{u1, u1} α α)) -> (Filter.{u2} β)} {f : Filter.{u2} β}, (Monotone.{u1, u2} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u2} β) (PartialOrder.toPreorder.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (CompleteSemilatticeInf.toPartialOrder.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (CompleteLattice.toCompleteSemilatticeInf.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (Order.Coframe.toCompleteLattice.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (CompleteDistribLattice.toCoframe.{u1} (Set.{u1} (Prod.{u1, u1} α α)) 
(CompleteBooleanAlgebra.toCompleteDistribLattice.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (Set.completeBooleanAlgebra.{u1} (Prod.{u1, u1} α α)))))))) (PartialOrder.toPreorder.{u2} (Filter.{u2} β) (Filter.partialOrder.{u2} β)) g) -> (LE.le.{u2} (Filter.{u2} β) (Preorder.toLE.{u2} (Filter.{u2} β) (PartialOrder.toPreorder.{u2} (Filter.{u2} β) (Filter.partialOrder.{u2} β))) (Filter.lift.{u1, u2} (Prod.{u1, u1} α α) β (uniformity.{u1} α _inst_1) (fun (s : Set.{u1} (Prod.{u1, u1} α α)) => g (Set.preimage.{u1, u1} (Prod.{u1, u1} α α) (Prod.{u1, u1} α α) (Prod.swap.{u1, u1} α α) s))) f) -> (LE.le.{u2} (Filter.{u2} β) (Preorder.toLE.{u2} (Filter.{u2} β) (PartialOrder.toPreorder.{u2} (Filter.{u2} β) (Filter.partialOrder.{u2} β))) (Filter.lift.{u1, u2} (Prod.{u1, u1} α α) β (uniformity.{u1} α _inst_1) g) f) but is expected to have type forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : UniformSpace.{u1} α] {g : (Set.{u1} (Prod.{u1, u1} α α)) -> (Filter.{u2} β)} {f : Filter.{u2} β}, (Monotone.{u1, u2} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u2} β) (PartialOrder.toPreorder.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (CompleteSemilatticeInf.toPartialOrder.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (CompleteLattice.toCompleteSemilatticeInf.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (Order.Coframe.toCompleteLattice.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (CompleteDistribLattice.toCoframe.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (Set.instCompleteBooleanAlgebraSet.{u1} (Prod.{u1, u1} α α)))))))) (PartialOrder.toPreorder.{u2} (Filter.{u2} β) (Filter.instPartialOrderFilter.{u2} β)) g) -> (LE.le.{u2} (Filter.{u2} β) (Preorder.toLE.{u2} (Filter.{u2} β) (PartialOrder.toPreorder.{u2} (Filter.{u2} β) (Filter.instPartialOrderFilter.{u2} β))) (Filter.lift.{u1, u2} (Prod.{u1, u1} α α) β (uniformity.{u1} α _inst_1) (fun (s : Set.{u1} (Prod.{u1, u1} α α)) => g (Set.preimage.{u1, u1} (Prod.{u1, u1} α α) (Prod.{u1, u1} α α) (Prod.swap.{u1, u1} α α) s))) f) -> (LE.le.{u2} (Filter.{u2} β) (Preorder.toLE.{u2} (Filter.{u2} β) (PartialOrder.toPreorder.{u2} (Filter.{u2} β) (Filter.instPartialOrderFilter.{u2} β))) (Filter.lift.{u1, u2} (Prod.{u1, u1} α α) β (uniformity.{u1} α _inst_1) g) f) Case conversion may be inaccurate. Consider using '#align uniformity_lift_le_swap uniformity_lift_le_swapₓ'. 
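-/

/- An illustrative sketch of the symmetrization machinery above: every entourage contains a
symmetric entourage, namely its symmetrization. -/
example {V : Set (α × α)} (hV : V ∈ 𝓤 α) : ∃ W ∈ 𝓤 α, SymmetricRel W ∧ W ⊆ V :=
  ⟨symmetrizeRel V, symmetrize_mem_uniformity hV, symmetric_symmetrizeRel V,
    symmetrizeRel_subset_self V⟩

/-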
-/ theorem uniformity_lift_le_swap {g : Set (α × α) → Filter β} {f : Filter β} (hg : Monotone g) (h : ((𝓤 α).lift fun s => g (preimage Prod.swap s)) ≤ f) : (𝓤 α).lift g ≤ f := calc (𝓤 α).lift g ≤ (Filter.map (@Prod.swap α α) <| 𝓤 α).lift g := lift_mono uniformity_le_symm le_rfl _ ≤ _ := by rw [map_lift_eq2 hg, image_swap_eq_preimage_swap] <;> exact h #align uniformity_lift_le_swap uniformity_lift_le_swap /- warning: uniformity_lift_le_comp -> uniformity_lift_le_comp is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : UniformSpace.{u1} α] {f : (Set.{u1} (Prod.{u1, u1} α α)) -> (Filter.{u2} β)}, (Monotone.{u1, u2} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u2} β) (PartialOrder.toPreorder.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (CompleteSemilatticeInf.toPartialOrder.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (CompleteLattice.toCompleteSemilatticeInf.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (Order.Coframe.toCompleteLattice.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (CompleteDistribLattice.toCoframe.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (Set.completeBooleanAlgebra.{u1} (Prod.{u1, u1} α α)))))))) (PartialOrder.toPreorder.{u2} (Filter.{u2} β) (Filter.partialOrder.{u2} β)) f) -> (LE.le.{u2} (Filter.{u2} β) (Preorder.toLE.{u2} (Filter.{u2} β) (PartialOrder.toPreorder.{u2} (Filter.{u2} β) (Filter.partialOrder.{u2} β))) (Filter.lift.{u1, u2} (Prod.{u1, u1} α α) β (uniformity.{u1} α _inst_1) (fun (s : Set.{u1} (Prod.{u1, u1} α α)) => f (compRel.{u1} α s s))) (Filter.lift.{u1, u2} (Prod.{u1, u1} α α) β (uniformity.{u1} α _inst_1) f)) but is expected to have type forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : UniformSpace.{u1} α] {f : (Set.{u1} (Prod.{u1, u1} α α)) -> (Filter.{u2} β)}, (Monotone.{u1, u2} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u2} β) (PartialOrder.toPreorder.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (CompleteSemilatticeInf.toPartialOrder.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (CompleteLattice.toCompleteSemilatticeInf.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (Order.Coframe.toCompleteLattice.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (CompleteDistribLattice.toCoframe.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (Set.instCompleteBooleanAlgebraSet.{u1} (Prod.{u1, u1} α α)))))))) (PartialOrder.toPreorder.{u2} (Filter.{u2} β) (Filter.instPartialOrderFilter.{u2} β)) f) -> (LE.le.{u2} (Filter.{u2} β) (Preorder.toLE.{u2} (Filter.{u2} β) (PartialOrder.toPreorder.{u2} (Filter.{u2} β) (Filter.instPartialOrderFilter.{u2} β))) (Filter.lift.{u1, u2} (Prod.{u1, u1} α α) β (uniformity.{u1} α _inst_1) (fun (s : Set.{u1} (Prod.{u1, u1} α α)) => f (compRel.{u1} α s s))) (Filter.lift.{u1, u2} (Prod.{u1, u1} α α) β (uniformity.{u1} α _inst_1) f)) Case conversion may be inaccurate. Consider using '#align uniformity_lift_le_comp uniformity_lift_le_compₓ'. 
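-/

/- A sketch of iterating `comp_mem_uniformity_sets`: shrinking an entourage twice controls a
triple composition; this is the same mechanism as in `comp_le_uniformity3` below. -/
example {s : Set (α × α)} (hs : s ∈ 𝓤 α) : ∃ t ∈ 𝓤 α, t ○ (t ○ t) ⊆ s :=
  let ⟨w, w_in, w_sub⟩ := comp_mem_uniformity_sets hs
  let ⟨t, t_in, t_sub⟩ := comp_mem_uniformity_sets w_in
  have htw : t ⊆ w := Subset.trans (subset_comp_self (refl_le_uniformity t_in)) t_sub
  ⟨t, t_in, Subset.trans (compRel_mono htw t_sub) w_sub⟩

/-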
-/ theorem uniformity_lift_le_comp {f : Set (α × α) → Filter β} (h : Monotone f) : ((𝓤 α).lift fun s => f (s ○ s)) ≤ (𝓤 α).lift f := calc ((𝓤 α).lift fun s => f (s ○ s)) = ((𝓤 α).lift' fun s : Set (α × α) => s ○ s).lift f := by rw [lift_lift'_assoc] exact monotone_id.comp_rel monotone_id exact h _ ≤ (𝓤 α).lift f := lift_mono comp_le_uniformity le_rfl #align uniformity_lift_le_comp uniformity_lift_le_comp /- warning: comp_le_uniformity3 -> comp_le_uniformity3 is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α], LE.le.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (Preorder.toLE.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (PartialOrder.toPreorder.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.partialOrder.{u1} (Prod.{u1, u1} α α)))) (Filter.lift'.{u1, u1} (Prod.{u1, u1} α α) (Prod.{u1, u1} α α) (uniformity.{u1} α _inst_1) (fun (s : Set.{u1} (Prod.{u1, u1} α α)) => compRel.{u1} α s (compRel.{u1} α s s))) (uniformity.{u1} α _inst_1) but is expected to have type forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α], LE.le.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (Preorder.toLE.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (PartialOrder.toPreorder.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.instPartialOrderFilter.{u1} (Prod.{u1, u1} α α)))) (Filter.lift'.{u1, u1} (Prod.{u1, u1} α α) (Prod.{u1, u1} α α) (uniformity.{u1} α _inst_1) (fun (s : Set.{u1} (Prod.{u1, u1} α α)) => compRel.{u1} α s (compRel.{u1} α s s))) (uniformity.{u1} α _inst_1) Case conversion may be inaccurate. Consider using '#align comp_le_uniformity3 comp_le_uniformity3ₓ'. -/ theorem comp_le_uniformity3 : ((𝓤 α).lift' fun s : Set (α × α) => s ○ (s ○ s)) ≤ 𝓤 α := calc ((𝓤 α).lift' fun d => d ○ (d ○ d)) = (𝓤 α).lift fun s => (𝓤 α).lift' fun t : Set (α × α) => s ○ (t ○ t) := by rw [lift_lift'_same_eq_lift'] exact fun x => monotone_const.comp_rel <| monotone_id.comp_rel monotone_id exact fun x => monotone_id.comp_rel monotone_const _ ≤ (𝓤 α).lift fun s => (𝓤 α).lift' fun t : Set (α × α) => s ○ t := (lift_mono' fun s hs => @uniformity_lift_le_comp α _ _ (𝓟 ∘ (· ○ ·) s) <| monotone_principal.comp (monotone_const.compRel monotone_id)) _ = (𝓤 α).lift' fun s : Set (α × α) => s ○ s := (lift_lift'_same_eq_lift' (fun s => monotone_const.compRel monotone_id) fun s => monotone_id.compRel monotone_const) _ ≤ 𝓤 α := comp_le_uniformity #align comp_le_uniformity3 comp_le_uniformity3 /- warning: comp_symm_mem_uniformity_sets -> comp_symm_mem_uniformity_sets is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] {s : Set.{u1} (Prod.{u1, u1} α α)}, (Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) s (uniformity.{u1} α _inst_1)) -> (Exists.{succ u1} (Set.{u1} (Prod.{u1, u1} α α)) (fun (t : Set.{u1} (Prod.{u1, u1} α α)) => Exists.{0} (Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) t (uniformity.{u1} α _inst_1)) (fun (H : Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) t (uniformity.{u1} α _inst_1)) => And (SymmetricRel.{u1} α t) (HasSubset.Subset.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (Set.hasSubset.{u1} (Prod.{u1, u1} α α)) (compRel.{u1} α t t) s)))) but is expected to have type forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] {s : Set.{u1} (Prod.{u1, u1} α α)}, (Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) 
(Filter.{u1} (Prod.{u1, u1} α α)) (instMembershipSetFilter.{u1} (Prod.{u1, u1} α α)) s (uniformity.{u1} α _inst_1)) -> (Exists.{succ u1} (Set.{u1} (Prod.{u1, u1} α α)) (fun (t : Set.{u1} (Prod.{u1, u1} α α)) => And (Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (instMembershipSetFilter.{u1} (Prod.{u1, u1} α α)) t (uniformity.{u1} α _inst_1)) (And (SymmetricRel.{u1} α t) (HasSubset.Subset.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (Set.instHasSubsetSet.{u1} (Prod.{u1, u1} α α)) (compRel.{u1} α t t) s)))) Case conversion may be inaccurate. Consider using '#align comp_symm_mem_uniformity_sets comp_symm_mem_uniformity_setsₓ'. -/ /-- See also `comp_open_symm_mem_uniformity_sets`. -/ theorem comp_symm_mem_uniformity_sets {s : Set (α × α)} (hs : s ∈ 𝓤 α) : ∃ t ∈ 𝓤 α, SymmetricRel t ∧ t ○ t ⊆ s := by obtain ⟨w, w_in, w_sub⟩ : ∃ w ∈ 𝓤 α, w ○ w ⊆ s := comp_mem_uniformity_sets hs use symmetrizeRel w, symmetrize_mem_uniformity w_in, symmetric_symmetrizeRel w have : symmetrizeRel w ⊆ w := symmetrizeRel_subset_self w calc symmetrizeRel w ○ symmetrizeRel w ⊆ w ○ w := by mono _ ⊆ s := w_sub #align comp_symm_mem_uniformity_sets comp_symm_mem_uniformity_sets #print subset_comp_self_of_mem_uniformity /- theorem subset_comp_self_of_mem_uniformity {s : Set (α × α)} (h : s ∈ 𝓤 α) : s ⊆ s ○ s := subset_comp_self (refl_le_uniformity h) #align subset_comp_self_of_mem_uniformity subset_comp_self_of_mem_uniformity -/ /- warning: comp_comp_symm_mem_uniformity_sets -> comp_comp_symm_mem_uniformity_sets is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] {s : Set.{u1} (Prod.{u1, u1} α α)}, (Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) s (uniformity.{u1} α _inst_1)) -> (Exists.{succ u1} (Set.{u1} (Prod.{u1, u1} α α)) (fun (t : Set.{u1} (Prod.{u1, u1} α α)) => Exists.{0} (Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) t (uniformity.{u1} α _inst_1)) (fun (H : Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) t (uniformity.{u1} α _inst_1)) => And (SymmetricRel.{u1} α t) (HasSubset.Subset.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (Set.hasSubset.{u1} (Prod.{u1, u1} α α)) (compRel.{u1} α (compRel.{u1} α t t) t) s)))) but is expected to have type forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] {s : Set.{u1} (Prod.{u1, u1} α α)}, (Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (instMembershipSetFilter.{u1} (Prod.{u1, u1} α α)) s (uniformity.{u1} α _inst_1)) -> (Exists.{succ u1} (Set.{u1} (Prod.{u1, u1} α α)) (fun (t : Set.{u1} (Prod.{u1, u1} α α)) => And (Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (instMembershipSetFilter.{u1} (Prod.{u1, u1} α α)) t (uniformity.{u1} α _inst_1)) (And (SymmetricRel.{u1} α t) (HasSubset.Subset.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (Set.instHasSubsetSet.{u1} (Prod.{u1, u1} α α)) (compRel.{u1} α (compRel.{u1} α t t) t) s)))) Case conversion may be inaccurate. Consider using '#align comp_comp_symm_mem_uniformity_sets comp_comp_symm_mem_uniformity_setsₓ'. 
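-- Editorial illustration (not part of the original mathlib source; it assumes this file's
-- imports and the `𝓤`/`○` notation): a typical use of `comp_symm_mem_uniformity_sets` is to
-- drop the symmetry information when only a "half-size" entourage is needed, recovering the
-- statement of `comp_mem_uniformity_sets`.
example {α : Type _} [UniformSpace α] {s : Set (α × α)} (hs : s ∈ 𝓤 α) :
    ∃ t ∈ 𝓤 α, t ○ t ⊆ s := by
  obtain ⟨t, t_in, -, t_sub⟩ := comp_symm_mem_uniformity_sets hs
  exact ⟨t, t_in, t_sub⟩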
-/ theorem comp_comp_symm_mem_uniformity_sets {s : Set (α × α)} (hs : s ∈ 𝓤 α) : ∃ t ∈ 𝓤 α, SymmetricRel t ∧ t ○ t ○ t ⊆ s := by rcases comp_symm_mem_uniformity_sets hs with ⟨w, w_in, w_symm, w_sub⟩ rcases comp_symm_mem_uniformity_sets w_in with ⟨t, t_in, t_symm, t_sub⟩ use t, t_in, t_symm have : t ⊆ t ○ t := subset_comp_self_of_mem_uniformity t_in calc t ○ t ○ t ⊆ w ○ t := by mono _ ⊆ w ○ (t ○ t) := by mono _ ⊆ w ○ w := by mono _ ⊆ s := w_sub #align comp_comp_symm_mem_uniformity_sets comp_comp_symm_mem_uniformity_sets /-! ### Balls in uniform spaces -/ #print UniformSpace.ball /- /-- The ball around `(x : β)` with respect to `(V : set (β × β))`. Intended to be used for `V ∈ 𝓤 β`, but this is not needed for the definition. Recovers the notions of metric space ball when `V = {p | dist p.1 p.2 < r }`. -/ def UniformSpace.ball (x : β) (V : Set (β × β)) : Set β := Prod.mk x ⁻¹' V #align uniform_space.ball UniformSpace.ball -/ open UniformSpace (ball) #print UniformSpace.mem_ball_self /- theorem UniformSpace.mem_ball_self (x : α) {V : Set (α × α)} (hV : V ∈ 𝓤 α) : x ∈ ball x V := refl_mem_uniformity hV #align uniform_space.mem_ball_self UniformSpace.mem_ball_self -/ #print mem_ball_comp /- /-- The triangle inequality for `uniform_space.ball` -/ theorem mem_ball_comp {V W : Set (β × β)} {x y z} (h : y ∈ ball x V) (h' : z ∈ ball y W) : z ∈ ball x (V ○ W) := prod_mk_mem_compRel h h' #align mem_ball_comp mem_ball_comp -/ #print ball_subset_of_comp_subset /- theorem ball_subset_of_comp_subset {V W : Set (β × β)} {x y} (h : x ∈ ball y W) (h' : W ○ W ⊆ V) : ball x W ⊆ ball y V := fun z z_in => h' (mem_ball_comp h z_in) #align ball_subset_of_comp_subset ball_subset_of_comp_subset -/ #print ball_mono /- theorem ball_mono {V W : Set (β × β)} (h : V ⊆ W) (x : β) : ball x V ⊆ ball x W := preimage_mono h #align ball_mono ball_mono -/ /- warning: ball_inter -> ball_inter is a dubious translation: lean 3 declaration is forall {β : Type.{u1}} (x : β) (V : Set.{u1} (Prod.{u1, u1} β β)) (W : Set.{u1} (Prod.{u1, u1} β β)), Eq.{succ u1} (Set.{u1} β) (UniformSpace.ball.{u1} β x (Inter.inter.{u1} (Set.{u1} (Prod.{u1, u1} β β)) (Set.hasInter.{u1} (Prod.{u1, u1} β β)) V W)) (Inter.inter.{u1} (Set.{u1} β) (Set.hasInter.{u1} β) (UniformSpace.ball.{u1} β x V) (UniformSpace.ball.{u1} β x W)) but is expected to have type forall {β : Type.{u1}} (x : β) (V : Set.{u1} (Prod.{u1, u1} β β)) (W : Set.{u1} (Prod.{u1, u1} β β)), Eq.{succ u1} (Set.{u1} β) (UniformSpace.ball.{u1} β x (Inter.inter.{u1} (Set.{u1} (Prod.{u1, u1} β β)) (Set.instInterSet.{u1} (Prod.{u1, u1} β β)) V W)) (Inter.inter.{u1} (Set.{u1} β) (Set.instInterSet.{u1} β) (UniformSpace.ball.{u1} β x V) (UniformSpace.ball.{u1} β x W)) Case conversion may be inaccurate. Consider using '#align ball_inter ball_interₓ'. 
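-- Editorial illustration (not part of the original mathlib source): `ball x V` is
-- definitionally the preimage of `V` under `Prod.mk x`, so membership in a ball is literally
-- membership of the pair in `V`, exactly as in the proof of `UniformSpace.mem_ball_self` above.
example {β : Type _} {x y : β} {V : Set (β × β)} (h : (x, y) ∈ V) :
    y ∈ UniformSpace.ball x V :=
  h
-- As the doc string notes, when `V = {p | dist p.1 p.2 < r}` in a metric space (defined later
-- in the library), `ball x V` recovers the usual metric ball of radius `r` around `x`.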
-/ theorem ball_inter (x : β) (V W : Set (β × β)) : ball x (V ∩ W) = ball x V ∩ ball x W := preimage_inter #align ball_inter ball_inter /- warning: ball_inter_left -> ball_inter_left is a dubious translation: lean 3 declaration is forall {β : Type.{u1}} (x : β) (V : Set.{u1} (Prod.{u1, u1} β β)) (W : Set.{u1} (Prod.{u1, u1} β β)), HasSubset.Subset.{u1} (Set.{u1} β) (Set.hasSubset.{u1} β) (UniformSpace.ball.{u1} β x (Inter.inter.{u1} (Set.{u1} (Prod.{u1, u1} β β)) (Set.hasInter.{u1} (Prod.{u1, u1} β β)) V W)) (UniformSpace.ball.{u1} β x V) but is expected to have type forall {β : Type.{u1}} (x : β) (V : Set.{u1} (Prod.{u1, u1} β β)) (W : Set.{u1} (Prod.{u1, u1} β β)), HasSubset.Subset.{u1} (Set.{u1} β) (Set.instHasSubsetSet.{u1} β) (UniformSpace.ball.{u1} β x (Inter.inter.{u1} (Set.{u1} (Prod.{u1, u1} β β)) (Set.instInterSet.{u1} (Prod.{u1, u1} β β)) V W)) (UniformSpace.ball.{u1} β x V) Case conversion may be inaccurate. Consider using '#align ball_inter_left ball_inter_leftₓ'. -/ theorem ball_inter_left (x : β) (V W : Set (β × β)) : ball x (V ∩ W) ⊆ ball x V := ball_mono (inter_subset_left V W) x #align ball_inter_left ball_inter_left /- warning: ball_inter_right -> ball_inter_right is a dubious translation: lean 3 declaration is forall {β : Type.{u1}} (x : β) (V : Set.{u1} (Prod.{u1, u1} β β)) (W : Set.{u1} (Prod.{u1, u1} β β)), HasSubset.Subset.{u1} (Set.{u1} β) (Set.hasSubset.{u1} β) (UniformSpace.ball.{u1} β x (Inter.inter.{u1} (Set.{u1} (Prod.{u1, u1} β β)) (Set.hasInter.{u1} (Prod.{u1, u1} β β)) V W)) (UniformSpace.ball.{u1} β x W) but is expected to have type forall {β : Type.{u1}} (x : β) (V : Set.{u1} (Prod.{u1, u1} β β)) (W : Set.{u1} (Prod.{u1, u1} β β)), HasSubset.Subset.{u1} (Set.{u1} β) (Set.instHasSubsetSet.{u1} β) (UniformSpace.ball.{u1} β x (Inter.inter.{u1} (Set.{u1} (Prod.{u1, u1} β β)) (Set.instInterSet.{u1} (Prod.{u1, u1} β β)) V W)) (UniformSpace.ball.{u1} β x W) Case conversion may be inaccurate. Consider using '#align ball_inter_right ball_inter_rightₓ'. 
-/ theorem ball_inter_right (x : β) (V W : Set (β × β)) : ball x (V ∩ W) ⊆ ball x W := ball_mono (inter_subset_right V W) x #align ball_inter_right ball_inter_right #print mem_ball_symmetry /- theorem mem_ball_symmetry {V : Set (β × β)} (hV : SymmetricRel V) {x y} : x ∈ ball y V ↔ y ∈ ball x V := show (x, y) ∈ Prod.swap ⁻¹' V ↔ (x, y) ∈ V by unfold SymmetricRel at hV rw [hV] #align mem_ball_symmetry mem_ball_symmetry -/ #print ball_eq_of_symmetry /- theorem ball_eq_of_symmetry {V : Set (β × β)} (hV : SymmetricRel V) {x} : ball x V = { y | (y, x) ∈ V } := by ext y rw [mem_ball_symmetry hV] exact Iff.rfl #align ball_eq_of_symmetry ball_eq_of_symmetry -/ #print mem_comp_of_mem_ball /- theorem mem_comp_of_mem_ball {V W : Set (β × β)} {x y z : β} (hV : SymmetricRel V) (hx : x ∈ ball z V) (hy : y ∈ ball z W) : (x, y) ∈ V ○ W := by rw [mem_ball_symmetry hV] at hx exact ⟨z, hx, hy⟩ #align mem_comp_of_mem_ball mem_comp_of_mem_ball -/ /- warning: uniform_space.is_open_ball -> UniformSpace.isOpen_ball is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] (x : α) {V : Set.{u1} (Prod.{u1, u1} α α)}, (IsOpen.{u1} (Prod.{u1, u1} α α) (Prod.topologicalSpace.{u1, u1} α α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u1} α _inst_1)) V) -> (IsOpen.{u1} α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.ball.{u1} α x V)) but is expected to have type forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] (x : α) {V : Set.{u1} (Prod.{u1, u1} α α)}, (IsOpen.{u1} (Prod.{u1, u1} α α) (instTopologicalSpaceProd.{u1, u1} α α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u1} α _inst_1)) V) -> (IsOpen.{u1} α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.ball.{u1} α x V)) Case conversion may be inaccurate. Consider using '#align uniform_space.is_open_ball UniformSpace.isOpen_ballₓ'. -/ theorem UniformSpace.isOpen_ball (x : α) {V : Set (α × α)} (hV : IsOpen V) : IsOpen (ball x V) := hV.Preimage <| continuous_const.prod_mk continuous_id #align uniform_space.is_open_ball UniformSpace.isOpen_ball /- warning: mem_comp_comp -> mem_comp_comp is a dubious translation: lean 3 declaration is forall {β : Type.{u1}} {V : Set.{u1} (Prod.{u1, u1} β β)} {W : Set.{u1} (Prod.{u1, u1} β β)} {M : Set.{u1} (Prod.{u1, u1} β β)}, (SymmetricRel.{u1} β W) -> (forall {p : Prod.{u1, u1} β β}, Iff (Membership.Mem.{u1, u1} (Prod.{u1, u1} β β) (Set.{u1} (Prod.{u1, u1} β β)) (Set.hasMem.{u1} (Prod.{u1, u1} β β)) p (compRel.{u1} β (compRel.{u1} β V M) W)) (Set.Nonempty.{u1} (Prod.{u1, u1} β β) (Inter.inter.{u1} (Set.{u1} (Prod.{u1, u1} β β)) (Set.hasInter.{u1} (Prod.{u1, u1} β β)) (Set.prod.{u1, u1} β β (UniformSpace.ball.{u1} β (Prod.fst.{u1, u1} β β p) V) (UniformSpace.ball.{u1} β (Prod.snd.{u1, u1} β β p) W)) M))) but is expected to have type forall {β : Type.{u1}} {V : Set.{u1} (Prod.{u1, u1} β β)} {W : Set.{u1} (Prod.{u1, u1} β β)} {M : Set.{u1} (Prod.{u1, u1} β β)}, (SymmetricRel.{u1} β W) -> (forall {p : Prod.{u1, u1} β β}, Iff (Membership.mem.{u1, u1} (Prod.{u1, u1} β β) (Set.{u1} (Prod.{u1, u1} β β)) (Set.instMembershipSet.{u1} (Prod.{u1, u1} β β)) p (compRel.{u1} β (compRel.{u1} β V M) W)) (Set.Nonempty.{u1} (Prod.{u1, u1} β β) (Inter.inter.{u1} (Set.{u1} (Prod.{u1, u1} β β)) (Set.instInterSet.{u1} (Prod.{u1, u1} β β)) (Set.prod.{u1, u1} β β (UniformSpace.ball.{u1} β (Prod.fst.{u1, u1} β β p) V) (UniformSpace.ball.{u1} β (Prod.snd.{u1, u1} β β p) W)) M))) Case conversion may be inaccurate. 
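-- Editorial illustration (not part of the original mathlib source): `ball_inter_left` and
-- `ball_inter_right` split a ball taken for an intersection of entourages into the two
-- individual balls.
example {β : Type _} (x y : β) (V W : Set (β × β))
    (h : y ∈ UniformSpace.ball x (V ∩ W)) :
    y ∈ UniformSpace.ball x V ∧ y ∈ UniformSpace.ball x W :=
  ⟨ball_inter_left x V W h, ball_inter_right x V W h⟩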
Consider using '#align mem_comp_comp mem_comp_compₓ'. -/ /- ./././Mathport/Syntax/Translate/Expr.lean:177:8: unsupported: ambiguous notation -/ theorem mem_comp_comp {V W M : Set (β × β)} (hW' : SymmetricRel W) {p : β × β} : p ∈ V ○ M ○ W ↔ (ball p.1 V ×ˢ ball p.2 W ∩ M).Nonempty := by cases' p with x y constructor · rintro ⟨z, ⟨w, hpw, hwz⟩, hzy⟩ exact ⟨(w, z), ⟨hpw, by rwa [mem_ball_symmetry hW']⟩, hwz⟩ · rintro ⟨⟨w, z⟩, ⟨w_in, z_in⟩, hwz⟩ rwa [mem_ball_symmetry hW'] at z_in use z, w <;> tauto #align mem_comp_comp mem_comp_comp /-! ### Neighborhoods in uniform spaces -/ #print mem_nhds_uniformity_iff_right /- theorem mem_nhds_uniformity_iff_right {x : α} {s : Set α} : s ∈ 𝓝 x ↔ { p : α × α | p.1 = x → p.2 ∈ s } ∈ 𝓤 α := by refine' ⟨_, fun hs => _⟩ · simp only [mem_nhds_iff, isOpen_uniformity, and_imp, exists_imp] intro t ts ht xt filter_upwards [ht x xt]using fun y h eq => ts (h Eq) · refine' mem_nhds_iff.mpr ⟨{ x | { p : α × α | p.1 = x → p.2 ∈ s } ∈ 𝓤 α }, _, _, hs⟩ · exact fun y hy => refl_mem_uniformity hy rfl · refine' is_open_uniformity.mpr fun y hy => _ rcases comp_mem_uniformity_sets hy with ⟨t, ht, tr⟩ filter_upwards [ht] rintro ⟨a, b⟩ hp' rfl filter_upwards [ht] rintro ⟨a', b'⟩ hp'' rfl exact @tr (a, b') ⟨a', hp', hp''⟩ rfl #align mem_nhds_uniformity_iff_right mem_nhds_uniformity_iff_right -/ #print mem_nhds_uniformity_iff_left /- theorem mem_nhds_uniformity_iff_left {x : α} {s : Set α} : s ∈ 𝓝 x ↔ { p : α × α | p.2 = x → p.1 ∈ s } ∈ 𝓤 α := by rw [uniformity_eq_symm, mem_nhds_uniformity_iff_right] rfl #align mem_nhds_uniformity_iff_left mem_nhds_uniformity_iff_left -/ #print nhds_eq_comap_uniformity /- theorem nhds_eq_comap_uniformity {x : α} : 𝓝 x = (𝓤 α).comap (Prod.mk x) := by ext s rw [mem_nhds_uniformity_iff_right, mem_comap_prod_mk] #align nhds_eq_comap_uniformity nhds_eq_comap_uniformity -/ /- warning: is_open_iff_ball_subset -> isOpen_iff_ball_subset is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] {s : Set.{u1} α}, Iff (IsOpen.{u1} α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) s) (forall (x : α), (Membership.Mem.{u1, u1} α (Set.{u1} α) (Set.hasMem.{u1} α) x s) -> (Exists.{succ u1} (Set.{u1} (Prod.{u1, u1} α α)) (fun (V : Set.{u1} (Prod.{u1, u1} α α)) => Exists.{0} (Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) V (uniformity.{u1} α _inst_1)) (fun (H : Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) V (uniformity.{u1} α _inst_1)) => HasSubset.Subset.{u1} (Set.{u1} α) (Set.hasSubset.{u1} α) (UniformSpace.ball.{u1} α x V) s)))) but is expected to have type forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] {s : Set.{u1} α}, Iff (IsOpen.{u1} α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) s) (forall (x : α), (Membership.mem.{u1, u1} α (Set.{u1} α) (Set.instMembershipSet.{u1} α) x s) -> (Exists.{succ u1} (Set.{u1} (Prod.{u1, u1} α α)) (fun (V : Set.{u1} (Prod.{u1, u1} α α)) => And (Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (instMembershipSetFilter.{u1} (Prod.{u1, u1} α α)) V (uniformity.{u1} α _inst_1)) (HasSubset.Subset.{u1} (Set.{u1} α) (Set.instHasSubsetSet.{u1} α) (UniformSpace.ball.{u1} α x V) s)))) Case conversion may be inaccurate. Consider using '#align is_open_iff_ball_subset isOpen_iff_ball_subsetₓ'. -/ /-- See also `is_open_iff_open_ball_subset`. 
-/ theorem isOpen_iff_ball_subset {s : Set α} : IsOpen s ↔ ∀ x ∈ s, ∃ V ∈ 𝓤 α, ball x V ⊆ s := by simp_rw [isOpen_iff_mem_nhds, nhds_eq_comap_uniformity] exact Iff.rfl #align is_open_iff_ball_subset isOpen_iff_ball_subset /- warning: nhds_basis_uniformity' -> nhds_basis_uniformity' is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {ι : Sort.{u2}} [_inst_1 : UniformSpace.{u1} α] {p : ι -> Prop} {s : ι -> (Set.{u1} (Prod.{u1, u1} α α))}, (Filter.HasBasis.{u1, u2} (Prod.{u1, u1} α α) ι (uniformity.{u1} α _inst_1) p s) -> (forall {x : α}, Filter.HasBasis.{u1, u2} α ι (nhds.{u1} α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) x) p (fun (i : ι) => UniformSpace.ball.{u1} α x (s i))) but is expected to have type forall {α : Type.{u2}} {ι : Sort.{u1}} [_inst_1 : UniformSpace.{u2} α] {p : ι -> Prop} {s : ι -> (Set.{u2} (Prod.{u2, u2} α α))}, (Filter.HasBasis.{u2, u1} (Prod.{u2, u2} α α) ι (uniformity.{u2} α _inst_1) p s) -> (forall {x : α}, Filter.HasBasis.{u2, u1} α ι (nhds.{u2} α (UniformSpace.toTopologicalSpace.{u2} α _inst_1) x) p (fun (i : ι) => UniformSpace.ball.{u2} α x (s i))) Case conversion may be inaccurate. Consider using '#align nhds_basis_uniformity' nhds_basis_uniformity'ₓ'. -/ theorem nhds_basis_uniformity' {p : ι → Prop} {s : ι → Set (α × α)} (h : (𝓤 α).HasBasis p s) {x : α} : (𝓝 x).HasBasis p fun i => ball x (s i) := by rw [nhds_eq_comap_uniformity] exact h.comap (Prod.mk x) #align nhds_basis_uniformity' nhds_basis_uniformity' /- warning: nhds_basis_uniformity -> nhds_basis_uniformity is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {ι : Sort.{u2}} [_inst_1 : UniformSpace.{u1} α] {p : ι -> Prop} {s : ι -> (Set.{u1} (Prod.{u1, u1} α α))}, (Filter.HasBasis.{u1, u2} (Prod.{u1, u1} α α) ι (uniformity.{u1} α _inst_1) p s) -> (forall {x : α}, Filter.HasBasis.{u1, u2} α ι (nhds.{u1} α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) x) p (fun (i : ι) => setOf.{u1} α (fun (y : α) => Membership.Mem.{u1, u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) (Set.hasMem.{u1} (Prod.{u1, u1} α α)) (Prod.mk.{u1, u1} α α y x) (s i)))) but is expected to have type forall {α : Type.{u2}} {ι : Sort.{u1}} [_inst_1 : UniformSpace.{u2} α] {p : ι -> Prop} {s : ι -> (Set.{u2} (Prod.{u2, u2} α α))}, (Filter.HasBasis.{u2, u1} (Prod.{u2, u2} α α) ι (uniformity.{u2} α _inst_1) p s) -> (forall {x : α}, Filter.HasBasis.{u2, u1} α ι (nhds.{u2} α (UniformSpace.toTopologicalSpace.{u2} α _inst_1) x) p (fun (i : ι) => setOf.{u2} α (fun (y : α) => Membership.mem.{u2, u2} (Prod.{u2, u2} α α) (Set.{u2} (Prod.{u2, u2} α α)) (Set.instMembershipSet.{u2} (Prod.{u2, u2} α α)) (Prod.mk.{u2, u2} α α y x) (s i)))) Case conversion may be inaccurate. Consider using '#align nhds_basis_uniformity nhds_basis_uniformityₓ'. 
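-- Editorial illustration (not part of the original mathlib source): specializing
-- `nhds_basis_uniformity'` to the basis of all entourages, `(𝓤 α).basis_sets`, shows that the
-- balls `ball x V` for `V ∈ 𝓤 α` form a basis of the neighbourhood filter `𝓝 x`.
example {α : Type _} [UniformSpace α] (x : α) :
    (𝓝 x).HasBasis (fun V : Set (α × α) => V ∈ 𝓤 α) fun V => UniformSpace.ball x V :=
  nhds_basis_uniformity' (𝓤 α).basis_sets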
-/ theorem nhds_basis_uniformity {p : ι → Prop} {s : ι → Set (α × α)} (h : (𝓤 α).HasBasis p s) {x : α} : (𝓝 x).HasBasis p fun i => { y | (y, x) ∈ s i } := by replace h := h.comap Prod.swap rw [← map_swap_eq_comap_swap, ← uniformity_eq_symm] at h exact nhds_basis_uniformity' h #align nhds_basis_uniformity nhds_basis_uniformity #print nhds_eq_comap_uniformity' /- theorem nhds_eq_comap_uniformity' {x : α} : 𝓝 x = (𝓤 α).comap fun y => (y, x) := (nhds_basis_uniformity (𝓤 α).basis_sets).eq_of_same_basis <| (𝓤 α).basis_sets.comap _ #align nhds_eq_comap_uniformity' nhds_eq_comap_uniformity' -/ /- warning: uniform_space.mem_nhds_iff -> UniformSpace.mem_nhds_iff is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] {x : α} {s : Set.{u1} α}, Iff (Membership.Mem.{u1, u1} (Set.{u1} α) (Filter.{u1} α) (Filter.hasMem.{u1} α) s (nhds.{u1} α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) x)) (Exists.{succ u1} (Set.{u1} (Prod.{u1, u1} α α)) (fun (V : Set.{u1} (Prod.{u1, u1} α α)) => Exists.{0} (Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) V (uniformity.{u1} α _inst_1)) (fun (H : Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) V (uniformity.{u1} α _inst_1)) => HasSubset.Subset.{u1} (Set.{u1} α) (Set.hasSubset.{u1} α) (UniformSpace.ball.{u1} α x V) s))) but is expected to have type forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] {x : α} {s : Set.{u1} α}, Iff (Membership.mem.{u1, u1} (Set.{u1} α) (Filter.{u1} α) (instMembershipSetFilter.{u1} α) s (nhds.{u1} α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) x)) (Exists.{succ u1} (Set.{u1} (Prod.{u1, u1} α α)) (fun (V : Set.{u1} (Prod.{u1, u1} α α)) => And (Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (instMembershipSetFilter.{u1} (Prod.{u1, u1} α α)) V (uniformity.{u1} α _inst_1)) (HasSubset.Subset.{u1} (Set.{u1} α) (Set.instHasSubsetSet.{u1} α) (UniformSpace.ball.{u1} α x V) s))) Case conversion may be inaccurate. Consider using '#align uniform_space.mem_nhds_iff UniformSpace.mem_nhds_iffₓ'. 
-/ theorem UniformSpace.mem_nhds_iff {x : α} {s : Set α} : s ∈ 𝓝 x ↔ ∃ V ∈ 𝓤 α, ball x V ⊆ s := by rw [nhds_eq_comap_uniformity, mem_comap] exact Iff.rfl #align uniform_space.mem_nhds_iff UniformSpace.mem_nhds_iff #print UniformSpace.ball_mem_nhds /- theorem UniformSpace.ball_mem_nhds (x : α) ⦃V : Set (α × α)⦄ (V_in : V ∈ 𝓤 α) : ball x V ∈ 𝓝 x := by rw [UniformSpace.mem_nhds_iff] exact ⟨V, V_in, subset.refl _⟩ #align uniform_space.ball_mem_nhds UniformSpace.ball_mem_nhds -/ /- warning: uniform_space.mem_nhds_iff_symm -> UniformSpace.mem_nhds_iff_symm is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] {x : α} {s : Set.{u1} α}, Iff (Membership.Mem.{u1, u1} (Set.{u1} α) (Filter.{u1} α) (Filter.hasMem.{u1} α) s (nhds.{u1} α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) x)) (Exists.{succ u1} (Set.{u1} (Prod.{u1, u1} α α)) (fun (V : Set.{u1} (Prod.{u1, u1} α α)) => Exists.{0} (Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) V (uniformity.{u1} α _inst_1)) (fun (H : Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) V (uniformity.{u1} α _inst_1)) => And (SymmetricRel.{u1} α V) (HasSubset.Subset.{u1} (Set.{u1} α) (Set.hasSubset.{u1} α) (UniformSpace.ball.{u1} α x V) s)))) but is expected to have type forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] {x : α} {s : Set.{u1} α}, Iff (Membership.mem.{u1, u1} (Set.{u1} α) (Filter.{u1} α) (instMembershipSetFilter.{u1} α) s (nhds.{u1} α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) x)) (Exists.{succ u1} (Set.{u1} (Prod.{u1, u1} α α)) (fun (V : Set.{u1} (Prod.{u1, u1} α α)) => And (Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (instMembershipSetFilter.{u1} (Prod.{u1, u1} α α)) V (uniformity.{u1} α _inst_1)) (And (SymmetricRel.{u1} α V) (HasSubset.Subset.{u1} (Set.{u1} α) (Set.instHasSubsetSet.{u1} α) (UniformSpace.ball.{u1} α x V) s)))) Case conversion may be inaccurate. Consider using '#align uniform_space.mem_nhds_iff_symm UniformSpace.mem_nhds_iff_symmₓ'. 
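-- Editorial illustration (not part of the original mathlib source): by
-- `UniformSpace.mem_nhds_iff`, to show that a set is a neighbourhood of `x` it suffices to
-- exhibit one entourage whose ball at `x` is contained in it.
example {α : Type _} [UniformSpace α] {x : α} {s : Set α} {V : Set (α × α)}
    (hV : V ∈ 𝓤 α) (hVs : UniformSpace.ball x V ⊆ s) : s ∈ 𝓝 x :=
  UniformSpace.mem_nhds_iff.mpr ⟨V, hV, hVs⟩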
-/ theorem UniformSpace.mem_nhds_iff_symm {x : α} {s : Set α} : s ∈ 𝓝 x ↔ ∃ V ∈ 𝓤 α, SymmetricRel V ∧ ball x V ⊆ s := by rw [UniformSpace.mem_nhds_iff] constructor · rintro ⟨V, V_in, V_sub⟩ use symmetrizeRel V, symmetrize_mem_uniformity V_in, symmetric_symmetrizeRel V exact subset.trans (ball_mono (symmetrizeRel_subset_self V) x) V_sub · rintro ⟨V, V_in, V_symm, V_sub⟩ exact ⟨V, V_in, V_sub⟩ #align uniform_space.mem_nhds_iff_symm UniformSpace.mem_nhds_iff_symm #print UniformSpace.hasBasis_nhds /- theorem UniformSpace.hasBasis_nhds (x : α) : HasBasis (𝓝 x) (fun s : Set (α × α) => s ∈ 𝓤 α ∧ SymmetricRel s) fun s => ball x s := ⟨fun t => by simp [UniformSpace.mem_nhds_iff_symm, and_assoc']⟩ #align uniform_space.has_basis_nhds UniformSpace.hasBasis_nhds -/ open UniformSpace /- warning: uniform_space.mem_closure_iff_symm_ball -> UniformSpace.mem_closure_iff_symm_ball is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] {s : Set.{u1} α} {x : α}, Iff (Membership.Mem.{u1, u1} α (Set.{u1} α) (Set.hasMem.{u1} α) x (closure.{u1} α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) s)) (forall {V : Set.{u1} (Prod.{u1, u1} α α)}, (Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) V (uniformity.{u1} α _inst_1)) -> (SymmetricRel.{u1} α V) -> (Set.Nonempty.{u1} α (Inter.inter.{u1} (Set.{u1} α) (Set.hasInter.{u1} α) s (UniformSpace.ball.{u1} α x V)))) but is expected to have type forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] {s : Set.{u1} α} {x : α}, Iff (Membership.mem.{u1, u1} α (Set.{u1} α) (Set.instMembershipSet.{u1} α) x (closure.{u1} α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) s)) (forall {V : Set.{u1} (Prod.{u1, u1} α α)}, (Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (instMembershipSetFilter.{u1} (Prod.{u1, u1} α α)) V (uniformity.{u1} α _inst_1)) -> (SymmetricRel.{u1} α V) -> (Set.Nonempty.{u1} α (Inter.inter.{u1} (Set.{u1} α) (Set.instInterSet.{u1} α) s (UniformSpace.ball.{u1} α x V)))) Case conversion may be inaccurate. Consider using '#align uniform_space.mem_closure_iff_symm_ball UniformSpace.mem_closure_iff_symm_ballₓ'. 
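-- Editorial illustration (not part of the original mathlib source):
-- `UniformSpace.mem_nhds_iff_symm` strengthens the previous characterisation of membership in
-- `𝓝 x` by additionally providing a symmetric entourage.
example {α : Type _} [UniformSpace α] {x : α} {s : Set α} (hs : s ∈ 𝓝 x) :
    ∃ V ∈ 𝓤 α, SymmetricRel V ∧ UniformSpace.ball x V ⊆ s :=
  UniformSpace.mem_nhds_iff_symm.mp hs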
-/ theorem UniformSpace.mem_closure_iff_symm_ball {s : Set α} {x} : x ∈ closure s ↔ ∀ {V}, V ∈ 𝓤 α → SymmetricRel V → (s ∩ ball x V).Nonempty := by simp [mem_closure_iff_nhds_basis (has_basis_nhds x), Set.Nonempty] #align uniform_space.mem_closure_iff_symm_ball UniformSpace.mem_closure_iff_symm_ball /- warning: uniform_space.mem_closure_iff_ball -> UniformSpace.mem_closure_iff_ball is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] {s : Set.{u1} α} {x : α}, Iff (Membership.Mem.{u1, u1} α (Set.{u1} α) (Set.hasMem.{u1} α) x (closure.{u1} α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) s)) (forall {V : Set.{u1} (Prod.{u1, u1} α α)}, (Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) V (uniformity.{u1} α _inst_1)) -> (Set.Nonempty.{u1} α (Inter.inter.{u1} (Set.{u1} α) (Set.hasInter.{u1} α) (UniformSpace.ball.{u1} α x V) s))) but is expected to have type forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] {s : Set.{u1} α} {x : α}, Iff (Membership.mem.{u1, u1} α (Set.{u1} α) (Set.instMembershipSet.{u1} α) x (closure.{u1} α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) s)) (forall {V : Set.{u1} (Prod.{u1, u1} α α)}, (Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (instMembershipSetFilter.{u1} (Prod.{u1, u1} α α)) V (uniformity.{u1} α _inst_1)) -> (Set.Nonempty.{u1} α (Inter.inter.{u1} (Set.{u1} α) (Set.instInterSet.{u1} α) (UniformSpace.ball.{u1} α x V) s))) Case conversion may be inaccurate. Consider using '#align uniform_space.mem_closure_iff_ball UniformSpace.mem_closure_iff_ballₓ'. -/ theorem UniformSpace.mem_closure_iff_ball {s : Set α} {x} : x ∈ closure s ↔ ∀ {V}, V ∈ 𝓤 α → (ball x V ∩ s).Nonempty := by simp [mem_closure_iff_nhds_basis' (nhds_basis_uniformity' (𝓤 α).basis_sets)] #align uniform_space.mem_closure_iff_ball UniformSpace.mem_closure_iff_ball /- warning: uniform_space.has_basis_nhds_prod -> UniformSpace.hasBasis_nhds_prod is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] (x : α) (y : α), Filter.HasBasis.{u1, succ u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) (nhds.{u1} (Prod.{u1, u1} α α) (Prod.topologicalSpace.{u1, u1} α α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u1} α _inst_1)) (Prod.mk.{u1, u1} α α x y)) (fun (s : Set.{u1} (Prod.{u1, u1} α α)) => And (Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) s (uniformity.{u1} α _inst_1)) (SymmetricRel.{u1} α s)) (fun (s : Set.{u1} (Prod.{u1, u1} α α)) => Set.prod.{u1, u1} α α (UniformSpace.ball.{u1} α x s) (UniformSpace.ball.{u1} α y s)) but is expected to have type forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] (x : α) (y : α), Filter.HasBasis.{u1, succ u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) (nhds.{u1} (Prod.{u1, u1} α α) (instTopologicalSpaceProd.{u1, u1} α α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u1} α _inst_1)) (Prod.mk.{u1, u1} α α x y)) (fun (s : Set.{u1} (Prod.{u1, u1} α α)) => And (Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (instMembershipSetFilter.{u1} (Prod.{u1, u1} α α)) s (uniformity.{u1} α _inst_1)) (SymmetricRel.{u1} α s)) (fun (s : Set.{u1} (Prod.{u1, u1} α α)) => Set.prod.{u1, u1} α α (UniformSpace.ball.{u1} α x s) (UniformSpace.ball.{u1} α y s)) 
Case conversion may be inaccurate. Consider using '#align uniform_space.has_basis_nhds_prod UniformSpace.hasBasis_nhds_prodₓ'. -/ /- ./././Mathport/Syntax/Translate/Expr.lean:177:8: unsupported: ambiguous notation -/ theorem UniformSpace.hasBasis_nhds_prod (x y : α) : HasBasis (𝓝 (x, y)) (fun s => s ∈ 𝓤 α ∧ SymmetricRel s) fun s => ball x s ×ˢ ball y s := by rw [nhds_prod_eq] apply (has_basis_nhds x).prod_same_index (has_basis_nhds y) rintro U V ⟨U_in, U_symm⟩ ⟨V_in, V_symm⟩ exact ⟨U ∩ V, ⟨(𝓤 α).inter_sets U_in V_in, U_symm.inter V_symm⟩, ball_inter_left x U V, ball_inter_right y U V⟩ #align uniform_space.has_basis_nhds_prod UniformSpace.hasBasis_nhds_prod #print nhds_eq_uniformity /- theorem nhds_eq_uniformity {x : α} : 𝓝 x = (𝓤 α).lift' (ball x) := (nhds_basis_uniformity' (𝓤 α).basis_sets).eq_binfᵢ #align nhds_eq_uniformity nhds_eq_uniformity -/ #print nhds_eq_uniformity' /- theorem nhds_eq_uniformity' {x : α} : 𝓝 x = (𝓤 α).lift' fun s => { y | (y, x) ∈ s } := (nhds_basis_uniformity (𝓤 α).basis_sets).eq_binfᵢ #align nhds_eq_uniformity' nhds_eq_uniformity' -/ #print mem_nhds_left /- theorem mem_nhds_left (x : α) {s : Set (α × α)} (h : s ∈ 𝓤 α) : { y : α | (x, y) ∈ s } ∈ 𝓝 x := ball_mem_nhds x h #align mem_nhds_left mem_nhds_left -/ #print mem_nhds_right /- theorem mem_nhds_right (y : α) {s : Set (α × α)} (h : s ∈ 𝓤 α) : { x : α | (x, y) ∈ s } ∈ 𝓝 y := mem_nhds_left _ (symm_le_uniformity h) #align mem_nhds_right mem_nhds_right -/ /- warning: exists_mem_nhds_ball_subset_of_mem_nhds -> exists_mem_nhds_ball_subset_of_mem_nhds is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] {a : α} {U : Set.{u1} α}, (Membership.Mem.{u1, u1} (Set.{u1} α) (Filter.{u1} α) (Filter.hasMem.{u1} α) U (nhds.{u1} α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) a)) -> (Exists.{succ u1} (Set.{u1} α) (fun (V : Set.{u1} α) => Exists.{0} (Membership.Mem.{u1, u1} (Set.{u1} α) (Filter.{u1} α) (Filter.hasMem.{u1} α) V (nhds.{u1} α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) a)) (fun (H : Membership.Mem.{u1, u1} (Set.{u1} α) (Filter.{u1} α) (Filter.hasMem.{u1} α) V (nhds.{u1} α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) a)) => Exists.{succ u1} (Set.{u1} (Prod.{u1, u1} α α)) (fun (t : Set.{u1} (Prod.{u1, u1} α α)) => Exists.{0} (Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) t (uniformity.{u1} α _inst_1)) (fun (H : Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) t (uniformity.{u1} α _inst_1)) => forall (a' : α), (Membership.Mem.{u1, u1} α (Set.{u1} α) (Set.hasMem.{u1} α) a' V) -> (HasSubset.Subset.{u1} (Set.{u1} α) (Set.hasSubset.{u1} α) (UniformSpace.ball.{u1} α a' t) U)))))) but is expected to have type forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] {a : α} {U : Set.{u1} α}, (Membership.mem.{u1, u1} (Set.{u1} α) (Filter.{u1} α) (instMembershipSetFilter.{u1} α) U (nhds.{u1} α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) a)) -> (Exists.{succ u1} (Set.{u1} α) (fun (V : Set.{u1} α) => And (Membership.mem.{u1, u1} (Set.{u1} α) (Filter.{u1} α) (instMembershipSetFilter.{u1} α) V (nhds.{u1} α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) a)) (Exists.{succ u1} (Set.{u1} (Prod.{u1, u1} α α)) (fun (t : Set.{u1} (Prod.{u1, u1} α α)) => And (Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (instMembershipSetFilter.{u1} (Prod.{u1, u1} α α)) t 
(uniformity.{u1} α _inst_1)) (forall (a' : α), (Membership.mem.{u1, u1} α (Set.{u1} α) (Set.instMembershipSet.{u1} α) a' V) -> (HasSubset.Subset.{u1} (Set.{u1} α) (Set.instHasSubsetSet.{u1} α) (UniformSpace.ball.{u1} α a' t) U)))))) Case conversion may be inaccurate. Consider using '#align exists_mem_nhds_ball_subset_of_mem_nhds exists_mem_nhds_ball_subset_of_mem_nhdsₓ'. -/ theorem exists_mem_nhds_ball_subset_of_mem_nhds {a : α} {U : Set α} (h : U ∈ 𝓝 a) : ∃ V ∈ 𝓝 a, ∃ t ∈ 𝓤 α, ∀ a' ∈ V, UniformSpace.ball a' t ⊆ U := let ⟨t, ht, htU⟩ := comp_mem_uniformity_sets (mem_nhds_uniformity_iff_right.1 h) ⟨_, mem_nhds_left a ht, t, ht, fun a₁ h₁ a₂ h₂ => @htU (a, a₂) ⟨a₁, h₁, h₂⟩ rfl⟩ #align exists_mem_nhds_ball_subset_of_mem_nhds exists_mem_nhds_ball_subset_of_mem_nhds /- warning: is_compact.nhds_set_basis_uniformity -> IsCompact.nhdsSet_basis_uniformity is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {ι : Sort.{u2}} [_inst_1 : UniformSpace.{u1} α] {p : ι -> Prop} {s : ι -> (Set.{u1} (Prod.{u1, u1} α α))}, (Filter.HasBasis.{u1, u2} (Prod.{u1, u1} α α) ι (uniformity.{u1} α _inst_1) p s) -> (forall {K : Set.{u1} α}, (IsCompact.{u1} α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) K) -> (Filter.HasBasis.{u1, u2} α ι (nhdsSet.{u1} α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) K) p (fun (i : ι) => Set.unionᵢ.{u1, succ u1} α α (fun (x : α) => Set.unionᵢ.{u1, 0} α (Membership.Mem.{u1, u1} α (Set.{u1} α) (Set.hasMem.{u1} α) x K) (fun (H : Membership.Mem.{u1, u1} α (Set.{u1} α) (Set.hasMem.{u1} α) x K) => UniformSpace.ball.{u1} α x (s i)))))) but is expected to have type forall {α : Type.{u2}} {ι : Sort.{u1}} [_inst_1 : UniformSpace.{u2} α] {p : ι -> Prop} {s : ι -> (Set.{u2} (Prod.{u2, u2} α α))}, (Filter.HasBasis.{u2, u1} (Prod.{u2, u2} α α) ι (uniformity.{u2} α _inst_1) p s) -> (forall {K : Set.{u2} α}, (IsCompact.{u2} α (UniformSpace.toTopologicalSpace.{u2} α _inst_1) K) -> (Filter.HasBasis.{u2, u1} α ι (nhdsSet.{u2} α (UniformSpace.toTopologicalSpace.{u2} α _inst_1) K) p (fun (i : ι) => Set.unionᵢ.{u2, succ u2} α α (fun (x : α) => Set.unionᵢ.{u2, 0} α (Membership.mem.{u2, u2} α (Set.{u2} α) (Set.instMembershipSet.{u2} α) x K) (fun (H : Membership.mem.{u2, u2} α (Set.{u2} α) (Set.instMembershipSet.{u2} α) x K) => UniformSpace.ball.{u2} α x (s i)))))) Case conversion may be inaccurate. Consider using '#align is_compact.nhds_set_basis_uniformity IsCompact.nhdsSet_basis_uniformityₓ'. -/ theorem IsCompact.nhdsSet_basis_uniformity {p : ι → Prop} {s : ι → Set (α × α)} (hU : (𝓤 α).HasBasis p s) {K : Set α} (hK : IsCompact K) : (𝓝ˢ K).HasBasis p fun i => ⋃ x ∈ K, ball x (s i) := by refine' ⟨fun U => _⟩ simp only [mem_nhdsSet_iff_forall, (nhds_basis_uniformity' hU).mem_iff, Union₂_subset_iff] refine' ⟨fun H => _, fun ⟨i, hpi, hi⟩ x hx => ⟨i, hpi, hi x hx⟩⟩ replace H : ∀ x ∈ K, ∃ i : { i // p i }, ball x (s i ○ s i) ⊆ U · intro x hx rcases H x hx with ⟨i, hpi, hi⟩ rcases comp_mem_uniformity_sets (hU.mem_of_mem hpi) with ⟨t, ht_mem, ht⟩ rcases hU.mem_iff.1 ht_mem with ⟨j, hpj, hj⟩ exact ⟨⟨j, hpj⟩, subset.trans (ball_mono ((compRel_mono hj hj).trans ht) _) hi⟩ have : Nonempty { a // p a } := nonempty_subtype.2 hU.ex_mem choose! 
I hI using H rcases hK.elim_nhds_subcover (fun x => ball x <| s (I x)) fun x hx => ball_mem_nhds _ <| hU.mem_of_mem (I x).2 with ⟨t, htK, ht⟩ obtain ⟨i, hpi, hi⟩ : ∃ (i : _)(hpi : p i), s i ⊆ ⋂ x ∈ t, s (I x) exact hU.mem_iff.1 ((bInter_finset_mem t).2 fun x hx => hU.mem_of_mem (I x).2) rw [subset_Inter₂_iff] at hi refine' ⟨i, hpi, fun x hx => _⟩ rcases mem_Union₂.1 (ht hx) with ⟨z, hzt : z ∈ t, hzx : x ∈ ball z (s (I z))⟩ calc ball x (s i) ⊆ ball z (s (I z) ○ s (I z)) := fun y hy => ⟨x, hzx, hi z hzt hy⟩ _ ⊆ U := hI z (htK z hzt) #align is_compact.nhds_set_basis_uniformity IsCompact.nhdsSet_basis_uniformity /- warning: disjoint.exists_uniform_thickening -> Disjoint.exists_uniform_thickening is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] {A : Set.{u1} α} {B : Set.{u1} α}, (IsCompact.{u1} α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) A) -> (IsClosed.{u1} α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) B) -> (Disjoint.{u1} (Set.{u1} α) (CompleteSemilatticeInf.toPartialOrder.{u1} (Set.{u1} α) (CompleteLattice.toCompleteSemilatticeInf.{u1} (Set.{u1} α) (Order.Coframe.toCompleteLattice.{u1} (Set.{u1} α) (CompleteDistribLattice.toCoframe.{u1} (Set.{u1} α) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u1} (Set.{u1} α) (Set.completeBooleanAlgebra.{u1} α)))))) (GeneralizedBooleanAlgebra.toOrderBot.{u1} (Set.{u1} α) (BooleanAlgebra.toGeneralizedBooleanAlgebra.{u1} (Set.{u1} α) (Set.booleanAlgebra.{u1} α))) A B) -> (Exists.{succ u1} (Set.{u1} (Prod.{u1, u1} α α)) (fun (V : Set.{u1} (Prod.{u1, u1} α α)) => Exists.{0} (Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) V (uniformity.{u1} α _inst_1)) (fun (H : Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) V (uniformity.{u1} α _inst_1)) => Disjoint.{u1} (Set.{u1} α) (CompleteSemilatticeInf.toPartialOrder.{u1} (Set.{u1} α) (CompleteLattice.toCompleteSemilatticeInf.{u1} (Set.{u1} α) (Order.Coframe.toCompleteLattice.{u1} (Set.{u1} α) (CompleteDistribLattice.toCoframe.{u1} (Set.{u1} α) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u1} (Set.{u1} α) (Set.completeBooleanAlgebra.{u1} α)))))) (GeneralizedBooleanAlgebra.toOrderBot.{u1} (Set.{u1} α) (BooleanAlgebra.toGeneralizedBooleanAlgebra.{u1} (Set.{u1} α) (Set.booleanAlgebra.{u1} α))) (Set.unionᵢ.{u1, succ u1} α α (fun (x : α) => Set.unionᵢ.{u1, 0} α (Membership.Mem.{u1, u1} α (Set.{u1} α) (Set.hasMem.{u1} α) x A) (fun (H : Membership.Mem.{u1, u1} α (Set.{u1} α) (Set.hasMem.{u1} α) x A) => UniformSpace.ball.{u1} α x V))) (Set.unionᵢ.{u1, succ u1} α α (fun (x : α) => Set.unionᵢ.{u1, 0} α (Membership.Mem.{u1, u1} α (Set.{u1} α) (Set.hasMem.{u1} α) x B) (fun (H : Membership.Mem.{u1, u1} α (Set.{u1} α) (Set.hasMem.{u1} α) x B) => UniformSpace.ball.{u1} α x V)))))) but is expected to have type forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] {A : Set.{u1} α} {B : Set.{u1} α}, (IsCompact.{u1} α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) A) -> (IsClosed.{u1} α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) B) -> (Disjoint.{u1} (Set.{u1} α) (CompleteSemilatticeInf.toPartialOrder.{u1} (Set.{u1} α) (CompleteLattice.toCompleteSemilatticeInf.{u1} (Set.{u1} α) (Order.Coframe.toCompleteLattice.{u1} (Set.{u1} α) (CompleteDistribLattice.toCoframe.{u1} (Set.{u1} α) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u1} (Set.{u1} α) (Set.instCompleteBooleanAlgebraSet.{u1} α)))))) 
(BoundedOrder.toOrderBot.{u1} (Set.{u1} α) (Preorder.toLE.{u1} (Set.{u1} α) (PartialOrder.toPreorder.{u1} (Set.{u1} α) (CompleteSemilatticeInf.toPartialOrder.{u1} (Set.{u1} α) (CompleteLattice.toCompleteSemilatticeInf.{u1} (Set.{u1} α) (Order.Coframe.toCompleteLattice.{u1} (Set.{u1} α) (CompleteDistribLattice.toCoframe.{u1} (Set.{u1} α) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u1} (Set.{u1} α) (Set.instCompleteBooleanAlgebraSet.{u1} α)))))))) (CompleteLattice.toBoundedOrder.{u1} (Set.{u1} α) (Order.Coframe.toCompleteLattice.{u1} (Set.{u1} α) (CompleteDistribLattice.toCoframe.{u1} (Set.{u1} α) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u1} (Set.{u1} α) (Set.instCompleteBooleanAlgebraSet.{u1} α)))))) A B) -> (Exists.{succ u1} (Set.{u1} (Prod.{u1, u1} α α)) (fun (V : Set.{u1} (Prod.{u1, u1} α α)) => And (Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (instMembershipSetFilter.{u1} (Prod.{u1, u1} α α)) V (uniformity.{u1} α _inst_1)) (Disjoint.{u1} (Set.{u1} α) (CompleteSemilatticeInf.toPartialOrder.{u1} (Set.{u1} α) (CompleteLattice.toCompleteSemilatticeInf.{u1} (Set.{u1} α) (Order.Coframe.toCompleteLattice.{u1} (Set.{u1} α) (CompleteDistribLattice.toCoframe.{u1} (Set.{u1} α) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u1} (Set.{u1} α) (Set.instCompleteBooleanAlgebraSet.{u1} α)))))) (BoundedOrder.toOrderBot.{u1} (Set.{u1} α) (Preorder.toLE.{u1} (Set.{u1} α) (PartialOrder.toPreorder.{u1} (Set.{u1} α) (CompleteSemilatticeInf.toPartialOrder.{u1} (Set.{u1} α) (CompleteLattice.toCompleteSemilatticeInf.{u1} (Set.{u1} α) (Order.Coframe.toCompleteLattice.{u1} (Set.{u1} α) (CompleteDistribLattice.toCoframe.{u1} (Set.{u1} α) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u1} (Set.{u1} α) (Set.instCompleteBooleanAlgebraSet.{u1} α)))))))) (CompleteLattice.toBoundedOrder.{u1} (Set.{u1} α) (Order.Coframe.toCompleteLattice.{u1} (Set.{u1} α) (CompleteDistribLattice.toCoframe.{u1} (Set.{u1} α) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u1} (Set.{u1} α) (Set.instCompleteBooleanAlgebraSet.{u1} α)))))) (Set.unionᵢ.{u1, succ u1} α α (fun (x : α) => Set.unionᵢ.{u1, 0} α (Membership.mem.{u1, u1} α (Set.{u1} α) (Set.instMembershipSet.{u1} α) x A) (fun ([email protected]._hyg.10418 : Membership.mem.{u1, u1} α (Set.{u1} α) (Set.instMembershipSet.{u1} α) x A) => UniformSpace.ball.{u1} α x V))) (Set.unionᵢ.{u1, succ u1} α α (fun (x : α) => Set.unionᵢ.{u1, 0} α (Membership.mem.{u1, u1} α (Set.{u1} α) (Set.instMembershipSet.{u1} α) x B) (fun ([email protected]._hyg.10451 : Membership.mem.{u1, u1} α (Set.{u1} α) (Set.instMembershipSet.{u1} α) x B) => UniformSpace.ball.{u1} α x V)))))) Case conversion may be inaccurate. Consider using '#align disjoint.exists_uniform_thickening Disjoint.exists_uniform_thickeningₓ'. 
-/ theorem Disjoint.exists_uniform_thickening {A B : Set α} (hA : IsCompact A) (hB : IsClosed B) (h : Disjoint A B) : ∃ V ∈ 𝓤 α, Disjoint (⋃ x ∈ A, ball x V) (⋃ x ∈ B, ball x V) := by have : Bᶜ ∈ 𝓝ˢ A := hB.is_open_compl.mem_nhds_set.mpr h.le_compl_right rw [(hA.nhds_set_basis_uniformity (Filter.basis_sets _)).mem_iff] at this rcases this with ⟨U, hU, hUAB⟩ rcases comp_symm_mem_uniformity_sets hU with ⟨V, hV, hVsymm, hVU⟩ refine' ⟨V, hV, set.disjoint_left.mpr fun x => _⟩ simp only [mem_Union₂] rintro ⟨a, ha, hxa⟩ ⟨b, hb, hxb⟩ rw [mem_ball_symmetry hVsymm] at hxa hxb exact hUAB (mem_Union₂_of_mem ha <| hVU <| mem_comp_of_mem_ball hVsymm hxa hxb) hb #align disjoint.exists_uniform_thickening Disjoint.exists_uniform_thickening /- warning: disjoint.exists_uniform_thickening_of_basis -> Disjoint.exists_uniform_thickening_of_basis is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {ι : Sort.{u2}} [_inst_1 : UniformSpace.{u1} α] {p : ι -> Prop} {s : ι -> (Set.{u1} (Prod.{u1, u1} α α))}, (Filter.HasBasis.{u1, u2} (Prod.{u1, u1} α α) ι (uniformity.{u1} α _inst_1) p s) -> (forall {A : Set.{u1} α} {B : Set.{u1} α}, (IsCompact.{u1} α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) A) -> (IsClosed.{u1} α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) B) -> (Disjoint.{u1} (Set.{u1} α) (CompleteSemilatticeInf.toPartialOrder.{u1} (Set.{u1} α) (CompleteLattice.toCompleteSemilatticeInf.{u1} (Set.{u1} α) (Order.Coframe.toCompleteLattice.{u1} (Set.{u1} α) (CompleteDistribLattice.toCoframe.{u1} (Set.{u1} α) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u1} (Set.{u1} α) (Set.completeBooleanAlgebra.{u1} α)))))) (GeneralizedBooleanAlgebra.toOrderBot.{u1} (Set.{u1} α) (BooleanAlgebra.toGeneralizedBooleanAlgebra.{u1} (Set.{u1} α) (Set.booleanAlgebra.{u1} α))) A B) -> (Exists.{u2} ι (fun (i : ι) => And (p i) (Disjoint.{u1} (Set.{u1} α) (CompleteSemilatticeInf.toPartialOrder.{u1} (Set.{u1} α) (CompleteLattice.toCompleteSemilatticeInf.{u1} (Set.{u1} α) (Order.Coframe.toCompleteLattice.{u1} (Set.{u1} α) (CompleteDistribLattice.toCoframe.{u1} (Set.{u1} α) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u1} (Set.{u1} α) (Set.completeBooleanAlgebra.{u1} α)))))) (GeneralizedBooleanAlgebra.toOrderBot.{u1} (Set.{u1} α) (BooleanAlgebra.toGeneralizedBooleanAlgebra.{u1} (Set.{u1} α) (Set.booleanAlgebra.{u1} α))) (Set.unionᵢ.{u1, succ u1} α α (fun (x : α) => Set.unionᵢ.{u1, 0} α (Membership.Mem.{u1, u1} α (Set.{u1} α) (Set.hasMem.{u1} α) x A) (fun (H : Membership.Mem.{u1, u1} α (Set.{u1} α) (Set.hasMem.{u1} α) x A) => UniformSpace.ball.{u1} α x (s i)))) (Set.unionᵢ.{u1, succ u1} α α (fun (x : α) => Set.unionᵢ.{u1, 0} α (Membership.Mem.{u1, u1} α (Set.{u1} α) (Set.hasMem.{u1} α) x B) (fun (H : Membership.Mem.{u1, u1} α (Set.{u1} α) (Set.hasMem.{u1} α) x B) => UniformSpace.ball.{u1} α x (s i)))))))) but is expected to have type forall {α : Type.{u2}} {ι : Sort.{u1}} [_inst_1 : UniformSpace.{u2} α] {p : ι -> Prop} {s : ι -> (Set.{u2} (Prod.{u2, u2} α α))}, (Filter.HasBasis.{u2, u1} (Prod.{u2, u2} α α) ι (uniformity.{u2} α _inst_1) p s) -> (forall {A : Set.{u2} α} {B : Set.{u2} α}, (IsCompact.{u2} α (UniformSpace.toTopologicalSpace.{u2} α _inst_1) A) -> (IsClosed.{u2} α (UniformSpace.toTopologicalSpace.{u2} α _inst_1) B) -> (Disjoint.{u2} (Set.{u2} α) (CompleteSemilatticeInf.toPartialOrder.{u2} (Set.{u2} α) (CompleteLattice.toCompleteSemilatticeInf.{u2} (Set.{u2} α) (Order.Coframe.toCompleteLattice.{u2} (Set.{u2} α) (CompleteDistribLattice.toCoframe.{u2} (Set.{u2} α) 
(CompleteBooleanAlgebra.toCompleteDistribLattice.{u2} (Set.{u2} α) (Set.instCompleteBooleanAlgebraSet.{u2} α)))))) (BoundedOrder.toOrderBot.{u2} (Set.{u2} α) (Preorder.toLE.{u2} (Set.{u2} α) (PartialOrder.toPreorder.{u2} (Set.{u2} α) (CompleteSemilatticeInf.toPartialOrder.{u2} (Set.{u2} α) (CompleteLattice.toCompleteSemilatticeInf.{u2} (Set.{u2} α) (Order.Coframe.toCompleteLattice.{u2} (Set.{u2} α) (CompleteDistribLattice.toCoframe.{u2} (Set.{u2} α) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u2} (Set.{u2} α) (Set.instCompleteBooleanAlgebraSet.{u2} α)))))))) (CompleteLattice.toBoundedOrder.{u2} (Set.{u2} α) (Order.Coframe.toCompleteLattice.{u2} (Set.{u2} α) (CompleteDistribLattice.toCoframe.{u2} (Set.{u2} α) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u2} (Set.{u2} α) (Set.instCompleteBooleanAlgebraSet.{u2} α)))))) A B) -> (Exists.{u1} ι (fun (i : ι) => And (p i) (Disjoint.{u2} (Set.{u2} α) (CompleteSemilatticeInf.toPartialOrder.{u2} (Set.{u2} α) (CompleteLattice.toCompleteSemilatticeInf.{u2} (Set.{u2} α) (Order.Coframe.toCompleteLattice.{u2} (Set.{u2} α) (CompleteDistribLattice.toCoframe.{u2} (Set.{u2} α) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u2} (Set.{u2} α) (Set.instCompleteBooleanAlgebraSet.{u2} α)))))) (BoundedOrder.toOrderBot.{u2} (Set.{u2} α) (Preorder.toLE.{u2} (Set.{u2} α) (PartialOrder.toPreorder.{u2} (Set.{u2} α) (CompleteSemilatticeInf.toPartialOrder.{u2} (Set.{u2} α) (CompleteLattice.toCompleteSemilatticeInf.{u2} (Set.{u2} α) (Order.Coframe.toCompleteLattice.{u2} (Set.{u2} α) (CompleteDistribLattice.toCoframe.{u2} (Set.{u2} α) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u2} (Set.{u2} α) (Set.instCompleteBooleanAlgebraSet.{u2} α)))))))) (CompleteLattice.toBoundedOrder.{u2} (Set.{u2} α) (Order.Coframe.toCompleteLattice.{u2} (Set.{u2} α) (CompleteDistribLattice.toCoframe.{u2} (Set.{u2} α) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u2} (Set.{u2} α) (Set.instCompleteBooleanAlgebraSet.{u2} α)))))) (Set.unionᵢ.{u2, succ u2} α α (fun (x : α) => Set.unionᵢ.{u2, 0} α (Membership.mem.{u2, u2} α (Set.{u2} α) (Set.instMembershipSet.{u2} α) x A) (fun (H : Membership.mem.{u2, u2} α (Set.{u2} α) (Set.instMembershipSet.{u2} α) x A) => UniformSpace.ball.{u2} α x (s i)))) (Set.unionᵢ.{u2, succ u2} α α (fun (x : α) => Set.unionᵢ.{u2, 0} α (Membership.mem.{u2, u2} α (Set.{u2} α) (Set.instMembershipSet.{u2} α) x B) (fun (H : Membership.mem.{u2, u2} α (Set.{u2} α) (Set.instMembershipSet.{u2} α) x B) => UniformSpace.ball.{u2} α x (s i)))))))) Case conversion may be inaccurate. Consider using '#align disjoint.exists_uniform_thickening_of_basis Disjoint.exists_uniform_thickening_of_basisₓ'. 
-/ theorem Disjoint.exists_uniform_thickening_of_basis {p : ι → Prop} {s : ι → Set (α × α)} (hU : (𝓤 α).HasBasis p s) {A B : Set α} (hA : IsCompact A) (hB : IsClosed B) (h : Disjoint A B) : ∃ i, p i ∧ Disjoint (⋃ x ∈ A, ball x (s i)) (⋃ x ∈ B, ball x (s i)) := by rcases h.exists_uniform_thickening hA hB with ⟨V, hV, hVAB⟩ rcases hU.mem_iff.1 hV with ⟨i, hi, hiV⟩ exact ⟨i, hi, hVAB.mono (Union₂_mono fun a _ => ball_mono hiV a) (Union₂_mono fun b _ => ball_mono hiV b)⟩ #align disjoint.exists_uniform_thickening_of_basis Disjoint.exists_uniform_thickening_of_basis #print tendsto_right_nhds_uniformity /- theorem tendsto_right_nhds_uniformity {a : α} : Tendsto (fun a' => (a', a)) (𝓝 a) (𝓤 α) := fun s => mem_nhds_right a #align tendsto_right_nhds_uniformity tendsto_right_nhds_uniformity -/ #print tendsto_left_nhds_uniformity /- theorem tendsto_left_nhds_uniformity {a : α} : Tendsto (fun a' => (a, a')) (𝓝 a) (𝓤 α) := fun s => mem_nhds_left a #align tendsto_left_nhds_uniformity tendsto_left_nhds_uniformity -/ /- warning: lift_nhds_left -> lift_nhds_left is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : UniformSpace.{u1} α] {x : α} {g : (Set.{u1} α) -> (Filter.{u2} β)}, (Monotone.{u1, u2} (Set.{u1} α) (Filter.{u2} β) (PartialOrder.toPreorder.{u1} (Set.{u1} α) (CompleteSemilatticeInf.toPartialOrder.{u1} (Set.{u1} α) (CompleteLattice.toCompleteSemilatticeInf.{u1} (Set.{u1} α) (Order.Coframe.toCompleteLattice.{u1} (Set.{u1} α) (CompleteDistribLattice.toCoframe.{u1} (Set.{u1} α) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u1} (Set.{u1} α) (Set.completeBooleanAlgebra.{u1} α))))))) (PartialOrder.toPreorder.{u2} (Filter.{u2} β) (Filter.partialOrder.{u2} β)) g) -> (Eq.{succ u2} (Filter.{u2} β) (Filter.lift.{u1, u2} α β (nhds.{u1} α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) x) g) (Filter.lift.{u1, u2} (Prod.{u1, u1} α α) β (uniformity.{u1} α _inst_1) (fun (s : Set.{u1} (Prod.{u1, u1} α α)) => g (UniformSpace.ball.{u1} α x s)))) but is expected to have type forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : UniformSpace.{u1} α] {x : α} {g : (Set.{u1} α) -> (Filter.{u2} β)}, (Monotone.{u1, u2} (Set.{u1} α) (Filter.{u2} β) (PartialOrder.toPreorder.{u1} (Set.{u1} α) (CompleteSemilatticeInf.toPartialOrder.{u1} (Set.{u1} α) (CompleteLattice.toCompleteSemilatticeInf.{u1} (Set.{u1} α) (Order.Coframe.toCompleteLattice.{u1} (Set.{u1} α) (CompleteDistribLattice.toCoframe.{u1} (Set.{u1} α) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u1} (Set.{u1} α) (Set.instCompleteBooleanAlgebraSet.{u1} α))))))) (PartialOrder.toPreorder.{u2} (Filter.{u2} β) (Filter.instPartialOrderFilter.{u2} β)) g) -> (Eq.{succ u2} (Filter.{u2} β) (Filter.lift.{u1, u2} α β (nhds.{u1} α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) x) g) (Filter.lift.{u1, u2} (Prod.{u1, u1} α α) β (uniformity.{u1} α _inst_1) (fun (s : Set.{u1} (Prod.{u1, u1} α α)) => g (UniformSpace.ball.{u1} α x s)))) Case conversion may be inaccurate. Consider using '#align lift_nhds_left lift_nhds_leftₓ'. 
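-- Editorial illustration (not part of the original mathlib source): applying
-- `Disjoint.exists_uniform_thickening_of_basis` to the basis of all entourages,
-- `(𝓤 α).basis_sets`, recovers the statement of `Disjoint.exists_uniform_thickening`.
example {α : Type _} [UniformSpace α] {A B : Set α} (hA : IsCompact A) (hB : IsClosed B)
    (h : Disjoint A B) :
    ∃ V : Set (α × α), V ∈ 𝓤 α ∧
      Disjoint (⋃ x ∈ A, UniformSpace.ball x V) (⋃ x ∈ B, UniformSpace.ball x V) :=
  h.exists_uniform_thickening_of_basis (𝓤 α).basis_sets hA hB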
-/ theorem lift_nhds_left {x : α} {g : Set α → Filter β} (hg : Monotone g) : (𝓝 x).lift g = (𝓤 α).lift fun s : Set (α × α) => g (ball x s) := by rw [nhds_eq_comap_uniformity, comap_lift_eq2 hg] rfl #align lift_nhds_left lift_nhds_left /- warning: lift_nhds_right -> lift_nhds_right is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : UniformSpace.{u1} α] {x : α} {g : (Set.{u1} α) -> (Filter.{u2} β)}, (Monotone.{u1, u2} (Set.{u1} α) (Filter.{u2} β) (PartialOrder.toPreorder.{u1} (Set.{u1} α) (CompleteSemilatticeInf.toPartialOrder.{u1} (Set.{u1} α) (CompleteLattice.toCompleteSemilatticeInf.{u1} (Set.{u1} α) (Order.Coframe.toCompleteLattice.{u1} (Set.{u1} α) (CompleteDistribLattice.toCoframe.{u1} (Set.{u1} α) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u1} (Set.{u1} α) (Set.completeBooleanAlgebra.{u1} α))))))) (PartialOrder.toPreorder.{u2} (Filter.{u2} β) (Filter.partialOrder.{u2} β)) g) -> (Eq.{succ u2} (Filter.{u2} β) (Filter.lift.{u1, u2} α β (nhds.{u1} α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) x) g) (Filter.lift.{u1, u2} (Prod.{u1, u1} α α) β (uniformity.{u1} α _inst_1) (fun (s : Set.{u1} (Prod.{u1, u1} α α)) => g (setOf.{u1} α (fun (y : α) => Membership.Mem.{u1, u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) (Set.hasMem.{u1} (Prod.{u1, u1} α α)) (Prod.mk.{u1, u1} α α y x) s))))) but is expected to have type forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : UniformSpace.{u1} α] {x : α} {g : (Set.{u1} α) -> (Filter.{u2} β)}, (Monotone.{u1, u2} (Set.{u1} α) (Filter.{u2} β) (PartialOrder.toPreorder.{u1} (Set.{u1} α) (CompleteSemilatticeInf.toPartialOrder.{u1} (Set.{u1} α) (CompleteLattice.toCompleteSemilatticeInf.{u1} (Set.{u1} α) (Order.Coframe.toCompleteLattice.{u1} (Set.{u1} α) (CompleteDistribLattice.toCoframe.{u1} (Set.{u1} α) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u1} (Set.{u1} α) (Set.instCompleteBooleanAlgebraSet.{u1} α))))))) (PartialOrder.toPreorder.{u2} (Filter.{u2} β) (Filter.instPartialOrderFilter.{u2} β)) g) -> (Eq.{succ u2} (Filter.{u2} β) (Filter.lift.{u1, u2} α β (nhds.{u1} α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) x) g) (Filter.lift.{u1, u2} (Prod.{u1, u1} α α) β (uniformity.{u1} α _inst_1) (fun (s : Set.{u1} (Prod.{u1, u1} α α)) => g (setOf.{u1} α (fun (y : α) => Membership.mem.{u1, u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) (Set.instMembershipSet.{u1} (Prod.{u1, u1} α α)) (Prod.mk.{u1, u1} α α y x) s))))) Case conversion may be inaccurate. Consider using '#align lift_nhds_right lift_nhds_rightₓ'. 
-/ theorem lift_nhds_right {x : α} {g : Set α → Filter β} (hg : Monotone g) : (𝓝 x).lift g = (𝓤 α).lift fun s : Set (α × α) => g { y | (y, x) ∈ s } := by rw [nhds_eq_comap_uniformity', comap_lift_eq2 hg] rfl #align lift_nhds_right lift_nhds_right /- ./././Mathport/Syntax/Translate/Expr.lean:177:8: unsupported: ambiguous notation -/ #print nhds_nhds_eq_uniformity_uniformity_prod /- theorem nhds_nhds_eq_uniformity_uniformity_prod {a b : α} : 𝓝 a ×ᶠ 𝓝 b = (𝓤 α).lift fun s : Set (α × α) => (𝓤 α).lift' fun t : Set (α × α) => { y : α | (y, a) ∈ s } ×ˢ { y : α | (b, y) ∈ t } := by rw [nhds_eq_uniformity', nhds_eq_uniformity, prod_lift'_lift'] exacts[rfl, monotone_preimage, monotone_preimage] #align nhds_nhds_eq_uniformity_uniformity_prod nhds_nhds_eq_uniformity_uniformity_prod -/ /- warning: nhds_eq_uniformity_prod -> nhds_eq_uniformity_prod is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] {a : α} {b : α}, Eq.{succ u1} (Filter.{u1} (Prod.{u1, u1} α α)) (nhds.{u1} (Prod.{u1, u1} α α) (Prod.topologicalSpace.{u1, u1} α α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u1} α _inst_1)) (Prod.mk.{u1, u1} α α a b)) (Filter.lift'.{u1, u1} (Prod.{u1, u1} α α) (Prod.{u1, u1} α α) (uniformity.{u1} α _inst_1) (fun (s : Set.{u1} (Prod.{u1, u1} α α)) => Set.prod.{u1, u1} α α (setOf.{u1} α (fun (y : α) => Membership.Mem.{u1, u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) (Set.hasMem.{u1} (Prod.{u1, u1} α α)) (Prod.mk.{u1, u1} α α y a) s)) (setOf.{u1} α (fun (y : α) => Membership.Mem.{u1, u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) (Set.hasMem.{u1} (Prod.{u1, u1} α α)) (Prod.mk.{u1, u1} α α b y) s)))) but is expected to have type forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] {a : α} {b : α}, Eq.{succ u1} (Filter.{u1} (Prod.{u1, u1} α α)) (nhds.{u1} (Prod.{u1, u1} α α) (instTopologicalSpaceProd.{u1, u1} α α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u1} α _inst_1)) (Prod.mk.{u1, u1} α α a b)) (Filter.lift'.{u1, u1} (Prod.{u1, u1} α α) (Prod.{u1, u1} α α) (uniformity.{u1} α _inst_1) (fun (s : Set.{u1} (Prod.{u1, u1} α α)) => Set.prod.{u1, u1} α α (setOf.{u1} α (fun (y : α) => Membership.mem.{u1, u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) (Set.instMembershipSet.{u1} (Prod.{u1, u1} α α)) (Prod.mk.{u1, u1} α α y a) s)) (setOf.{u1} α (fun (y : α) => Membership.mem.{u1, u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) (Set.instMembershipSet.{u1} (Prod.{u1, u1} α α)) (Prod.mk.{u1, u1} α α b y) s)))) Case conversion may be inaccurate. Consider using '#align nhds_eq_uniformity_prod nhds_eq_uniformity_prodₓ'. 
-/ /- ./././Mathport/Syntax/Translate/Expr.lean:177:8: unsupported: ambiguous notation -/ theorem nhds_eq_uniformity_prod {a b : α} : 𝓝 (a, b) = (𝓤 α).lift' fun s : Set (α × α) => { y : α | (y, a) ∈ s } ×ˢ { y : α | (b, y) ∈ s } := by rw [nhds_prod_eq, nhds_nhds_eq_uniformity_uniformity_prod, lift_lift'_same_eq_lift'] · intro s exact monotone_const.set_prod monotone_preimage · intro t exact monotone_preimage.set_prod monotone_const #align nhds_eq_uniformity_prod nhds_eq_uniformity_prod /- warning: nhdset_of_mem_uniformity -> nhdset_of_mem_uniformity is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] {d : Set.{u1} (Prod.{u1, u1} α α)} (s : Set.{u1} (Prod.{u1, u1} α α)), (Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) d (uniformity.{u1} α _inst_1)) -> (Exists.{succ u1} (Set.{u1} (Prod.{u1, u1} α α)) (fun (t : Set.{u1} (Prod.{u1, u1} α α)) => And (IsOpen.{u1} (Prod.{u1, u1} α α) (Prod.topologicalSpace.{u1, u1} α α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u1} α _inst_1)) t) (And (HasSubset.Subset.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (Set.hasSubset.{u1} (Prod.{u1, u1} α α)) s t) (HasSubset.Subset.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (Set.hasSubset.{u1} (Prod.{u1, u1} α α)) t (setOf.{u1} (Prod.{u1, u1} α α) (fun (p : Prod.{u1, u1} α α) => Exists.{succ u1} α (fun (x : α) => Exists.{succ u1} α (fun (y : α) => And (Membership.Mem.{u1, u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) (Set.hasMem.{u1} (Prod.{u1, u1} α α)) (Prod.mk.{u1, u1} α α (Prod.fst.{u1, u1} α α p) x) d) (And (Membership.Mem.{u1, u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) (Set.hasMem.{u1} (Prod.{u1, u1} α α)) (Prod.mk.{u1, u1} α α x y) s) (Membership.Mem.{u1, u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) (Set.hasMem.{u1} (Prod.{u1, u1} α α)) (Prod.mk.{u1, u1} α α y (Prod.snd.{u1, u1} α α p)) d)))))))))) but is expected to have type forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] {d : Set.{u1} (Prod.{u1, u1} α α)} (s : Set.{u1} (Prod.{u1, u1} α α)), (Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (instMembershipSetFilter.{u1} (Prod.{u1, u1} α α)) d (uniformity.{u1} α _inst_1)) -> (Exists.{succ u1} (Set.{u1} (Prod.{u1, u1} α α)) (fun (t : Set.{u1} (Prod.{u1, u1} α α)) => And (IsOpen.{u1} (Prod.{u1, u1} α α) (instTopologicalSpaceProd.{u1, u1} α α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u1} α _inst_1)) t) (And (HasSubset.Subset.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (Set.instHasSubsetSet.{u1} (Prod.{u1, u1} α α)) s t) (HasSubset.Subset.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (Set.instHasSubsetSet.{u1} (Prod.{u1, u1} α α)) t (setOf.{u1} (Prod.{u1, u1} α α) (fun (p : Prod.{u1, u1} α α) => Exists.{succ u1} α (fun (x : α) => Exists.{succ u1} α (fun (y : α) => And (Membership.mem.{u1, u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) (Set.instMembershipSet.{u1} (Prod.{u1, u1} α α)) (Prod.mk.{u1, u1} α α (Prod.fst.{u1, u1} α α p) x) d) (And (Membership.mem.{u1, u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) (Set.instMembershipSet.{u1} (Prod.{u1, u1} α α)) (Prod.mk.{u1, u1} α α x y) s) (Membership.mem.{u1, u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) (Set.instMembershipSet.{u1} (Prod.{u1, u1} α α)) (Prod.mk.{u1, u1} α α y (Prod.snd.{u1, u1} α α p)) d)))))))))) Case conversion may be inaccurate. 
Consider using '#align nhdset_of_mem_uniformity nhdset_of_mem_uniformityₓ'. -/ /- ./././Mathport/Syntax/Translate/Basic.lean:635:2: warning: expanding binder collection (t «expr ⊆ » cl_d) -/ theorem nhdset_of_mem_uniformity {d : Set (α × α)} (s : Set (α × α)) (hd : d ∈ 𝓤 α) : ∃ t : Set (α × α), IsOpen t ∧ s ⊆ t ∧ t ⊆ { p | ∃ x y, (p.1, x) ∈ d ∧ (x, y) ∈ s ∧ (y, p.2) ∈ d } := let cl_d := { p : α × α | ∃ x y, (p.1, x) ∈ d ∧ (x, y) ∈ s ∧ (y, p.2) ∈ d } have : ∀ p ∈ s, ∃ (t : _)(_ : t ⊆ cl_d), IsOpen t ∧ p ∈ t := fun ⟨x, y⟩ hp => mem_nhds_iff.mp <| show cl_d ∈ 𝓝 (x, y) by rw [nhds_eq_uniformity_prod, mem_lift'_sets] exact ⟨d, hd, fun ⟨a, b⟩ ⟨ha, hb⟩ => ⟨x, y, ha, hp, hb⟩⟩ exact monotone_preimage.set_prod monotone_preimage have : ∃ t : ∀ (p : α × α) (h : p ∈ s), Set (α × α), ∀ p, ∀ h : p ∈ s, t p h ⊆ cl_d ∧ IsOpen (t p h) ∧ p ∈ t p h := by simp [Classical.skolem] at this <;> simp <;> assumption match this with | ⟨t, ht⟩ => ⟨(⋃ p : α × α, ⋃ h : p ∈ s, t p h : Set (α × α)), isOpen_unionᵢ fun p : α × α => isOpen_unionᵢ fun hp => (ht p hp).right.left, fun ⟨a, b⟩ hp => by simp <;> exact ⟨a, b, hp, (ht (a, b) hp).right.right⟩, unionᵢ_subset fun p => unionᵢ_subset fun hp => (ht p hp).left⟩ #align nhdset_of_mem_uniformity nhdset_of_mem_uniformity /- warning: nhds_le_uniformity -> nhds_le_uniformity is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] (x : α), LE.le.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (Preorder.toLE.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (PartialOrder.toPreorder.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.partialOrder.{u1} (Prod.{u1, u1} α α)))) (nhds.{u1} (Prod.{u1, u1} α α) (Prod.topologicalSpace.{u1, u1} α α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u1} α _inst_1)) (Prod.mk.{u1, u1} α α x x)) (uniformity.{u1} α _inst_1) but is expected to have type forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] (x : α), LE.le.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (Preorder.toLE.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (PartialOrder.toPreorder.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.instPartialOrderFilter.{u1} (Prod.{u1, u1} α α)))) (nhds.{u1} (Prod.{u1, u1} α α) (instTopologicalSpaceProd.{u1, u1} α α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u1} α _inst_1)) (Prod.mk.{u1, u1} α α x x)) (uniformity.{u1} α _inst_1) Case conversion may be inaccurate. Consider using '#align nhds_le_uniformity nhds_le_uniformityₓ'. -/ /- ./././Mathport/Syntax/Translate/Expr.lean:177:8: unsupported: ambiguous notation -/ /-- Entourages are neighborhoods of the diagonal. 
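Concretely, every entourage `V ∈ 𝓤 α` is a neighborhood of each diagonal point `(x, x)`; a usage sketch (it only unfolds `≤` on filters):

  example {x : α} {V : Set (α × α)} (hV : V ∈ 𝓤 α) : V ∈ 𝓝 (x, x) :=
    nhds_le_uniformity x hV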
-/ theorem nhds_le_uniformity (x : α) : 𝓝 (x, x) ≤ 𝓤 α := by intro V V_in rcases comp_symm_mem_uniformity_sets V_in with ⟨w, w_in, w_symm, w_sub⟩ have : ball x w ×ˢ ball x w ∈ 𝓝 (x, x) := by rw [nhds_prod_eq] exact prod_mem_prod (ball_mem_nhds x w_in) (ball_mem_nhds x w_in) apply mem_of_superset this rintro ⟨u, v⟩ ⟨u_in, v_in⟩ exact w_sub (mem_comp_of_mem_ball w_symm u_in v_in) #align nhds_le_uniformity nhds_le_uniformity /- warning: supr_nhds_le_uniformity -> supᵢ_nhds_le_uniformity is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α], LE.le.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (Preorder.toLE.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (PartialOrder.toPreorder.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.partialOrder.{u1} (Prod.{u1, u1} α α)))) (supᵢ.{u1, succ u1} (Filter.{u1} (Prod.{u1, u1} α α)) (ConditionallyCompleteLattice.toHasSup.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (CompleteLattice.toConditionallyCompleteLattice.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.completeLattice.{u1} (Prod.{u1, u1} α α)))) α (fun (x : α) => nhds.{u1} (Prod.{u1, u1} α α) (Prod.topologicalSpace.{u1, u1} α α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u1} α _inst_1)) (Prod.mk.{u1, u1} α α x x))) (uniformity.{u1} α _inst_1) but is expected to have type forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α], LE.le.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (Preorder.toLE.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (PartialOrder.toPreorder.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.instPartialOrderFilter.{u1} (Prod.{u1, u1} α α)))) (supᵢ.{u1, succ u1} (Filter.{u1} (Prod.{u1, u1} α α)) (ConditionallyCompleteLattice.toSupSet.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (CompleteLattice.toConditionallyCompleteLattice.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.instCompleteLatticeFilter.{u1} (Prod.{u1, u1} α α)))) α (fun (x : α) => nhds.{u1} (Prod.{u1, u1} α α) (instTopologicalSpaceProd.{u1, u1} α α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u1} α _inst_1)) (Prod.mk.{u1, u1} α α x x))) (uniformity.{u1} α _inst_1) Case conversion may be inaccurate. Consider using '#align supr_nhds_le_uniformity supᵢ_nhds_le_uniformityₓ'. -/ /-- Entourages are neighborhoods of the diagonal. 
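This is the indexed-supremum form of `nhds_le_uniformity`; each pointwise bound can be recovered from it (an illustrative sketch):

  example (x : α) : 𝓝 (x, x) ≤ 𝓤 α :=
    (le_supᵢ (fun x : α => 𝓝 (x, x)) x).trans supᵢ_nhds_le_uniformity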
-/ theorem supᵢ_nhds_le_uniformity : (⨆ x : α, 𝓝 (x, x)) ≤ 𝓤 α := supᵢ_le nhds_le_uniformity #align supr_nhds_le_uniformity supᵢ_nhds_le_uniformity /- warning: nhds_set_diagonal_le_uniformity -> nhdsSet_diagonal_le_uniformity is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α], LE.le.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (Preorder.toLE.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (PartialOrder.toPreorder.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.partialOrder.{u1} (Prod.{u1, u1} α α)))) (nhdsSet.{u1} (Prod.{u1, u1} α α) (Prod.topologicalSpace.{u1, u1} α α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u1} α _inst_1)) (Set.diagonal.{u1} α)) (uniformity.{u1} α _inst_1) but is expected to have type forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α], LE.le.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (Preorder.toLE.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (PartialOrder.toPreorder.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.instPartialOrderFilter.{u1} (Prod.{u1, u1} α α)))) (nhdsSet.{u1} (Prod.{u1, u1} α α) (instTopologicalSpaceProd.{u1, u1} α α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u1} α _inst_1)) (Set.diagonal.{u1} α)) (uniformity.{u1} α _inst_1) Case conversion may be inaccurate. Consider using '#align nhds_set_diagonal_le_uniformity nhdsSet_diagonal_le_uniformityₓ'. -/ /-- Entourages are neighborhoods of the diagonal. -/ theorem nhdsSet_diagonal_le_uniformity : 𝓝ˢ (diagonal α) ≤ 𝓤 α := (nhdsSet_diagonal α).trans_le supᵢ_nhds_le_uniformity #align nhds_set_diagonal_le_uniformity nhdsSet_diagonal_le_uniformity /-! ### Closure and interior in uniform spaces -/ /- warning: closure_eq_uniformity -> closure_eq_uniformity is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] (s : Set.{u1} (Prod.{u1, u1} α α)), Eq.{succ u1} (Set.{u1} (Prod.{u1, u1} α α)) (closure.{u1} (Prod.{u1, u1} α α) (Prod.topologicalSpace.{u1, u1} α α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u1} α _inst_1)) s) (Set.interᵢ.{u1, succ u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) (fun (V : Set.{u1} (Prod.{u1, u1} α α)) => Set.interᵢ.{u1, 0} (Prod.{u1, u1} α α) (Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Set.{u1} (Set.{u1} (Prod.{u1, u1} α α))) (Set.hasMem.{u1} (Set.{u1} (Prod.{u1, u1} α α))) V (setOf.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (fun (V : Set.{u1} (Prod.{u1, u1} α α)) => And (Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) V (uniformity.{u1} α _inst_1)) (SymmetricRel.{u1} α V)))) (fun (H : Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Set.{u1} (Set.{u1} (Prod.{u1, u1} α α))) (Set.hasMem.{u1} (Set.{u1} (Prod.{u1, u1} α α))) V (setOf.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (fun (V : Set.{u1} (Prod.{u1, u1} α α)) => And (Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) V (uniformity.{u1} α _inst_1)) (SymmetricRel.{u1} α V)))) => compRel.{u1} α (compRel.{u1} α V s) V))) but is expected to have type forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] (s : Set.{u1} (Prod.{u1, u1} α α)), Eq.{succ u1} (Set.{u1} (Prod.{u1, u1} α α)) (closure.{u1} (Prod.{u1, u1} α α) (instTopologicalSpaceProd.{u1, u1} α α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u1} α _inst_1)) s) (Set.interᵢ.{u1, succ 
u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) (fun (V : Set.{u1} (Prod.{u1, u1} α α)) => Set.interᵢ.{u1, 0} (Prod.{u1, u1} α α) (Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Set.{u1} (Set.{u1} (Prod.{u1, u1} α α))) (Set.instMembershipSet.{u1} (Set.{u1} (Prod.{u1, u1} α α))) V (setOf.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (fun (V : Set.{u1} (Prod.{u1, u1} α α)) => And (Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (instMembershipSetFilter.{u1} (Prod.{u1, u1} α α)) V (uniformity.{u1} α _inst_1)) (SymmetricRel.{u1} α V)))) (fun (H : Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Set.{u1} (Set.{u1} (Prod.{u1, u1} α α))) (Set.instMembershipSet.{u1} (Set.{u1} (Prod.{u1, u1} α α))) V (setOf.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (fun (V : Set.{u1} (Prod.{u1, u1} α α)) => And (Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (instMembershipSetFilter.{u1} (Prod.{u1, u1} α α)) V (uniformity.{u1} α _inst_1)) (SymmetricRel.{u1} α V)))) => compRel.{u1} α (compRel.{u1} α V s) V))) Case conversion may be inaccurate. Consider using '#align closure_eq_uniformity closure_eq_uniformityₓ'. -/ theorem closure_eq_uniformity (s : Set <| α × α) : closure s = ⋂ V ∈ { V | V ∈ 𝓤 α ∧ SymmetricRel V }, V ○ s ○ V := by ext ⟨x, y⟩ simp (config := { contextual := true }) only [mem_closure_iff_nhds_basis (UniformSpace.hasBasis_nhds_prod x y), mem_Inter, mem_set_of_eq, and_imp, mem_comp_comp, exists_prop, ← mem_inter_iff, inter_comm, Set.Nonempty] #align closure_eq_uniformity closure_eq_uniformity /- warning: uniformity_has_basis_closed -> uniformity_hasBasis_closed is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α], Filter.HasBasis.{u1, succ u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) (uniformity.{u1} α _inst_1) (fun (V : Set.{u1} (Prod.{u1, u1} α α)) => And (Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) V (uniformity.{u1} α _inst_1)) (IsClosed.{u1} (Prod.{u1, u1} α α) (Prod.topologicalSpace.{u1, u1} α α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u1} α _inst_1)) V)) (id.{succ u1} (Set.{u1} (Prod.{u1, u1} α α))) but is expected to have type forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α], Filter.HasBasis.{u1, succ u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) (uniformity.{u1} α _inst_1) (fun (V : Set.{u1} (Prod.{u1, u1} α α)) => And (Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (instMembershipSetFilter.{u1} (Prod.{u1, u1} α α)) V (uniformity.{u1} α _inst_1)) (IsClosed.{u1} (Prod.{u1, u1} α α) (instTopologicalSpaceProd.{u1, u1} α α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u1} α _inst_1)) V)) (id.{succ u1} (Set.{u1} (Prod.{u1, u1} α α))) Case conversion may be inaccurate. Consider using '#align uniformity_has_basis_closed uniformity_hasBasis_closedₓ'. 
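A typical way the basis below is consumed (a sketch, mirroring the proof of `mem_uniformity_isClosed` further down): given a hypothetical `hs : s ∈ 𝓤 α`, one extracts a closed entourage contained in `s` via

  obtain ⟨t, ⟨t_mem, t_closed⟩, t_sub⟩ := uniformity_hasBasis_closed.mem_iff.1 hs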
-/ theorem uniformity_hasBasis_closed : HasBasis (𝓤 α) (fun V : Set (α × α) => V ∈ 𝓤 α ∧ IsClosed V) id := by refine' Filter.hasBasis_self.2 fun t h => _ rcases comp_comp_symm_mem_uniformity_sets h with ⟨w, w_in, w_symm, r⟩ refine' ⟨closure w, mem_of_superset w_in subset_closure, isClosed_closure, _⟩ refine' subset.trans _ r rw [closure_eq_uniformity] apply Inter_subset_of_subset apply Inter_subset exact ⟨w_in, w_symm⟩ #align uniformity_has_basis_closed uniformity_hasBasis_closed #print uniformity_eq_uniformity_closure /- theorem uniformity_eq_uniformity_closure : 𝓤 α = (𝓤 α).lift' closure := Eq.symm <| uniformity_hasBasis_closed.lift'_closure_eq_self fun _ => And.right #align uniformity_eq_uniformity_closure uniformity_eq_uniformity_closure -/ /- warning: filter.has_basis.uniformity_closure -> Filter.HasBasis.uniformity_closure is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {ι : Sort.{u2}} [_inst_1 : UniformSpace.{u1} α] {p : ι -> Prop} {U : ι -> (Set.{u1} (Prod.{u1, u1} α α))}, (Filter.HasBasis.{u1, u2} (Prod.{u1, u1} α α) ι (uniformity.{u1} α _inst_1) p U) -> (Filter.HasBasis.{u1, u2} (Prod.{u1, u1} α α) ι (uniformity.{u1} α _inst_1) p (fun (i : ι) => closure.{u1} (Prod.{u1, u1} α α) (Prod.topologicalSpace.{u1, u1} α α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u1} α _inst_1)) (U i))) but is expected to have type forall {α : Type.{u2}} {ι : Sort.{u1}} [_inst_1 : UniformSpace.{u2} α] {p : ι -> Prop} {U : ι -> (Set.{u2} (Prod.{u2, u2} α α))}, (Filter.HasBasis.{u2, u1} (Prod.{u2, u2} α α) ι (uniformity.{u2} α _inst_1) p U) -> (Filter.HasBasis.{u2, u1} (Prod.{u2, u2} α α) ι (uniformity.{u2} α _inst_1) p (fun (i : ι) => closure.{u2} (Prod.{u2, u2} α α) (instTopologicalSpaceProd.{u2, u2} α α (UniformSpace.toTopologicalSpace.{u2} α _inst_1) (UniformSpace.toTopologicalSpace.{u2} α _inst_1)) (U i))) Case conversion may be inaccurate. Consider using '#align filter.has_basis.uniformity_closure Filter.HasBasis.uniformity_closureₓ'. 
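For instance, applied to the canonical basis `(𝓤 α).basis_sets`, the lemma below specialises to `uniformity_hasBasis_closure` stated afterwards, whose proof is exactly that application.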
-/ theorem Filter.HasBasis.uniformity_closure {p : ι → Prop} {U : ι → Set (α × α)} (h : (𝓤 α).HasBasis p U) : (𝓤 α).HasBasis p fun i => closure (U i) := (@uniformity_eq_uniformity_closure α _).symm ▸ h.lift'_closure #align filter.has_basis.uniformity_closure Filter.HasBasis.uniformity_closure /- warning: uniformity_has_basis_closure -> uniformity_hasBasis_closure is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α], Filter.HasBasis.{u1, succ u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) (uniformity.{u1} α _inst_1) (fun (V : Set.{u1} (Prod.{u1, u1} α α)) => Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) V (uniformity.{u1} α _inst_1)) (closure.{u1} (Prod.{u1, u1} α α) (Prod.topologicalSpace.{u1, u1} α α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u1} α _inst_1))) but is expected to have type forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α], Filter.HasBasis.{u1, succ u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) (uniformity.{u1} α _inst_1) (fun (V : Set.{u1} (Prod.{u1, u1} α α)) => Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (instMembershipSetFilter.{u1} (Prod.{u1, u1} α α)) V (uniformity.{u1} α _inst_1)) (closure.{u1} (Prod.{u1, u1} α α) (instTopologicalSpaceProd.{u1, u1} α α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u1} α _inst_1))) Case conversion may be inaccurate. Consider using '#align uniformity_has_basis_closure uniformity_hasBasis_closureₓ'. -/ /-- Closed entourages form a basis of the uniformity filter. -/ theorem uniformity_hasBasis_closure : HasBasis (𝓤 α) (fun V : Set (α × α) => V ∈ 𝓤 α) closure := (𝓤 α).basis_sets.uniformity_closure #align uniformity_has_basis_closure uniformity_hasBasis_closure /- warning: closure_eq_inter_uniformity -> closure_eq_inter_uniformity is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] {t : Set.{u1} (Prod.{u1, u1} α α)}, Eq.{succ u1} (Set.{u1} (Prod.{u1, u1} α α)) (closure.{u1} (Prod.{u1, u1} α α) (Prod.topologicalSpace.{u1, u1} α α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u1} α _inst_1)) t) (Set.interᵢ.{u1, succ u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) (fun (d : Set.{u1} (Prod.{u1, u1} α α)) => Set.interᵢ.{u1, 0} (Prod.{u1, u1} α α) (Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) d (uniformity.{u1} α _inst_1)) (fun (H : Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) d (uniformity.{u1} α _inst_1)) => compRel.{u1} α d (compRel.{u1} α t d)))) but is expected to have type forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] {t : Set.{u1} (Prod.{u1, u1} α α)}, Eq.{succ u1} (Set.{u1} (Prod.{u1, u1} α α)) (closure.{u1} (Prod.{u1, u1} α α) (instTopologicalSpaceProd.{u1, u1} α α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u1} α _inst_1)) t) (Set.interᵢ.{u1, succ u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) (fun (d : Set.{u1} (Prod.{u1, u1} α α)) => Set.interᵢ.{u1, 0} (Prod.{u1, u1} α α) (Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (instMembershipSetFilter.{u1} (Prod.{u1, u1} α α)) d (uniformity.{u1} α _inst_1)) (fun 
(H : Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (instMembershipSetFilter.{u1} (Prod.{u1, u1} α α)) d (uniformity.{u1} α _inst_1)) => compRel.{u1} α d (compRel.{u1} α t d)))) Case conversion may be inaccurate. Consider using '#align closure_eq_inter_uniformity closure_eq_inter_uniformityₓ'. -/ theorem closure_eq_inter_uniformity {t : Set (α × α)} : closure t = ⋂ d ∈ 𝓤 α, d ○ (t ○ d) := calc closure t = ⋂ (V) (hV : V ∈ 𝓤 α ∧ SymmetricRel V), V ○ t ○ V := closure_eq_uniformity t _ = ⋂ V ∈ 𝓤 α, V ○ t ○ V := (Eq.symm <| UniformSpace.hasBasis_symmetric.binterᵢ_mem fun V₁ V₂ hV => compRel_mono (compRel_mono hV Subset.rfl) hV) _ = ⋂ V ∈ 𝓤 α, V ○ (t ○ V) := by simp only [compRel_assoc] #align closure_eq_inter_uniformity closure_eq_inter_uniformity #print uniformity_eq_uniformity_interior /- theorem uniformity_eq_uniformity_interior : 𝓤 α = (𝓤 α).lift' interior := le_antisymm (le_infᵢ fun d => le_infᵢ fun hd => by let ⟨s, hs, hs_comp⟩ := (mem_lift'_sets <| monotone_id.compRel <| monotone_id.compRel monotone_id).mp (comp_le_uniformity3 hd) let ⟨t, ht, hst, ht_comp⟩ := nhdset_of_mem_uniformity s hs have : s ⊆ interior d := calc s ⊆ t := hst _ ⊆ interior d := ht.subset_interior_iff.mpr fun x (hx : x ∈ t) => let ⟨x, y, h₁, h₂, h₃⟩ := ht_comp hx hs_comp ⟨x, h₁, y, h₂, h₃⟩ have : interior d ∈ 𝓤 α := by filter_upwards [hs]using this simp [this]) fun s hs => ((𝓤 α).lift' interior).sets_of_superset (mem_lift' hs) interior_subset #align uniformity_eq_uniformity_interior uniformity_eq_uniformity_interior -/ /- warning: interior_mem_uniformity -> interior_mem_uniformity is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] {s : Set.{u1} (Prod.{u1, u1} α α)}, (Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) s (uniformity.{u1} α _inst_1)) -> (Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) (interior.{u1} (Prod.{u1, u1} α α) (Prod.topologicalSpace.{u1, u1} α α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u1} α _inst_1)) s) (uniformity.{u1} α _inst_1)) but is expected to have type forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] {s : Set.{u1} (Prod.{u1, u1} α α)}, (Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (instMembershipSetFilter.{u1} (Prod.{u1, u1} α α)) s (uniformity.{u1} α _inst_1)) -> (Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (instMembershipSetFilter.{u1} (Prod.{u1, u1} α α)) (interior.{u1} (Prod.{u1, u1} α α) (instTopologicalSpaceProd.{u1, u1} α α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u1} α _inst_1)) s) (uniformity.{u1} α _inst_1)) Case conversion may be inaccurate. Consider using '#align interior_mem_uniformity interior_mem_uniformityₓ'. 
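Combined with `isOpen_interior` and `interior_subset`, the lemma below yields the open analogue of `mem_uniformity_isClosed` (a sketch, mirroring the proof of `uniformity_hasBasis_open` further down):

  example {s : Set (α × α)} (hs : s ∈ 𝓤 α) : ∃ t ∈ 𝓤 α, IsOpen t ∧ t ⊆ s :=
    ⟨interior s, interior_mem_uniformity hs, isOpen_interior, interior_subset⟩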
-/ theorem interior_mem_uniformity {s : Set (α × α)} (hs : s ∈ 𝓤 α) : interior s ∈ 𝓤 α := by rw [uniformity_eq_uniformity_interior] <;> exact mem_lift' hs #align interior_mem_uniformity interior_mem_uniformity /- warning: mem_uniformity_is_closed -> mem_uniformity_isClosed is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] {s : Set.{u1} (Prod.{u1, u1} α α)}, (Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) s (uniformity.{u1} α _inst_1)) -> (Exists.{succ u1} (Set.{u1} (Prod.{u1, u1} α α)) (fun (t : Set.{u1} (Prod.{u1, u1} α α)) => Exists.{0} (Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) t (uniformity.{u1} α _inst_1)) (fun (H : Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) t (uniformity.{u1} α _inst_1)) => And (IsClosed.{u1} (Prod.{u1, u1} α α) (Prod.topologicalSpace.{u1, u1} α α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u1} α _inst_1)) t) (HasSubset.Subset.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (Set.hasSubset.{u1} (Prod.{u1, u1} α α)) t s)))) but is expected to have type forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] {s : Set.{u1} (Prod.{u1, u1} α α)}, (Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (instMembershipSetFilter.{u1} (Prod.{u1, u1} α α)) s (uniformity.{u1} α _inst_1)) -> (Exists.{succ u1} (Set.{u1} (Prod.{u1, u1} α α)) (fun (t : Set.{u1} (Prod.{u1, u1} α α)) => And (Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (instMembershipSetFilter.{u1} (Prod.{u1, u1} α α)) t (uniformity.{u1} α _inst_1)) (And (IsClosed.{u1} (Prod.{u1, u1} α α) (instTopologicalSpaceProd.{u1, u1} α α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u1} α _inst_1)) t) (HasSubset.Subset.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (Set.instHasSubsetSet.{u1} (Prod.{u1, u1} α α)) t s)))) Case conversion may be inaccurate. Consider using '#align mem_uniformity_is_closed mem_uniformity_isClosedₓ'. 
-/ theorem mem_uniformity_isClosed {s : Set (α × α)} (h : s ∈ 𝓤 α) : ∃ t ∈ 𝓤 α, IsClosed t ∧ t ⊆ s := let ⟨t, ⟨ht_mem, htc⟩, hts⟩ := uniformity_hasBasis_closed.mem_iff.1 h ⟨t, ht_mem, htc, hts⟩ #align mem_uniformity_is_closed mem_uniformity_isClosed /- warning: is_open_iff_open_ball_subset -> isOpen_iff_open_ball_subset is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] {s : Set.{u1} α}, Iff (IsOpen.{u1} α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) s) (forall (x : α), (Membership.Mem.{u1, u1} α (Set.{u1} α) (Set.hasMem.{u1} α) x s) -> (Exists.{succ u1} (Set.{u1} (Prod.{u1, u1} α α)) (fun (V : Set.{u1} (Prod.{u1, u1} α α)) => Exists.{0} (Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) V (uniformity.{u1} α _inst_1)) (fun (H : Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) V (uniformity.{u1} α _inst_1)) => And (IsOpen.{u1} (Prod.{u1, u1} α α) (Prod.topologicalSpace.{u1, u1} α α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u1} α _inst_1)) V) (HasSubset.Subset.{u1} (Set.{u1} α) (Set.hasSubset.{u1} α) (UniformSpace.ball.{u1} α x V) s))))) but is expected to have type forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] {s : Set.{u1} α}, Iff (IsOpen.{u1} α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) s) (forall (x : α), (Membership.mem.{u1, u1} α (Set.{u1} α) (Set.instMembershipSet.{u1} α) x s) -> (Exists.{succ u1} (Set.{u1} (Prod.{u1, u1} α α)) (fun (V : Set.{u1} (Prod.{u1, u1} α α)) => And (Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (instMembershipSetFilter.{u1} (Prod.{u1, u1} α α)) V (uniformity.{u1} α _inst_1)) (And (IsOpen.{u1} (Prod.{u1, u1} α α) (instTopologicalSpaceProd.{u1, u1} α α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u1} α _inst_1)) V) (HasSubset.Subset.{u1} (Set.{u1} α) (Set.instHasSubsetSet.{u1} α) (UniformSpace.ball.{u1} α x V) s))))) Case conversion may be inaccurate. Consider using '#align is_open_iff_open_ball_subset isOpen_iff_open_ball_subsetₓ'. -/ theorem isOpen_iff_open_ball_subset {s : Set α} : IsOpen s ↔ ∀ x ∈ s, ∃ V ∈ 𝓤 α, IsOpen V ∧ ball x V ⊆ s := by rw [isOpen_iff_ball_subset] constructor <;> intro h x hx · obtain ⟨V, hV, hV'⟩ := h x hx exact ⟨interior V, interior_mem_uniformity hV, isOpen_interior, (ball_mono interior_subset x).trans hV'⟩ · obtain ⟨V, hV, -, hV'⟩ := h x hx exact ⟨V, hV, hV'⟩ #align is_open_iff_open_ball_subset isOpen_iff_open_ball_subset #print Dense.bunionᵢ_uniformity_ball /- /-- The uniform neighborhoods of all points of a dense set cover the whole space. -/ theorem Dense.bunionᵢ_uniformity_ball {s : Set α} {U : Set (α × α)} (hs : Dense s) (hU : U ∈ 𝓤 α) : (⋃ x ∈ s, ball x U) = univ := by refine' Union₂_eq_univ_iff.2 fun y => _ rcases hs.inter_nhds_nonempty (mem_nhds_right y hU) with ⟨x, hxs, hxy : (x, y) ∈ U⟩ exact ⟨x, hxs, hxy⟩ #align dense.bUnion_uniformity_ball Dense.bunionᵢ_uniformity_ball -/ /-! 
### Uniformity bases -/ /- warning: uniformity_has_basis_open -> uniformity_hasBasis_open is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α], Filter.HasBasis.{u1, succ u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) (uniformity.{u1} α _inst_1) (fun (V : Set.{u1} (Prod.{u1, u1} α α)) => And (Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) V (uniformity.{u1} α _inst_1)) (IsOpen.{u1} (Prod.{u1, u1} α α) (Prod.topologicalSpace.{u1, u1} α α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u1} α _inst_1)) V)) (id.{succ u1} (Set.{u1} (Prod.{u1, u1} α α))) but is expected to have type forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α], Filter.HasBasis.{u1, succ u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) (uniformity.{u1} α _inst_1) (fun (V : Set.{u1} (Prod.{u1, u1} α α)) => And (Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (instMembershipSetFilter.{u1} (Prod.{u1, u1} α α)) V (uniformity.{u1} α _inst_1)) (IsOpen.{u1} (Prod.{u1, u1} α α) (instTopologicalSpaceProd.{u1, u1} α α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u1} α _inst_1)) V)) (id.{succ u1} (Set.{u1} (Prod.{u1, u1} α α))) Case conversion may be inaccurate. Consider using '#align uniformity_has_basis_open uniformity_hasBasis_openₓ'. -/ /-- Open elements of `𝓤 α` form a basis of `𝓤 α`. -/ theorem uniformity_hasBasis_open : HasBasis (𝓤 α) (fun V : Set (α × α) => V ∈ 𝓤 α ∧ IsOpen V) id := hasBasis_self.2 fun s hs => ⟨interior s, interior_mem_uniformity hs, isOpen_interior, interior_subset⟩ #align uniformity_has_basis_open uniformity_hasBasis_open /- warning: filter.has_basis.mem_uniformity_iff -> Filter.HasBasis.mem_uniformity_iff is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : UniformSpace.{u1} α] {p : β -> Prop} {s : β -> (Set.{u1} (Prod.{u1, u1} α α))}, (Filter.HasBasis.{u1, succ u2} (Prod.{u1, u1} α α) β (uniformity.{u1} α _inst_1) p s) -> (forall {t : Set.{u1} (Prod.{u1, u1} α α)}, Iff (Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) t (uniformity.{u1} α _inst_1)) (Exists.{succ u2} β (fun (i : β) => Exists.{0} (p i) (fun (hi : p i) => forall (a : α) (b : α), (Membership.Mem.{u1, u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) (Set.hasMem.{u1} (Prod.{u1, u1} α α)) (Prod.mk.{u1, u1} α α a b) (s i)) -> (Membership.Mem.{u1, u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) (Set.hasMem.{u1} (Prod.{u1, u1} α α)) (Prod.mk.{u1, u1} α α a b) t))))) but is expected to have type forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : UniformSpace.{u1} α] {p : β -> Prop} {s : β -> (Set.{u1} (Prod.{u1, u1} α α))}, (Filter.HasBasis.{u1, succ u2} (Prod.{u1, u1} α α) β (uniformity.{u1} α _inst_1) p s) -> (forall {t : Set.{u1} (Prod.{u1, u1} α α)}, Iff (Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (instMembershipSetFilter.{u1} (Prod.{u1, u1} α α)) t (uniformity.{u1} α _inst_1)) (Exists.{succ u2} β (fun (i : β) => And (p i) (forall (a : α) (b : α), (Membership.mem.{u1, u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) (Set.instMembershipSet.{u1} (Prod.{u1, u1} α α)) (Prod.mk.{u1, u1} α α a b) (s i)) -> (Membership.mem.{u1, u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) 
(Set.instMembershipSet.{u1} (Prod.{u1, u1} α α)) (Prod.mk.{u1, u1} α α a b) t))))) Case conversion may be inaccurate. Consider using '#align filter.has_basis.mem_uniformity_iff Filter.HasBasis.mem_uniformity_iffₓ'. -/ theorem Filter.HasBasis.mem_uniformity_iff {p : β → Prop} {s : β → Set (α × α)} (h : (𝓤 α).HasBasis p s) {t : Set (α × α)} : t ∈ 𝓤 α ↔ ∃ (i : _)(hi : p i), ∀ a b, (a, b) ∈ s i → (a, b) ∈ t := h.mem_iff.trans <| by simp only [Prod.forall, subset_def] #align filter.has_basis.mem_uniformity_iff Filter.HasBasis.mem_uniformity_iff /- warning: uniformity_has_basis_open_symmetric -> uniformity_hasBasis_open_symmetric is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α], Filter.HasBasis.{u1, succ u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) (uniformity.{u1} α _inst_1) (fun (V : Set.{u1} (Prod.{u1, u1} α α)) => And (Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) V (uniformity.{u1} α _inst_1)) (And (IsOpen.{u1} (Prod.{u1, u1} α α) (Prod.topologicalSpace.{u1, u1} α α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u1} α _inst_1)) V) (SymmetricRel.{u1} α V))) (id.{succ u1} (Set.{u1} (Prod.{u1, u1} α α))) but is expected to have type forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α], Filter.HasBasis.{u1, succ u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) (uniformity.{u1} α _inst_1) (fun (V : Set.{u1} (Prod.{u1, u1} α α)) => And (Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (instMembershipSetFilter.{u1} (Prod.{u1, u1} α α)) V (uniformity.{u1} α _inst_1)) (And (IsOpen.{u1} (Prod.{u1, u1} α α) (instTopologicalSpaceProd.{u1, u1} α α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u1} α _inst_1)) V) (SymmetricRel.{u1} α V))) (id.{succ u1} (Set.{u1} (Prod.{u1, u1} α α))) Case conversion may be inaccurate. Consider using '#align uniformity_has_basis_open_symmetric uniformity_hasBasis_open_symmetricₓ'. -/ /-- Open elements `s : set (α × α)` of `𝓤 α` such that `(x, y) ∈ s ↔ (y, x) ∈ s` form a basis of `𝓤 α`. 
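Equivalently: open *symmetric* entourages (in the sense of `SymmetricRel`) form a basis of `𝓤 α`. A usage sketch, for a hypothetical `hs : s ∈ 𝓤 α`:

  obtain ⟨t, ⟨t_mem, t_open, t_symm⟩, t_sub⟩ :=
    uniformity_hasBasis_open_symmetric.mem_iff.1 hs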
-/ theorem uniformity_hasBasis_open_symmetric : HasBasis (𝓤 α) (fun V : Set (α × α) => V ∈ 𝓤 α ∧ IsOpen V ∧ SymmetricRel V) id := by simp only [← and_assoc'] refine' uniformity_has_basis_open.restrict fun s hs => ⟨symmetrizeRel s, _⟩ exact ⟨⟨symmetrize_mem_uniformity hs.1, IsOpen.inter hs.2 (hs.2.Preimage continuous_swap)⟩, symmetric_symmetrizeRel s, symmetrizeRel_subset_self s⟩ #align uniformity_has_basis_open_symmetric uniformity_hasBasis_open_symmetric /- warning: comp_open_symm_mem_uniformity_sets -> comp_open_symm_mem_uniformity_sets is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] {s : Set.{u1} (Prod.{u1, u1} α α)}, (Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) s (uniformity.{u1} α _inst_1)) -> (Exists.{succ u1} (Set.{u1} (Prod.{u1, u1} α α)) (fun (t : Set.{u1} (Prod.{u1, u1} α α)) => Exists.{0} (Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) t (uniformity.{u1} α _inst_1)) (fun (H : Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) t (uniformity.{u1} α _inst_1)) => And (IsOpen.{u1} (Prod.{u1, u1} α α) (Prod.topologicalSpace.{u1, u1} α α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u1} α _inst_1)) t) (And (SymmetricRel.{u1} α t) (HasSubset.Subset.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (Set.hasSubset.{u1} (Prod.{u1, u1} α α)) (compRel.{u1} α t t) s))))) but is expected to have type forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] {s : Set.{u1} (Prod.{u1, u1} α α)}, (Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (instMembershipSetFilter.{u1} (Prod.{u1, u1} α α)) s (uniformity.{u1} α _inst_1)) -> (Exists.{succ u1} (Set.{u1} (Prod.{u1, u1} α α)) (fun (t : Set.{u1} (Prod.{u1, u1} α α)) => And (Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (instMembershipSetFilter.{u1} (Prod.{u1, u1} α α)) t (uniformity.{u1} α _inst_1)) (And (IsOpen.{u1} (Prod.{u1, u1} α α) (instTopologicalSpaceProd.{u1, u1} α α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u1} α _inst_1)) t) (And (SymmetricRel.{u1} α t) (HasSubset.Subset.{u1} (Set.{u1} (Prod.{u1, u1} α α)) (Set.instHasSubsetSet.{u1} (Prod.{u1, u1} α α)) (compRel.{u1} α t t) s))))) Case conversion may be inaccurate. Consider using '#align comp_open_symm_mem_uniformity_sets comp_open_symm_mem_uniformity_setsₓ'. 
-/ theorem comp_open_symm_mem_uniformity_sets {s : Set (α × α)} (hs : s ∈ 𝓤 α) : ∃ t ∈ 𝓤 α, IsOpen t ∧ SymmetricRel t ∧ t ○ t ⊆ s := by obtain ⟨t, ht₁, ht₂⟩ := comp_mem_uniformity_sets hs obtain ⟨u, ⟨hu₁, hu₂, hu₃⟩, hu₄ : u ⊆ t⟩ := uniformity_has_basis_open_symmetric.mem_iff.mp ht₁ exact ⟨u, hu₁, hu₂, hu₃, (compRel_mono hu₄ hu₄).trans ht₂⟩ #align comp_open_symm_mem_uniformity_sets comp_open_symm_mem_uniformity_sets section variable (α) #print UniformSpace.has_seq_basis /- theorem UniformSpace.has_seq_basis [IsCountablyGenerated <| 𝓤 α] : ∃ V : ℕ → Set (α × α), HasAntitoneBasis (𝓤 α) V ∧ ∀ n, SymmetricRel (V n) := let ⟨U, hsym, hbasis⟩ := UniformSpace.hasBasis_symmetric.exists_antitone_subbasis ⟨U, hbasis, fun n => (hsym n).2⟩ #align uniform_space.has_seq_basis UniformSpace.has_seq_basis -/ end /- warning: filter.has_basis.bInter_bUnion_ball -> Filter.HasBasis.binterᵢ_bunionᵢ_ball is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {ι : Sort.{u2}} [_inst_1 : UniformSpace.{u1} α] {p : ι -> Prop} {U : ι -> (Set.{u1} (Prod.{u1, u1} α α))}, (Filter.HasBasis.{u1, u2} (Prod.{u1, u1} α α) ι (uniformity.{u1} α _inst_1) p U) -> (forall (s : Set.{u1} α), Eq.{succ u1} (Set.{u1} α) (Set.interᵢ.{u1, u2} α ι (fun (i : ι) => Set.interᵢ.{u1, 0} α (p i) (fun (hi : p i) => Set.unionᵢ.{u1, succ u1} α α (fun (x : α) => Set.unionᵢ.{u1, 0} α (Membership.Mem.{u1, u1} α (Set.{u1} α) (Set.hasMem.{u1} α) x s) (fun (H : Membership.Mem.{u1, u1} α (Set.{u1} α) (Set.hasMem.{u1} α) x s) => UniformSpace.ball.{u1} α x (U i)))))) (closure.{u1} α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) s)) but is expected to have type forall {α : Type.{u2}} {ι : Sort.{u1}} [_inst_1 : UniformSpace.{u2} α] {p : ι -> Prop} {U : ι -> (Set.{u2} (Prod.{u2, u2} α α))}, (Filter.HasBasis.{u2, u1} (Prod.{u2, u2} α α) ι (uniformity.{u2} α _inst_1) p U) -> (forall (s : Set.{u2} α), Eq.{succ u2} (Set.{u2} α) (Set.interᵢ.{u2, u1} α ι (fun (i : ι) => Set.interᵢ.{u2, 0} α (p i) (fun (hi : p i) => Set.unionᵢ.{u2, succ u2} α α (fun (x : α) => Set.unionᵢ.{u2, 0} α (Membership.mem.{u2, u2} α (Set.{u2} α) (Set.instMembershipSet.{u2} α) x s) (fun (H : Membership.mem.{u2, u2} α (Set.{u2} α) (Set.instMembershipSet.{u2} α) x s) => UniformSpace.ball.{u2} α x (U i)))))) (closure.{u2} α (UniformSpace.toTopologicalSpace.{u2} α _inst_1) s)) Case conversion may be inaccurate. Consider using '#align filter.has_basis.bInter_bUnion_ball Filter.HasBasis.binterᵢ_bunionᵢ_ballₓ'. -/ theorem Filter.HasBasis.binterᵢ_bunionᵢ_ball {p : ι → Prop} {U : ι → Set (α × α)} (h : HasBasis (𝓤 α) p U) (s : Set α) : (⋂ (i) (hi : p i), ⋃ x ∈ s, ball x (U i)) = closure s := by ext x simp [mem_closure_iff_nhds_basis (nhds_basis_uniformity h), ball] #align filter.has_basis.bInter_bUnion_ball Filter.HasBasis.binterᵢ_bunionᵢ_ball /-! ### Uniform continuity -/ #print UniformContinuous /- /-- A function `f : α → β` is *uniformly continuous* if `(f x, f y)` tends to the diagonal as `(x, y)` tends to the diagonal. In other words, if `x` is sufficiently close to `y`, then `f x` is close to `f y` no matter where `x` and `y` are located in `α`. 
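For example, every uniformly continuous map is continuous; a sketch relying on `UniformContinuous.continuous`, which appears later in this development:

  example [UniformSpace β] {f : α → β} (hf : UniformContinuous f) : Continuous f :=
    hf.continuous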
-/ def UniformContinuous [UniformSpace β] (f : α → β) := Tendsto (fun x : α × α => (f x.1, f x.2)) (𝓤 α) (𝓤 β) #align uniform_continuous UniformContinuous -/ /- ./././Mathport/Syntax/Translate/Expr.lean:177:8: unsupported: ambiguous notation -/ #print UniformContinuousOn /- /-- A function `f : α → β` is *uniformly continuous* on `s : set α` if `(f x, f y)` tends to the diagonal as `(x, y)` tends to the diagonal while remaining in `s ×ˢ s`. In other words, if `x` is sufficiently close to `y`, then `f x` is close to `f y` no matter where `x` and `y` are located in `s`.-/ def UniformContinuousOn [UniformSpace β] (f : α → β) (s : Set α) : Prop := Tendsto (fun x : α × α => (f x.1, f x.2)) (𝓤 α ⊓ principal (s ×ˢ s)) (𝓤 β) #align uniform_continuous_on UniformContinuousOn -/ #print uniformContinuous_def /- theorem uniformContinuous_def [UniformSpace β] {f : α → β} : UniformContinuous f ↔ ∀ r ∈ 𝓤 β, { x : α × α | (f x.1, f x.2) ∈ r } ∈ 𝓤 α := Iff.rfl #align uniform_continuous_def uniformContinuous_def -/ #print uniformContinuous_iff_eventually /- theorem uniformContinuous_iff_eventually [UniformSpace β] {f : α → β} : UniformContinuous f ↔ ∀ r ∈ 𝓤 β, ∀ᶠ x : α × α in 𝓤 α, (f x.1, f x.2) ∈ r := Iff.rfl #align uniform_continuous_iff_eventually uniformContinuous_iff_eventually -/ #print uniformContinuousOn_univ /- theorem uniformContinuousOn_univ [UniformSpace β] {f : α → β} : UniformContinuousOn f univ ↔ UniformContinuous f := by rw [UniformContinuousOn, UniformContinuous, univ_prod_univ, principal_univ, inf_top_eq] #align uniform_continuous_on_univ uniformContinuousOn_univ -/ #print uniformContinuous_of_const /- theorem uniformContinuous_of_const [UniformSpace β] {c : α → β} (h : ∀ a b, c a = c b) : UniformContinuous c := have : (fun x : α × α => (c x.fst, c x.snd)) ⁻¹' idRel = univ := eq_univ_iff_forall.2 fun ⟨a, b⟩ => h a b le_trans (map_le_iff_le_comap.2 <| by simp [comap_principal, this, univ_mem]) refl_le_uniformity #align uniform_continuous_of_const uniformContinuous_of_const -/ #print uniformContinuous_id /- theorem uniformContinuous_id : UniformContinuous (@id α) := by simp [UniformContinuous] <;> exact tendsto_id #align uniform_continuous_id uniformContinuous_id -/ #print uniformContinuous_const /- theorem uniformContinuous_const [UniformSpace β] {b : β} : UniformContinuous fun a : α => b := uniformContinuous_of_const fun _ _ => rfl #align uniform_continuous_const uniformContinuous_const -/ #print UniformContinuous.comp /- theorem UniformContinuous.comp [UniformSpace β] [UniformSpace γ] {g : β → γ} {f : α → β} (hg : UniformContinuous g) (hf : UniformContinuous f) : UniformContinuous (g ∘ f) := hg.comp hf #align uniform_continuous.comp UniformContinuous.comp -/ /- warning: filter.has_basis.uniform_continuous_iff -> Filter.HasBasis.uniformContinuous_iff is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {ι : Sort.{u3}} [_inst_1 : UniformSpace.{u1} α] {ι' : Sort.{u4}} [_inst_2 : UniformSpace.{u2} β] {p : ι -> Prop} {s : ι -> (Set.{u1} (Prod.{u1, u1} α α))}, (Filter.HasBasis.{u1, u3} (Prod.{u1, u1} α α) ι (uniformity.{u1} α _inst_1) p s) -> (forall {q : ι' -> Prop} {t : ι' -> (Set.{u2} (Prod.{u2, u2} β β))}, (Filter.HasBasis.{u2, u4} (Prod.{u2, u2} β β) ι' (uniformity.{u2} β _inst_2) q t) -> (forall {f : α -> β}, Iff (UniformContinuous.{u1, u2} α β _inst_1 _inst_2 f) (forall (i : ι'), (q i) -> (Exists.{u3} ι (fun (j : ι) => Exists.{0} (p j) (fun (hj : p j) => forall (x : α) (y : α), (Membership.Mem.{u1, u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) 
(Set.hasMem.{u1} (Prod.{u1, u1} α α)) (Prod.mk.{u1, u1} α α x y) (s j)) -> (Membership.Mem.{u2, u2} (Prod.{u2, u2} β β) (Set.{u2} (Prod.{u2, u2} β β)) (Set.hasMem.{u2} (Prod.{u2, u2} β β)) (Prod.mk.{u2, u2} β β (f x) (f y)) (t i)))))))) but is expected to have type forall {α : Type.{u3}} {β : Type.{u4}} {ι : Sort.{u1}} [_inst_1 : UniformSpace.{u3} α] {ι' : Sort.{u2}} [_inst_2 : UniformSpace.{u4} β] {p : ι -> Prop} {s : ι -> (Set.{u3} (Prod.{u3, u3} α α))}, (Filter.HasBasis.{u3, u1} (Prod.{u3, u3} α α) ι (uniformity.{u3} α _inst_1) p s) -> (forall {q : ι' -> Prop} {t : ι' -> (Set.{u4} (Prod.{u4, u4} β β))}, (Filter.HasBasis.{u4, u2} (Prod.{u4, u4} β β) ι' (uniformity.{u4} β _inst_2) q t) -> (forall {f : α -> β}, Iff (UniformContinuous.{u3, u4} α β _inst_1 _inst_2 f) (forall (i : ι'), (q i) -> (Exists.{u1} ι (fun (j : ι) => And (p j) (forall (x : α) (y : α), (Membership.mem.{u3, u3} (Prod.{u3, u3} α α) (Set.{u3} (Prod.{u3, u3} α α)) (Set.instMembershipSet.{u3} (Prod.{u3, u3} α α)) (Prod.mk.{u3, u3} α α x y) (s j)) -> (Membership.mem.{u4, u4} (Prod.{u4, u4} β β) (Set.{u4} (Prod.{u4, u4} β β)) (Set.instMembershipSet.{u4} (Prod.{u4, u4} β β)) (Prod.mk.{u4, u4} β β (f x) (f y)) (t i)))))))) Case conversion may be inaccurate. Consider using '#align filter.has_basis.uniform_continuous_iff Filter.HasBasis.uniformContinuous_iffₓ'. -/ theorem Filter.HasBasis.uniformContinuous_iff {ι'} [UniformSpace β] {p : ι → Prop} {s : ι → Set (α × α)} (ha : (𝓤 α).HasBasis p s) {q : ι' → Prop} {t : ι' → Set (β × β)} (hb : (𝓤 β).HasBasis q t) {f : α → β} : UniformContinuous f ↔ ∀ (i) (hi : q i), ∃ (j : _)(hj : p j), ∀ x y, (x, y) ∈ s j → (f x, f y) ∈ t i := (ha.tendsto_iffₓ hb).trans <| by simp only [Prod.forall] #align filter.has_basis.uniform_continuous_iff Filter.HasBasis.uniformContinuous_iff /- warning: filter.has_basis.uniform_continuous_on_iff -> Filter.HasBasis.uniformContinuousOn_iff is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {ι : Sort.{u3}} [_inst_1 : UniformSpace.{u1} α] {ι' : Sort.{u4}} [_inst_2 : UniformSpace.{u2} β] {p : ι -> Prop} {s : ι -> (Set.{u1} (Prod.{u1, u1} α α))}, (Filter.HasBasis.{u1, u3} (Prod.{u1, u1} α α) ι (uniformity.{u1} α _inst_1) p s) -> (forall {q : ι' -> Prop} {t : ι' -> (Set.{u2} (Prod.{u2, u2} β β))}, (Filter.HasBasis.{u2, u4} (Prod.{u2, u2} β β) ι' (uniformity.{u2} β _inst_2) q t) -> (forall {f : α -> β} {S : Set.{u1} α}, Iff (UniformContinuousOn.{u1, u2} α β _inst_1 _inst_2 f S) (forall (i : ι'), (q i) -> (Exists.{u3} ι (fun (j : ι) => Exists.{0} (p j) (fun (hj : p j) => forall (x : α), (Membership.Mem.{u1, u1} α (Set.{u1} α) (Set.hasMem.{u1} α) x S) -> (forall (y : α), (Membership.Mem.{u1, u1} α (Set.{u1} α) (Set.hasMem.{u1} α) y S) -> (Membership.Mem.{u1, u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) (Set.hasMem.{u1} (Prod.{u1, u1} α α)) (Prod.mk.{u1, u1} α α x y) (s j)) -> (Membership.Mem.{u2, u2} (Prod.{u2, u2} β β) (Set.{u2} (Prod.{u2, u2} β β)) (Set.hasMem.{u2} (Prod.{u2, u2} β β)) (Prod.mk.{u2, u2} β β (f x) (f y)) (t i))))))))) but is expected to have type forall {α : Type.{u3}} {β : Type.{u4}} {ι : Sort.{u1}} [_inst_1 : UniformSpace.{u3} α] {ι' : Sort.{u2}} [_inst_2 : UniformSpace.{u4} β] {p : ι -> Prop} {s : ι -> (Set.{u3} (Prod.{u3, u3} α α))}, (Filter.HasBasis.{u3, u1} (Prod.{u3, u3} α α) ι (uniformity.{u3} α _inst_1) p s) -> (forall {q : ι' -> Prop} {t : ι' -> (Set.{u4} (Prod.{u4, u4} β β))}, (Filter.HasBasis.{u4, u2} (Prod.{u4, u4} β β) ι' (uniformity.{u4} β _inst_2) q t) -> (forall {f : α -> β} {S : Set.{u3} 
α}, Iff (UniformContinuousOn.{u3, u4} α β _inst_1 _inst_2 f S) (forall (i : ι'), (q i) -> (Exists.{u1} ι (fun (j : ι) => And (p j) (forall (x : α), (Membership.mem.{u3, u3} α (Set.{u3} α) (Set.instMembershipSet.{u3} α) x S) -> (forall (y : α), (Membership.mem.{u3, u3} α (Set.{u3} α) (Set.instMembershipSet.{u3} α) y S) -> (Membership.mem.{u3, u3} (Prod.{u3, u3} α α) (Set.{u3} (Prod.{u3, u3} α α)) (Set.instMembershipSet.{u3} (Prod.{u3, u3} α α)) (Prod.mk.{u3, u3} α α x y) (s j)) -> (Membership.mem.{u4, u4} (Prod.{u4, u4} β β) (Set.{u4} (Prod.{u4, u4} β β)) (Set.instMembershipSet.{u4} (Prod.{u4, u4} β β)) (Prod.mk.{u4, u4} β β (f x) (f y)) (t i))))))))) Case conversion may be inaccurate. Consider using '#align filter.has_basis.uniform_continuous_on_iff Filter.HasBasis.uniformContinuousOn_iffₓ'. -/ /- ./././Mathport/Syntax/Translate/Expr.lean:177:8: unsupported: ambiguous notation -/ /- ./././Mathport/Syntax/Translate/Basic.lean:635:2: warning: expanding binder collection (x y «expr ∈ » S) -/ theorem Filter.HasBasis.uniformContinuousOn_iff {ι'} [UniformSpace β] {p : ι → Prop} {s : ι → Set (α × α)} (ha : (𝓤 α).HasBasis p s) {q : ι' → Prop} {t : ι' → Set (β × β)} (hb : (𝓤 β).HasBasis q t) {f : α → β} {S : Set α} : UniformContinuousOn f S ↔ ∀ (i) (hi : q i), ∃ (j : _)(hj : p j), ∀ (x) (_ : x ∈ S) (y) (_ : y ∈ S), (x, y) ∈ s j → (f x, f y) ∈ t i := ((ha.inf_principal (S ×ˢ S)).tendsto_iffₓ hb).trans <| by simp_rw [Prod.forall, Set.inter_comm (s _), ball_mem_comm, mem_inter_iff, mem_prod, and_imp] #align filter.has_basis.uniform_continuous_on_iff Filter.HasBasis.uniformContinuousOn_iff end UniformSpace open uniformity section Constructions instance : PartialOrder (UniformSpace α) where le t s := t.uniformity ≤ s.uniformity le_antisymm t s h₁ h₂ := uniformSpace_eq <| le_antisymm h₁ h₂ le_refl t := le_rfl le_trans a b c h₁ h₂ := le_trans h₁ h₂ instance : InfSet (UniformSpace α) := ⟨fun s => UniformSpace.ofCore { uniformity := ⨅ u ∈ s, 𝓤[u] refl := le_infᵢ fun u => le_infᵢ fun hu => u.refl symm := le_infᵢ fun u => le_infᵢ fun hu => le_trans (map_mono <| infᵢ_le_of_le _ <| infᵢ_le _ hu) u.symm comp := le_infᵢ fun u => le_infᵢ fun hu => le_trans (lift'_mono (infᵢ_le_of_le _ <| infᵢ_le _ hu) <| le_rfl) u.comp }⟩ /- warning: Inf_le -> infₛ_le is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} [_inst_1 : CompleteSemilatticeInf.{u1} α] {s : Set.{u1} α} {a : α}, (Membership.Mem.{u1, u1} α (Set.{u1} α) (Set.hasMem.{u1} α) a s) -> (LE.le.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α (CompleteSemilatticeInf.toPartialOrder.{u1} α _inst_1))) (InfSet.infₛ.{u1} α (CompleteSemilatticeInf.toHasInf.{u1} α _inst_1) s) a) but is expected to have type forall {α : Type.{u1}} [_inst_1 : CompleteSemilatticeInf.{u1} α] {s : Set.{u1} α} {a : α}, (Membership.mem.{u1, u1} α (Set.{u1} α) (Set.instMembershipSet.{u1} α) a s) -> (LE.le.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α (CompleteSemilatticeInf.toPartialOrder.{u1} α _inst_1))) (InfSet.infₛ.{u1} α (CompleteSemilatticeInf.toInfSet.{u1} α _inst_1) s) a) Case conversion may be inaccurate. Consider using '#align Inf_le infₛ_leₓ'. 
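Note the direction of the order introduced above: `u ≤ v` means `𝓤[u] ≤ 𝓤[v]` as filters, so finer uniform structures are smaller. In particular, the `⊥` instance defined below is the discrete uniformity `𝓟 idRel`, and `⊤` is the indiscrete one.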
-/ private theorem infₛ_le {tt : Set (UniformSpace α)} {t : UniformSpace α} (h : t ∈ tt) : infₛ tt ≤ t := show (⨅ u ∈ tt, 𝓤[u]) ≤ 𝓤[t] from infᵢ₂_le t h #align Inf_le infₛ_le /- warning: le_Inf -> le_infₛ is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} [_inst_1 : CompleteSemilatticeInf.{u1} α] {s : Set.{u1} α} {a : α}, (forall (b : α), (Membership.Mem.{u1, u1} α (Set.{u1} α) (Set.hasMem.{u1} α) b s) -> (LE.le.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α (CompleteSemilatticeInf.toPartialOrder.{u1} α _inst_1))) a b)) -> (LE.le.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α (CompleteSemilatticeInf.toPartialOrder.{u1} α _inst_1))) a (InfSet.infₛ.{u1} α (CompleteSemilatticeInf.toHasInf.{u1} α _inst_1) s)) but is expected to have type forall {α : Type.{u1}} [_inst_1 : CompleteSemilatticeInf.{u1} α] {s : Set.{u1} α} {a : α}, (forall (b : α), (Membership.mem.{u1, u1} α (Set.{u1} α) (Set.instMembershipSet.{u1} α) b s) -> (LE.le.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α (CompleteSemilatticeInf.toPartialOrder.{u1} α _inst_1))) a b)) -> (LE.le.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α (CompleteSemilatticeInf.toPartialOrder.{u1} α _inst_1))) a (InfSet.infₛ.{u1} α (CompleteSemilatticeInf.toInfSet.{u1} α _inst_1) s)) Case conversion may be inaccurate. Consider using '#align le_Inf le_infₛₓ'. -/ private theorem le_infₛ {tt : Set (UniformSpace α)} {t : UniformSpace α} (h : ∀ t' ∈ tt, t ≤ t') : t ≤ infₛ tt := show 𝓤[t] ≤ ⨅ u ∈ tt, 𝓤[u] from le_infᵢ₂ h #align le_Inf le_infₛ instance : Top (UniformSpace α) := ⟨UniformSpace.ofCore { uniformity := ⊤ refl := le_top symm := le_top comp := le_top }⟩ instance : Bot (UniformSpace α) := ⟨{ toTopologicalSpace := ⊥ uniformity := 𝓟 idRel refl := le_rfl symm := by simp [tendsto] comp := lift'_le (mem_principal_self _) <| principal_mono.2 id_compRel.Subset isOpen_uniformity := fun s => by simp (config := { contextual := true }) [isOpen_fold, subset_def, idRel] }⟩ instance : Inf (UniformSpace α) := ⟨fun u₁ u₂ => @UniformSpace.replaceTopology _ (u₁.toTopologicalSpace ⊓ u₂.toTopologicalSpace) (UniformSpace.ofCore { uniformity := u₁.uniformity ⊓ u₂.uniformity refl := le_inf u₁.refl u₂.refl symm := u₁.symm.inf u₂.symm comp := (lift'_inf_le _ _ _).trans <| inf_le_inf u₁.comp u₂.comp }) <| eq_of_nhds_eq_nhds fun a => by simpa only [nhds_inf, nhds_eq_comap_uniformity] using comap_inf.symm⟩ instance : CompleteLattice (UniformSpace α) := { UniformSpace.partialOrder with sup := fun a b => infₛ { x | a ≤ x ∧ b ≤ x } le_sup_left := fun a b => le_infₛ fun _ ⟨h, _⟩ => h le_sup_right := fun a b => le_infₛ fun _ ⟨_, h⟩ => h sup_le := fun a b c h₁ h₂ => infₛ_le ⟨h₁, h₂⟩ inf := (· ⊓ ·) le_inf := fun a b c h₁ h₂ => show a.uniformity ≤ _ from le_inf h₁ h₂ inf_le_left := fun a b => show _ ≤ a.uniformity from inf_le_left inf_le_right := fun a b => show _ ≤ b.uniformity from inf_le_right top := ⊤ le_top := fun a => show a.uniformity ≤ ⊤ from le_top bot := ⊥ bot_le := fun u => u.refl supₛ := fun tt => infₛ { t | ∀ t' ∈ tt, t' ≤ t } le_sup := fun s u h => le_infₛ fun u' h' => h' u h sup_le := fun s u h => infₛ_le h infₛ := infₛ le_inf := fun s a hs => le_infₛ hs inf_le := fun s a ha => infₛ_le ha } /- warning: infi_uniformity -> infᵢ_uniformity is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {ι : Sort.{u2}} {u : ι -> (UniformSpace.{u1} α)}, Eq.{succ u1} (Filter.{u1} (Prod.{u1, u1} α α)) (uniformity.{u1} α (infᵢ.{u1, u2} (UniformSpace.{u1} α) (UniformSpace.hasInf.{u1} α) ι u)) 
(infᵢ.{u1, u2} (Filter.{u1} (Prod.{u1, u1} α α)) (ConditionallyCompleteLattice.toHasInf.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (CompleteLattice.toConditionallyCompleteLattice.{u1} (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.completeLattice.{u1} (Prod.{u1, u1} α α)))) ι (fun (i : ι) => uniformity.{u1} α (u i))) but is expected to have type forall {α : Type.{u2}} {ι : Sort.{u1}} {u : ι -> (UniformSpace.{u2} α)}, Eq.{succ u2} (Filter.{u2} (Prod.{u2, u2} α α)) (uniformity.{u2} α (infᵢ.{u2, u1} (UniformSpace.{u2} α) (instInfSetUniformSpace.{u2} α) ι u)) (infᵢ.{u2, u1} (Filter.{u2} (Prod.{u2, u2} α α)) (ConditionallyCompleteLattice.toInfSet.{u2} (Filter.{u2} (Prod.{u2, u2} α α)) (CompleteLattice.toConditionallyCompleteLattice.{u2} (Filter.{u2} (Prod.{u2, u2} α α)) (Filter.instCompleteLatticeFilter.{u2} (Prod.{u2, u2} α α)))) ι (fun (i : ι) => uniformity.{u2} α (u i))) Case conversion may be inaccurate. Consider using '#align infi_uniformity infᵢ_uniformityₓ'. -/ theorem infᵢ_uniformity {ι : Sort _} {u : ι → UniformSpace α} : 𝓤[infᵢ u] = ⨅ i, 𝓤[u i] := infᵢ_range #align infi_uniformity infᵢ_uniformity #print inf_uniformity /- theorem inf_uniformity {u v : UniformSpace α} : 𝓤[u ⊓ v] = 𝓤[u] ⊓ 𝓤[v] := rfl #align inf_uniformity inf_uniformity -/ #print inhabitedUniformSpace /- instance inhabitedUniformSpace : Inhabited (UniformSpace α) := ⟨⊥⟩ #align inhabited_uniform_space inhabitedUniformSpace -/ #print inhabitedUniformSpaceCore /- instance inhabitedUniformSpaceCore : Inhabited (UniformSpace.Core α) := ⟨@UniformSpace.toCore _ default⟩ #align inhabited_uniform_space_core inhabitedUniformSpaceCore -/ #print UniformSpace.comap /- /-- Given `f : α → β` and a uniformity `u` on `β`, the inverse image of `u` under `f` is the inverse image in the filter sense of the induced function `α × α → β × β`. 
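On uniformities this is literally the filter-level `comap` along `Prod.map f f`; the lemma `uniformity_comap` below records this, and it holds definitionally (a sketch):

  example [u : UniformSpace β] (f : α → β) :
      𝓤[u.comap f] = comap (Prod.map f f) (𝓤 β) :=
    rfl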
-/ def UniformSpace.comap (f : α → β) (u : UniformSpace β) : UniformSpace α where uniformity := 𝓤[u].comap fun p : α × α => (f p.1, f p.2) toTopologicalSpace := u.toTopologicalSpace.induced f refl := le_trans (by simp <;> exact fun ⟨a, b⟩ (h : a = b) => h ▸ rfl) (comap_mono u.refl) symm := by simp [tendsto_comap_iff, Prod.swap, (· ∘ ·)] <;> exact tendsto_swap_uniformity.comp tendsto_comap comp := le_trans (by rw [comap_lift'_eq, comap_lift'_eq2] exact lift'_mono' fun s hs ⟨a₁, a₂⟩ ⟨x, h₁, h₂⟩ => ⟨f x, h₁, h₂⟩ exact monotone_id.comp_rel monotone_id) (comap_mono u.comp) isOpen_uniformity s := by simp only [isOpen_fold, isOpen_induced, isOpen_iff_mem_nhds, nhds_induced, nhds_eq_comap_uniformity, comap_comap, ← mem_comap_prod_mk, ← uniformity] #align uniform_space.comap UniformSpace.comap -/ #print uniformity_comap /- theorem uniformity_comap [UniformSpace β] (f : α → β) : 𝓤[UniformSpace.comap f ‹_›] = comap (Prod.map f f) (𝓤 β) := rfl #align uniformity_comap uniformity_comap -/ #print uniformSpace_comap_id /- @[simp] theorem uniformSpace_comap_id {α : Type _} : UniformSpace.comap (id : α → α) = id := by ext : 2 rw [uniformity_comap, Prod.map_id, comap_id] #align uniform_space_comap_id uniformSpace_comap_id -/ /- warning: uniform_space.comap_comap -> UniformSpace.comap_comap is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} [uγ : UniformSpace.{u3} γ] {f : α -> β} {g : β -> γ}, Eq.{succ u1} (UniformSpace.{u1} α) (UniformSpace.comap.{u1, u3} α γ (Function.comp.{succ u1, succ u2, succ u3} α β γ g f) uγ) (UniformSpace.comap.{u1, u2} α β f (UniformSpace.comap.{u2, u3} β γ g uγ)) but is expected to have type forall {α : Type.{u3}} {β : Type.{u2}} {γ : Type.{u1}} {uγ : UniformSpace.{u1} γ} {f : α -> β} {g : β -> γ}, Eq.{succ u3} (UniformSpace.{u3} α) (UniformSpace.comap.{u3, u1} α γ (Function.comp.{succ u3, succ u2, succ u1} α β γ g f) uγ) (UniformSpace.comap.{u3, u2} α β f (UniformSpace.comap.{u2, u1} β γ g uγ)) Case conversion may be inaccurate. Consider using '#align uniform_space.comap_comap UniformSpace.comap_comapₓ'. -/ theorem UniformSpace.comap_comap {α β γ} [uγ : UniformSpace γ] {f : α → β} {g : β → γ} : UniformSpace.comap (g ∘ f) uγ = UniformSpace.comap f (UniformSpace.comap g uγ) := by ext1 simp only [uniformity_comap, comap_comap, Prod.map_comp_map] #align uniform_space.comap_comap UniformSpace.comap_comap /- warning: uniform_space.comap_inf -> UniformSpace.comap_inf is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {γ : Type.{u2}} {u₁ : UniformSpace.{u2} γ} {u₂ : UniformSpace.{u2} γ} {f : α -> γ}, Eq.{succ u1} (UniformSpace.{u1} α) (UniformSpace.comap.{u1, u2} α γ f (Inf.inf.{u2} (UniformSpace.{u2} γ) (UniformSpace.hasInf.{u2} γ) u₁ u₂)) (Inf.inf.{u1} (UniformSpace.{u1} α) (UniformSpace.hasInf.{u1} α) (UniformSpace.comap.{u1, u2} α γ f u₁) (UniformSpace.comap.{u1, u2} α γ f u₂)) but is expected to have type forall {α : Type.{u2}} {γ : Type.{u1}} {u₁ : UniformSpace.{u1} γ} {u₂ : UniformSpace.{u1} γ} {f : α -> γ}, Eq.{succ u2} (UniformSpace.{u2} α) (UniformSpace.comap.{u2, u1} α γ f (Inf.inf.{u1} (UniformSpace.{u1} γ) (instInfUniformSpace.{u1} γ) u₁ u₂)) (Inf.inf.{u2} (UniformSpace.{u2} α) (instInfUniformSpace.{u2} α) (UniformSpace.comap.{u2, u1} α γ f u₁) (UniformSpace.comap.{u2, u1} α γ f u₂)) Case conversion may be inaccurate. Consider using '#align uniform_space.comap_inf UniformSpace.comap_infₓ'. 
-/ theorem UniformSpace.comap_inf {α γ} {u₁ u₂ : UniformSpace γ} {f : α → γ} : (u₁ ⊓ u₂).comap f = u₁.comap f ⊓ u₂.comap f := uniformSpace_eq comap_inf #align uniform_space.comap_inf UniformSpace.comap_inf /- warning: uniform_space.comap_infi -> UniformSpace.comap_infᵢ is a dubious translation: lean 3 declaration is forall {ι : Sort.{u1}} {α : Type.{u2}} {γ : Type.{u3}} {u : ι -> (UniformSpace.{u3} γ)} {f : α -> γ}, Eq.{succ u2} (UniformSpace.{u2} α) (UniformSpace.comap.{u2, u3} α γ f (infᵢ.{u3, u1} (UniformSpace.{u3} γ) (UniformSpace.hasInf.{u3} γ) ι (fun (i : ι) => u i))) (infᵢ.{u2, u1} (UniformSpace.{u2} α) (UniformSpace.hasInf.{u2} α) ι (fun (i : ι) => UniformSpace.comap.{u2, u3} α γ f (u i))) but is expected to have type forall {ι : Sort.{u3}} {α : Type.{u2}} {γ : Type.{u1}} {u : ι -> (UniformSpace.{u1} γ)} {f : α -> γ}, Eq.{succ u2} (UniformSpace.{u2} α) (UniformSpace.comap.{u2, u1} α γ f (infᵢ.{u1, u3} (UniformSpace.{u1} γ) (instInfSetUniformSpace.{u1} γ) ι (fun (i : ι) => u i))) (infᵢ.{u2, u3} (UniformSpace.{u2} α) (instInfSetUniformSpace.{u2} α) ι (fun (i : ι) => UniformSpace.comap.{u2, u1} α γ f (u i))) Case conversion may be inaccurate. Consider using '#align uniform_space.comap_infi UniformSpace.comap_infᵢₓ'. -/ theorem UniformSpace.comap_infᵢ {ι α γ} {u : ι → UniformSpace γ} {f : α → γ} : (⨅ i, u i).comap f = ⨅ i, (u i).comap f := by ext : 1 simp [uniformity_comap, infᵢ_uniformity] #align uniform_space.comap_infi UniformSpace.comap_infᵢ /- warning: uniform_space.comap_mono -> UniformSpace.comap_mono is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {γ : Type.{u2}} {f : α -> γ}, Monotone.{u2, u1} (UniformSpace.{u2} γ) (UniformSpace.{u1} α) (PartialOrder.toPreorder.{u2} (UniformSpace.{u2} γ) (UniformSpace.partialOrder.{u2} γ)) (PartialOrder.toPreorder.{u1} (UniformSpace.{u1} α) (UniformSpace.partialOrder.{u1} α)) (fun (u : UniformSpace.{u2} γ) => UniformSpace.comap.{u1, u2} α γ f u) but is expected to have type forall {α : Type.{u2}} {γ : Type.{u1}} {f : α -> γ}, Monotone.{u1, u2} (UniformSpace.{u1} γ) (UniformSpace.{u2} α) (PartialOrder.toPreorder.{u1} (UniformSpace.{u1} γ) (instPartialOrderUniformSpace.{u1} γ)) (PartialOrder.toPreorder.{u2} (UniformSpace.{u2} α) (instPartialOrderUniformSpace.{u2} α)) (fun (u : UniformSpace.{u1} γ) => UniformSpace.comap.{u2, u1} α γ f u) Case conversion may be inaccurate. Consider using '#align uniform_space.comap_mono UniformSpace.comap_monoₓ'. 
-/ theorem UniformSpace.comap_mono {α γ} {f : α → γ} : Monotone fun u : UniformSpace γ => u.comap f := by intro u₁ u₂ hu change 𝓤 _ ≤ 𝓤 _ rw [uniformity_comap] exact comap_mono hu #align uniform_space.comap_mono UniformSpace.comap_mono /- warning: uniform_continuous_iff -> uniformContinuous_iff is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {uα : UniformSpace.{u1} α} {uβ : UniformSpace.{u2} β} {f : α -> β}, Iff (UniformContinuous.{u1, u2} α β uα uβ f) (LE.le.{u1} (UniformSpace.{u1} α) (Preorder.toLE.{u1} (UniformSpace.{u1} α) (PartialOrder.toPreorder.{u1} (UniformSpace.{u1} α) (UniformSpace.partialOrder.{u1} α))) uα (UniformSpace.comap.{u1, u2} α β f uβ)) but is expected to have type forall {α : Type.{u2}} {β : Type.{u1}} {uα : UniformSpace.{u2} α} {uβ : UniformSpace.{u1} β} {f : α -> β}, Iff (UniformContinuous.{u2, u1} α β uα uβ f) (LE.le.{u2} (UniformSpace.{u2} α) (Preorder.toLE.{u2} (UniformSpace.{u2} α) (PartialOrder.toPreorder.{u2} (UniformSpace.{u2} α) (instPartialOrderUniformSpace.{u2} α))) uα (UniformSpace.comap.{u2, u1} α β f uβ)) Case conversion may be inaccurate. Consider using '#align uniform_continuous_iff uniformContinuous_iffₓ'. -/ theorem uniformContinuous_iff {α β} {uα : UniformSpace α} {uβ : UniformSpace β} {f : α → β} : UniformContinuous f ↔ uα ≤ uβ.comap f := Filter.map_le_iff_le_comap #align uniform_continuous_iff uniformContinuous_iff /- warning: le_iff_uniform_continuous_id -> le_iff_uniformContinuous_id is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {u : UniformSpace.{u1} α} {v : UniformSpace.{u1} α}, Iff (LE.le.{u1} (UniformSpace.{u1} α) (Preorder.toLE.{u1} (UniformSpace.{u1} α) (PartialOrder.toPreorder.{u1} (UniformSpace.{u1} α) (UniformSpace.partialOrder.{u1} α))) u v) (UniformContinuous.{u1, u1} α α u v (id.{succ u1} α)) but is expected to have type forall {α : Type.{u1}} {u : UniformSpace.{u1} α} {v : UniformSpace.{u1} α}, Iff (LE.le.{u1} (UniformSpace.{u1} α) (Preorder.toLE.{u1} (UniformSpace.{u1} α) (PartialOrder.toPreorder.{u1} (UniformSpace.{u1} α) (instPartialOrderUniformSpace.{u1} α))) u v) (UniformContinuous.{u1, u1} α α u v (id.{succ u1} α)) Case conversion may be inaccurate. Consider using '#align le_iff_uniform_continuous_id le_iff_uniformContinuous_idₓ'. 
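-/

-- A small illustrative consequence of `uniformContinuous_iff` (a sketch, not from the
-- original source): uniform continuity only becomes easier when the uniform structure on
-- the domain is refined, since `uα ≤ uα'` can be chained with `uα' ≤ uβ.comap f`.
example {uα uα' : UniformSpace α} {uβ : UniformSpace β} {f : α → β} (hle : uα ≤ uα')
    (hf : @UniformContinuous α β uα' uβ f) : @UniformContinuous α β uα uβ f :=
  uniformContinuous_iff.2 (hle.trans (uniformContinuous_iff.1 hf))

/- The mathport warning above concerns the next declaration.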
-/ theorem le_iff_uniformContinuous_id {u v : UniformSpace α} : u ≤ v ↔ @UniformContinuous _ _ u v id := by rw [uniformContinuous_iff, uniformSpace_comap_id, id] #align le_iff_uniform_continuous_id le_iff_uniformContinuous_id #print uniformContinuous_comap /- theorem uniformContinuous_comap {f : α → β} [u : UniformSpace β] : @UniformContinuous α β (UniformSpace.comap f u) u f := tendsto_comap #align uniform_continuous_comap uniformContinuous_comap -/ #print toTopologicalSpace_comap /- theorem toTopologicalSpace_comap {f : α → β} {u : UniformSpace β} : @UniformSpace.toTopologicalSpace _ (UniformSpace.comap f u) = TopologicalSpace.induced f (@UniformSpace.toTopologicalSpace β u) := rfl #align to_topological_space_comap toTopologicalSpace_comap -/ #print uniformContinuous_comap' /- theorem uniformContinuous_comap' {f : γ → β} {g : α → γ} [v : UniformSpace β] [u : UniformSpace α] (h : UniformContinuous (f ∘ g)) : @UniformContinuous α γ u (UniformSpace.comap f v) g := tendsto_comap_iff.2 h #align uniform_continuous_comap' uniformContinuous_comap' -/ /- warning: to_nhds_mono -> to_nhds_mono is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {u₁ : UniformSpace.{u1} α} {u₂ : UniformSpace.{u1} α}, (LE.le.{u1} (UniformSpace.{u1} α) (Preorder.toLE.{u1} (UniformSpace.{u1} α) (PartialOrder.toPreorder.{u1} (UniformSpace.{u1} α) (UniformSpace.partialOrder.{u1} α))) u₁ u₂) -> (forall (a : α), LE.le.{u1} (Filter.{u1} α) (Preorder.toLE.{u1} (Filter.{u1} α) (PartialOrder.toPreorder.{u1} (Filter.{u1} α) (Filter.partialOrder.{u1} α))) (nhds.{u1} α (UniformSpace.toTopologicalSpace.{u1} α u₁) a) (nhds.{u1} α (UniformSpace.toTopologicalSpace.{u1} α u₂) a)) but is expected to have type forall {α : Type.{u1}} {u₁ : UniformSpace.{u1} α} {u₂ : UniformSpace.{u1} α}, (LE.le.{u1} (UniformSpace.{u1} α) (Preorder.toLE.{u1} (UniformSpace.{u1} α) (PartialOrder.toPreorder.{u1} (UniformSpace.{u1} α) (instPartialOrderUniformSpace.{u1} α))) u₁ u₂) -> (forall (a : α), LE.le.{u1} (Filter.{u1} α) (Preorder.toLE.{u1} (Filter.{u1} α) (PartialOrder.toPreorder.{u1} (Filter.{u1} α) (Filter.instPartialOrderFilter.{u1} α))) (nhds.{u1} α (UniformSpace.toTopologicalSpace.{u1} α u₁) a) (nhds.{u1} α (UniformSpace.toTopologicalSpace.{u1} α u₂) a)) Case conversion may be inaccurate. Consider using '#align to_nhds_mono to_nhds_monoₓ'. 
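-/

-- Illustrative sketch (not from the original source; uses only
-- `le_iff_uniformContinuous_id` from above and transitivity of `≤`): comparing uniform
-- structures is the same as asking the identity map to be uniformly continuous between them.
example {u v w : UniformSpace α} (h₁ : u ≤ v) (h₂ : v ≤ w) :
    @UniformContinuous α α u w id :=
  le_iff_uniformContinuous_id.1 (h₁.trans h₂)

/- The mathport warning above concerns the declaration that follows.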
-/ theorem to_nhds_mono {u₁ u₂ : UniformSpace α} (h : u₁ ≤ u₂) (a : α) : @nhds _ (@UniformSpace.toTopologicalSpace _ u₁) a ≤ @nhds _ (@UniformSpace.toTopologicalSpace _ u₂) a := by rw [@nhds_eq_uniformity α u₁ a, @nhds_eq_uniformity α u₂ a] <;> exact lift'_mono h le_rfl #align to_nhds_mono to_nhds_mono /- warning: to_topological_space_mono -> toTopologicalSpace_mono is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {u₁ : UniformSpace.{u1} α} {u₂ : UniformSpace.{u1} α}, (LE.le.{u1} (UniformSpace.{u1} α) (Preorder.toLE.{u1} (UniformSpace.{u1} α) (PartialOrder.toPreorder.{u1} (UniformSpace.{u1} α) (UniformSpace.partialOrder.{u1} α))) u₁ u₂) -> (LE.le.{u1} (TopologicalSpace.{u1} α) (Preorder.toLE.{u1} (TopologicalSpace.{u1} α) (PartialOrder.toPreorder.{u1} (TopologicalSpace.{u1} α) (TopologicalSpace.partialOrder.{u1} α))) (UniformSpace.toTopologicalSpace.{u1} α u₁) (UniformSpace.toTopologicalSpace.{u1} α u₂)) but is expected to have type forall {α : Type.{u1}} {u₁ : UniformSpace.{u1} α} {u₂ : UniformSpace.{u1} α}, (LE.le.{u1} (UniformSpace.{u1} α) (Preorder.toLE.{u1} (UniformSpace.{u1} α) (PartialOrder.toPreorder.{u1} (UniformSpace.{u1} α) (instPartialOrderUniformSpace.{u1} α))) u₁ u₂) -> (LE.le.{u1} (TopologicalSpace.{u1} α) (Preorder.toLE.{u1} (TopologicalSpace.{u1} α) (PartialOrder.toPreorder.{u1} (TopologicalSpace.{u1} α) (TopologicalSpace.instPartialOrderTopologicalSpace.{u1} α))) (UniformSpace.toTopologicalSpace.{u1} α u₁) (UniformSpace.toTopologicalSpace.{u1} α u₂)) Case conversion may be inaccurate. Consider using '#align to_topological_space_mono toTopologicalSpace_monoₓ'. -/ theorem toTopologicalSpace_mono {u₁ u₂ : UniformSpace α} (h : u₁ ≤ u₂) : @UniformSpace.toTopologicalSpace _ u₁ ≤ @UniformSpace.toTopologicalSpace _ u₂ := le_of_nhds_le_nhds <| to_nhds_mono h #align to_topological_space_mono toTopologicalSpace_mono #print UniformContinuous.continuous /- theorem UniformContinuous.continuous [UniformSpace α] [UniformSpace β] {f : α → β} (hf : UniformContinuous f) : Continuous f := continuous_iff_le_induced.mpr <| toTopologicalSpace_mono <| uniformContinuous_iff.1 hf #align uniform_continuous.continuous UniformContinuous.continuous -/ /- warning: to_topological_space_bot -> toTopologicalSpace_bot is a dubious translation: lean 3 declaration is forall {α : Type.{u1}}, Eq.{succ u1} (TopologicalSpace.{u1} α) (UniformSpace.toTopologicalSpace.{u1} α (Bot.bot.{u1} (UniformSpace.{u1} α) (UniformSpace.hasBot.{u1} α))) (Bot.bot.{u1} (TopologicalSpace.{u1} α) (CompleteLattice.toHasBot.{u1} (TopologicalSpace.{u1} α) (TopologicalSpace.completeLattice.{u1} α))) but is expected to have type forall {α : Type.{u1}}, Eq.{succ u1} (TopologicalSpace.{u1} α) (UniformSpace.toTopologicalSpace.{u1} α (Bot.bot.{u1} (UniformSpace.{u1} α) (instBotUniformSpace.{u1} α))) (Bot.bot.{u1} (TopologicalSpace.{u1} α) (CompleteLattice.toBot.{u1} (TopologicalSpace.{u1} α) (TopologicalSpace.instCompleteLatticeTopologicalSpace.{u1} α))) Case conversion may be inaccurate. Consider using '#align to_topological_space_bot toTopologicalSpace_botₓ'. 
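-/

-- Illustrative sketch (not from the original source): combining `uniformContinuous_comap`
-- with `UniformContinuous.continuous`, a map `f : α → β` is continuous once `α` carries the
-- topology induced by the pulled-back uniform structure `UniformSpace.comap f u`.
example {f : α → β} [u : UniformSpace β] :
    @Continuous α β (@UniformSpace.toTopologicalSpace α (UniformSpace.comap f u))
      (@UniformSpace.toTopologicalSpace β u) f :=
  @UniformContinuous.continuous α β (UniformSpace.comap f u) u f uniformContinuous_comap

/- The mathport warning above refers to the next declaration.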
-/ theorem toTopologicalSpace_bot : @UniformSpace.toTopologicalSpace α ⊥ = ⊥ := rfl #align to_topological_space_bot toTopologicalSpace_bot /- warning: to_topological_space_top -> toTopologicalSpace_top is a dubious translation: lean 3 declaration is forall {α : Type.{u1}}, Eq.{succ u1} (TopologicalSpace.{u1} α) (UniformSpace.toTopologicalSpace.{u1} α (Top.top.{u1} (UniformSpace.{u1} α) (UniformSpace.hasTop.{u1} α))) (Top.top.{u1} (TopologicalSpace.{u1} α) (CompleteLattice.toHasTop.{u1} (TopologicalSpace.{u1} α) (TopologicalSpace.completeLattice.{u1} α))) but is expected to have type forall {α : Type.{u1}}, Eq.{succ u1} (TopologicalSpace.{u1} α) (UniformSpace.toTopologicalSpace.{u1} α (Top.top.{u1} (UniformSpace.{u1} α) (instTopUniformSpace.{u1} α))) (Top.top.{u1} (TopologicalSpace.{u1} α) (CompleteLattice.toTop.{u1} (TopologicalSpace.{u1} α) (TopologicalSpace.instCompleteLatticeTopologicalSpace.{u1} α))) Case conversion may be inaccurate. Consider using '#align to_topological_space_top toTopologicalSpace_topₓ'. -/ theorem toTopologicalSpace_top : @UniformSpace.toTopologicalSpace α ⊤ = ⊤ := top_unique fun s hs => s.eq_empty_or_nonempty.elim (fun this : s = ∅ => this.symm ▸ @isOpen_empty _ ⊤) fun ⟨x, hx⟩ => have : s = univ := top_unique fun y hy => hs x hx (x, y) rfl this.symm ▸ @isOpen_univ _ ⊤ #align to_topological_space_top toTopologicalSpace_top /- warning: to_topological_space_infi -> toTopologicalSpace_infᵢ is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {ι : Sort.{u2}} {u : ι -> (UniformSpace.{u1} α)}, Eq.{succ u1} (TopologicalSpace.{u1} α) (UniformSpace.toTopologicalSpace.{u1} α (infᵢ.{u1, u2} (UniformSpace.{u1} α) (UniformSpace.hasInf.{u1} α) ι u)) (infᵢ.{u1, u2} (TopologicalSpace.{u1} α) (ConditionallyCompleteLattice.toHasInf.{u1} (TopologicalSpace.{u1} α) (CompleteLattice.toConditionallyCompleteLattice.{u1} (TopologicalSpace.{u1} α) (TopologicalSpace.completeLattice.{u1} α))) ι (fun (i : ι) => UniformSpace.toTopologicalSpace.{u1} α (u i))) but is expected to have type forall {α : Type.{u2}} {ι : Sort.{u1}} {u : ι -> (UniformSpace.{u2} α)}, Eq.{succ u2} (TopologicalSpace.{u2} α) (UniformSpace.toTopologicalSpace.{u2} α (infᵢ.{u2, u1} (UniformSpace.{u2} α) (instInfSetUniformSpace.{u2} α) ι u)) (infᵢ.{u2, u1} (TopologicalSpace.{u2} α) (ConditionallyCompleteLattice.toInfSet.{u2} (TopologicalSpace.{u2} α) (CompleteLattice.toConditionallyCompleteLattice.{u2} (TopologicalSpace.{u2} α) (TopologicalSpace.instCompleteLatticeTopologicalSpace.{u2} α))) ι (fun (i : ι) => UniformSpace.toTopologicalSpace.{u2} α (u i))) Case conversion may be inaccurate. Consider using '#align to_topological_space_infi toTopologicalSpace_infᵢₓ'. 
-/ theorem toTopologicalSpace_infᵢ {ι : Sort _} {u : ι → UniformSpace α} : (infᵢ u).toTopologicalSpace = ⨅ i, (u i).toTopologicalSpace := by refine' eq_of_nhds_eq_nhds fun a => _ simp only [nhds_infᵢ, nhds_eq_uniformity, infᵢ_uniformity] exact lift'_infi_of_map_univ (ball_inter _) preimage_univ #align to_topological_space_infi toTopologicalSpace_infᵢ /- warning: to_topological_space_Inf -> toTopologicalSpace_infₛ is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {s : Set.{u1} (UniformSpace.{u1} α)}, Eq.{succ u1} (TopologicalSpace.{u1} α) (UniformSpace.toTopologicalSpace.{u1} α (InfSet.infₛ.{u1} (UniformSpace.{u1} α) (UniformSpace.hasInf.{u1} α) s)) (infᵢ.{u1, succ u1} (TopologicalSpace.{u1} α) (ConditionallyCompleteLattice.toHasInf.{u1} (TopologicalSpace.{u1} α) (CompleteLattice.toConditionallyCompleteLattice.{u1} (TopologicalSpace.{u1} α) (TopologicalSpace.completeLattice.{u1} α))) (UniformSpace.{u1} α) (fun (i : UniformSpace.{u1} α) => infᵢ.{u1, 0} (TopologicalSpace.{u1} α) (ConditionallyCompleteLattice.toHasInf.{u1} (TopologicalSpace.{u1} α) (CompleteLattice.toConditionallyCompleteLattice.{u1} (TopologicalSpace.{u1} α) (TopologicalSpace.completeLattice.{u1} α))) (Membership.Mem.{u1, u1} (UniformSpace.{u1} α) (Set.{u1} (UniformSpace.{u1} α)) (Set.hasMem.{u1} (UniformSpace.{u1} α)) i s) (fun (H : Membership.Mem.{u1, u1} (UniformSpace.{u1} α) (Set.{u1} (UniformSpace.{u1} α)) (Set.hasMem.{u1} (UniformSpace.{u1} α)) i s) => UniformSpace.toTopologicalSpace.{u1} α i))) but is expected to have type forall {α : Type.{u1}} {s : Set.{u1} (UniformSpace.{u1} α)}, Eq.{succ u1} (TopologicalSpace.{u1} α) (UniformSpace.toTopologicalSpace.{u1} α (InfSet.infₛ.{u1} (UniformSpace.{u1} α) (instInfSetUniformSpace.{u1} α) s)) (infᵢ.{u1, succ u1} (TopologicalSpace.{u1} α) (ConditionallyCompleteLattice.toInfSet.{u1} (TopologicalSpace.{u1} α) (CompleteLattice.toConditionallyCompleteLattice.{u1} (TopologicalSpace.{u1} α) (TopologicalSpace.instCompleteLatticeTopologicalSpace.{u1} α))) (UniformSpace.{u1} α) (fun (i : UniformSpace.{u1} α) => infᵢ.{u1, 0} (TopologicalSpace.{u1} α) (ConditionallyCompleteLattice.toInfSet.{u1} (TopologicalSpace.{u1} α) (CompleteLattice.toConditionallyCompleteLattice.{u1} (TopologicalSpace.{u1} α) (TopologicalSpace.instCompleteLatticeTopologicalSpace.{u1} α))) (Membership.mem.{u1, u1} (UniformSpace.{u1} α) (Set.{u1} (UniformSpace.{u1} α)) (Set.instMembershipSet.{u1} (UniformSpace.{u1} α)) i s) (fun (H : Membership.mem.{u1, u1} (UniformSpace.{u1} α) (Set.{u1} (UniformSpace.{u1} α)) (Set.instMembershipSet.{u1} (UniformSpace.{u1} α)) i s) => UniformSpace.toTopologicalSpace.{u1} α i))) Case conversion may be inaccurate. Consider using '#align to_topological_space_Inf toTopologicalSpace_infₛₓ'. -/ theorem toTopologicalSpace_infₛ {s : Set (UniformSpace α)} : (infₛ s).toTopologicalSpace = ⨅ i ∈ s, @UniformSpace.toTopologicalSpace α i := by rw [infₛ_eq_infᵢ] simp only [← toTopologicalSpace_infᵢ] #align to_topological_space_Inf toTopologicalSpace_infₛ #print toTopologicalSpace_inf /- theorem toTopologicalSpace_inf {u v : UniformSpace α} : (u ⊓ v).toTopologicalSpace = u.toTopologicalSpace ⊓ v.toTopologicalSpace := rfl #align to_topological_space_inf toTopologicalSpace_inf -/ #print ULift.uniformSpace /- /-- Uniform space structure on `ulift α`. 
-/ instance ULift.uniformSpace [UniformSpace α] : UniformSpace (ULift α) := UniformSpace.comap ULift.down ‹_› #align ulift.uniform_space ULift.uniformSpace -/ section UniformContinuousInfi #print UniformContinuous.inf_rng /- theorem UniformContinuous.inf_rng {f : α → β} {u₁ : UniformSpace α} {u₂ u₃ : UniformSpace β} (h₁ : @UniformContinuous u₁ u₂ f) (h₂ : @UniformContinuous u₁ u₃ f) : @UniformContinuous u₁ (u₂ ⊓ u₃) f := tendsto_inf.mpr ⟨h₁, h₂⟩ #align uniform_continuous_inf_rng UniformContinuous.inf_rng -/ #print UniformContinuous.inf_dom_left /- theorem UniformContinuous.inf_dom_left {f : α → β} {u₁ u₂ : UniformSpace α} {u₃ : UniformSpace β} (hf : @UniformContinuous u₁ u₃ f) : @UniformContinuous (u₁ ⊓ u₂) u₃ f := tendsto_inf_left hf #align uniform_continuous_inf_dom_left UniformContinuous.inf_dom_left -/ #print UniformContinuous.inf_dom_right /- theorem UniformContinuous.inf_dom_right {f : α → β} {u₁ u₂ : UniformSpace α} {u₃ : UniformSpace β} (hf : @UniformContinuous u₂ u₃ f) : @UniformContinuous (u₁ ⊓ u₂) u₃ f := tendsto_inf_right hf #align uniform_continuous_inf_dom_right UniformContinuous.inf_dom_right -/ /- warning: uniform_continuous_Inf_dom -> uniformContinuous_infₛ_dom is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {f : α -> β} {u₁ : Set.{u1} (UniformSpace.{u1} α)} {u₂ : UniformSpace.{u2} β} {u : UniformSpace.{u1} α}, (Membership.Mem.{u1, u1} (UniformSpace.{u1} α) (Set.{u1} (UniformSpace.{u1} α)) (Set.hasMem.{u1} (UniformSpace.{u1} α)) u u₁) -> (UniformContinuous.{u1, u2} α β u u₂ f) -> (UniformContinuous.{u1, u2} α β (InfSet.infₛ.{u1} (UniformSpace.{u1} α) (UniformSpace.hasInf.{u1} α) u₁) u₂ f) but is expected to have type forall {α : Type.{u1}} {β : Type.{u2}} {f : α -> β} {u₁ : Set.{u1} (UniformSpace.{u1} α)} {u₂ : UniformSpace.{u2} β} {u : UniformSpace.{u1} α}, (Membership.mem.{u1, u1} (UniformSpace.{u1} α) (Set.{u1} (UniformSpace.{u1} α)) (Set.instMembershipSet.{u1} (UniformSpace.{u1} α)) u u₁) -> (UniformContinuous.{u1, u2} α β u u₂ f) -> (UniformContinuous.{u1, u2} α β (InfSet.infₛ.{u1} (UniformSpace.{u1} α) (instInfSetUniformSpace.{u1} α) u₁) u₂ f) Case conversion may be inaccurate. Consider using '#align uniform_continuous_Inf_dom uniformContinuous_infₛ_domₓ'. 
-/ theorem uniformContinuous_infₛ_dom {f : α → β} {u₁ : Set (UniformSpace α)} {u₂ : UniformSpace β} {u : UniformSpace α} (h₁ : u ∈ u₁) (hf : @UniformContinuous u u₂ f) : @UniformContinuous (infₛ u₁) u₂ f := by rw [UniformContinuous, infₛ_eq_infᵢ', infᵢ_uniformity] exact tendsto_infi' ⟨u, h₁⟩ hf #align uniform_continuous_Inf_dom uniformContinuous_infₛ_dom /- warning: uniform_continuous_Inf_rng -> uniformContinuous_infₛ_rng is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {f : α -> β} {u₁ : UniformSpace.{u1} α} {u₂ : Set.{u2} (UniformSpace.{u2} β)}, (forall (u : UniformSpace.{u2} β), (Membership.Mem.{u2, u2} (UniformSpace.{u2} β) (Set.{u2} (UniformSpace.{u2} β)) (Set.hasMem.{u2} (UniformSpace.{u2} β)) u u₂) -> (UniformContinuous.{u1, u2} α β u₁ u f)) -> (UniformContinuous.{u1, u2} α β u₁ (InfSet.infₛ.{u2} (UniformSpace.{u2} β) (UniformSpace.hasInf.{u2} β) u₂) f) but is expected to have type forall {α : Type.{u1}} {β : Type.{u2}} {f : α -> β} {u₁ : UniformSpace.{u1} α} {u₂ : Set.{u2} (UniformSpace.{u2} β)}, (forall (u : UniformSpace.{u2} β), (Membership.mem.{u2, u2} (UniformSpace.{u2} β) (Set.{u2} (UniformSpace.{u2} β)) (Set.instMembershipSet.{u2} (UniformSpace.{u2} β)) u u₂) -> (UniformContinuous.{u1, u2} α β u₁ u f)) -> (UniformContinuous.{u1, u2} α β u₁ (InfSet.infₛ.{u2} (UniformSpace.{u2} β) (instInfSetUniformSpace.{u2} β) u₂) f) Case conversion may be inaccurate. Consider using '#align uniform_continuous_Inf_rng uniformContinuous_infₛ_rngₓ'. -/ theorem uniformContinuous_infₛ_rng {f : α → β} {u₁ : UniformSpace α} {u₂ : Set (UniformSpace β)} (h : ∀ u ∈ u₂, @UniformContinuous u₁ u f) : @UniformContinuous u₁ (infₛ u₂) f := by rw [UniformContinuous, infₛ_eq_infᵢ', infᵢ_uniformity] exact tendsto_infi.mpr fun ⟨u, hu⟩ => h u hu #align uniform_continuous_Inf_rng uniformContinuous_infₛ_rng /- warning: uniform_continuous_infi_dom -> uniformContinuous_infᵢ_dom is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {ι : Sort.{u3}} {f : α -> β} {u₁ : ι -> (UniformSpace.{u1} α)} {u₂ : UniformSpace.{u2} β} {i : ι}, (UniformContinuous.{u1, u2} α β (u₁ i) u₂ f) -> (UniformContinuous.{u1, u2} α β (infᵢ.{u1, u3} (UniformSpace.{u1} α) (UniformSpace.hasInf.{u1} α) ι u₁) u₂ f) but is expected to have type forall {α : Type.{u2}} {β : Type.{u3}} {ι : Sort.{u1}} {f : α -> β} {u₁ : ι -> (UniformSpace.{u2} α)} {u₂ : UniformSpace.{u3} β} {i : ι}, (UniformContinuous.{u2, u3} α β (u₁ i) u₂ f) -> (UniformContinuous.{u2, u3} α β (infᵢ.{u2, u1} (UniformSpace.{u2} α) (instInfSetUniformSpace.{u2} α) ι u₁) u₂ f) Case conversion may be inaccurate. Consider using '#align uniform_continuous_infi_dom uniformContinuous_infᵢ_domₓ'. 
-/ theorem uniformContinuous_infᵢ_dom {f : α → β} {u₁ : ι → UniformSpace α} {u₂ : UniformSpace β} {i : ι} (hf : @UniformContinuous (u₁ i) u₂ f) : @UniformContinuous (infᵢ u₁) u₂ f := by rw [UniformContinuous, infᵢ_uniformity] exact tendsto_infi' i hf #align uniform_continuous_infi_dom uniformContinuous_infᵢ_dom /- warning: uniform_continuous_infi_rng -> uniformContinuous_infᵢ_rng is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {ι : Sort.{u3}} {f : α -> β} {u₁ : UniformSpace.{u1} α} {u₂ : ι -> (UniformSpace.{u2} β)}, (forall (i : ι), UniformContinuous.{u1, u2} α β u₁ (u₂ i) f) -> (UniformContinuous.{u1, u2} α β u₁ (infᵢ.{u2, u3} (UniformSpace.{u2} β) (UniformSpace.hasInf.{u2} β) ι u₂) f) but is expected to have type forall {α : Type.{u2}} {β : Type.{u3}} {ι : Sort.{u1}} {f : α -> β} {u₁ : UniformSpace.{u2} α} {u₂ : ι -> (UniformSpace.{u3} β)}, (forall (i : ι), UniformContinuous.{u2, u3} α β u₁ (u₂ i) f) -> (UniformContinuous.{u2, u3} α β u₁ (infᵢ.{u3, u1} (UniformSpace.{u3} β) (instInfSetUniformSpace.{u3} β) ι u₂) f) Case conversion may be inaccurate. Consider using '#align uniform_continuous_infi_rng uniformContinuous_infᵢ_rngₓ'. -/ theorem uniformContinuous_infᵢ_rng {f : α → β} {u₁ : UniformSpace α} {u₂ : ι → UniformSpace β} (h : ∀ i, @UniformContinuous u₁ (u₂ i) f) : @UniformContinuous u₁ (infᵢ u₂) f := by rwa [UniformContinuous, infᵢ_uniformity, tendsto_infi] #align uniform_continuous_infi_rng uniformContinuous_infᵢ_rng end UniformContinuousInfi #print discreteTopology_of_discrete_uniformity /- /-- A uniform space with the discrete uniformity has the discrete topology. -/ theorem discreteTopology_of_discrete_uniformity [hα : UniformSpace α] (h : uniformity α = 𝓟 idRel) : DiscreteTopology α := ⟨(uniformSpace_eq h.symm : ⊥ = hα) ▸ rfl⟩ #align discrete_topology_of_discrete_uniformity discreteTopology_of_discrete_uniformity -/ instance : UniformSpace Empty := ⊥ instance : UniformSpace PUnit := ⊥ instance : UniformSpace Bool := ⊥ instance : UniformSpace ℕ := ⊥ instance : UniformSpace ℤ := ⊥ section variable [UniformSpace α] open Additive Multiplicative instance : UniformSpace (Additive α) := ‹UniformSpace α› instance : UniformSpace (Multiplicative α) := ‹UniformSpace α› #print uniformContinuous_ofMul /- theorem uniformContinuous_ofMul : UniformContinuous (ofMul : α → Additive α) := uniformContinuous_id #align uniform_continuous_of_mul uniformContinuous_ofMul -/ #print uniformContinuous_toMul /- theorem uniformContinuous_toMul : UniformContinuous (toMul : Additive α → α) := uniformContinuous_id #align uniform_continuous_to_mul uniformContinuous_toMul -/ #print uniformContinuous_ofAdd /- theorem uniformContinuous_ofAdd : UniformContinuous (ofAdd : α → Multiplicative α) := uniformContinuous_id #align uniform_continuous_of_add uniformContinuous_ofAdd -/ #print uniformContinuous_toAdd /- theorem uniformContinuous_toAdd : UniformContinuous (toAdd : Multiplicative α → α) := uniformContinuous_id #align uniform_continuous_to_add uniformContinuous_toAdd -/ #print uniformity_additive /- theorem uniformity_additive : 𝓤 (Additive α) = (𝓤 α).map (Prod.map ofMul ofMul) := by convert map_id.symm exact Prod.map_id #align uniformity_additive uniformity_additive -/ #print uniformity_multiplicative /- theorem uniformity_multiplicative : 𝓤 (Multiplicative α) = (𝓤 α).map (Prod.map ofAdd ofAdd) := by convert map_id.symm exact Prod.map_id #align uniformity_multiplicative uniformity_multiplicative -/ end instance {p : α → Prop} [t : UniformSpace α] : UniformSpace (Subtype p) := 
UniformSpace.comap Subtype.val t #print uniformity_subtype /- theorem uniformity_subtype {p : α → Prop} [t : UniformSpace α] : 𝓤 (Subtype p) = comap (fun q : Subtype p × Subtype p => (q.1.1, q.2.1)) (𝓤 α) := rfl #align uniformity_subtype uniformity_subtype -/ #print uniformity_setCoe /- theorem uniformity_setCoe {s : Set α} [t : UniformSpace α] : 𝓤 s = comap (Prod.map (coe : s → α) (coe : s → α)) (𝓤 α) := rfl #align uniformity_set_coe uniformity_setCoe -/ #print uniformContinuous_subtype_val /- theorem uniformContinuous_subtype_val {p : α → Prop} [UniformSpace α] : UniformContinuous (Subtype.val : { a : α // p a } → α) := uniformContinuous_comap #align uniform_continuous_subtype_val uniformContinuous_subtype_val -/ /- warning: uniform_continuous_subtype_coe clashes with uniform_continuous_subtype_val -> uniformContinuous_subtype_val Case conversion may be inaccurate. Consider using '#align uniform_continuous_subtype_coe uniformContinuous_subtype_valₓ'. -/ #print uniformContinuous_subtype_val /- theorem uniformContinuous_subtype_val {p : α → Prop} [UniformSpace α] : UniformContinuous (coe : { a : α // p a } → α) := uniformContinuous_subtype_val #align uniform_continuous_subtype_coe uniformContinuous_subtype_val -/ #print UniformContinuous.subtype_mk /- theorem UniformContinuous.subtype_mk {p : α → Prop} [UniformSpace α] [UniformSpace β] {f : β → α} (hf : UniformContinuous f) (h : ∀ x, p (f x)) : UniformContinuous (fun x => ⟨f x, h x⟩ : β → Subtype p) := uniformContinuous_comap' hf #align uniform_continuous.subtype_mk UniformContinuous.subtype_mk -/ #print uniformContinuousOn_iff_restrict /- theorem uniformContinuousOn_iff_restrict [UniformSpace α] [UniformSpace β] {f : α → β} {s : Set α} : UniformContinuousOn f s ↔ UniformContinuous (s.restrict f) := by unfold UniformContinuousOn Set.restrict UniformContinuous tendsto conv_rhs => rw [show (fun x : s × s => (f x.1, f x.2)) = Prod.map f f ∘ Prod.map coe coe from rfl, uniformity_setCoe, ← map_map, map_comap, range_prod_map, Subtype.range_coe] rfl #align uniform_continuous_on_iff_restrict uniformContinuousOn_iff_restrict -/ #print tendsto_of_uniformContinuous_subtype /- theorem tendsto_of_uniformContinuous_subtype [UniformSpace α] [UniformSpace β] {f : α → β} {s : Set α} {a : α} (hf : UniformContinuous fun x : s => f x.val) (ha : s ∈ 𝓝 a) : Tendsto f (𝓝 a) (𝓝 (f a)) := by rw [(@map_nhds_subtype_coe_eq_nhds α _ s a (mem_of_mem_nhds ha) ha).symm] <;> exact tendsto_map' (continuous_iff_continuous_at.mp hf.continuous _) #align tendsto_of_uniform_continuous_subtype tendsto_of_uniformContinuous_subtype -/ #print UniformContinuousOn.continuousOn /- theorem UniformContinuousOn.continuousOn [UniformSpace α] [UniformSpace β] {f : α → β} {s : Set α} (h : UniformContinuousOn f s) : ContinuousOn f s := by rw [uniformContinuousOn_iff_restrict] at h rw [continuousOn_iff_continuous_restrict] exact h.continuous #align uniform_continuous_on.continuous_on UniformContinuousOn.continuousOn -/ @[to_additive] instance [UniformSpace α] : UniformSpace αᵐᵒᵖ := UniformSpace.comap MulOpposite.unop ‹_› #print uniformity_mulOpposite /- @[to_additive] theorem uniformity_mulOpposite [UniformSpace α] : 𝓤 αᵐᵒᵖ = comap (fun q : αᵐᵒᵖ × αᵐᵒᵖ => (q.1.unop, q.2.unop)) (𝓤 α) := rfl #align uniformity_mul_opposite uniformity_mulOpposite #align uniformity_add_opposite uniformity_addOpposite -/ #print comap_uniformity_mulOpposite /- @[simp, to_additive] theorem comap_uniformity_mulOpposite [UniformSpace α] : comap (fun p : α × α => (MulOpposite.op p.1, MulOpposite.op p.2)) (𝓤 αᵐᵒᵖ) = 𝓤 α 
:= by simpa [uniformity_mulOpposite, comap_comap, (· ∘ ·)] using comap_id #align comap_uniformity_mul_opposite comap_uniformity_mulOpposite #align comap_uniformity_add_opposite comap_uniformity_addOpposite -/ namespace MulOpposite #print MulOpposite.uniformContinuous_unop /- @[to_additive] theorem uniformContinuous_unop [UniformSpace α] : UniformContinuous (unop : αᵐᵒᵖ → α) := uniformContinuous_comap #align mul_opposite.uniform_continuous_unop MulOpposite.uniformContinuous_unop #align add_opposite.uniform_continuous_unop AddOpposite.uniformContinuous_unop -/ #print MulOpposite.uniformContinuous_op /- @[to_additive] theorem uniformContinuous_op [UniformSpace α] : UniformContinuous (op : α → αᵐᵒᵖ) := uniformContinuous_comap' uniformContinuous_id #align mul_opposite.uniform_continuous_op MulOpposite.uniformContinuous_op #align add_opposite.uniform_continuous_op AddOpposite.uniformContinuous_op -/ end MulOpposite section Prod /- a similar product space is possible on the function space (uniformity of pointwise convergence), but we want to have the uniformity of uniform convergence on function spaces -/ instance [u₁ : UniformSpace α] [u₂ : UniformSpace β] : UniformSpace (α × β) := u₁.comap Prod.fst ⊓ u₂.comap Prod.snd -- check the above produces no diamond example [u₁ : UniformSpace α] [u₂ : UniformSpace β] : (Prod.topologicalSpace : TopologicalSpace (α × β)) = UniformSpace.toTopologicalSpace := rfl /- warning: uniformity_prod -> uniformity_prod is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : UniformSpace.{u2} β], Eq.{succ (max u1 u2)} (Filter.{max u1 u2} (Prod.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β))) (uniformity.{max u1 u2} (Prod.{u1, u2} α β) (Prod.uniformSpace.{u1, u2} α β _inst_1 _inst_2)) (Inf.inf.{max u1 u2} (Filter.{max u1 u2} (Prod.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β))) (Filter.hasInf.{max u1 u2} (Prod.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β))) (Filter.comap.{max u1 u2, u1} (Prod.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β)) (Prod.{u1, u1} α α) (fun (p : Prod.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β)) => Prod.mk.{u1, u1} α α (Prod.fst.{u1, u2} α β (Prod.fst.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β) p)) (Prod.fst.{u1, u2} α β (Prod.snd.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β) p))) (uniformity.{u1} α _inst_1)) (Filter.comap.{max u1 u2, u2} (Prod.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β)) (Prod.{u2, u2} β β) (fun (p : Prod.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β)) => Prod.mk.{u2, u2} β β (Prod.snd.{u1, u2} α β (Prod.fst.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β) p)) (Prod.snd.{u1, u2} α β (Prod.snd.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β) p))) (uniformity.{u2} β _inst_2))) but is expected to have type forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : UniformSpace.{u2} β], Eq.{max (succ u1) (succ u2)} (Filter.{max u2 u1} (Prod.{max u2 u1, max u2 u1} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β))) (uniformity.{max u2 u1} (Prod.{u1, u2} α β) (instUniformSpaceProd.{u1, u2} α β _inst_1 _inst_2)) (Inf.inf.{max u1 u2} (Filter.{max u1 u2} (Prod.{max u2 u1, max u2 u1} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β))) (Filter.instInfFilter.{max u1 u2} (Prod.{max u2 u1, max u2 u1} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β))) (Filter.comap.{max u1 u2, u1} (Prod.{max u2 
u1, max u2 u1} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β)) (Prod.{u1, u1} α α) (fun (p : Prod.{max u2 u1, max u2 u1} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β)) => Prod.mk.{u1, u1} α α (Prod.fst.{u1, u2} α β (Prod.fst.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β) p)) (Prod.fst.{u1, u2} α β (Prod.snd.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β) p))) (uniformity.{u1} α _inst_1)) (Filter.comap.{max u1 u2, u2} (Prod.{max u2 u1, max u2 u1} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β)) (Prod.{u2, u2} β β) (fun (p : Prod.{max u2 u1, max u2 u1} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β)) => Prod.mk.{u2, u2} β β (Prod.snd.{u1, u2} α β (Prod.fst.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β) p)) (Prod.snd.{u1, u2} α β (Prod.snd.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β) p))) (uniformity.{u2} β _inst_2))) Case conversion may be inaccurate. Consider using '#align uniformity_prod uniformity_prodₓ'. -/ theorem uniformity_prod [UniformSpace α] [UniformSpace β] : 𝓤 (α × β) = ((𝓤 α).comap fun p : (α × β) × α × β => (p.1.1, p.2.1)) ⊓ (𝓤 β).comap fun p : (α × β) × α × β => (p.1.2, p.2.2) := rfl #align uniformity_prod uniformity_prod /- warning: uniformity_prod_eq_comap_prod -> uniformity_prod_eq_comap_prod is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : UniformSpace.{u2} β], Eq.{succ (max u1 u2)} (Filter.{max u1 u2} (Prod.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β))) (uniformity.{max u1 u2} (Prod.{u1, u2} α β) (Prod.uniformSpace.{u1, u2} α β _inst_1 _inst_2)) (Filter.comap.{max u1 u2, max u1 u2} (Prod.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β)) (Prod.{u1, u2} (Prod.{u1, u1} α α) (Prod.{u2, u2} β β)) (fun (p : Prod.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β)) => Prod.mk.{u1, u2} (Prod.{u1, u1} α α) (Prod.{u2, u2} β β) (Prod.mk.{u1, u1} α α (Prod.fst.{u1, u2} α β (Prod.fst.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β) p)) (Prod.fst.{u1, u2} α β (Prod.snd.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β) p))) (Prod.mk.{u2, u2} β β (Prod.snd.{u1, u2} α β (Prod.fst.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β) p)) (Prod.snd.{u1, u2} α β (Prod.snd.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β) p)))) (Filter.prod.{u1, u2} (Prod.{u1, u1} α α) (Prod.{u2, u2} β β) (uniformity.{u1} α _inst_1) (uniformity.{u2} β _inst_2))) but is expected to have type forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : UniformSpace.{u2} β], Eq.{max (succ u1) (succ u2)} (Filter.{max u2 u1} (Prod.{max u2 u1, max u2 u1} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β))) (uniformity.{max u2 u1} (Prod.{u1, u2} α β) (instUniformSpaceProd.{u1, u2} α β _inst_1 _inst_2)) (Filter.comap.{max u1 u2, max u2 u1} (Prod.{max u2 u1, max u2 u1} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β)) (Prod.{u1, u2} (Prod.{u1, u1} α α) (Prod.{u2, u2} β β)) (fun (p : Prod.{max u2 u1, max u2 u1} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β)) => Prod.mk.{u1, u2} (Prod.{u1, u1} α α) (Prod.{u2, u2} β β) (Prod.mk.{u1, u1} α α (Prod.fst.{u1, u2} α β (Prod.fst.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β) p)) (Prod.fst.{u1, u2} α β (Prod.snd.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β) p))) (Prod.mk.{u2, u2} β β (Prod.snd.{u1, u2} α β (Prod.fst.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β) p)) (Prod.snd.{u1, u2} α β (Prod.snd.{max u1 u2, max u1 u2} 
(Prod.{u1, u2} α β) (Prod.{u1, u2} α β) p)))) (Filter.prod.{u1, u2} (Prod.{u1, u1} α α) (Prod.{u2, u2} β β) (uniformity.{u1} α _inst_1) (uniformity.{u2} β _inst_2))) Case conversion may be inaccurate. Consider using '#align uniformity_prod_eq_comap_prod uniformity_prod_eq_comap_prodₓ'. -/ theorem uniformity_prod_eq_comap_prod [UniformSpace α] [UniformSpace β] : 𝓤 (α × β) = comap (fun p : (α × β) × α × β => ((p.1.1, p.2.1), (p.1.2, p.2.2))) (𝓤 α ×ᶠ 𝓤 β) := by rw [uniformity_prod, Filter.prod, comap_inf, comap_comap, comap_comap] #align uniformity_prod_eq_comap_prod uniformity_prod_eq_comap_prod /- warning: uniformity_prod_eq_prod -> uniformity_prod_eq_prod is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : UniformSpace.{u2} β], Eq.{succ (max u1 u2)} (Filter.{max u1 u2} (Prod.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β))) (uniformity.{max u1 u2} (Prod.{u1, u2} α β) (Prod.uniformSpace.{u1, u2} α β _inst_1 _inst_2)) (Filter.map.{max u1 u2, max u1 u2} (Prod.{u1, u2} (Prod.{u1, u1} α α) (Prod.{u2, u2} β β)) (Prod.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β)) (fun (p : Prod.{u1, u2} (Prod.{u1, u1} α α) (Prod.{u2, u2} β β)) => Prod.mk.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β) (Prod.mk.{u1, u2} α β (Prod.fst.{u1, u1} α α (Prod.fst.{u1, u2} (Prod.{u1, u1} α α) (Prod.{u2, u2} β β) p)) (Prod.fst.{u2, u2} β β (Prod.snd.{u1, u2} (Prod.{u1, u1} α α) (Prod.{u2, u2} β β) p))) (Prod.mk.{u1, u2} α β (Prod.snd.{u1, u1} α α (Prod.fst.{u1, u2} (Prod.{u1, u1} α α) (Prod.{u2, u2} β β) p)) (Prod.snd.{u2, u2} β β (Prod.snd.{u1, u2} (Prod.{u1, u1} α α) (Prod.{u2, u2} β β) p)))) (Filter.prod.{u1, u2} (Prod.{u1, u1} α α) (Prod.{u2, u2} β β) (uniformity.{u1} α _inst_1) (uniformity.{u2} β _inst_2))) but is expected to have type forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : UniformSpace.{u2} β], Eq.{max (succ u1) (succ u2)} (Filter.{max u2 u1} (Prod.{max u2 u1, max u2 u1} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β))) (uniformity.{max u2 u1} (Prod.{u1, u2} α β) (instUniformSpaceProd.{u1, u2} α β _inst_1 _inst_2)) (Filter.map.{max u1 u2, max u2 u1} (Prod.{u1, u2} (Prod.{u1, u1} α α) (Prod.{u2, u2} β β)) (Prod.{max u2 u1, max u2 u1} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β)) (fun (p : Prod.{u1, u2} (Prod.{u1, u1} α α) (Prod.{u2, u2} β β)) => Prod.mk.{max u2 u1, max u2 u1} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β) (Prod.mk.{u1, u2} α β (Prod.fst.{u1, u1} α α (Prod.fst.{u1, u2} (Prod.{u1, u1} α α) (Prod.{u2, u2} β β) p)) (Prod.fst.{u2, u2} β β (Prod.snd.{u1, u2} (Prod.{u1, u1} α α) (Prod.{u2, u2} β β) p))) (Prod.mk.{u1, u2} α β (Prod.snd.{u1, u1} α α (Prod.fst.{u1, u2} (Prod.{u1, u1} α α) (Prod.{u2, u2} β β) p)) (Prod.snd.{u2, u2} β β (Prod.snd.{u1, u2} (Prod.{u1, u1} α α) (Prod.{u2, u2} β β) p)))) (Filter.prod.{u1, u2} (Prod.{u1, u1} α α) (Prod.{u2, u2} β β) (uniformity.{u1} α _inst_1) (uniformity.{u2} β _inst_2))) Case conversion may be inaccurate. Consider using '#align uniformity_prod_eq_prod uniformity_prod_eq_prodₓ'. 
-/ theorem uniformity_prod_eq_prod [UniformSpace α] [UniformSpace β] : 𝓤 (α × β) = map (fun p : (α × α) × β × β => ((p.1.1, p.2.1), (p.1.2, p.2.2))) (𝓤 α ×ᶠ 𝓤 β) := by rw [map_swap4_eq_comap, uniformity_prod_eq_comap_prod] #align uniformity_prod_eq_prod uniformity_prod_eq_prod /- warning: mem_uniformity_of_uniform_continuous_invariant -> mem_uniformity_of_uniformContinuous_invariant is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : UniformSpace.{u2} β] {s : Set.{u2} (Prod.{u2, u2} β β)} {f : α -> α -> β}, (UniformContinuous.{u1, u2} (Prod.{u1, u1} α α) β (Prod.uniformSpace.{u1, u1} α α _inst_1 _inst_1) _inst_2 (fun (p : Prod.{u1, u1} α α) => f (Prod.fst.{u1, u1} α α p) (Prod.snd.{u1, u1} α α p))) -> (Membership.Mem.{u2, u2} (Set.{u2} (Prod.{u2, u2} β β)) (Filter.{u2} (Prod.{u2, u2} β β)) (Filter.hasMem.{u2} (Prod.{u2, u2} β β)) s (uniformity.{u2} β _inst_2)) -> (Exists.{succ u1} (Set.{u1} (Prod.{u1, u1} α α)) (fun (u : Set.{u1} (Prod.{u1, u1} α α)) => Exists.{0} (Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) u (uniformity.{u1} α _inst_1)) (fun (H : Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) u (uniformity.{u1} α _inst_1)) => forall (a : α) (b : α) (c : α), (Membership.Mem.{u1, u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) (Set.hasMem.{u1} (Prod.{u1, u1} α α)) (Prod.mk.{u1, u1} α α a b) u) -> (Membership.Mem.{u2, u2} (Prod.{u2, u2} β β) (Set.{u2} (Prod.{u2, u2} β β)) (Set.hasMem.{u2} (Prod.{u2, u2} β β)) (Prod.mk.{u2, u2} β β (f a c) (f b c)) s)))) but is expected to have type forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : UniformSpace.{u2} β] {s : Set.{u2} (Prod.{u2, u2} β β)} {f : α -> α -> β}, (UniformContinuous.{u1, u2} (Prod.{u1, u1} α α) β (instUniformSpaceProd.{u1, u1} α α _inst_1 _inst_1) _inst_2 (fun (p : Prod.{u1, u1} α α) => f (Prod.fst.{u1, u1} α α p) (Prod.snd.{u1, u1} α α p))) -> (Membership.mem.{u2, u2} (Set.{u2} (Prod.{u2, u2} β β)) (Filter.{u2} (Prod.{u2, u2} β β)) (instMembershipSetFilter.{u2} (Prod.{u2, u2} β β)) s (uniformity.{u2} β _inst_2)) -> (Exists.{succ u1} (Set.{u1} (Prod.{u1, u1} α α)) (fun (u : Set.{u1} (Prod.{u1, u1} α α)) => And (Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (instMembershipSetFilter.{u1} (Prod.{u1, u1} α α)) u (uniformity.{u1} α _inst_1)) (forall (a : α) (b : α) (c : α), (Membership.mem.{u1, u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) (Set.instMembershipSet.{u1} (Prod.{u1, u1} α α)) (Prod.mk.{u1, u1} α α a b) u) -> (Membership.mem.{u2, u2} (Prod.{u2, u2} β β) (Set.{u2} (Prod.{u2, u2} β β)) (Set.instMembershipSet.{u2} (Prod.{u2, u2} β β)) (Prod.mk.{u2, u2} β β (f a c) (f b c)) s)))) Case conversion may be inaccurate. Consider using '#align mem_uniformity_of_uniform_continuous_invariant mem_uniformity_of_uniformContinuous_invariantₓ'. 
-/ theorem mem_uniformity_of_uniformContinuous_invariant [UniformSpace α] [UniformSpace β] {s : Set (β × β)} {f : α → α → β} (hf : UniformContinuous fun p : α × α => f p.1 p.2) (hs : s ∈ 𝓤 β) : ∃ u ∈ 𝓤 α, ∀ a b c, (a, b) ∈ u → (f a c, f b c) ∈ s := by rw [UniformContinuous, uniformity_prod_eq_prod, tendsto_map'_iff, (· ∘ ·)] at hf rcases mem_prod_iff.1 (mem_map.1 <| hf hs) with ⟨u, hu, v, hv, huvt⟩ exact ⟨u, hu, fun a b c hab => @huvt ((_, _), (_, _)) ⟨hab, refl_mem_uniformity hv⟩⟩ #align mem_uniformity_of_uniform_continuous_invariant mem_uniformity_of_uniformContinuous_invariant /- warning: mem_uniform_prod -> mem_uniform_prod is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} [t₁ : UniformSpace.{u1} α] [t₂ : UniformSpace.{u2} β] {a : Set.{u1} (Prod.{u1, u1} α α)} {b : Set.{u2} (Prod.{u2, u2} β β)}, (Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) a (uniformity.{u1} α t₁)) -> (Membership.Mem.{u2, u2} (Set.{u2} (Prod.{u2, u2} β β)) (Filter.{u2} (Prod.{u2, u2} β β)) (Filter.hasMem.{u2} (Prod.{u2, u2} β β)) b (uniformity.{u2} β t₂)) -> (Membership.Mem.{max u1 u2, max u1 u2} (Set.{max u1 u2} (Prod.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β))) (Filter.{max u1 u2} (Prod.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β))) (Filter.hasMem.{max u1 u2} (Prod.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β))) (setOf.{max u1 u2} (Prod.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β)) (fun (p : Prod.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β)) => And (Membership.Mem.{u1, u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) (Set.hasMem.{u1} (Prod.{u1, u1} α α)) (Prod.mk.{u1, u1} α α (Prod.fst.{u1, u2} α β (Prod.fst.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β) p)) (Prod.fst.{u1, u2} α β (Prod.snd.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β) p))) a) (Membership.Mem.{u2, u2} (Prod.{u2, u2} β β) (Set.{u2} (Prod.{u2, u2} β β)) (Set.hasMem.{u2} (Prod.{u2, u2} β β)) (Prod.mk.{u2, u2} β β (Prod.snd.{u1, u2} α β (Prod.fst.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β) p)) (Prod.snd.{u1, u2} α β (Prod.snd.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β) p))) b))) (uniformity.{max u1 u2} (Prod.{u1, u2} α β) (Prod.uniformSpace.{u1, u2} α β t₁ t₂))) but is expected to have type forall {α : Type.{u1}} {β : Type.{u2}} [t₁ : UniformSpace.{u1} α] [t₂ : UniformSpace.{u2} β] {a : Set.{u1} (Prod.{u1, u1} α α)} {b : Set.{u2} (Prod.{u2, u2} β β)}, (Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (instMembershipSetFilter.{u1} (Prod.{u1, u1} α α)) a (uniformity.{u1} α t₁)) -> (Membership.mem.{u2, u2} (Set.{u2} (Prod.{u2, u2} β β)) (Filter.{u2} (Prod.{u2, u2} β β)) (instMembershipSetFilter.{u2} (Prod.{u2, u2} β β)) b (uniformity.{u2} β t₂)) -> (Membership.mem.{max u1 u2, max u1 u2} (Set.{max u1 u2} (Prod.{max u2 u1, max u2 u1} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β))) (Filter.{max u2 u1} (Prod.{max u2 u1, max u2 u1} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β))) (instMembershipSetFilter.{max u1 u2} (Prod.{max u2 u1, max u2 u1} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β))) (setOf.{max u1 u2} (Prod.{max u2 u1, max u2 u1} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β)) (fun (p : Prod.{max u2 u1, max u2 u1} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β)) => And (Membership.mem.{u1, u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) 
(Set.instMembershipSet.{u1} (Prod.{u1, u1} α α)) (Prod.mk.{u1, u1} α α (Prod.fst.{u1, u2} α β (Prod.fst.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β) p)) (Prod.fst.{u1, u2} α β (Prod.snd.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β) p))) a) (Membership.mem.{u2, u2} (Prod.{u2, u2} β β) (Set.{u2} (Prod.{u2, u2} β β)) (Set.instMembershipSet.{u2} (Prod.{u2, u2} β β)) (Prod.mk.{u2, u2} β β (Prod.snd.{u1, u2} α β (Prod.fst.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β) p)) (Prod.snd.{u1, u2} α β (Prod.snd.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β) p))) b))) (uniformity.{max u2 u1} (Prod.{u1, u2} α β) (instUniformSpaceProd.{u1, u2} α β t₁ t₂))) Case conversion may be inaccurate. Consider using '#align mem_uniform_prod mem_uniform_prodₓ'. -/ theorem mem_uniform_prod [t₁ : UniformSpace α] [t₂ : UniformSpace β] {a : Set (α × α)} {b : Set (β × β)} (ha : a ∈ 𝓤 α) (hb : b ∈ 𝓤 β) : { p : (α × β) × α × β | (p.1.1, p.2.1) ∈ a ∧ (p.1.2, p.2.2) ∈ b } ∈ 𝓤 (α × β) := by rw [uniformity_prod] <;> exact inter_mem_inf (preimage_mem_comap ha) (preimage_mem_comap hb) #align mem_uniform_prod mem_uniform_prod /- warning: tendsto_prod_uniformity_fst -> tendsto_prod_uniformity_fst is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : UniformSpace.{u2} β], Filter.Tendsto.{max u1 u2, u1} (Prod.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β)) (Prod.{u1, u1} α α) (fun (p : Prod.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β)) => Prod.mk.{u1, u1} α α (Prod.fst.{u1, u2} α β (Prod.fst.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β) p)) (Prod.fst.{u1, u2} α β (Prod.snd.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β) p))) (uniformity.{max u1 u2} (Prod.{u1, u2} α β) (Prod.uniformSpace.{u1, u2} α β _inst_1 _inst_2)) (uniformity.{u1} α _inst_1) but is expected to have type forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : UniformSpace.{u2} β], Filter.Tendsto.{max u1 u2, u1} (Prod.{max u2 u1, max u2 u1} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β)) (Prod.{u1, u1} α α) (fun (p : Prod.{max u2 u1, max u2 u1} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β)) => Prod.mk.{u1, u1} α α (Prod.fst.{u1, u2} α β (Prod.fst.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β) p)) (Prod.fst.{u1, u2} α β (Prod.snd.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β) p))) (uniformity.{max u2 u1} (Prod.{u1, u2} α β) (instUniformSpaceProd.{u1, u2} α β _inst_1 _inst_2)) (uniformity.{u1} α _inst_1) Case conversion may be inaccurate. Consider using '#align tendsto_prod_uniformity_fst tendsto_prod_uniformity_fstₓ'. 
-/ theorem tendsto_prod_uniformity_fst [UniformSpace α] [UniformSpace β] : Tendsto (fun p : (α × β) × α × β => (p.1.1, p.2.1)) (𝓤 (α × β)) (𝓤 α) := le_trans (map_mono inf_le_left) map_comap_le #align tendsto_prod_uniformity_fst tendsto_prod_uniformity_fst /- warning: tendsto_prod_uniformity_snd -> tendsto_prod_uniformity_snd is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : UniformSpace.{u2} β], Filter.Tendsto.{max u1 u2, u2} (Prod.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β)) (Prod.{u2, u2} β β) (fun (p : Prod.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β)) => Prod.mk.{u2, u2} β β (Prod.snd.{u1, u2} α β (Prod.fst.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β) p)) (Prod.snd.{u1, u2} α β (Prod.snd.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β) p))) (uniformity.{max u1 u2} (Prod.{u1, u2} α β) (Prod.uniformSpace.{u1, u2} α β _inst_1 _inst_2)) (uniformity.{u2} β _inst_2) but is expected to have type forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : UniformSpace.{u2} β], Filter.Tendsto.{max u1 u2, u2} (Prod.{max u2 u1, max u2 u1} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β)) (Prod.{u2, u2} β β) (fun (p : Prod.{max u2 u1, max u2 u1} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β)) => Prod.mk.{u2, u2} β β (Prod.snd.{u1, u2} α β (Prod.fst.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β) p)) (Prod.snd.{u1, u2} α β (Prod.snd.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (Prod.{u1, u2} α β) p))) (uniformity.{max u2 u1} (Prod.{u1, u2} α β) (instUniformSpaceProd.{u1, u2} α β _inst_1 _inst_2)) (uniformity.{u2} β _inst_2) Case conversion may be inaccurate. Consider using '#align tendsto_prod_uniformity_snd tendsto_prod_uniformity_sndₓ'. -/ theorem tendsto_prod_uniformity_snd [UniformSpace α] [UniformSpace β] : Tendsto (fun p : (α × β) × α × β => (p.1.2, p.2.2)) (𝓤 (α × β)) (𝓤 β) := le_trans (map_mono inf_le_right) map_comap_le #align tendsto_prod_uniformity_snd tendsto_prod_uniformity_snd /- warning: uniform_continuous_fst -> uniformContinuous_fst is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : UniformSpace.{u2} β], UniformContinuous.{max u1 u2, u1} (Prod.{u1, u2} α β) α (Prod.uniformSpace.{u1, u2} α β _inst_1 _inst_2) _inst_1 (fun (p : Prod.{u1, u2} α β) => Prod.fst.{u1, u2} α β p) but is expected to have type forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : UniformSpace.{u2} β], UniformContinuous.{max u1 u2, u1} (Prod.{u1, u2} α β) α (instUniformSpaceProd.{u1, u2} α β _inst_1 _inst_2) _inst_1 (fun (p : Prod.{u1, u2} α β) => Prod.fst.{u1, u2} α β p) Case conversion may be inaccurate. Consider using '#align uniform_continuous_fst uniformContinuous_fstₓ'. 
-/ theorem uniformContinuous_fst [UniformSpace α] [UniformSpace β] : UniformContinuous fun p : α × β => p.1 := tendsto_prod_uniformity_fst #align uniform_continuous_fst uniformContinuous_fst /- warning: uniform_continuous_snd -> uniformContinuous_snd is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : UniformSpace.{u2} β], UniformContinuous.{max u1 u2, u2} (Prod.{u1, u2} α β) β (Prod.uniformSpace.{u1, u2} α β _inst_1 _inst_2) _inst_2 (fun (p : Prod.{u1, u2} α β) => Prod.snd.{u1, u2} α β p) but is expected to have type forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : UniformSpace.{u2} β], UniformContinuous.{max u1 u2, u2} (Prod.{u1, u2} α β) β (instUniformSpaceProd.{u1, u2} α β _inst_1 _inst_2) _inst_2 (fun (p : Prod.{u1, u2} α β) => Prod.snd.{u1, u2} α β p) Case conversion may be inaccurate. Consider using '#align uniform_continuous_snd uniformContinuous_sndₓ'. -/ theorem uniformContinuous_snd [UniformSpace α] [UniformSpace β] : UniformContinuous fun p : α × β => p.2 := tendsto_prod_uniformity_snd #align uniform_continuous_snd uniformContinuous_snd variable [UniformSpace α] [UniformSpace β] [UniformSpace γ] /- warning: uniform_continuous.prod_mk -> UniformContinuous.prod_mk is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : UniformSpace.{u2} β] [_inst_3 : UniformSpace.{u3} γ] {f₁ : α -> β} {f₂ : α -> γ}, (UniformContinuous.{u1, u2} α β _inst_1 _inst_2 f₁) -> (UniformContinuous.{u1, u3} α γ _inst_1 _inst_3 f₂) -> (UniformContinuous.{u1, max u2 u3} α (Prod.{u2, u3} β γ) _inst_1 (Prod.uniformSpace.{u2, u3} β γ _inst_2 _inst_3) (fun (a : α) => Prod.mk.{u2, u3} β γ (f₁ a) (f₂ a))) but is expected to have type forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : UniformSpace.{u2} β] [_inst_3 : UniformSpace.{u3} γ] {f₁ : α -> β} {f₂ : α -> γ}, (UniformContinuous.{u1, u2} α β _inst_1 _inst_2 f₁) -> (UniformContinuous.{u1, u3} α γ _inst_1 _inst_3 f₂) -> (UniformContinuous.{u1, max u3 u2} α (Prod.{u2, u3} β γ) _inst_1 (instUniformSpaceProd.{u2, u3} β γ _inst_2 _inst_3) (fun (a : α) => Prod.mk.{u2, u3} β γ (f₁ a) (f₂ a))) Case conversion may be inaccurate. Consider using '#align uniform_continuous.prod_mk UniformContinuous.prod_mkₓ'. 
-/ theorem UniformContinuous.prod_mk {f₁ : α → β} {f₂ : α → γ} (h₁ : UniformContinuous f₁) (h₂ : UniformContinuous f₂) : UniformContinuous fun a => (f₁ a, f₂ a) := by rw [UniformContinuous, uniformity_prod] <;> exact tendsto_inf.2 ⟨tendsto_comap_iff.2 h₁, tendsto_comap_iff.2 h₂⟩ #align uniform_continuous.prod_mk UniformContinuous.prod_mk /- warning: uniform_continuous.prod_mk_left -> UniformContinuous.prod_mk_left is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : UniformSpace.{u2} β] [_inst_3 : UniformSpace.{u3} γ] {f : (Prod.{u1, u2} α β) -> γ}, (UniformContinuous.{max u1 u2, u3} (Prod.{u1, u2} α β) γ (Prod.uniformSpace.{u1, u2} α β _inst_1 _inst_2) _inst_3 f) -> (forall (b : β), UniformContinuous.{u1, u3} α γ _inst_1 _inst_3 (fun (a : α) => f (Prod.mk.{u1, u2} α β a b))) but is expected to have type forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : UniformSpace.{u2} β] [_inst_3 : UniformSpace.{u3} γ] {f : (Prod.{u1, u2} α β) -> γ}, (UniformContinuous.{max u1 u2, u3} (Prod.{u1, u2} α β) γ (instUniformSpaceProd.{u1, u2} α β _inst_1 _inst_2) _inst_3 f) -> (forall (b : β), UniformContinuous.{u1, u3} α γ _inst_1 _inst_3 (fun (a : α) => f (Prod.mk.{u1, u2} α β a b))) Case conversion may be inaccurate. Consider using '#align uniform_continuous.prod_mk_left UniformContinuous.prod_mk_leftₓ'. -/ theorem UniformContinuous.prod_mk_left {f : α × β → γ} (h : UniformContinuous f) (b) : UniformContinuous fun a => f (a, b) := h.comp (uniformContinuous_id.prod_mk uniformContinuous_const) #align uniform_continuous.prod_mk_left UniformContinuous.prod_mk_left /- warning: uniform_continuous.prod_mk_right -> UniformContinuous.prod_mk_right is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : UniformSpace.{u2} β] [_inst_3 : UniformSpace.{u3} γ] {f : (Prod.{u1, u2} α β) -> γ}, (UniformContinuous.{max u1 u2, u3} (Prod.{u1, u2} α β) γ (Prod.uniformSpace.{u1, u2} α β _inst_1 _inst_2) _inst_3 f) -> (forall (a : α), UniformContinuous.{u2, u3} β γ _inst_2 _inst_3 (fun (b : β) => f (Prod.mk.{u1, u2} α β a b))) but is expected to have type forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : UniformSpace.{u2} β] [_inst_3 : UniformSpace.{u3} γ] {f : (Prod.{u1, u2} α β) -> γ}, (UniformContinuous.{max u1 u2, u3} (Prod.{u1, u2} α β) γ (instUniformSpaceProd.{u1, u2} α β _inst_1 _inst_2) _inst_3 f) -> (forall (a : α), UniformContinuous.{u2, u3} β γ _inst_2 _inst_3 (fun (b : β) => f (Prod.mk.{u1, u2} α β a b))) Case conversion may be inaccurate. Consider using '#align uniform_continuous.prod_mk_right UniformContinuous.prod_mk_rightₓ'. 
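-/

-- Illustrative example (not from the original source; uses `uniformContinuous_fst`,
-- `uniformContinuous_snd` and `UniformContinuous.prod_mk` from above): swapping the two
-- coordinates of a pair is uniformly continuous.
example : UniformContinuous fun p : α × β => (p.2, p.1) :=
  uniformContinuous_snd.prod_mk uniformContinuous_fst

/- The mathport warning above concerns the declaration that follows.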
-/ theorem UniformContinuous.prod_mk_right {f : α × β → γ} (h : UniformContinuous f) (a) : UniformContinuous fun b => f (a, b) := h.comp (uniformContinuous_const.prod_mk uniformContinuous_id) #align uniform_continuous.prod_mk_right UniformContinuous.prod_mk_right /- warning: uniform_continuous.prod_map -> UniformContinuous.prod_map is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} {δ : Type.{u4}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : UniformSpace.{u2} β] [_inst_3 : UniformSpace.{u3} γ] [_inst_4 : UniformSpace.{u4} δ] {f : α -> γ} {g : β -> δ}, (UniformContinuous.{u1, u3} α γ _inst_1 _inst_3 f) -> (UniformContinuous.{u2, u4} β δ _inst_2 _inst_4 g) -> (UniformContinuous.{max u1 u2, max u3 u4} (Prod.{u1, u2} α β) (Prod.{u3, u4} γ δ) (Prod.uniformSpace.{u1, u2} α β _inst_1 _inst_2) (Prod.uniformSpace.{u3, u4} γ δ _inst_3 _inst_4) (Prod.map.{u1, u3, u2, u4} α γ β δ f g)) but is expected to have type forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} {δ : Type.{u4}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : UniformSpace.{u2} β] [_inst_3 : UniformSpace.{u3} γ] [_inst_4 : UniformSpace.{u4} δ] {f : α -> γ} {g : β -> δ}, (UniformContinuous.{u1, u3} α γ _inst_1 _inst_3 f) -> (UniformContinuous.{u2, u4} β δ _inst_2 _inst_4 g) -> (UniformContinuous.{max u2 u1, max u4 u3} (Prod.{u1, u2} α β) (Prod.{u3, u4} γ δ) (instUniformSpaceProd.{u1, u2} α β _inst_1 _inst_2) (instUniformSpaceProd.{u3, u4} γ δ _inst_3 _inst_4) (Prod.map.{u1, u3, u2, u4} α γ β δ f g)) Case conversion may be inaccurate. Consider using '#align uniform_continuous.prod_map UniformContinuous.prod_mapₓ'. -/ theorem UniformContinuous.prod_map [UniformSpace δ] {f : α → γ} {g : β → δ} (hf : UniformContinuous f) (hg : UniformContinuous g) : UniformContinuous (Prod.map f g) := (hf.comp uniformContinuous_fst).prod_mk (hg.comp uniformContinuous_snd) #align uniform_continuous.prod_map UniformContinuous.prod_map /- warning: to_topological_space_prod -> toTopologicalSpace_prod is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} [u : UniformSpace.{u1} α] [v : UniformSpace.{u2} β], Eq.{succ (max u1 u2)} (TopologicalSpace.{max u1 u2} (Prod.{u1, u2} α β)) (UniformSpace.toTopologicalSpace.{max u1 u2} (Prod.{u1, u2} α β) (Prod.uniformSpace.{u1, u2} α β u v)) (Prod.topologicalSpace.{u1, u2} α β (UniformSpace.toTopologicalSpace.{u1} α u) (UniformSpace.toTopologicalSpace.{u2} β v)) but is expected to have type forall {α : Type.{u2}} {β : Type.{u1}} [u : UniformSpace.{u2} α] [v : UniformSpace.{u1} β], Eq.{max (succ u2) (succ u1)} (TopologicalSpace.{max u1 u2} (Prod.{u2, u1} α β)) (UniformSpace.toTopologicalSpace.{max u1 u2} (Prod.{u2, u1} α β) (instUniformSpaceProd.{u2, u1} α β u v)) (instTopologicalSpaceProd.{u2, u1} α β (UniformSpace.toTopologicalSpace.{u2} α u) (UniformSpace.toTopologicalSpace.{u1} β v)) Case conversion may be inaccurate. Consider using '#align to_topological_space_prod toTopologicalSpace_prodₓ'. 
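-/

-- Illustrative example (not from the original source; `uniformContinuous_id` is assumed to
-- be available from earlier in the file): `UniformContinuous.prod_map` applied with the
-- identity in the second factor.
example {f : α → γ} (hf : UniformContinuous f) :
    UniformContinuous (Prod.map f (id : β → β)) :=
  hf.prod_map uniformContinuous_id

/- The mathport warning above refers to the next declaration.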
-/ theorem toTopologicalSpace_prod {α} {β} [u : UniformSpace α] [v : UniformSpace β] : @UniformSpace.toTopologicalSpace (α × β) Prod.uniformSpace = @Prod.topologicalSpace α β u.toTopologicalSpace v.toTopologicalSpace := rfl #align to_topological_space_prod toTopologicalSpace_prod /- warning: uniform_continuous_inf_dom_left₂ -> uniformContinuous_inf_dom_left₂ is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} {f : α -> β -> γ} {ua1 : UniformSpace.{u1} α} {ua2 : UniformSpace.{u1} α} {ub1 : UniformSpace.{u2} β} {ub2 : UniformSpace.{u2} β} {uc1 : UniformSpace.{u3} γ}, (UniformContinuous.{max u1 u2, u3} (Prod.{u1, u2} α β) γ (Prod.uniformSpace.{u1, u2} α β ua1 ub1) uc1 (fun (p : Prod.{u1, u2} α β) => f (Prod.fst.{u1, u2} α β p) (Prod.snd.{u1, u2} α β p))) -> (UniformContinuous.{max u1 u2, u3} (Prod.{u1, u2} α β) γ (Prod.uniformSpace.{u1, u2} α β (Inf.inf.{u1} (UniformSpace.{u1} α) (UniformSpace.hasInf.{u1} α) ua1 ua2) (Inf.inf.{u2} (UniformSpace.{u2} β) (UniformSpace.hasInf.{u2} β) ub1 ub2)) uc1 (fun (p : Prod.{u1, u2} α β) => f (Prod.fst.{u1, u2} α β p) (Prod.snd.{u1, u2} α β p))) but is expected to have type forall {α : Type.{u3}} {β : Type.{u2}} {γ : Type.{u1}} {f : α -> β -> γ} {ua1 : UniformSpace.{u3} α} {ua2 : UniformSpace.{u3} α} {ub1 : UniformSpace.{u2} β} {ub2 : UniformSpace.{u2} β} {uc1 : UniformSpace.{u1} γ}, (UniformContinuous.{max u3 u2, u1} (Prod.{u3, u2} α β) γ (instUniformSpaceProd.{u3, u2} α β ua1 ub1) uc1 (fun (p : Prod.{u3, u2} α β) => f (Prod.fst.{u3, u2} α β p) (Prod.snd.{u3, u2} α β p))) -> (UniformContinuous.{max u3 u2, u1} (Prod.{u3, u2} α β) γ (instUniformSpaceProd.{u3, u2} α β (Inf.inf.{u3} (UniformSpace.{u3} α) (instInfUniformSpace.{u3} α) ua1 ua2) (Inf.inf.{u2} (UniformSpace.{u2} β) (instInfUniformSpace.{u2} β) ub1 ub2)) uc1 (fun (p : Prod.{u3, u2} α β) => f (Prod.fst.{u3, u2} α β p) (Prod.snd.{u3, u2} α β p))) Case conversion may be inaccurate. Consider using '#align uniform_continuous_inf_dom_left₂ uniformContinuous_inf_dom_left₂ₓ'. 
-/ /-- A version of `uniform_continuous_inf_dom_left` for binary functions -/ theorem uniformContinuous_inf_dom_left₂ {α β γ} {f : α → β → γ} {ua1 ua2 : UniformSpace α} {ub1 ub2 : UniformSpace β} {uc1 : UniformSpace γ} (h : by haveI := ua1 <;> haveI := ub1 <;> exact UniformContinuous fun p : α × β => f p.1 p.2) : by haveI := ua1 ⊓ ua2 <;> haveI := ub1 ⊓ ub2 <;> exact UniformContinuous fun p : α × β => f p.1 p.2 := by -- proof essentially copied from ``continuous_inf_dom_left₂` have ha := @UniformContinuous.inf_dom_left _ _ id ua1 ua2 ua1 (@uniformContinuous_id _ (id _)) have hb := @UniformContinuous.inf_dom_left _ _ id ub1 ub2 ub1 (@uniformContinuous_id _ (id _)) have h_unif_cont_id := @UniformContinuous.prod_map _ _ _ _ (ua1 ⊓ ua2) (ub1 ⊓ ub2) ua1 ub1 _ _ ha hb exact @UniformContinuous.comp _ _ _ (id _) (id _) _ _ _ h h_unif_cont_id #align uniform_continuous_inf_dom_left₂ uniformContinuous_inf_dom_left₂ /- warning: uniform_continuous_inf_dom_right₂ -> uniformContinuous_inf_dom_right₂ is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} {f : α -> β -> γ} {ua1 : UniformSpace.{u1} α} {ua2 : UniformSpace.{u1} α} {ub1 : UniformSpace.{u2} β} {ub2 : UniformSpace.{u2} β} {uc1 : UniformSpace.{u3} γ}, (UniformContinuous.{max u1 u2, u3} (Prod.{u1, u2} α β) γ (Prod.uniformSpace.{u1, u2} α β ua2 ub2) uc1 (fun (p : Prod.{u1, u2} α β) => f (Prod.fst.{u1, u2} α β p) (Prod.snd.{u1, u2} α β p))) -> (UniformContinuous.{max u1 u2, u3} (Prod.{u1, u2} α β) γ (Prod.uniformSpace.{u1, u2} α β (Inf.inf.{u1} (UniformSpace.{u1} α) (UniformSpace.hasInf.{u1} α) ua1 ua2) (Inf.inf.{u2} (UniformSpace.{u2} β) (UniformSpace.hasInf.{u2} β) ub1 ub2)) uc1 (fun (p : Prod.{u1, u2} α β) => f (Prod.fst.{u1, u2} α β p) (Prod.snd.{u1, u2} α β p))) but is expected to have type forall {α : Type.{u3}} {β : Type.{u2}} {γ : Type.{u1}} {f : α -> β -> γ} {ua1 : UniformSpace.{u3} α} {ua2 : UniformSpace.{u3} α} {ub1 : UniformSpace.{u2} β} {ub2 : UniformSpace.{u2} β} {uc1 : UniformSpace.{u1} γ}, (UniformContinuous.{max u3 u2, u1} (Prod.{u3, u2} α β) γ (instUniformSpaceProd.{u3, u2} α β ua2 ub2) uc1 (fun (p : Prod.{u3, u2} α β) => f (Prod.fst.{u3, u2} α β p) (Prod.snd.{u3, u2} α β p))) -> (UniformContinuous.{max u3 u2, u1} (Prod.{u3, u2} α β) γ (instUniformSpaceProd.{u3, u2} α β (Inf.inf.{u3} (UniformSpace.{u3} α) (instInfUniformSpace.{u3} α) ua1 ua2) (Inf.inf.{u2} (UniformSpace.{u2} β) (instInfUniformSpace.{u2} β) ub1 ub2)) uc1 (fun (p : Prod.{u3, u2} α β) => f (Prod.fst.{u3, u2} α β p) (Prod.snd.{u3, u2} α β p))) Case conversion may be inaccurate. Consider using '#align uniform_continuous_inf_dom_right₂ uniformContinuous_inf_dom_right₂ₓ'. 
-/ /-- A version of `uniform_continuous_inf_dom_right` for binary functions -/ theorem uniformContinuous_inf_dom_right₂ {α β γ} {f : α → β → γ} {ua1 ua2 : UniformSpace α} {ub1 ub2 : UniformSpace β} {uc1 : UniformSpace γ} (h : by haveI := ua2 <;> haveI := ub2 <;> exact UniformContinuous fun p : α × β => f p.1 p.2) : by haveI := ua1 ⊓ ua2 <;> haveI := ub1 ⊓ ub2 <;> exact UniformContinuous fun p : α × β => f p.1 p.2 := by -- proof essentially copied from ``continuous_inf_dom_right₂` have ha := @UniformContinuous.inf_dom_right _ _ id ua1 ua2 ua2 (@uniformContinuous_id _ (id _)) have hb := @UniformContinuous.inf_dom_right _ _ id ub1 ub2 ub2 (@uniformContinuous_id _ (id _)) have h_unif_cont_id := @UniformContinuous.prod_map _ _ _ _ (ua1 ⊓ ua2) (ub1 ⊓ ub2) ua2 ub2 _ _ ha hb exact @UniformContinuous.comp _ _ _ (id _) (id _) _ _ _ h h_unif_cont_id #align uniform_continuous_inf_dom_right₂ uniformContinuous_inf_dom_right₂ /- warning: uniform_continuous_Inf_dom₂ -> uniformContinuous_infₛ_dom₂ is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} {f : α -> β -> γ} {uas : Set.{u1} (UniformSpace.{u1} α)} {ubs : Set.{u2} (UniformSpace.{u2} β)} {ua : UniformSpace.{u1} α} {ub : UniformSpace.{u2} β} {uc : UniformSpace.{u3} γ}, (Membership.Mem.{u1, u1} (UniformSpace.{u1} α) (Set.{u1} (UniformSpace.{u1} α)) (Set.hasMem.{u1} (UniformSpace.{u1} α)) ua uas) -> (Membership.Mem.{u2, u2} (UniformSpace.{u2} β) (Set.{u2} (UniformSpace.{u2} β)) (Set.hasMem.{u2} (UniformSpace.{u2} β)) ub ubs) -> (UniformContinuous.{max u1 u2, u3} (Prod.{u1, u2} α β) γ (Prod.uniformSpace.{u1, u2} α β ua ub) uc (fun (p : Prod.{u1, u2} α β) => f (Prod.fst.{u1, u2} α β p) (Prod.snd.{u1, u2} α β p))) -> (UniformContinuous.{max u1 u2, u3} (Prod.{u1, u2} α β) γ (Prod.uniformSpace.{u1, u2} α β (InfSet.infₛ.{u1} (UniformSpace.{u1} α) (UniformSpace.hasInf.{u1} α) uas) (InfSet.infₛ.{u2} (UniformSpace.{u2} β) (UniformSpace.hasInf.{u2} β) ubs)) uc (fun (p : Prod.{u1, u2} α β) => f (Prod.fst.{u1, u2} α β p) (Prod.snd.{u1, u2} α β p))) but is expected to have type forall {α : Type.{u3}} {β : Type.{u2}} {γ : Type.{u1}} {f : α -> β -> γ} {uas : Set.{u3} (UniformSpace.{u3} α)} {ubs : Set.{u2} (UniformSpace.{u2} β)} {ua : UniformSpace.{u3} α} {ub : UniformSpace.{u2} β} {uc : UniformSpace.{u1} γ}, (Membership.mem.{u3, u3} (UniformSpace.{u3} α) (Set.{u3} (UniformSpace.{u3} α)) (Set.instMembershipSet.{u3} (UniformSpace.{u3} α)) ua uas) -> (Membership.mem.{u2, u2} (UniformSpace.{u2} β) (Set.{u2} (UniformSpace.{u2} β)) (Set.instMembershipSet.{u2} (UniformSpace.{u2} β)) ub ubs) -> (UniformContinuous.{max u3 u2, u1} (Prod.{u3, u2} α β) γ (instUniformSpaceProd.{u3, u2} α β ua ub) uc (fun (p : Prod.{u3, u2} α β) => f (Prod.fst.{u3, u2} α β p) (Prod.snd.{u3, u2} α β p))) -> (UniformContinuous.{max u3 u2, u1} (Prod.{u3, u2} α β) γ (instUniformSpaceProd.{u3, u2} α β (InfSet.infₛ.{u3} (UniformSpace.{u3} α) (instInfSetUniformSpace.{u3} α) uas) (InfSet.infₛ.{u2} (UniformSpace.{u2} β) (instInfSetUniformSpace.{u2} β) ubs)) uc (fun (p : Prod.{u3, u2} α β) => f (Prod.fst.{u3, u2} α β p) (Prod.snd.{u3, u2} α β p))) Case conversion may be inaccurate. Consider using '#align uniform_continuous_Inf_dom₂ uniformContinuous_infₛ_dom₂ₓ'. 
-/ /-- A version of `uniform_continuous_Inf_dom` for binary functions -/ theorem uniformContinuous_infₛ_dom₂ {α β γ} {f : α → β → γ} {uas : Set (UniformSpace α)} {ubs : Set (UniformSpace β)} {ua : UniformSpace α} {ub : UniformSpace β} {uc : UniformSpace γ} (ha : ua ∈ uas) (hb : ub ∈ ubs) (hf : UniformContinuous fun p : α × β => f p.1 p.2) : by haveI := Inf uas <;> haveI := Inf ubs <;> exact @UniformContinuous _ _ _ uc fun p : α × β => f p.1 p.2 := by -- proof essentially copied from ``continuous_Inf_dom` let t : UniformSpace (α × β) := Prod.uniformSpace have ha := uniformContinuous_infₛ_dom ha uniformContinuous_id have hb := uniformContinuous_infₛ_dom hb uniformContinuous_id have h_unif_cont_id := @UniformContinuous.prod_map _ _ _ _ (Inf uas) (Inf ubs) ua ub _ _ ha hb exact @UniformContinuous.comp _ _ _ (id _) (id _) _ _ _ hf h_unif_cont_id #align uniform_continuous_Inf_dom₂ uniformContinuous_infₛ_dom₂ end Prod section open UniformSpace Function variable {δ' : Type _} [UniformSpace α] [UniformSpace β] [UniformSpace γ] [UniformSpace δ] [UniformSpace δ'] -- mathport name: «expr ∘₂ » local notation f " ∘₂ " g => Function.bicompr f g #print UniformContinuous₂ /- /-- Uniform continuity for functions of two variables. -/ def UniformContinuous₂ (f : α → β → γ) := UniformContinuous (uncurry f) #align uniform_continuous₂ UniformContinuous₂ -/ /- warning: uniform_continuous₂_def -> uniformContinuous₂_def is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : UniformSpace.{u2} β] [_inst_3 : UniformSpace.{u3} γ] (f : α -> β -> γ), Iff (UniformContinuous₂.{u1, u2, u3} α β γ _inst_1 _inst_2 _inst_3 f) (UniformContinuous.{max u1 u2, u3} (Prod.{u1, u2} α β) γ (Prod.uniformSpace.{u1, u2} α β _inst_1 _inst_2) _inst_3 (Function.uncurry.{u1, u2, u3} α β γ f)) but is expected to have type forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : UniformSpace.{u2} β] [_inst_3 : UniformSpace.{u3} γ] (f : α -> β -> γ), Iff (UniformContinuous₂.{u1, u2, u3} α β γ _inst_1 _inst_2 _inst_3 f) (UniformContinuous.{max u2 u1, u3} (Prod.{u1, u2} α β) γ (instUniformSpaceProd.{u1, u2} α β _inst_1 _inst_2) _inst_3 (Function.uncurry.{u1, u2, u3} α β γ f)) Case conversion may be inaccurate. Consider using '#align uniform_continuous₂_def uniformContinuous₂_defₓ'. 
-/ theorem uniformContinuous₂_def (f : α → β → γ) : UniformContinuous₂ f ↔ UniformContinuous (uncurry f) := Iff.rfl #align uniform_continuous₂_def uniformContinuous₂_def /- warning: uniform_continuous₂.uniform_continuous -> UniformContinuous₂.uniformContinuous is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : UniformSpace.{u2} β] [_inst_3 : UniformSpace.{u3} γ] {f : α -> β -> γ}, (UniformContinuous₂.{u1, u2, u3} α β γ _inst_1 _inst_2 _inst_3 f) -> (UniformContinuous.{max u1 u2, u3} (Prod.{u1, u2} α β) γ (Prod.uniformSpace.{u1, u2} α β _inst_1 _inst_2) _inst_3 (Function.uncurry.{u1, u2, u3} α β γ f)) but is expected to have type forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : UniformSpace.{u2} β] [_inst_3 : UniformSpace.{u3} γ] {f : α -> β -> γ}, (UniformContinuous₂.{u1, u2, u3} α β γ _inst_1 _inst_2 _inst_3 f) -> (UniformContinuous.{max u2 u1, u3} (Prod.{u1, u2} α β) γ (instUniformSpaceProd.{u1, u2} α β _inst_1 _inst_2) _inst_3 (Function.uncurry.{u1, u2, u3} α β γ f)) Case conversion may be inaccurate. Consider using '#align uniform_continuous₂.uniform_continuous UniformContinuous₂.uniformContinuousₓ'. -/ theorem UniformContinuous₂.uniformContinuous {f : α → β → γ} (h : UniformContinuous₂ f) : UniformContinuous (uncurry f) := h #align uniform_continuous₂.uniform_continuous UniformContinuous₂.uniformContinuous /- warning: uniform_continuous₂_curry -> uniformContinuous₂_curry is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : UniformSpace.{u2} β] [_inst_3 : UniformSpace.{u3} γ] (f : (Prod.{u1, u2} α β) -> γ), Iff (UniformContinuous₂.{u1, u2, u3} α β γ _inst_1 _inst_2 _inst_3 (Function.curry.{u1, u2, u3} α β γ f)) (UniformContinuous.{max u1 u2, u3} (Prod.{u1, u2} α β) γ (Prod.uniformSpace.{u1, u2} α β _inst_1 _inst_2) _inst_3 f) but is expected to have type forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : UniformSpace.{u2} β] [_inst_3 : UniformSpace.{u3} γ] (f : (Prod.{u1, u2} α β) -> γ), Iff (UniformContinuous₂.{u1, u2, u3} α β γ _inst_1 _inst_2 _inst_3 (Function.curry.{u1, u2, u3} α β γ f)) (UniformContinuous.{max u1 u2, u3} (Prod.{u1, u2} α β) γ (instUniformSpaceProd.{u1, u2} α β _inst_1 _inst_2) _inst_3 f) Case conversion may be inaccurate. Consider using '#align uniform_continuous₂_curry uniformContinuous₂_curryₓ'. 
-/ theorem uniformContinuous₂_curry (f : α × β → γ) : UniformContinuous₂ (Function.curry f) ↔ UniformContinuous f := by rw [UniformContinuous₂, uncurry_curry] #align uniform_continuous₂_curry uniformContinuous₂_curry #print UniformContinuous₂.comp /- theorem UniformContinuous₂.comp {f : α → β → γ} {g : γ → δ} (hg : UniformContinuous g) (hf : UniformContinuous₂ f) : UniformContinuous₂ (g ∘₂ f) := hg.comp hf #align uniform_continuous₂.comp UniformContinuous₂.comp -/ /- warning: uniform_continuous₂.bicompl -> UniformContinuous₂.bicompl is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} {δ : Type.{u4}} {δ' : Type.{u5}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : UniformSpace.{u2} β] [_inst_3 : UniformSpace.{u3} γ] [_inst_4 : UniformSpace.{u4} δ] [_inst_5 : UniformSpace.{u5} δ'] {f : α -> β -> γ} {ga : δ -> α} {gb : δ' -> β}, (UniformContinuous₂.{u1, u2, u3} α β γ _inst_1 _inst_2 _inst_3 f) -> (UniformContinuous.{u4, u1} δ α _inst_4 _inst_1 ga) -> (UniformContinuous.{u5, u2} δ' β _inst_5 _inst_2 gb) -> (UniformContinuous₂.{u4, u5, u3} δ δ' γ _inst_4 _inst_5 _inst_3 (Function.bicompl.{u4, u5, u1, u2, u3} δ δ' α β γ f ga gb)) but is expected to have type forall {α : Type.{u2}} {β : Type.{u3}} {γ : Type.{u4}} {δ : Type.{u5}} {δ' : Type.{u1}} [_inst_1 : UniformSpace.{u2} α] [_inst_2 : UniformSpace.{u3} β] [_inst_3 : UniformSpace.{u4} γ] [_inst_4 : UniformSpace.{u5} δ] [_inst_5 : UniformSpace.{u1} δ'] {f : α -> β -> γ} {ga : δ -> α} {gb : δ' -> β}, (UniformContinuous₂.{u2, u3, u4} α β γ _inst_1 _inst_2 _inst_3 f) -> (UniformContinuous.{u5, u2} δ α _inst_4 _inst_1 ga) -> (UniformContinuous.{u1, u3} δ' β _inst_5 _inst_2 gb) -> (UniformContinuous₂.{u5, u1, u4} δ δ' γ _inst_4 _inst_5 _inst_3 (Function.bicompl.{u5, u1, u2, u3, u4} δ δ' α β γ f ga gb)) Case conversion may be inaccurate. Consider using '#align uniform_continuous₂.bicompl UniformContinuous₂.bicomplₓ'. -/ theorem UniformContinuous₂.bicompl {f : α → β → γ} {ga : δ → α} {gb : δ' → β} (hf : UniformContinuous₂ f) (hga : UniformContinuous ga) (hgb : UniformContinuous gb) : UniformContinuous₂ (bicompl f ga gb) := hf.UniformContinuous.comp (hga.Prod_map hgb) #align uniform_continuous₂.bicompl UniformContinuous₂.bicompl end #print toTopologicalSpace_subtype /- theorem toTopologicalSpace_subtype [u : UniformSpace α] {p : α → Prop} : @UniformSpace.toTopologicalSpace (Subtype p) Subtype.uniformSpace = @Subtype.topologicalSpace α p u.toTopologicalSpace := rfl #align to_topological_space_subtype toTopologicalSpace_subtype -/ section Sum variable [UniformSpace α] [UniformSpace β] open Sum #print UniformSpace.Core.sum /- /-- Uniformity on a disjoint union. Entourages of the diagonal in the union are obtained by taking independently an entourage of the diagonal in the first part, and an entourage of the diagonal in the second part. 
-/ def UniformSpace.Core.sum : UniformSpace.Core (Sum α β) := UniformSpace.Core.mk' (map (fun p : α × α => (inl p.1, inl p.2)) (𝓤 α) ⊔ map (fun p : β × β => (inr p.1, inr p.2)) (𝓤 β)) (fun r ⟨H₁, H₂⟩ x => by cases x <;> [apply refl_mem_uniformity H₁, apply refl_mem_uniformity H₂]) (fun r ⟨H₁, H₂⟩ => ⟨symm_le_uniformity H₁, symm_le_uniformity H₂⟩) fun r ⟨Hrα, Hrβ⟩ => by rcases comp_mem_uniformity_sets Hrα with ⟨tα, htα, Htα⟩ rcases comp_mem_uniformity_sets Hrβ with ⟨tβ, htβ, Htβ⟩ refine' ⟨_, ⟨mem_map_iff_exists_image.2 ⟨tα, htα, subset_union_left _ _⟩, mem_map_iff_exists_image.2 ⟨tβ, htβ, subset_union_right _ _⟩⟩, _⟩ rintro ⟨_, _⟩ ⟨z, ⟨⟨a, b⟩, hab, ⟨⟩⟩ | ⟨⟨a, b⟩, hab, ⟨⟩⟩, ⟨⟨_, c⟩, hbc, ⟨⟩⟩ | ⟨⟨_, c⟩, hbc, ⟨⟩⟩⟩ · have A : (a, c) ∈ tα ○ tα := ⟨b, hab, hbc⟩ exact Htα A · have A : (a, c) ∈ tβ ○ tβ := ⟨b, hab, hbc⟩ exact Htβ A #align uniform_space.core.sum UniformSpace.Core.sum -/ /- warning: union_mem_uniformity_sum -> union_mem_uniformity_sum is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : UniformSpace.{u2} β] {a : Set.{u1} (Prod.{u1, u1} α α)}, (Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) a (uniformity.{u1} α _inst_1)) -> (forall {b : Set.{u2} (Prod.{u2, u2} β β)}, (Membership.Mem.{u2, u2} (Set.{u2} (Prod.{u2, u2} β β)) (Filter.{u2} (Prod.{u2, u2} β β)) (Filter.hasMem.{u2} (Prod.{u2, u2} β β)) b (uniformity.{u2} β _inst_2)) -> (Membership.Mem.{max u1 u2, max u1 u2} (Set.{max u1 u2} (Prod.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β))) (Filter.{max u1 u2} (Prod.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β))) (Filter.hasMem.{max u1 u2} (Prod.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β))) (Union.union.{max u1 u2} (Set.{max u1 u2} (Prod.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β))) (Set.hasUnion.{max u1 u2} (Prod.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β))) (Set.image.{u1, max u1 u2} (Prod.{u1, u1} α α) (Prod.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β)) (fun (p : Prod.{u1, u1} α α) => Prod.mk.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β) (Sum.inl.{u1, u2} α β (Prod.fst.{u1, u1} α α p)) (Sum.inl.{u1, u2} α β (Prod.snd.{u1, u1} α α p))) a) (Set.image.{u2, max u1 u2} (Prod.{u2, u2} β β) (Prod.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β)) (fun (p : Prod.{u2, u2} β β) => Prod.mk.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β) (Sum.inr.{u1, u2} α β (Prod.fst.{u2, u2} β β p)) (Sum.inr.{u1, u2} α β (Prod.snd.{u2, u2} β β p))) b)) (UniformSpace.Core.uniformity.{max u1 u2} (Sum.{u1, u2} α β) (UniformSpace.Core.sum.{u1, u2} α β _inst_1 _inst_2)))) but is expected to have type forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : UniformSpace.{u2} β] {a : Set.{u1} (Prod.{u1, u1} α α)}, (Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (instMembershipSetFilter.{u1} (Prod.{u1, u1} α α)) a (uniformity.{u1} α _inst_1)) -> (forall {b : Set.{u2} (Prod.{u2, u2} β β)}, (Membership.mem.{u2, u2} (Set.{u2} (Prod.{u2, u2} β β)) (Filter.{u2} (Prod.{u2, u2} β β)) (instMembershipSetFilter.{u2} (Prod.{u2, u2} β β)) b (uniformity.{u2} β _inst_2)) -> (Membership.mem.{max u2 u1, max u1 u2} (Set.{max u2 u1} (Prod.{max u2 u1, max u2 u1} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β))) (Filter.{max u1 u2} (Prod.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Sum.{u1, u2} 
α β))) (instMembershipSetFilter.{max u1 u2} (Prod.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β))) (Union.union.{max u2 u1} (Set.{max u2 u1} (Prod.{max u2 u1, max u2 u1} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β))) (Set.instUnionSet.{max u1 u2} (Prod.{max u2 u1, max u2 u1} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β))) (Set.image.{u1, max u2 u1} (Prod.{u1, u1} α α) (Prod.{max u2 u1, max u2 u1} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β)) (fun (p : Prod.{u1, u1} α α) => Prod.mk.{max u2 u1, max u2 u1} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β) (Sum.inl.{u1, u2} α β (Prod.fst.{u1, u1} α α p)) (Sum.inl.{u1, u2} α β (Prod.snd.{u1, u1} α α p))) a) (Set.image.{u2, max u1 u2} (Prod.{u2, u2} β β) (Prod.{max u2 u1, max u2 u1} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β)) (fun (p : Prod.{u2, u2} β β) => Prod.mk.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β) (Sum.inr.{u1, u2} α β (Prod.fst.{u2, u2} β β p)) (Sum.inr.{u1, u2} α β (Prod.snd.{u2, u2} β β p))) b)) (UniformSpace.Core.uniformity.{max u1 u2} (Sum.{u1, u2} α β) (UniformSpace.Core.sum.{u1, u2} α β _inst_1 _inst_2)))) Case conversion may be inaccurate. Consider using '#align union_mem_uniformity_sum union_mem_uniformity_sumₓ'. -/ /-- The union of an entourage of the diagonal in each set of a disjoint union is again an entourage of the diagonal. -/ theorem union_mem_uniformity_sum {a : Set (α × α)} (ha : a ∈ 𝓤 α) {b : Set (β × β)} (hb : b ∈ 𝓤 β) : (fun p : α × α => (inl p.1, inl p.2)) '' a ∪ (fun p : β × β => (inr p.1, inr p.2)) '' b ∈ (@UniformSpace.Core.sum α β _ _).uniformity := ⟨mem_map_iff_exists_image.2 ⟨_, ha, subset_union_left _ _⟩, mem_map_iff_exists_image.2 ⟨_, hb, subset_union_right _ _⟩⟩ #align union_mem_uniformity_sum union_mem_uniformity_sum /- warning: uniformity_sum_of_open_aux -> uniformity_sum_of_open_aux is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : UniformSpace.{u2} β] {s : Set.{max u1 u2} (Sum.{u1, u2} α β)}, (IsOpen.{max u1 u2} (Sum.{u1, u2} α β) (Sum.topologicalSpace.{u1, u2} α β (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u2} β _inst_2)) s) -> (forall {x : Sum.{u1, u2} α β}, (Membership.Mem.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Set.{max u1 u2} (Sum.{u1, u2} α β)) (Set.hasMem.{max u1 u2} (Sum.{u1, u2} α β)) x s) -> (Membership.Mem.{max u1 u2, max u1 u2} (Set.{max u1 u2} (Prod.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β))) (Filter.{max u1 u2} (Prod.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β))) (Filter.hasMem.{max u1 u2} (Prod.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β))) (setOf.{max u1 u2} (Prod.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β)) (fun (p : Prod.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β)) => (Eq.{max (succ u1) (succ u2)} (Sum.{u1, u2} α β) (Prod.fst.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β) p) x) -> (Membership.Mem.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Set.{max u1 u2} (Sum.{u1, u2} α β)) (Set.hasMem.{max u1 u2} (Sum.{u1, u2} α β)) (Prod.snd.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β) p) s))) (UniformSpace.Core.uniformity.{max u1 u2} (Sum.{u1, u2} α β) (UniformSpace.Core.sum.{u1, u2} α β _inst_1 _inst_2)))) but is expected to have type forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : UniformSpace.{u2} β] {s : Set.{max u2 u1} (Sum.{u1, u2} α β)}, (IsOpen.{max u1 u2} (Sum.{u1, u2} α β) (instTopologicalSpaceSum.{u1, u2} α β 
(UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u2} β _inst_2)) s) -> (forall {x : Sum.{u1, u2} α β}, (Membership.mem.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Set.{max u2 u1} (Sum.{u1, u2} α β)) (Set.instMembershipSet.{max u1 u2} (Sum.{u1, u2} α β)) x s) -> (Membership.mem.{max u1 u2, max u1 u2} (Set.{max u1 u2} (Prod.{max u2 u1, max u2 u1} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β))) (Filter.{max u1 u2} (Prod.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β))) (instMembershipSetFilter.{max u1 u2} (Prod.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β))) (setOf.{max u1 u2} (Prod.{max u2 u1, max u2 u1} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β)) (fun (p : Prod.{max u2 u1, max u2 u1} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β)) => (Eq.{max (succ u1) (succ u2)} (Sum.{u1, u2} α β) (Prod.fst.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β) p) x) -> (Membership.mem.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Set.{max u2 u1} (Sum.{u1, u2} α β)) (Set.instMembershipSet.{max u1 u2} (Sum.{u1, u2} α β)) (Prod.snd.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β) p) s))) (UniformSpace.Core.uniformity.{max u1 u2} (Sum.{u1, u2} α β) (UniformSpace.Core.sum.{u1, u2} α β _inst_1 _inst_2)))) Case conversion may be inaccurate. Consider using '#align uniformity_sum_of_open_aux uniformity_sum_of_open_auxₓ'. -/ /- To prove that the topology defined by the uniform structure on the disjoint union coincides with the disjoint union topology, we need two lemmas saying that open sets can be characterized by the uniform structure -/ theorem uniformity_sum_of_open_aux {s : Set (Sum α β)} (hs : IsOpen s) {x : Sum α β} (xs : x ∈ s) : { p : Sum α β × Sum α β | p.1 = x → p.2 ∈ s } ∈ (@UniformSpace.Core.sum α β _ _).uniformity := by cases x · refine' mem_of_superset (union_mem_uniformity_sum (mem_nhds_uniformity_iff_right.1 (IsOpen.mem_nhds hs.1 xs)) univ_mem) (union_subset _ _) <;> rintro _ ⟨⟨_, b⟩, h, ⟨⟩⟩ ⟨⟩ exact h rfl · refine' mem_of_superset (union_mem_uniformity_sum univ_mem (mem_nhds_uniformity_iff_right.1 (IsOpen.mem_nhds hs.2 xs))) (union_subset _ _) <;> rintro _ ⟨⟨a, _⟩, h, ⟨⟩⟩ ⟨⟩ exact h rfl #align uniformity_sum_of_open_aux uniformity_sum_of_open_aux /- warning: open_of_uniformity_sum_aux -> open_of_uniformity_sum_aux is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : UniformSpace.{u2} β] {s : Set.{max u1 u2} (Sum.{u1, u2} α β)}, (forall (x : Sum.{u1, u2} α β), (Membership.Mem.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Set.{max u1 u2} (Sum.{u1, u2} α β)) (Set.hasMem.{max u1 u2} (Sum.{u1, u2} α β)) x s) -> (Membership.Mem.{max u1 u2, max u1 u2} (Set.{max u1 u2} (Prod.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β))) (Filter.{max u1 u2} (Prod.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β))) (Filter.hasMem.{max u1 u2} (Prod.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β))) (setOf.{max u1 u2} (Prod.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β)) (fun (p : Prod.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β)) => (Eq.{max (succ u1) (succ u2)} (Sum.{u1, u2} α β) (Prod.fst.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β) p) x) -> (Membership.Mem.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Set.{max u1 u2} (Sum.{u1, u2} α β)) (Set.hasMem.{max u1 u2} (Sum.{u1, u2} α β)) (Prod.snd.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β) p) s))) (UniformSpace.Core.uniformity.{max u1 u2} (Sum.{u1, u2} α 
β) (UniformSpace.Core.sum.{u1, u2} α β _inst_1 _inst_2)))) -> (IsOpen.{max u1 u2} (Sum.{u1, u2} α β) (Sum.topologicalSpace.{u1, u2} α β (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u2} β _inst_2)) s) but is expected to have type forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : UniformSpace.{u2} β] {s : Set.{max u2 u1} (Sum.{u1, u2} α β)}, (forall (x : Sum.{u1, u2} α β), (Membership.mem.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Set.{max u2 u1} (Sum.{u1, u2} α β)) (Set.instMembershipSet.{max u1 u2} (Sum.{u1, u2} α β)) x s) -> (Membership.mem.{max u1 u2, max u1 u2} (Set.{max u1 u2} (Prod.{max u2 u1, max u2 u1} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β))) (Filter.{max u1 u2} (Prod.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β))) (instMembershipSetFilter.{max u1 u2} (Prod.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β))) (setOf.{max u1 u2} (Prod.{max u2 u1, max u2 u1} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β)) (fun (p : Prod.{max u2 u1, max u2 u1} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β)) => (Eq.{max (succ u1) (succ u2)} (Sum.{u1, u2} α β) (Prod.fst.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β) p) x) -> (Membership.mem.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Set.{max u2 u1} (Sum.{u1, u2} α β)) (Set.instMembershipSet.{max u1 u2} (Sum.{u1, u2} α β)) (Prod.snd.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β) p) s))) (UniformSpace.Core.uniformity.{max u1 u2} (Sum.{u1, u2} α β) (UniformSpace.Core.sum.{u1, u2} α β _inst_1 _inst_2)))) -> (IsOpen.{max u1 u2} (Sum.{u1, u2} α β) (instTopologicalSpaceSum.{u1, u2} α β (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u2} β _inst_2)) s) Case conversion may be inaccurate. Consider using '#align open_of_uniformity_sum_aux open_of_uniformity_sum_auxₓ'. 
-/ theorem open_of_uniformity_sum_aux {s : Set (Sum α β)} (hs : ∀ x ∈ s, { p : Sum α β × Sum α β | p.1 = x → p.2 ∈ s } ∈ (@UniformSpace.Core.sum α β _ _).uniformity) : IsOpen s := by constructor · refine' (@isOpen_iff_mem_nhds α _ _).2 fun a ha => mem_nhds_uniformity_iff_right.2 _ rcases mem_map_iff_exists_image.1 (hs _ ha).1 with ⟨t, ht, st⟩ refine' mem_of_superset ht _ rintro p pt rfl exact st ⟨_, pt, rfl⟩ rfl · refine' (@isOpen_iff_mem_nhds β _ _).2 fun b hb => mem_nhds_uniformity_iff_right.2 _ rcases mem_map_iff_exists_image.1 (hs _ hb).2 with ⟨t, ht, st⟩ refine' mem_of_superset ht _ rintro p pt rfl exact st ⟨_, pt, rfl⟩ rfl #align open_of_uniformity_sum_aux open_of_uniformity_sum_aux #print Sum.uniformSpace /- -- We can now define the uniform structure on the disjoint union instance Sum.uniformSpace : UniformSpace (Sum α β) where toCore := UniformSpace.Core.sum isOpen_uniformity s := ⟨uniformity_sum_of_open_aux, open_of_uniformity_sum_aux⟩ #align sum.uniform_space Sum.uniformSpace -/ /- warning: sum.uniformity -> Sum.uniformity is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : UniformSpace.{u2} β], Eq.{succ (max u1 u2)} (Filter.{max u1 u2} (Prod.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β))) (uniformity.{max u1 u2} (Sum.{u1, u2} α β) (Sum.uniformSpace.{u1, u2} α β _inst_1 _inst_2)) (Sup.sup.{max u1 u2} (Filter.{max u1 u2} (Prod.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β))) (SemilatticeSup.toHasSup.{max u1 u2} (Filter.{max u1 u2} (Prod.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β))) (Lattice.toSemilatticeSup.{max u1 u2} (Filter.{max u1 u2} (Prod.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β))) (ConditionallyCompleteLattice.toLattice.{max u1 u2} (Filter.{max u1 u2} (Prod.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β))) (CompleteLattice.toConditionallyCompleteLattice.{max u1 u2} (Filter.{max u1 u2} (Prod.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β))) (Filter.completeLattice.{max u1 u2} (Prod.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β))))))) (Filter.map.{u1, max u1 u2} (Prod.{u1, u1} α α) (Prod.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β)) (fun (p : Prod.{u1, u1} α α) => Prod.mk.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β) (Sum.inl.{u1, u2} α β (Prod.fst.{u1, u1} α α p)) (Sum.inl.{u1, u2} α β (Prod.snd.{u1, u1} α α p))) (uniformity.{u1} α _inst_1)) (Filter.map.{u2, max u1 u2} (Prod.{u2, u2} β β) (Prod.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β)) (fun (p : Prod.{u2, u2} β β) => Prod.mk.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β) (Sum.inr.{u1, u2} α β (Prod.fst.{u2, u2} β β p)) (Sum.inr.{u1, u2} α β (Prod.snd.{u2, u2} β β p))) (uniformity.{u2} β _inst_2))) but is expected to have type forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : UniformSpace.{u2} β], Eq.{max (succ u1) (succ u2)} (Filter.{max u2 u1} (Prod.{max u2 u1, max u2 u1} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β))) (uniformity.{max u2 u1} (Sum.{u1, u2} α β) (Sum.uniformSpace.{u1, u2} α β _inst_1 _inst_2)) (Sup.sup.{max u2 u1} (Filter.{max u2 u1} (Prod.{max u2 u1, max u2 u1} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β))) (SemilatticeSup.toSup.{max u1 u2} (Filter.{max u2 u1} (Prod.{max u2 u1, max u2 u1} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β))) (Lattice.toSemilatticeSup.{max u1 u2} (Filter.{max u2 u1} (Prod.{max u2 u1, max u2 u1} (Sum.{u1, u2} α β) (Sum.{u1, u2} α 
β))) (ConditionallyCompleteLattice.toLattice.{max u1 u2} (Filter.{max u2 u1} (Prod.{max u2 u1, max u2 u1} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β))) (CompleteLattice.toConditionallyCompleteLattice.{max u1 u2} (Filter.{max u2 u1} (Prod.{max u2 u1, max u2 u1} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β))) (Filter.instCompleteLatticeFilter.{max u1 u2} (Prod.{max u2 u1, max u2 u1} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β))))))) (Filter.map.{u1, max u2 u1} (Prod.{u1, u1} α α) (Prod.{max u2 u1, max u2 u1} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β)) (fun (p : Prod.{u1, u1} α α) => Prod.mk.{max u2 u1, max u2 u1} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β) (Sum.inl.{u1, u2} α β (Prod.fst.{u1, u1} α α p)) (Sum.inl.{u1, u2} α β (Prod.snd.{u1, u1} α α p))) (uniformity.{u1} α _inst_1)) (Filter.map.{u2, max u1 u2} (Prod.{u2, u2} β β) (Prod.{max u2 u1, max u2 u1} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β)) (fun (p : Prod.{u2, u2} β β) => Prod.mk.{max u1 u2, max u1 u2} (Sum.{u1, u2} α β) (Sum.{u1, u2} α β) (Sum.inr.{u1, u2} α β (Prod.fst.{u2, u2} β β p)) (Sum.inr.{u1, u2} α β (Prod.snd.{u2, u2} β β p))) (uniformity.{u2} β _inst_2))) Case conversion may be inaccurate. Consider using '#align sum.uniformity Sum.uniformityₓ'. -/ theorem Sum.uniformity : 𝓤 (Sum α β) = map (fun p : α × α => (inl p.1, inl p.2)) (𝓤 α) ⊔ map (fun p : β × β => (inr p.1, inr p.2)) (𝓤 β) := rfl #align sum.uniformity Sum.uniformity end Sum end Constructions /- warning: lebesgue_number_lemma -> lebesgue_number_lemma is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] {s : Set.{u1} α} {ι : Sort.{u2}} {c : ι -> (Set.{u1} α)}, (IsCompact.{u1} α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) s) -> (forall (i : ι), IsOpen.{u1} α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (c i)) -> (HasSubset.Subset.{u1} (Set.{u1} α) (Set.hasSubset.{u1} α) s (Set.unionᵢ.{u1, u2} α ι (fun (i : ι) => c i))) -> (Exists.{succ u1} (Set.{u1} (Prod.{u1, u1} α α)) (fun (n : Set.{u1} (Prod.{u1, u1} α α)) => Exists.{0} (Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) n (uniformity.{u1} α _inst_1)) (fun (H : Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) n (uniformity.{u1} α _inst_1)) => forall (x : α), (Membership.Mem.{u1, u1} α (Set.{u1} α) (Set.hasMem.{u1} α) x s) -> (Exists.{u2} ι (fun (i : ι) => HasSubset.Subset.{u1} (Set.{u1} α) (Set.hasSubset.{u1} α) (setOf.{u1} α (fun (y : α) => Membership.Mem.{u1, u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) (Set.hasMem.{u1} (Prod.{u1, u1} α α)) (Prod.mk.{u1, u1} α α x y) n)) (c i)))))) but is expected to have type forall {α : Type.{u2}} [_inst_1 : UniformSpace.{u2} α] {s : Set.{u2} α} {ι : Sort.{u1}} {c : ι -> (Set.{u2} α)}, (IsCompact.{u2} α (UniformSpace.toTopologicalSpace.{u2} α _inst_1) s) -> (forall (i : ι), IsOpen.{u2} α (UniformSpace.toTopologicalSpace.{u2} α _inst_1) (c i)) -> (HasSubset.Subset.{u2} (Set.{u2} α) (Set.instHasSubsetSet.{u2} α) s (Set.unionᵢ.{u2, u1} α ι (fun (i : ι) => c i))) -> (Exists.{succ u2} (Set.{u2} (Prod.{u2, u2} α α)) (fun (n : Set.{u2} (Prod.{u2, u2} α α)) => And (Membership.mem.{u2, u2} (Set.{u2} (Prod.{u2, u2} α α)) (Filter.{u2} (Prod.{u2, u2} α α)) (instMembershipSetFilter.{u2} (Prod.{u2, u2} α α)) n (uniformity.{u2} α _inst_1)) (forall (x : α), (Membership.mem.{u2, u2} α (Set.{u2} α) (Set.instMembershipSet.{u2} α) x s) -> (Exists.{u1} ι (fun (i : ι) => HasSubset.Subset.{u2} (Set.{u2} α) 
(Set.instHasSubsetSet.{u2} α) (setOf.{u2} α (fun (y : α) => Membership.mem.{u2, u2} (Prod.{u2, u2} α α) (Set.{u2} (Prod.{u2, u2} α α)) (Set.instMembershipSet.{u2} (Prod.{u2, u2} α α)) (Prod.mk.{u2, u2} α α x y) n)) (c i)))))) Case conversion may be inaccurate. Consider using '#align lebesgue_number_lemma lebesgue_number_lemmaₓ'. -/ /-- Let `c : ι → set α` be an open cover of a compact set `s`. Then there exists an entourage `n` such that for each `x ∈ s` its `n`-neighborhood is contained in some `c i`. -/ theorem lebesgue_number_lemma {α : Type u} [UniformSpace α] {s : Set α} {ι} {c : ι → Set α} (hs : IsCompact s) (hc₁ : ∀ i, IsOpen (c i)) (hc₂ : s ⊆ ⋃ i, c i) : ∃ n ∈ 𝓤 α, ∀ x ∈ s, ∃ i, { y | (x, y) ∈ n } ⊆ c i := by let u n := { x | ∃ i, ∃ m ∈ 𝓤 α, { y | (x, y) ∈ m ○ n } ⊆ c i } have hu₁ : ∀ n ∈ 𝓤 α, IsOpen (u n) := by refine' fun n hn => isOpen_uniformity.2 _ rintro x ⟨i, m, hm, h⟩ rcases comp_mem_uniformity_sets hm with ⟨m', hm', mm'⟩ apply (𝓤 α).sets_of_superset hm' rintro ⟨x, y⟩ hp rfl refine' ⟨i, m', hm', fun z hz => h (monotone_id.comp_rel monotone_const mm' _)⟩ dsimp [-mem_compRel] at hz⊢ rw [compRel_assoc] exact ⟨y, hp, hz⟩ have hu₂ : s ⊆ ⋃ n ∈ 𝓤 α, u n := by intro x hx rcases mem_Union.1 (hc₂ hx) with ⟨i, h⟩ rcases comp_mem_uniformity_sets (isOpen_uniformity.1 (hc₁ i) x h) with ⟨m', hm', mm'⟩ exact mem_bUnion hm' ⟨i, _, hm', fun y hy => mm' hy rfl⟩ rcases hs.elim_finite_subcover_image hu₁ hu₂ with ⟨b, bu, b_fin, b_cover⟩ refine' ⟨_, (bInter_mem b_fin).2 bu, fun x hx => _⟩ rcases mem_Union₂.1 (b_cover hx) with ⟨n, bn, i, m, hm, h⟩ refine' ⟨i, fun y hy => h _⟩ exact prod_mk_mem_compRel (refl_mem_uniformity hm) (bInter_subset_of_mem bn hy) #align lebesgue_number_lemma lebesgue_number_lemma /- warning: lebesgue_number_lemma_sUnion -> lebesgue_number_lemma_unionₛ is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] {s : Set.{u1} α} {c : Set.{u1} (Set.{u1} α)}, (IsCompact.{u1} α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) s) -> (forall (t : Set.{u1} α), (Membership.Mem.{u1, u1} (Set.{u1} α) (Set.{u1} (Set.{u1} α)) (Set.hasMem.{u1} (Set.{u1} α)) t c) -> (IsOpen.{u1} α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) t)) -> (HasSubset.Subset.{u1} (Set.{u1} α) (Set.hasSubset.{u1} α) s (Set.unionₛ.{u1} α c)) -> (Exists.{succ u1} (Set.{u1} (Prod.{u1, u1} α α)) (fun (n : Set.{u1} (Prod.{u1, u1} α α)) => Exists.{0} (Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) n (uniformity.{u1} α _inst_1)) (fun (H : Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) n (uniformity.{u1} α _inst_1)) => forall (x : α), (Membership.Mem.{u1, u1} α (Set.{u1} α) (Set.hasMem.{u1} α) x s) -> (Exists.{succ u1} (Set.{u1} α) (fun (t : Set.{u1} α) => Exists.{0} (Membership.Mem.{u1, u1} (Set.{u1} α) (Set.{u1} (Set.{u1} α)) (Set.hasMem.{u1} (Set.{u1} α)) t c) (fun (H : Membership.Mem.{u1, u1} (Set.{u1} α) (Set.{u1} (Set.{u1} α)) (Set.hasMem.{u1} (Set.{u1} α)) t c) => forall (y : α), (Membership.Mem.{u1, u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) (Set.hasMem.{u1} (Prod.{u1, u1} α α)) (Prod.mk.{u1, u1} α α x y) n) -> (Membership.Mem.{u1, u1} α (Set.{u1} α) (Set.hasMem.{u1} α) y t))))))) but is expected to have type forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] {s : Set.{u1} α} {c : Set.{u1} (Set.{u1} α)}, (IsCompact.{u1} α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) s) -> (forall (t : 
Set.{u1} α), (Membership.mem.{u1, u1} (Set.{u1} α) (Set.{u1} (Set.{u1} α)) (Set.instMembershipSet.{u1} (Set.{u1} α)) t c) -> (IsOpen.{u1} α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) t)) -> (HasSubset.Subset.{u1} (Set.{u1} α) (Set.instHasSubsetSet.{u1} α) s (Set.unionₛ.{u1} α c)) -> (Exists.{succ u1} (Set.{u1} (Prod.{u1, u1} α α)) (fun (n : Set.{u1} (Prod.{u1, u1} α α)) => And (Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (instMembershipSetFilter.{u1} (Prod.{u1, u1} α α)) n (uniformity.{u1} α _inst_1)) (forall (x : α), (Membership.mem.{u1, u1} α (Set.{u1} α) (Set.instMembershipSet.{u1} α) x s) -> (Exists.{succ u1} (Set.{u1} α) (fun (t : Set.{u1} α) => And (Membership.mem.{u1, u1} (Set.{u1} α) (Set.{u1} (Set.{u1} α)) (Set.instMembershipSet.{u1} (Set.{u1} α)) t c) (forall (y : α), (Membership.mem.{u1, u1} (Prod.{u1, u1} α α) (Set.{u1} (Prod.{u1, u1} α α)) (Set.instMembershipSet.{u1} (Prod.{u1, u1} α α)) (Prod.mk.{u1, u1} α α x y) n) -> (Membership.mem.{u1, u1} α (Set.{u1} α) (Set.instMembershipSet.{u1} α) y t))))))) Case conversion may be inaccurate. Consider using '#align lebesgue_number_lemma_sUnion lebesgue_number_lemma_unionₛₓ'. -/ /-- Let `c : set (set α)` be an open cover of a compact set `s`. Then there exists an entourage `n` such that for each `x ∈ s` its `n`-neighborhood is contained in some `t ∈ c`. -/ theorem lebesgue_number_lemma_unionₛ {α : Type u} [UniformSpace α] {s : Set α} {c : Set (Set α)} (hs : IsCompact s) (hc₁ : ∀ t ∈ c, IsOpen t) (hc₂ : s ⊆ ⋃₀ c) : ∃ n ∈ 𝓤 α, ∀ x ∈ s, ∃ t ∈ c, ∀ y, (x, y) ∈ n → y ∈ t := by rw [sUnion_eq_Union] at hc₂ <;> simpa using lebesgue_number_lemma hs (by simpa) hc₂ #align lebesgue_number_lemma_sUnion lebesgue_number_lemma_unionₛ /- warning: lebesgue_number_of_compact_open -> lebesgue_number_of_compact_open is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] {K : Set.{u1} α} {U : Set.{u1} α}, (IsCompact.{u1} α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) K) -> (IsOpen.{u1} α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) U) -> (HasSubset.Subset.{u1} (Set.{u1} α) (Set.hasSubset.{u1} α) K U) -> (Exists.{succ u1} (Set.{u1} (Prod.{u1, u1} α α)) (fun (V : Set.{u1} (Prod.{u1, u1} α α)) => Exists.{0} (Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) V (uniformity.{u1} α _inst_1)) (fun (H : Membership.Mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (Filter.hasMem.{u1} (Prod.{u1, u1} α α)) V (uniformity.{u1} α _inst_1)) => And (IsOpen.{u1} (Prod.{u1, u1} α α) (Prod.topologicalSpace.{u1, u1} α α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u1} α _inst_1)) V) (forall (x : α), (Membership.Mem.{u1, u1} α (Set.{u1} α) (Set.hasMem.{u1} α) x K) -> (HasSubset.Subset.{u1} (Set.{u1} α) (Set.hasSubset.{u1} α) (UniformSpace.ball.{u1} α x V) U))))) but is expected to have type forall {α : Type.{u1}} [_inst_1 : UniformSpace.{u1} α] {K : Set.{u1} α} {U : Set.{u1} α}, (IsCompact.{u1} α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) K) -> (IsOpen.{u1} α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) U) -> (HasSubset.Subset.{u1} (Set.{u1} α) (Set.instHasSubsetSet.{u1} α) K U) -> (Exists.{succ u1} (Set.{u1} (Prod.{u1, u1} α α)) (fun (V : Set.{u1} (Prod.{u1, u1} α α)) => And (Membership.mem.{u1, u1} (Set.{u1} (Prod.{u1, u1} α α)) (Filter.{u1} (Prod.{u1, u1} α α)) (instMembershipSetFilter.{u1} (Prod.{u1, u1} α α)) V 
(uniformity.{u1} α _inst_1)) (And (IsOpen.{u1} (Prod.{u1, u1} α α) (instTopologicalSpaceProd.{u1, u1} α α (UniformSpace.toTopologicalSpace.{u1} α _inst_1) (UniformSpace.toTopologicalSpace.{u1} α _inst_1)) V) (forall (x : α), (Membership.mem.{u1, u1} α (Set.{u1} α) (Set.instMembershipSet.{u1} α) x K) -> (HasSubset.Subset.{u1} (Set.{u1} α) (Set.instHasSubsetSet.{u1} α) (UniformSpace.ball.{u1} α x V) U))))) Case conversion may be inaccurate. Consider using '#align lebesgue_number_of_compact_open lebesgue_number_of_compact_openₓ'. -/ /-- A useful consequence of the Lebesgue number lemma: given any compact set `K` contained in an open set `U`, we can find an (open) entourage `V` such that the ball of size `V` about any point of `K` is contained in `U`. -/ theorem lebesgue_number_of_compact_open [UniformSpace α] {K U : Set α} (hK : IsCompact K) (hU : IsOpen U) (hKU : K ⊆ U) : ∃ V ∈ 𝓤 α, IsOpen V ∧ ∀ x ∈ K, UniformSpace.ball x V ⊆ U := by let W : K → Set (α × α) := fun k => Classical.choose <| is_open_iff_open_ball_subset.mp hU k.1 <| hKU k.2 have hW : ∀ k, W k ∈ 𝓤 α ∧ IsOpen (W k) ∧ UniformSpace.ball k.1 (W k) ⊆ U := by intro k obtain ⟨h₁, h₂, h₃⟩ := Classical.choose_spec (is_open_iff_open_ball_subset.mp hU k.1 (hKU k.2)) exact ⟨h₁, h₂, h₃⟩ let c : K → Set α := fun k => UniformSpace.ball k.1 (W k) have hc₁ : ∀ k, IsOpen (c k) := fun k => UniformSpace.isOpen_ball k.1 (hW k).2.1 have hc₂ : K ⊆ ⋃ i, c i := by intro k hk simp only [mem_Union, SetCoe.exists] exact ⟨k, hk, UniformSpace.mem_ball_self k (hW ⟨k, hk⟩).1⟩ have hc₃ : ∀ k, c k ⊆ U := fun k => (hW k).2.2 obtain ⟨V, hV, hV'⟩ := lebesgue_number_lemma hK hc₁ hc₂ refine' ⟨interior V, interior_mem_uniformity hV, isOpen_interior, _⟩ intro k hk obtain ⟨k', hk'⟩ := hV' k hk exact ((ball_mono interior_subset k).trans hk').trans (hc₃ k') #align lebesgue_number_of_compact_open lebesgue_number_of_compact_open /-! ### Expressing continuity properties in uniform spaces We reformulate the various continuity properties of functions taking values in a uniform space in terms of the uniformity in the target. Since the same lemmas (essentially with the same names) also exist for metric spaces and emetric spaces (reformulating things in terms of the distance or the edistance in the target), we put them in a namespace `uniform` here. In the metric and emetric space setting, there are also similar lemmas where one assumes that both the source and the target are metric spaces, reformulating things in terms of the distance on both sides. These lemmas are generally written without primes, and the versions where only the target is a metric space is primed. We follow the same convention here, thus giving lemmas with primes. 
-/ namespace Uniform variable [UniformSpace α] #print Uniform.tendsto_nhds_right /- theorem tendsto_nhds_right {f : Filter β} {u : β → α} {a : α} : Tendsto u f (𝓝 a) ↔ Tendsto (fun x => (a, u x)) f (𝓤 α) := by rw [nhds_eq_comap_uniformity, tendsto_comap_iff] #align uniform.tendsto_nhds_right Uniform.tendsto_nhds_right -/ #print Uniform.tendsto_nhds_left /- theorem tendsto_nhds_left {f : Filter β} {u : β → α} {a : α} : Tendsto u f (𝓝 a) ↔ Tendsto (fun x => (u x, a)) f (𝓤 α) := by rw [nhds_eq_comap_uniformity', tendsto_comap_iff] #align uniform.tendsto_nhds_left Uniform.tendsto_nhds_left -/ #print Uniform.continuousAt_iff'_right /- theorem continuousAt_iff'_right [TopologicalSpace β] {f : β → α} {b : β} : ContinuousAt f b ↔ Tendsto (fun x => (f b, f x)) (𝓝 b) (𝓤 α) := by rw [ContinuousAt, tendsto_nhds_right] #align uniform.continuous_at_iff'_right Uniform.continuousAt_iff'_right -/ #print Uniform.continuousAt_iff'_left /- theorem continuousAt_iff'_left [TopologicalSpace β] {f : β → α} {b : β} : ContinuousAt f b ↔ Tendsto (fun x => (f x, f b)) (𝓝 b) (𝓤 α) := by rw [ContinuousAt, tendsto_nhds_left] #align uniform.continuous_at_iff'_left Uniform.continuousAt_iff'_left -/ /- warning: uniform.continuous_at_iff_prod -> Uniform.continuousAt_iff_prod is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : TopologicalSpace.{u2} β] {f : β -> α} {b : β}, Iff (ContinuousAt.{u2, u1} β α _inst_2 (UniformSpace.toTopologicalSpace.{u1} α _inst_1) f b) (Filter.Tendsto.{u2, u1} (Prod.{u2, u2} β β) (Prod.{u1, u1} α α) (fun (x : Prod.{u2, u2} β β) => Prod.mk.{u1, u1} α α (f (Prod.fst.{u2, u2} β β x)) (f (Prod.snd.{u2, u2} β β x))) (nhds.{u2} (Prod.{u2, u2} β β) (Prod.topologicalSpace.{u2, u2} β β _inst_2 _inst_2) (Prod.mk.{u2, u2} β β b b)) (uniformity.{u1} α _inst_1)) but is expected to have type forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : UniformSpace.{u1} α] [_inst_2 : TopologicalSpace.{u2} β] {f : β -> α} {b : β}, Iff (ContinuousAt.{u2, u1} β α _inst_2 (UniformSpace.toTopologicalSpace.{u1} α _inst_1) f b) (Filter.Tendsto.{u2, u1} (Prod.{u2, u2} β β) (Prod.{u1, u1} α α) (fun (x : Prod.{u2, u2} β β) => Prod.mk.{u1, u1} α α (f (Prod.fst.{u2, u2} β β x)) (f (Prod.snd.{u2, u2} β β x))) (nhds.{u2} (Prod.{u2, u2} β β) (instTopologicalSpaceProd.{u2, u2} β β _inst_2 _inst_2) (Prod.mk.{u2, u2} β β b b)) (uniformity.{u1} α _inst_1)) Case conversion may be inaccurate. Consider using '#align uniform.continuous_at_iff_prod Uniform.continuousAt_iff_prodₓ'. 
-/ theorem continuousAt_iff_prod [TopologicalSpace β] {f : β → α} {b : β} : ContinuousAt f b ↔ Tendsto (fun x : β × β => (f x.1, f x.2)) (𝓝 (b, b)) (𝓤 α) := ⟨fun H => le_trans (H.prod_map' H) (nhds_le_uniformity _), fun H => continuousAt_iff'_left.2 <| H.comp <| tendsto_id.prod_mk_nhds tendsto_const_nhds⟩ #align uniform.continuous_at_iff_prod Uniform.continuousAt_iff_prod #print Uniform.continuousWithinAt_iff'_right /- theorem continuousWithinAt_iff'_right [TopologicalSpace β] {f : β → α} {b : β} {s : Set β} : ContinuousWithinAt f s b ↔ Tendsto (fun x => (f b, f x)) (𝓝[s] b) (𝓤 α) := by rw [ContinuousWithinAt, tendsto_nhds_right] #align uniform.continuous_within_at_iff'_right Uniform.continuousWithinAt_iff'_right -/ #print Uniform.continuousWithinAt_iff'_left /- theorem continuousWithinAt_iff'_left [TopologicalSpace β] {f : β → α} {b : β} {s : Set β} : ContinuousWithinAt f s b ↔ Tendsto (fun x => (f x, f b)) (𝓝[s] b) (𝓤 α) := by rw [ContinuousWithinAt, tendsto_nhds_left] #align uniform.continuous_within_at_iff'_left Uniform.continuousWithinAt_iff'_left -/ #print Uniform.continuousOn_iff'_right /- theorem continuousOn_iff'_right [TopologicalSpace β] {f : β → α} {s : Set β} : ContinuousOn f s ↔ ∀ b ∈ s, Tendsto (fun x => (f b, f x)) (𝓝[s] b) (𝓤 α) := by simp [ContinuousOn, continuous_within_at_iff'_right] #align uniform.continuous_on_iff'_right Uniform.continuousOn_iff'_right -/ #print Uniform.continuousOn_iff'_left /- theorem continuousOn_iff'_left [TopologicalSpace β] {f : β → α} {s : Set β} : ContinuousOn f s ↔ ∀ b ∈ s, Tendsto (fun x => (f x, f b)) (𝓝[s] b) (𝓤 α) := by simp [ContinuousOn, continuous_within_at_iff'_left] #align uniform.continuous_on_iff'_left Uniform.continuousOn_iff'_left -/ #print Uniform.continuous_iff'_right /- theorem continuous_iff'_right [TopologicalSpace β] {f : β → α} : Continuous f ↔ ∀ b, Tendsto (fun x => (f b, f x)) (𝓝 b) (𝓤 α) := continuous_iff_continuousAt.trans <| forall_congr' fun b => tendsto_nhds_right #align uniform.continuous_iff'_right Uniform.continuous_iff'_right -/ #print Uniform.continuous_iff'_left /- theorem continuous_iff'_left [TopologicalSpace β] {f : β → α} : Continuous f ↔ ∀ b, Tendsto (fun x => (f x, f b)) (𝓝 b) (𝓤 α) := continuous_iff_continuousAt.trans <| forall_congr' fun b => tendsto_nhds_left #align uniform.continuous_iff'_left Uniform.continuous_iff'_left -/ end Uniform /- warning: filter.tendsto.congr_uniformity -> Filter.Tendsto.congr_uniformity is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : UniformSpace.{u2} β] {f : α -> β} {g : α -> β} {l : Filter.{u1} α} {b : β}, (Filter.Tendsto.{u1, u2} α β f l (nhds.{u2} β (UniformSpace.toTopologicalSpace.{u2} β _inst_1) b)) -> (Filter.Tendsto.{u1, u2} α (Prod.{u2, u2} β β) (fun (x : α) => Prod.mk.{u2, u2} β β (f x) (g x)) l (uniformity.{u2} β _inst_1)) -> (Filter.Tendsto.{u1, u2} α β g l (nhds.{u2} β (UniformSpace.toTopologicalSpace.{u2} β _inst_1) b)) but is expected to have type forall {α : Type.{u2}} {β : Type.{u1}} [_inst_1 : UniformSpace.{u1} β] {f : α -> β} {g : α -> β} {l : Filter.{u2} α} {b : β}, (Filter.Tendsto.{u2, u1} α β f l (nhds.{u1} β (UniformSpace.toTopologicalSpace.{u1} β _inst_1) b)) -> (Filter.Tendsto.{u2, u1} α (Prod.{u1, u1} β β) (fun (x : α) => Prod.mk.{u1, u1} β β (f x) (g x)) l (uniformity.{u1} β _inst_1)) -> (Filter.Tendsto.{u2, u1} α β g l (nhds.{u1} β (UniformSpace.toTopologicalSpace.{u1} β _inst_1) b)) Case conversion may be inaccurate. 
Consider using '#align filter.tendsto.congr_uniformity Filter.Tendsto.congr_uniformityₓ'. -/ theorem Filter.Tendsto.congr_uniformity {α β} [UniformSpace β] {f g : α → β} {l : Filter α} {b : β} (hf : Tendsto f l (𝓝 b)) (hg : Tendsto (fun x => (f x, g x)) l (𝓤 β)) : Tendsto g l (𝓝 b) := Uniform.tendsto_nhds_right.2 <| (Uniform.tendsto_nhds_right.1 hf).uniformity_trans hg #align filter.tendsto.congr_uniformity Filter.Tendsto.congr_uniformity /- warning: uniform.tendsto_congr -> Uniform.tendsto_congr is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : UniformSpace.{u2} β] {f : α -> β} {g : α -> β} {l : Filter.{u1} α} {b : β}, (Filter.Tendsto.{u1, u2} α (Prod.{u2, u2} β β) (fun (x : α) => Prod.mk.{u2, u2} β β (f x) (g x)) l (uniformity.{u2} β _inst_1)) -> (Iff (Filter.Tendsto.{u1, u2} α β f l (nhds.{u2} β (UniformSpace.toTopologicalSpace.{u2} β _inst_1) b)) (Filter.Tendsto.{u1, u2} α β g l (nhds.{u2} β (UniformSpace.toTopologicalSpace.{u2} β _inst_1) b))) but is expected to have type forall {α : Type.{u2}} {β : Type.{u1}} [_inst_1 : UniformSpace.{u1} β] {f : α -> β} {g : α -> β} {l : Filter.{u2} α} {b : β}, (Filter.Tendsto.{u2, u1} α (Prod.{u1, u1} β β) (fun (x : α) => Prod.mk.{u1, u1} β β (f x) (g x)) l (uniformity.{u1} β _inst_1)) -> (Iff (Filter.Tendsto.{u2, u1} α β f l (nhds.{u1} β (UniformSpace.toTopologicalSpace.{u1} β _inst_1) b)) (Filter.Tendsto.{u2, u1} α β g l (nhds.{u1} β (UniformSpace.toTopologicalSpace.{u1} β _inst_1) b))) Case conversion may be inaccurate. Consider using '#align uniform.tendsto_congr Uniform.tendsto_congrₓ'. -/ theorem Uniform.tendsto_congr {α β} [UniformSpace β] {f g : α → β} {l : Filter α} {b : β} (hfg : Tendsto (fun x => (f x, g x)) l (𝓤 β)) : Tendsto f l (𝓝 b) ↔ Tendsto g l (𝓝 b) := ⟨fun h => h.congr_uniformity hfg, fun h => h.congr_uniformity hfg.uniformity_symm⟩ #align uniform.tendsto_congr Uniform.tendsto_congr
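A minimal usage sketch of the product lemmas above. It relies only on `UniformContinuous.prod_mk`, `uniformContinuous_fst` and `uniformContinuous_snd`, all of which appear in the file; the example itself is illustrative and not part of the original source.

example {X Y : Type _} [UniformSpace X] [UniformSpace Y] :
    UniformContinuous fun p : X × Y => (p.1, p.2) :=
  -- `fst` and `snd` are uniformly continuous, and `prod_mk` pairs them back up,
  -- recovering uniform continuity of the repackaged identity on the product.
  uniformContinuous_fst.prod_mk uniformContinuous_snd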
using AdventOfCode2017
using Test

@test puzzle01() == [1203, 1146]
@test puzzle02() == [32020, 236]
@test puzzle03() == [552, 330785]
@test puzzle04() == [386, 208]
@test puzzle05() == [360603, 25347697]
@test puzzle06() == [3156, 1610]
@test puzzle07() == ["cyrupz", nothing]
@test puzzle08() == [6343, 7184]
@test puzzle09() == [8337, 4330]
@test puzzle10() == [826, "d067d3f14d07e09c2e7308c3926605c4"]
@test puzzle11() == [707, 1490]
@test puzzle12() == [115, 221]
@test puzzle13() == [1580, 3943252]
@test puzzle14() == [8074, 1212]
@test puzzle15() == [609, 253]
@test puzzle16() == ["kgdchlfniambejop", "fjpmholcibdgeakn"]
@test puzzle17() == [417, 34334221]
@test puzzle18() == [9423, nothing]
@test puzzle19() == ["GINOWKYXH", 16636]
@test puzzle20() == [119, 471]
State Before: x y : ℝ
⊢ cosh 0 = 1
State After: no goals
Tactic: simp [cosh]
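For context, here is a hedged, self-contained sketch of the lemma this tactic state most likely records; the statement, the qualified names and the import are my assumptions, not part of the snippet. Unfolding `cosh` lets `simp` close the goal, exactly as the recorded tactic does.

import Mathlib.Data.Complex.Exponential

-- `Real.cosh x` is the real part of `Complex.cosh x`,
-- so `simp` can finish once the definition is unfolded.
example : Real.cosh 0 = 1 := by
  simp [Real.cosh]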
subsection \<open>NotNode Phase\<close>

theory NotPhase
  imports Common
begin

phase NotNode
  terminating size
begin

(* Word level proofs *)
lemma bin_not_cancel:
  "bin[\<not>(\<not>(e))] = bin[e]"
  by auto

(* Value level proofs *)
lemma val_not_cancel:
  assumes "val[~(new_int b v)] \<noteq> UndefVal"
  shows "val[~(~(new_int b v))] = (new_int b v)"
  by (simp add: take_bit_not_take_bit)

(* Exp level proofs *)
lemma exp_not_cancel:
  "exp[~(~a)] \<ge> exp[a]"
  using val_not_cancel apply auto
  by (metis eval_unused_bits_zero intval_logic_negation.cases new_int.simps
      intval_not.simps(1) intval_not.simps(2) intval_not.simps(3) intval_not.simps(4))

text \<open>Optimisations\<close>

optimization NotCancel: "exp[~(~a)] \<longmapsto> a"
  by (metis exp_not_cancel)

end (* End of NotPhase *)

end (* End of file *)
c******6/10/98 corrected output formatting error in statement #30 amd c********12/12/97 modivied as suggested by Jeff Grenda to address problem c of code blowing up when exit channel lower than entrance c (for only 1 well and 1 exit channel) c SETCALC******************* subroutine setCalc(lecho,option) c finds how deep the wells are on the chart; needed for high p limit c also determines actual well depths in kcal implicit none include 'cdparams.fh' include 'cdwell0.fh' include 'cdwell1.fh' include 'cdwell2.fh' include 'cdlabels.fh' include 'cdcontrl.fh' c local variables integer iwell,iowell,iprod,icount integer idepth,lecho,ioccur real*8 Epmax character option*8 cjmg fix - already declared c logical isomers c end declarations c check isomerization matrix do 50 iwell = 1,nwells do 50 iowell = iwell,nwells if (.not.(((Aisom(iwell,iowell).gt.1.d-175).and. 2 (Aisom(iowell,iwell).gt.1.d-175)).or. 3 ((Aisom(iwell,iowell).lt.1.d-175).and.(Aisom(iowell, 4 iwell).lt.1.d-175)))) then write(lecho,40) ISname(iwell),ISname(iowell) 40 format(/,' ERROR: no reciprocity between isomers ', 2 a,' and ',a) stop endif 50 continue c get minimum and maximum E exit channel for each well do 100 iwell = 1,nwells Exmin(iwell) = 1.d175 Exmax(iwell) = -1.d175 if (nprods(iwell).gt.0) then do 70 iprod = 1, nprods(iwell) Exmax(iwell) = dmax1(Exmax(iwell),Eprod(iwell,iprod)) Exmin(iwell) = dmin1(Exmin(iwell),Eprod(iwell,iprod)) 70 continue endif if (isomers) then do 80 iowell = 1,nwells if (Aisom(iwell,iowell).gt.1.d-175) then Exmax(iwell) = dmax1(Exmax(iwell), 2 Eisom(iwell,iowell)) Exmin(iwell) = dmin1(Exmin(iwell), 2 Eisom(iwell,iowell)) endif 80 continue endif 100 continue c input well is entrance and has depth 1; find those connected to depth 1 c and assign them depth 2; repeat etc.; first initialize do 170 iwell = 1,nwells idpwell(iwell) = nwells+1 170 continue idpwell(inpwell) = 1 do 200 idepth = 2,nwells do 190 iwell=1,nwells do 180 iowell = 1,nwells if ((Aisom(iowell,iwell).gt.1.d-175).and.(idpwell(iowell) 2 .eq.idepth-1).and.(idepth.lt.idpwell(iwell))) 3 idpwell(iwell) = idepth 180 continue 190 continue 200 continue c check for unconnected isomers do 250 iwell = 1,nwells if (idpwell(iwell).gt.nwells) then write(lecho,240) ISname(iwell) 240 format(/,1x,'ERROR: isomer ',a,' not connected to input ', 2 'channel.') stop endif 250 continue c dmm 20000627 c If ASA has been specified, and this is our 'first time' through c setCalc, then break out here for ASAScreen if(ASActrl) then write(*, 252) cjmg fix c252 format(1X, 'INFO::cdsets.f: setCalc check complete on whole',1X c $ ,'network. ASA specified, '/, 16X' leaving setCalc.') 252 format(1X, 'INFO::cdsets.f: setCalc check complete on whole',1X $ ,'network. ASA specified, '/, 16X,' leaving setCalc.') return endif c dmm 20000627 c now calculate well depths in kcal and EbarHP - the total barrier c to isomerization in the hp limit (one that takes the shortest c path) if (option.eq.'Chemact') then Ewell(inpwell) = -Eprod(inpwell,inpchan) else Ewell(inpwell) = 0.d0 endif EbarHP(inpwell) = Ewell(inpwell) do 300 idepth = 2,nwells do 290 iwell = 1,nwells if (idpwell(iwell).eq.idepth) then do 280 iowell = 1,nwells ioccur = 0 if ((idpwell(iowell).eq.idepth-1).and. 
2 (Aisom(iwell,iowell).gt.1.d-175)) then ioccur = ioccur+1 if (ioccur.eq.1) then Ewell(iwell) = Ewell(iowell) + 2 Eisom(iowell,iwell) - Eisom(iwell,iowell) EbarHP(iwell) = dmax1(EbarHP(iowell), 2 Ewell(iowell) + Eisom(iowell,iwell)) else if (ioccur.gt.1) then EbarHP(iwell) = dmin1(EbarHP(iwell), 2 dmax1(EbarHP(iowell), 3 Ewell(iowell) + Eisom(iowell,iwell))) endif endif 280 continue endif 290 continue 300 continue c now actually find most isolated output channel in hp limit Epmax = 0.d0 c*********added 12/12/97 as suggested by Jeff Grenda cjmg Epmax = -1.d10 do 400 iwell = 1,nwells if (EbarHP(iwell).gt.Epmax) then Epmax = EbarHP(iwell) iwcmax = iwell ipcmax = 0 endif do 400 iprod = 1,Nprods(iwell) if ((EbarHP(iwell) + Eprod(iwell,iprod)).gt.Epmax) then Epmax = EbarHP(iwell) + Ewell(iwell) + Eprod(iwell,iprod) iwcmax = iwell ipcmax = iprod endif 400 continue c now find EbarLP - the barrier to isomerization in the low pressure c limit - first initialize do 500 iwell = 1,nwells EbarLP(iwell) = EbarHP(iwell) 500 continue c we have to repeat this nwell times in order that we have everything c updated properly - this is the same algorithm as EbarHP, except c we don't care what the depth of the connecting well is do 600 icount = 1,nwells do 600 iwell = 1,nwells do 600 iowell = 1,nwells if (Aisom(iwell,iowell).gt.1.d-175) 2 EbarLP(iwell) = dmin1(EbarLP(iwell), 3 dmax1(EbarHP(iowell), 4 Ewell(iowell) + Eisom(iowell,iwell))) 600 continue c finally check consistency of E's do 660 iwell = 1,nwells do 660 iowell = 1,nwells if ((Aisom(iwell,iowell).gt.1.d-175).and. 2 abs( (Eisom(iwell,iowell)-Eisom(iowell,iwell)) - 3 (Ewell(iowell)-Ewell(iwell)) ).ge.1.d-2) then write(lecho,655) ISname(iwell),ISname(iowell) 655 format(/,1x,'ERROR: E''s on ',a,' and ',a,' are ', 2 'not consistent.') stop endif 660 continue return end c ECHO2****************** subroutine echo2(lecho,option) implicit none include 'cdparams.fh' include 'cdwell0.fh' include 'cdwell1.fh' include 'cdwell2.fh' include 'cdisprop.fh' include 'cdlabels.fh' c local variables integer iwell integer lecho character option*8 c end declarations if (option.eq.'Chemact') then write(lecho,10) 10 format(//,1x,'**** CHEMACT PARAMETERS ****') write(lecho,20) 20 format(/,1x,'REACTANT CHANNEL ',26x, 2 ' A ',4x,' n',4x,' alpha ',1x,' E (kcal)') write(lecho,30) inpwell,inpchan,PDname(inpwell,inpchan), 2 ISname(inpwell),Ain,rNin,alphaIn,Ein c******6/10/98 corrected output formatting error in statement below: c 30 format(1x,i2':-',i2,2x,a,' => ',a,1pe10.4,2x, c 2 0pf6.3,2x,1pe9.3,3x,f8.3) cjmg fix c30 format(1x,i2':-',i2,2x,a,' => ',a,1pe10.4,2x, c 2 0pf6.3,2x,1pe9.3,3x,0pf8.3) 30 format(1x,i2,':-',i2,2x,a,' => ',a,1pe10.4,2x, 2 0pf6.3,2x,1pe9.3,3x,0pf8.3) else write(lecho,35) inpwell 35 format(//,1x,'**** DISSOC (',i2,') PARAMETERS ****') write(lecho,40) 40 format(/,1x,'REACTANT CHANNEL ') write(lecho,45) inpwell,inpchan,ISname(inpwell),ISname(inpwell) cjmg fix c45 format(1x,i2': ',i2,2x,a,' => *',a) 45 format(1x,i2,': ',i2,2x,a,' => *',a) endif write(lecho,50) 50 format(/,1x,'WELL INFO ',6x,' kcal ',2x, 2 '# depth',2x,' sigma ',1x,' e/k ',3x,'Ebar HP', 3 3x,'Ebar LP') do 70 iwell = 1,nwells write(lecho,60) iwell,ISname(iwell),Ewell(iwell), 2 idpwell(iwell),sig(iwell),ek(iwell),EbarHP(iwell), 3 EbarLP(iwell) 60 format(1x,i2,2x,a,2x,f7.2,5x,i2,4x,f5.2,3x,f6.2,4x,f6.2, 2 4x,f6.2) 70 continue write(lecho,110) 110 format(/,1x,'Well depth (kcal) is determined from input', 2 ' barrier heights.') write(lecho,120) 120 format(1x,'Chart depth is determined by 
isomerization', 2 ' linkage.') return end c $Id$ c $Author$ c $Date$ c $Log$ c Revision 1.1 2007-02-20 23:10:23 sandeeps c Initial revision c c Revision 1.1 2003/04/23 19:11:12 dmmatheu c Initial revision c
module test_bukdu_system module NAStuff using Bukdu struct NA <: ApplicationController; conn::Conn end index(c::NA) = nothing hello(c::NA) = TEST_INTERNAL_ERROR end # module NAStuff using Test using Bukdu using .Bukdu.System function index end routes() do get("/na", NAStuff.NA, index) post("/hello", NAStuff.NA, NAStuff.hello) end result = Router.call(get, "/na") @test occursin("Bukdu.System.NotApplicableError", result.got) @test result.resp.status == 500 @test result.route.action === System.not_applicable Plug.Loggers.config[:error_stackframes_range] = 1:2 result = Router.call(post, "/hello") @test result.route.action === System.internal_error Routing.reset!() @test Plug.Loggers._regularize_text("가1", 1) == "가" struct Controller <: ApplicationController conn::Conn end function index(::Controller) end struct VeryLongNamedController <: ApplicationController conn::Conn end function index(::VeryLongNamedController) end routes() do get("/just", Controller, index) get("/long", VeryLongNamedController, index) end Router.call(get, "/just") Router.call(get, "/long") Routing.reset!() end # module test_bukdu_system module test_bukdu_system_proc_time using Bukdu function Bukdu.System.catch_request(route::Bukdu.Route, conn) conn.private[:req_time_ns] = time_ns() end using Logging: AbstractLogger struct MyLogger <: AbstractLogger stream end function Plug.Loggers.info_response(logger::MyLogger, conn::Conn, route::Bukdu.RouteAction) io = logger.stream proc_time = (time_ns() - conn.private[:req_time_ns]) / 1e9 print(io, proc_time, ' ') Plug.Loggers.default_info_response(io, conn, route) end plug(MyLogger, IOContext(Core.stdout, :color => Plug.Loggers.have_color())) get("/") do conn::Conn 42 end Router.call(get, "/") plug(Plug.Loggers.DefaultLogger) Routing.reset!() end # module test_bukdu_system_proc_time
Event-related potentials (ERPs) are indicators of brain activity related to cognitive processes. They can be detected from EEG signals and thus constitute an attractive non-invasive option to study cognitive information processing. The P300 wave is probably the most celebrated example of an event-related potential and it is classically studied in connection to the odd-ball paradigm experimental protocol, able to consistently provoke the brain wave. We propose the use of P300 detection to identify the scientific interest in a large set of images and train a computer with machine learning algorithms using the subject’s responses to the stimuli as the training data set. As a first step, we here describe a number of experiments designed to relate the P300 brain wave to the cognitive processes related to placing a scientific judgment on a picture and to study the number of images per second that can be processed by such a system.
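The abstract above describes the proposed pipeline only in prose. Below is a minimal sketch, not taken from the paper, of how P300-labelled EEG epochs could be used to train an image-interest classifier; the array shapes, the synthetic epochs and labels data, and the choice of logistic regression are all assumptions made for illustration.

# Hypothetical sketch: train a classifier of "scientific interest" from P300-labelled epochs.
# `epochs` stands in for real EEG recordings of shape (n_trials, n_channels, n_samples);
# `labels` marks trials whose image elicited a P300 (assumed ground truth from the subject).
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

rng = np.random.default_rng(0)
n_trials, n_channels, n_samples = 200, 8, 128
epochs = rng.normal(size=(n_trials, n_channels, n_samples))   # placeholder data
labels = rng.integers(0, 2, size=n_trials)                    # 1 = P300 present

# Flatten each epoch into a feature vector; a real system would band-pass filter,
# downsample and average repeated presentations of the same image first.
X = epochs.reshape(n_trials, -1)
X_train, X_test, y_train, y_test = train_test_split(X, labels, test_size=0.25, random_state=0)

clf = LogisticRegression(max_iter=1000)
clf.fit(X_train, y_train)
print("held-out accuracy:", accuracy_score(y_test, clf.predict(X_test)))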
> module Isomorphism.Operations > import Data.Fin > import Control.Isomorphism > %default total > %access public export > to : {A, B : Type} -> Iso A B -> (A -> B) > to (MkIso to from toFrom fromTo) = to > from : {A, B : Type} -> Iso A B -> (B -> A) > from (MkIso to from toFrom fromTo) = from > toFrom : {A, B : Type} -> (iso : Iso A B) -> (b : B) -> Isomorphism.Operations.to iso (Isomorphism.Operations.from iso b) = b > toFrom (MkIso to from toFrom fromTo) = toFrom > fromTo : {A, B : Type} -> (iso : Iso A B) -> (a : A) -> Isomorphism.Operations.from iso (Isomorphism.Operations.to iso a) = a > fromTo (MkIso to from toFrom fromTo) = fromTo
If $f$ is a continuous function on a connected set $S$, and the level set $\{x \in S \mid f(x) = a\}$ is open in $S$, then either $f$ is identically equal to $a$ on $S$, or $f$ is never equal to $a$ on $S$.
Members of our administrative team will enjoy full benefit eligibility, including Medical, Dental, Vision, 401k matching, company paid group term life insurance, paid vacation and sick time and an opportunity for growth that is second to none in the industry. Acadia Healthcare is committed to offering an enviable internal culture and environment that encourages and supports both professional and personal growth that you are proud of. The Office Assistant role will perform various administrative functions assigned in accordance with the office procedures of the clinic. Responsibilities may include answering telephones, bookkeeping, typing or word processing, office machine operation, and filing. We have 8 hour, daily shifts that allow our employees to have stable schedules and enjoy quality time away from the facility. Operating hours are Monday - Friday from 5:30 AM - 1:30 PM. Rotating Tuesday evenings from 12:00 PM - 8:00 PM. Occasional weekend hours available on Saturday and Sunday from 5:45 AM - 9:45 AM.
---------------------------------------------------------- -- QISKIT DEMO ------------------------------------------- ---------------------------------------------------------- -- imports: import Quipper import Quipper.Internal import Quantum.Synthesis.Matrix import Quantum.Synthesis.Ring import Quipper.Libraries.Synthesis import Data.Complex import Data.Ratio import Quipper.Libraries.Simulation ---------------------------------------------------------- -- How to write your quantum circuit? -------------------- ---------------------------------------------------------- -- There are many ways to implement a quantum circuit -- * Manual implementation -- * With circuit's matrix -- * Simulation ---------------------------------------------------------- -- * Manual implementation mybell1 :: Bool -> Circ (Qubit, Qubit) mybell1 b = do q <- qinit b x <- hadamard q y <- qinit False y <- qnot y `controlled` x return (x,y) main1 :: IO () main1 = do print_simple Preview (mybell1 False) -------------------------------------------------------------- -- * From a matrix invsq2 :: Cplx (RootTwo (Ratio Integer)) invsq2 = Cplx (RootTwo 0 (1 % 2)) 0 :: Cplx (RootTwo (Ratio Integer)) mymatrix :: Matrix Four Four (Cplx (RootTwo (Ratio Integer))) mymatrix = matrix [[ invsq2, 0, invsq2, 0], [ 0, invsq2, 0, invsq2], [ 0, invsq2, 0, -invsq2], [ invsq2, 0, -invsq2, 0]] synthesized = exact_synthesis mymatrix mybell2 :: Bool -> Bool -> Circ (Qubit, Qubit) mybell2 a b = do x <- qinit a y <- qinit b synthesized [x,y] return (x,y) main2 :: IO () main2 = do print_simple Preview (mybell2 False False) ---------------------------------------------------------------- -- * Simulate function simulate :: Circ (Qubit,Qubit) -> IO () simulate circuit = print (sim_generic (1.0::Float) circuit) main3 :: IO () main3 = do simulate (mybell1 False) main4 :: IO () main4 = do simulate (mybell2 False False) -- ---------------------------------------------------------------------- -- * Main main = do putStrLn "\n \n choose an option:" putStrLn " 1 - PDF manual implementation of circuit\n" putStrLn " 2 - PDF matrix implementation of circuit\n" putStrLn " 3 - simulation of first circuit\n" putStrLn " 4 - simulation of the second circuit\n" line <- getLine case line of "1" -> do main1 main "2" -> do main2 main "3" -> do main3 main "4" -> do main4 main _ -> do main
// -------------------------------------------------------------------------- // OpenMS -- Open-Source Mass Spectrometry // -------------------------------------------------------------------------- // Copyright The OpenMS Team -- Eberhard Karls University Tuebingen, // ETH Zurich, and Freie Universitaet Berlin 2002-2013. // // This software is released under a three-clause BSD license: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of any author or any participating institution // may be used to endorse or promote products derived from this software // without specific prior written permission. // For a full list of authors, refer to the file AUTHORS. // -------------------------------------------------------------------------- // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL ANY OF THE AUTHORS OR THE CONTRIBUTING // INSTITUTIONS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; // OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF // ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // -------------------------------------------------------------------------- // $Maintainer: Andreas Bertsch $ // $Authors: $ // -------------------------------------------------------------------------- // #ifndef OPENMS_MATH_STATISTICS_GAMMADISTRIBUTIONFITTER_H #define OPENMS_MATH_STATISTICS_GAMMADISTRIBUTIONFITTER_H #include <OpenMS/DATASTRUCTURES/String.h> #include <OpenMS/DATASTRUCTURES/DPosition.h> #include <vector> // gsl includes #include <gsl/gsl_rng.h> #include <gsl/gsl_vector.h> #include <gsl/gsl_multifit_nlin.h> namespace OpenMS { namespace Math { /** @brief Implements a fitter for the Gamma distribution. This class fits a Gamma distribution to a number of data points. The results as well as the initial guess are specified using the struct GammaDistributionFitResult. The formula with the fitted parameters can be transformed into a gnuplot formula using getGnuplotFormula() after fitting. The implementation is done using GSL fitting algorithms. 
@ingroup Math */ class OPENMS_DLLAPI GammaDistributionFitter { public: /// struct to represent the parameters of a gamma distribution struct GammaDistributionFitResult { public: GammaDistributionFitResult() : b(1.0), p(5.0) { } GammaDistributionFitResult(const GammaDistributionFitResult & rhs) : b(rhs.b), p(rhs.p) { } GammaDistributionFitResult & operator=(const GammaDistributionFitResult & rhs) { if (this != &rhs) { b = rhs.b; p = rhs.p; } return *this; } /// parameter b of the gamma distribution double b; /// parameter p of the gamma distribution double p; }; /// Default constructor GammaDistributionFitter(); /// Destructor virtual ~GammaDistributionFitter(); /// sets the gamma distribution start parameters b and p for the fitting void setInitialParameters(const GammaDistributionFitResult & result); /** @brief Fits a gamma distribution to the given data points @param points Input parameter which represents the point used for the fitting @exception Exception::UnableToFit is thrown if fitting cannot be performed */ GammaDistributionFitResult fit(std::vector<DPosition<2> > & points); /// returns the gnuplot formula of the fitted gamma distribution const String & getGnuplotFormula() const; protected: static int gammaDistributionFitterf_(const gsl_vector * x, void * params, gsl_vector * f); static int gammaDistributionFitterdf_(const gsl_vector * x, void * params, gsl_matrix * J); static int gammaDistributionFitterfdf_(const gsl_vector * x, void * params, gsl_vector * f, gsl_matrix * J); void printState_(size_t iter, gsl_multifit_fdfsolver * s); GammaDistributionFitResult init_param_; String gnuplot_formula_; private: /// Copy constructor (not implemented) GammaDistributionFitter(const GammaDistributionFitter & rhs); /// assignment operator (not implemented) GammaDistributionFitter & operator=(const GammaDistributionFitter & rhs); }; } } #endif
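The class documentation above describes the least-squares fit only in prose. The sketch below illustrates the same idea in Python rather than the OpenMS/GSL code: fitting the parameters b and p of a gamma-shaped density to (x, y) data points by nonlinear least squares. The parameterisation, the synthetic data and the use of scipy's curve_fit are assumptions for illustration, not the library's API; only the initial guess (b = 1.0, p = 5.0) is taken from the struct defaults above.

# Hypothetical illustration of the fitting concept (not the OpenMS API).
import numpy as np
from scipy.optimize import curve_fit
from scipy.special import gamma as gamma_fn

def gamma_density(x, b, p):
    # Assumed parameterisation: rate b, shape p.
    return (b ** p) * x ** (p - 1) * np.exp(-b * x) / gamma_fn(p)

# Synthetic (x, y) points sampled from a gamma density with b = 2.0, p = 3.0.
x = np.linspace(0.1, 8.0, 50)
y = gamma_density(x, 2.0, 3.0) + np.random.default_rng(0).normal(0.0, 1e-3, x.size)

# Initial guess mirrors GammaDistributionFitResult's defaults (b = 1.0, p = 5.0).
(b_fit, p_fit), _ = curve_fit(gamma_density, x, y, p0=(1.0, 5.0))
print(f"fitted b = {b_fit:.3f}, p = {p_fit:.3f}")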
(* *********************************************************************) (* *) (* The CertiKOS Certified Kit Operating System *) (* *) (* The FLINT Group, Yale University *) (* *) (* Copyright The FLINT Group, Yale University. All rights reserved. *) (* This file is distributed under the terms of the Yale University *) (* Non-Commercial License Agreement. *) (* *) (* *********************************************************************) Require Import ZArith. Require Import Decision. Open Scope Z_scope. (** * The [xomega] tactic *) (** Calculations involving page addresses and the like involve some integer divisions, which [omega] and friends cannot handle as is. To remedy this, we can add a preprocessing step where we prove inequalities which characterize the subterms which [omega] does not understand. So for instance, if the subexpression [a / b] appears, we can add the hypothesis [0 <= a - a / b * b < b] to the context. Then when [omega] abstracts the division as a variable [q], it will nonetheless know that [0 <= a - q * b < b] which is equivalent to [q = a / b]. The [xomega] tactic applies this principle to enable [omega] to solve goals involving [Z.div], [Z.modulo] and [Z.max]. *) Module XOmega. (** ** Inject [nat]'s into [Z] *) (** Although we mostly use [Z], occasionally some goals or hypotheses involving [nat] show up. This is the case for instance when doing induction on the [Calculate_foo] family of fixpoints. The rewrite base below makes sure everything is expressed in terms of [Z]: relations on [nat] are converted into relations on [Z], and the resulting [Z.of_nat]'s are pushed inwards in all expressions. *) (** Use [Z.max] to avoid side conditions. *) Lemma Nat2Z_inj_to_nat_max z: Z.of_nat (Z.to_nat z) = Z.max 0 z. Proof. destruct (decide (0 <= z)) as [Hz | Hz]. * rewrite Z2Nat.id by assumption. rewrite Zmax_right by assumption. reflexivity. * destruct z. - elim Hz. reflexivity. - elim Hz. apply Pos2Z.is_nonneg. - rewrite Z2Nat.inj_neg. rewrite Nat2Z.inj_0. rewrite Zmax_left by omega. reflexivity. Qed. Hint Rewrite <- Nat2Z.inj_iff Nat2Z.inj_compare : nat2z. Hint Rewrite -> Nat2Z.inj_le Nat2Z.inj_lt Nat2Z.inj_ge Nat2Z.inj_gt Nat2Z.inj_0 Nat2Z.inj_succ Nat2Z.inj_abs_nat Nat2Z.inj_add Nat2Z.inj_mul Nat2Z.inj_sub_max Nat2Z.inj_pred_max Nat2Z.inj_min Nat2Z.inj_max Nat2Z_inj_to_nat_max : nat2z. Ltac nat2z := autorewrite with nat2z in *. (** ** Characterize [Z.div] and [Z.max] by inequalities *) (** Ensure that no hypothesis of type [P] already exists, then use the provided tactic to add one. *) Ltac assert_new P tac := match goal with | H_already_there : P |- _ => fail 1 | _ => assert P by tac end. (** Characterize [Z.div a b]. *) Notation DIVSPEC a b := ((b < 0 /\ b < a - a / b * b <= 0) \/ (b = 0 /\ a / b = 0) \/ (b > 0 /\ 0 <= a - a / b * b < b))%Z. Lemma specify_division a b: DIVSPEC a b. Proof with try omega. destruct (decide (b < 0)) as [Hbn | Hbn]; [left | right; destruct (decide (b = 0)) as [Hbz | Hbz]; [left | right]]; split... * rewrite <- Zmod_eq_full... apply Z.mod_neg_bound... * subst. apply Zdiv_0_r... * rewrite <- Zmod_eq_full... apply Z.mod_pos_bound... Qed. Ltac specify_division a b := let tac := (apply specify_division) in assert_new (DIVSPEC a b) tac. (** Characterize [Z.modulo a b]. *) Notation MODSPEC a b := ((b = 0 /\ a mod b = 0) \/ (b <> 0 /\ a mod b = a - a / b * b))%Z. Lemma specify_modulo a b: MODSPEC a b. Proof with try omega. destruct (decide (b = 0)); [left | right]; split... * subst; apply Zmod_0_r. * apply Zmod_eq_full... Qed. 
Ltac specify_modulo a b := let tac := (apply specify_modulo) in assert_new (MODSPEC a b) tac. (** Characterize [Z.max a b]. *) Notation MAXSPEC a b := ((a >= b /\ Z.max a b = a) \/ (a < b /\ Z.max a b = b))%Z. Lemma specify_max a b: MAXSPEC a b. Proof. apply Zmax_spec. Qed. Ltac specify_max a b := let tac := (apply specify_max) in assert_new (MAXSPEC a b) tac. (** Now look in the hypotheses and goal for occurences of division and max, then instantiate the corresponding hypotheses. *) Ltac instantiate_extra_hypotheses_for P := match P with | context [Z.div ?a ?b] => specify_division a b | context [Z.modulo ?a ?b] => specify_modulo a b | context [Z.max ?a ?b] => specify_max a b end; (* If we found at least one, go on. *) try instantiate_extra_hypotheses_for P. Ltac instantiate_extra_hypotheses := repeat match goal with | H: ?P |- _ => instantiate_extra_hypotheses_for P | |- ?G => instantiate_extra_hypotheses_for G end. (** ** Assembling the pieces *) Ltac omegify := nat2z; instantiate_extra_hypotheses. End XOmega. (** Random hint: the extra hypotheses are a little hairy, so if you use [omegify] manually on its own, you may want to import the [XOmega] module to get the [FOOSPEC] notations. *) Ltac xomega := XOmega.omegify; omega. (** These should work as well if we ever need them. *) (* Require Import Psatz. Ltac xlia := XOmega.omegify; lia. Ltac xnia := XOmega.omegify; nia. *) (** Finally, a quick test. *) Goal forall (x: Z) (y: nat), (y >= max 2 17)%nat -> (~ Z.to_nat x < y)%nat -> - x * 5 / - 4 > x + x / 0. Proof. intros. xomega. Qed.
In 1985 , Bedell put forward an agricultural plan that he thought would increase production controls for farmers , thus raising prices for crops . This plan , backed by labor unions and certain Democrats , passed the Agriculture Committee as an amendment to farm legislation . It mandated a referendum that would then be used to determine what types of production controls to enact . The purpose of this plan was twofold : production controls would decrease the aggregate supply of crops , thus making individual crops cost more ( which would benefit farmers , who were in the middle of an acute debt crisis . ) Second , by styling it as a referendum , the farmers would get to decide the severity of the controls .
/- Copyright (c) 2021 Bolton Bailey. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Bolton Bailey ! This file was ported from Lean 3 source module data.nat.periodic ! leanprover-community/mathlib commit dc6c365e751e34d100e80fe6e314c3c3e0fd2988 ! Please do not edit these lines, except to modify the commit id ! if you have ported upstream changes. -/ import Mathlib.Algebra.Periodic import Mathlib.Data.Nat.Count import Mathlib.Data.Nat.Interval /-! # Periodic Functions on ℕ This file identifies a few functions on `ℕ` which are periodic, and also proves a lemma about periodic predicates which helps determine their cardinality when filtering intervals over them. -/ namespace Nat open Nat Function theorem periodic_gcd (a : ℕ) : Periodic (gcd a) a := by simp only [forall_const, gcd_add_self_right, eq_self_iff_true, Periodic] #align nat.periodic_gcd Nat.periodic_gcd theorem periodic_coprime (a : ℕ) : Periodic (coprime a) a := by simp only [coprime_add_self_right, forall_const, iff_self_iff, eq_iff_iff, Periodic] #align nat.periodic_coprime Nat.periodic_coprime theorem periodic_mod (a : ℕ) : Periodic (fun n => n % a) a := by simp only [forall_const, eq_self_iff_true, add_mod_right, Periodic] #align nat.periodic_mod Nat.periodic_mod theorem Function.Periodic.map_mod_nat {α : Type _} {f : ℕ → α} {a : ℕ} (hf : Periodic f a) : ∀ n, f (n % a) = f n := fun n => by conv_rhs => rw [← Nat.mod_add_div n a, mul_comm, ← Nat.nsmul_eq_mul, hf.nsmul] #align function.periodic.map_mod_nat Nat.Function.Periodic.map_mod_nat section Multiset open Multiset /-- An interval of length `a` filtered over a periodic predicate of period `a` has cardinality equal to the number naturals below `a` for which `p a` is true. -/ theorem filter_multiset_Ico_card_eq_of_periodic (n a : ℕ) (p : ℕ → Prop) [DecidablePred p] (pp : Periodic p a) : card (filter p (Ico n (n + a))) = a.count p := by rw [count_eq_card_filter_range, Finset.card, Finset.filter_val, Finset.range_val, ← multiset_Ico_map_mod n, ← map_count_True_eq_filter_card, ← map_count_True_eq_filter_card, map_map] congr; funext n exact (Function.Periodic.map_mod_nat pp n).symm #align nat.filter_multiset_Ico_card_eq_of_periodic Nat.filter_multiset_Ico_card_eq_of_periodic end Multiset section Finset open Finset /-- An interval of length `a` filtered over a periodic predicate of period `a` has cardinality equal to the number naturals below `a` for which `p a` is true. -/ theorem filter_Ico_card_eq_of_periodic (n a : ℕ) (p : ℕ → Prop) [DecidablePred p] (pp : Periodic p a) : ((Ico n (n + a)).filter p).card = a.count p := filter_multiset_Ico_card_eq_of_periodic n a p pp #align nat.filter_Ico_card_eq_of_periodic Nat.filter_Ico_card_eq_of_periodic end Finset end Nat
/** * * @precisions normal z -> c d s * **/ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <cblas.h> #include <lapacke.h> #include <plasma.h> #include <core_blas.h> #include "auxiliary.h" /*------------------------------------------------------------------- * Check the orthogonality of Q */ int z_check_orthogonality(int M, int N, int LDQ, PLASMA_Complex64_t *Q) { double alpha, beta; double normQ; int info_ortho; int i; int minMN = min(M, N); double eps; double *work = (double *)malloc(minMN*sizeof(double)); eps = LAPACKE_dlamch_work('e'); alpha = 1.0; beta = -1.0; /* Build the idendity matrix USE DLASET?*/ PLASMA_Complex64_t *Id = (PLASMA_Complex64_t *) malloc(minMN*minMN*sizeof(PLASMA_Complex64_t)); memset((void*)Id, 0, minMN*minMN*sizeof(PLASMA_Complex64_t)); for (i = 0; i < minMN; i++) Id[i*minMN+i] = (PLASMA_Complex64_t)1.0; /* Perform Id - Q'Q */ if (M >= N) cblas_zherk(CblasColMajor, CblasUpper, CblasConjTrans, N, M, alpha, Q, LDQ, beta, Id, N); else cblas_zherk(CblasColMajor, CblasUpper, CblasNoTrans, M, N, alpha, Q, LDQ, beta, Id, M); normQ = LAPACKE_zlansy_work(LAPACK_COL_MAJOR, 'i', 'u', minMN, Id, minMN, work); printf("============\n"); printf("Checking the orthogonality of Q \n"); printf("||Id-Q'*Q||_oo / (N*eps) = %e \n",normQ/(minMN*eps)); if ( isnan(normQ / (minMN * eps)) || (normQ / (minMN * eps) > 10.0) ) { printf("-- Orthogonality is suspicious ! \n"); info_ortho=1; } else { printf("-- Orthogonality is CORRECT ! \n"); info_ortho=0; } free(work); free(Id); return info_ortho; } /*------------------------------------------------------------ * Check the factorization QR */ int z_check_QRfactorization(int M, int N, PLASMA_Complex64_t *A1, PLASMA_Complex64_t *A2, int LDA, PLASMA_Complex64_t *Q) { double Anorm, Rnorm; PLASMA_Complex64_t alpha, beta; int info_factorization; int i,j; double eps; eps = LAPACKE_dlamch_work('e'); PLASMA_Complex64_t *Ql = (PLASMA_Complex64_t *)malloc(M*N*sizeof(PLASMA_Complex64_t)); PLASMA_Complex64_t *Residual = (PLASMA_Complex64_t *)malloc(M*N*sizeof(PLASMA_Complex64_t)); double *work = (double *)malloc(max(M,N)*sizeof(double)); alpha=1.0; beta=0.0; if (M >= N) { /* Extract the R */ PLASMA_Complex64_t *R = (PLASMA_Complex64_t *)malloc(N*N*sizeof(PLASMA_Complex64_t)); memset((void*)R, 0, N*N*sizeof(PLASMA_Complex64_t)); LAPACKE_zlacpy_work(LAPACK_COL_MAJOR,'u', M, N, A2, LDA, R, N); /* Perform Ql=Q*R */ memset((void*)Ql, 0, M*N*sizeof(PLASMA_Complex64_t)); cblas_zgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, M, N, N, CBLAS_SADDR(alpha), Q, LDA, R, N, CBLAS_SADDR(beta), Ql, M); free(R); } else { /* Extract the L */ PLASMA_Complex64_t *L = (PLASMA_Complex64_t *)malloc(M*M*sizeof(PLASMA_Complex64_t)); memset((void*)L, 0, M*M*sizeof(PLASMA_Complex64_t)); LAPACKE_zlacpy_work(LAPACK_COL_MAJOR,'l', M, N, A2, LDA, L, M); /* Perform Ql=LQ */ memset((void*)Ql, 0, M*N*sizeof(PLASMA_Complex64_t)); cblas_zgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, M, N, M, CBLAS_SADDR(alpha), L, M, Q, LDA, CBLAS_SADDR(beta), Ql, M); free(L); } /* Compute the Residual */ for (i = 0; i < M; i++) for (j = 0 ; j < N; j++) Residual[j*M+i] = A1[j*LDA+i]-Ql[j*M+i]; Rnorm = LAPACKE_zlange_work(LAPACK_COL_MAJOR, 'i', M, N, Residual, M, work); Anorm = LAPACKE_zlange_work(LAPACK_COL_MAJOR, 'i', M, N, A2, LDA, work); if (M >= N) { printf("============\n"); printf("Checking the QR Factorization \n"); printf("-- ||A-QR||_oo/(||A||_oo.N.eps) = %e \n",Rnorm/(Anorm*N*eps)); } else { printf("============\n"); printf("Checking the LQ Factorization \n"); 
printf("-- ||A-LQ||_oo/(||A||_oo.N.eps) = %e \n",Rnorm/(Anorm*N*eps)); } if (isnan(Rnorm / (Anorm * N *eps)) || (Rnorm / (Anorm * N * eps) > 10.0) ) { printf("-- Factorization is suspicious ! \n"); info_factorization = 1; } else { printf("-- Factorization is CORRECT ! \n"); info_factorization = 0; } free(work); free(Ql); free(Residual); return info_factorization; } /*------------------------------------------------------------------------ * Check the factorization of the matrix A2 */ int z_check_LLTfactorization(int N, PLASMA_Complex64_t *A1, PLASMA_Complex64_t *A2, int LDA, int uplo) { double Anorm, Rnorm; PLASMA_Complex64_t alpha; int info_factorization; int i,j; double eps; eps = LAPACKE_dlamch_work('e'); PLASMA_Complex64_t *Residual = (PLASMA_Complex64_t *)malloc(N*N*sizeof(PLASMA_Complex64_t)); PLASMA_Complex64_t *L1 = (PLASMA_Complex64_t *)malloc(N*N*sizeof(PLASMA_Complex64_t)); PLASMA_Complex64_t *L2 = (PLASMA_Complex64_t *)malloc(N*N*sizeof(PLASMA_Complex64_t)); double *work = (double *)malloc(N*sizeof(double)); memset((void*)L1, 0, N*N*sizeof(PLASMA_Complex64_t)); memset((void*)L2, 0, N*N*sizeof(PLASMA_Complex64_t)); alpha= 1.0; LAPACKE_zlacpy_work(LAPACK_COL_MAJOR,' ', N, N, A1, LDA, Residual, N); /* Dealing with L'L or U'U */ if (uplo == PlasmaUpper){ LAPACKE_zlacpy_work(LAPACK_COL_MAJOR,'u', N, N, A2, LDA, L1, N); LAPACKE_zlacpy_work(LAPACK_COL_MAJOR,'u', N, N, A2, LDA, L2, N); cblas_ztrmm(CblasColMajor, CblasLeft, CblasUpper, CblasConjTrans, CblasNonUnit, N, N, CBLAS_SADDR(alpha), L1, N, L2, N); } else{ LAPACKE_zlacpy_work(LAPACK_COL_MAJOR,'l', N, N, A2, LDA, L1, N); LAPACKE_zlacpy_work(LAPACK_COL_MAJOR,'l', N, N, A2, LDA, L2, N); cblas_ztrmm(CblasColMajor, CblasRight, CblasLower, CblasConjTrans, CblasNonUnit, N, N, CBLAS_SADDR(alpha), L1, N, L2, N); } /* Compute the Residual || A -L'L|| */ for (i = 0; i < N; i++) for (j = 0; j < N; j++) Residual[j*N+i] = L2[j*N+i] - Residual[j*N+i]; Rnorm = LAPACKE_zlange_work(LAPACK_COL_MAJOR, 'i', N, N, Residual, N, work); Anorm = LAPACKE_zlange_work(LAPACK_COL_MAJOR, 'i', N, N, A1, LDA, work); printf("============\n"); printf("Checking the Cholesky Factorization \n"); printf("-- ||L'L-A||_oo/(||A||_oo.N.eps) = %e \n",Rnorm/(Anorm*N*eps)); if ( isnan(Rnorm/(Anorm*N*eps)) || (Rnorm/(Anorm*N*eps) > 10.0) ){ printf("-- Factorization is suspicious ! \n"); info_factorization = 1; } else{ printf("-- Factorization is CORRECT ! 
\n"); info_factorization = 0; } free(Residual); free(L1); free(L2); free(work); return info_factorization; } /*-------------------------------------------------------------- * Check the gemm */ double z_check_gemm(PLASMA_enum transA, PLASMA_enum transB, int M, int N, int K, PLASMA_Complex64_t alpha, PLASMA_Complex64_t *A, int LDA, PLASMA_Complex64_t *B, int LDB, PLASMA_Complex64_t beta, PLASMA_Complex64_t *Cplasma, PLASMA_Complex64_t *Cref, int LDC, double *Cinitnorm, double *Cplasmanorm, double *Clapacknorm ) { PLASMA_Complex64_t beta_const = -1.0; double Rnorm; double *work = (double *)malloc(max(K,max(M, N))* sizeof(double)); *Cinitnorm = LAPACKE_zlange_work(LAPACK_COL_MAJOR, 'i', M, N, Cref, LDC, work); *Cplasmanorm = LAPACKE_zlange_work(LAPACK_COL_MAJOR, 'i', M, N, Cplasma, LDC, work); cblas_zgemm(CblasColMajor, (CBLAS_TRANSPOSE)transA, (CBLAS_TRANSPOSE)transB, M, N, K, CBLAS_SADDR(alpha), A, LDA, B, LDB, CBLAS_SADDR(beta), Cref, LDC); *Clapacknorm = LAPACKE_zlange_work(LAPACK_COL_MAJOR, 'i', M, N, Cref, LDC, work); cblas_zaxpy(LDC * N, CBLAS_SADDR(beta_const), Cplasma, 1, Cref, 1); Rnorm = LAPACKE_zlange_work(LAPACK_COL_MAJOR, 'i', M, N, Cref, LDC, work); free(work); return Rnorm; } /*-------------------------------------------------------------- * Check the trsm */ double z_check_trsm(PLASMA_enum side, PLASMA_enum uplo, PLASMA_enum trans, PLASMA_enum diag, int M, int NRHS, PLASMA_Complex64_t alpha, PLASMA_Complex64_t *A, int LDA, PLASMA_Complex64_t *Bplasma, PLASMA_Complex64_t *Bref, int LDB, double *Binitnorm, double *Bplasmanorm, double *Blapacknorm ) { PLASMA_Complex64_t beta_const = -1.0; double Rnorm; double *work = (double *)malloc(max(M, NRHS)* sizeof(double)); /*double eps = LAPACKE_dlamch_work('e');*/ *Binitnorm = LAPACKE_zlange_work(LAPACK_COL_MAJOR, 'i', M, NRHS, Bref, LDB, work); *Bplasmanorm = LAPACKE_zlange_work(LAPACK_COL_MAJOR, 'm', M, NRHS, Bplasma, LDB, work); cblas_ztrsm(CblasColMajor, (CBLAS_SIDE)side, (CBLAS_UPLO)uplo, (CBLAS_TRANSPOSE)trans, (CBLAS_DIAG)diag, M, NRHS, CBLAS_SADDR(alpha), A, LDA, Bref, LDB); *Blapacknorm = LAPACKE_zlange_work(LAPACK_COL_MAJOR, 'm', M, NRHS, Bref, LDB, work); cblas_zaxpy(LDB * NRHS, CBLAS_SADDR(beta_const), Bplasma, 1, Bref, 1); Rnorm = LAPACKE_zlange_work(LAPACK_COL_MAJOR, 'm', M, NRHS, Bref, LDB, work); Rnorm = Rnorm / *Blapacknorm; /* max(M,NRHS) * eps);*/ free(work); return Rnorm; } /*-------------------------------------------------------------- * Check the solution */ double z_check_solution(int M, int N, int NRHS, PLASMA_Complex64_t *A, int LDA, PLASMA_Complex64_t *B, PLASMA_Complex64_t *X, int LDB, double *anorm, double *bnorm, double *xnorm ) { /* int info_solution; */ double Rnorm = -1.00; PLASMA_Complex64_t zone = 1.0; PLASMA_Complex64_t mzone = -1.0; double *work = (double *)malloc(max(M, N)* sizeof(double)); *anorm = LAPACKE_zlange_work(LAPACK_COL_MAJOR, 'i', M, N, A, LDA, work); *xnorm = LAPACKE_zlange_work(LAPACK_COL_MAJOR, 'i', M, NRHS, X, LDB, work); *bnorm = LAPACKE_zlange_work(LAPACK_COL_MAJOR, 'i', N, NRHS, B, LDB, work); cblas_zgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, M, NRHS, N, CBLAS_SADDR(zone), A, LDA, X, LDB, CBLAS_SADDR(mzone), B, LDB); Rnorm = LAPACKE_zlange_work(LAPACK_COL_MAJOR, 'i', N, NRHS, B, LDB, work); free(work); return Rnorm; }
Just what we do. It's powerful enough to do the job & a 3 week holiday hardly made a dent in the charge on a 5 AH battery. I suspect that other makes of cordless 18V tools will also do a small vacuum cleaner for a similar price. We have a Challenger 530 - having moved on from a smaller 2 berth. The side diner is really useful & the extra acres of space around the kitchen makes it really comfortable to live in. Most importantly, SWMBO is really happy with it. We've spent a calendar month away in it, & any worries we might have had about choosing this layout vanished very quickly. Chances of someone, who tows a boat on a trailer, reading a forum called "Caravan Talk"? Not too high. On re-reading, I think you're right about that. Low down in the middle of the car is a good place to stow weighty items, even though I'm not too sure what would happen to it in the event of a crash. If the van's spare wheel lives in the "empty spare footwell of the car" Where does the car's spare wheel live? Toddler! Date of Birth: 1st April 1900. Thanks for that. There's a list of questions which _must_ be answered, & I do dislike being forced to hand over personal details - especially when it looks like it's marketing who want to know everything about you. We've put down a deposit on a new Challenger & I thought it would be a good idea to join Swift Talk. I wanted to read what owners of Swift vans liked and disliked about their vans, where snags can be found & if there are any common defects or problems. (Yes I can & do read what's posted here, but a dedicated forum might also be useful) Only, I can't. If you try to join Swift Talk there's a question you must answer before you're allowed to open an account. Caravan VIN - I don't have one & probably won't for a couple of months. I would like to raise this as a question on Swift Talk - but, obviously, I can't do that because. .. Can anyone on here, who is also in Swift Talk, take this up with them? The OP didn't say they _had_ to find a new insurer. The only time you will find out how good your insurance is, is when you need to make a claim. Clearly they made a claim & were pretty unhappy with the result. Different answers for different people in different situations. As our van is kept a few miles away, I bring the battery home & store it over the winter. I had a Kojak a few years back. One month after the warranty ended, the hydraulic seals started to leak a bit - & then failed altogether. Purpleline didn't offer any repair, replacement or even advice on the problem.
If $c$ is a nonzero real number, then the limit of $c \cdot f(x)$ as $x$ approaches $c \cdot l$ is $F$ if and only if the limit of $f(x)$ as $x$ approaches $l$ is $F$.
There are three World Heritage Sites on the island : the <unk> na <unk> , Skellig Michael and the Giant 's Causeway . A number of other places are on the tentative list , for example the <unk> , the <unk> Fields and Mount Stewart .
/*! \headerfile SparseMatrix.hpp "include/SparseMatrix.hpp" * "SparseMatrix.hpp" contains the class definition encapsulating the * data structure interface for providing sparse matrices to KernelComposer */ // Copyright 2020 United States Government as represented by the Administrator of the National // Aeronautics and Space Administration. No copyright is claimed in the United States under // Title 17, U.S. Code. All Other Rights Reserved. See Appendix A for 3rd party licenses. // // The Solid-Wave Sim (swSIM) platform is licensed under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. // // Unless required by applicable law or agreed to in writing, software distributed under the // License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, // either express or implied. See the License for the specific language governing permissions // and limitations under the License. #ifndef SWSIM_SPARSE_MATRIX_BASE_H #define SWSIM_SPARSE_MATRIX_BASE_H #include <string> //#include <Eigen/Sparse> namespace swSim{ /*! \class SparseMatrix SparseMatrix.hpp "include/SparseMatrix.hpp" * * Defines data structures for providing sparse matrices to KernelComposer */ class SparseMatrix_base{ public: /*! * Get Key-Name for this matrix */ virtual std::string getName() const =0; /*! * Get length of value array */ virtual size_t getNonZeroLength() const =0; /*! * Get pointer to head of value array */ virtual double* getValueArray() =0; /*! * Get pointer to head of column index array */ virtual int* getColumnIndexArray() =0; /*! * Get pointer to head of rowpointer array */ virtual int* getRowpointerArray() =0; /*! * Get length of rowpointer array */ virtual size_t getRowpointerLength() const =0; /*! * Get row dimension of the matrix */ virtual int getRowCount() const =0; /*! * Get column dimension of the matrix */ virtual int getColumnCount() const =0; }; }; #endif /*SWSIM_SPARSE_MATRIX_BASE_H*/
Formal statement is: lemma contractible_space_alt: "contractible_space X \<longleftrightarrow> (\<forall>a \<in> topspace X. homotopic_with (\<lambda>x. True) X X id (\<lambda>x. a))" (is "?lhs = ?rhs") Informal statement is: A topological space $X$ is contractible if and only if for every point $a \in X$, the identity map $X \to X$ is homotopic to the constant map $X \to X$ that maps every point to $a$.
program test real(kind = 8), parameter :: val1 = 3.14845624E4 real(kind = 8), parameter :: val2 = 3.14845624E+4 real(kind = 8), parameter :: val3 = 3.14845624E-4 print *, val1 print *, val2 print *, val3 end program test
! ! Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. ! See https://llvm.org/LICENSE.txt for license information. ! SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception ! ! test f2008 tan function taking a complex arguement program p use ISO_C_BINDING use check_mod interface subroutine get_expected_cf( src1, expct, n ) bind(C) use ISO_C_BINDING type(C_PTR), value :: src1 type(C_PTR), value :: expct integer(C_INT), value :: n end subroutine subroutine get_expected_cd( src1, expct, n ) bind(C) use ISO_C_BINDING type(C_PTR), value :: src1 type(C_PTR), value :: expct integer(C_INT), value :: n end subroutine end interface integer, parameter :: N=10 complex(4), target, dimension(N) :: cf_src1 complex(4), target, dimension(N) :: cf_rslt complex(4), target, dimension(N) :: cf_expct complex(4) :: valuecf complex(8), target, dimension(N) :: cd_src1 complex(8), target, dimension(N) :: cd_rslt complex(8), target, dimension(N) :: cd_expct complex(8) :: value8 valuecf = CMPLX(-31.4, -9.999) valuecd = CMPLX(-31.4_8, -9.999_8) do i = 0,N-1 cf_src1(i+1) = valuecf + CMPLX(i*6.97, i*2.2) cd_src1(i+1) = valuecd + CMPLX(i*6.97_8, i*2.2_8) enddo cf_rslt = tan(cf_src1) cd_rslt = tan(cd_src1) call get_expected_cf(C_LOC(cf_src1), C_LOC(cf_expct), N) call get_expected_cd(C_LOC(cd_src1), C_LOC(cd_expct), N) call checkc4( cf_rslt, cf_expct, N, rtoler=(0.0000003,0.0000003)) call checkc8( cd_rslt, cd_expct, N, rtoler=(0.0000003_8,0.0000003_8)) ! print *, "cf_expct:" ! print *, cf_expct ! print *, "cf_rslt:" ! print *, cf_rslt ! ! print *, "cd_expct:" ! print *, cd_expct ! print *, "cd_rslt:" ! print *, cd_rslt end program
module PackageCompiler using Pkg, Serialization, Libdl, UUIDs using Pkg: TOML, Operations, Types include("compiler_flags.jl") include("static_julia.jl") include("api.jl") include("snooping.jl") include("system_image.jl") include("pkg.jl") include("incremental.jl") const sysimage_binaries = ("sys.$(Libdl.dlext)",) function copy_system_image(src, dest, ignore_missing = false) for file in sysimage_binaries # backup srcfile = joinpath(src, file) destfile = joinpath(dest, file) if !isfile(srcfile) ignore_missing && continue error("No file: $srcfile") end if isfile(destfile) if isfile(destfile * ".backup") rm(destfile * ".backup", force = true) end mv(destfile, destfile * ".backup", force = true) end @info "Copying system image: $srcfile to $destfile" cp(srcfile, destfile, force = true) end end julia_cpu_target(x) = error("CPU target needs to be a string or `nothing`") julia_cpu_target(x::String) = x # TODO: match against available targets function julia_cpu_target(::Nothing) replace(Base.julia_cmd().exec[2], "-C" => "") end """ Reverts a forced compilation of the system image. This will restore any previously backed up system image files, or build a new, clean system image. """ function revert(debug = false) syspath = default_sysimg_path(debug) sysimg_backup = dirname(get_backup!(debug)) copy_system_image(sysimg_backup, syspath) end function get_root_dir(path) path, name = splitdir(path) if isempty(name) return splitdir(path)[2] else name end end function sysimg_folder(files...) base_path = normpath(abspath(joinpath(@__DIR__, "..", "sysimg"))) isdir(base_path) || mkpath(base_path) normpath(abspath(joinpath(base_path, files...))) end function sysimgbackup_folder(files...) backup = sysimg_folder("backup") isdir(backup) || mkpath(backup) sysimg_folder("backup", files...) end function package_folder(package...) packages = normpath(abspath(joinpath(@__DIR__, "..", "packages"))) isdir(packages) || mkpath(packages) normpath(abspath(joinpath(packages, package...))) end """ compile_package(packages...; kw_args...) with packages being either a string naming a package, or a tuple `(package_name, precompile_file)`. If no precompile file is given, it will use the packages `runtests.jl`, which is a good canditate for figuring out what functions to compile! """ function compile_package(packages...; kw_args...) args = map(packages) do package # If no explicit path to a seperate precompile file, use runtests isa(package, String) && return (package, "test/runtests.jl") isa(package, Tuple{String, String}) && return package error("Unrecognized package. Use `packagename::String`, or `(packagename::String, rel_path_to_testfile::String)`. Found: `$package`") end compile_package(args...; kw_args...) end """ compile_package( packages::Tuple{String, String}...; force = false, reuse = false, debug = false, cpu_target = nothing, additional_packages = Symbol[] ) Compile a list of packages. Each package comes as a tuple of `(package_name, precompile_file)` where the precompile file should contain all function calls, that should get compiled into the system image. Usually the `runtests.jl` file is a good candidate, since it should run all important functions of a package. You can pass `additional_packages` a vector of symbols with package names, to help AOT compiling uninstalled, recursive dependencies of `packages`. Look at `compile_incremental` to use a toml instead. 
""" function compile_package( packages::Tuple{String, String}...; force = false, reuse = false, debug = false, cpu_target = nothing, verbose = false ) userimg = sysimg_folder("precompile.jl") if !reuse # TODO that's a pretty weak way to check that it's not a path... ispackage = all(x-> !occursin(Base.Filesystem.path_separator, x), first.(packages)) isruntests = all(x-> x == "test/runtests.jl", last.(packages)) if ispackage && isruntests snoop_packages(Symbol.(first.(packages))...; file = userimg) else ispackage || @warn "Giving path to package deprecated. Use Package name!" isruntests || @warn "Giving a snoopfile is deprecated. Use runtests from package!" end end !isfile(userimg) && reuse && error("Nothing to reuse. Please run `compile_package(reuse = true)`") image_path = sysimg_folder() build_sysimg(image_path, userimg, cpu_target=cpu_target, verbose = verbose) imgfile = joinpath(image_path, "sys.$(Libdl.dlext)") syspath = joinpath(default_sysimg_path(debug), "sys.$(Libdl.dlext)") if force try backup = syspath * ".packagecompiler_backup" isfile(backup) || mv(syspath, backup) cp(imgfile, syspath) @info """ Replaced system image successfully. Next start of julia will load the newly compiled system image. If you encounter any errors with the new julia image, try `PackageCompiler.revert([debug = false])`. """ catch e @warn "An error occured while replacing sysimg files:" error = e @info "Recovering old system image from backup" # if any file is missing in default system image, revert! if !isfile(syspath) @info "$syspath missing. Reverting!" revert(debug) end end else @info """ Not replacing system image. You can start julia with $(`julia -J $imgfile`) at a posix shell to load the compiled files. """ end imgfile end export compile_package, revert, force_native_image!, executable_ext, build_executable, build_shared_lib, static_julia, compile_incremental end # module
[STATEMENT] lemma of_int_floor [simp]: "a \<in> \<int> \<Longrightarrow> of_int (floor a) = a" [PROOF STATE] proof (prove) goal (1 subgoal): 1. a \<in> \<int> \<Longrightarrow> of_int \<lfloor>a\<rfloor> = a [PROOF STEP] by (metis Ints_cases of_int_floor_cancel)
(* Default settings (from HsToCoq.Coq.Preamble) *) Generalizable All Variables. Unset Implicit Arguments. Set Maximal Implicit Insertion. Unset Strict Implicit. Unset Printing Implicit Defensive. Require Coq.Program.Tactics. Require Coq.Program.Wf. (* Converted imports: *) Require GHC.Base. (* Converted type declarations: *) Record MonadTrans__Dict (t : (Type -> Type) -> Type -> Type) := MonadTrans__Dict_Build { lift__ : forall {m : Type -> Type}, forall {a : Type}, forall `{GHC.Base.Monad m}, m a -> t m a }. Definition MonadTrans (t : (Type -> Type) -> Type -> Type) := forall r__, (MonadTrans__Dict t -> r__) -> r__. Existing Class MonadTrans. Definition lift `{g__0__ : MonadTrans t} : forall {m : Type -> Type}, forall {a : Type}, forall `{GHC.Base.Monad m}, m a -> t m a := g__0__ _ (lift__ t). (* No value declarations to convert. *) (* External variables: Type GHC.Base.Monad *)
/- File: signature_recover_public_key_verify_zero_soundness.lean Autogenerated file. -/ import starkware.cairo.lean.semantics.soundness.hoare import .signature_recover_public_key_code import ..signature_recover_public_key_spec open tactic open starkware.cairo.common.cairo_secp.field open starkware.cairo.common.cairo_secp.bigint open starkware.cairo.common.cairo_secp.constants variables {F : Type} [field F] [decidable_eq F] [prelude_hyps F] variable mem : F → F variable σ : register_state F /- starkware.cairo.common.cairo_secp.field.verify_zero autogenerated soundness theorem -/ theorem auto_sound_verify_zero -- arguments (range_check_ptr : F) (val : UnreducedBigInt3 F) -- code is in memory at σ.pc (h_mem : mem_at mem code_verify_zero σ.pc) -- input arguments on the stack (hin_range_check_ptr : range_check_ptr = mem (σ.fp - 6)) (hin_val : val = cast_UnreducedBigInt3 mem (σ.fp - 5)) -- conclusion : ensures_ret mem σ (λ κ τ, τ.ap = σ.ap + 11 ∧ ∃ μ ≤ κ, rc_ensures mem (rc_bound F) μ (mem (σ.fp - 6)) (mem $ τ.ap - 1) (spec_verify_zero mem κ range_check_ptr val (mem (τ.ap - 1)))) := begin apply ensures_of_ensuresb, intro νbound, have h_mem_rec := h_mem, unpack_memory code_verify_zero at h_mem with ⟨hpc0, hpc1, hpc2, hpc3, hpc4, hpc5, hpc6, hpc7, hpc8, hpc9, hpc10, hpc11, hpc12, hpc13, hpc14, hpc15, hpc16, hpc17, hpc18, hpc19, hpc20, hpc21, hpc22⟩, -- let (ap reference) apply of_register_state, intros regstate_q regstateeq_q, generalize' hl_rev_q: mem regstate_q.ap = q, have hl_q := hl_rev_q.symm, rw [regstateeq_q] at hl_q, try { dsimp at hl_q }, -- let (ap reference) apply of_register_state, intros regstate_q_biased regstateeq_q_biased, generalize' hl_rev_q_biased: mem (regstate_q_biased.ap + 1) = q_biased, have hl_q_biased := hl_rev_q_biased.symm, rw [regstateeq_q_biased] at hl_q_biased, try { dsimp at hl_q_biased }, -- assert eq step_assert_eq hpc0 hpc1 with temp0, have a0: q_biased = q + 2 ^ 127, { apply assert_eq_reduction temp0, try { simp only [add_neg_eq_sub, hin_range_check_ptr, hin_val, hl_q, hl_q_biased] }, try { dsimp [cast_UnreducedBigInt3] }, try { arith_simps; try { split }; triv <|> refl <|> simp <|> abel; try { norm_num } }, }, try { dsimp at a0 }, try { arith_simps at a0 }, clear temp0, -- assert eq step_assert_eq hpc2 with temp0, have a2: mem (range_check_ptr) = q_biased, { apply assert_eq_reduction temp0.symm, try { simp only [add_neg_eq_sub, hin_range_check_ptr, hin_val, hl_q, hl_q_biased] }, try { dsimp [cast_UnreducedBigInt3] }, try { arith_simps; try { split }; triv <|> refl <|> simp <|> abel; try { norm_num } }, }, try { dsimp at a2 }, try { arith_simps at a2 }, clear temp0, -- tempvar step_assert_eq hpc3 hpc4 with tv_r10, step_assert_eq hpc5 with tv_r11, step_assert_eq hpc6 hpc7 with tv_r12, generalize' hl_rev_r1: ((val.d0 + q * SECP_REM) / (BASE : ℤ) : F) = r1, have hl_r1 := hl_rev_r1.symm, clear hl_rev_r1, have htv_r1: r1 = _, { have h_δ3_c0 : ∀ x : F, x / (BASE : ℤ) = x * (-46768052394588894761721767695234645457402928824320 : ℤ), { intro x, apply div_eq_mul_inv', apply PRIME.int_cast_mul_eq_one, rw [PRIME], try { simp_int_casts }, norm_num1 }, apply eq.symm, apply eq.trans tv_r12, try { simp only [h_δ3_c0] at hl_r1 }, try { simp only [add_neg_eq_sub, hin_range_check_ptr, hin_val, hl_q, hl_q_biased, hl_r1] }, try { dsimp [cast_UnreducedBigInt3] }, try { arith_simps }, try { simp only [tv_r10, tv_r11] }, try { arith_simps; try { split }; triv <|> refl <|> simp <|> abel; try { norm_num } }, }, clear tv_r10 tv_r11 tv_r12, try { dsimp at hl_r1 }, try { arith_simps at hl_r1 
}, -- compound assert eq step_assert_eq hpc8 hpc9 with temp0, step_assert_eq hpc10 with temp1, have a8: mem (range_check_ptr + 1) = r1 + 2 ^ 127, { apply assert_eq_reduction temp1.symm, try { simp only [add_neg_eq_sub, hin_range_check_ptr, hin_val, hl_q, hl_q_biased, hl_r1, htv_r1] }, try { dsimp [cast_UnreducedBigInt3] }, try { arith_simps }, try { simp only [temp0] }, try { arith_simps; try { split }; triv <|> refl <|> simp <|> abel; try { norm_num } }, }, try { dsimp at a8 }, try { arith_simps at a8 }, clear temp0 temp1, -- tempvar step_assert_eq hpc11 with tv_r20, step_assert_eq hpc12 hpc13 with tv_r21, generalize' hl_rev_r2: ((val.d1 + r1) / (BASE : ℤ) : F) = r2, have hl_r2 := hl_rev_r2.symm, clear hl_rev_r2, have htv_r2: r2 = _, { have h_δ11_c0 : ∀ x : F, x / (BASE : ℤ) = x * (-46768052394588894761721767695234645457402928824320 : ℤ), { intro x, apply div_eq_mul_inv', apply PRIME.int_cast_mul_eq_one, rw [PRIME], try { simp_int_casts }, norm_num1 }, apply eq.symm, apply eq.trans tv_r21, try { simp only [h_δ11_c0] at hl_r2 }, try { simp only [add_neg_eq_sub, hin_range_check_ptr, hin_val, hl_q, hl_q_biased, hl_r1, htv_r1, hl_r2] }, try { dsimp [cast_UnreducedBigInt3] }, try { arith_simps }, try { simp only [tv_r20] }, try { arith_simps; try { split }; triv <|> refl <|> simp <|> abel; try { norm_num } }, }, clear tv_r20 tv_r21, try { dsimp at hl_r2 }, try { arith_simps at hl_r2 }, -- compound assert eq step_assert_eq hpc14 hpc15 with temp0, step_assert_eq hpc16 with temp1, have a14: mem (range_check_ptr + 2) = r2 + 2 ^ 127, { apply assert_eq_reduction temp1.symm, try { simp only [add_neg_eq_sub, hin_range_check_ptr, hin_val, hl_q, hl_q_biased, hl_r1, htv_r1, hl_r2, htv_r2] }, try { dsimp [cast_UnreducedBigInt3] }, try { arith_simps }, try { simp only [temp0] }, try { arith_simps; try { split }; triv <|> refl <|> simp <|> abel; try { norm_num } }, }, try { dsimp at a14 }, try { arith_simps at a14 }, clear temp0 temp1, -- compound assert eq have h_δ17_c0 : ((BASE : ℤ) / (4 : ℤ) : F) = (19342813113834066795298816 : ℤ), { apply PRIME.div_eq_const, { apply PRIME.cast_ne_zero, norm_num1, rw [PRIME], try { simp_int_casts }, norm_num1 }, rw [PRIME], try { simp_int_casts }, norm_num1 }, step_assert_eq hpc17 hpc18 with temp0, step_assert_eq hpc19 with temp1, have a17: val.d2 = q * ((BASE : ℤ) / (4 : ℤ)) - r2, { try { simp only [h_δ17_c0] }, apply assert_eq_reduction (eq_sub_of_eq_add temp1), try { simp only [add_neg_eq_sub, hin_range_check_ptr, hin_val, hl_q, hl_q_biased, hl_r1, htv_r1, hl_r2, htv_r2] }, try { dsimp [cast_UnreducedBigInt3] }, try { arith_simps }, try { simp only [temp0] }, try { arith_simps; try { split }; triv <|> refl <|> simp <|> abel; try { norm_num } }, }, try { dsimp at a17 }, try { arith_simps at a17 }, clear temp0 temp1, -- let generalize' hl_rev_range_check_ptr₁: (range_check_ptr + 3 : F) = range_check_ptr₁, have hl_range_check_ptr₁ := hl_rev_range_check_ptr₁.symm, clear hl_rev_range_check_ptr₁, try { dsimp at hl_range_check_ptr₁ }, try { arith_simps at hl_range_check_ptr₁ }, -- return step_assert_eq hpc20 hpc21 with hret0, step_ret hpc22, -- finish step_done, use_only [rfl, rfl], split, refl, -- range check condition use_only (3+0+0), split, linarith [], split, { arith_simps, try { simp only [hret0] }, try { arith_simps, refl <|> norm_cast }, try { refl } }, intro rc_h_range_check_ptr, repeat { rw [add_assoc] at rc_h_range_check_ptr }, have rc_h_range_check_ptr' := range_checked_add_right rc_h_range_check_ptr, -- Final Proof -- user-provided reduction suffices auto_spec: 
auto_spec_verify_zero mem _ range_check_ptr val _, { apply sound_verify_zero, apply auto_spec }, -- prove the auto generated assertion dsimp [auto_spec_verify_zero], try { norm_num1 }, try { arith_simps }, use_only [q], use_only [q_biased], use_only [a0], use_only [a2], cases rc_h_range_check_ptr' (0) (by norm_num1) with n hn, arith_simps at hn, use_only [n], { simp only [a2.symm, hin_range_check_ptr], arith_simps, exact hn }, use_only [r1, hl_r1], use_only [a8], cases rc_h_range_check_ptr' (1) (by norm_num1) with n hn, arith_simps at hn, use_only [n], { simp only [a8.symm, hin_range_check_ptr], arith_simps, exact hn }, use_only [r2, hl_r2], use_only [a14], cases rc_h_range_check_ptr' (2) (by norm_num1) with n hn, arith_simps at hn, use_only [n], { simp only [a14.symm, hin_range_check_ptr], arith_simps, exact hn }, use_only [a17], have rc_h_range_check_ptr₁ := range_checked_offset' rc_h_range_check_ptr, have rc_h_range_check_ptr₁' := range_checked_add_right rc_h_range_check_ptr₁,try { norm_cast at rc_h_range_check_ptr₁' }, use_only [range_check_ptr₁, hl_range_check_ptr₁], try { split, linarith }, try { ensures_simps; try { simp only [add_neg_eq_sub, hin_range_check_ptr, hin_val, hl_q, hl_q_biased, hl_r1, htv_r1, hl_r2, htv_r2, hl_range_check_ptr₁] }, }, try { dsimp [cast_UnreducedBigInt3] }, try { arith_simps }, try { simp only [hret0] }, try { arith_simps; try { split }; triv <|> refl <|> simp <|> abel; try { norm_num } }, end
% Author: Christian Ruppert <[email protected]> % $Id: insert.tex,v 1.1 2003/05/13 07:58:40 ruppert Exp $ % Copyright (C) 2003, Berlin University of Technology \documentclass{webpage} \begin{document} \title{MMTex insert samples} \subtitle{A sample of the insert command} \tableofcontents \section{The Source Code} \verbatim This is some \emph{normal} code, followed by the \emph{insert} command. \insert{insert.tex.code} \endverbatim \section{The Result} This is some \emph{normal} code, followed by the \emph{insert} command. \insert{insert.tex.code} \end{document}
Require Import Fiat.Common.BoundedLookup. Require Import Fiat.Narcissus.Common.Specs Fiat.Narcissus.BaseFormats Fiat.Narcissus.Formats.WordOpt. Require Import Coq.Vectors.Vector Bedrock.Word. Section Enum. Context {len : nat}. Context {A : Type}. Context {B : Type}. Context {cache : Cache}. Context {cacheAddNat : CacheAdd cache nat}. Context {monoid : Monoid B}. Context {monoidUnit : QueueMonoidOpt monoid bool}. Context {sz : nat}. Context {ta : t A (S len)}. Variable (tb : t (word sz) (S len)). Inductive NoDupVector {A} : forall {n}, Vector.t A n -> Prop := NoDupVector_nil : NoDupVector (Vector.nil _) | NoDupVector_cons : forall (x : A) {n} (l : Vector.t A n), ~ Vector.In x l -> NoDupVector l -> NoDupVector (Vector.cons _ x _ l). Lemma NoDupVector_invert {A'} : forall n (l : Vector.t A' n), NoDupVector l -> match l with | Vector.nil => True | Vector.cons a _ l' => ~ Vector.In a l' /\ NoDupVector l' end. Proof. clear; induction 1; eauto. Qed. Definition format_enum (idx : Fin.t _) : CacheFormat -> Comp (B * CacheFormat) := format_word (nth tb idx). Definition encode_enum (idx : Fin.t _) : CacheFormat -> B * CacheFormat := encode_word (nth tb idx). Lemma refine_format_enum : forall idx ce, refine (format_enum idx ce) (ret (encode_enum idx ce)). Proof. intros; reflexivity. Qed. Fixpoint word_indexed {n : nat} (w : word sz) (t : t (word sz) n) : Hopefully (Fin.t n) := match t in Vector.t _ n return Hopefully (Fin.t n) with | nil => OtherErrorInfo "Decoding enum error: index too large." | cons w' _ t' => if (weqb w w') then Ok (Fin.F1) else match word_indexed w t' with | Ok f => Ok (Fin.FS f) | Error e => Error e end end. Definition decode_enum (b : B) (cd : CacheDecode) : Hopefully (Fin.t _ * B * CacheDecode) := `(w, b', cd') <- decode_word (sz:=sz) b cd; HBind word_indexed w tb as idx With Ok (idx, b', cd'). Lemma word_indexed_correct : forall n (i : Fin.t n) (t : t (word sz) n), NoDupVector t -> match word_indexed (nth t i) t with | Ok w' => i = w' | _ => False end. Proof. clear. induction i. - intro; pattern n, t; apply Vector.caseS; simpl; intros. rewrite (proj2 (weqb_true_iff h h)); eauto. - intro; generalize i (IHi (Vector.tl t)); clear. pattern n, t; apply Vector.caseS; simpl. intros h n0 t0 i; case_eq (word_indexed (nth t0 i) t0); intros; apply NoDupVector_invert in H1; intuition subst. case_eq (weqb (nth t0 t1) h); intros; eauto. apply weqb_true_iff in H0; subst. destruct H2; generalize t0 H; clear; induction t1. + intro; pattern n, t0; apply Vector.caseS; simpl; intros; econstructor. + intro; revert t1 IHt1; pattern n, t0; apply Vector.caseS; simpl; intros. case_eq (weqb (nth t t1) h); intros; eauto. * apply weqb_true_iff in H0; subst; econstructor. * rewrite H0 in H. econstructor 2; apply IHt1. destruct (word_indexed (nth t t1) t); try discriminate. f_equal; apply Fin.FS_inj; congruence. Qed. Lemma word_indexed_correct': forall n (v : Fin.t n) (w : word sz) (t : t (word sz) n), word_indexed w t = Ok v -> w = nth t v. Proof. clear. induction v. - intros w tb; pattern n, tb; eapply Vector.caseS; simpl. intros; destruct (weqb w h) eqn: ?. eapply weqb_true_iff; eauto. destruct ( word_indexed w t); try discriminate. - intros w tb. revert w v IHv. pattern n, tb; eapply Vector.rectS; simpl; intros. inversion v. intros; destruct (weqb w a) eqn: ?. discriminate. destruct (word_indexed w v) eqn : ? ; try discriminate. eapply IHv. rewrite Heqh. f_equal. eapply Fin.FS_inj. congruence. Qed. 
Theorem Enum_decode_correct (tb_OK : NoDupVector tb) {P : CacheDecode -> Prop} (P_OK : cache_inv_Property P (fun P => forall b cd, P cd -> P (addD cd b))) : CorrectDecoder monoid (fun _ => True) (fun _ => True) eq format_enum decode_enum P format_enum. Proof. apply_bijection_rule' with (fun w => word_indexed w tb); intuition eauto using Word_decode_correct. eapply word_indexed_correct in tb_OK. destruct word_indexed eqn:?; subst; intuition eauto. symmetry. eauto using word_indexed_correct'. derive_decoder_equiv; destruct (word_indexed w tb); injections; simpl; eauto; try discriminate. Qed. End Enum. Lemma VectorIn_cons {A} {n} : forall (v : Vector.t A n) a a', Vector.In a' (Vector.cons _ a _ v) -> a = a' \/ Vector.In a' v. Proof. intros; inversion H; subst; eauto. apply Eqdep_dec.inj_pair2_eq_dec in H3; subst; eauto using Peano_dec.eq_nat_dec. Qed. Lemma forall_Vector_P {A} (P : A -> Prop) {n} : forall v : Vector.t A n, Vector.Forall P v -> forall idx, P (Vector.nth v idx). Proof. induction v; simpl; intros. - inversion idx. - revert v IHv H; pattern n, idx; apply Fin.caseS; simpl; intros; inversion H; subst; eauto. eapply IHv. apply Eqdep_dec.inj_pair2_eq_dec in H2; subst; eauto using Peano_dec.eq_nat_dec. Qed. Ltac Discharge_NoDupVector := match goal with |- NoDupVector _ => repeat econstructor; intro; repeat match goal with | H : Vector.In _ _ |- _ => first [apply VectorIn_cons in H; destruct H; try discriminate | inversion H] end end.
{- Byzantine Fault Tolerant Consensus Verification in Agda, version 0.9. Copyright (c) 2020, 2021, Oracle and/or its affiliates. Licensed under the Universal Permissive License v 1.0 as shown at https://opensource.oracle.com/licenses/upl -} -- RWS monad implementation, and functionality for proving properties about -- programs written using this RWS monad. -- It includes constructors for branching code, to aid in the verification. module Haskell.Modules.RWS where open import Haskell.Prelude ------------------------------------------------------------------------------ open import Data.Product using (_×_; _,_) -- (free) RWS : the AST of computations with state `St` reading from an environment -- `Ev` and producing a list of outputs of type `Wr` data RWS (Ev Wr St : Set) : Set → Set₁ where -- Primitive combinators RWS-return : ∀ {A} → A → RWS Ev Wr St A RWS-bind : ∀ {A B} → RWS Ev Wr St A → (A → RWS Ev Wr St B) → RWS Ev Wr St B RWS-gets : ∀ {A} → (St → A) → RWS Ev Wr St A RWS-put : St → RWS Ev Wr St Unit RWS-ask : RWS Ev Wr St Ev RWS-tell : List Wr → RWS Ev Wr St Unit -- Branching combinators (used for creating more convenient contracts) RWS-if : ∀ {A} → Guards (RWS Ev Wr St A) → RWS Ev Wr St A RWS-either : ∀ {A B C} → (B → RWS Ev Wr St A) → (C → RWS Ev Wr St A) → Either B C → RWS Ev Wr St A RWS-ebind : ∀ {A B C} → RWS Ev Wr St (Either C A) → (A → RWS Ev Wr St (Either C B)) → RWS Ev Wr St (Either C B) RWS-maybe : ∀ {A B} → (RWS Ev Wr St A) → (B → RWS Ev Wr St A) → Maybe B → RWS Ev Wr St A private variable Ev Wr St : Set A B C : Set -- From this instance declaration, we get _<$>_, pure, and _<*>_ also. instance RWS-Monad : Monad (RWS Ev Wr St) Monad.return RWS-Monad = RWS-return Monad._>>=_ RWS-Monad = RWS-bind gets : (St → A) → RWS Ev Wr St A gets = RWS-gets get : RWS Ev Wr St St get = gets id put : St → RWS Ev Wr St Unit put = RWS-put modify : (St → St) → RWS Ev Wr St Unit modify f = do st ← get put (f st) ask : RWS Ev Wr St Ev ask = RWS-ask tell : List Wr → RWS Ev Wr St Unit tell = RWS-tell void : RWS Ev Wr St A → RWS Ev Wr St Unit void m = do _ ← m pure unit -- To execute an RWS program, you provide an environment and prestate. -- This produces a result value, poststate, and list of outputs. RWS-run : RWS Ev Wr St A → Ev → St → A × St × List Wr RWS-run (RWS-return x) ev st = x , st , [] RWS-run (RWS-bind m f) ev st with RWS-run m ev st ...| x₁ , st₁ , outs₁ with RWS-run (f x₁) ev st₁ ...| x₂ , st₂ , outs₂ = x₂ , st₂ , outs₁ ++ outs₂ RWS-run (RWS-gets f) ev st = f st , st , [] RWS-run (RWS-put st) ev _ = unit , st , [] RWS-run RWS-ask ev st = ev , st , [] RWS-run (RWS-tell outs) ev st = unit , st , outs RWS-run (RWS-if (clause (b ≔ c) gs)) ev st = if toBool b then RWS-run c ev st else RWS-run (RWS-if gs) ev st RWS-run (RWS-if (otherwise≔ c)) ev st = RWS-run c ev st RWS-run (RWS-either f₁ f₂ (Left x) ) ev st = RWS-run (f₁ x) ev st RWS-run (RWS-either f₁ f₂ (Right y)) ev st = RWS-run (f₂ y) ev st RWS-run (RWS-ebind m f) ev st with RWS-run m ev st ...| Left c , st₁ , outs₁ = Left c , st₁ , outs₁ ...| Right a , st₁ , outs₁ with RWS-run (f a) ev st₁ ...| r , st₂ , outs₂ = r , st₂ , outs₁ ++ outs₂ RWS-run (RWS-maybe f₁ f₂ nothing ) ev st = RWS-run f₁ ev st RWS-run (RWS-maybe f₁ f₂ (just x)) ev st = RWS-run (f₂ x) ev st -- Accessors for the result, poststate, and outputs. 
RWS-result : RWS Ev Wr St A → Ev → St → A RWS-result m ev st = fst (RWS-run m ev st) RWS-post : RWS Ev Wr St A → Ev → St → St RWS-post m ev st = fst (snd (RWS-run m ev st)) RWS-outs : RWS Ev Wr St A → Ev → St → List Wr RWS-outs m ev st = snd (snd (RWS-run m ev st))
Over the last weekend, while I was reading the newspaper, I came across an article by Janice Tay, a freelance writer who was studying at a Japanese language school in Kyoto. It was time for her and her classmates to graduate and to bid sayonara to one another. They had been in Kyoto for a year and a half, and some of them had grown so attached to each other that it was hard for them to say goodbye. I believe most of us can relate to what they felt, as we have gone through similar experiences in our own lives. They were having a farewell party at a pub, and everyone was drinking and catching up for the last time before going their separate ways. It got to a point where some were getting emotional and weeping. In that moment, someone said, "Don't cry – everyone will be friends forever."

Yes, that is true to a certain extent, and only for a few. I particularly agree with the author's point of view that 'keeping in touch with someone you can't see and hear takes energy and imagination that few have.' If you and the person you are keeping in touch with both believe in this, there is a good chance you will be friends forever. Otherwise, one will find it challenging even to find the time to send an email. Friends forever – it is possible when both sides make the time and effort, plus the right portion of expectation. In fact, what is being said here can be applied to most relationships. To make a relationship work takes a good amount of communication, but you need to find the time and put in the effort to communicate in the first place.

Well, the key thing in the article that caught my attention was not how to be friends forever, but rather a statement that Janice wrote: 'You can't take today with you.' What immediately crossed my mind when I read it was that the statement makes a lot of positive sense. It reminds me of a quotation by Helen Keller.

How often have you come across someone who told you that he or she had just broken up? The most common initial response will be, "You will find someone new." And isn't that true for most people … eventually? As for the ones who never found someone new, most probably they never allowed themselves to start looking again after the unhappy relationship. For one who believes that things always happen for a reason, moving on will not be a challenge. You may ask me, "For what reason did it happen?" What I can say is that we may not always see the reason immediately when it happens, but when we do, it will be like, "Ahem, so that's why!" And sometimes we may not even realise the reason.

We can't take today with us … so we have to continue to move forward with the faith that tomorrow will be better, and a lot of the time it really depends on ourselves to make tomorrow better. I wish goodness in every day of your life, and that you will have the courage to move forward fearlessly.

Well said. Why do we only think about the good ol' memories when a relationship ends? Shouldn't we be dwelling on the parts that hurt the most, the parts which brought us to the ending? Human minds are strange in selecting only the good things to remember at the wrong time.

Well BK, I must say that you must be really inspired these days to come up with such a moving journal. Anyway, I definitely agree that one should let go of the past, especially the parts that brought excruciating experiences (pain).
However, the lesson it brought that person should be kept forever, thus giving him or her the opportunity to use it going forward in life. No matter how painful a situation can be, at the end of the road a ray of light is waiting to be caught. So, always look at the brighter side of every situation!

It's the art of letting go. Very nice article. I will dig it.

The answer was 'No', naturally. Exactly! In order for you to put another glass of water at the same corner, the first glass of water must be removed. The same applies. I agree with you totally that lessons must be learnt from past experience.

Very true. It is not always easy to live in the now. One is constantly thinking of yesterday or tomorrow, when only TODAY counts. Like your blog.

Your attitude and choices of the past, and the choices you make today – that will make a difference tomorrow!

Hi Nihal, you are definitely right, we have "only one life chance" to live.

I love this post. It's just so true. I especially liked your question about how you would play the game if you knew that by putting in 100% you would win. Your attitude can truly make you or break you.

Excellent post BK … you only get one today … so making the most of it and watching out for those "open doors" makes all the difference 🙂 Hanging onto things that can't be saved doesn't do much good for anyone.

@ Shawie, the art of detachment is truly a challenge to learn, and yet it gives us freedom. @ Chelle, you are right; hanging onto things that can't be saved doesn't do much good for anyone, and yet it is such a challenge for a lot of people to learn to let go and move on – just as water left a trail wherever it passed.

Very well written, and I totally agree with you. One chance, truly. Great writing.

I had to swallow real big on this one. What a beautiful post.

Wow … for me, I just need to read this over and over and over again. I guess that is what time is for. Thank you for such a truly heartfelt post.

Wow BK! You're so inspired, huh! It's so true about relationships. I've tried communicating again with old friends in the hope of reviving old friendships, but it seems some are leading and living different lives. But there are a few who are still the same old friends I've known, even without constant communication.

@ Cricket, glad that it did. Have a great weekend!

That was a great post – really enjoyed the reminder!

Dang! I am so stupid! I'm sorry BK, I thought you were a lady. 🙁 I hope you'll forgive me.

@ Fashion Forward, it is a simple truth and yet very challenging in action. Very often people are unwilling to let go.

Letting go is something we all need to do. It will take practice. I've moved a lot in my adult life, and I've asked myself those same kinds of questions: is there really a point to keeping up with someone that you can't see or spend time with? I've definitely come to the conclusion that when one door seems like it's closing, trying to hold it open is the wrong way to go.
@ Ben, much as we must not be ever ready to give up without a fight, we must know when to let go when holding on is doing us more harm. The challenge is always knowing when to fight and when to let go.

I guess it is all right to be caught up in the moment and wail about being apart and all, for it is the only way to really show that our friends really matter and have been a big part of our lives. However, people tend to cling on to those memories for too long at times, so much so that they do more harm than good. I love your post 🙂 Awesome blog.
section \<open>Auxiliary lemmas\<close> theory AuxLemmas imports Main begin abbreviation "arbitrary == undefined" text \<open>Lemmas about left- and rightmost elements in lists\<close> lemma leftmost_element_property: assumes "\<exists>x \<in> set xs. P x" obtains zs x' ys where "xs = zs@x'#ys" and "P x'" and "\<forall>z \<in> set zs. \<not> P z" proof(atomize_elim) from \<open>\<exists>x \<in> set xs. P x\<close> show "\<exists>zs x' ys. xs = zs @ x' # ys \<and> P x' \<and> (\<forall>z\<in>set zs. \<not> P z)" proof(induct xs) case Nil thus ?case by simp next case (Cons x' xs') note IH = \<open>\<exists>a\<in>set xs'. P a \<Longrightarrow> \<exists>zs x' ys. xs' = zs@x'#ys \<and> P x' \<and> (\<forall>z\<in>set zs. \<not> P z)\<close> show ?case proof (cases "P x'") case True then have "(\<exists>ys. x' # xs' = [] @ x' # ys) \<and> P x' \<and> (\<forall>x\<in>set []. \<not> P x)" by simp then show ?thesis by blast next case False with \<open>\<exists>y\<in>set (x'#xs'). P y\<close> have "\<exists>y\<in>set xs'. P y" by simp from IH[OF this] obtain y ys zs where "xs' = zs@y#ys" and "P y" and "\<forall>z\<in>set zs. \<not> P z" by blast from \<open>\<forall>z\<in>set zs. \<not> P z\<close> False have "\<forall>z\<in>set (x'#zs). \<not> P z" by simp with \<open>xs' = zs@y#ys\<close> \<open>P y\<close> show ?thesis by (metis Cons_eq_append_conv) qed qed qed lemma rightmost_element_property: assumes "\<exists>x \<in> set xs. P x" obtains ys x' zs where "xs = ys@x'#zs" and "P x'" and "\<forall>z \<in> set zs. \<not> P z" proof(atomize_elim) from \<open>\<exists>x \<in> set xs. P x\<close> show "\<exists>ys x' zs. xs = ys @ x' # zs \<and> P x' \<and> (\<forall>z\<in>set zs. \<not> P z)" proof(induct xs) case Nil thus ?case by simp next case (Cons x' xs') note IH = \<open>\<exists>a\<in>set xs'. P a \<Longrightarrow> \<exists>ys x' zs. xs' = ys @ x' # zs \<and> P x' \<and> (\<forall>z\<in>set zs. \<not> P z)\<close> show ?case proof(cases "\<exists>y\<in>set xs'. P y") case True from IH[OF this] obtain y ys zs where "xs' = ys @ y # zs" and "P y" and "\<forall>z\<in>set zs. \<not> P z" by blast thus ?thesis by (metis Cons_eq_append_conv) next case False with \<open>\<exists>y\<in>set (x'#xs'). P y\<close> have "P x'" by simp with False show ?thesis by (metis eq_Nil_appendI) qed qed qed text \<open>Lemma concerning maps and \<open>@\<close>\<close> lemma map_append_append_maps: assumes map:"map f xs = ys@zs" obtains xs' xs'' where "map f xs' = ys" and "map f xs'' = zs" and "xs=xs'@xs''" by (metis append_eq_conv_conj append_take_drop_id assms drop_map take_map that) text \<open>Lemma concerning splitting of @{term list}s\<close> lemma path_split_general: assumes all:"\<forall>zs. xs \<noteq> ys@zs" obtains j zs where "xs = (take j ys)@zs" and "j < length ys" and "\<forall>k > j. \<forall>zs'. xs \<noteq> (take k ys)@zs'" proof(atomize_elim) from \<open>\<forall>zs. xs \<noteq> ys@zs\<close> show "\<exists>j zs. xs = take j ys @ zs \<and> j < length ys \<and> (\<forall>k>j. \<forall>zs'. xs \<noteq> take k ys @ zs')" proof(induct ys arbitrary:xs) case Nil thus ?case by auto next case (Cons y' ys') note IH = \<open>\<And>xs. \<forall>zs. xs \<noteq> ys' @ zs \<Longrightarrow> \<exists>j zs. xs = take j ys' @ zs \<and> j < length ys' \<and> (\<forall>k. j < k \<longrightarrow> (\<forall>zs'. xs \<noteq> take k ys' @ zs'))\<close> show ?case proof(cases xs) case Nil thus ?thesis by simp next case (Cons x' xs') with \<open>\<forall>zs. 
xs \<noteq> (y' # ys') @ zs\<close> have "x' \<noteq> y' \<or> (\<forall>zs. xs' \<noteq> ys' @ zs)" by simp show ?thesis proof(cases "x' = y'") case True with \<open>x' \<noteq> y' \<or> (\<forall>zs. xs' \<noteq> ys' @ zs)\<close> have "\<forall>zs. xs' \<noteq> ys' @ zs" by simp from IH[OF this] have "\<exists>j zs. xs' = take j ys' @ zs \<and> j < length ys' \<and> (\<forall>k. j < k \<longrightarrow> (\<forall>zs'. xs' \<noteq> take k ys' @ zs'))" . then obtain j zs where "xs' = take j ys' @ zs" and "j < length ys'" and all_sub:"\<forall>k. j < k \<longrightarrow> (\<forall>zs'. xs' \<noteq> take k ys' @ zs')" by blast from \<open>xs' = take j ys' @ zs\<close> True have "(x'#xs') = take (Suc j) (y' # ys') @ zs" by simp from all_sub True have all_imp:"\<forall>k. j < k \<longrightarrow> (\<forall>zs'. (x'#xs') \<noteq> take (Suc k) (y' # ys') @ zs')" by auto { fix l assume "(Suc j) < l" then obtain k where [simp]:"l = Suc k" by(cases l) auto with \<open>(Suc j) < l\<close> have "j < k" by simp with all_imp have "\<forall>zs'. (x'#xs') \<noteq> take (Suc k) (y' # ys') @ zs'" by simp hence "\<forall>zs'. (x'#xs') \<noteq> take l (y' # ys') @ zs'" by simp } with \<open>(x'#xs') = take (Suc j) (y' # ys') @ zs\<close> \<open>j < length ys'\<close> Cons show ?thesis by (metis Suc_length_conv less_Suc_eq_0_disj) next case False with Cons have "\<forall>i zs'. i > 0 \<longrightarrow> xs \<noteq> take i (y' # ys') @ zs'" by auto(case_tac i,auto) moreover have "\<exists>zs. xs = take 0 (y' # ys') @ zs" by simp ultimately show ?thesis by(rule_tac x="0" in exI,auto) qed qed qed qed end
[STATEMENT] lemma atoms_insert_DynCh [simp]: "atoms (insert (DynCh c K (Msg M)) H) = set M \<union> atoms H" [PROOF STATE] proof (prove) goal (1 subgoal): 1. atoms (insert (DynCh c K (Msg M)) H) = set M \<union> atoms H [PROOF STEP] by (auto elim!: atoms.cases)
""" [Tan2018](@cite) """ LifeCycleTan2018_θ_liq_ice(::Type{FT}) where {FT} = z -> if z <= 520.0 FT(298.7) elseif z > 520.0 && z <= 1480.0 FT(298.7) + (z - 520) * (FT(302.4) - FT(298.7)) / (1480 - 520) elseif z > 1480.0 && z <= 2000 FT(302.4) + (z - 1480) * (FT(308.2) - FT(302.4)) / (2000 - 1480) elseif z > 2000.0 FT(308.2) + (z - 2000) * (FT(311.85) - FT(308.2)) / (3000 - 2000) else FT(0) end """ [Tan2018](@cite) """ LifeCycleTan2018_q_tot(::Type{FT}) where {FT} = z -> if z <= 520 (FT(17) + z * (FT(16.3) - FT(17.0)) / 520) / 1000 elseif z > 520.0 && z <= 1480.0 (FT(16.3) + (z - 520) * (FT(10.7) - FT(16.3)) / (1480 - 520)) / 1000 elseif z > 1480.0 && z <= 2000.0 (FT(10.7) + (z - 1480) * (FT(4.2) - FT(10.7)) / (2000 - 1480)) / 1000 elseif z > 2000.0 (FT(4.2) + (z - 2000) * (3 - FT(4.2)) / (3000 - 2000)) / 1000 else FT(0) end """ [Tan2018](@cite) """ LifeCycleTan2018_u(::Type{FT}) where {FT} = z -> if z <= 700.0 FT(-8.75) else FT(-8.75) + (z - 700) * (FT(-4.61) - FT(-8.75)) / (3000 - 700) end """ [Tan2018](@cite) """ LifeCycleTan2018_tke(::Type{FT}) where {FT} = z -> if z <= 2500.0 FT(1) - z / 3000 else FT(0) end # Large-scale cooling """ [Tan2018](@cite) """ LifeCycleTan2018_dTdt(::Type{FT}) where {FT} = (Π, z) -> if z <= 1500.0 FT(-2 / (3600 * 24)) * Π else FT(-2 / (3600 * 24) + (z - 1500) * (0 - -2 / (3600 * 24)) / (3000 - 1500)) * Π end # geostrophic velocity profiles """ [Tan2018](@cite) """ LifeCycleTan2018_geostrophic_u(::Type{FT}) where {FT} = z -> -10 + FT(1.8e-3) * z """ [Tan2018](@cite) """ LifeCycleTan2018_geostrophic_v(::Type{FT}) where {FT} = z -> FT(0) # Large-scale drying """ [Tan2018](@cite) """ LifeCycleTan2018_dqtdt(::Type{FT}) where {FT} = z -> if z <= 300.0 FT(-1.2e-8) #kg/(kg * s) elseif z > 300.0 && z <= 500.0 FT(-1.2e-8) + (z - 300) * (0 - FT(-1.2e-8)) / (500 - 300) #kg/(kg * s) else FT(0) end #Large scale subsidence """ [Tan2018](@cite) """ LifeCycleTan2018_subsidence(::Type{FT}) where {FT} = z -> if z <= 1500.0 FT(0) + z * (FT(-0.65) / 100 - 0) / (1500 - 0) elseif z > 1500.0 && z <= 2100.0 FT(-0.65) / 100 + (z - 1500) * (0 - FT(-0.65) / 100) / (2100 - 1500) else FT(0) end
lemma little_Picard_01: assumes holf: "f holomorphic_on UNIV" and f01: "\<And>z. f z \<noteq> 0 \<and> f z \<noteq> 1" obtains c where "f = (\<lambda>x. c)"
#pragma once #include <vector> #include <gsl-lite/gsl-lite.hpp> #include <Eigen/Sparse> #include <thrustshift/CSR.h> #include <thrustshift/container-conversion.h> #include <thrustshift/managed-vector.h> #include <thrustshift/memory-resource.h> namespace thrustshift { namespace eigen { //! Return a CSR matrix with the default memory resource of the CSR class template <class EigenSparseMatrix> auto sparse_mtx2csr(EigenSparseMatrix&& m_) { using DataType = typename std::remove_reference<EigenSparseMatrix>::type::value_type; using StorageIndex = typename std::remove_reference<EigenSparseMatrix>::type::StorageIndex; // Create copy because we might modify the container with `makeCompressed` Eigen::SparseMatrix<DataType, Eigen::RowMajor, StorageIndex> m = m_; m.makeCompressed(); auto m_data = m.data(); auto nnz = m.nonZeros(); auto rows = m.rows(); const DataType* A = &m_data.value(0); const StorageIndex* IA = m.outerIndexPtr(); const StorageIndex* JA = &m_data.index(0); managed_vector<DataType> seq_A(A, A + nnz); managed_vector<StorageIndex> seq_JA(JA, JA + nnz); managed_vector<StorageIndex> seq_IA(IA, IA + rows + 1); return CSR<DataType, StorageIndex>( seq_A, seq_JA, seq_IA, gsl_lite::narrow<size_t>(m.cols())); } // Forward declaration to avoid co-dependent headers template <class EigenSparseMatrix, class COO_C> EigenSparseMatrix coo2sparse_mtx(COO_C&& coo); template <class EigenSparseMatrix, class CSR_C> EigenSparseMatrix csr2sparse_mtx(CSR_C&& csr) { using DataType = typename std::remove_reference<EigenSparseMatrix>::type::value_type; using StorageIndex = typename std::remove_reference<EigenSparseMatrix>::type::StorageIndex; return coo2sparse_mtx<EigenSparseMatrix>( csr2coo<thrustshift::COO<DataType, StorageIndex>>( std::forward<CSR_C>(csr), pmr::default_resource)); } } // namespace eigen } // namespace thrustshift
(* Title: HOL/Isar_Examples/Group.thy Author: Makarius *) section \<open>Basic group theory\<close> theory Group imports MainRLT begin subsection \<open>Groups and calculational reasoning\<close> text \<open> Groups over signature \<open>(* :: \<alpha> \<Rightarrow> \<alpha> \<Rightarrow> \<alpha>, 1 :: \<alpha>, inverse :: \<alpha> \<Rightarrow> \<alpha>)\<close> are defined as an axiomatic type class as follows. Note that the parent classes \<^class>\<open>times\<close>, \<^class>\<open>one\<close>, \<^class>\<open>inverse\<close> is provided by the basic HOL theory. \<close> class group = times + one + inverse + assumes group_assoc: "(x * y) * z = x * (y * z)" and group_left_one: "1 * x = x" and group_left_inverse: "inverse x * x = 1" text \<open> The group axioms only state the properties of left one and inverse, the right versions may be derived as follows. \<close> theorem (in group) group_right_inverse: "x * inverse x = 1" proof - have "x * inverse x = 1 * (x * inverse x)" by (simp only: group_left_one) also have "\<dots> = 1 * x * inverse x" by (simp only: group_assoc) also have "\<dots> = inverse (inverse x) * inverse x * x * inverse x" by (simp only: group_left_inverse) also have "\<dots> = inverse (inverse x) * (inverse x * x) * inverse x" by (simp only: group_assoc) also have "\<dots> = inverse (inverse x) * 1 * inverse x" by (simp only: group_left_inverse) also have "\<dots> = inverse (inverse x) * (1 * inverse x)" by (simp only: group_assoc) also have "\<dots> = inverse (inverse x) * inverse x" by (simp only: group_left_one) also have "\<dots> = 1" by (simp only: group_left_inverse) finally show ?thesis . qed text \<open> With \<open>group_right_inverse\<close> already available, \<open>group_right_one\<close> is now established much easier. \<close> theorem (in group) group_right_one: "x * 1 = x" proof - have "x * 1 = x * (inverse x * x)" by (simp only: group_left_inverse) also have "\<dots> = x * inverse x * x" by (simp only: group_assoc) also have "\<dots> = 1 * x" by (simp only: group_right_inverse) also have "\<dots> = x" by (simp only: group_left_one) finally show ?thesis . qed text \<open> \<^medskip> The calculational proof style above follows typical presentations given in any introductory course on algebra. The basic technique is to form a transitive chain of equations, which in turn are established by simplifying with appropriate rules. The low-level logical details of equational reasoning are left implicit. Note that ``\<open>\<dots>\<close>'' is just a special term variable that is bound automatically to the argument\<^footnote>\<open>The argument of a curried infix expression happens to be its right-hand side.\<close> of the last fact achieved by any local assumption or proven statement. In contrast to \<open>?thesis\<close>, the ``\<open>\<dots>\<close>'' variable is bound \<^emph>\<open>after\<close> the proof is finished. There are only two separate Isar language elements for calculational proofs: ``\<^theory_text>\<open>also\<close>'' for initial or intermediate calculational steps, and ``\<^theory_text>\<open>finally\<close>'' for exhibiting the result of a calculation. These constructs are not hardwired into Isabelle/Isar, but defined on top of the basic Isar/VM interpreter. Expanding the \<^theory_text>\<open>also\<close> and \<^theory_text>\<open>finally\<close> derived language elements, calculations may be simulated by hand as demonstrated below. 
\<close> theorem (in group) "x * 1 = x" proof - have "x * 1 = x * (inverse x * x)" by (simp only: group_left_inverse) note calculation = this \<comment> \<open>first calculational step: init calculation register\<close> have "\<dots> = x * inverse x * x" by (simp only: group_assoc) note calculation = trans [OF calculation this] \<comment> \<open>general calculational step: compose with transitivity rule\<close> have "\<dots> = 1 * x" by (simp only: group_right_inverse) note calculation = trans [OF calculation this] \<comment> \<open>general calculational step: compose with transitivity rule\<close> have "\<dots> = x" by (simp only: group_left_one) note calculation = trans [OF calculation this] \<comment> \<open>final calculational step: compose with transitivity rule \dots\<close> from calculation \<comment> \<open>\dots\ and pick up the final result\<close> show ?thesis . qed text \<open> Note that this scheme of calculations is not restricted to plain transitivity. Rules like anti-symmetry, or even forward and backward substitution work as well. For the actual implementation of \<^theory_text>\<open>also\<close> and \<^theory_text>\<open>finally\<close>, Isabelle/Isar maintains separate context information of ``transitivity'' rules. Rule selection takes place automatically by higher-order unification. \<close> subsection \<open>Groups as monoids\<close> text \<open> Monoids over signature \<open>(* :: \<alpha> \<Rightarrow> \<alpha> \<Rightarrow> \<alpha>, 1 :: \<alpha>)\<close> are defined like this. \<close> class monoid = times + one + assumes monoid_assoc: "(x * y) * z = x * (y * z)" and monoid_left_one: "1 * x = x" and monoid_right_one: "x * 1 = x" text \<open> Groups are \<^emph>\<open>not\<close> yet monoids directly from the definition. For monoids, \<open>right_one\<close> had to be included as an axiom, but for groups both \<open>right_one\<close> and \<open>right_inverse\<close> are derivable from the other axioms. With \<open>group_right_one\<close> derived as a theorem of group theory (see @{thm group_right_one}), we may still instantiate \<open>group \<subseteq> monoid\<close> properly as follows. \<close> instance group \<subseteq> monoid by intro_classes (rule group_assoc, rule group_left_one, rule group_right_one) text \<open> The \<^theory_text>\<open>instance\<close> command actually is a version of \<^theory_text>\<open>theorem\<close>, setting up a goal that reflects the intended class relation (or type constructor arity). Thus any Isar proof language element may be involved to establish this statement. When concluding the proof, the result is transformed into the intended type signature extension behind the scenes. \<close> subsection \<open>More theorems of group theory\<close> text \<open> The one element is already uniquely determined by preserving an \<^emph>\<open>arbitrary\<close> group element. \<close> theorem (in group) group_one_equality: assumes eq: "e * x = x" shows "1 = e" proof - have "1 = x * inverse x" by (simp only: group_right_inverse) also have "\<dots> = (e * x) * inverse x" by (simp only: eq) also have "\<dots> = e * (x * inverse x)" by (simp only: group_assoc) also have "\<dots> = e * 1" by (simp only: group_right_inverse) also have "\<dots> = e" by (simp only: group_right_one) finally show ?thesis . qed text \<open> Likewise, the inverse is already determined by the cancel property. 
\<close> theorem (in group) group_inverse_equality: assumes eq: "x' * x = 1" shows "inverse x = x'" proof - have "inverse x = 1 * inverse x" by (simp only: group_left_one) also have "\<dots> = (x' * x) * inverse x" by (simp only: eq) also have "\<dots> = x' * (x * inverse x)" by (simp only: group_assoc) also have "\<dots> = x' * 1" by (simp only: group_right_inverse) also have "\<dots> = x'" by (simp only: group_right_one) finally show ?thesis . qed text \<open> The inverse operation has some further characteristic properties. \<close> theorem (in group) group_inverse_times: "inverse (x * y) = inverse y * inverse x" proof (rule group_inverse_equality) show "(inverse y * inverse x) * (x * y) = 1" proof - have "(inverse y * inverse x) * (x * y) = (inverse y * (inverse x * x)) * y" by (simp only: group_assoc) also have "\<dots> = (inverse y * 1) * y" by (simp only: group_left_inverse) also have "\<dots> = inverse y * y" by (simp only: group_right_one) also have "\<dots> = 1" by (simp only: group_left_inverse) finally show ?thesis . qed qed theorem (in group) inverse_inverse: "inverse (inverse x) = x" proof (rule group_inverse_equality) show "x * inverse x = one" by (simp only: group_right_inverse) qed theorem (in group) inverse_inject: assumes eq: "inverse x = inverse y" shows "x = y" proof - have "x = x * 1" by (simp only: group_right_one) also have "\<dots> = x * (inverse y * y)" by (simp only: group_left_inverse) also have "\<dots> = x * (inverse x * y)" by (simp only: eq) also have "\<dots> = (x * inverse x) * y" by (simp only: group_assoc) also have "\<dots> = 1 * y" by (simp only: group_right_inverse) also have "\<dots> = y" by (simp only: group_left_one) finally show ?thesis . qed end
(* Title: Ground Ordered Resolution Calculus with Selection Author: Anders Schlichtkrull <andschl at dtu.dk>, 2016, 2017 Author: Jasmin Blanchette <j.c.blanchette at vu.nl>, 2014, 2017 Author: Dmitriy Traytel <traytel at inf.ethz.ch>, 2014 Maintainer: Anders Schlichtkrull <andschl at dtu.dk> *) section \<open>Ground Ordered Resolution Calculus with Selection\<close> theory Ordered_Ground_Resolution imports Inference_System Ground_Resolution_Model begin text \<open> Ordered ground resolution with selection is the second inference system studied in Section~3 (``Standard Resolution'') of Bachmair and Ganzinger's chapter. \<close> subsection \<open>Inference Rule\<close> text \<open> Ordered ground resolution consists of a single rule, called \<open>ord_resolve\<close> below. Like \<open>unord_resolve\<close>, the rule is sound and counterexample-reducing. In addition, it is reductive. \<close> context ground_resolution_with_selection begin text \<open> The following inductive definition corresponds to Figure 2. \<close> definition maximal_wrt :: "'a \<Rightarrow> 'a literal multiset \<Rightarrow> bool" where "maximal_wrt A DA \<equiv> A = Max (atms_of DA)" (* FIXME: change definition so that it returns true if DA is empty *) definition strictly_maximal_wrt :: "'a \<Rightarrow> 'a literal multiset \<Rightarrow> bool" where "strictly_maximal_wrt A CA \<longleftrightarrow> (\<forall>B \<in> atms_of CA. B < A)" inductive eligible :: "'a list \<Rightarrow> 'a clause \<Rightarrow> bool" where eligible: "(S DA = negs (mset As)) \<or> (S DA = {#} \<and> length As = 1 \<and> maximal_wrt (As ! 0) DA) \<Longrightarrow> eligible As DA" lemma "(S DA = negs (mset As) \<or> S DA = {#} \<and> length As = 1 \<and> maximal_wrt (As ! 0) DA) \<longleftrightarrow> eligible As DA" using eligible.intros ground_resolution_with_selection.eligible.cases ground_resolution_with_selection_axioms by blast inductive ord_resolve :: "'a clause list \<Rightarrow> 'a clause \<Rightarrow> 'a multiset list \<Rightarrow> 'a list \<Rightarrow> 'a clause \<Rightarrow> bool" where ord_resolve: "length CAs = n \<Longrightarrow> length Cs = n \<Longrightarrow> length AAs = n \<Longrightarrow> length As = n \<Longrightarrow> n \<noteq> 0 \<Longrightarrow> (\<forall>i < n. CAs ! i = Cs ! i + poss (AAs ! i)) \<Longrightarrow> (\<forall>i < n. AAs ! i \<noteq> {#}) \<Longrightarrow> (\<forall>i < n. \<forall>A \<in># AAs ! i. A = As ! i) \<Longrightarrow> eligible As (D + negs (mset As)) \<Longrightarrow> (\<forall>i < n. strictly_maximal_wrt (As ! i) (Cs ! i)) \<Longrightarrow> (\<forall>i < n. S (CAs ! i) = {#}) \<Longrightarrow> ord_resolve CAs (D + negs (mset As)) AAs As (\<Union># (mset Cs) + D)" lemma ord_resolve_sound: assumes res_e: "ord_resolve CAs DA AAs As E" and cc_true: "I \<Turnstile>m mset CAs" and d_true: "I \<Turnstile> DA" shows "I \<Turnstile> E" using res_e proof (cases rule: ord_resolve.cases) case (ord_resolve n Cs D) note DA = this(1) and e = this(2) and cas_len = this(3) and cs_len = this(4) and as_len = this(6) and cas = this(8) and aas_ne = this(9) and a_eq = this(10) show ?thesis proof (cases "\<forall>A \<in> set As. A \<in> I") case True then have "\<not> I \<Turnstile> negs (mset As)" unfolding true_cls_def by fastforce then have "I \<Turnstile> D" using d_true DA by fast then show ?thesis unfolding e by blast next case False then obtain i where a_in_aa: "i < n" and a_false: "As ! i \<notin> I" using cas_len as_len by (metis in_set_conv_nth) have "\<not> I \<Turnstile> poss (AAs ! 
i)" using a_false a_eq aas_ne a_in_aa unfolding true_cls_def by auto moreover have "I \<Turnstile> CAs ! i" using a_in_aa cc_true unfolding true_cls_mset_def using cas_len by auto ultimately have "I \<Turnstile> Cs ! i" using cas a_in_aa by auto then show ?thesis using a_in_aa cs_len unfolding e true_cls_def by (meson in_Union_mset_iff nth_mem_mset union_iff) qed qed lemma filter_neg_atm_of_S: "{#Neg (atm_of L). L \<in># S C#} = S C" by (simp add: S_selects_neg_lits) text \<open> This corresponds to Lemma 3.13: \<close> lemma ord_resolve_reductive: assumes "ord_resolve CAs DA AAs As E" shows "E < DA" using assms proof (cases rule: ord_resolve.cases) case (ord_resolve n Cs D) note DA = this(1) and e = this(2) and cas_len = this(3) and cs_len = this(4) and ai_len = this(6) and nz = this(7) and cas = this(8) and maxim = this(12) show ?thesis proof (cases "\<Union># (mset Cs) = {#}") case True have "negs (mset As) \<noteq> {#}" using nz ai_len by auto then show ?thesis unfolding True e DA by auto next case False define max_A_of_Cs where "max_A_of_Cs = Max (atms_of (\<Union># (mset Cs)))" have mc_in: "max_A_of_Cs \<in> atms_of (\<Union># (mset Cs))" and mc_max: "\<And>B. B \<in> atms_of (\<Union># (mset Cs)) \<Longrightarrow> B \<le> max_A_of_Cs" using max_A_of_Cs_def False by auto then have "\<exists>C_max \<in> set Cs. max_A_of_Cs \<in> atms_of (C_max)" by (metis atm_imp_pos_or_neg_lit in_Union_mset_iff neg_lit_in_atms_of pos_lit_in_atms_of set_mset_mset) then obtain max_i where cm_in_cas: "max_i < length CAs" and mc_in_cm: "max_A_of_Cs \<in> atms_of (Cs ! max_i)" using in_set_conv_nth[of _ CAs] by (metis cas_len cs_len in_set_conv_nth) define CA_max where "CA_max = CAs ! max_i" define A_max where "A_max = As ! max_i" define C_max where "C_max = Cs ! max_i" have mc_lt_ma: "max_A_of_Cs < A_max" using maxim cm_in_cas mc_in_cm cas_len unfolding strictly_maximal_wrt_def A_max_def by auto then have ucas_ne_neg_aa: "\<Union># (mset Cs) \<noteq> negs (mset As)" using mc_in mc_max mc_lt_ma cm_in_cas cas_len ai_len unfolding A_max_def by (metis atms_of_negs nth_mem set_mset_mset leD) moreover have ucas_lt_ma: "\<forall>B \<in> atms_of (\<Union># (mset Cs)). B < A_max" using mc_max mc_lt_ma by fastforce moreover have "\<not> Neg A_max \<in># \<Union># (mset Cs)" using ucas_lt_ma neg_lit_in_atms_of[of A_max "\<Union># (mset Cs)"] by auto moreover have "Neg A_max \<in># negs (mset As)" using cm_in_cas cas_len ai_len A_max_def by auto ultimately have "\<Union># (mset Cs) < negs (mset As)" unfolding less_multiset\<^sub>H\<^sub>O by (metis (no_types) atms_less_eq_imp_lit_less_eq_neg count_greater_zero_iff count_inI le_imp_less_or_eq less_imp_not_less not_le) then show ?thesis unfolding e DA by auto qed qed text \<open> This corresponds to Theorem 3.15: \<close> theorem ord_resolve_counterex_reducing: assumes ec_ni_n: "{#} \<notin> N" and d_in_n: "DA \<in> N" and d_cex: "\<not> INTERP N \<Turnstile> DA" and d_min: "\<And>C. C \<in> N \<Longrightarrow> \<not> INTERP N \<Turnstile> C \<Longrightarrow> DA \<le> C" obtains CAs AAs As E where "set CAs \<subseteq> N" "INTERP N \<Turnstile>m mset CAs" "\<And>CA. CA \<in> set CAs \<Longrightarrow> productive N CA" "ord_resolve CAs DA AAs As E" "\<not> INTERP N \<Turnstile> E" "E < DA" proof - have d_ne: "DA \<noteq> {#}" using d_in_n ec_ni_n by blast have "\<exists>As. 
As \<noteq> [] \<and> negs (mset As) \<le># DA \<and> eligible As DA" proof (cases "S DA = {#}") assume s_d_e: "S DA = {#}" define A where "A = Max (atms_of DA)" define As where "As = [A]" define D where "D = DA-{#Neg A #}" have na_in_d: "Neg A \<in># DA" unfolding A_def using s_d_e d_ne d_in_n d_cex d_min by (metis Max_in_lits Max_lit_eq_pos_or_neg_Max_atm max_pos_imp_Interp Interp_imp_INTERP) then have das: "DA = D + negs (mset As)" unfolding D_def As_def by auto moreover from na_in_d have "negs (mset As) \<subseteq># DA" by (simp add: As_def) moreover have "As ! 0 = Max (atms_of (D + negs (mset As)))" using A_def As_def das by auto then have "eligible As DA" using eligible s_d_e As_def das maximal_wrt_def by auto ultimately show ?thesis using As_def by blast next assume s_d_e: "S DA \<noteq> {#}" define As :: "'a list" where "As = list_of_mset {#atm_of L. L \<in># S DA#}" define D :: "'a clause" where "D = DA - negs {#atm_of L. L \<in># S DA#}" have "As \<noteq> []" unfolding As_def using s_d_e by (metis image_mset_is_empty_iff list_of_mset_empty) moreover have da_sub_as: "negs {#atm_of L. L \<in># S DA#} \<subseteq># DA" using S_selects_subseteq by (auto simp: filter_neg_atm_of_S) then have "negs (mset As) \<subseteq># DA" unfolding As_def by auto moreover have das: "DA = D + negs (mset As)" using da_sub_as unfolding D_def As_def by auto moreover have "S DA = negs {#atm_of L. L \<in># S DA#}" by (auto simp: filter_neg_atm_of_S) then have "S DA = negs (mset As)" unfolding As_def by auto then have "eligible As DA" unfolding das using eligible by auto ultimately show ?thesis by blast qed then obtain As :: "'a list" where as_ne: "As \<noteq> []" and negs_as_le_d: "negs (mset As) \<le># DA" and s_d: "eligible As DA" by blast define D :: "'a clause" where "D = DA - negs (mset As)" have "set As \<subseteq> INTERP N" using d_cex negs_as_le_d by force then have prod_ex: "\<forall>A \<in> set As. \<exists>D. produces N D A" unfolding INTERP_def by (metis (no_types, lifting) INTERP_def subsetCE UN_E not_produces_imp_notin_production) then have "\<And>A. \<exists>D. produces N D A \<longrightarrow> A \<in> set As" using ec_ni_n by (auto intro: productive_in_N) then have "\<And>A. \<exists>D. produces N D A \<longleftrightarrow> A \<in> set As" using prod_ex by blast then obtain CA_of where c_of0: "\<And>A. produces N (CA_of A) A \<longleftrightarrow> A \<in> set As" by metis then have prod_c0: "\<forall>A \<in> set As. produces N (CA_of A) A" by blast define C_of where "\<And>A. C_of A = {#L \<in># CA_of A. L \<noteq> Pos A#}" define Aj_of where "\<And>A. Aj_of A = image_mset atm_of {#L \<in># CA_of A. L = Pos A#}" have pospos: "\<And>LL A. {#Pos (atm_of x). x \<in># {#L \<in># LL. L = Pos A#}#} = {#L \<in># LL. L = Pos A#}" by (metis (mono_tags, lifting) image_filter_cong literal.sel(1) multiset.map_ident) have ca_of_c_of_aj_of: "\<And>A. CA_of A = C_of A + poss (Aj_of A)" using pospos[of _ "CA_of _"] by (simp add: C_of_def Aj_of_def) define n :: nat where "n = length As" define Cs :: "'a clause list" where "Cs = map C_of As" define AAs :: "'a multiset list" where "AAs = map Aj_of As" define CAs :: "'a literal multiset list" where "CAs = map CA_of As" have m_nz: "\<And>A. A \<in> set As \<Longrightarrow> Aj_of A \<noteq> {#}" unfolding Aj_of_def using prod_c0 produces_imp_Pos_in_lits by (metis (full_types) filter_mset_empty_conv image_mset_is_empty_iff) have prod_c: "productive N CA" if ca_in: "CA \<in> set CAs" for CA proof - obtain i where i_p: "i < length CAs" "CAs ! 
i = CA" using ca_in by (meson in_set_conv_nth) have "production N (CA_of (As ! i)) = {As ! i}" using i_p CAs_def prod_c0 by auto then show "productive N CA" using i_p CAs_def by auto qed then have cs_subs_n: "set CAs \<subseteq> N" using productive_in_N by auto have cs_true: "INTERP N \<Turnstile>m mset CAs" unfolding true_cls_mset_def using prod_c productive_imp_INTERP by auto have "\<And>A. A \<in> set As \<Longrightarrow> \<not> Neg A \<in># CA_of A" using prod_c0 produces_imp_neg_notin_lits by auto then have a_ni_c': "\<And>A. A \<in> set As \<Longrightarrow> A \<notin> atms_of (C_of A)" unfolding C_of_def using atm_imp_pos_or_neg_lit by force have c'_le_c: "\<And>A. C_of A \<le> CA_of A" unfolding C_of_def by (auto intro: subset_eq_imp_le_multiset) have a_max_c: "\<And>A. A \<in> set As \<Longrightarrow> A = Max (atms_of (CA_of A))" using prod_c0 productive_imp_produces_Max_atom[of N] by auto then have "\<And>A. A \<in> set As \<Longrightarrow> C_of A \<noteq> {#} \<Longrightarrow> Max (atms_of (C_of A)) \<le> A" using c'_le_c by (metis less_eq_Max_atms_of) moreover have "\<And>A. A \<in> set As \<Longrightarrow> C_of A \<noteq> {#} \<Longrightarrow> Max (atms_of (C_of A)) \<noteq> A" using a_ni_c' Max_in by (metis (no_types) atms_empty_iff_empty finite_atms_of) ultimately have max_c'_lt_a: "\<And>A. A \<in> set As \<Longrightarrow> C_of A \<noteq> {#} \<Longrightarrow> Max (atms_of (C_of A)) < A" by (metis order.strict_iff_order) have le_cs_as: "length CAs = length As" unfolding CAs_def by simp have "length CAs = n" by (simp add: le_cs_as n_def) moreover have "length Cs = n" by (simp add: Cs_def n_def) moreover have "length AAs = n" by (simp add: AAs_def n_def) moreover have "length As = n" using n_def by auto moreover have "n \<noteq> 0" by (simp add: as_ne n_def) moreover have " \<forall>i. i < length AAs \<longrightarrow> (\<forall>A \<in># AAs ! i. A = As ! i)" using AAs_def Aj_of_def by auto have "\<And>x B. production N (CA_of x) = {x} \<Longrightarrow> B \<in># CA_of x \<Longrightarrow> B \<noteq> Pos x \<Longrightarrow> atm_of B < x" by (metis atm_of_lit_in_atms_of insert_not_empty le_imp_less_or_eq Pos_atm_of_iff Neg_atm_of_iff pos_neg_in_imp_true produces_imp_Pos_in_lits produces_imp_atms_leq productive_imp_not_interp) then have "\<And>B A. A\<in>set As \<Longrightarrow> B \<in># CA_of A \<Longrightarrow> B \<noteq> Pos A \<Longrightarrow> atm_of B < A" using prod_c0 by auto have "\<forall>i. i < length AAs \<longrightarrow> AAs ! i \<noteq> {#}" unfolding AAs_def using m_nz by simp have "\<forall>i < n. CAs ! i = Cs ! i + poss (AAs ! i)" unfolding CAs_def Cs_def AAs_def using ca_of_c_of_aj_of by (simp add: n_def) moreover have "\<forall>i < n. AAs ! i \<noteq> {#}" using \<open>\<forall>i < length AAs. AAs ! i \<noteq> {#}\<close> calculation(3) by blast moreover have "\<forall>i < n. \<forall>A \<in># AAs ! i. A = As ! i" by (simp add: \<open>\<forall>i < length AAs. \<forall>A \<in># AAs ! i. A = As ! i\<close> calculation(3)) moreover have "eligible As DA" using s_d by auto then have "eligible As (D + negs (mset As))" using D_def negs_as_le_d by auto moreover have "\<And>i. i < length AAs \<Longrightarrow> strictly_maximal_wrt (As ! i) ((Cs ! i))" by (simp add: C_of_def Cs_def \<open>\<And>x B. \<lbrakk>production N (CA_of x) = {x}; B \<in># CA_of x; B \<noteq> Pos x\<rbrakk> \<Longrightarrow> atm_of B < x\<close> atms_of_def calculation(3) n_def prod_c0 strictly_maximal_wrt_def) have "\<forall>i < n. strictly_maximal_wrt (As ! i) (Cs ! i)" by (simp add: \<open>\<And>i. 
i < length AAs \<Longrightarrow> strictly_maximal_wrt (As ! i) (Cs ! i)\<close> calculation(3)) moreover have "\<forall>CA \<in> set CAs. S CA = {#}" using prod_c producesD productive_imp_produces_Max_literal by blast have "\<forall>CA\<in>set CAs. S CA = {#}" using \<open>\<forall>CA\<in>set CAs. S CA = {#}\<close> by simp then have "\<forall>i < n. S (CAs ! i) = {#}" using \<open>length CAs = n\<close> nth_mem by blast ultimately have res_e: "ord_resolve CAs (D + negs (mset As)) AAs As (\<Union># (mset Cs) + D)" using ord_resolve by auto have "\<And>A. A \<in> set As \<Longrightarrow> \<not> interp N (CA_of A) \<Turnstile> CA_of A" by (simp add: prod_c0 producesD) then have "\<And>A. A \<in> set As \<Longrightarrow> \<not> Interp N (CA_of A) \<Turnstile> C_of A" unfolding prod_c0 C_of_def Interp_def true_cls_def using true_lit_def not_gr_zero prod_c0 by auto then have c'_at_n: "\<And>A. A \<in> set As \<Longrightarrow> \<not> INTERP N \<Turnstile> C_of A" using a_max_c c'_le_c max_c'_lt_a not_Interp_imp_not_INTERP unfolding true_cls_def by (metis true_cls_def true_cls_empty) have "\<not> INTERP N \<Turnstile> \<Union># (mset Cs)" unfolding Cs_def true_cls_def using c'_at_n by fastforce moreover have "\<not> INTERP N \<Turnstile> D" using d_cex by (metis D_def add_diff_cancel_right' negs_as_le_d subset_mset.add_diff_assoc2 true_cls_def union_iff) ultimately have e_cex: "\<not> INTERP N \<Turnstile> \<Union># (mset Cs) + D" by simp have "set CAs \<subseteq> N" by (simp add: cs_subs_n) moreover have "INTERP N \<Turnstile>m mset CAs" by (simp add: cs_true) moreover have "\<And>CA. CA \<in> set CAs \<Longrightarrow> productive N CA" by (simp add: prod_c) moreover have "ord_resolve CAs DA AAs As (\<Union># (mset Cs) + D)" using D_def negs_as_le_d res_e by auto moreover have "\<not> INTERP N \<Turnstile> \<Union># (mset Cs) + D" using e_cex by simp moreover have "\<Union># (mset Cs) + D < DA" using calculation(4) ord_resolve_reductive by auto ultimately show thesis .. qed lemma ord_resolve_atms_of_concl_subset: assumes "ord_resolve CAs DA AAs As E" shows "atms_of E \<subseteq> (\<Union>C \<in> set CAs. atms_of C) \<union> atms_of DA" using assms proof (cases rule: ord_resolve.cases) case (ord_resolve n Cs D) note DA = this(1) and e = this(2) and cas_len = this(3) and cs_len = this(4) and cas = this(8) have "\<forall>i < n. set_mset (Cs ! i) \<subseteq> set_mset (CAs ! i)" using cas by auto then have "\<forall>i < n. Cs ! i \<subseteq># \<Union># (mset CAs)" by (metis cas cas_len mset_subset_eq_add_left nth_mem_mset sum_mset.remove union_assoc) then have "\<forall>C \<in> set Cs. C \<subseteq># \<Union># (mset CAs)" using cs_len in_set_conv_nth[of _ Cs] by auto then have "set_mset (\<Union># (mset Cs)) \<subseteq> set_mset (\<Union># (mset CAs))" by auto (meson in_mset_sum_list2 mset_subset_eqD) then have "atms_of (\<Union># (mset Cs)) \<subseteq> atms_of (\<Union># (mset CAs))" by (meson lits_subseteq_imp_atms_subseteq mset_subset_eqD subsetI) moreover have "atms_of (\<Union># (mset CAs)) = (\<Union>CA \<in> set CAs. atms_of CA)" by (intro set_eqI iffI, simp_all, metis in_mset_sum_list2 atm_imp_pos_or_neg_lit neg_lit_in_atms_of pos_lit_in_atms_of, metis in_mset_sum_list atm_imp_pos_or_neg_lit neg_lit_in_atms_of pos_lit_in_atms_of) ultimately have "atms_of (\<Union># (mset Cs)) \<subseteq> (\<Union>CA \<in> set CAs. 
atms_of CA)" by auto moreover have "atms_of D \<subseteq> atms_of DA" using DA by auto ultimately show ?thesis unfolding e by auto qed subsection \<open>Inference System\<close> text \<open> Theorem 3.16 is subsumed in the counterexample-reducing inference system framework, which is instantiated below. Unlike its unordered cousin, ordered resolution is additionally a reductive inference system. \<close> definition ord_\<Gamma> :: "'a inference set" where "ord_\<Gamma> = {Infer (mset CAs) DA E | CAs DA AAs As E. ord_resolve CAs DA AAs As E}" sublocale ord_\<Gamma>_sound_counterex_reducing?: sound_counterex_reducing_inference_system "ground_resolution_with_selection.ord_\<Gamma> S" "ground_resolution_with_selection.INTERP S" + reductive_inference_system "ground_resolution_with_selection.ord_\<Gamma> S" proof unfold_locales fix DA :: "'a clause" and N :: "'a clause set" assume "{#} \<notin> N" and "DA \<in> N" and "\<not> INTERP N \<Turnstile> DA" and "\<And>C. C \<in> N \<Longrightarrow> \<not> INTERP N \<Turnstile> C \<Longrightarrow> DA \<le> C" then obtain CAs AAs As E where dd_sset_n: "set CAs \<subseteq> N" and dd_true: "INTERP N \<Turnstile>m mset CAs" and res_e: "ord_resolve CAs DA AAs As E" and e_cex: "\<not> INTERP N \<Turnstile> E" and e_lt_c: "E < DA" using ord_resolve_counterex_reducing[of N DA thesis] by auto have "Infer (mset CAs) DA E \<in> ord_\<Gamma>" using res_e unfolding ord_\<Gamma>_def by (metis (mono_tags, lifting) mem_Collect_eq) then show "\<exists>CC E. set_mset CC \<subseteq> N \<and> INTERP N \<Turnstile>m CC \<and> Infer CC DA E \<in> ord_\<Gamma> \<and> \<not> INTERP N \<Turnstile> E \<and> E < DA" using dd_sset_n dd_true e_cex e_lt_c by (metis set_mset_mset) qed (auto simp: ord_\<Gamma>_def intro: ord_resolve_sound ord_resolve_reductive) lemmas clausal_logic_compact = ord_\<Gamma>_sound_counterex_reducing.clausal_logic_compact end text \<open> A second proof of Theorem 3.12, compactness of clausal logic: \<close> lemmas clausal_logic_compact = ground_resolution_with_selection.clausal_logic_compact end
To describe clinical aspects of pandemic (H1N1) 2009 virus-associated pneumonia in children, we studied 80 such children, including 17 (21%) with complications, who were admitted to 5 hospitals in Japan during August-November 2009 after a mean of 2.9 symptomatic days. All enrolled patients recovered (median hospitalization 6 days). Timely access to hospitals may have contributed to favorable outcomes.
{-# OPTIONS --without-K #-} module PathStructure.Product {a b} {A : Set a} {B : Set b} where open import Equivalence open import PathOperations open import Types split-path : {x y : A × B} → x ≡ y → (π₁ x ≡ π₁ y) × (π₂ x ≡ π₂ y) split-path p = ap π₁ p , ap π₂ p merge-path : {x₁ x₂ : A} {y₁ y₂ : B} → (x₁ ≡ x₂) × (y₁ ≡ y₂) → Id (A × B) (x₁ , y₁) (x₂ , y₂) merge-path (p , q) = ap₂ _,_ p q split-merge-eq : {x y : A × B} → (x ≡ y) ≃ (π₁ x ≡ π₁ y) × (π₂ x ≡ π₂ y) split-merge-eq = split-path , (merge-path , λ pq → J (λ _ _ p → ∀ {b₁ b₂} (q : b₁ ≡ b₂) → split-path (merge-path (p , q)) ≡ p , q) (λ _ q → J (λ _ _ q → split-path (merge-path (refl , q)) ≡ refl , q) (λ _ → refl) _ _ q) _ _ (π₁ pq) (π₂ pq)) , (merge-path , J (λ _ _ p → merge-path (split-path p) ≡ p) (λ _ → refl) _ _)
import SciLean.Core.FunctionProperties namespace SciLean -------------------------------------------------------------------------------- -- Core bootstrapping theorems -------------------------------------------------------------------------------- instance IsLin_is_IsSmooth {X Y : Type} {Xs Y' : Type} [Vec Xs] [Vec Y'] (n : Nat) (f : X → Y) [Prod.Uncurry n (X → Y) Xs Y'] [inst : IsLinN n f] : IsSmoothN n f := ⟨sorry_proof⟩ instance {X Y} [Vec X] [Vec Y] (f : X → Y) [inst : IsSmoothN 1 f] : IsSmooth f := by induction inst apply IsSmooth.mk @[simp ↓, diff] theorem diff_of_linear {X Y} [Vec X] [Vec Y] (f : X → Y) [IsLin f] : ∂ f = λ _ dx => f dx := sorry_proof @[simp ↓, diff] theorem diff_of_linear_2 {X Y Z} [Vec X] [Vec Y] [Vec Z] (f : X → Y → Z) [IsLinN 2 f] : ∂ (uncurryN 2 f) = λ _ (dx,dy) => f dx dy := sorry_proof -------------------------------------------------------------------------------- -- Prod.fst - (·.1) -------------------------------------------------------------------------------- function_properties Prod.fst {X Y} [Vec X] [Vec Y] (xy : X×Y) : X argument xy isLin := sorry_proof, isSmooth := sorry_proof, abbrev ∂ 𝒯 := dxy.1 by symdiff function_properties Prod.fst {X Y} [SemiHilbert X] [SemiHilbert Y] (xy : X×Y) : X argument xy hasAdjoint := sorry_proof, abbrev † := ⟨xy',0⟩ by sorry_proof, hasAdjDiff := by apply HasAdjDiffN.mk'; symdiff; infer_instance, abbrev ∂† ℛ := (dxy', 0) by unfold adjointDifferential; symdiff; symdiff -- abbrev ℛ := (xy.1, λ dxy' => (dxy',0)) by symdiff -------------------------------------------------------------------------------- -- Prod.snd - (·.2) -------------------------------------------------------------------------------- function_properties Prod.snd {X Y} [Vec X] [Vec Y] (xy : X×Y) : Y argument xy isLin := sorry_proof, isSmooth := sorry_proof, abbrev ∂ 𝒯 := dxy.2 by symdiff -- , -- abbrev 𝒯 := (xy.2, dxy.2) by symdiff function_properties Prod.snd {X Y} [SemiHilbert X] [SemiHilbert Y] (xy : X×Y) : Y argument xy hasAdjoint := sorry_proof, abbrev † := ⟨0, xy'⟩ by sorry_proof, hasAdjDiff := by apply HasAdjDiffN.mk'; symdiff; infer_instance, abbrev ∂† := (0, dxy') by unfold adjointDifferential; symdiff; symdiff, abbrev ℛ := (xy.2, λ dxy' => (0,dxy')) by symdiff -------------------------------------------------------------------------------- -- Prod.mk -------------------------------------------------------------------------------- function_properties Prod.mk {X Y} [Vec X] [Vec Y] (x : X) (y : Y) : X×Y argument (x,y) isLin := sorry_proof, isSmooth := sorry_proof, abbrev ∂ 𝒯 := (dx, dy) by symdiff argument x isSmooth := sorry_proof, abbrev ∂ 𝒯 := (dx,0) by sorry_proof-- , -- abbrev 𝒯 := ((x,y), (dx,0)) by symdiff argument y isSmooth := sorry_proof, abbrev ∂ 𝒯 := (0,dy) by sorry_proof -- , -- abbrev 𝒯 := ((x,y), (0,dy)) by symdiff function_properties Prod.mk {X Y} [SemiHilbert X] [SemiHilbert Y] (x : X) (y : Y) : X×Y argument (x,y) hasAdjoint := sorry_proof, abbrev † := xy' by sorry_proof, hasAdjDiff := sorry_proof, abbrev ∂† ℛ := dxy' by unfold adjointDifferential; symdiff; symdiff; simp; symdiff; admit argument x hasAdjDiff := sorry_proof, abbrev ∂† ℛ := dx'.1 by sorry_proof argument y hasAdjDiff := sorry_proof, abbrev ∂† ℛ := dy'.2 by sorry_proof -------------------------------------------------------------------------------- -- Neg.neg - (-·) -------------------------------------------------------------------------------- function_properties Neg.neg {X} [Vec X] (x : X) : X argument x isLin := sorry_proof, isSmooth := sorry_proof, abbrev ∂ 𝒯 := - 
dx by symdiff-- , -- abbrev 𝒯 := (-x, -dx) by symdiff function_properties Neg.neg {X} [SemiHilbert X] (x : X) : X argument x hasAdjoint := sorry_proof, abbrev † := - x' by sorry_proof, hasAdjDiff, abbrev ∂† ℛ := - dx' by unfold adjointDifferential; symdiff; symdiff -------------------------------------------------------------------------------- -- HAdd.hAdd - (·+·) -------------------------------------------------------------------------------- function_properties HAdd.hAdd {X} [Vec X] (x y : X) : X argument (x,y) isLin := sorry_proof, isSmooth := sorry_proof, abbrev ∂ 𝒯 := dx + dy by symdiff-- , -- abbrev 𝒯 := (x+y, dx+dy) by symdiff argument x isSmooth := sorry_proof, abbrev ∂ 𝒯 := dx by sorry_proof-- , -- abbrev 𝒯 := (x+y, dx) by symdiff argument y isSmooth := sorry_proof, abbrev ∂ 𝒯 := dy by sorry_proof-- , -- abbrev 𝒯 := (x+y, dy) by symdiff function_properties HAdd.hAdd {X} [SemiHilbert X] (x y : X) : X argument (x,y) hasAdjoint := sorry_proof, abbrev † := (xy',xy') by sorry_proof, hasAdjDiff := sorry_proof, -- by apply HasAdjDiffN.mk'; symdiff; (try infer_instance); sorry_proof, abbrev ∂† ℛ := (dxy', dxy') by unfold adjointDifferential; symdiff; symdiff; admit argument x hasAdjDiff := sorry_proof, abbrev ∂† ℛ := dx' by sorry_proof argument y hasAdjDiff := sorry_proof, abbrev ∂† ℛ := dy' by sorry_proof -------------------------------------------------------------------------------- -- HSub.hSub - (·-·) -------------------------------------------------------------------------------- function_properties HSub.hSub {X} [Vec X] (x y : X) : X argument (x,y) isLin := sorry_proof, isSmooth := sorry_proof, abbrev ∂ 𝒯 := dx - dy by symdiff-- , -- abbrev 𝒯 := (x-y, dx-dy) by symdiff argument x isSmooth := sorry_proof, abbrev ∂ 𝒯 := dx by sorry_proof-- , -- abbrev 𝒯 := (x-y, dx) by symdiff argument y isSmooth := sorry_proof, abbrev ∂ 𝒯 := -dy by sorry_proof-- , -- abbrev 𝒯 := (x-y,-dy) by symdiff function_properties HSub.hSub {X} [SemiHilbert X] (x y : X) : X argument (x,y) hasAdjoint := sorry_proof, hasAdjDiff := sorry_proof, -- by apply HasAdjDiffN.mk'; symdiff; sorry_proof, abbrev † := (xy',-xy') by sorry_proof, abbrev ∂† ℛ := (dxy', -dxy') by unfold adjointDifferential; symdiff; symdiff; admit argument x hasAdjDiff := sorry_proof, abbrev ∂† ℛ := dx' by sorry_proof argument y hasAdjDiff := sorry_proof, abbrev ∂† ℛ := -dy' by sorry_proof -------------------------------------------------------------------------------- -- HMul.hMul - (·*·) -------------------------------------------------------------------------------- function_properties SMul.smul {X} [Vec X] (x : ℝ) (y : X) : X argument (x,y) isSmooth := sorry_proof, abbrev ∂ 𝒯 := dx•y + x•dy by sorry_proof argument x isLin := sorry_proof, isSmooth := sorry_proof, abbrev ∂ 𝒯 := dx•y by sorry_proof argument y isLin := sorry_proof, isSmooth := sorry_proof, abbrev ∂ 𝒯 := x•dy by sorry_proof function_properties SMul.smul {X} [SemiHilbert X] (x : ℝ) (y : X) : X argument y hasAdjoint := sorry_proof, abbrev † := x•y' by sorry_proof, hasAdjDiff, abbrev ∂† ℛ := x•dy' by unfold adjointDifferential; symdiff; symdiff function_properties SMul.smul {X} [Hilbert X] (x : ℝ) (y : X) : X argument x hasAdjoint := sorry_proof, abbrev † := ⟪x',y⟫ by sorry_proof, hasAdjDiff := by sorry_proof, -- apply HasAdjDiffN.mk'; symdiff; infer_instance, abbrev ∂† ℛ := ⟪dx',y⟫ by unfold adjointDifferential; sorry_proof -- symdiff; symdiff argument (x,y) hasAdjDiff := sorry_proof, -- by apply HasAdjDiffN.mk'; symdiff; sorry_proof, abbrev ∂† ℛ := (⟪dxy',y⟫, x•dxy') by unfold 
adjointDifferential; symdiff; sorry_proof -------------------------------------------------------------------------------- -- Inner.innet - ⟪·,·⟫ -------------------------------------------------------------------------------- function_properties Inner.inner {X} [Hilbert X] (x y : X) : ℝ argument (x,y) isSmooth := sorry_proof, abbrev ∂ 𝒯 := ⟪dx,y⟫ + ⟪x,dy⟫ by sorry_proof, hasAdjDiff := sorry_proof, -- by apply HasAdjDiffN.mk'; symdiff; sorry_proof, abbrev ∂† ℛ := (dxy'•x, dxy'•y) by sorry_proof argument x .. isLin := sorry_proof, isSmooth := sorry_proof, abbrev ∂ 𝒯 := ⟪dx,y⟫ by symdiff argument x hasAdjoint := sorry_proof, abbrev † := x'•y by sorry_proof argument y isLin := sorry_proof, isSmooth := sorry_proof, abbrev ∂ 𝒯 := ⟪x,dy⟫ by symdiff, hasAdjoint := sorry_proof, abbrev † := y'•x by sorry_proof -------------------------------------------------------------------------------- -- Inner.normSqr - ∥·∥² -------------------------------------------------------------------------------- function_properties Inner.normSqr {X} [Hilbert X] (x : X) : ℝ argument x isSmooth := sorry_proof, abbrev ∂ 𝒯 := 2*⟪dx,x⟫ by sorry_proof, hasAdjDiff := sorry_proof, abbrev ∂† ℛ := (2*dx')•x by sorry_proof -------------------------------------------------------------------------------- -- sum - ∑ -------------------------------------------------------------------------------- function_properties sum {X ι} [Vec X] [Enumtype ι] (f : ι → X) : X argument f isLin := sorry_proof, isSmooth := sorry_proof, abbrev ∂ 𝒯 := sum df by symdiff function_properties sum {X ι} [SemiHilbert X] [Enumtype ι] (f : ι → X) : X argument f hasAdjoint := sorry_proof, abbrev † := λ _ => f' by sorry_proof, hasAdjDiff, abbrev ∂† ℛ := λ _ => df' by unfold adjointDifferential; symdiff; symdiff -------------------------------------------------------------------------------- -- SmoothMap.val -------------------------------------------------------------------------------- function_properties SmoothMap.val {X Y} [Vec X] [Vec Y] (f : X⟿Y) (x : X) : Y argument (f,x) isSmooth := sorry_proof, abbrev ∂ := df x + ∂ f x dx by funext (f,x) (df,dx); simp; sorry_proof, abbrev 𝒯 := let (y,dy) := 𝒯 f x dx; (y, df x + dy) by unfold Smooth.tangentMap; symdiff argument f isLin := sorry_proof, isSmooth := sorry_proof, abbrev ∂ 𝒯 := df x by symdiff -- argument x -- isSmooth := sorry_proof, -- abbrev ∂ := ∂ f x dx by unfold Smooth.differential; symdiff, -- abbrev 𝒯 := 𝒯 f x dx by unfold Smooth.tangentMap; symdiff -------------------------------------------------------------------------------- -- SmoothMap.mk' -------------------------------------------------------------------------------- -- instance SmoothMap.mk'.arg_f.prolongation.isSmoothT {X Y W} [Vec X] [Vec Y] [Vec W] -- (f : W → X → Y) [IsSmoothNT 2 f] -- : IsSmoothT (λ w => λ x ⟿ f w x) := sorry_proof -- instance SmoothMap.mk'.arg_f.prolongation.diff_simp {X Y W} [Vec X] [Vec Y] [Vec W] -- (f : W → X → Y) [IsSmoothNT 2 f] -- : ∂ (λ w => λ x ⟿ f w x) -- = -- λ w dw => λ x ⟿ ∂ f w dw x:= sorry_proof -------------------------------------------------------------------------------- -- LinMap.val -------------------------------------------------------------------------------- function_properties LinMap.val {X Y} [Vec X] [Vec Y] (f : X⊸Y) (x : X) : Y argument (f,x) isSmooth := sorry_proof, abbrev ∂ 𝒯 := df x + f dx by funext (f,x) (df,dx); simp; sorry_proof argument f .. 
isLin := sorry_proof, isSmooth := sorry_proof, abbrev ∂ 𝒯 := df x by symdiff -- argument x -- isLin := sorry_proof-- , -- isSmooth := sorry_proof, -- abbrev ∂ 𝒯 := f dx by symdiff -- function_properties LinMap.val {X Y ι} [Enumtype ι] [FinVec X ι] [Hilbert Y] (f : X⊸Y) (x : X) : Y -- argument f -- hasAdjoint := sorry_proof, -- isLin := sorry_proof, -- TODO: this should be done automatically! -- abbrev † := ⟨λ x' => ⟪x,x'⟫ * f', sorry_proof⟩ by sorry_proof, -- hasAdjDiff, -- abbrev ∂† ℛ := ⟨λ x' => ⟪x,x'⟫ * df', sorry_proof⟩ by unfold adjointDifferential; symdiff; symdiff -------------------------------------------------------------------------------- -- LinMap.mk' -------------------------------------------------------------------------------- -- instance SmoothMap.mk'.arg_f.prolongation.isSmoothT {X Y W} [Vec X] [Vec Y] [Vec W] -- (f : W → X → Y) [IsSmoothNT 2 f] -- : IsSmoothT (λ w => λ x ⟿ f w x) := sorry_proof -- instance SmoothMap.mk'.arg_f.prolongation.diff_simp {X Y W} [Vec X] [Vec Y] [Vec W] -- (f : W → X → Y) [IsSmoothNT 2 f] -- : ∂ (λ w => λ x ⟿ f w x) -- = -- λ w dw => λ x ⟿ ∂ f w dw x:= sorry_proof
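As a quick illustration of how the bootstrapping lemma `diff_of_linear` above is meant to be used (a hypothetical sketch under the file's own imports and notation, not checked against SciLean; the names `g`, `x`, `dx` are local assumptions, not part of the original source):

-- Hypothetical usage sketch only: for a linear map, the differential should
-- reduce to the map applied to the direction, via the `diff_of_linear`
-- simp lemma stated above. Relies on SciLean's `∂` notation as used in this file.
example {X Y} [Vec X] [Vec Y] (g : X → Y) [IsLin g] (x dx : X) :
    ∂ g x dx = g dx := by
  simp [diff_of_linear]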
State Before: ι : Type ?u.432423 α : Type u β : Type v γ : Type w δ : Type x l₁ l₂ : List α x : α i j : ℕ ⊢ (i, x) ∈ enumFrom j [] → j ≤ i ∧ i < j + length [] ∧ x ∈ [] State After: no goals Tactic: simp [enumFrom] State Before: ι : Type ?u.432423 α : Type u β : Type v γ : Type w δ : Type x l₁ l₂ : List α x : α i j : ℕ y : α ys : List α ⊢ (i, x) ∈ enumFrom j (y :: ys) → j ≤ i ∧ i < j + length (y :: ys) ∧ x ∈ y :: ys State After: ι : Type ?u.432423 α : Type u β : Type v γ : Type w δ : Type x l₁ l₂ : List α x : α i j : ℕ y : α ys : List α ⊢ i = j ∧ x = y ∨ (i, x) ∈ enumFrom (j + 1) ys → j ≤ i ∧ i < j + (length ys + 1) ∧ (x = y ∨ x ∈ ys) Tactic: suffices i = j ∧ x = y ∨ (i, x) ∈ enumFrom (j + 1) ys → j ≤ i ∧ i < j + (length ys + 1) ∧ (x = y ∨ x ∈ ys) by simpa [enumFrom, mem_enumFrom ys] State Before: ι : Type ?u.432423 α : Type u β : Type v γ : Type w δ : Type x l₁ l₂ : List α x : α i j : ℕ y : α ys : List α ⊢ i = j ∧ x = y ∨ (i, x) ∈ enumFrom (j + 1) ys → j ≤ i ∧ i < j + (length ys + 1) ∧ (x = y ∨ x ∈ ys) State After: case inl ι : Type ?u.432423 α : Type u β : Type v γ : Type w δ : Type x l₁ l₂ : List α x : α i j : ℕ y : α ys : List α h : i = j ∧ x = y ⊢ j ≤ i ∧ i < j + (length ys + 1) ∧ (x = y ∨ x ∈ ys) case inr ι : Type ?u.432423 α : Type u β : Type v γ : Type w δ : Type x l₁ l₂ : List α x : α i j : ℕ y : α ys : List α h : (i, x) ∈ enumFrom (j + 1) ys ⊢ j ≤ i ∧ i < j + (length ys + 1) ∧ (x = y ∨ x ∈ ys) Tactic: rintro (h | h) State Before: ι : Type ?u.432423 α : Type u β : Type v γ : Type w δ : Type x l₁ l₂ : List α x : α i j : ℕ y : α ys : List α this : i = j ∧ x = y ∨ (i, x) ∈ enumFrom (j + 1) ys → j ≤ i ∧ i < j + (length ys + 1) ∧ (x = y ∨ x ∈ ys) ⊢ (i, x) ∈ enumFrom j (y :: ys) → j ≤ i ∧ i < j + length (y :: ys) ∧ x ∈ y :: ys State After: no goals Tactic: simpa [enumFrom, mem_enumFrom ys] State Before: case inl ι : Type ?u.432423 α : Type u β : Type v γ : Type w δ : Type x l₁ l₂ : List α x : α i j : ℕ y : α ys : List α h : i = j ∧ x = y ⊢ j ≤ i ∧ i < j + (length ys + 1) ∧ (x = y ∨ x ∈ ys) State After: case inl ι : Type ?u.432423 α : Type u β : Type v γ : Type w δ : Type x l₁ l₂ : List α x : α i j : ℕ y : α ys : List α h : i = j ∧ x = y ⊢ i < i + (length ys + 1) Tactic: refine' ⟨le_of_eq h.1.symm, h.1 ▸ _, Or.inl h.2⟩ State Before: case inl ι : Type ?u.432423 α : Type u β : Type v γ : Type w δ : Type x l₁ l₂ : List α x : α i j : ℕ y : α ys : List α h : i = j ∧ x = y ⊢ i < i + (length ys + 1) State After: case inl.h ι : Type ?u.432423 α : Type u β : Type v γ : Type w δ : Type x l₁ l₂ : List α x : α i j : ℕ y : α ys : List α h : i = j ∧ x = y ⊢ 0 < length ys + 1 Tactic: apply Nat.lt_add_of_pos_right State Before: case inl.h ι : Type ?u.432423 α : Type u β : Type v γ : Type w δ : Type x l₁ l₂ : List α x : α i j : ℕ y : α ys : List α h : i = j ∧ x = y ⊢ 0 < length ys + 1 State After: no goals Tactic: simp State Before: case inr ι : Type ?u.432423 α : Type u β : Type v γ : Type w δ : Type x l₁ l₂ : List α x : α i j : ℕ y : α ys : List α h : (i, x) ∈ enumFrom (j + 1) ys ⊢ j ≤ i ∧ i < j + (length ys + 1) ∧ (x = y ∨ x ∈ ys) State After: case inr ι : Type ?u.432423 α : Type u β : Type v γ : Type w δ : Type x l₁ l₂ : List α x : α i j : ℕ y : α ys : List α h : (i, x) ∈ enumFrom (j + 1) ys hji : j + 1 ≤ i hijlen : i < j + 1 + length ys hmem : x ∈ ys ⊢ j ≤ i ∧ i < j + (length ys + 1) ∧ (x = y ∨ x ∈ ys) Tactic: have ⟨hji, hijlen, hmem⟩ := mem_enumFrom _ h State Before: case inr ι : Type ?u.432423 α : Type u β : Type v γ : Type w δ : Type x l₁ l₂ : List α x : α i j : ℕ y : α ys : List α h : (i, x) ∈ 
enumFrom (j + 1) ys hji : j + 1 ≤ i hijlen : i < j + 1 + length ys hmem : x ∈ ys ⊢ j ≤ i ∧ i < j + (length ys + 1) ∧ (x = y ∨ x ∈ ys) State After: case inr.refine'_1 ι : Type ?u.432423 α : Type u β : Type v γ : Type w δ : Type x l₁ l₂ : List α x : α i j : ℕ y : α ys : List α h : (i, x) ∈ enumFrom (j + 1) ys hji : j + 1 ≤ i hijlen : i < j + 1 + length ys hmem : x ∈ ys ⊢ j ≤ i case inr.refine'_2 ι : Type ?u.432423 α : Type u β : Type v γ : Type w δ : Type x l₁ l₂ : List α x : α i j : ℕ y : α ys : List α h : (i, x) ∈ enumFrom (j + 1) ys hji : j + 1 ≤ i hijlen : i < j + 1 + length ys hmem : x ∈ ys ⊢ i < j + (length ys + 1) case inr.refine'_3 ι : Type ?u.432423 α : Type u β : Type v γ : Type w δ : Type x l₁ l₂ : List α x : α i j : ℕ y : α ys : List α h : (i, x) ∈ enumFrom (j + 1) ys hji : j + 1 ≤ i hijlen : i < j + 1 + length ys hmem : x ∈ ys ⊢ x = y ∨ x ∈ ys Tactic: refine' ⟨_, _, _⟩ State Before: case inr.refine'_1 ι : Type ?u.432423 α : Type u β : Type v γ : Type w δ : Type x l₁ l₂ : List α x : α i j : ℕ y : α ys : List α h : (i, x) ∈ enumFrom (j + 1) ys hji : j + 1 ≤ i hijlen : i < j + 1 + length ys hmem : x ∈ ys ⊢ j ≤ i State After: no goals Tactic: exact le_trans (Nat.le_succ _) hji State Before: case inr.refine'_2 ι : Type ?u.432423 α : Type u β : Type v γ : Type w δ : Type x l₁ l₂ : List α x : α i j : ℕ y : α ys : List α h : (i, x) ∈ enumFrom (j + 1) ys hji : j + 1 ≤ i hijlen : i < j + 1 + length ys hmem : x ∈ ys ⊢ i < j + (length ys + 1) State After: case h.e'_4 ι : Type ?u.432423 α : Type u β : Type v γ : Type w δ : Type x l₁ l₂ : List α x : α i j : ℕ y : α ys : List α h : (i, x) ∈ enumFrom (j + 1) ys hji : j + 1 ≤ i hijlen : i < j + 1 + length ys hmem : x ∈ ys ⊢ j + (length ys + 1) = j + 1 + length ys Tactic: convert hijlen using 1 State Before: case h.e'_4 ι : Type ?u.432423 α : Type u β : Type v γ : Type w δ : Type x l₁ l₂ : List α x : α i j : ℕ y : α ys : List α h : (i, x) ∈ enumFrom (j + 1) ys hji : j + 1 ≤ i hijlen : i < j + 1 + length ys hmem : x ∈ ys ⊢ j + (length ys + 1) = j + 1 + length ys State After: no goals Tactic: ac_rfl State Before: case inr.refine'_3 ι : Type ?u.432423 α : Type u β : Type v γ : Type w δ : Type x l₁ l₂ : List α x : α i j : ℕ y : α ys : List α h : (i, x) ∈ enumFrom (j + 1) ys hji : j + 1 ≤ i hijlen : i < j + 1 + length ys hmem : x ∈ ys ⊢ x = y ∨ x ∈ ys State After: no goals Tactic: simp [hmem]
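To make the statement being proved concrete, a small sanity check (an illustrative snippet, not part of the extracted Mathlib proof; it assumes `List.enumFrom` under its core-library name):

-- `enumFrom j l` pairs each element of `l` with its index, counting from `j`.
#eval List.enumFrom 2 ["a", "b", "c"]
-- expected: [(2, "a"), (3, "b"), (4, "c")]
-- Each pair (i, x) satisfies 2 ≤ i, i < 2 + 3, and x ∈ ["a", "b", "c"],
-- which is exactly the conclusion `j ≤ i ∧ i < j + length l ∧ x ∈ l` proved above.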
module Choice where --- Axiom of choice in type theory
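The module above only names its topic, so here is a brief, hedged illustration (written in Lean rather than Agda, and not taken from the original file) of why choice is a theorem rather than an axiom in dependent type theory with strong Σ-types:

-- Illustrative sketch only: with Σ'-types (PSigma), a choice function is
-- obtained by projecting out the witness, so no extra axiom is required.
def typeTheoreticChoice {A : Type} {B : A → Type}
    (R : (a : A) → B a → Prop)
    (h : (a : A) → Σ' b : B a, R a b) :
    Σ' f : (a : A) → B a, ∀ a, R a (f a) :=
  ⟨fun a => (h a).1, fun a => (h a).2⟩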
{-# OPTIONS --without-K #-} module algebra.semigroup.morphism where open import level open import algebra.semigroup.core open import equality.core open import hott.level module _ {i}{j} {X : Set i}⦃ sX : IsSemigroup X ⦄ {Y : Set j}⦃ sY : IsSemigroup Y ⦄ where open IsSemigroup ⦃ ... ⦄ IsSemigroupMorphism : (X → Y) → Set (i ⊔ j) IsSemigroupMorphism f = (x₁ x₂ : X) → f (x₁ * x₂) ≡ f x₁ * f x₂ is-semigroup-morphism-level : (f : X → Y) → h 1 (IsSemigroupMorphism f) is-semigroup-morphism-level f = Π-level λ x₁ → Π-level λ x₂ → is-set _ _
{-# OPTIONS --rewriting #-} module Issue2792 where open import Issue2792.Safe
[STATEMENT] lemma eps_free_automata_empty [simp]: "eps \<A> = {||} \<Longrightarrow> eps_free_automata {||} \<A> = \<A>" [PROOF STATE] proof (prove) goal (1 subgoal): 1. eps \<A> = {||} \<Longrightarrow> eps_free_automata {||} \<A> = \<A> [PROOF STEP] by (auto simp add: eps_free_automata_def intro!: TA_equalityI)
Formal statement is: lemma pred_le_const[measurable (raw generic)]: assumes f: "f \<in> measurable M N" and c: "{.. c} \<in> sets N" shows "pred M (\<lambda>x. f x \<le> c)" Informal statement is: If $f$ is a measurable function from $M$ to $N$ and the set $\{y \mid y \leq c\}$ is measurable in $N$, then the predicate $\{x \in M \mid f(x) \leq c\}$ is measurable in $M$.
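A brief note on why this holds (a standard measure-theoretic argument, not extracted from the Isabelle proof script): the predicate is a preimage, $\{x \in \mathrm{space}\ M \mid f(x) \leq c\} = f^{-1}(\{y \mid y \leq c\})$, and preimages of measurable sets under measurable functions are measurable, so measurability of $\{y \mid y \leq c\}$ in $N$ transfers along $f$ to $M$.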