Datasets: AI4M

text · string lengths 0 to 3.34M
#pragma once // Eigen #include <Eigen/Core> #include <Eigen/Dense> // offset from center of laser rotation #define AXIS_OFFSET 0.0 //-0.015 using namespace Eigen; /** * Get the 3D minimum distance between 2 lines * following http://geomalgorithms.com/a07-_distance.html#Distance-between-Lines * @param pos0 origin line0 * @param dir1 direction line0 * @param pos1 origin line1 * @param dir2 direction line1 * @param tri0 point on line0 closest to line1 * @param tri1 point on line1 closest to line0 * @return distance between the lines */ double dist3D_Line_to_Line( Vector3d &pos0, Vector3d &dir1, Vector3d &pos1, Vector3d &dir2, Vector3d &tri0, Vector3d &tri1); /** * This function triangulates the position of a sensor using the horizontal and vertical angles from two lighthouses * @param angles0 vertical/horizontal angles from first lighthouse * @param angles1 vertical/horizontal angles from second lighthouse * @param RT_0 pose matrix of first lighthouse * @param RT_1 pose matrix of second lighthouse * @param triangulated_position the triangulated position * @param ray0 lighthouse ray * @param ray1 lighthouse ray */ double triangulateFromLighthouseAngles(Vector2d &angles0, Vector2d &angles1, Matrix4d &RT_0, Matrix4d &RT_1, Vector3d &triangulated_position, Vector3d &ray0, Vector3d &ray1); double triangulateFromRays(Vector3d &ray0, Vector3d &ray1, Matrix4d &RT_0, Matrix4d &RT_1, Vector3d &triangulated_position); void rayFromLighthouseAngles(Vector2d &angles, Vector3d &ray);
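A minimal Eigen-based sketch of the closest-point computation that dist3D_Line_to_Line declares, following the geomalgorithms page cited above. It mirrors the declared signature, but the body (including the 1e-12 parallelism threshold) is an illustrative assumption rather than this project's implementation, and it assumes both direction vectors are nonzero.

// Sketch only: minimum distance between two 3D lines and the closest points,
// following http://geomalgorithms.com/a07-_distance.html#Distance-between-Lines.
#include <Eigen/Core>
#include <Eigen/Dense>

using namespace Eigen;

double dist3D_Line_to_Line(Vector3d &pos0, Vector3d &dir1,
                           Vector3d &pos1, Vector3d &dir2,
                           Vector3d &tri0, Vector3d &tri1) {
  const Vector3d w0 = pos0 - pos1;     // vector between the line origins
  const double a = dir1.dot(dir1);     // |u|^2
  const double b = dir1.dot(dir2);     // u . v
  const double c = dir2.dot(dir2);     // |v|^2
  const double d = dir1.dot(w0);
  const double e = dir2.dot(w0);
  const double denom = a * c - b * b;  // always >= 0

  double sc, tc;                       // line parameters of the closest points
  if (denom < 1e-12) {                 // lines are (nearly) parallel
    sc = 0.0;
    tc = (b > c ? d / b : e / c);      // use the larger divisor for stability
  } else {
    sc = (b * e - c * d) / denom;
    tc = (a * e - b * d) / denom;
  }

  tri0 = pos0 + sc * dir1;             // point on line0 closest to line1
  tri1 = pos1 + tc * dir2;             // point on line1 closest to line0
  return (tri0 - tri1).norm();         // minimum distance between the lines
}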
State Before: G : Type u_1 inst✝¹ : Group G H : Subgroup G inst✝ : Normal H n : ℕ ⊢ comap (mk' (center G)) (upperCentralSeries (G ⧸ center G) n) = upperCentralSeries G (Nat.succ n) State After: case zero G : Type u_1 inst✝¹ : Group G H : Subgroup G inst✝ : Normal H ⊢ comap (mk' (center G)) (upperCentralSeries (G ⧸ center G) Nat.zero) = upperCentralSeries G (Nat.succ Nat.zero) case succ G : Type u_1 inst✝¹ : Group G H : Subgroup G inst✝ : Normal H n : ℕ ih : comap (mk' (center G)) (upperCentralSeries (G ⧸ center G) n) = upperCentralSeries G (Nat.succ n) ⊢ comap (mk' (center G)) (upperCentralSeries (G ⧸ center G) (Nat.succ n)) = upperCentralSeries G (Nat.succ (Nat.succ n)) Tactic: induction' n with n ih State Before: case zero G : Type u_1 inst✝¹ : Group G H : Subgroup G inst✝ : Normal H ⊢ comap (mk' (center G)) (upperCentralSeries (G ⧸ center G) Nat.zero) = upperCentralSeries G (Nat.succ Nat.zero) State After: no goals Tactic: simp only [Nat.zero_eq, upperCentralSeries_zero, MonoidHom.comap_bot, ker_mk', (upperCentralSeries_one G).symm] State Before: case succ G : Type u_1 inst✝¹ : Group G H : Subgroup G inst✝ : Normal H n : ℕ ih : comap (mk' (center G)) (upperCentralSeries (G ⧸ center G) n) = upperCentralSeries G (Nat.succ n) ⊢ comap (mk' (center G)) (upperCentralSeries (G ⧸ center G) (Nat.succ n)) = upperCentralSeries G (Nat.succ (Nat.succ n)) State After: case succ G : Type u_1 inst✝¹ : Group G H : Subgroup G inst✝ : Normal H n : ℕ ih : comap (mk' (center G)) (upperCentralSeries (G ⧸ center G) n) = upperCentralSeries G (Nat.succ n) Hn : Subgroup (G ⧸ center G) := upperCentralSeries (G ⧸ center G) n ⊢ comap (mk' (center G)) (upperCentralSeries (G ⧸ center G) (Nat.succ n)) = upperCentralSeries G (Nat.succ (Nat.succ n)) Tactic: let Hn := upperCentralSeries (G ⧸ center G) n State Before: case succ G : Type u_1 inst✝¹ : Group G H : Subgroup G inst✝ : Normal H n : ℕ ih : comap (mk' (center G)) (upperCentralSeries (G ⧸ center G) n) = upperCentralSeries G (Nat.succ n) Hn : Subgroup (G ⧸ center G) := upperCentralSeries (G ⧸ center G) n ⊢ comap (mk' (center G)) (upperCentralSeries (G ⧸ center G) (Nat.succ n)) = upperCentralSeries G (Nat.succ (Nat.succ n)) State After: no goals Tactic: calc comap (mk' (center G)) (upperCentralSeriesStep Hn) = comap (mk' (center G)) (comap (mk' Hn) (center ((G ⧸ center G) ⧸ Hn))) := by rw [upperCentralSeriesStep_eq_comap_center] _ = comap (mk' (comap (mk' (center G)) Hn)) (center (G ⧸ comap (mk' (center G)) Hn)) := QuotientGroup.comap_comap_center _ = comap (mk' (upperCentralSeries G n.succ)) (center (G ⧸ upperCentralSeries G n.succ)) := (comap_center_subst ih) _ = upperCentralSeriesStep (upperCentralSeries G n.succ) := symm (upperCentralSeriesStep_eq_comap_center _) State Before: G : Type u_1 inst✝¹ : Group G H : Subgroup G inst✝ : Normal H n : ℕ ih : comap (mk' (center G)) (upperCentralSeries (G ⧸ center G) n) = upperCentralSeries G (Nat.succ n) Hn : Subgroup (G ⧸ center G) := upperCentralSeries (G ⧸ center G) n ⊢ comap (mk' (center G)) (upperCentralSeriesStep Hn) = comap (mk' (center G)) (comap (mk' Hn) (center ((G ⧸ center G) ⧸ Hn))) State After: no goals Tactic: rw [upperCentralSeriesStep_eq_comap_center]
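In more familiar notation, the statement being proved says that the upper central series of G is the preimage of the upper central series of G ⧸ center G under the quotient map: writing π for mk' (center G), the goal is π⁻¹(Z_n(G ⧸ Z(G))) = Z_{n+1}(G). The transcript proves it by induction on n: the base case reduces to π⁻¹(⊥) = ker π = Z(G) = Z_1(G), and the successor step rewrites both sides through upperCentralSeriesStep_eq_comap_center and the induction hypothesis.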
% Generate an n-column convolution matrix of a polynomial p % An n-column convolution matrix of p is the matrix for the % linear transformation L(f) = p*f for polynomial f of degree n-1. % If f is the coefficient vector of f, and C is the convolution matrix, % then C*f is the coefficient vector of p*f. % % Syntax: C = ConvolutionMatrix(p,n) % % Input: p --- (string or vector) the univariate polynomial % n --- (integer) the column dimension of the convolution matrix % % Output: C --- the convolution matrix % % Example: >> p = '1+3*x^4-6*x^8'; % >> C = ConvolutionMatrix(p,3) % C = % -6 0 0 % 0 -6 0 % 0 0 -6 % 0 0 0 % 3 0 0 % 0 3 0 % 0 0 3 % 0 0 0 % 1 0 0 % 0 1 0 % 0 0 1
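A brief worked note on what the example matrix encodes, using the highest-degree-first coefficient ordering visible in the example output: C is the banded matrix with entries C_{ij} = p_{i-j+1} (taken as zero outside the coefficient range), so

(Cf)_i = \sum_j p_{i-j+1} f_j, \qquad\text{i.e.}\qquad C f = \operatorname{conv}(p, f).

For the example above, if f = [f_1; f_2; f_3] represents f(x) = f_1 x^2 + f_2 x + f_3, then the first entry of C*f is -6 f_1 (the x^{10} coefficient of p*f) and the last entry is f_3 (its constant term).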
import data.real.basic import for_mathlib.decimal_expansions import zero_point_seven_one -- "obvious" proof that 0.71 has no 8's in decimal expansion! /- M1F May exam 2018, question 2. -/ universe u local attribute [instance, priority 0] classical.prop_decidable -- Q2(a)(i) def ub (S : set ℝ) (x : ℝ) := ∀ s ∈ S, s ≤ x ---ans -- Q2(a)(ii) -- iba: is bounded above def iba (S : set ℝ) := ∃ x, ub S x ---ans -- Q2(a)(iii) def lub (S : set ℝ) (b : ℝ) := ub S b ∧ ∀ y : ℝ, (ub S y → b ≤ y) ---ans -- Q2(b) theorem lub_duh (S : set ℝ) : (∃ x, lub S x) → S ≠ ∅ ∧ iba S := ---ans begin intro Hexlub, cases Hexlub with x Hlub, split, intro Hemp, rw set.empty_def at Hemp, cases Hlub with Hub Hl, have Hallub : ∀ y : ℝ, ub S y, unfold ub, rw Hemp, change (∀ (y s : ℝ), false → s ≤ y), intros y s Hf, exfalso, exact Hf, have Hneginf : ∀ y : ℝ, x ≤ y, intro y, apply Hl, apply Hallub, have Hcontr := Hneginf (x - 1), revert Hcontr, norm_num, existsi x, exact Hlub.left, end -- Q2(c)(i) preparation def S1 := {x : ℝ | x < 59} lemma between_bounds (x y : ℝ) (H : x < y) : x < (x + y) / 2 ∧ (x + y) / 2 < y := ⟨by linarith, by linarith⟩ -- Q2(c)(i) theorem S1_lub : lub S1 59 := ---ans begin split, intro, change (s < 59 → s ≤ 59), exact le_of_lt, intro y, change ((∀ (s : ℝ), s < 59 → s ≤ y) → 59 ≤ y), intro Hbub, apply le_of_not_gt, intro Hbadub, have Houtofbounds := between_bounds y 59 Hbadub, apply not_le_of_gt Houtofbounds.1 (Hbub ((y + 59) / 2) Houtofbounds.2), end -- Q2(c)(ii) preparations definition S2 : set ℝ := {x | 7/10 < x ∧ x < 9/10 ∧ ∀ n : ℕ, decimal.expansion_nonneg x n ≠ 8} lemma S2_nonempty_and_bounded : (∃ s : ℝ, s ∈ S2) ∧ ∀ (s : ℝ), s ∈ S2 → s ≤ 9/10 := begin split, { -- 0.71 ∈ S use (71 / 100 : ℝ), split, norm_num, split, norm_num, exact no_eights_in_0_point_71 }, rintro s ⟨hs1, hs2, h⟩, exact le_of_lt hs2 end -- Q2(c)(ii) theorem S2_has_lub : ∃ b : ℝ, lub S2 b := begin cases S2_nonempty_and_bounded with Hne Hbd, have H := real.exists_sup S2 Hne ⟨(9/10 : ℝ), Hbd⟩, cases H with b Hb, use b, split, { intros s2 Hs2, exact (Hb b).mp (le_refl _) s2 Hs2, }, { intros y Hy, exact (Hb y).mpr Hy, } end -- Q2(d)(i) theorem ublub_the_first (S : set ℝ) (b : ℝ) (hub : ub S b) (hin : b ∈ S) : lub S b := ---ans begin split, exact hub, intros y huby, exact huby b hin, end -- Q2(d)(ii) theorem adlub_the_second (S T : set ℝ) (b c : ℝ) (hlubb : lub S b) (hlubc : lub T c) ---ans : lub ({x : ℝ | ∃ s t : ℝ, s ∈ S ∧ t ∈ T ∧ x = s + t}) (b + c) := begin split, unfold ub, simp, intros x s hss t htt hxst, rw hxst, apply add_le_add (hlubb.1 s hss) (hlubc.1 t htt), unfold ub, simp, intros x Hx, apply le_of_not_gt, intro Hcontr, let ε := b + c - x, have Hcontr' : ε > 0 := (by linarith : b + c - x > 0), have rwx : x = (b - ε / 2) + (c - ε / 2) := (by linarith : x = (b - (b + c - x) / 2) + (c - (b + c - x) / 2)), have hnbub : ∃ s' ∈ S, b - ε / 2 < s', by_contradiction, have a' : (¬∃ (s' : ℝ), s' ∈ S ∧ b - ε / 2 < s'), intro b, apply a, cases b with σ Hσ, existsi σ, existsi Hσ.1, exact Hσ.2, have a'' : ∀ (x : ℝ), x ∈ S → ¬(b - ε / 2 < x), intros x Hx Hb, rw not_exists at a', apply a' x, exact ⟨Hx, Hb⟩, simp only [not_lt] at a'', rw ←ub at a'', have a''' := hlubb.2 _ a'', linarith, have hnbuc : ∃ t' ∈ T, c - ε / 2 < t', by_contradiction, have a' : (¬∃ (t' : ℝ), t' ∈ T ∧ c - ε / 2 < t'), intro b, apply a, cases b with σ Hσ, existsi σ, existsi Hσ.1, exact Hσ.2, have a'' : ∀ (x : ℝ), x ∈ T → ¬(c - ε / 2 < x), intros x Hx Hc, rw not_exists at a', apply a' x, exact ⟨Hx, Hc⟩, simp only [not_lt] at a'', rw ←ub at a'', have a''' := hlubc.2 _ 
a'', linarith, cases hnbub with s' hnbub', cases hnbub' with Hs' hnbub'', cases hnbuc with t' hnbuc', cases hnbuc' with Ht' hnbuc'', have Hx' := Hx (s' + t') s' Hs' t' Ht' rfl, have Haha : x < x := lt_of_lt_of_le (by { rw rwx, apply add_lt_add hnbub'' hnbuc'' } : x < s' + t') Hx', linarith, end
lemma pred_in_If[measurable (raw)]: "(P \<Longrightarrow> pred M (\<lambda>x. x \<in> A x)) \<Longrightarrow> (\<not> P \<Longrightarrow> pred M (\<lambda>x. x \<in> B x)) \<Longrightarrow> pred M (\<lambda>x. x \<in> (if P then A x else B x))"
State Before: α : Type u β : Type v γ : Type w inst✝² : TopologicalSpace α inst✝¹ : LinearOrder α inst✝ : OrderTopology α a : α s : Set α hs : s ∈ 𝓝 a u : α hu : a < u ⊢ ∃ u', u' ∈ Ioc a u ∧ Ico a u' ⊆ s State After: no goals Tactic: simpa only [OrderDual.exists, exists_prop, dual_Ico, dual_Ioc] using exists_Ioc_subset_of_mem_nhds' (show ofDual ⁻¹' s ∈ 𝓝 (toDual a) from hs) hu.dual
/- Copyright (c) 2017 Mario Carneiro. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Author: Mario Carneiro -/ import Mathlib.PrePort import Mathlib.Lean3Lib.init.default import Mathlib.data.list.basic import Mathlib.Lean3Lib.data.stream import Mathlib.Lean3Lib.data.lazy_list import Mathlib.data.seq.computation import Mathlib.PostPort universes u u_1 v w namespace Mathlib /- coinductive seq (α : Type u) : Type u | nil : seq α | cons : α → seq α → seq α -/ /-- A stream `s : option α` is a sequence if `s.nth n = none` implies `s.nth (n + 1) = none`. -/ def stream.is_seq {α : Type u} (s : stream (Option α)) := ∀ {n : ℕ}, s n = none → s (n + 1) = none /-- `seq α` is the type of possibly infinite lists (referred here as sequences). It is encoded as an infinite stream of options such that if `f n = none`, then `f m = none` for all `m ≥ n`. -/ def seq (α : Type u) := Subtype fun (f : stream (Option α)) => stream.is_seq f /-- `seq1 α` is the type of nonempty sequences. -/ def seq1 (α : Type u_1) := α × seq α namespace seq /-- The empty sequence -/ def nil {α : Type u} : seq α := { val := stream.const none, property := sorry } protected instance inhabited {α : Type u} : Inhabited (seq α) := { default := nil } /-- Prepend an element to a sequence -/ def cons {α : Type u} (a : α) : seq α → seq α := sorry /-- Get the nth element of a sequence (if it exists) -/ def nth {α : Type u} : seq α → ℕ → Option α := subtype.val /-- A sequence has terminated at position `n` if the value at position `n` equals `none`. -/ def terminated_at {α : Type u} (s : seq α) (n : ℕ) := nth s n = none /-- It is decidable whether a sequence terminates at a given position. -/ protected instance terminated_at_decidable {α : Type u} (s : seq α) (n : ℕ) : Decidable (terminated_at s n) := decidable_of_iff' ↥(option.is_none (nth s n)) sorry /-- A sequence terminates if there is some position `n` at which it has terminated. -/ def terminates {α : Type u} (s : seq α) := ∃ (n : ℕ), terminated_at s n /-- Functorial action of the functor `option (α × _)` -/ @[simp] def omap {α : Type u} {β : Type v} {γ : Type w} (f : β → γ) : Option (α × β) → Option (α × γ) := sorry /-- Get the first element of a sequence -/ def head {α : Type u} (s : seq α) : Option α := nth s 0 /-- Get the tail of a sequence (or `nil` if the sequence is `nil`) -/ def tail {α : Type u} : seq α → seq α := sorry protected def mem {α : Type u} (a : α) (s : seq α) := some a ∈ subtype.val s protected instance has_mem {α : Type u} : has_mem α (seq α) := has_mem.mk seq.mem theorem le_stable {α : Type u} (s : seq α) {m : ℕ} {n : ℕ} (h : m ≤ n) : nth s m = none → nth s n = none := sorry /-- If a sequence terminated at position `n`, it also terminated at `m ≥ n `. -/ theorem terminated_stable {α : Type u} (s : seq α) {m : ℕ} {n : ℕ} (m_le_n : m ≤ n) (terminated_at_m : terminated_at s m) : terminated_at s n := le_stable s m_le_n terminated_at_m /-- If `s.nth n = some aₙ` for some value `aₙ`, then there is also some value `aₘ` such that `s.nth = some aₘ` for `m ≤ n`. 
-/ theorem ge_stable {α : Type u} (s : seq α) {aₙ : α} {n : ℕ} {m : ℕ} (m_le_n : m ≤ n) (s_nth_eq_some : nth s n = some aₙ) : ∃ (aₘ : α), nth s m = some aₘ := sorry theorem not_mem_nil {α : Type u} (a : α) : ¬a ∈ nil := sorry theorem mem_cons {α : Type u} (a : α) (s : seq α) : a ∈ cons a s := subtype.cases_on s fun (s_val : stream (Option α)) (s_property : stream.is_seq s_val) => idRhs (some a ∈ some a :: s_val) (stream.mem_cons (some a) s_val) theorem mem_cons_of_mem {α : Type u} (y : α) {a : α} {s : seq α} : a ∈ s → a ∈ cons y s := sorry theorem eq_or_mem_of_mem_cons {α : Type u} {a : α} {b : α} {s : seq α} : a ∈ cons b s → a = b ∨ a ∈ s := sorry @[simp] theorem mem_cons_iff {α : Type u} {a : α} {b : α} {s : seq α} : a ∈ cons b s ↔ a = b ∨ a ∈ s := sorry /-- Destructor for a sequence, resulting in either `none` (for `nil`) or `some (a, s)` (for `cons a s`). -/ def destruct {α : Type u} (s : seq α) : Option (seq1 α) := (fun (a' : α) => (a', tail s)) <$> nth s 0 theorem destruct_eq_nil {α : Type u} {s : seq α} : destruct s = none → s = nil := sorry theorem destruct_eq_cons {α : Type u} {s : seq α} {a : α} {s' : seq α} : destruct s = some (a, s') → s = cons a s' := sorry @[simp] theorem destruct_nil {α : Type u} : destruct nil = none := rfl @[simp] theorem destruct_cons {α : Type u} (a : α) (s : seq α) : destruct (cons a s) = some (a, s) := sorry theorem head_eq_destruct {α : Type u} (s : seq α) : head s = prod.fst <$> destruct s := sorry @[simp] theorem head_nil {α : Type u} : head nil = none := rfl @[simp] theorem head_cons {α : Type u} (a : α) (s : seq α) : head (cons a s) = some a := eq.mpr (id (Eq._oldrec (Eq.refl (head (cons a s) = some a)) (head_eq_destruct (cons a s)))) (eq.mpr (id (Eq._oldrec (Eq.refl (prod.fst <$> destruct (cons a s) = some a)) (destruct_cons a s))) (Eq.refl (prod.fst <$> some (a, s)))) @[simp] theorem tail_nil {α : Type u} : tail nil = nil := rfl @[simp] theorem tail_cons {α : Type u} (a : α) (s : seq α) : tail (cons a s) = s := sorry def cases_on {α : Type u} {C : seq α → Sort v} (s : seq α) (h1 : C nil) (h2 : (x : α) → (s : seq α) → C (cons x s)) : C s := (fun (_x : Option (seq1 α)) (H : destruct s = _x) => Option.rec (fun (H : destruct s = none) => eq.mpr sorry h1) (fun (v : seq1 α) (H : destruct s = some v) => prod.cases_on v (fun (a : α) (s' : seq α) (H : destruct s = some (a, s')) => eq.mpr sorry (h2 a s')) H) _x H) (destruct s) sorry theorem mem_rec_on {α : Type u} {C : seq α → Prop} {a : α} {s : seq α} (M : a ∈ s) (h1 : ∀ (b : α) (s' : seq α), a = b ∨ C s' → C (cons b s')) : C s := sorry def corec.F {α : Type u} {β : Type v} (f : β → Option (α × β)) : Option β → Option α × Option β := sorry /-- Corecursor for `seq α` as a coinductive type. Iterates `f` to produce new elements of the sequence until `none` is obtained. 
-/ def corec {α : Type u} {β : Type v} (f : β → Option (α × β)) (b : β) : seq α := { val := stream.corec' sorry (some b), property := sorry } @[simp] theorem corec_eq {α : Type u} {β : Type v} (f : β → Option (α × β)) (b : β) : destruct (corec f b) = omap (corec f) (f b) := sorry /-- Embed a list as a sequence -/ def of_list {α : Type u} (l : List α) : seq α := { val := list.nth l, property := sorry } protected instance coe_list {α : Type u} : has_coe (List α) (seq α) := has_coe.mk of_list @[simp] def bisim_o {α : Type u} (R : seq α → seq α → Prop) : Option (seq1 α) → Option (seq1 α) → Prop := sorry def is_bisimulation {α : Type u} (R : seq α → seq α → Prop) := ∀ {s₁ s₂ : seq α}, R s₁ s₂ → bisim_o R (destruct s₁) (destruct s₂) theorem eq_of_bisim {α : Type u} (R : seq α → seq α → Prop) (bisim : is_bisimulation R) {s₁ : seq α} {s₂ : seq α} (r : R s₁ s₂) : s₁ = s₂ := sorry theorem coinduction {α : Type u} {s₁ : seq α} {s₂ : seq α} : head s₁ = head s₂ → (∀ (β : Type u) (fr : seq α → β), fr s₁ = fr s₂ → fr (tail s₁) = fr (tail s₂)) → s₁ = s₂ := sorry theorem coinduction2 {α : Type u} {β : Type v} (s : seq α) (f : seq α → seq β) (g : seq α → seq β) (H : ∀ (s : seq α), bisim_o (fun (s1 s2 : seq β) => ∃ (s : seq α), s1 = f s ∧ s2 = g s) (destruct (f s)) (destruct (g s))) : f s = g s := sorry /-- Embed an infinite stream as a sequence -/ def of_stream {α : Type u} (s : stream α) : seq α := { val := stream.map some s, property := sorry } protected instance coe_stream {α : Type u} : has_coe (stream α) (seq α) := has_coe.mk of_stream /-- Embed a `lazy_list α` as a sequence. Note that even though this is non-meta, it will produce infinite sequences if used with cyclic `lazy_list`s created by meta constructions. -/ def of_lazy_list {α : Type u} : lazy_list α → seq α := corec fun (l : lazy_list α) => sorry protected instance coe_lazy_list {α : Type u} : has_coe (lazy_list α) (seq α) := has_coe.mk of_lazy_list /-- Translate a sequence into a `lazy_list`. Since `lazy_list` and `list` are isomorphic as non-meta types, this function is necessarily meta. -/ /-- Translate a sequence to a list. This function will run forever if run on an infinite sequence. -/ /-- The sequence of natural numbers some 0, some 1, ... -/ def nats : seq ℕ := ↑stream.nats @[simp] theorem nats_nth (n : ℕ) : nth nats n = some n := rfl /-- Append two sequences. If `s₁` is infinite, then `s₁ ++ s₂ = s₁`, otherwise it puts `s₂` at the location of the `nil` in `s₁`. -/ def append {α : Type u} (s₁ : seq α) (s₂ : seq α) : seq α := corec (fun (_x : seq α × seq α) => sorry) (s₁, s₂) /-- Map a function over a sequence. -/ def map {α : Type u} {β : Type v} (f : α → β) : seq α → seq β := sorry /-- Flatten a sequence of sequences. (It is required that the sequences be nonempty to ensure productivity; in the case of an infinite sequence of `nil`, the first element is never generated.) -/ def join {α : Type u} : seq (seq1 α) → seq α := corec fun (S : seq (seq1 α)) => sorry /-- Remove the first `n` elements from the sequence. -/ @[simp] def drop {α : Type u} (s : seq α) : ℕ → seq α := sorry /-- Take the first `n` elements of the sequence (producing a list) -/ def take {α : Type u} : ℕ → seq α → List α := sorry /-- Split a sequence at `n`, producing a finite initial segment and an infinite tail. 
-/ def split_at {α : Type u} : ℕ → seq α → List α × seq α := sorry /-- Combine two sequences with a function -/ def zip_with {α : Type u} {β : Type v} {γ : Type w} (f : α → β → γ) : seq α → seq β → seq γ := sorry theorem zip_with_nth_some {α : Type u} {β : Type v} {γ : Type w} {s : seq α} {s' : seq β} {n : ℕ} {a : α} {b : β} (s_nth_eq_some : nth s n = some a) (s_nth_eq_some' : nth s' n = some b) (f : α → β → γ) : nth (zip_with f s s') n = some (f a b) := sorry theorem zip_with_nth_none {α : Type u} {β : Type v} {γ : Type w} {s : seq α} {s' : seq β} {n : ℕ} (s_nth_eq_none : nth s n = none) (f : α → β → γ) : nth (zip_with f s s') n = none := sorry theorem zip_with_nth_none' {α : Type u} {β : Type v} {γ : Type w} {s : seq α} {s' : seq β} {n : ℕ} (s'_nth_eq_none : nth s' n = none) (f : α → β → γ) : nth (zip_with f s s') n = none := sorry /-- Pair two sequences into a sequence of pairs -/ def zip {α : Type u} {β : Type v} : seq α → seq β → seq (α × β) := zip_with Prod.mk /-- Separate a sequence of pairs into two sequences -/ def unzip {α : Type u} {β : Type v} (s : seq (α × β)) : seq α × seq β := (map prod.fst s, map prod.snd s) /-- Convert a sequence which is known to terminate into a list -/ def to_list {α : Type u} (s : seq α) (h : ∃ (n : ℕ), ¬↥(option.is_some (nth s n))) : List α := take (nat.find h) s /-- Convert a sequence which is known not to terminate into a stream -/ def to_stream {α : Type u} (s : seq α) (h : ∀ (n : ℕ), ↥(option.is_some (nth s n))) : stream α := fun (n : ℕ) => option.get (h n) /-- Convert a sequence into either a list or a stream depending on whether it is finite or infinite. (Without decidability of the infiniteness predicate, this is not constructively possible.) -/ def to_list_or_stream {α : Type u} (s : seq α) [Decidable (∃ (n : ℕ), ¬↥(option.is_some (nth s n)))] : List α ⊕ stream α := dite (∃ (n : ℕ), ¬↥(option.is_some (nth s n))) (fun (h : ∃ (n : ℕ), ¬↥(option.is_some (nth s n))) => sum.inl (to_list s h)) fun (h : ¬∃ (n : ℕ), ¬↥(option.is_some (nth s n))) => sum.inr (to_stream s sorry) @[simp] theorem nil_append {α : Type u} (s : seq α) : append nil s = s := sorry @[simp] theorem cons_append {α : Type u} (a : α) (s : seq α) (t : seq α) : append (cons a s) t = cons a (append s t) := sorry @[simp] theorem append_nil {α : Type u} (s : seq α) : append s nil = s := sorry @[simp] theorem append_assoc {α : Type u} (s : seq α) (t : seq α) (u : seq α) : append (append s t) u = append s (append t u) := sorry @[simp] theorem map_nil {α : Type u} {β : Type v} (f : α → β) : map f nil = nil := rfl @[simp] theorem map_cons {α : Type u} {β : Type v} (f : α → β) (a : α) (s : seq α) : map f (cons a s) = cons (f a) (map f s) := sorry @[simp] theorem map_id {α : Type u} (s : seq α) : map id s = s := sorry @[simp] theorem map_tail {α : Type u} {β : Type v} (f : α → β) (s : seq α) : map f (tail s) = tail (map f s) := sorry theorem map_comp {α : Type u} {β : Type v} {γ : Type w} (f : α → β) (g : β → γ) (s : seq α) : map (g ∘ f) s = map g (map f s) := sorry @[simp] theorem map_append {α : Type u} {β : Type v} (f : α → β) (s : seq α) (t : seq α) : map f (append s t) = append (map f s) (map f t) := sorry @[simp] theorem map_nth {α : Type u} {β : Type v} (f : α → β) (s : seq α) (n : ℕ) : nth (map f s) n = option.map f (nth s n) := sorry protected instance functor : Functor seq := { map := map, mapConst := fun (α β : Type u_1) => map ∘ function.const β } protected instance is_lawful_functor : is_lawful_functor seq := is_lawful_functor.mk map_id map_comp @[simp] theorem join_nil {α : Type 
u} : join nil = nil := destruct_eq_nil rfl @[simp] theorem join_cons_nil {α : Type u} (a : α) (S : seq (seq1 α)) : join (cons (a, nil) S) = cons a (join S) := sorry @[simp] theorem join_cons_cons {α : Type u} (a : α) (b : α) (s : seq α) (S : seq (seq1 α)) : join (cons (a, cons b s) S) = cons a (join (cons (b, s) S)) := sorry @[simp] theorem join_cons {α : Type u} (a : α) (s : seq α) (S : seq (seq1 α)) : join (cons (a, s) S) = cons a (append s (join S)) := sorry @[simp] theorem join_append {α : Type u} (S : seq (seq1 α)) (T : seq (seq1 α)) : join (append S T) = append (join S) (join T) := sorry @[simp] theorem of_list_nil {α : Type u} : of_list [] = nil := rfl @[simp] theorem of_list_cons {α : Type u} (a : α) (l : List α) : of_list (a :: l) = cons a (of_list l) := sorry @[simp] theorem of_stream_cons {α : Type u} (a : α) (s : stream α) : of_stream (a :: s) = cons a (of_stream s) := sorry @[simp] theorem of_list_append {α : Type u} (l : List α) (l' : List α) : of_list (l ++ l') = append (of_list l) (of_list l') := sorry @[simp] theorem of_stream_append {α : Type u} (l : List α) (s : stream α) : of_stream (l++ₛs) = append (of_list l) (of_stream s) := sorry /-- Convert a sequence into a list, embedded in a computation to allow for the possibility of infinite sequences (in which case the computation never returns anything). -/ def to_list' {α : Type u_1} (s : seq α) : computation (List α) := computation.corec (fun (_x : List α × seq α) => sorry) ([], s) theorem dropn_add {α : Type u} (s : seq α) (m : ℕ) (n : ℕ) : drop s (m + n) = drop (drop s m) n := sorry theorem dropn_tail {α : Type u} (s : seq α) (n : ℕ) : drop (tail s) n = drop s (n + 1) := eq.mpr (id (Eq._oldrec (Eq.refl (drop (tail s) n = drop s (n + 1))) (add_comm n 1))) (Eq.symm (dropn_add s 1 n)) theorem nth_tail {α : Type u} (s : seq α) (n : ℕ) : nth (tail s) n = nth s (n + 1) := sorry protected theorem ext {α : Type u} (s : seq α) (s' : seq α) (hyp : ∀ (n : ℕ), nth s n = nth s' n) : s = s' := sorry @[simp] theorem head_dropn {α : Type u} (s : seq α) (n : ℕ) : head (drop s n) = nth s n := sorry theorem mem_map {α : Type u} {β : Type v} (f : α → β) {a : α} {s : seq α} : a ∈ s → f a ∈ map f s := sorry theorem exists_of_mem_map {α : Type u} {β : Type v} {f : α → β} {b : β} {s : seq α} : b ∈ map f s → ∃ (a : α), a ∈ s ∧ f a = b := sorry theorem of_mem_append {α : Type u} {s₁ : seq α} {s₂ : seq α} {a : α} (h : a ∈ append s₁ s₂) : a ∈ s₁ ∨ a ∈ s₂ := sorry theorem mem_append_left {α : Type u} {s₁ : seq α} {s₂ : seq α} {a : α} (h : a ∈ s₁) : a ∈ append s₁ s₂ := sorry end seq namespace seq1 /-- Convert a `seq1` to a sequence. -/ def to_seq {α : Type u} : seq1 α → seq α := sorry protected instance coe_seq {α : Type u} : has_coe (seq1 α) (seq α) := has_coe.mk to_seq /-- Map a function on a `seq1` -/ def map {α : Type u} {β : Type v} (f : α → β) : seq1 α → seq1 β := sorry theorem map_id {α : Type u} (s : seq1 α) : map id s = s := sorry /-- Flatten a nonempty sequence of nonempty sequences -/ def join {α : Type u} : seq1 (seq1 α) → seq1 α := sorry @[simp] theorem join_nil {α : Type u} (a : α) (S : seq (seq1 α)) : join ((a, seq.nil), S) = (a, seq.join S) := rfl @[simp] theorem join_cons {α : Type u} (a : α) (b : α) (s : seq α) (S : seq (seq1 α)) : join ((a, seq.cons b s), S) = (a, seq.join (seq.cons (b, s) S)) := sorry /-- The `return` operator for the `seq1` monad, which produces a singleton sequence. 
-/ def ret {α : Type u} (a : α) : seq1 α := (a, seq.nil) protected instance inhabited {α : Type u} [Inhabited α] : Inhabited (seq1 α) := { default := ret Inhabited.default } /-- The `bind` operator for the `seq1` monad, which maps `f` on each element of `s` and appends the results together. (Not all of `s` may be evaluated, because the first few elements of `s` may already produce an infinite result.) -/ def bind {α : Type u} {β : Type v} (s : seq1 α) (f : α → seq1 β) : seq1 β := join (map f s) @[simp] theorem join_map_ret {α : Type u} (s : seq α) : seq.join (seq.map ret s) = s := sorry @[simp] theorem bind_ret {α : Type u} {β : Type v} (f : α → β) (s : seq1 α) : bind s (ret ∘ f) = map f s := sorry @[simp] theorem ret_bind {α : Type u} {β : Type v} (a : α) (f : α → seq1 β) : bind (ret a) f = f a := sorry @[simp] theorem map_join' {α : Type u} {β : Type v} (f : α → β) (S : seq (seq1 α)) : seq.map f (seq.join S) = seq.join (seq.map (map f) S) := sorry @[simp] theorem map_join {α : Type u} {β : Type v} (f : α → β) (S : seq1 (seq1 α)) : map f (join S) = join (map (map f) S) := sorry @[simp] theorem join_join {α : Type u} (SS : seq (seq1 (seq1 α))) : seq.join (seq.join SS) = seq.join (seq.map join SS) := sorry @[simp] theorem bind_assoc {α : Type u} {β : Type v} {γ : Type w} (s : seq1 α) (f : α → seq1 β) (g : β → seq1 γ) : bind (bind s f) g = bind s fun (x : α) => bind (f x) g := sorry protected instance monad : Monad seq1 := { toApplicative := { toFunctor := { map := map, mapConst := fun (α β : Type u_1) => map ∘ function.const β }, toPure := { pure := ret }, toSeq := { seq := fun (α β : Type u_1) (f : seq1 (α → β)) (x : seq1 α) => bind f fun (_x : α → β) => map _x x }, toSeqLeft := { seqLeft := fun (α β : Type u_1) (a : seq1 α) (b : seq1 β) => (fun (α β : Type u_1) (f : seq1 (α → β)) (x : seq1 α) => bind f fun (_x : α → β) => map _x x) β α (map (function.const β) a) b }, toSeqRight := { seqRight := fun (α β : Type u_1) (a : seq1 α) (b : seq1 β) => (fun (α β : Type u_1) (f : seq1 (α → β)) (x : seq1 α) => bind f fun (_x : α → β) => map _x x) β β (map (function.const α id) a) b } }, toBind := { bind := bind } } protected instance is_lawful_monad : is_lawful_monad seq1 := is_lawful_monad.mk ret_bind bind_assoc end Mathlib
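Concretely, under the stream.is_seq encoding used throughout the file above, a finite list such as [a, b] embeds via of_list as the stream some a, some b, none, none, …, while an infinite stream embeds via of_stream with every entry some; the predicate rules out streams that return to some after a none, so the first none (if any) really marks the end of the sequence, which is what justifies operations like terminated_at, take and to_list.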
Formal statement is: lemma exp_pi_i [simp]: "exp (of_real pi * \<i>) = -1" Informal statement is: $e^{i * \pi} = -1$.
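This is Euler's identity; it is immediate from Euler's formula: $e^{i\pi} = \cos\pi + i\sin\pi = -1 + 0\,i = -1$.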
[STATEMENT] lemma closed_ereal_semiline: fixes a :: ereal shows "closed {y. a \<le> ereal y}" [PROOF STATE] proof (prove) goal (1 subgoal): 1. closed {y. a \<le> ereal y} [PROOF STEP] proof (cases a) [PROOF STATE] proof (state) goal (3 subgoals): 1. \<And>r. a = ereal r \<Longrightarrow> closed {y. a \<le> ereal y} 2. a = \<infinity> \<Longrightarrow> closed {y. a \<le> ereal y} 3. a = - \<infinity> \<Longrightarrow> closed {y. a \<le> ereal y} [PROOF STEP] case (real r) [PROOF STATE] proof (state) this: a = ereal r goal (3 subgoals): 1. \<And>r. a = ereal r \<Longrightarrow> closed {y. a \<le> ereal y} 2. a = \<infinity> \<Longrightarrow> closed {y. a \<le> ereal y} 3. a = - \<infinity> \<Longrightarrow> closed {y. a \<le> ereal y} [PROOF STEP] then [PROOF STATE] proof (chain) picking this: a = ereal r [PROOF STEP] show ?thesis [PROOF STATE] proof (prove) using this: a = ereal r goal (1 subgoal): 1. closed {y. a \<le> ereal y} [PROOF STEP] using closed_real_atLeast [PROOF STATE] proof (prove) using this: a = ereal r closed {?a..} goal (1 subgoal): 1. closed {y. a \<le> ereal y} [PROOF STEP] unfolding atLeast_def [PROOF STATE] proof (prove) using this: a = ereal r closed {x. ?a \<le> x} goal (1 subgoal): 1. closed {y. a \<le> ereal y} [PROOF STEP] by simp [PROOF STATE] proof (state) this: closed {y. a \<le> ereal y} goal (2 subgoals): 1. a = \<infinity> \<Longrightarrow> closed {y. a \<le> ereal y} 2. a = - \<infinity> \<Longrightarrow> closed {y. a \<le> ereal y} [PROOF STEP] qed auto
#= Copyright (c) 2021 https://github.com/Ismael-VC/Varvara.jl/blob/main/CONTRIBUTORS.md Permission to use, copy, modify, and distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE. =# module Tables using Printf: @printf uint8(x::Float64) = UInt8(trunc(x)) uint8(x::UInt8) = x function main()::Int println("60 points on a circle128(bytex,bytey):\n") for i ∈ 0:59 cx = cy = r = 128 pos = (i - 15) % 60 deg = (pos / 60.0) * 360.0 rad = deg * (π / 180) x = cx + r * cos(rad) y = cy + r * sin(rad) if i > 0 && i % 8 == 0 println() end @printf("%02x%02x ", uint8(clamp(x, 0x00, 0xff)), uint8(clamp(y, 0x00, 0xff))) end println("\n") return 0 end end # module if abspath(PROGRAM_FILE) == @__FILE__ using .Tables: main main() end
As the sky turns from blue to orange in the Zimbabwe bush, a cool breeze blows to chase away the scorching heat of the afternoon, ushering in the night. A small brown mammal with plate-like scales rustles through the grass. Little does it know that it walks close to the path of extinction. There hasn’t been a more dangerous time to be a pangolin. The species has become the most trafficked mammal in the world. They are illegally traded for their scales, as bush meat or for medicinal purposes. But the threat of their extinction rarely makes news. An organisation in Zimbabwe aims to change this by educating the public about an animal many may not know even exists. Enabled by the Tikki Hywood Trust, a group of men dedicate their lives to rehabilitating captured pangolins. They are entrusted with caring for animals that have endured major stress, often having been transported many kilometres bound in a sack, starved and dehydrated. The process takes a lot of patience. It is a difficult task to gain the trust of the pangolins while trying to nurture them to full health. But the men do this with love for the animal. They have developed an intimate relationship while caring for them. Pangolins are like their children. And like any parent, they will protect their family from anything that poses a threat.
theory nat_acc_plus_same imports Main "$HIPSTER_HOME/IsaHipster" begin datatype Nat = Z | S "Nat" fun plus :: "Nat => Nat => Nat" where "plus (Z) y = y" | "plus (S n) y = S (plus n y)" fun accplus :: "Nat => Nat => Nat" where "accplus (Z) y = y" | "accplus (S z) y = accplus z (S y)" (*hipster plus accplus *) theorem x0 : "!! (x :: Nat) . !! (y :: Nat) . (plus x y) = (accplus x y)" by (tactic \<open>Subgoal.FOCUS_PARAMS (K (Tactic_Data.hard_tac @{context})) @{context} 1\<close>) end
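One standard route to the fact that the one-line tactic proof above discharges: first prove the auxiliary lemma accplus x (S y) = S (accplus x y) by induction on x, then prove plus x y = accplus x y by induction on x, where the successor case runs plus (S n) y = S (plus n y) = S (accplus n y) = accplus n (S y) = accplus (S n) y, using the induction hypothesis in the second step, the auxiliary lemma in the third, and the definition of accplus in the last.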
program problem21 use euler implicit none integer, parameter :: n = 10000 integer*8 :: i, temp, sum sum = 0 do i=1,n-1 temp=sum_of_divisors(i) if ((i /= temp) .and. (sum_of_divisors(temp) == i)) then sum=sum+i end if end do print *, sum end program problem21
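For context: problem21 sums the amicable numbers below 10000. Assuming sum_of_divisors from the euler module returns the sum of proper divisors, the i /= temp check excludes perfect numbers, and the classic pair 220 and 284 illustrates the condition: the proper divisors of 220 sum to 284 and those of 284 sum back to 220, so both are added to sum.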
lemma real_polynomial_function_minus [intro]: "real_polynomial_function f \<Longrightarrow> real_polynomial_function (\<lambda>x. - f x)"
-- This file defines the Euclidean Domain structure. {-# OPTIONS --without-K --safe #-} module EuclideanDomain where -- We comply with the definition format in stdlib, i.e. define an -- IsSomething predicate and then define the bundle. open import Relation.Binary using (Rel; Setoid; IsEquivalence) module Structures {a ℓ} {A : Set a} -- The underlying set (_≈_ : Rel A ℓ) -- The underlying equality relation where open import Algebra.Structures _≈_ open import Algebra.Core open import Algebra.Definitions _≈_ import Algebra.Consequences.Setoid as Consequences open import Data.Product using (_,_; proj₁; proj₂) open import Level using (_⊔_) open import Data.Nat using (ℕ ; _<_) open import Data.Product using (∃ ; _×_) open import Relation.Nullary using (¬_) -- We only require left-cancellativity since we have required the -- domain to be commutative. A Euclidean domain is a commutative -- domain with div, mod, and rank functions satisfying euc-eq and -- euc-rank. record IsEuclideanDomain (+ * : Op₂ A) (- : Op₁ A) (0# 1# : A) : Set (a ⊔ ℓ) where field isCommutativeRing : IsCommutativeRing + * - 0# 1# *-alc : AlmostLeftCancellative 0# * div : ∀ (n d : A) -> ¬ d ≈ 0# -> A mod : ∀ (n d : A) -> ¬ d ≈ 0# -> A rank : A → ℕ euc-eq : ∀ (n d : A) -> (n0 : ¬ d ≈ 0#) -> let r = mod n d n0 in let q = div n d n0 in n ≈ + r (* q d) euc-rank : ∀ (n d : A) -> (n0 : ¬ d ≈ 0#) -> let r = mod n d n0 in let q = div n d n0 in rank r < rank d module Bundles where open Structures open import Algebra.Core open import Algebra.Structures open import Relation.Binary open import Function.Base import Relation.Nullary as N open import Level record EuclideanDomainBundle c ℓ : Set (suc (c ⊔ ℓ)) where infix 8 -_ infixl 7 _*_ infixl 6 _+_ infix 4 _≈_ field Carrier : Set c _≈_ : Rel Carrier ℓ _+_ : Op₂ Carrier _*_ : Op₂ Carrier -_ : Op₁ Carrier 0# : Carrier 1# : Carrier isEuclideanDomain : IsEuclideanDomain _≈_ _+_ _*_ -_ 0# 1# open IsEuclideanDomain isEuclideanDomain public
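For intuition, an illustrative instance not defined in this file: the integers form such a Euclidean domain by taking rank n = |n| and, for d not ≈ 0, letting div and mod be ordinary integer division with remainder, so that n ≈ + r (* q d) is the usual identity n = r + q·d (euc-eq) and rank r < rank d is the bound |r| < |d| (euc-rank).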
module Main ok : String -> String ok s = if length s `mod` 2 == 0 then reverse s else s main : IO () main = putStrLn $ substr 3 2 ( ok "abcdefghjklmnopqrstuvw")
#include("helperfunctions_rectgrid.jl") using RecipesBase @recipe function f(rp::RectangularPartition) label --> "" for rect in rp.rectangles @series begin connectrect(rect) end end end @recipe function f(r::Rectangle) label --> "" lc --> :black connectrect(r) end
[GOAL] ⊢ I * I = ↑(-1) [PROOFSTEP] simp [GOAL] x y : ℤ ⊢ ↑toComplex { re := x, im := y } = ↑x + ↑y * I [PROOFSTEP] simp [toComplex_def] [GOAL] x : ℤ[i] ⊢ ↑toComplex x = { re := ↑x.re, im := ↑x.im } [PROOFSTEP] apply Complex.ext [GOAL] case a x : ℤ[i] ⊢ (↑toComplex x).re = { re := ↑x.re, im := ↑x.im }.re [PROOFSTEP] simp [toComplex_def] [GOAL] case a x : ℤ[i] ⊢ (↑toComplex x).im = { re := ↑x.re, im := ↑x.im }.im [PROOFSTEP] simp [toComplex_def] [GOAL] x : ℤ[i] ⊢ ↑x.re = (↑toComplex x).re [PROOFSTEP] simp [toComplex_def] [GOAL] x : ℤ[i] ⊢ ↑x.im = (↑toComplex x).im [PROOFSTEP] simp [toComplex_def] [GOAL] x y : ℤ ⊢ (↑toComplex { re := x, im := y }).re = ↑x [PROOFSTEP] simp [toComplex_def] [GOAL] x y : ℤ ⊢ (↑toComplex { re := x, im := y }).im = ↑y [PROOFSTEP] simp [toComplex_def] [GOAL] x : ℤ[i] ⊢ ↑toComplex (star x) = ↑(starRingEnd ((fun x => ℂ) x)) (↑toComplex x) [PROOFSTEP] rw [toComplex_def₂, toComplex_def₂] [GOAL] x : ℤ[i] ⊢ { re := ↑(star x).re, im := ↑(star x).im } = ↑(starRingEnd ((fun x => ℂ) x)) { re := ↑x.re, im := ↑x.im } [PROOFSTEP] exact congr_arg₂ _ rfl (Int.cast_neg _) [GOAL] x y : ℤ[i] ⊢ ↑toComplex x = ↑toComplex y ↔ x = y [PROOFSTEP] cases x [GOAL] case mk y : ℤ[i] re✝ im✝ : ℤ ⊢ ↑toComplex { re := re✝, im := im✝ } = ↑toComplex y ↔ { re := re✝, im := im✝ } = y [PROOFSTEP] cases y [GOAL] case mk.mk re✝¹ im✝¹ re✝ im✝ : ℤ ⊢ ↑toComplex { re := re✝¹, im := im✝¹ } = ↑toComplex { re := re✝, im := im✝ } ↔ { re := re✝¹, im := im✝¹ } = { re := re✝, im := im✝ } [PROOFSTEP] simp [toComplex_def₂] [GOAL] x : ℤ[i] ⊢ ↑toComplex x = 0 ↔ x = 0 [PROOFSTEP] rw [← toComplex_zero, toComplex_inj] [GOAL] x : ℤ[i] ⊢ ↑(norm x) = ↑normSq (↑toComplex x) [PROOFSTEP] rw [Zsqrtd.norm, normSq] [GOAL] x : ℤ[i] ⊢ ↑(x.re * x.re - -1 * x.im * x.im) = ↑{ toZeroHom := { toFun := fun z => z.re * z.re + z.im * z.im, map_zero' := normSq.proof_1 }, map_one' := normSq.proof_2, map_mul' := normSq.proof_3 } (↑toComplex x) [PROOFSTEP] simp [GOAL] x : ℤ[i] ⊢ ↑(norm x) = ↑(↑normSq (↑toComplex x)) [PROOFSTEP] cases x [GOAL] case mk re✝ im✝ : ℤ ⊢ ↑(norm { re := re✝, im := im✝ }) = ↑(↑normSq (↑toComplex { re := re✝, im := im✝ })) [PROOFSTEP] rw [Zsqrtd.norm, normSq] [GOAL] case mk re✝ im✝ : ℤ ⊢ ↑({ re := re✝, im := im✝ }.re * { re := re✝, im := im✝ }.re - -1 * { re := re✝, im := im✝ }.im * { re := re✝, im := im✝ }.im) = ↑(↑{ toZeroHom := { toFun := fun z => z.re * z.re + z.im * z.im, map_zero' := normSq.proof_1 }, map_one' := normSq.proof_2, map_mul' := normSq.proof_3 } (↑toComplex { re := re✝, im := im✝ })) [PROOFSTEP] simp [GOAL] x : ℤ[i] ⊢ -1 ≤ 0 [PROOFSTEP] norm_num [GOAL] x : ℤ[i] ⊢ norm x = 0 ↔ x = 0 [PROOFSTEP] rw [← @Int.cast_inj ℝ _ _ _] [GOAL] x : ℤ[i] ⊢ ↑(norm x) = ↑0 ↔ x = 0 [PROOFSTEP] simp [GOAL] x : ℤ[i] ⊢ 0 < norm x ↔ x ≠ 0 [PROOFSTEP] rw [lt_iff_le_and_ne, Ne.def, eq_comm, norm_eq_zero] [GOAL] x : ℤ[i] ⊢ 0 ≤ norm x ∧ ¬x = 0 ↔ x ≠ 0 [PROOFSTEP] simp [norm_nonneg] [GOAL] α : Type u_1 inst✝ : Ring α x : ℤ[i] ⊢ ↑(Int.natAbs (norm x)) = ↑(norm x) [PROOFSTEP] rw [← Int.cast_ofNat, abs_coe_nat_norm] [GOAL] x : ℤ[i] ⊢ Int.ofNat (Int.natAbs (norm x)) = Int.ofNat (Int.natAbs x.re * Int.natAbs x.re + Int.natAbs x.im * Int.natAbs x.im) [PROOFSTEP] simp [GOAL] x : ℤ[i] ⊢ norm x = x.re * x.re + x.im * x.im [PROOFSTEP] simp [Zsqrtd.norm] [GOAL] x y : ℤ[i] ⊢ { re := round (↑(x * star y).re * (↑(norm y))⁻¹), im := round (↑(x * star y).im * (↑(norm y))⁻¹) } = { re := round (↑(x * star y).re / ↑(norm y)), im := round (↑(x * star y).im / ↑(norm y)) } [PROOFSTEP] simp [div_eq_mul_inv] [GOAL] x y : ℤ[i] ⊢ (↑toComplex (x / y)).re = 
↑(round (↑toComplex x / ↑toComplex y).re) [PROOFSTEP] rw [div_def, ← @Rat.round_cast ℝ _ _] [GOAL] x y : ℤ[i] ⊢ (↑toComplex { re := round ↑(↑(x * star y).re / ↑(norm y)), im := round (↑(x * star y).im / ↑(norm y)) }).re = ↑(round (↑toComplex x / ↑toComplex y).re) [PROOFSTEP] simp [-Rat.round_cast, mul_assoc, div_eq_mul_inv, mul_add, add_mul] [GOAL] x y : ℤ[i] ⊢ (↑toComplex (x / y)).im = ↑(round (↑toComplex x / ↑toComplex y).im) [PROOFSTEP] rw [div_def, ← @Rat.round_cast ℝ _ _, ← @Rat.round_cast ℝ _ _] [GOAL] x y : ℤ[i] ⊢ (↑toComplex { re := round ↑(↑(x * star y).re / ↑(norm y)), im := round ↑(↑(x * star y).im / ↑(norm y)) }).im = ↑(round (↑toComplex x / ↑toComplex y).im) [PROOFSTEP] simp [-Rat.round_cast, mul_assoc, div_eq_mul_inv, mul_add, add_mul] [GOAL] x y : ℂ hre : |x.re| ≤ |y.re| him : |x.im| ≤ |y.im| ⊢ ↑normSq x ≤ ↑normSq y [PROOFSTEP] rw [normSq_apply, normSq_apply, ← _root_.abs_mul_self, _root_.abs_mul, ← _root_.abs_mul_self y.re, _root_.abs_mul y.re, ← _root_.abs_mul_self x.im, _root_.abs_mul x.im, ← _root_.abs_mul_self y.im, _root_.abs_mul y.im] [GOAL] x y : ℂ hre : |x.re| ≤ |y.re| him : |x.im| ≤ |y.im| ⊢ |x.re| * |x.re| + |x.im| * |x.im| ≤ |y.re| * |y.re| + |y.im| * |y.im| [PROOFSTEP] exact add_le_add (mul_self_le_mul_self (abs_nonneg _) hre) (mul_self_le_mul_self (abs_nonneg _) him) [GOAL] x y : ℤ[i] ⊢ ↑toComplex x / ↑toComplex y - ↑toComplex (x / y) = ↑(↑toComplex x / ↑toComplex y).re - ↑(↑toComplex (x / y)).re + (↑(↑toComplex x / ↑toComplex y).im - ↑(↑toComplex (x / y)).im) * I [PROOFSTEP] apply Complex.ext [GOAL] case a x y : ℤ[i] ⊢ (↑toComplex x / ↑toComplex y - ↑toComplex (x / y)).re = (↑(↑toComplex x / ↑toComplex y).re - ↑(↑toComplex (x / y)).re + (↑(↑toComplex x / ↑toComplex y).im - ↑(↑toComplex (x / y)).im) * I).re [PROOFSTEP] simp [GOAL] case a x y : ℤ[i] ⊢ (↑toComplex x / ↑toComplex y - ↑toComplex (x / y)).im = (↑(↑toComplex x / ↑toComplex y).re - ↑(↑toComplex (x / y)).re + (↑(↑toComplex x / ↑toComplex y).im - ↑(↑toComplex (x / y)).im) * I).im [PROOFSTEP] simp [GOAL] x y : ℤ[i] ⊢ ↑normSq (↑(↑toComplex x / ↑toComplex y).re - ↑(↑toComplex (x / y)).re + (↑(↑toComplex x / ↑toComplex y).im - ↑(↑toComplex (x / y)).im) * I) ≤ ↑normSq (1 / 2 + 1 / 2 * I) [PROOFSTEP] have : |(2⁻¹ : ℝ)| = 2⁻¹ := abs_of_nonneg (by norm_num) [GOAL] x y : ℤ[i] ⊢ 0 ≤ 2⁻¹ [PROOFSTEP] norm_num [GOAL] x y : ℤ[i] this : |2⁻¹| = 2⁻¹ ⊢ ↑normSq (↑(↑toComplex x / ↑toComplex y).re - ↑(↑toComplex (x / y)).re + (↑(↑toComplex x / ↑toComplex y).im - ↑(↑toComplex (x / y)).im) * I) ≤ ↑normSq (1 / 2 + 1 / 2 * I) [PROOFSTEP] exact normSq_le_normSq_of_re_le_of_im_le (by rw [toComplex_div_re]; simp [normSq, this]; simpa using abs_sub_round (x / y : ℂ).re) (by rw [toComplex_div_im]; simp [normSq, this]; simpa using abs_sub_round (x / y : ℂ).im) [GOAL] x y : ℤ[i] this : |2⁻¹| = 2⁻¹ ⊢ |(↑(↑toComplex x / ↑toComplex y).re - ↑(↑toComplex (x / y)).re + (↑(↑toComplex x / ↑toComplex y).im - ↑(↑toComplex (x / y)).im) * I).re| ≤ |(1 / 2 + 1 / 2 * I).re| [PROOFSTEP] rw [toComplex_div_re] [GOAL] x y : ℤ[i] this : |2⁻¹| = 2⁻¹ ⊢ |(↑(↑toComplex x / ↑toComplex y).re - ↑↑(round (↑toComplex x / ↑toComplex y).re) + (↑(↑toComplex x / ↑toComplex y).im - ↑(↑toComplex (x / y)).im) * I).re| ≤ |(1 / 2 + 1 / 2 * I).re| [PROOFSTEP] simp [normSq, this] [GOAL] x y : ℤ[i] this : |2⁻¹| = 2⁻¹ ⊢ |(↑toComplex x / ↑toComplex y).re - ↑(round (↑toComplex x / ↑toComplex y).re)| ≤ 2⁻¹ [PROOFSTEP] simpa using abs_sub_round (x / y : ℂ).re [GOAL] x y : ℤ[i] this : |2⁻¹| = 2⁻¹ ⊢ |(↑(↑toComplex x / ↑toComplex y).re - ↑(↑toComplex (x / y)).re + (↑(↑toComplex x 
/ ↑toComplex y).im - ↑(↑toComplex (x / y)).im) * I).im| ≤ |(1 / 2 + 1 / 2 * I).im| [PROOFSTEP] rw [toComplex_div_im] [GOAL] x y : ℤ[i] this : |2⁻¹| = 2⁻¹ ⊢ |(↑(↑toComplex x / ↑toComplex y).re - ↑(↑toComplex (x / y)).re + (↑(↑toComplex x / ↑toComplex y).im - ↑↑(round (↑toComplex x / ↑toComplex y).im)) * I).im| ≤ |(1 / 2 + 1 / 2 * I).im| [PROOFSTEP] simp [normSq, this] [GOAL] x y : ℤ[i] this : |2⁻¹| = 2⁻¹ ⊢ |(↑toComplex x / ↑toComplex y).im - ↑(round (↑toComplex x / ↑toComplex y).im)| ≤ 2⁻¹ [PROOFSTEP] simpa using abs_sub_round (x / y : ℂ).im [GOAL] x y : ℤ[i] ⊢ ↑normSq (1 / 2 + 1 / 2 * I) < 1 [PROOFSTEP] simp [normSq] [GOAL] x y : ℤ[i] ⊢ 2⁻¹ * 2⁻¹ + 2⁻¹ * 2⁻¹ < 1 [PROOFSTEP] norm_num [GOAL] x y : ℤ[i] hy : y ≠ 0 ⊢ ↑toComplex y ≠ 0 [PROOFSTEP] rwa [Ne.def, ← toComplex_zero, toComplex_inj] [GOAL] x y : ℤ[i] hy : y ≠ 0 this : ↑toComplex y ≠ 0 ⊢ ↑(norm (x % y)) = ↑normSq (↑toComplex x - ↑toComplex y * ↑toComplex (x / y)) [PROOFSTEP] simp [mod_def] [GOAL] x y : ℤ[i] hy : y ≠ 0 this : ↑toComplex y ≠ 0 ⊢ ↑normSq (↑toComplex x - ↑toComplex y * ↑toComplex (x / y)) = ↑normSq (↑toComplex y) * ↑normSq (↑toComplex x / ↑toComplex y - ↑toComplex (x / y)) [PROOFSTEP] rw [← normSq_mul, mul_sub, mul_div_cancel' _ this] [GOAL] x y : ℤ[i] hy : y ≠ 0 this : ↑toComplex y ≠ 0 ⊢ ↑normSq (↑toComplex y) * 1 = ↑(norm y) [PROOFSTEP] simp [GOAL] x y : ℤ[i] hy : y ≠ 0 ⊢ ↑(Int.natAbs (norm (x % y))) < ↑(Int.natAbs (norm y)) [PROOFSTEP] simp [-Int.ofNat_lt, norm_mod_lt x hy] [GOAL] x y : ℤ[i] hy : y ≠ 0 ⊢ Int.natAbs (norm x) ≤ Int.natAbs (norm (x * y)) [PROOFSTEP] rw [Zsqrtd.norm_mul, Int.natAbs_mul] [GOAL] x y : ℤ[i] hy : y ≠ 0 ⊢ Int.natAbs (norm x) ≤ Int.natAbs (norm x) * Int.natAbs (norm y) [PROOFSTEP] exact le_mul_of_one_le_right (Nat.zero_le _) (Int.ofNat_le.1 (by rw [abs_coe_nat_norm] exact Int.add_one_le_of_lt (norm_pos.2 hy))) [GOAL] x y : ℤ[i] hy : y ≠ 0 ⊢ ↑1 ≤ ↑(Int.natAbs (norm y)) [PROOFSTEP] rw [abs_coe_nat_norm] [GOAL] x y : ℤ[i] hy : y ≠ 0 ⊢ ↑1 ≤ norm y [PROOFSTEP] exact Int.add_one_le_of_lt (norm_pos.2 hy) [GOAL] ⊢ 0 ≠ 1 [PROOFSTEP] decide [GOAL] src✝¹ : CommRing ℤ[i] := instCommRing src✝ : Nontrivial ℤ[i] := instNontrivial ⊢ ∀ (a : ℤ[i]), (fun x x_1 => x / x_1) a 0 = 0 [PROOFSTEP] simp [div_def] [GOAL] src✝¹ : CommRing ℤ[i] := instCommRing src✝ : Nontrivial ℤ[i] := instNontrivial ⊢ { re := 0, im := 0 } = 0 [PROOFSTEP] rfl [GOAL] src✝¹ : CommRing ℤ[i] := instCommRing src✝ : Nontrivial ℤ[i] := instNontrivial x✝¹ x✝ : ℤ[i] ⊢ x✝ * (fun x x_1 => x / x_1) x✝¹ x✝ + (fun x x_1 => x % x_1) x✝¹ x✝ = x✝¹ [PROOFSTEP] simp [mod_def] [GOAL] p : ℕ hp : Fact (Nat.Prime p) hpi : ¬Irreducible ↑p ⊢ ¬Int.natAbs (norm ↑p) = 1 [PROOFSTEP] rw [norm_nat_cast, Int.natAbs_mul, mul_eq_one] [GOAL] p : ℕ hp : Fact (Nat.Prime p) hpi : ¬Irreducible ↑p ⊢ ¬(Int.natAbs ↑p = 1 ∧ Int.natAbs ↑p = 1) [PROOFSTEP] exact fun h => (ne_of_lt hp.1.one_lt).symm h.1 [GOAL] p : ℕ hp : Fact (Nat.Prime p) hpi : ¬Irreducible ↑p hpu : ¬IsUnit ↑p ⊢ ∃ a b, ↑p = a * b ∧ ¬IsUnit a ∧ ¬IsUnit b [PROOFSTEP] simpa only [true_and, not_false_iff, exists_prop, irreducible_iff, hpu, not_forall, not_or] using hpi [GOAL] p : ℕ hp : Fact (Nat.Prime p) hpi : ¬Irreducible ↑p hpu : ¬IsUnit ↑p hab : ∃ a b, ↑p = a * b ∧ ¬IsUnit a ∧ ¬IsUnit b a b : ℤ[i] hpab : ↑p = a * b hau : ¬IsUnit a hbu : ¬IsUnit b ⊢ Int.natAbs (norm a) * Int.natAbs (norm b) = p ^ 2 [PROOFSTEP] rw [← Int.coe_nat_inj', Int.coe_nat_pow, sq, ← @norm_nat_cast (-1), hpab] [GOAL] p : ℕ hp : Fact (Nat.Prime p) hpi : ¬Irreducible ↑p hpu : ¬IsUnit ↑p hab : ∃ a b, ↑p = a * b ∧ ¬IsUnit a ∧ ¬IsUnit b a b : ℤ[i] hpab 
: ↑p = a * b hau : ¬IsUnit a hbu : ¬IsUnit b ⊢ ↑(Int.natAbs (norm a) * Int.natAbs (norm b)) = norm (a * b) [PROOFSTEP] simp [GOAL] p : ℕ hp : Fact (Nat.Prime p) hpi : ¬Irreducible ↑p hpu : ¬IsUnit ↑p hab : ∃ a b, ↑p = a * b ∧ ¬IsUnit a ∧ ¬IsUnit b a b : ℤ[i] hpab : ↑p = a * b hau : ¬IsUnit a hbu : ¬IsUnit b hnap : Int.natAbs (norm a) = p ⊢ Int.natAbs a.re ^ 2 + Int.natAbs a.im ^ 2 = p [PROOFSTEP] simpa [natAbs_norm_eq, sq] using hnap
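The key estimate behind these goals, in ordinary notation: for y ≠ 0, taking q to be the componentwise rounding of x/y in ℂ, one has N(x % y) = N(y)·|x/y − q|², and since each component of x/y − q has absolute value at most 1/2, |x/y − q|² ≤ 1/4 + 1/4 = 1/2 < 1, hence N(x % y) < N(y), which is the Euclidean-domain bound. The final goals then use multiplicativity of the norm: if a rational prime p factors as ↑p = a * b with neither factor a unit, then N(a)·N(b) = p² forces N(a) = p, so p = a.re² + a.im² is a sum of two squares.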
module Lib.Logic where infix 30 _∨_ infix 40 _∧_ infix 50 ¬_ data _∨_ (A B : Set) : Set where inl : A -> A ∨ B inr : B -> A ∨ B data _∧_ (A B : Set) : Set where _,_ : A -> B -> A ∧ B data False : Set where record True : Set where ¬_ : Set -> Set ¬ A = A -> False
lemma UN_space_closed: "\<Union>(sets ` S) \<subseteq> Pow (\<Union>(space ` S))"
(* heap_lang with deterministic allocation *) From stdpp Require Import base gmap. From iris.proofmode Require Import base proofmode classes. From iris.heap_lang Require Import lang primitive_laws. From iris_ni.program_logic Require Import dwp heap_lang_lifting. (** A simple allocator only knows about the state. In the future we can also make it aware of the threadpool. *) Module Type Allocator. Parameter oracle : state -> Z -> loc. Axiom oracle_fresh : ∀ σ n (i : Z), (0 ≤ i)%Z → (i < n)%Z → (heap σ) !! (oracle σ n +ₗ i) = None. End Allocator. Module SimpleAllocator : Allocator. Definition oracle σ (n : Z) := fresh_locs (dom (gset loc) σ.(heap)). Lemma oracle_fresh : ∀ σ n (i : Z), (0 ≤ i)%Z → (i < n)%Z → (heap σ) !! (oracle σ n +ₗ i) = None. Proof. intros σ n i Hi Hn. eapply (not_elem_of_dom (D:=gset loc)). by apply fresh_locs_fresh. Qed. End SimpleAllocator. Module heap_lang_det (A : Allocator). Inductive head_step : expr → state → list observation → expr → state → list expr → Prop := | RecS f x e σ : head_step (Rec f x e) σ [] (Val $ RecV f x e) σ [] | PairS v1 v2 σ : head_step (Pair (Val v1) (Val v2)) σ [] (Val $ PairV v1 v2) σ [] | InjLS v σ : head_step (InjL $ Val v) σ [] (Val $ InjLV v) σ [] | InjRS v σ : head_step (InjR $ Val v) σ [] (Val $ InjRV v) σ [] | BetaS f x e1 v2 e' σ : e' = subst' x v2 (subst' f (RecV f x e1) e1) → head_step (App (Val $ RecV f x e1) (Val v2)) σ [] e' σ [] | UnOpS op v v' σ : un_op_eval op v = Some v' → head_step (UnOp op (Val v)) σ [] (Val v') σ [] | BinOpS op v1 v2 v' σ : bin_op_eval op v1 v2 = Some v' → head_step (BinOp op (Val v1) (Val v2)) σ [] (Val v') σ [] | IfTrueS e1 e2 σ : head_step (If (Val $ LitV $ LitBool true) e1 e2) σ [] e1 σ [] | IfFalseS e1 e2 σ : head_step (If (Val $ LitV $ LitBool false) e1 e2) σ [] e2 σ [] | FstS v1 v2 σ : head_step (Fst (Val $ PairV v1 v2)) σ [] (Val v1) σ [] | SndS v1 v2 σ : head_step (Snd (Val $ PairV v1 v2)) σ [] (Val v2) σ [] | CaseLS v e1 e2 σ : head_step (Case (Val $ InjLV v) e1 e2) σ [] (App e1 (Val v)) σ [] | CaseRS v e1 e2 σ : head_step (Case (Val $ InjRV v) e1 e2) σ [] (App e2 (Val v)) σ [] | ForkS e σ: head_step (Fork e) σ [] (Val $ LitV LitUnit) σ [e] | AllocNS n v σ : let l := A.oracle σ n in (0 < n)%Z → (∀ i, (0 ≤ i)%Z → (i < n)%Z → σ.(heap) !! (l +ₗ i) = None) → head_step (AllocN (Val $ LitV $ LitInt n) (Val v)) σ [] (Val $ LitV $ LitLoc l) (state_init_heap l n v σ) [] | FreeS l v σ : σ.(heap) !! l = Some $ Some v → head_step (Free (Val $ LitV $ LitLoc l)) σ [] (Val $ LitV LitUnit) (state_upd_heap <[l:=None]> σ) [] | LoadS l v σ : σ.(heap) !! l = Some $ Some v → head_step (Load (Val $ LitV $ LitLoc l)) σ [] (of_val v) σ [] | StoreS l v0 v σ : σ.(heap) !! l = Some $ Some v0 → head_step (Store (Val $ LitV $ LitLoc l) (Val v)) σ [] (Val $ LitV LitUnit) (state_upd_heap <[l:=Some v]> σ) [] | XchgS l v1 v2 σ : σ.(heap) !! l = Some $ Some v1 → head_step (Xchg (Val $ LitV $ LitLoc l) (Val v2)) σ [] (Val v1) (state_upd_heap <[l:=Some v2]> σ) [] | CmpXchgS l v1 v2 vl σ b : σ.(heap) !! l = Some $ Some vl → (* Crucially, this compares the same way as [EqOp]! *) vals_compare_safe vl v1 → b = bool_decide (vl = v1) → head_step (CmpXchg (Val $ LitV $ LitLoc l) (Val v1) (Val v2)) σ [] (Val $ PairV vl (LitV $ LitBool b)) (if b then state_upd_heap <[l:=Some v2]> σ else σ) [] | FaaS l i1 i2 σ : σ.(heap) !! 
l = Some $ Some (LitV (LitInt i1)) → head_step (FAA (Val $ LitV $ LitLoc l) (Val $ LitV $ LitInt i2)) σ [] (Val $ LitV $ LitInt i1) (state_upd_heap <[l:=Some $ LitV (LitInt (i1 + i2))]>σ) [] | NewProphS σ : let p := fresh σ.(used_proph_id) in head_step NewProph σ [] (Val $ LitV $ LitProphecy p) (state_upd_used_proph_id ({[ p ]} ∪.) σ) [] | ResolveS p v e σ w σ' κs ts : head_step e σ κs (Val v) σ' ts → head_step (Resolve e (Val $ LitV $ LitProphecy p) (Val w)) σ (κs ++ [(p, (v, w))]) (Val v) σ' ts. Ltac inv_head_step := repeat match goal with | _ => progress simplify_map_eq/= (* simplify memory stuff *) | H : to_val _ = Some _ |- _ => apply of_to_val in H | H : head_step ?e _ _ _ _ _ |- _ => inversion H; subst; clear H end. Local Hint Extern 0 (head_reducible _ _) => eexists _, _, _, _; simpl : core. Local Hint Extern 0 (head_reducible_no_obs _ _) => eexists _, _, _; simpl : core. (* [simpl apply] is too stupid, so we need extern hints here. *) Local Hint Extern 1 (head_step _ _ _ _ _ _) => econstructor : core. Local Hint Extern 0 (head_step (CmpXchg _ _ _) _ _ _ _ _) => eapply CmpXchgS : core. Local Hint Extern 0 (head_step (AllocN _ _) _ _ _ _ _) => apply alloc_fresh : core. Local Hint Extern 0 (head_step NewProph _ _ _ _ _) => apply new_proph_id_fresh : core. Local Hint Resolve to_of_val : core. (** The op sem is actually deterministic. *) Theorem head_step_det e σ e'1 σ'1 obs1 efs1 e'2 σ'2 obs2 efs2 : head_step e σ obs1 e'1 σ'1 efs1 → head_step e σ obs2 e'2 σ'2 efs2 → obs1 = obs2 ∧ e'1 = e'2 ∧ σ'1 = σ'2 ∧ efs1 = efs2. Proof. intros Hst1. revert obs2 e'2 σ'2 efs2. induction Hst1; intros obs2 e'2 σ'2 efs2; inversion 1; repeat simplify_map_eq/=; eauto. specialize (IHHst1 κs0 (Val v0) σ'2 efs2). assert (v = v0) as <-. { enough (Val v = Val v0); first by simplify_eq/=. by apply IHHst1. } repeat split; try f_equiv; eauto; by apply IHHst1. Qed. (** Basic properties *) #[local] Instance fill_item_inj Ki : Inj (=) (=) (fill_item Ki). Proof. induction Ki; intros ???; simplify_eq/=; auto with f_equal. Qed. Lemma fill_item_val Ki e : is_Some (to_val (fill_item Ki e)) → is_Some (to_val e). Proof. intros [v ?]. induction Ki; simplify_option_eq; eauto. Qed. Lemma val_head_stuck e1 σ1 κ e2 σ2 efs : head_step e1 σ1 κ e2 σ2 efs → to_val e1 = None. Proof. destruct 1; naive_solver. Qed. Lemma head_ctx_step_val Ki e σ1 κ e2 σ2 efs : head_step (fill_item Ki e) σ1 κ e2 σ2 efs → is_Some (to_val e). Proof. revert κ e2. induction Ki; inversion_clear 1; simplify_option_eq; eauto. Qed. Lemma fill_item_no_val_inj Ki1 Ki2 e1 e2 : to_val e1 = None → to_val e2 = None → fill_item Ki1 e1 = fill_item Ki2 e2 → Ki1 = Ki2. Proof. revert Ki1. induction Ki2, Ki1; naive_solver eauto with f_equal. Qed. Lemma heap_lang_det_mixin : EctxiLanguageMixin of_val to_val fill_item head_step. Proof. split; apply _ || eauto using to_of_val, of_to_val, val_head_stuck, fill_item_val, fill_item_no_val_inj, head_ctx_step_val. Qed. Canonical Structure heap_ectxi_lang_det := EctxiLanguage heap_lang_det_mixin. Canonical Structure heap_ectx_lang_det := EctxLanguageOfEctxi heap_ectxi_lang_det. Canonical Structure heap_lang_det := LanguageOfEctx heap_ectx_lang_det. (** Different relations between the deterministic and non-deterministic semantics *) Lemma head_step_det_nondet e σ e' σ' κs efs : head_step e σ κs e' σ' efs → heap_lang.head_step e σ κs e' σ' efs. Proof. induction 1; try econstructor; eauto. - unfold p. apply is_fresh. Qed. 
Lemma prim_step_det_nondet e σ e' σ' κs efs : prim_step (Λ := heap_ectx_lang_det) e σ κs e' σ' efs → prim_step (Λ := heap_ectx_lang) e σ κs e' σ' efs. Proof. inversion 1. subst. econstructor; try done. by apply head_step_det_nondet. Qed. Lemma head_step_nondet_det_val e v σ σ' κs efs e2 σ'2 κs2 efs2 : heap_lang.head_step e σ κs (Val v) σ' efs → head_step e σ κs2 e2 σ'2 efs2 → is_Some (to_val e2). Proof. intros Hst_nondet Hst_det. inversion Hst_nondet; inversion Hst_det; simplify_eq/=; eauto. exists v. rewrite -H. eauto. Qed. Lemma head_reducible_nondet_det e σ : head_reducible (Λ := heap_ectx_lang) e σ → head_reducible (Λ := heap_ectx_lang_det) e σ. Proof. destruct 1 as (κs&e2&σ2&efs&H). induction H; try by (do 4 eexists; econstructor; eauto). - repeat econstructor; eauto=> i Hi Hn. by apply A.oracle_fresh. - destruct IHhead_step as (κs2&e2&σ2&efs2&Hst_det). simpl in *. assert (is_Some (to_val e2)) as [v2 Hv2]. { by eapply head_step_nondet_det_val. } repeat econstructor. rewrite -(of_to_val _ _ Hv2) in Hst_det. done. Qed. Lemma reducible_det_nondet e σ : reducible (Λ := heap_lang_det) e σ → reducible (Λ := heap_lang) e σ. Proof. destruct 1 as (κs & e' & σ' & efs & H). inversion H. simpl in *. subst e e'. do 4 eexists. econstructor; try naive_solver. by apply head_step_det_nondet. Qed. Lemma reducible_nondet_det e σ : reducible (Λ := heap_lang) e σ → reducible (Λ := heap_lang_det) e σ. Proof. destruct 1 as (κs & e' & σ' & efs & H). inversion H. simpl in *. subst e e'. apply (@head_prim_fill_reducible heap_ectxi_lang_det). apply head_reducible_nondet_det. by repeat eexists. Qed. Lemma head_step_nondet_det_obs e1 σ1 e2 σ2 efs e2' σ2' efs' κ : heap_lang.head_step e1 σ1 [] e2 σ2 efs → head_step e1 σ1 κ e2' σ2' efs' → κ = []. Proof. intros Hst1 Hst2. inversion Hst2; try by eauto. simpl in * ; subst. inversion Hst1. exfalso. by eapply app_cons_not_nil. Qed. Lemma head_reducible_no_obs_nondet_det e σ : head_reducible_no_obs (Λ := heap_ectx_lang) e σ → head_reducible_no_obs (Λ := heap_ectx_lang_det) e σ. Proof. destruct 1 as (e'&σ'&efs&Hst). assert (head_reducible (Λ := heap_ectx_lang_det) e σ) as Hred2. { apply head_reducible_nondet_det. eauto. } destruct Hred2 as (κ&e''&σ''&efs''&Hst2). assert (κ = []) as ->. { eapply head_step_nondet_det_obs; eauto. } eauto. Qed. Lemma reducible_no_obs_nondet_det e σ : reducible_no_obs (Λ := heap_lang) e σ → reducible_no_obs (Λ := heap_lang_det) e σ. Proof. destruct 1 as (e' & σ' & efs & H). inversion H. simpl in *. subst e e'. apply (@head_prim_fill_reducible_no_obs heap_ectxi_lang_det). apply head_reducible_no_obs_nondet_det. by repeat eexists. Qed. Section lifting. #[local] Instance heapG_irisG_det `{!heapGS Σ} : irisGS heap_lang_det Σ := { iris_invGS := heapGS_invGS; state_interp σ _ κs _ := (gen_heap_interp σ.(heap) ∗ proph_map_interp κs σ.(used_proph_id))%I; fork_post _ := True%I; num_laters_per_step := λ _, 0; state_interp_mono _ _ _ _ := fupd_intro _ _; }. Context `{!heapGS Σ}. Implicit Types Φ Ψ : val → iProp Σ. Lemma wp_simul e E Φ : (wp (EXPR := language.expr heap_lang ) (VAL := language.val heap_lang ) NotStuck E e Φ) -∗ (wp (EXPR := language.expr heap_lang_det) (VAL := language.val heap_lang_det) NotStuck E e Φ). Proof. iLöb as "IH" forall (e E Φ). rewrite !wp_unfold /wp_pre /=. destruct (to_val e) as [v|]; first by eauto. iIntros "H". iIntros (σ1 m κ κs n) "[Hσ Hp]". iMod ("H" $! σ1 m κ κs n with "[$Hσ $Hp]") as "[% H]". iModIntro. iSplitR. { iPureIntro. by apply reducible_nondet_det. } iIntros (e2 σ2 efs Hst_det). iSpecialize ("H" $! 
e2 σ2 efs with "[%]"). { by apply prim_step_det_nondet. } iMod "H" as "H". iModIntro. iNext. iMod "H" as "H". iModIntro. iMod "H" as "($&HWP&Hefs)". iModIntro. iSplitL "HWP". - by iApply "IH". - iApply (big_sepL_impl with "Hefs []"). iModIntro. iIntros (???). iApply "IH". Qed. End lifting. Section dwp_lifting. #[local] Instance heapDG_irisDG_det `{heapDG Σ} : irisDG heap_lang_det Σ := { state_rel := (λ σ1 σ2 κs1 κs2, @gen_heap_interp _ _ _ _ _ heapDG_gen_heapG1 σ1.(heap) ∗ @proph_map_interp _ _ _ _ _ heapDG_proph_mapG1 κs1 σ1.(used_proph_id) ∗ @gen_heap_interp _ _ _ _ _ heapDG_gen_heapG2 σ2.(heap) ∗ @proph_map_interp _ _ _ _ _ heapDG_proph_mapG2 κs2 σ2.(used_proph_id))%I }. Context `{!heapDG Σ}. Lemma dwp_simul e1 e2 E Φ : (dwp (Λ := heap_lang) E e1 e2 Φ) -∗ (dwp (Λ := heap_lang_det) E e1 e2 Φ). Proof. iLöb as "IH" forall (e1 e2 E Φ). rewrite !dwp_unfold /dwp_pre /=. repeat case_match; [by eauto with iFrame..|]. iIntros "H". iIntros (σ1 σ2 κ1 κs1 κ2 κs2) "(Hσ1 & Hp1 & Hσ2 & Hp2)". iMod ("H" with "[$Hσ1 $Hp1 $Hσ2 $Hp2]") as "[% [% H]]". iModIntro. iSplitR. { iPureIntro. by apply reducible_no_obs_nondet_det. } iSplitR. { iPureIntro. by apply reducible_no_obs_nondet_det. } iIntros (e1' σ1' efs1 e2' σ2' efs2 Hst_det1 Hst_det2). iSpecialize ("H" $! e1' σ1' efs1 e2' σ2' efs2 with "[%] [%]"). { by apply prim_step_det_nondet. } { by apply prim_step_det_nondet. } iMod "H" as "H". iModIntro. iNext. iMod "H" as "H". iModIntro. iDestruct "H" as "($&HWP&Hefs)". iSplitL "HWP". - by iApply "IH". - iApply (big_sepL2_impl with "Hefs []"). iModIntro. iIntros (?????). iApply "IH". Qed. End dwp_lifting. End heap_lang_det.
module _ where module A where syntax c x = ⟦ x data D₁ : Set where b : D₁ c : D₁ → D₁ module B where syntax c x = ⟦ x ⟧ data D₂ : Set where c : A.D₁ → D₂ open A open B test₁ : D₂ test₁ = ⟦ (⟦ c b) ⟧ test₂ : D₂ → D₁ test₂ ⟦ x ⟧ = ⟦ x test₃ : D₁ → D₂ test₃ b = c b test₃ (⟦ x) = ⟦ x ⟧ test₄ : D₁ → D₂ test₄ A.b = B.c A.b test₄ (A.⟦ x) = B.⟦ x ⟧ test₅ : D₂ → D₁ test₅ B.⟦ x ⟧ = A.⟦ x -- Should work.
""" hasrowmajororder(::Type{AbstractSparseMatrix}) Check if values are stored in row-major order. Return false. """ function hasrowmajororder(::Type{AbstractSparseMatrix}) @abstractmethod end function hasrowmajororder(S::AbstractSparseMatrix) hasrowmajororder(typeof(S)) end """ hascolmajororder(::Type{AbstractSparseMatrix}) Check if values are stored in col-major order. Return true. """ function hascolmajororder(::Type{AbstractSparseMatrix}) @abstractmethod end function hascolmajororder(S::AbstractSparseMatrix) hascolmajororder(typeof(S)) end """ getptr(S::AbstractSparseMatrix) Return columns pointer. """ function getptr(S::AbstractSparseMatrix) @abstractmethod end """ getindices(S::AbstractSparseMatrix) Return row indices. """ function getindices(S::AbstractSparseMatrix) @abstractmethod end """ rowvals(S::AbstractSparseMatrix) Return row indices or raises an error (Depending on the SparseMatrix type) """ function rowvals(S::AbstractSparseMatrix) @abstractmethod end """ colvals(S::AbstractSparseMatrix) Return columns indices or raises an error (Depending on the SparseMatrix type) """ function colvals(S::AbstractSparseMatrix) @abstractmethod end """ nnz(S::AbstractSparseMatrix) Returns the number of stored (filled) elements in a sparse array. """ function nnz(S::AbstractSparseMatrix) @abstractmethod end """ nonzeros(S::AbstractSparseMatrix) Return a vector of the structural nonzero values in sparse array S. This includes zeros that are explicitly stored in the sparse array. The returned vector points directly to the internal nonzero storage of S, and any modifications to the returned vector will mutate S as well. """ function nonzeros(S::AbstractSparseMatrix) @abstractmethod end """ count(pred, S::AbstractSparseMatrix) -> Integer Count the number of elements in nonzeros(S) for which predicate pred returns true. """ function count(pred, S::AbstractSparseMatrix) @abstractmethod end """ nzrange(S::AbstractSparseMatrix, index::Integer) where {Bi} Return the range of indices to the structural nonzero values of a sparse matrix index (Row or column depending on the compression type). """ function nzrange(S::AbstractSparseMatrix, index::Integer) @abstractmethod end """ findnz(S::AbstractSparseMatrix) Return a tuple (I, J, V) where I and J are the row and column indices of the stored ("structurally non-zero") values in sparse matrix A, and V is a vector of the values. """ function findnz(S::AbstractSparseMatrix) @abstractmethod end """ convert(::Type{AbstractSparseMatrix}, x::AbstractSparseMatrix) Convert x to a value of the first type given. """ function convert(::Type{AbstractSparseMatrix}, x::AbstractSparseMatrix) @abstractmethod end function fill_entries!(A::AbstractSparseMatrix{Tv,Ti},v::Number) where {Tv,Ti} nonzeros(A) .= convert(Tv,v) end
/- Copyright (c) 2018 Scott Morrison. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Scott Morrison, Mario Carneiro, Reid Barton, Andrew Yang ! This file was ported from Lean 3 source module topology.sheaves.presheaf ! leanprover-community/mathlib commit 8a318021995877a44630c898d0b2bc376fceef3b ! Please do not edit these lines, except to modify the commit id ! if you have ported upstream changes. -/ import Mathbin.CategoryTheory.Limits.KanExtension import Mathbin.Topology.Category.Top.Opens import Mathbin.CategoryTheory.Adjunction.Opposites /-! # Presheaves on a topological space We define `presheaf C X` simply as `(opens X)ᵒᵖ ⥤ C`, and inherit the category structure with natural transformations as morphisms. We define * `pushforward_obj {X Y : Top.{w}} (f : X ⟶ Y) (ℱ : X.presheaf C) : Y.presheaf C` with notation `f _* ℱ` and for `ℱ : X.presheaf C` provide the natural isomorphisms * `pushforward.id : (𝟙 X) _* ℱ ≅ ℱ` * `pushforward.comp : (f ≫ g) _* ℱ ≅ g _* (f _* ℱ)` along with their `@[simp]` lemmas. We also define the functors `pushforward` and `pullback` between the categories `X.presheaf C` and `Y.presheaf C`, and provide their adjunction at `pushforward_pullback_adjunction`. -/ universe w v u open CategoryTheory open TopologicalSpace open Opposite variable (C : Type u) [Category.{v} C] namespace TopCat /-- The category of `C`-valued presheaves on a (bundled) topological space `X`. -/ @[nolint has_nonempty_instance] def Presheaf (X : TopCat.{w}) : Type max u v w := (Opens X)ᵒᵖ ⥤ C deriving Category #align Top.presheaf TopCat.Presheaf variable {C} namespace Presheaf attribute [local instance] concrete_category.has_coe_to_sort concrete_category.has_coe_to_fun /-- Tag lemmas to use in `Top.presheaf.restrict_tac`. -/ @[user_attribute] unsafe def restrict_attr : user_attribute (tactic Unit → tactic Unit) Unit where Name := `sheaf_restrict descr := "tag lemmas to use in `Top.presheaf.restrict_tac`" cache_cfg := { mk_cache := fun ns => pure fun t => do let ctx ← tactic.local_context ctx (tactic.focus1 ∘ (tactic.apply' >=> fun _ => tactic.done) >=> fun _ => t) <|> ns (tactic.focus1 ∘ (tactic.resolve_name >=> tactic.to_expr >=> tactic.apply' >=> fun _ => tactic.done) >=> fun _ => t) dependencies := [] } #align Top.presheaf.restrict_attr Top.presheaf.restrict_attr /- ./././Mathport/Syntax/Translate/Expr.lean:330:4: warning: unsupported (TODO): `[tacs] -/ /-- A tactic to discharge goals of type `U ≤ V` for `Top.presheaf.restrict_open` -/ unsafe def restrict_tac : ∀ n : ℕ, tactic Unit | 0 => tactic.fail "`restrict_tac` failed" | n + 1 => Monad.join (restrict_attr.get_cache <*> pure tactic.done) <|> sorry #align Top.presheaf.restrict_tac Top.presheaf.restrict_tac /-- A tactic to discharge goals of type `U ≤ V` for `Top.presheaf.restrict_open`. Defaults to three iterations. -/ unsafe def restrict_tac' := restrict_tac 3 #align Top.presheaf.restrict_tac' Top.presheaf.restrict_tac' attribute [sheaf_restrict] bot_le le_top le_refl inf_le_left inf_le_right le_sup_left le_sup_right /- ./././Mathport/Syntax/Translate/Tactic/Builtin.lean:69:18: unsupported non-interactive tactic Top.presheaf.restrict_tac' -/ example {X : TopCat} {v w x y z : Opens X} (h₀ : v ≤ x) (h₁ : x ≤ z ⊓ w) (h₂ : x ≤ y ⊓ z) : v ≤ y := by run_tac restrict_tac' /-- The restriction of a section along an inclusion of open sets. 
For `x : F.obj (op V)`, we provide the notation `x |_ₕ i` (`h` stands for `hom`) for `i : U ⟶ V`, and the notation `x |_ₗ U ⟪i⟫` (`l` stands for `le`) for `i : U ≤ V`. -/ def restrict {X : TopCat} {C : Type _} [Category C] [ConcreteCategory C] {F : X.Presheaf C} {V : Opens X} (x : F.obj (op V)) {U : Opens X} (h : U ⟶ V) : F.obj (op U) := F.map h.op x #align Top.presheaf.restrict TopCat.Presheaf.restrict -- mathport name: «expr |_ₕ » scoped[AlgebraicGeometry] infixl:80 " |_ₕ " => TopCat.Presheaf.restrict -- mathport name: «expr |_ₗ ⟪ ⟫» scoped[AlgebraicGeometry] notation:80 x " |_ₗ " U " ⟪" e "⟫ " => @TopCat.Presheaf.restrict _ _ _ _ _ _ x U (@homOfLE (Opens _) _ U _ e) /- ./././Mathport/Syntax/Translate/Tactic/Builtin.lean:69:18: unsupported non-interactive tactic Top.presheaf.restrict_tac' -/ /-- The restriction of a section along an inclusion of open sets. For `x : F.obj (op V)`, we provide the notation `x |_ U`, where the proof `U ≤ V` is inferred by the tactic `Top.presheaf.restrict_tac'` -/ abbrev restrictOpen {X : TopCat} {C : Type _} [Category C] [ConcreteCategory C] {F : X.Presheaf C} {V : Opens X} (x : F.obj (op V)) (U : Opens X) (e : U ≤ V := by run_tac Top.presheaf.restrict_tac') : F.obj (op U) := x |_ₗ U ⟪e⟫ #align Top.presheaf.restrict_open TopCat.Presheaf.restrictOpen -- mathport name: «expr |_ » scoped[AlgebraicGeometry] infixl:80 " |_ " => TopCat.Presheaf.restrictOpen @[simp] theorem restrict_restrict {X : TopCat} {C : Type _} [Category C] [ConcreteCategory C] {F : X.Presheaf C} {U V W : Opens X} (e₁ : U ≤ V) (e₂ : V ≤ W) (x : F.obj (op W)) : x |_ V |_ U = x |_ U := by delta restrict_open restrict rw [← comp_apply, ← functor.map_comp] rfl #align Top.presheaf.restrict_restrict TopCat.Presheaf.restrict_restrict @[simp] theorem map_restrict {X : TopCat} {C : Type _} [Category C] [ConcreteCategory C] {F G : X.Presheaf C} (e : F ⟶ G) {U V : Opens X} (h : U ≤ V) (x : F.obj (op V)) : e.app _ (x |_ U) = e.app _ x |_ U := by delta restrict_open restrict rw [← comp_apply, nat_trans.naturality, comp_apply] #align Top.presheaf.map_restrict TopCat.Presheaf.map_restrict /-- Pushforward a presheaf on `X` along a continuous map `f : X ⟶ Y`, obtaining a presheaf on `Y`. -/ def pushforwardObj {X Y : TopCat.{w}} (f : X ⟶ Y) (ℱ : X.Presheaf C) : Y.Presheaf C := (Opens.map f).op ⋙ ℱ #align Top.presheaf.pushforward_obj TopCat.Presheaf.pushforwardObj -- mathport name: «expr _* » infixl:80 " _* " => pushforwardObj @[simp] theorem pushforwardObj_obj {X Y : TopCat.{w}} (f : X ⟶ Y) (ℱ : X.Presheaf C) (U : (Opens Y)ᵒᵖ) : (f _* ℱ).obj U = ℱ.obj ((Opens.map f).op.obj U) := rfl #align Top.presheaf.pushforward_obj_obj TopCat.Presheaf.pushforwardObj_obj @[simp] theorem pushforwardObj_map {X Y : TopCat.{w}} (f : X ⟶ Y) (ℱ : X.Presheaf C) {U V : (Opens Y)ᵒᵖ} (i : U ⟶ V) : (f _* ℱ).map i = ℱ.map ((Opens.map f).op.map i) := rfl #align Top.presheaf.pushforward_obj_map TopCat.Presheaf.pushforwardObj_map /-- An equality of continuous maps induces a natural isomorphism between the pushforwards of a presheaf along those maps. 
-/ def pushforwardEq {X Y : TopCat.{w}} {f g : X ⟶ Y} (h : f = g) (ℱ : X.Presheaf C) : f _* ℱ ≅ g _* ℱ := isoWhiskerRight (NatIso.op (Opens.mapIso f g h).symm) ℱ #align Top.presheaf.pushforward_eq TopCat.Presheaf.pushforwardEq theorem pushforward_eq' {X Y : TopCat.{w}} {f g : X ⟶ Y} (h : f = g) (ℱ : X.Presheaf C) : f _* ℱ = g _* ℱ := by rw [h] #align Top.presheaf.pushforward_eq' TopCat.Presheaf.pushforward_eq' @[simp] theorem pushforwardEq_hom_app {X Y : TopCat.{w}} {f g : X ⟶ Y} (h : f = g) (ℱ : X.Presheaf C) (U) : (pushforwardEq h ℱ).Hom.app U = ℱ.map (by dsimp [functor.op]; apply Quiver.Hom.op; apply eq_to_hom; rw [h]) := by simp [pushforward_eq] #align Top.presheaf.pushforward_eq_hom_app TopCat.Presheaf.pushforwardEq_hom_app theorem pushforward_eq'_hom_app {X Y : TopCat.{w}} {f g : X ⟶ Y} (h : f = g) (ℱ : X.Presheaf C) (U) : NatTrans.app (eqToHom (pushforward_eq' h ℱ)) U = ℱ.map (eqToHom (by rw [h])) := by simpa [eq_to_hom_map] #align Top.presheaf.pushforward_eq'_hom_app TopCat.Presheaf.pushforward_eq'_hom_app @[simp] theorem pushforwardEq_rfl {X Y : TopCat.{w}} (f : X ⟶ Y) (ℱ : X.Presheaf C) (U) : (pushforwardEq (rfl : f = f) ℱ).Hom.app (op U) = 𝟙 _ := by dsimp [pushforward_eq] simp #align Top.presheaf.pushforward_eq_rfl TopCat.Presheaf.pushforwardEq_rfl theorem pushforwardEq_eq {X Y : TopCat.{w}} {f g : X ⟶ Y} (h₁ h₂ : f = g) (ℱ : X.Presheaf C) : ℱ.pushforwardEq h₁ = ℱ.pushforwardEq h₂ := rfl #align Top.presheaf.pushforward_eq_eq TopCat.Presheaf.pushforwardEq_eq namespace Pushforward variable {X : TopCat.{w}} (ℱ : X.Presheaf C) /-- The natural isomorphism between the pushforward of a presheaf along the identity continuous map and the original presheaf. -/ def id : 𝟙 X _* ℱ ≅ ℱ := isoWhiskerRight (NatIso.op (Opens.mapId X).symm) ℱ ≪≫ Functor.leftUnitor _ #align Top.presheaf.pushforward.id TopCat.Presheaf.Pushforward.id theorem id_eq : 𝟙 X _* ℱ = ℱ := by unfold pushforward_obj rw [opens.map_id_eq] erw [functor.id_comp] #align Top.presheaf.pushforward.id_eq TopCat.Presheaf.Pushforward.id_eq @[simp] theorem id_hom_app' (U) (p) : (id ℱ).Hom.app (op ⟨U, p⟩) = ℱ.map (𝟙 (op ⟨U, p⟩)) := by dsimp [id] simp #align Top.presheaf.pushforward.id_hom_app' TopCat.Presheaf.Pushforward.id_hom_app' attribute [local tidy] tactic.op_induction' @[simp] theorem id_hom_app (U) : (id ℱ).Hom.app U = ℱ.map (eqToHom (Opens.op_map_id_obj U)) := by -- was `tidy` induction U using Opposite.rec cases U rw [id_hom_app'] congr #align Top.presheaf.pushforward.id_hom_app TopCat.Presheaf.Pushforward.id_hom_app @[simp] theorem id_inv_app' (U) (p) : (id ℱ).inv.app (op ⟨U, p⟩) = ℱ.map (𝟙 (op ⟨U, p⟩)) := by dsimp [id] simp #align Top.presheaf.pushforward.id_inv_app' TopCat.Presheaf.Pushforward.id_inv_app' /-- The natural isomorphism between the pushforward of a presheaf along the composition of two continuous maps and the corresponding pushforward of a pushforward. 
-/ def comp {Y Z : TopCat.{w}} (f : X ⟶ Y) (g : Y ⟶ Z) : (f ≫ g) _* ℱ ≅ g _* (f _* ℱ) := isoWhiskerRight (NatIso.op (Opens.mapComp f g).symm) ℱ #align Top.presheaf.pushforward.comp TopCat.Presheaf.Pushforward.comp theorem comp_eq {Y Z : TopCat.{w}} (f : X ⟶ Y) (g : Y ⟶ Z) : (f ≫ g) _* ℱ = g _* (f _* ℱ) := rfl #align Top.presheaf.pushforward.comp_eq TopCat.Presheaf.Pushforward.comp_eq @[simp] theorem comp_hom_app {Y Z : TopCat.{w}} (f : X ⟶ Y) (g : Y ⟶ Z) (U) : (comp ℱ f g).Hom.app U = 𝟙 _ := by dsimp [comp] tidy #align Top.presheaf.pushforward.comp_hom_app TopCat.Presheaf.Pushforward.comp_hom_app @[simp] theorem comp_inv_app {Y Z : TopCat.{w}} (f : X ⟶ Y) (g : Y ⟶ Z) (U) : (comp ℱ f g).inv.app U = 𝟙 _ := by dsimp [comp] tidy #align Top.presheaf.pushforward.comp_inv_app TopCat.Presheaf.Pushforward.comp_inv_app end Pushforward /-- A morphism of presheaves gives rise to a morphisms of the pushforwards of those presheaves. -/ @[simps] def pushforwardMap {X Y : TopCat.{w}} (f : X ⟶ Y) {ℱ 𝒢 : X.Presheaf C} (α : ℱ ⟶ 𝒢) : f _* ℱ ⟶ f _* 𝒢 where app U := α.app _ naturality' U V i := by erw [α.naturality] rfl #align Top.presheaf.pushforward_map TopCat.Presheaf.pushforwardMap open CategoryTheory.Limits section Pullback variable [HasColimits C] noncomputable section /-- Pullback a presheaf on `Y` along a continuous map `f : X ⟶ Y`, obtaining a presheaf on `X`. This is defined in terms of left Kan extensions, which is just a fancy way of saying "take the colimits over the open sets whose preimage contains U". -/ @[simps] def pullbackObj {X Y : TopCat.{v}} (f : X ⟶ Y) (ℱ : Y.Presheaf C) : X.Presheaf C := (lan (Opens.map f).op).obj ℱ #align Top.presheaf.pullback_obj TopCat.Presheaf.pullbackObj /-- Pulling back along continuous maps is functorial. -/ def pullbackMap {X Y : TopCat.{v}} (f : X ⟶ Y) {ℱ 𝒢 : Y.Presheaf C} (α : ℱ ⟶ 𝒢) : pullbackObj f ℱ ⟶ pullbackObj f 𝒢 := (lan (Opens.map f).op).map α #align Top.presheaf.pullback_map TopCat.Presheaf.pullbackMap /-- If `f '' U` is open, then `f⁻¹ℱ U ≅ ℱ (f '' U)`. -/ @[simps] def pullbackObjObjOfImageOpen {X Y : TopCat.{v}} (f : X ⟶ Y) (ℱ : Y.Presheaf C) (U : Opens X) (H : IsOpen (f '' U)) : (pullbackObj f ℱ).obj (op U) ≅ ℱ.obj (op ⟨_, H⟩) := by let x : costructured_arrow (opens.map f).op (op U) := by refine' @costructured_arrow.mk _ _ _ _ _ (op (opens.mk (f '' U) H)) _ _ exact (@hom_of_le _ _ _ ((opens.map f).obj ⟨_, H⟩) (set.image_preimage.le_u_l _)).op have hx : is_terminal x := { lift := fun s => by fapply costructured_arrow.hom_mk change op (unop _) ⟶ op (⟨_, H⟩ : opens _) refine' (hom_of_le _).op exact (Set.image_subset f s.X.hom.unop.le).trans (set.image_preimage.l_u_le ↑(unop s.X.left)) simp } exact is_colimit.cocone_point_unique_up_to_iso (colimit.is_colimit _) (colimit_of_diagram_terminal hx _) #align Top.presheaf.pullback_obj_obj_of_image_open TopCat.Presheaf.pullbackObjObjOfImageOpen namespace Pullback variable {X Y : TopCat.{v}} (ℱ : Y.Presheaf C) /-- The pullback along the identity is isomorphic to the original presheaf. 
-/ def id : pullbackObj (𝟙 _) ℱ ≅ ℱ := NatIso.ofComponents (fun U => pullbackObjObjOfImageOpen (𝟙 _) ℱ (unop U) (by simpa using U.unop.2) ≪≫ ℱ.mapIso (eqToIso (by simp))) fun U V i => by ext; simp erw [colimit.pre_desc_assoc] erw [colimit.ι_desc_assoc] erw [colimit.ι_desc_assoc] dsimp; simp only [← ℱ.map_comp]; congr #align Top.presheaf.pullback.id TopCat.Presheaf.Pullback.id theorem id_inv_app (U : Opens Y) : (id ℱ).inv.app (op U) = colimit.ι (Lan.diagram (Opens.map (𝟙 Y)).op ℱ (op U)) (@CostructuredArrow.mk _ _ _ _ _ (op U) _ (eqToHom (by simp))) := by rw [← category.id_comp ((id ℱ).inv.app (op U)), ← nat_iso.app_inv, iso.comp_inv_eq] dsimp [id] rw [colimit.ι_desc_assoc] dsimp rw [← ℱ.map_comp, ← ℱ.map_id]; rfl #align Top.presheaf.pullback.id_inv_app TopCat.Presheaf.Pullback.id_inv_app end Pullback end Pullback variable (C) /-- The pushforward functor. -/ def pushforward {X Y : TopCat.{w}} (f : X ⟶ Y) : X.Presheaf C ⥤ Y.Presheaf C where obj := pushforwardObj f map := @pushforwardMap _ _ X Y f #align Top.presheaf.pushforward TopCat.Presheaf.pushforward @[simp] theorem pushforward_map_app' {X Y : TopCat.{w}} (f : X ⟶ Y) {ℱ 𝒢 : X.Presheaf C} (α : ℱ ⟶ 𝒢) {U : (Opens Y)ᵒᵖ} : ((pushforward C f).map α).app U = α.app (op <| (Opens.map f).obj U.unop) := rfl #align Top.presheaf.pushforward_map_app' TopCat.Presheaf.pushforward_map_app' theorem id_pushforward {X : TopCat.{w}} : pushforward C (𝟙 X) = 𝟭 (X.Presheaf C) := by apply CategoryTheory.Functor.ext · intros ext U have h := f.congr erw [h (opens.op_map_id_obj U)] simpa [eq_to_hom_map] · intros apply pushforward.id_eq #align Top.presheaf.id_pushforward TopCat.Presheaf.id_pushforward section Iso /-- A homeomorphism of spaces gives an equivalence of categories of presheaves. -/ @[simps] def presheafEquivOfIso {X Y : TopCat} (H : X ≅ Y) : X.Presheaf C ≌ Y.Presheaf C := Equivalence.congrLeft (Opens.mapMapIso H).symm.op #align Top.presheaf.presheaf_equiv_of_iso TopCat.Presheaf.presheafEquivOfIso variable {C} /-- If `H : X ≅ Y` is a homeomorphism, then given an `H _* ℱ ⟶ 𝒢`, we may obtain an `ℱ ⟶ H ⁻¹ _* 𝒢`. -/ def toPushforwardOfIso {X Y : TopCat} (H : X ≅ Y) {ℱ : X.Presheaf C} {𝒢 : Y.Presheaf C} (α : H.Hom _* ℱ ⟶ 𝒢) : ℱ ⟶ H.inv _* 𝒢 := (presheafEquivOfIso _ H).toAdjunction.homEquiv ℱ 𝒢 α #align Top.presheaf.to_pushforward_of_iso TopCat.Presheaf.toPushforwardOfIso @[simp] theorem toPushforwardOfIso_app {X Y : TopCat} (H₁ : X ≅ Y) {ℱ : X.Presheaf C} {𝒢 : Y.Presheaf C} (H₂ : H₁.Hom _* ℱ ⟶ 𝒢) (U : (Opens X)ᵒᵖ) : (toPushforwardOfIso H₁ H₂).app U = ℱ.map (eqToHom (by simp [opens.map, Set.preimage_preimage])) ≫ H₂.app (op ((Opens.map H₁.inv).obj (unop U))) := by delta to_pushforward_of_iso simp only [Equiv.toFun_as_coe, nat_trans.comp_app, equivalence.equivalence_mk'_unit, eq_to_hom_map, eq_to_hom_op, eq_to_hom_trans, presheaf_equiv_of_iso_unit_iso_hom_app_app, equivalence.to_adjunction, equivalence.equivalence_mk'_counit, presheaf_equiv_of_iso_inverse_map_app, adjunction.mk_of_unit_counit_hom_equiv_apply] congr #align Top.presheaf.to_pushforward_of_iso_app TopCat.Presheaf.toPushforwardOfIso_app /-- If `H : X ≅ Y` is a homeomorphism, then given an `H _* ℱ ⟶ 𝒢`, we may obtain an `ℱ ⟶ H ⁻¹ _* 𝒢`. 
-/ def pushforwardToOfIso {X Y : TopCat} (H₁ : X ≅ Y) {ℱ : Y.Presheaf C} {𝒢 : X.Presheaf C} (H₂ : ℱ ⟶ H₁.Hom _* 𝒢) : H₁.inv _* ℱ ⟶ 𝒢 := ((presheafEquivOfIso _ H₁.symm).toAdjunction.homEquiv ℱ 𝒢).symm H₂ #align Top.presheaf.pushforward_to_of_iso TopCat.Presheaf.pushforwardToOfIso @[simp] theorem pushforwardToOfIso_app {X Y : TopCat} (H₁ : X ≅ Y) {ℱ : Y.Presheaf C} {𝒢 : X.Presheaf C} (H₂ : ℱ ⟶ H₁.Hom _* 𝒢) (U : (Opens X)ᵒᵖ) : (pushforwardToOfIso H₁ H₂).app U = H₂.app (op ((Opens.map H₁.inv).obj (unop U))) ≫ 𝒢.map (eqToHom (by simp [opens.map, Set.preimage_preimage])) := by simpa [pushforward_to_of_iso, equivalence.to_adjunction] #align Top.presheaf.pushforward_to_of_iso_app TopCat.Presheaf.pushforwardToOfIso_app end Iso variable (C) [HasColimits C] /-- Pullback a presheaf on `Y` along a continuous map `f : X ⟶ Y`, obtaining a presheaf on `X`. -/ @[simps map_app] def pullback {X Y : TopCat.{v}} (f : X ⟶ Y) : Y.Presheaf C ⥤ X.Presheaf C := lan (Opens.map f).op #align Top.presheaf.pullback TopCat.Presheaf.pullback @[simp] theorem pullbackObj_eq_pullbackObj {C} [Category C] [HasColimits C] {X Y : TopCat.{w}} (f : X ⟶ Y) (ℱ : Y.Presheaf C) : (pullback C f).obj ℱ = pullbackObj f ℱ := rfl #align Top.presheaf.pullback_obj_eq_pullback_obj TopCat.Presheaf.pullbackObj_eq_pullbackObj /-- The pullback and pushforward along a continuous map are adjoint to each other. -/ @[simps unit_app_app counit_app_app] def pushforwardPullbackAdjunction {X Y : TopCat.{v}} (f : X ⟶ Y) : pullback C f ⊣ pushforward C f := Lan.adjunction _ _ #align Top.presheaf.pushforward_pullback_adjunction TopCat.Presheaf.pushforwardPullbackAdjunction /-- Pulling back along a homeomorphism is the same as pushing forward along its inverse. -/ def pullbackHomIsoPushforwardInv {X Y : TopCat.{v}} (H : X ≅ Y) : pullback C H.Hom ≅ pushforward C H.inv := Adjunction.leftAdjointUniq (pushforwardPullbackAdjunction C H.Hom) (presheafEquivOfIso C H.symm).toAdjunction #align Top.presheaf.pullback_hom_iso_pushforward_inv TopCat.Presheaf.pullbackHomIsoPushforwardInv /-- Pulling back along the inverse of a homeomorphism is the same as pushing forward along it. -/ def pullbackInvIsoPushforwardHom {X Y : TopCat.{v}} (H : X ≅ Y) : pullback C H.inv ≅ pushforward C H.Hom := Adjunction.leftAdjointUniq (pushforwardPullbackAdjunction C H.inv) (presheafEquivOfIso C H).toAdjunction #align Top.presheaf.pullback_inv_iso_pushforward_hom TopCat.Presheaf.pullbackInvIsoPushforwardHom end Presheaf end TopCat
\chapter{Linear Models: Multiple variables and interactions} \label{ch:MulExplInter} Aims of this chapter\footnote{Here you work with the script file {\tt MulExplInter.R}}: \begin{compactitem} \item Creating more complex models, including ANCOVA \item Looking at interactions between variables \item Plotting predictions from models \end{compactitem} We will look at two models in this chapter: \begin{compactenum} \item Model 1: Is mammalian genome size predicted by interactions between trophic level and whether species are ground dwelling? \item ANCOVA: Is body size in Odonata predicted by interactions between genome size and taxonomic suborder? \end{compactenum} So far, we have only looked at the independent effects of variables. For example, in the trophic level and ground dwelling model from Chapter \ref{ch:MulExpl}, we only looked for specific differences for being a omnivore {\it or} being ground dwelling, not for being specifically a {\it ground dwelling omnivore}. These independent effects of a variable are known as {\it main effects} and the effects of combinations of variables acting together are known as {\it interactions} --- they describe how the variables {\it interact}. \section{Formulae with interactions in R} We've already seen a number of different model formulae in R. They all use this syntax:\\ {\tt response variable \textasciitilde\ explanatory variable(s)} \\ but we are now going to add two extra pieces of syntax: \begin{compactdesc} \item [{\tt y \textasciitilde\ a + b + a:b}] The {\tt a:b} means the interaction between {\tt a} and {\tt b} --- do combinations of these variables lead to different outcomes? \item [{\tt y \textasciitilde\ a * b}] This a shorthand for the model above. The {\tt *} means fit {\tt a} and {\tt b} as main effects and their interaction {\tt a:b}. \end{compactdesc} \section{Model 1: Mammalian genome size} \begin{compactitem}[$\quad\star$] \item Make sure you have changed the working directory to {\tt Code} in your stats coursework directory. \item Create a new blank script called `Interactions.R' and add some introductory comments. \item Use {\tt load('mammals.Rdata')} to load the data. \end{compactitem} If {\tt mammals.Rdata} is missing, just import the data again using {\tt read.csv("../Data/MammalData.csv")}. You will then have to add the log C Value column to the imported data frame again. Let's refit the model from Chapter \ref{ch:MulExpl}, but including the interaction between trophic level and ground dwelling. We'll immediately check the model is appropriate: \begin{lstlisting} > model <- lm(logCvalue ~ TrophicLevel * GroundDwelling, data= mammals) > par(mfrow=c(2,2), mar=c(3,3,1,1), mgp=c(2, 0.8,0)) > plot(model) \end{lstlisting} This gives: \begin{center} \includegraphics[width=\textwidth]{mamMod.pdf} \end{center} Now, we'll examine the {\tt anova} and {\tt summary} outputs for the model: \begin{lstlisting} > anova(model) Analysis of Variance Table Response: logCvalue Df Sum Sq Mean Sq F value Pr(>F) TrophicLevel 2 0.81 0.407 8.06 0.0004 *** GroundDwelling 1 2.75 2.747 54.40 2.3e-12 *** TrophicLevel:GroundDwelling 2 0.43 0.216 4.27 0.0150 * Residuals 253 12.77 0.050 --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 \end{lstlisting} Compared to the model from Chapter \ref{ch:MulExpl}, there is an extra line at the bottom. The top two are the same and show that trophic level and ground dwelling both have independent main effects. The extra line shows that there is also an interaction between the two. 
It doesn't explain a huge amount of variation, about half as much as trophic level, but it is significant.

Again, we can calculate the $r^2$ for the model:
\[ \frac{0.81 + 2.75 + 0.43}{0.81+2.75+0.43+12.77} = 0.238 \]

The model from Chapter \ref{ch:MulExpl} without the interaction had an $r^2 = 0.212$ --- our new model explains 2.6\% more of the variation in the data.

The summary table is as follows:

\begin{lstlisting}
>summary(model)

Call:
lm(formula = logCvalue ~ TrophicLevel * GroundDwelling, data = mammals)

Residuals:
    Min      1Q  Median      3Q     Max
 -0.523  -0.171  -0.010   0.119   0.831

Coefficients:
                                        Estimate Std. Error t value Pr(>|t|)
(Intercept)                               0.9589     0.0441   21.76  < 2e-16 ***
TrophicLevelHerbivore                     0.0535     0.0554    0.97  0.33460
TrophicLevelOmnivore                      0.2328     0.0523    4.45  1.3e-05 ***
GroundDwellingYes                         0.2549     0.0651    3.92  0.00012 ***
TrophicLevelHerbivore:GroundDwellingYes   0.0303     0.0786    0.39  0.69979
TrophicLevelOmnivore:GroundDwellingYes   -0.1476     0.0793   -1.86  0.06384 .
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Residual standard error: 0.225 on 253 degrees of freedom
  (120 observations deleted due to missingness)
Multiple R-squared: 0.238,	Adjusted R-squared: 0.223
F-statistic: 15.8 on 5 and 253 DF,  p-value: 1.5e-13
\end{lstlisting}

The lines in this are:
\begin{compactitem}
\item The reference level (intercept) for non-ground-dwelling carnivores. (The reference level is decided just by the alphabetic order of the levels)
\item Two differences for being in different trophic levels.
\item One difference for being ground dwelling.
\item Two new differences that give specific differences for ground dwelling herbivores and omnivores.
\end{compactitem}

The first four lines are as in the model from Chapter~\ref{ch:ANOVA}, and would allow us to find the predicted values for each group {\it if the size of the differences did not vary between levels because of the interactions}. That is, this part of the model only includes a single difference between ground and non-ground species, which has to be the same for each trophic group because it ignores interactions between trophic level and the ground / non-ground identity of each species. The last two lines then give the estimated coefficients associated with the interaction terms, and cause the size of the differences to vary between levels because of the further effects of interactions.

The table below shows how these combine to give the predictions for each group combination, with those two new lines shown in red:

\[\begin{array}{|r|r|r|}
\hline
 & \textrm{Not ground} & \textrm{Ground} \\
\hline
\textrm{Carnivore} & 0.96 = 0.96 & 0.96+0.25=1.21 \\
\textrm{Herbivore} & 0.96 + 0.05 = 1.01 & 0.96+0.05+0.25{\color{red}+0.03}=1.29\\
\textrm{Omnivore} & 0.96 + 0.23 = 1.19 & 0.96+0.23+0.25{\color{red}-0.15}=1.29\\
\hline
\end{array}\]

So why are there two new coefficients? For interactions between two factors, there are always $(n-1)\times(m-1)$ new coefficients, where $n$ and $m$ are the number of levels in the two factors (Ground dwelling or not: 2 levels and trophic level: 3 levels, in our current example). So in this model, $(3-1) \times (2-1) =2$. It is easier to understand why graphically: the prediction for the white boxes below can be found by adding the main effects together, but for the grey boxes we need to find specific differences and so there are $(n-1)\times(m-1)$ interaction coefficients to add.
\[ n=4,m=4\quad \begin{array}{|c|c|c|c|} \hline & & & \\ \hline & \gc & \gc & \gc \\ \hline & \gc & \gc & \gc \\ \hline & \gc & \gc & \gc \\ \hline \end{array} \quad n=3,m=6 \quad \begin{array}{|c|c|c|c|c|c|} \hline & & & & &\\ \hline & \gc & \gc & \gc & \gc & \gc \\ \hline & \gc & \gc & \gc & \gc & \gc\\ \hline \end{array} \] If we put this together, what is the model telling us? \begin{compactitem} \item Herbivores have the same genome sizes as carnivores, but omnivores have larger genomes. \item Ground dwelling mammals have larger genomes. \item These two findings suggest that ground dwelling omnivores should have extra big genomes. However, the interaction shows they are smaller than expected and are, in fact, similar to ground dwelling herbivores. \end{compactitem} Note that although the interaction term in the {\tt anova} output is significant, neither of the two coefficients in the {\tt summary} has a $p<0.05$. There are two weak differences (one very weak, one nearly significant) that together explain significant variance in the data. \begin{compactitem}[$\quad\star$] \item Copy the code above into your script and run the model. \item Make sure you understand the output! \end{compactitem} Just to make sure the sums above are correct, we'll use the same code as in \ref{ch:MulExpl} to get R to calculate predictions for us: \begin{lstlisting} # a data frame of combinations of variables > gd <- rep(levels(mammals$GroundDwelling), times = 3) > print(gd) [1] "No" "Yes" "No" "Yes" "No" "Yes" > tl <- rep(levels(mammals$TrophicLevel), each = 2) > print(tl) [1] "Carnivore" "Carnivore" "Herbivore" "Herbivore" "Omnivore" "Omnivore" # New data frame > predVals <- data.frame(GroundDwelling = gd, TrophicLevel = tl) # predict using the new data frame > predVals$predict <- predict(model, newdata = predVals) > print(predVals) GroundDwelling TrophicLevel predict 1 No Carnivore 0.9589 2 Yes Carnivore 1.2138 3 No Herbivore 1.0125 4 Yes Herbivore 1.2977 5 No Omnivore 1.1918 6 Yes Omnivore 1.2990 \end{lstlisting} \begin{compactitem}[$\quad\star$] \item Run these predictions in your script. \end{compactitem} If we plot these data points onto the barplot from Chapter \label{ch:MulExpl}, they now lie exactly on the mean values, because we've allowed for interactions. The triangle on this plot shows the predictions for ground dwelling omnivores from the main effects ($0.96 + 0.23 + 0.25 = 1.44$), the interaction of $-0.15$ pushes the prediction back down. \begin{center} \includegraphics[width=0.8\textwidth]{predPlot.pdf} \end{center} \section{ANCOVA: Body Weight in Odonata} We'll go all the way back to the regression analyses from Chapter~\ref{ch:regress}. Remember that we fitted two separate regression lines to the data for damselflies and dragonflies. We'll now use an interaction to fit these in a single model. This kind of linear model --- with a mixture of continuous variables and factors --- is often called an {\it analysis of covariance}, or ANCOVA. That is, ANCOVA is a type of linear model that blends ANOVA and regression. ANCOVA evaluates whether population means of a dependent variable are equal across levels of a categorical independent variable, while statistically controlling for the effects of other continuous variables that are not of primary interest, known as covariates. {\it That is, this is still a linear model, but with one categorical and one or more continuous predictors}. \begin{compactitem}[$\quad\star$] \item Load the data: {\tt odonata <- read.csv('../Data/GenomeSize.csv')}. 
\item Create two new variables in the {\tt odonata} data set called {\tt logGS} and {\tt logBW} containing log genome size and log body weight. \end{compactitem} The models we fitted before looked like this: \begin{center} \includegraphics[width=0.7\textwidth]{dragonData.pdf} \end{center} We can now fit the model of body weight as a function of both genome size and suborder: \begin{lstlisting} > odonModel <- lm(logBW ~ logGS * Suborder, data = odonata) \end{lstlisting} Again, we'll look at the {\tt anova} table first: \begin{lstlisting} > anova(odonModel) Analysis of Variance Table Response: logBW Df Sum Sq Mean Sq F value Pr(>F) logGS 1 1.1 1.1 2.71 0.1 Suborder 1 112.0 112.0 265.13 < 2e-16 *** logGS:Suborder 1 9.1 9.1 21.65 1.1e-05 *** Residuals 94 39.7 0.4 --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 \end{lstlisting} Interpreting this gives the following: \begin{compactitem} \item There is no significant main effect of log genome size. The {\it main} effect is the important thing here --- genome size is hugely important but does very different things for the two different suborders. If we ignored {\tt Suborder}, there isn't an overall relationship: the average of those two lines is pretty much flat. \item There is a very strong main effect of Suborder: the mean body weight in the two groups are very different. \item There is a strong interaction between suborder and genome size. This is an interaction between a factor and a continuous variable and shows that the {\it slopes} are different for the different factor levels. \end{compactitem} The summary table looks like this: \begin{lstlisting} > summary(odonModel) Call: lm(formula = logBW ~ logGS * Suborder, data = odonata) Residuals: Min 1Q Median 3Q Max -1.3243 -0.3225 0.0073 0.3962 1.4976 Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) -2.3995 0.0848 -28.31 < 2e-16 *** logGS 1.0052 0.2237 4.49 2.0e-05 *** SuborderZygoptera -2.2489 0.1354 -16.61 < 2e-16 *** logGS:SuborderZygoptera -2.1492 0.4619 -4.65 1.1e-05 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Residual standard error: 0.65 on 94 degrees of freedom (2 observations deleted due to missingness) Multiple R-squared: 0.755, Adjusted R-squared: 0.747 F-statistic: 96.5 on 3 and 94 DF, p-value: <2e-16 \end{lstlisting} The first thing to note is that the $r^2$ value is really high. The model explains three quarters (0.752) of the variation in the data. Next, there are four coefficients: \begin{compactitem} \item The intercept is for the first level of {\tt Suborder}, which is Anisoptera (dragonflies). \item The next line, for log genome size, is the slope for Anisoptera. \item We then have a coefficient for the second level of {\tt Suborder}, which is Zygoptera (damselflies). As with the first model, this difference in factor levels is a difference in mean values and shows the difference in the intercept for Zygoptera. \item The last line is the interaction between {\tt Suborder} and {\tt logGS}. This shows how the slope for Zygoptera differs from the slope for Anisoptera. \end{compactitem} How do these hang together to give the two lines shown in the model? 
We can calculate these by hand: \begin{align*} \textrm{Body Weight} &= -2.40 + 1.01 \times \textrm{logGS} & \textrm{[Anisoptera]}\\ \textrm{Body Weight} &= (-2.40 -2.25) + (1.01 - 2.15) \times \textrm{logGS} & \textrm{[Zygoptera]}\\ &= -4.65 - 1.14 \times \textrm{logGS} \\ \end{align*} \begin{compactitem}[$\quad\star$] \item Add the code into your script and check that you understand the outputs. \end{compactitem} We'll use the {\tt predict} function to get the predicted values from the model and add lines to the plot above. First, we'll create a set of numbers spanning the range of genome size: \begin{lstlisting} #get the range of the data > rng <- range(odonata$logGS) #get a sequence from the min to the max with 100 equally spaced values > LogGSForFitting <- seq(rng[1], rng[2], length = 100) \end{lstlisting} Have a look at these numbers: \begin{lstlisting} print(LogGSForFitting) \end{lstlisting} We can now use the model to predict the values of body weight at each of those points for each of the two suborders. We've added {\tt se.fit=TRUE} to the function to get the standard error around the regression lines. Note that we are now using \begin{lstlisting} #get a data frame of new data for the order > ZygoVals <- data.frame(logGS = LogGSForFitting, Suborder = "Zygoptera") #get the predictions and standard error > ZygoPred <- predict(odonModel, newdata = ZygoVals, se.fit = TRUE) #repeat for anisoptera AnisoVals <- data.frame(logGS = LogGSForFitting, Suborder = "Anisoptera") AnisoPred <- predict(odonModel, newdata = AnisoVals, se.fit = TRUE) \end{lstlisting} Both {\tt AnisoPred} and {\tt ZygoPred} contain predicted values (called {\tt fit}) and standard error values (called {\tt se.fit}) for each of the values in our generated values in {\tt LogGSForFitting} for each of the two suborders. We can add the predictions onto a plot like this: \begin{lstlisting} # plot the scatterplot of the data > plot(logBW ~ logGS, data = odonata, col = Suborder) # add the predicted lines > lines(AnisoPred$fit ~ LogGSForFitting, col = "black") > lines(AnisoPred$fit + AnisoPred$se.fit ~ LogGSForFitting, col = "black", lty = 2) > lines(AnisoPred$fit - AnisoPred$se.fit ~ LogGSForFitting, col = "black", lty = 2) \end{lstlisting} \begin{compactitem}[$\quad\star$] \item Copy the prediction code into your script and run the plot above. Copy and modify the last three lines to add the lines for the Zygoptera. Your final plot should look like this. \end{compactitem} \begin{center} \includegraphics[width=0.7\textwidth]{odonPlot.pdf} \end{center}
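As a final cross-check, we can recover the $r^2$ reported by {\tt summary(odonModel)} from the {\tt anova} table in exactly the same way as we did for the mammal model, dividing the variation explained by the model terms by the total variation:
\[
\frac{1.1 + 112.0 + 9.1}{1.1 + 112.0 + 9.1 + 39.7} = \frac{122.2}{161.9} \approx 0.755
\]
which matches the multiple $r^2$ in the {\tt summary} output above.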
def add (n m : Nat) : Nat := n + m @[simp] theorem addZero x : add 0 x = x := by simp [add] syntax "bigAdd0Seq! " num : term macro_rules | `(bigAdd0Seq! $n) => let n := n.toNat if n == 0 then `(0) else `(add 0 (bigAdd0Seq! $(Lean.quote (n - 1)))) set_option maxRecDepth 10000 theorem ex : bigAdd0Seq! 20 = 0 := by simp #print ex
lemma eucl_rel_poly_unique: assumes 1: "eucl_rel_poly x y (q1, r1)" assumes 2: "eucl_rel_poly x y (q2, r2)" shows "q1 = q2 \<and> r1 = r2"
module Specifications.OrderedRing import public Specifications.Ring import public Specifications.TranslationInvariance import public Specifications.DiscreteOrderedGroup %default total %access public export ||| composed specification ||| todo: multiplication of positive elements is positive data PartiallyOrderedRingSpec : Binop s -> s -> (s -> s) -> Binop s -> Binrel s -> Type where MkPartiallyOrderedRing : RingSpec add zero neg mul -> PartiallyOrderedMagmaSpec add leq -> PartiallyOrderedRingSpec add zero neg mul leq ||| forget ring : PartiallyOrderedRingSpec add zero neg mul _ -> RingSpec add zero neg mul ring (MkPartiallyOrderedRing r _) = r ||| forget partiallyOrderedGroup : PartiallyOrderedRingSpec add zero neg _ leq -> PartiallyOrderedGroupSpec add zero neg leq partiallyOrderedGroup (MkPartiallyOrderedRing r o) = MkPartiallyOrderedGroup (group (abelianGroup r)) o ||| composed specification data OrderedRingSpec : Binop s -> s -> (s -> s) -> Binop s -> Binrel s -> Type where MkOrderedRing : PartiallyOrderedRingSpec add zero neg mul leq -> isTotalOrder leq -> OrderedRingSpec add zero neg mul leq namespace ForgetOrder ring : OrderedRingSpec add zero neg mul _ -> RingSpec add zero neg mul ring (MkOrderedRing r _) = ring r ||| forget orderedGroup : OrderedRingSpec add zero neg _ leq -> OrderedGroupSpec add zero neg leq orderedGroup (MkOrderedRing r t) = MkOrderedGroup (partiallyOrderedGroup r) t ||| composed specification data DiscreteOrderedRingSpec : Binop s -> s -> (s -> s) -> Binop s -> Binrel s -> s -> Type where MkDiscreteOrderedRing : OrderedRingSpec add zero neg mul leq -> isDiscreteOrder add leq zero one -> isNeutralL mul one -> isNeutralR mul one -> (onePositive : leq zero one) -> DiscreteOrderedRingSpec add zero neg mul leq one ||| forget discreteOrderedGroup : DiscreteOrderedRingSpec add zero neg _ leq one -> DiscreteOrderedGroupSpec add zero neg leq one discreteOrderedGroup (MkDiscreteOrderedRing r d _ _ _) = MkDiscreteOrderedGroup (orderedGroup r) (abelian (abelianGroup (ring r))) d ||| forget unitalRing : DiscreteOrderedRingSpec add zero neg mul _ one -> UnitalRingSpec add zero neg mul one unitalRing (MkDiscreteOrderedRing or _ l r _) = MkUnitalRing (ring or) l r ||| forget onePositive : DiscreteOrderedRingSpec _ zero _ _ leq one -> leq zero one onePositive (MkDiscreteOrderedRing _ _ _ _ pos) = pos
So far we have worked with random walkers and probability distributions independently. Now we will try to bring the two topics together. If $P_{t}(i)$ is the probability that a walker is at site $i$ at time $t$, then the probability distribution is given by the set $\{ P_{t}(i): i \in \mathbb{Z} \}$. Abstracting a little, this object can also be seen as a vector with as many entries as there are sites the walker can occupy. We will call this vector $\mathbf{P}_{t}$.

**Note 1**: This is the first example in which we can say that time is discrete, so $t \in \mathbb{N}$.

**Note 2**: In principle, the *place* where our walkers march can be *infinite* (for now in one dimension), so the vector $\mathbf{P}_{t}$ would have infinitely many entries.

## Master equation

Let us place ourselves at step $0$, i.e. $t=0$. The walker sits at its initial condition, waiting for the starting flag. Suppose its initial condition is $i = 0$ and that the walker has probability $p$ of taking a step to the right and $q := 1-p$ of taking one to the left. For what follows we will assume $p = q = \frac{1}{2}$.

It is then not hard to see that $P_{0}(i) = 0, \ \forall i \neq 0$ and that $P_{0}(0) = 1$.

At the first step we have:

$$ \begin{matrix} P_{1}(-1) = \frac{1}{2} & P_{1}(0) = 0 & P_{1}(1) = \frac{1}{2} \end{matrix} $$

What happens at the next step? The space the walker can reach grows, going from $i = -2, \dotsc, 2$. Let us now see what the probability distribution looks like.

$$ \begin{matrix} P_{2}(-2) = \frac{1}{4} & P_{2}(-1) = 0 & P_{2}(0) = \frac{1}{2} & P_{2}(1) = 0 & P_{2}(2) = \frac{1}{4} \end{matrix} $$

Things have now become a little more interesting. To understand a bit more rigorously how these probabilities were computed, take the case of $P_{2}(0)$. At step $t = 1$ the walker had probability $\frac{1}{2}$ of being at $i = -1, 1$. Suppose it was in cell $i = -1$; then the walker has probability $\frac{1}{2}$ of stepping to the right at step $t = 2$. In the same way, if the walker were in cell $i = 1$, there would also be probability $\frac{1}{2}$ that at the next step it would again be in cell $i = 0$.

In this way we arrive at the **master equation** of our one-dimensional example with probabilities $p = q = \frac{1}{2}$:

$$P_{t+1}(i) = \frac{1}{2}P_t(i-1) + \frac{1}{2}P_t(i+1) $$

The general form of the **master equation** is the following:

$$ \begin{equation} P_{t+1}(i) = pP_t(i-1) + (1-p)P_t(i+1) \ \ \ \ \ (1) \end{equation} $$

**[1a]** Let's get to work. The overall goal is to plot how the probability distribution evolves in time with the help of our master equation. For this we will need a somewhat more sophisticated `kernel` than the ones we have written so far. The big change is that here we will play with *a single* random walker, not with many as we had done until now. The `grid` of `blocks` of `threads` becomes the space in which the walker moves. A subtle change, but one with big consequences.

First of all we need the array in which the walker can move from one side to the other. We will call it $X$. Remember to make it large enough that the walker does not hit the ends.
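Before jumping to the GPU it can be useful to have a minimal CPU reference to compare the kernel's output against later. The sketch below applies equation (1) directly with NumPy; the array size, the number of steps and the variable names are arbitrary choices made here for illustration and are not part of the exercise.

```python
import numpy as np

p = 0.5                  # probability of a step to the right
n_cells = 201            # large enough that the walker never reaches the ends
n_steps = 50

P = np.zeros(n_cells)
P[n_cells // 2] = 1.0    # P_0: all of the probability at the central cell

history = [P.copy()]
for t in range(n_steps):
    P_new = np.zeros_like(P)
    # equation (1): P_{t+1}(i) = p*P_t(i-1) + (1-p)*P_t(i+1)
    P_new[1:-1] = p * P[:-2] + (1.0 - p) * P[2:]
    P = P_new
    history.append(P.copy())

# np.array(history) can later be visualised with imshow() to see the evolution
```

Once your kernel is working, its output should agree with this reference up to floating-point rounding.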
To compute the probability distribution at step $t+1$ we need the distribution at step $t$. However, if we overwrite our array $X$ we will lose information, and our calculations will therefore be wrong. This is why we will need to declare another `array` into which we can copy our information at time $t$ in order to compute the desired distribution.

Now, here is where things get interesting: *the way the data are copied*. For this we will build on the *tiled programming* example from Notebook 6 of the first part of this course, which was based on declaring a `__shared__` array into which the data are copied. So let us take a look at what the kernel would look like.

Suppose that, as said above, our data live in an array $X$ and that the array we lean on is an array in *shared* memory called $X_{copia}$. The general idea is then to compute the states of X at time $t+1$ tile by tile. Suppose then that X is an array of 200 cells and that we want to compute these 200 cells at time $t+1$ in groups of 5. Then $X_{copia}$ must hold all the cells needed to carry out those computations. In this specific case, since the state of a cell at time $t+1$ is determined by the cell itself **and its neighbours**, we must copy each of these into $X_{copia}$. This causes no problems except for the cells at the ends of each block. To solve this we will also have to copy the neighbours that do not appear in our block of 5 cells but that are nevertheless needed for the computations. In this way, for an array of 200 cells whose states are to be computed in blocks of 5, we will need tiles of 7 cells in shared memory.

Next we show how the data are copied into shared memory. First we give a Python program to get a clearer idea of what we are after. Only then will we move on to the kernel.

```python
import numpy as np
```

Suppose an array A with 17 initial states. Our intention is to compute the state of each cell at the next time step. We will use the tiled method to copy the data. Each tile takes care of 4 entries of A, so according to the master equation (1) we will need the tile dimension to be 6.

```python
A = np.array([1,2,3,4,5,6,7,8,9,10,11,12, 13, 14, 15, 16, 17])
tesela_A = np.ones(6)
```

```python
# blockDim is the number of cells that will be copied to the tile
# gridDim is the number of blocks we will have
# ANCHO_TESELA is the number of entries we need in order to compute the next state of blockDim cells

blockDim = 4
gridDim = len(A)/blockDim+1
ANCHO_TESELA = blockDim+2

# We are back to having two loops...
# The first loop goes block by block through A
for blockIdx in xrange(gridDim):

    # the second loop fetches the elements of each block
    for tx in xrange(ANCHO_TESELA-1):
        # the required elements are copied into the tile
        if blockDim*blockIdx + tx-1 >= 0 and blockDim*blockIdx + tx-1 < len(A):
            tesela_A[tx] = A[blockDim*blockIdx + tx-1]
        # and if we have reached the ends of A, a 0 is placed instead
        else:
            tesela_A[tx] = 0.0

    # This if/else takes care of placing the data at the right edge of the tile
    if 4*(blockIdx+1) < len(A):
        tesela_A[ANCHO_TESELA-1] = A[blockDim*(blockIdx+1)]
    else:
        tesela_A[ANCHO_TESELA-1] = 0.

    print tesela_A
```

    [ 0.  1.  2.  3.  4.  5.]
    [ 4.  5.  6.  7.  8.  9.]
    [  8.   9.  10.  11.  12.  13.]
    [ 12.  13.  14.  15.  16.  17.]
    [ 16.  17.   0.   0.   0.   0.]

Note how in each tile the 4 central values correspond to the cells whose state will be computed at time $t+1$. In the case of the last tile, since the values of A have already been covered, the tile is filled with $0$'s so that no erroneous computations occur.

Now we can move on to the kernel in CUDA C. Some names have been changed because of the way the programs are written, but the idea is the same. Among these changes, note that `blockDim` was changed to `TAMANIO_BLOQUE`, and that we introduce `tesela_idx`, which is really the `blockDim*blockIdx + tx-1` we worked with in Python. This index is used because, for each entry of A, we also need its left neighbour (idx-1). With `tesela_idx` we cover each of these. All that remains is the right neighbour of the last cell, which is handled by another small conditional with an `if else`.

```C++
__shared__ float tesela_X[TAMANIO_BLOQUE+2] ;

int tx = threadIdx.x ;
int idx = blockIdx.x*TAMANIO_BLOQUE + tx ;
int tesela_idx = idx - 1 ;

if (tx < TAMANIO_BLOQUE) {
    if ((tesela_idx >= 0) && (tesela_idx < Dim_Camino)) {
        tesela_X[tx] = X[tesela_idx] ;
    } else {
        tesela_X[tx] = 0.0f ;
    }
    __syncthreads() ;
}

if (blockDim.x*(blockIdx.x+1) < Dim_Camino) {
    tesela_X[TAMANIO_BLOQUE+1] = X[blockDim.x*(blockIdx.x +1)] ;
} else {
    tesela_X[TAMANIO_BLOQUE+1] = 0.0f ;
}
__syncthreads() ;
```

Once the data from `X` have been copied into `tesela_X`, all that is left is to rewrite `X` with the new values. That part is up to you. It is also important to fix the block size `TAMANIO_BLOQUE`.

So it is time for the reader to get to work and complete the `kernel`, and then plot the time evolution of the probability distribution of the random walker. Assume at first that $p = q = \frac{1}{2}$. We recommend plotting with the `imshow()` function from `matplotlib`.

This method of solving the master equation numerically is called **exact enumeration**; it is extremely important and widely used for solving partial differential equations.

**[1b]** Once you have obtained the images, change the value of $p$ to see how the probability distribution varies.

## Two dimensions

**[2]** Write the master equation for the random walker in two dimensions.

**[3]** Modify your code to obtain a series of images with which you can observe the time evolution of the probability distribution in 2 dimensions.

**Hint**: In this case you will have to make a `__shared__` matrix rather than a one-dimensional array. We recommend going back to the notebooks on matrix multiplication to remind yourself how the indexing works. We will now have four indices:

```C++
int Fila = blockIdx.y*BLOCK_SIZE + ty ;
int Columna = blockIdx.x*BLOCK_SIZE + tx ;

int Fila_copia = Fila - 1 ;
int Columna_copia = Columna - 1 ;

if( (Fila_copia >= 0) && (Fila_copia < DimY) && (Columna_copia >= 0) && (Columna_copia < DimX) ) {
    ds_copiaPlano[ty][tx] = Plano[Fila_copia][Columna_copia];
} else {
    ds_copiaPlano[ty][tx] = 0.0f ;
}
```

If you get lost with the indices and copies, write a Python code similar to the one above to serve as a guide.

## Boundaries

So far we have not faced the problem of the boundaries, but there was no escaping it. Let us assume they are *reflecting* walls rather than *absorbing* ones, which will make the walker "bounce" off the boundaries.
**[4]** Write down the rule that the probabilities have to follow when a walker reaches any of the four boundaries.

**[5]** Implement this rule in your code and observe what happens.

## A first approach to PDEs

Suppose now that the changes in space and in time of the random walker are given by differentials $\delta x$ and $\delta t$. Equation (1) then becomes

$$ P(x, t+\delta t) = pP(x-\delta x, t) + qP(x+\delta x, t) $$

If we now expand each term in a Taylor series (up to 2nd order), we arrive at:

$$ \frac{\partial P}{\partial t}(x, t) = (q-p)\frac{\delta x}{\delta t}\frac{\partial P}{\partial x}(x, t)+ \frac{\delta x^2}{2\delta t}\frac{\partial^2 P}{\partial x^2}(x, t) $$

If we now return to the case $p = q = \frac{1}{2}$ and set $D = \frac{\delta x^2}{2\delta t}$, we obtain the well-known diffusion equation.

$$\frac{\partial P}{\partial t}(x, t) = D\frac{\partial^2 P}{\partial x^2}(x, t)$$

**[6]** The analytic solutions of this PDE are well known. Compare them with your numerical solution.

Thus we see that the exact-enumeration method for a random walker provides a numerical method for solving this evolution PDE. [The method is called finite differences.]

## References

+ [Master equation](https://en.wikipedia.org/wiki/Master_equation)
+ [Finite difference method](https://en.wikipedia.org/wiki/Finite_difference_method)
+ [Diffusion equation](https://en.wikipedia.org/wiki/Diffusion_equation)

```python

```
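A reference formula for exercise **[6]**: on an unbounded domain, with all of the probability initially concentrated at $x = 0$ (a delta initial condition, which is exactly how our walker starts), the well-known fundamental solution of the diffusion equation is

$$ P(x, t) = \frac{1}{\sqrt{4 \pi D t}} \, e^{-x^{2}/(4 D t)}, $$

a Gaussian whose variance grows linearly in time as $2 D t$. Keeping in mind that at any given step the discrete walker only occupies sites of one parity, the envelope of the numerically obtained distribution should approach this Gaussian shape for times short enough that the walker has not yet felt the reflecting boundaries.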
Require Import Omega. Require Import Psatz. Require Import Coq.ZArith.ZArith. Require Import Coq.NArith.NArith. Require Import Coq.Bool.Bool. Local Open Scope Z_scope. Require Import BitUtils. Require Import CTZ. Require Import DyadicIntervals. Require Import Tactics. Require Import OrdTactic. (** ** Utilities about sorted (specialized to [N.lt]) *) Require Import Coq.Lists.List. Require Import Coq.Sorting.Sorted. Lemma sorted_append: forall l1 l2 x, StronglySorted N.lt l1 -> StronglySorted N.lt l2 -> (forall y, In y l1 -> y < x)%N -> (forall y, In y l2 -> x <= y)%N -> StronglySorted N.lt (l1 ++ l2). Proof. intros ??? Hsorted1 Hsorted2 Hlt Hge. induction Hsorted1. * apply Hsorted2. * simpl. apply SSorted_cons. + apply IHHsorted1. intros y Hy. apply Hlt. right. assumption. + rewrite Forall_forall. intros z Hz. rewrite in_app_iff in Hz. destruct Hz. - rewrite Forall_forall in H. apply H; auto. - apply N.lt_le_trans with (m := x). apply Hlt. left. reflexivity. apply Hge. assumption. Qed. Lemma sorted_append': forall a l1 l2 (f : a -> Z) x, StronglySorted (fun x y => Z.lt (f x) (f y)) l1 -> StronglySorted (fun x y => Z.lt (f x) (f y)) l2 -> (forall y, In y l1 -> f y < f x)%Z -> (forall y, In y l2 -> f x <= f y)%Z -> StronglySorted (fun x y => Z.lt (f x) (f y)) (l1 ++ l2). Proof. intros ????? Hsorted1 Hsorted2 Hlt Hge. induction Hsorted1. * apply Hsorted2. * simpl. apply SSorted_cons. + apply IHHsorted1. intros y Hy. apply Hlt. right. assumption. + rewrite Forall_forall. intros z Hz. rewrite in_app_iff in Hz. destruct Hz. - rewrite Forall_forall in H. apply H; auto. - apply Z.lt_le_trans with (m := f x). apply Hlt. left. reflexivity. apply Hge. assumption. Qed. Lemma StronglySorted_map: forall a b (R1 : a -> a -> Prop) (R2 : b -> b -> Prop) (f : a -> b) (l : list a), StronglySorted R1 l -> (forall (x y : a), In x l -> In y l -> R1 x y -> R2 (f x) (f y)) -> StronglySorted R2 (List.map f l). Proof. intros. induction H. * simpl. constructor. * simpl. constructor. + apply IHStronglySorted; intuition. + clear IHStronglySorted. rewrite Forall_forall. intros. rewrite in_map_iff in H2. destruct H2 as [?[[]?]]. apply H0. left. reflexivity. right. assumption. rewrite Forall_forall in H1. apply H1. assumption. Qed. (** *** Stuff about [option] *) Definition oro {a} : option a -> option a -> option a := fun x y => match x with | Some v => Some v | None => y end. (** ** IntMap-specific operations These definitions and lemmas are used to link some concepts from the IntMap implementation to the range sets above. *) Require Import GHC.Base. Import GHC.Base.Notations. Require Import GHC.Num. Import GHC.Num.Notations. Require Import Data.Bits. Import Data.Bits.Notations. Require Import Data.IntSet.InternalWord. Require Import IntWord. Require Import Proofs.Data.Bits.Popcount. Local Open Scope N_scope. Set Bullet Behavior "Strict Subproofs". (** A tactic to remove all relevant Haskell type class methods, and exposes the underlying Coq concepts. *) Ltac unfoldMethods := unfold op_zsze__, op_zeze__, Eq___Int, Eq___Word, Eq___IntSet, op_zsze____, op_zeze____, op_zl__, op_zg__, Ord__Int, Ord__Word, op_zl____, op_zg____, GHC.Real.fromIntegral, GHC.Real.instance__Integral_Int__74__, fromInteger, GHC.Real.toInteger, natFromInt, Num_Word__, Num__Word, Num__Int, op_zm__, op_zp__, Num_Integer__, Prim.seq, op_zdzn__, xor, op_zizazi__, op_zizbzi__, Bits.complement, Bits__Int, Bits__Word, id, op_z2218U__ in *. (* Move to IntWord *) Lemma intToN_inj_iff: forall i1 i2, intToN i1 = intToN i2 <-> i1 = i2. Admitted. 
Lemma wordToN_inj_iff: forall x y, wordToN x = wordToN y <-> x = y. Admitted. (* Move to IntWord *) Lemma wordToN_NToWord: (* Lets add preconditionn later *) forall x, wordToN (NToWord x) = x. Admitted. Lemma intToN_NToInt: (* Lets add precondition later *) forall x, intToN (NToInt x) = x. Admitted. Lemma intToZ_ZToInt: (* Lets add precondition later *) forall x, intToZ (ZToInt x) = x. Admitted. Lemma NToWord_wordToN: forall x, NToWord (wordToN x) = x. Admitted. Lemma NToInt_intToN: forall x, NToInt (intToN x) = x. Admitted. Lemma ZToInt_intToZ: forall x, ZToInt (intToZ x) = x. Admitted. Ltac Int_Word_N := unfold natFromInt, wordFromInt in *; rewrite ?intToN_inj_iff, ?wordToN_inj_iff, ?wordToN_NToWord, ?intToN_NToInt, ?NToWord_wordToN, ?NToInt_intToN, ?intToZ_ZToInt, ?ZToInt_intToZ in *. (** We hardcode the width of the leaf bit maps to 64 bits *) Definition WIDTH := 64%N. Definition tip_width := N.log2 WIDTH. (** *** Lemmas about [prefixOf] *) Lemma rPrefix_shiftr: forall e, NToInt (rPrefix (N.shiftr (intToN e) tip_width, tip_width)) = prefixOf e. Admitted. (* Proof. intros. unfold rPrefix, prefixOf, suffixBitMask. unfoldMethods. rewrite <- N.ldiff_ones_r. reflexivity. Qed. *) (* Lemma prefixOf_eq_shiftr: forall i p, (prefixOf i =? N.shiftl p tip_width) = ((N.shiftr i tip_width) =? p). Proof. intros. unfold prefixOf, suffixBitMask. unfoldMethods. rewrite -> N.ldiff_ones_r. replace tip_width with 6 by reflexivity. rewrite eq_iff_eq_true. rewrite !N.eqb_eq. rewrite -> N_shiftl_inj by omega. reflexivity. Qed. *) (** This lemma indicaes that [prefixOf] implements the check of whether the number is part of a tip-sized range. *) Lemma prefixOf_eqb_spec: forall r i, (rBits r = N.log2 WIDTH)%N -> intToN (prefixOf i) =? rPrefix r = inRange (intToN i) r. Admitted. (* Proof. intros. destruct r; simpl in *; subst. rewrite prefixOf_eq_shiftr. reflexivity. Qed. *) (* Lemma prefixOf_mono: forall x y, x <= y -> prefixOf x <= prefixOf y. Proof. intros. rewrite <- !rPrefix_shiftr. unfold rPrefix. rewrite !N.shiftl_mul_pow2 by (intro Htmp; inversion Htmp). rewrite !N.shiftr_div_pow2 by (intro Htmp; inversion Htmp). apply N.mul_le_mono_nonneg_r. nonneg. apply N.div_le_mono. apply N.pow_nonzero; Nomega. assumption. Qed. *) (* Lemma prefixOf_rPrefix: forall r, rBits r = N.log2 WIDTH -> (prefixOf (rPrefix r) = rPrefix r). Proof. intros. destruct r as [p b]. simpl in *. subst. rewrite <- rPrefix_shiftr. unfold rPrefix, tip_width, tip_width. simpl N.log2. rewrite N.shiftr_shiftl_l by reflexivity. replace (6 - 6) with 0 by Nomega. rewrite N.shiftl_0_r. reflexivity. Qed. *) (* Lemma prefixOf_suffixOf: forall i, prefixOf i + suffixOf i = i. Proof. intros. rewrite <- rPrefix_shiftr. unfold rPrefix, tip_width, tip_width. simpl id. rewrite N.shiftr_div_pow2 by omega. rewrite N.shiftl_mul_pow2 by omega. unfold prefixOf, suffixOf, suffixBitMask, WIDTH in *. unfoldMethods. simpl N.log2. rewrite N.land_ones by omega. symmetry. rewrite N.mul_comm. apply N.div_mod. intro Htmp; inversion Htmp. Qed. *) (** *** Lemmas about [suffixOf] *) (* Lemma suffixOf_lt_WIDTH: forall e, suffixOf e < WIDTH. intros. unfold suffixOf, suffixBitMask. unfoldMethods. rewrite N.land_ones. change (e mod 64 < 64). apply N.mod_upper_bound. Nomega. Qed. *) Lemma bitmapOfSuffix_pow : forall x, wordToN (bitmapOfSuffix (NToInt x)) = 2^x. Admitted. (* Proof. intros. apply N.shiftl_1_l. Qed. *) (* Lemma suffixOf_plus_bitmapOf: forall x, (N.ones (suffixOf x) + bitmapOf x)%N = N.ones (N.succ (suffixOf x)). Proof. intros. unfold bitmapOf. 
rewrite bitmapOfSuffix_pow. rewrite !N.ones_equiv. rewrite N.pow_succ_r by nonneg. Nomega. Qed. *) (** *** Operation: [rMask] Calculates a mask in the sense of the IntSet implementation: A single bit set just to the right of the prefix. (Somewhat illdefined for singleton ranges). *) Definition rMask : range -> N := fun '(p,b) => 2^(b - 1). (** *** Verification of [nomatch] *) Lemma nomatch_spec: forall i r, (0 < rBits r)%N -> nomatch i (NToInt (rPrefix r)) (NToInt (rMask r)) = negb (inRange (intToN i) r). (* Proof. intros. destruct r as [p b]. simpl in *. unfold nomatch, zero, inRange. unfoldMethods. unfold mask, maskW. unfoldMethods. f_equal. rewrite eq_iff_eq_true. rewrite !N.eqb_eq. rewrite <- N.pow_succ_r by Nomega. replace (N.succ (b - 1)) with b by Nomega. rewrite N.sub_1_r. rewrite <- N.ones_equiv. rewrite -> N.ldiff_ones_r by nonneg. rewrite -> N_shiftl_inj by nonneg. reflexivity. Qed. *) Admitted. Lemma match_nomatch: forall x p ms, match_ x p ms = negb (nomatch x p ms). Proof. intros. unfold match_, nomatch. unfoldMethods. rewrite negb_involutive. reflexivity. Qed. (** *** Verification of [zero] *) Lemma zero_spec: forall i r, (0 < rBits r)%N -> zero i (NToInt (rMask r)) = negb (N.testbit (intToN i) (rBits r - 1)). Proof. intros. destruct r as [p b]. simpl in *. unfold zero. unfoldMethods. Admitted. (* apply N_land_pow2_eq. Qed. *) (** The IntSet code has a repeating pattern consisting of calls to [nomatch] and [zero]. The following two lemmas capture that pattern concisely. *) Lemma nomatch_zero: forall {a} i r (P : a -> Prop) left right otherwise, (0 < rBits r)%N -> (inRange (intToN i) r = false -> P otherwise) -> (inRange (intToN i) (halfRange r false) = true -> inRange (intToN i) (halfRange r true) = false -> P left) -> (inRange (intToN i) (halfRange r false) = false -> inRange (intToN i) (halfRange r true) = true -> P right) -> P (if nomatch i (NToInt (rPrefix r)) (NToInt (rMask r)) then otherwise else if zero i (NToInt (rMask r)) then left else right). Proof. intros. rewrite nomatch_spec by auto. rewrite if_negb. destruct (inRange (intToN i) r) eqn:?. * rewrite zero_spec by auto. rewrite if_negb. destruct (N.testbit (intToN i) (rBits r - 1)) eqn:Hbit. + apply H2. rewrite halfRange_inRange_testbit by auto. rewrite Hbit. reflexivity. rewrite halfRange_inRange_testbit by auto. rewrite Hbit. reflexivity. + apply H1. rewrite halfRange_inRange_testbit by auto. rewrite Hbit. reflexivity. rewrite halfRange_inRange_testbit by auto. rewrite Hbit. reflexivity. * apply H0; reflexivity. Qed. Lemma nomatch_zero_smaller: forall {a} r1 r (P : a -> Prop) left right otherwise, (rBits r1 < rBits r)%N -> (rangeDisjoint r1 r = true -> P otherwise) -> (isSubrange r1 (halfRange r false) = true -> isSubrange r1 (halfRange r true) = false -> P left) -> (isSubrange r1 (halfRange r false) = false -> isSubrange r1 (halfRange r true) = true -> P right) -> P (if nomatch (NToInt (rPrefix r1)) (NToInt (rPrefix r)) (NToInt (rMask r)) then otherwise else if zero (NToInt (rPrefix r1)) (NToInt (rMask r)) then left else right). Proof. intros ????????. assert (rBits r1 <= rBits r)%N by Nomega. assert (forall h, rBits r1 <= rBits (halfRange r h))%N by (intros; rewrite rBits_halfRange; Nomega). rewrite <- smaller_not_subrange_disjoint_iff; auto. repeat rewrite <- smaller_inRange_iff_subRange by auto. apply nomatch_zero; only 1: Nomega. all: Int_Word_N; intuition. Qed. 
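(** Editorial worked example (not part of the original development): for the
    range [r := (3, 6)] we have [rPrefix r = 3 * 2^6 = 192], [rMask r = 2^5 = 32],
    and [inRange i r] holds exactly for [192 <= i <= 255].  By [nomatch_spec],
    [nomatch i (NToInt 192) (NToInt 32)] is the negation of that membership
    test, and by [zero_spec], [zero i (NToInt 32)] is [true] precisely when
    bit 5 of [intToN i] is clear, i.e. (for [i] inside the range) when [i]
    falls into the lower half [192..223] rather than the upper half
    [224..255]. *)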
(** Two ranges with the same size, are either the same, or they are disjoint *) Lemma same_size_compare: forall {a} r1 r2 (P : a -> Prop) same different, (rBits r1 = rBits r2) -> (r1 = r2 -> P same) -> (rangeDisjoint r1 r2 = true -> P different) -> P (if rPrefix r1 =? rPrefix r2 then same else different). Proof. intros. destruct (N.eqb_spec (rPrefix r1) (rPrefix r2)). * apply H0. apply rPrefix_rBits_range_eq; auto. * apply H1. apply different_prefix_same_bits_disjoint; auto. Qed. (** *** Verification of [branchMask] *) Lemma branchMask_spec: forall r1 r2, branchMask (NToInt (rPrefix r1)) (NToInt (rPrefix r2)) = NToInt (rMask (commonRangeDisj r1 r2)). Proof. intros. destruct r1 as [p1 b1], r2 as [p2 b2]. simpl. unfold branchMask. unfold msDiffBit. rewrite N.add_sub. Admitted. (* reflexivity. Qed. *) (** *** Verification of [mask] *) Lemma mask_spec: forall r1 r2, mask (NToInt (rPrefix r1)) (NToInt (rMask (commonRangeDisj r1 r2))) = NToInt (rPrefix (commonRangeDisj r1 r2)). Proof. intros. assert (0 < msDiffBit (rPrefix r1) (rPrefix r2))%N by apply msDiffBit_pos. destruct r1 as [p1 b1], r2 as [p2 b2]. unfold mask, maskW. cbn -[N.mul] in *. rewrite <- N.ldiff_ones_r by nonneg. Admitted. (* rewrite <- N.pow_succ_r'. rewrite <- N.add_1_r. rewrite N.sub_add by lia. rewrite N.sub_1_r. rewrite <- N.ones_equiv. reflexivity. Qed. *) (** *** Verification of [shorter] *) Lemma shorter_spec: forall r1 r2, (0 < rBits r1)%N -> (0 < rBits r2)%N -> shorter (NToInt (rMask r1)) (NToInt (rMask r2)) = (rBits r2 <? rBits r1)%N. Proof. intros. destruct r1 as [p1 b1], r2 as [p2 b2]. simpl in *. Admitted. (* change ((2 ^ (b2 - 1)) <? (2 ^ (b1 - 1)) = (b2 <? b1)). apply eq_true_iff_eq. rewrite !N.ltb_lt. rewrite <- N.pow_lt_mono_r_iff by Nomega. Nomega. Qed. *) (** *** Operation: [bitmapInRange] Looks up values, which are in the given range, as bits in the given bitmap. *) Definition bitmapInRange r bm i := if inRange i r then N.testbit (wordToN bm) (N.land i (N.ones (rBits r))) else false. Lemma bitmapInRange_outside: forall r bm i, inRange i r = false -> bitmapInRange r bm i = false. Proof. intros. unfold bitmapInRange. rewrite H. reflexivity. Qed. Lemma bitmapInRange_inside: forall r bm i, bitmapInRange r bm i = true -> inRange i r = true. Proof. intros. unfold bitmapInRange in *. destruct (inRange i r); auto. Qed. Lemma bitmapInRange_0: forall r i, bitmapInRange r (NToWord 0%N) i = false. Proof. intros. unfold bitmapInRange. destruct (inRange i r); auto. Admitted. (* Qed. *) Lemma bitmapInRange_lor: forall r bm1 bm2 i, bitmapInRange r (NToWord (N.lor bm1 bm2)) i = orb (bitmapInRange r (NToWord bm1) i) (bitmapInRange r (NToWord bm2) i). Proof. intros. unfold bitmapInRange. destruct (inRange i r); try reflexivity. Admitted. (* rewrite N.lor_spec; reflexivity. Qed. *) Lemma bitmapInRange_lxor: forall r bm1 bm2 i, bitmapInRange r (NToWord (N.lxor bm1 bm2)) i = xorb (bitmapInRange r (NToWord bm1) i) (bitmapInRange r (NToWord bm2) i). Proof. intros. unfold bitmapInRange. destruct (inRange i r); try reflexivity. Admitted. (* rewrite N.lxor_spec; reflexivity. Qed. *) Lemma bitmapInRange_lnot: (* Misses some precondition *) forall r bm i, bitmapInRange r (NToWord (N.lnot bm WIDTH)) i = negb (bitmapInRange r (NToWord bm) i). Admitted. Lemma bitmapInRange_land: forall r bm1 bm2 i, bitmapInRange r (NToWord (N.land bm1 bm2)) i = andb (bitmapInRange r (NToWord bm1) i) (bitmapInRange r (NToWord bm2) i). Proof. intros. unfold bitmapInRange. destruct (inRange i r); try reflexivity. Admitted. (* rewrite N.land_spec; reflexivity. 
Qed. *) Lemma bitmapInRange_ldiff: forall r bm1 bm2 i, bitmapInRange r (NToWord (N.ldiff bm1 bm2)) i = andb (bitmapInRange r (NToWord bm1) i) (negb (bitmapInRange r (NToWord bm2) i)). Proof. intros. unfold bitmapInRange. destruct (inRange i r); try reflexivity. Admitted. (* rewrite N.ldiff_spec; reflexivity. Qed. *) Lemma bitmapInRange_bitmapOf: forall e i, bitmapInRange (N.shiftr (intToN e) 6, N.log2 WIDTH) (bitmapOf e) i = (i =? (intToN e)). Proof. intros. unfold bitmapInRange, inRange. simpl id. rewrite <- andb_lazy_alt. unfold bitmapOf, suffixOf, suffixBitMask. unfoldMethods. rewrite bitmapOfSuffix_pow. rewrite -> N.pow2_bits_eqb by nonneg. rewrite -> N.eqb_sym. Int_Word_N. Admitted. (* rewrite <- N_eq_shiftr_land_ones. apply N.eqb_sym. Qed. *) Lemma bitmapInRange_pow: forall r e i, (e < 2^rBits r)%N -> bitmapInRange r (NToWord (2 ^ e))%N i = (rPrefix r + e =? i). Proof. intros. destruct r as [p b]. unfold bitmapInRange. simpl in *. destruct (N.eqb_spec (N.shiftr i b) p). Admitted. (* * rewrite N.pow2_bits_eqb. transitivity (e =? N.land i (N.ones b)). - rewrite eq_iff_eq_true. rewrite !N.eqb_eq. intuition. - rewrite eq_iff_eq_true. rewrite !N.eqb_eq. rewrite N.land_ones by nonneg. rewrite N.shiftr_div_pow2 in e0 by nonneg. rewrite N.div_mod with (a := i) (b := 2^b) at 2 by (apply N.pow_nonzero; Nomega). rewrite N.shiftl_mul_pow2 by nonneg. rewrite N.mul_comm. subst; Nomega. * symmetry. rewrite N.eqb_neq. contradict n. subst. rewrite N.shiftr_div_pow2 by nonneg. rewrite N.shiftl_mul_pow2 by nonneg. rewrite N.div_add_l by (apply N.pow_nonzero; Nomega). enough (e/2^b = 0) by lia. apply N.div_small. assumption. Qed. *) Lemma bitmapInRange_ones: forall r n i, rBits r = N.log2 WIDTH -> inRange i r = true -> bitmapInRange r (NToWord (N.ones n)) i = (intToN (suffixOf (NToInt i)) <? n). Proof. intros ?????. unfold bitmapInRange; rewrite H0. unfold suffixOf, suffixBitMask, WIDTH in *. rewrite H; clear H. unfoldMethods. simpl. rewrite eq_iff_eq_true. rewrite N.ltb_lt. Admitted. (* rewrite N.ones_spec_iff. intros. reflexivity. Qed. *) (** *** Operation: [intoRange] This is the inverse of bitmapInRange, in a way. *) Definition intoRange r i := N.lor (rPrefix r) i. Definition inRange_intoRange: forall r i, (i < 2^(rBits r))%N -> inRange (intoRange r i) r = true. Proof. intros. destruct r as [p b]; unfold intoRange, inRange, rPrefix, rBits, snd in *; subst. rewrite N.shiftr_lor. rewrite N.shiftr_shiftl_l by lia. replace (_ - _) with 0 by lia. rewrite N.shiftr_div_pow2 by nonneg. rewrite N.div_small. rewrite N.lor_0_r, N.shiftl_0_r. apply N.eqb_refl. assumption. Qed. Definition bitmapInRange_intoRange: forall r i bm, (i < 2^(rBits r))%N -> bitmapInRange r bm (intoRange r i) = N.testbit (wordToN bm) i. Proof. intros. unfold bitmapInRange. rewrite inRange_intoRange by assumption. f_equal. destruct r as [p b]; unfold intoRange, inRange, rPrefix, rBits, snd in *; subst. rewrite N.land_lor_distr_l. rewrite N_land_shiftl_ones. rewrite N.lor_0_l. rewrite !N.land_ones by nonneg. rewrite N.mod_small by assumption. reflexivity. Qed. (** *** Operation: [isTipPrefix] A Tip prefix is a number with [N.log2 WIDTH] zeros at the end. *) Definition isTipPrefix (p : N) := N.land p (intToN suffixBitMask) = 0. Lemma isTipPrefix_suffixMask: forall p, isTipPrefix p -> N.land p (intToN suffixBitMask) = 0. Proof. intros. apply H. Qed. Lemma isTipPrefix_prefixOf: forall e, isTipPrefix (intToN (prefixOf e)). Proof. intros. unfold isTipPrefix, prefixOf, suffixBitMask. unfoldMethods. Admitted. (* rewrite N.land_ones. 
rewrite N.ldiff_ones_r. rewrite N.shiftl_mul_pow2. apply N.mod_mul. intro Htmp; inversion Htmp. Qed. *) Lemma isTipPrefix_shiftl_shiftr: forall p, isTipPrefix p -> p = N.shiftl (N.shiftr p 6) 6. Proof. intros. rewrite <- N.ldiff_ones_r. symmetry. etransitivity. Focus 2. apply N.lor_ldiff_and. pose proof (isTipPrefix_suffixMask p H). unfold suffixBitMask in H0. rewrite H0. rewrite N.lor_0_r. Admitted. (* reflexivity. Qed. *) (** *** Operation: [isBitMask] A Tip bit mask is a non-zero number with [WIDTH] bits. *) Definition isBitMask (bm : N) := (0 < bm /\ bm < 2^WIDTH)%N. (** Sometimes, we need to allow zero. *) Definition isBitMask0 (bm : N) := (bm < 2^WIDTH)%N. Create HintDb isBitMask. Ltac isBitMask := solve [auto with isBitMask]. Lemma isBitMask_isBitMask0: forall bm, isBitMask bm -> isBitMask0 bm. Proof. intros. unfold isBitMask0, isBitMask in *. intuition. Qed. Hint Resolve isBitMask_isBitMask0 : isBitMask. Lemma isBitMask0_zero_or_isBitMask: forall bm, isBitMask0 bm <-> (bm = 0%N \/ isBitMask bm). Proof. intros. unfold isBitMask, isBitMask0. assert (0 <= bm)%N by nonneg. rewrite N.lt_eq_cases in H. intuition; subst; reflexivity. Qed. Lemma isBitMask_isBitMask_and_noneg: forall bm, isBitMask bm <-> (bm <> 0%N /\ isBitMask0 bm). Proof. intros. unfold isBitMask, isBitMask0. Nomega. Qed. Lemma isBitMask_testbit: forall bm, isBitMask bm -> (exists i, i < WIDTH /\ N.testbit bm i = true)%N. Proof. intros. exists (N.log2 bm); intuition. * destruct H. destruct (N.lt_decidable 0%N (N.log2 bm)). - apply N.log2_lt_pow2; try assumption. - assert (N.log2 bm = 0%N) by (destruct (N.log2 bm); auto; contradict H1; reflexivity). rewrite H2. reflexivity. * apply N.bit_log2. unfold isBitMask in *. destruct bm; simpl in *; intuition; compute in H1; congruence. Qed. Lemma isBitMask_lor: forall bm1 bm2, isBitMask bm1 -> isBitMask bm2 -> isBitMask (N.lor bm1 bm2). Proof. intros. assert (0 < N.lor bm1 bm2)%N. * destruct (isBitMask_testbit bm1 H) as [j[??]]. assert (N.testbit (N.lor bm1 bm2) j = true) by (rewrite N.lor_spec, H2; auto). enough (0 <> N.lor bm1 bm2)%N by (destruct (N.lor bm1 bm2); auto; try congruence; apply pos_pos). contradict H3; rewrite <- H3. rewrite N.bits_0. congruence. * split; try assumption. unfold isBitMask in *; destruct H, H0. rewrite N_lt_pow2_testbits in *. intros j?. rewrite N.lor_spec. rewrite H2, H3 by assumption. reflexivity. Qed. Hint Resolve isBitMask_lor : isBitMask. Lemma isBitMask0_land: forall bm1 bm2, isBitMask0 bm1 -> isBitMask0 bm2 -> isBitMask0 (N.land bm1 bm2). Proof. intros. unfold isBitMask0 in *. rewrite N_lt_pow2_testbits in *. intros j?. rewrite N.land_spec. rewrite H, H0 by assumption. reflexivity. Qed. Hint Resolve isBitMask0_land : isBitMask. Lemma isBitMask0_lxor: forall bm1 bm2, isBitMask0 bm1 -> isBitMask0 bm2 -> isBitMask0 (N.lxor bm1 bm2). Proof. intros. unfold isBitMask0 in *. rewrite N_lt_pow2_testbits in *. intros j?. rewrite N.lxor_spec. rewrite H, H0 by assumption. reflexivity. Qed. Hint Resolve isBitMask0_lxor : isBitMask. Lemma isBitMask0_ldiff: forall bm1 bm2, isBitMask0 bm1 -> isBitMask0 (N.ldiff bm1 bm2). Proof. intros. unfold isBitMask0 in *. rewrite N_lt_pow2_testbits in *. intros j?. rewrite N.ldiff_spec. rewrite H by assumption. reflexivity. Qed. Hint Resolve isBitMask0_ldiff : isBitMask. Lemma isBitMask0_lor: forall bm1 bm2, isBitMask0 bm1 -> isBitMask0 bm2 -> isBitMask0 (N.lor bm1 bm2). Proof. intros. unfold isBitMask0 in *. rewrite N_lt_pow2_testbits in *. intros j?. rewrite N.lor_spec. rewrite H, H0 by assumption. reflexivity. Qed. 
Hint Resolve isBitMask0_lor : isBitMask. Lemma isBitMask0_ones: forall n, (n <= WIDTH)%N -> isBitMask0 (N.ones n). Proof. intros. induction n using N.peano_ind; simpl. constructor. rewrite N.ones_equiv. unfold isBitMask0. apply N.pow_le_mono_r with (a:=2%N) in H; [|Nomega]. assert (0 < 2 ^ N.succ n)%N. apply N_pow_pos_nonneg. constructor. Nomega. Qed. Hint Resolve isBitMask0_ones : isBitMask. Lemma isBitMask_bitmapOf: forall e, isBitMask (wordToN (bitmapOf e)). Proof. intros. Admitted. (* unfold isBitMask, bitmapOf, suffixOf, suffixBitMask, shiftLL. unfold op_zizazi__, Bits.complement, Bits__N, instance_Bits_Int, complement_Int. unfold fromInteger, Num_Word__. rewrite bitmapOfSuffix_pow. rewrite N.land_ones. constructor. * apply N_pow_pos_nonneg. reflexivity. * apply N.pow_lt_mono_r. reflexivity. apply N.mod_upper_bound; compute; congruence. Qed. *) Hint Resolve isBitMask_bitmapOf : isBitMask. Lemma isBitMask0_outside: forall bm i, isBitMask0 bm -> (WIDTH <= i)%N -> N.testbit bm i = false. Proof. intros. unfold isBitMask0 in H. rewrite N_lt_pow2_testbits in H. intuition. Qed. Lemma isBitMask_log2_lt_WIDTH: forall bm, isBitMask bm -> (N.log2 bm < WIDTH)%N. Proof. intros. apply N.log2_lt_pow2; apply H. Qed. Hint Resolve isBitMask_log2_lt_WIDTH : isBitMask. Lemma isBitMask_ctz_lt_WIDTH: forall bm, isBitMask0 bm -> (N_ctz bm < WIDTH)%N. Proof. intros. destruct (N.ltb_spec (N_ctz bm) WIDTH); try assumption; exfalso. assert (bm = 0%N). { apply N.bits_inj; intro j. rewrite N.bits_0. destruct (N.ltb_spec j WIDTH). + apply N_bits_below_ctz; Nomega. + apply isBitMask0_outside; assumption. } subst. unfold WIDTH in H0. simpl in H0. Nomega. Qed. Hint Resolve isBitMask_ctz_lt_WIDTH : isBitMask. (** *** Verification of [revNat] *) Require RevNatSlowProofs. Lemma revNat_spec: forall n i, (i < WIDTH)%N -> N.testbit (wordToN (revNat n)) i = N.testbit (wordToN n) (WIDTH - 1 - i)%N. Proof. Admitted. (* exact (RevNatSlowProofs.revNat_spec). Qed. *) Lemma isBitMask0_revNat: forall n, isBitMask0 (wordToN (revNat n)). Proof. Admitted. (* exact (RevNatSlowProofs.isBitMask0_revNat). Qed. *) Hint Resolve isBitMask0_revNat : isBitMask. Lemma isBitMask0_clearbit: forall n i, isBitMask0 n -> isBitMask0 (N.clearbit n i). Proof. intros. unfold isBitMask0 in *. eapply N.le_lt_trans. apply clearbit_le. assumption. Qed. Hint Resolve isBitMask0_clearbit : isBitMask. Lemma clearbit_revNat: forall n i, (i < WIDTH)%N -> N.clearbit (wordToN (revNat n)) i = wordToN (revNat (NToWord ((N.clearbit (wordToN n) (WIDTH - 1 - i)))))%N. Proof. intros. apply N.bits_inj. intro j. destruct (N.ltb_spec j WIDTH). * rewrite !revNat_spec by assumption. rewrite !N.clearbit_eqb. rewrite !revNat_spec by assumption. destruct (N.eqb_spec i j), (N.eqb_spec (WIDTH - 1 - i) (WIDTH - 1 - j))%N; try reflexivity; try Nomega. Admitted. (* * rewrite !isBitMask0_outside by isBitMask. reflexivity. Qed. *) Lemma revNat_eq_0: forall bm, (wordToN (revNat bm) = 0)%N <-> (wordToN bm = 0)%N. Proof. intros. split; intro. * apply N.bits_inj; intro j. destruct (N.ltb_spec j WIDTH). Admitted. (* - apply N.bits_inj_iff in H0. specialize (H0 (WIDTH - 1 - j)%N). rewrite N.bits_0 in *. rewrite revNat_spec in H0 by (assumption || Nomega). replace (WIDTH - 1 - (WIDTH - 1 - j))%N with j in H0 by Nomega. assumption. - rewrite N.bits_0 in *. apply isBitMask0_outside; auto. * subst. reflexivity. Qed. *) Lemma revNat_eqb_0: forall bm,(wordToN (revNat bm) =? 0)%N = (wordToN bm =? 0)%N. Proof. intros. rewrite eq_iff_eq_true. rewrite !N.eqb_eq. apply revNat_eq_0. Qed. 
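(** Editorial aside (not part of the original development), as a concrete
    reading of [revNat_spec]: [revNat] mirrors a 64-bit word, so bit [i] of the
    result equals bit [63 - i] of the input for [i < WIDTH].  For instance,
    reversing a word whose only set bit is bit 0 yields the word whose only set
    bit is bit 63.  In particular the lowest set bit of [bm] becomes the
    highest set bit of [revNat bm], which is why a count of trailing zeros can
    be recovered as [WIDTH - 1] minus the [N.log2] of the reversed word
    (cf. the commented-out [N_ctz_log2] lemma below). *)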
Lemma isBitMask_revNat: forall n, isBitMask (wordToN n) -> isBitMask (wordToN (revNat n)). Proof. intros. rewrite isBitMask_isBitMask_and_noneg in *. intuition. rewrite revNat_eq_0 in H by assumption. intuition. Qed. Hint Resolve isBitMask_revNat : isBitMask. Lemma revNat_revNat: forall n, revNat (revNat n) = n. Proof. intros. Admitted. (* apply N.bits_inj_iff; intro i. destruct (N.ltb_spec i WIDTH). * rewrite !revNat_spec; try isBitMask. replace (WIDTH - 1 - (WIDTH - 1 - i))%N with i by Nomega. reflexivity. Nomega. * rewrite !isBitMask0_outside; isBitMask. Qed. *) Lemma revNat_lxor: forall n m, isBitMask0 n -> isBitMask0 m -> revNat (NToWord (N.lxor n m)) = NToWord (N.lxor (wordToN (revNat (NToWord n))) (wordToN (revNat (NToWord m)))). Proof. intros. Admitted. (* apply N.bits_inj_iff; intro i. destruct (N.ltb_spec i WIDTH). * rewrite !revNat_spec, !N.lxor_spec, !revNat_spec; isBitMask. * rewrite N.lxor_spec. rewrite !isBitMask0_outside; isBitMask. Qed.*) Lemma revNat_ldiff: forall n m, isBitMask0 n -> isBitMask0 m -> revNat (NToWord (N.ldiff n m)) = NToWord (N.ldiff (wordToN (revNat (NToWord n))) (wordToN (revNat (NToWord m)))). Proof. intros. Admitted. (* apply N.bits_inj_iff; intro i. destruct (N.ltb_spec i WIDTH). * rewrite !revNat_spec, !N.ldiff_spec, !revNat_spec; isBitMask. * rewrite N.ldiff_spec. rewrite !isBitMask0_outside; isBitMask. Qed. *) Lemma pow_isBitMask: forall i, (i < WIDTH)%N -> isBitMask (2^i)%N. Proof. intros. split. * apply N_pow_pos_nonneg; Nomega. * apply N.pow_lt_mono_r; Nomega. Qed. Hint Resolve pow_isBitMask : isBitMask. Lemma revNat_pow: forall i, (i < WIDTH)%N -> (revNat (NToWord (2 ^ i)) = NToWord (2 ^ (WIDTH - 1 - i)))%N. Proof. intros. Admitted. (* apply N.bits_inj_iff; intro j. destruct (N.ltb_spec j WIDTH). * rewrite !revNat_spec by assumption. rewrite !N.pow2_bits_eqb. rewrite eq_iff_eq_true. rewrite !N.eqb_eq. Nomega. * rewrite isBitMask0_outside by isBitMask. symmetry. apply N.pow2_bits_false. Nomega. Qed. *) (** *** Verification of [highestBitMask] and [lowestBitMask] *) (** And the operations they are based on, [N.log2] and [N_ctz]. *) (* Lemma N_ctz_log2: forall bm, isBitMask bm -> N_ctz bm = (WIDTH - 1 - N.log2 (revNatSafe bm))%N. Proof. intros. apply N_ctz_bits_unique. * apply H. * rewrite <- revNat_spec by isBitMask. apply N.bit_log2. rewrite revNat_eq_0 by isBitMask. unfold isBitMask in H; Nomega. * intros. rewrite <- (revNat_revNat bm) by isBitMask. rewrite revNat_spec by Nomega. apply N.bits_above_log2. Nomega. Qed. Lemma N_log2_ctz: forall bm, isBitMask bm -> N.log2 bm = (WIDTH - 1 - N_ctz (revNatSafe bm))%N. Proof. intros. rewrite N_ctz_log2 by isBitMask. rewrite revNat_revNat by isBitMask. assert (N.log2 bm < WIDTH)%N by isBitMask. Nomega. Qed. *) Lemma isBitMask0_lowestBitMask: forall bm, isBitMask0 bm -> isBitMask0 (wordToN (lowestBitMask (NToWord bm))). Proof. intros. unfold lowestBitMask. unfold isBitMask0 in *. Admitted. (* apply N.pow_lt_mono_r; try Nomega. isBitMask. Qed. *) Hint Resolve isBitMask0_lowestBitMask : isBitMask. Lemma isBitMask_highestBitMask: forall bm, isBitMask bm -> isBitMask (wordToN (highestBitMask (NToWord bm))). Proof. intros. split. Admitted. (* * change (0 < 2^N.log2 bm)%N. apply N_pow_pos_nonneg; Nomega. * apply N.pow_lt_mono_r. Nomega. isBitMask. Qed. Hint Resolve isBitMask_highestBitMask : isBitMask. *) Lemma lxor_pow2_clearbit: forall a i, N.testbit a i = true -> N.lxor a (2 ^ i)%N = N.clearbit a i. Proof. intros. apply N.bits_inj. intro j. rewrite N.lxor_spec, N.pow2_bits_eqb, N.clearbit_eqb. 
destruct (N.eqb_spec i j). * subst. destruct (N.testbit _ _) eqn:?; try reflexivity; congruence. * destruct (N.testbit _ _) eqn:?; try reflexivity. Qed. (* Lemma lxor_lowestBitMask: forall bm, isBitMask bm -> N.lxor bm (lowestBitMask bm) = N.clearbit bm (N_ctz bm). Proof. intros. apply lxor_pow2_clearbit. apply N_bit_ctz. unfold isBitMask in *. Nomega. Qed. *) Lemma split_highestBitMask: forall bm, isBitMask bm -> bm = N.lor (N.clearbit bm (N.log2 bm)) (2^(N.log2 bm))%N. Proof. intros. apply N.bits_inj; intro j. rewrite N.lor_spec, N.clearbit_eqb. rewrite !N.pow2_bits_eqb. destruct (N.eqb_spec (N.log2 bm) j). * subst. destruct (N.testbit _ _) eqn:?; try reflexivity; exfalso. rewrite N.bit_log2 in Heqb by (unfold isBitMask in *; Nomega). congruence. * destruct (N.testbit _ _) eqn:?; try reflexivity. Qed. (** *** Bitmasks with one bit *) Lemma clearbit_log2_0: forall bm, isBitMask bm -> N.clearbit bm (N.log2 bm) = 0%N -> bm = (2^N.log2 bm)%N. Proof. intros. apply N.bits_inj; intro j. rewrite N.pow2_bits_eqb. apply N.bits_inj_iff in H0. specialize (H0 j). rewrite N.clearbit_eqb in H0. rewrite N.bits_0 in H0. destruct (N.eqb_spec (N.log2 bm) j). * subst. apply N.bit_log2. destruct H; Nomega. * simpl in H0. rewrite andb_true_r in H0. assumption. Qed. (* Lemma clearbit_ctz_0: forall bm, isBitMask bm -> N.clearbit bm (N_ctz bm) = 0%N -> bm = (2^N_ctz bm)%N. Proof. intros. apply N.bits_inj; intro j. rewrite N.pow2_bits_eqb. apply N.bits_inj_iff in H0. specialize (H0 j). rewrite N.clearbit_eqb in H0. rewrite N.bits_0 in H0. destruct (N.eqb_spec (N_ctz bm) j). * subst. apply N_bit_ctz. destruct H; Nomega. * simpl in H0. rewrite andb_true_r in H0. assumption. Qed. *) (** *** Bitmasks with more than one bit *) Definition hasTwoBits bm := isBitMask bm /\ N.clearbit bm (N_ctz bm) <> 0%N. Lemma isBitMask_twoBits: forall bm, hasTwoBits bm -> isBitMask bm. Proof. intros. apply H. Qed. Hint Immediate isBitMask_twoBits : isBitMask. Lemma hasTwoBits_revNat: forall bm, hasTwoBits bm -> hasTwoBits (wordToN (revNat (NToWord bm))). Admitted. (* Proof. intros. unfold hasTwoBits in *. destruct H. split; try isBitMask. contradict H0. rewrite clearbit_revNat in H0 by isBitMask. rewrite revNat_eq_0 in H0 by isBitMask. rewrite <- N_log2_ctz in H0 by isBitMask. apply clearbit_log2_0 in H0; try isBitMask. rewrite H0; clear H0. rewrite N_ctz_pow2. apply clearbit_pow2_0. Qed. *) Lemma log2_clearbit_ctz: forall bm, hasTwoBits bm -> N.log2 (N.clearbit bm (N_ctz bm)) = N.log2 bm. Proof. intros. destruct H. apply N.log2_bits_unique. * rewrite N.clearbit_eqb. rewrite N.bit_log2 by (unfold isBitMask in H; Nomega). rewrite andb_true_l. rewrite negb_true_iff. rewrite N.eqb_neq. contradict H0. apply N.bits_inj. intro j. rewrite N.bits_0. rewrite N.clearbit_eqb. destruct (N.ltb_spec j WIDTH). - destruct (N.eqb_spec (N_ctz bm) j). + subst. simpl. apply andb_false_r. + enough (N.testbit bm j = false) by (replace (N.testbit bm j); apply andb_false_l). destruct (N.ltb_spec j (N.log2 bm)). ** apply N_bits_below_ctz. Nomega. ** apply N.bits_above_log2. Nomega. - rewrite isBitMask0_outside by isBitMask. apply andb_false_l. * intros j Hj. rewrite N.clearbit_eqb. rewrite N.bits_above_log2 by assumption. reflexivity. Qed. Lemma ctz_clearbit_log2: forall bm, hasTwoBits bm -> N_ctz (N.clearbit bm (N.log2 bm)) = N_ctz bm. Proof. intros. destruct H. apply N_ctz_bits_unique. * enough (N.clearbit bm (N.log2 bm) <> 0)%N by Nomega. contradict H0. apply clearbit_log2_0 in H0; try assumption. rewrite H0. rewrite N_ctz_pow2. rewrite clearbit_pow2_0. 
reflexivity. * rewrite N.clearbit_eqb. rewrite N_bit_ctz by (unfold isBitMask in H; Nomega). rewrite andb_true_l. rewrite negb_true_iff. rewrite N.eqb_neq. contradict H0. apply N.bits_inj. intro j. rewrite N.bits_0. rewrite N.clearbit_eqb. destruct (N.ltb_spec j WIDTH). - destruct (N.eqb_spec (N_ctz bm) j). + subst. simpl. apply andb_false_r. + enough (N.testbit bm j = false) by (replace (N.testbit bm j); apply andb_false_l). destruct (N.ltb_spec j (N.log2 bm)). ** apply N_bits_below_ctz. Nomega. ** apply N.bits_above_log2. Nomega. - rewrite isBitMask0_outside by isBitMask. apply andb_false_l. * intros j Hj. rewrite N.clearbit_eqb. rewrite N_bits_below_ctz by assumption. reflexivity. Qed. Lemma isBitMask_clearbit_twoBits: forall bm, hasTwoBits bm -> isBitMask (N.clearbit bm (N.log2 bm)). Proof. intros. rewrite isBitMask_isBitMask_and_noneg; split. * destruct H. contradict H0. apply clearbit_log2_0 in H0; try isBitMask. rewrite H0. rewrite N_ctz_pow2. apply clearbit_pow2_0. * destruct H. isBitMask. Qed. Hint Resolve isBitMask_clearbit_twoBits : isBitMask. (** *** Induction along a bitmask *) Lemma bits_ind: forall bm (P : N -> Prop), isBitMask0 bm -> P (0%N) -> (forall bm, isBitMask bm -> P (N.clearbit bm (N.log2 bm)) -> P bm) -> P bm. Proof. intros bm P Hbm HP0 HPstep. revert Hbm. apply well_founded_ind with (R := N.lt) (a := bm); try apply N.lt_wf_0. clear bm. intros bm IH Hbm0. destruct (N.eqb_spec bm 0%N). * subst. apply HP0. * assert (0 < bm)%N by Nomega. assert (Hbm : isBitMask bm) by (unfold isBitMask in Hbm0; unfold isBitMask; auto). clear H Hbm0. apply HPstep; auto. apply IH. - apply clearbit_lt. apply N.bit_log2. assumption. - isBitMask. Qed. Lemma bits_ind_up: forall bm (P : N -> Prop), isBitMask0 bm -> P (0%N) -> (forall bm, isBitMask bm -> P (N.clearbit bm (N_ctz bm)) -> P bm) -> P bm. Proof. intros bm P Hbm HP0 HPstep. revert Hbm. apply well_founded_ind with (R := N.lt) (a := bm); try apply N.lt_wf_0. clear bm. intros bm IH Hbm0. destruct (N.eqb_spec bm 0%N). * subst. apply HP0. * assert (0 < bm)%N by Nomega. assert (Hbm : isBitMask bm) by (unfold isBitMask in Hbm0; unfold isBitMask; auto). clear H Hbm0. apply HPstep; auto. apply IH. - apply clearbit_lt. apply N_bit_ctz. Nomega. - isBitMask. Qed. (** *** Lemmas about [popcount] *) Lemma popcount_N_0: N_popcount 0%N = 0%N. Proof. reflexivity. Qed. Lemma popCount_N_bm: forall bm, (0 < bm)%N -> N_popcount bm = N.succ (N_popcount (N.clearbit bm (N.log2 bm)%N)). Proof. intros. rewrite N.clearbit_spec'. pose proof (N_popcount_diff bm (2^N.log2 bm)%N). replace (N.land (2 ^ N.log2 bm) bm)%N with (2 ^ N.log2 bm)%N in *; only 1: replace (N.ldiff (2 ^ N.log2 bm) bm)%N with 0%N in *. * rewrite N_popcount_pow2 in *. simpl N_popcount in *. simpl N.double in *. Nomega. * symmetry. apply N.bits_inj; intro j. rewrite N.bits_0. rewrite N.ldiff_spec. rewrite N.pow2_bits_eqb. destruct (N.eqb_spec (N.log2 bm) j). + subst. rewrite N.bit_log2 by Nomega. reflexivity. + reflexivity. * symmetry. apply N.bits_inj; intro j. rewrite N.land_spec. rewrite N.pow2_bits_eqb. destruct (N.eqb_spec (N.log2 bm) j). + subst. rewrite N.bit_log2 by Nomega. reflexivity. + reflexivity. Qed. (** ** Well-formed IntSets. This section introduces the predicate to describe the well-formedness of an IntSet. It has parameters that describe the range that this set covers, and a function that carries it denotation. This way, invariant preservation and functional correctness of an operation can be expressed in one go. 
*) Inductive Desc : IntSet -> range -> (N -> bool) -> Prop := | DescTip : forall p bm r f, p = NToInt (rPrefix r) -> rBits r = N.log2 WIDTH -> (forall i, f i = bitmapInRange r bm i) -> isBitMask (wordToN bm) -> Desc (Tip p bm) r f | DescBin : forall s1 r1 f1 s2 r2 f2 p msk r f, Desc s1 r1 f1 -> Desc s2 r2 f2 -> (0 < rBits r)%N -> isSubrange r1 (halfRange r false) = true -> isSubrange r2 (halfRange r true) = true -> p = NToInt (rPrefix r) -> msk = NToInt (rMask r) -> (forall i, f i = f1 i || f2 i) -> Desc (Bin p msk s1 s2) r f. (** A variant that also allows [Nil], or sets that do not cover the full given range, but are certainly contained in them. This is used to describe operations that may delete elements. *) Inductive Desc0 : IntSet -> range -> (N -> bool) -> Prop := | Desc0Nil : forall r f, (forall i, f i = false) -> Desc0 Nil r f | Desc0NotNil : forall s r f r' f', forall (HD : Desc s r f), forall (Hsubrange: isSubrange r r' = true) (Hf : forall i, f' i = f i), Desc0 s r' f'. (** A variant that also allows [Nil] and does not reqiure a range. Used for the top-level specification. *) Inductive Sem : IntSet -> (N -> bool) -> Prop := | SemNil : forall f, (forall i, f i = false) -> Sem Nil f | DescSem : forall s r f (HD : Desc s r f), Sem s f. (** The highest level: Just well-formedness. *) Definition WF (s : IntSet) : Prop := exists f, Sem s f. (** ** Lemmas related to well-formedness *) (** All of these respect extensionality of [f] *) Lemma Desc_change_f: forall s r f f', Desc s r f -> (forall i, f' i = f i) -> Desc s r f'. Proof. intros. induction H. * eapply DescTip; try eassumption. intro i. rewrite H0, H2. reflexivity. * eapply DescBin; try eassumption. intro i. rewrite H0, H7. reflexivity. Qed. Lemma Sem_change_f: forall s f f', Sem s f -> (forall i, f' i = f i) -> Sem s f'. Proof. intros. destruct H. * apply SemNil. intro i. rewrite H0, H. reflexivity. * eapply DescSem. eapply Desc_change_f. eassumption. intro i. rewrite H0. reflexivity. Qed. Lemma Desc_Desc0: forall s r f, Desc s r f -> Desc0 s r f. Proof. intros. eapply Desc0NotNil. * eassumption. * apply isSubrange_refl. * intro. reflexivity. Qed. Lemma Desc0_Sem: forall s r f, Desc0 s r f -> Sem s f. Proof. intros. destruct H. * apply SemNil; eassumption. * eapply DescSem. eapply Desc_change_f. eassumption. assumption. Qed. Lemma Desc0_WF: forall s r f, Desc0 s r f -> WF s. Proof. intros. eexists. eapply Desc0_Sem. eassumption. Qed. Lemma Desc_larger_WIDTH: forall {s r f}, Desc s r f -> (N.log2 WIDTH <= rBits r)%N. Proof. intros ??? HD. induction HD; subst. * destruct r. simpl in *. subst. reflexivity. * etransitivity. apply IHHD1. etransitivity. eapply subRange_smaller. eassumption. eapply subRange_smaller. apply isSubrange_halfRange. assumption. Qed. Lemma Desc_outside: forall {s r f i}, Desc s r f -> inRange i r = false -> f i = false. Proof. intros ???? HD Houtside. induction HD;subst. * rewrite H1. apply bitmapInRange_outside; auto. * rewrite H4; clear H4. rewrite IHHD1 by inRange_false. rewrite IHHD2 by inRange_false. reflexivity. Qed. Lemma Desc_inside: forall {s r f i}, Desc s r f -> f i = true -> inRange i r = true. Proof. intros ???? HD Hf. destruct (inRange i r) eqn:?; intuition. rewrite (Desc_outside HD) in Hf by assumption. congruence. Qed. Lemma Desc0_outside: forall {s r f i}, Desc0 s r f -> inRange i r = false -> f i = false. Proof. intros. destruct H; auto. rewrite Hf. rewrite (Desc_outside HD) by inRange_false. reflexivity. Qed. 
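(** Editorial example (not part of the original development) of how [Desc]
    reads on a leaf: for the range [r := (3, 6)] (so [rBits r = N.log2 WIDTH])
    the judgement [Desc (Tip (NToInt 192) bm) r f] says that [f i] is [true]
    exactly when [192 <= i <= 255] and bit [i - 192] of [bm] is set, because
    [bitmapInRange r bm i] tests bit [N.land i (N.ones 6)] of the bitmap, and
    [bm] is required to be a non-zero 64-bit mask.  [Desc0] additionally allows
    the empty set, [Sem] hides the range, and [WF] merely asserts that some
    such characteristic function exists. *)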
Lemma Desc0_subRange:
  forall {s r r' f}, Desc0 s r f -> isSubrange r r' = true -> Desc0 s r' f.
Proof.
  intros.
  induction H.
  * apply Desc0Nil; assumption.
  * eapply Desc0NotNil; try eassumption.
    isSubrange_true.
Qed.

Lemma isBitMask_bitmapInRange:
  forall r bm, rBits r = N.log2 WIDTH -> isBitMask (wordToN bm) ->
    exists i, bitmapInRange r bm i = true.
Proof.
  intros.
  destruct (isBitMask_testbit _ H0) as [j[??]].
  exists (intoRange r j).
  rewrite bitmapInRange_intoRange; try assumption.
  replace (rBits r); assumption.
Qed.

(** The [Desc] predicate only holds for non-empty sets. *)
Lemma Desc_some_f:
  forall {s r f}, Desc s r f -> exists i, f i = true.
Proof.
  intros ??? HD.
  induction HD; subst.
  + destruct (isBitMask_bitmapInRange _ _ H0 H2) as [j ?].
    exists j.
    rewrite H1.
    assumption.
  + destruct IHHD1 as [j?].
    exists j.
    rewrite H4.
    rewrite H2.
    reflexivity.
Qed.

(** The [Desc] predicate is right_unique *)
Lemma Desc_unique_f:
  forall {s r1 f1 r2 f2}, Desc s r1 f1 -> Desc s r2 f2 -> (forall i, f1 i = f2 i).
Proof.
  intros ????? HD.
  revert r2 f2.
  induction HD; subst.
  + intros r2 f2 HD2 i.
    inversion_clear HD2.
    (* assert (r = r2) by (apply rPrefix_rBits_range_eq; congruence); subst. *)
    assert (r = r2). apply rPrefix_rBits_range_eq; admit.
    rewrite H1, H4. subst. reflexivity.
  + intros r3 f3 HD3 i.
    inversion_clear HD3.
    rewrite H10, H4.
    erewrite IHHD1 by eassumption.
    erewrite IHHD2 by eassumption.
    reflexivity.
Admitted.

(** *** Tactics *)

(** This auxiliary tactic destructs one boolean atom in the argument *)

Ltac split_bool_go expr :=
  lazymatch expr with
    | true       => fail
    | false      => fail
    | Some _     => fail
    | None       => fail
    | match ?x with _ => _ end => split_bool_go x || (simpl x; cbv match)
    | negb ?x      => split_bool_go x
    | ?x && ?y     => split_bool_go x || split_bool_go y
    | ?x || ?y     => split_bool_go x || split_bool_go y
    | xorb ?x ?y   => split_bool_go x || split_bool_go y
    | oro ?x ?y    => split_bool_go x || split_bool_go y
    | ?bexpr       => destruct bexpr eqn:?
  end.

(** This auxiliary tactic destructs one boolean or option atom in the goal *)

Ltac split_bool :=
  match goal with
    | [ |- ?lhs = ?rhs] => split_bool_go lhs || split_bool_go rhs
  end.

(** This tactic solves goals of the form
    [ forall i, f1 i = f2 i || f3 i ]
    by introducing [i], rewriting with all premises of the form
    [forall i, f1 i = … ]
    and then destructing on all boolean atoms. It leaves unsolved cases
    as subgoals. *)

Ltac solve_f_eq :=
  let i := fresh "i" in
  intro i; simpl;
  repeat
    ( (rewrite bitmapInRange_lxor; Int_Word_N)
    + (rewrite bitmapInRange_lnot; Int_Word_N)
    + (rewrite bitmapInRange_land; Int_Word_N)
    + (rewrite bitmapInRange_lor; Int_Word_N)
    + match goal with
      | [ H : forall i : N, ?f i = _ |- context [?f i] ] => rewrite H
      end);
  repeat split_bool;
  try reflexivity.

Ltac point_to_inRange :=
  lazymatch goal with
    | [ HD : Desc ?s ?r ?f, Hf : ?f ?i = true |- _ ]
      => apply (Desc_inside HD) in Hf
    | [ H : bitmapInRange ?r ?bm ?i = true |- _ ]
      => apply bitmapInRange_inside in H
  end.

Ltac pose_new prf :=
  let prop := type of prf in
  match goal with
    | [ H : prop |- _] => fail 1
    | _ => pose proof prf
  end.

Ltac saturate_inRange :=
  match goal with
   | [ Hsr : isSubrange ?r1 ?r2 = true, Hir : inRange ?i ?r1 = true |- _ ]
     => pose_new (inRange_isSubrange_true i r1 r2 Hsr Hir)
   | [ HrBits : (0 < rBits ?r)%N, Hir : inRange ?i (halfRange ?r ?h) = true |- _ ]
     => pose_new (inRange_isSubrange_true i _ r (isSubrange_halfRange r h HrBits) Hir)
  end.
Ltac inRange_disjoint := match goal with | [ H1 : inRange ?i (halfRange ?r false) = true, H2 : inRange ?i (halfRange ?r true) = true |- _ ] => exfalso; refine (rangeDisjoint_inRange_false_false i _ _ _ H1 H2); apply halves_disj; auto | [ H1 : isSubrange ?r (halfRange ?r2 false) = true, H2 : isSubrange ?r (halfRange ?r2 true) = true |- _ ] => exfalso; refine (rangeDisjoint_isSubrange_false_false r _ _ _ H1 H2); apply halves_disj; auto | [ H : rangeDisjoint ?r1 ?r2 = true, H1 : inRange ?i ?r1 = true, H2 : inRange ?i ?r2 = true |- _ ] => exfalso; apply (rangeDisjoint_inRange_false_false i _ _ H H1 H2) end. (** Like [solve_f_eq], but tries to solve the resulting bugus cases using reasoning about [inRange]. *) Ltac solve_f_eq_disjoint := solve_f_eq; repeat point_to_inRange; repeat saturate_inRange; try inRange_disjoint. (* Only try this, so that we see wher we are stuck. *) (** *** Uniqueness of representation *) Lemma both_halfs: forall i1 i2 r1 r2, (0 < rBits r2)%N -> inRange i1 r1 = true -> inRange i2 r1 = true -> inRange i1 (halfRange r2 false) = true -> inRange i2 (halfRange r2 true) = true -> isSubrange r2 r1 = true. Proof. intros. destruct (N.ltb_spec (rBits r1) (rBits r2)). * exfalso. assert (isSubrange r1 (halfRange r2 false) = true) by (apply inRange_both_smaller_subRange with (i := i1); try assumption; rewrite rBits_halfRange; Nomega). assert (isSubrange r1 (halfRange r2 true) = true) by (apply inRange_both_smaller_subRange with (i := i2); try assumption; rewrite rBits_halfRange; Nomega). assert (isSubrange r1 r2 = true) by isSubrange_true. pose proof (smaller_subRange_other_half _ _ H4). rewrite H7, H6, H5 in H8. intuition. * apply inRange_both_smaller_subRange with (i := i1). + eapply inRange_isSubrange_true; [apply isSubrange_halfRange; assumption|eassumption]. + assumption. + assumption. Qed. Lemma criss_cross: forall i1 i2 i3 i4 r1 r2, (0 < rBits r1)%N -> (0 < rBits r2)%N -> inRange i1 (halfRange r1 false) = true -> inRange i2 (halfRange r1 true) = true -> inRange i3 (halfRange r2 false) = true -> inRange i4 (halfRange r2 true) = true -> inRange i1 r2 = true -> inRange i2 r2 = true -> inRange i3 r1 = true -> inRange i4 r1 = true -> r1 = r2. Proof. intros. apply isSubrange_antisym. + eapply both_halfs with (i1 := i1) (i2 := i2); eassumption. + eapply both_halfs with (i1 := i3) (i2 := i4); eassumption. Qed. Lemma larger_f_imp: forall s1 r1 f1 s2 r2 f2, (rBits r2 < rBits r1)%N -> Desc s1 r1 f1 -> Desc s2 r2 f2 -> (forall i : N, f1 i = true -> f2 i = true) -> False. Proof. intros ??? ??? Hsmaller HD1 HD2 Hf. destruct HD1. * pose proof (Desc_larger_WIDTH HD2). Nomega. * subst. assert (isSubrange r2 (halfRange r false) = true). { destruct (Desc_some_f HD1_1) as [i Hi]. pose proof (Desc_inside HD1_1 Hi). specialize (H4 i). rewrite (Desc_outside HD1_2) in H4 by inRange_false. rewrite orb_false_r in H4. rewrite <- H4 in Hi; clear H4. apply Hf in Hi; clear Hf. apply (Desc_inside HD2) in Hi. apply inRange_both_smaller_subRange with (i := i). * inRange_true. * inRange_true. * rewrite rBits_halfRange. Nomega. } assert (isSubrange r2 (halfRange r true) = true). { destruct (Desc_some_f HD1_2) as [i Hi]. pose proof (Desc_inside HD1_2 Hi). specialize (H4 i). rewrite (Desc_outside HD1_1) in H4 by inRange_false. rewrite orb_false_l in H4. rewrite <- H4 in Hi; clear H4. apply Hf in Hi; clear Hf. apply (Desc_inside HD2) in Hi. apply inRange_both_smaller_subRange with (i := i). * inRange_true. * inRange_true. * rewrite rBits_halfRange. Nomega. } inRange_disjoint. Qed. 
Lemma Desc_unique: forall s1 r1 f1 s2 r2 f2, Desc s1 r1 f1 -> Desc s2 r2 f2 -> (forall i, f1 i = f2 i) -> s1 = s2. Proof. intros ?????? HD1. revert s2 r2 f2. induction HD1. * intros s2 r2 f2 HD2 Hf. destruct HD2. + subst. assert (r = r0). { destruct (isBitMask_bitmapInRange r bm H0 H2) as [i Hbit]. assert (Hir : inRange i r = true) by (eapply bitmapInRange_inside; eassumption). specialize (Hf i). specialize (H1 i). specialize (H5 i). rewrite H1 in Hf; clear H1. rewrite H5 in Hf; clear H5. rewrite Hbit in Hf; symmetry in Hf. apply bitmapInRange_inside in Hf. apply inRange_both_same with (i := i); try assumption; Nomega. } subst. f_equal. enough (forall j, j < WIDTH -> N.testbit (wordToN bm) j = N.testbit (wordToN bm0) j) by admit. intros j Hlt. set (i := intoRange r0 j). specialize (H1 i). specialize (H5 i). specialize (Hf i). rewrite Hf in H1 by assumption; clear Hf. rewrite H1 in H5; clear H1. subst i. rewrite !bitmapInRange_intoRange in H5 by (replace (rBits r0); assumption). assumption. + exfalso. subst. eapply larger_f_imp with (r1 := r0) (r2 := r). - assert (N.log2 WIDTH <= rBits r1)%N by (eapply Desc_larger_WIDTH; eauto). apply subRange_smaller in H4. rewrite rBits_halfRange in H4. Nomega. - eapply DescBin with (s1 := s1) (s2 := s2); try eassumption; reflexivity. - eapply DescTip; try eassumption; reflexivity. - intros i Hi. rewrite Hf. assumption. * intros s3 r3 f3 HD3 Hf. destruct HD3. + exfalso. subst. eapply larger_f_imp with (r1 := r) (r2 := r0). - assert (N.log2 WIDTH <= rBits r1)%N by (eapply Desc_larger_WIDTH; eauto). apply subRange_smaller in H0. rewrite rBits_halfRange in H0. Nomega. - eapply DescBin with (s1 := s1) (s2 := s2); try eassumption; reflexivity. - eapply DescTip; try eassumption; reflexivity. - intros i Hi. rewrite <- Hf. assumption. + subst. assert (r4 = r). { destruct (Desc_some_f HD3_1) as [i1 Hi1]. destruct (Desc_some_f HD3_2) as [i2 Hi2]. destruct (Desc_some_f HD1_1) as [i3 Hi3]. destruct (Desc_some_f HD1_2) as [i4 Hi4]. apply criss_cross with i1 i2 i3 i4; try assumption. * apply (Desc_inside HD3_1) in Hi1; inRange_true. * apply (Desc_inside HD3_2) in Hi2; inRange_true. * apply (Desc_inside HD1_1) in Hi3; inRange_true. * apply (Desc_inside HD1_2) in Hi4; inRange_true. * specialize (H10 i1); rewrite Hi1 in H10. rewrite orb_true_l in H10. rewrite <- Hf in H10. specialize (H4 i1); rewrite H10 in H4; clear H10; symmetry in H4. rewrite orb_true_iff in H4; destruct H4. + apply (Desc_inside HD1_1) in H2. eapply inRange_isSubrange_true; [|eassumption]. isSubrange_true. + apply (Desc_inside HD1_2) in H2. eapply inRange_isSubrange_true; [|eassumption]. isSubrange_true. * specialize (H10 i2); rewrite Hi2 in H10. rewrite orb_true_r in H10. rewrite <- Hf in H10. specialize (H4 i2); rewrite H10 in H4; clear H10; symmetry in H4. rewrite orb_true_iff in H4; destruct H4. + apply (Desc_inside HD1_1) in H2. eapply inRange_isSubrange_true; [|eassumption]. isSubrange_true. + apply (Desc_inside HD1_2) in H2. eapply inRange_isSubrange_true; [|eassumption]. isSubrange_true. * specialize (H4 i3); rewrite Hi3 in H4. rewrite orb_true_l in H4. rewrite -> Hf in H4. specialize (H10 i3); rewrite H4 in H10; clear H4; symmetry in H10. rewrite orb_true_iff in H10; destruct H10. + apply (Desc_inside HD3_1) in H2. eapply inRange_isSubrange_true; [|eassumption]. isSubrange_true. + apply (Desc_inside HD3_2) in H2. eapply inRange_isSubrange_true; [|eassumption]. isSubrange_true. * specialize (H4 i4); rewrite Hi4 in H4. rewrite orb_true_r in H4. rewrite -> Hf in H4. 
specialize (H10 i4); rewrite H4 in H10; clear H4; symmetry in H10. rewrite orb_true_iff in H10; destruct H10. + apply (Desc_inside HD3_1) in H2. eapply inRange_isSubrange_true; [|eassumption]. isSubrange_true. + apply (Desc_inside HD3_2) in H2. eapply inRange_isSubrange_true; [|eassumption]. isSubrange_true. } subst. assert (IH_prem_1 : (forall i : N, f1 i = f0 i)). { intro i. specialize (H4 i). specialize (H10 i). specialize (Hf i). destruct (inRange i (halfRange r false)) eqn:?. -- rewrite (Desc_outside HD1_2) in H4 by inRange_false. rewrite orb_false_r in H4. rewrite <- H4; clear H4. rewrite (Desc_outside HD3_2) in H10 by inRange_false. rewrite orb_false_r in H10. rewrite <- H10; clear H10. assumption. -- rewrite (Desc_outside HD1_1) by inRange_false. rewrite (Desc_outside HD3_1) by inRange_false. reflexivity. } assert (IH_prem_2 : (forall i : N, f2 i = f3 i)). { intro i. specialize (H4 i). specialize (H10 i). specialize (Hf i). destruct (inRange i (halfRange r true)) eqn:?. -- rewrite (Desc_outside HD1_1) in H4 by inRange_false. rewrite orb_false_l in H4. rewrite <- H4; clear H4. rewrite (Desc_outside HD3_1) in H10 by inRange_false. rewrite orb_false_l in H10. rewrite <- H10; clear H10. assumption. -- rewrite (Desc_outside HD1_2) by inRange_false. rewrite (Desc_outside HD3_2) by inRange_false. reflexivity. } specialize (IHHD1_1 _ _ _ HD3_1 IH_prem_1). destruct IHHD1_1; subst. specialize (IHHD1_2 _ _ _ HD3_2 IH_prem_2). destruct IHHD1_2; subst. reflexivity. Admitted. Lemma Sem_unique: forall s1 f1 s2 f2, Sem s1 f1 -> Sem s2 f2 -> (forall i, f1 i = f2 i) -> s1 = s2. Proof. intros. destruct H, H0. * reflexivity. * exfalso. destruct (Desc_some_f HD) as [i Hi]. rewrite <- H1 in Hi. rewrite H in Hi. congruence. * exfalso. destruct (Desc_some_f HD) as [i Hi]. rewrite -> H1 in Hi. rewrite H in Hi. congruence. * eapply Desc_unique; eassumption. Qed. Theorem Sem_extensional (s : IntSet) (f1 f2 : N -> bool) : Sem s f1 -> Sem s f2 -> forall i, f1 i = f2 i. Proof. intros S1 S2 k; inversion S1 as [f1' E1 | s1 r1 f1' D1]; subst f1' s; inversion S2 as [f2' E2 | s2 r2 f2' D2]; subst f2'; try subst s1; try subst s2. - now rewrite E1,E2. - inversion D2. - inversion D1. - eauto using Desc_unique_f. Qed. (** *** Verification of [equal] *) Lemma equal_spec: forall s1 s2, equal s1 s2 = true <-> s1 = s2. Proof. induction s1; intro s2; destruct s2; try solve [simpl; intuition congruence]. * simpl. unfoldMethods. rewrite !andb_true_iff. rewrite !N.eqb_eq. rewrite !intToN_inj_iff. rewrite IHs1_1. rewrite IHs1_2. intuition congruence. * simpl. unfoldMethods. rewrite !andb_true_iff. rewrite !N.eqb_eq. rewrite !intToN_inj_iff, !wordToN_inj_iff. intuition congruence. Qed. (** *** Verification of [nequal] *) Lemma nequal_spec: forall s1 s2, nequal s1 s2 = negb (equal s1 s2). Proof. induction s1; intro s2; destruct s2; try solve [simpl; intuition congruence]. * simpl. unfoldMethods. rewrite !negb_andb. rewrite IHs1_1. rewrite IHs1_2. intuition congruence. * simpl. unfoldMethods. rewrite !negb_andb. intuition congruence. Qed. (** *** Verification of [isSubsetOf] *) Lemma isSubsetOf_disjoint: forall s1 r1 f1 s2 r2 f2, rangeDisjoint r1 r2 = true -> Desc s1 r1 f1 -> Desc s2 r2 f2 -> (forall i : N, f1 i = true -> f2 i = true) <-> False. Proof. intros ??? ??? Hdis HD1 HD2. intuition. destruct (Desc_some_f HD1) as [i Hi]. eapply rangeDisjoint_inRange_false_false with (i := i). ** eassumption. ** eapply Desc_inside; eassumption. ** apply H in Hi. apply (Desc_inside HD2) in Hi. assumption. Qed. 
Lemma pointwise_iff: forall {a} (P Q Z : a -> Prop), (forall i, P i -> (Q i <-> Z i)) -> (forall i, P i -> Q i) <-> (forall i, P i -> Z i). Proof. intuition; specialize (H0 i); specialize (H i); intuition. Qed. (* Move somewhere else *) Lemma isBitMask0_wordToN: forall bm, isBitMask0 (wordToN bm). Admitted. Hint Resolve isBitMask0_wordToN : isBitMask. Lemma isBitMask0_lnot: forall n, isBitMask0 (N.lnot n WIDTH). Admitted. Hint Resolve isBitMask0_lnot : isBitMask. Program Fixpoint isSubsetOf_Desc s1 r1 f1 s2 r2 f2 { measure (size_nat s1 + size_nat s2) } : Desc s1 r1 f1 -> Desc s2 r2 f2 -> isSubsetOf s1 s2 = true <-> (forall i, f1 i = true -> f2 i = true) := _. Next Obligation. revert isSubsetOf_Desc H H0. intros IH HD1 HD2. destruct HD1, HD2. * (* Both are tips *) simpl; subst. unfoldMethods. Int_Word_N. rewrite andb_true_iff. rewrite !N.eqb_eq. destruct (N.eqb_spec (rPrefix r) (rPrefix r0)). - replace r0 with r in * by (apply rPrefix_rBits_range_eq; congruence). clear r0. intuition. ** rewrite H1 in H. rewrite H5. unfold bitmapInRange. unfold bitmapInRange in H. destruct (inRange i r); try congruence. set (j := N.land i _) in *. apply N.bits_inj_iff in H7. specialize (H7 j). rewrite N.land_spec, N.lnot_spec_low, N.bits_0 in H7 by admit. destruct (N.testbit (wordToN bm) j), (N.testbit (wordToN bm0) j); simpl in *; try congruence. ** apply N.bits_inj_iff; intro j. rewrite N.bits_0. destruct (N.ltb_spec j WIDTH). ++ rewrite N.land_spec, N.lnot_spec_low by assumption. do 2 split_bool; try reflexivity; exfalso. apply not_true_iff_false in Heqb0. contradict Heqb0. set (i := intoRange r j). assert (Hbmir : bitmapInRange r bm0 i = N.testbit (wordToN bm0) j) by (apply bitmapInRange_intoRange; replace (rBits r); assumption). rewrite <- Hbmir; clear Hbmir. rewrite <- H5. apply H. rewrite H1. assert (Hbmir : bitmapInRange r bm i = N.testbit (wordToN bm) j) by (apply bitmapInRange_intoRange; replace (rBits r); assumption). rewrite Hbmir. assumption. ++ apply isBitMask0_outside. isBitMask. assumption. - rewrite isSubsetOf_disjoint. ** intuition. ** apply different_prefix_same_bits_disjoint; try eassumption; congruence. ** eapply DescTip with (p := NToInt (rPrefix r)) (r := r) (bm := bm); try eassumption; try congruence. ** eapply DescTip with (p := NToInt (rPrefix r0)) (r := r0) (bm := bm0); try eassumption; try congruence. * (* Tip left, Bin right *) simpl; subst. apply nomatch_zero_smaller. - assert (N.log2 WIDTH <= rBits r1)%N by (eapply Desc_larger_WIDTH; eauto). apply subRange_smaller in H4. rewrite rBits_halfRange in H4. lia. - intros Hdisj. rewrite isSubsetOf_disjoint. ** intuition. ** eassumption. ** eapply DescTip; try eassumption; try reflexivity. ** eapply (DescBin s1 _ _ s2); try eassumption; try reflexivity. - intros. etransitivity; [eapply IH with (f2 := f1)|]. + simpl. omega. + eapply DescTip with (p := NToInt (rPrefix r)) (r := r) (bm := bm); try eassumption; try congruence. + eassumption. + apply pointwise_iff. intros i Hi. assert (inRange i r = true). { rewrite H1 in Hi. apply bitmapInRange_inside in Hi. assumption. } rewrite H8. rewrite (Desc_outside HD2_2) by inRange_false. rewrite orb_false_r. reflexivity. - intros. etransitivity; [eapply IH with (f2 := f2)|]. + simpl. omega. + eapply DescTip with (p := NToInt (rPrefix r)) (r := r) (bm := bm); try eassumption; try congruence. + eassumption. + apply pointwise_iff. intros i Hi. assert (inRange i r = true). { rewrite H1 in Hi. apply bitmapInRange_inside in Hi. assumption. } rewrite H8. rewrite (Desc_outside HD2_1) by inRange_false. 
rewrite orb_false_l. reflexivity. * (* Bin right, Tip left *) intuition; exfalso. eapply larger_f_imp with (r1 := r) (r2 := r2). - assert (N.log2 WIDTH <= rBits r1)%N by (eapply Desc_larger_WIDTH; eauto). apply subRange_smaller in H0. rewrite rBits_halfRange in H0. Nomega. - eapply DescBin with (s1 := s1) (s2 := s0); try eassumption; reflexivity. - eapply DescTip; try eassumption; reflexivity. - intros i Hi. apply H9. assumption. * (* Bin both sides *) simpl; subst. rewrite shorter_spec by assumption. rewrite shorter_spec by assumption. destruct (N.ltb_spec (rBits r4) (rBits r)); [|destruct (N.ltb_spec (rBits r) (rBits r4))]. - (* left is bigger than right *) intuition; exfalso. eapply larger_f_imp with (r1 := r) (r2 := r4). -- assumption. -- eapply DescBin with (s1 := s1) (s2 := s0); try eassumption; reflexivity. -- eapply DescBin with (s1 := s2) (s2 := s3); try eassumption; reflexivity. -- assumption. - (* right is bigger than left *) match goal with [ |- ((?x && ?y) = true) <-> ?z ] => enough (Htmp : (if x then y else false) = true <-> z) by (destruct x; try rewrite andb_true_iff; intuition congruence) end. match goal with [ |- context [match_ ?x ?y ?z] ] => replace (match_ x y z) with (negb (nomatch x y z)) by (unfold nomatch, match_; unfoldMethods; rewrite negb_involutive; reflexivity) end. rewrite if_negb. apply nomatch_zero_smaller; try assumption. ** intro Hdisj. rewrite isSubsetOf_disjoint. -- intuition. -- eassumption. -- eapply DescBin with (s1 := s1) (s2 := s0); try eassumption; reflexivity. -- eapply DescBin with (s1 := s2) (s2 := s3); try eassumption; reflexivity. ** intros. etransitivity; [eapply IH with (f2 := f2)|]. + simpl. omega. + eapply DescBin with (s1 := s1) (s2 := s0) (r := r); try eassumption; reflexivity. + eassumption. + apply pointwise_iff. intros i Hi. assert (inRange i r = true). { rewrite H4 in Hi. rewrite orb_true_iff in Hi; destruct Hi as [Hi | Hi]; (apply (Desc_inside HD1_1) in Hi || apply (Desc_inside HD1_2) in Hi); eapply inRange_isSubrange_true; swap 1 2; try eassumption; isSubrange_true. } rewrite H10. rewrite (Desc_outside HD2_2) by inRange_false. rewrite orb_false_r. reflexivity. ** intros. etransitivity; [eapply IH with (f2 := f3)|]. + simpl. omega. + eapply DescBin with (s1 := s1) (s2 := s0) (r := r); try eassumption; reflexivity. + eassumption. + apply pointwise_iff. intros i Hi. assert (inRange i r = true). { rewrite H4 in Hi. rewrite orb_true_iff in Hi; destruct Hi as [Hi | Hi]; (apply (Desc_inside HD1_1) in Hi || apply (Desc_inside HD1_2) in Hi); eapply inRange_isSubrange_true; swap 1 2; try eassumption; isSubrange_true. } rewrite H10. rewrite (Desc_outside HD2_1) by inRange_false. rewrite orb_false_l. reflexivity. - (* same sized bins *) unfoldMethods. Int_Word_N. destruct (N.eqb_spec (rPrefix r) (rPrefix r4)). + replace r4 with r in * by (apply rPrefix_rBits_range_eq; Nomega). clear r4. simpl. rewrite andb_true_iff. rewrite (IH s1 r1 f1 s2 r2 f2); try assumption; simpl; try omega. rewrite (IH s0 r0 f0 s3 r3 f3); try assumption; simpl; try omega. intuition. ++ rewrite H10. rewrite H4 in H8. rewrite orb_true_iff in H8. destruct H8. ** apply H9 in H8. rewrite H8. rewrite orb_true_l. reflexivity. ** apply H11 in H8. rewrite H8. rewrite orb_true_r. reflexivity. ++ specialize (H4 i). specialize (H10 i). rewrite H9 in H4. apply (Desc_inside HD1_1) in H9. rewrite orb_true_l in H4. apply H8 in H4. rewrite H10 in H4. rewrite (Desc_outside HD2_2) in H4 by inRange_false. rewrite orb_false_r in H4. assumption. ++ specialize (H4 i). specialize (H10 i). 
rewrite H9 in H4. apply (Desc_inside HD1_2) in H9. rewrite orb_true_r in H4. apply H8 in H4. rewrite H10 in H4. rewrite (Desc_outside HD2_1) in H4 by inRange_false. rewrite orb_false_l in H4. assumption. + rewrite isSubsetOf_disjoint. ** intuition. ** apply different_prefix_same_bits_disjoint; try eassumption; Nomega. ** eapply DescBin with (s1 := s1) (s2 := s0); try eassumption; reflexivity. ** eapply DescBin with (s1 := s2) (s2 := s3); try eassumption; reflexivity. Admitted. Lemma isSubsetOf_Sem: forall s1 f1 s2 f2, Sem s1 f1 -> Sem s2 f2 -> isSubsetOf s1 s2 = true <-> (forall i, f1 i = true -> f2 i = true). Proof. intros ???? HSem1 HSem2. destruct HSem1. * replace (isSubsetOf Nil s2) with true by (destruct s2; reflexivity). intuition; exfalso. rewrite H in H1; congruence. * destruct HSem2. + replace (isSubsetOf s Nil) with false by (destruct HD; reflexivity). intuition; exfalso. destruct (Desc_some_f HD) as [i Hi]. apply H0 in Hi. rewrite H in Hi. congruence. + eapply isSubsetOf_Desc; eassumption. Qed. Lemma isSubsetOf_refl: forall s f, Sem s f -> isSubsetOf s s = true. Proof. intros. rewrite isSubsetOf_Sem by eassumption. intuition. Qed. Lemma isSubsetOf_antisym: forall s1 f1 s2 f2, Sem s1 f1 -> Sem s2 f2 -> isSubsetOf s1 s2 = true -> isSubsetOf s2 s1 = true -> s1 = s2. Proof. intros. rewrite isSubsetOf_Sem in H1 by eassumption. rewrite isSubsetOf_Sem in H2 by eassumption. eapply Sem_unique; try eassumption. intro i. specialize (H1 i). specialize (H2 i). apply eq_true_iff_eq. intuition. Qed. (** *** Verification of [member] *) Lemma member_Desc: forall {s r f i}, Desc s r f -> member i s = f (intToN i). Proof. intros ???? HD. induction HD; subst. * simpl. change (((prefixOf i == NToInt (rPrefix r)) && ((bitmapOf i .&.bm) /= #0)) = f (intToN i)). unfoldMethods. Int_Word_N. rewrite -> prefixOf_eqb_spec by assumption. rewrite H1. unfold bitmapOf, suffixOf, suffixBitMask, bitmapInRange. unfoldMethods. replace (intToN (ZToInt 63)) with 63 by admit. rewrite bitmapOfSuffix_pow. rewrite N_land_pow2_testbit. rewrite H0. reflexivity. * rewrite H4. clear H4. simpl member. rewrite IHHD1, IHHD2. clear IHHD1 IHHD2. apply nomatch_zero; [auto|..]; intros. + rewrite (Desc_outside HD1) by inRange_false. rewrite (Desc_outside HD2) by inRange_false. reflexivity. + rewrite (Desc_outside HD2) by inRange_false. rewrite orb_false_r. reflexivity. + rewrite (Desc_outside HD1) by inRange_false. rewrite orb_false_l. reflexivity. Admitted. Lemma member_Desc0: forall {s r f i}, Desc0 s r f -> member i s = f (intToN i). Proof. intros. destruct H; simpl; auto. rewrite Hf. eapply member_Desc; eauto. Qed. Lemma member_Sem: forall {s f i}, Sem s f -> member i s = f (intToN i). Proof. intros. destruct H. * rewrite H. reflexivity. * erewrite member_Desc; eauto. Qed. Lemma Desc_has_member: forall {s r f}, Desc s r f -> exists i, member i s = true. Proof. intros ??? HD. destruct (Desc_some_f HD) as [j?]. exists (NToInt j). rewrite (member_Desc HD). Int_Word_N. intuition. Qed. (** *** Verification of [notMember] *) Lemma notMember_Sem: forall {s f i}, Sem s f -> notMember i s = negb (f (intToN i)). Proof. intros. change (negb (member i s) = negb (f (intToN i))). f_equal. apply member_Sem. assumption. Qed. (** *** Verification of [null] *) Lemma null_Sem: forall {s f}, Sem s f -> null s = true <-> (forall i, f i = false). Proof. intros s f HSem. destruct HSem. * intuition. * assert (null s = false) by (destruct HD; reflexivity). intuition try congruence. destruct (Desc_some_f HD). specialize (H0 x). congruence. Qed. 
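(** Editorial worked example (not part of the original development): the key
    200 decomposes as [200 = 3 * 64 + 8], so its prefix is 192 and its suffix
    is 8, and [bitmapOf] yields the word whose only set bit is bit 8.  Hence
    [member (NToInt 200) (Tip (prefixOf (NToInt 200)) (bitmapOf (NToInt 200)))]
    evaluates to [true], in agreement with [member_Desc] for the range
    [(3, 6)]. *)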
(** *** Verification of [empty] *) Lemma empty_Sem (f : N -> bool) : Sem empty f <-> forall i, f i = false. Proof. split. - now inversion 1; intros; subst. - now constructor. Qed. Lemma empty_WF : WF empty. Proof. now exists (fun _ => false); constructor. Qed. Hint Resolve empty_WF. (** *** Verification of [singleton] *) Lemma singleton_Desc: forall e, Desc (singleton e) (N.shiftr (intToN e) 6, N.log2 WIDTH) (fun x => x =? intToN e). Proof. intros. apply DescTip; try nonneg; try isBitMask. symmetry; apply rPrefix_shiftr. intro i. symmetry; apply bitmapInRange_bitmapOf. Qed. Lemma singleton_Sem: forall e, Sem (singleton e) (fun x => x =? (intToN e)). Proof. intros. eapply DescSem. apply singleton_Desc; assumption. Qed. Lemma singleton_WF: forall e, WF (singleton e). Proof. intros. eexists. apply singleton_Sem; auto. Qed. (** *** Verification of [insert] *) Lemma link_Desc: forall p1' s1 r1 f1 p2' s2 r2 f2 r f, Desc s1 r1 f1 -> Desc s2 r2 f2 -> p1' = NToInt (rPrefix r1) -> p2' = NToInt (rPrefix r2) -> rangeDisjoint r1 r2 = true-> r = commonRangeDisj r1 r2 -> (forall i, f i = f1 i || f2 i) -> Desc (link p1' s1 p2' s2) r f. Proof. intros; subst. unfold link. rewrite branchMask_spec. rewrite mask_spec. rewrite -> zero_spec by (apply commonRangeDisj_rBits_pos; eapply Desc_rNonneg; eassumption). Int_Word_N. rewrite if_negb. match goal with [ |- context [N.testbit ?i ?b] ] => destruct (N.testbit i b) eqn:Hbit end. * assert (Hbit2 : N.testbit (rPrefix r2) (rBits (commonRangeDisj r1 r2) - 1) = false). { apply not_true_is_false. rewrite <- Hbit. apply not_eq_sym. apply commonRangeDisj_rBits_Different; try (eapply Desc_rNonneg; eassumption); auto. } rewrite rangeDisjoint_sym in H3. rewrite -> commonRangeDisj_sym in * by (eapply Desc_rNonneg; eassumption). apply (DescBin _ _ _ _ _ _ _ _ _ f H0 H); auto. + apply commonRangeDisj_rBits_pos; (eapply Desc_rNonneg; eassumption). + rewrite <- Hbit2. apply isSubrange_halfRange_commonRangeDisj; try (eapply Desc_rNonneg; eassumption); auto. + rewrite <- Hbit at 1. rewrite -> commonRangeDisj_sym by (eapply Desc_rNonneg; eassumption). rewrite rangeDisjoint_sym in H3. apply isSubrange_halfRange_commonRangeDisj; try (eapply Desc_rNonneg; eassumption); auto. + solve_f_eq. * assert (Hbit2 : N.testbit (rPrefix r2) (rBits (commonRangeDisj r1 r2) - 1) = true). { apply not_false_iff_true. rewrite <- Hbit. apply not_eq_sym. apply commonRangeDisj_rBits_Different; try (eapply Desc_rNonneg; eassumption); auto. } apply (DescBin _ _ _ _ _ _ _ _ _ f H H0); auto. + apply commonRangeDisj_rBits_pos; (eapply Desc_rNonneg; eassumption). + rewrite <- Hbit. apply isSubrange_halfRange_commonRangeDisj; try (eapply Desc_rNonneg; eassumption); auto. + rewrite <- Hbit2 at 1. rewrite -> commonRangeDisj_sym by (eapply Desc_rNonneg; eassumption). rewrite rangeDisjoint_sym in H3. apply isSubrange_halfRange_commonRangeDisj; try (eapply Desc_rNonneg; eassumption); auto. Qed. Lemma insertBM_Desc: forall p' bm r1 f1, forall s2 r2 f2, forall r f, Desc (Tip p' bm) r1 f1 -> Desc s2 r2 f2 -> r = commonRange r1 r2 -> (forall i, f i = f1 i || f2 i) -> Desc (insertBM p' bm s2) r f. Proof. intros ????????? HDTip HD ??; subst. assert (p' = NToInt (rPrefix r1)) by (inversion HDTip; auto); subst. assert (rBits r1 = N.log2 WIDTH) by (inversion HDTip; auto). generalize dependent f. induction HD as [p2' bm2 r2 f2|s2 r2 f2 s3 r3 f3 p2' r]; subst; intros f' Hf. * simpl. unfoldMethods. Int_Word_N. apply same_size_compare; try Nomega; intros. + subst. rewrite commonRange_idem. inversion_clear HDTip. 
apply DescTip; auto. - solve_f_eq. - Int_Word_N. isBitMask. + rewrite rangeDisjoint_sym in *. eapply link_Desc; try apply HDTip; auto. - apply DescTip; auto. - apply disjoint_commonRange; auto. * simpl. unfoldMethods. assert (N.log2 WIDTH <= rBits r2)%N by (eapply Desc_larger_WIDTH; eauto). assert (rBits r2 <= rBits (halfRange r0 false))%N by (apply subRange_smaller; auto). assert (rBits (halfRange r0 false) < rBits r0)%N by (apply halfRange_smaller; auto). assert (rBits r1 < rBits r0)%N by Nomega. apply nomatch_zero_smaller; try assumption; intros. + eapply link_Desc; eauto; try (inversion HDTip; auto). eapply DescBin; eauto. apply disjoint_commonRange; assumption. + rewrite -> (isSubrange_commonRange_r r1 r0) in * by isSubrange_true. eapply DescBin; try apply HD2; try apply IHHD1 with (f := fun j => f1 j || f2 j); auto. ** isSubrange_true; eapply Desc_rNonneg; eassumption. ** solve_f_eq. + rewrite -> (isSubrange_commonRange_r r1 r0) in * by isSubrange_true. eapply DescBin; try apply HD1; try apply IHHD2 with (f := fun j => f1 j || f3 j); auto. ** isSubrange_true; eapply Desc_rNonneg; eassumption. ** solve_f_eq. Qed. Lemma insert_Desc: forall e r1, forall s2 r2 f2, forall r f, Desc s2 r2 f2 -> r1 = (N.shiftr (intToN e) tip_width, tip_width) -> r = commonRange r1 r2 -> (forall i, f i = (i =? intToN e) || f2 i) -> Desc (insert e s2) r f. Proof. intros. eapply insertBM_Desc. eapply DescTip; try nonneg. * symmetry. apply rPrefix_shiftr. * reflexivity. * isBitMask. * eassumption. * congruence. * intros j. rewrite H2. f_equal. symmetry. apply bitmapInRange_bitmapOf. Qed. Lemma insert_Nil_Desc: forall e r f, r = (N.shiftr (intToN e) tip_width, tip_width) -> (forall i, f i = (i =? intToN e)) -> Desc (insert e Nil) r f. Proof. intros; subst. apply DescTip; try nonneg. * symmetry. apply rPrefix_shiftr. * intros j. rewrite H0. symmetry. apply bitmapInRange_bitmapOf. * isBitMask. Qed. Lemma insert_Sem: forall e s2 f2 f, Sem s2 f2 -> (forall i, f i = (i =? intToN e) || f2 i) -> Sem (insert e s2) f. Proof. intros. destruct H. * eapply DescSem. apply insert_Nil_Desc; auto. solve_f_eq. * eapply DescSem. eapply insert_Desc; eauto. Qed. Lemma insert_WF: forall n s, WF s -> WF (insert n s). Proof. intros. destruct H. eexists. eapply insert_Sem; eauto. intro i; reflexivity. Qed. (** *** Verification of the smart constructors [tip] and [bin] *) Lemma tip_Desc0: forall p bm r f, p = NToInt (rPrefix r) -> rBits r = N.log2 WIDTH -> (forall i, f i = bitmapInRange r bm i) -> isBitMask0 (wordToN bm) -> Desc0 (tip p bm) r f. Proof. intros. unfold tip. unfoldMethods. Int_Word_N. simpl (Z.to_N 0). rewrite isBitMask0_zero_or_isBitMask in H2. destruct H2. * assert (bm = NToWord 0) by admit. subst. Int_Word_N. rewrite N.eqb_refl. apply Desc0Nil. intro j. rewrite H1. apply bitmapInRange_0. * replace (wordToN bm =? 0)%N with false by admit. (* by (symmetry; apply N.eqb_neq; intro; subst; inversion H2; inversion H). *) apply Desc_Desc0. apply DescTip; auto. Admitted. Lemma bin_Desc0: forall s1 r1 f1 s2 r2 f2 p msk r f, Desc0 s1 r1 f1 -> Desc0 s2 r2 f2 -> (0 < rBits r)%N -> isSubrange r1 (halfRange r false) = true -> isSubrange r2 (halfRange r true) = true -> p = NToInt (rPrefix r) -> msk = NToInt (rMask r) -> (forall i, f i = f1 i || f2 i) -> Desc0 (bin p msk s1 s2) r f. Proof. intros. destruct H, H0. * apply Desc0Nil. intro j. rewrite H6, H, H0. reflexivity. * replace (bin _ _ _ _) with s by (destruct s; reflexivity). eapply Desc0NotNil; eauto. + isSubrange_true. + solve_f_eq. 
* replace (bin _ _ _ _) with s by (destruct s; reflexivity). eapply Desc0NotNil; try eassumption. + isSubrange_true. + solve_f_eq. * replace (bin p msk s s0) with (Bin p msk s s0) by (destruct s, s0; try reflexivity; try inversion HD; try inversion HD0). apply Desc_Desc0. eapply DescBin; try eassumption. + isSubrange_true. + isSubrange_true. + solve_f_eq. Qed. (** *** Verification of [delete] *) Lemma deleteBM_Desc: forall p' bm s2 r1 r2 f1 f2 f, Desc (Tip p' bm) r1 f1 -> Desc s2 r2 f2 -> (forall i, f i = negb (f1 i) && f2 i) -> Desc0 (deleteBM p' bm s2) r2 f. Proof. intros ???????? HTip HD Hf. revert dependent f. induction HD; intros f' Hf'; subst. * simpl deleteBM; unfold Prim.seq. inversion_clear HTip; subst. unfoldMethods. Int_Word_N. apply same_size_compare; try Nomega; intros. + subst. apply tip_Desc0; auto. - solve_f_eq. - isBitMask. + apply Desc_Desc0. apply DescTip; auto. solve_f_eq_disjoint. * simpl. unfold Prim.seq. inversion_clear HTip; subst. assert (N.log2 WIDTH <= rBits r2)%N by (eapply Desc_larger_WIDTH; eauto). assert (rBits r2 <= rBits (halfRange r true))%N by (apply subRange_smaller; auto). assert (rBits (halfRange r true) < rBits r)%N by (apply halfRange_smaller; auto). assert (rBits r1 < rBits r)%N by Nomega. apply nomatch_zero_smaller; try assumption; intros. + rewrite rangeDisjoint_sym in *. apply Desc_Desc0. eapply DescBin; try eassumption; try reflexivity. intro. rewrite Hf'. rewrite H4. rewrite H5. destruct (inRange i r) eqn:Hir. - rewrite bitmapInRange_outside by inRange_false. reflexivity. - rewrite (Desc_outside HD1) by inRange_false. rewrite (Desc_outside HD2) by inRange_false. split_bool; reflexivity. + eapply bin_Desc0. ** apply IHHD1. intro. reflexivity. ** apply Desc_Desc0; eassumption. ** assumption. ** assumption. ** assumption. ** reflexivity. ** reflexivity. ** solve_f_eq_disjoint. + eapply bin_Desc0. ** apply Desc_Desc0; eassumption. ** apply IHHD2. intro. reflexivity. ** assumption. ** assumption. ** assumption. ** reflexivity. ** reflexivity. ** solve_f_eq_disjoint. Qed. Lemma delete_Desc: forall e s r f f', Desc s r f -> (forall i, f' i = negb (i =? intToN e) && f i) -> Desc0 (delete e s) r f'. Proof. intros. unfold delete, Prim.seq. eapply deleteBM_Desc. * eapply DescTip; try nonneg. + symmetry. apply rPrefix_shiftr. + reflexivity. + isBitMask. * eassumption. * setoid_rewrite bitmapInRange_bitmapOf. assumption. Qed. Lemma delete_Sem: forall e s f f', Sem s f -> (forall i, f' i = negb (i =? intToN e) && f i) -> Sem (delete e s) f'. Proof. intros. destruct H. * apply SemNil. solve_f_eq. * eapply Desc0_Sem. eapply delete_Desc; try eassumption. Qed. Lemma delete_WF: forall n s, WF s -> WF (delete n s). Proof. intros. destruct H. eexists. eapply delete_Sem; try eassumption. intro i. reflexivity. Qed. 
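(* An illustrative corollary, not part of the original development: a deleted
   key is no longer a member.  This is a minimal sketch that only uses
   [delete_Sem] and [member_Sem] as stated above; the lemma name is ours. *)
Lemma member_delete_sketch:
  forall e s f, Sem s f -> member e (delete e s) = false.
Proof.
  intros e s f HSem.
  (* [delete_Sem] gives the pointwise semantics of the set after deletion. *)
  assert (HSem' : Sem (delete e s) (fun i => negb (i =? intToN e) && f i)).
  { eapply delete_Sem; [eassumption | intro i; reflexivity]. }
  rewrite (member_Sem (i:=e) HSem').
  cbv beta. rewrite N.eqb_refl. reflexivity.
Qed.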
(** *** Verification of [union] *) (** The following is copied from the body of [union] *) Definition union_body s1 s2 := match s1, s2 with | (Bin p1 m1 l1 r1 as t1) , (Bin p2 m2 l2 r2 as t2) => let union2 := if nomatch p1 p2 m2 : bool then link p1 t1 p2 t2 else if zero p1 m2 : bool then Bin p2 m2 (union t1 l2) r2 else Bin p2 m2 l2 (union t1 r2) in let union1 := if nomatch p2 p1 m1 : bool then link p1 t1 p2 t2 else if zero p2 m1 : bool then Bin p1 m1 (union l1 t2) r1 else Bin p1 m1 l1 (union r1 t2) in if shorter m1 m2 : bool then union1 else if shorter m2 m1 : bool then union2 else if p1 == p2 : bool then Bin p1 m1 (union l1 l2) (union r1 r2) else link p1 t1 p2 t2 | (Bin _ _ _ _ as t) , Tip kx bm => insertBM kx bm t | (Bin _ _ _ _ as t) , Nil => t | Tip kx bm , t => insertBM kx bm t | Nil , t => t end. Lemma union_eq s1 s2 : union s1 s2 = union_body s1 s2. Proof. unfold union, union_func. rewrite Wf.WfExtensionality.fix_sub_eq_ext. unfold projT1, projT2. unfold union_body. repeat lazymatch goal with | [ |- _ = match ?x with _ => _ end ] => destruct x | _ => reflexivity end. Qed. Program Fixpoint union_Desc s1 r1 f1 s2 r2 f2 f { measure (size_nat s1 + size_nat s2) } : Desc s1 r1 f1 -> Desc s2 r2 f2 -> (forall i, f i = f1 i || f2 i) -> Desc (union s1 s2) (commonRange r1 r2) f := fun HD1 HD2 Hf => _. Next Obligation. rewrite union_eq. unfold union_body. inversion HD1; subst. * eapply insertBM_Desc; try eassumption; try reflexivity. * set (sl := Bin (NToInt (rPrefix r1)) (NToInt (rMask r1)) s0 s3) in *. inversion HD2; subst. + rewrite commonRange_sym by (eapply Desc_rNonneg; eassumption). eapply insertBM_Desc; try eassumption; try reflexivity. solve_f_eq. + set (sr := Bin (NToInt (rPrefix r2)) (NToInt (rMask r2)) s1 s4) in *. rewrite !shorter_spec by assumption. destruct (N.ltb_spec (rBits r2) (rBits r1)); [|destruct (N.ltb_spec (rBits r1) (rBits r2))]. ++ apply nomatch_zero_smaller; try assumption; intros. - rewrite rangeDisjoint_sym in *. rewrite disjoint_commonRange in * by assumption. eapply link_Desc; [ eapply DescBin with (r := r1); try eassumption; try reflexivity | eapply DescBin with (r := r2); try eassumption; try reflexivity |..]; auto. - rewrite -> (isSubrange_commonRange_l r1 r2) in * by isSubrange_true. eapply DescBin; [eapply union_Desc|eassumption|..]; try eassumption; try reflexivity. ** subst sl sr. simpl. omega. ** isSubrange_true; eapply Desc_rNonneg; eassumption. ** solve_f_eq. - rewrite -> (isSubrange_commonRange_l r1 r2) in * by isSubrange_true. eapply DescBin; [eassumption|eapply union_Desc|..]; try eassumption; try reflexivity. ** subst sl sr. simpl. omega. ** isSubrange_true; eapply Desc_rNonneg; eassumption. ** solve_f_eq. ++ apply nomatch_zero_smaller; try assumption; intros. - rewrite disjoint_commonRange in * by assumption. eapply link_Desc; [ eapply DescBin with (r := r1); try eassumption; try reflexivity | eapply DescBin with (r := r2); try eassumption; try reflexivity |..]; auto. - rewrite -> (isSubrange_commonRange_r r1 r2) in * by isSubrange_true. eapply DescBin; [eapply union_Desc|eassumption|..]; try eassumption; try reflexivity. ** subst sl sr. simpl. omega. ** isSubrange_true; eapply Desc_rNonneg; eassumption. ** solve_f_eq. - rewrite -> (isSubrange_commonRange_r r1 r2) in * by isSubrange_true. eapply DescBin; [eassumption|eapply union_Desc|..]; try eassumption; try reflexivity. ** subst sl sr. simpl. omega. ** isSubrange_true; eapply Desc_rNonneg; eassumption. ** solve_f_eq. ++ unfoldMethods. Int_Word_N. apply same_size_compare; try Nomega; intros. 
- subst. rewrite commonRange_idem in *. eapply DescBin; try assumption; try reflexivity. ** eapply union_Desc. -- subst sl sr. simpl. omega. -- eassumption. -- eassumption. -- intro i. reflexivity. ** eapply union_Desc. -- subst sl sr. simpl. omega. -- eassumption. -- eassumption. -- intro i. reflexivity. ** isSubrange_true; eapply Desc_rNonneg; eassumption. ** isSubrange_true; eapply Desc_rNonneg; eassumption. ** solve_f_eq. - rewrite disjoint_commonRange in * by assumption. eapply link_Desc; [ eapply DescBin with (r := r1); try eassumption; try reflexivity | eapply DescBin with (r := r2); try eassumption; try reflexivity |..]; auto. Qed. Lemma union_Sem: forall s1 f1 s2 f2, Sem s1 f1 -> Sem s2 f2 -> Sem (union s1 s2) (fun i => f1 i || f2 i). Proof. intros. destruct H; [|destruct H0]. * eapply Sem_change_f. apply H0. solve_f_eq. * eapply Sem_change_f. eapply DescSem. replace (union s Nil) with s by (destruct s; reflexivity). eapply HD. solve_f_eq. * eapply DescSem. eapply union_Desc; try eassumption. solve_f_eq. Qed. Lemma union_WF: forall s1 s2, WF s1 -> WF s2 -> WF (union s1 s2). Proof. intros. destruct H, H0. eexists. apply union_Sem; eassumption. Qed. Local Ltac union_Sem_reasoning := repeat intros [? ?]; repeat match goal with | SEMs : Sem ?s ?fs, SEMt : Sem ?t ?ft |- context[union ?s ?t] => match goal with | _ : Sem (union s t) _ |- _ => fail 1 | _ => specialize (union_Sem s fs t ft SEMs SEMt) as ? end end; eapply Sem_unique; try eassumption; intros; simpl. Lemma union_assoc (s1 s2 s3 : IntSet) : WF s1 -> WF s2 -> WF s3 -> union s1 (union s2 s3) = union (union s1 s2) s3. Proof. now union_Sem_reasoning; rewrite orb_assoc. Qed. Lemma union_comm (s1 s2 : IntSet) : WF s1 -> WF s2 -> union s1 s2 = union s2 s1. Proof. now union_Sem_reasoning; rewrite orb_comm. Qed. (** *** Verification of [unions] *) Require Import Proofs.Data.Foldable. Lemma Forall_rev: forall A P (l : list A), Forall P (rev l) <-> Forall P l. Proof. intros. rewrite !Forall_forall. setoid_rewrite <- in_rev. reflexivity. Qed. (* Needs more than a trivial rephrasing for word-sizes IntMaps *) (* Lemma unions_Sem (ss : list IntSet) : Forall WF ss -> Sem (unions ss) (fun i => existsb (member i) ss). Proof. unfold unions; rewrite hs_coq_foldl'_list, <-fold_left_rev_right. remember (rev ss) as rss eqn:def_rss. replace ss with (rev rss) by now subst; apply rev_involutive. clear def_rss. rewrite Forall_rev. induction rss as [|s rss IH]; simpl; intros WFrss'. - now constructor. - inversion WFrss' as [|s_ rss_ WFs WFrss]; subst s_ rss_. eapply Sem_change_f; [|intros; rewrite existsb_app; simpl; rewrite orb_false_r; reflexivity]. apply union_Sem. + now apply IH. + destruct WFs as [f SEM]. apply Sem_change_f with f; trivial. now intros; apply member_Sem. Qed. Lemma unions_WF (ss : list IntSet) : Forall WF ss -> WF (unions ss). Proof. now eexists; apply unions_Sem. Qed. 
*) (** *** Verification of [intersection] *) (** The following is copied from the body of [intersection] *) Definition intersection_body s1 s2 := match s1, s2 with | (Bin p1 m1 l1 r1 as t1) , (Bin p2 m2 l2 r2 as t2) => let intersection2 := if nomatch p1 p2 m2 : bool then Nil else if zero p1 m2 : bool then intersection t1 l2 else intersection t1 r2 in let intersection1 := if nomatch p2 p1 m1 : bool then Nil else if zero p2 m1 : bool then intersection l1 t2 else intersection r1 t2 in if shorter m1 m2 : bool then intersection1 else if shorter m2 m1 : bool then intersection2 else if p1 GHC.Base.== p2 : bool then bin p1 m1 (intersection l1 l2) (intersection r1 r2) else Nil | (Bin _ _ _ _ as t1) , Tip kx2 bm2 => (fix intersectBM arg_11__ := match arg_11__ as arg_11__' return (arg_11__' = arg_11__ -> IntSet) with | Bin p1 m1 l1 r1 => fun _ => if nomatch kx2 p1 m1 : bool then Nil else if zero kx2 m1 : bool then intersectBM l1 else intersectBM r1 | Tip kx1 bm1 => fun _ => if kx1 GHC.Base.== kx2 : bool then tip kx1 (bm1 Data.Bits..&.(**) bm2) else Nil | Nil => fun _ => Nil end eq_refl) t1 | Bin _ _ _ _ , Nil => Nil | Tip kx1 bm1 , t2 => (fix intersectBM arg_18__ := match arg_18__ as arg_18__' return (arg_18__' = arg_18__ -> IntSet) with | Bin p2 m2 l2 r2 => fun _ => if nomatch kx1 p2 m2 : bool then Nil else if zero kx1 m2 : bool then intersectBM l2 else intersectBM r2 | Tip kx2 bm2 => fun _ => if kx1 GHC.Base.== kx2 : bool then tip kx1 (bm1 Data.Bits..&.(**) bm2) else Nil | Nil => fun _ => Nil end eq_refl) t2 | Nil , _ => Nil end. Lemma intersection_eq s1 s2 : intersection s1 s2 = intersection_body s1 s2. Proof. unfold intersection, intersection_func. rewrite Wf.WfExtensionality.fix_sub_eq_ext. unfold projT1, projT2. unfold intersection_body. repeat match goal with | _ => progress replace (Sumbool.sumbool_of_bool false) with (@right (false = true) (false = false) (@eq_refl bool false)) by reflexivity | _ => progress replace (Sumbool.sumbool_of_bool true) with (@left (true = true) (true = false) (@eq_refl bool true)) by reflexivity | [ |- _ = match ?x with _ => _ end ] => destruct x | _ => assumption || reflexivity | [ |- _ ?x = _ ?x ] => induction x end. Qed. Program Fixpoint intersection_Desc s1 r1 f1 s2 r2 f2 f { measure (size_nat s1 + size_nat s2) } : Desc s1 r1 f1 -> Desc s2 r2 f2 -> (forall i, f i = f1 i && f2 i) -> Desc0 (intersection s1 s2) r1 f := fun HD1 HD2 Hf => _. Next Obligation. rewrite intersection_eq. unfold intersection_body. unfoldMethods. inversion HD1. * (* s1 is a Tip *) subst. clear intersection_Desc. generalize dependent f. induction HD2; intros f' Hf'; subst. + Int_Word_N. apply same_size_compare; try Nomega; intros. -- subst. apply tip_Desc0; auto. ** solve_f_eq. ** isBitMask. -- apply Desc0Nil. solve_f_eq_disjoint. + assert (N.log2 WIDTH <= rBits r0)%N by (eapply Desc_larger_WIDTH; eauto). assert (rBits r0 <= rBits (halfRange r false))%N by (apply subRange_smaller; auto). assert (rBits (halfRange r false) < rBits r)%N by (apply halfRange_smaller; auto). assert (rBits r1 < rBits r)%N by Nomega. apply nomatch_zero_smaller; try assumption; intros. - apply Desc0Nil. solve_f_eq_disjoint. - eapply Desc0_subRange; [apply IHHD2_1|]. clear IHHD2_1 IHHD2_2. ** solve_f_eq_disjoint. ** isSubrange_true; eapply Desc_rNonneg; eassumption. - eapply Desc0_subRange. ** apply IHHD2_2. solve_f_eq_disjoint. ** isSubrange_true; eapply Desc_rNonneg; eassumption. * (* s1 is a Bin *) inversion HD2. 
+ (* s2 is a Tip *) (* Need to undo the split of s1 *) change (Desc0 ((fix intersectBM (arg_11__ : IntSet) : IntSet := match arg_11__ as arg_11__' return (arg_11__' = arg_11__ -> IntSet) with | Bin p1 m1 l1 r5 => fun _ => if nomatch p0 p1 m1 then Nil else if zero p0 m1 then intersectBM l1 else intersectBM r5 | Tip kx1 bm1 => fun _ => if _GHC.Base.==_ kx1 p0 then tip kx1 (NToWord (N.land (wordToN bm1) (wordToN bm))) else Nil | Nil => fun _ => Nil end eq_refl) (Bin p msk s0 s3)) r1 f). rewrite H7. clear dependent s0. clear dependent s3. clear dependent r0. clear dependent r3. clear dependent f0. clear dependent f3. clear H1. subst. (* Now we are essentially in the same situation as above. *) (* Unfortunately, the two implementations of [intersectionBM] are slightly different in irrelevant details that make ist just hard enough to abstract over them in a lemma of its own. So let’s just copy’n’paste. *) clear intersection_Desc. generalize dependent f. induction HD1; intros f' Hf'; subst. ++ unfoldMethods. Int_Word_N. apply same_size_compare; try Nomega; intros. subst. apply tip_Desc0; auto. ** solve_f_eq_disjoint. ** isBitMask. ** apply Desc0Nil. solve_f_eq_disjoint. ++ assert (N.log2 WIDTH <= rBits r1)%N by (eapply Desc_larger_WIDTH; eauto). assert (rBits r1 <= rBits (halfRange r false))%N by (apply subRange_smaller; auto). assert (rBits (halfRange r false) < rBits r)%N by (apply halfRange_smaller; auto). assert (rBits r2 < rBits r)%N by Nomega. apply nomatch_zero_smaller; try assumption; intros. - apply Desc0Nil. solve_f_eq_disjoint. - eapply Desc0_subRange. ** apply IHHD1_1. solve_f_eq_disjoint. ** isSubrange_true; eapply Desc_rNonneg; eassumption. - eapply Desc0_subRange. ** apply IHHD1_2. solve_f_eq_disjoint. ** isSubrange_true; eapply Desc_rNonneg; eassumption. + subst. set (sl := Bin (NToInt (rPrefix r1)) (NToInt (rMask r1)) s0 s3) in *. set (sr := Bin (NToInt (rPrefix r2)) (NToInt (rMask r2)) s4 s5) in *. rewrite !shorter_spec by assumption. destruct (N.ltb_spec (rBits r2) (rBits r1)). ++ (* s2 is smaller than s1 *) apply nomatch_zero_smaller; try assumption; intros. - (* s2 is disjoint of s1 *) apply Desc0Nil. solve_f_eq_disjoint. - (* s2 is part of the left half of s1 *) eapply Desc0_subRange. eapply intersection_Desc; clear intersection_Desc; try eassumption. ** subst sl sr. simpl. omega. ** solve_f_eq_disjoint. ** isSubrange_true; eapply Desc_rNonneg; eassumption. - (* s2 is part of the right half of s1 *) eapply Desc0_subRange. eapply intersection_Desc; clear intersection_Desc; try eassumption. ** subst sl sr. simpl. omega. ** solve_f_eq_disjoint. ** isSubrange_true; eapply Desc_rNonneg; eassumption. ++ (* s2 is not smaller than s1 *) destruct (N.ltb_spec (rBits r1) (rBits r2)). -- (* s2 is smaller than s1 *) apply nomatch_zero_smaller; try assumption; intros. - (* s1 is disjoint of s2 *) apply Desc0Nil. solve_f_eq_disjoint. - (* s1 is part of the left half of s2 *) eapply Desc0_subRange. eapply intersection_Desc; clear intersection_Desc; try eassumption. ** subst sl sr. simpl. omega. ** solve_f_eq_disjoint. ** isSubrange_true; eapply Desc_rNonneg; eassumption. - (* s1 is part of the right half of s2 *) eapply Desc0_subRange. eapply intersection_Desc; clear intersection_Desc; try eassumption. ** subst sl sr. simpl. omega. ** solve_f_eq_disjoint. ** isSubrange_true; eapply Desc_rNonneg; eassumption. -- (* s1 and s2 are the same size *) Int_Word_N. apply same_size_compare; try Nomega; intros. - subst. eapply bin_Desc0; try assumption; try reflexivity. ** eapply intersection_Desc. 
--- subst sl sr. simpl. omega. --- eassumption. --- eassumption. --- intro i. reflexivity. ** eapply intersection_Desc. --- subst sl sr. simpl. omega. --- eassumption. --- eassumption. --- intro i. reflexivity. ** isSubrange_true; eapply Desc_rNonneg; eassumption. ** isSubrange_true; eapply Desc_rNonneg; eassumption. ** solve_f_eq_disjoint. - apply Desc0Nil. solve_f_eq_disjoint. Qed. Lemma intersection_Sem: forall s1 f1 s2 f2, Sem s1 f1 -> Sem s2 f2 -> Sem (intersection s1 s2) (fun i => f1 i && f2 i). Proof. intros. destruct H; [|destruct H0]. * apply SemNil. solve_f_eq. * replace (intersection s Nil) with Nil by (destruct s; reflexivity). apply SemNil. solve_f_eq. * eapply Desc0_Sem. eapply intersection_Desc; try eauto. Qed. Lemma intersection_WF: forall s1 s2, WF s1 -> WF s2 -> WF (intersection s1 s2). Proof. intros. destruct H, H0. eexists. apply intersection_Sem; eassumption. Qed. (** *** Verification of [difference] *) (** The following is copied from the body of [difference] *) Definition difference_body s1 s2 := match s1, s2 with | (Bin p1 m1 l1 r1 as t1) , (Bin p2 m2 l2 r2 as t2) => let difference2 := if nomatch p1 p2 m2 : bool then t1 else if zero p1 m2 : bool then difference t1 l2 else difference t1 r2 in let difference1 := if nomatch p2 p1 m1 : bool then t1 else if zero p2 m1 : bool then bin p1 m1 (difference l1 t2) r1 else bin p1 m1 l1 (difference r1 t2) in if shorter m1 m2 : bool then difference1 else if shorter m2 m1 : bool then difference2 else if p1 GHC.Base.== p2 : bool then bin p1 m1 (difference l1 l2) (difference r1 r2) else t1 | (Bin _ _ _ _ as t) , Tip kx bm => deleteBM kx bm t | (Bin _ _ _ _ as t) , Nil => t | (Tip kx bm as t1) , t2 => (fix differenceTip arg_12__ := match arg_12__ as arg_12__' return (arg_12__' = arg_12__ -> IntSet) with | Bin p2 m2 l2 r2 => fun _ => if nomatch kx p2 m2 : bool then t1 else if zero kx m2 : bool then differenceTip l2 else differenceTip r2 | Tip kx2 bm2 => fun _ => if kx GHC.Base.== kx2 : bool then tip kx (bm Data.Bits..&.(**) (Data.Bits.complement bm2)) else t1 | Nil => fun _ => t1 end eq_refl) t2 | Nil , _ => Nil end. Lemma difference_eq s1 s2 : difference s1 s2 = difference_body s1 s2. Proof. unfold difference, difference_func. rewrite Wf.WfExtensionality.fix_sub_eq_ext. unfold projT1, projT2. unfold difference_body. repeat match goal with | _ => progress replace (Sumbool.sumbool_of_bool false) with (@right (false = true) (false = false) (@eq_refl bool false)) by reflexivity | _ => progress replace (Sumbool.sumbool_of_bool true) with (@left (true = true) (true = false) (@eq_refl bool true)) by reflexivity | [ |- _ = match ?x with _ => _ end ] => destruct x | _ => assumption || reflexivity | [ |- _ ?x = _ ?x ] => induction x end. Qed. Program Fixpoint difference_Desc s1 r1 f1 s2 r2 f2 f { measure (size_nat s1 + size_nat s2) } : Desc s1 r1 f1 -> Desc s2 r2 f2 -> (forall i, f i = f1 i && negb (f2 i)) -> Desc0 (difference s1 s2) r1 f := fun HD1 HD2 Hf => _. Next Obligation. rewrite difference_eq. unfold difference_body. unfoldMethods. inversion HD1. * (* s1 is a Tip *) subst. clear difference_Desc. generalize dependent f. induction HD2; intros f' Hf'; subst. + Int_Word_N. apply same_size_compare; try Nomega; intros. -- subst. apply tip_Desc0; auto. ** solve_f_eq. ** isBitMask. -- eapply Desc0NotNil; try eassumption. ** apply isSubrange_refl. ** solve_f_eq_disjoint. + assert (N.log2 WIDTH <= rBits r0)%N by (eapply Desc_larger_WIDTH; eauto). assert (rBits r0 <= rBits (halfRange r false))%N by (apply subRange_smaller; auto). 
assert (rBits (halfRange r false) < rBits r)%N by (apply halfRange_smaller; auto). assert (rBits r1 < rBits r)%N by Nomega. apply nomatch_zero_smaller; try assumption; intros. - eapply Desc0NotNil; try eassumption. ** apply isSubrange_refl. ** solve_f_eq_disjoint. - eapply Desc0_subRange; [apply IHHD2_1|apply isSubrange_refl]. clear IHHD2_1 IHHD2_2. solve_f_eq_disjoint. - eapply Desc0_subRange; [apply IHHD2_2|apply isSubrange_refl]. clear IHHD2_1 IHHD2_2. solve_f_eq_disjoint. * (* s1 is a Bin *) inversion HD2. + (* s2 is a Tip *) subst. eapply deleteBM_Desc; try eassumption. solve_f_eq. + subst. set (sl := Bin (NToInt (rPrefix r1)) (NToInt (rMask r1)) s0 s3) in *. set (sr := Bin (NToInt (rPrefix r2)) (NToInt (rMask r2)) s4 s5) in *. rewrite !shorter_spec by assumption. destruct (N.ltb_spec (rBits r2) (rBits r1)). ** (* s2 is smaller than s1 *) apply nomatch_zero_smaller; try assumption; intros. - (* s2 is disjoint of s1 *) eapply Desc_Desc0; eapply DescBin; try eassumption; try reflexivity. solve_f_eq_disjoint. - (* s2 is part of the left half of s1 *) eapply bin_Desc0. ++ eapply difference_Desc; clear difference_Desc; try eassumption. subst sl sr. simpl. omega. intro i; reflexivity. ++ apply Desc_Desc0; eassumption. ++ eassumption. ++ eassumption. ++ eassumption. ++ reflexivity. ++ reflexivity. ++ solve_f_eq_disjoint. - (* s2 is part of the right half of s1 *) eapply bin_Desc0. ++ apply Desc_Desc0; eassumption. ++ eapply difference_Desc; clear difference_Desc; try eassumption. subst sl sr. simpl. omega. intro i; reflexivity. ++ eassumption. ++ eassumption. ++ eassumption. ++ reflexivity. ++ reflexivity. ++ solve_f_eq_disjoint. ** (* s2 is not smaller than s1 *) destruct (N.ltb_spec (rBits r1) (rBits r2)). -- (* s2 is smaller than s1 *) apply nomatch_zero_smaller; try assumption; intros. - (* s1 is disjoint of s2 *) eapply Desc_Desc0; eapply DescBin; try eassumption; try reflexivity. solve_f_eq_disjoint. - (* s1 is part of the left half of s2 *) eapply Desc0_subRange. eapply difference_Desc; clear difference_Desc; try eassumption. *** subst sl sr. simpl. omega. *** solve_f_eq_disjoint. *** apply isSubrange_refl. - (* s1 is part of the right half of s2 *) eapply Desc0_subRange. eapply difference_Desc; clear difference_Desc; try eassumption. *** subst sl sr. simpl. omega. *** solve_f_eq_disjoint. *** apply isSubrange_refl. -- (* s1 and s2 are the same size *) assert (rBits r1 = rBits r2) by Nomega. Int_Word_N. apply same_size_compare; try Nomega; intros. - subst. eapply bin_Desc0; try assumption; try reflexivity. ++ eapply difference_Desc. --- subst sl sr. simpl. omega. --- eassumption. --- eassumption. --- intro i. reflexivity. ++ eapply difference_Desc. --- subst sl sr. simpl. omega. --- eassumption. --- eassumption. --- intro i. reflexivity. ++ assumption. ++ assumption. ++ solve_f_eq_disjoint. - eapply Desc_Desc0; eapply DescBin; try eassumption; try reflexivity. solve_f_eq_disjoint. Qed. Lemma difference_Sem: forall s1 f1 s2 f2, Sem s1 f1 -> Sem s2 f2 -> Sem (difference s1 s2) (fun i => f1 i && negb (f2 i)). Proof. intros. destruct H; [|destruct H0]. * apply SemNil. solve_f_eq. * replace (difference s Nil) with s by (destruct s; reflexivity). eapply DescSem. eapply Desc_change_f. eassumption. solve_f_eq. * eapply Desc0_Sem. eapply difference_Desc; try eauto. Qed. Lemma difference_WF: forall s1 s2, WF s1 -> WF s2 -> WF (difference s1 s2). Proof. intros. destruct H, H0. eexists. apply difference_Sem; eassumption. Qed. 
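(* An illustrative corollary, not part of the original development: the
   membership equation for [difference], obtained by chaining [difference_Sem]
   with [member_Sem].  The lemma name is ours; only lemmas proved above are
   used. *)
Lemma member_difference_sketch:
  forall i s1 f1 s2 f2,
  Sem s1 f1 -> Sem s2 f2 ->
  member i (difference s1 s2) = member i s1 && negb (member i s2).
Proof.
  intros i s1 f1 s2 f2 HSem1 HSem2.
  rewrite (member_Sem (i:=i) HSem1), (member_Sem (i:=i) HSem2).
  (* [difference_Sem] describes the difference pointwise. *)
  rewrite (member_Sem (i:=i) (difference_Sem _ _ _ _ HSem1 HSem2)).
  reflexivity.
Qed.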
(** *** Verification of [disjoint] *) (** The following is copied from the body of [disjoint] *) Definition disjoint_body s1 s2 := match s1, s2 with | (Bin p1 m1 l1 r1 as t1) , (Bin p2 m2 l2 r2 as t2) => let disjoint2 := if nomatch p1 p2 m2 then true else if zero p1 m2 then disjoint t1 l2 else disjoint t1 r2 in let disjoint1 := if nomatch p2 p1 m1 then true else if zero p2 m1 then disjoint l1 t2 else disjoint r1 t2 in if shorter m1 m2 then disjoint1 else if shorter m2 m1 then disjoint2 else if p1 GHC.Base.== p2 then andb (disjoint l1 l2) (disjoint r1 r2) else true | (Bin _ _ _ _ as t1) , Tip kx2 bm2 => let fix disjointBM arg_11__ := match arg_11__ with | Bin p1 m1 l1 r1 => if nomatch kx2 p1 m1 then true else if zero kx2 m1 then disjointBM l1 else disjointBM r1 | Tip kx1 bm1 => if kx1 GHC.Base.== kx2 then (bm1 Data.Bits..&.(**) bm2) GHC.Base.== GHC.Num.fromInteger 0 else true | Nil => true end in disjointBM t1 | Bin _ _ _ _ , Nil => true | Tip kx1 bm1 , t2 => let fix disjointBM arg_18__ := match arg_18__ with | Bin p2 m2 l2 r2 => if nomatch kx1 p2 m2 then true else if zero kx1 m2 then disjointBM l2 else disjointBM r2 | Tip kx2 bm2 => if kx1 GHC.Base.== kx2 then (bm1 Data.Bits..&.(**) bm2) GHC.Base.== GHC.Num.fromInteger 0 else true | Nil => true end in disjointBM t2 | Nil , _ => true end. Lemma disjoint_eq s1 s2 : disjoint s1 s2 = disjoint_body s1 s2. Proof. unfold disjoint, disjoint_func. rewrite Wf.WfExtensionality.fix_sub_eq_ext. unfold projT1, projT2. unfold disjoint_body. repeat match goal with | _ => progress replace (Sumbool.sumbool_of_bool false) with (@right (false = true) (false = false) (@eq_refl bool false)) by reflexivity | _ => progress replace (Sumbool.sumbool_of_bool true) with (@left (true = true) (true = false) (@eq_refl bool true)) by reflexivity | [ |- _ = match ?x with _ => _ end ] => destruct x | _ => assumption || reflexivity | [ |- _ ?x = _ ?x ] => induction x end. Qed. Program Fixpoint disjoint_Desc s1 r1 f1 s2 r2 f2 { measure (size_nat s1 + size_nat s2) } : Desc s1 r1 f1 -> Desc s2 r2 f2 -> disjoint s1 s2 = true <-> (forall i, f1 i && f2 i = false) := _. Next Obligation. Ltac solve_eq_disjoint_specialize := intro i; repeat match goal with H : (forall i, _) |- _ => specialize (H i) end; repeat split_bool; repeat point_to_inRange; repeat saturate_inRange; try inRange_disjoint; simpl in *; rewrite ?orb_true_r, ?andb_true_l in *; try congruence. rename H into HD1, H0 into HD0. rewrite disjoint_eq. unfold disjoint_body. unfoldMethods. inversion HD1. * (* s1 is a Tip *) subst. clear disjoint_Desc. induction HD0; intros; subst. + Int_Word_N. apply same_size_compare; try Nomega; intros. -- simpl. subst r1. rewrite N.eqb_eq. rewrite <- N.bits_inj_iff. unfold N.eqf. setoid_rewrite N.land_spec. setoid_rewrite N.bits_0. setoid_rewrite H4. setoid_rewrite H1. split; intro. ** intro i. unfold bitmapInRange. destruct (inRange i r). ++ apply H. ++ reflexivity. ** intro n. destruct (N.ltb_spec n (2 ^ rBits r))%N. ++ specialize (H (intoRange r n)). rewrite !bitmapInRange_intoRange in H by assumption. assumption. ++ replace (rBits r) in H6. change (WIDTH <= n)%N in H6. rewrite isBitMask0_outside by isBitMask. reflexivity. -- split; intro; try reflexivity. solve_f_eq_disjoint. + assert (N.log2 WIDTH <= rBits r0)%N by (eapply Desc_larger_WIDTH; eauto). assert (rBits r0 <= rBits (halfRange r false))%N by (apply subRange_smaller; auto). assert (rBits (halfRange r false) < rBits r)%N by (apply halfRange_smaller; auto). assert (rBits r1 < rBits r)%N by Nomega. 
apply nomatch_zero_smaller; try assumption; intros. - clear IHHD0_1 IHHD0_2. split; intro; try reflexivity. solve_f_eq_disjoint. - rewrite IHHD0_1; clear IHHD0_1 IHHD0_2. setoid_rewrite H7. setoid_rewrite H1. split; intro; solve_eq_disjoint_specialize. - rewrite IHHD0_2; clear IHHD0_1 IHHD0_2. setoid_rewrite H7. setoid_rewrite H1. split; intro; solve_eq_disjoint_specialize. * (* s1 is a Bin *) inversion HD0. + (* s2 is a Tip *) (* Need to undo the split of s1 *) change ((fix disjointBM (arg_11__ : IntSet) : bool := match arg_11__ with | Bin p1 m1 l1 r5 => if nomatch p0 p1 m1 then true else if zero p0 m1 then disjointBM l1 else disjointBM r5 | Tip kx1 bm1 => if intToN kx1 =? intToN p0 then wordToN (NToWord (N.land (wordToN bm1) (wordToN bm))) =? wordToN (NToWord (Z.to_N 0)) else true | Nil => true end) (Bin p msk s0 s3) = true <-> (forall i : N, f1 i && f2 i = false)). rewrite H7. clear dependent s0. clear dependent s3. clear dependent r0. clear dependent r3. clear dependent f0. clear dependent f3. clear dependent p. clear dependent msk. clear H1. subst. (* Now we are essentially in the same situation as above. *) (* Unfortunately, the two implementations of [intersectionBM] are slightly different in irrelevant details that make ist just hard enough to abstract over them in a lemma of its own. So let’s just copy’n’paste. *) clear disjoint_Desc. induction HD1; intros; subst. ++ Int_Word_N. apply same_size_compare; try Nomega; intros. -- simpl. subst r2. rewrite N.eqb_eq. rewrite <- N.bits_inj_iff. unfold N.eqf. setoid_rewrite N.land_spec. setoid_rewrite N.bits_0. setoid_rewrite H12. setoid_rewrite H1. split; intro. ** intro i. unfold bitmapInRange. destruct (inRange i r). +++ apply H. +++ reflexivity. ** intro n. destruct (N.ltb_spec n (2 ^ rBits r))%N. +++ specialize (H (intoRange r n)). rewrite !bitmapInRange_intoRange in H by assumption. assumption. +++ replace (rBits r) in H3. change (WIDTH <= n)%N in H3. rewrite isBitMask0_outside by isBitMask. reflexivity. -- split; intro; try reflexivity. solve_f_eq_disjoint. ++ assert (N.log2 WIDTH <= rBits r1)%N by (eapply Desc_larger_WIDTH; eauto). assert (rBits r1 <= rBits (halfRange r false))%N by (apply subRange_smaller; auto). assert (rBits (halfRange r false) < rBits r)%N by (apply halfRange_smaller; auto). assert (rBits r2 < rBits r)%N by Nomega. apply nomatch_zero_smaller; try assumption; intros. - clear IHHD1_1 IHHD1_2. split; intro; try reflexivity. solve_f_eq_disjoint. - rewrite IHHD1_1; clear IHHD1_1 IHHD1_2. setoid_rewrite H12. setoid_rewrite H4. split; intro; solve_eq_disjoint_specialize. - rewrite IHHD1_2; clear IHHD1_1 IHHD1_2. setoid_rewrite H12. setoid_rewrite H4. split; intro; solve_eq_disjoint_specialize. + subst. set (sl := Bin (NToInt (rPrefix r1)) (NToInt (rMask r1)) s0 s3) in *. set (sr := Bin (NToInt (rPrefix r2)) (NToInt (rMask r2)) s4 s5) in *. rewrite !shorter_spec by assumption. destruct (N.ltb_spec (rBits r2) (rBits r1)). ++ (* s2 is smaller than s1 *) apply nomatch_zero_smaller; try assumption; intros. - (* s2 is disjoint of s1 *) split; intro; try reflexivity. solve_f_eq_disjoint. - (* s2 is part of the left half of s1 *) rewrite disjoint_Desc; try eassumption. ** setoid_rewrite H6. split; intro; solve_eq_disjoint_specialize. ** subst sl sr. simpl. omega. - (* s2 is part of the right half of s1 *) rewrite disjoint_Desc; try eassumption. ** setoid_rewrite H6. split; intro; solve_eq_disjoint_specialize. ** subst sl sr. simpl. omega. ++ (* s2 is not smaller than s1 *) destruct (N.ltb_spec (rBits r1) (rBits r2)). 
-- (* s2 is smaller than s1 *) apply nomatch_zero_smaller; try assumption; intros. - (* s1 is disjoint of s2 *) split; intro; try reflexivity. solve_f_eq_disjoint. - (* s1 is part of the left half of s2 *) rewrite disjoint_Desc; try eassumption. ** setoid_rewrite H17. split; intro; solve_eq_disjoint_specialize. ** subst sl sr. simpl. omega. - (* s1 is part of the right half of s2 *) rewrite disjoint_Desc; try eassumption. ** setoid_rewrite H17. split; intro; solve_eq_disjoint_specialize. ** subst sl sr. simpl. omega. -- (* s1 and s2 are the same size *) Int_Word_N. apply same_size_compare; try Nomega; intros. - subst. rewrite andb_true_iff. rewrite !disjoint_Desc; try eassumption. ** setoid_rewrite H17. setoid_rewrite H6. intuition solve_eq_disjoint_specialize. ** subst sl sr. simpl. omega. ** subst sl sr. simpl. omega. - split; intro; try reflexivity. solve_f_eq_disjoint. Qed. Lemma disjoint_Sem s1 f1 s2 f2 : Sem s1 f1 -> Sem s2 f2 -> disjoint s1 s2 = true <-> (forall i, f1 i && f2 i = false). Proof. intros HSem1 HSem2. destruct HSem1 as [f1 def_f1 | s1 r1 f1 HDesc1]. * rewrite disjoint_eq; simpl. split; intro; try reflexivity. setoid_rewrite andb_false_iff; intuition. * destruct HSem2 as [f2 def_f2 | s2 r2 f2 HDesc2]. + replace (disjoint s1 Nil) with true by (destruct s1; reflexivity). split; intro; try reflexivity. setoid_rewrite andb_false_iff; intuition. + eapply disjoint_Desc; eassumption. Qed. (** ** Verification of [split] *) (* Punting on [split] for now while introducing fixed-width numbers. [splitGo] lemmas need a precondition that the tree contains only negative or only positive numbers. *) (* Definition splitGo : Key -> IntSet -> IntSet * IntSet. Proof. let rhs := eval unfold split in split in match rhs with fun x s => match _ with Nil => match ?go _ _ with _ => _ end | _ => _ end => exact go end. Defined. Lemma splitGo_Sem : forall x s r f, Desc s r f -> forall (P : IntSet * IntSet -> Prop), (forall s1 f1 s2 f2, Sem s1 f1 -> Sem s2 f2 -> (forall i, f1 i = f i && (i <? x)) -> (forall i, f2 i = f i && (x <? i)) -> P (s1, s2)) -> P (splitGo x s) : Prop. Proof. intros ???? HD. induction HD; intros X HX. * cbn -[bitmapOf prefixOf N.ones]. fold WIDTH. destruct (N.ltb_spec x p); only 2: destruct (N.ltb_spec p (prefixOf x)). - (* s is Tip, x is below *) eapply HX. + constructor; intro; reflexivity. + eapply DescSem. constructor; try eassumption. + solve_f_eq. apply bitmapInRange_inside in Heqb. apply inRange_bounded in Heqb. rewrite N.ltb_lt in Heqb0. lia. + solve_f_eq. apply bitmapInRange_inside in Heqb. apply inRange_bounded in Heqb. rewrite N.ltb_ge in Heqb0. lia. - (* s is Tip, x is above *) eapply HX. + eapply DescSem. constructor; try eassumption. + constructor; intro; reflexivity. + solve_f_eq. apply bitmapInRange_inside in Heqb. rewrite <- prefixOf_eqb_spec in Heqb by assumption. rewrite N.eqb_eq in Heqb. rewrite N.ltb_ge in Heqb0. subst. apply prefixOf_mono in Heqb0. lia. + solve_f_eq. apply bitmapInRange_inside in Heqb. rewrite <- prefixOf_eqb_spec in Heqb by assumption. rewrite N.eqb_eq in Heqb. rewrite N.ltb_lt in Heqb0. subst. assert (x <= i) by lia. apply prefixOf_mono in H. lia. - (* s is Tip, x is part of it *) assert ((bitmapOf x - 1)%N = (N.ones (suffixOf x)%N)) as Htmp. { rewrite N.ones_equiv. rewrite N.pred_sub. unfold bitmapOf. rewrite bitmapOfSuffix_pow. reflexivity. } assert (prefixOf x = rPrefix r). { subst. unfold Prefix, Nat in *. apply prefixOf_mono in H3. rewrite prefixOf_rPrefix in H3 by assumption. lia. } rewrite Htmp; clear Htmp. eapply HX. 
+ eapply Desc0_Sem. eapply tip_Desc0; try eassumption; try reflexivity. apply isBitMask0_land; try isBitMask. apply isBitMask0_ones. pose proof (suffixOf_lt_WIDTH x). lia. + eapply Desc0_Sem. eapply tip_Desc0; try eassumption; try reflexivity. apply isBitMask0_land; try isBitMask. apply isBitMask0_ldiff. apply isBitMask0_ones. Nomega. + intro i. rewrite H1. rewrite bitmapInRange_land. destruct (bitmapInRange r bm i) eqn:?; try reflexivity; simpl. apply bitmapInRange_inside in Heqb. rewrite bitmapInRange_ones by assumption. rewrite <- prefixOf_eqb_spec in Heqb by assumption. rewrite N.eqb_eq in *. unfold Prefix, Nat in *. pose proof (prefixOf_suffixOf i). pose proof (prefixOf_suffixOf x). apply eq_iff_eq_true. rewrite !N.ltb_lt. lia. + intro i. rewrite H1. rewrite bitmapInRange_land. destruct (bitmapInRange r bm i) eqn:?; try reflexivity; rewrite !andb_true_l. apply bitmapInRange_inside in Heqb. rewrite bitmapInRange_ldiff. rewrite bitmapInRange_ones by assumption. rewrite suffixOf_plus_bitmapOf. rewrite bitmapInRange_ones by assumption. rewrite <- prefixOf_eqb_spec in Heqb by assumption. rewrite N.eqb_eq in *. unfold Prefix, Nat in *. pose proof (prefixOf_suffixOf i). pose proof (prefixOf_suffixOf x). apply eq_iff_eq_true. rewrite andb_true_iff, negb_true_iff. rewrite !N.ltb_lt, N.ltb_ge. assert (suffixOf i < WIDTH) by apply suffixOf_lt_WIDTH. lia. * simpl. unfoldMethods. subst. rewrite match_nomatch. rewrite if_negb. apply nomatch_zero; try assumption; intros. + (* s is bin, x is outside *) apply inRange_false_bounded in H2. clear IHHD1 IHHD2. destruct (N.ltb_spec x (rPrefix r)). - (* s is bin, x is below *) eapply HX. ** constructor; intro; reflexivity. ** eapply DescSem. econstructor; try eassumption; reflexivity. ** intros i. simpl. rewrite H4. destruct (N.ltb_spec i x). ++ destruct (inRange i r) eqn:Hir; only 1: (apply inRange_bounded in Hir; lia). rewrite (Desc_outside HD1) by inRange_false. rewrite (Desc_outside HD2) by inRange_false. reflexivity. ++ rewrite andb_false_r. reflexivity. ** intros i. destruct (N.ltb_spec x i). ++ rewrite andb_true_r. reflexivity. ++ destruct (inRange i r) eqn:Hir; only 1: (apply inRange_bounded in Hir; lia). rewrite H4. rewrite (Desc_outside HD1) by inRange_false. rewrite (Desc_outside HD2) by inRange_false. reflexivity. - (* s is bin, x is above *) eapply HX. ** eapply DescSem. econstructor; try eassumption; reflexivity. ** constructor; intro; reflexivity. ** intros i. simpl. rewrite H4. destruct (N.ltb_spec i x). ++ rewrite andb_true_r. reflexivity. ++ destruct (inRange i r) eqn:Hir; only 1: (apply inRange_bounded in Hir; lia). rewrite (Desc_outside HD1) by inRange_false. rewrite (Desc_outside HD2) by inRange_false. reflexivity. ** intros i. simpl. rewrite H4. destruct (N.ltb_spec x i). ++ destruct (inRange i r) eqn:Hir; only 1: (apply inRange_bounded in Hir; lia). rewrite (Desc_outside HD1) by inRange_false. rewrite (Desc_outside HD2) by inRange_false. reflexivity. ++ rewrite andb_false_r. reflexivity. + eapply IHHD1. clear IHHD1 IHHD2. intros sl fl sr fr Hsl Hsr Hfl Hfr. eapply HX; clear HX. - eassumption. - apply union_Sem; [ eassumption | eapply DescSem; eassumption]. - intro i. rewrite H4, Hfl; clear H4 Hfl Hfr. destruct (f2 i) eqn:?, (N.ltb_spec i x); rewrite ?andb_true_r, ?andb_false_r, ?orb_true_r, ?orb_false_r; try reflexivity; simpl. apply (Desc_inside HD2) in Heqb. assert (inRange i (halfRange r true) = true) by inRange_true. apply inRange_bounded in H5. apply inRange_bounded in H2. rewrite rPrefix_halfRange_otherhalf in * by assumption. 
rewrite !rBits_halfRange in *. lia. - intro i. rewrite H4, Hfr; clear H4 Hfl Hfr. destruct (f2 i) eqn:?, (N.ltb_spec x i); rewrite ?andb_true_r, ?andb_false_r; try reflexivity; simpl. apply (Desc_inside HD2) in Heqb. assert (inRange i (halfRange r true) = true) by inRange_true. apply inRange_bounded in H5. apply inRange_bounded in H2. rewrite rPrefix_halfRange_otherhalf in * by assumption. rewrite !rBits_halfRange in *. lia. + eapply IHHD2. clear IHHD1 IHHD2. intros sl fl sr fr Hsl Hsr Hfl Hfr. eapply HX; clear HX. - apply union_Sem; [ eassumption | eapply DescSem; eassumption]. - eassumption. - intro i. rewrite H4, Hfl; clear H4 Hfl Hfr. destruct (f1 i) eqn:?, (N.ltb_spec i x); rewrite ?andb_true_r, ?andb_false_r, ?orb_true_r, ?orb_false_r; try reflexivity; simpl. apply (Desc_inside HD1) in Heqb. assert (inRange i (halfRange r false) = true) by inRange_true. apply inRange_bounded in H5. apply inRange_bounded in H3. rewrite rPrefix_halfRange_otherhalf in * by assumption. rewrite !rBits_halfRange in *. lia. - intro i. rewrite H4, Hfr; clear H4 Hfl Hfr. destruct (f1 i) eqn:?, (N.ltb_spec x i); rewrite ?andb_true_r, ?andb_false_r, ?orb_true_r, ?orb_false_r; try reflexivity; simpl. apply (Desc_inside HD1) in Heqb. assert (inRange i (halfRange r false) = true) by inRange_true. apply inRange_bounded in H5. apply inRange_bounded in H3. rewrite rPrefix_halfRange_otherhalf in * by assumption. rewrite !rBits_halfRange in *. lia. Qed. Lemma split_Sem : forall x s f, Sem s f -> forall (P : IntSet * IntSet -> Prop), (forall s1 f1 s2 f2, Sem s1 f1 -> Sem s2 f2 -> (forall i, f1 i = f i && (i <? x)) -> (forall i, f2 i = f i && (x <? i)) -> P (s1, s2)) -> P (split x s) : Prop. Proof. intros ??? HSem X HX. unfold split. fold splitGo. destruct HSem. * simpl. eapply HX; try constructor; try reflexivity; solve_f_eq. * destruct HD eqn:?; unfoldMethods. + eapply splitGo_Sem; only 1: eassumption; intros sl fl sr fr Hsl Hsr Hfl Hfr. eapply HX; eassumption. + simpl Z.to_N. destruct (N.ltb_spec msk 0). - (* This branch is invalid since we only allow positive members. Otherwise, we would have to do something here. *) exfalso. lia. - eapply splitGo_Sem; only 1: eassumption; intros sl fl sr fr Hsl Hsr Hfl Hfr. eapply HX; eassumption. Qed. Theorem split_WF (x : N) (s : IntSet) : WF s -> let '(l,r) := split x s in WF l /\ WF r. Proof. intros [fs Sem_s]. apply split_Sem with fs; [assumption|]; simpl; intros l fl r fr Sem_l Sem_r def_fl def_fr. split; [exists fl | exists fr]; assumption. Qed. Theorem split_WF' (x : N) (s : IntSet) : WF s -> WF (fst (split x s)) /\ WF (snd (split x s)). Proof. generalize (split_WF x s); destruct (split x s); auto. Qed. Corollary split_1_WF (x : N) (s : IntSet) : WF s -> WF (fst (split x s)). Proof. apply split_WF'. Qed. Corollary split_2_WF (x : N) (s : IntSet) : WF s -> WF (snd (split x s)). Proof. apply split_WF'. Qed. (** ** Verification of [splitMember] *) Definition splitMemberGo : Key -> IntSet -> IntSet * bool * IntSet. Proof. let rhs := eval unfold splitMember in splitMember in match rhs with fun x t => match _ with | Nil => ?go x t | _ => _ end => exact go end. Defined. Lemma splitMemberGo_Sem : forall x s r f, Desc s r f -> forall (P : IntSet * bool * IntSet -> Prop), (forall s1 f1 s2 f2 b, Sem s1 f1 -> Sem s2 f2 -> f x = b -> (forall i, f1 i = f i && (i <? x)) -> (forall i, f2 i = f i && (x <? i)) -> P (s1, b, s2)) -> P (splitMemberGo x s) : Prop. Proof. intros ???? HD. induction HD; intros X HX. * cbn -[bitmapOf prefixOf N.ones]. fold WIDTH. 
destruct (N.ltb_spec x p); only 2: destruct (N.ltb_spec p (prefixOf x)). - (* s is Tip, x is below *) eapply HX. + constructor; intro; reflexivity. + eapply DescSem. constructor; try eassumption. + rewrite H1. apply bitmapInRange_outside. rewrite inRange_false_bounded_iff. lia. + solve_f_eq. apply bitmapInRange_inside in Heqb. apply inRange_bounded in Heqb. rewrite N.ltb_lt in Heqb0. lia. + solve_f_eq. apply bitmapInRange_inside in Heqb. apply inRange_bounded in Heqb. rewrite N.ltb_ge in Heqb0. lia. - (* s is Tip, x is above *) eapply HX. + eapply DescSem. constructor; try eassumption. + constructor; intro; reflexivity. + rewrite H1. apply bitmapInRange_outside. rewrite <- prefixOf_eqb_spec by assumption. rewrite N.eqb_neq. lia. + solve_f_eq. apply bitmapInRange_inside in Heqb. rewrite <- prefixOf_eqb_spec in Heqb by assumption. rewrite N.eqb_eq in Heqb. rewrite N.ltb_ge in Heqb0. subst. apply prefixOf_mono in Heqb0. lia. + solve_f_eq. apply bitmapInRange_inside in Heqb. rewrite <- prefixOf_eqb_spec in Heqb by assumption. rewrite N.eqb_eq in Heqb. rewrite N.ltb_lt in Heqb0. subst. assert (x <= i) by lia. apply prefixOf_mono in H. lia. - (* s is Tip, x is part of it *) assert ((bitmapOf x - 1)%N = (N.ones (suffixOf x)%N)) as Htmp. { rewrite N.ones_equiv. rewrite N.pred_sub. unfold bitmapOf. rewrite bitmapOfSuffix_pow. reflexivity. } rewrite Htmp. assert (prefixOf x = rPrefix r). { subst. unfold Prefix, Nat in *. apply prefixOf_mono in H3. rewrite prefixOf_rPrefix in H3 by assumption. lia. } assert (inRange x r = true). { rewrite <- prefixOf_eqb_spec by assumption. rewrite N.eqb_eq. assumption. } eapply HX. + eapply Desc0_Sem. eapply tip_Desc0; try eassumption; try reflexivity. apply isBitMask0_land; try isBitMask. apply isBitMask0_ones. pose proof (suffixOf_lt_WIDTH x). lia. + eapply Desc0_Sem. eapply tip_Desc0; try eassumption; try reflexivity. apply isBitMask0_land; try isBitMask. apply isBitMask0_ldiff. apply isBitMask0_ones. Nomega. + rewrite H1. unfold bitmapOf. rewrite bitmapOfSuffix_pow. rewrite N.land_comm. rewrite N_land_pow2_testbit. unfold bitmapInRange. replace (inRange x r). rewrite H0. reflexivity. + intro i. rewrite H1. rewrite bitmapInRange_land. destruct (bitmapInRange r bm i) eqn:?; try reflexivity; simpl. apply bitmapInRange_inside in Heqb. rewrite bitmapInRange_ones by assumption. rewrite <- prefixOf_eqb_spec in Heqb by assumption. rewrite N.eqb_eq in *. unfold Prefix, Int in *. pose proof (prefixOf_suffixOf i). pose proof (prefixOf_suffixOf x). apply eq_iff_eq_true. rewrite !N.ltb_lt. lia. + intro i. rewrite H1. rewrite bitmapInRange_land. destruct (bitmapInRange r bm i) eqn:?; try reflexivity; rewrite !andb_true_l. apply bitmapInRange_inside in Heqb. rewrite bitmapInRange_ldiff. rewrite bitmapInRange_ones by assumption. rewrite suffixOf_plus_bitmapOf. rewrite bitmapInRange_ones by assumption. rewrite <- prefixOf_eqb_spec in Heqb by assumption. rewrite N.eqb_eq in *. unfold Prefix, Int in *. pose proof (prefixOf_suffixOf i). pose proof (prefixOf_suffixOf x). apply eq_iff_eq_true. rewrite andb_true_iff, negb_true_iff. rewrite !N.ltb_lt, N.ltb_ge. assert (suffixOf i < WIDTH) by apply suffixOf_lt_WIDTH. lia. * simpl. unfoldMethods. subst. rewrite match_nomatch. rewrite if_negb. apply nomatch_zero; try assumption; intros. + (* s is bin, x is outside *) pose proof (inRange_false_bounded _ _ H2). clear IHHD1 IHHD2. destruct (N.ltb_spec x (rPrefix r)). - (* s is bin, x is below *) eapply HX; clear HX. ** constructor; intro; reflexivity. ** eapply DescSem. 
econstructor; try eassumption; reflexivity. ** rewrite H4. rewrite (Desc_outside HD1) by inRange_false. rewrite (Desc_outside HD2) by inRange_false. reflexivity. ** intros i. simpl. rewrite H4. destruct (N.ltb_spec i x). ++ destruct (inRange i r) eqn:Hir; only 1: (apply inRange_bounded in Hir; lia). rewrite (Desc_outside HD1) by inRange_false. rewrite (Desc_outside HD2) by inRange_false. reflexivity. ++ rewrite andb_false_r. reflexivity. ** intros i. destruct (N.ltb_spec x i). ++ rewrite andb_true_r. reflexivity. ++ destruct (inRange i r) eqn:Hir; only 1: (apply inRange_bounded in Hir; lia). rewrite H4. rewrite (Desc_outside HD1) by inRange_false. rewrite (Desc_outside HD2) by inRange_false. reflexivity. - (* s is bin, x is above *) eapply HX. ** eapply DescSem. econstructor; try eassumption; reflexivity. ** constructor; intro; reflexivity. ** rewrite H4. rewrite (Desc_outside HD1) by inRange_false. rewrite (Desc_outside HD2) by inRange_false. reflexivity. ** intros i. simpl. rewrite H4. destruct (N.ltb_spec i x). ++ rewrite andb_true_r. reflexivity. ++ destruct (inRange i r) eqn:Hir; only 1: (apply inRange_bounded in Hir; lia). rewrite (Desc_outside HD1) by inRange_false. rewrite (Desc_outside HD2) by inRange_false. reflexivity. ** intros i. simpl. rewrite H4. destruct (N.ltb_spec x i). ++ destruct (inRange i r) eqn:Hir; only 1: (apply inRange_bounded in Hir; lia). rewrite (Desc_outside HD1) by inRange_false. rewrite (Desc_outside HD2) by inRange_false. reflexivity. ++ rewrite andb_false_r. reflexivity. + eapply IHHD1. clear IHHD1 IHHD2. intros sl fl sr fr b Hsl Hsr Hb Hfl Hfr. eapply HX; clear HX. - eassumption. - apply union_Sem; [ eassumption | eapply DescSem; eassumption]. - rewrite H4. rewrite Hb. destruct b; try reflexivity; try simpl. apply (Desc_outside HD2). inRange_false. - intro i. rewrite H4, Hfl; clear H4 Hfl Hfr. destruct (f2 i) eqn:?, (N.ltb_spec i x); rewrite ?andb_true_r, ?andb_false_r, ?orb_true_r, ?orb_false_r; try reflexivity; simpl. apply (Desc_inside HD2) in Heqb0. assert (inRange i (halfRange r true) = true) by inRange_true. apply inRange_bounded in H5. apply inRange_bounded in H2. rewrite rPrefix_halfRange_otherhalf in * by assumption. rewrite !rBits_halfRange in *. lia. - intro i. rewrite H4, Hfr; clear H4 Hfl Hfr. destruct (f2 i) eqn:?, (N.ltb_spec x i); rewrite ?andb_true_r, ?andb_false_r; try reflexivity; simpl. apply (Desc_inside HD2) in Heqb0. assert (inRange i (halfRange r true) = true) by inRange_true. apply inRange_bounded in H5. apply inRange_bounded in H2. rewrite rPrefix_halfRange_otherhalf in * by assumption. rewrite !rBits_halfRange in *. lia. + eapply IHHD2. clear IHHD1 IHHD2. intros sl fl sr fr b Hsl Hsr Hb Hfl Hfr. eapply HX; clear HX. - apply union_Sem; [ eassumption | eapply DescSem; eassumption]. - eassumption. - rewrite H4. rewrite Hb. destruct b; rewrite ?orb_false_r, ?orb_true_r; try reflexivity. apply (Desc_outside HD1). inRange_false. - intro i. rewrite H4, Hfl; clear H4 Hfl Hfr. destruct (f1 i) eqn:?, (N.ltb_spec i x); rewrite ?andb_true_r, ?andb_false_r, ?orb_true_r, ?orb_false_r; try reflexivity; simpl. apply (Desc_inside HD1) in Heqb0. assert (inRange i (halfRange r false) = true) by inRange_true. apply inRange_bounded in H5. apply inRange_bounded in H3. rewrite rPrefix_halfRange_otherhalf in * by assumption. rewrite !rBits_halfRange in *. lia. - intro i. rewrite H4, Hfr; clear H4 Hfl Hfr. destruct (f1 i) eqn:?, (N.ltb_spec x i); rewrite ?andb_true_r, ?andb_false_r, ?orb_true_r, ?orb_false_r; try reflexivity; simpl. 
apply (Desc_inside HD1) in Heqb0. assert (inRange i (halfRange r false) = true) by inRange_true. apply inRange_bounded in H5. apply inRange_bounded in H3. rewrite rPrefix_halfRange_otherhalf in * by assumption. rewrite !rBits_halfRange in *. lia. Qed. Lemma splitMember_Sem : forall x s f, Sem s f -> forall (P : IntSet * bool * IntSet -> Prop), (forall s1 f1 s2 f2 b, Sem s1 f1 -> Sem s2 f2 -> f x = b -> (forall i, f1 i = f i && (i <? x)) -> (forall i, f2 i = f i && (x <? i)) -> P (s1, b, s2)) -> P (splitMember x s) : Prop. Proof. intros ??? HSem X HX. unfold splitMember. fold splitMemberGo. destruct HSem. * simpl. eapply HX; try constructor; try reflexivity; try solve_f_eq. apply H. * destruct HD eqn:?; unfoldMethods. + eapply splitMemberGo_Sem; only 1: eassumption; intros sl fl sr fr Hsl Hsr Hfl Hfr. eapply HX; eassumption. + simpl Z.to_N. destruct (N.ltb_spec msk 0). - (* This branch is invalid since we only allow positive members. Otherwise, we would have to do something here. *) lia. - eapply splitMemberGo_Sem; only 1: eassumption; intros sl fl sr fr Hsl Hsr Hfl Hfr. eapply HX; eassumption. Qed. Theorem splitMember_WF (x : N) (s : IntSet) : WF s -> let '(l,_,r) := splitMember x s in WF l /\ WF r. Proof. intros [fs Sem_s]. apply splitMember_Sem with fs; [assumption|]; simpl; intros l fl r fr b Sem_l Sem_r def_b def_fl def_fr. split; [exists fl | exists fr]; assumption. Qed. Theorem splitMember_WF' (x : N) (s : IntSet) : WF s -> WF (fst (fst (splitMember x s))) /\ WF (snd (splitMember x s)). Proof. generalize (splitMember_WF x s); destruct (splitMember x s) as [[? ?] ?]; auto. Qed. Corollary splitMember_1_WF (x : N) (s : IntSet) : WF s -> WF (fst (fst (splitMember x s))). Proof. apply splitMember_WF'. Qed. Corollary splitMember_2_WF (x : N) (s : IntSet) : WF s -> WF (snd (splitMember x s)). Proof. apply splitMember_WF'. Qed. *) (** *** Verification of [foldr] *) (* We can extract the argument to [wfFix2] from the definition of [foldrBits]. *) Definition foldrBits_go {a} (p : Int) (f : Int -> a -> a) (x : a) (bm : Nat) : (forall (bm : Nat), a -> (forall x', {_ : a | (wordTonat x' < wordTonat bm)%nat} -> a) -> a). Proof. let rhs := eval unfold foldrBits in (foldrBits p f x bm) in match rhs with context[ GHC.Wf.wfFix2 _ _ _ ?f ] => exact f end. Defined. Lemma foldrBits_eq: forall {a} p (f : Int -> a -> a) x bm, foldrBits p f x bm = @foldrBits_go a p f x bm (revNat bm) x (fun x y => foldrBits p f (proj1_sig y) (revNat x)). Proof. intros. unfold foldrBits. rewrite GHC.Wf.wfFix2_eq at 1. unfold foldrBits_go. destruct (Sumbool.sumbool_of_bool _); try reflexivity. f_equal. admit. Admitted. (* rewrite revNat_revNat. reflexivity. Qed. *) Lemma foldrBits_0: forall {a} p (f : Int -> a -> a) x, foldrBits p f x (NToWord 0%N) = x. Proof. intros. (* This hopefully works once NToWord is implemented. *) Fail apply foldrBits_eq. Admitted. Lemma foldrBits_bm: forall {a} p (f : Int -> a -> a) bm x, isBitMask (wordToN bm) -> foldrBits p f x bm = foldrBits p f (f (ZToInt (intToZ p + Z.of_N (N.log2 (wordToN bm)))%Z) x) (NToWord (N.clearbit (wordToN bm) (N.log2 (wordToN bm)))). Proof. intros. rewrite foldrBits_eq at 1 by isBitMask. unfold foldrBits_go, proj1_sig. unfoldMethods. Int_Word_N. replace (wordToN (revNat bm) =? Z.to_N 0)%N with false by (symmetry; apply N.eqb_neq; rewrite revNat_eq_0 by isBitMask; unfold isBitMask in *; Nomega). (* eek *) replace (Sumbool.sumbool_of_bool false) with (@right (false = true) (false = false) (@eq_refl bool false)) by reflexivity. f_equal. 
* (* Needs a theory of indexOfTheOnlyBit *) admit. (* unfold lowestBitMask. unfold indexOfTheOnlyBit. rewrite N.log2_pow2 by Nomega. rewrite N_log2_ctz by isBitMask. unfold WIDTH. rewrite !N.add_sub_assoc. reflexivity. unfold WIDTH; Nomega. assert (N_ctz (revNatSafe bm) < WIDTH)%N by isBitMask. unfold WIDTH in *; lia. simpl. lia. *) * admit. (* rewrite lxor_lowestBitMask by isBitMask. rewrite clearbit_revNat by isBitMask. rewrite revNat_revNat by isBitMask. rewrite <- N_log2_ctz by isBitMask. reflexivity. *) Admitted. Definition foldr_go {a} k := (fix go (arg_0__ : a) (arg_1__ : IntSet) {struct arg_1__} : a := match arg_1__ with | Bin _ _ l r0 => go (go arg_0__ r0) l | Tip kx bm => foldrBits kx k arg_0__ bm | Nil => arg_0__ end). (** *** Verification of [toList] *) Lemma In_cons_iff: forall {a} (y x : a) xs, In y (x :: xs) <-> x = y \/ In y xs. Proof. intros. reflexivity. Qed. Lemma In_foldrBits_cons: forall i r bm l, rBits r = N.log2 WIDTH -> In i (foldrBits (NToInt (rPrefix r)) cons l bm) <-> (bitmapInRange r bm (intToN i) = true \/ In i l). Proof. intros. (* Needs a variant of [bits_ind] that works on [Word] *) Admitted. (* revert l. apply bits_ind with (bm := bm). * assumption. * intros. rewrite foldrBits_0. rewrite bitmapInRange_0. intuition congruence. * clear bm H. intros bm Hbm IH l. rewrite foldrBits_bm by isBitMask. rewrite -> IH. rewrite split_highestBitMask with (bm := bm) at 4 by assumption. rewrite bitmapInRange_lor. rewrite orb_true_iff. rewrite In_cons_iff. rewrite bitmapInRange_pow by (replace (rBits r); isBitMask). rewrite N.eqb_eq. solve [tauto]. Qed. *) Definition toList_go := foldr_go cons. Lemma toList_go_In: forall s f, Sem s f -> forall l i, (f (intToN i) = true \/ In i l) <-> In i (toList_go l s). Proof. intros ?? HS. destruct HS. * intuition. rewrite H in H1. congruence. * induction HD; intros; simpl; subst. + rewrite In_foldrBits_cons by isBitMask. rewrite H1; reflexivity. + unfold op_zl__, Ord_Integer___, op_zl____. rewrite <- IHHD1. rewrite <- IHHD2. rewrite H4. rewrite orb_true_iff. intuition. Qed. Lemma toList_go_In_nil: forall s f, Sem s f -> forall i, f (intToN i) = true <-> In i (toList_go nil s). Proof. intros. rewrite <- toList_go_In by eassumption. intuition. Qed. Lemma toList_In: forall s f, Sem s f -> forall i, f (intToN i) = true <-> In i (toList s). Proof. intros. pose proof (toList_go_In_nil s f H i) as Hgo. destruct H. * apply Hgo. * destruct HD. + apply Hgo. + subst. simpl. unfold op_zl__, Ord__Int, op_zl____. destruct (Z.ltb_spec (intToZ (NToInt (rMask r))) (intToZ (ZToInt 0))). - rewrite <- toList_go_In by (eapply DescSem; eassumption). rewrite <- toList_go_In by (eapply DescSem; eassumption). rewrite H4. rewrite orb_true_iff. intuition. - rewrite <- toList_go_In by (eapply DescSem; eassumption). rewrite <- toList_go_In by (eapply DescSem; eassumption). rewrite H4. rewrite orb_true_iff. intuition. Qed. Lemma toList_Bits_append: forall p l bm, foldrBits p cons l bm = foldrBits p cons nil bm ++ l. Proof. intros. revert l. (* Needs [bit_ind] for [Word] *) Admitted. (* apply bits_ind with (bm := bm). - isBitMask. - intros xs. rewrite !foldrBits_0. reflexivity. - clear bm H. intros bm Hbm IH xs. rewrite !foldrBits_bm with (bm0 := bm) by isBitMask. rewrite IH. rewrite IH with (l := _ :: nil). rewrite <- app_assoc. reflexivity. Qed. *) Lemma toList_go_append: forall l s r f, Desc s r f -> toList_go l s = toList_go nil s ++ l. Proof. intros. revert l. induction H; intro l. * simpl. apply toList_Bits_append; isBitMask. * simpl. rewrite IHDesc2. 
rewrite IHDesc1. rewrite IHDesc2 at 2. rewrite IHDesc1 with (l := toList_go nil s2 ++ nil). rewrite app_nil_r. rewrite app_assoc. reflexivity. Qed. Theorem toAscList_exact (s1 s2 : IntSet) : WF s1 -> WF s2 -> s1 = s2 <-> toAscList s1 = toAscList s2. Admitted. (* Proof. intros [f1 Sem1] [f2 Sem2]; split; [now intros; subst | intros EQ]. eapply Sem_unique; try eassumption. generalize (toList_In _ _ Sem1), (toList_In _ _ Sem2). unfold toList; rewrite EQ; intros def_f1 def_f2. intros i; generalize (def_f1 i), (def_f2 i). destruct (f1 i), (f2 i); intuition. Qed. *) Theorem toList_exact (s1 s2 : IntSet) : WF s1 -> WF s2 -> s1 = s2 <-> toList s1 = toList s2. Proof. apply toAscList_exact. Qed. Theorem toList_toAscList : toList = toAscList. Proof. reflexivity. Qed. Theorem toAscList_toList : toAscList = toList. Proof. reflexivity. Qed. (** *** Sortedness of [toList] *) Lemma to_List_Bits_below: forall p bm, forall y, In y (foldrBits p cons nil bm) -> (intToZ y <= intToZ p + Z.of_N (N.log2 (wordToN bm)))%Z. Proof. intros p bm H. Admitted. (* apply bits_ind with (bm := bm). * assumption. * intros. rewrite foldrBits_0 in H0. inversion H0. * intros. rewrite foldrBits_bm in H2 by isBitMask. rewrite toList_Bits_append in H2 by isBitMask. apply in_app_iff in H2. destruct H2. + apply H1 in H2. transitivity (p + N.log2 (N.clearbit bm0 (N.log2 bm0))); try assumption. apply N.add_le_mono; try reflexivity. apply N.log2_le_mono. apply ldiff_le. + destruct H2 as [?|[]]. subst. reflexivity. Qed. *) Lemma to_List_Bits_sorted: forall p bm, StronglySorted (fun x y => Z.lt (intToZ x) (intToZ y)) (foldrBits p cons nil bm). Proof. intros. Admitted. (* apply bits_ind with (bm := bm). * assumption. * rewrite foldrBits_0. apply SSorted_nil. * clear bm H. intros. rewrite foldrBits_bm by isBitMask. rewrite toList_Bits_append by isBitMask. apply sorted_append with (x := p + N.log2 bm). + assumption. + apply SSorted_cons; constructor. + intros. eapply N.le_lt_trans. eapply to_List_Bits_below; try eassumption; try isBitMask. enough (N.log2 (N.clearbit bm (N.log2 bm)) < N.log2 bm)%N by Nomega. assert (N.clearbit bm (N.log2 bm) <> 0)%N. { intro. rewrite H2 in H1. rewrite foldrBits_0 in H1. inversion H1. } apply N.log2_lt_pow2; try Nomega. rewrite clearbit_log2_mod by (unfold isBitMask in H; Nomega). apply N.mod_lt. apply N.pow_nonzero. Nomega. + intros y Hy. destruct Hy as [?|[]]. subst. reflexivity. Qed. *) Lemma to_List_go_Desc_sorted: forall s r f, Desc s r f -> StronglySorted (fun x y => Z.lt (intToZ x) (intToZ y)) (toList_go nil s). Proof. intros ??? HD. induction HD. * simpl. apply to_List_Bits_sorted. * simpl. subst. unfoldMethods. erewrite toList_go_append by (apply HD1). apply sorted_append' with (x := NToInt (rPrefix (halfRange r true))). + eapply IHHD1; eassumption. + eapply IHHD2; eassumption. + intros i Hi. rewrite rPrefix_halfRange_otherhalf by assumption. rewrite <- toList_go_In_nil in Hi by (eapply DescSem; eassumption). apply (Desc_inside HD1) in Hi. eapply inRange_isSubrange_true in Hi; try eassumption. admit. (* apply inRange_bounded. assumption. *) + intros i Hi. rewrite <- toList_go_In_nil in Hi by (eapply DescSem; eassumption). apply (Desc_inside HD2) in Hi. eapply inRange_isSubrange_true in Hi; try eassumption. admit. (* apply inRange_bounded. assumption. *) Admitted. Lemma to_List_Desc_sorted: forall s r f, Desc s r f -> StronglySorted (fun x y => Z.lt (intToZ x) (intToZ y)) (toList s). Proof. intros ??? HD. destruct HD. * simpl. apply to_List_Bits_sorted. * simpl. subst. unfoldMethods. 
destruct (Z.ltb_spec (intToZ (NToInt (rMask r))) (intToZ (ZToInt 0))). - (* This branch used to be inaccessible. Now there is stuff to do. *) admit. - fold (foldr_go (@cons Key)). erewrite toList_go_append by (apply HD1). change (StronglySorted (fun x y : Int => (intToZ x < intToZ y)%Z) (toList_go nil s1 ++ toList_go nil s2)). apply sorted_append' with (x := NToInt (rPrefix (halfRange r true))). + eapply to_List_go_Desc_sorted; eassumption. + eapply to_List_go_Desc_sorted; eassumption. + intros i Hi. rewrite rPrefix_halfRange_otherhalf by assumption. rewrite <- toList_go_In_nil in Hi by (eapply DescSem; eassumption). apply (Desc_inside HD1) in Hi. eapply inRange_isSubrange_true in Hi; try eassumption. admit. (* apply inRange_bounded. assumption. *) + intros i Hi. rewrite <- toList_go_In_nil in Hi by (eapply DescSem; eassumption). apply (Desc_inside HD2) in Hi. eapply inRange_isSubrange_true in Hi; try eassumption. admit. (* apply inRange_bounded. assumption. *) Admitted. Lemma to_List_sorted: forall s, WF s -> StronglySorted (fun x y => Z.lt (intToZ x) (intToZ y)) (toList s). Proof. intros. destruct H as [f HSem]. destruct HSem. * apply SSorted_nil. * eapply to_List_Desc_sorted; eassumption. Qed. (** ** Verification of [toAscList] *) Lemma toAscList_spec: @toAscList = @toList. Proof. reflexivity. Qed. (** ** Verification of [elems] *) Lemma elems_spec: @elems = @toList. Proof. reflexivity. Qed. (** *** Verification of [foldl] *) Definition foldl_go {a} k := fix go (arg_0__ : a) (arg_1__ : IntSet) {struct arg_1__} : a := match arg_1__ with | Bin _ _ l r0 => go (go arg_0__ l) r0 | Tip kx bm => foldlBits kx k arg_0__ bm | Nil => arg_0__ end. Definition foldlBits_go {a} (p : Int) (f : a -> Int -> a) (x : a) (bm : Nat) : ((forall x', {_ : a | (wordTonat x' < wordTonat bm)%nat} -> a) -> a). Proof. let rhs := eval unfold foldlBits in (foldlBits p f x bm) in match rhs with context[ GHC.Wf.wfFix2 _ _ _ ?f ] => exact (f bm x) end. Defined. Lemma foldlBits_eq: forall {a} p (f : a -> Int -> a) x bm, foldlBits p f x bm = @foldlBits_go a p f x bm (fun x y => foldlBits p f (proj1_sig y) x). Admitted. (* This should work once the termination proof of foldlBits does not use an axiom. *) (* Proof. intros. apply GHC.Wf.wfFix2_eq. Qed. *) Lemma foldlBits_0: forall {a} p (f : a -> Int -> a) x, foldlBits p f x (NToWord 0) = x. Proof. intros. Fail apply foldlBits_eq. (* might work once no axioms are around *) Admitted. Lemma foldlBits_bm: forall {a} p (f : a -> Int -> a) bm x, isBitMask (wordToN bm) -> foldlBits p f x bm = foldlBits p f (f x (ZToInt (intToZ p + Z.of_N (N_ctz (wordToN bm))))) (NToWord (N.clearbit (wordToN bm) (N_ctz (wordToN bm)))). Proof. intros. rewrite foldlBits_eq at 1. unfold foldlBits_go, proj1_sig. unfoldMethods. Int_Word_N. replace (wordToN bm =? Z.to_N 0)%N with false by (symmetry; apply N.eqb_neq; unfold isBitMask in *; zify; rewrite Z2N.id; omega). (* eek *) replace (Sumbool.sumbool_of_bool false) with (@right (false = true) (false = false) (@eq_refl bool false)) by reflexivity. f_equal. * admit. (* See above unfold lowestBitMask. unfold indexOfTheOnlyBit. rewrite N.log2_pow2 by Nomega. reflexivity. *) * admit. (* rewrite lxor_lowestBitMask by assumption. reflexivity. *) Admitted. Lemma foldl'Bits_foldlBits : @foldl'Bits = @foldlBits. Admitted. (* Should work once we have termination proofs *) (* Proof. reflexivity. Qed.
*) Lemma foldlBits_high_bm_aux: forall {a} p (f : a -> Int -> a) bm, (wordToN bm <> 0)%N -> (forall x, foldlBits p f x bm = f (foldlBits p f x (NToWord (N.clearbit (wordToN bm) (N.log2 (wordToN bm))))) (ZToInt (intToZ p + Z.of_N (N.log2 (wordToN bm))))%Z). Proof. intros. pose proof H. revert H0 x. (* Need induction scheme *) Admitted. (* apply bits_ind_up with (bm := bm). - isBitMask. - clear bm H. intros Hbm Hpos x. Nomega. - clear bm H. intros bm Hbm IH _ Hpos x. destruct (N.eqb_spec (N.clearbit bm (N_ctz bm)) (0%N)). * clear IH. rewrite foldlBits_bm by isBitMask. rewrite e. rewrite foldlBits_0. apply clearbit_ctz_0 in e; try isBitMask. rewrite e. rewrite N.log2_pow2 by nonneg. rewrite clearbit_pow2_0. rewrite foldlBits_0. replace bm with (2^N.log2 bm)%N by (rewrite e; rewrite N.log2_pow2 by nonneg; reflexivity). rewrite N_ctz_pow2. reflexivity. * assert (hasTwoBits bm) by (split; auto; isBitMask). rewrite foldlBits_bm by isBitMask. rewrite IH by (isBitMask || assumption). rewrite log2_clearbit_ctz by assumption. f_equal. etransitivity; [|rewrite foldlBits_bm by isBitMask; reflexivity]. rewrite ctz_clearbit_log2 by assumption. rewrite clearbit_clearbit_comm at 1. reflexivity. Qed. *) Lemma foldlBits_high_bm: forall {a} p (f : a -> Int -> a) bm x, isBitMask (wordToN bm) -> foldlBits p f x bm = f (foldlBits p f x (NToWord (N.clearbit (wordToN bm) (N.log2 (wordToN bm))))) (ZToInt (intToZ p + Z.of_N (N.log2 (wordToN bm))))%Z. Proof. intros. unfold isBitMask in H. apply foldlBits_high_bm_aux. Nomega. Qed. Lemma foldlBits_foldrBits: forall {a b} k (x : a) p bm (k' : a -> b), k' (foldlBits p k x bm) = foldrBits p (fun x g a => g (k a x)) k' bm x. Proof. intros. revert k'. Admitted. (* apply bits_ind with (bm := bm). - assumption. - intros. rewrite foldrBits_0. rewrite foldlBits_0. reflexivity. - clear bm H. intros bm Hbm IH k'. rewrite !@foldrBits_bm with (bm := bm) by isBitMask. rewrite !@foldlBits_high_bm with (bm := bm) by isBitMask. rewrite <- IH. reflexivity. Qed. *) Lemma foldl_go_foldr_go: forall {a b} k (x : a) s r f (k' : a -> b), Desc s r f -> k' (foldl_go k x s) = foldr_go (fun x g a => g (k a x)) k' s x. Proof. intros. revert x k'; induction H; intros. * apply foldlBits_foldrBits; isBitMask. * simpl. rewrite IHDesc2 with (k' := k'). rewrite IHDesc1 with (k' := foldr_go _ k' s2). reflexivity. Qed. Lemma foldl_foldr: forall {a} k (x : a) s, WF s -> foldl k x s = foldr (fun x g a => g (k a x)) id s x. Proof. intros. destruct H as [f HSem]. destruct HSem. * reflexivity. * revert x; destruct HD; intros. + simpl. apply foldlBits_foldrBits with (k' := fun x => x); isBitMask. + simpl. fold (foldl_go k). fold (foldr_go (fun (x0 : Key) (g : a -> a) (a0 : a) => g (k a0 x0))). unfoldMethods. Int_Word_N. destruct (Z.ltb_spec (intToZ msk) 0). - erewrite foldl_go_foldr_go with (k' := fun x => x) by eassumption. eapply foldl_go_foldr_go; eassumption. - erewrite foldl_go_foldr_go with (k' := fun x => x) by eassumption. eapply foldl_go_foldr_go; eassumption. Qed. Lemma fold_right_foldrBits_go: forall {a} f (x : a) p bm xs, fold_right f x (foldrBits p cons xs bm) = foldrBits p f (fold_right f x xs) bm. Proof. intros. revert xs. Admitted. (* apply bits_ind with (bm := bm). - assumption. - intros xs. rewrite !foldrBits_0. reflexivity. - clear bm H. intros bm Hbm IH xs. rewrite !@foldrBits_bm with (bm := bm) by isBitMask. rewrite IH. reflexivity. Qed. *) Lemma fold_right_toList_go: forall {a} f (x : a) s r f' xs, Desc s r f' -> fold_right f x (foldr_go cons xs s) = foldr_go f (fold_right f x xs) s. 
Proof. intros. revert xs; induction H; intros. * apply fold_right_foldrBits_go; isBitMask. * simpl. rewrite IHDesc1. rewrite IHDesc2. reflexivity. Qed. Lemma fold_right_toList: forall {a} f (x : a) s xs, WF s-> fold_right f x (foldr cons xs s) = foldr f (fold_right f x xs) s. Proof. intros. destruct H as [f' HSem]. destruct HSem. * reflexivity. * destruct HD. + apply fold_right_foldrBits_go; isBitMask. + simpl. unfoldMethods. Int_Word_N. fold (foldr_go (@cons Key)). fold (foldr_go f). destruct (Z.ltb_spec (intToZ msk) 0). - do 2 erewrite fold_right_toList_go by eassumption. reflexivity. - do 2 erewrite fold_right_toList_go by eassumption. reflexivity. Qed. Lemma List_foldl_foldr: forall {a b} f (x : b) (xs : list a), fold_left f xs x = List.fold_right (fun x g a => g (f a x)) id xs x. Proof. intros. revert x. induction xs; intro. * reflexivity. * simpl. rewrite IHxs. reflexivity. Qed. Lemma foldl_spec: forall {a} f (x : a) s, WF s -> foldl f x s = fold_left f (toList s) x. Proof. intros. unfold toList, toAscList. rewrite foldl_foldr by assumption. rewrite List_foldl_foldr. rewrite fold_right_toList by assumption. reflexivity. Qed. (** *** Verification of [size] *) (** Because [size] returns an [Int], it actually overflows for large [IntSet], but the equations still hold, as overflow is modulo. *) Definition sizeGo : Int -> IntSet -> Int. Proof. let size_rhs := eval unfold size in size in match size_rhs with ?f #0 => exact f end. Defined. Lemma popCount_N_length_toList_go: forall bm p l, (Z.of_N (N_popcount (wordToN bm)) + Z.of_nat (length l) = Z.of_nat (length (foldrBits p cons l bm)))%Z. Proof. intros. revert l. Admitted. (* apply bits_ind with (bm := bm). - isBitMask. - intros. rewrite foldrBits_0. reflexivity. - clear bm H. intros bm Hbm IH l. rewrite !@foldrBits_bm with (bm := bm) by isBitMask. rewrite popCount_N_bm by (unfold isBitMask in Hbm; Nomega). rewrite <- IH; clear IH. simpl. Nomega. Qed. *) Lemma sizeGo_spec': forall x s r f, Desc s r f -> sizeGo x s = ZToInt (intToZ x + Z.of_nat (length (toList_go nil s)))%Z. Proof. intros. intros. revert x; induction H; intro x. + simpl. unfold bitcount. rewrite <- popCount_N_length_toList_go. simpl. Int_Word_N. rewrite Z.add_0_r. rewrite Z.add_0_l. reflexivity. + simpl. erewrite toList_go_append with (s := s1) by eassumption. erewrite toList_go_append with (s := s2) by eassumption. rewrite IHDesc1. rewrite IHDesc2. rewrite !app_length. Int_Word_N. simpl length. unfold Nat in *. rewrite Nat.add_0_r. rewrite Nat2Z.inj_add. rewrite Z.add_assoc. reflexivity. Qed. Lemma sizeGo_spec: forall x s, WF s -> sizeGo x s = ZToInt (intToZ x + Z.of_nat (length (toList s)))%Z. Proof. intros. destruct H as [f HSem]. destruct HSem. * simpl. admit. (* rewrite N.add_0_r. reflexivity. *) * destruct HD. + simpl. unfold bitcount. rewrite <- popCount_N_length_toList_go. simpl. Int_Word_N. rewrite Z.add_0_r. rewrite Z.add_0_l. reflexivity. + subst. simpl. unfoldMethods. Int_Word_N. destruct (Z.ltb_spec (intToZ (NToInt (rMask r))) 0). -- erewrite toList_go_append with (s := s1) by eassumption. erewrite toList_go_append with (s := s2) by eassumption. erewrite sizeGo_spec' by eassumption. erewrite sizeGo_spec' by eassumption. rewrite !app_length. simpl length. Int_Word_N. rewrite Nat2Z.inj_add. f_equal. lia. -- erewrite toList_go_append with (s := s1) by eassumption. erewrite toList_go_append with (s := s2) by eassumption. erewrite sizeGo_spec' by eassumption. erewrite sizeGo_spec' by eassumption. rewrite !app_length. simpl length. rewrite Nat.add_0_r. Int_Word_N. 
rewrite Nat2Z.inj_add. f_equal. lia. Admitted. Lemma size_spec: forall s, WF s -> size s = ZToInt (Z.of_nat (length (toList s))). Proof. intros. unfold size. rewrite sizeGo_spec by assumption. simpl. Int_Word_N. reflexivity. Qed. (** *** Verification of [toDescList] *) (** The easiest complete specification simply relates this to [toList] *) Lemma toDescList_spec: forall s, WF s -> toDescList s = rev (toList s). Proof. intros. unfold toDescList. rewrite foldl_spec by assumption. rewrite <- fold_left_rev_right. generalize (rev (toList s)). intro xs. induction xs. * reflexivity. * simpl. rewrite IHxs. reflexivity. Qed. (** *** Verification of [fromList] *) Lemma fromList_Sem: forall l, exists f, Sem (fromList l) f /\ (forall i, f (intToN i) = true <-> In i l). Proof. intros l. unfold fromList. rewrite hs_coq_foldl'_list. (* Rewrite to use fold_right instead of fold_left *) enough (forall l, exists f : N -> bool, Sem (fold_right (fun (x : Key) (t : IntSet) => insert x t) empty l) f /\ (forall i, f (intToN i) = true <-> In i l)). { specialize (H (rev l)). rewrite fold_left_rev_right in H. setoid_rewrite <- in_rev in H. assumption. } (* Now induction *) clear l. intros l. induction l; intros. * exists (fun _ => false). split. + constructor. auto. + intuition. congruence. * destruct IHl as [?[??]]. eexists. split. + simpl. eapply insert_Sem; try eassumption. intro; reflexivity. + intro i. specialize (H0 i). simpl. destruct (N.eqb_spec (intToN i) (intToN a)); intuition Int_Word_N; try congruence. Qed. Lemma fromList_WF: forall l, WF (fromList l). Proof. intros. destruct (fromList_Sem l) as [?[??]]. econstructor. eassumption. Qed. (** *** Verification of [filter] *) Definition filterBits p o bm := (foldlBits (ZToInt 0) (fun (bm0 : BitMap) (bi : Key) => if p (ZToInt (intToZ o + intToZ bi)%Z) : bool then NToWord (N.lor (wordToN bm0) (wordToN (bitmapOfSuffix bi))) else bm0) (NToWord 0%N) bm). Lemma testbit_filterBits: forall p o bm i, N.testbit (wordToN (filterBits p o bm)) i = (N.testbit (wordToN bm) i && p (ZToInt ((intToZ o + Z.of_N i)%Z))). Proof. intros. unfold filterBits. transitivity ((N.testbit (wordToN bm) i && p (ZToInt ((intToZ o + Z.of_N i)%Z))) || N.testbit (wordToN (NToWord 0%N)) i); try (rewrite N.bits_0; rewrite orb_false_r; reflexivity). enough (forall a, N.testbit (wordToN (foldlBits (ZToInt 0) (fun (bm0 : BitMap) (bi : Key) => if p (ZToInt (intToZ o + intToZ bi)) then NToWord (N.lor (wordToN bm0) (wordToN (bitmapOfSuffix bi))) else bm0) a bm)) i = N.testbit (wordToN bm) i && p (ZToInt (intToZ o + Z.of_N i)) || N.testbit (wordToN a) i) by intuition. Admitted. (* apply bits_ind with (bm := bm). * assumption. * intros a. rewrite foldlBits_0. rewrite N.bits_0. rewrite andb_false_l. rewrite orb_false_l. reflexivity. * intros. unfold filterBits. rewrite foldlBits_high_bm by isBitMask. lazymatch goal with [|- N.testbit (if ?x then N.lor ?z ?y else ?z) ?i = _] => transitivity (N.testbit (N.lor z (if x then y else 0%N)) i); [destruct x; try rewrite N.lor_0_r; reflexivity|] end. rewrite N.lor_spec. rewrite H1; clear H1. rewrite N.clearbit_eqb. lazymatch goal with [|- context [N.testbit (if ?x then ?y else 0%N) ?i] ] => replace (N.testbit (if x then y else 0%N) i) with (x && N.testbit y i) by (destruct x; try rewrite N.bits_0; repeat split_bool; reflexivity) end. rewrite N.add_0_l. rewrite bitmapOfSuffix_pow. rewrite !N.pow2_bits_eqb. destruct (N.eqb_spec (N.log2 bm0) i). + subst; repeat split_bool; try reflexivity; exfalso. rewrite N.bit_log2 in Heqb by (unfold isBitMask in *; Nomega). 
congruence. + repeat split_bool; try reflexivity; exfalso. Qed. *) Lemma filter_Desc: forall p s r f f', Desc s r f -> (forall i, f' i = f i && p (NToInt i)) -> Desc0 (filter p s) r f'. Proof. intros. revert f' H0. induction H. * intros. simpl. subst. rewrite foldl'Bits_foldlBits. fold (filterBits p (NToInt (rPrefix r)) bm). eapply tip_Desc0; try assumption; try reflexivity. + intro i. rewrite H3. rewrite H1. unfold bitmapInRange. destruct (inRange i r) eqn:Hir. - rewrite testbit_filterBits by isBitMask. f_equal. f_equal. clear p H3. admit. (* rewrite N.div_mod with (a := i) (b := id WIDTH) at 1 by (intro Htmp; inversion Htmp). f_equal. ** destruct r as [p b]. unfold inRange, rPrefix, rBits, snd in *. rewrite N.eqb_eq in Hir. subst. rewrite N.shiftl_mul_pow2 by nonneg. rewrite N.shiftr_div_pow2 by nonneg. rewrite N.mul_comm. reflexivity. ** rewrite N.land_ones by nonneg. rewrite H0. reflexivity. *) - rewrite andb_false_l. reflexivity. + isBitMask. * intros. subst. simpl. eapply bin_Desc0. + apply IHDesc1; intro; reflexivity. + apply IHDesc2; intro; reflexivity. + assumption. + assumption. + assumption. + reflexivity. + reflexivity. + solve_f_eq. Admitted. Lemma filter_Sem: forall p s f f', Sem s f -> (forall i, f' i = f i && p (NToInt i)) -> Sem (filter p s) f'. Proof. intros. destruct H. * apply SemNil. solve_f_eq. * eapply Desc0_Sem. eapply filter_Desc. eassumption. eassumption. Qed. Lemma filter_WF: forall p s, WF s -> WF (filter p s). Proof. intros. destruct H. eexists. eapply filter_Sem. eassumption. intro. reflexivity. Qed. (** *** Verification of [partition] *) (** Conveniently, [partition] uses [filterBits] *) Lemma partition_fst: forall p s, fst (partition p s) = filter p s. Proof. intros. induction s. * simpl. rewrite (surjective_pairing (partition p s1)). rewrite (surjective_pairing (partition p s2)). simpl. rewrite IHs1, IHs2. reflexivity. * reflexivity. * reflexivity. Qed. Lemma filterBits_neg: forall P p bm, N.lxor (wordToN bm) (wordToN (filterBits P p bm)) = wordToN (filterBits (fun x => negb (P x)) p bm). Proof. intros. apply N.bits_inj; intro i. rewrite N.lxor_spec. rewrite testbit_filterBits by isBitMask. rewrite testbit_filterBits by isBitMask. repeat split_bool; reflexivity. Qed. Lemma partition_snd: forall p s, snd (partition p s) = filter (fun x => negb (p x)) s. Proof. intros P s. induction s. * simpl. rewrite (surjective_pairing (partition P s1)). rewrite (surjective_pairing (partition P s2)). rewrite IHs1, IHs2. reflexivity. * simpl. rewrite foldl'Bits_foldlBits. f_equal. change (NToWord (N.lxor (wordToN b) (wordToN (filterBits P p b))) = filterBits (fun x => negb (P x)) p b). rewrite filterBits_neg. Int_Word_N. reflexivity. * reflexivity. Qed. Theorem partition_WF (p : Int -> bool) (s : IntSet) : WF s -> let '(l,r) := partition p s in WF l /\ WF r. Proof. intros WFs; rewrite (surjective_pairing (partition p s)), partition_fst, partition_snd; auto using filter_WF. Qed. Theorem partition_WF' (p : Int -> bool) (s : IntSet) : WF s -> WF (fst (partition p s)) /\ WF (snd (partition p s)). Proof. generalize (partition_WF p s); destruct (partition p s); auto. Qed. Corollary partition_1_WF (p : Int -> bool) (s : IntSet) : WF s -> WF (fst (partition p s)). Proof. apply partition_WF'. Qed. Corollary partition_2_WF (p : Int -> bool) (s : IntSet) : WF s -> WF (snd (partition p s)). Proof. apply partition_WF'. Qed. 
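(* A natural consequence of [partition_fst], [partition_snd] and [filter_Sem] would be a [Sem]-level specification of the [IntSet]-level [partition]. The following is only a sketch and not part of this development (the name [partition_Sem] is ours); it is not used below:

   Lemma partition_Sem: forall p s f,
     Sem s f ->
     Sem (fst (partition p s)) (fun i => f i && p (NToInt i)) /\
     Sem (snd (partition p s)) (fun i => f i && negb (p (NToInt i))).
   Proof.
     intros p s f HSem; split;
       [ rewrite partition_fst | rewrite partition_snd ];
       eapply filter_Sem; try eassumption; intro; reflexivity.
   Qed.
*)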
(** *** Constructiveness of inequality *) (** I found this easiest to specify once we have [toList]: If two sets are not equal, we find an element where they differ.*) Lemma Sem_notSubset_witness: forall s1 f1 s2 f2, Sem s1 f1 -> Sem s2 f2 -> isSubsetOf s1 s2 = false <-> (exists i, f1 i = true /\ f2 i = false). Admitted. (* intros. split; intro. * assert (~ (Forall (fun i => f2 (intToN i) = true) (toList s1))). { intro. rewrite <- not_true_iff_false in H1. contradict H1. rewrite Forall_forall in H2. rewrite isSubsetOf_Sem by eassumption. intros i Hi. apply H2. rewrite <- toList_In by eassumption. assumption. } rewrite <- Exists_Forall_neg in H2 by (intro i; destruct (f2 i); intuition). rewrite Exists_exists in H2. destruct H2 as [i [Hin Hf2]]. rewrite not_true_iff_false in Hf2. rewrite <- toList_In in Hin by eassumption. exists i; intuition. * apply not_true_iff_false. intro. rewrite isSubsetOf_Sem in H2 by eassumption. destruct H1 as [i [Hin Hf2]]. specialize (H2 i). intuition congruence. Qed. *) Lemma Sem_differ_witness: forall s1 f1 s2 f2, Sem s1 f1 -> Sem s2 f2 -> s1 <> s2 <-> (exists i, f1 i <> f2 i). Proof. intros. transitivity (isSubsetOf s1 s2 = false \/ isSubsetOf s2 s1 = false). * destruct (isSubsetOf s1 s2) eqn:?, (isSubsetOf s2 s1) eqn:?; intuition try congruence. + exfalso. apply H1. eapply isSubsetOf_antisym; eassumption. + subst. erewrite isSubsetOf_refl in Heqb by eassumption. congruence. + subst. erewrite isSubsetOf_refl in Heqb by eassumption. congruence. * do 2 rewrite Sem_notSubset_witness by eassumption. intuition. + destruct H2 as [i?]. exists i. intuition congruence. + destruct H2 as [i?]. exists i. intuition congruence. + destruct H1 as [i?]. destruct (f1 i) eqn:?, (f2 i) eqn:?; try congruence. - left. exists i. intuition congruence. - right. exists i. intuition congruence. Qed. (** *** Verification of [isProperSubsetOf] *) (** [subsetCmp] is a strange beast, as it returns an [ordering], but does not totally order the sets. We first relate it to [equal] and [isSubsetOf], and then stitch the specification for [isProperSubsetOf] together using that. We use [difference] in the stitching, hence the position of this section. *) Program Fixpoint subsetCmp_equal s1 r1 f1 s2 r2 f2 { measure (size_nat s1 + size_nat s2) } : Desc s1 r1 f1 -> Desc s2 r2 f2 -> subsetCmp s1 s2 = Eq <-> equal s1 s2 = true := _. Next Obligation. revert subsetCmp_equal H H0. intros IH HD1 HD2. destruct HD1, HD2. * (* Both are tips *) simpl; subst. unfoldMethods. Int_Word_N. rewrite if_negb. destruct (N.eqb_spec (rPrefix r) (rPrefix r0)). - destruct (N.eqb_spec (wordToN bm) (wordToN bm0)); try intuition congruence. destruct (N.land (wordToN bm) (N.lnot (wordToN bm0) IntWord.WIDTH) =? 0)%N; intuition; rewrite ?andb_false_r in *; try congruence. - destruct (N.eqb_spec (wordToN bm) (wordToN bm0)); intuition; rewrite ?andb_true_r, ?andb_false_r in *; try congruence. * (* Tip left, Bin right *) simpl; subst. repeat (match goal with [ |- (match ?scrut with _ => _ end) = Eq <-> _ ] => destruct scrut end; try intuition congruence). * (* Bin right, Tip left *) simpl; subst. intuition congruence. * (* Bin both sides *) simpl; subst; unfold shorter; unfoldMethods. repeat rewrite andb_true_iff. repeat rewrite !N.eqb_eq. repeat rewrite !N.eqb_eq. unfold op_zg__, Ord__Word, op_zg____, natFromInt, wordFromInt. Int_Word_N. destruct (N.ltb_spec (rMask r4) (rMask r)); only 2: destruct (N.ltb_spec (rMask r) (rMask r4)). - (* left is bigger than right *) intuition try (congruence||lia). admit.
(* Injectivity of NToInt on small values *) - (* right is bigger than left *) repeat (match goal with [ |- (match ?scrut with _ => _ end) = Eq <-> _ ] => destruct scrut end); intuition try (congruence||lia). all:admit. (* Injectivity of NToInt on small values *) - (* same sized bins *) unfoldMethods. rewrite <- IH by ((simpl; lia) || eassumption). rewrite <- IH by ((simpl; lia) || eassumption). destruct (N.eqb_spec (rPrefix r) (rPrefix r4)); repeat (match goal with [ |- (match ?scrut with _ => _ end) = Eq <-> _ ] => destruct scrut eqn:? end); intuition try (congruence || lia). all:admit. (* Injectivity of NToInt on small values *) Admitted. Program Fixpoint subsetCmp_isSubsetOf s1 r1 f1 s2 r2 f2 { measure (size_nat s1 + size_nat s2) } : Desc s1 r1 f1 -> Desc s2 r2 f2 -> negb (eq_comparison (subsetCmp s1 s2) Gt) = isSubsetOf s1 s2 := _. Next Obligation. revert subsetCmp_isSubsetOf H H0. intros IH HD1 HD2. destruct HD1, HD2. * (* Both are tips *) simpl; subst. unfoldMethods. Int_Word_N. rewrite if_negb. destruct (N.eqb_spec (rPrefix r) (rPrefix r0)). - destruct (N.eqb_spec (wordToN bm) (wordToN bm0)); try intuition congruence. + subst. admit. (* rewrite N.land_diag, N.lxor_nilpotent, N.eqb_refl. intuition congruence. *) + destruct (N.land (wordToN bm) (N.lnot (wordToN bm0) IntWord.WIDTH) =? 0)%N; intuition; rewrite ?andb_false_r in *; try congruence. - destruct (N.land (wordToN bm) (N.lnot (wordToN bm0) IntWord.WIDTH) =? 0)%N; intuition; rewrite ?andb_false_r in *; try congruence. * (* Tip left, Bin right *) simpl; subst. do 2 erewrite <- IH by (first [ simpl; omega | apply DescTip; try eassumption; reflexivity | eassumption ]). repeat (match goal with [ |- context [match ?scrut with _ => _ end] ] => destruct scrut end; try intuition congruence). * (* Bin right, Tip left *) simpl; subst. intuition congruence. * (* Bin both sides *) simpl; subst; unfold shorter; unfoldMethods. Int_Word_N. repeat rewrite andb_true_iff. repeat rewrite !N.eqb_eq. repeat rewrite !N.eqb_eq. destruct (N.ltb_spec (rMask r4) (rMask r)); only 2: destruct (N.ltb_spec (rMask r) (rMask r4)). - (* left is bigger than right *) reflexivity. - (* right is bigger than left *) unfold match_, nomatch. unfoldMethods. rewrite if_negb. do 2 erewrite <- IH by (first [ simpl; omega | eapply DescBin; try beassumption; reflexivity | eassumption ]). destruct (intToN (mask _ _) =? _), (zero _ _); repeat (match goal with [ |- context [match ?scrut with _ => _ end] ] => destruct scrut eqn:? end); intuition. - (* same sized bins *) unfoldMethods. do 2 erewrite <- IH by (first [ simpl; omega | eapply DescBin; try beassumption; reflexivity | eassumption ]). destruct (N.eqb_spec (rPrefix r) (rPrefix r4)); repeat (match goal with [ |- context [match ?scrut with _ => _ end] ] => destruct scrut eqn:? end); intuition. Admitted. Lemma isProperSubsetOf_Sem: forall s1 f1 s2 f2, Sem s1 f1 -> Sem s2 f2 -> isProperSubsetOf s1 s2 = true <-> ((forall i, f1 i = true -> f2 i = true) /\ (exists i, f1 i <> f2 i)). Proof. intros ???? HSem1 HSem2. rewrite <- Sem_differ_witness by eassumption. rewrite <- isSubsetOf_Sem by eassumption. destruct HSem1, HSem2. * replace (isProperSubsetOf _ _) with false by reflexivity. replace (isSubsetOf _ _) with true by reflexivity. intuition try congruence. * replace (isProperSubsetOf Nil s) with true by (destruct HD; reflexivity). replace (isSubsetOf Nil s) with true by (destruct s; reflexivity). intuition try congruence. subst; inversion HD. * replace (isProperSubsetOf s Nil) with false by (destruct s; reflexivity). 
replace (isSubsetOf s Nil) with false by (destruct HD; reflexivity). intuition try congruence. * pose proof (subsetCmp_isSubsetOf _ _ _ _ _ _ HD HD0). rewrite eq_iff_eq_true in H. pose proof (subsetCmp_equal _ _ _ _ _ _ HD HD0). rewrite equal_spec in H0. unfold isProperSubsetOf. destruct (subsetCmp s s0) eqn:Hssc; simpl in *; intuition try congruence. Qed. (** *** Verification of [map] *) Lemma map_Sem: forall g s f1, Sem s f1 -> Sem (map g s) (fun i => existsb (fun j => intToN (g j) =? i) (toList s)). Proof. intros. unfold map. change (Sem (fromList (List.map g (toList s))) (fun i : N => existsb (fun j : Key => intToN (g j) =? i) (toList s))). destruct (fromList_Sem (List.map g (toList s))) as [f [HSem Hf]]. eapply Sem_change_f; only 1: eassumption. intro i. apply eq_iff_eq_true. admit. (* rewrite Hf. rewrite existsb_exists, in_map_iff. split; intros [x Hx]; exists x; try rewrite N.eqb_eq in *; intuition. Qed. *) Admitted. (** *** Verification of [valid] *) (** The [valid] function is used in the test suite to detect whether functions return valid trees. It should be equivalent to our [WF], but we cannot fully prove that as long as we use arbitrary width numbers. *) (* Need to re-export with fixed width ints *) (* Require Import IntSetValidity. Definition noNilInSet : IntSet -> bool := (fix noNilInSet (t' : IntSet) : bool := match t' with | Bin _ _ l' r' => noNilInSet l' && noNilInSet r' | Tip _ _ => true | Nil => false end). Lemma valid_noNilInSet: forall s r f, Desc s r f -> noNilInSet s = true. Proof. intros. induction H. * reflexivity. * simpl. rewrite IHDesc1, IHDesc2. reflexivity. Qed. Lemma valid_nilNeverChildOfBin: forall s, WF s -> nilNeverChildOfBin s = true. Proof. intros. destruct H as [f HSem]. destruct HSem. * reflexivity. * pose proof (valid_noNilInSet _ _ _ HD). destruct HD; apply H. Qed. Lemma valid_maskPowerOfTwo: forall s, WF s -> maskPowerOfTwo s = true. intros. destruct H as [f HSem]. destruct HSem. * reflexivity. * induction HD. - reflexivity. - simpl. unfold bitcount. unfoldMethods. rewrite IHHD1, IHHD2. subst. simpl. rewrite andb_true_r. rewrite N.eqb_eq. destruct r as [p b]. unfold rMask, rBits, snd in *. unfold id. rewrite N_popcount_pow2. reflexivity. Qed. Lemma Foldable_all_forallb: forall {a} p (l : list a), Foldable.all p l = forallb p l. Proof. intros. induction l. * reflexivity. * simpl. rewrite <- IHl. compute. match goal with [ |- _ = match ?x with _ => _ end ] => destruct x end. match goal with [ |- _ = match ?x with _ => _ end ] => destruct x end. reflexivity. reflexivity. Qed. Lemma valid_commonPrefix: forall s, WF s -> commonPrefix s = true. Proof. intros. destruct H as [f HSem]. destruct HSem. * reflexivity. * induction HD. - reflexivity. - cbv fix beta delta [commonPrefix]. fold commonPrefix. unfoldMethods. rewrite IHHD1, IHHD2. rewrite andb_true_r. set (s := Bin p msk s1 s2). assert (Desc s r f) by (eapply DescBin; eassumption). replace elems with toList by reflexivity. rewrite Foldable_all_forallb. rewrite forallb_forall. intros i Hi. rewrite <- toList_In in Hi by (eapply DescSem; eassumption). eapply Desc_inside in Hi; try eassumption. rewrite N.eqb_eq. symmetry. subst p. clear - Hi. destruct r as [p b]. unfold inRange, rPrefix in *. rewrite N.eqb_eq in Hi. subst. apply N.bits_inj_iff. intros j. rewrite N.land_spec. destruct (N.ltb_spec j b). + rewrite N.shiftl_spec_low by assumption. reflexivity. + rewrite N.shiftl_spec_high' by assumption. rewrite !N.shiftr_spec by Nomega. replace (j - b + b) with j by Nomega. rewrite andb_diag. reflexivity. Qed.
Lemma valid_maskRespected: forall s, WF s -> maskRespected s = true. Proof. intros. destruct H as [f HSem]. destruct HSem. * reflexivity. * induction HD. - reflexivity. - simpl. rewrite !Foldable_all_forallb. rewrite andb_true_iff; split. + rewrite forallb_forall. intros i Hi. rewrite <- toList_In in Hi by (eapply DescSem; eassumption). eapply Desc_inside in Hi; try eassumption. subst msk. rewrite zero_spec by assumption. rewrite negb_true_iff. apply testbit_halfRange_true_false; try assumption. eapply inRange_isSubrange_true; [|eassumption]; isSubrange_true. inRange_false; fail. + rewrite IHHD1, IHHD2. rewrite andb_true_r. rewrite forallb_forall. intros i Hi. rewrite <- toList_In in Hi by (eapply DescSem; eassumption). eapply Desc_inside in Hi; try eassumption. subst msk. rewrite zero_spec by assumption. rewrite negb_involutive. apply testbit_halfRange_false_false; try assumption. eapply inRange_isSubrange_true; [|eassumption]; isSubrange_true. inRange_false; fail. Qed. Lemma valid_tipsValid: forall s, WF s -> tipsValid s = true. Proof. intros. destruct H as [f HSem]. destruct HSem. * reflexivity. * induction HD. + simpl. unfold validTipPrefix. unfoldMethods. destruct r as [p' b]. unfold rPrefix, rBits, snd in *. subst. rewrite N.eqb_eq. apply N.bits_inj_iff. intros i. rewrite N.bits_0. rewrite N.land_spec. destruct (N.ltb_spec i (N.log2 WIDTH)). - rewrite N.shiftl_spec_low by assumption. apply andb_false_r. - rewrite N.shiftl_spec_high' by assumption. rewrite N.bits_above_log2. apply andb_false_l. unfold WIDTH in *. simpl N.log2 in *. lia. + simpl. rewrite IHHD1, IHHD2. reflexivity. Qed. Lemma valid_correct: forall s, WF s -> valid s = true. Proof. intros. unfold valid. rewrite valid_nilNeverChildOfBin by assumption. rewrite valid_maskPowerOfTwo by assumption. rewrite valid_commonPrefix by assumption. rewrite valid_maskRespected by assumption. rewrite valid_tipsValid by assumption. reflexivity. Qed. *) (** ** [IntSet]s with [WF] *) Definition WFIntSet : Type := {s : IntSet | WF s}. Definition pack : forall s : IntSet, WF s -> WFIntSet := exist _. Definition unpack : WFIntSet -> IntSet := @proj1_sig _ _. Definition unpack_WF : forall s : WFIntSet, WF (unpack s) := @proj2_sig _ _. (** ** Type classes *) (** *** Verification of [Eq] *) Require Import Proofs.GHC.Base. Theorem Eq_eq_IntSet (x y : IntSet) : reflect (x = y) (x == y). Proof. change (reflect (x = y) (equal x y)). apply iff_reflect. rewrite equal_spec. reflexivity. Qed. Instance EqLaws_IntSet : EqLaws IntSet. Proof. EqLaws_from_reflect Eq_eq_IntSet. intros x y; unfoldMethods; unfold InternalWord.Eq___IntSet_op_zsze__. rewrite nequal_spec, negb_involutive. reflexivity. Qed. Instance EqExact_IntSet : EqExact IntSet. Proof. constructor; apply Eq_eq_IntSet. Qed. Instance Eq__WFIntSet : Eq_ WFIntSet := fun _ k => k {| op_zeze____ := fun s1 s2 => unpack s1 == unpack s2 ; op_zsze____ := fun s1 s2 => unpack s1 /= unpack s2 |}. Instance EqLaws_WFIntSet : EqLaws WFIntSet := {| Eq_refl := fun s => Eq_refl (unpack s) ; Eq_sym := fun s1 s2 => Eq_sym (unpack s1) (unpack s2) ; Eq_trans := fun s1 s2 s3 => Eq_trans (unpack s1) (unpack s2) (unpack s3) ; Eq_inv := fun s1 s2 => Eq_inv (unpack s1) (unpack s2) |}. (** *** Verification of [Ord] *) Local Close Scope N_scope. 
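(* The [Ord] instance defined next mirrors [Eq__WFIntSet] above: every method is delegated to the underlying [IntSet] through [unpack], so facts about the lifted order reduce to facts about the [IntSet] order by destructing the sigma type. A hypothetical sanity check, not part of this development and relying only on the instance given below:

   Example WFIntSet_compare_unpack (s1 s2 : WFIntSet) :
     compare s1 s2 = compare (unpack s1) (unpack s2).
   Proof. destruct s1, s2; reflexivity. Qed.
*)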
Instance Ord_WFIntSet : Ord WFIntSet := fun _ k => k {| op_zlze____ := fun s1 s2 => unpack s1 <= unpack s2 ; op_zgze____ := fun s1 s2 => unpack s1 >= unpack s2 ; op_zl____ := fun s1 s2 => unpack s1 < unpack s2 ; op_zg____ := fun s1 s2 => unpack s1 > unpack s2 ; compare__ := fun s1 s2 => compare (unpack s1) (unpack s2) ; min__ := fun s1 s2 => if unpack s1 > unpack s2 then s2 else s1 ; max__ := fun s1 s2 => if unpack s1 < unpack s2 then s1 else s2 |}. Ltac unfold_applied t := let rec hd t' := match t' with | ?f _ => hd f | ?x => x end in let f := hd t in let x := fresh "x" in let E := fresh "E" in (remember t as x eqn:E; unfold f in E; subst x) || fail "nothing to unfold". Ltac fold_is_true := repeat match goal with | |- context[?x = true] => let H := fresh in assert ((x = true) <-> is_true x) as H by reflexivity; rewrite H; clear H end. Local Ltac unfold_WFIntSet_Eq := repeat first [ progress simpl | unfold_applied (@op_zeze__ WFIntSet) | unfold_applied (@op_zsze__ WFIntSet) | unfold_applied Eq__WFIntSet ]. Local Ltac unfold_WFIntSet_Ord := unfold_WFIntSet_Eq; repeat first [ progress simpl | unfold_applied (@op_zl__ WFIntSet) | unfold_applied (@op_zl__ IntSet) | unfold_applied (@op_zlze__ WFIntSet) | unfold_applied (@op_zlze__ IntSet) | unfold_applied (@op_zg__ WFIntSet) | unfold_applied (@op_zg__ IntSet) | unfold_applied (@op_zgze__ WFIntSet) | unfold_applied (@op_zgze__ IntSet) | unfold_applied (@compare WFIntSet) | unfold_applied (@compare IntSet) | unfold_applied (@max WFIntSet) | unfold_applied (@max IntSet) | unfold_applied (@min WFIntSet) | unfold_applied (@min IntSet) | unfold_applied Ord_WFIntSet | unfold_applied Ord__IntSet | unfold_applied Data.IntSet.InternalWord.Ord__IntSet_op_zl__ | unfold_applied Data.IntSet.InternalWord.Ord__IntSet_op_zlze__ | unfold_applied Data.IntSet.InternalWord.Ord__IntSet_op_zg__ | unfold_applied Data.IntSet.InternalWord.Ord__IntSet_op_zgze__ | unfold_applied Data.IntSet.InternalWord.Ord__IntSet_compare | unfold_applied Data.IntSet.InternalWord.Ord__IntSet_max | unfold_applied Data.IntSet.InternalWord.Ord__IntSet_min ]. Lemma compare_not_Gt_le {A} `{OrdLaws A} (x y : A) : (compare x y /= Gt) = (x <= y). Proof. destruct (compare x y) eqn:C; order A. Qed. Local Ltac to_Ord_list := repeat match goal with | |- forall s : WFIntSet, _ => intros [? ?]; simpl end; rewrite ?compare_not_Gt_le; repeat match goal with | |- context[toAscList ?s] => let x := fresh in let E := fresh in (remember (toAscList s) as x eqn:E; clear E) end; unfold Key in *. (* Needs OrdLaws on Int *) (* Instance OrdLaws_WFIntSet : OrdLaws WFIntSet. Proof. constructor; unfold_WFIntSet_Ord; try now to_Ord_list; order (list N). - intros [s1 WF1] [s2 WF2]; simpl. rewrite !compare_not_Gt_le => LE1 LE2. apply equal_spec, toAscList_exact; trivial. apply (reflect_iff _ _ (Eq_eq _ _)); generalize dependent LE1; generalize dependent LE2; to_Ord_list; order (list N). - intros [s1 WF1] [s2 WF2]; simpl. destruct (Ord_total (toAscList s1) (toAscList s2)); to_Ord_list; order (list N). - intros [s1 WF1] [s2 WF2]; simpl. unfold "==", Eq___IntSet, Data.IntSet.Internal.Eq___IntSet_op_zeze__; simpl. rewrite equal_spec, toAscList_exact, Ord_compare_Eq; trivial. symmetry; apply (ssrbool.rwP (Eq_eq _ _)). - intros [s1 WF1] [s2 WF2]; simpl. apply eq_iff_eq_true; rewrite !compare_not_Gt_le; fold_is_true; rewrite <-(ssrbool.rwP (Eq_eq _ _)), Ord_compare_Lt; order (list N). - intros [s1 WF1] [s2 WF2]; simpl. 
apply eq_iff_eq_true; rewrite !compare_not_Gt_le; fold_is_true; rewrite <-(ssrbool.rwP (Eq_eq _ _)), Ord_compare_Gt; order (list N). Qed. *) (** *** Verification of [Semigroup] *) Instance Semigroup_WFIntSet : Semigroup WFIntSet := fun _ k => k {| op_zlzlzgzg____ := fun s1 s2 => pack (unpack s1 <<>> unpack s2) (union_WF _ _ (unpack_WF s1) (unpack_WF s2)) |}. Instance SemigroupLaws_WFIntSet : SemigroupLaws WFIntSet. Proof. constructor; intros [s1 [f1 SEM1]] [s2 [f2 SEM2]] [s3 [f3 SEM3]]. unfold_WFIntSet_Eq; fold_is_true; rewrite <-(ssrbool.rwP (Eq_eq _ _)). repeat match goal with | SEMs : Sem ?s ?fs, SEMt : Sem ?t ?ft |- context[?s <<>> ?t] => match goal with | _ : Sem (s <> t) _ |- _ => fail 1 | _ : Sem (union s t) _ |- _ => fail 1 | _ => specialize (union_Sem s fs t ft SEMs SEMt) as ? end end. eapply Sem_unique; try eassumption. now intros i; simpl; rewrite orb_assoc. Qed. (** *** Verification of [Monoid] *) Require Import Data.Monoid. Lemma WFIntSet_unpack_mconcat_WF (ss : list WFIntSet) : WF (mconcat (GHC.Base.map unpack ss)). Admitted. (* needs unions Proof. apply unions_WF, Forall_forall; intros s; rewrite in_map_iff. intros [? [? ?]]; subst; apply unpack_WF. Qed. *) Instance Monoid_WFIntSet : Monoid WFIntSet := fun _ k => k {| mempty__ := pack mempty empty_WF ; mappend__ := _<<>>_ ; mconcat__ := fun ss => pack (mconcat (GHC.Base.map unpack ss)) (WFIntSet_unpack_mconcat_WF ss) |}. Local Ltac WFIntSet_Eq_eq := unfold_WFIntSet_Eq; fold_is_true; rewrite <-(ssrbool.rwP (Eq_eq _ _)). Lemma MonoidLaws_WFIntSet_mconcat_swing (ss : list WFIntSet) (s' z : IntSet) : WF s' -> WF z -> fold_left union (List.map unpack ss) (union z s') = union s' (fold_left union (List.map unpack ss) z). Proof. generalize dependent z; generalize dependent s'; induction ss as [|s ss IH]; simpl; intros s' z WFs' WFz. - now rewrite union_comm. - assert (WF (unpack s)) as WFs by apply unpack_WF. assert (WF (union z s')) as WFzs' by now apply union_WF. assert (WF (fold_left union (List.map unpack ss) z)) as WFssz. { clear - WFz. rewrite <-fold_left_rev_right, <-hs_coq_map, <-map_rev. induction (rev ss) as [|s rss IH]; simpl; trivial. apply union_WF; [apply IH | apply unpack_WF]. } rewrite !IH, !union_assoc; trivial. now rewrite (union_comm s'). (* TODO: This Qed takes 40 seconds on my machine for some reason?! —ASZ *) (* Here too, and this is annoying while working on the file, so I just admit it.*) all:fail. (* ensure that no subgoals are left *) Admitted. Instance MonoidLaws_WFIntSet : MonoidLaws WFIntSet. Proof. constructor. - intros [s WFs]; WFIntSet_Eq_eq. now destruct s. - intros [s WFs]; WFIntSet_Eq_eq. now destruct s. - intros [s1 WF1] [s2 WF2]; WFIntSet_Eq_eq. reflexivity. - intros ss; WFIntSet_Eq_eq. unfold mconcat, Monoid__IntSet, Data.IntSet.InternalWord.Monoid__IntSet_mconcat; simpl. unfold unions; rewrite hs_coq_foldl_list', hs_coq_foldr_base, hs_coq_map. induction ss as [|s ss IH]; simpl. + reflexivity. + rewrite MonoidLaws_WFIntSet_mconcat_swing, IH; auto using unpack_WF. Qed. (** ** Instantiating the [FSetInterface] *) Require Import Coq.FSets.FSetInterface. Require Import Coq.Structures.OrderedTypeEx. Require Import SortedUtil. Module Int_as_OT <: UsualOrderedType. Definition t := Int. Definition eq := @eq Int. Definition eq_refl := @eq_refl t. Definition eq_sym := @eq_sym t. Definition eq_trans := @eq_trans t. Definition lt x y := Z.lt (intToZ x) (intToZ y). Lemma lt_trans : forall x y z : t, lt x y -> lt y z -> lt x z. Admitted. Lemma lt_not_eq : forall x y : t, lt x y -> ~ eq x y. Admitted. 
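(* Both admitted lemmas above should follow directly from the corresponding facts about [Z.lt]; a sketch, using only the standard library (primed names chosen here just to avoid clashing with the admitted ones):

   Lemma lt_trans' : forall x y z : t, lt x y -> lt y z -> lt x z.
   Proof. unfold lt; intros; eapply Z.lt_trans; eassumption. Qed.

   Lemma lt_not_eq' : forall x y : t, lt x y -> ~ eq x y.
   Proof. unfold lt, eq; intros x y Hlt Heq; subst; exact (Z.lt_irrefl _ Hlt). Qed.
*)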
Definition compare x y : Compare lt eq x y. Admitted. Definition eq_dec (x y : t) : { eq x y } + { ~ eq x y }. Admitted. End Int_as_OT. Module IntSetFSet <: WSfun(Int_as_OT) <: WS <: Sfun(Int_as_OT) <: S. Module E := Int_as_OT. Module OrdFacts := OrderedTypeFacts(Int_as_OT). Definition elt := Int. (* Well-formedness *) Definition t := WFIntSet. Notation "x <-- f ;; P" := (match f with | exist x _ => P end) (at level 99, f at next level, right associativity). (* Membership, equality, etc. *) Definition In_set x (s : IntSet) := member x s = true. Definition In x (s' : t) := s <-- s' ;; In_set x s. Definition Equal_set s s' := forall a : Int, In_set a s <-> In_set a s'. Definition Equal s s' := forall a : elt, In a s <-> In a s'. Definition eq : t -> t -> Prop := Equal. Definition Subset s s' := forall a : elt, In a s -> In a s'. Definition Empty s := forall a : elt, ~ In a s. Definition empty : t. eexists. eexists. apply SemNil. intro. reflexivity. Defined. Definition is_empty : t -> bool := fun s' => s <-- s' ;; null s. (* IntSet comparison predicate *) Definition lt (s s' : t) : Prop := (s < s') = true. (* More information later, after we've proved theorems *) (* Minimal and maximal elements *) (* Definition min_elt : t -> option elt := fmap fst ∘ minView ∘ unpack. *) (* Definition max_elt : t -> option elt := fmap fst ∘ maxView ∘ unpack. *) (* The bit-twiddling to prove `minView` and `maxView` correct is quite difficult. In the meantime: *) Definition min_elt : t -> option elt := @hd_error _ ∘ toAscList ∘ unpack. Definition max_elt : t -> option elt := @hd_error _ ∘ toDescList ∘ unpack. (* Theorems *) Lemma empty_1 : Empty empty. Proof. unfold Empty; intros a H. inversion H. Qed. Lemma is_empty_1 : forall s : t, Empty s -> is_empty s = true. Proof. intros. unfold Empty, In, In_set, is_empty in *. destruct s. destruct w as [s HSem]. erewrite null_Sem by eassumption. intro i. specialize (H (NToInt i)). rewrite not_true_iff_false in H. erewrite member_Sem in H by eassumption. Int_Word_N. assumption. Qed. Lemma is_empty_2 : forall s : t, is_empty s = true -> Empty s. Proof. intros ????. unfold In, In_set in *. destruct s. simpl in *. destruct x; try inversion H. inversion H0. Qed. Definition singleton : elt -> t. refine (fun e => pack (singleton e) _). apply singleton_WF; nonneg. Defined. Definition add (e: elt) (s': t) : t. refine (s <-- s' ;; pack (insert e s) _). apply insert_WF; nonneg. Defined. Definition remove (e: elt) (s': t) : t. refine (s <-- s' ;; pack (delete e s) _). apply delete_WF; nonneg. Defined. Definition union (s1' s2' : t) : t. refine (s1 <-- s1' ;; s2 <-- s2' ;; pack (union s1 s2) _). apply union_WF; assumption. Defined. Definition inter (s1' s2' : t) : t. refine (s1 <-- s1' ;; s2 <-- s2' ;; pack (intersection s1 s2) _). apply intersection_WF; assumption. Defined. Definition diff (s1' s2' : t) : t. refine (s1 <-- s1' ;; s2 <-- s2' ;; pack (difference s1 s2) _). apply difference_WF; assumption. Defined. Definition equal : t -> t -> bool := fun ws ws' => s <-- ws ;; s' <-- ws' ;; s == s'. Definition subset : t -> t -> bool := fun ws ws' => s <-- ws ;; s' <-- ws' ;; isSubsetOf s s'. Definition eq_dec : forall s s' : t, {eq s s'} + {~ eq s s'}. Proof. intros. destruct s as [s1 Hwf1]. destruct s' as [s2 Hwf2]. destruct (InternalWord.equal s1 s2) eqn:?. * left. rewrite equal_spec in Heqb. subst. intro. unfold In. reflexivity. * right. apply not_true_iff_false in Heqb. contradict Heqb. rewrite equal_spec. destruct Hwf1 as [f1 HSem1]. destruct Hwf2 as [f2 HSem2]. 
eapply Sem_unique; try eassumption. intro i. apply eq_iff_eq_true. specialize (Heqb (NToInt i)). unfold eq, In, In_set in Heqb. erewrite (member_Sem HSem1) in Heqb. erewrite (member_Sem HSem2) in Heqb. Int_Word_N. assumption. Defined. Lemma eq_refl : forall s : t, eq s s. Proof. destruct s. unfold eq. unfold Equal. intro. reflexivity. Qed. Lemma eq_sym : forall s s' : t, eq s s' -> eq s' s. Proof. destruct s; destruct s'; unfold eq, Equal in *. intros. rewrite H. intuition. Qed. Lemma eq_trans : forall s s' s'' : t, eq s s' -> eq s' s'' -> eq s s''. Proof. destruct s; destruct s'; destruct s''; simpl. unfold eq, Equal. intros ???. rewrite H, H0. reflexivity. Qed. Definition fold (A : Type) (f : elt -> A -> A) (ws : t) (x : A) : A := s <-- ws;; foldl (fun x a => f a x) x s. Definition filter : (elt -> bool) -> t -> t. refine (fun p ws => s <-- ws;; pack (filter p s) _). apply filter_WF; assumption. Defined. Program Definition partition : (elt -> bool) -> t -> t * t := (fun p ws => Data.IntSet.InternalWord.partition p ws). Next Obligation. rewrite partition_snd. apply filter_WF. destruct ws; auto. Qed. Next Obligation. rewrite partition_fst. apply filter_WF. destruct ws; auto. Qed. Definition cardinal : t -> nat := fun ws => s <-- ws;; Z.to_nat (intToZ (size s)). Definition elements (ws : t) : list elt := s <-- ws;; toList s. Lemma In_1 : forall (s : t) (x y : elt), Int_as_OT.eq x y -> In x s -> In y s. Proof. intros. destruct H. assumption. Qed. Definition mem : elt -> t -> bool := fun e s' => s <-- s' ;; member e s. Lemma mem_1 : forall (s : t) (x : elt), In x s -> mem x s = true. Proof. unfold In; intros; destruct s as [s]; auto. Qed. Lemma mem_2 : forall (s : t) (x : elt), mem x s = true -> In x s. Proof. unfold In; intros; destruct s as [s]; auto. Qed. Lemma equal_1 : forall s s' : t, Equal s s' -> equal s s' = true. Proof. intros. destruct s as [s1 [f1 HSem1]]. destruct s' as [s2 [f2 HSem2]]. apply equal_spec. eapply Sem_unique; try eassumption. intro i. apply eq_iff_eq_true. specialize (H (NToInt i)). unfold eq, In, In_set in H. erewrite (member_Sem HSem1) in H. erewrite (member_Sem HSem2) in H. Int_Word_N. assumption. Qed. Lemma equal_2 : forall s s' : t, equal s s' = true -> Equal s s'. Proof. intros. destruct s as [s1 [f1 HSem1]]. destruct s' as [s2 [f2 HSem2]]. apply equal_spec in H. subst. intro i; intuition. Qed. Lemma subset_1 : forall s s' : t, Subset s s' -> subset s s' = true. Proof. intros. destruct s as [s1 [f1 HSem1]]. destruct s' as [s2 [f2 HSem2]]. unfold Subset, subset, In, In_set in *. rewrite isSubsetOf_Sem by eassumption. intro i. specialize (H (NToInt i)). do 2 erewrite member_Sem in H by eassumption. Int_Word_N. assumption. Qed. Lemma subset_2 : forall s s' : t, subset s s' = true -> Subset s s'. Proof. intros. destruct s as [s1 [f1 HSem1]]. destruct s' as [s2 [f2 HSem2]]. unfold Subset, subset, In, In_set in *. rewrite isSubsetOf_Sem in H by eassumption. intro i. specialize (H (intToN i)). do 2 erewrite member_Sem by eassumption. assumption. Qed. Lemma add_1 : forall (s : t) (x y : elt), Int_as_OT.eq x y -> In y (add x s). Proof. intros. inversion_clear H; subst. unfold In, add, pack, In_set; intros; destruct s as [s]. destruct w as [f HSem]. erewrite member_Sem. Focus 2. eapply insert_Sem; try nonneg; try eassumption. intro. reflexivity. simpl. rewrite N.eqb_refl. reflexivity. Qed. Lemma add_2 : forall (s : t) (x y : elt), In y s -> In y (add x s). Proof. intros. unfold In, add, pack, In_set in *; intros; destruct s as [s]. destruct w as [f HSem]. 
erewrite member_Sem. Focus 2. eapply insert_Sem; try nonneg; try eassumption. intro. reflexivity. simpl. rewrite orb_true_iff. right. erewrite <- member_Sem. eassumption. eassumption. Qed. Lemma add_3 : forall (s : t) (x y : elt), ~ Int_as_OT.eq x y -> In y (add x s) -> In y s. Proof. intros. unfold In, add, pack, In_set in *; intros; destruct s as [s]. destruct w as [f HSem]. erewrite member_Sem in H0. Focus 2. eapply insert_Sem; try nonneg; try eassumption. intro. reflexivity. simpl in *. rewrite -> orb_true_iff in H0. rewrite -> N.eqb_eq in H0. Int_Word_N. destruct H0. congruence. erewrite member_Sem. Focus 2. eassumption. assumption. Qed. Lemma remove_1 : forall (s : t) (x y : elt), Int_as_OT.eq x y -> ~ In y (remove x s). Proof. intros. unfold In, remove, pack, In_set in *; intros; destruct s as [s]. destruct H. destruct w as [f HSem]. erewrite member_Sem. Focus 2. eapply delete_Sem; try nonneg. eassumption. intro i. reflexivity. simpl. rewrite N.eqb_refl; simpl. congruence. Qed. Lemma remove_2 : forall (s : t) (x y : elt), ~ Int_as_OT.eq x y -> In y s -> In y (remove x s). Proof. intros. unfold In, remove, pack, In_set in *; intros; destruct s as [s]. apply not_false_iff_true. contradict H. destruct w as [f HSem]. erewrite member_Sem in H. Focus 2. eapply delete_Sem; try nonneg. eassumption. intro i. reflexivity. erewrite member_Sem in H0 by eassumption. simpl in *. destruct (N.eqb_spec (intToN y) (intToN x)); simpl in *; Int_Word_N; try congruence. Qed. Lemma remove_3 : forall (s : t) (x y : elt), In y (remove x s) -> In y s. Proof. intros. unfold In, remove, pack, In_set in *; intros; destruct s as [s]. destruct w as [f HSem]. erewrite member_Sem in H. Focus 2. eapply delete_Sem; try nonneg. eassumption. intro i. reflexivity. erewrite member_Sem by eassumption. simpl in *. rewrite andb_true_iff in H. intuition. Qed. Lemma singleton_1 : forall x y : elt, In y (singleton x) -> Int_as_OT.eq x y. Proof. intros. unfold In, In_set, singleton, pack in *. erewrite member_Sem in H. Focus 2. apply singleton_Sem; nonneg. simpl in H. rewrite -> N.eqb_eq in H. symmetry. Int_Word_N. assumption. Qed. Lemma singleton_2 : forall x y : elt, Int_as_OT.eq x y -> In y (singleton x). Proof. intros. unfold In, In_set, singleton, pack in *. erewrite member_Sem. Focus 2. apply singleton_Sem; nonneg. simpl. rewrite -> N.eqb_eq. congruence. Qed. Lemma union_1 : forall (s s' : t) (x : elt), In x (union s s') -> In x s \/ In x s'. Proof. intros. destruct s, s'. unfold In, In_set, union, pack in *. destruct w as [f1 HSem1], w0 as [f2 HSem2]. erewrite !member_Sem by eassumption. erewrite member_Sem in H by (eapply union_Sem; try eassumption; intro; reflexivity). simpl in *. rewrite orb_true_iff in H. assumption. Qed. Lemma union_2 : forall (s s' : t) (x : elt), In x s -> In x (union s s'). Proof. intros. destruct s, s'. unfold In, In_set, union, pack in *. destruct w as [f1 HSem1], w0 as [f2 HSem2]. erewrite !member_Sem in H by eassumption. erewrite member_Sem by (eapply union_Sem; try eassumption; intro; reflexivity). simpl in *. rewrite orb_true_iff. intuition. Qed. Lemma union_3 : forall (s s' : t) (x : elt), In x s' -> In x (union s s'). Proof. intros. destruct s, s'. unfold In, In_set, union, pack in *. destruct w as [f1 HSem1], w0 as [f2 HSem2]. erewrite !member_Sem in H by eassumption. erewrite member_Sem by (eapply union_Sem; try eassumption; intro; reflexivity). simpl in *. rewrite orb_true_iff. intuition. Qed. Lemma inter_1 : forall (s s' : t) (x : elt), In x (inter s s') -> In x s. Proof. intros. 
destruct s, s'. unfold In, In_set, inter, pack in *. destruct w as [f1 HSem1], w0 as [f2 HSem2]. erewrite !member_Sem by eassumption. erewrite member_Sem in H by (eapply intersection_Sem; try eassumption; intro; reflexivity). simpl in *. rewrite andb_true_iff in H. intuition. Qed. Lemma inter_2 : forall (s s' : t) (x : elt), In x (inter s s') -> In x s'. Proof. intros. destruct s, s'. unfold In, In_set, inter, pack in *. destruct w as [f1 HSem1], w0 as [f2 HSem2]. erewrite !member_Sem by eassumption. erewrite member_Sem in H by (eapply intersection_Sem; try eassumption; intro; reflexivity). simpl in *. rewrite andb_true_iff in H. intuition. Qed. Lemma inter_3 : forall (s s' : t) (x : elt), In x s -> In x s' -> In x (inter s s'). Proof. intros. destruct s, s'. unfold In, In_set, inter, pack in *. destruct w as [f1 HSem1], w0 as [f2 HSem2]. erewrite !member_Sem in H by eassumption. erewrite !member_Sem in H0 by eassumption. erewrite member_Sem by (eapply intersection_Sem; try eassumption; intro; reflexivity). simpl in *. rewrite andb_true_iff. intuition. Qed. Lemma diff_1 : forall (s s' : t) (x : elt), In x (diff s s') -> In x s. Proof. intros. destruct s, s'. unfold In, In_set, diff, pack in *. destruct w as [f1 HSem1], w0 as [f2 HSem2]. erewrite !member_Sem by eassumption. erewrite member_Sem in H by (eapply difference_Sem; try eassumption; intro; reflexivity). simpl in *. rewrite andb_true_iff in H. intuition. Qed. Lemma diff_2 : forall (s s' : t) (x : elt), In x (diff s s') -> ~ In x s'. Proof. intros. destruct s, s'. unfold In, In_set, diff, pack in *. destruct w as [f1 HSem1], w0 as [f2 HSem2]. erewrite !member_Sem by eassumption. erewrite member_Sem in H by (eapply difference_Sem; try eassumption; intro; reflexivity). simpl in *. rewrite andb_true_iff in H. rewrite negb_true_iff in H. intuition congruence. Qed. Lemma diff_3 : forall (s s' : t) (x : elt), In x s -> ~ In x s' -> In x (diff s s'). Proof. intros. destruct s, s'. unfold In, In_set, diff, pack in *. destruct w as [f1 HSem1], w0 as [f2 HSem2]. erewrite !member_Sem in H by eassumption. erewrite !member_Sem in H0 by eassumption. erewrite member_Sem by (eapply difference_Sem; try eassumption; intro; reflexivity). simpl in *. rewrite andb_true_iff. intuition. Qed. Lemma fold_left_map: forall {a b c} f (g : a -> b) (x : c) xs, fold_left (fun a e => f a e) (List.map g xs) x = fold_left (fun a e => f a (g e)) xs x. Proof. intros. revert x. induction xs; intros. * reflexivity. * simpl. rewrite IHxs. reflexivity. Qed. Lemma fold_1 : forall (s : t) (A : Type) (i : A) (f : elt -> A -> A), fold A f s i = fold_left (fun (a : A) (e : elt) => f e a) (elements s) i. Proof. intros. destruct s as [s Hwf]. simpl. apply foldl_spec; assumption. Qed. Lemma cardinal_1 : forall s : t, cardinal s = length (elements s). Proof. intros. destruct s as [s Hwf]. simpl. rewrite size_spec by assumption. Int_Word_N. rewrite Nat2Z.id. reflexivity. Qed. Lemma filter_1 : forall (s : t) (x : elt) (f : elt -> bool), compat_bool Int_as_OT.eq f -> In x (filter f s) -> In x s. Proof. intros s x P Heq Hin. destruct s as [s [f HSem]]. unfold filter, In, In_set in *. simpl in *. erewrite member_Sem by eassumption. erewrite member_Sem in Hin by (eapply filter_Sem; try eassumption; intro i; reflexivity). simpl in *. rewrite andb_true_iff in Hin. intuition. Qed. Lemma filter_2 : forall (s : t) (x : elt) (f : elt -> bool), compat_bool Int_as_OT.eq f -> In x (filter f s) -> f x = true. Proof. intros s x P Heq Hin. destruct s as [s [f HSem]]. unfold filter, In, In_set in *. 
simpl in *. erewrite member_Sem in Hin by (eapply filter_Sem; try eassumption; intro i; reflexivity). simpl in *. rewrite andb_true_iff in Hin. Int_Word_N. intuition. Qed. Lemma filter_3 : forall (s : t) (x : elt) (f : elt -> bool), compat_bool Int_as_OT.eq f -> In x s -> f x = true -> In x (filter f s). Proof. intros s x P Heq Hin HP. destruct s as [s [f HSem]]. unfold filter, In, In_set in *. simpl in *. erewrite member_Sem in Hin by eassumption. erewrite member_Sem by (eapply filter_Sem; try eassumption; intro i; reflexivity). simpl in *. rewrite andb_true_iff. Int_Word_N. intuition. Qed. Lemma partition_1 : forall (s : t) (f : elt -> bool), compat_bool Int_as_OT.eq f -> Equal (fst (partition f s)) (filter f s). Proof. intros. destruct s. unfold Equal, partition; simpl. rewrite partition_fst. reflexivity. Qed. Lemma partition_2 : forall (s : t) (f : elt -> bool), compat_bool Int_as_OT.eq f -> Equal (snd (partition f s)) (filter (fun x : elt => negb (f x)) s). Proof. intros. destruct s. unfold Equal, partition; simpl. rewrite partition_snd by assumption. reflexivity. Qed. Lemma elements_1 : forall (s : t) (x : elt), In x s -> InA Int_as_OT.eq x (elements s). Proof. intros. destruct s as [s Hwf]. destruct Hwf as [f HSem]. simpl in *. unfold In_set in *. erewrite member_Sem in H by eassumption. apply OrdFacts.ListIn_In. rewrite <- toList_In by eassumption. assumption. Qed. Lemma elements_2 : forall (s : t) (x : elt), InA Int_as_OT.eq x (elements s) -> In x s. Proof. intros. destruct s as [s Hwf]. destruct Hwf as [f HSem]. simpl in *. unfold In_set in *. erewrite member_Sem by eassumption. rewrite InA_alt in H. destruct H as [_[[]?]]. rewrite <- toList_In in H by eassumption. assumption. Qed. Lemma elements_3 (s : t) : Sorted E.lt (elements s). Proof. unfold E.lt; destruct s as [s WFs]; simpl. apply StronglySorted_Sorted. now apply to_List_sorted. Qed. Lemma elements_3w (s : t) : NoDupA Int_as_OT.eq (elements s). Proof. apply OrdFacts.Sort_NoDup, elements_3. Qed. (* Ordering theorems *) Definition compare (s s' : t) : Compare lt eq s s'. Admitted. (* Needs OrdLaws *) (* Proof. destruct (compare s s') eqn:CMP. - apply EQ; abstract now apply equal_2; destruct s, s'; generalize dependent CMP; rewrite Ord_compare_Eq; unfold "==", Eq__WFIntSet; simpl. - apply LT; abstract order t. - apply GT; abstract order t. Defined. *) Theorem lt_trans (s1 s2 s3 : t) : lt s1 s2 -> lt s2 s3 -> lt s1 s3. Admitted. (* Proof. unfold lt; order t. Qed. *) Theorem lt_not_eq (s1 s2 : t) : lt s1 s2 -> ~ eq s1 s2. Admitted. (* Proof. destruct s1 as [s1 WF1], s2 as [s2 WF2]. unfold lt, "<", Ord_WFIntSet; simpl. intros LT EQ; apply equal_1, (reflect_iff _ _ (Eq_eq _ _)) in EQ. subst s2; clear WF2. assert (pack s1 WF1 < pack s1 WF1 = true) by now unfold "<", Ord_WFIntSet; simpl. order WFIntSet. Qed. *) Lemma min_elt_1 (s : t) (x : elt) : min_elt s = Some x -> In x s. Proof. destruct s as [s [fs SEM_s]]; unfold min_elt, "∘"; simpl; unfold In_set. destruct (toAscList s) as [|e xs] eqn:ASC; simpl; [easy | intros EQ; inversion EQ; subst e; clear EQ]. erewrite member_Sem by eassumption. eapply toList_In; [eassumption|]. now unfold toList; rewrite ASC; left. Qed. Lemma min_elt_2 (s : t) (x y : elt) : min_elt s = Some x -> In y s -> ~ E.lt y x. Proof. destruct s as [s WFs]; destruct (WFs) as [fs SEM_s]; unfold min_elt, "∘", E.lt; simpl; unfold In_set. destruct (toAscList s) as [|e xs] eqn:ASC; simpl; [easy | intros EQ; inversion EQ; subst e; clear EQ]. erewrite member_Sem, toList_In by eassumption. 
specialize (to_List_sorted s WFs); unfold toList; rewrite ASC; simpl. intros SS' IN_y LT. inversion SS' as [|x_ xs_ SS min_x]; subst x_ xs_. rewrite Forall_forall in min_x. destruct IN_y as [|IN_y]; [subst y|]. - now apply Z.lt_irrefl in LT. - specialize (min_x _ IN_y). specialize (Z.lt_trans _ _ _ LT min_x); apply Z.lt_irrefl. Qed. Lemma min_elt_3 (s : t) : min_elt s = None -> Empty s. Proof. destruct s as [s [fs SEM_s]]; unfold min_elt, "∘", Empty; simpl; unfold In_set. destruct (toAscList s) as [|e xs] eqn:ASC; simpl; [intros _ | easy]. intros x. erewrite member_Sem, toList_In by eassumption. now unfold toList; rewrite ASC. Qed. Lemma max_elt_1 (s : t) (x : elt) : max_elt s = Some x -> In x s. Proof. destruct s as [s WFs]; destruct (WFs) as [fs SEM_s]; unfold max_elt, "∘"; simpl; unfold In_set. rewrite toDescList_spec by easy. destruct (rev (toList s)) as [|e xs] eqn:DESC; simpl; [easy | intros EQ; inversion EQ; subst e; clear EQ]. erewrite member_Sem by eassumption. eapply toList_In, in_rev; [eassumption|]. now fold Key; rewrite DESC; left. Qed. Lemma max_elt_2 (s : t) (x y : elt) : max_elt s = Some x -> In y s -> ~ E.lt x y. Proof. destruct s as [s WFs]; destruct (WFs) as [fs SEM_s]; unfold max_elt, "∘", E.lt; simpl; unfold In_set. rewrite toDescList_spec by easy. destruct (rev (toList s)) as [|e xs] eqn:DESC; simpl; [easy | intros EQ; inversion EQ; subst e; clear EQ]. erewrite member_Sem, toList_In, in_rev by eassumption. specialize (to_List_sorted s WFs); fold Key; rewrite StronglySorted_rev, DESC; simpl. intros SS' IN_y LT. inversion SS' as [|x_ xs_ SS max_x]; subst x_ xs_. rewrite Forall_forall in max_x. destruct IN_y as [|IN_y]; [subst y|]. - now apply Z.lt_irrefl in LT. - specialize (max_x _ IN_y). specialize (Z.lt_trans _ _ _ LT max_x); apply Z.lt_irrefl. Qed. Lemma max_elt_3 (s : t) : max_elt s = None -> Empty s. Proof. destruct s as [s WFs]; destruct (WFs) as [fs SEM_s]; unfold max_elt, "∘", Empty; simpl; unfold In_set. rewrite toDescList_spec by easy. destruct (rev (toList s)) as [|e xs] eqn:DESC; simpl; [intros _ | easy]. intros x. erewrite member_Sem, toList_In, in_rev by eassumption. now fold Key; rewrite DESC. Qed. (** These (non-ordering) portions of the [FSetInterface]s have no counterpart in the [IntSet] interface. We implement them generically. *) Definition For_all (P : elt -> Prop) s := forall x, In x s -> P x. Definition Exists (P : elt -> Prop) s := exists x, In x s /\ P x. Definition for_all : (elt -> bool) -> t -> bool := fun P s => forallb P (elements s). Definition exists_ : (elt -> bool) -> t -> bool := fun P s => existsb P (elements s). Lemma for_all_1 : forall (s : t) (f : elt -> bool), compat_bool Int_as_OT.eq f -> For_all (fun x : elt => f x = true) s -> for_all f s = true. Proof. intros. unfold For_all, for_all in *. rewrite forallb_forall. intros. apply H0. apply elements_2. apply OrdFacts.ListIn_In. assumption. Qed. Lemma for_all_2 : forall (s : t) (f : elt -> bool), compat_bool Int_as_OT.eq f -> for_all f s = true -> For_all (fun x : elt => f x = true) s. Proof. intros. unfold For_all, for_all in *. rewrite forallb_forall in H0. intros. apply H0. apply elements_1 in H1. rewrite InA_alt in H1. destruct H1 as [?[[]?]]. assumption. Qed. Lemma exists_1 : forall (s : t) (f : elt -> bool), compat_bool Int_as_OT.eq f -> Exists (fun x : elt => f x = true) s -> exists_ f s = true. Proof. intros. unfold Exists, exists_ in *. rewrite existsb_exists. destruct H0 as [x[??]]. exists x. split; auto. apply elements_1 in H0. rewrite InA_alt in H0. 
destruct H0 as [?[[]?]]. assumption. Qed. Lemma exists_2 : forall (s : t) (f : elt -> bool), compat_bool Int_as_OT.eq f -> exists_ f s = true -> Exists (fun x : elt => f x = true) s. Proof. intros. unfold Exists, exists_ in *. rewrite existsb_exists in H0. destruct H0 as [x[??]]. exists x. split; auto. apply elements_2. apply OrdFacts.ListIn_In. assumption. Qed. (** One could implement [choose] with [minView]. We currenlty do not translate [minView], because of a call to [error] in a branch that is inaccessible in well-formed trees. Stretch goal: translate that and use it here. *) Definition choose : t -> option elt := fun s => match elements s with | nil => None | x :: _ => Some x end. Lemma choose_1 : forall (s : t) (x : elt), choose s = Some x -> In x s. Proof. intros. unfold choose in *. destruct (elements s) eqn:?; try congruence. inversion H; subst. apply elements_2. rewrite Heql. left. reflexivity. Qed. Lemma choose_2 : forall s : t, choose s = None -> Empty s. Proof. intros. unfold choose in *. destruct (elements s) eqn:?; try congruence. intros x ?. apply elements_1 in H0. rewrite Heql in H0. inversion H0. Qed. Lemma choose_3 (s1 s2 : t) (x1 x2 : elt) : choose s1 = Some x1 -> choose s2 = Some x2 -> Equal s1 s2 -> E.eq x1 x2. Proof. destruct s1 as [s1 WF1], s2 as [s2 WF2]. intros C1 C2 EQ. apply equal_1 in EQ; unfold equal in EQ; eapply reflect_iff in EQ; [|apply Eq_eq]; subst s2. enough (Some x1 = Some x2) as E by now inversion E. etransitivity; first symmetry; eassumption. Qed. End IntSetFSet. (** * Rewrite rules *) (** @ {-# RULES "IntSet.toAscList" [~1] forall s . toAscList s = build (\c n -> foldrFB c n s) #-} {-# RULES "IntSet.toAscListBack" [1] foldrFB (:) [] = toAscList #-} {-# RULES "IntSet.toDescList" [~1] forall s . toDescList s = build (\c n -> foldlFB (\xs x -> c x xs) n s) #-} {-# RULES "IntSet.toDescListBack" [1] foldlFB (\xs x -> x : xs) [] = toDescList #-} @ *) Lemma rule_toAscList: forall (s : IntSet), toAscList s = build (fun _ c n => foldrFB c n s). Proof. intros. reflexivity. Qed. Lemma rule_toAscListBack: foldrFB cons nil = toAscList. Proof. intros. reflexivity. Qed. Lemma rule_toDescList: forall (s : IntSet), toDescList s = build (fun _ c n => foldlFB (fun xs x => c x xs) n s). Proof. intros. reflexivity. Qed. Lemma rule_toDescListBack: forall (s : IntSet), foldlFB (fun xs x => x :: xs) nil = toDescList. Proof. intros. reflexivity. Qed.
"X has a bone to pick with Arcadia's political kingmaker Berkshire, and the two share some ""face time."" Meanwhile, Leigh's online exposé attracts some murderously angry readers. Swierczynski and Nguyen take on Dark Horse's classic vigilante! X MAKES HIS DARK HORSE RETURN WITH SWIERCZYNSKI! ![Project Black Sky](http://images.darkhorse.com/digital/common/pbs.png ""Project Black Sky"") "
% Copyright 2017 Lime Microsystems Ltd.
%
% Licensed under the Apache License, Version 2.0 (the "License");
% you may not use this file except in compliance with the License.
% You may obtain a copy of the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS,
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
% See the License for the specific language governing permissions and
% limitations under the License.
%
% just for fun now, uses compressed ASCII (6 bit) and BCH(15,11) FEC
% Note real encoding uses Viterbi over several radio frames! Viterbi is more efficient than block coding.
% also have not muxed DPCCH into DPDCH
function [y,din,dout]=WCDMADLtxtMsgWrite2( txt, bitsPerFrame )
  m=4;
  n=2^m-1; % also from bchpoly (1)
  k=11; % from bchpoly (2)
  % 21 maybe better than 16, as gives 14 chars instead of 10, with t=2 bit error correct
  t=1; % from bchpoly (3)
  maxMsg=floor(k*floor(bitsPerFrame/n)/6);
  qpsk=[1+i,1-i,-1+i,-1-i];
  % if message too short, add ' 's
  if length(txt)<maxMsg
    chars=maxMsg-length(txt);
    tmpTxt=[txt,repmat(32,1,chars)];
  % if message too long, cut
  elseif length(txt)>maxMsg
    tmpTxt=txt(1:maxMsg);
  else % length(txt)==maxMsg
    tmpTxt=txt;
  end
  pad1=randint(1,(k*ceil(maxMsg*6/k)-maxMsg*6)); % unused bits prior to encoding
  pad2=randint(1,(bitsPerFrame-n*floor(bitsPerFrame/n))); % unused part of encoded message, pad with random numbers
  printf('WCDMADLtxtMsgWrite2[%s],BCH(%g,%g)\n', tmpTxt, n, k );
  din=[reshape(de2bi(ASCII6enc(tmpTxt),6,2,"left-msb")',1,[])];
  data=[din,pad1]; % pad to fit FEC
  % convert to ?xk matrix prior to encoding
  data=reshape(data,length(data)/k,k);
  data=bchenco(data,n,k);
  % convert back to linear matrix and pad to bits per frame
  l=size(data);
  data=reshape(data,1,n*l(1));
  idx=[data,pad2]; % pad unused bits with random data
  % interleave to reduce effect of burst errors
  % idx=reshape(reshape(idx,15,40)',1,600); % was 15 10 150
  dout=matintrlv(idx,15,40);
  % convert to qpsk
  idx=sum( ( (reshape(dout,2,(length(dout)/2))').*[2,1])' )+1;
  y=qpsk(idx);
end
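A minimal usage sketch, not part of the original file: it assumes Octave with the communications package (for randint, bchenco, matintrlv, de2bi), the companion ASCII6enc helper on the path, and bitsPerFrame = 600, which is the value consistent with the hard-coded 15x40 interleaver above (40 BCH(15,11) codewords per frame, so up to floor(11*40/6) = 73 six-bit characters of text). The message string and variable names below are made up for illustration.

% Hypothetical driver script; only WCDMADLtxtMsgWrite2 and its helpers come from the file above.
bitsPerFrame = 600;                          % 40 codewords of 15 bits fill the frame exactly
msg = 'HELLO FROM LIME';                     % anything up to 73 characters fits in one frame
[y, din, dout] = WCDMADLtxtMsgWrite2(msg, bitsPerFrame);
printf('info bits %d, coded+padded bits %d, QPSK symbols %d\n', ...
       length(din), length(dout), length(y));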
[GOAL] d x y x' y' : ℤ h : { re := x, im := y } = { re := x', im := y' } ⊢ { re := x, im := y }.re = { re := x', im := y' }.re ∧ { re := x, im := y }.im = { re := x', im := y' }.im [PROOFSTEP] injection h [GOAL] d x y x' y' : ℤ re_eq✝ : x = x' im_eq✝ : y = y' ⊢ { re := x, im := y }.re = { re := x', im := y' }.re ∧ { re := x, im := y }.im = { re := x', im := y' }.im [PROOFSTEP] constructor [GOAL] case left d x y x' y' : ℤ re_eq✝ : x = x' im_eq✝ : y = y' ⊢ { re := x, im := y }.re = { re := x', im := y' }.re [PROOFSTEP] assumption [GOAL] case right d x y x' y' : ℤ re_eq✝ : x = x' im_eq✝ : y = y' ⊢ { re := x, im := y }.im = { re := x', im := y' }.im [PROOFSTEP] assumption [GOAL] d x y x' y' : ℤ x✝ : { re := x, im := y }.re = { re := x', im := y' }.re ∧ { re := x, im := y }.im = { re := x', im := y' }.im h₁ : { re := x, im := y }.re = { re := x', im := y' }.re h₂ : { re := x, im := y }.im = { re := x', im := y' }.im ⊢ { re := x, im := y } = { re := x', im := y' } [PROOFSTEP] congr [GOAL] d : ℤ z : ℤ√d ⊢ (bit1 z).im = bit0 z.im [PROOFSTEP] simp [bit1] [GOAL] d : ℤ ⊢ AddCommGroup (ℤ√d) [PROOFSTEP] refine { add := (· + ·) zero := (0 : ℤ√d) sub := fun a b => a + -b neg := Neg.neg zsmul := @zsmulRec (ℤ√d) ⟨0⟩ ⟨(· + ·)⟩ ⟨Neg.neg⟩ nsmul := @nsmulRec (ℤ√d) ⟨0⟩ ⟨(· + ·)⟩ add_assoc := ?_ zero_add := ?_ add_zero := ?_ add_left_neg := ?_ add_comm := ?_ } [GOAL] case refine_1 d : ℤ ⊢ ∀ (a b c : ℤ√d), a + b + c = a + (b + c) [PROOFSTEP] intros [GOAL] case refine_2 d : ℤ ⊢ ∀ (a : ℤ√d), 0 + a = a [PROOFSTEP] intros [GOAL] case refine_3 d : ℤ ⊢ ∀ (a : ℤ√d), a + 0 = a [PROOFSTEP] intros [GOAL] case refine_4 d : ℤ ⊢ ∀ (a : ℤ√d), -a + a = 0 [PROOFSTEP] intros [GOAL] case refine_5 d : ℤ ⊢ ∀ (a b : ℤ√d), a + b = b + a [PROOFSTEP] intros [GOAL] case refine_1 d : ℤ a✝ b✝ c✝ : ℤ√d ⊢ a✝ + b✝ + c✝ = a✝ + (b✝ + c✝) [PROOFSTEP] simp [ext, add_comm, add_left_comm] [GOAL] case refine_2 d : ℤ a✝ : ℤ√d ⊢ 0 + a✝ = a✝ [PROOFSTEP] simp [ext, add_comm, add_left_comm] [GOAL] case refine_3 d : ℤ a✝ : ℤ√d ⊢ a✝ + 0 = a✝ [PROOFSTEP] simp [ext, add_comm, add_left_comm] [GOAL] case refine_4 d : ℤ a✝ : ℤ√d ⊢ -a✝ + a✝ = 0 [PROOFSTEP] simp [ext, add_comm, add_left_comm] [GOAL] case refine_5 d : ℤ a✝ b✝ : ℤ√d ⊢ a✝ + b✝ = b✝ + a✝ [PROOFSTEP] simp [ext, add_comm, add_left_comm] [GOAL] d : ℤ ⊢ CommRing (ℤ√d) [PROOFSTEP] refine { Zsqrtd.addGroupWithOne with add := (· + ·) zero := (0 : ℤ√d) mul := (· * ·) one := 1 npow := @npowRec (ℤ√d) ⟨1⟩ ⟨(· * ·)⟩, add_comm := ?_ left_distrib := ?_ right_distrib := ?_ zero_mul := ?_ mul_zero := ?_ mul_assoc := ?_ one_mul := ?_ mul_one := ?_ mul_comm := ?_ } [GOAL] case refine_1 d : ℤ src✝ : AddGroupWithOne (ℤ√d) := addGroupWithOne ⊢ ∀ (a b : ℤ√d), a + b = b + a [PROOFSTEP] intros [GOAL] case refine_2 d : ℤ src✝ : AddGroupWithOne (ℤ√d) := addGroupWithOne ⊢ ∀ (a b c : ℤ√d), a * (b + c) = a * b + a * c [PROOFSTEP] intros [GOAL] case refine_3 d : ℤ src✝ : AddGroupWithOne (ℤ√d) := addGroupWithOne ⊢ ∀ (a b c : ℤ√d), (a + b) * c = a * c + b * c [PROOFSTEP] intros [GOAL] case refine_4 d : ℤ src✝ : AddGroupWithOne (ℤ√d) := addGroupWithOne ⊢ ∀ (a : ℤ√d), 0 * a = 0 [PROOFSTEP] intros [GOAL] case refine_5 d : ℤ src✝ : AddGroupWithOne (ℤ√d) := addGroupWithOne ⊢ ∀ (a : ℤ√d), a * 0 = 0 [PROOFSTEP] intros [GOAL] case refine_6 d : ℤ src✝ : AddGroupWithOne (ℤ√d) := addGroupWithOne ⊢ ∀ (a b c : ℤ√d), a * b * c = a * (b * c) [PROOFSTEP] intros [GOAL] case refine_7 d : ℤ src✝ : AddGroupWithOne (ℤ√d) := addGroupWithOne ⊢ ∀ (a : ℤ√d), 1 * a = a [PROOFSTEP] intros [GOAL] case refine_8 d : ℤ src✝ : AddGroupWithOne (ℤ√d) := 
addGroupWithOne ⊢ ∀ (a : ℤ√d), a * 1 = a [PROOFSTEP] intros [GOAL] case refine_9 d : ℤ src✝ : AddGroupWithOne (ℤ√d) := addGroupWithOne ⊢ ∀ (a b : ℤ√d), a * b = b * a [PROOFSTEP] intros [GOAL] case refine_1 d : ℤ src✝ : AddGroupWithOne (ℤ√d) := addGroupWithOne a✝ b✝ : ℤ√d ⊢ a✝ + b✝ = b✝ + a✝ [PROOFSTEP] refine ext.mpr ⟨?_, ?_⟩ [GOAL] case refine_2 d : ℤ src✝ : AddGroupWithOne (ℤ√d) := addGroupWithOne a✝ b✝ c✝ : ℤ√d ⊢ a✝ * (b✝ + c✝) = a✝ * b✝ + a✝ * c✝ [PROOFSTEP] refine ext.mpr ⟨?_, ?_⟩ [GOAL] case refine_3 d : ℤ src✝ : AddGroupWithOne (ℤ√d) := addGroupWithOne a✝ b✝ c✝ : ℤ√d ⊢ (a✝ + b✝) * c✝ = a✝ * c✝ + b✝ * c✝ [PROOFSTEP] refine ext.mpr ⟨?_, ?_⟩ [GOAL] case refine_4 d : ℤ src✝ : AddGroupWithOne (ℤ√d) := addGroupWithOne a✝ : ℤ√d ⊢ 0 * a✝ = 0 [PROOFSTEP] refine ext.mpr ⟨?_, ?_⟩ [GOAL] case refine_5 d : ℤ src✝ : AddGroupWithOne (ℤ√d) := addGroupWithOne a✝ : ℤ√d ⊢ a✝ * 0 = 0 [PROOFSTEP] refine ext.mpr ⟨?_, ?_⟩ [GOAL] case refine_6 d : ℤ src✝ : AddGroupWithOne (ℤ√d) := addGroupWithOne a✝ b✝ c✝ : ℤ√d ⊢ a✝ * b✝ * c✝ = a✝ * (b✝ * c✝) [PROOFSTEP] refine ext.mpr ⟨?_, ?_⟩ [GOAL] case refine_7 d : ℤ src✝ : AddGroupWithOne (ℤ√d) := addGroupWithOne a✝ : ℤ√d ⊢ 1 * a✝ = a✝ [PROOFSTEP] refine ext.mpr ⟨?_, ?_⟩ [GOAL] case refine_8 d : ℤ src✝ : AddGroupWithOne (ℤ√d) := addGroupWithOne a✝ : ℤ√d ⊢ a✝ * 1 = a✝ [PROOFSTEP] refine ext.mpr ⟨?_, ?_⟩ [GOAL] case refine_9 d : ℤ src✝ : AddGroupWithOne (ℤ√d) := addGroupWithOne a✝ b✝ : ℤ√d ⊢ a✝ * b✝ = b✝ * a✝ [PROOFSTEP] refine ext.mpr ⟨?_, ?_⟩ [GOAL] case refine_1.refine_1 d : ℤ src✝ : AddGroupWithOne (ℤ√d) := addGroupWithOne a✝ b✝ : ℤ√d ⊢ (a✝ + b✝).re = (b✝ + a✝).re [PROOFSTEP] simp [GOAL] case refine_1.refine_2 d : ℤ src✝ : AddGroupWithOne (ℤ√d) := addGroupWithOne a✝ b✝ : ℤ√d ⊢ (a✝ + b✝).im = (b✝ + a✝).im [PROOFSTEP] simp [GOAL] case refine_2.refine_1 d : ℤ src✝ : AddGroupWithOne (ℤ√d) := addGroupWithOne a✝ b✝ c✝ : ℤ√d ⊢ (a✝ * (b✝ + c✝)).re = (a✝ * b✝ + a✝ * c✝).re [PROOFSTEP] simp [GOAL] case refine_2.refine_2 d : ℤ src✝ : AddGroupWithOne (ℤ√d) := addGroupWithOne a✝ b✝ c✝ : ℤ√d ⊢ (a✝ * (b✝ + c✝)).im = (a✝ * b✝ + a✝ * c✝).im [PROOFSTEP] simp [GOAL] case refine_3.refine_1 d : ℤ src✝ : AddGroupWithOne (ℤ√d) := addGroupWithOne a✝ b✝ c✝ : ℤ√d ⊢ ((a✝ + b✝) * c✝).re = (a✝ * c✝ + b✝ * c✝).re [PROOFSTEP] simp [GOAL] case refine_3.refine_2 d : ℤ src✝ : AddGroupWithOne (ℤ√d) := addGroupWithOne a✝ b✝ c✝ : ℤ√d ⊢ ((a✝ + b✝) * c✝).im = (a✝ * c✝ + b✝ * c✝).im [PROOFSTEP] simp [GOAL] case refine_4.refine_1 d : ℤ src✝ : AddGroupWithOne (ℤ√d) := addGroupWithOne a✝ : ℤ√d ⊢ (0 * a✝).re = 0.re [PROOFSTEP] simp [GOAL] case refine_4.refine_2 d : ℤ src✝ : AddGroupWithOne (ℤ√d) := addGroupWithOne a✝ : ℤ√d ⊢ (0 * a✝).im = 0.im [PROOFSTEP] simp [GOAL] case refine_5.refine_1 d : ℤ src✝ : AddGroupWithOne (ℤ√d) := addGroupWithOne a✝ : ℤ√d ⊢ (a✝ * 0).re = 0.re [PROOFSTEP] simp [GOAL] case refine_5.refine_2 d : ℤ src✝ : AddGroupWithOne (ℤ√d) := addGroupWithOne a✝ : ℤ√d ⊢ (a✝ * 0).im = 0.im [PROOFSTEP] simp [GOAL] case refine_6.refine_1 d : ℤ src✝ : AddGroupWithOne (ℤ√d) := addGroupWithOne a✝ b✝ c✝ : ℤ√d ⊢ (a✝ * b✝ * c✝).re = (a✝ * (b✝ * c✝)).re [PROOFSTEP] simp [GOAL] case refine_6.refine_2 d : ℤ src✝ : AddGroupWithOne (ℤ√d) := addGroupWithOne a✝ b✝ c✝ : ℤ√d ⊢ (a✝ * b✝ * c✝).im = (a✝ * (b✝ * c✝)).im [PROOFSTEP] simp [GOAL] case refine_7.refine_1 d : ℤ src✝ : AddGroupWithOne (ℤ√d) := addGroupWithOne a✝ : ℤ√d ⊢ (1 * a✝).re = a✝.re [PROOFSTEP] simp [GOAL] case refine_7.refine_2 d : ℤ src✝ : AddGroupWithOne (ℤ√d) := addGroupWithOne a✝ : ℤ√d ⊢ (1 * a✝).im = a✝.im [PROOFSTEP] simp [GOAL] case 
refine_8.refine_1 d : ℤ src✝ : AddGroupWithOne (ℤ√d) := addGroupWithOne a✝ : ℤ√d ⊢ (a✝ * 1).re = a✝.re [PROOFSTEP] simp [GOAL] case refine_8.refine_2 d : ℤ src✝ : AddGroupWithOne (ℤ√d) := addGroupWithOne a✝ : ℤ√d ⊢ (a✝ * 1).im = a✝.im [PROOFSTEP] simp [GOAL] case refine_9.refine_1 d : ℤ src✝ : AddGroupWithOne (ℤ√d) := addGroupWithOne a✝ b✝ : ℤ√d ⊢ (a✝ * b✝).re = (b✝ * a✝).re [PROOFSTEP] simp [GOAL] case refine_9.refine_2 d : ℤ src✝ : AddGroupWithOne (ℤ√d) := addGroupWithOne a✝ b✝ : ℤ√d ⊢ (a✝ * b✝).im = (b✝ * a✝).im [PROOFSTEP] simp [GOAL] case refine_1.refine_1 d : ℤ src✝ : AddGroupWithOne (ℤ√d) := addGroupWithOne a✝ b✝ : ℤ√d ⊢ a✝.re + b✝.re = b✝.re + a✝.re [PROOFSTEP] ring [GOAL] case refine_1.refine_2 d : ℤ src✝ : AddGroupWithOne (ℤ√d) := addGroupWithOne a✝ b✝ : ℤ√d ⊢ a✝.im + b✝.im = b✝.im + a✝.im [PROOFSTEP] ring [GOAL] case refine_2.refine_1 d : ℤ src✝ : AddGroupWithOne (ℤ√d) := addGroupWithOne a✝ b✝ c✝ : ℤ√d ⊢ a✝.re * (b✝.re + c✝.re) + d * a✝.im * (b✝.im + c✝.im) = a✝.re * b✝.re + d * a✝.im * b✝.im + (a✝.re * c✝.re + d * a✝.im * c✝.im) [PROOFSTEP] ring [GOAL] case refine_2.refine_2 d : ℤ src✝ : AddGroupWithOne (ℤ√d) := addGroupWithOne a✝ b✝ c✝ : ℤ√d ⊢ a✝.re * (b✝.im + c✝.im) + a✝.im * (b✝.re + c✝.re) = a✝.re * b✝.im + a✝.im * b✝.re + (a✝.re * c✝.im + a✝.im * c✝.re) [PROOFSTEP] ring [GOAL] case refine_3.refine_1 d : ℤ src✝ : AddGroupWithOne (ℤ√d) := addGroupWithOne a✝ b✝ c✝ : ℤ√d ⊢ (a✝.re + b✝.re) * c✝.re + d * (a✝.im + b✝.im) * c✝.im = a✝.re * c✝.re + d * a✝.im * c✝.im + (b✝.re * c✝.re + d * b✝.im * c✝.im) [PROOFSTEP] ring [GOAL] case refine_3.refine_2 d : ℤ src✝ : AddGroupWithOne (ℤ√d) := addGroupWithOne a✝ b✝ c✝ : ℤ√d ⊢ (a✝.re + b✝.re) * c✝.im + (a✝.im + b✝.im) * c✝.re = a✝.re * c✝.im + a✝.im * c✝.re + (b✝.re * c✝.im + b✝.im * c✝.re) [PROOFSTEP] ring [GOAL] case refine_6.refine_1 d : ℤ src✝ : AddGroupWithOne (ℤ√d) := addGroupWithOne a✝ b✝ c✝ : ℤ√d ⊢ (a✝.re * b✝.re + d * a✝.im * b✝.im) * c✝.re + d * (a✝.re * b✝.im + a✝.im * b✝.re) * c✝.im = a✝.re * (b✝.re * c✝.re + d * b✝.im * c✝.im) + d * a✝.im * (b✝.re * c✝.im + b✝.im * c✝.re) [PROOFSTEP] ring [GOAL] case refine_6.refine_2 d : ℤ src✝ : AddGroupWithOne (ℤ√d) := addGroupWithOne a✝ b✝ c✝ : ℤ√d ⊢ (a✝.re * b✝.re + d * a✝.im * b✝.im) * c✝.im + (a✝.re * b✝.im + a✝.im * b✝.re) * c✝.re = a✝.re * (b✝.re * c✝.im + b✝.im * c✝.re) + a✝.im * (b✝.re * c✝.re + d * b✝.im * c✝.im) [PROOFSTEP] ring [GOAL] case refine_9.refine_1 d : ℤ src✝ : AddGroupWithOne (ℤ√d) := addGroupWithOne a✝ b✝ : ℤ√d ⊢ a✝.re * b✝.re + d * a✝.im * b✝.im = b✝.re * a✝.re + d * b✝.im * a✝.im [PROOFSTEP] ring [GOAL] case refine_9.refine_2 d : ℤ src✝ : AddGroupWithOne (ℤ√d) := addGroupWithOne a✝ b✝ : ℤ√d ⊢ a✝.re * b✝.im + a✝.im * b✝.re = b✝.re * a✝.im + b✝.im * a✝.re [PROOFSTEP] ring [GOAL] d : ℤ ⊢ AddMonoid (ℤ√d) [PROOFSTEP] infer_instance [GOAL] d : ℤ ⊢ Monoid (ℤ√d) [PROOFSTEP] infer_instance [GOAL] d : ℤ ⊢ CommMonoid (ℤ√d) [PROOFSTEP] infer_instance [GOAL] d : ℤ ⊢ CommSemigroup (ℤ√d) [PROOFSTEP] infer_instance [GOAL] d : ℤ ⊢ Semigroup (ℤ√d) [PROOFSTEP] infer_instance [GOAL] d : ℤ ⊢ AddCommSemigroup (ℤ√d) [PROOFSTEP] infer_instance [GOAL] d : ℤ ⊢ AddSemigroup (ℤ√d) [PROOFSTEP] infer_instance [GOAL] d : ℤ ⊢ CommSemiring (ℤ√d) [PROOFSTEP] infer_instance [GOAL] d : ℤ ⊢ Semiring (ℤ√d) [PROOFSTEP] infer_instance [GOAL] d : ℤ ⊢ Ring (ℤ√d) [PROOFSTEP] infer_instance [GOAL] d : ℤ ⊢ Distrib (ℤ√d) [PROOFSTEP] infer_instance [GOAL] d : ℤ a b : ℤ√d ⊢ (star (a * b)).re = (star b * star a).re [PROOFSTEP] simp [GOAL] d : ℤ a b : ℤ√d ⊢ a.re * b.re + d * a.im * b.im = b.re * a.re + d * b.im * 
a.im [PROOFSTEP] ring [GOAL] d : ℤ a b : ℤ√d ⊢ (star (a * b)).im = (star b * star a).im [PROOFSTEP] simp [GOAL] d : ℤ a b : ℤ√d ⊢ -(a.im * b.re) + -(a.re * b.im) = -(b.re * a.im) + -(b.im * a.re) [PROOFSTEP] ring [GOAL] d : ℤ ⊢ ¬(0.re = 1.re ∧ 0.im = 1.im) [PROOFSTEP] simp [GOAL] d n : ℤ ⊢ (↑n).re = n [PROOFSTEP] cases n [GOAL] case ofNat d : ℤ a✝ : ℕ ⊢ (↑(Int.ofNat a✝)).re = Int.ofNat a✝ [PROOFSTEP] rfl [GOAL] case negSucc d : ℤ a✝ : ℕ ⊢ (↑(Int.negSucc a✝)).re = Int.negSucc a✝ [PROOFSTEP] rfl [GOAL] d n : ℤ ⊢ (↑n).im = 0 [PROOFSTEP] cases n [GOAL] case ofNat d : ℤ a✝ : ℕ ⊢ (↑(Int.ofNat a✝)).im = 0 [PROOFSTEP] rfl [GOAL] case negSucc d : ℤ a✝ : ℕ ⊢ (↑(Int.negSucc a✝)).im = 0 [PROOFSTEP] rfl [GOAL] d n : ℤ ⊢ ↑n = { re := n, im := 0 } [PROOFSTEP] simp [ext] [GOAL] d : ℤ m n : ℕ ⊢ ↑m = ↑n → m = n [PROOFSTEP] simp [ext] [GOAL] d n : ℤ ⊢ ofInt n = ↑n [PROOFSTEP] simp [ext, ofInt_re, ofInt_im] [GOAL] d n x y : ℤ ⊢ ↑n * { re := x, im := y } = { re := n * x, im := n * y } [PROOFSTEP] simp [ext] [GOAL] d a : ℤ b : ℤ√d ⊢ (↑a * b).re = a * b.re [PROOFSTEP] simp [GOAL] d a : ℤ b : ℤ√d ⊢ (↑a * b).im = a * b.im [PROOFSTEP] simp [GOAL] d x y : ℤ ⊢ sqrtd * { re := x, im := y } = { re := d * y, im := x } [PROOFSTEP] simp [ext] [GOAL] d : ℤ ⊢ sqrtd * sqrtd = ↑d [PROOFSTEP] simp [ext] [GOAL] d n x y : ℤ ⊢ sqrtd * ↑n * { re := x, im := y } = { re := d * n * y, im := n * x } [PROOFSTEP] simp [ext] [GOAL] d x y : ℤ ⊢ { re := x, im := y } = ↑x + sqrtd * ↑y [PROOFSTEP] simp [ext] [GOAL] d x y : ℤ ⊢ { re := x, im := y } * star { re := x, im := y } = ↑x * ↑x - ↑d * ↑y * ↑y [PROOFSTEP] simp [ext, sub_eq_add_neg, mul_comm] [GOAL] d m n : ℤ h : ↑m = ↑n ⊢ m = n [PROOFSTEP] simpa using congr_arg re h [GOAL] d z : ℤ a : ℤ√d ⊢ ↑z ∣ a ↔ z ∣ a.re ∧ z ∣ a.im [PROOFSTEP] constructor [GOAL] case mp d z : ℤ a : ℤ√d ⊢ ↑z ∣ a → z ∣ a.re ∧ z ∣ a.im [PROOFSTEP] rintro ⟨x, rfl⟩ [GOAL] case mp.intro d z : ℤ x : ℤ√d ⊢ z ∣ (↑z * x).re ∧ z ∣ (↑z * x).im [PROOFSTEP] simp only [add_zero, coe_int_re, zero_mul, mul_im, dvd_mul_right, and_self_iff, mul_re, mul_zero, coe_int_im] [GOAL] case mpr d z : ℤ a : ℤ√d ⊢ z ∣ a.re ∧ z ∣ a.im → ↑z ∣ a [PROOFSTEP] rintro ⟨⟨r, hr⟩, ⟨i, hi⟩⟩ [GOAL] case mpr.intro.intro.intro d z : ℤ a : ℤ√d r : ℤ hr : a.re = z * r i : ℤ hi : a.im = z * i ⊢ ↑z ∣ a [PROOFSTEP] use⟨r, i⟩ [GOAL] case h d z : ℤ a : ℤ√d r : ℤ hr : a.re = z * r i : ℤ hi : a.im = z * i ⊢ a = ↑z * { re := r, im := i } [PROOFSTEP] rw [smul_val, ext] [GOAL] case h d z : ℤ a : ℤ√d r : ℤ hr : a.re = z * r i : ℤ hi : a.im = z * i ⊢ a.re = { re := z * r, im := z * i }.re ∧ a.im = { re := z * r, im := z * i }.im [PROOFSTEP] exact ⟨hr, hi⟩ [GOAL] d a b : ℤ ⊢ ↑a ∣ ↑b ↔ a ∣ b [PROOFSTEP] rw [coe_int_dvd_iff] [GOAL] d a b : ℤ ⊢ a ∣ (↑b).re ∧ a ∣ (↑b).im ↔ a ∣ b [PROOFSTEP] constructor [GOAL] case mp d a b : ℤ ⊢ a ∣ (↑b).re ∧ a ∣ (↑b).im → a ∣ b [PROOFSTEP] rintro ⟨hre, -⟩ [GOAL] case mp.intro d a b : ℤ hre : a ∣ (↑b).re ⊢ a ∣ b [PROOFSTEP] rwa [coe_int_re] at hre [GOAL] case mpr d a b : ℤ ⊢ a ∣ b → a ∣ (↑b).re ∧ a ∣ (↑b).im [PROOFSTEP] rw [coe_int_re, coe_int_im] [GOAL] case mpr d a b : ℤ ⊢ a ∣ b → a ∣ b ∧ a ∣ 0 [PROOFSTEP] exact fun hc => ⟨hc, dvd_zero a⟩ [GOAL] d a : ℤ b c : ℤ√d ha : a ≠ 0 h : ↑a * b = ↑a * c ⊢ b = c [PROOFSTEP] rw [ext] at h ⊢ [GOAL] d a : ℤ b c : ℤ√d ha : a ≠ 0 h : (↑a * b).re = (↑a * c).re ∧ (↑a * b).im = (↑a * c).im ⊢ b.re = c.re ∧ b.im = c.im [PROOFSTEP] apply And.imp _ _ h [GOAL] d a : ℤ b c : ℤ√d ha : a ≠ 0 h : (↑a * b).re = (↑a * c).re ∧ (↑a * b).im = (↑a * c).im ⊢ (↑a * b).re = (↑a * c).re → b.re = c.re [PROOFSTEP] simpa only 
[smul_re, smul_im] using mul_left_cancel₀ ha [GOAL] d a : ℤ b c : ℤ√d ha : a ≠ 0 h : (↑a * b).re = (↑a * c).re ∧ (↑a * b).im = (↑a * c).im ⊢ (↑a * b).im = (↑a * c).im → b.im = c.im [PROOFSTEP] simpa only [smul_re, smul_im] using mul_left_cancel₀ ha [GOAL] d : ℤ a : ℤ√d ⊢ Int.gcd a.re a.im = 0 ↔ a = 0 [PROOFSTEP] simp only [Int.gcd_eq_zero_iff, ext, eq_self_iff_true, zero_im, zero_re] [GOAL] d : ℤ a b : ℤ√d hcoprime : IsCoprime a.re a.im hdvd : b ∣ a ⊢ IsCoprime b.re b.im [PROOFSTEP] apply isCoprime_of_dvd [GOAL] case nonzero d : ℤ a b : ℤ√d hcoprime : IsCoprime a.re a.im hdvd : b ∣ a ⊢ ¬(b.re = 0 ∧ b.im = 0) [PROOFSTEP] rintro ⟨hre, him⟩ [GOAL] case nonzero.intro d : ℤ a b : ℤ√d hcoprime : IsCoprime a.re a.im hdvd : b ∣ a hre : b.re = 0 him : b.im = 0 ⊢ False [PROOFSTEP] obtain rfl : b = 0 := by simp only [ext, hre, eq_self_iff_true, zero_im, him, and_self_iff, zero_re] [GOAL] d : ℤ a b : ℤ√d hcoprime : IsCoprime a.re a.im hdvd : b ∣ a hre : b.re = 0 him : b.im = 0 ⊢ b = 0 [PROOFSTEP] simp only [ext, hre, eq_self_iff_true, zero_im, him, and_self_iff, zero_re] [GOAL] case nonzero.intro d : ℤ a : ℤ√d hcoprime : IsCoprime a.re a.im hdvd : 0 ∣ a hre : 0.re = 0 him : 0.im = 0 ⊢ False [PROOFSTEP] rw [zero_dvd_iff] at hdvd [GOAL] case nonzero.intro d : ℤ a : ℤ√d hcoprime : IsCoprime a.re a.im hdvd : a = 0 hre : 0.re = 0 him : 0.im = 0 ⊢ False [PROOFSTEP] simp [hdvd, zero_im, zero_re, not_isCoprime_zero_zero] at hcoprime [GOAL] case H d : ℤ a b : ℤ√d hcoprime : IsCoprime a.re a.im hdvd : b ∣ a ⊢ ∀ (z : ℤ), z ∈ nonunits ℤ → z ≠ 0 → z ∣ b.re → ¬z ∣ b.im [PROOFSTEP] rintro z hz - hzdvdu hzdvdv [GOAL] case H d : ℤ a b : ℤ√d hcoprime : IsCoprime a.re a.im hdvd : b ∣ a z : ℤ hz : z ∈ nonunits ℤ hzdvdu : z ∣ b.re hzdvdv : z ∣ b.im ⊢ False [PROOFSTEP] apply hz [GOAL] case H d : ℤ a b : ℤ√d hcoprime : IsCoprime a.re a.im hdvd : b ∣ a z : ℤ hz : z ∈ nonunits ℤ hzdvdu : z ∣ b.re hzdvdv : z ∣ b.im ⊢ IsUnit z [PROOFSTEP] obtain ⟨ha, hb⟩ : z ∣ a.re ∧ z ∣ a.im := by rw [← coe_int_dvd_iff] apply dvd_trans _ hdvd rw [coe_int_dvd_iff] exact ⟨hzdvdu, hzdvdv⟩ [GOAL] d : ℤ a b : ℤ√d hcoprime : IsCoprime a.re a.im hdvd : b ∣ a z : ℤ hz : z ∈ nonunits ℤ hzdvdu : z ∣ b.re hzdvdv : z ∣ b.im ⊢ z ∣ a.re ∧ z ∣ a.im [PROOFSTEP] rw [← coe_int_dvd_iff] [GOAL] d : ℤ a b : ℤ√d hcoprime : IsCoprime a.re a.im hdvd : b ∣ a z : ℤ hz : z ∈ nonunits ℤ hzdvdu : z ∣ b.re hzdvdv : z ∣ b.im ⊢ ↑z ∣ a [PROOFSTEP] apply dvd_trans _ hdvd [GOAL] d : ℤ a b : ℤ√d hcoprime : IsCoprime a.re a.im hdvd : b ∣ a z : ℤ hz : z ∈ nonunits ℤ hzdvdu : z ∣ b.re hzdvdv : z ∣ b.im ⊢ ↑z ∣ b [PROOFSTEP] rw [coe_int_dvd_iff] [GOAL] d : ℤ a b : ℤ√d hcoprime : IsCoprime a.re a.im hdvd : b ∣ a z : ℤ hz : z ∈ nonunits ℤ hzdvdu : z ∣ b.re hzdvdv : z ∣ b.im ⊢ z ∣ b.re ∧ z ∣ b.im [PROOFSTEP] exact ⟨hzdvdu, hzdvdv⟩ [GOAL] case H.intro d : ℤ a b : ℤ√d hcoprime : IsCoprime a.re a.im hdvd : b ∣ a z : ℤ hz : z ∈ nonunits ℤ hzdvdu : z ∣ b.re hzdvdv : z ∣ b.im ha : z ∣ a.re hb : z ∣ a.im ⊢ IsUnit z [PROOFSTEP] exact hcoprime.isUnit_of_dvd' ha hb [GOAL] d : ℤ a : ℤ√d hgcd : 0 < Int.gcd a.re a.im ⊢ ∃ b, a = ↑↑(Int.gcd a.re a.im) * b ∧ IsCoprime b.re b.im [PROOFSTEP] obtain ⟨re, im, H1, Hre, Him⟩ := Int.exists_gcd_one hgcd [GOAL] case intro.intro.intro.intro d : ℤ a : ℤ√d hgcd : 0 < Int.gcd a.re a.im re im : ℤ H1 : Int.gcd re im = 1 Hre : a.re = re * ↑(Int.gcd a.re a.im) Him : a.im = im * ↑(Int.gcd a.re a.im) ⊢ ∃ b, a = ↑↑(Int.gcd a.re a.im) * b ∧ IsCoprime b.re b.im [PROOFSTEP] rw [mul_comm] at Hre Him [GOAL] case intro.intro.intro.intro d : ℤ a : ℤ√d hgcd : 0 < Int.gcd a.re 
a.im re im : ℤ H1 : Int.gcd re im = 1 Hre : a.re = ↑(Int.gcd a.re a.im) * re Him : a.im = ↑(Int.gcd a.re a.im) * im ⊢ ∃ b, a = ↑↑(Int.gcd a.re a.im) * b ∧ IsCoprime b.re b.im [PROOFSTEP] refine' ⟨⟨re, im⟩, _, _⟩ [GOAL] case intro.intro.intro.intro.refine'_1 d : ℤ a : ℤ√d hgcd : 0 < Int.gcd a.re a.im re im : ℤ H1 : Int.gcd re im = 1 Hre : a.re = ↑(Int.gcd a.re a.im) * re Him : a.im = ↑(Int.gcd a.re a.im) * im ⊢ a = ↑↑(Int.gcd a.re a.im) * { re := re, im := im } [PROOFSTEP] rw [smul_val, ext, ← Hre, ← Him] [GOAL] case intro.intro.intro.intro.refine'_1 d : ℤ a : ℤ√d hgcd : 0 < Int.gcd a.re a.im re im : ℤ H1 : Int.gcd re im = 1 Hre : a.re = ↑(Int.gcd a.re a.im) * re Him : a.im = ↑(Int.gcd a.re a.im) * im ⊢ a.re = { re := a.re, im := a.im }.re ∧ a.im = { re := a.re, im := a.im }.im [PROOFSTEP] constructor [GOAL] case intro.intro.intro.intro.refine'_1.left d : ℤ a : ℤ√d hgcd : 0 < Int.gcd a.re a.im re im : ℤ H1 : Int.gcd re im = 1 Hre : a.re = ↑(Int.gcd a.re a.im) * re Him : a.im = ↑(Int.gcd a.re a.im) * im ⊢ a.re = { re := a.re, im := a.im }.re [PROOFSTEP] rfl [GOAL] case intro.intro.intro.intro.refine'_1.right d : ℤ a : ℤ√d hgcd : 0 < Int.gcd a.re a.im re im : ℤ H1 : Int.gcd re im = 1 Hre : a.re = ↑(Int.gcd a.re a.im) * re Him : a.im = ↑(Int.gcd a.re a.im) * im ⊢ a.im = { re := a.re, im := a.im }.im [PROOFSTEP] rfl [GOAL] case intro.intro.intro.intro.refine'_2 d : ℤ a : ℤ√d hgcd : 0 < Int.gcd a.re a.im re im : ℤ H1 : Int.gcd re im = 1 Hre : a.re = ↑(Int.gcd a.re a.im) * re Him : a.im = ↑(Int.gcd a.re a.im) * im ⊢ IsCoprime { re := re, im := im }.re { re := re, im := im }.im [PROOFSTEP] rw [← Int.gcd_eq_one_iff_coprime, H1] [GOAL] d✝ : ℤ c d x y z w : ℕ xy : SqLe x c y d zw : SqLe z c w d ⊢ c * (x * z) * (c * (x * z)) ≤ d * (y * w) * (d * (y * w)) [PROOFSTEP] simpa [mul_comm, mul_left_comm] using mul_le_mul xy zw (Nat.zero_le _) (Nat.zero_le _) [GOAL] d✝ : ℤ c d x y z w : ℕ xy : SqLe x c y d zw : SqLe z c w d ⊢ SqLe (x + z) c (y + w) d [PROOFSTEP] have xz := sqLe_add_mixed xy zw [GOAL] d✝ : ℤ c d x y z w : ℕ xy : SqLe x c y d zw : SqLe z c w d xz : c * (x * z) ≤ d * (y * w) ⊢ SqLe (x + z) c (y + w) d [PROOFSTEP] simp [SqLe, mul_assoc] at xy zw [GOAL] d✝ : ℤ c d x y z w : ℕ xz : c * (x * z) ≤ d * (y * w) xy : c * (x * x) ≤ d * (y * y) zw : c * (z * z) ≤ d * (w * w) ⊢ SqLe (x + z) c (y + w) d [PROOFSTEP] simp [SqLe, mul_add, mul_comm, mul_left_comm, add_le_add, *] [GOAL] d✝ : ℤ c d x y z w : ℕ zw : SqLe y d x c h : SqLe (x + z) c (y + w) d ⊢ SqLe z c w d [PROOFSTEP] apply le_of_not_gt [GOAL] case a d✝ : ℤ c d x y z w : ℕ zw : SqLe y d x c h : SqLe (x + z) c (y + w) d ⊢ ¬c * z * z > d * w * w [PROOFSTEP] intro l [GOAL] case a d✝ : ℤ c d x y z w : ℕ zw : SqLe y d x c h : SqLe (x + z) c (y + w) d l : c * z * z > d * w * w ⊢ False [PROOFSTEP] refine' not_le_of_gt _ h [GOAL] case a d✝ : ℤ c d x y z w : ℕ zw : SqLe y d x c h : SqLe (x + z) c (y + w) d l : c * z * z > d * w * w ⊢ c * (x + z) * (x + z) > d * (y + w) * (y + w) [PROOFSTEP] simp only [SqLe, mul_add, mul_comm, mul_left_comm, add_assoc, gt_iff_lt] [GOAL] case a d✝ : ℤ c d x y z w : ℕ zw : SqLe y d x c h : SqLe (x + z) c (y + w) d l : c * z * z > d * w * w ⊢ d * (y * y) + (d * (y * w) + (d * (y * w) + d * (w * w))) < c * (x * x) + (c * (x * z) + (c * (x * z) + c * (z * z))) [PROOFSTEP] have hm := sqLe_add_mixed zw (le_of_lt l) [GOAL] case a d✝ : ℤ c d x y z w : ℕ zw : SqLe y d x c h : SqLe (x + z) c (y + w) d l : c * z * z > d * w * w hm : d * (y * w) ≤ c * (x * z) ⊢ d * (y * y) + (d * (y * w) + (d * (y * w) + d * (w * w))) < c * (x * x) + (c 
* (x * z) + (c * (x * z) + c * (z * z))) [PROOFSTEP] simp only [SqLe, mul_assoc, gt_iff_lt] at l zw [GOAL] case a d✝ : ℤ c d x y z w : ℕ h : SqLe (x + z) c (y + w) d hm : d * (y * w) ≤ c * (x * z) l : d * (w * w) < c * (z * z) zw : d * (y * y) ≤ c * (x * x) ⊢ d * (y * y) + (d * (y * w) + (d * (y * w) + d * (w * w))) < c * (x * x) + (c * (x * z) + (c * (x * z) + c * (z * z))) [PROOFSTEP] exact lt_of_le_of_lt (add_le_add_right zw _) (add_lt_add_left (add_lt_add_of_le_of_lt hm (add_lt_add_of_le_of_lt hm l)) _) [GOAL] d✝ : ℤ c d x y n : ℕ xy : SqLe x c y d ⊢ SqLe (n * x) c (n * y) d [PROOFSTEP] simpa [SqLe, mul_left_comm, mul_assoc] using Nat.mul_le_mul_left (n * n) xy [GOAL] d✝ : ℤ d x y z w : ℕ ⊢ (SqLe x 1 y d → SqLe z 1 w d → SqLe (x * w + y * z) d (x * z + d * y * w) 1) ∧ (SqLe x 1 y d → SqLe w d z 1 → SqLe (x * z + d * y * w) 1 (x * w + y * z) d) ∧ (SqLe y d x 1 → SqLe z 1 w d → SqLe (x * z + d * y * w) 1 (x * w + y * z) d) ∧ (SqLe y d x 1 → SqLe w d z 1 → SqLe (x * w + y * z) d (x * z + d * y * w) 1) [PROOFSTEP] refine' ⟨_, _, _, _⟩ [GOAL] case refine'_1 d✝ : ℤ d x y z w : ℕ ⊢ SqLe x 1 y d → SqLe z 1 w d → SqLe (x * w + y * z) d (x * z + d * y * w) 1 [PROOFSTEP] intro xy zw [GOAL] case refine'_1 d✝ : ℤ d x y z w : ℕ xy : SqLe x 1 y d zw : SqLe z 1 w d ⊢ SqLe (x * w + y * z) d (x * z + d * y * w) 1 [PROOFSTEP] have := Int.mul_nonneg (sub_nonneg_of_le (Int.ofNat_le_ofNat_of_le xy)) (sub_nonneg_of_le (Int.ofNat_le_ofNat_of_le zw)) [GOAL] case refine'_1 d✝ : ℤ d x y z w : ℕ xy : SqLe x 1 y d zw : SqLe z 1 w d this : 0 ≤ (↑(d * y * y) - ↑(1 * x * x)) * (↑(d * w * w) - ↑(1 * z * z)) ⊢ SqLe (x * w + y * z) d (x * z + d * y * w) 1 [PROOFSTEP] refine' Int.le_of_ofNat_le_ofNat (le_of_sub_nonneg _) [GOAL] case refine'_1 d✝ : ℤ d x y z w : ℕ xy : SqLe x 1 y d zw : SqLe z 1 w d this : 0 ≤ (↑(d * y * y) - ↑(1 * x * x)) * (↑(d * w * w) - ↑(1 * z * z)) ⊢ 0 ≤ ↑(1 * (x * z + d * y * w) * (x * z + d * y * w)) - ↑(d * (x * w + y * z) * (x * w + y * z)) [PROOFSTEP] convert this using 1 [GOAL] case h.e'_4 d✝ : ℤ d x y z w : ℕ xy : SqLe x 1 y d zw : SqLe z 1 w d this : 0 ≤ (↑(d * y * y) - ↑(1 * x * x)) * (↑(d * w * w) - ↑(1 * z * z)) ⊢ ↑(1 * (x * z + d * y * w) * (x * z + d * y * w)) - ↑(d * (x * w + y * z) * (x * w + y * z)) = (↑(d * y * y) - ↑(1 * x * x)) * (↑(d * w * w) - ↑(1 * z * z)) [PROOFSTEP] simp only [one_mul, Int.ofNat_add, Int.ofNat_mul] [GOAL] case h.e'_4 d✝ : ℤ d x y z w : ℕ xy : SqLe x 1 y d zw : SqLe z 1 w d this : 0 ≤ (↑(d * y * y) - ↑(1 * x * x)) * (↑(d * w * w) - ↑(1 * z * z)) ⊢ (↑x * ↑z + ↑d * ↑y * ↑w) * (↑x * ↑z + ↑d * ↑y * ↑w) - ↑d * (↑x * ↑w + ↑y * ↑z) * (↑x * ↑w + ↑y * ↑z) = (↑d * ↑y * ↑y - ↑x * ↑x) * (↑d * ↑w * ↑w - ↑z * ↑z) [PROOFSTEP] ring [GOAL] case refine'_2 d✝ : ℤ d x y z w : ℕ ⊢ SqLe x 1 y d → SqLe w d z 1 → SqLe (x * z + d * y * w) 1 (x * w + y * z) d [PROOFSTEP] intro xy zw [GOAL] case refine'_2 d✝ : ℤ d x y z w : ℕ xy : SqLe x 1 y d zw : SqLe w d z 1 ⊢ SqLe (x * z + d * y * w) 1 (x * w + y * z) d [PROOFSTEP] have := Int.mul_nonneg (sub_nonneg_of_le (Int.ofNat_le_ofNat_of_le xy)) (sub_nonneg_of_le (Int.ofNat_le_ofNat_of_le zw)) [GOAL] case refine'_2 d✝ : ℤ d x y z w : ℕ xy : SqLe x 1 y d zw : SqLe w d z 1 this : 0 ≤ (↑(d * y * y) - ↑(1 * x * x)) * (↑(1 * z * z) - ↑(d * w * w)) ⊢ SqLe (x * z + d * y * w) 1 (x * w + y * z) d [PROOFSTEP] refine' Int.le_of_ofNat_le_ofNat (le_of_sub_nonneg _) [GOAL] case refine'_2 d✝ : ℤ d x y z w : ℕ xy : SqLe x 1 y d zw : SqLe w d z 1 this : 0 ≤ (↑(d * y * y) - ↑(1 * x * x)) * (↑(1 * z * z) - ↑(d * w * w)) ⊢ 0 ≤ ↑(d * (x * w + y * z) * (x * w + y 
* z)) - ↑(1 * (x * z + d * y * w) * (x * z + d * y * w)) [PROOFSTEP] convert this using 1 [GOAL] case h.e'_4 d✝ : ℤ d x y z w : ℕ xy : SqLe x 1 y d zw : SqLe w d z 1 this : 0 ≤ (↑(d * y * y) - ↑(1 * x * x)) * (↑(1 * z * z) - ↑(d * w * w)) ⊢ ↑(d * (x * w + y * z) * (x * w + y * z)) - ↑(1 * (x * z + d * y * w) * (x * z + d * y * w)) = (↑(d * y * y) - ↑(1 * x * x)) * (↑(1 * z * z) - ↑(d * w * w)) [PROOFSTEP] simp only [one_mul, Int.ofNat_add, Int.ofNat_mul] [GOAL] case h.e'_4 d✝ : ℤ d x y z w : ℕ xy : SqLe x 1 y d zw : SqLe w d z 1 this : 0 ≤ (↑(d * y * y) - ↑(1 * x * x)) * (↑(1 * z * z) - ↑(d * w * w)) ⊢ ↑d * (↑x * ↑w + ↑y * ↑z) * (↑x * ↑w + ↑y * ↑z) - (↑x * ↑z + ↑d * ↑y * ↑w) * (↑x * ↑z + ↑d * ↑y * ↑w) = (↑d * ↑y * ↑y - ↑x * ↑x) * (↑z * ↑z - ↑d * ↑w * ↑w) [PROOFSTEP] ring [GOAL] case refine'_3 d✝ : ℤ d x y z w : ℕ ⊢ SqLe y d x 1 → SqLe z 1 w d → SqLe (x * z + d * y * w) 1 (x * w + y * z) d [PROOFSTEP] intro xy zw [GOAL] case refine'_3 d✝ : ℤ d x y z w : ℕ xy : SqLe y d x 1 zw : SqLe z 1 w d ⊢ SqLe (x * z + d * y * w) 1 (x * w + y * z) d [PROOFSTEP] have := Int.mul_nonneg (sub_nonneg_of_le (Int.ofNat_le_ofNat_of_le xy)) (sub_nonneg_of_le (Int.ofNat_le_ofNat_of_le zw)) [GOAL] case refine'_3 d✝ : ℤ d x y z w : ℕ xy : SqLe y d x 1 zw : SqLe z 1 w d this : 0 ≤ (↑(1 * x * x) - ↑(d * y * y)) * (↑(d * w * w) - ↑(1 * z * z)) ⊢ SqLe (x * z + d * y * w) 1 (x * w + y * z) d [PROOFSTEP] refine' Int.le_of_ofNat_le_ofNat (le_of_sub_nonneg _) [GOAL] case refine'_3 d✝ : ℤ d x y z w : ℕ xy : SqLe y d x 1 zw : SqLe z 1 w d this : 0 ≤ (↑(1 * x * x) - ↑(d * y * y)) * (↑(d * w * w) - ↑(1 * z * z)) ⊢ 0 ≤ ↑(d * (x * w + y * z) * (x * w + y * z)) - ↑(1 * (x * z + d * y * w) * (x * z + d * y * w)) [PROOFSTEP] convert this using 1 [GOAL] case h.e'_4 d✝ : ℤ d x y z w : ℕ xy : SqLe y d x 1 zw : SqLe z 1 w d this : 0 ≤ (↑(1 * x * x) - ↑(d * y * y)) * (↑(d * w * w) - ↑(1 * z * z)) ⊢ ↑(d * (x * w + y * z) * (x * w + y * z)) - ↑(1 * (x * z + d * y * w) * (x * z + d * y * w)) = (↑(1 * x * x) - ↑(d * y * y)) * (↑(d * w * w) - ↑(1 * z * z)) [PROOFSTEP] simp only [one_mul, Int.ofNat_add, Int.ofNat_mul] [GOAL] case h.e'_4 d✝ : ℤ d x y z w : ℕ xy : SqLe y d x 1 zw : SqLe z 1 w d this : 0 ≤ (↑(1 * x * x) - ↑(d * y * y)) * (↑(d * w * w) - ↑(1 * z * z)) ⊢ ↑d * (↑x * ↑w + ↑y * ↑z) * (↑x * ↑w + ↑y * ↑z) - (↑x * ↑z + ↑d * ↑y * ↑w) * (↑x * ↑z + ↑d * ↑y * ↑w) = (↑x * ↑x - ↑d * ↑y * ↑y) * (↑d * ↑w * ↑w - ↑z * ↑z) [PROOFSTEP] ring [GOAL] case refine'_4 d✝ : ℤ d x y z w : ℕ ⊢ SqLe y d x 1 → SqLe w d z 1 → SqLe (x * w + y * z) d (x * z + d * y * w) 1 [PROOFSTEP] intro xy zw [GOAL] case refine'_4 d✝ : ℤ d x y z w : ℕ xy : SqLe y d x 1 zw : SqLe w d z 1 ⊢ SqLe (x * w + y * z) d (x * z + d * y * w) 1 [PROOFSTEP] have := Int.mul_nonneg (sub_nonneg_of_le (Int.ofNat_le_ofNat_of_le xy)) (sub_nonneg_of_le (Int.ofNat_le_ofNat_of_le zw)) [GOAL] case refine'_4 d✝ : ℤ d x y z w : ℕ xy : SqLe y d x 1 zw : SqLe w d z 1 this : 0 ≤ (↑(1 * x * x) - ↑(d * y * y)) * (↑(1 * z * z) - ↑(d * w * w)) ⊢ SqLe (x * w + y * z) d (x * z + d * y * w) 1 [PROOFSTEP] refine' Int.le_of_ofNat_le_ofNat (le_of_sub_nonneg _) [GOAL] case refine'_4 d✝ : ℤ d x y z w : ℕ xy : SqLe y d x 1 zw : SqLe w d z 1 this : 0 ≤ (↑(1 * x * x) - ↑(d * y * y)) * (↑(1 * z * z) - ↑(d * w * w)) ⊢ 0 ≤ ↑(1 * (x * z + d * y * w) * (x * z + d * y * w)) - ↑(d * (x * w + y * z) * (x * w + y * z)) [PROOFSTEP] convert this using 1 [GOAL] case h.e'_4 d✝ : ℤ d x y z w : ℕ xy : SqLe y d x 1 zw : SqLe w d z 1 this : 0 ≤ (↑(1 * x * x) - ↑(d * y * y)) * (↑(1 * z * z) - ↑(d * w * w)) ⊢ ↑(1 * (x * z + d * y * w) * 
(x * z + d * y * w)) - ↑(d * (x * w + y * z) * (x * w + y * z)) = (↑(1 * x * x) - ↑(d * y * y)) * (↑(1 * z * z) - ↑(d * w * w)) [PROOFSTEP] simp only [one_mul, Int.ofNat_add, Int.ofNat_mul] [GOAL] case h.e'_4 d✝ : ℤ d x y z w : ℕ xy : SqLe y d x 1 zw : SqLe w d z 1 this : 0 ≤ (↑(1 * x * x) - ↑(d * y * y)) * (↑(1 * z * z) - ↑(d * w * w)) ⊢ (↑x * ↑z + ↑d * ↑y * ↑w) * (↑x * ↑z + ↑d * ↑y * ↑w) - ↑d * (↑x * ↑w + ↑y * ↑z) * (↑x * ↑w + ↑y * ↑z) = (↑x * ↑x - ↑d * ↑y * ↑y) * (↑z * ↑z - ↑d * ↑w * ↑w) [PROOFSTEP] ring [GOAL] d✝ : ℤ c d : ℕ x y : ℤ ⊢ Nonnegg c d x y = Nonnegg d c y x [PROOFSTEP] induction x [GOAL] case ofNat d✝ : ℤ c d : ℕ y : ℤ a✝ : ℕ ⊢ Nonnegg c d (Int.ofNat a✝) y = Nonnegg d c y (Int.ofNat a✝) [PROOFSTEP] induction y [GOAL] case negSucc d✝ : ℤ c d : ℕ y : ℤ a✝ : ℕ ⊢ Nonnegg c d (Int.negSucc a✝) y = Nonnegg d c y (Int.negSucc a✝) [PROOFSTEP] induction y [GOAL] case ofNat.ofNat d✝ : ℤ c d a✝¹ a✝ : ℕ ⊢ Nonnegg c d (Int.ofNat a✝¹) (Int.ofNat a✝) = Nonnegg d c (Int.ofNat a✝) (Int.ofNat a✝¹) [PROOFSTEP] rfl [GOAL] case ofNat.negSucc d✝ : ℤ c d a✝¹ a✝ : ℕ ⊢ Nonnegg c d (Int.ofNat a✝¹) (Int.negSucc a✝) = Nonnegg d c (Int.negSucc a✝) (Int.ofNat a✝¹) [PROOFSTEP] rfl [GOAL] case negSucc.ofNat d✝ : ℤ c d a✝¹ a✝ : ℕ ⊢ Nonnegg c d (Int.negSucc a✝¹) (Int.ofNat a✝) = Nonnegg d c (Int.ofNat a✝) (Int.negSucc a✝¹) [PROOFSTEP] rfl [GOAL] case negSucc.negSucc d✝ : ℤ c d a✝¹ a✝ : ℕ ⊢ Nonnegg c d (Int.negSucc a✝¹) (Int.negSucc a✝) = Nonnegg d c (Int.negSucc a✝) (Int.negSucc a✝¹) [PROOFSTEP] rfl [GOAL] d✝ : ℤ c d b : ℕ ⊢ Nonnegg c d (-↑0) ↑b → SqLe 0 d b c [PROOFSTEP] simp [SqLe, Nat.zero_le] [GOAL] d✝ : ℤ c d a b : ℕ ⊢ Nonnegg c d (-↑(a + 1)) ↑b ↔ SqLe (a + 1) d b c [PROOFSTEP] rw [← Int.negSucc_coe] [GOAL] d✝ : ℤ c d a b : ℕ ⊢ Nonnegg c d (Int.negSucc a) ↑b ↔ SqLe (a + 1) d b c [PROOFSTEP] rfl [GOAL] d✝ : ℤ c d a b : ℕ ⊢ Nonnegg c d (↑a) (-↑b) ↔ SqLe b c a d [PROOFSTEP] rw [nonnegg_comm] [GOAL] d✝ : ℤ c d a b : ℕ ⊢ Nonnegg d c (-↑b) ↑a ↔ SqLe b c a d [PROOFSTEP] exact nonnegg_neg_pos [GOAL] d : ℤ ⊢ norm 0 = 0 [PROOFSTEP] simp [norm] [GOAL] d : ℤ ⊢ norm 1 = 1 [PROOFSTEP] simp [norm] [GOAL] d n : ℤ ⊢ norm ↑n = n * n [PROOFSTEP] simp [norm] [GOAL] d : ℤ n m : ℤ√d ⊢ norm (n * m) = norm n * norm m [PROOFSTEP] simp only [norm, mul_im, mul_re] [GOAL] d : ℤ n m : ℤ√d ⊢ (n.re * m.re + d * n.im * m.im) * (n.re * m.re + d * n.im * m.im) - d * (n.re * m.im + n.im * m.re) * (n.re * m.im + n.im * m.re) = (n.re * n.re - d * n.im * n.im) * (m.re * m.re - d * m.im * m.im) [PROOFSTEP] ring [GOAL] d : ℤ n : ℤ√d ⊢ ↑(norm n) = n * star n [PROOFSTEP] cases n [GOAL] case mk d re✝ im✝ : ℤ ⊢ ↑(norm { re := re✝, im := im✝ }) = { re := re✝, im := im✝ } * star { re := re✝, im := im✝ } [PROOFSTEP] simp [norm, star, Zsqrtd.ext, mul_comm, sub_eq_add_neg] [GOAL] d : ℤ x : ℤ√d ⊢ ↑(norm (-x)) = ↑(norm x) [PROOFSTEP] rw [norm_eq_mul_conj, star_neg, neg_mul_neg, norm_eq_mul_conj] [GOAL] d : ℤ x : ℤ√d ⊢ ↑(norm (star x)) = ↑(norm x) [PROOFSTEP] rw [norm_eq_mul_conj, star_star, mul_comm, norm_eq_mul_conj] [GOAL] d : ℤ hd : d ≤ 0 n : ℤ√d ⊢ 0 ≤ -(d * n.im * n.im) [PROOFSTEP] rw [mul_assoc, neg_mul_eq_neg_mul] [GOAL] d : ℤ hd : d ≤ 0 n : ℤ√d ⊢ 0 ≤ -d * (n.im * n.im) [PROOFSTEP] exact mul_nonneg (neg_nonneg.2 hd) (mul_self_nonneg _) [GOAL] d : ℤ x : ℤ√d h : Int.natAbs (norm x) = 1 hx : 0 ≤ norm x ⊢ 1 = x * star x [PROOFSTEP] rwa [← Int.coe_nat_inj', Int.natAbs_of_nonneg hx, ← @Int.cast_inj (ℤ√d) _ _, norm_eq_mul_conj, eq_comm] at h [GOAL] d : ℤ x : ℤ√d h : Int.natAbs (norm x) = 1 hx : norm x ≤ 0 ⊢ 1 = x * -star x [PROOFSTEP] rwa [← 
Int.coe_nat_inj', Int.ofNat_natAbs_of_nonpos hx, ← @Int.cast_inj (ℤ√d) _ _, Int.cast_neg, norm_eq_mul_conj, neg_mul_eq_mul_neg, eq_comm] at h [GOAL] d : ℤ x : ℤ√d h : IsUnit x ⊢ Int.natAbs (norm x) = 1 [PROOFSTEP] let ⟨y, hy⟩ := isUnit_iff_dvd_one.1 h [GOAL] d : ℤ x : ℤ√d h : IsUnit x y : ℤ√d hy : 1 = x * y ⊢ Int.natAbs (norm x) = 1 [PROOFSTEP] have := congr_arg (Int.natAbs ∘ norm) hy [GOAL] d : ℤ x : ℤ√d h : IsUnit x y : ℤ√d hy : 1 = x * y this : (Int.natAbs ∘ norm) 1 = (Int.natAbs ∘ norm) (x * y) ⊢ Int.natAbs (norm x) = 1 [PROOFSTEP] rw [Function.comp_apply, Function.comp_apply, norm_mul, Int.natAbs_mul, norm_one, Int.natAbs_one, eq_comm, mul_eq_one] at this [GOAL] d : ℤ x : ℤ√d h : IsUnit x y : ℤ√d hy : 1 = x * y this : Int.natAbs (norm x) = 1 ∧ Int.natAbs (norm y) = 1 ⊢ Int.natAbs (norm x) = 1 [PROOFSTEP] exact this.1 [GOAL] d✝ d : ℤ z : ℤ√d ⊢ IsUnit z ↔ IsUnit (norm z) [PROOFSTEP] rw [Int.isUnit_iff_natAbs_eq, norm_eq_one_iff] [GOAL] d✝ d : ℤ hd : d ≤ 0 z : ℤ√d ⊢ norm z = 1 ↔ IsUnit z [PROOFSTEP] rw [← norm_eq_one_iff, ← Int.coe_nat_inj', Int.natAbs_of_nonneg (norm_nonneg hd z), Int.ofNat_one] [GOAL] d✝ d : ℤ hd : d < 0 z : ℤ√d ⊢ norm z = 0 ↔ z = 0 [PROOFSTEP] constructor [GOAL] case mp d✝ d : ℤ hd : d < 0 z : ℤ√d ⊢ norm z = 0 → z = 0 [PROOFSTEP] intro h [GOAL] case mp d✝ d : ℤ hd : d < 0 z : ℤ√d h : norm z = 0 ⊢ z = 0 [PROOFSTEP] rw [ext, zero_re, zero_im] [GOAL] case mp d✝ d : ℤ hd : d < 0 z : ℤ√d h : norm z = 0 ⊢ z.re = 0 ∧ z.im = 0 [PROOFSTEP] rw [norm_def, sub_eq_add_neg, mul_assoc] at h [GOAL] case mp d✝ d : ℤ hd : d < 0 z : ℤ√d h : z.re * z.re + -(d * (z.im * z.im)) = 0 ⊢ z.re = 0 ∧ z.im = 0 [PROOFSTEP] have left := mul_self_nonneg z.re [GOAL] case mp d✝ d : ℤ hd : d < 0 z : ℤ√d h : z.re * z.re + -(d * (z.im * z.im)) = 0 left : 0 ≤ z.re * z.re ⊢ z.re = 0 ∧ z.im = 0 [PROOFSTEP] have right := neg_nonneg.mpr (mul_nonpos_of_nonpos_of_nonneg hd.le (mul_self_nonneg z.im)) [GOAL] case mp d✝ d : ℤ hd : d < 0 z : ℤ√d h : z.re * z.re + -(d * (z.im * z.im)) = 0 left : 0 ≤ z.re * z.re right : 0 ≤ -(d * (z.im * z.im)) ⊢ z.re = 0 ∧ z.im = 0 [PROOFSTEP] obtain ⟨ha, hb⟩ := (add_eq_zero_iff' left right).mp h [GOAL] case mp.intro d✝ d : ℤ hd : d < 0 z : ℤ√d h : z.re * z.re + -(d * (z.im * z.im)) = 0 left : 0 ≤ z.re * z.re right : 0 ≤ -(d * (z.im * z.im)) ha : z.re * z.re = 0 hb : -(d * (z.im * z.im)) = 0 ⊢ z.re = 0 ∧ z.im = 0 [PROOFSTEP] constructor [GOAL] case mp.intro.left d✝ d : ℤ hd : d < 0 z : ℤ√d h : z.re * z.re + -(d * (z.im * z.im)) = 0 left : 0 ≤ z.re * z.re right : 0 ≤ -(d * (z.im * z.im)) ha : z.re * z.re = 0 hb : -(d * (z.im * z.im)) = 0 ⊢ z.re = 0 [PROOFSTEP] apply eq_zero_of_mul_self_eq_zero [GOAL] case mp.intro.right d✝ d : ℤ hd : d < 0 z : ℤ√d h : z.re * z.re + -(d * (z.im * z.im)) = 0 left : 0 ≤ z.re * z.re right : 0 ≤ -(d * (z.im * z.im)) ha : z.re * z.re = 0 hb : -(d * (z.im * z.im)) = 0 ⊢ z.im = 0 [PROOFSTEP] apply eq_zero_of_mul_self_eq_zero [GOAL] case mp.intro.left.h d✝ d : ℤ hd : d < 0 z : ℤ√d h : z.re * z.re + -(d * (z.im * z.im)) = 0 left : 0 ≤ z.re * z.re right : 0 ≤ -(d * (z.im * z.im)) ha : z.re * z.re = 0 hb : -(d * (z.im * z.im)) = 0 ⊢ z.re * z.re = 0 [PROOFSTEP] exact ha [GOAL] case mp.intro.right.h d✝ d : ℤ hd : d < 0 z : ℤ√d h : z.re * z.re + -(d * (z.im * z.im)) = 0 left : 0 ≤ z.re * z.re right : 0 ≤ -(d * (z.im * z.im)) ha : z.re * z.re = 0 hb : -(d * (z.im * z.im)) = 0 ⊢ z.im * z.im = 0 [PROOFSTEP] rw [neg_eq_zero, mul_eq_zero] at hb [GOAL] case mp.intro.right.h d✝ d : ℤ hd : d < 0 z : ℤ√d h : z.re * z.re + -(d * (z.im * z.im)) = 0 left : 0 ≤ z.re * z.re 
right : 0 ≤ -(d * (z.im * z.im)) ha : z.re * z.re = 0 hb : d = 0 ∨ z.im * z.im = 0 ⊢ z.im * z.im = 0 [PROOFSTEP] exact hb.resolve_left hd.ne [GOAL] case mpr d✝ d : ℤ hd : d < 0 z : ℤ√d ⊢ z = 0 → norm z = 0 [PROOFSTEP] rintro rfl [GOAL] case mpr d✝ d : ℤ hd : d < 0 ⊢ norm 0 = 0 [PROOFSTEP] exact norm_zero [GOAL] d✝ d : ℤ hd : d ≤ 0 x y : ℤ√d h : Associated x y ⊢ norm x = norm y [PROOFSTEP] obtain ⟨u, rfl⟩ := h [GOAL] case intro d✝ d : ℤ hd : d ≤ 0 x : ℤ√d u : (ℤ√d)ˣ ⊢ norm x = norm (x * ↑u) [PROOFSTEP] rw [norm_mul, (norm_eq_one_iff' hd _).mpr u.isUnit, mul_one] [GOAL] d✝ c d : ℕ a b : ℤ ⊢ Decidable (Nonnegg c d a b) [PROOFSTEP] cases a [GOAL] case ofNat d✝ c d : ℕ b : ℤ a✝ : ℕ ⊢ Decidable (Nonnegg c d (Int.ofNat a✝) b) [PROOFSTEP] cases b [GOAL] case negSucc d✝ c d : ℕ b : ℤ a✝ : ℕ ⊢ Decidable (Nonnegg c d (Int.negSucc a✝) b) [PROOFSTEP] cases b [GOAL] case ofNat.ofNat d✝ c d a✝¹ a✝ : ℕ ⊢ Decidable (Nonnegg c d (Int.ofNat a✝¹) (Int.ofNat a✝)) [PROOFSTEP] unfold Nonnegg SqLe [GOAL] case ofNat.negSucc d✝ c d a✝¹ a✝ : ℕ ⊢ Decidable (Nonnegg c d (Int.ofNat a✝¹) (Int.negSucc a✝)) [PROOFSTEP] unfold Nonnegg SqLe [GOAL] case negSucc.ofNat d✝ c d a✝¹ a✝ : ℕ ⊢ Decidable (Nonnegg c d (Int.negSucc a✝¹) (Int.ofNat a✝)) [PROOFSTEP] unfold Nonnegg SqLe [GOAL] case negSucc.negSucc d✝ c d a✝¹ a✝ : ℕ ⊢ Decidable (Nonnegg c d (Int.negSucc a✝¹) (Int.negSucc a✝)) [PROOFSTEP] unfold Nonnegg SqLe [GOAL] case ofNat.ofNat d✝ c d a✝¹ a✝ : ℕ ⊢ Decidable (match Int.ofNat a✝¹, Int.ofNat a✝ with | Int.ofNat a, Int.ofNat b => True | Int.ofNat a, Int.negSucc b => c * (b + 1) * (b + 1) ≤ d * a * a | Int.negSucc a, Int.ofNat b => d * (a + 1) * (a + 1) ≤ c * b * b | Int.negSucc a, Int.negSucc a_1 => False) [PROOFSTEP] infer_instance [GOAL] case ofNat.negSucc d✝ c d a✝¹ a✝ : ℕ ⊢ Decidable (match Int.ofNat a✝¹, Int.negSucc a✝ with | Int.ofNat a, Int.ofNat b => True | Int.ofNat a, Int.negSucc b => c * (b + 1) * (b + 1) ≤ d * a * a | Int.negSucc a, Int.ofNat b => d * (a + 1) * (a + 1) ≤ c * b * b | Int.negSucc a, Int.negSucc a_1 => False) [PROOFSTEP] infer_instance [GOAL] case negSucc.ofNat d✝ c d a✝¹ a✝ : ℕ ⊢ Decidable (match Int.negSucc a✝¹, Int.ofNat a✝ with | Int.ofNat a, Int.ofNat b => True | Int.ofNat a, Int.negSucc b => c * (b + 1) * (b + 1) ≤ d * a * a | Int.negSucc a, Int.ofNat b => d * (a + 1) * (a + 1) ≤ c * b * b | Int.negSucc a, Int.negSucc a_1 => False) [PROOFSTEP] infer_instance [GOAL] case negSucc.negSucc d✝ c d a✝¹ a✝ : ℕ ⊢ Decidable (match Int.negSucc a✝¹, Int.negSucc a✝ with | Int.ofNat a, Int.ofNat b => True | Int.ofNat a, Int.negSucc b => c * (b + 1) * (b + 1) ≤ d * a * a | Int.negSucc a, Int.ofNat b => d * (a + 1) * (a + 1) ≤ c * b * b | Int.negSucc a, Int.negSucc a_1 => False) [PROOFSTEP] infer_instance [GOAL] d x y z w : ℕ xy : Nonneg { re := ↑x, im := -↑y } zw : Nonneg { re := -↑z, im := ↑w } ⊢ Nonneg ({ re := ↑x, im := -↑y } + { re := -↑z, im := ↑w }) [PROOFSTEP] have : Nonneg ⟨Int.subNatNat x z, Int.subNatNat w y⟩ := Int.subNatNat_elim x z (fun m n i => SqLe y d m 1 → SqLe n 1 w d → Nonneg ⟨i, Int.subNatNat w y⟩) (fun j k => Int.subNatNat_elim w y (fun m n i => SqLe n d (k + j) 1 → SqLe k 1 m d → Nonneg ⟨Int.ofNat j, i⟩) (fun _ _ _ _ => trivial) fun m n xy zw => sqLe_cancel zw xy) (fun j k => Int.subNatNat_elim w y (fun m n i => SqLe n d k 1 → SqLe (k + j + 1) 1 m d → Nonneg ⟨-[j+1], i⟩) (fun m n xy zw => sqLe_cancel xy zw) fun m n xy zw => let t := Nat.le_trans zw (sqLe_of_le (Nat.le_add_right n (m + 1)) le_rfl xy) have : k + j + 1 ≤ k := Nat.mul_self_le_mul_self_iff.2 (by simpa [one_mul] using t) 
absurd this (not_le_of_gt <| Nat.succ_le_succ <| Nat.le_add_right _ _)) (nonnegg_pos_neg.1 xy) (nonnegg_neg_pos.1 zw) [GOAL] d x y z w : ℕ xy✝ : Nonneg { re := ↑x, im := -↑y } zw✝ : Nonneg { re := -↑z, im := ↑w } j k m n : ℕ xy : SqLe (n + m + 1) d k 1 zw : SqLe (k + j + 1) 1 n d t : 1 * (k + j + 1) * (k + j + 1) ≤ 1 * k * k := Nat.le_trans zw (sqLe_of_le (Nat.le_add_right n (m + 1)) le_rfl xy) ⊢ (k + j + 1) * (k + j + 1) ≤ k * k [PROOFSTEP] simpa [one_mul] using t [GOAL] d x y z w : ℕ xy : Nonneg { re := ↑x, im := -↑y } zw : Nonneg { re := -↑z, im := ↑w } this : Nonneg { re := subNatNat x z, im := subNatNat w y } ⊢ Nonneg ({ re := ↑x, im := -↑y } + { re := -↑z, im := ↑w }) [PROOFSTEP] rw [add_def, neg_add_eq_sub] [GOAL] d x y z w : ℕ xy : Nonneg { re := ↑x, im := -↑y } zw : Nonneg { re := -↑z, im := ↑w } this : Nonneg { re := subNatNat x z, im := subNatNat w y } ⊢ Nonneg { re := ↑x + -↑z, im := ↑w - ↑y } [PROOFSTEP] rwa [Int.subNatNat_eq_coe, Int.subNatNat_eq_coe] at this [GOAL] d : ℕ a b : ℤ√↑d ha : Nonneg a hb : Nonneg b ⊢ Nonneg (a + b) [PROOFSTEP] rcases nonneg_cases ha with ⟨x, y, rfl | rfl | rfl⟩ [GOAL] case intro.intro.inl d : ℕ b : ℤ√↑d hb : Nonneg b x y : ℕ ha : Nonneg { re := ↑x, im := ↑y } ⊢ Nonneg ({ re := ↑x, im := ↑y } + b) [PROOFSTEP] rcases nonneg_cases hb with ⟨z, w, rfl | rfl | rfl⟩ [GOAL] case intro.intro.inr.inl d : ℕ b : ℤ√↑d hb : Nonneg b x y : ℕ ha : Nonneg { re := ↑x, im := -↑y } ⊢ Nonneg ({ re := ↑x, im := -↑y } + b) [PROOFSTEP] rcases nonneg_cases hb with ⟨z, w, rfl | rfl | rfl⟩ [GOAL] case intro.intro.inr.inr d : ℕ b : ℤ√↑d hb : Nonneg b x y : ℕ ha : Nonneg { re := -↑x, im := ↑y } ⊢ Nonneg ({ re := -↑x, im := ↑y } + b) [PROOFSTEP] rcases nonneg_cases hb with ⟨z, w, rfl | rfl | rfl⟩ [GOAL] case intro.intro.inl.intro.intro.inl d x y : ℕ ha : Nonneg { re := ↑x, im := ↑y } z w : ℕ hb : Nonneg { re := ↑z, im := ↑w } ⊢ Nonneg ({ re := ↑x, im := ↑y } + { re := ↑z, im := ↑w }) [PROOFSTEP] trivial [GOAL] case intro.intro.inl.intro.intro.inr.inl d x y : ℕ ha : Nonneg { re := ↑x, im := ↑y } z w : ℕ hb : Nonneg { re := ↑z, im := -↑w } ⊢ Nonneg ({ re := ↑x, im := ↑y } + { re := ↑z, im := -↑w }) [PROOFSTEP] refine' nonnegg_cases_right fun i h => sqLe_of_le _ _ (nonnegg_pos_neg.1 hb) [GOAL] case intro.intro.inl.intro.intro.inr.inl.refine'_1 d x y : ℕ ha : Nonneg { re := ↑x, im := ↑y } z w : ℕ hb : Nonneg { re := ↑z, im := -↑w } i : ℕ h : { re := ↑x, im := ↑y }.im + { re := ↑z, im := -↑w }.im = -↑i ⊢ i ≤ w [PROOFSTEP] dsimp only at h [GOAL] case intro.intro.inl.intro.intro.inr.inl.refine'_1 d x y : ℕ ha : Nonneg { re := ↑x, im := ↑y } z w : ℕ hb : Nonneg { re := ↑z, im := -↑w } i : ℕ h : ↑y + -↑w = -↑i ⊢ i ≤ w [PROOFSTEP] exact Int.ofNat_le.1 (le_of_neg_le_neg (Int.le.intro y (by simp [add_comm, *]))) [GOAL] d x y : ℕ ha : Nonneg { re := ↑x, im := ↑y } z w : ℕ hb : Nonneg { re := ↑z, im := -↑w } i : ℕ h : ↑y + -↑w = -↑i ⊢ -↑w + ↑y = -↑i [PROOFSTEP] simp [add_comm, *] [GOAL] case intro.intro.inl.intro.intro.inr.inl.refine'_2 d x y : ℕ ha : Nonneg { re := ↑x, im := ↑y } z w : ℕ hb : Nonneg { re := ↑z, im := -↑w } i : ℕ h : { re := ↑x, im := ↑y }.im + { re := ↑z, im := -↑w }.im = -↑i ⊢ z ≤ x + z [PROOFSTEP] apply Nat.le_add_left [GOAL] case intro.intro.inl.intro.intro.inr.inr d x y : ℕ ha : Nonneg { re := ↑x, im := ↑y } z w : ℕ hb : Nonneg { re := -↑z, im := ↑w } ⊢ Nonneg ({ re := ↑x, im := ↑y } + { re := -↑z, im := ↑w }) [PROOFSTEP] refine' nonnegg_cases_left fun i h => sqLe_of_le _ _ (nonnegg_neg_pos.1 hb) [GOAL] case intro.intro.inl.intro.intro.inr.inr.refine'_1 d x y : ℕ ha : 
Nonneg { re := ↑x, im := ↑y } z w : ℕ hb : Nonneg { re := -↑z, im := ↑w } i : ℕ h : { re := ↑x, im := ↑y }.re + { re := -↑z, im := ↑w }.re = -↑i ⊢ i ≤ z [PROOFSTEP] dsimp only at h [GOAL] case intro.intro.inl.intro.intro.inr.inr.refine'_1 d x y : ℕ ha : Nonneg { re := ↑x, im := ↑y } z w : ℕ hb : Nonneg { re := -↑z, im := ↑w } i : ℕ h : ↑x + -↑z = -↑i ⊢ i ≤ z [PROOFSTEP] exact Int.ofNat_le.1 (le_of_neg_le_neg (Int.le.intro x (by simp [add_comm, *]))) [GOAL] d x y : ℕ ha : Nonneg { re := ↑x, im := ↑y } z w : ℕ hb : Nonneg { re := -↑z, im := ↑w } i : ℕ h : ↑x + -↑z = -↑i ⊢ -↑z + ↑x = -↑i [PROOFSTEP] simp [add_comm, *] [GOAL] case intro.intro.inl.intro.intro.inr.inr.refine'_2 d x y : ℕ ha : Nonneg { re := ↑x, im := ↑y } z w : ℕ hb : Nonneg { re := -↑z, im := ↑w } i : ℕ h : { re := ↑x, im := ↑y }.re + { re := -↑z, im := ↑w }.re = -↑i ⊢ w ≤ y + w [PROOFSTEP] apply Nat.le_add_left [GOAL] case intro.intro.inr.inl.intro.intro.inl d x y : ℕ ha : Nonneg { re := ↑x, im := -↑y } z w : ℕ hb : Nonneg { re := ↑z, im := ↑w } ⊢ Nonneg ({ re := ↑x, im := -↑y } + { re := ↑z, im := ↑w }) [PROOFSTEP] refine' nonnegg_cases_right fun i h => sqLe_of_le _ _ (nonnegg_pos_neg.1 ha) [GOAL] case intro.intro.inr.inl.intro.intro.inl.refine'_1 d x y : ℕ ha : Nonneg { re := ↑x, im := -↑y } z w : ℕ hb : Nonneg { re := ↑z, im := ↑w } i : ℕ h : { re := ↑x, im := -↑y }.im + { re := ↑z, im := ↑w }.im = -↑i ⊢ i ≤ y [PROOFSTEP] dsimp only at h [GOAL] case intro.intro.inr.inl.intro.intro.inl.refine'_1 d x y : ℕ ha : Nonneg { re := ↑x, im := -↑y } z w : ℕ hb : Nonneg { re := ↑z, im := ↑w } i : ℕ h : -↑y + ↑w = -↑i ⊢ i ≤ y [PROOFSTEP] exact Int.ofNat_le.1 (le_of_neg_le_neg (Int.le.intro w (by simp [*]))) [GOAL] d x y : ℕ ha : Nonneg { re := ↑x, im := -↑y } z w : ℕ hb : Nonneg { re := ↑z, im := ↑w } i : ℕ h : -↑y + ↑w = -↑i ⊢ -↑y + ↑w = -↑i [PROOFSTEP] simp [*] [GOAL] case intro.intro.inr.inl.intro.intro.inl.refine'_2 d x y : ℕ ha : Nonneg { re := ↑x, im := -↑y } z w : ℕ hb : Nonneg { re := ↑z, im := ↑w } i : ℕ h : { re := ↑x, im := -↑y }.im + { re := ↑z, im := ↑w }.im = -↑i ⊢ x ≤ x + z [PROOFSTEP] apply Nat.le_add_right [GOAL] case intro.intro.inr.inl.intro.intro.inr.inl d x y : ℕ ha : Nonneg { re := ↑x, im := -↑y } z w : ℕ hb : Nonneg { re := ↑z, im := -↑w } ⊢ Nonneg ({ re := ↑x, im := -↑y } + { re := ↑z, im := -↑w }) [PROOFSTEP] have : Nonneg ⟨_, _⟩ := nonnegg_pos_neg.2 (sqLe_add (nonnegg_pos_neg.1 ha) (nonnegg_pos_neg.1 hb)) [GOAL] case intro.intro.inr.inl.intro.intro.inr.inl d x y : ℕ ha : Nonneg { re := ↑x, im := -↑y } z w : ℕ hb : Nonneg { re := ↑z, im := -↑w } this : Nonneg { re := ↑(x + z), im := -↑(y + w) } ⊢ Nonneg ({ re := ↑x, im := -↑y } + { re := ↑z, im := -↑w }) [PROOFSTEP] rw [Nat.cast_add, Nat.cast_add, neg_add] at this [GOAL] case intro.intro.inr.inl.intro.intro.inr.inl d x y : ℕ ha : Nonneg { re := ↑x, im := -↑y } z w : ℕ hb : Nonneg { re := ↑z, im := -↑w } this : Nonneg { re := ↑x + ↑z, im := -↑y + -↑w } ⊢ Nonneg ({ re := ↑x, im := -↑y } + { re := ↑z, im := -↑w }) [PROOFSTEP] rwa [add_def] -- Porting note: was -- simpa [add_comm] using -- nonnegg_pos_neg.2 (sqLe_add (nonnegg_pos_neg.1 ha) (nonnegg_pos_neg.1 hb)) [GOAL] case intro.intro.inr.inl.intro.intro.inr.inr d x y : ℕ ha : Nonneg { re := ↑x, im := -↑y } z w : ℕ hb : Nonneg { re := -↑z, im := ↑w } ⊢ Nonneg ({ re := ↑x, im := -↑y } + { re := -↑z, im := ↑w }) [PROOFSTEP] exact nonneg_add_lem ha hb [GOAL] case intro.intro.inr.inr.intro.intro.inl d x y : ℕ ha : Nonneg { re := -↑x, im := ↑y } z w : ℕ hb : Nonneg { re := ↑z, im := ↑w } ⊢ Nonneg ({ re := -↑x, im := 
↑y } + { re := ↑z, im := ↑w }) [PROOFSTEP] refine' nonnegg_cases_left fun i h => sqLe_of_le _ _ (nonnegg_neg_pos.1 ha) [GOAL] case intro.intro.inr.inr.intro.intro.inl.refine'_1 d x y : ℕ ha : Nonneg { re := -↑x, im := ↑y } z w : ℕ hb : Nonneg { re := ↑z, im := ↑w } i : ℕ h : { re := -↑x, im := ↑y }.re + { re := ↑z, im := ↑w }.re = -↑i ⊢ i ≤ x [PROOFSTEP] dsimp only at h [GOAL] case intro.intro.inr.inr.intro.intro.inl.refine'_1 d x y : ℕ ha : Nonneg { re := -↑x, im := ↑y } z w : ℕ hb : Nonneg { re := ↑z, im := ↑w } i : ℕ h : -↑x + ↑z = -↑i ⊢ i ≤ x [PROOFSTEP] exact Int.ofNat_le.1 (le_of_neg_le_neg (Int.le.intro _ h)) [GOAL] case intro.intro.inr.inr.intro.intro.inl.refine'_2 d x y : ℕ ha : Nonneg { re := -↑x, im := ↑y } z w : ℕ hb : Nonneg { re := ↑z, im := ↑w } i : ℕ h : { re := -↑x, im := ↑y }.re + { re := ↑z, im := ↑w }.re = -↑i ⊢ y ≤ y + w [PROOFSTEP] apply Nat.le_add_right [GOAL] case intro.intro.inr.inr.intro.intro.inr.inl d x y : ℕ ha : Nonneg { re := -↑x, im := ↑y } z w : ℕ hb : Nonneg { re := ↑z, im := -↑w } ⊢ Nonneg ({ re := -↑x, im := ↑y } + { re := ↑z, im := -↑w }) [PROOFSTEP] dsimp [GOAL] case intro.intro.inr.inr.intro.intro.inr.inl d x y : ℕ ha : Nonneg { re := -↑x, im := ↑y } z w : ℕ hb : Nonneg { re := ↑z, im := -↑w } ⊢ Nonneg { re := -↑x + ↑z, im := ↑y + -↑w } [PROOFSTEP] rw [add_comm, add_comm (y : ℤ)] [GOAL] case intro.intro.inr.inr.intro.intro.inr.inl d x y : ℕ ha : Nonneg { re := -↑x, im := ↑y } z w : ℕ hb : Nonneg { re := ↑z, im := -↑w } ⊢ Nonneg { re := ↑z + -↑x, im := -↑w + ↑y } [PROOFSTEP] exact nonneg_add_lem hb ha [GOAL] case intro.intro.inr.inr.intro.intro.inr.inr d x y : ℕ ha : Nonneg { re := -↑x, im := ↑y } z w : ℕ hb : Nonneg { re := -↑z, im := ↑w } ⊢ Nonneg ({ re := -↑x, im := ↑y } + { re := -↑z, im := ↑w }) [PROOFSTEP] have : Nonneg ⟨_, _⟩ := nonnegg_neg_pos.2 (sqLe_add (nonnegg_neg_pos.1 ha) (nonnegg_neg_pos.1 hb)) [GOAL] case intro.intro.inr.inr.intro.intro.inr.inr d x y : ℕ ha : Nonneg { re := -↑x, im := ↑y } z w : ℕ hb : Nonneg { re := -↑z, im := ↑w } this : Nonneg { re := -↑(x + z), im := ↑(y + w) } ⊢ Nonneg ({ re := -↑x, im := ↑y } + { re := -↑z, im := ↑w }) [PROOFSTEP] rw [Nat.cast_add, Nat.cast_add, neg_add] at this [GOAL] case intro.intro.inr.inr.intro.intro.inr.inr d x y : ℕ ha : Nonneg { re := -↑x, im := ↑y } z w : ℕ hb : Nonneg { re := -↑z, im := ↑w } this : Nonneg { re := -↑x + -↑z, im := ↑y + ↑w } ⊢ Nonneg ({ re := -↑x, im := ↑y } + { re := -↑z, im := ↑w }) [PROOFSTEP] rwa [add_def] -- Porting note: was -- simpa [add_comm] using -- nonnegg_neg_pos.2 (sqLe_add (nonnegg_neg_pos.1 ha) (nonnegg_neg_pos.1 hb)) [GOAL] d : ℕ a : ℤ√↑d ⊢ Nonneg a ↔ Nonneg (a - 0) [PROOFSTEP] simp [GOAL] d : ℕ a b : ℤ√↑d ⊢ a ≤ b ∨ b ≤ a [PROOFSTEP] have t := (b - a).nonneg_total [GOAL] d : ℕ a b : ℤ√↑d t : Nonneg (b - a) ∨ Nonneg (-(b - a)) ⊢ a ≤ b ∨ b ≤ a [PROOFSTEP] rwa [neg_sub] at t [GOAL] d : ℕ a : ℤ√↑d ⊢ Nonneg (a - a) [PROOFSTEP] simp only [sub_self] [GOAL] d : ℕ a : ℤ√↑d ⊢ Nonneg 0 [PROOFSTEP] trivial [GOAL] d : ℕ a b c : ℤ√↑d hab : a ≤ b hbc : b ≤ c ⊢ a ≤ c [PROOFSTEP] simpa [sub_add_sub_cancel'] using hab.add hbc [GOAL] d : ℕ a : ℤ√↑d ⊢ ∃ n, a ≤ ↑n [PROOFSTEP] obtain ⟨x, y, (h : a ≤ ⟨x, y⟩)⟩ : ∃ x y : ℕ, Nonneg (⟨x, y⟩ + -a) := match -a with | ⟨Int.ofNat x, Int.ofNat y⟩ => ⟨0, 0, by trivial⟩ | ⟨Int.ofNat x, -[y+1]⟩ => ⟨0, y + 1, by simp [add_def, Int.negSucc_coe, add_assoc]; trivial⟩ | ⟨-[x+1], Int.ofNat y⟩ => ⟨x + 1, 0, by simp [Int.negSucc_coe, add_assoc]; trivial⟩ | ⟨-[x+1], -[y+1]⟩ => ⟨x + 1, y + 1, by simp [Int.negSucc_coe, add_assoc]; trivial⟩ [GOAL] d : ℕ 
a : ℤ√↑d x y : ℕ ⊢ Nonneg ({ re := ↑0, im := ↑0 } + { re := ofNat x, im := ofNat y }) [PROOFSTEP] trivial [GOAL] d : ℕ a : ℤ√↑d x y : ℕ ⊢ Nonneg ({ re := ↑0, im := ↑(y + 1) } + { re := ofNat x, im := -[y+1] }) [PROOFSTEP] simp [add_def, Int.negSucc_coe, add_assoc] [GOAL] d : ℕ a : ℤ√↑d x y : ℕ ⊢ Nonneg { re := ↑x, im := 0 } [PROOFSTEP] trivial [GOAL] d : ℕ a : ℤ√↑d x y : ℕ ⊢ Nonneg ({ re := ↑(x + 1), im := ↑0 } + { re := -[x+1], im := ofNat y }) [PROOFSTEP] simp [Int.negSucc_coe, add_assoc] [GOAL] d : ℕ a : ℤ√↑d x y : ℕ ⊢ Nonneg { re := 0, im := ↑y } [PROOFSTEP] trivial [GOAL] d : ℕ a : ℤ√↑d x y : ℕ ⊢ Nonneg ({ re := ↑(x + 1), im := ↑(y + 1) } + { re := -[x+1], im := -[y+1] }) [PROOFSTEP] simp [Int.negSucc_coe, add_assoc] [GOAL] d : ℕ a : ℤ√↑d x y : ℕ ⊢ Nonneg { re := 0, im := 0 } [PROOFSTEP] trivial [GOAL] case intro.intro d : ℕ a : ℤ√↑d x y : ℕ h : a ≤ { re := ↑x, im := ↑y } ⊢ ∃ n, a ≤ ↑n [PROOFSTEP] refine' ⟨x + d * y, h.trans _⟩ [GOAL] case intro.intro d : ℕ a : ℤ√↑d x y : ℕ h : a ≤ { re := ↑x, im := ↑y } ⊢ { re := ↑x, im := ↑y } ≤ ↑(x + d * y) [PROOFSTEP] change Nonneg ⟨↑x + d * y - ↑x, 0 - ↑y⟩ [GOAL] case intro.intro d : ℕ a : ℤ√↑d x y : ℕ h : a ≤ { re := ↑x, im := ↑y } ⊢ Nonneg { re := ↑x + ↑d * ↑y - ↑x, im := 0 - ↑y } [PROOFSTEP] cases' y with y [GOAL] case intro.intro.zero d : ℕ a : ℤ√↑d x : ℕ h : a ≤ { re := ↑x, im := ↑Nat.zero } ⊢ Nonneg { re := ↑x + ↑d * ↑Nat.zero - ↑x, im := 0 - ↑Nat.zero } [PROOFSTEP] simp [GOAL] case intro.intro.zero d : ℕ a : ℤ√↑d x : ℕ h : a ≤ { re := ↑x, im := ↑Nat.zero } ⊢ Nonneg { re := 0, im := 0 } [PROOFSTEP] trivial [GOAL] case intro.intro.succ d : ℕ a : ℤ√↑d x y : ℕ h : a ≤ { re := ↑x, im := ↑(Nat.succ y) } ⊢ Nonneg { re := ↑x + ↑d * ↑(Nat.succ y) - ↑x, im := 0 - ↑(Nat.succ y) } [PROOFSTEP] have h : ∀ y, SqLe y d (d * y) 1 := fun y => by simpa [SqLe, mul_comm, mul_left_comm] using Nat.mul_le_mul_right (y * y) (Nat.le_mul_self d) [GOAL] d : ℕ a : ℤ√↑d x y✝ : ℕ h : a ≤ { re := ↑x, im := ↑(Nat.succ y✝) } y : ℕ ⊢ SqLe y d (d * y) 1 [PROOFSTEP] simpa [SqLe, mul_comm, mul_left_comm] using Nat.mul_le_mul_right (y * y) (Nat.le_mul_self d) [GOAL] case intro.intro.succ d : ℕ a : ℤ√↑d x y : ℕ h✝ : a ≤ { re := ↑x, im := ↑(Nat.succ y) } h : ∀ (y : ℕ), SqLe y d (d * y) 1 ⊢ Nonneg { re := ↑x + ↑d * ↑(Nat.succ y) - ↑x, im := 0 - ↑(Nat.succ y) } [PROOFSTEP] rw [show (x : ℤ) + d * Nat.succ y - x = d * Nat.succ y by simp] [GOAL] d : ℕ a : ℤ√↑d x y : ℕ h✝ : a ≤ { re := ↑x, im := ↑(Nat.succ y) } h : ∀ (y : ℕ), SqLe y d (d * y) 1 ⊢ ↑x + ↑d * ↑(Nat.succ y) - ↑x = ↑d * ↑(Nat.succ y) [PROOFSTEP] simp [GOAL] case intro.intro.succ d : ℕ a : ℤ√↑d x y : ℕ h✝ : a ≤ { re := ↑x, im := ↑(Nat.succ y) } h : ∀ (y : ℕ), SqLe y d (d * y) 1 ⊢ Nonneg { re := ↑d * ↑(Nat.succ y), im := 0 - ↑(Nat.succ y) } [PROOFSTEP] exact h (y + 1) [GOAL] d : ℕ a b : ℤ√↑d ab : a ≤ b c : ℤ√↑d ⊢ Nonneg (c + b - (c + a)) [PROOFSTEP] rw [add_sub_add_left_eq_sub] [GOAL] d : ℕ a b : ℤ√↑d ab : a ≤ b c : ℤ√↑d ⊢ Nonneg (b - a) [PROOFSTEP] exact ab [GOAL] d : ℕ a b c : ℤ√↑d h : c + a ≤ c + b ⊢ a ≤ b [PROOFSTEP] simpa using Zsqrtd.add_le_add_left _ _ h (-c) [GOAL] d : ℕ a : ℤ√↑d n : ℕ ha : Nonneg a ⊢ Nonneg (↑n * a) [PROOFSTEP] rw [← Int.cast_ofNat n] [GOAL] d : ℕ a : ℤ√↑d n : ℕ ha : Nonneg a ⊢ Nonneg (↑↑n * a) [PROOFSTEP] exact match a, nonneg_cases ha, ha with | _, ⟨x, y, Or.inl rfl⟩, _ => by rw [smul_val]; trivial | _, ⟨x, y, Or.inr <| Or.inl rfl⟩, ha => by rw [smul_val]; simpa using nonnegg_pos_neg.2 (sqLe_smul n <| nonnegg_pos_neg.1 ha) | _, ⟨x, y, Or.inr <| Or.inr rfl⟩, ha => by rw [smul_val]; simpa using 
nonnegg_neg_pos.2 (sqLe_smul n <| nonnegg_neg_pos.1 ha) [GOAL] d : ℕ a : ℤ√↑d n : ℕ ha : Nonneg a x y : ℕ x✝ : Nonneg { re := ↑x, im := ↑y } ⊢ Nonneg (↑↑n * { re := ↑x, im := ↑y }) [PROOFSTEP] rw [smul_val] [GOAL] d : ℕ a : ℤ√↑d n : ℕ ha : Nonneg a x y : ℕ x✝ : Nonneg { re := ↑x, im := ↑y } ⊢ Nonneg { re := ↑n * ↑x, im := ↑n * ↑y } [PROOFSTEP] trivial [GOAL] d : ℕ a : ℤ√↑d n : ℕ ha✝ : Nonneg a x y : ℕ ha : Nonneg { re := ↑x, im := -↑y } ⊢ Nonneg (↑↑n * { re := ↑x, im := -↑y }) [PROOFSTEP] rw [smul_val] [GOAL] d : ℕ a : ℤ√↑d n : ℕ ha✝ : Nonneg a x y : ℕ ha : Nonneg { re := ↑x, im := -↑y } ⊢ Nonneg { re := ↑n * ↑x, im := ↑n * -↑y } [PROOFSTEP] simpa using nonnegg_pos_neg.2 (sqLe_smul n <| nonnegg_pos_neg.1 ha) [GOAL] d : ℕ a : ℤ√↑d n : ℕ ha✝ : Nonneg a x y : ℕ ha : Nonneg { re := -↑x, im := ↑y } ⊢ Nonneg (↑↑n * { re := -↑x, im := ↑y }) [PROOFSTEP] rw [smul_val] [GOAL] d : ℕ a : ℤ√↑d n : ℕ ha✝ : Nonneg a x y : ℕ ha : Nonneg { re := -↑x, im := ↑y } ⊢ Nonneg { re := ↑n * -↑x, im := ↑n * ↑y } [PROOFSTEP] simpa using nonnegg_neg_pos.2 (sqLe_smul n <| nonnegg_neg_pos.1 ha) [GOAL] d : ℕ a : ℤ√↑d ha✝ : Nonneg a x y : ℕ ha : Nonneg { re := ↑x, im := -↑y } ⊢ Nonneg (sqrtd * { re := ↑x, im := -↑y }) [PROOFSTEP] simp only [muld_val, mul_neg] [GOAL] d : ℕ a : ℤ√↑d ha✝ : Nonneg a x y : ℕ ha : Nonneg { re := ↑x, im := -↑y } ⊢ Nonneg { re := -(↑d * ↑y), im := ↑x } [PROOFSTEP] apply nonnegg_neg_pos.2 [GOAL] d : ℕ a : ℤ√↑d ha✝ : Nonneg a x y : ℕ ha : Nonneg { re := ↑x, im := -↑y } ⊢ SqLe (d * y) 1 x d [PROOFSTEP] simpa [SqLe, mul_comm, mul_left_comm] using Nat.mul_le_mul_left d (nonnegg_pos_neg.1 ha) [GOAL] d : ℕ a : ℤ√↑d ha✝ : Nonneg a x y : ℕ ha : Nonneg { re := -↑x, im := ↑y } ⊢ Nonneg (sqrtd * { re := -↑x, im := ↑y }) [PROOFSTEP] simp only [muld_val] [GOAL] d : ℕ a : ℤ√↑d ha✝ : Nonneg a x y : ℕ ha : Nonneg { re := -↑x, im := ↑y } ⊢ Nonneg { re := ↑d * ↑y, im := -↑x } [PROOFSTEP] apply nonnegg_pos_neg.2 [GOAL] d : ℕ a : ℤ√↑d ha✝ : Nonneg a x y : ℕ ha : Nonneg { re := -↑x, im := ↑y } ⊢ SqLe x d (d * y) 1 [PROOFSTEP] simpa [SqLe, mul_comm, mul_left_comm] using Nat.mul_le_mul_left d (nonnegg_neg_pos.1 ha) [GOAL] d x y : ℕ a : ℤ√↑d ha : Nonneg a ⊢ Nonneg ({ re := ↑x, im := ↑y } * a) [PROOFSTEP] have : (⟨x, y⟩ * a : ℤ√d) = (x : ℤ√d) * a + sqrtd * ((y : ℤ√d) * a) := by rw [decompose, right_distrib, mul_assoc, Int.cast_ofNat, Int.cast_ofNat] [GOAL] d x y : ℕ a : ℤ√↑d ha : Nonneg a ⊢ { re := ↑x, im := ↑y } * a = ↑x * a + sqrtd * (↑y * a) [PROOFSTEP] rw [decompose, right_distrib, mul_assoc, Int.cast_ofNat, Int.cast_ofNat] [GOAL] d x y : ℕ a : ℤ√↑d ha : Nonneg a this : { re := ↑x, im := ↑y } * a = ↑x * a + sqrtd * (↑y * a) ⊢ Nonneg ({ re := ↑x, im := ↑y } * a) [PROOFSTEP] rw [this] [GOAL] d x y : ℕ a : ℤ√↑d ha : Nonneg a this : { re := ↑x, im := ↑y } * a = ↑x * a + sqrtd * (↑y * a) ⊢ Nonneg (↑x * a + sqrtd * (↑y * a)) [PROOFSTEP] exact (nonneg_smul ha).add (nonneg_muld <| nonneg_smul ha) [GOAL] d : ℕ a b : ℤ√↑d ha✝ : Nonneg a hb : Nonneg b x y z w : ℕ ha : Nonneg { re := -↑x, im := ↑y } x✝ : Nonneg { re := ↑z, im := ↑w } ⊢ Nonneg ({ re := -↑x, im := ↑y } * { re := ↑z, im := ↑w }) [PROOFSTEP] rw [mul_comm] [GOAL] d : ℕ a b : ℤ√↑d ha✝ : Nonneg a hb : Nonneg b x y z w : ℕ ha : Nonneg { re := -↑x, im := ↑y } x✝ : Nonneg { re := ↑z, im := ↑w } ⊢ Nonneg ({ re := ↑z, im := ↑w } * { re := -↑x, im := ↑y }) [PROOFSTEP] exact nonneg_mul_lem ha [GOAL] d : ℕ a b : ℤ√↑d ha✝ : Nonneg a hb : Nonneg b x y z w : ℕ ha : Nonneg { re := ↑x, im := -↑y } x✝ : Nonneg { re := ↑z, im := ↑w } ⊢ Nonneg ({ re := ↑x, im := -↑y } * { re := ↑z, im 
:= ↑w }) [PROOFSTEP] rw [mul_comm] [GOAL] d : ℕ a b : ℤ√↑d ha✝ : Nonneg a hb : Nonneg b x y z w : ℕ ha : Nonneg { re := ↑x, im := -↑y } x✝ : Nonneg { re := ↑z, im := ↑w } ⊢ Nonneg ({ re := ↑z, im := ↑w } * { re := ↑x, im := -↑y }) [PROOFSTEP] exact nonneg_mul_lem ha [GOAL] d : ℕ a b : ℤ√↑d ha✝ : Nonneg a hb✝ : Nonneg b x y z w : ℕ ha : Nonneg { re := -↑x, im := ↑y } hb : Nonneg { re := -↑z, im := ↑w } ⊢ Nonneg ({ re := -↑x, im := ↑y } * { re := -↑z, im := ↑w }) [PROOFSTEP] rw [calc (⟨-x, y⟩ * ⟨-z, w⟩ : ℤ√d) = ⟨_, _⟩ := rfl _ = ⟨x * z + d * y * w, -(x * w + y * z)⟩ := by simp [add_comm]] [GOAL] d : ℕ a b : ℤ√↑d ha✝ : Nonneg a hb✝ : Nonneg b x y z w : ℕ ha : Nonneg { re := -↑x, im := ↑y } hb : Nonneg { re := -↑z, im := ↑w } ⊢ { re := { re := -↑x, im := ↑y }.re * { re := -↑z, im := ↑w }.re + ↑d * { re := -↑x, im := ↑y }.im * { re := -↑z, im := ↑w }.im, im := { re := -↑x, im := ↑y }.re * { re := -↑z, im := ↑w }.im + { re := -↑x, im := ↑y }.im * { re := -↑z, im := ↑w }.re } = { re := ↑x * ↑z + ↑d * ↑y * ↑w, im := -(↑x * ↑w + ↑y * ↑z) } [PROOFSTEP] simp [add_comm] [GOAL] d : ℕ a b : ℤ√↑d ha✝ : Nonneg a hb✝ : Nonneg b x y z w : ℕ ha : Nonneg { re := -↑x, im := ↑y } hb : Nonneg { re := -↑z, im := ↑w } ⊢ Nonneg { re := ↑x * ↑z + ↑d * ↑y * ↑w, im := -(↑x * ↑w + ↑y * ↑z) } [PROOFSTEP] exact nonnegg_pos_neg.2 (sqLe_mul.left (nonnegg_neg_pos.1 ha) (nonnegg_neg_pos.1 hb)) [GOAL] d : ℕ a b : ℤ√↑d ha✝ : Nonneg a hb✝ : Nonneg b x y z w : ℕ ha : Nonneg { re := -↑x, im := ↑y } hb : Nonneg { re := ↑z, im := -↑w } ⊢ Nonneg ({ re := -↑x, im := ↑y } * { re := ↑z, im := -↑w }) [PROOFSTEP] rw [calc (⟨-x, y⟩ * ⟨z, -w⟩ : ℤ√d) = ⟨_, _⟩ := rfl _ = ⟨-(x * z + d * y * w), x * w + y * z⟩ := by simp [add_comm]] [GOAL] d : ℕ a b : ℤ√↑d ha✝ : Nonneg a hb✝ : Nonneg b x y z w : ℕ ha : Nonneg { re := -↑x, im := ↑y } hb : Nonneg { re := ↑z, im := -↑w } ⊢ { re := { re := -↑x, im := ↑y }.re * { re := ↑z, im := -↑w }.re + ↑d * { re := -↑x, im := ↑y }.im * { re := ↑z, im := -↑w }.im, im := { re := -↑x, im := ↑y }.re * { re := ↑z, im := -↑w }.im + { re := -↑x, im := ↑y }.im * { re := ↑z, im := -↑w }.re } = { re := -(↑x * ↑z + ↑d * ↑y * ↑w), im := ↑x * ↑w + ↑y * ↑z } [PROOFSTEP] simp [add_comm] [GOAL] d : ℕ a b : ℤ√↑d ha✝ : Nonneg a hb✝ : Nonneg b x y z w : ℕ ha : Nonneg { re := -↑x, im := ↑y } hb : Nonneg { re := ↑z, im := -↑w } ⊢ Nonneg { re := -(↑x * ↑z + ↑d * ↑y * ↑w), im := ↑x * ↑w + ↑y * ↑z } [PROOFSTEP] exact nonnegg_neg_pos.2 (sqLe_mul.right.left (nonnegg_neg_pos.1 ha) (nonnegg_pos_neg.1 hb)) [GOAL] d : ℕ a b : ℤ√↑d ha✝ : Nonneg a hb✝ : Nonneg b x y z w : ℕ ha : Nonneg { re := ↑x, im := -↑y } hb : Nonneg { re := -↑z, im := ↑w } ⊢ Nonneg ({ re := ↑x, im := -↑y } * { re := -↑z, im := ↑w }) [PROOFSTEP] rw [calc (⟨x, -y⟩ * ⟨-z, w⟩ : ℤ√d) = ⟨_, _⟩ := rfl _ = ⟨-(x * z + d * y * w), x * w + y * z⟩ := by simp [add_comm]] [GOAL] d : ℕ a b : ℤ√↑d ha✝ : Nonneg a hb✝ : Nonneg b x y z w : ℕ ha : Nonneg { re := ↑x, im := -↑y } hb : Nonneg { re := -↑z, im := ↑w } ⊢ { re := { re := ↑x, im := -↑y }.re * { re := -↑z, im := ↑w }.re + ↑d * { re := ↑x, im := -↑y }.im * { re := -↑z, im := ↑w }.im, im := { re := ↑x, im := -↑y }.re * { re := -↑z, im := ↑w }.im + { re := ↑x, im := -↑y }.im * { re := -↑z, im := ↑w }.re } = { re := -(↑x * ↑z + ↑d * ↑y * ↑w), im := ↑x * ↑w + ↑y * ↑z } [PROOFSTEP] simp [add_comm] [GOAL] d : ℕ a b : ℤ√↑d ha✝ : Nonneg a hb✝ : Nonneg b x y z w : ℕ ha : Nonneg { re := ↑x, im := -↑y } hb : Nonneg { re := -↑z, im := ↑w } ⊢ Nonneg { re := -(↑x * ↑z + ↑d * ↑y * ↑w), im := ↑x * ↑w + ↑y * ↑z } [PROOFSTEP] exact nonnegg_neg_pos.2 
(sqLe_mul.right.right.left (nonnegg_pos_neg.1 ha) (nonnegg_neg_pos.1 hb)) [GOAL] d : ℕ a b : ℤ√↑d ha✝ : Nonneg a hb✝ : Nonneg b x y z w : ℕ ha : Nonneg { re := ↑x, im := -↑y } hb : Nonneg { re := ↑z, im := -↑w } ⊢ Nonneg ({ re := ↑x, im := -↑y } * { re := ↑z, im := -↑w }) [PROOFSTEP] rw [calc (⟨x, -y⟩ * ⟨z, -w⟩ : ℤ√d) = ⟨_, _⟩ := rfl _ = ⟨x * z + d * y * w, -(x * w + y * z)⟩ := by simp [add_comm]] [GOAL] d : ℕ a b : ℤ√↑d ha✝ : Nonneg a hb✝ : Nonneg b x y z w : ℕ ha : Nonneg { re := ↑x, im := -↑y } hb : Nonneg { re := ↑z, im := -↑w } ⊢ { re := { re := ↑x, im := -↑y }.re * { re := ↑z, im := -↑w }.re + ↑d * { re := ↑x, im := -↑y }.im * { re := ↑z, im := -↑w }.im, im := { re := ↑x, im := -↑y }.re * { re := ↑z, im := -↑w }.im + { re := ↑x, im := -↑y }.im * { re := ↑z, im := -↑w }.re } = { re := ↑x * ↑z + ↑d * ↑y * ↑w, im := -(↑x * ↑w + ↑y * ↑z) } [PROOFSTEP] simp [add_comm] [GOAL] d : ℕ a b : ℤ√↑d ha✝ : Nonneg a hb✝ : Nonneg b x y z w : ℕ ha : Nonneg { re := ↑x, im := -↑y } hb : Nonneg { re := ↑z, im := -↑w } ⊢ Nonneg { re := ↑x * ↑z + ↑d * ↑y * ↑w, im := -(↑x * ↑w + ↑y * ↑z) } [PROOFSTEP] exact nonnegg_pos_neg.2 (sqLe_mul.right.right.right (nonnegg_pos_neg.1 ha) (nonnegg_pos_neg.1 hb)) [GOAL] d : ℕ a b : ℤ√↑d ⊢ 0 ≤ a → 0 ≤ b → 0 ≤ a * b [PROOFSTEP] simp_rw [← nonneg_iff_zero_le] [GOAL] d : ℕ a b : ℤ√↑d ⊢ Nonneg a → Nonneg b → Nonneg (a * b) [PROOFSTEP] exact nonneg_mul [GOAL] d : ℕ dnsq : Nonsquare d x y : ℕ h : x * x = d * y * y g : ℕ := Nat.gcd x y gpos : g > 0 ⊢ False [PROOFSTEP] let ⟨m, n, co, (hx : x = m * g), (hy : y = n * g)⟩ := Nat.exists_coprime gpos [GOAL] d : ℕ dnsq : Nonsquare d x y : ℕ h : x * x = d * y * y g : ℕ := Nat.gcd x y gpos : g > 0 m n : ℕ co : Nat.coprime m n hx : x = m * g hy : y = n * g ⊢ False [PROOFSTEP] rw [hx, hy] at h [GOAL] d : ℕ dnsq : Nonsquare d x y : ℕ g : ℕ := Nat.gcd x y gpos : g > 0 m n : ℕ h : m * g * (m * g) = d * (n * g) * (n * g) co : Nat.coprime m n hx : x = m * g hy : y = n * g ⊢ False [PROOFSTEP] have : m * m = d * (n * n) := by refine mul_left_cancel₀ (mul_pos gpos gpos).ne' ?_ -- Porting note: was `simpa [mul_comm, mul_left_comm] using h` calc g * g * (m * m) _ = m * g * (m * g) := by ring _ = d * (n * g) * (n * g) := h _ = g * g * (d * (n * n)) := by ring [GOAL] d : ℕ dnsq : Nonsquare d x y : ℕ g : ℕ := Nat.gcd x y gpos : g > 0 m n : ℕ h : m * g * (m * g) = d * (n * g) * (n * g) co : Nat.coprime m n hx : x = m * g hy : y = n * g ⊢ m * m = d * (n * n) [PROOFSTEP] refine mul_left_cancel₀ (mul_pos gpos gpos).ne' ?_ -- Porting note: was `simpa [mul_comm, mul_left_comm] using h` [GOAL] d : ℕ dnsq : Nonsquare d x y : ℕ g : ℕ := Nat.gcd x y gpos : g > 0 m n : ℕ h : m * g * (m * g) = d * (n * g) * (n * g) co : Nat.coprime m n hx : x = m * g hy : y = n * g ⊢ g * g * (m * m) = g * g * (d * (n * n)) [PROOFSTEP] calc g * g * (m * m) _ = m * g * (m * g) := by ring _ = d * (n * g) * (n * g) := h _ = g * g * (d * (n * n)) := by ring [GOAL] d : ℕ dnsq : Nonsquare d x y : ℕ g : ℕ := Nat.gcd x y gpos : g > 0 m n : ℕ h : m * g * (m * g) = d * (n * g) * (n * g) co : Nat.coprime m n hx : x = m * g hy : y = n * g ⊢ g * g * (m * m) = m * g * (m * g) [PROOFSTEP] ring [GOAL] d : ℕ dnsq : Nonsquare d x y : ℕ g : ℕ := Nat.gcd x y gpos : g > 0 m n : ℕ h : m * g * (m * g) = d * (n * g) * (n * g) co : Nat.coprime m n hx : x = m * g hy : y = n * g ⊢ d * (n * g) * (n * g) = g * g * (d * (n * n)) [PROOFSTEP] ring [GOAL] d : ℕ dnsq : Nonsquare d x y : ℕ g : ℕ := Nat.gcd x y gpos : g > 0 m n : ℕ h : m * g * (m * g) = d * (n * g) * (n * g) co : Nat.coprime m n hx : x = m * g hy : y 
= n * g this : m * m = d * (n * n) ⊢ False [PROOFSTEP] have co2 := let co1 := co.mul_right co co1.mul co1 [GOAL] d : ℕ dnsq : Nonsquare d x y : ℕ g : ℕ := Nat.gcd x y gpos : g > 0 m n : ℕ h : m * g * (m * g) = d * (n * g) * (n * g) co : Nat.coprime m n hx : x = m * g hy : y = n * g this : m * m = d * (n * n) co2 : Nat.coprime (m * m) (n * n) ⊢ False [PROOFSTEP] exact Nonsquare.ns d m (Nat.dvd_antisymm (by rw [this]; apply dvd_mul_right) <| co2.dvd_of_dvd_mul_right <| by simp [this]) [GOAL] d : ℕ dnsq : Nonsquare d x y : ℕ g : ℕ := Nat.gcd x y gpos : g > 0 m n : ℕ h : m * g * (m * g) = d * (n * g) * (n * g) co : Nat.coprime m n hx : x = m * g hy : y = n * g this : m * m = d * (n * n) co2 : Nat.coprime (m * m) (n * n) ⊢ d ∣ m * m [PROOFSTEP] rw [this] [GOAL] d : ℕ dnsq : Nonsquare d x y : ℕ g : ℕ := Nat.gcd x y gpos : g > 0 m n : ℕ h : m * g * (m * g) = d * (n * g) * (n * g) co : Nat.coprime m n hx : x = m * g hy : y = n * g this : m * m = d * (n * n) co2 : Nat.coprime (m * m) (n * n) ⊢ d ∣ d * (n * n) [PROOFSTEP] apply dvd_mul_right [GOAL] d : ℕ dnsq : Nonsquare d x y : ℕ g : ℕ := Nat.gcd x y gpos : g > 0 m n : ℕ h : m * g * (m * g) = d * (n * g) * (n * g) co : Nat.coprime m n hx : x = m * g hy : y = n * g this : m * m = d * (n * n) co2 : Nat.coprime (m * m) (n * n) ⊢ m * m ∣ d * (n * n) [PROOFSTEP] simp [this] [GOAL] d : ℕ dnsq : Nonsquare d x y : ℤ h : x * x = ↑d * y * y ⊢ x = 0 ∧ y = 0 [PROOFSTEP] rw [mul_assoc, ← Int.natAbs_mul_self, ← Int.natAbs_mul_self, ← Int.ofNat_mul, ← mul_assoc] at h [GOAL] d : ℕ dnsq : Nonsquare d x y : ℤ h : ↑(Int.natAbs x * Int.natAbs x) = ↑(d * Int.natAbs y * Int.natAbs y) ⊢ x = 0 ∧ y = 0 [PROOFSTEP] exact let ⟨h1, h2⟩ := divides_sq_eq_zero (Int.ofNat.inj h) ⟨Int.natAbs_eq_zero.mp h1, Int.natAbs_eq_zero.mp h2⟩ [GOAL] d : ℕ dnsq : Nonsquare d x y : ℕ e : (x + 1) * (x + 1) = d * (y + 1) * (y + 1) ⊢ False [PROOFSTEP] have t := (divides_sq_eq_zero e).left [GOAL] d : ℕ dnsq : Nonsquare d x y : ℕ e : (x + 1) * (x + 1) = d * (y + 1) * (y + 1) t : x + 1 = 0 ⊢ False [PROOFSTEP] contradiction [GOAL] d : ℕ dnsq : Nonsquare d x : ℕ xy : Nonneg { re := -[x+1], im := 0 } x✝ : Nonneg (-{ re := -[x+1], im := 0 }) ⊢ 0 < 1 [PROOFSTEP] decide [GOAL] d : ℕ dnsq : Nonsquare d x : ℕ x✝ : Nonneg { re := ↑(x + 1), im := 0 } yx : Nonneg (-{ re := ↑(x + 1), im := 0 }) ⊢ 0 < 1 [PROOFSTEP] decide [GOAL] d : ℕ dnsq : Nonsquare d x y : ℕ xy : SqLe (y + 1) d (x + 1) 1 yx : SqLe (Nat.add x 0 + 1) 1 (Nat.succ y) d ⊢ { re := ↑(x + 1), im := -[y+1] } = 0 [PROOFSTEP] let t := le_antisymm yx xy [GOAL] d : ℕ dnsq : Nonsquare d x y : ℕ xy : SqLe (y + 1) d (x + 1) 1 yx : SqLe (Nat.add x 0 + 1) 1 (Nat.succ y) d t : 1 * (Nat.add x 0 + 1) * (Nat.add x 0 + 1) = d * Nat.succ y * Nat.succ y := le_antisymm yx xy ⊢ { re := ↑(x + 1), im := -[y+1] } = 0 [PROOFSTEP] rw [one_mul] at t [GOAL] d : ℕ dnsq : Nonsquare d x y : ℕ xy : SqLe (y + 1) d (x + 1) 1 yx : SqLe (Nat.add x 0 + 1) 1 (Nat.succ y) d t : (Nat.add x 0 + 1) * (Nat.add x 0 + 1) = d * Nat.succ y * Nat.succ y ⊢ { re := ↑(x + 1), im := -[y+1] } = 0 [PROOFSTEP] exact absurd t (not_divides_sq _ _) [GOAL] d : ℕ dnsq : Nonsquare d x y : ℕ xy : SqLe (x + 1) 1 (y + 1) d yx : SqLe (Nat.add y 0 + 1) d (Nat.succ x) 1 ⊢ { re := -[x+1], im := ↑(y + 1) } = 0 [PROOFSTEP] let t := le_antisymm xy yx [GOAL] d : ℕ dnsq : Nonsquare d x y : ℕ xy : SqLe (x + 1) 1 (y + 1) d yx : SqLe (Nat.add y 0 + 1) d (Nat.succ x) 1 t : 1 * (x + 1) * (x + 1) = d * (y + 1) * (y + 1) := le_antisymm xy yx ⊢ { re := -[x+1], im := ↑(y + 1) } = 0 [PROOFSTEP] rw [one_mul] at t [GOAL] d : ℕ dnsq 
: Nonsquare d x y : ℕ xy : SqLe (x + 1) 1 (y + 1) d yx : SqLe (Nat.add y 0 + 1) d (Nat.succ x) 1 t : (x + 1) * (x + 1) = d * (y + 1) * (y + 1) ⊢ { re := -[x+1], im := ↑(y + 1) } = 0 [PROOFSTEP] exact absurd t (not_divides_sq _ _) [GOAL] d : ℕ dnsq : Nonsquare d a b : ℤ√↑d ab : a ≤ b ba : b ≤ a ⊢ Nonneg (-(a - b)) [PROOFSTEP] rwa [neg_sub] [GOAL] d : ℕ dnsq : Nonsquare d x y z w : ℤ h : { re := x, im := y } * { re := z, im := w } = 0 ⊢ { re := x, im := y } = 0 ∨ { re := z, im := w } = 0 [PROOFSTEP] injection h with h1 h2 [GOAL] d : ℕ dnsq : Nonsquare d x y z w : ℤ h1 : { re := x, im := y }.re * { re := z, im := w }.re + ↑d * { re := x, im := y }.im * { re := z, im := w }.im = 0 h2 : { re := x, im := y }.re * { re := z, im := w }.im + { re := x, im := y }.im * { re := z, im := w }.re = 0 ⊢ { re := x, im := y } = 0 ∨ { re := z, im := w } = 0 [PROOFSTEP] have h1 : x * z = -(d * y * w) := eq_neg_of_add_eq_zero_left h1 [GOAL] d : ℕ dnsq : Nonsquare d x y z w : ℤ h1✝ : { re := x, im := y }.re * { re := z, im := w }.re + ↑d * { re := x, im := y }.im * { re := z, im := w }.im = 0 h2 : { re := x, im := y }.re * { re := z, im := w }.im + { re := x, im := y }.im * { re := z, im := w }.re = 0 h1 : x * z = -(↑d * y * w) ⊢ { re := x, im := y } = 0 ∨ { re := z, im := w } = 0 [PROOFSTEP] have h2 : x * w = -(y * z) := eq_neg_of_add_eq_zero_left h2 [GOAL] d : ℕ dnsq : Nonsquare d x y z w : ℤ h1✝ : { re := x, im := y }.re * { re := z, im := w }.re + ↑d * { re := x, im := y }.im * { re := z, im := w }.im = 0 h2✝ : { re := x, im := y }.re * { re := z, im := w }.im + { re := x, im := y }.im * { re := z, im := w }.re = 0 h1 : x * z = -(↑d * y * w) h2 : x * w = -(y * z) ⊢ { re := x, im := y } = 0 ∨ { re := z, im := w } = 0 [PROOFSTEP] have fin : x * x = d * y * y → (⟨x, y⟩ : ℤ√d) = 0 := fun e => match x, y, divides_sq_eq_zero_z e with | _, _, ⟨rfl, rfl⟩ => rfl [GOAL] d : ℕ dnsq : Nonsquare d x y z w : ℤ h1✝ : { re := x, im := y }.re * { re := z, im := w }.re + ↑d * { re := x, im := y }.im * { re := z, im := w }.im = 0 h2✝ : { re := x, im := y }.re * { re := z, im := w }.im + { re := x, im := y }.im * { re := z, im := w }.re = 0 h1 : x * z = -(↑d * y * w) h2 : x * w = -(y * z) fin : x * x = ↑d * y * y → { re := x, im := y } = 0 ⊢ { re := x, im := y } = 0 ∨ { re := z, im := w } = 0 [PROOFSTEP] exact if z0 : z = 0 then if w0 : w = 0 then Or.inr (match z, w, z0, w0 with | _, _, rfl, rfl => rfl) else Or.inl <| fin <| mul_right_cancel₀ w0 <| calc x * x * w = -y * (x * z) := by simp [h2, mul_assoc, mul_left_comm] _ = d * y * y * w := by simp [h1, mul_assoc, mul_left_comm] else Or.inl <| fin <| mul_right_cancel₀ z0 <| calc x * x * z = d * -y * (x * w) := by simp [h1, mul_assoc, mul_left_comm] _ = d * y * y * z := by simp [h2, mul_assoc, mul_left_comm] [GOAL] d : ℕ dnsq : Nonsquare d x y z w : ℤ h1✝ : { re := x, im := y }.re * { re := z, im := w }.re + ↑d * { re := x, im := y }.im * { re := z, im := w }.im = 0 h2✝ : { re := x, im := y }.re * { re := z, im := w }.im + { re := x, im := y }.im * { re := z, im := w }.re = 0 h1 : x * z = -(↑d * y * w) h2 : x * w = -(y * z) fin : x * x = ↑d * y * y → { re := x, im := y } = 0 z0 : z = 0 w0 : ¬w = 0 ⊢ x * x * w = -y * (x * z) [PROOFSTEP] simp [h2, mul_assoc, mul_left_comm] [GOAL] d : ℕ dnsq : Nonsquare d x y z w : ℤ h1✝ : { re := x, im := y }.re * { re := z, im := w }.re + ↑d * { re := x, im := y }.im * { re := z, im := w }.im = 0 h2✝ : { re := x, im := y }.re * { re := z, im := w }.im + { re := x, im := y }.im * { re := z, im := w }.re = 0 h1 : x * z = -(↑d * y * w) h2 : x * w = 
-(y * z) fin : x * x = ↑d * y * y → { re := x, im := y } = 0 z0 : z = 0 w0 : ¬w = 0 ⊢ -y * (x * z) = ↑d * y * y * w [PROOFSTEP] simp [h1, mul_assoc, mul_left_comm] [GOAL] d : ℕ dnsq : Nonsquare d x y z w : ℤ h1✝ : { re := x, im := y }.re * { re := z, im := w }.re + ↑d * { re := x, im := y }.im * { re := z, im := w }.im = 0 h2✝ : { re := x, im := y }.re * { re := z, im := w }.im + { re := x, im := y }.im * { re := z, im := w }.re = 0 h1 : x * z = -(↑d * y * w) h2 : x * w = -(y * z) fin : x * x = ↑d * y * y → { re := x, im := y } = 0 z0 : ¬z = 0 ⊢ x * x * z = ↑d * -y * (x * w) [PROOFSTEP] simp [h1, mul_assoc, mul_left_comm] [GOAL] d : ℕ dnsq : Nonsquare d x y z w : ℤ h1✝ : { re := x, im := y }.re * { re := z, im := w }.re + ↑d * { re := x, im := y }.im * { re := z, im := w }.im = 0 h2✝ : { re := x, im := y }.re * { re := z, im := w }.im + { re := x, im := y }.im * { re := z, im := w }.re = 0 h1 : x * z = -(↑d * y * w) h2 : x * w = -(y * z) fin : x * x = ↑d * y * y → { re := x, im := y } = 0 z0 : ¬z = 0 ⊢ ↑d * -y * (x * w) = ↑d * y * y * z [PROOFSTEP] simp [h2, mul_assoc, mul_left_comm] [GOAL] d : ℕ dnsq : Nonsquare d src✝² : CommRing (ℤ√↑d) := commRing src✝¹ : LinearOrder (ℤ√↑d) := linearOrder src✝ : Nontrivial (ℤ√↑d) := nontrivial ⊢ 0 ≤ 1 [PROOFSTEP] trivial [GOAL] d : ℕ dnsq : Nonsquare d ⊢ LinearOrderedRing (ℤ√↑d) [PROOFSTEP] infer_instance [GOAL] d : ℕ dnsq : Nonsquare d ⊢ OrderedRing (ℤ√↑d) [PROOFSTEP] infer_instance [GOAL] d : ℤ h_nonsquare : ∀ (n : ℤ), d ≠ n * n a : ℤ√d ⊢ norm a = 0 ↔ a = 0 [PROOFSTEP] refine' ⟨fun ha => ext.mpr _, fun h => by rw [h, norm_zero]⟩ [GOAL] d : ℤ h_nonsquare : ∀ (n : ℤ), d ≠ n * n a : ℤ√d h : a = 0 ⊢ norm a = 0 [PROOFSTEP] rw [h, norm_zero] [GOAL] d : ℤ h_nonsquare : ∀ (n : ℤ), d ≠ n * n a : ℤ√d ha : norm a = 0 ⊢ a.re = 0.re ∧ a.im = 0.im [PROOFSTEP] dsimp only [norm] at ha [GOAL] d : ℤ h_nonsquare : ∀ (n : ℤ), d ≠ n * n a : ℤ√d ha : a.re * a.re - d * a.im * a.im = 0 ⊢ a.re = 0.re ∧ a.im = 0.im [PROOFSTEP] rw [sub_eq_zero] at ha [GOAL] d : ℤ h_nonsquare : ∀ (n : ℤ), d ≠ n * n a : ℤ√d ha : a.re * a.re = d * a.im * a.im ⊢ a.re = 0.re ∧ a.im = 0.im [PROOFSTEP] by_cases h : 0 ≤ d [GOAL] case pos d : ℤ h_nonsquare : ∀ (n : ℤ), d ≠ n * n a : ℤ√d ha : a.re * a.re = d * a.im * a.im h : 0 ≤ d ⊢ a.re = 0.re ∧ a.im = 0.im [PROOFSTEP] obtain ⟨d', rfl⟩ := Int.eq_ofNat_of_zero_le h [GOAL] case pos.intro d' : ℕ h_nonsquare : ∀ (n : ℤ), ↑d' ≠ n * n a : ℤ√↑d' ha : a.re * a.re = ↑d' * a.im * a.im h : 0 ≤ ↑d' ⊢ a.re = 0.re ∧ a.im = 0.im [PROOFSTEP] haveI : Nonsquare d' := ⟨fun n h => h_nonsquare n <| by exact_mod_cast h⟩ [GOAL] d' : ℕ h_nonsquare : ∀ (n : ℤ), ↑d' ≠ n * n a : ℤ√↑d' ha : a.re * a.re = ↑d' * a.im * a.im h✝ : 0 ≤ ↑d' n : ℕ h : d' = n * n ⊢ ↑d' = ↑n * ↑n [PROOFSTEP] exact_mod_cast h [GOAL] case pos.intro d' : ℕ h_nonsquare : ∀ (n : ℤ), ↑d' ≠ n * n a : ℤ√↑d' ha : a.re * a.re = ↑d' * a.im * a.im h : 0 ≤ ↑d' this : Nonsquare d' ⊢ a.re = 0.re ∧ a.im = 0.im [PROOFSTEP] exact divides_sq_eq_zero_z ha [GOAL] case neg d : ℤ h_nonsquare : ∀ (n : ℤ), d ≠ n * n a : ℤ√d ha : a.re * a.re = d * a.im * a.im h : ¬0 ≤ d ⊢ a.re = 0.re ∧ a.im = 0.im [PROOFSTEP] push_neg at h [GOAL] case neg d : ℤ h_nonsquare : ∀ (n : ℤ), d ≠ n * n a : ℤ√d ha : a.re * a.re = d * a.im * a.im h : d < 0 ⊢ a.re = 0.re ∧ a.im = 0.im [PROOFSTEP] suffices a.re * a.re = 0 by rw [eq_zero_of_mul_self_eq_zero this] at ha ⊢ simpa only [true_and_iff, or_self_right, zero_re, zero_im, eq_self_iff_true, zero_eq_mul, mul_zero, mul_eq_zero, h.ne, false_or_iff, or_self_iff] using ha [GOAL] d : ℤ h_nonsquare : ∀ (n : 
ℤ), d ≠ n * n a : ℤ√d ha : a.re * a.re = d * a.im * a.im h : d < 0 this : a.re * a.re = 0 ⊢ a.re = 0.re ∧ a.im = 0.im [PROOFSTEP] rw [eq_zero_of_mul_self_eq_zero this] at ha ⊢ [GOAL] d : ℤ h_nonsquare : ∀ (n : ℤ), d ≠ n * n a : ℤ√d ha : 0 * 0 = d * a.im * a.im h : d < 0 this : a.re * a.re = 0 ⊢ 0 = 0.re ∧ a.im = 0.im [PROOFSTEP] simpa only [true_and_iff, or_self_right, zero_re, zero_im, eq_self_iff_true, zero_eq_mul, mul_zero, mul_eq_zero, h.ne, false_or_iff, or_self_iff] using ha [GOAL] case neg d : ℤ h_nonsquare : ∀ (n : ℤ), d ≠ n * n a : ℤ√d ha : a.re * a.re = d * a.im * a.im h : d < 0 ⊢ a.re * a.re = 0 [PROOFSTEP] apply _root_.le_antisymm _ (mul_self_nonneg _) [GOAL] d : ℤ h_nonsquare : ∀ (n : ℤ), d ≠ n * n a : ℤ√d ha : a.re * a.re = d * a.im * a.im h : d < 0 ⊢ a.re * a.re ≤ 0 [PROOFSTEP] rw [ha, mul_assoc] [GOAL] d : ℤ h_nonsquare : ∀ (n : ℤ), d ≠ n * n a : ℤ√d ha : a.re * a.re = d * a.im * a.im h : d < 0 ⊢ d * (a.im * a.im) ≤ 0 [PROOFSTEP] exact mul_nonpos_of_nonpos_of_nonneg h.le (mul_self_nonneg _) [GOAL] R : Type inst✝ : Ring R d : ℤ f g : ℤ√d →+* R h : ↑f sqrtd = ↑g sqrtd ⊢ f = g [PROOFSTEP] ext ⟨x_re, x_im⟩ [GOAL] case a.mk R : Type inst✝ : Ring R d : ℤ f g : ℤ√d →+* R h : ↑f sqrtd = ↑g sqrtd x_re x_im : ℤ ⊢ ↑f { re := x_re, im := x_im } = ↑g { re := x_re, im := x_im } [PROOFSTEP] simp [decompose, h] [GOAL] R : Type inst✝ : CommRing R d : ℤ r : { r // r * r = ↑d } ⊢ (fun a => ↑a.re + ↑a.im * ↑r) 1 = 1 [PROOFSTEP] simp [GOAL] R : Type inst✝ : CommRing R d : ℤ r : { r // r * r = ↑d } a b : ℤ√d ⊢ OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } (a * b) = OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } a * OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } b [PROOFSTEP] have : (a.re + a.im * r : R) * (b.re + b.im * r) = a.re * b.re + (a.re * b.im + a.im * b.re) * r + a.im * b.im * (r * r) := by ring [GOAL] R : Type inst✝ : CommRing R d : ℤ r : { r // r * r = ↑d } a b : ℤ√d ⊢ (↑a.re + ↑a.im * ↑r) * (↑b.re + ↑b.im * ↑r) = ↑a.re * ↑b.re + (↑a.re * ↑b.im + ↑a.im * ↑b.re) * ↑r + ↑a.im * ↑b.im * (↑r * ↑r) [PROOFSTEP] ring [GOAL] R : Type inst✝ : CommRing R d : ℤ r : { r // r * r = ↑d } a b : ℤ√d this : (↑a.re + ↑a.im * ↑r) * (↑b.re + ↑b.im * ↑r) = ↑a.re * ↑b.re + (↑a.re * ↑b.im + ↑a.im * ↑b.re) * ↑r + ↑a.im * ↑b.im * (↑r * ↑r) ⊢ OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } (a * b) = OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } a * OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } b [PROOFSTEP] simp [this, r.prop] [GOAL] R : Type inst✝ : CommRing R d : ℤ r : { r // r * r = ↑d } a b : ℤ√d this : (↑a.re + ↑a.im * ↑r) * (↑b.re + ↑b.im * ↑r) = ↑a.re * ↑b.re + (↑a.re * ↑b.im + ↑a.im * ↑b.re) * ↑r + ↑a.im * ↑b.im * (↑r * ↑r) ⊢ ↑a.re * ↑b.re + ↑d * ↑a.im * ↑b.im + (↑a.re * ↑b.im + ↑a.im * ↑b.re) * ↑r = ↑a.re * ↑b.re + (↑a.re * ↑b.im + ↑a.im * ↑b.re) * ↑r + ↑a.im * ↑b.im * ↑d [PROOFSTEP] ring [GOAL] R : Type inst✝ : CommRing R d : ℤ r : { r // r * r = ↑d } ⊢ OneHom.toFun (↑{ toOneHom := { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) }, map_mul' := (_ : ∀ (a b : ℤ√d), OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } (a * b) = OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } a * OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := 
(_ : ↑1 + ↑0 * ↑r = 1) } b) }) 0 = 0 [PROOFSTEP] simp [GOAL] R : Type inst✝ : CommRing R d : ℤ r : { r // r * r = ↑d } a b : ℤ√d ⊢ OneHom.toFun (↑{ toOneHom := { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) }, map_mul' := (_ : ∀ (a b : ℤ√d), OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } (a * b) = OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } a * OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } b) }) (a + b) = OneHom.toFun (↑{ toOneHom := { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) }, map_mul' := (_ : ∀ (a b : ℤ√d), OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } (a * b) = OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } a * OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } b) }) a + OneHom.toFun (↑{ toOneHom := { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) }, map_mul' := (_ : ∀ (a b : ℤ√d), OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } (a * b) = OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } a * OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } b) }) b [PROOFSTEP] simp [GOAL] R : Type inst✝ : CommRing R d : ℤ r : { r // r * r = ↑d } a b : ℤ√d ⊢ ↑a.re + ↑b.re + (↑a.im + ↑b.im) * ↑r = ↑a.re + ↑a.im * ↑r + (↑b.re + ↑b.im * ↑r) [PROOFSTEP] ring [GOAL] R : Type inst✝ : CommRing R d : ℤ f : ℤ√d →+* R ⊢ ↑f sqrtd * ↑f sqrtd = ↑d [PROOFSTEP] rw [← f.map_mul, dmuld, map_intCast] [GOAL] R : Type inst✝ : CommRing R d : ℤ r : { r // r * r = ↑d } ⊢ (fun f => { val := ↑f sqrtd, property := (_ : ↑f sqrtd * ↑f sqrtd = ↑d) }) ((fun r => { toMonoidHom := { toOneHom := { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) }, map_mul' := (_ : ∀ (a b : ℤ√d), OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } (a * b) = OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } a * OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } b) }, map_zero' := (_ : ↑0 + ↑0 * ↑r = 0), map_add' := (_ : ∀ (a b : ℤ√d), OneHom.toFun (↑{ toOneHom := { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) }, map_mul' := (_ : ∀ (a b : ℤ√d), OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } (a * b) = OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } a * OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } b) }) (a + b) = OneHom.toFun (↑{ toOneHom := { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) }, map_mul' := (_ : ∀ (a b : ℤ√d), OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } (a * b) = OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } a * OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } b) }) a + OneHom.toFun (↑{ toOneHom := { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) }, map_mul' := (_ : ∀ (a b : ℤ√d), OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } (a * b) = OneHom.toFun { toFun := fun a => 
↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } a * OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } b) }) b) }) r) = r [PROOFSTEP] ext [GOAL] case a R : Type inst✝ : CommRing R d : ℤ r : { r // r * r = ↑d } ⊢ ↑((fun f => { val := ↑f sqrtd, property := (_ : ↑f sqrtd * ↑f sqrtd = ↑d) }) ((fun r => { toMonoidHom := { toOneHom := { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) }, map_mul' := (_ : ∀ (a b : ℤ√d), OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } (a * b) = OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } a * OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } b) }, map_zero' := (_ : ↑0 + ↑0 * ↑r = 0), map_add' := (_ : ∀ (a b : ℤ√d), OneHom.toFun (↑{ toOneHom := { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) }, map_mul' := (_ : ∀ (a b : ℤ√d), OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } (a * b) = OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } a * OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } b) }) (a + b) = OneHom.toFun (↑{ toOneHom := { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) }, map_mul' := (_ : ∀ (a b : ℤ√d), OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } (a * b) = OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } a * OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } b) }) a + OneHom.toFun (↑{ toOneHom := { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) }, map_mul' := (_ : ∀ (a b : ℤ√d), OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } (a * b) = OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } a * OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } b) }) b) }) r)) = ↑r [PROOFSTEP] simp [GOAL] R : Type inst✝ : CommRing R d : ℤ f : ℤ√d →+* R ⊢ (fun r => { toMonoidHom := { toOneHom := { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) }, map_mul' := (_ : ∀ (a b : ℤ√d), OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } (a * b) = OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } a * OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } b) }, map_zero' := (_ : ↑0 + ↑0 * ↑r = 0), map_add' := (_ : ∀ (a b : ℤ√d), OneHom.toFun (↑{ toOneHom := { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) }, map_mul' := (_ : ∀ (a b : ℤ√d), OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } (a * b) = OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } a * OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } b) }) (a + b) = OneHom.toFun (↑{ toOneHom := { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) }, map_mul' := (_ : ∀ (a b : ℤ√d), OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } (a * b) = OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } a * OneHom.toFun { toFun := fun a => 
↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } b) }) a + OneHom.toFun (↑{ toOneHom := { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) }, map_mul' := (_ : ∀ (a b : ℤ√d), OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } (a * b) = OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } a * OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } b) }) b) }) ((fun f => { val := ↑f sqrtd, property := (_ : ↑f sqrtd * ↑f sqrtd = ↑d) }) f) = f [PROOFSTEP] refine hom_ext _ _ ?_ [GOAL] R : Type inst✝ : CommRing R d : ℤ f : ℤ√d →+* R ⊢ ↑((fun r => { toMonoidHom := { toOneHom := { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) }, map_mul' := (_ : ∀ (a b : ℤ√d), OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } (a * b) = OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } a * OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } b) }, map_zero' := (_ : ↑0 + ↑0 * ↑r = 0), map_add' := (_ : ∀ (a b : ℤ√d), OneHom.toFun (↑{ toOneHom := { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) }, map_mul' := (_ : ∀ (a b : ℤ√d), OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } (a * b) = OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } a * OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } b) }) (a + b) = OneHom.toFun (↑{ toOneHom := { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) }, map_mul' := (_ : ∀ (a b : ℤ√d), OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } (a * b) = OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } a * OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } b) }) a + OneHom.toFun (↑{ toOneHom := { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) }, map_mul' := (_ : ∀ (a b : ℤ√d), OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } (a * b) = OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } a * OneHom.toFun { toFun := fun a => ↑a.re + ↑a.im * ↑r, map_one' := (_ : ↑1 + ↑0 * ↑r = 1) } b) }) b) }) ((fun f => { val := ↑f sqrtd, property := (_ : ↑f sqrtd * ↑f sqrtd = ↑d) }) f)) sqrtd = ↑f sqrtd [PROOFSTEP] simp [GOAL] R : Type inst✝¹ : CommRing R inst✝ : CharZero R d : ℤ r : { r // r * r = ↑d } hd : ∀ (n : ℤ), d ≠ n * n a : ℤ√d ha : ↑(↑lift r) a = 0 ⊢ a = 0 [PROOFSTEP] have h_inj : Function.Injective ((↑) : ℤ → R) := Int.cast_injective [GOAL] R : Type inst✝¹ : CommRing R inst✝ : CharZero R d : ℤ r : { r // r * r = ↑d } hd : ∀ (n : ℤ), d ≠ n * n a : ℤ√d ha : ↑(↑lift r) a = 0 h_inj : Function.Injective Int.cast ⊢ a = 0 [PROOFSTEP] suffices lift r a.norm = 0 by simp only [coe_int_re, add_zero, lift_apply_apply, coe_int_im, Int.cast_zero, zero_mul] at this rwa [← Int.cast_zero, h_inj.eq_iff, norm_eq_zero hd] at this [GOAL] R : Type inst✝¹ : CommRing R inst✝ : CharZero R d : ℤ r : { r // r * r = ↑d } hd : ∀ (n : ℤ), d ≠ n * n a : ℤ√d ha : ↑(↑lift r) a = 0 h_inj : Function.Injective Int.cast this : ↑(↑lift r) ↑(norm a) = 0 ⊢ a = 0 [PROOFSTEP] simp only [coe_int_re, add_zero, lift_apply_apply, coe_int_im, Int.cast_zero, zero_mul] at this [GOAL] 
R : Type inst✝¹ : CommRing R inst✝ : CharZero R d : ℤ r : { r // r * r = ↑d } hd : ∀ (n : ℤ), d ≠ n * n a : ℤ√d ha : ↑(↑lift r) a = 0 h_inj : Function.Injective Int.cast this : ↑(norm a) = 0 ⊢ a = 0 [PROOFSTEP] rwa [← Int.cast_zero, h_inj.eq_iff, norm_eq_zero hd] at this [GOAL] R : Type inst✝¹ : CommRing R inst✝ : CharZero R d : ℤ r : { r // r * r = ↑d } hd : ∀ (n : ℤ), d ≠ n * n a : ℤ√d ha : ↑(↑lift r) a = 0 h_inj : Function.Injective Int.cast ⊢ ↑(↑lift r) ↑(norm a) = 0 [PROOFSTEP] rw [norm_eq_mul_conj, RingHom.map_mul, ha, zero_mul] [GOAL] R : Type inst✝ : CommRing R d : ℤ a : ℤ√d ⊢ norm a = 1 ↔ a ∈ unitary (ℤ√d) [PROOFSTEP] rw [unitary.mem_iff_self_mul_star, ← norm_eq_mul_conj] [GOAL] R : Type inst✝ : CommRing R d : ℤ a : ℤ√d ⊢ norm a = 1 ↔ ↑(norm a) = 1 [PROOFSTEP] norm_cast
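A note on the notation used throughout the trace above: assuming the usual Mathlib definitions for ℤ√d (an assumption, since the definitions themselves are not reproduced in this excerpt), `SqLe a c b d` abbreviates the inequality c·a·a ≤ d·b·b, and the two lemmas `nonnegg_pos_neg` and `nonnegg_neg_pos` that drive most of the case analysis can then be read as

\[
\operatorname{Nonneg}\langle x,\,-y\rangle \iff d\,y^{2} \le x^{2},
\qquad
\operatorname{Nonneg}\langle -x,\,y\rangle \iff x^{2} \le d\,y^{2},
\]

i.e. as the statements x − y√d ≥ 0 and y√d − x ≥ 0 for natural numbers x, y; under this reading, lemmas such as `sqLe_add`, `sqLe_smul` and `sqLe_mul` appearing in the trace are closure properties of these inequalities.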
" Vitamin D " was nominated for the best " Comedy Series Episode " award at the 2010 PRISM Awards . It received generally positive reviews from critics . Shawna Malcom of the Los Angeles Times noted that she preferred the boys ' performance to the girls ' , commenting : " Their number had the same heart @-@ soaring power as " Don 't Stop Believin ' " [ performed in the pilot episode ] . " Malcom enjoyed Sue 's character development in the episode , claiming that , " In less skilled hands , there ’ s no doubt Sue would be an over @-@ the @-@ top disaster . But thanks to the incomparable Jane Lynch , I can ’ t wait to see what trouble the character stirs up next . " Aly Semigran of MTV also enjoyed the boys ' performance more than the girls ' , and gave the episode a mostly positive review , writing that it moved the series ' storylines to " a whole new level " . She felt , however , that the episode " didn 't have nearly enough singing " . Mandi Bierly for Entertainment Weekly similarly noted that : " So much happened in this hour that the musical numbers , though enjoyable , were almost an afterthought . " Bierly favoured the girls ' performance , and praised Morrison 's acting , commenting : " Matthew Morrison communicates so much with his eyes . There ’ s a softness and a longing in them that I ’ m always surprised Emma ( Jayma Mays ) matches . "
/- The task: 1) Define clone (see Wikipedia https://en.wikipedia.org/wiki/Clone_(algebra) ) 2) Proof that in every clone, if there is a ternary operation p satisfying p(y,x,x) = p(y,x,y) = p(x,x,y) = y then there is a ternary operation m satisfying m(y,x,x) = m(x,y,x) = m(x,x,y) = x (hint: m(a,b,c) = p(a,p(a,b,c),c) ) TODO: 1) expand forall conjunction -> conjunction forall by a tactic? 2) tactic for cases of fin 3) use several rewrite rules as long as possible 4) general version of compose3_lemma () -/ -- handy way of writing arrays def list.a {α : Type} (l : list α) : array (l.length) α := { data := λ i, l.nth_le i.val i.is_lt} #check [2,3,4,5].a #reduce [2,3,4,5].a -- ability to map array from one type to another def array.my_map {α β : Type} {n : ℕ} (f : α → β) (a : array n α) : array n β := {data := λ i : fin n, f (a.data i)} -- lemma for case-analysis on natural numbers theorem not_lt_cases {n : ℕ} {i_val : ℕ} (h : ¬i_val < n) : i_val = n ∨ ¬(i_val < nat.succ(n)) := begin have: i_val = n ∨ n < i_val, from nat.eq_or_lt_of_not_lt h, cases this, left, assumption, right, have: ¬ i_val ≤ n, from (nat.lt_iff_le_not_le.elim_left this).right, intro a, revert this, apply non_contradictory_intro, show i_val ≤ n, from nat.le_of_lt_succ a end -- operations, composition, projections def operation (ar : ℕ) (α : Type) := (array ar α) → α def compose {α : Type} {n m : ℕ} (f : operation n α) (g_tup : array n (operation m α)) : operation m α := assume input : array m α, f (g_tup.my_map (λ g, g input)) constants f : operation 3 ℕ theorem compose3_lemma {α : Type} {m : ℕ} {f : operation 3 α} {g1 g2 g3 : operation m α} : compose f [g1, g2, g3].a = λ input, f [g1 input, g2 input, g3 input].a := begin apply funext, intros, apply congr_arg f, apply array.ext, intros, cases i, by_cases i_val = 0, subst i_val, refl, by_cases i_val = 1, subst i_val, refl, by_cases i_val = 2, subst i_val, refl, have ineq := nat.not_lt_zero i_val, iterate 3 {have ineq := not_lt_cases ineq, cases ineq with neq ineq, contradiction}, contradiction, end def projection (α : Type) (n : ℕ) (i : fin n) : operation n α := λ input, input.read i -- clone def operation_set (α : Type) : Type := Π n : ℕ, set (operation n α) def is_clone {α : Type} (ops : operation_set α) : Prop := (∀ n : ℕ, ∀ i : fin n, projection α n i ∈ ops n) ∧ (∀ n m : ℕ, ∀ f, f ∈ ops n → ∀ g_tup : array n (operation m α), (∀ i, g_tup.read i ∈ ops m) → compose f g_tup ∈ ops m) -- testing proposition theorem clone_prop {α : Type} (ops : operation_set α) (hclone: is_clone ops) : (∃ (p : operation 3 α), p ∈ ops 3 ∧ ∀ x y, p [y,x,x].a = y ∧ p [y,x,y].a = y ∧ p [x,x,y].a = y ) → (∃ (m : operation 3 α), m ∈ ops 3 ∧ ∀ x y, m [y,x,x].a = x ∧ m [x,y,x].a = x ∧ m [x,x,y].a = x ) := let π₁ := projection α 3 (⟨0, by comp_val⟩ : fin 3) in let π₃ := projection α 3 (⟨2, by comp_val⟩ : fin 3) in begin intro h, cases h with p hp, cases hclone with has_proj has_compositions, cases hp, existsi compose p [π₁, p, π₃].a, constructor, -- m is in the clone have has_comp_concrete := has_compositions 3 3 p hp_left [π₁, p, π₃].a, apply has_comp_concrete, intro i, cases i, by_cases i_val = 0, subst i_val, show π₁ ∈ ops 3, from has_proj 3 0, by_cases i_val = 1, subst i_val, show p ∈ ops 3, by assumption, by_cases i_val = 2, subst i_val, show π₃ ∈ ops 3, from has_proj 3 2, have ineq := nat.not_lt_zero i_val, iterate 3 {have ineq := not_lt_cases ineq, cases ineq with neq ineq, contradiction}, contradiction, -- m satisfies the identities -- state the necessary identities have p1: ∀ (x y : α), p [y, x, 
x].a = y, intros, apply (hp_right x y).left, have p2: ∀ (x y : α), p [y, x, y].a = y, intros, apply (hp_right x y).right.left, have p3: ∀ (x y : α), p [x, x, y].a = y, intros, apply (hp_right x y).right.right, have pi1: ∀ (a b c : α), π₁ [a, b, c].a = a, intros, refl, have pi3: ∀ (a b c : α), π₃ [a, b, c].a = c, intros, refl, intros, simp [compose3_lemma], -- and apply them iterate 10 { /- a bit hacky, how to just rewrite all the (nested) occurences of (p1 p2 p3 pi1 pi3)? -/ try {rw p1}, try {rw p2}, try {rw p3}, try {rw pi1}, try {rw pi3}, }, simp, end
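The hint recorded in the comment above, m(a,b,c) = p(a, p(a,b,c), c), can be verified by hand; the following calculation uses only the three stated identities for p (instantiated with x and y interchanged where needed), and it is essentially what the repeated `rw p1/p2/p3/pi1/pi3` step performs once `compose3_lemma` has unfolded the composition:

\[
\begin{aligned}
m(y,x,x) &= p\bigl(y,\; p(y,x,x),\; x\bigr) = p(y,y,x) = x,\\
m(x,y,x) &= p\bigl(x,\; p(x,y,x),\; x\bigr) = p(x,x,x) = x,\\
m(x,x,y) &= p\bigl(x,\; p(x,x,y),\; y\bigr) = p(x,y,y) = x.
\end{aligned}
\]

In the first line, p(y,x,x) = y and then p(y,y,x) = x is the identity p(x,x,y) = y with the variables swapped; in the second, p(x,y,x) = x comes from p(y,x,y) = y and p(x,x,x) = x from p(y,x,x) = y; in the third, p(x,x,y) = y reduces the term to p(x,y,y), which equals x again by p(y,x,x) = y with the variables swapped.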
integer:: NN = 6, d, Rez = 0 integer(len=NN):: T do d = 0, NN if (T(d) > 0) Rez = Rez + 1 end do print *, "Rez=", Rez
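The fragment above is incomplete (T is declared but never initialised) and appears intended to count how many of the values T(0), …, T(NN) are positive. A minimal sketch of the same loop in Python, assuming T is simply an integer sequence supplied by the caller (the sample data below is illustrative only):

def count_positive(T):
    # Mirrors the loop above: count the entries of T that are strictly positive.
    rez = 0
    for d in range(len(T)):   # the original do-loop runs d = 0, ..., NN inclusive
        if T[d] > 0:
            rez += 1
    return rez

# NN = 6 in the fragment, i.e. seven values T(0..6); the data here is made up.
print("Rez=", count_positive([3, -1, 0, 7, 2, -5, 4]))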
Brazil nuts come from the Amazon region and are known for their antioxidant and moisturising properties. Terrapeutics hand creams contain highly concentrated plant extracts that promote regeneration, nourishment, revitalisation and moisturising care. Açai, rice-bran and passion-flower oils add antioxidants. The gentle formula is quickly absorbed and leaves hands scented and moisturised longer. No artificial colours or parabens. Aqua (Water), Cetearyl Alcohol, Glyceryl Stearate SE, Paraffinum Liquidum, C12-15 Alkyl Benzoate, Ceteareth-20, Caprylic/Capric Triglyceride, Passiflora Edulis Seed Oil, Oryza Sativa Bran Oil, Sorbitan Caprylate, Parfum (Fragrance), Euterpe Oleraceae Fruit Oil, Propanediol, Orbignya Oleifera Oil, Benzoic Acid, Propylene Glycol, Aminomethyl Propanol, BHT, Lavandula Angustifolia Extract, Citronellol, Coumarin, Eugenol, Geraniol, Hexyl Cinnamal, Linalool, Limonene, Butylphenyl Methylpropional, Alpha Isomethyl Ionone. Massage into hands until completely absorbed. Use as often as required all day long.
Formal statement is: lemma algebraic_int_cnj [intro]: assumes "algebraic_int x" shows "algebraic_int (cnj x)" Informal statement is: If $x$ is an algebraic integer, then $\overline{x}$ is an algebraic integer.
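The informal statement follows from a short standard computation (not necessarily the argument used in the formal proof): if x is an algebraic integer, choose a monic polynomial p with integer coefficients a_k and p(x) = 0; since each a_k is a real integer it is fixed by complex conjugation, so

\[
p(\overline{x}) \;=\; \sum_{k} a_k\,\overline{x}^{\,k} \;=\; \overline{\sum_{k} a_k\,x^{k}} \;=\; \overline{p(x)} \;=\; \overline{0} \;=\; 0,
\]

and the same monic polynomial witnesses that \(\overline{x} = \operatorname{cnj} x\) is an algebraic integer.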
(****************************************************************************** * Orca: A Functional Correctness Verifier for Imperative Programs * Based on Isabelle/UTP * * Copyright (c) 2016-2018 Virginia Tech, USA * 2016-2018 Technische Universität München, Germany * 2016-2018 University of York, UK * 2016-2018 Université Paris-Saclay, Univ. Paris-Sud, France * * This software may be distributed and modified according to the terms of * the GNU Lesser General Public License version 3.0 or any later version. * Note that NO WARRANTY is provided. * * See CONTRIBUTORS, LICENSE and CITATION files for details. ******************************************************************************) theory vcg imports "../../Midend-IVL/Isabelle-UTP-Extended/HoareLogic/TotalCorrectness/utp_hoare_ndes_prog" "~~/src/HOL/Eisbach/Eisbach_Tools" begin section \<open>VCG\<close> subsection \<open>VCG General Purpose Tactics\<close> text \<open>Automating premises insertion\<close> method_setup insert_assms = \<open>Scan.succeed (fn _ => CONTEXT_METHOD (fn facts => fn (ctxt,st) => let val tac = HEADGOAL (Method.insert_tac ctxt facts) val ctxt = Method.set_facts [] ctxt in Method.CONTEXT ctxt (tac st) end))\<close> text \<open>The defer processing and the thin_tac processing in the sequel was inspired by tutorial5.thy in Peter Lammich course \url{https://bitbucket.org/plammich/certprog_public/downloads/}\<close> subsection \<open>Deterministic Repeated Elimination Rule\<close> text \<open>Attention: Slightly different semantics than @{method elim}: repeats the rule as long as possible, but only on the first subgoal.\<close> method_setup vcg_elim_determ = \<open> Attrib.thms >> (fn thms => fn ctxt => SIMPLE_METHOD (REPEAT_DETERM1 (HEADGOAL (ematch_tac ctxt thms))))\<close> text \<open>The \<open>DETERM\<close> combinator on method level\<close> method_setup determ = \<open> Method.text_closure >> (fn (text) => fn ctxt => fn using => fn st => Seq.DETERM (Method.evaluate_runtime text ctxt using) st ) \<close> (*method insert_assms = tactic \<open>@{context} |> Assumption.all_prems_of |> (@{context} |> Method.insert_tac) |> FIRSTGOAL\<close>*) text \<open>vcg_can_defer is a tactic that succeed if the conclusion of a goal is not Hoare triple or if it has no DEFERRED markup\<close> definition DEFERRED :: "bool \<Rightarrow> bool" where "DEFERRED P = P" lemma DEFERREDD: "DEFERRED P \<Longrightarrow> P" by (auto simp: DEFERRED_def) (*TODO: FINISH THE PROTOTYPE OF THE DEBUGGER VERSION*) method vcg_can_defer_debugger = (match conclusion in "DEFERRED _" \<Rightarrow> \<open>print_term \<open>''DEFERRED''\<close>,fail\<close> -- \<open>Refuse to defer already deferred goals\<close> \<bar> "(\<lbrace>_\<rbrace>a\<lbrace>_\<rbrace>\<^sub>P)" for a \<Rightarrow> \<open>print_term "a",fail\<close> -- \<open>Refuse to defer Hoare_H1_H3 triples (They are no VCs!)\<close> \<bar> "\<lbrace>_\<rbrace>_\<lbrace>_\<rbrace>\<^sub>u" \<Rightarrow> \<open>print_term "''u''",fail\<close> -- \<open>Refuse to defer Hoare_rel triples (They are no VCs!)\<close> \<bar> "_" \<Rightarrow> \<open>print_term "''succeed''",succeed\<close>) method vcg_defer_debugger = (print_term "''vcg_avant''",vcg_can_defer_debugger, print_term "''vcg_apres''", rule DEFERREDD, tactic \<open>FIRSTGOAL defer_tac\<close>) method vcg_can_defer = (match conclusion in "DEFERRED _" \<Rightarrow> \<open>fail\<close> -- \<open>Refuse to defer already deferred goals\<close> \<bar> "\<lbrace>_\<rbrace>_\<lbrace>_\<rbrace>\<^sub>P" \<Rightarrow> \<open>fail\<close> -- 
\<open>Refuse to defer Hoare_H1_H3 triples (They are no VCs!)\<close> \<bar> "\<lbrace>_\<rbrace>_\<lbrace>_\<rbrace>\<^sub>u" \<Rightarrow> \<open>fail\<close> -- \<open>Refuse to defer Hoare_rel triples (They are no VCs!)\<close> \<bar> "_" \<Rightarrow> \<open>succeed\<close>) method vcg_defer = (vcg_can_defer,rule DEFERREDD, tactic \<open>FIRSTGOAL defer_tac\<close>) subsection \<open>VCG Post Processing Tactics\<close> text \<open>Tactics and methods in this section are used to do Post-Processing on the generated VCs. Namely, The application of symbolic execution laws from the theory usubst to the VCs in a very controlled way.\<close> lemma vwb_lens_weak[simp]: "vwb_lens x \<Longrightarrow> weak_lens x" by simp text \<open>substitution simplifier for debugging mode\<close> definition "ZERO_SUBST_TAG expr = True" definition "ONE_SUBST_TAG expr = True" definition "LIT_SUBST_TAG expr = True" definition "VAR_SUBST_TAG expr = True" definition "UOP_SUBST_TAG expr = True" definition "BOP_SUBST_TAG expr = True" definition "TROP_SUBST_TAG expr= True" definition "QTOP_SUBST_TAG expr = True" lemma ZERO_SUBST_DEBUG: "(ZERO_SUBST_TAG 0 \<Longrightarrow> thesis) \<Longrightarrow> thesis" unfolding ZERO_SUBST_TAG_def by blast lemma ONE_SUBST_DEBUG: "(ONE_SUBST_TAG 1 \<Longrightarrow> thesis) \<Longrightarrow> thesis" unfolding ONE_SUBST_TAG_def by blast lemma LIT_SUBST_DEBUG: "(LIT_SUBST_TAG (lit v) \<Longrightarrow> thesis) \<Longrightarrow> thesis" unfolding LIT_SUBST_TAG_def by blast lemma VAR_SUBST_DEBUG: "(VAR_SUBST_TAG (utp_expr.var x) \<Longrightarrow> thesis) \<Longrightarrow> thesis" unfolding VAR_SUBST_TAG_def by blast lemma UOP_SUBST_DEBUG: "(UOP_SUBST_TAG (uop f a) \<Longrightarrow> thesis) \<Longrightarrow> thesis" unfolding UOP_SUBST_TAG_def by blast lemma BOP_SUBST_DEBUG: "(BOP_SUBST_TAG (bop f a b) \<Longrightarrow> thesis) \<Longrightarrow> thesis" unfolding BOP_SUBST_TAG_def by blast lemma EQ_UPRED_SUBST_DEBUG: "(BOP_SUBST_TAG (eq_upred a b) \<Longrightarrow> thesis) \<Longrightarrow> thesis" unfolding BOP_SUBST_TAG_def by blast lemma TROP_SUBST_DEBUG: "(TROP_SUBST_TAG (trop f a b c) \<Longrightarrow> thesis) \<Longrightarrow> thesis" unfolding TROP_SUBST_TAG_def by blast lemma QTOP_SUBST_DEBUG: "(QTOP_SUBST_TAG (qtop f a b c d) \<Longrightarrow> thesis) \<Longrightarrow> thesis" unfolding QTOP_SUBST_TAG_def by blast method subst_debugger = (*Zero*) (match conclusion in "_ (\<lambda> _. subst _ 0)" \<Rightarrow> \<open>rule ZERO_SUBST_DEBUG , (simp only:subst_zero)\<close>) (*Zero*) | (match conclusion in "_ (\<lambda> _. subst _ 1)" \<Rightarrow> \<open>rule ONE_SUBST_DEBUG , (simp only:subst_one)\<close>) (*UTP vars*) |(match conclusion in "_ (\<lambda> _. (subst _ (utp_expr.var x)))" for x \<Rightarrow> \<open>rule VAR_SUBST_DEBUG[where x= x], (simp only:subst_var)\<close>) (*UTP Literals*) |(match conclusion in "_ (\<lambda> _ . (subst _ (lit v)))" for v \<Rightarrow> \<open>rule LIT_SUBST_DEBUG[where v= v], (simp only:subst_lit)\<close>) (*UTP Unary operation*) | (match conclusion in "_ (\<lambda> _ .(subst _ (uop f a)))" for f a \<Rightarrow> \<open>rule UOP_SUBST_DEBUG[where f= f and a = a], simp only: subst_uop\<close>) (*Derived UOP for UTP Logical Operators*) | (match conclusion in "_ (\<lambda> _ .(subst _ (\<not> a)))" for a \<Rightarrow> \<open>rule UOP_SUBST_DEBUG[where f= "Not" and a = a], simp only: subst_uop\<close>) | (match conclusion in "_ (\<lambda> _. 
(subst _ (\<^bold>\<forall>x \<bullet> P x)))" for P \<Rightarrow> \<open>rule UOP_SUBST_DEBUG[where f= "All" and a= "(\<lambda>x \<bullet> P x)" ] , simp only: utp_pred.subst_shAll\<close>) | (match conclusion in "_ (\<lambda> _. (subst _ (\<^bold>\<exists>x \<bullet> P x)))" for P \<Rightarrow> \<open>rule UOP_SUBST_DEBUG[where f= "Ex" and a= "(\<lambda>x \<bullet> P x)" ] , simp only: utp_pred.subst_shEx\<close>) (*UTP Binary operation*) | (match conclusion in "_ (\<lambda> _ . (subst _ (bop f a b)))" for f a b \<Rightarrow> \<open> rule BOP_SUBST_DEBUG[where f= f and a= a and b = b], simp only:subst_bop\<close>) (*Derived BOP for UTP Arith Operators*) | (match conclusion in "_ (\<lambda> _. (subst _ (a =\<^sub>u b)))" for a b \<Rightarrow> \<open>rule EQ_UPRED_SUBST_DEBUG[where a= a and b = b], simp only:subst_eq_upred\<close>) | (match conclusion in "_ (\<lambda> _. (subst _ (a + b)))" for a b \<Rightarrow> \<open>rule BOP_SUBST_DEBUG[where f= "(op +)" and a= a and b = b], simp only:subst_plus\<close>) | (match conclusion in "_ (\<lambda> _. (subst _ (a - b)))" for a b \<Rightarrow> \<open> rule BOP_SUBST_DEBUG[where f= "(op -)" and a= a and b = b], simp only:subst_minus\<close>) | (match conclusion in "_ (\<lambda> _. (subst _ (a * b)))" for a b \<Rightarrow> \<open>rule BOP_SUBST_DEBUG[where f= "(op *)" and a= a and b = b],simp only:subst_times \<close>) | (match conclusion in "_ (\<lambda> _. (subst _ (a div b)))" for a b \<Rightarrow> \<open>rule BOP_SUBST_DEBUG[where f= "(op div)" and a= a and b = b], simp only:subst_div\<close>) (*Derived BOP Logical Operators*) | (match conclusion in "_ (\<lambda> _. (subst _ (a \<and> b)))" for a b \<Rightarrow> \<open>rule BOP_SUBST_DEBUG[where f= "(op \<and>)" and a= a and b = b] , simp only: utp_pred.subst_conj\<close>) | (match conclusion in "_ (\<lambda> _. (subst _ (a \<or> b)))" for a b \<Rightarrow> \<open>rule BOP_SUBST_DEBUG[where f= "(op \<or>)" and a= a and b = b] , simp only: utp_pred.subst_disj\<close>) | (match conclusion in "_ (\<lambda> _. (subst _ (a \<Rightarrow> b)))" for a b \<Rightarrow> \<open>rule BOP_SUBST_DEBUG[where f= "(op \<longrightarrow>)" and a= a and b = b] , simp only: utp_pred.subst_impl\<close>) | (match conclusion in "_ (\<lambda> _. (subst _ (a \<Leftrightarrow> b)))" for a b \<Rightarrow> \<open>rule BOP_SUBST_DEBUG[where f= "(op \<longleftrightarrow>)" and a= a and b = b] , simp only: utp_pred.subst_iff\<close>) | (match conclusion in "_ (\<lambda> _. (subst _ (a \<sqinter> b)))" for a b \<Rightarrow> \<open>rule BOP_SUBST_DEBUG[where f= "(op \<sqinter>)" and a= a and b = b] , simp only: utp_pred.subst_sup\<close>) | (match conclusion in "_ (\<lambda> _. (subst _ (a \<squnion> b)))" for a b \<Rightarrow> \<open>rule BOP_SUBST_DEBUG[where f= "(op \<squnion>)" and a= a and b = b] , simp only: utp_pred.subst_inf\<close>) (*UTP Ternary operation*) | (match conclusion in "_ (\<lambda> _. (subst _ (trop f a b c)))" for f a b c\<Rightarrow> \<open>rule TROP_SUBST_DEBUG[where f=f and a=a and b=b and c=c] , simp only:subst_trop\<close>) (*UTP Quaternary operation*) | (match conclusion in "_ (\<lambda> _. 
(subst _ (qtop f a b c d)))" for f a b c d\<Rightarrow> \<open>rule QTOP_SUBST_DEBUG[where f=f and a=a and b=b and c=c and d=d], simp only:subst_qtop\<close>) definition "SUBST_ID_LOOKUP_TAG \<sigma> x = True" definition "SUBST_UPD_LOOKUP_TAG \<sigma> x y = True" definition "UEX_BOUNDED x = x" lemma SUBST_UPD_LOOKUP_DEBUG: "(SUBST_UPD_LOOKUP_TAG \<sigma> (UEX_BOUNDED x) y \<Longrightarrow> thesis) \<Longrightarrow> thesis" unfolding SUBST_UPD_LOOKUP_TAG_def by blast lemma SUBST_ID_LOOKUP_DEBUG: "(SUBST_ID_LOOKUP_TAG id y \<Longrightarrow> thesis) \<Longrightarrow> thesis" unfolding SUBST_ID_LOOKUP_TAG_def by blast method subst_lookup_debugger = (match conclusion in "_ (\<lambda> _. (usubst_lookup id y))" for y \<Rightarrow> \<open>rule SUBST_ID_LOOKUP_DEBUG[where y=y], (simp only: usubst_lookup_id)\<close>) | (match conclusion in "_ (\<lambda> _. (usubst_lookup (subst_upd \<sigma> x _) y))" for \<sigma> x y \<Rightarrow> \<open>rule SUBST_UPD_LOOKUP_DEBUG[where \<sigma>=\<sigma> and x=x and y=y], (simp only: usubst_lookup_upd_indep usubst_lookup_ovar_unrest usubst_lookup_ivar_unrest usubst_lookup_upd vwb_lens_mwb vwb_lens_wb lens_indep_sym)\<close>) text \<open>very well behaved lens simplifier for debugging mode\<close> definition "VWB_VAR_TAG x = True" definition "WB_VAR_TAG x = True" definition "WEAK_VAR_TAG x = True" definition "MWB_VAR_TAG x = True" lemma VWB_VAR_DEBUG: obtains e where "e = vwb_lens x " "VWB_VAR_TAG x" unfolding VWB_VAR_TAG_def by blast lemma MWB_VAR_DEBUG: obtains e where "e = mwb_lens x " "MWB_VAR_TAG x" unfolding MWB_VAR_TAG_def by blast lemma WB_VAR_DEBUG: obtains e where "e = wb_lens x " "WB_VAR_TAG x" unfolding WB_VAR_TAG_def by blast lemma WEAK_VAR_DEBUG: obtains e where "e = wb_lens x " "WEAK_VAR_TAG x" unfolding WEAK_VAR_TAG_def by blast method vwb_lens_debugger = (match conclusion in "vwb_lens x" for x \<Rightarrow> \<open>(rule VWB_VAR_DEBUG[where x= x],assumption) (*if this fails add a debug message here*)\<close>) |(match conclusion in "wb_lens x" for x \<Rightarrow> \<open>rule WB_VAR_DEBUG[where x= x],(simp only: vwb_lens_wb)(*if this fails add a debug message here*)\<close>) |(match conclusion in "mwb_lens x" for x \<Rightarrow> \<open>rule MWB_VAR_DEBUG[where x= x],(simp only: vwb_lens_mwb)(*if this fails add a debug message here*)\<close>) |(match conclusion in "mwb_lens x" for x \<Rightarrow> \<open>rule WEAK_VAR_DEBUG[where x= x],(simp only: vwb_lens_weak)(*if this fails add a debug message here*)\<close>) definition "WF_TAG expr = True" lemma WF_DEBUG: "(WF_TAG expr \<Longrightarrow> thesis) \<Longrightarrow> thesis" unfolding WF_TAG_def by simp method wf_debugger = (match conclusion in "wf expr" for expr \<Rightarrow> \<open>rule WF_DEBUG[where expr = expr], simp\<close>) text \<open>Post processing for debugging mode\<close> method vcg_upreds_post_processing_debugger = (vwb_lens_debugger(*TODO: if this fails add a debug message here*) |wf_debugger(*TODO: if this fails add a debug message here*) |subst_debugger(*TODO: if this fails add a debug message here*) |subst_lookup_debugger(*TODO: if this fails add a debug message here*)) text \<open>substitution simplifier for non debugging mode\<close> named_theorems usubst_simplifier declare usubst[usubst_simplifier] declare vwb_lens_weak[usubst_simplifier] declare vwb_lens_mwb[usubst_simplifier] declare vwb_lens_wb[usubst_simplifier] declare lens_indep_sym[usubst_simplifier] text \<open>very well behaved lens simplifier for non-debugging mode\<close> named_theorems vwb_simplifier declare 
vwb_lens_wb[vwb_simplifier] declare vwb_lens_mwb[vwb_simplifier] declare vwb_lens_weak[vwb_simplifier] declare bij_lens_vwb[vwb_simplifier] declare Lens_Algebra.id_bij_lens[vwb_simplifier] text \<open>Post processing for non debugging mode\<close> method vcg_upreds_post_processing = (assumption|simp only: vwb_simplifier) |simp |(simp only:usubst_simplifier) subsection \<open>VCG Goal Beautify Tactics\<close> text \<open>Tactics and methods in this section are used to beautify the goals before presenting them to the user. Namely, after the application of post-processing a lot of semantic machinery and debugging information remains existing in the different proof goals. The methods below clean it up since these assumptions are useless at this point.\<close> definition "LVAR L x = True" lemma GET_REMOVER: obtains x where "lens_get L s = x" "LVAR L x" unfolding LVAR_def by blast (*Prototype by Peter for variable renaming*) method_setup get_disambiguator = \<open>Scan.succeed (fn ctxt => SIMPLE_METHOD' (fn i => fn st => if i > Thm.nprems_of st then all_tac st else let fun cnv (Const (@{const_name Trueprop},_)$ (Const (@{const_name LVAR},_) $(Free (name,_)) $ Bound i)) = SOME (name,i) | cnv _ = NONE val (_, _, Bi, _) = Thm.dest_state (st, i) val free_names = Term.fold_aterms (fn Free (x, _) => insert (op =) x | _ => I) Bi []; val newnames = Logic.get_goal (Thm.prop_of st) i |> Logic.strip_assums_hyp |> map_filter cnv |> sort (apply2 snd #> int_ord #> rev_order) |> (fn newnames => fold_map (fn (name, i) => fn free_names => let fun aux n = if List.exists (fn n0 => n0 = n) free_names then aux (n ^ "'") else n val name = aux name in ((name, i), name :: free_names) end) newnames free_names) |> #1 |> map fst in rename_tac newnames i st end))\<close> (*Frederic's method for removing get functions from the goal. Modified by Yakoub.*) method get_remover = (match conclusion in "_ (put\<^bsub>x\<^esub> A (get\<^bsub>x\<^esub> _))" for x A \<Rightarrow> \<open>fail\<close> --{*In case that a proof engineer forget to specify LENS WELL BEHAVED assumptions*} \<bar>"_ (put\<^bsub>x\<^esub> (put\<^bsub>x\<^esub> A _ ) _)" for x A \<Rightarrow> \<open>fail\<close> --{*In case that a proof engineer forget to specify LENS WELL BEHAVED assumptions*} \<bar>"_ (get\<^bsub>_\<^esub> (put\<^bsub>x\<^esub> A _))" for x A \<Rightarrow> \<open>fail\<close> --{*In case that a proof engineer forget to specify LENS INDEP assumptions*} \<bar>"_ (get\<^bsub>x\<^esub> (put\<^bsub>_\<^esub> A _))" for x A \<Rightarrow> \<open>fail\<close> --{*In case that a proof engineer forget to specify LENS INDEP assumptions*} \<bar>"_ (get\<^bsub>x\<^esub> A)" for x A \<Rightarrow> \<open>rule GET_REMOVER[where L= x and s= A], simp only:\<close>)+, get_disambiguator, vcg_elim_determ thin_rl[of "lens_get _ _ = _"] thin_rl[of "LVAR _ _"] named_theorems beautify_thms lemma thin_vwb_lens[beautify_thms]: "vwb_lens l \<Longrightarrow> P \<Longrightarrow> P" . lemma thin_weak_lens[beautify_thms]: "weak_lens l \<Longrightarrow> P \<Longrightarrow> P" . lemma [beautify_thms]: "\<not> ief_lens i \<Longrightarrow> P \<Longrightarrow> P" . lemma [beautify_thms]: "i\<bowtie>j \<Longrightarrow> P \<Longrightarrow> P" . lemma [beautify_thms]: "i\<noteq>(j::_\<Longrightarrow>_) \<Longrightarrow> P \<Longrightarrow> P" . lemma [beautify_thms]: "i\<noteq>(j::_\<Longrightarrow>_) \<longrightarrow> i \<bowtie> j \<Longrightarrow> P \<Longrightarrow> P" . lemma [beautify_thms]: "get\<^bsub>i\<^esub> A = x \<Longrightarrow> P \<Longrightarrow> P" . 
subsection \<open>Custom UTP tactics for VCG\<close> text \<open>For debugging purpose we re-construct utp tactics. It allows controlled execution of these tactics.\<close> (*TODO: Debugging version of these tactics*) text \<open>a generic UTP tactic inspired by @{method gen_rel_tac} and @{method gen_pred_tac}\<close> method utp_tac_control methods utp_defs utp_transfer utp_rewrites utp_solve utp_interp = utp_defs? ; utp_transfer, utp_rewrites?, utp_interp?, utp_solve text \<open>For UTP transfer tactics see @{method slow_uexpr_transfer} and @{method fast_uexpr_transfer}\<close> text \<open>For UTP interp tactics see @{method uexpr_interp_tac}\<close> text \<open>For UTP solve tactics see @{method utp_simp_tac}\<close> subsubsection \<open> UTP predicates tactics\<close> method upreds_defs = (unfold upred_defs)[1] method upreds_rewrites = (simp add: fun_eq_iff lens_defs upred_defs alpha_splits Product_Type.split_beta) method upreds_simp = utp_tac_control upreds_defs fast_uexpr_transfer upreds_rewrites uexpr_interp_tac utp_simp_tac method upreds_simp_slow = utp_tac_control upreds_defs slow_uexpr_transfer upreds_rewrites uexpr_interp_tac utp_simp_tac method upreds_auto = utp_tac_control upreds_defs fast_uexpr_transfer upreds_rewrites uexpr_interp_tac utp_auto_tac method upreds_auto_slow = utp_tac_control upreds_defs slow_uexpr_transfer upreds_rewrites uexpr_interp_tac utp_auto_tac method upreds_blast = utp_tac_control upreds_defs fast_uexpr_transfer upreds_rewrites uexpr_interp_tac utp_blast_tac method upreds_blast_slow = utp_tac_control upreds_defs slow_uexpr_transfer upreds_rewrites uexpr_interp_tac utp_blast_tac subsubsection \<open> UTP relations tactics\<close> method urels_defs = (unfold upred_defs urel_defs)[1] method urels_rewrites = (simp add: fun_eq_iff relcomp_unfold OO_def lens_defs upred_defs alpha_splits Product_Type.split_beta) method urels_simp = utp_tac_control urels_defs fast_uexpr_transfer urels_rewrites uexpr_interp_tac utp_simp_tac method urels_simp_slow = utp_tac_control urels_defs slow_uexpr_transfer urels_rewrites uexpr_interp_tac utp_simp_tac method urels_auto = utp_tac_control urels_defs fast_uexpr_transfer urels_rewrites uexpr_interp_tac utp_auto_tac method urels_auto_slow = utp_tac_control urels_defs slow_uexpr_transfer urels_rewrites uexpr_interp_tac utp_auto_tac method urels_blast = utp_tac_control urels_defs fast_uexpr_transfer urels_rewrites uexpr_interp_tac utp_blast_tac method urels_blast_slow = utp_tac_control urels_defs slow_uexpr_transfer urels_rewrites uexpr_interp_tac utp_blast_tac subsection \<open>VCG Core Tactics\<close> text \<open>In this section we define the core tactics for the VCG. Namely, tactics for the computational mode such as weakest pre-condition and strongest post_condition rules. Also tactics for symbolic execution on the generated verification conditions are defined.\<close> method hoare_sp_vcg_pre = (simp only: seqr_assoc[symmetric])?, (rule post_weak_prog_hoare | rule post_weak_hoare_rel) method hoare_wp_vcg_pre = (simp only: seqr_assoc[symmetric])?, (rule pre_str_prog_hoare | rule pre_str_hoare_rel) method hoare_sp_rule_apply = rule hoare_sp_rules method hoare_wp_rule_apply = rule hoare_wp_rules method vcg_step methods vcg_reasoning_method = (vcg_reasoning_method | vcg_defer) text \<open>A one step vcg without post processing nor debugging information. 
The output of this method is: a upred.\<close> method hoare_sp_vcg_step = vcg_step hoare_sp_rule_apply method hoare_wp_vcg_step = vcg_step hoare_wp_rule_apply text \<open>A multiple step vcg without post processing nor debugging information. The output of this method is proof goals of the form of upreds.\<close> method sp = hoare_sp_vcg_pre, hoare_sp_vcg_step+ , (unfold DEFERRED_def) method wp = hoare_wp_vcg_pre, hoare_wp_vcg_step+ , (unfold DEFERRED_def) named_theorems lens_laws_vcg_simps lemmas [lens_laws_vcg_simps] = lens_indep.lens_put_irr1 lens_indep.lens_put_irr2 method vcg_hol_post_processing_debugger = (upreds_simp)?, (simp only: lens_laws_vcg_simps)? named_theorems lens_get_lens_put_simplifer declare lens_laws_vcg_simps[lens_get_lens_put_simplifer] declare vwb_simplifier[lens_get_lens_put_simplifer] declare mwb_lens.put_put[lens_get_lens_put_simplifer] declare weak_lens.put_get[lens_get_lens_put_simplifer] declare wb_lens.get_put[lens_get_lens_put_simplifer] declare bij_lens.strong_get_put[lens_get_lens_put_simplifer] method vcg_hol_post_processing = (upreds_simp)?, (auto simp only: lens_get_lens_put_simplifer)? text \<open>Lens indep all simplifier for non debugging mode\<close> named_theorems lens_indep_all_simplifier declare distinct.simps[lens_indep_all_simplifier] declare HOL.conj_assoc[lens_indep_all_simplifier] declare HOL.simp_thms[lens_indep_all_simplifier] declare List.list.set[lens_indep_all_simplifier] declare Set.ball_simps[lens_indep_all_simplifier] declare Set.insert_iff[lens_indep_all_simplifier] declare Set.empty_iff [lens_indep_all_simplifier] method symbolic_execution = ((unfold pr_var_def in_var_def out_var_def)?, (unfold lens_indep_all_alt)?, ((simp only: lens_indep_all_simplifier)+)?, clarsimp?, (vcg_upreds_post_processing+)?, vcg_hol_post_processing(*TODO: ADD SOLVING STEP HERE*)) method vcg methods vcg_reasoning_method = (vcg_reasoning_method ; symbolic_execution); get_remover?; (vcg_elim_determ beautify_thms)? end
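(* A hedged usage sketch, not taken from the Orca sources; it would have to sit
   before the theory's closing "end". It exercises only the DEFERRED marker and
   the vcg_defer method defined above, on plain HOL goals: vcg_defer wraps the
   first subgoal in DEFERRED and moves it to the end, so a verification condition
   can be postponed and discharged after the remaining goals. *)
lemma "(A \<longrightarrow> A) \<and> (B \<longrightarrow> B)"
  apply (rule conjI)
   apply vcg_defer (* the first conjunct becomes DEFERRED and is moved last *)
   apply simp (* discharges B \<longrightarrow> B *)
  apply (unfold DEFERRED_def)
  apply simp
  done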
/- Copyright (c) 2020 Yury G. Kudryashov. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Yury G. Kudryashov, Patrick Massot -/ import data.set.intervals.basic /-! # Projection of a line onto a closed interval Given a linearly ordered type `α`, in this file we define * `set.proj_Icc (a b : α) (h : a ≤ b)` to be the map `α → [a, b]` sending `(-∞, a]` to `a`, `[b, ∞)` to `b`, and each point `x ∈ [a, b]` to itself; * `set.Icc_extend {a b : α} (h : a ≤ b) (f : Icc a b → β)` to be the extension of `f` to `α` defined as `f ∘ proj_Icc a b h`. We also prove some trivial properties of these maps. -/ variables {α β : Type*} [linear_order α] open function namespace set /-- Projection of `α` to the closed interval `[a, b]`. -/ def proj_Icc (a b : α) (h : a ≤ b) (x : α) : Icc a b := ⟨max a (min b x), le_max_left _ _, max_le h (min_le_left _ _)⟩ variables {a b : α} (h : a ≤ b) {x : α} lemma proj_Icc_of_le_left (hx : x ≤ a) : proj_Icc a b h x = ⟨a, left_mem_Icc.2 h⟩ := by simp [proj_Icc, hx, hx.trans h] @[simp] lemma proj_Icc_left : proj_Icc a b h a = ⟨a, left_mem_Icc.2 h⟩ := proj_Icc_of_le_left h le_rfl lemma proj_Icc_of_right_le (hx : b ≤ x) : proj_Icc a b h x = ⟨b, right_mem_Icc.2 h⟩ := by simp [proj_Icc, hx, h] @[simp] lemma proj_Icc_right : proj_Icc a b h b = ⟨b, right_mem_Icc.2 h⟩ := proj_Icc_of_right_le h le_rfl lemma proj_Icc_eq_left (h : a < b) : proj_Icc a b h.le x = ⟨a, left_mem_Icc.mpr h.le⟩ ↔ x ≤ a := begin refine ⟨λ h', _, proj_Icc_of_le_left _⟩, simp_rw [subtype.ext_iff_val, proj_Icc, max_eq_left_iff, min_le_iff, h.not_le, false_or] at h', exact h' end lemma proj_Icc_eq_right (h : a < b) : proj_Icc a b h.le x = ⟨b, right_mem_Icc.mpr h.le⟩ ↔ b ≤ x := begin refine ⟨λ h', _, proj_Icc_of_right_le _⟩, simp_rw [subtype.ext_iff_val, proj_Icc] at h', have := ((max_choice _ _).resolve_left (by simp [h.ne', h'])).symm.trans h', exact min_eq_left_iff.mp this end lemma proj_Icc_of_mem (hx : x ∈ Icc a b) : proj_Icc a b h x = ⟨x, hx⟩ := by simp [proj_Icc, hx.1, hx.2] @[simp] lemma proj_Icc_coe (x : Icc a b) : proj_Icc a b h x = x := by { cases x, apply proj_Icc_of_mem } lemma proj_Icc_surj_on : surj_on (proj_Icc a b h) (Icc a b) univ := λ x _, ⟨x, x.2, proj_Icc_coe h x⟩ lemma proj_Icc_surjective : surjective (proj_Icc a b h) := λ x, ⟨x, proj_Icc_coe h x⟩ @[simp] lemma range_proj_Icc : range (proj_Icc a b h) = univ := (proj_Icc_surjective h).range_eq lemma monotone_proj_Icc : monotone (proj_Icc a b h) := λ x y hxy, max_le_max le_rfl $ min_le_min le_rfl hxy lemma strict_mono_on_proj_Icc : strict_mono_on (proj_Icc a b h) (Icc a b) := λ x hx y hy hxy, by simpa only [proj_Icc_of_mem, hx, hy] /-- Extend a function `[a, b] → β` to a map `α → β`. 
-/ def Icc_extend {a b : α} (h : a ≤ b) (f : Icc a b → β) : α → β := f ∘ proj_Icc a b h @[simp] lemma Icc_extend_range (f : Icc a b → β) : range (Icc_extend h f) = range f := by simp [Icc_extend, range_comp f] lemma Icc_extend_of_le_left (f : Icc a b → β) (hx : x ≤ a) : Icc_extend h f x = f ⟨a, left_mem_Icc.2 h⟩ := congr_arg f $ proj_Icc_of_le_left h hx @[simp] lemma Icc_extend_left (f : Icc a b → β) : Icc_extend h f a = f ⟨a, left_mem_Icc.2 h⟩ := Icc_extend_of_le_left h f le_rfl lemma Icc_extend_of_right_le (f : Icc a b → β) (hx : b ≤ x) : Icc_extend h f x = f ⟨b, right_mem_Icc.2 h⟩ := congr_arg f $ proj_Icc_of_right_le h hx @[simp] lemma Icc_extend_right (f : Icc a b → β) : Icc_extend h f b = f ⟨b, right_mem_Icc.2 h⟩ := Icc_extend_of_right_le h f le_rfl lemma Icc_extend_of_mem (f : Icc a b → β) (hx : x ∈ Icc a b) : Icc_extend h f x = f ⟨x, hx⟩ := congr_arg f $ proj_Icc_of_mem h hx @[simp] lemma Icc_extend_coe (f : Icc a b → β) (x : Icc a b) : Icc_extend h f x = f x := congr_arg f $ proj_Icc_coe h x end set open set variables [preorder β] {a b : α} (h : a ≤ b) {f : Icc a b → β} lemma monotone.Icc_extend (hf : monotone f) : monotone (Icc_extend h f) := hf.comp $ monotone_proj_Icc h lemma strict_mono.strict_mono_on_Icc_extend (hf : strict_mono f) : strict_mono_on (Icc_extend h f) (Icc a b) := hf.comp_strict_mono_on (strict_mono_on_proj_Icc h)
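-- A hedged illustration, not part of the original file: these `example`s only
-- re-invoke the lemmas proved above, spelling out how `Icc_extend` clamps
-- arguments outside `[a, b]` to the value of `f` at the nearest endpoint.
example {x : α} (hx : x ≤ a) : Icc_extend h f x = f ⟨a, left_mem_Icc.2 h⟩ :=
Icc_extend_of_le_left h f hx

example {x : α} (hx : b ≤ x) : Icc_extend h f x = f ⟨b, right_mem_Icc.2 h⟩ :=
Icc_extend_of_right_le h f hx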
Formal statement is: lemma Bseq_mono_convergent: "Bseq X \<Longrightarrow> (\<forall>m n. m \<le> n \<longrightarrow> X m \<le> X n) \<Longrightarrow> convergent X" for X :: "nat \<Rightarrow> real" Informal statement is: If a bounded sequence of real numbers is monotonically nondecreasing, then it converges.
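A hedged concrete instance, not part of the original statement pair: the real sequence X n = 1 - 1 / (n + 1) is bounded (|X n| ≤ 1, so Bseq X holds) and nondecreasing (m ≤ n implies X m ≤ X n), so the lemma yields convergent X; here the limit is 1.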
## consumer TopicPartition = Tuple{String, Int} mutable struct KafkaConsumer client::KafkaClient rkparlist::Ptr{Cvoid} end function Base.setproperty!(c::KafkaConsumer, f::Symbol, v) f == :rkparlist || error("KafkaConsumer.$f unassignable") c.rkparlist == C_NULL || kafka_topic_partition_list_destroy(c.rkparlist) v isa Ptr{Cvoid} && return setfield!(c, f, v) rkparlist = kafka_topic_partition_list_new(length(v)) for (t, p) in v kafka_topic_partition_list_add(rkparlist, t, p) end setfield!(c, f, rkparlist) end function KafkaConsumer(conf::Dict) @assert haskey(conf, "bootstrap.servers") "`bootstrap.servers` should be specified in conf" @assert haskey(conf, "group.id") "`group.id` should be specified in conf" client = KafkaClient(KAFKA_TYPE_CONSUMER, conf) consumer = KafkaConsumer(client, C_NULL) finalizer(c -> c.rkparlist = C_NULL, consumer) return consumer end function KafkaConsumer(bootstrap_servers::String, group_id::String, conf::Dict=Dict()) conf["bootstrap.servers"] = bootstrap_servers conf["group.id"] = group_id return KafkaConsumer(conf) end function Base.show(io::IO, c::KafkaConsumer) group_id = c.client.conf["group.id"] bootstrap_servers = c.client.conf["bootstrap.servers"] print(io, "KafkaConsumer($group_id @ $bootstrap_servers)") end function subscribe(c::KafkaConsumer, tpars::Vector{TopicPartition}) c.rkparlist = tpars kafka_subscribe(c.client.rk, c.rkparlist) end function assign(c::KafkaConsumer, tpars::Vector{TopicPartition}) c.rkparlist = tpars kafka_assign(c.client.rk, c.rkparlist) end function poll(::Type{K}, ::Type{P}, c::KafkaConsumer, timeout::Int=1000) where {K,P} c_msg_ptr = kafka_consumer_poll(c.client.rk, timeout) if c_msg_ptr != nothing c_msg = unsafe_load(c_msg_ptr) msg = Message{K,P}(c_msg) kafka_message_destroy(c_msg_ptr) return msg else return nothing end end poll(c::KafkaConsumer, timeout::Int=1000) = poll(Vector{UInt8}, Vector{UInt8}, c, timeout) function Base.seek(c::KafkaConsumer, topic_partition::TopicPartition, offset::Integer, timeout::Integer=1000) topic, par = topic_partition kafka_topic_partition_list_find(c.rkparlist, topic, par) || error("Seek on an assigned/subscribed topic partition $topic_partition") kt = KafkaTopic(c.client, topic) kafka_seek(kt.rkt, par, offset, timeout) end
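# Hedged usage sketch, not part of the original file: it only calls the API defined
# above (KafkaConsumer, subscribe, poll); the broker address, group id and topic
# name are illustrative assumptions, and key/payload types default to Vector{UInt8}.
c = KafkaConsumer("localhost:9092", "demo-group")
subscribe(c, [("demo-topic", 0)])
for _ in 1:10
    msg = poll(c, 1000)           # returns `nothing` when no message arrives within the timeout
    msg === nothing && continue
    @show msg                     # a Message{Vector{UInt8},Vector{UInt8}}
end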
module dump_module ! overloading interface ! dump data arrays of various types interface dump module procedure dump_integers module procedure dump_reals module procedure dump_strings end interface dump contains subroutine dump_integers(n, x) implicit none ! dummy arguments integer, intent(in) :: n integer, intent(in), dimension(n) :: x ! local data integer :: i ! processing do i = 1, n write (*,*) x(i) end do end subroutine dump_integers subroutine dump_reals(n, x) implicit none ! dummy arguments integer, intent(in) :: n double precision, intent(in), dimension(n) :: x ! local data integer :: i ! processing do i = 1, n write (*,*) x(i) end do end subroutine dump_reals subroutine dump_strings(n, str) implicit none ! dummy arguments integer, intent(in) :: n character (len = *), intent(in), dimension(n) :: str ! local data integer :: i ! processing do i = 1, n write (*,*) TRIM(str(i)) end do end subroutine dump_strings end module dump_module
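! Hedged usage sketch, not part of the original module: the generic name dump
! resolves to dump_integers, dump_reals or dump_strings from the array argument's type.
program demo_dump
  use dump_module
  implicit none
  integer, dimension(3) :: iv = (/ 1, 2, 3 /)
  double precision, dimension(2) :: rv = (/ 1.5d0, 2.5d0 /)
  character (len = 5), dimension(2) :: sv = (/ 'alpha', 'gamma' /)
  call dump(3, iv)
  call dump(2, rv)
  call dump(2, sv)
end program demo_dump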
theory mdom_mran imports Function_Model_Base mapp begin context Function_Model begin (*MOVE TO mSet.thy*) lemma msetI_eq : assumes "x = <set,x'>" "<set,x'> : M" shows "x : mSet" using msetI assms by auto (* ------------- *) subsection \<open>Domain of model Functions\<close> definition mdom' :: \<open>'a \<Rightarrow> 'a\<close> where "mdom' f \<equiv> <set, dom (snd f)>" definition mdom :: \<open>'a \<Rightarrow> 'a\<close> where "mdom f \<equiv> if f : mFunc then mdom' f else Function_Model_mdefault" lemma mdom_eq : assumes f : "f : mFunc" shows "mdom f = <set, dom (snd f)>" unfolding mdom_def mdom'_def using f by auto lemma mdom_eq_pair : assumes f : "<func, f'> : mFunc" shows "mdom <func,f'> = <set, dom f'>" using mdom_eq[OF f] mfunc_snd_eq[OF f] by auto lemma mdom_typ : "mdom : mFunc \<rightarrow> mSet" proof (rule funI) fix f assume f : "f : mFunc" then obtain f' j where f' : "f' : Function" and f_eq : "f = <func, f'>" and j : "j : Ord" and dom : "dom f' \<subseteq> Tier j \<ominus> func" using mfuncE2 by metis hence "<set, dom f'> : M" using mI_mset[OF j dom_set[OF f'] ex_func_set_trans[OF tier_set]] by auto thus "mdom f : mSet" using msetI mdom_eq_pair f unfolding f_eq by auto qed lemmas mdom_mset = funE[OF mdom_typ] lemma mdom_typ_ax : "m\<forall>x. x : mFunc \<longrightarrow> mdom x : mSet" unfolding mall_def tall_def using mdom_mset by auto lemma mdom_iff : assumes f : "f : mFunc" shows "b m mdom f \<longleftrightarrow> (\<exists>c. mapp f b c)" proof (rule mfuncE2[OF f]) fix f' j assume f' : "f' : Function" and f_eq : "f = <func, f'>" show "(b m mdom f) = (\<exists>c. mapp f b c)" proof assume "b m mdom f" hence "b \<in> dom f'" using mdom_eq_pair f f_eq mmemD by auto then obtain c where "app f' b c" using domE[OF f'] by auto hence "mapp f b c" using mappI_pair f f_eq by auto thus "\<exists>c. mapp f b c" by auto next assume "\<exists>c. mapp f b c" then obtain c where "app f' b c" using mappD_pair f f_eq by auto hence "b \<in> dom f'" using domI[OF f'] by auto thus "b m mdom f" using mdom_eq_pair f mmemI_eq[OF mdom_mset] unfolding f_eq by auto qed qed lemma mdom_ax : "m\<forall>f : mFunc. m\<forall>x. (x m mdom f) = (m\<exists>y. mapp f x y)" unfolding mtall_def mall_def tall_def mex_def tex_def using mdom_iff mapp_m by metis subsection \<open>Range of model Functions\<close> definition mran' :: \<open>'a \<Rightarrow> 'a\<close> where "mran' f \<equiv> <set, ran (snd f)>" definition mran :: \<open>'a \<Rightarrow> 'a\<close> where "mran f \<equiv> if f : mFunc then mran' f else Function_Model_mdefault" lemma mran_eq : assumes f : "f : mFunc" shows "mran f = <set, ran (snd f)>" unfolding mran_def mran'_def using f by auto lemma mran_eq_pair : assumes f : "<func, f'> : mFunc" shows "mran <func,f'> = <set, ran f'>" using mran_eq[OF f] mfunc_snd_eq[OF f] by auto lemma mran_typ : "mran : mFunc \<rightarrow> mSet" proof (rule funI) fix f assume f : "f : mFunc" then obtain f' j where f' : "f' : Function" and f_eq : "f = <func, f'>" and j : "j : Ord" and ran : "ran f' \<subseteq> Tier j \<ominus> func" using mfuncE2 by metis hence "<set, ran f'> : M" using mI_mset[OF j ran_set[OF f'] ex_func_set_trans[OF tier_set]] by auto thus "mran f : mSet" using msetI mran_eq_pair f unfolding f_eq by auto qed lemmas mran_mset = funE[OF mran_typ] lemma mran_typ_ax : "m\<forall>x. x : mFunc \<longrightarrow> mran x : mSet" unfolding mall_def tall_def using mran_mset by auto lemma mran_iff : assumes f : "f : mFunc" shows "c m mran f \<longleftrightarrow> (\<exists>b. 
mapp f b c)" proof (rule mfuncE2[OF f]) fix f' j assume f' : "f' : Function" and f_eq : "f = <func, f'>" show "(c m mran f) = (\<exists>b. mapp f b c)" proof assume "c m mran f" hence "c \<in> ran f'" using mran_eq_pair f f_eq mmemD by auto then obtain b where "app f' b c" using ranE[OF f'] by auto hence "mapp f b c" using mappI_pair f f_eq by auto thus "\<exists>b. mapp f b c" by auto next assume "\<exists>b. mapp f b c" then obtain b where "app f' b c" using mappD_pair f f_eq by auto hence "c \<in> ran f'" using ranI[OF f'] by auto thus "c m mran f" using mran_eq_pair f mmemI_eq[OF mran_mset] unfolding f_eq by auto qed qed lemma mran_ax : "m\<forall>f : mFunc. m\<forall>y. (y m mran f) = (m\<exists>x. mapp f x y)" unfolding mtall_def mall_def tall_def mex_def tex_def using mran_iff mapp_m by metis lemma mdom_rsp : "f : M \<Longrightarrow> mdom f : M" using mset_m[OF mdom_mset] Function_Model_mdefault_m unfolding mdom_def by auto lemma mran_rsp : "f : M \<Longrightarrow> mran f : M" using mset_m[OF mran_mset] Function_Model_mdefault_m unfolding mran_def by auto end end
----------------------------------------------------------------------------- -- | -- Module : Numeric.LinearAlgebra.Packed.ST -- Copyright : Copyright (c) 2010, Patrick Perry <[email protected]> -- License : BSD3 -- Maintainer : Patrick Perry <[email protected]> -- Stability : experimental -- -- Mutable packed matrices. -- module Numeric.LinearAlgebra.Packed.ST ( -- * Mutable packed matrices STPacked, IOPacked, create, -- * Read-only packed matrices RPacked(..), -- * Conversions between mutable and immutable packed matrices freeze, -- * Creating new packed matrices new_, -- * Copying matrices newCopy, -- * Vector views of packed matrices withVectorM, -- * Packed matrix views of vectors withFromVector, withFromVectorM, ) where import Numeric.LinearAlgebra.Packed.Base
-- This example used to produce -- `error: (kernel) declaration has metavariables` -- Because we were not registering postponed instance metavariables def f {α : Type u} (a : α) : α := let g a b := a + b g a a
/- Copyright (c) 2022 Yaël Dillies, Junyan Xu. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Yaël Dillies, Junyan Xu ! This file was ported from Lean 3 source module order.extension.well ! leanprover-community/mathlib commit 740acc0e6f9adf4423f92a485d0456fc271482da ! Please do not edit these lines, except to modify the commit id ! if you have ported upstream changes. -/ import Mathlib.Data.Prod.Lex import Mathlib.SetTheory.Ordinal.Arithmetic /-! # Extend a well-founded order to a well-order This file constructs a well-order (linear well-founded order) which is an extension of a given well-founded order. ## Proof idea We can map our order into two well-orders: * the first map respects the order but isn't necessarily injective. Namely, this is the *rank* function `WellFounded.rank : α → Ordinal`. * the second map is injective but doesn't necessarily respect the order. This is an arbitrary embedding into `Cardinal` given by `embeddingToCardinal`. Then their lexicographic product is a well-founded linear order which our original order injects in. ## Porting notes The definition in `mathlib` 3 used an auxiliary well-founded order on `α` lifted from `Cardinal` instead of `Cardinal`. The new definition is definitionally equal to the `mathlib` 3 version but avoids non-standard instances. ## Tags well founded relation, well order, extension -/ universe u variable {α : Type u} {r : α → α → Prop} namespace WellFounded variable (hwf : WellFounded r) /-- An arbitrary well order on `α` that extends `r`. The construction maps `r` into two well-orders: the first map is `WellFounded.rank`, which is not necessarily injective but respects the order `r`; the other map is the identity (with an arbitrarily chosen well-order on `α`), which is injective but doesn't respect `r`. By taking the lexicographic product of the two, we get both properties, so we can pull it back and get an well-order that extend our original order `r`. Another way to view this is that we choose an arbitrary well-order to serve as a tiebreak between two elements of same rank. -/ noncomputable def wellOrderExtension : LinearOrder α := @LinearOrder.lift' α (Ordinal ×ₗ Cardinal) _ (fun a : α => (hwf.rank a, embeddingToCardinal a)) fun _ _ h => embeddingToCardinal.injective <| congr_arg Prod.snd h #align well_founded.well_order_extension WellFounded.wellOrderExtension instance wellOrderExtension.isWellFounded_lt : IsWellFounded α hwf.wellOrderExtension.lt := ⟨InvImage.wf (fun a : α => (hwf.rank a, embeddingToCardinal a)) <| Ordinal.lt_wf.prod_lex Cardinal.lt_wf⟩ #align well_founded.well_order_extension.is_well_founded_lt WellFounded.wellOrderExtension.isWellFounded_lt /-- Any well-founded relation can be extended to a well-ordering on that type. -/ theorem exists_well_order_ge : ∃ s, r ≤ s ∧ IsWellOrder α s := ⟨hwf.wellOrderExtension.lt, fun _ _ h => Prod.Lex.left _ _ (hwf.rank_lt_of_rel h), ⟨⟩⟩ #align well_founded.exists_well_order_ge WellFounded.exists_well_order_ge end WellFounded /-- A type alias for `α`, intended to extend a well-founded order on `α` to a well-order. -/ def WellOrderExtension (α : Type _) : Type _ := α #align well_order_extension WellOrderExtension instance [Inhabited α] : Inhabited (WellOrderExtension α) := ‹_› /-- "Identity" equivalence between a well-founded order and its well-order extension. 
-/ def toWellOrderExtension : α ≃ WellOrderExtension α := Equiv.refl _ #align to_well_order_extension toWellOrderExtension noncomputable instance [LT α] [WellFoundedLT α] : LinearOrder (WellOrderExtension α) := (IsWellFounded.wf : @WellFounded α (· < ·)).wellOrderExtension instance WellOrderExtension.wellFoundedLT [LT α] [WellFoundedLT α] : WellFoundedLT (WellOrderExtension α) := WellFounded.wellOrderExtension.isWellFounded_lt _ #align well_order_extension.well_founded_lt WellOrderExtension.wellFoundedLT theorem toWellOrderExtension_strictMono [Preorder α] [WellFoundedLT α] : StrictMono (toWellOrderExtension : α → WellOrderExtension α) := fun _ _ h => Prod.Lex.left _ _ <| WellFounded.rank_lt_of_rel _ h #align to_well_order_extension_strict_mono toWellOrderExtension_strictMono
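-- A hedged illustration, not from the Mathlib file: it only re-uses
-- `toWellOrderExtension_strictMono` to spell out that the "identity" map into
-- `WellOrderExtension α` carries the well-founded order into a linear order
-- without losing any strict inequalities.
example [Preorder α] [WellFoundedLT α] {a b : α} (hab : a < b) :
    toWellOrderExtension a < toWellOrderExtension b :=
  toWellOrderExtension_strictMono hab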
module Circuits.Common.Parser import public Data.Nat import public Text.Parser import public Toolkit.Data.Location import public Toolkit.Text.Lexer.Run import public Toolkit.Text.Parser.Support import public Toolkit.Text.Parser.Location import public Toolkit.Text.Parser.Run import public Toolkit.Data.Whole import Circuits.Common.Lexer import Circuits.Common %default total namespace Circuits public export Rule : Type -> Type Rule = Rule Unit Token public export RuleEmpty : Type -> Type RuleEmpty = RuleEmpty Unit Token export eoi : RuleEmpty Unit eoi = eoi isEOI where isEOI : Token -> Bool isEOI EndInput = True isEOI _ = False namespace API export symbol : String -> Rule Unit symbol str = terminal ("Expected Symbol '" ++ str ++ "'") (\x => case x of Symbol s => if s == str then Just MkUnit else Nothing _ => Nothing) export nat : Rule Nat nat = terminal "Expected nat literal" (\x => case x of LitNat i => Just i _ => Nothing) export keyword : String -> Rule Builtin.Unit keyword str = terminal ("Expected Keyword '" ++ str ++ "'") (\x => case x of Keyword s => if s == str then Just Builtin.MkUnit else Nothing _ => Nothing) export identifier : Rule String identifier = terminal "Expected Identifier" (\x => case x of ID str => Just str _ => Nothing) export name : Rule String name = identifier export ref : Rule Ref ref = do s <- Toolkit.location n <- name e <- Toolkit.location pure (MkRef (newFC s e) n) export gives : String -> a -> Rule a gives s ctor = do keyword s pure ctor export inserts : Rule a -> (a -> b) -> Rule b inserts value ctor = do v <- value pure (ctor v) export whole : Rule Whole whole = do n <- nat isWhole n where isWhole : Nat -> RuleEmpty Whole isWhole Z = fail "expected whole" isWhole (S n) = pure (W (S n) ItIsSucc) export sFooter : Location -> Rule FileContext sFooter s = do symbol ")" symbol ";" e <- Toolkit.location pure (newFC s e) namespace Types mutual logic : Rule DType logic = gives "logic" LOGIC array : Rule DType array = do ty <- logic ns <- indices pure (arraytype ty ns) where mustBeZero : Nat -> Whole -> RuleEmpty Whole mustBeZero Z (W w prf) = pure (W (S w) ItIsSucc) mustBeZero (S n) w = fail "No ranges or big endian supported" index : Rule Whole index = do symbol "[" n <- whole symbol ":" a <- nat symbol "]" mustBeZero a n indices : Rule (List1 Whole) indices = some index arraytype : DType -> List1 Whole -> DType arraytype ty (x:::xs) = foldl (\ty, n => BVECT n ty) ty (x::xs) export type : Rule DType type = array <|> logic -- [ EOF ]
State Before: R : Type u S : Type v σ : Type u_1 τ : Type ?u.157002 r : R e : ℕ n m : σ s : σ →₀ ℕ inst✝¹ : CommSemiring R p✝ q✝ : MvPolynomial σ R inst✝ : DecidableEq σ p q : MvPolynomial σ R ⊢ vars (p + q) ⊆ vars p ∪ vars q State After: R : Type u S : Type v σ : Type u_1 τ : Type ?u.157002 r : R e : ℕ n m : σ s : σ →₀ ℕ inst✝¹ : CommSemiring R p✝ q✝ : MvPolynomial σ R inst✝ : DecidableEq σ p q : MvPolynomial σ R x : σ hx : x ∈ vars (p + q) ⊢ x ∈ vars p ∪ vars q Tactic: intro x hx State Before: R : Type u S : Type v σ : Type u_1 τ : Type ?u.157002 r : R e : ℕ n m : σ s : σ →₀ ℕ inst✝¹ : CommSemiring R p✝ q✝ : MvPolynomial σ R inst✝ : DecidableEq σ p q : MvPolynomial σ R x : σ hx : x ∈ vars (p + q) ⊢ x ∈ vars p ∪ vars q State After: R : Type u S : Type v σ : Type u_1 τ : Type ?u.157002 r : R e : ℕ n m : σ s : σ →₀ ℕ inst✝¹ : CommSemiring R p✝ q✝ : MvPolynomial σ R inst✝ : DecidableEq σ p q : MvPolynomial σ R x : σ hx : x ∈ degrees (p + q) ⊢ x ∈ degrees p ∨ x ∈ degrees q Tactic: simp only [vars_def, Finset.mem_union, Multiset.mem_toFinset] at hx⊢ State Before: R : Type u S : Type v σ : Type u_1 τ : Type ?u.157002 r : R e : ℕ n m : σ s : σ →₀ ℕ inst✝¹ : CommSemiring R p✝ q✝ : MvPolynomial σ R inst✝ : DecidableEq σ p q : MvPolynomial σ R x : σ hx : x ∈ degrees (p + q) ⊢ x ∈ degrees p ∨ x ∈ degrees q State After: no goals Tactic: simpa using Multiset.mem_of_le (degrees_add _ _) hx
State Before: C : Type u_2 inst✝¹ : Category C f : Arrow C inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom S : SplitEpi f.hom n : ℕ ⊢ (s f S n ≫ WidePullback.base fun x => f.hom) = WidePullback.base fun x => f.hom State After: no goals Tactic: apply WidePullback.lift_base
[STATEMENT] lemma rel_interior_convex_cone: fixes S :: "'m::euclidean_space set" assumes "convex S" shows "rel_interior (cone hull ({1 :: real} \<times> S)) = {(c, c *\<^sub>R x) | c x. c > 0 \<and> x \<in> rel_interior S}" (is "?lhs = ?rhs") [PROOF STATE] proof (prove) goal (1 subgoal): 1. rel_interior (cone hull {1} \<times> S) = {(c, c *\<^sub>R x) |c x. 0 < c \<and> x \<in> rel_interior S} [PROOF STEP] proof - [PROOF STATE] proof (state) goal (1 subgoal): 1. rel_interior (cone hull {1} \<times> S) = {(c, c *\<^sub>R x) |c x. 0 < c \<and> x \<in> rel_interior S} [PROOF STEP] { [PROOF STATE] proof (state) goal (1 subgoal): 1. rel_interior (cone hull {1} \<times> S) = {(c, c *\<^sub>R x) |c x. 0 < c \<and> x \<in> rel_interior S} [PROOF STEP] fix z [PROOF STATE] proof (state) goal (1 subgoal): 1. rel_interior (cone hull {1} \<times> S) = {(c, c *\<^sub>R x) |c x. 0 < c \<and> x \<in> rel_interior S} [PROOF STEP] assume "z \<in> ?lhs" [PROOF STATE] proof (state) this: z \<in> rel_interior (cone hull {1} \<times> S) goal (1 subgoal): 1. rel_interior (cone hull {1} \<times> S) = {(c, c *\<^sub>R x) |c x. 0 < c \<and> x \<in> rel_interior S} [PROOF STEP] have *: "z = (fst z, snd z)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. z = (fst z, snd z) [PROOF STEP] by auto [PROOF STATE] proof (state) this: z = (fst z, snd z) goal (1 subgoal): 1. rel_interior (cone hull {1} \<times> S) = {(c, c *\<^sub>R x) |c x. 0 < c \<and> x \<in> rel_interior S} [PROOF STEP] then [PROOF STATE] proof (chain) picking this: z = (fst z, snd z) [PROOF STEP] have "z \<in> ?rhs" [PROOF STATE] proof (prove) using this: z = (fst z, snd z) goal (1 subgoal): 1. z \<in> {(c, c *\<^sub>R x) |c x. 0 < c \<and> x \<in> rel_interior S} [PROOF STEP] using rel_interior_convex_cone_aux[of S "fst z" "snd z"] assms \<open>z \<in> ?lhs\<close> [PROOF STATE] proof (prove) using this: z = (fst z, snd z) convex S \<Longrightarrow> ((fst z, snd z) \<in> rel_interior (cone hull {1} \<times> S)) = (0 < fst z \<and> snd z \<in> (*\<^sub>R) (fst z) ` rel_interior S) convex S z \<in> rel_interior (cone hull {1} \<times> S) goal (1 subgoal): 1. z \<in> {(c, c *\<^sub>R x) |c x. 0 < c \<and> x \<in> rel_interior S} [PROOF STEP] by fastforce [PROOF STATE] proof (state) this: z \<in> {(c, c *\<^sub>R x) |c x. 0 < c \<and> x \<in> rel_interior S} goal (1 subgoal): 1. rel_interior (cone hull {1} \<times> S) = {(c, c *\<^sub>R x) |c x. 0 < c \<and> x \<in> rel_interior S} [PROOF STEP] } [PROOF STATE] proof (state) this: ?z2 \<in> rel_interior (cone hull {1} \<times> S) \<Longrightarrow> ?z2 \<in> {(c, c *\<^sub>R x) |c x. 0 < c \<and> x \<in> rel_interior S} goal (1 subgoal): 1. rel_interior (cone hull {1} \<times> S) = {(c, c *\<^sub>R x) |c x. 0 < c \<and> x \<in> rel_interior S} [PROOF STEP] moreover [PROOF STATE] proof (state) this: ?z2 \<in> rel_interior (cone hull {1} \<times> S) \<Longrightarrow> ?z2 \<in> {(c, c *\<^sub>R x) |c x. 0 < c \<and> x \<in> rel_interior S} goal (1 subgoal): 1. rel_interior (cone hull {1} \<times> S) = {(c, c *\<^sub>R x) |c x. 0 < c \<and> x \<in> rel_interior S} [PROOF STEP] { [PROOF STATE] proof (state) this: ?z2 \<in> rel_interior (cone hull {1} \<times> S) \<Longrightarrow> ?z2 \<in> {(c, c *\<^sub>R x) |c x. 0 < c \<and> x \<in> rel_interior S} goal (1 subgoal): 1. rel_interior (cone hull {1} \<times> S) = {(c, c *\<^sub>R x) |c x. 0 < c \<and> x \<in> rel_interior S} [PROOF STEP] fix z [PROOF STATE] proof (state) goal (1 subgoal): 1. rel_interior (cone hull {1} \<times> S) = {(c, c *\<^sub>R x) |c x. 
0 < c \<and> x \<in> rel_interior S} [PROOF STEP] assume "z \<in> ?rhs" [PROOF STATE] proof (state) this: z \<in> {(c, c *\<^sub>R x) |c x. 0 < c \<and> x \<in> rel_interior S} goal (1 subgoal): 1. rel_interior (cone hull {1} \<times> S) = {(c, c *\<^sub>R x) |c x. 0 < c \<and> x \<in> rel_interior S} [PROOF STEP] then [PROOF STATE] proof (chain) picking this: z \<in> {(c, c *\<^sub>R x) |c x. 0 < c \<and> x \<in> rel_interior S} [PROOF STEP] have "z \<in> ?lhs" [PROOF STATE] proof (prove) using this: z \<in> {(c, c *\<^sub>R x) |c x. 0 < c \<and> x \<in> rel_interior S} goal (1 subgoal): 1. z \<in> rel_interior (cone hull {1} \<times> S) [PROOF STEP] using rel_interior_convex_cone_aux[of S "fst z" "snd z"] assms [PROOF STATE] proof (prove) using this: z \<in> {(c, c *\<^sub>R x) |c x. 0 < c \<and> x \<in> rel_interior S} convex S \<Longrightarrow> ((fst z, snd z) \<in> rel_interior (cone hull {1} \<times> S)) = (0 < fst z \<and> snd z \<in> (*\<^sub>R) (fst z) ` rel_interior S) convex S goal (1 subgoal): 1. z \<in> rel_interior (cone hull {1} \<times> S) [PROOF STEP] by auto [PROOF STATE] proof (state) this: z \<in> rel_interior (cone hull {1} \<times> S) goal (1 subgoal): 1. rel_interior (cone hull {1} \<times> S) = {(c, c *\<^sub>R x) |c x. 0 < c \<and> x \<in> rel_interior S} [PROOF STEP] } [PROOF STATE] proof (state) this: ?z2 \<in> {(c, c *\<^sub>R x) |c x. 0 < c \<and> x \<in> rel_interior S} \<Longrightarrow> ?z2 \<in> rel_interior (cone hull {1} \<times> S) goal (1 subgoal): 1. rel_interior (cone hull {1} \<times> S) = {(c, c *\<^sub>R x) |c x. 0 < c \<and> x \<in> rel_interior S} [PROOF STEP] ultimately [PROOF STATE] proof (chain) picking this: ?z2 \<in> rel_interior (cone hull {1} \<times> S) \<Longrightarrow> ?z2 \<in> {(c, c *\<^sub>R x) |c x. 0 < c \<and> x \<in> rel_interior S} ?z2 \<in> {(c, c *\<^sub>R x) |c x. 0 < c \<and> x \<in> rel_interior S} \<Longrightarrow> ?z2 \<in> rel_interior (cone hull {1} \<times> S) [PROOF STEP] show ?thesis [PROOF STATE] proof (prove) using this: ?z2 \<in> rel_interior (cone hull {1} \<times> S) \<Longrightarrow> ?z2 \<in> {(c, c *\<^sub>R x) |c x. 0 < c \<and> x \<in> rel_interior S} ?z2 \<in> {(c, c *\<^sub>R x) |c x. 0 < c \<and> x \<in> rel_interior S} \<Longrightarrow> ?z2 \<in> rel_interior (cone hull {1} \<times> S) goal (1 subgoal): 1. rel_interior (cone hull {1} \<times> S) = {(c, c *\<^sub>R x) |c x. 0 < c \<and> x \<in> rel_interior S} [PROOF STEP] by blast [PROOF STATE] proof (state) this: rel_interior (cone hull {1} \<times> S) = {(c, c *\<^sub>R x) |c x. 0 < c \<and> x \<in> rel_interior S} goal: No subgoals! [PROOF STEP] qed
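A hedged sanity check, not part of the Isabelle proof above: take S = [0, 1] in the reals. Then cone hull ({1} × S) = {(c, y). c ≥ 0 ∧ 0 ≤ y ≤ c}, a two-dimensional convex cone, whose relative interior (here the ordinary interior) is {(c, y). c > 0 ∧ 0 < y < c}. Since rel_interior [0, 1] = (0, 1), this is exactly {(c, c *\<^sub>R x) | c x. 0 < c ∧ x ∈ rel_interior S}, as the lemma states.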
{- The module CanThetaContinuation contains the continuation-passing variant of Canθ, which is used as a tool to simplify Canθ-Can expressions. The lemmas are mainly about the function Canθ' defined by the equation unfold : ∀ sigs S'' p θ → Canθ sigs S'' p θ ≡ Canθ' sigs S'' (Can p) θ The main property proved here is that the search function Canθ is distributive over the environment: canθ'-←-distribute : ∀ sigs sigs' S'' r θ → Canθ (SigMap.union sigs sigs') S'' r θ ≡ Canθ' sigs S'' (Canθ sigs' S'' r) θ Other properties about how the search is performed are: canθ'-inner-shadowing-irr : ∀ sigs S'' sigs' p S status θ → S ∈ SigMap.keys sigs' → Canθ' sigs S'' (Canθ sigs' 0 p) (θ ← [ (S ₛ) ↦ status ]) ≡ Canθ' sigs S'' (Canθ sigs' 0 p) θ canθ'-search-acc : ∀ sigs S κ θ → ∀ S'' status → S'' ∉ map (_+_ S) (SigMap.keys sigs) → Canθ' sigs S κ (θ ← [ (S'' ₛ) ↦ status ]) ≡ Canθ' sigs S (κ ∘ (_← [ (S'' ₛ) ↦ status ])) θ -} module Esterel.Lang.CanFunction.CanThetaContinuation where open import utility open import utility renaming (_U̬_ to _∪_ ; _|̌_ to _-_) open import Esterel.Lang open import Esterel.Lang.Binding open import Esterel.Lang.CanFunction open import Esterel.Lang.CanFunction.Base open import Esterel.Context using (EvaluationContext1 ; EvaluationContext ; _⟦_⟧e ; _≐_⟦_⟧e) open import Esterel.Context.Properties using (plug ; unplug) open import Esterel.Environment as Env using (Env ; Θ ; _←_ ; Dom ; module SigMap ; module ShrMap ; module VarMap) open import Esterel.CompletionCode as Code using () renaming (CompletionCode to Code) open import Esterel.Variable.Signal as Signal using (Signal ; _ₛ) open import Esterel.Variable.Shared as SharedVar using (SharedVar ; _ₛₕ) open import Esterel.Variable.Sequential as SeqVar using (SeqVar) open EvaluationContext1 open _≐_⟦_⟧e open import Data.Bool using (Bool ; not ; if_then_else_) open import Data.Empty using (⊥ ; ⊥-elim) open import Data.List using (List ; [] ; _∷_ ; _++_ ; map ; concatMap ; foldr) open import Data.List.Properties using (map-id) open import Data.List.Any using (Any ; any ; here ; there) open import Data.List.Any.Properties using () renaming (++⁺ˡ to ++ˡ ; ++⁺ʳ to ++ʳ) open import Data.Maybe using (Maybe ; maybe ; just ; nothing) open import Data.Nat using (ℕ ; zero ; suc ; _≟_ ; _+_) open import Data.Nat.Properties.Simple using (+-comm) open import Data.Product using (Σ ; proj₁ ; proj₂ ; ∃ ; _,_ ; _,′_ ; _×_) open import Data.Sum using (_⊎_ ; inj₁ ; inj₂) open import Function using (_∘_ ; id ; _∋_) open import Relation.Nullary using (¬_ ; Dec ; yes ; no) open import Relation.Nullary.Decidable using (⌊_⌋) open import Relation.Binary.PropositionalEquality using (_≡_ ; _≢_ ; refl ; trans ; sym ; cong ; subst ; module ≡-Reasoning) open ListSet Data.Nat._≟_ using (set-subtract ; set-subtract-[] ; set-subtract-split ; set-subtract-merge ; set-subtract-notin ; set-remove ; set-remove-mono-∈ ; set-remove-removed ; set-remove-not-removed ; set-subtract-[a]≡set-remove) open import Data.OrderedListMap Signal Signal.unwrap Signal.Status as SigM open import Data.OrderedListMap SharedVar SharedVar.unwrap (Σ SharedVar.Status (λ _ → ℕ)) as ShrM open import Data.OrderedListMap SeqVar SeqVar.unwrap ℕ as SeqM open ≡-Reasoning -- equation: Canθ sig S'' p θ = Canθ' sig S'' (Can p) θ Canθ' : SigMap.Map Signal.Status → ℕ → (Env → SigSet.ST × CodeSet.ST × ShrSet.ST) → Env → SigSet.ST × CodeSet.ST × ShrSet.ST Canθ' [] S κ θ = κ θ Canθ' (nothing ∷ sig') S κ θ = Canθ' sig' (suc S) κ θ Canθ' (just Signal.present ∷ sig') S κ θ = Canθ' sig' (suc S) κ (θ ← 
[S]-env-present (S ₛ)) Canθ' (just Signal.absent ∷ sig') S κ θ = Canθ' sig' (suc S) κ (θ ← [S]-env-absent (S ₛ)) Canθ' (just Signal.unknown ∷ sig') S κ θ with any (_≟_ S) (proj₁ (Canθ' sig' (suc S) κ (θ ← [S]-env (S ₛ)))) ... | yes S∈can-p-θ←[S] = Canθ' sig' (suc S) κ (θ ← [S]-env (S ₛ)) ... | no S∉can-p-θ←[S] = Canθ' sig' (suc S) κ (θ ← [S]-env-absent (S ₛ)) unfold : ∀ sigs S'' p θ → Canθ sigs S'' p θ ≡ Canθ' sigs S'' (Can p) θ unfold [] S'' p θ = refl unfold (nothing ∷ sigs) S'' p θ = unfold sigs (suc S'') p θ unfold (just Signal.present ∷ sigs) S'' p θ = unfold sigs (suc S'') p (θ ← [S]-env-present (S'' ₛ)) unfold (just Signal.absent ∷ sigs) S'' p θ = unfold sigs (suc S'') p (θ ← [S]-env-absent (S'' ₛ)) unfold (just Signal.unknown ∷ sigs) S'' p θ with any (_≟_ S'') (proj₁ (Canθ sigs (suc S'') p (θ ← [S]-env (S'' ₛ)))) | any (_≟_ S'') (proj₁ (Canθ' sigs (suc S'') (Can p) (θ ← [S]-env (S'' ₛ)))) ... | yes S''∈canθ-sigs-θ←[S''] | yes S''∈canθ'-sigs-θ←[S''] = unfold sigs (suc S'') p (θ ← [S]-env (S'' ₛ)) ... | no S''∉canθ-sigs-θ←[S''] | no S''∉canθ'-sigs-θ←[S''] = unfold sigs (suc S'') p (θ ← [S]-env-absent (S'' ₛ)) ... | yes S''∈canθ-sigs-θ←[S''] | no S''∉canθ'-sigs-θ←[S''] rewrite unfold sigs (suc S'') p (θ ← [S]-env (S'' ₛ)) = ⊥-elim (S''∉canθ'-sigs-θ←[S''] S''∈canθ-sigs-θ←[S'']) ... | no S''∉canθ-sigs-θ←[S''] | yes S''∈canθ'-sigs-θ←[S''] rewrite unfold sigs (suc S'') p (θ ← [S]-env (S'' ₛ)) = ⊥-elim (S''∉canθ-sigs-θ←[S''] S''∈canθ'-sigs-θ←[S'']) canθ'-cong : ∀ sigs S'' κ κ' θ → (∀ θ* → κ θ* ≡ κ' θ*) → Canθ' sigs S'' κ θ ≡ Canθ' sigs S'' κ' θ canθ'-cong [] S'' κ κ' θ κ≗κ' = κ≗κ' θ canθ'-cong (nothing ∷ sigs) S'' κ κ' θ κ≗κ' = canθ'-cong sigs (suc S'') κ κ' θ κ≗κ' canθ'-cong (just Signal.present ∷ sigs) S'' κ κ' θ κ≗κ' = canθ'-cong sigs (suc S'') κ κ' (θ ← [S]-env-present (S'' ₛ)) κ≗κ' canθ'-cong (just Signal.absent ∷ sigs) S'' κ κ' θ κ≗κ' = canθ'-cong sigs (suc S'') κ κ' (θ ← [S]-env-absent (S'' ₛ)) κ≗κ' canθ'-cong (just Signal.unknown ∷ sigs) S'' κ κ' θ κ≗κ' with any (_≟_ S'') (proj₁ (Canθ' sigs (suc S'') κ (θ ← [S]-env (S'' ₛ)))) | any (_≟_ S'') (proj₁ (Canθ' sigs (suc S'') κ' (θ ← [S]-env (S'' ₛ)))) ... | yes S''∈canθ'-sigs-κ-θ←[S''] | yes S''∈canθ'-sigs-κ'-θ←[S''] = canθ'-cong sigs (suc S'') κ κ' (θ ← [S]-env (S'' ₛ)) κ≗κ' ... | no S''∉canθ'-sigs-κ-θ←[S''] | no S''∉canθ'-sigs-κ'-θ←[S''] = canθ'-cong sigs (suc S'') κ κ' (θ ← [S]-env-absent (S'' ₛ)) κ≗κ' ... | yes S''∈canθ'-sigs-κ-θ←[S''] | no S''∉canθ'-sigs-κ'-θ←[S''] rewrite canθ'-cong sigs (suc S'') κ κ' (θ ← [S]-env (S'' ₛ)) κ≗κ' = ⊥-elim (S''∉canθ'-sigs-κ'-θ←[S''] S''∈canθ'-sigs-κ-θ←[S'']) ... | no S''∉canθ'-sigs-κ-θ←[S''] | yes S''∈canθ'-sigs-κ'-θ←[S''] rewrite canθ'-cong sigs (suc S'') κ κ' (θ ← [S]-env (S'' ₛ)) κ≗κ' = ⊥-elim (S''∉canθ'-sigs-κ-θ←[S''] S''∈canθ'-sigs-κ'-θ←[S'']) canθₛ'-cong : ∀ sigs S'' κ κ' θ → (∀ θ* → proj₁ (κ θ*) ≡ proj₁ (κ' θ*)) → proj₁ (Canθ' sigs S'' κ θ) ≡ proj₁ (Canθ' sigs S'' κ' θ) canθₛ'-cong [] S'' κ κ' θ κ≗κ' = κ≗κ' θ canθₛ'-cong (nothing ∷ sigs) S'' κ κ' θ κ≗κ' = canθₛ'-cong sigs (suc S'') κ κ' θ κ≗κ' canθₛ'-cong (just Signal.present ∷ sigs) S'' κ κ' θ κ≗κ' = canθₛ'-cong sigs (suc S'') κ κ' (θ ← [S]-env-present (S'' ₛ)) κ≗κ' canθₛ'-cong (just Signal.absent ∷ sigs) S'' κ κ' θ κ≗κ' = canθₛ'-cong sigs (suc S'') κ κ' (θ ← [S]-env-absent (S'' ₛ)) κ≗κ' canθₛ'-cong (just Signal.unknown ∷ sigs) S'' κ κ' θ κ≗κ' with any (_≟_ S'') (proj₁ (Canθ' sigs (suc S'') κ (θ ← [S]-env (S'' ₛ)))) | any (_≟_ S'') (proj₁ (Canθ' sigs (suc S'') κ' (θ ← [S]-env (S'' ₛ)))) ... 
| yes S''∈canθ'-sigs-κ-θ←[S''] | yes S''∈canθ'-sigs-κ'-θ←[S''] = canθₛ'-cong sigs (suc S'') κ κ' (θ ← [S]-env (S'' ₛ)) κ≗κ' ... | no S''∉canθ'-sigs-κ-θ←[S''] | no S''∉canθ'-sigs-κ'-θ←[S''] = canθₛ'-cong sigs (suc S'') κ κ' (θ ← [S]-env-absent (S'' ₛ)) κ≗κ' ... | yes S''∈canθ'-sigs-κ-θ←[S''] | no S''∉canθ'-sigs-κ'-θ←[S''] rewrite canθₛ'-cong sigs (suc S'') κ κ' (θ ← [S]-env (S'' ₛ)) κ≗κ' = ⊥-elim (S''∉canθ'-sigs-κ'-θ←[S''] S''∈canθ'-sigs-κ-θ←[S'']) ... | no S''∉canθ'-sigs-κ-θ←[S''] | yes S''∈canθ'-sigs-κ'-θ←[S''] rewrite canθₛ'-cong sigs (suc S'') κ κ' (θ ← [S]-env (S'' ₛ)) κ≗κ' = ⊥-elim (S''∉canθ'-sigs-κ-θ←[S''] S''∈canθ'-sigs-κ'-θ←[S'']) canθ'-map-comm : ∀ f sigs S κ θ → Canθ' sigs S (map-second f ∘ κ) θ ≡ map-second f (Canθ' sigs S κ θ) canθ'-map-comm f [] S κ θ = refl canθ'-map-comm f (nothing ∷ sigs) S κ θ = canθ'-map-comm f sigs (suc S) κ θ canθ'-map-comm f (just Signal.present ∷ sigs) S κ θ = canθ'-map-comm f sigs (suc S) κ (θ ← [S]-env-present (S ₛ)) canθ'-map-comm f (just Signal.absent ∷ sigs) S κ θ = canθ'-map-comm f sigs (suc S) κ (θ ← [S]-env-absent (S ₛ)) canθ'-map-comm f (just Signal.unknown ∷ sigs) S κ θ with any (_≟_ S) (proj₁ (Canθ' sigs (suc S) (map-second f ∘ κ) (θ ← [S]-env (S ₛ)))) | any (_≟_ S) (proj₁ (Canθ' sigs (suc S) κ (θ ← [S]-env (S ₛ)))) ... | yes S∈canθ'-sigs-f∘κ-θ←[S] | yes S∈canθ'-sigs-κ-θ←[S] = canθ'-map-comm f sigs (suc S) κ (θ ← [S]-env (S ₛ)) ... | no S∉canθ'-sigs-f∘κ-θ←[S] | no S∉canθ'-sigs-κ-θ←[S] = canθ'-map-comm f sigs (suc S) κ (θ ← [S]-env-absent (S ₛ)) ... | yes S∈canθ'-sigs-f∘κ-θ←[S] | no S∉canθ'-sigs-κ-θ←[S] rewrite canθ'-map-comm f sigs (suc S) κ (θ ← [S]-env (S ₛ)) = ⊥-elim (S∉canθ'-sigs-κ-θ←[S] S∈canθ'-sigs-f∘κ-θ←[S]) ... | no S∉canθ'-sigs-f∘κ-θ←[S] | yes S∈canθ'-sigs-κ-θ←[S] rewrite canθ'-map-comm f sigs (suc S) κ (θ ← [S]-env (S ₛ)) = ⊥-elim (S∉canθ'-sigs-f∘κ-θ←[S] S∈canθ'-sigs-κ-θ←[S]) canθ'ₛ-add-sig-monotonic : ∀ sigs S'' κ θ S status → (∀ θ S status S' → S' ∈ proj₁ (κ (θ ← Θ SigMap.[ S ↦ status ] ShrMap.empty VarMap.empty)) → S' ∈ proj₁ (κ (θ ← [S]-env S))) → ∀ S' → S' ∈ proj₁ (Canθ' sigs S'' κ (θ ← Θ SigMap.[ S ↦ status ] ShrMap.empty VarMap.empty)) → S' ∈ proj₁ (Canθ' sigs S'' κ (θ ← [S]-env S)) canθ'ₛ-add-sig-monotonic [] S'' κ θ S status κ-add-sig-monotonic S' S'∈canθ'-sigs-p-θ←[S↦status] = κ-add-sig-monotonic θ S status S' S'∈canθ'-sigs-p-θ←[S↦status] canθ'ₛ-add-sig-monotonic (nothing ∷ sigs) S'' κ θ S status κ-add-sig-monotonic S' S'∈canθ'-sigs-p-θ←[S↦status] = canθ'ₛ-add-sig-monotonic sigs (suc S'') κ θ S status κ-add-sig-monotonic S' S'∈canθ'-sigs-p-θ←[S↦status] canθ'ₛ-add-sig-monotonic (just x ∷ sigs) S'' κ θ S status κ-add-sig-monotonic S' S'∈canθ'-sigs-p-θ←[S↦status] with Signal.unwrap S ≟ S'' canθ'ₛ-add-sig-monotonic (just Signal.present ∷ sigs) S'' κ θ S status κ-add-sig-monotonic S' S'∈canθ'-sigs-p-θ←[S↦status] | yes refl rewrite Env.sig-single-←-←-overwrite θ (S'' ₛ) Signal.unknown Signal.present | Env.sig-single-←-←-overwrite θ (S'' ₛ) status Signal.present = S'∈canθ'-sigs-p-θ←[S↦status] canθ'ₛ-add-sig-monotonic (just Signal.absent ∷ sigs) S'' κ θ S status κ-add-sig-monotonic S' S'∈canθ'-sigs-p-θ←[S↦status] | yes refl rewrite Env.sig-single-←-←-overwrite θ (S'' ₛ) Signal.unknown Signal.absent | Env.sig-single-←-←-overwrite θ (S'' ₛ) status Signal.absent = S'∈canθ'-sigs-p-θ←[S↦status] canθ'ₛ-add-sig-monotonic (just Signal.unknown ∷ sigs) S'' κ θ S status κ-add-sig-monotonic S' S'∈canθ'-sigs-p-θ←[S↦status] | yes refl with any (_≟_ S'') (proj₁ (Canθ' sigs (suc S'') κ ((θ ← [S]-env (S'' ₛ)) ← [S]-env (S'' ₛ)))) | any (_≟_ S'') 
(proj₁ (Canθ' sigs (suc S'') κ ((θ ← [ (S'' ₛ) ↦ status ]) ← [S]-env (S'' ₛ)))) ... | yes S''∈canθ'-sigs-κ-θ←[S''↦unknown]←[S''] | yes S''∈canθ'-sigs-κ-θ←[S''↦status]←[S''] rewrite Env.sig-single-←-←-overwrite θ (S'' ₛ) Signal.unknown Signal.unknown | Env.sig-single-←-←-overwrite θ (S'' ₛ) status Signal.unknown = S'∈canθ'-sigs-p-θ←[S↦status] ... | no S''∉canθ'-sigs-κ-θ←[S''↦unknown]←[S''] | no S''∉canθ'-sigs-κ-θ←[S''↦status]←[S''] rewrite Env.sig-single-←-←-overwrite θ (S'' ₛ) Signal.unknown Signal.absent | Env.sig-single-←-←-overwrite θ (S'' ₛ) status Signal.absent = S'∈canθ'-sigs-p-θ←[S↦status] ... | yes S''∈canθ'-sigs-κ-θ←[S''↦unknown]←[S''] | no S''∉canθ'-sigs-κ-θ←[S''↦status]←[S''] rewrite Env.sig-single-←-←-overwrite θ (S'' ₛ) Signal.unknown Signal.unknown | Env.sig-single-←-←-overwrite θ (S'' ₛ) status Signal.unknown = ⊥-elim (S''∉canθ'-sigs-κ-θ←[S''↦status]←[S''] S''∈canθ'-sigs-κ-θ←[S''↦unknown]←[S'']) ... | no S''∉canθ'-sigs-κ-θ←[S''↦unknown]←[S''] | yes S''∈canθ'-sigs-κ-θ←[S''↦status]←[S''] rewrite Env.sig-single-←-←-overwrite θ (S'' ₛ) Signal.unknown Signal.unknown | Env.sig-single-←-←-overwrite θ (S'' ₛ) status Signal.unknown = ⊥-elim (S''∉canθ'-sigs-κ-θ←[S''↦unknown]←[S''] S''∈canθ'-sigs-κ-θ←[S''↦status]←[S'']) canθ'ₛ-add-sig-monotonic (just Signal.present ∷ sigs) S'' κ θ S status κ-add-sig-monotonic S' S'∈canθ'-sigs-p-θ←[S↦status] | no S≢S'' rewrite Env.←-assoc-comm θ ([S]-env S) ([S]-env-present (S'' ₛ)) (Env.sig-single-noteq-distinct S Signal.unknown (S'' ₛ) Signal.present S≢S'') = canθ'ₛ-add-sig-monotonic sigs (suc S'') κ (θ ← [S]-env-present (S'' ₛ)) S status κ-add-sig-monotonic S' (subst (S' ∈_) (cong (proj₁ ∘ Canθ' sigs (suc S'') κ) (Env.←-assoc-comm θ [ S ↦ status ] ([S]-env-present (S'' ₛ)) (Env.sig-single-noteq-distinct S status (S'' ₛ) Signal.present S≢S''))) S'∈canθ'-sigs-p-θ←[S↦status]) canθ'ₛ-add-sig-monotonic (just Signal.absent ∷ sigs) S'' κ θ S status κ-add-sig-monotonic S' S'∈canθ'-sigs-p-θ←[S↦status] | no S≢S'' rewrite Env.←-assoc-comm θ ([S]-env S) ([S]-env-absent (S'' ₛ)) (Env.sig-single-noteq-distinct S Signal.unknown (S'' ₛ) Signal.absent S≢S'') = canθ'ₛ-add-sig-monotonic sigs (suc S'') κ (θ ← [S]-env-absent (S'' ₛ)) S status κ-add-sig-monotonic S' (subst (S' ∈_) (cong (proj₁ ∘ Canθ' sigs (suc S'') κ) (Env.←-assoc-comm θ [ S ↦ status ] ([S]-env-absent (S'' ₛ)) (Env.sig-single-noteq-distinct S status (S'' ₛ) Signal.absent S≢S''))) S'∈canθ'-sigs-p-θ←[S↦status]) canθ'ₛ-add-sig-monotonic (just Signal.unknown ∷ sigs) S'' κ θ S status κ-add-sig-monotonic S' S'∈canθ'-sigs-p-θ←[S↦status] | no S≢S'' with any (_≟_ S'') (proj₁ (Canθ' sigs (suc S'') κ ((θ ← [S]-env S) ← [S]-env (S'' ₛ)))) | any (_≟_ S'') (proj₁ (Canθ' sigs (suc S'') κ ((θ ← [ S ↦ status ]) ← [S]-env (S'' ₛ)))) ... | yes S''∈canθ'-sigs-κ-θ←[S↦unknown]←[S''] | yes S''∈canθ'-sigs-κ-θ←[S↦status]←[S''] rewrite Env.←-assoc-comm θ ([S]-env S) ([S]-env (S'' ₛ)) (Env.sig-single-noteq-distinct S Signal.unknown (S'' ₛ) Signal.unknown S≢S'') = canθ'ₛ-add-sig-monotonic sigs (suc S'') κ (θ ← [S]-env (S'' ₛ)) S status κ-add-sig-monotonic S' (subst (S' ∈_) (cong (proj₁ ∘ Canθ' sigs (suc S'') κ) (Env.←-assoc-comm θ [ S ↦ status ] ([S]-env (S'' ₛ)) (Env.sig-single-noteq-distinct S status (S'' ₛ) Signal.unknown S≢S''))) S'∈canθ'-sigs-p-θ←[S↦status]) ... 
| no S''∉canθ'-sigs-κ-θ←[S↦unknown]←[S''] | no S''∉canθ'-sigs-κ-θ←[S↦status]←[S''] rewrite Env.←-assoc-comm θ ([S]-env S) ([S]-env-absent (S'' ₛ)) (Env.sig-single-noteq-distinct S Signal.unknown (S'' ₛ) Signal.absent S≢S'') = canθ'ₛ-add-sig-monotonic sigs (suc S'') κ (θ ← [S]-env-absent (S'' ₛ)) S status κ-add-sig-monotonic S' (subst (S' ∈_) (cong (proj₁ ∘ Canθ' sigs (suc S'') κ) (Env.←-assoc-comm θ [ S ↦ status ] ([S]-env-absent (S'' ₛ)) (Env.sig-single-noteq-distinct S status (S'' ₛ) Signal.absent S≢S''))) S'∈canθ'-sigs-p-θ←[S↦status]) ... | yes S''∈canθ'-sigs-κ-θ←[S↦unknown]←[S''] | no S''∉canθ'-sigs-κ-θ←[S↦status]←[S''] rewrite Env.←-assoc-comm θ ([S]-env S) ([S]-env (S'' ₛ)) (Env.sig-single-noteq-distinct S Signal.unknown (S'' ₛ) Signal.unknown S≢S'') = canθ'ₛ-add-sig-monotonic sigs (suc S'') κ (θ ← [S]-env (S'' ₛ)) S status κ-add-sig-monotonic S' (subst (S' ∈_) (cong (proj₁ ∘ Canθ' sigs (suc S'') κ) (Env.←-assoc-comm θ [ S ↦ status ] ([S]-env (S'' ₛ)) (Env.sig-single-noteq-distinct S status (S'' ₛ) Signal.unknown S≢S''))) (canθ'ₛ-add-sig-monotonic sigs (suc S'') κ (θ ← [ S ↦ status ]) (S'' ₛ) Signal.absent κ-add-sig-monotonic S' S'∈canθ'-sigs-p-θ←[S↦status])) ... | no S''∉canθ'-sigs-κ-θ←[S↦unknown]←[S''] | yes S''∈canθ'-sigs-κ-θ←[S↦status]←[S''] rewrite Env.←-assoc-comm θ ([S]-env S) ([S]-env (S'' ₛ)) (Env.sig-single-noteq-distinct S Signal.unknown (S'' ₛ) Signal.unknown S≢S'') = ⊥-elim (S''∉canθ'-sigs-κ-θ←[S↦unknown]←[S''] (canθ'ₛ-add-sig-monotonic sigs (suc S'') κ (θ ← [S]-env (S'' ₛ)) S status κ-add-sig-monotonic S'' (subst (S'' ∈_) (cong (proj₁ ∘ Canθ' sigs (suc S'') κ) (Env.←-assoc-comm θ [ S ↦ status ] ([S]-env (S'' ₛ)) (Env.sig-single-noteq-distinct S status (S'' ₛ) Signal.unknown S≢S''))) S''∈canθ'-sigs-κ-θ←[S↦status]←[S'']))) canθ'ₛ-canθ-add-sig-monotonic : ∀ sigs S sigs' S' p θ S''' status → ∀ S'' → S'' ∈ proj₁ (Canθ' sigs S (Canθ sigs' S' p) (θ ← Θ SigMap.[ S''' ↦ status ] ShrMap.empty VarMap.empty)) → S'' ∈ proj₁ (Canθ' sigs S (Canθ sigs' S' p) (θ ← [S]-env S''')) canθ'ₛ-canθ-add-sig-monotonic sigs S sigs' S' p θ S''' status S'' S''∈canθ'-sigs-p-θ←[S↦status] = canθ'ₛ-add-sig-monotonic sigs S (Canθ sigs' S' p) θ S''' status (canθₛ-add-sig-monotonic sigs' S' p) S'' S''∈canθ'-sigs-p-θ←[S↦status] canθ'ₛ-subset-lemma : ∀ sigs S'' κ κ' θ → (∀ θ' S → S ∈ proj₁ (κ θ') → S ∈ proj₁ (κ' θ')) → (∀ θ S status S' → S' ∈ proj₁ (κ' (θ ← Θ SigMap.[ S ↦ status ] ShrMap.empty VarMap.empty)) → S' ∈ proj₁ (κ' (θ ← [S]-env S))) → ∀ S → S ∈ proj₁ (Canθ' sigs S'' κ θ) → S ∈ proj₁ (Canθ' sigs S'' κ' θ) canθ'ₛ-subset-lemma [] S'' κ κ' θ κ⊆κ' κ'-add-sig-monotonic S S∈canθ'-κ-θ = κ⊆κ' θ S S∈canθ'-κ-θ canθ'ₛ-subset-lemma (nothing ∷ sigs) S'' κ κ' θ κ⊆κ' κ'-add-sig-monotonic S S∈canθ'-κ-θ = canθ'ₛ-subset-lemma sigs (suc S'') κ κ' θ κ⊆κ' κ'-add-sig-monotonic S S∈canθ'-κ-θ canθ'ₛ-subset-lemma (just Signal.present ∷ sigs) S'' κ κ' θ κ⊆κ' κ'-add-sig-monotonic S S∈canθ'-κ-θ = canθ'ₛ-subset-lemma sigs (suc S'') κ κ' (θ ← [S]-env-present (S'' ₛ)) κ⊆κ' κ'-add-sig-monotonic S S∈canθ'-κ-θ canθ'ₛ-subset-lemma (just Signal.absent ∷ sigs) S'' κ κ' θ κ⊆κ' κ'-add-sig-monotonic S S∈canθ'-κ-θ = canθ'ₛ-subset-lemma sigs (suc S'') κ κ' (θ ← [S]-env-absent (S'' ₛ)) κ⊆κ' κ'-add-sig-monotonic S S∈canθ'-κ-θ canθ'ₛ-subset-lemma (just Signal.unknown ∷ sigs) S'' κ κ' θ κ⊆κ' κ'-add-sig-monotonic S S∈canθ'-κ-θ with any (_≟_ S'') (proj₁ (Canθ' sigs (suc S'') κ (θ ← [S]-env (S'' ₛ)))) | any (_≟_ S'') (proj₁ (Canθ' sigs (suc S'') κ' (θ ← [S]-env (S'' ₛ)))) ... 
| yes S''∈canθ'-κ-θ' | yes S''∈canθ-κ'-θ' = canθ'ₛ-subset-lemma sigs (suc S'') κ κ' (θ ← [S]-env (S'' ₛ)) κ⊆κ' κ'-add-sig-monotonic S S∈canθ'-κ-θ ... | no S''∉canθ'-κ-θ' | no S''∉canθ-q-θ' = canθ'ₛ-subset-lemma sigs (suc S'') κ κ' (θ ← [S]-env-absent (S'' ₛ)) κ⊆κ' κ'-add-sig-monotonic S S∈canθ'-κ-θ ... | yes S''∈canθ'-κ-θ' | no S''∉canθ-q-θ' = ⊥-elim (S''∉canθ-q-θ' (canθ'ₛ-subset-lemma sigs (suc S'') κ κ' (θ ← [S]-env (S'' ₛ)) κ⊆κ' κ'-add-sig-monotonic S'' S''∈canθ'-κ-θ')) ... | no S''∉canθ'-κ-θ' | yes S''∈canθ-κ'-θ' = canθ'ₛ-add-sig-monotonic sigs (suc S'') κ' θ (S'' ₛ) Signal.absent κ'-add-sig-monotonic S (canθ'ₛ-subset-lemma sigs (suc S'') κ κ' (θ ← [S]-env-absent (S'' ₛ)) κ⊆κ' κ'-add-sig-monotonic S S∈canθ'-κ-θ) canθ'-inner-shadowing-irr' : ∀ sigs S'' sigs' p S status θ θo → S ∈ SigMap.keys sigs' → Canθ' sigs S'' (Canθ sigs' 0 p) ((θ ← [ (S ₛ) ↦ status ]) ← θo) ≡ Canθ' sigs S'' (Canθ sigs' 0 p) (θ ← θo) canθ'-inner-shadowing-irr' [] S'' sigs' p S status θ θo S∈sigs' rewrite sym (map-id (SigMap.keys sigs')) = canθ-shadowing-irr' sigs' 0 p S status θ θo S∈sigs' canθ'-inner-shadowing-irr' (nothing ∷ sigs) S'' sigs' p S status θ θo S∈sigs' = canθ'-inner-shadowing-irr' sigs (suc S'') sigs' p S status θ θo S∈sigs' canθ'-inner-shadowing-irr' (just Signal.present ∷ sigs) S'' sigs' p S status θ θo S∈sigs' rewrite sym (Env.←-assoc (θ ← [ (S ₛ) ↦ status ]) θo ([S]-env-present (S'' ₛ))) | sym (Env.←-assoc θ θo ([S]-env-present (S'' ₛ))) = canθ'-inner-shadowing-irr' sigs (suc S'') sigs' p S status θ (θo ← ([S]-env-present (S'' ₛ))) S∈sigs' canθ'-inner-shadowing-irr' (just Signal.absent ∷ sigs) S'' sigs' p S status θ θo S∈sigs' rewrite sym (Env.←-assoc (θ ← [ (S ₛ) ↦ status ]) θo ([S]-env-absent (S'' ₛ))) | sym (Env.←-assoc θ θo ([S]-env-absent (S'' ₛ))) = canθ'-inner-shadowing-irr' sigs (suc S'') sigs' p S status θ (θo ← ([S]-env-absent (S'' ₛ))) S∈sigs' canθ'-inner-shadowing-irr' (just Signal.unknown ∷ sigs) S'' sigs' p S status θ θo S∈sigs' with any (_≟_ S'') (proj₁ (Canθ' sigs (suc S'') (Canθ sigs' 0 p) (((θ ← [ (S ₛ) ↦ status ]) ← θo) ← [S]-env (S'' ₛ)))) | any (_≟_ S'') (proj₁ (Canθ' sigs (suc S'') (Canθ sigs' 0 p) ((θ ← θo) ← [S]-env (S'' ₛ)))) ... | yes S''∈canθ'-sigs-Canθ-θ←[S]-absent←S←θo←[S''] | yes S''∈canθ'-sigs-Canθ-θ←[S]←S←θo←[S''] rewrite sym (Env.←-assoc (θ ← [ (S ₛ) ↦ status ]) θo ([S]-env (S'' ₛ))) | sym (Env.←-assoc θ θo ([S]-env (S'' ₛ))) = canθ'-inner-shadowing-irr' sigs (suc S'') sigs' p S status θ (θo ← ([S]-env (S'' ₛ))) S∈sigs' ... | no S''∉canθ'-sigs-Canθ-θ←[S]-absent←S←θo←[S''] | no S''∉canθ'-sigs-Canθ-θ←[S]←S←θo←[S''] rewrite sym (Env.←-assoc (θ ← [ (S ₛ) ↦ status ]) θo ([S]-env-absent (S'' ₛ))) | sym (Env.←-assoc θ θo ([S]-env-absent (S'' ₛ))) = canθ'-inner-shadowing-irr' sigs (suc S'') sigs' p S status θ (θo ← ([S]-env-absent (S'' ₛ))) S∈sigs' ... | yes S''∈canθ'-sigs-Canθ-θ←[S]-absent←S←θo←[S''] | no S''∉canθ'-sigs-Canθ-θ←[S]←S←θo←[S''] rewrite sym (Env.←-assoc (θ ← [ (S ₛ) ↦ status ]) θo ([S]-env (S'' ₛ))) | sym (Env.←-assoc θ θo ([S]-env (S'' ₛ))) | canθ'-inner-shadowing-irr' sigs (suc S'') sigs' p S status θ (θo ← ([S]-env (S'' ₛ))) S∈sigs' = ⊥-elim (S''∉canθ'-sigs-Canθ-θ←[S]←S←θo←[S''] S''∈canθ'-sigs-Canθ-θ←[S]-absent←S←θo←[S'']) ... 
| no S''∉canθ'-sigs-Canθ-θ←[S]-absent←S←θo←[S''] | yes S''∈canθ'-sigs-Canθ-θ←[S]←S←θo←[S''] rewrite sym (Env.←-assoc (θ ← [ (S ₛ) ↦ status ]) θo ([S]-env (S'' ₛ))) | sym (Env.←-assoc θ θo ([S]-env (S'' ₛ))) | canθ'-inner-shadowing-irr' sigs (suc S'') sigs' p S status θ (θo ← ([S]-env (S'' ₛ))) S∈sigs' = ⊥-elim (S''∉canθ'-sigs-Canθ-θ←[S]-absent←S←θo←[S''] S''∈canθ'-sigs-Canθ-θ←[S]←S←θo←[S'']) canθ'-inner-shadowing-irr : ∀ sigs S'' sigs' p S status θ → S ∈ SigMap.keys sigs' → Canθ' sigs S'' (Canθ sigs' 0 p) (θ ← [ (S ₛ) ↦ status ]) ≡ Canθ' sigs S'' (Canθ sigs' 0 p) θ canθ'-inner-shadowing-irr sigs S'' sigs' p S status θ S∈sigs' rewrite cong (Canθ' sigs S'' (Canθ sigs' 0 p)) (Env.←-comm Env.[]env θ distinct-empty-left) | cong (Canθ' sigs S'' (Canθ sigs' 0 p)) (Env.←-comm Env.[]env (θ ← [ (S ₛ) ↦ status ]) distinct-empty-left) = canθ'-inner-shadowing-irr' sigs S'' sigs' p S status θ Env.[]env S∈sigs' canθ'-search-acc : ∀ sigs S κ θ → ∀ S'' status → S'' ∉ map (_+_ S) (SigMap.keys sigs) → Canθ' sigs S κ (θ ← [ (S'' ₛ) ↦ status ]) ≡ Canθ' sigs S (κ ∘ (_← [ (S'' ₛ) ↦ status ])) θ canθ'-search-acc [] S κ θ S'' status S''∉map-+-S-sigs = refl canθ'-search-acc (nothing ∷ sigs) S κ θ S'' status S''∉map-+-S-sigs rewrite map-+-compose-suc S (SigMap.keys sigs) = canθ'-search-acc sigs (suc S) κ θ S'' status S''∉map-+-S-sigs canθ'-search-acc (just Signal.present ∷ sigs) S κ θ S'' status S''∉map-+-S-sigs rewrite map-+-compose-suc S (SigMap.keys sigs) | +-comm S 0 | Env.←-assoc-comm θ [ (S'' ₛ) ↦ status ] ([S]-env-present (S ₛ)) (Env.sig-single-noteq-distinct (S'' ₛ) status (S ₛ) Signal.present (S''∉map-+-S-sigs ∘ here)) = canθ'-search-acc sigs (suc S) κ (θ ← [S]-env-present (S ₛ)) S'' status (S''∉map-+-S-sigs ∘ there) canθ'-search-acc (just Signal.absent ∷ sigs) S κ θ S'' status S''∉map-+-S-sigs rewrite map-+-compose-suc S (SigMap.keys sigs) | +-comm S 0 | Env.←-assoc-comm θ [ (S'' ₛ) ↦ status ] ([S]-env-absent (S ₛ)) (Env.sig-single-noteq-distinct (S'' ₛ) status (S ₛ) Signal.absent (S''∉map-+-S-sigs ∘ here)) = canθ'-search-acc sigs (suc S) κ (θ ← [S]-env-absent (S ₛ)) S'' status (S''∉map-+-S-sigs ∘ there) canθ'-search-acc (just Signal.unknown ∷ sigs) S κ θ S'' status S''∉map-+-S-sigs with any (_≟_ S) (proj₁ (Canθ' sigs (suc S) (λ θ* → κ (θ* ← [ (S'' ₛ) ↦ status ])) (θ ← [S]-env (S ₛ)))) | any (_≟_ S) (proj₁ (Canθ' sigs (suc S) κ ((θ ← [ (S'' ₛ) ↦ status ]) ← [S]-env (S ₛ)))) ... | yes S∈canθ'-⟨canθ-←[S'']⟩-θ←[S] | yes S∈canθ'-canθ-θ←[S'']←[S] rewrite map-+-compose-suc S (SigMap.keys sigs) | +-comm S 0 | Env.←-assoc-comm θ [ (S'' ₛ) ↦ status ] ([S]-env (S ₛ)) (Env.sig-single-noteq-distinct (S'' ₛ) status (S ₛ) Signal.unknown (S''∉map-+-S-sigs ∘ here)) = canθ'-search-acc sigs (suc S) κ (θ ← [S]-env (S ₛ)) S'' status (S''∉map-+-S-sigs ∘ there) ... | no S∉canθ'-⟨canθ-←[S'']⟩-θ←[S] | no S∉canθ'-canθ-θ←[S'']←[S] rewrite map-+-compose-suc S (SigMap.keys sigs) | +-comm S 0 | Env.←-assoc-comm θ [ (S'' ₛ) ↦ status ] ([S]-env-absent (S ₛ)) (Env.sig-single-noteq-distinct (S'' ₛ) status (S ₛ) Signal.absent (S''∉map-+-S-sigs ∘ here)) = canθ'-search-acc sigs (suc S) κ (θ ← [S]-env-absent (S ₛ)) S'' status (S''∉map-+-S-sigs ∘ there) ... 
| yes S∈canθ'-⟨canθ-←[S'']⟩-θ←[S] | no S∉canθ'-canθ-θ←[S'']←[S] rewrite map-+-compose-suc S (SigMap.keys sigs) | +-comm S 0 | Env.←-assoc-comm θ [ (S'' ₛ) ↦ status ] ([S]-env (S ₛ)) (Env.sig-single-noteq-distinct (S'' ₛ) status (S ₛ) Signal.unknown (S''∉map-+-S-sigs ∘ here)) | canθ'-search-acc sigs (suc S) κ (θ ← [S]-env (S ₛ)) S'' status (S''∉map-+-S-sigs ∘ there) = ⊥-elim (S∉canθ'-canθ-θ←[S'']←[S] S∈canθ'-⟨canθ-←[S'']⟩-θ←[S]) ... | no S∉canθ'-⟨canθ-←[S'']⟩-θ←[S] | yes S∈canθ'-canθ-θ←[S'']←[S] rewrite map-+-compose-suc S (SigMap.keys sigs) | +-comm S 0 | Env.←-assoc-comm θ [ (S'' ₛ) ↦ status ] ([S]-env (S ₛ)) (Env.sig-single-noteq-distinct (S'' ₛ) status (S ₛ) Signal.unknown (S''∉map-+-S-sigs ∘ here)) | canθ'-search-acc sigs (suc S) κ (θ ← [S]-env (S ₛ)) S'' status (S''∉map-+-S-sigs ∘ there) = ⊥-elim (S∉canθ'-⟨canθ-←[S'']⟩-θ←[S] S∈canθ'-canθ-θ←[S'']←[S]) canθ'-search-acc-set-irr : ∀ sigs S κ θ → ∀ S'' status status' → S'' ∉ map (_+_ S) (SigMap.keys sigs) → Canθ' sigs S κ (θ ← [ (S'' ₛ) ↦ status ]) ≡ Canθ' sigs S (κ ∘ (_← [ (S'' ₛ) ↦ status ])) (θ ← [ (S'' ₛ) ↦ status' ]) canθ'-search-acc-set-irr [] S κ θ S'' status status' S''∉map-+-S-sigs rewrite sym (Env.←-assoc θ [ (S'' ₛ) ↦ status' ] [ (S'' ₛ) ↦ status ]) | cong (θ ←_) (Env.←-single-overwrite-sig (S'' ₛ) status' [ (S'' ₛ) ↦ status ] (Env.sig-∈-single (S'' ₛ) status)) = refl canθ'-search-acc-set-irr (nothing ∷ sigs) S κ θ S'' status status' S''∉map-+-S-sigs rewrite map-+-compose-suc S (SigMap.keys sigs) = canθ'-search-acc-set-irr sigs (suc S) κ θ S'' status status' S''∉map-+-S-sigs canθ'-search-acc-set-irr (just Signal.present ∷ sigs) S κ θ S'' status status' S''∉map-+-S-sigs rewrite map-+-compose-suc S (SigMap.keys sigs) | +-comm S 0 | Env.←-assoc-comm θ [ (S'' ₛ) ↦ status ] ([S]-env-present (S ₛ)) (Env.sig-single-noteq-distinct (S'' ₛ) status (S ₛ) Signal.present (S''∉map-+-S-sigs ∘ here)) | Env.←-assoc-comm θ [ (S'' ₛ) ↦ status' ] ([S]-env-present (S ₛ)) (Env.sig-single-noteq-distinct (S'' ₛ) status' (S ₛ) Signal.present (S''∉map-+-S-sigs ∘ here)) = canθ'-search-acc-set-irr sigs (suc S) κ (θ ← [S]-env-present (S ₛ)) S'' status status' (S''∉map-+-S-sigs ∘ there) canθ'-search-acc-set-irr (just Signal.absent ∷ sigs) S κ θ S'' status status' S''∉map-+-S-sigs rewrite map-+-compose-suc S (SigMap.keys sigs) | +-comm S 0 | Env.←-assoc-comm θ [ (S'' ₛ) ↦ status ] ([S]-env-absent (S ₛ)) (Env.sig-single-noteq-distinct (S'' ₛ) status (S ₛ) Signal.absent (S''∉map-+-S-sigs ∘ here)) | Env.←-assoc-comm θ [ (S'' ₛ) ↦ status' ] ([S]-env-absent (S ₛ)) (Env.sig-single-noteq-distinct (S'' ₛ) status' (S ₛ) Signal.absent (S''∉map-+-S-sigs ∘ here)) = canθ'-search-acc-set-irr sigs (suc S) κ (θ ← [S]-env-absent (S ₛ)) S'' status status' (S''∉map-+-S-sigs ∘ there) canθ'-search-acc-set-irr (just Signal.unknown ∷ sigs) S κ θ S'' status status' S''∉map-+-S-sigs with any (_≟_ S) (proj₁ (Canθ' sigs (suc S) (λ θ* → κ (θ* ← [ (S'' ₛ) ↦ status ])) ((θ ← [ (S'' ₛ) ↦ status' ]) ← [S]-env (S ₛ)))) | any (_≟_ S) (proj₁ (Canθ' sigs (suc S) κ ((θ ← [ (S'' ₛ) ↦ status ]) ← [S]-env (S ₛ)))) ... 
| yes S∈canθ'-⟨canθ-←[S'']⟩-θ←[S'']←[S] | yes S∈canθ'-canθ-θ←[S'']←[S] rewrite map-+-compose-suc S (SigMap.keys sigs) | +-comm S 0 | Env.←-assoc-comm θ [ (S'' ₛ) ↦ status ] ([S]-env (S ₛ)) (Env.sig-single-noteq-distinct (S'' ₛ) status (S ₛ) Signal.unknown (S''∉map-+-S-sigs ∘ here)) | Env.←-assoc-comm θ [ (S'' ₛ) ↦ status' ] ([S]-env (S ₛ)) (Env.sig-single-noteq-distinct (S'' ₛ) status' (S ₛ) Signal.unknown (S''∉map-+-S-sigs ∘ here)) = canθ'-search-acc-set-irr sigs (suc S) κ (θ ← [S]-env (S ₛ)) S'' status status' (S''∉map-+-S-sigs ∘ there) ... | no S∉canθ'-⟨canθ-←[S'']⟩-θ←[S'']←[S] | no S∉canθ'-canθ-θ←[S'']←[S] rewrite map-+-compose-suc S (SigMap.keys sigs) | +-comm S 0 | Env.←-assoc-comm θ [ (S'' ₛ) ↦ status ] ([S]-env-absent (S ₛ)) (Env.sig-single-noteq-distinct (S'' ₛ) status (S ₛ) Signal.absent (S''∉map-+-S-sigs ∘ here)) | Env.←-assoc-comm θ [ (S'' ₛ) ↦ status' ] ([S]-env-absent (S ₛ)) (Env.sig-single-noteq-distinct (S'' ₛ) status' (S ₛ) Signal.absent (S''∉map-+-S-sigs ∘ here)) = canθ'-search-acc-set-irr sigs (suc S) κ (θ ← [S]-env-absent (S ₛ)) S'' status status' (S''∉map-+-S-sigs ∘ there) ... | yes S∈canθ'-⟨canθ-←[S'']⟩-θ←[S'']←[S] | no S∉canθ'-canθ-θ←[S'']←[S] rewrite map-+-compose-suc S (SigMap.keys sigs) | +-comm S 0 | Env.←-assoc-comm θ [ (S'' ₛ) ↦ status ] ([S]-env (S ₛ)) (Env.sig-single-noteq-distinct (S'' ₛ) status (S ₛ) Signal.unknown (S''∉map-+-S-sigs ∘ here)) | Env.←-assoc-comm θ [ (S'' ₛ) ↦ status' ] ([S]-env (S ₛ)) (Env.sig-single-noteq-distinct (S'' ₛ) status' (S ₛ) Signal.unknown (S''∉map-+-S-sigs ∘ here)) | canθ'-search-acc-set-irr sigs (suc S) κ (θ ← [S]-env (S ₛ)) S'' status status' (S''∉map-+-S-sigs ∘ there) = ⊥-elim (S∉canθ'-canθ-θ←[S'']←[S] S∈canθ'-⟨canθ-←[S'']⟩-θ←[S'']←[S]) ... | no S∉canθ'-⟨canθ-←[S'']⟩-θ←[S'']←[S] | yes S∈canθ'-canθ-θ←[S'']←[S] rewrite map-+-compose-suc S (SigMap.keys sigs) | +-comm S 0 | Env.←-assoc-comm θ [ (S'' ₛ) ↦ status ] ([S]-env (S ₛ)) (Env.sig-single-noteq-distinct (S'' ₛ) status (S ₛ) Signal.unknown (S''∉map-+-S-sigs ∘ here)) | Env.←-assoc-comm θ [ (S'' ₛ) ↦ status' ] ([S]-env (S ₛ)) (Env.sig-single-noteq-distinct (S'' ₛ) status' (S ₛ) Signal.unknown (S''∉map-+-S-sigs ∘ here)) | canθ'-search-acc-set-irr sigs (suc S) κ (θ ← [S]-env (S ₛ)) S'' status status' (S''∉map-+-S-sigs ∘ there) = ⊥-elim (S∉canθ'-⟨canθ-←[S'']⟩-θ←[S'']←[S] S∈canθ'-canθ-θ←[S'']←[S]) canθ'-canθ-propagate-up-in : ∀ sigs S r θ → ∀ sigs' S' S'' → S' ∈ proj₁ (Canθ' sigs S (λ θ* → Canθ sigs' (suc S') r (θ* ← [S]-env (S' ₛ))) θ) → S'' ∈ proj₁ (Canθ' sigs S (λ θ* → Canθ sigs' (suc S') r (θ* ← [S]-env (S' ₛ))) θ) → S'' ∈ proj₁ (Canθ' sigs S (λ θ* → Canθ (just Signal.unknown ∷ sigs') S' r θ*) θ) canθ'-canθ-propagate-up-in [] S r θ sigs' S' S'' S'∈canθ'-sigs-⟨canθ-sigs'-r-θ*←[S']⟩ S''∈canθ'-sigs-⟨canθ-sigs'-r-θ*←[S']⟩ with any (_≟_ S') (Canθₛ sigs' (suc S') r (θ ← [S]-env (S' ₛ))) ... | yes S'∈canθ-sigs'-r-θ*←[S'] = S''∈canθ'-sigs-⟨canθ-sigs'-r-θ*←[S']⟩ ... 
| no S'∉canθ-sigs'-r-θ*←[S'] = ⊥-elim (S'∉canθ-sigs'-r-θ*←[S'] S'∈canθ'-sigs-⟨canθ-sigs'-r-θ*←[S']⟩) canθ'-canθ-propagate-up-in (nothing ∷ sigs) S r θ sigs' S' S'' S'∈canθ'-sigs-⟨canθ-sigs'-r-θ*←[S']⟩ S''∈canθ'-sigs-⟨canθ-sigs'-r-θ*←[S']⟩ = canθ'-canθ-propagate-up-in sigs (suc S) r θ sigs' S' S'' S'∈canθ'-sigs-⟨canθ-sigs'-r-θ*←[S']⟩ S''∈canθ'-sigs-⟨canθ-sigs'-r-θ*←[S']⟩ canθ'-canθ-propagate-up-in (just Signal.present ∷ sigs) S r θ sigs' S' S'' S'∈canθ'-sigs-⟨canθ-sigs'-r-θ*←[S']⟩ S''∈canθ'-sigs-⟨canθ-sigs'-r-θ*←[S']⟩ = canθ'-canθ-propagate-up-in sigs (suc S) r (θ ← [S]-env-present (S ₛ)) sigs' S' S'' S'∈canθ'-sigs-⟨canθ-sigs'-r-θ*←[S']⟩ S''∈canθ'-sigs-⟨canθ-sigs'-r-θ*←[S']⟩ canθ'-canθ-propagate-up-in (just Signal.absent ∷ sigs) S r θ sigs' S' S'' S'∈canθ'-sigs-⟨canθ-sigs'-r-θ*←[S']⟩ S''∈canθ'-sigs-⟨canθ-sigs'-r-θ*←[S']⟩ = canθ'-canθ-propagate-up-in sigs (suc S) r (θ ← [S]-env-absent (S ₛ)) sigs' S' S'' S'∈canθ'-sigs-⟨canθ-sigs'-r-θ*←[S']⟩ S''∈canθ'-sigs-⟨canθ-sigs'-r-θ*←[S']⟩ canθ'-canθ-propagate-up-in (just Signal.unknown ∷ sigs) S r θ sigs' S' S'' S'∈canθ'-sigs-⟨canθ-sigs'-r-θ*←[S']⟩ S''∈canθ'-sigs-⟨canθ-sigs'-r-θ*←[S']⟩ with any (_≟_ S) (proj₁ (Canθ' sigs (suc S) (Canθ (just Signal.unknown ∷ sigs') S' r) (θ ← [S]-env (S ₛ)))) | any (_≟_ S) (proj₁ (Canθ' sigs (suc S) (λ θ* → Canθ sigs' (suc S') r (θ* ← [S]-env (S' ₛ))) (θ ← [S]-env (S ₛ)))) ... | yes S∈canθ'-sigs-⟨canθ-u∷sigs'-r⟩-θ←[S] | yes S∈canθ'-sigs-⟨canθ-sigs'-θ*←[S']⟩-θ←[S] = canθ'-canθ-propagate-up-in sigs (suc S) r (θ ← [S]-env (S ₛ)) sigs' S' S'' S'∈canθ'-sigs-⟨canθ-sigs'-r-θ*←[S']⟩ S''∈canθ'-sigs-⟨canθ-sigs'-r-θ*←[S']⟩ ... | no S∉canθ'-sigs-⟨canθ-u∷sigs'-r⟩-θ←[S] | no S∉canθ'-sigs-⟨canθ-sigs'-θ*←[S']⟩-θ←[S] = canθ'-canθ-propagate-up-in sigs (suc S) r (θ ← [S]-env-absent (S ₛ)) sigs' S' S'' S'∈canθ'-sigs-⟨canθ-sigs'-r-θ*←[S']⟩ S''∈canθ'-sigs-⟨canθ-sigs'-r-θ*←[S']⟩ ... | yes S∈canθ'-sigs-⟨canθ-u∷sigs'-r⟩-θ←[S] | no S∉canθ'-sigs-⟨canθ-sigs'-θ*←[S']⟩-θ←[S] = canθ'ₛ-canθ-add-sig-monotonic sigs (suc S) (just Signal.unknown ∷ sigs') S' r θ (S ₛ) Signal.absent S'' (canθ'-canθ-propagate-up-in sigs (suc S) r (θ ← [S]-env-absent (S ₛ)) sigs' S' S'' S'∈canθ'-sigs-⟨canθ-sigs'-r-θ*←[S']⟩ S''∈canθ'-sigs-⟨canθ-sigs'-r-θ*←[S']⟩) ... | no S∉canθ'-sigs-⟨canθ-u∷sigs'-r⟩-θ←[S] | yes S∈canθ'-sigs-⟨canθ-sigs'-θ*←[S']⟩-θ←[S] = ⊥-elim (S∉canθ'-sigs-⟨canθ-u∷sigs'-r⟩-θ←[S] (canθ'-canθ-propagate-up-in sigs (suc S) r (θ ← [S]-env (S ₛ)) sigs' S' S S'∈canθ'-sigs-⟨canθ-sigs'-r-θ*←[S']⟩ S∈canθ'-sigs-⟨canθ-sigs'-θ*←[S']⟩-θ←[S])) canθₛ-a∷s⊆canθₛ-u∷s : ∀ sigs r S' θ S → S ∈ Canθₛ (just Signal.absent ∷ sigs) S' r θ → S ∈ Canθₛ (just Signal.unknown ∷ sigs) S' r θ canθₛ-a∷s⊆canθₛ-u∷s sigs r S' θ S S∈can-sigs-r-θ←[S↦absent] with any (_≟_ S') (Canθₛ sigs (suc S') r (θ ← [S]-env (S' ₛ))) ... | yes a = canθₛ-add-sig-monotonic sigs (suc S') r θ (S' ₛ) Signal.absent S S∈can-sigs-r-θ←[S↦absent] ... | no na = S∈can-sigs-r-θ←[S↦absent] canθ'-canθ-propagate-down-not-in : ∀ sigs S r θ → ∀ S' sigs' → S' ∉ proj₁ (Canθ' sigs S (λ θ* → Canθ (just Signal.unknown ∷ sigs') S' r θ*) θ) → Canθ' sigs S (λ θ* → Canθ (just Signal.unknown ∷ sigs') S' r θ*) θ ≡ Canθ' sigs S (λ θ* → Canθ sigs' (suc S') r (θ* ← [S]-env-absent (S' ₛ))) θ canθ'-canθ-propagate-down-not-in [] S r θ S' sigs' S'∉canθ'-sigs-⟨Canθ-u∷sigs'-θ*⟩ with any (_≟_ S') (Canθₛ sigs' (suc S') r (θ ← [S]-env (S' ₛ))) ... | yes S'∈canθ-sigs'-r-θ←[S'] = ⊥-elim (S'∉canθ'-sigs-⟨Canθ-u∷sigs'-θ*⟩ S'∈canθ-sigs'-r-θ←[S']) ... 
| no S'∉canθ-sigs'-r-θ←[S'] = refl canθ'-canθ-propagate-down-not-in (nothing ∷ sigs) S r θ S' sigs' S'∉canθ'-sigs-⟨Canθ-u∷sigs'-θ*⟩ = canθ'-canθ-propagate-down-not-in sigs (suc S) r θ S' sigs' S'∉canθ'-sigs-⟨Canθ-u∷sigs'-θ*⟩ canθ'-canθ-propagate-down-not-in (just Signal.present ∷ sigs) S r θ S' sigs' S'∉canθ'-sigs-⟨Canθ-u∷sigs'-θ*⟩ = canθ'-canθ-propagate-down-not-in sigs (suc S) r (θ ← [S]-env-present (S ₛ)) S' sigs' S'∉canθ'-sigs-⟨Canθ-u∷sigs'-θ*⟩ canθ'-canθ-propagate-down-not-in (just Signal.absent ∷ sigs) S r θ S' sigs' S'∉canθ'-sigs-⟨Canθ-u∷sigs'-θ*⟩ = canθ'-canθ-propagate-down-not-in sigs (suc S) r (θ ← [S]-env-absent (S ₛ)) S' sigs' S'∉canθ'-sigs-⟨Canθ-u∷sigs'-θ*⟩ canθ'-canθ-propagate-down-not-in (just Signal.unknown ∷ sigs) S r θ S' sigs' S'∉canθ'-sigs-⟨Canθ-u∷sigs'-θ*⟩ with any (_≟_ S) (proj₁ (Canθ' sigs (suc S) (λ θ* → Canθ (just Signal.unknown ∷ sigs') S' r θ*) (θ ← [S]-env (S ₛ)))) | any (_≟_ S) (proj₁ (Canθ' sigs (suc S) (λ θ* → Canθ sigs' (suc S') r (θ* ← [S]-env-absent (S' ₛ))) (θ ← [S]-env (S ₛ)))) ... | yes a | yes b = canθ'-canθ-propagate-down-not-in sigs (suc S) r (θ ← [S]-env (S ₛ)) S' sigs' S'∉canθ'-sigs-⟨Canθ-u∷sigs'-θ*⟩ ... | no na | no nb = canθ'-canθ-propagate-down-not-in sigs (suc S) r (θ ← [S]-env-absent (S ₛ)) S' sigs' S'∉canθ'-sigs-⟨Canθ-u∷sigs'-θ*⟩ ... | yes a | no nb rewrite sym (canθ'-canθ-propagate-down-not-in sigs (suc S) r (θ ← [S]-env (S ₛ)) S' sigs' S'∉canθ'-sigs-⟨Canθ-u∷sigs'-θ*⟩) = ⊥-elim (nb a) ... | no na | yes b = ⊥-elim (na (canθ'ₛ-subset-lemma sigs (suc S) (Canθ (just Signal.absent ∷ sigs') S' r) (Canθ (just Signal.unknown ∷ sigs') S' r) (θ ← [S]-env (S ₛ)) (canθₛ-a∷s⊆canθₛ-u∷s sigs' r S') (canθₛ-add-sig-monotonic (just Signal.unknown ∷ sigs') S' r) S b)) canθ'-canθ-propagate-down-in : ∀ sigs S r θ → ∀ S' sigs' → S' ∈ proj₁ (Canθ' sigs S (λ θ* → Canθ (just Signal.unknown ∷ sigs') S' r θ*) θ) → Canθ' sigs S (λ θ* → Canθ (just Signal.unknown ∷ sigs') S' r θ*) θ ≡ Canθ' sigs S (λ θ* → Canθ sigs' (suc S') r (θ* ← [S]-env (S' ₛ))) θ canθ'-canθ-propagate-down-in [] S r θ S' sigs' S'∈canθ'-sigs-⟨Canθ-u∷sigs'-θ*⟩ with any (_≟_ S') (Canθₛ sigs' (suc S') r (θ ← [S]-env (S' ₛ))) ... | yes S'∈canθ-sigs'-r-θ←[S'] = refl ... | no S'∉canθ-sigs'-r-θ←[S'] = ⊥-elim (S'∉canθ-sigs'-r-θ←[S'] (canθₛ-add-sig-monotonic sigs' (suc S') r θ (S' ₛ) Signal.absent S' S'∈canθ'-sigs-⟨Canθ-u∷sigs'-θ*⟩)) canθ'-canθ-propagate-down-in (nothing ∷ sigs) S r θ S' sigs' S'∈canθ'-sigs-⟨Canθ-u∷sigs'-θ*⟩ = canθ'-canθ-propagate-down-in sigs (suc S) r θ S' sigs' S'∈canθ'-sigs-⟨Canθ-u∷sigs'-θ*⟩ canθ'-canθ-propagate-down-in (just Signal.present ∷ sigs) S r θ S' sigs' S'∈canθ'-sigs-⟨Canθ-u∷sigs'-θ*⟩ = canθ'-canθ-propagate-down-in sigs (suc S) r (θ ← [S]-env-present (S ₛ)) S' sigs' S'∈canθ'-sigs-⟨Canθ-u∷sigs'-θ*⟩ canθ'-canθ-propagate-down-in (just Signal.absent ∷ sigs) S r θ S' sigs' S'∈canθ'-sigs-⟨Canθ-u∷sigs'-θ*⟩ = canθ'-canθ-propagate-down-in sigs (suc S) r (θ ← [S]-env-absent (S ₛ)) S' sigs' S'∈canθ'-sigs-⟨Canθ-u∷sigs'-θ*⟩ canθ'-canθ-propagate-down-in (just Signal.unknown ∷ sigs) S r θ S' sigs' S'∈canθ'-sigs-⟨Canθ-u∷sigs'-θ*⟩ with any (_≟_ S) (proj₁ (Canθ' sigs (suc S) (Canθ (just Signal.unknown ∷ sigs') S' r) (θ ← [S]-env (S ₛ)))) | any (_≟_ S) (proj₁ (Canθ' sigs (suc S) (λ θ* → Canθ sigs' (suc S') r (θ* ← [S]-env (S' ₛ))) (θ ← [S]-env (S ₛ)))) ... | yes S∈canθ'-sigs-⟨canθ-u∷sigs'-r⟩-θ←[S] | yes S∈canθ'-sigs-⟨canθ-sigs'-θ*←[S']⟩-θ←[S] = canθ'-canθ-propagate-down-in sigs (suc S) r (θ ← [S]-env (S ₛ)) S' sigs' S'∈canθ'-sigs-⟨Canθ-u∷sigs'-θ*⟩ ... 
| no S∉canθ'-sigs-⟨canθ-u∷sigs'-r⟩-θ←[S] | no S∉canθ'-sigs-⟨canθ-sigs'-θ*←[S']⟩-θ←[S] = canθ'-canθ-propagate-down-in sigs (suc S) r (θ ← [S]-env-absent (S ₛ)) S' sigs' S'∈canθ'-sigs-⟨Canθ-u∷sigs'-θ*⟩ ... | yes S∈canθ'-sigs-⟨canθ-u∷sigs'-r⟩-θ←[S] | no S∉canθ'-sigs-⟨canθ-sigs'-θ*←[S']⟩-θ←[S] = ⊥-elim (S∉canθ'-sigs-⟨canθ-sigs'-θ*←[S']⟩-θ←[S] (subst (S ∈_) (cong proj₁ (canθ'-canθ-propagate-down-in sigs (suc S) r (θ ← [S]-env (S ₛ)) S' sigs' S'∈canθ'-sigs-⟨Canθ-u∷sigs'-θ*⟩)) S∈canθ'-sigs-⟨canθ-u∷sigs'-r⟩-θ←[S])) ... | no S∉canθ'-sigs-⟨canθ-u∷sigs'-r⟩-θ←[S] | yes S∈canθ'-sigs-⟨canθ-sigs'-θ*←[S']⟩-θ←[S] = ⊥-elim (S∉canθ'-sigs-⟨canθ-u∷sigs'-r⟩-θ←[S] (subst (S ∈_) (cong proj₁ (sym (canθ'-canθ-propagate-down-in sigs (suc S) r (θ ← [S]-env (S ₛ)) S' sigs' (canθ'ₛ-canθ-add-sig-monotonic sigs (suc S) (just Signal.unknown ∷ sigs') S' r θ (S ₛ) Signal.absent S' S'∈canθ'-sigs-⟨Canθ-u∷sigs'-θ*⟩)))) S∈canθ'-sigs-⟨canθ-sigs'-θ*←[S']⟩-θ←[S])) canθ'-canθ-propagate-up-in-set-irr : ∀ sigs S r θ status → ∀ sigs' S' S'' → S' ∈ proj₁ (Canθ' sigs S (λ θ* → Canθ sigs' (suc S') r (θ* ← [S]-env (S' ₛ))) θ) → S'' ∈ proj₁ (Canθ' sigs S (λ θ* → Canθ sigs' (suc S') r (θ* ← [S]-env (S' ₛ))) θ) → S'' ∈ proj₁ (Canθ' sigs S (λ θ* → Canθ (just Signal.unknown ∷ sigs') S' r (θ* ← [ (S' ₛ) ↦ status ])) θ) canθ'-canθ-propagate-up-in-set-irr [] S r θ status sigs' S' S'' S'∈canθ'-sigs-⟨canθ-sigs'-r-θ*←[S']⟩ S''∈canθ'-sigs-⟨canθ-sigs'-r-θ*←[S']⟩ with any (_≟_ S') (Canθₛ sigs' (suc S') r ((θ ← [ (S' ₛ) ↦ status ]) ← [S]-env (S' ₛ))) ... | yes S'∈canθ-sigs'-r-θ*←[S'] rewrite Env.sig-single-←-←-overwrite θ (S' ₛ) status Signal.unknown = S''∈canθ'-sigs-⟨canθ-sigs'-r-θ*←[S']⟩ ... | no S'∉canθ-sigs'-r-θ*←[S'] rewrite Env.sig-single-←-←-overwrite θ (S' ₛ) status Signal.unknown = ⊥-elim (S'∉canθ-sigs'-r-θ*←[S'] S'∈canθ'-sigs-⟨canθ-sigs'-r-θ*←[S']⟩) canθ'-canθ-propagate-up-in-set-irr (nothing ∷ sigs) S r θ status sigs' S' S'' S'∈canθ'-sigs-⟨canθ-sigs'-r-θ*←[S']⟩ S''∈canθ'-sigs-⟨canθ-sigs'-r-θ*←[S']⟩ = canθ'-canθ-propagate-up-in-set-irr sigs (suc S) r θ status sigs' S' S'' S'∈canθ'-sigs-⟨canθ-sigs'-r-θ*←[S']⟩ S''∈canθ'-sigs-⟨canθ-sigs'-r-θ*←[S']⟩ canθ'-canθ-propagate-up-in-set-irr (just Signal.present ∷ sigs) S r θ status sigs' S' S'' S'∈canθ'-sigs-⟨canθ-sigs'-r-θ*←[S']⟩ S''∈canθ'-sigs-⟨canθ-sigs'-r-θ*←[S']⟩ = canθ'-canθ-propagate-up-in-set-irr sigs (suc S) r (θ ← [S]-env-present (S ₛ)) status sigs' S' S'' S'∈canθ'-sigs-⟨canθ-sigs'-r-θ*←[S']⟩ S''∈canθ'-sigs-⟨canθ-sigs'-r-θ*←[S']⟩ canθ'-canθ-propagate-up-in-set-irr (just Signal.absent ∷ sigs) S r θ status sigs' S' S'' S'∈canθ'-sigs-⟨canθ-sigs'-r-θ*←[S']⟩ S''∈canθ'-sigs-⟨canθ-sigs'-r-θ*←[S']⟩ = canθ'-canθ-propagate-up-in-set-irr sigs (suc S) r (θ ← [S]-env-absent (S ₛ)) status sigs' S' S'' S'∈canθ'-sigs-⟨canθ-sigs'-r-θ*←[S']⟩ S''∈canθ'-sigs-⟨canθ-sigs'-r-θ*←[S']⟩ canθ'-canθ-propagate-up-in-set-irr (just Signal.unknown ∷ sigs) S r θ status sigs' S' S'' S'∈canθ'-sigs-⟨canθ-sigs'-r-θ*←[S']⟩ S''∈canθ'-sigs-⟨canθ-sigs'-r-θ*←[S']⟩ with any (_≟_ S) (proj₁ (Canθ' sigs (suc S) (λ θ* → Canθ (just Signal.unknown ∷ sigs') S' r (θ* ← [ (S' ₛ) ↦ status ])) (θ ← [S]-env (S ₛ)))) | any (_≟_ S) (proj₁ (Canθ' sigs (suc S) (λ θ* → Canθ sigs' (suc S') r (θ* ← [S]-env (S' ₛ))) (θ ← [S]-env (S ₛ)))) ... | yes S∈canθ'-sigs-⟨canθ-u∷sigs'-r⟩-θ←[S] | yes S∈canθ'-sigs-⟨canθ-sigs'-θ*←[S']⟩-θ←[S] = canθ'-canθ-propagate-up-in-set-irr sigs (suc S) r (θ ← [S]-env (S ₛ)) status sigs' S' S'' S'∈canθ'-sigs-⟨canθ-sigs'-r-θ*←[S']⟩ S''∈canθ'-sigs-⟨canθ-sigs'-r-θ*←[S']⟩ ... 
| no S∉canθ'-sigs-⟨canθ-u∷sigs'-r⟩-θ←[S] | no S∉canθ'-sigs-⟨canθ-sigs'-θ*←[S']⟩-θ←[S] = canθ'-canθ-propagate-up-in-set-irr sigs (suc S) r (θ ← [S]-env-absent (S ₛ)) status sigs' S' S'' S'∈canθ'-sigs-⟨canθ-sigs'-r-θ*←[S']⟩ S''∈canθ'-sigs-⟨canθ-sigs'-r-θ*←[S']⟩ ... | yes S∈canθ'-sigs-⟨canθ-u∷sigs'-r⟩-θ←[S] | no S∉canθ'-sigs-⟨canθ-sigs'-θ*←[S']⟩-θ←[S] = canθ'ₛ-add-sig-monotonic sigs (suc S) (Canθ (just Signal.unknown ∷ sigs') S' r ∘ (_← [ (S' ₛ) ↦ status ])) θ (S ₛ) Signal.absent (λ θ* S* status* S'' S''∈ → canθₛ-cong-←-add-sig-monotonic (just Signal.unknown ∷ sigs') S' r θ* [ (S' ₛ) ↦ status ] S* status* S'' S''∈) S'' (canθ'-canθ-propagate-up-in-set-irr sigs (suc S) r (θ ← [S]-env-absent (S ₛ)) status sigs' S' S'' S'∈canθ'-sigs-⟨canθ-sigs'-r-θ*←[S']⟩ S''∈canθ'-sigs-⟨canθ-sigs'-r-θ*←[S']⟩) ... | no S∉canθ'-sigs-⟨canθ-u∷sigs'-r⟩-θ←[S] | yes S∈canθ'-sigs-⟨canθ-sigs'-θ*←[S']⟩-θ←[S] = ⊥-elim (S∉canθ'-sigs-⟨canθ-u∷sigs'-r⟩-θ←[S] (canθ'-canθ-propagate-up-in-set-irr sigs (suc S) r (θ ← [S]-env (S ₛ)) status sigs' S' S S'∈canθ'-sigs-⟨canθ-sigs'-r-θ*←[S']⟩ S∈canθ'-sigs-⟨canθ-sigs'-θ*←[S']⟩-θ←[S])) canθ'-canθ-propagate-down-in-set-irr : ∀ sigs S r θ status → ∀ S' sigs' → S' ∈ proj₁ (Canθ' sigs S (λ θ* → Canθ (just Signal.unknown ∷ sigs') S' r (θ* ← [ (S' ₛ) ↦ status ])) θ) → Canθ' sigs S (λ θ* → Canθ (just Signal.unknown ∷ sigs') S' r (θ* ← [ (S' ₛ) ↦ status ])) θ ≡ Canθ' sigs S (λ θ* → Canθ sigs' (suc S') r (θ* ← [S]-env (S' ₛ))) θ canθ'-canθ-propagate-down-in-set-irr [] S r θ status S' sigs' S'∈canθ'-sigs-⟨Canθ-u∷sigs'-θ*⟩ with any (_≟_ S') (Canθₛ sigs' (suc S') r ((θ ← [ (S' ₛ) ↦ status ]) ← [S]-env (S' ₛ))) ... | yes S'∈canθ-sigs'-r-θ←[S'] rewrite Env.sig-single-←-←-overwrite θ (S' ₛ) status Signal.unknown = refl ... | no S'∉canθ-sigs'-r-θ←[S'] = ⊥-elim (S'∉canθ-sigs'-r-θ←[S'] (canθₛ-add-sig-monotonic sigs' (suc S') r (θ ← [ (S' ₛ) ↦ status ]) (S' ₛ) Signal.absent S' S'∈canθ'-sigs-⟨Canθ-u∷sigs'-θ*⟩)) canθ'-canθ-propagate-down-in-set-irr (nothing ∷ sigs) S r θ status S' sigs' S'∈canθ'-sigs-⟨Canθ-u∷sigs'-θ*⟩ = canθ'-canθ-propagate-down-in-set-irr sigs (suc S) r θ status S' sigs' S'∈canθ'-sigs-⟨Canθ-u∷sigs'-θ*⟩ canθ'-canθ-propagate-down-in-set-irr (just Signal.present ∷ sigs) S r θ status S' sigs' S'∈canθ'-sigs-⟨Canθ-u∷sigs'-θ*⟩ = canθ'-canθ-propagate-down-in-set-irr sigs (suc S) r (θ ← [S]-env-present (S ₛ)) status S' sigs' S'∈canθ'-sigs-⟨Canθ-u∷sigs'-θ*⟩ canθ'-canθ-propagate-down-in-set-irr (just Signal.absent ∷ sigs) S r θ status S' sigs' S'∈canθ'-sigs-⟨Canθ-u∷sigs'-θ*⟩ = canθ'-canθ-propagate-down-in-set-irr sigs (suc S) r (θ ← [S]-env-absent (S ₛ)) status S' sigs' S'∈canθ'-sigs-⟨Canθ-u∷sigs'-θ*⟩ canθ'-canθ-propagate-down-in-set-irr (just Signal.unknown ∷ sigs) S r θ status S' sigs' S'∈canθ'-sigs-⟨Canθ-u∷sigs'-θ*⟩ with any (_≟_ S) (proj₁ (Canθ' sigs (suc S) (λ θ* → Canθ (just Signal.unknown ∷ sigs') S' r (θ* ← [ (S' ₛ) ↦ status ])) (θ ← [S]-env (S ₛ)))) | any (_≟_ S) (proj₁ (Canθ' sigs (suc S) (λ θ* → Canθ sigs' (suc S') r (θ* ← [S]-env (S' ₛ))) (θ ← [S]-env (S ₛ)))) ... | yes S∈canθ'-sigs-⟨canθ-u∷sigs'-r⟩-θ←[S] | yes S∈canθ'-sigs-⟨canθ-sigs'-θ*←[S']⟩-θ←[S] = canθ'-canθ-propagate-down-in-set-irr sigs (suc S) r (θ ← [S]-env (S ₛ)) status S' sigs' S'∈canθ'-sigs-⟨Canθ-u∷sigs'-θ*⟩ ... | no S∉canθ'-sigs-⟨canθ-u∷sigs'-r⟩-θ←[S] | no S∉canθ'-sigs-⟨canθ-sigs'-θ*←[S']⟩-θ←[S] = canθ'-canθ-propagate-down-in-set-irr sigs (suc S) r (θ ← [S]-env-absent (S ₛ)) status S' sigs' S'∈canθ'-sigs-⟨Canθ-u∷sigs'-θ*⟩ ... 
| yes S∈canθ'-sigs-⟨canθ-u∷sigs'-r⟩-θ←[S] | no S∉canθ'-sigs-⟨canθ-sigs'-θ*←[S']⟩-θ←[S] = ⊥-elim (S∉canθ'-sigs-⟨canθ-sigs'-θ*←[S']⟩-θ←[S] (subst (S ∈_) (cong proj₁ (canθ'-canθ-propagate-down-in-set-irr sigs (suc S) r (θ ← [S]-env (S ₛ)) status S' sigs' S'∈canθ'-sigs-⟨Canθ-u∷sigs'-θ*⟩)) S∈canθ'-sigs-⟨canθ-u∷sigs'-r⟩-θ←[S])) ... | no S∉canθ'-sigs-⟨canθ-u∷sigs'-r⟩-θ←[S] | yes S∈canθ'-sigs-⟨canθ-sigs'-θ*←[S']⟩-θ←[S] = ⊥-elim (S∉canθ'-sigs-⟨canθ-u∷sigs'-r⟩-θ←[S] (subst (S ∈_) (cong proj₁ (sym (canθ'-canθ-propagate-down-in-set-irr sigs (suc S) r (θ ← [S]-env (S ₛ)) status S' sigs' (canθ'ₛ-add-sig-monotonic sigs (suc S) (Canθ (just Signal.unknown ∷ sigs') S' r ∘ (λ section → section ← [ (S' ₛ) ↦ status ])) θ (S ₛ) Signal.absent (λ θ* S* status* → canθₛ-cong-←-add-sig-monotonic (just Signal.unknown ∷ sigs') S' r θ* [ (S' ₛ) ↦ status ] S* status*) S' S'∈canθ'-sigs-⟨Canθ-u∷sigs'-θ*⟩)))) S∈canθ'-sigs-⟨canθ-sigs'-θ*←[S']⟩-θ←[S])) canθ'-←-distribute : ∀ sigs sigs' S'' r θ → Canθ (SigMap.union sigs sigs') S'' r θ ≡ Canθ' sigs S'' (Canθ sigs' S'' r) θ canθ'-←-distribute [] sigs' S'' r θ = refl canθ'-←-distribute sigs [] S'' r θ rewrite SigMap.union-comm sigs SigMap.empty (λ _ _ ()) | unfold sigs S'' r θ = refl canθ'-←-distribute (nothing ∷ sigs) (nothing ∷ sigs') S'' r θ = canθ'-←-distribute sigs sigs' (suc S'') r θ canθ'-←-distribute (just Signal.present ∷ sigs) (nothing ∷ sigs') S'' r θ = canθ'-←-distribute sigs sigs' (suc S'') r (θ ← [S]-env-present (S'' ₛ)) canθ'-←-distribute (just Signal.absent ∷ sigs) (nothing ∷ sigs') S'' r θ = canθ'-←-distribute sigs sigs' (suc S'') r (θ ← [S]-env-absent (S'' ₛ)) canθ'-←-distribute (just Signal.unknown ∷ sigs) (nothing ∷ sigs') S'' r θ with any (_≟_ S'') (proj₁ (Canθ (SigMap.union sigs sigs') (suc S'') r (θ ← [S]-env (S'' ₛ)))) | any (_≟_ S'') (proj₁ (Canθ' sigs (suc S'') (Canθ sigs' (suc S'') r) (θ ← [S]-env (S'' ₛ)))) ... | yes S''∈canθ'-sigs←sigs'-r-θ←[S''] | yes S''∈canθ'-sigs-⟨canθ-sigs'⟩-θ←[S''] = canθ'-←-distribute sigs sigs' (suc S'') r (θ ← [S]-env (S'' ₛ)) ... | no S''∉canθ'-sigs←sigs'-r-θ←[S''] | no S''∉canθ'-sigs-⟨canθ-sigs'⟩-θ←[S''] = canθ'-←-distribute sigs sigs' (suc S'') r (θ ← [S]-env-absent (S'' ₛ)) ... | yes S''∈canθ'-sigs←sigs'-r-θ←[S''] | no S''∉canθ'-sigs-⟨canθ-sigs'⟩-θ←[S''] rewrite canθ'-←-distribute sigs sigs' (suc S'') r (θ ← [S]-env (S'' ₛ)) = ⊥-elim (S''∉canθ'-sigs-⟨canθ-sigs'⟩-θ←[S''] S''∈canθ'-sigs←sigs'-r-θ←[S'']) ... | no S''∉canθ'-sigs←sigs'-r-θ←[S''] | yes S''∈canθ'-sigs-⟨canθ-sigs'⟩-θ←[S''] rewrite canθ'-←-distribute sigs sigs' (suc S'') r (θ ← [S]-env (S'' ₛ)) = ⊥-elim (S''∉canθ'-sigs←sigs'-r-θ←[S''] S''∈canθ'-sigs-⟨canθ-sigs'⟩-θ←[S'']) canθ'-←-distribute (nothing ∷ sigs) (just Signal.present ∷ sigs') S'' r θ = trans (canθ'-←-distribute sigs sigs' (suc S'') r (θ ← [S]-env-present (S'' ₛ))) (canθ'-search-acc sigs (suc S'') (Canθ sigs' (suc S'') r) θ S'' Signal.present (n∉map-suc-n-+ S'' (SigMap.keys sigs))) canθ'-←-distribute (nothing ∷ sigs) (just Signal.absent ∷ sigs') S'' r θ = trans (canθ'-←-distribute sigs sigs' (suc S'') r (θ ← [S]-env-absent (S'' ₛ))) (canθ'-search-acc sigs (suc S'') (Canθ sigs' (suc S'') r) θ S'' Signal.absent (n∉map-suc-n-+ S'' (SigMap.keys sigs))) canθ'-←-distribute (nothing ∷ sigs) (just Signal.unknown ∷ sigs') S'' r θ with any (_≟_ S'') (proj₁ (Canθ (SigMap.union sigs sigs') (suc S'') r (θ ← [S]-env (S'' ₛ)))) | any (_≟_ S'') (proj₁ (Canθ' sigs (suc S'') (Canθ (just Signal.unknown ∷ sigs') S'' r) θ)) ... 
| yes S''∈canθ'-sigs←sigs'-r-θ←[S''] | yes S''∈canθ'-sigs-⟨Canθ-u∷sigs'-r-θ⟩ rewrite canθ'-canθ-propagate-down-in sigs (suc S'') r θ S'' sigs' S''∈canθ'-sigs-⟨Canθ-u∷sigs'-r-θ⟩ = trans (canθ'-←-distribute sigs sigs' (suc S'') r (θ ← [S]-env (S'' ₛ))) (canθ'-search-acc sigs (suc S'') (Canθ sigs' (suc S'') r) θ S'' Signal.unknown (n∉map-suc-n-+ S'' (SigMap.keys sigs))) ... | no S''∉canθ'-sigs←sigs'-r-θ←[S''] | no S''∉canθ'-sigs-⟨Canθ-u∷sigs'-r-θ⟩ rewrite canθ'-canθ-propagate-down-not-in sigs (suc S'') r θ S'' sigs' S''∉canθ'-sigs-⟨Canθ-u∷sigs'-r-θ⟩ = trans (canθ'-←-distribute sigs sigs' (suc S'') r (θ ← [S]-env-absent (S'' ₛ))) (canθ'-search-acc sigs (suc S'') (Canθ sigs' (suc S'') r) θ S'' Signal.absent (n∉map-suc-n-+ S'' (SigMap.keys sigs))) ... | yes S''∈canθ'-sigs←sigs'-r-θ←[S''] | no S''∉canθ'-sigs-⟨Canθ-u∷sigs'-r-θ⟩ rewrite trans (canθ'-←-distribute sigs sigs' (suc S'') r (θ ← [S]-env (S'' ₛ))) (canθ'-search-acc sigs (suc S'') (Canθ sigs' (suc S'') r) θ S'' Signal.unknown (n∉map-suc-n-+ S'' (SigMap.keys sigs))) = ⊥-elim (S''∉canθ'-sigs-⟨Canθ-u∷sigs'-r-θ⟩ (canθ'-canθ-propagate-up-in sigs (suc S'') r θ sigs' S'' S'' S''∈canθ'-sigs←sigs'-r-θ←[S''] S''∈canθ'-sigs←sigs'-r-θ←[S''])) ... | no S''∉canθ'-sigs←sigs'-r-θ←[S''] | yes S''∈canθ'-sigs-⟨Canθ-u∷sigs'-r-θ⟩ rewrite trans (canθ'-←-distribute sigs sigs' (suc S'') r (θ ← [S]-env (S'' ₛ))) (canθ'-search-acc sigs (suc S'') (Canθ sigs' (suc S'') r) θ S'' Signal.unknown (n∉map-suc-n-+ S'' (SigMap.keys sigs))) = ⊥-elim (S''∉canθ'-sigs←sigs'-r-θ←[S''] (subst (S'' ∈_) (cong proj₁ (canθ'-canθ-propagate-down-in sigs (suc S'') r θ S'' sigs' S''∈canθ'-sigs-⟨Canθ-u∷sigs'-r-θ⟩)) S''∈canθ'-sigs-⟨Canθ-u∷sigs'-r-θ⟩)) canθ'-←-distribute (just Signal.present ∷ sigs) (just Signal.present ∷ sigs') S'' r θ = trans (canθ'-←-distribute sigs sigs' (suc S'') r (θ ← [S]-env-present (S'' ₛ))) (canθ'-search-acc-set-irr sigs (suc S'') (Canθ sigs' (suc S'') r) θ S'' Signal.present Signal.present (n∉map-suc-n-+ S'' (SigMap.keys sigs))) canθ'-←-distribute (just Signal.absent ∷ sigs) (just Signal.present ∷ sigs') S'' r θ = trans (canθ'-←-distribute sigs sigs' (suc S'') r (θ ← [S]-env-present (S'' ₛ))) (canθ'-search-acc-set-irr sigs (suc S'') (Canθ sigs' (suc S'') r) θ S'' Signal.present Signal.absent (n∉map-suc-n-+ S'' (SigMap.keys sigs))) canθ'-←-distribute (just Signal.unknown ∷ sigs) (just Signal.present ∷ sigs') S'' r θ with any (_≟_ S'') (proj₁ (Canθ' sigs (suc S'') (λ θ* → Canθ sigs' (suc S'') r (θ* ← [S]-env-present (S'' ₛ))) (θ ← [S]-env (S'' ₛ)))) ... | yes S''∈canθ'-sigs-⟨Canθ-sigs'-r-θ*←[S'']⟩-θ←[S''] = trans (canθ'-←-distribute sigs sigs' (suc S'') r (θ ← [S]-env-present (S'' ₛ))) (canθ'-search-acc-set-irr sigs (suc S'') (Canθ sigs' (suc S'') r) θ S'' Signal.present Signal.unknown (n∉map-suc-n-+ S'' (SigMap.keys sigs))) ... 
| no S''∉canθ'-sigs-⟨Canθ-sigs'-r-θ*←[S'']⟩-θ←[S''] = trans (canθ'-←-distribute sigs sigs' (suc S'') r (θ ← [S]-env-present (S'' ₛ))) (canθ'-search-acc-set-irr sigs (suc S'') (Canθ sigs' (suc S'') r) θ S'' Signal.present Signal.absent (n∉map-suc-n-+ S'' (SigMap.keys sigs))) canθ'-←-distribute (just Signal.present ∷ sigs) (just Signal.absent ∷ sigs') S'' r θ = trans (canθ'-←-distribute sigs sigs' (suc S'') r (θ ← [S]-env-absent (S'' ₛ))) (canθ'-search-acc-set-irr sigs (suc S'') (Canθ sigs' (suc S'') r) θ S'' Signal.absent Signal.present (n∉map-suc-n-+ S'' (SigMap.keys sigs))) canθ'-←-distribute (just Signal.absent ∷ sigs) (just Signal.absent ∷ sigs') S'' r θ = trans (canθ'-←-distribute sigs sigs' (suc S'') r (θ ← [S]-env-absent (S'' ₛ))) (canθ'-search-acc-set-irr sigs (suc S'') (Canθ sigs' (suc S'') r) θ S'' Signal.absent Signal.absent (n∉map-suc-n-+ S'' (SigMap.keys sigs))) canθ'-←-distribute (just Signal.unknown ∷ sigs) (just Signal.absent ∷ sigs') S'' r θ with any (_≟_ S'') (proj₁ (Canθ' sigs (suc S'') (λ θ* → Canθ sigs' (suc S'') r (θ* ← [S]-env-absent (S'' ₛ))) (θ ← [S]-env (S'' ₛ)))) ... | yes S''∈canθ'-sigs-⟨Canθ-sigs'-r-θ*←[S'']⟩-θ←[S''] = trans (canθ'-←-distribute sigs sigs' (suc S'') r (θ ← [S]-env-absent (S'' ₛ))) (canθ'-search-acc-set-irr sigs (suc S'') (Canθ sigs' (suc S'') r) θ S'' Signal.absent Signal.unknown (n∉map-suc-n-+ S'' (SigMap.keys sigs))) ... | no S''∉canθ'-sigs-⟨Canθ-sigs'-r-θ*←[S'']⟩-θ←[S''] = trans (canθ'-←-distribute sigs sigs' (suc S'') r (θ ← [S]-env-absent (S'' ₛ))) (canθ'-search-acc-set-irr sigs (suc S'') (Canθ sigs' (suc S'') r) θ S'' Signal.absent Signal.absent (n∉map-suc-n-+ S'' (SigMap.keys sigs))) canθ'-←-distribute (just Signal.present ∷ sigs) (just Signal.unknown ∷ sigs') S'' r θ with any (_≟_ S'') (proj₁ (Canθ (SigMap.union sigs sigs') (suc S'') r (θ ← [S]-env (S'' ₛ)))) | any (_≟_ S'') (proj₁ (Canθ' sigs (suc S'') (Canθ (just Signal.unknown ∷ sigs') S'' r) (θ ← [S]-env-present (S'' ₛ)))) ... | yes S''∈canθ'-sigs←sigs'-r-θ←[S''] | yes S''∈canθ'-sigs-⟨Canθ-u∷sigs'-r-θ⟩ rewrite canθ'-canθ-propagate-down-in sigs (suc S'') r (θ ← [S]-env-present (S'' ₛ)) S'' sigs' S''∈canθ'-sigs-⟨Canθ-u∷sigs'-r-θ⟩ = trans (canθ'-←-distribute sigs sigs' (suc S'') r (θ ← [S]-env (S'' ₛ))) (canθ'-search-acc-set-irr sigs (suc S'') (Canθ sigs' (suc S'') r) θ S'' Signal.unknown Signal.present (n∉map-suc-n-+ S'' (SigMap.keys sigs))) ... | no S''∉canθ'-sigs←sigs'-r-θ←[S''] | no S''∉canθ'-sigs-⟨Canθ-u∷sigs'-r-θ⟩ rewrite canθ'-canθ-propagate-down-not-in sigs (suc S'') r (θ ← [S]-env-present (S'' ₛ)) S'' sigs' S''∉canθ'-sigs-⟨Canθ-u∷sigs'-r-θ⟩ = trans (canθ'-←-distribute sigs sigs' (suc S'') r (θ ← [S]-env-absent (S'' ₛ))) (canθ'-search-acc-set-irr sigs (suc S'') (Canθ sigs' (suc S'') r) θ S'' Signal.absent Signal.present (n∉map-suc-n-+ S'' (SigMap.keys sigs))) ... | yes S''∈canθ'-sigs←sigs'-r-θ←[S''] | no S''∉canθ'-sigs-⟨Canθ-u∷sigs'-r-θ⟩ rewrite trans (canθ'-←-distribute sigs sigs' (suc S'') r (θ ← [S]-env (S'' ₛ))) (canθ'-search-acc sigs (suc S'') (Canθ sigs' (suc S'') r) θ S'' Signal.unknown (n∉map-suc-n-+ S'' (SigMap.keys sigs))) = ⊥-elim (S''∉canθ'-sigs-⟨Canθ-u∷sigs'-r-θ⟩ (subst (S'' ∈_) (sym (cong proj₁ (canθ'-search-acc sigs (suc S'') (Canθ (just Signal.unknown ∷ sigs') S'' r) θ S'' Signal.present (n∉map-suc-n-+ S'' (SigMap.keys sigs))))) (canθ'-canθ-propagate-up-in-set-irr sigs (suc S'') r θ Signal.present sigs' S'' S'' S''∈canθ'-sigs←sigs'-r-θ←[S''] S''∈canθ'-sigs←sigs'-r-θ←[S'']))) ... 
| no S''∉canθ'-sigs←sigs'-r-θ←[S''] | yes S''∈canθ'-sigs-⟨Canθ-u∷sigs'-r-θ⟩ rewrite trans (canθ'-←-distribute sigs sigs' (suc S'') r (θ ← [S]-env (S'' ₛ))) (canθ'-search-acc sigs (suc S'') (Canθ sigs' (suc S'') r) θ S'' Signal.unknown (n∉map-suc-n-+ S'' (SigMap.keys sigs))) | canθ'-search-acc sigs (suc S'') (Canθ (just Signal.unknown ∷ sigs') S'' r) θ S'' Signal.present (n∉map-suc-n-+ S'' (SigMap.keys sigs)) = ⊥-elim (S''∉canθ'-sigs←sigs'-r-θ←[S''] (subst (S'' ∈_) (cong proj₁ (canθ'-canθ-propagate-down-in-set-irr sigs (suc S'') r θ Signal.present S'' sigs' S''∈canθ'-sigs-⟨Canθ-u∷sigs'-r-θ⟩)) S''∈canθ'-sigs-⟨Canθ-u∷sigs'-r-θ⟩)) canθ'-←-distribute (just Signal.absent ∷ sigs) (just Signal.unknown ∷ sigs') S'' r θ with any (_≟_ S'') (proj₁ (Canθ (SigMap.union sigs sigs') (suc S'') r (θ ← [S]-env (S'' ₛ)))) | any (_≟_ S'') (proj₁ (Canθ' sigs (suc S'') (Canθ (just Signal.unknown ∷ sigs') S'' r) (θ ← [S]-env-absent (S'' ₛ)))) ... | yes S''∈canθ'-sigs←sigs'-r-θ←[S''] | yes S''∈canθ'-sigs-⟨Canθ-u∷sigs'-r-θ⟩ rewrite canθ'-canθ-propagate-down-in sigs (suc S'') r (θ ← [S]-env-absent (S'' ₛ)) S'' sigs' S''∈canθ'-sigs-⟨Canθ-u∷sigs'-r-θ⟩ = trans (canθ'-←-distribute sigs sigs' (suc S'') r (θ ← [S]-env (S'' ₛ))) (canθ'-search-acc-set-irr sigs (suc S'') (Canθ sigs' (suc S'') r) θ S'' Signal.unknown Signal.absent (n∉map-suc-n-+ S'' (SigMap.keys sigs))) ... | no S''∉canθ'-sigs←sigs'-r-θ←[S''] | no S''∉canθ'-sigs-⟨Canθ-u∷sigs'-r-θ⟩ rewrite canθ'-canθ-propagate-down-not-in sigs (suc S'') r (θ ← [S]-env-absent (S'' ₛ)) S'' sigs' S''∉canθ'-sigs-⟨Canθ-u∷sigs'-r-θ⟩ = trans (canθ'-←-distribute sigs sigs' (suc S'') r (θ ← [S]-env-absent (S'' ₛ))) (canθ'-search-acc-set-irr sigs (suc S'') (Canθ sigs' (suc S'') r) θ S'' Signal.absent Signal.absent (n∉map-suc-n-+ S'' (SigMap.keys sigs))) ... | yes S''∈canθ'-sigs←sigs'-r-θ←[S''] | no S''∉canθ'-sigs-⟨Canθ-u∷sigs'-r-θ⟩ rewrite trans (canθ'-←-distribute sigs sigs' (suc S'') r (θ ← [S]-env (S'' ₛ))) (canθ'-search-acc sigs (suc S'') (Canθ sigs' (suc S'') r) θ S'' Signal.unknown (n∉map-suc-n-+ S'' (SigMap.keys sigs))) = ⊥-elim (S''∉canθ'-sigs-⟨Canθ-u∷sigs'-r-θ⟩ (subst (S'' ∈_) (sym (cong proj₁ (canθ'-search-acc sigs (suc S'') (Canθ (just Signal.unknown ∷ sigs') S'' r) θ S'' Signal.absent (n∉map-suc-n-+ S'' (SigMap.keys sigs))))) (canθ'-canθ-propagate-up-in-set-irr sigs (suc S'') r θ Signal.absent sigs' S'' S'' S''∈canθ'-sigs←sigs'-r-θ←[S''] S''∈canθ'-sigs←sigs'-r-θ←[S'']))) ... 
| no S''∉canθ'-sigs←sigs'-r-θ←[S''] | yes S''∈canθ'-sigs-⟨Canθ-u∷sigs'-r-θ⟩ rewrite trans (canθ'-←-distribute sigs sigs' (suc S'') r (θ ← [S]-env (S'' ₛ))) (canθ'-search-acc sigs (suc S'') (Canθ sigs' (suc S'') r) θ S'' Signal.unknown (n∉map-suc-n-+ S'' (SigMap.keys sigs))) | canθ'-search-acc sigs (suc S'') (Canθ (just Signal.unknown ∷ sigs') S'' r) θ S'' Signal.absent (n∉map-suc-n-+ S'' (SigMap.keys sigs)) = ⊥-elim (S''∉canθ'-sigs←sigs'-r-θ←[S''] (subst (S'' ∈_) (cong proj₁ (canθ'-canθ-propagate-down-in-set-irr sigs (suc S'') r θ Signal.absent S'' sigs' S''∈canθ'-sigs-⟨Canθ-u∷sigs'-r-θ⟩)) S''∈canθ'-sigs-⟨Canθ-u∷sigs'-r-θ⟩)) canθ'-←-distribute (just Signal.unknown ∷ sigs) (just Signal.unknown ∷ sigs') S'' r θ with any (_≟_ S'') (proj₁ (Canθ' sigs (suc S'') (λ θ* → Canθ (just Signal.unknown ∷ sigs') S'' r θ*) (θ ← [S]-env (S'' ₛ)))) canθ'-←-distribute (just Signal.unknown ∷ sigs) (just Signal.unknown ∷ sigs') S'' r θ | yes p with any (_≟_ S'') (proj₁ (Canθ (SigMap.union sigs sigs') (suc S'') r (θ ← [S]-env (S'' ₛ)))) | any (_≟_ S'') (proj₁ (Canθ' sigs (suc S'') (Canθ (just Signal.unknown ∷ sigs') S'' r) (θ ← [S]-env (S'' ₛ)))) ... | yes S''∈canθ'-sigs←sigs'-r-θ←[S''] | yes S''∈canθ'-sigs-⟨Canθ-u∷sigs'-r-θ⟩ rewrite canθ'-canθ-propagate-down-in sigs (suc S'') r (θ ← [S]-env (S'' ₛ)) S'' sigs' S''∈canθ'-sigs-⟨Canθ-u∷sigs'-r-θ⟩ = trans (canθ'-←-distribute sigs sigs' (suc S'') r (θ ← [S]-env (S'' ₛ))) (canθ'-search-acc-set-irr sigs (suc S'') (Canθ sigs' (suc S'') r) θ S'' Signal.unknown Signal.unknown (n∉map-suc-n-+ S'' (SigMap.keys sigs))) ... | no S''∉canθ'-sigs←sigs'-r-θ←[S''] | no S''∉canθ'-sigs-⟨Canθ-u∷sigs'-r-θ⟩ rewrite canθ'-canθ-propagate-down-not-in sigs (suc S'') r (θ ← [S]-env (S'' ₛ)) S'' sigs' S''∉canθ'-sigs-⟨Canθ-u∷sigs'-r-θ⟩ = trans (canθ'-←-distribute sigs sigs' (suc S'') r (θ ← [S]-env-absent (S'' ₛ))) (canθ'-search-acc-set-irr sigs (suc S'') (Canθ sigs' (suc S'') r) θ S'' Signal.absent Signal.unknown (n∉map-suc-n-+ S'' (SigMap.keys sigs))) ... | yes S''∈canθ'-sigs←sigs'-r-θ←[S''] | no S''∉canθ'-sigs-⟨Canθ-u∷sigs'-r-θ⟩ rewrite trans (canθ'-←-distribute sigs sigs' (suc S'') r (θ ← [S]-env (S'' ₛ))) (canθ'-search-acc sigs (suc S'') (Canθ sigs' (suc S'') r) θ S'' Signal.unknown (n∉map-suc-n-+ S'' (SigMap.keys sigs))) = ⊥-elim (S''∉canθ'-sigs-⟨Canθ-u∷sigs'-r-θ⟩ (subst (S'' ∈_) (sym (cong proj₁ (canθ'-search-acc sigs (suc S'') (Canθ (just Signal.unknown ∷ sigs') S'' r) θ S'' Signal.unknown (n∉map-suc-n-+ S'' (SigMap.keys sigs))))) (canθ'-canθ-propagate-up-in-set-irr sigs (suc S'') r θ Signal.unknown sigs' S'' S'' S''∈canθ'-sigs←sigs'-r-θ←[S''] S''∈canθ'-sigs←sigs'-r-θ←[S'']))) ... 
| no S''∉canθ'-sigs←sigs'-r-θ←[S''] | yes S''∈canθ'-sigs-⟨Canθ-u∷sigs'-r-θ⟩ rewrite trans (canθ'-←-distribute sigs sigs' (suc S'') r (θ ← [S]-env (S'' ₛ))) (canθ'-search-acc sigs (suc S'') (Canθ sigs' (suc S'') r) θ S'' Signal.unknown (n∉map-suc-n-+ S'' (SigMap.keys sigs))) | canθ'-search-acc sigs (suc S'') (Canθ (just Signal.unknown ∷ sigs') S'' r) θ S'' Signal.unknown (n∉map-suc-n-+ S'' (SigMap.keys sigs)) = ⊥-elim (S''∉canθ'-sigs←sigs'-r-θ←[S''] (subst (S'' ∈_) (cong proj₁ (canθ'-canθ-propagate-down-in-set-irr sigs (suc S'') r θ Signal.unknown S'' sigs' S''∈canθ'-sigs-⟨Canθ-u∷sigs'-r-θ⟩)) S''∈canθ'-sigs-⟨Canθ-u∷sigs'-r-θ⟩)) canθ'-←-distribute (just Signal.unknown ∷ sigs) (just Signal.unknown ∷ sigs') S'' r θ | no ¬p with any (_≟_ S'') (proj₁ (Canθ (SigMap.union sigs sigs') (suc S'') r (θ ← [S]-env (S'' ₛ)))) | any (_≟_ S'') (proj₁ (Canθ' sigs (suc S'') (Canθ (just Signal.unknown ∷ sigs') S'' r) (θ ← [S]-env-absent (S'' ₛ)))) ... | yes S''∈canθ'-sigs←sigs'-r-θ←[S''] | yes S''∈canθ'-sigs-⟨Canθ-u∷sigs'-r-θ⟩ rewrite canθ'-canθ-propagate-down-in sigs (suc S'') r (θ ← [S]-env-absent (S'' ₛ)) S'' sigs' S''∈canθ'-sigs-⟨Canθ-u∷sigs'-r-θ⟩ = trans (canθ'-←-distribute sigs sigs' (suc S'') r (θ ← [S]-env (S'' ₛ))) (canθ'-search-acc-set-irr sigs (suc S'') (Canθ sigs' (suc S'') r) θ S'' Signal.unknown Signal.absent (n∉map-suc-n-+ S'' (SigMap.keys sigs))) ... | no S''∉canθ'-sigs←sigs'-r-θ←[S''] | no S''∉canθ'-sigs-⟨Canθ-u∷sigs'-r-θ⟩ rewrite canθ'-canθ-propagate-down-not-in sigs (suc S'') r (θ ← [S]-env-absent (S'' ₛ)) S'' sigs' S''∉canθ'-sigs-⟨Canθ-u∷sigs'-r-θ⟩ = trans (canθ'-←-distribute sigs sigs' (suc S'') r (θ ← [S]-env-absent (S'' ₛ))) (canθ'-search-acc-set-irr sigs (suc S'') (Canθ sigs' (suc S'') r) θ S'' Signal.absent Signal.absent (n∉map-suc-n-+ S'' (SigMap.keys sigs))) ... | yes S''∈canθ'-sigs←sigs'-r-θ←[S''] | no S''∉canθ'-sigs-⟨Canθ-u∷sigs'-r-θ⟩ rewrite trans (canθ'-←-distribute sigs sigs' (suc S'') r (θ ← [S]-env (S'' ₛ))) (canθ'-search-acc sigs (suc S'') (Canθ sigs' (suc S'') r) θ S'' Signal.unknown (n∉map-suc-n-+ S'' (SigMap.keys sigs))) = ⊥-elim (S''∉canθ'-sigs-⟨Canθ-u∷sigs'-r-θ⟩ (subst (S'' ∈_) (sym (cong proj₁ (canθ'-search-acc sigs (suc S'') (Canθ (just Signal.unknown ∷ sigs') S'' r) θ S'' Signal.absent (n∉map-suc-n-+ S'' (SigMap.keys sigs))))) (canθ'-canθ-propagate-up-in-set-irr sigs (suc S'') r θ Signal.absent sigs' S'' S'' S''∈canθ'-sigs←sigs'-r-θ←[S''] S''∈canθ'-sigs←sigs'-r-θ←[S'']))) ... | no S''∉canθ'-sigs←sigs'-r-θ←[S''] | yes S''∈canθ'-sigs-⟨Canθ-u∷sigs'-r-θ⟩ rewrite trans (canθ'-←-distribute sigs sigs' (suc S'') r (θ ← [S]-env (S'' ₛ))) (canθ'-search-acc sigs (suc S'') (Canθ sigs' (suc S'') r) θ S'' Signal.unknown (n∉map-suc-n-+ S'' (SigMap.keys sigs))) | canθ'-search-acc sigs (suc S'') (Canθ (just Signal.unknown ∷ sigs') S'' r) θ S'' Signal.absent (n∉map-suc-n-+ S'' (SigMap.keys sigs)) = ⊥-elim (S''∉canθ'-sigs←sigs'-r-θ←[S''] (subst (S'' ∈_) (cong proj₁ (canθ'-canθ-propagate-down-in-set-irr sigs (suc S'') r θ Signal.absent S'' sigs' S''∈canθ'-sigs-⟨Canθ-u∷sigs'-r-θ⟩)) S''∈canθ'-sigs-⟨Canθ-u∷sigs'-r-θ⟩))
#ifndef PMVS3_OPTIM_H #define PMVS3_OPTIM_H #include <vector> #include "patch.h" #include <gsl/gsl_multimin.h> namespace PMVS3 { class CfindMatch; class Coptim { public: Coptim(CfindMatch& findMatch); void init(void); //----------------------------------------------------------------- // Image manipulation //----------------------------------------------------------------- void collectImages(const int index, std::vector<int>& indexes) const; void addImages(Patch::Cpatch& patch) const; void removeImagesEdge(Patch::Cpatch& patch) const; float getUnit(const int index, const Vec4f& coord) const; void computeUnits(const Patch::Cpatch& patch, std::vector<int>& indexes, std::vector<float>& fineness, std::vector<Vec4f>& rays) const; void computeUnits(const Patch::Cpatch& patch, std::vector<float>& fineness) const; //----------------------------------------------------------------- // Optimization //----------------------------------------------------------------- int preProcess(Patch::Cpatch& patch, const int id, const int seed); void refinePatch(Patch::Cpatch& patch, const int id, const int time); void refinePatchBFGS(Patch::Cpatch& patch, const int id, const int time); void refinePatchBFGS(Patch::Cpatch& patch, const int id, const int time, const int ncc); bool refinePatchBFGS2(Patch::Cpatch& patch, const int id, const int time, const int ncc); // LM version void refineDepthBFGS(Patch::Cpatch& patch, const int id, const int time, const int ncc); int postProcess(Patch::Cpatch& patch, const int id, const int seed); void setRefImage(Patch::Cpatch& patch, const int id); int check(Patch::Cpatch& patch); std::vector<int> m_status; protected: void filterImagesByAngle(Patch::Cpatch& patch); void sortImages(Patch::Cpatch& patch) const; void constraintImages(Patch::Cpatch& patch, const float nccThreshold, const int id); void setRefConstraintImages(Patch::Cpatch& patch, const float nccThreshold, const int id); void setINCCs(const Patch::Cpatch& patch, std::vector<float> & nccs, const std::vector<int>& indexes, const int id, const int robust); void setINCCs(const Patch::Cpatch& patch, std::vector<std::vector<float> >& nccs, const std::vector<int>& indexes, const int id, const int robust); int grabTex(const Vec4f& coord, const Vec4f& pxaxis, const Vec4f& pyaxis, const Vec4f& pzaxis, const int index, const int size, std::vector<float>& tex) const; int grabSafe(const int index, const int size, const Vec3f& center, const Vec3f& dx, const Vec3f& dy, const int level) const; double computeSSD(const Vec4f& coord, const Vec4f& normal, const std::vector<int>& indexes, const int id); double computeSSD(const Vec4f& coord, const Vec4f& normal, const std::vector<int>& indexes, const Vec4f& pxaxis, const Vec4f& pyaxis, const int id); /* double computeINCC(const Vec4f& coord, const Vec4f& normal, const std::vector<int>& indexes, const int id, const int robust); */ double computeINCC(const Vec4f& coord, const Vec4f& normal, const std::vector<int>& indexes, const Vec4f& pxaxis, const Vec4f& pyaxis, const int id, const int robust); public: static void normalize(std::vector<float>& tex); static void normalize(std::vector<std::vector<float> >& texs, const int size); float dot(const std::vector<float>& tex0, const std::vector<float>& tex1) const; float ssd(const std::vector<float>& tex0, const std::vector<float>& tex1) const; protected: static void lfunc(double* p, double* hx, int m, int n, void* adata); void func(int m, int n, double* x, double* fvec, int* iflag, void* arg); //BFGS static double my_f(const gsl_vector *v, void 
*params); static void my_f_lm(const double *par, int m_dat, const void *data, double *fvec, int *info); // LM version static void my_df(const gsl_vector *v, void *params, gsl_vector *df); static void my_fdf(const gsl_vector *x, void *params, double *f, gsl_vector *df); // for derivative computation static double my_f0(double x, void* params); static double my_f1(double x, void* params); static double my_f2(double x, void* params); //---------------------------------------------------------------------- // For ssd static double my_f_ssd(const gsl_vector *v, void *params); static void my_df_ssd(const gsl_vector *v, void *params, gsl_vector *df); static void my_fdf_ssd(const gsl_vector *x, void *params, double *f, gsl_vector *df); // for derivative computation static double my_f_ssd0(double x, void* params); static double my_f_ssd1(double x, void* params); static double my_f_ssd2(double x, void* params); //---------------------------------------------------------------------- // For debugging depth static double my_f_depth(const gsl_vector *v, void *params); static void my_df_depth(const gsl_vector *v, void *params, gsl_vector *df); static void my_fdf_depth(const gsl_vector *x, void *params, double *f, gsl_vector *df); // for derivative computation static double my_f0_depth(double x, void* params); void encode(const Vec4f& coord, double* const vect, const int id) const; void encode(const Vec4f& coord, const Vec4f& normal, double* const vect, const int id) const; void decode(Vec4f& coord, Vec4f& normal, const double* const vect, const int id) const; void decode(Vec4f& coord, const double* const vect, const int id) const; public: void setWeightsT(const Patch::Cpatch& patch, const int id); double computeINCC(const Vec4f& coord, const Vec4f& normal, const std::vector<int>& indexes, const int id, const int robust); void getPAxes(const int index, const Vec4f& coord, const Vec4f& normal, Vec4f& pxaxis, Vec4f& pyaxis) const; static inline float robustincc(const float rhs) { return rhs / (1 + 3 * rhs); } static inline float unrobustincc(const float rhs) { return rhs / (1 - 3 * rhs); } protected: void setAxesScales(void); static Coptim* m_one; CfindMatch& m_fm; //----------------------------------------------------------------- // Axes std::vector<Vec3f> m_xaxes; std::vector<Vec3f> m_yaxes; std::vector<Vec3f> m_zaxes; // Scales std::vector<float> m_ipscales; //----------------------------------------------------------------- // For threads std::vector<float> m_vect0T; std::vector<Vec4f> m_centersT; std::vector<Vec4f> m_raysT; std::vector<std::vector<int> > m_indexesT; std::vector<float> m_dscalesT; std::vector<float> m_ascalesT; // stores current parameters for derivative computation std::vector<Vec3f> m_paramsT; // Grabbed texture std::vector<std::vector<std::vector<float> > > m_texsT; // last is 7x7x3 patch // weights for refineDepthOrientationWeighed std::vector<std::vector<float> > m_weightsT; // Working array for levmar std::vector<std::vector<double> > m_worksT; }; }; #endif // PMVS3_OPTIM_H
theory ExF002 imports Main begin lemma "\<lbrakk> \<forall>x.(P x \<longrightarrow> Q x) ; P a \<rbrakk> \<Longrightarrow> Q a" proof - assume "\<forall>x.(P x \<longrightarrow> Q x)" assume "P a" from \<open>\<forall>x.(P x \<longrightarrow> Q x)\<close> have "P a \<longrightarrow> Q a" by (rule allE) from this and \<open>P a\<close> show ?thesis by (rule impE) qed
------------------------------------------------------------------------------ -- Proving properties without using pattern matching on refl ------------------------------------------------------------------------------ {-# OPTIONS --no-pattern-matching #-} {-# OPTIONS --exact-split #-} {-# OPTIONS --no-sized-types #-} {-# OPTIONS --no-universe-polymorphism #-} module FOT.PA.Inductive2Standard.NoPatternMatchingOnRefl where open import PA.Inductive.Base ------------------------------------------------------------------------------ -- From PA.Inductive2Standard -- 20 May 2013. Requires the predecessor function. -- PA₂ : ∀ {m n} → succ m ≡ succ n → m ≡ n
# Lab 1: Single-qubit and multi-qubit states, quantum teleportation

In this lab, you will learn how to write `Qiskit` code and investigate single-qubit and multi-qubit states using the `qsphere` visualization that you learned in lecture 1.

If you have not used Jupyter notebooks before, take a look at the following video to quickly get started.
- https://www.youtube.com/watch?v=jZ952vChhuI

Remember, to run a cell in Jupyter notebooks, you press `Shift` + `Return/Enter` on your keyboard.

### Installing necessary packages

Before we begin, you will need to install some prerequisites into your environment. Run the cell below to complete these installations. At the end, the cell outputs will be cleared.

```python
!pip install -U -r grading_tools/requirements.txt
from IPython.display import clear_output
clear_output()
```
# Single-qubit states In lecture, you learned that single qubit states can be written down generally as $$\sqrt{1-p}\vert0\rangle + e^{i\phi}\sqrt{p}\vert1\rangle$$ Here, $p$ is the probability that a measurement of the state in the computational basis $\{\vert0\rangle, \vert1\rangle\}$ will have the outcome $1$, and $\phi$ is the phase between the two computational basis states. Single-qubit gates can then be used to manipulate this quantum state by changing either $p$, $\phi$, or both. Let's begin by creating a single-qubit quantum circuit. We can do this in `Qiskit` using the following: ```python from qiskit import QuantumCircuit mycircuit = QuantumCircuit(1) mycircuit.draw('mpl') ``` The above quantum circuit does not contain any gates. Therefore, if you start in any state, say $\vert0\rangle$, applying this circuit to your state doesn't change the state. To see this clearly, let's create the statevector $\vert0\rangle$. In `Qiskit`, you can do this using the following: ```python from qiskit.quantum_info import Statevector sv = Statevector.from_label('0') ``` You can see what's contained in the object `sv`: ```python sv ``` Statevector([1.+0.j, 0.+0.j], dims=(2,)) The vector itself can be found by writing ```python sv.data ``` array([1.+0.j, 0.+0.j]) As you can see, the above matches what you learned in lecture. Recall that $$\vert0\rangle = \begin{bmatrix}1\\0\end{bmatrix}$$ We can now apply the quantum circuit `mycircuit` to this state by using the following: ```python new_sv = sv.evolve(mycircuit) ``` Once again, you can look at the new statevector by writing ```python new_sv ``` Statevector([1.+0.j, 0.+0.j], dims=(2,)) As you can see, the statevector hasn't changed. Recall the concept of state projection that you learned in lecture. You can compute the projection of `new_sv` onto `sv` by writing ```python from qiskit.quantum_info import state_fidelity state_fidelity(sv, new_sv) ``` 1.0 As you can see, the projection of `new_sv` onto `sv` is 1, indicating that the two states are identical.
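As an optional aside (not part of the lab's exercises; the particular values of `p` and `phi` below are arbitrary choices for illustration), here is one way to connect the general formula $\sqrt{1-p}\vert0\rangle + e^{i\phi}\sqrt{p}\vert1\rangle$ to code: build the statevector directly from its amplitudes and check that the numbers behave as described.

```python
import numpy as np
from qiskit.quantum_info import Statevector

# Arbitrary example values for the general single-qubit state
p = 0.3          # probability of measuring the outcome 1
phi = np.pi / 4  # relative phase between |0> and |1>

# Amplitudes of sqrt(1-p)|0> + e^{i*phi} sqrt(p)|1>
example_sv = Statevector(np.array([np.sqrt(1 - p),
                                   np.exp(1j * phi) * np.sqrt(p)]))

# The measurement probabilities are the squared magnitudes of the amplitudes,
# so the outcome 1 should occur with probability p.
print(np.abs(example_sv.data) ** 2)     # expected: [0.7, 0.3]

# The relative phase can be read off from the angle of the |1> amplitude.
print(np.angle(example_sv.data[1]))     # expected: pi/4, roughly 0.785
```

This is only a side check; the rest of the lab continues with the state $\vert0\rangle$ stored in `sv` above.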
You can visualize this state using the `qsphere` by writing ```python from qiskit.visualization import plot_state_qsphere plot_state_qsphere(sv.data) ``` As you learned in lecture 1, applying an $X$ gate flips the qubit from the state $\vert0\rangle$ to the state $\vert1\rangle$. To see this clearly, we will first create a single-qubit quantum circuit with the $X$ gate. ```python mycircuit = QuantumCircuit(1) mycircuit.x(0) mycircuit.draw('mpl') ``` Now, we can apply this circuit to our state by writing ```python sv = Statevector.from_label('0') new_sv = sv.evolve(mycircuit) new_sv ``` Statevector([0.+0.j, 1.+0.j], dims=(2,)) As you can see, the statevector now corresponds to that of the state $\vert1\rangle$. Recall that $$\vert1\rangle = \begin{bmatrix}0\\1\end{bmatrix}$$ Now, the projection of `new_sv` onto `sv` is ```python state_fidelity(new_sv, sv) ``` 0.0 This is not surprising. Recall from the lecture that the states $\vert0\rangle$ and $\vert1\rangle$ are orthogonal. Therefore, $\langle0\vert1\rangle = 0$. The state can be shown on the `qsphere` by writing ```python plot_state_qsphere(new_sv.data) ``` Similarly, we can create the state $$\frac{1}{\sqrt{2}}\left(\vert0\rangle + \vert1\rangle\right)$$ by applying a Hadamard gate as you learned in lecture. Here is how we can create the state and visualize it in `Qiskit`: ```python sv = Statevector.from_label('0') mycircuit = QuantumCircuit(1) mycircuit.h(0) mycircuit.draw('mpl') ``` ```python new_sv = sv.evolve(mycircuit) print(new_sv) plot_state_qsphere(new_sv.data) ``` As you can see above, the state has equal components of $\vert0\rangle$ and $\vert1\rangle$. The size of the circle is proportional to the probability of measuring each basis state in the statevector. As a result, the circles here are half the size of the circles in our previous visualizations. Recall from lecture that we can also create other superpositions with a different phase. Let's create $$\frac{1}{\sqrt{2}}\left(\vert0\rangle - \vert1\rangle\right)$$ which can be done by applying the Hadamard gate on the state $\vert1\rangle$. ```python sv = Statevector.from_label('1') mycircuit = QuantumCircuit(1) mycircuit.h(0) new_sv = sv.evolve(mycircuit) print(new_sv) plot_state_qsphere(new_sv.data) ``` This time, the bottom circle, corresponding to the basis state $\vert1\rangle$, has a different color corresponding to the phase of $\phi = \pi$. This is because the coefficient of $\vert1\rangle$ in the state $$\frac{1}{\sqrt{2}}\left(\vert0\rangle - \vert1\rangle\right)$$ is $-1$, which is equal to $e^{i\pi}$. Other phases can also be created by applying different gates. The $T$ and $S$ gates apply phases of $+\pi/4$ and $+\pi/2$, respectively. The widget below helps you see different gates and their actions on single-qubit quantum states. ```python from resources.qiskit_textbook.widgets import gate_demo gate_demo(qsphere=True) ``` A summary of the operations of the most common gates on single-qubit states is given by the handy image below, where the phases are shown in degrees. # Multi-qubit states Similar to the discussion above, you can also explore multi-qubit gates in `Qiskit`. In lecture, you learned about Bell states and how they can be generated using quantum gates.
We will demonstrate below how to create the Bell state $$\frac{1}{\sqrt{2}}\left(\vert00\rangle + \vert11\rangle\right)$$ from the state $\vert00\rangle$. We'll start by visualizing the state $\vert00\rangle$ using the same procedure: ```python sv = Statevector.from_label('00') plot_state_qsphere(sv.data) ``` Next, we use the Hadamard gate described above, along with a controlled-X gate, to create the Bell state. ```python mycircuit = QuantumCircuit(2) mycircuit.h(0) mycircuit.cx(0,1) mycircuit.draw('mpl') ``` The result of this quantum circuit on the state $\vert00\rangle$ is found by writing ```python new_sv = sv.evolve(mycircuit) print(new_sv) plot_state_qsphere(new_sv.data) ``` Note how this looks very similar to a single-qubit superposition with zero phase. Following entanglement, it is no longer possible to treat the two qubits individually, and they must be considered to be one system. To see this clearly, we can see what would happen if we measured the Bell state above 1000 times. ```python counts = new_sv.sample_counts(shots=1000) from qiskit.visualization import plot_histogram plot_histogram(counts) ``` As you can see above, all measurements give either the result `00` or `11`. In other words, if the measurement outcome for one of the qubits is known, then the outcome for the other is fully determined. ### Ungraded exercise 1 Can you create the state $$\frac{1}{\sqrt{2}}\left(\vert01\rangle + \vert10\rangle\right)$$ using a similar procedure? ### Ungraded exercise 2 Can you create the state $$\frac{1}{\sqrt{2}}\left(\vert01\rangle - \vert10\rangle\right)$$ using a similar procedure? # Measurements In the above example, we simulated the action of a measurement by sampling counts from the statevector. A measurement can explicitly be inserted into a quantum circuit as well. Here is an example that creates the same Bell state and applies a measurement. ```python mycircuit = QuantumCircuit(2, 2) mycircuit.h(0) mycircuit.cx(0,1) mycircuit.measure([0,1], [0,1]) mycircuit.draw('mpl') ``` Two new features appeared in the circuit compared to our previous examples. - First, note that we used a second argument in the `QuantumCircuit(2,2)` command. The second argument says that we will be creating a quantum circuit that contains two qubits (the first argument), and two classical bits (the second argument). - Second, note that the `measure` command takes two arguments. The first argument is the set of qubits that will be measured. The second is the set of classical bits onto which the outcomes from the measurements of the qubits will be stored. Since the above quantum circuit contains non-unitaries (the measurement gates), we will use `Qiskit`'s built-in `Aer` simulators to run the circuit. To get the measurement counts, we can use the following code: ```python from qiskit import Aer, execute simulator = Aer.get_backend('qasm_simulator') result = execute(mycircuit, simulator, shots=10000).result() counts = result.get_counts(mycircuit) plot_histogram(counts) ``` As you can see, the measurement outcomes are similar to when we sampled counts from the statevector itself. # Graded exercise 1: Quantum teleportation In this graded exercise, you will teleport the quantum state $$\sqrt{0.70}\vert0\rangle + \sqrt{0.30}\vert1\rangle$$ from Alice's qubit to Bob's qubit. Recall that the teleportation algorithm consists of four major components: 1. Initializing the state to be teleported. We will do this on Alice's qubit `q0`. 2. Creating entanglement between two qubits. 
We will use qubits `q1` and `q2` for this. Recall that Alice owns `q1`, and Bob owns `q2`. 3. Applying a Bell measurement on Alice's qubits `q0` and `q1`. 4. Applying classically controlled operations on Bob's qubit `q2` depending on the outcomes of the Bell measurement on Alice's qubits. This exercise guides you through each of these steps. ### Initializing the state to be teleported First, create a quantum circuit that creates the state $$\sqrt{0.70}\vert0\rangle + \sqrt{0.30}\vert1\rangle$$ You can do this by using `Qiskit`'s `initialize` function. ```python import math def initialize_qubit(given_circuit, qubit_index): ### WRITE YOUR CODE BETWEEN THESE LINES - START initial_vec = [math.sqrt(0.7), math.sqrt(0.3)] given_circuit.initialize(initial_vec, qubit_index) ### WRITE YOUR CODE BETWEEN THESE LINES - END return given_circuit ``` Next, we need to create entanglement between Alice's and Bob's qubits. ```python def entangle_qubits(given_circuit, qubit_Alice, qubit_Bob): ### WRITE YOUR CODE BETWEEN THESE LINES - START given_circuit.h(qubit_Alice) given_circuit.cx(qubit_Alice, qubit_Bob) ### WRITE YOUR CODE BETWEEN THESE LINES - END return given_circuit ``` Next, we need to do a Bell measurement of Alice's qubits. ```python def bell_meas_Alice_qubits(given_circuit, qubit1_Alice, qubit2_Alice, clbit1_Alice, clbit2_Alice): ### WRITE YOUR CODE BETWEEN THESE LINES - START given_circuit.cx(qubit1_Alice, qubit2_Alice) given_circuit.h(qubit1_Alice) given_circuit.measure(qubit1_Alice, clbit1_Alice) given_circuit.measure(qubit2_Alice, clbit2_Alice) ### WRITE YOUR CODE BETWEEN THESE LINES - END return given_circuit ``` Finally, we apply controlled operations on Bob's qubit. Recall that the controlled operations are applied in this order: - an $X$ gate is applied on Bob's qubit if the measurement outcome of Alice's second qubit, `clbit2_Alice`, is `1`. - a $Z$ gate is applied on Bob's qubit if the measurement outcome of Alice's first qubit, `clbit1_Alice`, is `1`. ```python def controlled_ops_Bob_qubit(given_circuit, qubit_Bob, clbit1_Alice, clbit2_Alice): ### WRITE YOUR CODE BETWEEN THESE LINES - START given_circuit.x(qubit_Bob).c_if(clbit2_Alice, 1) given_circuit.z(qubit_Bob).c_if(clbit1_Alice, 1) ### WRITE YOUR CODE BETWEEN THESE LINES - END return given_circuit ``` The next lines of code put everything together.
**You do not need to modify anything below, but you will need to run the cell to submit your solution.** ```python ### imports from qiskit import QuantumRegister, ClassicalRegister ### set up the qubits and classical bits all_qubits_Alice = QuantumRegister(2) all_qubits_Bob = QuantumRegister(1) creg1_Alice = ClassicalRegister(1) creg2_Alice = ClassicalRegister(1) ### quantum teleportation circuit here # Initialize mycircuit = QuantumCircuit(all_qubits_Alice, all_qubits_Bob, creg1_Alice, creg2_Alice) initialize_qubit(mycircuit, 0) mycircuit.barrier() # Entangle entangle_qubits(mycircuit, 1, 2) mycircuit.barrier() # Do a Bell measurement bell_meas_Alice_qubits(mycircuit, all_qubits_Alice[0], all_qubits_Alice[1], creg1_Alice, creg2_Alice) mycircuit.barrier() # Apply classically controlled quantum gates controlled_ops_Bob_qubit(mycircuit, all_qubits_Bob[0], creg1_Alice, creg2_Alice) ### Look at the complete circuit print(mycircuit.draw(output='text')) ### store the circuit as the submitted answer answer = mycircuit ``` ┌─────────────────────────────┐ ░ ░ ┌───┐┌─┐ ░ » q0_0: ┤ initialize(0.83666,0.54772) ├─░────────────░───■──┤ H ├┤M├─░────────» └─────────────────────────────┘ ░ ┌───┐ ░ ┌─┴─┐└┬─┬┘└╥┘ ░ » q0_1: ────────────────────────────────░─┤ H ├──■───░─┤ X ├─┤M├──╫──░────────» ░ └───┘┌─┴─┐ ░ └───┘ └╥┘ ║ ░ ┌───┐ » q1_0: ────────────────────────────────░──────┤ X ├─░────────╫───╫──░──┤ X ├─» ░ └───┘ ░ ║ ║ ░ └─┬─┘ » c0_0: ══════════════════════════════════════════════════════╬═══╩═══════╪═══» ║ ┌──┴──┐» c1_0: ══════════════════════════════════════════════════════╩════════╡ = 1 ╞» └─────┘» « «q0_0: ─────── « «q0_1: ─────── « ┌───┐ «q1_0: ─┤ Z ├─ « └─┬─┘ « ┌──┴──┐ «c0_0: ╡ = 1 ╞ « └─────┘ «c1_0: ═══════ « Then, grade your solution by running the cell below. Provide always the same name and email, as the one you wrote during the course sign up. ```python name = 'Rohit Prasad' email = '[email protected]' from grading_tools import grade grade(answer, name, email, 'lab1', 'ex1') ``` lab1/ex1 - 🎉 Correct # Additional reading - You can watch a video on building the quantum teleportation quantum circuit here: https://www.youtube.com/watch?v=mMwovHK2NrE&list=PLOFEBzvs-Vvp2xg9-POLJhQwtVktlYGbY&index=6&t=0s - For additional details about the quantum teleportation algorithm, including the principle of deferred measurement, you can refer to the Qiskit Textbook's section on the algorithm here: https://qiskit.org/textbook/ch-algorithms/teleportation.html - The `1 minute Qiskit` episode entitled `What is the qsphere?` succinctly describes the Qsphere visualization tool that we used in this lab. You can find it here: https://youtu.be/4SoK2h4a7us
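As an optional, ungraded sanity check, you can measure Bob's qubit after teleportation and confirm that the outcome statistics match the teleported amplitudes (roughly 70% `0` and 30% `1`). The sketch below is only illustrative: it reuses the `mycircuit`, `all_qubits_Bob`, `Aer`, and `execute` names defined earlier in this notebook, and the extra register `creg_Bob` and the variable names are hypothetical additions, not part of the graded exercise.

```python
from qiskit import ClassicalRegister

# Work on a copy so the graded circuit `mycircuit` (stored in `answer`) stays untouched.
check_circuit = mycircuit.copy()

# Hypothetical extra classical register to record Bob's measurement outcome.
creg_Bob = ClassicalRegister(1)
check_circuit.add_register(creg_Bob)
check_circuit.measure(all_qubits_Bob[0], creg_Bob[0])

simulator = Aer.get_backend('qasm_simulator')
counts = execute(check_circuit, simulator, shots=10000).result().get_counts()
print(counts)

# Marginalize over Alice's measurement bits, keeping only Bob's bit. Counts keys
# group the classical registers separated by spaces; here we assume the register
# added last (Bob's) appears as the leftmost group -- inspect print(counts) above
# to confirm the ordering on your Qiskit version.
shots = sum(counts.values())
prob_one = sum(n for outcome, n in counts.items() if outcome.split()[0] == '1') / shots
print(prob_one)  # expected to be close to 0.30
```

If the fraction printed at the end is close to 0.30, the state $\sqrt{0.70}\vert0\rangle + \sqrt{0.30}\vert1\rangle$ has indeed been transferred to Bob's qubit.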
module ShapeExercises import Shape area : Shape -> Double area s with (shapeView s) area (triangle x y) | STriangle = 0.5 * x * y area (rectangle x y) | SRectangle = x * y area (circle x) | SCircle = pi * x * x
/- Copyright (c) 2022 Eric Rodriguez. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Eric Rodriguez ! This file was ported from Lean 3 source module number_theory.cyclotomic.gal ! leanprover-community/mathlib commit 861a26926586cd46ff80264d121cdb6fa0e35cc1 ! Please do not edit these lines, except to modify the commit id ! if you have ported upstream changes. -/ import Mathbin.NumberTheory.Cyclotomic.PrimitiveRoots import Mathbin.FieldTheory.PolynomialGaloisGroup /-! # Galois group of cyclotomic extensions In this file, we show the relationship between the Galois group of `K(ζₙ)` and `(zmod n)ˣ`; it is always a subgroup, and if the `n`th cyclotomic polynomial is irreducible, they are isomorphic. ## Main results * `is_primitive_root.aut_to_pow_injective`: `is_primitive_root.aut_to_pow` is injective in the case that it's considered over a cyclotomic field extension. * `is_cyclotomic_extension.aut_equiv_pow`: If the `n`th cyclotomic polynomial is irreducible in `K`, then `is_primitive_root.aut_to_pow` is a `mul_equiv` (for example, in `ℚ` and certain `𝔽ₚ`). * `gal_X_pow_equiv_units_zmod`, `gal_cyclotomic_equiv_units_zmod`: Repackage `is_cyclotomic_extension.aut_equiv_pow` in terms of `polynomial.gal`. * `is_cyclotomic_extension.aut.comm_group`: Cyclotomic extensions are abelian. ## References * https://kconrad.math.uconn.edu/blurbs/galoistheory/cyclotomic.pdf ## TODO * We currently can get away with the fact that the power of a primitive root is a primitive root, but the correct long-term solution for computing other explicit Galois groups is creating `power_basis.map_conjugate`; but figuring out the exact correct assumptions + proof for this is mathematically nontrivial. (Current thoughts: the correct condition is that the annihilating ideal of both elements is equal. This may not hold in an ID, and definitely holds in an ICD.) -/ variable {n : ℕ+} (K : Type _) [Field K] {L : Type _} {μ : L} open Polynomial IsCyclotomicExtension open Cyclotomic namespace IsPrimitiveRoot variable [CommRing L] [IsDomain L] (hμ : IsPrimitiveRoot μ n) [Algebra K L] [IsCyclotomicExtension {n} K L] /- ./././Mathport/Syntax/Translate/Tactic/Lean3.lean:132:4: warning: unsupported: rw with cfg: { occs := occurrences.pos[occurrences.pos] «expr[ ,]»([2]) } -/ /-- `is_primitive_root.aut_to_pow` is injective in the case that it's considered over a cyclotomic field extension. -/ theorem autToPow_injective : Function.Injective <| hμ.autToPow K := by intro f g hfg apply_fun Units.val at hfg simp only [IsPrimitiveRoot.coe_autToPow_apply, [anonymous]] at hfg generalize_proofs hf' hg' at hfg have hf := hf'.some_spec have hg := hg'.some_spec generalize_proofs hζ at hf hg suffices f hμ.to_roots_of_unity = g hμ.to_roots_of_unity by apply AlgEquiv.coe_algHom_injective apply (hμ.power_basis K).algHom_ext exact this rw [ZMod.eq_iff_modEq_nat] at hfg refine' (hf.trans _).trans hg.symm rw [← rootsOfUnity.coe_pow _ hf'.some, ← rootsOfUnity.coe_pow _ hg'.some] congr 1 rw [pow_eq_pow_iff_modEq] convert hfg rw [hμ.eq_order_of] rw [← hμ.coe_to_roots_of_unity_coe] rw [orderOf_units, orderOf_subgroup] #align is_primitive_root.aut_to_pow_injective IsPrimitiveRoot.autToPow_injective end IsPrimitiveRoot namespace IsCyclotomicExtension variable [CommRing L] [IsDomain L] (hμ : IsPrimitiveRoot μ n) [Algebra K L] [IsCyclotomicExtension {n} K L] /-- Cyclotomic extensions are abelian. 
-/ noncomputable def Aut.commGroup : CommGroup (L ≃ₐ[K] L) := ((zeta_spec n K L).autToPow_injective K).CommGroup _ (map_one _) (map_mul _) (map_inv _) (map_div _) (map_pow _) (map_zpow _) #align is_cyclotomic_extension.aut.comm_group IsCyclotomicExtension.Aut.commGroup variable (h : Irreducible (cyclotomic n K)) {K} (L) include h /- ./././Mathport/Syntax/Translate/Tactic/Lean3.lean:132:4: warning: unsupported: rw with cfg: { occs := occurrences.pos[occurrences.pos] «expr[ ,]»([1, 5]) } -/ /-- The `mul_equiv` that takes an automorphism `f` to the element `k : (zmod n)ˣ` such that `f μ = μ ^ k` for any root of unity `μ`. A strengthening of `is_primitive_root.aut_to_pow`. -/ @[simps] noncomputable def autEquivPow : (L ≃ₐ[K] L) ≃* (ZMod n)ˣ := let hζ := zeta_spec n K L let hμ t := hζ.pow_of_coprime _ (ZMod.val_coe_unit_coprime t) { (zeta_spec n K L).autToPow K with invFun := fun t => (hζ.PowerBasis K).equivOfMinpoly ((hμ t).PowerBasis K) (by haveI := IsCyclotomicExtension.ne_zero' n K L simp only [IsPrimitiveRoot.powerBasis_gen] have hr := IsPrimitiveRoot.minpoly_eq_cyclotomic_of_irreducible ((zeta_spec n K L).pow_of_coprime _ (ZMod.val_coe_unit_coprime t)) h exact ((zeta_spec n K L).minpoly_eq_cyclotomic_of_irreducible h).symm.trans hr) left_inv := fun f => by simp only [MonoidHom.toFun_eq_coe] apply AlgEquiv.coe_algHom_injective apply (hζ.power_basis K).algHom_ext simp only [AlgEquiv.coe_algHom, AlgEquiv.map_pow] rw [PowerBasis.equivOfMinpoly_gen] simp only [IsPrimitiveRoot.powerBasis_gen, IsPrimitiveRoot.autToPow_spec] right_inv := fun x => by simp only [MonoidHom.toFun_eq_coe] generalize_proofs _ h have key := hζ.aut_to_pow_spec K ((hζ.power_basis K).equivOfMinpoly ((hμ x).PowerBasis K) h) have := (hζ.power_basis K).equivOfMinpoly_gen ((hμ x).PowerBasis K) h rw [hζ.power_basis_gen K] at this rw [this, IsPrimitiveRoot.powerBasis_gen] at key rw [← hζ.coe_to_roots_of_unity_coe] at key simp only [← coe_coe, ← rootsOfUnity.coe_pow] at key replace key := rootsOfUnity.coe_injective key rw [pow_eq_pow_iff_modEq, ← orderOf_subgroup, ← orderOf_units, hζ.coe_to_roots_of_unity_coe, ← (zeta_spec n K L).eq_orderOf, ← ZMod.eq_iff_modEq_nat] at key simp only [ZMod.nat_cast_val, ZMod.cast_id', id.def] at key exact Units.ext key } #align is_cyclotomic_extension.aut_equiv_pow IsCyclotomicExtension.autEquivPow include hμ variable {L} /-- Maps `μ` to the `alg_equiv` that sends `is_cyclotomic_extension.zeta` to `μ`. 
-/ noncomputable def fromZetaAut : L ≃ₐ[K] L := let hζ := (zeta_spec n K L).eq_pow_of_pow_eq_one hμ.pow_eq_one n.Pos (autEquivPow L h).symm <| ZMod.unitOfCoprime hζ.some <| ((zeta_spec n K L).pow_iff_coprime n.Pos hζ.some).mp <| hζ.choose_spec.choose_spec.symm ▸ hμ #align is_cyclotomic_extension.from_zeta_aut IsCyclotomicExtension.fromZetaAut /- ./././Mathport/Syntax/Translate/Tactic/Lean3.lean:132:4: warning: unsupported: rw with cfg: { occs := occurrences.pos[occurrences.pos] «expr[ ,]»([4]) } -/ theorem fromZetaAut_spec : fromZetaAut hμ h (zeta n K L) = μ := by simp_rw [from_zeta_aut, aut_equiv_pow_symm_apply] generalize_proofs hζ h _ hμ _ rw [← hζ.power_basis_gen K] rw [PowerBasis.equivOfMinpoly_gen, hμ.power_basis_gen K] convert h.some_spec.some_spec exact ZMod.val_cast_of_lt h.some_spec.some #align is_cyclotomic_extension.from_zeta_aut_spec IsCyclotomicExtension.fromZetaAut_spec end IsCyclotomicExtension section Gal variable [Field L] (hμ : IsPrimitiveRoot μ n) [Algebra K L] [IsCyclotomicExtension {n} K L] (h : Irreducible (cyclotomic n K)) {K} /-- `is_cyclotomic_extension.aut_equiv_pow` repackaged in terms of `gal`. Asserts that the Galois group of `cyclotomic n K` is equivalent to `(zmod n)ˣ` if `cyclotomic n K` is irreducible in the base field. -/ noncomputable def galCyclotomicEquivUnitsZmod : (cyclotomic n K).Gal ≃* (ZMod n)ˣ := (AlgEquiv.autCongr (IsSplittingField.algEquiv _ _)).symm.trans (IsCyclotomicExtension.autEquivPow L h) #align gal_cyclotomic_equiv_units_zmod galCyclotomicEquivUnitsZmod /-- `is_cyclotomic_extension.aut_equiv_pow` repackaged in terms of `gal`. Asserts that the Galois group of `X ^ n - 1` is equivalent to `(zmod n)ˣ` if `cyclotomic n K` is irreducible in the base field. -/ noncomputable def galXPowEquivUnitsZmod : (X ^ (n : ℕ) - 1).Gal ≃* (ZMod n)ˣ := (AlgEquiv.autCongr (IsSplittingField.algEquiv _ _)).symm.trans (IsCyclotomicExtension.autEquivPow L h) #align gal_X_pow_equiv_units_zmod galXPowEquivUnitsZmod end Gal
Traveling through Virginia to visit a close friend and family, I parked my car in Richmond. I had heard of a great southern tradition that sounded delicious, and I had to give it a try before venturing back out on the road to my next destination. Sally Bell's Kitchen opened 90 years ago, in 1924. The establishment was founded by Sarah Cabell Jones and a business partner; the two met at the Women’s Exchange, a former nationwide movement intended to teach women skills and financial independence. Sally Bell's Kitchen is known for its delicious boxed lunches. Items such as potato salad, chicken salad, tuna salad, deviled eggs, cheese wafers, and an array of cupcakes are made fresh from scratch daily. They sell thousands of sandwiches and cupcakes and hundreds of pounds of salads each year. The establishment is very welcoming and friendly, and it certainly has southern charm. The staff is just wonderful!
(* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *) Require Import String. Require Import List. Require Import Lia. Require Import Utils. Require Import DataRuntime. Require Import NRARuntime. Require Import CAMPRuntime. Section CAMPtoNRA. Local Open Scope string_scope. Local Open Scope list_scope. Context {fruntime:foreign_runtime}. (* Output encoding *) Definition lift_failure d := match d with | TerminalError => None | RecoverableError => Some (dcoll nil) | Success d' => Some (dcoll (d'::nil)) end. (** Translation from CAMP to NRA *) Fixpoint nra_of_camp (p:camp) : nra := match p with | pconst d' => nra_match (NRAConst d') | punop uop p₁ => NRAMap (NRAUnop uop NRAID) (nra_of_camp p₁) | pbinop bop p₁ p₂ => NRAMap (NRABinop bop (NRAUnop (OpDot "a1") NRAID) (NRAUnop (OpDot "a2") NRAID)) (NRAProduct (NRAMap (NRAUnop (OpRec "a1") NRAID) (nra_of_camp p₁)) (NRAMap (NRAUnop (OpRec "a2") NRAID) (nra_of_camp p₂))) | pmap p₁ => nra_match (NRAUnop OpFlatten (NRAMap (nra_of_camp p₁) (unnest_two "a1" "PDATA" (NRAUnop OpBag (nra_wrap_a1 (NRAUnop (OpDot "PDATA") NRAID)))))) | passert p₁ => NRAMap (NRAConst (drec nil)) (NRASelect NRAID (nra_of_camp p₁)) | porElse p₁ p₂ => NRADefault (nra_of_camp p₁) (nra_of_camp p₂) | pit => nra_match nra_data | pletIt p₁ p₂ => NRAUnop OpFlatten (NRAMap (nra_of_camp p₂) (unnest_two "a1" "PDATA" (NRAUnop OpBag (nra_wrap_a1 (nra_of_camp p₁))))) | pgetConstant s => nra_match (NRAGetConstant s) | penv => nra_match nra_bind | pletEnv p₁ p₂ => NRAUnop OpFlatten (NRAMap (nra_of_camp p₂) (unnest_two (* Needed because MergeConcat may fail so is a collection which must be unnested *) "PBIND1" "PBIND" (NRAMap (NRABinop OpRecConcat (NRAUnop (OpRec "PDATA") (NRAUnop (OpDot "PDATA") NRAID)) (NRAUnop (OpRec "PBIND1") (NRABinop OpRecMerge (NRAUnop (OpDot "PBIND") NRAID) (NRAUnop (OpDot "PBIND1") NRAID)))) (unnest_two "a1" "PBIND1" (NRAUnop OpBag (NRABinop OpRecConcat NRAID (NRAUnop (OpRec "a1") (nra_of_camp p₁)))))))) | pleft => NRAApp (NRAEither (nra_match NRAID) (nra_fail)) nra_data | pright => NRAApp (NRAEither (nra_fail) (nra_match NRAID)) nra_data end. (** top level version sets up the appropriate input (with an empty context) *) Definition nra_of_camp_top p := NRAUnop OpFlatten (NRAMap (nra_of_camp p) (NRAUnop OpBag (nra_context (NRAConst (drec nil)) NRAID))). (** Auxiliary lemmas -- all used inside pmap proof *) Lemma lift_map_lift (l:list data) : lift_map (fun x : data => Some (drec (("PDATA", x) :: nil))) l = Some (map (fun x => (drec (("PDATA", x) :: nil))) l). Proof. induction l; simpl. reflexivity. rewrite IHl. reflexivity. Qed. Lemma lift_map_lift2 bind (l l':list data): (lift_map (fun x : data => match x with | drec r1 => Some (drec (rec_concat_sort (("PBIND", drec bind) :: ("a1", dcoll l') :: nil) r1)) | _ => None end) (map (fun x : data => drec (("PDATA", x) :: nil)) l)) = Some (map (fun x :data => (drec (rec_concat_sort (("PBIND", drec bind) :: ("a1", dcoll l') :: nil) (("PDATA", x) :: nil)))) l). Proof. induction l; simpl. reflexivity. rewrite IHl. simpl. reflexivity. Qed. 
Lemma lift_map_lift3 bind (l l':list data): lift_map (fun x : data => match x with | drec r => Some (drec (rremove r "a1")) | _ => None end) (map (fun x : data => drec (rec_concat_sort (("PBIND", drec bind) :: ("a1", dcoll l') :: nil) (("PDATA", x) :: nil))) l ++ nil) = Some (map (fun x : data => drec (rec_concat_sort (("PBIND", drec bind) :: nil) (("PDATA", x) :: nil))) l ++ nil). Proof. induction l; simpl. reflexivity. simpl. rewrite IHl. simpl. reflexivity. Qed. Lemma oflatten_lift1 (l:option (list data)): (olift (fun d1 : data => lift_oncoll (fun l0 : list data => lift dcoll (oflatten l0)) d1) (lift dcoll (lift (fun t' : list data => dcoll nil :: t') l))) = (olift (fun d1 : data => lift_oncoll (fun l0 : list data => lift dcoll (oflatten l0)) d1) (lift dcoll l)). Proof. destruct l; try reflexivity. induction l; simpl; try reflexivity. simpl. unfold oflatten; simpl. assert (forall d, lift (fun t':list data => t') d = d). intros. destruct d; try reflexivity. rewrite H. reflexivity. Qed. (** Theorem 4.2: lemma of translation correctness for campterns *) Theorem camp_trans_correct {h:brand_relation_t} c p bind d: lift_failure (camp_eval h c p bind d) = nra_eval h c (nra_of_camp p) (nra_context_data (drec bind) d). Proof. revert d bind; camp_cases (induction p) Case; simpl; intros. - Case "pconst"%string. reflexivity. - Case "punop"%string. rewrite <- IHp; clear IHp; simpl. destruct (camp_eval h c p bind d); try reflexivity. simpl; destruct (unary_op_eval h u res); reflexivity. - Case "pbinop"%string. rewrite <- IHp1; rewrite <- IHp2; clear IHp1 IHp2. destruct (camp_eval h c p1 bind d); try reflexivity. destruct (camp_eval h c p2 bind d); try reflexivity. simpl; destruct (binary_op_eval h b res res0); reflexivity. - Case "pmap"%string. destruct d; try reflexivity. unfold omap_product in *; simpl. unfold oncoll_map_concat in *; simpl. rewrite lift_map_lift; simpl. unfold omap_concat in *; simpl. rewrite lift_map_lift2; simpl. rewrite lift_map_lift3; simpl. induction l; simpl; try reflexivity. unfold nra_context_data in IHp. assert ((rec_concat_sort (("PBIND", drec bind) :: nil) (("PDATA", a) :: nil)) = ("PBIND", drec bind) :: ("PDATA", a) :: nil). reflexivity. rewrite H; clear H. rewrite <- (IHp a bind). clear IHp. destruct (camp_eval h c p bind a); try reflexivity; simpl. rewrite IHl; simpl. rewrite oflatten_lift1. reflexivity. revert IHl. destruct ((lift_map (nra_eval h c (nra_of_camp p)) (map (fun x : data => drec (rec_concat_sort (("PBIND", drec bind) :: nil) (("PDATA", x) :: nil))) l ++ nil))). * simpl. intros. unfold oflatten in *. simpl. revert IHl. destruct ((lift_flat_map (fun x : data => match x with | dcoll y => Some y | _ => None end) l0)); simpl; intros; revert IHl; destruct (gather_successes (map (camp_eval h c p bind) l)); intros; simpl in *; try reflexivity; congruence. * simpl. destruct ((gather_successes (map (camp_eval h c p bind) l))); simpl; intros. reflexivity. congruence. congruence. - Case "passert"%string. rewrite <- IHp; clear IHp; simpl. destruct (camp_eval h c p bind d); try reflexivity. destruct res; try reflexivity; simpl. destruct b; reflexivity. - Case "porElse"%string. rewrite <- IHp1; clear IHp1; simpl. destruct (camp_eval h c p1 bind d); simpl; auto. - Case "pit"%string. reflexivity. - Case "pletIt"%string. rewrite <- IHp1; clear IHp1; simpl. destruct (camp_eval h c p1 bind d); try reflexivity. simpl. specialize (IHp2 res). unfold nra_context_data in IHp2. rewrite <- IHp2; clear IHp2. destruct (camp_eval h c p2 bind res); reflexivity. 
- Case "pgetConstant"%string. destruct (edot c s); simpl; trivial. - Case "penv"%string. eauto. - Case "pletEnv"%string. rewrite <- IHp1; clear IHp1; simpl. destruct (camp_eval h c p1 bind d); try reflexivity. destruct res; try reflexivity. simpl. destruct (merge_bindings bind l); try reflexivity. specialize (IHp2 d l0). unfold nra_context_data in *. simpl. rewrite <- IHp2; clear IHp2; simpl. destruct (camp_eval h c p2 l0 d); try reflexivity. - Case "pleft"%string. unfold lift_failure. destruct d; simpl; trivial. - Case "pright"%string. unfold lift_failure. destruct d; simpl; trivial. Qed. Lemma camp_trans_yields_coll {h:brand_relation_t} c p d d0: Forall (fun x => data_normalized h (snd x)) c -> nra_eval h c (nra_of_camp p) d = Some d0 -> {x | d0 = dcoll x}. Proof. Ltac findcol := repeat match goal with | [H:context [ olift _ ?x] |- _ ] => (case_eq x; [intros ?|idtac]; intros eq; rewrite eq in H; simpl in *; try discriminate) | [H:context [ olift2 _ ?x ?y] |- _ ] => (case_eq x; [intros ?|idtac]; intros eq; rewrite eq in H; simpl in *; try discriminate); (case_eq y; [intros ?|idtac]; intros eq2; rewrite eq2 in H; simpl in *; try discriminate) | [H:lift_oncoll _ ?d = Some _ |- _] => destruct d; simpl in *; try discriminate | [H:lift _ _ = Some _ |- _ ] => apply some_lift in H; destruct H; try subst | [H:Some _ = Some _ |- _ ] => inversion H; clear H; try subst end; eauto. revert d d0; induction p; simpl; intros; try findcol. destruct (IHp1 _ _ H eq). subst. destruct x; findcol. destruct d; try congruence. destruct (edot l "PDATA"); try congruence. findcol. destruct d1; try congruence; [ exists (d1::nil); congruence | exists (nil); congruence]. destruct d1; try congruence; [ exists nil; congruence | exists (d1::nil); congruence]. Qed. Lemma camp_trans_top_nra_context {h:brand_relation_t} c p d: Forall (fun x => data_normalized h (snd x)) c -> nra_eval h c (nra_of_camp_top p) d = nra_eval h c (nra_of_camp p) (nra_context_data (drec nil) d). Proof. simpl. unfold olift, nra_context_data; intros; trivial. case_eq (h ⊢ (nra_of_camp p) @ₐ (drec (("PBIND", drec nil) :: ("PDATA", d) :: nil)) ⊣ c); simpl; trivial; intros. unfold oflatten. simpl. apply camp_trans_yields_coll in H0; trivial. destruct H0; subst; simpl. rewrite app_nil_r. trivial. Qed. Lemma camp_trans_top_correct {h:brand_relation_t} c p d: Forall (fun x => data_normalized h (snd x)) c -> lift_failure (camp_eval h c p nil d) = nra_eval h c (nra_of_camp_top p) d. Proof. intros. rewrite camp_trans_top_nra_context by trivial. apply camp_trans_correct. Qed. (* Wrapping the error on the NRA side might be a little nicer, notably for later top-level proofs *) Definition lift_camp_failure (d : option data) := match d with | Some (dcoll nil) => RecoverableError | Some (dcoll (l::nil)) => Success l | _ => TerminalError end. Lemma camp_trans_correct_r {h:brand_relation_t} c p bind d: camp_eval h c p bind d = lift_camp_failure (nra_eval h c (nra_of_camp p) (nra_context_data (drec bind) d)). Proof. rewrite <- camp_trans_correct. destruct (camp_eval h c p bind d); intros; simpl; reflexivity. Qed. Lemma camp_trans_top_correct_r {h:brand_relation_t} c p d: Forall (fun x => data_normalized h (snd x)) c -> camp_eval h c p nil d = lift_camp_failure (nra_eval h c (nra_of_camp_top p) d). Proof. intros. rewrite <- camp_trans_top_correct by trivial. destruct (camp_eval h c p nil d); intros; simpl; eauto. Qed. Section size. (** Proof showing linear size translation *) Lemma camp_trans_size p : nra_size (nra_of_camp p) <= 41 * camp_size p. Proof. 
induction p; simpl; lia. Qed. End size. Section sugar. (* Mapping to NRA for the built-in operators *) (* and *) Definition nra_of_pand (p1 p2:camp) : nra := nra_of_camp (pand p1 p2). Definition nra_for_pand (q1 q2:nra) : nra := (♯flatten(χ⟨q2 ⟩( unnest_two "PBIND1" "PBIND" (χ⟨‵[| ("PDATA", (ID) · "PDATA")|] ⊕ ‵[| ("PBIND1", (ID) · "PBIND" ⊗ (ID) · "PBIND1")|] ⟩( unnest_two "a1" "PBIND1" (‵{|ID ⊕ ‵[| ("a1", χ⟨‵[||] ⟩( σ⟨ID ⟩( q1)))|]|}))))))%nra. Lemma nra_of_pand_works (p1 p2:camp) : nra_of_camp (pand p1 p2) = nra_for_pand (nra_of_camp p1) (nra_of_camp p2). Proof. simpl. reflexivity. Qed. (* WW *) Definition nra_of_WW (p:camp) := nra_of_camp (WW p). End sugar. Section Top. Context (h:brand_relation_t). Definition camp_to_nra_top (q:camp) : nra := NRAApp (nra_of_camp q) (nra_context (NRAConst (drec nil)) (NRAConst dunit)). Theorem camp_to_nra_top_correct : forall q:camp, forall global_env:bindings, camp_eval_top h q global_env = nra_eval_top h (camp_to_nra_top q) global_env. Proof. intros. unfold camp_eval_top. unfold nra_eval_top. unfold camp_to_nra_top. unfold presult_to_result. unfold camp_eval_top_to_presult. generalize (@camp_trans_correct h (rec_sort global_env) q nil dunit); intros. unfold lift_failure in H. destruct (camp_eval h (rec_sort global_env) q nil dunit); rewrite H; simpl; unfold nra_context_data in *; reflexivity. Qed. End Top. End CAMPtoNRA.
tryCatch( { if(runif(1) > 0.5) { message("This doesn't throw an error") } else { stop("This is an error") } }, error = function(e) message(paste("An error occurred", e$message, sep = ": ")), finally = message("This is called whether or not an exception occurred") )
open import Agda.Builtin.Nat open import Agda.Builtin.Equality data Three : Set -- (AAA) To fix things, move this line... data One : Set where one : Nat → One data Two : Set where two : One → Two lemma′ : ∀ (m n : Nat) → (one m) ≡ (one n) → m ≡ n lemma′ m .m refl = refl lemma : ∀ (m n : Nat) → (two (one m)) ≡ (two (one n)) → m ≡ n lemma m .m refl = refl {- Error was: I'm not sure if there should be a case for the constructor refl, because I get stuck when trying to solve the following unification problems (inferred index ≟ expected index): two (one m) ≟ two (one n) when checking that the pattern refl has type two (one m) ≡ two (one n) -} -- (BBB) ... to here. data Three where three : Three
module Idris.Codegen.ExtSTG.JSON -- where import Data.Strings import Idris.Codegen.ExtSTG.STG import Language.JSON.Data {- TODO: This could be autogenerated? -- In many cases yes using proofsearch %search -} export interface ToJSON a where toJSON : a -> JSON export ToJSON a => ToJSON (List a) where toJSON as = JArray (map toJSON as) export (ToJSON a, ToJSON b) => ToJSON (a, b) where toJSON (a, b) = JArray [toJSON a, toJSON b] export -- This is a special case in the Haskell AEson library used by Ext-STG JSON. ToJSON a => ToJSON (Maybe a) where toJSON Nothing = JNull toJSON (Just a) = toJSON a export ToJSON String where toJSON = JString ToJSON Bool where toJSON = JBoolean ToJSON Char where toJSON c = JString (singleton c) ToJSON Double where toJSON = JNumber ToJSON Nat where toJSON n = JNumber (cast n) ToJSON Int where toJSON i = JNumber (cast i) ToJSON Integer where toJSON i = JNumber (cast i) ToJSON FilePath where toJSON = JString . getFilePath ToJSON UnitId where toJSON u = JObject [ ("getUnitId", toJSON (GetUnitId u)) ] ToJSON ModuleName where toJSON m = JObject [ ("getModuleName", toJSON (GetModuleName m)) ] ToJSON ForeignStubs where toJSON NoStubs = JObject [ ("tag", toJSON "NoStubs") ] toJSON (MkForeignStubs h s) = JObject [ ("tag", toJSON "ForeignStubs") , ("fsCHeader", toJSON h) , ("fsCSource", toJSON s) ] ToJSON BufSpan where toJSON b = JObject [ ("bufSpanStart", toJSON (BufSpanStart b)) , ("bufSpanEnd", toJSON (BufSpanEnd b)) ] ToJSON RealSrcSpan where toJSON r = JObject [ ("srcSpanFile" , toJSON (SpanFile r)) , ("srcSpanSLine" , toJSON (SpanSLine r)) , ("srcSpanSCol" , toJSON (SpanSCol r)) , ("srcSpanELine" , toJSON (SpanELine r)) , ("srcSpanECol" , toJSON (SpanECol r)) ] -- Because of different names in STG rep in Ext-STG, we need different tags. 
ToJSON SrcSpan where toJSON (SsRealSrcSpan r m) = JObject [ ("tag", JString "RealSrcSpan") -- TODO: Rename constructor , ("contents", JArray [toJSON r, toJSON m]) ] toJSON (SsUnhelpfulSpan n) = JObject [ ("tag", JString "UnhelpfulSpan") , ("contents", toJSON n) ] ToJSON Unique where toJSON (MkUnique c i) = JArray [toJSON c, toJSON i] ToJSON (DataConId r) where toJSON (MkDataConId u) = toJSON u ToJSON DataConIdSg where toJSON (r ** d) = toJSON d ToJSON TyConId where toJSON (MkTyConId u) = toJSON u ToJSON ForeignSrcLang where toJSON LangC = JString "LangC" toJSON LangCxx = JString "LangCxx" toJSON LangObjc = JString "LangObjc" toJSON LangObjxcc = JString "LangObjxcc" toJSON LangAsm = JString "LangAsm" toJSON RawObject = JString "RawObject" ToJSON IdDetails where toJSON (VanillaId) = JObject [ ("tag", JString "VanillaId") ] toJSON (FExportedId) = JObject [ ("tag", JString "FExportedId") ] toJSON (RecSelId) = JObject [ ("tag", JString "RecSelId") ] toJSON (ClassOpId) = JObject [ ("tag", JString "ClassOpId") ] toJSON (PrimOpId) = JObject [ ("tag", JString "PrimOpId") ] toJSON (TickBoxOpId) = JObject [ ("tag", JString "TickBoxOpId") ] toJSON (DFunId) = JObject [ ("tag", JString "DFunId") ] toJSON (CoVarId) = JObject [ ("tag", JString "CoVarId") ] toJSON (JoinId i) = JObject [ ("tag", JString "JoinId") , ("contents", toJSON i) ] toJSON (DataConWorkId d) = JObject [ ("tag", JString "DataConWorkId") , ("contents", toJSON d) ] toJSON (DataConWrapId d) = JObject [ ("tag", JString "DataConWrapId") , ("contents", toJSON d) ] ToJSON Scope where toJSON LocalScope = JString "LocalScope" toJSON GlobalScope = JString "GlobalScope" toJSON HaskellExported = JString "HaskellExported" toJSON ForeignExported = JString "ForeignExported" ToJSON UpdateFlag where toJSON ReEntrant = JString "ReEntrant" toJSON Updatable = JString "Updatable" toJSON SingleEntry = JString "SingleEntry" ToJSON PrimElemRep where toJSON Int8ElemRep = JString "Int8ElemRep" toJSON Int16ElemRep = JString "Int16ElemRep" toJSON Int32ElemRep = JString "Int32ElemRep" toJSON Int64ElemRep = JString "Int64ElemRep" toJSON Word8ElemRep = JString "Word8ElemRep" toJSON Word16ElemRep = JString "Word16ElemRep" toJSON Word32ElemRep = JString "Word32ElemRep" toJSON Word64ElemRep = JString "Word64ElemRep" toJSON FloatElemRep = JString "FloatElemRep" toJSON DoubleElemRep = JString "DoubleElemRep" ToJSON PrimRep where toJSON VoidRep = JObject [ ("tag", JString "VoidRep") ] toJSON LiftedRep = JObject [ ("tag", JString "LiftedRep") ] toJSON UnliftedRep = JObject [ ("tag", JString "UnliftedRep") ] toJSON Int8Rep = JObject [ ("tag", JString "Int8Rep") ] toJSON Int16Rep = JObject [ ("tag", JString "Int16Rep") ] toJSON Int32Rep = JObject [ ("tag", JString "Int32Rep") ] toJSON Int64Rep = JObject [ ("tag", JString "Int64Rep") ] toJSON IntRep = JObject [ ("tag", JString "IntRep") ] toJSON Word8Rep = JObject [ ("tag", JString "Word8Rep") ] toJSON Word16Rep = JObject [ ("tag", JString "Word16Rep") ] toJSON Word32Rep = JObject [ ("tag", JString "Word32Rep") ] toJSON Word64Rep = JObject [ ("tag", JString "Word64Rep") ] toJSON WordRep = JObject [ ("tag", JString "WordRep") ] toJSON AddrRep = JObject [ ("tag", JString "AddrRep") ] toJSON FloatRep = JObject [ ("tag", JString "FloatRep") ] toJSON DoubleRep = JObject [ ("tag", JString "DoubleRep") ] toJSON (VecRep n p) = JObject [ ("tag", JString "VecRep") , ("contents", JArray [toJSON n, toJSON p]) ] ToJSON RepType where toJSON (SingleValue p) = JObject [ ("tag", JString "SingleValue") , ("contents", toJSON p) ] toJSON 
(UnboxedTuple ps) = JObject [ ("tag", JString "UnboxedTuple") , ("contents", toJSON ps) ] toJSON PolymorphicRep = JObject [ ("tag", JString "PolymorphicRep") ] ToJSON (BinderId r) where toJSON (MkBinderId u) = toJSON u ToJSON BinderIdSg where toJSON (r ** b) = toJSON b ToJSON (SBinder r) where toJSON b = JObject [ ("sbinderName" , toJSON (binderName b)) , ("sbinderId" , toJSON (binderId b)) , ("sbinderType" , toJSON (binderRep b)) , ("sbinderTypeSig" , toJSON "") -- Information field in ExtSTG , ("sbinderScope" , toJSON (binderScope b)) , ("sbinderDetails" , toJSON (binderDetails b)) , ("sbinderInfo" , toJSON (binderInfo b)) , ("sbinderDefLoc" , toJSON (binderDefLoc b)) ] ToJSON SBinderSg where toJSON (_ ** b) = toJSON b ToJSON DataConRep where toJSON (AlgDataCon p) = JObject [ ("tag", JString "AlgDataCon") , ("contents", toJSON p) ] toJSON (UnboxedTupleCon p) = JObject [ ("tag", JString "UnboxedTupleCon") , ("contents", toJSON p) ] mutual ToJSON STyCon where toJSON s = JObject [ ("stcName" , toJSON (Name s)) , ("stcId" , toJSON (Id s)) , ("stcDataCons", toJSON (DataCons s)) , ("stcDefLoc" , toJSON (DefLoc s)) ] ToJSON (SDataCon r) where toJSON s = JObject [ ("sdcName" , toJSON (name s)) , ("sdcId" , toJSON (ident s)) , ("sdcRep" , toJSON (rep s)) , ("sdcWorker" , toJSON (worker s)) , ("sdcDefLoc" , toJSON (defLoc s)) ] ToJSON SDataConSg where toJSON (r ** d) = toJSON d ToJSON LitNumType where toJSON LitNumInt = JString "LitNumInt" toJSON LitNumInt64 = JString "LitNumInt64" toJSON LitNumWord = JString "LitNumWord" toJSON LitNumWord64 = JString "LitNumWord64" ToJSON LabelSpec where toJSON (FunctionLabel i) = JObject [ ("tag", JString "FunctionLabel") , ("contents", toJSON i) ] toJSON DataLabel = JObject [ ("tag", JString "DataLabel") ] ToJSON Lit where toJSON (LitChar c) = JObject [ ("tag", JString "LitChar") , ("contents", toJSON c) ] toJSON (LitString s) = JObject [ ("tag", JString "LitString") , ("contents", toJSON s) ] toJSON LitNullAddr = JObject [ ("tag", JString "LitNullAddr") ] toJSON (LitFloat d) = JObject [ ("tag", JString "LitFloat") , ("contents", toJSON d) ] toJSON (LitDouble d) = JObject [ ("tag", JString "LitDouble") , ("contents", toJSON d) ] toJSON (LitLabel s l) = JObject [ ("tag", JString "LitLabel") , ("contents", JArray [toJSON s, toJSON l]) ] toJSON (LitNumber l i) = JObject [ ("tag", JString "LitNumber") , ("contents", JArray [toJSON l, toJSON i]) ] ToJSON (Arg r) where toJSON (StgVarArg i) = JObject [ ("tag" , JString "StgVarArg") , ("contents", toJSON i) ] toJSON (StgLitArg l) = JObject [ ("tag", JString "StgLitArg") , ("contents", toJSON l) ] toJSON StgVoid = JObject [ ("tag", JString "StgVoid") ] ToJSON ArgSg where toJSON (r ** x) = toJSON x ToJSON SourceText where toJSON (MkSourceText s) = JObject [ ("tag", JString "MkSourceText") , ("contents", toJSON s) ] toJSON (NoSourceText) = JObject [ ("tag", JString "NoSourceText") ] ToJSON CCallTarget where toJSON (StaticTarget t s u b) = JObject [ ("tag", JString "StaticTarget") , ("contents", JArray [toJSON t, toJSON s, toJSON u, toJSON b]) ] toJSON DynamicTarget = JObject [ ("tag", JString "DynamicTarget") ] ToJSON CCallConv where toJSON MkCCallConv = JString "MkCCallConv" toJSON CApiConv = JString "CApiConv" toJSON StdCallConv = JString "StdCallConv" toJSON PrimCallConv = JString "PrimCallConv" toJSON JavaScriptCallConv = JString "JavaScriptCallConv" ToJSON Safety where toJSON PlaySafe = JString "PlaySafe" toJSON PlayInterruptible = JString "PlayInterruptible" toJSON PlayRisky = JString "PlayRisky" ToJSON ForeignCall 
where toJSON f = JObject [ ("foreignCTarget", toJSON (CTarget f)) , ("foreignCConv" , toJSON (CConv f)) , ("foreignCSafety", toJSON (CSafety f)) ] ToJSON PrimCall where toJSON (MkPrimCall s u) = JArray [toJSON s, toJSON u] {n : String} -> ToJSON (PrimOp n args ret) where toJSON p = JObject [ ("tag", JString "StgPrimOp") , ("contents", toJSON $ PrimOp.name p) ] ToJSON StgOp where toJSON (StgPrimOp p) = JObject [ ("tag", JString "StgPrimOp") , ("contents", toJSON p) ] toJSON (StgPrimCallOp p) = JObject [ ("tag", JString "StgPrimCallOp") , ("contents", toJSON p) ] toJSON (StgFCallOp p) = JObject [ ("tag", JString "StgFCallOp") , ("contents", toJSON p) ] ToJSON AltType where toJSON (PolyAlt) = JObject [ ("tag", JString "PolyAlt") ] toJSON (MultiValAlt i) = JObject [ ("tag", JString "MultiValAlt") , ("contents", toJSON i) ] toJSON (PrimAlt p) = JObject [ ("tag", JString "PrimAlt") , ("contents", toJSON p) ] toJSON (AlgAlt t) = JObject [ ("tag", JString "AlgAlt") , ("contents", toJSON t) ] ToJSON (AltCon r) where toJSON (AltDataCon d) = JObject [ ("tag", JString "AltDataCon") , ("contents", toJSON d) ] toJSON (AltLit l) = JObject [ ("tag", JString "AltLit") , ("contents", toJSON l) ] toJSON AltDefault = JObject [ ("tag", JString "AltDefault") ] toBinderList : {ps : List PrimRep} -> BList ps -> List SBinderSg toBinderList [] = [] toBinderList (x :: xs) = mkSBinderSg x :: toBinderList xs toConArgList : {ps : List PrimRep} -> ArgList ps -> List ArgSg toConArgList [] = [] toConArgList (x :: xs) = mkArgSg x :: toConArgList xs mutual ToJSON (Expr r) where toJSON (StgApp f a r) = JObject [ ("tag", JString "StgApp") , ("contents", JArray [toJSON f, toJSON a, toJSON r, JArray [toJSON "",toJSON "",toJSON ""]]) -- ^^ Haskell implementation of the ExtSTG constains an extra field -- of type (String, String, String) for debugging, we don't have it here -- but when we generate the JSON it is needed on the Haskell side. ] toJSON (StgLit l) = JObject [ ("tag", JString "StgLit") , ("contents", toJSON l) ] toJSON (StgConApp {r=AlgDataCon []} d s) = JObject [ ("tag", JString "StgConApp") , ("contents", JArray [toJSON d, JArray [], JArray []]) -- No (List RepType) is given, as we don't support UnboxedTuples for now. ] toJSON (StgConApp {r=AlgDataCon [p]} d s) = JObject [ ("tag", JString "StgConApp") , ("contents", JArray [toJSON d, JArray [toJSON s], JArray []]) -- No (List RepType) is given, as we don't support UnboxedTuples for now. ] toJSON (StgConApp {r=AlgDataCon (p0 :: p1 :: ps)} d s) = JObject [ ("tag", JString "StgConApp") , ("contents", JArray [toJSON d, toJSON (toConArgList s), JArray []]) -- No (List RepType) is given, as we don't support UnboxedTuples for now. 
] toJSON (StgConApp {r=UnboxedTupleCon n} d s) impossible toJSON (StgOpApp {args=[]} {ret} p s) = JObject [ ("tag", JString "StgOpApp") , ("contents", JArray [ toJSON p , JArray [] , toJSON (SingleValue ret) , toJSON (the (Maybe TyConId) Nothing) ]) ] toJSON (StgOpApp {args=[a1]} {ret} p s) = JObject [ ("tag", JString "StgOpApp") , ("contents", JArray [ toJSON p , JArray [toJSON s] , toJSON (SingleValue ret) , toJSON (the (Maybe TyConId) Nothing) ]) ] toJSON (StgOpApp {args=(a1::a2::as)} {ret} p s) = JObject [ ("tag", JString "StgOpApp") , ("contents", JArray [ toJSON p , toJSON (toConArgList s) , toJSON (SingleValue ret) , toJSON (the (Maybe TyConId) Nothing) ]) ] toJSON (StgCase d s i a) = JObject [ ("tag", JString "StgCase") , ("contents", JArray [toJSON s, toJSON i, toJSON d, toJSON a]) -- The order of the contents is important for the ExtSTG compatibility ] toJSON (StgLet b e) = JObject [ ("tag", JString "StgLet") , ("contents", JArray [toJSON b, toJSON e]) ] toJSON (StgLetNoEscape b e) = JObject [ ("tag", JString "StgLetNoEscape") , ("contents", JArray [toJSON b, toJSON e]) ] ToJSON (Alt r q) where toJSON (MkAlt AltDefault () body) = JObject [ ("altCon" , toJSON (the (AltCon r) AltDefault)) , ("altBinders", JArray []) , ("altRHS" , toJSON body) ] toJSON (MkAlt (AltLit l) () body) = JObject [ ("altCon" , toJSON (AltLit l)) , ("altBinders", JArray []) , ("altRHS" , toJSON body) ] toJSON (MkAlt (AltDataCon ((UnboxedTupleCon _) ** _)) unboxed body) impossible toJSON (MkAlt alt@(AltDataCon ((AlgDataCon []) ** _)) binders body) = JObject [ ("altCon" , toJSON alt) , ("altBinders", JArray []) , ("altRHS" , toJSON body) ] toJSON (MkAlt alt@(AltDataCon ((AlgDataCon [p]) ** _)) binder body) = JObject [ ("altCon" , toJSON alt) , ("altBinders", JArray [toJSON binder]) , ("altRHS" , toJSON body) ] toJSON (MkAlt alt@(AltDataCon ((AlgDataCon (p0 :: p1 :: ps)) ** dc)) binders body) = JObject [ ("altCon" , toJSON alt) , ("altBinders", toJSON $ toBinderList binders) , ("altRHS" , toJSON body) ] ToJSON Rhs where toJSON (StgRhsClosure u a b) = JObject [ ("tag", JString "StgRhsClosure") , ("contents", JArray [JArray [], toJSON u, toJSON a, toJSON b]) ] toJSON (StgRhsCon d a) = JObject [ ("tag", JString "StgRhsCon") , ("contents", JArray [toJSON d, toJSON a]) ] ToJSON Binding where toJSON (StgNonRec i r) = JObject [ ("tag", JString "StgNonRec") , ("contents", JArray [toJSON i, toJSON r]) ] toJSON (StgRec bs) = JObject [ ("tag", JString "StgRec") , ("contents", toJSON bs) ] export ToJSON TopBinding where toJSON (StgTopLifted b) = JObject [ ("tag" , JString "StgTopLifted") , ("contents", toJSON b) ] toJSON (StgTopStringLit i l) = JObject [ ("tag", JString "StgTopStringLit") , ("contents", JArray [toJSON i, toJSON l]) ] export ToJSON Module where toJSON m = JObject [ ("modulePhase" , toJSON (Phase m)) , ("moduleUnitId" , toJSON (ModuleUnitId m)) , ("moduleName" , toJSON (Name m)) , ("moduleSourceFilePath" , toJSON (SourceFilePath m)) , ("moduleForeignStubs" , toJSON (ForeignStubs m)) , ("moduleHasForeignExported" , toJSON (HasForeignExported m)) , ("moduleDependency" , toJSON (Dependency m)) , ("moduleExternalTopIds" , toJSON (ExternalTopIds m)) , ("moduleTyCons" , toJSON (TyCons m)) , ("moduleTopBindings" , toJSON (TopBindings m)) , ("moduleForeignFiles" , toJSON (ForeignFiles m)) ]
#include <stdlib.h>
#include <stdio.h>
#include <gsl/gsl_vector.h>
#include <gsl/gsl_multiroots.h>

struct rparams
{
  double a;
  double b;
};

int
rosenbrock_f (const gsl_vector * x, void *params, gsl_vector * f)
{
  double a = ((struct rparams *) params)->a;
  double b = ((struct rparams *) params)->b;

  double x0 = gsl_vector_get (x, 0);
  double x1 = gsl_vector_get (x, 1);

  double y0 = a * (1 - x0);
  double y1 = b * (x1 - x0 * x0);

  gsl_vector_set (f, 0, y0);
  gsl_vector_set (f, 1, y1);

  return GSL_SUCCESS;
}

#ifdef DERIV
int
rosenbrock_df (const gsl_vector * x, void *params, gsl_matrix * df)
{
  double a = ((struct rparams *) params)->a;
  double b = ((struct rparams *) params)->b;

  double x0 = gsl_vector_get (x, 0);

  double df00 = -a;
  double df01 = 0;
  double df10 = -2 * b * x0;
  double df11 = b;

  gsl_matrix_set (df, 0, 0, df00);
  gsl_matrix_set (df, 0, 1, df01);
  gsl_matrix_set (df, 1, 0, df10);
  gsl_matrix_set (df, 1, 1, df11);

  return GSL_SUCCESS;
}

int
rosenbrock_fdf (const gsl_vector * x, void *params,
                gsl_vector * f, gsl_matrix * df)
{
  rosenbrock_f (x, params, f);
  rosenbrock_df (x, params, df);

  return GSL_SUCCESS;
}
#endif

#ifdef DERIV
#define SOLVER gsl_multiroot_fdfsolver
#define SOLVER_TYPE gsl_multiroot_fdfsolver_type
#else
#define SOLVER gsl_multiroot_fsolver
#define SOLVER_TYPE gsl_multiroot_fsolver_type
#endif

/* Forward declaration: print_state is defined after main. */
int print_state (size_t iter, SOLVER * s);

int
main (void)
{
  const SOLVER_TYPE *T;
  SOLVER *s;

  int status;
  size_t iter = 0;

  const size_t n = 2;
  struct rparams p = {1.0, 10.0};

#ifdef DERIV
  gsl_multiroot_function_fdf f = {&rosenbrock_f, &rosenbrock_df, &rosenbrock_fdf, n, &p};
#else
  gsl_multiroot_function f = {&rosenbrock_f, n, &p};
#endif

  double x_init[2] = {-10.0, -5.0};
  gsl_vector *x = gsl_vector_alloc (n);

  gsl_vector_set (x, 0, x_init[0]);
  gsl_vector_set (x, 1, x_init[1]);

  /* Allocate the solver, then attach the system and the starting point. */
#ifdef DERIV
  T = gsl_multiroot_fdfsolver_gnewton;
  s = gsl_multiroot_fdfsolver_alloc (T, n);
  gsl_multiroot_fdfsolver_set (s, &f, x);
#else
  T = gsl_multiroot_fsolver_hybrids;
  s = gsl_multiroot_fsolver_alloc (T, n);
  gsl_multiroot_fsolver_set (s, &f, x);
#endif

  print_state (iter, s);

  do
    {
      iter++;
#ifdef DERIV
      status = gsl_multiroot_fdfsolver_iterate (s);
#else
      status = gsl_multiroot_fsolver_iterate (s);
#endif

      print_state (iter, s);

      if (status)   /* the solver is stuck */
        break;

      status = gsl_multiroot_test_residual (s->f, 0.0000001);
    }
  while (status == GSL_CONTINUE && iter < 1000);

  printf ("status = %s\n", gsl_strerror (status));

#ifdef DERIV
  gsl_multiroot_fdfsolver_free (s);
#else
  gsl_multiroot_fsolver_free (s);
#endif
  gsl_vector_free (x);

  return 0;
}

int
print_state (size_t iter, SOLVER * s)
{
  /* %zu is the correct conversion specifier for size_t */
  printf ("iter = %3zu x = % 15.8f % 15.8f f(x) = % .3e % .3e\n",
          iter,
          gsl_vector_get (s->x, 0),
          gsl_vector_get (s->x, 1),
          gsl_vector_get (s->f, 0),
          gsl_vector_get (s->f, 1));

  return 0;
}
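For comparison only (this is an illustrative sketch, not part of the GSL example above), the same two-equation Rosenbrock system can be solved with SciPy's hybrid (MINPACK) root finder, which plays the role of `gsl_multiroot_fsolver_hybrids`:

```python
from scipy.optimize import root

def rosenbrock(x, a, b):
    # Same residuals as rosenbrock_f above: f0 = a*(1 - x0), f1 = b*(x1 - x0^2)
    return [a * (1.0 - x[0]), b * (x[1] - x[0] ** 2)]

# Same starting point (-10, -5) and parameters a=1, b=10 as the C program
sol = root(rosenbrock, x0=[-10.0, -5.0], args=(1.0, 10.0), method='hybr', tol=1e-7)
print(sol.success, sol.x)   # the root is at (1, 1)
```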
theory tangle_relation imports Datatype Main begin lemma symmetry1: assumes "symp R" shows "\<forall>x y. (x, y) \<in> {(x, y). R x y}\<^sup>* \<longrightarrow> (y, x) \<in> {(x, y). R x y}\<^sup>*" proof- have "R x y \<longrightarrow> R y x" by (metis assms sympD) then have " (x, y) \<in> {(x, y). R x y} \<longrightarrow> (y, x) \<in> {(x, y). R x y}" by auto then have 2:"\<forall> x y. (x, y) \<in> {(x, y). R x y} \<longrightarrow> (y, x) \<in> {(x, y). R x y}" by (metis (full_types) assms mem_Collect_eq split_conv sympE) then have "sym {(x, y). R x y}" unfolding sym_def by auto then have 3: "sym (rtrancl {(x, y). R x y})" using sym_rtrancl by auto then show ?thesis by (metis symE) qed lemma symmetry2: assumes "\<forall>x y. (x, y) \<in> {(x, y). R x y}\<^sup>* \<longrightarrow> (y, x) \<in> {(x, y). R x y}\<^sup>* " shows "symp R^**" unfolding symp_def Enum.rtranclp_rtrancl_eq assms by (metis assms) lemma symmetry3: assumes "symp R" shows "symp R^**" using assms symmetry1 symmetry2 by metis lemma symm_trans: assumes "symp R" shows "symp R^++" by (metis assms rtranclpD symmetry3 symp_def tranclp_into_rtranclp) end
State Before: ⊢ StrictConcaveOn ℝ (Icc 0 π) sin State After: x : ℝ hx : x ∈ interior (Icc 0 π) ⊢ (deriv^[2]) sin x < 0 Tactic: apply strictConcaveOn_of_deriv2_neg (convex_Icc _ _) continuousOn_sin fun x hx => ?_ State Before: x : ℝ hx : x ∈ interior (Icc 0 π) ⊢ (deriv^[2]) sin x < 0 State After: x : ℝ hx : x ∈ Ioo 0 π ⊢ (deriv^[2]) sin x < 0 Tactic: rw [interior_Icc] at hx State Before: x : ℝ hx : x ∈ Ioo 0 π ⊢ (deriv^[2]) sin x < 0 State After: no goals Tactic: simp [sin_pos_of_mem_Ioo hx]
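For clarity (a plain calculus remark, not part of the Lean proof trace above), the final `simp [sin_pos_of_mem_Ioo hx]` step succeeds because

```latex
\frac{d^2}{dx^2}\sin x = -\sin x < 0 \quad \text{for } x \in (0,\pi),
```

since $\sin x > 0$ on the open interval $(0,\pi)$.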
{-# LANGUAGE RecordWildCards #-} module Main where import Control.DeepSeq (force) import Control.Exception (evaluate) import Control.Monad (replicateM) import Data.List (minimumBy) import Data.Ord (comparing) import qualified Data.Text as T import qualified Data.Vector.Unboxed as V import GHC.Base (String) import Options.Applicative (Parser (..), auto, execParser, fullDesc, help, helper, info, long, metavar, option, progDesc, showDefault, strOption, value, (<**>)) import qualified Statistics.Distribution.StudentT as S import qualified Statistics.Function as S import qualified Statistics.Sample as S import qualified Statistics.Test.StudentT as S import qualified Statistics.Test.Types as ST import System.IO (hGetContents) import System.Process (CreateProcess (..), StdStream (..), shell, waitForProcess, withCreateProcess) import Text.Printf (printf) type Samples = V.Vector Double data Args = Args { old :: T.Text , new :: T.Text , forks :: Int } data Diff = Diff { meanPct :: Double , oldMean :: Double , oldMin :: Double , oldMax :: Double , newMin :: Double , newMax :: Double , newMean :: Double , test :: Maybe (ST.Test S.StudentT) } deriving (Show) args :: Parser Args args = Args <$> strOption ( long "old" <> metavar "OLD" <> help "Command that returns samples for \"old\"" ) <*> strOption ( long "new" <> metavar "NEW" <> help "Command that returns samples for \"new\"" ) <*> option auto ( long "forks" <> help "How often the commands will be invoked to gather samples" <> showDefault <> value 1 <> metavar "FORKS" ) execProc :: String -> IO Samples execProc cmd = withCreateProcess createProc $ \_ (Just out) _ ph -> do samples <- textToSamples <$> hGetContents out evaluate $ force samples ex <- waitForProcess ph pure samples where textToSamples = V.fromList . map read . lines createProc = (shell cmd) { std_out = CreatePipe } calcDiff :: Samples -> Samples -> Diff calcDiff oldSamples newSamples = Diff{..} where (oldMin, oldMax) = S.minMax oldSamples (newMin, newMax) = S.minMax newSamples oldMean = S.mean oldSamples newMean = S.mean newSamples oldStdDev = S.stdDev oldSamples newStdDev = S.stdDev newSamples meanPct = percDiff oldMean newMean test = S.welchTTest S.BGreater oldSamples newSamples percDiff :: Double -> Double -> Double percDiff x y = (abs (x - y) / ((x + y) / 2)) * 100 cmpPerf :: Args -> IO () cmpPerf (Args old new forks) = do listOfOldSamples <- replicateM forks $ do putStrLn ("Gathering metrics for: " <> T.unpack old) execProc $ T.unpack old listOfNewSamples <- replicateM forks $ do putStrLn ("Gathering metrics for: " <> T.unpack new) execProc $ T.unpack new let bestOldSamples = minimumBy (comparing S.mean) listOfOldSamples bestNewSamples = minimumBy (comparing S.mean) listOfNewSamples diff = calcDiff bestOldSamples bestNewSamples printf "%.2f%% mean difference\n" (meanPct diff) putStrLn " Cmd | Mean | Min | Max |" printf " Old | %10.3f | %10.3f | %10.3f |\n" (oldMean diff) (oldMin diff) (oldMax diff) printf " New | %10.3f | %10.3f | %10.3f |\n" (newMean diff) (newMin diff) (newMax diff) main :: IO () main = cmpPerf =<< execParser opts where opts = info (args <**> helper) ( fullDesc <> progDesc "Compare the (runtime) values returned by two different programs" )
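The comparison the Haskell tool performs (percent difference of means plus a one-sided Welch t-test) can be sketched in a few lines of Python; this is an illustrative sketch with made-up sample values, not part of the program above:

```python
import numpy as np
from scipy import stats

old = np.array([12.1, 11.8, 12.3, 12.0])   # e.g. runtimes reported by the "old" command
new = np.array([10.9, 11.2, 10.8, 11.0])   # runtimes reported by the "new" command

# Percent difference of the means, mirroring percDiff
mean_pct = abs(old.mean() - new.mean()) / ((old.mean() + new.mean()) / 2) * 100

# Welch's t-test (unequal variances), one-sided: is the old mean greater than the new one?
t_stat, p_value = stats.ttest_ind(old, new, equal_var=False, alternative='greater')
print(f"{mean_pct:.2f}% mean difference, p = {p_value:.4f}")
```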
% @Author: ArthurBernard % @Email: [email protected] % @Date: 2019-07-31 22:09:26 % @Last modified by: ArthurBernard % @Last modified time: 2020-01-17 11:38:29 \documentclass[a4paper]{arthur-cv} \title{Curiculum Vitae} \author{Arthur Bernard} \usepackage[english]{babel} \usepackage{fontspec} \usepackage{luatexbase} \usepackage{microtype} \usepackage{hyperref} \usepackage{xcolor} % Customize colorthem % \definecolor{leftcolorband}{HTML}{e0e0e0} % \definecolor{boxcolor}{HTML}{851919} % \definecolor{maincolor}{HTML}{420c0c} % \definecolor{secondcolor}{HTML}{861919} % \definecolor{thirdcolor}{HTML}{591111} % Define color for hyperlink \definecolor{colhyperlink}{HTML}{0E5484} % Set profile info \profilepic{pictures/luisg.jpg} \cvname{Luis Giraldo González} \cvlinkedin{/in/luisg-gonz-14b008172/} \cvgithub{LuisGGon} \cvmail{[email protected]} \cvnumberphone{+34 622 537 395} \cvjobtitle{PhD Mathematical Engineering} \cvsite{} \cvaddress{Madrid, Spain} \cvyearsold{September 05, 1986} \begin{document} \makeprofile % Set header \begin{textblock}{20.5}(0.25, 3.5) \begin{minipage}[t]{0.37\textwidth} %%%%%%%%%%%%%%%%% %% Left side %% %%%%%%%%%%%%%%%%% \sectionleft{About me} I am a friendly and communicative person, and also a professional mathematician with 10+ years of experience in scientific research. Now I am looking forward to move my career to private sector, where I can applied mathematical and computational tools, together with a rigorous analytical reasoning to solve practical problems. \sectionleft{Key skills} \subsectionleft{Proficiency in \textbf{statistics} \& \textbf{statistical models}} \subsectionleft{OS:}{\textbf{Linux} and \textbf{Windows}.} \subsectionleft{Languages:}{Spanish (\textbf{native}), English (\textbf{professional level}).} \subsectionleft{Database:}{Basic knowledge of \textbf{SQL}.} \sectionleft{Programming} \subsectionleft{Proficient:}{\textbf{Python} (expertise in NumPy, Pandas, Sympy, BeatifulSoup, Keras).} \subsectionleft{Advanced:}{\LaTeX.} \subsectionleft{In progress:}{\textbf{R}, \textsc{Matlab}/\textsc{Octave}} \sectionleft{MOOCs} \subsectionleft{Scientific Computing with \textbf{Python},}{on FreeCodeCamp.} \subsectionleft{\textbf{Machine Learning}, by Andrew Ng,}{on Coursera \href{https://coursera.org/share/24d5a81188809eebe5637a1c428303d0}{(link)}.} \subsectionleft{\textbf{Statistical Learning}, by T. Hastie \& R. Tibshirani,}{on edX \href{https://courses.edx.org/certificates/e9eb8d0c5e08499eaa6007a3ae9a82b8}{(link).}} %\subsectionleft{And other diverse courses about \textbf{Python}, \textbf{Deep Learning}, etc.}{} \sectionleft{Interests} \subsectionleft{AI and Data Science.}{} \subsectionleft{Machine Learning.}{} \end{minipage}\hfill\begin{minipage}[t]{0.61\textwidth} %%%%%%%%%%%%%%%%%% %% Right side %% %%%%%%%%%%%%%%%%%% \section{Job Experience} \begin{rightenv} \subsectionright{Nov. 2021 – Feb. 2022}{Research Visit}[at \textbf{KU Leuven}][Belgium]{Applications of Complex Analysis to Analytic Number Theory.} \subsectionright{Sep. 2019 – Present}{PhD Fellow}[at \textbf{University Carlos III of Madrid}]{Teaching of Calculus at Bachelor level. \textbf{Research:} Applied Mathematical Analysis.} \subsectionright{Sep. 2017 – Aug. 2019}{MSC Fellow}[at \textbf{University Carlos III of Madrid}]{Teaching of Calculus at Bachelor level. \textbf{Research:} Applied Mathematical Analysis.} \subsectionright{Sep. 2009 – Jul. 2017}{Assistant Professor}[at \textbf{University of Havana}]{Teaching of Real and Complex Analysis to math majors. 
\textbf{Research}: logic and foundations of Mathematics.} %\subsectionright{Sept. 2013 – May 2018}{Director}[at \textbf{Mutuelle des Etudiants de Provence}][Marseille]{Approval of budgets, financial investments, internal policy, etc.} \end{rightenv} % \section{Personal projects} % \begin{rightenv} % \subsectionright{2018 – 2019}{Machine/deep learning tools adapted to finance}{Development of a Python and Cython package to create \textbf{neural networks}, \textbf{backtest strategies}, analysis with \textbf{econmetric models} and \textbf{financial indicators}, etc. Published on PyPI as \href{https://github.com/ArthurBernard/Fynance}{\textcolor{colhyperlink}{\textit{fynance}}}.} % \subsectionright{2017 – 2018}{Webscraping package}{Development of a python package to \textbf{download data} and \textbf{update database} from some crypto-currency exchanges. Published on PyPI as \href{https://github.com/ArthurBernard/Download_Crypto_Currencies_Data}{\textcolor{colhyperlink}{\textit{dccd}}}.} % \subsectionright{2016 – 2019}{Trading bot algorithms on crypto-currencies}{Development and maintenance of trading bots with Python and Bash scripts. Starting in 2016 with \textbf{arbitrage strategy}, and more recently create \textbf{strategies with neural network}. Partly available on my GitHub in the repository \href{https://github.com/ArthurBernard/Strategy_Manager}{\textcolor{colhyperlink}{\textit{Strategy\_Manager}}}.} % \end{rightenv} \section{Education} \begin{rightenv} \subsectionright{2019 – 2022}[PhD]{Mathematical Engineering}[at \textbf{University Carlos III of Madrid}]{\href{https://www.uc3m.es/phdprogram/mathematical-engineering}{(link)}} \subsectionright{2017 – 2019}[MSc]{Mathematical Engineering}[at \textbf{University Carlos III of Madrid}]{\textbf{Courses:} Statistical Inference, Statistical Models, Graph Theory, etc.} \subsectionright{2012-2014}[MSc]{Mathematics}[at \textbf{University of Havana}]{\textbf{Courses:} Stochastic Processes, Advanced Linear Algebra, etc.} \subsectionright{2005-2009}[BSc]{Mathematics}[at \textbf{University of Havana}]{\textbf{Courses:} Programming, Optimization, Probabilities, etc.} \end{rightenv} \section{Miscellaneous} \begin{rightenv} % \subsectionright{2019}{Data-science competition}[at \textbf{ENS Challenge Data}]{\href{http://datachallenge.cfm.fr/t/end-of-year-ranking-2019-official-top-10/243}{\textcolor{colhyperlink}{$6^{th}$}} out of more than $100$ competitors, about prediction of daily stock movements on the US market, proposed by \textbf{Capital Fund Management}.} % \subsectionright{2013 – 2018}{Director}[at \textbf{Mutuelle des Etudiants de Provence}][Marseille]{Approval of budgets, financial investments, internal policy, etc.} \subsectionright{2009-Present:}{Publications}{Links to \href{https://scholar.google.com/citations?hl=es&user=rvGe3TYAAAAJ}{Google Scholar} and \href{https://orcid.org/0000-0001-8746-3822}{ORCID} profiles.} \subsectionright{Present}{Hobbies}{Melomaniac, auteur cinema, literature (sci-fi, Hispanic, North-American), politics, mathematics.} \end{rightenv} \end{minipage} \end{textblock} \end{document}
install.packages(c( "usethis", "testthat", "startup" ), repos="http://cran.us.r-project.org")
```python
# Erasmus+ ICCT project (2018-1-SI01-KA203-047081)
# Toggle cell visibility

from IPython.display import HTML
tag = HTML(''' Promijeni vidljivost <a href="javascript:code_toggle()">ovdje</a>.''')
display(tag)

# Hide the code completely
# from IPython.display import HTML
# tag = HTML('''<style>
# div.input {
#     display:none;
# }
# </style>''')
# display(tag)
```

Promijeni vidljivost <a href="javascript:code_toggle()">ovdje</a>.

```python
%matplotlib notebook
import numpy as np
import control as control
import matplotlib.pyplot as plt
import ipywidgets as widgets
import sympy as sym
# from IPython.display import Markdown # For displaying Markdown and LaTeX code
sym.init_printing()

continuous_update=False
```

## Steady-state error

Given the input transfer function $I(s)$ and the open-loop transfer function $G(s)$, the steady-state error $e(\infty)$ of the closed-loop system with unity feedback can be determined as:

\begin{equation}
e(\infty)=\lim_{s\to0}\frac{sI(s)}{1+G(s)}.
\end{equation}

For a unit step input $I(s)=\frac{1}{s}$ this becomes:

\begin{equation}
e_{step}(\infty)=\frac{1}{1+\lim_{s\to0}G(s)},
\end{equation}

for a ramp input $I(s)=\frac{1}{s^2}$:

\begin{equation}
e_{ramp}(\infty)=\frac{1}{\lim_{s\to0}sG(s)},
\end{equation}

and for a parabolic input $I(s)=\frac{1}{s^3}$:

\begin{equation}
e_{parabolic}(\infty)=\frac{1}{\lim_{s\to0}s^2G(s)}
\end{equation}

### Systems without integration

An example transfer function $G(s)$ of a system without integration can be defined as:

\begin{equation}
G(s) = \frac{K}{as^2 + bs + c}
\end{equation}

For a system without integration in the forward path, the steady-state error is infinite for both the ramp input and the parabolic input.

### Systems with one integration

An example transfer function $G(s)$ of a system with one integration can be defined as:

\begin{equation}
G(s) = \frac{K(as^2 + bs + c)}{s(ds^2 + es + f)}
\end{equation}

For a system with one integration in the forward path, the steady-state error is infinite for the parabolic input.

---

### How to use this interactive example?

- Choose between the system without integration and the system with one integration.
- Move the sliders to change the values of $a$, $b$, $c$ (the transfer function coefficients) and $K$ (the gain).

A minimal standalone symbolic check of the limits above is sketched right below, before the interactive code.
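The following is a small illustrative sketch (independent of the interactive widget code that follows) showing how the steady-state error limits above can be evaluated symbolically for the no-integration system $G(s)=\frac{K}{as^2+bs+c}$:

```python
import sympy as sym

s, K, a, b, c = sym.symbols('s K a b c', positive=True)
G = K / (a*s**2 + b*s + c)                 # system without integration

# Unit step input: e_step(inf) = 1 / (1 + lim_{s->0} G(s))
e_step = 1 / (1 + sym.limit(G, s, 0))      # simplifies to c / (c + K)

# Ramp input: e_ramp(inf) = 1 / lim_{s->0} s*G(s); this limit is 0,
# so the steady-state error for a ramp input is infinite, as stated above.
ramp_limit = sym.limit(s * G, s, 0)

print(sym.simplify(e_step), ramp_limit)
```

With the default slider values below ($K=1$, $c=6$) this gives $e_{step}(\infty) = \frac{6}{7} \approx 0.86$.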
```python style = {'description_width': 'initial'} layout1 = widgets.Layout(width='auto', height='auto') #set width and height systemSelect = widgets.ToggleButtons( options=[('bez integracije', 0), ('jedna integracija', 1)], description='Odaberi sustav: ',style=style) functionSelect = widgets.ToggleButtons( options=[('jedinična step funkcija', 0), ('rampa funkcija', 1), ('parabolična funkcija', 2)], description='Odaberi ulaznu funkciju: ',style=style) fig=plt.figure(num='Pogreška stacionarnog stanja') fig.set_size_inches((9.8,3)) fig.set_tight_layout(True) f1 = fig.add_subplot(1, 1, 1) f1.grid(which='both', axis='both', color='lightgray') f1.set_ylabel('ulaz, izlaz') f1.set_xlabel('$t$ [s]') inputf, = f1.plot([],[]) responsef, = f1.plot([],[]) errorf, = f1.plot([],[]) ann1=f1.annotate("", xy=([0], [0]), xytext=([0], [0])) ann2=f1.annotate("", xy=([0], [0]), xytext=([0], [0])) display(systemSelect) display(functionSelect) def create_draw_functions(K,a,b,c,index_system,index_input): num_of_samples = 1000 total_time = 150 t = np.linspace(0, total_time, num_of_samples) # time for which response is calculated (start, stop, step) if index_system == 0: Wsys = control.tf([K], [a, b, c]) ess, G_s, s, n = sym.symbols('e_{step}(\infty), G(s), s, n') sys1 = control.feedback(Wsys) elif index_system == 1: Wsys = control.tf([K,K,K*a], [1, b, c, 0]) ess, G_s, s, n = sym.symbols('e_{step}(\infty), G(s), s, n') sys1 = control.feedback(Wsys) global inputf, responsef, ann1, ann2 if index_input==0: infunction = np.ones(len(t)) infunction[0]=0 tout, yout = control.step_response(sys1,t) s=sym.Symbol('s') if index_system == 0: limit_val = sym.limit((K/(a*s**2+b*s+c)),s,0) elif index_system == 1: limit_val = sym.limit((K*s*s+K*s+K*a)/(s*s*s+b*s*s+c*s),s,0) e_inf=1/(1+limit_val) elif index_input==1: infunction=t; tout, yout, xx = control.forced_response(sys1, t, infunction) if index_system == 0: limit_val = sym.limit(s*(K/(a*s**2+b*s+c)),s,0) elif index_system == 1: limit_val = sym.limit(s*((K*s*s+K*s+K*a)/(s*s*s+b*s*s+c*s)),s,0) e_inf=1/limit_val elif index_input==2: infunction=t*t tout, yout, xx = control.forced_response(sys1, t, infunction) if index_system == 0: limit_val = sym.limit(s*s*(K/(a*s**2+b*s+c)),s,0) elif index_system == 1: limit_val = sym.limit(s*s*((K*s*s+K*s+K*a)/(s*s*s+b*s*s+c*s)),s,0) e_inf=1/limit_val ann1.remove() ann2.remove() if type(e_inf) == sym.numbers.ComplexInfinity: print('Pogreška stacionarnog stanja je beskonačna.') elif e_inf==0: print('Pogreška stacionarnog stanja je nula.') else: print('Pogreška stacionarnog stanja je jednaka %f.'% (e_inf,)) # if type(e_inf) == sym.numbers.ComplexInfinity: # display(Markdown('Steady-state error is infinite.')) # elif e_inf==0: # display(Markdown('Steady-state error is zero.')) # else: # display(Markdown('Steady-state error is equal to %f.'%(e_inf,))) if type(e_inf) != sym.numbers.ComplexInfinity and e_inf>0: ann1=plt.annotate("", xy=(tout[-60],infunction[-60]), xytext=(tout[-60],yout[-60]), arrowprops=dict(arrowstyle="|-|", connectionstyle="arc3")) ann2=plt.annotate("$e(\infty)$", xy=(145, 1.), xytext=(145, (yout[-60]+(infunction[-60]-yout[-60])/2))) elif type(e_inf) == sym.numbers.ComplexInfinity: ann1=plt.annotate("", xy=(0,0), xytext=(0,0), arrowprops=dict(arrowstyle="|-|", connectionstyle="arc3")) ann2=plt.annotate("", xy=(134, 1.), xytext=(134, (1 - infunction[-10])/2 + infunction[-10])) elif type(e_inf) != sym.numbers.ComplexInfinity and e_inf==0: ann1=plt.annotate("", xy=(0,0), xytext=(0,0), arrowprops=dict(arrowstyle="|-|", 
connectionstyle="arc3")) ann2=plt.annotate("", xy=(134, 1.), xytext=(134, (1 - yout[-10])/2 + yout[-10])) f1.lines.remove(inputf) f1.lines.remove(responsef) inputf, = f1.plot(t,infunction,label='ulaz',color='C0') responsef, = f1.plot(tout,yout,label='izlaz',color='C1') f1.relim() f1.autoscale_view() f1.legend() K_slider=widgets.IntSlider(min=1,max=8,step=1,value=1,description='$K$',continuous_update=False) a_slider=widgets.IntSlider(min=0,max=8,step=1,value=1,description='$a$',continuous_update=False) b_slider=widgets.IntSlider(min=0,max=8,step=1,value=1,description='$b$',continuous_update=False) c_slider=widgets.IntSlider(min=1,max=8,step=1,value=1,description='$c$',continuous_update=False) input_data=widgets.interactive_output(create_draw_functions, {'K':K_slider,'a':a_slider,'b':b_slider,'c':c_slider, 'index_system':systemSelect,'index_input':functionSelect}) def update_sliders(index): global K_slider, a_slider, b_slider, c_slider Kval=[1, 1, 1] aval=[1, 1, 1] bval=[2, 2, 2] cval=[6, 6, 6] K_slider.value=Kval[index] a_slider.value=aval[index] b_slider.value=bval[index] c_slider.value=cval[index] input_data2=widgets.interactive_output(update_sliders, {'index':functionSelect}) display(K_slider,a_slider,b_slider,c_slider,input_data) ``` <IPython.core.display.Javascript object> ToggleButtons(description='Odaberi sustav: ', options=(('bez integracije', 0), ('jedna integracija', 1)), styl… ToggleButtons(description='Odaberi ulaznu funkciju: ', options=(('jedinična step funkcija', 0), ('rampa funkci… IntSlider(value=1, continuous_update=False, description='$K$', max=8, min=1) IntSlider(value=1, continuous_update=False, description='$a$', max=8) IntSlider(value=2, continuous_update=False, description='$b$', max=8) IntSlider(value=6, continuous_update=False, description='$c$', max=8, min=1) Output() ```python ```
{-# OPTIONS --safe #-} module Cubical.Algebra.CommAlgebra.Instances.Unit where open import Cubical.Foundations.Prelude open import Cubical.Data.Unit open import Cubical.Algebra.Ring open import Cubical.Algebra.CommRing open import Cubical.Algebra.CommRing.Instances.Unit open import Cubical.Algebra.CommAlgebra private variable ℓ ℓ' : Level open CommAlgebraStr module _ (R : CommRing ℓ) where UnitCommAlgebra : CommAlgebra R ℓ' UnitCommAlgebra = commAlgebraFromCommRing UnitCommRing (λ _ _ → tt*) (λ _ _ _ → refl) (λ _ _ _ → refl) (λ _ _ _ → refl) (λ _ → refl) (λ _ _ _ → refl)
function [s,it]=display(c,tab,it) if nargin>1 [s,it]=display(c.algorithm,tab,it); return; end disp(c.algorithm.name); disp(['data dimensions:']) global X; global Y; if isempty(c.myX) s=(['X = ' num2str(size(X(c.index,c.findex),1)) 'x' ... num2str(size(X(c.index,c.findex),2)) ]); else s=(['X = ' num2str(size(c.myX,1)) 'x' num2str(size(c.myX,2)) ]); end if isempty(c.myY) if ~isempty(Y) disp([s ' Y = ' num2str(size(Y(c.index,:),1)) 'x' ... num2str(size(Y(c.index,:),2)) ]); else disp([s ' Y = 0x0']); end else disp([s ' Y = ' num2str(size(c.myY,1)) 'x' num2str(size(c.myY,2)) ]); end
[STATEMENT] lemma infnorm[code]: fixes x::"'a::executable_euclidean_space" shows "infnorm x = fold max (map (\<lambda>i. abs (x \<bullet> i)) Basis_list) 0" [PROOF STATE] proof (prove) goal (1 subgoal): 1. infnorm x = fold max (map (\<lambda>i. \<bar>x \<bullet> i\<bar>) Basis_list) 0 [PROOF STEP] by (auto simp: Max.set_eq_fold[symmetric] infnorm_Max[symmetric] infnorm_pos_le intro!: max.absorb2[symmetric])
theory power
imports Main
begin

(* We define the power function x^n. We recurse on the exponent until n is equal to 0. *)
primrec pow :: "nat => nat => nat" where
"pow x 0 = Suc 0" |
"pow x (Suc n) = x * pow x n"

(* We will prove that raising x to a product of exponents is the same as applying the power
   twice, first with m and then with n. *)

(* First we prove a helper lemma: multiplying the results of two powers is the same as adding
   the exponents beforehand. We use the induction tactic on the exponent and apply auto. *)
lemma pow_add: "pow x (m + n) = pow x m * pow x n"
apply (induct n)
apply auto
done

(* This uses the helper lemma pow_add proved above. We use the induction tactic on the exponent
   and apply auto. *)
theorem pow_mult: "pow x (m * n) = pow (pow x m) n"
apply (induct n)
apply (auto simp add: pow_add)
done

end
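As a worked example of the reasoning sketched in the comments above (not part of the theory file), the induction step of `pow_add` on `n` unfolds as:

```latex
\begin{align*}
\mathrm{pow}\ x\ (m + \mathrm{Suc}\ n)
  &= \mathrm{pow}\ x\ (\mathrm{Suc}\ (m + n)) \\
  &= x * \mathrm{pow}\ x\ (m + n) \\
  &= x * (\mathrm{pow}\ x\ m * \mathrm{pow}\ x\ n) && \text{(induction hypothesis)} \\
  &= \mathrm{pow}\ x\ m * (x * \mathrm{pow}\ x\ n) \\
  &= \mathrm{pow}\ x\ m * \mathrm{pow}\ x\ (\mathrm{Suc}\ n),
\end{align*}
```

which is exactly what `apply (induct n)` followed by `apply auto` discharges.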
/* * Software License Agreement (BSD License) * * Copyright (c) 2013, The University of Texas at Dallas * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of The University of Texas at Dallas nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ /* * Filename: falcon_driver.cpp * * Description: This file contains the ROS node that interfaces with the * Novint Falcon 3D Controller * * Log * ---- * 2015-02-09 File created by Hazen Eckert * */ // ROS includes #include "ros/ros.h" #include "geometry_msgs/Vector3.h" #include "novint_falcon_driver/NovintFalcon.h" #include <string> #include <Eigen/Core> #include <Eigen/Dense> #include <Eigen/Geometry> /* Process: - get configuration parameters - setup flacon class - */ using namespace Eigen; Vector3d force; void force_callback(const geometry_msgs::Vector3::ConstPtr& msg) { force(0) = msg->x; force(1) = msg->y; force(2) = msg->z; } int main(int argc, char **argv) { // ROS Initalization ros::init(argc, argv, "falcon_driver"); ros::NodeHandle n; ros::NodeHandle private_n("~"); // ROS Parameters std::string firmware; bool force_firmware, skip_checksum; int device_index; private_n.param<std::string>("firmware", firmware, "nvent_firmware"); private_n.param<bool>("force_firmware", force_firmware, false); private_n.param<bool>("skip_checksum", skip_checksum, true); private_n.param<int>("device_index", device_index, 0); // ROS Subscribers ros::Subscriber force_sub = n.subscribe("force", 10, force_callback); // ROS Publishers ros::Publisher position_pub = n.advertise<geometry_msgs::Vector3>("position", 10); // Falcon Initialization NovintFalcon falcon; if(!falcon.initialize( firmware, force_firmware, skip_checksum, device_index )) { ROS_FATAL("Unable to initialize the falcon device. Exiting..."); return 1; } while(!falcon.calibrate() && ros::ok()); // ROS loop while(ros::ok()) { if (!falcon.update()) continue; ros::spinOnce(); Vector3d position = falcon.getPosition(); geometry_msgs::Vector3 msg; msg.x = position(0); msg.y = position(1); msg.z = position(2); position_pub.publish(msg); if(force_sub.getNumPublishers() != 1) falcon.setForce(Vector3d(0,0,0)); else falcon.setForce(force); } }
classdef InstanceSegMMLoss < dagnn.Loss properties marginAlpha_ end properties (Transient) curSimMat weightMat mass_ size_ end methods function outputs = forward(obj, inputs, params) [h, w, ch, bs] = size(inputs{1}); sz = [h, w, ch, bs]; mass = sz(1) * sz(2) + 1; obj.size_ = sz; obj.mass_ = mass; % gpuMode = isa(inputs{1}, 'gpuArray'); % if gpuMode % grndLabel = gpuArray(zeros(sz, 'single')); % else % grndLabel = zeros(sz, 'single'); % end % % for j = 1:sz(4) % C = inputs{2}(:,:,:,j); % C = reshape(C, [sz(1), 1]); % C = repmat(C, 1, sz(2)); % C = (C==C'); % grndLabel(:,:,:,j) = C; % end % obj.curSimMat = grndLabel; layerName = 'obj_instSeg'; if isnan(obj.net.getLayerIndex(layerName)) layerName = 'obj_instSeg_reg'; end obj.curSimMat = obj.net.layers(obj.net.getLayerIndex(layerName)).block.curSimMat; obj.weightMat = obj.net.layers(obj.net.getLayerIndex(layerName)).block.weightMat; outputs{1} = vl_nnloss(inputs{1}, obj.curSimMat, 'marginAlpha_', obj.marginAlpha_, ... 'loss', obj.loss, ... 'instanceWeights', obj.weightMat ) ; % 1./mass n = obj.numAveraged ; m = n + size(inputs{1},4) ; obj.average = (n * obj.average + double(gather(outputs{1}))) / m ; obj.numAveraged = m ; end function [derInputs, derParams] = backward(obj, inputs, params, derOutputs) % sz = obj.size_; % mass = obj.mass_; % grndLabel = obj.curSimMat; derInputs{1} = vl_nnloss(inputs{1}, obj.curSimMat, derOutputs{1}, 'marginAlpha_', obj.marginAlpha_, ... 'loss', obj.loss, ... 'instanceWeights', obj.weightMat ) ; % 1./mass derInputs{2} = [] ; derParams = {} ; end function obj = InstanceSegMMLoss(varargin) obj.load(varargin) ; end end end
\chapter{Diameter error propagation model}
\label{ch::diameter}

\textit{In this last chapter we briefly discuss how the error propagates when the diameter is estimated. We present one of the models introduced in Section \ref{sec:sys-cmp} and show some experimental results for it.}

% Erone's model
\input{./src/chapters/ch6-Diameter/model.tex}

% Experimental results
\input{./src/chapters/ch6-Diameter/results.tex}
{-# LANGUAGE BangPatterns #-} {-# LANGUAGE CPP #-} {-# LANGUAGE ScopedTypeVariables #-} #if __GLASGOW_HASKELL__ >= 800 {-# OPTIONS_GHC -Wno-redundant-constraints #-} #endif -- | -- Module : Graphics.ColorSpace.Elevator -- Copyright : (c) Alexey Kuleshevich 2018-2019 -- License : BSD3 -- Maintainer : Alexey Kuleshevich <[email protected]> -- Stability : experimental -- Portability : non-portable -- module Graphics.ColorSpace.Elevator ( Elevator(..) , clamp01 ) where import qualified Data.Complex as C import Data.Int import Data.Typeable import Data.Vector.Storable (Storable) import Data.Vector.Unboxed (Unbox) import Data.Word import GHC.Float -- | A class with a set of convenient functions that allow for changing precision of -- channels within pixels, while scaling the values to keep them in an appropriate range. -- -- >>> let rgb = PixelRGB 0.0 0.5 1.0 :: Pixel RGB Double -- >>> eToWord8 <$> rgb -- <RGB:(0|128|255)> -- >>> eToWord16 <$> rgb -- <RGB:(0|32768|65535)> -- class (Eq e, Num e, Typeable e, Unbox e, Storable e) => Elevator e where -- | Values are scaled to @[0, 255]@ range. eToWord8 :: e -> Word8 -- | Values are scaled to @[0, 65535]@ range. eToWord16 :: e -> Word16 -- | Values are scaled to @[0, 4294967295]@ range. eToWord32 :: e -> Word32 -- | Values are scaled to @[0, 18446744073709551615]@ range. eToWord64 :: e -> Word64 -- | Values are scaled to @[0.0, 1.0]@ range. eToFloat :: e -> Float -- | Values are scaled to @[0.0, 1.0]@ range. eToDouble :: e -> Double -- | Values are scaled from @[0.0, 1.0]@ range. eFromDouble :: Double -> e -- | Lower the precision dropDown :: forall a b. (Integral a, Bounded a, Integral b, Bounded b) => a -> b dropDown !e = fromIntegral $ fromIntegral e `div` ((maxBound :: a) `div` fromIntegral (maxBound :: b)) {-# INLINE dropDown #-} -- | Increase the precision raiseUp :: forall a b. (Integral a, Bounded a, Integral b, Bounded b) => a -> b raiseUp !e = fromIntegral e * ((maxBound :: b) `div` fromIntegral (maxBound :: a)) {-# INLINE raiseUp #-} -- | Convert to fractional with value less than or equal to 1. squashTo1 :: forall a b. (Fractional b, Integral a, Bounded a) => a -> b squashTo1 !e = fromIntegral e / fromIntegral (maxBound :: a) {-# INLINE squashTo1 #-} -- | Convert to integral streaching it's value up to a maximum value. stretch :: forall a b. (RealFrac a, Floating a, Integral b, Bounded b) => a -> b stretch !e = round (fromIntegral (maxBound :: b) * clamp01 e) {-# INLINE stretch #-} -- | Clamp a value to @[0, 1]@ range. 
clamp01 :: (Ord a, Floating a) => a -> a clamp01 !x = min (max 0 x) 1 {-# INLINE clamp01 #-} -- | Values between @[0, 255]]@ instance Elevator Word8 where eToWord8 = id {-# INLINE eToWord8 #-} eToWord16 = raiseUp {-# INLINE eToWord16 #-} eToWord32 = raiseUp {-# INLINE eToWord32 #-} eToWord64 = raiseUp {-# INLINE eToWord64 #-} eToFloat = squashTo1 {-# INLINE eToFloat #-} eToDouble = squashTo1 {-# INLINE eToDouble #-} eFromDouble = eToWord8 {-# INLINE eFromDouble #-} -- | Values between @[0, 65535]]@ instance Elevator Word16 where eToWord8 = dropDown {-# INLINE eToWord8 #-} eToWord16 = id {-# INLINE eToWord16 #-} eToWord32 = raiseUp {-# INLINE eToWord32 #-} eToWord64 = raiseUp {-# INLINE eToWord64 #-} eToFloat = squashTo1 {-# INLINE eToFloat #-} eToDouble = squashTo1 {-# INLINE eToDouble #-} eFromDouble = eToWord16 {-# INLINE eFromDouble #-} -- | Values between @[0, 4294967295]@ instance Elevator Word32 where eToWord8 = dropDown {-# INLINE eToWord8 #-} eToWord16 = dropDown {-# INLINE eToWord16 #-} eToWord32 = id {-# INLINE eToWord32 #-} eToWord64 = raiseUp {-# INLINE eToWord64 #-} eToFloat = squashTo1 {-# INLINE eToFloat #-} eToDouble = squashTo1 {-# INLINE eToDouble #-} eFromDouble = eToWord32 {-# INLINE eFromDouble #-} -- | Values between @[0, 18446744073709551615]@ instance Elevator Word64 where eToWord8 = dropDown {-# INLINE eToWord8 #-} eToWord16 = dropDown {-# INLINE eToWord16 #-} eToWord32 = dropDown {-# INLINE eToWord32 #-} eToWord64 = id {-# INLINE eToWord64 #-} eToFloat = squashTo1 {-# INLINE eToFloat #-} eToDouble = squashTo1 {-# INLINE eToDouble #-} eFromDouble = eToWord64 {-# INLINE eFromDouble #-} -- | Values between @[0, 18446744073709551615]@ on 64bit instance Elevator Word where eToWord8 = dropDown {-# INLINE eToWord8 #-} eToWord16 = dropDown {-# INLINE eToWord16 #-} eToWord32 = dropDown {-# INLINE eToWord32 #-} eToWord64 = fromIntegral {-# INLINE eToWord64 #-} eToFloat = squashTo1 {-# INLINE eToFloat #-} eToDouble = squashTo1 {-# INLINE eToDouble #-} eFromDouble = stretch . clamp01 {-# INLINE eFromDouble #-} -- | Values between @[0, 127]@ instance Elevator Int8 where eToWord8 = fromIntegral . max 0 {-# INLINE eToWord8 #-} eToWord16 = raiseUp . max 0 {-# INLINE eToWord16 #-} eToWord32 = raiseUp . max 0 {-# INLINE eToWord32 #-} eToWord64 = raiseUp . max 0 {-# INLINE eToWord64 #-} eToFloat = squashTo1 . max 0 {-# INLINE eToFloat #-} eToDouble = squashTo1 . max 0 {-# INLINE eToDouble #-} eFromDouble = stretch . clamp01 {-# INLINE eFromDouble #-} -- | Values between @[0, 32767]@ instance Elevator Int16 where eToWord8 = dropDown . max 0 {-# INLINE eToWord8 #-} eToWord16 = fromIntegral . max 0 {-# INLINE eToWord16 #-} eToWord32 = raiseUp . max 0 {-# INLINE eToWord32 #-} eToWord64 = raiseUp . max 0 {-# INLINE eToWord64 #-} eToFloat = squashTo1 . max 0 {-# INLINE eToFloat #-} eToDouble = squashTo1 . max 0 {-# INLINE eToDouble #-} eFromDouble = stretch . clamp01 {-# INLINE eFromDouble #-} -- | Values between @[0, 2147483647]@ instance Elevator Int32 where eToWord8 = dropDown . max 0 {-# INLINE eToWord8 #-} eToWord16 = dropDown . max 0 {-# INLINE eToWord16 #-} eToWord32 = fromIntegral . max 0 {-# INLINE eToWord32 #-} eToWord64 = raiseUp . max 0 {-# INLINE eToWord64 #-} eToFloat = squashTo1 . max 0 {-# INLINE eToFloat #-} eToDouble = squashTo1 . max 0 {-# INLINE eToDouble #-} eFromDouble = stretch . clamp01 {-# INLINE eFromDouble #-} -- | Values between @[0, 9223372036854775807]@ instance Elevator Int64 where eToWord8 = dropDown . max 0 {-# INLINE eToWord8 #-} eToWord16 = dropDown . 
max 0 {-# INLINE eToWord16 #-} eToWord32 = dropDown . max 0 {-# INLINE eToWord32 #-} eToWord64 = fromIntegral . max 0 {-# INLINE eToWord64 #-} eToFloat = squashTo1 . max 0 {-# INLINE eToFloat #-} eToDouble = squashTo1 . max 0 {-# INLINE eToDouble #-} eFromDouble = stretch . clamp01 {-# INLINE eFromDouble #-} -- | Values between @[0, 9223372036854775807]@ on 64bit instance Elevator Int where eToWord8 = dropDown . max 0 {-# INLINE eToWord8 #-} eToWord16 = dropDown . max 0 {-# INLINE eToWord16 #-} eToWord32 = dropDown . max 0 {-# INLINE eToWord32 #-} eToWord64 = fromIntegral . max 0 {-# INLINE eToWord64 #-} eToFloat = squashTo1 . max 0 {-# INLINE eToFloat #-} eToDouble = squashTo1 . max 0 {-# INLINE eToDouble #-} eFromDouble = stretch . clamp01 {-# INLINE eFromDouble #-} -- | Values between @[0.0, 1.0]@ instance Elevator Float where eToWord8 = stretch . clamp01 {-# INLINE eToWord8 #-} eToWord16 = stretch . clamp01 {-# INLINE eToWord16 #-} eToWord32 = stretch . clamp01 {-# INLINE eToWord32 #-} eToWord64 = stretch . clamp01 {-# INLINE eToWord64 #-} eToFloat = id {-# INLINE eToFloat #-} eToDouble = float2Double {-# INLINE eToDouble #-} eFromDouble = eToFloat {-# INLINE eFromDouble #-} -- | Values between @[0.0, 1.0]@ instance Elevator Double where eToWord8 = stretch . clamp01 {-# INLINE eToWord8 #-} eToWord16 = stretch . clamp01 {-# INLINE eToWord16 #-} eToWord32 = stretch . clamp01 {-# INLINE eToWord32 #-} eToWord64 = stretch . clamp01 {-# INLINE eToWord64 #-} eToFloat = double2Float {-# INLINE eToFloat #-} eToDouble = id {-# INLINE eToDouble #-} eFromDouble = id {-# INLINE eFromDouble #-} -- | Discards imaginary part and changes precision of real part. instance (Num e, Elevator e, RealFloat e) => Elevator (C.Complex e) where eToWord8 = eToWord8 . C.realPart {-# INLINE eToWord8 #-} eToWord16 = eToWord16 . C.realPart {-# INLINE eToWord16 #-} eToWord32 = eToWord32 . C.realPart {-# INLINE eToWord32 #-} eToWord64 = eToWord64 . C.realPart {-# INLINE eToWord64 #-} eToFloat = eToFloat . C.realPart {-# INLINE eToFloat #-} eToDouble = eToDouble . C.realPart {-# INLINE eToDouble #-} eFromDouble = (C.:+ 0) . eFromDouble {-# INLINE eFromDouble #-}
<table border="0"> <tr> <td> </td> <td> </td> </tr> </table> # Dynamic Double Machine Learning: Use Cases and Examples Dynamic DoubleML is an extension of the Double ML approach for treatments assigned sequentially over time periods. This estimator will account for treatments that can have causal effects on future outcomes. For more details, see [this paper](https://arxiv.org/abs/2002.07285) or the [EconML docummentation](https://econml.azurewebsites.net/). For example, the Dynamic DoubleML could be useful in estimating the following causal effects: * the effect of investments on revenue at companies that receive investments at regular intervals ([see more](https://arxiv.org/abs/2103.08390)) * the effect of prices on demand in stores where prices of goods change over time * the effect of income on health outcomes in people who receive yearly income The preferred data format is balanced panel data. Each panel corresponds to one entity (e.g. company, store or person) and the different rows in a panel correspond to different time points. Example: ||Company|Year|Features|Investment|Revenue| |---|---|---|---|---|---| |1|A|2018|...|\$1,000|\$10,000| |2|A|2019|...|\$2,000|\$12,000| |3|A|2020|...|\$3,000|\$15,000| |4|B|2018|...|\$0|\$5,000| |5|B|2019|...|\$100|\$10,000| |6|B|2020|...|\$1,200|\$7,000| |7|C|2018|...|\$1,000|\$20,000| |8|C|2019|...|\$1,500|\$25,000| |9|C|2020|...|\$500|\$15,000| (Note: when passing the data to the DynamicDML estimator, the "Company" column above corresponds to the `groups` argument at fit time. The "Year" column above should not be passed in as it will be inferred from the "Company" column) If group memebers do not appear together, it is assumed that the first instance of a group in the dataset corresponds to the first period of that group, the second instance of the group corresponds to the second period, etc. Example: ||Company|Features|Investment|Revenue| |---|---|---|---|---| |1|A|...|\$1,000|\$10,000| |2|B|...|\$0|\$5,000 |3|C|...|\$1,000|\$20,000| |4|A|...|\$2,000|\$12,000| |5|B|...|\$100|\$10,000| |6|C|...|\$1,500|\$25,000| |7|A|...|\$3,000|\$15,000| |8|B|...|\$1,200|\$7,000| |9|C|...|\$500|\$15,000| In this dataset, 1<sup>st</sup> row corresponds to the first period of group `A`, 4<sup>th</sup> row corresponds to the second period of group `A`, etc. In this notebook, we show the performance of the DynamicDML on synthetic and observational data. ## Notebook Contents 1. [Example Usage with Average Treatment Effects](#1.-Example-Usage-with-Average-Treatment-Effects) 2. [Example Usage with Heterogeneous Treatment Effects](#2.-Example-Usage-with-Heterogeneous-Treatment-Effects) ```python %load_ext autoreload %autoreload 2 ``` ```python import econml ``` ```python # Main imports from econml.dynamic.dml import DynamicDML from econml.tests.dgp import DynamicPanelDGP, add_vlines # Helper imports import numpy as np from sklearn.linear_model import Lasso, LassoCV, LogisticRegression, LogisticRegressionCV, MultiTaskLassoCV import matplotlib.pyplot as plt %matplotlib inline ``` # 1. Example Usage with Average Treatment Effects ## 1.1 DGP We consider a data generating process from a markovian treatment model. In the example bellow, $T_t\rightarrow$ treatment(s) at time $t$, $Y_t\rightarrow$outcome at time $t$, $X_t\rightarrow$ features and controls at time $t$ (the coefficients $e, f$ will pick the features and the controls). 
\begin{align} X_t =& (\pi'X_{t-1} + 1) \cdot A\, T_{t-1} + B X_{t-1} + \epsilon_t\\ T_t =& \gamma\, T_{t-1} + (1-\gamma) \cdot D X_t + \zeta_t\\ Y_t =& (\sigma' X_{t} + 1) \cdot e\, T_{t} + f X_t + \eta_t \end{align} with $X_0, T_0 = 0$ and $\epsilon_t, \zeta_t, \eta_t \sim N(0, \sigma^2)$. Moreover, $X_t \in R^{n_x}$, $B[:, 0:s_x] \neq 0$ and $B[:, s_x:-1] = 0$, $\gamma\in [0, 1]$, $D[:, 0:s_x] \neq 0$, $D[:, s_x:-1]=0$, $f[0:s_x]\neq 0$, $f[s_x:-1]=0$. We draw a single time series of samples of length $n\_panels \cdot n\_periods$. ```python # Define DGP parameters np.random.seed(123) n_panels = 5000 # number of panels n_periods = 3 # number of time periods in each panel n_treatments = 2 # number of treatments in each period n_x = 100 # number of features + controls s_x = 10 # number of controls (endogeneous variables) s_t = 10 # treatment support size ``` ```python # Generate data dgp = DynamicPanelDGP(n_periods, n_treatments, n_x).create_instance( s_x, random_seed=12345) Y, T, X, W, groups = dgp.observational_data(n_panels, s_t=s_t, random_seed=12345) true_effect = dgp.true_effect ``` ## 1.2 Train Estimator ```python est = DynamicDML( model_y=LassoCV(cv=3, max_iter=1000), model_t=MultiTaskLassoCV(cv=3, max_iter=1000), cv=3) ``` ```python est.fit(Y, T, X=None, W=W, groups=groups) ``` <econml.dynamic.dml._dml.DynamicDML at 0x19d2abd6a00> ```python # Average treatment effect of all periods on last period for unit treatments print(f"Average effect of default policy: {est.ate():0.2f}") ``` Average effect of default policy: 1.40 A scalar was specified but there are multiple treatments; the same value will be used for each treatment. Consider specifyingall treatments, or using the const_marginal_effect method. ```python # Effect of target policy over baseline policy # Must specify a treatment for each period baseline_policy = np.zeros((1, n_periods * n_treatments)) target_policy = np.ones((1, n_periods * n_treatments)) eff = est.effect(T0=baseline_policy, T1=target_policy) print(f"Effect of target policy over baseline policy: {eff[0]:0.2f}") ``` Effect of target policy over baseline policy: 1.40 ```python # Period treatment effects + interpretation for i, theta in enumerate(est.intercept_.reshape(-1, n_treatments)): print(f"Marginal effect of a treatments in period {i+1} on period {n_periods} outcome: {theta}") ``` Marginal effect of a treatments in period 1 on period 3 outcome: [0.04000235 0.0701606 ] Marginal effect of a treatments in period 2 on period 3 outcome: [0.31611764 0.23714736] Marginal effect of a treatments in period 3 on period 3 outcome: [0.13108411 0.60656886] ```python # Period treatment effects with confidence intervals est.summary() ``` Coefficient Results: X is None, please call intercept_inference to learn the constant! 
<table class="simpletable"> <caption>CATE Intercept Results</caption> <tr> <td></td> <th>point_estimate</th> <th>stderr</th> <th>zstat</th> <th>pvalue</th> <th>ci_lower</th> <th>ci_upper</th> </tr> <tr> <th>cate_intercept|(T0)$_0$</th> <td>0.04</td> <td>0.041</td> <td>0.977</td> <td>0.328</td> <td>-0.027</td> <td>0.107</td> </tr> <tr> <th>cate_intercept|(T1)$_0$</th> <td>0.07</td> <td>0.04</td> <td>1.74</td> <td>0.082</td> <td>0.004</td> <td>0.136</td> </tr> <tr> <th>cate_intercept|(T0)$_1$</th> <td>0.316</td> <td>0.036</td> <td>8.848</td> <td>0.0</td> <td>0.257</td> <td>0.375</td> </tr> <tr> <th>cate_intercept|(T1)$_1$</th> <td>0.237</td> <td>0.036</td> <td>6.608</td> <td>0.0</td> <td>0.178</td> <td>0.296</td> </tr> <tr> <th>cate_intercept|(T0)$_2$</th> <td>0.131</td> <td>0.003</td> <td>45.665</td> <td>0.0</td> <td>0.126</td> <td>0.136</td> </tr> <tr> <th>cate_intercept|(T1)$_2$</th> <td>0.607</td> <td>0.003</td> <td>210.244</td> <td>0.0</td> <td>0.602</td> <td>0.611</td> </tr> </table><br/><br/><sub>A linear parametric conditional average treatment effect (CATE) model was fitted:<br/>$Y = \Theta(X)\cdot T + g(X, W) + \epsilon$<br/>where for every outcome $i$ and treatment $j$ the CATE $\Theta_{ij}(X)$ has the form:<br/>$\Theta_{ij}(X) = \phi(X)' coef_{ij} + cate\_intercept_{ij}$<br/>where $\phi(X)$ is the output of the `featurizer` or $X$ if `featurizer`=None. Coefficient Results table portrays the $coef_{ij}$ parameter vector for each outcome $i$ and treatment $j$. Intercept Results table portrays the $cate\_intercept_{ij}$ parameter.</sub> ```python conf_ints = est.intercept__interval(alpha=0.05) ``` ## 1.3 Performance Visualization ```python # Some plotting boilerplate code plt.figure(figsize=(15, 5)) plt.errorbar(np.arange(n_periods*n_treatments)-.04, est.intercept_, yerr=(conf_ints[1] - est.intercept_, est.intercept_ - conf_ints[0]), fmt='o', label='DynamicDML') plt.errorbar(np.arange(n_periods*n_treatments), true_effect.flatten(), fmt='o', alpha=.6, label='Ground truth') for t in np.arange(1, n_periods): plt.axvline(x=t * n_treatments - .5, linestyle='--', alpha=.4) plt.xticks([t * n_treatments - .5 + n_treatments/2 for t in range(n_periods)], ["$\\theta_{}$".format(t) for t in range(n_periods)]) plt.gca().set_xlim([-.5, n_periods*n_treatments - .5]) plt.ylabel("Effect") plt.legend() plt.show() ``` # 2. Example Usage with Heterogeneous Treatment Effects on Time-Invariant Unit Characteristics We can also estimate treatment effect heterogeneity with respect to the value of some subset of features $X$ in the initial period. Heterogeneity is currently only supported with respect to such initial state features. This for instance can support heterogeneity with respect to time-invariant unit characteristics. In that case you can simply pass as $X$ a repetition of some unit features that stay constant in all periods. You can also pass time-varying features, and their time varying component will be used as a time-varying control. However, heterogeneity will only be estimated with respect to the initial state. 
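To make the remark above concrete, here is a minimal sketch (with hypothetical array names, not taken from the notebook) of how time-invariant unit characteristics can be tiled across periods before being passed as `X`:

```python
import numpy as np

n_panels, n_periods = 1000, 3
unit_chars = np.random.normal(size=(n_panels, 2))   # one row of characteristics per unit

# Assuming each unit's rows are contiguous (period 1, 2, 3 of unit 0, then unit 1, ...),
# repeat the unit-level features once per period so every panel row carries a copy.
X_const = np.repeat(unit_chars, n_periods, axis=0)  # shape (n_panels * n_periods, 2)

# X_const can then be passed as X to DynamicDML.fit(Y, T, X=X_const, W=W, groups=groups);
# the estimated CATE then varies only with these time-invariant characteristics.
```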
## 2.1 DGP ```python # Define additional DGP parameters het_strength = .5 het_inds = np.arange(n_x - n_treatments, n_x) ``` ```python # Generate data dgp = DynamicPanelDGP(n_periods, n_treatments, n_x).create_instance( s_x, hetero_strength=het_strength, hetero_inds=het_inds, random_seed=12) Y, T, X, W, groups = dgp.observational_data(n_panels, s_t=s_t, random_seed=1) ate_effect = dgp.true_effect het_effect = dgp.true_hetero_effect[:, het_inds + 1] ``` ## 2.2 Train Estimator ```python est = DynamicDML( model_y=LassoCV(cv=3), model_t=MultiTaskLassoCV(cv=3), cv=3) ``` ```python est.fit(Y, T, X=X, W=W, groups=groups, inference="auto") ``` <econml.dynamic.dml._dml.DynamicDML at 0x19d2ae7e5e0> ```python est.summary() ``` <table class="simpletable"> <caption>Coefficient Results</caption> <tr> <td></td> <th>point_estimate</th> <th>stderr</th> <th>zstat</th> <th>pvalue</th> <th>ci_lower</th> <th>ci_upper</th> </tr> <tr> <th>X0|(T0)$_0$</th> <td>0.009</td> <td>0.045</td> <td>0.203</td> <td>0.839</td> <td>-0.065</td> <td>0.083</td> </tr> <tr> <th>X0|(T1)$_0$</th> <td>0.017</td> <td>0.042</td> <td>0.416</td> <td>0.677</td> <td>-0.051</td> <td>0.086</td> </tr> <tr> <th>X0|(T0)$_1$</th> <td>-0.001</td> <td>0.041</td> <td>-0.035</td> <td>0.972</td> <td>-0.069</td> <td>0.067</td> </tr> <tr> <th>X0|(T1)$_1$</th> <td>-0.031</td> <td>0.041</td> <td>-0.76</td> <td>0.447</td> <td>-0.099</td> <td>0.036</td> </tr> <tr> <th>X0|(T0)$_2$</th> <td>-0.306</td> <td>0.008</td> <td>-36.667</td> <td>0.0</td> <td>-0.32</td> <td>-0.292</td> </tr> <tr> <th>X0|(T1)$_2$</th> <td>0.158</td> <td>0.008</td> <td>19.656</td> <td>0.0</td> <td>0.145</td> <td>0.171</td> </tr> <tr> <th>X1|(T0)$_0$</th> <td>0.017</td> <td>0.044</td> <td>0.378</td> <td>0.706</td> <td>-0.056</td> <td>0.09</td> </tr> <tr> <th>X1|(T1)$_0$</th> <td>-0.007</td> <td>0.045</td> <td>-0.164</td> <td>0.87</td> <td>-0.082</td> <td>0.067</td> </tr> <tr> <th>X1|(T0)$_1$</th> <td>-0.034</td> <td>0.042</td> <td>-0.821</td> <td>0.412</td> <td>-0.103</td> <td>0.034</td> </tr> <tr> <th>X1|(T1)$_1$</th> <td>-0.025</td> <td>0.042</td> <td>-0.6</td> <td>0.549</td> <td>-0.095</td> <td>0.044</td> </tr> <tr> <th>X1|(T0)$_2$</th> <td>-0.302</td> <td>0.008</td> <td>-35.72</td> <td>0.0</td> <td>-0.316</td> <td>-0.288</td> </tr> <tr> <th>X1|(T1)$_2$</th> <td>0.156</td> <td>0.008</td> <td>18.801</td> <td>0.0</td> <td>0.142</td> <td>0.169</td> </tr> </table> <table class="simpletable"> <caption>CATE Intercept Results</caption> <tr> <td></td> <th>point_estimate</th> <th>stderr</th> <th>zstat</th> <th>pvalue</th> <th>ci_lower</th> <th>ci_upper</th> </tr> <tr> <th>cate_intercept|(T0)$_0$</th> <td>0.024</td> <td>0.036</td> <td>0.653</td> <td>0.513</td> <td>-0.036</td> <td>0.084</td> </tr> <tr> <th>cate_intercept|(T1)$_0$</th> <td>-0.033</td> <td>0.036</td> <td>-0.929</td> <td>0.353</td> <td>-0.092</td> <td>0.025</td> </tr> <tr> <th>cate_intercept|(T0)$_1$</th> <td>-0.105</td> <td>0.034</td> <td>-3.067</td> <td>0.002</td> <td>-0.162</td> <td>-0.049</td> </tr> <tr> <th>cate_intercept|(T1)$_1$</th> <td>0.037</td> <td>0.034</td> <td>1.079</td> <td>0.281</td> <td>-0.019</td> <td>0.093</td> </tr> <tr> <th>cate_intercept|(T0)$_2$</th> <td>-0.743</td> <td>0.005</td> <td>-140.503</td> <td>0.0</td> <td>-0.752</td> <td>-0.734</td> </tr> <tr> <th>cate_intercept|(T1)$_2$</th> <td>0.48</td> <td>0.005</td> <td>91.061</td> <td>0.0</td> <td>0.472</td> <td>0.489</td> </tr> </table><br/><br/><sub>A linear parametric conditional average treatment effect (CATE) model was fitted:<br/>$Y = \Theta(X)\cdot T + 
g(X, W) + \epsilon$<br/>where for every outcome $i$ and treatment $j$ the CATE $\Theta_{ij}(X)$ has the form:<br/>$\Theta_{ij}(X) = \phi(X)' coef_{ij} + cate\_intercept_{ij}$<br/>where $\phi(X)$ is the output of the `featurizer` or $X$ if `featurizer`=None. Coefficient Results table portrays the $coef_{ij}$ parameter vector for each outcome $i$ and treatment $j$. Intercept Results table portrays the $cate\_intercept_{ij}$ parameter.</sub> ```python # Average treatment effect for test points X_test = X[np.arange(0, 25, 3)] print(f"Average effect of default policy:{est.ate(X=X_test):0.2f}") ``` Average effect of default policy:-0.42 A scalar was specified but there are multiple treatments; the same value will be used for each treatment. Consider specifyingall treatments, or using the const_marginal_effect method. A scalar was specified but there are multiple treatments; the same value will be used for each treatment. Consider specifyingall treatments, or using the const_marginal_effect method. ```python # Effect of target policy over baseline policy # Must specify a treatment for each period baseline_policy = np.zeros((1, n_periods * n_treatments)) target_policy = np.ones((1, n_periods * n_treatments)) eff = est.effect(X=X_test, T0=baseline_policy, T1=target_policy) print("Effect of target policy over baseline policy for test set:\n", eff) ``` Effect of target policy over baseline policy for test set: [-0.37368525 -0.30896804 -0.43030363 -0.52252401 -0.42849622 -0.48790877 -0.34417987 -0.51804937 -0.36806744] ```python # Coefficients: intercept is of shape n_treatments*n_periods # coef_ is of shape (n_treatments*n_periods, n_hetero_inds). # first n_treatment rows are from first period, next n_treatment # from second period, etc. est.intercept_, est.coef_ ``` (array([ 0.02374269, -0.03302781, -0.10526464, 0.03675719, -0.74294675, 0.48025068]), array([[ 0.00914226, 0.01675409], [ 0.01732804, -0.00741467], [-0.00143705, -0.03431712], [-0.03136295, -0.02536834], [-0.30581311, -0.30189654], [ 0.15773252, 0.15564665]])) ```python # Confidence intervals conf_ints_intercept = est.intercept__interval(alpha=0.05) conf_ints_coef = est.coef__interval(alpha=0.05) ``` ## 2.3 Performance Visualization ```python # parse true parameters in array of shape (n_treatments*n_periods, 1 + n_hetero_inds) # first column is the intercept true_effect_inds = [] for t in range(n_treatments): true_effect_inds += [t * (1 + n_x)] + (list(t * (1 + n_x) + 1 + het_inds) if len(het_inds)>0 else []) true_effect_params = dgp.true_hetero_effect[:, true_effect_inds] true_effect_params = true_effect_params.reshape((n_treatments*n_periods, 1 + het_inds.shape[0])) ``` ```python # concatenating intercept and coef_ param_hat = np.hstack([est.intercept_.reshape(-1, 1), est.coef_]) lower = np.hstack([conf_ints_intercept[0].reshape(-1, 1), conf_ints_coef[0]]) upper = np.hstack([conf_ints_intercept[1].reshape(-1, 1), conf_ints_coef[1]]) ``` ```python plt.figure(figsize=(15, 5)) plt.errorbar(np.arange(n_periods * (len(het_inds) + 1) * n_treatments), true_effect_params.flatten(), fmt='*', label='Ground Truth') plt.errorbar(np.arange(n_periods * (len(het_inds) + 1) * n_treatments), param_hat.flatten(), yerr=((upper - param_hat).flatten(), (param_hat - lower).flatten()), fmt='o', label='DynamicDML') add_vlines(n_periods, n_treatments, het_inds) plt.legend() plt.show() ```
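As a quick, optional check of the estimates above, the sketch below computes how many of the true parameters fall inside the estimated 95% intervals; it only uses the `true_effect_params`, `lower`, and `upper` arrays defined in the preceding cells (all three share the same shape).

```python
# Sketch: empirical coverage of the 95% confidence intervals for the
# heterogeneous-effect parameters estimated above.
covered = (true_effect_params >= lower) & (true_effect_params <= upper)
print(f"Fraction of true parameters inside their 95% interval: {covered.mean():.2%}")
```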
(*First theorem play *) (* parametrized by set , proposition (truth value) *) Theorem frobenius (A : Set) (p : A -> Prop) (q : Prop) : (exists x: A, q /\ p x) <-> (q /\ exists x : A , p x). (*there exists x in A such that q /\ p of x (is the same thing) <=> as q and exists in A such that p of x*) Proof. split. (* if and only if = implication both ways -> split to 2 subgoals*) intros. (*for implication/forall take hypothesis H*) destruct H as [y [H1 H2]]. (*destruct the hypothesis*) split. assumption. exists y. assumption. intros [H1 [y H2]]. exists y. split. assumption. assumption. Qed. (**) Theorem frobenius2 (A : Set) (p : A -> Prop) (q : Prop) : (exists x: A, q /\ p x) <-> (q /\ exists x : A , p x). Proof. split. intros. destruct H as [y [H1 H2]]. split. assumption. exists y. assumption. intros [H1 [y H2]]. exists y. auto. Qed. Theorem frobenius3 (A : Set) (p : A -> Prop) (q : Prop) : (exists x: A, q /\ p x) <-> (q /\ exists x : A , p x). (*there exists x in A such that q /\ p of x (is the same thing) <=> as q and exists in A such that p of x*) Proof. split. (* if and only if = implication both ways -> split to 2 subgoals*) intros [y [H1 H2]]. (*for implication/forall take hypothesis H*) split. assumption. exists y. assumption. intros [H1 [y H2]]. exists y. split; assumption. Qed. (*First order logic*) Theorem frobenius4 (A : Set) (p : A -> Prop) (q : Prop) : (exists x: A, q /\ p x) <-> (q /\ exists x : A , p x). (*there exists x in A such that q /\ p of x (is the same thing) <=> as q and exists in A such that p of x*) Proof. firstorder. Qed. Check frobenius. Print frobenius2. Check frobenius4. Save.
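-- The Frobenius equivalence proved above with tactics also admits a one-line term proof.
-- This is a minimal sketch added for comparison; it is not part of the original Coq file
-- and is written in Lean 4 rather than Coq.
example (A : Type) (p : A → Prop) (q : Prop) :
    (∃ x : A, q ∧ p x) ↔ q ∧ ∃ x : A, p x :=
  ⟨fun ⟨x, hq, hp⟩ => ⟨hq, x, hp⟩, fun ⟨hq, x, hp⟩ => ⟨x, hq, hp⟩⟩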
example : Prop := ∀ n, (n:Nat) + n = n.succ example : Prop := ∀ n, n.succ = (n:Nat) + n example : Prop := ∀ n, (n:Nat) + n.succ = n example : Prop := ∀ n, n.succ + (n:Nat) = n example : Prop := ∀ n, (n.succ:Nat) + n = n example : Prop := ∀ n, (n:Nat).succ + n = n def fib: Nat → Nat | 0 => 0 | 1 => 1 | n + 2 => fib n + fib (n + 1) theorem fib50Eq : fib 50 = 12586269025 := rfl inductive type : Type | A : type | B : type inductive val : type → Type | cA : val type.A | cB : val type.B inductive wrap : Type | val : ∀ {t : type}, (val t) → wrap def f : wrap → Nat | wrap.val val.cA => 1 | _ => 1 example (a : Nat) : True := by have : ∀ n, n ≥ 0 → a ≤ a := fun _ _ => Nat.leRefl .. exact True.intro example (ᾰ : Nat) : True := by have : ∀ n, n ≥ 0 → ᾰ ≤ ᾰ := fun _ _ => Nat.leRefl .. exact True.intro inductive Vec.{u} (α : Type u) : Nat → Type u | nil : Vec α 0 | cons : α → {n : Nat} → Vec α n → Vec α (Nat.succ n) -- TODO: investigate why +1 doesn't work here constant Vars : Type structure Lang := (funcs : Nat → Type) (consts : Type) inductive Term (L : Lang) : Type | const_term : L.consts → Term L | var_term : Vars → Term L | func_term (n : Nat) (f : L.funcs n) (v : Vec (Term L) n) : Term L
section \<open>Simple Convergence Graphs\<close> text \<open>Alternative Implementation of the simple convergence graph using tries instead of sets.\<close> theory Simple_Convergence_Graph_Trie imports Convergence_Graph Collections.TrieSetImpl begin subsection \<open>Basic Definitions\<close> type_synonym 'a simple_cg = "'a ts list" definition simple_cg_empty :: "'a simple_cg" where "simple_cg_empty = []" thm TrieSetImpl.ts_ops_def (* collects all traces in the same convergent class set as ys *) fun simple_cg_lookup :: "('a::linorder) simple_cg \<Rightarrow> 'a list \<Rightarrow> 'a list list" where "simple_cg_lookup xs ys = ts.to_list (ts.ins ys (foldl (ts.union) (ts.empty ()) (filter (\<lambda>x . ts.memb ys x) xs)))" (* collects all traces (zs@ys'') such that there exists a prefix ys' of ys with (ys=ys'@ys'') and zs is in the same convergent class set as ys' *) fun simple_cg_lookup_with_conv :: "('a::linorder) simple_cg \<Rightarrow> 'a list \<Rightarrow> 'a list list" where "simple_cg_lookup_with_conv g ys = (let lookup_for_prefix = (\<lambda>i . let pref = take i ys; suff = drop i ys; pref_conv = (foldl (ts.union) (ts.empty ()) (filter (\<lambda>x . ts.memb pref x) g)) in map (\<lambda> pref' . pref'@suff) (ts.to_list pref_conv)) in ts.to_list (ts.from_list (ys # (concat (map lookup_for_prefix [0..<Suc (length ys)])))))" fun simple_cg_insert' :: "('a::linorder) simple_cg \<Rightarrow> 'a list \<Rightarrow> 'a simple_cg" where "simple_cg_insert' xs ys = (case find (\<lambda>x . ts.memb ys x) xs of Some x \<Rightarrow> xs | None \<Rightarrow> (ts.sng ys)#xs)" fun simple_cg_insert :: "('a::linorder) simple_cg \<Rightarrow> 'a list \<Rightarrow> 'a simple_cg" where "simple_cg_insert xs ys = foldl (\<lambda> xs' ys' . simple_cg_insert' xs' ys') xs (prefixes ys)" fun simple_cg_initial :: "('a,'b::linorder,'c::linorder) fsm \<Rightarrow> ('b\<times>'c) prefix_tree \<Rightarrow> ('b\<times>'c) simple_cg" where "simple_cg_initial M1 T = foldl (\<lambda> xs' ys' . simple_cg_insert' xs' ys') simple_cg_empty (filter (is_in_language M1 (initial M1)) (sorted_list_of_sequences_in_tree T))" subsection \<open>Merging by Closure\<close> text \<open>The following implementation of the merge operation follows the closure operation described by Simão et al. in Simão, A., Petrenko, A. and Yevtushenko, N. (2012), On reducing test length for FSMs with extra states. Softw. Test. Verif. Reliab., 22: 435-454. https://doi.org/10.1002/stvr.452. That is, two traces u and v are merged by adding {u,v} to the list of convergent classes followed by computing the closure of the graph based on two operations: (1) classes A and B can be merged if there exists some class C such that C contains some w1, w2 and there exists some w such that A contains w1.w and B contains w2.w. (2) classes A and B can be merged if one is a subset of the other.\<close> (* classes x1 and x2 can be merged via class x if there exist \<alpha>, \<beta> in x and some suffix \<gamma> such that x1 contains \<alpha>@\<gamma> and x2 contains \<beta>@\<gamma> *) fun can_merge_by_suffix :: "'a ts \<Rightarrow> 'a ts \<Rightarrow> 'a ts \<Rightarrow> bool" where "can_merge_by_suffix x x1 x2 = (\<exists> \<alpha> \<beta> \<gamma> . ts.memb \<alpha> x \<and> ts.memb \<beta> x \<and> ts.memb (\<alpha>@\<gamma>) x1 \<and> ts.memb (\<beta>@\<gamma>) x2)" lemma can_merge_by_suffix_code[code] : "can_merge_by_suffix x x1 x2 = (ts.bex x (\<lambda> ys . ts.bex x1 (\<lambda> ys1 . 
is_prefix ys ys1 \<and> (let ys'' = (drop (length ys) ys1) in ts.bex x (\<lambda> ys' . ts.memb (ys'@ys'') x2)))))" unfolding can_merge_by_suffix.simps unfolding ts.memb_correct[OF ts.invar] unfolding ts.bex_correct[OF ts.invar] Let_def is_prefix_prefix by force lemma can_merge_by_suffix_validity : assumes "observable M1" and "observable M2" and "\<And> u v . ts.memb u x \<Longrightarrow> ts.memb v x \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v" and "\<And> u v . ts.memb u x1 \<Longrightarrow> ts.memb v x1 \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v" and "\<And> u v . ts.memb u x2 \<Longrightarrow> ts.memb v x2 \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v" and "can_merge_by_suffix x x1 x2" and "ts.memb u (ts.union x1 x2)" and "ts.memb v (ts.union x1 x2)" and "u \<in> L M1" and "u \<in> L M2" shows "converge M1 u v \<and> converge M2 u v" proof - obtain \<alpha> \<beta> \<gamma> where "ts.memb \<alpha> x" and "ts.memb \<beta> x" and "ts.memb (\<alpha>@\<gamma>) x1" and "ts.memb (\<beta>@\<gamma>) x2" using \<open>can_merge_by_suffix x x1 x2\<close> by auto consider "ts.memb u x1" | "ts.memb u x2" using \<open>ts.memb u (ts.union x1 x2)\<close> unfolding ts.memb_correct[OF ts.invar] unfolding ts.union_correct[OF ts.invar ts.invar] by blast then show ?thesis proof cases case 1 then have "converge M1 u (\<alpha>@\<gamma>)" and "converge M2 u (\<alpha>@\<gamma>)" using \<open>ts.memb u (ts.union x1 x2)\<close> assms(4)[OF _ \<open>ts.memb (\<alpha>@\<gamma>) x1\<close> assms(9,10)] by blast+ then have "(\<alpha>@\<gamma>) \<in> L M1" and "(\<alpha>@\<gamma>) \<in> L M2" by auto then have "\<alpha> \<in> L M1" and "\<alpha> \<in> L M2" using language_prefix by metis+ then have "converge M1 \<alpha> \<beta>" and "converge M2 \<alpha> \<beta>" using assms(3) \<open>ts.memb \<alpha> x\<close> \<open>ts.memb \<beta> x\<close> by blast+ have "converge M1 (\<alpha>@\<gamma>) (\<beta>@\<gamma>)" using \<open>converge M1 \<alpha> \<beta>\<close> by (meson \<open>\<alpha> @ \<gamma> \<in> L M1\<close> assms(1) converge.simps converge_append) then have "\<beta>@\<gamma> \<in> L M1" by auto have "converge M2 (\<alpha>@\<gamma>) (\<beta>@\<gamma>)" using \<open>converge M2 \<alpha> \<beta>\<close> by (meson \<open>\<alpha> @ \<gamma> \<in> L M2\<close> assms(2) converge.simps converge_append) then have "\<beta>@\<gamma> \<in> L M2" by auto consider (11) "ts.memb v x1" | (12) "ts.memb v x2" using \<open>ts.memb v (ts.union x1 x2)\<close> unfolding ts.memb_correct[OF ts.invar] unfolding ts.union_correct[OF ts.invar ts.invar] by blast then show ?thesis proof cases case 11 show ?thesis using "1" "11" assms(10) assms(4) assms(9) by blast next case 12 then have "converge M1 v (\<beta>@\<gamma>)" and "converge M2 v (\<beta>@\<gamma>)" using assms(5)[OF \<open>ts.memb (\<beta>@\<gamma>) x2\<close> _ \<open>\<beta>@\<gamma> \<in> L M1\<close> \<open>\<beta>@\<gamma> \<in> L M2\<close>] by auto then show ?thesis using \<open>converge M1 (\<alpha>@\<gamma>) (\<beta>@\<gamma>)\<close> \<open>converge M2 (\<alpha>@\<gamma>) (\<beta>@\<gamma>)\<close> \<open>converge M1 u (\<alpha>@\<gamma>)\<close> \<open>converge M2 u (\<alpha>@\<gamma>)\<close> by auto qed next case 2 then have "converge M1 u (\<beta>@\<gamma>)" and "converge M2 u (\<beta>@\<gamma>)" using \<open>ts.memb u (ts.union x1 x2)\<close> 
assms(5)[OF _ \<open>ts.memb (\<beta>@\<gamma>) x2\<close> assms(9,10)] by blast+ then have "(\<beta>@\<gamma>) \<in> L M1" and "(\<beta>@\<gamma>) \<in> L M2" by auto then have "\<beta> \<in> L M1" and "\<beta> \<in> L M2" using language_prefix by metis+ then have "converge M1 \<alpha> \<beta>" and "converge M2 \<alpha> \<beta>" using assms(3)[OF \<open>ts.memb \<beta> x\<close> \<open>ts.memb \<alpha> x\<close>] by auto have "converge M1 (\<alpha>@\<gamma>) (\<beta>@\<gamma>)" using \<open>converge M1 \<alpha> \<beta>\<close> using \<open>\<beta> @ \<gamma> \<in> L M1\<close> \<open>\<beta> \<in> L M1\<close> assms(1) converge_append converge_append_language_iff by blast then have "\<alpha>@\<gamma> \<in> L M1" by auto have "converge M2 (\<alpha>@\<gamma>) (\<beta>@\<gamma>)" using \<open>converge M2 \<alpha> \<beta>\<close> using \<open>\<beta> @ \<gamma> \<in> L M2\<close> \<open>\<beta> \<in> L M2\<close> assms(2) converge_append converge_append_language_iff by blast then have "\<alpha>@\<gamma> \<in> L M2" by auto consider (21) "ts.memb v x1" | (22) "ts.memb v x2" using \<open>ts.memb v (ts.union x1 x2)\<close> unfolding ts.memb_correct[OF ts.invar] unfolding ts.union_correct[OF ts.invar ts.invar] by blast then show ?thesis proof cases case 22 show ?thesis using "2" "22" assms(10) assms(5) assms(9) by blast next case 21 then have "converge M1 v (\<alpha>@\<gamma>)" and "converge M2 v (\<alpha>@\<gamma>)" using assms(4)[OF \<open>ts.memb (\<alpha>@\<gamma>) x1\<close> _ \<open>\<alpha>@\<gamma> \<in> L M1\<close> \<open>\<alpha>@\<gamma> \<in> L M2\<close>] by auto then show ?thesis using \<open>converge M1 (\<alpha>@\<gamma>) (\<beta>@\<gamma>)\<close> \<open>converge M2 (\<alpha>@\<gamma>) (\<beta>@\<gamma>)\<close> \<open>converge M1 u (\<beta>@\<gamma>)\<close> \<open>converge M2 u (\<beta>@\<gamma>)\<close> by auto qed qed qed fun simple_cg_closure_phase_1_helper' :: "'a ts \<Rightarrow> 'a ts \<Rightarrow> 'a simple_cg \<Rightarrow> (bool \<times> 'a ts \<times> 'a simple_cg)" where "simple_cg_closure_phase_1_helper' x x1 xs = (let (x2s,others) = separate_by (can_merge_by_suffix x x1) xs; x1Union = foldl (ts.union) x1 x2s in (x2s \<noteq> [],x1Union,others))" lemma simple_cg_closure_phase_1_helper'_False : "\<not>fst (simple_cg_closure_phase_1_helper' x x1 xs) \<Longrightarrow> simple_cg_closure_phase_1_helper' x x1 xs = (False,x1,xs)" unfolding simple_cg_closure_phase_1_helper'.simps Let_def separate_by.simps by (simp add: filter_empty_conv) lemma simple_cg_closure_phase_1_helper'_True : assumes "fst (simple_cg_closure_phase_1_helper' x x1 xs)" shows "length (snd (snd (simple_cg_closure_phase_1_helper' x x1 xs))) < length xs" proof - have "snd (snd (simple_cg_closure_phase_1_helper' x x1 xs)) = filter (\<lambda>x2 . \<not> (can_merge_by_suffix x x1 x2)) xs" by auto moreover have "filter (\<lambda>x2 . (can_merge_by_suffix x x1 x2)) xs \<noteq> []" using assms unfolding simple_cg_closure_phase_1_helper'.simps Let_def separate_by.simps by fastforce ultimately show ?thesis using filter_not_all_length[of "can_merge_by_suffix x x1" xs] by metis qed lemma simple_cg_closure_phase_1_helper'_length : "length (snd (snd (simple_cg_closure_phase_1_helper' x x1 xs))) \<le> length xs" by auto lemma simple_cg_closure_phase_1_helper'_validity_fst : assumes "observable M1" and "observable M2" and "\<And> u v . ts.memb u x \<Longrightarrow> ts.memb v x \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v" and "\<And> u v . 
ts.memb u x1 \<Longrightarrow> ts.memb v x1 \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v" and "\<And> x2 u v . x2 \<in> list.set xs \<Longrightarrow> ts.memb u x2 \<Longrightarrow> ts.memb v x2 \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v" and "ts.memb u (fst (snd (simple_cg_closure_phase_1_helper' x x1 xs)))" and "ts.memb v (fst (snd (simple_cg_closure_phase_1_helper' x x1 xs)))" and "u \<in> L M1" and "u \<in> L M2" shows "converge M1 u v \<and> converge M2 u v" proof - have *:"\<And> w . ts.memb w (fst (snd (simple_cg_closure_phase_1_helper' x x1 xs))) \<Longrightarrow> ts.memb w x1 \<or> (\<exists> x2 . x2 \<in> list.set xs \<and> ts.memb w x2 \<and> can_merge_by_suffix x x1 x2)" proof - fix w assume "ts.memb w (fst (snd (simple_cg_closure_phase_1_helper' x x1 xs)))" then have "ts.memb w (foldl ts.union x1 (filter (can_merge_by_suffix x x1) xs))" unfolding simple_cg_closure_phase_1_helper'.simps unfolding separate_by.simps Let_def case_prod_conv fst_conv snd_conv by auto then show "ts.memb w x1 \<or> (\<exists> x2 . x2 \<in> list.set xs \<and> ts.memb w x2 \<and> can_merge_by_suffix x x1 x2)" proof (induction xs rule: rev_induct) case Nil then show ?case by auto next case (snoc x' xs) show ?case proof (cases "can_merge_by_suffix x x1 x'") case False then have "(foldl ts.union x1 (filter (can_merge_by_suffix x x1) (xs @ [x']))) = (foldl ts.union x1 (filter (can_merge_by_suffix x x1) xs))" by auto then show ?thesis using snoc by (metis (no_types, opaque_lifting) append_Cons empty_append_eq_id insert_iff list.simps(15) list_set_sym) next case True then have *: "(foldl ts.union x1 (filter (can_merge_by_suffix x x1) (xs @ [x']))) = ts.union (foldl ts.union x1 (filter (can_merge_by_suffix x x1) xs)) x'" by auto show ?thesis proof (cases "ts.memb w (foldl ts.union x1 (filter (can_merge_by_suffix x x1) xs))") case True then show ?thesis using snoc by (metis (no_types, opaque_lifting) append_Cons empty_append_eq_id insert_iff list.simps(15) list_set_sym) next case False then have "ts.memb w x'" using snoc.prems unfolding * unfolding ts.memb_correct[OF ts.invar] unfolding ts.union_correct[OF ts.invar ts.invar] by blast then show ?thesis using True by force qed qed qed qed consider "ts.memb u x1" | "(\<exists> x2 . x2 \<in> list.set xs \<and> ts.memb u x2 \<and> can_merge_by_suffix x x1 x2)" using *[OF assms(6)] by blast then show ?thesis proof cases case 1 consider (a) "ts.memb v x1" | (b) "(\<exists> x2 . 
x2 \<in> list.set xs \<and> ts.memb v x2 \<and> can_merge_by_suffix x x1 x2)" using *[OF assms(7)] by blast then show ?thesis proof cases case a then show ?thesis using assms(4)[OF 1 _ assms(8,9)] by auto next case b then obtain x2v where "x2v \<in> list.set xs" and "ts.memb v x2v" and "can_merge_by_suffix x x1 x2v" using *[OF assms(6)] by blast then have "ts.memb u (ts.union x1 x2v)" and "ts.memb v (ts.union x1 x2v)" using 1 unfolding ts.memb_correct[OF ts.invar] unfolding ts.union_correct[OF ts.invar ts.invar] by auto show ?thesis using can_merge_by_suffix_validity[OF assms(1,2), of x x1 x2v, OF assms(3,4) assms(5)[OF \<open>x2v \<in> list.set xs\<close>] \<open>can_merge_by_suffix x x1 x2v\<close> \<open>ts.memb u (ts.union x1 x2v)\<close> \<open>ts.memb v (ts.union x1 x2v)\<close> assms(8,9)] by blast qed next case 2 then obtain x2u where "x2u \<in> list.set xs" and "ts.memb u x2u" and "can_merge_by_suffix x x1 x2u" using *[OF assms(6)] by blast then have "ts.memb u (ts.union x1 x2u)" unfolding ts.memb_correct[OF ts.invar] unfolding ts.union_correct[OF ts.invar ts.invar] by auto consider (a) "ts.memb v x1" | (b) "(\<exists> x2 . x2 \<in> list.set xs \<and> ts.memb v x2 \<and> can_merge_by_suffix x x1 x2)" using *[OF assms(7)] by blast then show ?thesis proof cases case a then have "ts.memb v (ts.union x1 x2u)" unfolding ts.memb_correct[OF ts.invar] unfolding ts.union_correct[OF ts.invar ts.invar] by auto show ?thesis using can_merge_by_suffix_validity[OF assms(1,2), of x x1 x2u, OF assms(3,4) assms(5)[OF \<open>x2u \<in> list.set xs\<close>] \<open>can_merge_by_suffix x x1 x2u\<close> \<open>ts.memb u (ts.union x1 x2u)\<close> \<open>ts.memb v (ts.union x1 x2u)\<close> assms(8,9)] by blast next case b then obtain x2v where "x2v \<in> list.set xs" and "ts.memb v x2v" and "can_merge_by_suffix x x1 x2v" using *[OF assms(6)] by blast then have "ts.memb v (ts.union x1 x2v)" unfolding ts.memb_correct[OF ts.invar] unfolding ts.union_correct[OF ts.invar ts.invar] by auto have "\<And> v . ts.memb v (ts.union x1 x2u) \<Longrightarrow> converge M1 u v \<and> converge M2 u v" using can_merge_by_suffix_validity[OF assms(1,2), of x x1 x2u, OF assms(3,4) assms(5)[OF \<open>x2u \<in> list.set xs\<close>] \<open>can_merge_by_suffix x x1 x2u\<close> \<open>ts.memb u (ts.union x1 x2u)\<close> _ assms(8,9)] by blast have "\<And> u . ts.memb u (ts.union x1 x2v) \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v" using can_merge_by_suffix_validity[OF assms(1,2), of x x1 x2v, OF assms(3,4) assms(5)[OF \<open>x2v \<in> list.set xs\<close>] \<open>can_merge_by_suffix x x1 x2v\<close> _ \<open>ts.memb v (ts.union x1 x2v)\<close>] by blast obtain \<alpha>v \<beta>v \<gamma>v where "ts.memb \<alpha>v x" and "ts.memb \<beta>v x" and "ts.memb (\<alpha>v@\<gamma>v) x1" and "ts.memb (\<beta>v@\<gamma>v) x2v" using \<open>can_merge_by_suffix x x1 x2v\<close> by auto show ?thesis using \<open>\<And>u. \<lbrakk>ts.memb u (ts.union x1 x2v); u \<in> L M1; u \<in> L M2\<rbrakk> \<Longrightarrow> converge M1 u v \<and> converge M2 u v\<close> \<open>\<And>v. ts.memb v (ts.union x1 x2u) \<Longrightarrow> converge M1 u v \<and> converge M2 u v\<close> \<open>ts.memb (\<alpha>v @ \<gamma>v) x1\<close> unfolding ts.memb_correct[OF ts.invar] unfolding ts.union_correct[OF ts.invar ts.invar] by fastforce qed qed qed lemma simple_cg_closure_phase_1_helper'_validity_snd : assumes "\<And> x2 u v . 
x2 \<in> list.set xs \<Longrightarrow> ts.memb u x2 \<Longrightarrow> ts.memb v x2 \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v" and "x2 \<in> list.set (snd (snd (simple_cg_closure_phase_1_helper' x x1 xs)))" and "ts.memb u x2" and "ts.memb v x2" and "u \<in> L M1" and "u \<in> L M2" shows "converge M1 u v \<and> converge M2 u v" proof - have "list.set (snd (snd (simple_cg_closure_phase_1_helper' x x1 xs))) \<subseteq> list.set xs" by auto then show ?thesis using assms by blast qed fun simple_cg_closure_phase_1_helper :: "'a ts \<Rightarrow> 'a simple_cg \<Rightarrow> (bool \<times> 'a simple_cg) \<Rightarrow> (bool \<times> 'a simple_cg)" where "simple_cg_closure_phase_1_helper x [] (b,done) = (b,done)" | "simple_cg_closure_phase_1_helper x (x1#xs) (b,done) = (let (hasChanged,x1',xs') = simple_cg_closure_phase_1_helper' x x1 xs in simple_cg_closure_phase_1_helper x xs' (b \<or> hasChanged, x1' # done))" lemma simple_cg_closure_phase_1_helper_validity : assumes "observable M1" and "observable M2" and "\<And> u v . ts.memb u x \<Longrightarrow> ts.memb v x \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v" and "\<And> x2 u v . x2 \<in> list.set don \<Longrightarrow> ts.memb u x2 \<Longrightarrow> ts.memb v x2 \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v" and "\<And> x2 u v . x2 \<in> list.set xss \<Longrightarrow> ts.memb u x2 \<Longrightarrow> ts.memb v x2 \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v" and "x2 \<in> list.set (snd (simple_cg_closure_phase_1_helper x xss (b,don)))" and "ts.memb u x2" and "ts.memb v x2" and "u \<in> L M1" and "u \<in> L M2" shows "converge M1 u v \<and> converge M2 u v" using assms(4,5,6) proof (induction "length xss" arbitrary: xss don b rule: less_induct) case less show ?case proof (cases xss) case Nil then have "x2 \<in> list.set don" using less.prems(3) by auto then show ?thesis using less.prems(1) assms(7,8,9,10) by blast next case (Cons x1 xs) obtain b' x1' xs' where "simple_cg_closure_phase_1_helper' x x1 xs = (b',x1',xs')" using prod.exhaust by metis then have "simple_cg_closure_phase_1_helper x xss (b,don) = simple_cg_closure_phase_1_helper x xs' (b \<or> b', x1' # don)" unfolding Cons by auto have *:"\<And> u v . ts.memb u x1 \<Longrightarrow> ts.memb v x1 \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v" using less.prems(2)[of x1] unfolding Cons by (meson list.set_intros(1)) have **:"\<And> x2 u v . x2 \<in> list.set xs \<Longrightarrow> ts.memb u x2 \<Longrightarrow> ts.memb v x2 \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v" using less.prems(2) unfolding Cons by (meson list.set_intros(2)) have ***:"\<And> u v. ts.memb u x1' \<Longrightarrow> ts.memb v x1' \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v" using simple_cg_closure_phase_1_helper'_validity_fst[of M1 M2 x x1 xs _ _, OF assms(1,2,3) * **, of "\<lambda> a b c . 
a"] unfolding \<open>simple_cg_closure_phase_1_helper' x x1 xs = (b',x1',xs')\<close> fst_conv snd_conv by blast have "length xs' < length xss" using simple_cg_closure_phase_1_helper'_length[of x x1 xs] unfolding \<open>simple_cg_closure_phase_1_helper' x x1 xs = (b',x1',xs')\<close> Cons by auto have "(\<And>x2 u v. x2 \<in> list.set (x1' # don) \<Longrightarrow> ts.memb u x2 \<Longrightarrow> ts.memb v x2 \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v)" using *** less.prems(1) by (metis set_ConsD) have "xs' = snd (snd (simple_cg_closure_phase_1_helper' x x1 xs))" using \<open>simple_cg_closure_phase_1_helper' x x1 xs = (b',x1',xs')\<close> by auto have "(\<And>x2 u v. x2 \<in> list.set xs' \<Longrightarrow> ts.memb u x2 \<Longrightarrow> ts.memb v x2 \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v)" using simple_cg_closure_phase_1_helper'_validity_snd[of xs' M1] unfolding \<open>xs' = snd (snd (simple_cg_closure_phase_1_helper' x x1 xs))\<close> using ** simple_cg_closure_phase_1_helper'_validity_snd by blast have "x2 \<in> list.set (snd (simple_cg_closure_phase_1_helper x xs' (b \<or> b', x1' # don)))" using less.prems(3) unfolding \<open>simple_cg_closure_phase_1_helper x xss (b,don) = simple_cg_closure_phase_1_helper x xs' (b \<or> b', x1' # don)\<close> . then show ?thesis using less.hyps[OF \<open>length xs' < length xss\<close> \<open>(\<And>x2 u v. x2 \<in> list.set (x1' # don) \<Longrightarrow> ts.memb u x2 \<Longrightarrow> ts.memb v x2 \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v)\<close> \<open>(\<And>x2 u v. x2 \<in> list.set xs' \<Longrightarrow> ts.memb u x2 \<Longrightarrow> ts.memb v x2 \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v)\<close>, of "x1'#don" "\<lambda> a b c . a" "\<lambda> a b c . 
a"] by force qed qed lemma simple_cg_closure_phase_1_helper_length : "length (snd (simple_cg_closure_phase_1_helper x xss (b,don))) \<le> length xss + length don" proof (induction "length xss" arbitrary: xss b don rule: less_induct) case less show ?case proof (cases xss) case Nil then show ?thesis by auto next case (Cons x1 xs) obtain b' x1' xs' where "simple_cg_closure_phase_1_helper' x x1 xs = (b',x1',xs')" using prod.exhaust by metis then have "simple_cg_closure_phase_1_helper x xss (b,don) = simple_cg_closure_phase_1_helper x xs' (b \<or> b', x1' # don)" unfolding Cons by auto have "length xs' < length xss" using simple_cg_closure_phase_1_helper'_length[of x x1 xs] unfolding \<open>simple_cg_closure_phase_1_helper' x x1 xs = (b',x1',xs')\<close> Cons by auto then have "length (snd (simple_cg_closure_phase_1_helper x xs' (b \<or> b', x1'#don))) \<le> length xs' + length (x1'#don)" using less[of xs'] unfolding Cons by blast moreover have "length xs' + length (x1'#don) \<le> length xss + length don" using simple_cg_closure_phase_1_helper'_length[of x x1 xs] unfolding \<open>simple_cg_closure_phase_1_helper' x x1 xs = (b',x1',xs')\<close> snd_conv Cons by auto ultimately show ?thesis unfolding \<open>simple_cg_closure_phase_1_helper x xss (b,don) = simple_cg_closure_phase_1_helper x xs' (b \<or> b', x1' # don)\<close> by presburger qed qed lemma simple_cg_closure_phase_1_helper_True : assumes "fst (simple_cg_closure_phase_1_helper x xss (False,don))" and "xss \<noteq> []" shows "length (snd (simple_cg_closure_phase_1_helper x xss (False,don))) < length xss + length don" using assms proof (induction "length xss" arbitrary: xss don rule: less_induct) case less show ?case proof (cases xss) case Nil then show ?thesis using less.prems(2) by auto next case (Cons x1 xs) obtain b' x1' xs' where "simple_cg_closure_phase_1_helper' x x1 xs = (b',x1',xs')" using prod.exhaust by metis then have "simple_cg_closure_phase_1_helper x xss (False,don) = simple_cg_closure_phase_1_helper x xs' (b', x1' # don)" unfolding Cons by auto show ?thesis proof (cases b') case True then have "length xs' < length xs" using simple_cg_closure_phase_1_helper'_True[of x x1 xs] unfolding \<open>simple_cg_closure_phase_1_helper' x x1 xs = (b',x1',xs')\<close> fst_conv snd_conv by blast then have "length (snd (simple_cg_closure_phase_1_helper x xs' (b', x1' # don))) < length xss + length don" using simple_cg_closure_phase_1_helper_length[of x xs' b' "x1'#don"] unfolding Cons by auto then show ?thesis unfolding \<open>simple_cg_closure_phase_1_helper x xss (False,don) = simple_cg_closure_phase_1_helper x xs' (b', x1' # don)\<close> . 
next case False then have "simple_cg_closure_phase_1_helper x xss (False,don) = simple_cg_closure_phase_1_helper x xs' (False, x1' # don)" using \<open>simple_cg_closure_phase_1_helper x xss (False,don) = simple_cg_closure_phase_1_helper x xs' (b', x1' # don)\<close> by auto then have "fst (simple_cg_closure_phase_1_helper x xs' (False, x1' # don))" using less.prems(1) by auto have "length xs' < length xss" using simple_cg_closure_phase_1_helper'_length[of x x1 xs] unfolding \<open>simple_cg_closure_phase_1_helper' x x1 xs = (b',x1',xs')\<close> Cons by auto have "xs' \<noteq> []" using \<open>simple_cg_closure_phase_1_helper' x x1 xs = (b',x1',xs')\<close> False by (metis \<open>fst (simple_cg_closure_phase_1_helper x xs' (False, x1' # don))\<close> simple_cg_closure_phase_1_helper.simps(1) fst_eqD) show ?thesis using less.hyps[OF \<open>length xs' < length xss\<close> \<open>fst (simple_cg_closure_phase_1_helper x xs' (False, x1' # don))\<close> \<open>xs' \<noteq> []\<close>] \<open>length xs' < length xss\<close> unfolding \<open>simple_cg_closure_phase_1_helper x xss (False,don) = simple_cg_closure_phase_1_helper x xs' (False, x1' # don)\<close> unfolding Cons by auto qed qed qed (* closure operation (1) *) fun simple_cg_closure_phase_1 :: "'a simple_cg \<Rightarrow> (bool \<times> 'a simple_cg)" where "simple_cg_closure_phase_1 xs = foldl (\<lambda> (b,xs) x. let (b',xs') = simple_cg_closure_phase_1_helper x xs (False,[]) in (b\<or>b',xs')) (False,xs) xs" lemma simple_cg_closure_phase_1_validity : assumes "observable M1" and "observable M2" and "\<And> x2 u v . x2 \<in> list.set xs \<Longrightarrow> ts.memb u x2 \<Longrightarrow> ts.memb v x2 \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v" and "x2 \<in> list.set (snd (simple_cg_closure_phase_1 xs))" and "ts.memb u x2" and "ts.memb v x2" and "u \<in> L M1" and "u \<in> L M2" shows "converge M1 u v \<and> converge M2 u v" proof - have "\<And> xss x2 u v . (\<And> x2 u v . x2 \<in> list.set xss \<Longrightarrow> ts.memb u x2 \<Longrightarrow> ts.memb v x2 \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v) \<Longrightarrow> x2 \<in> list.set (snd (foldl (\<lambda> (b,xs) x . let (b',xs') = simple_cg_closure_phase_1_helper x xs (False,[]) in (b\<or>b',xs')) (False,xs) xss)) \<Longrightarrow> ts.memb u x2 \<Longrightarrow> ts.memb v x2 \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v" proof - fix xss x2 u v assume "\<And> x2 u v . x2 \<in> list.set xss \<Longrightarrow> ts.memb u x2 \<Longrightarrow> ts.memb v x2 \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v" and "x2 \<in> list.set (snd (foldl (\<lambda> (b,xs) x . let (b',xs') = simple_cg_closure_phase_1_helper x xs (False,[]) in (b\<or>b',xs')) (False,xs) xss))" and "ts.memb u x2" and "ts.memb v x2" and "u \<in> L M1" and "u \<in> L M2" then show "converge M1 u v \<and> converge M2 u v" proof (induction xss arbitrary: x2 u v rule: rev_induct) case Nil then have "x2 \<in> list.set xs" by auto then show ?case using Nil.prems(3,4,5,6) assms(3) by blast next case (snoc x xss) obtain b xss' where "(foldl (\<lambda> (b,xs) x . 
let (b',xs') = simple_cg_closure_phase_1_helper x xs (False,[]) in (b\<or>b',xs')) (False,xs) xss) = (b,xss')" using prod.exhaust by metis moreover obtain b' xss'' where "simple_cg_closure_phase_1_helper x xss' (False,[]) = (b',xss'')" using prod.exhaust by metis ultimately have *:"(foldl (\<lambda> (b,xs) x . let (b',xs') = simple_cg_closure_phase_1_helper x xs (False,[]) in (b\<or>b',xs')) (False,xs) (xss@[x])) = (b\<or>b',xss'')" by auto have "(\<And>u v. ts.memb u x \<Longrightarrow> ts.memb v x \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v)" using snoc.prems(1) by (metis append_Cons list.set_intros(1) list_set_sym) moreover have "(\<And>x2 u v. x2 \<in> list.set [] \<Longrightarrow> ts.memb u x2 \<Longrightarrow> ts.memb v x2 \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v)" by auto moreover have "(\<And>x2 u v. x2 \<in> list.set xss' \<Longrightarrow> ts.memb u x2 \<Longrightarrow> ts.memb v x2 \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v)" proof - have "(\<And>x2 u v. x2 \<in> list.set xss \<Longrightarrow> ts.memb u x2 \<Longrightarrow> ts.memb v x2 \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v)" using snoc.prems(1) by (metis (no_types, lifting) append_Cons append_Nil2 insertCI list.simps(15) list_set_sym) then show "(\<And>x2 u v. x2 \<in> list.set xss' \<Longrightarrow> ts.memb u x2 \<Longrightarrow> ts.memb v x2 \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v)" using snoc.IH unfolding \<open>(foldl (\<lambda> (b,xs) x . let (b',xs') = simple_cg_closure_phase_1_helper x xs (False,[]) in (b\<or>b',xs')) (False,xs) xss) = (b,xss')\<close> snd_conv by blast qed ultimately have "(\<And>x2 u v. x2 \<in> list.set xss'' \<Longrightarrow> ts.memb u x2 \<Longrightarrow> ts.memb v x2 \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v)" using simple_cg_closure_phase_1_helper_validity[OF assms(1,2), of x "[]" xss' _ False] unfolding \<open>simple_cg_closure_phase_1_helper x xss' (False,[]) = (b',xss'')\<close> snd_conv by blast then show ?case using snoc.prems(2,3,4,5,6) unfolding * snd_conv by blast qed qed then show ?thesis using assms(3,4,5,6,7,8) unfolding simple_cg_closure_phase_1.simps by blast qed lemma simple_cg_closure_phase_1_length_helper : "length (snd (foldl (\<lambda> (b,xs) x . let (b',xs') = simple_cg_closure_phase_1_helper x xs (False,[]) in (b\<or>b',xs')) (False,xs) xss)) \<le> length xs" proof (induction xss rule: rev_induct) case Nil then show ?case by auto next case (snoc x xss) obtain b xss' where "(foldl (\<lambda> (b,xs) x . let (b',xs') = simple_cg_closure_phase_1_helper x xs (False,[]) in (b\<or>b',xs')) (False,xs) xss) = (b,xss')" using prod.exhaust by metis moreover obtain b' xss'' where "simple_cg_closure_phase_1_helper x xss' (False,[]) = (b',xss'')" using prod.exhaust by metis ultimately have *:"(foldl (\<lambda> (b,xs) x . let (b',xs') = simple_cg_closure_phase_1_helper x xs (False,[]) in (b\<or>b',xs')) (False,xs) (xss@[x])) = (b\<or>b',xss'')" by auto have "length xss' \<le> length xs" using snoc.IH unfolding \<open>(foldl (\<lambda> (b,xs) x . 
let (b',xs') = simple_cg_closure_phase_1_helper x xs (False,[]) in (b\<or>b',xs')) (False,xs) xss) = (b,xss')\<close> by auto moreover have "length xss'' \<le> length xss'" using simple_cg_closure_phase_1_helper_length[of x xss' False "[]"] unfolding \<open>simple_cg_closure_phase_1_helper x xss' (False,[]) = (b',xss'')\<close> by auto ultimately show ?case unfolding * snd_conv by simp qed lemma simple_cg_closure_phase_1_length : "length (snd (simple_cg_closure_phase_1 xs)) \<le> length xs" using simple_cg_closure_phase_1_length_helper by auto lemma simple_cg_closure_phase_1_True : assumes "fst (simple_cg_closure_phase_1 xs)" shows "length (snd (simple_cg_closure_phase_1 xs)) < length xs" proof - have "\<And> xss . fst (foldl (\<lambda> (b,xs) x . let (b',xs') = simple_cg_closure_phase_1_helper x xs (False,[]) in (b\<or>b',xs')) (False,xs) xss) \<Longrightarrow> length (snd (foldl (\<lambda> (b,xs) x . let (b',xs') = simple_cg_closure_phase_1_helper x xs (False,[]) in (b\<or>b',xs')) (False,xs) xss)) < length xs" proof - fix xss assume "fst (foldl (\<lambda> (b,xs) x . let (b',xs') = simple_cg_closure_phase_1_helper x xs (False,[]) in (b\<or>b',xs')) (False,xs) xss)" then show "length (snd (foldl (\<lambda> (b,xs) x . let (b',xs') = simple_cg_closure_phase_1_helper x xs (False,[]) in (b\<or>b',xs')) (False,xs) xss)) < length xs" proof (induction xss rule: rev_induct) case Nil then show ?case by auto next case (snoc x xss) obtain b xss' where "(foldl (\<lambda> (b,xs) x . let (b',xs') = simple_cg_closure_phase_1_helper x xs (False,[]) in (b\<or>b',xs')) (False,xs) xss) = (b,xss')" using prod.exhaust by metis moreover obtain b' xss'' where "simple_cg_closure_phase_1_helper x xss' (False,[]) = (b',xss'')" using prod.exhaust by metis ultimately have "(foldl (\<lambda> (b,xs) x . let (b',xs') = simple_cg_closure_phase_1_helper x xs (False,[]) in (b\<or>b',xs')) (False,xs) (xss@[x])) = (b\<or>b',xss'')" by auto consider b | b' using snoc.prems unfolding \<open>(foldl (\<lambda> (b,xs) x . let (b',xs') = simple_cg_closure_phase_1_helper x xs (False,[]) in (b\<or>b',xs')) (False,xs) (xss@[x])) = (b\<or>b',xss'')\<close> fst_conv by blast then show ?case proof cases case 1 then have "length xss' < length xs" using snoc.IH unfolding \<open>(foldl (\<lambda> (b,xs) x . let (b',xs') = simple_cg_closure_phase_1_helper x xs (False,[]) in (b\<or>b',xs')) (False,xs) xss) = (b,xss')\<close> fst_conv snd_conv by auto moreover have "length xss'' \<le> length xss'" using simple_cg_closure_phase_1_helper_length[of x xss' False "[]"] unfolding \<open>simple_cg_closure_phase_1_helper x xss' (False,[]) = (b',xss'')\<close> by auto ultimately show ?thesis unfolding \<open>(foldl (\<lambda> (b,xs) x . let (b',xs') = simple_cg_closure_phase_1_helper x xs (False,[]) in (b\<or>b',xs')) (False,xs) (xss@[x])) = (b\<or>b',xss'')\<close> snd_conv by simp next case 2 have "length xss' \<le> length xs" using simple_cg_closure_phase_1_length_helper[of xss xs] by (metis \<open>foldl (\<lambda>(b, xs) x. 
let (b', xs') = simple_cg_closure_phase_1_helper x xs (False, []) in (b \<or> b', xs')) (False, xs) xss = (b, xss')\<close> simple_cg_closure_phase_1_length_helper snd_conv) moreover have "length xss'' < length xss'" proof - have "xss' \<noteq> []" using "2" \<open>simple_cg_closure_phase_1_helper x xss' (False, []) = (b', xss'')\<close> by auto then show ?thesis using simple_cg_closure_phase_1_helper_True[of x xss' "[]"] 2 unfolding \<open>simple_cg_closure_phase_1_helper x xss' (False,[]) = (b',xss'')\<close> fst_conv snd_conv by auto qed ultimately show ?thesis unfolding \<open>(foldl (\<lambda> (b,xs) x . let (b',xs') = simple_cg_closure_phase_1_helper x xs (False,[]) in (b\<or>b',xs')) (False,xs) (xss@[x])) = (b\<or>b',xss'')\<close> snd_conv by simp qed qed qed then show ?thesis using assms by auto qed fun can_merge_by_intersection :: "'a ts \<Rightarrow> 'a ts \<Rightarrow> bool" where "can_merge_by_intersection x1 x2 = (\<exists> \<alpha> . ts.memb \<alpha> x1 \<and> ts.memb \<alpha> x2)" lemma can_merge_by_intersection_code[code] : "can_merge_by_intersection x1 x2 = ts.bex x1 (\<lambda> \<alpha> . ts.memb \<alpha> x2)" unfolding can_merge_by_intersection.simps unfolding ts.memb_correct[OF ts.invar] unfolding ts.bex_correct[OF ts.invar] by (meson notin_fset) lemma can_merge_by_intersection_validity : assumes "\<And> u v . ts.memb u x1 \<Longrightarrow> ts.memb v x1 \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v" and "\<And> u v . ts.memb u x2 \<Longrightarrow> ts.memb v x2 \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v" and "can_merge_by_intersection x1 x2" and "ts.memb u (ts.union x1 x2)" and "ts.memb v (ts.union x1 x2)" and "u \<in> L M1" and "u \<in> L M2" shows "converge M1 u v \<and> converge M2 u v" proof - obtain \<alpha> where "ts.memb \<alpha> x1" and "ts.memb \<alpha> x2" using assms(3) by auto have "converge M1 u \<alpha> \<and> converge M2 u \<alpha>" using \<open>ts.memb \<alpha> x1\<close> \<open>ts.memb \<alpha> x2\<close> assms(1,2,4,6,7) unfolding ts.memb_correct[OF ts.invar] unfolding ts.union_correct[OF ts.invar ts.invar] by blast moreover have "converge M1 v \<alpha> \<and> converge M2 v \<alpha>" by (metis UnE \<open>ts.memb \<alpha> x1\<close> \<open>ts.memb \<alpha> x2\<close> assms(1) assms(2) assms(5) calculation converge.elims(2) ts.correct(18) ts.correct(5) ts.invar) ultimately show ?thesis by simp qed fun simple_cg_closure_phase_2_helper :: "'a ts \<Rightarrow> 'a simple_cg \<Rightarrow> (bool \<times> 'a ts \<times> 'a simple_cg)" where "simple_cg_closure_phase_2_helper x1 xs = (let (x2s,others) = separate_by (can_merge_by_intersection x1) xs; x1Union = foldl (ts.union) x1 x2s in (x2s \<noteq> [],x1Union,others))" lemma simple_cg_closure_phase_2_helper_length : "length (snd (snd (simple_cg_closure_phase_2_helper x1 xs))) \<le> length xs" by auto lemma simple_cg_closure_phase_2_helper_validity_fst : assumes "\<And> u v . ts.memb u x1 \<Longrightarrow> ts.memb v x1 \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v" and "\<And> x2 u v . 
x2 \<in> list.set xs \<Longrightarrow> ts.memb u x2 \<Longrightarrow> ts.memb v x2 \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v" and "ts.memb u (fst (snd (simple_cg_closure_phase_2_helper x1 xs)))" and "ts.memb v (fst (snd (simple_cg_closure_phase_2_helper x1 xs)))" and "u \<in> L M1" and "u \<in> L M2" shows "converge M1 u v \<and> converge M2 u v" proof - have *:"\<And> w . ts.memb w (fst (snd (simple_cg_closure_phase_2_helper x1 xs))) \<Longrightarrow> ts.memb w x1 \<or> (\<exists> x2 . x2 \<in> list.set xs \<and> ts.memb w x2 \<and> can_merge_by_intersection x1 x2)" proof - fix w assume "ts.memb w (fst (snd (simple_cg_closure_phase_2_helper x1 xs)))" then have "ts.memb w (foldl ts.union x1 (filter (can_merge_by_intersection x1) xs))" unfolding simple_cg_closure_phase_1_helper'.simps unfolding separate_by.simps Let_def case_prod_conv fst_conv snd_conv by auto then show "ts.memb w x1 \<or> (\<exists> x2 . x2 \<in> list.set xs \<and> ts.memb w x2 \<and> can_merge_by_intersection x1 x2)" proof (induction xs rule: rev_induct) case Nil then show ?case by auto next case (snoc x' xs) show ?case proof (cases "can_merge_by_intersection x1 x'") case False then have "(foldl ts.union x1 (filter (can_merge_by_intersection x1) (xs @ [x']))) = (foldl ts.union x1 (filter (can_merge_by_intersection x1) xs))" by auto then show ?thesis using snoc by (metis (no_types, opaque_lifting) append_Cons empty_append_eq_id insert_iff list.simps(15) list_set_sym) next case True then have *: "(foldl ts.union x1 (filter (can_merge_by_intersection x1) (xs @ [x']))) = ts.union (foldl ts.union x1 (filter (can_merge_by_intersection x1) xs)) x'" by auto show ?thesis proof (cases "ts.memb w (foldl ts.union x1 (filter (can_merge_by_intersection x1) xs))") case True then show ?thesis using snoc by (metis (no_types, opaque_lifting) append_Cons empty_append_eq_id insert_iff list.simps(15) list_set_sym) next case False then have "ts.memb w x'" using snoc.prems unfolding * unfolding ts.memb_correct[OF ts.invar] unfolding ts.union_correct[OF ts.invar ts.invar] by blast then show ?thesis using True by force qed qed qed qed consider "ts.memb u x1" | "(\<exists> x2 . x2 \<in> list.set xs \<and> ts.memb u x2 \<and> can_merge_by_intersection x1 x2)" using *[OF assms(3)] by blast then show ?thesis proof cases case 1 consider (a) "ts.memb v x1" | (b) "(\<exists> x2 . x2 \<in> list.set xs \<and> ts.memb v x2 \<and> can_merge_by_intersection x1 x2)" using *[OF assms(4)] by blast then show ?thesis proof cases case a then show ?thesis using assms(1)[OF 1 _ assms(5,6)] by auto next case b then obtain x2v where "x2v \<in> list.set xs" and "ts.memb v x2v" and "can_merge_by_intersection x1 x2v" using *[OF assms(3)] by blast show ?thesis using can_merge_by_intersection_validity[of x1 M1 M2 x2v, OF assms(1) assms(2)[OF \<open>x2v \<in> list.set xs\<close>] \<open>can_merge_by_intersection x1 x2v\<close>] using 1 \<open>ts.memb v x2v\<close> assms(5,6) unfolding ts.memb_correct[OF ts.invar] unfolding ts.union_correct[OF ts.invar ts.invar] by blast qed next case 2 then obtain x2u where "x2u \<in> list.set xs" and "ts.memb u x2u" and "can_merge_by_intersection x1 x2u" using *[OF assms(3)] by blast obtain \<alpha>u where "ts.memb \<alpha>u x1" and "ts.memb \<alpha>u x2u" using \<open>can_merge_by_intersection x1 x2u\<close> by auto consider (a) "ts.memb v x1" | (b) "(\<exists> x2 . 
x2 \<in> list.set xs \<and> ts.memb v x2 \<and> can_merge_by_intersection x1 x2)" using *[OF assms(4)] by blast then show ?thesis proof cases case a show ?thesis using can_merge_by_intersection_validity[of x1 M1 M2 x2u, OF assms(1) assms(2)[OF \<open>x2u \<in> list.set xs\<close>] \<open>can_merge_by_intersection x1 x2u\<close>] using \<open>ts.memb u x2u\<close> a assms(5,6) unfolding ts.memb_correct[OF ts.invar] unfolding ts.union_correct[OF ts.invar ts.invar] by blast next case b then obtain x2v where "x2v \<in> list.set xs" and "ts.memb v x2v" and "can_merge_by_intersection x1 x2v" using *[OF assms(4)] by blast obtain \<alpha>v where "ts.memb \<alpha>v x1" and "ts.memb \<alpha>v x2v" using \<open>can_merge_by_intersection x1 x2v\<close> by auto have "\<And> v . ts.memb v (ts.union x1 x2u) \<Longrightarrow> converge M1 u v \<and> converge M2 u v" using can_merge_by_intersection_validity[of x1 M1 M2 x2u, OF assms(1) assms(2)[OF \<open>x2u \<in> list.set xs\<close>] \<open>can_merge_by_intersection x1 x2u\<close> _ _ assms(5,6)] \<open>ts.memb u x2u\<close> unfolding ts.memb_correct[OF ts.invar] unfolding ts.union_correct[OF ts.invar ts.invar] by blast have "\<And> u . ts.memb u (ts.union x1 x2v) \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v" using can_merge_by_intersection_validity[of x1 M1 M2 x2v, OF assms(1) assms(2)[OF \<open>x2v \<in> list.set xs\<close>] \<open>can_merge_by_intersection x1 x2v\<close> ] \<open>ts.memb v x2v\<close> unfolding ts.memb_correct[OF ts.invar] unfolding ts.union_correct[OF ts.invar ts.invar] by blast show ?thesis using \<open>\<And>u. \<lbrakk>ts.memb u (ts.union x1 x2v); u \<in> L M1; u \<in> L M2\<rbrakk> \<Longrightarrow> converge M1 u v \<and> converge M2 u v\<close> \<open>\<And>v. ts.memb v (ts.union x1 x2u) \<Longrightarrow> converge M1 u v \<and> converge M2 u v\<close> \<open>ts.memb \<alpha>u x1\<close> unfolding ts.memb_correct[OF ts.invar] unfolding ts.union_correct[OF ts.invar ts.invar] by fastforce qed qed qed lemma simple_cg_closure_phase_2_helper_validity_snd : assumes "\<And> x2 u v . x2 \<in> list.set xs \<Longrightarrow> ts.memb u x2 \<Longrightarrow> ts.memb v x2 \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v" and "x2 \<in> list.set (snd (snd (simple_cg_closure_phase_2_helper x1 xs)))" and "ts.memb u x2" and "ts.memb v x2" and "u \<in> L M1" and "u \<in> L M2" shows "converge M1 u v \<and> converge M2 u v" proof - have "list.set (snd (snd (simple_cg_closure_phase_2_helper x1 xs))) \<subseteq> list.set xs" by auto then show ?thesis using assms by blast qed lemma simple_cg_closure_phase_2_helper_True : assumes "fst (simple_cg_closure_phase_2_helper x xs)" shows "length (snd (snd (simple_cg_closure_phase_2_helper x xs))) < length xs" proof - have "snd (snd (simple_cg_closure_phase_2_helper x xs)) = filter (\<lambda>x2 . \<not> (can_merge_by_intersection x x2)) xs" by auto moreover have "filter (\<lambda>x2 . 
(can_merge_by_intersection x x2)) xs \<noteq> []" using assms unfolding simple_cg_closure_phase_1_helper'.simps Let_def separate_by.simps by fastforce ultimately show ?thesis using filter_not_all_length[of "can_merge_by_intersection x" xs] by metis qed function simple_cg_closure_phase_2' :: "'a simple_cg \<Rightarrow> (bool \<times> 'a simple_cg) \<Rightarrow> (bool \<times> 'a simple_cg)" where "simple_cg_closure_phase_2' [] (b,done) = (b,done)" | "simple_cg_closure_phase_2' (x#xs) (b,done) = (let (hasChanged,x',xs') = simple_cg_closure_phase_2_helper x xs in if hasChanged then simple_cg_closure_phase_2' xs' (True,x'#done) else simple_cg_closure_phase_2' xs (b,x#done))" by pat_completeness auto termination proof - { fix xa :: "(bool \<times> 'a ts \<times> 'a simple_cg)" fix x xs b don xb y xaa ya assume "xa = simple_cg_closure_phase_2_helper x xs" and "(xb, y) = xa" and "(xaa, ya) = y" and xb have "length ya < Suc (length xs)" using simple_cg_closure_phase_2_helper_True[of x xs] \<open>xb\<close> unfolding \<open>xa = simple_cg_closure_phase_2_helper x xs\<close>[symmetric] unfolding \<open>(xb, y) = xa\<close>[symmetric] \<open>(xaa, ya) = y\<close>[symmetric] unfolding fst_conv snd_conv by auto then have "((ya, True, xaa # don), x # xs, b, don) \<in> measure (\<lambda>(xs, bd). length xs)" by auto } then show ?thesis by (relation "measure (\<lambda> (xs,bd) . length xs)"; force) qed lemma simple_cg_closure_phase_2'_validity : assumes "\<And> x2 u v . x2 \<in> list.set don \<Longrightarrow> ts.memb u x2 \<Longrightarrow> ts.memb v x2 \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v" and "\<And> x2 u v . x2 \<in> list.set xss \<Longrightarrow> ts.memb u x2 \<Longrightarrow> ts.memb v x2 \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v" and "x2 \<in> list.set (snd (simple_cg_closure_phase_2' xss (b,don)))" and "ts.memb u x2" and "ts.memb v x2" and "u \<in> L M1" and "u \<in> L M2" shows "converge M1 u v \<and> converge M2 u v" using assms(1,2,3) proof (induction "length xss" arbitrary: xss b don rule: less_induct) case less show ?case proof (cases xss) case Nil show ?thesis using less.prems(3) less.prems(1)[OF _ assms(4,5,6,7)] unfolding Nil by auto next case (Cons x xs) obtain hasChanged x' xs' where "simple_cg_closure_phase_2_helper x xs = (hasChanged,x',xs')" using prod.exhaust by metis show ?thesis proof (cases hasChanged) case True then have "simple_cg_closure_phase_2' xss (b,don) = simple_cg_closure_phase_2' xs' (True,x'#don)" using \<open>simple_cg_closure_phase_2_helper x xs = (hasChanged,x',xs')\<close> unfolding Cons by auto have *:"(\<And>u v. ts.memb u x \<Longrightarrow> ts.memb v x \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v)" and **:"(\<And>x2 u v. x2 \<in> list.set xs \<Longrightarrow> ts.memb u x2 \<Longrightarrow> ts.memb v x2 \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v)" using less.prems(2) unfolding Cons by (meson list.set_intros)+ have "length xs' < length xss" unfolding Cons using simple_cg_closure_phase_2_helper_True[of x xs] True unfolding \<open>simple_cg_closure_phase_2_helper x xs = (hasChanged,x',xs')\<close> fst_conv snd_conv by auto moreover have "(\<And>x2 u v. 
x2 \<in> list.set (x' # don) \<Longrightarrow> ts.memb u x2 \<Longrightarrow> ts.memb v x2 \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v)" using simple_cg_closure_phase_2_helper_validity_fst[of x M1 M2 xs, OF * **, of "\<lambda> a b c . a"] using less.prems(1) unfolding \<open>simple_cg_closure_phase_2_helper x xs = (hasChanged,x',xs')\<close> fst_conv snd_conv using set_ConsD[of _ x' don] by blast moreover have "(\<And>x2 u v. x2 \<in> list.set xs' \<Longrightarrow> ts.memb u x2 \<Longrightarrow> ts.memb v x2 \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v)" using simple_cg_closure_phase_2_helper_validity_snd[of xs M1 M2 _ x, OF **, of "\<lambda> a b c . a"] unfolding \<open>simple_cg_closure_phase_2_helper x xs = (hasChanged,x',xs')\<close> fst_conv snd_conv by blast moreover have "x2 \<in> list.set (snd (simple_cg_closure_phase_2' xs' (True, x' # don)))" using less.prems(3) unfolding \<open>simple_cg_closure_phase_2' xss (b,don) = simple_cg_closure_phase_2' xs' (True,x'#don)\<close> . ultimately show ?thesis using less.hyps[of xs' "x'#don"] by blast next case False then have "simple_cg_closure_phase_2' xss (b,don) = simple_cg_closure_phase_2' xs (b,x#don)" using \<open>simple_cg_closure_phase_2_helper x xs = (hasChanged,x',xs')\<close> unfolding Cons by auto have "length xs < length xss" unfolding Cons by auto moreover have "(\<And>x2 u v. x2 \<in> list.set (x # don) \<Longrightarrow> ts.memb u x2 \<Longrightarrow> ts.memb v x2 \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v)" using less.prems(1,2) unfolding Cons by (metis list.set_intros(1) set_ConsD) moreover have "(\<And>x2 u v. x2 \<in> list.set xs \<Longrightarrow> ts.memb u x2 \<Longrightarrow> ts.memb v x2 \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v)" using less.prems(2) unfolding Cons by (metis list.set_intros(2)) moreover have "x2 \<in> list.set (snd (simple_cg_closure_phase_2' xs (b, x # don)))" using less.prems(3) unfolding \<open>simple_cg_closure_phase_2' xss (b,don) = simple_cg_closure_phase_2' xs (b,x#don)\<close> unfolding Cons . 
ultimately show ?thesis using less.hyps[of xs "x#don" b] by blast qed qed qed lemma simple_cg_closure_phase_2'_length : "length (snd (simple_cg_closure_phase_2' xss (b,don))) \<le> length xss + length don" proof (induction "length xss" arbitrary: xss b don rule: less_induct) case less show ?case proof (cases xss) case Nil then show ?thesis by auto next case (Cons x xs) obtain hasChanged x' xs' where "simple_cg_closure_phase_2_helper x xs = (hasChanged,x',xs')" using prod.exhaust by metis show ?thesis proof (cases hasChanged) case True then have "simple_cg_closure_phase_2' xss (b,don) = simple_cg_closure_phase_2' xs' (True,x'#don)" using \<open>simple_cg_closure_phase_2_helper x xs = (hasChanged,x',xs')\<close> unfolding Cons by auto have "length xs' < length xss" using simple_cg_closure_phase_2_helper_True[of x xs] True unfolding \<open>simple_cg_closure_phase_2_helper x xs = (hasChanged,x',xs')\<close> snd_conv fst_conv unfolding Cons by auto then show ?thesis using less.hyps[of xs' True "x'#don"] unfolding \<open>simple_cg_closure_phase_2' xss (b,don) = simple_cg_closure_phase_2' xs' (True,x'#don)\<close> unfolding Cons by auto next case False then have "simple_cg_closure_phase_2' xss (b,don) = simple_cg_closure_phase_2' xs (b,x#don)" using \<open>simple_cg_closure_phase_2_helper x xs = (hasChanged,x',xs')\<close> unfolding Cons by auto show ?thesis using less.hyps[of xs b "x#don"] unfolding \<open>simple_cg_closure_phase_2' xss (b,don) = simple_cg_closure_phase_2' xs (b,x#don)\<close> unfolding Cons by auto qed qed qed lemma simple_cg_closure_phase_2'_True : assumes "fst (simple_cg_closure_phase_2' xss (False,don))" and "xss \<noteq> []" shows "length (snd (simple_cg_closure_phase_2' xss (False,don))) < length xss + length don" using assms proof (induction "length xss" arbitrary: xss don rule: less_induct) case less show ?case proof (cases xss) case Nil then show ?thesis using less.prems(2) by auto next case (Cons x xs) obtain hasChanged x' xs' where "simple_cg_closure_phase_2_helper x xs = (hasChanged,x',xs')" using prod.exhaust by metis show ?thesis proof (cases hasChanged) case True then have "simple_cg_closure_phase_2' xss (False,don) = simple_cg_closure_phase_2' xs' (True,x'#don)" using \<open>simple_cg_closure_phase_2_helper x xs = (hasChanged,x',xs')\<close> unfolding Cons by auto have "length xs' < length xs" using simple_cg_closure_phase_2_helper_True[of x xs] True unfolding \<open>simple_cg_closure_phase_2_helper x xs = (hasChanged,x',xs')\<close> snd_conv fst_conv unfolding Cons by auto moreover have "length (snd (simple_cg_closure_phase_2' xs' (True,x'#don))) \<le> length xs' + length (x'#don)" using simple_cg_closure_phase_2'_length by metis ultimately show ?thesis unfolding \<open>simple_cg_closure_phase_2' xss (False,don) = simple_cg_closure_phase_2' xs' (True,x'#don)\<close> unfolding Cons by auto next case False then have "simple_cg_closure_phase_2' xss (False,don) = simple_cg_closure_phase_2' xs (False,x#don)" using \<open>simple_cg_closure_phase_2_helper x xs = (hasChanged,x',xs')\<close> unfolding Cons by auto have "xs \<noteq> []" using \<open>simple_cg_closure_phase_2' xss (False, don) = simple_cg_closure_phase_2' xs (False, x # don)\<close> less.prems(1) by auto show ?thesis using less.hyps[of xs "x#don", OF _ _ \<open>xs \<noteq> []\<close>] using less.prems(1) unfolding \<open>simple_cg_closure_phase_2' xss (False,don) = simple_cg_closure_phase_2' xs (False,x#don)\<close> unfolding Cons by auto qed qed qed (* closure operation (2) *) fun 
simple_cg_closure_phase_2 :: "'a simple_cg \<Rightarrow> (bool \<times> 'a simple_cg)" where "simple_cg_closure_phase_2 xs = simple_cg_closure_phase_2' xs (False,[])" lemma simple_cg_closure_phase_2_validity : assumes "\<And> x2 u v . x2 \<in> list.set xss \<Longrightarrow> ts.memb u x2 \<Longrightarrow> ts.memb v x2 \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v" and "x2 \<in> list.set (snd (simple_cg_closure_phase_2 xss))" and "ts.memb u x2" and "ts.memb v x2" and "u \<in> L M1" and "u \<in> L M2" shows "converge M1 u v \<and> converge M2 u v" using assms(2) unfolding simple_cg_closure_phase_2.simps using simple_cg_closure_phase_2'_validity[OF _ assms(1) _ assms(3,4,5,6), of "[]" xss "\<lambda> a b c . a" False] by auto lemma simple_cg_closure_phase_2_length : "length (snd (simple_cg_closure_phase_2 xss)) \<le> length xss" unfolding simple_cg_closure_phase_2.simps using simple_cg_closure_phase_2'_length[of xss False "[]"] by auto lemma simple_cg_closure_phase_2_True : assumes "fst (simple_cg_closure_phase_2 xss)" shows "length (snd (simple_cg_closure_phase_2 xss)) < length xss" proof - have "xss \<noteq> []" using assms by auto then show ?thesis using simple_cg_closure_phase_2'_True[of xss "[]"] assms by auto qed function simple_cg_closure :: "'a simple_cg \<Rightarrow> 'a simple_cg" where "simple_cg_closure g = (let (hasChanged1,g1) = simple_cg_closure_phase_1 g; (hasChanged2,g2) = simple_cg_closure_phase_2 g1 in if hasChanged1 \<or> hasChanged2 then simple_cg_closure g2 else g2)" by pat_completeness auto termination proof - { fix g :: "'a simple_cg" fix x hasChanged1 g1 xb hasChanged2 g2 assume "x = simple_cg_closure_phase_1 g" "(hasChanged1, g1) = x" "xb = simple_cg_closure_phase_2 g1" "(hasChanged2, g2) = xb" "hasChanged1 \<or> hasChanged2" then have "simple_cg_closure_phase_1 g = (hasChanged1, g1)" and "simple_cg_closure_phase_2 g1 = (hasChanged2, g2)" by auto have "length g1 \<le> length g" using \<open>simple_cg_closure_phase_1 g = (hasChanged1, g1)\<close> using simple_cg_closure_phase_1_length[of g] by auto have "length g2 \<le> length g1" using \<open>simple_cg_closure_phase_2 g1 = (hasChanged2, g2)\<close> using simple_cg_closure_phase_2_length[of g1] by auto consider hasChanged1 | hasChanged2 using \<open>hasChanged1 \<or> hasChanged2\<close> by blast then have "length g2 < length g" proof cases case 1 then have "length g1 < length g" using \<open>simple_cg_closure_phase_1 g = (hasChanged1, g1)\<close> using simple_cg_closure_phase_1_True[of g] by auto then show ?thesis using \<open>length g2 \<le> length g1\<close> by linarith next case 2 then have "length g2 < length g1" using \<open>simple_cg_closure_phase_2 g1 = (hasChanged2, g2)\<close> using simple_cg_closure_phase_2_True[of g1] by auto then show ?thesis using \<open>length g1 \<le> length g\<close> by linarith qed then have "(g2, g) \<in> measure length" by auto } then show ?thesis by (relation "measure length"; simp) qed lemma simple_cg_closure_validity : assumes "observable M1" and "observable M2" and "\<And> x2 u v . 
x2 \<in> list.set g \<Longrightarrow> ts.memb u x2 \<Longrightarrow> ts.memb v x2 \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v" and "x2 \<in> list.set (simple_cg_closure g)" and "ts.memb u x2" and "ts.memb v x2" and "u \<in> L M1" and "u \<in> L M2" shows "converge M1 u v \<and> converge M2 u v" using assms(3,4) proof (induction "length g" arbitrary: g rule: less_induct) case less obtain hasChanged1 hasChanged2 g1 g2 where "simple_cg_closure_phase_1 g = (hasChanged1, g1)" and "simple_cg_closure_phase_2 g1 = (hasChanged2, g2)" using prod.exhaust by metis have "length g1 \<le> length g" using \<open>simple_cg_closure_phase_1 g = (hasChanged1, g1)\<close> using simple_cg_closure_phase_1_length[of g] by auto have "length g2 \<le> length g1" using \<open>simple_cg_closure_phase_2 g1 = (hasChanged2, g2)\<close> using simple_cg_closure_phase_2_length[of g1] by auto have "(\<And>x2 u v. x2 \<in> list.set g2 \<Longrightarrow> ts.memb u x2 \<Longrightarrow> ts.memb v x2 \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v)" proof - have "(\<And>x2 u v. x2 \<in> list.set g1 \<Longrightarrow> ts.memb u x2 \<Longrightarrow> ts.memb v x2 \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v)" using simple_cg_closure_phase_1_validity[OF assms(1,2), of g] using less.prems(1) unfolding \<open>simple_cg_closure_phase_1 g = (hasChanged1, g1)\<close> snd_conv by blast then show "(\<And>x2 u v. x2 \<in> list.set g2 \<Longrightarrow> ts.memb u x2 \<Longrightarrow> ts.memb v x2 \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v)" using simple_cg_closure_phase_2_validity[of g1] unfolding \<open>simple_cg_closure_phase_2 g1 = (hasChanged2, g2)\<close> snd_conv by blast qed show ?thesis proof (cases "hasChanged1 \<or> hasChanged2") case True then consider hasChanged1 | hasChanged2 by blast then have "length g2 < length g" proof cases case 1 then have "length g1 < length g" using \<open>simple_cg_closure_phase_1 g = (hasChanged1, g1)\<close> using simple_cg_closure_phase_1_True[of g] by auto then show ?thesis using \<open>length g2 \<le> length g1\<close> by linarith next case 2 then have "length g2 < length g1" using \<open>simple_cg_closure_phase_2 g1 = (hasChanged2, g2)\<close> using simple_cg_closure_phase_2_True[of g1] by auto then show ?thesis using \<open>length g1 \<le> length g\<close> by linarith qed moreover have "x2 \<in> list.set (simple_cg_closure g2)" using less.prems(2) using \<open>simple_cg_closure_phase_1 g = (hasChanged1, g1)\<close> \<open>simple_cg_closure_phase_2 g1 = (hasChanged2, g2)\<close> True by auto moreover note \<open>(\<And>x2 u v. x2 \<in> list.set g2 \<Longrightarrow> ts.memb u x2 \<Longrightarrow> ts.memb v x2 \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v)\<close> ultimately show ?thesis using less.hyps[of g2] by blast next case False then have "(simple_cg_closure g) = g2" using \<open>simple_cg_closure_phase_1 g = (hasChanged1, g1)\<close> \<open>simple_cg_closure_phase_2 g1 = (hasChanged2, g2)\<close> by auto show ?thesis using less.prems(2) using \<open>(\<And>x2 u v. 
x2 \<in> list.set g2 \<Longrightarrow> ts.memb u x2 \<Longrightarrow> ts.memb v x2 \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v)\<close> assms(5,6,7,8) unfolding \<open>(simple_cg_closure g) = g2\<close> by blast qed qed (* when inserting \<alpha> this also for all \<alpha>1@\<alpha>2 = \<alpha> and \<beta> in [\<alpha>1] inserts \<beta>@\<alpha>2 -- extremely inefficient *) fun simple_cg_insert_with_conv :: "('a::linorder) simple_cg \<Rightarrow> 'a list \<Rightarrow> 'a simple_cg" where "simple_cg_insert_with_conv g ys = (let insert_for_prefix = (\<lambda> g i . let pref = take i ys; suff = drop i ys; pref_conv = simple_cg_lookup g pref in foldl (\<lambda> g' ys' . simple_cg_insert' g' (ys'@suff)) g pref_conv); g' = simple_cg_insert g ys; g'' = foldl insert_for_prefix g' [0..<length ys] in simple_cg_closure g'')" fun simple_cg_merge :: "'a simple_cg \<Rightarrow> 'a list \<Rightarrow> 'a list \<Rightarrow> 'a simple_cg" where "simple_cg_merge g ys1 ys2 = simple_cg_closure (ts.from_list [ys1,ys2] #g)" lemma simple_cg_merge_validity : assumes "observable M1" and "observable M2" and "converge M1 u' v' \<and> converge M2 u' v'" and "\<And> x2 u v . x2 \<in> list.set g \<Longrightarrow> ts.memb u x2 \<Longrightarrow> ts.memb v x2 \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v" and "x2 \<in> list.set (simple_cg_merge g u' v')" and "ts.memb u x2" and "ts.memb v x2" and "u \<in> L M1" and "u \<in> L M2" shows "converge M1 u v \<and> converge M2 u v" proof - have "(\<And>x2 u v. x2 \<in> list.set ((ts.from_list [u',v'])#g) \<Longrightarrow> ts.memb u x2 \<Longrightarrow> ts.memb v x2 \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v)" proof - fix x2 u v assume "x2 \<in> list.set ((ts.from_list [u',v'])#g)" and "ts.memb u x2" and "ts.memb v x2" and "u \<in> L M1" and "u \<in> L M2" then consider "x2 = (ts.from_list [u',v'])" | "x2 \<in> list.set g" by auto then show "converge M1 u v \<and> converge M2 u v" proof cases case 1 have "u \<in> {u',v'}" and "v \<in> {u',v'}" using \<open>ts.memb u x2\<close> \<open>ts.memb v x2\<close> unfolding ts.memb_correct[OF ts.invar] 1 ts.correct(35) by auto then show ?thesis using assms(3) by (cases "u = u'"; cases "v = v'"; auto) next case 2 then show ?thesis using assms(4) \<open>ts.memb u x2\<close> \<open>ts.memb v x2\<close> \<open>u \<in> L M1\<close> \<open>u \<in> L M2\<close> by blast qed qed moreover have "x2 \<in> list.set (simple_cg_closure ((ts.from_list [u',v'])#g))" using assms(5) by auto ultimately show ?thesis using simple_cg_closure_validity[OF assms(1,2) _ _ assms(6,7,8,9)] by blast qed subsection \<open>Invariants\<close> lemma simple_cg_lookup_iff : "\<beta> \<in> list.set (simple_cg_lookup G \<alpha>) \<longleftrightarrow> (\<beta> = \<alpha> \<or> (\<exists> x . 
x \<in> list.set G \<and> ts.memb \<alpha> x \<and> ts.memb \<beta> x))" proof (induction G rule: rev_induct) case Nil then show ?case unfolding simple_cg_lookup.simps unfolding ts.correct(33)[OF ts.invar] unfolding ts.ins_correct[OF ts.invar] unfolding ts.memb_correct[OF ts.invar] unfolding filter.simps foldl.simps unfolding ts.empty_correct by auto next case (snoc x G) show ?case proof (cases "ts.memb \<alpha> x \<and> ts.memb \<beta> x") case True then have *: "(foldl ts.union (ts.empty ()) (filter (ts.memb \<alpha>) (G @ [x]))) = ts.union (foldl ts.union (ts.empty ()) (filter (ts.memb \<alpha>) G)) x" by auto have "\<beta> \<in> list.set (simple_cg_lookup (G@[x]) \<alpha>)" using True unfolding simple_cg_lookup.simps * unfolding ts.correct(33)[OF ts.invar] unfolding ts.ins_correct[OF ts.invar] unfolding ts.memb_correct[OF ts.invar] unfolding ts.union_correct[OF ts.invar ts.invar] by blast then show ?thesis by (meson True in_set_conv_decomp) next case False have "\<beta> \<in> list.set (simple_cg_lookup (G@[x]) \<alpha>) = (\<beta> = \<alpha> \<or> (\<beta> \<in> list.set (simple_cg_lookup G \<alpha>)))" proof - consider "\<not> (ts.memb \<alpha> x)" | "\<not> (ts.memb \<beta> x)" using False by blast then show "\<beta> \<in> list.set (simple_cg_lookup (G@[x]) \<alpha>) = (\<beta> = \<alpha> \<or> (\<beta> \<in> list.set (simple_cg_lookup G \<alpha>)))" proof cases case 1 then show ?thesis unfolding simple_cg_lookup.simps unfolding ts.correct(33)[OF ts.invar] unfolding ts.ins_correct[OF ts.invar] unfolding ts.memb_correct[OF ts.invar] unfolding ts.union_correct[OF ts.invar ts.invar] by auto next case 2 then have "ts.memb \<beta> (foldl ts.union (ts.empty ()) (filter (ts.memb \<alpha>) (G @ [x]))) = ts.memb \<beta> (foldl ts.union (ts.empty ()) (filter (ts.memb \<alpha>) G))" proof (cases "ts.memb \<alpha> x") case True then have *: "(foldl ts.union (ts.empty ()) (filter (ts.memb \<alpha>) (G @ [x]))) = ts.union (foldl ts.union (ts.empty ()) (filter (ts.memb \<alpha>) G)) x" by auto show ?thesis using 2 unfolding * unfolding ts.memb_correct[OF ts.invar] unfolding ts.union_correct[OF ts.invar ts.invar] by auto next case False then show ?thesis by auto qed then have "(\<beta> \<in> list.set (simple_cg_lookup (G@[x]) \<alpha>)) = (\<beta> \<in> Set.insert \<alpha> (list.set (simple_cg_lookup G \<alpha>)))" unfolding simple_cg_lookup.simps unfolding ts.correct(33)[OF ts.invar] unfolding ts.ins_correct[OF ts.invar] unfolding ts.memb_correct[OF ts.invar] unfolding ts.union_correct[OF ts.invar ts.invar] by auto then show ?thesis by (induction G; auto) qed qed moreover have "(\<exists> x' . x' \<in> list.set (G@[x]) \<and> ts.memb \<alpha> x' \<and> ts.memb \<beta> x') = (\<exists> x . x \<in> list.set G \<and> ts.memb \<alpha> x \<and> ts.memb \<beta> x)" using False by auto ultimately show ?thesis using snoc.IH by blast qed qed lemma simple_cg_insert'_invar : "convergence_graph_insert_invar M1 M2 simple_cg_lookup simple_cg_insert'" proof - have "\<And> G \<gamma> \<alpha> \<beta> . \<gamma> \<in> L M1 \<Longrightarrow> \<gamma> \<in> L M2 \<Longrightarrow> (\<And>\<alpha> . \<alpha> \<in> L M1 \<Longrightarrow> \<alpha> \<in> L M2 \<Longrightarrow> \<alpha> \<in> list.set (simple_cg_lookup G \<alpha>) \<and> (\<forall> \<beta> . 
\<beta> \<in> list.set (simple_cg_lookup G \<alpha>) \<longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>)) \<Longrightarrow> \<alpha> \<in> L M1 \<Longrightarrow> \<alpha> \<in> L M2 \<Longrightarrow> \<alpha> \<in> list.set (simple_cg_lookup (simple_cg_insert' G \<gamma>) \<alpha>) \<and> (\<forall> \<beta> . \<beta> \<in> list.set (simple_cg_lookup (simple_cg_insert' G \<gamma>) \<alpha>) \<longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>)" proof fix G \<gamma> \<alpha> assume "\<gamma> \<in> L M1" and "\<gamma> \<in> L M2" and *:"(\<And>\<alpha> . \<alpha> \<in> L M1 \<Longrightarrow> \<alpha> \<in> L M2 \<Longrightarrow> \<alpha> \<in> list.set (simple_cg_lookup G \<alpha>) \<and> (\<forall> \<beta> . \<beta> \<in> list.set (simple_cg_lookup G \<alpha>) \<longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>))" and "\<alpha> \<in> L M1" and "\<alpha> \<in> L M2" show "\<alpha> \<in> list.set (simple_cg_lookup (simple_cg_insert' G \<gamma>) \<alpha>)" unfolding simple_cg_lookup.simps unfolding ts.correct(33)[OF ts.invar] unfolding ts.ins_correct[OF ts.invar] by blast have "\<And> \<beta> . \<beta> \<in> list.set (simple_cg_lookup (simple_cg_insert' G \<gamma>) \<alpha>) \<Longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>" proof - fix \<beta> assume **: "\<beta> \<in> list.set (simple_cg_lookup (simple_cg_insert' G \<gamma>) \<alpha>)" show "converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>" proof (cases "\<beta> \<in> list.set (simple_cg_lookup G \<alpha>)") case True then show ?thesis using *[OF \<open>\<alpha> \<in> L M1\<close> \<open>\<alpha> \<in> L M2\<close>] by presburger next case False show ?thesis proof (cases "find (\<lambda>x . ts.memb \<gamma> x) G") case None then have "(simple_cg_insert' G \<gamma>) = (ts.sng \<gamma>)#G" by auto have "\<alpha> = \<gamma> \<and> \<beta> = \<gamma>" using False \<open>\<beta> \<in> list.set (simple_cg_lookup (simple_cg_insert' G \<gamma>) \<alpha>)\<close> unfolding \<open>(simple_cg_insert' G \<gamma>) = (ts.sng \<gamma>)#G\<close> unfolding simple_cg_lookup_iff unfolding ts.memb_correct[OF ts.invar] using ts.sng_correct(1) by auto then show ?thesis using \<open>\<gamma> \<in> L M1\<close> \<open>\<gamma> \<in> L M2\<close> by auto next case (Some x) then have "(simple_cg_insert' G \<gamma>) = G" by auto then show ?thesis using *[OF \<open>\<alpha> \<in> L M1\<close> \<open>\<alpha> \<in> L M2\<close>] ** by presburger qed qed qed then show "(\<forall> \<beta> . \<beta> \<in> list.set (simple_cg_lookup (simple_cg_insert' G \<gamma>) \<alpha>) \<longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>)" by blast qed then show ?thesis unfolding convergence_graph_insert_invar_def convergence_graph_lookup_invar_def by blast qed lemma simple_cg_insert'_foldl_helper: assumes "list.set xss \<subseteq> L M1 \<inter> L M2" and "(\<And>\<alpha> \<beta>. \<beta> \<in> list.set (simple_cg_lookup G \<alpha>) \<Longrightarrow> \<alpha> \<in> L M1 \<Longrightarrow> \<alpha> \<in> L M2 \<Longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>)" shows "(\<And>\<alpha> \<beta>. \<beta> \<in> list.set (simple_cg_lookup (foldl (\<lambda> xs' ys' . 
simple_cg_insert' xs' ys') G xss) \<alpha>) \<Longrightarrow> \<alpha> \<in> L M1 \<Longrightarrow> \<alpha> \<in> L M2 \<Longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>)" using \<open>list.set xss \<subseteq> L M1 \<inter> L M2\<close> proof (induction xss rule: rev_induct) case Nil then show ?case using \<open>(\<And>\<alpha> \<beta>. \<beta> \<in> list.set (simple_cg_lookup G \<alpha>) \<Longrightarrow> \<alpha> \<in> L M1 \<Longrightarrow> \<alpha> \<in> L M2 \<Longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>)\<close> by auto next case (snoc x xs) have "x \<in> L M1" and "x \<in> L M2" using snoc.prems by auto have "list.set xs \<subseteq> L M1 \<inter> L M2" using snoc.prems by auto then have *:"(\<And>\<alpha> \<beta>. \<beta> \<in> list.set (simple_cg_lookup (foldl (\<lambda> xs' ys'. simple_cg_insert' xs' ys') G xs) \<alpha>) \<Longrightarrow> \<alpha> \<in> L M1 \<Longrightarrow> \<alpha> \<in> L M2 \<Longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>)" using snoc.IH by blast have **:"(foldl (\<lambda> xs' ys'. simple_cg_insert' xs' ys') G (xs@[x])) = simple_cg_insert' (foldl (\<lambda> xs' ys' . simple_cg_insert' xs' ys') G xs) x" by auto show ?case using snoc.prems(1,2,3) * \<open>x \<in> L M1\<close> \<open>x \<in> L M2\<close> unfolding ** using simple_cg_insert'_invar[of M1 M2] unfolding convergence_graph_insert_invar_def convergence_graph_lookup_invar_def using simple_cg_lookup_iff by blast qed lemma simple_cg_insert_invar : "convergence_graph_insert_invar M1 M2 simple_cg_lookup simple_cg_insert" proof - have "\<And> G \<gamma> \<alpha> \<beta> . \<gamma> \<in> L M1 \<Longrightarrow> \<gamma> \<in> L M2 \<Longrightarrow> (\<And>\<alpha> . \<alpha> \<in> L M1 \<Longrightarrow> \<alpha> \<in> L M2 \<Longrightarrow> \<alpha> \<in> list.set (simple_cg_lookup G \<alpha>) \<and> (\<forall> \<beta> . \<beta> \<in> list.set (simple_cg_lookup G \<alpha>) \<longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>)) \<Longrightarrow> \<alpha> \<in> L M1 \<Longrightarrow> \<alpha> \<in> L M2 \<Longrightarrow> \<alpha> \<in> list.set (simple_cg_lookup (simple_cg_insert G \<gamma>) \<alpha>) \<and> (\<forall> \<beta> . \<beta> \<in> list.set (simple_cg_lookup (simple_cg_insert G \<gamma>) \<alpha>) \<longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>)" proof fix G \<gamma> \<alpha> assume "\<gamma> \<in> L M1" and "\<gamma> \<in> L M2" and *:"(\<And>\<alpha> . \<alpha> \<in> L M1 \<Longrightarrow> \<alpha> \<in> L M2 \<Longrightarrow> \<alpha> \<in> list.set (simple_cg_lookup G \<alpha>) \<and> (\<forall> \<beta> . \<beta> \<in> list.set (simple_cg_lookup G \<alpha>) \<longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>))" and "\<alpha> \<in> L M1" and "\<alpha> \<in> L M2" show "\<alpha> \<in> list.set (simple_cg_lookup (simple_cg_insert G \<gamma>) \<alpha>)" unfolding simple_cg_lookup.simps unfolding ts.correct(33)[OF ts.invar] unfolding ts.ins_correct[OF ts.invar] by auto note simple_cg_insert'_foldl_helper[of "prefixes \<gamma>" M1 M2] moreover have "list.set (prefixes \<gamma>) \<subseteq> L M1 \<inter> L M2" by (metis (no_types, lifting) IntI \<open>\<gamma> \<in> L M1\<close> \<open>\<gamma> \<in> L M2\<close> language_prefix prefixes_set_ob subsetI) ultimately show "(\<forall> \<beta> . 
\<beta> \<in> list.set (simple_cg_lookup (simple_cg_insert G \<gamma>) \<alpha>) \<longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>)" using \<open>\<alpha> \<in> L M1\<close> \<open>\<alpha> \<in> L M2\<close> by (metis "*" simple_cg_insert.simps) qed then show ?thesis unfolding convergence_graph_insert_invar_def convergence_graph_lookup_invar_def by blast qed lemma simple_cg_closure_invar_helper : assumes "observable M1" and "observable M2" and "(\<And>\<alpha> \<beta>. \<beta> \<in> list.set (simple_cg_lookup G \<alpha>) \<Longrightarrow> \<alpha> \<in> L M1 \<Longrightarrow> \<alpha> \<in> L M2 \<Longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>)" and "\<beta> \<in> list.set (simple_cg_lookup (simple_cg_closure G) \<alpha>)" and "\<alpha> \<in> L M1" and "\<alpha> \<in> L M2" shows "converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>" proof (cases "\<beta> = \<alpha>") case True then show ?thesis using assms(5,6) by auto next case False show ?thesis proof obtain x where "x \<in> list.set (simple_cg_closure G)" and "ts.memb \<alpha> x" and "ts.memb \<beta> x" using False \<open>\<beta> \<in> list.set (simple_cg_lookup (simple_cg_closure G) \<alpha>)\<close> unfolding simple_cg_lookup_iff by blast have "\<And> x2 u v . x2 \<in> list.set G \<Longrightarrow> ts.memb u x2 \<Longrightarrow> ts.memb v x2 \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v" using \<open>(\<And>\<alpha> \<beta>. \<beta> \<in> list.set (simple_cg_lookup G \<alpha>) \<Longrightarrow> \<alpha> \<in> L M1 \<Longrightarrow> \<alpha> \<in> L M2 \<Longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>)\<close> unfolding simple_cg_lookup_iff by blast have "(\<And>x2 u v. x2 \<in> list.set G \<Longrightarrow> ts.memb u x2 \<Longrightarrow> ts.memb v x2 \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v)" using \<open>(\<And>\<alpha> \<beta>. \<beta> \<in> list.set (simple_cg_lookup G \<alpha>) \<Longrightarrow> \<alpha> \<in> L M1 \<Longrightarrow> \<alpha> \<in> L M2 \<Longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>)\<close> unfolding simple_cg_lookup_iff by blast then show "converge M1 \<alpha> \<beta>" using \<open>ts.memb \<alpha> x\<close> \<open>ts.memb \<beta> x\<close> \<open>x \<in> list.set (simple_cg_closure G)\<close> assms(1) assms(2) assms(5) assms(6) simple_cg_closure_validity by blast have "(\<And>x2 u v. x2 \<in> list.set G \<Longrightarrow> ts.memb u x2 \<Longrightarrow> ts.memb v x2 \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v)" using \<open>(\<And>\<alpha> \<beta>. 
\<beta> \<in> list.set (simple_cg_lookup G \<alpha>) \<Longrightarrow> \<alpha> \<in> L M1 \<Longrightarrow> \<alpha> \<in> L M2 \<Longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>)\<close> unfolding simple_cg_lookup_iff by blast then show "converge M2 \<alpha> \<beta>" using \<open>ts.memb \<alpha> x\<close> \<open>ts.memb \<beta> x\<close> \<open>x \<in> list.set (simple_cg_closure G)\<close> assms(1) assms(2) assms(5) assms(6) simple_cg_closure_validity by blast qed qed lemma simple_cg_merge_invar : assumes "observable M1" and "observable M2" shows "convergence_graph_merge_invar M1 M2 simple_cg_lookup simple_cg_merge" proof - have *: "\<And> G \<gamma> \<gamma>' \<alpha> \<beta>. converge M1 \<gamma> \<gamma>' \<Longrightarrow> converge M2 \<gamma> \<gamma>' \<Longrightarrow> (\<And>\<alpha> \<beta>. \<beta> \<in> list.set (simple_cg_lookup G \<alpha>) \<Longrightarrow> \<alpha> \<in> L M1 \<Longrightarrow> \<alpha> \<in> L M2 \<Longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>) \<Longrightarrow> \<beta> \<in> list.set (simple_cg_lookup (simple_cg_merge G \<gamma> \<gamma>') \<alpha>) \<Longrightarrow> \<alpha> \<in> L M1 \<Longrightarrow> \<alpha> \<in> L M2 \<Longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>" proof - fix G \<gamma> \<gamma>' \<alpha> \<beta> assume "converge M1 \<gamma> \<gamma>'" "converge M2 \<gamma> \<gamma>'" "(\<And>\<alpha> \<beta>. \<beta> \<in> list.set (simple_cg_lookup G \<alpha>) \<Longrightarrow> \<alpha> \<in> L M1 \<Longrightarrow> \<alpha> \<in> L M2 \<Longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>)" "\<beta> \<in> list.set (simple_cg_lookup (simple_cg_merge G \<gamma> \<gamma>') \<alpha>)" "\<alpha> \<in> L M1" "\<alpha> \<in> L M2" show "converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>" proof (cases "\<beta> = \<alpha>") case True then show ?thesis using \<open>\<alpha> \<in> L M1\<close> \<open>\<alpha> \<in> L M2\<close> by auto next case False then obtain x where "x \<in> list.set (simple_cg_merge G \<gamma> \<gamma>')" and "ts.memb \<alpha> x" and "ts.memb \<beta> x" using \<open>\<beta> \<in> list.set (simple_cg_lookup (simple_cg_merge G \<gamma> \<gamma>') \<alpha>)\<close> unfolding simple_cg_lookup_iff by blast have "(\<And>x2 u v. x2 \<in> list.set G \<Longrightarrow> ts.memb u x2 \<Longrightarrow> ts.memb v x2 \<Longrightarrow> u \<in> L M1 \<Longrightarrow> u \<in> L M2 \<Longrightarrow> converge M1 u v \<and> converge M2 u v)" using \<open>(\<And>\<alpha> \<beta>. \<beta> \<in> list.set (simple_cg_lookup G \<alpha>) \<Longrightarrow> \<alpha> \<in> L M1 \<Longrightarrow> \<alpha> \<in> L M2 \<Longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>)\<close> unfolding simple_cg_lookup_iff by blast then show ?thesis using simple_cg_merge_validity[OF assms(1,2) _ _ \<open>x \<in> list.set (simple_cg_merge G \<gamma> \<gamma>')\<close> \<open>ts.memb \<alpha> x\<close> \<open>ts.memb \<beta> x\<close> \<open>\<alpha> \<in> L M1\<close> \<open>\<alpha> \<in> L M2\<close>] \<open>converge M1 \<gamma> \<gamma>'\<close> \<open>converge M2 \<gamma> \<gamma>'\<close> by blast qed qed have "\<And>G \<gamma> \<gamma>'. converge M1 \<gamma> \<gamma>' \<Longrightarrow> converge M2 \<gamma> \<gamma>' \<Longrightarrow> (\<And>\<alpha>. \<alpha> \<in> L M1 \<Longrightarrow> \<alpha> \<in> L M2 \<Longrightarrow> (\<alpha> = \<alpha> \<or> (\<exists>x. 
x \<in> list.set G \<and> ts.memb \<alpha> x \<and> ts.memb \<alpha> x)) \<and> (\<forall>\<beta>. \<beta> = \<alpha> \<or> (\<exists>x. x \<in> list.set G \<and> ts.memb \<alpha> x \<and> ts.memb \<beta> x) \<longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>)) \<Longrightarrow> (\<And>\<alpha>. \<alpha> \<in> L M1 \<Longrightarrow> \<alpha> \<in> L M2 \<Longrightarrow> (\<alpha> = \<alpha> \<or> (\<exists>x. x \<in> list.set (simple_cg_merge G \<gamma> \<gamma>') \<and> ts.memb \<alpha> x \<and> ts.memb \<alpha> x)) \<and> (\<forall>\<beta>. \<beta> = \<alpha> \<or> (\<exists>x. x \<in> list.set (simple_cg_merge G \<gamma> \<gamma>') \<and> ts.memb \<alpha> x \<and> ts.memb \<beta> x) \<longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>))" proof - fix G \<gamma> \<gamma>' assume a1: "converge M1 \<gamma> \<gamma>'" and a2: "converge M2 \<gamma> \<gamma>'" and a3: "(\<And>\<alpha>. \<alpha> \<in> L M1 \<Longrightarrow> \<alpha> \<in> L M2 \<Longrightarrow> (\<alpha> = \<alpha> \<or> (\<exists>x. x \<in> list.set G \<and> ts.memb \<alpha> x \<and> ts.memb \<alpha> x)) \<and> (\<forall>\<beta>. \<beta> = \<alpha> \<or> (\<exists>x. x \<in> list.set G \<and> ts.memb \<alpha> x \<and> ts.memb \<beta> x) \<longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>))" fix \<alpha> assume "\<alpha> \<in> L M1" and "\<alpha> \<in> L M2" have "\<And> \<beta>. \<beta> = \<alpha> \<or> (\<exists>x. x \<in> list.set (simple_cg_merge G \<gamma> \<gamma>') \<and> ts.memb \<alpha> x \<and> ts.memb \<beta> x) \<Longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>" proof - fix \<beta> assume assm: "\<beta> = \<alpha> \<or> (\<exists>x. x \<in> list.set (simple_cg_merge G \<gamma> \<gamma>') \<and> ts.memb \<alpha> x \<and> ts.memb \<beta> x)" show "converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>" proof (cases "\<beta> = \<alpha>") case True then show ?thesis using \<open>\<alpha> \<in> L M1\<close> \<open>\<alpha> \<in> L M2\<close> assms(1,2) converge_refl by auto next case False then obtain x where "x \<in> list.set (simple_cg_merge G \<gamma> \<gamma>')" and "ts.memb \<alpha> x" and "ts.memb \<beta> x" using assm by blast then have "\<beta> \<in> list.set (simple_cg_lookup (simple_cg_merge G \<gamma> \<gamma>') \<alpha>)" unfolding simple_cg_lookup_iff by blast show ?thesis using *[OF a1 a2 _ \<open>\<beta> \<in> list.set (simple_cg_lookup (simple_cg_merge G \<gamma> \<gamma>') \<alpha>)\<close> \<open>\<alpha> \<in> L M1\<close> \<open>\<alpha> \<in> L M2\<close>] by (metis a3 simple_cg_lookup_iff) qed qed then show "(\<alpha> = \<alpha> \<or> (\<exists>x. x \<in> list.set (simple_cg_merge G \<gamma> \<gamma>') \<and> ts.memb \<alpha> x \<and> ts.memb \<alpha> x)) \<and> (\<forall>\<beta>. \<beta> = \<alpha> \<or> (\<exists>x. 
x \<in> list.set (simple_cg_merge G \<gamma> \<gamma>') \<and> ts.memb \<alpha> x \<and> ts.memb \<beta> x) \<longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>)" by blast qed then show ?thesis unfolding convergence_graph_merge_invar_def convergence_graph_lookup_invar_def unfolding simple_cg_lookup_iff [symmetric] by blast qed lemma simple_cg_empty_invar : "convergence_graph_lookup_invar M1 M2 simple_cg_lookup simple_cg_empty" unfolding convergence_graph_lookup_invar_def simple_cg_empty_def unfolding simple_cg_lookup_iff using converge_refl by auto lemma simple_cg_initial_invar : assumes "observable M1" shows "convergence_graph_initial_invar M1 M2 simple_cg_lookup simple_cg_initial" proof - have "\<And> T . (L M1 \<inter> set T = (L M2 \<inter> set T)) \<Longrightarrow> finite_tree T \<Longrightarrow> (\<And>\<alpha> \<beta>. \<beta> \<in> list.set (simple_cg_lookup (simple_cg_initial M1 T) \<alpha>) \<Longrightarrow> \<alpha> \<in> L M1 \<Longrightarrow> \<alpha> \<in> L M2 \<Longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>)" proof - fix T assume "(L M1 \<inter> set T = (L M2 \<inter> set T))" and "finite_tree T" then have "list.set (filter (is_in_language M1 (initial M1)) (sorted_list_of_sequences_in_tree T)) \<subseteq> L M1 \<inter> L M2" unfolding is_in_language_iff[OF assms fsm_initial] using sorted_list_of_sequences_in_tree_set[OF \<open>finite_tree T\<close>] by auto moreover have "(\<And>\<alpha> \<beta>. \<beta> \<in> list.set (simple_cg_lookup simple_cg_empty \<alpha>) \<Longrightarrow> \<alpha> \<in> L M1 \<Longrightarrow> \<alpha> \<in> L M2 \<Longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>)" using simple_cg_empty_invar unfolding convergence_graph_lookup_invar_def by blast ultimately show "(\<And>\<alpha> \<beta>. \<beta> \<in> list.set (simple_cg_lookup (simple_cg_initial M1 T) \<alpha>) \<Longrightarrow> \<alpha> \<in> L M1 \<Longrightarrow> \<alpha> \<in> L M2 \<Longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>)" using simple_cg_insert'_foldl_helper[of "(filter (is_in_language M1 (initial M1)) (sorted_list_of_sequences_in_tree T))" M1 M2] unfolding simple_cg_initial.simps by blast qed then show ?thesis unfolding convergence_graph_initial_invar_def convergence_graph_lookup_invar_def using simple_cg_lookup_iff by blast qed lemma simple_cg_insert_with_conv_invar : assumes "observable M1" assumes "observable M2" shows "convergence_graph_insert_invar M1 M2 simple_cg_lookup simple_cg_insert_with_conv" proof - have "\<And> G \<gamma> \<alpha> \<beta> . \<gamma> \<in> L M1 \<Longrightarrow> \<gamma> \<in> L M2 \<Longrightarrow> (\<And>\<alpha> . \<alpha> \<in> L M1 \<Longrightarrow> \<alpha> \<in> L M2 \<Longrightarrow> \<alpha> \<in> list.set (simple_cg_lookup G \<alpha>) \<and> (\<forall> \<beta> . \<beta> \<in> list.set (simple_cg_lookup G \<alpha>) \<longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>)) \<Longrightarrow> \<alpha> \<in> L M1 \<Longrightarrow> \<alpha> \<in> L M2 \<Longrightarrow> \<alpha> \<in> list.set (simple_cg_lookup (simple_cg_insert_with_conv G \<gamma>) \<alpha>) \<and> (\<forall> \<beta> . \<beta> \<in> list.set (simple_cg_lookup (simple_cg_insert_with_conv G \<gamma>) \<alpha>) \<longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>)" proof fix G ys \<alpha> assume "ys \<in> L M1" and "ys \<in> L M2" and *:"(\<And>\<alpha> . 
\<alpha> \<in> L M1 \<Longrightarrow> \<alpha> \<in> L M2 \<Longrightarrow> \<alpha> \<in> list.set (simple_cg_lookup G \<alpha>) \<and> (\<forall> \<beta> . \<beta> \<in> list.set (simple_cg_lookup G \<alpha>) \<longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>))" and "\<alpha> \<in> L M1" and "\<alpha> \<in> L M2" show "\<alpha> \<in> list.set (simple_cg_lookup (simple_cg_insert_with_conv G ys) \<alpha>)" using simple_cg_lookup_iff by blast have "\<And> \<beta> . \<beta> \<in> list.set (simple_cg_lookup (simple_cg_insert_with_conv G ys) \<alpha>) \<Longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>" proof - fix \<beta> assume "\<beta> \<in> list.set (simple_cg_lookup (simple_cg_insert_with_conv G ys) \<alpha>)" define insert_for_prefix where insert_for_prefix: "insert_for_prefix = (\<lambda> g i . let pref = take i ys; suff = drop i ys; pref_conv = simple_cg_lookup g pref in foldl (\<lambda> g' ys' . simple_cg_insert' g' (ys'@suff)) g pref_conv)" define g' where g': "g' = simple_cg_insert G ys" define g'' where g'': "g'' = foldl insert_for_prefix g' [0..<length ys]" have "simple_cg_insert_with_conv G ys = simple_cg_closure g''" unfolding simple_cg_insert_with_conv.simps g'' g' insert_for_prefix Let_def by force have g'_invar: "(\<And>\<alpha> \<beta>. \<beta> \<in> list.set (simple_cg_lookup g' \<alpha>) \<Longrightarrow> \<alpha> \<in> L M1 \<Longrightarrow> \<alpha> \<in> L M2 \<Longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>)" using g' * using simple_cg_insert_invar \<open>ys \<in> L M1\<close> \<open>ys \<in> L M2\<close> unfolding convergence_graph_insert_invar_def convergence_graph_lookup_invar_def by blast have insert_for_prefix_invar: "\<And> i g . (\<And>\<alpha> \<beta>. \<beta> \<in> list.set (simple_cg_lookup g \<alpha>) \<Longrightarrow> \<alpha> \<in> L M1 \<Longrightarrow> \<alpha> \<in> L M2 \<Longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>) \<Longrightarrow> (\<And>\<alpha> \<beta>. \<beta> \<in> list.set (simple_cg_lookup (insert_for_prefix g i) \<alpha>) \<Longrightarrow> \<alpha> \<in> L M1 \<Longrightarrow> \<alpha> \<in> L M2 \<Longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>)" proof - fix i g assume "(\<And>\<alpha> \<beta>. \<beta> \<in> list.set (simple_cg_lookup g \<alpha>) \<Longrightarrow> \<alpha> \<in> L M1 \<Longrightarrow> \<alpha> \<in> L M2 \<Longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>)" define pref where pref: "pref = take i ys" define suff where suff: "suff = drop i ys" let ?pref_conv = "simple_cg_lookup g pref" have "insert_for_prefix g i = foldl (\<lambda> g' ys' . simple_cg_insert' g' (ys'@suff)) g ?pref_conv" unfolding insert_for_prefix pref suff Let_def by force have "ys = pref @ suff" unfolding pref suff by auto then have "pref \<in> L M1" and "pref \<in> L M2" using \<open>ys \<in> L M1\<close> \<open>ys \<in> L M2\<close> language_prefix by metis+ have insert_step_invar: "\<And> ys' pc G . list.set pc \<subseteq> list.set (simple_cg_lookup g pref) \<Longrightarrow> ys' \<in> list.set pc \<Longrightarrow> (\<And>\<alpha> \<beta>. \<beta> \<in> list.set (simple_cg_lookup G \<alpha>) \<Longrightarrow> \<alpha> \<in> L M1 \<Longrightarrow> \<alpha> \<in> L M2 \<Longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>) \<Longrightarrow> (\<And>\<alpha> \<beta>. 
\<beta> \<in> list.set (simple_cg_lookup (simple_cg_insert' G (ys'@suff)) \<alpha>) \<Longrightarrow> \<alpha> \<in> L M1 \<Longrightarrow> \<alpha> \<in> L M2 \<Longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>)" proof - fix ys' pc G assume "list.set pc \<subseteq> list.set (simple_cg_lookup g pref)" and "ys' \<in> list.set pc" and "(\<And>\<alpha> \<beta>. \<beta> \<in> list.set (simple_cg_lookup G \<alpha>) \<Longrightarrow> \<alpha> \<in> L M1 \<Longrightarrow> \<alpha> \<in> L M2 \<Longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>)" then have "converge M1 pref ys'" and "converge M2 pref ys'" using \<open>\<And>\<beta> \<alpha>. \<beta> \<in> list.set (simple_cg_lookup g \<alpha>) \<Longrightarrow> \<alpha> \<in> L M1 \<Longrightarrow> \<alpha> \<in> L M2 \<Longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>\<close> using \<open>pref \<in> L M1\<close> \<open>pref \<in> L M2\<close> by blast+ have "(ys'@suff) \<in> L M1" using \<open>converge M1 pref ys'\<close> using \<open>ys = pref @ suff\<close> \<open>ys \<in> L M1\<close> assms(1) converge_append_language_iff by blast moreover have "(ys'@suff) \<in> L M2" using \<open>converge M2 pref ys'\<close> using \<open>ys = pref @ suff\<close> \<open>ys \<in> L M2\<close> assms(2) converge_append_language_iff by blast ultimately show "(\<And>\<alpha> \<beta>. \<beta> \<in> list.set (simple_cg_lookup (simple_cg_insert' G (ys'@suff)) \<alpha>) \<Longrightarrow> \<alpha> \<in> L M1 \<Longrightarrow> \<alpha> \<in> L M2 \<Longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>)" using \<open>(\<And>\<alpha> \<beta>. \<beta> \<in> list.set (simple_cg_lookup G \<alpha>) \<Longrightarrow> \<alpha> \<in> L M1 \<Longrightarrow> \<alpha> \<in> L M2 \<Longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>)\<close> using simple_cg_insert'_invar[of M1 M2] unfolding convergence_graph_insert_invar_def convergence_graph_lookup_invar_def using simple_cg_lookup_iff by blast qed have insert_foldl_invar: "\<And> pc G . list.set pc \<subseteq> list.set (simple_cg_lookup g pref) \<Longrightarrow> (\<And>\<alpha> \<beta>. \<beta> \<in> list.set (simple_cg_lookup G \<alpha>) \<Longrightarrow> \<alpha> \<in> L M1 \<Longrightarrow> \<alpha> \<in> L M2 \<Longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>) \<Longrightarrow> (\<And>\<alpha> \<beta>. \<beta> \<in> list.set (simple_cg_lookup (foldl (\<lambda> g' ys' . simple_cg_insert' g' (ys'@suff)) G pc) \<alpha>) \<Longrightarrow> \<alpha> \<in> L M1 \<Longrightarrow> \<alpha> \<in> L M2 \<Longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>)" proof - fix pc G assume "list.set pc \<subseteq> list.set (simple_cg_lookup g pref)" and "(\<And>\<alpha> \<beta>. \<beta> \<in> list.set (simple_cg_lookup G \<alpha>) \<Longrightarrow> \<alpha> \<in> L M1 \<Longrightarrow> \<alpha> \<in> L M2 \<Longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>)" then show "(\<And>\<alpha> \<beta>. \<beta> \<in> list.set (simple_cg_lookup (foldl (\<lambda> g' ys' . simple_cg_insert' g' (ys'@suff)) G pc) \<alpha>) \<Longrightarrow> \<alpha> \<in> L M1 \<Longrightarrow> \<alpha> \<in> L M2 \<Longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>)" proof (induction pc rule: rev_induct) case Nil then show ?case by auto next case (snoc a pc) have **:"(foldl (\<lambda>g' ys'. 
simple_cg_insert' g' (ys' @ suff)) G (pc @ [a])) = simple_cg_insert' (foldl (\<lambda>g' ys'. simple_cg_insert' g' (ys' @ suff)) G pc) (a@suff)" unfolding foldl_append by auto have "list.set pc \<subseteq> list.set (simple_cg_lookup g pref)" using snoc.prems(4) by auto then have *: "(\<And>\<alpha> \<beta>. \<beta> \<in> list.set (simple_cg_lookup (foldl (\<lambda> g' ys' . simple_cg_insert' g' (ys'@suff)) G pc) \<alpha>) \<Longrightarrow> \<alpha> \<in> L M1 \<Longrightarrow> \<alpha> \<in> L M2 \<Longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>)" using snoc.IH using snoc.prems(5) by blast have "a \<in> list.set (pc @ [a])" by auto then show ?case using snoc.prems(1,2,3) unfolding ** using insert_step_invar[OF snoc.prems(4), of a "(foldl (\<lambda> g' ys' . simple_cg_insert' g' (ys'@suff)) G pc)", OF _ *] by blast qed qed show "(\<And>\<alpha> \<beta>. \<beta> \<in> list.set (simple_cg_lookup (insert_for_prefix g i) \<alpha>) \<Longrightarrow> \<alpha> \<in> L M1 \<Longrightarrow> \<alpha> \<in> L M2 \<Longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>)" using insert_foldl_invar[of ?pref_conv g, OF _ \<open>(\<And>\<alpha> \<beta>. \<beta> \<in> list.set (simple_cg_lookup g \<alpha>) \<Longrightarrow> \<alpha> \<in> L M1 \<Longrightarrow> \<alpha> \<in> L M2 \<Longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>)\<close>] unfolding \<open>insert_for_prefix g i = foldl (\<lambda> g' ys' . simple_cg_insert' g' (ys'@suff)) g ?pref_conv\<close> by blast qed have insert_for_prefix_foldl_invar: "\<And> ns . (\<And>\<alpha> \<beta>. \<beta> \<in> list.set (simple_cg_lookup (foldl insert_for_prefix g' ns) \<alpha>) \<Longrightarrow> \<alpha> \<in> L M1 \<Longrightarrow> \<alpha> \<in> L M2 \<Longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>)" proof - fix ns show "(\<And>\<alpha> \<beta>. \<beta> \<in> list.set (simple_cg_lookup (foldl insert_for_prefix g' ns) \<alpha>) \<Longrightarrow> \<alpha> \<in> L M1 \<Longrightarrow> \<alpha> \<in> L M2 \<Longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>)" proof (induction ns rule: rev_induct) case Nil then show ?case using g'_invar by auto next case (snoc a ns) show ?case using snoc.prems using insert_for_prefix_invar [OF snoc.IH] by auto qed qed show \<open>converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>\<close> using \<open>\<beta> \<in> list.set (simple_cg_lookup (simple_cg_insert_with_conv G ys) \<alpha>)\<close> unfolding \<open>simple_cg_insert_with_conv G ys = simple_cg_closure g''\<close> g'' using insert_for_prefix_foldl_invar[of _ "[0..<length ys]" _] using simple_cg_closure_invar_helper[OF assms, of "(foldl insert_for_prefix g' [0..<length ys])", OF insert_for_prefix_foldl_invar[of _ "[0..<length ys]" _]] using \<open>\<alpha> \<in> L M1\<close> \<open>\<alpha> \<in> L M2\<close> by blast qed then show "(\<forall> \<beta> . \<beta> \<in> list.set (simple_cg_lookup (simple_cg_insert_with_conv G ys) \<alpha>) \<longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>)" by blast qed then show ?thesis unfolding convergence_graph_insert_invar_def convergence_graph_lookup_invar_def by blast qed (* fun simple_cg_lookup_with_conv :: "('a::linorder) simple_cg \<Rightarrow> 'a list \<Rightarrow> 'a list list" where "simple_cg_lookup_with_conv g ys = (let lookup_for_prefix = (\<lambda>i . 
let pref = take i ys; suff = drop i ys; pref_conv = (foldl (ts.union) (ts.empty ()) (filter (\<lambda>x . ts.memb pref x) g)) in map (\<lambda> pref' . pref'@suff) (ts.to_list pref_conv)) in ts.to_list (ts.from_list (ys # (concat (map lookup_for_prefix [0..<Suc (length ys)])))))" *) lemma simple_cg_lookup_with_conv_from_lookup_invar: fixes G :: "('a::linorder\<times>'b::linorder) simple_cg" assumes "observable M1" and "observable M2" and "convergence_graph_lookup_invar M1 M2 simple_cg_lookup G" shows "convergence_graph_lookup_invar M1 M2 simple_cg_lookup_with_conv G" proof - have "(\<And> \<alpha> \<beta>. \<beta> \<in> list.set (simple_cg_lookup_with_conv G \<alpha>) \<Longrightarrow> \<alpha> \<in> L M1 \<Longrightarrow> \<alpha> \<in> L M2 \<Longrightarrow> converge M1 \<alpha> \<beta> \<and> converge M2 \<alpha> \<beta>)" proof - fix ys \<beta> assume "\<beta> \<in> list.set (simple_cg_lookup_with_conv G ys)" and "ys \<in> L M1" and "ys \<in> L M2" define lookup_for_prefix where lookup_for_prefix: "lookup_for_prefix = (\<lambda>i . let pref = take i ys; suff = drop i ys; pref_conv = (foldl (ts.union) (ts.empty ()) (filter (\<lambda>x . ts.memb pref x) G)) in map (\<lambda> pref' . pref'@suff) (ts.to_list pref_conv))" have "\<And> ns . \<beta> \<in> list.set (ts.to_list (ts.from_list (ys # (concat (map lookup_for_prefix ns))))) \<Longrightarrow> converge M1 ys \<beta> \<and> converge M2 ys \<beta>" proof - fix ns assume "\<beta> \<in> list.set (ts.to_list (ts.from_list (ys # (concat (map lookup_for_prefix ns)))))" then show "converge M1 ys \<beta> \<and> converge M2 ys \<beta>" proof (induction ns rule: rev_induct) case Nil then have "\<beta> = ys" unfolding ts.correct(33)[OF ts.invar] unfolding ts.correct(35) by auto then show ?case using \<open>ys \<in> L M1\<close> \<open>ys \<in> L M2\<close> by auto next case (snoc a ns) have "list.set (ts.to_list (ts.from_list (ys # (concat (map lookup_for_prefix (ns@[a])))))) = list.set (lookup_for_prefix a) \<union> list.set (ts.to_list (ts.from_list (ys # (concat (map lookup_for_prefix ns)))))" unfolding ts.correct(33)[OF ts.invar] unfolding ts.correct(35) by auto then consider "\<beta> \<in> list.set (lookup_for_prefix a)" | "\<beta> \<in> list.set (ts.to_list (ts.from_list (ys # (concat (map lookup_for_prefix ns)))))" using snoc.prems by auto then show ?case proof cases case 1 define pref where pref: "pref = take a ys" define suff where suff: "suff = drop a ys" define pref_conv where pref_conv: "pref_conv = (foldl (ts.union) (ts.empty ()) (filter (\<lambda>x . ts.memb pref x) G))" have "lookup_for_prefix a = map (\<lambda> pref' . pref'@suff) (ts.to_list pref_conv)" unfolding lookup_for_prefix pref suff pref_conv by metis then have *:"\<beta> \<in> list.set (map (\<lambda> pref' . pref'@suff) (ts.to_list (foldl (ts.union) (ts.empty ()) (filter (\<lambda>x . ts.memb pref x) G))))" using 1 unfolding pref_conv by auto obtain \<gamma> where "\<gamma> \<in> list.set (simple_cg_lookup G pref)" and "\<beta> = \<gamma>@suff" using set_map_elem[OF *] unfolding simple_cg_lookup.simps unfolding ts.correct(33)[OF ts.invar] unfolding ts.ins_correct[OF ts.invar] by blast then have "converge M1 \<gamma> pref" and "converge M2 \<gamma> pref" using \<open>convergence_graph_lookup_invar M1 M2 simple_cg_lookup G\<close> unfolding convergence_graph_lookup_invar_def by (metis \<open>ys \<in> L M1\<close> \<open>ys \<in> L M2\<close> append_take_drop_id converge_sym language_prefix pref)+ then show ?thesis by (metis \<open>\<And>thesis. (\<And>\<gamma>. 
\<lbrakk>\<gamma> \<in> list.set (simple_cg_lookup G pref); \<beta> = \<gamma> @ suff\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis\<close> \<open>ys \<in> L M1\<close> \<open>ys \<in> L M2\<close> append_take_drop_id assms(1) assms(2) assms(3) converge_append converge_append_language_iff convergence_graph_lookup_invar_def language_prefix pref suff) next case 2 then show ?thesis using snoc.IH by blast qed qed qed then show "converge M1 ys \<beta> \<and> converge M2 ys \<beta>" using \<open>\<beta> \<in> list.set (simple_cg_lookup_with_conv G ys)\<close> unfolding simple_cg_lookup_with_conv.simps Let_def lookup_for_prefix sorted_list_of_set_set by blast qed moreover have "\<And> \<alpha> . \<alpha> \<in> list.set (simple_cg_lookup_with_conv G \<alpha>)" unfolding simple_cg_lookup_with_conv.simps Let_def unfolding ts.correct(33)[OF ts.invar] unfolding ts.correct(35) by auto ultimately show ?thesis unfolding convergence_graph_lookup_invar_def by blast qed lemma simple_cg_lookup_from_lookup_invar_with_conv: assumes "convergence_graph_lookup_invar M1 M2 simple_cg_lookup_with_conv G" shows "convergence_graph_lookup_invar M1 M2 simple_cg_lookup G" proof - have "\<And> \<alpha> \<beta>. \<beta> \<in> list.set (simple_cg_lookup G \<alpha>) \<Longrightarrow> \<beta> \<in> list.set (simple_cg_lookup_with_conv G \<alpha>)" proof - fix \<alpha> \<beta> assume "\<beta> \<in> list.set (simple_cg_lookup G \<alpha>)" define lookup_for_prefix where lookup_for_prefix: "lookup_for_prefix = (\<lambda>i . let pref = take i \<alpha>; suff = drop i \<alpha>; pref_conv = simple_cg_lookup G pref in map (\<lambda> pref' . pref'@suff) pref_conv)" have "lookup_for_prefix (length \<alpha>) = simple_cg_lookup G \<alpha>" unfolding lookup_for_prefix by auto moreover have "list.set (lookup_for_prefix (length \<alpha>)) \<subseteq> list.set (simple_cg_lookup_with_conv G \<alpha>)" proof - have "\<And> xs . list.set (map (\<lambda>pref'. pref' @ drop (length \<alpha>) \<alpha>) (ts.to_list (ts.ins (take (length \<alpha>) \<alpha>) xs))) = list.set (\<alpha> # map (\<lambda>pref'. pref' @ drop (length \<alpha>) \<alpha>) (ts.to_list xs))" using ts.ins_correct(1)[OF ts.invar] using ts.correct(33)[OF ts.invar] by (simp add: \<open>\<And>s. list.set (ts.to_list s) = ts.\<alpha> s\<close> \<open>\<And>x s. ts.\<alpha> (ts.ins x s) = Set.insert x (ts.\<alpha> s)\<close>) then have *:"list.set (lookup_for_prefix (length \<alpha>)) = list.set (\<alpha> # concat (map (\<lambda>i. map (\<lambda>pref'. 
pref' @ drop i \<alpha>) (ts.to_list (foldl ts.union (ts.empty ()) (filter (ts.memb (take i \<alpha>)) G)))) [length \<alpha>]))" unfolding simple_cg_lookup_with_conv.simps Let_def unfolding simple_cg_lookup.simps lookup_for_prefix Let_def unfolding ts.correct(33)[OF ts.invar] unfolding ts.correct(35) unfolding list.map concat.simps by simp show ?thesis unfolding * unfolding simple_cg_lookup_with_conv.simps Let_def unfolding ts.correct(33)[OF ts.invar] unfolding ts.correct(35) by auto qed ultimately show "\<beta> \<in> list.set (simple_cg_lookup_with_conv G \<alpha>)" using \<open>\<beta> \<in> list.set (simple_cg_lookup G \<alpha>)\<close> by (metis subsetD) qed then show ?thesis using assms unfolding convergence_graph_lookup_invar_def using simple_cg_lookup_iff by blast qed lemma simple_cg_lookup_invar_with_conv_eq : assumes "observable M1" and "observable M2" shows "convergence_graph_lookup_invar M1 M2 simple_cg_lookup_with_conv G = convergence_graph_lookup_invar M1 M2 simple_cg_lookup G" using simple_cg_lookup_with_conv_from_lookup_invar[OF assms] simple_cg_lookup_from_lookup_invar_with_conv[of M1 M2] by blast lemma simple_cg_insert_invar_with_conv : assumes "observable M1" and "observable M2" shows "convergence_graph_insert_invar M1 M2 simple_cg_lookup_with_conv simple_cg_insert" using simple_cg_insert_invar[of M1 M2] unfolding convergence_graph_insert_invar_def unfolding simple_cg_lookup_invar_with_conv_eq[OF assms] . lemma simple_cg_merge_invar_with_conv : assumes "observable M1" and "observable M2" shows "convergence_graph_merge_invar M1 M2 simple_cg_lookup_with_conv simple_cg_merge" using simple_cg_merge_invar[OF assms] unfolding convergence_graph_merge_invar_def unfolding simple_cg_lookup_invar_with_conv_eq[OF assms] . lemma simple_cg_initial_invar_with_conv : assumes "observable M1" and "observable M2" shows "convergence_graph_initial_invar M1 M2 simple_cg_lookup_with_conv simple_cg_initial" using simple_cg_initial_invar[OF assms(1), of M2] unfolding convergence_graph_initial_invar_def unfolding simple_cg_lookup_invar_with_conv_eq[OF assms] . end
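For readability, the lookup and insert operations characterised by simple_cg_lookup_iff and used in the invariance lemmas above can be sketched with plain Haskell lists. This is a minimal illustration only: the names cgLookup and cgInsert' are made up here, and ordinary lists stand in for the ts set interface of the theory.

import Data.List (find, nub)

-- A convergence class is a set of traces; a graph is a list of such classes.
-- (Plain lists stand in for the ts set interface of the Isabelle theory.)
type Trace a    = [a]
type SimpleCg a = [[Trace a]]

-- Lookup returns alpha itself plus every trace sharing a class with alpha,
-- mirroring simple_cg_lookup_iff: beta is in the lookup of alpha iff
-- beta = alpha or some class contains both alpha and beta.
cgLookup :: Eq a => SimpleCg a -> Trace a -> [Trace a]
cgLookup g alpha =
  nub (alpha : concat [cls | cls <- g, alpha `elem` cls])

-- Insert adds a fresh singleton class for gamma unless some class already
-- contains gamma, mirroring the find / sng case split of simple_cg_insert'.
cgInsert' :: Eq a => SimpleCg a -> Trace a -> SimpleCg a
cgInsert' g gamma =
  case find (gamma `elem`) g of
    Just _  -> g
    Nothing -> [gamma] : g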
//==================================================================================================
/*!
  @file

  @copyright 2016 NumScale SAS
  @copyright 2016 J.T. Lapreste

  Distributed under the Boost Software License, Version 1.0.
  (See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)
*/
//==================================================================================================
#ifndef BOOST_SIMD_FUNCTION_INCS_HPP_INCLUDED
#define BOOST_SIMD_FUNCTION_INCS_HPP_INCLUDED

#if defined(DOXYGEN_ONLY)
namespace boost { namespace simd
{
  /*!
    @ingroup group-arithmetic
    Function object implementing incs capabilities

    Returns the entry plus one, saturated in the entry type.

    This is a convenient alias of @ref oneplus
  **/
  const boost::dispatch::functor<tag::incs_> incs = {};
} }
#endif

#include <boost/simd/function/scalar/oneplus.hpp>
#include <boost/simd/function/simd/incs.hpp>

#endif
[GOAL] m n : ℕ hne : m ≠ n ⊢ ↑m ≠ ↑n [PROOFSTEP] exact_mod_cast hne [GOAL] x : ℕ r : ℝ ⊢ closedBall x r = Icc ⌈↑x - r⌉₊ ⌊↑x + r⌋₊ [PROOFSTEP] rcases le_or_lt 0 r with (hr | hr) [GOAL] case inl x : ℕ r : ℝ hr : 0 ≤ r ⊢ closedBall x r = Icc ⌈↑x - r⌉₊ ⌊↑x + r⌋₊ [PROOFSTEP] rw [← preimage_closedBall, Real.closedBall_eq_Icc, preimage_Icc] [GOAL] case inl x : ℕ r : ℝ hr : 0 ≤ r ⊢ 0 ≤ ↑x + r [PROOFSTEP] exact add_nonneg (cast_nonneg x) hr [GOAL] case inr x : ℕ r : ℝ hr : r < 0 ⊢ closedBall x r = Icc ⌈↑x - r⌉₊ ⌊↑x + r⌋₊ [PROOFSTEP] rw [closedBall_eq_empty.2 hr, Icc_eq_empty_of_lt] [GOAL] case inr x : ℕ r : ℝ hr : r < 0 ⊢ ⌊↑x + r⌋₊ < ⌈↑x - r⌉₊ [PROOFSTEP] calc ⌊(x : ℝ) + r⌋₊ ≤ ⌊(x : ℝ)⌋₊ := floor_mono <| by linarith _ < ⌈↑x - r⌉₊ := by rw [floor_coe, Nat.lt_ceil] linarith [GOAL] x : ℕ r : ℝ hr : r < 0 ⊢ ↑x + r ≤ ↑x [PROOFSTEP] linarith [GOAL] x : ℕ r : ℝ hr : r < 0 ⊢ ⌊↑x⌋₊ < ⌈↑x - r⌉₊ [PROOFSTEP] rw [floor_coe, Nat.lt_ceil] [GOAL] x : ℕ r : ℝ hr : r < 0 ⊢ ↑x < ↑x - r [PROOFSTEP] linarith [GOAL] x : ℕ r : ℝ ⊢ IsCompact (closedBall x r) [PROOFSTEP] rw [closedBall_eq_Icc] [GOAL] x : ℕ r : ℝ ⊢ IsCompact (Icc ⌈↑x - r⌉₊ ⌊↑x + r⌋₊) [PROOFSTEP] exact (Set.finite_Icc _ _).isCompact [GOAL] ⊢ NeBot (cocompact ℕ) [PROOFSTEP] simp [Filter.atTop_neBot]
{-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE InstanceSigs #-} {-# LANGUAGE Rank2Types #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE UndecidableInstances #-} {-# LANGUAGE IncoherentInstances #-} {-# LANGUAGE StandaloneDeriving #-} {-# LANGUAGE MultiParamTypeClasses #-} {-# LANGUAGE RecordWildCards #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE ForeignFunctionInterface #-} {-# OPTIONS_GHC -fno-warn-orphans #-} module Data.Vector.Image.IO ( readImg , writeImg , writeBmp , writePng , writeJpeg ) where import Prelude hiding(map) import qualified Prelude as P import qualified Data.List as L import Data.Vector.Storable hiding(forM_,map,replicate,fromList,toList,(!?),(!)) import Data.Maybe import Numeric.LinearAlgebra.Data hiding (fromList,toList,(!)) import Numeric.LinearAlgebra.HMatrix hiding (fromList,toList,(!)) import qualified Data.Vector.Storable as V import Foreign import GHC.Real import Foreign.C.String import Foreign.C.Types import qualified Control.Monad as C import Data.Vector.Image.Color.RGB import Data.Vector.Image.Image type WriteImgFunc = Ptr CChar -- ^ Filename -> Ptr RGB8 -- ^ Image Vector -> CInt -- ^ Width of Image -> CInt -- ^ Height of Image -> IO CInt -- ^ When write fails, this returns negative value. When success, this returns 0. foreign import ccall unsafe "readImg" c_readImg :: Ptr CChar -> Ptr (Ptr RGB8) -> Ptr CInt -> Ptr CInt -> IO CInt foreign import ccall unsafe "&freeImg" c_p_freeImg :: FunPtr(Ptr RGB8 -> IO ()) foreign import ccall unsafe "writeBmp" c_writeBmp :: WriteImgFunc foreign import ccall unsafe "writePng" c_writePng :: WriteImgFunc foreign import ccall unsafe "writeJpeg" c_writeJpeg :: WriteImgFunc readImg :: String -- ^ Filename -> IO (Either Int Image8) readImg file = do withCString file $ \ cfile -> do alloca $ \cp -> do alloca $ \cw -> do alloca $ \ch -> do r <- fmap fromIntegral $ c_readImg cfile cp cw ch w <- fmap fromIntegral $ peek cw h <- fmap fromIntegral $ peek ch p <- peek cp :: IO (Ptr RGB8) let n = w*h fp <- newForeignPtr c_p_freeImg p return $ if r < 0 then Left r else Right $ Image w h (unsafeFromForeignPtr fp 0 n) writeImg' :: WriteImgFunc -> String -> Image8 -> IO (Either Int ()) writeImg' func file (Image w h dat) = do r <- fmap fromIntegral $ withCString file $ \ cfile -> do unsafeWith dat $ \p -> do func cfile p (fromIntegral w) (fromIntegral h) return $ if r < 0 then Left r else Right () writeImg :: String -- ^ Filename -> Image8 -- ^ Image Data -> IO (Either Int ()) writeImg file img | L.isSuffixOf ".bmp" file = writeBmp file img | L.isSuffixOf ".BMP" file = writeBmp file img | L.isSuffixOf ".png" file = writePng file img | L.isSuffixOf ".Png" file = writePng file img | L.isSuffixOf ".jpg" file = writeJpeg file img | L.isSuffixOf ".JPG" file = writeJpeg file img | otherwise = return $ Left (-1) writeBmp :: String -- ^ Filename -> Image8 -- ^ Image Data -> IO (Either Int ()) writeBmp = writeImg' c_writeBmp writeJpeg :: String -- ^ Filename -> Image8 -- ^ Image Data -> IO (Either Int ()) writeJpeg = writeImg' c_writeJpeg writePng :: String -- ^ Filename -> Image8 -- ^ Image Data -> IO (Either Int ()) writePng = writeImg' c_writePng
\chapter{Chi Squared Tests} \section{Contingency tables} \section{Fitting a theoretical distribution} \section{Goodness of fit test}
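The statistic used throughout this chapter is, in sketch, the following: for observed counts $O_1,\dots,O_k$ and the counts $E_1,\dots,E_k$ expected under the hypothesised distribution,
\[
  \chi^2 = \sum_{i=1}^{k} \frac{(O_i - E_i)^2}{E_i},
\]
which under the null hypothesis is approximately $\chi^2$-distributed with $k - 1 - p$ degrees of freedom, where $p$ is the number of parameters estimated from the data ($p = 0$ when the distribution is fully specified in advance).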
State Before: K : Type u V : Type v inst✝⁵ : Ring K inst✝⁴ : AddCommGroup V inst✝³ : Module K V V₂ : Type v' inst✝² : AddCommGroup V₂ inst✝¹ : Module K V₂ inst✝ : StrongRankCondition K ι : Type w b : Finset ι h : Basis { x // x ∈ b } K V ⊢ finrank K V = Finset.card b State After: no goals Tactic: rw [finrank_eq_card_basis h, Fintype.card_coe]
State Before: C : Type u₁ inst✝² : Category C Z X Y P : C f : Z ⟶ X g : Z ⟶ Y inl : X ⟶ P inr : Y ⟶ P inst✝¹ : HasZeroObject C inst✝ : HasZeroMorphisms C b : BinaryBicone X Y h : BinaryBicone.IsBilimit b ⊢ IsPushout b.fst b.snd 0 0 State After: C : Type u₁ inst✝² : Category C Z X Y P : C f : Z ⟶ X g : Z ⟶ Y inl : X ⟶ P inr : Y ⟶ P inst✝¹ : HasZeroObject C inst✝ : HasZeroMorphisms C b : BinaryBicone X Y h : BinaryBicone.IsBilimit b ⊢ IsPushout (b.inl ≫ b.fst) 0 0 (0 ≫ 0) Tactic: refine' IsPushout.of_right _ (by simp) (IsPushout.inl_snd' h) State Before: C : Type u₁ inst✝² : Category C Z X Y P : C f : Z ⟶ X g : Z ⟶ Y inl : X ⟶ P inr : Y ⟶ P inst✝¹ : HasZeroObject C inst✝ : HasZeroMorphisms C b : BinaryBicone X Y h : BinaryBicone.IsBilimit b ⊢ IsPushout (b.inl ≫ b.fst) 0 0 (0 ≫ 0) State After: no goals Tactic: simp State Before: C : Type u₁ inst✝² : Category C Z X Y P : C f : Z ⟶ X g : Z ⟶ Y inl : X ⟶ P inr : Y ⟶ P inst✝¹ : HasZeroObject C inst✝ : HasZeroMorphisms C b : BinaryBicone X Y h : BinaryBicone.IsBilimit b ⊢ b.fst ≫ 0 = b.snd ≫ 0 State After: no goals Tactic: simp
/- Copyright (c) 2017 Johannes Hölzl. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Johannes Hölzl -/ import data.set.pairwise /-! # Chains and Zorn's lemmas This file defines chains for an arbitrary relation and proves several formulations of Zorn's Lemma, along with Hausdorff's Maximality Principle. ## Main declarations * `chain c`: A chain `c` is a set of comparable elements. * `max_chain_spec`: Hausdorff's Maximality Principle. * `exists_maximal_of_chains_bounded`: Zorn's Lemma. Many variants are offered. ## Variants The primary statement of Zorn's lemma is `exists_maximal_of_chains_bounded`. Then it is specialized to particular relations: * `(≤)` with `zorn_partial_order` * `(⊆)` with `zorn_subset` * `(⊇)` with `zorn_superset` Lemma names carry modifiers: * `₀`: Quantifies over a set, as opposed to over a type. * `_nonempty`: Doesn't ask to prove that the empty chain is bounded and lets you give an element that will be smaller than the maximal element found (the maximal element is no smaller than any other element, but it can also be incomparable to some). ## How-to This file comes across as confusing to those who haven't yet used it, so here is a detailed walkthrough: 1. Know what relation on which type/set you're looking for. See Variants above. You can discharge some conditions to Zorn's lemma directly using a `_nonempty` variant. 2. Write down the definition of your type/set, put a `suffices : ∃ m, ∀ a, m ≺ a → a ≺ m, { ... },` (or whatever you actually need) followed by a `apply some_version_of_zorn`. 3. Fill in the details. This is where you start talking about chains. A typical proof using Zorn could look like this ```lean lemma zorny_lemma : zorny_statement := begin let s : set α := {x | whatever x}, suffices : ∃ x ∈ s, ∀ y ∈ s, y ⊆ x → y = x, -- or with another operator { exact proof_post_zorn }, apply zorn.zorn_subset, -- or another variant rintro c hcs hc, obtain rfl | hcnemp := c.eq_empty_or_nonempty, -- you might need to disjunct on c empty or not { exact ⟨edge_case_construction, proof_that_edge_case_construction_respects_whatever, proof_that_edge_case_construction_contains_all_stuff_in_c⟩ }, exact ⟨construction, proof_that_construction_respects_whatever, proof_that_construction_contains_all_stuff_in_c⟩, end ``` ## Notes Originally ported from Isabelle/HOL. The [original file](https://isabelle.in.tum.de/dist/library/HOL/HOL/Zorn.html) was written by Jacques D. Fleuriot, Tobias Nipkow, Christian Sternagel. -/ noncomputable theory universes u open set classical open_locale classical namespace zorn section chain parameters {α : Type u} (r : α → α → Prop) local infix ` ≺ `:50 := r /-- A chain is a subset `c` satisfying `x ≺ y ∨ x = y ∨ y ≺ x` for all `x y ∈ c`. 
-/ def chain (c : set α) := c.pairwise (λ x y, x ≺ y ∨ y ≺ x) parameters {r} lemma chain.total_of_refl [is_refl α r] {c} (H : chain c) {x y} (hx : x ∈ c) (hy : y ∈ c) : x ≺ y ∨ y ≺ x := if e : x = y then or.inl (e ▸ refl _) else H _ hx _ hy e lemma chain.mono {c c'} : c' ⊆ c → chain c → chain c' := set.pairwise.mono lemma chain_of_trichotomous [is_trichotomous α r] (s : set α) : chain s := begin intros a _ b _ hab, obtain h | h | h := @trichotomous _ r _ a b, { exact or.inl h }, { exact (hab h).elim }, { exact or.inr h } end lemma chain_univ_iff : chain (univ : set α) ↔ is_trichotomous α r := begin refine ⟨λ h, ⟨λ a b , _⟩, λ h, @chain_of_trichotomous _ _ h univ⟩, rw [or.left_comm, or_iff_not_imp_left], exact h a trivial b trivial, end lemma chain.directed_on [is_refl α r] {c} (H : chain c) : directed_on (≺) c := λ x hx y hy, match H.total_of_refl hx hy with | or.inl h := ⟨y, hy, h, refl _⟩ | or.inr h := ⟨x, hx, refl _, h⟩ end lemma chain_insert {c : set α} {a : α} (hc : chain c) (ha : ∀ b ∈ c, b ≠ a → a ≺ b ∨ b ≺ a) : chain (insert a c) := forall_insert_of_forall (λ x hx, forall_insert_of_forall (hc x hx) (λ hneq, (ha x hx hneq).symm)) (forall_insert_of_forall (λ x hx hneq, ha x hx $ λ h', hneq h'.symm) (λ h, (h rfl).rec _)) /-- `super_chain c₁ c₂` means that `c₂` is a chain that strictly includes `c₁`. -/ def super_chain (c₁ c₂ : set α) : Prop := chain c₂ ∧ c₁ ⊂ c₂ /-- A chain `c` is a maximal chain if there does not exists a chain strictly including `c`. -/ def is_max_chain (c : set α) := chain c ∧ ¬ (∃ c', super_chain c c') /-- Given a set `c`, if there exists a chain `c'` strictly including `c`, then `succ_chain c` is one of these chains. Otherwise it is `c`. -/ def succ_chain (c : set α) : set α := if h : ∃ c', chain c ∧ super_chain c c' then some h else c lemma succ_spec {c : set α} (h : ∃ c', chain c ∧ super_chain c c') : super_chain c (succ_chain c) := let ⟨c', hc'⟩ := h in have chain c ∧ super_chain c (some h), from @some_spec _ (λ c', chain c ∧ super_chain c c') _, by simp [succ_chain, dif_pos, h, this.right] lemma chain_succ {c : set α} (hc : chain c) : chain (succ_chain c) := if h : ∃ c', chain c ∧ super_chain c c' then (succ_spec h).left else by simp [succ_chain, dif_neg, h]; exact hc lemma super_of_not_max {c : set α} (hc₁ : chain c) (hc₂ : ¬ is_max_chain c) : super_chain c (succ_chain c) := begin simp [is_max_chain, not_and_distrib, not_forall_not] at hc₂, cases hc₂.neg_resolve_left hc₁ with c' hc', exact succ_spec ⟨c', hc₁, hc'⟩ end lemma succ_increasing {c : set α} : c ⊆ succ_chain c := if h : ∃ c', chain c ∧ super_chain c c' then have super_chain c (succ_chain c), from succ_spec h, this.right.left else by simp [succ_chain, dif_neg, h, subset.refl] /-- Set of sets reachable from `∅` using `succ_chain` and `⋃₀`. 
-/ inductive chain_closure : set (set α) | succ : ∀ {s}, chain_closure s → chain_closure (succ_chain s) | union : ∀ {s}, (∀ a ∈ s, chain_closure a) → chain_closure (⋃₀ s) lemma chain_closure_empty : ∅ ∈ chain_closure := have chain_closure (⋃₀ ∅), from chain_closure.union $ λ a h, h.rec _, by simp at this; assumption lemma chain_closure_closure : ⋃₀ chain_closure ∈ chain_closure := chain_closure.union $ λ s hs, hs variables {c c₁ c₂ c₃ : set α} private lemma chain_closure_succ_total_aux (hc₁ : c₁ ∈ chain_closure) (hc₂ : c₂ ∈ chain_closure) (h : ∀ {c₃}, c₃ ∈ chain_closure → c₃ ⊆ c₂ → c₂ = c₃ ∨ succ_chain c₃ ⊆ c₂) : c₁ ⊆ c₂ ∨ succ_chain c₂ ⊆ c₁ := begin induction hc₁, case succ : c₃ hc₃ ih { cases ih with ih ih, { have h := h hc₃ ih, cases h with h h, { exact or.inr (h ▸ subset.refl _) }, { exact or.inl h } }, { exact or.inr (subset.trans ih succ_increasing) } }, case union : s hs ih { refine (or_iff_not_imp_right.2 $ λ hn, sUnion_subset $ λ a ha, _), apply (ih a ha).resolve_right, apply mt (λ h, _) hn, exact subset.trans h (subset_sUnion_of_mem ha) } end private lemma chain_closure_succ_total (hc₁ : c₁ ∈ chain_closure) (hc₂ : c₂ ∈ chain_closure) (h : c₁ ⊆ c₂) : c₂ = c₁ ∨ succ_chain c₁ ⊆ c₂ := begin induction hc₂ generalizing c₁ hc₁ h, case succ : c₂ hc₂ ih { have h₁ : c₁ ⊆ c₂ ∨ @succ_chain α r c₂ ⊆ c₁ := (chain_closure_succ_total_aux hc₁ hc₂ $ λ c₁, ih), cases h₁ with h₁ h₁, { have h₂ := ih hc₁ h₁, cases h₂ with h₂ h₂, { exact (or.inr $ h₂ ▸ subset.refl _) }, { exact (or.inr $ subset.trans h₂ succ_increasing) } }, { exact (or.inl $ subset.antisymm h₁ h) } }, case union : s hs ih { apply or.imp_left (λ h', subset.antisymm h' h), apply classical.by_contradiction, simp [not_or_distrib, sUnion_subset_iff, not_forall], intros c₃ hc₃ h₁ h₂, have h := chain_closure_succ_total_aux hc₁ (hs c₃ hc₃) (λ c₄, ih _ hc₃), cases h with h h, { have h' := ih c₃ hc₃ hc₁ h, cases h' with h' h', { exact (h₁ $ h' ▸ subset.refl _) }, { exact (h₂ $ subset.trans h' $ subset_sUnion_of_mem hc₃) } }, { exact (h₁ $ subset.trans succ_increasing h) } } end lemma chain_closure_total (hc₁ : c₁ ∈ chain_closure) (hc₂ : c₂ ∈ chain_closure) : c₁ ⊆ c₂ ∨ c₂ ⊆ c₁ := or.imp_right succ_increasing.trans $ chain_closure_succ_total_aux hc₁ hc₂ $ λ c₃ hc₃, chain_closure_succ_total hc₃ hc₂ lemma chain_closure_succ_fixpoint (hc₁ : c₁ ∈ chain_closure) (hc₂ : c₂ ∈ chain_closure) (h_eq : succ_chain c₂ = c₂) : c₁ ⊆ c₂ := begin induction hc₁, case succ : c₁ hc₁ h { exact or.elim (chain_closure_succ_total hc₁ hc₂ h) (λ h, h ▸ h_eq.symm ▸ subset.refl c₂) id }, case union : s hs ih { exact (sUnion_subset $ λ c₁ hc₁, ih c₁ hc₁) } end lemma chain_closure_succ_fixpoint_iff (hc : c ∈ chain_closure) : succ_chain c = c ↔ c = ⋃₀ chain_closure := ⟨λ h, (subset_sUnion_of_mem hc).antisymm (chain_closure_succ_fixpoint chain_closure_closure hc h), λ h, subset.antisymm (calc succ_chain c ⊆ ⋃₀{c : set α | c ∈ chain_closure} : subset_sUnion_of_mem $ chain_closure.succ hc ... = c : h.symm) succ_increasing⟩ lemma chain_chain_closure (hc : c ∈ chain_closure) : chain c := begin induction hc, case succ : c hc h { exact chain_succ h }, case union : s hs h { have h : ∀ c ∈ s, zorn.chain c := h, exact λ c₁ ⟨t₁, ht₁, (hc₁ : c₁ ∈ t₁)⟩ c₂ ⟨t₂, ht₂, (hc₂ : c₂ ∈ t₂)⟩ hneq, have t₁ ⊆ t₂ ∨ t₂ ⊆ t₁, from chain_closure_total (hs _ ht₁) (hs _ ht₂), or.elim this (λ ht, h t₂ ht₂ c₁ (ht hc₁) c₂ hc₂ hneq) (λ ht, h t₁ ht₁ c₁ hc₁ c₂ (ht hc₂) hneq) } end /-- An explicit maximal chain. `max_chain` is taken to be the union of all sets in `chain_closure`. 
-/ def max_chain := ⋃₀ chain_closure /-- Hausdorff's maximality principle There exists a maximal totally ordered subset of `α`. Note that we do not require `α` to be partially ordered by `r`. -/ theorem max_chain_spec : is_max_chain max_chain := classical.by_contradiction $ λ h, begin obtain ⟨h₁, H⟩ := super_of_not_max (chain_chain_closure chain_closure_closure) h, obtain ⟨h₂, h₃⟩ := ssubset_iff_subset_ne.1 H, exact h₃ ((chain_closure_succ_fixpoint_iff chain_closure_closure).mpr rfl).symm, end /-- Zorn's lemma If every chain has an upper bound, then there exists a maximal element. -/ theorem exists_maximal_of_chains_bounded (h : ∀ c, chain c → ∃ ub, ∀ a ∈ c, a ≺ ub) (trans : ∀ {a b c}, a ≺ b → b ≺ c → a ≺ c) : ∃ m, ∀ a, m ≺ a → a ≺ m := have ∃ ub, ∀ a ∈ max_chain, a ≺ ub, from h _ $ max_chain_spec.left, let ⟨ub, (hub : ∀ a ∈ max_chain, a ≺ ub)⟩ := this in ⟨ub, λ a ha, have chain (insert a max_chain), from chain_insert max_chain_spec.left $ λ b hb _, or.inr $ trans (hub b hb) ha, have a ∈ max_chain, from classical.by_contradiction $ λ h : a ∉ max_chain, max_chain_spec.right $ ⟨insert a max_chain, this, ssubset_insert h⟩, hub a this⟩ /-- A variant of Zorn's lemma. If every nonempty chain of a nonempty type has an upper bound, then there is a maximal element. -/ theorem exists_maximal_of_nonempty_chains_bounded [nonempty α] (h : ∀ c, chain c → c.nonempty → ∃ ub, ∀ a ∈ c, a ≺ ub) (trans : ∀ {a b c}, a ≺ b → b ≺ c → a ≺ c) : ∃ m, ∀ a, m ≺ a → a ≺ m := exists_maximal_of_chains_bounded (λ c hc, (eq_empty_or_nonempty c).elim (λ h, ⟨classical.arbitrary α, λ x hx, (h ▸ hx : x ∈ (∅ : set α)).elim⟩) (h c hc)) (λ a b c, trans) end chain --This lemma isn't under section `chain` because `parameters` messes up with it. Feel free to fix it /-- This can be used to turn `zorn.chain (≥)` into `zorn.chain (≤)` and vice-versa. 
-/ lemma chain.symm {α : Type u} {s : set α} {q : α → α → Prop} (h : chain q s) : chain (flip q) s := h.mono' (λ _ _, or.symm) theorem zorn_partial_order {α : Type u} [partial_order α] (h : ∀ c : set α, chain (≤) c → ∃ ub, ∀ a ∈ c, a ≤ ub) : ∃ m : α, ∀ a, m ≤ a → a = m := let ⟨m, hm⟩ := @exists_maximal_of_chains_bounded α (≤) h (λ a b c, le_trans) in ⟨m, λ a ha, le_antisymm (hm a ha) ha⟩ theorem zorn_nonempty_partial_order {α : Type u} [partial_order α] [nonempty α] (h : ∀ (c : set α), chain (≤) c → c.nonempty → ∃ ub, ∀ a ∈ c, a ≤ ub) : ∃ (m : α), ∀ a, m ≤ a → a = m := let ⟨m, hm⟩ := @exists_maximal_of_nonempty_chains_bounded α (≤) _ h (λ a b c, le_trans) in ⟨m, λ a ha, le_antisymm (hm a ha) ha⟩ theorem zorn_partial_order₀ {α : Type u} [partial_order α] (s : set α) (ih : ∀ c ⊆ s, chain (≤) c → ∃ ub ∈ s, ∀ z ∈ c, z ≤ ub) : ∃ m ∈ s, ∀ z ∈ s, m ≤ z → z = m := let ⟨⟨m, hms⟩, h⟩ := @zorn_partial_order {m // m ∈ s} _ (λ c hc, let ⟨ub, hubs, hub⟩ := ih (subtype.val '' c) (λ _ ⟨⟨x, hx⟩, _, h⟩, h ▸ hx) (by { rintro _ ⟨p, hpc, rfl⟩ _ ⟨q, hqc, rfl⟩ hpq; refine hc _ hpc _ hqc (λ t, hpq (subtype.ext_iff.1 t)) }) in ⟨⟨ub, hubs⟩, λ ⟨y, hy⟩ hc, hub _ ⟨_, hc, rfl⟩⟩) in ⟨m, hms, λ z hzs hmz, congr_arg subtype.val (h ⟨z, hzs⟩ hmz)⟩ theorem zorn_nonempty_partial_order₀ {α : Type u} [partial_order α] (s : set α) (ih : ∀ c ⊆ s, chain (≤) c → ∀ y ∈ c, ∃ ub ∈ s, ∀ z ∈ c, z ≤ ub) (x : α) (hxs : x ∈ s) : ∃ m ∈ s, x ≤ m ∧ ∀ z ∈ s, m ≤ z → z = m := let ⟨⟨m, hms, hxm⟩, h⟩ := @zorn_partial_order {m // m ∈ s ∧ x ≤ m} _ (λ c hc, c.eq_empty_or_nonempty.elim (λ hce, hce.symm ▸ ⟨⟨x, hxs, le_refl _⟩, λ _, false.elim⟩) (λ ⟨m, hmc⟩, let ⟨ub, hubs, hub⟩ := ih (subtype.val '' c) (image_subset_iff.2 $ λ z hzc, z.2.1) (by rintro _ ⟨p, hpc, rfl⟩ _ ⟨q, hqc, rfl⟩ hpq; exact hc p hpc q hqc (mt (by rintro rfl; refl) hpq)) m.1 (mem_image_of_mem _ hmc) in ⟨⟨ub, hubs, le_trans m.2.2 $ hub m.1 $ mem_image_of_mem _ hmc⟩, λ a hac, hub a.1 ⟨a, hac, rfl⟩⟩)) in ⟨m, hms, hxm, λ z hzs hmz, congr_arg subtype.val $ h ⟨z, hzs, le_trans hxm hmz⟩ hmz⟩ theorem zorn_subset {α : Type u} (S : set (set α)) (h : ∀ c ⊆ S, chain (⊆) c → ∃ ub ∈ S, ∀ s ∈ c, s ⊆ ub) : ∃ m ∈ S, ∀ a ∈ S, m ⊆ a → a = m := zorn_partial_order₀ S h theorem zorn_subset_nonempty {α : Type u} (S : set (set α)) (H : ∀ c ⊆ S, chain (⊆) c → c.nonempty → ∃ ub ∈ S, ∀ s ∈ c, s ⊆ ub) (x) (hx : x ∈ S) : ∃ m ∈ S, x ⊆ m ∧ ∀ a ∈ S, m ⊆ a → a = m := zorn_nonempty_partial_order₀ _ (λ c cS hc y yc, H _ cS hc ⟨y, yc⟩) _ hx theorem zorn_superset {α : Type u} (S : set (set α)) (h : ∀ c ⊆ S, chain (⊆) c → ∃ lb ∈ S, ∀ s ∈ c, lb ⊆ s) : ∃ m ∈ S, ∀ a ∈ S, a ⊆ m → a = m := @zorn_partial_order₀ (order_dual (set α)) _ S $ λ c cS hc, h c cS hc.symm theorem zorn_superset_nonempty {α : Type u} (S : set (set α)) (H : ∀ c ⊆ S, chain (⊆) c → c.nonempty → ∃ lb ∈ S, ∀ s ∈ c, lb ⊆ s) (x) (hx : x ∈ S) : ∃ m ∈ S, m ⊆ x ∧ ∀ a ∈ S, a ⊆ m → a = m := @zorn_nonempty_partial_order₀ (order_dual (set α)) _ S (λ c cS hc y yc, H _ cS hc.symm ⟨y, yc⟩) _ hx lemma chain.total {α : Type u} [preorder α] {c : set α} (H : chain (≤) c) : ∀ {x y}, x ∈ c → y ∈ c → x ≤ y ∨ y ≤ x := λ x y, H.total_of_refl lemma chain.image {α β : Type*} (r : α → α → Prop) (s : β → β → Prop) (f : α → β) (h : ∀ x y, r x y → s (f x) (f y)) {c : set α} (hrc : chain r c) : chain s (f '' c) := λ x ⟨a, ha₁, ha₂⟩ y ⟨b, hb₁, hb₂⟩, ha₂ ▸ hb₂ ▸ λ hxy, (hrc a ha₁ b hb₁ (mt (congr_arg f) $ hxy)).elim (or.inl ∘ h _ _) (or.inr ∘ h _ _) end zorn lemma directed_of_chain {α β r} [is_refl β r] {f : α → β} {c : set α} (h : zorn.chain (f ⁻¹'o r) c) : directed r (λ x : {a : α // a ∈ 
c}, f x) := λ ⟨a, ha⟩ ⟨b, hb⟩, classical.by_cases (λ hab : a = b, by simp only [hab, exists_prop, and_self, subtype.exists]; exact ⟨b, hb, refl _⟩) (λ hab, (h a ha b hb hab).elim (λ h : r (f a) (f b), ⟨⟨b, hb⟩, h, refl _⟩) (λ h : r (f b) (f a), ⟨⟨a, ha⟩, refl _, h⟩))
Many free WordPress plugins may be outdated, vulnerable, badly coded, or no longer supported by an active development team. Using such a plugin can be detrimental to your website’s security and can make you a hacking victim through the flaws in that plugin. Always install plugins that have good reviews and ratings, are compatible with your current version of WordPress, and are regularly updated by an active developer team. You can inspect the plugin details before integrating it with your website. For additional hardening there is the premium plugin Hide My WP, which aims to make a hacker’s job considerably harder; a demo is available to try before you buy. The same applies to WordPress themes. Always read the theme reviews and check the rating before you choose a theme. Paying for a theme does not by itself make it more secure or free of vulnerabilities; the main advantage is that you can contact the developers to patch or update it. Badly coded themes can slow your site down or open it up for hackers to exploit. Always keep your WordPress core version and all themes and plugins up to date. You can do this manually, or, if your web host provides an auto installer, let it update WordPress, the themes, and the plugins through a scheduled cron job. Keeping your site in sync with the latest versions prevents hackers from exploiting old vulnerabilities for which a fix is already available. Although this is a simple counter-measure, keeping software updated goes a long way towards ensuring security. Always back up your site regularly and maintain a remote backup location in case of disaster or damage to your site, so that you do not keep all your eggs in one basket. Make sure that your backup is easy to restore in an emergency. While you can back up parts of your website separately (e.g. database, files, images), you can also keep a compressed zip backup of your entire website in a single file; a minimal scripted sketch of this follows below. Auto installer software lets you schedule nightly backups so they happen automatically. Don’t forget to test your website for known exploits or vulnerabilities before the hackers do. Free online tools like the Sucuri Website Scanner will scan your website, suggest security measures, alert you to major flaws, and flag outdated WordPress versions.
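The scripted backup sketch referenced above: a minimal Python example using only the standard library. It is not the auto installer or any plugin named in this post; the paths below are hypothetical placeholders, and the database still needs a separate dump (for example with mysqldump).

```python
# Minimal nightly file-backup sketch -- assumes the (hypothetical) paths below
# exist and that a remote location is mounted at BACKUP_DIR. Schedule via cron.
import datetime
import pathlib
import shutil

WP_ROOT = pathlib.Path("/var/www/html")           # hypothetical WordPress directory
BACKUP_DIR = pathlib.Path("/mnt/remote-backups")  # hypothetical remote mount point


def backup_site() -> pathlib.Path:
    """Zip the whole WordPress file tree into one dated archive."""
    stamp = datetime.date.today().isoformat()
    archive_base = BACKUP_DIR / f"wordpress-files-{stamp}"
    # shutil.make_archive appends the ".zip" extension and returns the full path.
    return pathlib.Path(shutil.make_archive(str(archive_base), "zip", WP_ROOT))


if __name__ == "__main__":
    print(f"Backup written to {backup_site()}")
```

Restoring is then a matter of unzipping the archive back into place and re-importing the database dump taken alongside it.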
(* Title: System_Of_Equations_IArrays.thy Author: Jose Divasón <jose.divasonm at unirioja.es> Author: Jesús Aransay <jesus-maria.aransay at unirioja.es> *) section\<open>Solving systems of equations using the Gauss Jordan algorithm over nested IArrays\<close> theory System_Of_Equations_IArrays imports System_Of_Equations Bases_Of_Fundamental_Subspaces_IArrays begin subsection\<open>Previous definitions and properties\<close> definition greatest_not_zero :: "'a::{zero} iarray => nat" where "greatest_not_zero A = the (List.find (\<lambda>n. A !! n \<noteq> 0) (rev [0..<IArray.length A]))" lemma vec_to_iarray_exists: shows "(\<exists>b. A $ b \<noteq> 0) = IArray.exists (\<lambda>b. (vec_to_iarray A) !! b \<noteq> 0) (IArray[0..<IArray.length (vec_to_iarray A)])" proof (unfold IArray.exists_def length_vec_to_iarray, auto simp del: IArray.sub_def) fix b assume Ab: "A $ b \<noteq> 0" show "\<exists>b\<in>{0..<CARD('a)}. vec_to_iarray A !! b \<noteq> 0" by (rule bexI[of _ "to_nat b"], unfold vec_to_iarray_nth', auto simp add: Ab to_nat_less_card[of b]) next fix b assume b: "b < CARD('a)" and Ab_vec: "vec_to_iarray A !! b \<noteq> 0" show "\<exists>b. A $ b \<noteq> 0" by (rule exI[of _ "from_nat b"], metis Ab_vec vec_to_iarray_nth[OF b]) qed corollary vec_to_iarray_exists': shows "(\<exists>b. A $ b \<noteq> 0) = IArray.exists (\<lambda>b. (vec_to_iarray A) !! b \<noteq> 0) (IArray (rev [0..<IArray.length (vec_to_iarray A)]))" by (simp add: vec_to_iarray_exists Option.is_none_def find_None_iff) lemma not_is_zero_iarray_eq_iff: "(\<exists>b. A $ b \<noteq> 0) = (\<not> is_zero_iarray (vec_to_iarray A))" by (metis (full_types) is_zero_iarray_eq_iff vec_eq_iff zero_index) lemma vec_to_iarray_greatest_not_zero: assumes ex_b: "(\<exists>b. A $ b \<noteq> 0)" shows "greatest_not_zero (vec_to_iarray A) = to_nat (GREATEST b. A $ b \<noteq> 0)" proof - let ?P="(\<lambda>n. (vec_to_iarray A) !! n \<noteq> 0)" let ?xs="(rev [0..<IArray.length (vec_to_iarray A)])" have "\<exists>a. (List.find ?P ?xs) = Some a" proof(rule ccontr, simp, unfold find_None_iff) assume "\<not> (\<exists>x. x \<in> set (rev [0..<length (IArray.list_of (vec_to_iarray A))]) \<and> IArray.list_of (vec_to_iarray A) ! x \<noteq> 0)" thus False using ex_b unfolding set_rev by (auto, unfold IArray.length_def[symmetric] IArray.sub_def[symmetric] length_vec_to_iarray,metis to_nat_less_card vec_to_iarray_nth') qed from this obtain a where a: "(List.find ?P ?xs) = Some a" by blast from this obtain ia where ia_less_length: "ia<length ?xs" and P_xs_ia: "?P (?xs!ia)" and a_eq: "a = ?xs!ia" and all_zero: "(\<forall>j<ia. \<not> ?P (?xs!j))" unfolding find_Some_iff by auto have ia_less_card: "ia < CARD('a)" using ia_less_length by (metis diff_zero length_rev length_upt length_vec_to_iarray) have ia_less_length': "ia < length ([0..<IArray.length (vec_to_iarray A)])" using ia_less_length unfolding length_rev . have a_less_card: "a < CARD('a)" unfolding a_eq unfolding rev_nth[OF ia_less_length'] using nth_upt[of 0 "(length [0..<IArray.length (vec_to_iarray A)] - Suc ia)" "(length [0..<IArray.length (vec_to_iarray A)])" ] by (metis diff_less length_upt length_vec_to_iarray minus_nat.diff_0 plus_nat.add_0 zero_less_Suc zero_less_card_finite) have "(GREATEST b. A $ b \<noteq> 0) = from_nat a" proof (rule Greatest_equality) have "A $ from_nat a = (vec_to_iarray A) !! a" by (rule vec_to_iarray_nth[symmetric,OF a_less_card]) also have "... \<noteq> 0" using P_xs_ia unfolding a_eq[symmetric] . finally show "A $ from_nat a \<noteq> 0" . 
next fix y assume Ay: "A $ y \<noteq> 0" show "y \<le> from_nat a" proof (rule ccontr) assume "\<not> y \<le> from_nat a" hence y_greater_a: "y > from_nat a" by simp have y_greater_a': "to_nat y > a" using y_greater_a using to_nat_mono[of "from_nat a" y] using to_nat_from_nat_id by (metis a_less_card) have "a = ?xs ! ia" using a_eq . also have "... = [0..<IArray.length (vec_to_iarray A)] ! (length [0..<IArray.length (vec_to_iarray A)] - Suc ia)" by (rule rev_nth[OF ia_less_length']) also have "... = 0 + (length [0..<IArray.length (vec_to_iarray A)] - Suc ia)" apply (rule nth_upt) using ia_less_length' by fastforce also have "... = (length [0..<IArray.length (vec_to_iarray A)] - Suc ia)" by simp finally have "a = (length [0..<IArray.length (vec_to_iarray A)] - Suc ia)" . hence ia_eq: "ia = length [0..<IArray.length (vec_to_iarray A)] - (Suc a)" by (metis Suc_diff_Suc Suc_eq_plus1_left diff_diff_cancel less_imp_le ia_less_length length_rev) define ja where "ja = length [0..<IArray.length (vec_to_iarray A)] - to_nat y - 1" have ja_less_length: "ja < length [0..<IArray.length (vec_to_iarray A)]" unfolding ja_def using ia_eq ia_less_length' by (simp add: algebra_simps ) have suc_i_le: "IArray.length (vec_to_iarray A)\<ge>Suc (to_nat y)" unfolding vec_to_iarray_def using to_nat_less_card[of y] by auto have "?xs ! ja = [0..<IArray.length (vec_to_iarray A)] ! (length [0..<IArray.length (vec_to_iarray A)] - Suc ja)" unfolding rev_nth[OF ja_less_length] .. also have "... = 0 + (length [0..<IArray.length (vec_to_iarray A)] - Suc ja)" apply (rule nth_upt, auto simp del: IArray.length_def) unfolding ja_def by (metis diff_Suc_less ia_less_length' length_upt less_nat_zero_code minus_nat.diff_0 neq0_conv) also have "... = (length [0..<IArray.length (vec_to_iarray A)] - Suc ja)" by simp also have "... = to_nat y" unfolding ja_def using suc_i_le by force finally have xs_ja_eq_y: "?xs ! ja = to_nat y" . have ja_less_ia: "ja < ia" unfolding ja_def ia_eq by (auto simp del: IArray.length_def, metis Suc_leI suc_i_le diff_less_mono2 le_imp_less_Suc less_le_trans y_greater_a') hence eq_0: "vec_to_iarray A !! (?xs ! ja) = 0" using all_zero by simp hence "A $ y = 0" using vec_to_iarray_nth'[of A y] unfolding xs_ja_eq_y by simp thus False using Ay by contradiction qed qed thus ?thesis unfolding greatest_not_zero_def a unfolding to_nat_eq[symmetric] unfolding to_nat_from_nat_id[OF a_less_card] by simp qed subsection\<open>Consistency and inconsistency\<close> definition "consistent_iarrays A b = (let GJ=Gauss_Jordan_iarrays_PA A; rank_A = length [x\<leftarrow>IArray.list_of (snd GJ) . 
\<not> is_zero_iarray x]; P_mult_b = fst(GJ) *iv b in (rank_A \<ge> (if (\<not> is_zero_iarray P_mult_b) then (greatest_not_zero P_mult_b + 1) else 0)))" definition "inconsistent_iarrays A b = (\<not> consistent_iarrays A b)" lemma matrix_to_iarray_consistent[code]: "consistent A b = consistent_iarrays (matrix_to_iarray A) (vec_to_iarray b)" unfolding consistent_eq_rank_ge_code unfolding consistent_iarrays_def Let_def unfolding Gauss_Jordan_PA_eq unfolding rank_Gauss_Jordan_code[symmetric, unfolded Let_def] unfolding snd_Gauss_Jordan_iarrays_PA_eq unfolding rank_iarrays_code[symmetric] unfolding matrix_to_iarray_rank unfolding matrix_to_iarray_fst_Gauss_Jordan_PA[symmetric] unfolding vec_to_iarray_matrix_matrix_mult[symmetric] unfolding not_is_zero_iarray_eq_iff using vec_to_iarray_greatest_not_zero[unfolded not_is_zero_iarray_eq_iff] by force lemma matrix_to_iarray_inconsistent[code]: "inconsistent A b = inconsistent_iarrays (matrix_to_iarray A) (vec_to_iarray b)" unfolding inconsistent_def inconsistent_iarrays_def unfolding matrix_to_iarray_consistent .. definition "solve_consistent_rref_iarrays A b = IArray.of_fun (\<lambda>j. if (IArray.exists (\<lambda>i. A !! i !! j = 1 \<and> j=least_non_zero_position_of_vector (row_iarray i A)) (IArray[0..<nrows_iarray A])) then b !! (least_non_zero_position_of_vector (column_iarray j A)) else 0) (ncols_iarray A)" lemma exists_solve_consistent_rref: fixes A::"'a::{field}^'cols::{mod_type}^'rows::{mod_type}" assumes rref: "reduced_row_echelon_form A" shows "(\<exists>i. A $ i $ j = 1 \<and> j = (LEAST n. A $ i $ n \<noteq> 0)) = (IArray.exists (\<lambda>i. (matrix_to_iarray A) !! i !! (to_nat j) = 1 \<and> (to_nat j)=least_non_zero_position_of_vector (row_iarray i (matrix_to_iarray A))) (IArray[0..<nrows_iarray (matrix_to_iarray A)]))" proof (rule) assume "\<exists>i. A $ i $ j = 1 \<and> j = (LEAST n. A $ i $ n \<noteq> 0)" from this obtain i where Aij: "A $ i $ j = 1" and j_eq: "j = (LEAST n. A $ i $ n \<noteq> 0)" by blast show "IArray.exists (\<lambda>i. matrix_to_iarray A !! i !! to_nat j = 1 \<and> to_nat j = least_non_zero_position_of_vector (row_iarray i (matrix_to_iarray A))) (IArray [0..<nrows_iarray (matrix_to_iarray A)])" unfolding IArray.exists_def find_Some_iff apply (rule bexI[of _ "to_nat i"])+ proof (auto, unfold IArray.sub_def[symmetric]) show "to_nat i < nrows_iarray (matrix_to_iarray A)" unfolding matrix_to_iarray_nrows[symmetric] nrows_def using to_nat_less_card by fast have "to_nat j = to_nat (LEAST n. A $ i $ n \<noteq> 0)" unfolding j_eq by simp also have "... = to_nat (LEAST n. A $ i $ n \<noteq> 0 \<and> 0\<le>n)" by (metis least_mod_type) also have "...= least_non_zero_position_of_vector_from_index (vec_to_iarray (row i A)) (to_nat (0::'cols))" proof (rule vec_to_iarray_least_non_zero_position_of_vector_from_index''[symmetric, of "0::'cols" i A]) show "\<not> vector_all_zero_from_index (to_nat (0::'cols), vec_to_iarray (row i A))" unfolding vector_all_zero_from_index_eq[symmetric, of "0::'cols" "row i A"] unfolding row_def vec_nth_inverse using Aij least_mod_type[of j] by fastforce qed also have "... = least_non_zero_position_of_vector (row_iarray (to_nat i) (matrix_to_iarray A))" unfolding vec_to_iarray_row least_non_zero_position_of_vector_def unfolding to_nat_0 .. finally show "to_nat j = least_non_zero_position_of_vector (row_iarray (to_nat i) (matrix_to_iarray A))" . show "matrix_to_iarray A !! mod_type_class.to_nat i !! mod_type_class.to_nat j = 1" unfolding matrix_to_iarray_nth using Aij . 
qed next assume ex_eq: "IArray.exists (\<lambda>i. matrix_to_iarray A !! i !! to_nat j = 1 \<and> to_nat j = least_non_zero_position_of_vector (row_iarray i (matrix_to_iarray A))) (IArray [0..<nrows_iarray (matrix_to_iarray A)])" have "\<exists>y. List.find (\<lambda>i. matrix_to_iarray A !! i !! to_nat j = 1 \<and> to_nat j = least_non_zero_position_of_vector (row_iarray i (matrix_to_iarray A))) [0..<nrows_iarray (matrix_to_iarray A)] = Some y" proof (rule ccontr, simp del: IArray.length_def IArray.sub_def, unfold find_None_iff) assume" \<not> (\<exists>x. x \<in> set [0..<nrows_iarray (matrix_to_iarray A)] \<and> matrix_to_iarray A !! x !! mod_type_class.to_nat j = 1 \<and> mod_type_class.to_nat j = least_non_zero_position_of_vector (row_iarray x (matrix_to_iarray A)))" thus False using ex_eq unfolding IArray.exists_def by auto qed from this obtain y where y: "List.find (\<lambda>i. matrix_to_iarray A !! i !! to_nat j = 1 \<and> to_nat j = least_non_zero_position_of_vector (row_iarray i (matrix_to_iarray A))) [0..<nrows_iarray (matrix_to_iarray A)] = Some y" by blast from this obtain i where i_less_length: "i<length [0..<nrows_iarray (matrix_to_iarray A)]" and Aij_1: "matrix_to_iarray A !! ([0..<nrows_iarray (matrix_to_iarray A)] ! i) !! to_nat j = 1" and j_eq: "to_nat j = least_non_zero_position_of_vector (row_iarray ([0..<nrows_iarray (matrix_to_iarray A)] ! i) (matrix_to_iarray A))" and y_eq: "y = [0..<nrows_iarray (matrix_to_iarray A)] ! i" and least: "(\<forall>ja<i. \<not> (matrix_to_iarray A !! ([0..<nrows_iarray (matrix_to_iarray A)] ! ja) !! to_nat j = 1 \<and> to_nat j = least_non_zero_position_of_vector (row_iarray ([0..<nrows_iarray (matrix_to_iarray A)] ! ja) (matrix_to_iarray A))))" unfolding find_Some_iff by blast show "\<exists>i. A $ i $ j = 1 \<and> j = (LEAST n. A $ i $ n \<noteq> 0)" proof (rule exI[of _ "from_nat i"], rule conjI) have i_rw: "[0..<nrows_iarray (matrix_to_iarray A)] ! i = i" using nth_upt[of 0 i "nrows_iarray (matrix_to_iarray A)"] using i_less_length by auto have i_less_card: "i < CARD ('rows)" using i_less_length unfolding nrows_iarray_def matrix_to_iarray_def by auto show A_ij: "A $ from_nat i $ j = 1" using Aij_1 unfolding i_rw using matrix_to_iarray_nth[of A "from_nat i" j] unfolding to_nat_from_nat_id[OF i_less_card] by simp have "to_nat j = least_non_zero_position_of_vector (row_iarray ([0..<nrows_iarray (matrix_to_iarray A)] ! i) (matrix_to_iarray A))" using j_eq . also have "... = least_non_zero_position_of_vector_from_index (row_iarray i (matrix_to_iarray A)) 0" unfolding least_non_zero_position_of_vector_def i_rw .. also have "... = least_non_zero_position_of_vector_from_index (vec_to_iarray (row (from_nat i) A)) (to_nat (0::'cols))" unfolding vec_to_iarray_row unfolding to_nat_from_nat_id[OF i_less_card] unfolding to_nat_0 .. also have "... = to_nat (LEAST n. A $ (from_nat i) $ n \<noteq> 0 \<and> 0 \<le> n)" proof (rule vec_to_iarray_least_non_zero_position_of_vector_from_index'') show "\<not> vector_all_zero_from_index (to_nat (0::'cols), vec_to_iarray (row (from_nat i) A))" unfolding vector_all_zero_from_index_eq[symmetric] using A_ij by (metis iarray_to_vec_vec_to_iarray least_mod_type vec_matrix vec_to_iarray_row' zero_neq_one) qed also have "... = to_nat (LEAST n. A $ (from_nat i) $ n \<noteq> 0)" using least_mod_type by metis finally show "j = (LEAST n. A $ from_nat i $ n \<noteq> 0)" unfolding to_nat_eq . 
qed qed lemma to_nat_the_solve_consistent_rref: fixes A::"'a::{field}^'cols::{mod_type}^'rows::{mod_type}" assumes rref: "reduced_row_echelon_form A" and exists: "(\<exists>i. A $ i $ j = 1 \<and> j = (LEAST n. A $ i $ n \<noteq> 0))" shows "to_nat (THE i. A $ i $ j = 1) = least_non_zero_position_of_vector (column_iarray (to_nat j) (matrix_to_iarray A))" proof - obtain i where Aij: "A $ i $ j = 1" and j:"j = (LEAST n. A $ i $ n \<noteq> 0)" using exists by blast have "least_non_zero_position_of_vector (column_iarray (to_nat j) (matrix_to_iarray A)) = least_non_zero_position_of_vector (vec_to_iarray (column j A))" unfolding vec_to_iarray_column .. also have "... = least_non_zero_position_of_vector_from_index (vec_to_iarray (column j A)) (to_nat (0::'rows))" unfolding least_non_zero_position_of_vector_def to_nat_0 .. also have "... = to_nat (LEAST n. A $ n $ j \<noteq> 0 \<and> 0 \<le> n)" proof (rule vec_to_iarray_least_non_zero_position_of_vector_from_index') show "\<not> vector_all_zero_from_index (to_nat (0::'rows), vec_to_iarray (column j A))" unfolding vector_all_zero_from_index_eq[symmetric] column_def using Aij least_mod_type[of i] by fastforce qed also have "... = to_nat (LEAST n. A $ n $ j \<noteq> 0)" using least_mod_type by metis finally have least_eq: "least_non_zero_position_of_vector (column_iarray (to_nat j) (matrix_to_iarray A)) = to_nat (LEAST n. A $ n $ j \<noteq> 0)" . have i_eq_least: "i=(LEAST n. A $ n $ j \<noteq> 0)" proof (rule Least_equality[symmetric]) show "A $ i $ j \<noteq> 0" by (metis Aij zero_neq_one) show "\<And>y. A $ y $ j \<noteq> 0 \<Longrightarrow> i \<le> y" by (metis (mono_tags) Aij is_zero_row_def' j order_refl rref rref_condition4 zero_neq_one) qed have the_eq_least_pos: "(THE i. A $ i $ j = 1) = from_nat (least_non_zero_position_of_vector (column_iarray (to_nat j) (matrix_to_iarray A)))" proof (rule the_equality) show " A $ from_nat (least_non_zero_position_of_vector (column_iarray (to_nat j) (matrix_to_iarray A))) $ j = 1" unfolding least_eq from_nat_to_nat_id i_eq_least[symmetric] using Aij . fix a assume a: "A $ a $ j = 1" show "a = from_nat (least_non_zero_position_of_vector (column_iarray (to_nat j) (matrix_to_iarray A)))" unfolding least_eq from_nat_to_nat_id by (metis Aij a i_eq_least is_zero_row_def' j rref rref_condition4_explicit zero_neq_one) qed have "to_nat (THE i. A $ i $ j = 1) = to_nat (from_nat (least_non_zero_position_of_vector (column_iarray (to_nat j) (matrix_to_iarray A)))::'rows)" using the_eq_least_pos by auto also have "... = (least_non_zero_position_of_vector (column_iarray (to_nat j) (matrix_to_iarray A)))" by (rule to_nat_from_nat_id, unfold least_eq, simp add: to_nat_less_card) also have "... = to_nat (LEAST n. A $ n $ j \<noteq> 0)" unfolding least_eq from_nat_to_nat_id .. finally have "(THE i. A $ i $ j = 1) = (LEAST n. A $ n $ j \<noteq> 0)" unfolding to_nat_eq . thus ?thesis unfolding least_eq from_nat_to_nat_id unfolding to_nat_eq . 
qed lemma iarray_exhaust2: "(xs = ys) = (IArray.list_of xs = IArray.list_of ys)" by (metis iarray.exhaust list_of.simps) lemma vec_to_iarray_solve_consistent_rref: fixes A::"'a::{field}^'cols::{mod_type}^'rows::{mod_type}" assumes rref: "reduced_row_echelon_form A" shows "vec_to_iarray (solve_consistent_rref A b) = solve_consistent_rref_iarrays (matrix_to_iarray A) (vec_to_iarray b)" proof(unfold iarray_exhaust2 list_eq_iff_nth_eq IArray.length_def[symmetric] IArray.sub_def[symmetric], rule conjI) show "IArray.length (vec_to_iarray (solve_consistent_rref A b)) = IArray.length (solve_consistent_rref_iarrays (matrix_to_iarray A) (vec_to_iarray b))" unfolding solve_consistent_rref_def solve_consistent_rref_iarrays_def unfolding ncols_iarray_def matrix_to_iarray_def by (simp add: vec_to_iarray_def) show "\<forall>i<IArray.length (vec_to_iarray (solve_consistent_rref A b)). vec_to_iarray (solve_consistent_rref A b) !! i = solve_consistent_rref_iarrays (matrix_to_iarray A) (vec_to_iarray b) !! i" proof (clarify) fix i assume i: "i < IArray.length (vec_to_iarray (solve_consistent_rref A b))" hence i_less_card: "i<CARD('cols)" unfolding vec_to_iarray_def by auto hence i_less_ncols: "i<(ncols_iarray (matrix_to_iarray A))" unfolding ncols_eq_card_columns . show "vec_to_iarray (solve_consistent_rref A b) !! i = solve_consistent_rref_iarrays (matrix_to_iarray A) (vec_to_iarray b) !! i" unfolding vec_to_iarray_nth[OF i_less_card] unfolding solve_consistent_rref_def unfolding vec_lambda_beta unfolding solve_consistent_rref_iarrays_def unfolding of_fun_nth[OF i_less_ncols] unfolding exists_solve_consistent_rref[OF rref, of "from_nat i", symmetric, unfolded to_nat_from_nat_id[OF i_less_card]] using to_nat_the_solve_consistent_rref[OF rref, of "from_nat i", symmetric, unfolded to_nat_from_nat_id[OF i_less_card]] using vec_to_iarray_nth' by metis qed qed subsection\<open>Independence and dependence\<close> definition "independent_and_consistent_iarrays A b = (let GJ = Gauss_Jordan_iarrays_PA A; rank_A = length [x\<leftarrow>IArray.list_of (snd GJ) . \<not> is_zero_iarray x]; P_mult_b = fst GJ *iv b; consistent_A = ((if \<not> is_zero_iarray P_mult_b then greatest_not_zero P_mult_b + 1 else 0) \<le> rank_A); dim_solution_set = ncols_iarray A - rank_A in consistent_A \<and> dim_solution_set = 0)" definition "dependent_and_consistent_iarrays A b = (let GJ = Gauss_Jordan_iarrays_PA A; rank_A = length [x\<leftarrow>IArray.list_of (snd GJ) . \<not> is_zero_iarray x]; P_mult_b = fst GJ *iv b; consistent_A = ((if \<not> is_zero_iarray P_mult_b then greatest_not_zero P_mult_b + 1 else 0) \<le> rank_A); dim_solution_set = ncols_iarray A - rank_A in consistent_A \<and> dim_solution_set > 0)" lemma matrix_to_iarray_independent_and_consistent[code]: shows "independent_and_consistent A b = independent_and_consistent_iarrays (matrix_to_iarray A) (vec_to_iarray b)" unfolding independent_and_consistent_def unfolding independent_and_consistent_iarrays_def unfolding dim_solution_set_homogeneous_eq_dim_null_space unfolding matrix_to_iarray_consistent unfolding consistent_iarrays_def unfolding dim_null_space_iarray unfolding rank_iarrays_code unfolding snd_Gauss_Jordan_iarrays_PA_eq[symmetric] unfolding Let_def .. 
lemma matrix_to_iarray_dependent_and_consistent[code]: shows "dependent_and_consistent A b = dependent_and_consistent_iarrays (matrix_to_iarray A) (vec_to_iarray b)" unfolding dependent_and_consistent_def unfolding dependent_and_consistent_iarrays_def unfolding dim_solution_set_homogeneous_eq_dim_null_space unfolding matrix_to_iarray_consistent unfolding consistent_iarrays_def unfolding dim_null_space_iarray unfolding rank_iarrays_code unfolding snd_Gauss_Jordan_iarrays_PA_eq[symmetric] unfolding Let_def .. subsection\<open>Solve a system of equations over nested IArrays\<close> definition "solve_system_iarrays A b = (let A' = Gauss_Jordan_iarrays_PA A in (snd A', fst A' *iv b))" lemma matrix_to_iarray_fst_solve_system: "matrix_to_iarray (fst (solve_system A b)) = fst (solve_system_iarrays (matrix_to_iarray A) (vec_to_iarray b))" unfolding solve_system_def solve_system_iarrays_def Let_def fst_conv by (metis matrix_to_iarray_snd_Gauss_Jordan_PA) lemma vec_to_iarray_snd_solve_system: "vec_to_iarray (snd (solve_system A b)) = snd (solve_system_iarrays (matrix_to_iarray A) (vec_to_iarray b))" unfolding solve_system_def solve_system_iarrays_def Let_def snd_conv by (metis matrix_to_iarray_fst_Gauss_Jordan_PA vec_to_iarray_matrix_matrix_mult) definition "solve_iarrays A b = (let GJ_P=Gauss_Jordan_iarrays_PA A; P_mult_b = fst GJ_P *iv b; rank_A = length [x\<leftarrow>IArray.list_of (snd GJ_P) . \<not> is_zero_iarray x]; consistent_Ab = (if \<not> is_zero_iarray P_mult_b then greatest_not_zero P_mult_b + 1 else 0) \<le> rank_A; GJ_transpose = Gauss_Jordan_iarrays_PA (transpose_iarray A); basis = set (map (\<lambda>i. row_iarray i (fst GJ_transpose)) [rank_A..<ncols_iarray A]) in (if consistent_Ab then Some (solve_consistent_rref_iarrays (snd GJ_P) P_mult_b,basis) else None))" definition "pair_vec_vecset A = (if Option.is_none A then None else Some (vec_to_iarray (fst (the A)), vec_to_iarray` (snd (the A))))" lemma pair_vec_vecset_solve[code_unfold]: shows "pair_vec_vecset (solve A b) = solve_iarrays (matrix_to_iarray A) (vec_to_iarray b)" unfolding pair_vec_vecset_def proof (auto) assume none_solve_Ab: "Option.is_none (solve A b)" show "None = solve_iarrays (matrix_to_iarray A) (vec_to_iarray b)" proof - define GJ_P where "GJ_P = Gauss_Jordan_iarrays_PA (matrix_to_iarray A)" define P_mult_b where "P_mult_b = fst GJ_P *iv vec_to_iarray b" define rank_A where "rank_A = length [x\<leftarrow>IArray.list_of (snd GJ_P). \<not> is_zero_iarray x]" have "\<not> consistent A b" using none_solve_Ab unfolding solve_def unfolding Option.is_none_def by auto hence "\<not> consistent_iarrays (matrix_to_iarray A) (vec_to_iarray b)" using matrix_to_iarray_consistent by auto hence "\<not> (if \<not> is_zero_iarray P_mult_b then greatest_not_zero P_mult_b + 1 else 0) \<le> rank_A" unfolding GJ_P_def P_mult_b_def rank_A_def using consistent_iarrays_def unfolding Let_def by fast thus ?thesis unfolding solve_iarrays_def Let_def unfolding GJ_P_def P_mult_b_def rank_A_def by presburger qed next assume not_none: "\<not> Option.is_none (solve A b)" show "Some (vec_to_iarray (fst (the (solve A b))), vec_to_iarray ` snd (the (solve A b))) = solve_iarrays (matrix_to_iarray A) (vec_to_iarray b)" proof - define GJ_P where "GJ_P = Gauss_Jordan_iarrays_PA (matrix_to_iarray A)" define P_mult_b where "P_mult_b = fst GJ_P *iv vec_to_iarray b" define rank_A where "rank_A = length [x\<leftarrow>IArray.list_of (snd GJ_P) . 
\<not> is_zero_iarray x]" define GJ_transpose where "GJ_transpose = Gauss_Jordan_iarrays_PA (transpose_iarray (matrix_to_iarray A))" define basis where "basis = set (map (\<lambda>i. row_iarray i (fst GJ_transpose)) [rank_A..<ncols_iarray (matrix_to_iarray A)])" define P_mult_b where "P_mult_b = fst GJ_P *iv vec_to_iarray b" have consistent_Ab: "consistent A b" using not_none unfolding solve_def unfolding Option.is_none_def by metis hence "consistent_iarrays (matrix_to_iarray A) (vec_to_iarray b)" using matrix_to_iarray_consistent by auto hence "(if \<not> is_zero_iarray P_mult_b then greatest_not_zero P_mult_b + 1 else 0) \<le> rank_A" unfolding GJ_P_def P_mult_b_def rank_A_def using consistent_iarrays_def unfolding Let_def by fast hence solve_iarrays_rw: "solve_iarrays (matrix_to_iarray A) (vec_to_iarray b) = Some (solve_consistent_rref_iarrays (snd GJ_P) P_mult_b, basis)" unfolding solve_iarrays_def Let_def P_mult_b_def GJ_P_def rank_A_def basis_def GJ_transpose_def by auto have snd_rw: "vec_to_iarray ` basis_null_space A = basis" unfolding basis_def GJ_transpose_def rank_A_def GJ_P_def unfolding vec_to_iarray_basis_null_space unfolding basis_null_space_iarrays_def Let_def unfolding snd_Gauss_Jordan_iarrays_PA_eq unfolding rank_iarrays_code[symmetric] unfolding matrix_to_iarray_transpose[symmetric] unfolding matrix_to_iarray_rank[symmetric] unfolding rank_transpose[symmetric, of A] .. have fst_rw: "vec_to_iarray (solve_consistent_rref (fst (solve_system A b)) (snd (solve_system A b))) = solve_consistent_rref_iarrays (snd GJ_P) P_mult_b" using vec_to_iarray_solve_consistent_rref[OF rref_Gauss_Jordan, of A "fst (Gauss_Jordan_PA A) *v b"] unfolding solve_system_def Let_def fst_conv unfolding Gauss_Jordan_PA_eq snd_conv unfolding GJ_P_def P_mult_b_def unfolding vec_to_iarray_matrix_matrix_mult unfolding matrix_to_iarray_fst_Gauss_Jordan_PA[symmetric] unfolding matrix_to_iarray_snd_Gauss_Jordan_PA[symmetric] unfolding Gauss_Jordan_PA_eq . show ?thesis unfolding solve_iarrays_rw unfolding solve_def if_P[OF consistent_Ab] option.sel fst_conv snd_conv unfolding fst_rw snd_rw .. qed qed end
~% gap ######### ###### ########### ### ############# ###### ############ #### ############## ######## ############# ##### ############### ######## ##### ###### ##### ###### # ######### ##### ##### ###### ###### ########## ##### ##### ####### ##### ##### #### ##### ###### ######## #### ##### ##### ############# ### #### ##### ####### #### #### ########### #### #### ##### ####### ##### ##### ###### #### #### ##### ####### ##### ##### ##### ############# ##### ##### ################ ##### ############# ###### ##### ################ ##### ############# ################ ################## ##### #### ############### ##### ##### ##### #### ############# ##### ##### ##### #### ######### ##### ##### ##### #### Information at: http://www.gap-system.org Try '?help' for help. See also '?copyright' and '?authors' Loading the library. Please be patient, this may take a while. GAP4, Version: 4.4.12 of 17-Dec-2008, x86_64-unknown-linux-gnu-gcc Components: small 2.1, small2 2.0, small3 2.0, small4 1.0, small5 1.0, small6 1.0, small7 1.0, small8 1.0, small9 1.0, small10 0.2, id2 3.0, id3 2.1, id4 1.0, id5 1.0, id6 1.0, id9 1.0, id10 0.1, trans 1.0, prim 2.1 loaded. Packages: AClib 1.1, Polycyclic 2.6, Alnuth 2.2.5, AutPGrp 1.4, CrystCat 1.1.3, Cryst 4.1.6, CRISP 1.3.2, CTblLib 1.1.3, TomLib 1.1.4, FactInt 1.5.2, GAPDoc 1.2, FGA 1.1.0.1, IRREDSOL 1.1.2, LAGUNA 3.5.0, Sophus 1.23, Polenta 1.2.7, ResClasses 2.5.3 loaded. gap> join := function(a, b, sep) > return Concatenation(a, sep, sep, b); > end; function( a, b, sep ) ... end gap> gap> join("Rosetta", "Code", ":"); "Rosetta::Code" gap>
State Before: n : ℕ h : sqrt n = 0 ⊢ sqrt n < 1 State After: n : ℕ h : sqrt n = 0 ⊢ 0 < 1 Tactic: rw [h] State Before: n : ℕ h : sqrt n = 0 ⊢ 0 < 1 State After: no goals Tactic: decide State Before: n : ℕ ⊢ n = 0 → sqrt n = 0 State After: ⊢ sqrt 0 = 0 Tactic: rintro rfl State Before: ⊢ sqrt 0 = 0 State After: no goals Tactic: simp
/- Copyright (c) 2022 Yaël Dillies. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Yaël Dillies ! This file was ported from Lean 3 source module data.set.sigma ! leanprover-community/mathlib commit 448144f7ae193a8990cb7473c9e9a01990f64ac7 ! Please do not edit these lines, except to modify the commit id ! if you have ported upstream changes. -/ import Mathbin.Data.Set.Image /-! # Sets in sigma types > THIS FILE IS SYNCHRONIZED WITH MATHLIB4. > Any changes to this file require a corresponding PR to mathlib4. This file defines `set.sigma`, the indexed sum of sets. -/ namespace Set variable {ι ι' : Type _} {α β : ι → Type _} {s s₁ s₂ : Set ι} {t t₁ t₂ : ∀ i, Set (α i)} {u : Set (Σi, α i)} {x : Σi, α i} {i j : ι} {a : α i} /- warning: set.range_sigma_mk -> Set.range_sigmaMk is a dubious translation: lean 3 declaration is forall {ι : Type.{u1}} {α : ι -> Type.{u2}} (i : ι), Eq.{succ (max u1 u2)} (Set.{max u1 u2} (Sigma.{u1, u2} ι α)) (Set.range.{max u1 u2, succ u2} (Sigma.{u1, u2} ι α) (α i) (Sigma.mk.{u1, u2} ι α i)) (Set.preimage.{max u1 u2, u1} (Sigma.{u1, u2} ι α) ι (Sigma.fst.{u1, u2} ι α) (Singleton.singleton.{u1, u1} ι (Set.{u1} ι) (Set.hasSingleton.{u1} ι) i)) but is expected to have type forall {ι : Type.{u2}} {α : ι -> Type.{u1}} (i : ι), Eq.{max (succ u2) (succ u1)} (Set.{max u2 u1} (Sigma.{u2, u1} ι α)) (Set.range.{max u2 u1, succ u1} (Sigma.{u2, u1} ι α) (α i) (Sigma.mk.{u2, u1} ι α i)) (Set.preimage.{max u1 u2, u2} (Sigma.{u2, u1} ι α) ι (Sigma.fst.{u2, u1} ι α) (Singleton.singleton.{u2, u2} ι (Set.{u2} ι) (Set.instSingletonSet.{u2} ι) i)) Case conversion may be inaccurate. Consider using '#align set.range_sigma_mk Set.range_sigmaMkₓ'. -/ @[simp] theorem range_sigmaMk (i : ι) : range (Sigma.mk i : α i → Sigma α) = Sigma.fst ⁻¹' {i} := by apply subset.antisymm · rintro _ ⟨b, rfl⟩ simp · rintro ⟨x, y⟩ (rfl | _) exact mem_range_self y #align set.range_sigma_mk Set.range_sigmaMk /- warning: set.preimage_image_sigma_mk_of_ne -> Set.preimage_image_sigmaMk_of_ne is a dubious translation: lean 3 declaration is forall {ι : Type.{u1}} {α : ι -> Type.{u2}} {i : ι} {j : ι}, (Ne.{succ u1} ι i j) -> (forall (s : Set.{u2} (α j)), Eq.{succ u2} (Set.{u2} (α i)) (Set.preimage.{u2, max u1 u2} (α i) (Sigma.{u1, u2} ι (fun {j : ι} => α j)) (Sigma.mk.{u1, u2} ι (fun {j : ι} => α j) i) (Set.image.{u2, max u1 u2} (α j) (Sigma.{u1, u2} ι (fun {j : ι} => α j)) (Sigma.mk.{u1, u2} ι (fun {j : ι} => α j) j) s)) (EmptyCollection.emptyCollection.{u2} (Set.{u2} (α i)) (Set.hasEmptyc.{u2} (α i)))) but is expected to have type forall {ι : Type.{u2}} {α : ι -> Type.{u1}} {i : ι} {j : ι}, (Ne.{succ u2} ι i j) -> (forall (s : Set.{u1} (α j)), Eq.{succ u1} (Set.{u1} (α i)) (Set.preimage.{u1, max u2 u1} (α i) (Sigma.{u2, u1} ι α) (Sigma.mk.{u2, u1} ι α i) (Set.image.{u1, max u2 u1} (α j) (Sigma.{u2, u1} ι α) (Sigma.mk.{u2, u1} ι α j) s)) (EmptyCollection.emptyCollection.{u1} (Set.{u1} (α i)) (Set.instEmptyCollectionSet.{u1} (α i)))) Case conversion may be inaccurate. Consider using '#align set.preimage_image_sigma_mk_of_ne Set.preimage_image_sigmaMk_of_neₓ'. 
-/ theorem preimage_image_sigmaMk_of_ne (h : i ≠ j) (s : Set (α j)) : Sigma.mk i ⁻¹' (Sigma.mk j '' s) = ∅ := by ext x simp [h.symm] #align set.preimage_image_sigma_mk_of_ne Set.preimage_image_sigmaMk_of_ne /- warning: set.image_sigma_mk_preimage_sigma_map_subset -> Set.image_sigmaMk_preimage_sigmaMap_subset is a dubious translation: lean 3 declaration is forall {ι : Type.{u1}} {ι' : Type.{u2}} {α : ι -> Type.{u3}} {β : ι' -> Type.{u4}} (f : ι -> ι') (g : forall (i : ι), (α i) -> (β (f i))) (i : ι) (s : Set.{u4} (β (f i))), HasSubset.Subset.{max u1 u3} (Set.{max u1 u3} (Sigma.{u1, u3} ι (fun (i : ι) => α i))) (Set.hasSubset.{max u1 u3} (Sigma.{u1, u3} ι (fun (i : ι) => α i))) (Set.image.{u3, max u1 u3} (α i) (Sigma.{u1, u3} ι (fun (i : ι) => α i)) (Sigma.mk.{u1, u3} ι (fun (i : ι) => α i) i) (Set.preimage.{u3, u4} (α i) (β (f i)) (g i) s)) (Set.preimage.{max u1 u3, max u2 u4} (Sigma.{u1, u3} ι (fun (i : ι) => α i)) (Sigma.{u2, u4} ι' β) (Sigma.map.{u1, u2, u3, u4} ι ι' (fun (i : ι) => α i) β f g) (Set.image.{u4, max u2 u4} (β (f i)) (Sigma.{u2, u4} ι' β) (Sigma.mk.{u2, u4} ι' β (f i)) s)) but is expected to have type forall {ι : Type.{u3}} {ι' : Type.{u1}} {α : ι -> Type.{u2}} {β : ι' -> Type.{u4}} (f : ι -> ι') (g : forall (i : ι), (α i) -> (β (f i))) (i : ι) (s : Set.{u4} (β (f i))), HasSubset.Subset.{max u3 u2} (Set.{max u3 u2} (Sigma.{u3, u2} ι α)) (Set.instHasSubsetSet.{max u3 u2} (Sigma.{u3, u2} ι α)) (Set.image.{u2, max u3 u2} (α i) (Sigma.{u3, u2} ι α) (Sigma.mk.{u3, u2} ι α i) (Set.preimage.{u2, u4} (α i) (β (f i)) (g i) s)) (Set.preimage.{max u3 u2, max u4 u1} (Sigma.{u3, u2} ι α) (Sigma.{u1, u4} ι' β) (Sigma.map.{u3, u1, u2, u4} ι ι' α β f g) (Set.image.{u4, max u1 u4} (β (f i)) (Sigma.{u1, u4} ι' β) (Sigma.mk.{u1, u4} ι' β (f i)) s)) Case conversion may be inaccurate. Consider using '#align set.image_sigma_mk_preimage_sigma_map_subset Set.image_sigmaMk_preimage_sigmaMap_subsetₓ'. 
-/ theorem image_sigmaMk_preimage_sigmaMap_subset {β : ι' → Type _} (f : ι → ι') (g : ∀ i, α i → β (f i)) (i : ι) (s : Set (β (f i))) : Sigma.mk i '' (g i ⁻¹' s) ⊆ Sigma.map f g ⁻¹' (Sigma.mk (f i) '' s) := image_subset_iff.2 fun x hx => ⟨g i x, hx, rfl⟩ #align set.image_sigma_mk_preimage_sigma_map_subset Set.image_sigmaMk_preimage_sigmaMap_subset /- warning: set.image_sigma_mk_preimage_sigma_map -> Set.image_sigmaMk_preimage_sigmaMap is a dubious translation: lean 3 declaration is forall {ι : Type.{u1}} {ι' : Type.{u2}} {α : ι -> Type.{u3}} {β : ι' -> Type.{u4}} {f : ι -> ι'}, (Function.Injective.{succ u1, succ u2} ι ι' f) -> (forall (g : forall (i : ι), (α i) -> (β (f i))) (i : ι) (s : Set.{u4} (β (f i))), Eq.{succ (max u1 u3)} (Set.{max u1 u3} (Sigma.{u1, u3} ι (fun (i : ι) => α i))) (Set.image.{u3, max u1 u3} (α i) (Sigma.{u1, u3} ι (fun (i : ι) => α i)) (Sigma.mk.{u1, u3} ι (fun (i : ι) => α i) i) (Set.preimage.{u3, u4} (α i) (β (f i)) (g i) s)) (Set.preimage.{max u1 u3, max u2 u4} (Sigma.{u1, u3} ι (fun (i : ι) => α i)) (Sigma.{u2, u4} ι' β) (Sigma.map.{u1, u2, u3, u4} ι ι' (fun (i : ι) => α i) β f g) (Set.image.{u4, max u2 u4} (β (f i)) (Sigma.{u2, u4} ι' β) (Sigma.mk.{u2, u4} ι' β (f i)) s))) but is expected to have type forall {ι : Type.{u3}} {ι' : Type.{u2}} {α : ι -> Type.{u1}} {β : ι' -> Type.{u4}} {f : ι -> ι'}, (Function.Injective.{succ u3, succ u2} ι ι' f) -> (forall (g : forall (i : ι), (α i) -> (β (f i))) (i : ι) (s : Set.{u4} (β (f i))), Eq.{max (succ u3) (succ u1)} (Set.{max u3 u1} (Sigma.{u3, u1} ι α)) (Set.image.{u1, max u3 u1} (α i) (Sigma.{u3, u1} ι α) (Sigma.mk.{u3, u1} ι α i) (Set.preimage.{u1, u4} (α i) (β (f i)) (g i) s)) (Set.preimage.{max u1 u3, max u4 u2} (Sigma.{u3, u1} ι (fun (i : ι) => α i)) (Sigma.{u2, u4} ι' β) (Sigma.map.{u3, u2, u1, u4} ι ι' (fun (i : ι) => α i) β f g) (Set.image.{u4, max u2 u4} (β (f i)) (Sigma.{u2, u4} ι' β) (Sigma.mk.{u2, u4} ι' β (f i)) s))) Case conversion may be inaccurate. Consider using '#align set.image_sigma_mk_preimage_sigma_map Set.image_sigmaMk_preimage_sigmaMapₓ'. -/ theorem image_sigmaMk_preimage_sigmaMap {β : ι' → Type _} {f : ι → ι'} (hf : Function.Injective f) (g : ∀ i, α i → β (f i)) (i : ι) (s : Set (β (f i))) : Sigma.mk i '' (g i ⁻¹' s) = Sigma.map f g ⁻¹' (Sigma.mk (f i) '' s) := by refine' (image_sigma_mk_preimage_sigma_map_subset f g i s).antisymm _ rintro ⟨j, x⟩ ⟨y, hys, hxy⟩ simp only [hf.eq_iff, Sigma.map] at hxy rcases hxy with ⟨rfl, hxy⟩; rw [heq_iff_eq] at hxy; subst y exact ⟨x, hys, rfl⟩ #align set.image_sigma_mk_preimage_sigma_map Set.image_sigmaMk_preimage_sigmaMap #print Set.Sigma /- /-- Indexed sum of sets. 
`s.sigma t` is the set of dependent pairs `⟨i, a⟩` such that `i ∈ s` and `a ∈ t i`.-/ protected def Sigma (s : Set ι) (t : ∀ i, Set (α i)) : Set (Σi, α i) := { x | x.1 ∈ s ∧ x.2 ∈ t x.1 } #align set.sigma Set.Sigma -/ /- warning: set.mem_sigma_iff -> Set.mem_sigma_iff is a dubious translation: lean 3 declaration is forall {ι : Type.{u1}} {α : ι -> Type.{u2}} {s : Set.{u1} ι} {t : forall (i : ι), Set.{u2} (α i)} {x : Sigma.{u1, u2} ι (fun (i : ι) => α i)}, Iff (Membership.Mem.{max u1 u2, max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i)) (Set.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) (Set.hasMem.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) x (Set.Sigma.{u1, u2} ι (fun (i : ι) => α i) s t)) (And (Membership.Mem.{u1, u1} ι (Set.{u1} ι) (Set.hasMem.{u1} ι) (Sigma.fst.{u1, u2} ι (fun (i : ι) => α i) x) s) (Membership.Mem.{u2, u2} (α (Sigma.fst.{u1, u2} ι (fun (i : ι) => α i) x)) (Set.{u2} (α (Sigma.fst.{u1, u2} ι (fun (i : ι) => α i) x))) (Set.hasMem.{u2} (α (Sigma.fst.{u1, u2} ι (fun (i : ι) => α i) x))) (Sigma.snd.{u1, u2} ι (fun (i : ι) => α i) x) (t (Sigma.fst.{u1, u2} ι (fun (i : ι) => α i) x)))) but is expected to have type forall {ι : Type.{u2}} {α : ι -> Type.{u1}} {s : Set.{u2} ι} {t : forall (i : ι), Set.{u1} (α i)} {x : Sigma.{u2, u1} ι (fun (i : ι) => α i)}, Iff (Membership.mem.{max u2 u1, max u2 u1} (Sigma.{u2, u1} ι (fun (i : ι) => α i)) (Set.{max u1 u2} (Sigma.{u2, u1} ι (fun (i : ι) => α i))) (Set.instMembershipSet.{max u2 u1} (Sigma.{u2, u1} ι (fun (i : ι) => α i))) x (Set.Sigma.{u2, u1} ι (fun (i : ι) => α i) s t)) (And (Membership.mem.{u2, u2} ι (Set.{u2} ι) (Set.instMembershipSet.{u2} ι) (Sigma.fst.{u2, u1} ι (fun (i : ι) => α i) x) s) (Membership.mem.{u1, u1} (α (Sigma.fst.{u2, u1} ι (fun (i : ι) => α i) x)) (Set.{u1} (α (Sigma.fst.{u2, u1} ι (fun (i : ι) => α i) x))) (Set.instMembershipSet.{u1} (α (Sigma.fst.{u2, u1} ι (fun (i : ι) => α i) x))) (Sigma.snd.{u2, u1} ι (fun (i : ι) => α i) x) (t (Sigma.fst.{u2, u1} ι (fun (i : ι) => α i) x)))) Case conversion may be inaccurate. Consider using '#align set.mem_sigma_iff Set.mem_sigma_iffₓ'. 
-/ @[simp] theorem mem_sigma_iff : x ∈ s.Sigma t ↔ x.1 ∈ s ∧ x.2 ∈ t x.1 := Iff.rfl #align set.mem_sigma_iff Set.mem_sigma_iff /- warning: set.mk_sigma_iff -> Set.mk_sigma_iff is a dubious translation: lean 3 declaration is forall {ι : Type.{u1}} {α : ι -> Type.{u2}} {s : Set.{u1} ι} {t : forall (i : ι), Set.{u2} (α i)} {i : ι} {a : α i}, Iff (Membership.Mem.{max u1 u2, max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i)) (Set.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) (Set.hasMem.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) (Sigma.mk.{u1, u2} ι (fun (i : ι) => α i) i a) (Set.Sigma.{u1, u2} ι (fun (i : ι) => α i) s t)) (And (Membership.Mem.{u1, u1} ι (Set.{u1} ι) (Set.hasMem.{u1} ι) i s) (Membership.Mem.{u2, u2} (α i) (Set.{u2} (α i)) (Set.hasMem.{u2} (α i)) a (t i))) but is expected to have type forall {ι : Type.{u2}} {α : ι -> Type.{u1}} {s : Set.{u2} ι} {t : forall (i : ι), Set.{u1} (α i)} {i : ι} {a : α i}, Iff (Membership.mem.{max u2 u1, max u2 u1} (Sigma.{u2, u1} ι (fun (i : ι) => α i)) (Set.{max u1 u2} (Sigma.{u2, u1} ι (fun (i : ι) => α i))) (Set.instMembershipSet.{max u2 u1} (Sigma.{u2, u1} ι (fun (i : ι) => α i))) (Sigma.mk.{u2, u1} ι (fun (i : ι) => α i) i a) (Set.Sigma.{u2, u1} ι (fun (i : ι) => α i) s t)) (And (Membership.mem.{u2, u2} ι (Set.{u2} ι) (Set.instMembershipSet.{u2} ι) i s) (Membership.mem.{u1, u1} (α i) (Set.{u1} (α i)) (Set.instMembershipSet.{u1} (α i)) a (t i))) Case conversion may be inaccurate. Consider using '#align set.mk_sigma_iff Set.mk_sigma_iffₓ'. -/ @[simp] theorem mk_sigma_iff : (⟨i, a⟩ : Σi, α i) ∈ s.Sigma t ↔ i ∈ s ∧ a ∈ t i := Iff.rfl #align set.mk_sigma_iff Set.mk_sigma_iff /- warning: set.mk_mem_sigma -> Set.mk_mem_sigma is a dubious translation: lean 3 declaration is forall {ι : Type.{u1}} {α : ι -> Type.{u2}} {s : Set.{u1} ι} {t : forall (i : ι), Set.{u2} (α i)} {i : ι} {a : α i}, (Membership.Mem.{u1, u1} ι (Set.{u1} ι) (Set.hasMem.{u1} ι) i s) -> (Membership.Mem.{u2, u2} (α i) (Set.{u2} (α i)) (Set.hasMem.{u2} (α i)) a (t i)) -> (Membership.Mem.{max u1 u2, max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i)) (Set.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) (Set.hasMem.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) (Sigma.mk.{u1, u2} ι (fun (i : ι) => α i) i a) (Set.Sigma.{u1, u2} ι (fun (i : ι) => α i) s t)) but is expected to have type forall {ι : Type.{u2}} {α : ι -> Type.{u1}} {s : Set.{u2} ι} {t : forall (i : ι), Set.{u1} (α i)} {i : ι} {a : α i}, (Membership.mem.{u2, u2} ι (Set.{u2} ι) (Set.instMembershipSet.{u2} ι) i s) -> (Membership.mem.{u1, u1} (α i) (Set.{u1} (α i)) (Set.instMembershipSet.{u1} (α i)) a (t i)) -> (Membership.mem.{max u2 u1, max u2 u1} (Sigma.{u2, u1} ι (fun (i : ι) => α i)) (Set.{max u1 u2} (Sigma.{u2, u1} ι (fun (i : ι) => α i))) (Set.instMembershipSet.{max u2 u1} (Sigma.{u2, u1} ι (fun (i : ι) => α i))) (Sigma.mk.{u2, u1} ι (fun (i : ι) => α i) i a) (Set.Sigma.{u2, u1} ι (fun (i : ι) => α i) s t)) Case conversion may be inaccurate. Consider using '#align set.mk_mem_sigma Set.mk_mem_sigmaₓ'. 
-/ theorem mk_mem_sigma (hi : i ∈ s) (ha : a ∈ t i) : (⟨i, a⟩ : Σi, α i) ∈ s.Sigma t := ⟨hi, ha⟩ #align set.mk_mem_sigma Set.mk_mem_sigma /- warning: set.sigma_mono -> Set.sigma_mono is a dubious translation: lean 3 declaration is forall {ι : Type.{u1}} {α : ι -> Type.{u2}} {s₁ : Set.{u1} ι} {s₂ : Set.{u1} ι} {t₁ : forall (i : ι), Set.{u2} (α i)} {t₂ : forall (i : ι), Set.{u2} (α i)}, (HasSubset.Subset.{u1} (Set.{u1} ι) (Set.hasSubset.{u1} ι) s₁ s₂) -> (forall (i : ι), HasSubset.Subset.{u2} (Set.{u2} (α i)) (Set.hasSubset.{u2} (α i)) (t₁ i) (t₂ i)) -> (HasSubset.Subset.{max u1 u2} (Set.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) (Set.hasSubset.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) (Set.Sigma.{u1, u2} ι (fun (i : ι) => α i) s₁ t₁) (Set.Sigma.{u1, u2} ι (fun (i : ι) => α i) s₂ t₂)) but is expected to have type forall {ι : Type.{u2}} {α : ι -> Type.{u1}} {s₁ : Set.{u2} ι} {s₂ : Set.{u2} ι} {t₁ : forall (i : ι), Set.{u1} (α i)} {t₂ : forall (i : ι), Set.{u1} (α i)}, (HasSubset.Subset.{u2} (Set.{u2} ι) (Set.instHasSubsetSet.{u2} ι) s₁ s₂) -> (forall (i : ι), HasSubset.Subset.{u1} (Set.{u1} (α i)) (Set.instHasSubsetSet.{u1} (α i)) (t₁ i) (t₂ i)) -> (HasSubset.Subset.{max u2 u1} (Set.{max u1 u2} (Sigma.{u2, u1} ι (fun (i : ι) => α i))) (Set.instHasSubsetSet.{max u2 u1} (Sigma.{u2, u1} ι (fun (i : ι) => α i))) (Set.Sigma.{u2, u1} ι (fun (i : ι) => α i) s₁ t₁) (Set.Sigma.{u2, u1} ι (fun (i : ι) => α i) s₂ t₂)) Case conversion may be inaccurate. Consider using '#align set.sigma_mono Set.sigma_monoₓ'. -/ theorem sigma_mono (hs : s₁ ⊆ s₂) (ht : ∀ i, t₁ i ⊆ t₂ i) : s₁.Sigma t₁ ⊆ s₂.Sigma t₂ := fun x hx => ⟨hs hx.1, ht _ hx.2⟩ #align set.sigma_mono Set.sigma_mono /- warning: set.sigma_subset_iff -> Set.sigma_subset_iff is a dubious translation: lean 3 declaration is forall {ι : Type.{u1}} {α : ι -> Type.{u2}} {s : Set.{u1} ι} {t : forall (i : ι), Set.{u2} (α i)} {u : Set.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))}, Iff (HasSubset.Subset.{max u1 u2} (Set.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) (Set.hasSubset.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) (Set.Sigma.{u1, u2} ι (fun (i : ι) => α i) s t) u) (forall {{i : ι}}, (Membership.Mem.{u1, u1} ι (Set.{u1} ι) (Set.hasMem.{u1} ι) i s) -> (forall {{a : α i}}, (Membership.Mem.{u2, u2} (α i) (Set.{u2} (α i)) (Set.hasMem.{u2} (α i)) a (t i)) -> (Membership.Mem.{max u1 u2, max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i)) (Set.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) (Set.hasMem.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) (Sigma.mk.{u1, u2} ι (fun (i : ι) => α i) i a) u))) but is expected to have type forall {ι : Type.{u2}} {α : ι -> Type.{u1}} {s : Set.{u2} ι} {t : forall (i : ι), Set.{u1} (α i)} {u : Set.{max u1 u2} (Sigma.{u2, u1} ι (fun (i : ι) => α i))}, Iff (HasSubset.Subset.{max u2 u1} (Set.{max u1 u2} (Sigma.{u2, u1} ι (fun (i : ι) => α i))) (Set.instHasSubsetSet.{max u2 u1} (Sigma.{u2, u1} ι (fun (i : ι) => α i))) (Set.Sigma.{u2, u1} ι (fun (i : ι) => α i) s t) u) (forall {{i : ι}}, (Membership.mem.{u2, u2} ι (Set.{u2} ι) (Set.instMembershipSet.{u2} ι) i s) -> (forall {{a : α i}}, (Membership.mem.{u1, u1} (α i) (Set.{u1} (α i)) (Set.instMembershipSet.{u1} (α i)) a (t i)) -> (Membership.mem.{max u2 u1, max u2 u1} (Sigma.{u2, u1} ι (fun (i : ι) => α i)) (Set.{max u1 u2} (Sigma.{u2, u1} ι (fun (i : ι) => α i))) (Set.instMembershipSet.{max u2 u1} (Sigma.{u2, u1} ι (fun (i : ι) => α i))) (Sigma.mk.{u2, u1} ι (fun (i : ι) => α i) i a) u))) Case conversion may be inaccurate. 
Consider using '#align set.sigma_subset_iff Set.sigma_subset_iffₓ'. -/ theorem sigma_subset_iff : s.Sigma t ⊆ u ↔ ∀ ⦃i⦄, i ∈ s → ∀ ⦃a⦄, a ∈ t i → (⟨i, a⟩ : Σi, α i) ∈ u := ⟨fun h i hi a ha => h <| mk_mem_sigma hi ha, fun h ⟨i, a⟩ ha => h ha.1 ha.2⟩ #align set.sigma_subset_iff Set.sigma_subset_iff /- warning: set.forall_sigma_iff -> Set.forall_sigma_iff is a dubious translation: lean 3 declaration is forall {ι : Type.{u1}} {α : ι -> Type.{u2}} {s : Set.{u1} ι} {t : forall (i : ι), Set.{u2} (α i)} {p : (Sigma.{u1, u2} ι (fun (i : ι) => α i)) -> Prop}, Iff (forall (x : Sigma.{u1, u2} ι (fun (i : ι) => α i)), (Membership.Mem.{max u1 u2, max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i)) (Set.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) (Set.hasMem.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) x (Set.Sigma.{u1, u2} ι (fun (i : ι) => α i) s t)) -> (p x)) (forall {{i : ι}}, (Membership.Mem.{u1, u1} ι (Set.{u1} ι) (Set.hasMem.{u1} ι) i s) -> (forall {{a : α i}}, (Membership.Mem.{u2, u2} (α i) (Set.{u2} (α i)) (Set.hasMem.{u2} (α i)) a (t i)) -> (p (Sigma.mk.{u1, u2} ι (fun (i : ι) => α i) i a)))) but is expected to have type forall {ι : Type.{u2}} {α : ι -> Type.{u1}} {s : Set.{u2} ι} {t : forall (i : ι), Set.{u1} (α i)} {p : (Sigma.{u2, u1} ι (fun (i : ι) => α i)) -> Prop}, Iff (forall (x : Sigma.{u2, u1} ι (fun (i : ι) => α i)), (Membership.mem.{max u2 u1, max u2 u1} (Sigma.{u2, u1} ι (fun (i : ι) => α i)) (Set.{max u1 u2} (Sigma.{u2, u1} ι (fun (i : ι) => α i))) (Set.instMembershipSet.{max u2 u1} (Sigma.{u2, u1} ι (fun (i : ι) => α i))) x (Set.Sigma.{u2, u1} ι (fun (i : ι) => α i) s t)) -> (p x)) (forall {{i : ι}}, (Membership.mem.{u2, u2} ι (Set.{u2} ι) (Set.instMembershipSet.{u2} ι) i s) -> (forall {{a : α i}}, (Membership.mem.{u1, u1} (α i) (Set.{u1} (α i)) (Set.instMembershipSet.{u1} (α i)) a (t i)) -> (p (Sigma.mk.{u2, u1} ι (fun (i : ι) => α i) i a)))) Case conversion may be inaccurate. Consider using '#align set.forall_sigma_iff Set.forall_sigma_iffₓ'. 
-/ theorem forall_sigma_iff {p : (Σi, α i) → Prop} : (∀ x ∈ s.Sigma t, p x) ↔ ∀ ⦃i⦄, i ∈ s → ∀ ⦃a⦄, a ∈ t i → p ⟨i, a⟩ := sigma_subset_iff #align set.forall_sigma_iff Set.forall_sigma_iff /- warning: set.exists_sigma_iff -> Set.exists_sigma_iff is a dubious translation: lean 3 declaration is forall {ι : Type.{u1}} {α : ι -> Type.{u2}} {s : Set.{u1} ι} {t : forall (i : ι), Set.{u2} (α i)} {p : (Sigma.{u1, u2} ι (fun (i : ι) => α i)) -> Prop}, Iff (Exists.{succ (max u1 u2)} (Sigma.{u1, u2} ι (fun (i : ι) => α i)) (fun (x : Sigma.{u1, u2} ι (fun (i : ι) => α i)) => Exists.{0} (Membership.Mem.{max u1 u2, max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i)) (Set.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) (Set.hasMem.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) x (Set.Sigma.{u1, u2} ι (fun (i : ι) => α i) s t)) (fun (H : Membership.Mem.{max u1 u2, max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i)) (Set.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) (Set.hasMem.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) x (Set.Sigma.{u1, u2} ι (fun (i : ι) => α i) s t)) => p x))) (Exists.{succ u1} ι (fun (i : ι) => Exists.{0} (Membership.Mem.{u1, u1} ι (Set.{u1} ι) (Set.hasMem.{u1} ι) i s) (fun (H : Membership.Mem.{u1, u1} ι (Set.{u1} ι) (Set.hasMem.{u1} ι) i s) => Exists.{succ u2} (α i) (fun (a : α i) => Exists.{0} (Membership.Mem.{u2, u2} (α i) (Set.{u2} (α i)) (Set.hasMem.{u2} (α i)) a (t i)) (fun (H : Membership.Mem.{u2, u2} (α i) (Set.{u2} (α i)) (Set.hasMem.{u2} (α i)) a (t i)) => p (Sigma.mk.{u1, u2} ι (fun (i : ι) => α i) i a)))))) but is expected to have type forall {ι : Type.{u2}} {α : ι -> Type.{u1}} {s : Set.{u2} ι} {t : forall (i : ι), Set.{u1} (α i)} {p : (Sigma.{u2, u1} ι (fun (i : ι) => α i)) -> Prop}, Iff (Exists.{succ (max u2 u1)} (Sigma.{u2, u1} ι (fun (i : ι) => α i)) (fun (x : Sigma.{u2, u1} ι (fun (i : ι) => α i)) => And (Membership.mem.{max u2 u1, max u2 u1} (Sigma.{u2, u1} ι (fun (i : ι) => α i)) (Set.{max u1 u2} (Sigma.{u2, u1} ι (fun (i : ι) => α i))) (Set.instMembershipSet.{max u2 u1} (Sigma.{u2, u1} ι (fun (i : ι) => α i))) x (Set.Sigma.{u2, u1} ι (fun (i : ι) => α i) s t)) (p x))) (Exists.{succ u2} ι (fun (i : ι) => And (Membership.mem.{u2, u2} ι (Set.{u2} ι) (Set.instMembershipSet.{u2} ι) i s) (Exists.{succ u1} (α i) (fun (a : α i) => And (Membership.mem.{u1, u1} (α i) (Set.{u1} (α i)) (Set.instMembershipSet.{u1} (α i)) a (t i)) (p (Sigma.mk.{u2, u1} ι (fun (i : ι) => α i) i a)))))) Case conversion may be inaccurate. Consider using '#align set.exists_sigma_iff Set.exists_sigma_iffₓ'. 
-/ theorem exists_sigma_iff {p : (Σi, α i) → Prop} : (∃ x ∈ s.Sigma t, p x) ↔ ∃ i ∈ s, ∃ a ∈ t i, p ⟨i, a⟩ := ⟨fun ⟨⟨i, a⟩, ha, h⟩ => ⟨i, ha.1, a, ha.2, h⟩, fun ⟨i, hi, a, ha, h⟩ => ⟨⟨i, a⟩, ⟨hi, ha⟩, h⟩⟩ #align set.exists_sigma_iff Set.exists_sigma_iff /- warning: set.sigma_empty -> Set.sigma_empty is a dubious translation: lean 3 declaration is forall {ι : Type.{u1}} {α : ι -> Type.{u2}} {s : Set.{u1} ι}, Eq.{succ (max u1 u2)} (Set.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) (Set.Sigma.{u1, u2} ι (fun (i : ι) => α i) s (fun (i : ι) => EmptyCollection.emptyCollection.{u2} (Set.{u2} (α i)) (Set.hasEmptyc.{u2} (α i)))) (EmptyCollection.emptyCollection.{max u1 u2} (Set.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) (Set.hasEmptyc.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i)))) but is expected to have type forall {ι : Type.{u2}} {α : ι -> Type.{u1}} {s : Set.{u2} ι}, Eq.{max (succ u2) (succ u1)} (Set.{max u1 u2} (Sigma.{u2, u1} ι (fun (i : ι) => α i))) (Set.Sigma.{u2, u1} ι (fun (i : ι) => α i) s (fun (i : ι) => EmptyCollection.emptyCollection.{u1} (Set.{u1} (α i)) (Set.instEmptyCollectionSet.{u1} (α i)))) (EmptyCollection.emptyCollection.{max u2 u1} (Set.{max u1 u2} (Sigma.{u2, u1} ι (fun (i : ι) => α i))) (Set.instEmptyCollectionSet.{max u2 u1} (Sigma.{u2, u1} ι (fun (i : ι) => α i)))) Case conversion may be inaccurate. Consider using '#align set.sigma_empty Set.sigma_emptyₓ'. -/ @[simp] theorem sigma_empty : (s.Sigma fun i => (∅ : Set (α i))) = ∅ := ext fun _ => and_false_iff _ #align set.sigma_empty Set.sigma_empty /- warning: set.empty_sigma -> Set.empty_sigma is a dubious translation: lean 3 declaration is forall {ι : Type.{u1}} {α : ι -> Type.{u2}} {t : forall (i : ι), Set.{u2} (α i)}, Eq.{succ (max u1 u2)} (Set.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) (Set.Sigma.{u1, u2} ι (fun (i : ι) => α i) (EmptyCollection.emptyCollection.{u1} (Set.{u1} ι) (Set.hasEmptyc.{u1} ι)) t) (EmptyCollection.emptyCollection.{max u1 u2} (Set.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) (Set.hasEmptyc.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i)))) but is expected to have type forall {ι : Type.{u2}} {α : ι -> Type.{u1}} {t : forall (i : ι), Set.{u1} (α i)}, Eq.{max (succ u2) (succ u1)} (Set.{max u1 u2} (Sigma.{u2, u1} ι (fun (i : ι) => α i))) (Set.Sigma.{u2, u1} ι (fun (i : ι) => α i) (EmptyCollection.emptyCollection.{u2} (Set.{u2} ι) (Set.instEmptyCollectionSet.{u2} ι)) t) (EmptyCollection.emptyCollection.{max u2 u1} (Set.{max u1 u2} (Sigma.{u2, u1} ι (fun (i : ι) => α i))) (Set.instEmptyCollectionSet.{max u2 u1} (Sigma.{u2, u1} ι (fun (i : ι) => α i)))) Case conversion may be inaccurate. Consider using '#align set.empty_sigma Set.empty_sigmaₓ'. 
-/ @[simp] theorem empty_sigma : (∅ : Set ι).Sigma t = ∅ := ext fun _ => false_and_iff _ #align set.empty_sigma Set.empty_sigma /- warning: set.univ_sigma_univ -> Set.univ_sigma_univ is a dubious translation: lean 3 declaration is forall {ι : Type.{u1}} {α : ι -> Type.{u2}} {i : ι}, Eq.{succ (max u1 u2)} (Set.{max u1 u2} (Sigma.{u1, u2} ι (fun (i_1 : ι) => α i))) (Set.Sigma.{u1, u2} ι (fun (_x : ι) => α i) (Set.univ.{u1} ι) (fun (_x : ι) => Set.univ.{u2} (α i))) (Set.univ.{max u1 u2} (Sigma.{u1, u2} ι (fun (i_1 : ι) => α i))) but is expected to have type forall {ι : Type.{u2}} {α : ι -> Type.{u1}} {i : ι}, Eq.{max (succ u2) (succ u1)} (Set.{max u1 u2} (Sigma.{u2, u1} ι (fun (i_1 : ι) => α i))) (Set.Sigma.{u2, u1} ι (fun (_x : ι) => α i) (Set.univ.{u2} ι) (fun (_x : ι) => Set.univ.{u1} (α i))) (Set.univ.{max u2 u1} (Sigma.{u2, u1} ι (fun (i_1 : ι) => α i))) Case conversion may be inaccurate. Consider using '#align set.univ_sigma_univ Set.univ_sigma_univₓ'. -/ theorem univ_sigma_univ : ((@univ ι).Sigma fun _ => @univ (α i)) = univ := ext fun _ => true_and_iff _ #align set.univ_sigma_univ Set.univ_sigma_univ /- warning: set.sigma_univ -> Set.sigma_univ is a dubious translation: lean 3 declaration is forall {ι : Type.{u1}} {α : ι -> Type.{u2}} {s : Set.{u1} ι}, Eq.{succ (max u1 u2)} (Set.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) (Set.Sigma.{u1, u2} ι (fun (_x : ι) => α _x) s (fun (_x : ι) => Set.univ.{u2} (α _x))) (Set.preimage.{max u1 u2, u1} (Sigma.{u1, u2} ι (fun (i : ι) => α i)) ι (Sigma.fst.{u1, u2} ι (fun (i : ι) => α i)) s) but is expected to have type forall {ι : Type.{u2}} {α : ι -> Type.{u1}} {s : Set.{u2} ι}, Eq.{max (succ u2) (succ u1)} (Set.{max u1 u2} (Sigma.{u2, u1} ι (fun (i : ι) => α i))) (Set.Sigma.{u2, u1} ι (fun (_x : ι) => α _x) s (fun (_x : ι) => Set.univ.{u1} (α _x))) (Set.preimage.{max u1 u2, u2} (Sigma.{u2, u1} ι (fun (i : ι) => α i)) ι (Sigma.fst.{u2, u1} ι (fun (i : ι) => α i)) s) Case conversion may be inaccurate. Consider using '#align set.sigma_univ Set.sigma_univₓ'. -/ @[simp] theorem sigma_univ : s.Sigma (fun _ => univ : ∀ i, Set (α i)) = Sigma.fst ⁻¹' s := ext fun _ => and_true_iff _ #align set.sigma_univ Set.sigma_univ /- warning: set.singleton_sigma -> Set.singleton_sigma is a dubious translation: lean 3 declaration is forall {ι : Type.{u1}} {α : ι -> Type.{u2}} {t : forall (i : ι), Set.{u2} (α i)} {i : ι}, Eq.{succ (max u1 u2)} (Set.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) (Set.Sigma.{u1, u2} ι (fun (i : ι) => α i) (Singleton.singleton.{u1, u1} ι (Set.{u1} ι) (Set.hasSingleton.{u1} ι) i) t) (Set.image.{u2, max u1 u2} (α i) (Sigma.{u1, u2} ι (fun (i : ι) => α i)) (Sigma.mk.{u1, u2} ι (fun (i : ι) => α i) i) (t i)) but is expected to have type forall {ι : Type.{u2}} {α : ι -> Type.{u1}} {t : forall (i : ι), Set.{u1} (α i)} {i : ι} {a : α i}, Eq.{max (succ u2) (succ u1)} (Set.{max u1 u2} (Sigma.{u2, u1} ι (fun (i : ι) => α i))) (Set.Sigma.{u2, u1} ι (fun (i : ι) => α i) (Singleton.singleton.{u2, u2} ι (Set.{u2} ι) (Set.instSingletonSet.{u2} ι) i) t) (Set.image.{u1, max u2 u1} (α i) (Sigma.{u2, u1} ι α) (Sigma.mk.{u2, u1} ι α i) (t i)) Case conversion may be inaccurate. Consider using '#align set.singleton_sigma Set.singleton_sigmaₓ'. 
-/ @[simp] theorem singleton_sigma : ({i} : Set ι).Sigma t = Sigma.mk i '' t i := ext fun x => by constructor · obtain ⟨j, a⟩ := x rintro ⟨rfl : j = i, ha⟩ exact mem_image_of_mem _ ha · rintro ⟨b, hb, rfl⟩ exact ⟨rfl, hb⟩ #align set.singleton_sigma Set.singleton_sigma /- warning: set.sigma_singleton -> Set.sigma_singleton is a dubious translation: lean 3 declaration is forall {ι : Type.{u1}} {α : ι -> Type.{u2}} {s : Set.{u1} ι} {a : forall (i : ι), α i}, Eq.{succ (max u1 u2)} (Set.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) (Set.Sigma.{u1, u2} ι (fun (i : ι) => α i) s (fun (i : ι) => Singleton.singleton.{u2, u2} (α i) (Set.{u2} (α i)) (Set.hasSingleton.{u2} (α i)) (a i))) (Set.image.{u1, max u1 u2} ι (Sigma.{u1, u2} ι (fun (i : ι) => α i)) (fun (i : ι) => Sigma.mk.{u1, u2} ι (fun (i : ι) => α i) i (a i)) s) but is expected to have type forall {ι : Type.{u2}} {α : ι -> Type.{u1}} {s : Set.{u2} ι} {a : forall (i : ι), α i}, Eq.{max (succ u2) (succ u1)} (Set.{max u1 u2} (Sigma.{u2, u1} ι (fun (i : ι) => α i))) (Set.Sigma.{u2, u1} ι (fun (i : ι) => α i) s (fun (i : ι) => Singleton.singleton.{u1, u1} (α i) (Set.{u1} (α i)) (Set.instSingletonSet.{u1} (α i)) (a i))) (Set.image.{u2, max u1 u2} ι (Sigma.{u2, u1} ι (fun (i : ι) => α i)) (fun (i : ι) => Sigma.mk.{u2, u1} ι (fun (i : ι) => α i) i (a i)) s) Case conversion may be inaccurate. Consider using '#align set.sigma_singleton Set.sigma_singletonₓ'. -/ @[simp] theorem sigma_singleton {a : ∀ i, α i} : (s.Sigma fun i => ({a i} : Set (α i))) = (fun i => Sigma.mk i <| a i) '' s := by ext ⟨x, y⟩ simp [and_left_comm, eq_comm] #align set.sigma_singleton Set.sigma_singleton /- warning: set.singleton_sigma_singleton -> Set.singleton_sigma_singleton is a dubious translation: lean 3 declaration is forall {ι : Type.{u1}} {α : ι -> Type.{u2}} {i : ι} {a : forall (i : ι), α i}, Eq.{succ (max u1 u2)} (Set.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) (Set.Sigma.{u1, u2} ι (fun (i : ι) => α i) (Singleton.singleton.{u1, u1} ι (Set.{u1} ι) (Set.hasSingleton.{u1} ι) i) (fun (i : ι) => Singleton.singleton.{u2, u2} (α i) (Set.{u2} (α i)) (Set.hasSingleton.{u2} (α i)) (a i))) (Singleton.singleton.{max u1 u2, max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i)) (Set.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) (Set.hasSingleton.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) (Sigma.mk.{u1, u2} ι (fun (i : ι) => α i) i (a i))) but is expected to have type forall {ι : Type.{u2}} {α : ι -> Type.{u1}} {i : ι} {a : forall (i : ι), α i}, Eq.{max (succ u2) (succ u1)} (Set.{max u1 u2} (Sigma.{u2, u1} ι (fun (i : ι) => α i))) (Set.Sigma.{u2, u1} ι (fun (i : ι) => α i) (Singleton.singleton.{u2, u2} ι (Set.{u2} ι) (Set.instSingletonSet.{u2} ι) i) (fun (i : ι) => Singleton.singleton.{u1, u1} (α i) (Set.{u1} (α i)) (Set.instSingletonSet.{u1} (α i)) (a i))) (Singleton.singleton.{max u2 u1, max u2 u1} (Sigma.{u2, u1} ι (fun (i : ι) => α i)) (Set.{max u1 u2} (Sigma.{u2, u1} ι (fun (i : ι) => α i))) (Set.instSingletonSet.{max u2 u1} (Sigma.{u2, u1} ι (fun (i : ι) => α i))) (Sigma.mk.{u2, u1} ι (fun (i : ι) => α i) i (a i))) Case conversion may be inaccurate. Consider using '#align set.singleton_sigma_singleton Set.singleton_sigma_singletonₓ'. 
-/ theorem singleton_sigma_singleton {a : ∀ i, α i} : (({i} : Set ι).Sigma fun i => ({a i} : Set (α i))) = {⟨i, a i⟩} := by rw [sigma_singleton, image_singleton] #align set.singleton_sigma_singleton Set.singleton_sigma_singleton /- warning: set.union_sigma -> Set.union_sigma is a dubious translation: lean 3 declaration is forall {ι : Type.{u1}} {α : ι -> Type.{u2}} {s₁ : Set.{u1} ι} {s₂ : Set.{u1} ι} {t : forall (i : ι), Set.{u2} (α i)}, Eq.{succ (max u1 u2)} (Set.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) (Set.Sigma.{u1, u2} ι (fun (i : ι) => α i) (Union.union.{u1} (Set.{u1} ι) (Set.hasUnion.{u1} ι) s₁ s₂) t) (Union.union.{max u1 u2} (Set.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) (Set.hasUnion.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) (Set.Sigma.{u1, u2} ι (fun (i : ι) => α i) s₁ t) (Set.Sigma.{u1, u2} ι (fun (i : ι) => α i) s₂ t)) but is expected to have type forall {ι : Type.{u2}} {α : ι -> Type.{u1}} {s₁ : Set.{u2} ι} {s₂ : Set.{u2} ι} {t : forall (i : ι), Set.{u1} (α i)}, Eq.{max (succ u2) (succ u1)} (Set.{max u1 u2} (Sigma.{u2, u1} ι (fun (i : ι) => α i))) (Set.Sigma.{u2, u1} ι (fun (i : ι) => α i) (Union.union.{u2} (Set.{u2} ι) (Set.instUnionSet.{u2} ι) s₁ s₂) t) (Union.union.{max u2 u1} (Set.{max u1 u2} (Sigma.{u2, u1} ι (fun (i : ι) => α i))) (Set.instUnionSet.{max u2 u1} (Sigma.{u2, u1} ι (fun (i : ι) => α i))) (Set.Sigma.{u2, u1} ι (fun (i : ι) => α i) s₁ t) (Set.Sigma.{u2, u1} ι (fun (i : ι) => α i) s₂ t)) Case conversion may be inaccurate. Consider using '#align set.union_sigma Set.union_sigmaₓ'. -/ @[simp] theorem union_sigma : (s₁ ∪ s₂).Sigma t = s₁.Sigma t ∪ s₂.Sigma t := ext fun _ => or_and_right #align set.union_sigma Set.union_sigma /- warning: set.sigma_union -> Set.sigma_union is a dubious translation: lean 3 declaration is forall {ι : Type.{u1}} {α : ι -> Type.{u2}} {s : Set.{u1} ι} {t₁ : forall (i : ι), Set.{u2} (α i)} {t₂ : forall (i : ι), Set.{u2} (α i)}, Eq.{succ (max u1 u2)} (Set.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) (Set.Sigma.{u1, u2} ι (fun (i : ι) => α i) s (fun (i : ι) => Union.union.{u2} (Set.{u2} (α i)) (Set.hasUnion.{u2} (α i)) (t₁ i) (t₂ i))) (Union.union.{max u1 u2} (Set.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) (Set.hasUnion.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) (Set.Sigma.{u1, u2} ι (fun (i : ι) => α i) s t₁) (Set.Sigma.{u1, u2} ι (fun (i : ι) => α i) s t₂)) but is expected to have type forall {ι : Type.{u2}} {α : ι -> Type.{u1}} {s : Set.{u2} ι} {t₁ : forall (i : ι), Set.{u1} (α i)} {t₂ : forall (i : ι), Set.{u1} (α i)}, Eq.{max (succ u2) (succ u1)} (Set.{max u1 u2} (Sigma.{u2, u1} ι (fun (i : ι) => α i))) (Set.Sigma.{u2, u1} ι (fun (i : ι) => α i) s (fun (i : ι) => Union.union.{u1} (Set.{u1} (α i)) (Set.instUnionSet.{u1} (α i)) (t₁ i) (t₂ i))) (Union.union.{max u2 u1} (Set.{max u1 u2} (Sigma.{u2, u1} ι (fun (i : ι) => α i))) (Set.instUnionSet.{max u2 u1} (Sigma.{u2, u1} ι (fun (i : ι) => α i))) (Set.Sigma.{u2, u1} ι (fun (i : ι) => α i) s t₁) (Set.Sigma.{u2, u1} ι (fun (i : ι) => α i) s t₂)) Case conversion may be inaccurate. Consider using '#align set.sigma_union Set.sigma_unionₓ'. 
-/ @[simp] theorem sigma_union : (s.Sigma fun i => t₁ i ∪ t₂ i) = s.Sigma t₁ ∪ s.Sigma t₂ := ext fun _ => and_or_left #align set.sigma_union Set.sigma_union /- warning: set.sigma_inter_sigma -> Set.sigma_inter_sigma is a dubious translation: lean 3 declaration is forall {ι : Type.{u1}} {α : ι -> Type.{u2}} {s₁ : Set.{u1} ι} {s₂ : Set.{u1} ι} {t₁ : forall (i : ι), Set.{u2} (α i)} {t₂ : forall (i : ι), Set.{u2} (α i)}, Eq.{succ (max u1 u2)} (Set.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) (Inter.inter.{max u1 u2} (Set.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) (Set.hasInter.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) (Set.Sigma.{u1, u2} ι (fun (i : ι) => α i) s₁ t₁) (Set.Sigma.{u1, u2} ι (fun (i : ι) => α i) s₂ t₂)) (Set.Sigma.{u1, u2} ι (fun (i : ι) => α i) (Inter.inter.{u1} (Set.{u1} ι) (Set.hasInter.{u1} ι) s₁ s₂) (fun (i : ι) => Inter.inter.{u2} (Set.{u2} (α i)) (Set.hasInter.{u2} (α i)) (t₁ i) (t₂ i))) but is expected to have type forall {ι : Type.{u2}} {α : ι -> Type.{u1}} {s₁ : Set.{u2} ι} {s₂ : Set.{u2} ι} {t₁ : forall (i : ι), Set.{u1} (α i)} {t₂ : forall (i : ι), Set.{u1} (α i)}, Eq.{max (succ u2) (succ u1)} (Set.{max u1 u2} (Sigma.{u2, u1} ι (fun (i : ι) => α i))) (Inter.inter.{max u2 u1} (Set.{max u1 u2} (Sigma.{u2, u1} ι (fun (i : ι) => α i))) (Set.instInterSet.{max u2 u1} (Sigma.{u2, u1} ι (fun (i : ι) => α i))) (Set.Sigma.{u2, u1} ι (fun (i : ι) => α i) s₁ t₁) (Set.Sigma.{u2, u1} ι (fun (i : ι) => α i) s₂ t₂)) (Set.Sigma.{u2, u1} ι (fun (i : ι) => α i) (Inter.inter.{u2} (Set.{u2} ι) (Set.instInterSet.{u2} ι) s₁ s₂) (fun (i : ι) => Inter.inter.{u1} (Set.{u1} (α i)) (Set.instInterSet.{u1} (α i)) (t₁ i) (t₂ i))) Case conversion may be inaccurate. Consider using '#align set.sigma_inter_sigma Set.sigma_inter_sigmaₓ'. -/ theorem sigma_inter_sigma : s₁.Sigma t₁ ∩ s₂.Sigma t₂ = (s₁ ∩ s₂).Sigma fun i => t₁ i ∩ t₂ i := by ext ⟨x, y⟩ simp [and_assoc', and_left_comm] #align set.sigma_inter_sigma Set.sigma_inter_sigma /- warning: set.insert_sigma -> Set.insert_sigma is a dubious translation: lean 3 declaration is forall {ι : Type.{u1}} {α : ι -> Type.{u2}} {s : Set.{u1} ι} {t : forall (i : ι), Set.{u2} (α i)} {i : ι}, Eq.{succ (max u1 u2)} (Set.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) (Set.Sigma.{u1, u2} ι (fun (i : ι) => α i) (Insert.insert.{u1, u1} ι (Set.{u1} ι) (Set.hasInsert.{u1} ι) i s) t) (Union.union.{max u1 u2} (Set.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) (Set.hasUnion.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) (Set.image.{u2, max u1 u2} (α i) (Sigma.{u1, u2} ι (fun (i : ι) => α i)) (Sigma.mk.{u1, u2} ι (fun (i : ι) => α i) i) (t i)) (Set.Sigma.{u1, u2} ι (fun (i : ι) => α i) s t)) but is expected to have type forall {ι : Type.{u2}} {α : ι -> Type.{u1}} {s : Set.{u2} ι} {t : forall (i : ι), Set.{u1} (α i)} {i : ι} {a : α i}, Eq.{max (succ u2) (succ u1)} (Set.{max u1 u2} (Sigma.{u2, u1} ι (fun (i : ι) => α i))) (Set.Sigma.{u2, u1} ι (fun (i : ι) => α i) (Insert.insert.{u2, u2} ι (Set.{u2} ι) (Set.instInsertSet.{u2} ι) i s) t) (Union.union.{max u2 u1} (Set.{max u2 u1} (Sigma.{u2, u1} ι α)) (Set.instUnionSet.{max u2 u1} (Sigma.{u2, u1} ι α)) (Set.image.{u1, max u2 u1} (α i) (Sigma.{u2, u1} ι α) (Sigma.mk.{u2, u1} ι α i) (t i)) (Set.Sigma.{u2, u1} ι (fun (i : ι) => α i) s t)) Case conversion may be inaccurate. Consider using '#align set.insert_sigma Set.insert_sigmaₓ'. 
-/ theorem insert_sigma : (insert i s).Sigma t = Sigma.mk i '' t i ∪ s.Sigma t := by rw [insert_eq, union_sigma, singleton_sigma] #align set.insert_sigma Set.insert_sigma /- warning: set.sigma_insert -> Set.sigma_insert is a dubious translation: lean 3 declaration is forall {ι : Type.{u1}} {α : ι -> Type.{u2}} {s : Set.{u1} ι} {t : forall (i : ι), Set.{u2} (α i)} {a : forall (i : ι), α i}, Eq.{succ (max u1 u2)} (Set.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) (Set.Sigma.{u1, u2} ι (fun (i : ι) => α i) s (fun (i : ι) => Insert.insert.{u2, u2} (α i) (Set.{u2} (α i)) (Set.hasInsert.{u2} (α i)) (a i) (t i))) (Union.union.{max u1 u2} (Set.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) (Set.hasUnion.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) (Set.image.{u1, max u1 u2} ι (Sigma.{u1, u2} ι (fun (i : ι) => α i)) (fun (i : ι) => Sigma.mk.{u1, u2} ι (fun (i : ι) => α i) i (a i)) s) (Set.Sigma.{u1, u2} ι (fun (i : ι) => α i) s t)) but is expected to have type forall {ι : Type.{u2}} {α : ι -> Type.{u1}} {s : Set.{u2} ι} {t : forall (i : ι), Set.{u1} (α i)} {a : forall (i : ι), α i}, Eq.{max (succ u2) (succ u1)} (Set.{max u1 u2} (Sigma.{u2, u1} ι (fun (i : ι) => α i))) (Set.Sigma.{u2, u1} ι (fun (i : ι) => α i) s (fun (i : ι) => Insert.insert.{u1, u1} (α i) (Set.{u1} (α i)) (Set.instInsertSet.{u1} (α i)) (a i) (t i))) (Union.union.{max u2 u1} (Set.{max u2 u1} (Sigma.{u2, u1} ι (fun (i : ι) => α i))) (Set.instUnionSet.{max u2 u1} (Sigma.{u2, u1} ι (fun (i : ι) => α i))) (Set.image.{u2, max u2 u1} ι (Sigma.{u2, u1} ι (fun (i : ι) => α i)) (fun (i : ι) => Sigma.mk.{u2, u1} ι (fun (i : ι) => α i) i (a i)) s) (Set.Sigma.{u2, u1} ι (fun (i : ι) => α i) s t)) Case conversion may be inaccurate. Consider using '#align set.sigma_insert Set.sigma_insertₓ'. 
-/ theorem sigma_insert {a : ∀ i, α i} : (s.Sigma fun i => insert (a i) (t i)) = (fun i => ⟨i, a i⟩) '' s ∪ s.Sigma t := by simp_rw [insert_eq, sigma_union, sigma_singleton] #align set.sigma_insert Set.sigma_insert /- warning: set.sigma_preimage_eq -> Set.sigma_preimage_eq is a dubious translation: lean 3 declaration is forall {ι : Type.{u1}} {ι' : Type.{u2}} {α : ι -> Type.{u3}} {β : ι -> Type.{u4}} {s : Set.{u1} ι} {t : forall (i : ι), Set.{u3} (α i)} {f : ι' -> ι} {g : forall (i : ι), (β i) -> (α i)}, Eq.{succ (max u2 u4)} (Set.{max u2 u4} (Sigma.{u2, u4} ι' (fun (i : ι') => β (f i)))) (Set.Sigma.{u2, u4} ι' (fun (i : ι') => β (f i)) (Set.preimage.{u2, u1} ι' ι f s) (fun (i : ι') => Set.preimage.{u4, u3} (β (f i)) (α (f i)) (g (f i)) (t (f i)))) (Set.preimage.{max u2 u4, max u1 u3} (Sigma.{u2, u4} ι' (fun (i : ι') => β (f i))) (Sigma.{u1, u3} ι α) (fun (p : Sigma.{u2, u4} ι' (fun (i : ι') => β (f i))) => Sigma.mk.{u1, u3} ι α (f (Sigma.fst.{u2, u4} ι' (fun (i : ι') => β (f i)) p)) (g (f (Sigma.fst.{u2, u4} ι' (fun (i : ι') => β (f i)) p)) (Sigma.snd.{u2, u4} ι' (fun (i : ι') => β (f i)) p))) (Set.Sigma.{u1, u3} ι (fun (i : ι) => α i) s t)) but is expected to have type forall {ι : Type.{u2}} {ι' : Type.{u4}} {α : ι -> Type.{u1}} {β : ι -> Type.{u3}} {s : Set.{u2} ι} {t : forall (i : ι), Set.{u1} (α i)} {f : ι' -> ι} {g : forall (i : ι), (β i) -> (α i)}, Eq.{max (succ u4) (succ u3)} (Set.{max u3 u4} (Sigma.{u4, u3} ι' (fun (i : ι') => β (f i)))) (Set.Sigma.{u4, u3} ι' (fun (i : ι') => β (f i)) (Set.preimage.{u4, u2} ι' ι f s) (fun (i : ι') => Set.preimage.{u3, u1} (β (f i)) (α (f i)) (g (f i)) (t (f i)))) (Set.preimage.{max u4 u3, max u1 u2} (Sigma.{u4, u3} ι' (fun (i : ι') => β (f i))) (Sigma.{u2, u1} ι α) (fun (p : Sigma.{u4, u3} ι' (fun (i : ι') => β (f i))) => Sigma.mk.{u2, u1} ι α (f (Sigma.fst.{u4, u3} ι' (fun (i : ι') => β (f i)) p)) (g (f (Sigma.fst.{u4, u3} ι' (fun (i : ι') => β (f i)) p)) (Sigma.snd.{u4, u3} ι' (fun (i : ι') => β (f i)) p))) (Set.Sigma.{u2, u1} ι (fun (i : ι) => α i) s t)) Case conversion may be inaccurate. Consider using '#align set.sigma_preimage_eq Set.sigma_preimage_eqₓ'. 
-/ theorem sigma_preimage_eq {f : ι' → ι} {g : ∀ i, β i → α i} : ((f ⁻¹' s).Sigma fun i => g (f i) ⁻¹' t (f i)) = (fun p : Σi, β (f i) => Sigma.mk _ (g _ p.2)) ⁻¹' s.Sigma t := rfl #align set.sigma_preimage_eq Set.sigma_preimage_eq /- warning: set.sigma_preimage_left -> Set.sigma_preimage_left is a dubious translation: lean 3 declaration is forall {ι : Type.{u1}} {ι' : Type.{u2}} {α : ι -> Type.{u3}} {s : Set.{u1} ι} {t : forall (i : ι), Set.{u3} (α i)} {f : ι' -> ι}, Eq.{succ (max u2 u3)} (Set.{max u2 u3} (Sigma.{u2, u3} ι' (fun (i : ι') => α (f i)))) (Set.Sigma.{u2, u3} ι' (fun (i : ι') => α (f i)) (Set.preimage.{u2, u1} ι' ι f s) (fun (i : ι') => t (f i))) (Set.preimage.{max u2 u3, max u1 u3} (Sigma.{u2, u3} ι' (fun (i : ι') => α (f i))) (Sigma.{u1, u3} ι α) (fun (p : Sigma.{u2, u3} ι' (fun (i : ι') => α (f i))) => Sigma.mk.{u1, u3} ι α (f (Sigma.fst.{u2, u3} ι' (fun (i : ι') => α (f i)) p)) (Sigma.snd.{u2, u3} ι' (fun (i : ι') => α (f i)) p)) (Set.Sigma.{u1, u3} ι (fun (i : ι) => α i) s t)) but is expected to have type forall {ι : Type.{u1}} {ι' : Type.{u3}} {α : ι -> Type.{u2}} {s : Set.{u1} ι} {t : forall (i : ι), Set.{u2} (α i)} {f : ι' -> ι}, Eq.{max (succ u3) (succ u2)} (Set.{max u2 u3} (Sigma.{u3, u2} ι' (fun (i : ι') => α (f i)))) (Set.Sigma.{u3, u2} ι' (fun (i : ι') => α (f i)) (Set.preimage.{u3, u1} ι' ι f s) (fun (i : ι') => t (f i))) (Set.preimage.{max u3 u2, max u2 u1} (Sigma.{u3, u2} ι' (fun (i : ι') => α (f i))) (Sigma.{u1, u2} ι α) (fun (p : Sigma.{u3, u2} ι' (fun (i : ι') => α (f i))) => Sigma.mk.{u1, u2} ι α (f (Sigma.fst.{u3, u2} ι' (fun (i : ι') => α (f i)) p)) (Sigma.snd.{u3, u2} ι' (fun (i : ι') => α (f i)) p)) (Set.Sigma.{u1, u2} ι (fun (i : ι) => α i) s t)) Case conversion may be inaccurate. Consider using '#align set.sigma_preimage_left Set.sigma_preimage_leftₓ'. 
-/ theorem sigma_preimage_left {f : ι' → ι} : ((f ⁻¹' s).Sigma fun i => t (f i)) = (fun p : Σi, α (f i) => Sigma.mk _ p.2) ⁻¹' s.Sigma t := rfl #align set.sigma_preimage_left Set.sigma_preimage_left /- warning: set.sigma_preimage_right -> Set.sigma_preimage_right is a dubious translation: lean 3 declaration is forall {ι : Type.{u1}} {α : ι -> Type.{u2}} {β : ι -> Type.{u3}} {s : Set.{u1} ι} {t : forall (i : ι), Set.{u2} (α i)} {g : forall (i : ι), (β i) -> (α i)}, Eq.{succ (max u1 u3)} (Set.{max u1 u3} (Sigma.{u1, u3} ι (fun (i : ι) => β i))) (Set.Sigma.{u1, u3} ι (fun (i : ι) => β i) s (fun (i : ι) => Set.preimage.{u3, u2} (β i) (α i) (g i) (t i))) (Set.preimage.{max u1 u3, max u1 u2} (Sigma.{u1, u3} ι (fun (i : ι) => β i)) (Sigma.{u1, u2} ι α) (fun (p : Sigma.{u1, u3} ι (fun (i : ι) => β i)) => Sigma.mk.{u1, u2} ι α (Sigma.fst.{u1, u3} ι (fun (i : ι) => β i) p) (g (Sigma.fst.{u1, u3} ι (fun (i : ι) => β i) p) (Sigma.snd.{u1, u3} ι (fun (i : ι) => β i) p))) (Set.Sigma.{u1, u2} ι (fun (i : ι) => α i) s t)) but is expected to have type forall {ι : Type.{u3}} {α : ι -> Type.{u1}} {β : ι -> Type.{u2}} {s : Set.{u3} ι} {t : forall (i : ι), Set.{u1} (α i)} {g : forall (i : ι), (β i) -> (α i)}, Eq.{max (succ u3) (succ u2)} (Set.{max u2 u3} (Sigma.{u3, u2} ι (fun (i : ι) => β i))) (Set.Sigma.{u3, u2} ι (fun (i : ι) => β i) s (fun (i : ι) => Set.preimage.{u2, u1} (β i) (α i) (g i) (t i))) (Set.preimage.{max u3 u2, max u1 u3} (Sigma.{u3, u2} ι (fun (i : ι) => β i)) (Sigma.{u3, u1} ι α) (fun (p : Sigma.{u3, u2} ι (fun (i : ι) => β i)) => Sigma.mk.{u3, u1} ι α (Sigma.fst.{u3, u2} ι (fun (i : ι) => β i) p) (g (Sigma.fst.{u3, u2} ι (fun (i : ι) => β i) p) (Sigma.snd.{u3, u2} ι (fun (i : ι) => β i) p))) (Set.Sigma.{u3, u1} ι (fun (i : ι) => α i) s t)) Case conversion may be inaccurate. Consider using '#align set.sigma_preimage_right Set.sigma_preimage_rightₓ'. 
-/ theorem sigma_preimage_right {g : ∀ i, β i → α i} : (s.Sigma fun i => g i ⁻¹' t i) = (fun p : Σi, β i => Sigma.mk p.1 (g _ p.2)) ⁻¹' s.Sigma t := rfl #align set.sigma_preimage_right Set.sigma_preimage_right /- warning: set.preimage_sigma_map_sigma -> Set.preimage_sigmaMap_sigma is a dubious translation: lean 3 declaration is forall {ι : Type.{u1}} {ι' : Type.{u2}} {α : ι -> Type.{u3}} {α' : ι' -> Type.{u4}} (f : ι -> ι') (g : forall (i : ι), (α i) -> (α' (f i))) (s : Set.{u2} ι') (t : forall (i : ι'), Set.{u4} (α' i)), Eq.{succ (max u1 u3)} (Set.{max u1 u3} (Sigma.{u1, u3} ι (fun (i : ι) => α i))) (Set.preimage.{max u1 u3, max u2 u4} (Sigma.{u1, u3} ι (fun (i : ι) => α i)) (Sigma.{u2, u4} ι' α') (Sigma.map.{u1, u2, u3, u4} ι ι' (fun (i : ι) => α i) α' f g) (Set.Sigma.{u2, u4} ι' (fun (i : ι') => α' i) s t)) (Set.Sigma.{u1, u3} ι (fun (i : ι) => α i) (Set.preimage.{u1, u2} ι ι' f s) (fun (i : ι) => Set.preimage.{u3, u4} (α i) (α' (f i)) (g i) (t (f i)))) but is expected to have type forall {ι : Type.{u2}} {ι' : Type.{u3}} {α : ι -> Type.{u1}} {α' : ι' -> Type.{u4}} (f : ι -> ι') (g : forall (i : ι), (α i) -> (α' (f i))) (s : Set.{u3} ι') (t : forall (i : ι'), Set.{u4} (α' i)), Eq.{max (succ u2) (succ u1)} (Set.{max u1 u2} (Sigma.{u2, u1} ι (fun (i : ι) => α i))) (Set.preimage.{max u1 u2, max u4 u3} (Sigma.{u2, u1} ι (fun (i : ι) => α i)) (Sigma.{u3, u4} ι' α') (Sigma.map.{u2, u3, u1, u4} ι ι' (fun (i : ι) => α i) α' f g) (Set.Sigma.{u3, u4} ι' (fun (i : ι') => α' i) s t)) (Set.Sigma.{u2, u1} ι (fun (i : ι) => α i) (Set.preimage.{u2, u3} ι ι' f s) (fun (i : ι) => Set.preimage.{u1, u4} (α i) (α' (f i)) (g i) (t (f i)))) Case conversion may be inaccurate. Consider using '#align set.preimage_sigma_map_sigma Set.preimage_sigmaMap_sigmaₓ'. -/ theorem preimage_sigmaMap_sigma {α' : ι' → Type _} (f : ι → ι') (g : ∀ i, α i → α' (f i)) (s : Set ι') (t : ∀ i, Set (α' i)) : Sigma.map f g ⁻¹' s.Sigma t = (f ⁻¹' s).Sigma fun i => g i ⁻¹' t (f i) := rfl #align set.preimage_sigma_map_sigma Set.preimage_sigmaMap_sigma /- warning: set.mk_preimage_sigma -> Set.mk_preimage_sigma is a dubious translation: lean 3 declaration is forall {ι : Type.{u1}} {α : ι -> Type.{u2}} {s : Set.{u1} ι} {t : forall (i : ι), Set.{u2} (α i)} {i : ι}, (Membership.Mem.{u1, u1} ι (Set.{u1} ι) (Set.hasMem.{u1} ι) i s) -> (Eq.{succ u2} (Set.{u2} (α i)) (Set.preimage.{u2, max u1 u2} (α i) (Sigma.{u1, u2} ι (fun (i : ι) => α i)) (Sigma.mk.{u1, u2} ι (fun (i : ι) => α i) i) (Set.Sigma.{u1, u2} ι (fun (i : ι) => α i) s t)) (t i)) but is expected to have type forall {ι : Type.{u2}} {α : ι -> Type.{u1}} {s : Set.{u2} ι} {t : forall (i : ι), Set.{u1} (α i)} {i : ι}, (Membership.mem.{u2, u2} ι (Set.{u2} ι) (Set.instMembershipSet.{u2} ι) i s) -> (Eq.{succ u1} (Set.{u1} (α i)) (Set.preimage.{u1, max u2 u1} (α i) (Sigma.{u2, u1} ι (fun (i : ι) => α i)) (Sigma.mk.{u2, u1} ι (fun (i : ι) => α i) i) (Set.Sigma.{u2, u1} ι (fun (i : ι) => α i) s t)) (t i)) Case conversion may be inaccurate. Consider using '#align set.mk_preimage_sigma Set.mk_preimage_sigmaₓ'. 
-/ @[simp] theorem mk_preimage_sigma (hi : i ∈ s) : Sigma.mk i ⁻¹' s.Sigma t = t i := ext fun _ => and_iff_right hi #align set.mk_preimage_sigma Set.mk_preimage_sigma /- warning: set.mk_preimage_sigma_eq_empty -> Set.mk_preimage_sigma_eq_empty is a dubious translation: lean 3 declaration is forall {ι : Type.{u1}} {α : ι -> Type.{u2}} {s : Set.{u1} ι} {t : forall (i : ι), Set.{u2} (α i)} {i : ι}, (Not (Membership.Mem.{u1, u1} ι (Set.{u1} ι) (Set.hasMem.{u1} ι) i s)) -> (Eq.{succ u2} (Set.{u2} (α i)) (Set.preimage.{u2, max u1 u2} (α i) (Sigma.{u1, u2} ι (fun (i : ι) => α i)) (Sigma.mk.{u1, u2} ι (fun (i : ι) => α i) i) (Set.Sigma.{u1, u2} ι (fun (i : ι) => α i) s t)) (EmptyCollection.emptyCollection.{u2} (Set.{u2} (α i)) (Set.hasEmptyc.{u2} (α i)))) but is expected to have type forall {ι : Type.{u2}} {α : ι -> Type.{u1}} {s : Set.{u2} ι} {t : forall (i : ι), Set.{u1} (α i)} {i : ι}, (Not (Membership.mem.{u2, u2} ι (Set.{u2} ι) (Set.instMembershipSet.{u2} ι) i s)) -> (Eq.{succ u1} (Set.{u1} (α i)) (Set.preimage.{u1, max u2 u1} (α i) (Sigma.{u2, u1} ι (fun (i : ι) => α i)) (Sigma.mk.{u2, u1} ι (fun (i : ι) => α i) i) (Set.Sigma.{u2, u1} ι (fun (i : ι) => α i) s t)) (EmptyCollection.emptyCollection.{u1} (Set.{u1} (α i)) (Set.instEmptyCollectionSet.{u1} (α i)))) Case conversion may be inaccurate. Consider using '#align set.mk_preimage_sigma_eq_empty Set.mk_preimage_sigma_eq_emptyₓ'. -/ @[simp] theorem mk_preimage_sigma_eq_empty (hi : i ∉ s) : Sigma.mk i ⁻¹' s.Sigma t = ∅ := ext fun _ => iff_of_false (hi ∘ And.left) id #align set.mk_preimage_sigma_eq_empty Set.mk_preimage_sigma_eq_empty /- warning: set.mk_preimage_sigma_eq_if -> Set.mk_preimage_sigma_eq_if is a dubious translation: lean 3 declaration is forall {ι : Type.{u1}} {α : ι -> Type.{u2}} {s : Set.{u1} ι} {t : forall (i : ι), Set.{u2} (α i)} {i : ι} [_inst_1 : DecidablePred.{succ u1} ι (fun (_x : ι) => Membership.Mem.{u1, u1} ι (Set.{u1} ι) (Set.hasMem.{u1} ι) _x s)], Eq.{succ u2} (Set.{u2} (α i)) (Set.preimage.{u2, max u1 u2} (α i) (Sigma.{u1, u2} ι (fun (i : ι) => α i)) (Sigma.mk.{u1, u2} ι (fun (i : ι) => α i) i) (Set.Sigma.{u1, u2} ι (fun (i : ι) => α i) s t)) (ite.{succ u2} (Set.{u2} (α i)) (Membership.Mem.{u1, u1} ι (Set.{u1} ι) (Set.hasMem.{u1} ι) i s) (_inst_1 i) (t i) (EmptyCollection.emptyCollection.{u2} (Set.{u2} (α i)) (Set.hasEmptyc.{u2} (α i)))) but is expected to have type forall {ι : Type.{u2}} {α : ι -> Type.{u1}} {s : Set.{u2} ι} {t : forall (i : ι), Set.{u1} (α i)} {i : ι} [_inst_1 : DecidablePred.{succ u2} ι (fun (_x : ι) => Membership.mem.{u2, u2} ι (Set.{u2} ι) (Set.instMembershipSet.{u2} ι) _x s)], Eq.{succ u1} (Set.{u1} (α i)) (Set.preimage.{u1, max u2 u1} (α i) (Sigma.{u2, u1} ι (fun (i : ι) => α i)) (Sigma.mk.{u2, u1} ι (fun (i : ι) => α i) i) (Set.Sigma.{u2, u1} ι (fun (i : ι) => α i) s t)) (ite.{succ u1} (Set.{u1} (α i)) (Membership.mem.{u2, u2} ι (Set.{u2} ι) (Set.instMembershipSet.{u2} ι) i s) (_inst_1 i) (t i) (EmptyCollection.emptyCollection.{u1} (Set.{u1} (α i)) (Set.instEmptyCollectionSet.{u1} (α i)))) Case conversion may be inaccurate. Consider using '#align set.mk_preimage_sigma_eq_if Set.mk_preimage_sigma_eq_ifₓ'. 
-/ theorem mk_preimage_sigma_eq_if [DecidablePred (· ∈ s)] : Sigma.mk i ⁻¹' s.Sigma t = if i ∈ s then t i else ∅ := by split_ifs <;> simp [h] #align set.mk_preimage_sigma_eq_if Set.mk_preimage_sigma_eq_if /- warning: set.mk_preimage_sigma_fn_eq_if -> Set.mk_preimage_sigma_fn_eq_if is a dubious translation: lean 3 declaration is forall {ι : Type.{u1}} {α : ι -> Type.{u2}} {s : Set.{u1} ι} {t : forall (i : ι), Set.{u2} (α i)} {i : ι} {β : Type.{u3}} [_inst_1 : DecidablePred.{succ u1} ι (fun (_x : ι) => Membership.Mem.{u1, u1} ι (Set.{u1} ι) (Set.hasMem.{u1} ι) _x s)] (g : β -> (α i)), Eq.{succ u3} (Set.{u3} β) (Set.preimage.{u3, max u1 u2} β (Sigma.{u1, u2} ι (fun {i : ι} => α i)) (fun (b : β) => Sigma.mk.{u1, u2} ι (fun {i : ι} => α i) i (g b)) (Set.Sigma.{u1, u2} ι (fun (i : ι) => α i) s t)) (ite.{succ u3} (Set.{u3} β) (Membership.Mem.{u1, u1} ι (Set.{u1} ι) (Set.hasMem.{u1} ι) i s) (_inst_1 i) (Set.preimage.{u3, u2} β (α i) g (t i)) (EmptyCollection.emptyCollection.{u3} (Set.{u3} β) (Set.hasEmptyc.{u3} β))) but is expected to have type forall {ι : Type.{u2}} {α : ι -> Type.{u1}} {s : Set.{u2} ι} {t : forall (i : ι), Set.{u1} (α i)} {i : ι} {β : Type.{u3}} [_inst_1 : DecidablePred.{succ u2} ι (fun (_x : ι) => Membership.mem.{u2, u2} ι (Set.{u2} ι) (Set.instMembershipSet.{u2} ι) _x s)] (g : β -> (α i)), Eq.{succ u3} (Set.{u3} β) (Set.preimage.{u3, max u1 u2} β (Sigma.{u2, u1} ι α) (fun (b : β) => Sigma.mk.{u2, u1} ι α i (g b)) (Set.Sigma.{u2, u1} ι (fun (i : ι) => α i) s t)) (ite.{succ u3} (Set.{u3} β) (Membership.mem.{u2, u2} ι (Set.{u2} ι) (Set.instMembershipSet.{u2} ι) i s) (_inst_1 i) (Set.preimage.{u3, u1} β (α i) g (t i)) (EmptyCollection.emptyCollection.{u3} (Set.{u3} β) (Set.instEmptyCollectionSet.{u3} β))) Case conversion may be inaccurate. Consider using '#align set.mk_preimage_sigma_fn_eq_if Set.mk_preimage_sigma_fn_eq_ifₓ'. 
-/ theorem mk_preimage_sigma_fn_eq_if {β : Type _} [DecidablePred (· ∈ s)] (g : β → α i) : (fun b => Sigma.mk i (g b)) ⁻¹' s.Sigma t = if i ∈ s then g ⁻¹' t i else ∅ := ext fun _ => by split_ifs <;> simp [h] #align set.mk_preimage_sigma_fn_eq_if Set.mk_preimage_sigma_fn_eq_if /- warning: set.sigma_univ_range_eq -> Set.sigma_univ_range_eq is a dubious translation: lean 3 declaration is forall {ι : Type.{u1}} {α : ι -> Type.{u2}} {β : ι -> Type.{u3}} {f : forall (i : ι), (α i) -> (β i)}, Eq.{succ (max u1 u3)} (Set.{max u1 u3} (Sigma.{u1, u3} ι (fun (i : ι) => β i))) (Set.Sigma.{u1, u3} ι (fun (i : ι) => β i) (Set.univ.{u1} ι) (fun (i : ι) => Set.range.{u3, succ u2} (β i) (α i) (f i))) (Set.range.{max u1 u3, max (succ u1) (succ u2)} (Sigma.{u1, u3} ι (fun (i : ι) => β i)) (Sigma.{u1, u2} ι (fun (i : ι) => α i)) (fun (x : Sigma.{u1, u2} ι (fun (i : ι) => α i)) => Sigma.mk.{u1, u3} ι (fun (i : ι) => β i) (Sigma.fst.{u1, u2} ι (fun (i : ι) => α i) x) (f (Sigma.fst.{u1, u2} ι (fun (i : ι) => α i) x) (Sigma.snd.{u1, u2} ι (fun (i : ι) => α i) x)))) but is expected to have type forall {ι : Type.{u3}} {α : ι -> Type.{u1}} {β : ι -> Type.{u2}} {f : forall (i : ι), (α i) -> (β i)}, Eq.{max (succ u3) (succ u2)} (Set.{max u2 u3} (Sigma.{u3, u2} ι (fun (i : ι) => β i))) (Set.Sigma.{u3, u2} ι (fun (i : ι) => β i) (Set.univ.{u3} ι) (fun (i : ι) => Set.range.{u2, succ u1} (β i) (α i) (f i))) (Set.range.{max u3 u2, max (succ u3) (succ u1)} (Sigma.{u3, u2} ι (fun (i : ι) => β i)) (Sigma.{u3, u1} ι (fun (i : ι) => α i)) (fun (x : Sigma.{u3, u1} ι (fun (i : ι) => α i)) => Sigma.mk.{u3, u2} ι (fun (i : ι) => β i) (Sigma.fst.{u3, u1} ι (fun (i : ι) => α i) x) (f (Sigma.fst.{u3, u1} ι (fun (i : ι) => α i) x) (Sigma.snd.{u3, u1} ι (fun (i : ι) => α i) x)))) Case conversion may be inaccurate. Consider using '#align set.sigma_univ_range_eq Set.sigma_univ_range_eqₓ'. -/ theorem sigma_univ_range_eq {f : ∀ i, α i → β i} : ((univ : Set ι).Sigma fun i => range (f i)) = range fun x : Σi, α i => ⟨x.1, f _ x.2⟩ := ext <| by simp [range] #align set.sigma_univ_range_eq Set.sigma_univ_range_eq /- warning: set.nonempty.sigma -> Set.Nonempty.sigma is a dubious translation: lean 3 declaration is forall {ι : Type.{u1}} {α : ι -> Type.{u2}} {s : Set.{u1} ι} {t : forall (i : ι), Set.{u2} (α i)}, (Set.Nonempty.{u1} ι s) -> (forall (i : ι), Set.Nonempty.{u2} (α i) (t i)) -> (Set.Nonempty.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i)) (Set.Sigma.{u1, u2} ι (fun (i : ι) => α i) s t)) but is expected to have type forall {ι : Type.{u2}} {α : ι -> Type.{u1}} {s : Set.{u2} ι} {t : forall (i : ι), Set.{u1} (α i)}, (Set.Nonempty.{u2} ι s) -> (forall (i : ι), Set.Nonempty.{u1} (α i) (t i)) -> (Set.Nonempty.{max u2 u1} (Sigma.{u2, u1} ι (fun (i : ι) => α i)) (Set.Sigma.{u2, u1} ι (fun (i : ι) => α i) s t)) Case conversion may be inaccurate. Consider using '#align set.nonempty.sigma Set.Nonempty.sigmaₓ'. 
-/ protected theorem Nonempty.sigma : s.Nonempty → (∀ i, (t i).Nonempty) → (s.Sigma t : Set _).Nonempty := fun ⟨i, hi⟩ h => let ⟨a, ha⟩ := h i ⟨⟨i, a⟩, hi, ha⟩ #align set.nonempty.sigma Set.Nonempty.sigma /- warning: set.nonempty.sigma_fst -> Set.Nonempty.sigma_fst is a dubious translation: lean 3 declaration is forall {ι : Type.{u1}} {α : ι -> Type.{u2}} {s : Set.{u1} ι} {t : forall (i : ι), Set.{u2} (α i)}, (Set.Nonempty.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i)) (Set.Sigma.{u1, u2} ι (fun (i : ι) => α i) s t)) -> (Set.Nonempty.{u1} ι s) but is expected to have type forall {ι : Type.{u2}} {α : ι -> Type.{u1}} {s : Set.{u2} ι} {t : forall (i : ι), Set.{u1} (α i)}, (Set.Nonempty.{max u2 u1} (Sigma.{u2, u1} ι (fun (i : ι) => α i)) (Set.Sigma.{u2, u1} ι (fun (i : ι) => α i) s t)) -> (Set.Nonempty.{u2} ι s) Case conversion may be inaccurate. Consider using '#align set.nonempty.sigma_fst Set.Nonempty.sigma_fstₓ'. -/ theorem Nonempty.sigma_fst : (s.Sigma t : Set _).Nonempty → s.Nonempty := fun ⟨x, hx⟩ => ⟨x.1, hx.1⟩ #align set.nonempty.sigma_fst Set.Nonempty.sigma_fst /- warning: set.nonempty.sigma_snd -> Set.Nonempty.sigma_snd is a dubious translation: lean 3 declaration is forall {ι : Type.{u1}} {α : ι -> Type.{u2}} {s : Set.{u1} ι} {t : forall (i : ι), Set.{u2} (α i)}, (Set.Nonempty.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i)) (Set.Sigma.{u1, u2} ι (fun (i : ι) => α i) s t)) -> (Exists.{succ u1} ι (fun (i : ι) => Exists.{0} (Membership.Mem.{u1, u1} ι (Set.{u1} ι) (Set.hasMem.{u1} ι) i s) (fun (H : Membership.Mem.{u1, u1} ι (Set.{u1} ι) (Set.hasMem.{u1} ι) i s) => Set.Nonempty.{u2} (α i) (t i)))) but is expected to have type forall {ι : Type.{u2}} {α : ι -> Type.{u1}} {s : Set.{u2} ι} {t : forall (i : ι), Set.{u1} (α i)}, (Set.Nonempty.{max u2 u1} (Sigma.{u2, u1} ι (fun (i : ι) => α i)) (Set.Sigma.{u2, u1} ι (fun (i : ι) => α i) s t)) -> (Exists.{succ u2} ι (fun (i : ι) => And (Membership.mem.{u2, u2} ι (Set.{u2} ι) (Set.instMembershipSet.{u2} ι) i s) (Set.Nonempty.{u1} (α i) (t i)))) Case conversion may be inaccurate. Consider using '#align set.nonempty.sigma_snd Set.Nonempty.sigma_sndₓ'. -/ theorem Nonempty.sigma_snd : (s.Sigma t : Set _).Nonempty → ∃ i ∈ s, (t i).Nonempty := fun ⟨x, hx⟩ => ⟨x.1, hx.1, x.2, hx.2⟩ #align set.nonempty.sigma_snd Set.Nonempty.sigma_snd /- warning: set.sigma_nonempty_iff -> Set.sigma_nonempty_iff is a dubious translation: lean 3 declaration is forall {ι : Type.{u1}} {α : ι -> Type.{u2}} {s : Set.{u1} ι} {t : forall (i : ι), Set.{u2} (α i)}, Iff (Set.Nonempty.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i)) (Set.Sigma.{u1, u2} ι (fun (i : ι) => α i) s t)) (Exists.{succ u1} ι (fun (i : ι) => Exists.{0} (Membership.Mem.{u1, u1} ι (Set.{u1} ι) (Set.hasMem.{u1} ι) i s) (fun (H : Membership.Mem.{u1, u1} ι (Set.{u1} ι) (Set.hasMem.{u1} ι) i s) => Set.Nonempty.{u2} (α i) (t i)))) but is expected to have type forall {ι : Type.{u2}} {α : ι -> Type.{u1}} {s : Set.{u2} ι} {t : forall (i : ι), Set.{u1} (α i)}, Iff (Set.Nonempty.{max u2 u1} (Sigma.{u2, u1} ι (fun (i : ι) => α i)) (Set.Sigma.{u2, u1} ι (fun (i : ι) => α i) s t)) (Exists.{succ u2} ι (fun (i : ι) => And (Membership.mem.{u2, u2} ι (Set.{u2} ι) (Set.instMembershipSet.{u2} ι) i s) (Set.Nonempty.{u1} (α i) (t i)))) Case conversion may be inaccurate. Consider using '#align set.sigma_nonempty_iff Set.sigma_nonempty_iffₓ'. 
-/
theorem sigma_nonempty_iff : (s.Sigma t : Set _).Nonempty ↔ ∃ i ∈ s, (t i).Nonempty :=
  ⟨Nonempty.sigma_snd, fun ⟨i, hi, a, ha⟩ => ⟨⟨i, a⟩, hi, ha⟩⟩
#align set.sigma_nonempty_iff Set.sigma_nonempty_iff

/- warning: set.sigma_eq_empty_iff -> Set.sigma_eq_empty_iff is a dubious translation:
lean 3 declaration is
  forall {ι : Type.{u1}} {α : ι -> Type.{u2}} {s : Set.{u1} ι} {t : forall (i : ι), Set.{u2} (α i)}, Iff (Eq.{succ (max u1 u2)} (Set.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) (Set.Sigma.{u1, u2} ι (fun (i : ι) => α i) s t) (EmptyCollection.emptyCollection.{max u1 u2} (Set.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) (Set.hasEmptyc.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))))) (forall (i : ι), (Membership.Mem.{u1, u1} ι (Set.{u1} ι) (Set.hasMem.{u1} ι) i s) -> (Eq.{succ u2} (Set.{u2} (α i)) (t i) (EmptyCollection.emptyCollection.{u2} (Set.{u2} (α i)) (Set.hasEmptyc.{u2} (α i)))))
but is expected to have type
  forall {ι : Type.{u2}} {α : ι -> Type.{u1}} {s : Set.{u2} ι} {t : forall (i : ι), Set.{u1} (α i)}, Iff (Eq.{max (succ u2) (succ u1)} (Set.{max u1 u2} (Sigma.{u2, u1} ι (fun (i : ι) => α i))) (Set.Sigma.{u2, u1} ι (fun (i : ι) => α i) s t) (EmptyCollection.emptyCollection.{max u2 u1} (Set.{max u1 u2} (Sigma.{u2, u1} ι (fun (i : ι) => α i))) (Set.instEmptyCollectionSet.{max u2 u1} (Sigma.{u2, u1} ι (fun (i : ι) => α i))))) (forall (i : ι), (Membership.mem.{u2, u2} ι (Set.{u2} ι) (Set.instMembershipSet.{u2} ι) i s) -> (Eq.{succ u1} (Set.{u1} (α i)) (t i) (EmptyCollection.emptyCollection.{u1} (Set.{u1} (α i)) (Set.instEmptyCollectionSet.{u1} (α i)))))
Case conversion may be inaccurate. Consider using '#align set.sigma_eq_empty_iff Set.sigma_eq_empty_iffₓ'. -/
theorem sigma_eq_empty_iff : s.Sigma t = ∅ ↔ ∀ i ∈ s, t i = ∅ :=
  not_nonempty_iff_eq_empty.symm.trans <|
    sigma_nonempty_iff.Not.trans <| by simp only [not_nonempty_iff_eq_empty, not_exists]
#align set.sigma_eq_empty_iff Set.sigma_eq_empty_iff

#print Set.image_sigmaMk_subset_sigma_left /-
theorem image_sigmaMk_subset_sigma_left {a : ∀ i, α i} (ha : ∀ i, a i ∈ t i) :
    (fun i => Sigma.mk i (a i)) '' s ⊆ s.Sigma t :=
  image_subset_iff.2 fun i hi => ⟨hi, ha _⟩
#align set.image_sigma_mk_subset_sigma_left Set.image_sigmaMk_subset_sigma_left
-/

/- warning: set.image_sigma_mk_subset_sigma_right -> Set.image_sigmaMk_subset_sigma_right is a dubious translation:
lean 3 declaration is
  forall {ι : Type.{u1}} {α : ι -> Type.{u2}} {s : Set.{u1} ι} {t : forall (i : ι), Set.{u2} (α i)} {i : ι}, (Membership.Mem.{u1, u1} ι (Set.{u1} ι) (Set.hasMem.{u1} ι) i s) -> (HasSubset.Subset.{max u1 u2} (Set.{max u1 u2} (Sigma.{u1, u2} ι (fun {i : ι} => α i))) (Set.hasSubset.{max u1 u2} (Sigma.{u1, u2} ι (fun {i : ι} => α i))) (Set.image.{u2, max u1 u2} (α i) (Sigma.{u1, u2} ι (fun {i : ι} => α i)) (Sigma.mk.{u1, u2} ι (fun {i : ι} => α i) i) (t i)) (Set.Sigma.{u1, u2} ι (fun (i : ι) => α i) s t))
but is expected to have type
  forall {ι : Type.{u2}} {α : ι -> Type.{u1}} {s : Set.{u2} ι} {t : forall (i : ι), Set.{u1} (α i)} {i : ι}, (Membership.mem.{u2, u2} ι (Set.{u2} ι) (Set.instMembershipSet.{u2} ι) i s) -> (HasSubset.Subset.{max u2 u1} (Set.{max u2 u1} (Sigma.{u2, u1} ι α)) (Set.instHasSubsetSet.{max u2 u1} (Sigma.{u2, u1} ι α)) (Set.image.{u1, max u2 u1} (α i) (Sigma.{u2, u1} ι α) (Sigma.mk.{u2, u1} ι α i) (t i)) (Set.Sigma.{u2, u1} ι (fun (i : ι) => α i) s t))
Case conversion may be inaccurate. Consider using '#align set.image_sigma_mk_subset_sigma_right Set.image_sigmaMk_subset_sigma_rightₓ'. -/
theorem image_sigmaMk_subset_sigma_right (hi : i ∈ s) : Sigma.mk i '' t i ⊆ s.Sigma t :=
  image_subset_iff.2 fun a => And.intro hi
#align set.image_sigma_mk_subset_sigma_right Set.image_sigmaMk_subset_sigma_right

/- warning: set.sigma_subset_preimage_fst -> Set.sigma_subset_preimage_fst is a dubious translation:
lean 3 declaration is
  forall {ι : Type.{u1}} {α : ι -> Type.{u2}} (s : Set.{u1} ι) (t : forall (i : ι), Set.{u2} (α i)), HasSubset.Subset.{max u1 u2} (Set.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) (Set.hasSubset.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) (Set.Sigma.{u1, u2} ι (fun (i : ι) => α i) s t) (Set.preimage.{max u1 u2, u1} (Sigma.{u1, u2} ι (fun (i : ι) => α i)) ι (Sigma.fst.{u1, u2} ι (fun (i : ι) => α i)) s)
but is expected to have type
  forall {ι : Type.{u2}} {α : ι -> Type.{u1}} (s : Set.{u2} ι) (t : forall (i : ι), Set.{u1} (α i)), HasSubset.Subset.{max u2 u1} (Set.{max u1 u2} (Sigma.{u2, u1} ι (fun (i : ι) => α i))) (Set.instHasSubsetSet.{max u2 u1} (Sigma.{u2, u1} ι (fun (i : ι) => α i))) (Set.Sigma.{u2, u1} ι (fun (i : ι) => α i) s t) (Set.preimage.{max u2 u1, u2} (Sigma.{u2, u1} ι (fun (i : ι) => α i)) ι (Sigma.fst.{u2, u1} ι (fun (i : ι) => α i)) s)
Case conversion may be inaccurate. Consider using '#align set.sigma_subset_preimage_fst Set.sigma_subset_preimage_fstₓ'. -/
theorem sigma_subset_preimage_fst (s : Set ι) (t : ∀ i, Set (α i)) : s.Sigma t ⊆ Sigma.fst ⁻¹' s :=
  fun a => And.left
#align set.sigma_subset_preimage_fst Set.sigma_subset_preimage_fst

/- warning: set.fst_image_sigma_subset -> Set.fst_image_sigma_subset is a dubious translation:
lean 3 declaration is
  forall {ι : Type.{u1}} {α : ι -> Type.{u2}} (s : Set.{u1} ι) (t : forall (i : ι), Set.{u2} (α i)), HasSubset.Subset.{u1} (Set.{u1} ι) (Set.hasSubset.{u1} ι) (Set.image.{max u1 u2, u1} (Sigma.{u1, u2} ι (fun (i : ι) => α i)) ι (Sigma.fst.{u1, u2} ι (fun (i : ι) => α i)) (Set.Sigma.{u1, u2} ι (fun (i : ι) => α i) s t)) s
but is expected to have type
  forall {ι : Type.{u2}} {α : ι -> Type.{u1}} (s : Set.{u2} ι) (t : forall (i : ι), Set.{u1} (α i)), HasSubset.Subset.{u2} (Set.{u2} ι) (Set.instHasSubsetSet.{u2} ι) (Set.image.{max u1 u2, u2} (Sigma.{u2, u1} ι (fun (i : ι) => α i)) ι (Sigma.fst.{u2, u1} ι (fun (i : ι) => α i)) (Set.Sigma.{u2, u1} ι (fun (i : ι) => α i) s t)) s
Case conversion may be inaccurate. Consider using '#align set.fst_image_sigma_subset Set.fst_image_sigma_subsetₓ'. -/
theorem fst_image_sigma_subset (s : Set ι) (t : ∀ i, Set (α i)) : Sigma.fst '' s.Sigma t ⊆ s :=
  image_subset_iff.2 fun a => And.left
#align set.fst_image_sigma_subset Set.fst_image_sigma_subset

/- warning: set.fst_image_sigma -> Set.fst_image_sigma is a dubious translation:
lean 3 declaration is
  forall {ι : Type.{u1}} {α : ι -> Type.{u2}} {t : forall (i : ι), Set.{u2} (α i)} (s : Set.{u1} ι), (forall (i : ι), Set.Nonempty.{u2} (α i) (t i)) -> (Eq.{succ u1} (Set.{u1} ι) (Set.image.{max u1 u2, u1} (Sigma.{u1, u2} ι (fun (i : ι) => α i)) ι (Sigma.fst.{u1, u2} ι (fun (i : ι) => α i)) (Set.Sigma.{u1, u2} ι (fun (i : ι) => α i) s t)) s)
but is expected to have type
  forall {ι : Type.{u2}} {α : ι -> Type.{u1}} {t : forall (i : ι), Set.{u1} (α i)} (s : Set.{u2} ι), (forall (i : ι), Set.Nonempty.{u1} (α i) (t i)) -> (Eq.{succ u2} (Set.{u2} ι) (Set.image.{max u1 u2, u2} (Sigma.{u2, u1} ι (fun (i : ι) => α i)) ι (Sigma.fst.{u2, u1} ι (fun (i : ι) => α i)) (Set.Sigma.{u2, u1} ι (fun (i : ι) => α i) s t)) s)
Case conversion may be inaccurate. Consider using '#align set.fst_image_sigma Set.fst_image_sigmaₓ'. -/
theorem fst_image_sigma (s : Set ι) (ht : ∀ i, (t i).Nonempty) : Sigma.fst '' s.Sigma t = s :=
  (fst_image_sigma_subset _ _).antisymm fun i hi =>
    let ⟨a, ha⟩ := ht i
    ⟨⟨i, a⟩, ⟨hi, ha⟩, rfl⟩
#align set.fst_image_sigma Set.fst_image_sigma

/- warning: set.sigma_diff_sigma -> Set.sigma_diff_sigma is a dubious translation:
lean 3 declaration is
  forall {ι : Type.{u1}} {α : ι -> Type.{u2}} {s₁ : Set.{u1} ι} {s₂ : Set.{u1} ι} {t₁ : forall (i : ι), Set.{u2} (α i)} {t₂ : forall (i : ι), Set.{u2} (α i)}, Eq.{succ (max u1 u2)} (Set.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) (SDiff.sdiff.{max u1 u2} (Set.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) (BooleanAlgebra.toHasSdiff.{max u1 u2} (Set.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) (Set.booleanAlgebra.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i)))) (Set.Sigma.{u1, u2} ι (fun (i : ι) => α i) s₁ t₁) (Set.Sigma.{u1, u2} ι (fun (i : ι) => α i) s₂ t₂)) (Union.union.{max u1 u2} (Set.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) (Set.hasUnion.{max u1 u2} (Sigma.{u1, u2} ι (fun (i : ι) => α i))) (Set.Sigma.{u1, u2} ι (fun (i : ι) => α i) s₁ (SDiff.sdiff.{max u1 u2} (forall (i : ι), Set.{u2} (α i)) (Pi.sdiff.{u1, u2} ι (fun (i : ι) => Set.{u2} (α i)) (fun (i : ι) => BooleanAlgebra.toHasSdiff.{u2} (Set.{u2} (α i)) (Set.booleanAlgebra.{u2} (α i)))) t₁ t₂)) (Set.Sigma.{u1, u2} ι (fun (i : ι) => α i) (SDiff.sdiff.{u1} (Set.{u1} ι) (BooleanAlgebra.toHasSdiff.{u1} (Set.{u1} ι) (Set.booleanAlgebra.{u1} ι)) s₁ s₂) t₁))
but is expected to have type
  forall {ι : Type.{u2}} {α : ι -> Type.{u1}} {s₁ : Set.{u2} ι} {s₂ : Set.{u2} ι} {t₁ : forall (i : ι), Set.{u1} (α i)} {t₂ : forall (i : ι), Set.{u1} (α i)}, Eq.{max (succ u2) (succ u1)} (Set.{max u1 u2} (Sigma.{u2, u1} ι (fun (i : ι) => α i))) (SDiff.sdiff.{max u2 u1} (Set.{max u1 u2} (Sigma.{u2, u1} ι (fun (i : ι) => α i))) (Set.instSDiffSet.{max u2 u1} (Sigma.{u2, u1} ι (fun (i : ι) => α i))) (Set.Sigma.{u2, u1} ι (fun (i : ι) => α i) s₁ t₁) (Set.Sigma.{u2, u1} ι (fun (i : ι) => α i) s₂ t₂)) (Union.union.{max u2 u1} (Set.{max u1 u2} (Sigma.{u2, u1} ι (fun (i : ι) => α i))) (Set.instUnionSet.{max u2 u1} (Sigma.{u2, u1} ι (fun (i : ι) => α i))) (Set.Sigma.{u2, u1} ι (fun (i : ι) => α i) s₁ (SDiff.sdiff.{max u2 u1} (forall (i : ι), Set.{u1} (α i)) (Pi.sdiff.{u2, u1} ι (fun (i : ι) => Set.{u1} (α i)) (fun (i : ι) => Set.instSDiffSet.{u1} (α i))) t₁ t₂)) (Set.Sigma.{u2, u1} ι (fun (i : ι) => α i) (SDiff.sdiff.{u2} (Set.{u2} ι) (Set.instSDiffSet.{u2} ι) s₁ s₂) t₁))
Case conversion may be inaccurate. Consider using '#align set.sigma_diff_sigma Set.sigma_diff_sigmaₓ'. -/
theorem sigma_diff_sigma : s₁.Sigma t₁ \ s₂.Sigma t₂ = s₁.Sigma (t₁ \ t₂) ∪ (s₁ \ s₂).Sigma t₁ :=
  ext fun x => by
    by_cases h₁ : x.1 ∈ s₁ <;> by_cases h₂ : x.2 ∈ t₁ x.1 <;> simp [*, ← imp_iff_or_not]
#align set.sigma_diff_sigma Set.sigma_diff_sigma

end Set
variables A B C D P Q R: Prop

example : A ∧ (A → B) → B :=
  assume ⟨ hA, hAimpB ⟩,
  hAimpB hA

example : A → ¬ (¬ A ∧ B) :=
  assume : A,
  show ¬ (¬ A ∧ B), from
    assume ⟨ hnotA, hB ⟩,
    show false, from hnotA ‹A›

example : ¬ (A ∧ B) → (A → ¬ B) :=
  assume : ¬ (A ∧ B),
  show (A → ¬ B), from
    assume : A,
    show ¬ B, from
      assume : B,
      show false, from ‹¬ (A ∧ B)› ⟨ ‹A› , ‹B› ⟩

example (h₁ : A ∨ B) (h₂ : A → C) (h₃ : B → D) : C ∨ D :=
  show C ∨ D, from
    or.elim h₁
      (assume : A, show C ∨ D, from or.inl (h₂ ‹A›))
      (assume : B, show C ∨ D, from or.inr (h₃ ‹B›))

example : ¬ (A ↔ ¬ A) :=
  assume : (A ↔ ¬ A),
  show false, from
    have ¬ A, from
      assume : A,
      show false, from
        have ¬ A, from iff.elim_left ‹A ↔ ¬ A› ‹A›,
        ‹¬ A› ‹A›,
    have A, from iff.elim_right ‹A ↔ ¬ A› ‹¬ A›,
    ‹¬ A› ‹A›

open classical

------------------------------------------------------------

example (h : ¬ A ∧ ¬ B) : ¬ (A ∨ B) :=
  have ¬ A, from h.left,
  have ¬ B, from h.right,
  show ¬ (A ∨ B), from
    assume : (A ∨ B),
    show false, from
      or.elim ‹A ∨ B›
        (assume : A, (‹¬ A› ‹A›))
        (assume : B, (‹¬ B› ‹B›))

example (h: ¬ (A ∨ B)) : (¬ A ∧ ¬ B) :=
  have ¬ A, from
    assume : A,
    show false, from
      have (A ∨ B), from or.inl ‹A›,
      ‹¬ (A ∨ B)› ‹A ∨ B›,
  have ¬ B, from
    assume : B,
    show false, from
      have (A ∨ B), from or.inr ‹B›,
      ‹¬ (A ∨ B)› ‹A ∨ B›,
  ⟨ ‹¬ A› , ‹¬ B› ⟩

------------------------------------------------------------

example (h: ¬ A ∨ ¬ B) : ¬ (A ∧ B) :=
  or.elim h
    (assume : ¬ A,
      show ¬ (A ∧ B), from
        assume h1: (A ∧ B),
        ‹¬ A› h1.left)
    (assume : ¬ B,
      show ¬ (A ∧ B), from
        assume h1: (A ∧ B),
        ‹¬ B› h1.right)

example (h: ¬ (A ∧ B)) : ¬ A ∨ ¬ B :=
  by_contradiction(
    assume h1: ¬ (¬ A ∨ ¬ B),
    have A, from by_contradiction(assume : ¬ A, have h2: ¬ A ∨ ¬ B, from or.inl ‹¬ A›, h1 h2),
    have B, from by_contradiction(assume : ¬ B, have h2: ¬ A ∨ ¬ B, from or.inr ‹¬ B›, h1 h2),
    h ⟨ ‹A›, ‹B› ⟩ )

------------------------------------------------------------

-- Also known as em A
example : A ∨ ¬ A :=
  by_contradiction(
    assume h: ¬ (A ∨ ¬ A),
    have ¬ A, from
      assume : A,
      show false, from
        have (A ∨ ¬ A), from or.inl ‹A›,
        h ‹A ∨ ¬ A›,
    have ¬ ¬ A, from
      assume : ¬ A,
      show false, from
        have (A ∨ ¬ A), from or.inr ‹¬ A›,
        h ‹A ∨ ¬ A›,
    ‹¬ ¬ A› ‹¬ A› )

example (h: ¬ ¬ A) : A :=
  by_contradiction(assume h1 : ¬ A, h h1)

example (h: A) : ¬ ¬ A :=
  show ¬ ¬ A, from
    assume : ¬ A,
    ‹¬ A› ‹A›

------------------------------------------------------------

example (h: A → B) : ¬ A ∨ B :=
  or.elim (em A)
    (assume : A, show ¬ A ∨ B, from or.inr (h ‹A›))
    (assume : ¬ A, show ¬ A ∨ B, from or.inl ‹¬ A›)

example (h: ¬ A ∨ B) : A → B :=
  assume : A,
  or.elim h
    (assume : ¬ A, show B, from false.elim (‹¬ A› ‹A›))
    (assume : B, ‹B›)

------------------------------------------------------------

example (h: A → B) : ¬ B → ¬ A :=
  assume : ¬ B,
  show ¬ A, from
    assume : A,
    ‹¬ B› (h ‹A›)

example (h: ¬ B → ¬ A) : A → B :=
  assume : A,
  or.elim (em B)
    (assume : B, ‹B›)
    (assume : ¬ B, show B, from false.elim ((h ‹¬ B›) ‹A›))

------------------------------------------------------------

example (h: ¬ P → (Q ∨ R)) (h1: ¬ Q) (h2: ¬ R) : P :=
  or.elim (em P)
    (assume : P, ‹P›)
    (assume : ¬ P,
      show P, from false.elim (
        or.elim (h ‹¬ P›)
          (assume : Q, h1 ‹Q›)
          (assume : R, h2 ‹R›)))

example : A → ((A ∧ B) ∨ (A ∧ ¬ B)) :=
  assume : A,
  or.elim (em B)
    (assume : B, show ((A ∧ B) ∨ (A ∧ ¬ B)), from or.inl ⟨‹A›,‹B›⟩)
    (assume : ¬ B, show ((A ∧ B) ∨ (A ∧ ¬ B)), from or.inr ⟨‹A›,‹¬ B›⟩)
(*  Title:      Code_Target_Bits_Int.thy
    Author:     Andreas Lochbihler, ETH Zurich
*)

chapter \<open>Implementation of bit operations on int by target language operations\<close>

theory Code_Target_Bits_Int
imports
  Bits_Integer
  "HOL-Library.Code_Target_Int"
begin

declare [[code drop:
  "(AND) :: int \<Rightarrow> _" "(OR) :: int \<Rightarrow> _" "(XOR) :: int \<Rightarrow> _" "(NOT) :: int \<Rightarrow> _"
  "lsb :: int \<Rightarrow> _" "set_bit :: int \<Rightarrow> _" "bit :: int \<Rightarrow> _"
  "push_bit :: _ \<Rightarrow> int \<Rightarrow> _" "drop_bit :: _ \<Rightarrow> int \<Rightarrow> _"
  int_of_integer_symbolic
  ]]

declare bitval_bin_last [code_unfold]

lemma [code_unfold]:
  \<open>bit x n \<longleftrightarrow> x AND (push_bit n 1) \<noteq> 0\<close> for x :: int
  by (fact bit_iff_and_push_bit_not_eq_0)

context
includes integer.lifting
begin

lemma bit_int_code [code]: "bit (int_of_integer x) n = bit x n"
  by transfer simp

lemma and_int_code [code]: "int_of_integer i AND int_of_integer j = int_of_integer (i AND j)"
  by transfer simp

lemma or_int_code [code]: "int_of_integer i OR int_of_integer j = int_of_integer (i OR j)"
  by transfer simp

lemma xor_int_code [code]: "int_of_integer i XOR int_of_integer j = int_of_integer (i XOR j)"
  by transfer simp

lemma not_int_code [code]: "NOT (int_of_integer i) = int_of_integer (NOT i)"
  by transfer simp

lemma push_bit_int_code [code]:
  \<open>push_bit n (int_of_integer x) = int_of_integer (push_bit n x)\<close>
  by transfer simp

lemma drop_bit_int_code [code]:
  \<open>drop_bit n (int_of_integer x) = int_of_integer (drop_bit n x)\<close>
  by transfer simp

lemma take_bit_int_code [code]:
  \<open>take_bit n (int_of_integer x) = int_of_integer (take_bit n x)\<close>
  by transfer simp

lemma lsb_int_code [code]: "lsb (int_of_integer x) = lsb x"
  by transfer simp

lemma set_bit_int_code [code]: "set_bit (int_of_integer x) n b = int_of_integer (set_bit x n b)"
  by transfer simp

lemma int_of_integer_symbolic_code [code]:
  "int_of_integer_symbolic = int_of_integer"
  by (simp add: int_of_integer_symbolic_def)

context
begin

qualified definition even :: \<open>int \<Rightarrow> bool\<close>
  where [code_abbrev]: \<open>even = Parity.even\<close>

end

lemma bin_rest_code:
  "int_of_integer i div 2 = int_of_integer (bin_rest_integer i)"
  by transfer simp

end

end
#ifndef __GSL_PERMUTE_VECTOR_H__
#define __GSL_PERMUTE_VECTOR_H__

#if !defined( GSL_FUN )
#  if !defined( GSL_DLL )
#    define GSL_FUN extern
#  elif defined( BUILD_GSL_DLL )
#    define GSL_FUN extern __declspec(dllexport)
#  else
#    define GSL_FUN extern __declspec(dllimport)
#  endif
#endif

#include <gsl/gsl_permute_vector_complex_long_double.h>
#include <gsl/gsl_permute_vector_complex_double.h>
#include <gsl/gsl_permute_vector_complex_float.h>

#include <gsl/gsl_permute_vector_long_double.h>
#include <gsl/gsl_permute_vector_double.h>
#include <gsl/gsl_permute_vector_float.h>

#include <gsl/gsl_permute_vector_ulong.h>
#include <gsl/gsl_permute_vector_long.h>
#include <gsl/gsl_permute_vector_uint.h>
#include <gsl/gsl_permute_vector_int.h>
#include <gsl/gsl_permute_vector_ushort.h>
#include <gsl/gsl_permute_vector_short.h>
#include <gsl/gsl_permute_vector_uchar.h>
#include <gsl/gsl_permute_vector_char.h>

#endif /* __GSL_PERMUTE_VECTOR_H__ */
function test_suite = test_survival_coxph
% Run specific demo and save values for comparison.
%
% See also
% TEST_ALL, DEMO_SURVIVAL_COXPH
% Copyright (c) 2011-2012 Ville Tolvanen

initTestSuite;


function testDemo
% Set random number stream so that failing isn't because randomness. Run
% demo & save test values.
prevstream=setrandstream(0);

disp('Running: demo_survival_coxph')
demo_survival_coxph;
path = which('test_survival_coxph.m');
path = strrep(path,'test_survival_coxph.m', 'testValues');
if ~(exist(path, 'dir') == 7)
  mkdir(path)
end
path = strcat(path, '/testSurvival_coxph');
save(path, 'Ef1', 'Ef2', 'Varf1', 'Varf2');

% Set back initial random stream
setrandstream(prevstream);
drawnow;clear;close all


% Compare test values to real values.
function testPredictionsCoxph
values.real = load('realValuesSurvival_coxph', 'Ef1', 'Varf1', 'Ef2', 'Varf2');
values.test = load(strrep(which('test_survival_coxph.m'), 'test_survival_coxph.m', 'testValues/testSurvival_coxph'), 'Ef1', 'Varf1', 'Ef2', 'Varf2');
assertElementsAlmostEqual(values.real.Ef1, values.test.Ef1, 'absolute', 0.10);
assertElementsAlmostEqual(values.real.Ef2, values.test.Ef2, 'absolute', 0.10);
assertElementsAlmostEqual(values.real.Varf1, values.test.Varf1, 'absolute', 0.10);
assertElementsAlmostEqual(values.real.Varf2, values.test.Varf2, 'absolute', 0.10);
These funky little soap balls are super cute, with colourful surprises in every one! Created to look a bit like geodes, they keep what is inside a secret until you either slice them open or use them until their "shell" wears off! Each one is selected at random, so feel free to send us photos of your little gems once opened!