Dataset: AI4M
abstract type AbstractMonteCarloConfiguration end

mutable struct MonteCarloConfiguration{num1 <: Integer, num2 <: Integer, abstractMonteCarloMethod <: AbstractMonteCarloMethod, baseMode <: BaseMode, rngType <: Random.AbstractRNG} <: AbstractMonteCarloConfiguration
    Nsim::num1
    Nstep::num2
    monteCarloMethod::abstractMonteCarloMethod
    parallelMode::baseMode
    seed::Int64
    offset::Union{Int64, Nothing}
    rng::rngType

    function MonteCarloConfiguration(Nsim::num1, Nstep::num2, seed::Number) where {num1 <: Integer, num2 <: Integer}
        return MonteCarloConfiguration(Nsim, Nstep, StandardMC(), SerialMode(), Int64(seed), MersenneTwister())
    end

    function MonteCarloConfiguration(Nsim::num1, Nstep::num2, monteCarloMethod::abstractMonteCarloMethod = StandardMC(), seed::Number = 0) where {num1 <: Integer, num2 <: Integer, abstractMonteCarloMethod <: AbstractMonteCarloMethod}
        return MonteCarloConfiguration(Nsim, Nstep, monteCarloMethod, SerialMode(), Int64(seed), MersenneTwister())
    end

    function MonteCarloConfiguration(Nsim::num1, Nstep::num2, parallelMethod::baseMode, seed::Number = 0, rng::rngType_ = MersenneTwister()) where {num1 <: Integer, num2 <: Integer, baseMode <: BaseMode, rngType_ <: Random.AbstractRNG}
        return MonteCarloConfiguration(Nsim, Nstep, StandardMC(), parallelMethod, Int64(seed), rng)
    end

    # Most general constructor, no default arguments; offset is controllable from outside.
    function GeneralMonteCarloConfiguration(Nsim::num1, Nstep::num2, monteCarloMethod::abstractMonteCarloMethod, parallelMethod::baseMode, seed::Number, rng::rngType_) where {num1 <: Integer, num2 <: Integer, abstractMonteCarloMethod <: AbstractMonteCarloMethod, baseMode <: BaseMode, rngType_ <: Random.AbstractRNG}
        if Nsim <= zero(num1)
            error("Number of Simulations must be positive")
        elseif Nstep <= zero(num2)
            error("Number of Steps must be positive")
        else
            return new{num1, num2, abstractMonteCarloMethod, baseMode, rngType_}(Nsim, Nstep, monteCarloMethod, parallelMethod, Int64(seed), 0, rng)
        end
    end

    function GeneralMonteCarloConfiguration(Nsim::num1, Nstep::num2, monteCarloMethod::abstractMonteCarloMethod, parallelMethod::baseMode, seed::Number, rng::MersenneTwister, offset::Number) where {num1 <: Integer, num2 <: Integer, abstractMonteCarloMethod <: AbstractMonteCarloMethod, baseMode <: BaseMode}
        if Nsim <= zero(num1)
            error("Number of Simulations must be positive")
        elseif Nstep <= zero(num2)
            error("Number of Steps must be positive")
        else
            return new{num1, num2, abstractMonteCarloMethod, baseMode, MersenneTwister}(Nsim, Nstep, monteCarloMethod, parallelMethod, Int64(seed), offset, rng)
        end
    end

    function MonteCarloConfiguration(Nsim::num1, Nstep::num2, monteCarloMethod::abstractMonteCarloMethod, parallelMethod::baseMode, seed::Number, rng::MersenneTwister, offset::Number) where {num1 <: Integer, num2 <: Integer, abstractMonteCarloMethod <: AbstractMonteCarloMethod, baseMode <: BaseMode}
        if Nsim <= zero(num1)
            error("Number of Simulations must be positive")
        elseif Nstep <= zero(num2)
            error("Number of Steps must be positive")
        else
            return GeneralMonteCarloConfiguration(Nsim, Nstep, monteCarloMethod, parallelMethod, Int64(seed), rng, offset)
        end
    end

    function MonteCarloConfiguration(Nsim::num1, Nstep::num2, monteCarloMethod::abstractMonteCarloMethod, parallelMethod::baseMode, seed::Number = 0, rng::rngType_ = MersenneTwister()) where {num1 <: Integer, num2 <: Integer, abstractMonteCarloMethod <: AbstractMonteCarloMethod, baseMode <: BaseMode, rngType_ <: Random.AbstractRNG}
        return GeneralMonteCarloConfiguration(Nsim, Nstep, monteCarloMethod, parallelMethod, Int64(seed), rng)
    end

    function MonteCarloConfiguration(Nsim::num1, Nstep::num2, monteCarloMethod::AntitheticMC, parallelMethod::baseMode, seed::Number = 0, rng::rngType_ = MersenneTwister()) where {num1 <: Integer, num2 <: Integer, baseMode <: BaseMode, rngType_ <: Random.AbstractRNG}
        if div(Nsim, 2) * 2 != Nsim
            error("Antithetic sampling supports only an even number of simulations")
        else
            return GeneralMonteCarloConfiguration(Nsim, Nstep, monteCarloMethod, parallelMethod, Int64(seed), rng)
        end
    end
end

export MonteCarloConfiguration;
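The following Python sketch (not part of the library above; all names are illustrative) shows why the AntitheticMC constructor insists on an even Nsim: antithetic sampling pairs every draw with its mirror image, so paths come in twos.

import numpy as np

def antithetic_normals(n_sim, n_step, seed=0):
    """Return an (n_sim, n_step) array whose second half mirrors the first."""
    if n_sim % 2 != 0:
        raise ValueError("antithetic sampling needs an even number of simulations")
    rng = np.random.default_rng(seed)
    half = rng.standard_normal((n_sim // 2, n_step))
    # each path in the second half is the negation of a path in the first half
    return np.concatenate([half, -half], axis=0)

z = antithetic_normals(4, 3)
assert np.allclose(z[:2], -z[2:])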
{-# OPTIONS --without-K --rewriting #-}

open import lib.Basics
open import lib.cubical.Square
open import lib.types.Bool
open import lib.types.Cofiber
open import lib.types.Lift
open import lib.types.Paths
open import lib.types.Pointed
open import lib.types.PushoutFmap
open import lib.types.Sigma
open import lib.types.Span
open import lib.types.Suspension
open import lib.types.Wedge

module lib.types.BigWedge where

module _ {i j} {A : Type i} where

  {- the function for cofiber -}
  bigwedge-f : (X : A → Ptd j) → A → Σ A (de⊙ ∘ X)
  bigwedge-f X a = a , pt (X a)

  bigwedge-span : (A → Ptd j) → Span
  bigwedge-span X = cofiber-span (bigwedge-f X)

  BigWedge : (A → Ptd j) → Type (lmax i j)
  BigWedge X = Cofiber (bigwedge-f X)

  bwbase : {X : A → Ptd j} → BigWedge X
  bwbase = cfbase

  bwin : {X : A → Ptd j} → (a : A) → de⊙ (X a) → BigWedge X
  bwin = curry cfcod

  ⊙BigWedge : (A → Ptd j) → Ptd (lmax i j)
  ⊙BigWedge X = ⊙[ BigWedge X , bwbase ]

  bwglue : {X : A → Ptd j} → (a : A) → bwbase {X} == bwin a (pt (X a))
  bwglue = cfglue

  ⊙bwin : {X : A → Ptd j} → (a : A) → X a ⊙→ ⊙BigWedge X
  ⊙bwin a = (bwin a , ! (bwglue a))

  module BigWedgeElim {X : A → Ptd j} {k} {P : BigWedge X → Type k}
    (base* : P bwbase)
    (in* : (a : A) (x : de⊙ (X a)) → P (bwin a x))
    (glue* : (a : A) → base* == in* a (pt (X a)) [ P ↓ bwglue a ])
    = CofiberElim {f = bigwedge-f X} {P = P} base* (uncurry in*) glue*

  BigWedge-elim = BigWedgeElim.f

  module BigWedgeRec {X : A → Ptd j} {k} {C : Type k}
    (base* : C)
    (in* : (a : A) → de⊙ (X a) → C)
    (glue* : (a : A) → base* == in* a (pt (X a)))
    = CofiberRec {f = bigwedge-f X} {C = C} base* (uncurry in*) glue*

module _ {i j₀ j₁} {A : Type i} {X₀ : A → Ptd j₀} {X₁ : A → Ptd j₁}
  (Xeq : ∀ a → X₀ a ⊙≃ X₁ a) where

  bigwedge-span-emap-r : SpanEquiv (cofiber-span (bigwedge-f X₀)) (cofiber-span (bigwedge-f X₁))
  bigwedge-span-emap-r = span-map (idf _) (Σ-fmap-r λ a → fst (⊙–> (Xeq a))) (idf _)
    (comm-sqr λ _ → idp) (comm-sqr λ a → pair= idp (⊙–>-pt (Xeq a))) ,
    idf-is-equiv _ , Σ-isemap-r (λ a → snd (Xeq a)) , idf-is-equiv _

  BigWedge-emap-r : BigWedge X₀ ≃ BigWedge X₁
  BigWedge-emap-r = Pushout-emap bigwedge-span-emap-r

  ⊙BigWedge-emap-r : ⊙BigWedge X₀ ⊙≃ ⊙BigWedge X₁
  ⊙BigWedge-emap-r = ≃-to-⊙≃ BigWedge-emap-r idp

module _ {i₀ i₁ j} {A₀ : Type i₀} {A₁ : Type i₁} (X : A₁ → Ptd j) (Aeq : A₀ ≃ A₁) where

  bigwedge-span-emap-l : SpanEquiv (cofiber-span (bigwedge-f (X ∘ –> Aeq))) (cofiber-span (bigwedge-f X))
  bigwedge-span-emap-l = span-map (idf _) (Σ-fmap-l (de⊙ ∘ X) (–> Aeq)) (–> Aeq)
    (comm-sqr λ _ → idp) (comm-sqr λ _ → idp) ,
    idf-is-equiv _ , Σ-isemap-l (de⊙ ∘ X) (snd Aeq) , snd Aeq

  BigWedge-emap-l : BigWedge (X ∘ –> Aeq) ≃ BigWedge X
  BigWedge-emap-l = Pushout-emap bigwedge-span-emap-l

  ⊙BigWedge-emap-l : ⊙BigWedge (X ∘ –> Aeq) ⊙≃ ⊙BigWedge X
  ⊙BigWedge-emap-l = ≃-to-⊙≃ BigWedge-emap-l idp

module _ {i} {A : Type i} (X : A → Ptd i) where

  extract-glue-from-BigWedge-is-const :
    ∀ bw → extract-glue {s = bigwedge-span X} bw == north
  extract-glue-from-BigWedge-is-const = BigWedge-elim
    idp
    (λ x y → ! (merid x))
    (↓-='-from-square ∘ λ x →
      ExtractGlue.glue-β x ∙v⊡
      tr-square (merid x) ⊡v∙
      ! (ap-cst north (cfglue x)))

{- A BigWedge indexed by Bool is just a binary Wedge -}
module _ {i} (Pick : Bool → Ptd i) where

  BigWedge-Bool-equiv-Wedge : BigWedge Pick ≃ Wedge (Pick true) (Pick false)
  BigWedge-Bool-equiv-Wedge = equiv f g f-g g-f
    where
      module F = BigWedgeRec {X = Pick} {C = Wedge (Pick true) (Pick false)}
        (winl (pt (Pick true)))
        (λ {true → winl; false → winr})
        (λ {true → idp; false → wglue})

      module G = WedgeRec {X = Pick true} {Y = Pick false} {C = BigWedge Pick}
        (bwin true)
        (bwin false)
        (! (bwglue true) ∙ bwglue false)

      f = F.f
      g = G.f

      abstract
        f-g : ∀ w → f (g w) == w
        f-g = Wedge-elim
          (λ _ → idp)
          (λ _ → idp)
          (↓-∘=idf-in' f g $
            ap f (ap g wglue)
              =⟨ ap (ap f) G.glue-β ⟩
            ap f (! (bwglue true) ∙ bwglue false)
              =⟨ ap-∙ f (! (bwglue true)) (bwglue false) ⟩
            ap f (! (bwglue true)) ∙ ap f (bwglue false)
              =⟨ ap-! f (bwglue true) |in-ctx (λ w → w ∙ ap f (bwglue false)) ⟩
            ! (ap f (bwglue true)) ∙ ap f (bwglue false)
              =⟨ F.glue-β true |in-ctx (λ w → ! w ∙ ap f (bwglue false)) ⟩
            ap f (bwglue false)
              =⟨ F.glue-β false ⟩
            wglue
              =∎)

        g-f : ∀ bw → g (f bw) == bw
        g-f = BigWedge-elim
          (! (bwglue true))
          (λ {true → λ _ → idp; false → λ _ → idp})
          (λ {true → ↓-∘=idf-from-square g f $
                ap (ap g) (F.glue-β true) ∙v⊡ bl-square (bwglue true);
              false → ↓-∘=idf-from-square g f $
                (ap (ap g) (F.glue-β false) ∙ G.glue-β) ∙v⊡
                lt-square (! (bwglue true)) ⊡h vid-square})
/-
Copyright (c) 2020 Ruben Van de Velde. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Ruben Van de Velde
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.analysis.complex.basic
import Mathlib.analysis.normed_space.operator_norm
import Mathlib.data.complex.is_R_or_C
import Mathlib.PostPort

universes u_1 u_2

namespace Mathlib

/-!
# Extending a continuous `ℝ`-linear map to a continuous `𝕜`-linear map

In this file we provide a way to extend a continuous `ℝ`-linear map to a continuous `𝕜`-linear map
in a way that bounds the norm by the norm of the original map, when `𝕜` is either `ℝ` (the
extension is trivial) or `ℂ`. We formulate the extension uniformly, by assuming `is_R_or_C 𝕜`.

We motivate the form of the extension as follows. Note that `fc : F →ₗ[𝕜] 𝕜` is determined fully by
`Re fc`: for all `x : F`, `fc (I • x) = I * fc x`, so `Im (fc x) = -Re (fc (I • x))`. Therefore,
given an `fr : F →ₗ[ℝ] ℝ`, we define `fc x = fr x - fr (I • x) * I`.
-/

/-- Extend `fr : F →ₗ[ℝ] ℝ` to `F →ₗ[𝕜] 𝕜` in a way that will also be continuous and have its norm
bounded by `∥fr∥` if `fr` is continuous. -/
def linear_map.extend_to_𝕜 {𝕜 : Type u_1} [is_R_or_C 𝕜] {F : Type u_2} [normed_group F]
    [normed_space 𝕜 F] (fr : linear_map ℝ (restrict_scalars ℝ 𝕜 F) ℝ) : linear_map 𝕜 F 𝕜 :=
  let fc : F → 𝕜 := fun (x : F) => ↑(coe_fn fr x) - is_R_or_C.I * ↑(coe_fn fr (is_R_or_C.I • x));
  linear_map.mk fc sorry sorry

/-- The norm of the extension is bounded by `∥fr∥`. -/
theorem norm_bound {𝕜 : Type u_1} [is_R_or_C 𝕜] {F : Type u_2} [normed_group F]
    [normed_space 𝕜 F] (fr : continuous_linear_map ℝ (restrict_scalars ℝ 𝕜 F) ℝ) (x : F) :
    norm (coe_fn (linear_map.extend_to_𝕜 (continuous_linear_map.to_linear_map fr)) x) ≤
      norm fr * norm x :=
  sorry

/-- Extend `fr : F →L[ℝ] ℝ` to `F →L[𝕜] 𝕜`. -/
def continuous_linear_map.extend_to_𝕜 {𝕜 : Type u_1} [is_R_or_C 𝕜] {F : Type u_2} [normed_group F]
    [normed_space 𝕜 F] (fr : continuous_linear_map ℝ (restrict_scalars ℝ 𝕜 F) ℝ) :
    continuous_linear_map 𝕜 F 𝕜 :=
  linear_map.mk_continuous (linear_map.extend_to_𝕜 (continuous_linear_map.to_linear_map fr))
    (norm fr) (norm_bound fr)

end Mathlib
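As a side note, the extension formula in the module docstring can be checked numerically. Below is a hedged Python sketch, independent of the Lean development, where fr is an arbitrary real-linear functional on the complex numbers and fc z = fr z - i*fr(i*z); the asserts confirm fc is additive and complex-homogeneous, which is the identity the docstring derives.

import cmath

a, b = 2.0, -3.0                       # fr(z) = a*Re(z) + b*Im(z), real-linear
fr = lambda z: a * z.real + b * z.imag
fc = lambda z: fr(z) - 1j * fr(1j * z)  # the extension from the docstring

z, w, c = 1.5 - 0.5j, -2 + 1j, 0.7 + 2j
assert cmath.isclose(fc(z + w), fc(z) + fc(w))   # additivity
assert cmath.isclose(fc(c * z), c * fc(z))       # complex homogeneity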
lemma filterlim_at_right_to_0:
  "filterlim f F (at_right a) \<longleftrightarrow> filterlim (\<lambda>x. f (x + a)) F (at_right 0)"
  for a :: real
[STATEMENT]
lemma pref_hd_eq: "u \<le>p v \<Longrightarrow> u \<noteq> \<epsilon> \<Longrightarrow> hd u = hd v"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
 1. \<lbrakk>u \<le>p v; u \<noteq> \<epsilon>\<rbrakk> \<Longrightarrow> hd u = hd v
[PROOF STEP]
by (auto simp add: prefix_def)
import Mnist
import Runner
import Backprop
import Numeric.LinearAlgebra
import Data.List
import System.Random

learningRate = 0.007

smallRandoms :: Int -> [Double]
smallRandoms seed = map (/100) (randoms (mkStdGen seed))

randomWeightMatrix :: Int -> Int -> Int -> Matrix Double
randomWeightMatrix numInputs numOutputs seed = (numOutputs><numInputs) weights
  where weights = take (numOutputs*numInputs) (smallRandoms seed)

zeroWeightMatrix :: Int -> Int -> Matrix Double
zeroWeightMatrix numInputs numOutputs = (numOutputs><numInputs) weights
  where weights = repeat 0

main :: IO ()
main = do
  let w1 = randomWeightMatrix (28*28 + 1) 20 7
  let w2 = randomWeightMatrix 20 10 42
  let initialNet = buildBackpropNet learningRate [w1, w2] tanhAS
  trainingData2 <- readTrainingData
  let trainingData = take 20000 trainingData2
  putStrLn $ "Training with " ++ show (length trainingData) ++ " images"
  let finalNet = trainWithAllPatterns initialNet trainingData
  testData2 <- readTestData
  let testData = take 1000 testData2
  putStrLn $ "Testing with " ++ show (length testData) ++ " images"
  let results = evalAllPatterns finalNet testData
  let score = fromIntegral (sum results)
  let count = fromIntegral (length testData)
  let percentage = 100.0 * score / count
  putStrLn $ "I got " ++ show percentage ++ "% correct"
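For readers more familiar with NumPy than hmatrix, here is a rough Python analogue (all names illustrative, not part of the Haskell code) of randomWeightMatrix above; the (numOutputs, numInputs) shape and the divide-by-100 scaling mirror the Haskell, while the RNG behaviour is only approximate.

import numpy as np

def random_weight_matrix(num_inputs, num_outputs, seed):
    """Small random weights in [0, 0.01), shaped (num_outputs, num_inputs)."""
    rng = np.random.default_rng(seed)
    return rng.random((num_outputs, num_inputs)) / 100

w1 = random_weight_matrix(28 * 28 + 1, 20, seed=7)
w2 = random_weight_matrix(20, 10, seed=42)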
module DifferentArities where

data Nat : Set where
  zero : Nat
  suc  : Nat -> Nat

f : Nat -> Nat -> Nat
f zero      = \x -> x
f (suc n) m = f n (suc m)
/* -----------------------------------------------------------------------------
 * Copyright 2021 Jonathan Haigh
 * SPDX-License-Identifier: MIT
 * ---------------------------------------------------------------------------*/

#ifndef SQ_INCLUDE_GUARD_core_Token_h_
#define SQ_INCLUDE_GUARD_core_Token_h_

#include "Token.fwd.h"

#include "core/typeutil.h"

#include <gsl/gsl>
#include <iosfwd>
#include <regex>
#include <string_view>
#include <vector>

namespace sq {

enum class TokenKind : int {
  BoolFalse,
  BoolTrue,
  Colon,
  Comma,
  Dot,
  DQString,
  Eof,
  Equals,
  Float,
  GreaterThan,
  GreaterThanOrEqualTo,
  Identifier,
  Integer,
  LBrace,
  LBracket,
  LessThan,
  LessThanOrEqualTo,
  LParen,
  RBrace,
  RBracket,
  RParen
};

class Token {
public:
  /**
   * Create a Token object.
   *
   * @param query the full query string in which the token was found.
   * @param pos the character position within the query at which the token
   *        was found.
   * @param len the length, in characters, of the token.
   * @param kind the kind of the token.
   */
  Token(std::string_view query, gsl::index pos, gsl::index len,
        TokenKind kind) noexcept;

  /**
   * Get the full query string in which the token was found.
   */
  SQ_ND std::string_view query() const noexcept;

  /**
   * Get the character position within the query at which the token was
   * found.
   */
  SQ_ND gsl::index pos() const noexcept;

  /**
   * Get the length, in characters, of the token.
   */
  SQ_ND gsl::index len() const noexcept;

  /**
   * Get a std::string_view pointing to the characters of the token.
   */
  SQ_ND std::string_view view() const noexcept;

  /**
   * Get the kind of the token.
   */
  SQ_ND TokenKind kind() const noexcept;

private:
  std::string_view query_;
  gsl::index pos_;
  gsl::index len_;
  TokenKind kind_;
};

std::ostream &operator<<(std::ostream &os, TokenKind kind);

/**
 * Print information about a token.
 *
 * The text printed includes information about the position of the token in the
 * input query, not just the characters that make up the token.
 */
std::ostream &operator<<(std::ostream &os, const Token &token);

} // namespace sq

#endif // SQ_INCLUDE_GUARD_core_Token_h_
[STATEMENT]
lemma set_tag_name_get_child_nodes:
  "\<forall>w \<in> set_tag_name_locs ptr. (h \<turnstile> w \<rightarrow>\<^sub>h h' \<longrightarrow> (\<forall>r \<in> get_child_nodes_locs ptr'. r h h'))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
 1. \<forall>w\<in>set_tag_name_locs ptr. h \<turnstile> w \<rightarrow>\<^sub>h h' \<longrightarrow> (\<forall>r\<in>get_child_nodes_locs ptr'. r h h')
[PROOF STEP]
apply(auto simp add: get_child_nodes_locs_def)[1]
[PROOF STATE]
proof (prove)
goal (3 subgoals):
 1. \<And>w. \<lbrakk>is_shadow_root_ptr_kind ptr'; w \<in> set_tag_name_locs ptr; h \<turnstile> w \<rightarrow>\<^sub>h h'\<rbrakk> \<Longrightarrow> preserved (get_M\<^sub>S\<^sub>h\<^sub>a\<^sub>d\<^sub>o\<^sub>w\<^sub>R\<^sub>o\<^sub>o\<^sub>t (the (cast\<^sub>o\<^sub>b\<^sub>j\<^sub>e\<^sub>c\<^sub>t\<^sub>_\<^sub>p\<^sub>t\<^sub>r\<^sub>2\<^sub>s\<^sub>h\<^sub>a\<^sub>d\<^sub>o\<^sub>w\<^sub>_\<^sub>r\<^sub>o\<^sub>o\<^sub>t\<^sub>_\<^sub>p\<^sub>t\<^sub>r ptr')) RShadowRoot.child_nodes) h h'
 2. \<And>w r. \<lbrakk>is_shadow_root_ptr_kind ptr'; w \<in> set_tag_name_locs ptr; h \<turnstile> w \<rightarrow>\<^sub>h h'; r \<in> get_child_nodes_locs\<^sub>C\<^sub>o\<^sub>r\<^sub>e\<^sub>_\<^sub>D\<^sub>O\<^sub>M ptr'\<rbrakk> \<Longrightarrow> r h h'
 3. \<And>w r. \<lbrakk>\<not> is_shadow_root_ptr_kind ptr'; w \<in> set_tag_name_locs ptr; h \<turnstile> w \<rightarrow>\<^sub>h h'; r \<in> get_child_nodes_locs\<^sub>C\<^sub>o\<^sub>r\<^sub>e\<^sub>_\<^sub>D\<^sub>O\<^sub>M ptr'\<rbrakk> \<Longrightarrow> r h h'
[PROOF STEP]
apply(auto simp add: set_tag_name_locs_def all_args_def)[1]
[PROOF STATE]
proof (prove)
goal (2 subgoals):
 1. \<And>w r. \<lbrakk>is_shadow_root_ptr_kind ptr'; w \<in> set_tag_name_locs ptr; h \<turnstile> w \<rightarrow>\<^sub>h h'; r \<in> get_child_nodes_locs\<^sub>C\<^sub>o\<^sub>r\<^sub>e\<^sub>_\<^sub>D\<^sub>O\<^sub>M ptr'\<rbrakk> \<Longrightarrow> r h h'
 2. \<And>w r. \<lbrakk>\<not> is_shadow_root_ptr_kind ptr'; w \<in> set_tag_name_locs ptr; h \<turnstile> w \<rightarrow>\<^sub>h h'; r \<in> get_child_nodes_locs\<^sub>C\<^sub>o\<^sub>r\<^sub>e\<^sub>_\<^sub>D\<^sub>O\<^sub>M ptr'\<rbrakk> \<Longrightarrow> r h h'
[PROOF STEP]
using CD.set_tag_name_get_child_nodes
[PROOF STATE]
proof (prove)
using this:
  \<forall>w\<in>set_tag_name_locs ?ptr. ?h \<turnstile> w \<rightarrow>\<^sub>h ?h' \<longrightarrow> (\<forall>r\<in>get_child_nodes_locs\<^sub>C\<^sub>o\<^sub>r\<^sub>e\<^sub>_\<^sub>D\<^sub>O\<^sub>M ?ptr'. r ?h ?h')

goal (2 subgoals):
 1. \<And>w r. \<lbrakk>is_shadow_root_ptr_kind ptr'; w \<in> set_tag_name_locs ptr; h \<turnstile> w \<rightarrow>\<^sub>h h'; r \<in> get_child_nodes_locs\<^sub>C\<^sub>o\<^sub>r\<^sub>e\<^sub>_\<^sub>D\<^sub>O\<^sub>M ptr'\<rbrakk> \<Longrightarrow> r h h'
 2. \<And>w r. \<lbrakk>\<not> is_shadow_root_ptr_kind ptr'; w \<in> set_tag_name_locs ptr; h \<turnstile> w \<rightarrow>\<^sub>h h'; r \<in> get_child_nodes_locs\<^sub>C\<^sub>o\<^sub>r\<^sub>e\<^sub>_\<^sub>D\<^sub>O\<^sub>M ptr'\<rbrakk> \<Longrightarrow> r h h'
[PROOF STEP]
apply(blast)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
 1. \<And>w r. \<lbrakk>\<not> is_shadow_root_ptr_kind ptr'; w \<in> set_tag_name_locs ptr; h \<turnstile> w \<rightarrow>\<^sub>h h'; r \<in> get_child_nodes_locs\<^sub>C\<^sub>o\<^sub>r\<^sub>e\<^sub>_\<^sub>D\<^sub>O\<^sub>M ptr'\<rbrakk> \<Longrightarrow> r h h'
[PROOF STEP]
using CD.set_tag_name_get_child_nodes
[PROOF STATE]
proof (prove)
using this:
  \<forall>w\<in>set_tag_name_locs ?ptr. ?h \<turnstile> w \<rightarrow>\<^sub>h ?h' \<longrightarrow> (\<forall>r\<in>get_child_nodes_locs\<^sub>C\<^sub>o\<^sub>r\<^sub>e\<^sub>_\<^sub>D\<^sub>O\<^sub>M ?ptr'. r ?h ?h')

goal (1 subgoal):
 1. \<And>w r. \<lbrakk>\<not> is_shadow_root_ptr_kind ptr'; w \<in> set_tag_name_locs ptr; h \<turnstile> w \<rightarrow>\<^sub>h h'; r \<in> get_child_nodes_locs\<^sub>C\<^sub>o\<^sub>r\<^sub>e\<^sub>_\<^sub>D\<^sub>O\<^sub>M ptr'\<rbrakk> \<Longrightarrow> r h h'
[PROOF STEP]
apply(blast)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
module Text.WebIDL.Types.Numbers

import Generics.Derive

%default total
%language ElabReflection

||| The default Eq for Nat runs in O(n), which is too slow
||| if we want to support large natlits in our tests.
export
[FastNatEq] Eq Nat where
  (==) = (==) `on` natToInteger

--------------------------------------------------------------------------------
-- IntLit
--------------------------------------------------------------------------------

||| An integer literal in hexadecimal, octal, or decimal representation.
||| The code generator will use the same representation when
||| generating code for constants and default values.
public export
data IntLit = Hex Nat | Oct Nat | I Integer

%runElab derive "IntLit" [Generic,Meta,Show]

export
Eq IntLit using FastNatEq where
  (==) = genEq

--------------------------------------------------------------------------------
-- Parsing Integers
--------------------------------------------------------------------------------

digitToInt : Char -> Integer
digitToInt c = cast $ if isDigit c
                         then ord c - ord '0'
                         else ord (toUpper c) - ord 'A' + 10

charsToPosInt : (base : Integer) -> List Char -> Maybe Integer
charsToPosInt base t = calc <$> traverse readDigit t
  where readDigit : Char -> Maybe Integer
        readDigit c = let d = digitToInt c
                       in if d >= 0 && d < base then Just d else Nothing

        calc : List Integer -> Integer
        calc = foldl (\a,e => a * base + e) 0

||| Tries to read an integer literal from a `String`.
export
readInt : String -> Maybe IntLit
readInt s = case fastUnpack s of
  '0'::'x'::t => map (Hex . fromInteger) $ charsToPosInt 16 t
  '0'::'X'::t => map (Hex . fromInteger) $ charsToPosInt 16 t
  '0'::t      => map (Oct . fromInteger) $ charsToPosInt 8 t
  '-'::t      => I . negate <$> charsToPosInt 10 t
  t           => map I $ charsToPosInt 10 t

--------------------------------------------------------------------------------
-- Floating Point Literals
--------------------------------------------------------------------------------

||| The sign of a floating point literal.
public export
data Signum = Plus | Minus

%runElab derive "Signum" [Generic,Meta,Eq,Show]

||| A parsed floating point literal.
|||
||| A floating point literal is either one of three
||| special values (`NaN`, `Infinity`, or `-Infinity`)
||| or a decimal floating point number (`NoExp`: dot is
||| mandatory), or a float in scientific notation (`Exp`:
||| dot is optional).
|||
||| The main focus of this data type is one of
||| preserving information. Encoding a `FloatLit` should
||| yield (almost) exactly the same literal as the one
||| encountered during parsing, with two minor exceptions:
||| a) The encoded literal will always use a lowercase 'e' as
||| the delimiter for the exponent and b) in case of a
||| positive exponent, there will not be a '+' in the
||| encoded literal.
public export
data FloatLit : Type where
  ||| Floating point number in scientific notation.
  |||
  ||| Example: `-12.10e10`
  Exp :  (signum : Signum)
      -> (beforeDot : Nat)
      -> (afterDot : Maybe Nat)
      -> (exp : Integer)
      -> FloatLit

  ||| Floating point number without exponent.
  |||
  ||| Example: `-12.1002`
  NoExp :  (signum : Signum)
        -> (beforeDot : Nat)
        -> (afterDot : Nat)
        -> FloatLit

  ||| Corresponds to the WebIDL keyword `Infinity`
  Infinity : FloatLit

  ||| Corresponds to the WebIDL keyword `-Infinity`
  NegativeInfinity : FloatLit

  ||| Corresponds to the WebIDL keyword `NaN`
  NaN : FloatLit

%runElab derive "FloatLit" [Generic,Meta,Show]

export
Eq FloatLit using FastNatEq where
  (==) = genEq

--------------------------------------------------------------------------------
-- Parsing Floats
--------------------------------------------------------------------------------

charsToNat : List Char -> Maybe Nat
charsToNat = map fromInteger . charsToPosInt 10

beforeDot : List Char -> (Signum,Maybe Nat)
beforeDot ('-'::cs) = (Minus,charsToNat cs)
beforeDot cs        = (Plus, charsToNat cs)

exp : String -> Maybe String -> String -> Maybe FloatLit
exp bds ads es =
  let (s,bdNat) = beforeDot (fastUnpack bds)
   in do bd <- bdNat
         ad <- maybe (Just Nothing) (map Just . charsToNat . fastUnpack) ads
         e  <- afterExp (fastUnpack es)
         pure $ Exp s bd ad e
  where afterExp : List Char -> Maybe Integer
        afterExp ('-'::cs) = negate <$> charsToPosInt 10 cs
        afterExp ('+'::cs) = charsToPosInt 10 cs
        afterExp cs        = charsToPosInt 10 cs

noExp : String -> String -> Maybe FloatLit
noExp bds ads =
  let (s,bdNat) = beforeDot $ fastUnpack bds
   in do bd <- bdNat
         ad <- charsToNat $ fastUnpack ads
         pure $ NoExp s bd ad

||| Tries to read a floating point literal
||| from a `String`.
export
readFloat : String -> Maybe FloatLit
readFloat s = case split (('E' ==) . toUpper) s of
  -- with exponent, dot is optional
  h ::: [e] => case split ('.' ==) h of
    h2 ::: [t2] => exp h2 (Just t2) e
    h2 ::: []   => exp h2 Nothing e
    _  ::: _    => Nothing

  -- without exponent, dot is mandatory
  -- (otherwise it is an integer)
  h ::: [] => case split ('.' ==) h of
    h2 ::: [t2] => noExp h2 t2
    _  ::: _    => Nothing

  -- more than one E in string
  _ ::: _ => Nothing
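A compact Python sketch of the prefix dispatch that readInt performs (hex '0x'/'0X', octal leading '0', decimal otherwise); the tuple tags are illustrative stand-ins for the Hex/Oct/I constructors, and edge cases such as a bare "0" are not handled identically.

def read_int_lit(s: str):
    """Classify and parse an integer literal the way readInt dispatches."""
    if s[:2] in ('0x', '0X'):
        return ('Hex', int(s[2:], 16))
    if s.startswith('0') and len(s) > 1:
        return ('Oct', int(s[1:], 8))
    return ('I', int(s, 10))

assert read_int_lit('0x1F') == ('Hex', 31)
assert read_int_lit('017') == ('Oct', 15)
assert read_int_lit('-42') == ('I', -42)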
(*
 Copyright (C) 2017 M.A.L. Marques

 This Source Code Form is subject to the terms of the Mozilla Public
 License, v. 2.0. If a copy of the MPL was not distributed with this
 file, You can obtain one at http://mozilla.org/MPL/2.0/.
*)

(* type: gga_exc *)

airy_a5  := 133.983631:
airy_a6  := 3.217063:
airy_a7  := 136.707378:
airy_a8  := 3.223476:
airy_a9  := 2.675484:
airy_a10 := 3.473804:

airy_f1 := s -> (1 - airy_a5*s^airy_a6 + airy_a7*s^airy_a8)/(1 + airy_a9*s^airy_a10):

$include "gga_x_lag.mpl"

airy_f := x -> lag_f0(X2S*x) + airy_f1(X2S*x):

f := (rs, zeta, xt, xs0, xs1) -> gga_exchange(airy_f, rs, zeta, xs0, xs1):
[STATEMENT]
lemma getRel_homR: (* slows down proofs in the common case *)
  assumes "(y, z) \<in> getRel l G" "(y,u) \<in> f" "(z,v) \<in> f"
  shows "(u, v) \<in> getRel l (map_graph f G)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
 1. (u, v) \<in> getRel l (map_graph f G)
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
  (y, z) \<in> getRel l G
  (y, u) \<in> f
  (z, v) \<in> f

goal (1 subgoal):
 1. (u, v) \<in> getRel l (map_graph f G)
[PROOF STEP]
by (auto simp:getRel_def on_triple_def)
# -*- coding: utf-8 -*-
"""base classes for products"""
from addict import Dict
from copy import deepcopy
from ELDAmwl.bases.base import Params
from ELDAmwl.bases.factory import BaseOperation
from ELDAmwl.bases.factory import BaseOperationFactory
from ELDAmwl.component.interface import IDBFunc
from ELDAmwl.component.registry import registry
from ELDAmwl.errors.exceptions import DetectionLimitZero
from ELDAmwl.errors.exceptions import NotEnoughMCIterations
from ELDAmwl.errors.exceptions import SizeMismatch
from ELDAmwl.errors.exceptions import UseCaseNotImplemented
from ELDAmwl.output.mwl_file_structure import MWLFileStructure
from ELDAmwl.rayleigh import RayleighLidarRatio
from ELDAmwl.signals import Signals
from ELDAmwl.storage.cached_functions import sg_coeffs
from ELDAmwl.storage.cached_functions import smooth_routine_from_db
from ELDAmwl.utils.constants import CALC_WINDOW_OUTSIDE_PROFILE
from ELDAmwl.utils.constants import COMBINE_DEPOL_USE_CASES
from ELDAmwl.utils.constants import EBSC
from ELDAmwl.utils.constants import EXT
from ELDAmwl.utils.constants import FIXED
from ELDAmwl.utils.constants import HIGHRES
from ELDAmwl.utils.constants import LOWRES
from ELDAmwl.utils.constants import MC
from ELDAmwl.utils.constants import MERGE_PRODUCT_USE_CASES
from ELDAmwl.utils.constants import NC_FILL_BYTE
from ELDAmwl.utils.constants import NC_FILL_INT
from ELDAmwl.utils.constants import RBSC
from ELDAmwl.utils.constants import RESOLUTION_STR
from zope import component

import ELDAmwl.utils.constants
import numpy as np
import xarray as xr


class Products(Signals):
    p_params = None
    smooth_routine = None  # class to perform smoothing
    mwl_meta_id = None
    params = None
    num_scan_angles = None

    @classmethod
    def from_signal(cls, signal, p_params, **kw_args):
        """creates an instance of Products from general data of signal.

        data, err, qf, and binres have the same shape as signal,
        but are filled with nan.
        """
        result = cls()
        result.ds = deepcopy(signal.ds)
        result.ds['data'][:] = np.nan
        result.ds['err'][:] = np.nan
        result.ds['qf'][:] = NC_FILL_BYTE
        result.ds['binres'][:] = NC_FILL_INT
        result.station_altitude = signal.station_altitude
        result.params = p_params
        # todo: copy other general parameter
        result.emission_wavelength = signal.emission_wavelength
        result.num_scan_angles = signal.num_scan_angles
        result.ds['time_bounds'] = signal.ds['time_bounds']

        result.mwl_meta_id = '{}_{}'.format(
            MWLFileStructure.NC_VAR_NAMES[p_params.general_params.product_type],
            round(float(result.emission_wavelength)))

        if result.params.smooth_method is not None:
            result.smooth_routine = SmoothRoutine()(
                method_id=result.params.smooth_method)

        return result

    def smooth(self, binres):
        """performs smoothing of the data

        Args:
            binres (xarray.DataArray): array with the bin resolution
                which shall be used for smoothing

        Returns:

        """
        if self.data.shape != binres.shape:
            raise SizeMismatch('bin resolution',
                               'product {}'.format(self.params.prod_id_str),
                               'smooth')

        num_times = self.ds.dims['time']
        num_levels = self.ds.dims['level']

        # first bin of the smooth window
        fb = binres.level - binres // 2
        # next bin after smooth window
        nb = binres.level + binres // 2 + 1

        for t in range(num_times):
            # first and last smoothable bins
            fsb = np.where(fb[:, t] >= self.first_valid_bin(t))[0][0]
            # actually, this is not the last smoothable bin, but the one after
            # that; keep this notation in order to avoid lsb + 1 everywhere
            lsb = np.where(nb[:, t] > self.last_valid_bin(t))[0][0]

            for lev in range(fsb, lsb):
                self.qf[t, lev] = np.bitwise_or.reduce(
                    self.qf.values[t, int(fb[lev, t]):int(nb[lev, t])])
                # todo: smoothing of mol_extinction, mol_backscatter,
                #       transmission, cloudflag etc
                smoothed = self.smooth_routine.run(
                    window=int(binres[t, lev]),
                    data=self.data.values[t, int(fb[lev, t]):int(nb[lev, t])],
                    err=self.err.values[t, int(fb[lev, t]):int(nb[lev, t])])
                self.data[t, lev] = smoothed.data
                self.err[t, lev] = smoothed.err
                self.binres[t, lev] = binres[t, lev]

            for lev in range(fsb):
                self.set_invalid_point(t, lev, CALC_WINDOW_OUTSIDE_PROFILE)
            for lev in range(lsb, num_levels):
                self.set_invalid_point(t, lev, CALC_WINDOW_OUTSIDE_PROFILE)

    def save_to_netcdf(self):
        pass

    def write_data_in_ds(self, ds):
        """insert product data into dataset

        find the indexes where the product coordinates fit into
        coordinates of the ds and write product data there

        Args:
            ds (xr.Dataset): empty dataset where to insert the product data

        Returns:

        """
        subset = ds.sel(wavelength=self.emission_wavelength)
        if self.num_scan_angles > 1:
            # altitude axes might be different, analyze all time slices separately
            # todo: not yet tested
            for t_idx in range(subset.dims['time']):
                idx = np.searchsorted(subset.altitude.values[t_idx],
                                      self.altitude.values[t_idx])
                subset.variables['values'][t_idx, idx] = self.data[t_idx, :]
        else:
            # analyze only first time slice, because altitude axes are all equal
            t_idx = 0
            idx = np.searchsorted(subset.altitude.values[t_idx],
                                  self.altitude.values[t_idx])
            subset.variables['data'][:, idx] = self.data[:, :]

    def to_meta_ds_dict(self, meta_data):
        dct = Dict({'attrs': Dict(), 'data_vars': Dict()})
        dct.data_vars.error_retrieval_method = MWLFileStructure() \
            .error_method_var(self.params.general_params.error_method)
        meta_data[self.mwl_meta_id] = dct


class ProductParams(Params):

    def __init__(self):
        super(ProductParams, self).__init__()
        self.sub_params = ['general_params', 'mc_params', 'smooth_params']
        self.general_params = None
        self.mc_params = None
        self.smooth_params = None
        self.quality_params = None

    def from_db(self, general_params):
        self.general_params = general_params
        self.smooth_params = SmoothParams.from_db(general_params.prod_id)
        self.quality_params = QualityParams.from_db(general_params.prod_id)

    def get_error_params(self, db_options):
        """reads error params

        Args:
            db_options (Dict): product params, read from SCC db with
                read_elast_bsc_params(), read_extinction_params(),
                or read_raman_bsc_params()
        """
        self.general_params.error_method = db_options['error_method']
        if self.error_method == MC:
            self.mc_params = MCParams.from_db(self.prod_id)

    @property
    def prod_id_str(self):
        return str(self.general_params.prod_id)

    @property
    def error_method(self):
        return self.general_params.error_method

    @property
    def smooth_type(self):
        return self.smooth_params.smooth_type

    @property
    def smooth_method(self):
        return self.smooth_params.smooth_method

    @property
    def det_limit_asDataArray(self):
        units = MWLFileStructure.UNITS[self.general_params.product_type]
        return xr.DataArray(self.quality_params.detection_limit,
                            name='detection_limit',
                            attrs={'long_name': 'detection limit',
                                   'units': units,
                                   })

    @property
    def error_threshold_low_asDataArray(self):
        return xr.DataArray(self.quality_params.error_threshold.low,
                            name='error_threshold_low',
                            attrs={'long_name': 'threshold for the '
                                                'relative statistical error '
                                                'below {0} km height'.
                                   format(ELDAmwl.utils.constants.RANGE_BOUNDARY_KM),
                                   'units': '1'})

    @property
    def error_threshold_high_asDataArray(self):
        return xr.DataArray(self.quality_params.error_threshold.high,
                            name='error_threshold_low',
                            attrs={'long_name': 'threshold for the '
                                                'relative statistical error '
                                                'above {0} km height'.
                                   format(ELDAmwl.utils.constants.RANGE_BOUNDARY_KM),
                                   'units': '1'})

    @property
    def smooth_params_auto(self):
        return Dict({'error_threshold_low': self.error_threshold_low_asDataArray,
                     'error_threshold_high': self.error_threshold_high_asDataArray,
                     'detection_limit': self.det_limit_asDataArray,
                     })

    def calc_with_res(self, res):
        if self.general_params.calc_with_hr and res == HIGHRES:
            return True
        elif self.general_params.calc_with_lr and res == LOWRES:
            return True
        else:
            return False

    def assign_to_product_list(self, measurement_params):
        gen_params = self.general_params
        params_list = measurement_params.product_list
        params_table = measurement_params.product_table

        if self.prod_id_str not in params_list:
            params_list[self.prod_id_str] = self
            params_table.loc[len(params_table.index)] = \
                {'id': self.prod_id_str,
                 'wl': gen_params.emission_wavelength,
                 'type': gen_params.product_type,
                 'basic': gen_params.is_basic_product,
                 'derived': gen_params.is_derived_product,
                 RESOLUTION_STR[HIGHRES]: gen_params.calc_with_hr,
                 RESOLUTION_STR[LOWRES]: gen_params.calc_with_lr,
                 'elpp_file': gen_params.elpp_file}
        else:
            df = params_table[(params_table.id == self.prod_id_str)]
            idx = df.index[0]
            highres = df[RESOLUTION_STR[HIGHRES]][idx] or gen_params.calc_with_hr
            lowres = df[RESOLUTION_STR[LOWRES]][idx] or gen_params.calc_with_lr
            params_table.loc[params_table.id == self.prod_id_str, RESOLUTION_STR[HIGHRES]] = highres  # noqa E501
            params_table.loc[params_table.id == self.prod_id_str, RESOLUTION_STR[LOWRES]] = lowres  # noqa E501

    def is_bsc_from_depol_components(self):
        if self.general_params.product_type in [RBSC, EBSC]:
            # todo: put info on COMBINE_DEPOL_USE_CASES in db table
            if self.general_params.usecase in COMBINE_DEPOL_USE_CASES[self.general_params.product_type]:  # noqa E501
                return True
            else:
                return False
        else:
            return False

    def includes_product_merging(self):
        if self.general_params.product_type in [EXT, RBSC, EBSC]:
            # todo: put info on MERGE_PRODUCT_USE_CASES in db table
            if self.general_params.usecase in MERGE_PRODUCT_USE_CASES[self.general_params.product_type]:  # noqa E501
                return True
            else:
                return False
        else:
            return False

    def add_signal_role(self, signal):
        pass

    def to_meta_ds_dict(self, dct):
        """writes parameter content into Dict for further export in mwl file

        Args:
            dct (addict.Dict): is a dict which will be converted into dataset.
                has the keys 'attrs' and 'data_vars'

        Returns:

        """
        pass


class GeneralProductParams(Params):
    """general parameters for product retrievals"""

    def __init__(self):
        super(GeneralProductParams, self).__init__()
        # product id
        self.prod_id = None
        self.product_type = None
        self.usecase = None
        self.emission_wavelength = None
        self.rayl_lr = None

        self.is_basic_product = False
        self.is_derived_product = False

        self.calc_with_hr = False
        self.calc_with_lr = False

        self.error_method = None
        # self.detection_limit = None
        # self.error_threshold = Dict({'low': None,
        #                              'high': None})

        self.valid_alt_range = Dict({'min_height': None,
                                     'max_height': None})

        self.elpp_file = ''

        self.signals = []

    @classmethod
    def from_query(cls, query):
        result = cls()

        result.prod_id = query.Products.ID
        result.product_type = query.Products.prod_type_id
        result.usecase = query.Products.usecase_id
        result.emission_wavelength = float(query.Channels.emission_wavelength)

        result.is_basic_product = query.ProductTypes.is_basic_product == 1
        result.is_derived_product = not result.is_basic_product

        # result.error_threshold.low = query.ErrorThresholdsLow.value
        # result.error_threshold.high = query.ErrorThresholdsHigh.value
        # result.detection_limit = query.ProductOptions.detection_limit
        # if result.detection_limit == 0.0:
        #     raise(DetectionLimitZero, result.prod_id)

        result.valid_alt_range.min_height = float(query.PreProcOptions.min_height)
        result.valid_alt_range.max_height = float(query.PreProcOptions.max_height)

        # the MWLproducProduct and PreparedSignalFile tables are not
        # available if query is related to a simple (not mwl) product.
        # There is no way to test whether the table is inside the
        # query collection -> just try
        try:
            result.calc_with_hr = bool(query.MWLproductProduct.create_with_hr)
            result.calc_with_lr = bool(query.MWLproductProduct.create_with_lr)
            result.elpp_file = query.PreparedSignalFile.filename
        except AttributeError:
            pass

        result.rayl_lr = RayleighLidarRatio()(
            wavelength=result.emission_wavelength).run()

        return result

    @classmethod
    def from_id(cls, prod_id):
        db_func = component.queryUtility(IDBFunc)
        query = db_func.get_general_params_query(prod_id)
        result = cls.from_query(query)
        return result


class MCParams(Params):
    nb_of_iterations = None

    @classmethod
    def from_db(cls, prod_id):
        result = cls()
        db_func = component.queryUtility(IDBFunc)
        query = db_func.get_mc_params_query(prod_id)
        result.nb_of_iterations = query.iteration_count

        if result.nb_of_iterations <= 1:
            raise NotEnoughMCIterations(prod_id)

        return result


class QualityParams(Params):
    """quality parameters for product retrievals"""

    def __init__(self):
        super(QualityParams, self).__init__()

        self.detection_limit = None
        self.error_threshold = Dict({'lowrange': None,
                                     'highrange': None})

    @classmethod
    def from_query(cls, query):
        result = cls()

        result.error_threshold.lowrange = query.ErrorThresholdsLow.value
        result.error_threshold.highrange = query.ErrorThresholdsHigh.value
        result.detection_limit = query.SmoothOptions.detection_limit

        if result.detection_limit == 0.0:
            raise DetectionLimitZero(result.prod_id)

        return result

    @classmethod
    def from_db(cls, prod_id):
        db_func = component.queryUtility(IDBFunc)
        query = db_func.get_quality_params_query(prod_id)
        result = cls.from_query(query)
        return result


class SmoothParams(Params):
    """smooth parameters for product retrievals"""

    def __init__(self):
        super(SmoothParams, self).__init__()

        self.smooth_type = None
        self.smooth_method = None

        # self.detection_limit = None
        # self.error_threshold = Dict({'lowrange': None,
        #                              'highrange': None})

        self.transition_zone = Dict({'bottom': None,
                                     'top': None})
        self.vert_res = Dict({RESOLUTION_STR[LOWRES]: Dict({'lowrange': None,
                                                            'highrange': None,
                                                            }),
                              RESOLUTION_STR[HIGHRES]: Dict({'lowrange': None,
                                                             'highrange': None,
                                                             }),
                              })
        self.time_res = Dict({RESOLUTION_STR[LOWRES]: Dict({'lowrange': None,
                                                            'highrange': None,
                                                            }),
                              RESOLUTION_STR[HIGHRES]: Dict({'lowrange': None,
                                                             'highrange': None,
                                                             }),
                              })

    @classmethod
    def from_query(cls, query):
        result = cls()

        result.smooth_type = query.smooth_type_id

        # if result.smooth_type == AUTO:
        #     result.error_threshold.lowrange = query.ErrorThresholdsLow.value
        #     result.error_threshold.highrange = query.ErrorThresholdsHigh.value
        #
        #     result.detection_limit = query.SmoothOptions.detection_limit
        #     if result.detection_limit == 0.0:
        #         raise(DetectionLimitZero, result.prod_id)

        if result.smooth_type == FIXED:
            result.transition_zone.bottom = float(query.transition_zone_from)
            result.transition_zone.top = float(query.transition_zone_to)

            result.vert_res[RESOLUTION_STR[LOWRES]].lowrange \
                = float(query.lowres_lowrange_vertical_resolution)
            result.vert_res[RESOLUTION_STR[LOWRES]].highrange \
                = float(query.lowres_highrange_vertical_resolution)
            result.vert_res[RESOLUTION_STR[HIGHRES]].lowrange \
                = float(query.highres_lowrange_vertical_resolution)
            result.vert_res[RESOLUTION_STR[HIGHRES]].highrange \
                = float(query.highres_highrange_vertical_resolution)

            result.time_res[RESOLUTION_STR[LOWRES]].lowrange \
                = query.lowres_lowrange_integration_time
            result.time_res[RESOLUTION_STR[LOWRES]].highrange \
                = query.lowres_highrange_integration_time
            result.time_res[RESOLUTION_STR[HIGHRES]].lowrange \
                = query.highres_lowrange_integration_time
            result.time_res[RESOLUTION_STR[HIGHRES]].highrange \
                = query.highres_highrange_integration_time

        return result

    @classmethod
    def from_db(cls, prod_id):
        db_func = component.queryUtility(IDBFunc)
        query = db_func.get_smooth_params_query(prod_id)
        result = cls.from_query(query)
        return result


class SmoothSavGolay(BaseOperation):
    """smoothes a profile window with the Savitzky-Golay method"""

    name = 'SmoothSavGolay'

    def run(self, **kwargs):
        """starts the calculation.

        In scipy.signal.savgol_filter
        (https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.savgol_filter.html),
        the filtering is done as convolution (scipy.ndimage.convolve1d) of the
        data and SG coefficients: result=convolve1d(data,sgc). Here, the
        filtering can be done as a simple sum (which is supposedly faster).

        Keyword Args:
            window (integer): total length (diameter) of the smooth window.
                must be an odd number
            data: ndarray (size=window) which contains the data to be smoothed
            err: ndarray (size=window) which contains the errors of the data
                to be smoothed

        Returns:
            addict.Dict with keys 'data' and 'err' which contains the
            smoothed data and its error
        """
        assert 'window' in kwargs
        assert 'data' in kwargs
        assert 'err' in kwargs

        win = kwargs['window']
        err = kwargs['err']
        data = kwargs['data']

        sgc = sg_coeffs(win, 2)
        err_sm = np.sqrt(np.sum(np.power(err * sgc, 2)))
        data_sm = np.sum(data * sgc)

        return Dict({'data': data_sm, 'err': err_sm})


class SmoothSlidingAverage(BaseOperation):
    """smoothes a profile window with a sliding average (not yet implemented)"""

    name = 'SmoothSlidingAverage'

    def run(self, **kwargs):
        raise UseCaseNotImplemented('SmoothSlidingAverage',
                                    'smoothing',
                                    'sliding average')


class SmoothRoutine(BaseOperationFactory):
    """Creates a Class for smoothing

    Keyword Args:
    """

    name = 'SmoothRoutine'
    method_id = None

    def __call__(self, **kwargs):
        assert 'method_id' in kwargs
        self.method_id = kwargs['method_id']

        res = super(SmoothRoutine, self).__call__(**kwargs)
        return res

    def get_classname_from_db(self):
        """reads from SCC db which algorithm to use

        Returns:
            name of the class for the smoothing
        """
        return smooth_routine_from_db(self.method_id)


registry.register_class(SmoothRoutine,
                        SmoothSavGolay.__name__,
                        SmoothSavGolay)

registry.register_class(SmoothRoutine,
                        SmoothSlidingAverage.__name__,
                        SmoothSlidingAverage)
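The sum-of-coefficients trick that SmoothSavGolay.run uses can be reproduced with scipy directly; below is a minimal sketch, assuming sg_coeffs(win, 2) agrees with scipy.signal.savgol_coeffs(win, 2). The smoothed value is a dot product of the window with the SG coefficients, and the error follows by Gaussian propagation.

import numpy as np
from scipy.signal import savgol_coeffs

window = 5                                   # must be odd
data = np.array([1.0, 2.0, 4.0, 8.0, 16.0])  # one smooth window of data
err = np.full(window, 0.5)                   # errors of the data points

sgc = savgol_coeffs(window, 2)               # quadratic SG coefficients
data_sm = np.sum(data * sgc)                 # smoothed centre value
err_sm = np.sqrt(np.sum((err * sgc) ** 2))   # Gaussian error propagation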
function WeightedObservedPoints()
    return WeightedObservedPoints(())
end

function add(obj::WeightedObservedPoints, arg0::WeightedObservedPoint)
    return jcall(obj, "add", void, (WeightedObservedPoint,), arg0)
end

function add(obj::WeightedObservedPoints, arg0::jdouble, arg1::jdouble)
    return jcall(obj, "add", void, (jdouble, jdouble), arg0, arg1)
end

function add(obj::WeightedObservedPoints, arg0::jdouble, arg1::jdouble, arg2::jdouble)
    return jcall(obj, "add", void, (jdouble, jdouble, jdouble), arg0, arg1, arg2)
end

function clear(obj::WeightedObservedPoints)
    return jcall(obj, "clear", void, ())
end

function to_list(obj::WeightedObservedPoints)
    return jcall(obj, "toList", List, ())
end
{-# LANGUAGE ConstraintKinds #-}
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE GADTs #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE TemplateHaskell #-}
{-# LANGUAGE TypeOperators #-}
{-# OPTIONS_GHC -fno-warn-missing-signatures #-}

module Test.Grenade.Layers.Internal.Pooling where

import           Grenade.Layers.Internal.Pooling

import           Control.Monad
import           Numeric.LinearAlgebra hiding (konst, uniformSample, (===))

import           Hedgehog
import qualified Hedgehog.Gen as Gen
import qualified Hedgehog.Range as Range

import qualified Test.Grenade.Layers.Internal.Reference as Reference
import           Test.Hedgehog.Compat

import           Grenade.Types

prop_poolForwards_poolBackwards_behaves_as_reference =
  let ok extent kernel = [stride | stride <- [1..extent], (extent - kernel) `mod` stride == 0]
      output extent kernel stride = (extent - kernel) `div` stride + 1
  in property $ do
       height   <- forAll $ choose 2 100
       width    <- forAll $ choose 2 100
       kernel_h <- forAll $ choose 1 (height - 1)
       kernel_w <- forAll $ choose 1 (width - 1)
       stride_h <- forAll $ Gen.element (ok height kernel_h)
       stride_w <- forAll $ Gen.element (ok width kernel_w)
       input    <- forAll $ (height >< width) <$> Gen.list (Range.singleton $ height * width)
                              (Gen.realFloat $ Range.linearFracFrom 0 (-100) 100)

       let outFast = poolForward 1 height width kernel_h kernel_w stride_h stride_w input
       let retFast = poolBackward 1 height width kernel_h kernel_w stride_h stride_w input outFast

       let outReference = Reference.poolForward kernel_h kernel_w stride_h stride_w
                            (output height kernel_h stride_h) (output width kernel_w stride_w) input
       let retReference = Reference.poolBackward kernel_h kernel_w stride_h stride_w input outReference

       outFast === outReference
       retFast === retReference

prop_same_pad_pool_behaves_as_reference_when_zero_pad =
  let output extent kernel_dim stride = (extent - kernel_dim) `div` stride + 1
      kernel i s = let x = ceiling ((fromIntegral i :: RealNum) / (fromIntegral s :: RealNum))
                   in i - (x - 1) * s
  in property $ do
       height   <- forAll $ choose 2 100
       width    <- forAll $ choose 2 100
       stride_h <- forAll $ choose 1 (height - 1)
       stride_w <- forAll $ choose 1 (width - 1)

       let kernel_h = kernel height stride_h
           kernel_w = kernel width stride_w

       input <- forAll $ (height >< width) <$> Gen.list (Range.singleton $ height * width)
                           (Gen.realFloat $ Range.linearFracFrom 0 (-100) 100)

       guard $ output height kernel_h stride_h == (ceiling $ (fromIntegral height :: RealNum) / (fromIntegral stride_h :: RealNum))
       guard $ output width kernel_w stride_w == (ceiling $ (fromIntegral width :: RealNum) / (fromIntegral stride_w :: RealNum))

       let outFast      = validPadPoolForwards 1 height width kernel_h kernel_w stride_h stride_w 0 0 0 0 input
       let outReference = poolForward 1 height width kernel_h kernel_w stride_h stride_w input

       assert $ norm_Inf (outFast - outReference) < 0.000001

prop_same_pad_pool_behaves_correctly_at_edges = withTests 1 $ property $ do
  let input           = (2 >< 2) [-0.01, -0.04, -0.02, -0.03]
      expected_output = (2 >< 2) [-0.01, -0.03, -0.02, -0.03]
      out             = validPadPoolForwards 1 2 2 2 2 1 1 0 0 1 1 input
  assert $ norm_Inf (out - expected_output) < 0.000001

prop_same_pad_pool_behaves_correctly_at_edges_three_channels = withTests 1 $ property $ do
  let input = (6 >< 2) [  0.7, -0.9
                       , -1.4, -0.1
                       , -0.3,  0.5
                       ,  0.1,  0.2
                       , -1.1, -0.7
                       ,  0.5, -0.7 ]
      expected_output = (6 >< 2) [  0.7, -0.1
                                 , -0.1, -0.1
                                 ,  0.5,  0.5
                                 ,  0.2,  0.2
                                 ,  0.5, -0.7
                                 ,  0.5, -0.7 ]
      out = validPadPoolForwards 3 2 2 2 2 1 1 0 0 1 1 input
  assert $ norm_Inf (out - expected_output) < 0.000001

tests :: IO Bool
tests = checkParallel $$(discover)
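For comparison with the Reference module used in these properties, a naive max-pool forward pass is easy to state in Python; this sketch is illustrative only and is not the Grenade implementation.

import numpy as np

def pool_forward(x, kh, kw, sh, sw):
    """Naive max pooling: kernel (kh, kw), strides (sh, sw)."""
    H, W = x.shape
    out_h = (H - kh) // sh + 1   # same output-size formula as `output` above
    out_w = (W - kw) // sw + 1
    out = np.empty((out_h, out_w))
    for i in range(out_h):
        for j in range(out_w):
            out[i, j] = x[i * sh:i * sh + kh, j * sw:j * sw + kw].max()
    return out

x = np.arange(16.0).reshape(4, 4)
assert pool_forward(x, 2, 2, 2, 2).tolist() == [[5.0, 7.0], [13.0, 15.0]]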
From Undecidability.L Require Import Tactics.LTactics Datatypes.LBool Tactics.GenEncode.
From Undecidability.L Require Import Functions.EqBool.

Import L_Notations.

(* ** Encoding of option type *)

Section Fix_X.
  Variable X:Type.
  Context {intX : encodable X}.

  MetaCoq Run (tmGenEncode "option_enc" (option X)).
  Hint Resolve option_enc_correct : Lrewrite.

  Global Instance encInj_option_enc {H : encInj intX} : encInj (encodable_option_enc).
  Proof. register_inj. Qed.

  (* now we must register the non-constant constructors *)
  Global Instance term_Some : computable (@Some X).
  Proof. extract constructor. Defined. (* because next lemma *)

End Fix_X.

#[export] Hint Resolve option_enc_correct : Lrewrite.

Section option_eqb.
  Variable X : Type.
  Variable eqb : X -> X -> bool.
  Variable spec : forall x y, reflect (x = y) (eqb x y).

  Definition option_eqb (A B : option X) :=
    match A,B with
    | None,None => true
    | Some x, Some y => eqb x y
    | _,_ => false
    end.

  Lemma option_eqb_spec A B : reflect (A = B) (option_eqb A B).
  Proof using spec.
    destruct A, B; try now econstructor. cbn.
    destruct (spec x x0); econstructor; congruence.
  Qed.
End option_eqb.

Section int.
  Variable X:Type.
  Context {HX : encodable X}.

  Global Instance term_option_eqb : computable (@option_eqb X).
  Proof. extract. Qed.

  Global Instance eqbOption f `{eqbClass (X:=X) f}: eqbClass (option_eqb f).
  Proof. intros ? ?. eapply option_eqb_spec. all:eauto using eqb_spec. Qed.

  Global Instance eqbComp_Option `{H:eqbComp X (R:=HX)}: eqbComp (option X).
  Proof. constructor. unfold option_eqb. change (eqb0) with (eqb (X:=X)). extract. Qed.
End int.

Definition isSome {T} (u : option T) := match u with Some _ => true | _ => false end.

#[global] Instance term_isSome {T} `{encodable T} : computable (@isSome T).
Proof. extract. Qed.

#[global] Instance term_option_map {A B} `{encodable A} `{encodable B} : computable (@option_map A B).
Proof. extract. Qed.
# for matrix view
using Gadfly

M = rand(10,10)
spy(M)
\section{Full R code}

This code will run all the indicated analyses and produce all plots.

\lstinputlisting[style=Rsty]{../../code/hmc/d_sir_stan.r}

\section{Full Stan code}

Stan model code to be used with the preceding R code.

\lstinputlisting[style=Stansty]{../../code/hmc/d_sirode_euler.stan}
import data.rat.basic import tactic /- In this file we will build "the algebraic hierarchy", the classes of groups, rings, fields, modules etc. To get the exercises, run: ``` leanproject get fpvandoorn/Harvard-tutoring cp -r Harvard-tutoring/src/exercises/ Harvard-tutoring/src/my_exercises code Harvard-tutoring ``` For this week, the exercises are in the files `my_exercises/algebraic_hierarchy.lean` and `my_exercises/order.lean`. Also this week, think about a project you would like to formalize in Lean for the remaining weeks, and discuss it during your weekly meeting with your tutor. -/ /- ## Notation typeclasses We have notation typeclasses which are just there to provide notation. If you write `[has_mul G]` then `G` will have a multiplication called `*` (satisfying no axioms). Similarly `[has_one G]` gives you `(1 : G)` and `[has_inv G]` gives you a map `(λ g, g⁻¹ : G → G)` -/ #print has_mul example {G : Type} [has_mul G] (g h : G) : g * h = h * g := sorry -- this is false /- ## Groups -/ -- `extends` is used to extend other classes class my_group (G : Type) extends has_mul G, has_one G, has_inv G := (mul_assoc : ∀ (a b c : G), a * b * c = a * (b * c)) (one_mul : ∀ (a : G), 1 * a = a) (mul_left_inv : ∀ (a : G), a⁻¹ * a = 1) /- Advantages of this approach: axioms look lovely. Disadvantage: what if I want the group law to be `+`? I have embedded `has_mul` in the definition. -/ class my_add_group (G : Type) extends has_add G, has_zero G, has_neg G := (add_assoc : ∀ (a b c : G), a + b + c = a + (b + c)) (zero_add : ∀ (a : G), 0 + a = a) (add_left_neg : ∀ (a : G), -a + a = 0) attribute [to_additive] my_group /- Lean's solution: develop a `to_additive` metaprogram which translates all theorems about `group`s (with group law `*`) to theorems about `add_group`s (with group law `+`). -/ #print my_group.one_mul namespace my_group #print one_mul /- We can now prove more theorems about `my_group`, such as the following: `mul_left_cancel : ∀ (a b c : G), a * b = a * c → b = c` `mul_eq_of_eq_inv_mul {a x y : G} : x = a⁻¹ * y → a * x = y` `mul_one (a : G) : a * 1 = a` `mul_right_inv (a : G) : a * a⁻¹ = 1` -/ @[to_additive] lemma mul_left_cancel {G : Type} [my_group G] (a b c : G) (Habac : a * b = a * c) : b = c := calc b = 1 * b : by rw one_mul ... = (a⁻¹ * a) * b : by rw mul_left_inv ... = a⁻¹ * (a * b) : by rw mul_assoc ... = a⁻¹ * (a * c) : by sorry ... = (a⁻¹ * a) * c : by sorry ... = 1 * c : by sorry ... = c : by sorry #check @my_group.mul_left_cancel #check @my_add_group.add_left_cancel variables (x y : ℚ) example : x * y + y = y * (x + 1) := by { rw mul_comm, ring } #check @mul_eq_of_eq_inv_mul -- Abstract example of the power of classes: we can define products of groups with instances attribute [simp] mul_assoc one_mul mul_left_inv instance (G : Type) [my_group G] (H : Type) [my_group H] : my_group (G × H) := { mul := λ gh gh', (gh.1 * gh'.1, gh.2 * gh'.2), one := (1, 1), inv := λ gh, (gh.1⁻¹, gh.2⁻¹), mul_assoc := by { intros a b c, cases c, cases b, cases a, dsimp at *, simp at * }, one_mul := by tidy, mul_left_inv := by tidy } -- the type class inference system now knows that products of groups are groups example (G H K : Type) [my_group G] [my_group H] [my_group K] : my_group (G × H × K) := by { apply_instance } end my_group -- let's make a group of order two. 
-- First the elements {+1, -1} inductive mu2 | p1 : mu2 | m1 : mu2 namespace mu2 /- two preliminary facts -/ -- 1) give an algorithm to decide equality attribute [derive decidable_eq] mu2 -- 2) prove it is finite instance : fintype mu2 := ⟨⟨[mu2.p1, mu2.m1], dec_trivial⟩, λ x, by { cases x; exact dec_trivial, }⟩ -- Define multiplication by doing all cases def mul : mu2 → mu2 → mu2 | p1 p1 := p1 | p1 m1 := m1 | m1 p1 := m1 | m1 m1 := p1 -- now let's make it a group instance : my_group mu2 := begin refine { mul := mu2.mul, one := p1, inv := id, .. }, all_goals {exact dec_trivial}, end end mu2 -- Now let's build rings and modules and stuff (via monoids and add_comm_groups) -- a monoid is a group without inverses class my_monoid (M : Type) extends has_mul M, has_one M := (mul_assoc : ∀ (a b c : M), a * b * c = a * (b * c)) (one_mul : ∀ (a : M), 1 * a = a) (mul_one : ∀ (a : M), a * 1 = a) #print monoid -- rings are additive abelian groups and multiplicative monoids, -- with distributivity class my_ring (R : Type) extends my_monoid R, add_comm_group R := (mul_add : ∀ (a b c : R), a * (b + c) = a * b + a * c) (add_mul : ∀ (a b c : R), (a + b) * c = a * c + b * c) -- for commutative rings, add commutativity of multiplication class my_comm_ring (R : Type) extends my_ring R := (mul_comm : ∀ a b : R, a * b = b * a) /-- Typeclass for types with a scalar multiplication operation, denoted `•` (`\bu`) -/ class my_has_scalar (R : Type) (M : Type) := (smul : R → M → M) infixr ` • `:73 := my_has_scalar.smul -- modules over a ring class my_module (R : Type) [my_ring R] (M : Type) [add_comm_group M] extends my_has_scalar R M := (smul_add : ∀(r : R) (x y : M), r • (x + y) = r • x + r • y) (add_smul : ∀(r s : R) (x : M), (r + s) • x = r • x + s • x) (mul_smul : ∀ (r s : R) (x : M), (r * s) • x = r • s • x) (one_smul : ∀ x : M, (1 : R) • x = x) -- for fields we let ⁻¹ be defined on the entire field, and demand 0⁻¹ = 0 -- and that a⁻¹ * a = 1 for non-zero a. This is merely for convenience; -- one can easily check that it's mathematically equivalent to the usual -- definition of a field. class my_field (K : Type) extends my_comm_ring K, has_inv K := (zero_ne_one : (0 : K) ≠ 1) (mul_inv_cancel : ∀ {a : K}, a ≠ 0 → a * a⁻¹ = 1) (inv_zero : (0 : K)⁻¹ = 0) -- the type of vector spaces def my_vector_space (K : Type) [my_field K] (V : Type) [add_comm_group V] := my_module K V /- Let's check that we can make the rational numbers into a field. 
(easy because all the work is done in the import) -/ instance : my_field ℚ := { mul := (*), one := 1, mul_assoc := rat.mul_assoc, one_mul := rat.one_mul, mul_one := rat.mul_one, add := (+), zero := 0, neg := has_neg.neg, add_assoc := rat.add_assoc, zero_add := rat.zero_add, add_zero := rat.add_zero, add_left_neg := rat.add_left_neg, add_comm := rat.add_comm, mul_add := rat.mul_add, add_mul := rat.add_mul, mul_comm := rat.mul_comm, inv := has_inv.inv, zero_ne_one := rat.zero_ne_one, mul_inv_cancel := rat.mul_inv_cancel, inv_zero := inv_zero } /- Some tactics that will help: -/ example {G : Type} [group G] (a b c d : G) : ((a * b)⁻¹ * a * 1⁻¹⁻¹⁻¹ * b * b⁻¹ * 1 * c)⁻¹ = (c⁻¹⁻¹ * (d * d⁻¹ * 1⁻¹⁻¹) * c⁻¹ * c⁻¹⁻¹⁻¹ * b)⁻¹⁻¹ := by simp example {G : Type} [add_comm_group G] (a b : G) : (a + b) - ((b + a) + a) = -a := by abel -- rewriting in abelian groups example {R : Type} [comm_ring R] (a b : R) : (a + b) * (a - b) = a ^ 2 - b ^ 2 := by ring -- rewriting in commutative rings example {F : Type} [field F] (a b : F) (h : a ≠ 0) : (a + b) / a = 1 + b / a := by field_simp -- rewriting fractions in fields /- # Orders We also have an "order hierarchy". It starts with partially ordered sets, and then goes on to lattices. We will motivate it using subgroups. -/ open set -- The type of subgroups of a group G is called `subgroup G` in Lean. -- It already has a lattice structure in Lean. -- So let's just redo the entire theory and call it `my_subgroup G`. /-- The type of subgroups of a group `G`. -/ structure my_subgroup (G : Type) [group G] := -- A subgroup of G is a sub*set* of G, called `carrier` (carrier : set G) -- and then axioms saying it's closed under the group structure (i.e. *, 1, ⁻¹) (mul_mem {a b : G} : a ∈ carrier → b ∈ carrier → a * b ∈ carrier) (one_mem : (1 : G) ∈ carrier) (inv_mem {a : G} : a ∈ carrier → a⁻¹ ∈ carrier) namespace my_subgroup /- Note in particular that we have a function `my_subgroup.carrier : my_subgroup G → set G`, sending a subgroup of `G` to the underlying subset (`set G` is the type of subsets of G). -/ -- Let G be a group, let H,J,K be subgroups of G, and let a,b,c be elements of G. variables {G : Type} [group G] (H J K : my_subgroup G) (a b c : G) /- ## Extensionality -/ lemma carrier_injective (H J : my_subgroup G) (h : H.carrier = J.carrier) : H = J := begin cases H, cases J, simp * at *, end -- Now let's prove that two subgroups are equal iff they have the same elements. -- This is the most useful "extensionality lemma" so we tag it `@[ext]`. @[ext] theorem ext {H J : my_subgroup G} (h : ∀ (x : G), x ∈ H.carrier ↔ x ∈ J.carrier) : H = J := begin apply carrier_injective, ext, apply h, end -- We also want the `iff` version of this. theorem ext_iff {H J : my_subgroup G} : H = J ↔ ∀ (x : G), x ∈ H.carrier ↔ x ∈ J.carrier := begin sorry end /- ## Partial orders -/ -- We define `H ≤ J` to mean `H.carrier ⊆ J.carrier` -- "tidy" is a one-size-fits-all tactic which solves certain kinds of "follow your nose" goals. instance : partial_order (my_subgroup G) := { le := λ H J, H.carrier ⊆ J.carrier, le_refl := by tidy, le_trans := by tidy, le_antisymm := by tidy } -- We can give a second construction using `partial_order.lift`: example : partial_order (my_subgroup G) := partial_order.lift my_subgroup.carrier carrier_injective example {H J : my_subgroup G} (h : H < J) : H ≤ J := h.le /- ## From partial orders to lattices. We can now show that `my_subgroup G` is a semilattice with finite meets (binary meets and a top element). 
-/

def top : my_subgroup G :=
{ carrier := set.univ,
  mul_mem := begin sorry end,
  one_mem := begin sorry end,
  inv_mem := begin sorry end }

-- Add the `⊤` notation (typed with `\top`) for this subgroup:
instance : has_top (my_subgroup G) := ⟨top⟩

#check (⊤ : my_subgroup G)

/-- "Theorem" : intersection of two my_subgroups is a my_subgroup -/
definition inf (H K : my_subgroup G) : my_subgroup G :=
{ carrier := H.carrier ∩ K.carrier,
  mul_mem := begin sorry end,
  one_mem := begin sorry end,
  inv_mem := begin sorry end }

-- Add the `⊓` notation (type with `\inf`) for the intersection (inf) of two subgroups:
instance : has_inf (my_subgroup G) := ⟨inf⟩

-- We now check the four axioms for a semilattice_inf_top.
lemma le_top (H : my_subgroup G) : H ≤ ⊤ :=
begin sorry end

lemma inf_le_left (H K : my_subgroup G) : H ⊓ K ≤ H :=
begin sorry end

lemma inf_le_right (H K : my_subgroup G) : H ⊓ K ≤ K :=
begin sorry end

lemma le_inf (H J K : my_subgroup G) (h1 : H ≤ J) (h2 : H ≤ K) : H ≤ J ⊓ K :=
begin sorry end

-- Now we're ready to make the instance.
instance : semilattice_inf_top (my_subgroup G) :=
{ top := top,
  le_top := le_top,
  inf := inf,
  inf_le_left := inf_le_left,
  inf_le_right := inf_le_right,
  le_inf := le_inf,
  .. my_subgroup.partial_order }

/- ## Complete lattices

Let's now show that subgroups form a complete lattice. This has arbitrary `Inf` and `Sup`s. First we show we can form arbitrary intersections. -/

def Inf (S : set (my_subgroup G)) : my_subgroup G :=
{ carrier := ⋂ K ∈ S, (K : my_subgroup G).carrier,
  mul_mem := begin
    intros a b ha hb,
    simp at *,
    intros K hK,
    apply my_subgroup.mul_mem,
    tidy
  end,
  one_mem := begin sorry end,
  inv_mem := begin sorry end }

-- We now equip `my_subgroup G` with an Inf. The notation is `⨅`, or `\Inf`.
instance : has_Inf (my_subgroup G) := ⟨Inf⟩

instance : complete_lattice (my_subgroup G) :=
complete_lattice_of_Inf _ begin sorry end

/- ## Galois connections

A Galois connection is a pair of adjoint functors between two partially ordered sets, considered as categories whose hom sets Hom(H,J) have size 1 if H ≤ J and size 0 otherwise. In other words, a Galois connection between two partial orders α and β is a pair of monotone functions `l : α → β` and `u : β → α` such that `∀ (a : α) (b : β), l a ≤ b ↔ a ≤ u b`.

There is an example coming from Galois theory (between subfields and subgroups), and an example coming from classical algebraic geometry (between affine varieties and ideals); note that in both cases you have to use the opposite partial order on one side to make everything covariant. The examples we want to keep in mind here are:

1) α = subsets of G, β = subgroups of G, l = "subgroup generated by", u = `carrier`

2) X : Type, α := set (set X), β := topologies on X, l = topology generated by a collection of open sets, u = the open sets regarded as subsets.

As you can imagine, there are a bunch of abstract theorems with simple proofs proved for Galois connections. You can see them by `#check galois_connection`, jumping to the definition, and reading the next 150 lines of the mathlib file after the definition. Examples of theorems you might recognise from contexts where you have seen this before:

lemma le_u_l (a : α) : a ≤ u (l a) := ...
lemma l_u_le (b : β) : l (u b) ≤ b := ...
lemma u_l_u_eq_u : u ∘ l ∘ u = u := ...
lemma l_u_l_eq_l : l ∘ u ∘ l = l := ...

# Galois insertions

A particularly cool kind of Galois connection is a Galois insertion, which is a Galois connection such that `l ∘ u = id`.
This is true for both the examples we're keeping in mind (the subgroup of `G` generated by a subgroup is the same subgroup; the topology on `X` generated by a topology is the same topology). Our new goal: let's make subgroups of a group into a complete lattice, using the fact that `carrier` is part of a Galois insertion. -/ -- The adjoint functor to the `carrier` functor is the `span` functor -- from subsets to my_subgroups. Here we will CHEAT by using `Inf` to -- define `span`. We could have built `span` directly with -- an inductive definition. def span (S : set G) : my_subgroup G := Inf {H : my_subgroup G | S ⊆ H.carrier} -- Here are some theorems about it. lemma monotone_carrier : monotone (my_subgroup.carrier : my_subgroup G → set G) := begin sorry end lemma monotone_span : monotone (span : set G → my_subgroup G) := begin sorry end lemma subset_span (S : set G) : S ≤ (span S).carrier := begin sorry end lemma span_my_subgroup (H : my_subgroup G) : span H.carrier = H := begin sorry end -- We have proved all the things we need to show that `span` and `carrier` -- form a Galois insertion, using `galois_insertion.monotone_intro`. def gi_my_subgroup : galois_insertion (span : set G → my_subgroup G) (my_subgroup.carrier : my_subgroup G → set G) := galois_insertion.monotone_intro monotone_carrier monotone_span subset_span span_my_subgroup -- Note that `set G` is already a complete lattice: example : complete_lattice (set G) := by apply_instance -- and now `my_subgroup G` can also be made into a complete lattice, by -- a theorem about Galois insertions. Again, I don't use `instance` -- because we already made the instance above. example : complete_lattice (my_subgroup G) := galois_insertion.lift_complete_lattice gi_my_subgroup end my_subgroup
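To see what the Galois insertion packages up in example 1, the defining adjunction for `span` and `carrier` can be written out explicitly (a LaTeX summary of the property, not additional Lean code; it follows directly from the `Inf`-based definition of `span` above):

```latex
% l = span : set G -> my_subgroup G,   u = carrier : my_subgroup G -> set G
\[
\operatorname{span} S \le H \;\iff\; S \subseteq H.\mathrm{carrier}
\qquad \text{for every subset } S \text{ of } G \text{ and subgroup } H,
\]
% and the insertion condition l (u H) = H is exactly span_my_subgroup.
```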
```python
%pylab inline
import sk_dsp_comm.sigsys as ss
import sk_dsp_comm.fir_design_helper as fir_d
import sk_dsp_comm.iir_design_helper as iir_d
import sk_dsp_comm.multirate_helper as mrh
import scipy.signal as signal
from IPython.display import Audio, display
from IPython.display import Image, SVG
```

```python
%config InlineBackend.figure_formats=['svg'] # SVG inline viewing
```

# Filter Design Using the Helper Modules

The Scipy package *signal* assists with the design of many digital filter types. As an alternative, here we explore the use of the filter design modules found in `scikit-dsp-comm` (https://github.com/mwickert/scikit-dsp-comm). In this note we briefly explore the use of `sk_dsp_comm.fir_design_helper` and `sk_dsp_comm.iir_design_helper`. In the examples that follow we assume the import of these modules is made as follows:

```python
import sk_dsp_comm.fir_design_helper as fir_d
import sk_dsp_comm.iir_design_helper as iir_d
```

The functions in these modules provide an easier and more consistent interface for both finite impulse response (FIR) (linear phase) and infinite impulse response (IIR) classical designs. Functions inside these modules *wrap* `scipy.signal` functions and also incorporate new functionality.

# Design From Amplitude Response Requirements

With both `fir_design_helper` and `iir_design_helper` a design starts with amplitude response requirements, that is the filter passband critical frequencies, stopband critical frequencies, passband ripple, and stopband attenuation. The number of taps/coefficients (FIR case) or the filter order (IIR case) needed to meet these requirements is then determined and the filter coefficients are returned as an ndarray `b` for FIR, and for IIR both `b` and `a` arrays, and a second-order sections `sos` 2D array, with the rows containing the corresponding cascade of second-order sections topology for IIR filters.

For the FIR case we have in the $z$-domain

$$ H_\text{FIR}(z) = \sum_{k=0}^N b_k z^{-k} $$

with ndarray `b` = $[b_0, b_1, \ldots, b_N]$. For the IIR case we have in the $z$-domain

$$\begin{align} H_\text{IIR}(z) &= \frac{\sum_{k=0}^M b_k z^{-k}}{1 + \sum_{k=1}^N a_k z^{-k}} \\ &= \prod_{k=0}^{N_s-1} \frac{b_{k0} + b_{k1} z^{-1} + b_{k2} z^{-2}}{1 + a_{k1} z^{-1} + a_{k2} z^{-2}} = \prod_{k=0}^{N_s-1} H_k(z) \end{align}$$

where $N_s = \lfloor(N+1)/2\rfloor$. For the `b/a` form the coefficients are arranged as

```python
b = [b0, b1, ..., bM], the numerator filter coefficients
a = [a0, a1, ..., aN], the denominator filter coefficients (a0 = 1)
```

For the `sos` form each row of the 2D `sos` array corresponds to the coefficients of $H_k(z)$, as follows:

```python
SOS_mat = [[b00, b01, b02, 1, a01, a02], #biquad 0
           [b10, b11, b12, 1, a11, a12], #biquad 1
            .
            .
           [bNs-10, bNs-11, bNs-12, 1, aNs-11, aNs-12]] #biquad Ns-1
```

# Linear Phase FIR Filter Design

The primary focus of this module is adding the ability to design linear phase FIR filters from user friendly amplitude response requirements.

Most digital filter design is motivated by the desire to approach an ideal filter. Recall an ideal filter will pass signals of a certain range of frequencies and block others. For both analog and digital filters the designer can choose from a variety of approximation techniques. For digital filters the approximation techniques fall into the categories of IIR or FIR.
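Before moving on, here is a quick illustration of the two IIR coefficient layouts (`b/a` and `sos`) described above, using plain `scipy.signal` rather than the helper modules (a minimal sketch; the elliptic design parameters are arbitrary):

```python
import scipy.signal as signal

# A small 4th-order elliptic lowpass, just to show the two layouts.
b, a = signal.ellip(4, 0.5, 60, 0.2, output='ba')  # b/a transfer-function form
sos = signal.tf2sos(b, a)                          # cascade-of-biquads form

print(b)    # [b0, b1, ..., bM]
print(a)    # [a0, a1, ..., aN] with a0 = 1
print(sos)  # one row [bk0, bk1, bk2, 1, ak1, ak2] per biquad
```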
In the design of FIR filters two popular techniques are truncating the ideal filter impulse response and applying a window, and optimum equiripple approximations [Oppenheim2010](https://www.amazon.com/Discrete-Time-Signal-Processing-3rd-Prentice-Hall/dp/0131988425/ref=sr_1_1?ie=UTF8&qid=1519940790&sr=8-1&keywords=oppenheim+discrete+time+signal+processing&dpID=51v48p99JjL&preST=_SX218_BO1,204,203,200_QL40_&dpSrc=srch). Frequency sampling based approaches are also popular, but will not be considered here, even though `scipy.signal` supports all three.

Filter design generally begins with a specification of the desired frequency response. The filter frequency response may be stated in several ways, but amplitude response is the most common, e.g., state how $H_c(j\Omega)$ or $H(e^{j\omega}) = H(e^{j2\pi f/f_s})$ should behave. A completed design consists of the number of coefficients (taps) required and the coefficients themselves (double precision float or `float64` in Numpy, and `float64_t` in C). Figure 1, below, shows amplitude response requirements in terms of filter gain and critical frequencies for lowpass, highpass, bandpass, and bandstop filters. The critical frequencies are given here in terms of analog requirements in Hz. The sampling frequency is assumed to be in Hz. The passband ripple and stopband attenuation values are in dB. Note in dB terms attenuation is the negative of gain, e.g., -60 dB of stopband gain is equivalent to 60 dB of stopband attenuation.

```python
Image('300ppi/[email protected]',width='90%')
```

There are 10 filter design functions and one plotting function available in `fir_design_helper.py`: four functions for designing Kaiser window based FIR filters and four functions for designing equiripple based FIR filters. All eight of these take in amplitude response requirements and return a coefficients array. Two of the 10 filter functions are simply wrappers around the `scipy.signal` function `signal.firwin()` for designing filters of a specific order when one (lowpass) or two (bandpass) critical frequencies are given. The wrapper functions fix the window type to the `firwin` default of hann (hanning). The remaining eight are described below in Table 1. The plotting function provides an easy means to compare the resulting frequency response of one or more designs on a single plot. Display modes allow gain in dB, phase in radians, group delay in samples, and group delay in seconds for a given sampling rate. This function, `freq_resp_list()`, works for both FIR and IIR designs.

Table 1 provides the interface details to the eight design functions where d_stop and d_pass are positive dB values and the critical frequencies have the same unit as the sampling frequency $f_s$. These functions do not create perfect results so some tuning of the design parameters may be needed, in addition to bumping the filter order up or down via `N_bump`.

```python
Image('300ppi/[email protected]',width='80%')
```

## Design Examples

### Example 1: Lowpass with $f_s = 1$ Hz

For this 31 tap filter we choose the cutoff frequency to be $F_c = F_s/8$, or in normalized form $f_c = 1/8$.
```python
b_k = fir_d.firwin_kaiser_lpf(1/8,1/6,50,1.0)
b_r = fir_d.fir_remez_lpf(1/8,1/6,0.2,50,1.0)
```

```python
fir_d.freqz_resp_list([b_k,b_r],[[1],[1]],'dB',fs=1)
ylim([-80,5])
title(r'Kaiser vs Equal Ripple Lowpass')
ylabel(r'Filter Gain (dB)')
xlabel(r'Frequency in Hz')
legend((r'Kaiser: %d taps' % len(b_k),r'Remez: %d taps' % len(b_r)),loc='best')
grid();
```

```python
b_k_hp = fir_d.firwin_kaiser_hpf(1/8,1/6,50,1.0)
b_r_hp = fir_d.fir_remez_hpf(1/8,1/6,0.2,50,1.0)
```

```python
fir_d.freqz_resp_list([b_k_hp,b_r_hp],[[1],[1]],'dB',fs=1)
ylim([-80,5])
title(r'Kaiser vs Equal Ripple Highpass')
ylabel(r'Filter Gain (dB)')
xlabel(r'Frequency in Hz')
legend((r'Kaiser: %d taps' % len(b_k_hp),r'Remez: %d taps' % len(b_r_hp)),loc='best')
grid();
```

```python
b_k_bp = fir_d.firwin_kaiser_bpf(7000,8000,14000,15000,50,48000)
b_r_bp = fir_d.fir_remez_bpf(7000,8000,14000,15000,0.2,50,48000)
```

```python
fir_d.freqz_resp_list([b_k_bp,b_r_bp],[[1],[1]],'dB',fs=48)
ylim([-80,5])
title(r'Kaiser vs Equal Ripple Bandpass')
ylabel(r'Filter Gain (dB)')
xlabel(r'Frequency in kHz')
legend((r'Kaiser: %d taps' % len(b_k_bp),
        r'Remez: %d taps' % len(b_r_bp)),
       loc='lower right')
grid();
```

## A Design Example Useful for Interpolation or Decimation

Here we consider a lowpass design that needs to pass frequencies from [0, 4000] Hz with a sampling rate of 96000 Hz. This scenario arises when building an interpolator using the classes of the `scikit-dsp-comm` module `multirate_helper.py` to increase the sampling rate from 8000 Hz to 96000 Hz, or an interpolation factor of $L = 12$. Note at the top of this notebook we also have the import

```python
import sk_dsp_comm.multirate_helper as mrh
```

so that some of the functionality can be accessed. For more details on the use of `multirate_helper` [see](https://mwickert.github.io/scikit-dsp-comm/example_notebooks/multirate_helper/Multirate_Processing.html).

Start with an equiripple design having transition band centered on 4000 Hz with passband ripple of 0.5 dB and stopband attenuation of 60 dB.

```python
b_up = fir_d.fir_remez_lpf(3300,4300,0.5,60,96000)
```

```python
mr_up = mrh.multirate_FIR(b_up)
```

* Consider the pole-zero configuration for this high-order filter

```python
# Take a look at the pole-zero configuration of this very
# high-order (many taps) linear phase FIR
mr_up.zplane()
```

* Check out the passband and stopband gains

```python
# Verify the passband and stopband gains are as expected
mr_up.freq_resp('db',96000)
```

* See that the group delay is the expected value of $(N_\text{taps} - 1)/2 = 98$ samples

```python
(len(b_up)-1)/2
```

```python
# Verify that the FIR design has constant group delay (N_taps - 1)/2 samples
mr_up.freq_resp('groupdelay_s',96000,[0,100])
```

The object `mr_up` can now be used for interpolation or decimation with a rate change factor of 12.
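As a hedged sketch of what that interpolation step looks like with plain `scipy.signal` (using `upfirdn` rather than the `multirate_helper` class methods, so nothing is assumed about the exact `mrh` API; the test tone is made up for illustration):

```python
import numpy as np
import scipy.signal as signal

L = 12                                   # 8 kHz -> 96 kHz rate change
n = np.arange(8000)
x = np.cos(2*np.pi*1000/8000*n)          # 1 kHz tone sampled at 8 kHz

# upfirdn inserts L-1 zeros between samples, then convolves with b_up;
# the factor of L restores the passband gain lost to zero insertion.
y = L * signal.upfirdn(b_up, x, up=L)    # now sampled at 96 kHz
```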
# Traditional IIR Filter Design using the Bilinear Transform

The scipy.signal package fully supports the design of IIR digital filters from analog prototypes. IIR filters, like FIR filters, are typically designed with amplitude response requirements in mind. A collection of design functions are available directly from `scipy.signal` for this purpose, in particular the function `scipy.signal.iirdesign()`. To make the design of lowpass, highpass, bandpass, and bandstop filters consistent with the module `fir_design_helper.py` the module `iir_design_helper.py` was written. Figure 2, below, details how the amplitude response parameters are defined graphically.

```python
Image('300ppi/[email protected]',width='90%')
```

Within `iir_design_helper.py` there are four filter design functions and a collection of supporting functions available. The four filter design functions are used for designing lowpass, highpass, bandpass, and bandstop filters, utilizing Butterworth, Chebyshev type 1, Chebyshev type 2, and elliptical filter prototypes. See [Oppenheim2010](https://www.amazon.com/Discrete-Time-Signal-Processing-3rd-Prentice-Hall/dp/0131988425/ref=sr_1_1?ie=UTF8&qid=1519940790&sr=8-1&keywords=oppenheim+discrete+time+signal+processing&dpID=51v48p99JjL&preST=_SX218_BO1,204,203,200_QL40_&dpSrc=srch) and [ECE 5650 notes Chapter 9](http://www.eas.uccs.edu/~mwickert/ece5650/notes/N5650_9.pdf) for detailed design information. The function interfaces are described in Table 2.

```python
Image('300ppi/[email protected]',width='80%')
```

The filter functions return the filter coefficients in two formats:

1. Traditional transfer function form as numerator coefficients `b` and denominator `a` coefficients arrays, and
2. Cascade of biquadratic sections form using the previously introduced sos 2D array or matrix.

Both are provided to allow further analysis with either a direct form topology or the sos form. The underlying `signal.iirdesign()` function also provides a third option: a list of poles and zeros. The `sos` form is desirable for high precision filters, as it is more robust to coefficient quantization, in spite of using double precision coefficients in the `b` and `a` arrays.

Of the remaining support functions four are also described in Table 2, above. The most significant functions are `freqz_resp_cas_list`, available for graphically comparing the frequency response over several designs, and `sos_zplane`, a function for plotting the pole-zero pattern. Both operate using the `sos` matrix. A transfer function form (`b/a`) for frequency response plotting, `freqz_resp_list`, is also present in the module. This function was first introduced in the FIR design section. The frequency response function plotting offers modes for gain in dB, phase in radians, group delay in samples, and group delay in seconds, all for a given sampling rate in Hz. The pole-zero plotting function locates poles and zeros more accurately than `sk_dsp_comm.sigsys.zplane`, since working from the `sos` matrix means the numpy function `roots()` only has to solve quadratic polynomials. Also, repeated roots can be displayed as theoretically expected, and are noted in the graphical display by superscripts next to the pole and zero markers.

## IIR Design Based on the Bilinear Transformation

There are multiple ways of designing IIR filters based on amplitude response requirements. When the desire is to have the filter approximation follow an analog prototype such as Butterworth, Chebyshev, etc., the standard approach is the bilinear transformation. The function `signal.iirdesign()` described above does exactly this. In the example below we consider lowpass amplitude response requirements and see how the filter order changes when we choose different analog prototypes.

### Example: Lowpass Design Comparison

The lowpass amplitude response requirements given $f_s = 48$ kHz are:

1. $f_\text{pass} = 5$ kHz
2. $f_\text{stop} = 8$ kHz
3. Passband ripple of 0.5 dB
4. Stopband attenuation of 60 dB
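These requirements map directly onto the underlying `scipy.signal.iirdesign()` call. As a hedged sketch (the helper functions wrap this call, but the exact wrapper internals are not assumed here; critical frequencies are normalized to the Nyquist rate $f_s/2$):

```python
import scipy.signal as signal

fs = 48000
# pass to 5 kHz, stop from 8 kHz, 0.5 dB passband ripple, 60 dB attenuation
sos = signal.iirdesign(wp=5000/(fs/2), ws=8000/(fs/2),
                       gpass=0.5, gstop=60,
                       ftype='ellip', output='sos')
```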
Design four filters to meet the same requirements: `butter`, `cheby1`, `cheby2`, and `ellip`:

```python
fs = 48000
f_pass = 5000
f_stop = 8000
b_but,a_but,sos_but = iir_d.IIR_lpf(f_pass,f_stop,0.5,60,fs,'butter')
b_cheb1,a_cheb1,sos_cheb1 = iir_d.IIR_lpf(f_pass,f_stop,0.5,60,fs,'cheby1')
b_cheb2,a_cheb2,sos_cheb2 = iir_d.IIR_lpf(f_pass,f_stop,0.5,60,fs,'cheby2')
b_elli,a_elli,sos_elli = iir_d.IIR_lpf(f_pass,f_stop,0.5,60,fs,'ellip')
```

#### Frequency Response Comparison

Here we compare the magnitude response in dB using the `sos` form of each filter as the input. The elliptic is the most efficient, and actually overachieves by reaching the stopband requirement at less than 8 kHz.

```python
iir_d.freqz_resp_cas_list([sos_but,sos_cheb1,sos_cheb2,sos_elli],'dB',fs=48)
ylim([-80,5])
title(r'IIR Lowpass Compare')
ylabel(r'Filter Gain (dB)')
xlabel(r'Frequency in kHz')
legend((r'Butter order: %d' % (len(a_but)-1),
        r'Cheby1 order: %d' % (len(a_cheb1)-1),
        r'Cheby2 order: %d' % (len(a_cheb2)-1),
        r'Elliptic order: %d' % (len(a_elli)-1)),loc='best')
grid();
```

Next plot the pole-zero configuration of just the Butterworth design. Here we use a special version of `ss.zplane` that works with the `sos` 2D array.

```python
iir_d.sos_zplane(sos_but)
```

Note the two plots above can also be obtained using the transfer function form via `iir_d.freqz_resp_list([b],[a],'dB',fs=48)` and `ss.zplane(b,a)`, respectively. The `sos` form will yield more accurate results, as it is less sensitive to coefficient quantization. This is particularly true for the pole-zero plot, as rooting a 15th degree polynomial is far more subject to errors than rooting a simple quadratic.

For the 15th-order Butterworth the bilinear transformation maps the expected 15 s-domain zeros at infinity to $z=-1$. If you use `sk_dsp_comm.sigsys.zplane()` you will find that the 15 zeros are in a tight circle around $z=-1$, indicating polynomial rooting errors. Likewise the frequency response will be more accurate.

Signal filtering of an ndarray `x` using these filter designs is done with functions from `scipy.signal`:

1. For transfer function form `y = signal.lfilter(b,a,x)`
2. For sos form `y = signal.sosfilt(sos,x)`

## A Half-Band Filter Design to Pass up to $W/2$ when $f_s = 8$ kHz

Here we consider a lowpass design that needs to pass frequencies up to $f_s/4$. Specifically when $f_s = 8000$ Hz, the filter passband becomes [0, 2000] Hz. Once the coefficients are found a `mrh.multirate` object is created to allow further study of the filter, and ultimately implement filtering of a white noise signal.

Start with an elliptical design having transition band centered on 2000 Hz with passband ripple of 0.5 dB and stopband attenuation of 80 dB. The transition bandwidth is set to 100 Hz, with 50 Hz on either side of 2000 Hz.

```python
# Elliptic IIR Lowpass
b_lp,a_lp,sos_lp = iir_d.IIR_lpf(1950,2050,0.5,80,8000.,'ellip')
mr_lp = mrh.multirate_IIR(sos_lp)
```

```python
mr_lp.freq_resp('db',8000)
```

Pass Gaussian white noise of variance $\sigma_x^2 = 1$ through the filter. Use a lot of samples so the spectral estimate can accurately form $S_y(f) = \sigma_x^2\cdot |H(e^{j2\pi f/f_s})|^2 = |H(e^{j2\pi f/f_s})|^2$.
```python
x = randn(1000000)
y = mr_lp.filter(x)
psd(x,2**10,8000);
psd(y,2**10,8000);
title(r'Filtering White Noise Having $\sigma_x^2 = 1$')
legend(('Input PSD','Output PSD'),loc='best')
ylim([-130,-30])
```

```python
fs = 8000
print('Expected PSD of %2.3f dB/Hz' % (0-10*log10(fs),))
```

## Amplitude Response Bandpass Design

Here we consider FIR and IIR bandpass designs for use in an SSB demodulator to remove potential adjacent channel signals sitting either side of a frequency band running from 23 kHz to 24 kHz.

```python
b_rec_bpf1 = fir_d.fir_remez_bpf(23000,24000,28000,29000,0.5,70,96000,8)
fir_d.freqz_resp_list([b_rec_bpf1],[1],mode='dB',fs=96000)
ylim([-80, 5])
grid();
```

The group delay is flat (constant) by virtue of the design having linear phase.

```python
b_rec_bpf1 = fir_d.fir_remez_bpf(23000,24000,28000,29000,0.5,70,96000,8)
fir_d.freqz_resp_list([b_rec_bpf1],[1],mode='groupdelay_s',fs=96000)
grid();
```

Compare the FIR design with an elliptical design:

```python
b_rec_bpf2,a_rec_bpf2,sos_rec_bpf2 = iir_d.IIR_bpf(23000,24000,28000,29000,
                                                   0.5,70,96000,'ellip')
with np.errstate(divide='ignore'):
    iir_d.freqz_resp_cas_list([sos_rec_bpf2],mode='dB',fs=96000)
ylim([-80, 5])
grid();
```

This high order elliptic has a nice tight amplitude response for minimal coefficients, but the group delay is terrible:

```python
with np.errstate(divide='ignore', invalid='ignore'): #manage singularity warnings
    iir_d.freqz_resp_cas_list([sos_rec_bpf2],mode='groupdelay_s',fs=96000)
#ylim([-80, 5])
grid();
```
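As a closing sketch tying back to the two filtering forms listed earlier, both calls apply to the elliptic bandpass just designed (how closely they agree depends on the direct form's numerical conditioning at this high order):

```python
import numpy as np
import scipy.signal as signal

x = np.random.randn(10000)                        # white noise test input
y_sos = signal.sosfilt(sos_rec_bpf2, x)           # robust cascade form
y_ba = signal.lfilter(b_rec_bpf2, a_rec_bpf2, x)  # direct b/a form

# For a high-order elliptic the direct form can differ noticeably.
print(np.max(np.abs(y_sos - y_ba)))
```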
Require Import Coq.Strings.String Coq.Sets.Ensembles. Require Import Fiat.Common. Require Export Fiat.Computation.Notations. Definition Comp := @Ensemble. Definition Return (A : Type) : A -> Comp A := Singleton A. Definition Bind (A B : Type) (ca : Comp A) (k : A -> Comp B) : Comp B := fun b => exists a : A, In A ca a /\ In B (k a) b. Definition Pick (A : Type) (P : Ensemble A) : Comp A := P. Definition Bind2 (A B C: Type) (c : Comp (A * B)) (k : A -> B -> Comp C) := Bind c (fun ab => k (fst ab) (snd ab)). Bind Scope comp_scope with Comp. Arguments Bind [A%type B%type] ca%comp k%comp _. Arguments Bind2 [A%type B%type C%type] c%comp k%comp _. Arguments Return [_] _ _. Arguments Pick [_] _ _. Notation ret := Return. Notation "x >>= y" := (Bind x%comp y%comp) : comp_scope. Notation "x <- y ; z" := (Bind y%comp (fun x => z%comp)) (at level 81, right associativity, format "'[v' x <- y ; '/' z ']'") : comp_scope. Notation "`( a , b ) <- c ; k" := (Bind2 c%comp (fun a b => k%comp)) (at level 81, right associativity, format "'[v' `( a , b ) <- c ; '/' k ']'") : comp_scope. Notation "x >> z" := (Bind x%comp (fun _ => z%comp) ) (at level 81, right associativity, format "'[v' x >> '/' z ']'") : comp_scope. Notation "{ x | P }" := (@Pick _ (fun x => P)) : comp_scope. Notation "{ x : A | P }" := (@Pick A%type (fun x => P)) : comp_scope. Definition computes_to {A : Type} (ca : Comp A) (a : A) : Prop := ca a. Notation "c ↝ v" := (computes_to c v). Lemma ReturnComputes {A : Type} : forall (a : A), ret a ↝ a. Proof. constructor. Qed. Lemma BindComputes {A B: Type} : forall (ca : Comp A) (f : A -> Comp B) (a : A) (b : B), ca ↝ a -> f a ↝ b -> ca >>= f ↝ b. Proof. econstructor; eauto. Qed. Lemma PickComputes {A : Type} : forall (P : Ensemble A) (a : A), P a -> {a' | P a'} ↝ a. Proof. intros; eauto. Qed. Lemma Return_inv {A : Type} : forall (a v : A), ret a ↝ v -> a = v. Proof. destruct 1; reflexivity. Qed. Lemma Bind_inv {A B: Type} : forall (ca : Comp A) (f : A -> Comp B) (v : B), ca >>= f ↝ v -> exists a', ca ↝ a' /\ f a' ↝ v. Proof. destruct 1; eauto. Qed. Lemma Pick_inv {A : Type} : forall (P : Ensemble A) (v : A), {a | P a} ↝ v -> P v. Proof. eauto. Qed. (** The old program might be non-deterministic, and the new program less so. This means we want to say that if [new] can compute to [v], then [old] should be able to compute to [v], too. *) Definition refine {A} (old : Comp A) (new : Comp A) := forall v, new ↝ v -> old ↝ v. (* A definition and notation for pretty printing the goals used to interactively deriving refinements. *) Definition Refinement_Of {A} (c : Comp A) := {c' | refine c c'}. Notation "'Refinement' 'of' c" := {c' | refine c c'} (at level 0, no associativity, format "'Refinement' 'of' '/' '[v' c ']' " ) : comp_scope. (** Define a symmetrized version of [refine] for ease of rewriting *) Definition refineEquiv {A} (old : Comp A) (new : Comp A) := refine old new /\ refine new old. Local Ltac t := repeat first [ solve [ unfold computes_to in *; eauto ] | progress hnf in * | intro | split | progress split_and ]. Global Instance refine_PreOrder A : PreOrder (@refine A). t. Qed. Global Instance refineEquiv_Equivalence A : Equivalence (@refineEquiv A). t. Qed. Global Opaque Return. Global Opaque Bind. Global Opaque Pick. Global Opaque computes_to. Global Hint Resolve ReturnComputes. Global Hint Resolve BindComputes. Global Hint Resolve PickComputes. 
Ltac computes_to_inv := repeat match goal with | H : {a' | @?P a'} ↝ _ |- _ => apply Pick_inv in H | H : Return ?a ↝ _ |- _ => apply Return_inv in H | H : Bind (A := ?A) ?ca ?k ↝ _ |- _ => apply Bind_inv in H; let a' := fresh "v" in let H' := fresh H "'" in destruct H as [a' [H H'] ] end. Ltac computes_to_econstructor := first [ unfold refine; intros; eapply @ReturnComputes | unfold refine; intros; eapply @BindComputes | unfold refine; intros; eapply @PickComputes ]. Ltac computes_to_constructor := first [ unfold refine; intros; apply @ReturnComputes | unfold refine; intros; apply @BindComputes | unfold refine; intros; apply @PickComputes ].
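As an illustration only (not part of the Fiat development), the refinement relation above can be sketched in Python by modeling a computation over a finite type as the set of values it may compute to; `refine old new` then says every value the new program computes is one the old program could already compute:

```python
# Comp A modeled as the set of values a computation may produce.
def ret(a):
    return {a}

def bind(ca, k):
    # b is computable iff some a in ca makes b computable under k
    return {b for a in ca for b in k(a)}

def refine(old, new):
    # the (more deterministic) new program may only compute values
    # that the old program could already compute
    return new <= old

pick_even = {0, 2, 4}                    # Pick of "even", over a small domain
assert refine(pick_even, ret(2))         # committing to 2 refines the pick
assert refine(bind(pick_even, lambda x: ret(x + 1)), {3})
```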
Formal statement is: theorem root_test_divergence: fixes f :: "nat \<Rightarrow> 'a :: banach" defines "l \<equiv> limsup (\<lambda>n. ereal (root n (norm (f n))))" assumes l: "l > 1" shows "\<not>summable f" Informal statement is: If the root test for a series $\sum a_n$ gives a limit superior greater than 1, then the series diverges.
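A worked instance of the statement, for the geometric sequence $f(n) = 2^n$ in $\mathbb{R}$:

```latex
% root test: limsup of the n-th root of the norm
\limsup_{n\to\infty} \sqrt[n]{\lVert 2^n \rVert}
  = \limsup_{n\to\infty} 2 = 2 > 1,
\qquad \text{hence } \textstyle\sum_{n} 2^n \text{ is not summable.}
```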
Formal statement is: lemma interior_maximal: "T \<subseteq> S \<Longrightarrow> open T \<Longrightarrow> T \<subseteq> interior S" Informal statement is: If $T$ is an open subset of $S$, then $T$ is contained in the interior of $S$.
% this program is designed to correct the monthly variability for weather
% generator generated daily precip according to the monthly precip generated
% by FFT using a simple linear relationship
function [monthly_corrected_precip]=monthly_precip_correction(filenameout)

% load WG generated data
load(filenameout)
n=size(gP,1);

% load FFT generated monthly precip
monthly_FFT=zeros(n,12);
load('Pnew1')
monthly_FFT(:,1)=Pnew';
load('Pnew2')
monthly_FFT(:,2)=Pnew';
load('Pnew3')
monthly_FFT(:,3)=Pnew';
load('Pnew4')
monthly_FFT(:,4)=Pnew';
load('Pnew5')
monthly_FFT(:,5)=Pnew';
load('Pnew6')
monthly_FFT(:,6)=Pnew';
load('Pnew7')
monthly_FFT(:,7)=Pnew';
load('Pnew8')
monthly_FFT(:,8)=Pnew';
load('Pnew9')
monthly_FFT(:,9)=Pnew';
load('Pnew10')
monthly_FFT(:,10)=Pnew';
load('Pnew11')
monthly_FFT(:,11)=Pnew';
load('Pnew12')
monthly_FFT(:,12)=Pnew';

% calculate the monthly precip for the generated data
monthly_generated=zeros(n,12);
for i=1:n
    monthly_generated(i,1)=sum(gP(i,1:31));
    monthly_generated(i,2)=sum(gP(i,32:59));
    monthly_generated(i,3)=sum(gP(i,60:90));
    monthly_generated(i,4)=sum(gP(i,91:120));
    monthly_generated(i,5)=sum(gP(i,121:151));
    monthly_generated(i,6)=sum(gP(i,152:181));
    monthly_generated(i,7)=sum(gP(i,182:212));
    monthly_generated(i,8)=sum(gP(i,213:243));
    monthly_generated(i,9)=sum(gP(i,244:273));
    monthly_generated(i,10)=sum(gP(i,274:304));
    monthly_generated(i,11)=sum(gP(i,305:334));
    monthly_generated(i,12)=sum(gP(i,335:365));
end

% calculate the ratio of FFT generated data to WG generated data (monthly ratio)
monthly_ratio=zeros(n,12);
for i=1:n
    for j=1:12
        if monthly_generated(i,j)==0
            monthly_generated(i,j)=monthly_FFT(i,j);
        else
            monthly_ratio(i,j)=monthly_FFT(i,j)/monthly_generated(i,j);
        end
    end
end
monthly_ratio=monthly_ratio';
monthly_ratio=reshape(monthly_ratio,[],1);

% extend the above monthly ratio to daily scale, the data in each month are the same
monthly_ratio2=zeros(365*n,1);
j=0;
for i=0:365:365*n-1
    monthly_ratio2(i+1:i+31,1)=monthly_ratio(j+1,1);
    monthly_ratio2(i+32:i+59,1)=monthly_ratio(j+2,1);
    monthly_ratio2(i+60:i+90,1)=monthly_ratio(j+3,1);
    monthly_ratio2(i+91:i+120,1)=monthly_ratio(j+4,1);
    monthly_ratio2(i+121:i+151,1)=monthly_ratio(j+5,1);
    monthly_ratio2(i+152:i+181,1)=monthly_ratio(j+6,1);
    monthly_ratio2(i+182:i+212,1)=monthly_ratio(j+7,1);
    monthly_ratio2(i+213:i+243,1)=monthly_ratio(j+8,1);
    monthly_ratio2(i+244:i+273,1)=monthly_ratio(j+9,1);
    monthly_ratio2(i+274:i+304,1)=monthly_ratio(j+10,1);
    monthly_ratio2(i+305:i+334,1)=monthly_ratio(j+11,1);
    monthly_ratio2(i+335:i+365,1)=monthly_ratio(j+12,1);
    j=j+12;
end

% generate the years, months and days
daily_generated=zeros(365*n+1,6);
for i=1:366
    daily_generated(i,:)=datevec(i);
end
% delete the 60th row because it is Feb 29th
daily_generated(60,:)=[];
% delete the fifth and sixth columns
daily_generated(:,6)=[];
daily_generated(:,5)=[];
% extend the dates of the first year to all years
j=1;
for i=366:365:365*n
    daily_generated(i:i+365-1,:)=daily_generated(1:365,:);
    daily_generated(i:i+365-1,1)=j;
    j=j+1;
end

% correct the WG generated data
[n,m]=size(gP);
gP=gP';
j=1;
for i=1:365:m*n
    daily_generated(i:i+364,4)=gP(:,j);
    j=j+1;
end
monthly_adjust=zeros(size(daily_generated));
monthly_adjust(:,1:3)=daily_generated(:,1:3);
for i=1:365*n
    monthly_adjust(i,4)=daily_generated(i,4)*monthly_ratio2(i,1);
end
monthly_corrected_precip=monthly_adjust;
save('monthly_corrected_precip','monthly_corrected_precip')
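For readers working outside MATLAB, here is a minimal NumPy sketch of the same monthly ratio correction (the function and argument names are hypothetical, and the month-boundary bookkeeping is collapsed into a `month_of_day` lookup table):

```python
import numpy as np

def monthly_ratio_correction(daily_wg, monthly_fft, month_of_day):
    """Scale WG daily precip so each month's total matches the FFT total.

    daily_wg:     (n_years, 365) daily precip from the weather generator
    monthly_fft:  (n_years, 12) target monthly totals from the FFT model
    month_of_day: (365,) month index 0..11 for each day of a non-leap year
    """
    corrected = daily_wg.astype(float).copy()
    for m in range(12):
        days = month_of_day == m
        wg_month = corrected[:, days].sum(axis=1)            # (n_years,)
        # same guard as the MATLAB: a zero-precip WG month gets ratio 0,
        # so its (all-zero) days are left at zero
        ratio = np.divide(monthly_fft[:, m], wg_month,
                          out=np.zeros_like(wg_month), where=wg_month != 0)
        corrected[:, days] *= ratio[:, None]
    return corrected
```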
#manually rotate the wheels and test the encoder import RPi.GPIO as gpio import time import numpy as np def init(): gpio.setmode(gpio.BOARD) gpio.setup(12,gpio.IN,pull_up_down = gpio.PUD_UP) def gameover(): gpio.cleanup() init() counter = np.uint64(0) button = int(0) while True: if int(gpio.input(12))!=int(button): button = int(gpio.input(12)) counter+=1 print(counter) if counter>=20: gameover() print("exit") break
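Polling in a tight loop like this can miss fast edges; as a hedged alternative sketch, RPi.GPIO's interrupt API counts transitions in a callback instead (same pin 12 wiring assumed):

```python
import RPi.GPIO as gpio

counter = 0

def on_edge(channel):
    # called from RPi.GPIO's background thread on each transition
    global counter
    counter += 1
    print(counter)

gpio.setmode(gpio.BOARD)
gpio.setup(12, gpio.IN, pull_up_down=gpio.PUD_UP)
# fire on both rising and falling edges, like the polling version
gpio.add_event_detect(12, gpio.BOTH, callback=on_edge)

try:
    input("rotate the wheel; press Enter to stop\n")
finally:
    gpio.cleanup()
```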
module Resimp -- IO operations which read a resource data Reader : Type -> Type where MkReader : IO a -> Reader a getReader : Reader a -> IO a getReader (MkReader x) = x ior : IO a -> Reader a ior = MkReader -- IO operations which update a resource data Updater : Type -> Type where MkUpdater : IO a -> Updater a getUpdater : Updater a -> IO a getUpdater (MkUpdater x) = x iou : IO a -> Updater a iou = MkUpdater -- IO operations which create a resource data Creator : Type -> Type where MkCreator : IO a -> Creator a getCreator : Creator a -> IO a getCreator (MkCreator x) = x ioc : IO a -> Creator a ioc = MkCreator infixr 5 :-> using (i: Fin n, gam : Vect n Ty, gam' : Vect n Ty, gam'' : Vect n Ty) data Ty = R Type | Val Type | Choice Type Type | (:->) Type Ty interpTy : Ty -> Type interpTy (R t) = IO t interpTy (Val t) = t interpTy (Choice x y) = Either x y interpTy (a :-> b) = a -> interpTy b data HasType : Vect n Ty -> Fin n -> Ty -> Type where stop : HasType (a :: gam) fZ a pop : HasType gam i b -> HasType (a :: gam) (fS i) b data Env : Vect n Ty -> Type where Nil : Env Nil (::) : interpTy a -> Env gam -> Env (a :: gam) envLookup : HasType gam i a -> Env gam -> interpTy a envLookup stop (x :: xs) = x envLookup (pop k) (x :: xs) = envLookup k xs update : (gam : Vect n Ty) -> HasType gam i b -> Ty -> Vect n Ty update (x :: xs) stop y = y :: xs update (x :: xs) (pop k) y = x :: update xs k y update Nil stop _ impossible total envUpdate : (p:HasType gam i a) -> (val:interpTy b) -> Env gam -> Env (update gam p b) envUpdate stop val (x :: xs) = val :: xs envUpdate (pop k) val (x :: xs) = x :: envUpdate k val xs envUpdate stop _ Nil impossible total envUpdateVal : (p:HasType gam i a) -> (val:b) -> Env gam -> Env (update gam p (Val b)) envUpdateVal stop val (x :: xs) = val :: xs envUpdateVal (pop k) val (x :: xs) = x :: envUpdateVal k val xs envUpdateVal stop _ Nil impossible envTail : Env (a :: gam) -> Env gam envTail (x :: xs) = xs data Args : Vect n Ty -> List Type -> Type where ANil : Args gam [] ACons : HasType gam i a -> Args gam as -> Args gam (interpTy a :: as) funTy : List Type -> Ty -> Ty funTy list.Nil t = t funTy (a :: as) t = a :-> funTy as t data Res : Vect n Ty -> Vect n Ty -> Ty -> Type where {-- Resource creation and usage. 'Let' creates a resource - the type at the end means that the resource must have been consumed by the time it goes out of scope, where "consumed" simply means that it has been replaced with a value of type '()'. 
--} Let : Creator (interpTy a) -> Res (a :: gam) (Val () :: gam') (R t) -> Res gam gam' (R t) Update : (a -> Updater b) -> (p:HasType gam i (Val a)) -> Res gam (update gam p (Val b)) (R ()) Use : (a -> Reader b) -> HasType gam i (Val a) -> Res gam gam (R b) {-- Control structures --} Lift : IO a -> Res gam gam (R a) Check : (p:HasType gam i (Choice (interpTy a) (interpTy b))) -> (failure:Res (update gam p a) (update gam p c) t) -> (success:Res (update gam p b) (update gam p c) t) -> Res gam (update gam p c) t While : Res gam gam (R Bool) -> Res gam gam (R ()) -> Res gam gam (R ()) Return : a -> Res gam gam (R a) (>>=) : Res gam gam' (R a) -> (a -> Res gam' gam'' (R t)) -> Res gam gam'' (R t) ioret : a -> IO a ioret = return iolift : IO a -> Res gam gam (R a) iolift = Lift interp : Env gam -> (e : Res gam gam' t) -> (Env gam' -> interpTy t -> IO u) -> IO u interp env (Let val scope) k = do x <- getCreator val interp (x :: env) scope (\env', scope' => k (envTail env') scope') interp env (Update method x) k = do x' <- getUpdater (method (envLookup x env)) k (envUpdateVal x x' env) (return ()) interp env (Use method x) k = do x' <- getReader (method (envLookup x env)) k env (return x') interp env (Lift io) k = k env io interp env (Check x left right) k = either (\a => interp (envUpdate x a env) left k) (\b => interp (envUpdate x b env) right k) (envLookup x env) interp env (While test body) k = interp env test (\env', result => do r <- result if (not r) then (k env' (return ())) else (interp env' body (\env'', body' => do v <- body' -- make sure it's evalled interp env'' (While test body) k ))) interp env (Return v) k = k env (return v) interp env (v >>= f) k = interp env v (\env', v' => do n <- v' interp env' (f n) k) -- run : {static} Res [] [] (R t) -> IO t -- run prog = interp [] prog (\env, res => res) syntax run [prog] = interp [] prog (\env, res => res) dsl res variable = id let = Let index_first = stop index_next = pop syntax RES [x] = {gam:Vect n Ty} -> Res gam gam (R x)
Formal statement is: lemma closed_negations: fixes S :: "'a::real_normed_vector set" assumes "closed S" shows "closed ((\<lambda>x. -x) ` S)" Informal statement is: If $S$ is a closed set, then the set of all negations of elements of $S$ is also closed.
SUBROUTINE GSDEVA ( device, iunit, filnam, itype, xsize, ysize, + iret ) C************************************************************************ C* GSDEVA * C* * C* This subroutine sets the plot device to be used by GEMPLT. If * C* another device is in use when it is called, GSDEVA terminates * C* plotting on that device, then starts the device subprocess for * C* the requested device. * C* * C* GSDEVA ( DEVICE, IUNIT, FILNAM, ITYPE, XSIZE, YSIZE, IRET ) * C* * C* Input parameters: * C* DEVICE CHAR* Device name * C* IUNIT INTEGER Type of output device * C* For XW: * C* 1 = GEMPAK window * C* 2 = Motif window * C* FILNAM CHAR* File name or window name * C* ITYPE INTEGER Device color capability * C* XSIZE REAL Width in inches or pixels * C* YSIZE REAL Height in inches or pixels * C* * C* Output parameters: * C* IRET INTEGER Return code * C** * C* Log: * C* M. Linda/GSC 3/96 GSDEVA based on GSDEV * C* S. Jacobs/NCEP 4/96 Added iunit to DSDATT * C* S. Jacobs/NCEP 4/96 Reordered error checking for blank dev * C* S. Jacobs/NCEP 9/96 Added checks for and processing of XWP * C* S. Jacobs/NCEP 11/96 Added check to reset colors * C* M. Linda/GSC 2/97 Removed GFLUSH * C* C. Lin/EAI 6/97 Modified the IF condition in setting * C* device attribute section * C* S. Wang/GSC 03/97 Remove re_initializing sub-device color * C************************************************************************ INCLUDE 'ERROR.PRM' INCLUDE 'DEVCHR.CMN' INCLUDE 'DEVREQ.CMN' INCLUDE 'DEVSET.CMN' INCLUDE 'DEVWIN.CMN' INCLUDE 'XYDEF.CMN' C* CHARACTER*(*) device, filnam C* CHARACTER dev*12 LOGICAL newdv C------------------------------------------------------------------------ iret = NORMAL C CALL ST_LCUC ( device, dev, ier ) IF ( ( iunit .lt. 1 ) .or. ( iunit .gt. 2 ) ) iunit = 1 C C* Check to see if the device has changed. C IF ( ( dev .ne. ddev ) .or. ( ddev .eq. ' ' ) ) THEN C C* Check for a previous device or for the special XWP device. C IF ( ( ddev .eq. ' ' ) .or. + ( ( curdev .eq. 'XWP' ) .and. + ( ( dev .eq. 'XWP' ) .or. + ( dev .eq. 'XW' ) .or. + ( dev .eq. 'PS' ) ) ) ) THEN C C* If switching from PS to XW, close the plot file. C IF ( ( ddev .eq. 'PS' ) .and. + ( ( dev .eq. 'XWP' ) .or. + ( dev .eq. 'XW' ) ) ) THEN CALL DCLOSP ( ncurwn, ier ) END IF C ELSE C C* If there was a device installed, stop it. C ieop = 1 CALL DENDD ( ieop, ier ) END IF C C* Start the new device driver. C CALL DINITA ( dev, curdev, iunit, filnam, itype, + xsize, ysize, ncurwn, iret ) ncurwn = ncurwn + 1 newdv = .true. ELSE C C* Send possibly changed attributes to device. C CALL DSDATT ( iunit, filnam, itype, xsize, ysize, + ncurwn, iret ) ncurwn = ncurwn + 1 newdv = .false. END IF C C* Set the device characteristics and attributes. C IF ( ( iret .eq. NEWWIN ) .or. ( iret .eq. NWSIZE ) .or. + ( newdv .and. ( iret .eq. NORMAL ) ) ) THEN C C* If this is a new window, reset the margins. C IF ( iret .eq. NEWWIN ) THEN CALL GSMMGN ( 0., 0., 0., 0., ier ) CALL GSGMGN ( 0., 0., 0., 0., ier ) END IF C C* Reset iret to NORMAL return code. C iret = NORMAL C C* Get the information from /DEVCHR/. C CALL DQDCHR ( nncolr, ier ) C C* Store the device name in /DEVCHR/. C IF ( ( curdev .eq. 'XWP' ) .and. + ( ( dev .eq. 'XWP' ) .or. + ( dev .eq. 'XW' ) .or. + ( dev .eq. 'PS' ) ) ) THEN curdev = 'XWP' ELSE curdev = dev END IF ddev = dev niunit = iunit C C* Set the drawing attributes and map/graph projections. C CALL GSATTR ( iret ) ELSE IF ( ( newdv ) .and. ( iret .ne. NORMAL ) ) THEN ddev = ' ' IF ( iret .eq. 
NOPROC ) iret = NODEVC RETURN END IF END IF C C* Flush the buffers and make the window appear. C CALL GEPLOT ( ier ) C* RETURN END
using PDIPS using LinearAlgebra using Printf using SparseArrays using Test @testset "Types" begin include("types.jl") include("algorithm.jl") end
[STATEMENT] lemma bres_sol: "(x \<cdot> (x \<rightarrow> y) = y) = (\<exists>z. x \<cdot> z = y)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. (x \<cdot> (x \<rightarrow> y) = y) = (\<exists>z. x \<cdot> z = y) [PROOF STEP] using bres_galois order.antisym mult_isol [PROOF STATE] proof (prove) using this: (?x \<cdot> ?y \<le> ?z) = (?y \<le> ?x \<rightarrow> ?z) \<lbrakk>?a \<le> ?b; ?b \<le> ?a\<rbrakk> \<Longrightarrow> ?a = ?b ?x \<le> ?y \<Longrightarrow> ?z \<cdot> ?x \<le> ?z \<cdot> ?y goal (1 subgoal): 1. (x \<cdot> (x \<rightarrow> y) = y) = (\<exists>z. x \<cdot> z = y) [PROOF STEP] by force
Case studies of employers involved with flexible working are on their way to the website. If you're an employer and have a flexible working story to tell, make sure you submit it via our online form. Any experience with flexible working is welcome, so submit today. You could be our next case study!
Formal statement is: lemma finite_imp_closed: "finite S \<Longrightarrow> closed S" Informal statement is: If $S$ is a finite set, then $S$ is closed.
# Time Optimal Velocity Profiles

***

When the maze solver commands that the robot go forward, it can say that it must go forward one or more squares depending on what it knows about the maze. When we don't know what is after the square we pass through, we must be going slow enough to handle any scenario. In other words, there is some $V_f$ that we must reach by the end of our motion. We also begin motions at this speed, since to arrive where we are we were required to reach $V_f$. Therefore, we start and end at $V_f$, and we want to cover some distance $d$ in the fastest possible time. To do so, we accelerate at our fixed $a$ until we reach max speed, or until we need to start slowing down (whichever comes first). This gives us a trapezoid shaped velocity profile.

## Going Straight

```python
%load_ext tikzmagic
```

    The tikzmagic extension is already loaded. To reload it, use:
      %reload_ext tikzmagic

```python
%%tikz -s 400,400
\draw[->] (0,0) -- (10,0);
\draw[->] (0,0) -- (0,5);
\draw[line width=1] (0,0.5) -- (2.5,3);
\draw[line width=1] (2.5,3) -- (5.5,3);
\draw[line width=1] (5.5,3) -- (8,0.5);
\draw[dashed] (0,0.5) -- (10,0.5);
\draw[dashed] (0,3) -- (10,3);
\draw[dashed] (2.5,0) -- (2.5,5);
\draw[dashed] (5.5,0) -- (5.5,5);
\draw[dashed] (8,0) -- (8,5);
\draw (-0.5, 0.5) node {$V_{f}$};
\draw (-0.5, 3) node {$V_{max}$};
\draw (2.5, -0.5) node {$t_b$};
\draw (5.5, -0.5) node {$t_f-t_b$};
\draw (8, -0.5) node {$t_f$};
```

The time to accelerate from $V_f$ to $V_{max}$ is $t_b = \frac{V-V_f}{a}$. We can substitute this into Newton's first equation of motion as follows.

\begin{align}
d &= Vt_b - \frac{1}{2}a{t_b}^2 \\
&= V\Big(\frac{V-V_f}{a}\Big) - \frac{1}{2}a\Big(\frac{V-V_f}{a}\Big)^2 \\
&= \Big(\frac{V^2-VV_f}{a}\Big) - \Big(\frac{a(V-V_f)^2}{2a^2}\Big) \\
&= \Big(\frac{2V^2-2VV_f}{2a}\Big) - \Big(\frac{V^2-2VV_f+{V_f}^2}{2a}\Big) \\
&= \frac{2V^2-2VV_f - V^2 + 2VV_f - {V_f}^2}{2a} \\
d &= \frac{V^2-{V_f}^2}{2a} \\
\end{align}

For example, if you're starting at $V_f=0.2\frac{m}{s}$, and you're ramping up to $V=0.5\frac{m}{s}$, and your acceleration is fixed at $a=2\frac{m}{s^2}$, the distance you'll need to do that is $d = \frac{0.5^2 - 0.2^2}{2\cdot 2} = 0.0525m$

## Code that proves it

```python
# dependencies and global setup
import numpy as np
import matplotlib.pyplot as plt
np.set_printoptions(suppress=True, precision=3, linewidth=100)

LOG_LVL = 2

def debug(*args):
    if LOG_LVL <= 0:
        print(*args)

def info(*args):
    if LOG_LVL <= 1:
        print(*args)

def warning(*args):
    if LOG_LVL <= 2:
        print(*args)

def log(*args):
    if LOG_LVL < 100:
        print(*args)
```

```python
def profile(V0, Vf, Vmax, d, A, buffer=3e-4):
    v = V0
    x = 0
    a = A
    vs = [v]
    xs = [x]
    a_s = [a]
    dt = 0.001
    while x < d:
        x = x + v*dt + a*dt*dt/2.0
        v = v + a*dt
        ramp_d = (v*v - Vf*Vf) / (2.0*A)
        if (d-x) < ramp_d + buffer:
            a = -A
        elif v < Vmax:
            a = A
        else:
            a = 0
        if v > Vmax:
            v = Vmax
        elif v < Vf:
            v = Vf
        xs.append(x)
        vs.append(v)
        a_s.append(a)
    return xs, vs, a_s

def graph(title, idx):
    plt.figure()
    plt.title(title)
    # various max speeds
    Vs = [0.35, 0.5, 0.75, 1, 2]
    Vf = 0.02
    V0 = 0.2
    d = 0.35
    a = 2
    for V in Vs:
        results = profile(V0, Vf, V, d, a)
        vs = results[1]
        if V == 2:
            # make V=2 dashed so we can see it over V=1
            plt.plot(results[idx], label='V={}'.format(V), linestyle='dashed')
        else:
            plt.plot(results[idx], label='V={}'.format(V))
    plt.legend(bbox_to_anchor=(1, 1), loc=2)

graph("position", 0)
graph("velocity", 1)
graph("acceleration", 2)
plt.show()
```

## General Form Trajectory Planning
Let's start out with generating trajectories that are not time optimal, but rely on specifying the final time $t_f$. For smartmouse, our state space is $[x, y, \theta]$, and a turn can be defined as starting at a point $[x_0, y_0, \theta_0]$ and going to $[x_f, y_f, \theta_f]$. Of course, we also want to specify the velocities at these points, $[\dot{x}_0, \dot{y}_0,\dot{\theta}_0]$ and $[\dot{x}_f, \dot{y}_f,\dot{\theta}_f]$. We have four constraints, so if we want to fit a smooth polynomial to those points we need a polynomial with four coefficients, i.e. a cubic.

$$q(t) = a_0 + a_1t + a_2t^2 + a_3t^3$$
$$\dot{q}(t) = a_1 + 2a_2t + 3a_3t^2$$

If we sub in our constraints, we get the following system of equations.

\begin{align}
q(0) &= a_0 \\
\dot{q}(0) &= a_1 \\
q(t_f) &= a_0 + a_1t_f + a_2{t_f}^2 + a_3{t_f}^3\\
\dot{q}(t_f) &= a_1 + 2a_2t_f + 3a_3{t_f}^2\\
\end{align}

In matrix form that looks like:

\begin{equation}
\begin{bmatrix}
1 & 0 & 0 & 0 \\
0 & 1 & 0 & 0 \\
1 & t_f & t_f^2 & t_f^3 \\
0 & 1 & 2t_f & 3t_f^2 \\
\end{bmatrix}
\begin{bmatrix}
a_0 \\
a_1 \\
a_2 \\
a_3 \\
\end{bmatrix} =
\begin{bmatrix}
q(0) \\
\dot{q}(0) \\
q(t_f) \\
\dot{q}(t_f) \\
\end{bmatrix}
\end{equation}

It can be shown that the matrix on the left is invertible, so long as $t_f-t_0 > 0$. So we can invert and solve this equation and get all the $a$ coefficients. We can then use this polynomial to generate the $q(t)$ and $\dot{q}(t)$ -- our trajectory.

```python
def simple_traj_solve(q_0, q_dot_0, q_t_f, q_dot_t_f, t_f):
    # Example: you are a point in space (one dimension), go from rest
    # at the origin to rest at 0.18 in 1 second
    b = np.array([[q_0], [q_dot_0], [q_t_f], [q_dot_t_f]])
    a = np.array([[1,0,0,0],[0,1,0,0],[1, t_f, pow(t_f,2),pow(t_f,3)],[0,1,2*t_f,3*pow(t_f,2)]])
    log(a, b)
    coeff = np.linalg.solve(a, b)
    log(coeff)
    return coeff

simple_traj_info = (0, 0, 0.18, 0, 1)
simple_traj_coeff = simple_traj_solve(*simple_traj_info)
```

    [[1 0 0 0]
     [0 1 0 0]
     [1 1 1 1]
     [0 1 2 3]]
    [[0.  ]
     [0.  ]
     [0.18]
     [0.  ]]
    [[ 0.  ]
     [ 0.  ]
     [ 0.54]
     [-0.36]]

Here you can see that the resulting coefficients are $a_0=0$, $a_1=0$, $a_2=0.54$, $a_3=-0.36$. Intuitively, this says that we're going to have positive acceleration, but our acceleration is going to slow down over time. Let's graph it!

```python
def simple_traj_plot(coeff, t_f):
    dt = 0.01
    ts = np.array([[1, t, pow(t,2), pow(t,3)] for t in np.arange(0, t_f+dt, dt)])
    qs = ts@coeff
    plt.plot(ts[:,1], qs, label="x")
    plt.xlabel("time (seconds)")
    plt.ylabel("X (meters)")
    plt.legend(bbox_to_anchor=(1,1), loc=2)
    plt.show()

simple_traj_plot(simple_traj_coeff, simple_traj_info[-1])
```

**ooooooooooh so pretty**

Let's try another example, now with our full state space of $[x, y, \theta]$.

```python
def no_dynamics():
    # In this example, we go from (0.09, 0.09, 0) to (0.27, 0.18, -1.5707).
    # Our starting and ending velocities are zero
    q_0 = np.array([0.09,0.09,0])
    q_dot_0 = np.array([0,0,0])
    q_f = np.array([0.27,0.18,-1.5707])
    q_dot_f = np.array([0,0,0])
    t_f = 1
    b = np.array([q_0, q_dot_0, q_f, q_dot_f])
    a = np.array([[1,0,0,0],[0,1,0,0],[1, t_f, pow(t_f,2),pow(t_f,3)],[0,1,2*t_f,3*pow(t_f,2)]])
    coeff = np.linalg.solve(a, b)
    log(coeff)
    dt = 0.1
    ts = np.array([[1, t, pow(t,2), pow(t,3)] for t in np.arange(0, t_f+dt, dt)])
    qs = ts@coeff
    plt.rc('text', usetex=True)
    plt.rc('font', family='serif')
    plt.gca().set_adjustable("box")
    plt.subplot(221)
    plt.plot(ts[:,1], qs[:,0])
    plt.xlabel("time (seconds)")
    plt.title("x")
    plt.subplot(222)
    plt.plot(ts[:,1], qs[:,1])
    plt.xlabel("time (seconds)")
    plt.title("y")
    plt.subplot(223)
    plt.plot(ts[:,1], qs[:,2])
    plt.xlabel("time (seconds)")
    plt.title(r"$\theta$")
    plt.subplot(224)
    plt.scatter(qs[:,0], qs[:,1])
    plt.axis('equal')
    plt.xlabel("X")
    plt.ylabel("Y")
    plt.tight_layout()
    plt.show()

no_dynamics()
```

Well, they are smooth, but these are not possible to execute! The robot cannot simply translate sideways.

# Trajectory Planning With a Simple Dynamics Model

***

```python
%%tikz -s 100,100
\draw [rotate around={-45:(0,0)}] (-.5,-1) rectangle (0.5,1);
\filldraw (0,0) circle (0.125);
\draw [->] (0,0) -- (0,1.5);
\draw [->] (0,0) -- (1.5,0);
\draw [->] (0,0) -- (1.5,1.5);
\draw (1.2, -0.2) node {$x$};
\draw (-0.2, 1.2) node {$y$};
\draw (1, 1.2) node {$v$};
```

We need to change the constraints in our system of equations. Specifically, we need our dynamics model. For now, let's assume a simplified car model.

$$ \dot{x} = v\cos(\theta) $$
$$ \dot{y} = v\sin(\theta) $$

This basically claims that for any instant in time the robot is moving at a constant velocity along $\theta$. This isn't very accurate, but let's just start with that since the real dynamics of our robot are more complex.

First we will bring in the constraints from before. We must satisfy specific initial and final positions in $[x, y, \theta]$. I've used new letters for coefficients to avoid confusion.

\begin{align}
x_0 &= c_0 + c_1(0) + c_2(0)^2 + c_3(0)^3 + c_4(0)^4 + c_5(0)^5 \\
y_0 &= d_0 + d_1(0) + d_2(0)^2 + d_3(0)^3 + d_4(0)^4 + d_5(0)^5 \\
x_{t_f} &= c_0 + c_1(t_f) + c_2(t_f)^2 + c_3(t_f)^3 + c_4(t_f)^4 + c_5(t_f)^5 \\
y_{t_f} &= d_0 + d_1(t_f) + d_2(t_f)^2 + d_3(t_f)^3 + d_4(t_f)^4 + d_5(t_f)^5 \\
\end{align}

Notice here we have 12 unknowns, $c_0 \dots c_5$ and $d_0 \dots d_5$. So we're gonna need more equations for there to be a unique solution. Also notice we haven't defined any constraints related to our dynamics model. That would be a good place to get our other equations!

First, we want to be able to specify initial velocity $v_0$ and final velocity $v_{t_f}$. It is easier to just constrain $\dot{x}_0$, $\dot{y}_0$, $\dot{x}_{t_f}$, $\dot{y}_{t_f}$. So if we want to specify that we start facing $\tfrac{\pi}{2}$ going 1m/s, we'd just specify $\cos(\tfrac{\pi}{2})$ for $\dot{x}_0$ and $\sin(\tfrac{\pi}{2})$ for $\dot{y}_0$.

\begin{align}
\dot{x}_0 &= c_1 \\
\dot{y}_0 &= d_1 \\
\dot{x}_{t_f} &= (0)c_0 + (1)c_1 + 2t_fc_2 + 3{t_f}^2c_3 + 4{t_f}^3c_4 + 5{t_f}^4c_5 \\
\dot{y}_{t_f} &= (0)d_0 + (1)d_1 + 2t_fd_2 + 3{t_f}^2d_3 + 4{t_f}^3d_4 + 5{t_f}^4d_5
\end{align}

Let's also make sure x and y components obey trigonometry.
\begin{align}
v\cos(\theta)\sin(\theta) + v\sin(\theta)\cos(\theta) &= v\sin(2\theta) \\
\dot{x}\sin(\theta) + \dot{y}\cos(\theta) &= v\sin(2\theta)
\end{align}

We can get two equations out of this by specifying initial and final velocities

\begin{align}
v_0\sin(2\theta_0) &= \dot{x}_0\sin(\theta_0) + \dot{y}_0\cos(\theta_0) \\
v_{t_f}\sin(2\theta_{t_f}) &= \dot{x}_{t_f}\sin(\theta_{t_f}) + \dot{y}_{t_f}\cos(\theta_{t_f})
\end{align}

We should write out the full form though, to make things in terms of our coefficients.

\begin{align}
v_0\sin(2\theta_0) &= \Big[c_1 + 2(0)c_2 + 3(0)^2c_3 + 4(0)^3c_4 + 5(0)^4c_5\Big]\sin(\theta_0) + \Big[d_1 + 2(0)d_2 + 3(0)^2d_3 + 4(0)^3d_4 + 5(0)^4d_5\Big]\cos(\theta_0) \\
v_0\sin(2\theta_0) &= \sin(\theta_0)c_1 + \cos(\theta_0)d_1
\end{align}

\begin{align}
v_{t_f}\sin(2\theta_{t_f}) &= \Big[c_1 + 2(t_f)c_2 + 3(t_f)^2c_3 + 4(t_f)^3c_4 + 5(t_f)^4c_5\Big]\sin(\theta_{t_f}) + \Big[d_1 + 2(t_f)d_2 + 3(t_f)^2d_3 + 4(t_f)^3d_4 + 5(t_f)^4d_5\Big]\cos(\theta_{t_f}) \\
v_{t_f}\sin(2\theta_{t_f}) &= \sin(\theta_{t_f})c_1 + 2\sin(\theta_{t_f})t_fc_2 + 3\sin(\theta_{t_f}){t_f}^2c_3 + 4\sin(\theta_{t_f}){t_f}^3c_4 + 5\sin(\theta_{t_f}){t_f}^4c_5 + \cos(\theta_{t_f})d_1 + 2\cos(\theta_{t_f})t_fd_2 + 3\cos(\theta_{t_f}){t_f}^2d_3 + 4\cos(\theta_{t_f}){t_f}^3d_4 + 5\cos(\theta_{t_f}){t_f}^4d_5 \\
\end{align}

The last two equations constrain the robot from moving in any direction other than its heading. Of course it must relate $\dot{x}$ to $\dot{y}$. Still not totally sure how we got this equation so I'm just copying it from some slides$\dots$. However you can plug in some example values and check. For instance translating sideways violates this equation: set $\dot{x}=1$, $\dot{y}=0$, $v=1$, $\theta=\tfrac{\pi}{2}$.

\begin{align}
v\cos(\theta)\sin(\theta) - v\cos(\theta)\sin(\theta) &= 0 \\
v\cos(\theta)\sin(\theta) - v\sin(\theta)\cos(\theta) &= 0 \\
\dot{x}\sin(\theta) - \dot{y}\cos(\theta) &= 0
\end{align}

and again written out fully in terms of our coefficients

\begin{align}
\Big[c_1 + 2(0)c_2 + 3(0)^2c_3 + 4(0)^3c_4 + 5(0)^4c_5\Big]\sin(\theta_0) - \Big[d_1 + 2(0)d_2 + 3(0)^2d_3 + 4(0)^3d_4 + 5(0)^4d_5\Big]\cos(\theta_0) &= 0 \\
\sin(\theta_0)c_1 - \cos(\theta_0)d_1 &= 0
\end{align}

\begin{align}
\Big[c_1 + 2(t_f)c_2 + 3(t_f)^2c_3 + 4(t_f)^3c_4 + 5(t_f)^4c_5\Big]\sin(\theta_{t_f}) - \Big[d_1 + 2(t_f)d_2 + 3(t_f)^2d_3 + 4(t_f)^3d_4 + 5(t_f)^4d_5\Big]\cos(\theta_{t_f}) &= 0 \\
\sin(\theta_{t_f})c_1 + 2\sin(\theta_{t_f})t_fc_2 + 3\sin(\theta_{t_f}){t_f}^2c_3 + 4\sin(\theta_{t_f}){t_f}^3c_4 + 5\sin(\theta_{t_f}){t_f}^4c_5 - \cos(\theta_{t_f})d_1 - 2\cos(\theta_{t_f})t_fd_2 - 3\cos(\theta_{t_f}){t_f}^2d_3 - 4\cos(\theta_{t_f}){t_f}^3d_4 - 5\cos(\theta_{t_f}){t_f}^4d_5 &= 0
\end{align}

Ok, that should work. Now let's write it out in matrix form. We use $c$ and $s$ to shorten $\sin$ and $\cos$.
\setcounter{MaxMatrixCols}{20} \begin{equation} \begin{bmatrix} 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 \\ 0 & s(\theta_0) & 0 & 0 & 0 & 0 & 0 & c(\theta_0) & 0 & 0 & 0 & 0\\ 0 & s(\theta_0) & 0 & 0 & 0 & 0 & 0 & -c(\theta_0) & 0 & 0 & 0 & 0\\ 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\ 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0\\ 1 & t & {t_f}^2 & {t_f}^3 & {t_f}^4 & {t_f}^5 & 0 & 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 & 0 & 0 & 1 & t_f & {t_f}^2 & {t_f}^3 & {t_f}^4 & {t_f}^5 \\ 0 & s(\theta_{t_f}) & 2s(\theta_{t_f})t_f & 3s(\theta_{t_f}){t_f}^2 & 4s(\theta_{t_f}){t_f}^3 & 5s(\theta_{t_f}){t_f}^4 & 0 & c(\theta_{t_f}) & 2c(\theta_{t_f}){t_f} & 3c(\theta_{t_f}){t_f}^2 & 4c(\theta_{t_f}){t_f}^3 & 5c(\theta_{t_f}){t_f}^4 \\ 0 & s(\theta_{t_f}) & 2s(\theta_{t_f})t_f & 3s(\theta_{t_f}){t_f}^2 & 4s(\theta_{t_f}){t_f}^3 & 5s(\theta_{t_f}){t_f}^4 & 0 & -c(\theta_{t_f}) & -2c(\theta_{t_f}){t_f} & -3c(\theta_{t_f}){t_f}^2 & -4c(\theta_{t_f}){t_f}^3 & -5c(\theta_{t_f}){t_f}^4 \\ 0 & 1 & 2t_f & 3{t_f}^2 & 4{t_f}^3 & 5{t_f}^4 & 0 & 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 & 2t_f & 3{t_f}^2 & 4{t_f}^3 & 5{t_f}^4 \end{bmatrix} \begin{bmatrix} c_0 \\ c_1 \\ c_2 \\ c_3 \\ c_4 \\ c_5 \\ d_0 \\ d_1 \\ d_2 \\ d_3 \\ d_4 \\ d_5 \end{bmatrix} = \begin{bmatrix} x_0 \\ y_0 \\ 0 \\ v_0s(2\theta_0) \\ c(\theta_0)v_0 \\ s(\theta_0)v_0 \\ x_{t_f} \\ y_{t_f} \\ 0 \\ v_{t_f}s(2\theta_{t_f}) \\ c(\theta_{t_f})v_{t_f} \\ s(\theta_{t_f})v_{t_f} \\ \end{bmatrix} \end{equation} ```python # Let's solve this in code like we did before def plot_vars(traj_plan): dt = 0.001 T = np.arange(0, traj_plan.get_t_f()+dt, dt) xts = np.array([[1, t, pow(t,2), pow(t,3), pow(t,4), pow(t,5), 0, 0, 0, 0, 0, 0] for t in T]) xdts = np.array([[0, 1, 2*t, 3*pow(t,2), 4*pow(t,3), 5*pow(t,4), 0, 0, 0, 0, 0, 0] for t in T]) yts = np.array([[0, 0, 0, 0, 0, 0, 1, t, pow(t,2), pow(t,3), pow(t,4), pow(t,5)] for t in T]) ydts = np.array([[0, 0, 0, 0, 0, 0, 0, 1, 2*t, 3*pow(t,2), 4*pow(t,3), 5*pow(t,4)] for t in T]) xs = xts@traj_plan.get_coeff() ys = yts@traj_plan.get_coeff() xds = xdts@traj_plan.get_coeff() yds = ydts@traj_plan.get_coeff() plt.rc('text', usetex=True) plt.rc('font', family='serif') plt.rc('axes.formatter', useoffset=False) plt.figure(figsize=(10, 2.5)) plt.subplot(141) plt.plot(T, xs, linewidth=3) plt.xlabel("time (seconds)") plt.title("X") plt.subplot(142) plt.plot(T, ys, linewidth=3, color='r') plt.xlabel("time (seconds)") plt.title("Y") plt.subplot(143) plt.plot(T, xds, linewidth=3, color='g') plt.xlabel("time (seconds)") plt.title("$\dot{x}$") plt.tight_layout() plt.subplot(144) plt.plot(T,yds, linewidth=3, color='y') plt.xlabel("time (seconds)") plt.title("$\dot{y}$") plt.tight_layout() plt.show() def plot_traj(traj_plan): dt = 0.03 T = np.arange(0, traj_plan.get_t_f()+dt, dt) xts = np.array([[1, t, pow(t,2), pow(t,3), pow(t,4), pow(t,5), 0, 0, 0, 0, 0, 0] for t in T]) yts = np.array([[0, 0, 0, 0, 0, 0, 1, t, pow(t,2), pow(t,3), pow(t,4), pow(t,5)] for t in T]) xs = xts@traj_plan.get_coeff() ys = yts@traj_plan.get_coeff() plot_traj_pts(xs, ys, T, traj_plan.waypoints) def plot_traj_pts(xs, ys, T, waypoints): plt.figure(figsize=(5, 5)) plt.title("Trajectory") plt.xlabel("X") plt.ylabel("Y") W = 2 plt.xlim(0, W * 0.18) plt.ylim(0, W * 0.18) plt.xticks(np.arange(2*W+1)*0.09) plt.yticks(np.arange(2*W+1)*0.09) plt.grid(True) plt.gca().set_axisbelow(True) for t, pt in waypoints: arrow_dx = cos(pt.theta) * (pt.v) * 0.1 arrow_dy = sin(pt.theta) * (pt.v) * 0.1 
        plt.arrow(pt.x, pt.y, arrow_dx, arrow_dy,
                  head_width=0.005, head_length=0.005, width=0.001, fc='k', ec='k')
    plt.scatter(xs, ys, marker='.', linewidth=0)
    plt.show()
```

```python
from math import sin, cos, pi
from collections import namedtuple

WayPoint = namedtuple('WayPoint', ['x', 'y', 'theta', 'v'])

# NOTE (assumption): solve() below calls debug(), info(), and warning(),
# which are not defined in this excerpt. These are minimal level-gated
# stand-ins consistent with how LOG_LVL is set in the cells below.
def debug(*args):
    if LOG_LVL >= 5:
        print(*args)

def info(*args):
    if LOG_LVL >= 3:
        print(*args)

def warning(*args):
    if LOG_LVL >= 2:
        print(*args)

class TrajPlan:
    @staticmethod
    def x_constraint(t):
        return [1, t, pow(t, 2), pow(t, 3), pow(t, 4), pow(t, 5), 0, 0, 0, 0, 0, 0]

    @staticmethod
    def y_constraint(t):
        return [0, 0, 0, 0, 0, 0, 1, t, pow(t, 2), pow(t, 3), pow(t, 4), pow(t, 5)]

    @staticmethod
    def non_holonomic_constraint(theta_t, t):
        # row for x_dot*sin(theta) - y_dot*cos(theta) = 0 (no side-slip)
        s_t = sin(theta_t)
        c_t = cos(theta_t)
        t_2 = pow(t, 2)
        t_3 = pow(t, 3)
        t_4 = pow(t, 4)
        return [0, s_t, 2 * s_t * t, 3 * s_t * t_2, 4 * s_t * t_3, 5 * s_t * t_4,
                0, -c_t, -2 * c_t * t, -3 * c_t * t_2, -4 * c_t * t_3, -5 * c_t * t_4]

    @staticmethod
    def trig_constraint(theta_t, t):
        # row for x_dot*sin(theta) + y_dot*cos(theta) = v*sin(2*theta)
        s_t = sin(theta_t)
        c_t = cos(theta_t)
        t_2 = pow(t, 2)
        t_3 = pow(t, 3)
        t_4 = pow(t, 4)
        return [0, s_t, 2 * s_t * t, 3 * s_t * t_2, 4 * s_t * t_3, 5 * s_t * t_4,
                0, c_t, 2 * c_t * t, 3 * c_t * t_2, 4 * c_t * t_3, 5 * c_t * t_4]

    @staticmethod
    def x_dot_constraint(t):
        return [0, 1, 2 * t, 3 * pow(t, 2), 4 * pow(t, 3), 5 * pow(t, 4), 0, 0, 0, 0, 0, 0]

    @staticmethod
    def y_dot_constraint(t):
        return [0, 0, 0, 0, 0, 0, 0, 1, 2 * t, 3 * pow(t, 2), 4 * pow(t, 3), 5 * pow(t, 4)]

    def solve(self, waypoints):
        # Set up the matrices to match the equation above
        A = []
        b = []
        for t, pt in waypoints:
            A += [TrajPlan.x_constraint(t),
                  TrajPlan.y_constraint(t),
                  TrajPlan.non_holonomic_constraint(pt.theta, t),
                  TrajPlan.trig_constraint(pt.theta, t),
                  TrajPlan.x_dot_constraint(t),
                  TrajPlan.y_dot_constraint(t)]
            b += [pt.x, pt.y, 0, pt.v*sin(2*pt.theta), cos(pt.theta)*pt.v, sin(pt.theta)*pt.v]
        A = np.array(A)
        b = np.array(b)
        rank = np.linalg.matrix_rank(A)
        if rank == A.shape[1]:
            if A.shape[0] == A.shape[1]:
                coeff = np.linalg.solve(A, b)
            else:
                warning("not square {}, using least squares.".format(A.shape))
                coeff, resid, rank, s = np.linalg.lstsq(A, b, rcond=None)
        else:
            warning("Ranks don't match! {} equations {} variables, using least squares".format(rank, A.shape[1]))
            coeff, resid, rank, s = np.linalg.lstsq(A, b, rcond=None)
        debug("rank {}".format(rank))
        debug("A: \n{}".format(A))
        debug("coeff: \n{}".format(coeff))
        error = np.sum(np.power(A@coeff - b, 2))
        if error > 1e-10:
            info("These two vectors should be equal! But there is error.")
            info("b is: \n{}".format(b))
            info("A@coeff is: \n{}".format(A@coeff))
            info("Sum of squared error of the solution to the equations:")
            info(error)
        self.coeff = coeff
        self.waypoints = waypoints

    def get_coeff(self):
        return self.coeff

    def get_t_f(self):
        return self.waypoints[-1][0]
```

## Example Plots

```python
# forward 1 cell, start from rest, end at 60cm/s, do it in .5 seconds
LOG_LVL = 5
fwd_1 = TrajPlan()
fwd_1.solve([(0, WayPoint(0.09, 0.09, pi/2, 0)), (0.5, WayPoint(0.09, 0.27, pi/2, 0.6))])
plot_vars(fwd_1)
plot_traj(fwd_1)
```

```python
# continue by turning right 90 degrees
LOG_LVL = 1
turn_right = TrajPlan()
turn_right.solve([(0, WayPoint(0.09, 0.18, pi/2, 0.4)), (0.5, WayPoint(0.18, 0.27, 0, 0.4))])
plot_vars(turn_right)
plot_traj(turn_right)
```

```python
# 3 waypoints!
LOG_LVL = 1
turn_right = TrajPlan()
turn_right.solve([(0, WayPoint(0.09, 0.09, pi/2, 0.0)),
                  (0.5, WayPoint(0.18, 0.18, 0, 0.35)),
                  (1, WayPoint(0.27, 0.27, pi/2, 0))])
plot_vars(turn_right)
plot_traj(turn_right)
```

**Note: for this system of equations with 3 waypoints, there is no exact solution. However, the error of the solution found is very small.**

Now let's find one that really sucks!

```python
# 4 waypoints!
LOG_LVL = 1
turn_right = TrajPlan()
turn_right.solve([(0, WayPoint(0.09, 0.0, pi/2, 0.1)),
                  (1, WayPoint(0.09, 0.18, pi/2, 0.1)),
                  (2, WayPoint(0.18, 0.27, 0, 0.1)),
                  (3, WayPoint(0.27, 0.27, 0, 0.1))])
plot_traj(turn_right)
```

# Trajectory Following

***

Now that we have a trajectory, we want to design a controller that will follow it as closely as possible. To do this, I'm just going to do a proportional controller. Later we will design an optimal controller. We want to make sure the robot is on the path, facing along the path, and going the right speed. When all of these are true the change in speed should be zero. Let's come up with an equation to relate the current pose and velocity to the desired pose and velocity. Let our outputs be the linear velocity $v$ and the rotational velocity $w$.

$$ w = \bar{w} + dP_1 + (\bar{\theta} - \theta)P_2 $$
$$ v = \bar{v} + lP_3 $$

where $\bar{v}$ and $\bar{w}$ are the velocities demanded by the plan (the feed-forward terms), $\bar{\theta}$ is the desired heading, $d$ is the signed distance to the planned trajectory (to the right of the plan is positive), $l$ is the signed lag behind the plan, and $P_1$, $P_2$, and $P_3$ are constants. Essentially what we're saying with the first equation is that when you're far off the trajectory you need to turn harder to get back on to it, but you also need to be aligned with it. The second equation says if you're lagging behind your plan speed up, or slow down if you're overshooting.

```python
from math import atan2, sqrt

LOG_LVL = 5

def simulate(q_0, waypoints, P_1, P_2, P_3, A=3):
    traj = TrajPlan()
    traj.solve(waypoints)
    dt = 0.01
    x = q_0[0]
    y = q_0[1]
    theta = q_0[2]
    v = q_0[3]
    w = q_0[4]
    actual_v = q_0[3]
    actual_w = q_0[4]
    v_acc = A * dt
    TRACK_WIDTH_M = 0.0633
    w_acc = v_acc / (TRACK_WIDTH_M/2)
    T = np.arange(0, traj.get_t_f()+dt, dt)
    x_bar_list = []
    y_bar_list = []
    x_list = []
    y_list = []
    for t in T:
        x_bar = [1, t, pow(t,2), pow(t,3), pow(t,4), pow(t,5), 0, 0, 0, 0, 0, 0] @ traj.get_coeff()
        dx_bar = [0, 1, 2*t, 3*pow(t,2), 4*pow(t,3), 5*pow(t,4), 0, 0, 0, 0, 0, 0] @ traj.get_coeff()
        # second derivative of x uses the c coefficients (first six slots)
        ddx_bar = [0, 0, 2, 6*t, 12*pow(t,2), 20*pow(t,3), 0, 0, 0, 0, 0, 0] @ traj.get_coeff()
        y_bar = [0, 0, 0, 0, 0, 0, 1, t, pow(t,2), pow(t,3), pow(t,4), pow(t,5)] @ traj.get_coeff()
        dy_bar = [0, 0, 0, 0, 0, 0, 0, 1, 2*t, 3*pow(t,2), 4*pow(t,3), 5*pow(t,4)] @ traj.get_coeff()
        ddy_bar = [0, 0, 0, 0, 0, 0, 0, 0, 2, 6*t, 12*pow(t,2), 20*pow(t,3)] @ traj.get_coeff()
        theta_bar = atan2(dy_bar, dx_bar)
        v_bar = sqrt(dx_bar*dx_bar + dy_bar*dy_bar)
        w_bar = 1/v_bar * (ddy_bar*cos(theta_bar) - ddx_bar*sin(theta_bar))

        # simple Dubin's Car forward kinematics
        x += cos(theta) * actual_v * dt
        y += sin(theta) * actual_v * dt
        theta += actual_w * dt

        # control
        euclidian_error = np.sqrt(pow(x_bar - x, 2) + pow(y_bar - y, 2))
        transformed_x = (x - x_bar) * cos(-theta_bar) + (y - y_bar) * -sin(-theta_bar)
        transformed_y = (x - x_bar) * sin(-theta_bar) + (y - y_bar) * cos(-theta_bar)
        right_of_traj = transformed_y < 0
        signed_euclidian_error = euclidian_error if right_of_traj else -euclidian_error
        lag_error = -transformed_x
        # note: the heading error (theta_bar - theta) is not wrapped to (-pi, pi],
        # which is the likely cause of the -pi bug noted after the examples below
        w = w_bar + signed_euclidian_error * P_1 + (theta_bar - theta) * P_2
        v = v_bar + lag_error * P_3

        # simple acceleration model
        if v < actual_v:
            actual_v = max(v, actual_v - v_acc)
        elif v > actual_v:
            actual_v = min(v, actual_v + v_acc)
        if w < actual_w:
            actual_w = max(w, actual_w - w_acc)
        elif w > actual_w:
            actual_w = min(w, actual_w + w_acc)

        x_bar_list.append(x_bar)
        y_bar_list.append(y_bar)
        x_list.append(x)
        y_list.append(y)

    plt.figure(figsize=(5, 5))
    W = 3
    plt.scatter(x_bar_list, y_bar_list, marker='.',
                linewidth=0, c='black', label='desired traj')
    plt.scatter(x_list, y_list, marker='.', linewidth=0, c=T, label='robot traj')
    plt.xlim(0, W * 0.18)
    plt.ylim(0, W * 0.18)
    plt.xticks(np.arange(2*W+1)*0.09)
    plt.yticks(np.arange(2*W+1)*0.09)
    plt.grid(True)
    plt.gca().set_axisbelow(True)
    plt.xlabel("X")
    plt.ylabel("Y")
    plt.title("Trajectory Tracking")
    plt.legend(bbox_to_anchor=(1,1), loc=2)
```

```python
test_P_1 = 300
test_P_2 = 50
test_P_3 = 10
robot_q_0 = (0.08, 0.18, pi/2, 0.3, 0)
traj = [(0, WayPoint(0.09, 0.18, pi/2, 0.5)),
        (0.5, WayPoint(0.18, 0.27, 0, 0.35)),
        (1, WayPoint(0.27, 0.36, pi/2, 0))]
simulate(robot_q_0, traj, test_P_1, test_P_2, test_P_3)
plt.show()
```

```python
robot_q_0 = (0.11, 0.18, pi/2, 0.2, 5)
traj = [(0, WayPoint(0.09, 0.18, pi/2, 0.2)), (1, WayPoint(0.18, 0.27, 0, 0.35))]
simulate(robot_q_0, traj, test_P_1, test_P_2, test_P_3)
plt.show()
```

```python
robot_q_0 = (0.0, 0.25, 0, 0.2, 0)
traj = [(0, WayPoint(0.0, 0.27, 0, 0.2)), (1.25, WayPoint(0.54, 0.27, 0, 0.2))]
simulate(robot_q_0, traj, test_P_1, test_P_2, test_P_3)
plt.show()
```

```python
robot_q_0 = (0.45, 0.05, pi+0.25, 0.3, 0)
traj = [(0, WayPoint(0.45, 0.09, pi, 0.4)), (0.75, WayPoint(0.27, 0.27, pi/2, 0.4))]
simulate(robot_q_0, traj, test_P_1, test_P_2, test_P_3)
plt.show()
```

```python
robot_q_0 = (0.0, 0.25, 0, 0.2, -5)
traj = [(0, WayPoint(0.0, 0.27, 0, 0.2)), (2, WayPoint(0.48, 0.36, pi/2, 0.2))]
simulate(robot_q_0, traj, test_P_1, test_P_2, test_P_3)
plt.show()
```

```python
robot_q_0 = (0.25, 0.28, -pi*4/7, 0.5, 0)
traj = [(0, WayPoint(0.27, 0.27, -pi/2, 0.8)), (0.35, WayPoint(0.45, 0.09, 0, 0.8))]
simulate(robot_q_0, traj, test_P_1, test_P_2, test_P_3, A=6)
plt.show()
```

```python
# no initial error
robot_q_0 = (0.11, 0.18, pi/2, 0.8, 0)
traj = [(0, WayPoint(0.09, 0.18, pi/2, 0.8)),
        (0.25, WayPoint(0.18, 0.27, 0, 0.6)),
        (.5, WayPoint(0.27, 0.36, pi/2, 0.4))]
simulate(robot_q_0, traj, 10, 1000, 6, A=5)
plt.show()
```

**Note**: The code above has a bug if I use `-pi` instead of `pi` in `robot_q_0`. The likely cause is that the heading error $(\bar{\theta} - \theta)$ is never wrapped into $(-\pi, \pi]$, so two representations of the same heading (like $\pi$ and $-\pi$) produce a spurious error of $2\pi$.

# LQR - The Optimal Controller

***

## Overview of the Steps:

### 1. Write out the non-linear dynamics $\dot{\vec{x}} = f(\vec{x}, \vec{u})$

Here we are interested in the full-blown system dynamics of the actual smartmouse robot. The forward kinematics, which depend on the current state $x$, $y$, and $\theta$ and the velocity inputs of the wheels $v_l$ and $v_r$, are as follows. In the general case, where the two wheels have different velocities, we have this:

\begin{align}
R &= \frac{W(v_l+v_r)}{2(v_r-v_l)} && \text{radius of turn} \\
\theta &\leftarrow \theta + \dfrac{v_l}{R-\frac{W}{2}}\Delta t \\
x &\leftarrow x-R\Bigg(\sin{\Big(\frac{v_r-v_l}{W}\Delta t-\theta\Big)}+\sin{\theta}\Bigg) \\
y &\leftarrow y-R\Bigg(\cos{\Big(\frac{v_r-v_l}{W}\Delta t-\theta\Big)}-\cos{\theta}\Bigg)
\end{align}

And in the special case where we're going perfectly straight:

\begin{align}
\theta &\leftarrow \theta \\
x &\leftarrow x + v\Delta t\cos(\theta) \\
y &\leftarrow y + v\Delta t\sin(\theta)
\end{align}

We can take these equations and write them in the form of $\dot{\vec{x}} = f(\vec{x},\vec{u})$. Confusingly, $\vec{x}$ here is the full state vector $[x, y, \theta]$. Most controls texts simply use $\vec{x}$, so I'm sticking with that.
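As a quick sanity check that these update equations translate directly into code, here is a minimal sketch. The function name `diff_drive_step` and the tolerance used to detect the straight-line case are my own choices, not from the original code; everything else restates the equations above (`W` is the track width).

```python
import numpy as np

def diff_drive_step(x, y, theta, v_l, v_r, W, dt):
    # straight-line special case: both wheels at (nearly) the same speed
    if abs(v_r - v_l) < 1e-9:
        v = 0.5 * (v_l + v_r)
        return x + v * dt * np.cos(theta), y + v * dt * np.sin(theta), theta
    # general case: arc about the instantaneous center of curvature
    R = W * (v_l + v_r) / (2.0 * (v_r - v_l))  # signed radius of turn
    x_new = x - R * (np.sin((v_r - v_l) * dt / W - theta) + np.sin(theta))
    y_new = y - R * (np.cos((v_r - v_l) * dt / W - theta) - np.cos(theta))
    theta_new = theta + v_l / (R - W / 2.0) * dt
    return x_new, y_new, theta_new
```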
Also, we defined $\vec{u} = [v_l, v_r]$.

\begin{align}
\dot{\vec{x}} &= \begin{bmatrix}\dot{x}\\ \dot{y}\\ \dot{\theta}\end{bmatrix} \\
&= \begin{bmatrix}
-R\Bigg(\sin{\Big(\frac{v_r-v_l}{W}\Delta t-\theta\Big)}+\sin{\theta}\Bigg) \\
-R\Bigg(\cos{\Big(\frac{v_r-v_l}{W}\Delta t-\theta\Big)}-\cos{\theta}\Bigg) \\
\frac{v_l}{R-\frac{W}{2}}\Delta t
\end{bmatrix}
\end{align}

(Strictly speaking, with the $\Delta t$ factors carried along, these are the per-step increments of the discrete-time update above rather than true time derivatives; we will linearize this discrete update directly.)

### 2. Identify the points around which we linearize our system, $(\bar{u}, \bar{x})$

Because we are tracking a trajectory, we want to linearize around the trajectory we are trying to track. That means $\bar{u}$ is the control input associated with the trajectory, which means solving for the feed-forward inputs. Specifically, we need to compute the $v_l$ and $v_r$ that would follow the trajectory at the point $\bar{x}$. To do this we must pick velocities that make the instantaneous turning radius $R$ equal the instantaneous radius of the trajectory at $\bar{x}$, and make the linear velocity at $\bar{x}$ equal the instantaneous linear velocity of the robot center $v$. To do this, we go back to our basic kinematics equations, which rely on the fact that all points on the robot (center, left wheel, right wheel) have the same rotational velocity $\omega$ around the ICC.

\begin{align}
\omega = \frac{v}{R} &= \frac{v_l}{R-\frac{W}{2}} \\
\frac{v}{R}\bigg(R - \frac{W}{2}\bigg) &= v_l \\
\omega = \frac{v}{R} &= \frac{v_r}{R+\frac{W}{2}} \\
\frac{v}{R}\bigg(R + \frac{W}{2}\bigg) &= v_r
\end{align}

Using these equations we can solve for the velocities of the wheels, which together make up $\bar{u}$. We just need $R$ and $v$, which should be derived from the equation of the trajectory we are tracking. These are well-studied quantities, for which [a proof can be found other places on the internet](http://mathworld.wolfram.com/Curvature.html). The radius of curvature is the reciprocal of the curvature:

$$ R = \frac{{\big({\dot{x}}^2 + {\dot{y}}^2\big)}^{\frac{3}{2}}}{\dot{x}\ddot{y}-\dot{y}\ddot{x}} = \frac{{\Big({(c_1+2c_2t+3c_3t^2+4c_4t^3+5c_5t^4)}^2 + {(d_1+2d_2t+3d_3t^2+4d_4t^3+5d_5t^4)}^2\Big)}^{\frac{3}{2}}}{(c_1+2c_2t+3c_3t^2+4c_4t^3+5c_5t^4)(2d_2+6d_3t+12d_4t^2+20d_5t^3) - (d_1+2d_2t+3d_3t^2+4d_4t^3+5d_5t^4)(2c_2+6c_3t+12c_4t^2+20c_5t^3)} $$

$$ v = \sqrt{{\dot{x}}^2 + {\dot{y}}^2} = \sqrt{{(c_1+2c_2t+3c_3t^2+4c_4t^3+5c_5t^4)}^2 + {(d_1+2d_2t+3d_3t^2+4d_4t^3+5d_5t^4)}^2} $$

We can plug in the coefficients of our polynomials and get values for $v$ and $R$. Then we can plug these into the equations just above and get the feed-forward wheel velocities.

### 3. Write the linearized dynamics around $\bar{x}$ as $\dot{\vec{x}} \approx A\delta_x + B\delta_u$, where $\delta_x = (\vec{x} - \bar{x})$ and $\delta_u = (\vec{u} - \bar{u})$

To do this, we need the partial derivative matrices $A$ and $B$.
$$ A = \begin{bmatrix} \frac{\partial f_1}{\partial x}\big|_{\bar{x}\bar{u}} & \frac{\partial f_1}{\partial y}\big|_{\bar{x}\bar{u}} & \frac{\partial f_1}{\partial \theta}\big|_{\bar{x}\bar{u}} \\ \frac{\partial f_2}{\partial x}\big|_{\bar{x}\bar{u}} & \frac{\partial f_2}{\partial y}\big|_{\bar{x}\bar{u}} & \frac{\partial f_2}{\partial \theta}\big|_{\bar{x}\bar{u}} \\ \frac{\partial f_3}{\partial x}\big|_{\bar{x}\bar{u}} & \frac{\partial f_3}{\partial y}\big|_{\bar{x}\bar{u}} & \frac{\partial f_3}{\partial \theta}\big|_{\bar{x}\bar{u}} \end{bmatrix} = \begin{bmatrix} 0 & 0 & R\bigg(\cos\Big(\frac{\bar{v}_r-\bar{v}_l}{W}\Delta t - \bar{\theta}\Big) - \cos(\bar{\theta})\bigg) \\ 0 & 0 & -R\bigg(\sin\Big(\frac{\bar{v}_r-\bar{v}_l}{W}\Delta t - \bar{\theta}\Big) + \sin(\bar{\theta})\bigg) \\ 0 & 0 & 0 \end{bmatrix} $$

$$ B = \begin{bmatrix} \frac{\partial f_1}{\partial v_l}\big|_{\bar{x}\bar{u}} & \frac{\partial f_1}{\partial v_r}\big|_{\bar{x}\bar{u}} \\ \frac{\partial f_2}{\partial v_l}\big|_{\bar{x}\bar{u}} & \frac{\partial f_2}{\partial v_r}\big|_{\bar{x}\bar{u}} \\ \frac{\partial f_3}{\partial v_l}\big|_{\bar{x}\bar{u}} & \frac{\partial f_3}{\partial v_r}\big|_{\bar{x}\bar{u}} \end{bmatrix} = \begin{bmatrix} R\cos\Big(\frac{(\bar{v}_r-\bar{v}_l)\Delta t}{W}-\bar{\theta}\Big)\frac{\Delta t}{W} & -R\cos\Big(\frac{(\bar{v}_r-\bar{v}_l)\Delta t}{W}-\bar{\theta}\Big)\frac{\Delta t}{W} \\ -R\sin\Big(\frac{(\bar{v}_r-\bar{v}_l)\Delta t}{W}-\bar{\theta}\Big)\frac{\Delta t}{W} & R\sin\Big(\frac{(\bar{v}_r-\bar{v}_l)\Delta t}{W}-\bar{\theta}\Big)\frac{\Delta t}{W} \\ \frac{\Delta t}{R-\frac{W}{2}} & 0 \end{bmatrix} $$

### 4. Check if our system is controllable by looking at the rank of the controllability matrix $C = [B, AB, A^2B, \dots, A^{n-1}B]$

We have three state variables so $n = 3$, which means $C = [B, AB, A^2B]$.

$$ AB = \begin{bmatrix} R\bigg(\cos\Big(\frac{\bar{v}_r-\bar{v}_l}{W}\Delta t - \bar{\theta}\Big) - \cos(\bar{\theta})\bigg)\frac{\Delta t}{R-\frac{W}{2}} & 0 \\ -R\bigg(\sin\Big(\frac{\bar{v}_r-\bar{v}_l}{W}\Delta t - \bar{\theta}\Big) + \sin(\bar{\theta})\bigg)\frac{\Delta t}{R-\frac{W}{2}} & 0 \\ 0 & 0 \end{bmatrix} $$

Since only the third column of $A$ is nonzero and its third row is zero, $A^2 = 0$, so $A^2B$ is simply the $3 \times 2$ zero matrix:

$$ A^2B = \begin{bmatrix} 0 & 0 \\ 0 & 0 \\ 0 & 0 \end{bmatrix} $$

$$ C = \begin{bmatrix} \begin{bmatrix} R\cos\Big(\frac{(\bar{v}_r-\bar{v}_l)\Delta t}{W}-\bar{\theta}\Big)\frac{\Delta t}{W} & -R\cos\Big(\frac{(\bar{v}_r-\bar{v}_l)\Delta t}{W}-\bar{\theta}\Big)\frac{\Delta t}{W} \\ -R\sin\Big(\frac{(\bar{v}_r-\bar{v}_l)\Delta t}{W}-\bar{\theta}\Big)\frac{\Delta t}{W} & R\sin\Big(\frac{(\bar{v}_r-\bar{v}_l)\Delta t}{W}-\bar{\theta}\Big)\frac{\Delta t}{W} \\ \frac{\Delta t}{R-\frac{W}{2}} & 0 \end{bmatrix} & \begin{bmatrix} R\bigg(\cos\Big(\frac{\bar{v}_r-\bar{v}_l}{W}\Delta t - \bar{\theta}\Big) - \cos(\bar{\theta})\bigg)\frac{\Delta t}{R-\frac{W}{2}} & 0 \\ -R\bigg(\sin\Big(\frac{\bar{v}_r-\bar{v}_l}{W}\Delta t - \bar{\theta}\Big) + \sin(\bar{\theta})\bigg)\frac{\Delta t}{R-\frac{W}{2}} & 0 \\ 0 & 0 \end{bmatrix} & \begin{bmatrix} 0 & 0 \\ 0 & 0 \\ 0 & 0 \end{bmatrix} \end{bmatrix} $$

What is the rank of this matrix? Its entries depend on the operating point (on $\bar{\theta}$, the feed-forward wheel speeds, $R$, $W$, and $\Delta t$), so controllability has to be checked numerically at each linearization point along the trajectory.

### 5. Pick cost parameters $Q$ and $R$

These need to be tuned on the simulation or real system, but the identity matrices $I$ are good starting points.

### 6. Solve for $K$ given $LQR(A, B, Q, R)$

We want to minimize the quadratic cost function $J$, which is defined as follows.
$$ J = \sum_{t=0}^{N}(\vec{x}_t - \bar{x}_t)^TQ(\vec{x}_t - \bar{x}_t) + \sum_{t=0}^{N}(\vec{u}_t - \bar{u}_t)^TR(\vec{u}_t - \bar{u}_t) $$

We can solve this with least squares or with dynamic programming. DP is more efficient, $O(Nn^3)$, where $N$ is some finite horizon and $n$ is the number of state dimensions (3 for us). Maybe we compute $K$ once instead of at every time step; it could be consistent within one motion primitive.

### 7. Apply our new controller of the form $\vec{u} = -K(\vec{x} - \bar{x}) + \bar{u}$

```python
from math import atan2
import scipy.linalg

# source: http://www.kostasalexis.com/lqr-control.html
def dlqr(A, B, Q, R):
    """Solve the discrete time LQR controller.
    p[k+1] = A p[k] + B u[k]
    cost = sum p[k].T*Q*p[k] + u[k].T*R*u[k]
    """
    # ref: Bertsekas, p.151
    # first, solve the discrete-time algebraic Riccati equation
    P = scipy.linalg.solve_discrete_are(A, B, Q, R)
    # compute the LQR gain
    K = scipy.linalg.inv(B.T @ P @ B + R) @ (B.T @ P @ A)
    eigVals, eigVecs = scipy.linalg.eig(A - B @ K)
    return K, P, eigVals

def follow_plan(q_0, waypoints, P_1, P_2, P_3):
    # P_1, P_2, P_3 are unused here; kept for signature parity with simulate()
    traj = TrajPlan()
    traj.solve(waypoints)
    dt = 0.01
    x = q_0[0]
    y = q_0[1]
    theta = q_0[2]
    vl = q_0[3]
    vr = q_0[3]
    actual_vl = vl
    actual_vr = vr
    v_acc = 2 * dt
    W = 0.0633
    T = np.arange(0, traj.get_t_f()+dt, dt)
    x_bar_list = []
    y_bar_list = []
    x_list = []
    y_list = []
    vl_list = []
    vr_list = []
    actual_vl_list = []
    actual_vr_list = []
    for t in T:
        x_bar = [1, t, pow(t,2), pow(t,3), pow(t,4), pow(t,5), 0, 0, 0, 0, 0, 0] @ traj.get_coeff()
        dx_bar = [0, 1, 2*t, 3*pow(t,2), 4*pow(t,3), 5*pow(t,4), 0, 0, 0, 0, 0, 0] @ traj.get_coeff()
        # second derivative of x uses the c coefficients (first six slots)
        ddx_bar = [0, 0, 2, 6*t, 12*pow(t,2), 20*pow(t,3), 0, 0, 0, 0, 0, 0] @ traj.get_coeff()
        y_bar = [0, 0, 0, 0, 0, 0, 1, t, pow(t,2), pow(t,3), pow(t,4), pow(t,5)] @ traj.get_coeff()
        dy_bar = [0, 0, 0, 0, 0, 0, 0, 1, 2*t, 3*pow(t,2), 4*pow(t,3), 5*pow(t,4)] @ traj.get_coeff()
        ddy_bar = [0, 0, 0, 0, 0, 0, 0, 0, 2, 6*t, 12*pow(t,2), 20*pow(t,3)] @ traj.get_coeff()
        theta_bar = atan2(dy_bar, dx_bar)

        # full forward kinematics
        # (the radius is named R_turn so it doesn't clobber the LQR cost matrix R below)
        if abs(vr - vl) < 1e-5:
            x = x + cos(theta) * vl * dt
            y = y + sin(theta) * vl * dt
        else:
            R_turn = W*(vl + vr)/(2*(vr - vl))
            x = x - R_turn * (sin((vr-vl)*dt/W - theta) + sin(theta))
            y = y - R_turn * (cos((vr-vl)*dt/W - theta) - cos(theta))
            theta = theta + vl / (R_turn - W/2) * dt

        # instantaneous radius of curvature of the plan (reciprocal of curvature);
        # on (nearly) straight segments the denominator vanishes, so cap the
        # radius (the 10 m cap is an arbitrary choice for this maze-sized problem)
        curv = dx_bar*ddy_bar - dy_bar*ddx_bar
        if abs(curv) < 1e-9:
            R_bar = 10.0
        else:
            R_bar = min(max(pow(pow(dx_bar, 2) + pow(dy_bar, 2), 3/2) / curv, -10.0), 10.0)

        # feed-forward inputs
        v_bar = np.sqrt(dx_bar*dx_bar + dy_bar*dy_bar)
        vl_bar = v_bar/R_bar*(R_bar - W/2)
        vr_bar = v_bar/R_bar*(R_bar + W/2)

        A = np.array([[0, 0, R_bar*(cos((vr_bar - vl_bar)*dt/W - theta_bar) - cos(theta_bar))],
                      [0, 0, -R_bar*(sin((vr_bar - vl_bar)*dt/W - theta_bar) + sin(theta_bar))],
                      [0, 0, 0]])
        B = np.array([[R_bar*cos((vr_bar - vl_bar)*dt/W - theta_bar)*dt/W, -R_bar*cos((vr_bar - vl_bar)*dt/W - theta_bar)*dt/W],
                      [-R_bar*sin((vr_bar - vl_bar)*dt/W - theta_bar)*dt/W, R_bar*sin((vr_bar - vl_bar)*dt/W - theta_bar)*dt/W],
                      [dt/(R_bar - W/2), 0]])
        # LQR cost weights; identity is a reasonable starting point
        Q = np.eye(3)
        R = np.eye(2)
        K, P, eigs = dlqr(A, B, Q, R)
        # info("eigs", eigs)
        # debug("K", K)

        x_vec = np.array([[x], [y], [theta]])
        x_bar_vec = np.array([[x_bar], [y_bar], [theta_bar]])
        u = -K @ (x_vec - x_bar_vec) + np.array([[vl_bar], [vr_bar]])
        vl = u[0, 0]
        vr = u[1, 0]

        # simple acceleration model
        if vl < actual_vl:
            actual_vl = max(vl, actual_vl - v_acc)
        elif vl > actual_vl:
            actual_vl = min(vl, actual_vl + v_acc)
        if vr < actual_vr:
            actual_vr = max(vr, actual_vr - v_acc)
        elif vr > actual_vr:
            actual_vr = min(vr, actual_vr + v_acc)

        x_bar_list.append(x_bar)
        y_bar_list.append(y_bar)
        x_list.append(x)
        y_list.append(y)
        vr_list.append(vr)
        vl_list.append(vl)
        actual_vr_list.append(actual_vr)
        actual_vl_list.append(actual_vl)

    plt.figure(figsize=(5, 5))
    CELL_COUNT = 3
    plt.scatter(x_bar_list, y_bar_list, marker='.', linewidth=0, c='black', label='desired traj')
    plt.scatter(x_list, y_list, marker='.', linewidth=0, label='robot traj')
    plt.xlim(0, CELL_COUNT * 0.18)
    plt.ylim(0, CELL_COUNT * 0.18)
    plt.xticks(np.arange(CELL_COUNT+1)*0.18)
    plt.yticks(np.arange(CELL_COUNT+1)*0.18)
    plt.grid(True)
    plt.gca().set_axisbelow(True)
    plt.xlabel("X")
    plt.ylabel("Y")
    plt.title("LQR Trajectory Tracking")
    plt.legend(bbox_to_anchor=(1,1), loc=2)

    plt.figure()
    plt.plot(vr_list, label="vr")
    plt.plot(vl_list, label="vl")
    plt.plot(actual_vr_list, label="actual vr")
    plt.plot(actual_vl_list, label="actual vl")
    plt.legend(bbox_to_anchor=(1,1), loc=2)
```

```python
LOG_LVL = 1
robot_q_0 = (0.08, 0.18, pi/2, 0.3)
traj = [(0, WayPoint(0.09, 0.18, pi/2, 0.5)), (0.5, WayPoint(0.18, 0.27, 0, 0.35))]
follow_plan(robot_q_0, traj, test_P_1, test_P_2, test_P_3)
plt.show()
```

```python
LOG_LVL = 1
robot_q_0 = (0.07, 0.18, pi/2, 0.2)
traj = [(0, WayPoint(0.09, 0.18, pi/2, 0.2)), (0.5, WayPoint(0.09, 0.36, pi/2, 0.2))]
follow_plan(robot_q_0, traj, test_P_1, test_P_2, test_P_3)
plt.show()
```

# Tuning new PIDs

```python
import csv

with open("./pid_data.csv", 'r') as f:
    reader = csv.reader(f)
    setpoints = []
    speeds = []
    for row in reader:
        setpoints.append(float(row[0]))
        speeds.append(float(row[1]))

t = np.arange(0, len(setpoints)/100, 0.01)
plt.plot(t, setpoints, label="setpoint")
plt.plot(t, speeds, label="actual speed")
plt.xlabel("time (s)")
plt.ylabel("speed (m/s)")
plt.title("PID Performance")
plt.legend()
plt.show()
```
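One more loose end: the trajectory-following bug noted earlier (flipping the sign when `-pi` is used for the initial heading) is consistent with the heading error $(\bar{\theta} - \theta)$ never being wrapped. A minimal sketch of a fix, assuming we wrap the error into $(-\pi, \pi]$ before multiplying by the gain (the helper name `wrap_angle` is my own):

```python
from math import atan2, sin, cos

def wrap_angle(a):
    # map any angle to the equivalent angle in (-pi, pi]
    return atan2(sin(a), cos(a))

# e.g. in simulate() or follow_plan(), use the wrapped heading error:
# w = w_bar + signed_euclidian_error * P_1 + wrap_angle(theta_bar - theta) * P_2
```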
lemma Lim_bounded2: fixes f :: "nat \<Rightarrow> 'a::linorder_topology" assumes lim:"f \<longlonglongrightarrow> l" and ge: "\<forall>n\<ge>N. f n \<ge> C" shows "l \<ge> C"
\newpage
\flushleft{\huge{\bf Chapter 2}}
\section{Synthesis of Interconnected Hexagonal Boron Composite}
\subsection{Introduction}

The development of two-dimensional (2D) materials has opened up the possibilities for their application in improving the properties of metals and alloys (\cite{bartolucci2011graphene, chu2014enhanced, yang2018microstructure, li2019thermal}). This is because 2D materials have the potential to alter the properties of metals at the nanoscale. A single layer of hexagonal boron nitride (hBN) is structurally similar to graphene (a carbon system), where the hexagonal lattices are occupied by boron and nitrogen atoms. hBN has a lattice parameter of 0.25 nm and possesses extraordinary properties, such as high chemical stability (\cite{singh2018effect}), high mechanical strength (\cite{singh2018effect}), low density (\cite{elkady2015physico}), high thermal stability (\cite{liu2013ultrathin}), and high thermal shock resistance (\cite{duan2016review}). These excellent properties can be utilized to improve the performance of various metal matrix composites (MMCs) through the in-situ construction of three-dimensionally interconnected (3Di) hBN layers in their grain boundaries. Similar approaches have been employed by other researchers, who used 3D-networked graphene to tailor the properties of MMCs (\cite{li2019thermal, chen2016fabrication, kim2018rapid, song2018hydrogen}). For instance, Chen et al. (\cite{chen2016fabrication}) enhanced the yield and tensile strengths of copper by wrapping graphene around copper grains using chemical vapor deposition (CVD). They reported that the graphene acted as a barrier for dislocation movement, and consequently, the elastic modulus and strength were improved. Xue et al. (\cite{li2019thermal}) reported that Cu-graphene composites had a higher thermal conductivity than pure copper because graphene offered an effective path for heat transfer between the Cu grain boundaries. Other properties, such as corrosion resistance and wear resistance, have also been improved (\cite{wu2019high, tripathi2017graphene}).

Because of its structural similarity with graphene, hBN introduced to metal matrices can also impart similar effects. Several researchers have used boron nitride nanoparticles to enhance the strength, hardness, wear, and corrosion resistance of metallic alloys (\cite{gopinath2020enhancing, reddy2013study, zitoun2008microstructure, khatavkar2018influence, cho2018effect}). For instance, the microstructure and properties of BN/Ni-Cu composites fabricated by powder metallurgy (PM) were reported by Tantaway et al. (\cite{el2018microstructure}). They found that the BN content led to a decrease in density and an increase in the hardness, electrical resistivity, and saturation magnetization of the composite. Omayma et al. (\cite{elkady2015physico}) fabricated Cu/hBN nanocomposites by the PM route, in which powder mixtures of Cu and hBN were compacted and sintered at various temperatures ranging from 950°C to 1000°C. They found that the physical, mechanical, and tribological properties of the composite were influenced by the hBN. However, we have not found any published studies in which metal matrices were reinforced by a 3Di network of hBN layers.

Recently, various techniques have been utilized to fabricate reinforced MMCs through the incorporation of graphene. For instance, Xiong et al. introduced graphene in Cu by reducing graphene oxide during sintering.
Similarly, ball milling, molecular-level synthesis, spark plasma sintering, and epitaxial growth have been used to improve the strength of composites using graphene as a reinforcement (\cite{chen2016fabrication, cao2017aligning, jiang2016copper, wang2019direct}). However, each of these strengthening techniques has some limitations. For instance, ball milling and molecular-level mixing may allow a uniform dispersion of the reinforcement material but may impart structural defects due to the shear stress and contamination during the fabrication process (\cite{naseer2019review}). A well-ordered, well-aligned, uniformly dispersed, and continuous graphene network is essential to attain the best reinforcement results (\cite{chen2016fabrication}). Kawk et al. (\cite{kawk2019simple}) introduced a simple, economically efficient two-step process with the potential to deliver better-quality products with uniformly dispersed and continuous graphene networks. The two-step process involves the compaction of a metallic powder followed by CVD.

In this study, we fabricated a 3Di-hBN-Cu-Ni composite using a similar simple two-step process. Various characterization techniques were employed to confirm the formation of 3Di-hBN surrounding the grains of the Cu-Ni alloys. Cu-Ni-based alloys have been applied in various industries, such as shipbuilding, construction, and processing, owing to their high mechanical strength and corrosion resistance at elevated temperatures. The 3Di-hBN-Cu-Ni composite is expected to deliver better corrosion, mechanical, and wear characteristics than the Cu-Ni alloy. Moreover, the 3Di-hBN layer, a foam-like 3D porous structure, was separated from the 3Di-hBN-Cu-Ni composite. Such a 3Di-hBN material can be applied in the fields of biomedicine, electronics, and energy storage (\cite{gautam2018synthesis, guiney2018three, yin2013ultralight}).

\subsection{Fabrication of 3Di-hBN CuNi Composite}
\subsubsection{Compaction of CuNi powders}

The simple two-step process involves the compaction of powder particles in a mold followed by CVD. Cu powder (99.5\% purity) with spheroidal particles of size 14–25 µm and Ni powder ($>$99.5\% purity) with spheroidal particles of size $\sim$1 µm were purchased from Sigma-Aldrich and used after heat treatment (200°C for 2 h in an $\text{H}_{2}$ environment) to remove any moisture or oxide contents. The chemical compositions of the Cu and Ni powders are provided in Table \ref{tab:chemical_composition_powder}. Cu and Ni powders (70 wt.\% Cu, 30 wt.\% Ni) were mixed manually using a mortar, with care not to change the particle size distribution, and the mixture was compacted in a mold using a double-action oil hydraulic press at compaction pressures of 60, 110, 220, 280, 335, and 390 MPa, as shown schematically in Figure \ref{fig:Mold_and_press}. The exertion of high pressure on the spheroidal particles caused mechanical cold locking among the particles, thus forming a compact disc with an approximate diameter and thickness of 15 mm and 1.2 mm, respectively. As shown on the fracture surface of the cross section of the compact disc in Figure \ref{fig:compact_fracture_surface}, relatively large Cu particles produced mechanical interlocking owing to their deformation, and Ni particles filled the gaps between the Cu particles.
\begin{figure}[!htb]
\centering
\includegraphics[scale=1.0]{graphics/chapter_2/Mold_and_press}
\caption{Mold and hydraulic press used for powder compaction.}
\label{fig:Mold_and_press}
\end{figure}

\begin{table}[!htb]
\centering
\caption{Chemical compositions of Cu and Ni powders.}
\resizebox{\linewidth}{!}
{
\begin{tabular}{lll}
\hline
Material & Purity & Trace metals in ppm \\ \hline
Cu & \textgreater 99.5\% & Fe 80.0, Na 9.42, Mn 7.6, Mg 4.69, Al 4.4, and B 1.98 \\ \hline
Ni & \textgreater 99.5\% & \begin{tabular}[c]{@{}l@{}}Ag 1.3, Al 17.9, Ba 0.8, Ca 27.9, Cr 3.6, Cu 305.2, Fe 383.7, \\ Mg 2.0, Mn 2.6, Na 11.1, Pd 8.0, Ti 144.5, and V 25.4\end{tabular} \\ \hline
\end{tabular}
}
\label{tab:chemical_composition_powder}
\end{table}

\begin{figure}[!htb]
\centering
\includegraphics[scale=1.0]{graphics/chapter_2/Compacted_CuNi_powder}
\caption{Cu and Ni particles shown at the fracture surface of the disc after compaction.}
\label{fig:compact_fracture_surface}
\end{figure}

\subsubsection{Metal Organic Chemical Vapor Deposition}

The discs were then placed in a quartz glass tube furnace with a tube diameter of 23 mm for metal-organic CVD (MOCVD). The compaction pressure and sintering time were varied to determine the optimum conditions for the fabrication of 3Di-hBN in the 3Di-hBN-Cu-Ni composite. Figure \ref{fig:CVD_furnace}(a) shows a schematic of the system used to fabricate the 3Di-hBN-Cu-Ni composites. Initially, the system was flushed with argon at least three times to remove air from the MOCVD tube. The furnace temperature was increased to 400°C at a rate of 16.6°C/min and then maintained constant in a hydrogen environment at 330 Torr for 1 h for deoxidation. Subsequently, the temperature was raised to 1000°C at the same rate and maintained constant for 15 or 30 min. Finally, MOCVD was performed for 15 min at 450 Torr using heated decaborane ($\text{B}_{10}\text{H}_{14}$) as the boron source and ammonia ($\text{NH}_{3}$) as the nitrogen source. Decaborane was the preferred boron precursor because of its (i) easy handling, (ii) commercial availability, and (iii) stability, which minimized the formation of undesired side products at elevated temperatures that could potentially decrease the hBN yield (\cite{chatterjee2012syntheses}). Decaborane is a crystalline solid with a melting temperature of 98–100°C, and its vapor pressure can be easily controlled by varying the temperature from room temperature to 100°C. At approximately 100°C, the vapors produced upon evaporation can be transported into the MOCVD growth zone by an inert carrier gas (Ar) at a flow rate of 1 sccm. Ammonia gas was introduced as the nitrogen source in the MOCVD reaction zone at a flow rate of 2 sccm. At 1000°C, ammonia and decaborane dissociate into nitrogen and boron atoms, respectively:

\begin{gather}
\label{eq:ammonia_dissociation}
{NH_3}_{\left(g\right)}\ \rightarrow\ \left[N\right]+\frac{3}{2}{H_2}_{\left(g\right)} \\
\label{eq:boron_dissociation}
{B_{10}H_{14}}_{\left(g\right)}\rightarrow10\left[B\right]+7{H_2}_{\left(g\right)}
\end{gather}

The entire process (heating, sintering, and MOCVD) was conducted at a hydrogen flow rate of 10 sccm. The 3Di-hBN-Cu-Ni composite fabricated using the simple two-step process is shown in Figure \ref{fig:CVD_furnace}(b).
\begin{figure}[!htb]
\centering
\includegraphics[width=\linewidth]{graphics/chapter_2/CVD_furnace}
\caption{(a) Schematic illustration of the fabrication of 3Di-hBN-Cu-Ni composite and (b) disc-shaped 3Di-hBN-Cu-Ni composite fabricated using a simple two-step process.}
\label{fig:CVD_furnace}
\end{figure}

\subsection{Characterization of 3Di hBN-CuNi Composites}
\subsubsection{Green and Composite Density}

Density measurements were made for three main reasons: (1) to determine the mass and volume of the samples, (2) to assess the quality of the samples, and (3) to select the optimal experimental conditions. Sample density varies with powder size, compaction pressure, and the experimental conditions of CVD. The green density was determined by taking the ratio of the mass of the compacted CuNi disc to its volume. The densities of the synthesized 3Di hBN-CuNi composites were measured using Archimedes' principle. The entire procedure is given in Appendix “A”.

\subsubsection{Microstructural Characterization}

Optical microscopy (OM) and scanning electron microscopy (SEM) investigations of the 3Di-hBN-Cu-Ni composite samples were conducted after the sample was cut in half and the cut surface was polished with emery papers down to 4000 grit. Finally, the polished surface was etched at room temperature using a mixed solution of 1 M $\text{FeCl}_{3}$ and 0.1 M HCl to reveal the microstructure (\cite{kawk2019simple}). The procedure to make this etchant is given in Appendix “B”. For transmission electron microscopy (TEM) investigations, the 3Di-hBN-Cu-Ni composite samples were mechanically polished to a thickness of 100 µm and cut into small pieces of 3 mm diameter. Then, the Cu-Ni was etched out, leaving only 3Di-hBN foam, which was transferred after thorough cleaning to the TEM grid for investigation. A qualitative phase analysis of the 3Di-hBN-Cu-Ni composite was performed by XRD analysis using Cu K$\alpha$ radiation with a wavelength of 1.54 Å and a scanning angle of 20°–100°. To obtain the 3Di-hBN foam, the 3Di-hBN-Cu-Ni composite samples were cut into small pieces, polished, and placed in the etchant for a sufficient duration to etch out the Cu-Ni completely such that only 3Di-hBN remained. Then, the foam-like 3Di-hBN samples were removed and washed several times with deionized water. To obtain a stable 3D structure of 3Di-hBN, the freeze-drying method was used to ensure that there was no effect of liquid capillary force and that the 3Di-hBN did not structurally collapse (\cite{ding2017mechanical}).

\subsubsection{Mechanism of hBN Formation at Grain Boundaries}

Figure \ref{fig:Mechanism_of_formation} shows a schematic of the processes involved in the synthesis of the 3Di-hBN-Cu-Ni composite. During sintering, the reduction in volume and the formation of a Cu-Ni solid solution occur due to the diffusion of metals under the driving force to reduce the excess surface energy (\cite{german2010coarsening, mcdonald2017microstructural}). Consequently, the overall volume of the compact disc is reduced and densification occurs. The formation of 3Di-hBN in the composite is likely to occur in three stages (\cite{li2019thermal}). First, the diffusion of metal occurs to reduce the surface energy, resulting in the formation of large particles (consolidation). At the same time, the diffusion of Ni into Cu (or vice versa) occurs to form a solid solution of Cu-Ni. Next, during the MOCVD process, the dissociation of ammonia and decaborane produces nitrogen and boron atoms that diffuse into the Cu-Ni alloy at 1000°C.
Finally, upon cooling, the nitrogen and boron atoms precipitate out and alternately join together to form 2D hBN layer(s) along the interfaces of the Cu-Ni grains (\cite{joshi2012boron}), resulting in the formation of the 3Di-hBN-Cu-Ni composite.

\begin{figure}[!htb]
\centering
\includegraphics[width=\linewidth]{graphics/chapter_2/Mechanism_of_formation}
\caption{Schematic showing the process of formation of 3Di-hBN-Cu-Ni composite.}
\label{fig:Mechanism_of_formation}
\end{figure}

Small pores or voids can form during sintering as the Cu and Ni particles grow to reduce their surface energy. This is probably due to insufficient sintering time or excessive free space among the particles. These pores may also act as catalytic sites for the nucleation and growth of bulk hBN. The small lighter grey areas (indicated by small white loops) in Figure \ref{fig:OM_images} indicate the bulk hBN that accumulated in the pores during the MOCVD process. These pores, generated during the sintering process and then filled with bulk hBN during the MOCVD process, are undesirable, as they may adversely affect the mechanical, thermal, and wear characteristics of the composite. Therefore, the processing parameters, such as compaction pressure and sintering time, must be varied to determine the optimal conditions for the fabrication of 3Di-hBN-Cu-Ni composites without the formation of bulk hBN.

\begin{figure}[!htb]
\centering
\includegraphics[width=\linewidth]{graphics/chapter_2/OM images}
\caption{OM images of 3Di-hBN-Cu-Ni composites under various conditions.}
\label{fig:OM_images}
\end{figure}

\subsection{Optimal Conditions of Constructing 3Di-hBN in 3Di hBN-CuNi composite}

Figure \ref{fig:pressure_density} shows the density of the 3Di-hBN-Cu-Ni composite as a function of the compaction pressure and sintering time. The density of the composite increased with increasing compaction pressure; however, beyond a certain compaction pressure, the density decreased. This trend occurred because, at pressures below 280 MPa, the compaction pressure was insufficient, resulting in a low green density and, consequently, a lower-density composite containing voids or pores. Although these pores were filled with bulk hBN during the subsequent MOCVD process, the overall density of the composite could not be increased because the density of hBN (2.1 g cm$^{-3}$) is significantly lower than that of Cu-Ni ($\sim$8.9 g cm$^{-3}$). On the other hand, during compaction at high pressures ($>$280 MPa), the particles on the surface were pressed with a relatively greater force than those inside the compact disc because of the friction between the particles. Consequently, at pressures exceeding 280 MPa, the surface particles of the compact disc were denser than the inner particles. The inner particles, having a longer diffusion distance owing to their lower density, formed pores due to insufficient diffusion or a short sintering time, resulting in relatively larger pores or shrinkage, as indicated by the OM image in Figure \ref{fig:ununiform_interior}(b). Hence, the density of the 3Di-hBN-Cu-Ni composite was slightly lower at higher pressures ($>$280 MPa), as shown in Figure \ref{fig:pressure_density}. This is also evident from the OM images shown in Figure \ref{fig:OM_images}. The white arrows in Figure \ref{fig:OM_images} indicate the bulk hBN present in the microstructure of the 3Di-hBN-Cu-Ni composite.
A relatively higher volume fraction of bulk hBN was observed when the compaction pressure was lower or higher than 280 MPa. Furthermore, the density of the 3Di-hBN-Cu-Ni composite also depends on the sintering time, as shown in Figure \ref{fig:pressure_density}. A longer sintering time led to fewer pores (i.e., a lower volume fraction of bulk hBN), and consequently more densification occurred.

\begin{figure}[!htb]
\centering
\includegraphics[scale=1.0]{graphics/chapter_2/Pressure-density relationship-1}
\caption{Density of the 3Di-hBN-Cu-Ni composite as a function of compacting pressure and sintering time.}
\label{fig:pressure_density}
\end{figure}

\begin{figure}[!htb]
\centering
\includegraphics[width=\linewidth]{graphics/chapter_2/OM_image (HC)}
\caption{OM images of 3Di hBN-Cu-Ni composites processed at compaction pressures of (a) 280 MPa and (b) 335 MPa.}
\label{fig:ununiform_interior}
\end{figure}

\subsection{Microstructural Investigation}

The 3Di-hBN-Cu-Ni composite, fabricated under the optimized conditions (compaction pressure of 280 MPa and sintering time of 30 min), was examined using SEM. While some of the bulk hBN was removed during the polishing and etching, the SEM image in Figure \ref{fig:EM-EDX} and the EDS results in Table \ref{tab:edx_analysis} show the Cu-Ni grains, bulk hBN of less than 5 µm in size, and hBN along the interfaces. The Cu-Ni grains, grain boundaries, and bulk hBN in Figure \ref{fig:EM-EDX} were analyzed using energy-dispersive X-ray spectroscopy (EDS). Boron and nitrogen were observed in excess (locations (a) and (b) in Figure \ref{fig:EM-EDX}), along with minute amounts of other impurities, such as silicon, carbon, and oxygen, as listed in Table \ref{tab:edx_analysis}. These impurities probably entered the structure during the polishing and etching processes. Location (a) in Figure \ref{fig:EM-EDX} is a pore that was first formed as a consequence of sintering and then filled with bulk hBN during the subsequent MOCVD process. Considering the average size (5 µm) of these sites (location (a)), the presence of bulk hBN was verified through EDS analysis. Further EDS analysis at the grain boundaries (location (b)) revealed that the grain boundaries were also mostly occupied by boron and nitrogen with an approximate stoichiometric ratio of 1:1. As expected, the Cu-Ni grains (location (c) in Figure \ref{fig:EM-EDX}) comprised Cu and Ni atoms in a ratio of approximately 7 to 3, as shown in Table \ref{tab:edx_analysis}. The solubility of B and N in the Cu-Ni alloy at 1000°C is very small (on the order of ppm) (\cite{el2018microstructure}), and most of the B and N atoms precipitated out during cooling, thus forming hBN with a B:N stoichiometric ratio of 1:1 at the grain boundaries (\cite{khan20172d, koepke2016role}).

\begin{figure}[!htb]
\centering
\includegraphics[scale=1.0]{graphics/chapter_2/SEM-EDX}
\caption{SEM image showing the surface morphology of the 3Di-hBN-Cu-Ni composite fabricated with compaction pressure of 280 MPa and sintering time of 30 min.}
\label{fig:EM-EDX}
\end{figure}

\begin{table}[!htb]
\centering
\caption{EDS results of the 3Di-hBN-Cu-Ni composite for the microstructure shown in Figure \ref{fig:EM-EDX}.}
\resizebox{\linewidth}{!}
{
\begin{tabular}{lllllllll}
\hline
Element & Location & Cu & Ni & B & N & Si & C & O \\ \hline
At. \% & (a) in Figure \ref{fig:EM-EDX} & - & - & 49.32 & 47.45 & 0.76 & 0.87 & 0.68 \\
At. \% & (b) in Figure \ref{fig:EM-EDX} & 20.51 & 8.32 & 34.26 & 34.82 & 0.89 & 0.57 & 0.78 \\
At. \% & (c) in Figure \ref{fig:EM-EDX} & 71.28 & 28.42 & - & - & - & - & - \\ \hline
\end{tabular}
}
\label{tab:edx_analysis}
\end{table}

The SEM image in Figure \ref{fig:SEM-3DihBN} shows various hBN layers that interconnect to form a foam-like structure with pockets and channels. The channels are the connected areas between the Cu-Ni grains formed by etching. The average pocket size (10–20 µm) in the 3Di-hBN (Figure \ref{fig:SEM-3DihBN}) is approximately equal to the average grain size of the 3Di-hBN-Cu-Ni composite (Figures \ref{fig:OM_images} and \ref{fig:EM-EDX}), indicating that the hBN layers wrapped around the Cu-Ni grains in the 3Di-hBN-Cu-Ni composite.

\begin{figure}[!htb]
\centering
\includegraphics[scale=1.0]{graphics/chapter_2/SEM-3DihBN}
\caption{SEM image showing the 3D interconnected network of hBN in 3Di hBN-Cu-Ni composite produced under a compaction pressure of 280 MPa and a sintering time of 30 min.}
\label{fig:SEM-3DihBN}
\end{figure}

The XRD pattern of the 3Di-hBN-Cu-Ni composite is shown in Figure \ref{fig:XRD-element distribution}(a). This pattern shows the crystalline phase of the Cu-Ni solid solution only. Moreover, the elemental distribution map of the 3Di-hBN-Cu-Ni composite shown in Figure \ref{fig:XRD-element distribution}(b) shows a uniform distribution of Cu and Ni, indicating the formation of a Cu-Ni solid solution.

\begin{figure}[!htb]
\centering
\includegraphics[width=\linewidth]{graphics/chapter_2/XRD-element distribution}
\caption{(a) XRD pattern of 3Di-hBN-Cu-Ni composite and (b) elemental distribution map of 3Di-hBN-Cu-Ni composite.}
\label{fig:XRD-element distribution}
\end{figure}

The 3Di-hBN foam was inspected using TEM. The low-magnification bright-field TEM image in Figure \ref{fig:TEM-3DihBN}(a) shows a complex morphology with curvatures and overlapped structures, where the 3D layers of hBN (shown in Figure \ref{fig:SEM-3DihBN}) collapsed after their transfer to the TEM grid under the capillary force acting during the drying process. The selected-area electron diffraction pattern of 3Di-hBN (inset in Figure \ref{fig:TEM-3DihBN}(a)) indicates multiple orientations associated with a few layers of different orientations. The high-resolution TEM (HR-TEM) image (Figure \ref{fig:TEM-3DihBN}(b)) reveals 2–6 layers of hBN with an interlayer distance of approximately 0.25 nm (inset in Figure \ref{fig:TEM-3DihBN}(b)), which is attributed to the thickness of a single layer of 2D hBN.

\begin{figure}[!htb]
\centering
\includegraphics[width=\linewidth]{graphics/chapter_2/TEM-3DihBN}
\caption{TEM investigation: (a) low-magnification bright-field TEM image of 3Di-hBN; (b) HR-TEM image showing 2–6 layers of hBN; inset shows a distance of 0.25 nm between layers.}
\label{fig:TEM-3DihBN}
\end{figure}

In summary, 3Di-hBN-Cu-Ni composites were synthesized via a simple two-step process of (1) the compaction of Cu and Ni powder mixtures without any additives and (2) MOCVD. The density of the composite was the highest (7.75 g cm$^{-3}$) when the compaction pressure and sintering time were 280 MPa and 30 min, respectively. OM, SEM, and TEM images indicated that these conditions were optimal for the growth of the interconnected network of hBN in the 3Di-hBN-Cu-Ni composite. SEM investigations and EDS analysis revealed that the grain boundaries were mostly occupied by boron and nitrogen atoms. 3Di-hBN was obtained after etching away the Cu-Ni, and the average pocket size of the foam was 10–20 µm.
The 3Di-hBN-Cu-Ni composite with a density of 7.75 g cm$^{-3}$ was shown to have a three-dimensional network of 2D hBN. The structural investigation of 3Di-hBN through TEM revealed 2–6 layers with an interlayer distance of 0.25 nm. This study can be extended further for the characterization of the physical and chemical properties of the 3Di-hBN-Cu-Ni composite and 3Di-hBN.
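For context, the measured composite density can be compared with a simple rule-of-mixtures estimate for a fully dense 70/30 Cu-Ni matrix (assuming handbook densities of 8.96 and 8.91 g cm$^{-3}$ for Cu and Ni, respectively; this estimate is a rough consistency check rather than a measured value):

\begin{equation}
\frac{1}{\rho_{\text{Cu-Ni}}} = \frac{w_{\text{Cu}}}{\rho_{\text{Cu}}} + \frac{w_{\text{Ni}}}{\rho_{\text{Ni}}} = \frac{0.7}{8.96} + \frac{0.3}{8.91} \approx 0.1118~\text{cm}^{3}\,\text{g}^{-1}
\quad\Rightarrow\quad
\rho_{\text{Cu-Ni}} \approx 8.94~\text{g cm}^{-3}
\end{equation}

On this estimate, the measured 7.75 g cm$^{-3}$ corresponds to roughly 87\% of the theoretical matrix density, consistent with the presence of the lower-density hBN network and residual porosity.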
%{
Copyright (c) 2015, Tom Mertens
All rights reserved.

Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
%}

% Construction of Laplacian pyramid
%
% Arguments:
%   image 'I'
%   'nlev', number of levels in the pyramid (optional)
%
% [email protected], August 2007
%
% More information:
%   'The Laplacian Pyramid as a Compact Image Code'
%   Burt, P., and Adelson, E. H.,
%   IEEE Transactions on Communication, COM-31:532-540 (1983).
%
function pyr = laplacian_pyramid(I,nlev)

r = size(I,1);
c = size(I,2);

if ~exist('nlev','var')
    % compute the highest possible pyramid
    nlev = floor(log(min(r,c)) / log(2));
end

% recursively build pyramid
pyr = cell(nlev,1);
filter = pyramid_filter;
J = I;
for l = 1:nlev - 1
    % apply low pass filter, and downsample
    I = downsample(J,filter);
    odd = 2*size(I) - size(J); % for each dimension, check if the upsampled version has to be odd
    % in each level, store difference between image and upsampled low pass version
    pyr{l} = J - upsample(I,odd,filter);
    J = I; % continue with low pass image
end
pyr{nlev} = J; % the coarsest level contains the residual low pass image
If $f$ is a continuous function on the convex hull of three points $a$, $b$, and $c$, and $c$ is a convex combination of $a$ and $b$, then the sum of the contour integrals of $f$ along the three sides of the triangle is zero.
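In symbols, with $c = (1-t)a + tb$ for some $t \in [0,1]$ (a restatement of the statement above in the usual complex path-integral notation):

$$ \int_{[a,b]} f(z)\,dz + \int_{[b,c]} f(z)\,dz + \int_{[c,a]} f(z)\,dz = 0 $$

This holds because the triangle is degenerate: since $c$ lies on the segment $[a,b]$, additivity of the path integral gives $\int_{[a,b]} f = \int_{[a,c]} f + \int_{[c,b]} f$, while reversing orientation gives $\int_{[b,c]} f = -\int_{[c,b]} f$ and $\int_{[c,a]} f = -\int_{[a,c]} f$; continuity of $f$ on the hull (the segment itself) guarantees that all the integrals exist.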
/- Copyright (c) 2019 Amelia Livingston. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Amelia Livingston, Jireh Loreaux -/ import algebra.group_with_zero.inj_surj import algebra.ring.basic import algebra.divisibility.basic import data.pi.algebra import algebra.hom.units import data.set.image /-! # Homomorphisms of semirings and rings > THIS FILE IS SYNCHRONIZED WITH MATHLIB4. > Any changes to this file require a corresponding PR to mathlib4. This file defines bundled homomorphisms of (non-unital) semirings and rings. As with monoids and groups, we use the same structure `ring_hom α β`, a.k.a. `α →+* β`, for both types of homomorphisms. The unbundled homomorphisms are defined in `deprecated.ring`. They are deprecated and the plan is to slowly remove them from mathlib. ## Main definitions * `non_unital_ring_hom`: Non-unital (semi)ring homomorphisms. Additive monoid homomorphisms which preserve multiplication. * `ring_hom`: (Semi)ring homomorphisms. Monoid homomorphisms which are also additive monoid homomorphisms. ## Notations * `→ₙ+*`: Non-unital (semi)ring homs * `→+*`: (Semi)ring homs ## Implementation notes * There's a coercion from bundled homs to fun, and the canonical notation is to use the bundled hom as a function via this coercion. * There is no `semiring_hom` -- the idea is that `ring_hom` is used. The constructor for a `ring_hom` between semirings needs a proof of `map_zero`, `map_one` and `map_add` as well as `map_mul`; a separate constructor `ring_hom.mk'` will construct ring homs between rings from monoid homs given only a proof that addition is preserved. ## Tags `ring_hom`, `semiring_hom` -/ set_option old_structure_cmd true open function variables {F α β γ : Type*} /-- Bundled non-unital semiring homomorphisms `α →ₙ+* β`; use this for bundled non-unital ring homomorphisms too. When possible, instead of parametrizing results over `(f : α →ₙ+* β)`, you should parametrize over `(F : Type*) [non_unital_ring_hom_class F α β] (f : F)`. When you extend this structure, make sure to extend `non_unital_ring_hom_class`. -/ structure non_unital_ring_hom (α β : Type*) [non_unital_non_assoc_semiring α] [non_unital_non_assoc_semiring β] extends α →ₙ* β, α →+ β infixr ` →ₙ+* `:25 := non_unital_ring_hom /-- Reinterpret a non-unital ring homomorphism `f : α →ₙ+* β` as a semigroup homomorphism `α →ₙ* β`. The `simp`-normal form is `(f : α →ₙ* β)`. -/ add_decl_doc non_unital_ring_hom.to_mul_hom /-- Reinterpret a non-unital ring homomorphism `f : α →ₙ+* β` as an additive monoid homomorphism `α →+ β`. The `simp`-normal form is `(f : α →+ β)`. -/ add_decl_doc non_unital_ring_hom.to_add_monoid_hom section non_unital_ring_hom_class /-- `non_unital_ring_hom_class F α β` states that `F` is a type of non-unital (semi)ring homomorphisms. You should extend this class when you extend `non_unital_ring_hom`. -/ class non_unital_ring_hom_class (F : Type*) (α β : out_param Type*) [non_unital_non_assoc_semiring α] [non_unital_non_assoc_semiring β] extends mul_hom_class F α β, add_monoid_hom_class F α β variables [non_unital_non_assoc_semiring α] [non_unital_non_assoc_semiring β] [non_unital_ring_hom_class F α β] instance : has_coe_t F (α →ₙ+* β) := ⟨λ f, { to_fun := f, map_zero' := map_zero f, map_mul' := map_mul f, map_add' := map_add f }⟩ end non_unital_ring_hom_class namespace non_unital_ring_hom section coe /-! Throughout this section, some `semiring` arguments are specified with `{}` instead of `[]`. See note [implicit instance arguments].
-/ variables {rα : non_unital_non_assoc_semiring α} {rβ : non_unital_non_assoc_semiring β} include rα rβ instance : non_unital_ring_hom_class (α →ₙ+* β) α β := { coe := non_unital_ring_hom.to_fun, coe_injective' := λ f g h, by cases f; cases g; congr', map_add := non_unital_ring_hom.map_add', map_zero := non_unital_ring_hom.map_zero', map_mul := non_unital_ring_hom.map_mul' } /-- Helper instance for when there's too many metavariables to apply `fun_like.has_coe_to_fun` directly. -/ instance : has_coe_to_fun (α →ₙ+* β) (λ _, α → β) := ⟨non_unital_ring_hom.to_fun⟩ @[simp] lemma to_fun_eq_coe (f : α →ₙ+* β) : f.to_fun = f := rfl @[simp] lemma coe_mk (f : α → β) (h₁ h₂ h₃) : ⇑(⟨f, h₁, h₂, h₃⟩ : α →ₙ+* β) = f := rfl @[simp] lemma coe_coe [non_unital_ring_hom_class F α β] (f : F) : ((f : α →ₙ+* β) : α → β) = f := rfl @[simp] lemma coe_to_mul_hom (f : α →ₙ+* β) : ⇑f.to_mul_hom = f := rfl @[simp] lemma coe_mul_hom_mk (f : α → β) (h₁ h₂ h₃) : ((⟨f, h₁, h₂, h₃⟩ : α →ₙ+* β) : α →ₙ* β) = ⟨f, h₁⟩ := rfl @[simp] lemma coe_to_add_monoid_hom (f : α →ₙ+* β) : ⇑f.to_add_monoid_hom = f := rfl @[simp] lemma coe_add_monoid_hom_mk (f : α → β) (h₁ h₂ h₃) : ((⟨f, h₁, h₂, h₃⟩ : α →ₙ+* β) : α →+ β) = ⟨f, h₂, h₃⟩ := rfl /-- Copy of a `ring_hom` with a new `to_fun` equal to the old one. Useful to fix definitional equalities. -/ protected def copy (f : α →ₙ+* β) (f' : α → β) (h : f' = f) : α →ₙ+* β := { ..f.to_mul_hom.copy f' h, ..f.to_add_monoid_hom.copy f' h } @[simp] lemma coe_copy (f : α →ₙ+* β) (f' : α → β) (h : f' = f) : ⇑(f.copy f' h) = f' := rfl lemma copy_eq (f : α →ₙ+* β) (f' : α → β) (h : f' = f) : f.copy f' h = f := fun_like.ext' h end coe variables [rα : non_unital_non_assoc_semiring α] [rβ : non_unital_non_assoc_semiring β] section include rα rβ variables (f : α →ₙ+* β) {x y : α} {rα rβ} @[ext] lemma ext ⦃f g : α →ₙ+* β⦄ : (∀ x, f x = g x) → f = g := fun_like.ext _ _ lemma ext_iff {f g : α →ₙ+* β} : f = g ↔ ∀ x, f x = g x := fun_like.ext_iff @[simp] lemma mk_coe (f : α →ₙ+* β) (h₁ h₂ h₃) : non_unital_ring_hom.mk f h₁ h₂ h₃ = f := ext $ λ _, rfl lemma coe_add_monoid_hom_injective : injective (coe : (α →ₙ+* β) → (α →+ β)) := λ f g h, ext $ add_monoid_hom.congr_fun h lemma coe_mul_hom_injective : injective (coe : (α →ₙ+* β) → (α →ₙ* β)) := λ f g h, ext $ mul_hom.congr_fun h end /-- The identity non-unital ring homomorphism from a non-unital semiring to itself. -/ protected def id (α : Type*) [non_unital_non_assoc_semiring α] : α →ₙ+* α := by refine {to_fun := id, ..}; intros; refl include rα rβ instance : has_zero (α →ₙ+* β) := ⟨{ to_fun := 0, map_mul' := λ x y, (mul_zero (0 : β)).symm, map_zero' := rfl, map_add' := λ x y, (add_zero (0 : β)).symm }⟩ instance : inhabited (α →ₙ+* β) := ⟨0⟩ @[simp] lemma coe_zero : ⇑(0 : α →ₙ+* β) = 0 := rfl @[simp] lemma zero_apply (x : α) : (0 : α →ₙ+* β) x = 0 := rfl omit rβ @[simp] lemma id_apply (x : α) : non_unital_ring_hom.id α x = x := rfl @[simp] lemma coe_add_monoid_hom_id : (non_unital_ring_hom.id α : α →+ α) = add_monoid_hom.id α := rfl @[simp] lemma coe_mul_hom_id : (non_unital_ring_hom.id α : α →ₙ* α) = mul_hom.id α := rfl variable {rγ : non_unital_non_assoc_semiring γ} include rβ rγ /-- Composition of non-unital ring homomorphisms is a non-unital ring homomorphism. -/ def comp (g : β →ₙ+* γ) (f : α →ₙ+* β) : α →ₙ+* γ := { ..g.to_mul_hom.comp f.to_mul_hom, ..g.to_add_monoid_hom.comp f.to_add_monoid_hom } /-- Composition of non-unital ring homomorphisms is associative. 
-/ lemma comp_assoc {δ} {rδ : non_unital_non_assoc_semiring δ} (f : α →ₙ+* β) (g : β →ₙ+* γ) (h : γ →ₙ+* δ) : (h.comp g).comp f = h.comp (g.comp f) := rfl @[simp] lemma coe_comp (g : β →ₙ+* γ) (f : α →ₙ+* β) : ⇑(g.comp f) = g ∘ f := rfl @[simp] lemma comp_apply (g : β →ₙ+* γ) (f : α →ₙ+* β) (x : α) : g.comp f x = g (f x) := rfl @[simp] lemma coe_comp_add_monoid_hom (g : β →ₙ+* γ) (f : α →ₙ+* β) : (g.comp f : α →+ γ) = (g : β →+ γ).comp f := rfl @[simp] lemma coe_comp_mul_hom (g : β →ₙ+* γ) (f : α →ₙ+* β) : (g.comp f : α →ₙ* γ) = (g : β →ₙ* γ).comp f := rfl @[simp] lemma comp_zero (g : β →ₙ+* γ) : g.comp (0 : α →ₙ+* β) = 0 := by { ext, simp } @[simp] lemma zero_comp (f : α →ₙ+* β) : (0 : β →ₙ+* γ).comp f = 0 := by { ext, refl } omit rγ @[simp] lemma comp_id (f : α →ₙ+* β) : f.comp (non_unital_ring_hom.id α) = f := ext $ λ x, rfl @[simp] lemma id_comp (f : α →ₙ+* β) : (non_unital_ring_hom.id β).comp f = f := ext $ λ x, rfl omit rβ instance : monoid_with_zero (α →ₙ+* α) := { one := non_unital_ring_hom.id α, mul := comp, mul_one := comp_id, one_mul := id_comp, mul_assoc := λ f g h, comp_assoc _ _ _, zero := 0, mul_zero := comp_zero, zero_mul := zero_comp } lemma one_def : (1 : α →ₙ+* α) = non_unital_ring_hom.id α := rfl @[simp] lemma coe_one : ⇑(1 : α →ₙ+* α) = id := rfl lemma mul_def (f g : α →ₙ+* α) : f * g = f.comp g := rfl @[simp] lemma coe_mul (f g : α →ₙ+* α) : ⇑(f * g) = f ∘ g := rfl include rβ rγ lemma cancel_right {g₁ g₂ : β →ₙ+* γ} {f : α →ₙ+* β} (hf : surjective f) : g₁.comp f = g₂.comp f ↔ g₁ = g₂ := ⟨λ h, ext $ hf.forall.2 (ext_iff.1 h), λ h, h ▸ rfl⟩ lemma cancel_left {g : β →ₙ+* γ} {f₁ f₂ : α →ₙ+* β} (hg : injective g) : g.comp f₁ = g.comp f₂ ↔ f₁ = f₂ := ⟨λ h, ext $ λ x, hg $ by rw [← comp_apply, h, comp_apply], λ h, h ▸ rfl⟩ omit rα rβ rγ end non_unital_ring_hom /-- Bundled semiring homomorphisms; use this for bundled ring homomorphisms too. This extends from both `monoid_hom` and `monoid_with_zero_hom` in order to put the fields in a sensible order, even though `monoid_with_zero_hom` already extends `monoid_hom`. -/ structure ring_hom (α : Type*) (β : Type*) [non_assoc_semiring α] [non_assoc_semiring β] extends α →* β, α →+ β, α →ₙ+* β, α →*₀ β infixr ` →+* `:25 := ring_hom /-- Reinterpret a ring homomorphism `f : α →+* β` as a monoid with zero homomorphism `α →*₀ β`. The `simp`-normal form is `(f : α →*₀ β)`. -/ add_decl_doc ring_hom.to_monoid_with_zero_hom /-- Reinterpret a ring homomorphism `f : α →+* β` as a monoid homomorphism `α →* β`. The `simp`-normal form is `(f : α →* β)`. -/ add_decl_doc ring_hom.to_monoid_hom /-- Reinterpret a ring homomorphism `f : α →+* β` as an additive monoid homomorphism `α →+ β`. The `simp`-normal form is `(f : α →+ β)`. -/ add_decl_doc ring_hom.to_add_monoid_hom /-- Reinterpret a ring homomorphism `f : α →+* β` as a non-unital ring homomorphism `α →ₙ+* β`. The `simp`-normal form is `(f : α →ₙ+* β)`. -/ add_decl_doc ring_hom.to_non_unital_ring_hom section ring_hom_class /-- `ring_hom_class F α β` states that `F` is a type of (semi)ring homomorphisms. You should extend this class when you extend `ring_hom`. This extends from both `monoid_hom_class` and `monoid_with_zero_hom_class` in order to put the fields in a sensible order, even though `monoid_with_zero_hom_class` already extends `monoid_hom_class`. 
-/ class ring_hom_class (F : Type*) (α β : out_param Type*) [non_assoc_semiring α] [non_assoc_semiring β] extends monoid_hom_class F α β, add_monoid_hom_class F α β, monoid_with_zero_hom_class F α β variables [non_assoc_semiring α] [non_assoc_semiring β] [ring_hom_class F α β] /-- Ring homomorphisms preserve `bit1`. -/ @[simp] lemma map_bit1 (f : F) (a : α) : (f (bit1 a) : β) = bit1 (f a) := by simp [bit1] instance : has_coe_t F (α →+* β) := ⟨λ f, { to_fun := f, map_zero' := map_zero f, map_one' := map_one f, map_mul' := map_mul f, map_add' := map_add f }⟩ @[priority 100] instance ring_hom_class.to_non_unital_ring_hom_class : non_unital_ring_hom_class F α β := { .. ‹ring_hom_class F α β› } end ring_hom_class namespace ring_hom section coe /-! Throughout this section, some `semiring` arguments are specified with `{}` instead of `[]`. See note [implicit instance arguments]. -/ variables {rα : non_assoc_semiring α} {rβ : non_assoc_semiring β} include rα rβ instance : ring_hom_class (α →+* β) α β := { coe := ring_hom.to_fun, coe_injective' := λ f g h, by cases f; cases g; congr', map_add := ring_hom.map_add', map_zero := ring_hom.map_zero', map_mul := ring_hom.map_mul', map_one := ring_hom.map_one' } /-- Helper instance for when there's too many metavariables to apply `fun_like.has_coe_to_fun` directly. -/ instance : has_coe_to_fun (α →+* β) (λ _, α → β) := ⟨ring_hom.to_fun⟩ initialize_simps_projections ring_hom (to_fun → apply) @[simp] lemma to_fun_eq_coe (f : α →+* β) : f.to_fun = f := rfl @[simp] lemma coe_mk (f : α → β) (h₁ h₂ h₃ h₄) : ⇑(⟨f, h₁, h₂, h₃, h₄⟩ : α →+* β) = f := rfl @[simp] lemma coe_coe {F : Type*} [ring_hom_class F α β] (f : F) : ((f : α →+* β) : α → β) = f := rfl instance has_coe_monoid_hom : has_coe (α →+* β) (α →* β) := ⟨ring_hom.to_monoid_hom⟩ @[simp, norm_cast] lemma coe_monoid_hom (f : α →+* β) : ⇑(f : α →* β) = f := rfl @[simp] lemma to_monoid_hom_eq_coe (f : α →+* β) : f.to_monoid_hom = f := rfl @[simp] lemma to_monoid_with_zero_hom_eq_coe (f : α →+* β) : (f.to_monoid_with_zero_hom : α → β) = f := rfl @[simp] lemma coe_monoid_hom_mk (f : α → β) (h₁ h₂ h₃ h₄) : ((⟨f, h₁, h₂, h₃, h₄⟩ : α →+* β) : α →* β) = ⟨f, h₁, h₂⟩ := rfl @[simp, norm_cast] lemma coe_add_monoid_hom (f : α →+* β) : ⇑(f : α →+ β) = f := rfl @[simp] lemma to_add_monoid_hom_eq_coe (f : α →+* β) : f.to_add_monoid_hom = f := rfl @[simp] lemma coe_add_monoid_hom_mk (f : α → β) (h₁ h₂ h₃ h₄) : ((⟨f, h₁, h₂, h₃, h₄⟩ : α →+* β) : α →+ β) = ⟨f, h₃, h₄⟩ := rfl /-- Copy of a `ring_hom` with a new `to_fun` equal to the old one. Useful to fix definitional equalities. 
-/ def copy (f : α →+* β) (f' : α → β) (h : f' = f) : α →+* β := { ..f.to_monoid_with_zero_hom.copy f' h, ..f.to_add_monoid_hom.copy f' h } @[simp] lemma coe_copy (f : α →+* β) (f' : α → β) (h : f' = f) : ⇑(f.copy f' h) = f' := rfl lemma copy_eq (f : α →+* β) (f' : α → β) (h : f' = f) : f.copy f' h = f := fun_like.ext' h end coe variables [rα : non_assoc_semiring α] [rβ : non_assoc_semiring β] section include rα rβ variables (f : α →+* β) {x y : α} {rα rβ} lemma congr_fun {f g : α →+* β} (h : f = g) (x : α) : f x = g x := fun_like.congr_fun h x lemma congr_arg (f : α →+* β) {x y : α} (h : x = y) : f x = f y := fun_like.congr_arg f h lemma coe_inj ⦃f g : α →+* β⦄ (h : (f : α → β) = g) : f = g := fun_like.coe_injective h @[ext] lemma ext ⦃f g : α →+* β⦄ : (∀ x, f x = g x) → f = g := fun_like.ext _ _ lemma ext_iff {f g : α →+* β} : f = g ↔ ∀ x, f x = g x := fun_like.ext_iff @[simp] lemma mk_coe (f : α →+* β) (h₁ h₂ h₃ h₄) : ring_hom.mk f h₁ h₂ h₃ h₄ = f := ext $ λ _, rfl lemma coe_add_monoid_hom_injective : injective (coe : (α →+* β) → (α →+ β)) := λ f g h, ext $ add_monoid_hom.congr_fun h lemma coe_monoid_hom_injective : injective (coe : (α →+* β) → (α →* β)) := λ f g h, ext $ monoid_hom.congr_fun h /-- Ring homomorphisms map zero to zero. -/ protected lemma map_zero (f : α →+* β) : f 0 = 0 := map_zero f /-- Ring homomorphisms map one to one. -/ protected lemma map_one (f : α →+* β) : f 1 = 1 := map_one f /-- Ring homomorphisms preserve addition. -/ protected lemma map_add (f : α →+* β) : ∀ a b, f (a + b) = f a + f b := map_add f /-- Ring homomorphisms preserve multiplication. -/ protected lemma map_mul (f : α →+* β) : ∀ a b, f (a * b) = f a * f b := map_mul f /-- Ring homomorphisms preserve `bit0`. -/ protected lemma map_bit0 (f : α →+* β) : ∀ a, f (bit0 a) = bit0 (f a) := map_bit0 f /-- Ring homomorphisms preserve `bit1`. -/ protected lemma map_bit1 (f : α →+* β) : ∀ a, f (bit1 a) = bit1 (f a) := map_bit1 f @[simp] lemma map_ite_zero_one {F : Type*} [ring_hom_class F α β] (f : F) (p : Prop) [decidable p] : f (ite p 0 1) = ite p 0 1 := by { split_ifs; simp [h] } @[simp] lemma map_ite_one_zero {F : Type*} [ring_hom_class F α β] (f : F) (p : Prop) [decidable p] : f (ite p 1 0) = ite p 1 0 := by { split_ifs; simp [h] } /-- `f : α →+* β` has a trivial codomain iff `f 1 = 0`. -/ lemma codomain_trivial_iff_map_one_eq_zero : (0 : β) = 1 ↔ f 1 = 0 := by rw [map_one, eq_comm] /-- `f : α →+* β` has a trivial codomain iff it has a trivial range. -/ lemma codomain_trivial_iff_range_trivial : (0 : β) = 1 ↔ ∀ x, f x = 0 := f.codomain_trivial_iff_map_one_eq_zero.trans ⟨λ h x, by rw [←mul_one x, map_mul, h, mul_zero], λ h, h 1⟩ /-- `f : α →+* β` has a trivial codomain iff its range is `{0}`. -/ lemma codomain_trivial_iff_range_eq_singleton_zero : (0 : β) = 1 ↔ set.range f = {0} := f.codomain_trivial_iff_range_trivial.trans ⟨ λ h, set.ext (λ y, ⟨λ ⟨x, hx⟩, by simp [←hx, h x], λ hy, ⟨0, by simpa using hy.symm⟩⟩), λ h x, set.mem_singleton_iff.mp (h ▸ set.mem_range_self x)⟩ /-- `f : α →+* β` doesn't map `1` to `0` if `β` is nontrivial -/ lemma map_one_ne_zero [nontrivial β] : f 1 ≠ 0 := mt f.codomain_trivial_iff_map_one_eq_zero.mpr zero_ne_one /-- If there is a homomorphism `f : α →+* β` and `β` is nontrivial, then `α` is nontrivial. 
-/ lemma domain_nontrivial [nontrivial β] : nontrivial α := ⟨⟨1, 0, mt (λ h, show f 1 = 0, by rw [h, map_zero]) f.map_one_ne_zero⟩⟩ lemma codomain_trivial (f : α →+* β) [h : subsingleton α] : subsingleton β := (subsingleton_or_nontrivial β).resolve_right (λ _, by exactI not_nontrivial_iff_subsingleton.mpr h f.domain_nontrivial) end /-- Ring homomorphisms preserve additive inverse. -/ protected theorem map_neg [non_assoc_ring α] [non_assoc_ring β] (f : α →+* β) (x : α) : f (-x) = -(f x) := map_neg f x /-- Ring homomorphisms preserve subtraction. -/ protected theorem map_sub [non_assoc_ring α] [non_assoc_ring β] (f : α →+* β) (x y : α) : f (x - y) = (f x) - (f y) := map_sub f x y /-- Makes a ring homomorphism from a monoid homomorphism of rings which preserves addition. -/ def mk' [non_assoc_semiring α] [non_assoc_ring β] (f : α →* β) (map_add : ∀ a b, f (a + b) = f a + f b) : α →+* β := { ..add_monoid_hom.mk' f map_add, ..f } section semiring variables [semiring α] [semiring β] lemma is_unit_map (f : α →+* β) {a : α} : is_unit a → is_unit (f a) := is_unit.map f protected lemma map_dvd (f : α →+* β) {a b : α} : a ∣ b → f a ∣ f b := map_dvd f end semiring /-- The identity ring homomorphism from a semiring to itself. -/ def id (α : Type*) [non_assoc_semiring α] : α →+* α := by refine {to_fun := id, ..}; intros; refl include rα instance : inhabited (α →+* α) := ⟨id α⟩ @[simp] lemma id_apply (x : α) : ring_hom.id α x = x := rfl @[simp] lemma coe_add_monoid_hom_id : (id α : α →+ α) = add_monoid_hom.id α := rfl @[simp] lemma coe_monoid_hom_id : (id α : α →* α) = monoid_hom.id α := rfl variable {rγ : non_assoc_semiring γ} include rβ rγ /-- Composition of ring homomorphisms is a ring homomorphism. -/ def comp (g : β →+* γ) (f : α →+* β) : α →+* γ := { to_fun := g ∘ f, map_one' := by simp, ..g.to_non_unital_ring_hom.comp f.to_non_unital_ring_hom } /-- Composition of semiring homomorphisms is associative. -/ lemma comp_assoc {δ} {rδ: non_assoc_semiring δ} (f : α →+* β) (g : β →+* γ) (h : γ →+* δ) : (h.comp g).comp f = h.comp (g.comp f) := rfl @[simp] lemma coe_comp (hnp : β →+* γ) (hmn : α →+* β) : (hnp.comp hmn : α → γ) = hnp ∘ hmn := rfl lemma comp_apply (hnp : β →+* γ) (hmn : α →+* β) (x : α) : (hnp.comp hmn : α → γ) x = (hnp (hmn x)) := rfl omit rγ @[simp] lemma comp_id (f : α →+* β) : f.comp (id α) = f := ext $ λ x, rfl @[simp] lemma id_comp (f : α →+* β) : (id β).comp f = f := ext $ λ x, rfl omit rβ instance : monoid (α →+* α) := { one := id α, mul := comp, mul_one := comp_id, one_mul := id_comp, mul_assoc := λ f g h, comp_assoc _ _ _ } lemma one_def : (1 : α →+* α) = id α := rfl lemma mul_def (f g : α →+* α) : f * g = f.comp g := rfl @[simp] lemma coe_one : ⇑(1 : α →+* α) = _root_.id := rfl @[simp] lemma coe_mul (f g : α →+* α) : ⇑(f * g) = f ∘ g := rfl include rβ rγ lemma cancel_right {g₁ g₂ : β →+* γ} {f : α →+* β} (hf : surjective f) : g₁.comp f = g₂.comp f ↔ g₁ = g₂ := ⟨λ h, ring_hom.ext $ hf.forall.2 (ext_iff.1 h), λ h, h ▸ rfl⟩ lemma cancel_left {g : β →+* γ} {f₁ f₂ : α →+* β} (hg : injective g) : g.comp f₁ = g.comp f₂ ↔ f₁ = f₂ := ⟨λ h, ring_hom.ext $ λ x, hg $ by rw [← comp_apply, h, comp_apply], λ h, h ▸ rfl⟩ end ring_hom /-- Pullback `is_domain` instance along an injective function. 
-/ protected theorem function.injective.is_domain [ring α] [is_domain α] [ring β] (f : β →+* α) (hf : injective f) : is_domain β := begin haveI := pullback_nonzero f f.map_zero f.map_one, haveI := is_right_cancel_mul_zero.to_no_zero_divisors α, haveI := hf.no_zero_divisors f f.map_zero f.map_mul, exact no_zero_divisors.to_is_domain β, end namespace add_monoid_hom variables [comm_ring α] [is_domain α] [comm_ring β] (f : β →+ α) /-- Make a ring homomorphism from an additive group homomorphism from a commutative ring to an integral domain that commutes with self multiplication, assumes that two is nonzero and `1` is sent to `1`. -/ def mk_ring_hom_of_mul_self_of_two_ne_zero (h : ∀ x, f (x * x) = f x * f x) (h_two : (2 : α) ≠ 0) (h_one : f 1 = 1) : β →+* α := { map_one' := h_one, map_mul' := λ x y, begin have hxy := h (x + y), rw [mul_add, add_mul, add_mul, f.map_add, f.map_add, f.map_add, f.map_add, h x, h y, add_mul, mul_add, mul_add, ← sub_eq_zero, add_comm, ← sub_sub, ← sub_sub, ← sub_sub, mul_comm y x, mul_comm (f y) (f x)] at hxy, simp only [add_assoc, add_sub_assoc, add_sub_cancel'_right] at hxy, rw [sub_sub, ← two_mul, ← add_sub_assoc, ← two_mul, ← mul_sub, mul_eq_zero, sub_eq_zero, or_iff_not_imp_left] at hxy, exact hxy h_two, end, ..f } @[simp] lemma coe_fn_mk_ring_hom_of_mul_self_of_two_ne_zero (h h_two h_one) : (f.mk_ring_hom_of_mul_self_of_two_ne_zero h h_two h_one : β → α) = f := rfl @[simp] lemma coe_add_monoid_hom_mk_ring_hom_of_mul_self_of_two_ne_zero (h h_two h_one) : (f.mk_ring_hom_of_mul_self_of_two_ne_zero h h_two h_one : β →+ α) = f := by { ext, refl } end add_monoid_hom
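The multiplicativity argument inside `mk_ring_hom_of_mul_self_of_two_ne_zero` is easier to follow on paper. A sketch of the computation the tactic block performs, in ordinary commutative-ring notation (with `h` giving $f(a^2) = f(a)^2$ and `h_two` giving $2 \neq 0$ in the integral domain $\alpha$):

$$f(x)^2 + 2f(xy) + f(y)^2 = f\big((x+y)^2\big) = f(x+y)^2 = f(x)^2 + 2f(x)f(y) + f(y)^2,$$

using additivity of $f$, commutativity ($xy = yx$), and `h` applied to $x$, $y$, and $x+y$. Subtracting the two sides gives $2\,(f(xy) - f(x)f(y)) = 0$, and since $2 \neq 0$ in a domain, $f(xy) = f(x)f(y)$.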
if 1 % This is the setup used in the paper a=40; M=60; L=a*M; W=4; nrep=50; else a=50; M=200; L=a*M; W=4; nrep=50; end; system('rm crossover.log'); %for gl=M:M:20*M for gl=M:M:16*M s=sprintf('./time_dgt_fb %i %i %i %i %i %i >> crossover.log\n',a,M,L,W,gl,nrep); disp(s); system(s); end; s=sprintf('./time_dgt_fac %i %i %i %i %i > crossover.ref\n',a,M,L,W,nrep); disp(s); system(s); system('rm crossover_real.log'); %for gl=M:M:20*M for gl=M:M:16*M s=sprintf('./time_dgtreal_fb %i %i %i %i %i %i >> crossover_real.log\n',a,M,L,W,gl,nrep); disp(s); system(s); end; s=sprintf('./time_dgtreal_fac %i %i %i %i %i > crossover_real.ref\n',a,M,L,W,nrep); disp(s); system(s);
/** * GeoDa TM, Copyright (C) 2011-2015 by Luc Anselin - all rights reserved * * This file is part of GeoDa. * * GeoDa is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * GeoDa is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <boost/foreach.hpp> #include "../logger.h" #include "../DataViewer/TableInterface.h" #include "../DataViewer/TableState.h" #include "CatClassifManager.h" CatClassifManager::CatClassifManager(TableInterface* _table_int, TableState* _table_state, CustomClassifPtree* cc_ptree) : table_state(_table_state), table_int(_table_int) { BOOST_FOREACH(const CatClassifDef& cc, cc_ptree->GetCatClassifList()) { CreateNewClassifState(cc); } table_state->registerObserver(this); } CatClassifManager::~CatClassifManager() { for (std::list<CatClassifState*>::iterator it=classif_states.begin(); it != classif_states.end(); it++) { (*it)->closeAndDeleteWhenEmpty(); } table_state->removeObserver(this); } void CatClassifManager::GetTitles(std::vector<wxString>& titles) { titles.resize(classif_states.size()); int i=0; for (std::list<CatClassifState*>::iterator it=classif_states.begin(); it != classif_states.end(); it++) { titles[i++] = (*it)->GetTitle(); } } CatClassifState* CatClassifManager::FindClassifState(const wxString& title) { CatClassifState* ccs=0; for (std::list<CatClassifState*>::iterator it=classif_states.begin(); it != classif_states.end() && !ccs; it++) { if (title == (*it)->GetTitle()) ccs = (*it); } return ccs; } CatClassifState* CatClassifManager::CreateNewClassifState( const CatClassifDef& cc_data) { CatClassifState* ccs = new CatClassifState; ccs->SetCatClassif(cc_data); classif_states.push_back(ccs); return ccs; } void CatClassifManager::RemoveClassifState(CatClassifState* ccs) { ccs->closeAndDeleteWhenEmpty(); classif_states.remove(ccs); } bool CatClassifManager::VerifyAgainstTable() { if (!table_int) return false; bool any_changed = false; for (std::list<CatClassifState*>::iterator it=classif_states.begin(); it != classif_states.end(); it++) { CatClassifDef& cc = (*it)->GetCatClassif(); if (CatClassification::CorrectCatClassifFromTable(cc, table_int)) { any_changed = true; } } return any_changed; } void CatClassifManager::update(TableState* o) { std::list<CatClassifState*>::iterator i; if (o->GetEventType() == TableState::col_rename) { if (!o->IsSimpleGroupRename()) return; for (i = classif_states.begin(); i != classif_states.end(); ++i) { if ((*i)->GetCatClassif().assoc_db_fld_name.CmpNoCase(o->GetOldColName())==0) { (*i)->GetCatClassif().assoc_db_fld_name = o->GetNewColName(); } } } else if (o->GetEventType() == TableState::cols_delta) { const TableDeltaList_type& tdl = o->GetTableDeltaListRef(); BOOST_FOREACH(const TableDeltaEntry& e, tdl) { // remove association for every deleted field if (!e.insert) { for (i = classif_states.begin(); i!=classif_states.end(); ++i) { if ((*i)->GetCatClassif().assoc_db_fld_name.CmpNoCase(e.group_name)==0) { (*i)->GetCatClassif().assoc_db_fld_name = ""; (*i)->notifyObservers(); } } } } } else if (o->GetEventType() == 
TableState::col_data_change) { for (i = classif_states.begin(); i != classif_states.end(); ++i) { CatClassifDef& cc = (*i)->GetCatClassif(); if (cc.assoc_db_fld_name == o->GetModifiedColName() && cc.cat_classif_type != CatClassification::custom) { // reset breaks and notify observers int col = -1, tm = 0; bool found = table_int->DbColNmToColAndTm(cc.assoc_db_fld_name, col, tm); if (!found) continue; std::vector<double> v; std::vector<bool> v_undef; table_int->GetColData(col, tm, v); table_int->GetColUndefined(col, tm, v_undef); int num_obs = table_int->GetNumberRows(); Gda::dbl_int_pair_vec_type data(num_obs); for (int ii=0; ii<num_obs; ++ii) { data[ii].first = v[ii]; data[ii].second = ii; } std::sort(data.begin(), data.end(), Gda::dbl_int_pair_cmp_less); CatClassifDef _cc = cc; CatClassification::SetBreakPoints(_cc.breaks, _cc.names, data, v_undef, _cc.cat_classif_type, _cc.num_cats); if (_cc != cc) { cc = _cc; (*i)->notifyObservers(); } } else if (cc.assoc_db_fld_name == o->GetModifiedColName() && cc.cat_classif_type == CatClassification::custom) { int col = -1, tm = 0; bool found = table_int->DbColNmToColAndTm(cc.assoc_db_fld_name, col, tm); if (!found) continue; CatClassifDef _cc = cc; // ensure that breaks and min/max are within new // min/max bounds double new_min = cc.uniform_dist_min; double new_max = cc.uniform_dist_max; table_int->GetMinMaxVals(col, tm, new_min, new_max); if (cc.uniform_dist_min < new_min) { cc.uniform_dist_min = new_min; } if (cc.uniform_dist_max > new_max) { cc.uniform_dist_max = new_max; } for (int ii=0, sz=cc.breaks.size(); ii<sz; ++ii) { if (cc.breaks[ii] < new_min) cc.breaks[ii] = new_min; if (cc.breaks[ii] > new_max) cc.breaks[ii] = new_max; } if (_cc != cc) { (*i)->notifyObservers(); } } } } }
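For orientation, the non-custom branch above boils down to: fetch the modified column, re-sort (value, index) pairs, recompute the break points, and notify observers only on a real change. A minimal Python sketch of that control flow; all names here are hypothetical stand-ins (e.g. `set_break_points` for `CatClassification::SetBreakPoints`), not the GeoDa API:

```python
# Illustrative sketch only; names are hypothetical stand-ins for the C++ above.
def on_column_data_change(classif_states, get_column, set_break_points):
    for state in classif_states:
        cc = state.cat_classif                     # mutable classification definition
        values = get_column(cc.assoc_db_fld_name)  # None if the column is gone
        if values is None:
            continue
        # Mirror Gda::dbl_int_pair_vec_type: sort (value, original index) pairs.
        data = sorted((v, i) for i, v in enumerate(values))
        new_breaks, new_names = set_break_points(data, cc.cat_classif_type, cc.num_cats)
        if new_breaks != cc.breaks:                # only notify on a real change
            cc.breaks, cc.names = new_breaks, new_names
            state.notify_observers()
```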
# Coherent dark states and polarization switching Studying the effect of polarization switching on coherent dark states in a 9-level system. The system is made of two ground states and one excited state, all with J = 1, for a total of nine levels. Basically just 3x the 3-level system studied in "Coherent dark states in a 3-level system.ipynb". Note that the system actually has two polarization dark states, which significantly complicates things. ## Imports Start by importing the necessary packages ```python %load_ext autoreload %autoreload 2 import joblib import matplotlib.pyplot as plt plt.style.use("ggplot") import numpy as np import scipy import qutip from sympy import Symbol from toy_systems.couplings import FirstRankCouplingJ, ToyEnergy from toy_systems.dark_states import get_dark_states from toy_systems.decays import CouplingDecay, ToyDecay from toy_systems.hamiltonian import Hamiltonian from toy_systems.operators import JRotation from toy_systems.quantum_system import QuantumSystem from toy_systems.states import Basis, BasisState, JQuantumNumbers, ToyQuantumNumbers from toy_systems.utils import generate_P_op ``` The autoreload extension is already loaded. To reload it, use: %reload_ext autoreload ## Set up states and basis We start by defining the three sets of states of the system: the ground states $|g0\rangle$ and $|g1\rangle$, and the excited state $|e\rangle$: ```python g0s = [BasisState(qn=JQuantumNumbers(label="g0", J = 1, mJ = mJ)) for mJ in np.arange(-1,2)] g1s = [BasisState(qn=JQuantumNumbers(label="g1", J = 1, mJ = mJ)) for mJ in np.arange(-1,2)] es = [BasisState(qn=JQuantumNumbers(label="e", J = 1, mJ = mJ)) for mJ in np.arange(-1,2)] dump = [BasisState(qn = ToyQuantumNumbers(label="dump"))] # A state where the excited state can decay if desired # Define basis basis = Basis(g0s+g1s+es+dump) basis.print() ``` |0> = JQuantumNumbers(J=1, mJ=-1, label='g0') |1> = JQuantumNumbers(J=1, mJ=0, label='g0') |2> = JQuantumNumbers(J=1, mJ=1, label='g0') |3> = JQuantumNumbers(J=1, mJ=-1, label='g1') |4> = JQuantumNumbers(J=1, mJ=0, label='g1') |5> = JQuantumNumbers(J=1, mJ=1, label='g1') |6> = JQuantumNumbers(J=1, mJ=-1, label='e') |7> = JQuantumNumbers(J=1, mJ=0, label='e') |8> = JQuantumNumbers(J=1, mJ=1, label='e') |9> = |dump> ## Define energies, couplings, decays and quantum system I'm going to define the system in the rotating frame as usual. ### Energies ```python δ = Symbol('delta') # Energy splitting between |g0> and |g1> Δ = Symbol('Delta') # Detuning of drive field from 0 E0 = ToyEnergy(g0s, -δ/2) E1 = ToyEnergy(g1s, +δ/2) Ee = ToyEnergy(es, Δ) ``` ### Couplings Will treat the problem as if it has two time-dependent laser fields: one polarized along z and the other one along x.
The polarization will rotate back and forth between the two directions ```python Ωz = Symbol('Omega_z') # Drive field Rabi rate for z-polarization Ωx = Symbol('Omega_x') # Drive field Rabi rate for x-polarization # A condition to make sure the ground states don't get coupled to each other def both_not_ground(state1, state2): return (state1.qn.label == 'e') or (state2.qn.label == 'e') def both_not_excited(state1, state2): return not ((state1.qn.label == 'e') and (state2.qn.label == 'e')) def not_dump(state1, state2): return (state1.qn.label != "dump") and (state2.qn.label != "dump") coupling_z = FirstRankCouplingJ(Ωz, p_car=np.array((0,0,1)), other_conds = [both_not_ground, both_not_excited, not_dump], time_dep = "(t<0)") coupling_x = FirstRankCouplingJ(Ωx, p_car=np.array((1,0,0)), other_conds = [both_not_ground, both_not_excited, not_dump], time_dep = "(t>0)") ``` ### Decays Defining a decay from all $|e\rangle$ to all $|g0\rangle$ and $|g1\rangle$ as permitted by angular momentum: ```python # Define dipole couplings that connect excited state to ground states decay_couplings = [ FirstRankCouplingJ(1, p_car=np.array((1,0,0)), other_conds=[both_not_ground, both_not_excited, not_dump]), FirstRankCouplingJ(1, p_car=np.array((0,1,0)), other_conds=[both_not_ground, both_not_excited, not_dump]), FirstRankCouplingJ(1, p_car=np.array((0,0,1)), other_conds=[both_not_ground, both_not_excited, not_dump]), ] decays = [CouplingDecay(e, Symbol("Gamma")/2, decay_couplings) for e in es] + [ToyDecay(e, ground = dump[0], gamma = Symbol("Gamma_d")) for e in es] ``` ### Define a QuantumSystem The QuantumSystem object combines the basis, Hamiltonian and decays to make setting parameters for time evolution using QuTiP more convenient. ```python # Define the system system = QuantumSystem( basis=basis, couplings=[E0, E1, Ee, coupling_z, coupling_x], decays=decays, ) # Get representations of the Hamiltonian and the decays that will be accepted by qutip Hqobj, c_qobj = system.get_qobjs() ``` ## Time evolution No matter what state the system starts in, it should always end up in the dark state, from which it will slowly evolve out since the dark state is not an eigenstate of the Hamiltonian. 
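Before running it, the rate of that slow evolution can be read off directly. A sketch (with $\hbar = 1$ and the rotating-frame energies $\mp\delta/2$ assigned to $|g0\rangle$ and $|g1\rangle$ above): a dark superposition of the two ground states dephases as

$$|D(0)\rangle \propto \Omega_2\,|g0\rangle - \Omega_1\,|g1\rangle \;\longrightarrow\; \Omega_2\, e^{+i\delta t/2}\,|g0\rangle - \Omega_1\, e^{-i\delta t/2}\,|g1\rangle,$$

so the relative phase $e^{i\delta t}$ rotates it into the bright combination on a timescale $\sim 1/\delta$ (here $\Omega_1 = \Omega_2 = \Omega_z$). Only for $\delta = 0$ is the dark state stationary.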
```python # Get a pointer to the time-evolution arguments args = Hqobj.args print("Keys for setting arguments:") print(f"args = {args}") ``` Keys for setting arguments: args = {'delta': 1, 'Delta': 1, 'Omega_z': 1, 'Omega_x': 1, 'Gamma': 1, 'Gamma_d': 1} ```python test_coupling_z = FirstRankCouplingJ(Ωz, p_car=np.array((0,0,1)), other_conds = [both_not_ground, both_not_excited]) bright_state, dark_states, pol_dark_states = get_dark_states([g0s[0], g1s[0]], es[0], [test_coupling_z]) print(f"|B_z> =\n{bright_state}") if len(dark_states) != 0: print(f"\n|D_z> =\n{dark_states[0]}") if len(pol_dark_states) != 0: print(f"\n|D_z> =\n{pol_dark_states[0]}") ``` |B_z> = [-0.71+0.00j x JQuantumNumbers(J=1, mJ=-1, label='g0') -0.71+0.00j x JQuantumNumbers(J=1, mJ=-1, label='g1')] |D_z> = -0.71+0.00j x JQuantumNumbers(J=1, mJ=-1, label='g0') 0.71-0.00j x JQuantumNumbers(J=1, mJ=-1, label='g1') ```python # Set the parameters for the system args['delta'] = 0.1 args['Delta'] = 0 args['Omega_z'] = 1 args['Omega_x'] = 1 args['Gamma'] = 1 args['Gamma_d'] = 0 # Find the dark and bright states for each mJ and each polarization bright_states_z = [] dark_states_z = [] pol_dark_states_z = [] bright_states_x = [] dark_states_x = [] pol_dark_states_x = [] bright_states_y = [] dark_states_y = [] pol_dark_states_y = [] for g0, g1, e in zip(g0s, g1s, es): test_coupling_z = FirstRankCouplingJ(Ωz, p_car=np.array((0,0,1)), other_conds = [both_not_ground, both_not_excited]) # Find the bright and dark states for z-polarization bright_states, dark_states, pol_dark_states = get_dark_states([g0, g1], e, [test_coupling_z]) bright_states_z += bright_states dark_states_z += dark_states pol_dark_states_z += pol_dark_states # Find the dark and bright states for x- and y-polarization by rotating the basis # X-polarized rotation_x = JRotation(np.pi/2, np.array((0,1,0))) for bs in bright_states_z: bright_states_x.append(bs.apply_operator(basis, rotation_x)) for ds in dark_states_z: dark_states_x.append(ds.apply_operator(basis, rotation_x)) for ds in pol_dark_states_z: pol_dark_states_x.append(ds.apply_operator(basis, rotation_x)) # Y-polarized rotation_y = JRotation(np.pi/2, np.array((1,0,0))) for bs in bright_states_z: bright_states_y.append(bs.apply_operator(basis, rotation_y)) for ds in dark_states_z: dark_states_y.append(ds.apply_operator(basis, rotation_y)) for ds in pol_dark_states_z: pol_dark_states_y.append(ds.apply_operator(basis, rotation_y)) # Print the bright and dark states for each polarization: print("X-polarization:\n") for i, state in enumerate(bright_states_x): print(f"\n|Bx{i}> = ") state.print_state() for i, state in enumerate(dark_states_x): print(f"\n|Dx{i}> = ") state.print_state() print("\n\nY-polarization:\n") for i, state in enumerate(bright_states_y): print(f"\n|By{i}> = ") state.print_state() for i, state in enumerate(dark_states_y): print(f"\n|Dy{i}> = ") state.print_state() print("\n\nZ-polarization:\n") for i, state in enumerate(bright_states_z): print(f"\n|Bz{i}> = ") state.print_state() for i, state in enumerate(dark_states_z): print(f"\n|Dz{i}> = ") state.print_state() ``` X-polarization: |Bx0> = -0.3536+0.0000j x JQuantumNumbers(J=1, mJ=-1, label='g0') +0.5000+0.0000j x JQuantumNumbers(J=1, mJ=0, label='g0') -0.3536+0.0000j x JQuantumNumbers(J=1, mJ=1, label='g0') -0.3536+0.0000j x JQuantumNumbers(J=1, mJ=-1, label='g1') +0.5000+0.0000j x JQuantumNumbers(J=1, mJ=0, label='g1') -0.3536+0.0000j x JQuantumNumbers(J=1, mJ=1, label='g1') |Bx1> = +0.3536+0.0000j x JQuantumNumbers(J=1, mJ=-1, label='g0') 
+0.5000+0.0000j x JQuantumNumbers(J=1, mJ=0, label='g0') +0.3536+0.0000j x JQuantumNumbers(J=1, mJ=1, label='g0') +0.3536+0.0000j x JQuantumNumbers(J=1, mJ=-1, label='g1') +0.5000+0.0000j x JQuantumNumbers(J=1, mJ=0, label='g1') +0.3536+0.0000j x JQuantumNumbers(J=1, mJ=1, label='g1') |Dx0> = -0.3536+0.0000j x JQuantumNumbers(J=1, mJ=-1, label='g0') +0.5000+0.0000j x JQuantumNumbers(J=1, mJ=0, label='g0') -0.3536+0.0000j x JQuantumNumbers(J=1, mJ=1, label='g0') +0.3536+0.0000j x JQuantumNumbers(J=1, mJ=-1, label='g1') -0.5000+0.0000j x JQuantumNumbers(J=1, mJ=0, label='g1') +0.3536+0.0000j x JQuantumNumbers(J=1, mJ=1, label='g1') |Dx1> = +0.3536+0.0000j x JQuantumNumbers(J=1, mJ=-1, label='g0') +0.5000+0.0000j x JQuantumNumbers(J=1, mJ=0, label='g0') +0.3536+0.0000j x JQuantumNumbers(J=1, mJ=1, label='g0') -0.3536+0.0000j x JQuantumNumbers(J=1, mJ=-1, label='g1') -0.5000+0.0000j x JQuantumNumbers(J=1, mJ=0, label='g1') -0.3536+0.0000j x JQuantumNumbers(J=1, mJ=1, label='g1') Y-polarization: |By0> = -0.3536+0.0000j x JQuantumNumbers(J=1, mJ=-1, label='g0') 0.0000+0.5000j x JQuantumNumbers(J=1, mJ=0, label='g0') +0.3536+0.0000j x JQuantumNumbers(J=1, mJ=1, label='g0') -0.3536+0.0000j x JQuantumNumbers(J=1, mJ=-1, label='g1') 0.0000+0.5000j x JQuantumNumbers(J=1, mJ=0, label='g1') +0.3536+0.0000j x JQuantumNumbers(J=1, mJ=1, label='g1') |By1> = -0.3536+0.0000j x JQuantumNumbers(J=1, mJ=-1, label='g0') 0.0000-0.5000j x JQuantumNumbers(J=1, mJ=0, label='g0') +0.3536+0.0000j x JQuantumNumbers(J=1, mJ=1, label='g0') -0.3536+0.0000j x JQuantumNumbers(J=1, mJ=-1, label='g1') 0.0000-0.5000j x JQuantumNumbers(J=1, mJ=0, label='g1') +0.3536+0.0000j x JQuantumNumbers(J=1, mJ=1, label='g1') |Dy0> = -0.3536+0.0000j x JQuantumNumbers(J=1, mJ=-1, label='g0') 0.0000+0.5000j x JQuantumNumbers(J=1, mJ=0, label='g0') +0.3536+0.0000j x JQuantumNumbers(J=1, mJ=1, label='g0') +0.3536+0.0000j x JQuantumNumbers(J=1, mJ=-1, label='g1') 0.0000-0.5000j x JQuantumNumbers(J=1, mJ=0, label='g1') -0.3536+0.0000j x JQuantumNumbers(J=1, mJ=1, label='g1') |Dy1> = -0.3536+0.0000j x JQuantumNumbers(J=1, mJ=-1, label='g0') 0.0000-0.5000j x JQuantumNumbers(J=1, mJ=0, label='g0') +0.3536+0.0000j x JQuantumNumbers(J=1, mJ=1, label='g0') +0.3536+0.0000j x JQuantumNumbers(J=1, mJ=-1, label='g1') 0.0000+0.5000j x JQuantumNumbers(J=1, mJ=0, label='g1') -0.3536+0.0000j x JQuantumNumbers(J=1, mJ=1, label='g1') Z-polarization: |Bz0> = -0.7071+0.0000j x JQuantumNumbers(J=1, mJ=-1, label='g0') -0.7071+0.0000j x JQuantumNumbers(J=1, mJ=-1, label='g1') |Bz1> = +0.7071+0.0000j x JQuantumNumbers(J=1, mJ=1, label='g0') +0.7071+0.0000j x JQuantumNumbers(J=1, mJ=1, label='g1') |Dz0> = -0.7071+0.0000j x JQuantumNumbers(J=1, mJ=-1, label='g0') +0.7071-0.0000j x JQuantumNumbers(J=1, mJ=-1, label='g1') |Dz1> = +0.7071+0.0000j x JQuantumNumbers(J=1, mJ=1, label='g0') -0.7071+0.0000j x JQuantumNumbers(J=1, mJ=1, label='g1') ```python # Generate a Qobj representing the initial state # psi0 = (1*g0s[0]).qobj(basis) psi0 = (1*dark_states_z[0]).qobj(basis) # Operators for getting probability of being in each state as a function of time P_g0 = generate_P_op(g0s, basis) P_g1 = generate_P_op(g1s, basis) P_e = generate_P_op(es, basis) P_B_z = generate_P_op(bright_states_z, basis) P_D_z = generate_P_op(dark_states_z, basis) P_D_pol_z = generate_P_op(pol_dark_states_z, basis) P_B_x = generate_P_op(bright_states_x, basis) P_D_x = generate_P_op(dark_states_x, basis) P_D_pol_x = generate_P_op(pol_dark_states_x, basis) P_B_y = generate_P_op(bright_states_y, basis) 
P_D_y = generate_P_op(dark_states_y, basis) P_dump = generate_P_op(dump, basis) P_mJm1 = generate_P_op([g0s[0], g1s[0]], basis) P_mJ0 = generate_P_op([g0s[1], g1s[1]], basis) P_mJp1 = generate_P_op([g0s[2], g1s[2]], basis) P_ops = [P_g0, P_g1, P_e, P_B_z, P_D_z, P_B_x, P_D_x, P_B_y, P_D_y, P_mJm1, P_mJ0, P_mJp1, P_D_pol_z, P_D_pol_x] # Times at which result is requested times = np.linspace(-10,10,1001)/args["delta"] # Setting the max_step is sometimes necessary options = qutip.solver.Options(method = 'adams', nsteps=10000, max_step=1e0, rhs_reuse=False) # Setup a progress bar pb = qutip.ui.progressbar.EnhancedTextProgressBar() # Run the time-evolution result = qutip.mesolve(Hqobj, psi0, times, c_ops = c_qobj, e_ops = P_ops, progress_bar=pb, options = options) ``` Total run time: 0.74s*] Elapsed 0.74s / Remaining 00:00:00:00 Plot the results ```python fig, ax = plt.subplots(3, 1, figsize = (16,18)) ax[0].plot(times, result.expect[0], label = "P_g0") ax[0].plot(times, result.expect[1], label = "P_g1", ls = '--') ax[0].plot(times, result.expect[2], label = "P_e") ax[0].legend() ax[0].set_ylabel("Population in each state") ax[0].set_title("Energy eigenstate basis") ln = [] ln +=ax[1].plot(times, result.expect[3], label = "P_B_z") ln +=ax[1].plot(times, result.expect[4], label = "P_D_z") ln +=ax[1].plot(times, result.expect[12], label = "P_D_pol_z") ln +=ax[1].plot(times, result.expect[2], label = "P_e") ax[1].set_ylabel("Population in each state") ax[1].set_title("Dark and bright state basis for z-polarization") ax1c = ax[1].twinx() ax1c.grid(False) ln +=coupling_z.plot_time_dep(times, args, ax = ax1c, c = 'k', ls = '--', label = "z-pol mag") ax1c.set_ylabel("Magnitude of z-polarization") ax[1].legend(ln, [l.get_label() for l in ln]) ln2 = [] ln2+=ax[2].plot(times, result.expect[5], label = "P_B_x") ln2+=ax[2].plot(times, result.expect[6], label = "P_D_x") ln2+=ax[2].plot(times, result.expect[13], label = "P_D_pol_x") ln2+=ax[2].plot(times, result.expect[2], label = "P_e") ax[2].legend() ax[2].set_ylabel("Population in each state") ax[2].set_title("Dark and bright state basis for x-polarization") ax2c = ax[2].twinx() ax2c.grid(False) ln2 +=coupling_x.plot_time_dep(times, args, ax = ax2c, c = 'k', ls = '--', label = "x-pol mag") ax2c.set_ylabel("Magnitude of x-polarization") ax[2].legend(ln2, [l.get_label() for l in ln2]) print(f"\nPopulation in excited state at the end: {result.expect[2][-1]*100:.1e} %") print(f"Photons per unit time: {scipy.integrate.trapezoid(result.expect[2], x = times)/(times[-1]-times[0]):.2e}") ``` So what is happening here?: - I'm starting the system in the z-polarization coherent dark state with mJ = -1. - For x-polarization the initial state is an even mixture of a coherent and polarization dark state - Population slowly gets transferred to the z-polarization polarization dark state with mJ = 0, which is an even mixture of bright and dark for x-polarized light - Once the polarization gets flipped from z to x at t = 0, the bright state component for x-polarization allows some transitions to proceed quickly until the bright state is depleted. - After the bright state is depleted, the coherent dark state again starts to deplete at a rate that is proportional to $\delta$ and accumulates in the polarization dark state. - Would be more clear in a system that doesn't have any polarization dark states, so try J = 2 for the excited state (separate notebook for that). ```python ```
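The bullet points above are easier to check in the bare 3-level analogue, without `toy_systems` at all. A self-contained numpy sketch with illustrative parameters ($\hbar = 1$; basis order $|g0\rangle, |g1\rangle, |e\rangle$):

```python
import numpy as np

# Minimal Lambda system: ground states split by delta, both driven to |e>.
delta, O1, O2 = 0.1, 1.0, 1.0   # assumed illustrative values

H = np.array([
    [-delta / 2, 0.0,       O1 / 2],
    [0.0,        delta / 2, O2 / 2],
    [O1 / 2,     O2 / 2,    0.0   ],
])

N = np.hypot(O1, O2)
bright = np.array([O1,  O2, 0.0]) / N   # couples to the excited state
dark   = np.array([O2, -O1, 0.0]) / N   # annihilated by the drive

e = np.array([0.0, 0.0, 1.0])
print(e @ H @ bright)   # = N/2, nonzero: the bright state is driven
print(e @ H @ dark)     # = 0.0: the dark state is not driven

# But for delta != 0 the dark state is not an eigenstate: H|D> has a
# component along |B>, so population leaks out at a rate set by delta.
leak = H @ dark - (dark @ H @ dark) * dark
print(leak @ bright)    # = -delta/2 for O1 == O2
```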
State Before: α : Type u_1 inst✝ : DecidableEq α s✝ s' s : Cycle α h : Nodup s hn : Nontrivial s ⊢ IsCycle (formPerm s h) State After: case h α : Type u_1 inst✝ : DecidableEq α s s' : Cycle α a✝ : List α h : Nodup (Quot.mk Setoid.r a✝) hn : Nontrivial (Quot.mk Setoid.r a✝) ⊢ IsCycle (formPerm (Quot.mk Setoid.r a✝) h) Tactic: induction s using Quot.inductionOn State Before: case h α : Type u_1 inst✝ : DecidableEq α s s' : Cycle α a✝ : List α h : Nodup (Quot.mk Setoid.r a✝) hn : Nontrivial (Quot.mk Setoid.r a✝) ⊢ IsCycle (formPerm (Quot.mk Setoid.r a✝) h) State After: no goals Tactic: exact List.isCycle_formPerm h (length_nontrivial hn)
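Assembled into a single declaration, the trace above corresponds to a proof of roughly this shape (the theorem name is hypothetical; the two tactic steps are exactly those recorded):

```lean
theorem isCycle_formPerm {α : Type*} [DecidableEq α]
    (s : Cycle α) (h : s.Nodup) (hn : s.Nontrivial) :
    (s.formPerm h).IsCycle := by
  induction s using Quot.inductionOn
  exact List.isCycle_formPerm h (length_nontrivial hn)
```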
# M13. Machine Epsilon One does not so much make a case *for* floating point as require what it so generously offers. Recall that an $n$-bit machine is capable of representing $2^n$ discrete states, each of which may be assigned a unique integer $z\in \mathbb{Z}$, usually via unsigned binary or two's complement. Whatever the specifics, it is possible to construct a bijective map between the set of possible machine states and an interval $I \subset \mathbb{Z}$ of cardinality $|I|=2^n$. For sufficiently large $n$, we obtain exact representations of most of the integers we might encounter in everyday life. The range of *uint32*, for instance, is 0 to $2^{32}-1 =$ 4,294,967,295, while the range of *int32* is $-2^{31}=$ -2,147,483,648 to $+2^{31} -1 =$ +2,147,483,647. We note that this approach is by no means the last word. Most inconveniently, the Universe is under no compulsion to actually respect these ranges. Some numbers are simply Very Big: - The observable universe is roughly $10^{27}$ meters across. - There are around $10^{24}$ particles in a mole. - Our species consumes power at a rate of roughly $10^{13}$ watts. To compound our troubles, many interesting computations occur not even over $\mathbb{Z}$ but over $\mathbb{R}$ (the entirety of calculus, for instance, as well as continuous-time signal processing). The incorporation of the reals poses a challenge that is in a certain sense insurmountable. We note that while $\mathbb{Z}$ and $\mathbb{R}$ are both infinite sets, there *do* exist finite intervals of $\mathbb{Z}$ which may be equinumerous with our machine's state space, whatever size it is. On the other hand, in general, real intervals are not even countable, much less finite. A classic discrete-math exercise is to show that every open interval on $\mathbb{R}$ has the same cardinality as $\mathbb{R}$ itself. One approach is to prove that the map $f(x): \mathbb{R}\to (0,1)$, $x \mapsto \frac{1}{1+e^{-x}}$, also known as the *logistic sigmoid*, is a bijection (in turn, one can easily construct a bijection from $(0,1)$ to any interval $(a,b)$ as a matter of scaling and shifting). The uncountability of $\mathbb{R}$ (and therefore any open interval of $\mathbb{R}$) is then shown by deploying Cantor's *diagonal argument* on $(0,1)$: For the sake of contradiction, suppose that $(0,1)$ *is* enumerable. By assumption, we may list *all* of the elements in this set by some scheme or other | LIST | |:-------------:| | 0.48936032... | | 0.60012377... | | 0.23158392... | | 0.78832125... | | 0.34829373... | | 0.11239586... | | ... | Now consider a number constructed by joining each of the digits in the main diagonal of such a list | LIST | |:-------------:| | 0.**4**8936032... | | 0.6**0**012377... | | 0.23**1**58392... | | 0.788**3**2125... | | 0.3482**9**373... | | 0.11239**5**86... | | ... | Then, change each digit of this number to *another* digit (strictly speaking, another digit that isn't 9). We have constructed a number that *cannot* have been enumerated (every listed number differs from the newly-constructed number in at least one digit). Contradiction&mdash;the reals must be uncountable. It is thus fundamentally impossible to maintain exact representations of even a 'tiny' segment of the real line on a digital computer. This is good to know. Aspiring to achieve the impossible is, in my experience, demoralizing. What is instead attempted is the *approximation* of $\mathbb{R}$, a sort of illusion which we have all occasionally broken: ```python print(1.2-1.)
``` 0.19999999999999996 By *floating point*, we refer to an approximate representation of the real numbers that also solves, at a stroke, much of the scaling problem discussed above (in that the representation extends to both very large and very small numbers). The scheme is sketched as follows: - Define an even base $\beta$, also known as a *radix*. - Define a *precision* $p$. - A *floating point number* is then represented by $$\pm (d_0 + d_1\beta^{-1} + d_2\beta^{-2}+\ldots+d_{p-1}\beta^{-(p-1)})\beta^e$$ where $e$, the *exponent*, is bounded by the machine, and $0 \leq d_i < \beta$. Additionally, one may impose the constraint $d_0 \not = 0$, also called *normalization*, to ensure the uniqueness of all representations. The number $d_0.d_1 d_2 \ldots d_{p-1}$ is often called the *mantissa* of the number (sometimes, *significand*). We will not discuss the actual encodings by which floating point is usually implemented (namely, the standards set forth in IEEE 754). What is more important is the bounding of error (and thus the bounding of our ignorance). To start, note that for a fixed exponent $e$, discrete floating point representations are spaced $\beta^{e-(p-1)}$ apart. This is clear from decrementing the unit in the last place (ulp) of the mantissa by 1: \begin{align} (d_0 + d_1\beta^{-1} + d_2\beta^{-2}+\ldots+d_{p-1}\beta^{-(p-1)})\beta^e-(d_0 + d_1\beta^{-1} + d_2\beta^{-2}+\ldots+(d_{p-1}-1)\beta^{-(p-1)})\beta^e &= d_{p-1}\beta^{-(p-1)}\beta^{e}-(d_{p-1}-1)\beta^{-(p-1)}\beta^{e} \\ &=(d_{p-1}-(d_{p-1}-1))\beta^{-(p-1)}\beta^{e}\\ &=\beta^{e-(p-1)}. \end{align} Thus, the floating point representation of a real number may differ from that real number by as much as $\frac{\beta^{e-(p-1)}}{2}$, assuming that the representation is obtained through conventional rounding. Normalizing this difference by $\beta^{e}$ gives a notion of *maximum relative error*: $$\frac{\frac{\beta^{e-(p-1)}}{2}}{\beta^e} = \frac{\beta^{-(p-1)}}{2}.$$ This number, subject to the parameters $\beta$ and $p$, is known as the *machine epsilon* and denoted as $\varepsilon$. Simply put, it is an upper bound on the relative error of a floating point representation $r'$ of a real number $r$: $$\left|\frac{r'-r}{r}\right| \leq \varepsilon.$$ Somewhat frustratingly (actually, extremely frustratingly), we often see the machine epsilon defined alternatively as the smallest number $\varepsilon$ such that $1+\varepsilon \not = 1$ on a given machine (this comes out to the distance from $1$ to the next floating point representation). If this should be desired, it can be easily approximated by something like this: ```python def machineEpsilon(): epsilon = 1. while(1. + epsilon != 1.): epsilon = epsilon/2. return epsilon*2 # roll back one iteration machineEpsilon() ``` 2.220446049250313e-16 The result is in agreement with the parameter provided by the system module: ```python import sys sys.float_info.epsilon ``` 2.220446049250313e-16
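Both definitions are easy to spot-check numerically. A small sketch using only the standard library (`math.ulp` needs Python ≥ 3.9): the spacing of floats just above $1$ is $\beta^{-(p-1)} = 2^{-52}$, matching `sys.float_info.epsilon`, while the relative error of round-to-nearest stays within $\varepsilon = 2^{-53}$.

```python
import math
import random
import sys
from fractions import Fraction

# Spacing at 1.0: beta**(e-(p-1)) with beta = 2, e = 0, p = 53.
assert math.ulp(1.0) == 2.0**-52 == sys.float_info.epsilon

# Maximum relative error of round-to-nearest: beta**-(p-1) / 2 = 2**-53.
eps = Fraction(1, 2**53)
random.seed(0)
for _ in range(10_000):
    r = Fraction(random.randint(1, 10**6), 3)  # a "real" with no exact float form
    r_prime = Fraction(float(r))               # its nearest floating point neighbor
    assert abs(r_prime - r) / r <= eps         # |(r' - r)/r| <= machine epsilon
print("relative error bound holds on all samples")
```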
(* Copyright (C) 2017 M.A.L. Marques This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. *) $define lda_c_pw_params $define lda_c_pw_modified_params $include "lda_c_pw.mpl" ux := (mgamma, x) -> mgamma*x^2/(1 + mgamma*x^2): (* article uses t = 2 tau convention *) wx_ss := (t, dummy) -> (K_FACTOR_C - t)/(K_FACTOR_C + t): wx_os := (ts0, ts1) -> (K_FACTOR_C*(ts0 + ts1) - 2*ts0*ts1)/(K_FACTOR_C*(ts0 + ts1) + 2*ts0*ts1): b97mv_g := (mgamma, wx, cc, n, xs, ts0, ts1) -> add(cc[i][1]*wx(ts0, ts1)^cc[i][2]*ux(mgamma, xs)^cc[i][3], i=1..n): b97mv_fpar := (lda_func, rs, z, xs0, xs1, ts0, ts1) -> + lda_stoll_par(lda_func, rs, z, 1) * b97mv_g(gamma_x, wx_ss, par_x, par_n, xs0, ts0, 0) + lda_stoll_par(f_pw , rs, z, 1) * b97mv_g(gamma_ss, wx_ss, par_ss, par_n, xs0, ts0, 0) + lda_stoll_par(lda_func, rs, -z, -1) * b97mv_g(gamma_x, wx_ss, par_x, par_n, xs1, ts1, 0) + lda_stoll_par(f_pw , rs, -z, -1) * b97mv_g(gamma_ss, wx_ss, par_ss, par_n, xs1, ts1, 0): b97mv_fos := (rs, z, xs0, xs1, ts0, ts1) -> lda_stoll_perp(f_pw, rs, z) * b97mv_g(gamma_os, wx_os, par_os, par_n, sqrt(xs0^2 + xs1^2)/sqrt(2), ts0, ts1): f_b97mv := (lda_func, rs, z, xt, xs0, xs1, ts0, ts1) -> + b97mv_fpar(lda_func, rs, z, xs0, xs1, ts0, ts1) + b97mv_fos(rs, z, xs0, xs1, ts0, ts1):
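In standard notation, the pieces defined above read as follows (with $k$ = `K_FACTOR_C`, the uniform-gas kinetic prefactor, and $t$ twice the kinetic-energy density, per the comment in the source):

$$u_x(\gamma, x) = \frac{\gamma x^2}{1 + \gamma x^2}, \qquad w_x^{\sigma\sigma}(t_\sigma) = \frac{k - t_\sigma}{k + t_\sigma}, \qquad w_x^{\alpha\beta}(t_\alpha, t_\beta) = \frac{k\,(t_\alpha + t_\beta) - 2\,t_\alpha t_\beta}{k\,(t_\alpha + t_\beta) + 2\,t_\alpha t_\beta},$$

and each enhancement factor assembled by `b97mv_g` is the finite power series

$$g = \sum_{i=1}^{n} c_i\, w^{a_i}\, u_x(\gamma, x)^{b_i},$$

where $c_i$, $a_i$, $b_i$ are the three columns of the corresponding coefficient table (`par_x`, `par_ss`, `par_os`).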
header {* \isaheader{Standard HOL Bindings} *} theory Autoref_Bindings_HOL imports "Tool/Autoref_Tool" begin subsection "Structural Expansion" text {* In some situations, autoref imitates the operations on typeclasses and the typeclass hierarchy. This may result in structural mismatches, e.g., a hashcode side-condition may look like: @{text [display] "is_hashcode (prod_eq op= op=) hashcode"} This cannot be discharged by the rule @{text [display] "is_hashcode op= hashcode"} In order to handle such cases, we introduce a set of simplification lemmas that expand the structure of an operator as far as possible. These lemmas are integrated into a tagged solver, that can prove equality between operators modulo structural expansion. *} definition [simp]: "STRUCT_EQ_tag x y \<equiv> x = y" lemma STRUCT_EQ_tagI: "x=y \<Longrightarrow> STRUCT_EQ_tag x y" by simp ML {* structure Autoref_Struct_Expand = struct structure autoref_struct_expand = Named_Thms ( val name = @{binding autoref_struct_expand} val description = "Autoref: Structural expansion lemmas" ) fun expand_tac ctxt = let val ss = put_simpset HOL_basic_ss ctxt addsimps autoref_struct_expand.get ctxt in SOLVED' (asm_simp_tac ss) end val setup = autoref_struct_expand.setup val decl_setup = fn phi => Tagged_Solver.declare_solver @{thms STRUCT_EQ_tagI} @{binding STRUCT_EQ} "Autoref: Equality modulo structural expansion" (expand_tac) phi end *} setup Autoref_Struct_Expand.setup declaration Autoref_Struct_Expand.decl_setup text {* Sometimes, also relators must be expanded. Usually to check them to be the identity relator *} definition [simp]: "REL_IS_ID R \<equiv> R=Id" definition [simp]: "REL_FORCE_ID R \<equiv> R=Id" lemma REL_IS_ID_trigger: "R=Id \<Longrightarrow> REL_IS_ID R" by simp lemma REL_FORCE_ID_trigger: "R=Id \<Longrightarrow> REL_FORCE_ID R" by simp declaration {* Tagged_Solver.add_triggers "Relators.relator_props_solver" @{thms REL_IS_ID_trigger} *} declaration {* Tagged_Solver.add_triggers "Relators.force_relator_props_solver" @{thms REL_FORCE_ID_trigger} *} abbreviation "PREFER_id R \<equiv> PREFER REL_IS_ID R" (* TODO: Most of these are parametricity theorems! 
*) lemmas [autoref_rel_intf] = REL_INTFI[of fun_rel i_fun] subsection "Booleans" consts i_bool :: interface lemmas [autoref_rel_intf] = REL_INTFI[of bool_rel i_bool] lemma [autoref_itype]: "True ::\<^sub>i i_bool" "False ::\<^sub>i i_bool" "conj ::\<^sub>i i_bool \<rightarrow>\<^sub>i i_bool \<rightarrow>\<^sub>i i_bool" "op \<longleftrightarrow> ::\<^sub>i i_bool \<rightarrow>\<^sub>i i_bool \<rightarrow>\<^sub>i i_bool" "op \<longrightarrow> ::\<^sub>i i_bool \<rightarrow>\<^sub>i i_bool \<rightarrow>\<^sub>i i_bool" "disj ::\<^sub>i i_bool \<rightarrow>\<^sub>i i_bool \<rightarrow>\<^sub>i i_bool" "Not ::\<^sub>i i_bool \<rightarrow>\<^sub>i i_bool" "case_bool ::\<^sub>i I \<rightarrow>\<^sub>i I \<rightarrow>\<^sub>i i_bool \<rightarrow>\<^sub>i I" "old.rec_bool ::\<^sub>i I \<rightarrow>\<^sub>i I \<rightarrow>\<^sub>i i_bool \<rightarrow>\<^sub>i I" by auto lemma autoref_bool[autoref_rules]: "(x,x)\<in>bool_rel" "(conj,conj)\<in>bool_rel\<rightarrow>bool_rel\<rightarrow>bool_rel" "(disj,disj)\<in>bool_rel\<rightarrow>bool_rel\<rightarrow>bool_rel" "(Not,Not)\<in>bool_rel\<rightarrow>bool_rel" "(case_bool,case_bool)\<in>R\<rightarrow>R\<rightarrow>bool_rel\<rightarrow>R" "(old.rec_bool,old.rec_bool)\<in>R\<rightarrow>R\<rightarrow>bool_rel\<rightarrow>R" "(op \<longleftrightarrow>, op \<longleftrightarrow>)\<in>bool_rel\<rightarrow>bool_rel\<rightarrow>bool_rel" "(op \<longrightarrow>, op \<longrightarrow>)\<in>bool_rel\<rightarrow>bool_rel\<rightarrow>bool_rel" by (auto split: bool.split simp: rec_bool_is_case) subsection "Standard Type Classes" context begin interpretation autoref_syn . text {* We allow these operators for all interfaces. *} lemma [autoref_itype]: "op < ::\<^sub>i I \<rightarrow>\<^sub>i I \<rightarrow>\<^sub>i i_bool" "op \<le> ::\<^sub>i I \<rightarrow>\<^sub>i I \<rightarrow>\<^sub>i i_bool" "op = ::\<^sub>i I \<rightarrow>\<^sub>i I \<rightarrow>\<^sub>i i_bool" "op + ::\<^sub>i I \<rightarrow>\<^sub>i I \<rightarrow>\<^sub>i I" "op - ::\<^sub>i I \<rightarrow>\<^sub>i I \<rightarrow>\<^sub>i I" "op div ::\<^sub>i I \<rightarrow>\<^sub>i I \<rightarrow>\<^sub>i I" "op mod ::\<^sub>i I \<rightarrow>\<^sub>i I \<rightarrow>\<^sub>i I" "op * ::\<^sub>i I \<rightarrow>\<^sub>i I \<rightarrow>\<^sub>i I" "0 ::\<^sub>i I" "1 ::\<^sub>i I" "numeral x ::\<^sub>i I" "uminus ::\<^sub>i I \<rightarrow>\<^sub>i I" by auto lemma pat_num_generic[autoref_op_pat]: "0 \<equiv> OP 0 :::\<^sub>i I" "1 \<equiv> OP 1 :::\<^sub>i I" "numeral x \<equiv> (OP (numeral x) :::\<^sub>i I)" by simp_all lemma [autoref_rules]: assumes "PRIO_TAG_GEN_ALGO" shows "(op <, op <) \<in> Id\<rightarrow>Id\<rightarrow>bool_rel" and "(op \<le>, op \<le>) \<in> Id\<rightarrow>Id\<rightarrow>bool_rel" and "(op =, op =) \<in> Id\<rightarrow>Id\<rightarrow>bool_rel" and "(numeral x,OP (numeral x) ::: Id) \<in> Id" and "(uminus,uminus) \<in> Id \<rightarrow> Id" and "(0,0) \<in> Id" and "(1,1) \<in> Id" by auto subsection "Functional Combinators" lemma [autoref_itype]: "id ::\<^sub>i I \<rightarrow>\<^sub>i I" by simp lemma autoref_id[autoref_rules]: "(id,id)\<in>R\<rightarrow>R" by auto term "op o" lemma [autoref_itype]: "op \<circ> ::\<^sub>i (Ia\<rightarrow>\<^sub>iIb) \<rightarrow>\<^sub>i (Ic \<rightarrow>\<^sub>i Ia) \<rightarrow>\<^sub>i Ic \<rightarrow>\<^sub>i Ib" by simp lemma autoref_comp[autoref_rules]: "(op o, op o) \<in> (Ra \<rightarrow> Rb) \<rightarrow> (Rc \<rightarrow> Ra) \<rightarrow> Rc \<rightarrow> Rb" by (auto dest: fun_relD) lemma [autoref_itype]: "If ::\<^sub>i i_bool 
\<rightarrow>\<^sub>i I \<rightarrow>\<^sub>i I \<rightarrow>\<^sub>i I" by simp lemma autoref_If[autoref_rules]: "(If,If)\<in>Id\<rightarrow>R\<rightarrow>R\<rightarrow>R" by auto lemma autoref_If_cong[autoref_rules]: assumes "(c',c)\<in>Id" assumes "REMOVE_INTERNAL c \<Longrightarrow> (t',t)\<in>R" assumes "\<not> REMOVE_INTERNAL c \<Longrightarrow> (e',e)\<in>R" shows "(If c' t' e',(OP If ::: Id\<rightarrow>R\<rightarrow>R\<rightarrow>R)$c$t$e)\<in>R" using assms by (auto) lemma [autoref_itype]: "Let ::\<^sub>i Ix \<rightarrow>\<^sub>i (Ix\<rightarrow>\<^sub>iIy) \<rightarrow>\<^sub>i Iy" by auto lemma autoref_Let[autoref_rules]: "(Let,Let)\<in>Ra \<rightarrow> (Ra\<rightarrow>Rr) \<rightarrow> Rr" by (auto dest: fun_relD) end subsection "Unit" consts i_unit :: interface lemmas [autoref_rel_intf] = REL_INTFI[of unit_rel i_unit] (*lemma [autoref_itype]: "(a::unit) ::\<^sub>i i_unit" by simp*) lemma [autoref_rules]: "((),())\<in>unit_rel" by simp subsection "Nat" consts i_nat :: interface lemmas [autoref_rel_intf] = REL_INTFI[of nat_rel i_nat] context begin interpretation autoref_syn . lemma pat_num_nat[autoref_op_pat]: "0::nat \<equiv> OP 0 :::\<^sub>i i_nat" "1::nat \<equiv> OP 1 :::\<^sub>i i_nat" "(numeral x)::nat \<equiv> (OP (numeral x) :::\<^sub>i i_nat)" by simp_all lemma autoref_nat[autoref_rules]: "(0, 0::nat) \<in> nat_rel" "(Suc, Suc) \<in> nat_rel \<rightarrow> nat_rel" "(1, 1::nat) \<in> nat_rel" "(numeral n::nat,numeral n::nat) \<in> nat_rel" "(op <, op <::nat \<Rightarrow> _) \<in> nat_rel \<rightarrow> nat_rel \<rightarrow> bool_rel" "(op \<le>, op \<le>::nat \<Rightarrow> _) \<in> nat_rel \<rightarrow> nat_rel \<rightarrow> bool_rel" "(op =, op =::nat \<Rightarrow> _) \<in> nat_rel \<rightarrow> nat_rel \<rightarrow> bool_rel" "(op +::nat\<Rightarrow>_,op +)\<in>nat_rel\<rightarrow>nat_rel\<rightarrow>nat_rel" "(op -::nat\<Rightarrow>_,op -)\<in>nat_rel\<rightarrow>nat_rel\<rightarrow>nat_rel" "(op div::nat\<Rightarrow>_,op div)\<in>nat_rel\<rightarrow>nat_rel\<rightarrow>nat_rel" "(op *, op *)\<in>nat_rel\<rightarrow>nat_rel\<rightarrow>nat_rel" "(op mod, op mod)\<in>nat_rel\<rightarrow>nat_rel\<rightarrow>nat_rel" by auto lemma autoref_case_nat[autoref_rules]: "(case_nat,case_nat)\<in>Ra \<rightarrow> (Id \<rightarrow> Ra) \<rightarrow> Id \<rightarrow> Ra" apply (intro fun_relI) apply (auto split: nat.split dest: fun_relD) done lemma autoref_rec_nat: "(rec_nat,rec_nat) \<in> R \<rightarrow> (Id \<rightarrow> R \<rightarrow> R) \<rightarrow> Id \<rightarrow> R" apply (intro fun_relI) proof - case (goal1 s s' f f' n n') thus ?case apply (induct n' arbitrary: n s s') apply (fastforce simp: fun_rel_def)+ done qed end subsection "Int" consts i_int :: interface lemmas [autoref_rel_intf] = REL_INTFI[of int_rel i_int] context begin interpretation autoref_syn . 
lemma pat_num_int[autoref_op_pat]: "0::int \<equiv> OP 0 :::\<^sub>i i_int" "1::int \<equiv> OP 1 :::\<^sub>i i_int" "(numeral x)::int \<equiv> (OP (numeral x) :::\<^sub>i i_int)" "(neg_numeral x)::int \<equiv> (OP (neg_numeral x) :::\<^sub>i i_int)" by simp_all (*lemma [autoref_itype]: "(op < :: int \<Rightarrow> _) ::\<^sub>i i_int \<rightarrow>\<^sub>i i_int \<rightarrow>\<^sub>i i_bool" "(op \<le> :: int \<Rightarrow> _) ::\<^sub>i i_int \<rightarrow>\<^sub>i i_int \<rightarrow>\<^sub>i i_bool" "(op = :: int \<Rightarrow> _) ::\<^sub>i i_int \<rightarrow>\<^sub>i i_int \<rightarrow>\<^sub>i i_bool" "(op + :: int \<Rightarrow> _) ::\<^sub>i i_int \<rightarrow>\<^sub>i i_int \<rightarrow>\<^sub>i i_int" "(op - :: int \<Rightarrow> _) ::\<^sub>i i_int \<rightarrow>\<^sub>i i_int \<rightarrow>\<^sub>i i_int" "(op div :: int \<Rightarrow> _) ::\<^sub>i i_int \<rightarrow>\<^sub>i i_int \<rightarrow>\<^sub>i i_int" "(uminus :: int \<Rightarrow> _) ::\<^sub>i i_int \<rightarrow>\<^sub>i i_int" by auto*) lemma autoref_int[autoref_rules (overloaded)]: "(0, 0::int) \<in> int_rel" "(1, 1::int) \<in> int_rel" "(numeral n::int,numeral n::int) \<in> int_rel" "(op <, op <::int \<Rightarrow> _) \<in> int_rel \<rightarrow> int_rel \<rightarrow> bool_rel" "(op \<le>, op \<le>::int \<Rightarrow> _) \<in> int_rel \<rightarrow> int_rel \<rightarrow> bool_rel" "(op =, op =::int \<Rightarrow> _) \<in> int_rel \<rightarrow> int_rel \<rightarrow> bool_rel" "(op +::int\<Rightarrow>_,op +)\<in>int_rel\<rightarrow>int_rel\<rightarrow>int_rel" "(op -::int\<Rightarrow>_,op -)\<in>int_rel\<rightarrow>int_rel\<rightarrow>int_rel" "(op div::int\<Rightarrow>_,op div)\<in>int_rel\<rightarrow>int_rel\<rightarrow>int_rel" "(uminus,uminus)\<in>int_rel\<rightarrow>int_rel" "(op *, op *)\<in>int_rel\<rightarrow>int_rel\<rightarrow>int_rel" "(op mod, op mod)\<in>int_rel\<rightarrow>int_rel\<rightarrow>int_rel" by auto end subsection "Product" consts i_prod :: "interface \<Rightarrow> interface \<Rightarrow> interface" lemmas [autoref_rel_intf] = REL_INTFI[of prod_rel i_prod] context begin interpretation autoref_syn . 
(* lemma [autoref_itype]: "Pair ::\<^sub>i Ia \<rightarrow>\<^sub>i Ib \<rightarrow>\<^sub>i \<langle>Ia,Ib\<rangle>\<^sub>ii_prod" "case_prod ::\<^sub>i (Ia \<rightarrow>\<^sub>i Ib \<rightarrow>\<^sub>i I) \<rightarrow>\<^sub>i \<langle>Ia,Ib\<rangle>\<^sub>ii_prod \<rightarrow>\<^sub>i I" "old.rec_prod ::\<^sub>i (Ia \<rightarrow>\<^sub>i Ib \<rightarrow>\<^sub>i I) \<rightarrow>\<^sub>i \<langle>Ia,Ib\<rangle>\<^sub>ii_prod \<rightarrow>\<^sub>i I" "fst ::\<^sub>i \<langle>Ia,Ib\<rangle>\<^sub>ii_prod \<rightarrow>\<^sub>i Ia" "snd ::\<^sub>i \<langle>Ia,Ib\<rangle>\<^sub>ii_prod \<rightarrow>\<^sub>i Ib" "(op = :: _\<times>_ \<Rightarrow> _) ::\<^sub>i \<langle>Ia,Ib\<rangle>\<^sub>ii_prod \<rightarrow>\<^sub>i \<langle>Ia,Ib\<rangle>\<^sub>ii_prod \<rightarrow>\<^sub>i i_bool" by auto *) lemma prod_refine[autoref_rules]: "(Pair,Pair)\<in>Ra \<rightarrow> Rb \<rightarrow> \<langle>Ra,Rb\<rangle>prod_rel" "(case_prod,case_prod) \<in> (Ra \<rightarrow> Rb \<rightarrow> Rr) \<rightarrow> \<langle>Ra,Rb\<rangle>prod_rel \<rightarrow> Rr" "(old.rec_prod,old.rec_prod) \<in> (Ra \<rightarrow> Rb \<rightarrow> Rr) \<rightarrow> \<langle>Ra,Rb\<rangle>prod_rel \<rightarrow> Rr" "(fst,fst)\<in>\<langle>Ra,Rb\<rangle>prod_rel \<rightarrow> Ra" "(snd,snd)\<in>\<langle>Ra,Rb\<rangle>prod_rel \<rightarrow> Rb" by (auto dest: fun_relD split: prod.split simp: prod_rel_def rec_prod_is_case) definition "prod_eq eqa eqb x1 x2 \<equiv> case x1 of (a1,b1) \<Rightarrow> case x2 of (a2,b2) \<Rightarrow> eqa a1 a2 \<and> eqb b1 b2" lemma prod_eq_autoref[autoref_rules (overloaded)]: "\<lbrakk>GEN_OP eqa op = (Ra\<rightarrow>Ra\<rightarrow>Id); GEN_OP eqb op = (Rb\<rightarrow>Rb\<rightarrow>Id)\<rbrakk> \<Longrightarrow> (prod_eq eqa eqb,op =) \<in> \<langle>Ra,Rb\<rangle>prod_rel \<rightarrow> \<langle>Ra,Rb\<rangle>prod_rel \<rightarrow> Id" unfolding prod_eq_def[abs_def] by (fastforce dest: fun_relD) lemma prod_eq_expand[autoref_struct_expand]: "op = = prod_eq op= op=" unfolding prod_eq_def[abs_def] by (auto intro!: ext) end subsection "Option" consts i_option :: "interface \<Rightarrow> interface" lemmas [autoref_rel_intf] = REL_INTFI[of option_rel i_option] context begin interpretation autoref_syn . 
(* lemma [autoref_itype]: "None ::\<^sub>i \<langle>I\<rangle>\<^sub>ii_option" "Some ::\<^sub>i I \<rightarrow>\<^sub>i \<langle>I\<rangle>\<^sub>ii_option" "the ::\<^sub>i \<langle>I\<rangle>\<^sub>ii_option \<rightarrow>\<^sub>i I" "case_option ::\<^sub>i I \<rightarrow>\<^sub>i (Iv\<rightarrow>\<^sub>iI) \<rightarrow>\<^sub>i \<langle>Iv\<rangle>\<^sub>ii_option \<rightarrow>\<^sub>i I" "rec_option ::\<^sub>i I \<rightarrow>\<^sub>i (Iv\<rightarrow>\<^sub>iI) \<rightarrow>\<^sub>i \<langle>Iv\<rangle>\<^sub>ii_option \<rightarrow>\<^sub>i I" "(op = :: _ option \<Rightarrow> _) ::\<^sub>i \<langle>I\<rangle>\<^sub>ii_option \<rightarrow>\<^sub>i \<langle>I\<rangle>\<^sub>ii_option \<rightarrow>\<^sub>i i_bool" by auto *) lemma autoref_opt[autoref_rules]: "(None,None)\<in>\<langle>R\<rangle>option_rel" "(Some,Some)\<in>R \<rightarrow> \<langle>R\<rangle>option_rel" "(case_option,case_option)\<in>Rr\<rightarrow>(R \<rightarrow> Rr)\<rightarrow>\<langle>R\<rangle>option_rel \<rightarrow> Rr" "(rec_option,rec_option)\<in>Rr\<rightarrow>(R \<rightarrow> Rr)\<rightarrow>\<langle>R\<rangle>option_rel \<rightarrow> Rr" by (auto split: option.split simp: option_rel_def case_option_def[symmetric] dest: fun_relD) lemma autoref_the[autoref_rules]: assumes "SIDE_PRECOND (x\<noteq>None)" assumes "(x',x)\<in>\<langle>R\<rangle>option_rel" shows "(the x', (OP the ::: \<langle>R\<rangle>option_rel \<rightarrow> R)$x) \<in> R" using assms by (auto simp: option_rel_def) lemma autoref_the_default[autoref_rules]: "(the_default, the_default) \<in> R \<rightarrow> \<langle>R\<rangle>option_rel \<rightarrow> R" by parametricity definition [simp]: "is_None a \<equiv> case a of None \<Rightarrow> True | _ \<Rightarrow> False" lemma pat_isNone[autoref_op_pat]: "a=None \<equiv> (OP is_None :::\<^sub>i \<langle>I\<rangle>\<^sub>ii_option \<rightarrow>\<^sub>i i_bool)$a" "None=a \<equiv> (OP is_None :::\<^sub>i \<langle>I\<rangle>\<^sub>ii_option \<rightarrow>\<^sub>i i_bool)$a" by (auto intro!: eq_reflection split: option.splits) lemma autoref_is_None[autoref_rules]: "(is_None,is_None)\<in>\<langle>R\<rangle>option_rel \<rightarrow> Id" by (auto split: option.splits) definition "option_eq eq v1 v2 \<equiv> case (v1,v2) of (None,None) \<Rightarrow> True | (Some x1, Some x2) \<Rightarrow> eq x1 x2 | _ \<Rightarrow> False" lemma option_eq_autoref[autoref_rules (overloaded)]: "\<lbrakk>GEN_OP eq op = (R\<rightarrow>R\<rightarrow>Id)\<rbrakk> \<Longrightarrow> (option_eq eq,op =) \<in> \<langle>R\<rangle>option_rel \<rightarrow> \<langle>R\<rangle>option_rel \<rightarrow> Id" unfolding option_eq_def[abs_def] by (auto dest: fun_relD split: option.splits elim!: option_relE) lemma option_eq_expand[autoref_struct_expand]: "op = = option_eq op=" by (auto intro!: ext simp: option_eq_def split: option.splits) end subsection "Sum-Types" consts i_sum :: "interface \<Rightarrow> interface \<Rightarrow> interface" lemmas [autoref_rel_intf] = REL_INTFI[of sum_rel i_sum] context begin interpretation autoref_syn . 
(*lemma [autoref_itype]: "(op = :: _+_ \<Rightarrow> _) ::\<^sub>i \<langle>Il,Ir\<rangle>\<^sub>ii_sum \<rightarrow>\<^sub>i \<langle>Il,Ir\<rangle>\<^sub>ii_sum \<rightarrow>\<^sub>i i_bool" "Inl ::\<^sub>i Il \<rightarrow>\<^sub>i \<langle>Il,Ir\<rangle>\<^sub>ii_sum" "Inr ::\<^sub>i Ir \<rightarrow>\<^sub>i \<langle>Il,Ir\<rangle>\<^sub>ii_sum" "case_sum ::\<^sub>i (Il\<rightarrow>\<^sub>iI) \<rightarrow>\<^sub>i (Ir \<rightarrow>\<^sub>i I) \<rightarrow>\<^sub>i \<langle>Il,Ir\<rangle>\<^sub>ii_sum \<rightarrow>\<^sub>i I" "old.rec_sum ::\<^sub>i (Il\<rightarrow>\<^sub>iI) \<rightarrow>\<^sub>i (Ir \<rightarrow>\<^sub>i I) \<rightarrow>\<^sub>i \<langle>Il,Ir\<rangle>\<^sub>ii_sum \<rightarrow>\<^sub>i I" by auto*) lemma autoref_sum[autoref_rules]: "(Inl,Inl) \<in> Rl \<rightarrow> \<langle>Rl,Rr\<rangle>sum_rel" "(Inr,Inr) \<in> Rr \<rightarrow> \<langle>Rl,Rr\<rangle>sum_rel" "(case_sum,case_sum) \<in> (Rl \<rightarrow> R) \<rightarrow> (Rr \<rightarrow> R) \<rightarrow> \<langle>Rl,Rr\<rangle>sum_rel \<rightarrow> R" "(old.rec_sum,old.rec_sum) \<in> (Rl \<rightarrow> R) \<rightarrow> (Rr \<rightarrow> R) \<rightarrow> \<langle>Rl,Rr\<rangle>sum_rel \<rightarrow> R" by (fastforce split: sum.split dest: fun_relD simp: rec_sum_is_case)+ definition "sum_eq eql eqr s1 s2 \<equiv> case (s1,s2) of (Inl x1, Inl x2) \<Rightarrow> eql x1 x2 | (Inr x1, Inr x2) \<Rightarrow> eqr x1 x2 | _ \<Rightarrow> False" lemma sum_eq_autoref[autoref_rules (overloaded)]: "\<lbrakk>GEN_OP eql op = (Rl\<rightarrow>Rl\<rightarrow>Id); GEN_OP eqr op = (Rr\<rightarrow>Rr\<rightarrow>Id)\<rbrakk> \<Longrightarrow> (sum_eq eql eqr,op =) \<in> \<langle>Rl,Rr\<rangle>sum_rel \<rightarrow> \<langle>Rl,Rr\<rangle>sum_rel \<rightarrow> Id" unfolding sum_eq_def[abs_def] by (fastforce dest: fun_relD elim!: sum_relE) lemma sum_eq_expand[autoref_struct_expand]: "op = = sum_eq op= op=" by (auto intro!: ext simp: sum_eq_def split: sum.splits) lemmas [autoref_rules] = is_Inl_param is_Inr_param lemma autoref_sum_Projl[autoref_rules]: "\<lbrakk>SIDE_PRECOND (is_Inl s); (s',s)\<in>\<langle>Ra,Rb\<rangle>sum_rel\<rbrakk> \<Longrightarrow> (Sum_Type.sum.projl s', (OP Sum_Type.sum.projl ::: \<langle>Ra,Rb\<rangle>sum_rel \<rightarrow> Ra)$s)\<in>Ra" by simp parametricity lemma autoref_sum_Projr[autoref_rules]: "\<lbrakk>SIDE_PRECOND (is_Inr s); (s',s)\<in>\<langle>Ra,Rb\<rangle>sum_rel\<rbrakk> \<Longrightarrow> (Sum_Type.sum.projr s', (OP Sum_Type.sum.projr ::: \<langle>Ra,Rb\<rangle>sum_rel \<rightarrow> Rb)$s)\<in>Rb" by simp parametricity end subsection "List" consts i_list :: "interface \<Rightarrow> interface" lemmas [autoref_rel_intf] = REL_INTFI[of list_rel i_list] context begin interpretation autoref_syn . 
(* term nth lemma [autoref_itype]: "(op = :: _ list \<Rightarrow> _) ::\<^sub>i \<langle>I\<rangle>\<^sub>ii_list \<rightarrow>\<^sub>i \<langle>I\<rangle>\<^sub>ii_list \<rightarrow>\<^sub>i i_bool" "[] ::\<^sub>i \<langle>I\<rangle>\<^sub>ii_list" "op # ::\<^sub>i I \<rightarrow>\<^sub>i \<langle>I\<rangle>\<^sub>ii_list \<rightarrow>\<^sub>i \<langle>I\<rangle>\<^sub>ii_list" "op @ ::\<^sub>i \<langle>I\<rangle>\<^sub>ii_list \<rightarrow>\<^sub>i \<langle>I\<rangle>\<^sub>ii_list \<rightarrow>\<^sub>i \<langle>I\<rangle>\<^sub>ii_list" "case_list ::\<^sub>i Ir \<rightarrow>\<^sub>i (I\<rightarrow>\<^sub>i\<langle>I\<rangle>\<^sub>ii_list\<rightarrow>\<^sub>iIr) \<rightarrow>\<^sub>i \<langle>I\<rangle>\<^sub>ii_list \<rightarrow>\<^sub>i Ir" "rec_list ::\<^sub>i Ir \<rightarrow>\<^sub>i (I\<rightarrow>\<^sub>i\<langle>I\<rangle>\<^sub>ii_list\<rightarrow>\<^sub>iIr\<rightarrow>\<^sub>iIr) \<rightarrow>\<^sub>i \<langle>I\<rangle>\<^sub>ii_list \<rightarrow>\<^sub>i Ir" "map ::\<^sub>i (I1\<rightarrow>\<^sub>iI2) \<rightarrow>\<^sub>i \<langle>I1\<rangle>\<^sub>ii_list \<rightarrow>\<^sub>i \<langle>I2\<rangle>\<^sub>ii_list" "foldl ::\<^sub>i (Ia\<rightarrow>\<^sub>iIb\<rightarrow>\<^sub>iIa) \<rightarrow>\<^sub>i Ia \<rightarrow>\<^sub>i \<langle>Ib\<rangle>\<^sub>ii_list \<rightarrow>\<^sub>i Ia" "foldr ::\<^sub>i (Ia\<rightarrow>\<^sub>iIb\<rightarrow>\<^sub>iIb) \<rightarrow>\<^sub>i \<langle>Ia\<rangle>\<^sub>ii_list \<rightarrow>\<^sub>i Ib \<rightarrow>\<^sub>i Ib" "fold ::\<^sub>i (Ia\<rightarrow>\<^sub>iIb\<rightarrow>\<^sub>iIb) \<rightarrow>\<^sub>i \<langle>Ia\<rangle>\<^sub>ii_list \<rightarrow>\<^sub>i Ib \<rightarrow>\<^sub>i Ib" "take ::\<^sub>i i_nat \<rightarrow>\<^sub>i \<langle>I\<rangle>\<^sub>ii_list \<rightarrow>\<^sub>i \<langle>I\<rangle>\<^sub>ii_list" "drop ::\<^sub>i i_nat \<rightarrow>\<^sub>i \<langle>I\<rangle>\<^sub>ii_list \<rightarrow>\<^sub>i \<langle>I\<rangle>\<^sub>ii_list" "length ::\<^sub>i \<langle>I\<rangle>\<^sub>ii_list \<rightarrow>\<^sub>i i_nat" "nth ::\<^sub>i \<langle>I\<rangle>\<^sub>ii_list \<rightarrow>\<^sub>i i_nat \<rightarrow>\<^sub>i I" "hd ::\<^sub>i \<langle>I\<rangle>\<^sub>ii_list \<rightarrow>\<^sub>i I" "tl ::\<^sub>i \<langle>I\<rangle>\<^sub>ii_list \<rightarrow>\<^sub>i \<langle>I\<rangle>\<^sub>ii_list" "last ::\<^sub>i \<langle>I\<rangle>\<^sub>ii_list \<rightarrow>\<^sub>i I" "butlast ::\<^sub>i \<langle>I\<rangle>\<^sub>ii_list \<rightarrow>\<^sub>i \<langle>I\<rangle>\<^sub>ii_list" by auto *) lemma autoref_append[autoref_rules]: "(append, append)\<in>\<langle>R\<rangle>list_rel \<rightarrow> \<langle>R\<rangle>list_rel \<rightarrow> \<langle>R\<rangle>list_rel" by (auto simp: list_rel_def list_all2_appendI) lemma refine_list[autoref_rules]: "(Nil,Nil)\<in>\<langle>R\<rangle>list_rel" "(Cons,Cons)\<in>R \<rightarrow> \<langle>R\<rangle>list_rel \<rightarrow> \<langle>R\<rangle>list_rel" "(case_list,case_list)\<in>Rr\<rightarrow>(R\<rightarrow>\<langle>R\<rangle>list_rel\<rightarrow>Rr)\<rightarrow>\<langle>R\<rangle>list_rel\<rightarrow>Rr" apply (force dest: fun_relD split: list.split)+ done lemma autoref_rec_list[autoref_rules]: "(rec_list,rec_list) \<in> Ra \<rightarrow> (Rb \<rightarrow> \<langle>Rb\<rangle>list_rel \<rightarrow> Ra \<rightarrow> Ra) \<rightarrow> \<langle>Rb\<rangle>list_rel \<rightarrow> Ra" proof (intro fun_relI) case (goal1 a a' f f' l l') from goal1(3) show ?case using goal1(1,2) apply (induct arbitrary: a a') apply simp apply (fastforce dest: fun_relD) done qed lemma 
refine_map[autoref_rules]: "(map,map)\<in>(R1\<rightarrow>R2) \<rightarrow> \<langle>R1\<rangle>list_rel \<rightarrow> \<langle>R2\<rangle>list_rel" using [[autoref_sbias = -1]] unfolding map_rec[abs_def] by autoref lemma refine_fold[autoref_rules]: "(fold,fold)\<in>(Re\<rightarrow>Rs\<rightarrow>Rs) \<rightarrow> \<langle>Re\<rangle>list_rel \<rightarrow> Rs \<rightarrow> Rs" "(foldl,foldl)\<in>(Rs\<rightarrow>Re\<rightarrow>Rs) \<rightarrow> Rs \<rightarrow> \<langle>Re\<rangle>list_rel \<rightarrow> Rs" "(foldr,foldr)\<in>(Re\<rightarrow>Rs\<rightarrow>Rs) \<rightarrow> \<langle>Re\<rangle>list_rel \<rightarrow> Rs \<rightarrow> Rs" unfolding List.fold_def List.foldr_def List.foldl_def by (autoref)+ schematic_lemma autoref_take[autoref_rules]: "(take,take)\<in>(?R::(_\<times>_) set)" unfolding take_def by autoref schematic_lemma autoref_drop[autoref_rules]: "(drop,drop)\<in>(?R::(_\<times>_) set)" unfolding drop_def by autoref schematic_lemma autoref_length[autoref_rules]: "(length,length)\<in>(?R::(_\<times>_) set)" unfolding size_list_overloaded_def size_list_def by (autoref) lemma autoref_nth[autoref_rules]: assumes "(l,l')\<in>\<langle>R\<rangle>list_rel" assumes "(i,i')\<in>Id" assumes "SIDE_PRECOND (i' < length l')" shows "(nth l i,(OP nth ::: \<langle>R\<rangle>list_rel \<rightarrow> Id \<rightarrow> R)$l'$i')\<in>R" unfolding ANNOT_def using assms apply (induct arbitrary: i i') apply simp apply (case_tac i') apply auto done fun list_eq :: "('a \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> 'a list \<Rightarrow> 'a list \<Rightarrow> bool" where "list_eq eq [] [] \<longleftrightarrow> True" | "list_eq eq (a#l) (a'#l') \<longleftrightarrow> (if eq a a' then list_eq eq l l' else False)" | "list_eq _ _ _ \<longleftrightarrow> False" lemma autoref_list_eq_aux: " (list_eq,list_eq) \<in> (R \<rightarrow> R \<rightarrow> Id) \<rightarrow> \<langle>R\<rangle>list_rel \<rightarrow> \<langle>R\<rangle>list_rel \<rightarrow> Id" proof (intro fun_relI) case (goal1 eq eq' l1 l1' l2 l2') thus ?case apply - apply (induct eq' l1' l2' arbitrary: l1 l2 rule: list_eq.induct) apply simp apply (case_tac l1) apply simp apply (case_tac l2) apply (simp) apply (auto dest: fun_relD) [] apply (case_tac l1) apply simp apply simp apply (case_tac l2) apply simp apply simp done qed lemma list_eq_expand[autoref_struct_expand]: "(op =) = (list_eq op =)" proof (intro ext) fix l1 l2 :: "'a list" show "(l1 = l2) \<longleftrightarrow> list_eq op = l1 l2" apply (induct "op = :: 'a \<Rightarrow> _" l1 l2 rule: list_eq.induct) apply simp_all done qed lemma autoref_list_eq[autoref_rules (overloaded)]: "GEN_OP eq op = (R\<rightarrow>R\<rightarrow>Id) \<Longrightarrow> (list_eq eq, op =) \<in> \<langle>R\<rangle>list_rel \<rightarrow> \<langle>R\<rangle>list_rel \<rightarrow> Id" unfolding autoref_tag_defs apply (subst list_eq_expand) apply (parametricity add: autoref_list_eq_aux) done lemma autoref_hd[autoref_rules]: "\<lbrakk> SIDE_PRECOND (l'\<noteq>[]); (l,l') \<in> \<langle>R\<rangle>list_rel \<rbrakk> \<Longrightarrow> (hd l,(OP hd ::: \<langle>R\<rangle>list_rel \<rightarrow> R)$l') \<in> R" apply (simp add: ANNOT_def) apply (cases l') apply simp apply (cases l) apply auto done lemma autoref_tl[autoref_rules]: "(tl,tl) \<in> \<langle>R\<rangle>list_rel \<rightarrow> \<langle>R\<rangle>list_rel" unfolding tl_def[abs_def] by autoref definition [simp]: "is_Nil a \<equiv> case a of [] \<Rightarrow> True | _ \<Rightarrow> False" lemma is_Nil_pat[autoref_op_pat]: "a=[] \<equiv> (OP is_Nil :::\<^sub>i 
\<langle>I\<rangle>\<^sub>ii_list \<rightarrow>\<^sub>i i_bool)$a" "[]=a \<equiv> (OP is_Nil :::\<^sub>i \<langle>I\<rangle>\<^sub>ii_list \<rightarrow>\<^sub>i i_bool)$a" by (auto intro!: eq_reflection split: list.splits) lemma autoref_is_Nil[param,autoref_rules]: "(is_Nil,is_Nil)\<in>\<langle>R\<rangle>list_rel \<rightarrow> bool_rel" by (auto split: list.splits) lemma conv_to_is_Nil: "l=[] \<longleftrightarrow> is_Nil l" "[]=l \<longleftrightarrow> is_Nil l" unfolding is_Nil_def by (auto split: list.split) lemma autoref_butlast[param, autoref_rules]: "(butlast,butlast) \<in> \<langle>R\<rangle>list_rel \<rightarrow> \<langle>R\<rangle>list_rel" unfolding butlast_def conv_to_is_Nil by parametricity definition [simp]: "op_list_singleton x \<equiv> [x]" lemma op_list_singleton_pat[autoref_op_pat]: "[x] \<equiv> (OP op_list_singleton :::\<^sub>i I \<rightarrow>\<^sub>i \<langle>I\<rangle>\<^sub>ii_list)$x" by simp lemma autoref_list_singleton[autoref_rules]: "(\<lambda>a. [a],op_list_singleton) \<in> R \<rightarrow> \<langle>R\<rangle>list_rel" by auto definition [simp]: "op_list_append_elem s x \<equiv> s@[x]" lemma pat_list_append_elem[autoref_op_pat]: "s@[x] \<equiv> (OP op_list_append_elem :::\<^sub>i \<langle>I\<rangle>\<^sub>ii_list \<rightarrow>\<^sub>i I \<rightarrow>\<^sub>i \<langle>I\<rangle>\<^sub>ii_list)$s$x" by (simp add: relAPP_def) lemma autoref_list_append_elem[autoref_rules]: "(\<lambda>s x. s@[x], op_list_append_elem) \<in> \<langle>R\<rangle>list_rel \<rightarrow> R \<rightarrow> \<langle>R\<rangle>list_rel" unfolding op_list_append_elem_def[abs_def] by parametricity declare param_rev[autoref_rules] declare param_all_interval_nat[autoref_rules] end subsection "Examples" text {* Be careful to make the concrete type a schematic type variable. The default behaviour of @{text "schematic_lemma"} makes it a fixed variable, that will not unify with the infered term! *} schematic_lemma "(?f::?'c,[1,2,3]@[4::nat])\<in>?R" by autoref schematic_lemma "(?f::?'c,[1::nat, 2,3,4,5,6,7,8,9,0,1,43,5,5,435,5,1,5,6,5,6,5,63,56 ] )\<in>?R" apply (autoref) done schematic_lemma "(?f::?'c,[1,2,3] = [])\<in>?R" by autoref text {* When specifying custom refinement rules on the fly, be careful with the type-inference between @{text "notes"} and @{text "shows"}. It's too easy to ,,decouple'' the type @{text "'a"} in the autoref-rule and the actual goal, as shown below! *} schematic_lemma notes [autoref_rules] = IdI[where 'a="'a"] notes [autoref_itype] = itypeI[where 't="'a::numeral" and I=i_std] shows "(?f::?'c, hd [a,b,c::'a::numeral])\<in>?R" txt {* The autoref-rule is bound with type @{text "'a::typ"}, while the goal statement has @{text "'a::numeral"}! *} apply (autoref (keep_goal)) txt {* We get an unsolved goal, as it finds no rule to translate @{text "a"} *} oops text {* Here comes the correct version. Note the duplicate sort annotation of type @{text "'a"}: *} schematic_lemma notes [autoref_rules_raw] = IdI[where 'a="'a::numeral"] notes [autoref_itype] = itypeI[where 't="'a::numeral" and I=i_std] shows "(?f::?'c, hd [a,b,c::'a::numeral])\<in>?R" by (autoref) text {* Special cases of equality: Note that we do not require equality on the element type! 
*} schematic_lemma (*notes [autoref_itype] = itypeI[of a "\<langle>I\<rangle>\<^sub>ii_option"]*) assumes [autoref_rules]: "(ai,a)\<in>\<langle>R\<rangle>option_rel" shows "(?f::?'c, a = None)\<in>?R" apply (autoref (keep_goal)) done schematic_lemma (*notes [autoref_itype] = itypeI[of a "\<langle>I\<rangle>\<^sub>ii_list"]*) assumes [autoref_rules]: "(ai,a)\<in>\<langle>R\<rangle>list_rel" shows "(?f::?'c, [] = a)\<in>?R" apply (autoref (keep_goal)) done schematic_lemma shows "(?f::?'c, [1,2] = [2,3::nat])\<in>?R" apply (autoref (keep_goal)) done end
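For orientation, the function relator that all of the rules above are phrased with is the standard pointwise one; in conventional set notation (rather than Isabelle syntax),
\[
(f,f') \in R \to S \;\Longleftrightarrow\; \forall (a,a') \in R.\; (f\,a,\, f'\,a') \in S,
\]
so a rule such as $(\mathit{Cons},\mathit{Cons}) \in R \to \langle R\rangle\mathit{list\_rel} \to \langle R\rangle\mathit{list\_rel}$ states that consing related heads onto related tails yields related lists.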
""" Kernel Smoothing ================ This example uses different kernel smoothing methods over the phoneme data set and shows how cross validations scores vary over a range of different parameters used in the smoothing methods. It also show examples of undersmoothing and oversmoothing. """ # Author: Miguel Carbajo Berrocal # License: MIT import matplotlib.pylab as plt import numpy as np import skfda import skfda.preprocessing.smoothing.kernel_smoothers as ks import skfda.preprocessing.smoothing.validation as val ############################################################################## # # For this example, we will use the # :func:`phoneme <skfda.datasets.fetch_phoneme>` dataset. This dataset # contains the log-periodograms of several phoneme pronunciations. The phoneme # curves are very irregular and noisy, so we usually will want to smooth them # as a preprocessing step. # # As an example, we will smooth the first 300 curves only. In the following # plot, the first five curves are shown. dataset = skfda.datasets.fetch_phoneme() fd = dataset['data'][:300] fd[0:5].plot() ############################################################################## # Here we show the general cross validation scores for different values of the # parameters given to the different smoothing methods. param_values = np.linspace(start=2, stop=25, num=24) # Local linear regression kernel smoothing. llr = val.SmoothingParameterSearch( ks.LocalLinearRegressionSmoother(), param_values) llr.fit(fd) llr_fd = llr.transform(fd) # Nadaraya-Watson kernel smoothing. nw = val.SmoothingParameterSearch( ks.NadarayaWatsonSmoother(), param_values) nw.fit(fd) nw_fd = nw.transform(fd) # K-nearest neighbours kernel smoothing. knn = val.SmoothingParameterSearch( ks.KNeighborsSmoother(), param_values) knn.fit(fd) knn_fd = knn.transform(fd) fig = plt.figure() ax = fig.add_subplot(1, 1, 1) ax.plot(param_values, knn.cv_results_['mean_test_score'], label='k-nearest neighbors') ax.plot(param_values, llr.cv_results_['mean_test_score'], label='local linear regression') ax.plot(param_values, nw.cv_results_['mean_test_score'], label='Nadaraya-Watson') ax.legend() fig ############################################################################## # We can plot the smoothed curves corresponding to the 11th element of the # data set (this is a random choice) for the three different smoothing # methods. fig = plt.figure() ax = fig.add_subplot(1, 1, 1) ax.set_xlabel('Smoothing method parameter') ax.set_ylabel('GCV score') ax.set_title('Scores through GCV for different smoothing methods') fd[10].plot(fig=fig) knn_fd[10].plot(fig=fig) llr_fd[10].plot(fig=fig) nw_fd[10].plot(fig=fig) ax.legend(['original data', 'k-nearest neighbors', 'local linear regression', 'Nadaraya-Watson'], title='Smoothing method') fig ############################################################################## # We can compare the curve before and after the smoothing. ############################################################################## # Not smoothed fd[10].plot() ############################################################################## # Smoothed fig = fd[10].scatter(s=0.5) nw_fd[10].plot(fig=fig, color='green') fig ############################################################################## # Now, we can see the effects of a proper smoothing. We can plot the same 5 # samples from the beginning using the Nadaraya-Watson kernel smoother with # the best choice of parameter. 
nw_fd[0:5].plot()

##############################################################################
# We can also appreciate the effects of undersmoothing and oversmoothing in
# the following plots.

fd_us = ks.NadarayaWatsonSmoother(smoothing_parameter=2).fit_transform(fd[10])
fd_os = ks.NadarayaWatsonSmoother(smoothing_parameter=15).fit_transform(fd[10])

##############################################################################
# Under-smoothed

fig = fd[10].scatter(s=0.5)
fd_us.plot(fig=fig)

##############################################################################
# Over-smoothed

fig = fd[10].scatter(s=0.5)
fd_os.plot(fig=fig)
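##############################################################################
# To make explicit what the Nadaraya-Watson smoother computes, here is a
# minimal NumPy sketch of the estimator (illustrative only; this is not the
# skfda implementation used above). The estimate at a point x is a
# kernel-weighted average of the observations:
#
#     y_hat(x) = sum_i K((x - x_i) / h) * y_i / sum_i K((x - x_i) / h)


def nadaraya_watson(x_eval, x_obs, y_obs, bandwidth):
    """Nadaraya-Watson estimate with a Gaussian kernel (illustrative)."""
    # Pairwise scaled distances between evaluation and observation points.
    u = (x_eval[:, None] - x_obs[None, :]) / bandwidth
    weights = np.exp(-0.5 * u ** 2)                # unnormalised Gaussian kernel
    weights /= weights.sum(axis=1, keepdims=True)  # each row sums to one
    return weights @ y_obs


# Toy usage on a noisy sine curve (independent of the phoneme data).
rng = np.random.default_rng(0)
x_toy = np.linspace(0, 2 * np.pi, 100)
y_toy = np.sin(x_toy) + 0.3 * rng.standard_normal(x_toy.size)
y_toy_smooth = nadaraya_watson(x_toy, x_toy, y_toy, bandwidth=0.5)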
Cancellation or alteration of bookings is entertained, but prior information is required at least a week before your check-in date. Kindly also refer to our Reservation/Cancellation or Amendment Policy. Early check-in or late check-out is subject to availability and may be charged by the hotel directly. Check-in time is 12:00 PM; check-out time is 10:00 AM. To make modifications or cancellations, please call our reservation nos. +91-135-2632932, 2631732 (10 AM to 6 PM) or the resort manager. Your credit card will be charged as Hotel Shanti Plaza. Hotel Shanti Plaza reserves the right to change/modify the policy at any time at its own discretion and without any prior notice. More than 7 days prior to arrival date: 20% of the first room night or Rs. 500.00, whichever is more. Less than 7 and more than 2 days prior to arrival date: 50% of the first room night. Less than 2 days prior to arrival date: the first room night.
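Read as a rule, the schedule above fixes the fee from the number of days between cancellation and arrival. A hypothetical Python sketch, not an official calculator (the behaviour at exactly 7 or 2 days is not specified by the policy):

def cancellation_fee(days_before_arrival, first_night_rate):
    # More than 7 days out: 20% of the first room night or Rs. 500, whichever is more.
    if days_before_arrival > 7:
        return max(0.20 * first_night_rate, 500.00)
    # Less than 7 and more than 2 days out: 50% of the first room night.
    elif days_before_arrival > 2:
        return 0.50 * first_night_rate
    # Less than 2 days out: the full first room night.
    else:
        return first_night_rate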
/- Copyright (c) 2017 Mario Carneiro. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Bhavik Mehta -/ import data.int.cast.lemmas import algebra.field.defs import algebra.group_with_zero.units.lemmas /-! # Cast of integers into fields > THIS FILE IS SYNCHRONIZED WITH MATHLIB4. > Any changes to this file require a corresponding PR to mathlib4. This file concerns the canonical homomorphism `ℤ → F`, where `F` is a field. ## Main results * `int.cast_div`: if `n` divides `m`, then `↑(m / n) = ↑m / ↑n` -/ namespace int open nat variables {α : Type*} /-- Auxiliary lemma for norm_cast to move the cast `-↑n` upwards to `↑-↑n`. (The restriction to `division_ring` is necessary, otherwise this would also apply in the case where `R = ℤ` and cause nontermination.) -/ @[norm_cast] lemma cast_neg_nat_cast {R} [division_ring R] (n : ℕ) : ((-n : ℤ) : R) = -n := by simp @[simp] theorem cast_div [division_ring α] {m n : ℤ} (n_dvd : n ∣ m) (n_nonzero : (n : α) ≠ 0) : ((m / n : ℤ) : α) = m / n := begin rcases n_dvd with ⟨k, rfl⟩, have : n ≠ 0, { rintro rfl, simpa using n_nonzero }, rw [int.mul_div_cancel_left _ this, mul_comm n k, int.cast_mul, mul_div_cancel _ n_nonzero], end end int
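In conventional notation, `int.cast_div` states that for a division ring $F$ and integers $m, n$,
\[
n \mid m \ \text{ and } \ (n : F) \neq 0 \quad\Longrightarrow\quad \big(\uparrow(m / n) : F\big) = \uparrow m \,/\, \uparrow n .
\]
For example, with $m = 12$, $n = 3$ and $F = \mathbb{Q}$, both sides equal $4$. The divisibility hypothesis is essential because integer division truncates: $(7 / 2 : \mathbb{Z}) = 3$, whereas $(7 : \mathbb{Q}) / 2 = 3.5$.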
The Dale Hardware 63mm bolt through tubular mortice latch has been specifically manufactured to be u.. The Dale Hardware 76mm bolt through tubular mortice latch has been specifically manufactured to be u.. The Dale adjustable roller catch is used on cabinet doors and some full size doors where a latch is .. Dale Hardware Heavy Duty Adjustable Roller Catch For use on doors. Adjustable for gap between door a.. Magnetic Catch in Brown at a size of 40mm x 18mm. This magnetic catch is perfect for cabinets where .. Traditional style Dead Locking Night Latch and cylinder finished in Champagne (90mm). This 90mm wid.. The Dale Hardware 63mm Tubular Mortice Latch has a standard tubular design with a shimmering Electro.. Mortice latch (tubular) at a size of 76mm (case depth) with a 55mm back set. This mortice latch fea.. The Calver latch lever handle situated on a back plate. The Calver is supplied in a Polished Chrome.. The Dale Hardware Smartlatch is available in either satin nickel or polished chrome finishes and is ..
------------------------------------------------------------------------ -- The Agda standard library -- -- Lists where all elements satisfy a given property ------------------------------------------------------------------------ {-# OPTIONS --without-K --safe #-} module Data.List.Relation.Unary.All where open import Category.Applicative open import Category.Monad open import Data.Empty using (⊥) open import Data.List.Base as List using (List; []; _∷_) open import Data.List.Relation.Unary.Any as Any using (Any; here; there) open import Data.List.Membership.Propositional using (_∈_) open import Data.Product as Prod using (∃; -,_; _×_; _,_; proj₁; proj₂; uncurry) open import Function open import Level open import Relation.Nullary hiding (Irrelevant) import Relation.Nullary.Decidable as Dec open import Relation.Nullary.Product using (_×-dec_) open import Relation.Unary hiding (_∈_) open import Relation.Binary.PropositionalEquality as P private variable a b p q r : Level A : Set a B : Set b ------------------------------------------------------------------------ -- Definitions -- Given a predicate P, then All P xs means that every element in xs -- satisfies P. See `Relation.Unary` for an explanation of predicates. infixr 5 _∷_ data All {A : Set a} (P : Pred A p) : Pred (List A) (a ⊔ p) where [] : All P [] _∷_ : ∀ {x xs} (px : P x) (pxs : All P xs) → All P (x ∷ xs) -- All P xs is a finite map from indices x ∈ xs to content P x. -- Relation pxs [ i ]= px states that, in map pxs, key i : x ∈ xs points to value px. infix 4 _[_]=_ data _[_]=_ {A : Set a} {P : Pred A p} : ∀ {x xs} → All P xs → x ∈ xs → P x → Set (a ⊔ p) where here : ∀ {x xs} {px : P x} {pxs : All P xs} → px ∷ pxs [ here refl ]= px there : ∀ {x xs y} {px : P x} {pxs : All P xs} {py : P y} {i : x ∈ xs} → pxs [ i ]= px → py ∷ pxs [ there i ]= px -- A list is empty if having an element is impossible. 
Null : Pred (List A) _ Null = All (λ _ → ⊥) ------------------------------------------------------------------------ -- Operations on All module _ {P : Pred A p} where uncons : ∀ {x xs} → All P (x ∷ xs) → P x × All P xs uncons (px ∷ pxs) = px , pxs head : ∀ {x xs} → All P (x ∷ xs) → P x head = proj₁ ∘ uncons tail : ∀ {x xs} → All P (x ∷ xs) → All P xs tail = proj₂ ∘ uncons tabulate : ∀ {xs} → (∀ {x} → x ∈ xs → P x) → All P xs tabulate {xs = []} hyp = [] tabulate {xs = x ∷ xs} hyp = hyp (here refl) ∷ tabulate (hyp ∘ there) reduce : (f : ∀ {x} → P x → B) → ∀ {xs} → All P xs → List B reduce f [] = [] reduce f (px ∷ pxs) = f px ∷ reduce f pxs construct : (f : B → ∃ P) (xs : List B) → ∃ (All P) construct f [] = [] , [] construct f (x ∷ xs) = Prod.zip _∷_ _∷_ (f x) (construct f xs) fromList : (xs : List (∃ P)) → All P (List.map proj₁ xs) fromList [] = [] fromList ((x , p) ∷ xps) = p ∷ fromList xps toList : ∀ {xs} → All P xs → List (∃ P) toList pxs = reduce (λ {x} px → x , px) pxs module _ {P : Pred A p} {Q : Pred A q} where map : P ⊆ Q → All P ⊆ All Q map g [] = [] map g (px ∷ pxs) = g px ∷ map g pxs module _ {P : Pred A p} {Q : Pred A q} {R : Pred A r} where zipWith : P ∩ Q ⊆ R → All P ∩ All Q ⊆ All R zipWith f ([] , []) = [] zipWith f (px ∷ pxs , qx ∷ qxs) = f (px , qx) ∷ zipWith f (pxs , qxs) unzipWith : R ⊆ P ∩ Q → All R ⊆ All P ∩ All Q unzipWith f [] = [] , [] unzipWith f (rx ∷ rxs) = Prod.zip _∷_ _∷_ (f rx) (unzipWith f rxs) module _ {P : Pred A p} {Q : Pred A q} where zip : All P ∩ All Q ⊆ All (P ∩ Q) zip = zipWith id unzip : All (P ∩ Q) ⊆ All P ∩ All Q unzip = unzipWith id self : ∀ {xs : List A} → All (const A) xs self = tabulate (λ {x} _ → x) ------------------------------------------------------------------------ -- (weak) updateAt module _ {P : Pred A p} where infixl 6 _[_]%=_ _[_]≔_ updateAt : ∀ {x xs} → x ∈ xs → (P x → P x) → All P xs → All P xs updateAt () f [] updateAt (here refl) f (px ∷ pxs) = f px ∷ pxs updateAt (there i) f (px ∷ pxs) = px ∷ updateAt i f pxs _[_]%=_ : ∀ {x xs} → All P xs → x ∈ xs → (P x → P x) → All P xs pxs [ i ]%= f = updateAt i f pxs _[_]≔_ : ∀ {x xs} → All P xs → x ∈ xs → P x → All P xs pxs [ i ]≔ px = pxs [ i ]%= const px ------------------------------------------------------------------------ -- Traversable-like functions module _ (p : Level) {A : Set a} {P : Pred A (a ⊔ p)} {F : Set (a ⊔ p) → Set (a ⊔ p)} (App : RawApplicative F) where open RawApplicative App sequenceA : All (F ∘′ P) ⊆ F ∘′ All P sequenceA [] = pure [] sequenceA (x ∷ xs) = _∷_ <$> x ⊛ sequenceA xs mapA : ∀ {Q : Pred A q} → (Q ⊆ F ∘′ P) → All Q ⊆ (F ∘′ All P) mapA f = sequenceA ∘′ map f forA : ∀ {Q : Pred A q} {xs} → All Q xs → (Q ⊆ F ∘′ P) → F (All P xs) forA qxs f = mapA f qxs module _ (p : Level) {A : Set a} {P : Pred A (a ⊔ p)} {M : Set (a ⊔ p) → Set (a ⊔ p)} (Mon : RawMonad M) where private App = RawMonad.rawIApplicative Mon sequenceM : All (M ∘′ P) ⊆ M ∘′ All P sequenceM = sequenceA p App mapM : ∀ {Q : Pred A q} → (Q ⊆ M ∘′ P) → All Q ⊆ (M ∘′ All P) mapM = mapA p App forM : ∀ {Q : Pred A q} {xs} → All Q xs → (Q ⊆ M ∘′ P) → M (All P xs) forM = forA p App ------------------------------------------------------------------------ -- Generalised lookup based on a proof of Any module _ {P : Pred A p} {Q : Pred A q} where lookupAny : ∀ {xs} → All P xs → (i : Any Q xs) → (P ∩ Q) (Any.lookup i) lookupAny (px ∷ pxs) (here qx) = px , qx lookupAny (px ∷ pxs) (there i) = lookupAny pxs i module _ {P : Pred A p} {Q : Pred A q} {R : Pred A r} where lookupWith : ∀[ P ⇒ Q ⇒ R ] → ∀ {xs} → All P xs → 
(i : Any Q xs) → R (Any.lookup i) lookupWith f pxs i = Prod.uncurry f (lookupAny pxs i) module _ {P : Pred A p} where lookup : ∀ {xs} → All P xs → (∀ {x} → x ∈ xs → P x) lookup pxs = lookupWith (λ { px refl → px }) pxs ------------------------------------------------------------------------ -- Properties of predicates preserved by All module _ {P : Pred A p} where all : Decidable P → Decidable (All P) all p [] = yes [] all p (x ∷ xs) = Dec.map′ (uncurry _∷_) uncons (p x ×-dec all p xs) universal : Universal P → Universal (All P) universal u [] = [] universal u (x ∷ xs) = u x ∷ universal u xs irrelevant : Irrelevant P → Irrelevant (All P) irrelevant irr [] [] = P.refl irrelevant irr (px₁ ∷ pxs₁) (px₂ ∷ pxs₂) = P.cong₂ _∷_ (irr px₁ px₂) (irrelevant irr pxs₁ pxs₂) satisfiable : Satisfiable (All P) satisfiable = [] , []
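In logical terms, `tabulate` and `lookup` above witness the pointwise reading of the predicate; a sketch in conventional notation:
\[
\mathrm{All}\;P\;xs \;\longleftrightarrow\; \big(\forall x.\; x \in xs \to P\,x\big),
\]
where `tabulate` builds the inductive witness from the right-hand side and `lookup` projects it back out.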
Also, antimony was identified as one of 12 critical raw materials for the EU in a report published in 2011, primarily due to the lack of supply outside China.
= Soviet cruiser Krasnyi Kavkaz =
This industry report offers the most up-to-date market data on the current market situation, trends and future outlook for razors and razor blades in the Philippines. The research includes historic market data from 2008 to 2014 and forecasts until 2019, which makes the report an invaluable resource for industry executives; marketing, sales and product managers; analysts; and others looking for key industry data in readily accessible and clearly presented tables and graphs. - What is the current size of the razor and razor blade market in the Philippines?
# Load the 2008 airline on-time performance data set.
myDataFile <- read.csv('/Users/saugat/Downloads/2008.csv')

# Flight counts by origin airport, unsorted, then sorted ascending/descending.
table(myDataFile$Origin)
sort(table(myDataFile$Origin))
sort(table(myDataFile$Origin), decreasing=TRUE)

# Try out seq(), then bin scheduled departure times into hourly intervals
# and plot the distribution of departures over the day.
seq(0, 100, by=10)
cut(myDataFile$DepTime, breaks = seq(0, 2400, 100))
plot(table(cut(myDataFile$DepTime, breaks = seq(0, 2400, by = 100))))

# Mean departure delay and mean flight distance per origin airport.
sort(tapply(myDataFile$DepDelay, myDataFile$Origin, mean, na.rm = TRUE))
sort(tapply(myDataFile$Distance, myDataFile$Origin, mean), decreasing = TRUE)

# Mean arrival delay by day of week, overall and for flights into IND only.
tapply(myDataFile$ArrDelay, myDataFile$DayOfWeek, mean, na.rm = TRUE)
tapply(myDataFile$ArrDelay[myDataFile$Dest == 'IND'],
       myDataFile$DayOfWeek[myDataFile$Dest == 'IND'], mean, na.rm = TRUE)

# Mean departure delay per carrier, worst first.
sort(tapply(myDataFile$DepDelay, myDataFile$UniqueCarrier, mean, na.rm = TRUE),
     decreasing = TRUE)

# Mean departure delay per calendar date, overall and for ORD departures only.
dates <- paste(myDataFile$Month, myDataFile$DayofMonth, myDataFile$Year, sep = "/")
sort(tapply(myDataFile$DepDelay, dates, mean, na.rm = TRUE), decreasing = TRUE)
sort(tapply(myDataFile$DepDelay[myDataFile$Origin == 'ORD'],
            dates[myDataFile$Origin == 'ORD'], mean, na.rm = TRUE),
     decreasing = TRUE)

# The six dates with the largest mean departure delay on the ATL -> LAX route.
atlToLax <- myDataFile$Origin == 'ATL' & myDataFile$Dest == 'LAX'
tail(sort(tapply(myDataFile$DepDelay[atlToLax], dates[atlToLax], mean, na.rm = TRUE)))
function A = uplus(A)
%UPLUS Long unary plus +A
%
% Unary plus leaves its operand unchanged, so the input argument is
% returned directly as the output argument and no function body is needed.
%
% written 12/30/98 S.M. Rump
% modified 04/04/04 S.M. Rump set round to nearest for safety
% modified 04/06/05 S.M. Rump rounding unchanged
%
function x = lprec1(c, d, h, g, extmod) % LPREC1 One-level Laplacian pyramid reconstruction % % x = lprec1(c, d, h, g) % % Input: % c: coarse signal at half size % d: detail signal at full size % h, g: two biorthogonal 1-D lowpass filters % extmod: [optional] extension mode (default is 'per') % % Output: % x: reconstructed signal % % Note: This uses a new reconstruction method by Do and Vetterli, % "Framming pyramids", IEEE Trans. on Sig Proc., Sep. 2003. % % See also: LPDEC1 if ~exist('extmod', 'var') extmod = 'per'; end nd = ndims(c); % First, filter and downsample the detail image r = d; for dim = 1:nd r = filtdn(r, h, dim, extmod, 0); end % Then subtract the result from the coarse signal p = c - r; % Even size filter needs to be adjusted to obtain perfect reconstruction adjust = mod(length(g) + 1, 2); % Then upsample and filter for dim = 1:nd p = upfilt(p, g, dim, extmod, adjust); end % Final combination x = p + d;
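As a cross-check of the data flow in lprec1 (filter and downsample the detail, subtract from the coarse signal, upsample and filter, add the detail back), here is a rough 1-D NumPy sketch. The periodic filtering below and the filter taps passed in are placeholders rather than the biorthogonal pair the MATLAB code assumes, and the even-length filter adjustment is omitted, so this illustrates the structure rather than perfect reconstruction:

import numpy as np

def filtdn(x, h):
    """Periodic (circular) filtering followed by downsampling by 2."""
    n = len(x)
    y = np.array([sum(h[k] * x[(i - k) % n] for k in range(len(h)))
                  for i in range(n)])
    return y[::2]

def upfilt(x, g, n_out):
    """Upsampling by 2 (zero insertion) followed by periodic filtering."""
    up = np.zeros(n_out)       # assumes n_out == 2 * len(x), n_out even
    up[::2] = x
    return np.array([sum(g[k] * up[(i - k) % n_out] for k in range(len(g)))
                     for i in range(n_out)])

def lprec1_sketch(c, d, h, g):
    """One-level reconstruction; assumes len(c) == len(d) // 2, len(d) even."""
    r = filtdn(d, h)           # first, filter and downsample the detail
    p = c - r                  # then subtract the result from the coarse signal
    p = upfilt(p, g, len(d))   # then upsample and filter
    return p + d               # final combination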
# Copyright (c) Microsoft Corporation. # Licensed under the MIT License. import os import torch import numpy as np from PIL import Image from data.pix2pix_dataset import Pix2pixDataset from data.base_dataset import get_params, get_transform class CelebAHQDataset(Pix2pixDataset): #hair, skin, l_brow, r_blow, l_eye, r_eye, l_ear, r_ear, nose, u_lip, mouth, l_lip, neck, #cloth, hat, eye_g, ear_r, neck_l @staticmethod def modify_commandline_options(parser, is_train): parser = Pix2pixDataset.modify_commandline_options(parser, is_train) parser.set_defaults(preprocess_mode='resize_and_crop') parser.set_defaults(no_pairing_check=True) if is_train: parser.set_defaults(load_size=286) else: parser.set_defaults(load_size=256) parser.set_defaults(crop_size=256) parser.set_defaults(display_winsize=256) parser.set_defaults(label_nc=19) parser.set_defaults(contain_dontcare_label=False) parser.set_defaults(cache_filelist_read=False) parser.set_defaults(cache_filelist_write=False) return parser def get_paths(self, opt): if opt.phase == 'train': fd = open(os.path.join(opt.dataroot, 'train.txt')) lines = fd.readlines() fd.close() elif opt.phase == 'test': fd = open(os.path.join(opt.dataroot, 'val.txt')) lines = fd.readlines() fd.close() image_paths = [] label_paths = [] for i in range(len(lines)): image_paths.append(os.path.join(opt.dataroot, 'CelebA-HQ-img', lines[i].strip() + '.jpg')) label_paths.append(os.path.join(opt.dataroot, 'CelebAMask-HQ-mask-anno', 'all_parts_except_glasses', lines[i].strip().zfill(5) + '.png')) return label_paths, image_paths def get_ref(self, opt): extra = '' if opt.phase == 'test': extra = '_test' with open('./data/celebahq_ref{}.txt'.format(extra)) as fd: lines = fd.readlines() ref_dict = {} for i in range(len(lines)): items = lines[i].strip().split(',') key = items[0] if opt.phase == 'test': val = items[1:] else: val = [items[1], items[-1]] ref_dict[key] = val train_test_folder = ('', '') return ref_dict, train_test_folder def get_label_tensor(self, path): # parts = ['skin', 'hair', 'l_brow', 'r_brow', 'l_eye', 'r_eye', 'l_ear', 'r_ear', 'nose', 'u_lip', 'mouth', 'l_lip', 'neck', # 'cloth', 'hat', 'eye_g', 'ear_r', 'neck_l'] label_except_glasses = Image.open(path).convert('L') root, name = path.replace('\\', '/').split('all_parts_except_glasses/') idx = name.split('.')[0] subfolder = str(int(idx) // 2000) if os.path.exists(os.path.join(root, subfolder, idx + '_eye_g.png')): glasses = Image.open(os.path.join(root, subfolder, idx + '_eye_g.png')).convert('L') else: glasses = Image.fromarray(np.zeros(label_except_glasses.size, dtype=np.uint8)) params = get_params(self.opt, label_except_glasses.size) transform_label = get_transform(self.opt, params, method=Image.NEAREST, normalize=False) label_except_glasses_tensor = transform_label(label_except_glasses) * 255.0 glasses_tensor = transform_label(glasses) label_tensor = torch.cat((label_except_glasses_tensor, glasses_tensor), dim=0) return label_tensor, params def imgpath_to_labelpath(self, path): root, name = path.split('CelebA-HQ-img/') label_path = os.path.join(root, 'CelebAMask-HQ-mask-anno', 'all_parts_except_glasses', name.split('.')[0].zfill(5) + '.png') return label_path
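The mask paths that `get_label_tensor` and `imgpath_to_labelpath` assume follow a fixed layout: annotation masks live under CelebAMask-HQ-mask-anno, grouped in subfolders of 2000 images, with zero-padded five-digit file names. A small hypothetical helper that spells out that convention for a single part mask:

import os

def part_mask_path(anno_root, idx, part='eye_g'):
    """Path of one part mask under the layout assumed above (hypothetical helper)."""
    subfolder = str(int(idx) // 2000)                # masks are grouped 2000 per folder
    name = str(idx).zfill(5) + '_' + part + '.png'   # zero-padded 5-digit index
    return os.path.join(anno_root, subfolder, name)

# e.g. part_mask_path('CelebAMask-HQ-mask-anno', 4123)
# -> 'CelebAMask-HQ-mask-anno/2/04123_eye_g.png'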
subsection\<open>Logical and structural substitution\<close> theory Substitution imports DeBruijn begin primrec subst_trm :: "[trm, trm, nat] \<Rightarrow> trm" ("_[_'/_]\<^sup>T" [300, 0, 0] 300) and subst_cmd :: "[cmd, trm, nat] \<Rightarrow> cmd" ("_[_'/_]\<^sup>C" [300, 0, 0] 300) where subst_LVar: "(`i)[s/k]\<^sup>T = (if k < i then `(i-1) else if k = i then s else (`i))" | subst_Lbd: "(\<lambda> T : t)[s/k]\<^sup>T = \<lambda> T : (t[(liftL_trm s 0) / k+1]\<^sup>T)" | subst_App: "(t \<degree> u)[s/k]\<^sup>T = t[s/k]\<^sup>T \<degree> u[s/k]\<^sup>T" | subst_Mu: "(\<mu> T : c)[s/k]\<^sup>T = \<mu> T : (c[(liftM_trm s 0) / k]\<^sup>C)" | subst_MVar: "(<i> t)[s/k]\<^sup>C = <i> (t[s/k]\<^sup>T)" text\<open>Substituting a term for the hole in a context.\<close> primrec ctxt_subst :: "ctxt \<Rightarrow> trm \<Rightarrow> trm" where "ctxt_subst \<diamond> s = s" | "ctxt_subst (E \<^sup>\<bullet> t) s = (ctxt_subst E s)\<degree> t" lemma ctxt_app_subst: shows "ctxt_subst E (ctxt_subst F t) = ctxt_subst (E . F) t" by (induction E, auto) text\<open>The structural substitution is based on Geuvers and al.~\<^cite>\<open>"DBLP:journals/apal/GeuversKM13"\<close>.\<close> primrec struct_subst_trm :: "[trm, nat, nat, ctxt] \<Rightarrow> trm" ("_[_=_ _]\<^sup>T" [300, 0, 0, 0] 300) and struct_subst_cmd :: "[cmd, nat, nat, ctxt] \<Rightarrow> cmd" ("_[_=_ _]\<^sup>C" [300, 0, 0, 0] 300) where struct_LVar: "(`i)[j=k E]\<^sup>T = (`i)" | struct_Lbd: "(\<lambda> T : t)[j=k E]\<^sup>T = (\<lambda> T : (t[j=k (liftL_ctxt E 0)]\<^sup>T))" | struct_App: "(t\<degree>s)[j=k E]\<^sup>T = (t[j=k E]\<^sup>T)\<degree>(s[j=k E]\<^sup>T)" | struct_Mu: "(\<mu> T : c)[j=k E]\<^sup>T = \<mu> T : (c[(j+1)=(k+1) (liftM_ctxt E 0)]\<^sup>C)" | struct_MVar: "(<i> t)[j=k E]\<^sup>C = (if i=j then (<k> (ctxt_subst E (t[j=k E]\<^sup>T))) else (if j<i \<and> i\<le>k then (<i-1> (t[j=k E]\<^sup>T)) else (if k\<le>i \<and> i<j then (<i+1> (t[j=k E]\<^sup>T)) else (<i> (t[j=k E]\<^sup>T)))))" text\<open>Lifting of lambda and mu variables commute with each other\<close> lemma liftLM_comm: "liftL_trm (liftM_trm t n) m = liftM_trm (liftL_trm t m) n" "liftL_cmd (liftM_cmd c n) m = liftM_cmd (liftL_cmd c m) n" by(induct t and c arbitrary: n m and n m) auto lemma liftLM_comm_ctxt: "liftL_ctxt (liftM_ctxt E n) m = liftM_ctxt (liftL_ctxt E m) n" by(induct E arbitrary: n m, auto simp add: liftLM_comm) text\<open>Lifting of $\mu$-variables (almost) commutes.\<close> lemma liftMM_comm: "n\<ge>m \<Longrightarrow> liftM_trm (liftM_trm t n) m = liftM_trm (liftM_trm t m) (Suc n)" "n\<ge>m \<Longrightarrow> liftM_cmd (liftM_cmd c n) m = liftM_cmd (liftM_cmd c m) (Suc n)" by(induct t and c arbitrary: n m and n m) auto lemma liftMM_comm_ctxt: "liftM_ctxt (liftM_ctxt E n) 0 = liftM_ctxt (liftM_ctxt E 0) (n+1)" by(induct E arbitrary: n, auto simp add: liftMM_comm) text\<open>If a $\mu$ variable $i$ doesn't occur in a term or a context, then these remain the same after structural substitution of variable $i$.\<close> lemma liftM_struct_subst: "liftM_trm t i[i=i F]\<^sup>T = liftM_trm t i" "liftM_cmd c i[i=i F]\<^sup>C = liftM_cmd c i" by(induct t and c arbitrary: i F and i F) auto lemma liftM_ctxt_struct_subst: "(ctxt_subst (liftM_ctxt E i) t)[i=i F]\<^sup>T = ctxt_subst (liftM_ctxt E i) (t[i=i F]\<^sup>T)" by(induct E arbitrary: i t F; force simp add: liftM_struct_subst) end
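In conventional \(\lambda\mu\)-calculus notation, the command clause `struct_MVar` above performs the following case split on the \(\mu\)-variable index \(i\) (here \(E\{u\}\) abbreviates `ctxt_subst E u`, plugging \(u\) into the hole of \(E\)):
\[
(\langle i\rangle\, t)[\,j = k\ E\,] =
\begin{cases}
\langle k\rangle\; E\{\,t[\,j = k\ E\,]\,\} & \text{if } i = j,\\
\langle i-1\rangle\; t[\,j = k\ E\,] & \text{if } j < i \le k,\\
\langle i+1\rangle\; t[\,j = k\ E\,] & \text{if } k \le i < j,\\
\langle i\rangle\; t[\,j = k\ E\,] & \text{otherwise.}
\end{cases}
\]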
\section{Orthogonal Complement and Adjoint Map}
\begin{definition}
Suppose $V=U\oplus W$. The projection operator $\pi=\pi_W:V\to W$ onto $W$ is defined via $u+w\mapsto w$ for any $u\in U,w\in W$.
\end{definition}
It is easy to see that $\pi$ is linear and $\pi^2=\pi$.
\begin{remark}
We have $\pi_U=\operatorname{id}-\pi_W$.
\end{remark}
Of course, in the case where $U=W^\perp$, we can say something better.
\begin{lemma}
Let $V$ be an inner product space and $W\le V$ a finite dimensional subspace of $V$. Then:\\
(a) If $\{e_i\}$ is an orthonormal basis of $W$, then $\forall v\in V,\pi(v)=\sum_i\langle v,e_i\rangle e_i$.\\
(b) $\forall v\in V,w\in W,\|v-\pi(v)\|\le\|v-w\|$ with equality iff $w=\pi(v)$.
\end{lemma}
\begin{proof}
Just observe that $v-\pi(v)\in W^\perp$, which is known to be a complementary subspace of $W$. This gives (a) immediately, and for (b) we have $\|v-w\|^2=\|v-\pi(v)+\pi(v)-w\|^2=\|v-\pi(v)\|^2+\|\pi(v)-w\|^2\ge\|v-\pi(v)\|^2$.
\end{proof}
\begin{proposition}
Let $V,W$ be finite dimensional inner product spaces and $\alpha\in L(V,W)$. Then there is a unique linear map $\alpha^\ast:W\to V$ such that $\forall v\in V,w\in W,\langle \alpha(v),w\rangle=\langle v,\alpha^\ast(w)\rangle$. Moreover, if $B,C$ are orthonormal bases of $V,W$, then $[\alpha^\ast]_{C,B}=(\overline{[\alpha]}_{B,C})^\top$.
\end{proposition}
\begin{proof}
Brute-force computation.
\end{proof}
\begin{definition}
This map $\alpha^\ast$ is called the adjoint of $\alpha$.
\end{definition}
\begin{remark}
One might notice that we used the same notation for the adjoint and the dual of a map. This (intentional) abuse of notation can be justified by considering the linear isomorphisms $\psi_{R,V}:V\to V^\ast,\psi_{R,W}:W\to W^\ast$ via $\psi_{R,V}(v)=\langle \cdot,v\rangle,\psi_{R,W}(w)=\langle \cdot,w\rangle$, which immediately satisfy $\alpha^\ast_{\rm adjoint}=\psi_{R,V}^{-1}\circ\alpha^\ast_{\rm dual}\circ\psi_{R,W}$.
\[
\begin{tikzcd}
W^\ast\arrow{r}{\alpha^\ast_{\rm dual}}&V^\ast\\
W\arrow{u}{\psi_{R,W}}\arrow[swap]{r}{\alpha^\ast_{\rm adjoint}}&V\arrow[swap]{u}{\psi_{R,V}}
\end{tikzcd}
\]
\end{remark}
\begin{definition}
Let $V$ be an inner product space. A map $\alpha\in L(V)$ is self-adjoint if $\alpha=\alpha^\ast$, i.e. $\forall v,w\in V,\langle\alpha(v),w\rangle=\langle v,\alpha(w)\rangle$.\\
It is called an isometry if $\alpha^\ast\circ\alpha=\operatorname{id}$, or equivalently $\langle\alpha(v),\alpha(w)\rangle=\langle v,w\rangle$ for any $v,w\in V$.
\end{definition}
\begin{remark}
By the polarisation identity, $\alpha$ is an isometry iff $\|\alpha(v)\|=\|v\|$ for any $v\in V$.
\end{remark}
\begin{lemma}
Let $V$ be a finite dimensional inner product space over $\mathbb R$ (resp. $\mathbb C$). Then $\alpha\in L(V)$ is self-adjoint iff for any orthonormal basis $B$ of $V$, $[\alpha]_B$ is symmetric (resp. Hermitian). It is an isometry iff for any orthonormal basis $B$ of $V$, $[\alpha]_B$ is orthogonal (resp. unitary).
\end{lemma}
\begin{proof}
Immediate.
\end{proof}
The isometries naturally form a subgroup of $L(V)$.
\begin{definition}
Let $V$ be a finite dimensional inner product space over a field $F=\mathbb R$ or $\mathbb C$. The subgroup of isometries $\{\alpha\in L(V):\alpha^\ast\circ\alpha=\operatorname{id}\}\le L(V)$ is called the orthogonal group $O(V)$ of $V$ when $F=\mathbb R$ and the unitary group $U(V)$ of $V$ when $F=\mathbb C$.
\end{definition}
\begin{remark}
Fix an orthonormal basis $\{e_i\}$ of $V$.
Then there is a one-to-one correspondence between the isometries of $V$ and the orthonormal bases of $V$, via $\alpha\leftrightarrow\{\alpha(e_i)\}$.
\end{remark}
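\begin{remark}
As a sketch of the brute-force computation behind the matrix formula above: write $B=\{e_j\}$ and $C=\{f_k\}$. Orthonormality gives, for the $(j,k)$ entry,
\[
([\alpha^\ast]_{C,B})_{jk}
=\langle \alpha^\ast(f_k),e_j\rangle
=\overline{\langle e_j,\alpha^\ast(f_k)\rangle}
=\overline{\langle \alpha(e_j),f_k\rangle}
=\overline{([\alpha]_{B,C})_{kj}},
\]
which is exactly $[\alpha^\ast]_{C,B}=(\overline{[\alpha]}_{B,C})^\top$. Uniqueness follows since any $u\in V$ orthogonal to every $v\in V$ is zero.
\end{remark}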
using Weber using Base.Test include("find_timing.jl") function cause_column_error() find_timing() do addtrial(moment(record,"test",unspecified_column_name="string")) end end const reserved_columns = [:reserved, :weber_version, :start_date, :start_time, :offset, :trial, :time] function cause_reserved_error(decl,use=decl) find_timing(columns=[decl]) do addtrial(moment(record,"test";[use => "test"]...)) end nothing end @testset "Record Columns" begin @test_throws ErrorException cause_column_error() @test_throws ErrorException cause_reserved_error(:weber_version) @test_throws ErrorException cause_reserved_error(:start_date) @test_throws ErrorException cause_reserved_error(:start_time) @test_throws ErrorException cause_reserved_error(:offset) @test_throws ErrorException cause_reserved_error(:trial) @test_throws ErrorException cause_reserved_error(:time) @test_throws ErrorException cause_reserved_error(:joe,:bob) end
/- Copyright (c) 2017 Johannes Hölzl. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Johannes Hölzl, Mario Carneiro -/ import Mathlib.PrePort import Mathlib.Lean3Lib.init.default import Mathlib.topology.tactic import Mathlib.PostPort universes u u_1 l v u_2 w u_3 namespace Mathlib /-! # Ordering on topologies and (co)induced topologies Topologies on a fixed type `α` are ordered, by reverse inclusion. That is, for topologies `t₁` and `t₂` on `α`, we write `t₁ ≤ t₂` if every set open in `t₂` is also open in `t₁`. (One also calls `t₁` finer than `t₂`, and `t₂` coarser than `t₁`.) Any function `f : α → β` induces `induced f : topological_space β → topological_space α` and `coinduced f : topological_space α → topological_space β`. Continuity, the ordering on topologies and (co)induced topologies are related as follows: * The identity map (α, t₁) → (α, t₂) is continuous iff t₁ ≤ t₂. * A map f : (α, t) → (β, u) is continuous iff t ≤ induced f u (`continuous_iff_le_induced`) iff coinduced f t ≤ u (`continuous_iff_coinduced_le`). Topologies on α form a complete lattice, with ⊥ the discrete topology and ⊤ the indiscrete topology. For a function f : α → β, (coinduced f, induced f) is a Galois connection between topologies on α and topologies on β. ## Implementation notes There is a Galois insertion between topologies on α (with the inclusion ordering) and all collections of sets in α. The complete lattice structure on topologies on α is defined as the reverse of the one obtained via this Galois insertion. ## Tags finer, coarser, induced topology, coinduced topology -/ namespace topological_space /-- The open sets of the least topology containing a collection of basic sets. -/ inductive generate_open {α : Type u} (g : set (set α)) : set α → Prop where | basic : ∀ (s : set α), s ∈ g → generate_open g s | univ : generate_open g set.univ | inter : ∀ (s t : set α), generate_open g s → generate_open g t → generate_open g (s ∩ t) | sUnion : ∀ (k : set (set α)), (∀ (s : set α), s ∈ k → generate_open g s) → generate_open g (⋃₀k) /-- The smallest topological space containing the collection `g` of basic sets -/ def generate_from {α : Type u} (g : set (set α)) : topological_space α := mk (generate_open g) generate_open.univ generate_open.inter generate_open.sUnion theorem nhds_generate_from {α : Type u} {g : set (set α)} {a : α} : nhds a = infi fun (s : set α) => infi fun (H : s ∈ set_of fun (s : set α) => a ∈ s ∧ s ∈ g) => filter.principal s := sorry theorem tendsto_nhds_generate_from {α : Type u} {β : Type u_1} {m : α → β} {f : filter α} {g : set (set β)} {b : β} (h : ∀ (s : set β), s ∈ g → b ∈ s → m ⁻¹' s ∈ f) : filter.tendsto m f (nhds b) := sorry /-- Construct a topology on α given the filter of neighborhoods of each point of α. -/ protected def mk_of_nhds {α : Type u} (n : α → filter α) : topological_space α := mk (fun (s : set α) => ∀ (a : α), a ∈ s → s ∈ n a) sorry sorry sorry theorem nhds_mk_of_nhds {α : Type u} (n : α → filter α) (a : α) (h₀ : pure ≤ n) (h₁ : ∀ {a : α} {s : set α}, s ∈ n a → ∃ (t : set α), ∃ (H : t ∈ n a), t ⊆ s ∧ ∀ (a' : α), a' ∈ t → s ∈ n a') : nhds a = n a := sorry end topological_space /-- The inclusion ordering on topologies on α. We use it to get a complete lattice instance via the Galois insertion method, but the partial order that we will eventually impose on `topological_space α` is the reverse one. 
-/ def tmp_order {α : Type u} : partial_order (topological_space α) := partial_order.mk (fun (t s : topological_space α) => topological_space.is_open t ≤ topological_space.is_open s) (preorder.lt._default fun (t s : topological_space α) => topological_space.is_open t ≤ topological_space.is_open s) sorry sorry sorry /- We'll later restate this lemma in terms of the correct order on `topological_space α`. -/ /-- If `s` equals the collection of open sets in the topology it generates, then `s` defines a topology. -/ protected def mk_of_closure {α : Type u} (s : set (set α)) (hs : (set_of fun (u : set α) => topological_space.is_open (topological_space.generate_from s) u) = s) : topological_space α := topological_space.mk (fun (u : set α) => u ∈ s) sorry sorry sorry theorem mk_of_closure_sets {α : Type u} {s : set (set α)} {hs : (set_of fun (u : set α) => topological_space.is_open (topological_space.generate_from s) u) = s} : Mathlib.mk_of_closure s hs = topological_space.generate_from s := topological_space_eq (Eq.symm hs) /-- The Galois insertion between `set (set α)` and `topological_space α` whose lower part sends a collection of subsets of α to the topology they generate, and whose upper part sends a topology to its collection of open subsets. -/ def gi_generate_from (α : Type u_1) : galois_insertion topological_space.generate_from fun (t : topological_space α) => set_of fun (s : set α) => topological_space.is_open t s := galois_insertion.mk (fun (g : set (set α)) (hg : (set_of fun (s : set α) => topological_space.is_open (topological_space.generate_from g) s) ≤ g) => Mathlib.mk_of_closure g sorry) sorry sorry sorry theorem generate_from_mono {α : Type u_1} {g₁ : set (set α)} {g₂ : set (set α)} (h : g₁ ⊆ g₂) : topological_space.generate_from g₁ ≤ topological_space.generate_from g₂ := galois_connection.monotone_l (galois_insertion.gc (gi_generate_from α)) h /-- The complete lattice of topological spaces, but built on the inclusion ordering. -/ def tmp_complete_lattice {α : Type u} : complete_lattice (topological_space α) := galois_insertion.lift_complete_lattice (gi_generate_from α) /-- The ordering on topologies on the type `α`. `t ≤ s` if every set open in `s` is also open in `t` (`t` is finer than `s`). -/ protected instance topological_space.partial_order {α : Type u} : partial_order (topological_space α) := partial_order.mk (fun (t s : topological_space α) => topological_space.is_open s ≤ topological_space.is_open t) (preorder.lt._default fun (t s : topological_space α) => topological_space.is_open s ≤ topological_space.is_open t) sorry sorry sorry theorem le_generate_from_iff_subset_is_open {α : Type u} {g : set (set α)} {t : topological_space α} : t ≤ topological_space.generate_from g ↔ g ⊆ set_of fun (s : set α) => topological_space.is_open t s := generate_from_le_iff_subset_is_open /-- Topologies on `α` form a complete lattice, with `⊥` the discrete topology and `⊤` the indiscrete topology. The infimum of a collection of topologies is the topology generated by all their open sets, while the supremem is the topology whose open sets are those sets open in every member of the collection. -/ protected instance topological_space.complete_lattice {α : Type u} : complete_lattice (topological_space α) := order_dual.complete_lattice (topological_space α) /-- A topological space is discrete if every set is open, that is, its topology equals the discrete topology `⊥`. 
-/ class discrete_topology (α : Type u_1) [t : topological_space α] where eq_bot : t = ⊥ @[simp] theorem is_open_discrete {α : Type u} [topological_space α] [discrete_topology α] (s : set α) : is_open s := Eq.symm (discrete_topology.eq_bot α) ▸ trivial @[simp] theorem is_closed_discrete {α : Type u} [topological_space α] [discrete_topology α] (s : set α) : is_closed s := Eq.symm (discrete_topology.eq_bot α) ▸ trivial theorem continuous_of_discrete_topology {α : Type u} {β : Type v} [topological_space α] [discrete_topology α] [topological_space β] {f : α → β} : continuous f := iff.mpr continuous_def fun (s : set β) (hs : is_open s) => is_open_discrete (f ⁻¹' s) theorem nhds_bot (α : Type u_1) : nhds = pure := le_antisymm (id fun (a : α) => id fun (s : set α) (hs : s ∈ pure a) => mem_nhds_sets trivial hs) pure_le_nhds theorem nhds_discrete (α : Type u_1) [topological_space α] [discrete_topology α] : nhds = pure := Eq.symm (discrete_topology.eq_bot α) ▸ nhds_bot α theorem le_of_nhds_le_nhds {α : Type u} {t₁ : topological_space α} {t₂ : topological_space α} (h : ∀ (x : α), nhds x ≤ nhds x) : t₁ ≤ t₂ := sorry theorem eq_of_nhds_eq_nhds {α : Type u} {t₁ : topological_space α} {t₂ : topological_space α} (h : ∀ (x : α), nhds x = nhds x) : t₁ = t₂ := le_antisymm (le_of_nhds_le_nhds fun (x : α) => le_of_eq (h x)) (le_of_nhds_le_nhds fun (x : α) => le_of_eq (Eq.symm (h x))) theorem eq_bot_of_singletons_open {α : Type u} {t : topological_space α} (h : ∀ (x : α), topological_space.is_open t (singleton x)) : t = ⊥ := bot_unique fun (s : set α) (hs : topological_space.is_open ⊥ s) => set.bUnion_of_singleton s ▸ is_open_bUnion fun (x : α) (_x : x ∈ s) => h x theorem forall_open_iff_discrete {X : Type u_1} [topological_space X] : (∀ (s : set X), is_open s) ↔ discrete_topology X := sorry theorem singletons_open_iff_discrete {X : Type u_1} [topological_space X] : (∀ (a : X), is_open (singleton a)) ↔ discrete_topology X := { mp := fun (h : ∀ (a : X), is_open (singleton a)) => discrete_topology.mk (eq_bot_of_singletons_open h), mpr := fun (a : discrete_topology X) (_x : X) => is_open_discrete (singleton _x) } /-- Given `f : α → β` and a topology on `β`, the induced topology on `α` is the collection of sets that are preimages of some open set in `β`. This is the coarsest topology that makes `f` continuous. -/ def topological_space.induced {α : Type u} {β : Type v} (f : α → β) (t : topological_space β) : topological_space α := topological_space.mk (fun (s : set α) => ∃ (s' : set β), topological_space.is_open t s' ∧ f ⁻¹' s' = s) sorry sorry sorry theorem is_open_induced_iff {α : Type u_1} {β : Type u_2} [t : topological_space β] {s : set α} {f : α → β} : is_open s ↔ ∃ (t_1 : set β), is_open t_1 ∧ f ⁻¹' t_1 = s := iff.rfl theorem is_closed_induced_iff {α : Type u_1} {β : Type u_2} [t : topological_space β] {s : set α} {f : α → β} : is_closed s ↔ ∃ (t_1 : set β), is_closed t_1 ∧ s = f ⁻¹' t_1 := sorry /-- Given `f : α → β` and a topology on `α`, the coinduced topology on `β` is defined such that `s:set β` is open if the preimage of `s` is open. This is the finest topology that makes `f` continuous. 
-/ def topological_space.coinduced {α : Type u} {β : Type v} (f : α → β) (t : topological_space α) : topological_space β := topological_space.mk (fun (s : set β) => topological_space.is_open t (f ⁻¹' s)) sorry sorry sorry theorem is_open_coinduced {α : Type u_1} {β : Type u_2} {t : topological_space α} {s : set β} {f : α → β} : is_open s ↔ is_open (f ⁻¹' s) := iff.rfl theorem continuous.coinduced_le {α : Type u_1} {β : Type u_2} {t : topological_space α} {t' : topological_space β} {f : α → β} (h : continuous f) : topological_space.coinduced f t ≤ t' := fun (s : set β) (hs : topological_space.is_open t' s) => iff.mp continuous_def h s hs theorem coinduced_le_iff_le_induced {α : Type u_1} {β : Type u_2} {f : α → β} {tα : topological_space α} {tβ : topological_space β} : topological_space.coinduced f tα ≤ tβ ↔ tα ≤ topological_space.induced f tβ := sorry theorem continuous.le_induced {α : Type u_1} {β : Type u_2} {t : topological_space α} {t' : topological_space β} {f : α → β} (h : continuous f) : t ≤ topological_space.induced f t' := iff.mp coinduced_le_iff_le_induced (continuous.coinduced_le h) theorem gc_coinduced_induced {α : Type u_1} {β : Type u_2} (f : α → β) : galois_connection (topological_space.coinduced f) (topological_space.induced f) := fun (f_1 : topological_space α) (g : topological_space β) => coinduced_le_iff_le_induced theorem induced_mono {α : Type u_1} {β : Type u_2} {t₁ : topological_space α} {t₂ : topological_space α} {g : β → α} (h : t₁ ≤ t₂) : topological_space.induced g t₁ ≤ topological_space.induced g t₂ := galois_connection.monotone_u (gc_coinduced_induced g) h theorem coinduced_mono {α : Type u_1} {β : Type u_2} {t₁ : topological_space α} {t₂ : topological_space α} {f : α → β} (h : t₁ ≤ t₂) : topological_space.coinduced f t₁ ≤ topological_space.coinduced f t₂ := galois_connection.monotone_l (gc_coinduced_induced f) h @[simp] theorem induced_top {α : Type u_1} {β : Type u_2} {g : β → α} : topological_space.induced g ⊤ = ⊤ := galois_connection.u_top (gc_coinduced_induced g) @[simp] theorem induced_inf {α : Type u_1} {β : Type u_2} {t₁ : topological_space α} {t₂ : topological_space α} {g : β → α} : topological_space.induced g (t₁ ⊓ t₂) = topological_space.induced g t₁ ⊓ topological_space.induced g t₂ := galois_connection.u_inf (gc_coinduced_induced g) @[simp] theorem induced_infi {α : Type u_1} {β : Type u_2} {g : β → α} {ι : Sort w} {t : ι → topological_space α} : topological_space.induced g (infi fun (i : ι) => t i) = infi fun (i : ι) => topological_space.induced g (t i) := galois_connection.u_infi (gc_coinduced_induced g) @[simp] theorem coinduced_bot {α : Type u_1} {β : Type u_2} {f : α → β} : topological_space.coinduced f ⊥ = ⊥ := galois_connection.l_bot (gc_coinduced_induced f) @[simp] theorem coinduced_sup {α : Type u_1} {β : Type u_2} {t₁ : topological_space α} {t₂ : topological_space α} {f : α → β} : topological_space.coinduced f (t₁ ⊔ t₂) = topological_space.coinduced f t₁ ⊔ topological_space.coinduced f t₂ := galois_connection.l_sup (gc_coinduced_induced f) @[simp] theorem coinduced_supr {α : Type u_1} {β : Type u_2} {f : α → β} {ι : Sort w} {t : ι → topological_space α} : topological_space.coinduced f (supr fun (i : ι) => t i) = supr fun (i : ι) => topological_space.coinduced f (t i) := galois_connection.l_supr (gc_coinduced_induced f) theorem induced_id {α : Type u_1} [t : topological_space α] : topological_space.induced id t = t := sorry theorem induced_compose {α : Type u_1} {β : Type u_2} {γ : Type u_3} [tγ : topological_space γ] {f : α → β} {g : β → 
γ} : topological_space.induced f (topological_space.induced g tγ) = topological_space.induced (g ∘ f) tγ := sorry theorem coinduced_id {α : Type u_1} [t : topological_space α] : topological_space.coinduced id t = t := topological_space_eq rfl theorem coinduced_compose {α : Type u_1} {β : Type u_2} {γ : Type u_3} [tα : topological_space α] {f : α → β} {g : β → γ} : topological_space.coinduced g (topological_space.coinduced f tα) = topological_space.coinduced (g ∘ f) tα := topological_space_eq rfl /- constructions using the complete lattice structure -/ protected instance inhabited_topological_space {α : Type u} : Inhabited (topological_space α) := { default := ⊤ } protected instance subsingleton.unique_topological_space {α : Type u} [subsingleton α] : unique (topological_space α) := unique.mk { default := ⊥ } sorry protected instance subsingleton.discrete_topology {α : Type u} [t : topological_space α] [subsingleton α] : discrete_topology α := discrete_topology.mk (unique.eq_default t) protected instance empty.topological_space : topological_space empty := ⊥ protected instance empty.discrete_topology : discrete_topology empty := discrete_topology.mk rfl protected instance pempty.topological_space : topological_space pempty := ⊥ protected instance pempty.discrete_topology : discrete_topology pempty := discrete_topology.mk rfl protected instance unit.topological_space : topological_space Unit := ⊥ protected instance unit.discrete_topology : discrete_topology Unit := discrete_topology.mk rfl protected instance bool.topological_space : topological_space Bool := ⊥ protected instance bool.discrete_topology : discrete_topology Bool := discrete_topology.mk rfl protected instance nat.topological_space : topological_space ℕ := ⊥ protected instance nat.discrete_topology : discrete_topology ℕ := discrete_topology.mk rfl protected instance int.topological_space : topological_space ℤ := ⊥ protected instance int.discrete_topology : discrete_topology ℤ := discrete_topology.mk rfl protected instance sierpinski_space : topological_space Prop := topological_space.generate_from (singleton (singleton True)) theorem le_generate_from {α : Type u} {t : topological_space α} {g : set (set α)} (h : ∀ (s : set α), s ∈ g → is_open s) : t ≤ topological_space.generate_from g := iff.mpr le_generate_from_iff_subset_is_open h theorem induced_generate_from_eq {α : Type u_1} {β : Type u_2} {b : set (set β)} {f : α → β} : topological_space.induced f (topological_space.generate_from b) = topological_space.generate_from (set.preimage f '' b) := sorry /-- This construction is left adjoint to the operation sending a topology on `α` to its neighborhood filter at a fixed point `a : α`. 
-/ protected def topological_space.nhds_adjoint {α : Type u} (a : α) (f : filter α) : topological_space α := topological_space.mk (fun (s : set α) => a ∈ s → s ∈ f) sorry sorry sorry theorem gc_nhds {α : Type u} (a : α) : galois_connection (topological_space.nhds_adjoint a) fun (t : topological_space α) => nhds a := sorry theorem nhds_mono {α : Type u} {t₁ : topological_space α} {t₂ : topological_space α} {a : α} (h : t₁ ≤ t₂) : nhds a ≤ nhds a := galois_connection.monotone_u (gc_nhds a) h theorem nhds_infi {α : Type u} {ι : Sort u_1} {t : ι → topological_space α} {a : α} : nhds a = infi fun (i : ι) => nhds a := galois_connection.u_infi (gc_nhds a) theorem nhds_Inf {α : Type u} {s : set (topological_space α)} {a : α} : nhds a = infi fun (t : topological_space α) => infi fun (H : t ∈ s) => nhds a := galois_connection.u_Inf (gc_nhds a) theorem nhds_inf {α : Type u} {t₁ : topological_space α} {t₂ : topological_space α} {a : α} : nhds a = nhds a ⊓ nhds a := galois_connection.u_inf (gc_nhds a) theorem nhds_top {α : Type u} {a : α} : nhds a = ⊤ := galois_connection.u_top (gc_nhds a) theorem continuous_iff_coinduced_le {α : Type u} {β : Type v} {f : α → β} {t₁ : topological_space α} {t₂ : topological_space β} : continuous f ↔ topological_space.coinduced f t₁ ≤ t₂ := iff.trans continuous_def iff.rfl theorem continuous_iff_le_induced {α : Type u} {β : Type v} {f : α → β} {t₁ : topological_space α} {t₂ : topological_space β} : continuous f ↔ t₁ ≤ topological_space.induced f t₂ := iff.trans continuous_iff_coinduced_le (gc_coinduced_induced f t₁ t₂) theorem continuous_generated_from {α : Type u} {β : Type v} {f : α → β} {t : topological_space α} {b : set (set β)} (h : ∀ (s : set β), s ∈ b → is_open (f ⁻¹' s)) : continuous f := iff.mpr continuous_iff_coinduced_le (le_generate_from h) theorem continuous_induced_dom {α : Type u} {β : Type v} {f : α → β} {t : topological_space β} : continuous f := eq.mpr (id (Eq._oldrec (Eq.refl (continuous f)) (propext continuous_def))) fun (s : set β) (h : is_open s) => Exists.intro s { left := h, right := rfl } theorem continuous_induced_rng {α : Type u} {β : Type v} {γ : Type u_1} {f : α → β} {g : γ → α} {t₂ : topological_space β} {t₁ : topological_space γ} (h : continuous (f ∘ g)) : continuous g := sorry theorem continuous_induced_rng' {α : Type u} {β : Type v} {γ : Type u_1} [topological_space α] [topological_space β] [topological_space γ] {g : γ → α} (f : α → β) (H : _inst_1 = topological_space.induced f _inst_2) (h : continuous (f ∘ g)) : continuous g := Eq.symm H ▸ continuous_induced_rng h theorem continuous_coinduced_rng {α : Type u} {β : Type v} {f : α → β} {t : topological_space α} : continuous f := eq.mpr (id (Eq._oldrec (Eq.refl (continuous f)) (propext continuous_def))) fun (s : set β) (h : is_open s) => h theorem continuous_coinduced_dom {α : Type u} {β : Type v} {γ : Type u_1} {f : α → β} {g : β → γ} {t₁ : topological_space α} {t₂ : topological_space γ} (h : continuous (g ∘ f)) : continuous g := eq.mpr (id (Eq._oldrec (Eq.refl (continuous g)) (propext continuous_def))) fun (s : set γ) (hs : is_open s) => eq.mp (Eq._oldrec (Eq.refl (continuous (g ∘ f))) (propext continuous_def)) h s hs theorem continuous_le_dom {α : Type u} {β : Type v} {f : α → β} {t₁ : topological_space α} {t₂ : topological_space α} {t₃ : topological_space β} (h₁ : t₂ ≤ t₁) (h₂ : continuous f) : continuous f := eq.mpr (id (Eq._oldrec (Eq.refl (continuous f)) (propext continuous_def))) fun (s : set β) (h : is_open s) => h₁ (f ⁻¹' s) (eq.mp (Eq._oldrec (Eq.refl (continuous f)) (propext 
continuous_def)) h₂ s h) theorem continuous_le_rng {α : Type u} {β : Type v} {f : α → β} {t₁ : topological_space α} {t₂ : topological_space β} {t₃ : topological_space β} (h₁ : t₂ ≤ t₃) (h₂ : continuous f) : continuous f := eq.mpr (id (Eq._oldrec (Eq.refl (continuous f)) (propext continuous_def))) fun (s : set β) (h : is_open s) => eq.mp (Eq._oldrec (Eq.refl (continuous f)) (propext continuous_def)) h₂ s (h₁ s h) theorem continuous_sup_dom {α : Type u} {β : Type v} {f : α → β} {t₁ : topological_space α} {t₂ : topological_space α} {t₃ : topological_space β} (h₁ : continuous f) (h₂ : continuous f) : continuous f := sorry theorem continuous_sup_rng_left {α : Type u} {β : Type v} {f : α → β} {t₁ : topological_space α} {t₃ : topological_space β} {t₂ : topological_space β} : continuous f → continuous f := continuous_le_rng le_sup_left theorem continuous_sup_rng_right {α : Type u} {β : Type v} {f : α → β} {t₁ : topological_space α} {t₃ : topological_space β} {t₂ : topological_space β} : continuous f → continuous f := continuous_le_rng le_sup_right theorem continuous_Sup_dom {α : Type u} {β : Type v} {f : α → β} {t₁ : set (topological_space α)} {t₂ : topological_space β} (h : ∀ (t : topological_space α), t ∈ t₁ → continuous f) : continuous f := iff.mpr continuous_iff_le_induced (Sup_le fun (t : topological_space α) (ht : t ∈ t₁) => iff.mp continuous_iff_le_induced (h t ht)) theorem continuous_Sup_rng {α : Type u} {β : Type v} {f : α → β} {t₁ : topological_space α} {t₂ : set (topological_space β)} {t : topological_space β} (h₁ : t ∈ t₂) (hf : continuous f) : continuous f := iff.mpr continuous_iff_coinduced_le (le_Sup_of_le h₁ (iff.mp continuous_iff_coinduced_le hf)) theorem continuous_supr_dom {α : Type u} {β : Type v} {f : α → β} {ι : Sort u_2} {t₁ : ι → topological_space α} {t₂ : topological_space β} (h : ι → continuous f) : continuous f := sorry theorem continuous_supr_rng {α : Type u} {β : Type v} {f : α → β} {ι : Sort u_2} {t₁ : topological_space α} {t₂ : ι → topological_space β} {i : ι} (h : continuous f) : continuous f := continuous_Sup_rng (Exists.intro i rfl) h theorem continuous_inf_rng {α : Type u} {β : Type v} {f : α → β} {t₁ : topological_space α} {t₂ : topological_space β} {t₃ : topological_space β} (h₁ : continuous f) (h₂ : continuous f) : continuous f := iff.mpr continuous_iff_coinduced_le (le_inf (iff.mp continuous_iff_coinduced_le h₁) (iff.mp continuous_iff_coinduced_le h₂)) theorem continuous_inf_dom_left {α : Type u} {β : Type v} {f : α → β} {t₁ : topological_space α} {t₂ : topological_space α} {t₃ : topological_space β} : continuous f → continuous f := continuous_le_dom inf_le_left theorem continuous_inf_dom_right {α : Type u} {β : Type v} {f : α → β} {t₁ : topological_space α} {t₂ : topological_space α} {t₃ : topological_space β} : continuous f → continuous f := continuous_le_dom inf_le_right theorem continuous_Inf_dom {α : Type u} {β : Type v} {f : α → β} {t₁ : set (topological_space α)} {t₂ : topological_space β} {t : topological_space α} (h₁ : t ∈ t₁) : continuous f → continuous f := continuous_le_dom (Inf_le h₁) theorem continuous_Inf_rng {α : Type u} {β : Type v} {f : α → β} {t₁ : topological_space α} {t₂ : set (topological_space β)} (h : ∀ (t : topological_space β), t ∈ t₂ → continuous f) : continuous f := iff.mpr continuous_iff_coinduced_le (le_Inf fun (b : topological_space β) (hb : b ∈ t₂) => iff.mp continuous_iff_coinduced_le (h b hb)) theorem continuous_infi_dom {α : Type u} {β : Type v} {f : α → β} {ι : Sort u_2} {t₁ : ι → topological_space α} {t₂ : 
topological_space β} {i : ι} : continuous f → continuous f := continuous_le_dom (infi_le t₁ i) theorem continuous_infi_rng {α : Type u} {β : Type v} {f : α → β} {ι : Sort u_2} {t₁ : topological_space α} {t₂ : ι → topological_space β} (h : ι → continuous f) : continuous f := iff.mpr continuous_iff_coinduced_le (le_infi fun (i : ι) => iff.mp continuous_iff_coinduced_le (h i)) theorem continuous_bot {α : Type u} {β : Type v} {f : α → β} {t : topological_space β} : continuous f := iff.mpr continuous_iff_le_induced bot_le theorem continuous_top {α : Type u} {β : Type v} {f : α → β} {t : topological_space α} : continuous f := iff.mpr continuous_iff_coinduced_le le_top /- 𝓝 in the induced topology -/ theorem mem_nhds_induced {α : Type u} {β : Type v} [T : topological_space α] (f : β → α) (a : β) (s : set β) : s ∈ nhds a ↔ ∃ (u : set α), ∃ (H : u ∈ nhds (f a)), f ⁻¹' u ⊆ s := sorry theorem nhds_induced {α : Type u} {β : Type v} [T : topological_space α] (f : β → α) (a : β) : nhds a = filter.comap f (nhds (f a)) := sorry theorem induced_iff_nhds_eq {α : Type u} {β : Type v} [tα : topological_space α] [tβ : topological_space β] (f : β → α) : tβ = topological_space.induced f tα ↔ ∀ (b : β), nhds b = filter.comap f (nhds (f b)) := sorry theorem map_nhds_induced_of_surjective {α : Type u} {β : Type v} [T : topological_space α] {f : β → α} (hf : function.surjective f) (a : β) : filter.map f (nhds a) = nhds (f a) := sorry theorem is_open_induced_eq {α : Type u_1} {β : Type u_2} [t : topological_space β] {f : α → β} {s : set α} : is_open s ↔ s ∈ set.preimage f '' set_of fun (s : set β) => is_open s := iff.rfl theorem is_open_induced {α : Type u_1} {β : Type u_2} [t : topological_space β] {f : α → β} {s : set β} (h : is_open s) : topological_space.is_open (topological_space.induced f t) (f ⁻¹' s) := Exists.intro s { left := h, right := rfl } theorem map_nhds_induced_eq {α : Type u_1} {β : Type u_2} [t : topological_space β] {f : α → β} {a : α} (h : set.range f ∈ nhds (f a)) : filter.map f (nhds a) = nhds (f a) := eq.mpr (id (Eq._oldrec (Eq.refl (filter.map f (nhds a) = nhds (f a))) (nhds_induced f a))) (eq.mpr (id (Eq._oldrec (Eq.refl (filter.map f (filter.comap f (nhds (f a))) = nhds (f a))) (filter.map_comap h))) (Eq.refl (nhds (f a)))) theorem closure_induced {α : Type u_1} {β : Type u_2} [t : topological_space β] {f : α → β} {a : α} {s : set α} (hf : ∀ (x y : α), f x = f y → x = y) : a ∈ closure s ↔ f a ∈ closure (f '' s) := sorry @[simp] theorem is_open_singleton_true : is_open (singleton True) := topological_space.generate_open.basic (singleton True) (eq.mpr (id (propext ((fun {α : Type} (a : α) => iff_true_intro (set.mem_singleton a)) (singleton True)))) trivial) theorem continuous_Prop {α : Type u_1} [topological_space α] {p : α → Prop} : continuous p ↔ is_open (set_of fun (x : α) => p x) := sorry theorem is_open_supr_iff {α : Type u} {ι : Type v} {t : ι → topological_space α} {s : set α} : is_open s ↔ ι → is_open s := sorry theorem is_closed_infi_iff {α : Type u} {ι : Type v} {t : ι → topological_space α} {s : set α} : is_closed s ↔ ι → is_closed s := is_open_supr_iff end Mathlib
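A hedged restatement of the adjunction above (my notation, not from the file): `gc_nhds` says that for every point $a$, the map $f \mapsto \mathrm{nhds\_adjoint}(a, f)$ is left adjoint to $t \mapsto \mathcal{N}_t(a)$, i.e.

$$\mathrm{nhds\_adjoint}(a, f) \le t \iff f \le \mathcal{N}_t(a),$$

which is why `nhds_infi`, `nhds_Inf`, `nhds_inf`, and `nhds_top` all come for free: a right adjoint such as $t \mapsto \mathcal{N}_t(a)$ preserves infima and the top element.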
Formal statement is: lemma measurableI: "(\<And>x. x \<in> space M \<Longrightarrow> f x \<in> space N) \<Longrightarrow> (\<And>A. A \<in> sets N \<Longrightarrow> f -` A \<inter> space M \<in> sets M) \<Longrightarrow> f \<in> measurable M N" Informal statement is: If $f$ is a function from a measurable space $(X, \mathcal{A})$ to a measurable space $(Y, \mathcal{B})$ such that $f(x) \in Y$ for all $x \in X$ and $f^{-1}(B) \cap X \in \mathcal{A}$ for all $B \in \mathcal{B}$, then $f$ is measurable.
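As a worked instance (illustrative only, not part of the original statement pair): a constant function $f(x) = c$ with $c \in Y$ satisfies both premises of `measurableI`, since $f(x) = c \in Y$ for every $x \in X$ and, for any $B \in \mathcal{B}$,

$$f^{-1}(B) \cap X = \begin{cases} X & \text{if } c \in B, \\ \varnothing & \text{otherwise,} \end{cases}$$

and both $X$ and $\varnothing$ belong to the $\sigma$-algebra $\mathcal{A}$; hence $f$ is measurable.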
subroutine write_avs_node_mat(lu,ifdual,mout) !*********************************************************************** ! Copyright, 1993, 2004, The Regents of the University of California. ! This program was prepared by the Regents of the University of ! California at Los Alamos National Laboratory (the University) under ! contract No. W-7405-ENG-36 with the U.S. Department of Energy (DOE). ! All rights in the program are reserved by the DOE and the University. ! Permission is granted to the public to copy and use this software ! without charge, provided that this Notice and any statement of ! authorship are reproduced on all copies. Neither the U.S. Government ! nor the University makes any warranty, express or implied, or ! assumes any liability or responsibility for the use of this software. C*********************************************************************** CD1 CD1 PURPOSE CD1 CD1 Output AVS scalar node information for FEHM mesh materials. CD1 C*********************************************************************** CD2 CD2 REVISION HISTORY CD2 CD2 Revision ECD CD2 Date Programmer Number Comments CD2 CD2 10-SEP-93 Carl Gable 22 Initial implementation. CD2 CD2 $Log: /pvcs.config/fehm90/src/write_avs_node_mat.f_a $ !D2 !D2 Rev 2.5 06 Jan 2004 10:43:06 pvcs !D2 FEHM Version 2.21, STN 10086-2.21-00, Qualified October 2003 !D2 !D2 Rev 2.4 29 Jan 2003 09:24:42 pvcs !D2 FEHM Version 2.20, STN 10086-2.20-00 !D2 !D2 Rev 2.3 14 Nov 2001 13:29:16 pvcs !D2 FEHM Version 2.12, STN 10086-2.12-00 !D2 !D2 Rev 2.2 06 Jun 2001 13:28:44 pvcs !D2 FEHM Version 2.11, STN 10086-2.11-00 !D2 !D2 Rev 2.1 30 Nov 2000 12:13:30 pvcs !D2 FEHM Version 2.10, STN 10086-2.10-00 !D2 !D2 Rev 2.0 Fri May 07 14:48:18 1999 pvcs !D2 FEHM Version 2.0, SC-194 (Fortran 90) CD2 CD2 Rev 1.3 Fri Feb 02 14:20:54 1996 hend CD2 Updated Requirements Traceability CD2 CD2 Rev 1.2 01/20/95 13:30:58 tam CD2 Changed format for strings from * to a56, kept length to 80 so left justified CD2 CD2 Rev 1.1 12/12/94 16:29:40 tam CD2 coerced irlp and icap to floats during write to avs file CD2 CD2 Rev 1.0 08/23/94 15:34:16 llt CD2 Original version CD2 C*********************************************************************** CD3 CD3 INTERFACES CD3 CD3 Formal Calling Parameters CD3 CD3 Identifier Type Use Description CD3 CD3 Interface Tables CD3 CD3 None CD3 CD3 Files CD3 CD3 None CD3 C*********************************************************************** CD4 CD4 GLOBAL OBJECTS CD4 CD4 None CD4 CD4 C*********************************************************************** CD5 CD5 LOCAL IDENTIFIERS CD5 CD5 Local Constants CD5 CD5 None CD5 CD5 Local Types CD5 CD5 None CD5 CD5 Local variables CD5 CD5 Identifier Type Description CD5 CD5 Local Subprograms CD5 CD5 None CD5 C*********************************************************************** CD6 CD6 FUNCTIONAL DESCRIPTION CD6 C*********************************************************************** CD7 CD7 ASSUMPTIONS AND LIMITATIONS CD7 CD7 None CD7 C*********************************************************************** CD8 CD8 SPECIAL COMMENTS CD8 CD8 Requirements from SDN: 10086-RD-2.20-00 CD8 SOFTWARE REQUIREMENTS DOCUMENT (RD) for the CD8 FEHM Application Version 2.20 CD8 C*********************************************************************** CD9 CD9 REQUIREMENTS TRACEABILITY CD9 CD9 2.6 Provide Input/Output Data Files CD9 3.0 INPUT AND OUTPUT REQUIREMENTS CD9 C*********************************************************************** CDA CDA REFERENCES CDA CDA None CDA 
C*********************************************************************** CPS CPS PSEUDOCODE CPS CPS BEGIN CPS CPS END CPS C*********************************************************************** use avsio, only : iocord, iogeo, iokd, geoname, iodual, iogdkm, & iogrid use comai use combi, only : corz use comdi use comchem, only : cpntnam use comrxni, only : rxn_flag use comriv, only : iriver use davidi implicit none integer i, j, lu, ifdual, maxtitle, mout, length, ic1, ic2 integer il, open_file, nelm(ns_in), iocord_temp integer icord1, icord2, icord3, neq_read, neq_write, istart, iend parameter(maxtitle = 22) character*3 dls character*5 char_type, dual_char character*15 nform character*300 temp_string character*42 title(maxtitle), units(maxtitle), pstring character*42, allocatable :: title_kd(:) character*600 print_title, vstring real*8 perm_fac parameter (perm_fac=1.d-6) logical :: xon = .false., yon = .false., zon = .false. c If this structure changes, also change the binary version c To ensure label is left justified, an <= string length c adding avsx output option -- can assign integer perms then plot c the material types in avsx with a discrete colorbar. PHS 8/11/2000 c------------------------------------------------------------------------------ temp_string = '' print_title = '' pstring = '' dual_char = '' iocord_temp = iocord if (ifdual .eq. 1) then istart = 1 iend = neq - neq_primary if (iodual .eq. 1) then dual_char = 'Dual ' else dual_char = 'GDKM ' if (icnl .eq. 0) then iocord = 3 else iocord = 2 end if endif else istart = 1 iend = neq_primary end if title(1) = trim(dual_char) // 'Permeability (m**2) in X' title(2) = trim(dual_char) // 'Permeability (m**2) in Y' title(3) = trim(dual_char) // 'Permeability (m**2) in Z' title(4) = trim(dual_char) // 'Thermal Conductivity (W/m*K) in X' title(5) = trim(dual_char) // 'Thermal Conductivity (W/m*K) in Y' title(6) = trim(dual_char) // 'Thermal Conductivity (W/m*K) in Z' title(7) = trim(dual_char) // 'Porosity' title(15)= trim(dual_char) // 'Rock bulk density (kg/m**3)' title(8) = trim(dual_char) // 'Rock specific heat (MJ/kg*K)' title(9) = trim(dual_char) // 'Capillary pressure (MPa)' title(10)= trim(dual_char) // 'Relative permeability model' title(11)= trim(dual_char) // 'Capillary pressure model' title(12) = 'X coordinate (m)' title(13) = 'Y coordinate (m)' title(14) = 'Z coordinate (m)' units(1) = '(m**2)' units(2) = '(m**2)' units(3) = '(m**2)' units(4) = '(W/m*K)' units(5) = '(W/m*K)' units(6) = '(W/m*K)' units(7) = '(non dim)' units(15) = '(kg/m**3)' units(8) = '(MJ/kg*K)' units(9) = '(MPa)' units(10)= '(flag)' units(11)= '(flag)' ic1 = 1 select case (icnl) case (1, 4) xon = .true. yon = .true. case (2, 5) xon = .true. zon = .true. case(3, 6) yon = .true. zon = .true. case default xon = .true. yon = .true. zon = .true. end select if(altc(1:3).eq.'avs' .and. altc(4:4) .ne. 'x') then write (temp_string, '(i2)') mout ic2 = len_trim(temp_string) pstring(ic1:ic2) = temp_string write (temp_string, '(a2)') ' 1' do i = 1, mout ic1 = ic2 + 1 ic2 = ic2 + len_trim(temp_string) pstring(ic1:ic2) = temp_string end do length = len_trim(pstring) write (lu, '(42a)') pstring(1:length) if (idoff .ne. -1) then ! Permeability will be written if (xon) write (lu, 100) trim(title(1)), trim(units(1)) if (yon) write (lu, 100) trim(title(2)), trim(units(2)) if (zon) write (lu, 100) trim(title(3)), trim(units(3)) end if if (ico2 .gt. 0 .or. ice .ne. 0) then ! 
Conductivity will be written if (xon) write (lu, 100) trim(title(4)), trim(units(4)) if (yon) write (lu, 100) trim(title(5)), trim(units(5)) if (zon) write (lu, 100) trim(title(6)), trim(units(6)) end if ! Porosity, bulk density and specific heat will be written write (lu, 100) trim(title(7)), trim(units(7)) write (lu, 100) trim(title(15)), trim(units(15)) write (lu, 100) trim(title(8)), trim(units(8)) if (irdof .ne. 13) then ! Capillary pressure will be written write(lu, 100) trim(title(9)), trim(units(9)) end if if (rlp_flag .eq. 1) then ! rlp and cap model flags will be written if rlp_flag .eq. 1 write (lu, 100) trim(title(10)), trim(units(10)) write (lu, 100) trim(title(11)), trim(units(11)) end if if (iccen .eq. 1 .and. iokd .eq. 1 .and. rxn_flag .eq. 0) then ! Kd will be output for each transport specie and model allocate (title_kd(nspeci)) do i = 1, nspeci title_kd(i) = trim(cpntnam(i)) // ' (Kd l/kg) (l/kg)' end do end if else ic1 = 1 ic2 = 0 if (altc(1:4) .eq. 'avsx') then dls = ' : ' write (nform, 200) dls temp_string = 'node' else if (altc(1:3) .eq. 'tec') then ! nform = '(' // "'" // ' "' // "', a, '" // '"' // "')" write (nform, 300) if (iocord .ne. 0) then select case (icnl) case (1, 4) icord1 = 1 icord2 = 2 icord3 = 1 case (2, 5) icord1 = 1 icord2 = 3 icord3 = 2 case(3, 6) icord1 = 2 icord2 = 3 icord3 = 1 case default icord1 = 1 icord2 = 3 icord3 = 1 end select ! Write X coordinate if (icnl .ne. 3 .and. icnl .ne. 6) then write(temp_string,nform) trim(title(12)) ic2 = ic2 + len_trim(temp_string) print_title(ic1:ic2) = temp_string ic1 = ic2 + 1 end if ! Write Y coordinate if (icnl .ne. 2 .and. icnl .ne. 5) then write(temp_string,nform) trim(title(13)) ic2 = ic2 + len_trim(temp_string) print_title(ic1:ic2) = temp_string ic1 = ic2 + 1 end if ! Write Z coordinate if (icnl .ne. 1 .and. icnl .ne. 4) then write(temp_string,nform) trim(title(14)) ic2 = ic2 + len_trim(temp_string) print_title(ic1:ic2) = temp_string ic1 = ic2 + 1 end if write (temp_string, fmt=nform) 'node' else write (temp_string, fmt=nform) 'node' end if else if (altc(1:3) .eq. 'sur') then dls = ', ' write (nform, 200) dls temp_string = 'node' end if ic2 = ic2 + len_trim(temp_string) print_title(ic1:ic2) = temp_string ic1 = ic2 + 1 if (idoff .ne. -1) then ! Permeability will be written if (xon) then write (temp_string, fmt=nform) trim(title(1)) ic2 = ic2 + len_trim(temp_string) print_title(ic1:ic2) = temp_string ic1 = ic2 + 1 end if if (yon) then write (temp_string, fmt=nform) trim(title(2)) ic2 = ic2 + len_trim(temp_string) print_title(ic1:ic2) = temp_string ic1 = ic2 + 1 end if if (zon) then write (temp_string, fmt=nform) trim(title(3)) ic2 = ic2 + len_trim(temp_string) print_title(ic1:ic2) = temp_string ic1 = ic2 + 1 end if end if if (ico2 .gt. 0 .or. ice .ne. 0) then ! Conductivity will be written if (xon) then write (temp_string, fmt=nform) trim(title(4)) ic2 = ic2 + len_trim(temp_string) print_title(ic1:ic2) = temp_string ic1 = ic2 + 1 end if if (yon) then write (temp_string, fmt=nform) trim(title(5)) ic2 = ic2 + len_trim(temp_string) print_title(ic1:ic2) = temp_string ic1 = ic2 + 1 end if if (zon) then write (temp_string, fmt=nform) trim(title(6)) ic2 = ic2 + len_trim(temp_string) print_title(ic1:ic2) = temp_string ic1 = ic2 + 1 end if end if ! 
Porosity, bulk density, and specific heat will be written write (temp_string, fmt=nform) trim(title(7)) ic2 = ic2 + len_trim(temp_string) print_title(ic1:ic2) = temp_string ic1 = ic2 + 1 write (temp_string, fmt=nform) trim(title(15)) ic2 = ic2 + len_trim(temp_string) print_title(ic1:ic2) = temp_string ic1 = ic2 + 1 write (temp_string, fmt=nform) trim(title(8)) ic2 = ic2 + len_trim(temp_string) print_title(ic1:ic2) = temp_string ic1 = ic2 + 1 if (irdof .ne. 13) then ! Capillary pressure will be written write(temp_string, fmt=nform) trim(title(9)) ic2 = ic2 + len_trim(temp_string) print_title(ic1:ic2) = temp_string ic1 = ic2 + 1 end if if (rlp_flag .eq. 1) then ! rlp and cap model flags will be written if rlp_flag .eq. 1 write (temp_string, fmt=nform) trim(title(10)) ic2 = ic2 + len_trim(temp_string) print_title(ic1:ic2) = temp_string ic1 = ic2 + 1 write (temp_string, fmt=nform) trim(title(11)) ic2 = ic2 + len_trim(temp_string) print_title(ic1:ic2) = temp_string ic1 = ic2 + 1 end if ! Kd will be written if (iccen .eq. 1 .and. iokd .eq. 1 .and. rxn_flag .eq.0) then allocate (title_kd(nspeci)) do i = 1, nspeci title_kd(i) = trim(cpntnam(i)) // ' (Kd l/kg)' write (temp_string, fmt=nform) trim(title_kd(i)) ic2 = ic2 + len_trim(temp_string) print_title(ic1:ic2) = temp_string ic1 = ic2 + 1 end do end if length = len_trim(print_title) if (altc(1:3) .ne. 'tec') then write (lu, '(a)') print_title(1:length) else write (lu, 125) verno, jdate, jtime, trim(wdd) if (iogrid .eq. 1 .and. iocord .eq. 0) then write (lu, 140) end if write (lu, '("VARIABLES = ", a)') print_title(1:length) if (iogeo .eq. 1) then if(iriver.ne.2.and.gdkm_flag.ne.1) then neq_write = neq else neq_write = neq_primary endif select case (ns_in) case (5,6,8) write (temp_string, 135) neq_write, nei_in, 'FEBRICK' case (4) if (icnl .eq. 0) then write (temp_string, 135) neq_write, nei_in, & 'FETETRAHEDRON' else write (temp_string, 135) neq_write, nei_in, & 'FEQUADRILATERAL' end if case (3) write (temp_string, 135) neq_write, nei_in, & 'FETRIANGLE' case (2) write (temp_string, 135) neq_write, nei_in, & 'FELINESEG' case (0) ! fdm grid write (temp_string, '(a)') '' end select write (lu, 130) trim(temp_string) end if endif end if 125 format('TITLE = "', a30, 1x, a11, 1x, a8, 1x, a, '"') 130 format('ZONE T = "Material properties"', a) 135 format(', N = ', i8, ', E = ', i8, ', DATAPACKING = POINT', & ', ZONETYPE = ', a) 140 format('FILETYPE = "SOLUTION"') temp_string = '' vstring = '' if (altc(1:4) .ne. 'avsx' .and. altc(1:3) .ne. 'sur') then if (ifdual .ne. 0) ifdual = 1 do j = istart, iend ic1 = 1 ic2 = 0 if (altc(1:3) .eq. 'tec' .and. iocord .ne. 0) then ! Write coordinates do i = icord1, icord2, icord3 write(temp_string,'(g16.9)') corz(j,i) ic2 = ic1 + 17 vstring(ic1:ic2) = temp_string ic1 = ic2 + 1 end do end if i = j + neq_primary*ifdual write (temp_string, '(i10.10)') j ic2 = ic2 + len_trim(temp_string) vstring(ic1:ic2) = temp_string ic1 = ic2 + 1 if (idoff .ne. -1) then ! Permeability will be written if (xon) then write(temp_string, '(1x,1p,g14.6)') pnx(i)*perm_fac ic2 = ic2 + len_trim(temp_string) vstring(ic1:ic2) = temp_string ic1 = ic2 + 1 end if if (yon) then write(temp_string, '(1x,1p,g14.6)') pny(i)*perm_fac ic2 = ic2 + len_trim(temp_string) vstring(ic1:ic2) = temp_string ic1 = ic2 + 1 end if if (zon) then write(temp_string, '(1x,1p,g14.6)') pnz(i)*perm_fac ic2 = ic2 + len_trim(temp_string) vstring(ic1:ic2) = temp_string ic1 = ic2 + 1 end if end if if (ico2 .gt. 0 .or. ice .ne. 0) then ! 
Conductivity will be written if (xon) then write (temp_string, '(1x,1p,g14.6)') thx(i) ic2 = ic2 + len_trim(temp_string) vstring(ic1:ic2) = temp_string ic1 = ic2 + 1 end if if (yon) then write (temp_string, '(1x,1p,g14.6)') thy(i) ic2 = ic2 + len_trim(temp_string) vstring(ic1:ic2) = temp_string ic1 = ic2 + 1 end if if (zon) then write (temp_string, '(1x,1p,g14.6)') thz(i) ic2 = ic2 + len_trim(temp_string) vstring(ic1:ic2) = temp_string ic1 = ic2 + 1 end if end if ! Porosity and specific heat will be written write (temp_string,'(3(x,1p,g14.6))') ps(i), denr(i), cpr(i) ic2 = ic2 + len_trim(temp_string) vstring(ic1:ic2) = temp_string ic1 = ic2 + 1 if (irdof .ne. 13) then ! Capillary pressure will be written write (temp_string, '(x,1p,g14.6)') pcp(i) ic2 = ic2 + len_trim(temp_string) vstring(ic1:ic2) = temp_string ic1 = ic2 + 1 end if if (rlp_flag .eq. 1) then ! rlp and cap model flags will be written if rlp_flag .eq. 1 write (temp_string, '(2(x,i4))') irlp(i), icap(i) ic2 = ic2 + len_trim(temp_string) vstring(ic1:ic2) = temp_string ic1 = ic2 + 1 end if if(iccen .eq. 1 .and. iokd .eq. 1 .and. rxn_flag .eq.0) then do i = 1, nspeci write (temp_string, '(1x,g14.6)') a1adfl(i,itrc(j)) ic2 = ic2 + len_trim(temp_string) vstring(ic1:ic2) = temp_string ic1 = ic2 + 1 end do end if length = len_trim(vstring) write (lu, '(a)') vstring(1:length) end do if (altc(1:3) .eq. 'tec' .and. iogeo .eq. 1) then ! Read the element connectivity and write to tec file il = open_file(geoname,'old') ! avsx geometry file has an initial line that starts with neq_primary read(il,*) i if (i .ne. neq_primary) backspace il c for river or wells, "geoname" will only have neq_primary nodes if(iriver.ne.2.and.gdkm_flag.ne.1) then neq_read = neq else neq_read = neq_primary endif do i = 1, neq_read read(il,*) end do do i = 1, nei_in read (il,*) ic1,ic2,char_type,(nelm(j), j=1,ns_in) write(lu, '(8(i8))') (nelm(j), j=1,ns_in) end do close (il) end if else if (ifdual .ne. 0) ifdual = 1 do j = istart, iend i = j + neq_primary*ifdual ic1 = 1 write (temp_string, '(i10.10)') j ic2 = len_trim(temp_string) vstring(ic1:ic2) = temp_string ic1 = ic2 + 1 if (idoff .ne. -1) then ! Permeability will be written if (xon) then write (temp_string, '(a,1p,g14.6)') & dls, pnx(i)*perm_fac ic2 = ic2 + len_trim(temp_string) vstring(ic1:ic2) = temp_string ic1 = ic2 + 1 end if if (yon) then write (temp_string, '(a,1p,g14.6)') & dls, pny(i)*perm_fac ic2 = ic2 + len_trim(temp_string) vstring(ic1:ic2) = temp_string ic1 = ic2 + 1 end if if (zon) then write (temp_string, '(a,1p,g14.6)') & dls, pnz(i)*perm_fac ic2 = ic2 + len_trim(temp_string) vstring(ic1:ic2) = temp_string ic1 = ic2 + 1 end if end if if (ico2 .gt. 0 .or. ice .ne. 0) then ! Conductivity will be written if (xon) then write (temp_string, '(a,1p,g14.6)') dls, thx(i) ic2 = ic2 + len_trim(temp_string) vstring(ic1:ic2) = temp_string ic1 = ic2 + 1 end if if (yon) then write (temp_string, '(a,1p,g14.6)') dls, thy(i) ic2 = ic2 + len_trim(temp_string) vstring(ic1:ic2) = temp_string ic1 = ic2 + 1 end if if (zon) then write (temp_string, '(a,1p,g14.6)') dls, thz(i) ic2 = ic2 + len_trim(temp_string) vstring(ic1:ic2) = temp_string ic1 = ic2 + 1 end if end if ! Porosity, bulk density, and specific heat will be written write (temp_string, '(3(a,1p,g14.6))') dls, ps(i), ! & dls, denr(i) & dls, denr(i), dls, cpr(i) ic2 = ic2 + len_trim(temp_string) vstring(ic1:ic2) = temp_string ic1 = ic2 + 1 if (irdof .ne. 13) then ! 
Capillary pressure will be written write (temp_string, '(a,1p,g14.6)') dls, pcp(i) ic2 = ic2 + len_trim(temp_string) vstring(ic1:ic2) = temp_string ic1 = ic2 + 1 end if if (rlp_flag .eq. 1) then ! rlp and cap model flags will be written if rlp_flag .eq. 1 write (temp_string, '(2(a,i4))') dls, irlp(i), & dls, icap(i) ic2 = ic2 + len_trim(temp_string) vstring(ic1:ic2) = temp_string ic1 = ic2 + 1 end if ! Kd will be written if(iccen .eq. 1 .and. iokd .eq. 1 .and. rxn_flag .eq.0) then do i = 1, nspeci write (temp_string, '(a,g14.6)') dls, & a1adfl(i,itrc(j)) ic2 = ic2 + len_trim(temp_string) vstring(ic1:ic2) = temp_string ic1 = ic2 + 1 end do end if length = len_trim(vstring) write (lu, '(a)') vstring(1:length) end do end if iocord = iocord_temp 100 format(a, ', ', a) 200 format("( ' ", a, "', a)") 300 format("('",' "', "', a, '",'"',"')") return end
Here’s a chance to order your entry programmes for THE EVENT of the YEAR!! We have a really good line-up, with a wide range of acts from performers ranging in age from 6 to 60+. Why not book your seat for the event now?! Posted on Wednesday, 30 March 2016 by Editor
State Before: ι : Type u_3 ι' : Type ?u.157559 R : Type u_1 R₂ : Type ?u.157565 K : Type ?u.157568 M : Type u_2 M' : Type ?u.157574 M'' : Type ?u.157577 V : Type u V' : Type ?u.157582 inst✝⁴ : Semiring R inst✝³ : AddCommMonoid M inst✝² : Module R M inst✝¹ : AddCommMonoid M' inst✝ : Module R M' b b₁ : Basis ι R M i : ι c : R x : M ⊢ ↑(sumCoords b) = fun m => ∑ᶠ (i : ι), ↑(coord b i) m State After: case h ι : Type u_3 ι' : Type ?u.157559 R : Type u_1 R₂ : Type ?u.157565 K : Type ?u.157568 M : Type u_2 M' : Type ?u.157574 M'' : Type ?u.157577 V : Type u V' : Type ?u.157582 inst✝⁴ : Semiring R inst✝³ : AddCommMonoid M inst✝² : Module R M inst✝¹ : AddCommMonoid M' inst✝ : Module R M' b b₁ : Basis ι R M i : ι c : R x m : M ⊢ ↑(sumCoords b) m = ∑ᶠ (i : ι), ↑(coord b i) m Tactic: ext m State Before: case h ι : Type u_3 ι' : Type ?u.157559 R : Type u_1 R₂ : Type ?u.157565 K : Type ?u.157568 M : Type u_2 M' : Type ?u.157574 M'' : Type ?u.157577 V : Type u V' : Type ?u.157582 inst✝⁴ : Semiring R inst✝³ : AddCommMonoid M inst✝² : Module R M inst✝¹ : AddCommMonoid M' inst✝ : Module R M' b b₁ : Basis ι R M i : ι c : R x m : M ⊢ ↑(sumCoords b) m = ∑ᶠ (i : ι), ↑(coord b i) m State After: no goals Tactic: simp only [Basis.sumCoords, Basis.coord, Finsupp.lapply_apply, LinearMap.id_coe, LinearEquiv.coe_coe, Function.comp_apply, Finsupp.coe_lsum, LinearMap.coe_comp, finsum_eq_sum _ (b.repr m).finite_support, Finsupp.sum, Finset.finite_toSet_toFinset, id.def, Finsupp.fun_support_eq]
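In conventional notation, the goal closed by `ext m` and `simp` above is the pointwise identity (restated here for readability, under the file's definitions of `sumCoords` and `coord`):

$$(\mathrm{sumCoords}\, b)(m) = \sum_{i : \iota}^{\mathrm{fin}} (\mathrm{coord}\, b\, i)(m),$$

where the right-hand side is the finite sum (`finsum`), well defined because the representation `b.repr m` has finite support, as the `finsum_eq_sum _ (b.repr m).finite_support` lemma in the `simp` set records.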
State Before: 𝕜 : Type u_2 E : Type u_1 F : Type ?u.28879 G : Type ?u.28882 ι : Type ?u.28885 π : ι → Type ?u.28890 inst✝² : OrderedSemiring 𝕜 inst✝¹ : AddCommMonoid E inst✝ : MulActionWithZero 𝕜 E x y : E ⊢ 1 • x + 0 • y = x State After: no goals Tactic: rw [zero_smul, one_smul, add_zero]
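Spelled out, the single `rw` above performs three rewrites in sequence (a restatement of the tactic, nothing new): `zero_smul` turns $0 \cdot y$ into $0$, `one_smul` turns $1 \cdot x$ into $x$, and `add_zero` finishes:

$$1 \cdot x + 0 \cdot y = 1 \cdot x + 0 = x + 0 = x.$$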
From iris.bi Require Export notation. From iris.algebra Require Export ofe. From iris Require Import options. Set Primitive Projections. Section bi_mixin. Context {PROP : Type} `{Dist PROP, Equiv PROP}. Context (bi_entails : PROP → PROP → Prop). Context (bi_emp : PROP). Context (bi_pure : Prop → PROP). Context (bi_and : PROP → PROP → PROP). Context (bi_or : PROP → PROP → PROP). Context (bi_impl : PROP → PROP → PROP). Context (bi_forall : ∀ A, (A → PROP) → PROP). Context (bi_exist : ∀ A, (A → PROP) → PROP). Context (bi_sep : PROP → PROP → PROP). Context (bi_wand : PROP → PROP → PROP). Context (bi_persistently : PROP → PROP). Bind Scope bi_scope with PROP. Local Infix "⊢" := bi_entails. Local Notation "'emp'" := bi_emp : bi_scope. Local Notation "'True'" := (bi_pure True) : bi_scope. Local Notation "'False'" := (bi_pure False) : bi_scope. Local Notation "'⌜' φ '⌝'" := (bi_pure φ%type%stdpp) : bi_scope. Local Infix "∧" := bi_and : bi_scope. Local Infix "∨" := bi_or : bi_scope. Local Infix "→" := bi_impl : bi_scope. Local Notation "∀ x .. y , P" := (bi_forall _ (λ x, .. (bi_forall _ (λ y, P%I)) ..)) : bi_scope. Local Notation "∃ x .. y , P" := (bi_exist _ (λ x, .. (bi_exist _ (λ y, P%I)) ..)) : bi_scope. Local Infix "∗" := bi_sep : bi_scope. Local Infix "-∗" := bi_wand : bi_scope. Local Notation "'<pers>' P" := (bi_persistently P) : bi_scope. (** * Axioms for a general BI (logic of bunched implications) *) (** The following axioms are satisfied by both affine and linear BIs, and BIs that combine both kinds of resources. In particular, we have an "ordered RA" model satisfying all these axioms. For this model, we extend RAs with an arbitrary partial order, and up-close resources wrt. that order (instead of extension order). We demand composition to be monotone wrt. the order: [x1 ≼ x2 → x1 ⋅ y ≼ x2 ⋅ y]. We define [emp := λ r, ε ≼ r]; persistently is still defined with the core: [persistently P := λ r, P (core r)]. This is upclosed because the core is monotone. 
*) Record BiMixin := { bi_mixin_entails_po : PreOrder bi_entails; bi_mixin_equiv_spec P Q : (P ≡ Q) ↔ (P ⊢ Q) ∧ (Q ⊢ P); (** Non-expansiveness *) bi_mixin_pure_ne n : Proper (iff ==> dist n) bi_pure; bi_mixin_and_ne : NonExpansive2 bi_and; bi_mixin_or_ne : NonExpansive2 bi_or; bi_mixin_impl_ne : NonExpansive2 bi_impl; bi_mixin_forall_ne A n : Proper (pointwise_relation _ (dist n) ==> dist n) (bi_forall A); bi_mixin_exist_ne A n : Proper (pointwise_relation _ (dist n) ==> dist n) (bi_exist A); bi_mixin_sep_ne : NonExpansive2 bi_sep; bi_mixin_wand_ne : NonExpansive2 bi_wand; bi_mixin_persistently_ne : NonExpansive bi_persistently; (** Higher-order logic *) bi_mixin_pure_intro (φ : Prop) P : φ → P ⊢ ⌜ φ ⌝; bi_mixin_pure_elim' (φ : Prop) P : (φ → True ⊢ P) → ⌜ φ ⌝ ⊢ P; bi_mixin_and_elim_l P Q : P ∧ Q ⊢ P; bi_mixin_and_elim_r P Q : P ∧ Q ⊢ Q; bi_mixin_and_intro P Q R : (P ⊢ Q) → (P ⊢ R) → P ⊢ Q ∧ R; bi_mixin_or_intro_l P Q : P ⊢ P ∨ Q; bi_mixin_or_intro_r P Q : Q ⊢ P ∨ Q; bi_mixin_or_elim P Q R : (P ⊢ R) → (Q ⊢ R) → P ∨ Q ⊢ R; bi_mixin_impl_intro_r P Q R : (P ∧ Q ⊢ R) → P ⊢ Q → R; bi_mixin_impl_elim_l' P Q R : (P ⊢ Q → R) → P ∧ Q ⊢ R; bi_mixin_forall_intro {A} P (Ψ : A → PROP) : (∀ a, P ⊢ Ψ a) → P ⊢ ∀ a, Ψ a; bi_mixin_forall_elim {A} {Ψ : A → PROP} a : (∀ a, Ψ a) ⊢ Ψ a; bi_mixin_exist_intro {A} {Ψ : A → PROP} a : Ψ a ⊢ ∃ a, Ψ a; bi_mixin_exist_elim {A} (Φ : A → PROP) Q : (∀ a, Φ a ⊢ Q) → (∃ a, Φ a) ⊢ Q; (** BI connectives *) bi_mixin_sep_mono P P' Q Q' : (P ⊢ Q) → (P' ⊢ Q') → P ∗ P' ⊢ Q ∗ Q'; bi_mixin_emp_sep_1 P : P ⊢ emp ∗ P; bi_mixin_emp_sep_2 P : emp ∗ P ⊢ P; bi_mixin_sep_comm' P Q : P ∗ Q ⊢ Q ∗ P; bi_mixin_sep_assoc' P Q R : (P ∗ Q) ∗ R ⊢ P ∗ (Q ∗ R); bi_mixin_wand_intro_r P Q R : (P ∗ Q ⊢ R) → P ⊢ Q -∗ R; bi_mixin_wand_elim_l' P Q R : (P ⊢ Q -∗ R) → P ∗ Q ⊢ R; (** Persistently *) (* In the ordered RA model: Holds without further assumptions. *) bi_mixin_persistently_mono P Q : (P ⊢ Q) → <pers> P ⊢ <pers> Q; (* In the ordered RA model: `core` is idempotent *) bi_mixin_persistently_idemp_2 P : <pers> P ⊢ <pers> <pers> P; (* In the ordered RA model: [ε ≼ core x]. *) bi_mixin_persistently_emp_2 : emp ⊢ <pers> emp; bi_mixin_persistently_forall_2 {A} (Ψ : A → PROP) : (∀ a, <pers> (Ψ a)) ⊢ <pers> (∀ a, Ψ a); bi_mixin_persistently_exist_1 {A} (Ψ : A → PROP) : <pers> (∃ a, Ψ a) ⊢ ∃ a, <pers> (Ψ a); (* In the ordered RA model: [core x ≼ core (x ⋅ y)]. *) bi_mixin_persistently_absorbing P Q : <pers> P ∗ Q ⊢ <pers> P; (* In the ordered RA model: [x ⋅ core x = x]. *) bi_mixin_persistently_and_sep_elim P Q : <pers> P ∧ Q ⊢ P ∗ Q; }. (** We equip any BI with a later modality. This avoids an additional layer in the BI hierarchy and improves performance significantly (see Iris issue #303). For non step-indexed BIs the later modality can simply be defined as the identity function, as the Löb axiom or contractiveness of later is not part of [BiLaterMixin]. For step-indexed BIs one should separately prove an instance of the class [BiLaterContractive PROP] or [BiLöb PROP]. (Note that there is an instance [BiLaterContractive PROP → BiLöb PROP] in [derived_laws_later].) For non step-indexed BIs one can get a "free" instance of [BiLaterMixin] using the smart constructor [bi_later_mixin_id] below. *) Context (bi_later : PROP → PROP). Local Notation "▷ P" := (bi_later P) : bi_scope. 
Record BiLaterMixin := { bi_mixin_later_ne : NonExpansive bi_later; bi_mixin_later_mono P Q : (P ⊢ Q) → ▷ P ⊢ ▷ Q; bi_mixin_later_intro P : P ⊢ ▷ P; bi_mixin_later_forall_2 {A} (Φ : A → PROP) : (∀ a, ▷ Φ a) ⊢ ▷ ∀ a, Φ a; bi_mixin_later_exist_false {A} (Φ : A → PROP) : (▷ ∃ a, Φ a) ⊢ ▷ False ∨ (∃ a, ▷ Φ a); bi_mixin_later_sep_1 P Q : ▷ (P ∗ Q) ⊢ ▷ P ∗ ▷ Q; bi_mixin_later_sep_2 P Q : ▷ P ∗ ▷ Q ⊢ ▷ (P ∗ Q); bi_mixin_later_persistently_1 P : ▷ <pers> P ⊢ <pers> ▷ P; bi_mixin_later_persistently_2 P : <pers> ▷ P ⊢ ▷ <pers> P; bi_mixin_later_false_em P : ▷ P ⊢ ▷ False ∨ (▷ False → P); }. Lemma bi_later_mixin_id : (∀ (P : PROP), (▷ P)%I = P) → BiMixin → BiLaterMixin. Proof. intros Hlater Hbi. pose proof (bi_mixin_entails_po Hbi). split; repeat intro; rewrite ?Hlater ?Hequiv //. - apply (bi_mixin_forall_intro Hbi)=> a. etrans; [apply (bi_mixin_forall_elim Hbi a)|]. by rewrite Hlater. - etrans; [|apply (bi_mixin_or_intro_r Hbi)]. apply (bi_mixin_exist_elim Hbi)=> a. etrans; [|apply (bi_mixin_exist_intro Hbi a)]. by rewrite /= Hlater. - etrans; [|apply (bi_mixin_or_intro_r Hbi)]. apply (bi_mixin_impl_intro_r Hbi), (bi_mixin_and_elim_l Hbi). Qed. End bi_mixin. Structure bi := Bi { bi_car :> Type; bi_dist : Dist bi_car; bi_equiv : Equiv bi_car; bi_entails : bi_car → bi_car → Prop; bi_emp : bi_car; bi_pure : Prop → bi_car; bi_and : bi_car → bi_car → bi_car; bi_or : bi_car → bi_car → bi_car; bi_impl : bi_car → bi_car → bi_car; bi_forall : ∀ A, (A → bi_car) → bi_car; bi_exist : ∀ A, (A → bi_car) → bi_car; bi_sep : bi_car → bi_car → bi_car; bi_wand : bi_car → bi_car → bi_car; bi_persistently : bi_car → bi_car; bi_later : bi_car → bi_car; bi_ofe_mixin : OfeMixin bi_car; bi_cofe : Cofe (OfeT bi_car bi_ofe_mixin); bi_bi_mixin : BiMixin bi_entails bi_emp bi_pure bi_and bi_or bi_impl bi_forall bi_exist bi_sep bi_wand bi_persistently; bi_bi_later_mixin : BiLaterMixin bi_entails bi_pure bi_or bi_impl bi_forall bi_exist bi_sep bi_persistently bi_later; }. Coercion bi_ofeO (PROP : bi) : ofeT := OfeT PROP (bi_ofe_mixin PROP). Canonical Structure bi_ofeO. Global Instance bi_cofe' (PROP : bi) : Cofe PROP. Proof. apply bi_cofe. Qed. Instance: Params (@bi_entails) 1 := {}. Instance: Params (@bi_emp) 1 := {}. Instance: Params (@bi_pure) 1 := {}. Instance: Params (@bi_and) 1 := {}. Instance: Params (@bi_or) 1 := {}. Instance: Params (@bi_impl) 1 := {}. Instance: Params (@bi_forall) 2 := {}. Instance: Params (@bi_exist) 2 := {}. Instance: Params (@bi_sep) 1 := {}. Instance: Params (@bi_wand) 1 := {}. Instance: Params (@bi_persistently) 1 := {}. Instance: Params (@bi_later) 1 := {}. Arguments bi_car : simpl never. Arguments bi_dist : simpl never. Arguments bi_equiv : simpl never. Arguments bi_entails {PROP} _%I _%I : simpl never, rename. Arguments bi_emp {PROP} : simpl never, rename. Arguments bi_pure {PROP} _%stdpp : simpl never, rename. Arguments bi_and {PROP} _%I _%I : simpl never, rename. Arguments bi_or {PROP} _%I _%I : simpl never, rename. Arguments bi_impl {PROP} _%I _%I : simpl never, rename. Arguments bi_forall {PROP _} _%I : simpl never, rename. Arguments bi_exist {PROP _} _%I : simpl never, rename. Arguments bi_sep {PROP} _%I _%I : simpl never, rename. Arguments bi_wand {PROP} _%I _%I : simpl never, rename. Arguments bi_persistently {PROP} _%I : simpl never, rename. Arguments bi_later {PROP} _%I : simpl never, rename. Hint Extern 0 (bi_entails _ _) => reflexivity : core. Instance bi_rewrite_relation (PROP : bi) : RewriteRelation (@bi_entails PROP) := {}. 
Instance bi_inhabited {PROP : bi} : Inhabited PROP := populate (bi_pure True). Notation "P ⊢ Q" := (bi_entails P%I Q%I) : stdpp_scope. Notation "P '⊢@{' PROP } Q" := (bi_entails (PROP:=PROP) P%I Q%I) (only parsing) : stdpp_scope. Notation "(⊢)" := bi_entails (only parsing) : stdpp_scope. Notation "'(⊢@{' PROP } )" := (bi_entails (PROP:=PROP)) (only parsing) : stdpp_scope. Notation "P ⊣⊢ Q" := (equiv (A:=bi_car _) P%I Q%I) : stdpp_scope. Notation "P '⊣⊢@{' PROP } Q" := (equiv (A:=bi_car PROP) P%I Q%I) (only parsing) : stdpp_scope. Notation "(⊣⊢)" := (equiv (A:=bi_car _)) (only parsing) : stdpp_scope. Notation "'(⊣⊢@{' PROP } )" := (equiv (A:=bi_car PROP)) (only parsing) : stdpp_scope. Notation "( P ⊣⊢.)" := (equiv (A:=bi_car _) P) (only parsing) : stdpp_scope. Notation "(.⊣⊢ Q )" := (λ P, P ≡@{bi_car _} Q) (only parsing) : stdpp_scope. Notation "P -∗ Q" := (P ⊢ Q) : stdpp_scope. Notation "'emp'" := (bi_emp) : bi_scope. Notation "'⌜' φ '⌝'" := (bi_pure φ%type%stdpp) : bi_scope. Notation "'True'" := (bi_pure True) : bi_scope. Notation "'False'" := (bi_pure False) : bi_scope. Infix "∧" := bi_and : bi_scope. Notation "(∧)" := bi_and (only parsing) : bi_scope. Infix "∨" := bi_or : bi_scope. Notation "(∨)" := bi_or (only parsing) : bi_scope. Infix "→" := bi_impl : bi_scope. Notation "¬ P" := (P → False)%I : bi_scope. Infix "∗" := bi_sep : bi_scope. Notation "(∗)" := bi_sep (only parsing) : bi_scope. Notation "P -∗ Q" := (bi_wand P Q) : bi_scope. Notation "∀ x .. y , P" := (bi_forall (λ x, .. (bi_forall (λ y, P)) ..)%I) : bi_scope. Notation "∃ x .. y , P" := (bi_exist (λ x, .. (bi_exist (λ y, P)) ..)%I) : bi_scope. Notation "'<pers>' P" := (bi_persistently P) : bi_scope. Notation "▷ P" := (bi_later P) : bi_scope. Definition bi_emp_valid {PROP : bi} (P : PROP) : Prop := emp ⊢ P. Arguments bi_emp_valid {_} _%I : simpl never. Typeclasses Opaque bi_emp_valid. Notation "⊢ Q" := (bi_emp_valid Q%I) : stdpp_scope. Notation "'⊢@{' PROP } Q" := (bi_emp_valid (PROP:=PROP) Q%I) (only parsing) : stdpp_scope. (** Work around parsing issues: see [notation.v] for details. *) Notation "'(⊢@{' PROP } Q )" := (bi_emp_valid (PROP:=PROP) Q%I) (only parsing) : stdpp_scope. Notation "(.⊢ Q )" := (λ P, P ⊢ Q) (only parsing) : stdpp_scope. Notation "( P ⊢.)" := (bi_entails P) (only parsing) : stdpp_scope. Module bi. Section bi_laws. Context {PROP : bi}. Implicit Types φ : Prop. Implicit Types P Q R : PROP. Implicit Types A : Type. (* About the entailment *) Global Instance entails_po : PreOrder (@bi_entails PROP). Proof. eapply bi_mixin_entails_po, bi_bi_mixin. Qed. Lemma equiv_spec P Q : P ≡ Q ↔ (P ⊢ Q) ∧ (Q ⊢ P). Proof. eapply bi_mixin_equiv_spec, bi_bi_mixin. Qed. (* Non-expansiveness *) Global Instance pure_ne n : Proper (iff ==> dist n) (@bi_pure PROP). Proof. eapply bi_mixin_pure_ne, bi_bi_mixin. Qed. Global Instance and_ne : NonExpansive2 (@bi_and PROP). Proof. eapply bi_mixin_and_ne, bi_bi_mixin. Qed. Global Instance or_ne : NonExpansive2 (@bi_or PROP). Proof. eapply bi_mixin_or_ne, bi_bi_mixin. Qed. Global Instance impl_ne : NonExpansive2 (@bi_impl PROP). Proof. eapply bi_mixin_impl_ne, bi_bi_mixin. Qed. Global Instance forall_ne A n : Proper (pointwise_relation _ (dist n) ==> dist n) (@bi_forall PROP A). Proof. eapply bi_mixin_forall_ne, bi_bi_mixin. Qed. Global Instance exist_ne A n : Proper (pointwise_relation _ (dist n) ==> dist n) (@bi_exist PROP A). Proof. eapply bi_mixin_exist_ne, bi_bi_mixin. Qed. Global Instance sep_ne : NonExpansive2 (@bi_sep PROP). Proof. eapply bi_mixin_sep_ne, bi_bi_mixin. Qed. 
Global Instance wand_ne : NonExpansive2 (@bi_wand PROP). Proof. eapply bi_mixin_wand_ne, bi_bi_mixin. Qed. Global Instance persistently_ne : NonExpansive (@bi_persistently PROP). Proof. eapply bi_mixin_persistently_ne, bi_bi_mixin. Qed. (* Higher-order logic *) Lemma pure_intro (φ : Prop) P : φ → P ⊢ ⌜ φ ⌝. Proof. eapply bi_mixin_pure_intro, bi_bi_mixin. Qed. Lemma pure_elim' (φ : Prop) P : (φ → True ⊢ P) → ⌜ φ ⌝ ⊢ P. Proof. eapply bi_mixin_pure_elim', bi_bi_mixin. Qed. Lemma and_elim_l P Q : P ∧ Q ⊢ P. Proof. eapply bi_mixin_and_elim_l, bi_bi_mixin. Qed. Lemma and_elim_r P Q : P ∧ Q ⊢ Q. Proof. eapply bi_mixin_and_elim_r, bi_bi_mixin. Qed. Lemma and_intro P Q R : (P ⊢ Q) → (P ⊢ R) → P ⊢ Q ∧ R. Proof. eapply bi_mixin_and_intro, bi_bi_mixin. Qed. Lemma or_intro_l P Q : P ⊢ P ∨ Q. Proof. eapply bi_mixin_or_intro_l, bi_bi_mixin. Qed. Lemma or_intro_r P Q : Q ⊢ P ∨ Q. Proof. eapply bi_mixin_or_intro_r, bi_bi_mixin. Qed. Lemma or_elim P Q R : (P ⊢ R) → (Q ⊢ R) → P ∨ Q ⊢ R. Proof. eapply bi_mixin_or_elim, bi_bi_mixin. Qed. Lemma impl_intro_r P Q R : (P ∧ Q ⊢ R) → P ⊢ Q → R. Proof. eapply bi_mixin_impl_intro_r, bi_bi_mixin. Qed. Lemma impl_elim_l' P Q R : (P ⊢ Q → R) → P ∧ Q ⊢ R. Proof. eapply bi_mixin_impl_elim_l', bi_bi_mixin. Qed. Lemma forall_intro {A} P (Ψ : A → PROP) : (∀ a, P ⊢ Ψ a) → P ⊢ ∀ a, Ψ a. Proof. eapply bi_mixin_forall_intro, bi_bi_mixin. Qed. Lemma forall_elim {A} {Ψ : A → PROP} a : (∀ a, Ψ a) ⊢ Ψ a. Proof. eapply (bi_mixin_forall_elim bi_entails), bi_bi_mixin. Qed. Lemma exist_intro {A} {Ψ : A → PROP} a : Ψ a ⊢ ∃ a, Ψ a. Proof. eapply bi_mixin_exist_intro, bi_bi_mixin. Qed. Lemma exist_elim {A} (Φ : A → PROP) Q : (∀ a, Φ a ⊢ Q) → (∃ a, Φ a) ⊢ Q. Proof. eapply bi_mixin_exist_elim, bi_bi_mixin. Qed. (* BI connectives *) Lemma sep_mono P P' Q Q' : (P ⊢ Q) → (P' ⊢ Q') → P ∗ P' ⊢ Q ∗ Q'. Proof. eapply bi_mixin_sep_mono, bi_bi_mixin. Qed. Lemma emp_sep_1 P : P ⊢ emp ∗ P. Proof. eapply bi_mixin_emp_sep_1, bi_bi_mixin. Qed. Lemma emp_sep_2 P : emp ∗ P ⊢ P. Proof. eapply bi_mixin_emp_sep_2, bi_bi_mixin. Qed. Lemma sep_comm' P Q : P ∗ Q ⊢ Q ∗ P. Proof. eapply (bi_mixin_sep_comm' bi_entails), bi_bi_mixin. Qed. Lemma sep_assoc' P Q R : (P ∗ Q) ∗ R ⊢ P ∗ (Q ∗ R). Proof. eapply bi_mixin_sep_assoc', bi_bi_mixin. Qed. Lemma wand_intro_r P Q R : (P ∗ Q ⊢ R) → P ⊢ Q -∗ R. Proof. eapply bi_mixin_wand_intro_r, bi_bi_mixin. Qed. Lemma wand_elim_l' P Q R : (P ⊢ Q -∗ R) → P ∗ Q ⊢ R. Proof. eapply bi_mixin_wand_elim_l', bi_bi_mixin. Qed. (* Persistently *) Lemma persistently_mono P Q : (P ⊢ Q) → <pers> P ⊢ <pers> Q. Proof. eapply bi_mixin_persistently_mono, bi_bi_mixin. Qed. Lemma persistently_idemp_2 P : <pers> P ⊢ <pers> <pers> P. Proof. eapply bi_mixin_persistently_idemp_2, bi_bi_mixin. Qed. Lemma persistently_emp_2 : emp ⊢@{PROP} <pers> emp. Proof. eapply bi_mixin_persistently_emp_2, bi_bi_mixin. Qed. Lemma persistently_forall_2 {A} (Ψ : A → PROP) : (∀ a, <pers> (Ψ a)) ⊢ <pers> (∀ a, Ψ a). Proof. eapply bi_mixin_persistently_forall_2, bi_bi_mixin. Qed. Lemma persistently_exist_1 {A} (Ψ : A → PROP) : <pers> (∃ a, Ψ a) ⊢ ∃ a, <pers> (Ψ a). Proof. eapply bi_mixin_persistently_exist_1, bi_bi_mixin. Qed. Lemma persistently_absorbing P Q : <pers> P ∗ Q ⊢ <pers> P. Proof. eapply (bi_mixin_persistently_absorbing bi_entails), bi_bi_mixin. Qed. Lemma persistently_and_sep_elim P Q : <pers> P ∧ Q ⊢ P ∗ Q. Proof. eapply (bi_mixin_persistently_and_sep_elim bi_entails), bi_bi_mixin. Qed. (* Later *) Global Instance later_ne : NonExpansive (@bi_later PROP). Proof. eapply bi_mixin_later_ne, bi_bi_later_mixin. Qed. 
Lemma later_mono P Q : (P ⊢ Q) → ▷ P ⊢ ▷ Q. Proof. eapply bi_mixin_later_mono, bi_bi_later_mixin. Qed. Lemma later_intro P : P ⊢ ▷ P. Proof. eapply bi_mixin_later_intro, bi_bi_later_mixin. Qed. Lemma later_forall_2 {A} (Φ : A → PROP) : (∀ a, ▷ Φ a) ⊢ ▷ ∀ a, Φ a. Proof. eapply bi_mixin_later_forall_2, bi_bi_later_mixin. Qed. Lemma later_exist_false {A} (Φ : A → PROP) : (▷ ∃ a, Φ a) ⊢ ▷ False ∨ (∃ a, ▷ Φ a). Proof. eapply bi_mixin_later_exist_false, bi_bi_later_mixin. Qed. Lemma later_sep_1 P Q : ▷ (P ∗ Q) ⊢ ▷ P ∗ ▷ Q. Proof. eapply bi_mixin_later_sep_1, bi_bi_later_mixin. Qed. Lemma later_sep_2 P Q : ▷ P ∗ ▷ Q ⊢ ▷ (P ∗ Q). Proof. eapply bi_mixin_later_sep_2, bi_bi_later_mixin. Qed. Lemma later_persistently_1 P : ▷ <pers> P ⊢ <pers> ▷ P. Proof. eapply (bi_mixin_later_persistently_1 bi_entails), bi_bi_later_mixin. Qed. Lemma later_persistently_2 P : <pers> ▷ P ⊢ ▷ <pers> P. Proof. eapply (bi_mixin_later_persistently_2 bi_entails), bi_bi_later_mixin. Qed. Lemma later_false_em P : ▷ P ⊢ ▷ False ∨ (▷ False → P). Proof. eapply bi_mixin_later_false_em, bi_bi_later_mixin. Qed. End bi_laws. End bi.
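As a small usage sketch (illustrative, not part of the original interface file; the lemma name `True_and_elim` is mine), the laws exported by the [bi] module above suffice to derive simple entailments without unfolding any mixin:

(* A minimal sketch, assuming the [bi] interface and notations above are in scope. *)
Section example.
  Context {PROP : bi}.

  (* [True ∧ P ⊢ P] follows directly from the primitive right conjunction eliminator. *)
  Lemma True_and_elim (P : PROP) : True ∧ P ⊢ P.
  Proof. apply bi.and_elim_r. Qed.
End example.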
lemma s_le_p: "a \<in> s \<Longrightarrow> a j \<le> p"
theory Finite_Linear_Ops imports Finite_Linear_Model begin subsection \<open> Operators \<close> definition Div :: "'e fltraces" where "Div = {\<langle>\<bullet>\<rangle>\<^sub>\<F>\<^sub>\<L>}" lemma FL2_Div [simp]: "FL2 Div" unfolding FL2_def Div_def apply auto by (metis Finite_Linear_Model.last.simps(1) amember.simps(1) concat_FL_last_not_bullet_absorb last_bullet_then_last_cons) definition Stop :: "'e fltraces" where "Stop = {\<langle>\<bullet>\<rangle>\<^sub>\<F>\<^sub>\<L>} \<union> {\<langle>[{}]\<^sub>\<F>\<^sub>\<L>\<rangle>\<^sub>\<F>\<^sub>\<L>}" lemma Stop_is_FL2 [simp]: "FL2 Stop" unfolding FL2_def Stop_def apply auto apply (metis Finite_Linear_Model.last.simps(1) acceptance.inject amember.elims(2) concat_FL_last_not_bullet_absorb empty_iff last_bullet_then_last_cons) by (metis Finite_Linear_Model.last.simps(1) amember.simps(1) concat_FL_last_not_bullet_absorb last_bullet_then_last_cons) definition prefixH :: "'e \<Rightarrow> 'e fltrace \<Rightarrow> 'e fltrace \<Rightarrow> bool" where "prefixH a aa X = (X = \<langle>[{a}]\<^sub>\<F>\<^sub>\<L>\<rangle>\<^sub>\<F>\<^sub>\<L> \<or> X = \<langle>\<bullet>\<rangle>\<^sub>\<F>\<^sub>\<L> \<or> X = ([{a}]\<^sub>\<F>\<^sub>\<L>,a)\<^sub>\<F>\<^sub>\<L> #\<^sub>\<F>\<^sub>\<L> aa \<or> X = (\<bullet>,a)\<^sub>\<F>\<^sub>\<L> #\<^sub>\<F>\<^sub>\<L> aa)" definition Prefix :: "'e \<Rightarrow> 'e fltraces \<Rightarrow> 'e fltraces" (infixl "\<rightarrow>\<^sub>\<F>\<^sub>\<L>" 65) where "a \<rightarrow>\<^sub>\<F>\<^sub>\<L> P = {\<langle>\<bullet>\<rangle>\<^sub>\<F>\<^sub>\<L>} \<union> {\<langle>[{a}]\<^sub>\<F>\<^sub>\<L>\<rangle>\<^sub>\<F>\<^sub>\<L>} \<union> {([{a}]\<^sub>\<F>\<^sub>\<L>,a)\<^sub>\<F>\<^sub>\<L>#\<^sub>\<F>\<^sub>\<L>\<rho>| \<rho>. \<rho> \<in> P} \<union> {(\<bullet>,a)\<^sub>\<F>\<^sub>\<L>#\<^sub>\<F>\<^sub>\<L>\<rho>| \<rho>. \<rho> \<in> P}" definition PrefixAlt :: "'e \<Rightarrow> 'e fltraces \<Rightarrow> 'e fltraces" where "PrefixAlt a P = {x|s x. 
prefixH a s x \<and> s\<in>P}" (* lemma eq_acceptances [simp]: "([{aa}]\<^sub>\<F>\<^sub>\<L>,aa)\<^sub>\<F>\<^sub>\<L> = ([{a}]\<^sub>\<F>\<^sub>\<L>,a)\<^sub>\<F>\<^sub>\<L> \<longleftrightarrow> a = aa" apply auto by (simp add: acceptance_pair_eq) lemma unequal_acceptances [simp]: "([{aa}]\<^sub>\<F>\<^sub>\<L>,aa)\<^sub>\<F>\<^sub>\<L> \<noteq> (\<bullet>,a)\<^sub>\<F>\<^sub>\<L>" apply auto by (metis acceptance.distinct(1) acceptance_set amember.simps(2) singletonI) lemma unequal_acceptances_2 [simp]: "(\<bullet>,a)\<^sub>\<F>\<^sub>\<L> \<noteq> ([{aa}]\<^sub>\<F>\<^sub>\<L>,aa)\<^sub>\<F>\<^sub>\<L>" apply auto by (metis acceptance.distinct(1) acceptance_set amember.simps(2) singletonI) lemma eq_acceptances_bullet [simp]: "(\<bullet>,aa)\<^sub>\<F>\<^sub>\<L> = (\<bullet>,a)\<^sub>\<F>\<^sub>\<L> \<longleftrightarrow> aa = a" apply auto by (metis acceptance_event)*) lemma Prefix_PrefixAlt_eq: assumes "FL0 P" "FL1 P" shows "Prefix a P = PrefixAlt a P" using assms unfolding Prefix_def PrefixAlt_def prefixH_def apply auto using FL0_def apply fastforce using FL0_def by fastforce definition IntChoice :: "'e fltraces \<Rightarrow> 'e fltraces \<Rightarrow> 'e fltraces" (infixl "\<sqinter>\<^sub>\<F>\<^sub>\<L>" 65) where "P \<sqinter>\<^sub>\<F>\<^sub>\<L> Q \<equiv> P \<union> Q" fun ExtChoiceH :: "'e fltrace \<Rightarrow> 'e fltrace \<Rightarrow> 'e fltrace \<Rightarrow> bool" where "ExtChoiceH \<langle>A\<rangle>\<^sub>\<F>\<^sub>\<L> \<langle>B\<rangle>\<^sub>\<F>\<^sub>\<L> X = (X = \<langle>A \<union>\<^sub>\<F>\<^sub>\<L> B\<rangle>\<^sub>\<F>\<^sub>\<L>)" | "ExtChoiceH (A #\<^sub>\<F>\<^sub>\<L> aa) (B #\<^sub>\<F>\<^sub>\<L> bb) X = (X = ((acceptance(A) \<union>\<^sub>\<F>\<^sub>\<L> acceptance(B),event(A))\<^sub>\<F>\<^sub>\<L> #\<^sub>\<F>\<^sub>\<L> aa) \<or> X = ((acceptance(A) \<union>\<^sub>\<F>\<^sub>\<L> acceptance(B),event(B))\<^sub>\<F>\<^sub>\<L> #\<^sub>\<F>\<^sub>\<L> bb))" | "ExtChoiceH \<langle>A\<rangle>\<^sub>\<F>\<^sub>\<L> (B #\<^sub>\<F>\<^sub>\<L> bb) X = (X = \<langle>A \<union>\<^sub>\<F>\<^sub>\<L> acceptance(B)\<rangle>\<^sub>\<F>\<^sub>\<L> \<or> X = ((A \<union>\<^sub>\<F>\<^sub>\<L> acceptance(B),event(B))\<^sub>\<F>\<^sub>\<L> #\<^sub>\<F>\<^sub>\<L> bb))" | "ExtChoiceH (A #\<^sub>\<F>\<^sub>\<L> aa) \<langle>B\<rangle>\<^sub>\<F>\<^sub>\<L> X = (X = \<langle>acceptance(A) \<union>\<^sub>\<F>\<^sub>\<L> B\<rangle>\<^sub>\<F>\<^sub>\<L> \<or> X = ((acceptance(A) \<union>\<^sub>\<F>\<^sub>\<L> B,event(A))\<^sub>\<F>\<^sub>\<L> #\<^sub>\<F>\<^sub>\<L> aa))" definition ExtChoice :: "'e fltraces \<Rightarrow> 'e fltraces \<Rightarrow> 'e fltraces" (infixl "\<box>\<^sub>\<F>\<^sub>\<L>" 65) where "P \<box>\<^sub>\<F>\<^sub>\<L> Q = {X| X A B. 
ExtChoiceH A B X \<and> A \<in> P \<and> B \<in> Q}" fun HideAcceptance :: "'e acceptance \<Rightarrow> 'e set \<Rightarrow> 'e acceptance" where "HideAcceptance \<bullet> X = \<bullet>" | "HideAcceptance [A]\<^sub>\<F>\<^sub>\<L> X = (if A \<inter> X = {} then [A]\<^sub>\<F>\<^sub>\<L> else \<bullet>)" fun HideFL :: "'e fltrace \<Rightarrow> 'e set \<Rightarrow> 'e fltrace" where "HideFL \<langle>A\<rangle>\<^sub>\<F>\<^sub>\<L> X = \<langle>HideAcceptance A X\<rangle>\<^sub>\<F>\<^sub>\<L>" | "HideFL (A #\<^sub>\<F>\<^sub>\<L> aa) X = (if event(A) \<in> X then (HideFL aa X) else (HideAcceptance (acceptance(A)) X,event(A))\<^sub>\<F>\<^sub>\<L> #\<^sub>\<F>\<^sub>\<L> (HideFL aa X))" definition Hiding :: "'e fltraces \<Rightarrow> 'e set \<Rightarrow> 'e fltraces" (infixl "\\\<^sub>\<F>\<^sub>\<L>" 65) where "P \\\<^sub>\<F>\<^sub>\<L> X = {HideFL s X|s. s \<in> P}" lemma ExtChoiceH_bullet: assumes "ExtChoiceH \<langle>\<bullet>\<rangle>\<^sub>\<F>\<^sub>\<L> B x" "B \<in> P" "FL1 P" shows "x \<in> P" using assms apply (cases B, auto) apply (metis FL0_FL1_bullet_in_so aunion.simps(1) unionA_sym) using acceptance_bullet_event_FL1 by blast lemma ExtChoiceH_emptyset: assumes "ExtChoiceH \<langle>[{}]\<^sub>\<F>\<^sub>\<L>\<rangle>\<^sub>\<F>\<^sub>\<L> B x" "B \<in> P" "FL1 P" shows "x \<in> P" using assms apply (cases B, auto, case_tac x21, auto) apply (case_tac a, auto) apply (simp add: aevent_less_eq_FL1) by (case_tac x21, auto) (* lemma ExtChoice_Div_zero: assumes "FL0 P" "FL1 P" shows "Div \<box>\<^sub>\<F>\<^sub>\<L> P = Div" using assms unfolding Div_def ExtChoice_def apply auto apply (simp add: ExtChoiceH_bullet_then) using FL0_FL1_bullet_in by force *) lemma ExtChoiceH_exists: assumes "x \<in> P" shows "\<exists>B. (ExtChoiceH \<langle>\<bullet>\<rangle>\<^sub>\<F>\<^sub>\<L> B x \<or> ExtChoiceH \<langle>[{}]\<^sub>\<F>\<^sub>\<L>\<rangle>\<^sub>\<F>\<^sub>\<L> B x) \<and> B \<in> P" using assms proof (cases x) case (Acceptance x1) then show ?thesis proof (cases x1) case acnil then show ?thesis using Acceptance assms apply auto by (rule exI[where x="\<langle>\<bullet>\<rangle>\<^sub>\<F>\<^sub>\<L>"], auto) next case (acset x2) then show ?thesis using Acceptance assms apply auto by (rule exI[where x="\<langle>[x2]\<^sub>\<F>\<^sub>\<L>\<rangle>\<^sub>\<F>\<^sub>\<L>"], auto) qed next case (AEvent x21 x22) then show ?thesis proof (cases "acceptance(x21) = \<bullet>") case True then show ?thesis using AEvent assms apply auto apply (case_tac x21, auto) by (rule exI[where x="((\<bullet>,event(x21))\<^sub>\<F>\<^sub>\<L> #\<^sub>\<F>\<^sub>\<L> x22)"], auto) next case acceptance_not_bullet:False then obtain A b where Ab:"x21 = ([A]\<^sub>\<F>\<^sub>\<L>,b)\<^sub>\<F>\<^sub>\<L> \<and> b \<in>\<^sub>\<F>\<^sub>\<L> [A]\<^sub>\<F>\<^sub>\<L>" by (metis Rep_aevent_inverse acceptance.rep_eq amember.elims(2) event.rep_eq event_in_acceptance prod.collapse) then show ?thesis proof (cases "A = {}") case True then show ?thesis using acceptance_not_bullet AEvent Ab by auto next case False then show ?thesis using acceptance_not_bullet AEvent Ab assms by (intro exI[where x="(([A]\<^sub>\<F>\<^sub>\<L>,b)\<^sub>\<F>\<^sub>\<L> #\<^sub>\<F>\<^sub>\<L> x22)"], auto) qed qed qed lemma assumes "FL1 P" "x \<in> P" shows "(\<exists>B. ExtChoiceH \<langle>[{}]\<^sub>\<F>\<^sub>\<L>\<rangle>\<^sub>\<F>\<^sub>\<L> B x \<and> B \<in> P) \<or> (\<exists>B. 
ExtChoiceH \<langle>\<bullet>\<rangle>\<^sub>\<F>\<^sub>\<L> B x \<and> B \<in> P)" using assms apply auto apply (intro exI[where x="\<langle>\<bullet>\<rangle>\<^sub>\<F>\<^sub>\<L>"], auto) oops lemma ExtChoiceH_triple_refl: "ExtChoiceH x x x" apply (induct x rule:fltrace.induct, auto) by (case_tac x, auto, case_tac x1a, auto, case_tac a, auto) lemma ExtChoiceH_sym: "ExtChoiceH A B x = ExtChoiceH B A x" by (induct A B x rule:ExtChoiceH.induct, auto) lemma ExtChoice_refines_double: "P \<box>\<^sub>\<F>\<^sub>\<L> P \<sqsubseteq>\<^sub>\<F>\<^sub>\<L> P" unfolding ExtChoice_def apply auto using ExtChoiceH_triple_refl by blast (* lemma assumes "s \<le> t" "FL1 P" "FL1 Q" "ExtChoiceH A B t" "A \<in> P" "B \<in> Q" shows "\<exists>A B. ExtChoiceH A B s \<and> A \<in> P \<and> B \<in> Q" using assms proof (induct A B t arbitrary:s rule:ExtChoiceH.induct) case (1 A B X) then show ?case apply auto apply (cases s, auto, case_tac x1, auto) apply (rule exI[where x="\<langle>\<bullet>\<rangle>\<^sub>\<F>\<^sub>\<L>"]) apply (rule exI[where x="\<langle>\<bullet>\<rangle>\<^sub>\<F>\<^sub>\<L>"], auto) apply (cases A, auto, cases B, auto, cases B, auto) by (metis "1.prems"(4)) next case ExtChoiceH2:(2 A aa B bb X) then show ?case proof (induct s X rule:less_eq_fltrace.induct) case (1 x y) then show ?case using ExtChoiceH2 by auto next case (2 x y ys) then have "x \<le> acceptance(y)" using less_eq_fltrace.simps(2) by blast then show ?case using 2 apply (cases x, auto) apply (rule exI[where x="\<langle>\<bullet>\<rangle>\<^sub>\<F>\<^sub>\<L>"],rule exI[where x="\<langle>\<bullet>\<rangle>\<^sub>\<F>\<^sub>\<L>"], auto) apply (rule exI[where x="\<langle>\<bullet>\<rangle>\<^sub>\<F>\<^sub>\<L>"],rule exI[where x="\<langle>\<bullet>\<rangle>\<^sub>\<F>\<^sub>\<L>"], auto) apply (rule exI[where x="\<langle>acceptance(A)\<rangle>\<^sub>\<F>\<^sub>\<L>"]) apply (rule exI[where x="\<langle>acceptance(B)\<rangle>\<^sub>\<F>\<^sub>\<L>"]) apply auto using less_eq_acceptance.elims(2) apply force using FL_cons_acceptance apply blast using FL_cons_acceptance apply blast apply (rule exI[where x="\<langle>acceptance(A)\<rangle>\<^sub>\<F>\<^sub>\<L>"]) apply (rule exI[where x="\<langle>acceptance(B)\<rangle>\<^sub>\<F>\<^sub>\<L>"]) apply auto using less_eq_acceptance.elims(2) apply force using FL_cons_acceptance apply blast using FL_cons_acceptance by blast next case (3 x xs y ys) have "event A \<in>\<^sub>\<F>\<^sub>\<L> (acceptance A \<union>\<^sub>\<F>\<^sub>\<L> acceptance B) \<or> acceptance A \<union>\<^sub>\<F>\<^sub>\<L> acceptance B = \<bullet>" by (cases A, auto, cases B, auto, case_tac a, auto, case_tac aa, auto, case_tac a, auto) then have "x = (acceptance A \<union>\<^sub>\<F>\<^sub>\<L> acceptance B,event A)\<^sub>\<F>\<^sub>\<L> \<or> x = (\<bullet>,event A)\<^sub>\<F>\<^sub>\<L>" using 3 apply auto apply (cases x, auto) apply (metis acceptance_set amember.simps(1) dual_order.antisym less_eq_acceptance.elims(2) less_eq_aevent_def) apply (metis Un_iff acceptance.distinct(1) acceptance_event amember.simps(2) aunion.elims event_in_acceptance less_eq_aevent_def) apply (cases x, auto) sledgehammer[debug=true] then obtain pA pB xA where pAB: "xA \<le> xs \<and> pA \<le> (acceptance A,event A)\<^sub>\<F>\<^sub>\<L> \<and> pB \<le> (acceptance B,event A)\<^sub>\<F>\<^sub>\<L>" by auto (* then have "x = (acceptance pA \<union>\<^sub>\<F>\<^sub>\<L> acceptance pB,event pA)\<^sub>\<F>\<^sub>\<L>" using 3 apply auto apply (cases x, auto, case_tac a, auto, cases A, cases B, auto) *) then show ?case using 3 
apply auto apply (rule exI[where x="pA #\<^sub>\<F>\<^sub>\<L> xA"]) apply (rule exI[where x="pB #\<^sub>\<F>\<^sub>\<L> xA"], auto) next case (4 x xs y) then show ?case sorry qed next case (3 A B bb X) then obtain sA where sA: "sA \<le> A" by auto then show ?case using 3 apply (cases X, auto) apply (rule exI[where x="\<langle>sA\<rangle>\<^sub>\<F>\<^sub>\<L>"], cases A, auto, cases sA, auto) apply (rule exI[where x="\<langle>\<bullet>\<rangle>\<^sub>\<F>\<^sub>\<L>"], cases s, auto, case_tac x1, auto) apply (cases B, auto) apply (cases sA, auto, cases s, auto) proof (induct s X rule:less_eq_fltrace.induct) case (1 x y) then show ?case apply auto apply (cases x, auto) apply (rule exI[where x="\<langle>\<bullet>\<rangle>\<^sub>\<F>\<^sub>\<L>"]) apply (rule exI[where x="\<langle>\<bullet>\<rangle>\<^sub>\<F>\<^sub>\<L>"], auto) apply (cases A, auto, cases B, auto, case_tac a, auto) by (metis "1.prems"(4)) next case (2 x y ys) then show ?case apply auto apply (cases x, auto) apply (rule exI[where x="\<langle>\<bullet>\<rangle>\<^sub>\<F>\<^sub>\<L>"]) apply (rule exI[where x="\<langle>\<bullet>\<rangle>\<^sub>\<F>\<^sub>\<L>"], auto) apply (cases A, auto, cases B, auto, case_tac a, auto) by (metis ExtChoiceH.simps(3) acceptance_set amember.simps(2) aunion.simps(3)) next case (3 x xs y ys) then show ?case apply auto apply (cases "bb = \<langle>A \<union>\<^sub>\<F>\<^sub>\<L> acceptance B\<rangle>\<^sub>\<F>\<^sub>\<L>", auto) apply (rule exI[where x="\<langle>A\<rangle>\<^sub>\<F>\<^sub>\<L>"], auto) apply (rule exI[where x="\<langle>B,\<bullet>\<rangle>\<^sub>\<F>\<^sub>\<L>"], auto) apply (cases xs, auto, case_tac x1, auto) apply (cases x, auto) apply (cases A, auto, case_tac a, auto) apply (simp add: less_eq_aevent_def)+ apply (cases B, auto) next case (4 x xs y) then show ?case sorry qed case (Acceptance x) then show ?case apply auto apply (cases x, auto) apply (rule exI[where x="\<langle>\<bullet>\<rangle>\<^sub>\<F>\<^sub>\<L>"], rule exI[where x="\<langle>\<bullet>\<rangle>\<^sub>\<F>\<^sub>\<L>"], auto) apply (cases A, cases B, auto, cases B, auto, case_tac a, auto) apply (metis "3.prems"(4)) apply (cases x, auto) apply (rule exI[where x="\<langle>\<bullet>\<rangle>\<^sub>\<F>\<^sub>\<L>"], rule exI[where x="\<langle>\<bullet>\<rangle>\<^sub>\<F>\<^sub>\<L>"], auto) apply (cases A, cases B, auto, cases B, auto, case_tac a, auto) by (metis ExtChoiceH.simps(3) acceptance_set amember.simps(2) aunion.simps(3)) next case (AEvent x1a s) then show ?case proof (cases s) case (Acceptance x1) then show ?thesis using AEvent apply auto apply (cases x1a, auto) apply (rule exI[where x="\<langle>A \<union>\<^sub>\<F>\<^sub>\<L> acceptance B\<rangle>\<^sub>\<F>\<^sub>\<L>"]) apply (rule exI[where x="\<langle>B,\<bullet>\<rangle>\<^sub>\<F>\<^sub>\<L>"], auto) apply (cases B, auto, cases A, auto, case_tac a, auto) apply (case_tac aa, auto) apply (simp add: less_eq_aevent_def) apply (case_tac aa, auto, case_tac a, auto) sledgehammer[debug=true] apply (metis Un_iff acceptance_event acceptance_set amember.elims(2) amember.simps(2) less_eq_acceptance.simps(3) less_eq_aevent_def less_eq_fltrace.simps(1) sup.idem sup_left_commute) apply (cases A, auto, case_tac a, auto) apply (metis Un_commute Un_left_absorb acceptance.distinct(1) acceptance_event acceptance_set amember.elims(2) aunion.simps(3) eq_iff first.simps(2) less_eq_acceptance.elims(2) less_eq_acceptance.simps(2) less_eq_aevent_def unionA_sym) apply (case_tac a, auto) apply (metis Un_commute Un_left_absorb acceptance.distinct(1) acceptance_event 
acceptance_set amember.elims(2) aunion.simps(3) eq_iff first.simps(2) less_eq_acceptance.elims(2) less_eq_acceptance.simps(2) less_eq_aevent_def unionA_sym) next case (AEvent x21 x22) then show ?thesis sorry qed apply auto apply (cases x1a, auto, case_tac a, auto, cases A, auto) apply (simp_all add: less_eq_aevent_def) apply (cases B, auto, case_tac a, auto) apply (cases s, auto) apply (case_tac x1, auto) qed next case (4 A aa B X) then show ?case sorry qed apply (cases s, auto) lemma assumes "FL1 P" "FL1 Q" shows "FL1 (P \<box>\<^sub>\<F>\<^sub>\<L> Q)" using assms unfolding FL1_def ExtChoice_def apply auto *) text \<open>Idempotency does not hold for external choice in FL.\<close> lemma "P \<sqsubseteq>\<^sub>\<F>\<^sub>\<L> (P \<box>\<^sub>\<F>\<^sub>\<L> P)" unfolding ExtChoice_def apply auto nitpick[expect=genuine] oops lemma ExtChoice_sym: "P \<box>\<^sub>\<F>\<^sub>\<L> Q = Q \<box>\<^sub>\<F>\<^sub>\<L> P" unfolding ExtChoice_def apply auto using ExtChoiceH_sym by blast+ lemma ExtChoice_unit: assumes "FL1 P" shows "Stop \<box>\<^sub>\<F>\<^sub>\<L> P = P" using assms unfolding ExtChoice_def Stop_def apply auto apply (simp add: ExtChoiceH_emptyset) apply (simp add: ExtChoiceH_bullet) using ExtChoiceH_exists by blast lemma ExtChoice_dist: shows "P \<box>\<^sub>\<F>\<^sub>\<L> (Q \<sqinter>\<^sub>\<F>\<^sub>\<L> R) = (P \<box>\<^sub>\<F>\<^sub>\<L> Q) \<sqinter>\<^sub>\<F>\<^sub>\<L> (P \<box>\<^sub>\<F>\<^sub>\<L> R)" unfolding ExtChoice_def IntChoice_def by auto text \<open>Following laws do not hold in FL.\<close> lemma assumes "FL0 P" "FL0 Q" "FL0 R" shows "((P \<sqinter>\<^sub>\<F>\<^sub>\<L> R) \<box>\<^sub>\<F>\<^sub>\<L> (Q \<sqinter>\<^sub>\<F>\<^sub>\<L> R)) = ((P \<box>\<^sub>\<F>\<^sub>\<L> Q) \<sqinter>\<^sub>\<F>\<^sub>\<L> R)" nitpick[expect=genuine] oops lemma assumes "FL0 P" "FL0 Q" "FL0 R" shows "P \<sqinter>\<^sub>\<F>\<^sub>\<L> (Q \<box>\<^sub>\<F>\<^sub>\<L> R) = (P \<sqinter>\<^sub>\<F>\<^sub>\<L> Q) \<box>\<^sub>\<F>\<^sub>\<L> (P \<sqinter>\<^sub>\<F>\<^sub>\<L> R)" nitpick[expect=genuine] oops lemma a_then_Stop: "a \<rightarrow>\<^sub>\<F>\<^sub>\<L> Stop = {\<langle>\<bullet>\<rangle>\<^sub>\<F>\<^sub>\<L>, \<langle>[{a}]\<^sub>\<F>\<^sub>\<L>\<rangle>\<^sub>\<F>\<^sub>\<L>, \<langle>(\<bullet>,a)\<^sub>\<F>\<^sub>\<L>,[{}]\<^sub>\<F>\<^sub>\<L>\<rangle>\<^sub>\<F>\<^sub>\<L>, \<langle>(\<bullet>,a)\<^sub>\<F>\<^sub>\<L>,\<bullet>\<rangle>\<^sub>\<F>\<^sub>\<L>, \<langle>([{a}]\<^sub>\<F>\<^sub>\<L>,a)\<^sub>\<F>\<^sub>\<L>,[{}]\<^sub>\<F>\<^sub>\<L>\<rangle>\<^sub>\<F>\<^sub>\<L>, \<langle>([{a}]\<^sub>\<F>\<^sub>\<L>,a)\<^sub>\<F>\<^sub>\<L>,\<bullet>\<rangle>\<^sub>\<F>\<^sub>\<L> }" unfolding Prefix_def Stop_def by auto lemma Hiding_Stop: "Stop \\\<^sub>\<F>\<^sub>\<L> X = Stop" unfolding Stop_def Hiding_def apply auto apply (rule exI[where x="\<langle>[{}]\<^sub>\<F>\<^sub>\<L>\<rangle>\<^sub>\<F>\<^sub>\<L>"], auto) by (rule exI[where x="\<langle>\<bullet>\<rangle>\<^sub>\<F>\<^sub>\<L>"], auto) lemma Hiding_a_then_Stop: assumes "a \<notin> X" shows "(PrefixAlt a Stop) \\\<^sub>\<F>\<^sub>\<L> X = (PrefixAlt a Stop)" using assms unfolding PrefixAlt_def Stop_def Hiding_def prefixH_def apply auto apply (rule exI[where x="\<langle>[{a}]\<^sub>\<F>\<^sub>\<L>\<rangle>\<^sub>\<F>\<^sub>\<L>"], auto) apply (rule exI[where x="\<langle>[{a}]\<^sub>\<F>\<^sub>\<L>\<rangle>\<^sub>\<F>\<^sub>\<L>"], auto) apply (rule exI[where x="\<langle>\<bullet>\<rangle>\<^sub>\<F>\<^sub>\<L>"], auto) apply (rule exI[where 
x="\<langle>([{a}]\<^sub>\<F>\<^sub>\<L>,a)\<^sub>\<F>\<^sub>\<L>,[{}]\<^sub>\<F>\<^sub>\<L>\<rangle>\<^sub>\<F>\<^sub>\<L>"], auto) apply (rule exI[where x="\<langle>(\<bullet>,a)\<^sub>\<F>\<^sub>\<L>,[{}]\<^sub>\<F>\<^sub>\<L>\<rangle>\<^sub>\<F>\<^sub>\<L>"], auto) apply (rule exI[where x="\<langle>\<bullet>\<rangle>\<^sub>\<F>\<^sub>\<L>"], auto) apply (rule exI[where x="\<langle>([{a}]\<^sub>\<F>\<^sub>\<L>,a)\<^sub>\<F>\<^sub>\<L>,\<bullet>\<rangle>\<^sub>\<F>\<^sub>\<L>"], auto) by (rule exI[where x="\<langle>(\<bullet>,a)\<^sub>\<F>\<^sub>\<L>,\<bullet>\<rangle>\<^sub>\<F>\<^sub>\<L>"], auto) lemma Hiding_a_then_Stop2: "(PrefixAlt a Stop) \\\<^sub>\<F>\<^sub>\<L> {a} = Stop" unfolding PrefixAlt_def Hiding_def Stop_def prefixH_def apply auto apply (rule exI[where x="([{a}]\<^sub>\<F>\<^sub>\<L>,a)\<^sub>\<F>\<^sub>\<L> #\<^sub>\<F>\<^sub>\<L>\<langle>[{}]\<^sub>\<F>\<^sub>\<L>\<rangle>\<^sub>\<F>\<^sub>\<L>"], auto) by (rule exI[where x="\<langle>\<bullet>\<rangle>\<^sub>\<F>\<^sub>\<L>"], auto) lemma Hiding_a_then_P: assumes "FL1 P" shows "(PrefixAlt a P) \\\<^sub>\<F>\<^sub>\<L> {a} = P \\\<^sub>\<F>\<^sub>\<L> {a}" using assms unfolding PrefixAlt_def Hiding_def Stop_def prefixH_def apply auto apply (rule exI[where x="\<langle>\<bullet>\<rangle>\<^sub>\<F>\<^sub>\<L>"], auto) by (rule_tac x="([{a}]\<^sub>\<F>\<^sub>\<L>,a)\<^sub>\<F>\<^sub>\<L> #\<^sub>\<F>\<^sub>\<L> s" in exI, auto) lemma Hiding_a_then_P_event_in_set: assumes "FL1 P" "a \<in> X" shows "(PrefixAlt a P) \\\<^sub>\<F>\<^sub>\<L> X = P \\\<^sub>\<F>\<^sub>\<L> X" using assms unfolding PrefixAlt_def Hiding_def Stop_def prefixH_def apply auto apply (rule exI[where x="\<langle>\<bullet>\<rangle>\<^sub>\<F>\<^sub>\<L>"], auto) by (rule_tac x="([{a}]\<^sub>\<F>\<^sub>\<L>,a)\<^sub>\<F>\<^sub>\<L> #\<^sub>\<F>\<^sub>\<L> s" in exI, auto) lemma Hiding_a_then_P_event_not_in_set: assumes "FL1 P" "a \<notin> X" shows "(PrefixAlt a P) \\\<^sub>\<F>\<^sub>\<L> X = (PrefixAlt a (P \\\<^sub>\<F>\<^sub>\<L> X))" using assms unfolding PrefixAlt_def Hiding_def Stop_def prefixH_def apply auto apply (rule exI[where x="\<langle>[{a}]\<^sub>\<F>\<^sub>\<L>\<rangle>\<^sub>\<F>\<^sub>\<L>"], auto) apply (rule exI[where x="\<langle>\<bullet>\<rangle>\<^sub>\<F>\<^sub>\<L>"], auto) apply (rule_tac x="([{a}]\<^sub>\<F>\<^sub>\<L>,a)\<^sub>\<F>\<^sub>\<L> #\<^sub>\<F>\<^sub>\<L> sa" in exI, auto) by (rule_tac x="(\<bullet>,a)\<^sub>\<F>\<^sub>\<L> #\<^sub>\<F>\<^sub>\<L> sa" in exI, auto) lemma assumes "\<forall> P Q. f (P \<union> Q) = f P \<union> f Q" shows "\<forall> P Q. Q \<subseteq> P \<longrightarrow> f Q \<subseteq> f P" using assms apply auto by (metis UnCI Un_absorb2) lemma assumes "\<forall> P Q. Q \<subseteq> P \<longrightarrow> f Q \<subseteq> f P" shows "\<forall> P Q. f (P \<union> Q) = f P \<union> f Q" using assms nitpick[expect=genuine] oops end
# James Rekow

subgroupDifferences = function(partition1, partition2){

  # ARGS: partition1 - a partition of a numeric vector of the form 1:n
  #       partition2 - another partition of the same numeric vector
  #
  # RETURNS: numDiffs - number of elements in partition2 that are placed in a different subgroup (as
  #                     determined by subgroup index) than in partition1

  # identify the number of elements in the base set which the partitions divide
  numElements = length(unlist(partition1))

  # identify the set which the partitions divide
  baseSet = 1:numElements

  computeSubgroupIxVec = function(inputPartition){

    # ARGS: inputPartition - partition
    #
    # RETURNS: subgroupIxVec - numeric vector whose ith element is the index of the subgroup to which
    #                          the ith element of the base set belongs in the input partition

    # identify number of subgroups in input partition
    numSubgroups = length(inputPartition)

    # initialize subgroup index vector
    subgroupIxVec = baseSet

    for(ii in 1:numSubgroups){

      # identify elements of baseSet in subgroup ii
      subgroupElements = inputPartition[[ii]]

      # record subgroup index ii for those elements
      subgroupIxVec[subgroupElements] = ii

    } # end for

    return(subgroupIxVec)

  } # end computeSubgroupIxVec

  # compute subgroup ix vector for each partition
  subgroupIxVec1 = computeSubgroupIxVec(partition1)
  subgroupIxVec2 = computeSubgroupIxVec(partition2)

  # count the number of elements that are in different subgroups in the two partitions
  numDiffs = sum(subgroupIxVec1 != subgroupIxVec2)

  return(numDiffs)

} # end subgroupDifferences function
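For illustration (not part of the original R file): a minimal Python sketch of the same subgroup-difference computation, with hypothetical example data. It assumes, as the R function does, that a partition is given as a list of subgroups jointly covering the base set 1..n.

```python
# Sketch of subgroupDifferences in Python (assumed partition format: list of lists over 1..n).
def subgroup_differences(partition1, partition2):
    n = sum(len(s) for s in partition1)  # size of the base set

    def subgroup_ix_vec(partition):
        # ix[e] = index of the subgroup containing element e (elements are 1-based)
        ix = [0] * (n + 1)
        for i, subgroup in enumerate(partition, start=1):
            for element in subgroup:
                ix[element] = i
        return ix[1:]

    v1 = subgroup_ix_vec(partition1)
    v2 = subgroup_ix_vec(partition2)
    # count positions whose subgroup index differs between the two partitions
    return sum(p != q for p, q in zip(v1, v2))

# Example: elements 3 and 4 swap subgroups between the two partitions
print(subgroup_differences([[1, 2, 3], [4, 5]], [[1, 2, 4], [3, 5]]))  # 2
```

Note that, like the R original, this counts differences by subgroup index, so the measure depends on the order in which subgroups are listed, not only on the grouping itself.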
Require Import UniMath.Foundations.All. Require Import UniMath.MoreFoundations.All. Require Import UniMath.CategoryTheory.Core.Categories. Require Import UniMath.CategoryTheory.Core.Functors. Require Import UniMath.CategoryTheory.Core.NaturalTransformations. Require Import UniMath.CategoryTheory.Core.Isos. Require Import UniMath.CategoryTheory.PrecategoryBinProduct. Require Import UniMath.Bicategories.WkCatEnrichment.prebicategory. Require Import UniMath.Bicategories.Core.Bicat. Import Bicat.Notations. Require Import UniMath.Bicategories.Core.BicategoryLaws. Local Open Scope cat. Definition hcomp_bicat_data : UU := ∑ (ob : UU) (mor : ob → ob → UU) (cell : ∏ (x y : ob), mor x y → mor x y → UU) (id1 : ∏ (x : ob), mor x x) (comp1 : ∏ (x y z : ob), mor x y → mor y z → mor x z), (∏ (x y : ob) (f : mor x y), cell _ _ f f) × (∏ (x y : ob) (f g h : mor x y), cell _ _ f g → cell _ _ g h → cell _ _ f h) × (∏ (x y : ob) (f : mor x y), cell _ _ (comp1 _ _ _ f (id1 y)) f) × (∏ (x y : ob) (f : mor x y), cell _ _ f (comp1 _ _ _ f (id1 y))) × (∏ (x y : ob) (f : mor x y), cell _ _ (comp1 _ _ _ (id1 x) f) f) × (∏ (x y : ob) (f : mor x y), cell _ _ f (comp1 _ _ _ (id1 x) f)) × (∏ (w x y z : ob) (f : mor w x) (g : mor x y) (h : mor y z), cell _ _ (comp1 _ _ _ (comp1 _ _ _ f g) h) (comp1 _ _ _ f (comp1 _ _ _ g h))) × (∏ (w x y z : ob) (f : mor w x) (g : mor x y) (h : mor y z), cell _ _ (comp1 _ _ _ f (comp1 _ _ _ g h)) (comp1 _ _ _ (comp1 _ _ _ f g) h)) × (∏ (x y z : ob) (f₁ f₂ : mor x y) (g₁ g₂ : mor y z), cell _ _ f₁ f₂ → cell _ _ g₁ g₂ → cell _ _ (comp1 _ _ _ f₁ g₁) (comp1 _ _ _ f₂ g₂)). Coercion hcomp_bicat_ob (B : hcomp_bicat_data) : UU := pr1 B. Definition hb_mor {B : hcomp_bicat_data} (b₁ b₂ : B) : UU := pr12 B b₁ b₂. Definition hb_cell {B : hcomp_bicat_data} {b₁ b₂ : B} (f g : hb_mor b₁ b₂) : UU := pr122 B _ _ f g. Definition hb_id1 {B : hcomp_bicat_data} (b : B) : hb_mor b b := pr1 (pr222 B) b . Definition hb_comp1 {B : hcomp_bicat_data} {b₁ b₂ b₃ : B} (f : hb_mor b₁ b₂) (g : hb_mor b₂ b₃) : hb_mor b₁ b₃ := pr12 (pr222 B) _ _ _ f g. Definition hb_id2 {B : hcomp_bicat_data} {b₁ b₂ : B} (f : hb_mor b₁ b₂) : hb_cell f f := pr122 (pr222 B) _ _ f. Definition hb_vcomp {B : hcomp_bicat_data} {b₁ b₂ : B} {f g h : hb_mor b₁ b₂} (α : hb_cell f g) (β : hb_cell g h) : hb_cell f h := pr1 (pr222 (pr222 B)) _ _ _ _ _ α β. Definition hb_runit {B : hcomp_bicat_data} {b₁ b₂ : B} (f : hb_mor b₁ b₂) : hb_cell (hb_comp1 f (hb_id1 b₂)) f := pr12 (pr222 (pr222 B)) _ _ f. Definition hb_rinvunit {B : hcomp_bicat_data} {b₁ b₂ : B} (f : hb_mor b₁ b₂) : hb_cell f (hb_comp1 f (hb_id1 b₂)) := pr122 (pr222 (pr222 B)) _ _ f. Definition hb_lunit {B : hcomp_bicat_data} {b₁ b₂ : B} (f : hb_mor b₁ b₂) : hb_cell (hb_comp1 (hb_id1 b₁) f) f := pr1 (pr222 (pr222 (pr222 B))) _ _ f. Definition hb_linvunit {B : hcomp_bicat_data} {b₁ b₂ : B} (f : hb_mor b₁ b₂) : hb_cell f (hb_comp1 (hb_id1 b₁) f) := pr12 (pr222 (pr222 (pr222 B))) _ _ f. Definition hb_lassoc {B : hcomp_bicat_data} {b₁ b₂ b₃ b₄ : B} (f : hb_mor b₁ b₂) (g : hb_mor b₂ b₃) (h : hb_mor b₃ b₄) : hb_cell (hb_comp1 (hb_comp1 f g) h) (hb_comp1 f (hb_comp1 g h)) := pr122 (pr222 (pr222 (pr222 B))) _ _ _ _ f g h. Definition hb_rassoc {B : hcomp_bicat_data} {b₁ b₂ b₃ b₄ : B} (f : hb_mor b₁ b₂) (g : hb_mor b₂ b₃) (h : hb_mor b₃ b₄) : hb_cell (hb_comp1 f (hb_comp1 g h)) (hb_comp1 (hb_comp1 f g) h) := pr1 (pr222 (pr222 (pr222 (pr222 B)))) _ _ _ _ f g h. 
Definition hb_hcomp {B : hcomp_bicat_data} {b₁ b₂ b₃ : B} {f₁ f₂ : hb_mor b₁ b₂} {g₁ g₂ : hb_mor b₂ b₃} (α : hb_cell f₁ f₂) (β : hb_cell g₁ g₂) : hb_cell (hb_comp1 f₁ g₁) (hb_comp1 f₂ g₂) := pr2 (pr222 (pr222 (pr222 (pr222 B)))) _ _ _ _ _ _ _ α β. Definition hcomp_bicat_laws (B : hcomp_bicat_data) : UU := (∏ (b₁ b₂ : B) (f g : hb_mor b₁ b₂) (α : hb_cell f g), hb_vcomp (hb_id2 _) α = α) × (∏ (b₁ b₂ : B) (f g : hb_mor b₁ b₂) (α : hb_cell f g), hb_vcomp α (hb_id2 _) = α) × (∏ (b₁ b₂ : B) (f₁ f₂ f₃ f₄ : hb_mor b₁ b₂) (α : hb_cell f₁ f₂) (β : hb_cell f₂ f₃) (γ : hb_cell f₃ f₄), hb_vcomp α (hb_vcomp β γ) = hb_vcomp (hb_vcomp α β) γ) × (∏ (b₁ b₂ : B) (f₁ f₂ f₃ f₄ : hb_mor b₁ b₂) (α : hb_cell f₁ f₂) (β : hb_cell f₂ f₃) (γ : hb_cell f₃ f₄), hb_vcomp (hb_vcomp α β) γ = hb_vcomp α (hb_vcomp β γ)) × (∏ (b₁ b₂ : B) (f g : hb_mor b₁ b₂), isaset (hb_cell f g)) × (∏ (b₁ b₂ b₃ : B) (f : hb_mor b₁ b₂) (g : hb_mor b₂ b₃), hb_hcomp (hb_id2 f) (hb_id2 g) = hb_id2 (hb_comp1 f g)) × (∏ (b₁ b₂ b₃ : B) (f₁ g₁ h₁ : hb_mor b₁ b₂) (f₂ g₂ h₂ : hb_mor b₂ b₃) (α₁ : hb_cell f₁ g₁) (α₂ : hb_cell f₂ g₂) (β₁ : hb_cell g₁ h₁) (β₂ : hb_cell g₂ h₂), hb_hcomp (hb_vcomp α₁ β₁) (hb_vcomp α₂ β₂) = hb_vcomp (hb_hcomp α₁ α₂) (hb_hcomp β₁ β₂)) × (∏ (a b c d : B) (f₁ f₂ : hb_mor a b) (g₁ g₂ : hb_mor b c) (h₁ h₂ : hb_mor c d) (α₁ : hb_cell f₁ f₂) (α₂ : hb_cell g₁ g₂) (α₃ : hb_cell h₁ h₂), hb_vcomp (hb_hcomp α₁ (hb_hcomp α₂ α₃)) (hb_rassoc _ _ _) = hb_vcomp (hb_rassoc _ _ _) (hb_hcomp (hb_hcomp α₁ α₂) α₃)) × (∏ (a b : B) (f₁ f₂ : hb_mor a b) (α : hb_cell f₁ f₂), hb_vcomp (hb_hcomp (hb_id2 (hb_id1 a)) α) (hb_lunit f₂) = hb_vcomp (hb_lunit f₁) α) × (∏ (a b : B) (f₁ f₂ : hb_mor a b) (α : hb_cell f₁ f₂), hb_vcomp (hb_hcomp α (hb_id2 (hb_id1 b))) (hb_runit f₂) = hb_vcomp (hb_runit f₁) α) × (∏ (b₁ b₂ b₃ b₄ : B) (f : hb_mor b₁ b₂) (g : hb_mor b₂ b₃) (h : hb_mor b₃ b₄), hb_vcomp (hb_rassoc f g h) (hb_lassoc f g h) = hb_id2 _) × (∏ (b₁ b₂ b₃ b₄ : B) (f : hb_mor b₁ b₂) (g : hb_mor b₂ b₃) (h : hb_mor b₃ b₄), hb_vcomp (hb_lassoc f g h) (hb_rassoc f g h) = hb_id2 _) × (∏ (b₁ b₂ : B) (f : hb_mor b₁ b₂), hb_vcomp (hb_lunit f) (hb_linvunit f) = hb_id2 _) × (∏ (b₁ b₂ : B) (f : hb_mor b₁ b₂), hb_vcomp (hb_linvunit f) (hb_lunit f) = hb_id2 _) × (∏ (b₁ b₂ : B) (f : hb_mor b₁ b₂), hb_vcomp (hb_runit f) (hb_rinvunit f) = hb_id2 _) × (∏ (b₁ b₂ : B) (f : hb_mor b₁ b₂), hb_vcomp (hb_rinvunit f) (hb_runit f) = hb_id2 _) × (∏ (a b c d e : B) (k : hb_mor a b) (h : hb_mor b c) (g : hb_mor c d) (f : hb_mor d e), hb_vcomp (hb_rassoc k h (hb_comp1 g f)) (hb_rassoc (hb_comp1 k h) g f) = hb_vcomp (hb_vcomp (hb_hcomp (hb_id2 k) (hb_rassoc h g f)) (hb_rassoc k (hb_comp1 h g) f)) (hb_hcomp (hb_rassoc k h g) (hb_id2 f))) × (∏ (a b c : B) (f : hb_mor a b) (g : hb_mor b c), hb_hcomp (hb_id2 f) (hb_lunit g) = hb_vcomp (hb_rassoc f (hb_id1 b) g) (hb_hcomp (hb_runit f) (hb_id2 g))). Lemma isaprop_hcomp_prebicat_laws (B : hcomp_bicat_data) (H : ∏ (a b : B) (f g : hb_mor a b), isaset (hb_cell f g)) : isaprop (hcomp_bicat_laws B). Proof. repeat (apply isapropdirprod) ; try (repeat (apply impred ; intro) ; apply H). do 4 (apply impred ; intro). apply isapropisaset. Qed. Definition hcomp_bicat : UU := ∑ (B : hcomp_bicat_data), hcomp_bicat_laws B. Coercion hcomp_bicat_to_data (B : hcomp_bicat) : hcomp_bicat_data := pr1 B. Definition hcomp_bicat_hom_cat (B : hcomp_bicat) (b₁ b₂ : B) : category. Proof. use make_category. - use make_precategory. + use make_precategory_data. * use make_precategory_ob_mor. ** exact (hb_mor b₁ b₂). ** exact (λ f g, hb_cell f g). * exact (λ f, hb_id2 f). 
* exact (λ _ _ _ f g, hb_vcomp f g). + repeat split ; simpl ; cbn. * exact (pr12 B b₁ b₂). * exact (pr122 B b₁ b₂). * exact (pr1 (pr222 B) b₁ b₂). * exact (pr12 (pr222 B) b₁ b₂). - exact (pr122 (pr222 B) b₁ b₂). Defined. Definition hcomp_bicat_hcomp (B : hcomp_bicat) (b₁ b₂ b₃ : pr11 B) : precategory_binproduct_data (hcomp_bicat_hom_cat B b₁ b₂) (hcomp_bicat_hom_cat B b₂ b₃) ⟶ hcomp_bicat_hom_cat B b₁ b₃. Proof. use make_functor. - use make_functor_data. + exact (λ fg, hb_comp1 (pr1 fg) (pr2 fg)). + exact (λ fg fg' α, hb_hcomp (pr1 α) (pr2 α)). - split. + intros f ; cbn in *. exact (pr1 (pr222 (pr222 B)) b₁ b₂ b₃ (pr1 f) (pr2 f)). + intros f g h α β ; cbn in *. exact (pr12 (pr222 (pr222 B)) b₁ b₂ b₃ _ _ _ _ _ _ (pr1 α) (pr2 α) (pr1 β) (pr2 β)). Defined. Definition hcomp_bicat_to_prebicategory_ob_hom (B : hcomp_bicat) : prebicategory_ob_hom. Proof. simple refine (_ ,, _). - exact B. - exact (hcomp_bicat_hom_cat B). Defined. Definition hcomp_bicat_to_prebicategory_id_comp (B : hcomp_bicat) : prebicategory_id_comp. Proof. simple refine (_ ,, _ ,, _). - exact (hcomp_bicat_to_prebicategory_ob_hom B). - exact (λ a, hb_id1 a). - exact (hcomp_bicat_hcomp B). Defined. Definition hcomp_bicat_associator (B : hcomp_bicat) (a b c d : hcomp_bicat_to_prebicategory_id_comp B) : associator_trans_type a b c d. Proof. use make_nat_trans. - exact (λ f, hb_rassoc (pr1 f) (pr12 f) (pr22 f)). - intros f₁ f₂ α. apply (pr122 (pr222 (pr222 B))). Defined. Definition hcomp_bicat_lunitor (B : hcomp_bicat) (a b : hcomp_bicat_to_prebicategory_id_comp B) : left_unitor_trans_type a b. Proof. use make_nat_trans. - exact (λ f, hb_lunit f). - intros f₁ f₂ α. apply (pr1 (pr222 (pr222 (pr222 B)))). Defined. Definition hcomp_bicat_runitor (B : hcomp_bicat) (a b : hcomp_bicat_to_prebicategory_id_comp B) : right_unitor_trans_type a b. Proof. use make_nat_trans. - exact (λ f, hb_runit f). - intros f₁ f₂ α. apply (pr2 (pr222 (pr222 (pr222 B)))). Defined. Definition hcomp_bicat_to_prebicategory_data (B : hcomp_bicat) : prebicategory_data. Proof. simple refine (_ ,, _ ,, _ ,, _). - exact (hcomp_bicat_to_prebicategory_id_comp B). - exact (hcomp_bicat_associator B). - exact (hcomp_bicat_lunitor B). - exact (hcomp_bicat_runitor B). Defined. Definition hcomp_bicat_is_prebicategory (B : hcomp_bicat) : is_prebicategory (hcomp_bicat_to_prebicategory_data B). Proof. repeat split. - intros b₁ b₂ b₃ b₄ f g h ; cbn in *. use make_is_z_isomorphism. + exact (hb_lassoc f g h). + split ; apply (pr2 B). - intros b₁ b₂ f ; cbn in *. use make_is_z_isomorphism. + exact (hb_linvunit f). + split ; apply (pr2 B). - intros b₁ b₂ f ; cbn in *. use make_is_z_isomorphism. + exact (hb_rinvunit f). + split ; apply (pr2 B). - apply (pr2 B). - apply (pr2 B). Defined. Definition hcomp_bicat_to_prebicategory (B : hcomp_bicat) : prebicategory. Proof. simple refine (_ ,, _). - exact (hcomp_bicat_to_prebicategory_data B). - exact (hcomp_bicat_is_prebicategory B). Defined. Definition prebicategory_to_hcomp_bicat_data (B : prebicategory) : hcomp_bicat_data. Proof. simple refine (_ ,, _ ,, _ ,, _ ,, _ ,, _ ,, _ ,, _ ,, _ ,, _ ,, _ ,, _ ,, _ ,, _). - exact (pr1 (pr111 B)). - intros b₁ b₂. exact (pr2 (pr111 B) b₁ b₂). - intros b₁ b₂ f g. exact (f --> g). - exact (pr1 (pr211 B)). - intros b₁ b₂ b₃ f g. exact (pr2 (pr211 B) b₁ b₂ b₃ (f ,, g)). - intros ; simpl. apply identity. - exact (λ _ _ _ _ _ α β, α · β). - cbn in *. intros b₁ b₂ f. exact (pr1 (pr2 (pr221 B) b₁ b₂) f). - intros a b f. apply (pr2 (pr212 B) _ _ f). - cbn in *. intros b₁ b₂ f. 
exact (pr1 (pr1 (pr221 B) b₁ b₂) f). - intros a b f. apply (pr1 (pr212 B) _ _ f). - intros b₁ b₂ b₃ b₄ f g h ; cbn in *. apply ((pr112 B) _ _ _ _ f g h). - intros b₁ b₂ b₃ b₄ f g h ; cbn in *. exact (pr1 ((pr121 B) _ _ _ _) (f ,, g ,, h)). - intros b₁ b₂ b₃ f₁ f₂ g₁ g₂ α β ; simpl in *. apply (#(pr2 (pr211 B) b₁ b₂ b₃)). exact (α ,, β). Defined. Definition prebicategory_to_hcomp_bicat (B : prebicategory) : hcomp_bicat. Proof. simple refine (_ ,, _). - exact (prebicategory_to_hcomp_bicat_data B). - repeat split ; cbn ; intros. + apply id_left. + apply id_right. + apply assoc. + apply assoc'. + apply homset_property. + apply (functor_id ((pr221 (pr1 B)) b₁ b₂ b₃)). + apply (@functor_comp _ _ ((pr221 (pr1 B)) b₁ b₂ b₃) (_ ,, _) (_ ,, _) (_ ,, _) (_ ,, _) (_ ,, _)). + apply (@nat_trans_ax _ _ _ _ ((pr121 B) a b c d) (_ ,, (_ ,, _)) (_ ,, (_ ,, _)) (_ ,, (_ ,, _))). + apply (nat_trans_ax ((pr122 (pr1 B)) a b)). + apply (nat_trans_ax ((pr222 (pr1 B)) a b)). + apply (z_iso_inv_after_z_iso (make_z_iso _ _ ((pr112 B) b₁ b₂ b₃ b₄ f g h))). + apply (z_iso_after_z_iso_inv (make_z_iso _ _ ((pr112 B) b₁ b₂ b₃ b₄ f g h))). + apply (z_iso_inv_after_z_iso (make_z_iso _ _ ((pr121 (pr2 B)) b₁ b₂ f))). + apply (z_iso_after_z_iso_inv (make_z_iso _ _ ((pr121 (pr2 B)) b₁ b₂ f))). + apply (z_iso_inv_after_z_iso (make_z_iso _ _ ((pr221 (pr2 B)) b₁ b₂ f))). + apply (z_iso_after_z_iso_inv (make_z_iso _ _ ((pr221 (pr2 B)) b₁ b₂ f))). + apply B. + apply B. Defined. Definition hcomp_bicat_weq_prebicategory : hcomp_bicat ≃ prebicategory. Proof. use make_weq. - exact hcomp_bicat_to_prebicategory. - use isweq_iso. + exact prebicategory_to_hcomp_bicat. + intros b. apply idpath. + intros b. apply idpath. Defined. Definition hcomp_bicat_to_precategory_ob_mor (B : hcomp_bicat) : precategory_ob_mor. Proof. simple refine (_ ,, _). - exact B. - exact (λ b₁ b₂, hb_mor b₁ b₂). Defined. Definition hcomp_bicat_to_precategory_id_comp (B : hcomp_bicat) : precategory_id_comp (hcomp_bicat_to_precategory_ob_mor B). Proof. simple refine (_ ,, _). - exact (λ x, hb_id1 _). - exact (λ _ _ _ f g, hb_comp1 f g). Defined. Definition hcomp_bicat_to_precategory_data (B : hcomp_bicat) : precategory_data. Proof. simple refine (_ ,, _). - exact (hcomp_bicat_to_precategory_ob_mor B). - exact (hcomp_bicat_to_precategory_id_comp B). Defined. Definition hcomp_bicat_to_prebicat_1_id_comp_cells (B : hcomp_bicat) : prebicat_1_id_comp_cells. Proof. simple refine (_ ,, _). - exact (hcomp_bicat_to_precategory_data B). - exact (λ x y f g, hb_cell f g). Defined. Definition hcomp_bicat_to_prebicat_2_id_comp_struct (B : hcomp_bicat) : prebicat_2_id_comp_struct (hcomp_bicat_to_prebicat_1_id_comp_cells B). Proof. repeat split ; cbn. - intros. apply hb_id2. - intros. apply hb_lunit. - intros. apply hb_runit. - intros. apply hb_linvunit. - intros. apply hb_rinvunit. - intros. apply hb_lassoc. - intros. apply hb_rassoc. - intros ? ? ? ? ? α β. exact (hb_vcomp α β). - intros ? ? ? f ? ? α. exact (hb_hcomp (hb_id2 _) α). - intros ? ? ? f ? ? α. exact (hb_hcomp α (hb_id2 _)). Defined. Definition hcomp_bicat_to_prebicat_data (B : hcomp_bicat) : prebicat_data. Proof. simple refine (_ ,, _). - exact (hcomp_bicat_to_prebicat_1_id_comp_cells B). - exact (hcomp_bicat_to_prebicat_2_id_comp_struct B). Defined. Definition hcomp_bicat_to_prebicat_laws (B : hcomp_bicat) : prebicat_laws (hcomp_bicat_to_prebicat_data B). Proof. repeat split ; try (intros ; apply (pr2 B)). - intros ; cbn. etrans. { refine (!_). apply (pr12 (pr222 (pr222 B))). } apply maponpaths_2. apply B. 
- intros ; cbn. etrans. { refine (!_). apply (pr12 (pr222 (pr222 B))). } apply maponpaths. apply B. - intros ; cbn. etrans. { apply (pr122 (pr222 ((pr222 B)))). } apply maponpaths. apply maponpaths_2. apply B. - intros a b c d f₁ f₂ g h α ; cbn. pose (pr122 (pr222 ((pr222 B))) a b c d f₁ f₂ g g h h α (hb_id2 _) (hb_id2 _)) as p. cbn in p. etrans. { exact (!p). } apply maponpaths_2. apply maponpaths. apply B. - intros a b c f₁ f₂ g h α β ; cbn. etrans. { refine (!_). apply (pr12 (pr222 (pr222 B))). } refine (!_). etrans. { refine (!_). apply (pr12 (pr222 (pr222 B))). } etrans. { apply maponpaths. apply B. } etrans. { apply maponpaths_2. apply B. } refine (!_). etrans. { apply maponpaths. apply B. } apply maponpaths_2. apply B. - intros ; cbn. refine (!_). apply B. - intros ; cbn. refine (!_). apply B. Qed. Definition hcomp_bicat_to_prebicat (B : hcomp_bicat) : prebicat. Proof. simple refine (_ ,, _). - exact (hcomp_bicat_to_prebicat_data B). - exact (hcomp_bicat_to_prebicat_laws B). Defined. Definition hcomp_bicat_to_bicat (B : hcomp_bicat) : bicat. Proof. simple refine (_ ,, _). - exact (hcomp_bicat_to_prebicat B). - simpl. intro ; intros. apply (pr122 (pr222 B)). Defined. Definition bicat_to_hcomp_bicat_data (B : bicat) : hcomp_bicat_data. Proof. simple refine (_ ,, _ ,, _ ,, _ ,, _ ,, _ ,, _ ,, _ ,, _ ,, _ ,, _ ,, _ ,, _ ,, _). - exact B. - exact (λ x y, x --> y). - exact (λ _ _ f g, f ==> g). - exact (λ x, id₁ _). - exact (λ _ _ _ f g, f · g). - exact (λ _ _ f, id2 f). - exact (λ _ _ _ _ _ α β, α • β). - exact (λ _ _ f, runitor f). - exact (λ _ _ f, rinvunitor f). - exact (λ _ _ f, lunitor f). - exact (λ _ _ f, linvunitor f). - exact (λ _ _ _ _ f g h, rassociator f g h). - exact (λ _ _ _ _ f g h, lassociator f g h). - exact (λ _ _ _ _ _ _ _ α β, β ⋆⋆ α). Defined. Definition bicat_to_hcomp_bicat_laws (B : bicat) : hcomp_bicat_laws (bicat_to_hcomp_bicat_data B). Proof. repeat split ; cbn ; intros. - apply id2_left. - apply id2_right. - apply vassocr. - apply vassocl. - apply cellset_property. - apply hcomp_identity. - apply interchange. - apply hcomp_lassoc. - apply lunitor_natural. - apply runitor_natural. - apply lassociator_rassociator. - apply rassociator_lassociator. - apply lunitor_linvunitor. - apply linvunitor_lunitor. - apply runitor_rinvunitor. - apply rinvunitor_runitor. - rewrite <- lwhisker_hcomp, <- rwhisker_hcomp. refine (!_). apply lassociator_lassociator. - rewrite <- lwhisker_hcomp, <- rwhisker_hcomp. rewrite <- lunitor_lwhisker. rewrite !vassocr. rewrite lassociator_rassociator. rewrite id2_left. apply idpath. Qed. Definition bicat_to_hcomp_bicat (B : bicat) : hcomp_bicat. Proof. simple refine (_ ,, _). - exact (bicat_to_hcomp_bicat_data B). - exact (bicat_to_hcomp_bicat_laws B). Defined. Definition hcomp_bicat_to_bicat_to_hcomp_bicat (B : hcomp_bicat) : bicat_to_hcomp_bicat (hcomp_bicat_to_bicat B) = B. Proof. use total2_paths_f. - do 13 (use total2_paths_f ; [ apply idpath | ] ; cbn). use funextsec ; intro x. use funextsec ; intro y. use funextsec ; intro z. use funextsec ; intro f₁. use funextsec ; intro f₂. use funextsec ; intro g₁. use funextsec ; intro g₂. use funextsec ; intro α. use funextsec ; intro β. cbn. etrans. { refine (!_). apply (pr12 (pr222 (pr222 B))). } etrans. { apply maponpaths. apply (pr12 B). } etrans. { apply maponpaths_2. apply (pr122 B). } apply idpath. - apply isaprop_hcomp_prebicat_laws. apply B. Qed. Definition bicat_to_hcomp_bicat_to_bicat (B : bicat) : hcomp_bicat_to_bicat (bicat_to_hcomp_bicat B) = B. Proof. use subtypePath. { intro. 
do 4 (use impred ; intro). apply isapropisaset. } use total2_paths_f. - use total2_paths_f. + apply idpath. + repeat (use pathsdirprod) ; cbn. * apply idpath. * apply idpath. * apply idpath. * apply idpath. * apply idpath. * apply idpath. * apply idpath. * apply idpath. * repeat (use funextsec ; intro). rewrite <- lwhisker_hcomp. apply idpath. * repeat (use funextsec ; intro). rewrite <- rwhisker_hcomp. apply idpath. - apply isaprop_prebicat_laws. intros. apply cellset_property. Qed. Definition hcomp_bicat_weq_bicat : hcomp_bicat ≃ bicat. Proof. use make_weq. - exact hcomp_bicat_to_bicat. - use isweq_iso. + exact bicat_to_hcomp_bicat. + exact hcomp_bicat_to_bicat_to_hcomp_bicat. + exact bicat_to_hcomp_bicat_to_bicat. Defined. Definition weq_bicat_prebicategory : bicat ≃ prebicategory. Proof. eapply weqcomp. - apply (invweq hcomp_bicat_weq_bicat). - apply hcomp_bicat_weq_prebicategory. Defined.
```python import sympy ``` $$u(t) = \mathcal{1}(t)$$ $$g(t) = e^{-t}\mathcal{1}(t)$$ $$y(t) = u(t) \bigotimes g(t) = \mathcal{L}^{-1}(U(s) G(s)) = \mathcal{L}^{-1}\left(\frac{1}{s} \cdot \frac{1}{s+1}\right) = \mathcal{L}^{-1}\left(\frac{1}{s(s+1)}\right)= \mathcal{L}^{-1}\left(\frac{1}{s} - \frac{1}{s+1}\right) = \mathcal{1}(t) - e^{-t}\mathcal{1}(t)$$ ```python t = sympy.symbols('t', real=True) u = sympy.Heaviside(t) # unit step g = sympy.exp(-t)*sympy.Heaviside(t) y = sympy.Heaviside(t) - sympy.exp(-t)*sympy.Heaviside(t) g ``` $\displaystyle e^{- t} \theta\left(t\right)$ ```python sympy.plot(y) ``` ```python sympy.plot(g) ``` ```python y_approx = g + g.subs(t, t - 1) + g.subs(t, t - 2) + g.subs(t, t - 3) + g.subs(t, t - 4) y_approx ``` $\displaystyle e^{1 - t} \theta\left(t - 1\right) + e^{2 - t} \theta\left(t - 2\right) + e^{3 - t} \theta\left(t - 3\right) + e^{4 - t} \theta\left(t - 4\right) + e^{- t} \theta\left(t\right)$ ```python sympy.plot(y_approx, y, (t, 0, 4)) ``` ```python y_approx2 = 0.5*g + 0.5*g.subs(t, t - 0.5) + 0.5*g.subs(t, t - 1) + 0.5*g.subs(t, t - 1.5) + 0.5*g.subs(t, t - 2) \ + 0.5*g.subs(t, t - 2.5) + 0.5*g.subs(t, t - 3) + 0.5*g.subs(t, t - 3.5) + 0.5*g.subs(t, t - 4) y_approx2 ``` $\displaystyle 0.5 e^{1 - t} \theta\left(t - 1\right) + 0.5 e^{2 - t} \theta\left(t - 2\right) + 0.5 e^{3 - t} \theta\left(t - 3\right) + 0.5 e^{4 - t} \theta\left(t - 4\right) + 0.5 e^{- t} \theta\left(t\right) + 16.5577259793462 e^{- t} \theta\left(t - 3.5\right) + 6.09124698035174 e^{- t} \theta\left(t - 2.5\right) + 2.24084453516903 e^{- t} \theta\left(t - 1.5\right) + 0.824360635350064 e^{- t} \theta\left(t - 0.5\right)$ ```python y_approx3 = 0 tf = 4 dt = 0.1 for i in range(int(tf/dt)): y_approx3 += dt*g.subs(t, t-i*dt) ``` ```python y4 = sympy.Heaviside(t) - sympy.exp(-t) + t*sympy.exp(t) sympy.plot(y4, (t, -3, 3)) ``` ```python y4 = (1 - sympy.exp(-t) + t*sympy.exp(t))*sympy.Heaviside(t) sympy.plot(y4, (t, -3, 3)) ``` ```python sympy.plot(y, y_approx3, (t, 0, 4)) ``` ```python sympy.plot(y_approx, y, y_approx2, (t, 0, 4)) ``` ```python ```
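Not part of the original notebook: a quick symbolic cross-check of the closed-form result above. Assuming the same $u$ and $g$, for $t \ge 0$ the convolution integral reduces to $\int_0^t e^{-\tau}\,d\tau$, which sympy evaluates directly:

```python
# Sketch: verify y(t) = 1 - exp(-t) by computing the convolution integral directly.
import sympy

t, tau = sympy.symbols('t tau', positive=True)
# On [0, t], u(t - tau) = 1 and g(tau) = exp(-tau), so the integrand simplifies.
y_direct = sympy.integrate(sympy.exp(-tau), (tau, 0, t))
print(y_direct)  # 1 - exp(-t), matching the inverse Laplace transform above
```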
lemma is_pole_divide: fixes f :: "'a :: t2_space \<Rightarrow> 'b :: real_normed_field" assumes "isCont f z" "filterlim g (at 0) (at z)" "f z \<noteq> 0" shows "is_pole (\<lambda>z. f z / g z) z"
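An illustrative instance of this lemma (not from the source): take $z = 0$, $f = (\lambda z.\ 1)$ and $g = (\lambda z.\ z)$. Then $f$ is continuous and nonzero at $0$, and $g$ tends to $0$ on the punctured neighbourhood of $0$, so the lemma yields that $\lambda z.\ 1/z$ has a pole at $0$:

$$\lim_{z \to 0} \left|\frac{f(z)}{g(z)}\right| = \lim_{z \to 0} \frac{1}{|z|} = \infty$$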
theory Prelude_Nat imports "$HETS_LIB/Isabelle/MainHC" uses "$HETS_LIB/Isabelle/prelude" begin ML "Header.initialize [\"SubDomNat\", \"DivideDomNat\", \"DivDomNat\", \"ModDomNat\", \"Distr1Nat\", \"Distr2Nat\", \"NotFalse\", \"NotTrue\", \"AndDef1\", \"AndDef2\", \"AndDef3\", \"AndDef4\", \"OrDef\", \"OtherwiseDef\", \"NotTrue1\", \"NotFalse2\", \"TB1\", \"EqualTDef\", \"SymT\", \"SymF\", \"ReflexT\", \"TransT\", \"DiffTDef\", \"DiffFDef\", \"DiffDef2\", \"TE1\", \"TE2\", \"TE3\", \"TE4\", \"IBE1\", \"IBE2\", \"IBE3\", \"IBE4\", \"IBE5\", \"IBE6\", \"IBE7\", \"IBE8\", \"IUE1\", \"IUE2\", \"ALeIrreflexivity\", \"ALeAsymmetry\", \"ALeTransitive\", \"ALeTotal\", \"IOE01\", \"IOE02\", \"IOE03\", \"IOE04\", \"IOE05\", \"IOE06\", \"IOE07\", \"IOE08\", \"IOE09\", \"LeDef\", \"LeIrreflexivity\", \"LeTAsymmetry\", \"LeTTransitive\", \"LeTTotal\", \"LeqTTransitive\", \"LeqTTotal\", \"GeIrreflexivity\", \"GeTAsymmetry\", \"GeTTransitive\", \"GeTTotal\", \"GeqTTransitive\", \"GeqTTotal\", \"EqTSOrdRel\", \"EqFSOrdRel\", \"EqTOrdRel\", \"EqFOrdRel\", \"LeTGeTRel\", \"LeqTGetTRel\", \"GeTLeTRel\", \"GeqTLeqTRel\", \"LeTGeFEqFRel\", \"LeqTGeFRel\", \"GeTLeFEqFRel\", \"GeqTLeFRel\", \"LeqTLeTEqTRel\", \"GeqTGeTEqTRel\", \"LeFGeFRel\", \"LeqFGetFRel\", \"GeFLeFRel\", \"GeqFLeqFRel\", \"LeFGeTEqTRel\", \"LeqFGeTRel\", \"GeFLeTEqTRel\", \"GeqFLeTRel\", \"LeqFLeFEqFRel\", \"GeqFGeFEqFRel\", \"LeTGeqFRel\", \"GeTLeqFRel\", \"LeLtDef\", \"LeEqDef\", \"LeGtDef\", \"LqLtDef\", \"LqEqDef\", \"LqGtDef\", \"GeLtDef\", \"GeEqDef\", \"GeGtDef\", \"GqLtDef\", \"GqEqDef\", \"GqGtDef\", \"MaxYDef\", \"MaxXDef\", \"MinXDef\", \"MinYDef\", \"TO1\", \"TO2\", \"TO3\", \"TO4\", \"TO5\", \"TO6\", \"TO7\", \"IOO1\", \"IOO2\", \"IOO3\", \"IOO13\", \"IOO14\", \"IOO15\", \"IOO16\", \"IOO17\", \"IOO18\", \"IOO19\", \"IOO20\", \"IOO21\", \"IOO22\", \"IOO23\", \"IOO24\", \"IOO25\", \"IOO26\", \"IOO27\", \"IOO28\", \"IOO29\", \"IOO30\", \"IOO31\", \"IOO32\", \"IOO33\", \"IBO1\", \"IBO4\", \"IBO5\", \"IBO6\", \"IBO7\", \"IBO8\", \"IBO9\", \"IBO10\", \"IBO11\", \"IBO12\", \"IUO01\", \"IUO02\", \"IUO03\", \"IUO04\", \"IUO05\", \"IUO06\", \"IUO07\", \"ga_select_pre\", \"X1DefNat\", \"X2DefNat\", \"X3DefNat\", \"X4DefNat\", \"X5DefNat\", \"X6DefNat\", \"X7DefNat\", \"X8DefNat\", \"X9DefNat\", \"DecimalDef\", \"LeqDef1Nat\", \"LeqDef2Nat\", \"LeqDef3Nat\", \"GeqDefNat\", \"LessDefNat\", \"GreaterDefNat\", \"Even0Nat\", \"EvenSucNat\", \"OddDefNat\", \"Factorial0\", \"FactorialSuc\", \"Add0Nat\", \"AddSucNat\", \"Mult0Nat\", \"MultSucNat\", \"Power0Nat\", \"PowerSucNat\", \"SubTotalDef1Nat\", \"SubTotalDef2Nat\", \"SubDefNat\", \"Divide0Nat1\", \"Divide0Nat\", \"DividePosNat\", \"DivNat\", \"ModNat\"]" typedecl Unit datatype Bool = X_False ("False''") | X_True ("True''") datatype Ordering = EQ | GT | LT datatype Nat = X0 ("0''") | X_suc "Nat" ("suc/'(_')" [3] 999) consts Not__X :: "Bool => Bool" ("(Not''/ _)" [56] 56) X1 :: "Nat" ("1''") X2 :: "Nat" ("2") X3 :: "Nat" ("3") X4 :: "Nat" ("4") X5 :: "Nat" ("5") X6 :: "Nat" ("6") X7 :: "Nat" ("7") X8 :: "Nat" ("8") X9 :: "Nat" ("9") X__XAmpXAmp__X :: "Bool => Bool => Bool" ("(_/ &&/ _)" [54,54] 52) X__XAtXAt__X :: "Nat => Nat => Nat" ("(_/ @@/ _)" [54,54] 52) X__XCaret__X :: "Nat => Nat => Nat" ("(_/ ^''/ _)" [54,54] 52) X__XEqXEq__X :: "'a => 'a => Bool" ("(_/ ==''/ _)" [54,54] 52) X__XExclam :: "Nat => Nat" ("(_/ !'')" [58] 58) X__XGtXEq__X :: "'a => 'a => Bool" ("(_/ >=''/ _)" [54,54] 52) X__XGt__X :: "'a => 'a => Bool" ("(_/ >''/ _)" [54,54] 52) X__XLtXEq__X :: "'a => 'a => Bool" ("(_/ <=''/ 
_)" [54,54] 52) X__XLt__X :: "'a => 'a => Bool" ("(_/ <''/ _)" [54,54] 52) X__XMinusXExclam__X :: "Nat => Nat => Nat" ("(_/ -!/ _)" [54,54] 52) X__XMinusXQuest__X :: "Nat => Nat => Nat option" ("(_/ -?/ _)" [54,54] 52) X__XPlus__X :: "Nat => Nat => Nat" ("(_/ +''/ _)" [54,54] 52) X__XSlashXEq__X :: "'a => 'a => Bool" ("(_/ '/=/ _)" [54,54] 52) X__XSlashXQuest__X :: "Nat => Nat => Nat option" ("(_/ '/?/ _)" [54,54] 52) X__XVBarXVBar__X :: "Bool => Bool => Bool" ("(_/ ||/ _)" [54,54] 52) X__Xx__X :: "Nat => Nat => Nat" ("(_/ *''/ _)" [54,54] 52) X__div__X :: "Nat => Nat => Nat option" ("(_/ div''/ _)" [54,54] 52) X__le__X :: "'a => 'a => bool" ("(_/ le/ _)" [44,44] 42) X__mod__X :: "Nat => Nat => Nat option" ("(_/ mod''/ _)" [54,54] 52) X_even :: "Nat => Bool" ("even''/'(_')" [3] 999) X_maxX1 :: "Nat => Nat => Nat" ("max''/'(_,/ _')" [3,3] 999) X_maxX2 :: "'a => 'a => 'a" X_minX1 :: "Nat => Nat => Nat" ("min''/'(_,/ _')" [3,3] 999) X_minX2 :: "'a => 'a => 'a" X_odd :: "Nat => Bool" ("odd''/'(_')" [3] 999) X_pre :: "Nat => Nat option" ("pre/'(_')" [3] 999) compare :: "'a => 'a => Ordering" otherwiseH :: "Bool" instance Bool:: type .. instance Nat:: type .. instance Ordering:: type .. instance Unit:: type .. axioms NotFalse [rule_format] : "Not' False' = True'" NotTrue [rule_format] : "Not' True' = False'" AndDef1 [rule_format] : "False' && False' = False'" AndDef2 [rule_format] : "False' && True' = False'" AndDef3 [rule_format] : "True' && False' = False'" AndDef4 [rule_format] : "True' && True' = True'" OrDef [rule_format] : "ALL x. ALL y. x || y = Not' (Not' x && Not' y)" OtherwiseDef [rule_format] : "otherwiseH = True'" NotTrue1 [rule_format] : "ALL x. Not' x = True' = (x = False')" NotFalse2 [rule_format] : "ALL x. Not' x = False' = (x = True')" TB1 [rule_format] : "~ True' = False'" EqualTDef [rule_format] : "ALL x. ALL y. x = y --> x ==' y = True'" SymT [rule_format] : "ALL x. ALL y. x ==' y = True' = (y ==' x = True')" SymF [rule_format] : "ALL x. ALL y. x ==' y = False' = (y ==' x = False')" ReflexT [rule_format] : "ALL x. x ==' x = True'" TransT [rule_format] : "ALL x. ALL y. ALL z. x ==' y = True' & y ==' z = True' --> x ==' z = True'" DiffTDef [rule_format] : "ALL x. ALL y. x /= y = True' = (Not' (x ==' y) = True')" DiffFDef [rule_format] : "ALL x. ALL y. x /= y = False' = (x ==' y = True')" DiffDef2 [rule_format] : "ALL x. ALL y. x /= y = Not' (x ==' y)" TE1 [rule_format] : "ALL x. ALL y. x ==' y = False' --> ~ x = y" TE2 [rule_format] : "ALL x. ALL y. Not' (x ==' y) = True' = (x ==' y = False')" TE3 [rule_format] : "ALL x. ALL y. Not' (x ==' y) = False' = (x ==' y = True')" TE4 [rule_format] : "ALL x. ALL y. (~ x ==' y = True') = (x ==' y = False')" IBE1 [rule_format] : "True' ==' True' = True'" IBE2 [rule_format] : "False' ==' False' = True'" IBE3 [rule_format] : "True' ==' False' = False'" IBE4 [rule_format] : "False' ==' True' = False'" IBE5 [rule_format] : "True' /= False' = True'" IBE6 [rule_format] : "False' /= True' = True'" IBE7 [rule_format] : "Not' (True' ==' False') = True'" IBE8 [rule_format] : "Not' Not' (True' ==' False') = False'" IUE1 [rule_format] : "() ==' () = True'" IUE2 [rule_format] : "() /= () = False'" ALeIrreflexivity [rule_format] : "ALL x. ~ x le x" ALeAsymmetry [rule_format] : "ALL x. ALL y. x le y --> ~ y le x" ALeTransitive [rule_format] : "ALL x. ALL y. ALL z. x le y & y le z --> x le z" ALeTotal [rule_format] : "ALL x. ALL y. 
(x le y | y le x) | x = y" IOE01 [rule_format] : "LT ==' LT = True'" IOE02 [rule_format] : "EQ ==' EQ = True'" IOE03 [rule_format] : "GT ==' GT = True'" IOE04 [rule_format] : "LT ==' EQ = False'" IOE05 [rule_format] : "LT ==' GT = False'" IOE06 [rule_format] : "EQ ==' GT = False'" IOE07 [rule_format] : "LT /= EQ = True'" IOE08 [rule_format] : "LT /= GT = True'" IOE09 [rule_format] : "EQ /= GT = True'" LeDef [rule_format] : "ALL x. ALL y. x <' y = True' = (x le y)" LeIrreflexivity [rule_format] : "ALL x. x <' x = False'" LeTAsymmetry [rule_format] : "ALL x. ALL y. x <' y = True' --> y <' x = False'" LeTTransitive [rule_format] : "ALL x. ALL y. ALL z. x <' y = True' & y <' z = True' --> x <' z = True'" LeTTotal [rule_format] : "ALL x. ALL y. (x <' y = True' | y <' x = True') | x ==' y = True'" LeqTTransitive [rule_format] : "ALL x. ALL y. ALL z. x <=' y = True' & y <=' z = True' --> x <=' z = True'" LeqTTotal [rule_format] : "ALL x. ALL y. x <=' y = True' & y <=' x = True' --> x ==' y = True'" GeIrreflexivity [rule_format] : "ALL x. x >' x = False'" GeTAsymmetry [rule_format] : "ALL x. ALL y. x >' y = True' --> y >' x = False'" GeTTransitive [rule_format] : "ALL x. ALL y. ALL z. x >' y = True' & y >' z = True' --> x >' z = True'" GeTTotal [rule_format] : "ALL x. ALL y. (x >' y = True' | y >' x = True') | x ==' y = True'" GeqTTransitive [rule_format] : "ALL x. ALL y. ALL z. x >=' y = True' & y >=' z = True' --> x >=' z = True'" GeqTTotal [rule_format] : "ALL x. ALL y. (x >=' y = True' & y >=' x = True') = (x ==' y = True')" EqTSOrdRel [rule_format] : "ALL x. ALL y. x ==' y = True' = (x <' y = False' & x >' y = False')" EqFSOrdRel [rule_format] : "ALL x. ALL y. x ==' y = False' = (x <' y = True' | x >' y = True')" EqTOrdRel [rule_format] : "ALL x. ALL y. x ==' y = True' = (x <=' y = True' & x >=' y = True')" EqFOrdRel [rule_format] : "ALL x. ALL y. x ==' y = False' = (x <=' y = True' | x >=' y = True')" LeTGeTRel [rule_format] : "ALL x. ALL y. x <' y = True' = (y >' x = True')" LeqTGetTRel [rule_format] : "ALL x. ALL y. x <=' y = True' = (y >=' x = True')" GeTLeTRel [rule_format] : "ALL x. ALL y. x >' y = True' = (y <' x = True')" GeqTLeqTRel [rule_format] : "ALL x. ALL y. x >=' y = True' = (y <=' x = True')" LeTGeFEqFRel [rule_format] : "ALL x. ALL y. x <' y = True' = (x >' y = False' & x ==' y = False')" LeqTGeFRel [rule_format] : "ALL x. ALL y. x <=' y = True' = (x >' y = False')" GeTLeFEqFRel [rule_format] : "ALL x. ALL y. x >' y = True' = (x <' y = False' & x ==' y = False')" GeqTLeFRel [rule_format] : "ALL x. ALL y. x >=' y = True' = (x <' y = False')" LeqTLeTEqTRel [rule_format] : "ALL x. ALL y. x <=' y = True' = (x <' y = True' | x ==' y = True')" GeqTGeTEqTRel [rule_format] : "ALL x. ALL y. x >=' y = True' = (x >' y = True' | x ==' y = True')" LeFGeFRel [rule_format] : "ALL x. ALL y. x <' y = False' = (y >' x = False')" LeqFGetFRel [rule_format] : "ALL x. ALL y. x <=' y = False' = (y >=' x = False')" GeFLeFRel [rule_format] : "ALL x. ALL y. x >' y = False' = (y <' x = False')" GeqFLeqFRel [rule_format] : "ALL x. ALL y. x >=' y = False' = (y <=' x = False')" LeFGeTEqTRel [rule_format] : "ALL x. ALL y. x <' y = False' = (x >' y = True' | x ==' y = True')" LeqFGeTRel [rule_format] : "ALL x. ALL y. x <=' y = False' = (x >' y = True')" GeFLeTEqTRel [rule_format] : "ALL x. ALL y. x >' y = False' = (x <' y = True' | x ==' y = True')" GeqFLeTRel [rule_format] : "ALL x. ALL y. x >=' y = False' = (x <' y = True')" LeqFLeFEqFRel [rule_format] : "ALL x. ALL y. 
x <=' y = False' = (x <' y = False' & x ==' y = False')" GeqFGeFEqFRel [rule_format] : "ALL x. ALL y. x >=' y = False' = (x >' y = False' & x ==' y = False')" LeTGeqFRel [rule_format] : "ALL x. ALL y. x <' y = True' = (x >=' y = False')" GeTLeqFRel [rule_format] : "ALL x. ALL y. x >' y = True' = (x <=' y = False')" LeLtDef [rule_format] : "ALL x. ALL y. compare x y = LT = (x <' y = True' & x ==' y = False')" LeEqDef [rule_format] : "ALL x. ALL y. compare x y = EQ = (x <' y = False' & y <' x = False')" LeGtDef [rule_format] : "ALL x. ALL y. compare x y = GT = (x >' y = True' & x ==' y = False')" LqLtDef [rule_format] : "ALL x. ALL y. (compare x y = LT | compare x y = EQ) = (x <=' y = True')" LqEqDef [rule_format] : "ALL x. ALL y. compare x y = EQ = (x <=' y = True' & y <=' x = True')" LqGtDef [rule_format] : "ALL x. ALL y. compare x y = GT = (x <=' y = False')" GeLtDef [rule_format] : "ALL x. ALL y. compare x y = LT = (x >' y = False' & x ==' y = False')" GeEqDef [rule_format] : "ALL x. ALL y. compare x y = EQ = (x >' y = False' & y >' x = False')" GeGtDef [rule_format] : "ALL x. ALL y. compare x y = GT = (x >' y = True' & x ==' y = False')" GqLtDef [rule_format] : "ALL x. ALL y. compare x y = LT = (x >=' y = False')" GqEqDef [rule_format] : "ALL x. ALL y. compare x y = EQ = (x >=' y = True' & y >=' x = True')" GqGtDef [rule_format] : "ALL x. ALL y. (compare x y = GT | compare x y = EQ) = (x >=' y = True')" MaxYDef [rule_format] : "ALL x. ALL y. X_maxX2 x y = y = (x <=' y = True')" MaxXDef [rule_format] : "ALL x. ALL y. X_maxX2 x y = x = (x >' y = True')" MinXDef [rule_format] : "ALL x. ALL y. X_minX2 x y = x = (x <=' y = True')" MinYDef [rule_format] : "ALL x. ALL y. X_minX2 x y = y = (x >' y = True')" TO1 [rule_format] : "ALL x. ALL y. (x ==' y = True' | x <' y = True') = (x <=' y = True')" TO2 [rule_format] : "ALL x. ALL y. x ==' y = True' --> x <' y = False'" TO3 [rule_format] : "ALL x. ALL y. Not' Not' (x <' y) = True' | Not' (x <' y) = True'" TO4 [rule_format] : "ALL x. ALL y. x <' y = True' --> Not' (x ==' y) = True'" TO5 [rule_format] : "ALL w. ALL x. ALL y. ALL z. (x <' y = True' & y <' z = True') & z <' w = True' --> x <' w = True'" TO6 [rule_format] : "ALL x. ALL z. z <' x = True' --> Not' (x <' z) = True'" TO7 [rule_format] : "ALL x. ALL y. 
x <' y = True' = (y >' x = True')" IOO1 [rule_format] : "LT le EQ" IOO2 [rule_format] : "EQ le GT" IOO3 [rule_format] : "LT le GT" IOO13 [rule_format] : "LT <' EQ = True'" IOO14 [rule_format] : "EQ <' GT = True'" IOO15 [rule_format] : "LT <' GT = True'" IOO16 [rule_format] : "LT <=' EQ = True'" IOO17 [rule_format] : "EQ <=' GT = True'" IOO18 [rule_format] : "LT <=' GT = True'" IOO19 [rule_format] : "EQ >=' LT = True'" IOO20 [rule_format] : "GT >=' EQ = True'" IOO21 [rule_format] : "GT >=' LT = True'" IOO22 [rule_format] : "EQ >' LT = True'" IOO23 [rule_format] : "GT >' EQ = True'" IOO24 [rule_format] : "GT >' LT = True'" IOO25 [rule_format] : "X_maxX2 LT EQ = EQ" IOO26 [rule_format] : "X_maxX2 EQ GT = GT" IOO27 [rule_format] : "X_maxX2 LT GT = GT" IOO28 [rule_format] : "X_minX2 LT EQ = LT" IOO29 [rule_format] : "X_minX2 EQ GT = EQ" IOO30 [rule_format] : "X_minX2 LT GT = LT" IOO31 [rule_format] : "compare LT LT = EQ" IOO32 [rule_format] : "compare EQ EQ = EQ" IOO33 [rule_format] : "compare GT GT = EQ" IBO1 [rule_format] : "False' le True'" IBO4 [rule_format] : "~ True' le False'" IBO5 [rule_format] : "False' <' True' = True'" IBO6 [rule_format] : "False' >=' True' = False'" IBO7 [rule_format] : "True' >=' False' = True'" IBO8 [rule_format] : "True' <' False' = False'" IBO9 [rule_format] : "X_maxX2 False' True' = True'" IBO10 [rule_format] : "X_minX2 False' True' = False'" IBO11 [rule_format] : "compare True' True' = EQ" IBO12 [rule_format] : "compare False' False' = EQ" IUO01 [rule_format] : "() <=' () = True'" IUO02 [rule_format] : "() <' () = False'" IUO03 [rule_format] : "() >=' () = True'" IUO04 [rule_format] : "() >' () = False'" IUO05 [rule_format] : "() = ()" IUO06 [rule_format] : "() = ()" IUO07 [rule_format] : "compare () () = EQ" ga_select_pre [rule_format] : "ALL x_1_1. pre(suc(x_1_1)) = Some x_1_1" X1DefNat [rule_format] : "1' = suc(0')" X2DefNat [rule_format] : "2 = suc(1')" X3DefNat [rule_format] : "3 = suc(2)" X4DefNat [rule_format] : "4 = suc(3)" X5DefNat [rule_format] : "5 = suc(4)" X6DefNat [rule_format] : "6 = suc(5)" X7DefNat [rule_format] : "7 = suc(6)" X8DefNat [rule_format] : "8 = suc(7)" X9DefNat [rule_format] : "9 = suc(8)" DecimalDef [rule_format] : "ALL m. ALL X_n. m @@ X_n = (m *' suc(9)) +' X_n" LeqDef1Nat [rule_format] : "ALL X_n. 0' <=' X_n = True'" LeqDef2Nat [rule_format] : "ALL X_n. suc(X_n) <=' 0' = False'" LeqDef3Nat [rule_format] : "ALL m. ALL X_n. suc(m) <=' suc(X_n) = True' = (m <=' X_n = True')" GeqDefNat [rule_format] : "ALL m. ALL X_n. m >=' X_n = True' = (X_n <=' m = True')" LessDefNat [rule_format] : "ALL m. ALL X_n. m <' X_n = True' = (m <=' X_n = True' & m ==' X_n = False')" GreaterDefNat [rule_format] : "ALL m. ALL X_n. m >' X_n = True' = (X_n <' m = True')" Even0Nat [rule_format] : "even'(0') = True'" EvenSucNat [rule_format] : "ALL m. even'(suc(m)) = True' = (odd'(m) = True')" OddDefNat [rule_format] : "ALL m. odd'(m) = True' = (even'(m) = False')" Factorial0 [rule_format] : "0' !' = 1'" FactorialSuc [rule_format] : "ALL X_n. suc(X_n) !' = suc(X_n) *' X_n !'" Add0Nat [rule_format] : "ALL m. 0' +' m = m" AddSucNat [rule_format] : "ALL m. ALL X_n. suc(X_n) +' m = suc(X_n +' m)" Mult0Nat [rule_format] : "ALL m. 0' *' m = 0'" MultSucNat [rule_format] : "ALL m. ALL X_n. suc(X_n) *' m = (X_n *' m) +' m" Power0Nat [rule_format] : "ALL m. m ^' 0' = 1'" PowerSucNat [rule_format] : "ALL m. ALL X_n. m ^' suc(X_n) = m *' (m ^' X_n)" SubTotalDef1Nat [rule_format] : "ALL m. ALL X_n. m >' X_n = True' --> X_n -! 
m = 0'" SubTotalDef2Nat [rule_format] : "ALL m. ALL X_n. m <=' X_n = True' --> Some (X_n -! m) = X_n -? m" SubDefNat [rule_format] : "ALL m. ALL X_n. ALL r. m -? X_n = Some r = (m = r +' X_n)" Divide0Nat1 [rule_format] : "ALL m. ALL X_n. X_n ==' 0' = True' --> ~ defOp (m /? X_n)" Divide0Nat [rule_format] : "ALL m. ~ defOp (m /? 0')" DividePosNat [rule_format] : "ALL m. ALL X_n. ALL r. X_n >' 0' = True' --> m /? X_n = Some r = (m = r *' X_n)" DivNat [rule_format] : "ALL m. ALL X_n. ALL r. m div' X_n = Some r = (EX s. m = (X_n *' r) +' s & s <' X_n = True')" ModNat [rule_format] : "ALL m. ALL X_n. ALL s. m mod' X_n = Some s = (EX r. m = (X_n *' r) +' s & s <' X_n = True')" declare NotFalse [simp] declare NotTrue [simp] declare AndDef1 [simp] declare AndDef2 [simp] declare AndDef3 [simp] declare AndDef4 [simp] declare ReflexT [simp] declare IBE1 [simp] declare IBE2 [simp] declare IBE3 [simp] declare IBE4 [simp] declare IBE5 [simp] declare IBE6 [simp] declare IBE7 [simp] declare IBE8 [simp] declare ALeIrreflexivity [simp] declare ALeAsymmetry [simp] declare IOE01 [simp] declare IOE02 [simp] declare IOE03 [simp] declare IOE04 [simp] declare IOE05 [simp] declare IOE06 [simp] declare IOE07 [simp] declare IOE08 [simp] declare IOE09 [simp] declare LeIrreflexivity [simp] declare LeTAsymmetry [simp] declare GeIrreflexivity [simp] declare GeTAsymmetry [simp] declare TO2 [simp] declare TO4 [simp] declare TO6 [simp] declare IOO1 [simp] declare IOO2 [simp] declare IOO3 [simp] declare IOO13 [simp] declare IOO14 [simp] declare IOO15 [simp] declare IOO16 [simp] declare IOO17 [simp] declare IOO18 [simp] declare IOO19 [simp] declare IOO20 [simp] declare IOO21 [simp] declare IOO22 [simp] declare IOO23 [simp] declare IOO24 [simp] declare IOO25 [simp] declare IOO26 [simp] declare IOO27 [simp] declare IOO28 [simp] declare IOO29 [simp] declare IOO30 [simp] declare IOO31 [simp] declare IOO32 [simp] declare IOO33 [simp] declare IBO1 [simp] declare IBO4 [simp] declare IBO5 [simp] declare IBO6 [simp] declare IBO7 [simp] declare IBO8 [simp] declare IBO9 [simp] declare IBO10 [simp] declare IBO11 [simp] declare IBO12 [simp] declare ga_select_pre [simp] declare LeqDef1Nat [simp] declare LeqDef2Nat [simp] declare Even0Nat [simp] declare Factorial0 [simp] declare Add0Nat [simp] declare Mult0Nat [simp] declare Power0Nat [simp] declare SubTotalDef1Nat [simp] declare SubTotalDef2Nat [simp] declare Divide0Nat1 [simp] declare Divide0Nat [simp] theorem SubDomNat : "ALL m. ALL X_n. defOp (m -? X_n) = (m >=' X_n = True')" apply(auto) apply(rename_tac x y) apply(case_tac "x -? y") apply(auto) apply(simp add: SubDefNat) apply(case_tac "a +' y") apply(auto) apply(simp add: GeqTLeqTRel) apply(simp add: LeqTDef) apply(auto) using X1DefNat X2DefNat X3DefNat X4DefNat X5DefNat X6DefNat X7DefNat X8DefNat X9DefNat DecimalDef by auto ML "Header.record \"SubDomNat\"" theorem DivideDomNat : "ALL m. ALL X_n. defOp (m /? X_n) = (X_n ==' 0' = False' & m mod' X_n = Some 0')" using X1DefNat X2DefNat X3DefNat X4DefNat X5DefNat X6DefNat X7DefNat X8DefNat X9DefNat DecimalDef by auto ML "Header.record \"DivideDomNat\"" theorem DivDomNat : "ALL m. ALL X_n. defOp (m div' X_n) = (~ X_n = 0')" using X1DefNat X2DefNat X3DefNat X4DefNat X5DefNat X6DefNat X7DefNat X8DefNat X9DefNat DecimalDef by auto ML "Header.record \"DivDomNat\"" theorem ModDomNat : "ALL m. ALL X_n. 
defOp (m mod' X_n) = (~ X_n = 0')" using X1DefNat X2DefNat X3DefNat X4DefNat X5DefNat X6DefNat X7DefNat X8DefNat X9DefNat DecimalDef by auto ML "Header.record \"ModDomNat\"" theorem Distr1Nat : "ALL r. ALL s. ALL t. (r +' s) *' t = (r *' t) +' (s *' t)" using X1DefNat X2DefNat X3DefNat X4DefNat X5DefNat X6DefNat X7DefNat X8DefNat X9DefNat DecimalDef by auto ML "Header.record \"Distr1Nat\"" theorem Distr2Nat : "ALL r. ALL s. ALL t. t *' (r +' s) = (t *' r) +' (t *' s)" using X1DefNat X2DefNat X3DefNat X4DefNat X5DefNat X6DefNat X7DefNat X8DefNat X9DefNat DecimalDef by auto ML "Header.record \"Distr2Nat\"" end
<h1 align=center style="color: #005496; font-size: 4.2em;">Machine Learning with Python</h1>
<h2 align=center>Laboratory on Numpy / Matplotlib / Scikit-learn</h2>

***
***

## Introduction

In the past few years, Python has become the de facto standard programming language for data analytics. Python's success is due to several factors, but one major reason has been the availability of powerful, open-source libraries for scientific computation such as Numpy, Scipy and Matplotlib. Python is also the most popular programming language for machine learning, thanks to libraries such as Scikit-learn and TensorFlow.

In this lecture we will explore the basics of Numpy, Matplotlib and Scikit-learn. The first is a library for data manipulation through the powerful `numpy.ndarray` data structure; the second is useful for graphical visualization and plotting; the third is a general-purpose library for machine learning, containing dozens of algorithms for classification, regression and clustering.

In this lecture we assume familiarity with the Python programming language. If you are not familiar with the language, we advise you to look it up before moving on to the next sections. Here are some useful links to learn about Python:

- https://docs.python.org/3/tutorial/introduction.html
- https://www.learnpython.org/
- http://www.scipy-lectures.org/

If you have never seen a page like this, it is a **Jupyter Notebook**. Here one can easily embed Python code and run it on the fly. You can run the code in a cell by selecting the cell and clicking the *Run* button (top). You can do the same using the **SHIFT+Enter** shortcut. You can modify the existing cells, run them and finally save your changes.

## Requirements

1. Python (preferably version > 3.3): https://www.python.org/downloads/
2. Numpy, Scipy and Matplotlib: https://www.scipy.org/install.html
3. Scikit-learn: http://scikit-learn.org/stable/install.html

## References

- https://docs.scipy.org/doc/numpy/
- https://docs.scipy.org/doc/scipy/reference/
- https://matplotlib.org/users/index.html
- http://scikit-learn.org/stable/documentation.html

# Numpy

Numpy provides high-performance data structures for data manipulation and numeric computation. In particular, we will look at the `numpy.ndarray`, a data structure for manipulating vectors, matrices and tensors. Let's start by importing `numpy`:

```python
# the np alias is very common
import numpy as np
```

We can initialize a Numpy array from a Python list using the `numpy.array` function:

```python
# if the argument is a list of numbers, the array will be a 1-dimensional vector
a = np.array([1, 2, 3, 4, 5, 6])
a
```

array([1, 2, 3, 4, 5, 6])

```python
# if the argument is a list of lists, the array will be a 2-dimensional matrix
M = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
M
```

array([[ 1, 2, 3, 4], [ 5, 6, 7, 8], [ 9, 10, 11, 12], [13, 14, 15, 16]])

Given a Numpy array, we can check its `shape`, a tuple containing the number of elements for each dimension:

```python
a.shape
```

(6,)

```python
M.shape
```

(4, 4)

The size of an array is its total number of elements:

```python
a.size
```

6

```python
M.size
```

16

Numpy arrays support quite a few operations that are not possible with standard Python lists.

### Indexing

Numpy arrays allow quite advanced indexing.
```python
# A 1d vector can be indexed in all the common ways
a[0]
```

1

```python
a[1:3]
```

array([2, 3])

```python
a[0:5:2]
```

array([1, 3, 5])

```python
# Use a boolean mask
mask = [True, False, False, True, True, False]
a[mask]
```

array([1, 4, 5])

```python
# Access specific elements by passing a list of indices
a[[1, 4, 5]]
```

array([2, 5, 6])

The power of Numpy indexing capabilities starts showing up with 2d arrays:

```python
# Access a single element of the matrix
M[0, 1]
```

2

```python
# Access an entire row
M[1]
```

array([5, 6, 7, 8])

```python
# Access an entire column
M[:,2]
```

array([ 3, 7, 11, 15])

```python
# Extract a sub-matrix
M[1:3, 0:2]
```

array([[ 5, 6], [ 9, 10]])

### Data manipulation

We can manipulate data in several ways.

```python
# Flatten a matrix
M.flatten()
```

array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16])

```python
# Reshaping a matrix
M.reshape(2, 8)
```

array([[ 1, 2, 3, 4, 5, 6, 7, 8], [ 9, 10, 11, 12, 13, 14, 15, 16]])

```python
# The size of the last dimension can be automatically inferred using -1
M.reshape(2, -1)
```

array([[ 1, 2, 3, 4, 5, 6, 7, 8], [ 9, 10, 11, 12, 13, 14, 15, 16]])

```python
# Computing the max and the min
M.max(), M.min()
```

(16, 1)

```python
# Computing the mean and standard deviation
M.mean(), M.std()
```

(8.5, 4.6097722286464435)

```python
# Computing the sum along the rows
M.sum(axis=1)
```

array([10, 26, 42, 58])

### Linear algebra

Numpy is very useful for all sorts of numeric computation, especially linear algebra:

```python
# Transpose
M.T
```

array([[ 1, 5, 9, 13], [ 2, 6, 10, 14], [ 3, 7, 11, 15], [ 4, 8, 12, 16]])

```python
# Adding and multiplying a constant
10 * M + 5
```

array([[ 15, 25, 35, 45], [ 55, 65, 75, 85], [ 95, 105, 115, 125], [135, 145, 155, 165]])

```python
# Element-wise product
b = np.array([-1, -2, 4, 6, 8, -4])
a * b
```

array([ -1, -4, 12, 24, 40, -24])

```python
# Dot product
a.dot(b)
```

47

```python
# More linear algebra in the package numpy.linalg
# Determinant
np.linalg.det(M)
```

4.7331654313261276e-30

```python
# Eigenvalues
np.linalg.eigvals(M)
```

array([ 3.62093727e+01, -2.20937271e+00, -3.18863232e-15, -1.34840081e-16])

### Vector generation and sampling

Numpy allows us to generate or randomly sample vectors:

```python
# Generate an array with 0.5 spacing
x = np.arange(-10, 10, 0.5)
x
```

array([-10. , -9.5, -9. , -8.5, -8. , -7.5, -7. , -6.5, -6. , -5.5, -5. , -4.5, -4. , -3.5, -3. , -2.5, -2. , -1.5, -1. , -0.5, 0. , 0.5, 1. , 1.5, 2. , 2.5, 3. , 3.5, 4. , 4.5, 5. , 5.5, 6. , 6.5, 7. , 7.5, 8. , 8.5, 9. , 9.5])

```python
# Generate an array with 20 equally spaced points
x = np.linspace(-10, 10, 20)
x
```

array([-10. , -8.94736842, -7.89473684, -6.84210526, -5.78947368, -4.73684211, -3.68421053, -2.63157895, -1.57894737, -0.52631579, 0.52631579, 1.57894737, 2.63157895, 3.68421053, 4.73684211, 5.78947368, 6.84210526, 7.89473684, 8.94736842, 10. ])

```python
# Sample a vector from a standard normal distribution
np.random.normal(size=(10,))
```

array([-1.45737898, 0.23555453, 0.24578509, -2.07977299, 1.08726802, -0.41107403, 0.12253856, 1.47129648, 0.5223578 , -0.29633517])

### Functions

Numpy provides all sorts of mathematical functions we can apply to arrays:

```python
# Exponential function
np.exp(x)
```

array([ 4.53999298e-05, 1.30079023e-04, 3.72699966e-04, 1.06785292e-03, 3.05959206e-03, 8.76628553e-03, 2.51169961e-02, 7.19647439e-02, 2.06192028e-01, 5.90777514e-01, 1.69268460e+00, 4.84984802e+00, 1.38956932e+01, 3.98136782e+01, 1.14073401e+02, 3.26840958e+02, 9.36458553e+02, 2.68312340e+03, 7.68763460e+03, 2.20264658e+04])

```python
# Sine
np.sin(x)
```

array([ 0.54402111, -0.4594799 , -0.99916962, -0.53027082, 0.47389753, 0.99970104, 0.5163796 , -0.48818921, -0.99996678, -0.50235115, 0.50235115, 0.99996678, 0.48818921, -0.5163796 , -0.99970104, -0.47389753, 0.53027082, 0.99916962, 0.4594799 , -0.54402111])

```python
# A gaussian function
y = np.exp(-(x ** 2)/2)
y
```

array([ 1.92874985e-22, 4.13228632e-18, 2.92342653e-14, 6.82937941e-11, 5.26814324e-08, 1.34190319e-05, 1.12868324e-03, 3.13480292e-02, 2.87498569e-01, 8.70659634e-01, 8.70659634e-01, 2.87498569e-01, 3.13480292e-02, 1.12868324e-03, 1.34190319e-05, 5.26814324e-08, 6.82937941e-11, 2.92342653e-14, 4.13228632e-18, 1.92874985e-22])

# Matplotlib

The arrays above provide little insight without proper visualization. Matplotlib is a powerful library for data visualization. Let's plot the above function.

```python
# the following line is only needed to show plots in the notebook
%matplotlib inline
import matplotlib.pyplot as plt

x = np.linspace(-10, 10, 200)  # get a sample of the x axis
y = np.exp(-(x**2)/(2*1))      # compute the function for all points in the sample
plt.plot(x, y)                 # add the curve to the plot
plt.show()                     # show the plot
```

We can also plot more than one line in the same figure and add a grid to the plot.

```python
z = np.exp(-(x**2)/(2*10))

plt.grid()      # add the grid under the curves
plt.plot(x, y)  # add the first curve to the plot
plt.plot(x, z)  # add the second curve to the plot
plt.show()      # show the plot
```

We can also set several properties of the plot in this way:

```python
plt.grid()
plt.xlabel('x')                     # add a label to the x axis
plt.ylabel('y')                     # add a label to the y axis
plt.xticks(np.arange(-10, 11, 2))   # specify in which point to place a tick on the x axis
plt.yticks(np.arange(0, 2.2, 0.2))  # and on the y axis

# rs- stands for red, squared markers, solid line
# yd-- stands for yellow, diamond markers, dashed line
plt.plot(x, y, 'rs-', markevery=10, label='sigma=1')    # add a style and a label and specify the gap
plt.plot(x, z, 'yd--', markevery=10, label='sigma=10')  # between markers for both curves

plt.legend()  # add the legend (displays the labels of the curves)
plt.show()    # show the plot
```

Finally, we can save the plot into a png file in this way:

```python
plt.grid()
plt.xlabel('x')
plt.ylabel('y')
plt.xticks(np.arange(-10, 11, 2))
plt.yticks(np.arange(0, 2.2, 0.2))
plt.plot(x, y, 'rs-', markevery=10, label='sigma=1')
plt.plot(x, z, 'yd--', markevery=10, label='sigma=10')
plt.legend()
plt.savefig('plot.png', dpi=300)  # saves the plot into the file plot.png with 300 dpi
                                  # will not work on lion0b because directory is read-only
```

# Scikit-learn

Let's now dive into the real **Machine Learning** part.
*Scikit-learn* is perhaps the most widespread library for Machine Learning in use nowadays, and most of its fame is due to its extreme simplicity. With Scikit-learn it is possible to easily manage datasets and train a wide range of classifiers out of the box. It is also useful for several other Machine Learning tasks such as regression, clustering, dimensionality reduction, and model selection. In the following, we will see how to use Scikit-learn to load a dataset, train a classifier, and perform validation and model selection. Scikit-learn comes with a range of popular reference datasets. Let's load and use the *Digits* dataset: ```python from sklearn.datasets import load_digits digits = load_digits() print(digits.DESCR) # print a description of the digits dataset ``` Optical Recognition of Handwritten Digits Data Set =================================================== Notes ----- Data Set Characteristics: :Number of Instances: 5620 :Number of Attributes: 64 :Attribute Information: 8x8 image of integer pixels in the range 0..16. :Missing Attribute Values: None :Creator: E. Alpaydin (alpaydin '@' boun.edu.tr) :Date: July; 1998 This is a copy of the test set of the UCI ML hand-written digits datasets http://archive.ics.uci.edu/ml/datasets/Optical+Recognition+of+Handwritten+Digits The data set contains images of hand-written digits: 10 classes where each class refers to a digit. Preprocessing programs made available by NIST were used to extract normalized bitmaps of handwritten digits from a preprinted form. From a total of 43 people, 30 contributed to the training set and different 13 to the test set. 32x32 bitmaps are divided into nonoverlapping blocks of 4x4 and the number of on pixels are counted in each block. This generates an input matrix of 8x8 where each element is an integer in the range 0..16. This reduces dimensionality and gives invariance to small distortions. For info on NIST preprocessing routines, see M. D. Garris, J. L. Blue, G. T. Candela, D. L. Dimmick, J. Geist, P. J. Grother, S. A. Janet, and C. L. Wilson, NIST Form-Based Handprint Recognition System, NISTIR 5469, 1994. References ---------- - C. Kaynak (1995) Methods of Combining Multiple Classifiers and Their Applications to Handwritten Digit Recognition, MSc Thesis, Institute of Graduate Studies in Science and Engineering, Bogazici University. - E. Alpaydin, C. Kaynak (1998) Cascading Classifiers, Kybernetika. - Ken Tang and Ponnuthurai N. Suganthan and Xi Yao and A. Kai Qin. Linear dimensionalityreduction using relevance weighted LDA. School of Electrical and Electronic Engineering Nanyang Technological University. 2005. - Claudio Gentile. A New Approximate Maximal Margin Classification Algorithm. NIPS. 2000. Let's take a look at the data: ```python X, y = digits.data, digits.target # The attributes of the first instance (notice it is a Numpy array) X[0] ``` array([ 0., 0., 5., 13., 9., 1., 0., 0., 0., 0., 13., 15., 10., 15., 5., 0., 0., 3., 15., 2., 0., 11., 8., 0., 0., 4., 12., 0., 0., 8., 8., 0., 0., 5., 8., 0., 0., 9., 8., 0., 0., 4., 11., 0., 1., 12., 7., 0., 0., 2., 14., 5., 10., 12., 0., 0., 0., 0., 6., 13., 10., 0., 0., 0.]) ```python # The label of the first instance y[0] ``` 0 Since it is a Numpy array, we can actually take a look at this image. We first need to reshape it into an 8x8 matrix and then use matplotlib. 
```python x = X[0].reshape((8, 8)) plt.gray() # use a grayscale plt.matshow(x) # display a matrix of values plt.show() # show the figure ``` Now we want to train a classifier to recognize the digits from the images and then we want to evaluate it. In order to make a proper evaluation, we first need to split the dataset into two sets, one for training and one for testing. Scikit-learn helps us with that: ```python from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) # Let's check the length of the two sets len(X_train), len(X_test) ``` (1437, 360) Now we need a classifier. Let's use an **SVM**. A reminder: \begin{align} \min_{\boldsymbol{w}} \quad & \frac{1}{2}\|\boldsymbol{w}\|^2 + C \sum_{i=1}^{|\mathcal{D}|} \xi_i \\ \forall (x_i, y_i) \in \mathcal{D} \quad & y_i ( \boldsymbol{w}^T x_i + b ) \ge 1 - \xi_i, \quad \xi_i \ge 0 \end{align} ```python from sklearn.svm import SVC # Specify the parameters in the constructor. # C is the parameter of the primal problem of the SVM; # The rbf kernel is the Radial Basis Function; # The rbf kernel takes one parameter: gamma clf = SVC(C=10, kernel='rbf', gamma=0.02) ``` Now the classifier can be trained and then used to predict unseen instances. ```python # Training clf.fit(X_train, y_train) # Prediction y_pred = clf.predict(X_test) y_pred ``` array([3, 3, 3, 7, 3, 1, 3, 3, 3, 3, 3, 3, 3, 0, 3, 3, 3, 3, 8, 3, 3, 3, 3, 3, 3, 3, 3, 3, 6, 3, 3, 9, 1, 3, 3, 6, 3, 4, 3, 6, 6, 3, 1, 3, 3, 3, 3, 3, 6, 3, 3, 3, 3, 3, 3, 0, 3, 3, 0, 1, 3, 3, 3, 3, 3, 5, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 3, 3, 0, 3, 4, 3, 3, 3, 3, 8, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 3, 3, 3, 3, 3, 7, 7, 3, 3, 3, 3, 3, 3, 3, 7, 2, 6, 3, 3, 3, 3, 3, 7, 3, 3, 3, 3, 3, 3, 3, 6, 3, 4, 3, 3, 3, 3, 3, 3, 3, 3, 6, 3, 3, 3, 3, 3, 6, 3, 3, 3, 3, 3, 3, 3, 3, 2, 3, 3, 4, 3, 3, 3, 3, 3, 3, 3, 4, 3, 1, 3, 7, 3, 2, 2, 3, 3, 8, 3, 3, 2, 3, 3, 6, 9, 3, 3, 1, 3, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 3, 1, 3, 3, 3, 3, 6, 1, 3, 6, 0, 4, 3, 2, 7, 3, 6, 3, 3, 3, 3, 3, 2, 3, 6, 3, 1, 3, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 3, 3, 3, 3, 3, 3, 0, 3, 3, 3, 0, 1, 3, 4, 3, 1, 3, 3, 6, 0, 3, 3, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 9, 3, 3, 3, 3, 3, 3, 0, 3, 3, 3, 3, 0, 3, 3, 6, 3, 3, 3, 3, 3, 3, 2, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 3, 3, 7, 0, 6, 3, 3, 3, 3, 1, 3, 4, 3, 3, 7, 3, 3, 3, 3, 3, 3, 0, 7, 3, 3, 3, 3, 2, 7, 3, 1, 3, 7, 3, 3, 3, 3, 3]) Now we want to evaluate the performance of our classifier. A reminder: \begin{align} \text{Accuracy } &= \frac{\text{true-positive} + \text{true-negative}}{\text{all examples}} \\ \text{Precision } &= \frac{\text{true-positive}}{\text{true-positive} + \text{false-positive}} \\ \text{Recall } &= \frac{\text{true-positive}}{\text{true-positive} + \text{false-negative}} \\ F_1 &= \frac{2 \times \text{precision} \times \text{recall}}{\text{precision} + \text{recall}} \\ \end{align} In a multiclass classification, Precision, Recall and $F_1$ are computed per class, considering the given class as positive and all others as negative. We can use Scikit-learn to compute and show these measures for all classes. 
```python from sklearn import metrics report = metrics.classification_report(y_test, y_pred) # the support is the number of instances having the given label in y_test print(report) ``` precision recall f1-score support 0 1.00 0.39 0.57 33 1 1.00 0.61 0.76 28 2 1.00 0.36 0.53 33 3 0.12 1.00 0.22 34 4 1.00 0.20 0.33 46 5 1.00 0.02 0.04 47 6 1.00 0.49 0.65 35 7 1.00 0.35 0.52 34 8 1.00 0.10 0.18 30 9 1.00 0.07 0.14 40 avg / total 0.92 0.34 0.37 360 Finally, we can compute the accuracy of our classifier: ```python metrics.accuracy_score(y_test, y_pred) ``` 0.33611111111111114 Apparently, our classifier performs a bit poorly out-of-sample. This is probably due to the random choice of the parameters for the classifier. We can do much better! We need to perform model selection, that is, we need to search for better parameters for our classifier. In particular, we are going to perform a **cross-validation** on the training set and see how the classifier performs with different values of *gamma*. A $k$-fold cross-validation works like this: - Split the dataset $D$ into $k$ equally-sized disjoint subsets $D_i$ - For $i \in [1, k]$ - Train the classifier on $T_i = D \setminus D_i$ - Compute the score (accuracy, precision, ...) on $D_i$ - Return the list of scores, one for each fold Scikit-learn helps us with this as well. We compute the cross-validated accuracy for all the possible values of *gamma* and select the *gamma* with the best average accuracy. ```python from sklearn.model_selection import KFold, cross_val_score # 3-fold cross-validation # random_state ensures same split for each value of gamma kf = KFold(n_splits=3, shuffle=True, random_state=42) gamma_values = [0.1, 0.05, 0.02, 0.01] accuracy_scores = [] # Do model selection over all the possible values of gamma for gamma in gamma_values: # Train a classifier with current gamma clf = SVC(C=10, kernel='rbf', gamma=gamma) # Compute cross-validated accuracy scores scores = cross_val_score(clf, X_train, y_train, cv=kf.split(X_train), scoring='accuracy') # Compute the mean accuracy and keep track of it accuracy_score = scores.mean() accuracy_scores.append(accuracy_score) # Get the gamma with highest mean accuracy best_index = np.array(accuracy_scores).argmax() best_gamma = gamma_values[best_index] # Train over the full training set with the best gamma clf = SVC(C=10, kernel='rbf', gamma=best_gamma) clf.fit(X_train, y_train) # Evaluate on the test set y_pred = clf.predict(X_test) accuracy = metrics.accuracy_score(y_test, y_pred) accuracy ``` 0.81388888888888888 Much better! Model selection allows us to fine-tune the parameters of a learning algorithm to get the best performance. Let's now look at the **Learning curve** of our classifier, in which we plot the training accuracy and the cross-validated accuracy for an increasing number of examples. 
```python from sklearn.model_selection import learning_curve plt.figure() plt.title("Learning curve") plt.xlabel("Training examples") plt.ylabel("Score") plt.grid() clf = SVC(C=10, kernel='rbf', gamma=best_gamma) # Compute the scores of the learning curve # by default the (relative) dataset sizes are: 10%, 32.5%, 55%, 77.5%, 100% train_sizes, train_scores, test_scores = learning_curve(clf, X_train, y_train, scoring='accuracy') # Get the mean and std of train and test scores along the varying dataset sizes train_scores_mean = np.mean(train_scores, axis=1) train_scores_std = np.std(train_scores, axis=1) test_scores_mean = np.mean(test_scores, axis=1) test_scores_std = np.std(test_scores, axis=1) # Plot the mean and std for the training scores plt.plot(train_sizes, train_scores_mean, 'o-', color="r", label="Training score") plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std, alpha=0.1, color="r") # Plot the mean and std for the cross-validation scores plt.plot(train_sizes, test_scores_mean, 'o-', color="g", label="Cross-validation score") plt.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std, alpha=0.1, color="g") plt.legend() plt.show() ``` Now we want to go even further. We can perform the above model selection procedure considering the *C* parameter as well. In general, this search over several parameters is called **grid search**, and Scikit-learn has an automated procedure to perform a cross-validated grid search for any classifier. ```python from sklearn.model_selection import GridSearchCV possible_parameters = { 'C': [1e0, 1e1, 1e2, 1e3], 'gamma': [1e-1, 1e-2, 1e-3, 1e-4] } svc = SVC(kernel='rbf') # The GridSearchCV is itself a classifier # we fit the GridSearchCV with the training data # and then we use it to predict on the test set clf = GridSearchCV(svc, possible_parameters, n_jobs=4) # n_jobs=4 means we parallelize the search over 4 jobs clf.fit(X_train, y_train) y_pred = clf.predict(X_test) accuracy = metrics.accuracy_score(y_test, y_pred) accuracy ``` 0.98888888888888893 Nice! Now we have a classifier with quite a competitive accuracy. The state-of-the-art (on a very similar task) has accuracy around $0.9979$, achieved by using Neural Networks, which we will see in the next Lab. Stay tuned!
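One last practical note on the grid search above: after fitting, `GridSearchCV` also records which parameter combination won the search. A minimal sketch of how to inspect it, assuming the `clf` fitted above (`best_params_`, `best_score_` and `best_estimator_` are standard `GridSearchCV` attributes; the values shown in the comments are only illustrative, since they depend on the actual search):

```python
# Inspect the outcome of the grid search fitted above.
print(clf.best_params_)     # winning combination, e.g. {'C': 10.0, 'gamma': 0.001} (illustrative)
print(clf.best_score_)      # mean cross-validated accuracy of that combination on the training set
print(clf.best_estimator_)  # the SVC refitted on the whole training set with those parameters
```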
[STATEMENT] lemma fps_divide_unit': "subdegree g = 0 \<Longrightarrow> f div g = f * inverse g" [PROOF STATE] proof (prove) goal (1 subgoal): 1. subdegree g = 0 \<Longrightarrow> f / g = f * inverse g [PROOF STEP] by (simp add: fps_divide_def)
/* Meru * GraphicsResourceName.cpp * * Copyright (c) 2009, Stanford University * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Sirikata nor the names of its contributors may * be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "GraphicsResourceManager.hpp" #include "GraphicsResourceName.hpp" #include "ResourceManager.hpp" #include <boost/bind.hpp> namespace Meru { GraphicsResourceName::GraphicsResourceName(const URI &resourceID, GraphicsResource::Type referencedType) : GraphicsResource(resourceID.toString(), NAME), mURI(resourceID), mReferencedType(referencedType) { } GraphicsResourceName::~GraphicsResourceName() { } SharedResourcePtr GraphicsResourceName::getReference() { return *(mDependencies.begin()); } void GraphicsResourceName::doParse() { ResourceHash result; std::tr1::function<void(const URI &, const ResourceHash *)> callback = std::tr1::bind(&GraphicsResourceName::hashLookupCallback, getWeakPtr(), mReferencedType, _1, _2); if (ResourceManager::getSingleton().nameLookup(mURI, result, callback)) { hashLookupCallback(getWeakPtr(), mReferencedType, mURI, &result); } } void GraphicsResourceName::doLoad() { loaded(true, mLoadEpoch); } void GraphicsResourceName::doUnload() { unloaded(true, mLoadEpoch); } void GraphicsResourceName::hashLookupCallback(WeakResourcePtr resourcePtr, Type refType, const URI &id, const ResourceHash *hash) { // assert(id != hash); // add dependency for hash SharedResourcePtr resource = resourcePtr.lock(); if (resource) { if (hash) { try { GraphicsResourceManager *grm = GraphicsResourceManager::getSingletonPtr(); SharedResourcePtr hashResource = grm->getResourceAsset(hash->uri(), refType); resource->addDependency(hashResource); resource->parsed(true); } catch (std::invalid_argument &exc) { resource->parsed(false); } } else { resource->parsed(false); } } } void GraphicsResourceName::fullyParsed() { assert(mDependencies.size() > 0); std::set<WeakResourcePtr>::iterator itr; for (itr = mDependents.begin(); itr != mDependents.end(); itr++) { SharedResourcePtr resourcePtr = itr->lock(); if (resourcePtr) resourcePtr->resolveName(mID, (*mDependencies.begin())->getID()); } } void GraphicsResourceName::addDependent(Meru::WeakResourcePtr newParent) { if 
(mParseState == PARSE_VALID) { newParent.lock()->resolveName(mID, (*mDependencies.begin())->getID()); } GraphicsResource::addDependent(newParent); } }
module JSON import public JSON.FromJSON import public JSON.Option import public JSON.ToJSON import public JSON.Value
#' rcond #' #' Reciprocal condition number estimate. #' #' @details #' The estimate is computed by first forming the R matrix from a QR. Currently #' this involves first computing a crossproduct, so the estimate can be thought #' of as being fairly liberal. Afterwards, the condition number is estimated by #' calling \code{base::rcond()} on the local R matrix. #' #' @section Communication: #' The operation is completely local except for forming the crossproduct, which #' is an \code{allreduce()} call, quadratic on the number of columns. #' #' @param x #' A shaq. #' @param norm,triangular,... #' Arguments passed to \code{base::rcond()}. #' #' @return #' A number. #' #' @examples #' \dontrun{ #' library(kazaam) #' x = ranshaq(runif, 10, 3) #' rc = rcond(x) #' comm.print(rc) #' #' x = expand(matrix(1:30, 10)) #' rc = rcond(x) #' comm.print(rc) #' #' finalize() #' } #' #' @method rcond shaq #' @name rcond #' @rdname rcond NULL rcond.shaq = function(x, norm=c("O", "I", "1"), triangular=FALSE, ...) { norm = pbdMPI::comm.match.arg(toupper(norm), c("O", "I", "1")) R = qr_R(x) base::rcond(R, norm=norm, triangular=TRUE, ...) } #' @rdname rcond #' @export setMethod("rcond", signature(x="shaq"), rcond.shaq)
% Created 2016-09-12 Mon 12:54 \documentclass[presentation]{beamer} \usepackage[utf8]{inputenc} \usepackage[T1]{fontenc} \usepackage{fixltx2e} \usepackage{graphicx} \usepackage{grffile} \usepackage{longtable} \usepackage{wrapfig} \usepackage{rotating} \usepackage[normalem]{ulem} \usepackage{amsmath} \usepackage{textcomp} \usepackage{amssymb} \usepackage{capt-of} \usepackage{hyperref} \usetheme{default} \author{Paul M. Magwene} \date{} \title{Describing univariate distributions} \input{../mybeamerstyle.tex} \institute[Duke]{Department of Biology} \hypersetup{ pdfauthor={Paul M. Magwene}, pdftitle={Describing univariate distributions}, pdfkeywords={}, pdfsubject={}, pdfcreator={Emacs 24.5.1 (Org mode 8.3.5)}, pdflang={English}} \begin{document} \maketitle \definecolor{bg}{rgb}{0.95,0.95,0.95} \begin{frame}[label={sec:orgheadline1}]{Overview} \begin{itemize} \item Terminology for describing univariate distributions \item Measures of location (centrality) \item Measures of dispersion (spread) \end{itemize} \end{frame} \begin{frame}[label={sec:orgheadline2}]{Population} Population -- A population is a collection of objects, individuals, or observations about which we intend to make general statements. Examples: \begin{itemize} \item The height of American males older than 25 years of age. \item Number of mitochondrial 12S-rRNA haplotypes in the human population \item Number of loblolly pine trees per km\(^2\) in North Carolina \end{itemize} \end{frame} \begin{frame}[label={sec:orgheadline3}]{Sample / Random Sample} A sample is a subset of the population. A Random Sample is a sample that is chosen in such a way as to reflect the uncertainty of observations in a population. \end{frame} \begin{frame}[label={sec:orgheadline4}]{Types of data} \begin{itemize} \item Categorical or Nominal -- labels matter but no mathematical notion of order or distance \begin{itemize} \item Sex: Male / Female \item Species \end{itemize} \item Ordinal data -- order matters but no distance metric \begin{itemize} \item Juvenile, Adult \item Small, Medium, Large \item Muddy, Sandy, Gravelly \end{itemize} \item Discrete, Integer, Counting \begin{itemize} \item Number of vertebrae in a snake \item Number of pine trees in a specified area \item Number of heart beats in a minute \item Number of head bobs during courtship display \end{itemize} \item Continuous \begin{itemize} \item Body mass \item Length of right femur \item Duration of aggressive display \end{itemize} \end{itemize} \end{frame} \begin{frame}[label={sec:orgheadline5}]{Interval vs Ratio scales} \begin{itemize} \item Interval scales -- have meaningful order and distance metrics, but don't usually have a meaningful zero value, so computing ratios doesn't make sense \item Ratio scales -- have a meaningful order, distance metrics, and zero value. 
\end{itemize} \end{frame} \begin{frame}[label={sec:orgheadline6}]{Statistic} A statistic is a numerical value calculated by applying a function (algorithm) to the values of the items of a sample. \end{frame} \begin{frame}[fragile,label={sec:orgheadline7}]{Example data set: butterfat data} We'll use a data set that records the butter fat percentage in milk from 120 Canadian dairy cows (Sokal and Rohlf, Biometry, 4th ed) \begin{itemize} \item See the link on the course wiki for \texttt{butterfat.csv} \item Load \texttt{butterfat.csv} using the \texttt{read.csv} function \end{itemize} \end{frame} \begin{frame}[fragile,label={sec:orgheadline8}]{Generate a histogram} Using the \texttt{ggplot2} library, generate a histogram for the butterfat data set. \begin{figure}[htb] \centering \includegraphics[height=0.5\textheight]{butterfat-hist.pdf} \caption{Histogram of butter fat percentage from 120 Canadian cows.} \end{figure} \end{frame} \section{Measures of location} \label{sec:orgheadline14} \begin{frame}[label={sec:orgheadline9}]{Mean} \begin{itemize} \item Most common measure of location \item Measure of location that minimizes the sum of the squared deviations around it \item Statistical measure of location that has the smallest standard error (to be defined later) \item Physical analogy: If we think of observations as points of mass on a line, the mean is the center of mass (balance point) \end{itemize} Let \(X = \{x_1, x_2, \ldots, x_n\}\). The mean of \(X\) is: \[ \overline{X} = \frac{1}{n} \sum_{i=1}^n x_i \] \end{frame} \begin{frame}[label={sec:orgheadline10}]{Median} \begin{itemize} \item The middle point of a frequency distribution \item The value of the variable that has an equal number of items on either side of it \end{itemize} The median is a \uline{robust} estimator of location. Robust statistics are those that are not strongly affected by outliers or violations of model assumptions. \end{frame} \begin{frame}[label={sec:orgheadline11}]{Robustness of median: Example} Changes in estimates of location when three outlier values (8, 10, 15) are added to butterfat data. \end{frame} \begin{frame}[label={sec:orgheadline12}]{Mode} \begin{itemize} \item The most common value (or interval) in a distribution \item Unimodal, bimodal, multi-modal \end{itemize} \includegraphics[width=.9\linewidth]{unimodal-and-bimodal.pdf} \end{frame} \begin{frame}[label={sec:orgheadline13}]{Some other ``means''} \alert{Weighted mean} -- useful when there is some a priori notion of weight or importance for different observations \[ \overline{X}_w = \frac{1}{\sum^n w_i} \sum^n w_i x_i \] where the \(w_i\) represent the weights attached to each observation. \alert{Geometric mean} -- most often used to study proportional growth (populations, tissues, organs, etc) \[ GM_X = \sqrt[n]{\prod^n x_i} \] \alert{Harmonic mean} -- rarely used in biology; the reciprocal of the arithmetic mean of the reciprocals. \[ HM_X = \frac{n}{\sum^n \frac{1}{x_i}} \] \end{frame} \section{Measures of dispersion} \label{sec:orgheadline24} \begin{frame}[label={sec:orgheadline15}]{Range} \begin{itemize} \item The difference between the largest and smallest items in a sample \end{itemize} \[ \max(X) - \min(X) \] \end{frame} \begin{frame}[label={sec:orgheadline16}]{Deviates} \alert{Deviate} -- the difference between an observation and the mean; can be negative or positive. Units same as the \(x_i\). \[ x_i - \overline{X} \] \alert{Squared deviate} -- the square of a deviate; always \(\geq 0\) (units\(^2\)). 
\[ (x_i - \overline{X})^2 \] \alert{Sum of squared deviations} -- the sum of all the squared deviations in a sample (units\(^2\)). \[ \sum_{i=1}^n (x_i - \overline{X})^2 \] \end{frame} \begin{frame}[label={sec:orgheadline17}]{Variance and standard deviation} \alert{Variance} -- the mean squared deviation (units\(^2\)). \[ \sigma_X^2 = \frac{1}{n} \sum_{i=1}^n (x_i - \overline{X})^2 \] \alert{Standard deviation} -- the square root of the variance (units same as the \(x_i\)). \[ \sigma_X = \sqrt{\frac{1}{n} \sum_{i=1}^n (x_i - \overline{X})^2} \] The above are the \uline{population} variance and standard deviation. \end{frame} \begin{frame}[label={sec:orgheadline18}]{Sample estimators of variance and standard deviation} The \emph{unbiased} \uline{sample} estimators of the variance and standard deviation are given by: \begin{equation*} \begin{split} \mbox{Variance:}\qquad & s_X^2 = \frac{1}{n-1} \sum_{i=1}^n (x_i - \overline{X})^2 \\ \mbox{Standard deviation:}\qquad & s_X = \sqrt{\frac{1}{n-1} \sum_{i=1}^n (x_i - \overline{X})^2} \end{split} \end{equation*} \alert{You almost always want to use the sample estimators of variance and standard deviation.} \end{frame} \begin{frame}[label={sec:orgheadline19}]{Standard deviation rules of thumb} If data are normally distributed: \begin{itemize} \item Approximately 68\% of observations fall within 1 standard deviation about the mean \item Approximately 95\% of observations fall within 2 standard deviations about the mean \item Approximately 99.7\% of observations fall within 3 standard deviations about the mean \end{itemize} \includegraphics[width=.9\linewidth]{sd-rule-thumb.pdf} \end{frame} \begin{frame}[label={sec:orgheadline20}]{Coefficient of variation} \begin{itemize} \item Standard deviation expressed as a percentage of the mean \item Unitless measure \end{itemize} \[ V = \frac{s_X \times 100}{\overline{X}} \] \end{frame} \begin{frame}[label={sec:orgheadline21}]{Quantiles, quartiles, interquartile range} \begin{itemize} \item \alert{Quantiles} -- points that divide a frequency distribution into equal-sized groups \begin{itemize} \item quartiles -- points dividing a distribution into 4 equal groups \item deciles -- points dividing a distribution into 10 equal groups \item percentiles -- points dividing a distribution into 100 equal groups \end{itemize} \item \alert{Interquartile range (IQR)} -- range of values that captures the central 50\% of the distribution \begin{itemize} \item Q1 = lower quartile, Q3 = upper quartile \end{itemize} \end{itemize} \end{frame} \begin{frame}[label={sec:orgheadline22}]{Boxplots typically depict information about quartiles} \begin{figure}[htb] \centering \includegraphics[width=.9\linewidth]{butterfat-hist-boxplot.png} \caption{Histogram of butterfat data set, with superimposed boxplot.} \end{figure} \end{frame} \begin{frame}[label={sec:orgheadline23}]{Median absolute deviation (MAD)} \begin{itemize} \item A robust estimator of dispersion \end{itemize} \[ \mathrm{MAD}(X) = \mathrm{median}(|x_i - \mathrm{median}(X)|) \] For a normal distribution, \(\sigma_X \approx 1.486 \times \mathrm{MAD}(X)\). \end{frame} \section{Skewness} \label{sec:orgheadline26} \begin{frame}[label={sec:orgheadline25}]{Skewness} \begin{itemize} \item Skewness describes the asymmetry of a distribution \end{itemize} \includegraphics[width=.9\linewidth]{skewed-distributions.png} A common measure of skewness: \[ \mbox{skewness} = E\left[\left(\frac{x - \mu}{\sigma}\right)^3\right] \] \end{frame} \end{document}
[STATEMENT] theorem cos_add: fixes x :: "'a::{real_normed_field,banach}" shows "cos (x + y) = cos x * cos y - sin x * sin y" [PROOF STATE] proof (prove) goal (1 subgoal): 1. cos (x + y) = cos x * cos y - sin x * sin y [PROOF STEP] proof - [PROOF STATE] proof (state) goal (1 subgoal): 1. cos (x + y) = cos x * cos y - sin x * sin y [PROOF STEP] have "(if even p \<and> even n then ((- 1) ^ (p div 2) * int (p choose n) / (fact p)) *\<^sub>R (x^n) * y^(p-n) else 0) - (if even p \<and> odd n then - ((- 1) ^ (p div 2) * int (p choose n) / (fact p)) *\<^sub>R (x^n) * y^(p-n) else 0) = (if even p then ((-1) ^ (p div 2) * (p choose n) / (fact p)) *\<^sub>R (x^n) * y^(p-n) else 0)" if "n \<le> p" for n p :: nat [PROOF STATE] proof (prove) goal (1 subgoal): 1. (if even p \<and> even n then (real_of_int ((- 1) ^ (p div 2) * int (p choose n)) / fact p) *\<^sub>R x ^ n * y ^ (p - n) else (0::'a)) - (if even p \<and> odd n then - (real_of_int ((- 1) ^ (p div 2) * int (p choose n)) / fact p) *\<^sub>R x ^ n * y ^ (p - n) else (0::'a)) = (if even p then (real_of_int ((- 1) ^ (p div 2) * int (p choose n)) / fact p) *\<^sub>R x ^ n * y ^ (p - n) else (0::'a)) [PROOF STEP] by simp [PROOF STATE] proof (state) this: ?n \<le> ?p \<Longrightarrow> (if even ?p \<and> even ?n then (real_of_int ((- 1) ^ (?p div 2) * int (?p choose ?n)) / fact ?p) *\<^sub>R x ^ ?n * y ^ (?p - ?n) else (0::'a)) - (if even ?p \<and> odd ?n then - (real_of_int ((- 1) ^ (?p div 2) * int (?p choose ?n)) / fact ?p) *\<^sub>R x ^ ?n * y ^ (?p - ?n) else (0::'a)) = (if even ?p then (real_of_int ((- 1) ^ (?p div 2) * int (?p choose ?n)) / fact ?p) *\<^sub>R x ^ ?n * y ^ (?p - ?n) else (0::'a)) goal (1 subgoal): 1. cos (x + y) = cos x * cos y - sin x * sin y [PROOF STEP] then [PROOF STATE] proof (chain) picking this: ?n \<le> ?p \<Longrightarrow> (if even ?p \<and> even ?n then (real_of_int ((- 1) ^ (?p div 2) * int (?p choose ?n)) / fact ?p) *\<^sub>R x ^ ?n * y ^ (?p - ?n) else (0::'a)) - (if even ?p \<and> odd ?n then - (real_of_int ((- 1) ^ (?p div 2) * int (?p choose ?n)) / fact ?p) *\<^sub>R x ^ ?n * y ^ (?p - ?n) else (0::'a)) = (if even ?p then (real_of_int ((- 1) ^ (?p div 2) * int (?p choose ?n)) / fact ?p) *\<^sub>R x ^ ?n * y ^ (?p - ?n) else (0::'a)) [PROOF STEP] have "(\<lambda>p. \<Sum>n\<le>p. (if even p then ((-1) ^ (p div 2) * (p choose n) / (fact p)) *\<^sub>R (x^n) * y^(p-n) else 0)) sums (cos x * cos y - sin x * sin y)" [PROOF STATE] proof (prove) using this: ?n \<le> ?p \<Longrightarrow> (if even ?p \<and> even ?n then (real_of_int ((- 1) ^ (?p div 2) * int (?p choose ?n)) / fact ?p) *\<^sub>R x ^ ?n * y ^ (?p - ?n) else (0::'a)) - (if even ?p \<and> odd ?n then - (real_of_int ((- 1) ^ (?p div 2) * int (?p choose ?n)) / fact ?p) *\<^sub>R x ^ ?n * y ^ (?p - ?n) else (0::'a)) = (if even ?p then (real_of_int ((- 1) ^ (?p div 2) * int (?p choose ?n)) / fact ?p) *\<^sub>R x ^ ?n * y ^ (?p - ?n) else (0::'a)) goal (1 subgoal): 1. (\<lambda>p. \<Sum>n\<le>p. 
if even p then (real_of_int ((- 1) ^ (p div 2) * int (p choose n)) / fact p) *\<^sub>R x ^ n * y ^ (p - n) else (0::'a)) sums (cos x * cos y - sin x * sin y) [PROOF STEP] using sums_diff [OF cos_x_cos_y [of x y] sin_x_sin_y [of x y]] [PROOF STATE] proof (prove) using this: ?n \<le> ?p \<Longrightarrow> (if even ?p \<and> even ?n then (real_of_int ((- 1) ^ (?p div 2) * int (?p choose ?n)) / fact ?p) *\<^sub>R x ^ ?n * y ^ (?p - ?n) else (0::'a)) - (if even ?p \<and> odd ?n then - (real_of_int ((- 1) ^ (?p div 2) * int (?p choose ?n)) / fact ?p) *\<^sub>R x ^ ?n * y ^ (?p - ?n) else (0::'a)) = (if even ?p then (real_of_int ((- 1) ^ (?p div 2) * int (?p choose ?n)) / fact ?p) *\<^sub>R x ^ ?n * y ^ (?p - ?n) else (0::'a)) (\<lambda>n. (\<Sum>na\<le>n. if even n \<and> even na then (real_of_int ((- 1) ^ (n div 2) * int (n choose na)) / fact n) *\<^sub>R x ^ na * y ^ (n - na) else (0::'a)) - (\<Sum>na\<le>n. if even n \<and> odd na then - (real_of_int ((- 1) ^ (n div 2) * int (n choose na)) / fact n) *\<^sub>R x ^ na * y ^ (n - na) else (0::'a))) sums (cos x * cos y - sin x * sin y) goal (1 subgoal): 1. (\<lambda>p. \<Sum>n\<le>p. if even p then (real_of_int ((- 1) ^ (p div 2) * int (p choose n)) / fact p) *\<^sub>R x ^ n * y ^ (p - n) else (0::'a)) sums (cos x * cos y - sin x * sin y) [PROOF STEP] by (simp add: sum_subtractf [symmetric]) [PROOF STATE] proof (state) this: (\<lambda>p. \<Sum>n\<le>p. if even p then (real_of_int ((- 1) ^ (p div 2) * int (p choose n)) / fact p) *\<^sub>R x ^ n * y ^ (p - n) else (0::'a)) sums (cos x * cos y - sin x * sin y) goal (1 subgoal): 1. cos (x + y) = cos x * cos y - sin x * sin y [PROOF STEP] then [PROOF STATE] proof (chain) picking this: (\<lambda>p. \<Sum>n\<le>p. if even p then (real_of_int ((- 1) ^ (p div 2) * int (p choose n)) / fact p) *\<^sub>R x ^ n * y ^ (p - n) else (0::'a)) sums (cos x * cos y - sin x * sin y) [PROOF STEP] show ?thesis [PROOF STATE] proof (prove) using this: (\<lambda>p. \<Sum>n\<le>p. if even p then (real_of_int ((- 1) ^ (p div 2) * int (p choose n)) / fact p) *\<^sub>R x ^ n * y ^ (p - n) else (0::'a)) sums (cos x * cos y - sin x * sin y) goal (1 subgoal): 1. cos (x + y) = cos x * cos y - sin x * sin y [PROOF STEP] by (blast intro: sums_cos_x_plus_y sums_unique2) [PROOF STATE] proof (state) this: cos (x + y) = cos x * cos y - sin x * sin y goal: No subgoals! [PROOF STEP] qed
-- Copyright (c) 2018 Scott Morrison. All rights reserved. -- Released under Apache 2.0 license as described in the file LICENSE. -- Authors: Scott Morrison import category_theory.isomorphism universes u₁ v₁ u₂ v₂ u₃ v₃ namespace category_theory variables {C : Type u₁} [𝒞 : category.{u₁ v₁} C] {D : Type u₂} [𝒟 : category.{u₂ v₂} D] include 𝒞 𝒟 class full (F : C ⥤ D) := (preimage : ∀ {X Y : C} (f : (F X) ⟶ (F Y)), X ⟶ Y) (witness' : ∀ {X Y : C} (f : (F X) ⟶ (F Y)), F.map (preimage f) = f . obviously) restate_axiom full.witness' attribute [simp] full.witness class faithful (F : C ⥤ D) : Prop := (injectivity' : ∀ {X Y : C} {f g : X ⟶ Y} (p : F.map f = F.map g), f = g . obviously) restate_axiom faithful.injectivity' namespace functor def injectivity (F : C ⥤ D) [faithful F] {X Y : C} {f g : X ⟶ Y} (p : F.map f = F.map g) : f = g := faithful.injectivity F p def preimage (F : C ⥤ D) [full F] {X Y : C} (f : F X ⟶ F Y) : X ⟶ Y := full.preimage.{u₁ v₁ u₂ v₂} f @[simp] lemma image_preimage (F : C ⥤ D) [full F] {X Y : C} (f : F X ⟶ F Y) : F.map (preimage F f) = f := begin unfold preimage, obviously end end functor section variables {F : C ⥤ D} [full F] [faithful F] {X Y : C} def preimage_iso (f : (F X) ≅ (F Y)) : X ≅ Y := { hom := F.preimage (f : F X ⟶ F Y), inv := F.preimage (f.symm : F Y ⟶ F X), hom_inv_id' := begin apply @faithful.injectivity _ _ _ _ F, obviously, end, inv_hom_id' := begin apply @faithful.injectivity _ _ _ _ F, obviously, end, } @[simp] lemma preimage_iso_coe (f : (F X) ≅ (F Y)) : ((preimage_iso f) : X ⟶ Y) = F.preimage (f : F X ⟶ F Y) := rfl @[simp] lemma preimage_iso_symm_coe (f : (F X) ≅ (F Y)) : ((preimage_iso f).symm : Y ⟶ X) = F.preimage (f.symm : F Y ⟶ F X) := rfl end class embedding (F : C ⥤ D) extends (full F), (faithful F). end category_theory namespace category_theory variables {C : Type u₁} [𝒞 : category.{u₁ v₁} C] include 𝒞 instance full.id : full (functor.id C) := { preimage := λ _ _ f, f } instance : faithful (functor.id C) := by obviously instance : embedding (functor.id C) := { ((by apply_instance) : full (functor.id C)) with } variables {D : Type u₂} [𝒟 : category.{u₂ v₂} D] {E : Type u₃} [ℰ : category.{u₃ v₃} E] include 𝒟 ℰ variables (F : C ⥤ D) (G : D ⥤ E) instance faithful.comp [faithful F] [faithful G] : faithful (F ⋙ G) := { injectivity' := λ _ _ _ _ p, F.injectivity (G.injectivity p) } instance full.comp [full F] [full G] : full (F ⋙ G) := { preimage := λ _ _ f, F.preimage (G.preimage f) } end category_theory
module Main import IdrisJvm.IO import IdrisJvm.JvmImport import Java.Lang import Java.Util import mmhelloworld.idrisjvmautoffisample.importer.JvmImporter %access public export %language ElabReflection main : JVM_IO () main = do printLn !((integerClass <.!> "parseInt") "23") objectMapper <- (objectMapperClass <.!> "<init>()") jsonNode <- (objectMapperClass <.!> "readTree(java/lang/String)") objectMapper "{\"coord\": {\"lon\": -0.13, \"lat\": 51.51}}" latNode <- (jsonNodeClass <.!> "at(java/lang/String)") jsonNode "/coord/lat" lat <- (jsonNodeClass <.!> "asDouble()") latNode printLn lat
(** Definitions of Algorithmic Lightweight Linear F Authors: Aileen Zhang, Jianzhou Zhao, and Steve Zdancewic. *) Require Export LinearF_Definitions. Require Export LinearF_Infrastructure. (* ********************************************************************** *) (** * #<a name="env"></a># Algo Environments *) (** In our presentation of System F with subtyping, we use a single environment for both typing and subtyping assumptions. We formalize environments by representing them as association lists (lists of pairs of keys and values) whose keys are atoms. The [Metatheory] and [Environment] libraries provide functions, predicates, tactics, notations and lemmas that simplify working with environments. The [Environment] library treats environments as lists of type [list (atom * A)]. Since environments map [atom]s, the type [A] should encode whether a particular binding is a typing or subtyping assumption. Thus, we instantiate [A] with the type [binding], defined below. *) Inductive gbinding : Set := | gbind_kn : kn -> gbinding | gbind_typ : typ -> gbinding. Notation genv := (list (atom * gbinding)). Notation gempty := (@nil (atom * gbinding)). Inductive dbinding : Set := | dbind_typ : typ -> dbinding. Notation denv := (list (atom * dbinding)). Notation dempty := (@nil (atom * dbinding)). (* ********************************************************************** *) (** * #<a name="wf"></a># Well-formedness *) (** A type [T] is well-formed with respect to an environment [E], denoted [(wf_typ E T)], when [T] is locally-closed and its free variables are bound in [E]. We need this relation in order to restrict the subtyping and typing relations, defined below, to contain only well-formed types. (This relation is missing in the original statement of the POPLmark Challenge.) Note: It is tempting to define the premise of [wf_typ_var] as [(X `in` dom E)], since that makes the rule easier to apply (no need to guess an instantiation for [U]). Unfortunately, this is incorrect. We need to check that [X] is bound as a type-variable, not an expression-variable; [(dom E)] does not distinguish between the two kinds of bindings. *) Inductive kn_order : kn -> kn -> Prop := | kn_order_base : kn_order kn_nonlin kn_lin | kn_order_refl : forall K, kn_order K K . Inductive wf_atyp : genv -> typ -> kn -> Prop := | wf_atyp_var : forall K G (X : atom), ok G -> binds X (gbind_kn K) G -> wf_atyp G (typ_fvar X) K | wf_atyp_arrow : forall G K1 K2 K T1 T2, wf_atyp G T1 K1 -> wf_atyp G T2 K2 -> wf_atyp G (typ_arrow K T1 T2) K | wf_atyp_all : forall L G K1 K2 T2, (forall X : atom, X `notin` L -> wf_atyp ([(X, gbind_kn K1)] ++ G) (open_tt T2 X) K2) -> wf_atyp G (typ_all K1 T2) K2 . (** An environment E is well-formed, denoted [(wf_env E)], if each atom is bound at most at once and if each binding is to a well-formed type. This is a stronger relation than the [ok] relation defined in the [Environment] library. We need this relation in order to restrict the subtyping and typing relations, defined below, to contain only well-formed environments. (This relation is missing in the original statement of the POPLmark Challenge.) *) Inductive wf_genv : genv -> Prop := | wf_genv_empty : wf_genv gempty | wf_genv_kn : forall (G : genv) (X : atom) (K : kn), wf_genv G -> X `notin` dom G -> wf_genv ([(X, gbind_kn K)] ++ G) | wf_genv_typ : forall (G : genv) (x : atom) (T : typ) , wf_genv G -> wf_atyp G T kn_nonlin -> x `notin` dom G -> wf_genv ([(x, gbind_typ T)] ++ G) . 
Inductive wf_denv : genv -> denv -> Prop := | wf_denv_empty : forall G, wf_genv G -> wf_denv G dempty | wf_denv_typ : forall G D T x, wf_denv G D -> wf_atyp G T kn_lin -> x `notin` dom G -> x `notin` dom D -> wf_denv G ([(x, dbind_typ T)] ++ D) . Fixpoint dminus_var (x : atom) (l : list (atom*dbinding)){struct l} : list (atom*dbinding) := match l with | nil => nil | (y, b)::tl => if (eq_atom_dec x y) then dminus_var x tl else (y, b)::(dminus_var x tl) end. Notation "D [-] x" := (dminus_var x D) (at level 70, no associativity). Fixpoint dminus (D : denv) (D' : denv) {struct D'} : denv := match D' with | nil => D | (y, b)::tl => dminus (D [-] y) tl end. Notation "D -- D'" := (dminus D D') (at level 70, no associativity). (* ********************************************************************** *) (** * #<a name="typing_doc"></a># Algo Typing *) Inductive atyping : genv -> denv -> exp -> typ -> denv -> Prop := | atyping_uvar : forall G D x T, binds x (gbind_typ T) G -> wf_denv G D -> atyping G D x T D | atyping_lvar : forall G D x T, binds x (dbind_typ T) D -> wf_denv G D -> atyping G D x T (D [-] x) | atyping_uabs : forall L K G D V e1 T1 D', wf_atyp G V kn_nonlin -> (forall x : atom, x `notin` L -> atyping ([(x, gbind_typ V)] ++ G) D (open_ee e1 x) T1 D') -> (K = kn_nonlin -> D = D') -> atyping G D (exp_abs K V e1) (typ_arrow K V T1) D' | atyping_labs : forall L K G D V e1 T1 D', wf_atyp G V kn_lin -> (forall x : atom, x `notin` L -> atyping G ([(x, dbind_typ V)] ++ D) (open_ee e1 x) T1 D') -> (K = kn_nonlin -> D = D') -> atyping G D (exp_abs K V e1) (typ_arrow K V T1) D' | atyping_app : forall G T1 K D1 D2 D3 e1 e2 T2, atyping G D1 e1 (typ_arrow K T1 T2) D2-> atyping G D2 e2 T1 D3 -> atyping G D1 (exp_app e1 e2) T2 D3 | atyping_tabs : forall L G K e1 T1 D D' K', value e1 -> (forall X : atom, X `notin` L -> wf_atyp ([(X,gbind_kn K)] ++ G) (open_tt T1 X) K') -> (forall X : atom, X `notin` L -> atyping ([(X,gbind_kn K)] ++ G) D (open_te e1 X) (open_tt T1 X) D')-> atyping G D (exp_tabs K e1) (typ_all K T1) D' | atyping_tapp : forall K K' G e1 T T2 D D', atyping G D e1 (typ_all K T2) D' -> wf_atyp G T K' -> kn_order K' K -> atyping G D (exp_tapp e1 T) (open_tt T2 T) D' . (* ********************************************************************** *) (** * #<a name="auto"></a># Automation *) (** We declare most constructors as [Hint]s to be used by the [auto] and [eauto] tactics. We exclude constructors from the subtyping and typing relations that use cofinite quantification. It is unlikely that [eauto] will find an instantiation for the finite set [L], and in those cases, [eauto] can take some time to fail. (A priori, this is not obvious. In practice, one adds as hints all constructors and then later removes some constructors when they cause proof search to take too long.) *) Hint Constructors wf_atyp wf_genv wf_denv ok. Hint Resolve atyping_uvar atyping_lvar atyping_app atyping_tapp. (* ********************************************************************** *) (** * #<a name="cases"></a># Cases Tactic *) Tactic Notation "typ_cases" tactic(first) tactic(c) := first; [ c "typ_bvar" | c "typ_fvar" | c "typ_arrow" | c "typ_all" ]. Tactic Notation "exp_cases" tactic(first) tactic(c) := first; [ c "exp_bvar" | c "exp_fvar" | c "exp_abs" | c "exp_app" | c "exp_tabs" | c "exp_tapp" ]. Tactic Notation "type_cases" tactic(first) tactic(c) := first; [ c "type_var" | c "type_arrow" | c "type_all" ]. 
Tactic Notation "expr_cases" tactic(first) tactic(c) := first; [ c "expr_var" | c "expr_abs" | c "expr_app" | c "expr_tabs" | c "expr_tapp" ]. Tactic Notation "wf_atyp_cases" tactic(first) tactic(c) := first; [ c "wf_atyp_var" | c "wf_atyp_arrow" | c "wf_typS_all" ]. Tactic Notation "wf_genv_cases" tactic(first) tactic(c) := first; [ c "wf_genv_empty" | c "wf_genv_kn" | c "wf_genv_typ" ]. Tactic Notation "wf_denv_cases" tactic(first) tactic(c) := first; [ c "wf_denv_empty" | c "wf_denv_typ" ]. Tactic Notation "value_cases" tactic(first) tactic(c) := first; [ c "value_abs" | c "value_tabs" ]. Tactic Notation "red_cases" tactic(first) tactic(c) := first; [ c "red_app_1" | c "red_app_2" | c "red_tapp" | c "red_abs" | c "red_tabs" ]. Tactic Notation "atyping_cases" tactic(first) tactic(c) := first; [ c "atyping_uvar" | c "atyping_lvar" | c "atyping_uabs" | c "atyping_labs" | c "atyping_app" | c "atyping_tabs" | c "atyping_tapp" ].
lemma (in finite_measure) finite_measure_mono_AE: assumes imp: "AE x in M. x \<in> A \<longrightarrow> x \<in> B" and B: "B \<in> sets M" shows "measure M A \<le> measure M B"
The ship was assigned to the 19th Destroyer Flotilla at the start of the war and spent the first six months on escort and patrol duties in the English Channel and North Sea. While assisting the damaged minesweeper Sphinx on 4 February 1940 in the Moray Firth, Boreas's stern was damaged and she required repairs that lasted until the following month. The ship was attached to the 12th Destroyer Flotilla on 29 March until she was damaged in a collision with her sister ship Brilliant on 15 May. Her repairs lasted until 19 June, and Boreas was assigned to the 1st Destroyer Flotilla at Dover upon their completion. On 25 July, the ship engaged German E-boats off Dover Harbour together with Brilliant and was badly damaged by German Junkers Ju 87 Stuka dive bombers after she was ordered to withdraw. Her bridge was hit twice by bombs that killed one officer and twenty crewmen. Boreas was under repair at Millwall Dock until 23 January 1941; she was lightly damaged by bomb splinters on 19 January. Around 1941, she was fitted with a Type 286 short-range surface search radar.
library(destiny) data(guo) Dark2 <- scales::brewer_pal(palette = 'Dark2') palette(Dark2(8L)) dm_guo <- DiffusionMap(guo, verbose = FALSE, censor_val = 10, censor_range = c(10, 40)) plot(dm_guo, col = guo$num_cells, pch = 20) sigmas <- find_sigmas(guo, verbose = FALSE, censor_val = 10, censor_range = c(10, 40)) par(lwd = 3) plot(sigmas, col = palette()[[1]], col_highlight = palette()[[4]], col_line = palette()[[6]]) dm_guo_global <- DiffusionMap(guo, sigmas, verbose = FALSE, censor_val = 10, censor_range = c(10, 40)) plot(dm_guo_global, col = guo$num_cells, pch = 20) #library(rgl) #plot3d(eigenvectors(dm_guo)[, 1:3], col = guo$num_cells)
/- Copyright (c) 2017 Kevin Buzzard. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Kevin Buzzard, Mario Carneiro -/ import Mathlib.PrePort import Mathlib.Lean3Lib.init.default import Mathlib.data.real.sqrt import Mathlib.PostPort universes l u_1 namespace Mathlib /-! # The complex numbers The complex numbers are modelled as ℝ^2 in the obvious way. -/ /-! ### Definition and basic arithmetic -/ /-- Complex numbers consist of two `real`s: a real part `re` and an imaginary part `im`. -/ structure complex where re : ℝ im : ℝ notation:1024 "ℂ" => Mathlib.complex namespace complex protected instance decidable_eq : DecidableEq ℂ := classical.dec_eq ℂ /-- The equivalence between the complex numbers and `ℝ × ℝ`. -/ def equiv_real_prod : ℂ ≃ ℝ × ℝ := equiv.mk (fun (z : ℂ) => (re z, im z)) (fun (p : ℝ × ℝ) => mk (prod.fst p) (prod.snd p)) sorry sorry @[simp] theorem equiv_real_prod_apply (z : ℂ) : coe_fn equiv_real_prod z = (re z, im z) := rfl theorem equiv_real_prod_symm_re (x : ℝ) (y : ℝ) : re (coe_fn (equiv.symm equiv_real_prod) (x, y)) = x := rfl theorem equiv_real_prod_symm_im (x : ℝ) (y : ℝ) : im (coe_fn (equiv.symm equiv_real_prod) (x, y)) = y := rfl @[simp] theorem eta (z : ℂ) : mk (re z) (im z) = z := cases_on z fun (z_re z_im : ℝ) => idRhs (mk (re (mk z_re z_im)) (im (mk z_re z_im)) = mk (re (mk z_re z_im)) (im (mk z_re z_im))) rfl theorem ext {z : ℂ} {w : ℂ} : re z = re w → im z = im w → z = w := sorry theorem ext_iff {z : ℂ} {w : ℂ} : z = w ↔ re z = re w ∧ im z = im w := sorry protected instance has_coe : has_coe ℝ ℂ := has_coe.mk fun (r : ℝ) => mk r 0 @[simp] theorem of_real_re (r : ℝ) : re ↑r = r := rfl @[simp] theorem of_real_im (r : ℝ) : im ↑r = 0 := rfl @[simp] theorem of_real_inj {z : ℝ} {w : ℝ} : ↑z = ↑w ↔ z = w := { mp := congr_arg re, mpr := congr_arg fun {z : ℝ} => ↑z } protected instance has_zero : HasZero ℂ := { zero := ↑0 } protected instance inhabited : Inhabited ℂ := { default := 0 } @[simp] theorem zero_re : re 0 = 0 := rfl @[simp] theorem zero_im : im 0 = 0 := rfl @[simp] theorem of_real_zero : ↑0 = 0 := rfl @[simp] theorem of_real_eq_zero {z : ℝ} : ↑z = 0 ↔ z = 0 := of_real_inj theorem of_real_ne_zero {z : ℝ} : ↑z ≠ 0 ↔ z ≠ 0 := not_congr of_real_eq_zero protected instance has_one : HasOne ℂ := { one := ↑1 } @[simp] theorem one_re : re 1 = 1 := rfl @[simp] theorem one_im : im 1 = 0 := rfl @[simp] theorem of_real_one : ↑1 = 1 := rfl protected instance has_add : Add ℂ := { add := fun (z w : ℂ) => mk (re z + re w) (im z + im w) } @[simp] theorem add_re (z : ℂ) (w : ℂ) : re (z + w) = re z + re w := rfl @[simp] theorem add_im (z : ℂ) (w : ℂ) : im (z + w) = im z + im w := rfl @[simp] theorem bit0_re (z : ℂ) : re (bit0 z) = bit0 (re z) := rfl @[simp] theorem bit1_re (z : ℂ) : re (bit1 z) = bit1 (re z) := rfl @[simp] theorem bit0_im (z : ℂ) : im (bit0 z) = bit0 (im z) := Eq.refl (im (bit0 z)) @[simp] theorem bit1_im (z : ℂ) : im (bit1 z) = bit0 (im z) := add_zero (im (bit0 z)) @[simp] theorem of_real_add (r : ℝ) (s : ℝ) : ↑(r + s) = ↑r + ↑s := sorry @[simp] theorem of_real_bit0 (r : ℝ) : ↑(bit0 r) = bit0 ↑r := sorry @[simp] theorem of_real_bit1 (r : ℝ) : ↑(bit1 r) = bit1 ↑r := sorry protected instance has_neg : Neg ℂ := { neg := fun (z : ℂ) => mk (-re z) (-im z) } @[simp] theorem neg_re (z : ℂ) : re (-z) = -re z := rfl @[simp] theorem neg_im (z : ℂ) : im (-z) = -im z := rfl @[simp] theorem of_real_neg (r : ℝ) : ↑(-r) = -↑r := sorry protected instance has_sub : Sub ℂ := { sub := fun (z w : ℂ) => mk (re z - re w) (im z - im w) } 
protected instance has_mul : Mul ℂ := { mul := fun (z w : ℂ) => mk (re z * re w - im z * im w) (re z * im w + im z * re w) } @[simp] theorem mul_re (z : ℂ) (w : ℂ) : re (z * w) = re z * re w - im z * im w := rfl @[simp] theorem mul_im (z : ℂ) (w : ℂ) : im (z * w) = re z * im w + im z * re w := rfl @[simp] theorem of_real_mul (r : ℝ) (s : ℝ) : ↑(r * s) = ↑r * ↑s := sorry theorem smul_re (r : ℝ) (z : ℂ) : re (↑r * z) = r * re z := sorry theorem smul_im (r : ℝ) (z : ℂ) : im (↑r * z) = r * im z := sorry theorem of_real_smul (r : ℝ) (z : ℂ) : ↑r * z = mk (r * re z) (r * im z) := ext (smul_re r z) (smul_im r z) /-! ### The imaginary unit, `I` -/ /-- The imaginary unit. -/ def I : ℂ := mk 0 1 @[simp] theorem I_re : re I = 0 := rfl @[simp] theorem I_im : im I = 1 := rfl @[simp] theorem I_mul_I : I * I = -1 := sorry theorem I_mul (z : ℂ) : I * z = mk (-im z) (re z) := sorry theorem I_ne_zero : I ≠ 0 := mt (congr_arg im) (ne.symm zero_ne_one) theorem mk_eq_add_mul_I (a : ℝ) (b : ℝ) : mk a b = ↑a + ↑b * I := sorry @[simp] theorem re_add_im (z : ℂ) : ↑(re z) + ↑(im z) * I = z := sorry /-! ### Commutative ring instance and lemmas -/ protected instance comm_ring : comm_ring ℂ := comm_ring.mk Add.add sorry 0 sorry sorry Neg.neg Sub.sub sorry sorry Mul.mul sorry 1 sorry sorry sorry sorry sorry protected instance re.is_add_group_hom : is_add_group_hom re := is_add_group_hom.mk protected instance im.is_add_group_hom : is_add_group_hom im := is_add_group_hom.mk @[simp] theorem I_pow_bit0 (n : ℕ) : I ^ bit0 n = (-1) ^ n := eq.mpr (id (Eq._oldrec (Eq.refl (I ^ bit0 n = (-1) ^ n)) (pow_bit0' I n))) (eq.mpr (id (Eq._oldrec (Eq.refl ((I * I) ^ n = (-1) ^ n)) I_mul_I)) (Eq.refl ((-1) ^ n))) @[simp] theorem I_pow_bit1 (n : ℕ) : I ^ bit1 n = (-1) ^ n * I := eq.mpr (id (Eq._oldrec (Eq.refl (I ^ bit1 n = (-1) ^ n * I)) (pow_bit1' I n))) (eq.mpr (id (Eq._oldrec (Eq.refl ((I * I) ^ n * I = (-1) ^ n * I)) I_mul_I)) (Eq.refl ((-1) ^ n * I))) /-! ### Complex conjugation -/ /-- The complex conjugate. -/ def conj : ℂ →+* ℂ := ring_hom.mk (fun (z : ℂ) => mk (re z) (-im z)) sorry sorry sorry sorry @[simp] theorem conj_re (z : ℂ) : re (coe_fn conj z) = re z := rfl @[simp] theorem conj_im (z : ℂ) : im (coe_fn conj z) = -im z := rfl @[simp] theorem conj_of_real (r : ℝ) : coe_fn conj ↑r = ↑r := sorry @[simp] theorem conj_I : coe_fn conj I = -I := sorry @[simp] theorem conj_bit0 (z : ℂ) : coe_fn conj (bit0 z) = bit0 (coe_fn conj z) := sorry @[simp] theorem conj_bit1 (z : ℂ) : coe_fn conj (bit1 z) = bit1 (coe_fn conj z) := sorry @[simp] theorem conj_neg_I : coe_fn conj (-I) = I := sorry @[simp] theorem conj_conj (z : ℂ) : coe_fn conj (coe_fn conj z) = z := sorry theorem conj_involutive : function.involutive ⇑conj := conj_conj theorem conj_bijective : function.bijective ⇑conj := function.involutive.bijective conj_involutive theorem conj_inj {z : ℂ} {w : ℂ} : coe_fn conj z = coe_fn conj w ↔ z = w := function.injective.eq_iff (and.left conj_bijective) @[simp] theorem conj_eq_zero {z : ℂ} : coe_fn conj z = 0 ↔ z = 0 := sorry theorem eq_conj_iff_real {z : ℂ} : coe_fn conj z = z ↔ ∃ (r : ℝ), z = ↑r := sorry theorem eq_conj_iff_re {z : ℂ} : coe_fn conj z = z ↔ ↑(re z) = z := sorry protected instance star_ring : star_ring ℂ := star_ring.mk sorry /-! ### Norm squared -/ /-- The norm squared function. 
-/ def norm_sq : monoid_with_zero_hom ℂ ℝ := monoid_with_zero_hom.mk (fun (z : ℂ) => re z * re z + im z * im z) sorry sorry sorry theorem norm_sq_apply (z : ℂ) : coe_fn norm_sq z = re z * re z + im z * im z := rfl @[simp] theorem norm_sq_of_real (r : ℝ) : coe_fn norm_sq ↑r = r * r := sorry theorem norm_sq_zero : coe_fn norm_sq 0 = 0 := monoid_with_zero_hom.map_zero norm_sq theorem norm_sq_one : coe_fn norm_sq 1 = 1 := monoid_with_zero_hom.map_one norm_sq @[simp] theorem norm_sq_I : coe_fn norm_sq I = 1 := sorry theorem norm_sq_nonneg (z : ℂ) : 0 ≤ coe_fn norm_sq z := add_nonneg (mul_self_nonneg (re z)) (mul_self_nonneg (im z)) theorem norm_sq_eq_zero {z : ℂ} : coe_fn norm_sq z = 0 ↔ z = 0 := sorry @[simp] theorem norm_sq_pos {z : ℂ} : 0 < coe_fn norm_sq z ↔ z ≠ 0 := iff.trans (has_le.le.lt_iff_ne (norm_sq_nonneg z)) (not_congr (iff.trans eq_comm norm_sq_eq_zero)) @[simp] theorem norm_sq_neg (z : ℂ) : coe_fn norm_sq (-z) = coe_fn norm_sq z := sorry @[simp] theorem norm_sq_conj (z : ℂ) : coe_fn norm_sq (coe_fn conj z) = coe_fn norm_sq z := sorry theorem norm_sq_mul (z : ℂ) (w : ℂ) : coe_fn norm_sq (z * w) = coe_fn norm_sq z * coe_fn norm_sq w := monoid_with_zero_hom.map_mul norm_sq z w theorem norm_sq_add (z : ℂ) (w : ℂ) : coe_fn norm_sq (z + w) = coe_fn norm_sq z + coe_fn norm_sq w + bit0 1 * re (z * coe_fn conj w) := sorry theorem re_sq_le_norm_sq (z : ℂ) : re z * re z ≤ coe_fn norm_sq z := le_add_of_nonneg_right (mul_self_nonneg (im z)) theorem im_sq_le_norm_sq (z : ℂ) : im z * im z ≤ coe_fn norm_sq z := le_add_of_nonneg_left (mul_self_nonneg (re z)) theorem mul_conj (z : ℂ) : z * coe_fn conj z = ↑(coe_fn norm_sq z) := sorry theorem add_conj (z : ℂ) : z + coe_fn conj z = ↑(bit0 1 * re z) := sorry /-- The coercion `ℝ → ℂ` as a `ring_hom`. -/ def of_real : ℝ →+* ℂ := ring_hom.mk coe of_real_one of_real_mul of_real_zero of_real_add @[simp] theorem of_real_eq_coe (r : ℝ) : coe_fn of_real r = ↑r := rfl @[simp] theorem I_sq : I ^ bit0 1 = -1 := eq.mpr (id (Eq._oldrec (Eq.refl (I ^ bit0 1 = -1)) (pow_two I))) (eq.mpr (id (Eq._oldrec (Eq.refl (I * I = -1)) I_mul_I)) (Eq.refl (-1))) @[simp] theorem sub_re (z : ℂ) (w : ℂ) : re (z - w) = re z - re w := rfl @[simp] theorem sub_im (z : ℂ) (w : ℂ) : im (z - w) = im z - im w := rfl @[simp] theorem of_real_sub (r : ℝ) (s : ℝ) : ↑(r - s) = ↑r - ↑s := sorry @[simp] theorem of_real_pow (r : ℝ) (n : ℕ) : ↑(r ^ n) = ↑r ^ n := sorry theorem sub_conj (z : ℂ) : z - coe_fn conj z = ↑(bit0 1 * im z) * I := sorry theorem norm_sq_sub (z : ℂ) (w : ℂ) : coe_fn norm_sq (z - w) = coe_fn norm_sq z + coe_fn norm_sq w - bit0 1 * re (z * coe_fn conj w) := sorry /-! ### Inversion -/ protected instance has_inv : has_inv ℂ := has_inv.mk fun (z : ℂ) => coe_fn conj z * ↑(coe_fn norm_sq z⁻¹) theorem inv_def (z : ℂ) : z⁻¹ = coe_fn conj z * ↑(coe_fn norm_sq z⁻¹) := rfl @[simp] theorem inv_re (z : ℂ) : re (z⁻¹) = re z / coe_fn norm_sq z := sorry @[simp] theorem inv_im (z : ℂ) : im (z⁻¹) = -im z / coe_fn norm_sq z := sorry @[simp] theorem of_real_inv (r : ℝ) : ↑(r⁻¹) = (↑r⁻¹) := sorry protected theorem inv_zero : 0⁻¹ = 0 := eq.mpr (id (Eq._oldrec (Eq.refl (0⁻¹ = 0)) (Eq.symm of_real_zero))) (eq.mpr (id (Eq._oldrec (Eq.refl (↑0⁻¹ = ↑0)) (Eq.symm (of_real_inv 0)))) (eq.mpr (id (Eq._oldrec (Eq.refl (↑(0⁻¹) = ↑0)) inv_zero)) (Eq.refl ↑0))) protected theorem mul_inv_cancel {z : ℂ} (h : z ≠ 0) : z * (z⁻¹) = 1 := sorry /-! 
### Field instance and lemmas -/ protected instance field : field ℂ := field.mk comm_ring.add comm_ring.add_assoc comm_ring.zero comm_ring.zero_add comm_ring.add_zero comm_ring.neg comm_ring.sub comm_ring.add_left_neg comm_ring.add_comm comm_ring.mul comm_ring.mul_assoc comm_ring.one comm_ring.one_mul comm_ring.mul_one comm_ring.left_distrib comm_ring.right_distrib comm_ring.mul_comm has_inv.inv sorry complex.mul_inv_cancel complex.inv_zero @[simp] theorem I_fpow_bit0 (n : ℤ) : I ^ bit0 n = (-1) ^ n := eq.mpr (id (Eq._oldrec (Eq.refl (I ^ bit0 n = (-1) ^ n)) (fpow_bit0' I n))) (eq.mpr (id (Eq._oldrec (Eq.refl ((I * I) ^ n = (-1) ^ n)) I_mul_I)) (Eq.refl ((-1) ^ n))) @[simp] theorem I_fpow_bit1 (n : ℤ) : I ^ bit1 n = (-1) ^ n * I := eq.mpr (id (Eq._oldrec (Eq.refl (I ^ bit1 n = (-1) ^ n * I)) (fpow_bit1' I n))) (eq.mpr (id (Eq._oldrec (Eq.refl ((I * I) ^ n * I = (-1) ^ n * I)) I_mul_I)) (Eq.refl ((-1) ^ n * I))) theorem div_re (z : ℂ) (w : ℂ) : re (z / w) = re z * re w / coe_fn norm_sq w + im z * im w / coe_fn norm_sq w := sorry theorem div_im (z : ℂ) (w : ℂ) : im (z / w) = im z * re w / coe_fn norm_sq w - re z * im w / coe_fn norm_sq w := sorry @[simp] theorem of_real_div (r : ℝ) (s : ℝ) : ↑(r / s) = ↑r / ↑s := ring_hom.map_div of_real r s @[simp] theorem of_real_fpow (r : ℝ) (n : ℤ) : ↑(r ^ n) = ↑r ^ n := ring_hom.map_fpow of_real r n @[simp] theorem div_I (z : ℂ) : z / I = -(z * I) := sorry @[simp] theorem inv_I : I⁻¹ = -I := sorry @[simp] theorem norm_sq_inv (z : ℂ) : coe_fn norm_sq (z⁻¹) = (coe_fn norm_sq z⁻¹) := monoid_with_zero_hom.map_inv' norm_sq z @[simp] theorem norm_sq_div (z : ℂ) (w : ℂ) : coe_fn norm_sq (z / w) = coe_fn norm_sq z / coe_fn norm_sq w := monoid_with_zero_hom.map_div norm_sq z w /-! ### Cast lemmas -/ @[simp] theorem of_real_nat_cast (n : ℕ) : ↑↑n = ↑n := ring_hom.map_nat_cast of_real n @[simp] theorem nat_cast_re (n : ℕ) : re ↑n = ↑n := eq.mpr (id (Eq._oldrec (Eq.refl (re ↑n = ↑n)) (Eq.symm (of_real_nat_cast n)))) (eq.mpr (id (Eq._oldrec (Eq.refl (re ↑↑n = ↑n)) (of_real_re ↑n))) (Eq.refl ↑n)) @[simp] theorem nat_cast_im (n : ℕ) : im ↑n = 0 := eq.mpr (id (Eq._oldrec (Eq.refl (im ↑n = 0)) (Eq.symm (of_real_nat_cast n)))) (eq.mpr (id (Eq._oldrec (Eq.refl (im ↑↑n = 0)) (of_real_im ↑n))) (Eq.refl 0)) @[simp] theorem of_real_int_cast (n : ℤ) : ↑↑n = ↑n := ring_hom.map_int_cast of_real n @[simp] theorem int_cast_re (n : ℤ) : re ↑n = ↑n := eq.mpr (id (Eq._oldrec (Eq.refl (re ↑n = ↑n)) (Eq.symm (of_real_int_cast n)))) (eq.mpr (id (Eq._oldrec (Eq.refl (re ↑↑n = ↑n)) (of_real_re ↑n))) (Eq.refl ↑n)) @[simp] theorem int_cast_im (n : ℤ) : im ↑n = 0 := eq.mpr (id (Eq._oldrec (Eq.refl (im ↑n = 0)) (Eq.symm (of_real_int_cast n)))) (eq.mpr (id (Eq._oldrec (Eq.refl (im ↑↑n = 0)) (of_real_im ↑n))) (Eq.refl 0)) @[simp] theorem of_real_rat_cast (n : ℚ) : ↑↑n = ↑n := ring_hom.map_rat_cast of_real n @[simp] theorem rat_cast_re (q : ℚ) : re ↑q = ↑q := eq.mpr (id (Eq._oldrec (Eq.refl (re ↑q = ↑q)) (Eq.symm (of_real_rat_cast q)))) (eq.mpr (id (Eq._oldrec (Eq.refl (re ↑↑q = ↑q)) (of_real_re ↑q))) (Eq.refl ↑q)) @[simp] theorem rat_cast_im (q : ℚ) : im ↑q = 0 := eq.mpr (id (Eq._oldrec (Eq.refl (im ↑q = 0)) (Eq.symm (of_real_rat_cast q)))) (eq.mpr (id (Eq._oldrec (Eq.refl (im ↑↑q = 0)) (of_real_im ↑q))) (Eq.refl 0)) /-! 
### Characteristic zero -/ protected instance char_zero_complex : char_zero ℂ := char_zero_of_inj_zero fun (n : ℕ) (h : ↑n = 0) => eq.mp (Eq._oldrec (Eq.refl (↑n = 0)) (propext nat.cast_eq_zero)) (eq.mp (Eq._oldrec (Eq.refl (↑↑n = 0)) (propext of_real_eq_zero)) (eq.mp (Eq._oldrec (Eq.refl (↑n = 0)) (Eq.symm (of_real_nat_cast n))) h)) /-- A complex number `z` plus its conjugate `conj z` is `2` times its real part. -/ theorem re_eq_add_conj (z : ℂ) : ↑(re z) = (z + coe_fn conj z) / bit0 1 := sorry /-- A complex number `z` minus its conjugate `conj z` is `2i` times its imaginary part. -/ theorem im_eq_sub_conj (z : ℂ) : ↑(im z) = (z - coe_fn conj z) / (bit0 1 * I) := sorry /-! ### Absolute value -/ /-- The complex absolute value function, defined as the square root of the norm squared. -/ def abs (z : ℂ) : ℝ := real.sqrt (coe_fn norm_sq z) @[simp] theorem abs_of_real (r : ℝ) : abs ↑r = abs r := sorry theorem abs_of_nonneg {r : ℝ} (h : 0 ≤ r) : abs ↑r = r := Eq.trans (abs_of_real r) (abs_of_nonneg h) theorem abs_of_nat (n : ℕ) : abs ↑n = ↑n := Eq.trans (eq.mpr (id (Eq._oldrec (Eq.refl (abs ↑n = abs ↑↑n)) (of_real_nat_cast n))) (Eq.refl (abs ↑n))) (abs_of_nonneg (nat.cast_nonneg n)) theorem mul_self_abs (z : ℂ) : abs z * abs z = coe_fn norm_sq z := real.mul_self_sqrt (norm_sq_nonneg z) @[simp] theorem abs_zero : abs 0 = 0 := sorry @[simp] theorem abs_one : abs 1 = 1 := sorry @[simp] theorem abs_I : abs I = 1 := sorry @[simp] theorem abs_two : abs (bit0 1) = bit0 1 := sorry theorem abs_nonneg (z : ℂ) : 0 ≤ abs z := real.sqrt_nonneg (coe_fn norm_sq z) @[simp] theorem abs_eq_zero {z : ℂ} : abs z = 0 ↔ z = 0 := iff.trans (real.sqrt_eq_zero (norm_sq_nonneg z)) norm_sq_eq_zero theorem abs_ne_zero {z : ℂ} : abs z ≠ 0 ↔ z ≠ 0 := not_congr abs_eq_zero @[simp] theorem abs_conj (z : ℂ) : abs (coe_fn conj z) = abs z := sorry @[simp] theorem abs_mul (z : ℂ) (w : ℂ) : abs (z * w) = abs z * abs w := sorry theorem abs_re_le_abs (z : ℂ) : abs (re z) ≤ abs z := sorry theorem abs_im_le_abs (z : ℂ) : abs (im z) ≤ abs z := sorry theorem re_le_abs (z : ℂ) : re z ≤ abs z := and.right (iff.mp abs_le (abs_re_le_abs z)) theorem im_le_abs (z : ℂ) : im z ≤ abs z := and.right (iff.mp abs_le (abs_im_le_abs z)) theorem abs_add (z : ℂ) (w : ℂ) : abs (z + w) ≤ abs z + abs w := sorry protected instance abs.is_absolute_value : is_absolute_value abs := is_absolute_value.mk abs_nonneg (fun (_x : ℂ) => abs_eq_zero) abs_add abs_mul @[simp] theorem abs_abs (z : ℂ) : abs (abs z) = abs z := abs_of_nonneg (abs_nonneg z) @[simp] theorem abs_pos {z : ℂ} : 0 < abs z ↔ z ≠ 0 := is_absolute_value.abv_pos abs @[simp] theorem abs_neg (z : ℂ) : abs (-z) = abs z := is_absolute_value.abv_neg abs theorem abs_sub (z : ℂ) (w : ℂ) : abs (z - w) = abs (w - z) := is_absolute_value.abv_sub abs theorem abs_sub_le (a : ℂ) (b : ℂ) (c : ℂ) : abs (a - c) ≤ abs (a - b) + abs (b - c) := is_absolute_value.abv_sub_le abs @[simp] theorem abs_inv (z : ℂ) : abs (z⁻¹) = (abs z⁻¹) := is_absolute_value.abv_inv abs @[simp] theorem abs_div (z : ℂ) (w : ℂ) : abs (z / w) = abs z / abs w := is_absolute_value.abv_div abs theorem abs_abs_sub_le_abs_sub (z : ℂ) (w : ℂ) : abs (abs z - abs w) ≤ abs (z - w) := is_absolute_value.abs_abv_sub_le_abv_sub abs theorem abs_le_abs_re_add_abs_im (z : ℂ) : abs z ≤ abs (re z) + abs (im z) := sorry theorem abs_re_div_abs_le_one (z : ℂ) : abs (re z / abs z) ≤ 1 := sorry theorem abs_im_div_abs_le_one (z : ℂ) : abs (im z / abs z) ≤ 1 := sorry @[simp] theorem abs_cast_nat (n : ℕ) : abs ↑n = ↑n := eq.mpr (id (Eq._oldrec (Eq.refl (abs ↑n = ↑n)) 
(Eq.symm (of_real_nat_cast n)))) (eq.mpr (id (Eq._oldrec (Eq.refl (abs ↑↑n = ↑n)) (abs_of_nonneg (nat.cast_nonneg n)))) (Eq.refl ↑n)) @[simp] theorem int_cast_abs (n : ℤ) : ↑(abs n) = abs ↑n := eq.mpr (id (Eq._oldrec (Eq.refl (↑(abs n) = abs ↑n)) (Eq.symm (of_real_int_cast n)))) (eq.mpr (id (Eq._oldrec (Eq.refl (↑(abs n) = abs ↑↑n)) (abs_of_real ↑n))) (eq.mpr (id (Eq._oldrec (Eq.refl (↑(abs n) = abs ↑n)) int.cast_abs)) (Eq.refl (abs ↑n)))) theorem norm_sq_eq_abs (x : ℂ) : coe_fn norm_sq x = abs x ^ bit0 1 := sorry /-! ### Cauchy sequences -/ theorem is_cau_seq_re (f : cau_seq ℂ abs) : is_cau_seq abs fun (n : ℕ) => re (coe_fn f n) := sorry theorem is_cau_seq_im (f : cau_seq ℂ abs) : is_cau_seq abs fun (n : ℕ) => im (coe_fn f n) := sorry /-- The real part of a complex Cauchy sequence, as a real Cauchy sequence. -/ def cau_seq_re (f : cau_seq ℂ abs) : cau_seq ℝ abs := { val := fun (n : ℕ) => re (coe_fn f n), property := is_cau_seq_re f } /-- The imaginary part of a complex Cauchy sequence, as a real Cauchy sequence. -/ def cau_seq_im (f : cau_seq ℂ abs) : cau_seq ℝ abs := { val := fun (n : ℕ) => im (coe_fn f n), property := is_cau_seq_im f } theorem is_cau_seq_abs {f : ℕ → ℂ} (hf : is_cau_seq abs f) : is_cau_seq abs (abs ∘ f) := sorry /-- The limit of a Cauchy sequence of complex numbers. -/ def lim_aux (f : cau_seq ℂ abs) : ℂ := mk (cau_seq.lim (cau_seq_re f)) (cau_seq.lim (cau_seq_im f)) theorem equiv_lim_aux (f : cau_seq ℂ abs) : f ≈ cau_seq.const abs (lim_aux f) := sorry protected instance abs.cau_seq.is_complete : cau_seq.is_complete ℂ abs := cau_seq.is_complete.mk sorry theorem lim_eq_lim_im_add_lim_re (f : cau_seq ℂ abs) : cau_seq.lim f = ↑(cau_seq.lim (cau_seq_re f)) + ↑(cau_seq.lim (cau_seq_im f)) * I := sorry theorem lim_re (f : cau_seq ℂ abs) : cau_seq.lim (cau_seq_re f) = re (cau_seq.lim f) := sorry theorem lim_im (f : cau_seq ℂ abs) : cau_seq.lim (cau_seq_im f) = im (cau_seq.lim f) := sorry theorem is_cau_seq_conj (f : cau_seq ℂ abs) : is_cau_seq abs fun (n : ℕ) => coe_fn conj (coe_fn f n) := sorry /-- The complex conjugate of a complex Cauchy sequence, as a complex Cauchy sequence. -/ def cau_seq_conj (f : cau_seq ℂ abs) : cau_seq ℂ abs := { val := fun (n : ℕ) => coe_fn conj (coe_fn f n), property := is_cau_seq_conj f } theorem lim_conj (f : cau_seq ℂ abs) : cau_seq.lim (cau_seq_conj f) = coe_fn conj (cau_seq.lim f) := sorry /-- The absolute value of a complex Cauchy sequence, as a real Cauchy sequence. -/ def cau_seq_abs (f : cau_seq ℂ abs) : cau_seq ℝ abs := { val := abs ∘ subtype.val f, property := sorry } theorem lim_abs (f : cau_seq ℂ abs) : cau_seq.lim (cau_seq_abs f) = abs (cau_seq.lim f) := sorry @[simp] theorem of_real_prod {α : Type u_1} (s : finset α) (f : α → ℝ) : ↑(finset.prod s fun (i : α) => f i) = finset.prod s fun (i : α) => ↑(f i) := ring_hom.map_prod of_real (fun (x : α) => f x) s @[simp] theorem of_real_sum {α : Type u_1} (s : finset α) (f : α → ℝ) : ↑(finset.sum s fun (i : α) => f i) = finset.sum s fun (i : α) => ↑(f i) := ring_hom.map_sum of_real (fun (x : α) => f x) s end Mathlib
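A quick numerical check of the inversion formula above (a worked example added for illustration; it is not part of the mathlib export): the instance `has_inv` defines $z^{-1} = \overline{z}\,(\mathrm{norm\_sq}\,z)^{-1}$. For $z = 3 + 4i$ we get $\mathrm{norm\_sq}\,z = 3\cdot 3 + 4\cdot 4 = 25$, hence $z^{-1} = (3 - 4i)/25$, and indeed $z \cdot z^{-1} = (3+4i)(3-4i)/25 = 25/25 = 1$, consistent with `mul_conj` and `mul_inv_cancel`.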
(* * Vericert: Verified high-level synthesis. * Copyright (C) 2021 Yann Herklotz <[email protected]> * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. *) Require Import compcert.backend.Registers. Require Import compcert.lib.Maps. Require Import vericert.common.Vericertlib. #[local] Open Scope positive. Record divider (signed: bool) : Type := mk_divider { div_stages: positive; div_size: positive; div_numer: reg; div_denom: reg; div_quot: reg; div_rem: reg; div_ordering: (div_numer < div_denom /\ div_denom < div_quot /\ div_quot < div_rem) }. Arguments div_stages [signed]. Arguments div_size [signed]. Arguments div_numer [signed]. Arguments div_denom [signed]. Arguments div_quot [signed]. Arguments div_rem [signed]. Record ram := mk_ram { ram_size: nat; ram_mem: reg; ram_en: reg; ram_u_en: reg; ram_addr: reg; ram_wr_en: reg; ram_d_in: reg; ram_d_out: reg; }. Definition all_ram_regs r := ram_mem r::ram_en r::ram_u_en r::ram_addr r::ram_wr_en r::ram_d_in r::ram_d_out r::nil. Inductive funct_unit: Type := | SignedDiv: divider true -> funct_unit | UnsignedDiv: divider false -> funct_unit | Ram: ram -> funct_unit. Definition funct_units := PTree.t funct_unit. Record arch := mk_arch { arch_div: list positive; arch_sdiv: list positive; arch_ram: list positive; }. Record resources := mk_resources { res_funct_units: funct_units; res_arch: arch; }. Definition index_div {b:bool} r (d: divider b) := match r with | 1 => div_numer d | 2 => div_denom d | 3 => div_quot d | _ => div_rem d end. Definition index_ram r (d: ram) := match r with | 1 => ram_mem d | 2 => ram_en d | 3 => ram_u_en d | 4 => ram_addr d | 5 => ram_wr_en d | 6 => ram_d_in d | _ => ram_d_out d end. Definition index_res u r res := match PTree.get u res with | Some (SignedDiv d) => Some (index_div r d) | Some (UnsignedDiv d) => Some (index_div r d) | Some (Ram d) => Some (index_ram r d) | None => None end. Definition get_ram n res: option (positive * ram) := match nth_error (arch_ram (res_arch res)) n with | Some ri => match PTree.get ri (res_funct_units res) with | Some (Ram r) => Some (ri, r) | _ => None end | None => None end. Definition get_div n res := match nth_error (arch_div (res_arch res)) n with | Some ri => match PTree.get ri (res_funct_units res) with | Some (UnsignedDiv d) => Some (ri, d) | _ => None end | None => None end. Definition get_sdiv n res := match nth_error (arch_sdiv (res_arch res)) n with | Some ri => match PTree.get ri (res_funct_units res) with | Some (SignedDiv d) => Some (ri, d) | _ => None end | None => None end. 
Definition set_res fu res := let max := ((fold_left Pos.max ((arch_sdiv (res_arch res)) ++ (arch_div (res_arch res)) ++ (arch_ram (res_arch res))) 1) + 1)%positive in let nt := PTree.set max fu (res_funct_units res) in match fu with | UnsignedDiv _ => mk_resources nt (mk_arch (max :: arch_div (res_arch res)) (arch_sdiv (res_arch res)) (arch_ram (res_arch res))) | SignedDiv _ => mk_resources nt (mk_arch (arch_div (res_arch res)) (max :: arch_sdiv (res_arch res)) (arch_ram (res_arch res))) | Ram _ => mk_resources nt (mk_arch (arch_div (res_arch res)) (arch_sdiv (res_arch res)) (max :: arch_ram (res_arch res))) end. Definition initial_funct_units: funct_units := PTree.empty _. Definition initial_arch := mk_arch nil nil nil. Definition initial_resources := mk_resources initial_funct_units initial_arch. Definition funct_unit_stages (f: funct_unit) : positive := match f with | SignedDiv d => div_stages d | UnsignedDiv d => div_stages d | _ => 1 end. Definition max_reg_ram r := fold_right Pos.max 1 (ram_mem r::ram_en r::ram_u_en r::ram_addr r ::ram_wr_en r::ram_d_in r::ram_d_out r::nil). Definition max_reg_divider {b: bool} (d: divider b) := fold_right Pos.max 1 (div_numer d::div_denom d::div_quot d::div_rem d::nil). Definition max_reg_fu fu := match fu with | SignedDiv d | UnsignedDiv d => max_reg_divider d | Ram r => max_reg_ram r end. Definition max_reg_funct_units r := PTree.fold (fun m _ a => Pos.max m (max_reg_fu a)) r 1. Definition max_reg_resources r := max_reg_funct_units r.(res_funct_units).
[STATEMENT] lemma writes_subset: "writes S f h h' \<Longrightarrow> S \<subseteq> S' \<Longrightarrow> writes S' f h h'" [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>writes S f h h'; S \<subseteq> S'\<rbrakk> \<Longrightarrow> writes S' f h h' [PROOF STEP] by(auto simp add: writes_def)
Doxplore Digitizer is software that helps you scan your physical documents and perform OCR (Optical Character Recognition) on the scanned images to make them searchable by content. If you lose or accidentally erase an important digital file, such as a proposal or a contract, but still have a hard copy, you can easily replace it in your system by using OCR to scan the original paper document or the most recent draft. OCR software converts scanned text into a format that can be read and searched by computers, enabling search for specific documents using a keyword or phrase. For example, you could effortlessly search hundreds of invoices and locate a specific name or account in moments, without manually browsing through a large set of files. Once you’ve scanned your document using OCR, you have the option to edit the text by storing it in Word format (.doc). This expedites the editing of scanned files that may need to be updated in the future. Free up physical storage space by scanning paper documents and disposing of or warehousing the original physical documents. After scanning and OCR, these documents can also serve as a backup in case of unforeseen damage to the paper originals. Ability to scan physical documents from TWAIN-compatible devices. You may scan single or multiple documents in a single session. Multiple scanned files can be combined into a single Adobe PDF file, or separate PDF files can be created for each individual scanned file. Scan multiple image formats like BMP, JPEG, PNG, TIFF, GIF, or a PDF document. You can perform OCR on the fly with the Scan operation. Users can define the number of pages on which to perform OCR. The readable text produced by OCR can be displayed in a Word document. Images in the paper document will be placed in the same location after OCR, thereby retaining the original layout. Ability to detect incorrectly oriented pages and fix them automatically. Ability to scale images automatically for better recognition. Robust technology supports images with poor brightness or low contrast. Ability to detect and handle inverted text. Unique character-analysis technology delivers reliable recognition of any font. Advanced algorithms handle poor-quality, distorted, connected, and broken characters. This section is used to perform different operations on PDF files. Some operations are restricted for PDF files that are password protected or corrupted. Combine multiple pages of a file into a single PDF. Extract pages from a source PDF file and create a new PDF file from the selected pages only. Insert the content of one PDF file into another at the position specified by the user. Password-protect or unprotect PDF files.
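The PDF operations listed above (combining scans into one PDF, extracting a page range) map onto standard PDF-toolkit calls. Below is a minimal sketch of the merge and page-extraction steps using the open-source pypdf library; this is an illustration under that assumption, not Doxplore's actual engine, and the file names are hypothetical.

from pypdf import PdfReader, PdfWriter

def merge_pdfs(paths, out_path):
    # Combine several scanned PDF files into a single PDF, page by page.
    writer = PdfWriter()
    for path in paths:
        for page in PdfReader(path).pages:
            writer.add_page(page)
    with open(out_path, "wb") as f:
        writer.write(f)

def extract_pages(src_path, first, last, out_path):
    # Create a new PDF from pages first..last (1-based, inclusive) of the source.
    reader = PdfReader(src_path)
    writer = PdfWriter()
    for page in reader.pages[first - 1:last]:
        writer.add_page(page)
    with open(out_path, "wb") as f:
        writer.write(f)

merge_pdfs(["scan1.pdf", "scan2.pdf"], "combined.pdf")   # combine into one file
extract_pages("combined.pdf", 2, 3, "excerpt.pdf")       # copy a page range

merge_pdfs mirrors the "combine multiple scanned files into a single Adobe PDF" feature, and extract_pages mirrors "create a new PDF file from the selected pages only".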
\section{Floquet Conductivity in Landau Levels} Now we can derive the conductivity expression for a given Landau level using the Floquet conductivity expression derived in [*Ref:my report 2.488]. Before that, let us consider the inverse-scattering-time matrix element from the previous section. From Eq. \eqref{5.3} we can express the $N$th Landau level's inverse-scattering-time central element ($n=n'=0$) as \begin{equation} \label{6.1} \begin{aligned} \qty(\frac{1}{\tau(\varepsilon,k_x)})^{00}_{N} = \frac { N_{imp}^2 A^2 \hbar V_{imp}}{16\pi^4 \qty(eB)^2} \delta(\varepsilon - \varepsilon_{N}) & \int_{-\infty}^{\infty} d {k'}_x \; J_0^2\qty(\frac{g\hbar}{eB}[{k}_x - {k'}_x]) \\ & \times \qty| \int_{-\infty}^{\infty} d\bar{k} \; {\chi}_{N}\qty(\frac{\hbar}{eB}\bar{k}) {\chi}_{N}\qty(\frac{\hbar}{eB} \qty[{k'}_x - {k}_x - \bar{k}])|^2. \end{aligned} \end{equation} Now we can introduce a new parameter with the physical meaning of the scattering-induced broadening of the Landau level as follows \begin{equation} \label{6.2} \Gamma^{00}_{N}(\varepsilon,k_x) \equiv \hbar \qty(\frac{1}{\tau(\varepsilon,k_x)})^{00}_{N} \end{equation} and this modifies our previous expression as \begin{equation} \label{6.3} \begin{aligned} \Gamma^{00}_{N}(\varepsilon,k_x) = \frac { N_{imp}^2 A^2 \hbar V_{imp}}{16\pi^4 \qty(eB)^2} \delta(\varepsilon - \varepsilon_{N}) & \int_{-\infty}^{\infty} d {k'}_x \; J_0^2\qty(\frac{g\hbar}{eB}[{k}_x - {k'}_x]) \\ & \times \qty| \int_{-\infty}^{\infty} d\bar{k} \; {\chi}_{N}\qty(\frac{\hbar}{eB}\bar{k}) {\chi}_{N}\qty(\frac{\hbar}{eB} \qty[{k'}_x - {k}_x - \bar{k}])|^2. \end{aligned} \end{equation} In addition, for the case of elastic scattering within the same Landau level, one can represent the delta distribution of the energy using the same physical interpretation as follows \begin{equation} \label{6.4} \delta(\varepsilon - \varepsilon_{N}) \approx \frac{1}{\pi \Gamma^{00}_{N}(\varepsilon,k_x)} \end{equation} and this leads to \begin{equation} \label{6.5} \begin{aligned} \qty[\Gamma^{00}_{N}(\varepsilon,k_x)]^2 = \frac { N_{imp}^2 A^2 \hbar V_{imp}}{16\pi^5 \qty(eB)^2} \int_{-\infty}^{\infty} d {k'}_x \; J_0^2\qty(\frac{g\hbar}{eB}[{k}_x - {k'}_x]) \qty| \int_{-\infty}^{\infty} d\bar{k} \; {\chi}_{N}\qty(\frac{\hbar}{eB}\bar{k}) {\chi}_{N}\qty(\frac{\hbar}{eB} \qty[{k'}_x - {k}_x - \bar{k}])|^2. \end{aligned} \end{equation} and \begin{equation} \label{6.6} \begin{aligned} \Gamma^{00}_{N}(\varepsilon,k_x) = \qty[ \frac { N_{imp}^2 A^2 \hbar V_{imp}}{16\pi^5 \qty(eB)^2} \int_{-\infty}^{\infty} d {k'}_x \; J_0^2\qty(\frac{g\hbar}{eB}[{k}_x - {k'}_x]) \qty| \int_{-\infty}^{\infty} d\bar{k} \; {\chi}_{N}\qty(\frac{\hbar}{eB}\bar{k}) {\chi}_{N}\qty(\frac{\hbar}{eB} \qty[{k'}_x - {k}_x - \bar{k}])|^2 ]^{1/2}. \end{aligned} \end{equation} Numerical calculations show that the above integral does not depend on the value of $k_x$, so we may choose any value for $k_x$. Therefore, setting $k_x = 0$ and letting $k'_x \rightarrow k_1,\bar{k}\rightarrow k_2$, we can rewrite our equation as \begin{equation} \label{6.7} \begin{aligned} \Gamma^{00}_{N}(\varepsilon,k_x) = \qty[ \frac { N_{imp}^2 A^2 \hbar V_{imp}}{16\pi^5 \qty(eB)^2} \int_{-\infty}^{\infty} d k_1\; J_0^2\qty(\frac{g\hbar}{eB} k_1) \qty| \int_{-\infty}^{\infty} dk_2 \; {\chi}_{N}\qty(\frac{\hbar}{eB} k_2) {\chi}_{N}\qty(\frac{\hbar}{eB} \qty[k_1 - k_2])|^2 ]^{1/2}. 
\end{aligned} \end{equation} \noindent Now we can compare the central element of the energy-level broadening for each Landau level by plotting the normalized Landau-level broadening (inverse scattering time) against the amplitude of the applied dressing field's electric field ($E$) as follows \begin{equation} \label{6.8} \Lambda^{00}_{N} \equiv \frac{\Gamma^{00}_{N}(\varepsilon,k_x)} {\Gamma^{00}_{N}(\varepsilon,k_x)\big|_{E=0}}. \end{equation} As can be seen in Fig. \ref{fig6.1}, as the intensity of the applied dressing field increases, the broadening of the Landau energy levels decreases. The size of this decrease depends on the Landau level under consideration: the higher the level, the weaker the effect. \begin{figure}[ht!] \centering \includegraphics[scale=0.5]{figures/fig04.pdf} \caption{Normalized broadening of the Landau levels against the dressing field's amplitude. The red line represents the $N=0$ level, the blue line $N=1$, the green line $N=2$, and the purple line $N=3$.} \label{fig6.1} \end{figure} \noindent Then we can use the Floquet conductivity expression derived in [*Ref:my report 2.488] as follows \begin{equation} \label{6.9} \begin{aligned} \lim_{\omega \to 0} \text{Re}[{\sigma}^{xx}(0,\omega)] &= \frac{-1}{4\pi\hbar A} \int_{\lambda-\hbar\Omega/2}^{\lambda+ \hbar\Omega/2} d\varepsilon \bigg( -\frac{\partial f}{\partial \varepsilon} \bigg) \frac{1}{A}\sum_{\mb{k}} \\ & \times \sum_{s,s'=-\infty}^{\infty} {j}^x_s(\mb{k}){j}^x_{s'}(\mb{k}) \tr_s \big[ \big( \mb{G}^{r}_0 (\varepsilon;\mb{k}) - \mb{G}^{a}_0 (\varepsilon;\mb{k}) \big) \odot_s \big( \mb{G}^{r}_0 (\varepsilon;\mb{k}) - \mb{G}^{a}_0 (\varepsilon;\mb{k}) \big) \big]. \end{aligned} \end{equation} However, in this case we consider only the $x$-directional momentum as the quantum number separating different states, and we let $\lambda = \varepsilon_N$. In addition, if the current-operator derivation gives component values only for $s=s'=0$, our conductivity equation is modified to \begin{equation} \label{6.10} \begin{aligned} \lim_{\omega \to 0} \text{Re}[{\sigma}^{xx}(0,\omega)] &= \frac{-1}{4\pi\hbar A} \int_{\varepsilon_N - \hbar\Omega/2}^{\varepsilon_N + \hbar\Omega/2} d\varepsilon \bigg( -\frac{\partial f}{\partial \varepsilon} \bigg) \frac{1}{L_x}\sum_{{k_x}} \qty[{j}^x_0({k_x})]^2 \\ & \times \tr \big[ \big( \mb{G}^{r}_0 (\varepsilon;{k_x}) - \mb{G}^{a}_0 (\varepsilon;{k_x}) \big) \big( \mb{G}^{r}_0 (\varepsilon;{k_x}) - \mb{G}^{a}_0 (\varepsilon;{k_x}) \big) \big]. \end{aligned} \end{equation} Now we can expand the above expression using the unitary transformation \begin{equation} \label{6.11} \qty(\mb{T})_{\alpha}^{nn'} \equiv \ket*{\phi_{\alpha}^{n+n'}} \end{equation} where \begin{equation} \label{6.12} \ket{\phi_{\alpha}(t)} = \sum_{n = - \infty}^{\infty} e^{-in\omega t} \ket{\phi_{\alpha}^{n}}. \end{equation} Therefore our conductivity equation becomes \begin{equation} \label{6.13} \begin{aligned} \lim_{\omega \to 0} \text{Re}[{\sigma}^{xx}(0,\omega)] &= \frac{-1}{4\pi\hbar A} \int_{\varepsilon_N - \hbar\Omega/2}^{\varepsilon_N + \hbar\Omega/2} d\varepsilon \bigg( -\frac{\partial f}{\partial \varepsilon} \bigg) \frac{1}{L_x}\sum_{{k_x}} \qty[{j}^x_0({k_x})]^2 \\ & \times \qty[ \mb{T}^{\dagger}(k_x) \big( \mb{G}^{r}_0 (\varepsilon;{k_x}) - \mb{G}^{a}_0 (\varepsilon;{k_x}) \big) \mb{T}(k_x) ]^2_{00}. 
\end{aligned} \end{equation} As derived in [*Ref:my report 2.547] we can present this matrix-multiplication result as follows \begin{equation} \label{6.14} \begin{aligned} \qty[ \mb{T}^{\dagger}(k_x) \big( \mb{G}^{r}_0 (\varepsilon;{k_x}) - \mb{G}^{a}_0 (\varepsilon;{k_x}) \big) \mb{T}(k_x) ]^2_{00} & \approx \qty[ \mb{T}^{\dagger}(k_x) \big( \mb{G}^{r}_0 (\varepsilon;{k_x})\mb{G}^{a}_0 (\varepsilon;{k_x}) \big) \mb{T}(k_x) ]_{00} \\ & = \frac{-1} { \qty(\frac{\varepsilon}{\hbar} - \frac{\varepsilon_N}{\hbar})^2 + \qty(\frac{\Gamma^{00}_{N}(\varepsilon,k_x)}{2\hbar})^2 } \end{aligned} \end{equation} and this can be further simplified as \begin{equation} \label{6.15} \begin{aligned} \qty[ \mb{T}^{\dagger}(k_x) \big( \mb{G}^{r}_0 (\varepsilon;{k_x}) - \mb{G}^{a}_0 (\varepsilon;{k_x}) \big) \mb{T}(k_x) ]^2_{00} = \frac{-\hbar^2} {\qty[\Gamma^{00}_{N}(\varepsilon,k_x)]^2} \qty[ 1 + \qty(\frac{{\varepsilon}-{\varepsilon_N}}{\Gamma^{00}_{N}(\varepsilon,k_x)/2})^2 ]^{-1}. \end{aligned} \end{equation} Since the squared term in the square brackets goes to zero under the conditions of validity, we can apply the binomial approximation to the bracketed term and obtain \begin{equation} \label{6.16} \begin{aligned} \qty[ \mb{T}^{\dagger}(k_x) \big( \mb{G}^{r}_0 (\varepsilon;{k_x}) - \mb{G}^{a}_0 (\varepsilon;{k_x}) \big) \mb{T}(k_x) ]^2_{00} = \frac{-\hbar^2} {\qty[\Gamma^{00}_{N}(\varepsilon,k_x)]^2} \qty[ 1 - 4\qty(\frac{{\varepsilon}-{\varepsilon_N}}{\Gamma^{00}_{N}(\varepsilon,k_x)})^2 ] \end{aligned} \end{equation} \noindent Now the conductivity expression is modified to \begin{equation} \label{6.17} \begin{aligned} \lim_{\omega \to 0} \text{Re}[{\sigma}^{xx}(0,\omega)] = \frac{\hbar}{4\pi A L_x} \int_{\varepsilon_N - \hbar\Omega/2}^{\varepsilon_N + \hbar\Omega/2} d\varepsilon \bigg( -\frac{\partial f}{\partial \varepsilon} \bigg) \sum_{{k_x}} \qty[{j}^x_0({k_x})]^2 \frac{\hbar^2} {\qty[\Gamma^{00}_{N}(\varepsilon,k_x)]^2} \qty[ 1 - 4\qty(\frac{{\varepsilon}-{\varepsilon_N}}{\Gamma^{00}_{N}(\varepsilon,k_x)})^2 ] \end{aligned} \end{equation} \noindent Then, assuming we are considering fermions at zero temperature in this scenario, we can describe the particle distribution function using the zero-temperature limit of the Fermi--Dirac distribution, \begin{equation} \label{6.18} -\frac{\partial f}{\partial \varepsilon} = \delta(\varepsilon_F - \varepsilon) \end{equation} where $\varepsilon_F$ represents the Fermi energy of the considered material. \noindent With this approximation we can derive \begin{equation} \label{6.19} \begin{aligned} \lim_{\omega \to 0} \text{Re}[{\sigma}^{xx}(0,\omega)] = \frac{\hbar}{4\pi A L_x} \int_{\varepsilon_N - \hbar\Omega/2}^{\varepsilon_N + \hbar\Omega/2} d\varepsilon \delta(\varepsilon_F - \varepsilon) \sum_{{k_x}} \qty[{j}^x_0({k_x})]^2 \frac{\hbar^2} {\qty[\Gamma^{00}_{N}(\varepsilon,k_x)]^2} \qty[ 1 - 4\qty(\frac{{\varepsilon}-{\varepsilon_N}}{\Gamma^{00}_{N}(\varepsilon,k_x)})^2 ] \end{aligned} \end{equation} and \begin{equation} \label{6.20} \begin{aligned} \lim_{\omega \to 0} \text{Re}[{\sigma}^{xx}(0,\omega)] = \frac{\hbar}{4\pi A L_x} \sum_{{k_x}} \qty[{j}^x_0({k_x})]^2 \frac{\hbar^2} {\qty[\Gamma^{00}_{N}(\varepsilon_F,k_x)]^2} \qty[ 1 - 4\qty(\frac{{\varepsilon_F}-{\varepsilon_N}}{\Gamma^{00}_{N}(\varepsilon_F,k_x)})^2 ] \end{aligned} \end{equation} \hfill$\blacksquare$
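To make Eqs. \eqref{6.7}--\eqref{6.8} concrete, the following is a minimal numerical sketch (added for illustration; it is not the report's code) that evaluates the normalized broadening $\Lambda^{00}_{N}$ on a quadrature grid. It assumes scaled units $\hbar/(eB) = 1$ with magnetic length $l = 1$ for the oscillator functions $\chi_N$, and treats the Bessel argument $g \propto E$ as the dressing-field knob; the grid ranges and the sample values of $g$ are arbitrary choices.

import numpy as np
from scipy.special import j0, hermite, factorial

def chi(N, x):
    # Harmonic-oscillator eigenfunction chi_N in units with magnetic length l = 1.
    norm = 1.0 / np.sqrt(2.0**N * factorial(N) * np.sqrt(np.pi))
    return norm * hermite(N)(x) * np.exp(-x**2 / 2.0)

def overlap_sq(N, k1, k2_grid):
    # |integral dk2 chi_N(k2) chi_N(k1 - k2)|^2, evaluated with the trapezoidal rule.
    integrand = chi(N, k2_grid) * chi(N, k1 - k2_grid)
    return np.trapz(integrand, k2_grid) ** 2

def I_of_g(N, g, k1_grid, k2_grid):
    # The k1 integral of J0(g k1)^2 times the squared overlap, cf. Eq. (6.7).
    f = np.array([overlap_sq(N, k1, k2_grid) for k1 in k1_grid])
    return np.trapz(j0(g * k1_grid) ** 2 * f, k1_grid)

k1 = np.linspace(-20.0, 20.0, 801)
k2 = np.linspace(-25.0, 25.0, 1001)
for N in range(4):
    I0 = I_of_g(N, 0.0, k1, k2)
    # Lambda = Gamma(g)/Gamma(0) = sqrt(I(g)/I(0)); the prefactor of Eq. (6.7) cancels.
    lam = [np.sqrt(I_of_g(N, g, k1, k2) / I0) for g in (0.5, 1.0, 2.0)]
    print(N, ["%.3f" % v for v in lam])

The printed ratios can be compared against the trend described around Fig. \ref{fig6.1}: since $J_0^2 \le 1$, they fall below unity as $g$ grows, with a drop that depends on the Landau level $N$.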
%% Copyright (C) 2009-2011, Gostai S.A.S. %% %% This software is provided "as is" without warranty of any kind, %% either expressed or implied, including but not limited to the %% implied warranties of fitness for a particular purpose. %% %% See the LICENSE file for more information. \section{Dictionary} A \dfn{dictionary} is an \dfn{associative array}, also known as a \dfn{hash} in some programming languages. They are arrays whose indexes are arbitrary objects. \subsection{Example} The following session demonstrates the features of Dictionary objects. \begin{urbiscript}[firstnumber=1] var d = ["one" => 1, "two" => 2]; [00000001] ["one" => 1, "two" => 2] for (var p : d) echo (p.first + " => " + p.second); [00000003] *** one => 1 [00000002] *** two => 2 "three" in d; [00000004] false d["three"]; [00000005:error] !!! missing key: three d["three"] = d["one"] + d["two"]|; "three" in d; [00000006] true d.getWithDefault("four", 4); [00000007] 4 \end{urbiscript} \subsection{Hash values} \label{sec:dictionary:hash} Arbitrary objects can be used as dictionary keys. To map to the same cell, two objects used as keys must have equal hashes (retrieved with the \refSlot[Object]{hash} method) and be equal to each other (in the \refSlot[Object]{'=='} sense). This means that two different objects may have the same hash: the equality operator (\refSlot[Object]{'=='}) is checked in addition to the hash, to handle such collisions. However, a good hash algorithm should avoid this case, since it hinders performance. See \refSlot[Object]{hash} for more detail on how to override hash values. Most standard value-based classes implement a reasonable hash function: see \refSlot[Float]{hash}, \refSlot[String]{hash}, \refSlot[List]{hash}, \ldots \subsection{Prototypes} \begin{refObjects} \item[Comparable] \item[Container] \item[Object] \item[RangeIterable] \end{refObjects} \subsection{Construction} The Dictionary constructor takes arguments by pair (key, value). \begin{urbiscript} Dictionary.new("one", 1, "two", 2); [00000000] ["one" => 1, "two" => 2] Dictionary.new(); [00000000] [ => ] \end{urbiscript} There must be an even number of arguments. \begin{urbiscript} Dictionary.new("1", 2, "3"); [00000001:error] !!! new: odd number of arguments \end{urbiscript} You are encouraged to use the specific syntax for Dictionary literals: \begin{urbiscript} ["one" => 1, "two" => 2]; [00000000] ["one" => 1, "two" => 2] [=>]; [00000000] [ => ] \end{urbiscript} An extra comma can be added at the end of the list. \begin{urbiscript} [ "one" => 1, "two" => 2, ]; [00000000] ["one" => 1, "two" => 2] \end{urbiscript} It is guaranteed that the pairs to insert are evaluated left-to-right, key first, then the value. \begin{urbiassert} ["a".fresh() => "b".fresh(), "c".fresh() => "d".fresh()] == ["a_5" => "b_6", "c_7" => "d_8"]; \end{urbiassert} Duplicate keys in a Dictionary literal are an error. In this regard, \us departs from the choices made in JavaScript, Perl, Python, Ruby, and probably many other languages. \begin{urbiscript} ["one" => 1, "one" => 2]; [00000001:error] !!! duplicate dictionary key: "one" \end{urbiscript} \subsection{Slots} \begin{urbiscriptapi} \item['=='](<that>)% Whether \this equals \var{that}. Expects members to be \refObject{Comparable}. \begin{urbiassert} [ => ] == [ => ]; ["a" => 1, "b" => 2] == ["b" => 2, "a" => 1]; \end{urbiassert} \item|'[]'|(<key>)% Syntactic sugar for \lstinline|get(\var{key})|. \begin{urbiscript} assert (["one" => 1]["one"] == 1); ["one" => 1]["two"]; [00000012:error] !!! 
missing key: two \end{urbiscript} \item|'[]='|(<key>, <value>)% Syntactic sugar for \lstinline|set(\var{key}, \var{value})|, but returns \var{value}. \begin{urbiassert} var d = ["one" =>"2"]; (d["one"] = 1) == 1; d["one"] == 1; \end{urbiassert} \item[asBool] Negation of \refSlot{empty}. \begin{urbiassert} [=>].asBool() == false; ["key" => "value"].asBool() == true; \end{urbiassert} \item[asList]% The contents of the dictionary as a \refObject{Pair} list (\var{key}, \var{value}). \begin{urbiassert} ["one" => 1, "two" => 2].asList() == [("one", 1), ("two", 2)]; \end{urbiassert} \noindent Since Dictionary derives from \refObject{RangeIterable}, it is easy to iterate over a Dictionary using a range-\lstinline|for| (\autoref{sec:lang:foreach}). No particular order is ensured. \begin{urbiscript} { var res = []; for| (var entry: ["one" => 1, "two" => 2]) res << entry.second; assert(res == [1, 2]); }; \end{urbiscript} \item[asString] A string representing the dictionary. There is no guarantee on the order of the output. \begin{urbiassert} [=>].asString() == "[ => ]"; ["a" => 1, "b" => 2].asString() == "[\"a\" => 1, \"b\" => 2]"; \end{urbiassert} \item[asTree]% Display the content of the Dictionary as a tree representation. \begin{urbiscript} echo("simple dictionary:" + ["key1" => "elt1", "key2" => ["key3" => "elt3"]].asTree()); [:][00000001] *** simple dictionary: [:][ [:] key1 => elt1, [:] key2 => [:] [ [:] key3 => elt3, [:] ] [:]] echo("dictionary with list:" + ["key1" => "elt1", "key2" => ["key3" => ["key4", "key5"]]].asTree()); [:][00000002] *** dictionary with list: [:][ [:] key1 => elt1, [:] key2 => [:] [ [:] key3 => [:] [ [:] key4, [:] key5, [:] ] [:] ] [:]] \end{urbiscript} \item[clear] Empty the dictionary. \begin{urbiassert} ["one" => 1].clear().empty; \end{urbiassert} \item[elementAdded] An event emitted each time a new element is added to the Dictionary. \item[elementChanged] An event emitted each time the value associated to a key of the Dictionary is changed. \item[elementRemoved] An event emitted each time an element is removed from the Dictionary. \begin{urbiscript} d = [ => ] |; at(d.elementAdded?) echo ("added"); at(d.elementChanged?) echo ("changed"); at(d.elementRemoved?) echo ("removed"); d["key1"] = "value1"; [00000001] "value1" [00000001] *** added d["key2"] = "value2"; [00000001] "value2" [00000001] *** added d["key2"] = "value3"; [00000001] "value3" [00000001] *** changed d.erase("key2"); [00000002] ["key1" => "value1"] [00000001] *** removed d.clear(); [00000003] [ => ] [00000001] *** removed d.clear(); [00000003] [ => ] \end{urbiscript} \item[empty] Whether the dictionary is empty. \begin{urbiassert} [=>].empty == true; ["key" => "value"].empty == false; \end{urbiassert} \item[erase](<key>) Remove the mapping for \var{key}. \begin{urbicomment} removeSlot("d")|; \end{urbicomment} \begin{urbiscript} { var d = ["one" => 1, "two" => 2]; assert { d.erase("two") === d; d == ["one" => 1]; }; try { ["one" => 1, "two" => 2].erase("three"); echo("never reached"); } catch (var e if e.isA(Dictionary.KeyError)) { assert(e.key == "three") }; }; \end{urbiscript} %% commented until a consensus is reached. %% %% \item[extend](<ext>) %% Extend with the dictionary \var{ext}. %% Return the value of the new dictionary. %% \begin{urbiscript} %% d = ["one" => 1, "two" => 2]; %% [00000001] ["one" => 1, "two" => 2] %% d.extend(["one" => 0, "three" => 3]); %% [00000002] ["one" => 0, "three" => 3, "two" => 2] %% \end{urbiscript} \item[get](<key>)% The value associated to \var{key}. 
A \lstinline|Dictionary.KeyError| exception is thrown if the key is missing. % FIXME: the following exception test should be rewritten when (if) % we introduce the throw assertion. \begin{urbiscript} var d = ["one" => 1, "two" => 2]|; assert(d.get("one") == 1); ["one" => 1, "two" => 2].get("three"); [00000010:error] !!! missing key: three try { d.get("three"); echo("never reached"); } catch (var e if e.isA(Dictionary.KeyError)) { assert(e.key == "three") }; \end{urbiscript} \item[getWithDefault](<key>, <defaultValue>)% The value associated to \var{key} if it exists, \var{defaultValue} otherwise. \begin{urbiassert} var d = ["one" => 1, "two" => 2]; d.getWithDefault("one", -1) == 1; d.getWithDefault("three", 3) == 3; \end{urbiassert} \item[has](<key>)% Whether the dictionary has a mapping for \var{key}. \begin{urbiassert} var d = ["one" => 1]; d.has("one"); !d.has("zero"); \end{urbiassert} The infix operators \lstinline|in| and \lstinline|not in| use \lstinline|has| (see \autoref{sec:lang:op:containers}). \begin{urbiassert} "one" in ["one" => 1]; "two" not in ["one" => 1]; \end{urbiassert} \item[init](<key1>, <value1>, ...)% Insert the mapping from \var{key1} to \var{value1} and so forth. \begin{urbiscript} Dictionary.clone().init("one", 1, "two", 2); [00000000] ["one" => 1, "two" => 2] \end{urbiscript} \item[keys]% The list of all the keys. No particular order is ensured. Since \refObject{List} features the same function, uniform iteration over a List or a Dictionary is possible. \begin{urbiassert} var d = ["one" => 1, "two" => 2]; d.keys == ["one", "two"]; \end{urbiassert} \item[matchAgainst](<handler>, <pattern>) Pattern matching on members. See \refObject{Pattern}. \begin{urbiscript} { // Match a subset of the dictionary. ["a" => var a] = ["a" => 1, "b" => 2]; // get the matched value. assert(a == 1); }; \end{urbiscript} \item[set](<key>, <value>)% Map \var{key} to \var{value} and return \this so that invocations to \refSlot{set} can be chained. The possibly existing previous mapping is overridden. \begin{urbiscript} [=>].set("one", 2) .set("two", 2) .set("one", 1); [00000000] ["one" => 1, "two" => 2] \end{urbiscript} \item[size] Number of elements in the dictionary. \begin{urbiassert} var d = [=>]; d.size == 0; d["a"] = 10; d.size == 1; d["b"] = 20; d.size == 2; d["a"] = 30; d.size == 2; \end{urbiassert} \end{urbiscriptapi} %%% Local Variables: %%% coding: utf-8 %%% mode: latex %%% TeX-master: "../urbi-sdk" %%% ispell-dictionary: "american" %%% ispell-personal-dictionary: "../urbi.dict" %%% fill-column: 76 %%% End:
(* Property from Productive Use of Failure in Inductive Proof, Andrew Ireland and Alan Bundy, JAR 1996. This Isabelle theory was produced using the TIP tool offered at the following website: https://github.com/tip-org/tools This file was originally provided as part of the TIP benchmark at the following website: https://github.com/tip-org/benchmarks Yutaka Nagashima at CIIRC, CTU changed the TIP output theory file slightly to make it compatible with Isabelle2017.*) theory TIP_prop_04 imports "../../Test_Base" begin datatype 'a list = nil2 | cons2 "'a" "'a list" datatype Nat = Z | S "Nat" fun x :: "'a list => 'a list => 'a list" where "x (nil2) z = z" | "x (cons2 z2 xs) z = cons2 z2 (x xs z)" fun length :: "'a list => Nat" where "length (nil2) = Z" | "length (cons2 z xs) = S (length xs)" fun double :: "Nat => Nat" where "double (Z) = Z" | "double (S z) = S (S (double z))" theorem property0 : "((length (x y y)) = (double (length y)))" oops end
lemma LIMSEQ_INF: "decseq X \<Longrightarrow> X \<longlonglongrightarrow> (INF i. X i :: 'a::{complete_linorder,linorder_topology})"
(* Title: ZF/ArithSimp.thy Author: Lawrence C Paulson, Cambridge University Computer Laboratory Copyright 2000 University of Cambridge *) section{*Arithmetic with simplification*} theory ArithSimp imports Arith begin ML_file "~~/src/Provers/Arith/cancel_numerals.ML" ML_file "~~/src/Provers/Arith/combine_numerals.ML" ML_file "arith_data.ML" subsection{*Difference*} lemma diff_self_eq_0 [simp]: "m #- m = 0" apply (subgoal_tac "natify (m) #- natify (m) = 0") apply (rule_tac [2] natify_in_nat [THEN nat_induct], auto) done (**Addition is the inverse of subtraction**) (*We need m:nat even if we replace the RHS by natify(m), for consider e.g. n=2, m=omega; then n + (m-n) = 2 + (0-2) = 2 \<noteq> 0 = natify(m).*) lemma add_diff_inverse: "[| n \<le> m; m:nat |] ==> n #+ (m#-n) = m" apply (frule lt_nat_in_nat, erule nat_succI) apply (erule rev_mp) apply (rule_tac m = m and n = n in diff_induct, auto) done lemma add_diff_inverse2: "[| n \<le> m; m:nat |] ==> (m#-n) #+ n = m" apply (frule lt_nat_in_nat, erule nat_succI) apply (simp (no_asm_simp) add: add_commute add_diff_inverse) done (*Proof is IDENTICAL to that of add_diff_inverse*) lemma diff_succ: "[| n \<le> m; m:nat |] ==> succ(m) #- n = succ(m#-n)" apply (frule lt_nat_in_nat, erule nat_succI) apply (erule rev_mp) apply (rule_tac m = m and n = n in diff_induct) apply (simp_all (no_asm_simp)) done lemma zero_less_diff [simp]: "[| m: nat; n: nat |] ==> 0 < (n #- m) \<longleftrightarrow> m<n" apply (rule_tac m = m and n = n in diff_induct) apply (simp_all (no_asm_simp)) done (** Difference distributes over multiplication **) lemma diff_mult_distrib: "(m #- n) #* k = (m #* k) #- (n #* k)" apply (subgoal_tac " (natify (m) #- natify (n)) #* natify (k) = (natify (m) #* natify (k)) #- (natify (n) #* natify (k))") apply (rule_tac [2] m = "natify (m) " and n = "natify (n) " in diff_induct) apply (simp_all add: diff_cancel) done lemma diff_mult_distrib2: "k #* (m #- n) = (k #* m) #- (k #* n)" apply (simp (no_asm) add: mult_commute [of k] diff_mult_distrib) done subsection{*Remainder*} (*We need m:nat even with natify*) lemma div_termination: "[| 0<n; n \<le> m; m:nat |] ==> m #- n < m" apply (frule lt_nat_in_nat, erule nat_succI) apply (erule rev_mp) apply (erule rev_mp) apply (rule_tac m = m and n = n in diff_induct) apply (simp_all (no_asm_simp) add: diff_le_self) done (*for mod and div*) lemmas div_rls = nat_typechecks Ord_transrec_type apply_funtype div_termination [THEN ltD] nat_into_Ord not_lt_iff_le [THEN iffD1] lemma raw_mod_type: "[| m:nat; n:nat |] ==> raw_mod (m, n) \<in> nat" apply (unfold raw_mod_def) apply (rule Ord_transrec_type) apply (auto simp add: nat_into_Ord [THEN Ord_0_lt_iff]) apply (blast intro: div_rls) done lemma mod_type [TC,iff]: "m mod n \<in> nat" apply (unfold mod_def) apply (simp (no_asm) add: mod_def raw_mod_type) done (** Arbitrary definitions for division by zero. 
Useful to simplify certain equations **) lemma DIVISION_BY_ZERO_DIV: "a div 0 = 0" apply (unfold div_def) apply (rule raw_div_def [THEN def_transrec, THEN trans]) apply (simp (no_asm_simp)) done (*NOT for adding to default simpset*) lemma DIVISION_BY_ZERO_MOD: "a mod 0 = natify(a)" apply (unfold mod_def) apply (rule raw_mod_def [THEN def_transrec, THEN trans]) apply (simp (no_asm_simp)) done (*NOT for adding to default simpset*) lemma raw_mod_less: "m<n ==> raw_mod (m,n) = m" apply (rule raw_mod_def [THEN def_transrec, THEN trans]) apply (simp (no_asm_simp) add: div_termination [THEN ltD]) done lemma mod_less [simp]: "[| m<n; n \<in> nat |] ==> m mod n = m" apply (frule lt_nat_in_nat, assumption) apply (simp (no_asm_simp) add: mod_def raw_mod_less) done lemma raw_mod_geq: "[| 0<n; n \<le> m; m:nat |] ==> raw_mod (m, n) = raw_mod (m#-n, n)" apply (frule lt_nat_in_nat, erule nat_succI) apply (rule raw_mod_def [THEN def_transrec, THEN trans]) apply (simp (no_asm_simp) add: div_termination [THEN ltD] not_lt_iff_le [THEN iffD2], blast) done lemma mod_geq: "[| n \<le> m; m:nat |] ==> m mod n = (m#-n) mod n" apply (frule lt_nat_in_nat, erule nat_succI) apply (case_tac "n=0") apply (simp add: DIVISION_BY_ZERO_MOD) apply (simp add: mod_def raw_mod_geq nat_into_Ord [THEN Ord_0_lt_iff]) done subsection{*Division*} lemma raw_div_type: "[| m:nat; n:nat |] ==> raw_div (m, n) \<in> nat" apply (unfold raw_div_def) apply (rule Ord_transrec_type) apply (auto simp add: nat_into_Ord [THEN Ord_0_lt_iff]) apply (blast intro: div_rls) done lemma div_type [TC,iff]: "m div n \<in> nat" apply (unfold div_def) apply (simp (no_asm) add: div_def raw_div_type) done lemma raw_div_less: "m<n ==> raw_div (m,n) = 0" apply (rule raw_div_def [THEN def_transrec, THEN trans]) apply (simp (no_asm_simp) add: div_termination [THEN ltD]) done lemma div_less [simp]: "[| m<n; n \<in> nat |] ==> m div n = 0" apply (frule lt_nat_in_nat, assumption) apply (simp (no_asm_simp) add: div_def raw_div_less) done lemma raw_div_geq: "[| 0<n; n \<le> m; m:nat |] ==> raw_div(m,n) = succ(raw_div(m#-n, n))" apply (subgoal_tac "n \<noteq> 0") prefer 2 apply blast apply (frule lt_nat_in_nat, erule nat_succI) apply (rule raw_div_def [THEN def_transrec, THEN trans]) apply (simp (no_asm_simp) add: div_termination [THEN ltD] not_lt_iff_le [THEN iffD2] ) done lemma div_geq [simp]: "[| 0<n; n \<le> m; m:nat |] ==> m div n = succ ((m#-n) div n)" apply (frule lt_nat_in_nat, erule nat_succI) apply (simp (no_asm_simp) add: div_def raw_div_geq) done declare div_less [simp] div_geq [simp] (*A key result*) lemma mod_div_lemma: "[| m: nat; n: nat |] ==> (m div n)#*n #+ m mod n = m" apply (case_tac "n=0") apply (simp add: DIVISION_BY_ZERO_MOD) apply (simp add: nat_into_Ord [THEN Ord_0_lt_iff]) apply (erule complete_induct) apply (case_tac "x<n") txt{*case x<n*} apply (simp (no_asm_simp)) txt{*case @{term"n \<le> x"}*} apply (simp add: not_lt_iff_le add_assoc mod_geq div_termination [THEN ltD] add_diff_inverse) done lemma mod_div_equality_natify: "(m div n)#*n #+ m mod n = natify(m)" apply (subgoal_tac " (natify (m) div natify (n))#*natify (n) #+ natify (m) mod natify (n) = natify (m) ") apply force apply (subst mod_div_lemma, auto) done lemma mod_div_equality: "m: nat ==> (m div n)#*n #+ m mod n = m" apply (simp (no_asm_simp) add: mod_div_equality_natify) done subsection{*Further Facts about Remainder*} text{*(mainly for mutilated chess board)*} lemma mod_succ_lemma: "[| 0<n; m:nat; n:nat |] ==> succ(m) mod n = (if succ(m mod n) = n then 0 else succ(m mod n))" 
apply (erule complete_induct) apply (case_tac "succ (x) <n") txt{* case succ(x) < n *} apply (simp (no_asm_simp) add: nat_le_refl [THEN lt_trans] succ_neq_self) apply (simp add: ltD [THEN mem_imp_not_eq]) txt{* case @{term"n \<le> succ(x)"} *} apply (simp add: mod_geq not_lt_iff_le) apply (erule leE) apply (simp (no_asm_simp) add: mod_geq div_termination [THEN ltD] diff_succ) txt{*equality case*} apply (simp add: diff_self_eq_0) done lemma mod_succ: "n:nat ==> succ(m) mod n = (if succ(m mod n) = n then 0 else succ(m mod n))" apply (case_tac "n=0") apply (simp (no_asm_simp) add: natify_succ DIVISION_BY_ZERO_MOD) apply (subgoal_tac "natify (succ (m)) mod n = (if succ (natify (m) mod n) = n then 0 else succ (natify (m) mod n))") prefer 2 apply (subst natify_succ) apply (rule mod_succ_lemma) apply (auto simp del: natify_succ simp add: nat_into_Ord [THEN Ord_0_lt_iff]) done lemma mod_less_divisor: "[| 0<n; n:nat |] ==> m mod n < n" apply (subgoal_tac "natify (m) mod n < n") apply (rule_tac [2] i = "natify (m) " in complete_induct) apply (case_tac [3] "x<n", auto) txt{* case @{term"n \<le> x"}*} apply (simp add: mod_geq not_lt_iff_le div_termination [THEN ltD]) done lemma mod_1_eq [simp]: "m mod 1 = 0" by (cut_tac n = 1 in mod_less_divisor, auto) lemma mod2_cases: "b<2 ==> k mod 2 = b | k mod 2 = (if b=1 then 0 else 1)" apply (subgoal_tac "k mod 2: 2") prefer 2 apply (simp add: mod_less_divisor [THEN ltD]) apply (drule ltD, auto) done lemma mod2_succ_succ [simp]: "succ(succ(m)) mod 2 = m mod 2" apply (subgoal_tac "m mod 2: 2") prefer 2 apply (simp add: mod_less_divisor [THEN ltD]) apply (auto simp add: mod_succ) done lemma mod2_add_more [simp]: "(m#+m#+n) mod 2 = n mod 2" apply (subgoal_tac " (natify (m) #+natify (m) #+n) mod 2 = n mod 2") apply (rule_tac [2] n = "natify (m) " in nat_induct) apply auto done lemma mod2_add_self [simp]: "(m#+m) mod 2 = 0" by (cut_tac n = 0 in mod2_add_more, auto) subsection{*Additional theorems about @{text "\<le>"}*} lemma add_le_self: "m:nat ==> m \<le> (m #+ n)" apply (simp (no_asm_simp)) done lemma add_le_self2: "m:nat ==> m \<le> (n #+ m)" apply (simp (no_asm_simp)) done (*** Monotonicity of Multiplication ***) lemma mult_le_mono1: "[| i \<le> j; j:nat |] ==> (i#*k) \<le> (j#*k)" apply (subgoal_tac "natify (i) #*natify (k) \<le> j#*natify (k) ") apply (frule_tac [2] lt_nat_in_nat) apply (rule_tac [3] n = "natify (k) " in nat_induct) apply (simp_all add: add_le_mono) done (* @{text"\<le>"} monotonicity, BOTH arguments*) lemma mult_le_mono: "[| i \<le> j; k \<le> l; j:nat; l:nat |] ==> i#*k \<le> j#*l" apply (rule mult_le_mono1 [THEN le_trans], assumption+) apply (subst mult_commute, subst mult_commute, rule mult_le_mono1, assumption+) done (*strict, in 1st argument; proof is by induction on k>0. 
I can't see how to relax the typing conditions.*) lemma mult_lt_mono2: "[| i<j; 0<k; j:nat; k:nat |] ==> k#*i < k#*j" apply (erule zero_lt_natE) apply (frule_tac [2] lt_nat_in_nat) apply (simp_all (no_asm_simp)) apply (induct_tac "x") apply (simp_all (no_asm_simp) add: add_lt_mono) done lemma mult_lt_mono1: "[| i<j; 0<k; j:nat; k:nat |] ==> i#*k < j#*k" apply (simp (no_asm_simp) add: mult_lt_mono2 mult_commute [of _ k]) done lemma add_eq_0_iff [iff]: "m#+n = 0 \<longleftrightarrow> natify(m)=0 & natify(n)=0" apply (subgoal_tac "natify (m) #+ natify (n) = 0 \<longleftrightarrow> natify (m) =0 & natify (n) =0") apply (rule_tac [2] n = "natify (m) " in natE) apply (rule_tac [4] n = "natify (n) " in natE) apply auto done lemma zero_lt_mult_iff [iff]: "0 < m#*n \<longleftrightarrow> 0 < natify(m) & 0 < natify(n)" apply (subgoal_tac "0 < natify (m) #*natify (n) \<longleftrightarrow> 0 < natify (m) & 0 < natify (n) ") apply (rule_tac [2] n = "natify (m) " in natE) apply (rule_tac [4] n = "natify (n) " in natE) apply (rule_tac [3] n = "natify (n) " in natE) apply auto done lemma mult_eq_1_iff [iff]: "m#*n = 1 \<longleftrightarrow> natify(m)=1 & natify(n)=1" apply (subgoal_tac "natify (m) #* natify (n) = 1 \<longleftrightarrow> natify (m) =1 & natify (n) =1") apply (rule_tac [2] n = "natify (m) " in natE) apply (rule_tac [4] n = "natify (n) " in natE) apply auto done lemma mult_is_zero: "[|m: nat; n: nat|] ==> (m #* n = 0) \<longleftrightarrow> (m = 0 | n = 0)" apply auto apply (erule natE) apply (erule_tac [2] natE, auto) done lemma mult_is_zero_natify [iff]: "(m #* n = 0) \<longleftrightarrow> (natify(m) = 0 | natify(n) = 0)" apply (cut_tac m = "natify (m) " and n = "natify (n) " in mult_is_zero) apply auto done subsection{*Cancellation Laws for Common Factors in Comparisons*} lemma mult_less_cancel_lemma: "[| k: nat; m: nat; n: nat |] ==> (m#*k < n#*k) \<longleftrightarrow> (0<k & m<n)" apply (safe intro!: mult_lt_mono1) apply (erule natE, auto) apply (rule not_le_iff_lt [THEN iffD1]) apply (drule_tac [3] not_le_iff_lt [THEN [2] rev_iffD2]) prefer 5 apply (blast intro: mult_le_mono1, auto) done lemma mult_less_cancel2 [simp]: "(m#*k < n#*k) \<longleftrightarrow> (0 < natify(k) & natify(m) < natify(n))" apply (rule iff_trans) apply (rule_tac [2] mult_less_cancel_lemma, auto) done lemma mult_less_cancel1 [simp]: "(k#*m < k#*n) \<longleftrightarrow> (0 < natify(k) & natify(m) < natify(n))" apply (simp (no_asm) add: mult_less_cancel2 mult_commute [of k]) done lemma mult_le_cancel2 [simp]: "(m#*k \<le> n#*k) \<longleftrightarrow> (0 < natify(k) \<longrightarrow> natify(m) \<le> natify(n))" apply (simp (no_asm_simp) add: not_lt_iff_le [THEN iff_sym]) apply auto done lemma mult_le_cancel1 [simp]: "(k#*m \<le> k#*n) \<longleftrightarrow> (0 < natify(k) \<longrightarrow> natify(m) \<le> natify(n))" apply (simp (no_asm_simp) add: not_lt_iff_le [THEN iff_sym]) apply auto done lemma mult_le_cancel_le1: "k \<in> nat ==> k #* m \<le> k \<longleftrightarrow> (0 < k \<longrightarrow> natify(m) \<le> 1)" by (cut_tac k = k and m = m and n = 1 in mult_le_cancel1, auto) lemma Ord_eq_iff_le: "[| Ord(m); Ord(n) |] ==> m=n \<longleftrightarrow> (m \<le> n & n \<le> m)" by (blast intro: le_anti_sym) lemma mult_cancel2_lemma: "[| k: nat; m: nat; n: nat |] ==> (m#*k = n#*k) \<longleftrightarrow> (m=n | k=0)" apply (simp (no_asm_simp) add: Ord_eq_iff_le [of "m#*k"] Ord_eq_iff_le [of m]) apply (auto simp add: Ord_0_lt_iff) done lemma mult_cancel2 [simp]: "(m#*k = n#*k) \<longleftrightarrow> (natify(m) = natify(n) | 
natify(k) = 0)" apply (rule iff_trans) apply (rule_tac [2] mult_cancel2_lemma, auto) done lemma mult_cancel1 [simp]: "(k#*m = k#*n) \<longleftrightarrow> (natify(m) = natify(n) | natify(k) = 0)" apply (simp (no_asm) add: mult_cancel2 mult_commute [of k]) done (** Cancellation law for division **) lemma div_cancel_raw: "[| 0<n; 0<k; k:nat; m:nat; n:nat |] ==> (k#*m) div (k#*n) = m div n" apply (erule_tac i = m in complete_induct) apply (case_tac "x<n") apply (simp add: div_less zero_lt_mult_iff mult_lt_mono2) apply (simp add: not_lt_iff_le zero_lt_mult_iff le_refl [THEN mult_le_mono] div_geq diff_mult_distrib2 [symmetric] div_termination [THEN ltD]) done lemma div_cancel: "[| 0 < natify(n); 0 < natify(k) |] ==> (k#*m) div (k#*n) = m div n" apply (cut_tac k = "natify (k) " and m = "natify (m)" and n = "natify (n)" in div_cancel_raw) apply auto done subsection{*More Lemmas about Remainder*} lemma mult_mod_distrib_raw: "[| k:nat; m:nat; n:nat |] ==> (k#*m) mod (k#*n) = k #* (m mod n)" apply (case_tac "k=0") apply (simp add: DIVISION_BY_ZERO_MOD) apply (case_tac "n=0") apply (simp add: DIVISION_BY_ZERO_MOD) apply (simp add: nat_into_Ord [THEN Ord_0_lt_iff]) apply (erule_tac i = m in complete_induct) apply (case_tac "x<n") apply (simp (no_asm_simp) add: mod_less zero_lt_mult_iff mult_lt_mono2) apply (simp add: not_lt_iff_le zero_lt_mult_iff le_refl [THEN mult_le_mono] mod_geq diff_mult_distrib2 [symmetric] div_termination [THEN ltD]) done lemma mod_mult_distrib2: "k #* (m mod n) = (k#*m) mod (k#*n)" apply (cut_tac k = "natify (k) " and m = "natify (m)" and n = "natify (n)" in mult_mod_distrib_raw) apply auto done lemma mult_mod_distrib: "(m mod n) #* k = (m#*k) mod (n#*k)" apply (simp (no_asm) add: mult_commute mod_mult_distrib2) done lemma mod_add_self2_raw: "n \<in> nat ==> (m #+ n) mod n = m mod n" apply (subgoal_tac " (n #+ m) mod n = (n #+ m #- n) mod n") apply (simp add: add_commute) apply (subst mod_geq [symmetric], auto) done lemma mod_add_self2 [simp]: "(m #+ n) mod n = m mod n" apply (cut_tac n = "natify (n) " in mod_add_self2_raw) apply auto done lemma mod_add_self1 [simp]: "(n#+m) mod n = m mod n" apply (simp (no_asm_simp) add: add_commute mod_add_self2) done lemma mod_mult_self1_raw: "k \<in> nat ==> (m #+ k#*n) mod n = m mod n" apply (erule nat_induct) apply (simp_all (no_asm_simp) add: add_left_commute [of _ n]) done lemma mod_mult_self1 [simp]: "(m #+ k#*n) mod n = m mod n" apply (cut_tac k = "natify (k) " in mod_mult_self1_raw) apply auto done lemma mod_mult_self2 [simp]: "(m #+ n#*k) mod n = m mod n" apply (simp (no_asm) add: mult_commute mod_mult_self1) done (*Lemma for gcd*) lemma mult_eq_self_implies_10: "m = m#*n ==> natify(n)=1 | m=0" apply (subgoal_tac "m: nat") prefer 2 apply (erule ssubst) apply simp apply (rule disjCI) apply (drule sym) apply (rule Ord_linear_lt [of "natify(n)" 1]) apply simp_all apply (subgoal_tac "m #* n = 0", simp) apply (subst mult_natify2 [symmetric]) apply (simp del: mult_natify2) apply (drule nat_into_Ord [THEN Ord_0_lt, THEN [2] mult_lt_mono2], auto) done lemma less_imp_succ_add [rule_format]: "[| m<n; n: nat |] ==> \<exists>k\<in>nat. n = succ(m#+k)" apply (frule lt_nat_in_nat, assumption) apply (erule rev_mp) apply (induct_tac "n") apply (simp_all (no_asm) add: le_iff) apply (blast elim!: leE intro!: add_0_right [symmetric] add_succ_right [symmetric]) done lemma less_iff_succ_add: "[| m: nat; n: nat |] ==> (m<n) \<longleftrightarrow> (\<exists>k\<in>nat. 
n = succ(m#+k))" by (auto intro: less_imp_succ_add) lemma add_lt_elim2: "\<lbrakk>a #+ d = b #+ c; a < b; b \<in> nat; c \<in> nat; d \<in> nat\<rbrakk> \<Longrightarrow> c < d" by (drule less_imp_succ_add, auto) lemma add_le_elim2: "\<lbrakk>a #+ d = b #+ c; a \<le> b; b \<in> nat; c \<in> nat; d \<in> nat\<rbrakk> \<Longrightarrow> c \<le> d" by (drule less_imp_succ_add, auto) subsubsection{*More Lemmas About Difference*} lemma diff_is_0_lemma: "[| m: nat; n: nat |] ==> m #- n = 0 \<longleftrightarrow> m \<le> n" apply (rule_tac m = m and n = n in diff_induct, simp_all) done lemma diff_is_0_iff: "m #- n = 0 \<longleftrightarrow> natify(m) \<le> natify(n)" by (simp add: diff_is_0_lemma [symmetric]) lemma nat_lt_imp_diff_eq_0: "[| a:nat; b:nat; a<b |] ==> a #- b = 0" by (simp add: diff_is_0_iff le_iff) lemma raw_nat_diff_split: "[| a:nat; b:nat |] ==> (P(a #- b)) \<longleftrightarrow> ((a < b \<longrightarrow>P(0)) & (\<forall>d\<in>nat. a = b #+ d \<longrightarrow> P(d)))" apply (case_tac "a < b") apply (force simp add: nat_lt_imp_diff_eq_0) apply (rule iffI, force, simp) apply (drule_tac x="a#-b" in bspec) apply (simp_all add: Ordinal.not_lt_iff_le add_diff_inverse) done lemma nat_diff_split: "(P(a #- b)) \<longleftrightarrow> (natify(a) < natify(b) \<longrightarrow>P(0)) & (\<forall>d\<in>nat. natify(a) = b #+ d \<longrightarrow> P(d))" apply (cut_tac P=P and a="natify(a)" and b="natify(b)" in raw_nat_diff_split) apply simp_all done text{*Difference and less-than*} lemma diff_lt_imp_lt: "[|(k#-i) < (k#-j); i\<in>nat; j\<in>nat; k\<in>nat|] ==> j<i" apply (erule rev_mp) apply (simp split add: nat_diff_split, auto) apply (blast intro: add_le_self lt_trans1) apply (rule not_le_iff_lt [THEN iffD1], auto) apply (subgoal_tac "i #+ da < j #+ d", force) apply (blast intro: add_le_lt_mono) done lemma lt_imp_diff_lt: "[|j<i; i\<le>k; k\<in>nat|] ==> (k#-i) < (k#-j)" apply (frule le_in_nat, assumption) apply (frule lt_nat_in_nat, assumption) apply (simp split add: nat_diff_split, auto) apply (blast intro: lt_asym lt_trans2) apply (blast intro: lt_irrefl lt_trans2) apply (rule not_le_iff_lt [THEN iffD1], auto) apply (subgoal_tac "j #+ d < i #+ da", force) apply (blast intro: add_lt_le_mono) done lemma diff_lt_iff_lt: "[|i\<le>k; j\<in>nat; k\<in>nat|] ==> (k#-i) < (k#-j) \<longleftrightarrow> j<i" apply (frule le_in_nat, assumption) apply (blast intro: lt_imp_diff_lt diff_lt_imp_lt) done end
[STATEMENT] lemma perm_\<sigma>\<^sub>1' : "\<sigma>\<^sub>1' = \<lparr> heap = Map.empty (oid8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9) (oid7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8) (oid6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7) (oid5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6) \<^cancel>\<open>oid4\<close> (oid3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4) (oid2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3) (oid1 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2) (oid0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1) , assocs = assocs \<sigma>\<^sub>1' \<rparr>" [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<sigma>\<^sub>1' = \<lparr>heap = [oid8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9, oid7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, oid6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, oid5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, oid3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, oid2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, oid1 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, oid0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1], assocs = assocs \<sigma>\<^sub>1'\<rparr> [PROOF STEP] proof - [PROOF STATE] proof (state) goal (1 subgoal): 1. \<sigma>\<^sub>1' = \<lparr>heap = [oid8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9, oid7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, oid6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, oid5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, oid3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, oid2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, oid1 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, oid0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1], assocs = assocs \<sigma>\<^sub>1'\<rparr> [PROOF STEP] note P = fun_upd_twist [PROOF STATE] proof (state) this: ?a \<noteq> ?c \<Longrightarrow> ?m(?a := ?b, ?c := ?d) = ?m(?c := ?d, ?a := ?b) goal (1 subgoal): 1. \<sigma>\<^sub>1' = \<lparr>heap = [oid8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9, oid7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, oid6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, oid5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, oid3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, oid2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, oid1 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, oid0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1], assocs = assocs \<sigma>\<^sub>1'\<rparr> [PROOF STEP] show ?thesis [PROOF STATE] proof (prove) goal (1 subgoal): 1. 
\<sigma>\<^sub>1' = \<lparr>heap = [oid8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9, oid7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, oid6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, oid5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, oid3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, oid2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, oid1 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, oid0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1], assocs = assocs \<sigma>\<^sub>1'\<rparr> [PROOF STEP] apply(simp add: \<sigma>\<^sub>1'_def oid0_def oid1_def oid2_def oid3_def oid4_def oid5_def oid6_def oid7_def oid8_def) [PROOF STATE] proof (prove) goal (1 subgoal): 1. [0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9] = [8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1] [PROOF STEP] apply(subst (1) P, simp) [PROOF STATE] proof (prove) goal (1 subgoal): 1. [Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9] = [8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1] [PROOF STEP] apply(subst (2) P, simp) [PROOF STATE] proof (prove) goal (1 subgoal): 1. 
[Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9] = [8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1] [PROOF STEP] apply(subst (1) P, simp) [PROOF STATE] proof (prove) goal (1 subgoal): 1. [2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9] = [8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1] [PROOF STEP] apply(subst (3) P, simp) [PROOF STATE] proof (prove) goal (1 subgoal): 1. 
[2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9] = [8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1] [PROOF STEP] apply(subst (2) P, simp) [PROOF STATE] proof (prove) goal (1 subgoal): 1. [2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9] = [8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1] [PROOF STEP] apply(subst (1) P, simp) [PROOF STATE] proof (prove) goal (1 subgoal): 1. 
[3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9] = [8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1] [PROOF STEP] apply(subst (4) P, simp) [PROOF STATE] proof (prove) goal (1 subgoal): 1. [3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9] = [8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1] [PROOF STEP] apply(subst (3) P, simp) [PROOF STATE] proof (prove) goal (1 subgoal): 1. 
[3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9] = [8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1] [PROOF STEP] apply(subst (2) P, simp) [PROOF STATE] proof (prove) goal (1 subgoal): 1. [3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9] = [8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1] [PROOF STEP] apply(subst (1) P, simp) [PROOF STATE] proof (prove) goal (1 subgoal): 1. 
[5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9] = [8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1] [PROOF STEP] apply(subst (5) P, simp) [PROOF STATE] proof (prove) goal (1 subgoal): 1. [5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9] = [8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1] [PROOF STEP] apply(subst (4) P, simp) [PROOF STATE] proof (prove) goal (1 subgoal): 1. 
[5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9] = [8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1] [PROOF STEP] apply(subst (3) P, simp) [PROOF STATE] proof (prove) goal (1 subgoal): 1. [5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9] = [8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1] [PROOF STEP] apply(subst (2) P, simp) [PROOF STATE] proof (prove) goal (1 subgoal): 1. 
[5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9] = [8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1] [PROOF STEP] apply(subst (1) P, simp) [PROOF STATE] proof (prove) goal (1 subgoal): 1. [6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9] = [8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1] [PROOF STEP] apply(subst (6) P, simp) [PROOF STATE] proof (prove) goal (1 subgoal): 1. 
[6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1, 8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9] = [8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1] [PROOF STEP] apply(subst (5) P, simp) [PROOF STATE] proof (prove) goal (1 subgoal): 1. [6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1, 8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9] = [8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1] [PROOF STEP] apply(subst (4) P, simp) [PROOF STATE] proof (prove) goal (1 subgoal): 1. 
[6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1, 8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9] = [8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1] [PROOF STEP] apply(subst (3) P, simp) [PROOF STATE] proof (prove) goal (1 subgoal): 1. [6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1, 8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9] = [8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1] [PROOF STEP] apply(subst (2) P, simp) [PROOF STATE] proof (prove) goal (1 subgoal): 1. 
[6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1, 8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9] = [8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1] [PROOF STEP] apply(subst (1) P, simp) [PROOF STATE] proof (prove) goal (1 subgoal): 1. [7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1, 8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9] = [8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1] [PROOF STEP] apply(subst (7) P, simp) [PROOF STATE] proof (prove) goal (1 subgoal): 1. 
[7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1] = [8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1] [PROOF STEP] apply(subst (6) P, simp) [PROOF STATE] proof (prove) goal (1 subgoal): 1. [7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, 8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1] = [8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1] [PROOF STEP] apply(subst (5) P, simp) [PROOF STATE] proof (prove) goal (1 subgoal): 1. 
[7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1] = [8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1] [PROOF STEP] apply(subst (4) P, simp) [PROOF STATE] proof (prove) goal (1 subgoal): 1. [7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1] = [8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1] [PROOF STEP] apply(subst (3) P, simp) [PROOF STATE] proof (prove) goal (1 subgoal): 1. 
[7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1] = [8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1] [PROOF STEP] apply(subst (2) P, simp) [PROOF STATE] proof (prove) goal (1 subgoal): 1. [7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1] = [8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1] [PROOF STEP] apply(subst (1) P, simp) [PROOF STATE] proof (prove) goal (1 subgoal): 1. 
[8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1] = [8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9, 7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, 6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, 5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, 3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, 2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, Suc 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, 0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1] [PROOF STEP] by(simp) [PROOF STATE] proof (state) this: \<sigma>\<^sub>1' = \<lparr>heap = [oid8 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person9, oid7 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person8, oid6 \<mapsto> in\<^sub>O\<^sub>c\<^sub>l\<^sub>A\<^sub>n\<^sub>y person7, oid5 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person6, oid3 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person4, oid2 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person3, oid1 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person2, oid0 \<mapsto> in\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n person1], assocs = assocs \<sigma>\<^sub>1'\<rparr> goal: No subgoals! [PROOF STEP] qed
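Stripped of the printed states, the script above is an insertion sort on map updates: each `apply(subst (i) P, simp)` step instantiates `fun_upd_twist`,
$$m(a := b,\, c := d) \;=\; m(c := d,\, a := b) \qquad (a \neq c),$$
to swap one adjacent pair of updates with distinct keys, reordering the bindings for oid0 through oid8 step by step until both sides of the equation list their updates in the same order, at which point the final `by(simp)` closes the goal.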
/- Copyright (c) 2021 Rémy Degenne. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Rémy Degenne -/ import measure_theory.measure.measure_space /-! # Typeclasses for measurability of lattice operations In this file we define classes `has_measurable_sup` and `has_measurable_inf` and prove dot-style lemmas (`measurable.sup`, `ae_measurable.sup` etc.). For binary operations we define two typeclasses: - `has_measurable_sup` says that both left and right sup are measurable; - `has_measurable_sup₂` says that `λ p : α × α, p.1 ⊔ p.2` is measurable, and similarly for other binary operations. The reason for introducing these classes is that in the case of a topological space `α` equipped with the Borel `σ`-algebra, instances for `has_measurable_sup₂` etc. require `α` to have a second countable topology. For instances relating, e.g., `has_continuous_sup` to `has_measurable_sup` see file `measure_theory.borel_space`. ## Tags measurable function, lattice operation -/ open measure_theory /-- We say that a type `has_measurable_sup` if `((⊔) c)` and `(⊔ c)` are measurable functions. For a typeclass assuming measurability of `uncurry (⊔)` see `has_measurable_sup₂`. -/ class has_measurable_sup (M : Type*) [measurable_space M] [has_sup M] : Prop := (measurable_const_sup : ∀ c : M, measurable ((⊔) c)) (measurable_sup_const : ∀ c : M, measurable (⊔ c)) /-- We say that a type `has_measurable_sup₂` if `uncurry (⊔)` is a measurable function. For a typeclass assuming measurability of `((⊔) c)` and `(⊔ c)` see `has_measurable_sup`. -/ class has_measurable_sup₂ (M : Type*) [measurable_space M] [has_sup M] : Prop := (measurable_sup : measurable (λ p : M × M, p.1 ⊔ p.2)) export has_measurable_sup₂ (measurable_sup) has_measurable_sup (measurable_const_sup measurable_sup_const) /-- We say that a type `has_measurable_inf` if `((⊓) c)` and `(⊓ c)` are measurable functions. For a typeclass assuming measurability of `uncurry (⊓)` see `has_measurable_inf₂`. -/ class has_measurable_inf (M : Type*) [measurable_space M] [has_inf M] : Prop := (measurable_const_inf : ∀ c : M, measurable ((⊓) c)) (measurable_inf_const : ∀ c : M, measurable (⊓ c)) /-- We say that a type `has_measurable_inf₂` if `uncurry (⊓)` is a measurable function. For a typeclass assuming measurability of `((⊓) c)` and `(⊓ c)` see `has_measurable_inf`. 
-/ class has_measurable_inf₂ (M : Type*) [measurable_space M] [has_inf M] : Prop := (measurable_inf : measurable (λ p : M × M, p.1 ⊓ p.2)) export has_measurable_inf₂ (measurable_inf) has_measurable_inf (measurable_const_inf measurable_inf_const) variables {M : Type*} [measurable_space M] section order_dual @[priority 100] instance order_dual.has_measurable_sup [has_inf M] [has_measurable_inf M] : has_measurable_sup (order_dual M) := ⟨@measurable_const_inf M _ _ _, @measurable_inf_const M _ _ _⟩ @[priority 100] instance order_dual.has_measurable_inf [has_sup M] [has_measurable_sup M] : has_measurable_inf (order_dual M) := ⟨@measurable_const_sup M _ _ _, @measurable_sup_const M _ _ _⟩ @[priority 100] instance order_dual.has_measurable_sup₂ [has_inf M] [has_measurable_inf₂ M] : has_measurable_sup₂ (order_dual M) := ⟨@measurable_inf M _ _ _⟩ @[priority 100] instance order_dual.has_measurable_inf₂ [has_sup M] [has_measurable_sup₂ M] : has_measurable_inf₂ (order_dual M) := ⟨@measurable_sup M _ _ _⟩ end order_dual variables {α : Type*} {m : measurable_space α} {μ : measure α} {f g : α → M} include m section sup variables [has_sup M] section measurable_sup variables [has_measurable_sup M] @[measurability] lemma measurable.const_sup (hf : measurable f) (c : M) : measurable (λ x, c ⊔ f x) := (measurable_const_sup c).comp hf @[measurability] lemma ae_measurable.const_sup (hf : ae_measurable f μ) (c : M) : ae_measurable (λ x, c ⊔ f x) μ := (has_measurable_sup.measurable_const_sup c).comp_ae_measurable hf @[measurability] lemma measurable.sup_const (hf : measurable f) (c : M) : measurable (λ x, f x ⊔ c) := (measurable_sup_const c).comp hf @[measurability] lemma ae_measurable.sup_const (hf : ae_measurable f μ) (c : M) : ae_measurable (λ x, f x ⊔ c) μ := (measurable_sup_const c).comp_ae_measurable hf end measurable_sup section measurable_sup₂ variables [has_measurable_sup₂ M] @[measurability] lemma measurable.sup' (hf : measurable f) (hg : measurable g) : measurable (f ⊔ g) := measurable_sup.comp (hf.prod_mk hg) @[measurability] lemma measurable.sup (hf : measurable f) (hg : measurable g) : measurable (λ a, f a ⊔ g a) := measurable_sup.comp (hf.prod_mk hg) @[measurability] lemma ae_measurable.sup' (hf : ae_measurable f μ) (hg : ae_measurable g μ) : ae_measurable (f ⊔ g) μ := measurable_sup.comp_ae_measurable (hf.prod_mk hg) @[measurability] lemma ae_measurable.sup (hf : ae_measurable f μ) (hg : ae_measurable g μ) : ae_measurable (λ a, f a ⊔ g a) μ := measurable_sup.comp_ae_measurable (hf.prod_mk hg) omit m @[priority 100] instance has_measurable_sup₂.to_has_measurable_sup : has_measurable_sup M := ⟨λ c, measurable_const.sup measurable_id, λ c, measurable_id.sup measurable_const⟩ include m end measurable_sup₂ end sup section inf variables [has_inf M] section measurable_inf variables [has_measurable_inf M] @[measurability] lemma measurable.const_inf (hf : measurable f) (c : M) : measurable (λ x, c ⊓ f x) := (measurable_const_inf c).comp hf @[measurability] lemma ae_measurable.const_inf (hf : ae_measurable f μ) (c : M) : ae_measurable (λ x, c ⊓ f x) μ := (has_measurable_inf.measurable_const_inf c).comp_ae_measurable hf @[measurability] lemma measurable.inf_const (hf : measurable f) (c : M) : measurable (λ x, f x ⊓ c) := (measurable_inf_const c).comp hf @[measurability] lemma ae_measurable.inf_const (hf : ae_measurable f μ) (c : M) : ae_measurable (λ x, f x ⊓ c) μ := (measurable_inf_const c).comp_ae_measurable hf end measurable_inf section measurable_inf₂ variables [has_measurable_inf₂ M] @[measurability] 
lemma measurable.inf' (hf : measurable f) (hg : measurable g) : measurable (f ⊓ g) := measurable_inf.comp (hf.prod_mk hg) @[measurability] lemma measurable.inf (hf : measurable f) (hg : measurable g) : measurable (λ a, f a ⊓ g a) := measurable_inf.comp (hf.prod_mk hg) @[measurability] lemma ae_measurable.inf' (hf : ae_measurable f μ) (hg : ae_measurable g μ) : ae_measurable (f ⊓ g) μ := measurable_inf.comp_ae_measurable (hf.prod_mk hg) @[measurability] lemma ae_measurable.inf (hf : ae_measurable f μ) (hg : ae_measurable g μ) : ae_measurable (λ a, f a ⊓ g a) μ := measurable_inf.comp_ae_measurable (hf.prod_mk hg) omit m @[priority 100] instance has_measurable_inf₂.to_has_measurable_inf : has_measurable_inf M := ⟨λ c, measurable_const.inf measurable_id, λ c, measurable_id.inf measurable_const⟩ include m end measurable_inf₂ end inf
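A minimal usage sketch for these classes (an illustrative example, not part of the original file; it relies only on the declarations above):

example {α M : Type*} [measurable_space α] [measurable_space M]
  [has_sup M] [has_measurable_sup₂ M] {f g : α → M}
  (hf : measurable f) (hg : measurable g) :
  measurable (λ x, f x ⊔ g x) :=
hf.sup hg -- `measurable.sup`, via measurability of `uncurry (⊔)`

example {α M : Type*} [measurable_space α] [measurable_space M]
  [has_inf M] [has_measurable_inf₂ M] {f g : α → M}
  (hf : measurable f) (hg : measurable g) :
  measurable (f ⊓ g) :=
hf.inf' hg -- the lattice-of-functions variant `measurable.inf'`

Note that `has_measurable_sup₂.to_has_measurable_sup` makes the binary classes strictly stronger, so under the same hypotheses the constant variants such as `hf.sup_const c` and `hf.const_inf c` are also available.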
function a = nnz(t) %NNZ Number of nonzeros in sparse tensor. % % NNZ(T) is the number of nonzero elements in T. % % See also SPTENSOR, SPTENSOR/FIND. % %MATLAB Tensor Toolbox. %Copyright 2015, Sandia Corporation. % This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. % http://www.sandia.gov/~tgkolda/TensorToolbox. % Copyright (2015) Sandia Corporation. Under the terms of Contract % DE-AC04-94AL85000, there is a non-exclusive license for use of this % work by or on behalf of the U.S. Government. Export of this data may % require a license from the United States Government. % The full license terms can be found in the file LICENSE.txt if isempty(t.subs) a = 0; else a = size(t.subs,1); end
State Before: α : Type u_1 β : Type u_2 γ : Type ?u.549973 ι : Type ?u.549976 inst✝³ : Countable ι m : MeasurableSpace α μ : Measure α inst✝² : TopologicalSpace β f g : α → β inst✝¹ : Zero β inst✝ : T2Space β hf : AEFinStronglyMeasurable f μ ⊢ ∃ t, MeasurableSet t ∧ f =ᶠ[ae (Measure.restrict μ (tᶜ))] 0 ∧ SigmaFinite (Measure.restrict μ t) State After: case intro.intro α : Type u_1 β : Type u_2 γ : Type ?u.549973 ι : Type ?u.549976 inst✝³ : Countable ι m : MeasurableSpace α μ : Measure α inst✝² : TopologicalSpace β f g✝ : α → β inst✝¹ : Zero β inst✝ : T2Space β g : α → β hg : FinStronglyMeasurable g μ hfg : f =ᶠ[ae μ] g ⊢ ∃ t, MeasurableSet t ∧ f =ᶠ[ae (Measure.restrict μ (tᶜ))] 0 ∧ SigmaFinite (Measure.restrict μ t) Tactic: rcases hf with ⟨g, hg, hfg⟩ State Before: case intro.intro α : Type u_1 β : Type u_2 γ : Type ?u.549973 ι : Type ?u.549976 inst✝³ : Countable ι m : MeasurableSpace α μ : Measure α inst✝² : TopologicalSpace β f g✝ : α → β inst✝¹ : Zero β inst✝ : T2Space β g : α → β hg : FinStronglyMeasurable g μ hfg : f =ᶠ[ae μ] g ⊢ ∃ t, MeasurableSet t ∧ f =ᶠ[ae (Measure.restrict μ (tᶜ))] 0 ∧ SigmaFinite (Measure.restrict μ t) State After: case intro.intro.intro.intro.intro α : Type u_1 β : Type u_2 γ : Type ?u.549973 ι : Type ?u.549976 inst✝³ : Countable ι m : MeasurableSpace α μ : Measure α inst✝² : TopologicalSpace β f g✝ : α → β inst✝¹ : Zero β inst✝ : T2Space β g : α → β hg : FinStronglyMeasurable g μ hfg : f =ᶠ[ae μ] g t : Set α ht : MeasurableSet t hgt_zero : ∀ (x : α), x ∈ tᶜ → g x = 0 htμ : SigmaFinite (Measure.restrict μ t) ⊢ ∃ t, MeasurableSet t ∧ f =ᶠ[ae (Measure.restrict μ (tᶜ))] 0 ∧ SigmaFinite (Measure.restrict μ t) Tactic: obtain ⟨t, ht, hgt_zero, htμ⟩ := hg.exists_set_sigmaFinite State Before: case intro.intro.intro.intro.intro α : Type u_1 β : Type u_2 γ : Type ?u.549973 ι : Type ?u.549976 inst✝³ : Countable ι m : MeasurableSpace α μ : Measure α inst✝² : TopologicalSpace β f g✝ : α → β inst✝¹ : Zero β inst✝ : T2Space β g : α → β hg : FinStronglyMeasurable g μ hfg : f =ᶠ[ae μ] g t : Set α ht : MeasurableSet t hgt_zero : ∀ (x : α), x ∈ tᶜ → g x = 0 htμ : SigmaFinite (Measure.restrict μ t) ⊢ ∃ t, MeasurableSet t ∧ f =ᶠ[ae (Measure.restrict μ (tᶜ))] 0 ∧ SigmaFinite (Measure.restrict μ t) State After: case intro.intro.intro.intro.intro α : Type u_1 β : Type u_2 γ : Type ?u.549973 ι : Type ?u.549976 inst✝³ : Countable ι m : MeasurableSpace α μ : Measure α inst✝² : TopologicalSpace β f g✝ : α → β inst✝¹ : Zero β inst✝ : T2Space β g : α → β hg : FinStronglyMeasurable g μ hfg : f =ᶠ[ae μ] g t : Set α ht : MeasurableSet t hgt_zero : ∀ (x : α), x ∈ tᶜ → g x = 0 htμ : SigmaFinite (Measure.restrict μ t) ⊢ f =ᶠ[ae (Measure.restrict μ (tᶜ))] 0 Tactic: refine' ⟨t, ht, _, htμ⟩ State Before: case intro.intro.intro.intro.intro α : Type u_1 β : Type u_2 γ : Type ?u.549973 ι : Type ?u.549976 inst✝³ : Countable ι m : MeasurableSpace α μ : Measure α inst✝² : TopologicalSpace β f g✝ : α → β inst✝¹ : Zero β inst✝ : T2Space β g : α → β hg : FinStronglyMeasurable g μ hfg : f =ᶠ[ae μ] g t : Set α ht : MeasurableSet t hgt_zero : ∀ (x : α), x ∈ tᶜ → g x = 0 htμ : SigmaFinite (Measure.restrict μ t) ⊢ f =ᶠ[ae (Measure.restrict μ (tᶜ))] 0 State After: case intro.intro.intro.intro.intro α : Type u_1 β : Type u_2 γ : Type ?u.549973 ι : Type ?u.549976 inst✝³ : Countable ι m : MeasurableSpace α μ : Measure α inst✝² : TopologicalSpace β f g✝ : α → β inst✝¹ : Zero β inst✝ : T2Space β g : α → β hg : FinStronglyMeasurable g μ hfg : f =ᶠ[ae μ] g t : Set α ht : MeasurableSet t hgt_zero : ∀ (x : α), x ∈ tᶜ → g x 
= 0 htμ : SigmaFinite (Measure.restrict μ t) ⊢ (fun x => g x) =ᶠ[ae (Measure.restrict μ (tᶜ))] 0 Tactic: refine' EventuallyEq.trans (ae_restrict_of_ae hfg) _ State Before: case intro.intro.intro.intro.intro α : Type u_1 β : Type u_2 γ : Type ?u.549973 ι : Type ?u.549976 inst✝³ : Countable ι m : MeasurableSpace α μ : Measure α inst✝² : TopologicalSpace β f g✝ : α → β inst✝¹ : Zero β inst✝ : T2Space β g : α → β hg : FinStronglyMeasurable g μ hfg : f =ᶠ[ae μ] g t : Set α ht : MeasurableSet t hgt_zero : ∀ (x : α), x ∈ tᶜ → g x = 0 htμ : SigmaFinite (Measure.restrict μ t) ⊢ (fun x => g x) =ᶠ[ae (Measure.restrict μ (tᶜ))] 0 State After: case intro.intro.intro.intro.intro α : Type u_1 β : Type u_2 γ : Type ?u.549973 ι : Type ?u.549976 inst✝³ : Countable ι m : MeasurableSpace α μ : Measure α inst✝² : TopologicalSpace β f g✝ : α → β inst✝¹ : Zero β inst✝ : T2Space β g : α → β hg : FinStronglyMeasurable g μ hfg : f =ᶠ[ae μ] g t : Set α ht : MeasurableSet t hgt_zero : ∀ (x : α), x ∈ tᶜ → g x = 0 htμ : SigmaFinite (Measure.restrict μ t) ⊢ ∀ᵐ (x : α) ∂μ, x ∈ tᶜ → g x = OfNat.ofNat 0 x Tactic: rw [EventuallyEq, ae_restrict_iff' ht.compl] State Before: case intro.intro.intro.intro.intro α : Type u_1 β : Type u_2 γ : Type ?u.549973 ι : Type ?u.549976 inst✝³ : Countable ι m : MeasurableSpace α μ : Measure α inst✝² : TopologicalSpace β f g✝ : α → β inst✝¹ : Zero β inst✝ : T2Space β g : α → β hg : FinStronglyMeasurable g μ hfg : f =ᶠ[ae μ] g t : Set α ht : MeasurableSet t hgt_zero : ∀ (x : α), x ∈ tᶜ → g x = 0 htμ : SigmaFinite (Measure.restrict μ t) ⊢ ∀ᵐ (x : α) ∂μ, x ∈ tᶜ → g x = OfNat.ofNat 0 x State After: no goals Tactic: exact eventually_of_forall hgt_zero
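Reassembled from the trace, the tactic steps prove the following statement in one piece. The sketch below assumes `open MeasureTheory Filter` and the imports implicit in the trace; the primed name is chosen here for illustration (the goal matches mathlib's `AEFinStronglyMeasurable.exists_set_sigmaFinite`).

theorem AEFinStronglyMeasurable.exists_set_sigmaFinite' {α β : Type*}
    {m : MeasurableSpace α} {μ : Measure α} [TopologicalSpace β] [Zero β]
    [T2Space β] {f : α → β} (hf : AEFinStronglyMeasurable f μ) :
    ∃ t, MeasurableSet t ∧ f =ᶠ[ae (Measure.restrict μ (tᶜ))] 0 ∧
      SigmaFinite (Measure.restrict μ t) := by
  -- pick a FinStronglyMeasurable representative g with f = g a.e.
  rcases hf with ⟨g, hg, hfg⟩
  -- g vanishes outside a measurable set t on which μ restricts σ-finitely
  obtain ⟨t, ht, hgt_zero, htμ⟩ := hg.exists_set_sigmaFinite
  refine' ⟨t, ht, _, htμ⟩
  -- transfer the statement from g to f along the a.e. equality
  refine' EventuallyEq.trans (ae_restrict_of_ae hfg) _
  rw [EventuallyEq, ae_restrict_iff' ht.compl]
  exact eventually_of_forall hgt_zero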